seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
18363144949 | a,b=map(int,input().split())
# Find an integer i equidistant from a and b (their exact midpoint), if any.
count = 0  # number of candidate midpoints found
num = 0    # the last midpoint found
# only integers near (a+b)/2 can be equidistant, so a small +/-5 window suffices
for i in range(int((a+b)/2)-5, int((a+b)/2)+5):
    if abs(a-i) == abs(b-i):
        count += 1
        num = i
# an exact integer midpoint exists only when a+b is even
if count > 0:
    print(num)
else:
print("IMPOSSIBLE") | Aasthaengg/IBMdataset | Python_codes/p02957/s402072802.py | s402072802.py | py | 203 | python | en | code | 0 | github-code | 90 |
4946296430 | # coding:utf-8
# __autor__:'cyb'
# sqlite示例 查询 修改 删除
import sqlite3
connect = sqlite3.connect("testsqlite.db")
cursor = connect.cursor()
cursor.execute("""
SELECT id,name from student;
""")
student_list = cursor.fetchall()
print(student_list)
cursor.execute("""
SELECT * FROM student WHERE name="小青";
""")
student = cursor.fetchone()
print(student)
cursor.execute("""
SELECT * FROM student WHERE id=0;
""")
student2 = cursor.fetchone()
print(student2)
cursor.execute("""
SELECT * FROM student WHERE id>0;
""")
# student_list3 = cursor.fetchall()
student3 = cursor.fetchone()
# print(student_list3)
print(f'学生姓名{student3[1]}')
cursor.execute("""
UPDATE student SET name="大红" WHERE id=4
""")
connect.commit()
cursor.execute("""
select * from student;
""")
print(cursor.fetchall())
cursor.close()
connect.close()
"""
cursor.fetchall() 取回结果集,形如[(1,'小王'),(2,'小明')] 大列表,列表每一项是元组、是一行,元组里的每一项对应每一列的值。
cursor.fetchone() 取回一条数据,形如(2,'小青'),结果空返回None类型。如果select符合多条,返回多条结果里的第一条(形如id>0)。
cursor.fetchxxx() 方法为了节省内存和速度采用了生成器结构,只能取一次。
"""
"""
sql基础语法补充:
1.一张表一般都有一列主键,主键primary key 一般名叫 id ,字段类型一般为自增整数。当insert行内容时,sql语句可以不插入id列,数据库会帮你自动插入并自增。
主键不能重复。主键好处是确保数据一致性、方便查询。
2.如果工作中一个数据库连接实例下有多个库,那么表名要带上命名空间,例如main.student
3.丢弃表 drop。跟delete关键字相比更为严重,delete删除某行或清空表内容 表结构还在。而drop是完全删除丢弃整个表,内容和结构都删除。drop table[表名]。
4.字段被双引号括住,形如SELECT "id","name" FROM student;结果一样好处是避免数据库关键字导致的错误,当数据库解释器遇到引号时,会认为引号里的名字就是用户自定义的字段名而不是内置关键字,
"""
"""
数据库概念补充:
数据库的大概原理:数据库按树型结构存储,查找数据时只需比对几次就能查出来。数据量大时,查询时间成对数慢速增长。
索引:index,目录,索引会占据一定存储空间,在数据库中以树型数据结构存储,建立的是目录到硬盘存储的数据的映射。就好像平时看的书籍。创建主键的那一列会自动创建索引。一般在查询经常比较的字段上创建索引。(如id列、phone列)、优点大概提高select效率。缺点是占据更多的硬盘存储空间。
事务:
transaction 。当又多句sql语句的时候,例如sqll)插入银行交易表一行数据金额100元,sql2修改刚才插入的一行数据金额为98元,但执行sql的时候由于用户拥堵等原因执行失败,这事再执行sql2必然错误或误修改其他的正常数据。为了避免这种情况,把这两句sql都放在一个事务执行种,只要一个事务中任意一条sql执行失败,那么其他已执行的sql会回到修改前状态(回滚rolling)
,只有当所有sql执行成功,才会一起commit生效。简单来说,事务要么都执行,要么出错都不行,优点保证数据一致性。
"""
"""
作业四:cursor.fetchall() 默认返回每一行为元组格式[(1, '小王'), (2, '小明')] , 百度“sqlite python dict”,设置方法的返回值为字典形式[{'id':1, 'name':小王'}, {'id':2, 'name':小明'}]
""" | wantwantwant/tutorial | L10数据库基础sqlite(重要)/《3》sqlit_select.py | 《3》sqlit_select.py | py | 3,722 | python | zh | code | 1 | github-code | 90 |
10972727567 | # -*- coding: utf-8 -*-
# author: itimor
import requests
import datetime
salt_info = {
"url": "http://salt.tbsysmanager.com:8080",
"username": "saltdev",
"password": "FF01VeF4hs1FqZ5M"
}
class SaltAPI(object):
    """Minimal client for the salt-api REST interface.

    Authenticates with PAM eauth, caches the auth token, and refreshes it
    automatically once it is more than three hours old.
    """

    def __init__(self, url, username, password):
        self.__url = url
        self.__username = username
        self.__password = password
        self.__header = dict()
        self.__header["Accept"] = "application/json"
        self.token_s_time = ''        # set to the issue time by get_token()
        self.__token = self.get_token()

    def get_token(self, prefix='/login'):
        """Log in and return a fresh auth token; records its issue time."""
        data = {
            "username": self.__username,
            "password": self.__password,
            "eauth": "pam"
        }
        loginurl = self.__url + prefix
        # NOTE(review): verify=False skips TLS certificate validation --
        # acceptable only because the endpoint is on a trusted internal net.
        req = requests.post(loginurl, data=data, headers=self.__header, verify=False)
        try:
            token = req.json()["return"][0]["token"]
            self.token_s_time = datetime.datetime.now()
            return token
        except KeyError:
            # re-raise with the original traceback intact
            raise

    def salt_request(self, data, prefix='/'):
        """Send one request to salt-api and return the decoded JSON body.

        POSTs when `data` is truthy, otherwise GETs.  The cached token is
        refreshed first if it is older than three hours.
        """
        token_e_time = datetime.datetime.now()
        # total_seconds() instead of .seconds: .seconds wraps at 24h, so a
        # token issued days ago would otherwise look fresh again.
        if (token_e_time - self.token_s_time).total_seconds() / 3600 > 3:
            print("salt-api token is Expired")
            # BUGFIX: store the refreshed token -- the original discarded the
            # return value and kept sending the expired token.
            self.__token = self.get_token()
        url = self.__url + prefix
        self.__header["X-Auth-Token"] = self.__token
        # POST when a data dict is provided, plain GET otherwise
        if data:
            req = requests.post(url, data=data, headers=self.__header, verify=False)
        else:
            req = requests.get(url, headers=self.__header)
        return req.json()

    def list_key(self):
        """Return accepted / denied / unaccepted / rejected minion keys."""
        prefix = '/keys'
        content = self.salt_request(None, prefix)
        accepted = content['return']['minions']
        denied = content['return']['minions_denied']
        unaccept = content['return']['minions_pre']
        rejected = content['return']['minions_rejected']
        return {"accepted": accepted, "denied": denied, "unaccept": unaccept, "rejected": rejected}

    def accept_key(self, key_id):
        """Accept a minion key; return the wheel call's success flag."""
        data = {'client': 'wheel', 'fun': 'key.accept', 'match': key_id}
        content = self.salt_request(data)
        return content['return'][0]['data']['success']

    def delete_key(self, key_id):
        """Delete a minion key; return the wheel call's success flag."""
        data = {'client': 'wheel', 'fun': 'key.delete', 'match': key_id}
        content = self.salt_request(data)
        return content['return'][0]['data']['success']

    def minions_status(self):
        """Return up/down status of all minions (manage.status runner)."""
        data = {'client': 'runner', 'fun': 'manage.status'}
        content = self.salt_request(data)
        return content['return'][0]

    def remote_cmd(self, tgt, client='local_async', expr_form='list', arg=''):
        """Run a shell command asynchronously on `tgt`; return the job id."""
        data = {'client': client, 'tgt': tgt, 'fun': 'cmd.run', 'arg': arg, 'expr_form': expr_form}
        content = self.salt_request(data)
        return content['return'][0]['jid']

    def get_result(self, jid):
        """Look up the result of job `jid` (jobs.lookup_jid runner)."""
        data = {'client': 'runner', 'fun': 'jobs.lookup_jid', 'jid': jid}
        content = self.salt_request(data)
        return content['return'][0]

    def get_job_info(self, jid=''):
        """Return detailed info for one job, or for all jobs when jid is empty."""
        prefix = '/jobs/' + jid if jid else '/jobs'
        content = self.salt_request(None, prefix)
        return content['return'][0]

    def running_jobs(self):
        """Return the currently active jobs (jobs.active runner)."""
        data = {'client': 'runner', 'fun': 'jobs.active'}
        content = self.salt_request(data)
        return content['return'][0]

    def check_job(self, jid):
        """Return whether job `jid` finished and exited successfully."""
        data = {'client': 'runner', 'fun': 'jobs.exit_success', 'jid': jid}
        content = self.salt_request(data)
        return content['return'][0]

    def sync_remote_server(self, tgt=[], arg=[], expr_form='list'):
        """Fetch grains items for the targeted minions.

        NOTE(review): the mutable default arguments are kept for interface
        compatibility; they are never mutated here, so the sharing is benign.
        """
        data = {'client': 'local', 'tgt': tgt, 'fun': 'grains.item', 'arg': arg, 'expr_form': expr_form}
        return self.salt_request(data)['return']
def main():
sapi = SaltAPI(url=salt_info["url"], username=salt_info["username"], password=salt_info["password"])
jid = '20180221113323607348'
tgt = ['tw-btj-web-test-01']
arg = 'ls'
# jid = sapi.remote_cmd(tgt=tgt, fun='cmd.run', arg=cmd)
# print(jid)
print(sapi.get_result(20180308105331061269))
#print(sapi.remote_cmd(tgt=tgt, arg=arg))
if __name__ == '__main__':
main()
| OpsWorld/oms | omsBackend/salts/saltapi.py | saltapi.py | py | 5,505 | python | en | code | 37 | github-code | 90 |
71877451176 | import os
import glob
import datetime
import hashlib
import json
import numpy
from numpy import random
from keras import models
from keras import layers
from matplotlib import pyplot
name, ext = os.path.splitext(__file__)
MODELS_DIR = 'models_' + name
examples_amount = 100000
x_start = 0
x_end = 50
x_size = x_end - x_start
param_min = 0.3
param_max = 10
separation_factor = 0.8
noise_magnitude = 0.01
batch_size = 128
epochs = 10000
def gaussian(x, mu, sigma=1, scale=1):
    """Normal-distribution density evaluated at *x*.

    x     -- scalar or numpy array of arguments
    mu    -- mean (expected value)
    sigma -- standard deviation
    scale -- extra divisor applied to the density
    """
    z = (x - mu) / sigma
    norm = sigma * numpy.sqrt(2 * numpy.pi)
    return numpy.exp(-0.5 * z**2) / norm / scale
def lagrange(x, x0, gamma=1, scale=1):
    """Cauchy/Lorentzian density centred at *x0*.

    (Historically named "lagrange" in this project; the name is kept for
    compatibility with the `functions` dispatch list.)

    gamma -- half-width at half-maximum
    scale -- extra divisor applied to the density
    """
    u = (x - x0) / gamma
    return 1.0 / (numpy.pi * gamma * (1.0 + u * u)) / scale
functions = [
gaussian,
lagrange
]
def generate_data():
X = []
Y = []
Z_type = []
Z_params = []
x = numpy.linspace(x_start, x_end, x_size)
for i in range(examples_amount):
y = numpy.zeros(x_size)
# add target function
function_id = numpy.random.randint(0, len(functions))
function = functions[function_id]
# центр функции в середине вектора
offset = 1/2 * (x_end - x_start) + x_start
target_random_param = random.random()
target_param = param_min + (param_max - param_min) * target_random_param
y = y + function(x, offset, target_param)
# add another (noise) functions
for j in range(random.randint(0, 3)):
function_id = numpy.random.randint(0, len(functions))
function = functions[function_id]
random_offset = random.random()
offset = random_offset * (x_end - x_start) + x_start
random_param = random.random()
param = param_min + (param_max - param_min) * random_param
y = y + function(x, offset, param)
y_max = numpy.max(y)
scale = y_max
if y_max > 0:
y = y / y_max
y = y.reshape((-1, 1))
z_type = numpy.zeros(len(functions))
z_type[function_id] = 1
z_params = numpy.array([target_random_param, scale])
X.append(x)
Y.append(y)
Z_type.append(z_type)
Z_params.append(z_params)
X = numpy.array(X)
Y = numpy.array(Y)
Z_type = numpy.array(Z_type)
Z_params = numpy.array(Z_params)
return X, Y, (Z_type, Z_params)
def get_data(data=None):
    """Split generated examples into train and test subsets.

    data -- optional (X, Y, Z) triple as produced by generate_data();
            generated on the fly when omitted.

    Returns ((y_train, z_train), (y_test, z_test)), preserving the
    (type, params) pair structure of Z.
    """
    _, y, z = data if data is not None else generate_data()
    # BUGFIX: derive the split point from the actual number of examples, so a
    # caller-supplied dataset of any size is split correctly (the old code
    # always used the module-level examples_amount).
    separator = int(len(y) * separation_factor)
    y_train = y[:separator]
    z_train = [arr[:separator] for arr in z]
    y_test = y[separator:]
    z_test = [arr[separator:] for arr in z]
    # noise injection kept disabled, as in the original experiment
    #y_test = random.normal(y_test, noise_magnitude)
    return (y_train, z_train), (y_test, z_test)
def create_model():
    """Build the two-headed 1D CNN.

    Head 1 classifies the peak type (softmax over the known functions).
    Head 2 regresses the (param, scale) pair, conditioned on head 1 via a
    concatenation of the conv features with the class probabilities.
    """
    input = layers.Input(shape=(x_size, 1))
    x = layers.Conv1D(64, 5)(input)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling1D(2)(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Conv1D(128, 5)(x)
    x = layers.Activation('relu')(x)
    x = layers.MaxPooling1D(2)(x)
    x = layers.Dropout(0.25)(x)
    conv_out = layers.Flatten()(x)
    # classification head: function type
    x = layers.Dense(len(functions))(conv_out)
    output_1 = layers.Activation('softmax')(x)
    # regression head: (param, scale), conditioned on the predicted type
    x = layers.Concatenate(axis=-1)([conv_out, output_1])
    x = layers.Dense(64)(x)
    x = layers.Activation('tanh')(x)
    x = layers.Dropout(0.25)(x)
    # BUGFIX: feed the previous layer (x), not conv_out -- the original
    # wiring silently dropped the Concatenate/Dense(64) sub-network.
    x = layers.Dense(32)(x)
    x = layers.Activation('tanh')(x)
    x = layers.Dropout(0.25)(x)
    x = layers.Dense(2)(x)
    output_2 = layers.Activation('relu')(x)
    outputs = [output_1, output_2]
    model = models.Model(inputs=input, outputs=outputs)
    return model
def compile_model(model):
loss='mean_squared_error'
#loss='categorical_crossentropy'
#optimizer = optimizers.rmsprop(lr=0.001, decay=1e-6)
optimizer = 'adam'
model.compile(loss=loss,
optimizer=optimizer,
metrics=['accuracy'])
def train_model(model, y_train, z_train, y_test, z_test):
model.fit(y_train, z_train,
batch_size=batch_size,
epochs=epochs,
validation_data=(y_test, z_test),
shuffle=True)
def test_model(model, y_test, z_test):
result = model.evaluate(y_test, z_test)
return result
def test_predict(model, y_test, z_test):
predict = model.predict(y_test)
for i, (z, p) in enumerate(zip(z_test, predict)):
print(i, ':', z, p)
def show_predict(model, y_test, z_test):
predict = model.predict(y_test)
#import pdb; pdb.set_trace()
for i, (type, params) in enumerate(zip(*predict)):
function_id = numpy.argmax(type)
function = functions[function_id]
x0 = 1/2 * (x_end - x_start) + x_start
a, scale = params
a = param_min + (param_max - param_min) * a
scale = scale or 1.0
print('Prediction:', function.__name__, x0, a, scale)
x = numpy.linspace(x_start, x_end, x_size)
y = function(x, x0, a, scale)
#y = y / numpy.max(y)
pyplot.plot(x, y_test[i])
pyplot.plot(x, y)
pyplot.show()
def save_model(model):
"""
Model name: <file name>_<date>_<model arch hash>
"""
if not os.path.isdir(MODELS_DIR):
os.makedirs(MODELS_DIR)
name, ext = os.path.splitext(__file__)
now = datetime.datetime.now()
date = now.strftime('%Y-%m-%dT%H:%M:%S')
config = model.get_config()
raw = json.dumps(config)
hash = hashlib.md5(raw.encode('utf-8')).hexdigest()
model_name = name + '_' + date + '_' + hash + '.h5'
model_path = os.path.join(MODELS_DIR, model_name)
#model.save(model_path)
models.save_model(model, model_path) # *.h5
print('Saved trained model at %s ' % model_path)
def load_model(model_path):
return models.load_model(model_path)
def load_model_choice():
name, ext = os.path.splitext(__file__)
search_pattern = name + '_*.h5'
search_pattern = os.path.join(MODELS_DIR, search_pattern)
files = glob.glob(search_pattern)
if len(files) == 0:
raise ValueError('No saved models')
print('Saved models:')
for i, f in enumerate(files):
print('[%i]' % i, f)
index = int(input('Choice model file: '))
model_path = files[index]
model = load_model(model_path)
return model
def main():
(y_train, z_train), (y_test, z_test) = get_data()
#model = create_model()
model = load_model_choice()
model.summary()
compile_model(model)
train_model(model, y_train, z_train, y_test, z_test)
result = test_model(model, y_test, z_test)
print('Model acc:', result)
#test_predict(model, y_test, z_test)
save_model(model)
show_predict(model, y_test, z_test)
def test_gaussian():
x = numpy.linspace(x_start, x_end, x_size)
g = gaussian(x, (x_start + x_end) / 2)
pyplot.plot(x, g)
pyplot.show()
def test_lagrange():
x = numpy.linspace(x_start, x_end, x_size)
g = lagrange(x, (x_start + x_end) / 2)
pyplot.plot(x, g)
pyplot.show()
def show_generated_data():
X, Y, Z = generate_data()
for i, (x, y, z) in enumerate(zip(X, Y, Z)):
print('Out:', z)
pyplot.plot(x, y)
pyplot.show()
print('Press ENTER to show next example:')
input()
def show_train_data():
data = X, Y, Z = generate_data()
(y_train, z_train), (y_test, z_test) = get_data(data)
for i, (x, y, z) in enumerate(zip(X, y_train, z_train)):
print('Out:', z)
pyplot.plot(x, y)
pyplot.show()
print('Press ENTER to show next example:')
input()
def show_test_data():
data = X, Y, Z = generate_data()
(y_train, z_train), (y_test, z_test) = get_data(data)
for i, (x, y, z) in enumerate(zip(X, y_test, z_test)):
print('Out:', z)
pyplot.plot(x, y)
pyplot.show()
print('Press ENTER to show next example:')
input()
if __name__ == '__main__':
#test_gaussian()
#test_lagrange()
#show_generated_data()
#show_test_data()
main()
| ihoromi4/cnn_spectrum_approximate | approximate/hypothesis_22.py | hypothesis_22.py | py | 8,247 | python | en | code | 0 | github-code | 90 |
72292470698 | # 주요변수에 대한 설명
# N : 정렬해야하는 원소들의 수
# arr : 정렬해야하는 원소를 가지고 있는 리스트
'''문제해결 팁
1. 전체 선택정렬 알고리즘에 대한 코드를 공부했다면 해결할 수 있다.
'''
T = int(input())
for test_case in range(1, T + 1):
    N = int(input())
    arr = list(map(int, input().split()))
    # "Special sort" by selection: even positions receive the largest
    # remaining value, odd positions the smallest.
    for i in range(N - 1):
        select = i
        for j in range(i + 1, N):
            if i % 2 == 0:
                if arr[select] < arr[j]:  # even index: pick the maximum
                    select = j
            else:
                if arr[select] > arr[j]:  # odd index: pick the minimum
                    select = j
        arr[i], arr[select] = arr[select], arr[i]
    # print the result; BUGFIX: iterate over all N elements instead of a
    # hard-coded 10, so inputs of any length are printed fully
    print('#' + str(test_case), end='')
    for i in range(N):
        print(' ' + str(arr[i]), end='')
print() | ldgeao99/Algorithm_Study | SW_Academy/Programming_Intermediate/2. Python_SW_Problem_Solving_Basic-LIST2/Q4. 4843 특별한 정렬(선택정렬).py | Q4. 4843 특별한 정렬(선택정렬).py | py | 994 | python | ko | code | 0 | github-code | 90 |
32414677215 | from flask import Flask, request, redirect, render_template, url_for
from flask_sqlalchemy import SQLAlchemy
app = Flask(__name__)
#user:password@server:portNumber/databaseName
app.config['SQLALCHEMY_DATABASE_URI'] = 'mysql+pymysql://buildblog:buildblog@localhost:3306/buildblog'
#Not for deployment
app.config['SQLALCHEMY_ECHO'] = True
app.config['DEBUG'] = True
db = SQLAlchemy(app)
class Blog(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(120))
content = db.Column(db.String(120))
def __init__(self, name, content):
self.name = name
self.content = content
def get_blog_ids():
    """Return all blog ids, ordered by id.

    The explicit ORDER BY keeps these rows aligned with get_blog_names()
    and get_blog_contents(), which the index view zips together; without
    it the database is free to return rows in any order.
    """
    rows = db.session.query(Blog.id).order_by(Blog.id).all()
    return [row[0] for row in rows]
def get_blog_names():
    """Return all blog titles, ordered by id.

    Ordering by id keeps this list aligned with get_blog_ids() and
    get_blog_contents(), which the index view zips together.
    """
    rows = db.session.query(Blog.name).order_by(Blog.id).all()
    return [row[0] for row in rows]
def get_blog_contents():
    """Return all blog bodies, ordered by id.

    Ordering by id keeps this list aligned with get_blog_ids() and
    get_blog_names(), which the index view zips together.
    """
    rows = db.session.query(Blog.content).order_by(Blog.id).all()
    return [row[0] for row in rows]
@app.route('/blog')
def blog():
id = request.args.get('id')
currBlog = db.session.query(Blog).get(id)
return render_template('blog.html', name=currBlog.name, content=currBlog.content)
@app.route('/newpost', methods=['POST', 'GET'])
def newpost():
if request.method == 'POST':
title = request.form['blogTitle']
content = request.form['blogContent']
new_blog = Blog(title, content)
db.session.add(new_blog)
db.session.commit()
redir_url = "/blog?id=" + str(new_blog.id)
return redirect(redir_url)
return render_template('newpost.html')
@app.route("/")
def index():
return render_template('index.html', ids=get_blog_ids(), names=get_blog_names(), contents=get_blog_contents())
if __name__ == '__main__':
app.run() | antwon97/BuildABlog | main.py | main.py | py | 1,812 | python | en | code | 0 | github-code | 90 |
19910102076 | print("Welcome to the Assil Canteen")
print("1.Veg")
print("2.Non-Veg")

# Menu data: category option -> list of (item name, price).  Driving the
# prompts from data removes the copy-pasted if/elif ladders.
MENU = {
    '1': [("Masala Dosa", 30), ("Idli", 20), ("Veg Puff", 12),
          ("Fried Rice", 20), ("Paneer Frankie", 34)],
    '2': [("Chicken Biryani", 50), ("Chicken Puff", 20),
          ("Chicken Fried Rice", 30)],
}

items = []   # names of the ordered items
count = 0    # number of items ordered
price = 0    # running bill total

option = input("Select one of the following options given above")
menu = MENU.get(option)
if menu is None:
    print("Please select one of the following options given below")
else:
    # show the numbered menu for the chosen category
    # (BUGFIX: the non-veg menu previously printed no item numbers)
    for idx, (name, _) in enumerate(menu, start=1):
        print(f"{idx}.{name}")
    choice = input("Select your option given above")
    if choice.isdigit() and 1 <= int(choice) <= len(menu):
        # record the chosen dish (BUGFIX: "Freid Rice" typo corrected)
        name, cost = menu[int(choice) - 1]
        items.append(name)
        count += 1
        price += cost
    else:
        print("Please enter the valid option")

print("Your Bill is as follows")
print(items)
print(price)
print(count)
| firas98/Python-program | canteen.py | canteen.py | py | 1,703 | python | en | code | 0 | github-code | 90 |
22346763105 | """Pipeline module."""
from typing import Any
from typing_extensions import Self
from pysetl.utils import BenchmarkResult, pretty
from pysetl.utils.exceptions import InvalidDeliveryException, PipelineException
from pysetl.utils.mixins import (
HasRegistry, HasLogger, IsIdentifiable, HasBenchmark,
HasDiagram
)
from .factory import Factory
from .stage import Stage
from .deliverable import Deliverable
from .dispatcher import Dispatcher
from .inspector import Inspector
from .expected_deliverable import ExpectedDeliverable
from .external import External
PotentialDeliverables = list[tuple[ExpectedDeliverable, ExpectedDeliverable]]
class Pipeline(
HasRegistry[Stage], IsIdentifiable,
HasBenchmark, HasDiagram, HasLogger
):
"""Pipeline is a complete data transformation workflow."""
def __init__(self, benchmark: bool = False) -> None:
super().__init__()
IsIdentifiable.__init__(self)
self.benchmarked = benchmark
self.inspector = Inspector(self)
self.dispatcher = Dispatcher()
self.__stage_counter: int = 0
self.__benchmark_result: list[BenchmarkResult] = []
@property
def stages(self: Self) -> list[Stage]:
"""Return current stages registered in popeline."""
return list(self.get_registry().values())
def set_input_from_deliverable(
self: Self,
deliverable: Deliverable) -> Self:
"""Register a deliverable Dispatcher."""
self.dispatcher.add_deliverable(deliverable)
return self
def add_stage(self: Self, stage: Stage) -> Self:
"""Register a stage in pipeline."""
self.log_debug(f"Add stage {self.__stage_counter}")
self.reset_end_stage()
stage.stage_id = len(self.get_registry())
self.register(stage)
return self
def reset_end_stage(self: Self) -> None:
"""Reset end of pipeline."""
last_item = self.last_registered_item
if last_item:
last_item.end = False
def add_stage_from_factory(self: Self, factory: Factory) -> Self:
"""Register a new stage with a single factory."""
stage = Stage().add_factory(factory)
self.add_stage(stage)
return self
def add_stage_from_type(
self: Self,
factory_type: type[Factory],
*args, **kwargs) -> Self:
"""Instantiate a factory and register a new stage with it."""
factory = factory_type(*args, **kwargs)
stage = Stage().add_factory(factory)
self.add_stage(stage)
return self
def describe(self: Self) -> str:
"""Graph representation of the pipeline."""
self.inspector = Inspector(self)
return str(self.inspector)
def to_diagram(self: Self) -> str:
"""Diagram represention of the pipeline."""
self.inspector = Inspector(self)
return self.inspector.inspect().graph.to_diagram()
@property
def diagram_id(self: Self) -> str:
"""Pipeline has no diagram id."""
raise NotImplementedError("Pipeline doesn't have a diagram id")
def get_pipeline_deliverables(self: Self) -> set[ExpectedDeliverable]:
"""Retrive deliverables set directly on pipeline."""
consumer_deliverables = {
ExpectedDeliverable(
deliverable_type=pretty(deliverable.payload_type),
delivery_id=deliverable.delivery_id,
producer=pretty(deliverable.producer),
consumer=pretty(consumer)
)
for deliverable
in self.dispatcher.get_registry().values()
for consumer
in deliverable.consumers
}
producer_deliverables = {
ExpectedDeliverable(
deliverable_type=pretty(deliverable.payload_type),
delivery_id=deliverable.delivery_id,
producer=pretty(deliverable.producer),
consumer=None
)
for deliverable
in self.dispatcher.get_registry().values()
if len(deliverable.consumers) == 0
}
return consumer_deliverables.union(producer_deliverables)
def get_stages_deliverables(self: Self) -> set[ExpectedDeliverable]:
"""Retrive deliverables available from stages."""
consumer_deliverables = {
ExpectedDeliverable(
deliverable_type=pretty(factory.delivery_type()),
delivery_id=factory.delivery_id,
producer=pretty(type(factory)),
consumer=pretty(consumer)
)
for stage
in self.stages[:-1]
for factory
in stage.factories
for consumer
in factory.consumers
}
producer_deliverables = {
ExpectedDeliverable(
deliverable_type=pretty(factory.delivery_type()),
delivery_id=factory.delivery_id,
producer=pretty(type(factory)),
consumer=None
)
for stage
in self.stages[:-1]
for factory
in stage.factories
}
return consumer_deliverables.union(producer_deliverables)
def get_needed_deliverables(self: Self) -> set[ExpectedDeliverable]:
"""Retrive expected deliverables from each Pipeline node."""
return {
ExpectedDeliverable(
deliverable_type=pretty(delivery.payload_type),
delivery_id=delivery.delivery_id,
producer=pretty(delivery.producer),
consumer=pretty(delivery.consumer)
)
for stage
in self.stages
for node
in stage.create_nodes()
for delivery
in node.input
}
def __compare_deliverables(
self: Self,
needed_deliverables: set[ExpectedDeliverable],
available_deliverables: set[ExpectedDeliverable]
) -> PotentialDeliverables:
"""Find potential deliverables for each needed deliverable."""
return [
(needed, available)
for available
in available_deliverables
for needed
in needed_deliverables
if available.deliverable_type == needed.deliverable_type and
available.delivery_id == needed.delivery_id and
(
True
if needed.producer == pretty(External)
else needed.producer == available.producer
) and
(
available.consumer == needed.consumer or
not available.consumer
)
]
def __exists_solution(
self: Self,
needed: ExpectedDeliverable,
potential_deliverables: PotentialDeliverables) -> None:
"""Validate if dependency solution exists."""
found_by_consumer = list(filter(
lambda _: _[1].consumer == needed.consumer and
_[0].deliverable_type == needed.deliverable_type,
potential_deliverables
))
found_implicit = list(filter(
lambda _: _[1].consumer is None and
_[0].deliverable_type == needed.deliverable_type,
potential_deliverables
))
if not (found_by_consumer or found_implicit):
delivery_id_str = (
"''"
if (needed.delivery_id == "")
else needed.delivery_id
)
raise InvalidDeliveryException(" ".join([
f"Deliverable of type {needed.deliverable_type}",
f"with deliveryId {delivery_id_str}",
f"produced by {pretty(needed.producer)}",
f"is expected in {pretty(needed.consumer)}."
]))
def run(self: Self) -> Self:
"""Run a pipeline."""
self.inspector = Inspector(self).inspect()
needed_deliverables = self.get_needed_deliverables()
pipeline_deliverables = self.get_pipeline_deliverables()
stages_output = self.get_stages_deliverables()
available_deliverables = pipeline_deliverables.union(stages_output)
potential_deliverables = self.__compare_deliverables(
needed_deliverables,
available_deliverables
)
_ = [
self.__exists_solution(needed, potential_deliverables)
for needed
in needed_deliverables
]
self.__benchmark_result = [
benchmark
for stage
in self.stages
for benchmark
in self.execute_stage(stage)
]
return self
def execute_stage(self: Self, stage: Stage) -> list[BenchmarkResult]:
"""Dispatch dependencies and run a stage."""
self.log_info(str(stage))
if len(self.dispatcher.get_registry()) != 0:
list(map(self.dispatcher.dispatch, stage.factories))
stage.benchmarked = self.benchmarked
stage.run()
self.dispatcher.collect_stage_deliverables(stage)
return stage.get_benchmarks()
def get_last_output(self: Self) -> Any:
"""Return last output from pipeline."""
if self.last_registered_item:
return self.last_registered_item.factories[-1].get()
return None
def get_output(self: Self, factory_type: type[Factory]) -> Any:
"""Return output from a given factory."""
factory = [
factory
for stage
in self.stages
for factory
in stage.factories
if isinstance(factory, factory_type)
]
if len(factory) == 0:
raise PipelineException(
f"There isn't any class {pretty(factory_type)}"
)
return factory[0].get()
def get_deliverable(
self: Self,
deliverable_type: type) -> list[Deliverable]:
"""Find deliverables of a given type."""
return self.dispatcher.find_deliverable_by_type(deliverable_type)
def get_benchmarks(self) -> list[BenchmarkResult]:
"""Return collected benchmarks."""
return self.__benchmark_result
| JhossePaul/pysetl | src/pysetl/workflow/pipeline.py | pipeline.py | py | 10,279 | python | en | code | 0 | github-code | 90 |
18641651323 | #!/usr/bin/env python3
# coding : utf-8
# Date : 2022/5/6
# Author: knight2008
# QQ&Wechat: 496966425
# TG: @knight2008
# Filename: batch_get_balance.py
# Github: https://github.com/knight2008
# 版权:自由转载-开源免费-任意使用
# 功能: 批量查询 EVM chain 基础币余额 或 ERC20 token 余额
# 版本: Python 3.8.6
# 依赖库:
# pip install argparse
# pip install web3
from decimal import Decimal
from web3 import Web3
abi_json = '''[{
"constant": true,
"inputs": [{
"name": "_owner",
"type": "address"
}],
"name": "balanceOf",
"outputs": [{
"name": "",
"type": "uint256"
}],
"payable": false,
"stateMutability": "view",
"type": "function"},
{
"constant": true,
"inputs": [],
"name": "decimals",
"outputs": [{
"name": "",
"type": "uint8"
}],
"payable": false,
"stateMutability": "view",
"type": "function"
}]'''
def get_token_balance(input_file, erc20_contract, web3_rpc_url):
    """Print `address,balance` of an ERC-20 token for every address listed in *input_file*."""
    w3 = Web3(Web3.HTTPProvider(web3_rpc_url))
    token = w3.eth.contract(
        address=Web3.toChecksumAddress(erc20_contract), abi=abi_json
    )
    with open(input_file) as fin:
        lines = fin.readlines()
    # fetch decimals once; every raw balance is scaled by 10**decimals
    unit = Decimal(10 ** token.functions.decimals().call())
    for line in lines:
        address = line.strip()
        raw = token.functions.balanceOf(Web3.toChecksumAddress(address)).call()
        print(f"{address},{raw / unit}")
def get_eth_balance(input_file, web3_rpc_url):
    """Print `address,balance` (in ether) for every address listed in *input_file*."""
    w3 = Web3(Web3.HTTPProvider(web3_rpc_url))
    wei_per_eth = Decimal(10 ** 18)
    with open(input_file) as fin:
        lines = fin.readlines()
    for line in lines:
        address = line.strip()
        checksum = Web3.toChecksumAddress(address)
        balance = w3.eth.getBalance(checksum) / wei_per_eth
        print(f"{address},{balance}")
if __name__ == '__main__':
import sys
import argparse
parser = argparse.ArgumentParser(
usage="-c -i -w", description="-r web3 rpc url, -c contract address, -i file name for address.")
parser.add_argument("-r", "--rpc_url", default="", help="web3 rpc url")
parser.add_argument("-c", "--contract", default="", help="base coin balance if null")
parser.add_argument("-i", "--input", default="address.txt", help="file name for address")
args = parser.parse_args()
# print("contract: {0}".format(args.contract))
# print("input: {0}".format(args.input))
if "" == args.rpc_url.strip():
print(parser.print_help())
sys.exit(-1)
else:
web3_rpc_url = args.rpc_url
if "" == args.contract.strip():
get_eth_balance(args.input, web3_rpc_url)
else:
get_token_balance(args.input, args.contract, web3_rpc_url)
| knight2008/eth-tools | batch_get_balance.py | batch_get_balance.py | py | 2,987 | python | en | code | 4 | github-code | 90 |
40730344229 | from conf import conf
from pro_data import dataloader
from Model import ADMN
import pickle
import time
import os
def save_data(data, parameter):
    """Persist a list of per-epoch test losses plus the run parameters.

    Writes Result/<dataset>/<timestamp>_result.txt under the current
    working directory.

    data      -- iterable of loss values (one per epoch)
    parameter -- run-configuration dict; must contain the "data" key
    """
    now = time.strftime('%Y%m%d_%H%M%S', time.localtime(time.time()))
    save_dir = os.path.join(os.getcwd(), "Result", str(parameter["data"]))
    # make sure the per-dataset directory exists before opening the file
    os.makedirs(save_dir, exist_ok=True)
    save_file = os.path.join(save_dir, now + "_result.txt")
    # the context manager guarantees the handle is closed even on error
    with open(save_file, 'w', encoding="utf-8") as output:
        output.write('best_mse:' + str(min(data)) + '\n')
        for value in data:
            output.write(str(value) + '\n')
        output.write("-----------------------------------" + '\n')
        output.write("parameter" + '\n')
        for key, item in parameter.items():
            output.write(str(key) + ":" + str(item) + '\n')
        # NOTE(review): `model` is read from the module namespace here; it
        # only exists after the __main__ block ran -- consider passing the
        # model in explicitly.
        output.write(str(type(model)))
if __name__=="__main__":
a = conf.args()
#print(b["batch_size"])
parameter = a.args
current_path =os.path.join(os.getcwd(),"Data")
print(current_path)
path = os.path.join(current_path,parameter["data"]) #
file_name = parameter["data"] + ".json"
datahelper = dataloader.dataloader(path,file_name,parameter)
datahelper.load_file() #数据处理完就不用处理
datahelper.process_data()
#下面为了免去重复数据分析
parameter["vocabulary_num"] = datahelper.args["vocabulary_num"] #548680
parameter["user_num"] = datahelper.args["user_num"] #5541
parameter["item_num"] = datahelper.args["item_num"] #3568
parameter['is_sample'] = False
#model = ADMN.ADMN(parameter)
#print(type(model))
pkl_file =os.path.join(current_path,parameter["data"] ,parameter["data"] + '.para')
train_path = os.path.join(current_path,parameter["data"],parameter["data"]+'.train')
test_path = os.path.join(current_path,parameter["data"],parameter["data"]+'.test')
#model.load_data(train_path,test_path,pkl_file)
#list = ['base','softmax','unbias_softmax','abs_unbias','abs_unbias_softmax','no_rating']
list = ['base', 'softmax', 'unbias_softmax', 'abs_unbias', 'abs_unbias_softmax']
sample_ratio = [0.1,0.2,0.4,0.8]
#list = ['no_rating']
for i in list:
for j in sample_ratio:
parameter["rating_weight"] = i
parameter["sample_ratio"] = j
model = ADMN.ADMN(parameter)
print(type(model))
print("model.rating_weight:{}".format(model.rating_weight))
model.load_data(train_path, test_path, pkl_file)
model.model_train()
model.show_test_result()
result = model.test_loss_list
save_data(result, parameter)
# model.model_train()
# model.show_test_result()
# result = model.test_loss_list
# save_data(result,parameter)
| wiio12/ADMN | main.py | main.py | py | 2,756 | python | en | code | 4 | github-code | 90 |
70904584617 | array = list(map(int, input()))
# Greedy "multiply or add": start from the first digit.
result = array[0]
for i in array[1:]:
    # adding beats multiplying whenever either operand is 0 or 1
    if result <= 1 or i <= 1:
        result += i
    else: result *= i
print(result) | dohun31/algorithm | 2021/week_01/greedy/02곱하기혹은더하기.py | 02곱하기혹은더하기.py | py | 189 | python | ko | code | 1 | github-code | 90 |
5390590202 |
from rich.console import Group
import time
from rich.columns import Columns
from rich.console import Console
from rich.panel import Panel
from rich.markdown import Markdown
from rich.align import Align
from rich.table import Table
from rich import print
from rich.layout import Layout
import win32gui, win32con
from rich.text import Text
# Maximize the console window before Rich measures it (Windows-only API).
hwnd = win32gui.GetForegroundWindow()
win32gui.ShowWindow(hwnd, win32con.SW_MAXIMIZE)
# Placeholder markdown snippets rendered into the panels below.
md = f"""
# Title
"""
more_text = """
"""
md_2 = """
# Title
"""
main_title = """
# PyStats
"""
console = Console(height=15, width=160)
# Split the screen into a narrow left column and a wide right column.
layout = Layout()
layout.split_column(
    Layout(name="lower", ratio=5)
)
layout["lower"].split_row(
    Layout(name="left"),
    Layout(name="right"),
)
layout["lower"]["left"].size = 50
# Right side: markdown title stacked above a small status panel.
panel_group = Group(
    Markdown(md),
    Align(Panel('[red]Okay', border_style='blue'), align='left')
)
layout["lower"]["right"].update(Panel(panel_group, border_style='blue', style='green'))
#num 73 might change
#layout["lower"]["right"].update(Panel(Align(Panel(md, box=box.DOUBLE, padding=(0, 73)), align='center'), border_style='blue', style='green'))
layout["lower"]["left"].update(Panel(Markdown(md_2), border_style='blue', style='magenta'))
print(layout)
| DamnUi/PyStats | All current dashboard ideas.py/design 1.py | design 1.py | py | 1,316 | python | en | code | 7 | github-code | 90 |
# BOJ 2294: minimum number of coins (n kinds, unlimited supply) summing to k.
# Unbounded-knapsack DP: dp[amount] = fewest coins that make `amount` exactly.
n, k = map(int, input().split())
INF = 10001  # k <= 10000, so any reachable amount uses at most 10000 coins
dp = [INF] * (k + 1)
dp[0] = 0
for coin in (int(input()) for _ in range(n)):
    for amount in range(coin, k + 1):
        if dp[amount - coin] + 1 < dp[amount]:
            dp[amount] = dp[amount - coin] + 1
# Decide reachability only from dp[k].  The original exited with -1 as soon as
# ANY intermediate amount was still unreachable, wrongly rejecting solvable
# inputs (e.g. coins {2}, k = 4: dp[3] stays unreachable but dp[4] = 2).
print(dp[k] if dp[k] < INF else -1)
print(dp[k]) | y7y1h13/Algo_Study | 백준/Silver/2294. 동전 2/동전 2.py | 동전 2.py | py | 264 | python | en | code | 0 | github-code | 90 |
18881559055 | import json
class StatsVM:
    """Serializable view-model bundling descriptive statistics for one attribute."""

    # Field names, in constructor order; used to copy arguments onto the instance.
    _FIELDS = ('_attribute', '_min', '_max', '_avg', '_stdv',
               '_median', '_iqr', '_q1', '_q3')

    def __init__(self, _attribute, _min, _max, _avg, _stdv, _median, _iqr, _q1, _q3):
        received = locals()
        for name in self._FIELDS:
            setattr(self, name, received[name])

    def to_json(self):
        """Render all instance fields as a JSON object (sorted keys, 4-space indent)."""
        return json.dumps(dict(vars(self)), default=lambda o: o.__dict__,
                          sort_keys=True, indent=4)
| MarcinJaworski247/kepler-classifier | api/app/main/util/stats_vm.py | stats_vm.py | py | 468 | python | en | code | 0 | github-code | 90 |
22936096372 | """
Team Id: HC#3266
Author List: Hemanth Kumar K L, Mahantesh R, Aman Bhat T, Nischith B.O.
Filename: main.py
Theme: Homecoming
Functions: run_module , imshow , predict
Global variables: class_names
"""
import torch
from torchvision.transforms import transforms
from torch.autograd import Variable
from PIL import Image
import matplotlib.pyplot as plt
import numpy as np
import cv2
import argparse
import warnings
warnings.filterwarnings("ignore")
class_names=[] # List to store the different class names for animals and habitats
def run_module():
    """
    Function name: run_module.
    Calls respective model based on command line flags.
    Loads the animal and/or habitat model chosen on the command line,
    extracts its class names and weights, and hands the image path to
    predict().
    Input: None
    Output: None
    """
    global class_names
    # Create a parser for command line flags
    #---------------------------------------
    ap = argparse.ArgumentParser(add_help=False)
    ap.add_argument("-a", "--animal", type=str, required=False)
    ap.add_argument("-h", "--habitat", type=str, required=False)
    ap.add_argument("--amod",type=str, default="HC#3266animal_model.pth",required=False)
    ap.add_argument("--hmod",type=str, default="HC#3266habitat_model.pth",required=False)
    args = ap.parse_args()
    # Checking for animal/habitat model path
    # NOTE(review): because --amod/--hmod carry non-empty defaults, args.amod
    # and args.hmod are always truthy, so the two `else` branches below are
    # dead code (the defaults are applied through argparse instead).
    #-------------------------------------------
    if args.animal:
        if args.amod:
            model = torch.load(args.amod, map_location=lambda storage, loc: storage)
        else:
            model = torch.load("HC#3266animal_model.pth", map_location=lambda storage, loc: storage)
        class_names = model['class_names'] # Extract class names from model
        trained_model = model['model'] # Extract weights from model and save it to trained model
        predict(trained_model, args.animal) # Call function to predict the animal
    if args.habitat:
        if args.hmod:
            model = torch.load(args.hmod, map_location=lambda storage, loc: storage)
        else:
            model = torch.load("HC#3266habitat_model.pth", map_location=lambda storage, loc: storage)
        class_names = model['class_names'] # Extract class names from model
        trained_model = model['model'] # Extract weights from model and save it to trained model
        predict(trained_model, args.habitat) # Call function to predict the habitat
def imshow(inp_img, title=None):
    """Display a normalized CHW image tensor with an optional title.

    Undoes the mean/std normalization applied at preprocessing time,
    clips the result to [0, 1], renders it with matplotlib and pauses
    briefly so the window actually refreshes.
    """
    channel_mean = np.array([0.485, 0.456, 0.406])
    channel_std = np.array([0.229, 0.224, 0.225])
    # CHW tensor -> HWC ndarray so matplotlib can render it, then de-normalize.
    image = inp_img.numpy().transpose((1, 2, 0))
    image = channel_std * image + channel_mean
    image = np.clip(image, 0, 1)
    plt.imshow(image)
    # Display the prediction as the title, when one was supplied.
    if title is not None:
        plt.title(title)
    plt.pause(3)  # pause a bit so that plots are updated
def predict(model,image):
    """
    Function name : predict
    Input parameters :
        model -> trained model (a torch module returning class scores)
        image -> path of the image to be predicted
    Output : None
    Generates the output image with predicted title
    """
    global class_names
    model.eval() # Switch the model to inference mode (disables dropout/batch-norm updates)
    # pre-processing the image
    # -------------------------
    img_loader = transforms.Compose([ transforms.CenterCrop(224), transforms.ToTensor(),
                                     transforms.Normalize([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])])
    desired_size=247
    img = Image.open(image)
    # NOTE(review): Image.ANTIALIAS was removed in Pillow 10; newer Pillow
    # versions need Image.LANCZOS here -- confirm the pinned Pillow version.
    # Resize the image to 224*224 to CenterCrop the image, paste it over a black RGB image, and then convert to tensor
    # ----------------------------------------------------------------------------------------------------------------
    display_img=img.resize((224,224),Image.ANTIALIAS)
    blank = Image.new("RGB", (224, 224))
    blank.paste(display_img, (0, 0))
    displayimage_tensor = img_loader(blank).float()
    display_img = displayimage_tensor.unsqueeze(0).float()
    # Resize the image to desired size, paste it over a black RGB image, and then convert to tensor to send to the model
    # ------------------------------------------------------------------------------------------------------------------
    model_img=img.resize((247,247),Image.ANTIALIAS)
    blank = Image.new("RGB", (desired_size, desired_size))
    blank.paste(model_img, (0,0))
    modelimage_tensor = img_loader(blank).float()
    model_img = modelimage_tensor.unsqueeze(0).float()
    outputs = model(model_img) # sending the pre-processed image to the model for prediction.
    _, preds = torch.max(outputs, 1) # Returns the most likely class index for the image
    ax = plt.subplot(111) # Select the position for image
    ax.axis('off')
    ax.set_title('predicted: {}'.format(class_names[preds[0]])) # Append the prediction to the title
    imshow(display_img.cpu().data[0]) # Call function to plot the output image
# Entry point: parse flags and run the requested prediction(s).
if __name__ == "__main__":
    run_module()
| MahanteshR/eYRC_2018 | Task3/Task3a/Code/main.py | main.py | py | 5,489 | python | en | code | 0 | github-code | 90 |
382953529 | # -*- coding: utf-8 -*-
import scrapy
from ..items import BookItem
class BookSpider(scrapy.Spider):
    """Spider for the Douban Top-250 books chart.

    Fans out over all ten 25-item pages and yields one BookItem per row.
    """
    name = 'book'
    allowed_domains = ['book.douban.com']
    start_urls = 'https://book.douban.com/top250'

    def start_requests(self):
        # Kick off with the chart's landing page; pagination happens in parse_book_list.
        yield scrapy.Request(url=self.start_urls, callback=self.parse_book_list, dont_filter=True)

    def parse_book_list(self, response):
        # The chart is paginated 25 books per page across 10 pages.
        for i in range(10):
            current_url = response.url + '?start=' + str(i * 25)
            yield scrapy.Request(url=current_url, callback=self.parse, dont_filter=True)

    def parse(self, response):
        selector = scrapy.Selector(response)
        infos = selector.xpath('//tr[@class="item"]')
        for info in infos:
            # Fix: create a fresh item per row.  The original reused one
            # mutable BookItem across yields, so later mutations could
            # corrupt items already handed to (asynchronous) pipelines.
            item = BookItem()
            item['name'] = info.xpath('./td/div/a/@title').extract()[0]
            item['url'] = info.xpath('./td/div/a/@href').extract()[0]
            # The publication string is '/'-separated:
            # author / ... / publication date / price.
            author_info = info.xpath('./td/p/text()').extract()[0]
            author_infos = author_info.split('/')
            item['price'] = str(author_infos[-1])
            item['author'] = author_infos[0]
            item['pubday'] = author_infos[-2]
            item['rating_nums'] = info.xpath('./td/div/span[2]/text()').extract()[0]
            comment_nums = info.xpath('normalize-space(td/div/span[3]/text())').extract()[0]
            item['comment_nums'] = comment_nums.replace('( ', '').replace('人评价 )', '')
            # The one-line quote is optional on some rows.
            quote = info.xpath('td/p/span[@class="inq"]/text()').extract()
            item['quote'] = quote[0] if quote else ''
            yield item
| WhiteBrownBottle/Python- | DouBan/DouBan/spiders/book.py | book.py | py | 1,723 | python | en | code | 0 | github-code | 90 |
10237098646 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from argparse import ArgumentParser
import codecs
import os
import random
import tempfile
import numpy as np
from prepare_dict import load_lexicon
from compare_lexicons import compare_lexicons
def create_tmp_file_name():
    """Create an empty temporary file under <script dir>/data/tmp and return its path.

    The file is deliberately left on disk (delete=False); callers remove it
    themselves once the external tools are done with it.
    """
    basedir = os.path.join(os.path.dirname(__file__), 'data', 'tmp')
    with tempfile.NamedTemporaryFile(mode='w', dir=basedir, delete=False) as fp:
        return fp.name
def split_words_and_transcriptions_for_cv(words_and_transcriptions, cv):
    """Split a word->transcriptions lexicon into cv train/test folds.

    Words are shuffled once; each fold takes a disjoint slice of them as
    its test set and everything else as training.  Each fold is a pair of
    tuples of "word\\ttranscription\\n" lines.
    """
    assert len(words_and_transcriptions) > 0, 'List of texts is empty!'
    assert cv > 0, 'Number of folds for crossvalidation must be a positive integer value!'
    assert len(words_and_transcriptions) >= cv, '{0} > {1}. Number of folds for crossvalidation is too large!'.format(
        cv, len(words_and_transcriptions)
    )
    fold_size = len(words_and_transcriptions) // cv
    shuffled = sorted(list(words_and_transcriptions.keys()))
    random.shuffle(shuffled)
    folds = list()
    for fold_ind in range(cv):
        lo = fold_ind * fold_size
        hi = lo + fold_size
        # Training lines: every word outside this fold's slice, restored to sorted order.
        training_lines = [
            u'{0}\t{1}\n'.format(word, transcription)
            for word in sorted(shuffled[:lo] + shuffled[hi:])
            for transcription in words_and_transcriptions[word]
        ]
        # Testing lines: the fold's own slice.
        testing_lines = [
            u'{0}\t{1}\n'.format(word, transcription)
            for word in sorted(shuffled[lo:hi])
            for transcription in words_and_transcriptions[word]
        ]
        folds.append((tuple(training_lines), tuple(testing_lines)))
    return folds
def main():
    """Train a Phonetisaurus G2P model and transcribe a word list.

    Optionally runs --cv-fold cross-validation first (reporting WER/PER),
    then trains on the full lexicon, transcribes the source word list and
    writes the merged pronunciation dictionary to the destination file.
    External `phonetisaurus-train` / `phonetisaurus-apply` binaries are
    invoked through os.system.
    """
    parser = ArgumentParser()
    parser.add_argument('-s', '--src', dest='word_list', required=True, type=str,
                        help='Source file with words without their phonetical transcriptions.')
    parser.add_argument('-t', '--train', dest='lexicon_for_training', type=str, required=False,
                        default=os.path.join(os.path.dirname(__file__), 'data', 'ru_training.dic'),
                        help='File with source vocabulary for training.')
    parser.add_argument('-d', '--dst', dest='destination_lexicon', type=str, required=True,
                        help='Destination file into which the creating phonetic transcriptions shall be written.')
    parser.add_argument('--cv', dest='cv', type=int, required=False, default=None,
                        help='Fold number for crossvalidation (if it is not '
                             'specified, then cross-validation will not be '
                             'executed, and final training will be started '
                             'right away).')
    parser.add_argument('-n', '--ngram', dest='ngram', type=int, required=False, default=5, help='Maximal N-gram size.')
    parser.add_argument('-p', '--pmass', dest='pmass', type=float, required=False, default=0.85,
                        help='% of total probability mass constraint for transcriptions generating.')
    parser.add_argument('--seed', dest='seed', type=int, required=False, default=0, help='Random seed.')
    args = parser.parse_args()
    # Validate all paths and numeric parameters up front.
    src_wordlist_name = os.path.normpath(args.word_list)
    assert os.path.isfile(src_wordlist_name), u'File "{0}" does not exist!'.format(src_wordlist_name)
    dst_vocabulary_name = os.path.normpath(args.destination_lexicon)
    dst_vocabulary_dir = os.path.dirname(dst_vocabulary_name)
    assert os.path.isdir(dst_vocabulary_dir), u'Directory "{0}" does not exist!'.format(dst_vocabulary_dir)
    training_vocabulary_name = os.path.normpath(args.lexicon_for_training)
    assert os.path.isfile(training_vocabulary_name), u'File "{0}" does not exist!'.format(training_vocabulary_name)
    cv = args.cv
    if cv is not None:
        assert cv > 1, u'Fold number for crossvalidation is too small!'
    ngram = args.ngram
    assert ngram > 1, u'Maximal N-gram size is too small!'
    pmass = args.pmass
    assert (pmass > 0.0) and (pmass <= 1.0), u'% of total probability mass constraint is wrong!'
    model_dir = os.path.join(os.path.dirname(__file__), 'model')
    random.seed(args.seed)
    words_and_transcriptions = load_lexicon(training_vocabulary_name)
    # Optional cross-validation pass: per fold, write train/test/wordlist
    # temp files, train and apply the model, then measure WER/PER.
    if cv is not None:
        folds = split_words_and_transcriptions_for_cv(words_and_transcriptions, cv)
        WERs = list()
        PERs = list()
        for cur_fold in folds:
            tmp_file_for_training = create_tmp_file_name()
            tmp_file_for_testing = create_tmp_file_name()
            tmp_file_for_wordlist = create_tmp_file_name()
            tmp_file_for_result = create_tmp_file_name()
            try:
                with codecs.open(tmp_file_for_training, mode='w',
                                 encoding='utf-8', errors='ignore') as fp:
                    for cur in cur_fold[0]:
                        fp.write(cur)
                with codecs.open(tmp_file_for_testing, mode='w',
                                 encoding='utf-8', errors='ignore') as fp:
                    for cur in cur_fold[1]:
                        fp.write(cur)
                with codecs.open(tmp_file_for_wordlist, mode='w',
                                 encoding='utf-8', errors='ignore') as fp:
                    for cur in cur_fold[1]:
                        fp.write(cur.split()[0] + '\n')
                cmd = u'phonetisaurus-train --lexicon "{0}" --dir_prefix "{1}" --model_prefix russian_g2p ' \
                      u'--ngram_order {2} --seq2_del'.format(
                    tmp_file_for_training, model_dir, ngram)
                os.system(cmd)
                cmd = u'phonetisaurus-apply --model "{0}" --word_list "{1}" -p {2} -a > "{3}"'.format(
                    os.path.join(model_dir, 'russian_g2p.fst'),
                    tmp_file_for_wordlist, pmass, tmp_file_for_result
                )
                os.system(cmd)
                word_error_rate, phone_error_rate = compare_lexicons(
                    tmp_file_for_testing, tmp_file_for_result)
                WERs.append(word_error_rate)
                PERs.append(phone_error_rate)
            finally:
                # Always clean up the per-fold temp files and model artifacts.
                if os.path.isfile(tmp_file_for_training):
                    os.remove(tmp_file_for_training)
                if os.path.isfile(tmp_file_for_testing):
                    os.remove(tmp_file_for_testing)
                if os.path.isfile(tmp_file_for_wordlist):
                    os.remove(tmp_file_for_wordlist)
                if os.path.isfile(tmp_file_for_result):
                    os.remove(tmp_file_for_result)
                for cur in filter(lambda it: it.startswith(u'russian_g2p'),
                                  os.listdir(model_dir)):
                    os.remove(os.path.join(model_dir, cur))
        WERs = np.array(WERs, dtype=np.float64)
        PERs = np.array(PERs, dtype=np.float64)
        print(u'')
        print(u'Word error rate is {0:.2%} +- {1:.2%}'.format(WERs.mean(), WERs.std()))
        print(u'Phone error rate is {0:.2%} +- {1:.2%}'.format(PERs.mean(), PERs.std()))
        print(u'')
        print(u'Crossvalidation is finised...')
    # Final pass: train on the full lexicon and transcribe the source word list.
    tmp_file_for_training = create_tmp_file_name()
    tmp_file_for_result = create_tmp_file_name()
    try:
        with codecs.open(tmp_file_for_training, mode='w', encoding='utf-8', errors='ignore') as fp:
            for cur_word in sorted(list(words_and_transcriptions.keys())):
                for cur_transcription in words_and_transcriptions[cur_word]:
                    fp.write(u'{0}\t{1}\n'.format(cur_word, cur_transcription))
        print(u'')
        print(u'Final training is started...')
        cmd = u'phonetisaurus-train --lexicon "{0}" --dir_prefix "{1}" --model_prefix russian_g2p ' \
              u'--ngram_order {2} --seq2_del'.format(tmp_file_for_training, model_dir, ngram)
        os.system(cmd)
        print(u'')
        print(u'Final training is finished...')
        print(u'')
        print(u'Final recognition of transcriptions for words is started...')
        cmd = u'phonetisaurus-apply --model "{0}" --word_list "{1}" -p {2} -a > "{3}"'.format(
            os.path.join(model_dir, 'russian_g2p.fst'), src_wordlist_name, pmass, tmp_file_for_result
        )
        os.system(cmd)
        print(u'')
        print(u'Final recognition of transcriptions for words is finished...')
        predicted_phonetic_dictionary = load_lexicon(tmp_file_for_result)
    finally:
        if os.path.isfile(tmp_file_for_training):
            os.remove(tmp_file_for_training)
        if os.path.isfile(tmp_file_for_result):
            os.remove(tmp_file_for_result)
    # Merge predictions into the training lexicon (keeping existing variants first).
    for cur_word in predicted_phonetic_dictionary:
        if cur_word in words_and_transcriptions:
            for cur_transcription in predicted_phonetic_dictionary[cur_word]:
                if cur_transcription not in words_and_transcriptions[cur_word]:
                    words_and_transcriptions[cur_word].append(cur_transcription)
        else:
            words_and_transcriptions[cur_word] = predicted_phonetic_dictionary[cur_word]
    # Write the merged dictionary; extra variants get a "(2)", "(3)", ... suffix.
    with codecs.open(dst_vocabulary_name, mode='w', encoding='utf-8', errors='ignore') as fp:
        for cur_word in sorted(list(words_and_transcriptions.keys())):
            fp.write(u'{0} {1}\n'.format(cur_word, words_and_transcriptions[cur_word][0]))
            for ind in range(1, len(words_and_transcriptions[cur_word])):
                fp.write(u'{0}({1}) {2}\n'.format(cur_word, ind + 1, words_and_transcriptions[cur_word][ind]))
if __name__ == '__main__':
    main()
| nsu-ai-team/russian_g2p_neuro | do_experiments.py | do_experiments.py | py | 9,704 | python | en | code | 19 | github-code | 90 |
74728744935 | #!/usr/bin/env python3
class Solution:
    def searchInsert(self, nums: [int], target: int) -> int:
        """Return the index of target in sorted nums, or the index at which
        it would be inserted to keep nums sorted (LeetCode 35).

        O(log n) binary search.  Also handles an empty list (returns 0),
        which crashed the previous version via a negative-index lookup.
        """
        left = 0
        right = len(nums) - 1
        while left <= right:
            mid = (left + right) // 2
            if nums[mid] == target:
                return mid
            elif nums[mid] > target:
                right = mid - 1
            else:
                left = mid + 1
        # After the loop, `left` is the first index whose value exceeds
        # target (or len(nums)) -- exactly the insertion point.  This
        # replaces the original post-loop conditionals, which relied on
        # Python's negative-index wraparound and raised IndexError for [].
        return left
# Quick manual smoke test: inserting 0 into [1] should land at index 0.
a = Solution()
print(Solution.searchInsert(a, [1], 0))
29307286053 | import numpy as np
import matplotlib.pyplot as plt
import librosa
import librosa.display
import os
import sys
import tensorflow as tf
import dataset
def get_dev_eval_str(idx):
    """Return the dataset-split folder name: 'dev_data' for 0, 'eval_data' otherwise."""
    return 'dev_data' if idx == 0 else 'eval_data'
def get_machine_type_str(idx):
    """Map a machine-type index (0-5) to its name; any other index falls back to 'valve'."""
    names = {0: 'fan', 1: 'pump', 2: 'slider', 3: 'ToyCar', 4: 'ToyConveyor'}
    return names.get(idx, 'valve')
def get_train_test_str(idx):
    """Return the subset folder name: 'train' for 0, 'test' otherwise."""
    return 'train' if idx == 0 else 'test'
def get_normal_anomaly_str(idx):
    """Return the condition label: 'normal' for 0, 'anomaly' otherwise."""
    return 'normal' if idx == 0 else 'anomaly'
def get_machine_id_str(idx):
    """Build the machine-id token used in file names, e.g. 0 -> 'id_00'."""
    return 'id_0' + str(idx)
def get_data_num_str(idx):
    """Zero-pad a clip number to eight digits, e.g. 3 -> '00000003'."""
    return '{:08d}'.format(idx)
def get_in_out_str(idx):
    """Map a figure-kind index to its label; unknown indices fall back to 'error_diff'."""
    labels = {
        0: 'input',
        1: 'output',
        2: 'error',
        3: 'error_input',
        4: 'error_output',
        5: 'error_diff',
        10: 'original_input',
    }
    return labels.get(idx, 'error_diff')
def file_to_log_mel_spectrogram(file_name,
                                n_mels=128,
                                n_fft=2048,
                                hop_length=512,
                                power=2.0):
    """Load an audio file and return its log-mel spectrogram.

    Returns an array of shape (n_mels, frames) in decibel-like units
    (20/power * log10).  The small constant added before log10 guards
    against log of zero.
    """
    # generate mel_spectrogram using librosa
    y, sr = dataset.file_load(file_name)
    mel_spectrogram = librosa.feature.melspectrogram(y=y,
                                                     sr=sr,
                                                     n_fft=n_fft,
                                                     hop_length=hop_length,
                                                     n_mels=n_mels,
                                                     power=power)
    # convert mel_spectrogram to log mel spectrogram
    log_mel_spectrogram = 20.0 / power * np.log10(mel_spectrogram + sys.float_info.epsilon + 0.0001)
    return log_mel_spectrogram
def one_data_normalizing(data, machine, idx):
    """Normalize a spectrogram in place using per-row train-set statistics.

    idx == 0 disables normalization and returns data untouched; otherwise
    each row is centered/scaled with the mean/std loaded from the cached
    'train_dataset_<machine>_{mean,std}.npy' files.  The input array is
    mutated and also returned.
    """
    if idx == 0:
        return data
    mean = np.load('train_dataset_{}_mean.npy'.format(machine))
    std = np.load('train_dataset_{}_std.npy'.format(machine))
    for row in range(data.shape[0]):
        data[row, :] = (data[row, :] - mean[row]) / std[row]
    return data
def one_data_preprocessing(data):
    """Turn a 2-D feature matrix (d0, d1) into a single-sample batch
    with the trailing axes swapped: result shape is (1, d1, d0)."""
    batched = data.reshape((1,) + data.shape)
    return np.moveaxis(batched, 1, 2)
def plot_figure0_original_input(fig, ma, de, tt, na, mi, dn, md):
    """Plot the full input spectrogram and save it as a PNG.

    fig is the batched (1, time, mel) array; ma/de/tt/na/mi/dn name the
    machine, split, subset, condition, machine id and clip number used in
    the title; md selects the ./model/<md>/figures/ output directory.
    """
    data = fig.squeeze()
    data = np.moveaxis(data, 0, 1)  # (time, mel) -> (mel, time) for specshow
    plt.figure(figsize=(25, 10))
    plt.subplots_adjust(wspace=0.3, hspace=0.3)
    librosa.display.specshow(data,
                             cmap=plt.get_cmap('magma'),
                             vmin=-5,
                             vmax=5,
                             y_axis='mel')
    figure_name = '{ma} {de} {tt} {na} {mi} #{dn} 0.original_input'.format(ma=ma, de=de, tt=tt, na=na, mi=mi, dn=dn)
    plt.title(figure_name, fontsize=30)
    plt.xlabel('Time Frame', fontsize=30)
    plt.ylabel('Frequency', fontsize=30)
    plt.colorbar(label='normalized')
    os.makedirs('./model/' + md + '/figures/' + ma, exist_ok=True)
    plt.savefig('./model/' + md + '/figures/' + ma + '/' + figure_name + '.png')
def plot_figure1_input(fig, ma, de, tt, na, mi, dn, md, rf):
    """Plot the input spectrogram cropped by the model's receptive field.

    The first rf time frames are dropped so the image aligns with the
    reconstruction produced by the model; saved as a PNG like figure 0.
    """
    data = fig.squeeze()
    data = np.moveaxis(data, 0, 1)  # (time, mel) -> (mel, time)
    data1 = data[:, rf:]  # drop the receptive-field prefix
    plt.figure(figsize=(25, 10))
    librosa.display.specshow(data1,
                             cmap=plt.get_cmap('magma'),
                             vmin=-5,
                             vmax=5,
                             y_axis='mel')
    figure_name = '{ma} {de} {tt} {na} {mi} #{dn} 1.input'.format(ma=ma, de=de, tt=tt, na=na, mi=mi, dn=dn)
    plt.title(figure_name, fontsize=30)
    plt.xlabel('Time Frame', fontsize=30)
    plt.ylabel('Frequency', fontsize=30)
    plt.colorbar(label='normalized')
    os.makedirs('./model/' + md + '/figures/' + ma, exist_ok=True)
    plt.savefig('./model/' + md + '/figures/' + ma + '/' + figure_name + '.png')
def plot_figure2_output(fig, ma, de, tt, na, mi, dn, md):
    """Plot the model's reconstructed spectrogram and save it as a PNG."""
    data = fig.squeeze()
    data = np.moveaxis(data, 0, 1)  # (time, mel) -> (mel, time)
    plt.figure(figsize=(25, 10))
    librosa.display.specshow(data,
                             cmap=plt.get_cmap('magma'),
                             vmin=-5,
                             vmax=5,
                             y_axis='mel')
    figure_name = '{ma} {de} {tt} {na} {mi} #{dn} 2.reconstruction'.format(ma=ma, de=de, tt=tt, na=na, mi=mi, dn=dn)
    plt.title(figure_name, fontsize=30)
    plt.xlabel('Time Frame', fontsize=30)
    plt.ylabel('Frequency', fontsize=30)
    plt.colorbar(label='normalized')
    os.makedirs('./model/' + md + '/figures/' + ma, exist_ok=True)
    plt.savefig('./model/' + md + '/figures/' + ma + '/' + figure_name + '.png')
def plot_figure3_error(fig1, fig2, ma, de, tt, na, mi, dn, md, rf):
    """Plot the reconstruction error (cropped input minus reconstruction).

    fig1 is the full input, fig2 the reconstruction; the input is trimmed
    by rf frames so both have the same length before subtracting.
    """
    data1 = fig1.squeeze()
    data2 = fig2.squeeze()
    input_fig = data1[rf:, :]  # align input with the shorter reconstruction
    error = input_fig - data2
    error = np.moveaxis(error, 0, 1)  # (time, mel) -> (mel, time)
    plt.figure(figsize=(25, 10))
    librosa.display.specshow(error,
                             cmap=plt.get_cmap('RdGy'),
                             vmin=-5,
                             vmax=5,
                             y_axis='mel')
    figure_name = '{ma} {de} {tt} {na} {mi} #{dn} 3.error'.format(ma=ma, de=de, tt=tt, na=na, mi=mi, dn=dn)
    plt.title(figure_name, fontsize=30)
    plt.xlabel('Time Frame', fontsize=30)
    plt.ylabel('Frequency', fontsize=30)
    plt.colorbar(label='normalized')
    os.makedirs('./model/' + md + '/figures/' + ma, exist_ok=True)
    plt.savefig('./model/' + md + '/figures/' + ma + '/' + figure_name + '.png')
def plot_figure4_classification(data, ma, de, tt, na, mi, dn, md):
    """Plot the 4-way machine-id classification probabilities as a bar chart.

    ToyCar/ToyConveyor use ids 01-04; the other machines use even ids 00-06
    (matching the DCASE dataset's id scheme).  Saved as a PNG.
    """
    y_prob = np.squeeze(data)
    if ma == 'ToyCar' or ma == 'ToyConveyor':
        x_label = ['id_01', 'id_02', 'id_03', 'id_04']
    else:
        x_label = ['id_00', 'id_02', 'id_04', 'id_06']
    plt.figure(figsize=(10, 10))
    x = np.arange(4)
    plt.bar(x, y_prob)
    plt.ylim(0, 1)
    plt.xticks(x, x_label, fontsize=20)
    figure_name = '{ma} {de} {tt} {na} {mi} #{dn} 4.classification'.format(ma=ma, de=de, tt=tt, na=na, mi=mi, dn=dn)
    plt.title(figure_name, fontsize=20)
    # plt.xlabel('ID', fontsize=30)
    plt.ylabel('Probability', fontsize=20)
    # plt.colorbar(label='normalized')
    os.makedirs('./model/' + md + '/figures/' + ma, exist_ok=True)
    plt.savefig('./model/' + md + '/figures/' + ma + '/' + figure_name + '.png')
# Pin the process to GPU 3 before TensorFlow initializes.
os.environ["CUDA_VISIBLE_DEVICES"] = "3"
# Driver: load the two-stage model for one machine type and render the
# figure set (input / reconstruction / error / classification) for a few clips.
if __name__ == '__main__':
    # get parameters
    model_dir = '2021-06-26 WN'
    # 0: not normalization / 1: normalization
    norm_idx = 1
    # 0: development / 1: evaluation
    dev_eval_idx = 0
    # 0: fan / 1: pump / 2: slider / 3: ToyCar / 4: ToyConveyor / 5: valve
    machine_type_idx = 5
    # 0: train / 1: test
    train_test_idx = 0
    # 0: normal / 1: anomaly
    normal_anomaly_idx = 0
    # 0: id_00 / 1: id_01 / 2: id_02 / 3: id_03 / 4: id_04 ...
    machine_id_idx = 0
    # data number
    data_num_idx = [0, 1, 2, 3, 4]
    # n fft
    nfft = 2048
    # get str of parameters
    dev_eval_str = get_dev_eval_str(dev_eval_idx)
    machine_type_str = get_machine_type_str(machine_type_idx)
    train_test_str = get_train_test_str(train_test_idx)
    normal_anomaly_str = get_normal_anomaly_str(normal_anomaly_idx)
    machine_id_str = get_machine_id_str(machine_id_idx)
    # set model path
    model1_path = './model/{model}/model1_{machine}'.format(model=model_dir, machine=machine_type_str)
    model2_path = './model/{model}/model2_{machine}'.format(model=model_dir, machine=machine_type_str)
    # get model (model1: feature extractor, model2: reconstructor -- per usage below)
    model1 = tf.keras.models.load_model(model1_path)
    model2 = tf.keras.models.load_model(model2_path)
    for ii in range(len(data_num_idx)):
        data_num_str = get_data_num_str(data_num_idx[ii])
        # get sound data
        sound_file_dir = './{d_e}/{m_t}/{t_t}/{n_a}_{m_i}_{d_n}.wav'.format(d_e=dev_eval_str,
                                                                            m_t=machine_type_str,
                                                                            t_t=train_test_str,
                                                                            n_a=normal_anomaly_str,
                                                                            m_i=machine_id_str,
                                                                            d_n=data_num_str)
        # get log mel spectrogram, normalize and batch it for the model
        sound_log_mel = file_to_log_mel_spectrogram(sound_file_dir, n_fft=nfft)
        norm_data = one_data_normalizing(sound_log_mel, machine_type_str, norm_idx)
        input_data = one_data_preprocessing(norm_data)
        global_feature = model1.predict(input_data)
        reconstruction = model2.predict(global_feature)
        # frames lost to the model's receptive field (input is longer than output)
        receptive_field = input_data.shape[1] - reconstruction.shape[1]
        plot_figure0_original_input(input_data, machine_type_str, dev_eval_str, train_test_str, normal_anomaly_str, machine_id_str, data_num_idx[ii], model_dir)
        plot_figure1_input(input_data, machine_type_str, dev_eval_str, train_test_str, normal_anomaly_str, machine_id_str, data_num_idx[ii], model_dir, receptive_field)
        plot_figure2_output(reconstruction, machine_type_str, dev_eval_str, train_test_str, normal_anomaly_str, machine_id_str, data_num_idx[ii], model_dir)
        plot_figure3_error(input_data, reconstruction, machine_type_str, dev_eval_str, train_test_str, normal_anomaly_str, machine_id_str, data_num_idx[ii], model_dir, receptive_field)
    print('end')
| PDDBori/DCASE2020 | WN_figure.py | WN_figure.py | py | 10,475 | python | en | code | 0 | github-code | 90 |
36508004638 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Tue Jan 4 16:14:55 2022
@author: amanda
"""
##define function##
def calculate_savings(portion_saved, current_savings, monthly_salary):
    """Simulate 36 months of saving and return the final balance.

    Each month the balance earns r/12 interest (module-level annual rate
    ``r``) plus ``portion_saved`` of the monthly salary; every sixth month
    the salary grows by the module-level ``semi_annual_raise``.
    """
    for month in range(36):
        current_savings = current_savings + current_savings * r / 12.0 + portion_saved * monthly_salary
        # semi-annual raise
        if (month + 1) % 6 == 0:
            monthly_salary = monthly_salary + monthly_salary * semi_annual_raise
    return current_savings
##define variables##
annual_salary = float(input("Enter your annual salary: "))
portion_saved = 0
total_cost = 1000000.0
semi_annual_raise = 0.07
portion_down_payment = 0.25
current_savings = 0
r = 0.04
monthly_salary = (annual_salary / 12.0)
goal = portion_down_payment * total_cost
steps = 0
# Bisection bounds on the savings rate, scaled by 10000 for integer halving.
high = 10000
low = 0
portion_saved_bisection = ((high + low) / 2) / 10000
##check if it's possible to save in 36 mos##
max_savings = calculate_savings(1.0, current_savings, monthly_salary)
if max_savings < goal:
    # Fix: the original fell through into the bisection loop even when the
    # goal was unreachable; the loop condition then never became false and
    # the program spun forever.  Skip the search entirely instead.
    print("It is not possible to pay the down payment in three years.")
else:
    ##bisection search runs until the savings land within 100 of the goal##
    while abs(current_savings - goal) >= 100:
        current_savings = 0.0
        monthly_salary = (annual_salary / 12.0)
        current_savings = calculate_savings(portion_saved_bisection, current_savings, monthly_salary)
        print("portion_saved:", portion_saved_bisection)
        if (current_savings < goal):
            low = int(portion_saved_bisection * 10000)
        elif (current_savings > goal):
            high = int(portion_saved_bisection * 10000)
        else:
            break
        portion_saved_bisection = ((high + low) / 2) / 10000
        steps += 1
    print("Best savings rate:", portion_saved_bisection)
    print("Steps in bisection search:", steps)
| Amanda-Wright17/6.0001 | pset1/ps1c.py | ps1c.py | py | 1,857 | python | en | code | 0 | github-code | 90 |
43852855880 | from scipy.spatial.distance import cdist
import scipy.io as sio
from FeatureExtractor import *
from config import *
import h5py
def extractLayerFeat_whole_fixrr(category, extractor, resize_tar, set_type='train'):
print('extracting {} set features for {} at resize value {}'.format(set_type, category, resize_tar))
# img_dir = Dataset['occ_img_dir'].format(category,'NINE')
if set_type == 'occ':
img_dir = Dataset['occ_img_dir'].format(category, SP['occ_level'])
anno_dir = Dataset['anno_dir'].format(category)
filelist = Dataset['{}_list'.format(set_type)].format(category)
with open(filelist, 'r') as fh:
contents = fh.readlines()
img_list = [cc.strip()[0:-2] for cc in contents if cc != '\n']
idx_list = [cc.strip()[-1] for cc in contents if cc != '\n']
N = len(img_list)
print('Total image number for {} set of {}: {}'.format(set_type, category, N))
else:
img_dir = Dataset['img_dir_org'].format(category)
anno_dir = Dataset['anno_dir'].format(category)
filelist = Dataset['{}_list'.format(set_type)].format(category)
with open(filelist, 'r') as fh:
contents = fh.readlines()
img_list = [cc.strip().split()[0] for cc in contents if cc != '\n']
idx_list = [cc.strip().split()[1] for cc in contents if cc != '\n']
N = len(img_list)
print('Total image number for {} set of {}: {}'.format(set_type, category, N))
feat_set = [None for nn in range(N)]
for nn in range(N):
if nn%100==0:
print(nn, end=' ', flush=True)
if set_type == 'occ':
matfile = os.path.join(img_dir, '{}_{}.mat'.format(img_list[nn], idx_list[nn]))
f = h5py.File(matfile)
img = np.array(f['record']['img']).T
img = img[:,:,::-1] # RGB to BGR
else:
img_file = os.path.join(img_dir, '{}.JPEG'.format(img_list[nn]))
try:
assert(os.path.exists(img_file))
except:
print('file not exist: {}'.format(img_file))
continue
img = cv2.imread(img_file)
resize_ratio = resize_tar/np.min(img.shape[0:2])
img_resized = cv2.resize(img,None,fx=resize_ratio, fy=resize_ratio)
layer_feature = extractor.extract_feature_image(img_resized)[0]
assert(featDim == layer_feature.shape[2])
feat_set[nn] = layer_feature
print('\n')
dir_feat_cache = os.path.join(Feat['cache_dir'], 'resize_{}'.format(resize_tar))
if not os.path.exists(dir_feat_cache):
os.makedirs(dir_feat_cache)
file_cache_feat = os.path.join(dir_feat_cache, 'feat_{}_{}_{}.pickle'.format(category, set_type, VC['layer']))
with open(file_cache_feat, 'wb') as fh:
pickle.dump(feat_set, fh)
# file_cache_rr = os.path.join(Feat['cache_dir'], 'feat_{}_{}_rr.pickle'.format(category, set_type))
# with open(file_cache_rr, 'wb') as fh:
# pickle.dump(resize_ratio_ls, fh)
if __name__=='__main__':
resize_tar = int(sys.argv[1])
extractor = FeatureExtractor(cache_folder=model_cache_folder, which_net='vgg16', which_layer=VC['layer'], which_snapshot=0)
for category in all_categories2[3:4]:
extractLayerFeat_whole_fixrr(category, extractor, resize_tar, set_type='test') | qliu24/SP | src/extractLayerFeat_whole_fixrr.py | extractLayerFeat_whole_fixrr.py | py | 3,465 | python | en | code | 0 | github-code | 90 |
12474073056 | from aiogram.types import InlineKeyboardMarkup
from aiogram.utils.callback_data import CallbackData
from tgbot.misc.markup_constructor.inline import InlineMarkupConstructor
class UsersInlineMarkup(InlineMarkupConstructor):
    """Inline keyboards presented to regular users of the bot."""

    def menu(self, is_admin: bool) -> InlineKeyboardMarkup:
        """Main menu keyboard; admins additionally get an admin-panel row."""
        actions = [
            {'text': '🛒 Каталог', 'switch_inline_query_current_chat': ''},
            {'text': '📨 Обратная свзяь', 'callback_data': 'feedback_user'},
            {'text': '🌐 Рефералка', 'callback_data': 'referrer_user'},
        ]
        if not is_admin:
            return self.markup(actions, [2, 1])
        actions.append({'text': '🔐 Административная панель', 'callback_data': 'admin_panel'})
        return self.markup(actions, [2, 1, 1])

    def referral(self, condition: str) -> InlineKeyboardMarkup:
        """Referral screen; while no code is set, prepend a "set code" button."""
        actions = [
            {'text': '◀️ Назад', 'callback_data': 'back_button'},
        ]
        if condition is not None:
            return self.markup(actions, [1])
        actions.insert(0, {'text': '🔐 Установить код приглашения', 'callback_data': 'install_referral_code'})
        return self.markup(actions, [1, 1])

    def register(self) -> InlineKeyboardMarkup:
        """Registration prompt with a single invitation-code button."""
        actions = [
            {'text': '🔑 Код приглашения', 'callback_data': 'invitation_code'},
        ]
        return self.markup(actions, [1])
| mandico21/bot_course_task | tgbot/keyboards/inline/iusers.py | iusers.py | py | 1,494 | python | en | code | 1 | github-code | 90 |
18455772409 | from heapq import heappush, heappop
import sys
input = sys.stdin.readline
# Pick exactly K sushi out of N to maximize (sum of deliciousness) + x^2,
# where x is the number of distinct topping types chosen -- greedy + heap.
N, K = map(int, input().split())
sushi = [None] * N
for i in range(N):
    t, d = map(int, input().split())
    sushi[i] = (d, t-1)  # (deliciousness, 0-based topping type)
sushi.sort(reverse=True)  # most delicious first
types = set()   # topping types already represented
cand = []       # min-heap of duplicate-type picks we could swap out
s = x = 0       # running deliciousness sum / distinct-type count
# Start from the K most delicious pieces.
for d, t in sushi[:K]:
    if t in types:
        heappush(cand, d)  # duplicate type: cheapest of these may be swapped later
    else:
        types.add(t)
        x += 1
    s += d
ans = s + x*x
# Try trading the cheapest duplicate for a new type; each swap lowers s a
# little but raises the x^2 bonus -- keep the best total seen.
for d, t in sushi[K:]:
    if t in types:
        continue
    if not cand:
        break
    dr = heappop(cand)
    s += d - dr
    types.add(t)
    x += 1
    ans = max(ans, s + x*x)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03148/s200889209.py | s200889209.py | py | 605 | python | en | code | 0 | github-code | 90 |
10540116381 | import os
import glob
import pdftotext
import re
import math
def print_options():
    """Display the interactive main-menu options on stdout."""
    menu = (
        "Please select an option from below:",
        "1. Set directory for indexing",
        "2. Add search category",
        "3. Build index",
        "4. Print index",
        "5. Enter query",
        "(or type 'quit' to exit)",
    )
    for entry in menu:
        print(entry)
class Posting:
    """One inverted-index posting: a document id and the term's frequency there."""
    def __init__(self, doc_id, term_frequency):
        # doc_id: integer index into the parallel doc_lex/doc_length/doc_score lists
        self.doc_id = doc_id
        # term_frequency: occurrences of the term in that document
        self.term_frequency = term_frequency
# --- Global index state shared by the functions below ---
search_dir = ""        # directory scanned for *.pdf files
categories = []        # list of [category_name, term, term, ...] lists
categories_index = 0   # next free slot in `categories`
window_size = 3 #on each side
doc_lex = []           # doc_id -> filename
doc_length = []        # doc_id -> token count
doc_score = []         # doc_id -> current query score
avg_doc_length = 1     # mean document length (computed after indexing)
doc_index = 0          # number of documents indexed so far
index = dict()         # term -> list[Posting] (the inverted index)
term_lex = dict()      # unused here -- presumably reserved; TODO confirm
term_index = 0         # unused here -- presumably reserved; TODO confirm
b = 0.2                # pivoted length-normalization slope parameter
def pivoted_normalization(term, query_term_count):
    """Accumulate this term's pivoted-length-normalization score into the
    global `doc_score` for every document that contains the term.

    Pivoted normalization needs each document's length, the average document
    length, the collection size (`doc_index`), and the term's document
    frequency -- the caller supplies only the term and its query frequency.

    term -- query term; must be a key of the global inverted `index`
    query_term_count -- number of times the term occurs in the query
    """
    global index
    global b
    global avg_doc_length
    global doc_index
    global doc_length
    global doc_score
    postings = index[term]
    # Document frequency is simply how many postings the term has; no need
    # to count them in a loop.
    doc_freq = len(postings)
    for posting in postings:
        # Dampened TF, normalized by document length relative to the average.
        first_term = (1 + math.log(1 + math.log(posting.term_frequency))) / (
            (1 - b) + b * doc_length[posting.doc_id] / avg_doc_length)
        # IDF weighted by the query-term frequency.
        second_term = query_term_count * math.log((doc_index + 1) / doc_freq)
        doc_score[posting.doc_id] += first_term * second_term
def rank(query):
    """Score every indexed document against `query`.

    Resets the global `doc_score`, counts each query term's frequency, and
    calls pivoted_normalization() once per distinct term.
    """
    global doc_index
    # Initialize all document scores to zero.
    for i in range(doc_index):
        doc_score[i] = 0
    # Count how often each term occurs in the query.
    # BUGFIX: the old tuple-based loop reassigned the unpacked `freq` local,
    # which cannot mutate the (immutable) tuple -- every term was effectively
    # counted once. A dict counts duplicates correctly.
    term_counts = {}
    for qt in query.split(" "):
        term_counts[qt] = term_counts.get(qt, 0) + 1
    for qt, qt_freq in term_counts.items():
        pivoted_normalization(qt, qt_freq)
    return
def retrieve(ret_count):
    """Print the top `ret_count` documents by score, and for each one the
    best-matching word (with its frequency) from every user-defined category.

    Consumes the scores: each printed document's score is zeroed so it is
    not reported twice.
    """
    global index
    global categories
    high_score = 0
    top_index = 0
    for i in range(ret_count):
        # Linear scan for the current best-scoring document.
        for j in range(doc_index):
            if doc_score[j] > high_score:
                high_score = doc_score[j]
                top_index = j
        print(str(i+1)+".) "+doc_lex[top_index]+" with score "+str(high_score))
        # Report the strongest category term appearing in this document.
        for category in categories:
            cat_score = 0
            best_cat_word = ""
            for cat_word in category:
                if cat_word != category[0] and cat_word in index: #category 0 is the category name
                    postings = index[cat_word]
                    for posting in postings:
                        if posting.doc_id == top_index:
                            if posting.term_frequency > cat_score:
                                cat_score = posting.term_frequency
                                best_cat_word = cat_word
            if cat_score != 0:
                print("\t" + category[0] + ": " + best_cat_word + " (" + str(cat_score) + ")")
            else:
                print("\t" + category[0] + ": not found")
        #set retrieved article's score to 0 to avoid reprinting it
        doc_score[top_index] = 0
        top_index = 0
        high_score = 0
    return
#need to count how many times each term occurs and call pivoted_normalization only once for each
#then send as an argument (query_term_count)
#need to calculate doc_freq here for each query term
# Interactive REPL: show the menu, dispatch on the user's choice, repeat
# until 'quit'.
print_options()
user_choice = input()
while user_choice != "quit":
    if user_choice == "1":
        # choose directory
        search_dir = input("Enter directory: ")
    elif user_choice == "2":
        # add search category
        cat = input("Enter a category name:")
        #categories is list of lists
        #each list is category name followed by associated terms
        categories.append([])
        categories[categories_index].append(cat)
        cat_terms = input("Enter associated terms (comma separated, no spaces):")
        cat_terms = cat_terms.split(',')
        for cat_term in cat_terms:
            categories[categories_index].append(cat_term)
        categories_index += 1
        print("Categories:", categories)
    elif user_choice == "3":
        # build index
        print("Building index . . .")
        for filename in glob.glob(os.path.join(search_dir, '*.pdf')):
            with open(filename, 'rb') as f:
                print("\tOpening", filename)
                pdf = pdftotext.PDF(f)
                # Per-document postings, merged into the global index below.
                sub_index = dict()
                doc_lex.append(filename)
                doc_length.append(0)
                doc_score.append(0)
                # need to initialize list of tuples for this doc's cat terms
                # need to append tuples for each cat term that is in the doc
                # need to update it (value will be tf of the term)
                for page in pdf:
                    #iterate over pages from pdf
                    #print(page)
                    #split into lines
                    lines = page.split("\n")
                    for line in lines:
                        #clean punctuation
                        #print(line)
                        clean_line = re.sub(r'[^\w\s]',"", line)
                        #print(clean_line)
                        words = clean_line.split(" ")
                        for word in words:
                            #need to decide how to organize everything. Take another look tomorrow and make a decision. This needs progress now
                            doc_length[doc_index] += 1
                            if word in sub_index:
                                tf = sub_index[word].term_frequency
                            else:
                                tf = 0
                            sub_index[word] = Posting(doc_index, tf + 1)
                # Merge this document's postings into the global index:
                # start a new posting list or extend the existing one.
                new_dict = { key: [posting]
                             if key not in index
                             else index[key] + [posting]
                             for (key, posting) in sub_index.items() }
                index.update(new_dict)
                doc_index = doc_index + 1
        # Average document length is needed by pivoted normalization.
        for length in doc_length:
            avg_doc_length += length
        avg_doc_length = avg_doc_length / doc_index
    elif user_choice == "4":
        # print index
        for (key, posting) in index.items():
            print(key + ":")
            for doc_posting in posting:
                print("\t"+ doc_lex[doc_posting.doc_id]+", "+ str(doc_posting.term_frequency))
    elif user_choice == "5":
        # search query
        query = input("Please enter a query: ")
        rank(query)
        retrieve(5)
    print_options()
    user_choice = input()
print("Exiting . . .")
| dawsfox/laase | laase.py | laase.py | py | 6,860 | python | en | code | 0 | github-code | 90 |
23578212893 | #!/bin/usr/python
from threading import *
from time import sleep
class Prod:
    """Demo producer: fills `products`, then notifies a consumer waiting on
    the shared Condition."""
    def __init__(self):
        self.products=[]
        #self.flag=False
        self.c=Condition()
    def produce(self):
        """Append four products (1s apart), then notify under the lock."""
        self.c.acquire()
        for i in range(1,5):
            self.products.append("Product"+str(i))
            sleep(1)
            print("added")
        #self.flag=True
        # NOTE(review): if the consumer is not already wait()ing when this
        # notify() fires, the wakeup is lost -- demo code, not robust.
        self.c.notify()
        self.c.release()
class Consumer:
    """Demo consumer: waits on the producer's Condition, then prints what
    was produced."""
    def __init__(self,prood):
        # prood: the Prod instance whose condition/products we consume
        self.prod=prood
    def consume(self):
        self.prod.c.acquire()
        '''while self.prod.flag==False:
            sleep(0.2)
            print("waiting for orders")'''
        # NOTE(review): wait(timeout=0) returns immediately without actually
        # blocking for a notification -- presumably this was meant to be a
        # plain wait(); verify intended behavior.
        self.prod.c.wait(timeout=0)
        print("shipped"+str(self.prod.products))
# Wire one producer to one consumer over the shared Prod instance and run
# both concurrently.
p=Prod()
c=Consumer(p)
t1=Thread(target=p.produce)
t2=Thread(target=c.consume)
t1.start()
t2.start() | kokot300/python-core-and-advanced | threadcommunicationusingthreatingapi.py | threadcommunicationusingthreatingapi.py | py | 849 | python | en | code | 0 | github-code | 90 |
41276430363 | import torch
from torch import nn
from torchvision import models
import config
def load_model():
    """Build a VGG16-based classifier with a fresh head for
    config.NUM_CLASSES classes.

    The pretrained feature extractor is frozen; only the new classifier
    head has trainable parameters. Returns the torch model.
    """
    model = models.vgg16(pretrained=True, progress=True)
    # Freezing other layers of the model
    for p in model.parameters():
        p.requires_grad = False
    # Replace the stock VGG classifier with a small trainable head.
    # (The original code first emptied model.classifier and then rebuilt it
    # by unpacking that empty Sequential -- building the head directly is
    # equivalent and clearer.)
    model.classifier = nn.Sequential(
        nn.Linear(25088, 1000),
        nn.ReLU(inplace=True),
        nn.Linear(1000, config.NUM_CLASSES),
        nn.LogSoftmax(dim=1),
    )
    return model
| akuma527/IProjects | Fruit_Prediction/scripts/model.py | model.py | py | 648 | python | en | code | 0 | github-code | 90 |
def __check(A):
    """Return 0 if every element of A is even (the division round may
    proceed), otherwise 1. An empty A yields 0, matching the original
    loop falling through."""
    return 0 if all(x % 2 == 0 for x in A) else 1
def do_division(A, count, n):
    """Return `count` plus the number of rounds in which all `n` elements
    of A can be simultaneously halved (i.e. while every element is even).

    A is halved in place. Fixes in this version:
    - integer floor division (`//=`) instead of `/`, which silently turned
      every element into a float;
    - the round counter increments once per round, not once per element
      (the original `count += 1` sat inside the inner loop, inflating the
      result n-fold);
    - an empty A terminates immediately instead of recursing forever;
    - iteration instead of recursion (no recursion-depth limit).
    """
    while A and all(x % 2 == 0 for x in A):
        for i in range(n):
            A[i] //= 2
        count += 1
    return count
# Read N and the N integers, then count the simultaneous halving rounds.
N = int(input())
A = list(map(int, input().split()))
# print("print:", A)
count = 0
result = do_division(A, count, N)
print(result) | fideguch/AtCoder_answers | AtCoder_Beginners_Selection/made_by_python/shift_only.py | shift_only.py | py | 605 | python | en | code | 0 | github-code | 90 |
7927690601 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# gingerprawn / api.ui / draggable Grid control
# this code is taken from wxPython demo with minor modifications
import wx
import wx.grid as gridlib
import wx.lib.gridmovers as gridmovers
from gingerprawn.api.utils.titledtable import TitledTable
#---------------------------------------------------------------------------
class CustomDataTable(gridlib.PyGridTableBase):
    '''\
    Virtual grid table backed by a list of row-dicts, keyed by a movable
    list of column identifiers.

    usage is basically
    self.SetIdentifiers(['id','ds','sv','pr','pl','op','fx','ts'])
    self.SetRowLabels(['Row1','Row2','Row3'])
    self.SetColLabels({'id':'ID','ds':'Description','sv':'Severity',
                       'pr':'Priority','pl':'Platform','op':'Opened?',
                       'fx':'Fixed?','ts':'Tested?'})
    self.SetData([{'id':1010,
                   'ds':"The foo doesn't bar",
                   'sv':"major",
                   'pr':1,
                   'pl':'MSW',
                   'op':1,
                   'fx':1,
                   'ts':1
                   },
                  {'id':1011,
                   'ds':"I've got a wicket in my wocket",
                   'sv':"wish list",
                   'pr':2,
                   'pl':'other',
                   'op':0,
                   'fx':0,
                   'ts':0
                   },
                  {'id':1012,
                   'ds':"Rectangle() returns a triangle",
                   'sv':"critical",
                   'pr':5,
                   'pl':'all',
                   'op':0,
                   'fx':0,
                   'ts':0
                   }
                  ])
    '''
    def __init__(self):
        gridlib.PyGridTableBase.__init__(self)
        # dummy data
        self.identifiers = []
        self.data = []
        self.colLabels = {}
        self.rowLabels = []
    #--------------------------------------------------
    # required methods for the wxPyGridTableBase interface
    def GetNumberRows(self):
        return len(self.data)
    def GetNumberCols(self):
        return len(self.identifiers)
    def IsEmptyCell(self, row, col):
        # A cell is empty when its value is falsy (None, '', 0, ...).
        id = self.identifiers[col]
        return not self.data[row][id]
    def GetValue(self, row, col):
        id = self.identifiers[col]
        return self.data[row][id]
    def SetValue(self, row, col, value):
        id = self.identifiers[col]
        self.data[row][id] = value
    #--------------------------------------------------
    # Some optional methods
    # Called when the grid needs to display column labels
    def GetColLabelValue(self, col):
        id = self.identifiers[col]
        return self.colLabels[id]
    # Called when the grid needs to display row labels
    # MODIFIED: give off 1-based seq numbers when custom
    # values aren't supplied
    def GetRowLabelValue(self,row):
        try:
            return self.rowLabels[row]
        except IndexError:
            return str(row + 1)
    #--------------------------------------------------
    # Methods added for demo purposes.
    # The physical moving of the cols/rows is left to the implementer.
    # Because of the dynamic nature of a wxGrid the physical moving of
    # columns differs from implementation to implementation
    # Move the column
    def MoveColumn(self,frm,to):
        """Move column `frm` so it sits before position `to`, then tell the
        grid about the insert/delete pair so it refreshes."""
        grid = self.GetView()
        if grid:
            # Move the identifiers
            old = self.identifiers[frm]
            del self.identifiers[frm]
            if to > frm:
                # Deleting `frm` shifted later positions down by one.
                self.identifiers.insert(to-1,old)
            else:
                self.identifiers.insert(to,old)
            # Notify the grid
            grid.BeginBatch()
            msg = gridlib.GridTableMessage(
                    self, gridlib.GRIDTABLE_NOTIFY_COLS_INSERTED, to, 1
                    )
            grid.ProcessTableMessage(msg)
            msg = gridlib.GridTableMessage(
                    self, gridlib.GRIDTABLE_NOTIFY_COLS_DELETED, frm, 1
                    )
            grid.ProcessTableMessage(msg)
            grid.EndBatch()
    # Move the row
    def MoveRow(self,frm,to):
        """Move row `frm` (label and data) before position `to`, then notify
        the grid with the matching insert/delete messages."""
        grid = self.GetView()
        if grid:
            # Move the rowLabels and data rows
            oldLabel = self.rowLabels[frm]
            oldData = self.data[frm]
            del self.rowLabels[frm]
            del self.data[frm]
            if to > frm:
                # Deleting `frm` shifted later positions down by one.
                self.rowLabels.insert(to-1,oldLabel)
                self.data.insert(to-1,oldData)
            else:
                self.rowLabels.insert(to,oldLabel)
                self.data.insert(to,oldData)
            # Notify the grid
            grid.BeginBatch()
            msg = gridlib.GridTableMessage(
                    self, gridlib.GRIDTABLE_NOTIFY_ROWS_INSERTED, to, 1
                    )
            grid.ProcessTableMessage(msg)
            msg = gridlib.GridTableMessage(
                    self, gridlib.GRIDTABLE_NOTIFY_ROWS_DELETED, frm, 1
                    )
            grid.ProcessTableMessage(msg)
            grid.EndBatch()
    ################################################################
    ## for setting row and col and data, making this general-purpose
    ################################################################
    def SetRowLabels(self, newlabel, copy=True):
        """Set the row labels; copies the list by default."""
        self.rowLabels = newlabel[:] if copy else newlabel
    def SetColLabels(self, newmap, copy=True):
        """Set the identifier->label mapping; copies the dict by default."""
        self.colLabels = newmap.copy() if copy else newmap
    def SetData(self, newdata):
        """Set the row data: a list of dicts keyed by identifier."""
        self.data = newdata
    def SetIdentifiers(self, newid, copy=True):
        """Set the ordered column identifiers; copies the list by default."""
        self.identifiers = newid[:] if copy else newid
#---------------------------------------------------------------------------
class DragableGrid(gridlib.Grid):
    """A wx.grid.Grid whose rows and columns can be reordered by dragging,
    backed by a CustomDataTable."""
    def __init__(self, parent):
        gridlib.Grid.__init__(self, parent, -1)
        self.table = CustomDataTable()
        # Enable Column moving
        gridmovers.GridColMover(self)
        self.Bind(gridmovers.EVT_GRID_COL_MOVE, self.OnColMove, self)
        # Enable Row moving
        gridmovers.GridRowMover(self)
        self.Bind(gridmovers.EVT_GRID_ROW_MOVE, self.OnRowMove, self)
    def SetTable(self, tbl):
        """Accept either a TitledTable (converted into our CustomDataTable)
        or any ready-made grid table and install it."""
        if issubclass(type(tbl), TitledTable):
            tgttbl = self.table
            ids = tbl.titleline
            # py2.6 doesn't have dict comprehension...
            # here's a (mostly dirty) hack when i don't want to fix the columns
            # to some particular things...
            collabels = dict(zip(ids, ids))
            data = [dict(zip(ids, row)) for row in tbl]
            # populate the CustomDataTable
            tgttbl.SetIdentifiers(ids)
            tgttbl.SetColLabels(collabels)
            # to support row dragging, rowlabels must be present.
            tgttbl.SetRowLabels([str(i) for i in range(1, len(tbl.rows) + 1)])
            tgttbl.SetData(data)
            # The second parameter means that the grid is to take ownership of
            # the table and will destroy it when done.  Otherwise you would
            # need to keep a reference to it and call it's Destroy method
            # later.
            gridlib.Grid.SetTable(self, tgttbl, True)
        else:
            gridlib.Grid.SetTable(self, tbl, True)
    # Event method called when a column move needs to take place
    def OnColMove(self,evt):
        frm = evt.GetMoveColumn()       # Column being moved
        to = evt.GetBeforeColumn()      # Before which column to insert
        self.GetTable().MoveColumn(frm,to)
    # Event method called when a row move needs to take place
    def OnRowMove(self,evt):
        frm = evt.GetMoveRow()          # Row being moved
        to = evt.GetBeforeRow()         # Before which row to insert
        self.GetTable().MoveRow(frm,to)
# vi:ai:et:ts=4 sw=4 sts=4 fenc=utf-8
| xen0n/gingerprawn | gingerprawn/api/ui/dragablegrid.py | dragablegrid.py | py | 7,749 | python | en | code | 1 | github-code | 90 |
27613616394 | from data.redressal import Redressal
from repository.issue_repository import IssueRepository
from repository.redressal_repository import RedressalRepository
from data.const import IN_PROGRESS, PENDING, REDRESSED, REJECTED
from utils.printing import print_issue_details, print_issues, print_redressal_details, print_redressal_items, print_redressed_issues
class AdminFlow:
    """Console flow for an admin user: list, redress, reject, and review
    issues in the admin's assigned category."""
    def __init__(self, user) -> None:
        # user: the logged-in admin; user.category scopes every listing.
        self.user = user
        self.issue_repo = IssueRepository()
        self.red_repo = RedressalRepository()
    @property
    def issues_for_category(self):
        """Returns issues that are the same category as the admin user's category."""
        issues = self.issue_repo.list_by_issue_votes()
        return list(filter(lambda i: i.category == self.user.category, issues))
    def connect_redressal(self, issue):
        """Link `issue` to an existing redressal (by id) and mark it redressed."""
        redressal_id = input(
            'Input the redressal id that has solved this issue: ')
        try:
            self.red_repo.get(redressal_id)
        except KeyError:
            print(f"The id {redressal_id} doesn't exist!")
            return
        # Link to other redressal
        issue.redressal_id = redressal_id
        # Mark the issue as redressed
        issue.status = REDRESSED
        self.issue_repo.save(issue)
    def reject_issue(self, issue):
        """Reject `issue` after a Y/N confirmation."""
        decision = input(
            'Are you sure you want to reject this issue (Y/N)? ').lower()
        if decision == 'y':
            issue.status = REJECTED
            self.issue_repo.save(issue)
    def log_redressal_activity(self, redressal_id):
        """Show a redressal's activity log and optionally append a new entry."""
        redressal = self.red_repo.get(redressal_id)
        items = self.red_repo.items(redressal)
        print_redressal_items(items)
        message = input('Enter new redressal message (0 to cancel): ')
        if message == '0':
            return
        # Save the item
        self.red_repo.add_redressal_item(redressal, message, self.user)
    def change_status(self, issue_id):
        """Advance an issue PENDING -> IN_PROGRESS -> REDRESSED, creating and
        logging the backing redressal as needed. REDRESSED is terminal."""
        issue = self.issue_repo.get(issue_id)
        print()
        print(f'Current status: {issue.status}')
        if issue.status == REDRESSED:
            print("This issue have been redressed! You can't change the status.")
        elif issue.status == PENDING:
            option = input(f'Change status to {IN_PROGRESS}? (Y/N) ').lower()
            if option == 'y':
                issue.status = IN_PROGRESS
                # Create redressal for this issue
                redressal = Redressal(id='')
                redressal = self.red_repo.save(redressal)
                self.red_repo.add_redressal_item(
                    redressal, 'Redressal in progress', self.user)
                # Update the issue
                issue.redressal_id = redressal.id
                self.issue_repo.save(issue)
        elif issue.status == IN_PROGRESS:
            option = input(f'Change status to {REDRESSED}? (Y/N) ').lower()
            if option == 'y':
                issue.status = REDRESSED
                self.issue_repo.save(issue)
                # Log update
                redressal = self.red_repo.get(issue.redressal_id)
                self.red_repo.add_redressal_item(
                    redressal, 'Issue resolved', self.user)
    def get_actions_for_issue(self, issue):
        """Return the menu actions valid for this issue's current status."""
        actions = ['Back']
        if issue.status != REDRESSED:
            actions.append('Change status')
        if issue.status == IN_PROGRESS:
            actions.append('Log redressal activity')
        if issue.status == PENDING or issue.status == IN_PROGRESS:
            actions.append('Solve with other redressal')
        if issue.rejectable:
            actions.append('Reject issue')
        return actions
    def handle_issue(self, issue_id):
        """Interactive per-issue menu loop; re-reads the issue each pass so
        the action list reflects status changes."""
        while True:
            issue = self.issue_repo.get(issue_id)
            print()
            print_issue_details(issue, self.user)
            actions = self.get_actions_for_issue(issue)
            for i, a in enumerate(actions):
                print(f'{i}. {a}')
            action = input('Action: ')
            if action == '0':
                break
            try:
                int_action = int(action)
                if int_action >= len(actions):
                    continue
            except ValueError:
                continue
            action_str = actions[int_action]
            if action_str == 'Change status':
                self.change_status(issue_id)
            elif action_str == 'Log redressal activity':
                self.log_redressal_activity(issue.redressal_id)
            elif action_str == 'Reject issue':
                self.reject_issue(issue)
            elif action_str == 'Solve with other redressal':
                self.connect_redressal(issue)
    def list_issues(self):
        """List the category's issues and let the admin pick one to handle."""
        while True:
            issues = self.issues_for_category
            print('0. Back')
            print_issues(issues)
            action = input("Which issue do you want to redress? ")
            if action == '0':
                break
            else:
                try:
                    issue_index = int(action) - 1
                except ValueError:
                    continue
                # NOTE(review): an out-of-range number raises IndexError here;
                # presumably acceptable for this console demo -- confirm.
                self.handle_issue(issues[issue_index].id)
    def view_redressed(self, issue, redressal):
        """Show one redressed issue; if users complained, offer re-redress
        (reopen, log, and clear the redressal's votes)."""
        print_redressal_details(
            issue, redressal, self.red_repo.items(redressal), self.user)
        if redressal.complaint:
            print('Users are not satisfied with the redressal!')
            print()
            print('0. Back')
            print('1. Re-redress')
            action = input('Action: ')
            if action == '1':
                # Back to in progress
                issue.status = IN_PROGRESS
                self.issue_repo.save(issue)
                # Log the activity
                self.red_repo.add_redressal_item(
                    redressal, "Redressal re-opened", self.user)
                # Remove the votes for redressal
                self.red_repo.clear_votes(redressal)
        else:
            print('Users are satisfied with the redressal. No actions required.')
    def list_redressed(self):
        """List redressed issues (ordered by redressal votes) and open the
        one the admin selects."""
        while True:
            redressed = list(filter(lambda issue: issue.status ==
                                    REDRESSED, self.issues_for_category))
            if len(redressed) > 0:
                redressed = self.issue_repo.list_by_redressal_votes(
                    self.red_repo.list()
                )
                print(
                    '\nHere are all issues that have been redressed and the corresponding response from users.')
                print('0. Back')
                print_redressed_issues(redressed, self.red_repo)
                action = input('Choose redressal to view: ')
                if action == '0':
                    break
                else:
                    try:
                        index = int(action) - 1
                    except ValueError:
                        continue
                    issue = redressed[index]
                    redressal = self.red_repo.get(issue.redressal_id)
                    self.view_redressed(issue, redressal)
            else:
                print('No redressed issue!')
                break
    def main(self):
        """Top-level admin menu: summary counts plus the two listing flows."""
        while True:
            print(f'\nYou are responsible for category: {self.user.category}')
            # Filter by category
            new_issues = list(
                filter(lambda issue: issue.status == PENDING, self.issues_for_category))
            in_progress_issues = list(
                filter(lambda issue: issue.status == IN_PROGRESS, self.issues_for_category))
            if len(new_issues) > 0:
                print(f"{len(new_issues)} new issues")
            if len(in_progress_issues) > 0:
                print(f'{len(in_progress_issues)} issues in progress')
            print()
            print("0. Back")
            print("1. List Issue")
            print("2. List Redressed")
            action = input('Action: ')
            if action == '0':
                print()
                break
            elif action == '1':
                print()
                self.list_issues()
            elif action == '2':
                print()
                self.list_redressed()
| Farhan-Khalifa-Ibrahim/CZ4010-Project | admin.py | admin.py | py | 8,306 | python | en | code | 0 | github-code | 90 |
27992718025 | import os
import setuptools
# Resolve the package directory so the build works from any CWD.
here = os.path.abspath(os.path.dirname(__file__))
# Load __version__ from PyAnEn/version.py without importing the package.
# NOTE: exec of a repo-local file is conventional in setup.py and safe only
# because the file ships with this source tree.
with open(os.path.join(here, "PyAnEn", "version.py")) as fp:
    exec(fp.read())
setuptools.setup(
    name="PyAnEn",
    version=__version__,
    author="Weiming Hu",
    author_email="huweiming950714@gmail.com",
    description="The python interface to parallel Analog Ensemble",
    url="https://github.com/Weiming-Hu/PyAnEn",
    packages=setuptools.find_packages(exclude=("tests",)),
    python_requires=">=3",
    license='LICENSE',
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    install_requires=[
        'xarray',
        'properscoring',
        'netCDF4',
        'numpy',
        'scipy',
        'scikit-learn',
        'pandas',
        'ray',
        'tqdm',
    ],
)
| Weiming-Hu/PyAnEn | setup.py | setup.py | py | 889 | python | en | code | 3 | github-code | 90 |
5645258036 | # coding: utf-8
'''
Veredi Mediator Message.
For a server mediator (e.g. WebSockets) talking to a game.
'''
# -----------------------------------------------------------------------------
# Imports
# -----------------------------------------------------------------------------
from typing import (TYPE_CHECKING,
Optional, Union, Any, Type, NewType, Tuple)
if TYPE_CHECKING:
from veredi.interface.mediator.context import UserConnToken
import enum
from veredi.logs import log
from veredi.security import abac
from veredi.data.codec import (Codec,
Encodable,
EncodedComplex,
EncodedSimple)
from veredi.data.exceptions import EncodableError
from veredi.base.identity import MonotonicId
from veredi.data.identity import UserId, UserKey
from veredi.game.ecs.base.identity import EntityId
from ..user import UserPassport
from .const import MsgType
from .payload.base import BasePayload
from .payload.bare import BarePayload
from .payload.logging import LogPayload
# -----------------------------------------------------------------------------
# Constants
# -----------------------------------------------------------------------------
# ------------------------------
# Types
# ------------------------------
# Every type a Message id can take: None before assignment, or a
# MonotonicId / Message.SpecialId / raw int once assigned.
MsgIdTypes = NewType('MsgIdTypes',
                     Union[None, MonotonicId, 'Message.SpecialId', int])
# -----------------------------------------------------------------------------
# Code
# -----------------------------------------------------------------------------
class Message(Encodable,
name_dotted='veredi.interface.mediator.message.message',
name_string='message'):
'''
Message object between game and mediator.
Saves id as an int. Casts back to ID type in property return.
'''
# -------------------------------------------------------------------------
# Constants
# -------------------------------------------------------------------------
    @enum.unique
    class SpecialId(enum.IntEnum):
        '''
        Super Special Message IDs for Super Special Messages!

        Out-of-band message ids used before/instead of a normal MonotonicId
        (e.g. during the connection handshake).
        '''
        INVALID = 0
        '''Ignore me.'''
        CONNECT = enum.auto()
        '''
        Client -> Server: Hello / Auth / Register-Me-Please.
        Server -> Client: Result.
        OR Server -> Game: This client has connected.
        '''
# -------------------------------------------------------------------------
# Initialization
# -------------------------------------------------------------------------
    def _define_vars(self) -> None:
        '''
        Instance variable definitions, type hinting, doc strings, etc.
        '''
        self._msg_id: MsgIdTypes = None
        '''
        ID for message itself. Can be initialized as None, but messages must be
        sent with a non-None message id.
        '''
        self._type: 'MsgType' = MsgType.IGNORE
        '''
        Message's Type. Determines how Mediators handle the message itself and
        its payload.
        '''
        self._entity_id: Optional[EntityId] = None
        '''
        The specific entity related to the message, if there is one.
        E.g. if a skill roll happens, it will be assigned the EntityId of the
        entity used to roll the skill.
        Or if some sort of not-tied-to-an-entity message, it will be None.
        '''
        self._user_id: Optional[UserId] = None
        '''
        The UserId this message will be sent to. Can be None if not set yet, or
        if broadcast maybe?
        # TODO: is this None, or something else, for broadcast?
        '''
        self._user_key: Optional[UserId] = None
        '''
        The UserKey this message will be sent to. Should only be set if
        _user_id is also set. Can be None if not set yet, or if broadcast
        maybe?
        # TODO: is this None, or something else, for broadcast?
        '''
        self._payload: Optional[Any] = None
        '''
        The actual important part of the message: what's in it.
        '''
        self._security_subject: Optional[abac.Subject] = None
        '''
        The security (ABAC) subject under which this message is sent, if any.
        '''
    def __init__(self,
                 msg_id: Union[MonotonicId, SpecialId, int, None],
                 type: 'MsgType',
                 payload: Optional[Any] = None,
                 entity_id: Optional[EntityId] = None,
                 user_id: Optional[UserId] = None,
                 user_key: Optional[UserKey] = None,
                 subject: Optional[abac.Subject] = None) -> None:
        '''
        Create a Message of `type` with the given id, payload, and
        routing/security metadata. `_define_vars()` establishes the
        documented defaults before they are overwritten here.
        '''
        self._define_vars()
        self._msg_id = msg_id
        self._type = type
        self._entity_id = entity_id
        self._user_id = user_id
        self._user_key = user_key
        self._payload = payload
        self._security_subject = subject
# -------------------------------------------------------------------------
# General MsgType Init Helpers
# -------------------------------------------------------------------------
@classmethod
def echo(klass: Type['Message'],
msg: 'Message') -> 'Message':
'''
Create an echo reply for this message.
'''
# Return same message but with type changed to ECHO_ECHO.
return klass(msg.msg_id, MsgType.ECHO_ECHO,
payload=msg.payload,
user_id=msg.user_id,
user_key=msg.user_key)
# -------------------------------------------------------------------------
# ACK_CONNECT: Connected (response to client) Helpers
# -------------------------------------------------------------------------
@classmethod
def connected(klass: Type['Message'],
msg: 'Message',
id: UserId,
key: UserKey,
success: bool) -> 'Message':
'''
Creates a MsgType.ACK_CONNECT message reply for success/failure of
connection.
'''
if success:
return klass(msg.msg_id, MsgType.ACK_CONNECT,
payload=BarePayload({'text': 'Connected.',
'code': True}),
user_id=id,
user_key=key)
return klass(msg.msg_id, MsgType.ACK_CONNECT,
payload=BarePayload({'text': 'Failed to connect.',
'code': False}),
user_id=id,
user_key=key)
    def verify_connected(self) -> Tuple[bool, Optional[str]]:
        '''
        Verifies this is an ACK_CONNECTED message and that it was a successful
        connection.

        Returns Tuple[bool, Optional[str]]:
          - bool: success/failure
          - str:
            - if success: None
            - if failure: Failure reason
        '''
        # Must be the right message type...
        if self.type != MsgType.ACK_CONNECT:
            return (False,
                    f"Message is not MsgType.ACK_CONNECT. Is {self.type}")
        # Correct type of message, now check the payload.
        if not isinstance(self.payload, BarePayload):
            return (False,
                    "Message's payload is unexpected type. Expecting "
                    f"BarePayload, got: {type(self.payload)}")
        # Correct payload - check for success.
        try:
            # Do we have the 'code' field we need to determine success?
            if not self.payload.data or 'code' not in self.payload.data:
                return (False,
                        "Cannot understand BarePayload's data: "
                        f"{self.payload.data}")
            # Was it a failure?
            elif self.payload.data['code'] is not True:
                return (False,
                        "Connection failed with code "
                        f"'{self.payload.data['code']}' and reason: "
                        f"{self.payload.data['text']}")
            # The One Good Return. 'code' was True/success, so return success.
            else:
                return (True, None)
        # Unexpected things happened. Probably 'code' doesn't exist or data
        # isn't a dict.
        except (TypeError, KeyError):
            return (False,
                    "Connection success unknown - received exception when "
                    "trying to check. Payload has unexpected data format "
                    f"probably: {self.payload}")
        # Not expecting to get here unless BarePayload or Message.connected()
        # has changed and this hasn't... (defensive: all paths above return)
        return (False,
                "You're not supposed to be able to get this far here. "
                f"What's wrong? {self.payload}")
# -------------------------------------------------------------------------
# Payload Helpers
# -------------------------------------------------------------------------
    @classmethod
    def payload_basic(klass: Type['Message'],
                      payload: str) -> BarePayload:
        '''
        Creates and returns a bare payload wrapping the message string;
        suitable for simple text messages.
        '''
        return BarePayload(payload)
# -------------------------------------------------------------------------
# Logging MsgType Init Helpers
# -------------------------------------------------------------------------
@classmethod
def log(klass: Type['Message'],
msg_id: Union[MonotonicId, int],
user_id: Optional[UserId],
user_key: Optional[UserKey],
log_payload: LogPayload) -> 'Message':
'''
Creates a LOGGING message with the supplied data.
'''
msg = Message(msg_id, MsgType.LOGGING,
user_id=user_id,
user_key=user_key,
payload=log_payload)
return msg
# -------------------------------------------------------------------------
# Properties
# -------------------------------------------------------------------------
    @property
    def msg_id(self) -> Union[MonotonicId, SpecialId]:
        '''
        Return our msg_id as a MonotonicId or SpecialId.
        '''
        return self._msg_id
    @property
    def type(self) -> 'MsgType':
        '''
        Return our message type.
        '''
        return self._type
    @property
    def entity_id(self) -> Optional[EntityId]:
        '''
        Return our message's EntityId, if any.
        '''
        return self._entity_id
    @entity_id.setter
    def entity_id(self, value: Optional[EntityId]) -> None:
        '''
        Sets or clears the message's EntityId.
        '''
        self._entity_id = value
    @property
    def user_id(self) -> Optional[UserId]:
        '''
        Return our message's UserId, if any.
        '''
        return self._user_id
    @user_id.setter
    def user_id(self, value: Optional[UserId]) -> None:
        '''
        Sets or clears the message's UserId.
        '''
        self._user_id = value
    @property
    def user_key(self) -> Optional[UserKey]:
        '''
        Return our message's UserKey, if any.
        '''
        return self._user_key
    @user_key.setter
    def user_key(self, value: Optional[UserKey]) -> None:
        '''
        Sets or clears the message's UserKey.
        '''
        self._user_key = value
    @property
    def payload(self) -> Optional[Any]:
        '''
        Return our message payload.
        '''
        return self._payload
    @payload.setter
    def payload(self, value: str) -> None:
        '''
        Replace payload with its (encoded/decoded/serialized/deserialized)
        equal.
        TODO: Should payload be straight up replaced? Keep original somewhere?
        '''
        self._payload = value
    @property
    def security_subject(self) -> Optional[abac.Subject]:
        '''
        Return our security.abac.Subject value.
        '''
        # TODO [2020-10-27]: What should 'None' do? Fail message
        # eventually, probably. Shouldn't send without security involved.
        # Security should be set to 'debug' or something if undesired for
        # whatever reason.
        return self._security_subject
# -------------------------------------------------------------------------
# Encodable API
# -------------------------------------------------------------------------
def encode_simple(self, codec: 'Codec') -> EncodedSimple:
'''
Don't support simple for Messages.
'''
msg = (f"{self.klass} doesn't support encoding to a "
"simple string.")
raise NotImplementedError(msg)
@classmethod
def decode_simple(klass: Type['Message'],
data: EncodedSimple,
codec: 'Codec') -> 'Message':
'''
Don't support simple by default.
'''
msg = (f"{klass.__name__} doesn't support decoding from a "
"simple string.")
raise NotImplementedError(msg)
def encode_complex(self, codec: 'Codec') -> EncodedComplex:
'''
Encode ourself as an EncodedComplex, return that value.
'''
# Tell our payload to encode... or use as-is if not an Encodable.
encoded_payload = self.payload
if isinstance(self.payload, Encodable):
encoded_payload = codec.encode(self.payload)
# Put our data into a dict for encoding.
encoded = {
'msg_id': codec.encode(self._msg_id),
'type': codec.encode(self._type),
'entity_id': codec.encode(self._entity_id),
'user_id': codec.encode(self._user_id),
'user_key': codec.encode(self._user_key),
'payload': encoded_payload,
'security': codec.encode(self._security_subject),
}
return encoded
    @classmethod
    def decode_complex(klass: Type['Message'],
                       data: EncodedComplex,
                       codec: 'Codec',
                       instance: Optional['Message'] = None) -> 'Message':
        '''
        Decode ourself from an EncodedComplex, return a new instance of `klass`
        as the result of the decoding.

        NOTE(review): `instance` is accepted for interface parity with other
        decoders but is unused here -- a fresh instance is always built.
        '''
        try:
            # Validate all required keys are present before decoding.
            klass.error_for(data,
                            keys=[
                                'msg_id', 'type',
                                'entity_id', 'user_id', 'user_key',
                                'security',
                                'payload',
                            ])
            # msg_id could be a few different types.
            msg_id = codec.decode(None, data['msg_id'])
            # These are always their one type.
            _type = codec.decode(MsgType, data['type'])
            entity_id = codec.decode(EntityId, data['entity_id'])
            user_id = codec.decode(UserId, data['user_id'])
            user_key = codec.decode(UserKey, data['user_key'])
            security = codec.decode(abac.Subject, data['security'])
            # Payload can be encoded or just itself. So try to decode, then
            # fallback to use its value as is.
            payload = codec.decode(None,
                                   data['payload'],
                                   fallback=data['payload'])
            return klass(msg_id, _type,
                         payload=payload,
                         entity_id=entity_id,
                         user_id=user_id,
                         user_key=user_key,
                         subject=security)
        except Exception as error:
            # Log with context, then let the caller deal with the failure.
            log.exception(error,
                          "Caught exception decoding Message.")
            raise
# -------------------------------------------------------------------------
# Python Functions
# -------------------------------------------------------------------------
def __str__(self):
return (
f"{self.klass}"
f"[{self.msg_id}, "
f"{self.type}, "
f"{self.user_id}, "
f"{self.user_key}]("
f"{type(self.payload)}: "
f"{str(self.payload)})"
)
def __repr__(self):
return (
"<Msg["
f"{repr(self.msg_id)},"
f"{repr(self.type)}, "
f"{self.user_id}, "
f"{self.user_key}]"
f"({repr(self.payload)})>"
)
# -----------------------------------------------------------------------------
# Message for Connecting/Disconnecting Users
# -----------------------------------------------------------------------------
class ConnectionMessage(
        Message,
        name_dotted='veredi.interface.mediator.message.connection',
        name_string='message.connection'):
    '''
    Mediator -> Game message for a connecting or disconnecting client.

    The user's connection token rides along as the message payload.
    '''
    # -------------------------------------------------------------------------
    # Initialization
    # -------------------------------------------------------------------------
    def __init__(self,
                 connected: bool,
                 user_id: Optional[UserId],
                 user_key: Optional[UserKey],
                 connection: 'UserConnToken') -> None:
        '''
        Build a CONNECT (if `connected` is truthy) or DISCONNECT message whose
        payload is the user's `connection` token.
        '''
        # Type will be CONNECT or DISCONNECT, depending.
        msg_type = (MsgType.CONNECT
                    if connected else
                    MsgType.DISCONNECT)
        # Init base class with our data. `connection` token will be the
        # payload.
        # NOTE(review): SpecialId.CONNECT (inherited, presumably from Message)
        # is used as the msg-id even for DISCONNECT messages -- confirm.
        super().__init__(ConnectionMessage.SpecialId.CONNECT,
                         msg_type,
                         connection, None,
                         user_id, user_key)
    @classmethod
    def connected(klass: Type['ConnectionMessage'],
                  user_id: UserId,
                  user_key: Optional[UserKey],
                  connection: 'UserConnToken'
                  ) -> 'ConnectionMessage':
        '''
        Create a "connected" version of a ConnectionMessage.
        '''
        return ConnectionMessage(True, user_id, user_key, connection)
    @classmethod
    def disconnected(klass: Type['ConnectionMessage'],
                     user_id: Optional[UserId],
                     user_key: Optional[UserKey],
                     connection: 'UserConnToken'
                     ) -> 'ConnectionMessage':
        '''
        Create a "disconnected" version of a ConnectionMessage.
        '''
        return ConnectionMessage(False, user_id, user_key, connection)
    # -------------------------------------------------------------------------
    # Properties
    # -------------------------------------------------------------------------
    @property
    def connection(self) -> 'UserConnToken':
        '''
        Get connection token from message.
        '''
        # The token was stored as the payload in __init__.
        return self.payload
    # -------------------------------------------------------------------------
    # Helpers
    # -------------------------------------------------------------------------
    def user(self) -> UserPassport:
        '''
        Create a UserPassport instance with our Connection information.
        '''
        return UserPassport(self.user_id, self.user_key, self.connection)
| cole-brown/veredi-code | interface/mediator/message.py | message.py | py | 19,684 | python | en | code | 1 | github-code | 90 |
7621789259 | import os
import asdf
import pytest
from astropy.time import Time
from roman_datamodels.testing.utils import mk_level2_image
from roman_datamodels.datamodels import ImageModel, FlatRefModel
from romancal.stpipe import RomanPipeline, RomanStep
@pytest.mark.parametrize("step_class", [RomanPipeline, RomanStep])
def test_open_model(step_class, tmp_path):
    """
    Test that the class is properly hooked up to datamodels.open.
    More comprehensive tests can be found in romancal.datamodels.tests,
    this is just a smoke test of the integration.
    """
    # Write a minimal level-2 image to a temporary ASDF file...
    file_path = tmp_path / "test.asdf"
    with asdf.AsdfFile() as af:
        imod = mk_level2_image(shape=(20, 20))
        af.tree = {'roman': imod}
        af.write_to(file_path)
    # ...then confirm the step can open it and sees the expected metadata.
    step = step_class()
    with step.open_model(file_path) as model:
        assert model.meta.telescope == "ROMAN"
@pytest.mark.skipif(
    os.environ.get("CI") == "true",
    reason="Roman CRDS servers are not currently \
    available outside the internal network"
)
@pytest.mark.parametrize("step_class", [RomanPipeline, RomanStep])
def test_get_reference_file(step_class):
    """
    Test that CRDS is properly integrated.
    """
    im = mk_level2_image(shape=(20, 20))
    # This will be brittle while we're using the dev server.
    # If this test starts failing mysteriously, check the
    # metadata values against the flat rmap.
    im.meta.instrument.optical_element = "F158"
    im.meta.exposure.start_time = Time('2021-01-01T12:00:00')
    model = ImageModel(im)
    step = step_class()
    # Ask CRDS for a flat reference and verify it round-trips as a FlatRefModel.
    reference_path = step.get_reference_file(model, "flat")
    with step.open_model(reference_path) as reference_model:
        assert isinstance(reference_model, FlatRefModel)
@pytest.mark.skip(reason="There are no grism flats.")
@pytest.mark.skipif(
    os.environ.get("CI") == "true",
    reason="Roman CRDS servers are not currently \
    available outside the internal network"
)
@pytest.mark.parametrize("step_class", [RomanPipeline, RomanStep])
def test_get_reference_file_spectral(step_class):
    """
    Test that CRDS is properly integrated.

    Skipped for now: no grism flats exist in CRDS (see skip marker above).
    """
    im = mk_level2_image(shape=(20, 20))
    # This will be brittle while we're using the dev server.
    # If this test starts failing mysteriously, check the
    # metadata values against the flat rmap.
    im.meta.instrument.optical_element = "GRISM"
    im.meta.exposure.start_time = Time('2021-01-01T12:00:00')
    model = ImageModel(im)
    step = step_class()
    reference_path = step.get_reference_file(model, "flat")
    with step.open_model(reference_path) as reference_model:
        assert isinstance(reference_model, FlatRefModel)
        assert reference_model.meta.instrument.optical_element == "GRISM"
def test_log_messages(tmp_path):
    """
    Verify that warnings logged inside `process` end up in the result's
    cal_logs.
    """
    class LoggingStep(RomanStep):
        def process(self):
            self.log.warning("Splines failed to reticulate")
            return ImageModel(mk_level2_image(shape=(20, 20)))
    result = LoggingStep().run()
    # `line` instead of `l`: PEP 8 (E741) discourages the ambiguous name 'l'.
    assert any("Splines failed to reticulate" in line for line in result.cal_logs)
| kmacdonald-stsci/romancal | romancal/stpipe/tests/test_core.py | test_core.py | py | 3,071 | python | en | code | null | github-code | 90 |
8727567558 | # Practice Exercise 6_1 (decision control)
def seizoen(maand):
    """Print the Dutch season name for month index `maand` (0-11).

    Out-of-range values print nothing.
    """
    seasons = (
        ((0, 2), 'Het is winter'),
        ((3, 5), 'Het is lente'),
        ((6, 8), 'Het is zomer'),
        ((9, 11), 'Het is herfst'),
    )
    for (low, high), label in seasons:
        if low <= maand <= high:
            print(label)
            break
seizoen(2)
#Practice Exercise 6_2 (lists)
import ast

# Read a Python list literal from the user.  ast.literal_eval is safe
# against arbitrary code execution, unlike the eval() used previously.
lijst = ast.literal_eval(input('Geef een lijst met minimaal 10 strings: '))
# Keep only the four-letter words.
lijst_1 = [word for word in lijst if len(word) == 4]
print(lijst_1)
#Practice Exercise 6_3 (lists)
invoer = "5-9-7-1-7-8-3-2-4-8-7-9"
# Split on '-' and convert every piece to int.  Named 'getallen' so the
# builtin `list` is no longer shadowed.
getallen = [int(deel) for deel in invoer.split('-')]
# The hand-rolled bubble sort ran a fixed 10 passes, which does not
# guarantee a fully sorted result for 12 elements; sort in place instead.
getallen.sort()
gemiddelde = sum(getallen) / len(getallen)
print('Gesorteerde list van ints: {}'.format(getallen))
print('Grootste getal {} en kleinste getal {}'.format(max(getallen), min(getallen)))
print('Aantal getallen {} en Som van de getallen {}'.format(len(getallen), sum(getallen)))
print('Gemiddelde: {:.2f}'.format(gemiddelde))
# Practice Exercise 6_4 (two-dimensional-lists)
studentencijfers = [[95, 92, 86], [66, 75, 54], [89, 72, 100], [34, 0, 0]]
def gemiddelde_per_student(studentencijfers):
    """Return a list with the average grade of each student row."""
    return [sum(cijfers) / len(cijfers) for cijfers in studentencijfers]
print(gemiddelde_per_student(studentencijfers)) # CHECKEN
def gemiddelde_van_alle_studenten(studentencijfers):
    """Return the overall average of the per-student averages, as a rounded string."""
    per_student = gemiddelde_per_student(studentencijfers)
    return '{:.0f}'.format(sum(per_student) / len(per_student))
print(gemiddelde_van_alle_studenten(studentencijfers))
#Practice Exercise 6_5 (nested loop)
def print_tafels():
    """Print the 10x10 multiplication table, one row per line."""
    for rij in range(1, 11):
        # Join the ten products, keeping the original trailing space.
        print(' '.join(str(rij * kolom) for kolom in range(1, 11)), end=' \n')
print_tafels()
| Fardowsa030/Programmeren | Practice Exercises/Les 6.py | Les 6.py | py | 1,896 | python | nl | code | 0 | github-code | 90 |
1249440112 | import dateparser
import scrapy
import time
import datetime
import json
import re
from scrapy import Selector
from tpdb.BasePerformerScraper import BasePerformerScraper
from tpdb.items import PerformerItem
class PornCZPerformerSpider(BasePerformerScraper):
    """Scrapes performer/model profiles from porncz.com's AJAX model grid."""
    name = 'PornCZPerformer'
    network = 'PornCZ'
    start_urls = [
        'https://www.porncz.com'
    ]
    # The models grid endpoint only answers AJAX-style requests.
    headers = {
        'x-requested-with': 'XMLHttpRequest'
    }
    # Pretend the age gate was already passed.
    cookies = {
        'age-verified': '1',
    }
    selector_map = {
        'external_id': 'models\/(.*)\/',
        'pagination': '/en/models?do=next&_=%s'
    }
    def start_requests(self):
        """Seed the crawl at the /en/models listing (page counter starts at 0)."""
        if not hasattr(self, 'start_urls'):
            raise AttributeError('start_urls missing')
        if not self.start_urls:
            raise AttributeError('start_urls selector missing')
        for link in self.start_urls:
            # NOTE(review): `link` is never used; the request always targets
            # /en/models -- confirm that is intentional.
            yield scrapy.Request(url="https://www.porncz.com/en/models",
                                 callback=self.parse,
                                 meta={'page': 0},
                                 headers=self.headers, cookies=self.cookies)
    def parse(self, response, **kwargs):
        """Emit performers from this response, then request the next AJAX page."""
        count = 0
        # NOTE(review): `currpage` is assigned but never read.
        currpage = response.meta['page']
        # Page 0 is the plain HTML listing; only AJAX pages (page >= 1)
        # are parsed for performer items.
        if response.meta['page']:
            performers = self.get_performers(response)
            count = len(performers)
            for performer in performers:
                yield performer
        # Keep paginating while pages yield results (or for the initial page).
        if count or not response.meta['page']:
            if 'page' in response.meta and response.meta['page'] < self.limit_pages:
                meta = response.meta
                meta['page'] = meta['page'] + 1
                # Timestamp text used as a cache-busting `_` query parameter.
                timetext = datetime.datetime.utcnow().strftime("%H%M%S%f")
                yield scrapy.Request(url=self.get_next_page_url(response.url, timetext),
                                     callback=self.parse,
                                     meta=meta,
                                     headers=self.headers, cookies=self.cookies)
    def get_performers(self, response):
        """Parse the JSON snippet payload of an AJAX page into PerformerItems."""
        item_list = []
        jsondata = response.json();
        jsondata = jsondata['snippets']
        # Lower-cased for simpler matching; .title() restores capitalization below.
        jsondata = jsondata['snippet-modelsGrid-modelItemsAppend'].lower()
        jsonsel = Selector(text=jsondata)
        performers = jsonsel.xpath('//div[contains(@class,"color_12-shadow-sm-hover")]')
        count = 0
        for performer in performers:
            count = count + 1
            item = PerformerItem()
            # Fields the site does not expose stay empty strings.
            item['bio'] = ''
            item['gender'] = ''
            item['birthday'] = ''
            item['astrology'] = ''
            item['birthplace'] = ''
            item['ethnicity'] = ''
            item['nationality'] = ''
            item['haircolor'] = ''
            item['measurements'] = ''
            item['tattoos'] = ''
            item['piercings'] = ''
            item['fakeboobs'] = ''
            item['eyecolor'] = ''
            item['cupsize'] = ''
            item['height'] = ''
            item['weight'] = ''
            item['network'] = "PornCZ"
            name = performer.xpath('./div/h3/a/text()').get()
            if name:
                item['name'] = name.strip().title()
            url = performer.xpath('./a/@href').get()
            if url:
                item['url'] = "https://www.porncz.com/" + url.strip()
            image = performer.xpath('./a/img/@data-src').get()
            if image:
                item['image'] = "https://www.porncz.com" + image.strip()
            # The description line packs cup size / weight / height together.
            descline = performer.xpath('./a/div/p/text()').get()
            if descline:
                descline = descline.replace("-", "").strip()
                if re.search('size:(.*)weight', descline):
                    cupsize = re.search('size:(.*)weight', descline).group(1)
                    if cupsize:
                        item['cupsize'] = cupsize.strip().title()
                if re.search('(\d+\ kg)', descline):
                    weight = re.search('(\d+\ kg)', descline).group(1)
                    if weight:
                        item['weight'] = weight.strip().title()
                if re.search('(\d+\ cm)', descline):
                    height = re.search('(\d+\ cm)', descline).group(1)
                    if height:
                        item['height'] = height.strip().title()
            item_list.append(item.copy())
            item.clear()
        return item_list
    def get_next_page_url(self, base, page):
        """Build the AJAX pagination URL; `page` is the cache-buster text."""
        url = self.format_url(base, self.get_selector_map('pagination') % page)
        return url
| SFTEAM/scrapers | performers/networkPornczPerformer.py | networkPornczPerformer.py | py | 4,842 | python | en | code | null | github-code | 90 |
class Pet:
    """A pet with health/energy stats; mutators return self for chaining."""
    def __init__(self, name, type, tricks, health, energy, sound):
        self.name = name
        self.type = type
        self.tricks = tricks
        self.health = health
        self.energy = energy
        self.sound = sound
    def sleep(self):
        """Restore 25 energy."""
        self.energy = self.energy + 25
        return self
    def eat(self):
        """Gain 5 energy and 10 health."""
        self.energy = self.energy + 5
        self.health = self.health + 10
        return self
    def play(self):
        """Gain 5 health."""
        self.health = self.health + 5
        return self
    def noise(self):
        """Print this pet's sound."""
        print(self.sound)
        return self
class Ninja:
    """An owner that cares for a single pet object."""
    def __init__(self, first_name, last_name, treats, pet_food, pet):
        self.first_name = first_name
        self.last_name = last_name
        self.treats = treats
        self.pet_food = pet_food
        self.pet = pet
    def walk(self):
        """Walk the pet (counts as play) and report its health."""
        self.pet.play()
        pet = self.pet
        print(f"{self.first_name} took {pet.name} for a walk.")
        print(f"{pet.name}'s health is now {pet.health}.")
        return self
    def feed(self):
        """Feed the pet and report its energy and health."""
        self.pet.eat()
        pet = self.pet
        print(f"{self.first_name} fed {pet.name} some {self.pet_food}.")
        print(f"{pet.name}'s energy is now {pet.energy}.")
        print(f"{pet.name}'s health is now {pet.health}.")
        return self
    def bathe(self):
        """Bathe the pet, which makes it vocalize."""
        pet = self.pet
        print(f"As {self.first_name} bathed {pet.name}, {pet.name} said {pet.sound}!")
        return self
# Demo: create a pet and its owner, then exercise the owner's methods.
Dog_1 = Pet("Spot","Dog","Jumping", 85, 60, "Arf")
Fighter_1 = Ninja("Daniel","Larusso","Biscuits","Kibble", Dog_1)
Fighter_1.walk()
Fighter_1.feed()
Fighter_1.bathe()
| EmilioTello/Python_Part_1 | fundamentals/fundamentals/Dojo_Pets.py | Dojo_Pets.py | py | 1,576 | python | en | code | 0 | github-code | 90 |
2653496374 | from dataclasses import dataclass
from distutils.sysconfig import PREFIX
from tokenize import maybe
from typing import List, Optional, Tuple
import rdflib
from pathlib import Path
def get_first_interaction_uri(grap: rdflib.Graph) ->str:
    """Return the URI of the single interaction the conversation map starts with.

    Raises if the graph does not contain exactly one such interaction.
    NOTE(review): parameter is spelled `grap` (sic); kept as-is so keyword
    callers are not broken.
    """
    interaction_uri = """
    SELECT ?conversationmap ?interaction
    WHERE{
        ?conversationmap a <http://example.com/types/conversation-map>.
        ?interaction a <http://example.com/types/interaction>.
        ?conversationmap <http://example.com/predicates/hasNextInteraction> ?interaction.
    }
    """
    sparql_results = grap.query(interaction_uri)
    results: List[rdflib.query.ResultRow] = list(sparql_results) # type: ignore
    # Exactly one starting interaction is expected.
    if len(results) != 1:
        raise Exception(f"Got an unexpected number of results {len(results)}.")
    result_dict = results[0].asdict()
    return str(result_dict["interaction"])
@dataclass
class Interaction:
    """One conversational turn: what is said and by whom."""
    statement: str  # the line of dialogue spoken
    character_name: str  # display name of the speaking character
def get_initial_question(interaction_uri: str, graph: rdflib.Graph)-> Interaction:
    """Return the opening statement and speaker name for `interaction_uri`.

    Raises if the graph does not yield exactly one (statement, name) pair.
    """
    query = """
    BASE <http://example.com/>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    SELECT ?initialStatement ?name
    WHERE{
        ?character a <types/character>.
        ?interaction <predicates/hasInitialStatement> ?initialStatement.
        ?interaction <predicates/initiatedBy> ?character.
        ?character rdf:label ?name.
    }
    """
    # Bind ?interaction to the requested URI.
    stuff = graph.query(query, initBindings={"interaction": rdflib.URIRef(interaction_uri)})
    results: List[rdflib.query.ResultRow] = list(stuff) # type: ignore
    if len(results) != 1:
        raise Exception(f"Got an unexpected number of results {len(results)}.")
    result_dict = results[0].asdict()
    return Interaction(statement=str(result_dict["initialStatement"]),
                       character_name=str(result_dict["name"]))
def no_furter_interaction()-> None:
    """Announce that the conversation has ended and terminate the program.

    NOTE(review): name keeps its original typo ('furter') because callers
    elsewhere in this file reference it.
    """
    print("Conversation over!")
    # quit() is intended for interactive sessions only; raising SystemExit
    # (what quit() does under the hood) is the supported way in scripts.
    raise SystemExit()
def get_responses(interaction_uri: str, graph: rdflib.Graph)->List[Tuple[str, str]]:
    """Return (response_text, next_interaction_uri_or_None) pairs for an interaction.

    The next-interaction URI is optional in the data; None marks a
    conversation-ending response.
    """
    quey2 = """
    BASE <http://example.com/>
    PREFIX rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#>
    SELECT ?response_text ?nexInteraction
    WHERE{
        ?response a <types/response>.
        ?interaction <predicates/hasResponseOptions> ?response.
        ?response rdf:label ?response_text.
        OPTIONAL {
            ?response <predicates/hasNextInteraction> ?nexInteraction
        }
    }
    """
    responses = graph.query(quey2, initBindings={"interaction": rdflib.URIRef(interaction_uri)})
    results: List[rdflib.query.ResultRow] = list(responses) # type: ignore
    results_list = []
    for response in results:
        result_dict = response.asdict()
        response_text = str(result_dict["response_text"])
        #this will lead to a bug in the future
        # NOTE(review): the author flagged this branch themselves; OPTIONAL
        # bindings that are unbound simply fall through to None here.
        if "nexInteraction" in result_dict:
            next_interaction_uri = str(result_dict["nexInteraction"])
        else:
            next_interaction_uri = None
        results_list.append((response_text, next_interaction_uri))
    return results_list
def get_interaction_information(maybe_interaction_uri: Optional[str] = None):
    """Run one conversation step interactively, then recurse into the chosen branch.

    NOTE(review): relies on the module-level graph `g` (set in __main__) and
    uses recursion for the conversation loop -- very long conversations
    could hit the recursion limit.
    """
    # Default to the conversation map's first interaction.
    interaction_uri = maybe_interaction_uri or get_first_interaction_uri(g)
    person_inertacting = get_initial_question(interaction_uri, g)
    print(f"{person_inertacting.character_name} says: {person_inertacting.statement}")
    list_of_anwsers = get_responses(interaction_uri, g)
    # No response options at all means the conversation is over.
    if not any(list_of_anwsers):
        no_furter_interaction()
    for i, answer in enumerate(list_of_anwsers):
        print(f"({i}) {answer[0]}")
    # NOTE(review): int(input(...)) will raise on non-numeric input and
    # IndexError on out-of-range choices -- no validation here.
    the_anwser = int(input("What would you like to respond?"))
    print(f"you have selected {list_of_anwsers[the_anwser][0]}" )
    maybe_next_interaction_uri = list_of_anwsers[the_anwser][1]
    if maybe_next_interaction_uri is None:
        no_furter_interaction()
    else:
        get_interaction_information(maybe_next_interaction_uri)
if __name__ == '__main__':
file = Path("tes_file.ttl")
g = rdflib.Graph()
g.parse(file, format='ttl')
get_interaction_information() | nimshi89/Individual_Project | Project.py | Project.py | py | 4,185 | python | en | code | 0 | github-code | 90 |
28734464160 | import json
import os
import confuse
import boto3
from botocore.exceptions import ClientError
from cachetools import Cache
# Credential keys every provider must supply; checked by
# BaseCredentialProvider.check_credentials().
required_credentials = [
    'aws_access_key',
    'aws_secret_key',
    'lwa_app_id',
    'lwa_client_secret'
]
class MissingCredentials(Exception):
    """
    Raised when one or more required credentials could not be found;
    the exception message lists the missing keys.
    """
class BaseCredentialProvider:
    """Base class for credential sources; subclasses implement load_credentials()."""
    # Names from `required_credentials` missing after the last check.
    # (Class-level default; check_credentials() rebinds it per instance.)
    errors = []
    # Mapping of credential name -> value, filled in by load_credentials().
    credentials = None
    def __init__(self, account: str = 'default', *args, **kwargs):
        self.account = account
    def __call__(self, *args, **kwargs):
        """Load credentials and return them when complete, else raise."""
        self.load_credentials()
        return self.check_credentials()
    def load_credentials(self):
        """Populate self.credentials; must be implemented by subclasses."""
        raise NotImplementedError()
    def check_credentials(self):
        """Return self.credentials when every required key is present and truthy.

        Raises MissingCredentials listing the missing keys otherwise.
        """
        try:
            self.errors = [c for c in required_credentials if
                           c not in self.credentials.keys() or not self.credentials[c]]
        except (AttributeError, TypeError):
            # self.credentials was never populated (None / not dict-like).
            raise MissingCredentials(f'Credentials are missing: {", ".join(required_credentials)}')
        if not len(self.errors):
            return self.credentials
        raise MissingCredentials(f'Credentials are missing: {", ".join(self.errors)}')
class FromCodeCredentialProvider(BaseCredentialProvider):
    """Provider for credentials passed directly as a dict in code."""
    def __init__(self, credentials: dict, *args, **kwargs):
        # The account name is irrelevant here -- the dict was handed to us
        # explicitly.  (The previous code also passed `credentials` as a
        # stray positional argument that the base class silently swallowed
        # via *args; dropped, and the Python-3 super() form used.)
        super().__init__('default')
        self.credentials = credentials
    def load_credentials(self):
        """Nothing to load -- credentials were supplied to __init__."""
        return None
class FromConfigFileCredentialProvider(BaseCredentialProvider):
    """Provider that reads an account section from the confuse credentials.yml."""
    def load_credentials(self):
        # A missing or unreadable config simply leaves self.credentials
        # unset; the caller treats that as "this provider has nothing".
        try:
            config = confuse.Configuration('python-sp-api')
            config_filename = os.path.join(config.config_dir(), 'credentials.yml')
            config.set_file(config_filename)
            account_data = config[self.account].get()
            self.credentials = account_data
        except (confuse.exceptions.NotFoundError, confuse.exceptions.ConfigReadError):
            return
class FromSecretsCredentialProvider(BaseCredentialProvider):
    """Provider that reads credentials from an AWS Secrets Manager secret."""
    def load_credentials(self):
        # Only active when SP_API_AWS_SECRET_ID points at a secret.
        if not os.environ.get('SP_API_AWS_SECRET_ID', None):
            return
        try:
            client = boto3.client('secretsmanager')
            response = client.get_secret_value(
                SecretId=os.environ.get('SP_API_AWS_SECRET_ID')
            )
            secret = json.loads(response.get('SecretString'))
            # Map the secret's key names onto the provider's expected keys.
            account_data = dict(
                refresh_token=secret.get('SP_API_REFRESH_TOKEN'),
                lwa_app_id=secret.get('LWA_APP_ID'),
                lwa_client_secret=secret.get('LWA_CLIENT_SECRET'),
                aws_secret_key=secret.get('SP_API_SECRET_KEY'),
                aws_access_key=secret.get('SP_API_ACCESS_KEY'),
                role_arn=secret.get('SP_API_ROLE_ARN')
            )
        except ClientError:
            # Secret unavailable / no permission: leave credentials unset.
            return
        else:
            self.credentials = account_data
class FromEnvironmentVariablesCredentialProvider(BaseCredentialProvider):
    """Provider that reads credentials from environment variables."""
    def load_credentials(self):
        """Populate self.credentials from the process environment."""
        env_names = dict(
            refresh_token='SP_API_REFRESH_TOKEN',
            lwa_app_id='LWA_APP_ID',
            lwa_client_secret='LWA_CLIENT_SECRET',
            aws_secret_key='SP_API_SECRET_KEY',
            aws_access_key='SP_API_ACCESS_KEY',
            role_arn='SP_API_ROLE_ARN',
        )
        self.credentials = {key: self._get_env(var)
                            for key, var in env_names.items()}
    def _get_env(self, key):
        # An account-suffixed variable wins; fall back to the bare name.
        return os.environ.get(f'{key}_{self.account}',
                              os.environ.get(key))
class CredentialProvider:
    """Resolves credentials by trying each provider in CREDENTIAL_PROVIDERS in order."""
    # Resolved Config object (set in __init__), or None if resolution failed.
    credentials = None
    # NOTE(review): this cache is never read or written in the visible code
    # -- possibly leftover from an earlier design; confirm before removing.
    cache = Cache(maxsize=10)
    # Providers tried in order; the first yielding a complete set wins.
    CREDENTIAL_PROVIDERS = [
        FromCodeCredentialProvider,
        FromEnvironmentVariablesCredentialProvider,
        FromSecretsCredentialProvider,
        FromConfigFileCredentialProvider
    ]
    def __init__(self, account='default', credentials=None):
        self.account = account
        for cp in self.CREDENTIAL_PROVIDERS:
            try:
                self.credentials = cp(account=account, credentials=credentials)()
                break
            except MissingCredentials:
                # This provider could not supply a full set; try the next.
                continue
        if self.credentials:
            self.credentials = self.Config(**self.credentials)
        else:
            raise MissingCredentials(f'Credentials are missing: {", ".join(required_credentials)}')
    class Config:
        """Simple attribute bag for the resolved credential values."""
        def __init__(self, **kwargs):
            self.refresh_token = kwargs.get('refresh_token')
            self.lwa_app_id = kwargs.get('lwa_app_id')
            self.lwa_client_secret = kwargs.get('lwa_client_secret')
            self.aws_access_key = kwargs.get('aws_access_key')
            self.aws_secret_key = kwargs.get('aws_secret_key')
            self.role_arn = kwargs.get('role_arn')
| LancersSEO/sp-api-amazon | base/credential_provider.py | credential_provider.py | py | 4,890 | python | en | code | 0 | github-code | 90 |
# Read N (only consumed, not otherwise used) and the N integers.
N = int(input())
A = list(map(int, input().split()))
if 0 in A:
    # Any zero factor makes the whole product zero.
    print(0)
else:
    LIMIT = 10 ** 18
    ans = 1
    # Multiplying largest-first makes an overflowing product exceed the
    # bound as early as possible.
    for value in sorted(A, reverse=True):
        ans *= value
        if ans > LIMIT:
            print(-1)
            break
    else:
        # Loop completed without exceeding the bound.
        print(ans)
def divisors(n):
    """Return every positive divisor of n (unsorted, paired order)."""
    found = []
    d = 1
    # Trial-divide only up to sqrt(n); each hit contributes its pair.
    while d * d <= n:
        quotient, remainder = divmod(n, d)
        if remainder == 0:
            found.append(d)
            # Skip the pair when d is the exact square root.
            if quotient != d:
                found.append(quotient)
        d += 1
    return found
# Count valid k: every divisor of N-1 except 1, plus each divisor k > 1 of N
# for which repeatedly dividing N by k leaves a remainder of 1.
N = int(input())
ans = len(divisors(N - 1)) - 1
for k in divisors(N):
    if k == 1:
        continue
    temp = N
    while temp % k == 0:
        temp //= k
    if temp % k == 1:
        ans += 1
print(ans)
class InvalidFormatException(Exception):
    """Raised when the equation string is not of the form 'x operator y'."""
class InvalidOperatorException(Exception):
    """Raised when the operator token is not one of + - * /."""
# Checks if number is one digit and integer
def is_one_digit(v):
    """Return True when float(v) is a whole number strictly between -10 and 10."""
    value = float(v)
    return -10 < value < 10 and value.is_integer()
# Checks the laziness of a user
def check(v1, v2, v3):
    """Print a teasing message when the requested operation is trivial."""
    teasing = ""
    if is_one_digit(v1) and is_one_digit(v2):
        teasing += msg_6
    if v3 == "*" and (v1 == 1 or v2 == 1):
        teasing += msg_7
    if v3 in "+*-" and (v1 == 0 or v2 == 0):
        teasing += msg_8
    if teasing:
        print(msg_9 + teasing)
    return
msg_0 = "Enter an equation"
msg_1 = "Do you even know what numbers are? Stay focused!"
msg_2 = "Yes ... an interesting math operation. You've slept through all classes, haven't you?"
msg_3 = "Yeah... division by zero. Smart move..."
msg_4 = "Do you want to store the result? (y / n):"
msg_5 = "Do you want to continue calculations? (y / n):"
msg_6 = " ... lazy"
msg_7 = " ... very lazy"
msg_8 = " ... very, very lazy"
msg_9 = "You are"
msg_10 = "Are you sure? It is only one digit! (y / n)"
msg_11 = "Don't be silly! It's just one number! Add to the memory? (y / n)"
msg_12 = "Last chance! Do you really want to embarrass yourself? (y / n)"
msg_list = [msg_0, msg_1, msg_2, msg_3, msg_4, msg_5, msg_6, msg_7, msg_8, msg_9, msg_10, msg_11, msg_12]
memory = 0
while True:
try:
# Checking input on correctness and calculating result
print(msg_0)
calc = input().split()
if len(calc) != 3:
raise InvalidFormatException
x, op, y = calc
x = memory if x == "M" else float(x)
y = memory if y == "M" else float(y)
if len(op) != 1 or op not in "+-/*":
raise InvalidOperatorException
check(x, y, op)
if op == "+":
result = float(x + y)
elif op == "-":
result = float(x - y)
elif op == "*":
result = float(x * y)
elif op == "/":
result = float(x / y)
print(result)
# Saving result in memory
while True:
print(msg_4)
answer_memory = input()
if is_one_digit(result):
msg_index = 10
while answer_memory != "n" and msg_index <= 12:
answer_memory = input(msg_list[msg_index] + "\n")
msg_index = msg_index + 1
if answer_memory == "y":
memory = result
elif answer_memory != "n" and answer_memory != "y":
continue
break
# Asking about continuing the calculations
while True:
print(msg_5)
answer_continue = input()
if answer_continue != "n" and answer_continue != "y":
continue
break
if answer_continue == "y":
continue
except ValueError:
print(msg_1)
continue
except InvalidOperatorException:
print(msg_2)
continue
except ZeroDivisionError:
print(msg_3)
continue
except InvalidFormatException:
print("Your format should be 'x operator y'")
continue
break
| Cilosan/calculator | main.py | main.py | py | 3,323 | python | en | code | 0 | github-code | 90 |
1964853761 | import retro #pip install gym-retro
import numpy as np #pip install numpy
import cv2 #pip install opencv-python==4.1.2.30
import neat #pip install neat-python
import pickle #pip install pickle
#IMPORTANT! You need the ROMS of Balloon Fight and Arkanoid to run this.
#Note the original config-feedforward file was taken from https://gitlab.com/lucasrthompson/Sonic-Bot-In-OpenAI-and-NEAT and we tweaked it for our needs.
#We used the Sonic tutorial as a base to familiarize ourselves with.
#We then implemented the NEAT algorithm based on the OpenAI-Gym-Retro features for Balloon Fight (score and pixel of water).
#To run: ./TrainingAlg.py
#Swap BalloonFight with Arkanoid and vice versa
# Global emulator environment; the Balloon Fight ROM must be installed.
env = retro.make('BalloonFight-Nes', 'Level1')
# NOTE(review): appears unused -- evaluate_genomes builds its own local
# screen_input each frame; confirm before removing.
screen_input = []
def evaluate_genomes(genomes, config):
    """NEAT fitness function: play one episode per genome and set genome.fitness.

    Fitness increments each time the in-game score improves; an episode ends
    when the emulator reports done or the stagnation counter `incr` hits a
    frame-dependent limit.
    """
    for genome_id, genome in genomes:
        #image of screen @ time of action
        ob = env.reset()
        #action of agent
        # NOTE(review): `ac` is sampled but never used afterwards.
        ac = env.action_space.sample()
        #inputs: x, y (size of screen), colors
        inx, iny, inc = env.observation_space.shape
        #Scale the image down to make the learning faster
        inx = int(inx/8)
        iny = int(iny/8)
        ann = neat.nn.recurrent.RecurrentNetwork.create(genome, config)
        curr_max_fitness = 0
        fitness = 0
        frame = 0
        incr = 0  # stagnation counter: frames without progress
        score = 0
        score_max = 0
        done = False
        #We have to adjust for level changes
        prevScreenInput = None
        noScreenChange = False
        #Uncomment to show what ANN sees
        #cv2.namedWindow("main", cv2.WINDOW_NORMAL)
        while not done:
            #comment out to hide what ANN sees
            env.render()
            """
            The bottom of the main stages only contains water; use the
            water pixel to tell if we're changing levels OR on a bonus stage or not.
            """
            newLevel = (np.array_equal(ob[len(ob)-1][len(ob)-1],[0,0,168]))
            frame += 1
            #Scaledimg is used for showing ANN visually
            scaledimg = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
            scaledimg = cv2.resize(scaledimg, (iny, inx))
            #Actually resizes the screenshot input
            ob = cv2.resize(ob, (inx, iny))
            #Turn screenshot input -> greyscale (simplifies input)
            ob = cv2.cvtColor(ob, cv2.COLOR_BGR2GRAY)
            #Resize the input for the ANN
            ob = np.reshape(ob, (inx, iny))
            #Uncomment to show what ANN sees
            #cv2.imshow('main', scaledimg)
            #cv2.waitKey(1)
            #Flatten screen for input
            screen_input = np.ndarray.flatten(ob)
            noScreenChange = (np.array_equal(prevScreenInput,screen_input))
            prevScreenInput = screen_input
            # Network output becomes the controller action for this frame.
            nnOutput = ann.activate(screen_input)
            ob, rew, done, info = env.step(nnOutput)
            #screen_input.clear()
            score = info['score']
            #For a basic neat alg comment this if statement
            if score > score_max:
                fitness += 1
                score_max = score
            #Uncomment this for the basic neat alg
            #fitness += rew
            #Reset done counter if fitness is increased from previous best
            if fitness > curr_max_fitness:
                curr_max_fitness = fitness
                incr = 0
            #if it's a bonus stage, reset the done counter
            elif not newLevel:
                incr = 0
            #if there was not a screen change, increment the done counter
            elif not noScreenChange:
                incr += 1
            # Episode time limit grows slowly with frames survived.
            if done or incr == int(500 + frame / 50):
                done = True
                print("Genome ID: " + str(genome_id) + " Fitness: " + str(fitness) + " Frames: "+str(frame))
        genome.fitness = fitness
# NEAT configuration comes from the tweaked config-feedforward file.
config = neat.Config(neat.DefaultGenome, neat.DefaultReproduction,
                     neat.DefaultSpeciesSet, neat.DefaultStagnation,
                     'config-feedforward')
#Creates the population based on the config file
p = neat.Population(config)
#insert desired filename to load a checkpoint
#p = neat.Checkpointer.restore_checkpoint("neat-checkpoint-78")
#Creates statistics for each generation
p.add_reporter(neat.StdOutReporter(True))
stats = neat.StatisticsReporter()
p.add_reporter(stats)
# Checkpoint every 10 generations (or every 10000 seconds).
p.add_reporter(neat.Checkpointer(10,10000))
# Evolve until evaluate_genomes satisfies the config's fitness goal.
winner = p.run(evaluate_genomes)
#Saves current ANN
with open('winner.pkl', 'wb') as output:
    pickle.dump(winner, output, 1)
| ZackPoorman/CIS365-Project-3 | TrainingAlg.py | TrainingAlg.py | py | 4,672 | python | en | code | 2 | github-code | 90 |
11609423816 | import os
import sys
sys.path.append("./")
# pylint:disable=no-name-in-module
from PySide2.QtWidgets import QApplication
from GWA2019.anmorph import AnmorphWindow
if __name__ == "__main__":
if len(sys.argv) >= 2:
app = QApplication(sys.argv)
data_path = sys.argv[1]
if not os.path.exists(data_path):
print("Cannot find ", data_path)
exit()
anmorph = AnmorphWindow(sys.argv[1])
anmorph.show()
sys.exit(app.exec_())
else:
print(sys.argv)
exit()
| seantyh/GWA2019 | anmorph.py | anmorph.py | py | 552 | python | en | code | 0 | github-code | 90 |
12303475940 | import sqlite3
from api_ege.database.Problem import Problem
class Database:
    """Thin sqlite3 wrapper around the `problems` table."""
    def __init__(self, path: str = 'problems.db'):
        """Open (or create) the database and ensure the problems table exists.

        `path` defaults to the original on-disk file; pass ':memory:' for an
        in-memory database (handy for tests).
        """
        self.db = sqlite3.connect(path)
        self.cursor = self.db.cursor()
        self.cursor.execute('''
            CREATE TABLE IF NOT EXISTS problems (
                id INTEGER PRIMARY KEY,
                text TEXT NOT NULL,
                answers TEXT NOT NULL,
                type INTEGER
            )
        ''')
        self.db.commit()
    def add_problem(self, problem: 'Problem'):
        """Insert `problem`, joining its answers with '&'; same-id duplicates are ignored.

        Parameterized SQL replaces the previous f-string interpolation, which
        broke on quotes in the problem text and allowed SQL injection.
        """
        self.cursor.execute(
            'INSERT OR IGNORE INTO problems VALUES (?, ?, ?, ?)',
            (problem.number, problem.question, '&'.join(problem.answer), problem.type),
        )
        self.db.commit()
    def get_problem_answer(self, text):
        """Return the '&'-split answer list for the problem with this exact text, or None."""
        self.cursor.execute('SELECT answers FROM problems WHERE text = ?', (text,))
        fetched = self.cursor.fetchone()
        return fetched[0].split("&") if fetched is not None else None
    def close(self):
        """Close the underlying connection."""
        self.db.close()
| BonePolk/AutoInfEgeSolver | api_ege/database/Database.py | Database.py | py | 1,041 | python | en | code | 1 | github-code | 90 |
7990456378 | import re
import reprlib
# Raw string avoids the invalid '\w' escape-sequence DeprecationWarning.
RE_WORD = re.compile(r'\w+')
class Sentence:
    """A sentence iterable over its words via the sequence protocol.

    Deliberately defines no __iter__: iteration works because __getitem__
    accepts 0-based indexes, which is what the demo code below exercises
    (abc.Iterable / abc.Iterator checks therefore report False).
    """
    def __init__(self, text):
        self.text = text
        # All word tokens, in order of appearance.
        self.words = RE_WORD.findall(text)
    def __getitem__(self, index):
        """Return the word at `index` (enables for-loops via the legacy protocol)."""
        return self.words[index]
    def __len__(self):
        return len(self.words)
    # def __iter__(self):
    #     return self.words
    # NOTE(review): left disabled on purpose -- returning the list would be
    # wrong anyway (a list is an iterable, not an iterator); the correct
    # body, if __iter__ were wanted, is `return iter(self.words)`.
    def __repr__(self):
        return 'Sentence(%s)' % reprlib.repr(self.text)
# Demo: a Sentence iterates word-by-word through __getitem__ alone.
s1 = Sentence('"The time ha... Walrus said,"')
for word in s1:
    print(word)
print()
from collections import abc
# Both report False: Sentence has neither __iter__ nor __next__.
print(isinstance(s1, abc.Iterable))
print(isinstance(s1, abc.Iterator))
print()
# Strings iterate character-by-character.
s2 = "ABC"
for v in s2:
    print(v)
print()
# Manual iteration with iter()/next() until StopIteration.
it1 = iter(s1)
# NOTE(review): it2 is created but never consumed -- dead code?
it2 = iter(s2)
while True:
    try:
        print(next(it1))
    except StopIteration:
        break
print()
| SELO77/selo_python | 3.X/FluentPython/14-iterator,generator/14-1.py | 14-1.py | py | 808 | python | en | code | 0 | github-code | 90 |
27093389228 | from spack import *
class Portcullis(AutotoolsPackage):
    """PORTable CULLing of Invalid Splice junctions"""
    homepage = "https://github.com/maplesond/portcullis"
    url = "https://github.com/maplesond/portcullis/archive/Release-1.1.2.tar.gz"
    version('1.1.2', '5c581a7f827ffeecfe68107b7fe27ed60108325fd2f86a79d93f61b328687749')
    depends_on('autoconf@2.53:', type='build')
    depends_on('automake@1.11:', type='build')
    depends_on('libtool@2.4.2:', type='build')
    depends_on('m4', type='build')
    depends_on('zlib', type='build')
    depends_on('samtools', type='build')
    depends_on('python@3.4:', type=('build', 'run'))
    depends_on('py-setuptools', type=('build', 'run'))
    depends_on('py-pandas', type=('build', 'run'))
    # later versions of py-sphinx don't get detected by the configure script
    depends_on('py-sphinx@1.3:1.4')
    def patch(self):
        """Adjust the shipped build files before configure runs."""
        # remove static linking to libstdc++
        filter_file(
            'AM_LDFLAGS="-static-libstdc++"',
            'AM_LDFLAGS=""',
            'configure.ac', string=True
        )
        # prevent install scripts from ruining our PYTHONPATH
        filter_file(
            'export PYTHONPATH=$(DESTDIR)$(pythondir)',
            'export PYTHONPATH="$(PYTHONPATH):$(DESTDIR)$(pythondir)"',
            'scripts/Makefile.am', string=True
        )
    def build(self, spec, prefix):
        """Build man pages and the bundled boost before the normal build."""
        # build manpages
        make('man')
        # run boost build script
        sh = which('sh')
        sh('build_boost.sh')
| matzke1/spack | var/spack/repos/builtin/packages/portcullis/package.py | package.py | py | 1,529 | python | en | code | 2 | github-code | 90 |
38984752044 |
import matplotlib
# Use the non-interactive Agg backend so figures can be saved headlessly.
matplotlib.use('agg')
import matplotlib.pyplot as plt
import tensorflow as tf
import numpy as np
from tensorflow.examples.tutorials.mnist import input_data
plt.ioff()
# Parameters for user to set
testing_batch_size = 64
# NOTE(review): `epochs` and `add_skip_connect` are not used in this test
# script -- presumably shared with the training script; confirm.
epochs = 10
plot_rows = 4 # num of test image rows to run and plot
plot_cols = 6 # ditto (cols must be even)
add_skip_connect = False # if true will learn ID function in like 2 epochs
# Getting the MNIST data set and logging its parameters
data = input_data.read_data_sets('data/MNIST', one_hot=True)
img_size = 28
img_size_flat = img_size*img_size
img_shape = (img_size, img_size)
num_channels_x = 1
training_size = len(data.train.labels)
# Restore the trained autoencoder graph and weights from the checkpoint.
session = tf.Session()
saver = tf.train.import_meta_graph('AE_model/AE_model.meta')
saver.restore(session, tf.train.latest_checkpoint('AE_model'))
graph = tf.get_default_graph()
# Named tensors from the saved graph: input, reconstruction, conv taps.
x = graph.get_tensor_by_name('x:0')
output_layer = graph.get_tensor_by_name('output_layer:0')
conv_layer_1 = graph.get_tensor_by_name('conv_layer_1:0')
conv_layer_2 = graph.get_tensor_by_name('conv_layer_2:0')
conv_layer_3 = graph.get_tensor_by_name('conv_layer_3:0')
# Run the learned network on testing data
flat_imgs,_ = data.test.next_batch(testing_batch_size)
imgs = flat_imgs.reshape((-1,img_size,img_size,1))
feed_dict = {x: imgs}
[imgs_recon, layer_3_imgs] = session.run([output_layer, conv_layer_3], feed_dict)
# Show original images and reconstructed images
# Subplots alternate original (odd slots) and reconstruction (even slots).
fig = plt.figure(figsize=(10,10))
for i in range(1,plot_rows*plot_cols+1):
    if(i%2==1):
        fig.add_subplot(plot_rows,plot_cols,i)
        plt.imshow(imgs[i-1,:,:,0],cmap='gray')
    else:
        fig.add_subplot(plot_rows,plot_cols,i)
        plt.imshow(imgs_recon[i-2,:,:,0], cmap='gray')
plt.savefig('sample_encodings.png')
plt.close(fig)
| Adrian-Markelov/Deblurring | experiments/AE_model/AE_test.py | AE_test.py | py | 1,804 | python | en | code | 0 | github-code | 90 |
6384857318 | from unittest import TestCase
from unittest.mock import patch, MagicMock
from domain.book.entities.book_entity import Book, BookStatus
from domain.book_borrowing.entities.book_borrowing_entity import BookBorrowing, BookBorrowingBookStatus
from domain.book_borrowing.use_cases.return_book.return_book_use_case import ReturnBookUseCase
from errors.business_error import BusinessError
from helpers.date.date_helper import DateHelper
module_path = ReturnBookUseCase.__module__
class TestReturnBookUseCase(TestCase):
    """Unit tests for ReturnBookUseCase, with the data layer fully mocked."""

    def test_return_book_in_the_deadline_should_not_charge_fine(self):
        """Returning on time closes the borrowing and frees the book."""
        book = Book()
        book_borrowing = BookBorrowing(
            created_at=DateHelper.now()
        )
        # Fine estimator mocked to report no fine due.
        estimate_borrowing_fine = MagicMock(
            total_fine=0
        )
        with patch.object(Book, 'first', return_value=book), \
                patch(f'{module_path}.request'), \
                patch.object(BookBorrowing, 'first', return_value=book_borrowing), \
                patch(f'{module_path}.EstimateBorrowingFineUseCase', return_value=estimate_borrowing_fine), \
                patch.object(BookBorrowing, 'save', spec=True) as save_book_borrowing, \
                patch.object(Book, 'save', spec=True) as save_book:
            ReturnBookUseCase(
                book_id='5f25eac0159cb9dfa8a2e8d2'
            ).exec()
            # Borrowing is closed and persisted; book becomes available again.
            self.assertEqual(BookBorrowingBookStatus.CLOSED.value, book_borrowing.status)
            save_book_borrowing.assert_called_once()
            self.assertEqual(BookStatus.AVAILABLE.value, book.status)
            save_book.assert_called_once()

    def test_return_book_with_fine_due_delay_should_raise_error(self):
        """A non-zero estimated fine must block the return."""
        book = Book()
        book_borrowing = BookBorrowing(
            created_at=DateHelper.now()
        )
        estimate_borrowing_fine = MagicMock(
            total_fine=10
        )
        with patch.object(Book, 'first', return_value=book), \
                patch(f'{module_path}.request'), \
                patch.object(BookBorrowing, 'first', return_value=book_borrowing), \
                patch(f'{module_path}.EstimateBorrowingFineUseCase', return_value=estimate_borrowing_fine):
            try:
                ReturnBookUseCase(
                    book_id='5f25eac0159cb9dfa8a2e8d2'
                ).exec()
                self.fail()
            except BusinessError as e:
                self.assertEqual(BusinessError.NOT_ALLOWED_TO_RETURN_BOOK_WITH_OPEN_FINE, e.code)

    def test_return_book_that_doesnt_exist_should_raise_error(self):
        """Unknown book id raises BOOK_NOT_FOUND."""
        with patch.object(Book, 'first', return_value=None):
            try:
                ReturnBookUseCase(
                    book_id='5f25eac0159cb9dfa8a2e8d2'
                ).exec()
                self.fail()
            except BusinessError as e:
                self.assertEqual(BusinessError.BOOK_NOT_FOUND, e.code)

    def test_return_book_that_its_not_borrowed_to_you_should_raise_error(self):
        """No matching borrowing record raises BOOK_BORROWING_NOT_FOUND."""
        book = Book()
        with patch.object(Book, 'first', return_value=book), \
                patch(f'{module_path}.request'), \
                patch.object(BookBorrowing, 'first', return_value=None):
            try:
                ReturnBookUseCase(
                    book_id='5f25eac0159cb9dfa8a2e8d2'
                ).exec()
                self.fail()
            except BusinessError as e:
                self.assertEqual(BusinessError.BOOK_BORROWING_NOT_FOUND, e.code)
| EduardoThums/online-bookstore-challenge | domain/book_borrowing/use_cases/return_book/return_book_use_case_test.py | return_book_use_case_test.py | py | 3,457 | python | en | code | 0 | github-code | 90 |
22205107861 | ### Requisitos
#Para resolver este problema, você deve usar no máximo três comparações básicasNão
#serão consideradas soluções que utilizem funções prontas para identificação dos valores maiores/menores
#--------------------------------------------------------------------------------------------------------
### Maior
#Dados três valores inteiros, sua tarefa é identificar quem é o maior
#--------------------------------------------------------------------------------------------------------
### Entrada
#Três linhas, cada uma com um valor inteiro
#--------------------------------------------------------------------------------------------------------
### Saída
#Como nos exemplos
#--------------------------------------------------------------------------------------------------------
variavel1 = int(input("Digite o primeiro número:"))
variavel2 = int(input("Digite o segundo número:"))
variavel3 = int(input("Digite o terceiro número:"))
# Bug fix: `a > (b and c)` compared `a` against the truthiness chain
# `b and c` (which evaluates to `c` whenever `b` is non-zero), not against
# both values.  Compare explicitly, using at most three comparisons as the
# exercise requires.
if variavel1 >= variavel2 and variavel1 >= variavel3:
    print("O primeiro número é o maior")
elif variavel2 >= variavel3:
    print("O segundo número é o maior")
else:
    print("O terceiro número é o maior")
383114929 | import requests
import os
from bs4 import BeautifulSoup
def get_html(url):
    """Fetch *url* and return the body decoded as gbk text.

    Raises requests.HTTPError on a non-2xx status.
    """
    response = requests.get(url, timeout=30)
    response.raise_for_status()
    response.encoding = 'gbk'
    return response.text
def get_content(url):
    """Scrape the 2345 movie top list at *url*: print each movie's details
    and save its poster image under D:/img/.

    Requires network access; poster downloads reuse plain requests.get.
    """
    html = get_html(url)
    soup = BeautifulSoup(html, 'lxml')
    # The <ul> element holding the ranked movie list.
    movie_list = soup.find('ul', class_ = 'picList clearfix')
    movies = movie_list.find_all('li')
    # Make sure the output directory exists before writing posters.
    os.makedirs("D:/img", exist_ok=True)
    for top in movies:
        img_url = 'http:' + top.find('img')['src']
        print(img_url)
        name = top.find('span', class_ = 'sTit').a.text
        try:
            time = top.find('span', class_ = 'sIntro').text
        except AttributeError:
            # find() returned None (no release-date span), so .text raised.
            # Catch only AttributeError: the original bare `except:` also
            # swallowed KeyboardInterrupt/SystemExit.
            time = '暂无上映时间'
        actors = top.find('p', class_ = 'pActor')
        actor = ''
        for act in actors.contents:
            actor = actor + act.string + ' '
        # The movie synopsis paragraph.
        intro = top.find('p', class_ = 'pTxt pIntroShow').text
        print("片名:{}\t{}\n{}\n{} \n \n ".format(name, time, actor, intro))
        with open("D:/img/" + name + ".png", 'wb+' ) as f:
            f.write(requests.get(img_url).content)
def main():
    """Entry point: scrape the 2345 movie top-chart page."""
    url = 'http://dianying.2345.com/top/'
    get_content(url)
if __name__ == '__main__':
    main()
33074765629 | #!/usr/bin/env python3
import chess
import chess.pgn # read Portable Game Notation format
import chess.svg # Scalable Vector Graphics
from cairosvg import svg2png
import os
import io
from PIL import Image
from tqdm.auto import tqdm # Progress bar
from pydub import AudioSegment
# Map each piece name (file stem) to its PIL image and to the first second of
# its sound clip; load the PGN game to replay.
images = dict([(os.path.splitext(f)[0], Image.open(f"images/{f}", 'r')) for f in os.listdir("images")]) # Open each image from the images folder
audio_clips = dict([(os.path.splitext(f)[0], AudioSegment.from_file(f"sound clips/{f}")[:1000]) for f in os.listdir("sound clips")])
with open("pgn/kasparov-deep-blue-1997.pgn") as f:
    first_game = chess.pgn.read_game(f)
def render_image(filename, chessboard_svg, image=None, is_check=False):
    """Compose one 1920x1080 frame and write it to *filename*.

    The rendered chessboard goes in the top-left 400x400 region; *image*
    (the moved piece's picture), if given, is pasted to its right, and the
    "king under threat" overlay is added below the board when *is_check*.
    """
    png_buffer = io.BytesIO()
    svg2png(bytestring=chessboard_svg, write_to=png_buffer)  # SVG -> PNG in memory
    board_image = Image.open(png_buffer)
    frame = Image.new('RGB', (1920, 1080))
    frame.paste(board_image, (0, 0, 400, 400))
    if image:
        frame.paste(image, (400, 0))
    if is_check:
        frame.paste(images["king_under_threat"], (0, 400))
    frame.save(filename)
# Render the opening position, then one frame + one second of audio per move.
board = first_game.board()
svg = chess.svg.board(board=board)
render_image("render/0.jpg", svg)
audio = AudioSegment.silent(duration=1000)
moves = list(first_game.mainline_moves())
for i, move in enumerate(tqdm(moves)):
    board.push(move)
    if board.is_game_over():
        print(board.result())
    # Name of the piece now sitting on the destination square.
    piece_type = chess.piece_name(board.piece_type_at(move.to_square))
    svg = chess.svg.board(board=board, lastmove=move, size=400)
    render_image(f"render/{i + 1}.jpg", svg, images[piece_type], board.is_check())
    clip = audio_clips[piece_type]
    if board.is_check():
        # Mix in the alert sound on top of the piece's clip.
        clip = clip.overlay(audio_clips["alert"])
    audio += clip
audio.export("audio.mp3", format="mp3")
6593743891 | class Solution:
def setZeroes(self, matrix: List[List[int]]) -> None:
"""
Do not return anything, modify matrix in-place instead.
"""
ncol = len(matrix[0])
zero_col = []
for index, row in enumerate(matrix):
if 0 in row:
zero_col = zero_col + [i for i, x in enumerate(row) if x == 0]
row[:] = [0] * ncol
for row in matrix:
for col in set(zero_col):
row[col] = 0
| ConorMcNamara/Project-Euleetcode | Leetcode/Python/73_setMatrixZeros.py | 73_setMatrixZeros.py | py | 494 | python | en | code | 0 | github-code | 90 |
40920895688 |
from testconfig import config
from collections import defaultdict
from tests.integration.core.chroma_integration_testcase import ChromaIntegrationTestCase
class TestStartingAndStoppingTargets(ChromaIntegrationTestCase):
    """Integration test: a filesystem's targets follow its start/stop state."""

    def test_filesystem_stops_and_starts(self):
        """Create a filesystem, then toggle stopped/available and verify every
        OST/MDT HA resource is running exactly when the filesystem is available."""
        filesystem_id = self.create_filesystem_standard(config['lustre_servers'][0:4])
        filesystem = self.get_filesystem(filesystem_id)
        # Per target kind, collect each target record and its active host.
        target = defaultdict(list)
        host = defaultdict(list)
        for kind in ['osts', 'mdts']:
            for target_index in range(0, len(filesystem[kind])):
                if kind == 'mdts':
                    # MDT records are embedded directly in the filesystem record.
                    target[kind].append(filesystem[kind][target_index])
                elif kind == 'osts':
                    # OST entries are URIs and must be fetched from the manager.
                    response = self.chroma_manager.get(filesystem[kind][target_index])
                    self.assertEqual(response.status_code, 200)
                    target[kind].append(response.json)
                host_response = self.chroma_manager.get(target[kind][target_index]['active_host'])
                self.assertEqual(host_response.status_code, 200)
                host[kind].append(host_response.json)
        self.assertState(filesystem['resource_uri'], 'available')
        # While available, every target resource must be running on its host.
        for kind in ['osts', 'mdts']:
            for target_index in range(0, len(filesystem[kind])):
                self.assertTrue(self.remote_operations.get_resource_running(host[kind][target_index],
                                                                            target[kind][target_index]['ha_label']))
        # Stop then restart the filesystem; resources must track the state.
        for fs_state in ['stopped', 'available']:
            self.set_state(filesystem['resource_uri'], fs_state)
            for kind in ['osts', 'mdts']:
                for target_index in range(0, len(filesystem[kind])):
                    self.assertEqual(fs_state == 'available',
                                     self.remote_operations.get_resource_running(host[kind][target_index],
                                                                                 target[kind][target_index][
                                                                                     'ha_label']))
| GarimaVishvakarma/intel-chroma | chroma-manager/tests/integration/shared_storage_configuration/test_stop_and_start_filesystem.py | test_stop_and_start_filesystem.py | py | 2,150 | python | en | code | 0 | github-code | 90 |
21171911137 | import matplotlib.pyplot as plt
import pandas as pd
import argparse
import numpy as np
from pathlib import Path
from datetime import datetime
from loadingData import univariateDatasets
import os
def parse_args():
    """Parse the CLI: --filepath/-f (required input CSV) and --plotdir/-d
    (output directory, defaulting to a timestamped folder under plots/)."""
    arg_parser = argparse.ArgumentParser(description='Create plot 1')
    arg_parser.add_argument('--filepath', '-f', required=True, type=str)
    arg_parser.add_argument(
        '--plotdir', '-d', required=False, type=str,
        default=f'plots/{datetime.now().strftime("%d-%m-%Y-%H-%M-%S")}/')
    return arg_parser.parse_args()
if __name__ == "__main__":
args = parse_args()
plots_dir = Path(args.plotdir)
os.mkdir(plots_dir)
csv_path = args.filepath
df = pd.read_csv(csv_path, dtype="str")
print(df.head())
method = 'hmm nn'
df = df[df['method'] == method]
datasets = list(set(df['dataset']))
datasets.sort()
for dataset in datasets:
dataset_df = df[df['dataset'] == dataset]
if dataset_df.shape[0] != 270:
print(f"Skipping {dataset} (only {dataset_df.shape[0]} rows)")
continue
cov_types = ['spherical', 'diag', 'full']
fig, axs = plt.subplots(1, len(cov_types), figsize=(16, 5), dpi=100)
for c in range(len(cov_types)):
covariance_type = cov_types[c]
cov_df = dataset_df[dataset_df['covariance_type'] == covariance_type]
steps = sorted(list(set(cov_df['no_states'])), reverse=True)
maxiters = sorted(list(set(cov_df['maxiter'])))
no_inits = sorted(list(set(cov_df['no_random_initializations'])))
bw_parameters = [(mi, noi) for noi in no_inits for mi in maxiters]
all_accuracies = []
for step in steps:
step_df = cov_df[cov_df['no_states'] == step]
step_accuracies = []
for mi, noi in bw_parameters:
bw_params_df = step_df[step_df['maxiter'] == mi]
bw_params_df = bw_params_df[bw_params_df['no_random_initializations'] == noi]
accuracies = bw_params_df['accuracy']
if (accuracies == '?').any():
step_accuracies.append(0)
else:
accuracies = [float(a) for a in list(accuracies)]
step_accuracies.append(sum(accuracies)/len(accuracies))
all_accuracies.append(step_accuracies)
all_accuracies = np.asarray(all_accuracies)
im = axs[c].imshow(np.asarray(all_accuracies), cmap='hot', interpolation='nearest', vmin=0.0, vmax=1.0)
axs[c].set_xticks(np.arange(len(bw_parameters)))
axs[c].set_yticks(np.arange(len(steps)))
axs[c].set_xticklabels(bw_parameters, rotation = 45)
axs[c].set_yticklabels(steps)
axs[c].set_xlabel('Baum-Welch parameters')
axs[c].set_ylabel('Hidden states')
for i in range(len(steps)):
for j in range(len(bw_parameters)):
text = axs[c].text(j, i, f"{all_accuracies[i, j]:.2f}",
ha="center", va="center", color="black")
axs[c].set_title(f"{covariance_type}")
cbar = fig.colorbar(im, ax=axs.tolist(), orientation="horizontal")
cbar.ax.set_xlabel('Accuracy')
dataset_info = univariateDatasets.DATASET_NAME_TO_INFO[dataset]
no_classes = dataset_info[3]
train_size = dataset_info[0]
series_length = dataset_info[2]
plt.suptitle(f'{dataset} ({no_classes} classes, {train_size}x{series_length} train)')
# plt.show()
plt.savefig(plots_dir / f'{dataset}.png')
plt.close()
| JakubBilski/mini-fcm | src/render_heatmap_acc.py | render_heatmap_acc.py | py | 3,720 | python | en | code | 0 | github-code | 90 |
35133245169 | from django.core.cache import cache
# Namespace prefix for all player-hall cache keys.
prefix = 'player_'

def set_player_hall(key, value):
    """Cache *value* under the namespaced player-hall key for *key*.

    cache.set() already overwrites an existing entry, so the previous
    has_key()/delete() dance was redundant and introduced a check-then-act
    (TOCTOU) race between processes.
    """
    u_key = '%s%s' % (prefix, key)
    cache.set(u_key, value)
def get_player_hall(key):
    """Return the cached player-hall value for *key*, or None when absent.

    cache.get() returns None on a miss by default, which removes the
    has_key()/get() race (the entry could expire between the two calls).
    """
    u_key = '%s%s' % (prefix, key)
    return cache.get(u_key)
| ydtg1993/shaibao-server-python | system/cache/player.py | player.py | py | 370 | python | en | code | 0 | github-code | 90 |
30786654314 | """
Radio button page test
"""
import time, unittest
from selenium import webdriver
from selenium.webdriver.common.by import By
from webdriver_manager.chrome import ChromeDriverManager
from pages.radio_button_page import RadioButtonPage
class TestRadioButton(unittest.TestCase):
    """Live UI tests for the formy radio-button page (one browser per test)."""

    # Locators: link to the radio-button page, then the three buttons.
    RADIO_BTN = (By.XPATH, '/html/body/div/div/li[12]/a')
    RADIO_BTN_1 = (By.ID, 'radio-button-1')
    RADIO_BTN_2 = (By.XPATH, '/html/body/div/div[2]/input')
    RADIO_BTN_3 = (By.XPATH, '/html/body/div/div[3]/input')
    def setUp(self) -> None:
        """Open the site and navigate to the radio-button page."""
        self.driver = webdriver.Chrome(ChromeDriverManager().install())
        self.driver.get('https://formy-project.herokuapp.com/')
        time.sleep(2)
        self.driver.maximize_window()
        self.driver.find_element(*self.RADIO_BTN).click()
        time.sleep(1.5)
    def test_btn_1_displayed(self):
        """Button 1 is pre-selected when the page loads."""
        btn_1 = self.driver.find_element(*self.RADIO_BTN_1)
        self.assertTrue(btn_1.is_selected())
        time.sleep(1)
        # NOTE(review): quitting here makes tearDown quit a dead session --
        # confirm this double-quit is intended.
        self.driver.quit()
    def test_btn_3(self):
        """Clicking button 3 selects it and deselects the other two."""
        radio_click = RadioButtonPage(self.driver)
        radio_click.click_on_btn_3()
        btn_1 = self.driver.find_element(*self.RADIO_BTN_1)
        btn_2 = self.driver.find_element(*self.RADIO_BTN_2)
        btn_3 = self.driver.find_element(*self.RADIO_BTN_3)
        self.assertFalse(btn_1.is_selected())
        time.sleep(1)
        self.assertFalse(btn_2.is_selected())
        time.sleep(1)
        self.assertTrue(btn_3.is_selected())
        time.sleep(1)
        self.driver.close()
    def test_btn_2(self):
        """Clicking button 2 selects it and deselects the other two."""
        radio_click = RadioButtonPage(self.driver)
        radio_click.click_on_btn_2()
        btn_1 = self.driver.find_element(*self.RADIO_BTN_1)
        btn_2 = self.driver.find_element(*self.RADIO_BTN_2)
        btn_3 = self.driver.find_element(*self.RADIO_BTN_3)
        self.assertFalse(btn_1.is_selected())
        time.sleep(1)
        self.assertTrue(btn_2.is_selected())
        time.sleep(1)
        self.assertFalse(btn_3.is_selected())
        time.sleep(1)
        self.driver.close()
    def test_btn_1(self):
        """Clicking button 1 keeps it selected; the others stay deselected."""
        radio_click = RadioButtonPage(self.driver)
        radio_click.click_on_btn_1()
        btn_1 = self.driver.find_element(*self.RADIO_BTN_1)
        btn_2 = self.driver.find_element(*self.RADIO_BTN_2)
        btn_3 = self.driver.find_element(*self.RADIO_BTN_3)
        self.assertTrue(btn_1.is_selected())
        time.sleep(1)
        self.assertFalse(btn_2.is_selected())
        time.sleep(1)
        self.assertFalse(btn_3.is_selected())
        time.sleep(1)
        self.driver.close()
    def tearDown(self) -> None:
        """Always shut the browser down after each test."""
        time.sleep(2)
        self.driver.quit()
if __name__ == '__main__':
unittest.main()
| AntonioIonica/Automation_testing | OOP_formy/src/tests/test_radio_button_page.py | test_radio_button_page.py | py | 2,719 | python | en | code | 0 | github-code | 90 |
3753639128 | #Write pay computation to give the employee 1.5 times the hourly rate for hours worked above 40 hours (Input: hours and rate).
# Overtime pay: hours above 40 are paid at 1.5x the hourly rate.
# float() accepts fractional hours/rates (e.g. 37.5, 15.25), which the
# original int() rejected; integer inputs still yield the same rounded pay.
hrs = float(input('Enter the number of hours the employee worked '))
rate = float(input("Enter the hourly rate "))
if hrs <= 40:
    pay = round(rate * hrs)
else:
    pay = round(rate * 40 + 1.5 * rate * (hrs - 40))
print("The total payment to be received by the employee is ", pay)
13117964979 | """This module contains helper functions for the api blueprint."""
import os
from flask import current_app
from datetime import datetime
def string_to_date(date_string, format):
    """Parse *date_string* according to *format* and return the
    corresponding date object, or None when it does not parse.
    """
    try:
        parsed = datetime.strptime(date_string, format)
    except ValueError:
        return None
    return parsed.date()
def allowed_file_extension(filename):
    """Return True when *filename*'s extension (after the last dot,
    case-insensitive) is in the app's configured set of allowed extensions.
    """
    if "." not in filename:
        return False
    extension = filename.rsplit(".", 1)[-1].lower()
    return extension in current_app.config["ALLOWED_EXTENSIONS"]
def create_directory(directory):
    """Create *directory* (including missing parents) if it doesn't exist.

    Uses exist_ok=True instead of the previous exists()/makedirs() pair,
    which was racy: another process could create the directory between the
    check and the call, raising FileExistsError.
    """
    os.makedirs(directory, exist_ok=True)
def create_filepath(filename, version=1):
    """Given a filename and version, return the filepath where the file
    will be stored (e.g. "photo.jpg" -> "<upload_dir>/photo_1.jpg").

    Raises ValueError when the filename has no allowed extension.  The
    original fell through with `new_filename` unbound (NameError) in that
    case, and its substring `find()` could also match an "extension"
    appearing anywhere in the name (e.g. "my.jpg.png").
    """
    extensions = current_app.config["ALLOWED_EXTENSIONS"]
    # Split on the LAST dot so only the real extension is considered.
    stem, sep, extension = filename.rpartition(".")
    if not sep or extension.lower() not in extensions:
        raise ValueError(f"{filename!r} has no allowed file extension")
    # Build a versioned name in the format my_old_file_2.jpg
    new_filename = f"{stem}_{version}.{extension}"
    filepath = current_app.config["UPLOAD_DIRECTORY"] + "/" + new_filename
    return filepath
| EricMontague/MailChimp-Newsletter-Project | server/app/api/helpers.py | helpers.py | py | 1,456 | python | en | code | 0 | github-code | 90 |
11601127778 | from setuptools import find_namespace_packages, setup
with open("README.md", encoding="utf8") as readme_file:
long_description = readme_file.read()
setup(
name="merlin",
version="0.0.1",
packages=[],
url="https://github.com/NVIDIA-Merlin/Merlin",
author="NVIDIA Corporation",
license="Apache 2.0",
long_description=long_description,
long_description_content_type="text/markdown",
classifiers=[
"Development Status :: 4 - Beta",
"Programming Language :: Python :: 3",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Topic :: Software Development :: Libraries",
"Topic :: Scientific/Engineering",
],
zip_safe=False,
install_requires=[],
extras_require={},
)
| NVIDIA-Merlin/Merlin | setup.py | setup.py | py | 804 | python | en | code | 627 | github-code | 90 |
3207647136 | # coding=utf-8
import json
from flask import Flask, jsonify, request
from flask_cors import CORS
from .entities.entity import Session, engine, Base
from .entities.exam import Exam, ExamSchema
###################################################
from .entities.phone_book import PhoneBook, PhoneBookSchema
###################################################
# Flask application with CORS enabled so cross-origin clients can call it.
app = Flask(__name__)
CORS(app)
# generate database schema
Base.metadata.create_all(engine)
@app.route('/phones')
def get_phone():
    """Return every PhoneBook row serialized as JSON with HTTP 200."""
    session = Session()
    try:
        phone_objects = session.query(PhoneBook).all()
        schema = PhoneBookSchema(many=True)
        phones = schema.dump(phone_objects)
    finally:
        # Close even when the query/serialization raises, so the DB
        # connection is always returned (the original leaked it on error).
        session.close()
    return jsonify(phones), 200
@app.route('/exams')
def get_exam():
    """Return every Exam row serialized as JSON with HTTP 200."""
    session = Session()
    try:
        exam_objects = session.query(Exam).all()
        schema = ExamSchema(many=True)
        exams = schema.dump(exam_objects)
    finally:
        # Close even when the query/serialization raises, so the DB
        # connection is always returned (the original leaked it on error).
        session.close()
    return jsonify(exams), 200
# Open a DB session and seed a dummy record if the table is empty.
session = Session()
exams = session.query(Exam).all()
if len(exams) == 0:
    # create and persist dummy exam
    python_exam = Exam("Kim tae seong", "hpc tech team", "E640025")
    session.add(python_exam)
    session.commit()
    session.close()
# reload exams
# NOTE(review): when the seed branch ran, the session was already closed
# here; SQLAlchemy reopens a connection on next use -- confirm intended.
exams = session.query(Exam).all()
print('### Exams:')
examSchema = ExamSchema(many=True)
examJson = examSchema.dump(exams)
print(json.dumps(examJson))
# Close the DB session.
session.close()
48890916530 | import os
import time
import unittest
from selenium import webdriver
from pages.base_page import BasePage
from pages.dashboard import Dashboard
from pages.login_page import LoginPage
from utils.settings import DRIVER_PATH, IMPLICITLY_WAIT
from selenium.webdriver.chrome.service import Service
class TestLoginPage(unittest.TestCase):
    """End-to-end Selenium tests for logging in to the scouts application.

    Fixes vs. the original:
      * setUp/tearDown were decorated with @classmethod while still taking
        `self`; unittest runs them per test instance, so the decorator only
        turned every attribute into a class attribute.  Plain instance
        methods restore the intended per-test isolation.
      * The stub `__init__(self, methodName: str = ...)` added nothing and
        had a broken Ellipsis default; unittest always passes methodName
        explicitly, so it is removed.
      * os.chmod(..., 755) used a decimal literal; 0o755 is the intended
        rwxr-xr-x mode.
    """

    driver = None

    def setUp(self) -> None:
        """Launch Chrome, open the login page and build the page objects."""
        os.chmod(DRIVER_PATH, 0o755)
        self.driver_service = Service(executable_path=DRIVER_PATH)
        self.driver = webdriver.Chrome(service=self.driver_service)
        self.driver.get('https://scouts.futbolkolektyw.pl/en/')
        self.driver.fullscreen_window()
        self.driver.implicitly_wait(IMPLICITLY_WAIT)
        self.user_login = LoginPage(self.driver)
        self.dashboard_page = Dashboard(self.driver)
        self.base_page = BasePage(self.driver)

    def test_log_in_to_the_system(self):
        """Happy path: valid credentials land on the dashboard."""
        user_login = LoginPage(self.driver)
        user_login.title_of_page()
        user_login.check_title_of_box()
        user_login.type_in_email('user07@getnada.com')
        user_login.type_in_password('Test-1234')
        user_login.click_on_the_sign_in_button()
        dashboard_page = Dashboard(self.driver)
        dashboard_page.title_of_page()
        time.sleep(4)

    def test_empty_login(self):
        """Submitting with an empty e-mail must not log the user in."""
        self.user_login.log_in('', 'Test-1234')
        time.sleep(4)

    def test_invalid_password_to_log_in(self):
        """A wrong password must be rejected."""
        self.user_login.log_in('user07@getnada.com', 'Test-4444')
        time.sleep(4)

    def test_text_page_title(self):
        """The login page exposes the expected title."""
        self.user_login.title_of_page()
        time.sleep(4)

    def tearDown(self) -> None:
        """Always shut the browser down after each test."""
        self.driver.quit()
| NataSQT/Challenge_NataliiaSokolova | test_cases/login_to_the_system.py | login_to_the_system.py | py | 1,783 | python | en | code | 0 | github-code | 90 |
def bin_to_dec(binary_str):
    """Convert a binary string (e.g. "1101") to its decimal string ("13")."""
    decimal_value = int(binary_str, 2)
    return str(decimal_value)
def dec_to_bin(decimal_str):
    """Convert a decimal string to a binary string (no "0b" prefix).

    Uses format(..., 'b') instead of bin(...)[2:], which mangled negative
    numbers (bin(-5)[2:] == 'b101'); format yields '-101'.
    """
    decimal_int = int(decimal_str)
    return format(decimal_int, 'b')
def hex_to_dec(hex_str):
    """Convert a hexadecimal string (e.g. "64") to its decimal string ("100")."""
    value = int(hex_str, 16)
    return str(value)
def dec_to_hex(decimal_str):
    """Convert a decimal string to an uppercase hex string (no "0x" prefix).

    Uses format(..., 'X') instead of hex(...)[2:].upper(), which mangled
    negative numbers (hex(-255)[2:] == 'xff'); format yields '-FF'.
    """
    decimal_int = int(decimal_str)
    return format(decimal_int, 'X')
def bin_to_hex(binary_str):
    """Convert a binary string to uppercase hexadecimal via its integer value."""
    return hex(int(binary_str, 2))[2:].upper()
def hex_to_bin(hex_str):
    """Convert a hexadecimal string to binary via its integer value."""
    return bin(int(hex_str, 16))[2:]
# Binary input, printed as a decimal string.
binary_str = "1101"
decimal_str = bin_to_dec(binary_str)
print(f"{binary_str} in binary is {decimal_str} in decimal.")
# Decimal string converted to a binary string.
decimal_str = "10"
binary_str = dec_to_bin(decimal_str)
print(f"{decimal_str} in decimal is {binary_str} in binary.")
# Hexadecimal string converted to a decimal string.
hex_str = "64"
decimal_str = hex_to_dec(hex_str)
print(f"{hex_str} in hexadecimal is {decimal_str} in decimal.")
# Decimal string converted to a hexadecimal string.
decimal_str = "100"
hex_str = dec_to_hex(decimal_str)
print(f"{decimal_str} in decimal is {hex_str} in hexadecimal.")
# Binary string converted to a hexadecimal string.
binary_str = "1100100"
hex_str = bin_to_hex(binary_str)
print(f"{binary_str} in binary is {hex_str} in hexadecimal.")
# Hexadecimal string converted to a binary string.
hex_str = "64"
binary_str = hex_to_bin(hex_str)
print(f"{hex_str} in hexadecimal is {binary_str} in binary.")
| ShawnVon98/RaydarTest | RaydarTest.py | RaydarTest.py | py | 1,942 | python | en | code | 0 | github-code | 90 |
26717472414 | """
Toy Python Knowledgebase
"""
class Knowledgebase():
def __init__(self):
self._entities = {}
self._constraints = {}
self._inverses = {}
self._graph = KnowledgeGraph()
def ent(self, name, type=None):
""" Adds a new Entity to the graph. """
self._entities[name] = Entity(self, name, type)
def get(self, name):
""" Returns a graph entity. """
if name not in self._entities:
raise EntityNotFound(
"No entity found with name '{}'".format(name)
)
return self._entities[name]
def rel(self, subject, rel, target, certainty=1, scope=None):
""" Adds a relationship between two objects. """
scope = scope or self._graph
if rel in self._inverses:
# Invert the relationship if necessary
sub = self.get(target)
tar = self.get(subject)
rel = self._inverses[rel]
else:
sub = self.get(subject)
tar = self.get(target)
constraints = self._get_constraints(rel)
for c in constraints:
if c[0] is not None and sub.type != c[0]:
raise ConstraintException(
"Subject of {} must be of type {}; got entity of type"
" {} instead"
.format(rel, c[0], sub.type)
)
if c[1] is not None and tar.type != c[1]:
raise ConstraintException(
"Target of {} must be of type {}; got entity of type "
" {} instead"
.format(rel, c[1], tar.type)
)
scope.add(sub.name, rel, tar.name, certainty)
def rels(self, subject=None, rel=None, target=None, certainty=None,
scope=None):
""" Returns relationships which meet passed criteria. """
scope = scope or self._graph
data = scope.get(subject, rel, target, certainty)
inverse = self._get_inverse_rels(
subject, rel, target, certainty, scope
)
return join_sets(data, inverse)
def constrain(self, rel, subject=None, target=None):
""" Constrains a relationship to types. """
self._get_constraints(rel).append((subject, target))
def inverse(self, rel, inverse_of):
""" Creates an inverse relationship. """
self._inverses[rel] = inverse_of
self._inverses[inverse_of] = rel
def _get_constraints(self, rel):
if rel not in self._constraints:
self._constraints[rel] = []
return self._constraints[rel]
def _get_inverse_rels(self, subject=None, rel=None, target=None,
certainty=None, scope=None):
if rel not in self._inverses:
return None
inverse_rel = self._inverses[rel]
inverse = scope.get(target, inverse_rel, subject, certainty)
if inverse is not None:
data = []
for i in inverse:
data.append((i[1], i[0], i[2])) # Invert relationship.
return data
class Entity():
def __init__(self, parent, name, type=None):
self._type = type
self._name = name
self._kb = parent
self._graph = KnowledgeGraph()
self._shares = []
def rel(self, subject=None, rel=None, target=None, certainty=1):
subject = subject or self.name
self._kb.rel(subject, rel, target, certainty, self._graph)
def rels(self, subject=None, rel=None, target=None, certainty=None):
mine = self._kb.rels(subject, rel, target, certainty, self._graph)
glob = self._kb.rels(subject, rel, target, certainty)
return join_sets(mine, glob)
def local(self, subject=None, rel=None, target=None, certainty=None):
return self._kb.rels(subject, rel, target, certainty, self._graph)
def has(self, rel=None, target=None, certainty=None):
result = self.rels(self.name, rel, target, certainty)
if len(result) > 0:
return True
return False
def knows(self, subject=None, rel=None, target=None, certainty=1):
""" This entity is certain this relationship is true. """
subject = subject or self.name
if self.rejects(subject, rel, target):
return False
if self.certain(subject, rel, target):
return True
result = self.rels(subject, rel, target, certainty)
if len(result) > 0:
return True
return False
def suspects(self, subject=None, rel=None, target=None):
""" This entity suspects but doesn't know relationship is true. """
subject = subject or self.name
return self.knows(subject, rel, target, -1)
def thinks(self, subject=None, rel=None, target=None):
""" The entity either suspects or knows the relationship is true. """
subject = subject or self.name
return self.knows(subject, rel, target) or \
self.suspects(subject, rel, target)
def rejects(self, subject=None, rel=None, target=None):
"""
Entity won't even entertain a notion within global scope if it knows
it's false.
"""
subject = subject or self.name
rejection = self.local(subject, rel, target, 0)
if len(rejection) > 0:
return True
def certain(self, subject=None, rel=None, target=None):
"""
Entity won't even entertain a notion within global scope if it knows
it's true.
"""
subject = subject or self.name
sure = self.local(subject, rel, target, 0)
if len(sure) > 0:
return True
@property
def type(self):
return self._type
@property
def name(self):
return self._name
class KnowledgeGraph():
"""
The graph stores relationships by key, with a tuple of the subject, the
target, and the certainty of each relationship.
Hamlet is located in Elsinore:
{'located':('hamlet', 'elsinore', 1)}
Elsinore is the location of Hamlet:
{'location':('elsinore', 'hamlet', 1)}
"""
def __init__(self):
self._rels = {}
def add(self, subject=None, rel=None, target=None, certainty=1):
previous = self.get(subject, rel, target)
if previous: # Don't duplicate.
for p in previous:
self._rels[rel].remove(p)
# Update the certainty.
self._rels[rel].append((p[0], p[1], certainty))
else:
if rel not in self._rels:
self._rels[rel] = []
self._rels[rel].append((subject, target, certainty))
def get(self, subject=None, rel=None, target=None, certainty=None):
if rel is None or rel not in self._rels:
return None
out = []
entries = self._rels[rel]
for i in entries:
if subject and i[0] != subject:
continue
if target and i[1] != target:
continue
if certainty is not None and i[2] != certainty:
continue
out.append(i)
if len(out) == 0:
return None
return out
def join_sets(*sets):
o = []
for s in sets:
if s is not None:
o += s
return o
class ConstraintException(Exception): pass
class EntityNotFound(Exception): pass
| Yuffster/toykb | kb.py | kb.py | py | 7,440 | python | en | code | 1 | github-code | 90 |
73094946855 |
import Adafruit_DHT
#import adafruit_dht
#from board import *
DHT_SENSOR = Adafruit_DHT.DHT11
DHT_PIN = 17
#SENSOR_PIN = D4
#dht22 = adafruit_dht.DHT22(SENSOR_PIN, use_pulseio=False)
def get_data():
humidity, temperature = Adafruit_DHT.read_retry(DHT_SENSOR, DHT_PIN)
#temperature = dht22.temperature
#humidity = dht22.humidity
if humidity is not None and temperature is not None:
print("Temp={0:0.1f}*C Humidity={1:0.1f}%".format(temperature, humidity))
else:
print("Failed to retrieve data from humidity sensor")
return temperature, humidity
ans = get_data() | heavenluv/AC-Remote-Control | webapp/humidity.py | humidity.py | py | 607 | python | en | code | 0 | github-code | 90 |
30137664918 | # File: semantics.py
# Template file for Informatics 2A Assignment 2:
# 'A Natural Language Query System in Python/NLTK'
# John Longley, November 2012
# Revised November 2013 and November 2014 with help from Nikolay Bogoychev
# Revised October 2015 by Toms Bergmanis
# Revised October 2017 by Chunchuan Lyu with help from Kiniorski Filip
# PART A: Processing statements
from nltk.corpus import brown
def add(lst, item):
    """Append *item* to *lst* unless it is already present
    (order-preserving de-duplication)."""
    if item not in lst:
        lst.append(item)
class Lexicon:
    """A store of known word stems, each tagged with a part-of-speech category."""

    def __init__(self):
        # (stem, category) pairs in insertion order; duplicates are allowed.
        self.TokenList = []

    def add(self, stem, cat):
        """Record *stem* as belonging to category *cat*."""
        self.TokenList.append((stem, cat))

    def getAll(self, cat):
        """Return the distinct stems of category *cat*, in first-seen order."""
        stems = []
        for stem, tag in self.TokenList:
            if tag == cat and stem not in stems:
                stems.append(stem)
        return stems
#lx = Lexicon()
#lx.add("John", "P")
#lx.add("Marry", "P")
#lx.add("like", "T")
#lx.add("like", "P")
#lx.getAll("P")
class FactBase:
    """Stores unary and binary relational facts as predicate tuples."""

    def __init__(self):
        self.featuresList = []

    def addUnary(self, pred, e1):
        """Record the unary fact pred(e1)."""
        self.featuresList.append((pred, e1))

    def addBinary(self, pred, e1, e2):
        """Record the binary fact pred(e1, e2)."""
        self.featuresList.append((pred, e1, e2))

    def queryUnary(self, pred, e1):
        """Return True iff pred(e1) has been recorded."""
        return any(fact == (pred, e1) for fact in self.featuresList)

    def queryBinary(self, pred, e1, e2):
        """Return True iff pred(e1, e2) has been recorded (argument order matters)."""
        return any(fact == (pred, e1, e2) for fact in self.featuresList)
#fb = FactBase()
#fb.addUnary("duck", "John")
#print(fb.queryUnary("duck", "John"))
#print(fb.queryUnary("duck", "Adelina"))
#print(fb.queryBinary("love", "Mary", "John")) // note that in this case e1 and e2 are not interchangable
import re
from nltk.corpus import brown
def verb_stem(s):
    """Return the infinitive stem of a 3rd-person-singular verb form *s*,
    or '' when no rule applies or the Brown corpus does not attest the pair.

    A cascade of suffix rules proposes a candidate stem; the candidate is
    accepted only if *s* appears tagged VBZ and the stem tagged VB in the
    Brown corpus.

    NOTE(review): inside [...] parentheses are literal characters, not
    groups, so classes like [^(ch)(sh)aeiou] do not mean "not ch/sh/vowel"
    as they appear intended — confirm against the assignment spec.
    NOTE(review): scanning brown.tagged_words() twice per call is very slow;
    consider caching the tagged-word pairs in a set if this is called often.
    """
    if (re.match("\w*([^(ch)(sh)aeiou])s$", s)):
        # plain "...Xs" form: drop the trailing 's' (e.g. "runs" -> "run")
        infinitive = s[:-1]
    elif(re.match("\w*([aeiou])ys$", s)):
        # vowel + "ys" (e.g. "plays" -> "play")
        infinitive = s[:-1]
    elif (re.match("\w*([^aeiou])ies$", s) and len(s) >=5):
        # consonant + "ies" on longer words (e.g. "flies" -> "fly")
        infinitive = s[:-3] + "y"
    elif (re.match("[^aeiou]ies$", s)):
        # short 4-letter "Xies" words (e.g. "dies" -> "die")
        infinitive = s[:-1]
    elif (re.match("\w*([(ch)(sh)(zz)(ss)ox])es$", s)):
        # sibilant-style endings take "es" (e.g. "catches" -> "catch")
        infinitive = s[:-2]
    elif (re.match("\w*([^s][s])es$", s) or re.match("\w*([^z][z])es$", s)):
        # single s/z before "es" (e.g. "loses" -> "lose")
        infinitive = s[:-1]
    elif (re.match("has", s)):
        # irregular: "has" -> "have"
        infinitive = "have"
    elif (re.match("\w*([^(ch)(sh)iosxz])es$", s)):
        # remaining "es" forms (e.g. "likes" -> "like")
        infinitive = s[:-1]
    else:
        infinitive = ""
    # Accept only corpus-attested (VBZ form, VB stem) pairs.
    if ((s, 'VBZ') in brown.tagged_words() and (infinitive, 'VB') in brown.tagged_words()):
        return infinitive
    else:
        return ''
#verb_stem("flys")
#verb_stem("flies")
def add_proper_name(w, lx):
    """Add *w* to lexicon *lx* as a proper name ('P') if it starts with an
    ASCII uppercase letter.

    Returns '' on success, or an error message otherwise. An empty string
    is rejected with a message instead of raising IndexError (the original
    crashed on ``w[0]`` for empty input).
    """
    if w and 'A' <= w[0] <= 'Z':
        lx.add(w, 'P')
        return ''
    return w + " isn't a proper name"
def process_statement (lx,wlist,fb):
    """analyses a statement and updates lexicon and fact base accordingly;
    returns '' if successful, or error message if not."""
    # Grammar for the statement language is:
    #   S -> P is AR Ns | P is A | P Is | P Ts P
    #   AR -> a | an
    # We parse this in an ad hoc way.
    # The first word must always be a proper name (the subject).
    msg = add_proper_name (wlist[0],lx)
    if (msg == ''):
        if (wlist[1] == 'is'):
            if (wlist[2] in ['a','an']):
                # "John is a duck" -> noun entry + unary fact N_duck(John)
                lx.add (wlist[3],'N')
                fb.addUnary ('N_'+wlist[3],wlist[0])
            else:
                # "John is happy" -> adjective entry + unary fact A_happy(John)
                lx.add (wlist[2],'A')
                fb.addUnary ('A_'+wlist[2],wlist[0])
        else:
            # Verb statement: reduce the 3sg form to its stem first.
            stem = verb_stem(wlist[1])
            if (len(wlist) == 2):
                # "John flies" -> intransitive verb + unary fact I_fly(John)
                lx.add (stem,'I')
                fb.addUnary ('I_'+stem,wlist[0])
            else:
                # "John likes Mary" -> the object must also be a proper name.
                msg = add_proper_name (wlist[2],lx)
                if (msg == ''):
                    lx.add (stem,'T')
                    fb.addBinary ('T_'+stem,wlist[0],wlist[2])
    return msg
# End of PART A.
| adelliinaa/ProcessingNaturalLanguages-CW2 | statements.py | statements.py | py | 4,259 | python | en | code | 0 | github-code | 90 |
17862643547 | import animal
from math import sin, cos
class Aquarium:
    """A rectangular tank tracking animals and their 2-D positions.

    Positions are [x, y] pairs: x runs along ``length`` and y along
    ``Height``. Each tracked animal must expose ``speed`` and ``angle``
    attributes.
    """

    def __init__(self, height, length):
        self.Height = height
        self.length = length
        # Maps each animal object to its current [x, y] position.
        self.animals = {}

    def move(self):
        """Advance every tracked animal by one simulation step."""
        for occupant in self.animals:
            self.animals[occupant] = self.define_move(self.animals[occupant], occupant)

    def define_move(self, place, animal):
        """Return the next [x, y] for *animal* starting from *place*.

        A coordinate that would leave the tank is mirrored back inside
        (e.g. overshooting the right wall by d ends up d inside it), and
        the animal's angle is flipped to ``360 - angle`` on any bounce.

        NOTE(review): the angle looks degree-based (360) while sin/cos take
        radians — preserved exactly as the original behaved.
        """
        next_x, next_y = place
        next_x += animal.speed * sin(animal.angle)
        next_y += animal.speed * cos(animal.angle)
        bounced = False
        if next_x > self.length:
            bounced = True
            # Reflect overshoot past the right wall back inside.
            next_x = 2 * self.length - next_x
        if next_y > self.Height:
            bounced = True
            next_y = 2 * self.Height - next_y
        if next_x < 0:
            bounced = True
            next_x = abs(next_x)
        if next_y < 0:
            bounced = True
            next_y = abs(next_y)
        if bounced:
            animal.angle = 360 - animal.angle
        return [next_x, next_y]
| og134/aquarim | Aquarium.py | Aquarium.py | py | 1,653 | python | en | code | 0 | github-code | 90 |
1442450456 | class Solution:
def findOrder(self, numCourses: int, prerequisites: List[List[int]]) -> List[int]:
in_degree = [0] * numCourses
adj_list = [[] for x in range(numCourses)]
queue = []
final = []
counter = 0
for course, prereq in prerequisites:
in_degree[course] += 1
adj_list[prereq].append(course)
for i in range(numCourses):
if in_degree[i] == 0:
queue.append(i)
while(queue != []):
node = queue.pop(0)
final.append(node)
counter += 1
for dependent in adj_list[node]:
in_degree[dependent] += -1
if in_degree[dependent] == 0:
queue.append(dependent)
if counter != numCourses:
return []
return final | tanyajha16/6Companies30Days | Intuit/Construct Schedule II.py | Construct Schedule II.py | py | 904 | python | en | code | 1 | github-code | 90 |
28151382291 | import PyDelFEM2 as dfm2
import PyDelFEM2.gl.glfw
import numpy
########################################
def example1():
    """PyDelFEM2 demo: seam two cloth squares together, run a PBD cloth
    simulation, and collide it against (1) an analytic sphere SDF, (2) a
    triangle-mesh sphere, and (3) a posed glTF character, each in its own
    interactive GLFW window.
    """
    # Two unit squares in the 2-D CAD plane; they are later placed at
    # z = +0.5 and z = -0.5 and sewn along matching edges.
    cad = dfm2.Cad2D()
    cad.add_polygon([+0,0, +1,0, +1,+1, 0,+1.0])
    cad.add_polygon([+2,0, +3,0, +3,+1, 2,+1.0])
    mesher = dfm2.Mesher_Cad2D(edge_length=0.03)
    mesh = mesher.meshing(cad)
    ####
    pbd = dfm2.PBD_Cloth()
    # pbd.param_gravity_y = -0.1
    pbd.dt = 0.08
    pbd.updated_topology(mesh)
    # Rigid 2D->3D transforms for the front and (rotated) back squares.
    trans0 = dfm2.Trans_Rigid2DTo3D()
    trans0.org2 = numpy.array([0.5,0.5])
    trans0.org3 = numpy.array([0.0,0.0,0.5])
    trans1 = dfm2.Trans_Rigid2DTo3D()
    trans1.org2 = numpy.array([2.5,0.5])
    trans1.org3 = numpy.array([0.0,0.0,-0.5])
    trans1.R = dfm2.util.mat3_rot_cartesian(numpy.array([0,3.1415,0]))
    npIndP_Face0 = mesher.points_on_faces([0],cad)
    pbd.vec_val[npIndP_Face0] = trans0.trans(pbd.dmsh.np_pos[npIndP_Face0])
    npIndP_Face1 = mesher.points_on_faces([1],cad)
    pbd.vec_val[npIndP_Face1] = trans1.trans(pbd.dmsh.np_pos[npIndP_Face1])
    # Pair up points on opposing CAD edges to form the seams (one edge list
    # reversed so vertices match up).
    npIndP_Edge0a = mesher.points_on_one_edge(1,True,cad)
    npIndP_Edge0b = mesher.points_on_one_edge(7,True,cad)
    npIndP_Seam0 = numpy.vstack([npIndP_Edge0a,npIndP_Edge0b[::-1]]).transpose()
    npIndP_Edge1a = mesher.points_on_one_edge(3,True,cad)
    npIndP_Edge1b = mesher.points_on_one_edge(5,True,cad)
    npIndP_Seam1 = numpy.vstack([npIndP_Edge1a,npIndP_Edge1b[::-1]]).transpose()
    pbd.elems_seam = numpy.vstack([npIndP_Seam0,npIndP_Seam1]).copy().astype(numpy.uint32) # to allign data
    mesh2 = dfm2.Mesh(np_pos=pbd.vec_val,np_elm=mesh.np_elm)
    mesh3 = dfm2.Mesh(np_pos=pbd.vec_val, np_elm=pbd.elems_seam, elem_type=dfm2.LINE)
    # --- 1) collide against an analytic sphere SDF -------------------------
    pbd.sdf = dfm2.SDF()
    pbd.sdf.add( dfm2.CppSDF3_Sphere(0.3, [0.0, 0.0, 0.0], True) )
    axis = dfm2.gl.AxisXYZ(1.0)
    dfm2.gl.glfw.winDraw3d([pbd,pbd.sdf,mesh2,mesh3,axis])
    # --- 2) collide against a triangle-mesh sphere -------------------------
    msh_trg = dfm2.Mesh()
    msh_trg.set_sphere(0.3, 16, 16)
    pbd.sdf = dfm2.Collider_PointsToMeshTri3D()
    pbd.sdf.set_mesh(msh_trg)
    dfm2.gl.glfw.winDraw3d([pbd,pbd.sdf,mesh2,mesh3,msh_trg,axis])
    # --- 3) collide against a posed, skinned glTF character ----------------
    # was PyDelFEM2.CppGLTF(): use the dfm2 alias consistently with the rest
    # of this function (PyDelFEM2 only resolved via the PyDelFEM2.gl.glfw import)
    gltf = dfm2.CppGLTF()
    gltf.read("../test_inputs/CesiumMan.glb")
    np_pos0, np_elm, np_rigw, np_rigj = dfm2.CppGLTF_GetMeshInfo(gltf, 0, 0)
    np_pos = np_pos0.copy()
    bones = dfm2.CppGLTF_GetBones(gltf,0)
    bones.set_rotation_bryant(0, [-3.1415 * 0.5, 0.0, 0.0])
    bones.set_translation(0, [0.0, 0.0, +0.2])
    dfm2.update_rig_skin(np_pos,
                         np_pos0, np_elm, bones, np_rigw, np_rigj)
    msh_trg = dfm2.Mesh(np_pos,np_elm,dfm2.TRI)
    pbd.sdf = dfm2.Collider_PointsToMeshTri3D()
    pbd.sdf.set_mesh(msh_trg)
    dfm2.gl.glfw.winDraw3d([pbd,pbd.sdf,mesh2,mesh3,msh_trg,axis], winsize=(400, 300))
if __name__ == "__main__":
    # Run the interactive demo only when executed as a script.
    example1()
| nobuyuki83/pydelfem2 | examples_py/53_pbd_cloth.py | 53_pbd_cloth.py | py | 2,681 | python | en | code | 10 | github-code | 90 |
18263878569 | from bisect import bisect_left
import string
# For each lowercase letter, a sorted list of the 1-based positions where it
# currently occurs in S.
dic={c:[] for c in string.ascii_lowercase}
N=int(input())
S=list(input())
Q=int(input())
for i in range(len(S)):
    dic[S[i]].append(i+1)
for i in range(Q):
    a,b,c=map(str,input().split())
    if a=='1':
        # Query type 1: set position b (1-based) to character c.
        if S[int(b)-1]==c:
            continue
        b=int(b)
        f=S[b-1]
        # Remove b from the old character's sorted position list and insert
        # it (keeping sorted order) into the new character's list.
        d=bisect_left(dic[f],b)
        e=bisect_left(dic[c],b)
        dic[f].pop(d)
        dic[c].insert(e,b)
        S[b-1]=c
    else:
        # Query type 2: count distinct characters in S[b..c] (1-based,
        # inclusive).
        ans=0
        b,c=int(b),int(c)
        for j in string.ascii_lowercase:
            # Letter j occurs in [b, c] iff its first position >= b is <= c.
            d=bisect_left(dic[j],b)
            if d<len(dic[j]):
                if dic[j][d]<=c:
                    ans+=1
        print(ans)
43978807060 | import os
import json
from web3 import Web3
from pathlib import Path
from typing import Any, Dict, List
# Maps exchange_vN keys to the protocol id SimulatorV1 expects: all Uniswap
# V2 variants share id 0, all V3 variants share id 1.
PROTOCOL_TO_ID = {
    'uniswap_v2': 0,
    'sushiswap_v2': 0,
    'uniswap_v3': 1,
    'sushiswap_v3': 1,
}
DIR = os.path.dirname(os.path.abspath(__file__))
ABI_FILE_PATH = Path(DIR) / 'SimulatorV1.json'
# ABI loaded once at import time.
# NOTE(review): the file handle from open() is never closed — consider
# json.loads(ABI_FILE_PATH.read_text()).
SIMULATOR_ABI = json.load(open(ABI_FILE_PATH, 'r'))['abi']
class OnlineSimulator:
    """
    This class will be used temporarily before an offline simulator is built.
    Using an online simulator is easy, but comes with a cost of latency:
    every simulation is an eth_call round-trip to an RPC node.
    """
    def __init__(self,
                 rpc_endpoints: Dict[str, str],
                 tokens: Dict[str, Dict[str, List[str or int]]],
                 pools: List[Dict[str, Any]],
                 contracts: Dict[str, str],
                 handlers: Dict[str, Dict[str, str]]):
        """
        :param rpc_endpoints: refer to data.dex.DEX
        :param tokens: refer to data.dex.DEX
        :param pools: refer to data.dex.DEX
        :param contracts: the dict of address of SimulatorV1 contract deployed
                          ex) {'ethereum': '<ADDRESS>', 'polygon': '<ADDRESS>', ... }
        :param handlers: dict of handler addresses for uniswap_v2, sushiswap_v2, uniswap_v3, sushiswap_v3, etc...
                         For simulations an Uniswap V2 variant uses Factory, and an Uniswap V3 variant uses QuoterV2 to simulate swaps.
                         ex) {'ethereum': {'uniswap_v2': '<FACTORY_ADDRESS>', ... }, ... }
        """
        self.rpc_endpoints = rpc_endpoints
        self.tokens = tokens
        self.pools = pools
        self.contracts = contracts
        self.handlers = handlers
        # extract keys from tokens, pools (sorted so id assignment below is
        # deterministic across runs)
        self.chains_list = sorted(list(tokens.keys()))
        self.exchanges_list = sorted(set([p['exchange'] for p in pools]))
        tokens_list = []
        for exchange, tokens_dict in tokens.items():
            tokens_list.extend(list(tokens_dict.keys()))
        self.tokens_list = sorted(list(set(tokens_list)))
        # map chains, exchanges, tokens to int id value
        # this is used to map chains/exchanges/tokens to numpy array index values
        self.chain_to_id = {k: i for i, k in enumerate(self.chains_list)}
        self.exchange_to_id = {k: i for i, k in enumerate(self.exchanges_list)}
        self.token_to_id = {k: i for i, k in enumerate(self.tokens_list)}
        # One Web3 connection and one SimulatorV1 contract handle per chain.
        self.web3 = {k: Web3(Web3.HTTPProvider(v)) for k, v in rpc_endpoints.items()}
        self.sim = {
            chain: self.web3[chain].eth.contract(address=self.contracts[chain], abi=SIMULATOR_ABI)
            for chain in self.chains_list
        }

    def make_params(self,
                    amount_in: float,
                    buy_path: List[List[int]],
                    sell_path: List[List[int]],
                    buy_pools: List[int],
                    sell_pools: List[int]) -> List[Dict[str, Any]]:
        """Build the ordered SimulatorV1 swap-param list: all buy-side hops
        first, then the sell-side hops (_make_sell_params reverses its rows
        internally)."""
        params = []
        params.extend(self._make_buy_params(amount_in, buy_path, buy_pools))
        params.extend(self._make_sell_params(sell_path, sell_pools))
        return params

    def _make_buy_params(self,
                         amount_in: float,
                         path: List[List[int]],
                         pools: List[int]):
        """Translate buy-side path rows into SimulatorV1 param dicts.

        All-zero rows are skipped. Within a row, indices 2 and 3 are ids
        into ``self.tokens_list`` for token-in / token-out; the other
        columns are not read here (presumably chain/exchange ids — confirm
        against the path builder). Only the first hop carries ``amount_in``;
        later hops pass 0, the actual amount presumably being chained
        on-contract.
        """
        params_list = []
        for i in range(len(path)):
            _path = path[i]
            if not sum(_path):
                continue
            _pool_idx = pools[i]
            pool = self.pools[_pool_idx]
            chain = pool['chain']
            exchange = pool['exchange']
            version = pool['version']
            exchange_key = f'{exchange}_v{version}'
            token_in = self.tokens_list[_path[2]]
            token_out = self.tokens_list[_path[3]]
            amount_in_scaled = amount_in if i == 0 else 0
            params = {
                'protocol': PROTOCOL_TO_ID[exchange_key],
                'handler': self.handlers[chain][exchange_key],
                'tokenIn': self.tokens[chain][token_in][0],
                'tokenOut': self.tokens[chain][token_out][0],
                'fee': pool['fee'],
                'amount': int(amount_in_scaled),
            }
            params_list.append(params)
        return params_list

    def _make_sell_params(self, path: List[List[int]], pools: List[int]):
        """Translate sell-side path rows into SimulatorV1 param dicts.

        Mirror image of _make_buy_params: token-in/out indices are swapped
        (3 then 2), every hop's amount is 0, and the resulting list is
        reversed so the sell hops run back toward the starting token.
        """
        params_list = []
        for i in range(len(path)):
            _path = path[i]
            if not sum(_path):
                continue
            _pool_idx = pools[i]
            pool = self.pools[_pool_idx]
            chain = pool['chain']
            exchange = pool['exchange']
            version = pool['version']
            exchange_key = f'{exchange}_v{version}'
            # note the index difference with buy_params
            token_in = self.tokens_list[_path[3]]
            token_out = self.tokens_list[_path[2]]
            params = {
                'protocol': PROTOCOL_TO_ID[exchange_key],
                'handler': self.handlers[chain][exchange_key],
                'tokenIn': self.tokens[chain][token_in][0],
                'tokenOut': self.tokens[chain][token_out][0],
                'fee': pool['fee'],
                'amount': 0,  # no need to set amount
            }
            params_list.append(params)
        return list(reversed(params_list))

    def simulate(self, chain: str, params: List[Dict[str, Any]]) -> int:
        """Run SimulatorV1.simulateSwapIn on *chain* via a read-only
        ``.call()`` (no transaction, no gas spent) and return the simulated
        final output amount."""
        return self.sim[chain].functions.simulateSwapIn(params).call()
if __name__ == '__main__':
    # Manual smoke test against mainnet: simulate a USDT -> ETH -> USDT
    # round trip at a range of input sizes and print the implied profit.
    import os
    from dotenv import load_dotenv
    from configs import RPC_ENDPOINTS
    from addresses.ethereum import TOKENS, POOLS, SIMULATION_HANDLERS
    load_dotenv(override=True)
    ETHEREUM_SIMULATOR_ADDRESS = os.getenv('ETHEREUM_SIMULATOR_ADDRESS')
    chain = 'ethereum'
    rpc_endpoints = {chain: RPC_ENDPOINTS[chain]}
    tokens = {chain: TOKENS}
    pools = [pool for pool in POOLS if pool['chain'] == chain]
    contracts = {chain: ETHEREUM_SIMULATOR_ADDRESS}
    handlers = {chain: SIMULATION_HANDLERS}
    sim = OnlineSimulator(rpc_endpoints, tokens, pools, contracts, handlers)
    """
    ETH/USDT
    - Buy: USDT -> ETH
    - Sell: ETH -> USDT
    Buy, sell should work like CEXs
    """
    # Amounts are in USDT base units (6 decimals).
    for i in range(900, 1300, 100):
        amount_in = i * 10 ** 6
        print('==========')
        print('Amount in: ', amount_in)
        # Path rows: indices 2/3 are token-in/token-out ids (see
        # OnlineSimulator._make_buy_params); the all-zero row is skipped.
        buy_path = [[0, 1, 5, 2, 1], [0, 0, 0, 0, 0]]
        sell_path = [[0, 0, 5, 2, 1], [0, 0, 0, 0, 0]]
        buy_pools = [0]
        sell_pools = [9]
        params = sim.make_params(amount_in, buy_path, sell_path, buy_pools, sell_pools)
        for param in params:
            print(param)
        """
        SUS3ETHUSDT/UNI3ETHUSDT
        - Buy: SUS3ETHUSDT
        - Sell: UNI3ETHUSDT
        Output:
        {'protocol': 1, 'handler': '0x64e8802FE490fa7cc61d3463958199161Bb608A7', 'tokenIn': '0xdAC17F958D2ee523a2206206994597C13D831ec7', 'tokenOut': '0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2', 'fee': 500, 'amount': 20000000000}
        {'protocol': 1, 'handler': '0x61fFE014bA17989E743c5F6cB21bF9697530B21e', 'tokenIn': '0xC02aaA39b223FE8D0A0e5C4F27eAD9083C756Cc2', 'tokenOut': '0xdAC17F958D2ee523a2206206994597C13D831ec7', 'fee': 500, 'amount': 0}
        """
        simulated_amount_out = sim.simulate(chain, params)
        print(f'Simulated amount out: {simulated_amount_out / 10 ** 6} USDT')
        simulated_profit_in_usdt = (simulated_amount_out - amount_in) / 10 ** 6
        print(f'Simulated profit: {simulated_profit_in_usdt} USDT')
16517285628 | # coding=utf-8
#
# 기업 open_api 이용하기
#
from urllib.request import urlopen
import pandas as pd
from bs4 import BeautifulSoup
import webbrowser
API_KEY="2672b31cfb74118a9f7c11cfcce685bbeef77f19"
company_code="014680"
url = "http://dart.fss.or.kr/api/search.xml?auth="+API_KEY+"&crp_cd="+company_code+"&start_dt=19990101&bsn_tp=A001&bsn_tp=A002&bsn_tp=A003"
resultXML=urlopen(url)
result=resultXML.read()
xmlsoup=BeautifulSoup(result,'html.parser')
print(result)
data = pd.DataFrame()
te = xmlsoup.findAll("list")
print('te', te)
for t in te:
temp = pd.DataFrame(([[t.crp_cls.string, t.crp_nm.string, t.crp_cd.string, t.rpt_nm.string,
t.rcp_no.string, t.flr_nm.string, t.rcp_dt.string, t.rmk.string]]),
columns=["crp_cls", "crp_nm", "crp_cd", "rpt_nm", "rcp_no", "flr_nm", "rcp_dt", "rmk"])
data = pd.concat([data, temp])
print(data)
data=data.reset_index(drop=True)
url2="http://dart.fss.or.kr/dsaf001/main.do?rcpNo="+data['rcp_no'][0]
print("url2",url2)
webbrowser.open(url2) | picopoco/gongsi | gapi.py | gapi.py | py | 1,051 | python | en | code | 0 | github-code | 90 |
40128212015 | # class DeluxePizza:
# number_of_pizzas = 0
#
# def __init__(self, size_of_pizza = "s", cheese_toppings = 0, pepperoni_toppings = 0, mushroom_toppings = 0,
# veggie_toppings = 0, stuffed_with_cheese = False):
# self.size_of_pizza = size_of_pizza
# self.cheese_toppings = cheese_toppings
# self.pepperoni_toppings = pepperoni_toppings
# self.mushroom_toppings = mushroom_toppings
# self.veggie_toppings = veggie_toppings
# self.stuffed_with_cheese = stuffed_with_cheese
# DeluxePizza.number_of_pizzas += 1
#
# def get_size_of_pizza(self):
# """Accessor method of size of pizza attribute."""
# if self.size_of_pizza.lower() == "s":
# return "small"
# elif self.size_of_pizza.lower() == "m":
# return "medium"
# else:
# return "large"
#
# def get_cheese_toppings(self):
# """Accessor method of cheese toppings attribute."""
# return self.cheese_toppings
#
# def get_pepperoni_toppings(self):
# """Accessor method of pepperoni toppings attribute."""
# return self.pepperoni_toppings
#
# def get_mushroom_toppings(self):
# """Accessor method of mushroom toppings attribute."""
# return self.mushroom_toppings
#
# def get_veggie_toppings(self):
# """Accessor method of veggie toppings attribute."""
# return self.veggie_toppings
#
# def get_stuffed_with_cheese(self):
# """Accessor method of stuffed with cheese attribute."""
# return self.stuffed_with_cheese
#
# def get_number_of_pizzas(self):
# """Accessor method of number of pizzas attribute."""
# return self.number_of_pizzas
#
# def set_size_of_pizza(self, size_of_pizza):
# """Mutator method of size of pizza attribute."""
# self.size_of_pizza = size_of_pizza
#
# def set_cheese_toppings(self, cheese_toppings):
# """Mutator method of cheese toppings attribute."""
# self.cheese_toppings = cheese_toppings
#
# def set_pepperoni_toppings(self, pepperoni_toppings):
# """Mutator method of pepperoni toppings attribute."""
# self.pepperoni_toppings = pepperoni_toppings
#
# def set_mushroom_toppings(self, mushroom_toppings):
# """Mutator method of mushroom toppings attribute."""
# self.mushroom_toppings = mushroom_toppings
#
# def set_veggie_toppings(self, veggie_toppings):
# """Mutator method of veggie toppings attribute."""
# self.veggie_toppings = veggie_toppings
#
# def set_stuffed_with_cheese(self, stuffed_with_cheese):
# """Mutator method of stuffed with cheese attribute."""
# self.stuffed_with_cheese = stuffed_with_cheese
#
# def calc_cost(self):
# if not self.stuffed_with_cheese:
# if self.size_of_pizza.lower() == "s":
# cost_of_pizza = 10 + (
# self.cheese_toppings + self.pepperoni_toppings + self.mushroom_toppings) * 2 + \
# self.veggie_toppings * 3
# elif self.size_of_pizza.lower() == "m":
# cost_of_pizza = 12 + (
# self.cheese_toppings + self.pepperoni_toppings + self.mushroom_toppings) * 2 + \
# self.veggie_toppings * 3
# else:
# cost_of_pizza = 14 + (
# self.cheese_toppings + self.pepperoni_toppings + self.mushroom_toppings) * 2 + \
# self.veggie_toppings * 3
# return cost_of_pizza
# else:
# if self.size_of_pizza.lower() == "s":
# cost_of_pizza = 10 + (
# self.cheese_toppings + self.pepperoni_toppings + self.mushroom_toppings) * 2 + \
# self.veggie_toppings * 3 + 2
# elif self.size_of_pizza.lower() == "m":
# cost_of_pizza = 12 + (
# self.cheese_toppings + self.pepperoni_toppings + self.mushroom_toppings) * 2 + \
# self.veggie_toppings * 3 + 4
# else:
# cost_of_pizza = 14 + (
# self.cheese_toppings + self.pepperoni_toppings + self.mushroom_toppings) * 2 + \
# self.veggie_toppings * 3 + 6
# return cost_of_pizza
#
# def __str__(self):
# return f"\nPizza # {self.get_number_of_pizzas()}" \
# f"\n\tPizza size: {self.get_size_of_pizza()}" \
# f"\n\tCheese filled dough: {self.stuffed_with_cheese}" \
# f"\n\t# of cheese toppings: {self.cheese_toppings}" \
# f"\n\t# of pepperoni toppings: {self.pepperoni_toppings}" \
# f"\n\t# of mushroom toppings: {self.mushroom_toppings}" \
# f"\n\t# of vegetable toppings: {self.veggie_toppings}" \
# f"\n\tCost: ${self.calc_cost()}"
#
#
# def main():
# my_pizza1 = DeluxePizza()
# print(my_pizza1)
# my_pizza2 = DeluxePizza("m", 1, 2, 3, 4)
# print(my_pizza2)
# my_pizza3 = DeluxePizza("l", 1, 2, 2, 3, True)
# print(my_pizza3)
#
#
# if __name__ == '__main__':
# main()
import datetime
def welcome():
    """Print the greeting banner and ask how many pizzas can be made today.

    Returns a list of that many *independent* empty dicts; each slot is
    later filled in place with one pizza's details, and remaining ``{}``
    slots represent unused capacity.
    """
    print("\n=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
    print("\t Welcome to Papa John's PIZZERIA")
    print("=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=-=")
    hour = int(datetime.datetime.now().hour)
    if 0 <= hour <= 11:
        greet = "Good morning"
    elif 12 <= hour <= 17:
        greet = "Good afternoon"
    else:
        greet = "Good evening"
    maximum_pizzas = int(input(f"\n{greet}! How many pizzas can you make today ? "))
    # Comprehension builds distinct dicts; the original's [{}] * n aliased a
    # single shared dict across every slot (harmless only as long as slots
    # are rebound rather than mutated).
    today_pizzas = [{} for _ in range(maximum_pizzas)]
    return today_pizzas
def password_check():
    """Prompt for a password up to three times.

    NOTE(review): as written, only an *empty* password exits the loop
    successfully; any non-empty input is treated as wrong, and after the
    third wrong attempt control re-enters main() recursively instead of
    returning to the caller. Confirm this is really the intended behavior.
    """
    for i in range(3):
        password = input("Enter password: ")
        if password == "":
            break
        elif i == 2:
            print("\nSorry - you are not authorized to perform requested action")
            main()
        else:
            continue
def pizza_quantity():
    """Ask how many pizzas are in this order, re-prompting while the request
    exceeds the remaining capacity.

    Capacity is the number of slots in the global ``today_pizza`` list that
    are still equal to ``{}`` (not yet filled with a pizza's details).
    """
    while True:
        pizza_count = int(input("\nHow many pizzas are in this order ? "))
        if pizza_count > today_pizza.count({}):
            print(
                f"Sorry I have less amount of ingredients to make {today_pizza.count({})} numbers of pizza(s) "
                f"only")
            continue
        else:
            return pizza_count
def cheaper_than(price):
    """Return one-line descriptions of every entered pizza costing at most
    *price*.

    Scans only the filled slots of the global ``today_pizza`` list (the
    trailing ``{}`` slots are unused capacity).
    """
    cheap_list = []
    for i in range(len(today_pizza) - today_pizza.count({})):
        if today_pizza[i]['cost'] <= price:
            # f-string: the original concatenated the numeric cost onto a
            # str with '+', which raises TypeError at runtime.
            cheap_list.append(f"Pizza # {i + 1}, cost: ${today_pizza[i]['cost']}")
    return cheap_list
def lowest_price():
    """Return the index (into the global ``today_pizza``) of the cheapest
    entered pizza, or None when no pizza has been entered yet."""
    cost_list = []
    for i in range(len(today_pizza) - today_pizza.count({})):
        cost_list.append(today_pizza[i]['cost'])
    if len(cost_list) == 0:
        return None
    else:
        # index of the first occurrence of the minimum cost
        index_cheapest = cost_list.index(min(cost_list))
        return index_cheapest
def highest_price():
    """Return the index (into the global ``today_pizza``) of the most
    expensive entered pizza, or None when no pizza has been entered yet."""
    cost_list = []
    for i in range(len(today_pizza) - today_pizza.count({})):
        cost_list.append(today_pizza[i]['cost'])
    if len(cost_list) == 0:
        return None
    else:
        # index of the first occurrence of the maximum cost
        index_costly = cost_list.index(max(cost_list))
        return index_costly
def number_of_pizzas_of_size(size):
    """Count entered pizzas whose stored size equals *size*.

    NOTE(review): *size* must match the stored long form ("small"/"medium"/
    "large") produced by DeluxePizza.set_size_of_pizza, not the single-letter
    code — confirm against callers.
    """
    number_of_pizzas_of_specific_size = 0
    for i in range(len(today_pizza) - today_pizza.count({})):
        if today_pizza[i]['size'] == size:
            number_of_pizzas_of_specific_size += 1
    return number_of_pizzas_of_specific_size
def average_cost():
    """Return the mean cost of the entered pizzas, or None when none exist.

    The "no pizzas" case is detected via a zero cost sum, which is safe only
    because every pizza costs at least $10 (see DeluxePizza.calc_cost).
    """
    add = 0
    for i in range(len(today_pizza) - today_pizza.count({})):
        add += today_pizza[i]['cost']
    if add == 0:
        return None
    else:
        return add / (len(today_pizza) - today_pizza.count({}))
class DeluxePizza:
    """One pizza order plus the interactive menus that create, modify and
    report on the day's pizzas.

    Pizza details live in the module-level ``today_pizza`` list of dicts
    (keys: size, stuffed, cheese, pepperoni, mushroom, veggie, cost); the
    instance attributes are working copies used while entering/editing.
    """
    # Class-wide running count of constructed instances.
    number_of_pizzas = 0
    # 1-based cursor into today_pizza for the pizza currently displayed /
    # edited. NOTE(review): as a class attribute mutated via self, this is
    # shared UI state — fine for this single-user CLI, but fragile.
    modify = 0
    def __init__(self, size_of_pizza = "s", cheese_toppings = 0, pepperoni_toppings = 0, mushroom_toppings = 0,
                 veggie_toppings = 0, stuffed_with_cheese = False):
        self.size_of_pizza = size_of_pizza
        self.cheese_toppings = cheese_toppings
        self.pepperoni_toppings = pepperoni_toppings
        self.mushroom_toppings = mushroom_toppings
        self.veggie_toppings = veggie_toppings
        self.stuffed_with_cheese = stuffed_with_cheese
        DeluxePizza.number_of_pizzas += 1
    def get_size_of_pizza(self):
        """Accessor method of size of pizza attribute."""
        return self.size_of_pizza
    def get_cheese_toppings(self):
        """Accessor method of cheese toppings attribute."""
        return self.cheese_toppings
    def get_pepperoni_toppings(self):
        """Accessor method of pepperoni toppings attribute."""
        return self.pepperoni_toppings
    def get_mushroom_toppings(self):
        """Accessor method of mushroom toppings attribute."""
        return self.mushroom_toppings
    def get_veggie_toppings(self):
        """Accessor method of veggie toppings attribute."""
        return self.veggie_toppings
    def get_stuffed_with_cheese(self):
        """Accessor method of stuffed with cheese attribute."""
        return self.stuffed_with_cheese
    def get_number_of_pizzas(self):
        """Accessor method of number of pizzas attribute."""
        return self.number_of_pizzas
    def set_size_of_pizza(self, size_of_pizza):
        """Mutator: normalises 's'/'m'/'l' (or long form) to the stored long
        form; anything unrecognised falls through to "large"."""
        if size_of_pizza.lower() == "s" or size_of_pizza.lower() == "small":
            self.size_of_pizza = "small"
        elif size_of_pizza.lower() == "m" or size_of_pizza.lower() == "medium":
            self.size_of_pizza = "medium"
        else:
            self.size_of_pizza = "large"
    def set_cheese_toppings(self, cheese_toppings):
        """Mutator method of cheese toppings attribute."""
        self.cheese_toppings = cheese_toppings
    def set_pepperoni_toppings(self, pepperoni_toppings):
        """Mutator method of pepperoni toppings attribute."""
        self.pepperoni_toppings = pepperoni_toppings
    def set_mushroom_toppings(self, mushroom_toppings):
        """Mutator method of mushroom toppings attribute."""
        self.mushroom_toppings = mushroom_toppings
    def set_veggie_toppings(self, veggie_toppings):
        """Mutator method of veggie toppings attribute."""
        self.veggie_toppings = veggie_toppings
    def set_stuffed_with_cheese(self, stuffed_with_cheese):
        """Mutator: accepts the "y" answer from prompts or a literal True;
        everything else means no cheese-stuffed dough."""
        if stuffed_with_cheese == "y" or stuffed_with_cheese is True:
            self.stuffed_with_cheese = True
        else:
            self.stuffed_with_cheese = False
    def pizza_details(self):
        """Snapshot this instance into the first free ({}) slot of the
        global today_pizza list, including the computed cost."""
        details = {'size': self.get_size_of_pizza(), 'stuffed': self.get_stuffed_with_cheese(),
                   'cheese': self.get_cheese_toppings(), 'pepperoni': self.get_pepperoni_toppings(),
                   'mushroom': self.get_mushroom_toppings(), 'veggie': self.get_veggie_toppings(),
                   'cost': self.calc_cost()}
        index = today_pizza.index({})
        today_pizza[index] = details
    def pizza_input(self, i):
        """Interactively read pizza #*i*'s attributes from stdin and store
        the snapshot via pizza_details()."""
        self.set_size_of_pizza(input(f"\nPizza # {i} size please (s/m/l): "))
        self.set_stuffed_with_cheese(input("Cheese in dough (y/any key for no)? "))
        self.set_cheese_toppings(int(input("Number of cheese toppings: ")))
        self.set_pepperoni_toppings(int(input("Number of pepperoni toppings: ")))
        self.set_mushroom_toppings(int(input("Number of Mushroom toppings: ")))
        self.set_veggie_toppings(int(input("Number of veggie toppings: ")))
        self.pizza_details()
    def modify_pizza(self):
        """Interactive edit loop for one existing pizza.

        Prompts for a (1-based) pizza number; while it is out of range the
        operator may retry or abandon. On a valid number, shows the pizza
        and loops over single-field edits until the operator quits, then
        re-syncs this instance from the edited dict and recomputes cost.
        """
        self.modify = int(input("\nWhich pizza do you wish to modify ? "))
        while self.modify > (len(today_pizza) - today_pizza.count({})) or self.modify <= 0:
            print(f"There is no pizza # {self.modify}"
                  f"\nThere are only {len(today_pizza) - today_pizza.count({})} pizza(s) in your order")
            option = input(
                "\n==> Press 1 to enter another pizza number or press anything else to quit this operation and go "
                "back to main menu\n")
            if option == "1":
                self.modify = int(input("\nWhich pizza do you wish to modify ? "))
            else:
                break
        else:
            # while/else: runs only when the loop ended without break,
            # i.e. the pizza number is valid.
            print("\nPizza #", self.modify)
            print(self.__str__())
            while True:
                choice = int(input("\nPapa John, what would you like to change?"
                                   "\n\t1. Size"
                                   "\n\t2. Cheese filled or not"
                                   "\n\t3. Number of cheese toppings"
                                   "\n\t4. Number of pepperoni toppings"
                                   "\n\t5. Number of mushroom toppings"
                                   "\n\t6. Number of vegetable toppings"
                                   "\n\t7. Quit"
                                   "\n\tEnter choice > "))
                if choice == 1:
                    self.set_size_of_pizza(input("\nsize please (s/m/l): "))
                    today_pizza[self.modify - 1]['size'] = self.get_size_of_pizza()
                elif choice == 2:
                    self.set_stuffed_with_cheese(input("\nCheese in dough (y/any key for no)? "))
                    today_pizza[self.modify - 1]['stuffed'] = self.get_stuffed_with_cheese()
                elif choice == 3:
                    today_pizza[self.modify - 1]['cheese'] = int(input("\nNumber of cheese toppings: "))
                elif choice == 4:
                    today_pizza[self.modify - 1]['pepperoni'] = int(input("\nNumber of pepperoni toppings: "))
                elif choice == 5:
                    today_pizza[self.modify - 1]['mushroom'] = int(input("\nNumber of Mushroom toppings: "))
                elif choice == 6:
                    today_pizza[self.modify - 1]['veggie'] = int(input("\nNumber of veggie toppings: "))
                elif choice == 7:
                    break
                else:
                    print("\nInvalid value!\nPlease choose again")
            # Pull the edited values back into this instance so calc_cost
            # reflects them, then store the recomputed cost.
            self.set_size_of_pizza(today_pizza[self.modify - 1]['size'])
            self.set_stuffed_with_cheese(today_pizza[self.modify - 1]['stuffed'])
            self.set_cheese_toppings(today_pizza[self.modify - 1]['cheese'])
            self.set_pepperoni_toppings(today_pizza[self.modify - 1]['pepperoni'])
            self.set_mushroom_toppings(today_pizza[self.modify - 1]['mushroom'])
            self.set_veggie_toppings(today_pizza[self.modify - 1]['veggie'])
            today_pizza[self.modify - 1]['cost'] = self.calc_cost()
    def pizzas_of_size(self, size):
        """Print every entered pizza of the given size ('s'/'m'/'l' or long
        form) plus a final count; unrecognised input is treated as large."""
        if size == "s":
            size = "small"
        elif size == "m":
            size = "medium"
        else:
            size = "large"
        count = 0
        print(f"\nList of pizzas of size {size.upper()} sold today:")
        for i in range(len(today_pizza) - today_pizza.count({})):
            if today_pizza[i]['size'] == size:
                # __str__ reads via the shared self.modify cursor.
                self.modify = i + 1
                print("\nPizza #", self.modify)
                print(self.__str__())
                count += 1
        if count == 0:
            print(f"\nThere is no pizza order of size {size.upper()} in the list")
        print("\n\t\t...............")
        print(f"\nNumber of pizzas of {size.upper()} size:", count)
    def pizzas_statistics(self):
        """Interactive statistics menu: cheapest/costliest pizza, totals,
        per-size counts and average cost, looping until the operator quits."""
        while True:
            stat = int(input("\nPapa John, what information would you like?"
                             "\n\t1. Cost and details of cheapest pizza"
                             "\n\t2. Cost and details of most costly pizza"
                             "\n\t3. Number of pizzas sold today"
                             "\n\t4. Number of pizzas of a specific size"
                             "\n\t5. Average cost of pizzas"
                             "\n\t6. Quit"
                             "\nEnter your choice > "))
            if stat == 1:
                index_cheapest = lowest_price()
                if index_cheapest is None:
                    print("\nThere is no pizza order in the list")
                else:
                    self.modify = index_cheapest + 1
                    print(f"\npizza # {self.modify} is the cheapest pizza")
                    print(self.__str__())
            elif stat == 2:
                index_costly = highest_price()
                if index_costly is None:
                    print("\nThere is no pizza order in the list")
                else:
                    self.modify = index_costly + 1
                    print(f"\npizza # {self.modify} is the most costly pizza")
                    print(self.__str__())
            elif stat == 3:
                print(
                    f"\nNumber of pizzas sold today({datetime.date.today()}): "
                    f"{len(today_pizza) - today_pizza.count({})}")
            elif stat == 4:
                specific_size = input("\nWhat size pizza do you want?(s/m/l): ")
                self.pizzas_of_size(specific_size)
            elif stat == 5:
                avg = average_cost()
                if avg is None:
                    print("\nThere is no pizza order in the list")
                else:
                    print(f"\nAverage cost of pizzas sold today({datetime.date.today()}): ${round(avg, 2)}")
            elif stat == 6:
                break
            else:
                print("\n------ Invalid value! choose again ------")
    def calc_cost(self):
        """Compute this pizza's price.

        Base price by size: small $10, medium $12, large $14. Cheese,
        pepperoni and mushroom toppings cost $2 each, veggie $3 each.
        Cheese-stuffed dough adds $2/$4/$6 for small/medium/large.
        """
        if not self.get_stuffed_with_cheese():
            if self.get_size_of_pizza() == "small":
                cost_of_pizza = 10 + (
                        self.get_cheese_toppings() + self.get_pepperoni_toppings() + self.get_mushroom_toppings()) * \
                                2 + self.get_veggie_toppings() * 3
            elif self.get_size_of_pizza() == "medium":
                cost_of_pizza = 12 + (
                        self.get_cheese_toppings() + self.get_pepperoni_toppings() + self.get_mushroom_toppings()) * \
                                2 + self.get_veggie_toppings() * 3
            else:
                cost_of_pizza = 14 + (
                        self.get_cheese_toppings() + self.get_pepperoni_toppings() + self.get_mushroom_toppings()) * \
                                2 + self.get_veggie_toppings() * 3
            return cost_of_pizza
        else:
            if self.get_size_of_pizza() == "small":
                cost_of_pizza = 10 + (
                        self.get_cheese_toppings() + self.get_pepperoni_toppings() + self.get_mushroom_toppings()) * \
                                2 + self.get_veggie_toppings() * 3 + 2
            elif self.get_size_of_pizza() == "medium":
                cost_of_pizza = 12 + (
                        self.get_cheese_toppings() + self.get_pepperoni_toppings() + self.get_mushroom_toppings()) * \
                                2 + self.get_veggie_toppings() * 3 + 4
            else:
                cost_of_pizza = 14 + (
                        self.get_cheese_toppings() + self.get_pepperoni_toppings() + self.get_mushroom_toppings()) * \
                                2 + self.get_veggie_toppings() * 3 + 6
            return cost_of_pizza
    def __str__(self):
        """Render the pizza at the current self.modify cursor (NOT this
        instance's own attributes) from the global today_pizza list."""
        return f"\tPizza size: {today_pizza[self.modify - 1]['size']}" \
               f"\n\tCheese filled dough: {today_pizza[self.modify - 1]['stuffed']}" \
               f"\n\t# of cheese toppings: {today_pizza[self.modify - 1]['cheese']}" \
               f"\n\t# of pepperoni toppings: {today_pizza[self.modify - 1]['pepperoni']}" \
               f"\n\t# of mushroom toppings: {today_pizza[self.modify - 1]['mushroom']}" \
               f"\n\t# of vegetable toppings: {today_pizza[self.modify - 1]['veggie']}" \
               f"\n\tCost: ${today_pizza[self.modify - 1]['cost']}"
def main():
    """Top-level menu loop: dispatches order entry, modification, per-size
    listing, statistics, or quit based on the operator's choice."""
    while True:
        print("\nPapa John, what do you want to do?"
              "\n\t1. Enter a new pizza order (password required)"
              "\n\t2. Change information of a specific order (password required)"
              "\n\t3. Display details for all pizzas of a specific size (s/m/l)"
              "\n\t4. Statistics on today’s pizzas"
              "\n\t5. Quit")
        choice = int(input("Please enter your choice > "))
        if choice < 1 or choice > 5:
            print("\n----- Please choose between 1 and 5 only (inclusive) -----")
            continue
        if choice == 1:
            password_check()
            p_quantity = pizza_quantity()
            for i in range(1, p_quantity + 1):
                my_pizza = DeluxePizza()
                my_pizza.pizza_input(i)
        elif choice == 2:
            password_check()
            my_pizza = DeluxePizza()
            my_pizza.modify_pizza()
        elif choice == 3:
            my_pizza = DeluxePizza()
            size_list = input("\nJohn, what size pizza do you want a list of ? (s/m/l): ")
            my_pizza.pizzas_of_size(size_list)
        elif choice == 4:
            my_pizza = DeluxePizza()
            my_pizza.pizzas_statistics()
        else:
            # NOTE(review): exit() is the site-builtin; sys.exit() is the
            # conventional choice for scripts.
            print("\nAnother days work done!!!\nThank you and have a nice day!\n\t\t\( ・_・)")
            exit()
if __name__ == '__main__':
    # Build the day's (initially empty) order slots, then run the menu.
    today_pizza = welcome()
    main()
| DavinderSohal/Python | Activities/Final/ProjectPizza.py | ProjectPizza.py | py | 21,602 | python | en | code | 0 | github-code | 90 |
33458262714 | from typing import List
class Solution:
    """LeetCode 433 — minimum genetic mutation, solved with BFS over the bank."""

    def minMutation(self, startGene: str, endGene: str, bank: List[str]) -> int:
        """Return the minimum number of single-character mutations turning
        startGene into endGene, moving only through genes in *bank*;
        -1 if unreachable, 0 if the genes are already equal.
        """
        from collections import deque

        def differs_by_one(a: str, b: str) -> bool:
            # Genes have equal length, so count positional mismatches.
            return sum(x != y for x, y in zip(a, b)) == 1

        if startGene == endGene:
            # BUG FIX: the original fell through to -1 for this case.
            return 0
        # BFS guarantees the first time we reach endGene is via a shortest path.
        # BUG FIX (perf): list.pop(0) was O(n) per dequeue; deque.popleft is O(1).
        queue = deque([(startGene, 0)])
        visited = {startGene}
        while queue:
            current, steps = queue.popleft()
            for candidate in bank:
                if candidate not in visited and differs_by_one(candidate, current):
                    if candidate == endGene:
                        return steps + 1
                    visited.add(candidate)
                    queue.append((candidate, steps + 1))
        return -1
solution = Solution()
# print(solution.minMutation("AACCGGTT", "AACCGGTA", ["AACCGGTA"]))
print(solution.minMutation("AACCGGTT", "AAACGGTA", ["AACCGGTA","AACCGCTA","AAACGGTA"])) | Samuel-Black/leetcode | minimum-genetic-mutation.py | minimum-genetic-mutation.py | py | 1,019 | python | en | code | 0 | github-code | 90 |
4483678378 | import json
import requests
import pprint
from sqlalchemy import all_
# Fetch the recent-matches list for one hard-coded OpenDota account and
# collect every match id it contains.
response = requests.get("https://api.opendota.com/api/players/1163336706/matches")
all_match_ids = []
for i in response.json():
    match_id = i['match_id']
    all_match_ids.append(match_id)
print(all_match_ids)
players_bank = []
# Fetch each of the first 60 matches and collect every participant's account id.
for match in all_match_ids[0:60]:
    print("retrieving match with id: {id}".format(id=match))
    # BUG FIX: the request previously interpolated `match_id` — a stale
    # variable left over from the id-collection loop above — so the SAME
    # match was downloaded on every iteration.  Use the current loop id.
    match_data = requests.get("https://api.opendota.com/api/matches/{match_id}".format(match_id=match)).json()
    for player in match_data['players']:
        players_bank.append(player['account_id'])
print(players_bank) | rbekeris/databakery | get_all_matches_for_player.py | get_all_matches_for_player.py | py | 666 | python | en | code | 0 | github-code | 90 |
21237705852 | from django import forms
from django.contrib.auth.models import User
from foi.models import Case, Comment, Referral, Assessment, Outcome, InternalReview, InformationCommissionerAppeal, AdministrativeAppealsTribunal
class CaseForm(forms.ModelForm):
    """Model form for the core details of an FOI Case (title, dates, method, urgency)."""
    class Meta:
        model = Case
        fields = [
            'title',
            'subject',
            'received_date',
            'enquiry_date',
            'enquiry_ref',
            'enquiry_method',
            'response_method',
            'enquiry_description',
            'urgent_flag',
            'handling_instructions',
            'addressee_name']
        labels = {
            'urgent_flag': ('Urgent?')
        }
        widgets = {
            'title': forms.TextInput(attrs = {'placeholder': 'Enter a title for this case'}),
            # HTML5 native date pickers for the two date fields.
            'received_date': forms.DateInput(attrs = {'type': 'date'}),
            'enquiry_date': forms.DateInput(attrs = {'type': 'date'}),
            'urgent_flag': forms.CheckboxInput(attrs = {'label': 'Urgent?'})
        }
class CaseEnquirerForm(forms.ModelForm):
    """Model form for the enquirer's contact/classification details on a Case."""
    class Meta:
        model = Case
        fields = [
            'enquirer_title',
            'enquirer_name',
            'enquirer_department',
            'enquirer_organisation',
            'enquirer_address',
            'enquirer_postcode',
            'enquirer_telephone',
            'enquirer_email_address',
            'enquirer_enquirer_group',
            'enquirer_industry_body',
            'enquirer_region']
        # Strip the 'enquirer_' prefix from the displayed labels.
        labels = {
            'enquirer_title': ('Title'),
            'enquirer_name': ('Name'),
            'enquirer_department': ('Department'),
            'enquirer_organisation': ('Organisation'),
            'enquirer_address': ('Address'),
            'enquirer_postcode': ('Postcode'),
            'enquirer_telephone': ('Telephone'),
            'enquirer_email_address': ('Email Address'),
            'enquirer_enquirer_group': ('Enquirer Group'),
            'enquirer_industry_body': ('Industry Body'),
            'enquirer_region': ('Region')
        }
class CommentForm(forms.ModelForm):
    """Model form for adding a free-text Comment (subject + body) to a case."""
    class Meta:
        model = Comment
        fields = ['subject', 'body']
        widgets = {
            'subject': forms.TextInput(attrs = {'placeholder': 'Enter a subject for this comment'}),
            'body': forms.Textarea(attrs = {'placeholder': 'Enter a body for this comment'})
        }
class ReferralForm(forms.ModelForm):
    """Model form for referring a case to another user.

    NOTE(review): the `refer_to` queryset is evaluated per form render,
    so newly added users appear without a restart.
    """
    refer_to = forms.ModelChoiceField(queryset = User.objects.all())
    class Meta:
        model = Referral
        fields = ['subject', 'body', 'refer_to']
        widgets = {
            'subject': forms.TextInput(attrs = {'placeholder': 'Enter a subject for this referral'}),
            'body': forms.Textarea(attrs = {'placeholder': 'Enter a body for this referral'})
        }
class AssessmentForm(forms.ModelForm):
    """Model form for the initial Assessment flags (consultation/precedents)."""
    class Meta:
        model = Assessment
        fields = [
            'third_party_consultation',
            'precedents',
            'precedent_details']
        labels = {
            'third_party_consultation': ('Third Party Consultation Required?'),
            'precedents': ('Precedents Exist?')
        }
        # Kept empty to mirror the other Assessment forms' structure.
        widgets = {}
        help_texts = {}
class AssessmentFeeForm(forms.ModelForm):
    """Model form for the fee-calculation section of an Assessment."""
    class Meta:
        model = Assessment
        fields = [
            'fee_flag',
            'search_and_retrieval_time',
            'decision_making_time',
            'photocopy_charges',
            'other_access_time',
            'postage_charges',
            'initial_deposit',
            'request_general_description',
            'include_refine_request_flag',
            'include_third_party_consultation_flag',
            'request_concerning',
            'contact_name',
            'contact_telephone',
            'fee_notice_issued_flag',
            'fee_notice_issued_date',
            'fee_payment_required_date',
            'fee_paid_flag',
            'fee_received_date',
            'fee_limit_flag']
        # Boolean model flags get question-style labels.
        labels = {
            'fee_flag': ('Fees Applicable?'),
            'include_refine_request_flag': ('Include Refine Request Clause?'),
            'include_third_party_consultation_flag': ('Include Third Party Consultation Clause?'),
            'fee_notice_issued_flag': ('Fee Notice Issued?'),
            'fee_paid_flag': ('Fee Paid?'),
            'fee_limit_flag': ('Fee Exceeds Cost Limit?')
        }
        widgets = {
            'fee_notice_issued_date': forms.DateInput(attrs = {'type': 'date'}),
            'fee_payment_required_date': forms.DateInput(attrs = {'type': 'date'}),
            'fee_received_date': forms.DateInput(attrs = {'type': 'date'})
        }
class AssessmentThirdPartyForm(forms.ModelForm):
    """Model form for the third-party-consultation section of an Assessment."""
    class Meta:
        model = Assessment
        fields = [
            'third_party_request_general_description',
            'documents_attached_or_described',
            'include_s47_flag',
            'include_s47b_flag',
            'include_s47f_flag',
            'include_s47g_flag',
            'respond_by_date',
            'third_party_contact_name',
            'third_party_contact_telephone',
            'third_party_title',
            'third_party_name',
            'third_party_department',
            'third_party_organisation',
            'third_party_address',
            'third_party_postcode']
        # s47/s47B/s47F/s47G are FOI Act exemption clauses to include in letters.
        labels = {
            'include_s47_flag': ('Include s47?'),
            'include_s47b_flag': ('Include s47B?'),
            'include_s47f_flag': ('Include s47F?'),
            'include_s47g_flag': ('Include s47G?')
        }
        widgets = {
            'respond_by_date': forms.DateInput(attrs = {'type': 'date'})
        }
        help_texts = {}
class OutcomeForm(forms.ModelForm):
    """Model form for recording the Outcome of a case (decision, exemptions, certificates)."""
    class Meta:
        model = Outcome
        fields = ['foi_outcomes', 'foi_exemptions', 'foi_conditional_exemptions', 'disclosure_outcomes', 'certificates']
        labels = {
            'foi_outcomes': ('Specific Outcome'),
            'foi_exemptions': ('FOI Exemption'),
            'foi_conditional_exemptions': ('FOI Conditional Exemption'),
            'disclosure_outcomes': ('Disclosure Outcome'),
            'certificates': ('Certificate')
        }
class InternalReviewForm(forms.ModelForm):
    """Model form for an InternalReview record (dates, panel, decision)."""
    class Meta:
        model = InternalReview
        fields = ['requested_date', 'review_held_date', 'days_taken_to_hold_review', 'review_members', 'review_decision']
        widgets = {
            'requested_date': forms.DateInput(attrs = {'type': 'date'}),
            'review_held_date': forms.DateInput(attrs = {'type': 'date'})
        }
class InformationCommissionerAppealForm(forms.ModelForm):
    """Model form for an Information Commissioner appeal record.

    NOTE(review): 'decision_recieved_date' is misspelled but must match the
    model field name — fix in the model (with a migration) first if desired.
    """
    class Meta:
        model = InformationCommissionerAppeal
        fields = ['contacted_date', 'documents_provided_date', 'decision_recieved_date', 'decision', 'decision_notice']
        widgets = {
            'contacted_date': forms.DateInput(attrs = {'type': 'date'}),
            'documents_provided_date': forms.DateInput(attrs = {'type': 'date'}),
            'decision_recieved_date': forms.DateInput(attrs = {'type': 'date'})
        }
class AdministrativeAppealsTribunalForm(forms.ModelForm):
    """Model form for an Administrative Appeals Tribunal record.

    NOTE(review): same 'decision_recieved_date' spelling as the model field.
    """
    class Meta:
        model = AdministrativeAppealsTribunal
        fields = ['contacted_date', 'documents_provided_date', 'decision_recieved_date', 'decision', 'decision_notice']
        widgets = {
            'contacted_date': forms.DateInput(attrs = {'type': 'date'}),
            'documents_provided_date': forms.DateInput(attrs = {'type': 'date'}),
            'decision_recieved_date': forms.DateInput(attrs = {'type': 'date'})
        }
| switchtrue/FOXFOI | foi/forms.py | forms.py | py | 7,660 | python | en | code | 0 | github-code | 90 |
31780751809 | # -*- coding: utf-8 -*-
"""
Created on Fri Apr 22 21:51:00 2022
@author: JalenL
"""
import time
import random
# Taunts Inigo delivers when he loses a round.
# NOTE(review): random_lost is chosen ONCE at import time, so the same quote
# repeats for every lost round of a session — confirm that is intended.
Ingio_lost_list = ['Victory is fleeting. Losing is forever.','You have no choices about how you lose,' +
                   'but you do have a choice about how you come back and prepare to win again.',
                   ' Losing is part of the game. If you never lose,' +
                   ' you are never truly tested, and never forced to grow.',
                   'You beat me this time.....you won\'t be so lucky next time.']
random_lost = random.choice(Ingio_lost_list)
def play():
    """Run rapid-fire rock/paper/scissors rounds until the player wins 3 times.

    Reads single-letter throws from stdin; relies on the module-level
    is_win() and random_lost.  Invalid letters count as a loss (unchanged
    from the original behaviour).
    """
    score = 0
    # Count-in before the first throw.
    for word in ("\nRock...", "\nPaper...", "\nScissors...", "\nShoot..."):
        print(word)
        time.sleep(3)
    while True:
        user = str(input("Enter: 'r' for Rock, 'p' for Paper, 's' for Scissors: ")).lower()
        Ingio = random.choice(['r','p', 's'])
        if user == Ingio:
            print("\nIt's a tie")
            print("\n---Greats minds do think alike---")
        elif is_win(user, Ingio):
            print("\nYou won!")
            print("\nIngio: ",random_lost)
            score += 1
            if score == 3:
                print("\n\n")
                print("\nYou may proceed on your adventure, you truly are a wonder.")
                break
        else:
            # BUG FIX: this branch was `elif is_win != True:` — comparing the
            # FUNCTION OBJECT to True (always truthy).  A plain else states
            # the intent; runtime behaviour is unchanged.
            print("You lose!")
            print("\nWitness my true strength!")
game = play  # module-level alias; intro_choice() starts the match via game()
def is_win(user, Ingio):
    """Return True when *user*'s throw beats *Ingio*'s.

    Winning pairs: rock beats scissors, scissors beats paper, paper beats
    rock.  IDIOM FIX: the original fell through to an implicit None for
    non-winning pairs; callers only use the result for truthiness, so
    returning an explicit bool is backward compatible and clearer.
    """
    return (user, Ingio) in {('r', 's'), ('s', 'p'), ('p', 'r')}
def intro_choice():
    """Print Inigo's introduction, then prompt until the player answers Y or N.

    Either valid answer starts the match via game(); any other input
    re-prompts forever.
    """
    print("\nHello player, you've found my secert room. My name is Inigo Montoya and"
          " I'm the BEST Rock, Paper, Scissors player in the world, prepare to lose.")
    time.sleep(3)
    print("\nThese will be rapidfire games, be ready!")
    print("\n----Inigo holds out his hands eager to play------")
    time.sleep(0)
    # Reply printed for each accepted answer; both paths lead into the game.
    replies = {
        "N": "\nWell.... that's too bad you will play anyway!",
        "Y": "\nThank you for humoring me....let's make this an epic battle...for me!",
    }
    while True:
        answer = input("Do you accept the duel? Y/N ").upper()
        if answer in replies:
            print(replies[answer])
            return game()
        print("\nInigo does not like that answer....try again")
# Script entry point: starts Inigo's challenge as soon as the module runs
# (no __main__ guard in the original).
intro_choice()
| CtrlVEarlSweatpants/Games | RPS_Game.py | RPS_Game.py | py | 2,741 | python | en | code | 0 | github-code | 90 |
28518879404 | import copy
import math
import sys
from filter_list import filter_module, filter_real_numbers
from list_operations import _replace, move_elements, delete_elements
from list_operations import sort_descending_imaginary
from user_menu import right_operation
class Complex:
    """A complex number that also carries its truncated modulus.

    Attributes: real, imaginary, module (int(sqrt(re^2 + im^2))).
    """

    def __init__(self, real, imaginary, module):
        self.real = real
        self.imaginary = imaginary
        self.module = module

    def show_numbers(self):
        """Print the number as ``[a+bi]`` without a trailing newline."""
        print(f"[{self.real}+{self.imaginary}i]", end="")

    @staticmethod
    def _modulus(re, im):
        # Identical truncation to the original inline expression.
        return int(math.sqrt(re * re + im * im))

    def __add__(self, other):
        re = self.real + other.real
        im = self.imaginary + other.imaginary
        return Complex(re, im, Complex._modulus(re, im))

    def __mul__(self, other):
        re = self.real * other.real - self.imaginary * other.imaginary
        im = self.imaginary * other.real + self.real * other.imaginary
        return Complex(re, im, Complex._modulus(re, im))
def do_task_1(array):
    """Menu task 1: read a complex number a+bi and append or insert it into *array*.

    Mutates *array* in place, then prints the whole list.  Prompts are in
    Romanian (kept verbatim — they are user-facing output).
    """
    num1, num2 = map(int, input("Introdu a si b:").split())
    print("1 Adaugă număr complex la sfârșitul listei")
    print("2 Inserare număr complex pe o poziție dată")
    _next = right_operation(1, 2, "Introdu 1 sau 2:")
    # Truncated modulus, consistent with Complex.__add__/__mul__.
    module = int(math.sqrt(num1 * num1 + num2 * num2))
    if _next == 1:
        array.append(Complex(num1, num2, module))
    else:
        position = right_operation(0, len(array), "Introdu pozitia:")
        if position == 0 and len(array) == 0:
            array.append(Complex(num1, num2, module))
        else:
            # move_elements shifts the tail right before the insert — TODO
            # confirm this doesn't duplicate list.insert's own shifting.
            move_elements(position, array)
            array.insert(position, Complex(num1, num2, module))
    for nums in array:
        nums.show_numbers()
    print('\n')
def do_task_2(array):
    """Menu task 2: delete a position range from *array*, or replace every
    occurrence of one complex number with another.  Mutates *array* in place.
    """
    print("Care este urmatoarea operatie?")
    print("1 Șterge elementele de pe un interval de poziții.")
    print("2 Înlocuiește toate aparițiile unui număr complex cu un alt număr complex.")
    _next = right_operation(1, 2, "Introdu operatia:")
    if _next == 1:
        pos1 = right_operation(0, len(array) - 1, "Introdu prima pozitie:")
        pos2 = right_operation(0, len(array) - 1, "Introdu a doua pozitie:")
        delete_elements(pos1, pos2, array)
    else:
        # (a, b) is the number to replace; (x, y) is its replacement.
        a, b = map(int, input("Introduceti coeficientii numarului complex separati prin enter care doriti sa fie "
                              "inlocuit:").split())
        x, y = map(int, input("Introduceti coeficientii numarului complex separati prin enter care sa il inlocuiasca:")
                   .split())
        _replace(a, b, x, y, array)
    for nums in array:
        nums.show_numbers()
    print('\n')
def do_task_3(array):
    """Menu task 3: print imaginary parts over a position range, or print the
    numbers whose modulus is < 10 or == 10.  Read-only over *array*.
    """
    print("Care este urmatoarea operatie?")
    print("1 Tipărește partea imaginara pentru numerele din listă. Se dă intervalul de poziții (sub secvența).")
    print("2 Tipărește toate numerele complexe care au modulul mai mic decât 10")
    print("3 Tipareste toate numerele complexe care au modulul egal cu 10")
    _next = right_operation(1, 3, "Introdu operatia:")
    if _next == 1:
        pos1 = right_operation(0, len(array) - 1, "Introdu prima pozitie:")
        pos2 = right_operation(0, len(array) - 1, "Introdu a doua pozitie:")
        # Inclusive range [pos1, pos2].
        for it in range(pos1, pos2 + 1):
            print(array[it].imaginary, end=" ")
        print('\n')
    elif _next == 2:
        for it in array:
            if it.module < 10:
                it.show_numbers()
        print('\n')
    elif _next == 3:
        for it in array:
            if it.module == 10:
                it.show_numbers()
        print('\n')
def do_task_4(array):
    """Menu task 4: print the sum or product of a sub-sequence, or print the
    list sorted descending by imaginary part.  Read-only over *array*
    (sorting works on a deep copy).
    """
    print("1 suma numerelor dintr-o subsecventă dată:")
    print("2 Produsul numerelor dintr-o subsecventă dată:")
    print("3 Tipărește lista sortată descrescător după partea imaginara:")
    _next = right_operation(1, 3, "Alegeti urmatoarea operatie:")
    if _next == 1 or _next == 2:
        pos1 = right_operation(0, len(array) - 1, "Introdu prima pozitie:")
        pos2 = right_operation(0, len(array) - 1, "Introdu a doua pozitie:")
        if _next == 1:
            # Sum of array[pos1..pos2] (inclusive).
            add = Complex(0, 0, 0)
            for i in range(pos1, pos2 + 1):
                add.imaginary += array[i].imaginary
                add.real += array[i].real
            add.show_numbers()
            print('\n')
        else:
            # Product of array[pos1..pos2] (inclusive).
            # BUG FIX: the old code accumulated the SUM of the running prefix
            # products and printed that instead of the product itself; it also
            # printed 0+0i whenever pos1 == pos2.
            multiply = Complex(array[pos1].real, array[pos1].imaginary, array[pos1].module)
            for i in range(pos1 + 1, pos2 + 1):
                multiply = multiply * array[i]
            multiply.show_numbers()
            print('\n')
    elif _next == 3:
        # Deep copy so the caller's list order is untouched.
        aux = copy.deepcopy(array)
        sort_descending_imaginary(aux)
        for i in aux:
            i.show_numbers()
        print('\n')
def do_task_5(array):
    """Menu task 5: filter *array* in place — remove numbers whose real part
    is prime, or remove numbers whose modulus compares (<, ==, >) to a target.

    NOTE(review): unlike the other tasks this one does not end with
    ``print('\\n')`` — confirm whether the missing blank line is intentional.
    """
    print("1 Filtrare parte reala prim – elimină din listă numerele complexe la care partea reala este prim.:")
    print("2 Filtrare modul – elimina din lista numerele complexe la care modulul este <,= sau > decât un număr dat.:")
    _next = right_operation(1, 2, "Introduceti urmatoarea operatie:")
    if _next == 1:
        filter_real_numbers(array)
    else:
        target = right_operation(0, sys.maxsize, "Introduceti numarul:")
        # operator: 1 -> '<', 2 -> '=', 3 -> '>' (decoded by filter_module).
        operator = right_operation(1, 3, "Introduceti 1 pt '<', 2 pentru '=' si 3 pentru '>':")
        filter_module(array, target, operator)
    for it in array:
        it.show_numbers()
def show_list(array):
    """Print every number in *array* via its show_numbers() method."""
    for item in array:
        item.show_numbers()
| darian200205/FP-python | lab4/do_task.py | do_task.py | py | 5,659 | python | ro | code | 0 | github-code | 90 |
24414923952 | # coding=utf-8
from __future__ import print_function
import numpy as np
from imgProcessor.exceptions import EnoughImages
class Iteratives(object):
    """Base class that tracks an iterative averaging process and signals
    completion by raising EnoughImages.

    NOTE(review): neither ``_n`` nor ``_last_dev`` is ever updated inside
    this class, so as written the early-stop branch can never fire —
    presumably a subclass increments ``_n`` and records ``_last_dev`` each
    iteration.  Verify in the subclasses.
    """
    def __init__(self, max_iter=1e4, max_dev=1e-5):
        self._max_iter = max_iter  # hard cap on iterations
        self._max_dev = max_dev    # residuum below which we consider converged
        self._last_dev = None      # residuum of the previous iteration (see class note)
        self._n = 0                # iteration counter (see class note)
    def checkConverence(self, arr):
        """Print the mean residuum of *arr*; raise EnoughImages to stop iterating.

        Stop conditions: iteration cap exceeded, residuum rising after a
        warm-up of 4 iterations, or residuum below ``_max_dev``.
        NOTE(review): a ``_last_dev`` of exactly 0 is falsy and disables the
        whole second clause — likely harmless, but worth confirming.
        """
        dev = np.mean(arr)
        print('residuum: %s' % dev)
        # STOP ITERATION?
        if self._n > self._max_iter or (self._last_dev and (
                (self._n > 4 and dev > self._last_dev) or dev < self._max_dev)):
            raise EnoughImages()
| radjkarl/imgProcessor | imgProcessor/utils/baseClasses.py | baseClasses.py | py | 650 | python | en | code | 28 | github-code | 90 |
19017277473 | def create_array(input_lines):
    """Convert each input line into a list of its characters (a 2-D grid).

    NOTE(review): Python strings are already indexable, so this conversion
    is not strictly necessary for read-only grid access.
    """
    res = []
    for l in input_lines:
        row = []
        for c in l:
            row.append(c)
        res.append(row)
    return res
def solve(fname, row_steps, col_steps):
    """Count trees ('#') hit while descending the grid in *fname* on the
    slope (row_steps down, col_steps right), wrapping horizontally.

    ROBUSTNESS FIX: reads the grid with splitlines() and drops empty lines,
    so a trailing newline in the input file no longer produces an empty row
    (which previously caused an IndexError).  Rows are used directly as
    strings instead of going through create_array — they are only indexed.
    """
    with open(fname) as f:
        grid = [line for line in f.read().splitlines() if line]
    row = 0
    col = 0
    trees = 0
    while row < len(grid):
        if grid[row][col] == '#':
            trees += 1
        # Wrap around: the pattern repeats to the right indefinitely.
        col = (col + col_steps) % len(grid[row])
        row += row_steps
    return trees
if __name__ == "__main__":
fname = 'day03.txt'
print(solve(fname, 1, 1) * solve(fname, 1, 3) * solve(fname, 1, 5) * solve(fname, 1, 7) * solve(fname, 2, 1))
| PyJay/aoc2020 | day03.py | day03.py | py | 743 | python | en | code | 1 | github-code | 90 |
25525439418 | from flask import request
from app import app
from app.db import postgre
from app import utils
from app.utils import wrappers, session, crossdomain, logger
logger = logger.Logger(__name__)  # rebind: 'logger' now names the instance, deliberately shadowing the imported module
# This method removes pack from user's assigned packs
# Pack itself is not removed
def removePack():
    """Unassign the pack identified by the request's 'pack_id' from the
    current user; the pack record itself is kept.

    Returns a status dict: {'status': 'ok'} on success, or
    {'status': 'error', 'message': ...} when the id is missing/unknown or
    the session is anonymous.
    """
    logger.info("API Handler pack/remove")
    try:
        # Broad except on purpose: covers a missing key AND a non-JSON body
        # (request.json may be None, raising TypeError).
        hash_id = request.json['pack_id']
    except Exception:
        return {
            'status': 'error',
            'message': 'Pack id required'
        }
    pack_id = postgre.pack.hashIdToId(hash_id)
    if(pack_id is None):
        return {
            'status': 'error',
            'message': 'Pack with given hash id not found'
        }
    if(session.isAnonymous()):
        return {
            'status': 'error',
            'message': 'Working with packs in anonymous mode is not supported'
        }
    # Final flag False — presumably "don't delete the pack row itself";
    # TODO confirm against postgre.pack.removePack's signature.
    postgre.pack.removePack(session.userID(), pack_id, False)
    return {
        'status':'ok'
    }
@app.route('/api/pack/remove', methods = ['POST'])
@wrappers.nullable()
def removePackWeb():
    """Web-session endpoint: thin route wrapper around removePack()."""
    return removePack()
@app.route('/api/mobile/pack/remove', methods = ['POST', 'OPTIONS'])
@crossdomain.crossdomain()
@wrappers.tokenize()
def removePackMobile():
    """Mobile endpoint: same handler, but CORS-enabled and token-authenticated."""
    return removePack()
| codingjerk/ztd.blunders-web | app/api/pack/remove.py | remove.py | py | 1,259 | python | en | code | 0 | github-code | 90 |
27620508174 | # Definition for a binary tree node.
# class TreeNode(object):
# def __init__(self, x):
# self.val = x
# self.left = None
# self.right = None
class Solution(object):
    def maxPathSum(self, root):
        """
        LeetCode 124: maximum path sum of any (possibly root-less) path.
        :type root: TreeNode
        :rtype: int
        """
        if root is None:
            return 0
        def helper(node):
            # Post-order: returns the best DOWNWARD path starting at node,
            # while tracking the best "bent" path in helper.res.
            if node is None:
                return 0
            ln = helper(node.left)
            rn = helper(node.right)
            # Best single branch through node (may drop both children).
            max_single = max(max(ln, rn) + node.val, node.val)
            # Best path that turns at node (uses both children).
            max_top = max(max_single, ln + rn + node.val)
            helper.res = max(helper.res, max_top)
            return max_single
        # Global best is stashed as a function attribute (py2-style nonlocal).
        helper.res = float("-inf")
        helper(root)
        return helper.res | Nazmul-islam-apu/Leetcode-Problem-Solution | Problem 101 - 200/124. Binary Tree Maximum Path Sum.py | 124. Binary Tree Maximum Path Sum.py | py | 781 | python | en | code | 0 | github-code | 90 |
10299627765 | import argparse, os, sys
from os import listdir
from os.path import isfile, join
from ipywidgets import widgets
import pickle
import math
import collections
import time
import numpy as np
import pandas as pd
import scanpy as sc
import warnings
warnings.filterwarnings('ignore')
from sklearn.metrics import silhouette_score
import multiprocess as mp
from functools import partial
import seaborn as sns
from matplotlib import pyplot as plt
import time
def autoResolution(adata,cpus=4):
    r"""Automatically determine clustering resolution

    Tests Louvain resolutions 0.4..1.4, scoring each by the robustness of
    co-clustering across 5 random 80% subsamples (silhouette on the
    co-clustering distance matrix), and keeps the best one in
    ``adata.obs['louvain']``.

    Parameters
    ----------
    - adata : `scanpy.AnnData`
        The single cell data.
    - cpus : `int`, optional (default: 4)
        The number of cpus used for parallel computing.

    Returns
    -------
    - adata : `scanpy.AnnData`
        The single cell data with the clustering resolution.
    - res : `float`
        The clustering resolution.
    - df_sil: `pandas.DataFrame`
        The silhouette score of each clustering resolution.
    """
    print("Automatically determine clustering resolution...")
    start = time.time()
    def subsample_clustering(adata, sample_n, subsample_n, resolution, subsample):
        # Cluster one subsample and record, per cell pair, whether the pair
        # was sampled together and whether it landed in the same cluster.
        subadata = adata[subsample]
        sc.tl.louvain(subadata, resolution=resolution)
        cluster = subadata.obs['louvain'].tolist()
        subsampling_n = np.zeros((sample_n, sample_n), dtype=bool)
        coclustering_n = np.zeros((sample_n, sample_n), dtype=bool)
        for i in range(subsample_n):
            for j in range(subsample_n):
                x = subsample[i]
                y = subsample[j]
                subsampling_n[x][y] = True
                if cluster[i] == cluster[j]:
                    coclustering_n[x][y] = True
        return (subsampling_n, coclustering_n)
    rep_n = 5
    subset = 0.8
    sample_n = len(adata.obs)
    subsample_n = int(sample_n * subset)
    resolutions = np.linspace(0.4, 1.4, 6)
    silhouette_avg = {}
    np.random.seed(1)
    best_resolution = 0
    highest_sil = 0
    for r in resolutions:
        r = np.round(r, 1)
        print("Clustering test: resolution = ", r)
        sub_start = time.time()
        subsamples = [np.random.choice(sample_n, subsample_n, replace=False) for t in range(rep_n)]
        p = mp.Pool(cpus)
        func = partial(subsample_clustering, adata, sample_n, subsample_n, r)
        resultList = p.map(func, subsamples)
        p.close()
        p.join()
        # Summing the bool matrices yields pairwise counts across replicates.
        subsampling_n = sum([result[0] for result in resultList])
        coclustering_n = sum([result[1] for result in resultList])
        # Pairs never subsampled together get a huge denominator -> distance ~1.
        subsampling_n[np.where(subsampling_n == 0)] = 1e6
        distance = 1.0 - coclustering_n / subsampling_n
        np.fill_diagonal(distance, 0.0)
        sc.tl.louvain(adata, resolution=r, key_added = 'louvain_r' + str(r))
        silhouette_avg[str(r)] = silhouette_score(distance, adata.obs['louvain_r' + str(r)], metric="precomputed")
        if silhouette_avg[str(r)] > highest_sil:
            highest_sil = silhouette_avg[str(r)]
            best_resolution = r
        print("robustness score = ", silhouette_avg[str(r)])
        sub_end = time.time()
        # BUG FIX: was print('time: {}', sub_end - sub_start) — the "{}" was
        # printed literally because .format was never applied.
        print('time: {}'.format(sub_end - sub_start))
        print()
    adata.obs['louvain'] = adata.obs['louvain_r' + str(best_resolution)]
    print("resolution with highest score: ", best_resolution)
    res = best_resolution
    # Write the silhouette record to .uns.  NOTE(review): the 'sihouette'
    # key is misspelled but kept — downstream code may read this exact key.
    adata.uns['sihouette score'] = silhouette_avg
    # Line plot of robustness score vs. resolution.
    df_sil = pd.DataFrame(silhouette_avg.values(), columns=['silhouette score'], index=[float(x) for x in silhouette_avg.keys()])
    df_sil.plot.line(style='.-', color='green', title='Auto Resolution', xticks=resolutions, xlabel='resolution', ylabel='silhouette score', legend=False)
    end = time.time()
    # BUG FIX: same literal-"{}" print as above.
    print('time: {}'.format(end-start))
    return adata, res, df_sil
def writeGEP(adata_GEP,path):
    r"""Write a cluster-labelled gene expression profile to ``<path>/GEP.txt``.

    Normalizes counts to CPM **in place** on *adata_GEP*, builds a
    genes x cells matrix whose columns are each cell's Louvain cluster
    label, drops all-NaN columns, and writes it tab-separated.

    Parameters
    ----------
    - adata_GEP : `scanpy.AnnData`
        The single cell data with gene expression profile.
    - path : `str`
        The directory in which GEP.txt will be created.

    Returns
    -------
    None (side effect: file written, adata_GEP mutated).
    """
    print('Exporting GEP...')
    sc.pp.normalize_total(adata_GEP, target_sum=1e6)
    mat = adata_GEP.X.transpose()
    # Densify sparse matrices before handing them to pandas.
    if type(mat) is not np.ndarray:
        mat = mat.toarray()
    GEP_df = pd.DataFrame(mat, index=adata_GEP.var.index)
    GEP_df.columns = adata_GEP.obs['louvain'].tolist()
    # GEP_df = GEP_df.loc[adata.var.index[adata.var.highly_variable==True]]
    GEP_df.dropna(axis=1, inplace=True)
    GEP_df.to_csv(os.path.join(path, 'GEP.txt'), sep='\t')
class Drug_Response:
    r"""
    Drug_Response class for drug response prediction.
    The raw code could be found at https://github.com/ailabstw/scDrug

    The constructor runs the whole pipeline eagerly: load model ->
    drug info -> bulk/expression preparation -> kernel features ->
    prediction -> CSV + heatmap outputs under ``self.output``.
    """
    def __init__(self,adata,scriptpath,modelpath,output='./',model='GDSC',clusters='All',
                 cell='A549',cpus=4,n_drugs=10):
        r"""
        Initializes the Drug_Response class.

        Parameters
        ----------
        - adata : `AnnData object`
            Annotated data matrix with cells as rows and genes as columns.
        - scriptpath : `str`
            Path to the directory containing the CaDRReS scripts for the analysis.
            You need to download the script according `git clone https://github.com/CSB5/CaDRReS-Sc.git`
            and set the path to the directory.
        - modelpath : `str`
            Path to the directory containing the pre-trained models.
            You need to download the model according `Pyomic.utils.download_GDSC_data()` and `Pyomic.utils.download_CaDRReS_model()`
            and set the path to the directory.
        - output : `str`, optional (default: './')
            Path to the directory where the output files will be saved.
        - model : `str`, optional (default: 'GDSC')
            The name of the pre-trained model to be used for the analysis.
        - clusters : `str`, optional (default: 'All')
            The cluster labels to be used for the analysis. Default is all cells.
        - cell : `str`, optional (default: 'A549')
            The cell line to be analyzed.  NOTE(review): `cell` and `cpus`
            are accepted but never stored or used in this class — verify.
        - cpus : `int`, optional (default: 4)
            The number of CPUs to be used for the analysis.
        - n_drugs : `int`, optional (default: 10)
            The number of top drugs to be selected based on the predicted sensitivity.

        Returns
        -------
        None
        """
        self.model = model
        self.adata=adata
        self.clusters=clusters
        self.output=output
        self.n_drugs=n_drugs
        self.modelpath=modelpath
        self.scriptpath = scriptpath
        # CaDRReS-Sc is imported from the user-supplied checkout directory.
        sys.path.append(os.path.abspath(scriptpath))
        # NOTE(review): this import rebinds the local name `model` (the
        # parameter), shadowing it — harmless here because self.model was
        # already assigned above.
        from cadrres_sc import pp, model, evaluation, utility
        self.load_model()
        self.drug_info()
        self.bulk_exp()
        self.sc_exp()
        self.kernel_feature_preparartion()
        self.sensitivity_prediction()
        if self.model == 'GDSC':
            self.masked_drugs = list(pd.read_csv(self.modelpath+'masked_drugs.csv')['GDSC'].dropna().astype('int64').astype('str'))
            self.cell_death_proportion()
        else:
            self.masked_drugs = list(pd.read_csv(self.modelpath+'masked_drugs.csv')['PRISM'])
        self.output_result()
        self.figure_output()
    def load_model(self):
        r"""
        Load the pre-trained CaDRReS model into ``self.cadrres_model``.
        """
        from cadrres_sc import pp, model, evaluation, utility
        ### IC50/AUC prediction
        ## Read pre-trained model
        #model_dir = '/Users/fernandozeng/Desktop/analysis/scDrug/CaDRReS-Sc-model/'
        model_dir = self.modelpath
        # The widget is never shown — only its default value is used, so the
        # objective function is effectively fixed to 'cadrres-wo-sample-bias'.
        obj_function = widgets.Dropdown(options=['cadrres-wo-sample-bias', 'cadrres-wo-sample-bias-weight'], description='Objetice function')
        self.model_spec_name = obj_function.value
        if self.model == 'GDSC':
            model_file = model_dir + '{}_param_dict_all_genes.pickle'.format(self.model_spec_name)
        elif self.model == 'PRISM':
            model_file = model_dir + '{}_param_dict_prism.pickle'.format(self.model_spec_name)
        else:
            sys.exit('Wrong model name.')
        self.cadrres_model = model.load_model(model_file)
    def drug_info(self):
        r"""
        Read the drug information table into ``self.drug_info_df``.
        """
        ## Read drug information
        if self.model == 'GDSC':
            self.drug_info_df = pd.read_csv(self.scriptpath + '/preprocessed_data/GDSC/drug_stat.csv', index_col=0)
            self.drug_info_df.index = self.drug_info_df.index.astype(str)
        else:
            self.drug_info_df = pd.read_csv(self.scriptpath + '/preprocessed_data/PRISM/PRISM_drug_info.csv', index_col='broad_id')
    def bulk_exp(self):
        r"""
        Load the bulk cell-line expression reference into ``self.gene_exp_df``.
        """
        ## Read test data
        if self.model == 'GDSC':
            # Prefer the uncompressed copy under the script checkout; fall
            # back to the .gz shipped with the model directory.
            files=os.listdir(self.scriptpath + '/data/GDSC')
            if 'GDSC_exp.tsv' not in files:
                self.gene_exp_df = pd.read_csv(self.modelpath + 'GDSC_exp.tsv.gz', sep='\t', index_col=0)
                self.gene_exp_df = self.gene_exp_df.groupby(self.gene_exp_df.index).mean()
            else:
                self.gene_exp_df = pd.read_csv(self.scriptpath + '/data/GDSC/GDSC_exp.tsv', sep='\t', index_col=0)
                self.gene_exp_df = self.gene_exp_df.groupby(self.gene_exp_df.index).mean()
        else:
            self.gene_exp_df = pd.read_csv(self.scriptpath + '/data/CCLE/CCLE_expression.csv', low_memory=False, index_col=0).T
            # CCLE gene names look like "TP53 (7157)" — keep only the symbol.
            self.gene_exp_df.index = [gene.split(sep=' (')[0] for gene in self.gene_exp_df.index]
    def sc_exp(self):
        r"""
        Build the per-cluster mean expression profile (``self.cluster_norm_exp_df``).
        """
        ## Load cluster-specific gene expression profile
        if self.clusters == 'All':
            clusters = sorted(self.adata.obs['louvain'].unique(), key=int)
        else:
            clusters = [x.strip() for x in self.clusters.split(',')]
        self.cluster_norm_exp_df = pd.DataFrame(columns=clusters, index=self.adata.raw.var.index)
        for cluster in clusters:
            self.cluster_norm_exp_df[cluster] = self.adata.raw.X[self.adata.obs['louvain']==cluster].mean(axis=0).T \
                                                if np.sum(self.adata.raw.X[self.adata.obs['louvain']==cluster]) else 0.0
    def kernel_feature_preparartion(self):
        r"""
        Prepare the kernel features (``self.test_kernel_df``) for prediction.

        NOTE(review): method name typo ('preparartion') is part of the public
        interface and therefore kept.
        """
        from cadrres_sc import pp, model, evaluation, utility
        ## Read essential genes list
        if self.model == 'GDSC':
            ess_gene_list = self.gene_exp_df.index.dropna().tolist()
        else:
            ess_gene_list = utility.get_gene_list(self.scriptpath + '/preprocessed_data/PRISM/feature_genes.txt')
        ## Calculate fold-change
        cell_line_log2_mean_fc_exp_df, cell_line_mean_exp_df = pp.gexp.normalize_log2_mean_fc(self.gene_exp_df)
        self.adata_exp_mean = pd.Series(self.adata.raw.X.mean(axis=0).tolist()[0], index=self.adata.raw.var.index)
        # Center each cluster profile on the dataset-wide mean expression.
        cluster_norm_exp_df = self.cluster_norm_exp_df.sub(self.adata_exp_mean, axis=0)
        ## Calculate kernel feature
        self.test_kernel_df = pp.gexp.calculate_kernel_feature(cluster_norm_exp_df, cell_line_log2_mean_fc_exp_df, ess_gene_list)
    def sensitivity_prediction(self):
        r"""
        Predict drug sensitivity (IC50 for GDSC, AUC for PRISM).
        """
        from cadrres_sc import pp, model, evaluation, utility
        ## Drug response prediction
        if self.model == 'GDSC':
            print('...Predicting drug response for using CaDRReS(GDSC): {}'.format(self.model_spec_name))
            self.pred_ic50_df, P_test_df= model.predict_from_model(self.cadrres_model, self.test_kernel_df, self.model_spec_name)
            print('...done!')
        else:
            print('...Predicting drug response for using CaDRReS(PRISM): {}'.format(self.model_spec_name))
            self.pred_auc_df, P_test_df= model.predict_from_model(self.cadrres_model, self.test_kernel_df, self.model_spec_name)
            print('...done!')
    def cell_death_proportion(self):
        r"""
        Predict cell death percentage at the reference (log2 median IC50) dosage.
        """
        ### Drug kill prediction
        ref_type = 'log2_median_ic50'
        # Drop drugs that were masked out of the GDSC model.
        self.drug_list = [x for x in self.pred_ic50_df.columns if not x in self.masked_drugs]
        self.drug_info_df = self.drug_info_df.loc[self.drug_list]
        self.pred_ic50_df = self.pred_ic50_df.loc[:,self.drug_list]
        ## Predict cell death percentage at the ref_type dosage
        # Logistic dose-response: survival = 100 / (1 + 2^-(IC50 - ref)).
        pred_delta_df = pd.DataFrame(self.pred_ic50_df.values - self.drug_info_df[ref_type].values, columns=self.pred_ic50_df.columns)
        pred_cv_df = 100 / (1 + (np.power(2, -pred_delta_df)))
        self.pred_kill_df = 100 - pred_cv_df
    def output_result(self):
        # Min-max normalize predictions per drug, label columns with a
        # (Drug ID, Drug Name) MultiIndex, and write the CSV outputs.
        if self.model == 'GDSC':
            drug_df = pd.DataFrame({'Drug ID': self.drug_list,
                                    'Drug Name': [self.drug_info_df.loc[drug_id]['Drug Name'] for drug_id in self.drug_list]})
            self.pred_ic50_df = (self.pred_ic50_df.T-self.pred_ic50_df.min(axis=1))/(self.pred_ic50_df.max(axis=1)-self.pred_ic50_df.min(axis=1))
            self.pred_ic50_df = self.pred_ic50_df.T
            self.pred_ic50_df.columns = pd.MultiIndex.from_frame(drug_df)
            self.pred_ic50_df.round(3).to_csv(os.path.join(self.output, 'IC50_prediction.csv'))
            self.pred_kill_df.columns = pd.MultiIndex.from_frame(drug_df)
            self.pred_kill_df.round(3).to_csv(os.path.join(self.output, 'drug_kill_prediction.csv'))
        else:
            drug_list = list(self.pred_auc_df.columns)
            drug_list = [d for d in drug_list if d not in self.masked_drugs]
            drug_df = pd.DataFrame({'Drug ID':drug_list,
                                    'Drug Name':[self.drug_info_df.loc[d, 'name'] for d in drug_list]})
            self.pred_auc_df = self.pred_auc_df.loc[:,drug_list].T
            self.pred_auc_df = (self.pred_auc_df-self.pred_auc_df.min())/(self.pred_auc_df.max()-self.pred_auc_df.min())
            self.pred_auc_df = self.pred_auc_df.T
            self.pred_auc_df.columns = pd.MultiIndex.from_frame(drug_df)
            self.pred_auc_df.round(3).to_csv(os.path.join(self.output, 'PRISM_prediction.csv'))
    def draw_plot(self, df, n_drug=10, name='', figsize=()):
        r"""
        Plot a drug-sensitivity heatmap and save it as ``<output>/<name>.png``.

        Parameters
        ----------
        - df : `pandas.DataFrame`
            drug response prediction dataframe
        - n_drug : `int`
            number of drugs to be plotted
        - name : `str`
            name of the plot
        - figsize : `tuple`
            size of the plot (GDSC branch only)
        """
        def select_drug(df, n_drug):
            # Union of each cluster's top-n drugs, preserving first-seen order.
            selected_drugs = []
            df_tmp = df.reset_index().set_index('Drug Name').iloc[:, 1:]
            for cluster in sorted([x for x in df_tmp.columns], key=int):
                for drug_name in df_tmp.sort_values(by=cluster, ascending=False).index[:n_drug].values:
                    if drug_name not in selected_drugs:
                        selected_drugs.append(drug_name)
            df_tmp = df_tmp.loc[selected_drugs, :]
            return df_tmp
        if self.model == 'GDSC':
            fig, ax = plt.subplots(figsize=figsize)
            sns.heatmap(df.iloc[:n_drug,:-1], cmap='Blues', \
                        linewidths=0.5, linecolor='lightgrey', cbar=True, cbar_kws={'shrink': .2, 'label': 'Drug Sensitivity'}, ax=ax)
            ax.set_xlabel('Cluster', fontsize=20)
            ax.set_ylabel('Drug', fontsize=20)
            ax.figure.axes[-1].yaxis.label.set_size(20)
            for _, spine in ax.spines.items():
                spine.set_visible(True)
                spine.set_color('lightgrey')
            plt.savefig(os.path.join(self.output, '{}.png'.format(name)), bbox_inches='tight', dpi=200)
            plt.close()
        else:
            fig, ax = plt.subplots(figsize=(df.shape[1], int(n_drug*df.shape[1]/5)))
            sns.heatmap(select_drug(df, n_drug), cmap='Reds', \
                        linewidths=0.5, linecolor='lightgrey', cbar=True, cbar_kws={'shrink': .2, 'label': 'Drug Sensitivity'}, ax=ax, vmin=0, vmax=1)
            ax.set_xlabel('Cluster', fontsize=20)
            ax.set_ylabel('Drug', fontsize=20)
            ax.figure.axes[-1].yaxis.label.set_size(20)
            for _, spine in ax.spines.items():
                spine.set_visible(True)
                spine.set_color('lightgrey')
            plt.savefig(os.path.join(self.output, '{}.png'.format(name)), bbox_inches='tight', dpi=200)
            plt.close()
    def figure_output(self):
        r"""
        Render the summary heatmaps for the chosen model.
        """
        print('...Ploting figures...')
        ## GDSC figures
        if self.model == 'GDSC':
            tmp_pred_ic50_df = self.pred_ic50_df.T
            tmp_pred_ic50_df = tmp_pred_ic50_df.assign(sum=tmp_pred_ic50_df.sum(axis=1)).sort_values(by='sum', ascending=True)
            self.draw_plot(tmp_pred_ic50_df, name='GDSC prediction', figsize=(12,40))
            tmp_pred_kill_df = self.pred_kill_df.T
            # Keep drugs predicted to kill >= 50% in EVERY cluster.
            tmp_pred_kill_df = tmp_pred_kill_df.loc[(tmp_pred_kill_df>=50).all(axis=1)]
            tmp_pred_kill_df = tmp_pred_kill_df.assign(sum=tmp_pred_kill_df.sum(axis=1)).sort_values(by='sum', ascending=False)
            self.draw_plot(tmp_pred_kill_df, n_drug=10, name='predicted cell death', figsize=(12,8))
        ## PRISM figures
        else:
            tmp_pred_auc_df = self.pred_auc_df.T
            #tmp_pred_auc_df = tmp_pred_auc_df.assign(sum=tmp_pred_auc_df.sum(axis=1)).sort_values(by='sum', ascending=True)
            self.draw_plot(tmp_pred_auc_df, n_drug=self.n_drugs, name='PRISM prediction')
        print('done!')
| Starlitnightly/omicverse | omicverse/single/_scdrug.py | _scdrug.py | py | 18,135 | python | en | code | 119 | github-code | 90 |
20818549972 | from datetime import datetime
from django.shortcuts import render
from .models import FamilyMember
def home(request):
    """Render the home page listing every registered family member."""
    members = FamilyMember.objects.all()
    return render(request, 'home.html', {'family_members': members})
def family_member_registration(request, name: str, surname: str, birthdate: str, email: str, phone_number: int):
    """
    Register a new family member and render a confirmation page.

    ``birthdate`` arrives as an ISO ``YYYY-MM-DD`` string.  It is parsed to a
    ``datetime.date`` before being stored; the original code parsed it into
    ``parsed_birthday`` but then saved the raw string instead.
    """
    parsed_birthdate = datetime.strptime(birthdate, "%Y-%m-%d").date()
    family_member = FamilyMember(name=name, surname=surname, birthdate=parsed_birthdate,
                                 email=email, phone_number=phone_number)
    family_member.save()
    context = {'family_member': family_member}
    return render(request, 'family_member_registration.html', context)
| AgusSalvidio/MVT_Salvidio | Family/views.py | views.py | py | 683 | python | en | code | 0 | github-code | 90 |
37835769805 | import os
import cv2
import numpy as np
global dstfolder
def mse(a, b):
    """Return the mean squared error between two equally-shaped arrays/images.

    The sum of squared differences must be divided by the total number of
    elements (rows * cols); the original divided by ``shape[0] ** 2``, which
    is only correct for square inputs.
    """
    err = np.sum((a.astype("float") - b.astype("float")) ** 2)
    err /= float(a.shape[0] * a.shape[1])
    return err
def judgement_class(context_2, picture_name, output_name):
    """Crop the bounding box described by one YOLO label line out of an image.

    ``context_2`` is ``"<class> <cx> <cy> <w> <h>"`` with coordinates
    normalised to [0, 1].  The crop is written to ``output_name`` and also
    returned.
    """
    image = cv2.imread(picture_name)
    height, width, _ = image.shape
    cx, cy, bw, bh = [float(v) for v in context_2.split(" ")[1:]]
    center_x = int(cx * width)
    center_y = int(cy * height)
    half_w = int(bw * width / 2)
    half_h = int(bh * height / 2)
    crop = image[center_y - half_h:center_y + half_h,
                 center_x - half_w:center_x + half_w]
    cv2.imwrite(output_name, crop)
    return crop
# Walk every annotation folder under unzip/, crop each labelled object out of
# its .PNG frame with judgement_class(), and sort the crops into
# location_dataset/ sub-folders keyed by the 4th '_'-separated token of the
# folder name.
flag = 0
for root, dir, files in os.walk("unzip/"):
    for d in dir:
        if d[-1] != 'a':
            # obj.names lists one class label per line; record the line index
            # of each label of interest (indices double as YOLO class ids).
            with open("unzip/" + d + '/obj.names') as t_xt:
                contexts = t_xt.readlines()
                for context in contexts:
                    if context == 'sink\n':
                        sink = contexts.index(context)
                    if context == 'lidopenwithobj\n':
                        lidopenwithobj = contexts.index(context)
                    if context == 'lidclose\n':
                        lidclose = contexts.index(context)
                    if context == 'lidopen\n':
                        lidopen = contexts.index(context)
                    if context == 'wash\n':
                        wash = contexts.index(context)
            for r, d1, f in os.walk("unzip/" + d + '/obj_train_data'):
                flag_init_sink = True
                flag_init_lib = True
                mselist = []  # NOTE(review): never used below -- confirm before removing
                for f1 in f:
                    if f1[-1] == 't':  # only the .txt label files
                        with open("unzip/" + d + '/obj_train_data/' + f1) as txt:
                            contexts_2 = txt.readlines()
                            # print(d.split('_')[3][0])
                            # Destination folder is chosen from the leading digit
                            # of the folder name's 4th underscore-separated token.
                            if d.split('_')[3][0] in ['1', '4']:
                                dstfolder = "location_dataset/1-4/"
                            if d.split('_')[3][0] in ['2', '3', '6']:
                                dstfolder = "location_dataset/2-3-6/"
                            if d.split('_')[3][0] == '5':
                                dstfolder = "location_dataset/5/"
                            if d.split('_')[3][0] not in ['1', '2', '3', '4', '5', '6']:
                                continue
                            # if d.split('_')[3][0] not in ['1','2','3','4','5','6']:
                            #     print(d)
                            if not os.path.isdir(dstfolder):
                                os.mkdir(dstfolder)
                            # Each label line starts with its class id; crop and
                            # save every lid/sink annotation.
                            # NOTE(review): each branch calls judgement_class twice
                            # (once for the "first seen" origin_* capture, once
                            # unconditionally) and so writes some files twice --
                            # presumably unintended, confirm before changing.
                            for context_2 in contexts_2:
                                if context_2[0] == str(lidopen):
                                    if flag_init_lib == True:
                                        origin_lib = judgement_class(context_2,
                                                                     "unzip/" + d + '/obj_train_data/' + f1[0:-4] + '.PNG',
                                                                     dstfolder + d + '_lidopen_' + f1[0:-4] + '.PNG')
                                        flag_init_lib = False
                                    judgement_class(context_2, "unzip/" + d + '/obj_train_data/' + f1[0:-4] + '.PNG',
                                                    dstfolder + d + '_lidopen_' + f1[0:-4] + '.PNG')
                                if context_2[0] == str(lidclose):
                                    if flag_init_lib == True:
                                        origin_lib = judgement_class(context_2,
                                                                     "unzip/" + d + '/obj_train_data/' + f1[0:-4] + '.PNG',
                                                                     dstfolder + d + '_lidclose_' + f1[0:-4] + '.PNG')
                                        flag_init_lib = False
                                    judgement_class(context_2, "unzip/" + d + '/obj_train_data/' + f1[0:-4] + '.PNG',
                                                    dstfolder + d + '_lidclose_' + f1[0:-4] + '.PNG')
                                if context_2[0] == str(lidopenwithobj):
                                    if flag_init_lib == True:
                                        origin_lib = judgement_class(context_2,
                                                                     "unzip/" + d + '/obj_train_data/' + f1[0:-4] + '.PNG',
                                                                     dstfolder + d + '_lidopen_' + f1[0:-4] + '.PNG')
                                        flag_init_lib = False
                                    judgement_class(context_2, "unzip/" + d + '/obj_train_data/' + f1[0:-4] + '.PNG',
                                                    dstfolder + d + '_lidopenwithobj_' + f1[0:-4] + '.PNG')
                                if context_2[0] == str(sink):
                                    # Sink crops always go to their own folder.
                                    sinklocation = 'location_dataset/sink/'
                                    if not os.path.isdir(sinklocation):
                                        os.mkdir(sinklocation)
                                    if flag_init_sink == True:
                                        origin_sink = judgement_class(context_2,
                                                                      "unzip/" + d + '/obj_train_data/' + f1[0:-4] + '.PNG',
                                                                      sinklocation + d + '_lidopenwithobj_' + f1[0:-4] + '.PNG')
                                        flag_init_sink = False
                                    judgement_class(context_2, "unzip/" + d + '/obj_train_data/' + f1[0:-4] + '.PNG',
                                                    sinklocation + d + '_lidopenwithobj_' + f1[0:-4] + '.PNG')
                        # Progress: fraction of processed label files per folder count.
                        flag += 1
    print(float(flag) / float(len(dir)))
9776444210 | from PyQt5.QtCore import Qt
from PyQt5.QtGui import QPainter
from PyQt5.QtWidgets import QFrame
from brickv.utils import draw_rect
class ColorFrame(QFrame):
    """A fixed-size frame filled with a solid colour and a 1px black border."""

    def __init__(self, width, height, color, parent=None):
        super().__init__(parent)
        self.color = color
        self.setFixedSize(width, height)

    def set_color(self, color):
        """Change the fill colour and request a repaint."""
        self.color = color
        self.update()

    def paintEvent(self, event):
        """Fill the widget with the current colour and outline it in black."""
        qp = QPainter(self)
        w, h = self.width(), self.height()
        qp.fillRect(0, 0, w, h, self.color)
        draw_rect(qp, 0, 0, w, h, 1, Qt.black)
| Tinkerforge/brickv | src/brickv/color_frame.py | color_frame.py | py | 652 | python | en | code | 18 | github-code | 90 |
24818970018 | #Imports
import time
import os
import sys
import json
#Func's
def typingPrint(text):
    """Print *text* one character at a time for a typewriter effect."""
    stream = sys.stdout
    for ch in text:
        stream.write(ch)
        stream.flush()
        time.sleep(0.05)
def typingInput(text):
    """Show *text* with a typewriter effect, then read and return one input line."""
    stream = sys.stdout
    for ch in text:
        stream.write(ch)
        stream.flush()
        time.sleep(0.05)
    return input()
def typingAscii(text):
    """Print *text* one character at a time, slightly slower than typingPrint."""
    stream = sys.stdout
    for ch in text:
        stream.write(ch)
        stream.flush()
        time.sleep(0.08)
def save():
    """Serialize the current (module-global) game state to ``load.json``."""
    state = {
        "pet_name": pet_name,
        "energy": energy,
        "hunger": hunger,
        "health": health,
        "training": training,
        "pet_food": pet_food,
        "money": money,
        "animal": animal
    }
    with open("load.json", "w+") as f:
        f.write(json.dumps(state, indent=4))
#Var's -- mutable game state shared by save()/load and the main loop below
health = 'good'        # pet health descriptor shown to the player
energy = 8             # 0-10 scale; walking/playing costs 3, resting restores 2
hunger = 0             # 0 = full, 5 = starving
training = 0           # training progress (max 10)
pet_food = 10          # units of food in stock; one feeding costs 5
things_at_store = ('bone', 'yarn', 'sunken boat', 'mini cave', 'fake tree')  # toy shop stock
done_choosing = False  # loop guard for the pet-selection prompt
choice = 0             # player's current menu selection
choice2 = 0            # NOTE(review): never read anywhere visible -- confirm before removing
money = 100            # player currency; food costs 10
animal = 'None'        # chosen pet species
print("------------------------------------------------------------------------------------------")
words = ("""
888
8888b. .d8888b .d8888b 888 888 .d8888b 88888b. .d88b. .d88b. 88888b.
"88b 88K d88P" 888 888 88K 888 "88b d8P Y8b d8P Y8b 888 "88b
.d888888 "Y8888b. 888 888 888 "Y8888b. 888 888 88888888 88888888 888 888
888 888 X88 Y88b. 888 888 X88 888 888 Y8b. Y8b. 888 d88P
"Y888888 88888P' "Y8888P 888 888 88888P' 888 888 "Y8888 "Y8888 88888P"
888
888 """)
for line in words.split('\n'):
time.sleep(0.2)
sys.stdout.write(line + '\n')
sys.stdout.flush()
print("------------------------------------------------------------------------------------------")
time.sleep(1)
typingPrint('Enter pet:')
print("""
""")
typingPrint('1.Sheep')
print("""
""")
#Choose a pet, repeat until a valid choice is entered.
while done_choosing == False:
choice = typingInput('Which do you choose? (enter the number)')
if choice == '1':
animal = 'Sheep'
done_choosing = True
else:
typingPrint ('Sorry, that is not a choice. Please enter something else.')
#Name pet
pet_name = typingInput ("What do you want to name your pet? ")
print ('Okay, you now have a', animal ,'named', pet_name + '.')
print('')
print ('Your', animal ,'is at', health, 'health right now. You can check it at any time.')
#list choices -- printed once; the loop below reads a number each turn
print('')
typingPrint('1.Feed your pet')
print('')
typingPrint('2.Buy more food')
print('')
typingPrint('3.Take your pet for a walk')
print('')
typingPrint('4.Play a game with your pet')
print('')
typingPrint('5.Train your pet')
print('')
typingPrint('6.Rest and check stats (pet health, money, etc.)')
print('')
typingPrint('7.Buy a toy for your pet')
print('')
typingPrint("8.Save Game")
print('')
typingPrint("9.Load Game")
#forever loop of things to do
while True:
    print('')
    choice = typingInput('What would you like to do?')
    #Feed your pet -- costs 5 food, reduces hunger by 1
    # NOTE(review): the `pet_food > 5` guard means the last 5 units can never
    # be used -- presumably `>= 5` was intended; confirm before changing.
    if choice == '1':
        if pet_food > 5:
            if hunger > 0:
                pet_food -= 5
                hunger -= 1
                print("------------------------------------------------------------------------------------------")
                typingPrint('Your pet has been fed!')
                print("------------------------------------------------------------------------------------------")
                print('You now have ', pet_food, ' pet food, and your pets remaining hunger is at ', hunger, '.')
            else:
                print("------------------------------------------------------------------------------------------")
                print(pet_name, 'waits next to the food, not eating.')
        else:
            print("------------------------------------------------------------------------------------------")
            typingPrint("You'll need to get some more food first...")
    #Buy more food -- 10 money for 5 food (silently ignored if too poor)
    elif choice == '2':
        if money > 9:
            money -= 10
            pet_food += 5
            print("------------------------------------------------------------------------------------------")
            print('Food bought! Money = ', money, 'Pet food = ', pet_food)
            print("------------------------------------------------------------------------------------------")
    #Take pet for walk -- needs >5 energy; costs 3 energy, adds 1 hunger
    elif choice == '3':
        if animal == 'Sheep':
            if energy > 5:
                energy -= 3
                hunger += 1
                print('You go for a nice walk with ' + pet_name + '. Your pet now has', energy, 'energy and', hunger, 'hunger.')
            else:
                print('Your', animal, 'seems a bit too tired to go for a walk today.')
        else:
            print('Your', animal, 'stares at you like you are crazy.')
    #Play a game -- costs 3 energy, adds 2 hunger (no message when too tired)
    elif choice == '4':
        if energy > 5:
            energy -= 3
            hunger += 2
            print('You play with ' + pet_name + '!', pet_name,("now has"), energy, 'energy and', hunger, 'hunger.')
    #Train your pet -- not implemented yet
    elif choice == '5':
        print('')
    #Rest your pet and check stats -- also restores 2 energy
    elif choice == '6':
        print('')
        typingPrint ('Okay, here are the stats.')
        print('Health:', health)
        print ('Pet energy (0-10):', energy ,)
        print ('Hunger (0 = full 5 = starving):', hunger ,)
        print ('Training (max 10):', training ,)
        print('Pet food:', pet_food)
        print ('Money:', money ,)
        energy += 2
    #Buy a toy -- only lists the stock; purchasing not implemented
    elif choice == '7':
        print('Here are the items at the store:', things_at_store)
    #Save -- dumps the globals to load.json
    elif choice == '8':
        save()
    #Load
    # NOTE(review): the loaded dict is never copied back into the game
    # globals, so loading currently has no effect -- confirm intent.
    elif choice == '9':
        with open('load.json', 'r') as f:
            game_variables = json.load(f)
    #Input doesn't match any choices
    else:
        typingPrint ('Sorry, that is not a choice. Please enter something else.')
32998558011 | from overrides import overrides
from allennlp.common.util import JsonDict
from allennlp.data import Instance
from allennlp.predictors.predictor import Predictor
@Predictor.register('sharc_predictor')
class ShARCPredictor(Predictor):
    """
    Predictor for the :class:`~allennlp.models.bidaf.BidirectionalAttentionFlow` model.
    """

    @overrides
    def _json_to_instance(self, json_dict: JsonDict) -> Instance:
        """
        Expects JSON with ``snippet``, ``question``, ``scenario`` and
        ``history`` keys and turns it into a dataset-reader Instance.
        """
        return self._dataset_reader.text_to_instance(
            json_dict['snippet'],
            json_dict['question'],
            json_dict['scenario'],
            json_dict['history'],
        )
74431912296 | from librep.embeddings_eval.embeddings_eval_base import *
import tabulate
import sklearn.metrics
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
import numpy as np
class RF_KNN_SVM_Scores_Result(Evaluation_Result_Text): pass
class RF_KNN_SVM_Scores(Embedding_Evaluator_Base_Class):
    """Evaluate an embedding by fitting three classic classifiers on it and
    reporting macro-F1 and accuracy on the test subset."""

    evaluator_name = "RF_KNN_SVM_Scores"
    evaluator_description = "RF_KNN_SVM_Scores: Trains RF, KNN, and SVM machine learning models using " + \
                            "the ds.train data subset and report the f1-score and accuracy metrics using " + \
                            "the ds.test data subset. "

    # Classifiers to fit, with a human-readable description for the report.
    # NOTE: class-level attribute, so model instances are shared by all
    # evaluator instances.
    models_to_evaluate = [
        {
            "model" : SVC(C=3.0, kernel="rbf"),
            "desc" : "SVC(C=3.0, kernel=\"rbf\")"
        },
        {
            "model" : KNeighborsClassifier(n_neighbors=1),
            "desc" : "KNeighborsClassifier(n_neighbors=1)"
        },
        {
            "model" : RandomForestClassifier(n_estimators=100),
            "desc" : "RandomForestClassifier(n_estimators=100)"
        }
    ]

    def eval_model(self, model, X, y_test):
        """Return ``(macro_f1, accuracy)`` of a fitted *model* on ``(X, y_test)``."""
        predictions = model.predict(X)
        macro_f1 = float(sklearn.metrics.f1_score(y_test, predictions, average='macro'))
        accuracy = float(sklearn.metrics.accuracy_score(y_test, predictions))
        return (macro_f1, accuracy)

    def evaluate_embedding(self, ds : Flattenable_DataSet):
        """Fit each configured model on train+validation and tabulate test metrics."""
        X_train = np.concatenate((ds.train.get_X(), ds.validation.get_X()))
        y_train = np.concatenate((ds.train.get_y(), ds.validation.get_y()))
        X_test = ds.test.get_X()
        y_test = ds.test.get_y()

        rows = [("Model description", "f1-score", "accuracy")]
        for entry in self.models_to_evaluate:
            model = entry["model"]
            model.fit(X_train, y_train)
            f1_score, acc = self.eval_model(model, X_test, y_test)
            description = entry["desc"] if "desc" in entry else str(model)
            rows.append((description,
                         "{0:.2f}%".format(100*f1_score),
                         "{0:.2f}%".format(100*acc)))
        table_text = tabulate.tabulate(rows, headers="firstrow",
                                       colalign=("left", "right", "right"))
        return RF_KNN_SVM_Scores_Result(table_text)
42300580697 | from pylab import *
import inspect
import os
import time
import vray
import path_utils
# Locate the bundled empty test scene relative to this source file.
current_source_file_path = path_utils.get_current_source_file_path(frame=inspect.currentframe())
vrscene_file = os.path.abspath(os.path.join(current_source_file_path, "..", "..", "..", "examples", "00_empty_scene", "empty.vrscene"))

# create renderer
renderer = vray.VRayRenderer()

# create logging callback; any log line starting with "Failed" is taken as
# evidence the AppSDK is misconfigured
def log_msg(renderer, message, level, instant):
    global fail_message
    if message.startswith("Failed"):
        fail_message = message

fail_message = None
renderer.setOnLogMessage(log_msg)
renderer.load(vrscene_file)
time.sleep(0.5)  # give the async load a moment to emit log messages
renderer.close()

# Non-None fail_message means the callback saw a "Failed..." log line.
if fail_message is not None:
    print("\n[HYPERSIM: _CHECK_VRAY_APPSDK_INSTALL] The V-Ray AppSDK is not configured correctly on your system: " + fail_message + "\n")
    exit(-1)
| apple/ml-hypersim | code/python/tools/_check_vray_appsdk_install.py | _check_vray_appsdk_install.py | py | 854 | python | en | code | 1,500 | github-code | 90 |
1990188255 | import asyncio
from typing import Any
class AsyncFunWrapper:
    """Wrap a blocking callable so that calling it yields an awaitable.

    Overloading ``__call__`` means we do not need to mirror every member
    function of the wrapped object one by one: attribute access elsewhere
    hands out one of these wrappers, and awaiting the call runs the blocking
    function on the event loop's default thread-pool executor.
    """

    def __init__(self, blocked_fun) -> None:
        super().__init__()
        # Keep the blocking callable for later dispatch to the executor.
        self._blocked_fun = blocked_fun

    def __call__(self, *args, **kwargs):
        """Schedule the blocking call on the default executor.

        Returns an awaitable resolving to the call's result.  Keyword
        arguments are supported by closing over them — ``run_in_executor``
        itself only forwards positional arguments, so the original wrapper
        silently dropped support for kwargs.
        """
        return asyncio.get_running_loop().run_in_executor(
            None,
            lambda: self._blocked_fun(*args, **kwargs)
        )
class AIOWrapper:
    """Give a blocking file-like object an asynchronous method surface.

    Every attribute access on the wrapper happens in two steps: fetch the
    member from the wrapped blocking object, then hand it back wrapped in an
    :class:`AsyncFunWrapper` so that calling it (``await wrapper.read()``)
    runs the blocking call on the executor.
    """

    def __init__(self, blocked_file_io) -> None:
        super().__init__()
        # Remember the wrapped blocking IO object; __getattribute__ below
        # resolves all member lookups against it.
        self._blocked_file_io = blocked_file_io

    def __getattribute__(self, name: str) -> Any:
        # Bypass our own override to reach the stored object, then wrap the
        # requested member so that invoking it becomes asynchronous.
        target = super().__getattribute__("_blocked_file_io")
        return AsyncFunWrapper(target.__getattribute__(name))
async def open_async(*args) -> AIOWrapper:
    """Open a file without blocking the event loop.

    The blocking built-in :func:`open` runs on the default executor with the
    caller's arguments forwarded, and the resulting file object is returned
    wrapped in an :class:`AIOWrapper`.
    """
    loop = asyncio.get_running_loop()
    blocking_file = await loop.run_in_executor(None, open, *args)
    return AIOWrapper(blocking_file)
| GitHub-WeiChiang/main | Asyncio/Chapter1/aiofile.py | aiofile.py | py | 2,084 | python | zh | code | 7 | github-code | 90 |
4388702393 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""投诉建议页面"""
__author__ = 'kejie'
from appium.webdriver.common.mobileby import MobileBy
from page_object.base_page import BasePage
class ComplaintSuggestionPage(BasePage):
    """Page object for the complaint/suggestion screen."""
    # Locator of the "+ add complaint/suggestion" button
    add_complaint_suggestion_button_loc = (MobileBy.ACCESSIBILITY_ID, '+ 添加投诉建议')
    # Locator of the "department suggestion/complaint" choice item
    complaint_choice_loc = (MobileBy.IOS_PREDICATE, 'type == "XCUIElementTypeButton" AND name == "部门建议投诉"')

    # Open the "add complaint/suggestion" page
    def open_add_complaint_suggestion_page(self):
        """Tap the add button, then pick the department complaint type."""
        self.tap_element(self.add_complaint_suggestion_button_loc)
        self.tap_element(self.complaint_choice_loc)
| reach950/hangzhoubanshi-uitest-iOS | page_object/mine/complaint_suggestion/complaint_suggestion_page.py | complaint_suggestion_page.py | py | 723 | python | en | code | 0 | github-code | 90 |
39241813827 | import wx
import inspect
import os
# Unique wx widget ids for the Row panel and its child controls, generated
# once at import time.
[wxID_ROW, wxID_ROWDELETE, wxID_ROWGO, wxID_ROWLABEL, wxID_ROWSET, wxID_ROWX,
 wxID_ROWY,
] = [wx.NewId() for _init_ctrls in range(7)]
class Row(wx.Panel):
    '''One row of settings in a wxmtxy table.

    Layout: [delete] [label] [set] [x] [y] [go].  All user actions are
    forwarded to the owning Tab through ``tabCallback(self, action)``.
    The ``_init_*`` methods were generated by Boa Constructor.
    '''
    def _init_coll_sizer_Items(self, parent):
        # generated method, don't edit
        parent.AddWindow(self.delete, 0, border=0, flag=0)
        parent.AddWindow(self.label, 0, border=0, flag=0)
        parent.AddWindow(self.set, 0, border=0, flag=0)
        parent.AddWindow(self.x, 0, border=0, flag=0)
        parent.AddWindow(self.y, 0, border=0, flag=0)
        parent.AddWindow(self.go, 0, border=0, flag=0)
    def _init_sizers(self):
        # generated method, don't edit
        self.sizer = wx.BoxSizer(orient=wx.HORIZONTAL)
        self._init_coll_sizer_Items(self.sizer)
        self.SetSizer(self.sizer)
    def _init_ctrls(self, prnt):
        # generated method, don't edit
        wx.Panel.__init__(self, id=wxID_ROW, name='Row', parent=prnt,
              pos=wx.Point(51, 84), size=wx.Size(312, 25),
              style=wx.TAB_TRAVERSAL)
        self.SetClientSize(wx.Size(312, 25))
        self.SetMinSize(wx.Size(312, 25))
        self.delete = wx.BitmapButton(
            id=wxID_ROWDELETE, name='delete',
            parent=self, pos=wx.Point(0, 0), size=wx.Size(24, 24),
            style=wx.BU_AUTODRAW)
        self.delete.Bind(wx.EVT_BUTTON, self.OnDeleteButton, id=wxID_ROWDELETE)
        self.delete.SetToolTipString(u'Delete this row')
        self.label = wx.TextCtrl(id=wxID_ROWLABEL, name='label', parent=self,
              pos=wx.Point(24, 0), size=wx.Size(80, 25), style=0, value='')
        self.label.SetMinSize(wx.Size(80, 25))
        self.label.SetToolTipString(u'Description of this row')
        self.set = wx.BitmapButton(
            id=wxID_ROWSET, name='set', parent=self,
            pos=wx.Point(104, 0), size=wx.Size(24, 24), style=wx.BU_AUTODRAW)
        self.set.Bind(wx.EVT_BUTTON, self.OnSetButton, id=wxID_ROWSET)
        self.set.SetToolTipString(u'Copy current X,Y readback values to this row')
        self.x = wx.TextCtrl(id=wxID_ROWX, name='x', parent=self,
              pos=wx.Point(128, 0), size=wx.Size(80, 25), style=0, value='')
        self.x.SetMinSize(wx.Size(80, 25))
        self.x.SetToolTipString(u'X axis target position')
        self.y = wx.TextCtrl(id=wxID_ROWY, name='y', parent=self,
              pos=wx.Point(208, 0), size=wx.Size(80, 25), style=0, value='')
        self.y.SetMinSize(wx.Size(80, 25))
        self.y.SetToolTipString(u'Y axis target position')
        self.go = wx.BitmapButton(
            id=wxID_ROWGO, name='go', parent=self,
            pos=wx.Point(288, 0), size=wx.Size(24, 24), style=wx.BU_AUTODRAW)
        self.go.Bind(wx.EVT_BUTTON, self.OnGoButton, id=wxID_ROWGO)
        self.go.SetToolTipString(u'Command EPICS to move motors to this X,Y position')
        self._init_sizers()
    def __init__(self, tab, tabCallback):
        '''initialize the row
        @param tab: parent object (Tab object that owns this Row object)
        @param tabCallback: callback function that takes two arguments
            (this Row instance and an action string: 'delete'/'set'/'go')
        '''
        # first, find the directory where this code is installed
        # so the bitmaps can be found
        # Note that this breaks edit ability of BoaConstructor
        root_dir = os.path.split(inspect.getsourcefile(Row))[0]
        # Load the button bitmaps from the installed graphics/ folder.
        self.bmp = {}
        for item in ['delete', 'set', 'go']:
            file = os.path.join(root_dir, 'graphics', item + '.bmp')
            self.bmp[item] = wx.Bitmap(file, wx.BITMAP_TYPE_BMP)
        self._init_ctrls(tab)
        self.delete.SetBitmapLabel(self.bmp['delete'])
        self.set.SetBitmapLabel(self.bmp['set'])
        self.go.SetBitmapLabel(self.bmp['go'])
        self.tab = tab
        self.tabCallback = tabCallback
        # sizes keep getting botched in Boa, fix them here
        self._fix_sizer(self.label, wx.GROW, 2)
        self._fix_sizer(self.x, wx.GROW, 1)
        self._fix_sizer(self.y, wx.GROW, 1)
    # ################################
    # ##       added methods       ###
    # ################################
    def _fix_sizer(self, widget, flag, proportion):
        '''sizes keep getting botched in Boa, fix them here
        @param widget: GUI object to be adjusted
        @param flag: usually wx.GROW
        @param proportion: [int]'''
        item = self.sizer.GetItem(widget)
        item.SetFlag(flag)
        item.SetProportion(proportion)
    def GetLabel(self):
        '''@return row label'''
        return self.label.GetValue()
    def SetLabel(self, text):
        '''Define the label
        @param text: [string] user description of this row'''
        self.label.SetValue(text)
    def GetXY(self):
        '''@return X, Y values as a tuple'''
        x = self.x.GetValue()
        y = self.y.GetValue()
        return x, y
    def SetXY(self, x, y):
        '''Define the values
        @param x: [float] X axis position to remember
        @param y: [float] Y axis position to remember'''
        self.x.SetValue(x)
        self.y.SetValue(y)
    def DeleteRow(self, parent):
        '''Tell parent to delete this row (may be tricky)
        @param parent: object of Tab that owns this Row'''
        self.tabCallback(self, 'delete')
    def SetPositions(self, parent):
        '''Tell parent to set positions on this row
        @param parent: object of Tab that owns this Row'''
        self.tabCallback(self, 'set')
    def Go(self, parent):
        '''Tell parent to move motors to this X,Y
        @param parent: object of Tab that owns this Row'''
        self.tabCallback(self, 'go')
    # ################################
    # ##  event handling routines  ###
    # ################################
    def OnDeleteButton(self, event):
        '''Delete button pressed
        @param event: wxPython event object'''
        self.DeleteRow(self.tab)
    def OnSetButton(self, event):
        '''Set button pressed
        @param event: wxPython event object'''
        self.SetPositions(self.tab)
    def OnGoButton(self, event):
        '''Go button pressed
        @param event: wxPython event object'''
        self.Go(self.tab)
| APS-USAXS/wxmtxy | wxmtxy_row.py | wxmtxy_row.py | py | 6,277 | python | en | code | 1 | github-code | 90 |
6328160269 | #!/usr/bin/python3
""" Entry point of the command interpreter """
import cmd
from models import storage
from models.base_model import BaseModel
from models.user import User
from models.place import Place
from models.city import City
from models.amenity import Amenity
from models.state import State
from models.review import Review
import json
import shlex
class HBNBCommand(cmd.Cmd):
    """Command processor for the AirBnB clone console."""
    prompt = "(hbnb) "
    # Model class names the console can operate on.
    l_classes = ['BaseModel', 'User', 'Amenity',
                 'Place', 'City', 'State', 'Review']
    # Commands accepted in the <Class>.<command>(<args>) syntax.
    l_c = ['create', 'show', 'update', 'all', 'destroy', 'count']
    def precmd(self, arg):
        """parses command input

        Rewrites ``<Class>.<command>(<args>)`` into the plain
        ``<command> <Class> <args>`` form understood by the do_* handlers;
        any other input is returned unchanged.
        """
        if '.' in arg and '(' in arg and ')' in arg:
            cls = arg.split('.')
            cnd = cls[1].split('(')
            args = cnd[1].split(')')
            if cls[0] in HBNBCommand.l_classes and cnd[0] in HBNBCommand.l_c:
                arg = cnd[0] + ' ' + cls[0] + ' ' + args[0]
        return arg
    def help_help(self):
        """ Prints help command description """
        print("Provides description of a given command")
    def emptyline(self):
        """do nothing when empty line"""
        pass
    def do_count(self, cls_name):
        """counts number of instances of a class"""
        count = 0
        all_objs = storage.all()
        # Storage keys look like "<ClassName>.<id>".
        for k, v in all_objs.items():
            clss = k.split('.')
            if clss[0] == cls_name:
                count = count + 1
        print(count)
    def do_create(self, type_model):
        """ Creates an instance according to a given class """
        if not type_model:
            print("** class name missing **")
        elif type_model not in HBNBCommand.l_classes:
            print("** class doesn't exist **")
        else:
            # Map the class name to its constructor, instantiate and persist.
            dct = {'BaseModel': BaseModel, 'User': User, 'Place': Place,
                   'City': City, 'Amenity': Amenity, 'State': State,
                   'Review': Review}
            my_model = dct[type_model]()
            print(my_model.id)
            my_model.save()
    def do_show(self, arg):
        """ Shows string representation of an instance passed """
        if not arg:
            print("** class name missing **")
            return
        args = arg.split(' ')
        if args[0] not in HBNBCommand.l_classes:
            print("** class doesn't exist **")
        elif len(args) == 1:
            print("** instance id missing **")
        else:
            # Linear scan over all stored objects for a class/id match;
            # the id may arrive quoted, hence strip('"').
            all_objs = storage.all()
            for key, value in all_objs.items():
                ob_name = value.__class__.__name__
                ob_id = value.id
                if ob_name == args[0] and ob_id == args[1].strip('"'):
                    print(value)
                    return
            print("** no instance found **")
    def do_destroy(self, arg):
        """ Deletes an instance passed """
        if not arg:
            print("** class name missing **")
            return
        args = arg.split(' ')
        if args[0] not in HBNBCommand.l_classes:
            print("** class doesn't exist **")
        elif len(args) == 1:
            print("** instance id missing **")
        else:
            all_objs = storage.all()
            for key, value in all_objs.items():
                ob_name = value.__class__.__name__
                ob_id = value.id
                if ob_name == args[0] and ob_id == args[1].strip('"'):
                    # Remove from the storage cache and persist the deletion.
                    del value
                    del storage._FileStorage__objects[key]
                    storage.save()
                    return
            print("** no instance found **")
    def do_all(self, arg):
        """ Prints string represention of all instances of a given class """
        if not arg:
            print("** class name missing **")
            return
        args = arg.split(' ')
        if args[0] not in HBNBCommand.l_classes:
            print("** class doesn't exist **")
        else:
            all_objs = storage.all()
            list_instances = []
            for key, value in all_objs.items():
                ob_name = value.__class__.__name__
                if ob_name == args[0]:
                    list_instances += [value.__str__()]
            print(list_instances)
    def do_update(self, arg):
        """ Updates an instance based on the class name and id """
        if not arg:
            print("** class name missing **")
            return
        # Drop commas from the "update(...)" call syntax, then tokenize with
        # shlex so quoted values survive as single arguments.
        a = ""
        for argv in arg.split(','):
            a = a + argv
        args = shlex.split(a)
        if args[0] not in HBNBCommand.l_classes:
            print("** class doesn't exist **")
        elif len(args) == 1:
            print("** instance id missing **")
        else:
            all_objs = storage.all()
            for key, objc in all_objs.items():
                ob_name = objc.__class__.__name__
                ob_id = objc.id
                if ob_name == args[0] and ob_id == args[1].strip('"'):
                    if len(args) == 2:
                        print("** attribute name missing **")
                    elif len(args) == 3:
                        print("** value missing **")
                    else:
                        # args[2] = attribute name, args[3] = new value.
                        setattr(objc, args[2], args[3])
                        storage.save()
                    return
            print("** no instance found **")
    def do_quit(self, line):
        """ Quit command to exit the command interpreter """
        return True
    def do_EOF(self, line):
        """ EOF command to exit the command interpreter """
        return True
if __name__ == '__main__':
    # Start the interactive interpreter loop when run as a script.
    HBNBCommand().cmdloop()
| luischaparroc/AirBnB_clone | console.py | console.py | py | 5,636 | python | en | code | 8 | github-code | 90 |
34093976965 | nums = [1,3]
# Sum of all subset XOR totals (LeetCode 1863).
#
# The original pairwise double loop double-counted contributions: for
# nums = [1, 3] it printed 8, while the correct answer (subsets {}, {1},
# {3}, {1, 3} -> 0 + 1 + 3 + 2) is 6.  Each set bit of OR(nums) appears in
# exactly half of the 2**n subset XOR totals, so the answer is simply
# OR(nums) * 2**(n-1).
# nums = [1,3]
# nums = [5,1,6]
nums = [3, 4, 5, 6, 7, 8]


def subset_xor_sum(values):
    """Return the sum of the XOR of every subset of *values*.

    Uses the closed form ``OR(values) << (len(values) - 1)``; an empty
    input has a single empty subset with XOR total 0.
    """
    if not values:
        return 0
    combined = 0
    for v in values:
        combined |= v
    return combined << (len(values) - 1)


print(subset_xor_sum(nums))
12083985464 | import socket
import threading
import sqlite3
import datetime
from datetime import date
import sys
PORT = 2090          # TCP port the server listens on
BUF_SIZE = 2048      # recv buffer size in bytes
lock = threading.Lock()  # guards the shared client table and DB writes
clnt_imfor = []  # [[socket, id]] -- one entry per connected client
def dbcon():
    """Open the server database and return ``(connection, cursor)``."""
    connection = sqlite3.connect('serverDB.db')
    return (connection, connection.cursor())
def handle_clnt(clnt_sock):
    """Serve one connected client on its own thread.

    Receives commands until the peer disconnects (empty recv) and
    dispatches each to the matching handler function.
    """
    # Locate this client's slot in the shared table.  Entries are
    # [socket, id] lists, so the list element itself must be looked up;
    # the original called clnt_imfor.index(clnt_imfo[0]) with the bare
    # socket, which can never match a list element and raised ValueError.
    for clnt_imfo in clnt_imfor:
        if clnt_imfo[0] == clnt_sock:
            clnt_num = clnt_imfor.index(clnt_imfo)
            break  # remember the connected client's slot
    while True:
        sys.stdout.flush()  # flush any pending console output
        clnt_msg = clnt_sock.recv(BUF_SIZE)
        if not clnt_msg:
            # Peer closed the connection: unregister it under the lock.
            lock.acquire()
            delete_imfor(clnt_sock)
            lock.release()
            break
        clnt_msg = clnt_msg.decode()
        sys.stdin.flush()
        # Dispatch on the protocol prefix; payload follows the '/'.
        if 'signup' == clnt_msg:
            sign_up(clnt_sock)
        elif clnt_msg.startswith('login/'):
            clnt_msg = clnt_msg.replace('login/', '')
            log_in(clnt_sock, clnt_msg, clnt_num)
        elif clnt_msg.startswith('find_id/'):
            clnt_msg = clnt_msg.replace('find_id/', '')
            find_id(clnt_sock, clnt_msg)
        elif clnt_msg.startswith('find_pw/'):
            clnt_msg = clnt_msg.replace('find_pw/', '')
            find_pw(clnt_sock, clnt_msg)
        elif clnt_msg.startswith('myinfo'):
            send_user_information(clnt_num)
        elif clnt_msg.startswith('edit_data'):
            clnt_msg = clnt_msg.replace('edit_data', '')
            edit_data(clnt_num, clnt_msg)
        elif clnt_msg.startswith('remove'):
            remove(clnt_num)
        else:
            continue
def edit_data(clnt_num, clnt_msg):  # update user information in the database
    """Change the logged-in user's name ('_name/<v>') or password ('_pw/<v>')."""
    print(clnt_msg)
    # Resolve the user id cached for this client slot at login time.
    id = clnt_imfor[clnt_num][1]
    con, c = dbcon()
    if clnt_msg.startswith('_name/'):
        clnt_msg = clnt_msg.replace('_name/', '')
        lock.acquire()
        c.execute("UPDATE usertbl SET username = ? WHERE userid = ?", (clnt_msg, id))
        con.commit()
        lock.release()
        con.close()
    elif clnt_msg.startswith('_pw/'):
        clnt_msg = clnt_msg.replace('_pw/', '')
        lock.acquire()
        c.execute("UPDATE usertbl SET userpw = ? WHERE userid = ?", (clnt_msg, id))
        con.commit()
        lock.release()
        con.close()
    else:
        # Unknown field: nothing to update.
        con.close()
        return
def sign_up(clnt_sock):
    """Handle the registration dialogue for one client.

    Protocol: receive the desired id; reply '!NO' if it is already taken,
    otherwise '!OK'; then receive 'password/name/email/usertype' and insert
    the new row.  'Q_reg' at any prompt means the client closed the sign-up
    window and aborts registration.
    """
    con, c = dbcon()
    user_data = []
    while True:
        imfor = clnt_sock.recv(BUF_SIZE)
        imfor = imfor.decode()
        if imfor == "Q_reg":  # sign-up window closed
            con.close()
            break
        c.execute("SELECT userid FROM usertbl where userid = ?", (imfor, ))
        row = c.fetchone()
        if row is not None:
            # Id already exists -> duplicate, reject.  (The original test was
            # inverted — it rejected every *available* id — contradicting its
            # own comments.)
            clnt_sock.send('!NO'.encode())
            print('id_found_error')
            con.close()
            return
        clnt_sock.send('!OK'.encode())  # no duplicate id found
        user_data.append(imfor)
        imfor = clnt_sock.recv(BUF_SIZE)  # password/name/email/usertype
        imfor = imfor.decode()
        if imfor == "Q_reg":  # sign-up window closed mid-dialogue
            con.close()
            break
        user_data.extend(imfor.split('/'))
        # Hold the lock only around the DB write; the original acquired it
        # earlier and leaked it when the client aborted with Q_reg.
        lock.acquire()
        try:
            query = "INSERT INTO usertbl(userid, userpw, username, email, usertype) VALUES(?, ?, ?, ?, ?)"
            c.execute(query, tuple(user_data))
            con.commit()
        finally:
            lock.release()
        con.close()
        break
def log_in(clnt_sock, data, clnt_num):  # login
    """Check 'id/password' credentials; on success cache the id and send the
    user's information, otherwise report the failure to the client."""
    con, c = dbcon()
    data = data.split('/')
    user_id = data[0]
    c.execute("SELECT userpw FROM usertbl where userid=?",
              (user_id,))  # select the password column for the matching id
    user_pw = c.fetchone()  # fetch one row
    if not user_pw:  # id does not exist in the DB
        clnt_sock.send('iderror'.encode())
        con.close()
        return
    if (data[1],) == user_pw:
        # login success signal
        print("login sucess")
        # Remember the id in this client's slot for later requests.
        clnt_imfor[clnt_num].append(data[0])
        send_user_information(clnt_num)
    else:
        # login failure signal
        clnt_sock.send('!NO'.encode())
        print("login failure")
    con.close()
    return
def remove(clnt_num):  # account deletion
    """Delete the logged-in user's rows and drop the cached id for the slot."""
    con, c = dbcon()
    id = clnt_imfor[clnt_num][1]
    lock.acquire()
    c.execute("DELETE FROM usertbl WHERE userid = ?", (id,))
    c.execute("DELETE FROM Return WHERE userid = ?", (id,))
    # Drop the cached id so the slot reads as logged-out.
    clnt_imfor[clnt_num].remove(id)
    con.commit()
    lock.release()
    con.close()
def send_user_information(clnt_num):
    """Send '!OK/<username>' for the logged-in client at slot *clnt_num*."""
    con, c = dbcon()
    id = clnt_imfor[clnt_num][1]
    clnt_sock = clnt_imfor[clnt_num][0]
    # The key column of usertbl is 'userid' in every other query of this
    # module; the original selected on a non-existent 'id' column here.
    c.execute(
        "SELECT username FROM usertbl where userid=?", (id,))  # name
    row = c.fetchone()
    row = list(row)
    for i in range(0, len(row)):  # replace NULL columns with a placeholder
        if row[i] == None:
            row[i] = 'X'
    user_data = row  # name
    user_data = '/'.join(user_data)
    clnt_sock.send(('!OK/'+user_data).encode())
    con.close()
def find_id(clnt_sock, email):  # id recovery
    """Look up the user id for *email* and send it on request.

    Replies '!NO' if the email is unknown; otherwise '!OK', then waits for
    'plz_id' before sending the id ('Q_id_Find' aborts the dialogue).
    """
    con, c = dbcon()
    c.execute("SELECT userid FROM usertbl where email=?",
              (email,))  # fetch the id matching the email, if any
    id = c.fetchone()
    if id == None:  # email not in the DB -> send '!NO'
        clnt_sock.send('!NO'.encode())
        print('fail')
        con.close()
        return
    else:
        clnt_sock.send('!OK'.encode())
        msg = clnt_sock.recv(BUF_SIZE)
        msg = msg.decode()
        if msg == "Q_id_Find":  # client closed the id-recovery window
            pass
        elif msg == 'plz_id':  # client asks for the id -> send it
            id = ''.join(id)  # convert the 1-tuple row to a plain string
            clnt_sock.send(id.encode())
            print('send_id')
        con.close()
        return
def find_pw(clnt_sock, id):  # password recovery
    """Reveal the password for *id* after the client confirms the matching
    email. Protocol: '!NO' for unknown id or wrong email; '!OK' otherwise;
    'Q_pw_Find' from the client aborts, 'plz_pw' requests the password.
    """
    con, c = dbcon()
    c.execute("SELECT userpw, email FROM usertbl where userid=?",
              (id,))
    record = c.fetchone()  # (password, email) or None
    print(record)
    if record is None:
        # Unknown id: reject and stop.
        clnt_sock.send('!NO'.encode())
        print('iderror')
        con.close()
        return
    clnt_sock.send('!OK'.encode())
    client_email = clnt_sock.recv(BUF_SIZE).decode()
    if client_email == "Q_pw_Find":
        # Client cancelled the dialog.
        con.close()
        return
    if record[1] != client_email:
        # Supplied email does not match the one on record.
        clnt_sock.send('!NO'.encode())
        print('emailerror')
    else:
        clnt_sock.send('!OK'.encode())
        reply = clnt_sock.recv(BUF_SIZE).decode()
        if reply == 'plz_pw':
            clnt_sock.send(''.join(record[0]).encode())
            print('send_pw')
        # 'Q_pw_Find' or anything else: fall through and close.
    con.close()
    return
def delete_imfor(clnt_sock):  # drop a disconnected client's record
    """Remove every clnt_imfor entry whose socket equals *clnt_sock*.

    BUG FIX: the original deleted from clnt_imfor while iterating over it,
    which makes the iterator skip the element after each deletion. Walking
    the indices in reverse makes in-place deletion safe.
    """
    global clnt_cnt  # kept for interface compatibility; not modified here
    for index in reversed(range(len(clnt_imfor))):
        if clnt_sock == clnt_imfor[index][0]:
            print('exit client')
            del clnt_imfor[index]
if __name__ == '__main__': # server entry point: listen and accept clients
    # Listening TCP socket on all interfaces, port PORT (defined above).
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    sock.bind(('', PORT))
    sock.listen(5)
    while True:
        # Blocking accept; each client gets its own clnt_imfor entry.
        clnt_sock, addr = sock.accept()
        lock.acquire()
        clnt_imfor.append([clnt_sock])
        print(clnt_sock)
        lock.release()
        # One thread per client; handle_clnt serves the connection.
        t = threading.Thread(target=handle_clnt, args=(clnt_sock,))
t.start() | Education-project-3team/education_application | server.py | server.py | py | 8,756 | python | en | code | 0 | github-code | 90 |
74946853095 | # -*- coding: utf-8 -*-
"""
Problem 187 - Semiprimes
A composite is a number containing at least two prime factors. For example,
15 = 3 × 5
9 = 3 × 3
12 = 2 × 2 × 3
There are ten composites below thirty containing precisely two, not necessarily
distinct, prime factors: 4, 6, 9, 10, 14, 15, 21, 22, 25, 26.
How many composite integers, n < 10^8, have precisely two, not necessarily
distinct, prime factors?
"""
from common import primes_up_to
def solution():
    """Count composites below 10**8 with exactly two prime factors.

    For each prime p (the larger factor), count primes q <= p with
    p*q < limit. ``factors`` holds the candidate smaller factors; it only
    ever shrinks, because the admissible threshold limit/p decreases as p
    grows.

    BUG FIX: ``limit/2`` is a float under Python 3's true division; pass
    the same bound as an int via floor division (primes_up_to presumably
    expects an integer limit — confirm against common.py).
    """
    limit = 10**8
    spcount = 0
    factors = []
    for p in primes_up_to(limit // 2):
        if p*p < limit:
            # p can still pair with itself, so it joins the candidates.
            factors.append(p)
        else:
            # Drop candidates q with p*q >= limit; keep the rest.
            for ix in reversed(range(len(factors))):
                if p*factors[ix] < limit:
                    factors = factors[:ix+1]
                    break

        spcount += len(factors)

    return spcount
if __name__ == '__main__':
    # Print the answer when run as a script.
    print(solution())
| yred/euler | python/problem_187.py | problem_187.py | py | 920 | python | en | code | 1 | github-code | 90 |
32913177252 | ### python stat.py
import sys
# Compute sorted sequence lengths from a 2-lines-per-record (FASTA-like)
# file: header on even lines, sequence on odd lines.
# Usage: python stat.py <input>; writes one length per line to
# "<input>.seq.len".
# Context managers close the files even on error; stripping the newline
# (instead of blindly subtracting 1) counts a final line correctly even
# when the file lacks a trailing newline.
with open(sys.argv[1], 'r') as in_file:
    lines = in_file.readlines()

seq_lengths = sorted(
    len(lines[i + 1].rstrip('\n')) for i in range(0, len(lines), 2)
)

with open(sys.argv[1] + '.seq.len', 'w') as out_file:
    out_file.write('\n'.join(str(n) for n in seq_lengths) + '\n')
| wanghuanwei-gd/SIBS | Project/lncRNA/S1/lncRNA_UCSC/stat.py | stat.py | py | 335 | python | en | code | 0 | github-code | 90 |
13609236989 | #
# abc146 a
#
import sys
from io import StringIO
import unittest
class TestClass(unittest.TestCase):
    """Sample-input tests for resolve(), driven through stdin/stdout."""

    def assertIO(self, input, output):
        """Feed *input* to resolve() via stdin; assert stdout equals *output*."""
        saved_stdout, saved_stdin = sys.stdout, sys.stdin
        sys.stdin = StringIO(input)
        sys.stdout = StringIO()
        resolve()
        sys.stdout.seek(0)
        captured = sys.stdout.read()[:-1]  # drop the trailing newline
        sys.stdout, sys.stdin = saved_stdout, saved_stdin
        self.assertEqual(captured, output)

    def test_入力例_1(self):
        self.assertIO("SAT", "1")

    def test_入力例_2(self):
        self.assertIO("SUN", "7")
def resolve():
    """Read a weekday abbreviation from stdin and print the number of days
    until the next Sunday (inclusive count: SUN -> 7, SAT -> 1)."""
    offsets = {"SUN": 7, "MON": 6, "TUE": 5, "WED": 4, "THU": 3, "FRI": 2, "SAT": 1}
    print(offsets[input()])
if __name__ == "__main__":
# unittest.main()
resolve()
| mskt4440/AtCoder | abc146/a.py | a.py | py | 851 | python | en | code | 0 | github-code | 90 |
26208992475 | """
将benders cut生成进行展示
"""
import matplotlib.pyplot as plt
import numpy as np

# Shared arrow/label style for every cut annotation.
ANNOTATE_STYLE = dict(
    arrowprops=dict(facecolor='red', arrowstyle="->"),
    color='black', fontsize=10,
)

# One figure/axes for the whole illustration.
fig, ax = plt.subplots(figsize=(15, 10))

# Axis ranges for the (y, z) plane of the master problem.
ax.set_xlim([-100, 1100])
ax.set_ylim([1020, 1120])

# Tick spacing: 100 on the x-axis, 10 on the y-axis.
ax.set_xticks(np.arange(-100, 1101, 100))
ax.set_yticks(np.arange(1020, 1121, 10))

# Cut 1 is vertical (feasibility cut 1000 - x >= 0), so draw it separately.
ax.plot([1000, 1000], [-100, 1200], color='red')
ax.annotate('cut 1:1000 - x ≥ 0', xy=(1000, 1030), xytext=(1030, 1030),
            **ANNOTATE_STYLE)

# Optimality cuts of the form z = intercept + slope * y, drawn over
# y in [0, 1000]: (intercept, slope, color, label, arrow tip, label position).
# Data-driven loop replaces six copy-pasted plot/annotate stanzas.
optimality_cuts = [
    (1100, -0.055, 'blue', 'cut 2: 1100 - 0.055 * y = z', (900, 1050.5), (800, 1040)),
    (1055, 1.045, 'yellow', 'cut 3: 1055 + 1.045 * y >= z', (52.63, 1110), (100, 1110)),
    (1055, 0.035, 'orange', 'cut 4: 1055 + 0.035 * y >= z', (800, 1083), (750, 1090)),
    (1065, -0.005, 'black', 'cut 5: 1065 - 0.005 * y >= z', (600, 1062), (500, 1050)),
    (1058, 0.015, 'brown', 'cut 6: 1058 + 0.015 * y >= z', (650, 1067.75), (600, 1085)),
    (1061, 0.005, 'pink', 'cut 7: 1061 + 0.005 * y >= z', (300, 1062.5), (200, 1055)),
]
y_grid = np.linspace(0, 1000, 400)
for intercept, slope, color, label, tip, label_pos in optimality_cuts:
    ax.plot(y_grid, intercept + slope * y_grid, color=color)
    ax.annotate(label, xy=tip, xytext=label_pos, **ANNOTATE_STYLE)

# Title and axis labels.
ax.set_title("Benders Cuts in Example")
ax.set_xlabel("Y-axis")
ax.set_ylabel("Z-axis")

# Background grid.
ax.grid(True)

# Emphasize the coordinate axes with thicker lines.
plt.axhline(0, color='black', linewidth=2)  # bold y = 0 axis
plt.axvline(0, color='black', linewidth=2)  # bold x = 0 axis

plt.savefig("benders cuts.pdf")
plt.savefig("benders cuts.png", dpi=1200)
plt.show()
plt.show() | chiangwyz/Operation-Research-Algo | benders decomposition/benders cut illustration of example.py | benders cut illustration of example.py | py | 3,097 | python | ja | code | 2 | github-code | 90 |
18165801169 | N = int(input())
str_in = input()
# Parse the heights; N (read on the previous line) is implied by the list
# length. BUG FIX: removed a redundant first parse whose result was
# immediately overwritten by this one.
num = list(map(int, str_in.strip().split()))
hight = num[0]  # running maximum height seen so far
stools = 0
for x in range(1, len(num)):
    if hight > num[x]:
        # person x needs a stool tall enough to match the tallest so far
        stools += hight - num[x]
    elif num[x] > hight:
        hight = num[x]
print (stools) | Aasthaengg/IBMdataset | Python_codes/p02578/s839429915.py | s839429915.py | py | 293 | python | en | code | 0 | github-code | 90 |
74761193577 | # MIMO - 02 - Tipos e comparações - DESAFIO 2
# Lorde esqueceu sua senha e está usando um programa para restaurá-la. O programa verifica se a nova senha dela é diferente da antiga. Também faz com que lorde digite a nova sena duas vezes para ter certeza que está escrita corretamente. Vamos terminar esse programa.
# Stored (old) password and the candidate new password.
senha_antiga = 'hello123'
nova_senha = 'goodbye321'
# Task 1: use the inequality operator so comparar_senhas shows that the
# new password differs from the old one.
comparar_senhas = senha_antiga != nova_senha
repetir_nova_senha = 'goodbye321'
# Task 2: confirm the new password matches its repeated entry.
comparar_nova_senha = nova_senha == repetir_nova_senha
print(f'A nova senha é diferente da antiga? {comparar_senhas}.')
print(f'A nova senha foi digitada corretamente? {comparar_nova_senha}.') | dualsgo/meus-estudos | mimo_app/Desafios/02_desafio_2.py | 02_desafio_2.py | py | 834 | python | pt | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.