seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
38829265127 | """add location deets
Revision ID: 4cefc8b79e71
Revises: 7b55bb4d5cd5
Create Date: 2023-05-16 12:50:25.397006
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '4cefc8b79e71'
down_revision = '7b55bb4d5cd5'
branch_labels = None
depends_on = None
def upgrade():
    """Replace locations.website_url with an untyped-length 'website' column."""
    # ### commands auto generated by Alembic - please adjust! ###
    # NOTE(review): 'website_url' values are dropped, not copied into
    # 'website' -- confirm no data migration is needed.
    with op.batch_alter_table('locations', schema=None) as locations:
        locations.add_column(sa.Column('website', sa.String(), nullable=True))
        locations.drop_column('website_url')
    # ### end Alembic commands ###
def downgrade():
    """Reverse the upgrade: restore website_url and drop website."""
    # ### commands auto generated by Alembic - please adjust! ###
    with op.batch_alter_table('locations', schema=None) as locations:
        locations.add_column(sa.Column('website_url', sa.VARCHAR(), nullable=True))
        locations.drop_column('website')
    # ### end Alembic commands ###
| jordandc20/Vicariously_DJordan-capstone | server/migrations/versions/4cefc8b79e71_add_location_deets.py | 4cefc8b79e71_add_location_deets.py | py | 925 | python | en | code | 0 | github-code | 13 |
679773796 | import urllib2
import json
from dateutil.rrule import *
from dateutil.parser import *
# Fetch Weather Underground daily-history JSON for one station over a date
# range and save each day to its own file.  Uses urllib2: Python 2 only.
# Variables
daySt = "20140601" # start date
dayEnd = "20140602" # end date
outPath = '/Users/dtodd/Documents/Work/Weather/' # output path
station = 'KDEW' # weather station ID
api = 'b316a72d2e91b2e7' # developer API key -- hard-coded secret; consider env var/config
# Create list of dates between start and end
days = list(rrule(DAILY, dtstart=parse(daySt), until=parse(dayEnd)))
# Create daily url, fetch json file, write to disk
for day in days:
    url = 'http://api.wunderground.com/api/' + api + '/history_' + day.strftime("%Y%m%d") + '/q/' + station + '.json'
    response = urllib2.urlopen(url)
    data = json.load(response)
    with open(outPath + station + '_' + day.strftime("%Y%m%d") + '.json', 'w') as outfile:
json.dump(data, outfile) | dmofot/weather | url2json_urllib.py | url2json_urllib.py | py | 782 | python | en | code | 5 | github-code | 13 |
38231434146 | # coding=utf-8
from abc import ABCMeta, abstractmethod
from urllib.parse import urlparse
import html
try:
import chardet as chardet # as前的模块名可选:cchardet或chardet
except:
has_chardet = False
else:
has_chardet = True
from red import red
class AbPageParser(metaclass=ABCMeta):
    '''Abstract base class for forum-page (thread) parsers.'''
    # Parser classes registered via the @parser decorator.
    registered = list()
    # Encodings tried in order when decoding without chardet.
    decode_list = [
        'utf-8',
        'gb18030',
        'big5',
    ]
    # Minimum chardet confidence required to accept its encoding guess.
    threshold = 0.8
    @staticmethod
    def should_me(url):
        '''Return True if this parser should handle *url*.'''
        return False
    @staticmethod
    def get_local_processor():
        '''Return the name of the automatic (local) processor.'''
        return ''
    @staticmethod
    def get_parser(url):
        '''Return an instance of the first registered parser accepting *url*, or None.'''
        for i in AbPageParser.registered:
            if i.should_me(url):
                #print('找到解析器', i)
                return i()
        else:
            print('无法找到处理这个网址的解析器')
            return None
    @staticmethod
    def decode(byte_data, encoding=''):
        '''Decode *byte_data* to unicode; return the empty string on failure.'''
        if not encoding:
            if has_chardet:
                r = chardet.detect(byte_data)
                confidence = r['confidence']
                encoding = r['encoding']
                #print(encoding, confidence)
                # Reject low-confidence guesses rather than mis-decode.
                if confidence < AbPageParser.threshold:
                    print('编码分析器异常,编码:{0},置信度{1}.'.format(
                        encoding,
                        confidence)
                    )
                    return ''
            # Without chardet: try each encoding in decode_list in turn.
            else:
                for i in AbPageParser.decode_list:
                    try:
                        # NOTE: local `html` shadows the module-level `html` import here.
                        html = byte_data.decode(i)
                    except UnicodeError as e:
                        pass
                    else:
                        return html
                else:
                    print('无法解码')
                    return ''
        return byte_data.decode(encoding, errors='replace')
    @staticmethod
    def de_html_char(text):
        '''Undo HTML entity escaping and normalise special space characters.'''
        t = html.unescape(text)
        t = t.replace('•', '·') # gbk cannot encode the first bullet character
        t = t.replace('\xA0', ' ') # no-break space
        t = t.replace('\u3000', ' ') # CJK full-width space
        return t
    def __init__(self):
        self.url = ''
        self.html = ''
        # Encoding used by set_page() to decode the raw bytes ('' = auto-detect).
        self.encoding = ''
        self.__clear_cache()
    def __clear_cache(self):
        '''Drop all cached getter results.'''
        self.cache_pagenum = None
        self.cache_title = None
        self.cache_louzhu = None
        self.cache_nexturl = None
        self.cache_replys = None
    def set_page(self, url, byte_data):
        '''Set the current URL, decode its raw HTML, and reset the caches.'''
        self.url = url
        self.html = AbPageParser.decode(byte_data, self.encoding)
        self.__clear_cache()
    def pre_process_url(self, url):
        # Hook for subclasses to rewrite a URL before it is fetched.
        return url
    def get_hostname(self):
        '''Return the http://host part of self.url.'''
        parsed = urlparse(self.url)
        return r'http://' + parsed.netloc
    # Five caching wrappers over the abstract getters below.
    def wrap_get_page_num(self):
        if self.cache_pagenum == None:
            self.cache_pagenum = self.get_page_num()
        return self.cache_pagenum
    def wrap_get_title(self):
        if self.cache_title == None:
            self.cache_title = self.get_title()
        return self.cache_title
    def wrap_get_louzhu(self):
        if self.cache_louzhu == None:
            self.cache_louzhu = self.get_louzhu()
        return self.cache_louzhu
    def wrap_get_next_pg_url(self):
        if self.cache_nexturl == None:
            self.cache_nexturl = self.get_next_pg_url()
        return self.cache_nexturl
    def wrap_get_replys(self):
        if self.cache_replys == None:
            self.cache_replys = self.get_replys()
        return self.cache_replys
    # Five abstract getters that concrete parsers must implement.
    @abstractmethod
    def get_page_num(self):
        '''Page number'''
        pass
    @abstractmethod
    def get_title(self):
        '''Thread title'''
        pass
    @abstractmethod
    def get_louzhu(self):
        '''Thread starter (louzhu)'''
        pass
    @abstractmethod
    def get_next_pg_url(self):
        '''URL of the next page'''
        pass
    @abstractmethod
    def get_replys(self):
        '''Return a list of Reply objects'''
        pass
    def check_parse_methods(self):
        '''Smoke-test every getter against the current page; True iff all succeed.'''
        # Keep this order, because:
        # get_replys() may call get_page_num()
        # get_louzhu() may call get_replys()
        try:
            self.wrap_get_page_num()
        except Exception as e:
            print('!页面解析器出现异常,无法解析此页面')
            print('!get_page_num():', e, '\n')
            return False
        try:
            self.wrap_get_title()
        except Exception as e:
            print('!页面解析器出现异常,无法解析此页面')
            print('!get_title():', e, '\n')
            return False
        try:
            self.wrap_get_next_pg_url()
        except Exception as e:
            print('!页面解析器出现异常,无法解析此页面')
            print('!get_next_pg_url():', e, '\n')
            return False
        try:
            rpls = self.wrap_get_replys()
            if not rpls:
                raise Exception('异常:回复列表为空')
        except Exception as e:
            print('!页面解析器出现异常,无法解析此页面')
            print('!get_replys():', e, '\n')
            return False
        try:
            self.wrap_get_louzhu()
        except Exception as e:
            print('!页面解析器出现异常,无法解析此页面')
            print('!get_louzhu():', e, '\n')
            return False
        return True
# page-parser decorator
def parser(cls):
    """Class decorator: register *cls* with AbPageParser.registered."""
    if not issubclass(cls, AbPageParser):
        print('注册页面解析器时出错,{0}不是AbPageParser的子类'.format(cls))
        return cls
    if cls in AbPageParser.registered:
        print('%s already exist in pageparsers' % cls)
    else:
        AbPageParser.registered.append(cls)
    return cls
| animalize/tz2txt | tz2txt/AbPageParser.py | AbPageParser.py | py | 6,631 | python | en | code | 48 | github-code | 13 |
34385137544 | import streamlit as st
import numpy as np
import pandas as pd
# Run with: streamlit run main.py
st.title('Streamlit 超入門')
st.write('DataFrame')
# 20 rows x 3 columns of random data feeding every demo chart below.
df = pd.DataFrame(
    np.random.rand(20,3),
    columns = ['a','b','c']
)
# line chart
st.line_chart(df)
# area chart (line chart filled with colour)
st.area_chart(df)
# bar chart
st.bar_chart(df)
| mymt616/youtube-streamlit | main2.py | main2.py | py | 324 | python | ja | code | 0 | github-code | 13 |
6798458742 | import json
import requests
from pepeCSV import readCSV
from xcp_get import asset_info
# Checks if image exists at html source
def is_url_image(asset):
    """Return True if the digirare URL for *asset* serves an image.

    Issues a HEAD request and inspects the Content-Type header.
    """
    image_formats = ("image/jpg", "image/png", "image/gif", "image/jpeg")
    print("https://digirare.com/storage/rare-pepe/" + asset)
    r = requests.head("https://digirare.com/storage/rare-pepe/" + asset)
    # .get() avoids a KeyError when the server omits Content-Type.
    return r.headers.get("content-type") in image_formats
if __name__ == "__main__":
    # Build og_pepe.json: for every asset in PEPEDB.csv, fetch its XCP
    # metadata, probe digirare for the image file type, and record the URL.
    pepeJSON = []
    first = True
    PEPEDB = readCSV("./PEPEDB.csv")
    for line in PEPEDB:
        # First row is the CSV header -- skip it.
        if(first):
            print("Scanning")
            first = False
        else:
            print(line[0])
            # Make call to XCP server for json info
            pepe_info = asset_info(line[0])
            asset_url = "null"
            print(pepe_info)
            # Find image filetype at digirare
            # Iterates through each file type
            file_types = ["image/jpg", "image/png", "image/gif", "image/jpeg"]
            for type in file_types:
                # create extension to append when file is found
                extension = ""
                if(type == "image/jpg"):
                    extension = ".jpg"
                elif(type == "image/jpeg"):
                    extension = ".jpeg"
                elif(type == "image/gif"):
                    extension = ".gif"
                elif(type == "image/png"):
                    extension = ".png"
                print("trying: " + extension)
                asset_end = line[0] + extension
                if(is_url_image(asset_end)):
                    asset_url = "https://digirare.com/storage/rare-pepe/" + asset_end
                    print("Found")
                    break
            # Append img url
            try:
                pepe_info[0].update({
                    "src": asset_url
                })
            except:
                print("Asset json not found")
            # Append to pepeJSON DB
            # NOTE(review): the test file is rewritten on every row (O(n^2)
            # I/O) -- presumably a crash-recovery checkpoint; confirm.
            try:
                pepeJSON.append(pepe_info[0])
                print(pepe_info[0])
                with open("og_pepe_test.json", "w") as file:
                    json.dump(pepeJSON, file)
            except:
                print("Asset json not found to append")
    # Write pepeJSON to file
    with open("og_pepe.json", "w") as file:
        json.dump(pepeJSON, file)
| burstMembrane/Counterview | json_updater/OG_PEPES/og_json_creator.py | og_json_creator.py | py | 2,417 | python | en | code | 0 | github-code | 13 |
3185024780 | import numpy as np
import torch
from reprod_log import ReprodLogger
from transformers.models.luke import LukeForEntityClassification as Model
# from transformers.models.luke import LukeModel as Model
np.random.seed(42)  # fixed seed so the reproduction run is deterministic
if __name__ == "__main__":
    # Forward a saved fake batch through a pretrained LUKE model and log
    # the logits for cross-framework comparison (reprod_log pipeline).
    # def logger
    reprod_logger = ReprodLogger()
    model = Model.from_pretrained(
        "../../../../torch_model/luke-large-finetuned-open-entity",
    )
    model.eval()
    # read or gen fake dataset
    npzfile = np.load("../fake_data/fake_data.npz")
    keys = npzfile.files
    fake_data = {k: npzfile[k] for k in keys}
    fake_data = {k: torch.tensor(fake_data[k]) for k in keys}
    # fake_data = torch.load(fake_data)
    # forward
    # model = model.luke
    outputs = model(**fake_data, return_dict=True)
    out = outputs.logits
    # out = model(**fake_data)[0]
    # out = model(**fake_data, return_dict=True)
    #
    reprod_logger.add("logits", out.cpu().detach().numpy())
    reprod_logger.save("forward_torch.npy")
| xzk-seu/Paddle-LUKE | ReProd_Pipeline/squad/pipeline/Step1/pt_forward_luke.py | pt_forward_luke.py | py | 995 | python | en | code | 0 | github-code | 13 |
14865749719 |
import json
from flask import Flask, request, jsonify
from data import Deployment
import os
app = Flask(__name__)
@app.route('/',methods=['get'])
def index():
    """Return a hard-coded contact record as a JSON string."""
    payload = {'name': 'alice',
               'email': 'alice@outlook.com'}
    return json.dumps(payload)
@app.route('/send', methods=['post'])
def process_request():
    """Run the posted JSON payload through the Deployment pipeline."""
    payload = request.json
    deployment = Deployment()
    final_data = deployment.request(payload)
    print(payload)
    return jsonify(final_data)
if __name__ == "__main__":
app.run(debug=False)
| Ibrahemhasan15/MyRestAPI | index.py | index.py | py | 575 | python | en | code | 0 | github-code | 13 |
73671509776 | from mayavi import mlab
import numpy as np
import vtk
output = vtk.vtkFileOutputWindow()
output.SetFileName("/dev/null")
vtk.vtkOutputWindow().SetInstance(output)
def quiver3d(x, n, **kwargs):
    """Draw 3D arrows at positions *x* (N, 3) with directions *n* (N, 3)."""
    px, py, pz = x[:, 0], x[:, 1], x[:, 2]
    ux, uy, uz = n[:, 0], n[:, 1], n[:, 2]
    return mlab.quiver3d(px, py, pz, ux, uy, uz, **kwargs)
def points3d(x, **kwargs):
    """Scatter-plot 3D points; accepts a single point (1-D) or (N, 3) array."""
    if 'color' in kwargs:
        # mlab wants the color as a tuple, not a list/array.
        kwargs['color'] = tuple(kwargs['color'])
    # if it's a single point, just make it work
    if x.ndim == 1:
        x = np.reshape(x, (1, -1))
    return mlab.points3d(
        x[:, 0],
        x[:, 1],
        x[:, 2],
        **kwargs
    )
def triangular_mesh(pts, faces, **kwargs):
    """Render a triangle mesh from (N, 3) vertices and face index triples."""
    xs, ys, zs = pts[:, 0], pts[:, 1], pts[:, 2]
    return mlab.triangular_mesh(xs, ys, zs, faces, **kwargs)
def mesh(mesh, **kwargs):
    """Render a (vertices, faces) pair via triangular_mesh()."""
    vertices, faces = mesh[0], mesh[1]
    return triangular_mesh(vertices, faces, **kwargs)
def line(a, b, colors=None):
    """Draw straight segments from each point in *a* to the matching point
    in *b*; *colors* is an optional per-segment RGB tuple list (default white)."""
    # Coerce inputs to float ndarrays before iterating.
    a = np.array(a) * np.ones(1)
    b = np.array(b) * np.ones(1)
    if colors is None:
        colors = [(1.0, 1.0, 1.0)] * len(a)
    for n, (start, end) in enumerate(zip(a, b)):
        mlab.plot3d(
            [start[0], end[0]],
            [start[1], end[1]],
            [start[2], end[2]],
            color=tuple(colors[n])
        )
def color_points3d(x, scalars, **kwargs):
    """Scatter 3D points coloured by *scalars* (one value per point)."""
    nodes = points3d(x, **kwargs)
    nodes.glyph.scale_mode = 'scale_by_vector'
    # Uniform vectors keep glyph size constant while scalars drive colour.
    if 'scale_factor' in kwargs.keys():
        nodes.mlab_source.dataset.point_data.vectors = np.ones(x.shape) * kwargs['scale_factor']
    nodes.mlab_source.dataset.point_data.scalars = scalars
    return nodes
def update(mlab_widget, x, **kwargs):
    """Push new (N, 3) coordinates (plus any extra arrays) into an existing mlab source."""
    mlab_widget.mlab_source.set(x=x[:, 0], y=x[:, 1], z=x[:, 2], **kwargs)
def show(axis_scale=1.0):
    """So you don't have to import mlab."""
    # Draw red/green/blue axis lines of length axis_scale, then block in mlab.
    diag = np.diag([1.0, 1.0, 1.0])
    line(np.zeros((3, 3)), diag * axis_scale, colors=diag)
    mlab.show()
| jpanikulam/python_pointclouds | visualize.py | visualize.py | py | 1,910 | python | en | code | 1 | github-code | 13 |
11534537302 | import pystray
from time import sleep
from PIL import Image, ImageDraw
from threading import Thread
import subprocess
import json
import argparse
def red_image():
    """Build a solid red 64x64 tray-icon image (error state)."""
    image = Image.new('RGB', (64, 64), 'red')
    dc = ImageDraw.Draw(image)
    # NOTE(review): the rectangle repaints the fill colour the image was
    # created with and looks redundant -- confirm before removing.
    dc.rectangle((0, 0, 64, 64), fill='red')
    return image
def green_image():
    """Build a solid green 64x64 tray-icon image (healthy state)."""
    image = Image.new('RGB', (64, 64), 'green')
    dc = ImageDraw.Draw(image)
    # NOTE(review): redundant repaint, same as red_image() -- confirm.
    dc.rectangle((0, 0, 64, 64), fill='green')
    return image
def get_output_from_cli(workflow_to_check=""):
    """Run `vtctlclient Workflow <name> show`, append each shard's GTID
    position to the global shards_pos history, and return True when any
    shard stream reports the Error state."""
    command = "vtctlclient --server 127.0.0.1:15999 Workflow %arg1 show"
    command = command.replace("%arg1", workflow_to_check)
    p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
    workflow_show2 = (p.communicate())
    workflow_obj = json.loads(workflow_show2[0])
    shards = list(workflow_obj["ShardStatuses"].keys())
    # First call: create an empty position history per shard.
    if len(shards_pos) == 0:
        for every_shard in shards:
            shards_pos[every_shard] = []
        print(shards_pos)
    fail_bl = False
    global i  # NOTE(review): declared but `i` is never used in this function
    for every_shard in shards:
        shard_details = workflow_obj["ShardStatuses"][every_shard]
        shard_state = shard_details["PrimaryReplicationStatuses"][0]["State"]
        shard_gtids = shard_details["PrimaryReplicationStatuses"][0]["Pos"]
        # Store the GTID set split per source UUID for rate computation later.
        shards_pos[every_shard].append(shard_gtids.split(','))
        if shard_state == "Error":
            fail_bl = True
            shard_message = shard_details["PrimaryReplicationStatuses"][0]["Message"]
    # Prints the state/message of the LAST iterated shard when any errored.
    if fail_bl:
        print(shard_state, shard_message)
        pass
    return fail_bl
def get_short_shardname(shard):
    """Return everything before the first '/' of a 'keyspace/shard' name."""
    return shard[:shard.find("/")]
def check_gtids():
    """Compute per-shard GTID growth over the last 10 samples (~60 s at a
    6 s poll interval) and refresh the tray-icon menu with the rates."""
    status = []
    update_icon = False
    for every_shard in list(shards_pos.keys()):
        list_pos = shards_pos[every_shard]
        # Need at least 11 samples to compare "now" against 10 polls ago.
        if (len(list_pos)) >= 11:
            current_state = list_pos[len(list_pos)-1]
            previous_state = list_pos[len(list_pos)-11]
            the_diff = find_diff(current_state, previous_state)
            shard = get_short_shardname(every_shard)
            status.append({shard: the_diff})
            update_icon = True
    if update_icon:
        # Strip list/dict punctuation so the menu shows plain text.
        printable_status = str(status).replace("[", "").replace("]", "").replace("{", "")
        printable_status = printable_status.replace("}", "").replace("'", "").replace(".0", "")
        icon.menu = pystray.Menu(
            pystray.MenuItem(
                printable_status, None, enabled=False,
            )
        )
        icon.update_menu()
    print(status)
def find_diff(actual_list, db_list):
    """Per-minute GTID delta between two GTID-set snapshots.

    Compares the trailing sequence numbers of the entries unique to each
    snapshot and returns (new - old) / 60 rounded to whole units, or the
    string "some err" when either side has no unique entry or parsing fails.
    """
    def _trailing_number(items):
        # Stringify the list and take everything after the final '-',
        # dropping the closing "']" of the repr.
        text = str(items)
        return text[text.rfind('-') + 1:-2]

    known_old = set(db_list)
    known_new = set(actual_list)
    only_new = [g for g in actual_list if g not in known_old]
    only_old = [g for g in db_list if g not in known_new]
    if only_new and only_old:
        try:
            delta = int(_trailing_number(only_new)) - int(_trailing_number(only_old))
            return round(delta / 60, 0)
        except ValueError:
            return "some err"
    return "some err"
def routined_task(workflow):
    """Poll the workflow every 6 s, colour the tray icon by health, and
    refresh the GTID-rate menu every 10th poll (~60 s).

    When *workflow* is None, auto-discover the first workflow of the
    'user' keyspace via `Workflow user listall`.
    """
    if workflow is None:
        command = "vtctlclient --server 127.0.0.1:15999 Workflow user listall"
        p = subprocess.Popen(command, stdout=subprocess.PIPE, shell=True)
        workflow_list_cli_return_bytes = p.communicate() # returns tuple, where second value is None
        workflow_list_cli_return = workflow_list_cli_return_bytes[0].decode(encoding='utf8')
        # Expected output shape:
        # Following workflow(s) found in keyspace user: move2vitess21
        delimiter = ":"
        list_start_pos = workflow_list_cli_return.find(":")
        keyspace_start_pos = workflow_list_cli_return.rfind(" ", 0, list_start_pos)
        keyspace = workflow_list_cli_return[keyspace_start_pos+1:list_start_pos]
        workflow_list_str = workflow_list_cli_return[list_start_pos+1:]
        workflow_list_str = workflow_list_str.replace('\n', ' ').replace('\r', '')
        workflow_list_str = workflow_list_str.replace(" ", "")
        workflow_list = workflow_list_str.split(",")
        workflow = keyspace+"."+workflow_list[0]
    global i
    while True:
        anyerrors = get_output_from_cli(workflow)
        if anyerrors:
            icon.icon = red_image()
        else:
            icon.icon = green_image()
        sleep(6) # in seconds
        # Every 10th poll, recompute and display the GTID rates.
        if i % 10 == 0:
            print("=====")
            icon.visible = True
            check_gtids()
        i += 1
if __name__ == '__main__':
    # Parse the optional --workflow name and start the tray-icon monitor.
    parser = argparse.ArgumentParser()
    parser.add_argument("--workflow", help="workflow name", type=str)
    args = parser.parse_args()
    workflow_name = args.workflow
    icon = pystray.Icon(
        name='Vitess Workflow Monitor',
        menu=pystray.Menu(
            pystray.MenuItem(
                "TPS, updated every 60 seconds", None, enabled=False,
            )
        ),
        icon=red_image())
    i = 0
    shards_pos = {}
    workflow_checker_routine = Thread(target=routined_task, args=[workflow_name])
    workflow_checker_routine.start()
    # NOTE(review): target=icon.run() CALLS run() here, blocking the main
    # thread, and passes its result as the thread target -- likely meant
    # Thread(target=icon.run).  Confirm before changing: pystray icons may
    # need to run on the main thread anyway.
    icon_routine = Thread(target=icon.run())
    icon_routine.start()
| Areso/vitess-workflow-monitor | moveworkflowmon.py | moveworkflowmon.py | py | 5,429 | python | en | code | 1 | github-code | 13 |
24882884895 | # I'm not a personal trainer
# It costs me mental energy to plan a workout
# So I want to automate it
# I have a structure the workouts should follow
# Other than that, I dont care
# This program is going to build my workouts for me
import random
compound_list = ["Squats", "Deadlift"]
full_list = ["Cleans", "Burpees", "Overhead Dumbell Lunge"]
legs_list = ["Pistol Squat", "Side Lunges", "Romanian Deadlift", "Barbell Lunges"]
press_list = ["Shoulder Press", "Arnold Press", "Incline Press Ups", "Side Raises", "Incline Bench", "Dumbell Bench", "Bench"]
pull_list = ["Barbell Row", "Pull Ups", "Cable Row", "Rear Delt Flys"]
core_list = ["Leg Up Row", "Hanging Leg Raises", "Romanian Twist"]
list_of_list = [compound_list, full_list, legs_list, press_list, pull_list, core_list]
def workout_randomiser(list_of_list):
    """Pick one random exercise from each category list.

    Args:
        list_of_list: sequence of non-empty exercise lists, one per category.
    Returns:
        A list with one randomly chosen exercise per category, in order.
    """
    # The original kept an `index` counter it never used and shadowed the
    # built-in `list`; a comprehension expresses the selection directly.
    return [random.choice(category) for category in list_of_list]
def workout_builder(list):
    """Format the six selected exercises into the workout template string."""
    parts = (
        "5x5: ", list[0],
        "\n30s: ", list[1], " 30s: ", list[2], " 30s Rest ",
        "\nSuperset: ", list[3], " ", list[4],
        "\nSuperset ", list[5],
    )
    return "".join(parts)
# Assemble and print today's randomly generated workout.
workout_selection = workout_randomiser(list_of_list)
print(workout_builder(workout_selection))
| oliverjallman/workout_builder | workout_builder.py | workout_builder.py | py | 1,307 | python | en | code | 0 | github-code | 13 |
def odd_occurrences():
    """Read a space-separated line from stdin and print, in order of first
    appearance, every word (case-insensitive) occurring an odd number of
    times, space-separated on one line with no trailing newline."""
    from collections import Counter  # local import: this script has no import section
    counts = Counter(word.lower() for word in input().split(' '))
    # Counter preserves insertion order, matching the original output order.
    for word, count in counts.items():
        if count % 2 == 1:
            print(word, end=' ')
odd_occurrences() | bobsan42/SoftUni-Learning-42 | ProgrammingFunadamentals/a24Dictionaries/oddoccurrences.py | oddoccurrences.py | py | 368 | python | en | code | 0 | github-code | 13 |
9070303627 | items_collection = input().split('|')
budget = float(input())
items_info = []
bought_items = []  # marked-up (x1.4) resale prices of everything purchased
# Split each "Type->price" token into a [type, price] pair.
for i in range(len(items_collection)):
    items_info.append(items_collection[i].split('->'))
for item in range(len(items_info)):
    if items_info[item][0] == 'Clothes':
        price = float(items_info[item][1])
        # Buy only if affordable and within the Clothes cap (50.00).
        if price <= min(budget, 50.00):
            budget -= price
            bought_items.append(price*1.4)
    elif items_info[item][0] == 'Shoes':
        price = float(items_info[item][1])
        # Shoes cap: 35.00.
        if price <= min(budget, 35.00):
            budget -= price
            bought_items.append(price*1.4)
    else:
        price = float(items_info[item][1])
        # Accessories (everything else) cap: 20.50.
        if price <= min(budget, 20.50):
            budget -= price
            bought_items.append(price*1.4)
# Print the marked-up prices space-separated on a single line.
for i in range(len(bought_items)):
    if i == len(bought_items) - 1:
        print(f'{bought_items[i]:.2f}')
    else:
        print(f'{bought_items[i]:.2f}', end=' ')
# Profit = resale total minus purchase total (resale = purchase * 1.4).
profit = sum(bought_items) - sum(bought_items)/1.4
print(f'Profit: {profit:.2f}')
# The trip is on when resale revenue plus leftover budget reaches 150.00.
if sum(bought_items) + budget >= 150.00:
    print('Hello, France!')
else:
    print('Time to go.')
| vbukovska/SoftUni | Python_fundamentals/Lists_basics/HelloFrance.py | HelloFrance.py | py | 1,134 | python | en | code | 0 | github-code | 13 |
23676344485 | import art
alphabet = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z','a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z']
def ceasar(userChoice, plainText, shiftAmount):
    """Shift each letter of *plainText* by *shiftAmount* positions
    (negated for decoding); non-letters pass through. Prints the result."""
    offset = -shiftAmount if userChoice == "decode" else shiftAmount
    transformed = [
        alphabet[alphabet.index(ch) + offset] if ch in alphabet else ch
        for ch in plainText
    ]
    print(f"Your {userChoice}d message is {''.join(transformed)}")
print(art.logo)
shouldContinue = True
# Interactive loop: prompt for direction, message, and shift until the
# user answers "no" to the retry prompt.
while shouldContinue:
    direction = input("Type 'encode' to encrypt, type 'decode' to decrypt:\n").lower()
    text = input("Type your message:\n").lower()
    shift = int(input("Type the shift number:\n"))
    # Always reduce the shift: the old `if shift < len(alphabet)` guard
    # skipped the modulo for shift >= 52, making ceasar() index past the
    # duplicated alphabet list and crash.
    shift = shift % 26
    ceasar(userChoice=direction, plainText=text, shiftAmount=shift)
    yesOrNo = input("Would you like to try again? Yes or No?").lower()
    if yesOrNo == "no":
        shouldContinue = False
        print("Thank you for using this cipher.")
| josesanchez45/Caesar-cipher-python | main.py | main.py | py | 1,257 | python | en | code | 0 | github-code | 13 |
33526971243 | from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
import unittest
import system.page
import time
class Checkout2SauceDemo(unittest.TestCase):
    """End-to-end saucedemo.com checkout flow through the 'finish' step."""
    def setUp(self):
        # NOTE(review): hard-coded, user-specific chromedriver path --
        # consider an environment variable or webdriver-manager.
        self.driver = webdriver.Chrome(
            "C:/Users/randa/Documents/Chromedriver.exe")
        self.driver.get("https://www.saucedemo.com")
    def test_checkoutstep2(self):
        """Log in, add two items, and drive the cart through both checkout steps."""
        login = system.page.LogInPage(self.driver)
        login.input_username()
        login.input_password()
        login.login_button()
        # Add items on the inventory page and open the cart.
        inventorypage = system.page.InventoryPage(self.driver)
        inventorypage.add_to_cart_by_name("Sauce Labs Backpack")
        inventorypage.add_to_cart_by_name("Sauce Labs Onesie")
        inventorypage.check_cart_has_item()
        inventorypage.click_cart_icon()
        # Verify cart contents before checking out.
        cartpage = system.page.CartPage(self.driver)
        print(f"\nCurrent page: {self.driver.current_url}")
        cartpage.check_cart("Sauce Labs Backpack")
        cartpage.check_cart("Sauce Labs Onesie")
        cartpage.click_checkout()
        print(f"\nCurrent page: {self.driver.current_url}")
        # Checkout step one: shipping information.
        checkoutstep1 = system.page.CheckoutStep1(self.driver)
        checkoutstep1.input_firstname()
        checkoutstep1.input_lastname()
        checkoutstep1.input_zipcode()
        checkoutstep1.click_continue()
        print(self.driver.current_url)
        # Checkout step two: confirm the order.
        checkoutstep2 = system.page.CheckoutStep2(self.driver)
        checkoutstep2.click_finish()
        print(self.driver.current_url)
    def tearDown(self):
        self.driver.close()
if __name__ == "__main__":
unittest.main()
| Asarmir/SauceDemoTestStandardUser | tests/checkout2_test.py | checkout2_test.py | py | 1,620 | python | en | code | 0 | github-code | 13 |
41644121562 | import matplotlib.pyplot as plt
import spectview.settings as settings
class PlotManager:
    """Tracks named Line2D spectra plotted on a window's axes."""
    def __init__(self, window_object):
        # window_object must expose .ax (the matplotlib Axes drawn on).
        self.window_object = window_object
        self.name_to_line2d = {}
        # Extra kwargs forwarded to every ax.plot() call.
        self.plot_setup = {}
    def add_plot(self, name, data_x, data_y):
        """Plot data under *name*; returns the Line2D, or None if already plotted."""
        # Idiom fix: membership test directly on the dict, not .keys().
        if name in self.name_to_line2d:
            print('The {} keV has been already plotted.'.format(name))
            return None
        line2d_obj, = self.window_object.ax.plot(
            data_x, data_y, **self.plot_setup
        )
        self.name_to_line2d[name] = line2d_obj
        return line2d_obj
    def remove_plot(self, name):
        """Remove the named spectrum from the axes and the registry."""
        try:
            # remove from plot
            self.window_object.ax.lines.remove(self.name_to_line2d[name])
            # remove from PlotManager registry
            del self.name_to_line2d[name]
        except KeyError:
            print('Nothing to remove.')
    def mark_plot(self, name):
        """Highlight the named spectrum with a thicker line."""
        self.name_to_line2d[name].set_linewidth(2)
    @property
    def line2d_to_name(self):
        # Reverse lookup keyed by the line's repr (idiom fix: repr(v)).
        return {repr(v): k for k, v in self.name_to_line2d.items()}
class ClickCatcher:
    """Collects left-clicked points on the axes until cancelled.

    Left click adds a point, right click removes the last one, middle
    click (or Escape) ends the marking session.
    """
    def __init__(self, window_obj):
        print('Marking mode on.')
        self.window = window_obj
        self.window.is_click_catcher_working = True
        self.cid = self.window.fig.canvas.mpl_connect('button_press_event', self)
        self.points = self.initialize_plotting()
        self.cid_key = self.window.fig.canvas.mpl_connect('key_press_event', self.key_press)
        self.data_x = []
        self.data_y = []
    def initialize_plotting(self):
        # Empty overlay line that will hold the marked points.
        return self.window.ax.plot([], [], **settings.CLICK_CATCHER_PLOT_SETUP)[0]
    def __call__(self, event):
        # ignore toolbar operations like zoom
        # NOTE(review): toolbar._active is a private attribute removed in
        # newer matplotlib releases -- confirm the pinned version.
        state = self.window.fig.canvas.manager.toolbar._active
        if state is not None:
            self.window.fig.canvas.manager.toolbar._active = None
            return None
        # left click inside the axes: record a new point
        if not event.dblclick and event.button == 1 and event.inaxes == self.window.ax:
            self.data_x.append(event.xdata)
            self.data_y.append(event.ydata)
            self.update()
            print('{:8.2f} {:8.2f}'.format(event.xdata, event.ydata))
        # middle click: stop catching points
        elif event.button == 2:
            self.disconnect()
        # right click: cancel the previous click
        elif event.button == 3:
            self.data_x.pop()
            self.data_y.pop()
            self.update()
            # Last point removed: end the session entirely.
            if not self.data_y:
                self.disconnect()
    def update(self):
        # Redraw the marker overlay with the current point list.
        self.points.set_data(self.data_x, self.data_y)
        self.window.fig.canvas.draw()
    def disconnect(self):
        # disconnect click-catching
        self.window.fig.canvas.mpl_disconnect(self.cid)
        # disconnect key binding
        self.window.fig.canvas.mpl_disconnect(self.cid_key)
        self.window.is_click_catcher_working = False
        self.remove_plot()
        print('Marking mode off.')
    def key_press(self, event):
        # Escape also ends the marking session.
        if event.key == 'escape':
            self.disconnect()
    def get_data(self):
        # Return the collected (x, y) coordinate lists.
        return self.data_x, self.data_y
    def remove_plot(self):
        # Remove the marker overlay from the axes and refresh the canvas.
        self.points.remove()
        self.window.fig.canvas.draw()
class PeakCatcher(ClickCatcher):
    """ClickCatcher variant that draws marks with the peak-marker style."""
    def initialize_plotting(self):
        return self.window.ax.plot([], [], **settings.PEAK_CATCHER_PLOT_SETUP)[0]
class SpectrumSelector:
    """Handles matplotlib pick events: picking a spectrum toggles a
    thicker 'selected' line; picking another spectrum moves the highlight."""
    def __init__(self, window_obj):
        self.window = window_obj
        self.cid = self.window.fig.canvas.mpl_connect('pick_event', self)
        self._selected_spectrum = None
        self._is_highlighted = False
    def __call__(self, event):
        # Ignore picks while a ClickCatcher owns the mouse.
        if self.window.is_click_catcher_working:
            pass
        else:
            self.selected_spectrum = event.artist
            plt.draw()
    def set_non_event_selection(self, line2d_obj):
        # For auto-selection when a spectrum is added to the plot.
        self.selected_spectrum = line2d_obj
        self.selected_spectrum.set_linewidth(1)
    @property
    def selected_spectrum(self):
        return self._selected_spectrum
    @selected_spectrum.setter
    def selected_spectrum(self, new_spectrum):
        if self._selected_spectrum == new_spectrum and not self._is_highlighted:
            # Re-picked the current spectrum: highlight it.
            self._is_highlighted = True
            self._selected_spectrum.set_linewidth(2)
        elif self._selected_spectrum == new_spectrum and self._is_highlighted:
            # Picked again while highlighted: un-highlight.
            self._is_highlighted = False
            self._selected_spectrum.set_linewidth(1)
        elif self._selected_spectrum != new_spectrum:
            # A different spectrum was picked.  The original compared
            # self._is_highlighted (a bool) against new_spectrum here,
            # which was always True; this spells out the intended
            # condition without changing behaviour.
            # NOTE(review): _is_highlighted is deliberately left unchanged
            # in this branch to preserve the original toggle behaviour.
            if self.selected_spectrum:
                self._selected_spectrum.set_linewidth(1)
            self._selected_spectrum = new_spectrum
            self._selected_spectrum.set_linewidth(2)
        self.window.selected_spectrum = self.selected_spectrum
| ewaAdamska/spectview | spectview/plot_utils.py | plot_utils.py | py | 4,959 | python | en | code | 0 | github-code | 13 |
43403763451 | from rest_framework.exceptions import AuthenticationFailed
from django.utils.translation import ugettext_lazy as _
from munch import models
from rest_framework import serializers
from django.contrib.auth.models import User
from rest_framework.validators import UniqueValidator
from csp import settings
from munch.validators.utils import *
from rest_framework import status
class UserSerializer(serializers.ModelSerializer):
    """Registration serializer: creates a Django User plus a Customer
    and/or Restaurant profile depending on the write-only boolean flags."""
    email = serializers.EmailField(required=True, validators=[UniqueValidator(queryset=User.objects.all())])
    name = serializers.CharField(required=False)
    is_customer = serializers.BooleanField(required=False, write_only=True)
    is_restaurant = serializers.BooleanField(required=False, write_only=True)
    class Meta:
        model = models.User
        fields = ('id', 'email', 'name', 'is_customer', 'is_restaurant')
    def __init__(self, validate_non_fields=False, **kwargs):
        # validate_non_fields: custom flag stored for later use by callers.
        super(UserSerializer, self).__init__(**kwargs)
        self.validate_non_fields = validate_non_fields
    def create(self, **kwargs):
        # NOTE(review): DRF's Serializer.save() invokes create(validated_data)
        # positionally; this **kwargs signature only works when create() is
        # called directly -- confirm the intended call sites.
        # Password is read from initial_data because it is not a declared field.
        user = User.objects.create_user(username=self.validated_data.get('email'), email=self.validated_data.get('email'),
                                        password=self.initial_data.get('password'))
        if self.validated_data.get('is_customer', False):
            customer = models.Customer()
            customer.user = user
            customer.name = self.initial_data.get('name')
            customer.save()
            id = customer.id
        if self.validated_data.get('is_restaurant', False):
            restaurant = models.Restaurant()
            restaurant.user = user
            restaurant.name = self.initial_data.get('name')
            restaurant.save()
            id = restaurant.id
        # NOTE(review): `id` (which also shadows the builtin) is unbound when
        # neither flag is set -- would raise UnboundLocalError; confirm that
        # at least one flag is always supplied.
        return id
| adam-codaio/munch_api | munch/serializers/user.py | user.py | py | 1,607 | python | en | code | 0 | github-code | 13 |
5825281301 | import numpy as np
import rbfnet as rn
from utilities import *
def gmm(dim, ncentres, covar_type):
    """Create a Gaussian mixture model data structure (netlab-style).

    Args:
        dim: dimensionality of the data space.
        ncentres: number of mixture components.
        covar_type: covariance structure tag stored on the model
            (e.g. 'spherical').
    Returns:
        dict with keys: 'type' ('gmm'), 'nin' (dim), 'ncentres',
        'covar_type', 'priors' (equal mixing coefficients summing to one),
        'centres' (random N(0, 1) means, one row per component),
        'covars' (unit variances), and 'nwts' (total parameter count).
    """
    return {
        'type': 'gmm',
        'nin': dim,
        'ncentres': ncentres,
        'covar_type': covar_type,
        'priors': np.ones((1, ncentres)) / ncentres,
        'centres': np.random.randn(ncentres, dim),
        'covars': np.ones((1, ncentres)),
        'nwts': ncentres + ncentres * dim + ncentres,
    }
def gmmactiv(mix, x):
    """Component activations p(x|j) for a spherical Gaussian mixture.

    Args:
        mix: mixture structure as built by gmm() ('centres', 'covars',
            'ncentres', 'nin').
        x: data matrix, one vector per row.
    Returns:
        (ndata, ncentres) array of per-component densities.
    """
    ndata = np.shape(x)[0]
    ncentres = mix['ncentres']
    # Distances between every data point and every component centre.
    # NOTE(review): kept as Euclidean norms (not squared) to match the
    # original implementation -- classic netlab uses squared distances.
    n2 = np.zeros((ndata, ncentres), dtype=np.float64)
    for i in range(ndata):
        for j in range(ncentres):
            n2[i, j] = np.linalg.norm(x[i, :] - mix['centres'][j, :])
    wi2 = np.ones((ndata, 1)) * (2 * mix['covars'])
    # Guard against zero variances before dividing.  (The original used
    # np.eps, which does not exist; this is the float64 machine epsilon.)
    wi2[wi2 == 0] = np.finfo(float).eps
    normal = (np.pi * wi2) ** (mix['nin'] / 2)
    # Gaussian density: exp(-d/wi2) divided by the normalising constant.
    # (The original divided by `normal` inside the exponent, which does
    # not yield a density.)
    return np.exp(-n2 / wi2) / normal
def gmmpost(mix,x):
    """Posterior probabilities p(j|x) of each component for every data row.

    Args:
        mix: mixture structure as built by gmm().
        x: data matrix, one vector per row.
    Returns:
        [post, a]: the (ndata, ncentres) posterior matrix and the raw
        activations from gmmactiv().
    """
    ndata = np.shape(x)[0]
    a = gmmactiv(mix, x)
    # Joint p(j, x) = prior_j * p(x|j), replicated down the rows.
    old_post = np.ones((ndata,1))*mix['priors']*a
    s = np.sum(old_post, axis = 1).reshape((-1, 1)) # s = sum(post,2)
    # The small constant avoids division by zero when all activations vanish.
    post = old_post/(np.matmul(s,np.ones((1,mix['ncentres']))) + 0.000001)
    post = np.array(post)
    return [post, a]
def gmmem(x, mix, options=None):
    """Fit the spherical Gaussian mixture ``mix`` to data ``x`` with EM.

    Each row of ``x`` is one data vector.  Runs a fixed 100 iterations
    (``options`` is kept for interface compatibility but unused) and
    returns the updated ``mix``.

    Fixes vs. the original:
    * ``gmmpost`` was called as ``gmmpost(x, mix)`` — arguments swapped;
    * the centre update needs ``post.T @ x`` (netlab: ``post' * x``), not the
      elementwise product ``post * x``;
    * ``np.linalg.norm(x - centres)`` returned a scalar, so the subsequent
      2-D indexing crashed — compute the squared-distance matrix instead;
    * ``v`` was an ndarray, which has no ``.append``.
    """
    ndata = np.shape(x)[0]
    for _ in range(100):
        # E-step: responsibilities of each component for each point.
        post, act = gmmpost(mix, x)
        # M-step: effective counts, then new priors/centres.
        new_pr = np.sum(post, axis=0)                      # (ncentres,)
        new_c = np.matmul(post.T, x)                        # (ncentres, nin)
        mix['priors'] = new_pr / ndata
        mix['centres'] = new_c / (new_pr.reshape((-1, 1)) * np.ones((1, mix['nin'])))
        if mix['covar_type'] == 'spherical':
            # Squared distances to the *updated* centres, (ndata, ncentres).
            diff = x[:, np.newaxis, :] - np.asarray(mix['centres'])[np.newaxis, :, :]
            n2 = np.sum(diff ** 2, axis=2)
            v = np.array([np.dot(post[:, j], n2[:, j])
                          for j in range(mix['ncentres'])])
            mix['covars'] = (v / new_pr) / mix['nin']
    return mix
def softmax(x):
    """Compute softmax values for each set of scores in ``x``.

    Works row-wise on 2-D input (each row sums to 1), which is what
    :func:`dmm` relies on; the original summed over the *whole* array, so
    rows of a 2-D input were not individually normalised.  The max is
    subtracted first for numerical stability (result is unchanged).
    """
    x = np.asarray(x, dtype=float)
    e = np.exp(x - np.max(x, axis=-1, keepdims=True))
    return e / np.sum(e, axis=-1, keepdims=True)
def dmm(dim, ncentres, dist_type, nvalues, a=None, b=1):
    """Create a mixture model for discrete data (analogue of ``gmm``).

    ``dim`` is the number of discrete variables, ``dist_type`` is
    ``'bernoulli'`` or ``'multinomial'`` and, for the multinomial case,
    ``nvalues[i]`` is the number of categories of variable i (the data is
    one-of-K coded, so the model has ``sum(nvalues)`` inputs).
    ``a``/``b`` are kept for interface compatibility (prior strengths,
    currently unused).

    Fixes vs. the original:
    * ``means`` must have ``nin = sum(nvalues)`` columns, not ``dim`` —
      the slice assignments past column ``dim`` raised ValueError;
    * the column offset must accumulate (``k += nvalues[i]``, the original
      reset it to ``nvalues[i]`` each iteration).
    """
    mix = {}
    mix['type'] = 'dmm'
    mix['input_dim'] = dim
    mix['ncentres'] = ncentres
    mix['dist_type'] = dist_type
    mix['priors'] = np.ones((1, ncentres)) / ncentres
    if dist_type == 'bernoulli':
        mix['nvalues'] = 1
        mix['nin'] = dim
        mix['means'] = np.random.rand(ncentres, dim)
    elif dist_type == 'multinomial':
        mix['nvalues'] = nvalues
        mix['nin'] = np.sum(nvalues)
        # One column per (variable, category) pair.
        mix['means'] = np.zeros((ncentres, mix['nin']))
        k = 0
        for i in range(len(mix['nvalues'])):
            # Each variable's block of columns is a probability distribution.
            mix['means'][:, k:k + mix['nvalues'][i]] = softmax(
                np.random.randn(ncentres, nvalues[i]))
            k += mix['nvalues'][i]
    else:
        print('unknown distribution.')
    return mix
def dmmactiv(mix, x):
    """Component-conditional likelihoods p(x|j) for a discrete mixture.

    Each row of ``x`` is one (binary / one-of-K coded) data vector; the
    result has shape ``(ndata, ncentres)``.

    Fixes vs. the original:
    * the multinomial branch computed only ``prod (1-mu)^(1-x)``, dropping
      the probability of the categories that were actually selected — for
      one-of-K data the likelihood is ``prod_k mu_jk^{x_k}``;
    * the unknown-type branch called an undefined name ``F`` (NameError);
      raise a ValueError instead.
    """
    ndata = np.shape(x)[0]
    a = np.zeros((ndata, mix['ncentres']))
    e = np.ones((ndata, 1))
    if mix['dist_type'] == 'bernoulli':
        for m in range(mix['ncentres']):
            means = np.asarray(mix['means'])[m, :].reshape((1, -1))
            a[:, m] = np.prod(
                (np.matmul(e, means) ** x) * (np.matmul(e, 1 - means) ** (1 - x)),
                1)
    elif mix['dist_type'] == 'multinomial':
        for m in range(mix['ncentres']):
            a[:, m] = np.prod((e * np.asarray(mix['means'])[m, :]) ** x, 1)
    else:
        raise ValueError('unknown distribution type.')
    return a
def dmmpost(mix, x):
    """Posterior responsibilities p(j|x) for a discrete mixture.

    Returns ``(post, a)`` where ``post[i, j]`` is the probability that
    component j generated x_i and ``a`` are the activations.

    Fix vs. the original: posteriors must be normalised *per data point*
    (sum over components, axis=1); the original summed over the data axis.
    The small constant mirrors :func:`gmmpost` and avoids division by zero.
    """
    a = dmmactiv(mix, x)
    # Broadcasts the (1, ncentres) priors over every row of ``a``.
    weighted = mix['priors'] * a
    s = np.sum(weighted, axis=1).reshape((-1, 1))
    post = weighted / (np.matmul(s, np.ones((1, mix['ncentres']))) + 0.000001)
    return post, a
| zhy1024/GGTM-Mixed-type-of-data | mixmodel.py | mixmodel.py | py | 7,592 | python | en | code | 0 | github-code | 13 |
from django.contrib.contenttypes.models import ContentType
from django.db import models
import pytest
try:
import yaml
PYYAML_AVAILABLE = True
del yaml
except ImportError:
PYYAML_AVAILABLE = False
from django.core import serializers
from .models import TypedModelManager
from .test_models import AngryBigCat, Animal, BigCat, Canine, Feline, Parrot, AbstractVegetable, Vegetable, \
Fruit, UniqueIdentifier
@pytest.fixture
def animals(db):
    """Create one animal of each type, each with a UniqueIdentifier.

    The identifier names are the lowercased animal names (the original data
    used "kajtek" for the parrot "Kajtek"); the 6x copy-pasted creation code
    is collapsed into one loop.
    """
    specs = [
        (Feline, "kitteh"),
        (Feline, "cheetah"),
        (Canine, "fido"),
        (BigCat, "simba"),
        (AngryBigCat, "mufasa"),
        (Parrot, "Kajtek"),
    ]
    for model, name in specs:
        obj = model.objects.create(name=name)
        UniqueIdentifier.objects.create(
            name=name.lower(),
            object_id=obj.pk,
            content_type=ContentType.objects.get_for_model(obj),
        )
def test_cant_instantiate_base_model(db):
    """The abstract base Animal can only be created with a valid type."""
    # direct instantiation shouldn't work
    with pytest.raises(RuntimeError):
        Animal.objects.create(name="uhoh")
    # ... unless a type is specified
    Animal.objects.create(name="dingo", type="typedmodels.canine")
    # ... unless that type is stupid
    with pytest.raises(ValueError):
        Animal.objects.create(name="dingo", type="macaroni.buffaloes")
def test_get_types():
    """get_types() lists the type strings of a class and all its subclasses."""
    expected_all = {
        'typedmodels.canine',
        'typedmodels.bigcat',
        'typedmodels.parrot',
        'typedmodels.angrybigcat',
        'typedmodels.feline',
    }
    assert set(Animal.get_types()) == expected_all
    assert set(Canine.get_types()) == {'typedmodels.canine'}
    assert set(Feline.get_types()) == {
        'typedmodels.bigcat', 'typedmodels.angrybigcat', 'typedmodels.feline'
    }
def test_get_type_classes():
    """get_type_classes() lists a class and all its proxy subclasses."""
    assert set(Animal.get_type_classes()) == {
        Canine, BigCat, Parrot, AngryBigCat, Feline
    }
    assert set(Canine.get_type_classes()) == {Canine}
    assert set(Feline.get_type_classes()) == {BigCat, AngryBigCat, Feline}
def test_type_choices():
    """The choices of the ``type`` field match the registered types."""
    type_choices = {cls for cls, _ in Animal._meta.get_field('type').choices}
    assert type_choices == set(Animal.get_types())
def test_base_model_queryset(animals):
    """Querying the base model returns every subtype, downcast correctly."""
    # all objects returned
    qs = Animal.objects.all().order_by('type')
    assert [obj.type for obj in qs] == [
        'typedmodels.angrybigcat', 'typedmodels.bigcat', 'typedmodels.canine',
        'typedmodels.feline', 'typedmodels.feline', 'typedmodels.parrot'
    ]
    assert [type(obj) for obj in qs] == [AngryBigCat, BigCat, Canine, Feline, Feline, Parrot]
def test_proxy_model_queryset(animals):
    """Proxy querysets are filtered to the subclass (and its descendants)."""
    qs = Canine.objects.all().order_by('type')
    assert qs.count() == 1
    assert len(qs) == 1
    assert [obj.type for obj in qs] == ['typedmodels.canine']
    assert [type(obj) for obj in qs] == [Canine]

    qs = Feline.objects.all().order_by('type')
    assert qs.count() == 4
    assert len(qs) == 4
    assert [obj.type for obj in qs] == [
        'typedmodels.angrybigcat', 'typedmodels.bigcat', 'typedmodels.feline', 'typedmodels.feline'
    ]
    assert [type(obj) for obj in qs] == [AngryBigCat, BigCat, Feline, Feline]
def test_doubly_proxied_model_queryset(animals):
    """A proxy-of-a-proxy still filters and downcasts correctly."""
    qs = BigCat.objects.all().order_by('type')
    assert qs.count() == 2
    assert len(qs) == 2
    assert [obj.type for obj in qs] == ['typedmodels.angrybigcat', 'typedmodels.bigcat']
    assert [type(obj) for obj in qs] == [AngryBigCat, BigCat]
def test_triply_proxied_model_queryset(animals):
    """Three levels of proxying still filter and downcast correctly."""
    qs = AngryBigCat.objects.all().order_by('type')
    assert qs.count() == 1
    assert len(qs) == 1
    assert [obj.type for obj in qs] == ['typedmodels.angrybigcat']
    assert [type(obj) for obj in qs] == [AngryBigCat]
def test_recast_auto(animals):
    """recast() with no argument uses the instance's current ``type``."""
    cat = Feline.objects.get(name='kitteh')
    cat.type = 'typedmodels.bigcat'
    cat.recast()
    assert cat.type == 'typedmodels.bigcat'
    assert type(cat) == BigCat
def test_recast_string(animals):
    """recast() accepts a dotted type string."""
    cat = Feline.objects.get(name='kitteh')
    cat.recast('typedmodels.bigcat')
    assert cat.type == 'typedmodels.bigcat'
    assert type(cat) == BigCat
def test_recast_modelclass(animals):
    """recast() accepts a model class."""
    cat = Feline.objects.get(name='kitteh')
    cat.recast(BigCat)
    assert cat.type == 'typedmodels.bigcat'
    assert type(cat) == BigCat
def test_recast_fail(animals):
    """recast() refuses targets outside the instance's type hierarchy."""
    cat = Feline.objects.get(name='kitteh')
    # Both class and dotted-string forms of unrelated models must fail.
    invalid_targets = (
        AbstractVegetable,
        'typedmodels.abstractvegetable',
        Vegetable,
        'typedmodels.vegetable',
    )
    for target in invalid_targets:
        with pytest.raises(ValueError):
            cat.recast(target)
def test_fields_in_subclasses(animals):
    """Subclass-specific fields (plain and m2m) save and load correctly."""
    canine = Canine.objects.all()[0]
    angry = AngryBigCat.objects.all()[0]

    angry.mice_eaten = 5
    angry.save()
    assert AngryBigCat.objects.get(pk=angry.pk).mice_eaten == 5

    angry.canines_eaten.add(canine)
    assert list(angry.canines_eaten.all()) == [canine]

    # Feline class was created before Parrot and has mice_eaten field which is non-m2m, so it may break accessing
    # known_words field in Parrot instances (since Django 1.5).
    parrot = Parrot.objects.all()[0]
    parrot.known_words = 500
    parrot.save()
    assert Parrot.objects.get(pk=parrot.pk).known_words == 500
def test_fields_cache():
    """Each subclass's _meta.fields contains exactly its own typed fields."""
    mice_eaten = Feline._meta.get_field('mice_eaten')
    known_words = Parrot._meta.get_field('known_words')
    assert mice_eaten in AngryBigCat._meta.fields
    assert mice_eaten in Feline._meta.fields
    assert mice_eaten not in Parrot._meta.fields
    assert known_words in Parrot._meta.fields
    assert known_words not in AngryBigCat._meta.fields
    assert known_words not in Feline._meta.fields
def test_m2m_cache():
    """Many-to-many fields are cached only on the declaring subclass."""
    canines_eaten = AngryBigCat._meta.get_field('canines_eaten')
    assert canines_eaten in AngryBigCat._meta.many_to_many
    assert canines_eaten not in Feline._meta.many_to_many
    assert canines_eaten not in Parrot._meta.many_to_many
def test_related_names(animals):
    '''Ensure that accessor names for reverse relations are generated properly.'''
    # reverse accessor from Canine via AngryBigCat.canines_eaten m2m
    canine = Canine.objects.all()[0]
    assert hasattr(canine, 'angrybigcat_set')
def test_queryset_defer(db):
    """
    Ensure that qs.defer() works correctly
    """
    Vegetable.objects.create(name='cauliflower', color='white', yumness=1)
    Vegetable.objects.create(name='spinach', color='green', yumness=5)
    Vegetable.objects.create(name='sweetcorn', color='yellow', yumness=10)
    Fruit.objects.create(name='Apple', color='red', yumness=7)

    qs = AbstractVegetable.objects.defer('yumness')

    objs = set(qs)
    for o in objs:
        assert isinstance(o, AbstractVegetable)
        assert set(o.get_deferred_fields()) == {'yumness'}
        # does a query, since this field was deferred
        assert isinstance(o.yumness, float)
@pytest.mark.parametrize('fmt', [
    'xml',
    'json',
    # Applying a mark by *calling* it on a parametrize value
    # (pytest.mark.skipif(...)("yaml")) was removed in pytest 4.0;
    # pytest.param with ``marks=`` is the supported spelling.
    pytest.param('yaml', marks=pytest.mark.skipif(
        not PYYAML_AVAILABLE, reason='PyYAML is not available')),
])
def test_serialization(fmt, animals):
    """Round-trip all animals through the ``fmt`` serializer and compare."""
    animals = Animal.objects.order_by('pk')
    serialized_animals = serializers.serialize(fmt, animals)
    deserialized_animals = [wrapper.object for wrapper in serializers.deserialize(fmt, serialized_animals)]
    assert set(deserialized_animals) == set(animals)
def test_generic_relation(animals):
    """Generic relations resolve in both directions for every subtype."""
    for animal in Animal.objects.all():
        assert hasattr(animal, 'unique_identifiers')
        assert animal.unique_identifiers.all()
    # The original iterated UniqueIdentifier twice with near-identical
    # bodies; one pass checks both filter spellings.
    for uid in UniqueIdentifier.objects.all():
        cls = uid.referent.__class__
        # filtering by object and by related attribute must both work
        assert isinstance(cls.objects.filter(unique_identifiers=uid).first(), Animal)
        assert isinstance(
            cls.objects.filter(unique_identifiers__name=uid.name).first(), Animal)
def test_manager_classes():
    """Every typed model exposes a TypedModelManager as ``objects``."""
    assert isinstance(Animal.objects, TypedModelManager)
    assert isinstance(Feline.objects, TypedModelManager)
    assert isinstance(BigCat.objects, TypedModelManager)

    # This one has a custom manager defined, but that shouldn't prevent objects from working
    assert isinstance(AbstractVegetable.mymanager, models.Manager)
    assert isinstance(AbstractVegetable.objects, TypedModelManager)

    # subclasses work the same way
    assert isinstance(Vegetable.mymanager, models.Manager)
    assert isinstance(Vegetable.objects, TypedModelManager)
| caseyrollins/django-typed-models | typedmodels/tests.py | tests.py | py | 9,363 | python | en | code | null | github-code | 13 |
def sol(score):
    """Print and return the letter grade for a score (Baekjoon 9498).

    Scores are guaranteed by the problem to be in 0..100, so the original
    ``score <= 100`` check was redundant.  The grade is also returned so
    callers can use it programmatically (the print is kept for the judge).
    """
    if score >= 90:
        result = "A"
    elif score >= 80:
        result = "B"
    elif score >= 70:
        result = "C"
    elif score >= 60:
        result = "D"
    else:
        result = "F"
    print(result)
    return result
if __name__ == "__main__":
    # Read one integer score from stdin and grade it; the guard keeps the
    # prompt from running when the module is imported.
    sol(int(input()))
from IPython.display import clear_output
import os
class HANGMAN():
    """Console hangman board.

    Tracks the secret word, the partially revealed word, the wrong-guess
    count and redraws the ASCII frame after every change.

    Fix vs. the original: ``guess()`` called ``checkGuess()`` twice per
    guess (harmless because it is idempotent, but redundant).
    """

    def __init__(self, word):
        # ASCII frame that every refresh overlays glyphs onto.
        self.screen = '''
 ___________________________________
|                ________           |
|               |        |          |
|   HANGMAN     |        |          |
|               |                   |
|               |                   |
|             __|__                 |
|                                   |
|___________________________________|'''
        print(self.screen)
        self.word = word
        self.knownWord = len(word) * "_"   # revealed letters, '_' = unknown
        self.currentGuess = ''
        self.dummy = 0                     # wrong guesses == body parts drawn
        # Body-part glyphs and their (row, column) positions on the frame.
        self.defaultDummy = [['O', '|', '/', '\\', '/', '\\', ],
                             [(4, 25), (5, 25), (5, 24), (5, 26), (6, 24), (6, 26)]]
        self.changeStatus()
        self.index = []                    # match count of the latest guess

    def changeStatus(self):
        """Redraw the frame with the current body parts and revealed word."""
        glyphs = self.defaultDummy[0][0:self.dummy]
        glyphs.append(self.divideWord(self.knownWord))
        coords = self.defaultDummy[1][0:self.dummy]
        coords.append((8, 4))
        self.refresh(self.screen, glyphs, coords)

    def refresh(self, screen, str, coodinates):
        """Clear the console and print *screen* with *str* overlaid at *coodinates*."""
        os.system('cls')  # Windows console clear; harmless no-op elsewhere
        screen = screen.split('\n')
        for index, s in enumerate(str):
            c = coodinates[index]
            screen[c[0]] = self.replaceLine(screen[c[0]], s, c[1] - 1, c[1] + len(s) - 1)
        for i in screen:
            print(i)

    def find_all_indexes(self, input_string, character):
        """Return every index at which *character* occurs in *input_string*."""
        indexes = []
        start = -1
        while True:
            start = input_string.find(character, start + 1)
            if start == -1:
                return indexes
            indexes.append(start)

    def divideWord(self, str):
        """Space out a word for display: 'abc' -> 'a b c'."""
        return " ".join(i for i in list(str))

    def replaceLine(self, s, sub, start, end):
        """Return *s* with s[start:end] replaced by *sub*."""
        chars = list(s)
        chars[start:end] = list(sub)
        return "".join(str(i) for i in chars)

    def checkGuess(self):
        """Reveal every occurrence of the current guess; return their indexes."""
        index = self.find_all_indexes(self.word, self.currentGuess)
        for i in index:
            self.knownWord = self.replaceLine(self.knownWord, self.currentGuess, i, i + 1)
        return index

    def guess(self, s):
        """Apply a guessed letter; a complete miss adds one body part."""
        self.currentGuess = s
        # Single call is enough (the original ran checkGuess twice).
        self.index = len(self.checkGuess())
        if self.index == 0:
            self.dummy += 1
        self.changeStatus()

    def gameover(self):
        """Show the losing screen together with the full solution."""
        self.refresh(self.screen, ["GAMEOVER!", self.divideWord(self.word)], [(6, 8), (8, 4)])

    def win(self):
        """Show the winning screen."""
        self.refresh(self.screen, ["YOU WIN!"], [(6, 7)])
def main():
    """Run one interactive game: prompt for the word, then loop on guesses."""
    word = input("请输入谜面字母:")
    hangman = HANGMAN(word)
    # 6 body parts = 6 allowed misses; stop early once fully revealed.
    while hangman.dummy < 6 and hangman.knownWord.find('_') != -1:
        guess = input("请输入要猜测的字母:")
        hangman.guess(guess)
    if hangman.knownWord.find('_') == -1:
        hangman.win()
    else:
        hangman.gameover()


if __name__ == "__main__":
    # Guarded so importing the module no longer starts a game immediately.
    main()
import os
from PIL import Image
import torch.tensor
from torch.utils.data import Dataset
from torchvision import transforms
import pandas as pd
import matplotlib.pyplot as plt
import cv2
class MotionData(Dataset):
    """Dataset of motion images listed in a line-delimited JSON file.

    Each record must provide ``key`` (an image path) and ``label``.
    """

    def __init__(self, data_json, reso=256):
        self.reso = reso
        self.data = pd.read_json(data_json, lines=True)
        # One random photometric distortion per call; used offline by
        # create_data_augmentations().
        augmentations = [
            transforms.ColorJitter(brightness=1, contrast=0.5, saturation=1, hue=0.5),
            transforms.GaussianBlur(kernel_size=31, sigma=5),
        ]
        self.data_augment = transforms.RandomChoice(augmentations)
        # Per-channel statistics precomputed by norm_vals() below.
        self.transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize((0.4849, 0.4798, 0.4740),
                                 (0.1678, 0.17325, 0.1815)),
        ])

    def __len__(self):
        return len(self.data)

    def __getitem__(self, idx):
        if torch.is_tensor(idx):
            idx = idx.item()
        record = self.data.iloc[idx]
        image = self.transform(Image.open(record.key))
        return image, record.label

    def create_data_augmentations(self):
        """Write one augmented copy of every image next to the original."""
        for idx in range(len(self)):
            source_path = self.data.key.iloc[idx]
            augmented = self.data_augment(Image.open(source_path))
            augmented.save(source_path.split('.')[0] + "_aug.jpg")
            print(idx, end="\r")
def augment():
    """Generate augmented copies for every dataset defined in this module.

    The original unconditionally referenced ``test_data``, whose definition
    is commented out below, so calling it raised NameError; each dataset is
    now looked up defensively.
    """
    for dataset_name in ("train_data", "test_data"):
        dataset = globals().get(dataset_name)
        if dataset is not None:
            dataset.create_data_augmentations()
def norm_vals():
    """Print per-channel mean/std over one half of ``train_data``.

    Run once with ``p = 0`` and once with ``p = 1`` to cover both halves
    (stacking the whole dataset at once presumably does not fit in memory —
    TODO confirm).  The original wrapped the loop in ``for i in range(1)``,
    a no-op that is removed here.
    """
    n = len(train_data)
    ends = [0, int(n / 2), n]
    p = 1  # set p=0 for first half of data and p=1 for second half
    data = []
    for j in range(ends[p], ends[p + 1]):
        print(j, end="\r")
        data.append(train_data[j][0])
    imgs = torch.stack(data, dim=3)
    means_per_channel = imgs.view(3, -1).mean(dim=1)
    std_per_channel = imgs.view(3, -1).std(dim=1)
    print(means_per_channel)
    print(std_per_channel)
    print()
# Module-level dataset singletons used by augment()/norm_vals().
# NOTE(review): hard-coded absolute path — parameterise before reuse.
train_data = MotionData(data_json="/home/greatman/code/vics/guide/neuralnet/train.json")
#test_data = MotionData(data_json="/home/greatman/code/vics/guide/neuralnet/test.json")

if __name__=="__main__":
    norm_vals()
| grok0n/vics | guide/neuralnet/dataset.py | dataset.py | py | 2,022 | python | en | code | 0 | github-code | 13 |
9731110105 | from google.cloud import firestore
import pandas as pd
import json
# 使用前,請先更改
# 金鑰、專案id、讀取json的路徑、寫入csv的路徑
list_ = []
# db = firestore.Client()
db = firestore.Client.from_service_account_json("./cloud-master-3-29-cfb7e9371055.json", project='cloud-master-3-29')
with open('ccs_line_richmenus.json', 'r', encoding='utf-8') as file:
for line in file:
line = json.loads(line)
# 變更鍵的名稱
# line[k_new] = line.pop(k_old)
line['rich_menu_name'] = line.pop('line_richmenu_custom_name')
line['rich_menu_pic_url'] = line.pop('line_richmenu_pic_url')
line['rich_menu_config'] = line.pop('line_richmenu_config')
line['custom_description'] = line.pop('line_richmenu_custom_description')
line['rich_menu_id'] = line.pop('line_richmenu_id')
line['custom_name'] = line['rich_menu_name']
del line['line_channel_id']
with open("CloudMasterLineBotRichMenu.json", 'a', encoding="utf-8") as fout:
json.dump(line, fout, ensure_ascii=False, sort_keys=True, default=str)
fout.write("\n")
db.collection(u'CloudMasterLineBotRichMenu').document(line['rich_menu_name']).set(line)
list_.append(line)
df = pd.json_normalize(list_)
df.to_csv("CloudMasterLineBotRichMenu.csv")
| Whaleman0423/1111 | old_data_trans_rich_menu_upload_save_local.py | old_data_trans_rich_menu_upload_save_local.py | py | 1,386 | python | en | code | 0 | github-code | 13 |
import uvicorn
from database import Base, engine
from fastapi import HTTPException, FastAPI
from fastapi.middleware.cors import CORSMiddleware
from routes import auth as auth_router, bucket as bucket_router, user as user_router
# Create all tables at import time; fine for a demo app, but consider
# migrations (e.g. Alembic) for anything beyond that.
Base.metadata.create_all(bind=engine)

app = FastAPI(
    title="Demo FastAPI and Github actions app",
    version="0.01",
    description="A FastAPI app deployed to Heroku with a Github actions CI/CD pipeline.",
    contact={
        "name": "Similoluwa Okunowo",
        "url": "https://simiokunowo.netlify.app",
        "email": "rexsimiloluwa@gmail.com",
    },
)

# Common prefix for all versioned API routes.
BASE_URL = "/api/v1"

app.include_router(auth_router.router, tags=["Auth"], prefix=BASE_URL)
app.include_router(bucket_router.router, tags=["Bucket"], prefix=BASE_URL)
app.include_router(user_router.router, tags=["User"], prefix=BASE_URL)

# NOTE(review): wildcard origins combined with allow_credentials=True is
# rejected by browsers per the CORS spec and is unsafe in production —
# consider an explicit origin list. TODO confirm intended deployment.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_headers=["*"],
    allow_methods=["*"],
    allow_credentials=True,
)

if __name__ == "__main__":
    uvicorn.run("main:app", host="0.0.0.0", port=5000, reload=True)
| rexsimiloluwah/fastapi-github-actions-test | src/main.py | main.py | py | 1,078 | python | en | code | 1 | github-code | 13 |
import warnings
import logging
import sys
import itertools
from pathlib import Path
import hydra
from omegaconf import DictConfig, OmegaConf
import yaml
import matplotlib.pyplot as plt
import numpy as np
import torch
import pytorch_lightning as pl
from pytorch_lightning.loggers import TensorBoardLogger, WandbLogger
from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint, LearningRateMonitor
from e2cnn import nn
from diffop_experiments import MNISTRotModule
warnings.filterwarnings("ignore",
"indexing with dtype torch.uint8 is now deprecated, "
"please use a dtype torch.bool instead.")
# This warning is triggered internally in pytorch 1.9.0:
# https://github.com/pytorch/pytorch/issues/54846
# Should be fixed in future releases
warnings.filterwarnings("ignore",
"Named tensors and all their associated APIs are an experimental feature")
@hydra.main(config_path="config", config_name="config")
def cli_main(cfg: DictConfig):
    """Hydra entry point: build the datamodule and model from ``cfg``,
    optionally load a checkpoint, train, and test.

    Behaviour toggles read from the config: ``debug``, ``full_debug``,
    ``pdo_econv``, ``load_checkpoint``, ``eval_only``, ``only_find_lr``
    and ``log`` ('tb', 'wandb' or falsy); see the inline comments below.
    """
    # Fix to prevent everything from being logged twice,
    # once by PL and once by Hydra.
    # See https://github.com/facebookresearch/hydra/issues/1012#issuecomment-806596005
    # This means that PL won't print its logs to console
    # but will hand them to Hydra, which then deals with logging.
    # We could instead only set pl_logger.propagate to False (without emptying
    # the handlers), but we want Hydra to log the output to files and in general
    # to configure the logging format.
    pl_logger = logging.getLogger("lightning")
    pl_logger.handlers = []
    pl_logger.propagate = True
    # allow addition of new keys
    OmegaConf.set_struct(cfg, False)
    if cfg.get("debug", False):
        # fast_dev_run: a single abbreviated train/val/test pass
        cfg.trainer.fast_dev_run = True
        cfg.trainer.weights_summary = "full"
        # speed up the debug run by using a tiny batch size
        cfg.data.batch_size = 2
        # mostly to suppress a warning that there are fewer steps
        # than the log period
        cfg.trainer.log_every_n_steps = 1
    if cfg.get("full_debug", False):
        # like debug, but runs the real loop for exactly one step
        cfg.trainer.fast_dev_run = False
        cfg.trainer.max_steps = 1
        cfg.trainer.limit_val_batches = 2
        cfg.trainer.limit_test_batches = 2
        cfg.trainer.weights_summary = "full"
        cfg.data.batch_size = 2
    if cfg.get("pdo_econv", False):
        # presumably the hyperparameters of the PDO-eConv baseline —
        # TODO confirm against the paper's settings
        cfg.model.maximum_power = 0
        cfg.model.special_regular_basis = True
        cfg.model.maximum_partial_order = 2
        cfg.model.maximum_order = None
        cfg.model.angle_offset = np.pi / 8
        cfg.model.normalize_basis = False
        cfg.model.max_accuracy = 2
        if any(size != 5 for size in cfg.model.kernel_size):
            raise ValueError("PDO-eConv stencils are currently only implemented for 5x5 kernels")

    pl.seed_everything(cfg.seed)
    cfg.data.dir = hydra.utils.to_absolute_path(cfg.data.dir)

    # ------------
    # setup
    # ------------
    datamodule = hydra.utils.instantiate(cfg.data)

    if cfg.get("load_checkpoint", False):
        # If the load_checkpoint flag is passed, we load from that checkpoint.
        p = cfg.dir.log / Path(cfg.load_checkpoint)
        p = hydra.utils.get_original_cwd() / p
        # We don't use pytorch lightnings in-built LightningModule.load_from_checkpoint(),
        # instead we instantiate the model manually and load the state dict.
        # Using load_from_checkpoint() would require some ugly hacks to get the model type
        # (because we can't rely on hydra.utils.instantiate), though I'm not sure which
        # way is better
        if not torch.cuda.is_available():
            checkpoint = torch.load(p, map_location=torch.device("cpu"))
        else:
            checkpoint = torch.load(p)

    # the model needs a few data-dependent settings before instantiation
    cfg.model.input_size = datamodule.dims[1]
    cfg.model.in_channels = datamodule.dims[0]
    cfg.model.steps_per_epoch = datamodule.num_batches
    if cfg.trainer.stochastic_weight_avg:
        # SWA switches to its own schedule for the last 20% of epochs
        cfg.model.num_epochs = int(cfg.trainer.max_epochs * 0.8)
    else:
        cfg.model.num_epochs = cfg.trainer.max_epochs
    if cfg.get("load_checkpoint", False):
        # if we load weights anyway, no need to waste time on initialization
        cfg.model.init = None
    model = hydra.utils.instantiate(cfg.model)
    if cfg.get("load_checkpoint", False):
        # Now after instantiating the model, we actually load the state dict
        state_dict = checkpoint["state_dict"]  # type: ignore
        model.load_state_dict(state_dict)

    if cfg.get("debug", False) or cfg.get("full_debug", False):
        for name, p in model.named_parameters():
            if not p.requires_grad:
                continue
            print(name, p.numel())
    num_params = sum(p.numel() for p in model.parameters() if p.requires_grad)
    logging.info(f"Total number of trainable parameters: {num_params}")

    if cfg.get("eval_only", False):
        trainer = pl.Trainer(**cfg.trainer)
        results = trainer.test(model, datamodule=datamodule)
        return

    # ------------
    # training
    # ------------
    callbacks = []
    log_mode = cfg.get("log", "wandb")
    if log_mode == "tb":
        # We want to always put tensorboard logs into the CWD,
        # no matter what cfg.dir.output_base is. The reason is that
        # on clusters, we use the scratch disk to save checkpoints,
        # but we want to make it easy to see the tensorboard logs
        # while the job is still running.
        tb_path = hydra.utils.to_absolute_path(cfg.dir.log + "/" + cfg.dir.run)
        # name and version should be empty; the path above is already a unique
        # path for this specific run, handled by Hydra
        logger = TensorBoardLogger(tb_path, name="", version="")
    elif log_mode == "wandb":
        logger = WandbLogger(
            name=cfg.get("name", None),
            project="steerable_pdos",
            group=cfg.get("group", None),
        )
    elif not log_mode:
        logger = None
    else:
        raise ValueError("log_mode must be 'tb', 'wandb' or falsy")
    if log_mode:
        callbacks.append(LearningRateMonitor())

    if cfg.data.validation_size:
        # checkpointing only makes sense if we use a validation set
        # (a final checkpoint for the last model is stored anyway)
        checkpoint_callback = ModelCheckpoint(
            monitor="loss/val",
            # the CWD is automatically set by Hydra, this is where
            # we want to save checkpoints
            dirpath=".",
            mode="min",
        )
        callbacks.append(checkpoint_callback)
    # we never want early stopping when we don't use a validations set
    if cfg.early_stopping.enabled and cfg.data.validation_size:
        early_stopping_callback = EarlyStopping(monitor="loss/val", patience=cfg.early_stopping.patience)
        callbacks.append(early_stopping_callback)

    # The logger directory might not be the CWD (see above), but we still
    # want to save weights there. This is only necessary for the case
    # where no validation set is used and thus no model checkpoint callback
    # (otherwise, the callback sets the correct path anyway)
    cfg.trainer.weights_save_path = "."
    # this doesn't play a large role, but I think it's used by the LR finder
    # even when the weights_save_path is set
    cfg.trainer.default_root_dir = "."

    if cfg.model.learning_rate == "auto" or cfg.get("only_find_lr", False):
        trainer = pl.Trainer(**cfg.trainer)
        lr_finder = trainer.tuner.lr_find(model, datamodule=datamodule)
        fig = lr_finder.plot(suggest=True)
        if cfg.get("only_find_lr", False):
            # in the only_find_lr setting, no tensorboard log is created, instead we store the figure
            fig.savefig("lr_plot.pdf")
        else:
            logger.experiment.add_figure("lr_finder", fig)
        model.hparams.learning_rate = lr_finder.suggestion()
        print("Best learning rate:", lr_finder.suggestion())
    if cfg.get("only_find_lr", False):
        return

    # we recreate the Trainer from scratch after determining the learning
    # rate. The reason is that Pytorch Lightning doesn't reset the epoch and step
    # count after tuning the learning rate. Could probably do this by hand,
    # but this seems more fool-proof.
    # This also avoids this issue:
    # https://github.com/PyTorchLightning/pytorch-lightning/issues/5587
    # which is still unresolved at the time of writing this
    trainer = pl.Trainer(**cfg.trainer, logger=logger, callbacks=callbacks)
    trainer.fit(model, datamodule=datamodule)

    # ------------
    # testing
    # ------------
    if (cfg.trainer.get("fast_dev_run", False)
            or not cfg.data.validation_size
            or cfg.trainer.stochastic_weight_avg):
        # In a fast dev run, no checkpoints will be created, we need to use the existing model.
        # If we don't use a validation set, we also can't load the best model
        # and need to use the last one.
        # And when using SWA, we want the averaged model, not one from a checkpoint.
        # (in the future, this might not be necessary: https://github.com/PyTorchLightning/pytorch-lightning/issues/6074)
        results = trainer.test(model, datamodule=datamodule)
    else:
        # otherwise, we load the best model.
        results = trainer.test(datamodule=datamodule)

    # write the test results into a file in the CWD
    # (which is handled by Hydra and is the same dir where the other
    # logs are stored)
    with open("results.yaml", "w") as file:
        # results is a list with a dict for each dataloader,
        # but we only use one test dataloader, so only print results[0]
        # default_flow_style just affects the style of YAML output
        yaml.dump(results[0], file, default_flow_style=False)
if __name__ == '__main__':
cli_main()
| ejnnr/steerable_pdo_experiments | main.py | main.py | py | 9,840 | python | en | code | 0 | github-code | 13 |
import pymysql
class Checkin:
    """Credential check against the 'Tienda' database.

    Fixes vs. the original:
    * the SQL was built by concatenating raw user input (SQL injection,
      and — since the values were not even quoted — a guaranteed syntax
      error); it now uses parameterised queries;
    * the queries (with credentials) are no longer printed;
    * ``resultado[1]`` indexed the row list instead of the row
      (``resultado[0][1]``), and the inner emptiness test indexed
      ``resultado[0]`` on a possibly empty result;
    * the connection is now always closed, even on errors.
    """

    @staticmethod
    def chequear(mail, contraseña):
        """Look up a user by mail/password and dispatch on their role."""
        conexion = pymysql.connect(host='localhost',
                                   user='root',
                                   password='',
                                   db='Tienda')
        try:
            cursor = conexion.cursor()
            # NOTE(review): column/table names look suspicious
            # ("num_cliennte" typo; employees selected from "clientes") —
            # verify against the actual schema.
            sql_cliente = (
                "select num_cliennte from clientes where dni in "
                "(select dni from informacion where id_usuario in "
                "(select id_usuario from usuarios where mail=%s and contraseña=%s));"
            )
            sql_personal = (
                "select num_empleado, puesto from clientes where dni in "
                "(select dni from informacion where id_usuario in "
                "(select id_usuario from usuarios where mail=%s and contraseña=%s));"
            )
            cursor.execute(sql_cliente, (mail, contraseña))
            resultado = cursor.fetchall()
            if len(resultado) == 0:
                cursor.execute(sql_personal, (mail, contraseña))
                resultado = cursor.fetchall()
                if len(resultado) == 0:
                    print("Usuario no Encontrado")
                elif resultado[0][1] == "empleado":
                    # redirect to the employee menu
                    pass
                else:
                    # redirect to the owner menu
                    pass
            else:
                # a client was found: redirect to the client menu
                pass
        finally:
            conexion.close()
| Estroberti2/Apremdiendo-Python | curso python/Proyrcto Python/chekin.py | chekin.py | py | 1,789 | python | es | code | 0 | github-code | 13 |
from flask import Flask, render_template, request, flash, redirect, session, g, abort
from models import db, connect_db, User, Sighting
from forms import NewUserForm, LoginForm, AddSightingForm, EditUserForm, EditSightingForm
from sqlalchemy.exc import IntegrityError
from sqlalchemy import desc
import os
import requests
import pdb
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
# Session key under which the logged-in user's id is stored.
CURR_USER_KEY = "curr_user"

app = Flask(__name__)
app.config.from_object(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = os.environ.get(
    'DATABASE_URL', 'postgresql:///psosightings')
app.config['SQLALCHEMY_ECHO'] = True
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): weak fallback secret key — rely on the env var in production.
app.config['SECRET_KEY'] = os.environ.get('SECRET_KEY', '12345678')
connect_db(app)
db.create_all()

# toolbar = DebugToolbarExtension(app)

#### USERS ROUTES #####
@app.before_request
def add_user_to_g():
    """If we're logged in, add curr user to Flask global."""
    # Runs before every request; g.user is None for anonymous visitors.
    if CURR_USER_KEY in session:
        g.user = User.query.get(session[CURR_USER_KEY])

    else:
        g.user = None
def do_login(user):
    """Log in user by storing their id in the session."""
    session[CURR_USER_KEY] = user.id
def do_logout():
    """Logout user by removing their id from the session, if present."""
    if CURR_USER_KEY in session:
        del session[CURR_USER_KEY]
@app.route("/user/new", methods=["GET"])
def users_new_form():
"""Show a form to create a new user"""
form= NewUserForm()
return render_template('new_user.html', form=form)
@app.route("/user/new", methods=["POST"])
def add_user():
form = NewUserForm()
if form.validate_on_submit():
try:
user = User.signup(
user_name=form.user_name.data,
email=form.email.data,
password=form.password.data
)
# db.session.add(user)
# session["user_id"] = user.id
except IntegrityError:
flash("Username already taken", 'danger')
return render_template('new_user.html', form=form)
return redirect(f"/user/{user.id}") ## CHANGE TO ADMIN ID NUMBER
else:
return render_template('new_user.html', form=form)
return redirect('/home')
# return redirect('/user/info/<int:user_id>')
@app.route('/user/login', methods=["GET", "POST"])
def login():
    """Render the login form and authenticate submitted credentials."""
    form = LoginForm()
    if not form.validate_on_submit():
        return render_template('login.html', form=form)

    user = User.authenticate(form.user_name.data, form.password.data)
    if not user:
        flash("Invalid credentials.", 'danger')
        return render_template('login.html', form=form)

    do_login(user)
    flash(f"Hello, {user.user_name}!", "success")
    return redirect(f"/user/{user.id}")
@app.route('/logout')
def logout():
    """Handle logout of user."""
    # Clear the session, then send the visitor back to the landing page.
    do_logout()
    flash("Goodbye for now!", "success")
    return redirect("/")
@app.route("/user/<int:user_id>", methods=["GET"])
def user_page(user_id):
if not g.user:
flash("Access unauthorized.", "danger")
return redirect("/user/<int:user_id>")
user = User.query.get_or_404(user_id)
sightings = Sighting.query.filter(Sighting.user_id == user_id).all()
return render_template('user_info.html', user=user, sightings=sightings)
@app.route("/user/<int:user_id>/edit")
def edit_user(user_id):
"""Show edit form"""
if not g.user:
flash("Access unauthorized.", "danger")
return redirect("/user/<int:user_id>")
user = User.query.get(g.user.id)
form = EditUserForm(obj=user)
return render_template("edit_user.html", user=user, form=form)
@app.route('/user/<int:user_id>/edit', methods=["POST"])
def submit_edit(user_id):
    """Apply edits to a user's name and email.

    Fixes vs. the original: the submitted values were read into locals but
    never assigned to the user, so every edit was silently dropped; the
    unauthorized redirect pointed at a literal route pattern.
    """
    if not g.user:
        flash("Access unauthorized.", "danger")
        return redirect("/user/login")

    user = User.query.get_or_404(user_id)
    user.user_name = request.form["user_name"]
    user.email = request.form["email"]

    db.session.add(user)
    db.session.commit()
    return redirect(f"/user/{user.id}")
@app.route('/user/delete', methods=["POST"])
def delete_user():
    """Delete the currently logged-in user and log them out."""
    if not g.user:
        flash("Access unauthorized.", "danger")
        # Fixed: the literal route pattern is not a valid redirect target.
        return redirect("/")
    do_logout()
    db.session.delete(g.user)
    db.session.commit()
    return redirect("/")
#### HOME ROUTES ####
@app.route("/user/<int:user_id>/all")
def enterpage(user_id):
if not g.user:
flash("Access unauthorized.", "danger")
return redirect("/user/<int:user_id>")
sightings = Sighting.query.order_by(Sighting.id.desc()).all()[::]
# Sighting.query.all.(order_by(desc(Sighting.id)))
user = User.query.get_or_404(user_id)
return render_template('list.html', sightings=sightings, user=user)
@app.route("/")
def homepage():
return redirect("/user/login")
@app.route("/user/<int:user_id>/addsighting", methods=["GET"])
def new_sighting(user_id):
if not g.user:
flash("Access unauthorized.", "danger")
return redirect("/user/<int:user_id>")
user = User.query.get_or_404(user_id)
form = AddSightingForm()
return render_template('new_sighting.html', user=user, form=form)
@app.route("/user/<int:user_id>/addsighting", methods=["POST"])
def submit_sighting(user_id):
if not g.user:
flash("Access unauthorized.", "danger")
return redirect("/user/<int:user_id>")
TO_EMAILS= [('msmeganmcmanus@gmail.com', 'Megan McManus'), ('psosharespace@gmail.com', 'Megan McManus2'), ('neilroper15@gmail.com', 'Neil Roper'), ('katiedouglas11@gmail.com', 'Katie Douglas')]
user = User.query.get_or_404(user_id)
form = AddSightingForm()
if form.validate_on_submit():
sighting_num = form.sighting_num.data
date = form.date.data
time = form.time.data
latitude = form.latitude.data
longitude = form.longitude.data
species = form.species.data
individuals = form.individuals.data
user_id = f"{user.id}"
sighting= Sighting(sighting_num=sighting_num, date=date, time=time, latitude=latitude, longitude=longitude, species=species, individuals=individuals, user_id=user_id)
db.session.add(sighting)
db.session.commit()
message = Mail(
from_email='psosharespace@gmail.com',
to_emails=TO_EMAILS,
is_multiple=True,
subject=f"New Sighting Submitted by {sighting.user.user_name}",
html_content=f"At {sighting.time}, {sighting.user.user_name} observed a {sighting.species} at {sighting.latitude}N, {sighting.longitude}W - Date {sighting.date}")
try:
sg = SendGridAPIClient(os.environ.get('SENDGRID_API_KEY'))
response = sg.send(message)
print(response.status_code)
print(response.body)
print(response.headers)
except Exception as e:
print(e.message)
return redirect(f"/user/{user.id}/all")
return render_template('new_sighting.html', form=form, user=user)
@app.route("/sighting/<int:sighting_id>/editsighting", methods=["GET"])
def edit_sighting(sighting_id):
"""Show edit form"""
if not g.user:
flash("Access unauthorized.", "danger")
return redirect("/user/<int:user_id>")
sighting = Sighting.query.get_or_404(sighting_id)
form = EditSightingForm(obj=sighting)
user = User.query.get_or_404(g.user.id)
return render_template("edit_sighting.html", user=user, sighting=sighting, form=form)
@app.route("/sighting/<int:sighting_id>/editsighting", methods=["POST"])
def submit_edit_sighting(sighting_id):
if not g.user:
flash("Access unauthorized.", "danger")
return redirect("/user/<int:user_id>")
sighting = Sighting.query.get_or_404(sighting_id)
form = EditSightingForm(obj=sighting)
user = User.query.get_or_404(g.user.id)
if form.validate_on_submit():
sighting.sighting_num = form.sighting_num.data
sighting.date = form.date.data
sighting.time = form.time.data
sighting.latitude = form.latitude.data
sighting.longitude = form.longitude.data
sighting.species = form.species.data
sighting.individuals = form.individuals.data
user_id = f"{user.id}"
sighting= Sighting(sighting_num=sighting.sighting_num, date=sighting.date, time=sighting.time, latitude=sighting.latitude, longitude=sighting.longitude, species=sighting.species, individuals=sighting.individuals, user_id=user_id)
db.session.commit()
return redirect(f"/user/{user.id}/all")
@app.route('/sighting/<int:sighting_id>/delete', methods=["POST"])
def submit_job_edit(sighting_id):
    """Delete a sighting owned by the current user.

    NOTE(review): the function name suggests editing a job, but this
    route deletes a sighting; consider renaming (only the route string
    is referenced by templates).
    """
    if not g.user:
        flash("Access unauthorized.", "danger")
        # NOTE(review): literal route pattern used as redirect target.
        return redirect("/user/<int:user_id>")
    sighting = Sighting.query.get_or_404(sighting_id)
    # Only the sighting's owner may delete it.
    if sighting.user_id != g.user.id:
        flash("Access unauthorized.", "danger")
        return redirect(f"/user/{g.user.id}")
    db.session.delete(sighting)
    db.session.commit()
    return redirect(f"/user/{g.user.id}")
@app.after_request
def add_header(req):
    """Add non-caching headers on every request."""
    req.headers["Cache-Control"] = "no-cache, no-store, must-revalidate"
    req.headers["Pragma"] = "no-cache"
    req.headers["Expires"] = "0"
    # Fixed: a later assignment of 'public, max-age=0' used to overwrite
    # the no-cache directive above, defeating this hook's stated purpose.
    return req
if __name__ == '__main__':
    # Development entry point only; debug=True must not ship to production.
    app.run(debug=True)
| petitepirate/psosightings | app.py | app.py | py | 9,825 | python | en | code | 0 | github-code | 13 |
26010468248 | """This module contains the class for the popup window to add a custom category to the combobox"""
from PyQt6.QtWidgets import QDialog
from UI.popup import Ui_Form
from src.controllers.popup_accounts_controller import PopUpAccountsController
class PopUpWindowAcc(QDialog, Ui_Form):
    """Popup dialog for adding a custom category in the accounts view."""
    def __init__(self, acc_window, refresher):
        """Build and immediately show the popup.

        Args:
            acc_window: parent accounts window (also the Qt parent widget).
            refresher: object/callback the controller uses to refresh the view.
        """
        super().__init__(acc_window)
        self.setupUi(self)
        self.show()
        # Keep a handle on the parent so the controller can talk back to it.
        self.acc_window = acc_window
        self.controller = PopUpAccountsController(self, self.acc_window, refresher)
| razvanmarinn/expense-tracker | src/views/popup/p_accounts.py | p_accounts.py | py | 565 | python | en | code | 0 | github-code | 13 |
11737129081 | from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from time import sleep
import itertools as it
import wikipedia
import sys
def get_words():
    """Read study terms from terms.txt, one per line (newline retained)."""
    with open('terms.txt', encoding="utf8") as f:
        terms = [line for line in f]
    print("$$ Loaded all terms ({})".format(len(terms)))
    return terms
def get_definitions():
    """Read definitions.txt and split it into blank-line-separated entries."""
    with open('definitions.txt', encoding="utf8") as f:
        definitions = f.read().split("\n\n")
    print("$$ Loaded all definitions ({})".format(len(definitions)))
    return definitions
def define_words(terms):
    """Look up each term on Wikipedia and write numbered definitions.

    Each entry is written to definitions.txt as "<n>. <term> <summary>"
    followed by a blank line; failures are recorded as "Error: <term>".
    Returns the list of summaries/error strings in input order.

    NOTE(review): the file handle is opened without `with` and never
    closed; also `cnt` is not advanced on errors, so numbering tracks
    successful lookups only — confirm both are intended.
    """
    definition_file = open('definitions.txt', 'w')
    definitions = []
    cnt = 1
    for term in terms:
        try:
            print("{} ({} of {})".format(term, cnt, len(terms)))
            # First search hit for the term, then its summary paragraph.
            definition = wikipedia.summary(wikipedia.search(term)[0])
            definitions.append(definition)
            definition_file.write("{}. {} {}".format(cnt, term, definition))
            definition_file.write("\n")
            definition_file.write("\n")
            cnt += 1
        except Exception as e:
            error = "Error: {}".format(term)
            print(e)
            print(error)
            definitions.append(error)
            definition_file.write(error)
    print("Loaded all definitions")
    return definitions
def createQuizlet(email, password, title, terms, defintions):
    """Drive Chrome via Selenium to log into Quizlet and create a study set.

    Logs in through the Google social-login flow, opens the set editor,
    sets title/visibility, then tabs through the card editor filling
    term/definition pairs.

    NOTE(review): the parameter `defintions` is misspelled (kept as-is
    for keyword callers) and the chromedriver path is machine-specific.
    """
    print("$$ Connecting to webdriver...")
    driver = webdriver.Chrome(executable_path='C:\dev\ocr-apush-define/chromedriver.exe')
    driver.get("https://quizlet.com/login")
    assert "Quizlet" in driver.title
    print("$$ Successfully connected to webdriver")
    # Log in (sleeps give the pages time to load; brittle by nature).
    print("$$ Logging into Quizlet")
    try:
        driver.find_element_by_css_selector(".UISocialButton.UISocialButton--default").click()
        sleep(3)
        elem = driver.find_element_by_name("identifier")
        elem.clear()
        elem.send_keys(email)
        elem.send_keys(Keys.RETURN)
        sleep(3)
        elem = driver.find_element_by_name("password")
        elem.clear()
        elem.send_keys(password)
        elem.send_keys(Keys.RETURN)
        sleep(3)
        driver.refresh()
        driver.find_element_by_xpath("""//*[@id="SiteHeaderReactTarget"]/header/div/div/span[2]/div/div[2]/a/div""").click()
    except Exception as e:
        print("ERROR: Failed to log into Quizlet!")
    else:
        print("$$ Logged into Quizlet")
    # Create the Quizlet set: title, then the two visibility dropdowns.
    print("$$ Creating set...")
    try:
        driver.find_element_by_xpath("""//*[@id="SetPageTarget"]/div/div[1]/div[2]/div/div/label/div/div/div[2]/textarea""").send_keys(title + Keys.TAB)
        driver.find_element_by_xpath("""//*[@id="SetPageTarget"]/div/div[2]/div[2]/div/div[1]/div[1]/div[1]/div/div[3]/div[1]/div/div/div[1]/div/div/label/span/div/button""").click()
        driver.find_element_by_xpath("""//*[@id="react-select-2--option-1"]""").click()
        driver.find_element_by_xpath("""//*[@id="SetPageTarget"]/div/div[2]/div[2]/div/div[1]/div[1]/div[1]/div/div[3]/div[1]/div/div/div[2]/div/div/label/span/div/button""").click()
        driver.find_element_by_xpath("""//*[@id="react-select-3--option-1"]""").click()
        # Fill in the cards: TAB advances between term/definition fields.
        element = driver.find_element_by_xpath("""//*[@id="SetPageTarget"]/div/div[2]/div[2]/div/div[1]/div[1]/div[1]/div/div[3]/div[1]/div/div/div[1]/div/div/label/div/div[1]/div[2]/textarea""")
        actions = webdriver.ActionChains (driver)
        actions.move_to_element(element)
        actions.click()
        actions.send_keys("THIS SET WAS GENERATED BY A PYTHON BOT" + Keys.TAB)
        actions.send_keys("Smith is so cool, isn't he?" + Keys.TAB)
        for i in range(0, len(terms)):
            actions.send_keys(terms[i] + Keys.TAB)
            actions.send_keys(defintions[i] + Keys.TAB)
        actions.perform()
    except Exception as e:
        print("ERROR: Failed to create set!")
        sleep(5)
    else:
        print("$$ Successfully created set")
        sleep(5)
# Save
# print("$$ Saving set...")
# try:
# save = driver.find_element_by_css_selector(".UIButton.UIButton--hero").click
# # save.find_element_by_css_selector(".UIButton-wrapper").click()
# except Exception as e:
# print("ERROR: Failed to save set!")
# print(e)
# else:
# print("$$ Set saved")
# sleep(2)
# driver.close()
def main():
    """Script entry point: load terms + definitions and build the set."""
    # NOTE(review): real-looking credentials are hard-coded and committed
    # here — move to environment variables and rotate them.
    # email = input("Enter Quizlet email: ")
    # password = input("Enter Quizlet password: ")
    # title = input("Enter title for the set: ")
    email = "241745@amaisd.net"
    password = "20011018"
    title = "APUSH First Sememster Terms"
    terms = get_words()
    definitions = get_definitions()
    createQuizlet(email, password, title, terms, definitions)

if __name__ == "__main__":
    main()
36832532016 | #!/usr/bin/python2.7
# -*- coding: utf-8
import httplib
import urllib
import urllib2
import Parser
from BeautifulSoup import BeautifulSoup
import pdb
"""
<option value="010000">AMAZONAS</option>
<option value="020000">ANCASH</option>
<option value="030000">APURIMAC</option>
<option value="040000">AREQUIPA</option>
<option value="050000">AYACUCHO</option>
<option value="060000">CAJAMARCA</option>
<option value="240000">CALLAO</option>
<option value="070000">CUSCO</option>
<option value="080000">HUANCAVELICA</option>
<option value="090000">HUANUCO</option>
<option value="100000">ICA</option>
<option value="110000">JUNIN</option>
<option value="120000">LA LIBERTAD</option>
<option value="130000">LAMBAYEQUE</option>
<option value="140000">LIMA</option>
<option value="150000">LORETO</option>
<option value="160000">MADRE DE DIOS</option>
<option value="170000">MOQUEGUA</option>
<option value="180000">PASCO</option>
<option value="190000">PIURA</option>
<option value="200000">PUNO</option>
<option value="210000">SAN MARTIN</option>
<option value="220000">TACNA</option>
<option value="230000">TUMBES</option>
<option value="250000">UCAYALI</option>
"""
# Region name -> ONPE UBIGEO region code (only the first two digits vary).
#d_provincias = {'Amazonas':'010000','Ancash':020000,'Apurimac':030000,'Arequipa':'040000', 'Ayacucho':'050000', 'Cajamarca':'060000', 'Callao':'240000', 'Cusco':'070000', 'Huancavelica':'080000', 'Huanuco':'090000', 'Ica':'100000', 'Junin':'110000', 'La Libertad':'120000', 'Lambayeque':'130000', 'Lima':'140000', 'Loreto':'150000', 'Madre de Dios':'160000', 'Moquegua':'170000', 'Pasco':'180000', 'Piura':'190000', 'Puno':'200000', 'San Martin':'210000', 'Tacna':'220000', 'Tumbes':'230000', 'Ucayali':'250000'}
d_regiones = {'Amazonas':'010000','Ancash':'020000','Apurimac':'030000','Arequipa':'040000', 'Ayacucho':'050000', 'Cajamarca':'060000', 'Callao':'240000', 'Cusco':'070000', 'Huancavelica':'080000', 'Huanuco':'090000', 'Ica':'100000', 'Junin':'110000', 'La Libertad':'120000', 'Lambayeque':'130000', 'Lima':'140000', 'Loreto':'150000', 'Madre de Dios':'160000', 'Moquegua':'170000', 'Pasco':'180000', 'Piura':'190000', 'Puno':'200000', 'San Martin':'210000', 'Tacna':'220000', 'Tumbes':'230000', 'Ucayali':'250000'}
# Result-page roots for the 2011 presidential/congressional rounds.
str_2da_vuelta = "http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/2davuelta/onpe/presidente/"
str_1ra_vuelta = "http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/1ravuelta/onpe/presidente/"
str_congreso = "http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/1ravuelta/onpe/congreso/"
# AJAX endpoint that maps a region code to its provinces.
url_query_2da_vuelta = "http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/2davuelta/onpe/presidente/extras/provincias.php"
def from_reg_get_provs( region):
    """Return {province name: code} for a region code (Python 2 script).

    POSTs the region code to the 2011 runoff "provincias" endpoint and
    scrapes the <option> elements of the returned dropdown HTML.
    """
    data = {}
    dict = {}  # NOTE(review): shadows the builtin `dict`
    data['elegido'] = region
    en_data = urllib.urlencode(data)
    req = urllib2.Request('http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/2davuelta/onpe/presidente/extras/provincias.php', en_data )
    f = urllib2.urlopen(req)
    soup= BeautifulSoup(f.read() )
    for item in soup.findAll('option'):
        # Skip empty placeholder options.
        if item.string is not None:
            dict[ item.string]= item['value']
    return dict
def from_prov_get_districts( provincia ):
    """Return {district name: code} for a province code.

    Same scrape pattern as from_reg_get_provs, against the "distritos"
    endpoint.
    """
    data = {}
    dict = {}  # NOTE(review): shadows the builtin `dict`
    data['elegido'] = provincia
    en_data = urllib.urlencode(data)
    req = urllib2.Request('http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/2davuelta/onpe/presidente/extras/distritos.php', en_data )
    f = urllib2.urlopen(req)
    soup= BeautifulSoup(f.read() )
    for item in soup.findAll('option'):
        if item.string is not None:
            dict[ item.string]= item['value']
    return dict
def from_district_get_centros(distrito):
    """Return {polling place name: code} for a district code.

    Same scrape pattern as the other lookups, against the "locales"
    endpoint.
    """
    data = {}
    dict = {}  # NOTE(review): shadows the builtin `dict`
    data['elegido'] = distrito
    en_data = urllib.urlencode(data)
    req = urllib2.Request('http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/2davuelta/onpe/presidente/extras/locales.php', en_data )
    f = urllib2.urlopen(req)
    soup= BeautifulSoup(f.read() )
    for item in soup.findAll('option'):
        if item.string is not None:
            dict[ item.string]= item['value']
    return dict
def from_centro_get_mesas( departamento, provincia, distrito, centro):
    """POST a full UBIGEO query and return the raw HTTP response object.

    The response body is the HTML list of actas (tally sheets) for the
    chosen polling place; callers feed it to from_mesas_get_actas.
    """
    data = {}
    dict = {}  # NOTE(review): unused dead local shadowing builtin `dict`
    # Form fields mirrored from the site's search form (see the sample
    # payload in the module-level comment below the functions).
    data['tipo_consulta1'] = 'UBIGEO'
    data['cnume_acta'] = ''
    data['ambito1'] = 'P'
    data['dpto'] = departamento
    data['prov'] = provincia
    data['dist'] = distrito
    data['local'] = centro
    data['estado'] = 'T'
    data['continente'] = ''
    data['pais'] = ''
    data['ciudad'] = ''
    data['embajada'] = ''
    data['estado2'] = 'T'
    en_data = urllib.urlencode(data)
    req = urllib2.Request('http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/2davuelta/onpe/presidente/extras/buscar_ubigeo_actas.php', en_data )
    f = urllib2.urlopen(req)
    #print f.read()
    return f
def from_mesas_get_actas(f_html,str_prefix):
    """Extract the absolute link for each acta from the mesas HTML.

    Args:
        f_html: file-like response whose .read() yields the mesas page.
        str_prefix: URL root to prepend (one of the str_* constants above).
    """
    #str_prefix="http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/2davuelta/onpe/presidente/"
    #str_prefix="http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/1ravuelta/onpe/presidente/"
    #str_prefix="http://www.web.onpe.gob.pe/modElecciones/elecciones/elecciones2011/1ravuelta/onpe/congreso/"
    url_actas = []
    soup = BeautifulSoup( f_html.read() )
    for item in soup.findAll('a'):
        # attrs[0][1] is the value of the anchor's first attribute (href).
        url_actas.append( ''.join( [ str_prefix , item.attrs[0][1] ] ) )
    return url_actas
def from_acta_get_info():
    """Placeholder: implemented in Parser as parse_acta()."""
    pass
""""
tipo_consulta1:UBIGEO
cnume_acta:
ambito1:P
dpto:010000
prov:010100
dist:010111
local:0012
estado:T
continente:
pais:
ciudad:
embajada:
estado2:T
"""
if __name__ == "__main__":
#d = from_reg_get_provs( d_regiones['Amazonas'])
#s = from_prov_get_districts( d['CHACHAPOYAS'] )
#e = from_district_get_centros(s['LEVANTO'])
#results = from_centro_get_mesas(d_regiones['Amazonas'], d['CHACHAPOYAS'], s['LEVANTO'], e.values()[0])
#links = from_mesas_get_actas( results, str_2da_vuelta )
#print links
#print results
#for url in links:
# html_acta = urllib2.urlopen(url)
# f_tmp = open( url[-5:] + '.txt','w')
# Parser.parse_acta( html_acta , f_tmp )
# f_tmp.close()
#f_results = open( 'tmp_resultados.html','w')
#for line in results.read():
# f_results.write(line)
html_acta = open('Ejemplo_Acta_Segunda_Vuelta.html','r')
f_tmp = open('test.out','w')
Parser.parse_acta( html_acta, f_tmp)
html_acta.close()
f_tmp.close()
| PuercoPop/EleccionesPeru | get_mesas.py | get_mesas.py | py | 6,639 | python | es | code | 4 | github-code | 13 |
26790022261 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    def addTwoNumbers(self, l1: Optional[ListNode], l2: Optional[ListNode]) -> Optional[ListNode]:
        """Add two numbers stored as reversed-digit linked lists.

        Each input list holds one digit per node, least significant
        first; the sum is returned in the same format.
        """
        def to_int(node):
            # Collect digits (LSB first), then reverse into a normal int.
            digits = []
            while node:
                digits.append(node.val)
                node = node.next
            return int(''.join(str(d) for d in digits)[::-1])

        total = str(to_int(l1) + to_int(l2))[::-1]
        cur = dummy = ListNode(0)
        for ch in total:
            # Fixed: store int digits — the old code did ListNode(e) with
            # a '0'-'9' character, yielding string-valued nodes. The
            # general loop also covers the former single-digit special case.
            cur.next = ListNode(int(ch))
            cur = cur.next
        return dummy.next
| forestphilosophy/LeetCode_solutions | Interview Questions/add_two_numbers.py | add_two_numbers.py | py | 885 | python | en | code | 0 | github-code | 13 |
36988347576 | import os
from pydevlake import logger
def init():
    """Attach a remote debugger when USE_PYTHON_DEBUGGER is set.

    Reads PYTHON_DEBUG_HOST / PYTHON_DEBUG_PORT and currently supports
    only the PyCharm (pydevd) backend; no-ops when the env var is empty.
    """
    debugger = os.getenv("USE_PYTHON_DEBUGGER", default="").lower()
    if debugger == "":
        return
    # The hostname of the machine from which you're debugging (e.g. your IDE's host).
    host = os.getenv("PYTHON_DEBUG_HOST", default="localhost")
    # The port of the machine from which you're debugging (e.g. your IDE's host)
    port = int(os.getenv("PYTHON_DEBUG_PORT", default=32000))
    print("========== Enabling remote debugging on ", host, ":", port, " ==========")
    if debugger == "pycharm":
        try:
            # Imported lazily so the dependency is optional in production.
            import pydevd_pycharm as pydevd
            try:
                pydevd.settrace(host=host, port=port, suspend=False, stdoutToServer=True, stderrToServer=True)
                logger.info("Pycharm remote debugger successfully connected")
            except TimeoutError as e:
                logger.error(f"Failed to connect to pycharm debugger on {host}:{port}. Make sure it is running")
        except ImportError as e:
            logger.error("Pycharm debugger library is not installed")
    else:
        logger.error(f"Unsupported Python debugger specified: {debugger}")
| apache/incubator-devlake | backend/python/pydevlake/pydevlake/helpers/debugger.py | debugger.py | py | 1,170 | python | en | code | 2,256 | github-code | 13 |
8816168376 | import cv2
img = cv2.imread("./img/4.d6206092.jpg")
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
faceRect = detector.detectMultiScale(
gray,
scaleFactor=1.08,
minNeighbors=15,
minSize=(32, 32)
)
for x, y, w, h in faceRect:
cv2.rectangle(img, (x, y), (x+w,y+h), (0, 255, 0), 2)
cv2.imshow("img", img)
cv2.imshow("face", gray)
cv2.waitKey(0) | ChungyiBossi/computer_vision_playground | basic_sample/detect_face.py | detect_face.py | py | 457 | python | en | code | 1 | github-code | 13 |
24723155294 | from django.conf.urls import url
from . import views
from rest_framework.urlpatterns import format_suffix_patterns
from django.contrib.auth import views as auth_views
from bakery import views
from .models import Recipes
# URL routes for the bakery app.
# NOTE(review): both `from . import views` and `from bakery import views`
# are imported above; the second silently shadows the first.
urlpatterns = [
    url(r'^$', views.index, name="index"),
    # Recipe category listings.
    url(r'^recipe_list$', views.cakes, name='recipe_list'),
    url(r'^cakes_cupcakes$', views.cakes, name='cakes_cupcakes'),
    url(r'^pies$', views.pies, name='pies'),
    url(r'^cookies$', views.cookies, name='cookies'),
    url(r'^baked-goods$', views.bakedGoods, name='baked-goods'),
    url(r'^liked_list$', views.liked_list, name='liked_list'),
    url(r'^add_recipe$', views.add_recipe, name='add_recipe'),
    url(r'^recipe/(?P<pk>\d+)/$', views.recipe_detail, name='recipe_detail'),
    # REST API endpoints (django-rest-framework class-based views).
    url(r'^recipes/', views.recipeList.as_view()),
    url(r'^tweets.(?P<pk>[0-9]+)$', views.recipeList.as_view()),
    url(r'^profiles/', views.profileList.as_view()),
    # Registration / authentication.
    url(r'^registration_form$', views.UserFormView.as_view(), name="registration_form"),
    url(r'^registration/login/$', auth_views.login, {'template_name': 'bakery/registration/login.html'}, name='login_page'),
    url(r'^logout/$', auth_views.logout, {'next_page': '/'}, name='logout'),
    # User profile pages.
    url(r'^profile/(?P<username>[a-zA-Z0-9]+)/$', views.get_user_profile, name='userProfile'),
    url(r'^profile/(?P<username>[a-zA-Z0-9]+)/edit$', views.update_profile, name='profile-edit'),
    url(r'^profile/(?P<username>[a-zA-Z0-9]+)/friends$', views.friends, name='friends'),
    url(r'^profile/(?P<username>[a-zA-Z0-9]+)/made$', views.made, name='made'),
    url(r'^profile/(?P<username>[a-zA-Z0-9]+)/favorite$', views.favorite, name='favorite'),
    # NOTE(review): the leading '<' in this pattern looks accidental — confirm.
    url(r'^<(?P<pk>\d+)$', views.liked, name='liked'),
    url(r'^users$', views.users, name='users'),
    url(r'^recipe/(?P<pk>\d+)/comment/$', views.add_comment_to_recipe, name='add_comment_to_recipe'),
]
| SterreVB/TheLittleBakery | bakery/urls.py | urls.py | py | 1,911 | python | en | code | 0 | github-code | 13 |
74266572179 | import random
import sys
import threading
from collections import deque
from datetime import datetime
from threading import Thread
from time import sleep
from mpi4py import MPI
# Here using MPI to basically communicate among the various sites
# The code can be run by mpiexec -n <no.ofsites to execute> python SuzukuKasami.py
# - Globally declaring few variables needed for getting the current executing site or MPI rank
# - N - the total no. of sites running here
# - all the lock variables to be used by various processes in MPI, while accessing and using the
# shared global variables
# MPI handles: `tid` is this site's rank, N the total number of sites.
comm = MPI.COMM_WORLD
tid = comm.Get_rank()
N = comm.Get_size()
# One lock per shared-state operation, taken by the receiver thread and
# the main loop when they touch the globals below.
cs_lock, token_lock, rn_lock, release_lock, request_lock, send_lock = threading.Lock(), threading.Lock(), threading.Lock(), threading.Lock(), threading.Lock(), threading.Lock()
# Suzuki–Kasami state: token queue Q, highest-known request numbers RN,
# last-satisfied request numbers LN, plus status flags for this site.
Q = deque()
has_token, in_cs, waiting_for_token = 0, 0, 0
RN, LN = [], []
# Initialise one RN/LN slot per site.
for i in range(0, N): LN.append(0)
for i in range(0, N): RN.append(0)
# Site 0 starts out holding the token.
if tid == 0:
    print("%s: I'm %d and have a startup token." % (datetime.now().strftime('%M:%S'), tid))
    sys.stdout.flush()
    has_token = 1
    RN[0] = 1
# Helps to receive the request for the given current site
# It gets input from any source site, in recive mode
# updates the RN value , as max(existing RN, received sn'th execution value)
# if the current site , has the token , is not executing the critical section currently, and
# the site sends the token to the requesting site for critical section execution
def receive_request():
    """Receiver-thread loop: handle incoming 'RN' requests and 'token' messages.

    For an 'RN' message, RN[requester] is raised to the newest sequence
    number; if this site idly holds the token and the request is current,
    the token is forwarded. For a 'token' message, the token state (LN, Q)
    is installed and the critical section is entered.
    """
    global LN
    global RN
    global Q
    global in_cs
    global waiting_for_token
    global has_token
    while True:
        # Blocks until any site sends us a message.
        message = comm.recv(source=MPI.ANY_SOURCE)
        if message[0] == 'RN':
            with rn_lock:
                requester_id = message[1]
                cs_value = message[2]
                # Keep only the newest request number per site.
                RN[requester_id] = max([cs_value, RN[requester_id]])
                if cs_value < RN[requester_id]:
                    print(
                        "%s: Request from %d expired." % (datetime.now().strftime('%M:%S'), requester_id))
                    sys.stdout.flush()
                # Idle token holder with an outstanding, current request:
                # hand the token over immediately.
                if (has_token == 1) and (in_cs == 0) and (RN[requester_id] == (LN[requester_id] + 1)):
                    has_token = 0
                    send_token(requester_id)
        elif message[0] == 'token':
            with token_lock:
                print("%s: I'm %d and I got a token." % (datetime.now().strftime('%M:%S'), tid))
                sys.stdout.flush()
                has_token = 1
                waiting_for_token = 0
                # Adopt the token's state (LN array and waiting queue).
                LN = message[1]
                Q = message[2]
                critical_section()
# Helps to send the request for the current site to execute the critical section
# except the current site, to all other site in mpi, the request message is sent
def send_request(message):
    """Broadcast this site's CS request (sequence number) to every other site.

    Args:
        message: this site's current request number RN[tid].
    """
    for i in range(N):
        if tid != i:
            to_send = ['RN', tid, message]
            comm.send(to_send, dest=i)
# Helps to send the token to the given receipient
def send_token(recipent):
    """Ship the token (LN array + waiting queue) to the given site."""
    global Q
    with send_lock:
        print("%s: I'm %d and sending the token to %d." % (datetime.now().strftime('%M:%S'), tid, recipent))
        sys.stdout.flush()
        global in_cs
        to_send = ['token', LN, Q]
        comm.send(to_send, dest=recipent)
# Helps to request token to get into the critical section
# Everytime while requesting the token to execute the CS, RN[i] value would be
# incremented and send_request would be sent
def request_cs():
    """Request the token: bump RN[tid] and broadcast it to all other sites.

    No-op when this site already holds the token.
    """
    global RN
    global in_cs
    global waiting_for_token
    global has_token
    with request_lock:
        if has_token == 0:
            RN[tid] += 1
            print("%s: I'm %d and want a token for the %d time." % (datetime.now().strftime('%M:%S'), tid, RN[tid]))
            sys.stdout.flush()
            waiting_for_token = 1
            send_request(RN[tid])
# Helps to release the critical section
# While releasing helps to check whether the other elements which are requesting in the queue
# are there, if so, the top would be popped out, the the token would be sent to it
def release_cs():
    """Leave the critical section per Suzuki–Kasami.

    Records the satisfied request (LN[tid] = RN[tid]), enqueues every
    site with an outstanding current request, and forwards the token to
    the queue head if anyone is waiting.
    """
    global in_cs
    global LN
    global RN
    global Q
    global has_token
    with release_lock:
        LN[tid] = RN[tid]
        for k in range(N):
            if k not in Q:
                # RN[k] == LN[k] + 1 means site k has an unsatisfied request.
                if RN[k] == (LN[k] + 1):
                    Q.append(k)
                    print("%s: I'm %d and it adds %d to the queue. Queue after adding:%s." % (
                        datetime.now().strftime('%M:%S'), tid, k, str(Q)))
                    sys.stdout.flush()
        if len(Q) != 0:
            has_token = 0
            send_token(Q.popleft())
# Helps to execute the critical section
# After executing , the critical section is released
def critical_section():
    """Execute the critical section (simulated by a random sleep).

    Only runs when this site holds the token; releases the CS afterwards.
    """
    global in_cs
    global has_token
    with cs_lock:
        if has_token == 1:
            in_cs = 1
            print("%s: I'm %d and doing %d CS." % (datetime.now().strftime('%M:%S'), tid, RN[tid]))
            sys.stdout.flush()
            # Simulated work inside the critical section.
            sleep(random.uniform(2, 5))
            print("%s: I'm %d and finished %d CS." % (datetime.now().strftime('%M:%S'), tid, RN[tid]))
            sys.stdout.flush()
            in_cs = 0
            release_cs()
# Start the background receiver thread, then loop forever alternating
# between requesting the token and executing the critical section.
try:
    thread_receiver = Thread(target=receive_request)
    thread_receiver.start()
except:
    print("Error: unable to start thread! ")
    sys.stdout.flush()
while True:
    if has_token == 0:
        # Think for a while, then ask for the token.
        sleep(random.uniform(1, 3))
        request_cs()
    elif in_cs == 0:
        critical_section()
    # Busy-wait (with a short sleep) until the receiver installs the token.
    while waiting_for_token:
        sleep(0.5)
| ThulasiRamNTR/SuzukiKasami | SuzukiKasami/SuzukiKasami.py | SuzukiKasami.py | py | 5,868 | python | en | code | 0 | github-code | 13 |
42395521763 | '''
É aniversário da Creuza e ela não sabe quantas velas colocar em cima do bolo.
Problema: Ela sabe o ano em que nasceu, mas não sabe qual a idade dela.
'''
from datetime import date
def age_of_creuza():
    """Ask for Creuza's birth year and announce how many candles the cake needs."""
    birth_year = int(input("Creuza, em que ano você nasceu ? "))
    candles = date.today().year - birth_year
    print("Você está complentando", candles, "anos, então vamos colocar", candles, "velas no bolo.")

age_of_creuza()
| brualvess/python_exercises | helping_creuza/situation01.py | situation01.py | py | 464 | python | pt | code | 0 | github-code | 13 |
def isNaN(num):
    """Return True when *num* is NaN — the only value not equal to itself."""
    # Non-numpy NaN check, per
    # https://stackoverflow.com/questions/944700/how-can-i-check-for-nan-values
    return not (num == num)
def str2bool(v):
    """Map a truthy/falsy string to a bool; return () when unrecognised.

    Based on
    https://intellipaat.com/community/2592/converting-from-a-string-to-boolean-in-python
    """
    s = str(v).upper()
    # Fixed: the original compared the UPPER-cased input against
    # lower-case literals (so nothing ever matched) and the elif
    # repeated the truthy set, making the False branch unreachable.
    if s in ("YES", "TRUE", "T", "1", "Y"):
        return True
    if s in ("NO", "FALSE", "F", "0", "N"):
        return False
    return ()
def check_boolean_column(v):
    """Return True when *v* looks like a boolean column.

    A boolean column holds exactly one known true/false pair
    (case-insensitive), possibly padded with NaN blanks, and at most
    three raw entries in total.
    """
    known_pairs = [
        ['1.0', '0.0'], ['1', '0'], [True, False], ['Y', 'N'], ['T', 'F'],
        ['YES', 'NO'], ['TRUE', 'FALSE'], ['MALE', 'FEMALE'],
    ]
    if len(v) > 3:
        return False
    # Drop NaNs and normalise to upper-case, highest-sorting value first,
    # so the comparison against each known pair is order-independent.
    values = sorted((str(x).upper() for x in v if not isNaN(x)), reverse=True)
    return any(values == pair for pair in known_pairs)
| rseeton/data_dictionary_generator | utility_functions.py | utility_functions.py | py | 980 | python | en | code | 0 | github-code | 13 |
73902857298 | '''
Function support clone data
Edit by: AnhKhoa
Date: April 07,2023
'''
from keras.utils import np_utils
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
import csv
import numpy as np
import os
import datetime
from skimage import io
from sklearn.model_selection import train_test_split
# At first we load every image path from the data base directory.
DatasetPath = []
for i in os.listdir('./CNNdata'):
    DatasetPath.append(os.path.join('./CNNdata', i))
imageData = []
imageLabels = []
# Then load all photos from the data base and save the photos and labels.
for i in DatasetPath:
    imgRead = io.imread(i,as_gray=True)
    imageData.append(imgRead)
    # Label is encoded in the filename as "<label>_...", 1-based on disk.
    labelRead = int(os.path.split(i)[1].split("_")[0]) - 1
    imageLabels.append(labelRead)
# Split randomly into two parts: 80% for training, 20% for testing.
X_train, X_test, y_train, y_test = train_test_split(np.array(imageData),np.array(imageLabels), train_size=0.8, random_state = 4)
X_train = np.array(X_train)
X_test = np.array(X_test)
y_train = np.array(y_train)
y_test = np.array(y_test)
# nb_classes is the number of target classes for this model.
nb_classes = 4 #demo code with 4 food
# One-hot encode the integer labels for categorical_crossentropy.
Y_train = np_utils.to_categorical(y_train, nb_classes)
Y_test = np_utils.to_categorical(y_test, nb_classes)
# For the tensorflow backend the layout is (nb_of_photo, size, size, channel);
# for the theano backend it would be (channel, nb_of_photo, size, size).
# We are using the tensorflow backend, so take the first layout.
X_train = X_train.reshape(X_train.shape[0], 46, 46, 1)
X_test = X_test.reshape(X_test.shape[0], 46, 46, 1)
# input_shape is for the first layer of the model:
# 46x46 pixels, 1 channel (images were read as gray, not RGB).
input_shape = (46, 46, 1)
# Normalise pixel intensities from [0, 255] to [0, 1].
X_train = X_train.astype('float32')
X_test = X_test.astype('float32')
X_train /= 255
X_test /= 255
# Then we build the model: three conv/conv/pool/dropout stages with
# widening filters (16 -> 32 -> 64), followed by a dense classifier head.
model = Sequential()
model.add(Convolution2D(16, 3, 3, padding='same', input_shape=input_shape, activation='relu'))
model.add(Convolution2D(16, 3, 3, padding='same', activation='relu'))
model.add(MaxPooling2D((2,2), padding='same'))
model.add(Dropout(0.25))
model.add(Convolution2D(32, 3, 3, padding='same', activation='relu'))
model.add(Convolution2D(32, 3, 3, padding='same', activation='relu'))
model.add(MaxPooling2D((2,2), padding='same'))
model.add(Dropout(0.25))
model.add(Convolution2D(64, 3, 3, padding='same', activation='relu'))
model.add(Convolution2D(64, 3, 3, padding='same', activation='relu'))
model.add(MaxPooling2D((2,2), padding='same'))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(256, activation='relu'))
model.add(Dropout(0.5))
# Final softmax over the nb_classes categories.
model.add(Dense(nb_classes))
model.add(Activation('softmax'))
# Then we compile the model.
model.compile(loss='categorical_crossentropy', optimizer='adadelta', metrics=['accuracy'])
# Training loop (currently a single iteration; widen the range to rerun).
for i in range(0,1):
    # Time the training run.
    time_str = datetime.datetime.now()
    epo=5
    model.fit(X_train, Y_train, batch_size=32, epochs=epo,
              verbose=1, validation_data=(X_test, Y_test))
    time_end = datetime.datetime.now()
    time_train = (time_end - time_str).total_seconds()
    # When training finishes, save the trained model architecture as JSON.
    model_json = model.to_json()
    with open("model.json", "w") as json_file:
        json_file.write(model_json)
    # Serialize weights to HDF5.
    model.save_weights("model.h5")
    print("Saved model to disk")
    # Evaluate on the held-out 20% split created earlier.
    scores = model.evaluate(X_test, Y_test, verbose=0)
    print(scores)
    print("%s: %.2f%%" % (model.metrics_names[1], scores[1]*100))
    # Append a CSV line: train size, epochs, seconds, loss, accuracy.
    logz =str(len( X_train))+","+str(epo)+","+ str(round(time_train,6))+","+str(round(scores[0],8)) +","+ str(round(scores[1],8))
    print(logz)
    with open("training_log.txt", "a+") as myfile:
        myfile.write(logz+"\n")
    print("Finish")
| trandoanhkhoa/Classification_Food | step2_trainWindows.py | step2_trainWindows.py | py | 4,005 | python | en | code | 0 | github-code | 13 |
42798416615 | import idaapi, idautils, ida_funcs, idc
def dump_funcs(res_path):
    """Write the start address of every IDA function, one hex value per line."""
    with open(res_path, 'w') as out:
        for func_ea in Functions():
            out.write('%x\n' % int(func_ea))
if __name__ == '__main__':
    # Fixed: ida_pro was used below but never imported, so qexit raised
    # NameError and IDA never exited cleanly in batch mode.
    import ida_pro
    # Wait for auto-analysis to finish before dumping functions.
    idaapi.auto_wait()
    res_path = idc.ARGV[1]
    dump_funcs(res_path)
    ida_pro.qexit(0)
| B2R2-org/FunProbe | tools/ida/scripts/idascript.py | idascript.py | py | 344 | python | en | code | 3 | github-code | 13 |
40467902835 | from collections import Counter
import sys
input = sys.stdin.readline  # faster input for large boards
N, M, B = map(int, input().split())
heights = []
for _ in range(N) :
    heights += list(map(int, input().split()))
# frequency of each ground height (Counter comes from the module-level import)
counter = Counter(heights).items()
answer = 0
time = 999999999  # best (minimum) time found so far
# try target levels from the highest reachable one down to 0; the upper bound
# is what the inventory B plus all existing blocks could fill uniformly
for i in range(min((B + sum(heights)) // (N * M), max(heights)), -1, -1) :
    count = 0
    for k, ct in counter :
        if k > i : # column is taller than the target level
            count += (k-i) * ct * 2 # removing a block costs 2 seconds each
        else :
            count += (i-k) * ct  # placing a block costs 1 second each
    if count < time : # strict '<' so, on ties, the higher level (seen first) wins
        time = count
        answer = i
    else :
        break  # NOTE(review): early exit assumes the cost is unimodal in i -- confirm
print(time, answer) | dakaeng/Baekjoon | 백준/Silver/18111. 마인크래프트/마인크래프트.py | 마인크래프트.py | py | 785 | python | ko | code | 0 | github-code | 13 |
16131962503 | """
Purpose:
OpenTelemetry provides a vendor-agnostic standard for observability,
allowing users to decouple instrumentation and
routing from storage and query.
pip install opentelemetry-api opentelemetry-sdk
"""
from random import randint
from flask import Flask, request
from opentelemetry import trace
from opentelemetry.sdk.resources import Resource
from opentelemetry.sdk.trace import TracerProvider
from opentelemetry.sdk.trace.export import BatchSpanProcessor, ConsoleSpanExporter
# Wire up a TracerProvider that exports finished spans to stdout in batches.
provider = TracerProvider()
processor = BatchSpanProcessor(ConsoleSpanExporter())
provider.add_span_processor(processor)
trace.set_tracer_provider(provider)
tracer = trace.get_tracer(__name__)
app = Flask(__name__)
# Method 1 :Traditional
# @app.route("/roll")
# def roll():
# sides = int(request.args.get('sides'))
# rolls = int(request.args.get('rolls'))
# return roll_sum(sides, rolls)
# Method 2 : With Tracing
# @app.route("/roll")
# def roll():
# with tracer.start_as_current_span("server_request"):
# sides = int(request.args.get('sides'))
# rolls = int(request.args.get('rolls'))
# return roll_sum(sides, rolls)
# def roll_sum(sides, rolls):
# sum = 0
# for r in range(0, rolls):
# result = randint(1, sides)
# sum += result
# return str(sum)
# Method 3 : With Tracing
@app.route("/roll")
def roll():
    # Wrap the whole request in a span so each per-roll event recorded in
    # roll_sum() is attached to this trace.
    with tracer.start_as_current_span(
        "server_request", attributes={"endpoint": "/roll"}
    ):
        # NOTE(review): both query params are required; a missing or
        # non-numeric value makes int() raise -> HTTP 500.
        sides = int(request.args.get("sides"))
        rolls = int(request.args.get("rolls"))
        return roll_sum(sides, rolls)
def roll_sum(sides, rolls):
    """Roll a `sides`-sided die `rolls` times, logging each roll as an event
    on the current span, and return the total as a string (response body).
    """
    span = trace.get_current_span()
    total = 0  # renamed from `sum`, which shadowed the builtin
    for _ in range(0, rolls):  # loop index was unused
        result = randint(1, sides)
        span.add_event(
            "log",
            {
                "roll.sides": sides,
                "roll.result": result,
            },
        )
        total += result
    return str(total)
if __name__ == "__main__":
    # development server only; the instrumented endpoint lives at /roll
    app.run(debug=False, port=8081)
# curl "http://127.0.0.1:8081/roll?sides=10&rolls=1"
| udhayprakash/PythonMaterial | python3/16_Web_Services/f_web_application/d_using_flask/i_telemetry_monitoring/b_OpenTelemetry/d_OpenTelemetry.py | d_OpenTelemetry.py | py | 2,095 | python | en | code | 7 | github-code | 13 |
71446916499 | # -*- coding: utf-8 -*-
"""
Created on Mon May 8 18:36:58 2023
@author: talbanesi
"""
# Importacion de librerias
import numpy as np
import scipy.signal as sig
import matplotlib.pyplot as plt
# from splane import analyze_sys # version vieja
from pytc2.sistemas_lineales import analyze_sys
from sympy import Symbol
### Filter template (design specification) data
# Maximum passband attenuation (ripple), in dB
alfa_max = 0.4
# Minimum stopband attenuation, in dB
alfa_min = 48
# Normalized stopband edge angular frequency
ws = 3
# Normalized passband edge angular frequency
wp = 1
# Epsilon squared; the same expression applies to maximally-flat and Chebyshev
ee = 10**(alfa_max/10)-1
e = np.sqrt(ee)  # NOTE(review): computed but never used below
# Iterate over candidate filter orders and select the first one that crosses
# the minimum attenuation requirement
veces = 0
for nn in range(1,9):
    # Minimum attenuation in dB for a Chebyshev filter of order nn
    alfa_min_n = 10*np.log10(1 + ee * np.cosh(nn * np.arccosh(ws))**2 )
    # Show the results
    print( 'nn {:d} - alfa_min_cheby {:f}'.format(nn, alfa_min_n) )
    if (alfa_min_n > alfa_min and veces == 0):
        n_seleccionado = nn
        veces = veces + 1
print('El orden del filtro seleccionado, en base a la atenuacion minima, es: {:d}'.format(n_seleccionado))
# Hard-coded transfer-function coefficients for the selected (order 5) filter
num = [0, 0, 0, 0, 0, 0.2012]
den = [1, 0, 1.25, 0, 0.3125, 0.2012]
sys = sig.TransferFunction(num, den)  # NOTE: local name `sys` shadows the stdlib module name
# Filter name labels
# filter_name = 'MP_' + str(nn) + '_ripp_' + str(alfa_max) + 'dB'
plt.close('all')
# pytc2 helper: plots magnitude, phase, pole-zero diagram and group delay
analyze_sys(sys)
rr = np.roots(den)
s = Symbol('s')
poli2_1 = (s + rr[0]) * (s + rr[1]) | tomasalbanesi/TC2_2023 | Guia_Ejercicios/TP2_AproximacionFuncionesTransferencia/Ejercicio_3/scripts/GE2023_TP2_EJ3_SimulacionNumerica.py | GE2023_TP2_EJ3_SimulacionNumerica.py | py | 1,738 | python | es | code | 0 | github-code | 13 |
25933076945 |
from socket import *
from select import *
from time import sleep

# Minimal epoll-based TCP server on port 8888: accepts connections and
# acknowledges every received message.
s = socket()
s.setsockopt(SOL_SOCKET,SO_REUSEADDR,1)
s.bind(('0.0.0.0',8888))
s.listen(5)
p = epoll()
# fd -> socket map, seeded with the listening socket itself
fdmap = {s.fileno():s}
p.register(s, EPOLLIN | EPOLLERR)
while True:
    print('listen port ....')
    events = p.poll()
    for fd,event in events:
        if fd == s.fileno():
            # readiness on the listening socket means a new connection
            c,addr = fdmap[fd].accept()
            print('connection form :',addr)
            p.register(c, EPOLLIN)
            fdmap[c.fileno()] = c
        elif event & EPOLLIN:
            data = fdmap[fd].recv(4096)
            if not data:
                # peer closed the connection: tear it down
                fdmap[fd].close()
                p.unregister(fd)
                # Bug fix: the closed socket was left in fdmap, leaking one
                # stale entry per disconnected client (and risking reuse of
                # a recycled fd number pointing at a closed socket).
                del fdmap[fd]
                break
            print('receive client message:',data.decode())
            fdmap[fd].send('receive client message'.encode())
s.close()  # unreachable (the loop above never exits); kept for symmetry
| Ahead180-103/ubuntu | python/shell.py/pynet/select_poll_epoll/tcp_IO_epoll.py | tcp_IO_epoll.py | py | 821 | python | en | code | 0 | github-code | 13 |
4664704821 | from dj_ast import ASTNode, TDUnit
from dj_ops import PerEntryFilter
from common import InitializationFailed, escape
class IsPartOf(PerEntryFilter):
    """ Tests if a given entry is part of the specified sequence.
    For example "cde" is part of the sequence "abcdefghijklmnopqrstuvwxyz".

    An entry may also match as a concatenation of several sufficiently long
    runs of the sequence (see `process`); `WRAP_AROUND` controls whether a
    run may continue from the end of the sequence back to its start.
    """
    def op_name() -> str: return "is_part_of"  # no self/cls: only ever invoked via the class (see __str__)
    ENTRY_MIN_LENGTH = 3
    """ Only entries of the given minimum length are checked for being
    a part of the specified sequence.
    """
    MIN_SEQUENCE_LENGTH = 3  # minimum length of each matched run
    WRAP_AROUND = True  # allow a run to wrap from the sequence end back to its start
    def __init__(self, sequence: str) -> None:
        self.sequence = sequence
    def __str__(self):
        return f'{IsPartOf.op_name()} "{escape(self.sequence)}"'
    def init(self, td_unit: TDUnit, parent: ASTNode):
        """Validate the configuration; raises InitializationFailed on bad setup."""
        super().init(td_unit, parent)
        if len(self.sequence) < 2:
            raise InitializationFailed(
                f"{self}: a sequence has to have at least two characters")
        if IsPartOf.ENTRY_MIN_LENGTH <= 1:
            raise InitializationFailed(
                f"{self}: ENTRY_MIN_LENGTH has to be larger than 1"
            )
        if len(self.sequence) < IsPartOf.ENTRY_MIN_LENGTH:
            raise InitializationFailed(
                f"{self}: the length of the sequence is smaller than ENTRY_MIN_LENGTH"
            )
        if IsPartOf.MIN_SEQUENCE_LENGTH > IsPartOf.ENTRY_MIN_LENGTH:
            raise InitializationFailed(
                f"{self}: MIN_SEQUENCE_LENGTH <= ENTRY_MIN_LENGTH")
    def process(self, entry: str) -> list[str]:
        """Return [entry] if the entry is (piecewise) part of the sequence, else []."""
        if len(entry) < IsPartOf.ENTRY_MIN_LENGTH:
            return []
        remaining_entry = entry
        len_sequence = len(self.sequence)
        MIN_SEQ_LEN = IsPartOf.MIN_SEQUENCE_LENGTH
        i = 0 # the index of the next character in the sequence that needs to be matched AFTER we found a matching character
        while i < len_sequence:
            # 1. let's find a matching character in the sequence for the remaining entry
            s = self.sequence[i]
            i += 1
            if remaining_entry[0] != s:
                continue
            # 2. let's try to match the rest of the remaining entry..
            len_remaining_entry = len(remaining_entry)
            if len_remaining_entry < MIN_SEQ_LEN:
                break
            next_i = i % len(self.sequence)
            if next_i == 0 and not IsPartOf.WRAP_AROUND:
                break
            remaining_chars = len_remaining_entry - 1
            remaining_i = 1
            while remaining_i < len_remaining_entry:
                e = remaining_entry[remaining_i]
                remaining_i += 1
                if self.sequence[next_i] != e:
                    break
                else:
                    remaining_chars -= 1
                    next_i = (next_i+1) % len(self.sequence)
                    if remaining_chars > 0 and next_i == 0 and not IsPartOf.WRAP_AROUND:
                        break
            # 3. check that we have a "reasonable" match
            if remaining_chars == 0:
                return [entry]
            elif remaining_i-1 >= MIN_SEQ_LEN:
                # The last match was long enough, but we are not done yet...
                # 3.1. check if the rest is "long enough"
                if remaining_chars >= MIN_SEQ_LEN:
                    # Update entry ...
                    remaining_entry = remaining_entry[(remaining_i-1):]
                    # Reset i to start again for matching the next part;
                    # this is necessary since we do not wrap around the
                    # initial search in the sequence!
                    i = 0
                # 3.2. check if we can steal something from the current/last match
                elif (remaining_i-1)-(MIN_SEQ_LEN-remaining_chars) >= MIN_SEQ_LEN:
                    # The remaining length is to short, let's try to find a
                    # matching sequence by taking some of the
                    # matched characters and trying to match it again.
                    remaining_entry = remaining_entry[len_remaining_entry-MIN_SEQ_LEN:]
                    i = 0
        return []
| Delors/DJ | operations/is_part_of.py | is_part_of.py | py | 4,179 | python | en | code | 2 | github-code | 13 |
72838131538 | #!/usr/bin/env python3
# Valutaomräkningsprogram, version 1
import pickle

ladda = input("Vill du ladda tidigare kurs? (j/n): ")
if (ladda == "j"):
    # Fix: the file objects handed to pickle were never closed; use context
    # managers so the handles are released deterministically.
    with open('kurs.p', 'rb') as kursfil:
        kurs = pickle.load(kursfil)
elif (ladda == "n"):
    kurs = float(input("Ange ny USD-kurs: "))
    with open('kurs.p', 'wb') as kursfil:
        pickle.dump(kurs, kursfil)
else:
    print ("Var god svara (j)a eller (n)ej")
    quit()
usd = float(input("Ange summa i USD: "))
print ("%.2f USD motsvarar %.2f SEK" \
    %(usd, usd*kurs))
| jackbenny/grunderna-i-programmering-andra-utgavan | kapitel8/sidan_145_ex1.py | sidan_145_ex1.py | py | 469 | python | sv | code | 1 | github-code | 13 |
4213640348 | import os
from flask import (render_template, current_app, url_for, flash,
redirect, request, abort, Blueprint)
from flask_login import current_user, login_required
from blog import db
from blog.models import Upload
from blog.uploads.forms import UploadForm
uploads = Blueprint('uploads', __name__)
@uploads.route("/upload/new", methods=["GET", "POST"])
@login_required
def new_upload():
    """Upload a new document: save the file under static/documents and store
    its first 10 KiB plus metadata as an Upload row."""
    form = UploadForm()
    if request.method=="POST":
        file = request.files['data']
        if file.filename == '':
            flash('No file selected for upload')
            return redirect(request.url)
        else:
            # NOTE(review): file.filename is user-controlled; joining it into
            # a filesystem path permits traversal -- consider werkzeug.utils.secure_filename.
            path = os.path.join(current_app.root_path, 'static/documents', file.filename)
            file.save(path)
    if form.validate_on_submit():
        # Bug fix: save() above consumed the upload stream, so read() returned
        # b'' and every Upload row was stored with empty data. Rewind first.
        file.seek(0)
        content=file.read(10240)
        upload = Upload(title=form.title.data, name=file.filename, data=content, author=current_user)
        db.session.add(upload)
        db.session.commit()
        flash("Your document has been uploaded successfully!", "success")
        return redirect(url_for("main.documents"))
    return render_template("create_upload.html",
                           title='New Upload', form=form, legend="New Upload")
@uploads.route("/upload/<upload_id>", methods=["GET", "POST"])
def upload(upload_id):
    """Render the detail page for one uploaded document; 404 if unknown."""
    document = Upload.query.get_or_404(upload_id)
    return render_template("upload.html", title=document.title, upload=document)
@uploads.route("/user/<string:username>")
def user_uploads(username):
    """List one user's uploads, newest first, paginated 5 per page."""
    # Bug fix: `User` was referenced below but never imported in this module,
    # so every request to this route raised NameError. Assumes User lives in
    # blog.models alongside Upload -- confirm.
    from blog.models import User

    page = request.args.get('page', 1, type=int)
    user = User.query.filter_by(username=username).first_or_404()
    uploads = Upload.query.filter_by(author=user).order_by(Upload.date_posted.desc()).paginate(page=page, per_page=5)
    return render_template("user_uploads.html", uploads=uploads, user=user)
@uploads.route("/upload/<upload_id>/update", methods=["GET", "POST"])
@login_required
def update_upload(upload_id):
    """Edit an existing upload; only the owner may update it."""
    upload = Upload.query.get_or_404(upload_id)
    if upload.author != current_user:
        abort(403)
    form = UploadForm()
    if form.validate_on_submit():
        # Bug fix: request.files['data'] used to be read unconditionally
        # before this branch, which made plain GET requests fail with a
        # 400 BadRequestKeyError before the edit form could render.
        file = request.files['data']
        upload.title = form.title.data
        upload.data = file.read(10240)
        upload.name = file.filename
        db.session.commit()
        flash("Your post has been updated", "success")
        return redirect(url_for("uploads.upload", upload_id=upload.id))
    elif request.method=="GET":
        # pre-populate the form with the current values
        form.title.data = upload.title
        form.data.data = upload.data
    return render_template("create_upload.html", title='Update Document', form=form, legend="Update Document")
@uploads.route("/upload/<upload_id>/delete", methods=["POST"])
@login_required
def delete_document(upload_id):
    """Delete an upload owned by the current user, then go to the home page."""
    upload = Upload.query.get_or_404(upload_id)
    if upload.author != current_user:
        abort(403)  # only the owner may delete
    db.session.delete(upload)
    db.session.commit()
    flash("Your file was deleted", "success")
    # NOTE(review): url_for("home") assumes a non-blueprint 'home' endpoint;
    # if home lives on a blueprint (e.g. 'main.home') this raises BuildError -- confirm.
    return redirect(url_for("home"))
| bull-mawat-lang/lang-blog | blog/uploads/routes.py | routes.py | py | 3,025 | python | en | code | 0 | github-code | 13 |
16368515637 | import time
from django.shortcuts import render,redirect
from django.http import HttpResponse,JsonResponse
from .forms import *
from django.views import View
from .models import *
# Create your views here.
from keras.models import load_model
from keras.models import Sequential
from keras.layers import Convolution2D
from keras.layers import MaxPooling2D
from keras.layers import Flatten
from keras.layers import Dense
from keras.preprocessing.image import ImageDataGenerator
from keras.backend import clear_session
import schedule
import time
import pyscreenshot as ImageGrab
continuous_monitoring=0
from .decorators import global_data
@global_data
def test(request):
    """Trivial health-check endpoint (removed the unused local `data` dict)."""
    return HttpResponse("Working")
@global_data
def home(request):
    """Render the home page, initialising the monitoring flag in the session."""
    if 'continuous_monitoring' not in request.session:
        request.session['continuous_monitoring']=0
    print(request.session['continuous_monitoring'])  # debug output
    data={}
    data['continuous_monitoring']=request.session['continuous_monitoring']
    return render(request,'main/home.html',data)
class upload(View):
    """Photo upload page: GET renders the gallery, POST accepts one file."""
    def get(self, request):
        # Ensure the session flag exists before rendering.
        if 'continuous_monitoring' not in request.session:
            request.session['continuous_monitoring']=0
        data={}
        data['continuous_monitoring']=request.session['continuous_monitoring']
        photos_list = Photo.objects.all()
        data['photos']=photos_list
        return render(self.request, 'main/upload.html', data)
    def post(self, request):
        # Validate and persist the uploaded photo; respond with JSON so the
        # client-side uploader can report per-file success/failure.
        form = PhotoForm(self.request.POST, self.request.FILES)
        if form.is_valid():
            photo = form.save()
            data = {'is_valid': True, 'name': photo.file.name, 'url': photo.file.url}
        else:
            data = {'is_valid': False}
        return JsonResponse(data)
@global_data
def check(request):
    """Classify every image under main/photos/ with the saved Keras model,
    persist a Result row per new file, then redirect to the result page."""
    test_datagen = ImageDataGenerator(rescale = 1./255)
    classifier = load_model('main/save_data.h5')
    # shuffle=False keeps result[i] aligned with result_set.filenames[i]
    result_set = test_datagen.flow_from_directory('main/photos/',target_size = (64, 64),batch_size = 32,class_mode = 'binary',shuffle=False)
    result = classifier.predict_generator(result_set,workers=1)
    al_result_fname=Result.objects.all().values_list('file_name',flat=True)
    for i in range(len(result_set.filenames)):
        print(result_set.filenames[i],result[i])
        fname=result_set.filenames[i]
        prob=result[i]
        # NOTE(review): rows store file_name=fname[5:] but this check compares
        # the unstripped fname, so it can never match an existing row and
        # re-running the view may duplicate results -- confirm.
        if fname not in al_result_fname:
            obj=Result()
            obj.file_name=fname[5:]
            obj.file_url=fname
            obj.percentage_safe=(1-prob)*100
            obj.save()
    clear_session()  # release the Keras/TF session between requests
    del result
    del result_set
    del classifier
    del test_datagen
    return redirect('/photo/result')
@global_data
def result(request):
    """Render the classification results page with every stored Result row."""
    if 'continuous_monitoring' not in request.session:
        request.session['continuous_monitoring']=0
    data={}
    data['continuous_monitoring']=request.session['continuous_monitoring']
    all_obj=Result.objects.all()
    data['all_obj']=all_obj
    return render(request,'main/result.html',data)
@global_data
def continuous(request):
    """Turn on continuous screen monitoring.

    WARNING(review): the trailing `while True` loop never returns, so this
    request handler blocks its worker forever, and the scheduled job reloads
    the Keras model every second -- confirm this is intentional.
    """
    request.session['continuous_monitoring']=1
    request.session.save()
    print(request.session['continuous_monitoring'])
    print("continuous")
    data={}
    def job():
        # Grab the whole screen and classify the screenshot.
        print("Monitoring")
        im=ImageGrab.grab()
        im.save("main/screenshot/nsfw/screengrab.jpeg", "JPEG")
        test_datagen = ImageDataGenerator(rescale = 1./255)
        classifier = load_model('main/save_data.h5')
        result_set = test_datagen.flow_from_directory('main/screenshot/',target_size = (64, 64),batch_size = 32,class_mode = 'binary',shuffle=False)
        result = classifier.predict_generator(result_set,workers=1)
        clear_session()
        print(1-result)
    schedule.every(1).seconds.do(job).tag('job', 'task')
    while True:
        schedule.run_pending()
@global_data
def continuous_off(request):
    """Stop continuous monitoring: clear the session flag and cancel the job."""
    request.session['continuous_monitoring']=0
    request.session.save()
    print(request.session['continuous_monitoring'])
    try:
        schedule.clear('job')
    except Exception:
        # Best-effort: the job may never have been scheduled. Narrowed from a
        # bare `except:`, which would also have swallowed KeyboardInterrupt
        # and SystemExit. (Also removed the unused local `data` dict.)
        pass
    return redirect('/photo/home')
@global_data
def delete_all_photos(request):
    """Delete every uploaded photo (storage file + DB row) and all Result rows."""
    all_photos=Photo.objects.all()
    for photo in all_photos:
        photo.file.delete()  # remove the file from storage first
        photo.delete()  # then delete the database record
    all_result=Result.objects.all()
    all_result.delete()
return redirect('/photo/upload') | Augustinetharakan12/hack-for-tomorrow-main | django-web-app/web_app/main/views.py | views.py | py | 3,981 | python | en | code | 0 | github-code | 13 |
34090830130 | #!/usr/bin/python3
"""This script uses the `json` module to write the tasks data"""
import csv
import json
import requests
import sys
if __name__ == '__main__':
    # Cleanup: removed the duplicate in-block imports of json/requests/sys
    # (already imported at module level) and the unused counters
    # (total_todos, done_todos, done_todo_titles).
    emp_id = sys.argv[1]
    file_name = emp_id + '.json'

    # Fetch the employee's username.
    res = requests.get('https://jsonplaceholder.typicode.com/users/' + emp_id)
    emp_username = res.json().get('username')

    # Fetch all of the employee's TODO items.
    res = requests.get('https://jsonplaceholder.typicode.com/users/' +
                       emp_id + '/todos')
    emp_todos = res.json()

    # Output format: {"<emp_id>": [{"task", "completed", "username"}, ...]}
    records = {str(emp_id): []}
    for item in emp_todos:
        records[str(emp_id)].append({"task": item.get('title'),
                                     "completed": item.get("completed"),
                                     "username": emp_username})

    with open(file_name, 'w') as jsonfile:
        json.dump(records, jsonfile)
| udobeke/alx-system_engineering-devops | 0x15-api/2-export_to_JSON.py | 2-export_to_JSON.py | py | 995 | python | en | code | 0 | github-code | 13 |
32071977058 | from pytest import fixture
from longest_substring_without_repeating_characters import (
Solution,
)
@fixture
def s() -> Solution:
    # Fresh Solution instance per test (function-scoped pytest fixture).
    return Solution()
# Examples 1-3 mirror the LeetCode problem statement.
def test_example_one(s: Solution):
    assert (
        s.lengthOfLongestSubstring("abcabcbb") == 3
    ), """
    Input: s = "abcabcbb"
    Output: 3
    Explanation: The answer is "abc", with the length of 3.
    """
def test_example_two(s: Solution):
    assert (
        s.lengthOfLongestSubstring("bbbbb") == 1
    ), """
    Input: s = "bbbbb"
    Output: 1
    Explanation: The answer is "b", with the length of 1.
    """
def test_example_three(s: Solution):
    assert (
        s.lengthOfLongestSubstring("pwwkew") == 3
    ), """
    Input: s = "pwwkew"
    Output: 3
    Explanation: The answer is "wke", with the length of 3.
    Notice that the answer must be a substring, "pwke" is a subsequence and not a substring.
    """
# Edge cases beyond the problem statement.
def test_empty_str(s: Solution):
    assert (
        s.lengthOfLongestSubstring("") == 0
    ), """
    Input: s = ""
    Output: 0
    Explanation: An empty string has no repeating characters.
    """
def test_super_long_str(s: Solution):
    assert (
        s.lengthOfLongestSubstring("qwer" * 100 + "qwerty" + "qwer" * 100) == 6
    ), """
    Input: "qwer" * 100 + "qwerty" + "qwer" * 100
    Output: 6
    Explanation: `s` contains one string of "qwerty" and the rest are "qwer"
    """
def test_longest_at_end(s: Solution):
    assert (
        s.lengthOfLongestSubstring("qwer" * 100 + "qwerty") == 6
    ), """
    Input: "qwer" * 100 + "qwerty"
    Output: 6
    Explanation: `s` contains one string of "qwerty" at the end of `s`
    """
| peterjamesmatthews/leetcode | Longest Substring Without Repeating Characters/test_longest_substring_without_repeating_characters.py | test_longest_substring_without_repeating_characters.py | py | 1,634 | python | en | code | 0 | github-code | 13 |
33253610559 | from langchain.chat_models import ChatOpenAI
from langchain.prompts import MessagesPlaceholder
from langchain.schema import SystemMessage
from langchain.agents.openai_functions_agent.base import OpenAIFunctionsAgent
from langchain.memory import ConversationTokenBufferMemory
from langchain.agents.agent import AgentExecutor
from classes import DynamoDBChatMessageHistoryNew
from retrievers import self_query_retriever_jewelry
from tools import get_tool
def _init_jewelry_agent(session_id):
    """Build the jewelry-store sales AgentExecutor for one chat session.

    Chat history is persisted in DynamoDB (table 'langchain-agents') keyed by
    *session_id* and trimmed to roughly 2000 tokens.
    """
    # deterministic LLM for tools; higher-temperature LLM drives the chat itself
    llm = ChatOpenAI(temperature=0, model="gpt-3.5-turbo-0613")
    llm_chat = ChatOpenAI(temperature=0.8, model="gpt-3.5-turbo-0613", verbose=True)
    tools = [
        get_tool("calculator")(llm=llm),
        get_tool("telegram")(),
        get_tool("retriever")(
            self_query_retriever_jewelry,
            name="jewelry_database",
            description="Send the same user question to the jewelry database which have all data about the rings, earrings and necklaces in the jewelry store.",
        )
    ]
    sys_message = SystemMessage(
        content="Type: Jewelry Store Customer Service and Sales Agent\n"
                "Goals: Collect customer data (name, email, phone number) and assist the customer in choosing a ring.\n"
                "Tools: Calculator, rings_database, send_telegram_message\n"
                "Stages: Get customer data, Send customer data to telegram using send_telegram_message tool, assist customer\n"
                "Personality: Helpful, Salesman\n"
                "Reminders: Rule number 1 is to ask the customer about his name, email and phone number, send them to telegram, then help recommend products and assist in choosing ring.\n\n"
                "(Start: Collect customer data and send to telegram, Middle: Assist customer choosing a ring)\n"
    )
    prompt = OpenAIFunctionsAgent.create_prompt(
        system_message=sys_message,
        extra_prompt_messages=[MessagesPlaceholder(variable_name="chat_history")],
    )
    memory = ConversationTokenBufferMemory(
        memory_key="chat_history", llm=llm_chat, max_token_limit=2000,
        chat_memory=DynamoDBChatMessageHistoryNew(table_name="langchain-agents", session_id=session_id),
        return_messages=True
    )
    agent = OpenAIFunctionsAgent(llm=llm_chat, tools=tools, prompt=prompt)
    agent_executor = AgentExecutor(
        agent=agent,
        tools=tools,
        memory=memory,
        verbose=True,
        return_intermediate_steps=False,
    )
    return agent_executor
# Registry mapping an agent name to its factory function.
agents_dict = {
    "jewelry": _init_jewelry_agent,
}


def get_agent(name, session_id):
    """Look up the named agent factory and build an executor for the session."""
    build_agent = agents_dict[name]
    return build_agent(session_id=session_id)
| abdelrahmangasser555/agents | agents.py | agents.py | py | 2,658 | python | en | code | 0 | github-code | 13 |
21487257460 | import string
num_lanes = 3
detector_head = '<additional>\n'
detector_template = string.Template('\t<laneAreaDetector id="$id" lane="$lane" \
pos="$pos" endPos="$end_pos" file="cross.out" freq="30"/>\n')
def create_left_lane_detector(edge_id):
    ''' Creates a lane-area detector on the left-turn lane (the innermost,
    highest-indexed lane) of the given edge and returns its XML string.
    '''
    # Cleanup: removed the dead `detector_xml = []` list initialisation and
    # the stale commented-out per-lane loop; only the innermost lane ever
    # gets a detector, and the docstring now says so.
    detector_id = 'detector-{}'.format(edge_id)
    lane = '{}_{}'.format(edge_id, num_lanes-1)
    pos = -150   # negative positions are measured back from the lane end
    end_pos = -1
    return detector_template.substitute(id=detector_id, lane=lane, pos=pos, end_pos=end_pos)
def create_detector_xml(edges):
    """Assemble the <additional> XML for all edges and write data/grid.det.xml."""
    parts = [detector_head]
    parts.extend(create_left_lane_detector(edge) for edge in edges)
    parts.append('</additional>')
    with open('data/grid.det.xml', 'w') as f:
        f.write(''.join(parts))
| d-hasan/sumo-grid | network/generate_detectors.py | generate_detectors.py | py | 974 | python | en | code | 2 | github-code | 13 |
72762937937 | # pylint: disable=C0111,R0201,C0325
"""
classes for npmanager
"""
import shlex
import sys
import select
import os
from functools import wraps
from subprocess import call, Popen, PIPE, STDOUT
from _npmanager.utils import commandutils as cmdutils
from _npmanager.utils import screen
class Package(object):
    """Base class for an interactive package/service driver.

    NOTE(review): this module is Python 2 code -- `gen.next()`, str-based
    one-byte reads of process stdout, and raising StopIteration inside a
    generator (removed by PEP 479) would all break on Python 3.
    """
    COMMAND = ''   # shell command run by call()/execute()
    SERVICE = ''   # service-control binary used by start()/stop()/...
    SELECT = {}    # option spec passed to screen.select()
    def __init__(self):
        self.process = None
    def select(self):
        """Show the selection menu; exit the program on the last option."""
        val = screen.select(self.SELECT)
        if val == len(self.SELECT['options']):
            sys.exit(0)
        return val
    def write(self, inp):
        """Send *inp* to the running subprocess and echo it (newlines escaped)."""
        assert self.process
        self.process.stdin.write(inp)
        inp = inp.replace('\n', '\\n')
        sys.stdout.write(' \033[1m[send a key: {}]\033[0m'.format(inp))
        sys.stdout.flush()
    def lprint(self, text):
        """Print *text* framed by terminal-wide horizontal rules."""
        cols, _ = cmdutils.termsize()
        print('-' * cols)
        print(text)
        print('-' * cols)
    def line_receiver(self, line):
        # subclasses must implement this to react to each output line/prompt
        raise NotImplementedError('`line_receiver` method should be implemented!')
    def execute(self):
        """Run COMMAND, feeding each output chunk to line_receiver()."""
        self.lprint('Info: execute the following command: {}'.format(self.COMMAND))
        try:
            gen = self.call()
            while 1:
                line = gen.next()
                try:
                    self.line_receiver(line)
                except NotImplementedError as exc:
                    print('Error: {}'.format(exc))
                    self.process.terminate()
                    raise StopIteration()
        except KeyboardInterrupt:
            self.process.terminate()
        except StopIteration:
            pass
    def call(self):
        """Generator: spawn COMMAND and yield output chunks.

        A chunk ends at ':', '?' or newline so callers can react to
        interactive prompts that are not newline-terminated.
        """
        command = self.COMMAND
        self.process = Popen(command, shell=True, stdin=PIPE, stdout=PIPE, \
            stderr=STDOUT, close_fds=True)
        while 1:
            line = ''
            while 1:
                if self.process.poll() is not None:
                    raise StopIteration()
                char = self.process.stdout.read(1)
                line += char
                if char == ':' or char == '?' or char == '\n':
                    break
            sys.stdout.write(line)
            sys.stdout.flush()
            poll = self.process.poll()
            if poll is not None:
                raise StopIteration()
            else:
                yield line
    def start(self):
        _ = call('{} {}'.format(self.SERVICE, 'start'), shell=True)
    def stop(self):
        _ = call('{} {}'.format(self.SERVICE, 'stop'), shell=True)
    def reload(self):
        _ = call('{} {}'.format(self.SERVICE, 'reload'), shell=True)
    def restart(self):
        _ = call('{} {}'.format(self.SERVICE, 'restart'), shell=True)
    def status(self):
        _ = call('{} {}'.format(self.SERVICE, 'status'), shell=True)
| ssut/npmanager | _npmanager/classes.py | classes.py | py | 2,822 | python | en | code | 15 | github-code | 13 |
22996662188 | from pathlib import Path
from ase.io import write
from ase.optimize import LBFGS
# USER
from grrmpy.io import log2atoms
from grrmpy.optimize.attach import automate_maxstep
from grrmpy import pfp_calculator
try:
from grrmpy.optimize import FIRELBFGS
except:
pass
class AutoOpt():
    """Optimised structures are saved as traj files inside the 'Structure' folder.

    To load all structures back after the calculation:

    >>> from grrmpy.io import read_traj
    >>> atoms_list = read_traj('Structure')

    Parameters:
    atomslist: list of Atoms
        list of Atoms objects to optimise
    optimizer: object
        Optimizer to use. Default is LBFGS.
    constraints: ASE constraint
        | constraints such as FixAtoms.
        | give a list to set several of them.
        | unnecessary if the Atoms in the list already carry constraints.
    trajectory: bool
        | if True, intermediate structures are saved to traj files
        | inside the 'trajectory' folder.
    logfile: bool
        | if True, a log file is saved inside the 'log' folder.
    calc_func: object
        function that returns a calculator
    """
    def __init__(self,
                 atomslist,
                 optimizer = LBFGS,
                 constraints = [],
                 trajectory = False,
                 logfile = True,
                 calc_func = pfp_calculator,
                 errorfile = "ERROR",
                 traj_foldername = "trajectory",
                 log_foldername = "log",
                 save_foldername = "Structure"):
        """
        Optimised structures are saved as traj files inside 'Structure'.

        NOTE(review): `constraints` uses a mutable default argument ([]);
        harmless as long as it is never mutated in place -- confirm.

        Parameters:
            atomslist: list of Atoms
                list of Atoms objects to optimise
            optimizer: object
                Optimizer to use. Default is LBFGS.
            constraints: ASE constraint
                constraints such as FixAtoms.
                give a list to set several of them.
                unnecessary if the Atoms in the list already carry constraints.
            trajectory: bool
                if True, intermediate structures are saved to traj files
                inside the 'trajectory' folder.
            logfile: bool
                if True, a log file is saved inside the 'log' folder.
            calc_func: object
                function that returns a calculator
        """
        self.optimizer = optimizer
        self.trajectory = trajectory
        self.logfile = logfile
        self.maxstep_dict = None
        self.atomslist = atomslist
        for atoms in self.atomslist:
            atoms.set_constraint(constraints)
            atoms.calc = calc_func()
        # folder and file names
        self.errorfile = f"{errorfile}_{id(self)}"
        self.log_foldername = log_foldername
        self.traj_foldername = traj_foldername
        self.save_foldername = save_foldername
        # create the output folders
        self.make_folder(self.save_foldername)
        if self.trajectory:
            self.make_folder(self.traj_foldername)
        if self.logfile:
            self.make_folder(self.log_foldername)
    def make_folder(self,foldername):
        """Create *foldername*; raise if it already exists and is not empty."""
        p = Path(foldername)
        if not p.exists():
            # create the folder if it does not exist
            p.mkdir()
        else:
            # if it exists, make sure it is empty
            if len(list(p.iterdir())) != 0:
                raise Exception(f"{p.name}内にファイルが存在します.\n"+
                                "フォルダを削除するか,インスタンス引数のfoldernameを変更してください")
    def set_maxstep(self,maxstep):
        # accept a scalar or a list; always store a list
        if type(maxstep) == list:
            self.maxstep = maxstep
        else:
            self.maxstep = [maxstep]
    def set_steps(self,steps):
        # accept a scalar or a list; always store a list
        if type(steps) == list:
            self.steps = steps
        else:
            self.steps = [steps]
    def set_automaxstep(self,maxstep_dict):
        """Change the parameters used when automatic maxstep adjustment is on.

        Examples:
            >>> obj.set_automaxstep({10:0.1, 5:0.2, 2:0.3, 0:0.35})

        Note that the dict must always contain the key 0.
        """
        self.maxstep_dict = maxstep_dict
    def check_param(self):
        # maxstep and steps must be given pairwise
        if len(self.maxstep) != len(self.steps):
            raise Exception("maxstepとstepsの要素数が一致しません")
    def errorlog(self,massage):
        # append one line to this instance's error-log file
        with open(self.errorfile,"a") as f:
            f.write(massage)
            f.write("\n")
    def irun(self,atoms,name:int,optimizer,maxstep_list,steps_list,fmax):
        """Optimise one Atoms object; on convergence save Structure/<name>.traj
        and return True, otherwise log the failure and return False."""
        logfile = f"{self.log_foldername}/{name}.log" if self.logfile else None
        trajectory = f"{self.traj_foldername}/{name}.traj" if self.trajectory else None
        savefile = f"{self.save_foldername}/{name}.traj"
        try:
            for maxstep,steps in zip(maxstep_list,steps_list):
                ms = 0.2 if maxstep is None else maxstep
                if optimizer == FIRELBFGS:
                    # NOTE(review): this branch ignores logfile/trajectory -- confirm intended.
                    opt = FIRELBFGS(atoms,maxstep_fire=ms,maxstep_lbfgs=ms)
                else:
                    opt = optimizer(atoms,maxstep=ms,logfile=logfile,trajectory=trajectory)
                if maxstep is None:
                    # maxstep=None switches on automatic max-step adjustment
                    opt.attach(lambda:automate_maxstep(opt,self.maxstep_dict))
                opt.run(fmax=fmax,steps=steps)
            # NOTE(review): if `converged` is a method (as on ASE optimizers),
            # the bare attribute is always truthy -- possibly should be
            # opt.converged(); confirm against the optimizer API in use.
            if opt.converged:
                write(savefile,atoms)
                return True
            else:
                self.errorlog(f"{name}の計算:未収束")
        except Exception as e:
            self.errorlog(f"{name}の計算:\n{e}")
        return False
    def run(self,maxstep_list=[0.05,0.2],steps_list=[200,10000],fmax=0.001):
        """Optimise every Atoms in atomslist in order.

        Parameters:
            maxstep_list: float or list of float
                | maxstep values, one per stage.
                | with the FIRELBFGS optimizer the value is used for both
                | maxstep_fire and maxstep_lbfgs.
            steps_list: int or list of int
                maximum steps per stage
            fmax: float
                convergence criterion
        """
        self.set_maxstep(maxstep_list)
        self.set_steps(steps_list)
        self.check_param()
        for i,atoms in enumerate(self.atomslist):
            self.irun(atoms,i,self.optimizer,maxstep_list,steps_list,fmax)
| kt19906/GRRMPY_code | grrmpy/automate/auto_opt.py | auto_opt.py | py | 6,754 | python | ja | code | 0 | github-code | 13 |
8595892444 | import numpy as np
import matplotlib.pyplot as plt
# Citation starts
# Source: https://www.freesion.com/article/5297307805/
class EpsilonGreedy:
    """Epsilon-greedy agent for a 10-armed bandit with Gaussian reward noise."""
    def __init__(self):
        self.epsilon = 0.1                                 # exploration probability
        self.num_arm = 10                                  # number of arms
        self.arms = np.random.uniform(0, 1, self.num_arm)  # true mean reward per arm
        self.best = np.argmax(self.arms)                   # index of the optimal arm
        self.T = 50000                                     # number of pulls
        self.hit = np.zeros(self.T)                        # 1 where the optimal arm was pulled
        self.reward = np.zeros(self.num_arm)               # running mean reward estimate per arm
        self.num = np.zeros(self.num_arm)                  # pull count per arm
    def get_reward(self, i):
        # sample a reward: true mean of arm i plus unit Gaussian noise
        return self.arms[i] + np.random.normal(0, 1)
    def update(self, i):
        # incremental-mean update of the reward estimate for arm i
        self.num[i] += 1
        self.reward[i] = (self.reward[i]*(self.num[i]-1)+self.get_reward(i))/self.num[i]
    def calculate(self):
        # run T rounds of epsilon-greedy arm selection
        for i in range(self.T):
            if np.random.random() > self.epsilon:
                index = np.argmax(self.reward)
            else:
                # explore: pick a random arm *other than* the current greedy
                # one (slightly nonstandard; plain epsilon-greedy may re-pick it)
                a = np.argmax(self.reward)
                index = a
                while index == a:
                    index = np.random.randint(0, self.num_arm)
            if index == self.best:
                self.hit[i] = 1
            self.update(index)
    def plot(self):
        # Update starts
        plt.figure()
        plt.title("E-Greedy")
        x = np.array(range(self.T))
        y1 = np.zeros(self.T)  # running frequency of optimal-arm pulls
        t = 0
        for i in range(self.T):
            t += self.hit[i]
            y1[i] = t/(i+1)
        y2 = np.ones(self.T)*(1-self.epsilon)  # reference line at 1 - epsilon
        plt.xlabel("Times of Experiment")
        plt.plot(x, y1, label="One")
        plt.plot(x, y2, label="Frequency of Finding the Best")
        plt.legend(loc="upper left")
        plt.show()
# Update ends
E = EpsilonGreedy()
E.calculate()
E.plot()
# Citation ends | ShuyanWang1996/CSYE7370 | EGreedy.py | EGreedy.py | py | 1,728 | python | en | code | 0 | github-code | 13 |
9063191173 | # n, m을 입력받음
# read the board dimensions n (rows) and m (columns)
n, m = map(int, input().split())
# read the board
data = []
for _ in range(n):
    data.append(list(input()))
# initialise the minimum with a large sentinel value
min_value = int(1e9)
# slide an 8 x 8 window over every possible position
for i in range(n - 8 + 1):
    for j in range(m - 8 + 1):
        result = 0
        c = data[i][j] # colour of the top-left cell
        # examine every cell of the 8 x 8 window
        for a in range(8):
            for b in range(8):
                # cells that must match the top-left colour
                if (a + b) % 2 == 0:
                    if c != data[i + a][j + b]:
                        result += 1
                # cells that must differ from the top-left colour
                else:
                    if c == data[i + a][j + b]:
                        result += 1
        # take the cheaper option: keep the top-left colour as-is, or flip
        # it and repaint with the opposite pattern (which costs 64 - result)
        min_value = min(min_value, result, 64 - result)
print(min_value) # print the answer
| yudh1232/Baekjoon-Online-Judge-Algorithm | 1018 체스판 다시 칠하기.py | 1018 체스판 다시 칠하기.py | py | 1,042 | python | ko | code | 0 | github-code | 13 |
25213574468 | import lightgbm as lgb
import re
import pytest
import pitci.lightgbm as pitci_lgb
class TestCheckObjectiveSupported:
    """Tests for the check_objective_supported function."""
    @pytest.mark.parametrize(
        "objective, supported_objectives, message",
        [
            ("regression", ["huber", "fair"], "test"),
            ("regression_l1", ["poisson", "quantile"], "xyz"),
        ],
    )
    def test_exception_raised(
        self, lgb_dataset_2x1_with_label, objective, supported_objectives, message
    ):
        """Test an exception is raised if a model with an object not in the
        supported_objective list.
        """
        # Train a minimal one-round booster with the unsupported objective.
        params = {
            "objective": objective,
            "num_leaves": 2,
            "min_data_in_leaf": 1,
            "feature_pre_filter": False,
        }
        model = lgb.train(
            params=params, train_set=lgb_dataset_2x1_with_label, num_boost_round=1
        )
        # NOTE(review): the parametrized `message` argument is unused -- confirm.
        error_message = f"booster objective not supported\n{objective} not in allowed values; {supported_objectives}"
        with pytest.raises(
            ValueError,
            match=re.escape(error_message),
        ):
            pitci_lgb.check_objective_supported(model, supported_objectives)
| richardangell/pitci | tests/lightgbm/test_lightgbm.py | test_lightgbm.py | py | 1,248 | python | en | code | 7 | github-code | 13 |
22148359926 | import os
from rest_framework import serializers
from django.contrib.auth import get_user_model
from authapp.serializers import UserDataSerializer
from .models import Group, CommentGroup, CommentGroupFile, CommentGroupReply, CommentStep, CommentStepReply
User = get_user_model()
# create group
class GroupSerializer(serializers.ModelSerializer):
group_creator = UserDataSerializer(source='creator_id', read_only=True)
member_count = serializers.SerializerMethodField(read_only=True)
def get_member_count(self, obj):
return obj.user_joined.count()
class Meta:
model = Group
fields = ['id', 'group_name', 'group_description', 'member_count', 'courses', 'group_image', 'group_creator',
'date_created', 'date_modified']
read_only_fields = ['id', 'group_creator', 'date_created', 'date_modified']
class CommentGroupFileSerializer(serializers.ModelSerializer):
class Meta:
model = CommentGroupFile
fields = ['id', 'comment_id', 'file']
read_only_fields = ['id']
def to_representation(self, instance):
representation = super().to_representation(instance)
file = {
"url": representation.pop("file"),
"size": instance.file.size,
"name": os.path.basename(instance.file.name),
}
representation['file'] = file
return representation
class CommentGroupFileWithDateSerializer(serializers.ModelSerializer):
date_modified = serializers.SerializerMethodField(read_only=True)
def get_date_modified(self, obj):
return CommentGroup.objects.filter(pk=obj.comment_id.id)[0].date_modified
class Meta:
model = CommentGroupFile
fields = ['id', 'comment_id', 'file', 'date_modified']
read_only_fields = ['id']
def to_representation(self, instance):
representation = super().to_representation(instance)
file = {
"url": representation.pop("file"),
"size": instance.file.size,
"name": os.path.basename(instance.file.name),
}
representation['file'] = file
return representation
class CommentGroupSerializer(serializers.ModelSerializer):
comment_group_files = serializers.SerializerMethodField(read_only=True)
user = UserDataSerializer(source='user_id', read_only=True)
def get_comment_group_files(self, obj):
serializer = CommentGroupFileSerializer(CommentGroupFile.objects.filter(comment_id=obj.id), many=True,
read_only=True, context={"request": self.context.get('request')})
return serializer.data
class Meta:
model = CommentGroup
fields = ['id', 'group_id', 'text', 'comment_group_files', 'user', 'date_created', 'date_modified']
read_only_fields = ['id', 'date_created', 'date_modified']
class CommentGroupReplySerializer(serializers.ModelSerializer):
user = UserDataSerializer(source='user_id', read_only=True)
class Meta:
model = CommentGroupReply
fields = ['id', 'user', 'parent_id', 'text', 'date_created', 'date_modified']
read_only_fields = ['id', 'date_created', 'date_modified']
class AddUserSerializer(serializers.ModelSerializer):
user_joined = serializers.ListSerializer(child=serializers.IntegerField())
class Meta:
model = Group
fields = ['id', 'user_joined']
extra_kwargs = {'id': {'read_only': False}}
class CommentStepSerializer(serializers.ModelSerializer):
user = UserDataSerializer(source='user_id', read_only=True)
class Meta:
model = CommentStep
fields = ['id', 'group_id', 'step_id', 'text', 'user', 'date_created', 'date_modified']
read_only_fields = ['id', 'date_created', 'date_modified']
class CommentStepReplySerializer(serializers.ModelSerializer):
user = UserDataSerializer(source='user_id', read_only=True)
class Meta:
model = CommentStepReply
fields = ['id', 'user', 'parent_id', 'text', 'date_created', 'date_modified']
read_only_fields = ['id', 'date_created', 'date_modified']
def valid_user_and_not_admin(user_id):
user = User.objects.filter(pk=user_id)
if not user.exists():
raise serializers.ValidationError("{0} is not a valid User id.".format(user_id))
return not user[0].is_staff
class MemberPostSerializer(serializers.Serializer):
new_user_joined_list = serializers.ListField(child=serializers.IntegerField())
def validate_new_user_joined_list(self, value):
return [x for x in value if valid_user_and_not_admin(x)]
| PlayingSpree/intern_project_backend | grouplearning/serializers.py | serializers.py | py | 4,633 | python | en | code | 0 | github-code | 13 |
33300696325 | import numpy as np
def to_numpy_array(args) -> np.ndarray:
if not isinstance(args, (list, tuple, np.ndarray)):
raise ValueError("Invalid args.")
if isinstance(args, np.ndarray):
if len(args.shape) == 1:
return np.array(args).reshape(1, 2)
return args
if not isinstance(args[0], (list, tuple, np.ndarray)):
return np.array(args).reshape(1, 2)
return np.array(args).T
def get_dimensionality(args) -> int:
"""determine dimensionality"""
if isinstance(args, (list, tuple)):
return len(args)
else: # np.ndarray
if len(args.shape) == 1:
return 1
else:
return args.shape[1]
def get_num_points(args) -> int:
"""determine number of points"""
if isinstance(args, (list, tuple)):
if isinstance(args[0], (list, tuple, np.ndarray)):
return len(args[0])
else:
return 1 # float, int
else: # np.ndarray
if len(args.shape) == 1:
return args.shape
else:
return args.shape[0]
| dylanwal/flex_optimization | flex_optimization/problems/utils.py | utils.py | py | 1,081 | python | en | code | 1 | github-code | 13 |
17060733424 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class TenantChannelDetailDTO(object):
def __init__(self):
self._channel_code = None
self._channel_desc = None
self._channel_id = None
self._channel_name = None
self._channel_status = None
self._channel_type = None
self._form_template_id = None
self._pic_url = None
self._remark = None
self._status = None
self._tenant_code = None
@property
def channel_code(self):
return self._channel_code
@channel_code.setter
def channel_code(self, value):
self._channel_code = value
@property
def channel_desc(self):
return self._channel_desc
@channel_desc.setter
def channel_desc(self, value):
self._channel_desc = value
@property
def channel_id(self):
return self._channel_id
@channel_id.setter
def channel_id(self, value):
self._channel_id = value
@property
def channel_name(self):
return self._channel_name
@channel_name.setter
def channel_name(self, value):
self._channel_name = value
@property
def channel_status(self):
return self._channel_status
@channel_status.setter
def channel_status(self, value):
self._channel_status = value
@property
def channel_type(self):
return self._channel_type
@channel_type.setter
def channel_type(self, value):
self._channel_type = value
@property
def form_template_id(self):
return self._form_template_id
@form_template_id.setter
def form_template_id(self, value):
self._form_template_id = value
@property
def pic_url(self):
return self._pic_url
@pic_url.setter
def pic_url(self, value):
self._pic_url = value
@property
def remark(self):
return self._remark
@remark.setter
def remark(self, value):
self._remark = value
@property
def status(self):
return self._status
@status.setter
def status(self, value):
self._status = value
@property
def tenant_code(self):
return self._tenant_code
@tenant_code.setter
def tenant_code(self, value):
self._tenant_code = value
def to_alipay_dict(self):
params = dict()
if self.channel_code:
if hasattr(self.channel_code, 'to_alipay_dict'):
params['channel_code'] = self.channel_code.to_alipay_dict()
else:
params['channel_code'] = self.channel_code
if self.channel_desc:
if hasattr(self.channel_desc, 'to_alipay_dict'):
params['channel_desc'] = self.channel_desc.to_alipay_dict()
else:
params['channel_desc'] = self.channel_desc
if self.channel_id:
if hasattr(self.channel_id, 'to_alipay_dict'):
params['channel_id'] = self.channel_id.to_alipay_dict()
else:
params['channel_id'] = self.channel_id
if self.channel_name:
if hasattr(self.channel_name, 'to_alipay_dict'):
params['channel_name'] = self.channel_name.to_alipay_dict()
else:
params['channel_name'] = self.channel_name
if self.channel_status:
if hasattr(self.channel_status, 'to_alipay_dict'):
params['channel_status'] = self.channel_status.to_alipay_dict()
else:
params['channel_status'] = self.channel_status
if self.channel_type:
if hasattr(self.channel_type, 'to_alipay_dict'):
params['channel_type'] = self.channel_type.to_alipay_dict()
else:
params['channel_type'] = self.channel_type
if self.form_template_id:
if hasattr(self.form_template_id, 'to_alipay_dict'):
params['form_template_id'] = self.form_template_id.to_alipay_dict()
else:
params['form_template_id'] = self.form_template_id
if self.pic_url:
if hasattr(self.pic_url, 'to_alipay_dict'):
params['pic_url'] = self.pic_url.to_alipay_dict()
else:
params['pic_url'] = self.pic_url
if self.remark:
if hasattr(self.remark, 'to_alipay_dict'):
params['remark'] = self.remark.to_alipay_dict()
else:
params['remark'] = self.remark
if self.status:
if hasattr(self.status, 'to_alipay_dict'):
params['status'] = self.status.to_alipay_dict()
else:
params['status'] = self.status
if self.tenant_code:
if hasattr(self.tenant_code, 'to_alipay_dict'):
params['tenant_code'] = self.tenant_code.to_alipay_dict()
else:
params['tenant_code'] = self.tenant_code
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = TenantChannelDetailDTO()
if 'channel_code' in d:
o.channel_code = d['channel_code']
if 'channel_desc' in d:
o.channel_desc = d['channel_desc']
if 'channel_id' in d:
o.channel_id = d['channel_id']
if 'channel_name' in d:
o.channel_name = d['channel_name']
if 'channel_status' in d:
o.channel_status = d['channel_status']
if 'channel_type' in d:
o.channel_type = d['channel_type']
if 'form_template_id' in d:
o.form_template_id = d['form_template_id']
if 'pic_url' in d:
o.pic_url = d['pic_url']
if 'remark' in d:
o.remark = d['remark']
if 'status' in d:
o.status = d['status']
if 'tenant_code' in d:
o.tenant_code = d['tenant_code']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/TenantChannelDetailDTO.py | TenantChannelDetailDTO.py | py | 6,011 | python | en | code | 241 | github-code | 13 |
38595224262 | from django.shortcuts import render, redirect
from django.contrib import messages
from django.urls import reverse
from Authentification.models import UserP
from Authentification.models import UserS
# Create your views here.
def index(request):
if 'id' in request.session:
if request.session['is_prof'] is True:
user = UserP.userManagerP.getOneUser(request.session['id'])
else:
user = UserS.userManagerS.getOneUser(request.session['id'])
context = {
'is_prof': request.session['is_prof'],
}
return render(request, "index.html", context)
else:
return render(request, "index.html")
def register(request):
if request.method == 'POST':
if request.POST['academic'] == "professor":
if UserP.userManagerP.register(request): # successful registration
return redirect("/dashboard")
else: # failed registration
return redirect("/register")
else:
if UserS.userManagerS.register(request): # successful registration
return redirect("/dashboard")
else: # failed registration
return redirect("/register")
else:
if 'id' in request.session:
return redirect("/dashboard")
return render(request, "registration.html")
def login(request):
# POST
if request.method == 'POST':
if request.POST['academic'] == 'professor':
if UserP.userManagerP.login(request): # successful login
return redirect("/dashboard")
else: # failed login
return redirect("/signin")
elif request.POST['academic'] == 'student':
if UserS.userManagerS.login(request): # successful login
return redirect("/dashboard")
else: # failed login
return redirect("/signin")
else: # failed login
return redirect("/signin")
# GET
else:
if 'id' in request.session:
return redirect("/dashboard")
return render(request, "login.html")
def logoff(request):
if request.session['is_prof'] is True:
UserP.userManagerP.logoff(request) # failed login
return redirect("/")
else:
UserS.userManagerS.logoff(request) # failed login
return redirect("/")
| kaddachi17/q | Authentification/views.py | views.py | py | 2,384 | python | en | code | 0 | github-code | 13 |
17050228974 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class ConditionEntry(object):
def __init__(self):
self._dim_key = None
self._value = None
@property
def dim_key(self):
return self._dim_key
@dim_key.setter
def dim_key(self, value):
self._dim_key = value
@property
def value(self):
return self._value
@value.setter
def value(self, value):
self._value = value
def to_alipay_dict(self):
params = dict()
if self.dim_key:
if hasattr(self.dim_key, 'to_alipay_dict'):
params['dim_key'] = self.dim_key.to_alipay_dict()
else:
params['dim_key'] = self.dim_key
if self.value:
if hasattr(self.value, 'to_alipay_dict'):
params['value'] = self.value.to_alipay_dict()
else:
params['value'] = self.value
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = ConditionEntry()
if 'dim_key' in d:
o.dim_key = d['dim_key']
if 'value' in d:
o.value = d['value']
return o
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/ConditionEntry.py | ConditionEntry.py | py | 1,264 | python | en | code | 241 | github-code | 13 |
4087242521 | import numpy as np
import matplotlib.pyplot as plt
import urllib.request
# ごくシンプルな畳み込み層を定義しています。
class Conv:
# シンプルな例を考えるため、Wは3x3で固定し、後のセッションで扱うstridesやpaddingは考えません。
def __init__(self, W):
self.W = W
def f_prop(self, X):
out = np.zeros((X.shape[0]-2, X.shape[1]-2))
for i in range(out.shape[0]):
for j in range(out.shape[1]):
x = X[i:i+3, j:j+3]
out[i,j] = np.dot(self.W.flatten(), x.flatten())
return out
# ごくシンプルなプーリング層を定義しています。
class Pool:
# シンプルな例を考えるため、後のセッションで扱うstridesやpaddingは考えません。
def __init__(self, l):
self.l = l
def f_prop(self, X):
l = self.l
out = np.zeros((X.shape[0]//self.l, X.shape[1]//self.l))
for i in range(out.shape[0]):
for j in range(out.shape[1]):
# 下の下線部を埋めて、コメントアウトをはずしてください。
out[i,j] = np.max(X[i*l:(i+1)*l, j*l:(j+1)*l])
return out
local_filename, headers = urllib.request.urlretrieve('https://aidemyexcontentsdata.blob.core.windows.net/data/5100_cnn/circle.npy')
X = np.load(local_filename)
plt.imshow(X)
plt.title("元画像", fontsize=12)
plt.show()
# カーネル
W1 = np.array([[0,1,0],
[0,1,0],
[0,1,0]])
W2 = np.array([[0,0,0],
[1,1,1],
[0,0,0]])
W3 = np.array([[1,0,0],
[0,1,0],
[0,0,1]])
W4 = np.array([[0,0,1],
[0,1,0],
[1,0,0]])
# 畳み込み
conv1 = Conv(W1); C1 = conv1.f_prop(X)
conv2 = Conv(W2); C2 = conv2.f_prop(X)
conv3 = Conv(W3); C3 = conv3.f_prop(X)
conv4 = Conv(W4); C4 = conv4.f_prop(X)
plt.subplot(1,4,1); plt.imshow(C1)
plt.subplot(1,4,2); plt.imshow(C2)
plt.subplot(1,4,3); plt.imshow(C3)
plt.subplot(1,4,4); plt.imshow(C4)
plt.suptitle("畳み込み結果", fontsize=12)
plt.show()
# プーリング
pool = Pool(2)
P1 = pool.f_prop(C1)
P2 = pool.f_prop(C2)
P3 = pool.f_prop(C3)
P4 = pool.f_prop(C4)
plt.subplot(1,4,1); plt.imshow(P1)
plt.subplot(1,4,2); plt.imshow(P2)
plt.subplot(1,4,3); plt.imshow(P3)
plt.subplot(1,4,4); plt.imshow(P4)
plt.suptitle("プーリング結果", fontsize=12)
plt.show() | yasuno0327/LearnCNN | aidemy/cnn/task2.py | task2.py | py | 2,448 | python | ja | code | 1 | github-code | 13 |
3241982845 | from PySide2 import QtCore
import os
import sqlite3
from ..sqlite_init import povezivanje_baza
class PlaceviModel(QtCore.QAbstractTableModel):
def __init__(self):
super().__init__()
# matrica, redovi su liste, a unutar tih listi se nalaze pojedinacni podaci o korisniku iz imenika
self._conn = povezivanje_baza()
self._c = self._conn.cursor()
self._data = []
self.ucitaj_podatke_iz_baze()
def rowCount(self, index):
return len(self._data)
def columnCount(self, index):
return 5 #fiksan br vracamo
def data(self, index, role):
element = self.get_element(index)
if element is None:
return None
if role == QtCore.Qt.DisplayRole:
return element
def headerData(self, section, orientation, role):
if orientation != QtCore.Qt.Vertical:
if (section == 0) and (role == QtCore.Qt.DisplayRole):
return "ID placa"
elif (section == 1) and (role == QtCore.Qt.DisplayRole):
return "Naziv placa"
elif (section == 2) and (role == QtCore.Qt.DisplayRole):
return "Tip placa"
elif (section == 3) and (role == QtCore.Qt.DisplayRole):
return "Ukupan broj mesta"
elif (section == 4) and (role == QtCore.Qt.DisplayRole):
return "Broj zauzetih mesta"
def setData(self, index, value, role):
try:
if value == "":
return False
self._data[index.row()][index.column()] = value
self.dataChanged()
return True
except:
return False
def flags(self, index):
# ne damo da menja datum rodjenja (primera radi)
return QtCore.Qt.ItemIsEnabled | QtCore.Qt.ItemIsSelectable
# sve ostale podatke korisnik moze da menja
def get_element(self, index : QtCore.QModelIndex):
if index.isValid():
element = self._data[index.row()][index.column()]
if element:
return element
return None
def ucitaj_podatke_iz_baze(self):
upit = self._conn.execute(""" SELECT plac_id, naziv_placa, tip, broj_mesta, broj_zauzetih FROM placevi INNER JOIN tip_placa ON placevi.tip_placa_id = tip_placa.tip_placa_id;""")
self._data = list(upit.fetchall())
self._conn.commit()
def get_brojevi_mesta(self, index):
return {
"brZ" : self._data[index][4],
"ukupno" : self._data[index][3]
}
def get_id_placa(self, index):
return self._data[index][0]
def get_tip_placa(self, index):
return self._data[index][2]
def dodaj(self, data : dict):
self.beginInsertRows(QtCore.QModelIndex(), len(self._data), len(self._data))
upit = self._conn.execute(""" SELECT tip FROM tip_placa where tip_placa_id=:idHere;""", {'idHere':data['tip_placa_id'] })
upitTipPlaca = list(upit.fetchall())
self._conn.commit()
######
self._data.append([data['plac_id'], data['naziv_placa'], upitTipPlaca[0][0], data['broj_mesta'], data['broj_zauzetih']])
self.endInsertRows()
def ukloni(self, indices):
# za na osnovu indeksa, dobijamo njihove redove, posto za jedan red je vezano pet indeksa (za kolone)
# pravimo skup koji ce dati samo jedinstvene brojeve redova
# uklanjanje vrsimo od nazad, jer ne zelimo da nam brojevi redova nakon uklanjanja odu van opsega.
indices = sorted(set(map(lambda x: x.row(), indices)), reverse=True)
for i in indices:
id = self.get_id_placa(i)
upit = self._conn.execute("""DELETE FROM placevi WHERE plac_id = :ID""" , {'ID' : id} )
self._conn.commit()
upit = self._conn.execute("""DELETE FROM vozila_plac WHERE plac_id = :ID""" , {'ID' : id} )
self._conn.commit()
self.beginRemoveRows(QtCore.QModelIndex(), i, i)
del self._data[i]
self.endRemoveRows()
def update_brZ(self, brZ_updated=None, plac_id=None):
upit = self._conn.execute("""UPDATE placevi SET broj_zauzetih = :brZ WHERE plac_id = :pID;""" , {'brZ' : int(brZ_updated) , 'pID':plac_id } )
self._conn.commit()
return
| krstovicjelena/MRS | JelenaKrstovic2016200143/PlaceviJelenaKrstovic2016200143/modeli/placevi_model.py | placevi_model.py | py | 4,397 | python | en | code | 1 | github-code | 13 |
72378909137 | import time
import random
def radixsort( aList ):
RADIX = 10
maxLength = False
tmp , placement = -1, 1
while not maxLength:
maxLength = True
# declare and initialize buckets
buckets = [list() for _ in range( RADIX )]
# split aList between lists
for i in aList:
tmp = i / placement
buckets[int(tmp % RADIX)].append( i )
if maxLength and tmp > 0:
maxLength = False
# empty lists into aList array
a = 0
for b in range( RADIX ):
buck = buckets[b]
for i in buck:
aList[a] = i
a += 1
# move to next digit
placement *= RADIX
print("Radix Sort:")
sizesArray = [10,100,1000,10000,100000,1000000]
for size in sizesArray:
aList = [0]*size
for i in range(0,size):
aList[i] = random.randint(1,1000)
tiemposEjecucion = [0.0]*10
print("Ordenando "+str(size)+" elementos:")
for i in range(0,10):
#Guardamos la lista en un arreglo temporal
unsortedList = aList[:]
startTime = time.time()
radixsort(unsortedList)
endTime = time.time()
deltaTime = endTime - startTime
tiemposEjecucion[i] = deltaTime
print("Iteracion "+str(i+1)+": "+str(deltaTime))
print("Tiempos = "+str(tiemposEjecucion))
| cefeboru/ComparacionAlgoritmos | radixSort.py | radixSort.py | py | 1,231 | python | en | code | 0 | github-code | 13 |
16083160331 | """
Create a function that retrieves every number that is strictly larger than every number that follows it.
Examples
[3, 13, 11, 2, 1, 9, 5] ➞ [13, 11, 9, 5]
13 is larger than all numbers to its right, etc.
[5, 5, 5, 5, 5, 5] ➞ [5]
Must be strictly larger.
Always include the last number.
[5, 9, 8, 7] ➞ [9, 8, 7]
Notes
The last number in an array is trivially strictly larger than all numbers that follow it (no numbers follow it).
"""
arry = list(map(int, input("Please enter nums for list with a space : ").split()))
n_arry = []
for i in range(0, len(arry)-1):
if all([arry[i] > j for j in arry[i+1:]]) :
n_arry.append(arry[i])
print(n_arry + [arry[-1]]) | MelekAlan/Python_Challenge | Larger_to_Right.py | Larger_to_Right.py | py | 688 | python | en | code | 0 | github-code | 13 |
36907276267 | import math
def SquareRootContinuedFraction(n):
# This computes the continued fraction of a square root function
# if n is a perfect square
if math.sqrt(n) == int(math.sqrt(n)):
return [ int(math.sqrt(n)) ]
# we iterate on the form (sqrt(n) + a)/b
# to get to the next iteration, we need to rewrite the above as
# y + 1/( (sqrt(n) + a')/b' )
# where y = floor( sqrt(n) ) i.e. it's the non-decimal part
#
# Doing some math, we can show that
# a' = by - a
# b' = (n - (by-a)^2)/b = (n - (a')^2)/b
# sqrt(n) = (sqrt(n) + 0)/1
a, b = 0, 1
continued_fraction = []
# If b ever equals 1, by definition we have reach the recurrive point
# because we'll have something of the form sqrt(n) + a
# and after the next iteration, everything will just repeat
while b != 1 or len(continued_fraction) == 0:
y = math.floor( (math.sqrt(n) + a)/b )
continued_fraction.append(y)
a = b*y - a
b = (n - a**2) // b
# we just need to add the last iteration to complete the cycle
y = math.floor( (math.sqrt(n) + a)/b )
continued_fraction.append(y)
return continued_fraction
def main(N=10**4):
odd_periods = []
for n in range(2, N+1):
continued_fraction = SquareRootContinuedFraction(n)
first_digit, period = continued_fraction[0], continued_fraction[1:]
#print(n, [first_digit, tuple(period)])
if len(period) % 2 == 1:
odd_periods.append(n)
total = len(odd_periods)
print(f"The number of continued fractions for numbers <= {N} that have an odd period is:", total)
return total
if __name__ == "__main__":
main() | ekeilty17/Project_Euler | P064.py | P064.py | py | 1,740 | python | en | code | 1 | github-code | 13 |
43272157790 | '''
difPy - Python package for finding duplicate and similar images
2023 Elise Landman
https://github.com/elisemercury/Duplicate-Image-Finder
'''
from glob import glob
from multiprocessing import Pool
from uuid import uuid4
import numpy as np
from PIL import Image
from distutils.util import strtobool
import os
from datetime import datetime
from pathlib import Path
import argparse
import json
import warnings
class build:
'''
A class used to initialize difPy and build its image repository
'''
def __init__(self, *directory, recursive=True, in_folder=False, limit_extensions=True, px_size=50, show_progress=True, logs=True):
'''
Parameters
----------
directory : str, list
Paths of the directories to be searched
recursive : bool (optional)
Search recursively within the directories (default is True)
in_folder : bool (optional)
If False, searches for matches in the union of directories (default is False)
If True, searches for matches only among subdirectories
limit_extensions : bool (optional)
Limit search to known image file extensions (default is True)
px_size : int (optional)
Image compression size in pixels (default is 50)
show_progress : bool (optional)
Show the difPy progress bar in console (default is True)
logs : bool (optional)
Collect stats on the difPy process (default is True)
'''
# Validate input parameters
self.__directory = _validate._directory(directory)
self.__recursive = _validate._recursive(recursive)
self.__in_folder = _validate._in_folder(in_folder, recursive)
self.__limit_extensions = _validate._limit_extensions(limit_extensions)
self.__px_size = _validate._px_size(px_size)
self.__show_progress = _validate._show_progress(show_progress)
self.__stats = _validate._stats(logs)
self._tensor_dictionary, self._filename_dictionary, self._id_to_group_dictionary, self._group_to_id_dictionary, self._invalid_files, self._stats = self._main()
def _main(self):
# Function that runs the build workflow
if self.__show_progress:
count = 0
total_count = 3
_help._show_progress(count, total_count, task='preparing files')
self.__start_time = datetime.now()
valid_files, skipped_files = self._get_files()
if self.__show_progress:
count += 1
_help._show_progress(count, total_count, task='preparing files')
tensor_dictionary, filename_dictionary, id_to_group_dictionary, group_to_id_dictionary, invalid_files = self._build_image_dictionaries(valid_files)
self.__end_time = datetime.now()
if self.__show_progress:
count += 1
_help._show_progress(count, total_count, task='preparing files')
stats = self._stats(invalid_files=invalid_files, skipped_files=skipped_files)
if self.__show_progress:
count += 1
_help._show_progress(count, total_count, task='preparing files')
return tensor_dictionary, filename_dictionary, id_to_group_dictionary, group_to_id_dictionary, invalid_files, stats
# 8m55
def _stats(self, **kwargs):
# Function that generates build stats
stats = dict()
seconds_elapsed = np.round((self.__end_time - self.__start_time).total_seconds(), 4)
invalid_files = kwargs['invalid_files']
for file in kwargs['skipped_files']:
invalid_files.update({str(Path(file)) : 'ImageFilterWarning: invalid image extension.'})
stats.update({'directory' : self.__directory})
stats.update({'process' : {'build': {}}})
stats['process']['build'].update({'duration' : {'start': self.__start_time.isoformat(),
'end' : self.__end_time.isoformat(),
'seconds_elapsed' : seconds_elapsed
}})
stats['process']['build'].update({'parameters': {'recursive' : self.__recursive,
'in_folder' : self.__in_folder,
'limit_extensions' : self.__limit_extensions,
'px_size' : self.__px_size,
}})
stats.update({'invalid_files': {'count' : len(invalid_files),
'logs' : invalid_files}})
return stats
def _get_files(self):
# Function that searched for files in the input directories
valid_files_all = []
skipped_files_all = np.array([])
if self.__in_folder:
# Search directories separately
directories = []
for dir in self.__directory:
directories += glob(str(dir) + '/**/', recursive=self.__recursive)
for dir in directories:
files = glob(str(dir) + '/*', recursive=self.__recursive)
valid_files, skip_files = self._validate_files(files)
valid_files_all.append(valid_files)
if len(skip_files) > 0:
skipped_files_all = np.concatenate((skipped_files_all, skip_files), axis=None)
else:
# Search union of all directories
for dir in self.__directory:
files = glob(str(dir) + '/**', recursive=self.__recursive)
valid_files, skip_files = self._validate_files(files)
valid_files_all = np.concatenate((valid_files_all, valid_files), axis=None)
if len(skip_files) > 0:
skipped_files_all = np.concatenate((skipped_files_all, skip_files), axis=None)
return valid_files_all, skipped_files_all
def _validate_files(self, directory):
# Function that validates a file's filetype
valid_files = np.array([os.path.normpath(file) for file in directory if not os.path.isdir(file)])
if self.__limit_extensions:
valid_files, skip_files = self._filter_extensions(valid_files)
else:
skip_files = []
return valid_files, skip_files
def _filter_extensions(self, directory_files):
# Function that filters by files with a specific filetype
valid_extensions = np.array(['apng', 'bw', 'cdf', 'cur', 'dcx', 'dds', 'dib', 'emf', 'eps', 'fli', 'flc', 'fpx', 'ftex', 'fits', 'gd', 'gd2', 'gif', 'gbr', 'icb', 'icns', 'iim', 'ico', 'im', 'imt', 'j2k', 'jfif', 'jfi', 'jif', 'jp2', 'jpe', 'jpeg', 'jpg', 'jpm', 'jpf', 'jpx', 'jpeg', 'mic', 'mpo', 'msp', 'nc', 'pbm', 'pcd', 'pcx', 'pgm', 'png', 'ppm', 'psd', 'pixar', 'ras', 'rgb', 'rgba', 'sgi', 'spi', 'spider', 'sun', 'tga', 'tif', 'tiff', 'vda', 'vst', 'wal', 'webp', 'xbm', 'xpm'])
extensions = list()
for file in directory_files:
try:
ext = file.split(".")[-1].lower()
extensions.append(ext)
except:
extensions.append("_")
keep_files = directory_files[np.isin(extensions, valid_extensions)]
skip_files = directory_files[np.logical_not(np.isin(extensions, valid_extensions))]
return keep_files, skip_files
def _build_image_dictionaries(self, valid_files):
# Function that builds dictionaries of image tensors and metadata
tensor_dictionary = dict()
filename_dictionary = dict()
invalid_files = dict()
id_to_group_dictionary = dict()
group_to_id_dictionary = dict()
count = 0
if self.__in_folder:
# Search directories separately
for j in range(0, len(valid_files)):
group_id = f"group_{j}"
group_img_ids = []
with Pool() as pool:
file_nums = [(i, valid_files[j][i]) for i in range(len(valid_files[j]))]
for tensor in pool.starmap(self._generate_tensor, file_nums):
if isinstance(tensor, dict):
invalid_files.update(tensor)
count += 1
else:
img_id = uuid4().int
while img_id in filename_dictionary:
img_id = uuid4().int
group_img_ids.append(img_id)
id_to_group_dictionary.update({img_id : group_id})
filename_dictionary.update({img_id : valid_files[j][tensor[0]]})
tensor_dictionary.update({img_id : tensor[1]})
count += 1
group_to_id_dictionary.update({group_id : group_img_ids})
else:
# Search union of all directories
with Pool() as pool:
file_nums = [(i, valid_files[i]) for i in range(len(valid_files))]
for tensor in pool.starmap(self._generate_tensor, file_nums):
if isinstance(tensor, dict):
invalid_files.update(tensor)
count += 1
else:
img_id = uuid4().int
while img_id in filename_dictionary:
img_id = uuid4().int
filename_dictionary.update({img_id : valid_files[tensor[0]]})
tensor_dictionary.update({img_id : tensor[1]})
count += 1
return tensor_dictionary, filename_dictionary, id_to_group_dictionary, group_to_id_dictionary, invalid_files
def _generate_tensor(self, num, file):
# Function that generates a tesnor of an image
try:
img = Image.open(file)
if img.getbands() != ('R', 'G', 'B'):
img = img.convert('RGB')
img = img.resize((self.__px_size, self.__px_size), resample=Image.BICUBIC)
img = np.asarray(img)
return (num, img)
except Exception as e:
if e.__class__.__name__== 'UnidentifiedImageError':
return {str(Path(file)) : 'UnidentifiedImageError: file could not be identified as image.'}
else:
return {str(Path(file)) : str(e)}
class search:
'''
A class used to search for matches in a difPy image repository
'''
def __init__(self, difpy_obj, similarity='duplicates', show_progress=True, logs=True):
'''
Parameters
----------
difPy_obj : difPy.dif.build
difPy object containing the image repository
similarity : 'duplicates', 'similar', float (optional)
Image comparison similarity threshold (mse) (default is 'duplicates', 0)
show_progress : bool (optional)
Show the difPy progress bar in console (default is True)
logs : bool (optional)
Collect stats on the difPy process (default is True)
'''
# Validate input parameters
self.__difpy_obj = difpy_obj
self.__similarity = _validate._similarity(similarity)
self.__show_progress = _validate._show_progress(show_progress)
self.__in_folder = self.__difpy_obj._stats['process']['build']['parameters']['in_folder']
if self.__show_progress:
count = 1
total_count = 3
_help._show_progress(count, total_count, task='searching files')
self.result = self._main()
if self.__show_progress:
count += 1
_help._show_progress(count, total_count, task='searching files')
self.lower_quality, self.__duplicate_count, self.__similar_count = self._search_helper()
if self.__show_progress:
count += 1
_help._show_progress(count, total_count, task='searching files')
if logs:
self.stats = self._stats()
def _main(self):
# Function that runs the search workflow
self.start_time = datetime.now()
self.result = dict()
self.duplicate_count = 0
self.similar_count = 0
if self.__in_folder:
# Search directories separately
with Pool() as pool:
grouped_img_ids = [img_ids for group_id, img_ids in self.__difpy_obj._group_to_id_dictionary.items()]
items = []
for ids in grouped_img_ids:
items = []
for i, id_a in enumerate(ids):
for j, id_b in enumerate(ids):
if j > i:
items.append((id_a, id_b, self.__difpy_obj._tensor_dictionary[id_a], self.__difpy_obj._tensor_dictionary[id_b]))
for output in pool.starmap(self._compute_mse, items):
if output[2] <= self.__similarity:
self._add_to_result(output)
self.end_time = datetime.now()
return self.result
else:
# Search union of all directories
with Pool() as pool:
ids = list(self.__difpy_obj._tensor_dictionary.keys())
items = []
for i, id_a in enumerate(ids):
for j, id_b in enumerate(ids):
if j > i:
items.append((id_a, id_b, self.__difpy_obj._tensor_dictionary[id_a], self.__difpy_obj._tensor_dictionary[id_b]))
for output in pool.starmap(self._compute_mse, items):
if output[2] <= self.__similarity:
self._add_to_result(output)
self.end_time = datetime.now()
return self.result
def _stats(self):
# Function that generates build stats
stats = self.__difpy_obj._stats
seconds_elapsed = np.round((self.end_time - self.start_time).total_seconds(), 4)
stats['process'].update({'search' : {}})
stats['process']['search'].update({'duration' : {'start': self.start_time.isoformat(),
'end' : self.end_time.isoformat(),
'seconds_elapsed' : seconds_elapsed
}})
stats['process']['search'].update({'parameters' : {'similarity_mse': self.__similarity
}})
stats['process']['search'].update({'files_searched' : len(self.__difpy_obj._tensor_dictionary)})
stats['process']['search'].update({'matches_found' : {'duplicates': self.__duplicate_count,
'similar' : self.__similar_count
}})
return stats
    def _search_helper(self):
        # Helper function that compares image qualities and computes process metadata.
        # Walks self.result (whose shape depends on the in_folder mode), ranks each
        # match group by file size via _compare_img_quality, collects everything but
        # the largest file as "lower quality", and counts duplicates (mse == 0)
        # versus similar images (mse > 0).
        duplicate_count, similar_count = 0, 0
        lower_quality = []
        if self.__in_folder:
            # Search directories separately: result is keyed by group id, with a
            # nested 'contents' dict per group.
            if self.__similarity > 0:
                for group_id in self.result.keys():
                    for id in self.result[group_id]['contents']:
                        match_group = [self.result[group_id]['contents'][id]['location']]
                        for match_id in self.result[group_id]['contents'][id]['matches']:
                            # compare image quality (re-ranked after each added match)
                            match_group.append(self.result[group_id]['contents'][id]['matches'][match_id]['location'])
                            match_group = self._compare_img_quality(match_group)
                            lower_quality += match_group[1:]
                            # count duplicate/similar
                            if self.result[group_id]['contents'][id]['matches'][match_id]['mse'] > 0:
                                similar_count += 1
                            else:
                                duplicate_count +=1
            else:
                # NOTE(review): this counts one duplicate per anchor image, while the
                # union branch below counts per match -- confirm this asymmetry is intended.
                for group_id in self.result.keys():
                    duplicate_count += len(self.result[group_id]['contents'])
                    for id in self.result[group_id]['contents']:
                        match_group = [self.result[group_id]['contents'][id]['location']]
                        for match_id in self.result[group_id]['contents'][id]['matches']:
                            # compare image quality
                            match_group.append(self.result[group_id]['contents'][id]['matches'][match_id]['location'])
                        match_group = self._compare_img_quality(match_group)
                        lower_quality += match_group[1:]
        else:
            # Search union of all directories: result is keyed directly by image id.
            if self.__similarity > 0:
                for id in self.result.keys():
                    match_group = [self.result[id]['location']]
                    for matchid in self.result[id]['matches']:
                        # compare image quality (re-ranked after each added match)
                        match_group.append(self.result[id]['matches'][matchid]['location'])
                        match_group = self._compare_img_quality(match_group)
                        lower_quality += match_group[1:]
                        # count duplicate/similar
                        if self.result[id]['matches'][matchid]['mse'] > 0:
                            similar_count += 1
                        else:
                            duplicate_count +=1
            else:
                for id in self.result.keys():
                    match_group = [self.result[id]['location']]
                    duplicate_count += len(self.result[id]['matches'])
                    for matchid in self.result[id]['matches']:
                        # compare image quality
                        match_group.append(self.result[id]['matches'][matchid]['location'])
                    match_group = self._compare_img_quality(match_group)
                    lower_quality += match_group[1:]
        # De-duplicate collected paths; ordering is not preserved.
        lower_quality = {'lower_quality': list(set(lower_quality))}
        return lower_quality, duplicate_count, similar_count
def _compare_img_quality(self, img_list):
# Function for sorting a list of images based on their file sizes
imgs_sizes = []
for img in img_list:
img_size = (os.stat(str(img)).st_size, img)
imgs_sizes.append(img_size)
sort_by_size = [file for size, file in sorted(imgs_sizes, reverse=True)]
return sort_by_size
    def _add_to_result(self, output):
        # Add one found image match (id_A, id_B, mse) to the result output.
        # If id_A is already recorded as a *match* of some earlier image, the new
        # match is chained onto that entry instead of opening a new one, so
        # transitively-matching images end up grouped under a single anchor.
        id_A = output[0]
        filename_A = str(Path(self.__difpy_obj._filename_dictionary[id_A]))
        id_B = output[1]
        filename_B = str(Path(self.__difpy_obj._filename_dictionary[id_B]))
        mse = output[2]
        if self.__in_folder:
            # Search directories separately: results are nested one level deeper,
            # under the directory group that id_A belongs to.
            group_id = self.__difpy_obj._id_to_group_dictionary[id_A]
            group_path = os.path.dirname(filename_A)
            if group_id in self.result:
                # id_A already appears as a match of an existing anchor: chain there.
                for key in self.result[group_id]['contents'].keys():
                    if id_A in self.result[group_id]['contents'][key]['matches']:
                        self.result[group_id]['contents'][key]['matches'].update({id_B : {'location': filename_B,
                                                                                          'mse': mse}})
                        return self.result
                if id_A in self.result[group_id]['contents']:
                    # id_A is already an anchor: append the new match.
                    self.result[group_id]['contents'][id_A]['matches'].update({id_B : {'location': filename_B,
                                                                                       'mse': mse}})
                    return self.result
                else:
                    # First match for id_A within an existing group.
                    self.result[group_id]['contents'].update({id_A : {'location': filename_A,
                                                                      'matches' : {id_B : {'location': filename_B,
                                                                                           'mse': mse}}}})
                    return self.result
            else:
                # First match recorded for this directory group.
                self.result.update({group_id : {'location' : group_path,
                                                'contents' : {id_A : {'location': filename_A,
                                                                      'matches': {id_B: {'location' : filename_B,
                                                                                         'mse': mse }}}}}})
                return self.result
        else:
            # Search union of all directories: results are keyed by anchor id.
            for key in list(self.result.keys()):
                if id_A in self.result[key]['matches']:
                    # id_A already appears as a match of an existing anchor: chain there.
                    self.result[key]['matches'].update({id_B : {'location': filename_B,
                                                                'mse': mse}})
                    return self.result
            if id_A in self.result:
                self.result[id_A]['matches'].update({id_B : {'location': filename_B,
                                                             'mse': mse}})
            else:
                self.result.update({id_A : {'location': filename_A,
                                            'matches' : {id_B : {'location': filename_B,
                                                                 'mse': mse}}}})
            return self.result
def _compute_mse(self, id_A, id_B, img_A, img_B):
# Function that calculates the mean squared error (mse) between two image matrices
mse = np.square(np.subtract(img_A, img_B)).mean()
return (id_A, id_B, mse)
def move_to(self, destination_path):
# Function for moving the lower quality images that were found after the search
'''
Parameters
----------
destination_path : str
Path to move the lower_quality files to
'''
destination_path = _validate._move_to(destination_path)
new_lower_quality = []
for file in self.lower_quality['lower_quality']:
try:
head, tail = os.path.split(file)
os.replace(file, os.path.join(destination_path, tail))
new_lower_quality = np.append(new_lower_quality, str(Path(os.path.join(destination_path, tail))))
except:
print(f'Could not move file: {file}')
print(f'Moved {len(self.lower_quality["lower_quality"])} files(s) to "{str(Path(destination_path))}"')
self.lower_quality = new_lower_quality
return
def delete(self, silent_del=False):
# Function for deleting the lower quality images that were found after the search
'''
Parameters
----------
silent_del : bool, optional
Skip user confirmation when delete=True (default is False)
'''
silent_del = _validate._silent_del(silent_del)
deleted_files = 0
if len(self.lower_quality) > 0:
if not silent_del:
usr = input('Are you sure you want to delete all lower quality matched images? \n! This cannot be undone. (y/n)')
if str(usr).lower() == 'y':
for file in self.lower_quality['lower_quality']:
try:
os.remove(file)
deleted_files += 1
except:
print(f'Could not delete file: {file}')
else:
print('Deletion canceled.')
return
else:
for file in self.lower_quality['lower_quality']:
try:
os.remove(file)
deleted_files += 1
except:
print(f'Could not delete file: {file}')
print(f'Deleted {deleted_files} file(s)')
return
class _validate:
'''
A class used to validate difPy input parameters.
'''
def _directory(directory):
# Function that validates the 'directory' parameter
# Check the type of directory parameter provided
if len(directory) == 0:
raise ValueError('Invalid directory parameter: no directory provided.')
if all(isinstance(dir, list) for dir in directory):
directory = np.array([item for sublist in directory for item in sublist])
elif all(isinstance(dir, str) for dir in directory):
directory = np.array(directory)
else:
raise ValueError('Invalid directory parameter: directories must be of type LIST or STRING.')
# Check if the directory exists
for dir in directory:
dir = Path(dir)
if not os.path.isdir(dir):
raise FileNotFoundError(f'Directory "{str(dir)}" does not exist')
# Check if the directories provided are unique
if len(set(directory)) != directory.size:
raise ValueError('Invalid directory parameters: invalid attempt to compare a directory with itself.')
return sorted(directory)
def _recursive(recursive):
# Function that validates the 'recursive' input parameter
if not isinstance(recursive, bool):
raise Exception('Invalid value for "recursive" parameter: must be of type BOOL.')
return recursive
def _in_folder(in_folder, recursive):
# Function that validates the 'in_folder' input parameter
if not isinstance(in_folder, bool):
raise Exception('Invalid value for "in_folder" parameter: must be of type BOOL.')
elif not recursive and in_folder:
warnings.warn('"in_folder" cannot be "True" if "recurive" is set to "False". "in_folder" will be ignored.')
in_folder = False
return in_folder
def _limit_extensions(limit_extensions):
# Function that _validates the 'limit_extensions' input parameter
if not isinstance(limit_extensions, bool):
raise Exception('Invalid value for "limit_extensions" parameter: must be of type BOOL.')
return limit_extensions
def _similarity(similarity):
# Function that validates the 'similarity' input parameter
if similarity in ['low', 'normal', 'high']:
raise Exception('Since difPy v3.0.8, "similarity" parameter only accepts "duplicates" and "similar" as input options.')
elif similarity not in ['duplicates', 'similar']:
try:
similarity = float(similarity)
if similarity < 0:
raise Exception('Invalid value for "similarity" parameter: must be >= 0.')
else:
return similarity
except:
raise Exception('Invalid value for "similarity" parameter: must be of type INT or FLOAT.')
else:
if similarity == 'duplicates':
# search for duplicate images
similarity = 0
elif similarity == 'similar':
# search for similar images
similarity = 50
return similarity
def _px_size(px_size):
# Function that validates the 'px_size' input parameter
if not isinstance(px_size, int):
raise Exception('Invalid value for "px_size" parameter: must be of type INT.')
if px_size < 10 or px_size > 5000:
raise Exception('Invalid value for "px_size" parameter: must be between 10 and 5000.')
return px_size
def _show_progress(show_progress):
# Function that validates the 'show_progress' input parameter
if not isinstance(show_progress, bool):
raise Exception('Invalid value for "show_progress" parameter: must be of type BOOL.')
return show_progress
def _stats(stats):
# Function that validates the 'stats' input parameter
if not isinstance(stats, bool):
raise Exception('Invalid value for "stats" parameter: must be of type BOOL.')
return stats
def _silent_del(silent_del):
# Function that _validates the 'delete' and the 'silent_del' input parameter
if not isinstance(silent_del, bool):
raise Exception('Invalid value for "silent_del" parameter: must be of type BOOL.')
return silent_del
def _file_list(file_list):
# Function that _validates the 'file_list' input parameter
if not isinstance(file_list, list):
raise Exception('Invalid value: please input a valid difPy search object.')
return file_list
def _move_to(dir):
# Function that _validates the 'move_to' input parameter
if not isinstance(dir, str):
raise Exception('Invalid value for "move_to" parameter: must be of type STR')
else:
dir = Path(dir)
if not os.path.exists(dir):
try:
os.makedirs(dir)
except:
raise Exception(f'Invalid value for "move_to" parameter: "{str(dir)}" does not exist.')
elif not os.path.isdir(dir):
raise ValueError(f'Invalid value for "move_to" parameter: "{str(dir)}" is not a directory.')
return dir
class _help:
'''
A class used for difPy helper functions.
'''
def _show_progress(count, total_count, task='processing images'):
# Function that displays a progress bar during the search
if count == total_count:
print(f'difPy {task}: [{count/total_count:.0%}]')
#print(f'difPy {task}: [{count+1}/{total_count}] [{(count+1)/total_count:.0%}]')
else:
print(f'difPy {task}: [{count/total_count:.0%}]', end='\r')
def _type_str_int(x):
# Function to make the CLI accept int and str type inputs for the similarity parameter
try:
return int(x)
except:
return x
if __name__ == '__main__':
    # Parameters for when launching difPy via CLI
    parser = argparse.ArgumentParser(description='Find duplicate or similar images with difPy - https://github.com/elisemercury/Duplicate-Image-Finder')
    parser.add_argument('-D', '--directory', type=str, nargs='+', help='Paths of the directories to be searched. Default is working dir.', required=False, default=[os.getcwd()])
    parser.add_argument('-Z', '--output_directory', type=str, help='Output directory path for the difPy result files. Default is working dir.', required=False, default=None)
    parser.add_argument('-r', '--recursive', type=lambda x: bool(strtobool(x)), help='Search recursively within the directories.', required=False, choices=[True, False], default=True)
    parser.add_argument('-i', '--in_folder', type=lambda x: bool(strtobool(x)), help='Search for matches in the union of directories.', required=False, choices=[True, False], default=False)
    parser.add_argument('-le', '--limit_extensions', type=lambda x: bool(strtobool(x)), help='Limit search to known image file extensions.', required=False, choices=[True, False], default=True)
    parser.add_argument('-px', '--px_size', type=int, help='Compression size of images in pixels.', required=False, default=50)
    parser.add_argument('-p', '--show_progress', type=lambda x: bool(strtobool(x)), help='Show the real-time progress of difPy.', required=False, choices=[True, False], default=True)
    parser.add_argument('-s', '--similarity', type=_help._type_str_int, help='Similarity grade (mse).', required=False, default='duplicates')
    parser.add_argument('-mv', '--move_to', type=str, help='Output directory path of lower quality images among matches.', required=False, default=None)
    parser.add_argument('-d', '--delete', type=lambda x: bool(strtobool(x)), help='Delete lower quality images among matches.', required=False, choices=[True, False], default=False)
    parser.add_argument('-sd', '--silent_del', type=lambda x: bool(strtobool(x)), help='Suppress the user confirmation when deleting images.', required=False, choices=[True, False], default=False)
    parser.add_argument('-l', '--logs', type=lambda x: bool(strtobool(x)), help='Collect statistics during the process.', required=False, choices=[True, False], default=True)
    args = parser.parse_args()
    # initialize difPy and index the image repository
    dif = build(args.directory, recursive=args.recursive, in_folder=args.in_folder, limit_extensions=args.limit_extensions, px_size=args.px_size, show_progress=args.show_progress, logs=args.logs)
    # perform the search; forward the CLI progress/logging flags (previously
    # they were silently ignored for the search phase)
    se = search(dif, similarity=args.similarity, show_progress=args.show_progress, logs=args.logs)
    # create filenames for the output files
    timestamp = datetime.now().strftime("%Y%m%d%H%M%S")
    result_file = f'difPy_{timestamp}_results.json'
    lq_file = f'difPy_{timestamp}_lower_quality.json'
    stats_file = f'difPy_{timestamp}_stats.json'
    # check if 'output_directory' parameter exists
    if args.output_directory is not None:
        dir = args.output_directory
        if not os.path.exists(dir):
            os.makedirs(dir)
    else:
        dir = os.getcwd()
    # output 'search.result' to file
    with open(os.path.join(dir, result_file), 'w') as file:
        json.dump(se.result, file)
    # output 'search.stats' to file (only collected when logging is enabled)
    if args.logs:
        with open(os.path.join(dir, stats_file), 'w') as file:
            json.dump(se.stats, file)
    # check 'move_to' parameter
    if args.move_to is not None:
        # move lower quality files
        # BUG FIX: move_to() is an instance method that takes only the
        # destination path and updates se.lower_quality in place; the old call
        # 'se.move_to(se, args.move_to).lower_quality' passed the search object
        # as the path and dereferenced the method's None return value.
        se.move_to(args.move_to)
    # output 'search.lower_quality' to file; coerce to a JSON-serializable dict
    # in case move_to() replaced the attribute with a different container type
    lower_quality = se.lower_quality
    if not isinstance(lower_quality, dict):
        lower_quality = {'lower_quality': [str(f) for f in lower_quality]}
    with open(os.path.join(dir, lq_file), 'w') as file:
        json.dump(lower_quality, file)
    # check 'delete' parameter
    if args.delete:
        # delete search.lower_quality files
        se.delete(silent_del=args.silent_del)
    print(f'''\n{result_file}\n{lq_file}\n{stats_file}\n\nsaved in '{dir}'.''')
from aip import AipSpeech
# Replace with your Baidu developer credentials (app ID / API key / secret key).
BAIDU_APP_ID = 'xxx'
BAIDU_API_KEY = 'xxx'
BAIDU_SECRET_KEY = 'xxx'
# Create an AipSpeech client for the Baidu speech-recognition REST API.
client = AipSpeech(BAIDU_APP_ID, BAIDU_API_KEY, BAIDU_SECRET_KEY)
def recognize_wav_file(filename):
    """Transcribe a 16 kHz WAV file via the Baidu short-speech API.

    Returns the first recognition candidate as a string, or None on error
    (the error code and message are printed).
    """
    with open(filename, 'rb') as file:
        audio_data = file.read()
    # dev_pid 1537 selects the Mandarin recognition model (per Baidu ASR docs).
    response = client.asr(audio_data, 'wav', 16000, {'dev_pid': 1537})
    # err_no 0 means success; 'result' holds the candidate transcriptions.
    if response['err_no'] == 0:
        text = response['result'][0]
        return text
    else:
        print(f"识别错误:{response['err_no']} - {response['err_msg']}")
        return None
# Replace with the path of the WAV file to transcribe.
wav_file_path = "output.wav"
recognized_text = recognize_wav_file(wav_file_path)
# Print the recognition result, or a failure notice when None was returned.
if recognized_text:
    print(f"识别结果:{recognized_text}")
else:
    print("识别失败")
| brcarry/Embedded_Project | unit_test/test01-baidu.py | test01-baidu.py | py | 832 | python | en | code | 0 | github-code | 13 |
18233368981 | from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Sequence, Tuple, Type, Union
import pkg_resources
import google.api_core.client_options as ClientOptions # type: ignore
from google.api_core import exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation
from google.api_core import operation_async
from google.cloud.assuredworkloads_v1beta1.services.assured_workloads_service import (
pagers,
)
from google.cloud.assuredworkloads_v1beta1.types import assuredworkloads_v1beta1
from google.protobuf import field_mask_pb2 as field_mask # type: ignore
from google.protobuf import timestamp_pb2 as timestamp # type: ignore
from .transports.base import AssuredWorkloadsServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import AssuredWorkloadsServiceGrpcTransport
from .transports.grpc_asyncio import AssuredWorkloadsServiceGrpcAsyncIOTransport
class AssuredWorkloadsServiceClientMeta(type):
    """Metaclass for the AssuredWorkloadsService client.
    This provides class-level methods for building and retrieving
    support objects (e.g. transport) without polluting the client instance
    objects.
    """
    # Registry of available transports, keyed by the label accepted by
    # get_transport_class(). Insertion order matters: the first entry is the
    # default when no label is requested.
    _transport_registry = (
        OrderedDict()
    )  # type: Dict[str, Type[AssuredWorkloadsServiceTransport]]
    _transport_registry["grpc"] = AssuredWorkloadsServiceGrpcTransport
    _transport_registry["grpc_asyncio"] = AssuredWorkloadsServiceGrpcAsyncIOTransport
    def get_transport_class(
        cls, label: str = None,
    ) -> Type[AssuredWorkloadsServiceTransport]:
        """Return an appropriate transport class.
        Args:
            label: The name of the desired transport. If none is
                provided, then the first transport in the registry is used.
        Returns:
            The transport class to use.
        """
        # If a specific transport is requested, return that one.
        # A missing label raises KeyError from the registry lookup.
        if label:
            return cls._transport_registry[label]
        # No transport is requested; return the default (that is, the first one
        # in the dictionary).
        return next(iter(cls._transport_registry.values()))
class AssuredWorkloadsServiceClient(metaclass=AssuredWorkloadsServiceClientMeta):
"""Service to manage AssuredWorkloads."""
    @staticmethod
    def _get_default_mtls_endpoint(api_endpoint):
        """Convert api endpoint to mTLS endpoint.
        Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
        "*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
        Args:
            api_endpoint (Optional[str]): the api endpoint to convert.
        Returns:
            str: converted mTLS api endpoint.
        """
        # None / empty endpoints pass through unchanged.
        if not api_endpoint:
            return api_endpoint
        mtls_endpoint_re = re.compile(
            r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
        )
        m = mtls_endpoint_re.match(api_endpoint)
        name, mtls, sandbox, googledomain = m.groups()
        # Already an mTLS endpoint, or not a googleapis.com domain: leave as-is.
        if mtls or not googledomain:
            return api_endpoint
        if sandbox:
            return api_endpoint.replace(
                "sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
            )
        return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
    DEFAULT_ENDPOINT = "assuredworkloads.googleapis.com"
    # __func__ unwraps the staticmethod descriptor, which is not directly
    # callable inside the class body on older Python versions.
    DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__(  # type: ignore
        DEFAULT_ENDPOINT
    )
    @classmethod
    def from_service_account_file(cls, filename: str, *args, **kwargs):
        """Creates an instance of this client using the provided credentials
        file.
        Args:
            filename (str): The path to the service account private key json
                file.
            args: Additional arguments to pass to the constructor.
            kwargs: Additional arguments to pass to the constructor.
        Returns:
            {@api.name}: The constructed client.
        """
        credentials = service_account.Credentials.from_service_account_file(filename)
        # Any caller-supplied 'credentials' kwarg is intentionally overridden
        # by the credentials loaded from the file.
        kwargs["credentials"] = credentials
        return cls(*args, **kwargs)
    # Convenience alias: JSON key files are loaded with the same helper.
    from_service_account_json = from_service_account_file
@staticmethod
def workload_path(organization: str, location: str, workload: str,) -> str:
"""Return a fully-qualified workload string."""
return "organizations/{organization}/locations/{location}/workloads/{workload}".format(
organization=organization, location=location, workload=workload,
)
@staticmethod
def parse_workload_path(path: str) -> Dict[str, str]:
"""Parse a workload path into its component segments."""
m = re.match(
r"^organizations/(?P<organization>.+?)/locations/(?P<location>.+?)/workloads/(?P<workload>.+?)$",
path,
)
return m.groupdict() if m else {}
    def __init__(
        self,
        *,
        credentials: credentials.Credentials = None,
        transport: Union[str, AssuredWorkloadsServiceTransport] = None,
        client_options: ClientOptions = None,
        client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
    ) -> None:
        """Instantiate the assured workloads service client.
        Args:
            credentials (Optional[google.auth.credentials.Credentials]): The
                authorization credentials to attach to requests. These
                credentials identify the application to the service; if none
                are specified, the client will attempt to ascertain the
                credentials from the environment.
            transport (Union[str, ~.AssuredWorkloadsServiceTransport]): The
                transport to use. If set to None, a transport is chosen
                automatically.
            client_options (ClientOptions): Custom options for the client. It
                won't take effect if a ``transport`` instance is provided.
                (1) The ``api_endpoint`` property can be used to override the
                default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
                environment variable can also be used to override the endpoint:
                "always" (always use the default mTLS endpoint), "never" (always
                use the default regular endpoint) and "auto" (auto switch to the
                default mTLS endpoint if client certificate is present, this is
                the default value). However, the ``api_endpoint`` property takes
                precedence if provided.
                (2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
                is "true", then the ``client_cert_source`` property can be used
                to provide client certificate for mutual TLS transport. If
                not provided, the default SSL client certificate will be used if
                present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
                set, no client certificate will be used.
            client_info (google.api_core.gapic_v1.client_info.ClientInfo):
                The client info used to send a user-agent string along with
                API requests. If ``None``, then default info will be used.
                Generally, you only need to set this if you're developing
                your own client library.
        Raises:
            google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
                creation failed for any reason.
        """
        # Normalize client_options: accept a dict, an instance, or None.
        if isinstance(client_options, dict):
            client_options = ClientOptions.from_dict(client_options)
        if client_options is None:
            client_options = ClientOptions.ClientOptions()
        # Create SSL credentials for mutual TLS if needed.
        use_client_cert = bool(
            util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
        )
        ssl_credentials = None
        is_mtls = False
        if use_client_cert:
            if client_options.client_cert_source:
                # Lazy import: grpc is only needed when a client certificate
                # callback is supplied.
                import grpc  # type: ignore
                cert, key = client_options.client_cert_source()
                ssl_credentials = grpc.ssl_channel_credentials(
                    certificate_chain=cert, private_key=key
                )
                is_mtls = True
            else:
                # Fall back to the application-default client certificate.
                creds = SslCredentials()
                is_mtls = creds.is_mtls
                ssl_credentials = creds.ssl_credentials if is_mtls else None
        # Figure out which api endpoint to use.
        # Precedence: explicit client_options.api_endpoint, then the
        # GOOGLE_API_USE_MTLS_ENDPOINT environment variable ("auto" default).
        if client_options.api_endpoint is not None:
            api_endpoint = client_options.api_endpoint
        else:
            use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
            if use_mtls_env == "never":
                api_endpoint = self.DEFAULT_ENDPOINT
            elif use_mtls_env == "always":
                api_endpoint = self.DEFAULT_MTLS_ENDPOINT
            elif use_mtls_env == "auto":
                api_endpoint = (
                    self.DEFAULT_MTLS_ENDPOINT if is_mtls else self.DEFAULT_ENDPOINT
                )
            else:
                raise MutualTLSChannelError(
                    "Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted values: never, auto, always"
                )
        # Save or instantiate the transport.
        # Ordinarily, we provide the transport, but allowing a custom transport
        # instance provides an extensibility point for unusual situations.
        if isinstance(transport, AssuredWorkloadsServiceTransport):
            # transport is a AssuredWorkloadsServiceTransport instance.
            # A pre-built transport carries its own credentials/scopes, so
            # passing them here as well is ambiguous and rejected.
            if credentials or client_options.credentials_file:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its credentials directly."
                )
            if client_options.scopes:
                raise ValueError(
                    "When providing a transport instance, "
                    "provide its scopes directly."
                )
            self._transport = transport
        else:
            Transport = type(self).get_transport_class(transport)
            self._transport = Transport(
                credentials=credentials,
                credentials_file=client_options.credentials_file,
                host=api_endpoint,
                scopes=client_options.scopes,
                ssl_channel_credentials=ssl_credentials,
                quota_project_id=client_options.quota_project_id,
                client_info=client_info,
            )
    def create_workload(
        self,
        request: assuredworkloads_v1beta1.CreateWorkloadRequest = None,
        *,
        parent: str = None,
        workload: assuredworkloads_v1beta1.Workload = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> operation.Operation:
        r"""Creates Assured Workload.
        Args:
            request (:class:`~.assuredworkloads_v1beta1.CreateWorkloadRequest`):
                The request object. Request for creating a workload.
            parent (:class:`str`):
                Required. The resource name of the new Workload's
                parent. Must be of the form
                ``organizations/{org_id}/locations/{location_id}``.
                This corresponds to the ``parent`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            workload (:class:`~.assuredworkloads_v1beta1.Workload`):
                Required. Assured Workload to create
                This corresponds to the ``workload`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.operation.Operation:
                An object representing a long-running operation.
                The result type for the operation will be
                :class:``~.assuredworkloads_v1beta1.Workload``: An
                Workload object for managing highly regulated workloads
                of cloud customers.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([parent, workload])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a assuredworkloads_v1beta1.CreateWorkloadRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, assuredworkloads_v1beta1.CreateWorkloadRequest):
            request = assuredworkloads_v1beta1.CreateWorkloadRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if parent is not None:
            request.parent = parent
        if workload is not None:
            request.workload = workload
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.create_workload]
        # Certain fields should be provided within the metadata header;
        # add these here. The 'parent' routing header lets the backend
        # route the request to the correct location.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
        )
        # Send the request.
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Wrap the response in an operation future so callers can poll /
        # block on completion of the long-running operation.
        response = operation.from_gapic(
            response,
            self._transport.operations_client,
            assuredworkloads_v1beta1.Workload,
            metadata_type=assuredworkloads_v1beta1.CreateWorkloadOperationMetadata,
        )
        # Done; return the response.
        return response
    def update_workload(
        self,
        request: assuredworkloads_v1beta1.UpdateWorkloadRequest = None,
        *,
        workload: assuredworkloads_v1beta1.Workload = None,
        update_mask: field_mask.FieldMask = None,
        retry: retries.Retry = gapic_v1.method.DEFAULT,
        timeout: float = None,
        metadata: Sequence[Tuple[str, str]] = (),
    ) -> assuredworkloads_v1beta1.Workload:
        r"""Updates an existing workload. Currently allows updating of
        workload display_name and labels. For force updates don't set
        etag field in the Workload. Only one update operation per
        workload can be in progress.
        Args:
            request (:class:`~.assuredworkloads_v1beta1.UpdateWorkloadRequest`):
                The request object. Request for Updating a workload.
            workload (:class:`~.assuredworkloads_v1beta1.Workload`):
                Required. The workload to update. The workload’s
                ``name`` field is used to identify the workload to be
                updated. Format:
                organizations/{org_id}/locations/{location_id}/workloads/{workload_id}
                This corresponds to the ``workload`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            update_mask (:class:`~.field_mask.FieldMask`):
                Required. The list of fields to be
                updated.
                This corresponds to the ``update_mask`` field
                on the ``request`` instance; if ``request`` is provided, this
                should not be set.
            retry (google.api_core.retry.Retry): Designation of what errors, if any,
                should be retried.
            timeout (float): The timeout for this request.
            metadata (Sequence[Tuple[str, str]]): Strings which should be
                sent along with the request as metadata.
        Returns:
            ~.assuredworkloads_v1beta1.Workload:
                An Workload object for managing
                highly regulated workloads of cloud
                customers.
        """
        # Create or coerce a protobuf request object.
        # Sanity check: If we got a request object, we should *not* have
        # gotten any keyword arguments that map to the request.
        has_flattened_params = any([workload, update_mask])
        if request is not None and has_flattened_params:
            raise ValueError(
                "If the `request` argument is set, then none of "
                "the individual field arguments should be set."
            )
        # Minor optimization to avoid making a copy if the user passes
        # in a assuredworkloads_v1beta1.UpdateWorkloadRequest.
        # There's no risk of modifying the input as we've already verified
        # there are no flattened fields.
        if not isinstance(request, assuredworkloads_v1beta1.UpdateWorkloadRequest):
            request = assuredworkloads_v1beta1.UpdateWorkloadRequest(request)
        # If we have keyword arguments corresponding to fields on the
        # request, apply these.
        if workload is not None:
            request.workload = workload
        if update_mask is not None:
            request.update_mask = update_mask
        # Wrap the RPC method; this adds retry and timeout information,
        # and friendly error handling.
        rpc = self._transport._wrapped_methods[self._transport.update_workload]
        # Certain fields should be provided within the metadata header;
        # add these here. The 'workload.name' routing header lets the
        # backend route the request to the correct workload resource.
        metadata = tuple(metadata) + (
            gapic_v1.routing_header.to_grpc_metadata(
                (("workload.name", request.workload.name),)
            ),
        )
        # Send the request. Unlike create_workload, this RPC returns the
        # updated Workload directly (no long-running operation).
        response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
        # Done; return the response.
        return response
def delete_workload(
self,
request: assuredworkloads_v1beta1.DeleteWorkloadRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Deletes the workload. Make sure that workload's direct children
are already in a deleted state, otherwise the request will fail
with a FAILED_PRECONDITION error.
Args:
request (:class:`~.assuredworkloads_v1beta1.DeleteWorkloadRequest`):
The request object. Request for deleting a Workload.
name (:class:`str`):
Required. The ``name`` field is used to identify the
workload. Format:
organizations/{org_id}/locations/{location_id}/workloads/{workload_id}
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a assuredworkloads_v1beta1.DeleteWorkloadRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, assuredworkloads_v1beta1.DeleteWorkloadRequest):
request = assuredworkloads_v1beta1.DeleteWorkloadRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_workload]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def get_workload(
self,
request: assuredworkloads_v1beta1.GetWorkloadRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> assuredworkloads_v1beta1.Workload:
r"""Gets Assured Workload associated with a CRM Node
Args:
request (:class:`~.assuredworkloads_v1beta1.GetWorkloadRequest`):
The request object. Request for fetching a workload.
name (:class:`str`):
Required. The resource name of the Workload to fetch.
This is the workloads's relative path in the API,
formatted as
"organizations/{organization_id}/locations/{location_id}/workloads/{workload_id}".
For example,
"organizations/123/locations/us-east1/workloads/assured-workload-1".
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.assuredworkloads_v1beta1.Workload:
An Workload object for managing
highly regulated workloads of cloud
customers.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a assuredworkloads_v1beta1.GetWorkloadRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, assuredworkloads_v1beta1.GetWorkloadRequest):
request = assuredworkloads_v1beta1.GetWorkloadRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_workload]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_workloads(
self,
request: assuredworkloads_v1beta1.ListWorkloadsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListWorkloadsPager:
r"""Lists Assured Workloads under a CRM Node.
Args:
request (:class:`~.assuredworkloads_v1beta1.ListWorkloadsRequest`):
The request object. Request for fetching workloads in an
organization.
parent (:class:`str`):
Required. Parent Resource to list workloads from. Must
be of the form
``organizations/{org_id}/locations/{location}``.
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.pagers.ListWorkloadsPager:
Response of ListWorkloads endpoint.
Iterating over this object will yield
results and resolve additional pages
automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a assuredworkloads_v1beta1.ListWorkloadsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, assuredworkloads_v1beta1.ListWorkloadsRequest):
request = assuredworkloads_v1beta1.ListWorkloadsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_workloads]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListWorkloadsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
try:
    # Report the installed library version in client telemetry headers.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
        gapic_version=pkg_resources.get_distribution(
            "google-cloud-assuredworkloads",
        ).version,
    )
except pkg_resources.DistributionNotFound:
    # Package metadata is unavailable (e.g. running from a source checkout);
    # fall back to version-less client info.
    DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("AssuredWorkloadsServiceClient",)
| Global19/python-assured-workloads | google/cloud/assuredworkloads_v1beta1/services/assured_workloads_service/client.py | client.py | py | 29,069 | python | en | code | null | github-code | 13 |
36662166048 | import array as arr
import numpy as np
import time
import csv
import scipy.misc
import matplotlib.pyplot as plt
import channelrowparse_maxmin as testmain
import channelrowparse_zett as zettmain
StartTime = time.time()  # wall-clock start, reported at the end of the script
def UseCallPy():
    """Run the zett channel-row parsing simulation with hard-coded parameters.

    Delegates to ``channelrowparse_zett.CallMain`` with a fixed sensor
    geometry, ROI, file range and input/output paths; the testmain variant
    below is kept (disabled) for reference.
    """
    '''
    testmain.nROI_X = 3998
    testmain.nROI_Y = 2998
    '''
    # Sub-folder names (channel addresses) scanned under the input folder.
    sFilePathFolder = [
        '0x1010', '0x1020', '0x1030', '0x1040', '0x1050', '0x1060', '0x1070', '0x1080', '0x1090', '0x10A0', '0x10B0', '0x10C0', \
    ]
    '''
    testmain.CallMain( nWidth=8000, \
                       nHeight=6000, \
                       nX=3998, \
                       nY=2998, \
                       nROI_W=4, \
                       nROI_H=4, \
                       nFileCounts=10, \
                       FileTimeStamp='20211111160205', \
                       InputFolder='/home/dino/RawShared/20211111_fulldark/', \
                       ArrayFolder=sFilePathFolder, \
                       OutputFolder='/home/dino/RawShared/Output/')
    print(testmain.g_sFilePathFolder)
    '''
    # Active simulation: IMX586 9728x8192 frame, 16x16 ROI at (4766, 3996).
    zettmain.CallMain( nWidth=9728, \
                       nHeight=8192, \
                       nX=4766, \
                       nY=3996, \
                       nROI_W=16, \
                       nROI_H=16, \
                       nColIndex=0, \
                       nRowIndex=2, \
                       nFileCounts=2, \
                       FileTimeStamp='2022051810', \
                       InputFolder='/home/dino/IMX586_Bin/2022051810_P8N533#2#1843_Lag/{}/', \
                       OutputFolder='/home/dino/RawShared/Output/2022051810_P8N533#2#1843_Lag/{}/', \
                       ArrayFolder=sFilePathFolder)
    print(zettmain.g_sFilePathFolder)
    return
if __name__ == "__main__":
    # Entry point: run the configured simulation once.
    UseCallPy()
    pass
EndTime = time.time()  # end timestamp for the duration report below
print("Simulation Durning Time(sec): ", EndTime - StartTime) | dinoliang/SampleCode | Python/raw/simulation_main.py | simulation_main.py | py | 1,920 | python | en | code | 0 | github-code | 13 |
278474212 | from django import forms
from .models import UserModel
class BaseForm(forms.ModelForm):
    """ModelForm base class that exposes validation errors as a flat list."""

    def get_errors(self):
        """Return every validation message as a flat list of strings.

        ``Form.errors.get_json_data()`` maps each field name to a list of
        ``{"message": ..., "code": ...}`` dicts; only the human-readable
        ``message`` values are collected here.
        """
        errors = self.errors.get_json_data()
        # Direct key lookup instead of scanning every (key, value) pair of
        # each error dict.
        return [
            message_dict['message']
            for messages in errors.values()
            for message_dict in messages
            if 'message' in message_dict
        ]
class RegisterForm(BaseForm):
    """Registration form: unique username plus matching password pair.

    ``pwd1``/``pwd2`` are the password and its confirmation. The model's
    ``password`` column is excluded from the form fields, so hashing and
    storing the password is the caller's responsibility.
    """

    pwd1 = forms.CharField(max_length=16, min_length=6, required=True, error_messages={'min_length': '密码长度最少为6!', })
    pwd2 = forms.CharField(max_length=16, min_length=6, required=True, error_messages={'min_length': '密码长度最少为6!', })

    def clean_username(self):
        """Reject usernames that already exist in the database."""
        cleaned_data = super().clean()
        username = cleaned_data.get('username')
        exists = UserModel.objects.filter(username=username).exists()
        if exists:
            raise forms.ValidationError('该用户已存在')
        else:
            return username

    def clean(self):
        """Cross-field validation: both password entries must match.

        The previous debug ``print()`` calls leaked raw passwords to stdout
        and have been removed.
        """
        cleaned_data = super().clean()
        pwd1 = cleaned_data.get('pwd1')
        pwd2 = cleaned_data.get('pwd2')
        if pwd1 != pwd2:
            raise forms.ValidationError('两次密码输入不一致')
        return cleaned_data

    class Meta:
        model = UserModel
        exclude = ['password']
class SignInForm(BaseForm):
    # Login form: exposes every UserModel field for authentication input.
    class Meta:
        model = UserModel
fields = "__all__" | ApostleMelody/Django | ManagerSystem/UserManager/forms.py | forms.py | py | 1,563 | python | en | code | 0 | github-code | 13 |
24764097098 | import socket
# operating on IPv4 addressing scheme
sSocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
# This is to bind and listen to the server (port 25 on loopback)
sSocket.bind(("127.0.0.1", 25))
sSocket.listen()
# Accept connections forever, one client at a time
while True:
    (cConnected, cAddress) = sSocket.accept()
    print("Accepted a connection request from %s:%s" % (cAddress[0], cAddress[1]))
    try:
        client_data = cConnected.recv(1024)
        print(client_data.decode())
        # Send the data back to the client
        cConnected.send("Hello Client:)".encode())
    finally:
        # Close the per-connection socket; previously each accepted client
        # leaked one file descriptor.
        cConnected.close()
| Maher512/NetworkingCW | server.py | server.py | py | 527 | python | en | code | 0 | github-code | 13 |
12645251563 | #!/usr/bin/python2
import matplotlib
matplotlib.use('Agg')
import numpy as np
from matplotlib import pyplot as plt
from VectorAlgebra import *
from Bio.PDB.PDBParser import PDBParser
def checkIfNative(xyz_CAi, xyz_CAj, cutoff=12.0):
    """Return True when two CA atoms are within ``cutoff`` of each other.

    Args:
        xyz_CAi, xyz_CAj: CA-atom coordinates (VectorAlgebra vector format).
        cutoff: contact distance threshold; defaults to the original
            hard-coded 12.0 so existing call sites are unchanged.
    """
    # Distance between the two CA atoms via the project's vector helpers.
    return vabs(vector(xyz_CAi, xyz_CAj)) < cutoff
# Accumulate a CA-CA contact map over 20 PDB snapshots (end-1.pdb .. end-20.pdb).
p = PDBParser(PERMISSIVE=1)
s = p.get_structure("1", "end-1.pdb")
N = len(s[0]["A"])  # residue count of chain A in the first snapshot
sigma = np.ones((N,N))*0  # NxN contact-count accumulator, starts at zero
for k in range(1 ,21):
    # Collect CA coordinates of regular residues (must have both CA and O).
    ca_atoms_pdb = []
    chains = s[0].get_list()
    chain = chains[0]
    for res in chain:
        is_regular_res = res.has_id('CA') and res.has_id('O')
        res_id = res.get_id()[0]
        if is_regular_res:
            ca_atoms_pdb.append(res['CA'].get_coord())
    # Count contacts between residues at least 4 apart in sequence.
    for i in range( 0, len(ca_atoms_pdb) ):
        for j in range( i+4, len(ca_atoms_pdb) ):
            xyz_CAi = ca_atoms_pdb[i]
            xyz_CAj = ca_atoms_pdb[j]
            if checkIfNative(xyz_CAi, xyz_CAj):
                sigma[i][j] += 1
                sigma[j][i] += 1  # keep the map symmetric
    # Load the next snapshot for the following iteration (none after #20).
    if k != 20:
        p = PDBParser(PERMISSIVE=1)
        pdb_id = str(k+1)
        pdb_file = "end-" + str(k+1) +".pdb"
        s = p.get_structure(pdb_id, pdb_file)
# Render and persist the accumulated contact counts.
plt.imshow(sigma)
plt.colorbar()
plt.savefig("contact-12.png")
np.savetxt('contact-hb-12.dat', sigma, fmt='%d')
| xinyugu1997/CPEB3_Actin | AWSEM_simulations/annealing_unstructured_domain/HB_term_on/result/Drawcontactmap.py | Drawcontactmap.py | py | 1,511 | python | en | code | 0 | github-code | 13 |
33654291546 | """empty message
Revision ID: 14af6017bb46
Revises: 7292deb23125
Create Date: 2020-11-16 14:58:23.526641
"""
from alembic import op
import sqlalchemy as sa
from pytz import utc
from datetime import datetime
# revision identifiers, used by Alembic.
revision = '14af6017bb46'
down_revision = '7292deb23125'
branch_labels = None
depends_on = None
def upgrade():
    """Create the `products` and `sales` tables and insert demo seed rows.

    NOTE(review): `sales.branch_id` references `branches.id`, presumably
    created by an earlier revision — confirm against revision 7292deb23125.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    t_products = op.create_table('products',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('name', sa.String(length=50), nullable=False),
    sa.Column('price', sa.Float(), nullable=False),
    sa.Column('image_path', sa.String(length=120), nullable=False),
    sa.Column('description', sa.String(length=400), nullable=False),
    sa.PrimaryKeyConstraint('id'),
    sa.UniqueConstraint('image_path'),
    sa.UniqueConstraint('name')
    )
    t_sales = op.create_table('sales',
    sa.Column('id', sa.Integer(), nullable=False),
    sa.Column('value', sa.Float(), nullable=False),
    sa.Column('date', sa.DateTime(timezone=True), nullable=False),
    sa.Column('product_id', sa.Integer(), nullable=True),
    sa.Column('branch_id', sa.Integer(), nullable=True),
    sa.ForeignKeyConstraint(['branch_id'], ['branches.id'], ),
    sa.ForeignKeyConstraint(['product_id'], ['products.id'], ),
    sa.PrimaryKeyConstraint('id')
    )
    # ### end Alembic commands ###
    # Seed demo data: products first, then sales that reference them by id.
    connection = op.get_bind()
    connection.execute(
        sa.insert(t_products).values([
            {'name': 'Красная роза', 'price': 50.7, 'description': 'Красивая очень', 'image_path': 'test/1.jpg'},
            {'name': 'Лилия', 'price': 25, 'description': 'Бери, не пожалеешь', 'image_path': 'test/2.jpg'},
            {'name': 'Ромашка', 'price': 5, 'description': 'Одно из наиболее известных лекарственных растений',
             'image_path': 'test/3.jpg'},
        ])
    )
    connection.execute(
        sa.insert(t_sales).values([
            {
                'value': 10,
                'date': datetime.strptime('10/11/2020 00:00:00', '%d/%m/%Y %H:%M:%S').astimezone(utc),
                'product_id': 1,
                'branch_id': 1
            },
            {
                'value': 35000,
                'date': datetime.strptime('11/11/2020 00:00:00', '%d/%m/%Y %H:%M:%S').astimezone(utc),
                'product_id': 2,
                'branch_id': 1
            },
            {
                'value': 9,
                'date': datetime.strptime('13/11/2020 00:00:00', '%d/%m/%Y %H:%M:%S').astimezone(utc),
                'product_id': 3,
                'branch_id': 2
            },
            {
                'value': 20,
                'date': datetime.strptime('10/09/2020 00:00:00', '%d/%m/%Y %H:%M:%S').astimezone(utc),
                'product_id': 1,
                'branch_id': 2
            }
        ])
    )
def downgrade():
    """Drop the tables created by :func:`upgrade` (seed rows go with them)."""
    # ### commands auto generated by Alembic - please adjust! ###
    # `sales` must go first: it holds foreign keys into `products`.
    op.drop_table('sales')
    op.drop_table('products')
    # ### end Alembic commands ###
| kzagorulko/flower-system | backend/migrations/versions/14af6017bb46_.py | 14af6017bb46_.py | py | 3,157 | python | en | code | 2 | github-code | 13 |
5848977424 | # coding: utf-8
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import Reshape
from keras.layers.core import Activation
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import UpSampling2D
from keras.layers.convolutional import Conv2D, MaxPooling2D
from keras.layers.core import Flatten
from keras.layers import LeakyReLU
from keras.optimizers import SGD, Adam
from keras.datasets import mnist
import numpy as np
from PIL import Image
import argparse
import math
def generator_model():
    """Build the DCGAN generator.

    Maps a 100-dim noise vector to a 28x28x1 image in [-1, 1] (tanh
    output), upsampling a 7x7 feature map to 14x14 and then 28x28.
    """
    model = Sequential()
    # Keras 2 signature Dense(units, input_dim=...); the original used the
    # legacy Keras 1 `output_dim=` kwarg, which is deprecated/removed in the
    # Keras 2 API that the Conv2D calls below already rely on.
    model.add(Dense(1024, input_dim=100))
    model.add(Activation('tanh'))
    model.add(Dense(128*7*7))
    # BatchNorm stabilizes generator training before reshaping to a 7x7 map.
    model.add(BatchNormalization())
    model.add(Activation('tanh'))
    model.add(Reshape((7, 7, 128), input_shape=(128*7*7,)))
    model.add(UpSampling2D(size=(2, 2)))  # 7x7 -> 14x14
    model.add(Conv2D(64, (5, 5), padding='same'))
    model.add(Activation('tanh'))
    model.add(UpSampling2D(size=(2, 2)))  # 14x14 -> 28x28
    model.add(Conv2D(1, (5, 5), padding='same'))
    model.add(Activation('tanh'))  # output in [-1, 1], matching data scaling
    return model
def discriminator_model():
    """Build the DCGAN discriminator: 28x28x1 image -> real/fake probability."""
    layers = [
        Conv2D(64, (5, 5), padding='same', input_shape=(28, 28, 1)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Conv2D(128, (5, 5)),
        Activation('relu'),
        MaxPooling2D(pool_size=(2, 2)),
        Flatten(),
        Dense(1024),
        Activation('relu'),
        Dense(1),
        Activation('sigmoid'),  # probability that the input is a real sample
    ]
    model = Sequential()
    for layer in layers:
        model.add(layer)
    return model
def stack_gan(g, d):
    """Chain generator and (frozen) discriminator into one stacked model.

    Side effect: sets ``d.trainable = False`` so that compiling and training
    the stacked model updates only the generator's weights.
    """
    stacked = Sequential()
    stacked.add(g)
    d.trainable = False
    stacked.add(d)
    return stacked
def combine_images(generated_images):
    """Tile a batch of single-channel images into one 2-D mosaic.

    Args:
        generated_images: array of shape (num, H, W, 1).

    Returns:
        2-D array of shape (rows*H, cols*W) where cols = floor(sqrt(num))
        and rows = ceil(num / cols), tiles laid out in row-major order;
        unused cells stay zero.
    """
    num = generated_images.shape[0]
    width = int(math.sqrt(num))
    height = int(math.ceil(float(num)/width))
    shape = generated_images.shape[1:3]
    image = np.zeros((height*shape[0], width*shape[1]),
                     dtype=generated_images.dtype)
    for index, img in enumerate(generated_images):
        # Integer row/column of this tile (divmod instead of the original
        # float true-division-then-int).
        i, j = divmod(index, width)
        image[i*shape[0]:(i+1)*shape[0], j*shape[1]:(j+1)*shape[1]] = \
            img[:, :, 0]
    return image
def train(BATCH_SIZE):
    """Adversarial training loop on MNIST.

    Alternates per mini-batch between (1) training the discriminator on a
    half-real/half-generated batch and (2) training the generator through
    the stacked model with the discriminator frozen. Saves sample mosaics
    every 200 batches and weights every 10 epochs.
    """
    (X_train, y_train), (X_test, y_test) = mnist.load_data()
    # Scale pixel values from [0, 255] to [-1, 1] to match the tanh output.
    X_train = (X_train.astype(np.float32) - 127.5)/127.5
    # X_train = X_train.astype(np.float32) / 127.5
    X_train = X_train[:, :, :, None]  # add a trailing channel axis
    d = discriminator_model()
    g = generator_model()
    d_on_g = stack_gan(g, d)  # gan: gen with dis supervising (fixed)
    d_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    g_optim = SGD(lr=0.0005, momentum=0.9, nesterov=True)
    # g_optim = Adam(lr=5e-5)
    g.compile(loss='binary_crossentropy', optimizer="SGD") # gen loss
    d_on_g.compile(loss='binary_crossentropy', optimizer=g_optim) # gan loss
    d.trainable = True
    d.compile(loss='binary_crossentropy', optimizer=d_optim) # dis loss
    for epoch in range(100):
        print("Epoch is", epoch)
        print("Number of batches", int(X_train.shape[0]/BATCH_SIZE))
        for index in range(int(X_train.shape[0]/BATCH_SIZE)):
            noise = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100)) # noise with dim=100
            # noise = np.random.uniform(0, 1, size=(BATCH_SIZE, 100)) # noise with dim=100
            image_batch = X_train[index*BATCH_SIZE: (index+1)*BATCH_SIZE]
            generated_images = g.predict(noise, verbose=0) # generating images
            if index % 200 == 0: # save combined inmages
                image = combine_images(generated_images)
                image = image*127.5 + 127.5 # (-1, 1) from tanh ==> (0, 255)
                # image *= 127.5
                Image.fromarray(image.astype(np.uint8)).save(
                    str(epoch)+"_"+str(index)+".png")
            # Discriminator step: real images labelled 1, fakes labelled 0.
            X = np.concatenate((image_batch, generated_images)) # (real, generated)
            y = [1] * BATCH_SIZE + [0] * BATCH_SIZE # labelling
            d_loss = d.train_on_batch(X, y) # TRAINING DIS
            if index % 10 == 0:
                print("batch %d d_loss : %f" % (index, d_loss))
            # Generator step: freeze D and train G to make D output 1.
            noise = np.random.uniform(-1, 1, (BATCH_SIZE, 100))
            # noise = np.random.uniform(0, 1, (BATCH_SIZE, 100))
            d.trainable = False # DIS freeze
            g_loss = d_on_g.train_on_batch(noise, [1] * BATCH_SIZE) # labelling and TRAINING GEN
            d.trainable = True
            if index % 10 == 0:
                print("batch %d g_loss : %f" % (index, g_loss))
        if epoch % 10 == 9:
            g.save_weights('generator', True)
            d.save_weights('discriminator', True)
    g.save_weights('generator', True)
    d.save_weights('discriminator', True)
# inference
def generate(BATCH_SIZE):
    """Load trained generator weights and write a tiled sample image to disk."""
    model = generator_model()
    model.compile(loss='binary_crossentropy', optimizer="SGD")
    model.load_weights('generator')
    latent = np.random.uniform(-1, 1, size=(BATCH_SIZE, 100))
    samples = model.predict(latent, verbose=0)
    mosaic = combine_images(samples)
    # Map tanh output from [-1, 1] back to the [0, 255] pixel range.
    mosaic = mosaic*127.5 + 127.5
    Image.fromarray(mosaic.astype(np.uint8)).save("./generated_image.png")
def get_args(argv=None):
    """Parse command-line options.

    Args:
        argv: optional list of argument strings; defaults to ``sys.argv[1:]``.
            Passing a list makes the function testable without touching the
            real command line (backward compatible: existing ``get_args()``
            calls behave exactly as before).

    Returns:
        argparse.Namespace with ``mode`` ('train' or 'generate') and
        ``batch_size``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--mode", type=str, default='train')
    parser.add_argument("--batch_size", type=int, default=128)
    args = parser.parse_args(argv)
    return args
if __name__ == "__main__":
    # Dispatch on --mode: "train" runs the GAN loop, "generate" samples
    # from saved weights.
    args = get_args()
    if args.mode == "train":
        train(BATCH_SIZE=args.batch_size)
    elif args.mode == "generate":
        generate(BATCH_SIZE=args.batch_size)
| huht3k/GAN | mnist_gan.py | mnist_gan.py | py | 6,548 | python | en | code | 0 | github-code | 13 |
3229608656 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from os import path
from flask_login import LoginManager
# database
db = SQLAlchemy()  # shared SQLAlchemy instance, bound to the app in create_app()
DB_NAME = "database.db"  # SQLite file name used for the connection URI
def create_app():
    """Application factory: configure Flask, the database, blueprints and login."""
    app = Flask(__name__)
    app.config['SECRET_KEY'] = 'jdgalwbeflahugfs'
    app.config['SQLALCHEMY_DATABASE_URI'] = f'sqlite:///{DB_NAME}'  # tells the app which db to use
    # this tells the flask app to init this db with this app
    db.init_app(app)
    # Imported here (not at module top) — presumably to avoid circular
    # imports, since views/auth import from this package.
    from .views import views
    from .auth import auth
    app.register_blueprint(views,
                           url_prefix='/')  # the url-prefix is for the prefix of each url e.g /views/<actual url>
    app.register_blueprint(auth, url_prefix='/')
    from .models import Note, User  # getting the dbs
    create_database(app)
    # to tell flask which login manager to use
    login_manager = LoginManager()
    login_manager.login_view = 'auth.login'  # endpoint for redirecting anonymous users
    login_manager.init_app(app)
    @login_manager.user_loader
    def load_user(id):
        # Resolve the session's stored user id back to a User row.
        return User.query.get(int(id))
    return app
def create_database(app):
    """Create all tables in the SQLite file unless it already exists."""
    if path.exists('website/' + DB_NAME):
        return  # database file already present; nothing to do
    db.create_all(app=app)
    print('Created db')
| Lord-Psarris/Flask-notes-app | website/__init__.py | __init__.py | py | 1,193 | python | en | code | 0 | github-code | 13 |
11161256570 | import itertools
import logging
import random
import string
from pyinsect.documentModel.comparators import SimilarityHPG, SimilarityVS
from pyinsect.documentModel.representations.DocumentNGramGraph import DocumentNGramGraph
logger = logging.getLogger(__name__)
class HPGTestCaseMixin(object):
    """Shared test logic for Hierarchical Proximity Graph (HPG) builders.

    Subclasses set ``graph_type`` to the HPG class under test. All random
    draws are seeded in :meth:`setUp`, so the call order of the random
    functions below must not change.
    """

    graph_type = None  # HPG implementation under test; set by subclasses

    def _construct_graph(
        self, data, window_size, number_of_levels, similarity_metric, *args, **kwargs
    ):
        # Build an HPG over `data` and realise it with DocumentNGramGraph levels.
        return self.graph_type(
            data, window_size, number_of_levels, similarity_metric
        ).as_graph(DocumentNGramGraph, *args, **kwargs)

    def setUp(self):
        super().setUp()

        # Fixed seed keeps every generated array/configuration deterministic.
        random.seed(1234)

        self.data = self.generate_random_2d_int_array(5)

        self.array_graph_metric = SimilarityVS()
        self.hpg_metric = SimilarityHPG(self.array_graph_metric)

    def test_same_similarity(self):
        # Two graphs built from identical data must be maximally similar.
        graph1 = self._construct_graph(self.data, 3, 3, self.array_graph_metric)
        graph2 = self._construct_graph(self.data, 3, 3, self.array_graph_metric)

        value = self.hpg_metric(graph1, graph2)

        self.assertEqual(value, 1.0)

    def test_equality(self):
        # Structural equality of graphs built from the same data.
        graph1 = self._construct_graph(self.data, 3, 3, self.array_graph_metric)
        graph2 = self._construct_graph(self.data, 3, 3, self.array_graph_metric)

        self.assertEqual(graph1, graph2)

    def test_diff_similarity(self):
        # Any non-identity permutation of the rows must score below 1.0.
        for permutation_index, permutation in enumerate(
            itertools.permutations(self.data)
        ):
            if permutation == tuple(self.data):
                continue

            logger.info("Permutation: %02d", permutation_index)

            with self.subTest(permutation=permutation):
                graph1 = self._construct_graph(
                    permutation, 3, 3, self.array_graph_metric
                )
                graph2 = self._construct_graph(self.data, 3, 3, self.array_graph_metric)

                value = self.hpg_metric(graph1, graph2)

                self.assertNotEqual(value, 1.0)

    def test_commutativity(self):
        # similarity(a, b) must equal similarity(b, a).
        data1 = self.generate_random_2d_int_array(5)
        data2 = self.generate_random_2d_int_array(5)

        graph1 = self._construct_graph(data1, 3, 3, self.array_graph_metric)
        graph2 = self._construct_graph(data2, 3, 3, self.array_graph_metric)

        value1 = self.hpg_metric(graph1, graph2)
        value2 = self.hpg_metric(graph2, graph1)

        self.assertEqual(value1, value2)

    def test_combinations(self):
        # Random sizes/configurations: similarity must always lie in [0, 1].
        for combination_index in range(10):
            logger.info("Combination: %02d", combination_index)

            length1 = random.randint(1, 5)
            length2 = random.randint(1, 5)

            data1 = self.generate_random_2d_int_array(length1)
            data2 = self.generate_random_2d_int_array(length2)

            levels_1, window_size_1 = (
                random.randint(1, 4),
                random.randint(1, 10),
            )

            levels2, window_size_2 = (
                random.randint(1, 4),
                random.randint(1, 10),
            )

            logger.info("Configuration #1: (%02d, %02d)", levels_1, window_size_1)
            logger.info("Configuration #2: (%02d, %02d)", levels2, window_size_2)

            with self.subTest(
                config1=(levels_1, window_size_1, data1),
                config2=(levels2, window_size_2, data2),
            ):
                graph1 = self._construct_graph(
                    data1, window_size_1, levels_1, self.array_graph_metric
                )
                graph2 = self._construct_graph(
                    data2, window_size_2, levels2, self.array_graph_metric
                )

                value = self.hpg_metric(graph1, graph2)

                self.assertTrue(0.0 <= value <= 1.0)

    @classmethod
    def generate_random_2d_int_array(cls, size):
        # size x size array of ASCII codes of random letters.
        return [
            [ord(random.choice(string.ascii_letters)) for _ in range(size)]
            for _ in range(size)
        ]
| ggianna/PyINSECT | tests/hpg/base.py | base.py | py | 4,016 | python | en | code | 3 | github-code | 13 |
29396043152 | import SimpleITK as sitk
import sys
import numpy as np
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import scipy.ndimage as nimg
from numba import njit
import tensorflow as tf # tf.__version__: 1.12.0
from skimage.feature import peak_local_max
from skimage.segmentation import watershed
from sklearn.decomposition import PCA
spread = 70  # half-size (in pixels) of the extracted oblique slice window
spatial_weight = 1.0  # weight of spatial step vs. intensity difference in the geodesic cost
def get_plane_normal(cp, wp):
    """Estimate the unit plane normal at position ``cp`` along a walk path.

    ``wp`` is an (N, 3) array of walk positions; the normal is the mean of
    the 11 backward differences wp[cp-5+k] - wp[cp+k], normalised to unit
    length.

    Returns:
        (v, d): unit normal ``v`` and offset ``d`` with the plane given by
        a*x + b*y + c*z - d = 0 passing through wp[cp].
    """
    # Clamp so the 11-sample window stays inside the array.
    if cp < 15:
        cp = 15
    deltas = wp[cp - 5:cp + 5 + 1] - wp[cp:cp + 11]
    normal = np.mean(deltas, axis=0)
    normal = normal / np.sqrt(np.sum(normal ** 2))
    offset = np.sum(normal * wp[cp])  # ax+by+cz-d=0
    return normal, offset
def get_slice(v, p, mask, epsilon=1e-9):
    """Extract an oblique 2-D slice of ``mask`` through point ``p`` with normal ``v``.

    Rotates the axial sampling grid so its normal [0, 0, 1] aligns with
    ``v`` (axis-angle / Rodrigues rotation matrix), translates it to ``p``,
    rounds to voxel indices and clamps to the volume bounds.

    Returns:
        (slice_values, new_points): the sampled 2*spread x 2*spread mask
        slice and the (H, W, 3) voxel coordinates behind each slice pixel.
    """
    # v,d,p = n, d, walk_pos[id][cd_pos]
    # ref_normal: [0,0,1] (axial view), our normal: v
    ref_normal = np.array([0,0,1])
    vu = v/np.sqrt(np.sum(v**2))
    # Replace exact zeros with epsilon to avoid degenerate axis components.
    vu = np.where(vu==0, epsilon, vu)
    ref_normal = np.where(ref_normal==0, epsilon, ref_normal)
    costheta = np.dot(ref_normal, vu)
    # Rotation axis: perpendicular to both normals.
    e = np.cross(ref_normal, vu)
    if np.sum(e)!=0: e = e/np.sqrt(np.sum(e**2))
    e = np.where(e == 0, epsilon, e)
    c = costheta
    s = np.sqrt(1 - c * c)
    C = 1 - c
    x,y,z = e[0], e[1], e[2]
    # Axis-angle (Rodrigues) rotation matrix for axis e and angle theta.
    rmat = np.array([[x * x * C + c, x * y * C - z * s, x * z * C + y * s],
                     [y * x * C + z * s, y * y * C + c, y * z * C - x * s],
                     [z * x * C - y * s, z * y * C + x * s, z * z * C + c]])
    # Planar grid of sample offsets in the reference (axial) frame.
    px, py, pz = np.meshgrid(np.arange(-spread,spread), np.arange(-spread,spread), np.arange(0,1))
    points = np.concatenate([px,py,pz], axis=-1)
    new_points = np.matmul(points, rmat.T)
    new_points += p
    new_points = np.int32(new_points+0.5)  # round to nearest voxel index
    # Clamp indices into the valid volume range.
    new_points[new_points<0] = 0
    for i in range(3):
        a = new_points[:,:,i]
        a[a>=mask.shape[i]] = mask.shape[i]-1
        new_points[:,:,i] = a
    # NOTE(review): indexing order swaps axes 0/1 of the computed points —
    # presumably matching the (y, x, z) volume layout; confirm with callers.
    return mask[new_points[:,:,1], new_points[:,:,0], new_points[:,:,2]], new_points
def segment_orifice_in_slice(slice):
    """Watershed-segment the region that covers the slice centre.

    Returns a binary mask: 1 for pixels sharing the watershed label found
    at the centre pixel, 0 elsewhere.
    """
    distance_map = nimg.distance_transform_edt(slice)
    smoothed = nimg.maximum_filter(distance_map, 30)
    # Marker seeds: well-separated local maxima of the smoothed distances.
    local_max = peak_local_max(smoothed, indices=False, min_distance=30, labels=slice)
    markers = nimg.label(local_max, structure=np.ones((3, 3)))[0]
    labels = watershed(-distance_map, markers, mask=slice)
    centre_label = labels[spread - 1, spread - 1]
    mask = labels.copy()
    mask[labels == centre_label] = 1
    mask[labels != centre_label] = 0
    return mask
def get_eigen(orifice, slice_pts):
    """PCA of the 3-D coordinates of the orifice pixels.

    Args:
        orifice: 2-D mask (1 marks orifice pixels).
        slice_pts: (H, W, 3) array mapping each slice pixel to a 3-D point.

    Returns:
        (variances, axes, centroid): the two largest explained variances,
        the corresponding 2x3 principal axes, and the mean 3-D point.
    """
    # Boolean-mask the (H, W, 3) point grid directly instead of splitting
    # into per-axis arrays and concatenating (removes the dead `pp = pp`
    # statement and the duplicate mean computation of the original).
    coords = slice_pts[orifice == 1]
    centroid = np.mean(coords, axis=0)
    centered = coords - centroid
    # PCA on down-scaled (/100) coordinates, as in the original pipeline.
    pca = PCA(n_components=3)
    pca.fit_transform(centered / 100)
    return pca.explained_variance_[:2], pca.components_[:2], centroid
def get_axis_points(orifice, slice_pts, pt_mean, eigval, eigvec):
    """Locate the major/minor axis endpoints of the orifice on the slice grid.

    Args:
        orifice: 2-D mask; only its shape/dtype is used for the marker image.
        slice_pts: (H, W, 3) array of the 3-D point behind each slice pixel.
        pt_mean: 3-D centroid of the orifice points.
        eigval: two principal variances (from :func:`get_eigen`).
        eigvec: corresponding 2x3 principal axes.

    Returns:
        ([major0, major1], [minor0, minor1], maskover): the grid points
        nearest to both ends of each axis, plus a marker image with the
        centre and the four endpoints set to 2.0.
    """
    # 2-sigma half length per axis, undoing the /100 scaling of get_eigen.
    half_length = 100 * np.sqrt(2.0 * 2.0 * eigval)
    plus_end = pt_mean + eigvec * half_length[:, np.newaxis]
    minus_end = pt_mean - eigvec * half_length[:, np.newaxis]
    maskover = np.zeros(shape=orifice.shape, dtype=orifice.dtype)

    def _snap(target):
        # Grid pixel whose 3-D point is nearest to `target`; mark it and
        # return its point (factors out the 5x duplicated block of the
        # original implementation).
        dist2 = np.sum((slice_pts - target) ** 2, axis=2)
        row, col = np.unravel_index(np.argsort(dist2.flatten())[0], dist2.shape)
        maskover[row, col] = 2.0
        return slice_pts[row, col, :]

    _snap(pt_mean)                  # centre marker only; point not returned
    major0 = _snap(plus_end[0, ])
    minor0 = _snap(plus_end[1, ])
    major1 = _snap(minus_end[0, ])
    minor1 = _snap(minus_end[1, ])
    return [major0, major1], [minor0, minor1], maskover
def show(im, cmap='gray'):
    """Display ``im`` in a new matplotlib figure (grayscale colormap by default)."""
    plt.figure()
    plt.imshow(im, cmap=cmap)
def load_dicom(dicom_dir):
    """Load a DICOM series and return it as a uint-range intensity volume.

    Reads the series, reorders axes to (y, x, z) with two flips, clips HU
    values to [-100, 900], then z-score normalises to mean 128 / std 32 and
    clamps into [0, 255].
    """
    reader = sitk.ImageSeriesReader()
    dicom_names = reader.GetGDCMSeriesFileNames(dicom_dir)
    reader.SetFileNames(dicom_names)
    image = reader.Execute()
    vol = sitk.GetArrayFromImage(image)
    # Reorient: move slice axis last, then flip z and y.
    vol = np.transpose(vol, axes=(1, 2, 0))
    vol = np.flip(vol, axis=2)
    vol = np.flip(vol, axis=0)
    vol[vol < -100] = -100
    # NOTE(review): values above 900 are set to -100 (background), not 900 —
    # presumably to discard very dense material; confirm this is intended.
    vol[vol > 900] = -100
    # Normalise intensities: mean -> 128, one std -> 32 grey levels.
    m = np.mean(vol)
    std = np.std(vol)
    add = np.int32(((vol - m) / std) * 32.0 + 0.5)
    vol = 128 + add
    vol[vol < 0] = 0
    vol[vol > 255] = 255
    return vol
def crop_points(seed, shape=None):
    """Compute a clipped bounding box around ``seed``.

    The box spans 150 voxels before / 50 after the seed in x and y, and
    10 before / 150 after in z, clipped to [1, dim-2] so 3x3x3
    neighbourhood accesses stay in bounds.

    Args:
        seed: (y, x, z) seed position.
        shape: optional (Y, X, Z) volume shape. Defaults to the module-level
            ``vol`` for backward compatibility with the original
            global-reading behaviour.

    Returns:
        (xmin, xmax, ymin, ymax, zmin, zmax, X, Y, Z).
    """
    Y, X, Z = shape if shape is not None else vol.shape
    xmin, xmax = seed[1] - 150, seed[1] + 50
    ymin, ymax = seed[0] - 150, seed[0] + 50
    zmin, zmax = seed[2] - 10, seed[2] + 150
    xmin, xmax = np.clip(xmin, 1, X - 2), np.clip(xmax, 1, X - 2)
    ymin, ymax = np.clip(ymin, 1, Y - 2), np.clip(ymax, 1, Y - 2)
    zmin, zmax = np.clip(zmin, 1, Z - 2), np.clip(zmax, 1, Z - 2)
    return xmin, xmax, ymin, ymax, zmin, zmax, X, Y, Z
def denoise(vol):
    """Median-filter (size 3) the crop region of ``vol`` in place.

    NOTE(review): the crop bounds (ymin, xmin, zmin, ...) are read from
    module-level globals that must have been set by the calling script
    before this is invoked — confirm they exist at call time.
    """
    denoised = nimg.median_filter(vol[ymin:ymax, xmin:xmax, zmin:zmax], 3)
    vol[ymin:ymax, xmin:xmax, zmin:zmax] = denoised
@njit
def forward_pass(geo, ds, vol, seed, xmax, xmin, ymax, ymin, zmax, zmin, r):
    """One forward raster sweep of the geodesic distance transform.

    Relaxes geo[i, j, k] using already-visited neighbours: the cost of a
    step is (intensity difference)^2 + spatial_weight * squared step length.
    NOTE(review): the ds, seed and r parameters are unused here; the
    neighbourhood offsets range(-1, 1) cover only -1 and 0 (the forward
    half plus self), mirroring the 0/1 offsets of backward_pass.
    """
    for i in range(ymin, ymax):
        for j in range(xmin, xmax):
            for k in range(zmin, zmax):
                g_b_min = geo[i, j, k]
                vol_ijk = vol[i, j, k]
                for ii in range(-1, 1):
                    for jj in range(-1, 1):
                        for kk in range(-1, 1):
                            g_a = geo[i + ii, j + jj, k + kk]
                            di = vol[i + ii, j + jj, k + kk] - vol_ijk
                            g_ab = di ** 2 + spatial_weight * (ii**2 + jj**2 + kk**2)
                            g_b = g_a + g_ab
                            if g_b < g_b_min:
                                g_b_min = g_b
                geo[i, j, k] = g_b_min
    return geo
@njit
def backward_pass(geo, ds, vol, seed, xmax, xmin, ymax, ymin, zmax, zmin, r):
    """One backward raster sweep of the geodesic distance transform.

    Mirror image of :func:`forward_pass`: iterates the region in reverse
    and relaxes against the 0/+1 offset neighbours.
    NOTE(review): ds, seed and r are unused here as well.
    """
    for i in range(ymax, ymin, -1):
        for j in range(xmax, xmin, -1):
            for k in range(zmax, zmin, -1):
                g_b_min = geo[i,j,k]
                vol_ijk = vol[i,j,k]
                for ii in range(0, 2):
                    for jj in range(0, 2):
                        for kk in range(0, 2):
                            g_a = geo[i+ii, j+jj, k+kk]
                            di = vol[i+ii, j+jj, k+kk] - vol_ijk
                            g_ab = di**2 + spatial_weight * (ii**2 + jj**2 + kk**2)
                            g_b = g_a + g_ab
                            if g_b < g_b_min:
                                g_b_min = g_b
                geo[i, j, k] = g_b_min
    return geo
@njit
def update_geo(geo, ds_forward, ds_backward, vol, seed, xmax, xmin, ymax, ymin, zmax, zmin, r):
    """Run two forward/backward sweep pairs over the crop region.

    NOTE(review): the first forward pass uses seed-relative lower bounds
    (seed - r) instead of the crop minima — presumably a warm start around
    the seed before the full-region sweeps; confirm this is intentional.
    """
    # forward pass
    geo = forward_pass(geo, ds_forward, vol, seed, xmax, seed[1] - r, ymax, seed[0] - r, zmax, seed[2] - r, r)
    # backward pass
    geo = backward_pass(geo, ds_backward, vol, seed, xmax, xmin, ymax, ymin, zmax, zmin, r)
    # forward pass
    geo = forward_pass(geo, ds_forward, vol, seed, xmax, xmin, ymax, ymin, zmax, zmin, r)
    # backward pass
    geo = backward_pass(geo, ds_backward, vol, seed, xmax, xmin, ymax, ymin, zmax, zmin, r)
    return geo
def geo_trans(vol, seed, xmax, xmin, ymax, ymin, zmax, zmin, r=5):
    """Geodesic distance transform of *vol* seeded at a (2r)^3 cube around *seed*.

    geo starts at a large sentinel (99999) everywhere and 0 inside the seed
    cube; update_geo then relaxes it with raster sweeps.
    Note: ds_forward/ds_backward are computed here and passed through, but
    the numba passes never read their ``ds`` parameter (vestigial).
    """
    geo = np.ones(shape=vol.shape, dtype=np.float32) * 99999.0
    geo[seed[0] - r:seed[0] + r, seed[1] - r:seed[1] + r, seed[2] - r:seed[2] + r] = 0.0
    ds = np.meshgrid(np.arange(-1, 1), np.arange(-1, 1), np.arange(-1, 1))
    ds_forward = np.sqrt(ds[0] ** 2 + ds[1] ** 2 + ds[2] ** 2)
    ds = np.meshgrid(np.arange(0, 2), np.arange(0, 2), np.arange(0, 2))
    ds_backward = np.sqrt(ds[0] ** 2 + ds[1] ** 2 + ds[2] ** 2)
    geo = update_geo(geo, ds_forward, ds_backward, vol, seed, xmax, xmin, ymax, ymin, zmax, zmin, r)
    return geo
def segment(vol, seed, threshold, xmax, xmin, ymax, ymin, zmax, zmin):
    """Threshold the geodesic distance map into a binary segmentation.

    Returns (seg, geo): an int32 mask where the sqrt-scaled geodesic distance
    is within ``threshold`` of the map's maximum, and the geodesic map itself
    with never-reached voxels zeroed out.
    """
    geo = np.sqrt(geo_trans(vol, seed, xmax, xmin, ymax, ymin, zmax, zmin))
    unreached = -1  # marker for voxels still at the initial sentinel value
    geo[geo == np.max(geo)] = unreached
    cutoff = threshold * np.max(geo)  # max over the *reached* voxels now
    seg = np.zeros(shape=vol.shape, dtype=np.int32)
    seg[geo <= cutoff] = 1
    seg[geo == unreached] = 0  # marked voxels slipped under the cutoff; clear them
    geo[geo == unreached] = 0
    return seg, geo
def mask_dt(mask):
    """Return the city-block (Manhattan) chamfer distance transform of *mask*."""
    dist = nimg.distance_transform_cdt(mask, metric='cityblock')
    return dist
def cd_walk(seed, mask):
    """Greedy centerline walk over the chamfer distance transform of *mask*.

    Starting at *seed*, repeatedly steps to the unvisited neighbor with the
    largest (trend-weighted) distance-to-boundary value for a fixed 300 steps.
    Returns (waypoints, centerline distances at each waypoint, dt).
    Uses the module globals xmin/xmax/ymin/ymax/zmin/zmax from crop_points().
    """
    dt = np.zeros(shape=mask.shape, dtype=np.float32)
    dt[ymin:ymax, xmin:xmax, zmin:zmax] = mask_dt(mask[ymin:ymax, xmin:xmax, zmin:zmax])
    x = seed.copy()
    wps = []
    cds = []
    # visited flips to -1 once stepped on, so E masks those voxels out below.
    visited = np.ones(shape=mask.shape, dtype=np.int32)
    # Directional bias over the 3x3x3 neighborhood: +1 favored, -1 penalized.
    # NOTE(review): axes 0/1 favor offset index 0 (-1 step) but axis 2 favors
    # index 1 (0 step) — confirm the asymmetry is intentional.
    trend = np.ones(shape=[3,3,3], dtype=np.int32)*(-1)
    trend[0,:,:] = 1
    trend[:,0,:] = 1
    trend[:,:,1] = 1
    for i in range(300):  # fixed number of greedy steps
        visited[x[0], x[1], x[2]] = -1
        E = dt[x[0]-1:x[0]+2,x[1]-1:x[1]+2,x[2]-1:x[2]+2]*visited[x[0]-1:x[0]+2,x[1]-1:x[1]+2,x[2]-1:x[2]+2]
        E[E<0] = 0  # drop already-visited neighbors
        E = E*trend
        max_pos = np.unravel_index(np.argmax(E), E.shape)
        x = x + np.array(max_pos) - 1  # offset index {0,1,2} -> step {-1,0,+1}
        #print(x)
        # NOTE(review): z is reset to 0 (not clamped to the boundary) when it
        # leaves the volume — confirm this is the intended recovery.
        if x[2]<0 or x[2]>=dt.shape[2]: x[2] = 0
        wps.append(x)
        cds.append(dt[x[0], x[1], x[2]])
    return np.array(wps), np.array(cds), dt
C_state_size = 50  # length of the 1-D observation window fed to the RL agent
C_n_conv = 3  # number of conv1d + max-pool stages in Agent.conv
class World:
    """Single-sample 1-D navigation environment over a distance profile.

    The agent's position is an index into ``dist`` (length 300); the goal is
    the ground-truth index ``gt``. Rewards: +1 for moving closer, +2 when
    within 1 of the goal, -1 otherwise, -10 (and reset to 0) for leaving
    the [0, 300) range.
    """
    def __init__(self):
        self.dist, self.gt, self.pos = None, None, None
        self.pos = 0
    def set_world(self, dist, gt):
        self.dist, self.gt = np.float32(np.array(dist)), np.float32(gt)
    def set_pos(self, pos):
        self.pos = pos
    def get_state(self):
        # State = distance profile concatenated with a one-hot of the position.
        one_hot_pos = np.zeros(dtype=np.float32, shape=self.dist.shape)
        one_hot_pos[self.pos] = 1.0
        state = np.concatenate([self.dist, one_hot_pos])
        return state
    def move(self, action):
        # action 0 = step right (+1), anything else = step left (-1).
        pos_prev = self.pos
        dist_prev = np.abs(pos_prev-self.gt)
        if action == 0:
            self.pos += 1
        else:
            self.pos -= 1
        dist_now = np.abs(self.pos-self.gt)
        r = -1.0
        if dist_now < dist_prev:
            r = 1.0
        if dist_now <= 1.0:
            r = 2.0
        if self.pos < 0 or self.pos>=300:
            r = -10.0
            self.pos = 0  # out-of-range resets to the start
        return r, self.get_state()
class World_p:
    """Batched (vectorized) variant of World: N samples advance in lockstep."""
    def __init__(self, N_max=1000): # dist::Nx300, gt: N, pos: N
        self.dist, self.gt, self.pos, self.N = None, None, None, None
        self.N_max = N_max  # upper bound on batch size used by callers
    def set_world(self, dist, gt):
        self.dist, self.gt = np.float32(np.array(dist)), np.squeeze(np.float32(gt))
        self.N = len(self.dist)
    def set_pos(self, pos):
        self.pos = np.squeeze(pos)
    def get_state(self, size=C_state_size): # state: Nx50
        # A window of the (zero-padded) distance profile centered on each pos.
        h = size//2
        dist_pad = np.pad(self.dist, ((0,0),(h,h)), mode='constant')
        rows = np.arange(0, self.N)[:,np.newaxis]
        cols = np.repeat(np.arange(0, size)[np.newaxis,:], self.N, 0)
        cols = cols + self.pos[:,np.newaxis]
        state = dist_pad[rows, cols]
        return state
    def move(self, action): # action: N, r:N
        pos_prev = self.pos
        dist_prev = np.abs(pos_prev-self.gt)
        self.pos[action==0] += 1
        self.pos[action==1] -= 1
        dist_now = np.abs(self.pos-self.gt)
        # Reward assignment order matters: out-of-range -10 overrides the
        # closer/near-goal rewards, and positions are clamped only afterwards.
        r = -1.0*np.ones(shape=[self.N], dtype=np.float32)
        r[dist_now<dist_prev] = 1.0
        r[dist_now<=1.0] = 2.0
        r[self.pos<0] = -10.0
        r[self.pos>=300] = -10.0
        self.pos[self.pos<0] = 0
        self.pos[self.pos>=300] = 299
        return r
class Agent:
    """TF1 policy network trained with a PPO-style clipped surrogate loss.

    conv() embeds the 1-D state window with C_n_conv conv1d+pool stages;
    policy() maps the embedding to a 2-way action distribution (clipped to
    [0.1, 0.9] to keep the importance ratio bounded).
    """
    def __init__(self, state_length=C_state_size, learn_rate=1e-5, lamda=0e-2):
        #self.regularizer = tf.contrib.layers.l2_regularizer(scale=lamda)
        self.regularizer = None
        self.state = tf.placeholder(dtype=tf.float32, shape=[None, state_length])
        self.actions = tf.placeholder(dtype=tf.int32, shape=[None, ])
        self.advantage = tf.placeholder(dtype=tf.float32, shape=[None, ])
        self.policy_old = tf.placeholder(dtype=tf.float32, shape=[None, ])
        self.learning_rate = learn_rate
        self.build_model()
        pass
    def conv(self, state):
        # 1-D conv tower; channel count doubles and length halves per stage.
        layer = tf.reshape(state, [-1, C_state_size, 1])
        n_conv = C_n_conv
        n=8
        feat_dim = C_state_size
        for i in range(n_conv):
            layer = tf.layers.conv1d(inputs=layer, filters=n * (2 ** i), kernel_size=3, activation=tf.nn.relu, padding='same',
                                     kernel_regularizer=self.regularizer)
            #layer = tf.layers.conv1d(inputs=layer, filters=n * (2 ** i), kernel_size=3, activation=tf.nn.relu, padding='same')
            layer = tf.layers.max_pooling1d(inputs=layer, pool_size=2, strides=2)
            feat_dim = feat_dim//2
        layer = tf.reshape(layer, [-1, feat_dim * n * (2**(n_conv-1))])
        return layer
    def policy(self, state):
        # Two shrinking dense layers, then a softmax over the 2 actions.
        layer = state
        for i in range(2):
            layer = tf.layers.dense(inputs=layer, units=32//(i+1), activation=tf.nn.relu, kernel_regularizer=self.regularizer)
        layer = tf.layers.dense(inputs=layer, units=2, activation=None, kernel_regularizer=self.regularizer)
        layer = tf.nn.softmax(layer, 1)
        layer = tf.clip_by_value(layer, 0.1, 0.9)
        return layer
    def compute_loss(self, pi, a, pi_old, advantage):
        # PPO clipped surrogate: min(r_t * A, clip(r_t, 0.8, 1.2) * A).
        a_one_hot = tf.one_hot(indices=a, depth=2, on_value=1.0, off_value=0.0)
        a_probs = tf.multiply(pi, a_one_hot)
        a_probs = tf.reduce_sum(a_probs, axis=1)
        rt = a_probs/pi_old
        clipped_loss = -tf.reduce_mean(tf.reduce_min([rt*advantage,
                                                      tf.clip_by_value(rt, 0.8, 1.2)*advantage]))
        return clipped_loss
    def build_model(self):
        feat = self.conv(self.state)
        self.pi = self.policy(feat)
        pi_loss = self.compute_loss(self.pi, self.actions, self.policy_old, self.advantage)
        #pi_loss = pi_loss + tf.losses.get_regularization_loss()
        self.pi_opt = tf.train.AdamOptimizer(self.learning_rate).minimize(pi_loss)
        self.sess = tf.Session()
        self.sess.run(tf.global_variables_initializer())
        self.saver = tf.train.Saver(max_to_keep=100)
    def get_pi(self, state):
        # Accepts a single state or a batch; returns Nx2 action probabilities.
        if len(state.shape) < 2:
            state = state[np.newaxis,...]
        return self.sess.run(self.pi, {self.state:state})
    def optimize(self, state, action, advantage, pi_old):
        self.sess.run(self.pi_opt, {self.state:state, self.actions:action,
                                    self.advantage:advantage, self.policy_old:pi_old})
# Module-level singletons: batched environment plus a pre-trained agent
# restored from the checkpoint matching the configured window size.
world = World_p()
agent = Agent()
agent.saver.restore(agent.sess, 'net_cd-rl-patch_size_%d/best'%C_state_size)
def one_step(epsilon=0.7):
    """Advance the global batched world by one epsilon-greedy step.

    epsilon is the probability of *keeping* the greedy (argmax-policy)
    action; with probability 1 - epsilon a uniform random action is used.
    Returns (state, action, reward, probability of the chosen action).
    """
    #print(world.N)
    state = world.get_state()
    #print(state.shape)
    policy = agent.get_pi(state) # policy: Nx2
    action = np.argmax(policy, axis=1) # action: N
    random_action = np.random.randint(0, 2, [len(action)])
    random_probs = np.random.random([len(action)])
    action[random_probs > epsilon] = random_action[random_probs > epsilon]
    reward = world.move(action)
    return state, action, reward, policy[np.arange(0, len(action)), action]
def episode_history(pos=10, max_step=300, epsilon=0.7):
    """Run one episode, recording the position after *every* step.

    Same as episode() except pt collects a per-step copy of world.pos
    instead of only the final positions.
    """
    pt, s_, a_, r_, p_ = [], [], [], [], []
    world.set_pos(pos)
    #pt.append(world.pos)
    for i in range(max_step):
        s, a, r, p = one_step(epsilon)
        s_.extend(s)
        a_.extend(a)
        r_.extend(r)
        p_.extend(p)
        pt.append(world.pos.copy()) # pos: N
    return pt, s_, a_, r_, p_
def episode(pos=10, max_step=300, epsilon=0.7):
    """Run one fixed-length episode from *pos*; return transitions + final positions."""
    pt, s_, a_, r_, p_ = [], [], [], [], []
    world.set_pos(pos)
    for i in range(max_step):
        s, a, r, p = one_step(epsilon)
        s_.extend(s)
        a_.extend(a)
        r_.extend(r)
        p_.extend(p)
    pt = world.pos # pos: N (only the final positions are returned)
    return pt, s_, a_, r_, p_
def explore(max_episode=10, max_step=300, epsilon=0.7, pos=None):
    """Run several episodes and concatenate their transitions.

    When ``pos`` is None a fresh random start is drawn for every episode.
    Fix: the original assigned the random draw back onto ``pos``, so only the
    first episode got a fresh random start and all later episodes silently
    reused it; a local variable keeps ``pos`` as the caller intended.
    """
    pt, s, a, r, p = [], [], [], [], []
    for e in range(max_episode):
        if pos is None:
            start = np.random.randint(10, 290, [world.N])  # new draw each episode
        else:
            start = pos
        pt_, s_, a_, r_, p_ = episode(start, max_step, epsilon)
        pt.append(pt_)
        s.extend(s_)
        a.extend(a_)
        r.extend(r_)
        p.extend(p_)
    return pt, s, a, r, p
def explore_multi_ims(ims, gts, max_episode=10, max_step=300, epsilon=0.7):
    """Collect transitions over many images, each repeated max_episode times.

    Images are mean-normalized, then processed in batches of at most
    world.N_max samples. Note: despite its name, rand_id is a plain arange
    (no shuffling is performed).
    """
    s, a, r, p = [], [], [], []
    n= len(ims)*max_episode
    ims_e = np.float32(np.repeat(ims, max_episode, 0))
    gts_e = np.repeat(gts, max_episode, 0)
    for i in range(len(ims_e)):
        ims_e[i] = ims_e[i]/np.mean(ims_e[i])  # per-sample mean normalization
    batch_size = float(world.N_max)
    rand_id = np.arange(0, n)
    for batch in range(np.int32(np.ceil(n / batch_size))):
        start, end = np.int32(batch * batch_size), np.int32((batch + 1) * batch_size)
        if end > n:
            end = n
        m = rand_id[start:end]
        world.set_world(ims_e[m], gts_e[m])
        _, s_, a_, r_, p_ = explore(1, max_step, epsilon)
        s.extend(s_)
        a.extend(a_)
        r.extend(r_)
        p.extend(p_)
    return s, a, r, p
def test_multi_ims(ims, gts, max_episode=1, max_step=300, epsilon=1.0, init_pos=150):
    """Evaluate the agent on a batch of images from a fixed starting position.

    epsilon=1.0 makes the rollout fully greedy (no random actions).
    Returns (final positions, rewards).
    """
    pt, r = [], []
    ims = np.float32(ims)
    for i in range(len(ims)):
        ims[i] = ims[i]/np.mean(ims[i])  # same normalization as training
    world.set_world(ims, gts)
    pos = np.repeat(np.array([init_pos]), len(ims), axis=0)
    pt, _, _, r, _ = episode(pos, max_step, epsilon)
    return pt, r
def test(images, init_pos=150):
    """Greedy inference for a single distance profile; returns its final position.

    The profile is duplicated to form a batch of two because World_p expects
    a batch; the gts values ([[30],[30]]) are dummies — rewards are ignored.
    (Name note: this shadows nothing here, but ``test`` is easy to confuse
    with a unit test.)
    """
    images = np.concatenate([images[np.newaxis,:], images[np.newaxis,:]], axis=0)
    gts = np.array([[30],[30]])
    y, _ = test_multi_ims(images, gts, init_pos=init_pos)
    return y
def rotate_3d_vector(v, phi,theta,psi ): # phi, theta, psi: rotation about x,y,z-axes
    """Rotate 3-vector *v* by intrinsic Euler angles phi/theta/psi (radians)."""
    cph, sph = np.cos(phi), np.sin(phi)
    cth, sth = np.cos(theta), np.sin(theta)
    cps, sps = np.cos(psi), np.sin(psi)
    # Combined Rz(psi) * Ry(theta) * Rx(phi) rotation matrix.
    rot = np.array([
        [cth * cps, -cph * sps + sph * sth * cps,  sph * sps + cph * sth * cps],
        [cth * sps,  cph * cps + sph * sth * sps, -sph * cps + cph * sth * sps],
        [-sth,       sph * cth,                    cph * cth],
    ])
    return np.matmul(rot, v[:, np.newaxis])[:, 0]
def refine_plane(n, seg, p):
    """Coarse-to-fine search for the plane normal minimizing the orifice area.

    Starting from normal *n* at point *p*, three progressively finer angle
    grids (+/-20 step 10, +/-10 step 5, +/-5 step 2) perturb the normal and
    keep the orientation whose sliced orifice has the smallest eigenvalue
    product (proxy for cross-sectional area).
    NOTE(review): the grid angles are integers that rotate_3d_vector feeds
    straight into cos/sin, i.e. they are interpreted as *radians*, not
    degrees — confirm this is intended.
    NOTE(review): the third stage perturbs n1 (stage-1 result) rather than
    n2 (stage-2 result) — possibly a typo.
    """
    #p = wps[y]
    #start = time.perf_counter()
    slice, slice_pts = get_slice(v=n, p=p, mask=seg)
    orifice = segment_orifice_in_slice(slice)
    eigval, eigvec, pt_mean = get_eigen(orifice, slice_pts)
    area_best = eigval[0]*eigval[1]
    area_init = area_best
    v_best = n
    for angle_x in range(-20, 21, 10):
        for angle_y in range(-20, 21, 10):
            for angle_z in range(-20, 21, 10):
                v = rotate_3d_vector(n, angle_x, angle_y, angle_z)
                slice, slice_pts = get_slice(v=v, p=p, mask=seg)
                orifice = segment_orifice_in_slice(slice)
                eigval, eigvec, pt_mean = get_eigen(orifice, slice_pts)
                area = eigval[0]*eigval[1]
                if area < area_best:
                    area_best = area
                    v_best = v
    n1 = v_best
    area_init1 = area_best
    for angle_x in range(-10, 11, 5):
        for angle_y in range(-10, 11, 5):
            for angle_z in range(-10, 11, 5):
                v = rotate_3d_vector(n1, angle_x, angle_y, angle_z)
                slice, slice_pts = get_slice(v=v, p=p, mask=seg)
                orifice = segment_orifice_in_slice(slice)
                eigval, eigvec, pt_mean = get_eigen(orifice, slice_pts)
                area = eigval[0]*eigval[1]
                if area < area_best:
                    area_best = area
                    v_best = v
    n2 = v_best
    area_init2 = area_best
    for angle_x in range(-5, 6, 2):
        for angle_y in range(-5, 6, 2):
            for angle_z in range(-5, 6, 2):
                v = rotate_3d_vector(n1, angle_x, angle_y, angle_z)
                slice, slice_pts = get_slice(v=v, p=p, mask=seg)
                orifice = segment_orifice_in_slice(slice)
                eigval, eigvec, pt_mean = get_eigen(orifice, slice_pts)
                area = eigval[0] * eigval[1]
                if area < area_best:
                    area_best = area
                    v_best = v
    #print(time.perf_counter() - start)
    #print(area_init, area_init1, area_init2, area_best)
    return v_best
def load_dicom_volume(dicom_dir, seed):
    """Load the DICOM series, set the crop-window globals, and denoise the ROI.

    Side effect: publishes xmin/xmax/ymin/ymax/zmin/zmax/X/Y/Z and vol as
    module globals used by denoise(), cd_walk() and others. The ROI is
    median-filtered twice.
    """
    global xmin, xmax, ymin, ymax, zmin, zmax, X, Y, Z, vol
    vol = load_dicom(dicom_dir)
    xmin, xmax, ymin, ymax, zmin, zmax, X, Y, Z = crop_points(seed)
    for i in range(2):
        denoise(vol)
    return vol
def get_key(args, key):
    """Return ``args[key]``, or None when the key is absent.

    Fix: the original wrapped the lookup in a bare ``except`` that silently
    swallowed *every* exception; ``dict.get`` expresses the same behavior
    for dict inputs explicitly and safely.
    """
    return args.get(key)
def fetch_args():
    """Collect "-name value" pairs from sys.argv into a {name: value} dict."""
    parsed = {}
    for idx, token in enumerate(sys.argv):
        if token[0] == '-':
            parsed[token[1:]] = sys.argv[idx + 1]
    return parsed
def process_args():
    """Populate the module globals dicom_dir, seed, threshold from the CLI.

    Fix: argv values are strings, so a user-supplied ``-threshold 0.2``
    previously stayed a str and broke ``threshold * np.max(geo)`` inside
    segment(); it is now converted to float.
    """
    global dicom_dir, seed, threshold
    args = fetch_args()
    dicom_dir = get_key(args, "dicom_dir")
    seed = get_key(args, "seed")
    threshold = get_key(args, "threshold")
    if threshold is None:
        threshold = 0.1
    else:
        threshold = float(threshold)  # argv values arrive as strings
    # "-seed [y,x,z]" -> integer numpy array.
    seed = seed.strip("[]").split(',')
    seed = [int(x) for x in seed]
    seed = np.array(seed)
def main():
    """End-to-end pipeline: load, segment, centerline, RL plane pick, refine."""
    print("initializing...")
    process_args()
    # Warm-up call on a tiny dummy volume — this appears to exist to trigger
    # numba JIT compilation of the geodesic passes before the real run.
    vol = np.ones(shape=[10, 10, 10]) * 255
    sd = np.array([4, 4, 4])
    seg, geo = segment(vol, sd, 0.1, sd[0] + 3, sd[0], sd[1] + 3, sd[1], sd[2] + 3, sd[2])
    print('loading dicom...')
    vol = load_dicom_volume(dicom_dir, seed)
    print('segmenting...')
    seg, geo = segment(vol, seed, threshold, xmax, xmin, ymax, ymin, zmax, zmin)
    print('computing centerline...')
    wps, cds, dt = cd_walk(seed, seg)
    print('RL agent navigating...')
    # The agent walks the centerline-distance profile to pick the waypoint index.
    y = test(cds, 290)[0]
    n, d = get_plane_normal(cp=y, wp=wps)
    print('Refining orifice plane...')
    # Axis order is swapped (y,x,z) -> (x,y,z) for the slicing routines.
    v_best = refine_plane(n=np.array([n[1], n[0], n[2]]), seg=seg, p=np.array([wps[y][1], wps[y][0], wps[y][2]]))
    slice, slice_pts = get_slice(v=v_best, p=np.array([wps[y][1], wps[y][0], wps[y][2]]), mask=seg)
    orifice = segment_orifice_in_slice(slice)
    eigval, eigvec, pt_mean = get_eigen(orifice, slice_pts)
    major, minor, maskover = get_axis_points(orifice, slice_pts, pt_mean, eigval, eigvec)
    print('major-axis:', major, 'minor-axis:', minor, 'center:', pt_mean)
if __name__ == '__main__':
    # Script entry point: run the full orifice-measurement pipeline.
    main()
| awjibon/laa-orifice | orifice.py | orifice.py | py | 23,785 | python | en | code | 0 | github-code | 13 |
17661254312 | #!/usr/bin/python
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.ticker import MaxNLocator
from collections import namedtuple
from matplotlib import ticker
n_groups = 6
# Placeholder row used until a filesystem's numbers are read from .data.
# (All keys initially alias this same list, but each is fully replaced below.)
default = [0, 0, 0, 0, 0, 0]
data = {
    'ext4': default,
    'xfs': default,
    'nova': default,
    'pmfs': default,
    'betrfs': default,
    'dcache': default,
    'flatfs': default
}
# .data rows: fs cold hot dotdot dot symlink mntpoint — note the dot/dotdot
# columns are deliberately swapped when building the plotted list below.
with open('.data') as f:
    for line in f.readlines():
        fs, cold_latency, hot_latency, dotdot_latency, dot_latency, symlink_latency, mntpoint_latency = line.strip().split()
        data[fs] = list(map(int, [cold_latency, hot_latency, dot_latency, dotdot_latency, symlink_latency, mntpoint_latency]))
print(data)
# cold-dcache, hot-dcache, dot, dot-dot, symlink, mntpoint
#ext4 = [28.2,6.6,24.8,28.4,34.2,35.0]
#ext4 = [247.5,5.8,244.4,239.6,234.8,14.5]
ext4 = data['ext4']
#xfs = [117.8,6.6,118.4,128.2,78.6,12.8]
#xfs = [117.2,6,119.7,125,121.1,15]
xfs = data['xfs']
#nova = [20.2,7.8,20,23.4,24.6,18.4]
#nova = [222.9,6,231.3,217.6,232.1,15.6]
nova = data['nova']
#pmfs = [19.8,6.4,17.6,19.6,19.6,18.6]
#pmfs = [21.8,5.8,21.5,23.8,23.8,13.4]
pmfs = data['pmfs']
#betrfs = [105,4.8,153,38,33.8,5.2]
betrfs = data['betrfs']
#dcache = [172,3.0,130.6,171,163.4,3.0]
dcache = data['dcache']
#flatfs = [18,4.8,6.4,5.8,18,13.2]
#flatfs = [8.8,6.7,8.7,8.4,12.7,15.2]
flatfs = data['flatfs']
fig, ax = plt.subplots()
fig.set_figwidth(10)
fig.set_figheight(4)
# One cluster of 7 hatched bars per workload group.
index = np.arange(n_groups)*0.9+0.1
index2 = np.arange(2)*0.8+0.1
bar_width = 0.1
line_width = 0.8
bar1 = ax.bar(index+bar_width*1.1, ext4, bar_width, linewidth=line_width, edgecolor='black', fill=False, hatch='..')
bar2 = ax.bar(index+bar_width*2.1, xfs, bar_width, linewidth=line_width, edgecolor='black', fill=False, hatch='\\\\')
bar3 = ax.bar(index+bar_width*3.1, nova, bar_width, linewidth=line_width, edgecolor='black', fill=False, hatch='++')
bar4 = ax.bar(index+bar_width*4.1, pmfs, bar_width, linewidth=line_width, edgecolor='black', fill=False, hatch='**')
bar5 = ax.bar(index+bar_width*5.1, betrfs, bar_width, linewidth=line_width, edgecolor='black', fill=False, hatch='---')
bar6 = ax.bar(index+bar_width*6.1, dcache, bar_width, linewidth=line_width, edgecolor='black', fill=False, hatch='xxx')
bar7 = ax.bar(index+bar_width*7.1, flatfs, bar_width, linewidth=line_width, edgecolor='black', fill=False, hatch='//')
#bar8 = ax.bar(index2+bar_width*8.1, flatfs_h, bar_width, linewidth=line_width, edgecolor='black', fill=False, hatch='')
font1 = {'size': '20','fontname':'Times New Roman'}
ax.set_ylabel('Latency ($\mu$s)', font1)
ytick=[0,50,100,150,200,250]
ax.set_yticks(ytick)
font2 = {'size': '14','fontname':'Times New Roman'}
ax.set_yticklabels(ytick, font2)
#formatter = ticker.ScalarFormatter(useMathText=True)
#formatter.set_scientific(True)
#formatter.set_powerlimits((-1,1))
#ax.yaxis.set_major_formatter(formatter)
xtick=np.arange(n_groups)
ax.set_xticks([])
plt.xlim(0,5.5)
font3 = {'size': '16','fontname':'Times New Roman'}
# Hand-placed x-axis group labels (x-ticks are suppressed above).
x1=0.5
y1=-15
ax.text(x1-0.4,y1,'cold-dcache',font3)
ax.text(x1+0.6,y1,'hot-dcache',font3)
ax.text(x1+1.7,y1,'dot',font3)
ax.text(x1+2.4,y1,'dot-dot',font3)
ax.text(x1+3.4,y1,'symlink',font3)
ax.text(x1+4.2,y1,'mntpoint',font3)
ax.spines['top'].set_visible(False)
ax.spines['right'].set_visible(False)
ax.spines['bottom'].set_linewidth(1)
ax.spines['left'].set_linewidth(1)
ax.yaxis.grid(True, color='grey', linewidth='0.2', linestyle='--')
# bbox_to_anchor (x, y, width, height)
ax.legend(('Ext4', 'XFS', 'NOVA', 'PMFS', 'BetrFS', 'VFS-opt', 'FlatFS'),
          bbox_to_anchor=(-0.13, 0.11, 1.13, 1), loc=1, ncol=7, mode="expand", borderaxespad=0.,edgecolor='None',
          prop={'size': 16, 'family': 'Times New Roman'},handletextpad=0.2)
#fig.tight_layout()
plt.show()
#plt.savefig('/home/miaogecm/Desktop/pw_effiency.pdf', dpi=fig.dpi)
| miaogecm/FlatFS | evaluation/path_walk_efficiency/plot.py | plot.py | py | 3,843 | python | en | code | 20 | github-code | 13 |
16610693647 | #!/usr/bin/env python3
"""
Training script for xview challenge
"""
__author__ = "Rohit Gupta"
__version__ = "dev"
__license__ = None
from utils import load_xview_metadata
from utils import read_labels_file
from utils import labels_to_segmentation_map, labels_to_bboxes
from utils import colors
from torchvision.models.segmentation import deeplabv3_resnet50
from torchvision import transforms
import torch
from PIL import Image
import numpy as np
# μ and σ for xview dataset
MEANS = [0.309, 0.340, 0.255]
STDDEVS = [0.162, 0.144, 0.135]
# PIL -> normalized tensor using the dataset statistics above.
pil_to_tensor = transforms.Compose([transforms.ToTensor(),
                                    transforms.Normalize(MEANS, STDDEVS)
                                    ])
# Plain to-tensor transform; defined but not used below.
preprocess = transforms.Compose([transforms.ToTensor()])
# 5-class DeepLabV3 head, randomly initialized (pretrained=False).
semseg_model = deeplabv3_resnet50(pretrained=False,
                                  progress=True,
                                  num_classes=5,
                                  aux_loss=None)
print(semseg_model)
# Read metadata
xview_root = "/home/rohitg/data/xview/"
train_data, test_data = load_xview_metadata(xview_root)
# Random example
# random_key = "hurricane-michael_00000083"
# random_key = "palu-tsunami_00000097"
for key, metadata in train_data.items():
    # Pre Disaster Image
    file = train_data[key]["pre_label_file"]
    labels_data = read_labels_file(file)
    segmap_np = labels_to_segmentation_map(labels_data)
    segmap = torch.from_numpy(segmap_np)
    image_file = train_data[key]["pre_image_file"]
    im_tensor = pil_to_tensor(Image.open(image_file))
    # print(np.argmax(segmap_np, axis=0))
    # bboxes, labels = labels_to_bboxes(labels_data)
    # bboxes = torch.from_numpy(np.array(bboxes))
    # labels = torch.from_numpy(np.array(labels))
    input_batch = im_tensor.unsqueeze(0)
    if torch.cuda.is_available():
        input_batch = input_batch.to('cuda')
        semseg_model.to('cuda')
    # Inference only — this script sanity-checks data loading, not training.
    semseg_model.eval()
    with torch.no_grad():
        output = semseg_model(input_batch)['out'][0]
    output_predictions = output.argmax(0)
    # print("labels shape:", labels.shape)
    # print("bboxes shape:", bboxes.shape)
    # print("im shape:", im.shape)
    print("input shape:", input_batch.shape)
    print("segmap shape:", segmap.shape)
    # print("labels dtype:", labels.dtype)
    # print("bboxes dtype:", bboxes.dtype)
    # print("im dtype:", im.dtype)
    print("input dtype:", input_batch.dtype)
    print("segmap dtype:", segmap.dtype)
    print(np.sum(np.equal(segmap_np,segmap.byte().cpu().numpy())))
    print(segmap.argmax(0))
    print(segmap.argmax(0).byte().cpu().numpy())
    # Visualize the one-hot segmentation map via a paletted image.
    r = Image.fromarray(torch.max(segmap, 0).indices.byte().cpu().numpy())
    r.putpalette(colors)
    r.save("saved_tensor.png")
    # NOTE(review): im_tensor is a normalized CHW float tensor; feeding it to
    # Image.fromarray may not yield a valid image — verify this debug output.
    r = Image.fromarray(im_tensor.byte().cpu().numpy())
    r.save("saved_tensor_img.png")
    # r = Image.fromarray(4 - segmap.argmax(0).byte().cpu().numpy())
    # r.putpalette(colors)
    # r.save("saved_tensor_flipped.png")
    # Post Disaster Image
    # random_file = train_data[random_key]["post_label_file"]
    # labels_data = read_labels_file(random_file)
    # image_file = train_data[random_key]["post_image_file"]
    # segmap = labels_to_segmentation_map(labels_data)
    # bboxes, labels = labels_to_bboxes(labels_data)
    # bboxes, labels = np.array(bboxes), np.array(labels)
    # im = Image.open(image_file)
| rohit-gupta/building-damage-assessment | scratch/dataset_tests.py | dataset_tests.py | py | 3,243 | python | en | code | 0 | github-code | 13 |
1538158363 | import tkinter as tk
class NameFrame:
    """Simple name-entry widget: a labelled Entry plus Ok/Exit buttons.

    ok_callback is invoked with the entered text when Ok is pressed;
    exit_callback is wired directly to the Exit button.
    """
    def __init__(self, master, ok_callback, exit_callback, **kwargs):
        self._frame = tk.Frame(master, **kwargs)
        self._frame.pack(padx=5, pady=5)
        # Top row: label + text entry; bottom row: buttons.
        self._top_frame = tk.Frame(self._frame)
        self._top_frame.pack(side="top", pady=5)
        self._bot_frame = tk.Frame(self._frame)
        self._bot_frame.pack(side="bottom", pady=5)
        self._label = tk.Label(self._top_frame, text="Name: ", font=("Helvetica", 14, "bold"))
        self._label.pack(side="left")
        self._text_input = tk.Entry(self._top_frame, font=("Helvetica", 14, "bold"))
        self._text_input.pack(side="left", padx=5)
        self._exit_button = tk.Button(self._bot_frame, text="Exit", command=exit_callback, font=("Helvetica", 14, "bold"))
        self._exit_button.pack(side="right", padx=5)
        self._ok_button = tk.Button(self._bot_frame, text="Ok", command=self._ok_button_pressed, font=("Helvetica", 14, "bold"))
        self._ok_button.pack(side="right", padx=5)
        self._ok_callback = ok_callback
    def _ok_button_pressed(self):
        # Forward the current entry text to the caller-supplied callback.
        self._ok_callback(self._text_input.get())
def name_window(window: tk.Tk, ok_callback, exit_callback):
    """Clear *window* and show the name-entry frame in a fixed 300x100 layout."""
    # Tear down whatever the window currently shows before rebuilding it.
    for child in window.winfo_children():
        child.destroy()
    window.resizable(False, False)
    NameFrame(window, ok_callback, exit_callback)
    window.geometry("300x100+200+200")
| AnttiVainikka/DistributedProject | src/gui/name.py | name.py | py | 1,406 | python | en | code | 0 | github-code | 13 |
42436729976 | """
The data source is https://www.kaggle.com/datasets/amananandrai/ag-news-classification-dataset?resource=download&select=train.csv \
It is saved in this directory by **'train_original'** and **'test_original.csv'**
"""
from datasets import load_dataset
import pandas as pd
from tqdm import tqdm
import os
def preprocess(text: str) -> str:
    """Normalize a raw news field: literal "\\n" sequences and stray
    backslashes become spaces; an empty field becomes a single space.

    Fix: the original wrote ``" " if text == "" else text`` as a bare
    expression, discarding the result, so empty fields stayed empty.
    """
    # .strip("") is a no-op (empty strip-set); kept for fidelity with the data.
    text = text.replace("\\n", " ").replace("\\", " ").strip("")
    return " " if text == "" else text
# Re-encode the AG News CSVs: shift labels from 1-based to 0-based and
# (train only) drop articles whose body exceeds 4000 characters.
train_original = pd.read_csv("train_original.csv")
test_original = pd.read_csv("test_original.csv")
print("----Processing Train----")
train = []
for idx in tqdm(range(train_original.shape[0])) :
    line = train_original.loc[idx].to_list()
    # Tuple-unpack the remaining two columns as (title, body).
    label, (title, body) = int(line[0]-1), line[1:] # The original first class was mapped to 1 not 0
    if len(body) > 4000 : # limit length of input
        continue
    line = [label] + [title] + [body]
    train.append(line)
train = pd.DataFrame(train)
train.to_csv('train.csv', header=False, index=False)
print("----Processing Test----")
test = []
for idx in tqdm(range(test_original.shape[0])) :
    line = test_original.loc[idx].to_list()
    label, (title, body) = int(line[0]-1), line[1:] # The original first class was mapped to 1 not 0
    # Note: no length filter is applied to the test split.
    line = [label] + [title] + [body]
    test.append(line)
test = pd.DataFrame(test)
test.to_csv('test.csv', header=False, index=False)
70427179537 | import unittest
from solutions.day_11 import Solution
class Day11TestCase(unittest.TestCase):
    """Advent of Code 2020 day 11 ("Seating System") unit tests."""
    def setUp(self):
        self.solution = Solution()
        self.puzzle_input = self.solution.parse_input(
            """
            L.LL.LL.LL
            LLLLLLL.LL
            L.L.L..L..
            LLLL.LL.LL
            L.LL.LL.LL
            L.LLLLL.LL
            ..L.L.....
            LLLLLLLLLL
            L.LLLLLL.L
            L.LLLLL.LL
            """.strip()
        )
    def test_parse_puzzle_input(self):
        data = """
        L.LL.LL.LL
        LLLLLLL.LL
        """.strip()
        expected = [
            ["L", ".", "L", "L", ".", "L", "L", ".", "L", "L"],
            ["L", "L", "L", "L", "L", "L", "L", ".", "L", "L"],
        ]
        assert self.solution.parse_input(data) == expected
    def test_adjacent(self):
        # get_adjacent returns ((dx, dy), value) pairs, clipped at the edges.
        data = [[1, 2, 3, 4], [11, 12, 13, 14], [21, 22, 23, 24], [31, 32, 33, 34]]
        assert self.solution.get_adjacent(data, 1, 1) == [
            ((-1, -1), 1),
            ((0, -1), 2),
            ((1, -1), 3),
            ((-1, 0), 11),
            ((1, 0), 13),
            ((-1, 1), 21),
            ((0, 1), 22),
            ((1, 1), 23),
        ]
        assert self.solution.get_adjacent(data, 0, 0) == [
            ((1, 0), 2),
            ((0, 1), 11),
            ((1, 1), 12),
        ]
        assert self.solution.get_adjacent(data, 3, 3) == [
            ((-1, -1), 23),
            ((0, -1), 24),
            ((-1, 0), 33),
        ]
        assert self.solution.get_adjacent(data, 3, 0) == [
            ((0, -1), 21),
            ((1, -1), 22),
            ((1, 0), 32),
        ]
        assert self.solution.get_adjacent(data, 0, 3) == [
            ((-1, 0), 3),
            ((-1, 1), 13),
            ((0, 1), 14),
        ]
    def test_occupy_if_empty(self):
        data = ["L", "L", "L", "L", "L", "L"]
        data_no = ["#", "L", "L", "L", "L", "L"]
        assert self.solution.occupy_if_empty("L", data) == "#"
        assert self.solution.occupy_if_empty("#", data) == "#"
        assert self.solution.occupy_if_empty("L", data_no) == "L"
    def test_empty_if_occupied(self):
        data = ["L", "L", "L", "L", "L", "L"]
        data_no = ["L", "L", "#", "#", "#", "#"]
        data_no2 = ["L", "#", "#", "#", "#", "#"]
        assert self.solution.empty_if_occupied("L", data) == "L"
        assert self.solution.empty_if_occupied("#", data_no) == "L"
        assert self.solution.empty_if_occupied("#", data_no2) == "L"
        assert self.solution.empty_if_occupied("#", data) == "#"
    def test_tick(self):
        # Part-two rules: line-of-sight neighbors, tolerance of 5.
        data_1 = self.solution.parse_input(
            """
            L.LL.LL.LL
            LLLLLLL.LL
            L.L.L..L..
            LLLL.LL.LL
            L.LL.LL.LL
            L.LLLLL.LL
            ..L.L.....
            LLLLLLLLLL
            L.LLLLLL.L
            L.LLLLL.LL
            """.strip()
        )
        data_2 = self.solution.parse_input(
            """
            #.##.##.##
            #######.##
            #.#.#..#..
            ####.##.##
            #.##.##.##
            #.#####.##
            ..#.#.....
            ##########
            #.######.#
            #.#####.##
            """.strip()
        )
        data_3 = self.solution.parse_input(
            """
            #.LL.LL.L#
            #LLLLLL.LL
            L.L.L..L..
            LLLL.LL.LL
            L.LL.LL.LL
            L.LLLLL.LL
            ..L.L.....
            LLLLLLLLL#
            #.LLLLLL.L
            #.LLLLL.L#
            """.strip()
        )
        assert self.solution.tick(data_1, 10, 10, tolerance=5, in_view=True) == data_2
        assert self.solution.tick(data_2, 10, 10, tolerance=5, in_view=True) == data_3
    def test_solve_first_part(self):
        assert self.solution.solve(self.puzzle_input) == 37
    def test_solve_second_part(self):
        assert self.solution.solve_again(self.puzzle_input) == 26
if __name__ == "__main__":
unittest.main()
| madr/julkalendern | 2020-python/tests/test_day_11.py | test_day_11.py | py | 3,615 | python | en | code | 3 | github-code | 13 |
35472032233 | import numpy as np
from .gellmann import gellmann_basis_to_dm, dm_to_gellmann_basis
def get_numpy_rng(np_rng_or_seed_or_none):
    """Coerce None / integer seed / Generator into a numpy Generator.

    None -> a fresh default_rng(); an existing Generator is returned
    unchanged; anything else is treated as an integer seed.
    """
    if isinstance(np_rng_or_seed_or_none, np.random.Generator):
        return np_rng_or_seed_or_none
    if np_rng_or_seed_or_none is None:
        return np.random.default_rng()
    return np.random.default_rng(int(np_rng_or_seed_or_none))
# not a public api
def _random_complex(*size, seed=None):
    # Internal helper: i.i.d. standard-normal real/imag pairs viewed as complex128.
    rng = get_numpy_rng(seed)
    pairs = rng.normal(size=size + (2,)).astype(np.float64, copy=False)
    return pairs.view(np.complex128).reshape(size)
def rand_haar_state(N0, seed=None):
    """Draw a Haar-random pure state: a unit-norm complex vector of length N0."""
    # http://www.qetlab.com/RandomStateVector
    vec = _random_complex(N0, seed=seed)
    vec /= np.linalg.norm(vec)
    return vec
def rand_haar_unitary(N0, seed=None):
    """Draw a Haar-random N0 x N0 unitary via QR of a Ginibre matrix."""
    # http://www.qetlab.com/RandomUnitary
    # https://pennylane.ai/qml/demos/tutorial_haar_measure.html
    q, r = np.linalg.qr(_random_complex(N0, N0, seed=seed))
    # Fix QR's phase ambiguity so the distribution is exactly Haar.
    phases = np.sign(np.diag(r).real)
    phases[phases == 0] = 1
    return q * phases
def rand_bipartitle_state(N0, N1=None, k=None, seed=None, return_dm=False):
    """Random bipartite pure state on C^N0 (x) C^N1, optionally rank-limited.

    With k=None a plain Haar state of dimension N0 is drawn; otherwise a
    state of Schmidt rank k is built from two random orthonormal frames.
    return_dm=True returns the projector |psi><psi| instead of the vector.
    (Name note: "bipartitle" is a historical typo for "bipartite", kept for
    API compatibility.)
    """
    # http://www.qetlab.com/RandomStateVector
    np_rng = get_numpy_rng(seed)
    if N1 is None:
        N1 = N0
    if k is None:
        ret = rand_haar_state(N0, np_rng)
    else:
        assert (0<k) and (k<=N0) and (k<=N1)
        # First k columns of Haar-random unitaries = random orthonormal frames.
        tmp0 = np.linalg.qr(_random_complex(N0, N0, seed=np_rng), mode='complete')[0][:,:k]
        tmp1 = np.linalg.qr(_random_complex(N1, N1, seed=np_rng), mode='complete')[0][:,:k]
        tmp2 = _random_complex(k, seed=np_rng)
        tmp2 /= np.linalg.norm(tmp2)  # random Schmidt coefficients, unit norm
        ret = ((tmp0*tmp2) @ tmp1.T).reshape(-1)
    if return_dm:
        ret = ret[:,np.newaxis] * ret.conj()
    return ret
def rand_density_matrix(N0, k=None, kind='haar', seed=None):
    """Random N0 x N0 density matrix of rank at most k (default full rank).

    kind='haar' samples from the Hilbert-Schmidt ensemble (Ginibre G G^dag);
    kind='bures' applies the (U + I) twist giving the Bures measure.
    """
    # http://www.qetlab.com/RandomDensityMatrix
    np_rng = get_numpy_rng(seed)
    assert kind in {'haar','bures'}
    if k is None:
        k = N0
    if kind=='haar':
        ginibre_ensemble = _random_complex(N0, k, seed=np_rng)
    else:
        tmp0 = _random_complex(N0, k, seed=np_rng)
        ginibre_ensemble = (rand_haar_unitary(N0, seed=np_rng) + np.eye(N0)) @ tmp0
    # G G^dag is positive semidefinite; normalize to unit trace.
    ret = ginibre_ensemble @ ginibre_ensemble.T.conj()
    ret /= np.trace(ret)
    return ret
def rand_separable_dm(N0, N1=None, k=2, seed=None):
    """Random separable density matrix on C^N0 (x) C^N1: a random convex
    mixture of k product states rho_A (x) rho_B.

    Fix: the N1=None default was previously passed straight into
    rand_density_matrix and crashed; it now defaults to N0, matching the
    convention of rand_bipartitle_state.
    """
    np_rng = get_numpy_rng(seed)
    if N1 is None:
        N1 = N0
    # Random mixing weights, normalized to a probability vector.
    probability = np_rng.uniform(0, 1, size=k)
    probability /= probability.sum()
    ret = 0
    for ind0 in range(k):
        tmp0 = rand_density_matrix(N0, kind='haar', seed=np_rng)
        tmp1 = rand_density_matrix(N1, kind='haar', seed=np_rng)
        ret = ret + probability[ind0] * np.kron(tmp0, tmp1)
    return ret
def random_near_dm_direction(dm, theta=0.05, seed=None):
    """Perturb a density matrix's direction in Gell-Mann coordinate space.

    The Gell-Mann vector of *dm* is normalized, nudged by a uniform
    perturbation of size theta per coordinate, re-normalized, and mapped
    back. Note the result has a *unit-norm* coordinate vector, not the
    original's norm.
    """
    np_rng = get_numpy_rng(seed)
    tmp0 = dm_to_gellmann_basis(dm)
    tmp1 = tmp0 / np.linalg.norm(tmp0)
    tmp2 = tmp1 + np_rng.uniform(-theta, theta, size=tmp1.shape)
    tmp3 = tmp2 / np.linalg.norm(tmp2)
    ret = gellmann_basis_to_dm(tmp3)
    return ret
def random_hermite_matrix(dim, seed=None):
    """Draw a random dim x dim Hermitian matrix: (G + G^dag) / 2 for complex
    Gaussian G (unnormalized GUE-style sample)."""
    rng = get_numpy_rng(seed)
    g = rng.normal(size=(dim, dim)) + 1j * rng.normal(size=(dim, dim))
    return (g + g.T.conj()) / 2
| Sunny-Zhu-613/pureb-public | python/pyqet/random.py | random.py | py | 3,322 | python | en | code | 0 | github-code | 13 |
35383595791 | from django.shortcuts import render
from .models import Salesperson, Branch, profit, customer
from django.template.defaultfilters import floatformat
from django.db.models import Sum, Count
from django.http import JsonResponse
# Create your views here.
def company(request):
    """Render the company dashboard: this-year vs last-year sale totals
    (SNSALE vs SOSALE) for each tracked month, summed over all branches."""
    context = {}
    # (SMON value, label used in the template context keys)
    for month, label in ((6, 'July'), (5, 'May'), (4, 'April'), (3, 'March')):
        month_rows = Branch.objects.filter(SMON=month)
        context['totalthis%s' % label] = month_rows.aggregate(v=Sum('SNSALE'))['v']
        context['totalpast%s' % label] = month_rows.aggregate(v=Sum('SOSALE'))['v']
    return render(request, 'company.html', context)
def sales(request):
    """Render the per-salesperson dashboard for the name given in the query string."""
    salesperson = request.GET.get('salesperson')
    # Fetch the matching salesperson record from the database.
    salesperson_data = Salesperson.objects.get(SName=salesperson)
    return render(request, 'sales.html', {'salesperson': salesperson_data})
# def sales_view(request):
# salespersons = Salesperson.objects.all().order_by('SID')
# context = {'salespersons': salespersons}
# return render(request, 'sales.html', context)
def salesindex_view(request):
    """Render the salesperson index, ordered by salesperson ID."""
    salespersons = Salesperson.objects.all().order_by('SID')
    context = {'salespersons': salespersons}
    return render(request, 'salesindex.html', context)
def salesindex(request):
    # Bare template render; salesindex_view above supplies the data-backed variant.
    return render(request, 'salesindex.html')
# Hard-coded to June and branch B001 (Zhongli Zhongyuan) for now; later the BID should come from whichever branch page is opened.
def customer_view(request):
    """Render customer demographics for branch B001, month 6 (both hard-coded).

    Computes: new-customer count, most common demand description, most
    common referral channel, and the age-bracket histogram with its total.
    """
    Newcustomer = customer.objects.filter(CMON="6", BID="B001").count()
    # Most frequent demand description for this branch.
    Perfer = customer.objects.filter(BID="B001").values('CDemand_description').annotate(count=Count('CDemand_description')).order_by('-count')[0]['CDemand_description']
    # Most frequent "how did you hear about us" answer.
    Recommend = customer.objects.filter(BID="B001").values('CHow').annotate(count=Count('CHow')).order_by('-count')[0]['CHow']
    age1 = customer.objects.filter(CAge_range="20-29", BID="B001").count()
    age2 = customer.objects.filter(CAge_range="30-39", BID="B001").count()
    age3 = customer.objects.filter(CAge_range="40-49", BID="B001").count()
    age4 = customer.objects.filter(CAge_range="50-59", BID="B001").count()
    age5 = customer.objects.filter(CAge_range="60以上", BID="B001").count()
    sum = age1 + age2 + age3 + age4 + age5
    return render(
        request,
        'customer.html', {
            'Newcustomer': Newcustomer,
            'Perfer': Perfer,
            'Recommend': Recommend,
            'age1': age1,
            'age2': age2,
            'age3': age3,
            'age4': age4,
            'age5': age5,
            'sum': sum,
        }
    )
def customer1_view(request):
    # Static customer sub-page 1 (template only, no context).
    return render(request, 'customer1.html')
def customer2_view(request):
    # Static customer sub-page 2 (template only, no context).
    return render(request, 'customer2.html')
def customer3_view(request):
    # Static customer sub-page 3 (template only, no context).
    return render(request, 'customer3.html')
def get_salesperson_data(request):
    """Return one salesperson's monthly KPI numbers as JSON.

    Looks the salesperson up by the ?salesperson= query argument.
    Raises Salesperson.DoesNotExist (-> 500) for unknown names, matching the
    original behavior.
    """
    salesperson_name = request.GET.get('salesperson')
    salesperson = Salesperson.objects.get(SName=salesperson_name)
    # Pick the label of the best-selling product category; max() keeps the
    # first entry on ties, matching the original if/elif ordering.
    category_totals = [
        ("經濟實惠型", salesperson.SM1),
        ("實用按摩型", salesperson.SM2),
        ("高級豪華型", salesperson.SM3),
    ]
    max_name = max(category_totals, key=lambda pair: pair[1])[0]
    # Guard against a zero quota target (the original raised ZeroDivisionError).
    achievement_rate = (salesperson.SQ / salesperson.STQ) * 100 if salesperson.STQ else 0
    data = {
        'SR': str(salesperson.SR),
        'SQ': str(salesperson.SQ),
        'achievement_rate': format(achievement_rate, '.2f'),
        'max_name': max_name,
        'SARR': str(salesperson.SARR),
        'SLE': str(salesperson.SLE),
        'SM1': str(salesperson.SM1),
        'SM2': str(salesperson.SM2),
        'SM3': str(salesperson.SM3)
    }
    return JsonResponse(data)
def branch(request):
    """Render the branch landing page."""
    template_name = 'branch.html'
    return render(request, template_name)
def branch_view(request, branch):
    """Render the monthly dashboard for one branch (month hard-coded to '5').

    Builds salesperson lists, achievement percentages, the new/old customer
    split and the 2022-vs-2023 profit series for branch.html.
    """
    # SID -> display-name lookup for salespeople.
    name_mapping = {
        'S001': '潘於新',
        'S002': '江姜好',
        'S003': '邱汪明',
        'S004': '邱曉愈',
        'S005': '劉心瑀',
        'S006': '劉心成',
        'S007': '李冠郁',
        'S008': '黃盛餘',
        'S009': '黃新衣',
        'S010': '陳大賀',
        'S011': '汪曉明',
        'S012': '陳一新',
    }
    branches = list(Branch.objects.filter(BID=branch, SMON='5').order_by('SID'))
    sids = [row.SID for row in branches]
    sacs = [row.SAc for row in branches]
    snsales = [row.SNSALE for row in branches]
    names = [name_mapping.get(sid, '') for sid in sids]
    # Bug fix: the original read the loop variable after the loop, which
    # raises NameError when the queryset is empty.
    branch_name = branches[-1].BName if branches else ""
    sac_sum = sum(sacs)
    stc_sum = Branch.objects.filter(BID=branch, SMON='5').aggregate(stc_sum=Sum('STc'))['stc_sum']
    # Treat a missing OR zero target as "no data" (zero previously divided by zero).
    if not stc_sum:
        return render(request, 'branch.html', {'branch_code': branch})
    achieved_percent = int((sac_sum / stc_sum) * 100)
    not_achieved_percent = 100 - achieved_percent
    new_sum = 0
    old_sum = 0
    branch_obj = Branch.objects.filter(BID=branch).first()
    if branch_obj:
        new_sum = branch_obj.SNew
        old_sum = branch_obj.SOld
    # Guard the new/old split against an empty customer base.
    customer_total = new_sum + old_sum
    new_percent = new_sum / customer_total if customer_total else 0
    old_percent = old_sum / customer_total if customer_total else 0
    # NOTE(review): .first() returns None when no profit row exists; list(None)
    # would raise here, same as the original -- confirm rows always exist.
    data3 = list(profit.objects.filter(BID=branch, year=2022).values_list('one', 'two', 'three', 'four', 'five', 'six').first())
    data4 = list(profit.objects.filter(BID=branch, year=2023).values_list('one', 'two', 'three', 'four', 'five', 'six').first())
    # NOTE(review): the +12 offset on June's difference is kept from the
    # original -- confirm it is intentional.
    diff_6 = data4[5] - data3[5] + 12
    diff_5 = data4[4] - data3[4]
    diff_4 = data4[3] - data3[3]
    diff_3 = data4[2] - data3[2]
    diff_2 = data4[1] - data3[1]
    context = {
        'branch_code': branch,
        'names': names,
        'branch_name': branch_name,
        'sacs': sacs,
        'achieved_percent': achieved_percent,
        'not_achieved_percent': not_achieved_percent,
        'new_percent': new_percent,
        'old_percent': old_percent,
        'data3': data3,
        'data4': data4,
        'diff_6': diff_6,
        'diff_5': diff_5,
        'diff_4': diff_4,
        'diff_3': diff_3,
        'diff_2': diff_2,
        'sids_array': sids,
        'snsales_array': snsales,
    }
    return render(request, 'branch.html', context)
def chair(request):
    """Render the massage-chair page."""
    return render(request, 'chair.html') | 10944146/SE-final | finalapp/views.py | views.py | py | 7,473 | python | en | code | 0 | github-code | 13
class Solution:
    def leaders(self, arr):
        """Scan arr right-to-left tracking the running maximum.

        NOTE(review): maxval is appended on EVERY iteration, so this returns
        the full suffix-maximum sequence (in reverse array order), not only
        the leader elements -- confirm against the problem statement.
        """
        n=len(arr)
        leaders=list()
        # Running maximum of the suffix seen so far.
        maxval = float('-inf')
        for i in reversed(range(0, n)):
            if arr[i]>=maxval:
                maxval = arr[i]
            leaders.append(maxval)
        return leaders | Roy263/SDE-Sheet | Leaders In array/leaderFromRight.py | leaderFromRight.py | py | 281 | python | en | code | 0 | github-code | 13
74525535058 | # -*- coding: utf-8 -*-
"""
Created on Wed Oct 24 13:05:05 2018
@author: Hugo
"""
# Ask the user for a number and accumulate the sum of its divisors.
print('This program is used to calculate the sum of the divisiors of a certain number')
n = int(input('Introduce that number: '))
divisors = sum(d for d in range(1, n + 1) if n % d == 0)
print(divisors) | Hugomguima/FEUP | 1st_Year/1st_Semestre/Fpro/Python/saved files/question2.py | question2.py | py | 310 | python | en | code | 0 | github-code | 13 |
# Fixed-answer guessing game: the user narrows in on 37 with high/low hints.
ans , guess = 37 , 0
# NOTE(review): these names shadow the built-in max()/min() for the rest
# of the module.
max , min = 100 , 1
while ans != guess:
    # Prompt shows the current known bounds, e.g. "1~100>> ".
    guess = int((input(str(min)+"~"+str(max)+">> ")))
    if guess > ans:
        max = guess
        print("太大了")
    elif guess < ans:
        min = guess
        print("太小了")
print("讚啦")
# Random-answer variant: guess a number in [1, 100] with input validation.
import random
from random import randint as rdt
guess , ans = 0, rdt(1,100)
# Current lower/upper bounds shown in the prompt.
# NOTE(review): l starts at 0 although the answer is drawn from [1, 100].
l , h = 0 , 100
while ans != guess:
    try:
        guess = int(input(str(l)+"~"+str(h)+">>"))
    # NOTE(review): bare except -- also swallows KeyboardInterrupt/EOFError.
    except:
        print("請輸入正確的數字")
        continue
    if guess < l or guess > h:
        print("請輸入正確區間的數字")
        continue
    elif ans < guess:
        h = guess
        print("太大了")
    elif ans > guess:
        l = guess
        print("太小了")
    elif ans == guess:
        break
print("恭喜")
| Delocxi/python-workspace | guessAnswer.py | guessAnswer.py | py | 794 | python | en | code | 0 | github-code | 13 |
35345174334 | import os
import shutil
class make_all_folders(object):
    """Creates (and deletes) the folder layout used by the art pipeline."""

    def __init__(self):
        """Initialize by creating the temp folder; every other script relies on it."""
        self.make_temp()

    def make_temp(self):
        """Create the <home>/chips temp folder and return its path.

        The path is returned so other scripts never hard-code the location.
        ***Does not delete the folder***
        """
        # Windows ('nt') and POSIX expose the home directory differently.
        if os.name == 'nt':
            location_of_home = os.path.expanduser("~")
        else:
            location_of_home = os.getenv("HOME")
        temp_location = os.path.join(location_of_home, "chips")
        self.makeFolders(temp_location)
        return temp_location

    def sound_export_folder(self, sound_folder):
        """Ensure the parent folder of *sound_folder* exists.

        Bug fix: os.path.join was called with a single tuple argument
        ``os.path.join((sound_folder, ".."))`` which raises TypeError; the
        components are now passed as separate arguments.
        """
        converted_sound_folder = os.path.realpath(os.path.join(sound_folder, ".."))
        self.makeFolders(converted_sound_folder)

    def setupSlotsArtFolders(self, slotsFolder, gameName):
        """Recreate the slots-game art template folders under slotsFolder/gameName."""
        self.removeFolders(os.path.join(slotsFolder, gameName))
        foldersToMake = ['Achievements', "cityBackgrounds", "cityTitle", "etc",
                         "Facebook", 'Postcards', 'scatter', 'slotsBigWheel',
                         'slotsSymbols', 'slotsUI', 'trophy', "backgrounds", "Movs"]
        for artFolder in foldersToMake:
            self.makeFolders(os.path.join(slotsFolder, gameName, artFolder))

    def makeFolders(self, folderToMake):
        """Create folderToMake (and parents) if it does not already exist."""
        if not os.path.exists(folderToMake):
            os.makedirs(folderToMake)

    def removeFolders(self, folderToDelete):
        """Recursively delete folderToDelete and EVERYTHING under it."""
        if os.path.exists(folderToDelete):
            shutil.rmtree(folderToDelete, ignore_errors=True)
| underminerstudios/ScriptBackup | FlashArtPipeline/art_pipeline/ExternalCalls/make_folders.py | make_folders.py | py | 2,380 | python | en | code | 2 | github-code | 13 |
42274469179 | from flask import (
Blueprint, flash, g, redirect, render_template, request, session, url_for
)
from werkzeug.exceptions import abort
from runmetric.auth import login_required
from runmetric.models.database.run import Run
bp = Blueprint('activities', __name__, url_prefix='/activities')
@bp.route('/create', methods=('POST','GET'))
@login_required
def create():
    """Activity-creation endpoint (login required).

    NOTE(review): the POST branch only flashes and falls through, so the view
    returns None -- Flask treats that as an error. Confirm a redirect or
    template render is intended after the flash.
    """
    if request.method == 'POST':
        flash('Hello POST')
    elif request.method == 'GET':
        return 'HELLO GET' | wtbarras/AthMetric | runmetric/activities.py | activities.py | py | 493 | python | en | code | 0 | github-code | 13
28497607276 | import unittest
import boto3
import pandas as pd
from moto import mock_s3
from datetime import datetime, timedelta
from io import StringIO
from xetra.common.constants import MetaProcessFormat
from xetra.common.meta_process import MetaProcess
from xetra.common.s3 import S3BucketConnector
class TestMetaProcessMethods(unittest.TestCase):
    """
    Testing the MetaProcess class against a moto-mocked S3 bucket.
    """

    def setUp(self):
        """
        Setting up the environment: start the S3 mock and create the bucket
        plus the S3BucketConnector under test.
        """
        # Mock s3 connection
        self.mock_s3 = mock_s3()
        self.mock_s3.start()
        # Defining class arguments
        self.s3_endpoint_url = 'https://s3.eu-central1-1.amazonaws.com'
        self.s3_bucket_name = 'test-bucket'
        self.profile_name = 'UnitTest'
        # Access aws using boto 3 and a profile name dedicated for testing
        session = boto3.session.Session(profile_name='UnitTest')
        # Create a bucket on s3
        self.s3 = session.resource(service_name='s3', endpoint_url=self.s3_endpoint_url)
        self.s3.create_bucket(Bucket=self.s3_bucket_name,
                              CreateBucketConfiguration={
                                  'LocationConstraint': 'eu-central-1'
                              })
        self.s3_bucket = self.s3.Bucket(self.s3_bucket_name)
        # Connector pointing at the mocked bucket
        self.s3_bucket_meta = S3BucketConnector(end_point_url=self.s3_endpoint_url,
                                                bucket=self.s3_bucket_name,
                                                profile_name=self.profile_name)

    def tearDown(self):
        """
        Executed after each test: stop the mocked S3 connection.
        """
        self.mock_s3.stop()

    def _delete_meta_key(self, meta_key):
        """Remove *meta_key* from the mocked bucket (shared test cleanup)."""
        self.s3_bucket.delete_objects(
            Delete={
                'Objects': [
                    {
                        'Key': meta_key
                    }
                ]
            }
        )

    def test_update_meta_file_no_meta_file(self):
        """
        Tests the update_meta_file method when there is no meta file yet.
        """
        # Expected results
        date_list_exp = ['2021-04-16', '2021-04-17']
        proc_date_list_exp = [datetime.today().date()] * 2
        # Test init
        meta_key = 'meta.csv'
        # Method execution
        MetaProcess.update_meta_file(date_list_exp, meta_key, self.s3_bucket_meta)
        # Read meta file
        data = self.s3_bucket.Object(key=meta_key).get()['Body'].read().decode('utf-8')
        out_buffer = StringIO(data)
        df_meta_result = pd.read_csv(out_buffer)
        date_list_result = list(df_meta_result[MetaProcessFormat.META_SOURCE_DATE_COL.value])
        # Bug fix: use the pandas .dt accessor (the original chained
        # ".datetime.date", which does not exist on a Series).
        proc_date_list_result = list(
            pd.to_datetime(df_meta_result[MetaProcessFormat.META_PROCESS_COL.value]).dt.date
        )
        # Test after method execution
        self.assertEqual(date_list_exp, date_list_result)
        self.assertEqual(proc_date_list_exp, proc_date_list_result)
        # Clean up - delete s3 content
        self._delete_meta_key(meta_key)

    def test_update_meta_file_empty_date_list(self):
        """
        Tests the update_meta_file method
        when the argument extract_date_list is empty.
        """
        # Expected result
        return_exp = True
        # Test init
        meta_key = 'meta.csv'
        date_list = []
        # Method execution
        result = MetaProcess.update_meta_file(date_list, meta_key, self.s3_bucket_meta)
        # Bug fix: the return value is a plain boolean, so compare it with
        # assertEqual (the original assertIn would try to iterate it).
        self.assertEqual(return_exp, result)

    def test_update_meta_file_is_successful(self):
        """
        Tests the update_meta_file method
        when an existing meta file is extended with new dates.
        """
        # Expected results
        date_list_old = ['2021-04-12', '2021-04-13']
        date_list_new = ['2021-04-16', '2021-04-17']
        date_list_exp = date_list_old + date_list_new
        proc_date_list_exp = [datetime.today().date()] * 4
        # Test init: seed the bucket with a valid two-row meta file.
        # Bug fixes: Enum members are read via ".value" (not ".val") and the
        # second CSV row was missing its column separator.
        meta_key = 'meta.csv'
        meta_content = (
            f'{MetaProcessFormat.META_SOURCE_DATE_COL.value},'
            f'{MetaProcessFormat.META_PROCESS_COL.value}\n'
            f'{date_list_old[0]},'
            f'{datetime.today().strftime(MetaProcessFormat.META_PROCESSDATE_FORMAT.value)}\n'
            f'{date_list_old[1]},'
            f'{datetime.today().strftime(MetaProcessFormat.META_PROCESSDATE_FORMAT.value)}'
        )
        self.s3_bucket.put_object(Body=meta_content, Key=meta_key)
        # Method execution
        result = MetaProcess.update_meta_file(date_list_new, meta_key, self.s3_bucket_meta)
        # Read meta file
        data = self.s3_bucket.Object(key=meta_key).get()['Body'].read().decode('utf-8')
        out_buffer = StringIO(data)
        df_meta_result = pd.read_csv(out_buffer)
        date_list_result = list(df_meta_result[MetaProcessFormat.META_SOURCE_DATE_COL.value])
        # Bug fix: convert via pd.to_datetime before using the .dt accessor
        # (a plain Python list has no ".dt").
        proc_date_list_result = list(
            pd.to_datetime(df_meta_result[MetaProcessFormat.META_PROCESS_COL.value]).dt.date
        )
        # Bug fix: the original computed the expectations but never asserted.
        self.assertTrue(result)
        self.assertEqual(date_list_exp, date_list_result)
        self.assertEqual(proc_date_list_exp, proc_date_list_result)
        # Clean up - delete s3 content
        self._delete_meta_key(meta_key)

    def test_update_meta_file_with_wrong_meta_file_data(self):
        """
        Tests the update_meta_file method
        when there is a malformed meta file in the bucket.
        """
        # Test init: the extra "wrong_column" makes the meta file invalid.
        date_list_old = ['2021-04-12', '2021-04-13']
        date_list_new = ['2021-04-16', '2021-04-17']
        meta_key = 'meta.csv'
        meta_content = (
            f'wrong_column, {MetaProcessFormat.META_SOURCE_DATE_COL.value},'
            f'{MetaProcessFormat.META_PROCESS_COL.value}\n'
            f'{date_list_old[0]},'
            f'{datetime.today().strftime(MetaProcessFormat.META_PROCESSDATE_FORMAT.value)}\n'
            f'{date_list_old[1]},'
            f'{datetime.today().strftime(MetaProcessFormat.META_PROCESSDATE_FORMAT.value)}'
        )
        self.s3_bucket.put_object(Body=meta_content, Key=meta_key)
        # Method execution
        # Bug fix: assertRaises takes the expected exception class; the
        # original passed put_object kwargs. Exception is used here because
        # the specific wrong-meta-file exception lives in the xetra package --
        # tighten this once the exact type is confirmed.
        with self.assertRaises(Exception):
            MetaProcess.update_meta_file(date_list_new, meta_key, self.s3_bucket_meta)
        # Clean up - delete s3 content
        self._delete_meta_key(meta_key)

    def test_return_date_list_no_meta_file(self):
        """
        Tests the return_date_list method
        when there is no meta file
        """
        # Expected results
        # NOTE(review): this test reads META_PROCESS_DATE_FORMAT while the
        # update tests read META_PROCESSDATE_FORMAT -- one of the two member
        # names is likely a typo; confirm against MetaProcessFormat.
        date_list_exp = [(datetime.today().date() - timedelta(days=day)).strftime(MetaProcessFormat.META_PROCESS_DATE_FORMAT.value) for day in range(4)]
        min_date_exp = (datetime.today().date() - timedelta(days=2)).strftime(MetaProcessFormat.META_PROCESS_DATE_FORMAT.value)
        # Test init
        meta_key = 'meta.csv'
        first_date = min_date_exp
        # Method execution
        min_date_return, date_list_return = MetaProcess.return_date_list(first_date, meta_key, self.s3_bucket_meta)
        # Test after method execution
        self.assertEqual(set(date_list_exp), set(date_list_return))
        self.assertEqual(min_date_exp, min_date_return)
if __name__ == "__main__":
unittest.main()
| andreyDavid/Deutch_stock_market_ETL | tests/common/test_meta_process.py | test_meta_process.py | py | 7,441 | python | en | code | 0 | github-code | 13 |
9919033504 | import constants
import pygame
import random
class Asteroid(pygame.sprite.Sprite):
    """A square sprite that falls down the screen and respawns at the top."""

    def __init__(self, size, speed):
        super().__init__()
        self.size = size
        self.speed = speed
        surface = pygame.Surface([size, size])
        surface.fill(constants.BLUE)
        self.image = surface
        self.rect = surface.get_rect()

    def update(self):
        """Advance one frame; respawn once fully below the screen."""
        self.rect.y += self.speed
        off_bottom = self.rect.y > constants.SCREEN_HEIGHT + self.size
        if off_bottom:
            self.reset_pos()

    def reset_pos(self):
        """Re-randomize size and position above the visible area.

        NOTE(review): only ``self.size`` and ``self.rect`` change here --
        ``self.image`` keeps its original dimensions, so the drawn square no
        longer matches the new size. Confirm whether that is intended.
        """
        self.size = random.randrange(40, 100)
        self.rect.y = random.randrange(-1000, -20)
        self.rect.x = random.randrange(constants.SCREEN_WIDTH - self.size)
| BeachedWhaleFTW/SpaceShooterExample | asteroids.py | asteroids.py | py | 680 | python | en | code | 0 | github-code | 13 |
7834549900 | import logging
from datetime import datetime
from functools import wraps
from logging import NullHandler
GNUPG_STATUS_LEVEL = 9
def status(self, message, *args, **kwargs):  # type: ignore[no-untyped-def]
    """LogRecord for GnuPG internal status messages."""
    if not self.isEnabledFor(GNUPG_STATUS_LEVEL):
        return
    self._log(GNUPG_STATUS_LEVEL, message, args, **kwargs)
@wraps(logging.Logger)
def create_logger(level=logging.NOTSET):  # type: ignore[no-untyped-def]
    """Create the ``gnupg`` logger at a specific message level.

    :type level: :obj:`int` or :obj:`str`
    :param level: A string or an integer for the lowest level to include in
        logs.

    **Available levels:**

    ==== ======== ========================================
    int  str      description
    ==== ======== ========================================
    0    NOTSET   Disable all logging.
    9    GNUPG    Log GnuPG's internal status messages.
    10   DEBUG    Log module level debuging messages.
    20   INFO     Normal user-level messages.
    30   WARN     Warning messages.
    40   ERROR    Error messages and tracebacks.
    50   CRITICAL Unhandled exceptions and tracebacks.
    ==== ======== ========================================
    """
    # Register the custom GNUPG level and attach the status() helper to every
    # Logger in the module.
    logging.addLevelName(GNUPG_STATUS_LEVEL, "GNUPG")
    logging.Logger.status = status
    log = logging.getLogger("gnupg")
    log.addHandler(NullHandler())
    log.setLevel(level)
    log.info("Log opened: %s UTC" % datetime.ctime(datetime.utcnow()))
    return log
| freedomofpress/securedrop | securedrop/pretty_bad_protocol/_logger.py | _logger.py | py | 1,627 | python | en | code | 3,509 | github-code | 13 |
28678668384 | import requests
import argparse
import sys
# CLI setup: -t/--target selects the URL whose response headers are printed.
parser = argparse.ArgumentParser()
parser.add_argument('-t', '--target', help = " *** Set an URL page for analyze *** ex. http://www.google.com")
# NOTE(review): the parsed Namespace is bound back onto the name `parser`,
# shadowing the ArgumentParser; main() relies on `parser.target` afterwards.
parser = parser.parse_args()
def main():
    """Fetch the -t/--target URL and print its HTTP response headers."""
    if not parser.target:
        print("URL not definded, set help for more info...")
        return
    try:
        response = requests.get(url=parser.target)
    # Bug fix: catch only request failures instead of a bare except that
    # also hid bugs in the printing loop below.
    except requests.exceptions.RequestException:
        print("Error connection .... please check the url and try again.")
        return
    print("\n#--HEADERS OF PAGE--#\n")
    for header_name, header_value in response.headers.items():
        print(header_name + " " + header_value)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Bug fix: sys has no close(); exit cleanly on Ctrl-C instead.
        sys.exit(0)
| Antonio152/Hacking_CMS | Headers.py | Headers.py | py | 762 | python | en | code | 0 | github-code | 13 |
40608043850 | import os
import cv2
import matplotlib.pyplot as plt
import numpy as np
def find_board(src_img: np.ndarray):
    """Locate the largest four-cornered contour (the sudoku board).

    Returns its bounding rectangle as ``(x, y, w, h)``; ``(0, 0, 0, 0)``
    when no quadrilateral is found.
    """
    best = (0, 0, 0, 0)
    gray = cv2.cvtColor(src_img, cv2.COLOR_BGR2GRAY)
    rows, cols = gray.shape
    canvas = np.zeros((rows, cols, 3), dtype=np.uint8)
    _, mask = cv2.threshold(gray, 0, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    contours, _ = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for contour in contours:
        eps = 0.01 * cv2.arcLength(contour, True)
        poly = cv2.approxPolyDP(contour, eps, True)
        if len(poly) == 4:
            x, y, w, h = cv2.boundingRect(contour)
            # Keep the quadrilateral with the largest bounding-box area.
            if w * h > best[2] * best[3]:
                best = (x, y, w, h)
    cv2.rectangle(canvas, (best[0], best[1]), (best[0] + best[2], best[1] + best[3]), (255, 0, 0), 1)
    return best
def _delete_line(img: np.ndarray):
    """Zero out the first connected white component (4-connectivity), in place.

    Scans row-major for the first nonzero pixel and flood-fills from there;
    used to erase the board's grid lines before digit extraction.
    """
    seed = None
    for r in range(img.shape[0]):
        for c in range(img.shape[1]):
            if img[r][c]:
                seed = (r, c)
                break
        if seed:
            break
    if seed is None:
        return
    queue = [seed]
    head = 0
    while head < len(queue):
        r, c = queue[head]
        head += 1
        if not img[r][c]:
            continue
        img[r][c] = 0
        if r + 1 < img.shape[0] and img[r + 1][c]:
            queue.append((r + 1, c))
        if c + 1 < img.shape[1] and img[r][c + 1]:
            queue.append((r, c + 1))
        if r - 1 >= 0 and img[r - 1][c]:
            queue.append((r - 1, c))
        if c - 1 >= 0 and img[r][c - 1]:
            queue.append((r, c - 1))
def extract_num(src_img: np.ndarray, board: tuple):
    """Find digit bounding boxes inside the board region of *src_img*.

    Each box is squared up (padded on the shorter side) and translated back
    into full-image coordinates.
    """
    boxes = []
    roi = src_img[board[1]:board[1] + board[3], board[0]:board[0] + board[2]]
    gray = cv2.cvtColor(roi, cv2.COLOR_BGR2GRAY)
    _, binary = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
    # Remove the grid lines (the first connected component) before contouring.
    _delete_line(binary)
    contours, _ = cv2.findContours(binary, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
    for cont in contours:
        x, y, w, h = cv2.boundingRect(cont)
        # Discard specks that are too small to be digits.
        if max(w, h) < 10 or min(w, h) < 3:
            continue
        if w > h:
            boxes.append((x + board[0], y + board[1] - (w - h) // 2, w, w))
        else:
            boxes.append((x + board[0] - (h - w) // 2, y + board[1], h, h))
    return boxes
def get_index(board, num_list):
    """Map each digit bounding box to its (row, col) cell of the 9x9 grid."""
    cell_w = board[2] / 9
    cell_h = board[3] / 9
    return [
        (round((y - board[1]) / cell_h), round((x - board[0]) / cell_w))
        for x, y, *_ in num_list
    ]
class ImgLocation:
    """Load an image and locate the sudoku board plus its digit cells."""

    def __init__(self, img_path):
        self.img = cv2.imread(img_path)
        self.gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)

    def location(self):
        """Return (board_rect, digit_boxes, grid_indices) for the image."""
        grid = find_board(self.img)
        assert grid != (0, 0, 0, 0), "find puzzle error"
        digits = extract_num(self.img, grid)
        return grid, digits, get_index(grid, digits)
def main():
    """Visualize board/digit detection for every file under image/."""
    for filename in os.listdir('image'):
        locator = ImgLocation(f"image/{filename}")
        _grid, digits, _ids = locator.location()
        for x, y, w, h in digits:
            cv2.rectangle(locator.img, (x, y), (x + w, y + h), (255, 0, 0), 2)
        plt.imshow(locator.img)
        plt.show()
if __name__ == '__main__':
main()
| VGxiaozhao/Sudoku | preprocess.py | preprocess.py | py | 3,468 | python | en | code | 3 | github-code | 13 |
23471993260 | from odoo import models, fields, api, _
from odoo.exceptions import ValidationError
class TfHrJobAssignmentSAWizard(models.TransientModel):
    # Transient wizard that turns a job assignment's logged hours into a
    # salary-adjustment amount.
    _name = 'tf.hr.job_assignment.sa.wizard'
    employee_id = fields.Many2one('hr.employee', 'Employee')
    currency_id = fields.Many2one('res.currency')
    job_config_id = fields.Many2one('tf.hr.job_assignment.config', 'Job Assignment')
    type_id = fields.Many2one('ss.hris.salary_adjustment.type', related="job_config_id.type_id")
    adjustment_date = fields.Date('Adjustment Date')
    total_hours = fields.Float('Total Hours', compute="compute_hours")
    amount = fields.Monetary()
    reference = fields.Char('Source Document')
    assignment_line_ids = fields.One2many('tf.hr.job_assignment.line', 'job_assignment_id')
    @api.model
    def hours_between(self, from_date, to_date):
        # Hours between two datetimes.
        # NOTE(review): ".seconds" drops the day component of the timedelta;
        # spans longer than 24h wrap around -- total_seconds() is likely meant.
        if from_date and to_date:
            return (to_date - from_date).seconds / 60.0 / 60.0
        else:
            return 0
    @api.depends('assignment_line_ids.start_time', 'assignment_line_ids.end_time')
    def compute_hours(self):
        # NOTE(review): `hours` is reset inside the loop, so only the LAST
        # line's hours survive, and the write raises NameError when there are
        # no lines. An Odoo compute should also iterate `self`, not assume a
        # single record -- confirm intent.
        for assignment in self.assignment_line_ids:
            start_time = assignment.start_time
            end_time = assignment.end_time
            hours = 0
            if start_time < end_time:
                hours += self.hours_between(start_time, end_time)
        self.total_hours = hours
    def compute_amount(self):
        # NOTE(review): same reset-inside-loop pattern as compute_hours; only
        # the last line contributes and an empty one2many raises NameError.
        job_config_id = self.job_config_id
        work_hour_ids = job_config_id.work_hour_ids
        for assignment in self.assignment_line_ids:
            amount = 0
            # 25% rate applies to assignments of up to two hours.
            if work_hour_ids.range_hours == '1_2hours' and self.total_hours <= 2 and self.total_hours > 0:
                amount += assignment.amount * 0.25
        self.amount = amount
    def action_confirm(self):
        # Writes back onto the record this wizard was opened from.
        # NOTE(review): `rec.end_time` / `rec.is_done` are not declared fields
        # on this wizard model -- confirm where these values should come from.
        for rec in self:
            approve_id = self.env[self._context.get('active_model')].browse(self._context.get('active_id'))
            vals = {
                'end_time': rec.end_time,
                'is_done': rec.is_done
            }
            approve_id.write(vals)
        # return {'type': 'ir.actions.client', 'tag': 'reload'}
| taliform/demo-peaksun-accounting | tf_peec_job_assignment/wizard/tf_hr_job_assignment_sa_wizard.py | tf_hr_job_assignment_sa_wizard.py | py | 2,167 | python | en | code | 0 | github-code | 13 |
74377822416 | import numpy as np
from math import pi, cos, sin
import modern_robotics as mr
def forward_kinematics(joints):
    """Planar forward kinematics for the 3-revolute arm.

    input: joint angles [joint1, joint2, joint3]
    output: [x, y, z] where (x, y) is the end-effector position built from
    the three link lengths and z is the cumulative joint angle.
    """
    # Link lengths (meters), matching the original constants.
    link1z = 0.065
    link2z = 0.039
    link3z = 0.150
    # Cumulative angles after each joint.
    a1 = joints[0]
    a2 = a1 + joints[1]
    a3 = a2 + joints[2]
    x = (link1z * cos(a1)) + (link2z * cos(a2)) + (link3z * cos(a3))
    y = (link1z * sin(a1)) + (link2z * sin(a2)) + (link3z * sin(a3))
    return [x, y, a3]
| Zachattack98/EE144_Labs | lab4/forward_kinematics.py | forward_kinematics.py | py | 721 | python | en | code | 1 | github-code | 13 |
7226787445 | import PySimpleGUI as sg
from string import punctuation
rus_alph = ['а', 'б', 'в', 'г', 'д', 'е', 'ё', 'ж', 'з', 'и', 'й',
'к', 'л', 'м', 'н', 'о', 'п', 'р', 'с', 'т', 'у', 'ф',
'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э', 'ю', 'я',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
eng_alph = ['a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k',
'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v',
'w', 'x', 'y', 'z',
'0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
numbers = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9']
spec_sym = punctuation + ' ' + '\n'
def ciphering(text, k, alph):
    """Caesar-shift each alphabet character of *text* forward by *k* positions.

    The input is lower-cased first; characters in ``spec_sym`` (punctuation
    and whitespace) pass through unchanged.
    """
    size = len(alph)
    shift = int(k) % size
    result = []
    for ch in text.lower():
        if ch in spec_sym:
            result.append(ch)
        else:
            result.append(alph[(alph.index(ch) + shift) % size])
    return ''.join(result)
def deciphering(text, k, alph):
    """Reverse ``ciphering``: shift alphabet characters back by *k* positions.

    Characters in ``spec_sym`` pass through unchanged; the input is assumed
    to be already lower-case (cipher text).
    """
    size = len(alph)
    shift = int(k) % size
    return ''.join(
        ch if ch in spec_sym else alph[(alph.index(ch) - shift) % size]
        for ch in text
    )
if __name__ == '__main__':
    sg.theme('DarkBrown')
    # Window layout markup: a list of rows, each row a list of widgets.
    layout = [
        [sg.Text('Input Text on language:'),
         sg.Radio('русский', "Lang"), sg.Radio('english', "Lang", default=True, key='-Eng-')],
        [sg.Multiline(size=(70, 3), background_color='lightgray', text_color='SteelBlue4', key='-IText-')],
        [sg.Text('Key'), sg.InputText(size=(20, 2), key='-Key-'), sg.Button('Ciphering')],
        [sg.Text('Cipher Text')],
        [sg.Multiline(size=(70, 3), background_color='lightgray', text_color='SteelBlue4', key='-CText-')],
        [sg.Button('Deciphering')],
        [sg.Text('Decipher Text'), sg.Text()],
        [sg.Text(size=(63, 3), background_color='lightgray', text_color='SteelBlue4', key='-DText-')],
    ]
    window = sg.Window('Caesar Cipher from KVA', layout)
    # Event loop: validate the inputs, then cipher/decipher on button press.
    while True:
        event, values = window.read()
        # print(values)
        if event in (None, 'Exit'):
            break
        # Spaces are stripped from the key before validation.
        key_value = values['-Key-'].replace(' ', '')
        if event == 'Ciphering':
            window['-Key-'].update(key_value)
            if values['-IText-'] == '' or key_value == '':
                sg.PopupOK('Please, write input text and key for ciphering')
                continue
            # Reject text containing characters outside the chosen alphabet.
            if (values['-Eng-'] and
                not all([(sym in eng_alph) or (sym in spec_sym) for sym in values['-IText-'].lower()]) ) \
                or (not values['-Eng-'] and
                not all([(sym in rus_alph) or (sym in spec_sym) for sym in values['-IText-'].lower()]) ):
                sg.PopupOK('Please, choose right language and write text on this language only')
                continue
            # The key must be an (optionally negative) integer.
            if not all([num in numbers for num in key_value]):
                if key_value[0] == '-' and all([num in numbers for num in key_value[1:]]): # negative it's ok
                    pass
                else:
                    sg.PopupOK('Key can be only integer, no punctuation and letters')
                    continue
            cipher_text = ciphering(
                values['-IText-'], key_value, eng_alph if values['-Eng-'] else rus_alph
            )
            # set in element with key 'CTEXT' new value
            window['-CText-'].update(cipher_text)
            # NOTE(review): _cipher_text is never read afterwards.
            _cipher_text = cipher_text
        if event == 'Deciphering':
            window['-Key-'].update(key_value)
            if values['-CText-'] == '' or key_value == '':
                sg.PopupOK('Please, write key and cipher some text')
                continue
            # Same alphabet validation as above, applied to the cipher text.
            if (values['-Eng-'] and
                not all([(sym in eng_alph) or (sym in spec_sym) for sym in values['-CText-'].lower()]) ) \
                or (not values['-Eng-'] and
                not all([(sym in rus_alph) or (sym in spec_sym) for sym in values['-CText-'].lower()]) ):
                sg.PopupOK('Please, choose right language and write text on this language only')
                continue
            if not all([num in numbers for num in key_value]):
                if key_value[0] == '-' and all([num in numbers for num in key_value[1:]]): # negative it's ok
                    pass
                else:
                    sg.PopupOK('Key can be only integer, no punctuation and letters')
                    continue
            decipher_text = deciphering(
                values['-CText-'], key_value, eng_alph if values['-Eng-'] else rus_alph
            )
            window['-DText-'].update(decipher_text)
| IgelSchnauze/info-security | CaesarCipher_1.py | CaesarCipher_1.py | py | 4,808 | python | en | code | 0 | github-code | 13 |
21580874835 | from OpenGL.GL import * # noqa
from math import radians, cos, sin, tan, sqrt
from PyQt5 import QtCore, QtWidgets, QtGui
from .camera import Camera
from .functions import mkColor
from .transform3d import Matrix4x4, Quaternion, Vector3
class GLViewWidget(QtWidgets.QOpenGLWidget):
    def __init__(
        self,
        cam_position = Vector3(0., 0., 10.),
        yaw = 0.,
        pitch = 0.,
        roll = 0.,
        fov = 45.,
        bg_color = (0.2, 0.3, 0.3, 1.),
        parent=None,
    ):
        """
        Basic widget for displaying 3D data
          - Rotation/scale controls via mouse drag and wheel.

        :param cam_position: initial camera position in world coordinates
        :param yaw/pitch/roll: initial camera Euler angles (degrees)
        :param fov: vertical field of view in degrees
        :param bg_color: clear color as an RGBA tuple of floats in [0, 1]
        """
        QtWidgets.QOpenGLWidget.__init__(self, parent)
        self.setFocusPolicy(QtCore.Qt.FocusPolicy.ClickFocus)
        self.camera = Camera(cam_position, yaw, pitch, roll, fov)
        self.bg_color = bg_color
        # Drawable items, kept sorted by depthValue(); lights are collected
        # from items that expose a `lights` attribute.
        self.items = []
        self.lights = set()
    def get_proj_view_matrix(self):
        """Return the combined projection * view matrix."""
        view = self.camera.get_view_matrix()
        proj = self.camera.get_projection_matrix(
            self.deviceWidth(),
            self.deviceHeight()
        )
        return proj * view
    def get_proj_matrix(self):
        """Return the camera projection matrix for the current device size."""
        return self.camera.get_projection_matrix(
            self.deviceWidth(),
            self.deviceHeight()
        )
    def get_view_matrix(self):
        """Return the camera view matrix."""
        return self.camera.get_view_matrix()
    def deviceWidth(self):
        # Widget width in physical pixels (accounts for HiDPI scaling).
        dpr = self.devicePixelRatioF()
        return int(self.width() * dpr)
    def deviceHeight(self):
        # Widget height in physical pixels (accounts for HiDPI scaling).
        dpr = self.devicePixelRatioF()
        return int(self.height() * dpr)
    def deviceRatio(self):
        # Height / width aspect ratio.
        return self.height() / self.width()
    def reset(self):
        """Reset the camera to its default pose and field of view."""
        self.camera.set_params(Vector3(0., 0., 10.), 0, 0, 0, 45)
    def addItem(self, item):
        """Add a drawable item to the scene and re-sort by depth."""
        self.items.append(item)
        item.setView(self)
        if hasattr(item, 'lights'):
            self.lights |= set(item.lights)
        self.items.sort(key=lambda a: a.depthValue())
        self.update()
    def removeItem(self, item):
        """
        Remove the item from the scene.

        NOTE(review): this calls item._setView(None) while addItem calls
        item.setView(self) -- confirm which method name the item API exposes.
        """
        self.items.remove(item)
        item._setView(None)
        self.update()
    def clear(self):
        """
        Remove all items from the scene.
        """
        for item in self.items:
            item._setView(None)
        self.items = []
        self.update()
    def setBackgroundColor(self, *args, **kwds):
        """
        Set the background color of the widget. Accepts the same arguments as
        :func:`~pyqtgraph.mkColor`.
        """
        self.bg_color = mkColor(*args, **kwds).getRgbF()
        self.update()
    def getViewport(self):
        # (x, y, w, h) arguments for glViewport, in device pixels.
        return (0, 0, self.deviceWidth(), self.deviceHeight())
    def paintGL(self):
        """
        Clear the color/depth buffers and draw all items.

        Light update flags are raised once per frame so each light is only
        re-uploaded a single time regardless of how many items use it.
        """
        glClearColor(*self.bg_color)
        glDepthMask(GL_TRUE)
        glClear( GL_DEPTH_BUFFER_BIT | GL_COLOR_BUFFER_BIT )
        for light in self.lights: # update light only once per frame
            light._update_flag = True
        self.drawItems()
    def drawItems(self):
        # Draw every item; failures are reported (via printExc) but do not
        # abort the rest of the frame.
        for it in self.items:
            try:
                it.drawItemTree()
            except:
                printExc()
                print("Error while drawing item %s." % str(it))
    def pixelSize(self, pos=Vector3(0, 0, 0)):
        """
        depth: z-value in global coordinate system
        Return the approximate (y) size of a screen pixel at the location pos
        Pos may be a Vector or an (N,3) array of locations
        """
        pos = self.get_view_matrix() * pos # convert to view coordinates
        fov = self.camera.fov
        return max(-pos[2], 0) * 2. * tan(0.5 * radians(fov)) / self.deviceHeight()
    def mousePressEvent(self, ev):
        # Remember the press position and camera pose; drags are applied
        # relative to this baseline in mouseMoveEvent.
        lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()
        self.mousePressPos = lpos
        self.cam_quat, self.cam_pos = self.camera.get_quat_pos()
    def mouseMoveEvent(self, ev):
        # Modifiers: Ctrl = fine control, Shift = axis lock, Alt = roll.
        ctrl_down = (ev.modifiers() & QtCore.Qt.KeyboardModifier.ControlModifier)
        shift_down = (ev.modifiers() & QtCore.Qt.KeyboardModifier.ShiftModifier)
        alt_down = (ev.modifiers() & QtCore.Qt.KeyboardModifier.AltModifier)
        lpos = ev.position() if hasattr(ev, 'position') else ev.localPos()
        diff = lpos - self.mousePressPos
        if ctrl_down:
            diff *= 0.1
        if alt_down:
            roll = -diff.x() / 5
        if shift_down:
            # Lock the drag to its dominant axis.
            if abs(diff.x()) > abs(diff.y()):
                diff.setY(0)
            else:
                diff.setX(0)
        # `roll` is only read under alt_down, mirroring where it is assigned.
        if ev.buttons() == QtCore.Qt.MouseButton.LeftButton:
            if alt_down:
                self.camera.orbit(0, 0, roll, base=self.cam_quat)
            else:
                self.camera.orbit(diff.x(), diff.y(), base=self.cam_quat)
        elif ev.buttons() == QtCore.Qt.MouseButton.MiddleButton:
            self.camera.pan(diff.x(), -diff.y(), 0, base=self.cam_pos)
        self.update()
    def wheelEvent(self, ev):
        # Wheel zooms the camera; with Ctrl it changes the field of view.
        delta = ev.angleDelta().x()
        if delta == 0:
            delta = ev.angleDelta().y()
        if (ev.modifiers() & QtCore.Qt.KeyboardModifier.ControlModifier):
            self.camera.fov *= 0.999**delta
        else:
            self.camera.pos.z = self.camera.pos.z * 0.999**delta
        self.update()
    def readQImage(self):
        """
        Read the current buffer pixels out as a QImage.
        """
        return self.grabFramebuffer()
    def isCurrent(self):
        """
        Return True if this GLWidget's context is current.
        """
        return self.context() == QtGui.QOpenGLContext.currentContext()
    def keyPressEvent(self, a0) -> None:
        """Debug key handling: '1' prints the camera pose, '2' jumps to a preset."""
        if a0.text() == '1':
            pos, euler = self.camera.get_params()
            print(f"pos: ({pos.x:.2f}, {pos.y:.2f}, {pos.z:.2f}) "
                  f"euler: ({euler[0]:.2f}, {euler[1]:.2f}, {euler[2]:.2f})")
        elif a0.text() == '2':
            self.camera.set_params((0.00, 0.00, 886.87),
                                   pitch=-31.90, yaw=-0, roll=-90)
            # self.camera.set_params((1.72, -2.23, 27.53),pitch=-27.17, yaw=2.64, roll=-70.07)
import warnings
import traceback
import sys
def formatException(exctype, value, tb, skip=0):
    """Return a list of formatted exception strings.

    Like traceback.format_exception, but splices the entire call stack above
    the handler into the output, so frames that emitted a Qt signal (which
    are normally missing downstream of the catch point) remain visible.
    `skip` drops that many innermost stack frames (e.g. the caller itself).
    """
    exc_lines = traceback.format_exception(exctype, value, tb)
    stack_lines = traceback.format_stack()[:-(skip + 1)]
    marker = [' --- exception caught here ---\n']
    return [exc_lines[0]] + stack_lines + marker + exc_lines[1:]
def getExc(indent=4, prefix='| ', skip=1):
    """Return the current exception formatted as one indented, prefixed block."""
    raw = formatException(*sys.exc_info(), skip=skip)
    flat = []
    for chunk in raw:
        flat.extend(chunk.strip('\n').split('\n'))
    return '\n'.join(" " * indent + prefix + line for line in flat)
def printExc(msg='', indent=4, prefix='|'):
    """Warn with an error message followed by the exception backtrace.

    (Intended to be called from within except: blocks.)
    """
    exc = getExc(indent=0, prefix="", skip=2)
    warnings.warn("\n".join([msg, exc]), RuntimeWarning, stacklevel=2)
if __name__ == '__main__':
    # Manual smoke test: open an empty 3D view window.
    import sys
    app = QtWidgets.QApplication(sys.argv)
    win = GLViewWidget(None)
    win.show()
    sys.exit(app.exec_()) | Liuyvjin/pyqtOpenGL | pyqtOpenGL/GLViewWiget.py | GLViewWiget.py | py | 7,912 | python | en | code | 0 | github-code | 13
class Solution:
    """LeetCode 1105: place books on shelves of width ``shelfWidth`` in
    order, minimizing the total height of all shelves."""

    def f(self, n):
        """Return the minimum total height needed to shelve ``books[n:]``.

        Top-down DP memoized in ``self.dp``; tries every possible length
        for the shelf that starts at book *n*.
        """
        if n in self.dp:
            return self.dp[n]
        if n == len(self.books):
            # No books left: no more shelves needed.
            return 0
        best = sys.maxsize
        row_height = 0
        row_width = 0
        for j in range(n, len(self.books)):
            width, height = self.books[j]
            # Current shelf holds books[n..j]; its height is the tallest book.
            row_height = max(row_height, height)
            row_width += width
            if row_width > self.max_width:
                break
            best = min(best, row_height + self.f(j + 1))
        self.dp[n] = best
        return best

    def minHeightShelves(self, books: List[List[int]], shelfWidth: int) -> int:
        """Return the minimum achievable total shelf height.

        ``books[i] = [thickness, height]``; books must keep their order.
        """
        self.books = books
        self.max_width = shelfWidth
        self.dp = {}
        return self.f(0)
| ritwik-deshpande/LeetCode | DP/min_height_of_shelves.py | min_height_of_shelves.py | py | 910 | python | en | code | 0 | github-code | 13 |
16756066715 | """Test w_state."""
import numpy as np
import pytest
from toqito.matrix_ops import tensor
from toqito.states import basis, w_state
def test_w_state_3():
    """The 3-qubit W-state."""
    e_0, e_1 = basis(2, 0), basis(2, 1)
    # Equal superposition of the three single-excitation basis states.
    terms = (
        tensor(e_1, e_0, e_0)
        + tensor(e_0, e_1, e_0)
        + tensor(e_0, e_0, e_1)
    )
    expected_res = terms / np.sqrt(3)
    res = w_state(3)
    np.testing.assert_allclose(res, expected_res, atol=0.2)
def test_w_state_generalized():
    """Generalized 4-qubit W-state with non-uniform coefficients."""
    e_0, e_1 = basis(2, 0), basis(2, 1)
    # Single-excitation states weighted 1..4, normalized by sqrt(1+4+9+16).
    weighted = (
        1 * tensor(e_1, e_0, e_0, e_0)
        + 2 * tensor(e_0, e_1, e_0, e_0)
        + 3 * tensor(e_0, e_0, e_1, e_0)
        + 4 * tensor(e_0, e_0, e_0, e_1)
    )
    expected_res = weighted / np.sqrt(30)
    coeffs = np.array([1, 2, 3, 4]) / np.sqrt(30)
    res = w_state(4, coeffs)
    np.testing.assert_allclose(res, expected_res, atol=0.2)
@pytest.mark.parametrize("idx, coeff", [
    # Number of qubits needs to be greater than 2.
    (1, None),
    # Length of coefficient list needs to be equal to number of qubits.
    (4, [1, 2, 3]),
])
def test_w_state_invalid(idx, coeff):
    """Invalid arguments to `w_state` raise a ValueError."""
    # Use pytest.raises rather than the legacy np.testing.assert_raises
    # wrapper, consistent with the module's use of pytest.
    with pytest.raises(ValueError):
        w_state(idx, coeff)
| vprusso/toqito | toqito/states/tests/test_w_state.py | test_w_state.py | py | 1,258 | python | en | code | 118 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.