seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
11880730133 | #!/usr/bin/python
import argparse
import time
import struct
import socket
import select
import sys
class PingOptions(object):
def __init__(self):
"""Reset the class; indicates the class hasn't been initailized"""
self.initialized = False
def initialize(self, parser):
parser.add_argument('--host', required=True, help='')
parser.add_argument('--packet-size', type=int, default=32, help='')
parser.add_argument('--ping-times', type=int, default=5, help='')
return parser
def gather_options(self):
if not self.initialized:
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser = self.initialize(parser)
# opt, _ = parser.parse_known_args()
return parser.parse_args()
def print_options(self, opt):
message = ''
message += '----------------- Options ---------------\n'
for k, v in sorted(vars(opt).items()):
comment = ''
default = self.parser.get_default(k)
if v != default:
comment = '\t[default: %s]' % str(default)
message += '{:>25}: {:<30}{}\n'.format(str(k), str(v), comment)
message += '----------------- End -------------------'
print(message)
def parse(self):
opt = self.gather_options()
self.opt = opt
return self.opt
| jansona/MyPingGUI | src/main/python/MyPing/opts.py | opts.py | py | 1,425 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentDefaultsHelpFormatter",
"line_number": 25,
"usage_type": "attribute"
}
] |
71688402345 | __author__ = 'vamsee'
import geojson as geo
Center = (-83.79706,42.268138)
Center_Point = geo.Point(Center)
def get_locations(filename,lat_idx,long_idx, write_filename,point_properties):
opened_file = open(filename)
features_list = []
for line in opened_file:
line_contents = line.split(',')
(longitude,latitude) = (float(line_contents[lat_idx])/10000000,float(line_contents[long_idx])/10000000 )
f = geo.Feature(geometry=geo.Point( (longitude,latitude)), \
properties = point_properties )
features_list.append(f)
#break
F = geo.FeatureCollection(features_list)
# generate data
geo_data_str = geo.dumps(F,sort_keys=True)
# Write into file
json_file = open('../Mapbox/data/'+write_filename,'w')
json_file.write(geo_data_str)
json_file.close()
# get_locations('../../Transportation-Data/Data/RSE_BSM/RSE_BSM_trimmed.csv')
get_locations('../BSM-RSE.csv',7,6,'vehicle-locations.json',{'title':'Vehicle Location', "marker-symbol": "car", "marker-size": "medium", "marker-color": "#09f"})
get_locations('../Geometry.csv',4,3,'rse-data-locations.json',{'title':'RSE Location', "marker-symbol": "commercial", "marker-size": "large", "marker-color": "#f83"})
| vamseedhar-reddyvari/Connected-Vehicles | scripts/rse-locations.py | rse-locations.py | py | 1,254 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "geojson.Point",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "geojson.Feature",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "geojson.Point",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "geojson.FeatureCollection"... |
37215521511 | import pymc as pm
import numpy as np
x = np.random.normal(loc=0, scale=1, size=10) # Data Placeholder
y = np.random.normal(loc=2*x, scale=1, size=10) # Data Placeholder
with pm.Model() as model:
# Priors
α = pm.Normal("α", 1, 2)
β = pm.Normal("β", -1, 2)
σ = pm.Exponential("σ", 3)
# Linear Model
μ = pm.Deterministic("μ", α + β*x)
# Likelihood
y_ = pm.Normal("y", μ, σ, observed=y)
with model:
trace = pm.sample()
if __name__ == "__main__":
from pykrusch import krusch
krusch(model, outname="../img/simple_model_posterior.png", posterior_trace=trace) | pbrowne88/pykrusch | examples/simple_model.py | simple_model.py | py | 587 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.normal",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.random",... |
3520925666 | #!/usr/bin/env python
import math
import copy
import matplotlib.pyplot as plt
def plot_obstacles(previous_obstacles, new_obstacles, updated_obstacles, updated_labels):
# Extract x and y coordinates from the obstacles lists
prev_x, prev_y, new_x, new_y, updated_x, updated_y = [], [],[],[],[],[]
if len(previous_obstacles)>0:
prev_x, prev_y = zip(*previous_obstacles)
if len(new_obstacles)>0:
new_x, new_y = zip(*new_obstacles)
if len(updated_obstacles)>0:
updated_x, updated_y = zip(*updated_obstacles)
# Create a new figure
plt.figure()
# Plot the obstacle positions from each list
#plt.scatter(prev_x, prev_y, color='red', label='Previous Obstacles', s=100)
#plt.scatter(new_x, new_y, color='blue', label='New Obstacles', s=80)
plt.scatter(updated_x, updated_y, color='green', label='Updated Obstacles')
for obstacle, label in zip(updated_obstacles, updated_labels):
plt.text(obstacle[0], obstacle[1], label, ha='center', va='bottom') # Display the label near the obstacle
# Add labels and legend
plt.xlabel('X')
plt.ylabel('Y')
plt.xlim(0, 1.4)
plt.ylim(0, 1.4)
plt.legend()
# Show the plot
plt.show(block=False)
def calculate_distance(point1, point2):
x1, y1 = point1
x2, y2 = point2
return math.sqrt((x2 - x1)**2 + (y2 - y1)**2)
def update_obstacles(previous_obstacles, new_obstacles, previous_labels, new_labels, camera_position, camera_orientation=0.2, fov=160, task="task1"):
# Calculate FOV boundaries
camera_orientation = math.degrees(camera_orientation)
fov_half_angle = math.radians(fov / 2)
fov_left_boundary = math.radians(camera_orientation) - fov_half_angle
fov_right_boundary = math.radians(camera_orientation) + fov_half_angle
# Initialize the updated obstacle positions list
updated_obstacles = []
updated_labels = []
# Identify previous obstacles in the FOV
obstacles_in_fov = []
labels_in_fov = []
for obstacle, label in zip(previous_obstacles, previous_labels):
angle = math.atan2(obstacle[1] - camera_position[1], obstacle[0] - camera_position[0])
angle = math.degrees(angle)
angle_diff = (angle - camera_orientation + 180) % 360 - 180
if -fov / 2 <= angle_diff <= fov / 2:
obstacles_in_fov.append(obstacle)
labels_in_fov.append(label)
print('obstacles_in_fov', obstacles_in_fov)
copy_obstacles_in_fov = copy.deepcopy(obstacles_in_fov)
copy_new_obstacles = copy.deepcopy(new_obstacles)
obstacles_in_fov_used = []
new_obstacles_used = []
previous_obstacles_used = []
# Check if the number of obstacles in FOV is less than the number of new obstacles
# Update obstacle positions for close obstacles
for new_obstacle, new_label in zip(new_obstacles, new_labels):
min_distance = 0.25
closest_obstacle = None
closest_label = None
for obstacle in previous_obstacles:
distance = math.sqrt((new_obstacle[0] - obstacle[0]) ** 2 + (new_obstacle[1] - obstacle[1]) ** 2)
if distance < min_distance:
min_distance = distance
closest_obstacle = obstacle
closest_label = label
# Calculate the updated position as the average for close obstacles
if closest_obstacle is not None and closest_obstacle not in previous_obstacles_used:
if calculate_distance(camera_position, closest_obstacle) < 0.6:
updated_position = new_obstacle
else:
updated_position = ((new_obstacle[0] + closest_obstacle[0]) / 2, (new_obstacle[1] + closest_obstacle[1]) / 2) # Calculate the updated position as the average
#obstacles_in_fov.remove(closest_obstacle) # Remove the closest obstacle to avoid duplicate updating
# labels_in_fov.remove(closest_label)
updated_obstacles.append(updated_position) # Append the updated obstacle position to the updated list
updated_labels.append(new_label)
#add to used
previous_obstacles_used.append(closest_obstacle)
obstacles_in_fov_used.append(closest_obstacle)
new_obstacles_used.append(new_obstacle)
# Append the remaining new obstacles and previosu obstacles as updated obstacles that did not find a match
updated_obstacles.extend(obstacle for obstacle in new_obstacles if obstacle not in new_obstacles_used)
updated_labels.extend(label for label, obstacle in zip(new_labels, new_obstacles) if obstacle not in new_obstacles_used) #do you want to add previosu points that are in FOV but not used?
updated_obstacles.extend(obstacle for obstacle in previous_obstacles if obstacle not in previous_obstacles_used)
updated_labels.extend(label for label, obstacle in zip(previous_labels, previous_obstacles) if obstacle not in previous_obstacles_used)
plot_obstacles(previous_obstacles, copy_new_obstacles, updated_obstacles, updated_labels)
return updated_obstacles, updated_labels
| husamhamu/ps_robotik | update_obstacles_approach.py | update_obstacles_approach.py | py | 5,081 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "... |
20142637580 | import os, sys, argparse
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import pandas as pd
import tensorflow as tf
import tensorflow.keras.backend as K
from tensorflow.keras.layers import Add, Concatenate, Dense, Embedding, Flatten, Input, InputLayer, Lambda, Layer, Reshape, Subtract
tf.keras.backend.set_floatx('float64')
import util
elem_z = {
'H' : 1,
'C' : 6,
'N' : 7,
'O' : 8,
'F' : 9,
'P' : 15,
'S' : 16,
'CL' : 17,
'BR' : 35,
}
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Train an atomic multipole model')
parser.add_argument('dataset_train',
help='Dataset for training (must be in ./data/)')
parser.add_argument('dataset_val',
help='Dataset for validation (must be in ./data/)')
parser.add_argument('modelname',
help='Name for saving model')
args = parser.parse_args(sys.argv[1:])
if os.path.isfile(f'models/{args.modelname}.hdf5'):
print(f'Model models/{args.modelname}.hdf5 already exists!')
exit()
# these are defined by the dataset
pad_dim = 40
nelem = 36
# this is up to the user
nembed = 10
nepochs = 200
nnodes = [256,128,64]
nmessage = 3
# make the model
mus = np.linspace(0.8, 5.0, 43)
etas = np.array([-100.0] * 43)
model = util.get_model(mus, etas, pad_dim, nelem, nembed, nnodes, nmessage)
model.compile(optimizer=tf.keras.optimizers.Adam(0.001),
loss=['mse', util.mse_mp, util.mse_mp, util.mse_mp],
loss_weights=[1.0, 1.0, 1.0, 1.0],
metrics=[util.mae_mp])
print(model.summary())
# load data
RT, ZT, yT = util.get_data(f'data/{args.dataset_train}.pkl', pad_dim)
RV, ZV, yV = util.get_data(f'data/{args.dataset_val}.pkl', pad_dim)
#RT, ZT, yT = RT[:800], ZT[:800], yT[:800]
#RV, ZV, yV = RV[:800], ZV[:800], yV[:800]
# monopole
yV_ = yV[:,:,0]
# dipole (mu_x, mu_y, mu_z)
yV_i_ = yV[:,:,1:4]
# quadrupole diagonal (Q_xx, Q_yy, Q_zz)
yV_ii_ = yV[:,:,[4,7,9]]
# quadrupole off-diagonal (Q_xy, Q_xz, Q_yz)
yV_ij_ = yV[:,:,[5,6,8]]
print('Validation Target Magnitudes (MAD):')
for Z_subset, y_subset in [(ZV, [yV_, yV_i_, yV_ii_, yV_ij_])]:
y_subset_pos = [ys[Z_subset > 0] for ys in y_subset]
mad = [np.mean(np.abs(ys - np.mean(ys))) for ys in y_subset_pos]
print(f'ALL ({y_subset_pos[0].shape[0]:6d}) : q({mad[0]:.4f}) mu({mad[1]:.4f}) Qii({mad[2]:.4f}) Qij({mad[3]:.4f})')
for name, z in elem_z.items():
mask = (Z_subset == z)
y_element = [ys_[mask] for ys_ in y_subset]
if np.sum(mask) > 0:
mad = [np.mean(np.abs(ys - np.mean(ys))) for ys in y_element]
else:
mad = [np.nan for ys in y_element]
print(f'{name:3s} ({np.sum(mask):6d}) : q({mad[0]:.4f}) mu({mad[1]:.4f}) Qii({mad[2]:.4f}) Qij({mad[3]:.4f})')
print()
print('Fitting Model...')
callbacks = [tf.keras.callbacks.ModelCheckpoint(f'models/{args.modelname}.hdf5', save_best_only=True, monitor='val_loss', mode='min'),
tf.keras.callbacks.ReduceLROnPlateau(monitor='val_loss', factor=(10.0 ** (-1/4)), patience=10, verbose=1, mode='min', min_delta=0, cooldown=0, min_lr=(10.0 ** -5))]
model.fit(x=util.RotationGenerator(RT, ZT, yT, batch_size=8),
epochs=nepochs,
validation_data=([RV, ZV], [yV_, yV_i_, yV_ii_, yV_ij_]),
callbacks=callbacks,
verbose=2)
print('...Done')
for w in model.get_layer(name='rbf').get_weights():
print(w)
yV_pred = model.predict([RV, ZV])
yV_err = yV_pred[0] - yV_
yV_i_err = yV_pred[1] - yV_i_
yV_ii_err = yV_pred[2] - yV_ii_
yV_ij_err = yV_pred[3] - yV_ij_
print('Validation Prediction Magnitudes (MAE):')
for Z_subset, y_subset in [(ZV, [yV_err, yV_i_err, yV_ii_err, yV_ij_err])]:
y_subset_pos = [ys[Z_subset > 0] for ys in y_subset]
mae = [np.mean(np.abs(ys)) for ys in y_subset_pos]
print(f'ALL ({y_subset_pos[0].shape[0]:6d}) : q({mae[0]:.4f}) mu({mae[1]:.4f}) Qii({mae[2]:.4f}) Qij({mae[3]:.4f})')
for name, z in elem_z.items():
mask = (Z_subset == z)
y_element = [ys_[mask] for ys_ in y_subset]
if np.sum(mask) > 0:
mae = [np.mean(np.abs(ys)) for ys in y_element]
else:
mae = [np.nan for ys in y_element]
print(f'{name:3s} ({np.sum(mask):6d}) : q({mae[0]:.4f}) mu({mae[1]:.4f}) Qii({mae[2]:.4f}) Qij({mae[3]:.4f})')
print()
| zachglick/directional-mpnn | train.py | train.py | py | 4,767 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.backend.set_floatx",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name":... |
625334349 | import os
import time
import aiofiles
import aiohttp
import asyncio
from retrying import retry
class Spider(object):
"""
下载路径在实例化时候指定,比如:r'd:\test\\',这个目录如果不存在,会出错。
默认路径为当前文件下的downpic目录,此目录如果不存在会自动生成
"""
def __init__(self, down_path='', ):
self.headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/96.0.4664.45 Safari/537.36',
}
self.data = {"type": "1", "choose_type": "", "page": "1", "size": "20"}
self.num = 0
if down_path == "":
if 'downpic' not in os.listdir('.'): # 当前目录下的downpic目录
os.mkdir('downpic')
self.path = os.path.join(os.path.abspath('.'), 'downpic')
os.chdir(self.path) # 进入文件下载路径
self.down_path = down_path
self.url = "https://www.yituhuitu.com/api/newapi/SourceMaterial/index"
self.beforeurl = 'https://admin.yituhuitu.com/'
self.limit = 10 # tcp连接数
self.page = 0
self.sleep = 0 # 每页抓取间隔时间
# self.headers['Cookie']=(self.getcookie(self.url[:-6]))
def getcookie(self, url):
from playwright.sync_api import sync_playwright
with sync_playwright() as playwright:
browser = playwright.chromium.launch(headless=True)
context = browser.new_context()
page = context.new_page()
page.goto(url)
cookie = context.cookies()
# ---------------------
context.close()
browser.close()
cookie = ';'.join([f'{eh["name"]}={eh["value"]}' for eh in cookie])
return cookie
@retry(stop_max_attempt_number=5, wait_fixed=10000) # 如果出错10秒后重试,最多重试5次
async def run(self, startpage, endpage):
async with aiohttp.TCPConnector(limit=self.limit) as conn: # 限制tcp连接数
async with aiohttp.ClientSession(connector=conn, headers=self.headers, ) as session:
if endpage == 0:
async with session.post(self.url, data=self.data) as respone:
r = await respone.json(content_type='text/html')
ncount = r["data"]["count"]
page_list = r["data"]["page_limit"]
total_page = ncount // int(page_list) + 1 if ncount % int(page_list) else 0
endpage = total_page
print(f'总页数:{total_page}')
data = self.data
for pagen in range(startpage, endpage + 1):
data.update({'page': str(pagen)})
async with session.post(self.url, data=data) as respone:
r = await respone.json(content_type='text/html')
# print(r)
urls = r['data']['archivesInfo']
# print((urls))
# 开始爬一页图片
tasks = [self._get_content(link) for link in urls]
await asyncio.gather(*tasks, return_exceptions=True)
await asyncio.sleep(self.sleep) # 每页间隔时间,太快了,服务器不让抓
self.page += 1
print(f'爬取{self.page}页成功')
print(f'一共下载成功{self.num}张图片')
async def _get_img_links(self, page, session): # 获取图片连接
try:
data = {'p': str(page)}
async with session.get(url=self.url, data=data) as respone:
r = await respone.text()
print(r)
# urls = r['data']
# CONCURRENCY = 20
# semaphore = asyncio.Semaphore(CONCURRENCY)
# getpictasks = [self._get_content(ehurl, semaphore) for ehurl in urls]
# await asyncio.gather(*getpictasks, return_exceptions=True)
self.page += 1
print(f'下载成功{self.page}页')
except Exception as e:
print(e)
async def _get_content(self, link, ): # 传入的是图片连接
if link['litpic'].startswith('/'):
link['litpic'] = self.beforeurl + link['litpic']
# async with semaphore:
async with aiohttp.ClientSession() as session:
try:
async with session.get(url=link['litpic']) as response:
content = await response.read()
await self._write_img(f"{link['aid']}.{link['litpic'].split('.')[-1]}", content)
except (asyncio.TimeoutError, ClientPayloadError):
pass
async def _write_img(self, file_name, content):
file_name = os.path.join(self.down_path, file_name)
# file_name += '.jpg'
async with aiofiles.open(file_name, 'wb') as f:
await f.write(content)
# print('下载第%s张图片成功' % self.num)
self.num += 1
if __name__ == '__main__':
start_time = time.perf_counter()
down_path = r'D:\Download'
startpage = 1
endpage = 0 # 0默认全部爬取
spider = Spider(down_path)
types = ['1', '2'] # 对应 新款图案浏览 满印图案浏览
# 原创付费爆款 要vip,无法爬取
for ehtype in types:
spider.data['type'] = ehtype
loop = asyncio.get_event_loop()
loop.run_until_complete(spider.run(startpage, endpage, ))
print(f'总用时:{time.perf_counter() - start_time:.0f}秒')
| chenxy2022/long | yituhuitu.py | yituhuitu.py | py | 5,707 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.mkdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 26,
... |
733407856 | import sqlite3
import pandas as pd
from sklearn.metrics.pairwise import cosine_similarity
def get_latent_dataframe():
conn = sqlite3.connect('db/latent_features.db')
# Load the DataFrame from the database
latent_df = pd.read_sql_query('SELECT * FROM latent_features', conn,index_col='index')
conn.close()
return latent_df
def get_similar_movies(movie_name,N=12):
latent_df = get_latent_dataframe()
movie_latent = None
if movie_name in latent_df.index:
movie_latent = latent_df.loc[movie_name].values.reshape(1, -1)
similarity_scores = cosine_similarity(movie_latent, latent_df.values)
# Get the top-N similar movies
similar_movies_indices = similarity_scores.argsort()[0][-N-1:-1][::-1] # Exclude the movie itself
recommended_movies = latent_df.iloc[similar_movies_indices].index.tolist()
return recommended_movies
return []
def combine_data():
import os
import json
import pandas as pd
import sqlite3
# Establishing SQLite connection
conn = sqlite3.connect('movie-night.db')
# Loop over all json files in the directory
for filename in os.listdir('people_datasets'):# Replace with your directory path
print(filename)
if filename.endswith('.json'):
with open(os.path.join('people_datasets', filename)) as file: # Replace with your directory path
data = json.load(file)
# Convert json data to pandas DataFrame
df = pd.json_normalize(data)
# Filtering the data by known_for_department
actors = df[df['known_for_department'] == 'Acting']
directors = df[df['known_for_department'] == 'Directing']
# Selecting necessary columns
actors = actors[['id', 'name', 'gender', 'popularity', 'profile_path']]
directors = directors[['id', 'name', 'gender', 'popularity', 'profile_path']]
# Saving the data into SQLite DB
actors.to_sql('actor', conn, if_exists='append', index=False)
conn.commit()
directors.to_sql('director', conn, if_exists='append', index=False)
conn.commit()
# Closing the SQLite connection
conn.close()
def query_data():
import sqlite3
import pandas as pd
# Establishing SQLite connection
conn = sqlite3.connect('instance/movie-night.db')
# Creating a cursor
cur = conn.cursor()
# Executing a SQL query to get the first 10 rows from 'actor' and 'director' tables
cur.execute("SELECT * FROM actor LIMIT 10")
actors = cur.fetchall()
cur.execute("SELECT * FROM director LIMIT 10")
directors = cur.fetchall()
# Closing the cursor and the connection
cur.close()
conn.close()
# Converting the data to pandas DataFrame and printing
actors_df = pd.DataFrame(actors, columns=['id', 'name', 'gender', 'popularity', 'profile_path'])
print("Actors:\n", actors_df)
directors_df = pd.DataFrame(directors, columns=['id', 'name', 'gender', 'popularity', 'profile_path'])
print("\nDirectors:\n", directors_df)
#query_data()
combine_data()
| haris-bit/TMDB | util.py | util.py | py | 3,156 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.read_sql_query",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sklearn.metrics.pairwise.cosine_similarity",
"line_number": 18,
"usage_type": "call"
},
{
"ap... |
1188276033 | import json
from fastapi.testclient import TestClient
from sqlalchemy.orm import Session
from app import config
class TestNodeInfoNodeInfo:
# テスト対象API
apiurl = "/NodeInfo"
# <正常系1>
# 通常参照
def test_nodeinfo_normal_1(self, client: TestClient, session: Session):
resp = client.get(self.apiurl)
payment_gateway = json.load(open("app/contracts/json/PaymentGateway.json", "r"))
personal_info = json.load(open("app/contracts/json/PersonalInfo.json", "r"))
ibet_exchange = json.load(open("app/contracts/json/IbetExchange.json", "r"))
ibet_escrow_json = json.load(open("app/contracts/json/IbetEscrow.json", "r"))
ibet_security_token_escrow_json = json.load(
open("app/contracts/json/IbetSecurityTokenEscrow.json", "r")
)
e2e_messaging_json = json.load(
open("app/contracts/json/E2EMessaging.json", "r")
)
payment_gateway_address = config.PAYMENT_GATEWAY_CONTRACT_ADDRESS
payment_gateway_abi = payment_gateway["abi"]
personalinfo_address = config.PERSONAL_INFO_CONTRACT_ADDRESS
personalinfo_abi = personal_info["abi"]
membership_exchange_address = config.IBET_MEMBERSHIP_EXCHANGE_CONTRACT_ADDRESS
membership_exchange_abi = ibet_exchange["abi"]
coupon_exchange_address = config.IBET_COUPON_EXCHANGE_CONTRACT_ADDRESS
coupon_exchange_abi = ibet_exchange["abi"]
ibet_escrow_address = config.IBET_ESCROW_CONTRACT_ADDRESS
ibet_escrow_abi = ibet_escrow_json["abi"]
ibet_security_token_escrow_address = (
config.IBET_SECURITY_TOKEN_ESCROW_CONTRACT_ADDRESS
)
ibet_security_token_escrow_abi = ibet_security_token_escrow_json["abi"]
e2e_messaging_address = config.E2E_MESSAGING_CONTRACT_ADDRESS
e2e_messaging_abi = e2e_messaging_json["abi"]
assumed_body = {
"payment_gateway_address": payment_gateway_address,
"payment_gateway_abi": payment_gateway_abi,
"personal_info_address": personalinfo_address,
"personal_info_abi": personalinfo_abi,
"ibet_membership_exchange_address": membership_exchange_address,
"ibet_membership_exchange_abi": membership_exchange_abi,
"ibet_coupon_exchange_address": coupon_exchange_address,
"ibet_coupon_exchange_abi": coupon_exchange_abi,
"ibet_escrow_address": ibet_escrow_address,
"ibet_escrow_abi": ibet_escrow_abi,
"ibet_security_token_escrow_address": ibet_security_token_escrow_address,
"ibet_security_token_escrow_abi": ibet_security_token_escrow_abi,
"e2e_messaging_address": e2e_messaging_address,
"e2e_messaging_abi": e2e_messaging_abi,
}
assert resp.status_code == 200
assert resp.json()["meta"] == {"code": 200, "message": "OK"}
assert resp.json()["data"] == assumed_body
# <エラー系1>
# HTTPメソッド不正
# -> 404エラー
def test_nodeinfo_error_1(self, client: TestClient, session: Session):
headers = {"Content-Type": "application/json"}
request_body = json.dumps({})
resp = client.post(self.apiurl, headers=headers, json=json.loads(request_body))
assert resp.status_code == 405
assert resp.json()["meta"] == {
"code": 1,
"description": "method: POST, url: /NodeInfo",
"message": "Method Not Allowed",
}
| BoostryJP/ibet-Wallet-API | tests/app/node_info_NodeInfo_test.py | node_info_NodeInfo_test.py | py | 3,543 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "json.l... |
16636321945 | from torch import nn
__all__ = ["DropPath"]
def drop_path(x, drop_prob: float = 0.0, training: bool = False, scale_by_keep: bool = True):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks).
This is the same as the DropConnect impl I created for EfficientNet, etc networks, however,
the original name is misleading as 'Drop Connect' is a different form of dropout in a separate paper...
See discussion: https://github.com/tensorflow/tpu/issues/494#issuecomment-532968956 ... I've opted for
changing the layer and argument names to 'drop path' rather than mix DropConnect as a layer name and use
'survival rate' as the argument.
"""
if drop_prob == 0.0 or not training:
return x
keep_prob = 1 - drop_prob
shape = (x.shape[0],) + (1,) * (x.ndim - 1) # work with diff dim tensors, not just 2D ConvNets
random_tensor = x.new_empty(shape).bernoulli_(keep_prob)
if keep_prob > 0.0 and scale_by_keep:
random_tensor.div_(keep_prob)
return x * random_tensor
class DropPath(nn.Module):
"""Drop paths (Stochastic Depth) per sample (when applied in main path of residual blocks)."""
def __init__(self, drop_prob: float = 0.0, scale_by_keep: bool = True):
super(DropPath, self).__init__()
self.drop_prob = drop_prob
self.scale_by_keep = scale_by_keep
def forward(self, x):
return drop_path(x, self.drop_prob, self.training, self.scale_by_keep)
def extra_repr(self):
return f"drop_prob={round(self.drop_prob,3):0.3f}"
| BloodAxe/pytorch-toolbelt | pytorch_toolbelt/modules/drop_path.py | drop_path.py | py | 1,579 | python | en | code | 1,447 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 26,
"usage_type": "name"
}
] |
32120333326 | import scrapy
from scrapy.loader import ItemLoader
from goodreads.items import QuoteItem # used to filter the data
class GoodReadsSpider(scrapy.Spider):
#identity
name = 'goodreads'
#request
def start_requests(self):
url = 'https://www.goodreads.com/quotes?page=1'
yield scrapy.Request(url=url,callback=self.parse)
#reponse
def parse(self,response):
for quote in response.selector.xpath("//div[@class='quote']"):
loader = ItemLoader(item=QuoteItem(),selector=quote, response=response)
loader.add_xpath('text',".//div[@class='quoteText']/text()[1]")
loader.add_xpath('author',".//div[@class='quoteText']/child::span")
loader.add_xpath('tags',".//div[@class='greyText smallText left']/a")
yield loader.load_item()
# yield{
# 'text' : quote.xpath(".//div[@class='quoteText']/text()[1]").extract_first(),
# 'author' : quote.xpath(".//div[@class='quoteText']/child::span/text()").extract_first(),
# 'tags' : quote.xpath(".//div[@class='greyText smallText left']/a/text()").extract() # it will extract list
# }
#.extract_first() it will extract first data only
# ".//div" woithout . it will get the same data
#/quote?page=2
next_page = response.selector.xpath("//a[@class='next_page']/@href").extract_first()
if(next_page is not None):
next_page_link = response.urljoin(next_page)
yield scrapy.Request(url=next_page_link,callback=self.parse)
| Karthiindia90/Goodreads | goodreads/spiders/goodreads.py | goodreads.py | py | 1,593 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.Spider",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "scrapy.Request",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "scrapy.loader.ItemLoader",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "goodreads.i... |
31352736355 | # For data loading
from scipy.io import loadmat
# For splitting the data into test, train, validation splits
from sklearn.model_selection import train_test_split
# For manipulation of the arrays
import numpy as np
# For file manipulation and locating
import os
# For plotting
import json
# For showing progress
from tqdm import tqdm
import copy
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
# We define some constants that we are going to reuse
DATA_DIR = "data/"
RAW_DIR = "data/raw/"
PROCESSED_DIR = "data/processed/"
N = 14096
TOTAL_SIZE = 2048
def plot_correlation_matrix(data, name):
"""
Plot the correlation matrix for the features
Parameters
----------
data: numpy array
Feature array
name: string
File name of the correlation matrix
Returns
-------
"""
N,F = data.shape
indeces = np.random.choice(N, size=100, replace=False)
data = data[indeces,:]
sns.set(style="white")
d = pd.DataFrame(data=data)
# Compute the correlation matrix
corr = d.corr()
fig, ax = plt.subplots(figsize=(100,100))
cax = plt.matshow(corr, interpolation="nearest")
plt.colorbar(cax)
plt.title("Features",fontsize=12,y=1.08)
plt.xlabel("Correlation matrix", fontsize=12)
plt.ylabel("Features",fontsize=12)
plt.savefig("results/{}.png".format(name))
plt.close()
def select_features(gallery_camIds, query_camId, gallery_labels, query_label, gallery_features):
"""
Preselects features with the respective query
Parameters
----------
gallery_camIds: numpy array
Camera IDs for the respective gallery images
query_camId: int
Id with respect to which we need to filter the dataset
gallery_labels: numpy array
Labels for the respective gallery images
query_label: int
label with respect to which we need to filter the dataset
gallery_features: numpy array
The gallery samples that we need to filter for this particular query
Returns
-------
selected_gallery_samples: list
* pre-selected gallery samples
selected_gallery_labels: list
* pre-selected gallery labels corresponding to each sample
"""
selected_gallery_samples = []
selected_gallery_labels = []
for j in range(len(gallery_features)):
if not (gallery_camIds[j]==query_camId and gallery_labels[j]==query_label):
selected_gallery_samples.append(gallery_features[j])
selected_gallery_labels.append(gallery_labels[j])
selected_gallery_samples = np.array(selected_gallery_samples)
selected_gallery_labels = np.array(selected_gallery_labels)
return selected_gallery_samples, selected_gallery_labels
def load_mat(file_path, label):
"""
Loading of the data indexes of the images
Parameters
----------
file_path: str
Name of the `.mat` input file
label: str
Name of the sheet for the indexes in the '.mat' input file
Returns
-------
idxs: list
* idxs corresponding to the given category
"""
idxs = loadmat(file_path)[label].flatten()
return (idxs)
def normalize(data):
"""
Removes the mean of the image
normalizses it between 0 and 1
among all data poings
Parameters
----------
data: numpy matrix
Data matrix with features
Returns
-------
_data: numpy matrix
"""
_data = []
shape = data.shape
for i in tqdm(range(len(data))):
_data.append(copy.deepcopy((data[i] - data[i].mean(axis=0)) / data[i].std(axis=0)))
_data = np.array(_data)
_data = _data.reshape(shape)
return _data
def save_data(data, file_path, name):
"""
Saves the data
given the name and
the file path
Parameters
----------
data: numpy matrix
Data matrix with features
file_path: str
File path where the file should be saved
name: str
Specific name of the given file
"""
np.save(file_path + "{}.npy".format(name),data)
def preprocess():
    """
    1. Splits the CUHK03 features into training / query / gallery sets
       using the index arrays from the `.mat` protocol file.
    2. Computes a z-normalized copy of each split (see `normalize`).
    3. Saves every array (raw, normalized, labels, camIds) to PROCESSED_DIR.

    Parameters
    ----------
    None

    Returns
    -------
    all_data: list
        One [features, normalized_features, labels, camIds] list per split,
        ordered training, query, gallery.
    """
    types = ["training","query", "gallery"]
    print("Loading of index data...")
    labels = load_mat(RAW_DIR + "cuhk03_new_protocol_config_labeled.mat", "labels")
    _training_indexes = loadmat(RAW_DIR + 'cuhk03_new_protocol_config_labeled.mat')['train_idx'].flatten()
    _query_indexes = loadmat(RAW_DIR + 'cuhk03_new_protocol_config_labeled.mat')['query_idx'].flatten()
    _gallery_indexes = loadmat(RAW_DIR + 'cuhk03_new_protocol_config_labeled.mat')['gallery_idx'].flatten()
    camIds = loadmat(RAW_DIR + 'cuhk03_new_protocol_config_labeled.mat')['camId'].flatten()
    # The .mat indexes are 1-based; shift to 0-based for numpy indexing.
    training_indexes = np.array([i-1 for i in _training_indexes])
    query_indexes = np.array([i-1 for i in _query_indexes])
    gallery_indexes = np.array([i-1 for i in _gallery_indexes])
    training_labels = labels[training_indexes]
    query_labels = labels[query_indexes]
    gallery_labels = labels[gallery_indexes]
    training_camId = camIds[training_indexes]
    query_camId = camIds[query_indexes]
    gallery_camId = camIds[gallery_indexes]
    print("Loading of features...")
    with open(RAW_DIR + "feature_data.json", 'r') as data:
        features = np.array(json.load(data))
    # N and TOTAL_SIZE are module-level constants — assumed to match the
    # JSON payload's sample count and feature length (TODO confirm).
    features = features.reshape((N,TOTAL_SIZE))
    _training_data = features[training_indexes,:]
    _query_data = features[query_indexes,:]
    _gallery_data = features[gallery_indexes,:]
    print("Normalizing data...")
    training_data = copy.deepcopy(_training_data)
    query_data = copy.deepcopy(_query_data)
    gallery_data = copy.deepcopy(_gallery_data)
    plot_correlation_matrix(training_data,"training_corr_matrix")
    plot_correlation_matrix(query_data,"query_corr_matrix")
    plot_correlation_matrix(gallery_data,"gallery_corr_matrix")
    training_data_normalized = normalize(_training_data)
    query_data_normalized = normalize(_query_data)
    gallery_data_normalized = normalize(_gallery_data)
    print("Saving data...")
    all_data = [[training_data, training_data_normalized ,training_labels, training_camId], \
                [query_data, query_data_normalized, query_labels, query_camId], \
                [gallery_data, gallery_data_normalized ,gallery_labels, gallery_camId]]
    for i,t in enumerate(types):
        save_data(all_data[i][0],PROCESSED_DIR,"{}_features".format(t))
        save_data(all_data[i][1],PROCESSED_DIR,"{}_normalized_features".format(t))
        save_data(all_data[i][2],PROCESSED_DIR,"{}_labels".format(t))
        save_data(all_data[i][3],PROCESSED_DIR,"{}_camId".format(t))
    return all_data
def load_data(z_normalized=True):
    """
    Load the cached preprocessed arrays from PROCESSED_DIR, generating them
    first via preprocess() when the cache does not exist yet.

    Parameters
    ----------
    z_normalized: bool
        When True, load the z-normalized feature arrays, otherwise the raw ones.

    Returns
    -------
    all_data: list
        One [features, labels, camIds] list per split, ordered
        training, query, gallery.
    """
    cache_marker = os.path.join(DATA_DIR, "processed/", "training_normalized_features.npy")
    if not os.path.exists(cache_marker):
        print("Generating new data...")
        # preprocess() writes every split to PROCESSED_DIR as a side effect.
        # Its return value was previously pruned with a series of `del`s and
        # then unconditionally discarded (all_data was rebuilt from disk
        # below), so that dead bookkeeping has been removed.
        preprocess()
    print("Loading data...")
    types = ["training", "query", "gallery"]
    feature_suffix = "_normalized_features.npy" if z_normalized else "_features.npy"
    all_data = []
    for t in types:
        data = [
            np.load(PROCESSED_DIR + t + feature_suffix),
            np.load(PROCESSED_DIR + "{}_labels.npy".format(t)),
            np.load(PROCESSED_DIR + "{}_camId.npy".format(t)),
        ]
        all_data.append(data)
    print("Finished loading data...")
    return all_data
if __name__ == '__main__':
    # Script entry point: regenerate every cached split from the raw files.
    preprocess()
| martinferianc/PatternRecognition-EIE4 | Coursework 2/pre_process.py | pre_process.py | py | 8,332 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.random.choice",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "seaborn.set",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
... |
29719398042 |
# # Hot Wire RNN
#%%
import tensorflow as tf
import numpy as np
import matplotlib.pyplot as plt
import time
#%%
#Import data, U for velocity and sample rate as frequency
import h5py
with h5py.File('../resource/asnlib/publicdata/hot_wire_data.mat', 'r') as f:
U = list(f['U'])
freq = list(f['samp_rate'])
#%%
def prep_dataset(U, freq, split_ratio):
    """Build a time axis for the first 10000 hot-wire samples and split both
    the signal and its time axis into train/validation chunks by `split_ratio`."""
    ###
    signal = U[1][0:10000]
    # Sample spacing is 1/freq[0]; the axis covers the full record and is
    # then truncated to the same first 10000 samples as the signal.
    time_axis = (np.arange(0, len(U[1])) * (1 / freq[0]))[0:10000]
    split_at = int(len(signal) * split_ratio)
    x_train, x_valid = signal[:split_at], signal[split_at:]
    time_train, time_valid = time_axis[:split_at], time_axis[split_at:]
    ###
    return time_train, x_train, time_valid, x_valid
split_ratio = 0.8  # 80% of the record for training, 20% held out for validation
time_train, x_train, time_valid, x_valid = prep_dataset(U,freq,split_ratio)
#%%
#Create windowed dataset with window size, batch size and shuffle buffer for training and validation
def windowed_dataset(series, window_size, batch_size, shuffle_buffer):
    """Turn a 1-D series into shuffled, batched (window, next_value) pairs."""
    ###
    full_window = window_size + 1  # window_size inputs plus one target value
    ds = (
        tf.data.Dataset.from_tensor_slices(series)
        .window(full_window, shift=1, stride=1, drop_remainder=True)
        .flat_map(lambda w: w.batch(full_window))
        .shuffle(shuffle_buffer)
        .map(lambda w: (w[:-1], w[-1]))
        .batch(batch_size)
        .prefetch(1)
    )
    ###
    return ds
window_size = 60  # number of past samples fed to the model per prediction
batch_size = 32
shuffle_buffer_size = 1000
train_dataset = windowed_dataset(x_train, window_size, batch_size, shuffle_buffer_size)
test_dataset = windowed_dataset(x_valid, window_size, batch_size, 1)  # buffer of 1 => effectively unshuffled
#%%
#Build a good infrastructure for LSTM model, includes lambda functions for creating a 3D tensor and scaling the output to be in the same range as the input
def build_model():
    """Assemble and compile the LSTM regressor: expand the input to 3-D,
    one LSTM layer, a scalar head, and a x10 rescale back into the
    signal's range. Compiled with Adam + Huber loss and MAPE metric."""
    ###
    layers = [
        tf.keras.layers.Lambda(lambda x: tf.expand_dims(x, axis=-1), input_shape=[None]),
        tf.keras.layers.LSTM(100, return_sequences=False),
        tf.keras.layers.Dense(1),
        tf.keras.layers.Lambda(lambda x: x * 10.0),
    ]
    net = tf.keras.models.Sequential(layers)
    net.compile(
        optimizer=tf.keras.optimizers.Adam(learning_rate=1e-4),
        loss=tf.keras.losses.Huber(),
        metrics=['mape'],
    )
    ###
    return net
model = build_model()
model.summary()  # print the layer/parameter summary to stdout
#%%
#Train model using optimal learning rate and no. of epochs
def train_model(train_dataset):
    """Fit the module-level `model` on `train_dataset` for 30 epochs.

    Uses the module-level `test_dataset` for validation. Returns a tuple of
    (wall-clock runtime in seconds, Keras History, trained model).
    """
    start_time = time.time()
    ###
    history = model.fit(train_dataset, epochs = 30, validation_data = test_dataset)
    ###
    end_time = time.time()
    runtime = end_time-start_time
    return runtime, history, model  # runtime is measured around fit() only
runtime, history, model = train_model(train_dataset)
#%%
# Visualize the training loss curve for this run.
epochs = range(len(history.history['loss']))
plt.plot ( epochs, history.history['loss'], label = 'Training')
# plt.plot ( epochs, history.history['val_loss'], label = 'Validation')
plt.title ('Training and validation loss')
plt.xlabel('epochs')
plt.legend()
plt.ylim([0,0.01])
#%%
# Model prediction and visualisation against the held-out validation data.
forecast = model.predict(test_dataset)
# Plot predictions alongside ground truth; the first window_size samples
# have no prediction, hence the slicing below.
plt.figure(figsize=(10, 6))
plt.plot(time_valid[window_size:],x_valid[window_size:], label='data')
plt.plot(time_valid[window_size:],forecast, label='RNN prediction on validation data')
plt.xlabel('time step')
plt.ylabel('label')
plt.title('RNN prediction')
plt.legend()
plt.ylim([9,9.5])
plt.xlim([0.145,0.15])
| nathanboachie/sw_projects | src_deeplearningtf/wire_rnn_lstm_tf.py | wire_rnn_lstm_tf.py | py | 3,546 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "h5py.File",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "tensorflow.data.Dataset.from_tensor_slices",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "ten... |
25734461539 | import discord
import os
from discord.ext import commands
from asyncio import sleep
from dotenv import load_dotenv
from cogs.giveaway import giveaway
from cogs.polls import Polls
load_dotenv()  # pull the bot token (TOKEN2) and friends from the .env file
intents = discord.Intents.default()
intents.members = True  # needed so on_member_join events are delivered
prefix = commands.when_mentioned_or('-')
bot = commands.Bot(command_prefix=prefix, intents=intents)
def load_extensions(directory):
    """Recursively register every cog module found under `directory`.

    Each `.py` file is loaded as a bot extension using its dotted module
    path; sub-directories are recursed into.
    """
    for file in os.listdir(directory):
        if file.endswith(".py"):
            # Convert the filesystem prefix into a dotted module path.
            # Bug fix: the original only stripped "./", so files in nested
            # directories produced invalid names like "cogs/polls.foo" —
            # path separators must become dots for load_extension.
            package = directory.replace(".py", "").replace("./", "").replace("/", ".")
            try:
                bot.load_extension(f"{package}.{file[:-3]}")
                print(f"[Bot] Loaded extension {file[:-3]} in {package}!")
            # NOTE(review): confirm discord.ExtensionFailed resolves in the
            # installed discord.py version (the error types live in
            # discord.ext.commands in some releases).
            except discord.ExtensionFailed as error:
                print(f"[Bot] Couldn't load extension {file[:-3]} in {package}!\n [Error] {error}")
        elif os.path.isdir(f"{directory}/{file}"):
            # Bug fix: recurse with the actual parent directory; the
            # original hard-coded "./cogs", breaking nesting beyond one level.
            load_extensions(f"{directory}/{file}")
load_extensions("./cogs")
@bot.event
async def on_ready():
    # Fired once the gateway connection is up; resume any giveaways and
    # polls that were still active when the bot last shut down.
    print(f"[Bot] I have started up and logged in {bot.user.name}#{bot.user.discriminator}!")
    g = giveaway(bot)
    p = Polls(bot)
    await g.check_for_active_giveaways(bot)
    await p.check_for_active_polls()
@bot.event
async def on_member_join(member):
    # Wait 10 minutes, then refresh the first channel whose name starts
    # with "member" to show the new member count.
    # NOTE(review): the purpose of the delay is not evident from the code
    # (possibly Discord's channel-rename rate limit) — confirm.
    await sleep(10*60)
    for channel in member.guild.channels:
        if channel.name.startswith('member'):
            await channel.edit(name=f'Members | {member.guild.member_count}')
            break
bot.run(os.getenv("TOKEN2"))  # blocking: runs the event loop until shutdown
| tyler7kehoe/Argus | bot.py | bot.py | py | 1,489 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "discord.Intents.default",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "discord.Intents",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "disco... |
25465846204 | from drf_yasg.utils import swagger_auto_schema
from rest_framework.generics import ListCreateAPIView
from rest_framework.response import Response
from rest_framework.views import APIView
from rest_framework.parsers import MultiPartParser
from service_objects.services import ServiceOutcome
from api.constants import *
from api.custom_schema import *
from api.serializers import ApiCommentSerializer, ApiCreateCommentSerializer
from api.services import ListCommentForPhotoService, ApiCreateCommentService, ApiUpdateCommentService, \
ApiDeleteCommentService
from api.utils import CustomTokenAuthentication
class CommentListCreateView(ListCreateAPIView):
    """List the comments for a photo (GET) or create a new comment (POST).

    Both handlers delegate the actual work to service objects executed via
    ServiceOutcome; the view only maps outcomes and errors onto DRF responses.
    """
    parser_classes = [MultiPartParser, ]
    authentication_classes = (CustomTokenAuthentication,)

    @swagger_auto_schema(responses={status.HTTP_200_OK: 'successes'})
    def get(self, request, *args, **kwargs):
        # NOTE(review): this handler reads e.detail / e.status_code while the
        # other handlers read error.errors_dict / error.response_status —
        # confirm the service layer raises a consistent exception type.
        try:
            outcome = ServiceOutcome(
                ListCommentForPhotoService, kwargs | {USER: request.user if request.user.is_authenticated else None}
            )
        except Exception as e:
            return Response({ERROR: e.detail, STATUS_ERROR: e.status_code}, status=e.status_code)
        return Response(
            ApiCommentSerializer(outcome.result, context={ID_OF_USER: request.user.pk if request.user.pk else None,
                                                          'request': request}, many=True).data,
            outcome.response_status or status.HTTP_200_OK, )

    @swagger_auto_schema(**COMMENT_CREATE)
    def post(self, request, *args, **kwargs):
        # Create a comment from the multipart POST body; anonymous users
        # are passed through as USER=None for the service to validate.
        try:
            outcome = ServiceOutcome(
                ApiCreateCommentService, request.POST.dict() | kwargs |
                {USER: request.user if request.user.is_authenticated else None}
            )
        except Exception as error:
            return Response(
                {
                    ERROR: {key: value for key, value in error.errors_dict.items()},
                    STATUS_ERROR: error.response_status
                }, status=status.HTTP_400_BAD_REQUEST)
        return Response(ApiCreateCommentSerializer(outcome.result, context={'request': request}).data,
                        outcome.response_status or status.HTTP_201_CREATED, )
class CommentUpdateDestroyView(APIView):
    """Delete (DELETE) or partially update (PATCH) a single comment.

    Mirrors CommentListCreateView: the work happens in service objects run
    through ServiceOutcome, with validation errors mapped to HTTP 400.
    """
    parser_classes = [MultiPartParser, ]
    authentication_classes = (CustomTokenAuthentication,)

    @swagger_auto_schema(**COMMENT_DELETE)
    def delete(self, request, *args, **kwargs):
        try:
            outcome = ServiceOutcome(
                ApiDeleteCommentService, kwargs | {USER: request.user if request.user.is_authenticated else None}
            )
        except Exception as error:
            return Response(
                {
                    ERROR: {key: value for key, value in error.errors_dict.items()},
                    STATUS_ERROR: error.response_status
                }, status=status.HTTP_400_BAD_REQUEST)
        return Response(status=status.HTTP_204_NO_CONTENT)

    @swagger_auto_schema(**COMMENT_PATCH)
    def patch(self, request, *args, **kwargs):
        try:
            outcome = ServiceOutcome(
                ApiUpdateCommentService, request.data.dict() | kwargs |
                {USER: request.user if request.user.is_authenticated else None}
            )
        except Exception as error:
            return Response(
                {
                    ERROR: {key: value for key, value in error.errors_dict.items()},
                    STATUS_ERROR: error.response_status
                }, status=status.HTTP_400_BAD_REQUEST)
        return Response(ApiCreateCommentSerializer(outcome.result, context={'request': request}).data,
                        outcome.response_status or status.HTTP_200_OK)
| sasha-pipec/mentor_project | api/views/comment/views.py | views.py | py | 3,808 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "rest_framework.parsers.MultiPartParser",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "api.utils.CustomTokenAuthentication",
"line_number": 18,
... |
29029006849 | """
Definition of forms.
"""
from django import forms
from django.contrib.auth.forms import AuthenticationForm
from django.utils.translation import ugettext_lazy as _
from django.db import models # Лаба 9
from .models import Comment # Лаба 9
from .models import Blog #лаба 10
from .models import Catalog #New
class BootstrapAuthenticationForm(AuthenticationForm):
    """Authentication form which uses Bootstrap CSS classes.

    The placeholder strings are user-facing Russian text and are kept as-is.
    """
    username = forms.CharField(max_length=254,
                               widget=forms.TextInput({
                                   'class': 'form-control',
                                   'placeholder': 'Имя пользователя'}))
    password = forms.CharField(label=_("Password"),
                               widget=forms.PasswordInput({
                                   'class': 'form-control',
                                   'placeholder':'Пароль'}))
class Support(forms.Form):
    """Support/contact form: name, city, gender, legal status, e-mail, message.

    Labels and choice texts are user-facing Russian strings, left untouched.
    """
    name = forms.CharField(label='Ваше имя', min_length=2, max_length=100)
    city = forms.CharField(label='Ваш город', min_length=2, max_length=100)
    gender = forms.ChoiceField(label='Ваш пол',
                               choices=[('1','Мужской'),('2','Женский')],
                               widget=forms.RadioSelect, initial=1)
    face = forms.ChoiceField(label='Вы являетесь',
                             choices=(('1','индивидуальным предпринимателем'),
                                      ('2','физическим лицом'),
                                      ('3','юридическим лицом'),
                                      ('4','организацией')), initial=1)
    email = forms.EmailField(label='Ваш e-mail', min_length=7)
    message = forms.CharField(label='Текст обращения', widget=forms.Textarea(attrs={'rows':12, 'cols':20}))
class CommentForm (forms.ModelForm): # lab 9
    """Comment form: the user fills in only the text field."""
    class Meta:
        model = Comment # model backing this form
        fields = ('text',) # only the text field has to be filled in
        labels = {'text': "Комментарий"} # label shown for the text field
    # author is selected automatically from the authenticated user
    # date is added automatically when the record is created
# lab 10 section below
class BlogForm(forms.ModelForm):
    """Blog post form: title, short description, full content and an image."""
    class Meta:
        model = Blog
        fields = ('title','description','content','image',)
        labels = {'title':"Заголовок",'description':"Краткое содержание",'content':"Полное содержание",'image':"Изображение"}
# end of lab 10 section
class CatalogForm(forms.ModelForm):
    """Catalog entry form: the user fills in only the text field."""
    class Meta:
        model = Catalog # model backing this form
        fields = ('text',) # only the text field has to be filled in
        labels = {'text': "",} # deliberately empty label for the text field
| InovTe4/DjangoWebProject1 | app/forms.py | forms.py | py | 3,202 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.forms.AuthenticationForm",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 15,
"usage_type": "name"
},
{
"... |
40086679560 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 27 13:25:02 2021
@author: Luca Medeiros
"""
import glob
import yaml
import torch
import wandb
import random
import json
from argparse import ArgumentParser, Namespace
from collections import defaultdict
from sklearn.metrics import f1_score
from model import EmbedderWrapper
from utils import ImageInstance
from tqdm import tqdm
def read_config(config):
    """Parse the YAML file at `config` into an argparse-style Namespace."""
    with open(config, 'r') as stream:
        parsed = yaml.safe_load(stream)
    return Namespace(**parsed)
def make_dataloader(cfg, test_path):
    """Build a batch-size-1 DataLoader over the images under `test_path`.

    NOTE(review): `cfg` is currently unused — confirm whether transform or
    loader settings were meant to come from it.
    """
    test = ImageInstance(test_path,
                        transform=None)
    print('Testset length: ', len(test))
    return torch.utils.data.DataLoader(test,
                                       batch_size=1,
                                       shuffle=True,
                                       num_workers=16)
def calculate_porcentage(counter):
    """Reduce a {target: {prediction: count}} tally to top-1 summaries.

    For every target label, keeps only its single most frequent prediction
    together with that prediction's share of the target's total count, and
    records the reverse mapping prediction -> target.

    Returns
    -------
    topk: defaultdict(list)
        target -> [[top1_prediction, fraction_of_total]]
    trans: dict
        top1_prediction -> target
    """
    topk = defaultdict(list)
    trans = {}
    for target, preds in counter.items():
        total = sum(preds.values())
        best = max(preds, key=preds.get)
        topk[target].append([best, preds[best] / total])
        trans[best] = target
    return topk, trans
def inference(model, dataloader, name, type_, constraint=None):
    """Run zero-shot classification over `dataloader`, log results to wandb.

    Tallies target/prediction pairs into a counter, dumps the top-1 summary
    and prediction->target mapping as JSON next to the dataset, computes a
    macro F1 and logs a wandb table under `name`/`type_`.

    NOTE(review): the `constraint` parameter is currently unused — the flag
    logged is hard-coded to 'not_constraint'; confirm intent.
    """
    const_flag = 'not_constraint'
    classes = dataloader.dataset.classes
    path = dataloader.dataset.path
    classes_in = classes
    table = wandb.Table(columns=['img', 'target', 'pred', 'score', 'basename'])
    counter = defaultdict(lambda: defaultdict(int))
    with torch.no_grad():
        for batch_idx, batch in enumerate(tqdm(dataloader)):
            # Cap the evaluation at ~1000 batches.
            if batch_idx > 1000:
                break
            inputs, targets, indexes, filenames, class_names = batch
            # classes_in = random.sample(classes, 8)
            # classes_in.append(class_names[0])
            results = model(inputs.cuda(), classes_in)
            targets = [classes[k] for k in targets]
            for basename, img, target, pred, score in zip(filenames,
                                                          inputs,
                                                          targets,
                                                          results['preds'].cpu(),
                                                          results['scores'].cpu()):
                pred_class = classes_in[pred]
                # if score < 0.3:
                #     pred_class = 'unknown'
                counter[target][pred_class] += 1
                table.add_data(wandb.Image(img),
                               target,
                               pred_class,
                               float((score * 100)),
                               str(basename))
    topk, trans = calculate_porcentage(counter)
    with open(path + 'topk.json', 'w') as f:
        json.dump(dict(topk), f, ensure_ascii=False, indent=2)
    with open(path + 'trans.json', 'w') as f:
        json.dump(dict(trans), f, ensure_ascii=False, indent=2)
    y_true = [row[1] for row in table.data]
    y_pred = [row[2] for row in table.data]
    # When the model is unconstrained, map raw predictions back to target
    # vocabulary via the learned translation before scoring.
    if not model.constraint:
        y_pred = [trans[k] if k in trans else k for k in y_pred]
    f1 = f1_score(y_true, y_pred, average='macro')
    wandb.log({f'{name}/{const_flag}_{type_}_f1': f1}, commit=False)
    wandb.log({f'{name}/{const_flag}_{type_}': table}, commit=True)
def main(args):
    """Evaluate a checkpointed EmbedderWrapper on every dataset folder
    under `args.data_path`, logging per-folder results to wandb.

    Raises
    ------
    ValueError
        If `cfg.resume` is empty — a checkpoint is required for evaluation.
    """
    cfg = read_config(args.config)
    project, instance = cfg.instance.split('/')
    wandb.init(project=project, name=instance, job_type='eval')
    if cfg.resume == '':
        # Bug fix: `raise 'Resume model not set'` raised a TypeError
        # ("exceptions must derive from BaseException"), not the message.
        raise ValueError('Resume model not set')
    model = EmbedderWrapper.load_from_checkpoint(cfg.resume)
    model.eval()
    model.cuda()
    folders = glob.glob(args.data_path + '/*/')
    for path in folders:
        # Paths look like .../<name>/<type_>/ — the trailing component is
        # empty because the '/*/' glob pattern yields a trailing slash.
        name, type_, _ = path.split('/')[-3:]
        test_dataloader = make_dataloader(cfg, path)
        inference(model, test_dataloader, name, type_)
    # Everything that previously followed an unconditional `return` here
    # (a stale copy of the old evaluation loop) was unreachable and has
    # been removed.
if __name__ == '__main__':
    # CLI entry point: --config points at the YAML file, --data_path at a
    # directory holding one sub-folder per dataset to evaluate.
    parser = ArgumentParser()
    parser.add_argument('--config', type=str, help='YML config file')
    parser.add_argument('--data_path', type=str, help='path to input data')
    args = parser.parse_args()
    main(args)
| luca-medeiros/zeroshot_lightning | test.py | test.py | py | 7,875 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "yaml.safe_load",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "argparse.Namespace",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "utils.ImageInstance",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.utils.dat... |
41028938045 | from Functions import Functions_Configs as configFunc
from Functions import Functions_SQL_Interfacing as sqlf
from Functions import Functions_FFlogsAPI as fflogs
import pandas as pd
import csv
import time
import os
import datetime
import mysql.connector
import _01_Extract_Data as extract
import _02_Map_Statics as mapstatic
pd.options.display.width = None
def get_Static_Performance(sqlconnection):
    """Aggregate per-static, per-boss raid statistics and write them back
    to the `static_data` MySQL table.

    For every static group and every boss zone it fought, computes first
    attempt/clear dates, wipe and clear counts, time spent overall and
    until the first clear, and the number of distinct raid days. Existing
    rows for the static are deleted before the fresh rows are saved.
    """
    reporttable = 'reportdata'
    reportdf = sqlf.get_from_MYSQL_to_df(sqlconnection, reporttable)
    reportdf_reportsorted = reportdf.set_index('reportid')
    fighttable = 'fights'
    fightdf_original = sqlf.get_from_MYSQL_to_df(sqlconnection, fighttable)
    # Attach each fight's report date, then sort chronologically by date
    # and run number so positional slicing below follows raid order.
    fightdf_original['date'] = [reportdf_reportsorted.loc[report, 'date'] for report in fightdf_original['reportid']]
    fightdf = fightdf_original.sort_values(['date', 'run_num'])
    fightdf.reset_index(inplace=True, drop=True)
    # print(reportdf)
    statictable = 'static_data'
    print(fightdf)
    bosscolumn = 'boss_zone'
    all_statics = fightdf['static'].drop_duplicates()
    # all_statics = all_statics[all_statics != 'N/A']
    static_columns = ['static', 'boss_zone', 'first_attempt', 'first_clear',
                      'total_wipes', 'total_clears', 'total_time_toclear',
                      'total_time_spent', 'zone_area_name', 'days_met', 'days_met_until_first']
    # print(all_statics)
    for eachstatic in all_statics:
        static_data = []
        print('Analyzing fights for ' + str(eachstatic))
        static_fights = fightdf[fightdf['static'] == eachstatic]
        bosses = static_fights[bosscolumn].drop_duplicates()
        # print(bosses)
        # 'defeated' is stored as the strings 'True'/'False', not booleans.
        clearedbosses = static_fights[static_fights['defeated'] == 'True']
        wipedbosses = static_fights[static_fights['defeated'] == 'False']
        for eachboss in bosses:
            # print(eachboss)
            all_bosses = static_fights[static_fights['boss_zone'] == eachboss]
            dates_found = all_bosses['date']
            # print(dates_found)
            first_attempt = min(dates_found.tolist())
            total_wipes = 0
            # Sentinel date used when the boss was never cleared.
            first_clear = datetime.datetime(2000, 1, 1)
            total_clears = 0
            days_until_clear = 0
            if any(wipedbosses[bosscolumn] == eachboss):
                bosswipes = wipedbosses[wipedbosses[bosscolumn] == eachboss]
                total_wipes = len(bosswipes)
            if any(clearedbosses[bosscolumn] == eachboss):
                bossclears = clearedbosses[clearedbosses[bosscolumn] == eachboss]
                cleardates = bossclears['date'].tolist()
                if len(cleardates) > 0:
                    first_clear = min(cleardates)
                    total_clears = len(cleardates)
            days_met = len(dates_found.drop_duplicates())
            static_date_df = fightdf[(fightdf['static'] == eachstatic) & (fightdf[bosscolumn] == eachboss)]
            static_date_df.reset_index(inplace=True, drop=True)
            dates = static_date_df['date']
            # Positional index of the first clearing fight, if any.
            clear_index = dates.where((dates == first_clear) & (static_date_df['defeated'] == 'True')).first_valid_index()
            total_time_spent = static_date_df['fight_length_min'].sum()
            if clear_index is not None:
                wipes_to_clear = static_date_df[:clear_index+1]
                days_until_clear = len(dates_found[:clear_index+1].drop_duplicates())
                total_time_toclear = wipes_to_clear['fight_length_min'].sum()
            else:
                total_time_toclear = 0
            zone_area_name = all_bosses['zone_area_name'].drop_duplicates().values[0]
            values = [eachstatic, eachboss, first_attempt, first_clear, total_wipes,
                      total_clears, total_time_toclear, total_time_spent, zone_area_name,
                      days_met, days_until_clear]
            static_data.append(dict(zip(static_columns, values)))
        print('Saving static date for: ' + str(eachstatic))
        static_df = pd.DataFrame.from_dict(static_data)
        # NOTE(review): static_columns is rebound here to the DataFrame's
        # column order, which feeds the next loop iteration's zip as well.
        static_columns = list(static_df.columns)
        # NOTE(review): SQL built by string concatenation — safe only while
        # static names are trusted; parameterize if they ever come from users.
        delete_syntax = 'DELETE FROM ' + statictable + " WHERE static = '" + eachstatic + "'"
        # print(delete_syntax)
        sqlf.save_to_SQL(sqlconnection, statictable, static_columns, static_df, delete_syntax)
if __name__ == '__main__':
    # Read MySQL credentials from the config CSV and run the aggregation.
    config, found = configFunc.check_for_config('SQL_Config.csv')
    # get SQL connection working. Add database=config['Database_Name'] if not using localhost
    connection = mysql.connector.connect(host=config['Host'],
                                         user=config['User'],
                                         passwd=config['Password'],
                                         database=config['Database_Name'])
    get_Static_Performance(connection)
    # with pd.ExcelWriter('C:/Users/Eugene/Desktop/Projects/FFLogs Extraction/testdata.xlsx') as writer:
    #     fights.to_excel(writer, sheet_name='fights_sorted')
| ystol/FFlogs-Analysis | _04_Get_static_performance.py | _04_Get_static_performance.py | py | 4,951 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.options",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "Functions.Functions_SQL_Interfacing.get_from_MYSQL_to_df",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "Functions.Functions_SQL_Interfacing",
"line_number": 18,
"us... |
9285859360 | from os import path
from tkinter import Label, Frame, TOP, BOTTOM, RIGHT, LEFT, N, E, S, W, X, Y, BOTH, Tk, filedialog
import math
from pathlib import Path
import pyautogui
import numpy as np
from time import sleep
from PIL import Image, ImageTk
from pyautogui import position
import cv2
from src.auto_align import auto_align
from src.lib import state_manager
from src.lib.variables import Env, WATER_COMPANIES
from src.lib.tk_overlay import TkOverlay
from src.lib.pdf_processor import PdfProcessor
from src.lib.tk_resizer import TkResizer
from src.lib.utils import Utils
from src.lib.file_prompt import FilePromptSave
class Snap(TkOverlay):
    def __init__(self, root, pipe_type, water_company):
        """Full-screen, semi-transparent overlay for aligning and capturing
        a square screen region for the given pipe type.

        Restores position/size/rotation from the persisted state, builds
        the draggable image frame plus instruction labels, and wires up
        mouse/keyboard bindings.
        """
        super().__init__(root)
        self.initial_img = Image.open(path.join(Env.appdata_path, "images/initial.png"))
        state = state_manager.get()
        self.initial_x = 0
        self.initial_y = 0
        self.previous_rotation = int(state["rotation"] if "rotation" in state else 0)
        self.water_company = water_company
        # Capture geometry, restored from the last session when available.
        self.capture_x = int(state["x"] if "x" in state else 0)
        self.capture_y = int(state["y"] if "y" in state else 0)
        self.capture_size = int(state["size"] if "size" in state else 0)
        self.capture_rotation = int(state["rotation"] if "rotation" in state else 0)
        self.pipe_type = pipe_type
        self.img_path = path.join(Env.appdata_path, f"images/{pipe_type}.png")
        self.generate_frames()
        # Semi-transparent fullscreen window so the user sees the screen below.
        self.root.attributes("-fullscreen", True)
        self.root.attributes("-alpha", 0.5)
        self.image_frame = Frame(
            self.back_frame,
            bg="#212121"
        )
        self.Resizer = TkResizer(self.image_frame)
        initial_photoimage = ImageTk.PhotoImage(self.initial_img)
        self.image_label = Label(
            self.image_frame,
            image=initial_photoimage,
            bg="#212121",
            borderwidth=0,
            cursor="fleur"
        )
        # Keep a reference on the widget so the PhotoImage isn't GC'd.
        self.image_label.image = initial_photoimage
        self.image_label.pack()
        self.top_label = Label(
            self.back_frame,
            text="Click and drag to align, resize and rotate",
            font=("Courier", 16),
            bg="black",
            fg="white",
            padx=10, pady=10
        )
        self.top_label.pack(side=TOP, pady=(100, 0))
        self.top_info_label = Label(
            self.back_frame,
            font=("Calibri", 12),
            bg="black",
            fg="white",
            padx=10, pady=5
        )
        self.top_info_label.pack(side=TOP, pady=(5, 0))
        self.right_label = Label(
            self.back_frame,
            text="ARROW KEYS - Align\nPLUS - Enlarge\nMINUS - Shrink\n\nENTER - Confirm\nESC - Cancel",
            font=("Courier", 16),
            bg="black",
            fg="white",
            padx=10, pady=10
        )
        self.right_label.pack(side=RIGHT, padx=(0, 50), anchor=N)
        self.root.bind("<Key>", self.key_press)
        self.root.bind("<Button-1>", self.mouse_1_down)
        self.root.bind("<ButtonRelease-1>", self.mouse_1_up)
        self.image_label.bind("<B1-Motion>", self.mouse_1_move)
        self.image_frame.bind("<Configure>", self.update_top_info_label)
        self.Resizer.bind_events(self.corner_resize, self.on_rotate)
        self.back_frame.after(1, self.back_frame_after)
    def on_destroy(self):
        # Persist the capture geometry before the overlay is torn down.
        self.save_state()
    def corner_resize(self, event, corner):
        """Resize the square capture area by dragging one of its corners.

        `corner` is a compass string ("nw", "ne", "sw", "se"); the opposite
        corner stays anchored while the dragged one follows the mouse.
        """
        mouse_x = position()[0]
        mouse_y = position()[1]
        corner_x = self.capture_x
        corner_y = self.capture_y
        # West/north drags anchor against the far edge of the square.
        if "w" in corner:
            corner_x = self.capture_x + self.capture_size
        if "n" in corner:
            corner_y = self.capture_y + self.capture_size
        relative_x = mouse_x - corner_x
        relative_y = mouse_y - corner_y
        if "w" in corner:
            # NOTE(review): np.invert is bitwise NOT (~v == -v - 1), so this
            # mirrors the delta with a one-pixel offset — confirm whether
            # plain negation was intended.
            relative_x = np.invert(relative_x)
        if "n" in corner:
            relative_y = np.invert(relative_y)
        # Keep the region square: take the larger of the two deltas.
        size = relative_x if relative_x > relative_y else relative_y
        size_difference = self.capture_size - size
        if size < 0:
            return
        new_x = self.capture_x
        new_y = self.capture_y
        # Moving a west/north corner shifts the origin as the size changes.
        if "w" in corner:
            new_x = self.capture_x + size_difference
        if "n" in corner:
            new_y = self.capture_y + size_difference
        self.move_initial_img_x(new_x)
        self.move_initial_img_y(new_y)
        self.resize_initial_img(size, True)
    def on_rotate(self, event, side):
        """Rotate the capture image by dragging along one of the four edges.

        The delta from the drag start (initial_x/initial_y) is scaled by 1/4
        degree per pixel, with the sign depending on which edge is dragged.
        """
        x = position()[0]
        y = position()[1]
        rotation = self.previous_rotation
        if side == "n":
            rotation += (self.initial_x - x) / 4
        elif side == "e":
            rotation += (self.initial_y - y) / 4
        elif side == "s":
            rotation += (x - self.initial_x) / 4
        elif side == "w":
            rotation += (y - self.initial_y) / 4
        self.rotate_initial_img(rotation)
    def mouse_1_down(self, event):
        # Remember where the drag started; on_rotate measures against this.
        self.initial_x = position()[0]
        self.initial_y = position()[1]

    def mouse_1_move(self, event):
        # Drag the capture square so that it stays centred on the cursor.
        self.capture_x = int(self.image_frame.winfo_x())
        self.capture_y = int(self.image_frame.winfo_y())
        mouse_x = position()[0]
        mouse_y = position()[1]
        self.move_initial_img_x(mouse_x - (self.capture_size / 2))
        self.move_initial_img_y(mouse_y - (self.capture_size / 2))

    def mouse_1_up(self, event):
        # Commit the rotation reached during the drag and persist geometry.
        self.previous_rotation = self.capture_rotation
        self.save_state()
    def save_state(self):
        # Persist capture geometry so the next session restores position/size.
        state_manager.update({
            "x": self.capture_x,
            "y": self.capture_y,
            "size": self.capture_size
        })
    def key_right(self):
        # Nudge the capture square one pixel to the right.
        self.move_initial_img_x(self.capture_x + 1)

    def key_left(self):
        self.move_initial_img_x(self.capture_x - 1)

    def move_initial_img_x(self, x):
        """Move the capture square horizontally, clamping the widget to the
        screen width.

        NOTE(review): capture_x stores the *unclamped* value while the
        widget placement is clamped — confirm this asymmetry is intended.
        """
        self.capture_x = int(x)
        if x <= 0:
            x = 0
        if x >= Env.res_x - self.capture_size:
            x = Env.res_x - self.capture_size
        # Placement is offset by half the capture size — presumably to
        # account for the frame's anchor point (TODO confirm).
        self.image_frame.place(x=x + math.floor(self.capture_size / 2))

    def key_up(self):
        self.move_initial_img_y(self.capture_y - 1)

    def key_down(self):
        self.move_initial_img_y(self.capture_y + 1)

    def move_initial_img_y(self, y):
        """Move the capture square vertically, clamping the widget to the
        screen height (mirror of move_initial_img_x)."""
        self.capture_y = int(y)
        if y <= 0:
            y = 0
        if y >= Env.res_y - self.capture_size:
            y = Env.res_y - self.capture_size
        self.image_frame.place(y=y + math.floor(self.capture_size / 2))
    def update_top_info_label(self, event):
        # Refresh the "(x, y) | w x h" readout whenever the frame changes.
        self.top_info_label.config(
            text=f"({self.capture_x}, {self.capture_y}) | " +
                 f"{self.capture_size} x {self.capture_size}"
        )

    def key_plus(self):
        # Grow the capture square by 2 px.
        self.resize_initial_img(self.capture_size + 2, False)

    def key_minus(self):
        # Shrink the capture square by 2 px.
        self.resize_initial_img(self.capture_size - 2, False)
    def finish(self):
        """Confirm the selection: capture, post-process, and export as PDF.

        Closes the overlay, screenshots the selected region, converts it to
        BGRA, applies the water-company masks, stamps the image plus the
        copyright strip into the pipe-type PDF template, and prompts the
        user for a save location.
        """
        self.root.destroy()
        self.take_screenshot()
        self.convert_to_alpha()
        self.apply_masks()
        PdfProcess = PdfProcessor(path.join(Env.index_dir, f"./pdf_templates/{self.pipe_type}_template.pdf"))
        PdfProcess.insert_img(
            path.join(Env.appdata_path, f"images/{self.pipe_type}_final.png"),
            ( 60, 46, 472, 472 ), 0
        )
        PdfProcess.insert_img(
            path.join(Env.index_dir, "./images/copyright.png"),
            ( 60, 510, 210, 9 ), 0
        )
        print("Processed PDF")
        state = state_manager.get()
        # NOTE(review): because `+` binds tighter than the conditional, the
        # " CC"/" DD" suffix is appended only to the else-branch "" — when a
        # reference exists in state the suffix is dropped. Confirm whether
        # (reference + suffix) was intended.
        output_path = map_path = FilePromptSave(
            f"Save {self.pipe_type} PDF file", state["save_dir"] if "save_dir" in state else "/",
            [("PDF File", "*.pdf")], ".pdf", state["reference"] if "reference" in state else "" + (" CC" if self.pipe_type == "clean" else " DD")
        ).path
        if output_path:
            PdfProcess.pdf.save(output_path, deflate=True)
            Utils.send_toast(
                f"Created {path.basename(output_path)} at",
                output_path
            )
def resize_initial_img(self, size, no_offset=False):
size = int(size)
self.capture_size = size
if size <= 0 or size > Env.res_x or size > Env.res_y:
return
initial_photoimage = ImageTk.PhotoImage(
self.initial_img
.rotate(self.capture_rotation, Image.BILINEAR, expand=True)
.resize((size, size), Image.BILINEAR)
)
self.image_label.config(image=initial_photoimage)
self.image_label.image = initial_photoimage
offset = -1 if size > self.image_label.winfo_width() else 1
if no_offset:
offset = 0
self.move_initial_img_x(self.capture_x + offset)
self.move_initial_img_y(self.capture_y + offset)
def rotate_initial_img(self, rotation):
rotation = int(rotation)
self.capture_rotation = rotation
initial_photoimage = ImageTk.PhotoImage(
self.initial_img
.rotate(rotation, Image.BILINEAR, expand=True)
.resize((self.capture_size, self.capture_size), Image.BILINEAR)
)
self.image_label.config(image=initial_photoimage)
self.image_label.image = initial_photoimage
    def take_screenshot(self):
        """Capture the selected screen region to ``self.img_path``."""
        # Ensure the destination directory exists.
        Path(Path(self.img_path).parent).mkdir(parents=True, exist_ok=True)
        print(f"{self.capture_size}, {self.capture_size}")
        # +1 offsets presumably skip the 1px selection border drawn by the
        # overlay -- TODO confirm.
        pyautogui.screenshot(self.img_path, (
            self.capture_x + 1, self.capture_y + 1,
            self.capture_size, self.capture_size
        ))
        print(f"{self.pipe_type} screenshot taken at: {self.capture_x}, {self.capture_y}\n Size: {self.capture_size} x {self.capture_size}")
def convert_to_alpha(self):
if not path.exists(self.img_path):
raise f"No {self.pipe_type} image found"
img = cv2.imread(self.img_path)
img = cv2.cvtColor(img, cv2.COLOR_BGR2BGRA)
cv2.imwrite(self.img_path, img)
    def apply_masks(self):
        """Colour-mask the captured image and composite each layer onto the
        initial full-screen snapshot, saving the result as <pipe_type>_final.png.
        """
        final_img = Image.open(path.join(Env.appdata_path, "images/initial.png"))
        img = cv2.imread(self.img_path, flags=cv2.IMREAD_UNCHANGED)
        mask_dir_path = path.join(Env.appdata_path, f"images/masks/{self.pipe_type}")
        Path(mask_dir_path).mkdir(parents=True, exist_ok=True)
        # Per-water-company BGRA colour bounds for this pipe type.
        bgra_bounds = WATER_COMPANIES[self.water_company][self.pipe_type]
        for i in bgra_bounds:
            mask_path = path.join(mask_dir_path, f"{i}.png")
            bgra_bound = bgra_bounds[i]
            masked_img = self.apply_mask(img, bgra_bound)
            print(f"Generated {i} mask")
            # Scale the mask up to the base image's size before pasting.
            masked_img = cv2.resize(
                masked_img,
                (final_img.width, final_img.height),
                interpolation=cv2.INTER_CUBIC
            )
            cv2.imwrite(mask_path, masked_img)
            # Round-trip through disk so PIL can paste using the alpha channel.
            masked_img = Image.open(mask_path)
            final_img.paste(masked_img, (0, 0), masked_img)
        final_img.save(path.join(Env.appdata_path, f"images/{self.pipe_type}_final.png"), "PNG")
def apply_mask(self, img, bgra_bound):
mask = cv2.inRange(
img,
np.asarray(bgra_bound if type(bgra_bound) == list else bgra_bound[0]),
np.asarray(bgra_bound if type(bgra_bound) == list else bgra_bound[1])
)
return cv2.bitwise_and(
img,
img,
mask=mask
)
def key_press(self, event):
key_events = {
27: self.root.destroy,
37: self.key_left,
38: self.key_up,
39: self.key_right,
40: self.key_down,
107: self.key_plus,
109: self.key_minus,
13: self.finish
}
try:
key_events[event.keycode]()
except Exception as err:
print(err)
pass
    def back_frame_after(self):
        """Refocus the overlay and re-place the capture frame from current state."""
        self.back_frame.focus_force()
        # NOTE(review): state_file is opened but never read -- the `with` only
        # has the side effect of raising if state.json is missing. Confirm
        # whether a read was intended or the open can be removed.
        with open(path.join(Env.appdata_path, "state.json"), "r", encoding='utf-8') as state_file:
            self.resize_initial_img(self.capture_size, True)
            # The frame is anchored at its center, so offset by half the size.
            self.image_frame.place(
                anchor="center",
                x=self.capture_x + (self.capture_size / 2),
                y=self.capture_y + (self.capture_size / 2)
            )
{
"api_name": "src.lib.tk_overlay.TkOverlay",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "os.path.join",
... |
import zmq
import os
import time

# Send/receive server built on a ZeroMQ DEALER socket.
process = str(os.getpid())
print('PID: ' + process)
socket = None
try:
    ctx = zmq.Context.instance()
    #socket = ctx.socket(zmq.REP)
    socket = ctx.socket(zmq.DEALER)
    socket.bind('tcp://127.0.0.1:4000')
    # Report the port we actually bound to (the original printed 3000).
    print('Server listening on : 127.0.0.1:4000\n...')
except Exception as e:
    print('Error:' + str(type(e)) + str(e))
    # Only close the socket if it was created before the failure, and stop:
    # the original fell through into the send loop with a dead/undefined socket.
    if socket is not None:
        socket.close()
    raise SystemExit(1)
while True:
    for k in range(1, 10):
        # Sending
        print("Sending a message to client:\n")
        socket.send_string("{}".format(str(k)))
        # Waiting for answer
        message = socket.recv()
        print("Message from client: ", message)
        time.sleep(1)
| mcesarpl/pyZMQ-Monitor-Master-Client | SR_server.py | SR_server.py | py | 681 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.getpid",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "zmq.Context.instance",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "zmq.Context",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "zmq.DEALER",
"line... |
#!/usr/bin/env python
# coding: utf-8

# Combine every per-product JSON file under ./output into allproducts.csv.

import os
import csv
import json

# A single `with` block replaces the original open/write/close + re-open in
# append mode, and guarantees the handle is closed on error.
with open('allproducts.csv', 'w', newline='') as out_file:
    writer = csv.writer(out_file, delimiter=',', lineterminator='\n')
    writer.writerow(["prod_id", "prod_sku", "prod_cat", "prod_name"])
    for root, dirs, files in os.walk('output'):
        for fn in files:
            fp = root + os.sep + fn
            print("Reading from the File: " + fp)
            with open(fp, 'r') as openfile:
                json_dictionary = json.load(openfile)
            row = [
                json_dictionary['prod_id'],
                json_dictionary['prod_sku'],
                json_dictionary['prod_cat'],
                json_dictionary['prod_name'],
            ]
            writer.writerow(row)
            print("Success writing :" + fp + " to allproducts.csv")
| chennavc/threding | combine.py | combine.py | py | 942 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "csv.writer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.walk",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "os.sep",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.sep",
"line_number": 22,
"u... |
37380582441 | # File name: floatlayout.py
from kivy.lang import Builder
from kivy.app import App
from kivy.uix.floatlayout import FloatLayout
from kivy.uix.togglebutton import ToggleButton
from kivy.clock import Clock
from kivy.utils import platform
class FloatLayoutWidget(FloatLayout):
    """Root widget: initialises the button/label declared in the kv file."""
    def __init__(self, *args, **kwargs):
        super(FloatLayoutWidget, self).__init__(*args, **kwargs)
        button = self.ids['button1']
        label = self.ids['label1']
        button.text = 'Hello'
        button.font_size = 40
        label.text = 'World!'
class FloatLayoutApp(App):
    """Kivy app that polls the magnetic field sensor and shows the reading."""
    title = 'Magnetic Sensor'
    def __init__(self, **kwargs):
        super(FloatLayoutApp,self).__init__(**kwargs)
        # NOTE(review): in modern Kivy, kivy.utils.platform is a string, not a
        # callable -- `platform()` would raise TypeError there. Confirm the
        # Kivy version this targets.
        if platform() == 'android':
            from jnius import autoclass
            Hardware = autoclass('org.renpy.android.Hardware')
            self.hw = Hardware()
            # NOTE(review): sensor starts disabled; only on_resume enables it.
            self.hw.magneticFieldSensorEnable(False)
        else:
            # No sensor off-device; update_compass becomes a no-op.
            self.hw = None
    def build(self):
        # Poll the sensor ten times per second.
        Clock.schedule_interval(self.update_compass,0.1)
        return FloatLayoutWidget()
    def update_compass(self,*args):
        if self.hw is None:
            return
        cur = self.hw.magneticFieldSensorReading()
        hell = self.root.ids['label1']
        hell.font_size=35
        hell.text= str(cur)
    def on_pause(self):
        # Stop the sensor while the app is paused.
        if self.hw is None:
            return
        self.hw.magneticFieldSensorEnable(False)
        return True
    def on_resume(self):
        # reactivate the sensor when you are back to the app
        if self.hw is None:
            return
        self.hw.magneticFieldSensorEnable(True)
if __name__=="__main__":
FloatLayoutApp().run()
| Conner-JD/ISAT-280 | lab5/mfield/myfield/main.py | main.py | py | 1,503 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "kivy.uix.floatlayout.FloatLayout",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "kivy.app.App",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "kivy.utils.platform",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "jni... |
73478576745 | from pprint import pformat
from six import iteritems
class PIResponse(object):
    """Generic PI Web API response: HTTP status, headers and decoded content."""

    # attribute name -> swagger type, used when serialising to a dict.
    swagger_types = {
        'status': 'int',
        'headers': 'dict(str, str)',
        'content': 'object',
    }

    # attribute name -> wire (JSON) field name.
    attribute_map = {
        'status': 'Status',
        'headers': 'Headers',
        'content': 'Content',
    }

    def __init__(self, status=None, headers=None, content=None):
        self._status = None
        self._headers = None
        self._content = None
        # Route constructor arguments through the property setters.
        if status is not None:
            self.status = status
        if headers is not None:
            self.headers = headers
        if content is not None:
            self.content = content

    @property
    def status(self):
        """HTTP status code of the response."""
        return self._status

    @status.setter
    def status(self, status):
        self._status = status

    @property
    def headers(self):
        """Response headers as a name -> value mapping."""
        return self._headers

    @headers.setter
    def headers(self, headers):
        self._headers = headers

    @property
    def content(self):
        """Decoded response body."""
        return self._content

    @content.setter
    def content(self, content):
        self._content = content

    def to_dict(self):
        """Return the model's attributes as a plain dict, recursing into
        nested models that expose their own to_dict()."""
        result = {}
        for attr_name, _ in iteritems(self.swagger_types):
            attr_value = getattr(self, attr_name)
            if isinstance(attr_value, list):
                result[attr_name] = [
                    item.to_dict() if hasattr(item, "to_dict") else item
                    for item in attr_value
                ]
            elif hasattr(attr_value, "to_dict"):
                result[attr_name] = attr_value.to_dict()
            elif isinstance(attr_value, dict):
                result[attr_name] = {
                    key: val.to_dict() if hasattr(val, "to_dict") else val
                    for key, val in attr_value.items()
                }
            else:
                result[attr_name] = attr_value
        return result

    def to_str(self):
        """Pretty-printed string form of to_dict()."""
        return pformat(self.to_dict())

    def __repr__(self):
        return self.to_str()

    def __eq__(self, other):
        if not isinstance(other, PIResponse):
            return False
        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not self == other
| dcbark01/PI-Web-API-Client-Python | osisoft/pidevclub/piwebapi/models/pi_response.py | pi_response.py | py | 1,762 | python | en | code | 39 | github-code | 36 | [
{
"api_name": "six.iteritems",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "pprint.pformat",
"line_number": 76,
"usage_type": "call"
}
] |
38115304895 | import torch;
import torch.nn as nn;
import torch.nn.functional as F;
import torchvision;
import torchvision.transforms as transforms;
from torchvision.models.detection.faster_rcnn import FastRCNNPredictor;
from torchvision.models.detection.mask_rcnn import MaskRCNNPredictor;
from engine import train_one_epoch, evaluate;
import utils;
from pycocotools.coco import COCO;
import numpy as np;
from PIL import Image;
import matplotlib.pyplot as plt;
def barycenterWeight(an, w, h):
    """Score an annotation: area over the squared (squared distance) from
    the bounding-box center to the image center.

    Returns np.Inf when the box is exactly centered.
    """
    x0, y0, bw, bh = an['bbox']
    dx = w / 2 - (x0 + bw / 2)
    dy = h / 2 - (y0 + bh / 2)
    dist_sq = dx * dx + dy * dy
    if dist_sq == 0:
        return np.Inf
    return an['area'] / dist_sq ** 2
def bestAnn(coco, imgData):
    """Return the highest-scoring non-crowd person annotation for an image,
    or None if there is no suitable annotation."""
    ann_ids = coco.getAnnIds(imgIds=imgData['id'], catIds=[1])
    best_score = 0
    best = None
    for ann in coco.loadAnns(ann_ids):
        if ann['iscrowd'] == 1:
            continue  # crowd regions have no single subject
        score = barycenterWeight(ann, imgData['width'], imgData['height'])
        if score > best_score:
            best_score = score
            best = ann
    return best
class MyCocoDataset(torch.utils.data.Dataset):
    """COCO-backed dataset keeping only images whose best person annotation
    covers more than 40% of the image area."""
    def __init__(self, root, annFile, transform):
        self.root = root;
        self.transform = transform;
        self.coco = COCO(annFile);
        # List of (image metadata, chosen annotation) pairs.
        self.ds = [];
        imgIds = self.coco.getImgIds();
        for i in imgIds:
            imgData = self.coco.loadImgs(i)[0];
            ann = bestAnn(self.coco,imgData);
            if ann is not None:
                # Keep only prominent subjects (>40% of the image area).
                if (ann['area']/(imgData['width']*imgData['height']) > 0.40):
                    self.ds.append((imgData,ann));
    def __getitem__(self, idx):
        """Return (image, target) in torchvision detection format:
        masks, boxes (xyxy), labels, area, iscrowd, image_id."""
        imgData, ann = self.ds[idx];
        img = Image.open(self.root + imgData['file_name']);
        if self.transform is not None:
            img = self.transform(img);
        target = {};
        target["masks"] = torch.as_tensor([self.coco.annToMask(ann)], dtype=torch.uint8);
        target['image_id'] = torch.tensor([ann['image_id']]);
        # COCO boxes are (x, y, width, height); convert to (x1, y1, x2, y2).
        x,y,dx,dy = ann['bbox'];
        target["boxes"] = torch.as_tensor([[x,y,x+dx,y+dy]], dtype=torch.float32);
        target['area'] = torch.as_tensor([ann['area']]);
        target['labels'] = torch.as_tensor([ann['category_id']]);
        target['iscrowd'] = torch.as_tensor([ann['iscrowd']]);
        return img, target;
    def __len__(self):
        return len(self.ds);
# --- Paths, data, model setup and training loop -------------------------------
# The original evaluated on `testset` *before* img_path_val / model / device
# were defined (NameError), and called train_one_epoch with an undefined `i`.
# Definitions now precede every use, and training runs in an explicit
# epoch loop.
path = './data/';
ann_path = path + 'annotations_train_val2014/instances_train2014.json';
img_path = path + 'train2014/';
ann_path_val = path + 'annotations_train_val2014/instances_val2014.json';
img_path_val = path + 'val2014/';

trainset = MyCocoDataset(root=img_path, annFile=ann_path, transform=transforms.ToTensor());
testset = MyCocoDataset(root=img_path_val, annFile=ann_path_val, transform=transforms.ToTensor());
trainloader = torch.utils.data.DataLoader(trainset, shuffle=True);
testloader = torch.utils.data.DataLoader(testset, shuffle=False);

num_classes = 2;  # background + person

# Start from a COCO-pretrained Mask R-CNN; freeze the backbone and train only
# the freshly created box/mask heads.
model = torchvision.models.detection.maskrcnn_resnet50_fpn(pretrained=True);
for param in model.parameters():
    param.requires_grad = False;

in_features = model.roi_heads.box_predictor.cls_score.in_features;
model.roi_heads.box_predictor = FastRCNNPredictor(in_features, num_classes);

in_features_mask = model.roi_heads.mask_predictor.conv5_mask.in_channels;
hidden_layer = 256;
model.roi_heads.mask_predictor = MaskRCNNPredictor(in_features_mask, hidden_layer, num_classes);

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu');
model = model.to(device);

params = [p for p in model.parameters() if p.requires_grad];
optimizer = torch.optim.SGD(params, lr=0.005, momentum=0.9, weight_decay=0.0005);
lr_scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=3, gamma=0.1);

num_epochs = 1;
for epoch in range(num_epochs):
    train_one_epoch(model, optimizer, trainloader, device, epoch, print_freq=10);
    lr_scheduler.step();
    evaluate(model, testloader, device=device);
| evgen32cd32/torchvisionBGremoval | BGRemovalTest.py | BGRemovalTest.py | py | 4,392 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.Inf",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "torch.utils",
"line_number": 44,
"usage_type": "attribute"
},
{
"api_name": "pycocotools.coco.COCO",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open"... |
6791686071 | from django.core.exceptions import ObjectDoesNotExist
from rest_framework import serializers
from exo_mentions.api.serializers import SearchMentionResultsSerializer
from circles.models import Circle
class SearchMentionSerializer(serializers.Serializer):
    """Validates a mention search: a free-text query plus the target circle."""
    search = serializers.CharField(required=False, allow_blank=True)
    circle_pk = serializers.CharField()

    def validate_circle_pk(self, value):
        """Ensure the circle exists and the requesting user may post in it.

        Raises:
            serializers.ValidationError: unknown circle, or user not allowed
                to post/mention in it.
        """
        actor = self.context.get('request').user
        try:
            circle = Circle.objects.get(pk=value)
        except ObjectDoesNotExist:
            raise serializers.ValidationError(
                'Circle with id {} object does not exist.'.format(
                    value,
                )
            )
        # The original used `assert` for this authorization check; asserts are
        # stripped under `python -O`, silently disabling the check. Use an
        # explicit condition instead.
        if not circle.check_user_can_post(actor, False):
            raise serializers.ValidationError(
                'You are not able to mention at this circle')
        return value
class UserMentionResultsSerializer(SearchMentionResultsSerializer):
    # Extends the base mention-result payload with the mentioned user's URL.
    url = serializers.URLField()
| tomasgarzon/exo-services | service-exo-core/mentions/api/serializers.py | serializers.py | py | 1,062 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.serializers.Serializer",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.serializers",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "rest_framework.serializers.CharField",
"line_number": 11,
"usage_type... |
import re
import requests

# 51job search URL template; {} is the page number.
# NOTE: the query string previously contained the mojibake "°reefrom=99"
# (an encoding-corrupted "&degreefrom=99"); restored here.
url = "https://search.51job.com/list/020000,000000,0000,00,9,99,python,2,{}.html?lang=c&stype=&postchannel=0000&workyear=99&cotype=99&degreefrom=99&jobterm=99&companysize=99&providesalary=99&lonlat=0%2C0&radius=-1&ord_field=0&confirmdate=9&fromType=&dibiaoid=0&address=&line=&specialarea=00&from=&welfare="

# Total-page-count pattern, e.g. "共12页,到第" inside the pager span.
page_count_re = re.compile('<span class="td">共(.*?)页,到第</span>', re.S)
# One job row: title, company, place, salary, post date. Compiled ONCE here;
# the original recompiled it inside the page loop on every iteration.
job_row_re = re.compile(
    'class="t1 ">.*? <a target="_blank" title="(.*?)".*? <span class="t2"><a target="_blank" title="(.*?)".*?<span class="t3">(.*?)</span>.*?<span class="t4">(.*?)</span>.*?<span class="t5">(.*?)</span>'
    , re.S)  # re.S lets '.' match newlines

# Fetch page 1 to learn the total page count.
firstPage = url.format(1)
response = requests.get(firstPage)
# The site serves GBK-encoded bytes; decode explicitly.
html = str(response.content, "gbk")
result = page_count_re.findall(html)
totalPage = int(result[0])
print("总页数:{}".format(totalPage))
for page in range(1, totalPage + 1):
    print("正在爬取第{}页数据".format(page))
    # 组装当前页的页码
    currentUrl = url.format(page)
    response = requests.get(currentUrl)
    html = str(response.content, "gbk")
    # 获取一页中所有的职位信息
    onePage = job_row_re.findall(html)
    # 列表中有元组,遍历方式
    for jobName, company, place, salary, postDate in onePage:
        print("名称:{},公司:{},地址:{},薪资:{},发布日期:{}".format(jobName, company, place, salary, postDate))
    print("-------------------")
| M0025/PythonSpider | codes/crawlers/51job-demo-2.py | 51job-demo-2.py | py | 1,620 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 17,
... |
20895643740 | from . import create_app
from .model import Option, Meal, TIME_FORMAT, list_of
import json
from datetime import datetime
from typing import List
from flask import request
from flask_restful import (
Api,
Resource,
fields,
marshal_with,
marshal_with_field,
)
app = create_app()
api = Api(app)
meals: List[Meal] = []
def create_meal(meal_time):
    """Parse *meal_time*, append a new Meal to the module list, return it."""
    parsed = datetime.strptime(meal_time, TIME_FORMAT)
    meal = Meal(len(meals), parsed)
    meals.append(meal)
    return meal
def create_option(meal_id, place):
    """Append a new Option to the given meal's option list and return it."""
    meal_options = meals[meal_id].options
    option = Option(len(meal_options), place)
    meal_options.append(option)
    return option
class Meals(Resource):
    """Collection endpoint: list all meals, or create a new one."""
    @marshal_with_field(list_of(Meal))
    def get(self):
        # Return every meal created so far.
        return meals
    @marshal_with(Meal.fields)
    def post(self):
        # Expects a JSON body: {"meal_time": "<TIME_FORMAT string>"}.
        data = json.loads(request.data)
        return create_meal(data['meal_time'])
api.add_resource(Meals, '/meals')
class Options(Resource):
    """Collection endpoint for a meal's place options."""
    @marshal_with_field(list_of(Option))
    def get(self, meal_id):
        # List the options proposed for this meal.
        return meals[meal_id].options
    @marshal_with(Option.fields)
    def post(self, meal_id):
        # Expects a JSON body: {"place": "<restaurant name>"}.
        data = json.loads(request.data)
        return create_option(meal_id, data['place'])
api.add_resource(Options, '/meals/<int:meal_id>/options')
class Votes(Resource):
    """Vote endpoint: each POST adds one vote to an option."""
    @marshal_with_field(fields.Integer)
    def post(self, meal_id, option_id):
        # NOTE(review): read-modify-write is not atomic; fine for the
        # single-threaded dev server, confirm before concurrent deployment.
        meals[meal_id].options[option_id].votes += 1
        return meals[meal_id].options[option_id].votes
api.add_resource(Votes, '/meals/<int:meal_id>/options/<int:option_id>/votes')
| genzj/flask-restful-api-course | s03-output-fields/meal_options/meal_options/app.py | app.py | py | 1,605 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask_restful.Api",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "model.Meal",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "model.Meal",
"line_numbe... |
19878151397 | import re
import functools
class InputError(Exception):
    """Raised when user input fails validation.

    Parameters
    ----------
    char : str
        The invalid character (or string) that triggered the error.
    message : str
        Explanation shown when the exception is rendered.
    """
    def __init__(self, char, message="Input is invalid."):
        self.char = char
        self.message = message
        super().__init__(message)

    def __str__(self):
        return "{0} - > {1}".format(self.char, self.message)
class PlugboardError(Exception):
    """Raised when there is an error with the plugboard.

    Parameters
    ----------
    char : str
        The invalid character that triggered the error.
    message : str
        Explanation shown when the exception is rendered.
    """
    def __init__(self, char, message):
        self.char = char
        self.message = message
        super().__init__(message)

    def __str__(self):
        return "{0} - > {1}".format(self.char, self.message)
def inputValidator(component, element):
    """Decorator factory that cleans and validates a method's input.

    Parameters
    ----------
    component : str
        Which validation to apply: 'inputElement' (two distinct letters),
        'encodingElement' (one letter) or 'ringSetting' (int 1-26).
    element : str
        Human-readable component name included in error messages.
    """
    def function_decorator(function):
        def cleaning_function(selfObject, currentInput, *args, **kwargs):
            if component in ['inputElement', 'encodingElement']:
                # Keep only letters; any dropped character means bad input.
                lettersClean = "".join(re.findall("[a-zA-Z]+", currentInput))
                amountLettersInput = len(currentInput)
                amountLettersClean = len(lettersClean)
                if amountLettersInput > amountLettersClean:
                    raise InputError(currentInput, message="[{0}] Input can only take letters [a-zA-Z].".format(element))
                if component in 'inputElement':
                    if amountLettersClean != 2:
                        raise InputError(currentInput, message="[{0}] Input must cointain TWO letters.".format(element))
                    elif lettersClean[0] == lettersClean[1]:
                        raise InputError(currentInput, message="[{0}] Input letters must be different.".format(element))
                elif component == 'encodingElement':
                    if amountLettersClean != 1:
                        raise InputError(currentInput, message="[{0}] Input must cointain ONE letter.".format(element))
                return function(selfObject, lettersClean.lower(), *args, **kwargs)
            elif component in ['ringSetting']:
                # Strip non-digits; the leading "0" guards against an empty match.
                inputClean = int("0" + "".join(re.findall("[0-9]+", str(currentInput))))
                amountCharsInput = len(str(currentInput))
                amountCharsClean = len(str(inputClean))
                if inputClean < 1 or inputClean > 26 or amountCharsInput > amountCharsClean:
                    raise InputError(currentInput, message="[{0}] Input can only take numbers between 1-26.".format(element))
                return function(selfObject, inputClean, *args, **kwargs)
            else:
                # The original printed a debug placeholder and silently
                # returned None here; fail loudly instead.
                raise ValueError("Unknown validator component: {0}".format(component))
        return functools.update_wrapper(cleaning_function, function)
    return function_decorator
| gonzaferreiro/python_enigma_machine | errorHandling.py | errorHandling.py | py | 3,663 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.findall",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "re.findall",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "functools.update_wrapper",
"line_number": 101,
"usage_type": "call"
}
] |
19510966399 | #from selenium import webdriver
#from selenium.webdriver.common.by import By
#import time
#from selenium.webdriver import ActionChains
from pages.home.login_pages import LoginPage
from utilities.teststatus import TestStatus
import unittest
import pytest
@pytest.mark.usefixtures("oneTimeSetUp","setUp")
class LoginTests(unittest.TestCase):
@pytest.fixture(autouse=True)
def classSetup(self, oneTimeSetUp):
self.lp = LoginPage(self.driver)
self.ts = TestStatus(self.driver)
@pytest.mark.run(order=1)
def test_validLogin(self):
#self.driver.get(self.baseURL)
#self.lp = LoginPage(self.driver)
self.lp.login("preeti_pradhan2005@yahoo.co.in", "Mylord_03")
result1 = self.lp.verifyLoginTitle()
#assert result1 == True
self.ts.mark(result1,"TitleVerification")
result2 = self.lp.verifyLoginSuccessful()
self.ts.markFinal("test_validLogin",result2,"LoginVerification")
#assert result2 == True
'''
@pytest.mark.run(order=2)
def test_InvalidLogin(self):
# baseURL = "https://www.amazon.in/"
# self.driver.get(self.baseURL)
# self.lp = LoginPage(self.driver)
self.lp.login("preeti_pradhan2005@yahoo.co.in", "mylord_03")
result = self.lp.verifyLoginFaild()
assert result == True
'''
| pprad123/python-selenium-framework | tests/home/login_tests.py | login_tests.py | py | 1,344 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pages.home.login_pages.LoginPage",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "utilities.teststatus.TestStatus",
"line_number": 17,
"usage_type": "call"
},
... |
38227453623 | from collections import defaultdict
from datetime import time
from typing import Dict, List
import pandas as pd
from pandas import ExcelFile
class ConfigMaker:
    """Builds the TZB phone-number config dict from the reference Excel workbook."""

    def make_regions_config(self, excel_file: ExcelFile) -> Dict[str, str]:
        """Map raw region names to TZB region names."""
        regions = {}
        df = pd.read_excel(excel_file, sheet_name="Region-->TZB_Reg_code")
        for _, row in df.iterrows():
            regions[row["Unnamed: 0"].strip()] = row["из проекта ТЗБ"].strip()
        return regions

    def make_region_codes_config(self, excel_file: ExcelFile) -> Dict[str, int]:
        """Map TZB region names to numeric codes; rows without a code are skipped."""
        region_codes = {}
        df = pd.read_excel(excel_file, sheet_name="Region-->TZB_Reg_code")
        for _, row in df.iterrows():
            if pd.isna(row["Code_region_TZB"]):
                continue
            region_codes[row["Region_TZB"]] = int(row["Code_region_TZB"])
        return region_codes

    def make_operators_config(self, excel_file: ExcelFile) -> Dict[str, str]:
        """Map raw operator names to their group (GrM) names."""
        operators = {}
        df = pd.read_excel(excel_file, sheet_name="Oper-->TZB_Oper_Code")
        for _, row in df.iterrows():
            operators[row["OPERATOR"]] = row["GrM_Name"]
        return operators

    def make_operator_codes_config(self, excel_file: ExcelFile) -> Dict[str, int]:
        """Map operator subgroup (GrS) names to numeric codes; NaN rows skipped."""
        operator_codes = {}
        df = pd.read_excel(excel_file, sheet_name="Oper-->TZB_Oper_Code")
        for _, row in df.iterrows():
            if pd.isna(row["GrS_Name"]):
                continue
            operator_codes[row["GrS_Name"]] = int(row["GrS_Code"])
        return operator_codes

    def make_time_difference_config(self, excel_file: ExcelFile) -> Dict[str, str]:
        """Map region names to their time difference from the reference zone."""
        time_difference_config = {}
        df = pd.read_excel(excel_file, sheet_name="Region-->UTC")
        for _, row in df.iterrows():
            time_difference_config[row["RegionName"]] = row["TimeDifference"]
        return time_difference_config

    def make_intervals_config(self, excel_file: ExcelFile) -> Dict[str, Dict[str, str]]:
        """Map region names to their allowed call interval (begin/end, HH:MM:SS)."""
        intervals = {}
        df = pd.read_excel(excel_file, sheet_name="Region-->Interval")
        for _, row in df.iterrows():
            intervals[row["RegionName"]] = {
                "begin": self.convert_time_to_string(row["CallIntervalBegin"]),
                "end": self.convert_time_to_string(row["CallIntervalEnd"]),
            }
        return intervals

    def convert_time_to_string(self, value: time) -> str:
        """Format a time cell as HH:MM:SS; empty string for missing (NaN) cells.

        The parameter was renamed from ``time``, which shadowed the
        ``datetime.time`` import.
        """
        if pd.isna(value):
            return ""
        return value.strftime("%H:%M:%S")

    def make_ignore_config(self, excel_file: ExcelFile) -> List[str]:
        """List TZB regions whose code is 0 -- these are ignored downstream."""
        ignore = []
        df = pd.read_excel(excel_file, sheet_name="Region-->TZB_Reg_code")
        for _, row in df.iterrows():
            if row["Code_region_TZB"] == 0:
                ignore.append(row["Region_TZB"])
        return ignore

    def make_allowed_operators_config(self, excel_file: ExcelFile) -> Dict[str, list]:
        """Map each operator to the list of regions it is allowed to serve."""
        allowed_operators = defaultdict(list)
        df = pd.read_excel(excel_file, sheet_name="Oper-->Allowed_Region")
        for _, row in df.iterrows():
            allowed_operators[row["OPERATOR"]].append(row["REGION"])
        return allowed_operators

    def make_config_file(self, excel_file: ExcelFile) -> Dict:
        """Assemble the full configuration dict from every workbook sheet."""
        config = {}
        config["regions"] = self.make_regions_config(excel_file)
        config["region_codes"] = self.make_region_codes_config(excel_file)
        config["operators"] = self.make_operators_config(excel_file)
        config["operator_codes"] = self.make_operator_codes_config(excel_file)
        config["time_difference"] = self.make_time_difference_config(excel_file)
        config["intervals"] = self.make_intervals_config(excel_file)
        config["ignores"] = self.make_ignore_config(excel_file)
        config["allowed_operators"] = self.make_allowed_operators_config(excel_file)
        return config
| tenetko/phone-numbers-beautifier | backend/src/core/config_maker/tzb_config_maker.py | tzb_config_maker.py | py | 3,898 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.ExcelFile",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pandas.read_excel",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pandas.ExcelFile",
... |
12196143390 | import matplotlib.pyplot as plt
import numpy as np
from matplotlib.animation import FuncAnimation
fig = plt.figure()
ax = fig.add_subplot()
x_data = np.linspace(-10, 10, 100)
sinx = np.sin(x_data)
cosx = np.cos(x_data)
ax.set_xlim(-10, 10)
# NOTE(review): limits given as (2, -2) flip the y-axis -- confirm intentional.
ax.set_ylim(2, -2)
line1, = plt.plot(x_data, sinx)
line2, = plt.plot(x_data, cosx)
def next_frame(frame_number):
    # Animation callback: rescale the sin/cos arguments each frame.
    i = frame_number/10
    line1.set_ydata(np.sin(x_data*i))
    # NOTE(review): at frame 0, sqrt(0) causes division by zero -> NaNs for
    # the first frame; consider starting the scale at 1.
    line2.set_ydata(np.cos(x_data/np.sqrt(i)))
# Keep a reference to the animation so it is not garbage-collected.
ani = FuncAnimation(fig, func=next_frame, interval=50)
plt.show()
| marksverdhei/advanced_matplotlib_workshop | demos/enkel_animasjon.py | enkel_animasjon.py | py | 531 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.sin",
... |
39807470849 | from grid_world import *
from numpy.random import choice
import numpy as np
import random
from matplotlib import pyplot as plt
class DiscreteSoftmaxPolicy(object):
    """Tabular softmax policy with temperature-scaled linear logits."""

    def __init__(self, num_states, num_actions, temperature):
        self.num_states = num_states
        self.num_actions = num_actions
        self.temperature = temperature
        self.init_temperature = temperature
        # Temperature is annealed toward this value during training.
        self.terminal_temperature = 0.8
        # Policy weights (one logit per state/action), He-style init.
        self.weights = np.random.randn(num_states, num_actions) * np.sqrt(2/num_actions)

    def act(self, state):
        """Sample an action from the softmax distribution at *state*."""
        probs = self._action_probs(state)
        return choice(range(self.num_actions), p=probs)

    def _sigmoid(self):
        # NOTE(review): unused helper, kept for interface compatibility.
        return 1 / (1 + np.exp(-self.weights))

    def _action_probs(self, state):
        """Softmax over weights[state] at the current temperature."""
        prob_actions = np.zeros((self.num_actions, ))
        total = 0
        for a in range(self.num_actions):
            prob_actions[a] = np.exp(self.weights[state][a] / self.temperature)
            total += prob_actions[a]
        return prob_actions / total

    def _custom_softmax(self, weights, state):
        """Softmax over an arbitrary weight table of the same shape."""
        prob_actions = np.zeros((self.num_actions, ))
        total = 0
        for a in range(self.num_actions):
            prob_actions[a] = np.exp(weights[state][a] / self.temperature)
            total += prob_actions[a]
        return prob_actions / total

    def compute_gradient(self, state, action, discounted_return):
        """Gradient of log pi(action|state) * discounted_return w.r.t. weights.

        With logits w[s][a] / T, the softmax score function is
        (1{a' == action} - pi(a'|s)) / T, so each entry of the visited
        state's row is that score scaled by the discounted return.
        (The original multiplied by w[s][action] / T instead of 1 / T,
        which is not the softmax score function.)

        Returns a (num_states, num_actions) array, zero outside the
        visited state's row.
        """
        grad = np.zeros((self.num_states, self.num_actions))
        probs = self._action_probs(state)
        for a in range(self.num_actions):
            indicator = 1.0 if a == action else 0.0
            grad[state][a] = (indicator - probs[a]) / self.temperature * discounted_return
        return grad

    def gradient_step(self, grad, step_size):
        """Take one gradient-ascent step on the weights."""
        self.weights = self.weights + grad * step_size
def get_discounted_returns(rewards, gamma):
    """Discounted return at every timestep of an episode.

    >>> list(get_discounted_returns([1, 1, 1], 0.5))
    [1.75, 1.5, 1.0]
    """
    returns = np.zeros((len(rewards), ))
    running = 0
    # Walk the episode backwards, accumulating the discounted tail.
    for t in reversed(range(len(rewards))):
        running = rewards[t] + gamma * running
        returns[t] = running
    return returns
def reinforce(env, policy, gamma, num_episodes, learning_rate):
    """Run REINFORCE on *env* with *policy* for *num_episodes* episodes.

    Returns (episode indices, 300-episode moving average of per-episode
    mean reward).
    """
    total_rewards = []
    state = env.reset() # initial state
    for ep in range(num_episodes):
        # Anneal temperature linearly from init_temperature to terminal_temperature.
        policy.temperature -= ((policy.init_temperature - policy.terminal_temperature) / num_episodes)
        rewards = []
        states = []
        actions = []
        done = False
        # Generate a full episode following the current policy.
        while not done:
            states.append(state)
            action = policy.act(state)
            actions.append(action)
            state, reward, done = env.step(action)
            rewards.append(reward)
        state = env.reset()
        G = get_discounted_returns(rewards, gamma)
        # One gradient-ascent step per visited (state, action) pair.
        for i in range(len(states)):
            grad = policy.compute_gradient(states[i], actions[i], G[i])
            policy.gradient_step(grad, learning_rate)
        total_rewards.append(np.mean(rewards))
    # 300-episode moving average ('valid' mode: empty if num_episodes < 300).
    total_rewards = np.convolve(total_rewards, np.ones((300,)) / 300, mode='valid')
    return list(range(len(total_rewards))), total_rewards
def print_policy(policy: DiscreteSoftmaxPolicy, env: GridWorld, i):
    """Print the policy's chosen action for every grid cell as an arrow grid."""
    arrow_for_action = {
        0: u"\u2191",  # up
        1: u"\u2192",  # right
        2: u"\u2193",  # down
        3: u"\u2190",  # left
    }
    grid = np.chararray((env.n_rows, env.n_cols), unicode=True)
    for row in range(env.n_rows):
        for col in range(env.n_cols):
            grid[row][col] = arrow_for_action[policy.act(env.map[row][col])]
    print('Iteration: [{}]\nPolicy:\n{}\n\n'.format(i, grid))
if __name__ == "__main__":
    # Train 20 independent runs on MAP2; plot each smoothed learning curve
    # in a 4x5 grid of subplots and print the learned policy per run.
    gamma = 0.9
    num_episodes = 20000
    learning_rate = 1e-4
    fig = plt.figure()
    for i in range(1, 21):
        env = GridWorld(MAP2)
        policy = DiscreteSoftmaxPolicy(env.get_num_states(), env.get_num_actions(), temperature=1.4)
        iteration, total_mean_return = reinforce(env, policy, gamma, num_episodes, learning_rate)
        print_policy(policy, env, i)
        ax = fig.add_subplot(4,5,i)
        ax.plot(iteration, total_mean_return)
        ax.set_title(i)
        print('{} | Reached goal: {} times\n\n'.format(i, env.goals_reached))
    plt.tight_layout()
    plt.show()
    # gives a sample of what the final policy looks like (uses the last run's
    # env/policy from the loop above)
    print("Rolling out final policy")
    state = env.reset()
    env.print()
    done = False
    while not done:
        input("press enter to continue:")
        action = policy.act(state)
        state, reward, done = env.step(action)
        env.print()
| nipunbhanot/Reinforcement-Learning---Policy-Gradient | Policy Gradient Control/reinforce_skeleton.py | reinforce_skeleton.py | py | 5,841 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "numpy.random.randn",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.sqrt",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random.choice",... |
17895586270 | r"""CLIP ImageNet zero-shot evaluation.
"""
# pylint: enable=line-too-long
import ml_collections
from configs import clip_common # local file import from experimental.multimodal
def get_config():
  """Config for zero-shot evaluation of CLIP on ImageNet and related datasets.

  Returns:
    ml_collections.ConfigDict with model, data, optimizer and zero-shot
    evaluation settings. Behavior-identical to the previous hand-written
    version; the 17 near-duplicate zero-shot dataset entries are now built
    from a single table.
  """
  config = ml_collections.ConfigDict()
  config.model_name = 'vit_b16'
  config.only_eval = True
  # Fine-tuning dataset
  config.dataset = 'coco_captions'
  config.train_split = 'train'
  config.val_split = 'val'
  BATCH_SIZE = 512  # pylint: disable=invalid-name
  config.batch_size = BATCH_SIZE
  config.batch_size_eval = BATCH_SIZE
  config.val_cache = False
  config.total_steps = 20_000
  config.tokenizer_max_len = 77
  # PP modified from
  # third_party/py/big_vision/configs/proj/image_text/lit_coco.py
  INPUT_RES = clip_common.IMAGE_RESOLUTION[config.model_name]  # pylint: disable=invalid-name
  coco_pp = 'get_coco_captions("text")'
  image_pp = f'|decode|resize({INPUT_RES})|flip_lr|randaug(2,10)|value_range(-1,1)'
  text_pp = f'|clip_tokenize({config.tokenizer_max_len}, key="text", key_result="text")'
  final_pp = '|keep(["image", "text"])'
  config.pp_train = config.pp_eval = coco_pp + image_pp + text_pp + final_pp
  config.shuffle_buffer_size = 50_000  # Per host, so small-ish is ok.
  config.log_training_steps = 100
  config.log_eval_steps = 1000
  config.checkpoint_steps = 4000
  config.checkpoint_timeout = 1
  config.prefetch_to_device = 2
  config.trial = 0
  # Model section
  config.model_init = clip_common.CHECKPOINTS[config.model_name]
  config.convert_pytorch = True
  config.model = ml_collections.config_dict.create(
      **clip_common.CONFIGS[config.model_name])
  # Optimizer section
  config.optim_name = 'Momentum'
  config.optim = ml_collections.ConfigDict()
  config.grad_clip_norm = 1.0
  config.weight_decay = None  # No explicit weight decay
  config.lr = ml_collections.ConfigDict()
  config.lr.base = 0.06
  config.lr.warmup_steps = 500
  config.lr.decay_type = 'cosine'
  # Zero-shot section
  def zeroshot_pp(num_classes, resize_method='area'):
    """Eval preprocessing spec for a `num_classes`-way labeled dataset."""
    pp = f'decode|resize_small({INPUT_RES}, method="{resize_method}")|central_crop({INPUT_RES})'
    pp += f'|value_range(0, 1)|normalize({clip_common.CLIP_IMAGE_MEAN}, {clip_common.CLIP_IMAGE_STD})'
    pp += f'|onehot({num_classes}, key="label", key_result="text")'
    pp += '|keep(["image", "text"])'
    return pp
  # (key, TFDS name, split, num classes, resize method). Each row yields two
  # eval configs: '<key>' without prompts and '<key>_prompt' with the
  # dataset's own prompt set (prompts_key == key).
  zeroshot_specs = [
      ('imagenet', 'imagenet2012', 'validation', 1000, 'area'),
      ('cars196', 'cars196', 'train', 196, 'area'),
      ('caltech101', 'caltech101', 'train', 102, 'area'),
      ('cifar10', 'cifar10', 'train', 10, 'bicubic'),
      ('cifar100', 'cifar100', 'train', 100, 'bicubic'),
      ('dtd', 'dtd', 'test', 47, 'area'),
      ('eurosat', 'eurosat', 'train', 10, 'bicubic'),
      ('resisc45', 'resisc45', 'train', 45, 'bicubic'),
  ]
  zeroshot_eval_datasets = {}
  for key, tfds_name, split, num_classes, resize_method in zeroshot_specs:
    zeroshot_eval_datasets[key] = {
        'dataset': tfds_name,
        'split': split,
        'classnames_key': key,
        'prompts_key': 'none',
        'pp_spec': zeroshot_pp(num_classes, resize_method),
    }
  for key, tfds_name, split, num_classes, resize_method in zeroshot_specs:
    zeroshot_eval_datasets[key + '_prompt'] = {
        'dataset': tfds_name,
        'split': split,
        'classnames_key': key,
        'prompts_key': key,
        'pp_spec': zeroshot_pp(num_classes, resize_method),
    }
  # ImageNet with the hand-tuned "best" prompt set.
  zeroshot_eval_datasets['imagenet_best'] = {
      'dataset': 'imagenet2012',
      'split': 'validation',
      'classnames_key': 'imagenet',
      'prompts_key': 'imagenet_best',
      'pp_spec': zeroshot_pp(1000),
  }
  config.zeroshot_eval_datasets = zeroshot_eval_datasets
  return config
def get_sweep(hyper):
  """No hyperparameter sweep: a single default configuration."""
  sweep_axes = []
  return hyper.product(sweep_axes)
| google/uncertainty-baselines | experimental/multimodal/configs/clip_zeroshot_eval.py | clip_zeroshot_eval.py | py | 6,176 | python | en | code | 1,305 | github-code | 36 | [
{
"api_name": "ml_collections.ConfigDict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "configs.clip_common.IMAGE_RESOLUTION",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "configs.clip_common",
"line_number": 34,
"usage_type": "name"
},
... |
19241188993 | """Can we download user information?"""
from datetime import date, datetime, timedelta
import pytest
import requests
import mal_scraper
class TestDiscovery(object):
    """Test discovery of usernames from MAL's public users page."""
    # The page listing recently-online users; mocked in every test below.
    DISCOVERY_LINK = 'http://myanimelist.net/users.php'
    # TODO: Test Cache
    # TODO: Test fall-back
    def test_web_discovery(self, mock_requests):
        """Can we discover usernames?"""
        mock_requests.always_mock(self.DISCOVERY_LINK, 'users_discovery')
        users = mal_scraper.discover_users(use_cache=False, use_web=True)
        # The recorded fixture page contains exactly these 20 usernames.
        assert len(users) == 20
        assert users == {
            'WitcherEnvy', 'TrollTama', 'Kumatetsu_Rei82', 'Dragh', 'Tom_West',
            'MrNoname7890', 'Aaaavis002', 'nguyenthugiang', 'mrtnlm', 'Kaxkk',
            '9broken_angeL', 'AnimeFreak2205', 'TMZ', 'Oxenia', 'justunknown',
            '0ldboy', 'alkodrak', 'Derin', 'Insufance', 'fatalbert357',
        }
    def test_web_discovery_on_garbled_page(self, mock_requests):
        """Do we return a failure on bad pages?"""
        mock_requests.always_mock(self.DISCOVERY_LINK, 'garbled_user_discovery_page')
        users = mal_scraper.discover_users(use_cache=False, use_web=True)
        # Unparseable HTML yields an empty result set rather than raising.
        assert users == set()
    def test_web_discovery_on_failed_page(self, mock_requests):
        """Do we return a failure on failed pages?"""
        mock_requests.always_mock(self.DISCOVERY_LINK, 'garbled_user_discovery_page', status=500)
        # HTTP-level failures propagate as requests exceptions.
        with pytest.raises(requests.exceptions.HTTPError):
            mal_scraper.discover_users(use_cache=False, use_web=True)
class TestUserStats:
    """Test retrieving basic stats information from a user profile page."""
    PROFILE_URL = 'http://myanimelist.net/profile/'
    TEST_USER = 'SparkleBunnies' # Sorry SparkleBunnies, 'twas a random selection...
    TEST_USER_PAGE = PROFILE_URL + TEST_USER
    # Each *_USER/*_PAGE pair below corresponds to one recorded fixture that
    # exercises a different "last online" display format on MAL.
    TEST_LAST_ONLINE_NOW_USER = 'Ichigo_Shiba'
    TEST_LAST_ONLINE_NOW_PAGE = PROFILE_URL + TEST_LAST_ONLINE_NOW_USER
    TEST_LAST_ONLINE_MINS_USER = 'El_Anciano'
    TEST_LAST_ONLINE_MINS_PAGE = PROFILE_URL + TEST_LAST_ONLINE_MINS_USER
    TEST_LAST_ONLINE_HOURS_USER = 'snip-snip'
    TEST_LAST_ONLINE_HOURS_PAGE = PROFILE_URL + TEST_LAST_ONLINE_HOURS_USER
    TEST_LAST_ONLINE_YESTERDAY_USER = 'Nyanloveanimes'
    TEST_LAST_ONLINE_YESTERDAY_PAGE = PROFILE_URL + TEST_LAST_ONLINE_YESTERDAY_USER
    TEST_LAST_ONLINE_DATE_USER = 'Elizi'
    TEST_LAST_ONLINE_DATE_PAGE = PROFILE_URL + TEST_LAST_ONLINE_DATE_USER
    def test_detect_bad_download(self, mock_requests):
        """A garbled profile page raises ParseError."""
        mock_requests.always_mock(self.TEST_USER_PAGE, 'garbled_user_page')
        with pytest.raises(mal_scraper.ParseError):
            mal_scraper.get_user_stats(self.TEST_USER)
    def test_user_does_not_exist(self, mock_requests):
        """A 404 profile maps to RequestError with code does_not_exist."""
        mock_requests.always_mock(
            'http://myanimelist.net/profile/asdghiuhunircg',
            'user_does_not_exist',
            status=404,
        )
        with pytest.raises(mal_scraper.RequestError) as err:
            mal_scraper.get_user_stats('asdghiuhunircg')
        assert err.value.code == mal_scraper.RequestError.Code.does_not_exist
    def test_user_stats(self, mock_requests):
        """Do we retrieve the right stats about a user?"""
        # Always mock this because the user will change it himself
        mock_requests.always_mock(self.TEST_USER_PAGE, 'user_test_page')
        meta, data = mal_scraper.get_user_stats(self.TEST_USER)
        # Fuzzy match datetime: 'when' must have been recorded moments ago.
        assert datetime.utcnow() - meta['when'] < timedelta(seconds=30)
        # Assert meta contained by ...
        assert meta.items() >= {
            'user_id': self.TEST_USER,
        }.items()
        # Fuzzy match datetime - the fixture says last online was "10 hours ago"
        last_online = data['last_online']
        assert datetime.utcnow() - last_online < timedelta(hours=11)
        assert data == {
            'name': self.TEST_USER,
            'joined': date(year=2014, month=1, day=6),
            'last_online': last_online, # Already checked
            'num_anime_watching': 14,
            'num_anime_completed': 129,
            'num_anime_on_hold': 9,
            'num_anime_dropped': 4,
            'num_anime_plan_to_watch': 16,
        }
    def test_user_last_online_now(self, mock_requests):
        """'Online now' parses to (approximately) the current time."""
        mock_requests.always_mock(self.TEST_LAST_ONLINE_NOW_PAGE, 'user_last_online_now')
        data = mal_scraper.get_user_stats(self.TEST_LAST_ONLINE_NOW_USER).data
        last_online = data['last_online']
        assert datetime.utcnow() - last_online < timedelta(seconds=10)
    def test_user_last_online_minutes(self, mock_requests):
        """'N minutes ago' parses relative to the current time."""
        # 23 minutes ago
        mock_requests.always_mock(self.TEST_LAST_ONLINE_MINS_PAGE, 'user_last_online_mins')
        data = mal_scraper.get_user_stats(self.TEST_LAST_ONLINE_MINS_USER).data
        last_online = data['last_online']
        assert (datetime.utcnow() - timedelta(minutes=23)) - last_online < timedelta(minutes=1)
    def test_user_last_online_hours(self, mock_requests):
        """'N hours ago' parses relative to the current time."""
        mock_requests.always_mock(self.TEST_LAST_ONLINE_HOURS_PAGE, 'user_last_online_hours')
        data = mal_scraper.get_user_stats(self.TEST_LAST_ONLINE_HOURS_USER).data
        last_online = data['last_online'] # '6 hours ago'
        assert (datetime.utcnow() - timedelta(hours=6)) - last_online < timedelta(seconds=10)
    def test_user_last_online_yesterday(self, mock_requests):
        """'Yesterday, H:MM AM' parses to an absolute time on the previous day."""
        # Yesterday, 9:01 AM
        mock_requests.always_mock(
            self.TEST_LAST_ONLINE_YESTERDAY_PAGE,
            'user_last_online_yesterday',
        )
        data = mal_scraper.get_user_stats(self.TEST_LAST_ONLINE_YESTERDAY_USER).data
        yesterday = datetime.utcnow() - timedelta(days=1)
        expected_date = yesterday.replace(hour=9, minute=1, second=0, microsecond=0)
        assert data['last_online'] == expected_date
    def test_user_last_online_date(self, mock_requests):
        """'Mon D, H:MM AM' (no year) parses assuming the current year."""
        # May 4, 8:09 AM
        mock_requests.always_mock(self.TEST_LAST_ONLINE_DATE_PAGE, 'user_last_online_date')
        _, info = mal_scraper.get_user_stats(self.TEST_LAST_ONLINE_DATE_USER)
        this_year = datetime.utcnow().year
        assert datetime(year=this_year, month=5, day=4, hour=8, minute=9) == info['last_online']
    def test_user_discovery_on_user_profile_page(self, mock_requests):
        """A profile page's HTML yields the usernames linked from it (in order, duplicates kept)."""
        mock_requests.always_mock('http://myanimelist.net/profile/SparkleBunnies', 'user_test_page')
        meta = mal_scraper.get_user_stats('SparkleBunnies').meta
        html = meta['response'].text
        usernames = list(mal_scraper.user_discovery.discover_users_from_html(html))
        assert usernames == [
            'SparkleBunnies', 'SparkleBunnies', 'SparkleBunnies', 'SparkleBunnies', 'AkitoKazuki',
            'Exmortus420', 'ChannelOrange', 'Brandon', 'Zeally', 'Daedalus',
            'HaXXspetten', 'Kagami', 'no_good_name', 'BlackFIFA19', 'Ichigo_Shiba',
            'Ichigo_Shiba', 'Sacchie', 'Sacchie', 'Woodenspoon', 'Woodenspoon',
            'Teddy_Bear56', 'Teddy_Bear56', 'Speeku', 'Speeku', 'stonemask',
            'stonemask', 'IIDarkII', 'IIDarkII', 'ThisNameSucks', 'ThisNameSucks',
            'Z6890', 'Z6890', 'BKZekken', 'BKZekken', 'Woodenspoon',
            'Woodenspoon', 'ChannelOrange', 'ChannelOrange', 'Padgit', 'Padgit',
        ]
class TestUserAnimeList(object):
    """Test retrieving a user's anime list via the paged load.json endpoint."""
    # status=7 requests entries of every consumption status; offset pages the list.
    LIST_URL = 'http://myanimelist.net/animelist/{username}/load.json?offset={offset:d}&status=7'
    TEST_FORBIDDEN_USERNAME = 'SparkleBunnies'
    TEST_FORBIDDEN_PAGE = LIST_URL.format(username=TEST_FORBIDDEN_USERNAME, offset=0)
    TEST_USER_SMALL_NAME = 'Littoface' # ~100 anime
    TEST_USER_SMALL_PAGE = LIST_URL.format(username=TEST_USER_SMALL_NAME, offset=0)
    TEST_USER_SMALL_END_PAGE = LIST_URL.format(username=TEST_USER_SMALL_NAME, offset=158)
    TEST_USER_LOTS_NAME = 'Vindstot' # 5k anime...
    TEST_USER_LOTS_LIST_PAGE = LIST_URL.format(username=TEST_USER_LOTS_NAME, offset=0)
    TEST_USER_TAGS_NAME = 'reltats'
    TEST_USER_TAGS_PAGE = LIST_URL.format(username=TEST_USER_TAGS_NAME, offset=0)
    TEST_USER_TAGS_END_PAGE = LIST_URL.format(username=TEST_USER_TAGS_NAME, offset=263)
    def test_non_ok_download(self, mock_requests):
        """A 401 response maps to RequestError(forbidden)."""
        mock_requests.always_mock(self.TEST_FORBIDDEN_PAGE, 'user_anime_list_forbidden', status=401)
        with pytest.raises(mal_scraper.RequestError) as err:
            mal_scraper.get_user_anime_list(self.TEST_FORBIDDEN_USERNAME)
        assert err.value.code == mal_scraper.RequestError.Code.forbidden
    def test_forbidden_access(self, mock_requests):
        """A 400 response also maps to RequestError(forbidden)."""
        mock_requests.always_mock(self.TEST_FORBIDDEN_PAGE, 'user_anime_list_forbidden', status=400)
        with pytest.raises(mal_scraper.RequestError) as err:
            mal_scraper.get_user_anime_list(self.TEST_FORBIDDEN_USERNAME)
        assert err.value.code == mal_scraper.RequestError.Code.forbidden
    def test_download_one_page_anime(self, mock_requests):
        """Pagination stops at the recorded end page; entries parse into dicts."""
        mock_requests.always_mock(self.TEST_USER_SMALL_PAGE, 'user_anime_list_small')
        mock_requests.always_mock(self.TEST_USER_SMALL_END_PAGE, 'user_anime_list_end')
        anime = mal_scraper.get_user_anime_list(self.TEST_USER_SMALL_NAME)
        assert len(anime) == 158
        assert anime[0] == {
            'name': 'Danshi Koukousei no Nichijou',
            'id_ref': 11843,
            'consumption_status': mal_scraper.ConsumptionStatus.consuming,
            'is_rewatch': False,
            'score': 0,
            # 'start_date': None,
            'progress': 9,
            # 'finish_date': None,
            'tags': set(),
        }
    def test_user_tags(self, mock_requests):
        """User tags come back as a set of strings."""
        mock_requests.always_mock(self.TEST_USER_TAGS_PAGE, 'user_anime_list_tags')
        mock_requests.always_mock(self.TEST_USER_TAGS_END_PAGE, 'user_anime_list_tags_end')
        anime_list = mal_scraper.get_user_anime_list(self.TEST_USER_TAGS_NAME)
        assert len(anime_list) == 263
        assert anime_list[99]['tags'] == {
            'A masterpiece of failures. Yami wo Kirisaku',
            'LOAD THIS DRYER!',
        }
| QasimK/mal-scraper | tests/mal_scraper/test_users.py | test_users.py | py | 10,125 | python | en | code | 19 | github-code | 36 | [
{
"api_name": "mal_scraper.discover_users",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "mal_scraper.discover_users",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "r... |
39266700309 | import sys
import cv2
import matplotlib.pyplot as plt
# OpenCV practice script: load, show, save and convert images.
print(cv2.__version__)
path = '../main/big_data/lecture/week9/data/cat.bmp'
img = cv2.imread(path)
# OpenCV loads BGR; convert to RGB for matplotlib display.
plt.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))
plt.show()
if img is None:
    print('image load failed')
    sys.exit()
# Show the image in a native OpenCV window until any key is pressed.
cv2.namedWindow('image')
cv2.imshow('image',img)
cv2.waitKey()
cv2.destroyAllWindows()
# cv2.destroyWindow('image')
# img write
cv2.imwrite('../main/big_data/lecture/week9/data/cat.png',img)
#
# Reload the same file as single-channel grayscale.
path = '../main/big_data/lecture/week9/data/cat.bmp'
img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
plt.imshow(img)
plt.imshow(img,cmap='gray')
# NOTE(review): img is grayscale (1 channel) here, but COLOR_BGR2RGB expects
# 3 channels -- this call likely raises an OpenCV error; confirm intent.
plt.imshow(cv2.cvtColor(img,cv2.COLOR_BGR2RGB))
plt.show()
img.shape  # no-op expression; presumably shape inspection from an interactive session
if img is None:
    print('image load failed')
    sys.exit()
# Change the file extension (imwrite re-encodes based on the extension).
# img : bmp file
cv2.imwrite('../main/big_data/lecture/week9/data/cat_gray.png',img)
path = '../main/big_data/lecture/week9/data/cat_gray.png'
img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
cv2.imshow('../main/big_data/lecture/week9/data/cat_gray.png',img)
cv2.waitKey()
cv2.destroyAllWindows()
#
path = '../main/big_data/lecture/week9/data/cat.bmp'
img = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
cv2.imshow('image',img)
cv2.waitKey()
# Key binding: keep polling until the 'q' key is pressed.
while True:
    if cv2.waitKey() == ord('q'):
        cv2.destroyAllWindows()
        print(1)
        break
# Color vs grayscale comparison with matplotlib subplots.
imgBGR = cv2.imread(path)
imgRGB = cv2.cvtColor(imgBGR,cv2.COLOR_BGR2RGB)
plt.axis('off')
plt.imshow(imgRGB)
plt.imshow(imgBGR)
img_gray = cv2.imread(path,cv2.IMREAD_GRAYSCALE)
plt.axis('off')
plt.imshow(img_gray,cmap='gray')
plt.subplot(121)
plt.axis('off')
plt.imshow(imgRGB)
plt.subplot(122)
plt.axis('off')
plt.imshow(img_gray,cmap='gray')
| jjh0987/multi_campus | big_data/lecture/week9/cv2_practice0.py | cv2_practice0.py | py | 1,666 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.__version__",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "matplotlib.pypl... |
39247108881 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 26 11:46:50 2023
@author: BD
Evaluate and plot the effect of package color on the win rate
"""
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def draw_it(i):
    """Draw data point i: black-outlined errorbar with a colored overlay.

    Reads the module-level plotting state (ax, x, MC_mean, uncertainty,
    color_codes) set up in the __main__ block.
    """
    xi, yi, err, col = x[i], MC_mean[i], uncertainty[i], color_codes[i]
    # Black underlay: slightly larger markers/errorbar act as an outline.
    ax.errorbar(xi, yi, yerr=err, fmt='o', elinewidth=2, ms=5, color='black', zorder=0)
    ax.plot(xi - 0.1, yi, marker='>', ms=5, color='black', zorder=0)
    ax.plot(xi + 0.1, yi, marker='<', ms=5, color='black', zorder=0)
    # Colored overlay drawn on top (99% of the error length, thinner line).
    ax.errorbar(xi, yi, yerr=err * 0.99, fmt='o', elinewidth=1.5, ms=1, color=col, zorder=1)
    ax.plot(xi, yi, marker='o', ms=5, color='black', zorder=2)
    ax.plot(xi, yi, marker='o', ms=4, color=col, zorder=3)
    ax.plot(xi - 0.1, yi, marker='>', ms=4, color=col, zorder=2)
    ax.plot(xi + 0.1, yi, marker='<', ms=4, color=col, zorder=2)
if __name__ == '__main__':
    path = ''
    # Read in the table
    tbl = pd.read_csv(path+'candy_data_extended.csv')
    # The list of all colors and their color codes (thank you chatGPT)
    # cyan is used when there are many colors or no colors (placebo)
    colors = np.unique(tbl['main color'])
    color_codes = ['#00FFFF', 'k', 'b', '#8B4513', 'y', 'gray', 'g', '#00FFFF',\
                   '#FFA500', '#FFC0CB', 'purple', 'r', 'w', '#FFD700']
    # Per-color mean win percentage, its spread, and the sample size.
    # Uncertainty of the mean is rms/sqrt(N).
    MC_mean = []
    MC_std = []
    N = []
    for color in colors:
        MC_mean.append(np.mean(tbl['winpercent'][tbl['main color']==color]))
        MC_std.append(np.std(tbl['winpercent'][tbl['main color']==color]))
        N.append(len(tbl[tbl['main color']==color]))
    # numpy broadcasting converts the Python lists element-wise.
    uncertainty = MC_std/np.sqrt(N)
    # Plot the results: one errorbar per package color, labeled with N.
    x = range(len(colors))
    plt.figure(figsize=(7,4))
    ax = plt.subplot(111)
    for i in range(len(N)):
        draw_it(i)
        ax.text(x[i]-0.1, 32, N[i])
    ax.text(-0.85, 32, 'N =')
    ax.set_xticks(range(len(colors)), colors, rotation=90)
    ax.set_ylabel('win [%]')
    ax.set_xlim([-1,len(colors)])
    plt.tight_layout()
plt.savefig(path+'package_colors.pdf') | deshev/Candies | package_color.py | package_color.py | py | 2,290 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.mean",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.std",
"line_number"... |
37820408361 | from django.contrib.gis.geos import Point, Polygon, GEOSGeometry
import requests
import pytest
import json
API_URL = "http://127.0.0.1:8000/api/buildings/"
@pytest.fixture
def building_data():
    """A valid building payload: a 5x5-degree square footprint with an address."""
    square = [[[5.0, 5.0], [5.0, 10.0], [10.0, 10.0], [10.0, 5.0], [5.0, 5.0]]]
    return {
        "geom": {"type": "Polygon", "coordinates": square},
        "address": "CreatedFromTest3",
    }
def test_list_buildings():
    """GET the collection endpoint succeeds; also reused below as a helper."""
    response = requests.get(API_URL + '')
    assert response.status_code == 200
    return response.json()
@pytest.fixture
def get_big_distance_and_point():
    """A radius (~100 000 km) large enough to cover everything, plus an origin point."""
    return (100000000, Point(0, 0))
@pytest.fixture
def get_big_distance_filtered_list(get_big_distance_and_point):
    """Buildings filtered by the huge radius; should match the full list."""
    dist, point = get_big_distance_and_point
    res = requests.get(API_URL + '?dist={0}&&point={1},{2}'.format(dist, point.x, point.y))
    assert res.status_code == 200
    return res.json()
@pytest.fixture
def get_full_building_list():
    """The unfiltered building list, fetched via the list-endpoint helper."""
    return test_list_buildings()
# Testing that for 100000 km (100000000 m) the filter returns all values.
def test_distance_filter_big(get_big_distance_filtered_list, get_full_building_list):
    """A huge search radius must not filter anything out."""
    assert get_big_distance_filtered_list == get_full_building_list
@pytest.fixture
def get_zero_distance_and_point():
    """Zero search radius and a WGS84 (srid=4326) probe point."""
    return (0, Point(-5, -5, srid=4326))
@pytest.fixture
def get_zero_distance_filtered_list(get_zero_distance_and_point):
    """Buildings within distance 0 of the probe point, returned with the point."""
    dist, point = get_zero_distance_and_point
    res = requests.get(API_URL + '?dist={0}&&point={1},{2}'.format(dist, point.x, point.y))
    assert res.status_code == 200
    return (res.json(), point)
def test_distance_filter_zero(get_zero_distance_filtered_list, get_full_building_list):
    """With distance 0, exactly the geometries covering the point are returned."""
    filtered = get_zero_distance_filtered_list[0]["features"]
    full = get_full_building_list["features"]
    point = get_zero_distance_filtered_list[1]
    for feature in filtered:
        geom = GEOSGeometry(json.dumps(feature['geometry']))
        # For zero distance each filtered geometry should cover this point
        assert geom.covers(point)
    # We count here from the full list how many objects cover the point
    cnt_test = 0
    for feature in full:
        geom = GEOSGeometry(json.dumps(feature['geometry']))
        # For zero distance each geometry should contain this point
        if geom.covers(point):
            cnt_test += 1
    cnt_api = len(filtered)
    # Checking that the counts of filtered objects in the API and in this test agree
    assert cnt_api == cnt_test
@pytest.mark.skip(reason="no need to check now")
def test_distance_filter(get_full_building_list):
    """Smoke-test the distance filter over a grid of growing distances and points.

    NOTE(review): a second function named test_distance_filter is defined
    below in this module and shadows this one, so the skip marker never takes
    effect -- confirm and rename one of the two.
    """
    for i in range(10):
        dist = 10
        x = 0.0
        y = 0.0
        for j in range(10):
            res = requests.get(API_URL + '?dist={0}&&point={1},{2}'.
                               format(dist, x, y))
            assert res.status_code == 200
            data = res.json()
            x += 2
            y += 2
            dist *= 10
def test_distance_filter_bad_args(get_full_building_list):
    """Valid filter args succeed; missing/empty args fall back to the full list.

    Renamed from ``test_distance_filter``: the old name silently shadowed the
    skipped test of the same name above. The two comparisons against the full
    list were also bare expressions with no effect -- they are now asserted.
    """
    res = requests.get(API_URL + '?dist={0}&&point={1},{2}'.
                       format(5, 5, 5))
    assert res.status_code == 200
    # if filter args are incorrect (missing dist), the API should return all
    res = requests.get(API_URL + '?&point={0},{1}'.
                       format(5, 5))
    assert res.status_code == 200
    data = res.json()
    assert data == get_full_building_list  # was a no-op bare comparison
    # if filter args are incorrect (empty dist), the API should return all
    res = requests.get(API_URL + '??dist=&point={0},{1}'.
                       format(5, 5))
    assert res.status_code == 200
    data = res.json()
    assert data == get_full_building_list  # was a no-op bare comparison
@pytest.fixture
def post_new_polygon():
    """POST a 1x1-degree right-triangle building; return the created record.

    NOTE(review): cleanup (DELETE) is done by the test that consumes this
    fixture, not here -- confirm if more tests start depending on it.
    """
    x = 0
    y = 1
    d = {"geom": {"type": '', "coordinates": ''}}
    # area is 1/2 square degree or ~ 6160500000 square meters
    p = Polygon(((x, x), (y, y), (x, y), (x, x)), srid=4326)
    d["geom"]["type"] = p.geom_type
    d["geom"]["coordinates"] = p.coords
    res = requests.post(API_URL + '', json=d)
    assert res.status_code == 201
    newBuildingData = res.json()
    return newBuildingData
def test_distance_filter_value(post_new_polygon):
    """Distance filtering around the posted polygon respects the radius.

    Renamed from the misspelled ``test_distance_filtel_value``.
    """
    building_id = post_new_polygon["id"]  # was `id`, which shadowed the builtin
    # The probe point is ~1 degree (~111 km) east of the polygon, so a
    # 100 km radius must miss it...
    res = requests.get(API_URL + '?dist=100000&point=0,2.0')
    filteredData = res.json()
    assert res.status_code == 200
    assert post_new_polygon not in filteredData["features"]
    # ...while a 200 km radius must reach it.
    res = requests.get(API_URL + '?dist=200000&point=0,2.0')
    filteredData = res.json()
    assert res.status_code == 200
    assert post_new_polygon in filteredData["features"]
    # Clean up the building created by the fixture.
    res = requests.delete(API_URL + str(building_id))
    assert res.status_code == 204
#print(Distance(Point(0,0,srid=27700),(Point(1,0,srid=27700))). | ValarValar/GeoDjangoRestTest | GeoBack/TestTask/tests/test_dist.py | test_dist.py | py | 5,384 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pytest.fixture",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "requests.get",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "django.contrib.gis.geos.Point",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "pytest.... |
23522566087 | # "Eugene Morozov"<Eugene ~at~ HiEugene.com>
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from util import get_color
import time
def plotX(X, mu, M, N, K, r, ax):
    """Redraw the M data points (colored by cluster id r) and the K centroids.

    Uses the function attributes plotX.px / plotX.pmu (initialized by K_means)
    to remove the previously drawn scatter artists before drawing new ones.
    Handles 2-D and 3-D data (N == 2 or N == 3). Centroids are drawn as stars
    with colors offset by K so they differ from the point colors.
    """
    for i in range(M):
        if plotX.px[i]: plotX.px[i].remove()
        if N == 2:
            plotX.px[i] = ax.scatter(X[i,0], X[i,1], color=get_color(r[i]), marker="o")
        else:
            plotX.px[i] = ax.scatter(X[i,0], X[i,1], X[i,2], color=get_color(r[i]), marker="o")
    for i in range(K):
        if plotX.pmu[i]: plotX.pmu[i].remove()
        if N == 2:
            plotX.pmu[i] = ax.scatter(mu[i,0], mu[i,1], color=get_color(i+K), marker="*", linewidths=5)
        else:
            plotX.pmu[i] = ax.scatter(mu[i,0], mu[i,1], mu[i,2], color=get_color(i+K), marker="*", linewidths=5)
def calc_dist(x, y):
    """Squared Euclidean distance between x and y (sqrt deliberately omitted)."""
    diff = x - y
    return (diff ** 2).sum()
def K_means(X, K, v, title_str):
    """Cluster the rows of X (M samples, N features) into K clusters (Lloyd's algorithm).

    Args:
        X: (M, N) data matrix.
        K: number of clusters.
        v: if truthy (and N is 2 or 3), animate the iterations with matplotlib.
        title_str: title shown on the final plot.
    Returns:
        r: length-M int array of cluster assignments.
    """
    M, N = X.shape # M samples of dimension N
    r = np.zeros(M, dtype="int") # cluster id for each point
    mu_new = np.empty([K,N])
    # Initialize centroids with K evenly-spaced samples from X.
    for i in range(K):
        mu_new[i,:] = X[i*M//K,:]
    mu = np.zeros([K,N])
    if v and (N==2 or N==3):
        # Interactive plotting state consumed by plotX().
        plotX.px = [None]*M
        plotX.pmu = [None]*K
        fig = plt.figure()
        if N == 2:
            ax = fig.gca()
        else:
            ax = fig.gca(projection='3d')
        plt.ion()
        plt.grid(True)
        plt.show()
    # while np.abs(mu_new - mu).sum() > 1e-10: # or track when the assignments no longer change
    while calc_dist(mu_new, mu) > 1e-10: # or track when the assignments no longer change
        if v and (N==2 or N==3):
            plotX(X, mu_new, M, N, K, r, ax)
            # ax.set_title(f"diff = {np.abs(mu_new - mu).sum()}")
            ax.set_title(f"diff = {calc_dist(mu_new, mu)}")
            fig.canvas.flush_events(); time.sleep(0.5)
        mu = mu_new.copy()
        # Assignment step: each point moves to its nearest centroid.
        for i in range(M):
            min_dist = 1e10
            r[i] = 0
            for k in range(K):
                dist = calc_dist(X[i,:], mu[k,:])
                if dist < min_dist:
                    min_dist = dist
                    r[i] = k
        # Update step: each centroid becomes the mean of its assigned points.
        # NOTE(review): an empty cluster would divide by zero here -- confirm inputs.
        for k in range(K):
            mu_new[k] = X[r==k,:].sum(axis=0) / X[r==k,:].shape[0]
    if v and (N==2 or N==3):
        ax.set_title(title_str)
        fig.canvas.flush_events(); time.sleep(0.5)
    return r
| eugegit/examples | k_means.py | k_means.py | py | 2,211 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "util.get_color",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "util.get_color",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "util.get_color",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "util.get_color",
"li... |
10262708742 | import rospy
import open3d as o3d
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
import numpy as np
class RealSensePointCloud:
    """Subscribe to a RealSense depth point cloud topic and render it live with Open3D."""
    def __init__(self):
        # Initialize ROS node
        rospy.init_node('realsense_pointcloud_visualizer')
        # Create a subscriber to the RealSense point cloud ROS topic
        # Make sure to use the correct topic where your RealSense camera publishes the point cloud data
        self.pc_subscriber = rospy.Subscriber("/camera/depth/color/points", PointCloud2, self.pointcloud_callback, queue_size=1)
        # Initialize Open3D visualizer
        self.vis = o3d.visualization.Visualizer()
        self.vis.create_window(window_name='RealSense Point Cloud')
        # Initialize point cloud variable
        self.pcd = o3d.geometry.PointCloud()
        self.voxel_size = 0.01 # Adjust this value as needed
        # Variable to control the main loop
        self.is_running = True
    def pointcloud_callback(self, msg):
        """ROS callback: convert the PointCloud2 message, downsample, and redraw."""
        try:
            # Convert ROS PointCloud2 message to array of xyz points
            pc_array = pc2.read_points(msg, field_names=("x", "y", "z"), skip_nans=True)
            points = np.array(list(pc_array))
            if points.size == 0:
                rospy.logwarn("Received an empty point cloud.")
                return
            # Update point cloud
            self.pcd.clear()
            self.pcd.points = o3d.utility.Vector3dVector(points)
            # Voxel downsample the point cloud
            self.pcd = self.pcd.voxel_down_sample(voxel_size=self.voxel_size)
            # Update the visualizer
            self.update_visualizer()
        except Exception as e:
            # Broad catch keeps the subscriber alive; the error is logged instead.
            rospy.logerr("An error occurred in pointcloud_callback: %s", str(e))
    def update_visualizer(self):
        """Replace the rendered geometry with the current point cloud and redraw."""
        self.vis.clear_geometries() # Clear old geometries
        self.vis.add_geometry(self.pcd) # Add the current point cloud
        self.vis.poll_events() # Update the visualizer events
        self.vis.update_renderer() # Render the point cloud
    def run(self):
        """Block in the ROS spin loop; close the window on shutdown."""
        # Main loop
        while not rospy.is_shutdown() and self.is_running:
            rospy.spin()
        # If ROS is shut down or the script is stopped, close the visualizer window
        self.vis.destroy_window()
# Main function: construct the node and block in the spin loop until shutdown.
if __name__ == "__main__":
    try:
        pointcloud_visualizer = RealSensePointCloud()
        pointcloud_visualizer.run()
    # ROSInterruptException is raised by rospy on Ctrl-C / shutdown: exit quietly.
    except rospy.ROSInterruptException:
pass | yanglh14/DIA | DIA/real_exp/catkin_ws/src/robot_control/scripts/camera.py | camera.py | py | 2,507 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rospy.init_node",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "rospy.Subscriber",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sensor_msgs.msg.PointCloud2",
"line_number": 14,
"usage_type": "argument"
},
{
"api_name": "open... |
72198921384 | # -*- coding: utf-8 -*-
"""
Created on Fri Aug 2 11:57:41 2019
Updated 20220904 22:42WER
@authors: wrosing, mfitz
"""
import os
import pathlib
import sys
import socket
import glob
# This routine removes all mention of previous configs from the path,
# for safety: the local computer got clogged with all manner of configs in the path.
path_removals = []
for q in range(len(sys.path)):
    if "ptr-observatory" in sys.path[q] and "configs" in sys.path[q]:
        print("Removing old config path: " + str(sys.path[q]))
        path_removals.append(sys.path[q])
for remover in path_removals:
    sys.path.remove(remover)
pathdone = 0
# First try to get the hostname from a file in the directory above (..) ptr-observatory
cwd = str(pathlib.Path().resolve())
hwd = cwd.replace("ptr-observatory", "")
hostname_file = glob.glob(hwd + "hostname*")
try:
    # e.g. a file named "hostnamesro" yields site_name "sro".
    site_name = hostname_file[0].replace('.txt','').split("hostname")[1]
    sys.path.append(os.path.join(pathlib.Path().resolve(), "configs", site_name))
    pathdone = 1
    # NOTE(review): this branch only extends sys.path; no config module is
    # imported when pathdone == 1 -- confirm a later import relies on the path.
except (IndexError, OSError):
    # BUG FIX: when glob.glob() finds no hostname* file, hostname_file[0]
    # raises IndexError, which the previous `except OSError:` did not catch.
    print(
        "Could not find a hostname* file in the directory above ptr-observatory \
    (e.g. hostnamesro).\n Trying another method..."
    )
if pathdone == 0:
    # Second attempt: derive the 3-letter site code from the machine hostname.
    print("Attempting hostname approach to config file...")
    host_site = socket.gethostname()[:3].lower()
    sys.path.append(os.path.join(pathlib.Path().resolve(), "configs", host_site))
    try:
        from obs_config import *
    except ImportError:
        print(
            "Failed the hostname approach to config file.\n"
            + str(host_site)
            + " isn't a real place, or there isn't a config file \
    that I can find!"
        )
        # Last resort: ask the operator interactively for the site name.
        try:
            site_name = input("What site am I running at?\n")
            sys.path.append(os.path.join(pathlib.Path().resolve(), "configs", site_name))
            from site_config import *
        except ImportError:
            print(
                str(site_name)
                + " isn't a real place, or there isn't a config file \
    that I can find! Make sure you supplied \
    a correct site name. Exiting."
            )
            sys.exit()
| LCOGT/ptr-observatory | ptr_config.py | ptr_config.py | py | 2,150 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "sys.path",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "sys.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "sys.path",
"line_number... |
11347574978 | from typing import MutableSequence
def fsort(a: MutableSequence, max: int) -> None:
    """Stable counting sort, in place.  Elements must lie in 0..max."""
    # Frequency table: counts[v] is the number of occurrences of value v.
    counts = [0] * (max + 1)
    for value in a:
        counts[value] += 1
    # Cumulative counts: counts[v] is now the number of elements <= v.
    for idx in range(1, max + 1):
        counts[idx] += counts[idx - 1]
    # Place elements back-to-front so equal values keep their order.
    out = [0] * len(a)
    for value in reversed(a):
        counts[value] -= 1
        out[counts[value]] = value
    a[:] = out
def countingSort(a: MutableSequence) -> None:
    """Sort *a* in place with counting sort (range inferred from max(a))."""
    largest = max(a)
    fsort(a, largest)
def main() -> None:
    """Demo: sort ten random values from [0, 1000) and show before/after."""
    from random import sample
    data = sample(range(1000), 10)
    print(f"original : {data}")
    countingSort(data)
    print(f"sorted : {data}")
if __name__ == "__main__":
    main()
| leekyuyoungcalgo/python_algo | 20220819/countingSort2.py | countingSort2.py | py | 1,028 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "typing.MutableSequence",
"line_number": 2,
"usage_type": "name"
},
{
"api_name": "typing.MutableSequence",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "random.sample",
"line_number": 20,
"usage_type": "call"
}
] |
36789253348 | from __future__ import print_function
import os.path
from googleapiclient.discovery import build
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import Request
from google.oauth2.credentials import Credentials
import pandas as pd
from IPython.display import HTML
import logging
import boto3
import boto3.session
from botocore.exceptions import ClientError
#ELO Weights
# Actual-outcome scores fed into the Elo expected-score formula:
# a win counts 1, a tie 0.5, a loss 0.
WIN = 1
TIE = 0.5
LOSS = 0
class CatanStats():
    """Pulls Catan game results from a Google Sheet and computes per-player
    statistics, including pairwise Elo ratings accumulated over every game."""
    def __init__(self):
        # If modifying these scopes, delete the file token.json.
        self.SCOPES = ['https://www.googleapis.com/auth/spreadsheets.readonly']
        # The ID and range of a sample spreadsheet.
        self.SAMPLE_SPREADSHEET_ID = '1MG7jhiarGpRunEILVbkEfD3mLRUBpIMI4au7cMID_EM'
        self.SAMPLE_RANGE_NAME = 'Games'
        # Dataframe containing Games and All Player Data from Google Sheets
        self.games, self.player_scores = self.get_games()
        # Initialize Elo Dict
        self.elo = self.elo_init()
        # Replay every recorded game in sheet order to accumulate ratings.
        for index, row in self.games.iterrows():
            self.new_ratings(row)
    # Fetches data from Google Sheets
    def get_games(self):
        """Fetch the Games sheet and return (games_df, long_player_table).
        Runs the OAuth installed-app flow on first use and caches the token
        in token.json for subsequent runs.
        """
        creds = None
        # The file token.json stores the user's access and refresh tokens, and is
        # created automatically when the authorization flow completes for the first
        # time.
        if os.path.exists('token.json'):
            creds = Credentials.from_authorized_user_file('token.json', self.SCOPES)
        # If there are no (valid) credentials available, let the user log in.
        if not creds or not creds.valid:
            if creds and creds.expired and creds.refresh_token:
                creds.refresh(Request())
            else:
                flow = InstalledAppFlow.from_client_secrets_file(
                    'credentials.json', self.SCOPES)
                creds = flow.run_local_server(port=0)
            # Save the credentials for the next run
            with open('token.json', 'w') as token:
                token.write(creds.to_json())
        service = build('sheets', 'v4', credentials=creds)
        sheet = service.spreadsheets()
        result = sheet.values().get(spreadsheetId=self.SAMPLE_SPREADSHEET_ID,
                                    range=self.SAMPLE_RANGE_NAME).execute()
        values = result.get('values', [])
        # First row of the sheet is the header.
        df = pd.DataFrame(values[1:], columns=values[0])
        # Stack the four per-seat column groups into one long player table.
        p1 = df[['Player 1', 'Starting Production 1', 'Ending VP 1']].rename(columns={"Player 1": "Player", "Starting Production 1": "Starting Production", "Ending VP 1": "Ending VP"})
        p2 = df[['Player 2', 'Starting Production 2', 'Ending VP 2']].rename(columns={"Player 2": "Player", "Starting Production 2": "Starting Production", "Ending VP 2": "Ending VP"})
        p3 = df[['Player 3', 'Starting Production 3', 'Ending VP 3']].rename(columns={"Player 3": "Player", "Starting Production 3": "Starting Production", "Ending VP 3": "Ending VP"})
        p4 = df[['Player 4', 'Starting Production 4', 'Ending VP 4']].rename(columns={"Player 4": "Player", "Starting Production 4": "Starting Production", "Ending VP 4": "Ending VP"})
        players = pd.concat([p1, p2, p3, p4])
        # Empty seats come through as "" -- convert to NaN and drop them.
        nan_value = float("NaN")
        players.replace("", nan_value, inplace=True)
        players = players.dropna()
        players = players.reset_index()
        players_scores = players.apply(pd.to_numeric, errors='ignore')
        return df, players_scores
    # Initializes Elo Dict
    def elo_init(self):
        """Give every player who ever appears a starting Elo of 1000."""
        elo = {}
        for player in self.player_scores['Player'].unique():
            elo[player] = 1000
        return elo
    # Individual elo calculation based on outcome
    def elo_calc(self, eloA, eloB, actual_outcome):
        """Return player A's Elo adjustment for one pairwise result (K=15)."""
        K_VAL = 15
        expected_outcome = 1 / (1 + 10**((eloB - eloA)/400) )
        elo_adjustment_a = K_VAL * (actual_outcome - expected_outcome)
        return elo_adjustment_a
    # Calculates new elo ratings for game
    def new_ratings(self, game):
        """Update self.elo in place from one game row: each player is compared
        pairwise against every seat (including their own, which scores a tie).
        Variant games are excluded from ratings entirely."""
        # Check if Variant Game
        if(game["Variant"] == "TRUE"):
            variant = True
        else:
            variant = False
        # Check if 3 or 4 player
        if(game["Player 4"] == ''):
            four_player_game = False
        else:
            four_player_game = True
        # Pairwise elo_calc
        player_elo_adj = []
        if(not variant):
            if(four_player_game):
                # Iterate over pairwise player matchups
                for i in range(1,5):
                    player_cummulative = 0
                    for j in range(1,5):
                        if(int(game["Ending VP {}".format(i)]) > int(game["Ending VP {}".format(j)])):
                            outcome = WIN
                        elif(int(game["Ending VP {}".format(i)]) == int(game["Ending VP {}".format(j)])):
                            outcome = TIE
                        else:
                            outcome = LOSS
                        match_up = self.elo_calc(self.elo[game['Player {}'.format(i)]], self.elo[game['Player {}'.format(j)]], outcome)
                        player_cummulative += match_up
                    player_elo_adj.append(player_cummulative)
                # Elo adjustments after calculations
                for i in range(0,4):
                    self.elo[game['Player {}'.format(i + 1)]] += round(player_elo_adj[i])
            else:
                # Iterate over pairwise player matchups
                for i in range(1,4):
                    player_cummulative = 0
                    for j in range(1,4):
                        try:
                            if(int(game["Ending VP {}".format(i)]) > int(game["Ending VP {}".format(j)])):
                                outcome = WIN
                            elif(int(game["Ending VP {}".format(i)]) == int(game["Ending VP {}".format(j)])):
                                outcome = TIE
                            else:
                                outcome = LOSS
                        except ValueError:
                            print(game["Game #"],i, " ", j, " ", game["Ending VP {}".format(i)], " " , game["Ending VP {}".format(j)] )
                        match_up = self.elo_calc(self.elo[game['Player {}'.format(i)]], self.elo[game['Player {}'.format(j)]], outcome)
                        player_cummulative += match_up
                    player_elo_adj.append(player_cummulative)
                # Elo adjustments after calculations
                for i in range(0,3):
                    self.elo[game['Player {}'.format(i + 1)]] += round(player_elo_adj[i])
        return None
    # Aggregate Stats on Players
    def player_info(self):
        """Return a stats table for players with more than one game, sorted
        by mean Elo, then win percentage, then mean ending VP."""
        # Add Elo scores to Player_Scores
        self.player_scores['Elo'] = self.player_scores.apply(lambda row: int(self.elo[row['Player']]), axis = 1)
        # Add aggregate data to player_stats df
        player_stats = self.player_scores.groupby('Player').agg({'Starting Production': ['mean', 'std'], 'Ending VP': ['mean', 'std'], 'Elo': ['mean']})
        player_stats["Games Played"] = self.player_scores.groupby('Player').size()
        # A game counts as a win when the player finished with 10+ VP.
        player_stats["Wins"] = self.player_scores.loc[self.player_scores['Ending VP'] >= 10].groupby('Player').size()
        player_stats["Win Percentage"] = player_stats["Wins"] / player_stats["Games Played"]
        # Rounds data to 2 decimal spots
        player_stats[('Starting Production','mean')] = player_stats[('Starting Production','mean')].round(2)
        player_stats[('Starting Production','std')] = player_stats[('Starting Production','std')].round(2)
        player_stats[('Ending VP','mean')] = player_stats[('Ending VP','mean')].round(2)
        player_stats[('Ending VP','std')] = player_stats[('Ending VP','std')].round(2)
        player_stats['Win Percentage'] = (player_stats['Win Percentage'] * 100).round(2)
        # Filter players who have played more than 1 game
        player_stats = player_stats[player_stats['Games Played'] > 1]
        # Alternative sort orders kept for reference:
        # return player_stats.sort_values([('Ending VP', 'mean'),('Elo', 'mean'),"Win Percentage"], ascending=False)
        # return player_stats.sort_values(["Games Played", ('Ending VP', 'mean'),('Elo', 'mean'),"Win Percentage"], ascending=False)
        # return player_stats.sort_values([('Starting Production', 'mean'),"Win Percentage",('Ending VP', 'mean')], ascending=False)
        # return player_stats.sort_values(["Win Percentage",('Elo', 'mean'),('Ending VP', 'mean')], ascending=False)
        return player_stats.sort_values([('Elo', 'mean'),"Win Percentage",('Ending VP', 'mean')], ascending=False)
    # Aggregate Stats on Dice
    def dice_info(self):
        # NOTE(review): get_data is not defined anywhere in this file, so this
        # method raises NameError if called, and it returns nothing.
        df = get_data()
    # Creates HTML File from player_info
    def create_player_table(self):
        """Render player_info() as a Bootstrap-styled table into index.html."""
        html = self.player_info().to_html(classes=["table", "table-striped"])
        # write html to file
        text_file = open("index.html", "w")
        text_file.write('<link rel="stylesheet" href="https://stackpath.bootstrapcdn.com/bootstrap/4.3.1/css/bootstrap.min.css" integrity="sha384-ggOyR0iXCbMQv3Xipma34MD+dH/1fQ784/j6cY/iJTQUOhcWr7x9JvoRxT2MZw1T" crossorigin="anonymous">\n')
        text_file.write(html)
        text_file.close()
def upload_s3(file):
    """Upload *file* to the adamlenning.com S3 bucket, served as HTML.

    Returns True on success, False when the upload fails (error is logged).
    """
    session = boto3.session.Session(profile_name='default')
    client = session.client('s3')
    try:
        client.upload_file(
            Filename=file,
            Bucket="adamlenning.com",
            Key=file,
            ExtraArgs={'ContentType': "text/html"},
        )
    except ClientError as err:
        logging.error(err)
        return False
    return True
if __name__ == '__main__':
    # Build the stats (fetches the sheet and replays every game), render the
    # HTML table, then publish it to S3.
    c1 = CatanStats()
    c1.create_player_table()
    upload_s3('index.html')
| AdamLenning/sheets-catan | get_stats.py | get_stats.py | py | 9,937 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.path.exists",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "os.path.path",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "os.path",
"line_number": 45,
"usage_type": "name"
},
{
"api_name": "google.oauth2.credentia... |
41766780834 | import curses
##########################################################################
## This script 'moves' the X around the terminal when W,S,A,D are pressed
##########################################################################
def main(stdscr):
    # Initial terminal setup
    stdscr.clear()
    curses.curs_set(0)  # Hide the cursor
    # Screen size
    height, width = stdscr.getmaxyx()
    # Player's starting position (screen centre)
    player_y, player_x = height // 2, width // 2
    # Main game loop
    while True:
        stdscr.nodelay(True)
        # Read user input (non-blocking)
        key = stdscr.getch()
        # Erase the player at its previous position
        stdscr.addch(player_y, player_x, ' ')
        # Update the player's position based on the pressed key
        if key == ord('w'):
            player_y -= 1
        elif key == ord('s'):
            player_y += 1
        elif key == ord('a'):
            player_x -= 1
        elif key == ord('d'):
            player_x += 1
        # Clamp the player's position to the screen bounds
        player_y = max(0, min(player_y, height - 1))
        player_x = max(0, min(player_x, width - 1))
        # Draw the player
        # NOTE(review): curses raises an error when writing to the
        # bottom-right cell (height-1, width-1) -- consider clamping x to
        # width - 2.  Also, this loop busy-waits at 100% CPU; a short
        # napms()/timeout() would be kinder.
        stdscr.addch(player_y, player_x, 'X')
        # Refresh the screen
        stdscr.refresh()
if __name__ == "__main__":
    # curses.wrapper restores the terminal state even if main() raises.
    curses.wrapper(main)
| mathemaia/studies | Python/Bibliotecas/Curses/main.py | main.py | py | 1,378 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "curses.curs_set",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "curses.wrapper",
"line_number": 50,
"usage_type": "call"
}
] |
11836108476 | from collections import OrderedDict as OD
from numpy import exp, log as ln
from styles import mark_styles
def KWW(t, tau, beta):
    """Non-normalized Kohlrausch-Williams-Watts compressed (beta>1)
    exponential: 1 - exp(-(t/tau)**beta)."""
    stretched = (t / tau) ** beta
    return 1 - exp(-stretched)
KWW.title = 'Non-normalized Kohlrausch-Williams-Watts compressed (beta>1) exponential function'
KWW.title_lowercase = 'non-normalized Kohlrausch-Williams-Watts compressed (beta>1) exponential function'
KWW.equation = '1 - exp(-(t/tau)^beta)'
def KWW_inverse(KWW_output, tau, beta):
    """Invert KWW: return the time t with KWW(t, tau, beta) == KWW_output."""
    return tau * (-ln(1 - KWW_output)) ** (1 / beta)
def KWW_prime(t, tau, beta):
    """First time-derivative of KWW."""
    u = (t / tau) ** beta
    return (beta / tau) * (t / tau) ** (beta - 1) * exp(-u)
def KWW_doubleprime(t, tau, beta):
    """Second time-derivative of KWW."""
    u = (t / tau) ** beta
    curvature = 1 + beta * (u - 1)
    return -(beta / tau ** 2) * (t / tau) ** (beta - 2) * exp(-u) * curvature
def KWW_inflection(tau, beta):
    """Return the inflection time (where KWW_doubleprime vanishes)."""
    return tau * ((beta - 1) / beta) ** (1 / beta)
def KWW_lagtime(tau, beta, t_inflection, leadtime = False):
    """Time at which the tangent at the inflection point reaches *leadtime*
    (0 by default, since False == 0 numerically)."""
    height = KWW(t_inflection, tau, beta)
    slope = KWW_prime(t_inflection, tau, beta)
    return (leadtime - height) / slope + t_inflection
def KWW_scaled(t, tau, slowbeta, scale, t0):
    """Time- and amplitude-scaled KWW curve: shifted by t0 and stretched in
    both axes by *scale*, with effective exponent slowbeta**4."""
    fastbeta = slowbeta**4
    _KWW = lambda t: KWW(t, tau, fastbeta)
    return ( scale * _KWW((t-t0)/scale) )
KWW_scaled.title = 'Scaled KWW'
KWW_scaled.title_lowercase = 'scaled KWW'
KWW_scaled.equation = '( scale * KWW((t-t0)/scale, tau, slowbeta^4) )'
# Marks rendered on plots of this curve; styles come from styles.mark_styles.
KWW_scaled.marks = {'t_5%', 't_10%', 't_90%', 't_95%', 'lagtime'}
KWW_scaled.styles = mark_styles
def KWW_descaler(tau, slowbeta, scale = None, t0 = None):
    """Convert KWW_scaled parameters to plain KWW parameters.

    Only *tau* and *slowbeta* matter (effective exponent is slowbeta**4);
    *scale* and *t0* are accepted but ignored so a KWW_scaled parameter set
    can be splatted straight in.
    """
    return tau, slowbeta ** 4
def KWW_timeinfo(tau, beta, t_inflection):
    """Return an ordered mapping of characteristic times for a KWW curve:
    the 5/10/90/95% crossing times plus the lag time."""
    def crossing(fraction):
        # Time at which the unscaled KWW curve reaches *fraction*.
        return KWW_inverse(fraction, tau, beta)
    return OD([
        ('t_5%', crossing(0.05)),
        ('t_10%', crossing(0.1)),
        ('t_90%', crossing(0.9)),
        ('t_95%', crossing(0.95)),
        ('lagtime', KWW_lagtime(tau, beta, t_inflection)),
    ])
def KWW_timescaler(t, scale, t0):
    """Map a time on the unscaled KWW curve onto the scaled curve."""
    return t0 + scale * t
if __name__ == '__main__':
    # Quick demo of the scaled/descaled helpers.
    scaled_args = { 'tau': 1.2, 'slowbeta': 1.2, 'scale': 2.9, 't0': 0.4 }
    descaled_args = KWW_descaler(**scaled_args)
    t_inflection = KWW_inflection(*descaled_args)
    rate_max = KWW_prime(t_inflection, *descaled_args)
    # NOTE(review): KWW_timeinfo values are plain floats, so value['title']
    # below raises TypeError -- this print looks stale relative to the API.
    print(*(f"{value['title']}: {value['value']}" for key, value in KWW_timeinfo(*descaled_args, t_inflection).items()), sep = '\n')
{
"api_name": "numpy.exp",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 17,
"... |
6994296870 | from lib.cuckoo.common.abstracts import Signature
class DiskInformation(Signature):
    """Flags samples that open a raw disk device and then issue disk-geometry
    IOCTLs -- a common anti-virtualization probe."""
    name = "antivm_generic_disk"
    description = "Queries information on disks, possibly for anti-virtualization"
    severity = 3
    categories = ["anti-vm"]
    authors = ["nex"]
    minimum = "2.0"
    filter_apinames = [
        "NtCreateFile",
        "DeviceIoControl",
        "NtDeviceIoControlFile",
    ]
    indicators = [
        "scsi0",
        "physicaldrive0",
    ]
    ioctls = {
        2954240: "IOCTL_STORAGE_QUERY_PROPERTY",
        458752: "IOCTL_DISK_GET_DRIVE_GEOMETRY",
        315400: "IOCTL_SCSI_MINIPORT",
    }
    def init(self):
        # Becomes True once the sample has opened a raw disk device.
        self.drive_opened = False
    def on_call(self, call, process):
        """Record raw-disk opens; flag known disk IOCTLs issued after one."""
        api = call["api"]
        if api == "NtCreateFile":
            filepath = call["arguments"]["filepath"].lower()
            if any(marker in filepath for marker in ("scsi0", "physicaldrive0")):
                self.drive_opened = True
                self.mark_call()
        elif api in ("DeviceIoControl", "NtDeviceIoControlFile"):
            if self.drive_opened and call["arguments"]["control_code"] in self.ioctls:
                self.mark_call()
                return True
| cuckoosandbox/community | modules/signatures/windows/antivm_generic_disk.py | antivm_generic_disk.py | py | 1,192 | python | en | code | 312 | github-code | 36 | [
{
"api_name": "lib.cuckoo.common.abstracts.Signature",
"line_number": 3,
"usage_type": "name"
}
] |
27932648838 | import streamlit as st
import pandas as pd
import numpy as np
import folium
import os
from folium.plugins import HeatMap
from streamlit_folium import st_folium, folium_static
# from gbq_functions.big_query_download import *
from gbq_functions.params import *
import matplotlib.pyplot as plt
import matplotlib as mpl
from google.cloud import bigquery
from google.oauth2 import service_account
import streamlit.components.v1 as components
from functions_for_website.load_outputs import *
# Page chrome: wide layout, default sidebar behavior, no extra menu items.
st.set_page_config(page_title="LocA", layout="wide", initial_sidebar_state="auto", menu_items=None)
# Clear any session state carried over from other pages so this page starts
# fresh.  NOTE(review): deleting while iterating .keys() is only safe if it
# returns a snapshot -- confirm against the installed streamlit version.
for key in st.session_state.keys():
    del st.session_state[key]
st.markdown("<h1 style='text-align: center; color: black;'>LocA</h1>", unsafe_allow_html=True)
st.markdown("<h2 style='text-align: center; color: black;'>Alternative Mapping Technique </h2>", unsafe_allow_html=True)
# Service-account credentials for BigQuery, loaded from streamlit secrets.
credentials = service_account.Credentials.from_service_account_info(st.secrets["gcp_service_account"])
@st.cache_data(persist=True)
def get_master_district_df():
    '''function that returns the full master district df.
    Dataframe contains district name (primary key), lat_lons for the center,
    lat_lons for the edges of rectangle around area, and the area of the
    rectangle in Hectares'''
    # Columns and table names come from gbq_functions.params (star import).
    query = f"""
            SELECT {",".join(MASTER_COLUMN_NAMES_RAW)}
            FROM {GCP_PROJECT}.{BQ_DATASET}.{BQ_DISTRICT_TABLE}
            ORDER BY HECTARES DESC
            """
    client = bigquery.Client(project=GCP_PROJECT, credentials=credentials)
    query_job = client.query(query)
    result = query_job.result()
    master_districts_df = result.to_dataframe()
    return master_districts_df
master_df = get_master_district_df()
# Keep only English districts ("E" prefix in District_ID), excluding London.
master_df['start'] = master_df['District_ID'].astype(str).str[0] #gets the letter at start of dist.
master_df = master_df[master_df['start'] == "E"]
master_df = master_df[~master_df['District'].str.contains("London",regex=False)]
master_df = master_df.sort_values(by="District", ascending=True) #sorts
# create drop down box
option = st.selectbox("Select District:",
                      list(master_df['District']))
# set up the website to show Dorset on initializing
# NOTE(review): the comment says Dorset but the default set here is
# 'Adur District' -- confirm which is intended.
if 'district' not in st.session_state:
    st.session_state['district'] = 'Adur District'
@st.cache_data
def create_map(district):
    """Render a folium choropleth of per-cell metrics for *district*."""
    @st.cache_data # :point_left: Add the caching decorator
    def load_data(csv):
        df = pd.read_csv(csv)
        return df
    # Pre-computed model outputs shipped alongside the app.
    df_good = load_data(os.path.abspath("outputs/display_gd.csv"))
    df_bad = load_data(os.path.abspath("outputs/display_bad.csv"))
    df = pd.concat([df_good, df_bad],ignore_index=True)
    golden_df = df[df['district_name'] == district]
    golden_df = golden_df.drop_duplicates(['lat', 'lng'])
    golden_df['id'] = golden_df.index
    mapObj = folium.Map(location=[golden_df['lat'].mean(),golden_df['lng'].mean()], zoom_start=10, prefer_canvas=True)
    lats = np.array( golden_df['lat'] )
    longs = np.array( golden_df['lng'] )
    # set up the grid
    # Cell size = largest gap between adjacent distinct coordinates.
    lat_step = max(n2 - n1 for n1, n2 in zip(sorted(set(lats)), sorted(set(lats))[1:]))
    long_step = max(n2 - n1 for n1, n2 in zip(sorted(set(longs)), sorted(set(longs))[1:]))
    # Build one GeoJSON rectangle per data point, keyed by its row id.
    my_geo_json = {
        "type": "FeatureCollection",
        "features": []}
    for i in range(len(lats)):
        my_geo_json['features'].append(
            {
            "type": "Feature",
            "properties": {},
            "geometry": {
                "type": "Polygon",
                "coordinates": [[
                    [longs[i] - long_step/2, lats[i] - lat_step/2],
                    [longs[i] - long_step/2, lats[i] + lat_step/2],
                    [longs[i] + long_step/2, lats[i] + lat_step/2],
                    [longs[i] + long_step/2, lats[i] - lat_step/2],
                    [longs[i] - long_step/2, lats[i] - lat_step/2],
                ]]},
            "id": int(golden_df['id'].values[i])
            }
        )
    # Color each rectangle by its 'metric' value.
    folium.Choropleth(
        geo_data=my_geo_json,
        data=golden_df,
        columns = ['id','metric'],
        fill_color='RdYlGn',
        fill_opacity=0.6,
        line_opacity=0,
        key_on='feature.id',
        bins=5
    ).add_to(mapObj)
    folium_static(mapObj, width = 725)
# Re-render the map when the user confirms a new district.
if st.button('Submit!'):
    st.session_state['district'] = option
create_map(st.session_state['district'])
| willbanny/Location-Analysis-Website | streamlit/pages/3_District_Chloropleth.py | 3_District_Chloropleth.py | py | 4,362 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "streamlit.session_state.keys",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "streamlit.session_state",
"line_number": 19,
"usage_type": "attribute"
},
{
... |
38707667108 | #!/usr/bin/python
import numpy as np
import copy
import time
import itertools
from sim import *
from scenario import *
from AStarAlgo import aStar, clearcache
def parse_env(env):
    """Build a fire-to-fire graph from the simulator state.

    *env* maps (x, y, z) coordinates to labels.  "orange" cells become fire
    targets (offset +2 in y), "blue" cells become lakes (offset +1 in y), and
    the "drone" cell is inserted as vertex 0.  For every ordered fire pair
    (i, j), the cheapest fire_i -> lake -> fire_j route is recorded on the
    edge (straight-line distances; the return leg to the fire counts double).
    Self edges keep dist = inf and lake = None.

    :returns: (graph, fires, lakes) where graph[i][j] is the edge dict.
    """
    fires = []
    lakes = []
    for value in env:
        x, y, z = value
        # BUG FIX: np.float was a deprecated alias of the builtin float and
        # was removed in NumPy 1.24; use float() directly.
        x = float(x)
        y = float(y)
        z = float(z)
        if env[value] == "orange":
            fires.append(np.array([x, y + 2, z]))
        if env[value] == "blue":
            lakes.append(np.array([x, y + 1, z]))
        if env[value] == "drone":
            # The drone is always vertex 0 of the graph.
            fires.insert(0, np.array([x, y, z]))
    graph = []
    for i, fire1 in enumerate(fires):
        vertex = []
        for j, fire2 in enumerate(fires):
            edge = {}
            edge['start'] = fire1
            edge['dist'] = np.inf
            edge['end'] = fire1
            edge['path'] = []
            edge['path'].append(fire2)
            if i == j:
                edge['lake'] = None
                vertex.append(edge)
                continue
            # Pick the lake minimizing fire1 -> lake -> fire2 (round trip to
            # fire2 counts double, hence the factor of 2).
            for k, lake in enumerate(lakes):
                dist1 = np.linalg.norm(fire1 - lake)
                dist2 = 2 * np.linalg.norm(lake - fire2)
                if (dist1 + dist2) < edge['dist']:
                    edge['dist'] = dist1 + dist2
                    edge['lake'] = lake
                    edge['fire'] = fire2
            vertex.append(edge)
        graph.append(vertex)
    return (graph, fires, lakes)
def pick_neighbors(graph, start_vertex_id, max_dist, max_num_vertices):
    """Collect up to max_num_vertices vertex ids whose edge from the start
    vertex is no longer than max_dist; the start vertex is always first."""
    chosen = [start_vertex_id]
    for vertex_id, edge in enumerate(graph[start_vertex_id]):
        if edge['dist'] <= max_dist:
            chosen.append(vertex_id)
            if len(chosen) >= max_num_vertices:
                break
    return chosen
def pick_farthest_neighbor(graph, cur_vertex_id):
    """Return the id of the finite-distance neighbor farthest from
    cur_vertex_id (0 when every edge is infinite or non-positive)."""
    best_id, best_dist = 0, 0
    for vertex_id, edge in enumerate(graph[cur_vertex_id]):
        dist = edge['dist']
        if dist != np.inf and dist > best_dist:
            best_id, best_dist = vertex_id, dist
    return best_id
def mark_vertices_covered(graph, short_list):
    """Mark every vertex in short_list as visited: all incoming edges get
    infinite distance, and every entry but the first has its self-edge path
    cleared.  Mutates and returns *graph*."""
    for vertex_id in short_list:
        for row in graph:
            row[vertex_id]['dist'] = np.inf
    for vertex_id in short_list[1:]:
        graph[vertex_id][vertex_id]['path'] = None
    return graph
def find_shortest_path(graph, short_list):
    """Brute-force the cheapest visiting order over the vertices in
    short_list.  When the list starts with vertex 0 (the drone), vertex 0 is
    pinned as the first stop; otherwise every ordering is considered."""
    def tour_cost(order):
        # Sum of edge distances between consecutive vertices of *order*.
        total = 0
        for a, b in zip(order, order[1:]):
            total += graph[a][b]['dist']
        return total
    best_order = []
    best_cost = np.inf
    if short_list[0] == 0:
        for tail in itertools.permutations(short_list[1:]):
            cost = tour_cost((0,) + tail)
            if cost < best_cost:
                best_order = list(tail)
                best_cost = cost
        best_order.insert(0, 0)
    else:
        for order in itertools.permutations(short_list):
            cost = tour_cost(order)
            if cost < best_cost:
                best_order = list(order)
                best_cost = cost
    return best_order
def populate_path_with_lakes(graph, path):
    """Expand a vertex ordering into a waypoint list: the first vertex's own
    stored path, then for each hop the chosen lake followed by the target
    vertex's stored path."""
    first = path[0]
    waypoints = list(graph[first][first]['path'])
    prev = first
    for nxt in path[1:]:
        hop = graph[prev][nxt]
        waypoints.append(hop['lake'])
        waypoints.extend(hop['path'])
        prev = nxt
    return waypoints
def reconstruct_graph(graph, lakes):
    """Collapse clustered vertices into a smaller graph.

    Vertices whose self-edge path was cleared (set to None) were absorbed
    into a cluster and are dropped.  For the survivors, each edge connects
    the END of one cluster's path to the START of another's, via the cheapest
    intermediate lake (return leg to the fire counts double, matching
    parse_env).
    """
    new_graph = []
    for i, v1 in enumerate(graph):
        # Skip vertices absorbed into another cluster.
        if v1[i]['path'] == None:
            continue
        vertex = []
        for j, v2 in enumerate(graph):
            if v2[j]['path'] == None:
                continue
            edge = {}
            edge['start'] = v1[i]['start']
            edge['dist'] = np.inf
            edge['end'] = v1[i]['path'][-1]
            edge['path'] = copy.deepcopy(v2[j]['path'])
            if i == j:
                edge['lake'] = None
                vertex.append(edge)
                continue
            # Cheapest (end of cluster i) -> lake -> (start of cluster j).
            for k, lake in enumerate(lakes):
                #print(i, j, k)
                #dist1 = len(aStar(tuple(fire1), tuple(lake), env))
                #dist2 = len(aStar(tuple(lake), tuple(fire2), env))
                dist1 = np.linalg.norm(v1[i]['path'][-1] - lake)
                dist2 = 2 * np.linalg.norm(lake - v2[j]['path'][0])
                #num_edges += 1
                if (dist1 + dist2) < edge['dist']:
                    edge['dist'] = dist1 + dist2
                    edge['lake'] = lake
                    edge['fire'] = v2[j]['path'][0]
            #print(vertex)
            vertex.append(edge)
        new_graph.append(vertex)
    #print(graph, fires, lakes)
    return new_graph
def form_clusters(graph, max_dist, max_num_vertices, lakes):
    """Greedily group graph vertices into clusters of nearby fires.

    Repeatedly picks up to max_num_vertices neighbors within max_dist of the
    current start vertex, solves the optimal visiting order inside the
    cluster, stores the expanded waypoint path on the cluster's first vertex,
    marks the members covered, and hops to the farthest remaining neighbor.
    Finally rebuilds the reduced cluster-level graph.

    :returns: (reduced_graph, num_clusters)
    """
    num_nodes_covered = 0
    num_clusters = 0
    start_vertex_id = 0
    while num_nodes_covered < len(graph):
        short_list = pick_neighbors(graph, start_vertex_id, max_dist, max_num_vertices)
        if (len(short_list) > 1):
            shortest_path = find_shortest_path(graph, short_list)
            shortest_path_with_lakes = populate_path_with_lakes(graph, shortest_path)
            #if num_clusters % 2 == 0:
            # The cluster's route is stored on its first vertex's self-edge.
            graph[shortest_path[0]][shortest_path[0]]['path'] = copy.deepcopy(shortest_path_with_lakes)
            #print("Clustered Nodes", num_clusters)
            #print(shortest_path)
            #print(graph[shortest_path[0]][shortest_path[0]]['path'])
            graph = mark_vertices_covered(graph, shortest_path)
            start_vertex_id = pick_farthest_neighbor(graph, shortest_path[-1])
            #else:
            #    graph[shortest_path[-1]][shortest_path[-1]]['path'] = copy.deepcopy(list(reversed(shortest_path_with_lakes)))
            #    graph = mark_vertices_covered(graph, short_list)
            #    start_vertex_id = pick_farthest_neighbor(graph, shortest_path[0])
        else:
            # Singleton cluster: just mark it covered and move on.
            graph = mark_vertices_covered(graph, short_list)
            start_vertex_id = pick_farthest_neighbor(graph, short_list[0])
            #print("Clustered Nodes", num_clusters)
            #print(short_list)
            #print(graph[short_list[0]][short_list[0]]['path'])
        num_nodes_covered += len(short_list)
        num_clusters += 1
        #print(num_nodes_covered, num_clusters)
        #print(start_vertex_id)
    #print("Number of Clusters", num_clusters)
    graph = reconstruct_graph(graph, lakes)
    return (graph, num_clusters)
def shortest(env):
    """Divide-and-conquer route planner: cluster fires hierarchically
    (doubling the clustering radius until one cluster remains), then return
    the final waypoint list as (x, y, z) tuples, drone start excluded."""
    orig_graph, orig_fires, orig_lakes = parse_env(env)
    #print("Orig_Graph", orig_graph)
    #print("Orig_Lakes", orig_lakes)
    #print("Orig_Fires", orig_fires)
    graph = copy.deepcopy(orig_graph)
    max_dist = 64
    max_num_vertices = 10
    # Keep clustering with a doubling radius until everything merges into a
    # single cluster whose path covers all fires.
    while True:
        #print("Max Dist", max_dist)
        (graph, num_clusters) = form_clusters(graph, max_dist, max_num_vertices, orig_lakes)
        if (num_clusters == 1):
            break
        max_dist = max_dist * 2
    #print(graph[0][0]['path'])
    #prev_coord = graph[0][0]['path'][0]
    #total_dist = 0
    #for i, coord in enumerate(graph[0][0]['path'][1:]):
    #    if i % 2 == 0:
    #        total_dist += np.linalg.norm(prev_coord - coord)
    #    else:
    #        total_dist += 2 * np.linalg.norm(prev_coord - coord)
    #    prev_coord = coord
    #print("Total Distance: ", total_dist)
    # Convert numpy waypoint arrays into plain (x, y, z) tuples; [1:] drops
    # the drone's starting position.
    convertedPath=[]
    for path in graph[0][0]['path'][1:]:
        x, y, z = path
        convertedPath.append(tuple((x, y, z)))
    #print(convertedPath)
    return convertedPath
#print(orig_graph)
if __name__ == "__main__":
    # Generate a city-fire scenario, plan a route over its state, and time
    # the planner.
    scene = scenario_cityfire()
    scene.generate_scenario()
    clearcache()
    env = scene.simul.state()
    start = time.time()
    p = shortest(env)
    elapsed = time.time() - start
    print("Elapsed Time")
    print(elapsed)
{
"api_name": "numpy.float",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": ... |
37877829101 | import array
import fcntl
import os
import re
import subprocess
import time
from multiprocessing import cpu_count
from tempfile import mkstemp
from termios import FIONREAD
from catkin_tools.common import log
from catkin_tools.common import version_tuple
from catkin_tools.terminal_color import ColorMapper
# Terminal-color formatting helpers shared by this module.
mapper = ColorMapper()
clr = mapper.clr
def memory_usage():
    """Return (used, total) physical memory in bytes, or (None, None) when
    the optional psutil dependency is unavailable."""
    try:
        import psutil
    except ImportError:
        return None, None
    if version_tuple(psutil.__version__) < (0, 6, 0):
        # Very old psutil releases exposed physical memory differently.
        usage = psutil.phymem_usage()
        used = usage.used
    else:
        usage = psutil.virtual_memory()
        used = usage.total - usage.available
    return used, usage.total
# Minimal probe makefile for the legacy --jobserver-fds flag (GNU Make < 4.2).
JOBSERVER_SUPPORT_MAKEFILE_OLD = b'''
all:
\techo $(MAKEFLAGS) | grep -- '--jobserver-fds'
'''
# Minimal probe makefile for the modern --jobserver-auth flag (GNU Make >= 4.2).
JOBSERVER_SUPPORT_MAKEFILE = b'''
all:
\techo $(MAKEFLAGS) | grep -- '--jobserver-auth'
'''
def test_gnu_make_support_common(makefile_content):
"""
Test if "make -f MAKEFILE -j2" runs successfully when MAKEFILE
contains makefile_content.
"""
fd, makefile = mkstemp()
os.write(fd, makefile_content)
os.close(fd)
ret = subprocess.call(['make', '-f', makefile, '-j2'], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
os.unlink(makefile)
return ret == 0
def test_gnu_make_support_old():
    """
    Test if the system 'make' supports the job server implementation.
    This simply checks if the `--jobserver-fds` option is supported by the
    `make` command. It does not tests if the jobserver is actually working
    properly.

    :returns: True when `make` accepts `--jobserver-fds` (GNU Make < 4.2)
    """
    return test_gnu_make_support_common(JOBSERVER_SUPPORT_MAKEFILE_OLD)
def test_gnu_make_support():
    """
    Test if the system 'make' supports the job server implementation.
    This simply checks if the `--jobserver-auth` option is supported by the
    `make` command. It does not tests if the jobserver is actually working
    properly.

    :returns: True when `make` accepts `--jobserver-auth` (GNU Make >= 4.2)
    """
    return test_gnu_make_support_common(JOBSERVER_SUPPORT_MAKEFILE)
class GnuMake(object):
    """Detects which GNU Make jobserver flag flavor the system `make`
    supports and builds the matching command-line arguments."""

    def __init__(self):
        # Probe the modern --jobserver-auth flag first (GNU Make >= 4.2),
        # then fall back to the legacy --jobserver-fds flag.
        if test_gnu_make_support():
            # BUG FIX: use the job_pipe argument instead of always reading
            # JobServer._job_pipe, so callers may pass any (read_fd, write_fd)
            # pair.  Behavior is unchanged for the existing caller, which
            # passes JobServer._job_pipe itself.
            self.make_args = lambda job_pipe: ["--jobserver-auth=%d,%d" % tuple(job_pipe)]
        elif test_gnu_make_support_old():
            self.make_args = lambda job_pipe: ["--jobserver-fds=%d,%d" % tuple(job_pipe), "-j"]
        else:
            self.make_args = None

    def is_supported(self):
        """Return True when a jobserver-capable `make` was detected."""
        return self.make_args is not None
class JobServer(object):
    """Class-level GNU Make compatible jobserver: a pipe pre-loaded with one
    token per allowed job, plus optional load/memory throttling."""
    # Whether the job server has been initialized
    _initialized = False
    # Flag designating whether the `make` program supports the GNU Make
    # jobserver interface
    _gnu_make = None
    # Initialize variables
    _load_ok = True
    _mem_ok = True
    _internal_jobs = []
    _max_load = 0
    _max_jobs = 0
    # Token pipe shared with child `make` processes: (read_fd, write_fd).
    _job_pipe = os.pipe()
    # Setting fd inheritance is required in Python > 3.4
    # This is set by default in Python 2.7
    # For more info see: https://docs.python.org/3.4/library/os.html#fd-inheritance
    if hasattr(os, 'set_inheritable'):
        for fd in _job_pipe:
            os.set_inheritable(fd, True)
            if not os.get_inheritable(fd):
                log(clr('@{yf}@!Warning: jobserver file descriptors are not inheritable.@|'))
    @classmethod
    def _set_max_jobs(cls, max_jobs):
        """Set the maximum number of jobs to be used with the jobserver.
        This will wait for all active jobs to be completed, then re-initialize the job pipe.

        :param max_jobs: new token count for the job pipe
        """
        # Read all possible tokens from the pipe
        # NOTE(review): draining assumes the read end is non-blocking (the
        # BlockingIOError handler suggests so) -- confirm where it is set.
        try:
            os.read(cls._job_pipe[0], cls._max_jobs)
        except (BlockingIOError, InterruptedError):
            pass
        # Update max jobs
        cls._max_jobs = max_jobs
        # Initialize the pipe with max_jobs tokens
        for i in range(cls._max_jobs):
            os.write(cls._job_pipe[1], b'+')
@classmethod
def _set_max_mem(cls, max_mem):
"""
Set the maximum memory to keep instantiating jobs.
:param max_mem: String describing the maximum memory that can be used
on the system. It can either describe memory percentage or absolute
amount. Use 'P%' for percentage or 'N' for absolute value in bytes,
'Nk' for kilobytes, 'Nm' for megabytes, and 'Ng' for gigabytes.
:type max_mem: str
"""
if max_mem is None:
cls._max_mem = None
return
elif type(max_mem) is float or type(max_mem) is int:
mem_percent = max_mem
elif type(max_mem) is str:
m_percent = re.search(r'([0-9]+)\%', max_mem)
m_abs = re.search(r'([0-9]+)([kKmMgG]{0,1})', max_mem)
if m_percent is None and m_abs is None:
cls._max_mem = None
return
if m_percent:
mem_percent = m_abs.group(1)
elif m_abs:
val = float(m_abs.group(1))
mag_symbol = m_abs.group(2)
_, total_mem = memory_usage()
if mag_symbol == '':
mag = 1.0
elif mag_symbol.lower() == 'k':
mag = 1024.0
elif mag_symbol.lower() == 'm':
mag = pow(1024.0, 2)
elif mag_symbol.lower() == 'g':
mag = pow(1024.0, 3)
mem_percent = 100.0 * val * mag / total_mem
cls._max_mem = max(0.0, min(100.0, float(mem_percent)))
    @classmethod
    def _check_load(cls):
        """Refresh and return cls._load_ok: True when the 1-minute load
        average is below cls._max_load (or load checking is disabled /
        unsupported on this platform)."""
        if cls._max_load is not None:
            try:
                load = os.getloadavg()
                if load[0] < cls._max_load:
                    cls._load_ok = True
                else:
                    cls._load_ok = False
            except NotImplementedError:
                # Platforms without getloadavg() never throttle on load.
                cls._load_ok = True
        return cls._load_ok
    @classmethod
    def _check_mem(cls):
        """Refresh and return cls._mem_ok: True when physical memory usage is
        at or below cls._max_mem percent (or memory checking is disabled)."""
        if cls._max_mem is not None:
            # NOTE(review): memory_usage() returns (None, None) without
            # psutil, which would make the division below raise -- presumably
            # _max_mem is only ever set when psutil is available; confirm.
            mem_used, mem_total = memory_usage()
            mem_percent_used = 100.0 * float(mem_used) / float(mem_total)
            if mem_percent_used > cls._max_mem:
                cls._mem_ok = False
            else:
                cls._mem_ok = True
        return cls._mem_ok
@classmethod
def _check_conditions(cls):
return (cls._check_load() and cls._check_mem()) or cls._running_jobs() == 0
@classmethod
def _acquire(cls):
"""
Obtain a job server token. Be sure to call _release() to avoid
deadlocks.
"""
try:
# read a token from the job pipe
token = os.read(cls._job_pipe[0], 1)
return token
except (BlockingIOError, InterruptedError):
pass
return None
@classmethod
def _release(cls):
"""
Write a token to the job pipe.
"""
os.write(cls._job_pipe[1], b'+')
@classmethod
def _running_jobs(cls):
try:
buf = array.array('i', [0])
if fcntl.ioctl(cls._job_pipe[0], FIONREAD, buf) == 0:
return cls._max_jobs - buf[0]
except (NotImplementedError, OSError):
pass
return cls._max_jobs
def initialized():
"""Return True if the job server has been initialized."""
return JobServer._initialized
def initialize(max_jobs=None, max_load=None, max_mem=None, gnu_make_enabled=False):
"""
Initialize the global GNU Make jobserver.
:param max_jobs: the maximum number of jobs available
:param max_load: do not dispatch additional jobs if this system load
value is exceeded
:param max_mem: do not dispatch additional jobs if system physical
memory usage exceeds this value (see _set_max_mem for additional
documentation)
:param gnu_make_enabled: Set gnu make compatibility enabled
"""
# Check initialization
if JobServer._initialized is True:
return
# Check if the jobserver is supported
if JobServer._gnu_make is None:
JobServer._gnu_make = GnuMake()
if not JobServer._gnu_make.is_supported():
log(clr('@!@{yf}WARNING:@| Make job server not supported. The number of Make '
'jobs may exceed the number of CPU cores.@|'))
# Set gnu make compatibility enabled
JobServer._gnu_make_enabled = gnu_make_enabled
# Set the maximum number of jobs
if max_jobs is None:
try:
max_jobs = cpu_count()
except NotImplementedError:
log('@{yf}WARNING: Failed to determine the cpu_count, falling back to 1 jobs as the default.@|')
max_jobs = 1
else:
max_jobs = int(max_jobs)
JobServer._set_max_jobs(max_jobs)
JobServer._max_load = max_load
JobServer._set_max_mem(max_mem)
JobServer._initialized = True
def load_ok():
return JobServer._load_ok
def mem_ok():
return JobServer._mem_ok
def set_max_mem(max_mem):
"""
Set the maximum memory to keep instantiating jobs.
:param max_mem: String describing the maximum memory that can be used on
the system. It can either describe memory percentage or absolute amount.
Use 'P%' for percentage or 'N' for absolute value in bytes, 'Nk' for
kilobytes, 'Nm' for megabytes, and 'Ng' for gigabytes.
:type max_mem: str
"""
JobServer._set_max_mem(max_mem)
def wait_acquire():
"""
Block until a job server token is acquired, then return it.
"""
token = None
while token is None:
# make sure we're observing load and memory maximums
if not JobServer._check_conditions():
time.sleep(0.01)
continue
# try to get a job token
token = JobServer._acquire()
return token
def acquire():
"""
Block until a job server token is acquired, then return it.
"""
token = None
# make sure we're observing load and memory maximums
if JobServer._check_conditions():
# try to get a job token
token = JobServer._acquire()
return token
def add_label(label):
JobServer._internal_jobs.append(label)
def del_label(label):
JobServer._internal_jobs.remove(label)
def try_acquire_gen():
"""
Yield None until a job server token is acquired, then yield it.
"""
while True:
# make sure we're observing load and memory maximums
if JobServer._check_conditions() and running_jobs() < max_jobs():
# try to get a job token
token = JobServer._acquire()
yield token
else:
yield None
def try_acquire():
"""
Try to acquire a job token, return None if not available.
"""
# make sure we're observing load and memory maximums
if JobServer._check_conditions() and running_jobs() < max_jobs():
# try to get a job token
token = JobServer._acquire()
return token
return None
def release(label=None):
"""
Release a job server token.
"""
JobServer._release()
if label is not None:
del_label(label)
def gnu_make_enabled():
return JobServer._gnu_make.is_supported() and JobServer._gnu_make_enabled
def gnu_make_args():
"""
Get required arguments for spawning child gnu Make processes.
"""
if JobServer._gnu_make_enabled:
return JobServer._gnu_make.make_args(JobServer._job_pipe)
else:
return []
def max_jobs():
"""
Get the maximum number of jobs.
"""
return JobServer._max_jobs
def running_jobs():
"""
Try to estimate the number of currently running jobs.
"""
if not gnu_make_enabled():
return 0
return JobServer._running_jobs()
def internal_jobs():
return JobServer._internal_jobs
class JobGuard:
"""
Context manager representing a jobserver job.
"""
def __enter__(self):
wait_acquire()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
release()
return False
| catkin/catkin_tools | catkin_tools/execution/job_server.py | job_server.py | py | 12,196 | python | en | code | 153 | github-code | 36 | [
{
"api_name": "catkin_tools.terminal_color.ColorMapper",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "catkin_tools.common.version_tuple",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "psutil.__version__",
"line_number": 31,
"usage_type": "attribut... |
2251121998 | """
Random test stuff
"""
from pysynth.seq import Sequencer
from pysynth.utils import *
from pysynth.osc import *
from pysynth.synth import *
from pysynth.filters import *
from pysynth.output.base import OutputHandler
from pysynth.output.modules import *
from pysynth.wrappers import querty, mml
from pysynth.wrappers.midi import midi
from pysynth.envelope.amp import *
import time
import pyaudio
import threading
import struct
import wave
import math
import os
import copy
from tkinter import Tk, Frame
BITRATE = 16000
#MAX_AMPLITUDE = 32767.0
#MAX_AMPLITUDE = 127.0
paudio = pyaudio.PyAudio()
stream = paudio.open(format=pyaudio.paFloat32,
channels=1,
output=True,
rate=44100)
def type_test():
# Tests the audio type test
audio = AudioValue(0.5, 0, 1000)
#audio.add_event(ExponentialRamp, 800.0, get_time()+10000000000)
#audio.add_event(LinearRamp, 500.0, get_time()+20000000000)
#audio.add_event(ExponentialRamp, 800.0, get_time()+10)
#audio.add_event(LinearRamp, 500.0, get_time()+20)
#audio.exponential_ramp(0, get_time() + 2000000000)
audio.linear_ramp(1, get_time() + 1000000000)
audio.cancel_all_events()
audio.linear_ramp(0, get_time() + 2000000000)
while True:
thing = audio.value
print(thing)
if thing == 0:
break
time.sleep(0.01)
def start_stream(osc):
"""
Starts the audio thread and starts streaming data
"""
thread = threading.Thread(target=stream_data, args=[osc])
thread.daemon = True
thread.start()
return thread
def stream_data(data, time=None):
"""
Streams data to PyAudio.
Data is a oscillator we iterate over
:param data: Data to stream
"""
for num, i in enumerate(data):
#print(i)
stream.write(struct.pack('f', i))
if time and num > time:
# We are done
return
def write_wave(data, seconds):
"""
Writes a specified amount of wave data:
:param data: Data to write
:param seconds: Seconds to write
"""
# Open the wave file:
wav = wave.open('blorck.wav', 'wb')
wav.setnchannels(1)
wav.setsampwidth(4)
wav.setframerate(44100.0)
iter(data)
for i in range(44100*seconds):
value = next(data)
wav.writeframesraw(struct.pack('<f', value))
wav.close()
class KeyboardHandler:
"""
Handles keyboard input, removing and adding
oscillators to an AudioCollection as necessary.
"""
def __init__(self, aud, keys):
self.aud = aud # Audio collection to maintain
self.keys = keys # Mapping keys to oscillators.
# Mapping keys to oscillators to generate
self.osc_keys = {'z': SineOscillator, 'x': SquareOscillator, 'c': SawToothOscillator, 'v': TriangleOscillator}
def remove_key(self, key):
"""
Removes an oscillator from the AudioCollection.
:param key: Oscillator to remove
"""
print("Removed key: {}".format(key.keysym))
if key.keysym in self.osc_keys:
# DO nothing,
return
if self.keys[key.keysym] not in self.aud._objs:
# Nothing, return
return
self.aud.remove_module(self.keys[key.keysym])
def add_key(self, key):
"""
Adds an oscillator from the AudioCollection.
:param key: Oscillator to add
"""
print("Added Key: {}".format(key.char))
if key.char in self.osc_keys:
# Generate a new oscillator mp
self.keys = gen_oscs(self.osc_keys[key.char])
return
if self.keys[key.char] in self.aud._objs:
# Nothing, return
return
self.aud.add_module(self.keys[key.char])
def gen_oscs(osc):
"""
Generates a list of oscillators
:param osc: Oscillator to use to generate our list
:return: List of keys mapped to oscillators
"""
oscs = {}
keys = ['q', 'a', 'w', 's', 'e', 'd', 'r', 'f', 't', 'g', 'y', 'h', 'u', 'j', 'i', 'k', 'o', 'l', 'p']
# Generating oscillators and mapping them to keys:
for note in range(-17, 2):
oscs[keys[note+17]] = osc()
oscs[keys[note + 17]].freq = 440 * (2 ** (1 / 12)) ** note
iter(oscs[keys[note + 17]])
return oscs
def old_keyboard_input():
"""
Maps certain keys to a keyboard,
and adding the notes as we press/release keys.
"""
oscs = gen_oscs(SineOscillator)
print(oscs)
# Create AudioCollection for output
collec = AudioCollection()
collec.add_module(ZeroOscillator())
# Create KeyBoardHandler:
hand = KeyboardHandler(collec, oscs)
# Start streaming the AudioCollection:
start_stream(collec)
# Disabling continuous keypresses:
os.system("xset r off")
# Creating TKinter data:
root = Tk()
f = Frame(root, width=100, height=100)
f.bind("<KeyPress>", hand.add_key)
f.bind("<KeyRelease>", hand.remove_key)
f.pack()
f.focus_set()
root.mainloop()
os.system("xset r on")
def pitch_comp(osc):
"""
Tests the pitch of the incoming oscillator.
:param osc:
"""
sine = SineOscillator(freq=osc.freq.value)
print("Pitch One:")
stream_data(sine, time=220500)
print("Pitch Two:")
stream_data(osc, time=220500)
def chord_test():
"""
Tests if the additive synthesis is producing valid chords
"""
osc1 = TriangleOscillator(freq=440.0)
osc2 = TriangleOscillator(freq=880.0)
osc3 = TriangleOscillator(freq=1320.0)
osc4 = TriangleOscillator(freq=1760.0)
thing = AudioCollection()
thing.add_module(osc1, start=True)
start_stream(thing)
time.sleep(1)
thing.add_module(osc2, start=True)
time.sleep(1.5)
thing.add_module(osc3, start=True)
time.sleep(1.76)
thing.add_module(osc4, start=True)
def avgfilt_test():
"""
Tests Moving Average Filter
"""
# Set up a sine wave:
sine = SineOscillator()
# Set up a filter:
filter = MovingAverage(101)
filter.bind(sine)
sine.freq = 1234.0
print(sine.freq.value)
print(filter.freq.value)
# Start the stream:
start_stream(filter)
def fm_test():
"""
Tests FM Synthesis
"""
osc1 = SineOscillator(freq=440)
osc2 = SineOscillator(freq=6160)
oscmain = SineOscillator(freq=440)
collec = AudioCollection()
twomain = FMSynth(oscmain, osc2, 1)
oneandtwo = FMSynth(twomain, osc1, 5)
collec.add_node(oneandtwo)
start_stream(twomain)
def keyboard_input():
# Tests the QUERTY sequencer, and the QUERTYKeyboard input module.
# Create the QWERTY wrapper
sequencer = querty.QWERTYWrapper()
# Create the output handler:
out = OutputHandler()
pyaudo = PyAudioModule()
pyaudo.special = True
out.add_output(pyaudo)
#out.add_output(WaveModule("test_seq.wav"))
# Configure for keyboard:
sequencer.load_keyboard()
attack = 1000000000
decay = 1000000000
sustain = 0.2
release = 1000000000
env1 = ADSREnvelope(attack, decay, sustain, release)
env2 = ADSREnvelope(attack, decay, sustain, release)
env3 = ADSREnvelope(attack, decay, sustain, release)
env4 = ADSREnvelope(attack, decay, sustain, release)
env1.bind(SineOscillator(440.0))
env2.bind(SquareOscillator(440.0))
env3.bind(SawToothOscillator(440.0))
env4.bind(TriangleOscillator(440.0))
# Get controller for sine wave oscillator:
sine = out.bind_synth(env1)
square = out.bind_synth(env2)
saw = out.bind_synth(env3)
tri = out.bind_synth(env4)
# Add sine oscillator for default instrument:
sequencer.add_synth(sine, name=0)
sequencer.add_synth(square, name=1)
sequencer.add_synth(saw, name=2)
sequencer.add_synth(tri, name=3)
# Start the output handler:
out.start()
# Start the sequencer:
sequencer.start()
sequencer.join()
sequencer.stop()
out.stop()
def freq_conv(num, middle_pitch=440.0):
# Calculate and return the frequency of the note:
return middle_pitch * pow(2, (((num) / 12)))
def mixing_test():
# Tests mixing operations:
pass
def mml_test():
# Tests the MML wrapper
#song = '$ t120 o4 l4 e f+ b > c+ d < f+ e > c+ < b f+ > d c+ <e f+ b > c+ d < f+ e > c+ < b f+ > d c+'
#song = '$o4 c r e r g r b r;$o4 r d r f r a r <c'
#song = 'o4 c d e f g a b <c d'
#song = "o4 l1 ca"
#song = "t60 l4 o4 /: [ceg] [fac]1 :/4"
#song = "t92 l8 o4 [>cg<cea]2. [>cg<ceg]4 [>>a<a<c+fa+]2. [>>a<a<c+ea]4 " \
# "[>>f<fg+<cg]2. [>>f<fg+<cf]4 [>>g<gg+b<g+]2." \
# "[>>g<g<g]4 o3 l32 v6 cdef ga b<c de fg"
#song = "t92 l4 o4 [>>a<a<c+fa+]"
#song = 'o3 l32 v6 cdefgab<cdefg'
#song = 't30 a'
#song = "t92 [>>f<fg+<cg]2. [>>f<fg+<cf]4 [>>g<gg+b<g+]2. [>>g<g<g]4 o3 t92 l32 v6 cdef ga b<c de fg"
#song = 't60 [>>g<g<g]4'
#song = 't60 o3 l4 cdefgab<c> l8 cdefgab<c> l16 cdefgab l32 cdefgab'
song1 = "t92 l8 o4 $ [>cg<cea]2. [>cg<ceg]4 [>>a<a<c+fa+]2. [>>a<a<c+ea]4 " \
"[>>f<fg+<cg]2. [>>f<fg+<cf]4 [>>g<gg+b<g+]2. r4; " \
"t92 $ l1 o3 v12 r r r r2 r8 l32 v6 cdef ga b<c de fg;"
song = "t60 l4 o4 a+ r a+;" \
"t60 l4 o4 r r >a+"
song = "t120$l8 o3 >g+2.. g+ a+4. a+ <c2 >a+ g+2.. a+4 a+4 <c4. >d+ a+ g+2. g+ a+4. a+ <c2 >a+ g+2.. a+4 a+4 <c2."
song = "t120$l8 o3 >g+2.. g+ a+4. a+ <c2 >a+ g+2.. a+4 a+4 <c4. >d+ a+ g+2. g+ a+4. a+ <c2 >a+ g+2.. a+4 a+4 <c2."
song = 't60 l1 a r8 a. r8 a.. r8 a...'
song = "t105 l8 o5 q75 v100 " \
"ab-> c4c4c4 c4.faf fedc<b-4 [gb-]2 [fa]4 agb-a>c<b- >c+dc<b-ag f2[ea]g f4r4" \
"[fa][eg] [eg]2[gb-][fa] [fa]2>c<b b>dfd<b>d c4.<b-" \
"ab-> c4c4c4 c4.faf fedc<b-4 [gb-]2 [fa]4 agb-a>c<b- >c+dc<b-ag f2[ea]g f4r4;" \
"t105 l8 o4 q75 v75" \
"r4 f>c<a>c<a>c< f>c<a>c<a>c< g>c<b->c<b->c< [e>c]2 [f>c]4 [b->d]2.^2 [<b->b-]4 [ca]2[cb-]4 [fa]4 <f4>" \
"r4 c4>c4r4< c4>c4r4< [cdf]4[cdf]4[cdf]4 [ce]4r4" \
"r4 f>c<a>c<a>c< f>c<a>c<a>c< g>c<b->c<b->c< [e>c]2 [f>c]4 [b->d]2.^2 [<b->b-]4 [ca]2[cb-]4 [fa]4 <f4>;" \
song2 = "t120$l8 o4 v9rr g g4 g+ a+4 d4 d4 d+2 d c g g4 g+ a+4 d4 d4 d+2 rr g g4 g+ a+4 d4 d4 d+2 d c g g4 g+ a+4 d4 d4 d+2.;" \
"t120$l8 o4 v9 rr d+ d+2 r >a+4 a+4 <c2 >a+ g+ <d+ d+2 r >a+4 a+4 a+2 rr d+ d+2 r >a+4 a+4 <c2 >a+ g+ <d+ d+2 r >a+4 a+4 a+2.;" \
"t120$l8 o4 v9 rr c c2 r >f4 f4 g2 a+ g+ <c c2 >f f4 r f g2< rr c c2 r >f4 f4 g2 a+ g+ <c c2 >f f4 r f g2.<;" \
"t120$l8 o3 v8 >g+2.. g+ a+4. a+ <c2 >a+ g+2.. a+4 a+4 <c4. >d+ a+ g+2. g+ a+4. a+ <c2 >a+ g+2.. a+4 a+4 <r2." \
#song = 't60 o3 l4 cdefgab<c> l8 cdefgab<c> l16 cdefgab l32 cdefgab'
#song = '$ t120 o4 l4 e f+ b > c+ d < f+ e > c+ < b f+ > d c+ <e f+ b > c+ d < f+ e > c+ < b f+ > d c+'
sec = mml.MMLWrapper()
sec.load_string(song2)
out = OutputHandler()
pyaud = PyAudioModule()
pyaud.special = True
out.add_output(pyaud)
# --== Instrument Selection: ==--
# Uncomment the instrument you want to use!
#osc = SineOscillator(freq=440.0)
#osc = SquareOscillator(freq=440.0)
osc = SawToothOscillator(freq=440.0)
#osc = TriangleOscillator(freq=440.0)
final = osc
# --== End Instrument Selection! ==--
# --== Other Output Options: ==--
# Uncomment to write to a wav file:
#wave = WaveModule('saw_test.wav')
#out.add_output(wave)
# --== End Other Output Options! ==--
# --== ADSR Options: ==--
# Configure the parameters for the ADSR envelope:
attack = 10000000
decay = 100000
sustain = 0.2
release = 5000000
# Uncomment to enable the envelope:
#env = ADSREnvelope(attack, decay, sustain, release)
#env.bind(osc)
#final = env
# --== End ADSR Options! ==--
amp = AmpScale()
amp.bind(final)
final = amp
cont = out.bind_synth(final)
sec.add_synth(cont)
# Start output handler:
out.start()
# Start the sequencer:
sec.start()
sec.join()
print("Done joining")
print("Stopping sequencer...")
sec.stop()
print("Stopping output...")
out.stop()
def delay():
# Tests the delay function of OutputControl
osc = TriangleOscillator(freq=440.0)
out = OutputHandler()
pyaud = PyAudioModule()
pyaud.special = True
out.add_output(pyaud)
cont = out.bind_synth(osc)
out.start()
cont.start(time=3000000000 + get_time())
cont.stop(time=5000000000 + get_time())
time.sleep(8)
out.stop()
def deepcopy():
# Tests the deep copy of synths
# OutputHandler:
out = OutputHandler()
# PyAudio module:
pyaud = PyAudioModule()
pyaud.special = True
out.add_output(pyaud)
osc = SineOscillator(freq=440.0)
final = out.bind_synth(osc)
# Make a deep copy:
thing = copy.deepcopy(final)
def audio_param():
# Tests the audio parameter as it alters the frequency of the pitch
# Create a simple sine oscillator:
osc = SineOscillator(freq=440)
out = OutputHandler()
pyaud = PyAudioModule()
pyaud.special = True
out.add_output(pyaud)
sine = out.bind_synth(osc)
out.start()
# Start the oscillator:
sine.start()
# Have the frequency go up to 550 in 5 seconds:
sine.info.freq.exponential_ramp(550.0, get_time()+5000000000)
time.sleep(5)
# Have the frequency go back down to 440 in 10 seconds:
sine.info.freq.linear_ramp(440.0, get_time()+60000000000)
sine.join()
out.stop()
def test_output():
# Creates and registers a oscillator. Used for testing output.
osc1 = TriangleOscillator(freq=440.0)
osc2 = TriangleOscillator(freq=880.0)
osc3 = TriangleOscillator(freq=1320.0)
osc4 = TriangleOscillator(freq=1760.0)
out = OutputHandler()
out.add_output(WaveModule("test.wav"))
# Add the PyAudio module:
pyaud = PyAudioModule()
pyaud.special = True
out.add_output(pyaud)
# Add the WaveModule:
# Bind the synth:
final = out.bind_synth(osc1)
final2 = out.bind_synth(osc2)
final3 = out.bind_synth(osc3)
final4 = out.bind_synth(osc4)
# Start the OutputHandler:
#out.start()
# Start the synth:
final.start()
out.start()
time.sleep(1)
final2.start()
time.sleep(1)
final3.start()
time.sleep(1)
final4.start()
time.sleep(1)
final4.stop()
time.sleep(1)
final3.stop()
time.sleep(1)
final2.stop()
time.sleep(3)
final.stop()
out.stop()
def module_connection():
# Tests the connectivity features of modules
mod1 = SineOscillator(freq=640.0)
mod2 = DummyModule()
mod3 = DummyModule()
mod4 = DummyModule()
mod2.bind(mod1)
mod3.bind(mod2)
mod4.bind(mod3)
print("Connected modules: {}".format(mod4.info.connected))
print("Should be 4!")
print("Chain frequency: {}".format(mod4.info.freq))
print("Should be 640.0!")
class DummyWait(BaseModule):
"""
Passes values from the inputs attached,
but continues to play for 5 seconds after we are released.
"""
def __init__(self):
super().__init__()
self.finishing = False
self.wait = 5000000000
self.start_time = 0
def start(self):
self.finishing = False
self.wait = 5000000000
self.start_time = 0
def finish(self):
print("Dummy module finishing...")
self.finishing = True
self.start_time = get_time()
print("Starting time: {}".format(self.start_time))
def get_next(self):
# Check if we are finishing
if self.finishing:
if get_time() > self.start_time + self.wait:
# We are done, lets say we are finished:
print("Dummy module done!")
print("Current time: {}".format(get_time()))
print("Target time: {}".format(self.start_time + self.wait))
self.done()
return None
# Otherwise, lets just return the input:
return self.get_input()
def fade_test():
# Tests the ability for synths to continue to play after they have been stopped
out = OutputHandler()
pyaud = PyAudioModule()
pyaud.special = True
out.add_output(pyaud)
osc = TriangleOscillator(freq=440.0)
dummy = DummyWait()
dummy.bind(osc)
cont = out.bind_synth(dummy)
sec = Sequencer()
sec.add_synth(cont)
print(cont.info.connected)
print(cont.info.done)
out.start()
print("Starting controller...")
cont.start()
print("Waiting...")
time.sleep(5)
print("Stopping controller...")
cont.stop()
time.sleep(5)
print(("Waiting..."))
time.sleep(5)
print("Synth should be stopped!")
print("Starting synth...")
cont.start()
print("Waiting...")
time.sleep(5)
print("Stopping cont...")
cont.stop()
print("Waiting three seconds to interrupt...")
time.sleep(3)
print("Interrupting!")
cont.start()
cont.stop()
print("Finished!")
#sec.start()
def adsr_test():
# Tests the ADSR envelope
attack = 1000000000
decay = 1000000000
sustain = 0.5
release = 1000000000
# Create the OutputHandler:
out = OutputHandler()
pyaud = PyAudioModule()
pyaud.special = True
out.add_output(pyaud)
osc = SawToothOscillator(freq=440.0)
env = ADSREnvelope(attack, decay, sustain, release)
env.bind(osc)
cont = out.bind_synth(env)
out.start()
print("Starting:")
cont.start()
time.sleep(5)
print("Stopping:")
cont.stop()
time.sleep(5)
def MIDI_test():
# Tests if we can get MIDI info from ALSA
out = OutputHandler()
pyaud = PyAudioModule()
pyaud.special = True
out.add_output(pyaud)
osc = SineOscillator(freq=440.0)
#osc = SquareOscillator(freq=440.0)
#osc = SawToothOscillator(freq=440.0)
#osc = TriangleOscillator(freq=440.0)
attack = 1000000000
decay = 1000000000
sustain = 0.2
release = 1000000000
env = ADSREnvelope(attack, decay, sustain, release)
env.bind(osc)
cont = out.bind_synth(env)
seq = midi.MIDIWrapper()
seq.alsa_live()
seq.add_synth(cont)
out.start()
seq.start()
seq.join()
seq.stop()
out.stop()
| Owen-Cochell/python-audio-synth | pysynth/temp.py | temp.py | py | 19,592 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pyaudio.PyAudio",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pyaudio.paFloat32",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
... |
34440124613 | """ 05_Collisions_v2 by Sun Woo Yi
This version will be carried on from 05_Collisions_v1_testing_2
This version will show a collision being detected between two objects
When the objects collide the game will quit automatically
26/05/2023
"""
import pygame
# Initialize Pygame
pygame.init()
# Set the dimensions of the window
window_width = 261
window_height = 377
# Create the window
screen = pygame.display.set_mode((window_width, window_height))
class GreenBox(pygame.sprite.Sprite):
def __init__(self, x, y):
super().__init__()
self.image = pygame.Surface((50, 50))
self.image.fill((0, 255, 0))
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
class BlueBox(pygame.sprite.Sprite):
def __init__(self, x, y):
super().__init__()
self.image = pygame.Surface((50, 50))
self.image.fill((0, 0, 255))
self.rect = self.image.get_rect()
self.rect.x = x
self.rect.y = y
# Set up the green box
green_box_x = window_width/2 - 50/2
green_box_y = 377 - 50
green_box = GreenBox(green_box_x, green_box_y)
all_sprites = pygame.sprite.Group(green_box)
# Set up the blue box
blue_box_x = window_width/2 - 50/2
blue_box_y = 0
blue_box = BlueBox(blue_box_x, blue_box_y)
all_sprites.add(blue_box)
# Game loop
running = True
while running:
for event in pygame.event.get():
if event.type == pygame.QUIT:
running = False
# Move the boxes
green_box.rect.y -= 1
blue_box.rect.y += 1
# Check for collisions
if pygame.sprite.collide_rect(green_box, blue_box):
quit()
# Draw the boxes
screen.fill((255, 255, 255))
all_sprites.draw(screen)
pygame.display.update()
pygame.quit()
| yis1234/Car-Game | 05_Collisions_v2.py | 05_Collisions_v2.py | py | 1,753 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.init",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.sprite... |
21119688527 | from functools import lru_cache
from typing import List
class Solution:
def minScoreTriangulation(self, values: List[int]) -> int:
@lru_cache(None)
def dp(i, j):
if i + 2 > j:
return 0
if i + 2 == j:
return values[i] * values[i + 1] * values[j]
return min((values[i] * values[k] * values[j] + dp(i, k) + dp(k, j)) for k in range(i + 1, j))
return dp(0, len(values) - 1)
if __name__ == '__main__':
values = [1,2,3]
values = [3,7,4,5]
values = [1,3,1,4,1,5]
rtn = Solution().minScoreTriangulation(values)
print(rtn) | plattanus/leetcodeDAY | python/1039. 多边形三角剖分的最低得分.py | 1039. 多边形三角剖分的最低得分.py | py | 647 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "functools.lru_cache",
"line_number": 7,
"usage_type": "call"
}
] |
31835787818 | import os
from fontTools.designspaceLib import DesignSpaceDocument, AxisDescriptor, SourceDescriptor, InstanceDescriptor, RuleDescriptor
root = os.getcwd()
doc = DesignSpaceDocument()
familyName = "MutatorSansTest"
#------
# axes
#------
a1 = AxisDescriptor()
a1.maximum = 1000
a1.minimum = 0
a1.default = 0
a1.name = "width"
a1.tag = "wdth"
doc.addAxis(a1)
a2 = AxisDescriptor()
a2.maximum = 1000
a2.minimum = 0
a2.default = 0
a2.name = "weight"
a2.tag = "wght"
doc.addAxis(a2)
#---------
# masters
#---------
s0 = SourceDescriptor()
s0.path = "MutatorSansLightCondensed.ufo"
s0.name = "master.MutatorSansTest.LightCondensed.0"
s0.familyName = familyName
s0.styleName = "LightCondensed"
s0.location = dict(weight=0, width=0)
s0.copyLib = True
s0.copyInfo = True
s0.copyGroups = True
s0.copyFeatures = True
doc.addSource(s0)
s1 = SourceDescriptor()
s1.path = "MutatorSansBoldCondensed.ufo"
s1.name = "master.MutatorSansTest.BoldCondensed.1"
s1.familyName = familyName
s1.styleName = "BoldCondensed"
s1.location = dict(weight=1000, width=0)
doc.addSource(s1)
s2 = SourceDescriptor()
s2.path = "MutatorSansLightWide.ufo"
s2.name = "master.MutatorSansTest.LightWide.2"
s2.familyName = familyName
s2.styleName = "LightWide"
s2.location = dict(weight=0, width=1000)
doc.addSource(s2)
s3 = SourceDescriptor()
s3.path = "MutatorSansBoldWide.ufo"
s3.name = "master.MutatorSansTest.BoldWide.3"
s3.familyName = familyName
s3.styleName = "BoldWide"
s3.location = dict(weight=1000, width=1000)
doc.addSource(s3)
#-----------
# instances
#-----------
i0 = InstanceDescriptor()
i0.name = 'instance_LightCondensed'
i0.familyName = familyName
i0.styleName = "Medium"
i0.path = os.path.join(root, "instances", "MutatorSansTest-Medium.ufo")
i0.location = dict(weight=500, width=327)
i0.kerning = True
i0.info = True
doc.addInstance(i0)
#-------
# rules
#-------
rd = RuleDescriptor()
rd.name = 'fold_I_serifs'
rd.conditionSets = [[{'minimum': 0.0, 'maximum': 328.0, 'name': 'width'}]]
rd.subs = [('I', 'I.narrow')]
doc.addRule(rd)
rd = RuleDescriptor()
rd.name = 'fold_S_terminals'
rd.conditionSets = [[{'minimum': 0.0, 'maximum': 1000.0, 'name': 'width'}, {'minimum': 0.0, 'maximum': 500.0, 'name': 'weight'}]]
rd.subs = [('S', 'S.closed')]
doc.addRule(rd)
#--------
# saving
#--------
path = os.path.join(root, "MutatorSans__Test__.designspace")
doc.write(path)
| LettError/mutatorSans | makeDesignSpace.py | makeDesignSpace.py | py | 2,364 | python | en | code | 112 | github-code | 36 | [
{
"api_name": "os.getcwd",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "fontTools.designspaceLib.DesignSpaceDocument",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "fontTools.designspaceLib.AxisDescriptor",
"line_number": 14,
"usage_type": "call"
... |
41205022751 | '''
Write a Python Script that captures images from your webcam video stream
Extract all faces from the image frame(using haarcascade)
Store the face information into numpy arrays
1. Read and show video stream, capture images
2. Detect faces and show bounding box
3. Flatten the largest face image(gray scale image) and save it in numpy arrays
4. Repeat the above for multiple people to generate training data
'''
import cv2
from cv2 import sort
import numpy as np
cap = cv2.VideoCapture(0)
# Face detection
face_cascade = cv2.CascadeClassifier("haarcascade_frontalface_alt.xml")
skip=0
face_data = []
dataset_path = './data/'
file_name = input("Enter your name - ")
while True:
ret, frame = cap.read()
if ret ==False:
continue
gray_frame = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
faces = face_cascade.detectMultiScale(frame, 1.3,5)
# print(faces)
faces = sorted(faces,key = lambda f:f[2]*f[3], reverse=True)
face_section=frame
for face in faces:
x,y,w,h = face
cv2.rectangle(frame, (x,y),(x+w, y+h),(255,255,0),3)
#Extract region of interest means crop out the face
offset = 10
face_section = frame[y-offset:y+h+offset, x-offset:x+w+offset]
face_section = cv2.resize(face_section,(100,100))
if skip%10==0:
face_data.append(face_section)
print(skip/10)
cv2.imshow("Frame", frame)
#cv2.imshow("Face frame", face_section)
skip+=1
key_pressed = cv2.waitKey(1) & 0xFF
if key_pressed==ord('q'):
break
face_data = np.asarray(face_data)
face_data = face_data.reshape((face_data.shape[0],-1))
print(face_data.shape)
np.save(dataset_path+file_name+'.npy', face_data)
print("data saved at - "+dataset_path+file_name+'.npy')
cap.release()
cv2.destroyAllWindows() | ankan-das-2001/Machine-learning-and-Deep-learning | Projects/Face Recognition/face_data_collect.py | face_data_collect.py | py | 1,813 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.CascadeClassifier",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRA... |
14066016259 | # 섬의 개수
# pypy3: 200ms
from collections import deque
def bfs(start):
queue = deque()
queue.append(start)
mapp[start[0]][start[1]] = 0
while queue:
now = queue.popleft()
for m in move:
new_r, new_c = now[0] + m[0], now[1] + m[1]
if new_r in range(h) and new_c in range(w) and mapp[new_r][new_c]:
mapp[new_r][new_c] = 0
queue.append((new_r, new_c))
move = [(0, 1), (1, 0), (0, -1), (-1, 0), (1, 1), (1, -1), (-1, 1), (-1, -1)]
w, h = map(int, input().split())
while w and h:
mapp = []
for _ in range(h):
mapp.append(list(map(int, input().split())))
cnt = 0
for i in range(h):
for j in range(w):
if mapp[i][j]:
bfs((i,j))
cnt += 1
print(cnt)
w,h = map(int, input().split()) | yeon-june/BaekJoon | 4963.py | 4963.py | py | 858 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 7,
"usage_type": "call"
}
] |
24486683451 | """Backward compatibility: in the past, APITaxi was deployed on a dedicated
server and nginx was configured to redirect / to the console, and /doc to
swagger.
Now, the infrastructure is deployed on CleverCloud. We need to perform these
redirections here, since it is impossible to perform redirections on
CleverCloud's loadbalancers.
"""
from flask import Blueprint, current_app, redirect
blueprint = Blueprint('index', __name__)
@blueprint.route('/', methods=['GET'])
def index():
url = current_app.config.get('CONSOLE_URL')
return redirect(url, code=301)
@blueprint.route('/doc', methods=['GET'])
def doc():
url = current_app.config.get('SWAGGER_URL')
return redirect(url, code=301)
| openmaraude/APITaxi | APITaxi2/views/redirect.py | redirect.py | py | 709 | python | en | code | 24 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.current_app.config.get",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "flask.current_app.config",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_na... |
6260370623 | import time
import requests as rs
from bs4 import BeautifulSoup as bs
from email.MIMEMultipart import MIMEMultipart
from email.MIMEText import MIMEText
from email.MIMEImage import MIMEImage
import smtplib
url = "http://www.gumtree.com.au/s-cats-kittens/launceston/c18435l3000393"
domain = "http://www.gumtree.com.au"
def load_exempt(path="./exempt.txt"):
exempt_list = []
with open(path) as f:
for line in f.readlines():
exempt_list.append(line.strip())
return exempt_list
def abstract_entry(entry):
result = {}
result["name"] = entry("a", itemprop="url", class_="ad-listing__title-link")[0].text.strip()
if result["name"].upper().startswith("WANTED"):
result["name"]= ""
result["price"]= entry("span", class_="j-original-price")[0].text.strip()
result["add_datetime"]= entry("div", class_="ad-listing__date")[0].text.strip()
result["entry_url"]= entry("a", itemprop="url")[0]["href"]
img = entry("img")
if len(img)==0:
img = None
else:
img = img[0]["src"]
result["img"] = img
area = entry("span", class_="ad-listing__location-area")[0].text.strip()
suburb = entry("span", class_="ad-listing__location-suburb")
if len(suburb) == 0:
suburb = ""
else:
suburb = suburb[0].text.strip()
result["loc"] = area + suburb
result["description"] = entry("p", class_="ad-listing__description")[0].text.strip()
return result
def notify(result):
# Send an HTML email with an embedded image and a plain text message for
# email clients that don't want to display the HTML.
# Define these once; use them twice!
strFrom = 'myme5261314@sina.com'
# strTo = 'jingna93@163.com'
strTo = 'myme5261314@126.com'
# Create the root message and fill in the from, to, and subject headers
msgRoot = MIMEMultipart('related')
msgRoot['Subject'] = result["name"]
msgRoot['From'] = strFrom
msgRoot['To'] = strTo
msgRoot.preamble = 'This is a multi-part message in MIME format.'
# Encapsulate the plain and HTML versions of the message body in an
# 'alternative' part, so message agents can decide which they want to display.
msgAlternative = MIMEMultipart('alternative')
msgRoot.attach(msgAlternative)
text_message_template = "Title: %s\nPrice: %s\nLocation: %s\nAdd Time: %s\nDescription: %s\n"
text_message = text_message_template % (result["name"], result["price"], result["loc"] , result["add_datetime"], result["description"])
# msgText = MIMEText('This is the alternative plain text message.')
msgText = MIMEText(text_message)
msgAlternative.attach(msgText)
# We reference the image in the IMG SRC attribute by the ID we give it below
html_msg = text_message.replace("\n", "<br>")
if result["img"] is not None:
html_msg += '<img src="%s">' % result["img"]
html_msg += '<b><a href="%s">link</a></b>' % (domain + result["entry_url"])
# msgText = MIMEText('<b>Some <i>HTML</i> text</b> and an image.<br><img src="cid:image1"><br>Nifty!', 'html')
msgText = MIMEText(html_msg, 'html')
msgAlternative.attach(msgText)
# # This example assumes the image is in the current directory
# fp = open('test.jpg', 'rb')
# msgImage = MIMEImage(fp.read())
# fp.close()
# # Define the image's ID as referenced above
# msgImage.add_header('Content-ID', '<image1>')
# msgRoot.attach(msgImage)
# Send the email (this example assumes SMTP authentication is required)
try:
smtp = smtplib.SMTP()
smtp.connect('smtp.sina.com')
smtp.login('myme5261314', 'Lp5261314!')
smtp.sendmail(strFrom, strTo, msgRoot.as_string())
smtp.quit()
except Exception as e:
print(e)
return False
return True
def main():
exempt_list = load_exempt()
while True:
list_page = rs.get(url)
list_page = bs(list_page.text)
area = list_page("ul", id="srchrslt-adtable")
entry_list = area[0]("li")
result_list = [abstract_entry(entry) for entry in entry_list]
result_list = [result for result in result_list if result["name"] != "" and result["entry_url"].split("/")[-1] not in exempt_list]
for result in result_list:
print(result)
if notify(result):
exempt_list.append(result["entry_url"].split("/")[-1])
time.sleep(30)
with open("./exempt.txt", "a") as f:
for result in result_list:
idx = result["entry_url"].split("/")[-1]
f.write("%s\n" % idx)
time.sleep(120)
if __name__ == '__main__':
main()
| myme5261314/GumtreeCatNotifier | main.py | main.py | py | 4,672 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "email.MIMEMultipart.MIMEMultipart",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "email.MIMEMultipart.MIMEMultipart",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "email.MIMEText.MIMEText",
"line_number": 71,
"usage_type": "call"
}... |
5521906377 | from scripts.helpful_scripts import get_account, get_contract, OPENSEA_URL
from brownie import DappToken, Escrow, SimpleNFT, network, config, ANFT
from web3 import Web3
import time
import yaml
import json
import os
import shutil
sample_token_uri = (
"ipfs://Qmd9MCGtdVz2miNumBHDbvj8bigSgTwnr4SbyH6DNnpWdt?filename=0-PUG.json"
)
KEPT_BALANCE = Web3.toWei(1000, "ether")
KEPT_LOAT_BALANCE = Web3.toWei(500000, "ether")
def update_front_end():
# sending the build folder
src = "./build"
# dest = "./front_end/src/chain-info"
dest = "../loanAgainstNFT/contracts"
copy_folders_to_front_end(src, dest)
# # sending the front end our config in JSON format
# with open("brownie-config.yaml", "r") as brownie_config:
# config_dict = yaml.load(brownie_config, Loader=yaml.FullLoader)
# with open("./front_end/src/brownie-config.json", "w") as brownie_config_json:
# json.dump(config_dict, brownie_config_json)
# print("front end updated")
def copy_folders_to_front_end(src, dest):
if os.path.exists(dest):
shutil.rmtree(dest)
shutil.copytree(src, dest)
def deploy_escrow_and_tokens_and_nfts():
account = get_account()
# non_owner = get_account(index=1) # gonachi
non_owner = get_account(key=1) # goerli
dapp_token = DappToken.deploy({"from": account}) # governance token
# loan_token = LoanToken.deploy({"from": account}) # loan token
escrow = Escrow.deploy( # escrow wallet
dapp_token.address,
{"from": account},
publish_source=config["networks"][network.show_active()]["verify"],
)
tx = dapp_token.transfer( # no approval because the account is the owner
escrow.address,
dapp_token.totalSupply() - KEPT_BALANCE,
{"from": account}, # 99.9%
)
tx.wait(1)
# SimpleNFT, and we have the NFT address, can we mock NFT too? no need?
simple_nft = SimpleNFT.deploy({"from": account})
tx = simple_nft.createNFT(sample_token_uri, {"from": account})
tx.wait(1)
simple_nft_id = 0
#
# a_nft = get_contract("a_nft")
a_nft = ANFT.deploy({"from": account})
tx = a_nft.createNFT(sample_token_uri, {"from": account})
tx.wait(1)
a_nft_id = 0
loan_token = get_contract("loan_token")
loan_token_price_feed = get_contract("loan_token_price_feed")
init_amount = (
loan_token.balanceOf(account.address) / 10
) # give escrow half of the loan token for test
loan_token.approve(escrow.address, init_amount, {"from": account})
tx = loan_token.transfer(
escrow.address,
init_amount,
{"from": account},
)
tx.wait(1)
init_amount = (
loan_token.balanceOf(account.address) / 100
) # give non-owner half of the left loan token for test
loan_token.approve(non_owner.address, init_amount, {"from": account})
tx = loan_token.transfer(
non_owner.address,
init_amount,
{"from": account},
)
tx.wait(1)
dict_of_allowed_nfts = {
simple_nft: get_contract("simple_nft_price_feed"),
a_nft: get_contract("a_nft_price_feed"),
# b_nft: get_contract("b_nft_price_feed"),
}
add_allowed_nfts(escrow, dict_of_allowed_nfts, account)
# set simple and a nft collection offers and specific offers
loan_Amount = Web3.toWei(0.0001, "ether")
loan_Days = 3
loan_Interest = 286
sel_index = 1
tx = escrow.setCollectionOffers(
simple_nft.address, loan_Amount, loan_Days, loan_Interest, sel_index
)
tx.wait(1)
sel_index = 2
tx = escrow.setCollectionOffers(
simple_nft.address, loan_Amount / 2, loan_Days * 2, loan_Interest, sel_index
)
tx.wait(1)
sel_index = 3
tx = escrow.setCollectionOffers(
simple_nft.address, loan_Amount / 4, loan_Days * 5, loan_Interest, sel_index
)
tx.wait(1)
tx = escrow.setOffers(
simple_nft.address, simple_nft_id, loan_Amount * 2, loan_Days * 3, loan_Interest
)
tx.wait(1)
sel_index = 1
tx = escrow.setCollectionOffers(
a_nft.address, loan_Amount, loan_Days, loan_Interest, sel_index
)
tx.wait(1)
sel_index = 2
tx = escrow.setCollectionOffers(
a_nft.address, loan_Amount / 4, loan_Days * 3, loan_Interest, sel_index
)
tx.wait(1)
sel_index = 3
tx = escrow.setCollectionOffers(
a_nft.address, loan_Amount / 8, loan_Days * 6, loan_Interest, sel_index
)
tx.wait(1)
tx = escrow.setOffers(
a_nft.address, a_nft_id, loan_Amount * 3, loan_Days * 2, loan_Interest
)
tx.wait(1)
sel_index = 2
loanProcess2(escrow, simple_nft, simple_nft_id, sel_index, account, loan_token)
get_stats(escrow)
sel_index = 0
loanProcess2(escrow, a_nft, a_nft_id, sel_index, account, loan_token)
get_stats(escrow)
tx = simple_nft.createNFT(sample_token_uri, {"from": non_owner})
tx.wait(1)
simple_nft_id = 1
sel_index = 0
loanProcess2(escrow, simple_nft, simple_nft_id, sel_index, non_owner, loan_token)
get_stats(escrow)
tx = a_nft.createNFT(sample_token_uri, {"from": non_owner})
tx.wait(1)
a_nft_id = 1
sel_index = 2
loanProcess2(escrow, a_nft, a_nft_id, sel_index, non_owner, loan_token)
get_stats(escrow)
simple_nft_id = 0
repayProcess2(escrow, simple_nft, simple_nft_id, account, loan_token)
get_stats(escrow)
a_nft_id = 1
repayProcess2(escrow, a_nft, a_nft_id, non_owner, loan_token)
get_stats(escrow)
simple_nft_id = 1
repayProcess2(escrow, simple_nft, simple_nft_id, non_owner, loan_token)
get_stats(escrow)
a_nft_id = 0
repayProcess2(escrow, a_nft, a_nft_id, account, loan_token)
get_stats(escrow)
# print(escrow.allowedNfts(0))
# print(escrow.allowedNfts(1))
# print(escrow.numOfAllowedNfts())
# tx = escrow.updateAllowedNfts(simple_nft.address, False, {"from": account})
# tx.wait(1)
# print(escrow.allowedNfts(0))
# print(escrow.numOfAllowedNfts())
# return escrow, simple_nft, dapp_token, loan_token
def get_stats(escrow):
numOfBorrowers = int(escrow.numOfBorrowers())
print("There are", numOfBorrowers, "borrowers")
print("They are", escrow.borrowers)
for borrowerIndex in range(int(numOfBorrowers)):
borrower = escrow.borrowers(borrowerIndex)
numOfNftStaked = escrow.numOfNftStaked(borrower)
print(borrower, "has", numOfNftStaked, "nfts staked")
for index in range(int(numOfNftStaked)):
stakedNftAddress = escrow.stakedNftAddress(borrower, index)
stakedNftId = escrow.stakedNftId(borrower, index)
print(index, "th staked nft address is ", stakedNftAddress)
print(index, "th staked nft id is ", stakedNftId)
nftLoanAmount = escrow.nftLoanAmount(stakedNftAddress, stakedNftId)
nftLoanPeriod = escrow.nftLoanPeriod(stakedNftAddress, stakedNftId)
nftLoanInterest = escrow.nftLoanInterest(stakedNftAddress, stakedNftId)
nftLoanRepayAmount = escrow.nftLoanRepayAmount(
stakedNftAddress, stakedNftId
)
nftLoanExpireTime = escrow.nftLoanExpireTime(stakedNftAddress, stakedNftId)
nftLoanHolderAddress = escrow.nftLoanHolderAddress(
stakedNftAddress, stakedNftId
)
print(
"nftLoanAmount=",
nftLoanAmount,
"nftLoanPeriod=",
nftLoanPeriod,
"nftLoanInterest=",
nftLoanInterest,
"nftLoanRepayAmount=",
nftLoanRepayAmount,
"nftLoanExpireTime=",
nftLoanExpireTime,
"nftLoanHolderAddress=",
nftLoanHolderAddress,
)
def loanProcess2(escrow, simple_nft, simple_nft_id, sel_index, _account, loan_token):
simple_nft.approve(escrow.address, simple_nft_id, {"from": _account})
loan_amount, loan_days, loan_interest = escrow.getOffers(
simple_nft.address, simple_nft_id, sel_index
)
loan_token.approve(escrow.address, loan_amount, {"from": _account})
tx = escrow.requestLoan(
loan_token.address,
simple_nft.address,
simple_nft_id,
sel_index,
{"from": _account},
)
tx.wait(1)
def repayProcess2(escrow, simple_nft, simple_nft_id, account, loan_token):
time.sleep(1)
# holder_address, expire_time, repay_amount = escrow.getNftLockData(
# simple_nft.address, simple_nft_id, {"from": account}
# )
deposit_amount = escrow.nftLoanRepayAmount(simple_nft.address, simple_nft_id)
loan_token.approve(escrow.address, deposit_amount, {"from": account})
tx = escrow.redeemLoan(
loan_token.address,
simple_nft.address,
simple_nft_id,
{"from": account},
)
tx.wait(1)
def loanProcess(escrow, simple_nft, simple_nft_id, account, loan_token):
simple_nft.approve(escrow.address, simple_nft_id, {"from": account})
tx = escrow.nftStaking(
simple_nft.address,
simple_nft_id,
{
"from": account, # it's not the approval problem that make us cannt pass here
# "gas_price": 0,
# "gas_limit": 120000000000,
# "allow_revert": True,
},
)
tx.wait(1)
loan_Amount = Web3.toWei(0.001, "ether")
loan_Days = 3
loan_Interest = 286
escrow.setOffers(
simple_nft.address, simple_nft_id, loan_Amount, loan_Days, loan_Interest
)
loan_amount, loan_days, loan_interest = escrow.getOffers(
simple_nft.address, simple_nft_id
)
loan_token.approve(escrow.address, loan_amount, {"from": account})
tx = escrow.loanTransfer(
loan_token.address, account, loan_amount, {"from": account}
)
tx.wait(1)
initTime = time.time()
expireTime = initTime + loan_days * 24 * 60 * 60
repayAmount = loan_amount * (1 + loan_interest / (10000))
tx = escrow.nftLock(
simple_nft.address,
simple_nft_id,
account,
expireTime,
repayAmount,
{"from": account},
)
tx.wait(1)
def repayProcess(escrow, simple_nft, simple_nft_id, account, loan_token):
time.sleep(1)
holder_address, expire_time, repay_amount = escrow.getNftLockData(
simple_nft.address, simple_nft_id, {"from": account}
)
deposit_amount = repay_amount
current_time = time.time()
if (holder_address == account.address) & (time.time() < expire_time):
loan_token.approve(escrow.address, deposit_amount, {"from": account})
tx = escrow.loanRepay(
loan_token.address,
deposit_amount,
{"from": account},
)
tx.wait(1)
if deposit_amount >= repay_amount:
# simple_nft.approve(account, 0, {"from": account})
tx = escrow.nftUnStaking(
simple_nft.address, simple_nft_id, {"from": account}
)
tx.wait(1)
def add_allowed_nfts(escrow, dict_of_allowed_nfts, account):
update = True
for nft in dict_of_allowed_nfts:
add_tx = escrow.updateAllowedNfts(nft.address, update, {"from": account})
add_tx.wait(1)
set_tx = escrow.setPriceFeedContract(
nft.address, dict_of_allowed_nfts[nft], {"from": account}
)
set_tx.wait(1)
def main():
deploy_escrow_and_tokens_and_nfts()
# update_front_end()
| dankorea/loanAgainstNFT | scripts/deploy.py | deploy.py | py | 11,494 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "web3.Web3.toWei",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "web3.Web3",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "web3.Web3.toWei",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "web3.Web3",
"line_numbe... |
25691822349 | from django.core.mail import EmailMessage
from django.contrib.sites.shortcuts import get_current_site
from django.utils.encoding import force_bytes, force_text
from django.utils.http import urlsafe_base64_encode, urlsafe_base64_decode
from django.template.loader import render_to_string
from .tokens import user_activation_token
def send_email(user, request, mail_subject, template_name, to_email):
current_site = get_current_site(request)
mail_subject = mail_subject
if current_site.domain == 'localhost':
port = ':1337'
protocol ='http'
else:
port = ''
protocol = 'https'
message = render_to_string(template_name, {
'user': '%s %s' %(user.first_name, user.last_name),
'domain': current_site.domain,
'port': '%s' % port,
'protocol': protocol,
'uid': urlsafe_base64_encode(force_bytes(user.pk)),
'token': user_activation_token.make_token(user),
'new_email': to_email
})
email = EmailMessage(mail_subject, message, to=[to_email])
email.send() | michaeljohannesmeier/vidamia | project/app/django-src/api/utils.py | utils.py | py | 1,063 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.sites.shortcuts.get_current_site",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.template.loader.render_to_string",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "django.utils.http.urlsafe_base64_encode",
"line_numbe... |
5451484871 | from .models import Auction
from .serializer import AuctionSerializer
from utils.utils import Utils
from django.db import connection
class AuctionService:
def create(self, auction):
serializer = AuctionSerializer(data=auction)
if serializer.is_valid():
serializer.save()
return True
return False
def isNotEmpty(self):
return Auction.objects.all().first()
def deleteOldAuctions(self):
cursor = connection.cursor()
cursor.execute("DELETE FROM %s WHERE `isActive`=0" % Auction._meta.db_table)
cursor.execute("OPTIMIZE TABLE %s" % Auction._meta.db_table)
def getConnectedRealmActiveAucCount(self, connected_realm):
count = 0
for realm in connected_realm:
count += Auction.objects.filter(ownerRealm=realm.name, isActive=1).count()
return count
def getActiveAuctions(self, connected_realms):
current_auctions = []
current_auctions_auc = {}
for connected_realm in connected_realms:
current_auctions += Auction.objects.filter(ownerRealm=connected_realm['name'], isActive=1)
counter = 0
for current_auction in current_auctions:
counter += 1
current_auctions_auc[current_auction.auc] = current_auction
return current_auctions_auc
def getRealmPriceList(self, _itemId, _realm_name):
price_tab = []
current_auctions = Auction.objects.filter(ownerRealm=_realm_name, isActive=1, item=_itemId)
for current_auction in current_auctions:
quantity = current_auction.quantity
price = Utils.unifyPrice(current_auction.buyout)/quantity
# If price == 0 then auction is only for bid
if price > 0:
while quantity > 0:
price_tab.append(price)
quantity -= 1
return price_tab
def getAllRealmActiveAuctionsList(self, _realm_name):
return Auction.objects.filter(ownerRealm=_realm_name, isActive=1)
def getCurrentAuctions(self, connected_realms):
current_auctions = []
current_auctions_auc = []
for connected_realm in connected_realms:
current_auctions += Auction.objects.filter(ownerRealm=connected_realm['name'], isActive=1)
counter = 0
for current_auction in current_auctions:
counter += 1
current_auctions_auc.append(current_auction.auc)
return current_auctions_auc
def unactive(self, _connected_realms, _auc):
for connected_realm in _connected_realms:
auction = Auction.objects.filter(ownerRealm=connected_realm['name'], auc=_auc).first()
if auction:
auction.isActive = False
auction.save()
return True
return False
def updateAuction(self, _auc):
auction = Auction.objects.filter(ownerRealm=_auc['ownerRealm'],
auc=_auc['auc']).first()
if auction:
if str(auction.bid) != str(_auc['bid']) or str(auction.buyout) != str(_auc['buyout']) or str(auction.quantity) != str(_auc['quantity']) or str(auction.timeLeft) != str(_auc['timeLeft']):
auction.bid = _auc['bid']
auction.buyout = _auc['buyout']
auction.quantity = _auc['quantity']
auction.timeLeft = _auc['timeLeft']
auction.save()
return True
return False
| wboniecki/time_is_money | TimeIsMoney/model_auction/auction_service.py | auction_service.py | py | 3,526 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "serializer.AuctionSerializer",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "serializer.is_valid",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "serializer.save",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "model... |
18665391705 | """
Input
-----
intensities_experimental.csv
intensities_sensitivity_unscaled.csv
intensities_singlef_unscaled.csv
intensities_multif_unscaled.csv
conf[model][reference_amplitude]
conf[experimental][reference_amplitude]
Output
------
intensities.csv :
columns: view, Experimental, Model_SingleFreq_Max, Model_SingleFreq_Centre,
"""
import logging
import pandas as pd
import arim
from . import common
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def finalise_intensities(dataset_name, save):
conf = arim.io.load_conf(dataset_name)
result_dir = conf["result_dir"]
try:
exp_ref_amp = conf["experimental"]["reference_amplitude"]
model_ref_amp = conf["model"]["reference_amplitude"]
except KeyError:
exp_ref_amp = 1.0
model_ref_amp = 1 / conf["model"]["scaling"]
logger.warning(
"Missing reference amplitude model scaling. Use model scaling instead."
)
df_exp = pd.read_csv(result_dir / "intensities_experimental.csv", index_col=0)
intensities = df_exp / exp_ref_amp
df2 = pd.read_csv(result_dir / "intensities_sensitivity_unscaled.csv", index_col=0)
# scale and save
intensities["Model_Sensitivity"] = df2["Model_Sensitivity"] / model_ref_amp
try:
df3 = pd.read_csv(result_dir / "intensities_singlef_unscaled.csv", index_col=0)
except FileNotFoundError:
logger.info("Could not find intensities_singlef_unscaled.csv")
else:
# scale and save
intensities["Model_SingleFreq_Centre"] = (
df3["Model_SingleFreq_Centre"] / model_ref_amp
)
intensities["Model_SingleFreq_Max"] = (
df3["Model_SingleFreq_Max"] / model_ref_amp
)
try:
df4 = pd.read_csv(result_dir / "intensities_multif_unscaled.csv", index_col=0)
except FileNotFoundError:
logger.info("Could not find intensities_multif_unscaled.csv")
else:
# ignore useless model Model_MultiFreq_Max
intensities["Model_MultiFreq_Max"] = df4["Model_MultiFreq_Max"] / model_ref_amp
if save:
intensities.to_csv(result_dir / "intensities.csv")
return intensities
if __name__ == "__main__":
args = common.argparser(__doc__).parse_args()
intensities = finalise_intensities(args.dataset_name, args.save)
print(intensities)
| nbud/arimtoolkit | arimtoolkit/collect_intensities.py | collect_intensities.py | py | 2,376 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "arim.io.load_... |
6800684121 | from django.shortcuts import get_object_or_404
from rest_framework import viewsets
from rest_framework.permissions import IsAuthenticated
from project.models import Project
from ...models import ProjectTeamRole
from ..serializers.role import TeamRoleSerializer
class TeamRoleViewSet(
viewsets.ModelViewSet):
model = ProjectTeamRole
permission_classes = (IsAuthenticated,)
serializer_class = TeamRoleSerializer
pagination_class = None
@property
def project(self):
return get_object_or_404(Project, pk=self.kwargs.get('project_pk'))
def get_queryset(self):
user = self.request.user
if user.is_superuser:
queryset = self.model.objects.filter(
project_id=self.kwargs.get('project_pk'))
else:
queryset = self.model.objects.filter(
project_id=self.kwargs.get('project_pk'),
project__created_by=user)
return queryset
| tomasgarzon/exo-services | service-exo-projects/team/api/views/role.py | role.py | py | 968 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.viewsets.ModelViewSet",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "models.ProjectTeamRole",
"line_number": 15,
"usage_type": "name"
},
... |
21536945118 | # Desenvolva um programa que leia o nome, idade e sexo de 4 pessoas. No final mostre:
# A média de idade do grupo.
# Qual o nome do homem mais velho.
# Quantas mulheres tem menos de 20 anos.
from datetime import date
# Variáveis de controle:
idadeh = 0
nm_h = ''
nm_m = ''
sx = ''
idadem = 0
somaidade = 0
# Objetos principais:
ano_at = date.today().year
# Algoritmo:
for i in range(1, 5, 1):
print('----- {}ª PESSOA -----'.format(i))
nome = str(input('Diga o nome da {}ª pessoa:\n'.format(i))).upper().strip()
sexo = str(input('Qual o sexo da {}ª pessoa (m/h):\n'.format(i))).upper().strip()
idadei = int(input('Por fim, em que ano nasceu a pessoa?\n'))
somaidade += (ano_at - idadei)
if i == 1:
if sexo == 'H':
nm_h = nome
idadeh = ano_at - idadei
else:
nm_m = nome
idadem += 1
else:
if sexo == 'H' and (ano_at - idadei) > idadeh:
nm_h = nome
idadeh = ano_at - idadei
if sexo == 'M':
if (ano_at - idadei) < 20:
idadem += 1
média_idade = somaidade/i
print('O homem mais velho é {}, com {} anos.'.format(nm_h,idadeh))
print('A média_idade do grupo é {} anos.'.format(média_idade))
print('Existem {} mulheres com menos de 20 anos.'.format(idadem))
| FelipePassos09/Curso-em-Video-Python-mod2 | Exercícios/Ex#56.py | Ex#56.py | py | 1,438 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "datetime.date.today",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 17,
"usage_type": "name"
}
] |
40500439125 | #!/usr/bin/env python3
"""
Script to make graph for QMEE collaboration network. Edge width represents number of collaborations, node colour represents
institution type.
INPUTS:
None
OUTPUTS:
../Results/QMEENet_python.svg.pdf = network representing QMEE collaboration network
"""
__appname__ = 'Nets.py'
__author__ = 'Sam Turner (sat19@ic.ac.uk)'
__version__ = '0.0.1'
__license__ = 'GNU public'
# imports
import numpy as np
import pandas as pd
import itertools
import networkx as nx
import matplotlib.pylab as p
import math
import matplotlib.patches as mpatches
# load edges and nodes
edges = pd.read_csv("../Data/QMEE_Net_Mat_edges.csv", header=0)
edges.index = edges.columns
nodes = pd.read_csv("../Data/QMEE_Net_Mat_nodes.csv", header=0,index_col=0)
# adjacency list
AdjL = []
for inst1,inst2 in itertools.product(edges.index, edges.columns):
if edges.loc[inst1, inst2] > 0:
AdjL.append((inst1,inst2,edges.loc[inst1, inst2]))
# nodes list
sps = nodes.index.to_list()
# sizes
sizeL = nodes['Pis']
# colours for legend
conv = {'University':'green','Hosting Partner':'red','Non-Hosting Partners':'blue'}
collist = [conv[ty] for ty in nodes['Type'].to_list()]
red_patch = mpatches.Patch(color='red', label='Hosting Partner')
green_patch = mpatches.Patch(color='green', label='University')
blue_patch = mpatches.Patch(color='blue', label='Non-Hosting Partner')
# get positions
pos = nx.circular_layout(sps)
# Initialise graph, add edges to graph, and produce list of edge weights
ws=np.array([])
G = nx.Graph()
G.add_nodes_from(sps)
for l in AdjL:
G.add_edges_from([(l[0], l[1])])
ws=np.append(ws,l[2])
ws = np.log(ws) * 2
# calculate node sizes
NodSizs= 2000 * (1+sizeL-min(sizeL))/(1+max(sizeL)-min(sizeL))
# draw network
nx.draw_networkx(G, pos, width=ws,node_size= NodSizs,edge_color='grey', arrows = True, node_color=collist)
p.legend(handles=[red_patch,green_patch,blue_patch], loc = [0,0.7])
#save network
print("Saving network to ../Results/QMEENet_python.svg")
p.savefig("../Results/QMEENet_python.svg", format = "svg") | SamT123/CMEECoursework | Week7/Code/Nets.py | Nets.py | py | 2,092 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "itertools.product",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "matplotlib.patches.... |
14547426726 | from app_ml.functionalities.preprocessing_mongo import read_from_mongo_as_dataframe
from app_ml.functionalities.constants import SAVED_MODEL_PATH
from app_ml.models.RNN import RNN
import pymongo
def main():
cars_db = pymongo.MongoClient('mongodb://localhost:27017')['cars']
data, labels = read_from_mongo_as_dataframe(cars_db)
rnn_model = RNN(data.iloc[0][0].shape, 2)
x_train, y_train, x_test, y_test = rnn_model.split_train_test(data, labels)
rnn_model.train(x_train, y_train)
rnn_model.test(x_test, y_test)
rnn_model.model.save(SAVED_MODEL_PATH)
if __name__ == '__main__':
main() | serapan/DrEYEve | app_ml/train/train_rnn.py | train_rnn.py | py | 620 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "app_ml.functionalities.preprocessing_mongo.read_from_mongo_as_dataframe",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "app_ml.models.RNN.RNN",
"line_number": 9,
"usag... |
17289255049 | #sql requesting in python,
import sqlite3
import pandas
conn = sqlite3.connect("database.db")
cur = conn.cursor()
cur.execute("SELECT * FROM countries WHERE area >= 2000000")
rows = cur.fetchall()
conn.close()
for i in rows:
print(i)
#put in csv
df = pandas.DataFrame.from_records(rows)
df.columns = ["Rank", "Country", "Area", "Population"]
print(df)
df.to_csv("countries_big_area.csv", index=False)
#insert into db
data = pandas.read_csv("ten_more_countries.txt")
conn = sqlite3.connect("database.db")
cur = conn.cursor()
for index,row in data.iterrows():
print(row["Country"],row["Area"])
cur.execute("INSERT INTO countries VALUES(NULL,?,?,NULL)",(row["Country"],row["Area"]))
conn.commit()
conn.close()
#files counter
import glob
file_list = glob.glob1("files", "*.py")
print(len(file_list))
#recursive files counter
file_list = glob.glob("files/subdirs/**/*.py", recursive=True)
print(len(file_list))
| michalmendygral/python | python101/advanced_2.py | advanced_2.py | py | 926 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pa... |
36731154633 | # -*- coding: utf-8 -*-
from django.conf.urls import url
from baremetal_service.bw_views import BmSharedBandwidthsViews, BmSharedBandwidthViews, BmSharedBandwidthFipViews
from baremetal_service.views import BaremetalServiceFloatingIPViews
urlpatterns = [
# 共享带宽实例管理
# 功能:购买,列表,编辑,详情,删除/批量删除
# resource: shared_bandwidths_order
# 功能:购买
url(r'^shared_bandwidths_order$', BmSharedBandwidthsViews.as_view(
actions={'post': 'create'})),
# resource: shared_bandwidths
# 功能:列表,编辑,删除/批量删除
url(r'^shared_bandwidths$', BmSharedBandwidthsViews.as_view(
actions={'get': 'list', 'put': 'update', 'post': 'destroy'})),
# resource: shared_bandwidth
# 功能:搜索详情
url(r'shared_bandwidth$', BmSharedBandwidthViews.as_view(
actions={'post': 'search'})),
# resource: floating IP
# 功能:添加弹性公网IP,移除弹性公网IP
url(r'^shared_bandwidth/(?P<bandwidth_id>[0-9a-fA-F-]{1,})/floating_ips$',
BmSharedBandwidthFipViews.as_view(actions={'get': "list_bandwidth_floatingips"})),
url(r'^floating_ip/(?P<floating_ip_id>[0-9a-fA-F-]{1,})/shared_bandwidths$',
BmSharedBandwidthFipViews.as_view(actions={'get': "list_floatingip_bandwidths"})),
url(r'^shared_bandwidth/(?P<bandwidth_id>[0-9a-fA-F-]{1,})/attach_floating_ips$',
BmSharedBandwidthFipViews.as_view(actions={'post': 'attach'},
name='bandwidth_attach_floatingips')),
url(r'^shared_bandwidth/(?P<bandwidth_id>[0-9a-fA-F-]{1,})/detach_floating_ips$',
BmSharedBandwidthFipViews.as_view(actions={'post': 'detach'},
name='bandwidth_detach_floatingips')),
# 裸金属平台中涉及的更新:弹性公网IP(floating_ip)
# 功能:列表,删除
url(r'^floating_ips$', BaremetalServiceFloatingIPViews.as_view(
actions={'get': 'floating_ip_list', 'delete': 'floating_ip_delete'})),
]
| 21vcloud/Controller | app/baremetal_service/urls_bandwidth.py | urls_bandwidth.py | py | 2,081 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "baremetal_service.bw_views.BmSharedBandwidthsViews.as_view",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "baremetal_service.bw_views.BmSharedBandwidthsViews",
"line_nu... |
42871168335 | import _
import config
from logging import Logger
from sklearn.metrics import *
from utils.experiments_utils import *
from utils.experiments_utils.results.tables import *
from utils.helpers.datasets import Dataset
from utils.rulekit.classification import RuleClassifier
from steps.train import TrainedModelsResults
def reduce_mean(df: pd.DataFrame) -> pd.DataFrame:
numerical_columns = df.select_dtypes('number').columns.tolist()
data = {}
for column in df.columns.tolist():
if column in numerical_columns and '(std)' not in column:
data[column] = [df[column].mean()]
data[f'{column} (std)'] = [df[column].std()]
else:
data[column] = [df[column].iloc[0]]
return pd.DataFrame(data)
def write_rules(
results_dir: str,
models: TrainedModelsResults
):
models = getattr(models, 'original')
for i, model in enumerate(models):
results_cv_dir = f'{results_dir}/original/cv/{i + 1}'
os.makedirs(results_cv_dir, exist_ok=True)
rule_file_path = f'{results_cv_dir}/rules.txt'
with open(rule_file_path, 'w+') as file:
for rule in model.model.rules:
file.write(
f'{str(rule)} (p={int(rule.weighted_p)}, n={int(rule.weighted_n)}, P={int(rule.weighted_P)}, N={int(rule.weighted_N)})\n')
def evaluate_models(
    dataset: Dataset,
    variant_name: str,
    dataset_type: str,
    models: List[RuleClassifier],
    logger: Logger
) -> None:
    """Evaluate fitted rule classifiers and persist per-fold and aggregated results.

    For each model (one per CV fold; a single model means a plain train/test
    split) this computes classification metrics, condition statistics and
    train/test confusion matrices, saves the per-fold tables and CSVs, and
    finally reduces all folds to mean/std rows plus averaged confusion matrices.

    Args:
        dataset: dataset wrapper providing train/test splits and CV folds.
        variant_name: experiment variant identifier used in result-table paths.
        dataset_type: dataset flavour label (e.g. 'original') used in paths.
        models: one fitted classifier per fold.
        logger: logger for progress messages.
    """
    logger.info(
        f'Evalute model for dataset: "{dataset.name}" ({dataset_type}) for variant: "{variant_name}"')
    metrics: Table = Tables.get(dataset.name, variant_name, dataset_type, 'metrics')
    conditions_stats: Table = Tables.get(
        dataset.name, variant_name, dataset_type, 'conditions_stats')
    # Confusion matrices are summed across folds and divided by the fold count below.
    confusion_matrix_train_avg = None
    confusion_matrix_test_avg = None
    for i, model in enumerate(models):
        if len(models) == 1:
            # Exactly one model => single train/test split instead of cross-validation.
            logger.info(f'Evaluate on train_test')
            X_train, y_train, X_test, y_test = dataset.get_train_test()
        else:
            logger.info(f'Evaluate for fold: {i + 1}')
            X_train, y_train, X_test, y_test = dataset.get_cv_fold(i + 1)
        fold_metrics: Table = Tables.get(
            dataset.name, variant_name, dataset_type, 'cv', str(i + 1), 'metrics')
        prediction_test = model.predict(X_test)
        prediction_train = model.predict(X_train)
        fold_metrics.rows.append({
            'dataset': dataset.name,
            'variant': variant_name,
            'dataset type': dataset_type,
            'BAcc (test)': balanced_accuracy_score(y_test, prediction_test),
            'BAcc (train)': balanced_accuracy_score(y_train, prediction_train),
            'Acc (test)': accuracy_score(y_test, prediction_test),
            'Acc (train)': accuracy_score(y_train, prediction_train),
            'rules': sum([
                value if 'statistics' not in key else 0 for key, value in model.model.stats.conditions_stats.stats.items()
            ]), # change conditions counting so that alternatives are counted as one condition
            'conditions_count': model.model.stats.rules_count * model.model.stats.conditions_per_rule,
            'avg conditions per rule': model.model.stats.conditions_per_rule,
            'avg rule quality': model.model.stats.avg_rule_quality,
            'avg rule precision': model.model.stats.avg_rule_precision,
            'avg rule coverage': model.model.stats.avg_rule_coverage,
            'training time total (s)': model.model.stats.time_total_s,
            'training time growing (s)': model.model.stats.time_growing_s,
            'training time pruning (s)': model.model.stats.time_pruning_s,
            'induction measure': model.get_params()['induction_measure'].replace('Measures.', ''),
            'pruning measure': model.get_params()['pruning_measure'].replace('Measures.', ''),
            'voting measure': model.get_params()['voting_measure'].replace('Measures.', ''),
        })
        fold_metrics.save()
        # Label order is taken from the full dataset so every fold's confusion
        # matrix uses the same row/column ordering and can be summed.
        labels_values = Dataset(dataset.name).get_full()[1].unique().tolist()
        cm = confusion_matrix(
            y_train, prediction_train, labels=labels_values)
        if confusion_matrix_train_avg is None:
            confusion_matrix_train_avg = cm
        else:
            confusion_matrix_train_avg += cm
        train_confusion_matrix = pd.DataFrame(
            cm,
            index=[f'true:{value}' for value in labels_values],
            columns=[f'pred:{value}' for value in labels_values]
        )
        train_confusion_matrix.to_csv(
            f'{os.path.dirname(fold_metrics._file_path)}/confusion_matrix_train.csv')
        cm = confusion_matrix(
            y_test, prediction_test, labels=labels_values)
        if confusion_matrix_test_avg is None:
            confusion_matrix_test_avg = cm
        else:
            confusion_matrix_test_avg += cm
        test_confusion_matrix = pd.DataFrame(
            cm,
            index=[f'true:{value}' for value in labels_values],
            columns=[f'pred:{value}' for value in labels_values]
        )
        test_confusion_matrix.to_csv(
            f'{os.path.dirname(fold_metrics._file_path)}/confusion_matrix_test.csv')
        fold_conditions_stats: Table = Tables.get(
            dataset.name, variant_name, dataset_type, 'cv', str(i + 1), 'conditions_stats')
        conditions_stats_dict: dict = model.model.stats.conditions_stats.stats
        conditions_stats_dict['dataset'] = dataset.name
        conditions_stats_dict['variant'] = variant_name
        conditions_stats_dict['dataset type'] = dataset_type
        tmp = conditions_stats_dict
        # Flatten the nested alternatives dict into prefixed top-level keys so
        # the row fits a flat table schema.
        if 'Inner alternatives statistics' in conditions_stats_dict:
            for key, value in conditions_stats_dict['Inner alternatives statistics'].items():
                tmp[f'Inner alternatives - {key}'] = value
            del conditions_stats_dict['Inner alternatives statistics']
        fold_conditions_stats.rows.append(conditions_stats_dict)
        fold_conditions_stats.save()
        metrics.rows += fold_metrics.rows
        conditions_stats.rows += fold_conditions_stats.rows
    # Average the summed confusion matrices over folds and write them next to
    # the aggregated metrics table.
    confusion_matrix_train_avg = confusion_matrix_train_avg / len(models)
    confusion_matrix_train_avg = pd.DataFrame(
        confusion_matrix_train_avg,
        index=[f'true:{value}' for value in labels_values],
        columns=[f'pred:{value}' for value in labels_values]
    )
    confusion_matrix_train_avg.to_csv(
        f'{os.path.dirname(metrics._file_path)}/confusion_matrix_train.csv')
    confusion_matrix_test_avg = confusion_matrix_test_avg / len(models)
    confusion_matrix_test_avg = pd.DataFrame(
        confusion_matrix_test_avg,
        index=[f'true:{value}' for value in labels_values],
        columns=[f'pred:{value}' for value in labels_values]
    )
    confusion_matrix_test_avg.to_csv(
        f'{os.path.dirname(metrics._file_path)}/confusion_matrix_test.csv')
    # Reduce all fold rows to a single mean/std summary row.
    metrics.set_df(reduce_mean(metrics.as_pandas()))
    conditions_stats.set_df(reduce_mean(conditions_stats.as_pandas()))
def evaluate_all_models(
    dataset_name: str,
    variant_name: str,
    models: TrainedModelsResults,
    logger: Logger
) -> None:
    """Write rules and evaluation tables for one dataset/variant combination.

    Configures the shared Tables registry, dumps the rules of all 'original'
    models, and — when any exist — computes and saves their metrics.
    """
    logger.info(
        f'Evalute model for dataset: "{dataset_name}" for variant: "{variant_name}"')
    Tables.configure(directory=config.RESULTS_BASE_PATH)
    logger.info('Write model rules')
    rules_dir = f'{config.RESULTS_BASE_PATH}/{dataset_name}/{variant_name}'
    write_rules(rules_dir, models)
    if len(models.original) > 0:
        evaluate_models(Dataset(dataset_name), variant_name, 'original',
                        models.original, logger)
@step()
def evaluate_plain_model(
    dataset_name: str,
    variant_name: str,
    models: TrainedModelsResults,
):
    """Pipeline step wrapper: evaluate the plain variant via evaluate_all_models, using this step's logger."""
    evaluate_all_models(dataset_name, variant_name, models,
                        evaluate_plain_model.logger)
@step()
def evaluate_models_inner_alternatives(
    dataset_name: str,
    variant_name: str,
    models: TrainedModelsResults,
):
    """Pipeline step wrapper: evaluate the inner-alternatives variant via evaluate_all_models, using this step's logger."""
    evaluate_all_models(dataset_name, variant_name, models,
                        evaluate_models_inner_alternatives.logger)
@step()
def evaluate_models_complex_conditions(
    dataset_name: str,
    variant_name: str,
    models: TrainedModelsResults,
):
    """Pipeline step wrapper: evaluate the complex-conditions variant via evaluate_all_models, using this step's logger."""
    evaluate_all_models(dataset_name, variant_name, models,
                        evaluate_models_complex_conditions.logger)
@step()
def evaluate_models_complex_conditions_and_alternatives(
    dataset_name: str,
    variant_name: str,
    models: TrainedModelsResults,
):
    """Pipeline step wrapper: evaluate the complex-conditions-and-alternatives variant via evaluate_all_models, using this step's logger."""
    evaluate_all_models(dataset_name, variant_name, models,
                        evaluate_models_complex_conditions_and_alternatives.logger)
| cezary986/complex_conditions | src/experiments/public_datasets/steps/evaluate.py | evaluate.py | py | 9,017 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "steps.train.TrainedModelsResults",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "utils.helpers.datasets.Dataset",
"line_number": 40,
"usage_type": "name"
},
{
"api_name": "utils.rulekit.classification.RuleClassifier",
"line_number": 43,
"usage_t... |
6510269028 | from enum import Enum
from selenium_util.locator import Locator
from selenium.webdriver.common.by import By
from pages.mortgage_rates_page import MortgageRatesPage
from pages.zillow_base_page import ZillowBasePage
from utilities.mortgage_math import calculate_payment
class LoanPrograms(Enum):
    """
    A class to represent the loan programs that are available in the web select element
    Values are (html <option> "value" attribute, loan term in years) tuples.
    Note to developer, if you change the structure of this enum, make sure you fix all references to it, there are some
    references to the 0th and 1st indexes of the tuple values
    """
    FIXED_30 = ("Fixed30Year", 30)
    FIXED_15 = ("Fixed15Year", 15)
    ARM_5 = ("ARM5", 5)
    @staticmethod
    def lookup(html_value):
        """
        If you have an html value and you need the corresponding enum, use this static helper method
        :param html_value: the html "value" attribute on a <option> that you would like the enum for
        :return: The enum that represents that value
        :raises AssertionError: if no enum member matches the given value
        """
        for program in LoanPrograms:
            if program.value[0] == html_value:
                return program
        # `assert False` is stripped when Python runs with -O, which would make this
        # helper silently return None; raise explicitly so a missing program always fails loudly.
        raise AssertionError("Failed to find [" + html_value + "] in LoanPrograms enum, please add a new enum value")
class MortgageCalcPage(ZillowBasePage):
    """
    Class that represents the mortgage calculator web page, inherits from ZillowBasePage
    ...
    Attributes
    ----------
    driver : webdriver
        webdriver that this page will use to interact with the web page, defined in parent class
    various locators : Locator
        Locator objects that define ways to find web elements on this page, see declarations below
    Methods
    -------
    See below - numerous methods for setting, and asserting different web elements on this page
    """
    '''
    ***** BEGIN LOCATORS *****
    '''
    _HOME_PRICE_INPUT = Locator(By.ID, "homePrice")
    _DOWN_PAYMENT_PERCENT_INPUT = Locator(By.ID, "form-1_downPaymentPercent")
    _DOWN_PAYMENT_AMOUNT_INPUT = Locator(By.ID, "form-1_downPayment")
    _TERM_SELECT = Locator(By.ID, "form-1_term")
    _RATE_INPUT = Locator(By.ID, 'rate')
    _RATE_HELP_BUTTON = Locator(By.XPATH, "//span[text()=\"More info on Interest rate\"]/ancestor::button")
    # TODO ask dev for an ID on this element, this is the best way to locate the element currently and it is decently
    # fragile to future changes
    _SEE_CURRENT_RATES_LINK = Locator(By.XPATH, "//a[text()=\"See current rates\"]")
    _RATE_ERROR_MESSAGE = Locator(By.CSS_SELECTOR, "[class*=StyledFormHelp]")
    # TODO ask dev for an ID on this element, this is the best way to locate the element currently and it is very
    # fragile to future changes
    _ADVANCED_BUTTON = Locator(By.XPATH, "//button[text() = \"Advanced\"]")
    _PMI_CHECKBOX = Locator(By.ID, "form-1_includePMI")
    _TAXES_INSURANCE_CHECKBOX = Locator(By.ID, "form-1_includeTaxesInsurance")
    _TAXES_INPUT = Locator(By.ID, "form-1_propertyTaxRateAnnualAmount")
    _INSURANCE_INPUT = Locator(By.ID, "annualHomeownersInsurance")
    # TODO ask dev for an ID on this element, this is the best way to locate the element currently and it is very
    # fragile to future changes
    _PAYMENT_TEXT = Locator(By.CSS_SELECTOR, "[y=\"20\"]")
    '''
    ***** END LOCATORS *****
    '''
    def __init__(self, driver):
        """
        Create a new MortgageCalcPage
        Assumes you have already navigated to this page, and will wait for the mortgage rate input element to be
        clickable before returning your new page object to you
        :param driver: webdriver that this page will use to interact with the web page
        """
        super().__init__(driver)
        self.wait_for_element_to_exist(self._RATE_INPUT).wait_for_element_to_be_clickable()
    def set_interest_rate(self, rate):
        """
        Set the interest rate to the given value
        :param rate: rate to set, can be any type, eventually it will be casted to string for entry
        :return: self, this page object after any changes
        """
        # TODO if pressing enter bug on Interest rate input is fixed, we could make this method send the enter key
        # and then we could remove the click of payment
        self.get_element(self._RATE_INPUT).set_text(rate)
        # click on something else to make the input field lose focus and cause a re-calculation
        self.get_element(self._PAYMENT_TEXT).click()
        return self
    def assert_interest_rate(self, expected_rate):
        """
        assert that the interest rate input has an expected value
        :param expected_rate: the rate you expect (string comparison is used)
        :return: self, this page object after any changes
        """
        actual_value = self.get_element(self._RATE_INPUT).get_value()
        assert actual_value == expected_rate, \
            "Expected interest rate to be [" + expected_rate + "] but it was [" + actual_value + "]"
        return self
    def assert_interest_rate_has_value(self):
        """
        Simple assertion, just verifies that the interest rate input has a value
        :return: self, this page object after any changes
        """
        actual_value = self.get_element(self._RATE_INPUT).get_value()
        assert float(actual_value) > 0, \
            "Expected interest rate to be a non empty value greater than 0 but it was [" + actual_value + "]"
        return self
    def assert_payment(self, expected_payment):
        """
        Assert the calculated payment on the web page is the expected value
        :param expected_payment: numeric value of what you expect the payment to be
        :return: self, this page object after any changes
        """
        # add a $ and strip off any decimal places in the expected
        expected_payment = "${:,.0f}".format(expected_payment)
        payment_element = self.get_element(self._PAYMENT_TEXT)
        # We might have just changed an input,
        # give the payment element a chance to update if it hasn't yet (race condition)
        payment_element.wait_for_element_to_have_text(expected_payment)
        actual_value = payment_element.get_text()
        print("Asserting payment is [" + expected_payment + "]")
        # note we are comparing strings here
        assert actual_value == expected_payment, \
            "Expected payment to be [" + expected_payment + "] but it was [" + actual_value + "]"
        return self
    def assert_payment_given_input_values(self):
        """
        Assertion method to assert that based on what value all of the inputs have, the displayed calculation is correct
        This method was created to specifically test that when the page loads, it loads with all of the inputs having
        a value, and that the displayed calculation is correct based on those values.
        This method should NOT be used in place of assert_payment to prevent false positives. It's better practice to
        fully calculate the payment yourself, based on what you inputted
        :return: self, this page object after any changes
        """
        price = float(self.get_element(self._HOME_PRICE_INPUT).get_value().replace(",", ""))
        down_payment = float(self.get_element(self._DOWN_PAYMENT_AMOUNT_INPUT).get_value().replace(",", ""))
        term_selected = self.get_select_element(self._TERM_SELECT).get_selected_value()
        loan_program = LoanPrograms.lookup(term_selected)
        rate = float(self.get_element(self._RATE_INPUT).get_value())
        calculated_payment = calculate_payment(price, down_payment, rate, loan_program)
        monthly_taxes = float(self.get_element(self._TAXES_INPUT).get_value().replace(",", "")) / 12
        monthly_insurance = float(self.get_element(self._INSURANCE_INPUT).get_value().replace(",", "")) / 12
        # our calculation above does not consider taxes and insurance, so now that we have scraped them off the page,
        # add them into to our expected total
        total_payment = calculated_payment + monthly_taxes + monthly_insurance
        self.assert_payment(total_payment)
        return self
    def set_home_price(self, home_price):
        """
        Set the home price input to the desired value
        :param home_price: value to enter into the input, will be casted to string
        :return: self, this page object after any changes
        """
        self.get_element(self._HOME_PRICE_INPUT).set_text(home_price)
        return self
    def set_down_payment_percent(self, percent):
        """
        Set the down payment percent input to the desired value (numeric or string accepted)
        Note your percent should not include a % if you choose to pass a string
        :param percent: desired percent (numeric or string accepted)
        :return: self, this page object after any changes
        """
        self.get_element(self._DOWN_PAYMENT_PERCENT_INPUT).set_text(percent, True)
        return self
    def assert_down_payment_percent(self, expected_percent):
        """
        Assert the down payment percent input is the expected value
        :param expected_percent: value to assert against the input, should be numeric
        :return: self, this page object after any changes
        """
        percent_element = self.get_element(self._DOWN_PAYMENT_PERCENT_INPUT)
        percent_element.wait_for_element_to_have_value(expected_percent, True)
        actual_value = percent_element.get_value()
        assert float(actual_value) == float(expected_percent), \
            "Expected down payment percent to be [" + str(expected_percent) + "] but it was [" + str(actual_value) + "]"
        return self
    def set_down_payment_amount(self, amount):
        """
        Set the down payment amount input to the desired value
        :param amount: desired value, will be casted to string before entered
        :return: self, this page object after any changes
        """
        self.get_element(self._DOWN_PAYMENT_AMOUNT_INPUT).set_text(amount, True)
        return self
    def assert_down_payment_amount(self, expected_amount):
        """
        Assert that the down payment amount has an expected value
        :param expected_amount: numeric value that you expect the input to have
        :return: self, this page object after any changes
        """
        # Add in commas to your expected amount and remove any decimal places
        expected_amount = "{:,.0f}".format(expected_amount)
        actual_value = self.get_element(self._DOWN_PAYMENT_AMOUNT_INPUT).get_value()
        # note comparing strings here
        assert actual_value == expected_amount, \
            "Expected down payment to be [" + str(expected_amount) + "] but it was [" + str(actual_value) + "]"
        return self
    def select_loan_program(self, loan_program: LoanPrograms):
        """
        Select the desired loan program in the drop-down
        :param loan_program: Enum for the value you would like selected
        :return: self, this page object after any changes
        """
        # 0th index of the loan program value is the html value attribute for that choice
        self.get_select_element(self._TERM_SELECT).select_by_value(loan_program.value[0])
        return self
    def _open_advanced(self):
        """
        Open the advanced drop down to expose the advanced options, smart enough to first determine if it is already
        open and do nothing
        """
        optional_element = self.get_element_if_exists(self._ADVANCED_BUTTON)
        if optional_element is not None:
            optional_element.click()
            self.wait_for_element_to_exist(self._TAXES_INSURANCE_CHECKBOX)
    # NOTE: _open_advanced above is a private helper that does not refresh the page, so it does not need to return self
    def check_taxes_insurance(self, check):
        """
        Check or uncheck the taxes/insurance checkbox based on your desired value
        :param check: true to check the box, false to uncheck it
        :return: self, this page object after any changes
        """
        self._open_advanced()
        self.get_checkbox_element(self._TAXES_INSURANCE_CHECKBOX).check(check)
        return self
    def check_pmi(self, check):
        """
        Check or uncheck the PMI checkbox based on your desired value
        :param check: true to check the box, false to uncheck it
        :return: self, this page object after any changes
        """
        self._open_advanced()
        self.get_checkbox_element(self._PMI_CHECKBOX).check(check)
        return self
    def assert_interest_help_modal_opens_and_closes(self, click_x):
        """
        Assert that the interest help modal will open, be click-able and then assert that it closes if someone clicks
        off the modal, or clicks the close x button
        :param click_x: do you want to click the x button (True) or do you want to click off the modal (FALSE)
        :return: self, this page object after any changes
        """
        # create locator inline because it is very unlikely that any other method in this class will ever need it. If
        # that changes, move this to a class declaration
        modal_p_loc = Locator(By.XPATH, "//p[contains(text(), \"Representative interest rates\")]")
        # Assert that the modal is not open, needed to prove we can tell it's not open
        assert len(self.driver.find_elements(*modal_p_loc.as_args())) == 0, \
            "Found modal in DOM before trying to open it, this means our next assertion cannot prove " \
            "that the modal is actually open"
        self.get_element(self._RATE_HELP_BUTTON).click()
        # Wait for the modal to open and be click-able, if either of these fail this test will fail before the assertion
        modal_p_element = self.wait_for_element_to_exist(modal_p_loc)
        modal_p_element.wait_for_element_to_be_clickable()
        # Technically covered by the conditions above, but a good sanity check
        assert len(self.driver.find_elements(*modal_p_loc.as_args())) == 1, \
            "Interest rate help modal did not open"
        if click_x:
            # create locator inline because it is very unlikely that any other method in this class will ever need it.
            # If that changes, move this to a class declaration
            self.get_element(Locator(By.CSS_SELECTOR, "[class*=CloseButton]")).click()
        else:
            # click somewhere else to close the modal
            self.get_element(self._PAYMENT_TEXT).click()
        # Wait until the modal is no longer in the DOM
        modal_p_element.wait_for_element_to_be_stale()
        # Technically covered by the condition above, but a good sanity check to make sure it's actually gone
        assert len(self.driver.find_elements(*modal_p_loc.as_args())) == 0, \
            "Found modal after it was supposedly closed"
        return self
    def click_see_current_rates(self):
        """
        Click the see current rates link and navigate to that page
        :return: a new MortgageRatesPage as a result of navigation
        """
        # Note! opens in a new tab
        self.get_element(self._SEE_CURRENT_RATES_LINK).click()
        # close the current window
        self.driver.close()
        # switch to the new tab we opened
        self.driver.switch_to.window(self.driver.window_handles[0])
        return MortgageRatesPage(self.driver)
    def assert_interest_rate_error_message(self, expected_message):
        """
        Assert the interest rate error message
        :param expected_message: error message that you expect to be showing
        :return: self, this page object after any changes
        """
        message_p_element = self.wait_for_element_to_exist(self._RATE_ERROR_MESSAGE)
        actual_message = message_p_element.get_text()
        # bug fix: failure text previously said "down payment percent" (copy-paste from
        # assert_down_payment_percent) which made failures misleading
        assert actual_message == expected_message, \
            "Expected interest rate error message to be [" + expected_message + "] but it was [" + actual_message + "]"
        return self
    def assert_no_interest_rate_error_message(self):
        """
        Assert that the interest rate input does NOT have any error messages
        :return: self, this page object after any changes
        """
        potential_message_elements = self.get_elements(self._RATE_ERROR_MESSAGE)
        assert len(potential_message_elements) == 0, \
            "Unexpected error message found: " + potential_message_elements[0].get_text()
        return self
| jcahill-ht/Hometap-zillow-test | pages/mortage_calculator_page.py | mortage_calculator_page.py | py | 16,429 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pages.zillow_base_page.ZillowBasePage",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "selenium_util.locator.Locator",
"line_number": 57,
"usage_type": "call"
},
{
"api... |
28054516597 | import feedparser
import datetime
import dateutil.parser
from django.http import HttpResponseRedirect
from django.shortcuts import get_object_or_404, render, redirect
from django.urls import reverse
from django.views import generic
from django_feedparser.settings import *
from .models import Story
from source.models import Sources
from django.utils import timezone
from django.template import loader
from django.db.models.aggregates import Count
from random import randint
def view_stories(request,id):
    """Refresh stored stories from one source's RSS feed and render the story list page.

    :param request: Django HttpRequest
    :param id: primary key of the ``Sources`` row whose feed should be pulled
    :return: rendered ``story/story_list.html`` response (context carries the source id)
    :raises Http404: if no ``Sources`` row with the given id exists
    """
    # Hoisted out of the per-entry loop; kept local so the module imports even
    # when bs4 is missing.
    from bs4 import BeautifulSoup
    # Bug fix: the previous DoesNotExist handler called an undefined `alert(...)`
    # (NameError) and left rss_url unbound; a 404 is the conventional response.
    rss_url = get_object_or_404(Sources, id=id).rss_text
    feed = feedparser.parse(rss_url)
    # NOTE(review): this wipes stories from EVERY source on each request, not
    # just this one — it matches newsfeed() below, but confirm it is intended.
    for post in Story.objects.all():
        post.delete()
    # Deduplicate against the URLs of all remaining stories (the old query
    # filtered Story by its own pk == source id, which never matched anything);
    # a set gives O(1) membership tests inside the loop.
    story_urls = set(Story.objects.all().values_list('url', flat=True))
    for entry in feed['entries']:
        url = entry.get('link')
        if url not in story_urls:
            story = Story()
            story.title = entry.get('title')
            story.url = url
            story.pub_date = dateutil.parser.parse(entry.get('published'))
            # Strip HTML tags from the feed description before storing it;
            # an explicit parser avoids bs4's "no parser specified" warning.
            soup = BeautifulSoup(entry.get('description'), "html.parser")
            story.body_text = soup.get_text()
            story.save()
    # passing stories data to template context and render it
    context = {
        "id": id,
    }
    return render(request, 'story/story_list.html', context)
def newsfeed(request):
    """Rebuild the Story table from every configured source's RSS feed and render the combined feed page."""
    num_post = 20  # NOTE(review): unused — presumably meant to cap rendered posts; confirm and wire up or remove
    args={}
    # adding stories in database
    query_params = {}  # empty filter => all sources
    source_qs = Sources.objects.filter(**query_params).values('rss_text')
    # Wipe all previously stored stories before re-fetching every feed.
    for post in Story.objects.all():
        post.delete()
    for source in source_qs:
        feed = feedparser.parse(source.get('rss_text'))
        # URLs already stored this run, used to skip duplicate entries.
        story_urls = Story.objects.all().values_list('url', flat=True).distinct()
        for entry in feed['entries']:
            url = entry.get('link')
            if url not in story_urls:
                story = Story()
                story.title = entry.get('title')
                story.url = url
                date=entry.get('published')
                story.pub_date = dateutil.parser.parse(date)
                story.save()
                # NOTE(review): only the most recently saved story survives in the
                # context — verify the template queries Story itself for the list.
                args['story']=story
return render(request, 'story/newsfeed.html', args) | arpitmandal/newsmonitor | story/views.py | views.py | py | 2,429 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "source.models.Sources.objects.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "source.models.Sources.objects",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "source.models.Sources",
"line_number": 20,
"usage_type": "name"
},... |
19618117170 | import argparse
import logging
import pathlib
import matplotlib.pyplot as plt
# from train import test
plt.style.use("ggplot")
import gc
from pprint import pformat
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
# from functorch import grad, make_functional_with_buffers, vmap
from torch.utils.data import DataLoader, Subset
from torchsummary import summary
from tqdm import trange
from tqdm import tqdm
from utils.common import (
AlexNet,
DatasetwithIndices,
create_config,
get_dataset,
get_dataset_with_indices,
get_logger,
get_model,
get_optimizer,
get_parser,
get_test_dataset,
get_train_dataset,
seed_everything,
)
# def get_mean_gradients(model, loader, use_all_params=False):
# num_params = len(
# list(model.parameters() if use_all_params else model.fc.parameters())
# )
# mean_gradients = [None for i in range(num_params)]
# num_iter = len(loader)
# progress_bar = tqdm(
# loader, total=num_iter, desc="Mean Gradients", leave=False, position=2
# )
# for batch in progress_bar:
# images, labels, _ = batch
# torch.cuda.empty_cache()
# images, labels = images.to(device), labels.to(device)
# output = model(images)
# gradient = torch.autograd.grad(
# F.nll_loss(output, labels),
# model.parameters() if use_all_params else model.fc.parameters(),
# )
# if mean_gradients[0] is not None:
# for j in range(num_params):
# mean_gradients[j] += gradient[j].detach() # .cpu().numpy()
# else:
# for j in range(len(gradient)):
# mean_gradients[j] = gradient[j].detach() # .cpu().numpy()
# for j in range(len(gradient)):
# mean_gradients[j] /= num_iter
# return mean_gradients
# def get_similarities(model, dataset, batch_size, mean_gradients, use_all_params=False):
# slmodel, params, buffers = make_functional_with_buffers(
# model if use_all_params else model.fc
# )
# loader = DataLoader(
# dataset, batch_size, shuffle=True, num_workers=2, pin_memory=True
# )
# def loss_function(params, buffers, x, y):
# x = x.unsqueeze(0)
# y = y.unsqueeze(0)
# preds = slmodel(params, buffers, x)
# return F.nll_loss(preds, y)
# batched_loss = vmap(
# grad(loss_function),
# (None, None, 0, 0),
# )
# similarities = []
# img_indices = []
# progress_bar = tqdm(
# enumerate(loader),
# total=len(loader),
# desc="Per Sample Gradient Similarity",
# leave=False,
# position=2,
# )
# for i, batch in progress_bar:
# imgs, labels, inds = batch
# torch.cuda.empty_cache()
# imgs, labels, inds = imgs.to(device), labels.to(device), inds.numpy()
# with torch.no_grad(): ### TODO: Add if else for if use_all_params
# hidden_state = model.features(imgs)
# gradient = batched_loss(params, buffers, hidden_state, labels)
# gc.collect()
# torch.cuda.empty_cache()
# # gradient = torch.autograd.grad(F.nll_loss(model(imgs), labels), model.parameters())
# sim = (
# torch.stack(
# [
# F.cosine_similarity(a.view(a.shape[0], -1), b.view(1, -1))
# for a, b in zip(gradient, mean_gradients)
# ],
# dim=-1,
# )
# .sum(dim=-1)
# .detach()
# .cpu()
# .numpy()
# )
# # sim = torch.stack(list(map(lambda, gradient, mean_gradients))).sum()
# similarities.append(sim)
# img_indices.append(inds)
# return np.concatenate(similarities), np.concatenate(img_indices)
# def cosinesimilarity(a,b):
# return np.divide(np.dot(a, b.T), (np.linalg.norm(a, axis=-1, keepdims=True) * np.linalg.norm(b, keepdims=True)))
def cosinesimilarity(a, b):
    """Cosine similarity between every row of `a` and the matrix `b`.

    Computes a @ b.T scaled by the per-row norms of `a` and the overall
    (Frobenius) norm of `b`; with `b` a single row this is standard cosine
    similarity of each sample against that row.
    """
    dot_products = torch.mm(a, b.T)
    row_norms = torch.linalg.norm(a, dim=-1, keepdims=True)
    total_norm = torch.linalg.norm(b, keepdims=True)
    return dot_products / (row_norms * total_norm)
def get_sims(gradients):
    """Similarity of each per-sample gradient row to the mean gradient over all samples."""
    centroid = gradients.mean(dim=0, keepdims=True)
    return cosinesimilarity(gradients, centroid)
def get_mean_gradients(p, model, loader, criterion, optimizer):
    """Collect per-sample last-layer gradients for every example in `loader`.

    Uses the well-known trick for a linear last layer: the gradient w.r.t. the
    bias is the gradient of the loss w.r.t. the logits, and the gradient w.r.t.
    the weights is the outer product of that with the recorded penultimate
    embedding — so no per-sample backward through the whole network is needed.

    Args:
        p: config namespace; reads ``num_classes`` and ``batch_size``.
        model: project model exposing ``get_last_layer()``, ``no_grad`` and an
            ``embedding_recorder`` context manager that captures the embedding.
        loader: yields (images, labels, indices); assumed NOT shuffled relative
            to the ``i * p.batch_size`` slicing below — TODO confirm.
        criterion: loss; ``.sum()`` is applied so per-logit grads are summed
            over the batch regardless of the criterion's reduction —
            NOTE(review): with reduction='mean' grads are scaled by 1/batch,
            which cancels out in cosine similarity but verify if reused.
        optimizer: only used to clear stale grads each step.

    Returns:
        (gradients, img_indices): CUDA tensors of shape
        [num_samples, num_classes * (embedding_dim + 1)] and [num_samples].
    """
    num_params = len(list(model.get_last_layer().parameters()))  # NOTE(review): unused
    num_iter = len(loader)
    embedding_dim = model.get_last_layer().in_features
    sample_num = len(loader.dataset)
    # Flattened per-sample gradient buffer: bias grads followed by weight grads.
    gradients = torch.zeros(
        [sample_num, p.num_classes * (embedding_dim + 1)],
        requires_grad=False,
        device="cuda",
    )
    img_indices = torch.zeros([sample_num], requires_grad=False, device="cuda")
    # gradients = np.zeros([sample_num, p.num_classes * (embedding_dim + 1)])
    # img_indices = np.zeros(sample_num)
    progress_bar = tqdm(
        loader, total=num_iter, desc="Mean Gradients", leave=False, position=2
    )
    model.eval()
    model.no_grad = True  # project flag: freeze the feature extractor's autograd
    with model.embedding_recorder:
        for i, batch in enumerate(progress_bar):
            optimizer.zero_grad(set_to_none=True)
            images, labels, inds = batch
            torch.cuda.empty_cache()
            # `device` is a module-level global here — TODO confirm it is defined at import time
            images, labels = images.to(device), labels.squeeze().to(device)
            output = model(images).requires_grad_(True)
            # logger.info((output.shape, labels.shape, inds.shape))
            loss = criterion(output, labels).sum()
            batch_num = labels.shape[0]
            with torch.no_grad():
                # d(loss)/d(logits): equals the last-layer bias gradient per sample.
                bias_parameters_grads = torch.autograd.grad(
                    loss, output, retain_graph=True
                )[0].cpu()
                # Outer product embedding x logit-grad gives the weight gradient.
                weight_parameters_grads = model.embedding_recorder.embedding.cpu().view(
                    batch_num, 1, embedding_dim
                ).repeat(1, p.num_classes, 1) * bias_parameters_grads.view(
                    batch_num, p.num_classes, 1
                ).repeat(
                    1, 1, embedding_dim
                )
                gradients[
                    i * p.batch_size : min((i + 1) * p.batch_size, sample_num)
                ] = torch.cat(
                    (bias_parameters_grads, weight_parameters_grads.flatten(1)), dim=1
                )
                img_indices[
                    i * p.batch_size : min((i + 1) * p.batch_size, sample_num)
                ] = inds.squeeze()
    return gradients, img_indices
def train_epoch(
loader: torch.utils.data.DataLoader,
model: nn.Module,
criterion: nn.NLLLoss,
optimizer: torch.optim.Optimizer,
device: torch.device,
) -> None:
"""Trains model for one epoch
Args:
loader (torch.utils.data.DataLoader): Dataloader
model (nn.Module): model
criterion (nn.NLLLoss): Loss Function
optimizer (torch.optim.Optimizer): Optimizer
device (torch.device): device
"""
model.train()
model.no_grad = False
# losses, accs = [], []
for images, labels, _ in loader:
images, labels = images.to(device), labels.to(device)
optimizer.zero_grad(set_to_none=True)
output = model(images)
loss = criterion(output, labels)
loss.backward()
optimizer.step()
# losses.append(loss.item())
# acc = output.argmax(dim=1).eq(labels).float().mean().item()
# accs.append(acc)
model.eval()
# return np.mean(losses), np.mean(accs)
# def train_loop(p, model):
# # train_loader = DataLoader(data, p.batch_size, shuffle=True)
# train_loader = DataLoader(get_train_dataset(p), p.batch_size, shuffle=True, num_workers=2, pin_memory=True)
# test_data = get_test_dataset(p)
# test_loader = DataLoader(test_data, p.batch_size)
# criterion = nn.NLLLoss()
# optimizer = get_optimizer(p, model)
# losses, accs = [], []
# for epoch in trange(p.train_epochs, leave=True):
# model.train()
# loss, acc = train_epoch(
# train_loader, model, criterion, optimizer, device
# )
# losses.append(loss)
# accs.append(acc)
# gc.collect()
# torch.cuda.empty_cache()
# test_correct = test(test_loader, model, device)
# test_acc = test_correct / len(test_data) * 100
# return test_acc
def gradient_mathcing(p, data, logger):
    """Calculated mean gradient for the given dataset and find per sample similarity with mean gradients

    NOTE(review): the name keeps the original spelling "mathcing" (sic);
    renaming it would break external callers.

    Args:
        p (EasyDict): Hyperparameters
        data (Dataset): Dataset
        logger: configured logger instance
    Returns:
        tuple[np.ndarray, np.ndarray]: Arrays of shape (iter, len(dataset)) for similarities calculated for each sample for every iteration and corresponding indices
    """
    iterations = p.iter
    logger.debug(len(data))
    # assert len(data) % p.batch_size == 0, "All batches are not of same shape"
    if p.per_class:
        logger.info("Finding Mean Gradients for each class individually.")
        # build one Subset per class so gradients can be averaged classwise
        train_labels = torch.as_tensor(data.targets)
        cls_data = [
            Subset(data, torch.argwhere(train_labels == c))
            for c in range(p.num_classes)
        ]
        logger.debug(f"len datasets: {len(data)}")
    else:
        logger.info("Finding Mean Gradients for whole dataset at once.")
    seed_everything(p.seed)
    model = get_model(p, device)
    logger.info(
        "Model Summary\n"
        + str(summary(model, (p.channel, *p.im_size), verbose=0, device=device))
    )
    train_loader = DataLoader(
        data, p.batch_size, shuffle=True, num_workers=2, pin_memory=True
    )
    criterion = nn.CrossEntropyLoss().to(device)
    optimizer = get_optimizer(p, model)
    all_similarities, all_imginds = [], []
    progressbar = trange(iterations, desc="Iterations", position=0, leave=True)
    for k in progressbar:
        # if p.with_train:
        # moving to the end of loop
        # A fresh, re-seeded model is used per iteration UNLESS p.with_train
        # is set, in which case the same model keeps training between
        # iterations (see train_epoch call at the end of the loop body).
        if not p.with_train:
            seed_everything(p.seed + k)
            model = get_model(p, device)
        # slmodel, params, buffers = make_functional_with_buffers(model.fc)
        if not p.per_class:
            gradients, img_indices = get_mean_gradients(
                p, model, train_loader, criterion, optimizer
            )
            # similarities, img_indices = get_similarities(
            #     model, data, p.batch_size, mean_gradients
            # )
            similarities = get_sims(gradients).cpu().numpy()
            img_indices = img_indices.cpu().numpy()
        elif p.per_class:
            similarities, img_indices = [], []
            progressbar2 = tqdm(
                cls_data, desc="Per CLass Gradient Mathcing", position=1, leave=False
            )
            for dataset in progressbar2:
                loader = DataLoader(
                    dataset,
                    p.batch_size,
                    shuffle=True,
                    num_workers=2,
                    pin_memory=True,
                    # drop_last=True,
                )
                gradients, cls_all_inds = get_mean_gradients(
                    p, model, loader, criterion, optimizer
                )
                # cls_all_sims, cls_all_inds = get_similarities(
                #     model, dataset, p.batch_size, mean_gradients,
                # )
                cls_all_sims = get_sims(gradients).cpu().numpy().squeeze()
                cls_all_inds = cls_all_inds.cpu().numpy().squeeze()
                similarities.append(cls_all_sims)
                img_indices.append(cls_all_inds)
            # classes may hold unequal sample counts: np.stack then fails
            # with ValueError and we fall back to ragged object arrays
            try:
                similarities, img_indices = np.stack(similarities), np.stack(
                    img_indices
                )
            except ValueError:
                similarities, img_indices = np.array(
                    similarities, dtype=object
                ), np.array(img_indices, dtype=object)
        all_similarities.append(similarities)
        all_imginds.append(img_indices)
        if p.with_train:
            train_epoch(train_loader, model, criterion, optimizer, device)
        gc.collect()
        torch.cuda.empty_cache()
    if p.per_class:
        logger.info(progressbar2)
    logger.info(progressbar)
    all_similarities, all_imginds = np.stack(all_similarities), np.stack(all_imginds)
    return all_similarities, all_imginds
def main(p, logger):
    """Entry point: compute per-sample gradient similarities and save them.

    Args:
        p: hyperparameter namespace (expects seed, per_class, with_train,
           output_dir, dataset settings, ...)
        logger: configured logger instance

    Side effects: sets the module-global ``device`` and writes two .npy
    files (similarities and image indices) under ``p.output_dir``.
    """
    global device
    # BUG FIX: ``torch.cuda.is_available`` was referenced without calling it;
    # a bound method is always truthy, so the CPU branch was unreachable and
    # CUDA-less hosts crashed later.  Call the function instead.
    if torch.cuda.is_available():
        device = torch.device("cuda")
    else:
        device = torch.device("cpu")
        logger.warning("Using CPU to run the program.")
    seed_everything(p.seed)
    # dataset (wrapped so each sample also yields its index)
    train_data, _ = get_dataset(p)
    train_data = DatasetwithIndices(train_data)
    logger.info(f"Dataset\n{str(train_data)}")
    logger.info("Hyperparameters\n" + pformat(vars(p)))
    all_similarities, all_imginds = gradient_mathcing(p, train_data, logger)
    logger.info(
        f"All similarities shape: {all_similarities.shape}, All imgindices shape: {all_imginds.shape}"
    )
    np.save(
        p.output_dir
        / f"all_similarities{'_perclass' if p.per_class else ''}{'_withtrain' if p.with_train else ''}.npy",
        all_similarities,
    )
    np.save(
        p.output_dir
        / f"all_imginds{'_perclass' if p.per_class else ''}{'_withtrain' if p.with_train else ''}.npy",
        all_imginds,
    )
if __name__ == "__main__":
    # CLI arguments are defined centrally in get_parser(); the old inline
    # argparse definition that used to be commented out here was removed.
    parser = get_parser()
    args = parser.parse_args()
    # Results live under <dataset>[/output_dir]; pretrained/temp runs get
    # their own subdirectories so experiments do not overwrite each other.
    if args.output_dir is not None:
        args.output_dir = pathlib.Path(args.dataset.lower()) / args.output_dir
    else:
        args.output_dir = pathlib.Path(args.dataset.lower())
    if args.pretrained:
        args.output_dir = args.output_dir / "pretrained"
    if args.temp:
        args.output_dir = args.output_dir / "temp"  # was f"temp": no placeholders
    args.logdir = args.output_dir / "logs"
    args.logdir.mkdir(parents=True, exist_ok=True)
    # temporary fix: attributes expected downstream but unused by this script
    args.class_balanced = None
    args.augment = None
    logger = get_logger(args, "gradmatch")
    try:
        main(args, logger)
    except Exception:
        # grammar fix of the logged message ("A Error" -> "An Error")
        logger.exception("An Error Occurred")
| ABD-01/Coreset | src/grad_match.py | grad_match.py | py | 15,845 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 9,
"usage_type": "name"
},
{
"api_name"... |
1804307548 | '''
Loads up each users config and creates the service watchers
'''
import yaml
def load_config(filename):
    '''Load and validate a service-watcher config file.

    The file is YAML of the form:

    services:
        - name: openvpn
          input: 10
          output: 2
        - name: samba
          input: 10
          output: 2

    Returns the parsed config dict; raises AssertionError when the top-level
    'services' key is missing or a service entry lacks a required key.
    '''
    with open(filename) as f:
        # safe_load avoids arbitrary object construction from the YAML file
        # (plain yaml.load without an explicit Loader is deprecated/unsafe).
        config = yaml.safe_load(f)
    assert 'services' in config, "no services in config"
    # the previously built (and never used) ``services`` list was removed
    for service in config['services']:
        assert 'name' in service, 'Missing code for one service '
        assert 'input' in service, 'Missing input pin for {}'.format(service['name'])
        assert 'output' in service, 'Missing output pin for {}'.format(service['name'])
    return config
if __name__ == '__main__':
    # quick manual check against the bundled sample configuration
    sample = load_config('./sample.yaml')
    print(sample)
| jammers-ach/systemd-gpio | config.py | config.py | py | 830 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "yaml.load",
"line_number": 21,
"usage_type": "call"
}
] |
24177742490 | #!/usr/bin/python3
# -*- coding: utf-8 -*-
import os, sys, re, time, datetime, logging, random, string, logging.handlers, gzip, paramiko
import multiprocessing, subprocess, requests, urllib3, uuid
from threading import Timer
from configparser import ConfigParser
from Crypto.Cipher import AES
from iscpy.iscpy_dns.named_importer_lib import *
import base64, hashlib, zlib, json, lxml.etree, pexpect, dns, dns.resolver
from time import sleep
import threading, binascii, xml.dom.minidom, shutil
from daemon import Daemon
from waj import *
def get_transfer_ip_and_delay(soa):
    """Extract the transfer duration and master IP for serial ``soa`` from
    the named run log (``run_file``).

    The log is scanned for the first line mentioning 'serial <soa>'; from
    that point on, the last 'Transfer completed' line is parsed.

    Returns:
        (delay_ms, ip) on success, (0, '0.0.0.0') when no matching record
        exists or the log cannot be read/parsed.
    """
    target = 'serial ' + str(soa)
    try:
        with open(run_file) as f:
            lines = f.readlines()
        for i, line in enumerate(lines):
            if line.find(target) > 0:
                # keep the *last* completion record after the serial line,
                # matching the original selection behaviour
                record = None
                for candidate in lines[i:]:
                    if candidate.find('Transfer completed') > 0:
                        record = candidate
                if record is not None:
                    delay = int(1000 * float(record.split(', ')[-1].split(' ')[0]))
                    ip = record.split('#')[0].split(' ')[-1]
                    return delay, ip
                # FIX: previously an unbound local was referenced here when no
                # completion line followed, raising NameError that was only
                # swallowed by the broad except; fall through cleanly instead.
                break
    except Exception as e:
        logger.warning('get transfer ip and delay error:' + str(e))
    return 0, '0.0.0.0'
def get_server_from_file():
    """Collect the root-zone master IPs from the standard and the exigency
    source files.

    Returns:
        Comma-separated IP list (no trailing comma), or '' when either file
        cannot be read/parsed.
    """
    try:
        servers = []
        for source in (standard_source, exigency_source):
            with open(source, 'r') as f:
                named_data = MakeNamedDict(f.read())
            servers.extend(named_data['orphan_zones']['.']['options']['masters'])
        # join replaces the old manual "ip," concatenation + [:-1] slice
        return ','.join(servers)
    except Exception as e:
        logger.warning('get server from root source file error:' + str(e))
        return ''
def get_transfer_ip_and_delay_from_file(soa):
    """Probe the masters listed in ``root_source`` for serial ``soa``.

    Each configured master is queried for the root SOA; the first one whose
    answer carries the wanted serial wins, and the probe round-trip time
    (milliseconds) is reported as the delay.

    Returns:
        (delay_ms, ip) on success, (0, '0.0.0.0') on failure.
    """
    try:
        with open(root_source, 'r') as f:
            named_data = MakeNamedDict(f.read())
        masters = named_data['orphan_zones']['.']['options']['masters']
        soa_query = dns.message.make_query('.', 'SOA')
        for master_ip in masters:
            begin = datetime.datetime.now()
            answer = dns.query.udp(soa_query, master_ip, port=53, timeout=2)
            end = datetime.datetime.now()
            for rrset in answer.answer:
                for record in rrset.items:
                    if record.serial == soa:
                        return (end - begin).microseconds // 1000, master_ip
    except Exception as e:
        logger.warning('get transfer ip and delay from swotch_root.zone error:' + str(e))
    return 0, '0.0.0.0'
def get_root_file_size():
    """Return the size in bytes of the root zone file referenced by
    ``root_source``, or 0 when it cannot be determined."""
    try:
        with open(root_source, 'r') as f:
            parsed = MakeNamedDict(f.read())
        zone_file = parsed['orphan_zones']['.']['file']
        return os.path.getsize(zone_file)
    except Exception as e:
        logger.warning('get root_copy file size error:' + str(e))
        return 0
def upload_root_run_data(soa):
    """Assemble the root-zone transfer status record for serial ``soa`` and
    ship it (gzip-compressed) via FTP.

    The record is a pipe-separated line:
    dns_id|room_id|server_id|masters|transfer_ip|timestamp|result|size|soa|delay
    """
    result = 'get source or size error'
    delay, ip = get_transfer_ip_and_delay(soa)
    if delay == 0 and ip == '0.0.0.0':
        # run log had no usable record for this serial; probe masters directly
        delay, ip = get_transfer_ip_and_delay_from_file(soa)
    size = get_root_file_size()
    if delay != 0 and ip != '0.0.0.0' and size != 0:
        result = 'success'
    # FIX: resolve the configured master list unconditionally so ``server``
    # is always bound when the record string is built below, even on the
    # failure path (get_server_from_file returns '' on error).
    server = get_server_from_file()
    timestamp = time.strftime('%Y-%m-%d %H:%M:%S')
    root_soa_str_data = dns_id + '|' + root_copy_room_id + '|' + server_id + '|' + server + '|' + ip\
        + '|' + timestamp + '|' + result + '|' + str(size) + '|' + str(soa) + '|' + str(delay)
    file_name = 'zoneOperation_full_' + dns_id + '_0_' + time.strftime('%Y%m%d%H%M%S') + '.gz'
    logger.info(root_soa_str_data)
    try:
        cache_dir = '/var/drms_toggle_data/'
        with gzip.open(cache_dir + file_name, "wb") as f:
            f.write(bytes(root_soa_str_data, 'utf-8'))
        upload_to_ftp(cache_dir, file_name, '15')
    except Exception as e:
        logger.error('upload root resove data error:' + str(e))
def get_root_copy_soa():
    """Query the local resolver (127.0.0.1) for the root SOA serial.

    Returns the serial of the first SOA record in the answer, or 0 when the
    query fails or times out.
    """
    try:
        soa_query = dns.message.make_query('.', 'SOA')
        answer = dns.query.udp(soa_query, '127.0.0.1', port=53, timeout=2)
        for rrset in answer.answer:
            for record in rrset.items:
                return record.serial
    except Exception as e:
        logger.warning('Server exception get soa error:' + str(e))
    return 0
def check_soa_and_upload():
    """Poll the local root copy's SOA serial and upload status records.

    Runs forever (occupies the daemon's main thread):
    - every 60 s (when this host is a root copy) the local SOA serial is
      queried; a changed serial triggers an immediate status upload
    - an additional upload fires when the 900 s window elapses and the
      shared ``share_delay`` value signals it (assumes share_delay is a
      multiprocessing.Value-like object — TODO confirm)
    """
    global loop_count
    now_soa,root_soa = 0,0
    while True:
        if server_type == 'root_copy' and loop_count % 60 == 0:
            now_soa = get_root_copy_soa()
            if now_soa > 0:
                if root_soa != now_soa:
                    # serial changed since the last check: report the transfer
                    root_soa = now_soa
                    upload_root_run_data(now_soa)
                if loop_count >= 900 and int(share_delay.value) == 900:
                    # periodic heartbeat upload once per 900 s window
                    upload_root_run_data(now_soa)
            else:
                logger.warning('dns server can not work please check')
        if loop_count >= 900:
            loop_count = 0
        sleep(1)
        loop_count += 1
class DrmsToggle(Daemon):
    """Daemon entry point: runs the xgj/waj background tasks and then the
    blocking SOA check/upload loop."""

    def run(self):
        """Main daemon body, invoked by Daemon.start()."""
        logger.info('main process start at: %s' % time.ctime())
        # FIX: threading._start_new_thread is a private/legacy API; use the
        # public Thread interface.  daemon=True keeps the old behaviour of
        # the workers not blocking interpreter exit.
        threading.Thread(target=xgj_main_task, daemon=True).start()
        threading.Thread(target=waj_main_task, daemon=True).start()
        # blocks forever, so the line below is normally never reached
        check_soa_and_upload()
        logger.info('main process end at: %s' % time.ctime())
if __name__ == '__main__':
    # The pid file path doubles as the daemon's single-instance lock.
    drms_toggle = DrmsToggle('/var/drms_toggle_data/drms_toggle.pid')
    drms_toggle.start()
| heweiblog/bind_command | src/drms_toggle.py | drms_toggle.py | py | 4,738 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dns.message.make_query",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "dns.message",
"line_number": 65,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "datetim... |
38697991171 | """Define the Autorization Manager."""
from datetime import datetime, timedelta
from typing import Optional
import jwt
from fastapi import BackgroundTasks, Depends, HTTPException, Request, status
from fastapi.security import HTTPAuthorizationCredentials, HTTPBearer
from app.config.settings import get_settings
from app.database.db import get_database
from app.managers.email import EmailManager
from app.models.enums import RoleType
from app.models.user import User
from app.schemas.email import EmailTemplateSchema
from app.schemas.request.auth import TokenRefreshRequest
class ResponseMessages:
    """Canonical response/error strings used throughout the auth flow."""
    # token generation failures
    CANT_GENERATE_JWT = "Unable to generate the JWT"
    CANT_GENERATE_REFRESH = "Unable to generate the Refresh Token"
    CANT_GENERATE_VERIFY = "Unable to generate the Verification Token"
    # token validation failures ('nosec' silences bandit's hardcoded-secret check)
    INVALID_TOKEN = "That token is Invalid"  # nosec
    EXPIRED_TOKEN = "That token has Expired"  # nosec
    # verification / resend flow messages
    VERIFICATION_SUCCESS = "User succesfully Verified"
    USER_NOT_FOUND = "User not Found"
    ALREADY_VALIDATED = "You are already validated"
    VALIDATION_RESENT = "Validation email re-sent"
class AuthManager:
    """Handle JWT creation, refresh and email verification.

    Quirk to be aware of: ``verify`` and ``resend_verify_code`` signal
    SUCCESS by raising ``HTTPException`` with a 200 status — callers rely
    on FastAPI turning that into the response.
    """
    @staticmethod
    def encode_token(user):
        """Create and return a JWT access token for ``user``.

        Expiry comes from ``get_settings().access_token_expire_minutes``.
        NOTE(review): ``datetime.utcnow()`` is naive and deprecated in 3.12;
        consider ``datetime.now(timezone.utc)``.
        """
        try:
            payload = {
                "sub": user["id"],
                "exp": datetime.utcnow()
                + timedelta(minutes=get_settings().access_token_expire_minutes),
            }
            return jwt.encode(
                payload, get_settings().secret_key, algorithm="HS256"
            )
        except Exception as exc:
            # TODO: log the exception before mapping it to a 401
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED, ResponseMessages.CANT_GENERATE_JWT
            ) from exc
    @staticmethod
    def encode_refresh_token(user):
        """Create and return a long-lived (30 day) JWT refresh token."""
        try:
            payload = {
                "sub": user["id"],
                "exp": datetime.utcnow() + timedelta(minutes=60 * 24 * 30),
                "typ": "refresh",
            }
            return jwt.encode(
                payload, get_settings().secret_key, algorithm="HS256"
            )
        except Exception as exc:
            # TODO: log the exception before mapping it to a 401
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED,
                ResponseMessages.CANT_GENERATE_REFRESH,
            ) from exc
    @staticmethod
    def encode_verify_token(user):
        """Create and return a short-lived (10 minute) email-verification JWT."""
        try:
            payload = {
                "sub": user["id"],
                "exp": datetime.utcnow() + timedelta(minutes=10),
                "typ": "verify",
            }
            return jwt.encode(
                payload, get_settings().secret_key, algorithm="HS256"
            )
        except Exception as exc:
            # TODO: log the exception before mapping it to a 401
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED,
                ResponseMessages.CANT_GENERATE_VERIFY,
            ) from exc
    @staticmethod
    async def refresh(refresh_token: TokenRefreshRequest, database):
        """Refresh an expired JWT token, given a valid Refresh token.

        Raises 401 for a wrong token type, a banned user, or an
        expired/invalid token; 404 when the subject no longer exists.
        NOTE(review): a token without a "typ" claim raises an uncaught
        KeyError here — confirm upstream guarantees the claim.
        """
        try:
            payload = jwt.decode(
                refresh_token.refresh,
                get_settings().secret_key,
                algorithms=["HS256"],
            )
            if payload["typ"] != "refresh":
                raise HTTPException(
                    status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
                )
            user_data = await database.fetch_one(
                User.select().where(User.c.id == payload["sub"])
            )
            if not user_data:
                raise HTTPException(
                    status.HTTP_404_NOT_FOUND, ResponseMessages.USER_NOT_FOUND
                )
            # block a banned user
            if user_data["banned"]:
                raise HTTPException(
                    status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
                )
            new_token = AuthManager.encode_token(user_data)
            return new_token
        except jwt.ExpiredSignatureError as exc:
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED, ResponseMessages.EXPIRED_TOKEN
            ) from exc
        except jwt.InvalidTokenError as exc:
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
            ) from exc
    @staticmethod
    async def verify(code: str, database):
        """Verify a new User's Email using the token they were sent.

        Marks the user verified; success is signalled by raising
        HTTPException(200) — see class docstring.  Already-verified and
        banned users are rejected as invalid tokens.
        """
        try:
            payload = jwt.decode(
                code,
                get_settings().secret_key,
                algorithms=["HS256"],
            )
            user_data = await database.fetch_one(
                User.select().where(User.c.id == payload["sub"])
            )
            if not user_data:
                raise HTTPException(
                    status.HTTP_404_NOT_FOUND, ResponseMessages.USER_NOT_FOUND
                )
            if payload["typ"] != "verify":
                raise HTTPException(
                    status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
                )
            # block a banned user
            if user_data["banned"]:
                raise HTTPException(
                    status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
                )
            if user_data["verified"]:
                raise HTTPException(
                    status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
                )
            await database.execute(
                User.update()
                .where(User.c.id == payload["sub"])
                .values(
                    verified=True,
                )
            )
            # success path: FastAPI renders this 200 "exception" as the response
            raise HTTPException(
                status.HTTP_200_OK, ResponseMessages.VERIFICATION_SUCCESS
            )
        except jwt.ExpiredSignatureError as exc:
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED, ResponseMessages.EXPIRED_TOKEN
            ) from exc
        except jwt.InvalidTokenError as exc:
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
            ) from exc
    @staticmethod
    async def resend_verify_code(
        user: int, background_tasks: BackgroundTasks, database
    ):  # pragma: no cover (code not used at this time)
        """Resend the user a verification email.

        Rejects missing (404), banned (401) and already-verified (400)
        users; on success queues the templated welcome email and raises
        HTTPException(200) — see class docstring.
        """
        user_data = await database.fetch_one(
            User.select().where(User.c.id == user)
        )
        if not user_data:
            raise HTTPException(
                status.HTTP_404_NOT_FOUND, ResponseMessages.USER_NOT_FOUND
            )
        # block a banned user
        if user_data["banned"]:
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
            )
        if user_data["verified"]:
            raise HTTPException(
                status.HTTP_400_BAD_REQUEST,
                ResponseMessages.ALREADY_VALIDATED,
            )
        email = EmailManager()
        email.template_send(
            background_tasks,
            EmailTemplateSchema(
                recipients=[user_data["email"]],
                subject=f"Welcome to {get_settings().api_title}!",
                body={
                    "application": f"{get_settings().api_title}",
                    "user": user_data["email"],
                    "base_url": get_settings().base_url,
                    "verification": AuthManager.encode_verify_token(user_data),
                },
                template_name="welcome.html",
            ),
        )
        # await email.simple_send(
        #     EmailSchema(
        #         recipients=[user_data["email"]],
        #         subject=f"Welcome to {get_settings().api_title}!",
        #         body="Test Email",
        #     ),
        # )
        raise HTTPException(
            status.HTTP_200_OK, ResponseMessages.VALIDATION_RESENT
        )
class CustomHTTPBearer(HTTPBearer):
    """HTTPBearer that decodes the JWT and attaches the user to the request.

    Rejects tokens for missing, banned, or unverified users with 401.
    """

    async def __call__(
        self, request: Request, db=Depends(get_database)
    ) -> Optional[HTTPAuthorizationCredentials]:
        """Validate the bearer token; return (and stash on the request) the user row."""
        res = await super().__call__(request)
        try:
            payload = jwt.decode(
                res.credentials, get_settings().secret_key, algorithms=["HS256"]
            )
            user_data = await db.fetch_one(
                User.select().where(User.c.id == payload["sub"])
            )
            # FIX: a valid token whose subject no longer exists used to hit a
            # TypeError on the subscripts below (fetch_one returns None) and
            # surface as a 500; reject it as an invalid token instead.
            if user_data is None:
                raise HTTPException(
                    status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
                )
            # block a banned or unverified user
            if user_data["banned"] or not user_data["verified"]:
                raise HTTPException(
                    status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
                )
            request.state.user = user_data
            return user_data
        except jwt.ExpiredSignatureError as exc:
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED, ResponseMessages.EXPIRED_TOKEN
            ) from exc
        except jwt.InvalidTokenError as exc:
            raise HTTPException(
                status.HTTP_401_UNAUTHORIZED, ResponseMessages.INVALID_TOKEN
            ) from exc


oauth2_schema = CustomHTTPBearer()
def is_admin(request: Request):
    """Reject the request with 403 unless the authenticated user is an Admin."""
    user = request.state.user
    if user["role"] != RoleType.admin:
        raise HTTPException(status.HTTP_403_FORBIDDEN, "Forbidden")
def can_edit_user(request: Request):
    """Check if the user can edit this resource.

    Allowed when the caller owns the resource or is an Admin; otherwise 403.
    """
    user = request.state.user
    owns_resource = user["id"] == int(request.path_params["user_id"])
    if user["role"] != RoleType.admin and not owns_resource:
        raise HTTPException(status.HTTP_403_FORBIDDEN, "Forbidden")
def is_banned(request: Request):
    """Block banned users from accessing the route (403)."""
    banned = request.state.user["banned"]
    if banned:
        raise HTTPException(status.HTTP_403_FORBIDDEN, "Banned!")
| seapagan/fastapi-template | app/managers/auth.py | auth.py | py | 10,228 | python | en | code | 45 | github-code | 36 | [
{
"api_name": "datetime.datetime.utcnow",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "datetime.timedelta",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "app.con... |
73339229863 | from pydantic import BaseModel, Field
from faststream import FastStream, Logger
from faststream.kafka import KafkaBroker
class Employee(BaseModel):
    # Payload of a "new_employee" event; all three fields are required.
    # (Deliberately no class docstring: pydantic would surface it as the
    # model description in the generated schema.)
    name: str = Field(..., examples=["Mickey"], description="name example")
    surname: str = Field(..., examples=["Mouse"], description="surname example")
    email: str = Field(
        ..., examples=["mikey.mouse@mail.ai"], description="email example"
    )
# Kafka connection and FastStream application object
broker = KafkaBroker("localhost:9092")
app = FastStream(broker)
# Downstream topics fed by the new_employee handler below
to_notify_accounting = broker.publisher("notify_accounting")
to_notify_all_employees = broker.publisher("notify_all_employees")
@broker.subscriber("new_employee")
async def on_new_employee(msg: Employee, logger: Logger) -> None:
    """Handle a new-employee event: notify accounting and announce the hire."""
    logger.info(msg)
    full_name = f"{msg.name} {msg.surname}"
    await to_notify_accounting.publish(
        "Please prepare all the paper work for: " + full_name
    )
    await to_notify_all_employees.publish(
        "Please welcome our new colleague: " + full_name
    )
| airtai/faststream-gen | search/examples/example_new_employee/app.py | app.py | py | 973 | python | en | code | 19 | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "pydantic.Field",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pydantic.Field",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pydantic.Field",
"l... |
27283985016 | from pypaq.lipytools.files import r_json
from pypaq.lipytools.plots import two_dim_multi
from pypaq.lipytools.moving_average import MovAvg
from typing import List, Dict, Optional
from envy import RESULTS_FP
from run.functions import get_saved_dmks_names
def get_ranks(
    all_results: Optional[Dict] = None,
    mavg_factor=0.3,
) -> Dict:
    """Build per-DMK rank histories over the saved loop results.

    Args:
        all_results: results dict; loaded from RESULTS_FP when None
        mavg_factor: moving-average factor for the smoothed ranks

    Returns:
        dict with 'ranks' (raw rank per loop, None when absent) and
        'ranks_smooth' (moving average of the ranks, restarted after gaps)
    """
    if all_results is None:
        all_results = r_json(RESULTS_FP)

    loops = all_results['loops']

    # every DMK name that ever appears in any loop
    all_names = set()
    for lix in loops:
        all_names.update(loops[lix])

    # raw rank history: position in the loop listing, None when absent
    ranks = {dn: [] for dn in all_names}
    for lix in loops:
        present = set()
        for position, dn in enumerate(loops[lix]):
            present.add(dn)
            ranks[dn].append(position)
        for dn in ranks:
            if dn not in present:
                ranks[dn].append(None)

    # moving-average smoothing; the averager restarts after every gap
    ranks_smooth = {}
    for dn, history in ranks.items():
        ma = MovAvg(mavg_factor)
        smoothed = []
        for v in history:
            if v is None:
                ma = MovAvg(mavg_factor)
                smoothed.append(None)
            else:
                smoothed.append(ma.upd(v))
        ranks_smooth[dn] = smoothed

    return {
        'ranks': ranks,
        'ranks_smooth': ranks_smooth,
    }
if __name__ == "__main__":
    # Quick manual check: compute and dump the rank histories.
    # (The old two_dim_multi plotting snippets lived here inside a no-op
    # string literal and were removed.)
    print(get_ranks())
{
"api_name": "typing.Optional",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "pypaq.lipytools.files.r_json",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "envy.RESULTS... |
3240003586 | '''
Set Dataset
This file was developed as a project for DACO subject from Bioengeneering Masters at FEUP
It separates the images from a folder into a respective one according to its class
It helps to better analyse and organize the project
'''
import os
import pandas as pd
from torch.utils.data import DataLoader
import torch
from torch.utils.data import Dataset
from torchvision import transforms
import numpy as np
import shutil
# Paths/constants used by the downstream feature-extraction scripts
TRAIN = "train_features"
TEST = "test_features"
BATCH_SIZE = 32
BASE_CSV_PATH = "features"
MODEL_PATH = "features/model.cpickle"
LE_PATH = "features/le.cpickle"

# Class folder names, in one-hot label-vector order
CLASSES = [
    'antelope_duiker',
    'bird',
    'blank',
    'civet_genet',
    'hog',
    'leopard',
    'monkey_prosimian',
    'rodent',
]

# Feature tables are indexed by image id; labels are one-hot per class
train_features = pd.read_csv("train_features.csv", index_col="id")
test_features = pd.read_csv("test_features.csv", index_col="id")
train_labels = pd.read_csv("train_labels.csv", index_col="id")

# frac=1 with a fixed seed shuffles the full label table reproducibly
frac = 1
y = train_labels.sample(frac=frac, random_state=1)
x = train_features.loc[y.index].filepath.to_frame()
# This class was given in the benchmark.ipynb provided by the competition. It pre-processes the images and gets data from it
# This will be applyed to organize the images into folders
class ImagesDataset(Dataset):
    """Reads in an image, transforms pixel values, and serves
    a dictionary containing the image id, image tensors, and label.

    NOTE(review): as written, ``__getitem__`` returns the image *filepath*
    (a string), not a loaded tensor, and ``self.transform`` is built but
    never applied — confirm whether that is intended here (this script only
    copies files by path).
    """
    def __init__(self, x_df, y_df=None):
        # x_df: DataFrame with a "filepath" column, indexed by image id
        # y_df: optional one-hot label DataFrame aligned with x_df
        self.data = x_df
        self.label = y_df
        # standard ImageNet preprocessing pipeline (currently unused, see note)
        self.transform = transforms.Compose(
            [
                transforms.Resize((224, 224)),
                transforms.ToTensor(),
                transforms.Normalize(
                    mean=(0.485, 0.456, 0.406), std=(0.229, 0.224, 0.225)
                ),
            ]
        )
    def __getitem__(self, index):
        image = self.data.iloc[index]["filepath"]
        image_id = self.data.index[index]
        if self.label is None:
            # test-time sample: no label available
            sample = {"image_id": image_id, "image": image}
        else:
            label = torch.tensor(self.label.iloc[index].values,
                                 dtype=torch.float)
            sample = {"image_id": image_id, "image": image, "label": label}
        return sample
    def __len__(self):
        return len(self.data)
train_dataset = ImagesDataset(x, y)
train_dataloader = DataLoader(train_dataset, batch_size=32)

# Copy every training image into images/train/<class name>/ so the data can
# be browsed (and consumed by folder-based loaders) per class.
for i in range(len(train_dataset)):
    # fetch the sample once instead of three separate __getitem__ calls
    sample = train_dataset[i]
    filename = sample['image_id']
    # the one-hot label vector has a single 1; argmax gives the class index
    label = CLASSES[int(sample['label'].argmax().item())]
    dirPath = "images/train/" + label
    # exist_ok avoids the check-then-create race of the old exists() guard
    os.makedirs(dirPath, exist_ok=True)
    p = dirPath + '/' + filename + '.jpg'
    shutil.copy2(sample['image'], p)
{
"api_name": "pandas.read_csv",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "torch.utils.data.Data... |
10899219070 | import unittest
import torch
import numpy as np
import onnx
from onnx import helper
from onnx.helper import make_tensor_value_info, make_sequence_value_info
from functools import reduce
from interp.interp_utils import AbstractionInitConfig
from interp.interp_operator import Abstraction, Interpreter
from tests.test_abstraction import tf_equal, correct_abstraction
class TestAbstraction(unittest.TestCase):
def abst_shape_check(self, obj: Abstraction):
target_size = [len(x) for x in obj.splits]
self.assertEqual(obj.get_dim(), len(obj.splits))
self.assertEqual(obj.lb.dim(), obj.get_dim())
self.assertEqual(obj.ub.dim(), obj.get_dim())
self.assertEqual(obj.lb.shape, torch.Size(target_size))
self.assertEqual(obj.ub.shape, torch.Size(target_size))
    def test_loop_11(self):
        """Check interp_Loop against the ONNX 'loop_11' reference example."""
        # case loop_11 from https://github.com/onnx/onnx/blob/master/docs/Operators.md#Loop
        # Given a tensor x of values [x1, ..., xN], and initial tensor y
        # sum up its elements using a scan
        # returning the final state (y+x1+x2+...+xN) as well the scan_output
        # [y+x1, y+x1+x2, ..., y+x1+x2+...+xN]
        y_in = onnx.helper.make_tensor_value_info('y_in', onnx.TensorProto.FLOAT, [1])
        y_out = onnx.helper.make_tensor_value_info('y_out', onnx.TensorProto.FLOAT, [1])
        scan_out = onnx.helper.make_tensor_value_info('scan_out', onnx.TensorProto.FLOAT, [1])
        cond_in = onnx.helper.make_tensor_value_info('cond_in', onnx.TensorProto.BOOL, [])
        cond_out = onnx.helper.make_tensor_value_info('cond_out', onnx.TensorProto.BOOL, [])
        iter_count = onnx.helper.make_tensor_value_info('iter_count', onnx.TensorProto.INT64, [])
        x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
        y = np.array([-2]).astype(np.float32)
        # the scanned values x are baked into the loop body as a constant
        x_const_node = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['x'],
            value=onnx.helper.make_tensor(
                name='const_tensor_x',
                data_type=onnx.TensorProto.FLOAT,
                dims=x.shape,
                vals=x.flatten().astype(float),
            )
        )
        one_const_node = onnx.helper.make_node(
            'Constant',
            inputs=[],
            outputs=['one'],
            value=onnx.helper.make_tensor(
                name='const_tensor_one',
                data_type=onnx.TensorProto.INT64,
                dims=(),
                vals=[1]
            )
        )
        # per-iteration body: slice x[i:i+1] and add it to the carried y
        i_add_node = onnx.helper.make_node(
            'Add',
            inputs=['iter_count', 'one'],
            outputs=['end']
        )
        start_unsqueeze_node = onnx.helper.make_node(
            'Unsqueeze',
            inputs=['iter_count'],
            outputs=['slice_start'],
            axes=[0]
        )
        end_unsqueeze_node = onnx.helper.make_node(
            'Unsqueeze',
            inputs=['end'],
            outputs=['slice_end'],
            axes=[0]
        )
        slice_node = onnx.helper.make_node(
            'Slice',
            inputs=['x', 'slice_start', 'slice_end'],
            outputs=['slice_out']
        )
        y_add_node = onnx.helper.make_node(
            'Add',
            inputs=['y_in', 'slice_out'],
            outputs=['y_out']
        )
        identity_node = onnx.helper.make_node(
            'Identity',
            inputs=['cond_in'],
            outputs=['cond_out']
        )
        scan_identity_node = onnx.helper.make_node(
            'Identity',
            inputs=['y_out'],
            outputs=['scan_out']
        )
        loop_body = onnx.helper.make_graph(
            [identity_node, x_const_node, one_const_node, i_add_node,
             start_unsqueeze_node, end_unsqueeze_node, slice_node, y_add_node,
             scan_identity_node],
            'loop_body',
            [iter_count, cond_in, y_in],
            [cond_out, y_out, scan_out]
        )
        node = onnx.helper.make_node(
            'Loop',
            inputs=['trip_count', 'cond', 'y'],
            outputs=['res_y', 'res_scan'],
            body=loop_body
        )
        # reference inputs/outputs from the ONNX example (5 iterations)
        trip_count = np.array(5).astype(np.int64)
        res_y = np.array([13]).astype(np.float32)
        cond = np.array(1).astype(bool)
        res_scan = np.array([-1, 1, 4, 8, 13]).astype(np.float32).reshape((5, 1))
        # expect(node, inputs=[trip_count, cond, y], outputs=[res_y, res_scan],
        #        name='test_loop11', opset_imports=[onnx.helper.make_opsetid("", 11)])
        interp = Interpreter()
        # stride=1 makes the abstraction exact, so results should be tight
        cfg_precise = AbstractionInitConfig(diff=True, from_init=True, stride=1)
        abst_trip_count = Abstraction().load(cfg_precise, 'trip_count', trip_count.shape, 'INT', trip_count)
        abst_cond = Abstraction().load(cfg_precise, 'cond', cond.shape, 'BOOL', cond)
        # NOTE(review): y is float32 but is loaded with dtype tag 'INT' —
        # confirm this is intentional for the abstraction loader.
        abst_y = Abstraction().load(cfg_precise, 'y', y.shape, 'INT', y)
        outputs, _ = interp.interp_Loop([abst_trip_count, abst_cond, abst_y], node, 'Loop', 'res_y')
        self.assertTrue(correct_abstraction(outputs[0], np.array([13]), tight=True))
        self.assertTrue(correct_abstraction(outputs[1], np.array([-1,1,4,8,13]).reshape((5,1)), tight=True))
def test_loop_13(self):
# case loop_13 from https://github.com/onnx/onnx/blob/master/docs/Operators.md#Loop
# Given a tensor x of values [x1, ..., xN],
# Return a sequence of tensors of
# [[x1], [x1, x2], ..., [x1, ..., xN]]
seq_in = onnx.helper.make_sequence_value_info('seq_in', onnx.TensorProto.FLOAT, None)
seq_out = onnx.helper.make_sequence_value_info('seq_out', onnx.TensorProto.FLOAT, None)
cond_in = onnx.helper.make_tensor_value_info('cond_in', onnx.TensorProto.BOOL, [])
cond_out = onnx.helper.make_tensor_value_info('cond_out', onnx.TensorProto.BOOL, [])
iter_count = onnx.helper.make_tensor_value_info('iter_count', onnx.TensorProto.INT64, [])
x = np.array([1, 2, 3, 4, 5]).astype(np.float32)
x_const_node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['x'],
value=onnx.helper.make_tensor(
name='const_tensor_x',
data_type=onnx.TensorProto.FLOAT,
dims=x.shape,
vals=x.flatten().astype(float),
)
)
one_const_node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['one'],
value=onnx.helper.make_tensor(
name='const_tensor_one',
data_type=onnx.TensorProto.INT64,
dims=(),
vals=[1]
)
)
zero_const_node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['slice_start'],
value=onnx.helper.make_tensor(
name='const_tensor_zero',
data_type=onnx.TensorProto.INT64,
dims=(1,),
vals=[0]
)
)
axes_node = onnx.helper.make_node(
'Constant',
inputs=[],
outputs=['axes'],
value=onnx.helper.make_tensor(
name='const_tensor_axes',
data_type=onnx.TensorProto.INT64,
dims=(1,),
vals=[0]
)
)
add_node = onnx.helper.make_node(
'Add',
inputs=['iter_count', 'one'],
outputs=['end']
)
# end_unsqueeze_node = onnx.helper.make_node(
# 'Unsqueeze',
# inputs=['end', 'axes'],
# outputs=['slice_end']
# )
slice_node = onnx.helper.make_node(
'Slice',
inputs=['x', 'slice_start', 'end'],
outputs=['slice_out']
)
insert_node = onnx.helper.make_node(
'SequenceInsert',
inputs=['seq_in', 'slice_out'],
outputs=['seq_out']
)
identity_node = onnx.helper.make_node(
'Identity',
inputs=['cond_in'],
outputs=['cond_out']
)
loop_body = onnx.helper.make_graph(
[identity_node, x_const_node, one_const_node, zero_const_node, add_node,
axes_node, slice_node, insert_node],
'loop_body',
[iter_count, cond_in, seq_in],
[cond_out, seq_out]
)
node = onnx.helper.make_node(
'Loop',
inputs=['trip_count', 'cond', 'seq_empty'],
outputs=['seq_res'],
body=loop_body
)
trip_count = np.array(5).astype(np.int64)
seq_empty = [] # type: List[Any]
seq_res = [x[:int(i)] for i in x]
cond = np.array(1).astype(bool)
interp = Interpreter()
cfg_precise = AbstractionInitConfig(diff=True, from_init=True, stride=1)
abst_trip_count = Abstraction().load(cfg_precise, 'trip_count', trip_count.shape, 'INT', trip_count)
abst_seq_empty = Abstraction().load(cfg_precise, 'seq_empty', [1], 'INT', seq_empty)
abst_cond = Abstraction().load(cfg_precise, 'cond', cond.shape, 'BOOL', cond)
outputs, _ = interp.interp_Loop([abst_trip_count, abst_cond, abst_seq_empty], node, 'Loop', 'output')
seq_res = [x[:int(i)] for i in x]
# outputs[0].print()
self.assertTrue(correct_abstraction(outputs[0], seq_res))
if __name__ == '__main__':
unittest.main() | llylly/RANUM | tests/test_abstraction_loop.py | test_abstraction_loop.py | py | 9,508 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "interp.interp_operator.Abstraction",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "torch.Size",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "... |
14940527187 | from __future__ import print_function
import os, sys, tempfile, shutil, tarfile
import log, argdb
from urllib.request import urlretrieve
from urllib import parse as urlparse_local
import subprocess
import socket
from shutil import which # just to break compatibility with python2
# Fix parsing for nonstandard schemes
urlparse_local.uses_netloc.extend(['bk', 'ssh', 'svn'])
class Package:
packages = [] # list of packages processed and added so far
def __init__(self,argdb,log):
self.installable = False # an already installed package can be picked --with-xxx-dir
self.downloadable = False # package can be downloaded and installed with --download-xxx
self.downloadpackage = 0
self.packagetype = '' # can be 'gnu', 'cmake', 'source_c', or empty
self.packagedir = ''
self.packagelibs = ''
self.packageincludes = ''
self.packageurl = ''
self.buildflags = ''
self.log = log
self.supportsscalar = ['real', 'complex']
self.supportssingle = False
self.supports64bint = False
self.fortran = False
self.hasheaders = False
self.requested = False
self.havepackage = False
def RunCommand(self,instr):
try:
self.log.write('- '*35+'\nRunning command:\n'+instr+'\n'+'- '*35)
except AttributeError: pass
try:
output = subprocess.check_output(instr,shell=True,stderr=subprocess.STDOUT)
result = 0
except subprocess.CalledProcessError as ex:
output = ex.output
result = ex.returncode
output = output.decode(encoding='UTF-8',errors='replace').rstrip()
try:
self.log.write('Output:\n'+output+'\n'+'- '*35)
except AttributeError: pass
return (result,output)
def ProcessArgs(self,argdb,petscpackages=''):
if hasattr(self,'petscdepend') and self.petscdepend in petscpackages:
self.requested = True
if self.installable and not hasattr(self,'petscdepend'):
string,found = argdb.PopPath('with-'+self.packagename+'-dir',exist=True)
if found:
self.requested = True
self.packagedir = string
string,found = argdb.PopString('with-'+self.packagename+'-lib')
if found:
if self.packagedir:
self.log.Exit('Specify either "--with-'+self.packagename+'-dir" or "--with-'+self.packagename+'-lib%s", but not both!' % (' --with-'+self.packagename+'-include' if self.hasheaders else ''))
self.requested = True
self.packagelibs = string
if self.hasheaders:
string,found = argdb.PopString('with-'+self.packagename+'-include')
if found:
self.requested = True
self.packageincludes = string
if self.installable:
value,found = argdb.PopBool('with-'+self.packagename)
if found:
self.requested = value
if self.downloadable:
flagsfound = False
if self.packagetype == 'gnu':
string,flagsfound = argdb.PopString('download-'+self.packagename+'-configure-arguments')
elif self.packagetype == 'cmake':
string,flagsfound = argdb.PopString('download-'+self.packagename+'-cmake-arguments')
elif self.packagetype == 'source_c':
string,flagsfound = argdb.PopString('download-'+self.packagename+'-cflags')
url,flag,found = argdb.PopUrl('download-'+self.packagename)
if found:
if self.requested:
self.log.Exit('Cannot request both download and install simultaneously')
self.requested = True
self.download = True
self.packageurl = url
self.downloadpackage = flag
if flagsfound:
if not hasattr(self,'download') or not self.download:
if self.packagetype == 'gnu':
self.log.Exit('--download-'+self.packagename+'-configure-arguments must be used together with --download-'+self.packagename)
elif self.packagetype == 'cmake':
self.log.Exit('--download-'+self.packagename+'-cmake-arguments must be used together with --download-'+self.packagename)
elif self.packagetype == 'source_c':
self.log.Exit('--download-'+self.packagename+'-cflags must be used together with --download-'+self.packagename)
self.buildflags = string
def Process(self,slepcconf,slepcvars,slepcrules,slepc,petsc,archdir=''):
self.make = petsc.make
if petsc.buildsharedlib:
self.slflag = petsc.cc_linker_slflag
if self.requested:
name = self.packagename.upper()
if self.downloadpackage:
if hasattr(self,'version') and self.packageurl=='':
self.log.NewSection('Installing '+name+' version '+self.version+'...')
else:
self.log.NewSection('Installing '+name+'...')
self.Precondition(slepc,petsc)
self.DownloadAndInstall(slepcconf,slepcvars,slepc,petsc,archdir,slepc.prefixdir)
elif self.installable:
self.log.NewSection('Checking '+name+'...')
self.Precondition(slepc,petsc)
if petsc.buildsharedlib:
self.packagelibs = self.DistilLibList(self.packagelibs,petsc)
self.Check(slepcconf,slepcvars,petsc,archdir)
if not self.havepackage: self.log.setLastFailed()
try:
self.LoadVersion(slepcconf)
self.log.write('Version number for '+name+' is '+self.iversion)
except AttributeError:
pass
if self.havepackage: Package.packages.append(self)
else: # not requested
if hasattr(self,'SkipInstall'):
self.SkipInstall(slepcrules)
def Precondition(self,slepc,petsc):
package = self.packagename.upper()
if petsc.scalar == 'complex':
if 'complex' not in self.supportsscalar:
self.log.Exit(package+' does not support complex scalars')
elif petsc.scalar == 'real':
if 'real' not in self.supportsscalar:
self.log.Exit(package+' is supported only with complex scalars')
if petsc.precision == 'single':
if not self.supportssingle:
self.log.Exit(package+' is supported only in double precision')
elif petsc.precision != 'double':
self.log.Exit('Precision '+petsc.precision+' is not supported for external packages')
if petsc.ind64 and not self.supports64bint:
self.log.Exit(package+' cannot be used with 64-bit integers')
if self.downloadpackage and self.fortran and not hasattr(petsc,'fc'):
self.log.Exit('Option --download-'+self.packagename+' requires a Fortran compiler')
def Require(self,packagename):
for p in Package.packages:
if p.packagename.upper() == packagename.upper():
return p
self.log.Exit('The package '+self.packagename.upper()+' requires configuring also with '+packagename.upper()+". Run configure --help for details on how to install it")
def DistilLibList(self,packagelibs,petsc):
libs = []
for l in packagelibs.split():
if l.endswith(petsc.sl_linker_suffix):
filename = os.path.basename(l)
libs.append('-L'+l.rstrip(filename))
libs.append(self.slflag+l.rstrip(filename))
libs.append('-l'+filename.lstrip('lib').rstrip('.'+petsc.sl_linker_suffix))
else:
libs.append(l)
newldflags = []
newlibs = []
dupflags = ['-L',self.slflag]
for j in libs:
# remove duplicate -L, -Wl,-rpath options - and only consecutive -l options
if j in newldflags and any([j.startswith(flg) for flg in dupflags]): continue
if newlibs and j == newlibs[-1]: continue
if list(filter(j.startswith,['-l'])) or list(filter(j.endswith,['.lib','.a','.so','.o'])) or j in ['-Wl,-Bstatic','-Wl,-Bdynamic','-Wl,--start-group','-Wl,--end-group']:
newlibs.append(j)
else:
newldflags.append(j)
liblist = ' '.join(newldflags + newlibs)
return liblist
def GetArchiveName(self):
'''Return name of archive after downloading'''
if self.packageurl=='':
archivename = self.archive
else:
parsed = urlparse_local.urlparse(self.packageurl)
archivename = os.path.basename(parsed[2])
if archivename[0] == 'v':
archivename = archivename[1:]
try:
if archivename[0].isdigit() or int(archivename.split('.')[0],16):
archivename = self.packagename+'-'+archivename
except: pass
return archivename
def GetDirectoryName(self):
'''Return name of the directory after extracting the tarball'''
dirname = self.GetArchiveName()
for suffix in ('.tar.gz','.tgz'):
if dirname.endswith(suffix):
dirname = dirname[:-len(suffix)]
return dirname
def MissingTarball(self,downloaddir):
'''Check if tarball is missing in downloaddir'''
if self.downloadable and hasattr(self,'download') and self.download:
localFile = os.path.join(downloaddir,self.GetArchiveName())
if not os.path.exists(localFile):
url = self.packageurl
if url=='':
url = self.url
return self.packagename+': '+url+' --> '+localFile
def Download(self,externdir,downloaddir):
# Quick return: check if source is already available
if os.path.exists(os.path.join(externdir,self.GetDirectoryName())):
self.log.write('Using '+os.path.join(externdir,self.GetDirectoryName()))
return os.path.join(externdir,self.GetDirectoryName())
if downloaddir:
# Get tarball from download dir
localFile = os.path.join(downloaddir,self.GetArchiveName())
if not os.path.exists(localFile):
self.log.Exit('Could not find file '+self.GetArchiveName()+' under '+downloaddir)
url = localFile
filename = os.path.basename(url)
else:
# Download tarball
url = self.packageurl
if url=='':
url = self.url
if os.path.exists(url):
url = 'file:'+url
filename = os.path.basename(urlparse_local.urlparse(url)[2])
localFile = os.path.join(externdir,self.GetArchiveName())
self.log.write('Downloading '+url+' to '+localFile)
if os.path.exists(localFile):
os.remove(localFile)
try:
sav_timeout = socket.getdefaulttimeout()
socket.setdefaulttimeout(30)
urlretrieve(url, localFile)
socket.setdefaulttimeout(sav_timeout)
except Exception as e:
socket.setdefaulttimeout(sav_timeout)
failureMessage = '''\
Unable to download package %s from: %s
* If URL specified manually - perhaps there is a typo?
* If your network is disconnected - please reconnect and rerun ./configure
* Or perhaps you have a firewall blocking the download
* You can run with --with-packages-download-dir=/adirectory and ./configure will instruct you what packages to download manually
* or you can download the above URL manually, to /yourselectedlocation/%s
and use the configure option:
--download-%s=/yourselectedlocation/%s
''' % (self.packagename.upper(), url, filename, self.packagename, filename)
self.log.Exit(failureMessage)
# Uncompress tarball
extractdir = os.path.join(externdir,self.GetDirectoryName())
self.log.write('Uncompressing '+localFile+' to directory '+extractdir)
if os.path.exists(extractdir):
for root, dirs, files in os.walk(extractdir, topdown=False):
for name in files:
os.remove(os.path.join(root,name))
for name in dirs:
os.rmdir(os.path.join(root,name))
failureMessage = '''\
Downloaded package %s from: %s is not a tarball.
[or installed python cannot process compressed files]
* If you are behind a firewall - please fix your proxy and rerun ./configure
For example at LANL you may need to set the environmental variable http_proxy (or HTTP_PROXY?) to http://proxyout.lanl.gov
* You can run with --with-packages-download-dir=/adirectory and ./configure will instruct you what packages to download manually
* or you can download the above URL manually, to /yourselectedlocation/%s
and use the configure option:
--download-%s=/yourselectedlocation/%s
''' % (self.packagename.upper(), url, filename, self.packagename, filename)
try:
tf = tarfile.open(localFile)
except tarfile.ReadError as e:
self.log.Exit(str(e)+'\n'+failureMessage)
else:
if not tf: self.log.Exit(failureMessage)
with tf:
#git puts 'pax_global_header' as the first entry and some tar utils process this as a file
firstname = tf.getnames()[0]
if firstname == 'pax_global_header':
firstmember = tf.getmembers()[1]
else:
firstmember = tf.getmembers()[0]
# some tarfiles list packagename/ but some list packagename/filename in the first entry
if firstmember.isdir():
dirname = firstmember.name
else:
dirname = os.path.dirname(firstmember.name)
tf.extractall(path=externdir)
# fix file permissions for the untared tarballs
try:
# check if 'dirname' is set'
if dirname:
(result,output) = self.RunCommand('cd '+externdir+'; chmod -R a+r '+dirname+'; find '+dirname+r' -type d -name "*" -exec chmod a+rx {} \;')
if dirname != self.GetDirectoryName():
self.log.write('The directory name '+dirname+' is different from the expected one, renaming to '+self.GetDirectoryName())
os.rename(os.path.join(externdir,dirname),os.path.join(externdir,self.GetDirectoryName()))
dirname = self.GetDirectoryName()
else:
self.log.Warn('Could not determine dirname extracted by '+localFile+' to fix file permissions')
except RuntimeError as e:
self.log.Exit('Error changing permissions for '+dirname+' obtained from '+localFile+ ' : '+str(e))
if not downloaddir:
os.remove(localFile)
return os.path.join(externdir,dirname)
wd = 36
def ShowHelp(self):
wd = Package.wd
if self.downloadable or self.installable:
print(self.packagename.upper()+':')
if self.downloadable:
print((' --download-'+self.packagename+'[=<fname>]').ljust(wd)+': Download and install '+self.packagename.upper())
if self.packagetype == 'gnu':
print((' --download-'+self.packagename+'-configure-arguments=<flags>').ljust(wd)+': Indicate extra flags to configure '+self.packagename.upper())
elif self.packagetype == 'cmake':
print((' --download-'+self.packagename+'-cmake-arguments=<flags>').ljust(wd)+': Indicate extra flags to build '+self.packagename.upper()+' with CMake')
elif self.packagetype == 'source_c':
print((' --download-'+self.packagename+'-cflags=<flags>').ljust(wd)+': Indicate extra flags to compile '+self.packagename.upper())
if self.installable:
print((' --with-'+self.packagename+'=<bool>').ljust(wd)+': Test for '+self.packagename.upper()+(' (requires PETSc with %s)'%self.petscdepend.upper() if hasattr(self,'petscdepend') else ''))
if self.installable and not hasattr(self,'petscdepend'):
print((' --with-'+self.packagename+'-dir=<dir>').ljust(wd)+': Indicate the root directory of the '+self.packagename.upper()+' installation')
print((' --with-'+self.packagename+'-lib=<libraries>').ljust(wd)+': Indicate quoted list of libraries and link flags for '+self.packagename.upper())
if self.hasheaders:
print((' --with-'+self.packagename+'-include=<dirs>').ljust(wd)+': Indicate the directory of the '+self.packagename.upper()+' include files')
def ShowInfo(self):
if self.havepackage:
if hasattr(self,'version') and self.downloadpackage and self.packageurl=='':
packagename = self.packagename.upper()+' version '+self.version
else:
packagename = self.packagename.upper()
if hasattr(self,'petscdepend'):
self.log.Println(packagename+' from %s linked by PETSc' % self.petscdepend.upper())
elif hasattr(self,'packageflags'):
self.log.Println(packagename+' library flags:')
self.log.Println(' '+self.packageflags)
else:
self.log.Println(packagename+' installed')
def Link(self,functions,callbacks,flags,givencode='',cflags='',clanguage='c',logdump=True):
# Create temporary directory and makefile
try:
tmpdir = tempfile.mkdtemp(prefix='slepc-')
if not os.path.isdir(tmpdir): os.mkdir(tmpdir)
except:
self.log.Exit('Cannot create temporary directory')
try:
with open(os.path.join(tmpdir,'makefile'),'w') as makefile:
makefile.write('checklink: checklink.o\n')
makefile.write('\t${CLINKER} -o checklink checklink.o ${LINKFLAGS} ${PETSC_SNES_LIB}\n')
makefile.write('\t@${RM} -f checklink checklink.o\n')
makefile.write('include '+os.path.join('${PETSC_DIR}','lib','petsc','conf','variables')+'\n')
makefile.write('include '+os.path.join('${PETSC_DIR}','lib','petsc','conf','rules')+'\n')
if cflags:
if clanguage=='c++': makefile.write('CXXFLAGS='+cflags+'\n')
else: makefile.write('CFLAGS='+cflags+'\n')
except:
self.log.Exit('Cannot create makefile in temporary directory')
# Create source file
if givencode == '':
code = '#include "petscsnes.h"\n'
for f in functions:
code += 'PETSC_EXTERN int\n' + f + '();\n'
for c in callbacks:
code += 'int '+ c + '() { return 0; } \n'
code += 'int main() {\n'
code += 'PetscErrorCode ierr; Vec v; Mat m; KSP k;\n'
code += 'ierr = PetscInitializeNoArguments();\n'
code += 'ierr = VecCreate(PETSC_COMM_WORLD,&v);\n'
code += 'ierr = MatCreate(PETSC_COMM_WORLD,&m);\n'
code += 'ierr = KSPCreate(PETSC_COMM_WORLD,&k);\n'
code += '(void)ierr;\n'
for f in functions:
code += f + '();\n'
code += 'return 0;\n}\n'
else:
code = givencode
with open(os.path.join(tmpdir,'checklink.cxx' if clanguage=='c++' else 'checklink.c'),'w') as cfile:
cfile.write(code)
if logdump:
try:
self.log.write('- '*35+'\nChecking link with code:\n')
self.log.write(code)
except AttributeError: pass
# Try to compile test program
(result, output) = self.RunCommand('cd ' + tmpdir + ';' + self.make + ' checklink LINKFLAGS="'+flags+'"')
shutil.rmtree(tmpdir)
if result:
return (0,code + output)
else:
return (1,code + output)
def FortranLink(self,functions,callbacks,flags):
f = []
for i in functions:
f.append(i+'_')
c = []
for i in callbacks:
c.append(i+'_')
(result, output1) = self.Link(f,c,flags,logdump=False)
output1 = '\n====== With underscore Fortran names\n' + output1
if result: return ('UNDERSCORE',output1)
f = []
for i in functions:
f.append(i.upper())
c = []
for i in callbacks:
c.append(i.upper())
(result, output2) = self.Link(f,c,flags,logdump=False)
output2 = '\n====== With capital Fortran names\n' + output2
if result: return ('CAPS',output2)
output = '\n=== With linker flags: '+flags
return ('',output + output1 + output2)
def GenerateGuesses(self,name,archdir,word='lib'):
installdirs = [os.path.join(os.path.sep,'usr','local'),os.path.join(os.path.sep,'opt')]
if 'HOME' in os.environ:
installdirs.insert(0,os.environ['HOME'])
dirs = []
for i in installdirs:
dirs = dirs + [os.path.join(i,word)]
for d in [name,name.upper(),name.lower()]:
dirs = dirs + [os.path.join(i,d)]
dirs = dirs + [os.path.join(i,d,word)]
dirs = dirs + [os.path.join(i,word,d)]
for d in dirs[:]:
if not os.path.exists(d):
dirs.remove(d)
dirs = [''] + dirs + [os.path.join(archdir,word)]
return dirs
def FortranLib(self,slepcconf,slepcvars,dirs,libs,functions,callbacks = []):
name = self.packagename.upper()
mangling = ''
if isinstance(libs, str): # user-provided string with link options
flags = libs
(mangling, output) = self.FortranLink(functions,callbacks,flags)
error = output
else:
error = ''
flags = ''
for d in dirs:
for l in libs:
if d:
if hasattr(self,'slflag'):
flags = ' '.join([self.slflag + d] + ['-L' + d] + l)
else:
flags = ' '.join(['-L' + d] + l)
else:
flags = ' '.join(l)
(mangling, output) = self.FortranLink(functions,callbacks,flags)
error += output
if mangling: break
if mangling: break
if mangling:
self.log.write(output)
else:
self.log.write(error)
self.log.Exit('Unable to link with '+name+' library in directories '+' '.join(dirs)+' with libraries and link flags '+flags)
slepcconf.write('#define SLEPC_HAVE_' + name + ' 1\n#define SLEPC_' + name + '_HAVE_'+mangling+' 1\n')
slepcvars.write(name + '_LIB = '+flags+'\n')
self.libflags = flags
self.havepackage = True
self.packageflags = flags
def WriteMakefile(self,fname,builddir,cont):
self.log.write('Using makefile:\n')
self.log.write(cont)
with open(os.path.join(builddir,fname),'w') as mfile:
mfile.write(cont)
def DefaultIncludePath(self,petsc,file):
(result,output) = self.RunCommand('echo | '+petsc.cpp+' -Wp,-v -')
if not result:
import re
dirs = re.findall('^ .*',output,re.MULTILINE)
for s in dirs:
d = s[1:]
if os.path.isfile(os.path.join(d,file)):
self.log.write('Found '+os.path.join(d,file))
return d
return '/usr/include'
| firedrakeproject/slepc | config/package.py | package.py | py | 21,339 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "urllib.parse.uses_netloc.extend",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "urllib.parse.uses_netloc",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "urllib.parse",
"line_number": 11,
"usage_type": "name"
},
{
"api_na... |
33096692946 | import torch
import torch.nn as nn
from torchvision import models
from torch.autograd import Variable
from models.head import ClassBlock
class Resnet50_ft(nn.Module):
def __init__(self, class_num=751, droprate=0.5, stride=2, circle=False, ibn=False):
super(Resnet50_ft, self).__init__()
model_ft = models.resnet50(pretrained=True)
if ibn == True:
model_ft = torch.hub.load('XingangPan/IBN-Net', 'resnet50_ibn_a', pretrained=True)
# avg pooling to global pooling
if stride == 1:
model_ft.layer4[0].downsample[0].stride = (1,1)
model_ft.layer4[0].conv2.stride = (1,1)
model_ft.avgpool = nn.AdaptiveAvgPool2d((1,1))
self.model = model_ft
self.circle = circle
self.classifier = ClassBlock(2048, class_num, droprate, return_f = circle)
def forward(self, x):
x = self.model.conv1(x)
x = self.model.bn1(x)
x = self.model.relu(x)
x = self.model.maxpool(x)
x = self.model.layer1(x)
x = self.model.layer2(x)
x = self.model.layer3(x)
x = self.model.layer4(x)
x = self.model.avgpool(x)
x = x.view(x.size(0), x.size(1))
x = self.classifier(x)
return x
if __name__ == '__main__':
net = Resnet50_ft(751, stride=1, ibn=True)
# net.classifier = nn.Sequential()
# print(net)
input = Variable(torch.FloatTensor(8, 3, 224, 224))
print(input.shape)
output = net(input)
print(output.shape)
| SHT-Club4/ReID-PyTorch | models/backbone.py | backbone.py | py | 1,513 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torchvision.models.resnet50",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torchvision.m... |
4023568474 | import pandas as pd
import geopandas
from shapely import wkt
'''
Reader function that decompresses a csv file containing trajectory data for toy object needed in
homework 2 assignment for Spatial Databases.
Parameters:
path = path to csv compressed file
column_names = names of columns for dataframe once created, format is a list with 3 strings
Returns:
a dataframe
'''
def read_toy_dataset(path, column_names):
# Read and save as dataframe after decompressing csv file, no header available,
# adding a tab as a separator between each val found and using the column names given
traj_df = pd.read_csv(filepath_or_buffer=path, compression='gzip', header=None, sep='\t', names=column_names)
# Column ts converted to datetime format, i.e. 1353775800000 => 2012-11-24 16:50:00
traj_df['ts'] = pd.to_datetime(traj_df['ts'], unit='ms')
# Convert into a geometry
traj_df['geom'] = traj_df['geom'].apply(wkt.loads)
traj_df = geopandas.GeoDataFrame(traj_df, geometry='geom')
# Return geodataframe
return traj_df
# Set up file path and column values, then call the function to create geo-df
path = './toy_traj.csv.gz'
columns = ['id', 'ts', 'geom']
tdf = read_toy_dataset(path, columns)
# print(tdf.info()) | Sedwards8900/gridmapped_interval_tree | Util.py | Util.py | py | 1,257 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "shapely.wkt.loads",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "shapely.wkt... |
30335762959 | import agate
from agatecharts.charts.base import Chart
from agatecharts.colors import Qualitative
class Lines(Chart):
def __init__(self, x_column_name, y_column_names):
self._x_column_name = x_column_name
if isinstance(y_column_names, str):
y_column_names = [y_column_names]
self._y_column_names = y_column_names
def show_legend(self):
return len(self._y_column_names) > 1
def get_x_domain(self, table):
if not isinstance(table.columns[self._x_column_name].data_type, agate.Number):
return (None, None)
x_min = min(table.columns[self._x_column_name])
x_max = max(table.columns[self._x_column_name])
return (x_min, x_max)
def get_y_domain(self, table):
y_min = min([min(table.columns[name]) for name in self._y_column_names])
y_max = max([max(table.columns[name]) for name in self._y_column_names])
return (y_min, y_max)
def plot(self, table, axes):
colors = Qualitative()
legend_lines = []
x_column = table.columns[self._x_column_name]
if not isinstance(x_column.data_type, agate.Number) and \
not isinstance(x_column.data_type, agate.Date) and \
not isinstance(x_column.data_type, agate.DateTime):
raise ValueError('Only Number, Date and DateTime data are supported for line chart X-axis.')
for y_column_name in self._y_column_names:
y_column = table.columns[y_column_name]
if not isinstance(y_column.data_type, agate.Number):
raise ValueError('Only Number data is supported for line chart Y-axis.')
plot_lines = axes.plot(
x_column,
y_column,
linewidth=2,
color=next(colors),
label=y_column_name
)
legend_lines.append(plot_lines[0])
axes.set_xlabel(self._x_column_name)
if len(self._y_column_names) == 1:
axes.set_ylabel(self._y_column_names[0])
return (legend_lines, self._y_column_names)
| wireservice/agate-charts | agatecharts/charts/lines.py | lines.py | py | 2,121 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "agatecharts.charts.base.Chart",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "agate.Number",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "agatecharts.colors.Qualitative",
"line_number": 35,
"usage_type": "call"
},
{
"api... |
74330590505 | # -*- coding: utf-8 -*-
__author__ = "Amir Arfan, Sebastian Becker"
__email__ = "amar@nmbu.no"
from biosim.map import Map
from biosim.cell import Mountain, Ocean, Savannah, Jungle, Desert
from biosim.animals import Herbivore, Carnivore
import pytest
import textwrap
@pytest.fixture
def standard_map():
"""
Creates a standard map fixture which can be used for tests
Returns
-------
sgegor = str
Standard map taken from 'check_sim.py'
"""
sgeogr = """\
OOOOOOOOOOOOOOOOOOOOO
OOOOOOOOSMMMMJJJJJJJO
OSSSSSJJJJMMJJJJJJJOO
OSSSSSSSSSMMJJJJJJOOO
OSSSSSJJJJJJJJJJJJOOO
OSSSSSJJJDDJJJSJJJOOO
OSSJJJJJDDDJJJSSSSOOO
OOSSSSJJJDDJJJSOOOOOO
OSSSJJJJJDDJJJJJJJOOO
OSSSSJJJJDDJJJJOOOOOO
OOSSSSJJJJJJJJOOOOOOO
OOOSSSSJJJJJJJOOOOOOO
OOOOOOOOOOOOOOOOOOOOO"""
sgeogr = textwrap.dedent(sgeogr)
return sgeogr
@pytest.fixture
def populated_island(standard_map):
ini_herbs = [
{
"loc": (5, 5),
"pop": [
{"species": "Herbivore", "age": 5, "weight": 20}
for _ in range(150)
],
}
]
ini_carns = [
{
"loc": (5, 5),
"pop": [
{"species": "Carnivore", "age": 5, "weight": 20}
for _ in range(150)
],
}
]
island_map = Map(standard_map)
island_map.add_animals(ini_herbs)
island_map.add_animals(ini_carns)
return island_map
def test_constructor_map(standard_map):
"""
Tests the constructor in Map
Parameters
----------
standard_map: str
String based map from fixture
"""
test_map = Map(standard_map)
assert isinstance(test_map, Map)
def test_uneven_map():
test_map = "OOO\nODSDO\nOOO"
with pytest.raises(ValueError):
Map(test_map)
def test_non_allowed_cell_type():
test_map = "OOO\nOKO\nOOO"
with pytest.raises(ValueError):
Map(test_map)
def test_non_island_map():
test_map = "DDD\nOOO\nDDD"
with pytest.raises(ValueError):
Map(test_map)
def test_get_neighbours(standard_map):
island = Map(standard_map)
neighbours = island.get_neighbour((2, 2))
assert len(list(neighbours)) == 4
# Should only get two values at the edge
neighbours_edge = island.get_neighbour((0, 0))
assert len(list(neighbours_edge)) == 2
# Testing non existing indexes
non_exist_neighbours = island.get_neighbour((30, 30))
assert len(list(non_exist_neighbours)) == 0
# Testing negative indexes
negative_neighbours = island.get_neighbour((2, -10))
assert len(list(negative_neighbours)) == 0
def test_add_animals_map(standard_map):
ini_herbs = [
{
"loc": (5, 5),
"pop": [
{"species": "Herbivore", "age": 5, "weight": 20}
for _ in range(150)
],
}
]
ini_carns = [
{
"loc": (5, 5),
"pop": [
{"species": "Carnivore", "age": 5, "weight": 20}
for _ in range(150)
],
}
]
island = Map(standard_map)
island.add_animals(ini_carns)
assert island.num_animals_on_map() == 150
island.add_animals(ini_herbs)
assert island.num_animals_on_map() == 300
def test_add_animals_on_ocean_loc(standard_map):
ini_herbs = [
{
"loc": (0, 0),
"pop": [
{"species": "Herbivore", "age": 10, "weight": 20}
for _ in range(10)
],
}
]
island = Map(standard_map)
with pytest.raises(ValueError):
island.add_animals(ini_herbs)
def test_add_animals_on_no_loc(standard_map):
ini_herbs = [
{
"pop": [
{"species": "Herbivore", "age": 10, "weight": 20}
for _ in range(10)
]
}
]
ini_carns = [
{
"loc": None,
"pop": [
{"species": "Carnivore", "age": 5, "weight": 20}
for _ in range(150)
],
}
]
island = Map(standard_map)
island.add_animals(ini_herbs)
assert island.num_animals_on_map() == 0
with pytest.raises(ValueError):
island.add_animals(ini_carns)
def test_move_all_animals(populated_island):
island = populated_island
curr_cell = island.map[(5, 5)]
prev_val = curr_cell.num_animals_per_cell()
island.move_all_animals()
new_val = curr_cell.num_animals_per_cell()
assert prev_val > new_val
def test_all_animals_eat(populated_island):
island = populated_island
curr_cell = island.map[(5, 5)]
prev_amount_herbs, prev_amount_carns = curr_cell.num_species_per_cell()
island.all_animals_eat()
# If the carnivores eat, there should be a reduction in herbivore pop.
new_amount_herbs, new_amount_carns = curr_cell.num_species_per_cell()
assert new_amount_herbs < prev_amount_herbs
def test_mate_all_animals(standard_map, mocker):
    """With birth always chosen, mating must grow the population."""
    mocker.patch("numpy.random.choice", return_value=True)
    carn_spec = [{
        "loc": (5, 5),
        "pop": [{"species": "Carnivore", "age": 5, "weight": 50} for _ in range(150)],
    }]
    island = Map(standard_map)
    island.add_animals(carn_spec)
    before = island.num_animals_on_map()
    island.mate_all_animals()
    assert island.num_animals_on_map() > before
def test_age_all_animal(populated_island):
    """Ageing should strictly increase the summed age of all animals."""
    cell = populated_island.map[(5, 5)]

    def total_age():
        # Sum the ages over both species lists kept in the cell.
        return sum(
            animal.age
            for animals in cell.animal_classes.values()
            for animal in animals
        )

    before = total_age()
    populated_island.age_all_animals()
    assert total_age() > before
def test_annual_weight_loss(populated_island):
    """Annual weight loss should strictly decrease the summed weight."""
    cell = populated_island.map[(5, 5)]

    def total_weight():
        # Sum the weights over both species lists kept in the cell.
        return sum(
            animal.weight
            for animals in cell.animal_classes.values()
            for animal in animals
        )

    before = total_weight()
    populated_island.annual_weight_loss_all_animals()
    assert total_weight() < before
def test_annual_weight(populated_island, mocker):
    # NOTE(review): despite its name this test exercises annual DEATH
    # (annual_death_all_animals), not weight; consider renaming to
    # test_annual_death — kept as-is here to avoid changing the collected
    # test id.
    # With random.choice forced True every animal's death roll succeeds,
    # so the population must shrink.
    mocker.patch("numpy.random.choice", return_value=True)
    island = populated_island
    prev_val = island.num_animals_on_map()
    island.annual_death_all_animals()
    new_val = island.num_animals_on_map()
    assert prev_val > new_val
def test_num_animals_per_species(populated_island):
    """The populated fixture holds 150 herbivores and 150 carnivores."""
    herb_count, carn_count = populated_island.num_species_on_map()
    assert herb_count == 150
    assert carn_count == 150
def test_update_params_animals(populated_island):
    """Per-species parameter updates must land on the species classes."""
    populated_island.update_animal_params_all_cells("Herbivore", {"F": 15})
    assert Herbivore.param["F"] == 15
    populated_island.update_animal_params_all_cells("Carnivore", {"DeltaPhiMax": 5})
    assert Carnivore.param["DeltaPhiMax"] == 5
def test_update_params_cell(populated_island):
    """Landscape parameter updates must land on the cell class."""
    populated_island.update_param_all_cells("J", {"f_max": 500})
    assert Jungle.param["f_max"] == 500
def test_cycle_runs(populated_island):
    """A full annual cycle should complete without raising."""
    populated_island.cycle()
| amirarfan/BioSim_G03_Amir_Sebastian | tests/test_map.py | test_map.py | py | 7,808 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "textwrap.dedent",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "biosim.map.Map",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
... |
9377241284 | import sys
import pandas as pd
import numpy as np
import sklearn
import matplotlib
import keras
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
import seaborn as sns
# Load the Cleveland heart-disease dataset and inspect it.
cleveland = pd.read_csv('input/heart.csv')
print('Shape of DataFrame: {}'.format(cleveland.shape))
print (cleveland.loc[1])
cleveland.loc[280:]
# Replace '?' placeholders with NaN, then drop incomplete rows.
data = cleveland[~cleveland.isin(['?'])]
data.loc[280:]
data = data.dropna(axis=0)
data.loc[280:]
print(data.shape)
print(data.dtypes)
# Coerce every column to a numeric dtype.
data = data.apply(pd.to_numeric)
data.dtypes
data.describe()
# Correlation heatmap of all features.
sns.heatmap(data.corr(), annot=True, fmt='.1f')
plt.show()
# Mean max-heart-rate (thalach) per distinct age.
age_unique = sorted(data.age.unique())
age_thalach_values = data.groupby('age')['thalach'].count().values
mean_thalach = []
for i, age in enumerate(age_unique):
    mean_thalach.append(sum(data[data['age'] == age].thalach) / age_thalach_values[i])
# Split features/labels and standardize the features (zero mean, unit std).
X = np.array(data.drop(['target'], 1))
y = np.array(data['target'])
mean = X.mean(axis=0)
X -= mean
std = X.std(axis=0)
X /= std
from sklearn import model_selection
X_train, X_test, y_train, y_test = model_selection.train_test_split(X, y, stratify=y, random_state=42, test_size=0.2)
from keras.utils.np_utils import to_categorical
Y_train = to_categorical(y_train, num_classes=None)
Y_test = to_categorical(y_test, num_classes=None)
print (Y_train.shape)
print (Y_train[:10])
X_train[0]
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras.layers import Dropout
from keras import regularizers
# Collapse the multi-class target into binary labels (0 = healthy, 1 = disease).
Y_train_binary = y_train.copy()
Y_test_binary = y_test.copy()
Y_train_binary[Y_train_binary > 0] = 1
Y_test_binary[Y_test_binary > 0] = 1
print(Y_train_binary[:20])
# define a new keras model for binary classification
def create_binary_model():
    """Build and compile a small dense net for binary heart-disease classification.

    Architecture: 13 inputs -> Dense(16, relu) -> Dropout(0.25)
    -> Dense(8, relu) -> Dropout(0.25) -> Dense(1, sigmoid),
    with L2(0.001) regularization on the hidden layers.

    Returns:
        A compiled keras ``Sequential`` model.
    """
    model = Sequential()
    model.add(Dense(16, input_dim=13, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.001), activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(8, kernel_initializer='normal', kernel_regularizer=regularizers.l2(0.001), activation='relu'))
    model.add(Dropout(0.25))
    model.add(Dense(1, activation='sigmoid'))
    # Compile model.
    # Bug fix: the Adam optimizer was constructed but never used — the model
    # was compiled with the string 'rmsprop' instead. Use the intended Adam.
    adam = Adam(lr=0.001)
    model.compile(loss='binary_crossentropy', optimizer=adam, metrics=['accuracy'])
    return model
binary_model = create_binary_model()
# NOTE(review): the model is saved BEFORE fit(), so 'heart1.model' holds
# untrained weights — confirm whether the save was meant to follow training.
binary_model.save('heart1.model')
print(binary_model.summary())
history = binary_model.fit(X_train, Y_train_binary, validation_data=(X_test, Y_test_binary), epochs=100, batch_size=10)
# generate classification report using predictions for categorical model
from sklearn.metrics import classification_report, accuracy_score
# generate classification report using predictions for binary model
binary_pred = np.round(binary_model.predict(X_test)).astype(int)
print('Results for Binary Model')
print(accuracy_score(Y_test_binary, binary_pred))
print(classification_report(Y_test_binary, binary_pred))
| MasudCodes/HeartAnalysis | heart.py | heart.py | py | 2,941 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.to_numeric",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "seaborn.heatmap",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyp... |
74339900582 | import numpy as np
import math
from gym.envs.mujoco import mujoco_env
from gym import utils
from mujoco_py import functions as mjcf
import mujoco_py
#from mujoco_py import mjvisualize as mjcv
def mass_center(model, sim):
    """Return a speed-weighted measure of body heights used as the "CoM" signal.

    NOTE(review): ``mass`` has shape (n, 1) while ``xpos[:, 2]`` has shape
    (n,), so the product broadcasts to an (n, n) outer product.  After the
    double division the mass terms cancel algebraically, making the result
    the speed_weights-weighted mean of the body heights, independent of
    mass — confirm whether a per-body (elementwise) weighting was intended.
    """
    mass = np.expand_dims(model.body_mass, 1)
    xpos = sim.data.xipos
    # Per-body weights; zeros exclude the world body and the last body.
    speed_weights = np.array([0, 10, 5, 3, .2, 5, 3, .2, 15, 25, 5, 3, 5, 3, 0])
    return (np.sum(mass * xpos[:,2] * speed_weights) / np.sum(mass) / np.sum(speed_weights))
def euler_to_quaternion(rot, theta):
    """Convert a fall magnitude/direction pair into a quaternion [w, x, y, z].

    The single rotation magnitude *rot* is decomposed into roll/pitch
    components along the fall direction *theta* (0 = forward).
    """
    roll = rot * math.cos(theta)
    pitch = rot * math.sin(theta)
    # Half-angle sines/cosines used by the quaternion product.
    cr, sr = np.cos(roll / 2), np.sin(roll / 2)
    cp, sp = np.cos(pitch / 2), np.sin(pitch / 2)
    return [cr * cp, cr * sp, sr * cp, -sr * sp]
class Kevin_FallingHumanoidEnv(mujoco_env.MujocoEnv, utils.EzPickle):
    """MuJoCo environment in which a humanoid learns to fall safely.

    The episode starts with the humanoid tipped over and pushed; the reward
    penalizes downward kinetic energy of the (weighted) center of mass, loss
    of head height, and weighted contact forces on vulnerable body parts.
    ``self.test`` selects deterministic evaluation scenarios (0 = randomized
    training episodes).
    """
    def __init__(self):
        # timers used to determine end of episode, declared here so they carry over to next timestep
        self.still_timer = 0
        self.begin_timer = 0
        # variable which selects a test scenario if needed (0 = randomized episodes)
        self.test = 0
        ###### Constants and ranges of initial positions and velocities ######
        # contact force weights — one entry per geom; head/torso are weighted heaviest
        self.force_weights = np.array([0, 1, 10, 2, 5, .1, .1, 10, 2, 5, .1, .1, 20, 20, 100, 20, 10, 5, 2, 10, 5, 2])
        dtr = math.pi/180 #degrees to radians
        # Initial free and joint positions, qpos[3:7] (rotation) are determined by qrot, so the quaternion can be declared properly
        # layout: free trans | free rot | right leg | left leg | abdomen | right arm | left arm
        self.init_qpos_low = np.array([0, 0, 0.87, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -35*dtr, -45*dtr, -30*dtr, -85*dtr, -85*dtr, -90*dtr, -60*dtr, -60*dtr, -90*dtr])
        self.init_qpos_high= np.array([0, 0, 0.87, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 35*dtr , 45*dtr , 75*dtr , 60*dtr , 60*dtr , 50*dtr, 85*dtr , 85*dtr , 50*dtr])
        # [rotation of fall, direction of fall (0=forward)]
        self.init_qrot_low = np.array([5*dtr, -15*dtr])
        self.init_qrot_high= np.array([15*dtr, 15*dtr])
        # Velocity of fall and initial joint velocities. qvel[0:6] is determined based upon qvel[0 & 3] and qrot[1] (direction of fall), so velocity is always in the direction of the fall
        # layout: free trans | free rot | right leg | left leg | abdomen | right arm | left arm
        self.init_qvel_low = np.array([0.3, 0, 0, 0.6, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        self.init_qvel_high= np.array([0.5, 0, 0, 1.0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
        ###### End of constants ######
        mujoco_env.MujocoEnv.__init__(self, 'kevin_fallinghumanoid_pelvis_stiff.xml', 2)
        utils.EzPickle.__init__(self)
        print("Kevin Falling Humanoid environment set-up")
    def _get_obs(self):
        """Build the observation vector from positions, velocities, body
        inertias/velocities and actuator forces."""
        data = self.sim.data
        '''
        # Realistic input
        return np.concatenate([data.qpos.flat[np.array([6, 8, 9, 10, 12, 14, 15, 16])],
                               data.qvel.flat[np.array([5, 7, 8, 9, 11, 13, 14, 15])],
                               data.sensordata,
                               -data.site_xmat.flat[np.array([2,5,8, 11,14,17, 20,23,26, 29,32,35, 38,41,44, 47,50,53])],
                               [mjcf.mj_getTotalmass(self.model)],
                               data.qfrc_actuator.flat[np.array([6, 8, 9, 10, 12, 14, 15, 16])]])
        '''
        # Original input
        return np.concatenate([data.qpos.flat[2:],
                               data.qvel.flat,
                               data.cinert.flat[10:-10],
                               data.cvel.flat[6:],
                               data.qfrc_actuator.flat[6:]]) #[np.array([6, 8, 9, 10, 12, 14, 15, 16])]])
    def get_plot_obs(self):
        """Return (per-geom impact forces, vertical body velocities) for plotting."""
        data = self.sim.data
        impact_force = np.zeros(self.force_weights.shape[0])
        for i in range(data.ncon):
            c_force = np.zeros(6, dtype=np.float64)
            mjcf.mj_contactForce(self.model, data, i, c_force)
            # Accumulate the contact normal force on both geoms of the pair.
            impact_force[data.contact[i].geom1] += c_force[0]
            impact_force[data.contact[i].geom2] += c_force[0]
        # Zero out the world geom so floor contacts are not double-counted there.
        impact_force[0]=0.0
        vert_vel = data.cvel[:,2]
        return impact_force, vert_vel
    def step(self, a):
        """Advance one control step and return (obs, reward, done, info).

        Reward = falling-energy cost + head-height cost + (disabled) control
        cost + weighted contact-force cost.
        """
        pos_before = mass_center(self.model, self.sim)
        head_height_before = self.sim.data.body_xpos[14, 2]
        self.do_simulation(a, self.frame_skip)
        pos_after = mass_center(self.model, self.sim)
        data = self.sim.data
        kin_energy_cost = 2 * np.sign(pos_after - pos_before) * np.square(pos_after - pos_before) / self.dt #kinetic energy is measured as the vertical displacement of the total CoM
        head_height_cost = -0.2 * max(min(data.body_xpos[14, 2]-0.3, 0)*((data.body_xpos[14, 2]-head_height_before)/self.dt), 0) # A cost associated to keeping the head as high as possible
        quad_ctrl_cost = 0#-0.002 * np.square(data.ctrl).sum()
        force_normals = np.zeros(self.force_weights.shape[0])
        for i in range(data.ncon):
            c_force = np.zeros(6, dtype=np.float64)
            mjcf.mj_contactForce(self.model, data, i, c_force)
            #if c_force[0] > 5000: c_force[0]*=2
            force_normals[data.contact[i].geom1] += c_force[0]
            force_normals[data.contact[i].geom2] += c_force[0]
            #print("hit bodies are: {:d} and {:d} with force {:f}".format(data.contact[i].geom1, data.contact[i].geom2, c_force[0]), end="\n")
        body_hit_cost = -3e-6 * np.sum(self.force_weights * force_normals) # Cost that is related to the impact force, with different weights for different body parts
        reward = kin_energy_cost + head_height_cost + quad_ctrl_cost + body_hit_cost
        #reward = head_height_cost
        #print("\rkin_energy_cost: {:f} body_hit_cost: {:f} head_height_cost: {:f} quad_ctrl_cost: {:f} reward: {:f}".format(kin_energy_cost, body_hit_cost, head_height_cost, quad_ctrl_cost, reward), end="\n")
        # Track how long the body has been (nearly) still; episodes currently
        # never terminate early (done is hard-coded False below).
        if kin_energy_cost > -0.01:
            self.still_timer+= 1
        else:
            self.still_timer = 0
        self.begin_timer+=1
        done = False#(self.still_timer > 20 and self.begin_timer > 100)
        return self._get_obs(), reward, done, dict(reward_kin_energy=kin_energy_cost, reward_contact_force=body_hit_cost, reward_head_height=head_height_cost)
    def reset_model(self):
        """Reset timers and state to a (possibly randomized) initial fall pose."""
        c = 0.01
        qpos, qrot, qvel = self.select_init(c)
        self.still_timer = 0
        self.begin_timer = 0
        self.set_state(qpos, qvel)
        return self._get_obs()
    def viewer_setup(self):
        """Place the camera to track the torso from a slight elevation."""
        self.viewer.cam.trackbodyid = 1
        self.viewer.cam.distance = self.model.stat.extent * 1.0
        self.viewer.cam.lookat[2] = 1.0
        self.viewer.cam.elevation = -20
    def select_init(self, c):
        """Choose initial qpos/qrot/qvel.

        self.test == 0 samples a randomized episode within the declared
        ranges; tests 1-9 are deterministic evaluation scenarios.
        *c* is the magnitude of the uniform noise added to the state.
        """
        if self.test == 0:
            mjcf.mj_setTotalmass(self.model, self.np_random.uniform(low=70, high=90))
            qpos = self.np_random.uniform(low=self.init_qpos_low, high=self.init_qpos_high) + self.np_random.uniform(low=-c, high=c, size=self.model.nq)
            qrot = self.np_random.uniform(low=self.init_qrot_low, high=self.init_qrot_high)
            qvel = self.np_random.uniform(low=self.init_qvel_low, high=self.init_qvel_high) + self.np_random.uniform(low=-c, high=c, size=self.model.nv)
        else:
            dtr = math.pi/180
            # Deterministic baseline: mean push, zero joint offsets, 80 kg.
            mjcf.mj_setTotalmass(self.model, 80)
            qpos = np.array([0, 0, 0.87, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0])
            qrot = (self.init_qrot_low + self.init_qrot_high)/2
            qvel = (self.init_qvel_low + self.init_qvel_high)/2
            if self.test==2: #minimum mass
                mjcf.mj_setTotalmass(self.model, 70)
            elif self.test==3: # maximum mass
                mjcf.mj_setTotalmass(self.model, 90)
            elif self.test==4: #minimum initial push
                qrot[0] = self.init_qrot_low[0]
                qvel = self.init_qvel_low + 0
            elif self.test==5: #maximum initial push
                qrot[0] = self.init_qrot_high[0]
                qvel = self.init_qvel_high + 0
            elif self.test==6: #most towards the left
                qrot[1] = self.init_qrot_low[1]
            elif self.test==7: #most towards the right
                qrot[1] = self.init_qrot_high[1]
            elif self.test==8: #arms at the sides
                qpos = np.array([0, 0, 0.87, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 50*dtr, -30*dtr, -90*dtr, -50*dtr, 30*dtr, -90*dtr])
            elif self.test==9: #arms aimed backwards
                qpos = np.array([0, 0, 0.87, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -85*dtr, -85*dtr, -90*dtr, 85*dtr, 85*dtr, -90*dtr])
        print("\rNew episode created with, Angle with ground: {:f} Direction of fall: {:f} Translational velocity: {:f} Rotational velocity: {:f} Mass: {:f}".format(qrot[0], qrot[1], qvel[0], qvel[3], mjcf.mj_getTotalmass(self.model)), end="\n")
        # Lower the torso to account for the tilt and encode the tilt as a quaternion.
        qpos[2] = qpos[2]*math.cos(qrot[0]*0.75)
        qpos[3:7] = euler_to_quaternion(qrot[0], qrot[1])
        self.model.qpos_spring[19:28] = qpos[19:28]
        # Rotate the translational/rotational push into the fall direction.
        qvel[0:6] = np.array([qvel[0]*math.cos(qrot[1]), -qvel[0]*math.sin(qrot[1]), 0, qvel[3]*math.sin(qrot[1]), qvel[3]*math.cos(qrot[1]), 0]) + self.np_random.uniform(low=-c, high=c, size=6)
        return qpos, qrot, qvel
    def add_to_test(self, value):
        # Cycle through the 10 scenarios (0-9), wrapping around.
        self.test=(self.test+value)%10
    def get_test(self):
        # Currently selected test scenario id.
        return self.test
'''
#self.viewer.add_overlay()
fncs = mjcf.__dict__
#print(dir(mjcf))
#print(dir(mjcf.mjr_figure))
figure = mujoco_py.cymj.PyMjvFigure()
con = mujoco_py.cymj.PyMjrContext()
rect = mujoco_py.cymj.PyMjrRect()
mujoco_py.cymj.MjRenderContextWindow()
figure
#print(figure.__doc__())
#print(dir(con))
#print(dir(rect))
#print(rect.width)
mjcf.mjr_figure(rect, figure, con);
#mjcf.mjv_defaultFigure(figure)
raise SystemExit(0)
''' | kvogelzang/GP_exoskeleton | gym/envs/mujoco/kevin_fallinghumanoid.py | kevin_fallinghumanoid.py | py | 10,527 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.expand_dims",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "math.cos",
"line_number":... |
31563105051 | from datetime import datetime
from dateutil import parser
from lxml import etree
import json
import logging
from StringIO import StringIO
import requests
from requests import RequestException
from moxie_events.domain import Event
logger = logging.getLogger(__name__)
class TalksCamEventsImporter(object):
    """Fetches TalksCam feeds over HTTP and indexes the contained talks."""
    # Seconds before an HTTP fetch is abandoned.
    FETCH_TIMEOUT = 30
    def __init__(self, feeds, indexer):
        # feeds: iterable of feed URLs; indexer: object with an index(events) method.
        self.feeds = feeds
        self.indexer = indexer
    def run(self):
        """Fetch, parse and index every configured feed."""
        for feed in self.feeds:
            data, encoding = self.retrieve_feed(feed)
            events = self.index_feed(data, encoding)
            self.indexer.index(events)
    def retrieve_feed(self, url):
        """Fetch a feed, returning (body, encoding); re-raises on HTTP failure.

        :param url: URL of the feed
        :return: tuple of (response body, response encoding)
        """
        try:
            response = requests.get(url, timeout=self.FETCH_TIMEOUT)
            response.raise_for_status()
            return response.content, response.encoding
        # NOTE(review): binding the exception as ``re`` shadows the name of
        # the stdlib regex module (not imported here, so harmless).
        except RequestException as re:
            logger.error('Error fetching events (TalksCam)', exc_info=True,
                         extra={
                             'data': {
                                 'url': url}
                         })
            raise re
    def index_feed(self, data, encoding):
        """Parse the talks contained in a fetched feed body.

        :param data: raw JSON response body from the feed
        :param encoding: response encoding (currently unused)
        :return: list of solr-ready event dicts (talks that fail to parse
            are logged and skipped)
        """
        response = json.loads(data)
        talks = []
        for talk in response['_embedded']['talks']:
            try:
                talks.append(self.parse_talk(talk))
            except Exception as e:
                # Best-effort: skip unparsable talks but keep the rest.
                logger.error("Couldn't parse talk", exc_info=True)
        return talks
    def parse_talk(self, talk):
        """Parse a JSON "talk" object into a solr document.

        :param talk: dict for one talk from the feed's ``_embedded.talks``
        :return: solr dict built from an :class:`Event`
        """
        event = Event(talk['slug'])
        event.id = talk['slug']
        event.name = talk['title_display']
        event.source_url = talk['_links']['self']['href']
        event.start_time = self.parse_date(talk['start'])
        event.end_time = self.parse_date(talk['end'])
        if 'location_summary' in talk:
            event.location = talk['location_summary']
        solr_dict = event.to_solr_dict()
        return solr_dict
    def parse_date(self, date):
        """Parse an ISO-8601 date string, e.g. 2016-07-14T10:30:00+01:00."""
        return parser.parse(date)
| ox-it/moxie-events | moxie_events/importers/talks_cam.py | talks_cam.py | py | 2,358 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "requests.RequestException",
"line_number": 35,
"usage_type": "name"
},
{
"api_name": "json.loads",... |
20199237869 | from django.shortcuts import render, redirect
from .models import AiClass, AiStudent, StudentPost
from django.contrib.auth.models import User
from django.contrib import auth
# Create your views here.
def home(request):
    """Render the landing page listing every AI class."""
    return render(request, 'home.html', {'AiClass': AiClass.objects.all()})
def detail(request, class_pk):
    """Render the detail page for a single AI class."""
    target = AiClass.objects.get(pk=class_pk)
    return render(request, 'detail.html', {'class_obj': target})
def add(request, student_pk):
    """Create a StudentPost for the given student, then show their page."""
    student = AiStudent.objects.get(pk=student_pk)
    if request.method == "POST":
        # Changed to create a StudentPost (intro text written by this student).
        StudentPost.objects.create(
            intro=request.POST['intro'],
            writer=student
        )
        return redirect('student', student_pk)
    return render(request, 'add.html')
def student(request, student_pk):
    """Render a single student's profile page."""
    target = AiStudent.objects.get(pk=student_pk)
    return render(request, 'student.html', {'student': target})
def edit(request, student_pk):
    """Edit a student's name and phone number; GET shows the form."""
    if request.method == "POST":
        # Apply the update. update() only works on a QuerySet, hence
        # filter() rather than get().
        target_student = AiStudent.objects.filter(pk=student_pk)
        target_student.update(
            name=request.POST['name'],
            phone_num=request.POST['phone_num'],
        )
        return redirect('student', student_pk)
    student = AiStudent.objects.get(pk=student_pk)
    context = {
        'student': student
    }
    return render(request, 'edit.html', context)
def delete(request, class_num, student_pk):
    """Delete a student, then return to their class's detail page."""
    AiStudent.objects.get(pk=student_pk).delete()
    return redirect('detail', class_num)
ERROR_MSG = {
'ID_EXIST': '이미 사용 중인 아이디입니다.',
'ID_NOT_EXIST': '존재하지 않는 아이디입니다.',
'ID_PW_MISSING': '아이디와 비밀번호를 다시 확인해주세요.',
'PW_CHECK': '비밀번호가 일치하지 않습니다.',
}
def signup(request):
    """Register a new student account.

    Validates the submitted form, creates the Django user plus the linked
    AiStudent profile, logs the new user in and redirects home.  On any
    validation failure the signup page is re-rendered with an error message.
    """
    context = {
        'error': {
            'state': False,
            'msg': ''
        }
    }
    if request.method == "POST":
        user_id = request.POST['user_id']
        user_pw = request.POST['user_pw']
        user_pw_check = request.POST['user_pw_check']
        user_name = request.POST['user_name']
        phone_num = request.POST['phone_num']
        class_num = request.POST['class_num']
        target_class = AiClass.objects.get(class_num=class_num)
        if (user_id and user_pw):
            user = User.objects.filter(username=user_id)
            # An empty queryset means the id is free to use.
            if len(user) == 0:
                if user_pw == user_pw_check:
                    # Create the auth user and the linked student profile.
                    created_user = User.objects.create_user(
                        username=user_id,
                        password=user_pw
                    )
                    AiStudent.objects.create(
                        user=created_user,
                        participate_class=target_class,
                        name=user_name,
                        phone_num=phone_num
                    )
                    # Log the new account in right after signing up.
                    auth.login(request, created_user)
                    return redirect('home')
                else:
                    context['error']['state'] = True
                    context['error']['msg'] = ERROR_MSG['PW_CHECK']
            # The id already exists and cannot be reused.
            else:
                context['error']['state'] = True
                context['error']['msg'] = ERROR_MSG['ID_EXIST']
        else:
            context['error']['state'] = True
            context['error']['msg'] = ERROR_MSG['ID_PW_MISSING']
    # Bug fix: the error context was built but never passed to the template,
    # so the validation messages were silently dropped.
    return render(request, 'signup.html', context)
def login(request):
    """Authenticate a user; on failure re-render the form with an error."""
    context = {
        'error': {
            'state': False,
            'msg': ''
        }
    }
    if request.method == "POST":
        user_id = request.POST['user_id']
        user_pw = request.POST['user_pw']
        user = User.objects.filter(username=user_id)
        if (user_id and user_pw):
            if len(user) != 0:
                # Check the password against the stored credentials.
                user = auth.authenticate(
                    username=user_id,
                    password=user_pw
                )
                if user != None:
                    auth.login(request, user)
                    return redirect('home')
                else:
                    context['error']['state'] = True
                    context['error']['msg'] = ERROR_MSG['PW_CHECK']
            else:
                context['error']['state'] = True
                context['error']['msg'] = ERROR_MSG['ID_NOT_EXIST']
        else:
            context['error']['state'] = True
            context['error']['msg'] = ERROR_MSG['ID_PW_MISSING']
    return render(request, 'login.html', context)
def logout(request):
    """Log the current user out (POST only) and go back home."""
    if request.method == "POST":
        auth.logout(request)
    return redirect('home')
| WooseopIM/temp_django | AiSchoolProject/AiInfoApp/views.py | views.py | py | 5,147 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "models.AiClass.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "models.AiClass.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "models.AiClass",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": ... |
16094884293 | #=============================================================
"""
Plots contribution functions for forward model results that have
utilized ONE variable only
Uses data from kk.dat
Saves plots in a new directory: contribution_plots
Usage:
Set show below to True or False
python -W ignore plot_contribution.py
"""
#=============================================================
# Inputs
# Show plots?
# True = yes, False = No
# The plots will be saved regardless
show = False
#=============================================================
# Imports
import numpy as np
import matplotlib.pyplot as plt
import os
import glob
#=============================================================
# Main
# Discover the core_* result directories.
sstring = 'core_*'
directories = glob.glob(sstring)
# NOTE(review): 'length' is never used — the loop below hardcodes range(6);
# confirm whether it should iterate range(length) instead.
length = len(directories)
if not os.path.exists('contribution_plots'):
    os.mkdir('contribution_plots')
for i in range(6):
    print('core_{}'.format(i+1))
    # First line of kk.dat holds the pressure/wavelength grid sizes.
    with open('core_{}/kk.dat'.format(i+1)) as f:
        data = f.readlines()
        f.close()  # NOTE(review): redundant — the with-block already closes f
    par_array = data[0].split()
    n_press = int(par_array[0])
    n_wave = int(par_array[1])
    # Contribution (Jacobian) function
    c_data = np.loadtxt('core_{}/kk.dat'.format(i+1),skiprows=1)
    c_line = np.reshape(c_data,(n_press * n_wave))
    c_array = np.reshape(c_line,(n_wave,n_press))
    len_arr2 = len(c_array)
    c_maxw = [None]*len_arr2
    pressure_max = [None]*len_arr2
    # Index of the pressure level with the peak contribution per wavelength.
    for j in range(len_arr2):
        c_maxw[j] = np.argmax(c_array[j])
    c_array_t = np.transpose(c_array)
    # Pressure data
    p_data = np.loadtxt('core_{}/nemesis.prf'.format(i+1),skiprows=15,usecols=(1))
    p_data *= 1013.25
    # Wavelengths
    w_data = np.loadtxt('core_{}/nemesis.mre'.format(i+1),skiprows=5,usecols=(1),max_rows=n_wave)
    # Contribution-function heatmap (pressure axis inverted, log scale).
    plt.pcolor(w_data,p_data,c_array_t,cmap='inferno')
    plt.colorbar()
    plt.ylim(max(p_data),min(p_data))
    plt.yscale('log')
    plt.xlabel('Wavelength ($\mu m$)')
    plt.ylabel('Pressure ($atm$)')
    plt.savefig('contribution_plots/core_{}_contribution.png'.format(i+1))
    if show == True:
        plt.show()
    plt.clf()
    # NOTE(review): new_w_array/new_p_array are built but never used below.
    new_w_array = [None]*120
    new_p_array = [None]*120
    for j in range(120):
        new_w_array[j] = w_data
        new_p_array[j] = [p_data[j]]*n_wave
    # Pressure of maximum contribution as a function of wavelength.
    for j in range(len_arr2):
        pressure_max[j] = p_data[c_maxw[j]]
    plt.plot(w_data,pressure_max,'k-',linewidth=0.5)
    plt.xlabel('Wavelength ($\mu m$)')
    plt.ylabel('Pressure($atm$)')
    plt.ylim(max(pressure_max),min(pressure_max))
    plt.grid()
    plt.yscale('log')
    plt.savefig('contribution_plots/core_{}_max_pressure.png'.format(i+1))
    if show == True:
        plt.show()
    plt.clf()
#=============================================================
print('End of script\n')
| JHarkett/MIRI-code | plot_contribution.py | plot_contribution.py | py | 2,611 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "glob.glob",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 4... |
9502216060 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# ====#====#====#====
# Author: wangben
# CreatDate: 2020/9/23 16:04
# Filename:read_log.py
# Function:历史记录
# ====#====#====#====
import json
import sys
from PyQt5.QtWidgets import QMainWindow, QApplication
from UI.GUI_style import log_MainWindow
from common._util import log_txt
class MY_window3(log_MainWindow,QMainWindow):
    """History-log window: reads log_txt and shows each record in a text browser."""
    def __init__(self):
        # NOTE(review): super(log_MainWindow, self) skips log_MainWindow in the
        # MRO; the conventional call would be super(MY_window3, self) — confirm
        # this is intentional.
        super(log_MainWindow, self).__init__()
        self.setupUi(self)
        self.send_txt()
    def send_txt(self):
        """Format every log record and display the result in the text browser."""
        data = self._set_txt()
        data_list = []
        for i in data:
            data = ('   Time : %s;'+'\n'+'  Token : %s;'+'\n'+'PCtoken : %s;'+'\n'+'    URL : %s'+'\n')%(i['time'],i['token'],i['PCtoken'],i['URL'])
            data_list.append(data)
        # print(data)
        self.textBrowser.setText(''.join(data_list))
    def _set_txt(self):
        """Read log_txt and return a list of parsed records (one dict per line)."""
        with open(log_txt, 'r')as f:
            data = f.readlines()
        data_list = []
        for line in data:
            line = line.strip('\n')
            # Lines are Python-style dicts with single quotes; convert to JSON.
            data = line.replace("'", '"')
            data_josn = json.loads(data)
            data_list.append(data_josn)
        return data_list
    def open(self): # callers use this open() wrapper to show the window
        self.show()
    def shut(self): # callers use this shut() wrapper to close the window
        self.close()
if __name__ == '__main__':
app = QApplication(sys.argv)
mainWindow = MY_window3()
mainWindow.show()
sys.exit(app.exec_())
# def _set_txt():
# with open(log_txt,'r')as f:
# data = f.readlines()
# data_list = []
# for line in data:
# line = line.strip('\n')
# data = line.replace("'", '"')
# data_josn = json.loads(data)
# data_list.append(data_josn)
# _set_txt() | falling3wood/pyexe | common/read_log.py | read_log.py | py | 1,867 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "UI.GUI_style.log_MainWindow",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "UI.GUI_style.log_MainWindow",
"line_number": 18,
"usage_type": "argument"
},
... |
4014355062 | # 문제 출처 : https://programmers.co.kr/learn/courses/30/lessons/68645
# 참고 블로그 : https://inspirit941.tistory.com/entry/Python-프로그래머스-삼각-달팽이-Level-2
# chain 사용법 : https://python.flowdas.com/library/itertools.html
from itertools import chain
def solution(n):
    """Fill a size-n triangular snail and return its values in reading order.

    The snail descends the left edge, runs right along the bottom, then
    climbs the diagonal, shrinking one step per leg.
    """
    grid = [[0] * n for _ in range(n)]
    row, col = -1, 0
    value = 1
    for leg in range(n):
        for _ in range(leg, n):
            direction = leg % 3
            if direction == 0:    # heading down the left edge
                row += 1
            elif direction == 1:  # heading right along the bottom
                col += 1
            else:                 # heading up-left along the diagonal
                row -= 1
                col -= 1
            grid[row][col] = value
            value += 1
    answer = [cell for cell in chain(*grid) if cell != 0]
    print(answer)
    return answer
| ThreeFive85/Algorithm | Programmers/level2/triangleSnail/triangle_snail.py | triangle_snail.py | py | 814 | python | ko | code | 1 | github-code | 36 | [
{
"api_name": "itertools.chain",
"line_number": 23,
"usage_type": "call"
}
] |
23771931621 | import requests
# root-me stored-XSS training challenge (ch19).
url = 'http://challenge01.root-me.org/web-client/ch19/?section=admin'
# Step 1: post a message whose Cookie header breaks out of the stored value
# and exfiltrates document.cookie to a Burp Collaborator endpoint.
headers = {"Content-Type":"application/x-www-form-urlencoded",
           "Cookie":'''"><script>document.location='http://llartngjebaflscnrijk19it2k8dw2.burpcollaborator.net?'.concat(document.cookie)</script>'''}
data = {"titre":"abc",
        "message":"<script>alert(1)</script>"}
x = requests.post(url,headers=headers,data=data)
# Step 2: replay the admin cookie captured out-of-band to read the admin page.
headers = {
    "Cookie":"invite; ADMIN_COOKIE=SY2USDIH78TF3DFU78546TE7F"
}
x = requests.get(url,headers=headers)
print(x.text)
| huydoppa/CTF | root_me/storedxss2.py | storedxss2.py | py | 550 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.post",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 18,
"usage_type": "call"
}
] |
74876356585 | import tweepy
import csv
from textblob import TextBlob
import operator
import copy
def checkminimum(test):
    """Return the index of the smallest element of *test*.

    *test* holds the intra-cluster distances for one data point (see the
    first loop of kmeans); ties resolve to the first occurrence, matching
    the original strictly-less-than scan.

    Raises:
        ValueError: if *test* is empty (the original raised IndexError).
    """
    # Idiomatic argmin: min over the indices keyed by the element values.
    return min(range(len(test)), key=test.__getitem__)
def kmeans(reviews):
    """Cluster the 1-D values in *reviews* into k=3 groups (good/neutral/bad).

    Lloyd-style iteration: seed the medians with the first k values, assign
    each value to its nearest median, recompute the medians as cluster means
    and repeat until the assignment stops changing.

    NOTE(review): if a cluster ever becomes empty, the mean computation
    below divides by zero — confirm the inputs guarantee non-empty clusters.
    Returns a dict {cluster index: list of member values}.
    """
    medians=[]
    clusters={}
    clusters2={}
    test=[]
    k = 3 #3 clusters, good, neutral, bad
    count=0
    # Seed the medians with the first k values of the input.
    for i in reviews:
        if count<k:
            medians.append(i)
            count=count+1
    for i in range(k):
        clusters[i]=[] #empty list holds your members of each class
    count=0
    test=[]
    # Initial assignment: each value joins the cluster of its nearest median.
    for i in reviews:
        for x in range(k):
            test.append(abs(i-medians[x]))
        pos = checkminimum(test)
        clusters[pos].append(i)
        test=[]
    #print(clusters)
    while True:
        # Recompute each median as its cluster's mean, then reassign.
        medians=[]
        for x in clusters:
            b = sum(clusters[x])/len(clusters[x]) #centroid of the cluster
            medians.append(b)
        for i in range(k):
            clusters[i]=[] #empty list holds your members of each class
        for i in reviews:
            for x in range(k):
                test.append(abs(i-medians[x]))
            pos = checkminimum(test)
            clusters[pos].append(i)
            test=[]
        # Converged when the assignment matches the previous iteration.
        if clusters2==clusters:
            break
        #print(clusters)
        for i in range(k):
            clusters2[i]=[] #empty list holds your members of each class
        clusters2=copy.deepcopy(clusters)
    return clusters
consumerKey='xxx'
consumerSecret='xxx'
accessToken='xxx'
accessTokenSecret='xxx' #developers.twitter.com is where you can get these keys
#read and write permissions from Twitter
auth = tweepy.OAuthHandler(consumerKey, consumerSecret)
auth.set_access_token(accessToken, accessTokenSecret)
api = tweepy.API(auth, wait_on_rate_limit=True) #wait_on_rate_limit states whether or not automatically wait for rate limits to replenish (dynamic)
csvFile = open('tweets.csv', 'a') #a is for append
csvWriter = csv.writer(csvFile) #we use a writer because it will write a tweet on each row of the CSV file!
hashtags = ["#s10", "#samsungs10"]
for x in hashtags:
for t in tweepy.Cursor(api.search, q=x, count=1000, lang="en", since="2019-02-28", until="2019-03-08").items():
#as per GSMArena, the Samsung S10 was announced in Feb 2019
#print(t.created_at, t.text)
csvWriter.writerow([t.created_at, t.text.encode('utf-8')])
csvFile.close()
csvFile = open('tweets.csv', 'r')
csvReader = csv.reader(csvFile, delimiter = '\n', quoting = csv.QUOTE_NONE)
words={}
spam = ["lucky", "LUCKY", "Lucky", "offer", "OFFER", "Offer", "WINNER", "Winner", "winner", "win", "WIN","Win", "giveaway", "Giveaway"]
reviews_polarity=[]
reviews=[]
flag = 0
for x in csvReader:
flag=0
test = TextBlob(str(x))
#print(test)
for y in spam:
if y in test:
flag=1
if flag==0:
reviews_polarity.append(test.sentiment.polarity)
reviews.append(str(x))
print(x)
print(reviews_polarity)
#print(reviews)
#applying k means on reviews to cluster good and bad reviews
#result_one=kmeans(reviews)
result_two=kmeans(reviews_polarity) #gives us good, bad, and neutral clusters. data centric clusters so as to see how the distribution is.
#print(reviews)
for x in reviews:
print(x)
print("\n")
print(result_two)
#cluster result_two[0] holds all the positive sentiment tweet polarities, result_two[1] holds all the neutral tweets, result_two[2] holds negative tweets
print(len(result_two[0]))
print(len(result_two[1]))
print(len(result_two[2]))
rs_two=[]
for x in result_two[2]:
if x!=0:
rs_two.append(x)
result_two[2]=rs_two
print(len(result_two[0]))
print(len(result_two[1]))
print(len(result_two[2])) | anirudhkamath/sentimentAnalyser | twitter-extract-revised.py | twitter-extract-revised.py | py | 3,704 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "copy.deepcopy",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "tweepy.OAuthHandler",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 74,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_n... |
74430361063 | from flask import Flask
from flask import render_template, request
import subprocess, os, base64
from subprocess import PIPE, Popen
app = Flask(__name__)
@app.route('/')
def Compiler():
    """Render the compiler home page with the stdin checkbox unchecked."""
    return render_template('home.html', check='')
@app.route('/submit', methods=['GET', 'POST'])
def submit():
    """Handle the compile form: compile/run the posted C++ code and re-render.

    NOTE(review): a GET request falls through and returns None, which Flask
    rejects — confirm whether GET should redirect to the home page instead.
    """
    if request.method == 'POST':
        code = request.form['code']
        inp = request.form['input']
        chk = request.form.get('check')
        if chk == '1':
            # Checkbox was checked: keep the input field and leave it checked.
            check = 'checked'
        else:
            # Checkbox was unchecked: clear the input field.
            inp = ""
            check = ''
        output = cpp_complier_output(code, inp, chk)
        return render_template('home.html', code=code, input=inp, output=output, check=check)
def cpp_complier_output(code, inp, chk):
    """Compile ``code`` with g++ and run it, returning errors or program output.

    Args:
        code: C++ source text.
        inp:  text fed to the program's stdin when ``chk == '1'``.
        chk:  '1' when stdin should be supplied.

    Returns:
        The compiler diagnostics on a failed build, otherwise the program's
        stdout, both decoded with windows-1251.
    """
    # BUG FIX: always (re)write the source.  The old `if not exists` guard
    # meant a stale test.cpp left behind by a crashed run silently shadowed
    # the code the user just submitted.
    with open('test.cpp', 'w') as file_test:
        file_test.write(code)
    application = "C:\\gcc\\bin\\g++.exe"
    result = subprocess.run([application, "test.cpp", "-o", "new.exe"], stderr=PIPE)
    if result.returncode != 0:
        # Build failed: clean up and surface diagnostics as text (previously
        # raw bytes were returned, rendering as b'...' in the page).
        if os.path.exists('test.cpp'):
            os.remove('test.cpp')
        if os.path.exists('new.exe'):
            os.remove('new.exe')
        return result.stderr.decode("windows-1251")
    if chk == '1':
        r = subprocess.run(["new"], input=str.encode(inp),
                           stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    else:
        r = subprocess.run(["new"], stdout=subprocess.PIPE, stderr=subprocess.PIPE)
    if os.path.exists('test.cpp'):
        os.remove('test.cpp')
    if os.path.exists('new.exe'):
        os.remove('new.exe')
    return r.stdout.decode("windows-1251")
if __name__ == '__main__':
    # Start Flask's development server when run directly.
    app.run()
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask.request.method",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.requ... |
496284447 | import os
import pytest
from dagster_bash import bash_command_solid, bash_script_solid
from dagster import DagsterExecutionStepExecutionError, composite_solid, execute_solid
def test_bash_command_solid():
    """An inline bash command sees configured env vars and yields its stdout."""
    solid = bash_command_solid('echo "this is a test message: $MY_ENV_VAR"', name='foobar')
    result = execute_solid(
        solid,
        environment_dict={'solids': {'foobar': {'config': {'env': {'MY_ENV_VAR': 'foobar'}}}}},
    )
    assert result.output_values == {'result': 'this is a test message: foobar'}
def test_bash_command_retcode():
    """A non-zero exit code raises DagsterExecutionStepExecutionError with step context."""
    with pytest.raises(DagsterExecutionStepExecutionError) as exc_info:
        execute_solid(bash_command_solid('exit 1'))

    assert 'step key: "bash_solid.compute"' in str(exc_info.value)
    assert 'solid invocation: "bash_solid"' in str(exc_info.value)
    assert 'solid definition: "bash_solid"' in str(exc_info.value)
def test_bash_command_buffer_logs():
    """BUFFER output_logging returns every stdout line as one buffered string."""
    solid = bash_command_solid('for i in 1 2 3 4 5; do echo "hello ${i}"; done', name='foobar')
    result = execute_solid(
        solid,
        environment_dict={
            'solids': {
                'foobar': {'config': {'output_logging': 'BUFFER', 'env': {'MY_ENV_VAR': 'foobar'}}}
            }
        },
    )
    assert result.output_values == {'result': 'hello 1\nhello 2\nhello 3\nhello 4\nhello 5\n'}
def test_bash_script_solid():
    """A script-file solid runs the adjacent test.sh with the configured env."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    solid = bash_script_solid(os.path.join(script_dir, 'test.sh'), name='foobar')
    result = execute_solid(
        solid,
        environment_dict={'solids': {'foobar': {'config': {'env': {'MY_ENV_VAR': 'foobar'}}}}},
    )
    assert result.output_values == {'result': 'this is a test message: foobar'}
def test_bash_script_solid_no_config():
    """Without config the script runs with no extra environment variables."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    solid = bash_script_solid(os.path.join(script_dir, 'test.sh'), name='foobar')
    result = execute_solid(solid)
    assert result.output_values == {'result': 'this is a test message:'}
def test_bash_script_solid_no_config_composite():
    """The solid also works nested in a composite with an empty config mapping."""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    solid = bash_script_solid(os.path.join(script_dir, 'test.sh'), name='foobar')

    @composite_solid(config={}, config_fn=lambda cfg: {})
    def composite():
        return solid()

    result = execute_solid(composite)
    assert result.output_values == {'result': 'this is a test message:'}
| helloworld/continuous-dagster | deploy/dagster_modules/libraries/dagster-bash/dagster_bash_tests/test_solids.py | test_solids.py | py | 2,443 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "dagster_bash.bash_command_solid",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dagster.execute_solid",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pytest.raises",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "d... |
16858298223 | from collections import OrderedDict
# Tally quantities per item name, preserving first-appearance order
# (OrderedDict keeps insertion order; required on Python < 3.7 dicts).
order = OrderedDict()
for i in range(int(input())):
    a = input().split()
    # All tokens but the last form the item name; the last token is a quantity.
    order[' '.join(a[:-1])] = order.get(' '.join(a[:-1]), 0) + int(a[-1])
for i, j in order.items():
    print(i, j)
'''
input:
9
BANANA FRIES 12
POTATO CHIPS 30
APPLE JUICE 10
CANDY 5
APPLE JUICE 10
CANDY 5
CANDY 5
CANDY 5
POTATO CHIPS 30
'''
'''
output:
BANANA FRIES 12
POTATO CHIPS 60
APPLE JUICE 20
CANDY 20
'''
{
"api_name": "collections.OrderedDict",
"line_number": 3,
"usage_type": "call"
}
] |
21604577101 | import base64
from collections import OrderedDict
import datetime
from google.cloud._helpers import UTC
from google.cloud._helpers import _date_from_iso8601_date
from google.cloud._helpers import _datetime_from_microseconds
from google.cloud._helpers import _datetime_to_rfc3339
from google.cloud._helpers import _RFC3339_NO_FRACTION
from google.cloud._helpers import _time_from_iso8601_time_naive
from google.cloud._helpers import _to_bytes
def _not_null(value, field):
    """Return True when ``value`` should be coerced for ``field``.

    A cell is converted when it holds a value, or when its field is not
    allowed to be NULL.
    """
    if value is not None:
        return True
    return field.mode != 'NULLABLE'
def _int_from_json(value, field):
    """Coerce 'value' to an int, if set or not nullable."""
    # Inlined null check: skip conversion only for a missing NULLABLE cell.
    if value is None and field.mode == 'NULLABLE':
        return None
    return int(value)
def _float_from_json(value, field):
    """Coerce 'value' to a float, if set or not nullable."""
    # Inlined null check: skip conversion only for a missing NULLABLE cell.
    if value is None and field.mode == 'NULLABLE':
        return None
    return float(value)
def _bool_from_json(value, field):
    """Coerce 'value' to a bool, if set or not nullable."""
    if value is None and field.mode == 'NULLABLE':
        return None
    # BigQuery renders booleans as 'true'/'false'; accept any casing plus
    # the legacy 't'/'1' spellings.
    return value.lower() in ('t', 'true', '1')
def _string_from_json(value, _):
    """NOOP string -> string coercion (the field argument is unused)."""
    return value
def _bytes_from_json(value, field):
    """Base64-decode ``value``, if set or not nullable.

    Uses :func:`base64.b64decode`: ``base64.decodestring`` was deprecated in
    Python 3 and removed in 3.9.  Both ignore embedded newlines (b64decode
    discards non-alphabet characters by default), so behavior is unchanged.
    """
    if _not_null(value, field):
        return base64.b64decode(_to_bytes(value))
def _timestamp_from_json(value, field):
    """Coerce 'value' to a datetime, if set or not nullable."""
    if not _not_null(value, field):
        return None
    # ``value`` is seconds since the epoch (UTC) with microsecond precision.
    return _datetime_from_microseconds(1e6 * float(value))
def _datetime_from_json(value, field):
    """Coerce 'value' to a datetime, if set or not nullable."""
    if not _not_null(value, field):
        return None
    # ``value`` arrives as 'YYYY-MM-DDTHH:MM:SS' (RFC 3339, no fraction).
    return datetime.datetime.strptime(value, _RFC3339_NO_FRACTION)
def _date_from_json(value, field):
    """Coerce 'value' to a datetime date, if set or not nullable"""
    if not _not_null(value, field):
        return None
    # ``value`` arrives as 'YYYY-MM-DD'.
    return _date_from_iso8601_date(value)
def _time_from_json(value, field):
    """Coerce 'value' to a datetime time, if set or not nullable"""
    if not _not_null(value, field):
        return None
    # ``value`` arrives as 'HH:MM:SS'.
    return _time_from_iso8601_time_naive(value)
def _record_from_json(value, field):
    """Coerce 'value' to a mapping, if set or not nullable."""
    if not _not_null(value, field):
        return None
    record = {}
    # Pair each sub-field of the RECORD with its JSON cell; REPEATED
    # sub-fields convert every element of the cell's value list.
    for subfield, cell in zip(field.fields, value['f']):
        converter = _CELLDATA_FROM_JSON[subfield.field_type]
        if subfield.mode == 'REPEATED':
            converted = [converter(item['v'], subfield) for item in cell['v']]
        else:
            converted = converter(cell['v'], subfield)
        record[subfield.name] = converted
    return record
# Maps BigQuery field types (both legacy and standard SQL names) to the
# converter applied when reading JSON cell data.
_CELLDATA_FROM_JSON = {
    'INTEGER': _int_from_json,
    'INT64': _int_from_json,
    'FLOAT': _float_from_json,
    'FLOAT64': _float_from_json,
    'BOOLEAN': _bool_from_json,
    'BOOL': _bool_from_json,
    'STRING': _string_from_json,
    'BYTES': _bytes_from_json,
    'TIMESTAMP': _timestamp_from_json,
    'DATETIME': _datetime_from_json,
    'DATE': _date_from_json,
    'TIME': _time_from_json,
    'RECORD': _record_from_json,
}
def _int_to_json(value):
    """Coerce 'value' to a JSON-compatible representation."""
    # The REST API expects 64-bit integers as decimal strings.
    return str(value) if isinstance(value, int) else value
def _float_to_json(value):
    """Coerce 'value' to a JSON-compatible representation (floats pass through)."""
    return value
def _bool_to_json(value):
    """Coerce 'value' to a JSON-compatible representation."""
    # The REST API wants lowercase 'true'/'false' strings for booleans;
    # anything else passes through untouched.
    if isinstance(value, bool):
        return 'true' if value else 'false'
    return value
def _bytes_to_json(value):
    """Coerce 'value' to a JSON-compatible representation.

    Uses :func:`base64.encodebytes`: ``base64.encodestring`` was deprecated
    in Python 3 and removed in 3.9.  ``encodebytes`` is its exact
    replacement (same 76-column line-wrapped output with trailing newline).
    """
    if isinstance(value, bytes):
        value = base64.encodebytes(value)
    return value
def _timestamp_to_json(value):
    """Coerce 'value' to a JSON-compatible representation."""
    if not isinstance(value, datetime.datetime):
        return value
    if value.tzinfo not in (None, UTC):
        # Normalize any non-UTC aware datetime to naive UTC.
        value = value.replace(tzinfo=None) - value.utcoffset()
    # Format as 'YYYY-MM-DD HH:MM:SS[.ffffff]+00:00' (space separator).
    return '%s %s+00:00' % (value.date().isoformat(), value.time().isoformat())
def _datetime_to_json(value):
    """Coerce 'value' to a JSON-compatible representation."""
    # Aware-or-naive datetimes become RFC 3339 strings; other values pass through.
    if isinstance(value, datetime.datetime):
        return _datetime_to_rfc3339(value)
    return value
def _date_to_json(value):
    """Coerce 'value' to a JSON-compatible representation."""
    # Dates serialize as ISO-8601 'YYYY-MM-DD'; other values pass through.
    return value.isoformat() if isinstance(value, datetime.date) else value
def _time_to_json(value):
    """Coerce 'value' to a JSON-compatible representation."""
    # Times serialize as ISO-8601 'HH:MM:SS[.ffffff]'; other values pass through.
    return value.isoformat() if isinstance(value, datetime.time) else value
# Maps scalar parameter types to the converter used when writing values to
# the API.  Types without an entry (e.g. STRING) are sent unconverted.
_SCALAR_VALUE_TO_JSON = {
    'INTEGER': _int_to_json,
    'INT64': _int_to_json,
    'FLOAT': _float_to_json,
    'FLOAT64': _float_to_json,
    'BOOLEAN': _bool_to_json,
    'BOOL': _bool_to_json,
    'BYTES': _bytes_to_json,
    'TIMESTAMP': _timestamp_to_json,
    'DATETIME': _datetime_to_json,
    'DATE': _date_to_json,
    'TIME': _time_to_json,
}
def _row_from_json(row, schema):
    """Convert JSON row data to row with appropriate types.

    :type row: dict
    :param row: A JSON response row to be converted.

    :type schema: tuple
    :param schema: A tuple of
                   :class:`~google.cloud.bigquery.schema.SchemaField`.

    :rtype: tuple
    :returns: A tuple of data converted to native types.
    """
    converted = []
    # Each schema field lines up positionally with a cell in row['f'].
    for field, cell in zip(schema, row['f']):
        convert = _CELLDATA_FROM_JSON[field.field_type]
        if field.mode == 'REPEATED':
            converted.append([convert(item['v'], field)
                              for item in cell['v']])
        else:
            converted.append(convert(cell['v'], field))
    return tuple(converted)
def _rows_from_json(rows, schema):
    """Convert each JSON row in ``rows`` to a tuple of native values."""
    return [_row_from_json(row, schema) for row in rows]
class _ConfigurationProperty(object):
    """Base property implementation.

    Values will be stored on a `_configuration` helper attribute of the
    property's job instance.

    :type name: str
    :param name: name of the property
    """

    def __init__(self, name):
        self.name = name
        # Attribute name used on the owner's ``_configuration`` object.
        self._backing_name = '_%s' % (self.name,)

    def __get__(self, instance, owner):
        """Descriptor protocol: accessor"""
        if instance is None:
            return self
        return getattr(instance._configuration, self._backing_name)

    def _validate(self, value):
        """Subclasses override to impose validation policy."""
        pass

    def __set__(self, instance, value):
        """Descriptor protocol: mutator"""
        self._validate(value)
        setattr(instance._configuration, self._backing_name, value)

    def __delete__(self, instance):
        """Descriptor protocol: deleter"""
        delattr(instance._configuration, self._backing_name)
class _TypedProperty(_ConfigurationProperty):
    """Property implementation: validates based on value type.

    :type name: str
    :param name: name of the property

    :type property_type: type or sequence of types
    :param property_type: type to be validated
    """

    def __init__(self, name, property_type):
        super(_TypedProperty, self).__init__(name)
        self.property_type = property_type

    def _validate(self, value):
        """Ensure that 'value' is of the appropriate type.

        :raises: ValueError on a type mismatch.
        """
        # Guard clause: accept any instance of the configured type(s).
        if isinstance(value, self.property_type):
            return
        raise ValueError('Required type: %s' % (self.property_type,))
class _EnumProperty(_ConfigurationProperty):
    """Pseudo-enumeration class.

    Subclasses must define ``ALLOWED`` as a class-level constant:  it must
    be a sequence of strings.

    :type name: str
    :param name: name of the property.
    """
    def _validate(self, value):
        """Check that ``value`` is one of the allowed values.

        :raises: ValueError if value is not allowed.
        """
        if value not in self.ALLOWED:
            # BUG FIX: the original omitted the '%' operator, so the two
            # adjacent string literals concatenated and ', '.join() used the
            # whole format string as the separator, producing garbage like
            # "aPass one of: %s, b" instead of "Pass one of: a, b".
            raise ValueError('Pass one of: %s' % ', '.join(self.ALLOWED))
class UDFResource(object):
    """Describe a single user-defined function (UDF) resource.

    :type udf_type: str
    :param udf_type: the type of the resource ('inlineCode' or 'resourceUri')

    :type value: str
    :param value: the inline code or resource URI.

    See
    https://cloud.google.com/bigquery/user-defined-functions#api
    """
    def __init__(self, udf_type, value):
        self.udf_type = udf_type
        self.value = value

    def __eq__(self, other):
        # Equal only when both the kind and payload match; short-circuit on
        # the kind so ``other.value`` is only touched when kinds agree.
        if self.udf_type != other.udf_type:
            return False
        return self.value == other.value
class UDFResourcesProperty(object):
    """Custom property type, holding :class:`UDFResource` instances."""

    def __get__(self, instance, owner):
        """Descriptor protocol: accessor"""
        if instance is None:
            return self
        # Return a fresh list so callers cannot mutate the stored tuple.
        return list(instance._udf_resources)

    def __set__(self, instance, value):
        """Descriptor protocol: mutator"""
        if not all(isinstance(item, UDFResource) for item in value):
            raise ValueError("udf items must be UDFResource")
        instance._udf_resources = tuple(value)
class AbstractQueryParameter(object):
    """Base class for named / positional query parameters.
    """
    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct parameter from JSON resource.

        :type resource: dict
        :param resource: JSON mapping of parameter

        :rtype: :class:`ScalarQueryParameter`
        """
        raise NotImplementedError

    def to_api_repr(self):
        """Construct JSON API representation for the parameter.

        :rtype: dict
        """
        raise NotImplementedError
class ScalarQueryParameter(AbstractQueryParameter):
    """Named / positional query parameters for scalar values.

    :type name: str or None
    :param name: Parameter name, used via ``@foo`` syntax.  If None, the
                 parameter can only be addressed via position (``?``).

    :type type_: str
    :param type_: name of parameter type.  One of 'STRING', 'INT64',
                  'FLOAT64', 'BOOL', 'TIMESTAMP', 'DATETIME', or 'DATE'.

    :type value: str, int, float, bool, :class:`datetime.datetime`, or
                 :class:`datetime.date`.
    :param value: the scalar parameter value.
    """
    def __init__(self, name, type_, value):
        self.name = name
        self.type_ = type_
        self.value = value

    @classmethod
    def positional(cls, type_, value):
        """Factory for positional parameters.

        :type type_: str
        :param type_:
            name of parameter type.  One of 'STRING', 'INT64',
            'FLOAT64', 'BOOL', 'TIMESTAMP', 'DATETIME', or 'DATE'.

        :type value: str, int, float, bool, :class:`datetime.datetime`, or
                     :class:`datetime.date`.
        :param value: the scalar parameter value.

        :rtype: :class:`ScalarQueryParameter`
        :returns: instance without name
        """
        return cls(None, type_, value)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct parameter from JSON resource.

        :type resource: dict
        :param resource: JSON mapping of parameter

        :rtype: :class:`ScalarQueryParameter`
        :returns: instance
        """
        name = resource.get('name')
        type_ = resource['parameterType']['type']
        value = resource['parameterValue']['value']
        # Reuse the cell-data converters; ``None`` stands in for the field
        # (converters only consult it when the value itself is None).
        converted = _CELLDATA_FROM_JSON[type_](value, None)
        return cls(name, type_, converted)

    def to_api_repr(self):
        """Construct JSON API representation for the parameter.

        :rtype: dict
        :returns: JSON mapping
        """
        value = self.value
        converter = _SCALAR_VALUE_TO_JSON.get(self.type_)
        if converter is not None:
            value = converter(value)
        resource = {
            'parameterType': {
                'type': self.type_,
            },
            'parameterValue': {
                'value': value,
            },
        }
        # Positional parameters omit the 'name' key entirely.
        if self.name is not None:
            resource['name'] = self.name
        return resource
class ArrayQueryParameter(AbstractQueryParameter):
    """Named / positional query parameters for array values.

    :type name: str or None
    :param name: Parameter name, used via ``@foo`` syntax.  If None, the
                 parameter can only be addressed via position (``?``).

    :type array_type: str
    :param array_type:
        name of type of array elements.  One of `'STRING'`, `'INT64'`,
        `'FLOAT64'`, `'BOOL'`, `'TIMESTAMP'`, or `'DATE'`.

    :type values: list of appropriate scalar type.
    :param values: the parameter array values.
    """
    def __init__(self, name, array_type, values):
        self.name = name
        self.array_type = array_type
        self.values = values

    @classmethod
    def positional(cls, array_type, values):
        """Factory for positional parameters.

        :type array_type: str
        :param array_type:
            name of type of array elements.  One of `'STRING'`, `'INT64'`,
            `'FLOAT64'`, `'BOOL'`, `'TIMESTAMP'`, or `'DATE'`.

        :type values: list of appropriate scalar type
        :param values: the parameter array values.

        :rtype: :class:`ArrayQueryParameter`
        :returns: instance without name
        """
        return cls(None, array_type, values)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct parameter from JSON resource.

        :type resource: dict
        :param resource: JSON mapping of parameter

        :rtype: :class:`ArrayQueryParameter`
        :returns: instance
        """
        name = resource.get('name')
        array_type = resource['parameterType']['arrayType']['type']
        values = [
            value['value']
            for value
            in resource['parameterValue']['arrayValues']]
        # Convert each element with the registered cell-data converter.
        converted = [
            _CELLDATA_FROM_JSON[array_type](value, None) for value in values]
        return cls(name, array_type, converted)

    def to_api_repr(self):
        """Construct JSON API representation for the parameter.

        :rtype: dict
        :returns: JSON mapping
        """
        values = self.values
        converter = _SCALAR_VALUE_TO_JSON.get(self.array_type)
        if converter is not None:
            values = [converter(value) for value in values]
        resource = {
            'parameterType': {
                'type': 'ARRAY',
                'arrayType': {
                    'type': self.array_type,
                },
            },
            'parameterValue': {
                'arrayValues': [{'value': value} for value in values],
            },
        }
        # Positional parameters omit the 'name' key entirely.
        if self.name is not None:
            resource['name'] = self.name
        return resource
class StructQueryParameter(AbstractQueryParameter):
    """Named / positional query parameters for struct values.

    :type name: str or None
    :param name: Parameter name, used via ``@foo`` syntax.  If None, the
                 parameter can only be addressed via position (``?``).

    :type sub_params: tuple of :class:`ScalarQueryParameter`
    :param sub_params: the sub-parameters for the struct
    """
    def __init__(self, name, *sub_params):
        self.name = name
        # Preserve sub-parameter declaration order for the API representation.
        self.struct_types = OrderedDict(
            (sub.name, sub.type_) for sub in sub_params)
        self.struct_values = {sub.name: sub.value for sub in sub_params}

    @classmethod
    def positional(cls, *sub_params):
        """Factory for positional parameters.

        :type sub_params: tuple of :class:`ScalarQueryParameter`
        :param sub_params: the sub-parameters for the struct

        :rtype: :class:`StructQueryParameter`
        :returns: instance without name
        """
        return cls(None, *sub_params)

    @classmethod
    def from_api_repr(cls, resource):
        """Factory: construct parameter from JSON resource.

        :type resource: dict
        :param resource: JSON mapping of parameter

        :rtype: :class:`StructQueryParameter`
        :returns: instance
        """
        name = resource.get('name')
        instance = cls(name)
        types = instance.struct_types
        for item in resource['parameterType']['structTypes']:
            types[item['name']] = item['type']['type']
        struct_values = resource['parameterValue']['structValues']
        for key, value in struct_values.items():
            type_ = types[key]
            value = value['value']
            # Convert each member with the registered cell-data converter.
            converted = _CELLDATA_FROM_JSON[type_](value, None)
            instance.struct_values[key] = converted
        return instance

    def to_api_repr(self):
        """Construct JSON API representation for the parameter.

        :rtype: dict
        :returns: JSON mapping
        """
        types = [
            {'name': key, 'type': {'type': value}}
            for key, value in self.struct_types.items()
        ]
        values = {}
        for name, value in self.struct_values.items():
            converter = _SCALAR_VALUE_TO_JSON.get(self.struct_types[name])
            if converter is not None:
                value = converter(value)
            values[name] = {'value': value}
        resource = {
            'parameterType': {
                'type': 'STRUCT',
                'structTypes': types,
            },
            'parameterValue': {
                'structValues': values,
            },
        }
        # Positional parameters omit the 'name' key entirely.
        if self.name is not None:
            resource['name'] = self.name
        return resource
class QueryParametersProperty(object):
    """Custom property type, holding query parameter instances."""

    def __get__(self, instance, owner):
        """Descriptor protocol:  accessor

        :type instance: :class:`QueryParametersProperty`
        :param instance: instance owning the property (None if accessed via
                         the class).

        :type owner: type
        :param owner: the class owning the property.

        :rtype: list of instances of classes derived from
                :class:`AbstractQueryParameter`.
        :returns: the descriptor, if accessed via the class, or the instance's
                  query parameters.
        """
        if instance is None:
            return self
        return list(instance._query_parameters)

    def __set__(self, instance, value):
        """Descriptor protocol:  mutator

        :type instance: :class:`QueryParametersProperty`
        :param instance: instance owning the property (None if accessed via
                         the class).

        :type value: list of instances of classes derived from
                     :class:`AbstractQueryParameter`.
        :param value: new query parameters for the instance.
        """
        if not all(isinstance(u, AbstractQueryParameter) for u in value):
            raise ValueError(
                "query parameters must be derived from AbstractQueryParameter")
        instance._query_parameters = tuple(value)
| a0x8o/kafka | sdks/python/.tox/lint/lib/python2.7/site-packages/google/cloud/bigquery/_helpers.py | _helpers.py | py | 19,540 | python | en | code | 59 | github-code | 36 | [
{
"api_name": "base64.decodestring",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "google.cloud._helpers._to_bytes",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "google.cloud._helpers._datetime_from_microseconds",
"line_number": 52,
"usage_type": ... |
295139608 | import sqlite3
# Open the coffee-machine database and pull product and supply rows.
conn = sqlite3.connect('coffee_main.db')
c = conn.cursor()

with conn:
    c.execute("SELECT * FROM coffee_products")
    coffees = c.fetchall()
    c.execute("SELECT * FROM coffee_components")
    component_quantity = c.fetchall()

# Split component rows into parallel name / quantity lists.
# assumes coffee_components rows are (name, quantity) -- TODO confirm schema
quantity = []
name = []
for comp_name, comp_quant in component_quantity:
    quantity.append(comp_quant)
    name.append(comp_name)

# Index each product's recipe (coffee, sugar, water) by id, and collect
# prices / ids for later lookups.
coffees_dict = {}
price_list = []
coffee_id = []
for c_id, price, coffee, sugar, water in coffees:
    price_list.append(price)
    coffee_id.append(c_id)
    d = {c_id:(coffee,sugar,water)}
    coffees_dict.update(d)
print(coffee_id)

# Abort the program when any supply has run out.
for i in quantity:
    if i == 0:
        print("Your supplies of coffee/water/sugar has ended.")
        print("Please refill your supplies")
        exit()
{
"api_name": "sqlite3.connect",
"line_number": 3,
"usage_type": "call"
}
] |
31316835712 | from django.conf.urls import url
from.import views
# from django.contrib.auth.decorators import permission_required
# Route table for the banking app: account creation, authentication,
# customer operations, administration, and per-client reports.
urlpatterns = [
    url(r'^$', views.index, name='index'),
    url(r'^nova-conta$', views.novaConta, name='novaConta'),
    url(r'^cadastra-nova-conta$', views.novoUsuario, name='novoUsuario'),
    url(r'^entrar$', views.logar, name='logar'),
    url(r'^sair$', views.sair, name='sair'),
    url(r'^correntista$', views.correntista, name='correntista'),
    url(r'^operacao$', views.operacao, name='operacao'),
    url(r'^administracao$', views.administracao, name='administracao'),
    url(r'^relatorio-cliente/(?P<cliente_id>[0-9]+)/$', views.relatorioCliente, name='relatorioCliente'),
]
| Sosantos/bancoamaro | bancoamaro/urls.py | urls.py | py | 710 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.conf.urls.url",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.conf.urls.url",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.co... |
31188070789 | #!/usr/bin/env python
import json
import numpy as np
from scipy import stats
import os
import sys
from pathlib import Path
import pandas as pd
#from sklearn.cluster import KMeans
from scipy.cluster.vq import vq, kmeans2
# Choosing config file: first CLI argument overrides the sample default.
configFilename = "config-sample.json"
argCount = len(sys.argv)
if(argCount > 1):
    configFilename = sys.argv[1]

# Defining paths
outputDirectory = "output/csv"
if(not os.path.exists(outputDirectory)):
    os.makedirs(outputDirectory)

# Reading config file
with open(configFilename, "r") as fd:
    config = json.load(fd)

print("Loading time series...")
timeseriesFilename = config["tsv"]
nclus = config["nclus"]

# NOTE(review): duplicate "Loading time series..." message and duplicate
# nclus read below -- harmless, but consider cleaning up.
print("Loading time series...")
ts = pd.read_csv(timeseriesFilename,sep="\t")
# Drop columns whose values sum to zero (empty/constant-zero regions).
K = np.sum(ts, axis=0)
columns=ts.columns
ts=ts.drop(columns[np.where(K == 0)[0]],axis=1)

# z-scored time series (per row; axis passed positionally)
z = stats.zscore(np.asarray(ts),1)

print("Building edge time series...")
T, N= ts.shape
u,v = np.where(np.triu(np.ones(N),1)) # get edges (upper-triangle index pairs)

# element-wise product of time series: one co-fluctuation series per edge
ets = (z[:,u]*z[:,v])
edgeids = {"edgeid"+str(e):edge for e,edge in enumerate(zip(columns[u],columns[v]))}
nclus=int(config['nclus'])

#Clustering edge time series
#etsclus_original=KMeans(n_clusters=nclus, random_state=0).fit(ets).labels_
etsclus=kmeans2(ets,k=nclus)[1]
np.savetxt('output/csv/clustered-edge_timeseries.csv',etsclus,delimiter=',')
with open('output/label.json', 'w') as outfile:
    json.dump(edgeids,outfile)
{
"api_name": "sys.argv",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "os.path.exists",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number... |
39553556437 | # OFT network module
import math
import os
from typing import Dict, List, Optional, Tuple, Type, Union
from diffusers import AutoencoderKL
from transformers import CLIPTextModel
import numpy as np
import torch
import re
RE_UPDOWN = re.compile(r"(up|down)_blocks_(\d+)_(resnets|upsamplers|downsamplers|attentions)_(\d+)_")
class OFTModule(torch.nn.Module):
    """
    Replaces the forward method of the original Linear/Conv module instead of
    replacing the module itself (OFT: Orthogonal Finetuning).
    """

    def __init__(
        self,
        oft_name,
        org_module: torch.nn.Module,
        multiplier=1.0,
        dim=4,
        alpha=1,
    ):
        """
        dim -> num blocks
        alpha -> constraint
        """
        super().__init__()
        self.oft_name = oft_name
        self.num_blocks = dim

        # The wrapped layer's output dimension sets the rotation size.
        if "Linear" in org_module.__class__.__name__:
            out_dim = org_module.out_features
        elif "Conv" in org_module.__class__.__name__:
            out_dim = org_module.out_channels

        if type(alpha) == torch.Tensor:
            alpha = alpha.detach().numpy()
        # Norm bound on the skew-symmetric generator (COFT constraint).
        self.constraint = alpha * out_dim
        self.register_buffer("alpha", torch.tensor(alpha))

        self.block_size = out_dim // self.num_blocks
        # Zero-initialized blocks => the initial rotation R is the identity.
        self.oft_blocks = torch.nn.Parameter(torch.zeros(self.num_blocks, self.block_size, self.block_size))

        self.out_dim = out_dim
        self.shape = org_module.weight.shape
        self.multiplier = multiplier
        # Keep the original module in a plain list so it is not registered
        # as a child module (and thus not saved/trained with this one).
        self.org_module = [org_module]

    def apply_to(self):
        """Hook this module's forward in front of the wrapped module."""
        self.org_forward = self.org_module[0].forward
        self.org_module[0].forward = self.forward

    def get_weight(self, multiplier=None):
        """Build the block-diagonal orthogonal matrix R.

        ``multiplier`` interpolates between the identity (0) and the full
        learned rotation (1); defaults to ``self.multiplier``.
        """
        if multiplier is None:
            multiplier = self.multiplier

        # Skew-symmetric generator Q with its norm clamped to the constraint.
        block_Q = self.oft_blocks - self.oft_blocks.transpose(1, 2)
        norm_Q = torch.norm(block_Q.flatten())
        new_norm_Q = torch.clamp(norm_Q, max=self.constraint)
        block_Q = block_Q * ((new_norm_Q + 1e-8) / (norm_Q + 1e-8))
        I = torch.eye(self.block_size, device=self.oft_blocks.device).unsqueeze(0).repeat(self.num_blocks, 1, 1)
        # Cayley transform: R = (I + Q)(I - Q)^-1 is orthogonal.
        block_R = torch.matmul(I + block_Q, (I - block_Q).inverse())

        # BUG FIX: interpolate with the resolved local ``multiplier`` instead
        # of always using self.multiplier -- previously an explicit argument
        # (as passed by merge_to) was silently ignored.
        block_R_weighted = multiplier * block_R + (1 - multiplier) * I
        R = torch.block_diag(*block_R_weighted)

        return R

    def forward(self, x, scale=None):
        """Run the wrapped module, then rotate its output channels by R."""
        x = self.org_forward(x)
        if self.multiplier == 0.0:
            return x

        R = self.get_weight().to(x.device, dtype=x.dtype)
        if x.dim() == 4:
            # Conv output (N, C, H, W): rotate along the channel axis.
            x = x.permute(0, 2, 3, 1)
            x = torch.matmul(x, R)
            x = x.permute(0, 3, 1, 2)
        else:
            x = torch.matmul(x, R)
        return x
class OFTInfModule(OFTModule):
    """OFT module variant for inference: can be toggled off and merged."""

    def __init__(
        self,
        oft_name,
        org_module: torch.nn.Module,
        multiplier=1.0,
        dim=4,
        alpha=1,
        **kwargs,
    ):
        # no dropout for inference
        super().__init__(oft_name, org_module, multiplier, dim, alpha)
        self.enabled = True
        self.network: OFTNetwork = None

    def set_network(self, network):
        # Back-reference to the owning OFTNetwork.
        self.network = network

    def forward(self, x, scale=None):
        # When disabled, bypass the rotation entirely.
        if not self.enabled:
            return self.org_forward(x)
        return super().forward(x, scale)

    def merge_to(self, multiplier=None, sign=1):
        """Fold the rotation R (optionally negated) into the wrapped weights."""
        R = self.get_weight(multiplier) * sign

        # get org weight
        org_sd = self.org_module[0].state_dict()
        org_weight = org_sd["weight"]
        R = R.to(org_weight.device, dtype=org_weight.dtype)

        if org_weight.dim() == 4:
            # Conv weight (out, in, h, w): rotate the output-channel axis.
            weight = torch.einsum("oihw, op -> pihw", org_weight, R)
        else:
            weight = torch.einsum("oi, op -> pi", org_weight, R)

        # set weight to org_module
        org_sd["weight"] = weight
        self.org_module[0].load_state_dict(org_sd)
def create_network(
    multiplier: float,
    network_dim: Optional[int],
    network_alpha: Optional[float],
    vae: AutoencoderKL,
    text_encoder: Union[CLIPTextModel, List[CLIPTextModel]],
    unet,
    neuron_dropout: Optional[float] = None,
    **kwargs,
):
    """Build an OFTNetwork for the given U-Net (sd-scripts entry point).

    ``network_dim`` is the number of diagonal blocks and ``network_alpha``
    the norm constraint.  ``vae``, ``text_encoder`` and ``neuron_dropout``
    are accepted for interface compatibility but unused by OFT.
    """
    if network_dim is None:
        network_dim = 4  # default
    if network_alpha is None:
        network_alpha = 1.0

    # Optional switches arriving through **kwargs (network_args).
    # NOTE(review): if these values arrive as strings, bool("False") is True
    # -- confirm upstream parsing before relying on falsy strings here.
    enable_all_linear = kwargs.get("enable_all_linear", None)
    enable_conv = kwargs.get("enable_conv", None)
    if enable_all_linear is not None:
        enable_all_linear = bool(enable_all_linear)
    if enable_conv is not None:
        enable_conv = bool(enable_conv)

    network = OFTNetwork(
        text_encoder,
        unet,
        multiplier=multiplier,
        dim=network_dim,
        alpha=network_alpha,
        enable_all_linear=enable_all_linear,
        enable_conv=enable_conv,
        varbose=True,
    )
    return network
# Create network from weights for inference, weights are not loaded here (because can be merged)
def create_network_from_weights(multiplier, file, vae, text_encoder, unet, weights_sd=None, for_inference=False, **kwargs):
    """Infer OFT hyperparameters (dim, alpha, conv/linear scope) from a
    checkpoint and build a matching OFTNetwork (weights NOT loaded here)."""
    if weights_sd is None:
        if os.path.splitext(file)[1] == ".safetensors":
            from safetensors.torch import load_file, safe_open

            weights_sd = load_file(file)
        else:
            weights_sd = torch.load(file, map_location="cpu")

    # check dim, alpha and if weights have for conv2d
    dim = None
    alpha = None
    has_conv2d = None
    all_linear = None
    for name, param in weights_sd.items():
        if name.endswith(".alpha"):
            if alpha is None:
                alpha = param.item()
        else:
            if dim is None:
                # First axis of an oft_blocks tensor is the number of blocks.
                dim = param.size()[0]
            if has_conv2d is None and param.dim() == 4:
                has_conv2d = True
            if all_linear is None:
                # assumes 3D non-attention params only occur when every
                # linear layer was wrapped -- TODO confirm naming scheme
                if param.dim() == 3 and "attn" not in name:
                    all_linear = True
        if dim is not None and alpha is not None and has_conv2d is not None:
            break
    if has_conv2d is None:
        has_conv2d = False
    if all_linear is None:
        all_linear = False

    module_class = OFTInfModule if for_inference else OFTModule
    network = OFTNetwork(
        text_encoder,
        unet,
        multiplier=multiplier,
        dim=dim,
        alpha=alpha,
        enable_all_linear=all_linear,
        enable_conv=has_conv2d,
        module_class=module_class,
    )
    return network, weights_sd
class OFTNetwork(torch.nn.Module):
UNET_TARGET_REPLACE_MODULE_ATTN_ONLY = ["CrossAttention"]
UNET_TARGET_REPLACE_MODULE_ALL_LINEAR = ["Transformer2DModel"]
UNET_TARGET_REPLACE_MODULE_CONV2D_3X3 = ["ResnetBlock2D", "Downsample2D", "Upsample2D"]
OFT_PREFIX_UNET = "oft_unet" # これ変えないほうがいいかな
def __init__(
self,
text_encoder: Union[List[CLIPTextModel], CLIPTextModel],
unet,
multiplier: float = 1.0,
dim: int = 4,
alpha: float = 1,
enable_all_linear: Optional[bool] = False,
enable_conv: Optional[bool] = False,
module_class: Type[object] = OFTModule,
varbose: Optional[bool] = False,
) -> None:
super().__init__()
self.multiplier = multiplier
self.dim = dim
self.alpha = alpha
print(
f"create OFT network. num blocks: {self.dim}, constraint: {self.alpha}, multiplier: {self.multiplier}, enable_conv: {enable_conv}"
)
# create module instances
def create_modules(
root_module: torch.nn.Module,
target_replace_modules: List[torch.nn.Module],
) -> List[OFTModule]:
prefix = self.OFT_PREFIX_UNET
ofts = []
for name, module in root_module.named_modules():
if module.__class__.__name__ in target_replace_modules:
for child_name, child_module in module.named_modules():
is_linear = "Linear" in child_module.__class__.__name__
is_conv2d = "Conv2d" in child_module.__class__.__name__
is_conv2d_1x1 = is_conv2d and child_module.kernel_size == (1, 1)
if is_linear or is_conv2d_1x1 or (is_conv2d and enable_conv):
oft_name = prefix + "." + name + "." + child_name
oft_name = oft_name.replace(".", "_")
# print(oft_name)
oft = module_class(
oft_name,
child_module,
self.multiplier,
dim,
alpha,
)
ofts.append(oft)
return ofts
# extend U-Net target modules if conv2d 3x3 is enabled, or load from weights
if enable_all_linear:
target_modules = OFTNetwork.UNET_TARGET_REPLACE_MODULE_ALL_LINEAR
else:
target_modules = OFTNetwork.UNET_TARGET_REPLACE_MODULE_ATTN_ONLY
if enable_conv:
target_modules += OFTNetwork.UNET_TARGET_REPLACE_MODULE_CONV2D_3X3
self.unet_ofts: List[OFTModule] = create_modules(unet, target_modules)
print(f"create OFT for U-Net: {len(self.unet_ofts)} modules.")
# assertion
names = set()
for oft in self.unet_ofts:
assert oft.oft_name not in names, f"duplicated oft name: {oft.oft_name}"
names.add(oft.oft_name)
def set_multiplier(self, multiplier):
    """Set the global scaling multiplier and propagate it to every OFT module."""
    self.multiplier = multiplier
    for oft_module in self.unet_ofts:
        oft_module.multiplier = multiplier
def load_weights(self, file):
    """Load a state dict from ``file`` (.safetensors or a torch checkpoint).

    Returns the (missing_keys, unexpected_keys) info from load_state_dict.
    """
    ext = os.path.splitext(file)[1]
    if ext == ".safetensors":
        from safetensors.torch import load_file

        weights_sd = load_file(file)
    else:
        weights_sd = torch.load(file, map_location="cpu")
    return self.load_state_dict(weights_sd, False)
def apply_to(self, text_encoder, unet, apply_text_encoder=True, apply_unet=True):
    """Install every OFT module into the U-Net and register it on this network.

    OFT never wraps the text encoder, so ``apply_unet`` must be True.
    """
    assert apply_unet, "apply_unet must be True"
    for oft_module in self.unet_ofts:
        oft_module.apply_to()
        self.add_module(oft_module.oft_name, oft_module)
# Returns whether the weights can be merged into the base model.
def is_mergeable(self):
    """OFT rotations can always be folded into the original weights."""
    return True
# TODO refactor to common function with apply_to
def merge_to(self, text_encoder, unet, weights_sd, dtype, device):
    """Load each module's weights from ``weights_sd`` and merge them into the U-Net."""
    print("enable OFT for U-Net")

    for oft in self.unet_ofts:
        prefix_len = len(oft.oft_name) + 1  # +1 skips the separator after the name
        sd_for_lora = {
            key[prefix_len:]: value
            for key, value in weights_sd.items()
            if key.startswith(oft.oft_name)
        }
        oft.load_state_dict(sd_for_lora, False)
        oft.merge_to()

    print(f"weights are merged")
# It might be nice to allow separate learning rates for the two Text Encoders.
def prepare_optimizer_params(self, text_encoder_lr, unet_lr, default_lr):
    """Build the optimizer param-group list (U-Net OFT modules only)."""
    self.requires_grad_(True)

    def enumerate_params(ofts):
        params = []
        for oft in ofts:
            params.extend(oft.parameters())

        # print num of params
        num_params = sum(p.numel() for p in params)
        print(f"OFT params: {num_params}")
        return params

    group = {"params": enumerate_params(self.unet_ofts)}
    if unet_lr is not None:
        group["lr"] = unet_lr
    return [group]
def enable_gradient_checkpointing(self):
    """No-op: gradient checkpointing is not supported for OFT networks."""
    return None
def prepare_grad_etc(self, text_encoder, unet):
    """Turn on gradients for every parameter of this network before training."""
    self.requires_grad_(True)
def on_epoch_start(self, text_encoder, unet):
    """Switch the network to training mode at the beginning of every epoch."""
    self.train()
def get_trainable_params(self):
    """Return all parameters of this network as the trainable set."""
    return self.parameters()
def save_weights(self, file, dtype, metadata):
    """Save the network state dict to ``file``, optionally casting tensors to ``dtype``.

    Saves in safetensors format (with sshs hash metadata) when the file
    extension is ".safetensors", otherwise as a torch checkpoint.
    """
    if metadata is not None and len(metadata) == 0:
        metadata = None  # treat an empty metadata dict the same as "no metadata"

    state_dict = self.state_dict()

    if dtype is not None:
        # cast every tensor to CPU + the requested dtype before serialization
        for key in list(state_dict.keys()):
            v = state_dict[key]
            v = v.detach().clone().to("cpu").to(dtype)
            state_dict[key] = v

    if os.path.splitext(file)[1] == ".safetensors":
        from safetensors.torch import save_file
        from library import train_util

        # Precalculate model hashes to save time on indexing
        if metadata is None:
            metadata = {}
        model_hash, legacy_hash = train_util.precalculate_safetensors_hashes(state_dict, metadata)
        metadata["sshs_model_hash"] = model_hash
        metadata["sshs_legacy_hash"] = legacy_hash

        save_file(state_dict, file, metadata)
    else:
        torch.save(state_dict, file)
def backup_weights(self):
    # Back up the original module weights so they can be restored after a merge.
    ofts: List[OFTInfModule] = self.unet_ofts
    for oft in ofts:
        org_module = oft.org_module[0]
        if not hasattr(org_module, "_lora_org_weight"):
            # only snapshot on the first call; later calls keep the original backup
            sd = org_module.state_dict()
            org_module._lora_org_weight = sd["weight"].detach().clone()
        # NOTE(review): flag set outside the guard so every module is marked
        # "currently matching the backup" — confirm against the LoRA counterpart.
        org_module._lora_restored = True
def restore_weights(self):
    # Restore the original (pre-merge) weights into each wrapped module.
    ofts: List[OFTInfModule] = self.unet_ofts
    for oft in ofts:
        org_module = oft.org_module[0]
        if not org_module._lora_restored:
            sd = org_module.state_dict()
            sd["weight"] = org_module._lora_org_weight
            org_module.load_state_dict(sd)
            org_module._lora_restored = True  # mark clean so we don't restore twice
def pre_calculation(self):
    # Pre-merge the OFT rotations into the base modules for faster inference.
    ofts: List[OFTInfModule] = self.unet_ofts
    for oft in ofts:
        org_module = oft.org_module[0]
        oft.merge_to()
        # sd = org_module.state_dict()
        # org_weight = sd["weight"]
        # lora_weight = oft.get_weight().to(org_weight.device, dtype=org_weight.dtype)
        # sd["weight"] = org_weight + lora_weight
        # assert sd["weight"].shape == org_weight.shape
        # org_module.load_state_dict(sd)
        org_module._lora_restored = False  # merged weight now differs from the backup
        oft.enabled = False  # module is bypassed; its effect lives in the merged weight
| kohya-ss/sd-scripts | networks/oft.py | oft.py | py | 14,491 | python | en | code | 3,347 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_numbe... |
26374121947 | import numpy as np
import matplotlib.pyplot as plt
def MC_Ising_model(beta, vis=False, N=50, rng=3000):
    """Checkerboard heat-bath Monte-Carlo simulation of the 2-D Ising model.

    Parameters
    ----------
    beta : float
        Inverse temperature.
    vis : bool
        If True, show the spin lattice before and after the simulation
        (requires matplotlib).
    N : int
        Lattice is N x N with periodic boundary conditions (via np.roll).
    rng : int
        Number of sweeps; statistics are collected over the second half.

    Returns
    -------
    (M_mean, chi) : mean absolute magnetisation per spin and the
    susceptibility estimate var(|M|) * beta.
    """
    s = np.random.choice([-1, 1], [N, N])
    numbers = np.arange(N * N).reshape(N, N)
    M_list = []
    # checkerboard masks: a site is "black" when (row + col) is odd
    blacks = ((numbers // N + numbers % N) % 2).astype(bool)
    whites = np.logical_not(blacks)

    def _show(spins):
        # shared plotting helper for the before/after lattice snapshots
        plt.rcParams['image.cmap'] = 'Wistia'
        fig, ax = plt.subplots(figsize=(16, 9))
        ax.imshow(spins, vmin=0, vmax=1)
        ax.axis(False)

    if vis:
        _show(s)

    for j in range(rng):
        # sum of the four periodic neighbours at every site
        m_i = np.roll(s, 1, axis=0)
        m_i += np.roll(s, -1, axis=0)
        m_i += np.roll(s, 1, axis=1)
        m_i += np.roll(s, -1, axis=1)
        # heat-bath acceptance probability for spin-up
        p = 1 / (1 + np.exp(-2 * beta * m_i))
        rand = np.random.random([N, N])
        up = p >= rand  # boolean mask: sites that become +1
        # NOTE(review): p is computed once before either sublattice is updated,
        # so the second colour uses stale neighbour sums — confirm intended.
        steps = np.random.choice([0, 1], 2)
        for i in steps:
            if i == 0:
                s[whites] = -1
                s[whites & up] = +1
            else:
                s[blacks] = -1
                s[blacks & up] = +1
        if j > rng / 2:
            # collect |magnetisation| only after the burn-in half
            M_list.append(np.abs(np.mean(s)))

    if vis:
        _show(s)

    M_mean = np.mean(M_list)
    chi = np.var(M_list) * beta
    return M_mean, chi
# Sweep inverse temperature beta and record magnetisation / susceptibility.
betas = np.linspace(0.2,2,num=40)
print(betas)
data = []
for beta in betas:
    mb = []
    cb = []
    # average 10 independent runs per beta to reduce Monte-Carlo noise
    for i in range(10):
        values = (MC_Ising_model(beta))
        mb.append(values[0])
        cb.append(values[1])
    print(beta, np.mean(mb), np.mean(cb))
    data.append([beta, np.mean(mb), np.mean(cb)])
data = np.array(data)
b = data[:,0]  # beta values
m = data[:,1]  # mean |magnetisation|
c = data[:,2]  # susceptibility estimate
# magnetisation vs beta
plt.plot(b, m)
plt.show()
# susceptibility vs beta (peak indicates the phase transition)
plt.plot(b, c)
plt.show()
| czaro2k/Ising-Model | MC_Ising.py | MC_Ising.py | py | 1,694 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.choice",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "numpy.arange",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.logical_not",
... |
# -*- coding: utf-8 -*-
"""
Created on Wed Oct 26 09:28:19 2022

@author: bjk_a

Salary-prediction tutorial: fits linear, polynomial, SVR and decision-tree
regressors to the (education level -> salary) data in maaslar.csv and
compares their predictions.
"""
# 1. libraries
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd

# data loading
veriler = pd.read_csv('maaslar.csv')
x = veriler.iloc[:,1:2]  # education level (kept 2-D for sklearn)
y = veriler.iloc[:,2:]   # salary
X = x.values
Y = y.values

# linear regression
from sklearn.linear_model import LinearRegression
lin_reg = LinearRegression()
lin_reg.fit(X,Y)

plt.scatter(X,Y,color='red')
plt.plot(x,lin_reg.predict(X), color = 'blue')
plt.show()

# polynomial regression (degree 2)
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 2)
x_poly = poly_reg.fit_transform(X)
print(x_poly)
lin_reg2 = LinearRegression()
lin_reg2.fit(x_poly,y)
plt.scatter(X,Y,color = 'red')
plt.plot(X,lin_reg2.predict(poly_reg.fit_transform(X)), color = 'blue')
plt.show()

# polynomial regression (degree 4) — note poly_reg/lin_reg2 are re-bound here
from sklearn.preprocessing import PolynomialFeatures
poly_reg = PolynomialFeatures(degree = 4)
x_poly = poly_reg.fit_transform(X)
print(x_poly)
lin_reg2 = LinearRegression()
lin_reg2.fit(x_poly,y)
plt.scatter(X,Y,color = 'red')
plt.plot(X,lin_reg2.predict(poly_reg.fit_transform(X)), color = 'blue')
plt.show()

# predictions
print(lin_reg.predict([[11]]))
print(lin_reg.predict([[6.6]]))

print(lin_reg2.predict(poly_reg.fit_transform([[6.6]])))
print(lin_reg2.predict(poly_reg.fit_transform([[11]])))

# feature scaling (required for SVR)
from sklearn.preprocessing import StandardScaler
sc1=StandardScaler()
x_olcekli = sc1.fit_transform(X)
sc2=StandardScaler()
y_olcekli = np.ravel(sc2.fit_transform(Y.reshape(-1,1)))

from sklearn.svm import SVR
svr_reg = SVR(kernel='rbf')
svr_reg.fit(x_olcekli,y_olcekli)

plt.scatter(x_olcekli,y_olcekli,color='red')
plt.plot(x_olcekli,svr_reg.predict(x_olcekli),color='blue')
plt.show()

# NOTE(review): these inputs are in the unscaled domain but the model was fit
# on scaled data — predictions likely need sc1.transform first; verify.
print(svr_reg.predict([[11]]))
print(svr_reg.predict([[6.6]]))

from sklearn.tree import DecisionTreeRegressor
r_dt = DecisionTreeRegressor(random_state=0)
# fit the model (X and Y are the numpy arrays, not the DataFrames x and y)
# i.e. it learns the relation between X and Y: salary (y) per education level (x)
r_dt.fit(X,Y)
# X, Z and K all yield the same predictions because they fall in the same tree intervals
Z = X + 0.5
K = X - 0.4
# draw the scatter plot of X vs Y
plt.scatter(X,Y, color='red')
# plot the r_dt predictions over X (and the shifted copies Z, K)
plt.plot(x,r_dt.predict(X), color='blue')
plt.plot(x,r_dt.predict(Z),color='green')
plt.plot(x,r_dt.predict(K),color='yellow')

print(r_dt.predict([[11]]))
print(r_dt.predict([[6.6]]))
| SamedAkbulut/Machine_Learning | ML_17_Decision_tree.py | ML_17_Decision_tree.py | py | 2,633 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.scatter",
"line_number": 27,
"usage_type": "call"
},
{
"a... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.