seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
32545476270 | #!/usr/bin/env python3
import pandas as pd
import math
import numpy as np
from scipy.stats import entropy
from tqdm import tqdm
from scipy.spatial.distance import euclidean
from fastdtw import fastdtw
from evaluate import ACTIVITIES
import sys
import pickle
import os
def extractFeatures(x_axis, y_axis, z_axis, user):
    """Build the feature dict for one window of tri-axial accelerometer data.

    Computes per-axis means and standard deviations, pairwise cross
    correlations, FFT power spectra with their energies and entropies, and
    mean pitch/roll/yaw estimates.  `user` is currently unused; it was kept
    for the (removed, commented-out) per-user DTW-distance features.
    """
    axes = {'x': x_axis, 'y': y_axis, 'z': z_axis}
    features = {}
    for name, signal in axes.items():
        features[name + '_mean'] = signal.mean()
    for name, signal in axes.items():
        features[name + '_std'] = signal.std()
    # Cross-correlation (np.correlate in default 'valid' mode -> length-1 array).
    features['xy_corr'] = np.correlate(x_axis, y_axis)
    features['xz_corr'] = np.correlate(x_axis, z_axis)
    features['yz_corr'] = np.correlate(y_axis, z_axis)
    # Power spectrum of each axis (squared magnitude of the real FFT).
    for name, signal in axes.items():
        features[name + '_freq'] = np.abs(np.fft.rfft(signal)) ** 2
    # Mean spectral power per axis.
    for name in axes:
        spectrum = features[name + '_freq']
        features[name + '_energy'] = sum(spectrum) / len(spectrum)
    # Shannon entropy of the normalised power spectrum.
    for name in axes:
        spectrum = features[name + '_freq']
        features[name + '_entropy'] = entropy(spectrum / sum(spectrum))
    features['pitch_mean'] = np.arctan(features['x_mean'] / math.sqrt(np.abs(features['y_mean'] + features['z_mean'])))
    features['roll_mean'] = np.arctan(features['y_mean'] / math.sqrt(np.abs(features['x_mean'] + features['z_mean'])))
    features['yaw_mean'] = np.arctan(features['z_mean'] / math.sqrt(np.abs(features['y_mean'] + features['x_mean'])))
    return features
def extractCentroid(dataset):
    """Compute per-(user, activity) centroid signals.

    The centroid of a group is the element-wise mean of the x/y/z axis
    sequences over all rows for that user/activity pair.  Pairs with no
    rows get empty lists so callers can detect a missing centroid.

    Returns a nested dict: centroids[user][activity][axis], where axis is
    'x_axis' | 'y_axis' | 'z_axis'.
    """
    print('Extracting centroids...', flush=True)
    centroids = {}
    for user in tqdm(dataset.user.unique()):
        centroids.setdefault(user, {})
        for activity in dataset.activity.unique():
            # Filter once and reuse: the original filtered the DataFrame
            # twice per pair (len() and itertuples()), and also carried an
            # unused best_squared_dist variable.
            group = dataset[(dataset.user == user) & (dataset.activity == activity)]
            num = len(group)
            centroids[user][activity] = {}
            if num == 0:
                centroids[user][activity]['x_axis'] = []
                centroids[user][activity]['y_axis'] = []
                centroids[user][activity]['z_axis'] = []
                continue
            sum_x = 0
            sum_y = 0
            sum_z = 0
            for row1 in group.itertuples():
                sum_x = np.add(sum_x, row1.x_axis)
                sum_y = np.add(sum_y, row1.y_axis)
                sum_z = np.add(sum_z, row1.z_axis)
            centroids[user][activity]['x_axis'] = np.divide(sum_x, num)
            centroids[user][activity]['y_axis'] = np.divide(sum_y, num)
            centroids[user][activity]['z_axis'] = np.divide(sum_z, num)
    return centroids
def add_features(dataset, outputfile=None, use_cache=True):
    """Extract features for every row of `dataset` and append them as columns.

    If `outputfile` is given, the augmented DataFrame is pickled there and
    the feature-group mapping to '_' + outputfile; when `use_cache` is True
    and both files exist, they are loaded instead of recomputing.

    Returns (dataset, availableFeatures), where availableFeatures maps a
    feature-group name to its list of column names.
    """
    if use_cache and outputfile and os.path.isfile(outputfile) and os.path.isfile('_' + outputfile):
        dataset = pd.read_pickle(outputfile)
        # BUG FIX: pickle.load() takes an open file object, not a path
        # string; the original raised TypeError whenever the cache hit.
        with open('_' + outputfile, 'rb') as cache_file:
            availableFeatures = pickle.load(cache_file)
        return dataset, availableFeatures
    availableFeatures = {
        'acc_means': ['x_mean', 'y_mean', 'z_mean'],
        'acc_corrs': ['xy_corr', 'xz_corr', 'yz_corr'],
        'acc_stds': ['x_std', 'y_std', 'z_std'],
        'energies': ['x_energy', 'y_energy', 'z_energy'],
        'entropies': ['x_entropy', 'y_entropy', 'z_entropy'],
        'time': ['HH', 'total_duration'],
        'rotation_means': ['pitch_mean', 'yaw_mean', 'roll_mean'],
        # 'dtw_dist': [],
    }
    consolidatedFeatures = {}
    print('Extracting features...', flush=True)
    for row in tqdm(dataset.itertuples()):
        features = extractFeatures(row.x_axis, row.y_axis, row.z_axis, row.user)
        for feature, value in features.items():
            consolidatedFeatures.setdefault(feature, []).append(value)
    for feature, values in consolidatedFeatures.items():
        dataset[feature] = values
    if outputfile:
        dataset.to_pickle(outputfile)
        # Write the metadata through a context manager so the handle is
        # closed deterministically (the original leaked it).
        with open('_' + outputfile, 'wb') as cache_file:
            pickle.dump(availableFeatures, cache_file)
    return dataset, availableFeatures
if __name__ == "__main__":
print('Usage: "./feature_engineer.py [data_pickle] [outputfile]"')
if len(sys.argv) < 2:
raise FileNotFoundError
else:
data_pickle = sys.argv[1]
dataset = pd.read_pickle(data_pickle)
add_features(dataset) | kennethtxytqw/Wharf-Experiments | feature_engineer.py | feature_engineer.py | py | 5,733 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.correlate",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.correlate",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "numpy.correlate",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "numpy.abs",
"line... |
22261025139 | from django.db import models
from django.db.models import Q
from users.models import User
class Schedule(models.Model):
    """A bookable appointment slot owned by a dentist."""
    # Allowed lifecycle states for a slot.
    statuses = (
        ('pending', 'Pending'),
        ('occupied', 'Occupied'),
    )
    status = models.CharField(
        max_length=64,
        choices=statuses,
        default='pending'
    )
    # Only users in the "Doctors" group can be selected as the owner.
    dentist = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='dentist_schedules',
        limit_choices_to=Q(groups__name='Doctors')
    )
    start_time = models.DateTimeField()

    class Meta:
        ordering = ['pk']

    def __str__(self):
        return str(self.start_time)
| GaneaFunpay/Dentist-booking | dentist_booking/booking/models/schedule.py | schedule.py | py | 647 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.db.models.Model",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": ... |
24864353513 | import docker
import smtplib
import os
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from typing import Optional
RUNNING = "running"
containers = [
os.environ["WEB_MANAGER_CONTAINER_NAME"],
os.environ["SALT_API_CONTAINER_NAME"]
]
def is_container_running(container_name: str) -> Optional[bool]:
    """Report whether the named Docker container is in the "running" state.

    Returns True/False for a known container, or None (after printing a
    warning) when no container with that name exists.
    """
    # Client configured from the environment (default socket / env vars).
    client = docker.from_env()
    try:
        container = client.containers.get(container_name)
    except docker.errors.NotFound as exc:
        print(f"Check container name!\n{exc.explanation}")
        return None
    return container.attrs["State"]["Status"] == RUNNING
def generate_message() -> MIMEMultipart:
    """Build a multipart (plain-text + HTML) status e-mail that reports the
    state of every monitored container."""
    plain_body = """
    Hello Team.
    """
    html_body = """
    <html>
    <head></head>
    <body>
    <p>Hello Team.</p>
    """
    # Append one status line per container, mirrored in both bodies.
    for container_name in containers:
        if is_container_running(container_name):
            plain_body += f"""
    {container_name} is fine.
    """
            html_body += f"""
    <p>{container_name} is fine</p>
    """
        else:
            plain_body += f"""
    {container_name} is down.
    """
            html_body += f"""
    <p>{container_name} is down.</p>
    """
    plain_body += """
    Cheers, Web Manager Health check.
    """
    html_body += """
    <p>Cheers, Web Manager Health check.</p>
    </body>
    <html>
    """
    # "alternative" lets mail clients pick HTML with plain-text fallback.
    message = MIMEMultipart("alternative")
    message["Subject"] = "Web Manager Health Check."
    message["To"] = os.environ["TO_EMAIL"]
    message["From"] = f"SALT Team <{os.environ['FROM_EMAIL']}>"
    message.attach(MIMEText(plain_body, "plain"))
    message.attach(MIMEText(html_body, "html"))
    return message
def send_email(message: MIMEMultipart) -> None:
    """Send *message* via the SMTP server named in $SMTP_SERVER.

    Uses the SMTP object as a context manager so the connection is always
    closed cleanly (the original leaked the connection).
    """
    with smtplib.SMTP(os.environ["SMTP_SERVER"]) as smtp_obj:
        smtp_obj.sendmail(
            msg=message.as_string(),
            from_addr=os.environ["FROM_EMAIL"],
            to_addrs=[os.environ["TO_EMAIL"]]
        )
if __name__ == "__main__":
    # Notify the team about any monitored container that is not running.
    # NOTE(review): if several containers are down this sends one e-mail per
    # down container, each already listing the status of all containers —
    # confirm that duplicate mails are intended.
    for cn in containers:
        if not is_container_running(cn):
            msg = generate_message()
            send_email(msg)
| saltastroops/health-check | main.py | main.py | py | 2,314 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "docker.from_env",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "docker.errors",
"... |
32433714928 | import sys
import torch
import torch.nn.functional as F
import wandb
from tqdm import tqdm
from config import ParamConfig
from help_funcs_wandb import define_wandb_lr_metrics
class Trainer:
    """Runs the training loop for one model, logging progress to tqdm and
    the learning-rate schedule to wandb."""

    def __init__(self, device: str, model: torch.nn.Module, config: ParamConfig, train_loader,
                 optimizer, lr_scheduler):
        self.model = model
        self.optimizer = optimizer
        self.device = device
        self.total_epoch = config.epoch_total
        self.train_loader = train_loader
        self.lr_scheduler = lr_scheduler
        # use wandb to record change of lr
        self.wandb_metric_batch, self.wandb_metric_lr = define_wandb_lr_metrics()
        # Print a progress line every `bs_print` batches.
        self.bs_print = 100
        # One shared bar spanning all epochs (len(loader) batches per epoch).
        self.tqdm_bar = tqdm(total=len(self.train_loader) * self.total_epoch,
                             file=sys.stdout, position=0, ncols=100)

    def train_epoch(self, idx_epoch):
        """
        idx_epoch should start from 1

        Runs one full pass over train_loader, stepping the LR scheduler per
        batch; returns (last batch loss, last batch lr).
        """
        self.model.train()
        loss = 0.
        lr = 0.
        for batch_idx, (data, target) in enumerate(self.train_loader, 1):
            self.optimizer.zero_grad()
            # LR in effect for this batch (scheduler stepped afterwards).
            lr = self.lr_scheduler.get_last_lr()[0]
            logits = self.model(data.to(self.device))
            loss = F.cross_entropy(logits, target.to(self.device))
            loss.backward()
            self.optimizer.step()
            self.lr_scheduler.step()
            if batch_idx % self.bs_print == 0:
                # NOTE(review): enumerate starts at 1 but the display prints
                # batch_idx + 1 — confirm the off-by-one is intended.
                self.tqdm_bar.write(f'[{idx_epoch:<2}, {batch_idx + 1:<2}] '
                                    f'loss: {loss:<6.4f} '
                                    f'lr: {lr:.4f} ')
            self.tqdm_bar.update(1)
            self.tqdm_bar.set_description(f'epoch-{idx_epoch:<3} '
                                          f'batch-{batch_idx + 1:<3} '
                                          f'loss-{loss:<.2f} '
                                          f'lr-{lr:.3f}')
            # Global batch counter so wandb plots lr across epochs.
            idx_batch_total = (idx_epoch - 1) * len(self.train_loader) + batch_idx
            wandb.log({self.wandb_metric_lr: lr,
                       self.wandb_metric_batch: idx_batch_total})
        # Close the shared bar only after the final epoch.
        if idx_epoch >= self.total_epoch:
            self.tqdm_bar.close()
        return loss.item(), lr
| geyao1995/wandb_demo | trainer.py | trainer.py | py | 2,248 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "config.ParamConfig",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "config.epoch_total",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "help_funcs_w... |
8625942380 | #!/usr/bin/python3
# -*-coding:utf-8 -*-
import psycopg2
from helper import config, utils
from psycopg2 import pool
from psycopg2.extensions import ISOLATION_LEVEL_AUTOCOMMIT
""" SQLHandler(데이터베이스 처리)
- postgresql 사용
- ThreadedConnectionPool 사용
- 참고 : https://pynative.com/psycopg2-python-postgresql-connection-pooling/
"""
class SQLHandler:
    """PostgreSQL access layer backed by a psycopg2 ThreadedConnectionPool.

    Reference: https://pynative.com/psycopg2-python-postgresql-connection-pooling/
    """

    def __init__(self):
        self.threaded_postgreSQL_pool = None
        self.connection()

    def get_conn(self):
        """Borrow a connection from the pool; returns (connection, cursor)."""
        ps_connection = self.threaded_postgreSQL_pool.getconn()
        ps_connection.set_isolation_level(ISOLATION_LEVEL_AUTOCOMMIT)
        if ps_connection:
            ps_cursor = ps_connection.cursor()
            return ps_connection, ps_cursor

    def put_conn(self, ps_connection):
        """Return a connection to the pool."""
        self.threaded_postgreSQL_pool.putconn(ps_connection)

    def destroy(self):
        """Close every pooled connection."""
        if self.threaded_postgreSQL_pool:
            # BUG FIX: the original accessed `closeall` without calling it,
            # so no connection was ever actually closed.
            self.threaded_postgreSQL_pool.closeall()

    def connection(self):
        """Create the connection pool from host-specific configuration."""
        _d = config.get_config("postgres_%s" % (utils.get_host()))
        self.threaded_postgreSQL_pool = psycopg2.pool.ThreadedConnectionPool(
            1,
            1000,
            user=_d["db_user"],
            password=_d["db_pass"],
            host=_d["db_host"],
            port=_d["db_port"],
            database=_d["db_name"],
        )

    def fetch_one(self, query):
        """Run `query` and return the first row as a dict ([] if no rows)."""
        ps_connection, ps_cursor = self.get_conn()
        ps_cursor.execute(query)
        _data = ps_cursor.fetchone()
        _cols = [desc[0] for desc in ps_cursor.description]
        record = []
        if _data is not None:
            record = self.get_dict_one(_cols, _data)[0]
        ps_cursor.close()
        self.put_conn(ps_connection)
        return record

    def fetch_all(self, query):
        """Run `query` and return all rows as a list of dicts."""
        ps_connection, ps_cursor = self.get_conn()
        ps_cursor.execute(query)
        _data = ps_cursor.fetchall()
        _cols = [desc[0] for desc in ps_cursor.description]
        records = []
        if _data is not None:
            records = self.get_dict_all(_cols, _data)
        # Close the cursor before releasing the connection; the original
        # leaked it here (unlike fetch_one/execute).
        ps_cursor.close()
        self.put_conn(ps_connection)
        return records

    def execute(self, query):
        """Run a non-SELECT statement; returns the affected row count."""
        ps_connection, ps_cursor = self.get_conn()
        ps_cursor.execute(query)
        rowcount = ps_cursor.rowcount
        ps_cursor.close()
        self.put_conn(ps_connection)
        return rowcount

    def get_dict_all(self, cols, data):
        """Zip the column names with every row tuple."""
        return [dict(zip(cols, row)) for row in data]

    def get_dict_one(self, cols, data):
        """Zip the column names with a single row tuple; returns a 1-element list.

        BUG FIX: the original accumulated partial dicts while iterating the
        row's values, so element [0] contained only the first column and
        fetch_one returned a truncated record.
        """
        return [dict(zip(cols, data))]
if __name__ == "__main__":
    # Ad-hoc smoke test against a live database; requires valid host config.
    pp = SQLHandler()
    # pp.connection()
    pp.fetch_one("select * from users limit 10")
    pp.fetch_all("select * from users limit 10")
    pp.execute("update users set fcm_token='ABCD' where pid='volt772@naver.com'")
| volt772/prooya | BE (Python)/database/sql.py | sql.py | py | 3,396 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT",
"line_number": 24,
"usage_type": "argument"
},
{
"api_name": "helper.config.get_config",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "helper.config",
"line_number": 41,
"usage_type": "name"
},
... |
15252805798 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# author: evan-gyy
import pandas as pd
import openpyxl
from openpyxl.styles import PatternFill, colors, Font
import traceback
import os
import gc
class LCStats:
    """Reads group-buying orders from one sheet of an Excel workbook and
    writes a per-building summary (totals, lockdown highlighting, per-room
    order text) onto the building-map sheet, saving as 'res-<input>'."""

    def __init__(self):
        self.file = ''
        # Pick the input workbook from the current directory.
        self.find_file('.xlsx', '.')
        # NOTE(review): self.map appears unused after initialization.
        self.map = {}
        """
        self.data样例:
        '10': {
        'loc': [],
        'sum': 3,
        'red': 1,
        'order': {
        '601': {
        '蛋': 1
        },
        '301': {
        '蛋': 2
        }
        }
        },
        """
        self.data = {}
        self.wb = openpyxl.load_workbook(self.file)
        # Sheet 0 is the building map that gets annotated.
        self.ws = self.wb.worksheets[0]

    def find_file(self, type, path):
        """Locate an input file of extension `type` under `path`, skipping
        previously generated 'res-' outputs; prompts when ambiguous."""
        file_list = []
        for f in os.listdir(path):
            if type in f and 'res-' not in f:
                file_list.append(f)
        if len(file_list) > 1:
            print("检测到以下excel文件:")
            for f in file_list:
                print("{}:{}".format(file_list.index(f), f))
            # Re-prompt until the user enters a valid index.
            while True:
                try:
                    self.file = file_list[int((input("请输入文件序号:")))]
                    break
                except:
                    print("发生错误:请正确输入文件前的序号(0-n)")
        elif len(file_list) == 1:
            self.file = file_list[0]
        else:
            input("请核对目录下是否有excel文件")
            exit()

    def get_data(self, sheet):
        """Aggregate the orders on `sheet` into self.data, keyed by building."""
        df = pd.read_excel(self.file, sheet_name=sheet)
        for index, row in df.iterrows():
            # Stop at the first row without an order number.
            if pd.isnull(row['跟团号']):
                break
            lou = int(row['楼号'])
            nong = int(row['弄号'])
            # Buildings in lane 719 get a "719-" key prefix.
            key = str(lou) if nong != 719 else '719-' + str(lou)
            room = str(int(row['房间号']))
            good = str(row['物资'])
            n = int(row['数量'])
            # Any value other than '未' marks the building as locked down.
            red = 1 if row['是否封控'] != '未' else 0
            if key not in self.data:
                self.data[key] = {
                    'loc': [],
                    'sum': 0,
                    'red': 0,
                    'order': {}
                }
            self.data[key]['sum'] += n
            self.data[key]['red'] = red
            if room not in self.data[key]['order']:
                self.data[key]['order'][room] = {}
            if good not in self.data[key]['order'][room]:
                self.data[key]['order'][room][good] = 0
            self.data[key]['order'][room][good] += n
        # print(self.data)

    def to_map(self):
        """Annotate the map sheet: highlight buildings with orders (red if
        locked down, orange otherwise), write per-room order text below each
        building cell, per-row sums in column 1 and the grand total at row 19."""
        total = 0
        for i in range(1, self.ws.max_row + 1):
            # NOTE(review): `sum` shadows the builtin within this method.
            sum = 0
            for j in range(2, self.ws.max_column + 1):
                cell = self.ws.cell(i, j).value
                # print(cell, type(cell))
                if not cell:
                    continue
                cell = str(cell)
                if cell not in self.data:
                    continue
                d = self.data[cell]
                sum += d['sum']
                if d['red']:
                    # Locked-down building: red fill, bold white text.
                    self.ws.cell(i, j).fill = PatternFill("solid", fgColor="FF0000")
                    self.ws.cell(i + 1, j).fill = PatternFill("solid", fgColor="FF0000")
                    self.ws.cell(i, j).font = Font('Times New Roman', bold=True, color="FFFFFF")
                    self.ws.cell(i + 1, j).font = Font('Times New Roman', bold=True, color="FFFFFF")
                else:
                    # Normal building with orders: orange fill.
                    self.ws.cell(i, j).fill = PatternFill("solid", fgColor="FFC000")
                    self.ws.cell(i + 1, j).fill = PatternFill("solid", fgColor="FFC000")
                # One "room + goods + quantity" entry per line, written in
                # the cell directly below the building cell.
                orders = []
                for room, order in d['order'].items():
                    for good, num in order.items():
                        info = room + good + str(num)
                        orders.append(info)
                self.ws.cell(i + 1, j).value = '\n'.join(orders)
            if sum:
                total += sum
                self.ws.cell(i + 1, 1).value = sum
        self.ws.cell(19, 1).value = total

    def run(self):
        """End-to-end: read orders from sheet 2, annotate the map sheet, and
        save the workbook as 'res-<input filename>'."""
        sheets = self.wb.worksheets
        self.get_data(sheets[1].title)
        self.to_map()
        self.wb.save('res-' + self.file)
        del self.wb, self.ws
        gc.collect()
if __name__ == '__main__':
try:
lc = LCStats()
lc.run()
except:
traceback.print_exc()
input() | evan-gyy/OrderStats | longchen/lc_stats.py | lc_stats.py | py | 4,506 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openpyxl.load_workbook",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pandas.read_excel",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "pandas.isnull",
... |
5928224769 | import argparse
import glob
import logging
import os
from typing import Dict, Optional
import ocpmodels
"""
This script provides users with an automated way to download, preprocess (where
applicable), and organize data to readily be used by the existing config files.
"""
DOWNLOAD_LINKS_s2ef: Dict[str, Dict[str, str]] = {
"s2ef": {
"200k": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_train_200K.tar",
"2M": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_train_2M.tar",
"20M": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_train_20M.tar",
"all": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_train_all.tar",
"val_id": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_val_id.tar",
"val_ood_ads": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_val_ood_ads.tar",
"val_ood_cat": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_val_ood_cat.tar",
"val_ood_both": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_val_ood_both.tar",
"test": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_test_lmdbs.tar.gz",
"rattled": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_rattled.tar",
"md": "https://dl.fbaipublicfiles.com/opencatalystproject/data/s2ef_md.tar",
},
}
DOWNLOAD_LINKS_is2re: Dict[str, str] = {
"is2re": "https://dl.fbaipublicfiles.com/opencatalystproject/data/is2res_train_val_test_lmdbs.tar.gz",
}
S2EF_COUNTS = {
"s2ef": {
"200k": 200000,
"2M": 2000000,
"20M": 20000000,
"all": 133934018,
"val_id": 999866,
"val_ood_ads": 999838,
"val_ood_cat": 999809,
"val_ood_both": 999944,
"rattled": 16677031,
"md": 38315405,
},
}
def get_data(
    datadir: str, task: str, split: Optional[str], del_intmd_files: bool
) -> None:
    """Download, extract and (for s2ef) preprocess an OCP dataset.

    :param datadir: destination directory (created if missing)
    :param task: "s2ef" or "is2re"
    :param split: required split name for s2ef; ignored for is2re
    :param del_intmd_files: delete archives/intermediate dirs when done
    """
    os.makedirs(datadir, exist_ok=True)

    if task == "s2ef" and split is None:
        raise NotImplementedError("S2EF requires a split to be defined.")

    download_link: Optional[str] = None
    if task == "s2ef":
        assert (
            split is not None
        ), "Split must be defined for the s2ef dataset task"
        assert (
            split in DOWNLOAD_LINKS_s2ef[task]
        ), f'S2EF "{split}" split not defined, please specify one of the following: {list(DOWNLOAD_LINKS_s2ef["s2ef"].keys())}'
        download_link = DOWNLOAD_LINKS_s2ef[task][split]
    elif task == "is2re":
        download_link = DOWNLOAD_LINKS_is2re[task]
    else:
        raise Exception(f"Unrecognized task {task}")
    assert download_link is not None

    os.system(f"wget {download_link} -P {datadir}")
    filename = os.path.join(datadir, os.path.basename(download_link))
    logging.info("Extracting contents...")
    # BUG FIX: the archive path in the tar command had been corrupted to the
    # literal "(unknown)"; extract the file that was just downloaded.
    os.system(f"tar -xvf {filename} -C {datadir}")
    dirname = os.path.join(
        datadir,
        os.path.basename(filename).split(".")[0],
    )
    if task == "s2ef" and split != "test":
        assert (
            split is not None
        ), "Split must be defined for the s2ef dataset task"
        compressed_dir = os.path.join(dirname, os.path.basename(dirname))
        # Training splits land under <task>/<split>/train; validation splits
        # under <task>/all/<split>.
        if split in ["200k", "2M", "20M", "all", "rattled", "md"]:
            output_path = os.path.join(datadir, task, split, "train")
        else:
            output_path = os.path.join(datadir, task, "all", split)
        uncompressed_dir = uncompress_data(compressed_dir)
        preprocess_data(uncompressed_dir, output_path)
        verify_count(output_path, task, split)
    if task == "s2ef" and split == "test":
        os.system(f"mv {dirname}/test_data/s2ef/all/test_* {datadir}/s2ef/all")
    elif task == "is2re":
        os.system(f"mv {dirname}/data/is2re {datadir}")

    if del_intmd_files:
        cleanup(filename, dirname)
def uncompress_data(compressed_dir: str) -> str:
    """Decompress a raw S2EF download with the repo's `uncompress` script.

    Returns the directory the uncompressed trajectories were written to
    (sibling of *compressed_dir* with an "_uncompressed" suffix).
    """
    import uncompress

    args, _ = uncompress.get_parser().parse_known_args()
    args.ipdir = compressed_dir
    args.opdir = os.path.dirname(compressed_dir) + "_uncompressed"
    uncompress.main(args)
    return args.opdir
def preprocess_data(uncompressed_dir: str, output_path: str) -> None:
    """Convert uncompressed trajectories to LMDB with the repo's
    `preprocess_ef` script, writing the result to *output_path*."""
    import preprocess_ef as preprocess

    args, _ = preprocess.get_parser().parse_known_args()
    args.data_path = uncompressed_dir
    args.out_path = output_path
    preprocess.main(args)
def verify_count(output_path: str, task: str, split: str) -> None:
    """Assert the number of preprocessed samples matches the published count.

    Counts lines across every *.txt file in *output_path* and compares
    against S2EF_COUNTS[task][split].
    """
    count = 0
    for path in glob.glob(os.path.join(output_path, "*.txt")):
        # Read via a context manager; the original left the files open.
        with open(path, "r") as f:
            count += len(f.read().splitlines())
    assert (
        count == S2EF_COUNTS[task][split]
    ), f"S2EF {split} count incorrect, verify preprocessing has completed successfully."
def cleanup(filename: str, dirname: str) -> None:
    """Delete the downloaded archive plus any intermediate directories.

    Safe to call repeatedly: paths that do not exist are skipped.
    """
    import shutil

    for directory in (dirname, dirname + "_uncompressed"):
        if os.path.exists(directory):
            shutil.rmtree(directory)
    if os.path.exists(filename):
        os.remove(filename)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--task", type=str, help="Task to download")
    parser.add_argument(
        "--split", type=str, help="Corresponding data split to download"
    )
    parser.add_argument(
        "--keep",
        action="store_true",
        help="Keep intermediate directories and files upon data retrieval/processing",
    )
    # Flags for S2EF train/val set preprocessing:
    parser.add_argument(
        "--get-edges",
        action="store_true",
        help="Store edge indices in LMDB, ~10x storage requirement. Default: compute edge indices on-the-fly.",
    )
    parser.add_argument(
        "--num-workers",
        type=int,
        default=1,
        help="No. of feature-extracting processes or no. of dataset chunks",
    )
    parser.add_argument(
        "--ref-energy", action="store_true", help="Subtract reference energies"
    )
    parser.add_argument(
        "--data-path",
        type=str,
        default=os.path.join(os.path.dirname(ocpmodels.__path__[0]), "data"),
        help="Specify path to save dataset. Defaults to 'ocpmodels/data'",
    )
    args: argparse.Namespace
    # parse_known_args: unrecognized flags (e.g. the preprocessing flags
    # above) are re-parsed later by the uncompress/preprocess scripts.
    args, _ = parser.parse_known_args()
    get_data(
        datadir=args.data_path,
        task=args.task,
        split=args.split,
        del_intmd_files=not args.keep,
    )
| Open-Catalyst-Project/ocp | scripts/download_data.py | download_data.py | py | 6,541 | python | en | code | 518 | github-code | 1 | [
{
"api_name": "typing.Dict",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "os.makedirs",
"line_numbe... |
37554850767 | import sys, pickle
from PySide2 import QtCore, QtGui, QtWidgets
from PySide2.QtCore import *
from PySide2.QtGui import *
from PySide2.QtWidgets import *
from ui_splash_screen import Ui_Splash_Screen
from ui_login import Ui_Login
from ui_test_screen import Ui_MainWindow
from main import Main
from user import User
splash_counter = 0
class MainWindow(QMainWindow):
    """Main application window shown after a successful login."""

    def __init__(self, email):
        QMainWindow.__init__(self)
        self.ui = Ui_MainWindow()
        self.ui.setupUi(self)
        # Display the logged-in user's e-mail when one is supplied.
        if email != "":
            self.ui.label_2.setText(str("Username: " + email))
class Login(QWidget):
    """Login form: on success opens the Main window, otherwise shows a
    warning dialog."""

    def __init__(self, user):
        QWidget.__init__(self)
        self.ui = Ui_Login()
        self.ui.setupUi(self)
        self.user = user
        self.ui.login_btn.clicked.connect(self.login_click)

    def login_click(self):
        # Read the credentials and "remember me" checkbox from the form.
        email = self.ui.email_line.text()
        password = self.ui.password_line.text()
        remember = self.ui.check_remember.isChecked()
        self.user.login(email, password, remember)
        if self.user.is_logged_in:
            self.main = Main(self.user)
            self.main.show()
            self.close()
        else:
            QMessageBox.warning(self,"Login failed",
                "The email and password you entered did not match our records. Please double-check and try again.")
class Splash_Screen(QMainWindow):
    """Frameless splash screen that animates a progress bar, then opens
    either Main (saved login found) or the Login window."""

    def __init__(self):
        QMainWindow.__init__(self)
        self.ui = Ui_Splash_Screen()
        self.ui.setupUi(self)
        # Borderless, transparent window so only the splash art shows.
        self.setWindowFlag(QtCore.Qt.FramelessWindowHint)
        self.setAttribute(QtCore.Qt.WA_TranslucentBackground)
        # Tick every 10 ms; progress() advances the module-level counter.
        self.timer = QtCore.QTimer()
        self.timer.timeout.connect(self.progress)
        self.timer.start(10)
        self.show()

    def progress(self):
        global splash_counter
        self.ui.progressBar.setValue(splash_counter)
        # Once the bar has passed 100, stop and route to the next screen.
        if splash_counter > 100:
            self.timer.stop()
            user = User()
            # Skip the login form when credentials were remembered.
            if user.is_logged_in:
                self.main = Main(user)
                self.main.show()
                self.close()
            else:
                self.login = Login(user)
                self.login.show()
                self.close()
        splash_counter += 1
if __name__ == "__main__":
app = QApplication(sys.argv)
win = Splash_Screen()
sys.exit(app.exec_()) | hirokiyaginuma/scriptspinner-software | ScriptSpinner.py | ScriptSpinner.py | py | 2,369 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ui_test_screen.Ui_MainWindow",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "ui_login.Ui_Login",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "main.Main",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "ui_splash_sc... |
29883473876 | import threading
import time
from typing import Optional, Any, TypeVar, Callable
import wx
from morphzero.core.common.matrix_board import MatrixBoardCoordinates
from morphzero.core.game import Player, State, Move
from morphzero.core.game_service import GameService, GameServiceListener
from morphzero.ui.common import GameGraphicsContext
from morphzero.ui.gameconfig import GameConfig
from morphzero.ui.util import matrixgame
from morphzero.ui.util.player_name_decorator import ColorPlayerNameDecorator
_MIN_AI_PLAY_TIME_SEC: float = 0.2
T = TypeVar('T')
def _execute_off_thread(
        function: Callable[[], T],
        callback: Callable[[T], None],
        use_busy_cursor: bool = False,
        min_duration: Optional[float] = None) -> None:
    """Run *function* on a worker thread, then deliver its result to
    *callback* on the wx UI thread (via wx.CallAfter).

    :param use_busy_cursor: show a busy cursor for the duration of the call.
    :param min_duration: minimum total seconds the call should take; the
        worker sleeps to pad short calls (e.g. so AI moves don't look
        instantaneous).
    """
    def off_thread() -> None:
        if use_busy_cursor:
            wx.CallAfter(wx.BeginBusyCursor)
        start_time_sec = time.time()
        t = function()
        elapsed_time_sec = time.time() - start_time_sec
        if min_duration and min_duration > elapsed_time_sec:
            time.sleep(min_duration - elapsed_time_sec)
        if use_busy_cursor:
            wx.CallAfter(wx.EndBusyCursor)
        # Hand the result back on the UI thread.
        wx.CallAfter(callback, t)

    threading.Thread(target=off_thread).start()
class BaseGamePanel(wx.Panel, GameServiceListener):
    """Base wx panel hosting a game: the board widget, player-name labels,
    and the wiring between the GameService and the UI (including AI turns).

    Subclasses implement create_board() and show_result().
    """
    game_config: GameConfig
    game_service: GameService
    game_graphics_context: GameGraphicsContext
    board: wx.Window

    def __init__(self, game_config: GameConfig, **kwargs: Any):
        super().__init__(**kwargs)
        self.SetDoubleBuffered(True)
        self.game_config = game_config
        self.game_service = GameService(self.game_config.rules.create_engine())
        self.game_graphics_context = GameGraphicsContext(
            game_config=self.game_config,
            graphics_renderer=wx.GraphicsRenderer.GetDefaultRenderer(),
            player_colors={
                Player.FIRST_PLAYER: wx.BLUE,
                Player.SECOND_PLAYER: wx.RED,
            })
        self.board = self.create_board()
        # create layout
        self.create_layout()
        # bind
        self.Bind(wx.EVT_WINDOW_DESTROY, self.on_destroy)
        # Init game service; if the first player is an AI, kick off its move.
        self.game_service.add_listener(self)
        self.game_service.new_game()
        self.maybe_play_ai_move()

    def create_layout(self) -> None:
        """Vertical stack: first player's name, board (stretching), second
        player's name."""
        name_decorator = ColorPlayerNameDecorator(self.game_graphics_context)

        def create_player_name_static_text(player: Player) -> wx.StaticText:
            # Truncate very long names so the layout stays compact.
            label = self.game_config.players[player].name
            if len(label) > 30:
                label = label[:30] + "…"
            player_name = wx.StaticText(self, label=label)
            name_decorator.decorate_player_label(player, player_name)
            return player_name

        first_player_name, second_player_name = (
            create_player_name_static_text(player)
            for player in [Player.FIRST_PLAYER, Player.SECOND_PLAYER]
        )
        sizer = wx.BoxSizer(wx.VERTICAL)
        sizer.Add(first_player_name,
                  wx.SizerFlags().Left().Border())
        sizer.Add(self.board,
                  wx.SizerFlags(1).Expand())
        sizer.Add(second_player_name,
                  wx.SizerFlags().Right().Border())
        self.SetSizerAndFit(sizer)

    def create_board(self) -> wx.Window:
        """Create the board widget; subclass hook."""
        raise NotImplementedError()

    def show_result(self, state: State) -> None:
        """Present the finished game's result to the user; subclass hook."""
        raise NotImplementedError()

    def maybe_play_ai_move(self) -> None:
        """Uses separate Thread to make a move for the AI, if it is AI's turn."""
        game_service = self.game_service
        state = game_service.state
        if state.is_game_over:
            return
        ai_model = self.game_config.players[state.current_player].ai_model
        if ai_model:
            def play() -> Move:
                assert ai_model
                # The model may return either a Move or a move index.
                move_or_move_index = ai_model.play_move(state)
                if isinstance(move_or_move_index, Move):
                    return move_or_move_index
                else:
                    return game_service.engine.create_move_from_move_index(move_or_move_index)

            _execute_off_thread(
                function=play,
                callback=game_service.play_move,
                use_busy_cursor=True,
                min_duration=_MIN_AI_PLAY_TIME_SEC,
            )

    # window events
    def on_destroy(self, _: wx.WindowDestroyEvent) -> None:
        # Stop receiving game events once the window is torn down.
        self.game_service.remove_listener(self)

    # GameService events
    def on_new_game(self, state: State) -> None:
        self.board.Refresh()

    def on_move(self, old_state: State, move: Move, new_state: State) -> None:
        self.board.Refresh()
        # The next player may be an AI; give it a chance to move.
        self.maybe_play_ai_move()

    def on_game_over(self, state: State) -> None:
        self.show_result(state)
class BaseHoverDrawer(matrixgame.MatrixGameBoard.AdditionalDrawing):
    """Tracks the mouse over a board window and lets subclasses draw a hover
    indicator at the corresponding board cell.

    Subclasses implement get_board_coordinates_for_mouse_event() and draw().
    """
    board: wx.Window
    # BUG FIX: the original wrote "hover_board_coordinates = Optional[...]",
    # assigning the typing construct as a class attribute instead of
    # declaring a type annotation.
    hover_board_coordinates: Optional[MatrixBoardCoordinates]

    def __init__(self, board: wx.Window):
        self.board = board
        self.hover_board_coordinates = None
        self.board.Bind(wx.EVT_PAINT, self.on_paint)
        self.board.Bind(wx.EVT_MOTION, self.on_motion)
        self.board.Bind(wx.EVT_LEAVE_WINDOW, self.on_leave)

    def get_board_coordinates_for_mouse_event(self, event: wx.MouseEvent) -> MatrixBoardCoordinates:
        """Map a mouse position to board coordinates; subclass hook."""
        raise NotImplementedError()

    def draw(self, gc: wx.GraphicsContext) -> None:
        """Draw the hover indicator; subclass hook."""
        raise NotImplementedError()

    def on_paint(self, event: wx.PaintEvent) -> None:
        dc = wx.PaintDC(self.board)
        gc = wx.GraphicsContext.Create(dc)
        self.draw(gc)
        # Let other paint handlers run too.
        event.Skip()

    def on_motion(self, event: wx.MouseEvent) -> None:
        self.hover_board_coordinates = self.get_board_coordinates_for_mouse_event(event)
        self.board.Refresh()

    def on_leave(self, _: wx.MouseEvent) -> None:
        # Mouse left the board: clear the hover indicator.
        self.hover_board_coordinates = None
        self.board.Refresh()
| morph-dev/self-learning-ai | morphzero/ui/basegamepanel.py | basegamepanel.py | py | 5,976 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.TypeVar",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "typing.Callable",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "typing.Callable",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
... |
26780954219 | import sys
import skimage
from skimage import io, filters, feature
import numpy as np
import math
import time
DEBUG = False
edges = skimage.io.imread(fname="edges.png", as_gray=True)
map = np.zeros(shape=(len(edges),len(edges[0]))).astype(int)
#Current group ID number
segment = 1
#Dictionary for equivalent tags
tags = dict()
#------------------------------------------------------------------------------
# Given a binary image, make a map that assigns each edge (non-0) pixel
# to a 'connected component' #, linking adjacent pixels as a component
#
# Create a dictionary to keep track of which resulting edge component #s
# are equivalent, e.g. 1 and 3 or 2 and 4 in the example below
'''
. . . . . . . . . . . . . . . .
. 1 . . . . 1 . . 1 . . . . 2 .
. 1 . 1 1 . 1 . ..\ . 1 . 3 3 . 2 .
. . 1 . . . 1 . ''/ . . 1 . . . 2 .
. . . . 1 1 . . . . . . 4 2 . .
. . . . . . . . . . . . . . . .
'''
startTime = time.time()
# First pass of connected-component labelling over the binary edge image:
# scan every interior pixel in raster order.
for row in range(1, len(map)-1):
    for col in range(1, len(map[0])-1):
        # Only edge (non-zero) pixels receive labels.
        if(edges[row][col] != 0):
            # Check the 4 already-visited neighbours (NW, N, NE, W); the
            # remaining 4 have not been scanned yet in raster order.
            neighbors = [map[row-1][col-1], map[row-1][col ], map[row-1][col+1],
                         map[row ][col-1]]
            for pix in neighbors:
                if(pix != 0):
                    # Adopt the first labelled neighbour's label.
                    if(map[row][col] == 0):
                        map[row][col] = pix
                    # A different neighbouring label means two labels belong
                    # to one component: record the equivalence for pass 2.
                    elif(pix != map[row][col]):
                        tags[ map[row][col] ].add(pix)
            # No labelled neighbour: start a new component label.
            if(map[row][col] == 0):
                map[row][col] = segment
                segment += 1
            # Ensure every label has an (initially empty) equivalence set.
            if(map[row][col] not in tags):
                tags[ map[row][col] ] = set()
endTime = time.time()
print("Connected component assignment:",endTime-startTime)
#------------------------------------------------------------------------------
if(DEBUG):
#Print the dictionary
for tag in tags:
print (tag,":",tags[tag])
#------------------------------------------------------------------------------
# Taking the dictionary created in the last step, consolidate it so that all
# equivalent #s point to a single parent #
'''
. . . . . . . . . . . . . . . . ...
. 1 . . . . 2 . . 3 . . . . 4 . 1: {3}
. 1 . 3 3 . 2 . ..\ . 3 . 3 3 . 4 . 2: {4}
. . 1 . . . 2 . ''/ . . 3 . . . 4 . 3: { }
. . . . 4 2 . . . . . . 4 4 . . 4: { }
. . . . . . . . . . . . . . . . ...
This example is different than the one used throughout this file
1: {2, 5} 6: { } 1: {9} 6: { }
2: {3} 7: {8} ..\ 2: {9} 7: {9}
3: { } 8: {9} ''/ 3: {9} 8: {9}
4: { } 9: { } 4: { } 9: {1, 2, 3, 5, 7, 8}
5: {7} ... 5: {9} ...
'''
startTime = time.time()
# --- Pass 2: collapse the equivalence sets so that each chain of equivalent
# labels ends up pointing at a single representative (`compNum`, the first
# label of the chain reached by the iteration), and that representative
# accumulates the whole chain.
for compNum in tags:
    # Labels known (so far) to be equivalent to compNum
    lst = list(tags[compNum])
    index = 0
    # Worklist walk: lst grows as new transitive equivalences are discovered
    while index < len(lst):
        equiv = lst[index]
        # Labels that `equiv` is itself equivalent to
        children = tags[equiv]
        # Avoid a self-reference (compNum -> compNum)
        children.discard(compNum)
        # Only queue equivalences not already in the worklist
        children.difference_update(lst)
        lst = lst + list(children)
        # Re-point the child label at compNum alone
        tags[equiv] = set([compNum])
        index += 1
    # compNum now owns the full transitive closure of its chain
    tags[compNum] = set(lst)
endTime = time.time()
print("Connected component consolidation:",endTime-startTime)
#------------------------------------------------------------------------------
if(DEBUG):
    print("\n")
    for tag in tags:
        print (tag,":",tags[tag])
if(True):
    # Debug: print the provisional label map ("-|-" marks background)
    for row in range(0, len(map)):
        print()
        for col in range(0, len(map[0])):
            if(map[row][col] != 0):
                print("{:^3}".format(map[row][col]), end="")
            else:
                print("-|-", end="")
    print("\n")
    for tag in tags:
        print (tag,":",tags[tag])
    print("\n")
if(True):
    # Debug: print the map with every label resolved to its representative
    for row in range(0, len(map)):
        print()
        for col in range(0, len(map[0])):
            if(map[row][col] != 0):
                key = map[row][col]
                s = tags[key]
                if (len(s) != 0):
                    # Child labels point at one larger parent label
                    arbitratryVal = next(iter( tags[key] ))
                    if (key < arbitratryVal):
                        key = arbitratryVal
                print("{:^3}".format(key), end="")
            else:
                print("-|-", end="")
    print("\n")
#------------------------------------------------------------------------------
# For ease of use and readability, trim the dictionary so that
# parent component #s always point to an empty set
#
# This can easily be combined with the step below for a slight increase in
# efficiency, but is made separate for readability
#
# A parent component # will always point to either:
# - An empty set
# - A list of child component #s, all of which will be < parent
'''
1: {9} 6: { } 1: {9} 6: { }
2: {9} 7: {9} ..\ 2: {9} 7: {9}
3: {9} 8: {9} ''/ 3: {9} 8: {9}
4: { } 9: {1, 2, 3, 5, 7, 8} 4: { } 9: { }
5: {9} ... 5: {9} ...
'''
startTime = time.time()
# A label whose set's first element is smaller than itself is a parent
# (per the invariant stated above: children are all < parent); clear
# parents so that they point at an empty set.
for key in tags:
    if (len( tags[key] ) > 0):
        if (key > next(iter( tags[key] ))):
            tags[key] = set()
if (DEBUG):
    for tag in tags:
        print (tag,":",tags[tag])
    print("\n")
endTime = time.time()
print("Connected component dictionary trimming:",endTime-startTime)
#------------------------------------------------------------------------------
# Make a list of all coordinates per component
'''
. . . . . . . . ...
. 3 . . . . 4 . ...
. 3 . 3 3 . 4 . ..\ 3: (1,1), (2,1), (2,3), (2,4), (3,2)
. . 3 . . . 4 . ''/ 4: (1,6), (2,6), (3,6), (4,4), (4,5)
. . . . 4 4 . . ...
. . . . . . . . ...
'''
startTime = time.time()
# Map: parent component label -> list of (row, col) pixel coordinates.
components = dict()
for row in range(0, len(map)):
    for col in range(0, len(map[0])):
        # Only labeled (edge) pixels belong to a component
        if(map[row][col] != 0):
            key = map[row][col]
            s = tags[key]
            # A non-empty tag set marks a child label; its single entry is
            # the parent label to file this pixel under.
            if (len(s) != 0):
                key = next(iter(s))
            # setdefault(...).append replaces the original membership-test /
            # append / setdefault dance (double dict lookup) and drops an
            # unused `coord` local the original left behind.
            components.setdefault(key, []).append((row, col))
if (DEBUG):
    for comp in components:
        print (comp,":",len(components[comp]),":",components[comp])
    print("\n")
endTime = time.time()
print("Coordinates per connected component:",endTime-startTime)
#------------------------------------------------------------------------------
# Build an adjacency matrix between all components, using the shortest
# distance between two respective components as an edge
#
# This is an extremely slow, naive aproach to this problem.
# I will be optimizing this later
#
# Could also weight the cost based on whether the start and end points are
# a line segment end or not here
''' _______3__________ ...
3: (1,1), (2,1), (2,3), (2,4), (3,2) ..\ 4| [(2,4), (2,6), 2]
4: (1,6), (2,6), (3,6), (4,4), (4,5) ''/ 5| [(2,4), (2,9), 5]
5: (2,9), (3,9) ...| Start End Dist
'''
startTime = time.time()
size = len(components.keys())
# adjacency[i][j] = (start_coord, end_coord, dist): the closest pixel pair
# between component i and component j (object array holding tuples).
adjacency = np.empty(shape=(size, size)).astype(tuple)
# Brute force over every pixel pair of every component pair — quadratic in
# both component count and pixels per component (flagged slow by the author).
for i, (comp, coords) in enumerate(components.items()):
    for start in coords:
        for j, (comp2, coords2) in enumerate(components.items()):
            if (comp == comp2):
                # Self-edge: effectively infinite cost so Prim's never picks it
                adjacency[i][j] = ((-1,-1), (-1,-1), sys.maxsize)
                continue
            for end in coords2:
                # Euclidean distance between the two pixels
                dist = math.hypot(end[0] - start[0], end[1] - start[1])
                # Disabled experiment: discount distances whose endpoints are
                # line-segment tips (exactly one labeled neighbor).
                if(False):
                    neighbors1=[map[start[0]-1][start[1]-1], map[start[0]-1][start[1]], map[start[0]-1][start[1]+1],
                                map[start[0]][start[1]-1], map[start[0]][start[1]+1],
                                map[start[0]+1][start[1]-1], map[start[0]+1][start[1]], map[start[0]+1][start[1]+1]]
                    neighbors2=[map[end[0]-1][end[1]-1], map[end[0]-1][end[1]], map[end[0]-1][end[1]+1],
                                map[end[0]][end[1]-1], map[end[0]][end[1]+1],
                                map[end[0]+1][end[1]-1], map[end[0]+1][end[1]], map[end[0]+1][end[1]+1]]
                    numNonzero1 = np.count_nonzero(neighbors1)
                    numNonzero2 = np.count_nonzero(neighbors2)
                    # Favor connecting lines between segment endpoints rather
                    # than through their middles
                    if (numNonzero1 == 1 and numNonzero2 == 1):
                        dist *= .75
                    elif (numNonzero1 == 1 or numNonzero2 == 1):
                        dist *= .5
                # Keep the shortest pair seen so far; cells start out as
                # uninitialized garbage, hence the isinstance guard.
                if(not isinstance(adjacency[i][j], tuple) or
                   adjacency[i][j][2] > dist):
                    adjacency[i][j] = (start, end, dist)
#print(np.matrix(adjacency))
endTime = time.time()
print("Adjacency matrix creation:",endTime-startTime)
#------------------------------------------------------------------------------
# Using the adjacency matrix from the last step, create a minimum spanning
# tree with distance as the edge cost
#
# The below is Prim's algorithm
# Prim's algorithm: grow a minimum spanning tree over the component graph,
# using the precomputed closest-pixel distance as the edge weight.
startTime = time.time()
numVertices = len(adjacency)
visited = [False] * numVertices
numEdges = 0
# Chosen edges; each entry is an adjacency tuple (start_coord, end_coord, dist)
MST = []
# Seed the tree with vertex 0
visited[0] = True
while (numEdges < numVertices - 1):
    # `min_dist` renamed from the original `min`, which shadowed the builtin
    min_dist = sys.maxsize
    best_i = 0
    best_j = 0
    # Scan every (visited, unvisited) pair for the cheapest crossing edge
    for i in range(numVertices):
        if (visited[i]):
            for j in range(numVertices):
                if (not visited[j]):
                    if (min_dist > adjacency[i][j][2]):
                        min_dist = adjacency[i][j][2]
                        best_i = i
                        best_j = j
    MST.append(adjacency[best_i][best_j])
    visited[best_j] = True
    numEdges += 1
if (DEBUG):
    print("\n")
    print(visited)
    for edge in MST:
        print(edge)
endTime = time.time()
print("MST creation:",endTime-startTime)
#------------------------------------------------------------------------------
# Using the MST created above, draw lines along the
# edges between closest points to link components
#
# The below is Bresenham's Line Generation algorithm
'''
. . . . . . . .
. 3 . . . . 4 .
. 3 . 3 3 = 4 .
. . 3 . . . 4 .
. . . . 4 4 . .
. . . . . . . .
'''
startTime = time.time()
# Draw a straight 255-valued line along each MST edge to join the two
# components — Bresenham's line generation algorithm.
for edge in MST:
    # Set up initial conditions; endpoints are (row, col) pairs
    x1, y1 = edge[0]
    x2, y2 = edge[1]
    dx = x2 - x1
    dy = y2 - y1
    # Steep line (|slope| > 1): work in the transposed coordinate system
    slopedVertically = abs(dy) > abs(dx)
    if (slopedVertically):
        x1, y1 = y1, x1
        x2, y2 = y2, x2
    # Always iterate left-to-right
    swapped = False  # NOTE(review): set but never read afterwards
    if (x1 > x2):
        x1, x2 = x2, x1
        y1, y2 = y2, y1
        swapped = True
    # Recompute deltas after the swaps
    dx = x2 - x1
    dy = y2 - y1
    # Accumulated error term, seeded at half a step
    error = int(dx / 2)
    ystep = 1 if (y1 < y2) else -1
    # Generate points
    y = y1
    for x in range(x1, x2+1):
        # Undo the transposition when plotting
        if (slopedVertically):
            map[y][x] = 255
        else:
            map[x][y] = 255
        error -= abs(dy)
        if (error < 0):
            y += ystep
            error += dx
endTime = time.time()
print("Bresenham line generation:",endTime-startTime)
#------------------------------------------------------------------------------
# Save and display the stitched result.
# NOTE(review): `map` holds raw component labels plus 255s; img_as_int
# expects a normalized range — confirm the saved image looks as intended.
io.imsave(fname='final.png', arr=skimage.img_as_int(map))
# Display edges
io.imshow(map)
io.show()
if(False):
    # Disabled debug dump of the final map ("O" = set pixel)
    for row in range(0, len(map)):
        print()
        for col in range(0, len(map[0])):
            if(map[row][col] != 0):
                print(" O ", end="")
            else:
                #print("-|-", end="")
                print(" ", end="")
    print("\n")
print("\n") | Sgordon4/ImgToTrack | OldInProgress/WorkingTwoPass.py | WorkingTwoPass.py | py | 12,420 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "skimage.io.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "skimage.io",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_n... |
7718670034 | import json
from sqlnet.lib.dbengine import DBEngine
import numpy as np
from tqdm import tqdm
import re
# pattern = re.compile(r'[-一二三四五六七八九十百千万亿年\d]{2,}|\d+')
def load_data(sql_paths, table_paths, use_small=False):
    """Load query records and table schemas from newline-delimited JSON files.

    Args:
        sql_paths: one path, or a list/tuple of paths, to query files
            (one JSON object per line, each with a 'table_id').
        table_paths: one path, or a list/tuple of paths, to table files
            (one JSON object per line, each with an 'id').
        use_small: if True, keep at most the first 1000 queries per file.

    Returns:
        (sql_data, table_data): queries whose table was actually loaded,
        and a dict mapping table id -> table record.
    """
    # Accept list *or* tuple; the original wrapped any non-list in a tuple,
    # which broke when callers passed a tuple of paths.
    if not isinstance(sql_paths, (list, tuple)):
        sql_paths = (sql_paths, )
    if not isinstance(table_paths, (list, tuple)):
        table_paths = (table_paths, )
    sql_data = []
    table_data = {}
    for SQL_PATH in sql_paths:
        with open(SQL_PATH, encoding='utf-8') as inf:
            for idx, line in enumerate(inf):
                sql = json.loads(line.strip())
                if use_small and idx >= 1000:
                    break
                sql_data.append(sql)
        print("Loaded %d data from %s" % (len(sql_data), SQL_PATH))
    for TABLE_PATH in table_paths:
        with open(TABLE_PATH, encoding='utf-8') as inf:
            for line in inf:
                tab = json.loads(line.strip())
                table_data[tab[u'id']] = tab
        print("Loaded %d data from %s" % (len(table_data), TABLE_PATH))
    # Drop queries referencing tables that were not loaded.
    ret_sql_data = [sql for sql in sql_data if sql[u'table_id'] in table_data]
    return ret_sql_data, table_data
def load_dataset(use_small=False, mode='train'):
    """Load the fixed train/val or val/test splits from the data/ directory.

    Args:
        use_small: forwarded to load_data (caps each file at 1000 queries).
        mode: 'train' -> (train_sql, train_table, train_db, dev_sql,
              dev_table, dev_db); 'test' -> (dev_sql, dev_table, dev_db,
              test_sql, test_table, test_db).

    Raises:
        ValueError: for any other mode (the original silently returned None,
        which made callers fail later with an opaque unpacking error).
    """
    print("Loading dataset")
    # The validation split is needed by both modes.
    dev_sql, dev_table = load_data('data/val/val.json', 'data/val/val.tables.json', use_small=use_small)
    dev_db = 'data/val/val.db'
    if mode == 'train':
        train_sql, train_table = load_data('data/train/train.json', 'data/train/train.tables.json', use_small=use_small)
        train_db = 'data/train/train.db'
        return train_sql, train_table, train_db, dev_sql, dev_table, dev_db
    elif mode == 'test':
        test_sql, test_table = load_data('data/test/test.json', 'data/test/test.tables.json', use_small=use_small)
        test_db = 'data/test/test.db'
        return dev_sql, dev_table, dev_db, test_sql, test_table, test_db
    raise ValueError("mode must be 'train' or 'test', got %r" % (mode,))
def to_batch_seq(sql_data, table_data, idxes, st, ed, raw_data=False):
    """Assemble one training batch from sql_data[idxes[st:ed]].

    Returns character-level question/header sequences, per-query label
    tuples (ans_seq), raw WHERE conditions, and per-table column contents;
    when raw_data is True the raw (question, headers) pairs are included.
    """
    q_seq = []
    col_seq = []
    col_num = []
    ans_seq = []
    gt_cond_seq = []
    raw_seq = []
    sel_num_seq = []
    table_content = []
    for i in range(st, ed):
        sql = sql_data[idxes[i]]
        # Number of columns picked by the SELECT clause
        sel_num = len(sql['sql']['sel'])
        sel_num_seq.append(sel_num)
        # Number of WHERE conditions (used only inside ans_seq below)
        conds_num = len(sql['sql']['conds'])
        # Question with all whitespace removed, split into characters
        one_question = ''.join(sql['question'].split())
        q_seq.append([char for char in one_question])
        # Each header of the query's table, whitespace removed, as characters
        # (table_data is keyed by table id)
        col_seq.append([[char for char in ''.join(header.split())] for header in table_data[sql['table_id']]['header']])
        # Column count of the table
        col_num.append(len(table_data[sql['table_id']]['header']))
        # Label tuple used to compute the model loss; WHERE values excluded
        ans_seq.append(
            (
                len(sql['sql']['agg']),
                sql['sql']['sel'],
                sql['sql']['agg'],
                conds_num,
                # WHERE Column
                tuple(x[0] for x in sql['sql']['conds']),
                # WHERE Operator
                tuple(x[1] for x in sql['sql']['conds']),
                sql['sql']['cond_conn_op'],
            )
        )
        # Table contents transposed to per-column string lists:
        # table_content = [[[str1, str2, ...], column2, ...], table2, ...]
        one_table = []
        table_content_rows = table_data[sql['table_id']]['rows']
        for content_column in range(col_num[-1]):
            one_table.append([str(x[content_column]) for x in table_content_rows])
        table_content.append(one_table)
        # Keep the full WHERE conditions separately (column, op, value)
        gt_cond_seq.append(sql['sql']['conds'])
        # Original question and headers, untokenized
        raw_seq.append((sql['question'], table_data[sql['table_id']]['header']))
    if raw_data:
        return q_seq, sel_num_seq, col_seq, col_num, ans_seq, gt_cond_seq, raw_seq, table_content
    else:
        return q_seq, sel_num_seq, col_seq, col_num, ans_seq, gt_cond_seq, table_content
def to_batch_seq_test(sql_data, table_data, idxes, st, ed):
    """Assemble one inference batch (no labels) from sql_data[idxes[st:ed]].

    Returns character-level question and header sequences, column counts,
    raw questions, table ids, and per-column table contents.
    """
    q_seq, col_seq, col_num = [], [], []
    raw_seq, table_ids, table_content = [], [], []
    for batch_idx in range(st, ed):
        record = sql_data[idxes[batch_idx]]
        table = table_data[record['table_id']]
        # Question with whitespace removed, as a character sequence
        q_seq.append(list(''.join(record['question'].split())))
        # Each header, whitespace removed, as a character sequence
        col_seq.append([list(''.join(header.split())) for header in table['header']])
        n_cols = len(table['header'])
        col_num.append(n_cols)
        raw_seq.append(record['question'])
        table_ids.append(record['table_id'])
        # Transpose the table's rows into per-column value lists
        rows = table['rows']
        table_content.append([[r[c] for r in rows] for c in range(n_cols)])
    return q_seq, col_seq, col_num, raw_seq, table_ids, table_content
def to_batch_query(sql_data, idxes, st, ed):
    """Collect ground-truth SQL dicts and their table ids for batch [st, ed).

    Returns (query_gt, table_ids) where query_gt[k] is the 'sql' dict
    (sel/agg/conds/cond_conn_op) of the k-th query in the batch.
    """
    query_gt = []
    table_ids = []
    for i in range(st, ed):
        # (The original contained a no-op self-assignment of ['conds'] here.)
        record = sql_data[idxes[i]]
        query_gt.append(record['sql'])
        table_ids.append(record['table_id'])
    return query_gt, table_ids
def cn_to_num(text, selected_num):
    """Convert Chinese numerals in *text* to Arabic digit strings.

    Strings ending in '年' (year) are special-cased: the trailing '年' is
    dropped and a two-digit remainder is expanded to a four-digit year
    (>50 -> 19xx, otherwise 20xx).  For non-year strings, every digit run
    in the converted result is also appended to *selected_num* (mutated
    in place).

    The parameter was renamed from the builtin-shadowing name ``str``;
    the only caller in this file passes it positionally.
    """
    digits_dict = {'零': '0', '一': '1', '二': "2", '两': '2', '三': '3', '四': '4',
                   '五': '5', '六': '6', '七': '7', '八': '8', '九': '9'}
    # Magnitude characters contribute trailing zeros only (e.g. 十 -> '0').
    num_dict = {'十': '0', '百': '00', '千': "000", '万': '0000', '亿': '00000000'}
    if re.search(r'年$', text) is not None:
        re_str = text
        for ch in text:
            # str.replace is equivalent to the original re.sub here (the dict
            # keys are single literal characters) and avoids any regex
            # metacharacter hazard.
            if ch in digits_dict:
                re_str = re_str.replace(ch, digits_dict[ch])
            if ch in num_dict:
                re_str = re_str.replace(ch, num_dict[ch])
        # len == 3 means two digits + '年': expand to a full year.
        if len(re_str) == 3 and int(re_str[0:-1]) > 50:
            final_str = '19' + re_str[0:-1]
        elif len(re_str) == 3:
            final_str = '20' + re_str[0:-1]
        else:
            final_str = re_str[0:-1]
    else:
        final_str = text
        for ch in text:
            if ch in digits_dict:
                final_str = final_str.replace(ch, digits_dict[ch])
            if ch in num_dict:
                final_str = final_str.replace(ch, num_dict[ch])
        # Record every digit run for downstream candidate-value matching.
        selected_num.extend(re.findall(r'[0-9]+', final_str))
    return final_str
def generate_gt_value(table, cond_seq, q):
    """
    :param table: all tables for one batch queries, all columns for one table
    :param cond_seq: [[[codition_coloumn, condition_type, condition_value],[...]], ..., ]
    :param q: character-tokenized questions for the batch (parallel to cond_seq)
    :return:
        - gt_index: np.int64 array of [condition_column, value_index] rows,
          one per condition across the whole batch
        - gt_value: all passable values for each condition,
          [[value list for condition n], ...,]
        - condition_num: per-query condition counts
        - max_value_length: longest candidate-value list in the batch
    """
    # Runs of digits / Chinese numeral characters found in the question text
    pattern = re.compile(r'[两\-一二三四五六七八九十.百千万亿年\d]+')
    num_dict = {'十': '0', '百': '00', '千': "000", '万': '0000', '亿': '00000000'}
    gt_index = []
    gt_value = []
    condition_num = []
    for i, one_q_codition in enumerate(cond_seq):
        condition_num.append(len(one_q_codition))
        # Numeric-ish substrings of the question = candidate condition values
        selected_num = pattern.findall(''.join(q[i]))
        # NOTE(review): selected_num is appended to (by cn_to_num and below)
        # while being enumerated, so later-appended entries are also visited —
        # presumably intentional; confirm.
        for j, element in enumerate(selected_num):
            # Element contains CJK characters: convert Chinese numerals
            if re.search(r'[\u4e00-\u9fa5]', element) is not None:
                selected_num[j] = cn_to_num(element, selected_num)
                # Also add bare magnitudes ('1' + zeros) as extra candidates,
                # e.g. 百 -> '100'
                zero_nums = []
                for e_str in element:
                    if num_dict.__contains__(e_str):
                        zero_nums.append(num_dict[e_str])
                        selected_num.append('1' + num_dict[e_str])
                if len(zero_nums) >= 2:
                    selected_num.append('1' + ''.join(zero_nums))
        # Plain digit runs from the raw question, then dedupe
        selected_num.extend(re.findall(r'[0-9]+', ''.join(q[i])))
        selected_num = list(set(selected_num))
        selected_table = table[i]
        for one_condition in one_q_codition:
            # [condition_column, value_index] for this condition
            gt_one_index = [one_condition[0]]
            selected_column = selected_table[one_condition[0]]
            # Strip a trailing '.0' so e.g. '3.0' matches '3'
            # NOTE(review): the pattern '.0$' has an unescaped dot, so any
            # character before a trailing 0 matches — confirm intent.
            for e, element in enumerate(selected_column):
                if re.search(".0$", element) is not None:
                    selected_column[e] = re.sub(r'.0$', '', element)
            try:
                # Condition ops >= 2: value must come from the column's cells
                if one_condition[1] >= 2:
                    gt_one_value = selected_column
                    gt_one_index.append(selected_column.index(one_condition[-1]))
                # Otherwise: value must come from numbers in the question
                else:
                    gt_one_value = selected_num
                    gt_one_index.append(selected_num.index(one_condition[-1]))
            except BaseException as e:
                # Ground-truth value not among the candidates: fall back to a
                # random index so training can continue on this sample.
                gt_one_index.append(np.random.randint(0, len(gt_one_value), 1))
            gt_index.append(gt_one_index)
            gt_value.append(gt_one_value)
    max_value_length = max([len(x) for x in gt_value])
    assert np.array(condition_num).sum() == len(gt_value)
    return np.array(gt_index, dtype=np.int64), gt_value, condition_num, max_value_length
def epoch_train(model, optimizer, batch_size, sql_data, table_data, use_table=False):
    """Train the model for one epoch over sql_data and return the mean loss.

    When use_table is True, WHERE-value ground truth is derived from the
    table contents via generate_gt_value; otherwise the model's own
    generate_gt_where_seq_test is used.  Batches whose ground-truth
    generation fails are counted and skipped.
    """
    model.train()
    # Shuffle sample order each epoch
    perm = np.random.permutation(len(sql_data))
    badcase = 0
    cum_loss = 0.0
    for st in tqdm(range(len(sql_data)//batch_size+1)):
        # Clamp the final (possibly short) batch to the data size
        ed = (st+1)*batch_size if (st+1)*batch_size < len(perm) else len(perm)
        st = st * batch_size
        q_seq, gt_sel_num, col_seq, col_num, ans_seq, gt_cond_seq, \
            table_content = to_batch_seq(sql_data, table_data, perm, st, ed)
        try:
            if use_table:
                gt_where_seq = generate_gt_value(table_content, gt_cond_seq, q_seq)
            else:
                gt_where_seq = model.generate_gt_where_seq_test(q_seq, gt_cond_seq)
        except BaseException:
            # Skip batches whose WHERE ground truth cannot be built
            badcase += 1
            print('badcase for generating gt_where_seq: ', badcase)
            continue
        # Selected columns per query (position 1 of each ans tuple)
        gt_sel_seq = [x[1] for x in ans_seq]
        score = model.forward(q_seq, col_seq, col_num, table_content, gt_where=gt_where_seq, gt_cond=gt_cond_seq,
                              gt_sel=gt_sel_seq, gt_sel_num=gt_sel_num)
        # score: sel_num_score, sel_col_score, sel_agg_score, cond_score, cond_rela_score
        loss = model.loss(score, ans_seq, gt_where_seq)
        # Weight the running loss by the (possibly short) batch size
        cum_loss += loss.data.cpu().numpy()*(ed - st)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return cum_loss / len(sql_data)
def predict_test(model, batch_size, sql_data, table_data, output_path):
    """Run inference over sql_data and write one predicted SQL JSON per line
    to output_path (in the original sample order)."""
    model.eval()
    perm = list(range(len(sql_data)))
    fw = open(output_path, 'w')
    for st in tqdm(range(len(sql_data)//batch_size+1)):
        ed = (st+1)*batch_size if (st+1)*batch_size < len(perm) else len(perm)
        st = st * batch_size
        q_seq, col_seq, col_num, raw_q_seq, table_ids, table_content = to_batch_seq_test(sql_data, table_data, perm, st, ed)
        score = model.forward(q_seq, col_seq, col_num, table_content)
        sql_preds = model.gen_query(score, q_seq, col_seq, raw_q_seq)
        for sql_pred in sql_preds:
            # HACK: round-trips the prediction through repr/eval, presumably
            # to coerce it into plain Python containers for json.dumps —
            # eval on arbitrary strings is unsafe; confirm and replace.
            sql_pred = eval(str(sql_pred))
            fw.writelines(json.dumps(sql_pred, ensure_ascii=False)+'\n')
    fw.close()
def epoch_acc(model, batch_size, sql_data, table_data, db_path):
    """Evaluate the model on sql_data.

    Returns three accuracies (each normalized by the full dataset size):
    per-subtask ("one"), whole-query logical form ("tot"), and execution
    accuracy (predicted SQL executes to the same result as the gold SQL
    against the database at db_path).
    """
    engine = DBEngine(db_path)
    model.eval()
    perm = list(range(len(sql_data)))
    badcase = 0
    one_acc_num, tot_acc_num, ex_acc_num = 0.0, 0.0, 0.0
    for st in tqdm(range(len(sql_data)//batch_size+1)):
        ed = (st+1)*batch_size if (st+1)*batch_size < len(perm) else len(perm)
        st = st * batch_size
        q_seq, gt_sel_num, col_seq, col_num, ans_seq, gt_cond_seq, raw_data, table_content = \
            to_batch_seq(sql_data, table_data, perm, st, ed, raw_data=True)
        # query_gt: ground truth of sql, data['sql'], containing sel, agg,
        # conds:{sel, op, value}
        query_gt, table_ids = to_batch_query(sql_data, perm, st, ed)
        raw_q_seq = [x[0] for x in raw_data]
        try:
            score = model.forward(q_seq, col_seq, col_num, table_content)
            # Decode scores into the predicted SQL structure
            pred_queries = model.gen_query(score, q_seq, col_seq, raw_q_seq)
            one_err, tot_err = model.check_acc(raw_data, pred_queries, query_gt)
        except:
            # Whole batch dropped on any forward/decoding failure
            badcase += 1
            print('badcase for validation', badcase)
            continue
        one_acc_num += (ed-st-one_err)
        tot_acc_num += (ed-st-tot_err)
        # Execution accuracy: compare result sets of gold vs predicted SQL
        for sql_gt, sql_pred, tid in zip(query_gt, pred_queries, table_ids):
            ret_gt = engine.execute(tid, sql_gt['sel'], sql_gt['agg'], sql_gt['conds'], sql_gt['cond_conn_op'])
            try:
                ret_pred = engine.execute(tid, sql_pred['sel'], sql_pred['agg'], sql_pred['conds'], sql_pred['cond_conn_op'])
            except:
                # Unexecutable prediction counts as a mismatch
                ret_pred = None
            ex_acc_num += (ret_gt == ret_pred)
    return one_acc_num / len(sql_data), tot_acc_num / len(sql_data), ex_acc_num / len(sql_data)
def load_word_emb(file_name):
    """Load a word-embedding table (word -> vector) from a JSON file.

    Returns the parsed dict.  Uses a context manager and explicit UTF-8:
    the original leaked the file handle on a json.load error and relied on
    the platform default encoding.
    """
    print('Loading word embedding from %s' % file_name)
    with open(file_name, encoding='utf-8') as f:
        ret = json.load(f)
    print('Vocabulary size: ', len(ret))
    return ret
| HoratioJSY/NL2SQL_CN | sqlnet/utils.py | utils.py | py | 14,666 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 165,
... |
373923622 | from typing import Any, List
import vyper.utils as util
from vyper.ast.signatures.function_signature import FunctionSignature, VariableRecord
from vyper.exceptions import CompilerPanic
from vyper.old_codegen.context import Context
from vyper.old_codegen.expr import Expr
from vyper.old_codegen.function_definitions.utils import get_nonreentrant_lock
from vyper.old_codegen.lll_node import Encoding, LLLnode
from vyper.old_codegen.parser_utils import get_element_ptr, getpos, make_setter
from vyper.old_codegen.stmt import parse_body
from vyper.old_codegen.types.types import BaseType, ByteArrayLike, ListType, TupleLike, TupleType
def _should_decode(typ):
    """Return True when values of this type must be copied out of the ABI
    payload and clamped/validated before use.

    Either a basetype which needs to be clamped, or a complex type which
    contains something that needs to be clamped (checked recursively).
    """
    if isinstance(typ, BaseType):
        # Full-width word types need no clamp; everything else does.
        return typ.typ not in ("int256", "uint256", "bytes32")
    if isinstance(typ, ByteArrayLike):
        return True
    if isinstance(typ, ListType):
        return _should_decode(typ.subtype)
    if isinstance(typ, TupleLike):
        return any(_should_decode(t) for t in typ.tuple_members())
    # Unknown type category: internal compiler invariant violated.
    raise CompilerPanic(f"_should_decode({typ})")
# register function args with the local calling context.
# also allocate the ones that live in memory (i.e. kwargs)
# register function args with the local calling context.
# also allocate the ones that live in memory (i.e. kwargs)
def _register_function_args(context: Context, sig: FunctionSignature) -> List[LLLnode]:
    """Register the signature's positional (base) args in *context* and
    return the LLL statements that copy memory-decoded args into place."""
    pos = None
    ret = []
    # the type of the calldata: base args as one ABI-encoded tuple
    base_args_t = TupleType([arg.typ for arg in sig.base_args])
    # Locate the encoded args: constructors read them from code (appended
    # after the runtime bytecode); external calls read calldata past the
    # 4-byte method id.
    if sig.is_init_func:
        base_args_ofst = LLLnode(
            "~codelen", location="code", typ=base_args_t, encoding=Encoding.ABI
        )
    else:
        base_args_ofst = LLLnode(4, location="calldata", typ=base_args_t, encoding=Encoding.ABI)
    for i, arg in enumerate(sig.base_args):
        arg_lll = get_element_ptr(base_args_ofst, i, pos=pos)
        if _should_decode(arg.typ):
            # Needs clamping/validation: allocate a memory slot and copy
            # (make_setter emits the clamp as part of the copy).
            p = context.new_variable(arg.name, arg.typ, is_mutable=False)
            dst = LLLnode(p, typ=arg.typ, location="memory")
            ret.append(make_setter(dst, arg_lll, pos=pos))
        else:
            # Safe to leave in place: register a record pointing directly
            # at the ABI-encoded source.
            context.vars[arg.name] = VariableRecord(
                name=arg.name,
                pos=arg_lll,
                typ=arg.typ,
                mutable=False,
                location=arg_lll.location,
                encoding=Encoding.ABI,
            )
    return ret
def _annotated_method_id(abi_sig):
    """Return the 4-byte method id of *abi_sig* as an LLLnode, annotated
    with the hex id and the signature for readable LLL output."""
    method_id = util.abi_method_id(abi_sig)
    annotation = f"{hex(method_id)}: {abi_sig}"
    return LLLnode(method_id, annotation=annotation)
def _generate_kwarg_handlers(context: Context, sig: FunctionSignature, pos: Any) -> List[Any]:
    """Generate one method-id dispatch branch per kwarg arity.

    Since kwargs might come in thru calldata or be default, allocate them
    in memory and fill each slot either from calldata or from the default
    expression, depending on how many kwargs the caller supplied.

    A kwarg handler looks like:
        (if (eq _method_id <method_id>)
            copy calldata args to memory
            write default args to memory
            goto external_function_common_lll
    """
    def handler_for(calldata_kwargs, default_kwargs):
        # One branch for the ABI signature where `calldata_kwargs` were
        # supplied by the caller and `default_kwargs` fall back to defaults.
        calldata_args = sig.base_args + calldata_kwargs
        # create a fake tuple type so that get_element_ptr works
        calldata_args_t = TupleType(list(arg.typ for arg in calldata_args))
        abi_sig = sig.abi_signature_for_kwargs(calldata_kwargs)
        method_id = _annotated_method_id(abi_sig)
        calldata_kwargs_ofst = LLLnode(
            4, location="calldata", typ=calldata_args_t, encoding=Encoding.ABI
        )
        # a sequence of statements to strictify kwargs into memory
        ret = ["seq"]
        # TODO optimize make_setter by using
        # TupleType(list(arg.typ for arg in calldata_kwargs + default_kwargs))
        # (must ensure memory area is contiguous)
        n_base_args = len(sig.base_args)
        # Caller-supplied kwargs: copy each out of calldata into its slot.
        for i, arg_meta in enumerate(calldata_kwargs):
            k = n_base_args + i
            dst = context.lookup_var(arg_meta.name).pos
            lhs = LLLnode(dst, location="memory", typ=arg_meta.typ)
            # Bounds check elided: k is statically within the fake tuple.
            rhs = get_element_ptr(calldata_kwargs_ofst, k, pos=None, array_bounds_check=False)
            ret.append(make_setter(lhs, rhs, pos))
        # Remaining kwargs: evaluate the default expression into the slot.
        for x in default_kwargs:
            dst = context.lookup_var(x.name).pos
            lhs = LLLnode(dst, location="memory", typ=x.typ)
            kw_ast_val = sig.default_values[x.name]  # e.g. `3` in x: int = 3
            rhs = Expr(kw_ast_val, context).lll_node
            ret.append(make_setter(lhs, rhs, pos))
        # Jump into the shared function body once all args are in memory.
        ret.append(["goto", sig.external_function_base_entry_label])
        ret = ["if", ["eq", "_calldata_method_id", method_id], ret]
        return ret

    ret = ["seq"]
    keyword_args = sig.default_args
    # allocate a memory slot for every kwarg up front
    for arg in keyword_args:
        context.new_variable(arg.name, arg.typ, is_mutable=False)
    # One handler per possible arity: 0..len-1 caller-supplied kwargs...
    for i, _ in enumerate(keyword_args):
        calldata_kwargs = keyword_args[:i]
        default_kwargs = keyword_args[i:]
        ret.append(handler_for(calldata_kwargs, default_kwargs))
    # ...plus the all-supplied case.
    ret.append(handler_for(keyword_args, []))
    return ret
# TODO it would be nice if this returned a data structure which were
# amenable to generating a jump table instead of the linear search for
# method_id we have now.
# TODO it would be nice if this returned a data structure which were
# amenable to generating a jump table instead of the linear search for
# method_id we have now.
def generate_lll_for_external_function(code, sig, context, check_nonpayable):
    # TODO type hints:
    # def generate_lll_for_external_function(
    #     code: vy_ast.FunctionDef, sig: FunctionSignature, context: Context, check_nonpayable: bool,
    # ) -> LLLnode:
    """Return the LLL for an external function. Includes code to inspect the method_id,
    enter the function (nonpayable and reentrancy checks), handle kwargs and exit
    the function (clean up reentrancy storage variables)
    """
    func_type = code._metadata["type"]
    pos = getpos(code)
    nonreentrant_pre, nonreentrant_post = get_nonreentrant_lock(func_type)
    # generate handlers for base args and register the variable records
    handle_base_args = _register_function_args(context, sig)
    # generate handlers for kwargs and register the variable records
    kwarg_handlers = _generate_kwarg_handlers(context, sig, pos)
    # once optional args have been handled, generate the main body:
    # shared entry label, arg copies, payability check, reentrancy lock
    entrance = [["label", sig.external_function_base_entry_label]]
    entrance += handle_base_args
    if check_nonpayable and sig.mutability != "payable":
        # if the contract contains payable functions, but this is not one
        # of them, add an assertion that the value of the call is zero
        entrance += [["assert", ["iszero", "callvalue"]]]
    entrance += nonreentrant_pre
    body = [parse_body(c, context) for c in code.body]
    # NOTE(review): `exit` shadows the builtin; left as-is (doc-only pass).
    exit = [["label", sig.exit_sequence_label]] + nonreentrant_post
    if sig.is_init_func:
        pass  # init func has special exit sequence generated by parser.py
    elif context.return_type is None:
        exit += [["stop"]]
    else:
        # ret_ofst and ret_len stack items passed by function body; consume using 'pass'
        exit += [["return", "pass", "pass"]]
    # the lll which comprises the main body of the function,
    # besides any kwarg handling
    func_common_lll = ["seq"] + entrance + body + exit
    if sig.is_default_func or sig.is_init_func:
        # default and init funcs have special entries generated by parser.py
        ret = func_common_lll
    else:
        ret = kwarg_handlers
        # sneak the base code into the last kwarg handler's branch
        # TODO rethink this / make it clearer
        ret[-1][-1].append(func_common_lll)
    return LLLnode.from_list(ret, pos=getpos(code))
| webanck/GigaVoxels | lib/python3.8/site-packages/vyper/old_codegen/function_definitions/external_function.py | external_function.py | py | 7,824 | python | en | code | 23 | github-code | 1 | [
{
"api_name": "vyper.old_codegen.types.types.BaseType",
"line_number": 19,
"usage_type": "argument"
},
{
"api_name": "vyper.old_codegen.types.types.ByteArrayLike",
"line_number": 21,
"usage_type": "argument"
},
{
"api_name": "vyper.old_codegen.types.types.ListType",
"line_num... |
6608870263 | #coding:utf-8
'''
Created on 2013-5-24
@author: shuangluo
'''
import json
from django.http import HttpResponse
from ldap.models import Module, BizGroup, BizSet, Machine
from ldap.utils import modules_for_user
def top_group(request):
    """Return a JSON array of <option> elements, one per BizSet (top group)."""
    options = [
        "<option value='%s'>%s</option>" % (item.tgID, item.tgName)
        for item in BizSet.objects.all()
    ]
    return HttpResponse(json.dumps(options))
def biz_group(request, top_id):
    """Return a JSON array of <option> elements for BizGroups whose parent
    BizSet id matches top_id (compared as strings)."""
    options = [
        "<option value='%s'>%s</option>" % (group.bgID, group.bgName)
        for group in BizGroup.objects.all()
        if str(group.bgParent.tgID) == top_id
    ]
    return HttpResponse(json.dumps(options))
def machine_group(request, biz_id):
    """Return a JSON array of <option> elements for Modules under BizGroup
    biz_id, restricted to the modules the requesting user may see."""
    groups = modules_for_user(request)
    options = [
        "<option value='%s'>%s</option>" % (module.mgID, module.mgName)
        for module in Module.objects.all()
        if str(module.mgParent.bgID) == biz_id and module.mgID in groups
    ]
    return HttpResponse(json.dumps(options))
def machine_from_group(request, mg_id):
    """Return a JSON array of pre-selected <option> elements, one per
    Machine IP in module group mg_id (compared as strings)."""
    options = [
        "<option value='%s' selected='selected'>%s</option>" % (machine.mIP, machine.mIP)
        for machine in Machine.objects.all()
        if str(machine.mGroupID.mgID) == mg_id
    ]
    return HttpResponse(json.dumps(options))
| no2key/ldap_management | ldap/ajax.py | ajax.py | py | 1,492 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ldap.models.BizSet.objects.all",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "ldap.models.BizSet.objects",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "ldap.models.BizSet",
"line_number": 15,
"usage_type": "name"
},
{
... |
4926506431 | #1
from openvino.inference_engine import IENetwork, IECore, IEPlugin
from time import time
import logging as log
class face_detection:
    '''
    Class for the Face Detection Model (OpenVINO IR .xml/.bin pair).
    '''
    def __init__(self, model_name, device='CPU', extensions=None):
        self.model_name = model_name
        # Keep the IR file paths on the instance: load_model() reads them.
        # (The original bound them to locals only, so load_model() raised
        # AttributeError.)
        self.model_weights = model_name + '.bin'
        self.model_structure = model_name + '.xml'
        self.device = device
        self.extensions = extensions
        log.info(f"\nModel: {self.model_name}")

    def load_model(self):  # 1
        """Read the IR and load the network onto the requested device."""
        start = time()
        self.model = IENetwork(self.model_structure, self.model_weights)
        ie = IECore()
        # Honor the device chosen at construction (was hard-coded 'CPU').
        self.net = ie.load_network(network=self.model, device=self.device, num_requests=1)
        # Original logged "Model Load Time: " with an empty format call,
        # dropping the value entirely.
        log.info("Model Load Time: {:.3f}s".format(time() - start))

    def check_model(self):  # 2
        """Cache the network's first input/output blob names."""
        self.input_blob = next(iter(self.model.inputs))
        self.output_blob = next(iter(self.model.outputs))

    def predict(self, image):  # 4
        """Run synchronous inference on a preprocessed image and return the
        raw inference result."""
        input_dict = {self.input_blob: image}
        start = time()
        result = self.net.infer(input_dict)
        # Original logged the start timestamp as if it were the duration.
        log.info("Inference complete in {:.4f}s".format(time() - start))
        return result

    def preprocess_input(self, image_path):  # 3
        """Load an image file and reshape it to the network's (b, c, h, w)
        input layout."""
        # NOTE(review): cv2 is used but never imported in this module — add
        # `import cv2` at the top of the file.
        pframe = cv2.imread(image_path)
        # TODO confirm API: IENetwork exposes input shapes via
        # self.model.inputs[self.input_blob].shape, not get_input_shape().
        b, c, h, w = self.model.get_input_shape()
        # Original passed undefined `frame` here (NameError); fixed to pframe.
        pframe = cv2.cvtColor(pframe, cv2.COLOR_RGB2BGR)
        pframe = cv2.resize(pframe, (w, h), interpolation=cv2.INTER_AREA)
        pframe = pframe.transpose((2, 0, 1))   # HWC -> CHW
        pframe = pframe.reshape((b, c, h, w))  # add batch dimension
        return pframe

    def preprocess_output(self, outputs):  # 5
        """
        The net outputs blob with shape: [1, 1, N, 7], where N is the number of
        detected bounding boxes. Each detection has the format [image_id, label,
        conf, x_min, y_min, x_max, y_max]
        """
        # NOTE(review): check_model() stores the output blob *name* (string)
        # in self.output_blob, so .shape/.reshape below look wrong —
        # presumably the `outputs` argument was intended; confirm before
        # changing (kept structurally as the original).
        _, _, N, values = self.output_blob.shape
        out = self.output_blob.reshape((N, values))
        objects = [out[n, :] for n in range(N)]
        return objects
{
"api_name": "logging.info",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "openvino.inference_engine.IENetwork",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "openvino.i... |
21534189825 |
# Dartboard-style "quiz" toy: pygame draws a bullseye target while the score
# for a console-entered ring number is printed.
# NOTE(review): there is no pygame event handling and doExit is never set, so
# the loop only ends via Ctrl-C; the blocking input() also freezes the window.
import pygame
pygame.init()
import random as rand  # NOTE(review): imported but never used

# screen
screen = pygame.display.set_mode((800,800))
pygame.display.set_caption("quiozz")
doExit = False

# outer ring: centre, colour (RGB), radius, line thickness
oX = 399
oY = 399
oR = 255
oG = 120
oB = 0
oRadius = 100
oThicc = 20

# inner ring
iX = 399
iY = 399
iR = 0
iG = 120
iB = 255
iRadius = 60
iThicc = 20

# middle (bullseye; thickness 0 means filled)
mX = 399
mY = 399
mR = 255
mG = 120
mB = 0
mRadius = 20
mThicc = 0

# scores
targetScore = 0  # NOTE(review): never updated — scores are only printed

while not doExit:
    # render ---------------------------------------
    # screen.fill((240,240,240))
    screen.fill((0,0,0))
    pygame.draw.circle(screen, (oR,oG,oB), (oX, oY), oRadius, oThicc)
    pygame.draw.circle(screen, (iR,iG,iB), (iX, iY), iRadius, iThicc)
    pygame.draw.circle(screen, (mR, mG, mB), (mX, mY), mRadius, mThicc)
    # white spacer rings between the coloured ones
    pygame.draw.circle(screen, (255,255,255), (399, 399), 80, 20)
    pygame.draw.circle(screen, (255,255,255), (399, 399), 40, 20)
    pygame.display.flip()
    # score: ring 1 (bullseye) is worth 50, each ring outward 10 less
    level = int(input("What level, from 5 (outer level) to 1 (bullseye) of the target you hit"))
    if level == 1:
        print("bullseye!!!")
        print("You get a score of 50")
    elif level == 2:
        print("You get a score of 40")
    elif level == 3:
        print("You get a score of 30")
    elif level == 4:
        print("You get a score of 20")
    elif level == 5:
        print("You get a score of 10")
    else:
        print("you get no score >:(")

# NOTE(review): unreachable while doExit stays False.
pygame.quit()
| SebastianStucklen/quizzzz2172023 | quizzzz2172023/quizzzz2172023.py | quizzzz2172023.py | py | 1,396 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.display.s... |
6742353274 | import math
import pylab
import itertools
import random
import random as rand
import numpy as np
import networkx as nx
from networkx.utils import powerlaw_sequence
import scipy.stats as stats
def buildConfigModelNetwork(degreeSequence):
    """Build a configuration-model graph realising *degreeSequence*.

    Stubs are paired uniformly at random into a multigraph; multi-edges and
    self-loops are then removed, so degrees in the returned simple graph may
    fall slightly below the requested sequence. Prints assortativity and
    clustering and shows a drawing as a side effect.

    NOTE(review): if the remaining stubs ever become unmatchable (e.g. a
    single node with exactly one stub left), the rejection loop never
    terminates — same as the original behaviour; confirm your inputs
    cannot reach that state.
    """
    MG = nx.MultiGraph()
    print("Degree Sequence: " + str(degreeSequence))
    # Work on a private copy: the original decremented the caller's sequence
    # in place, zeroing it out as a side effect. (Also dropped the unused
    # `iter = sum(...)/2` binding that shadowed the builtin.)
    remaining = list(degreeSequence)
    originalDegrees = list(degreeSequence)
    while sum(remaining) > 0:
        first = np.random.randint(0, len(remaining))
        second = np.random.randint(0, len(remaining))
        # Rejection-sample until both endpoints still have free stubs;
        # a self-loop consumes two stubs from the same node.
        while (first != second and (remaining[first] < 1 or remaining[second] < 1)) or \
                (first == second and remaining[first] < 2):
            first = np.random.randint(0, len(remaining))
            second = np.random.randint(0, len(remaining))
        remaining[first] = remaining[first] - 1
        remaining[second] = remaining[second] - 1
        MG.add_edge(first, second)
    # Every node of the multigraph must carry exactly its requested degree.
    for n in MG.nodes:
        assert MG.degree(n) == originalDegrees[n]
    # Remove multiedges creating a normal graph
    G = nx.Graph(MG)
    # Remove self loops
    G.remove_edges_from(nx.selfloop_edges(G))
    print("Degree assortativity:", nx.degree_assortativity_coefficient(G))
    print("Clustering coefficient:", nx.average_clustering(G))
    nx.draw(G, with_labels=True)
    pylab.show()
    return G
# Generate three N-node degree sequences from different distributions and
# build a configuration-model network from each.
N = 1000
degreesUniform = [0] * N
degreesNormal = [0] * N
degreesPower = [0] * N
mu, sigma = 3, 1  # normal distribution parameters
alpha = 1.5       # NOTE(review): unused (presumably intended for the power law)
a, b = 3, 7       # uniform distribution bounds
degreesNormal = np.round(np.random.normal(mu, sigma, N)).astype(int)
# NOTE(review): the intent is presumably to resample until the stub count is
# even; with `and` the loop exits as soon as either condition fails —
# confirm whether `or` was meant here (and in the uniform case below).
while sum(degreesNormal) % 2 != 0 and sum(degreesNormal)/2 > len(degreesNormal):
    degreesNormal = np.round(np.random.normal(mu, sigma, N)).astype(int)
degreesUniform = np.round(np.random.uniform(a,b, size=N)).astype(int)
while sum(degreesUniform) % 2 != 0 and sum(degreesUniform)/2 > len(degreesUniform):
    degreesUniform = np.round(np.random.uniform(a,b, size=N)).astype(int)
# Power-law sequence from networkx (guaranteed realisable as a tree).
degreesPower = nx.random_powerlaw_tree_sequence(N, tries=50000)
buildConfigModelNetwork(degreeSequence=degreesNormal)
buildConfigModelNetwork(degreeSequence=degreesUniform)
buildConfigModelNetwork(degreeSequence=degreesPower)
{
"api_name": "networkx.MultiGraph",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.random.randint",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.rand... |
7981324402 | import time
import sys
import os
# force MAVLink 2.0
os.environ["MAVLINK20"] = "1"
# doc: https://mavlink.io/en/mavgen_python/
from pymavlink import mavutil
# Create a function to send RC values
# More information about Joystick channels
# here: https://www.ardusub.com/operators-manual/rc-input-and-output.html#rc-inputs
def set_rc_channel_pwm(channel_id, pwm=1500):
    """Override a single RC channel with *pwm* microseconds.

    Channel map (ArduSub convention): 1 pitch, 2 roll, 3 throttle, 4 yaw,
    5 forward, 6 lateral, 7 camera pan, 8 camera tilt, 9/10 light levels,
    11 video switch.  RC mode 2 sticks: left = throttle/yaw,
    right = pitch/roll.

    Args:
        channel_id (TYPE): Channel ID, 1-18 (MAVLink 2 carries 18 channels)
        pwm (int, optional): Channel pwm value 1100-1900
    """
    if not 1 <= channel_id <= 18:
        print("Channel does not exist.")
        return
    # 65535 (UINT16_MAX) means "leave this channel untouched"; see
    # https://mavlink.io/en/messages/common.html#RC_CHANNELS_OVERRIDE
    channel_values = [65535] * 18
    channel_values[channel_id - 1] = pwm
    master.mav.rc_channels_override_send(
        master.target_system,       # target_system
        master.target_component,    # target_component
        *channel_values)            # RC channel list, in microseconds.
if __name__ == "__main__":
    # Connect to the autopilot over local UDP (e.g. SITL / mavlink-router).
    master = mavutil.mavlink_connection("udpin:127.0.0.1:14550")

    # make sure the connection is valid
    master.wait_heartbeat()

    print("Heartbeat from system (system %u component %u)" %
          (master.target_system, master.target_component))
    print(master.__dict__)
    print("-----")

    # set / connect (virtual) RC before arming to prevent px4 from
    # engaging the failsafe mode right away

    # Arm the vehicle: MAV_CMD_COMPONENT_ARM_DISARM with param1=1 (arm).
    master.mav.command_long_send(
        master.target_system,
        master.target_component,
        mavutil.mavlink.MAV_CMD_COMPONENT_ARM_DISARM,
        0,
        1, 0, 0, 0, 0, 0, 0
    )

    print("waiting for the vehicle to arm")
    master.motors_armed_wait()
    print("armed!")

    # ack = False
    # while not ack:
    #     # Wait for ACK command
    #     ack_msg = master.recv_match(type='COMMAND_ACK', blocking=True)
    #     ack_msg = ack_msg.to_dict()
    #     print(mavutil.mavlink.enums['MAV_RESULT'][ack_msg['result']].description)
    #     break

    time.sleep(1)

    # # Request all parameters
    # master.mav.param_request_list_send(
    #     master.target_system, master.target_component
    # )

    # while True:
    #     # time.sleep(0.01)
    #     try:
    #         message = master.recv_match(type='PARAM_VALUE', blocking=True).to_dict()
    #         print('name: {}\tvalue: {}'.format(message['param_id'],
    #                                            message['param_value']))
    #     except Exception as error:
    #         print(error)
    #         sys.exit(0)
    # print("end")

    # # px4 verification
    # mav_type = master.sysid_state[master.sysid].mav_type
    # mav_autopilot = master.sysid_state[master.sysid].mav_autopilot
    # print(mav_autopilot == mavutil.mavlink.MAV_AUTOPILOT_PX4)

    # PX4 mode table dumped from mode_mapping():
    # name -> (base_mode, main_mode, sub_mode)
    """
    {'MANUAL': (81, 1, 0), 'STABILIZED': (81, 7, 0), 'ACRO': (65, 5, 0), 'RATTITUDE': (65, 8, 0), 'ALTCTL': (81, 2, 0), 'POSCTL': (81, 3, 0), 'LOITER': (29, 4, 3), 'MISSION': (29, 4, 4), 'RTL': (29, 4, 5), 'LAND': (29, 4, 6), 'RTGS': (29, 4, 7), 'FOLLOWME': (29, 4, 8), 'OFFBOARD': (29, 6, 0), 'TAKEOFF': (29, 4, 2)}
    """
    mode_str = "MANUAL"
    (mode, custom_mode, custom_sub_mode) = master.mode_mapping()[mode_str]
    master.set_mode(mode, custom_mode, custom_sub_mode)

    # Block until the autopilot acknowledges the mode change.
    while True:
        # Wait for ACK command
        ack_msg = master.recv_match(type='COMMAND_ACK', blocking=True)
        ack_msg = ack_msg.to_dict()
        print("mode ack:", ack_msg)
        # Check if command in the same in `set_mode`
        if ack_msg['command'] != mavutil.mavlink.MAV_CMD_DO_SET_MODE:
            continue
        # Print the ACK result !
        print(mavutil.mavlink.enums['MAV_RESULT'][ack_msg['result']].description)
        break

    print(f"mode set to {mode_str}")
    time.sleep(5)

    # ~10 s of near-full throttle (manual_control z axis), 100 Hz.
    for i in range(1000):
        master.mav.manual_control_send(
            master.target_system,
            0, # x
            0,# y
            1000, # z
            0, # r
            0)
        time.sleep(0.01)

    # ~10 s of low throttle to bring it back down.
    for i in range(1000):
        master.mav.manual_control_send(
            master.target_system,
            0, # x
            0,# y
            50, # z
            0, # r
            0)
        time.sleep(0.01)

    # time.sleep(10)
    # set_rc_channel_pwm(3, 1900)

    # # https://mavlink.io/en/messages/common.html#MAV_CMD_NAV_TAKEOFF
    # master.mav.command_long_send(
    #     master.target_system,  # target_system
    #     master.target_component,  # target_component
    #     mavutil.mavlink.MAV_CMD_NAV_TAKEOFF,  # command (22)
    #     0,  # confirmation
    #     0,  # param1 - pitch (deg)
    #     0,  # param2 - empty
    #     0,  # param3 - empty
    #     0,  # param4 - yaw angle (deg)
    #     0,  # param5 - lat
    #     0,  # param6 - lon
    #     100)  # param7 - altitude (m)

    # ack = False
    # while not ack:
    #     # Wait for ACK command
    #     ack_msg = master.recv_match(type='COMMAND_ACK', blocking=True)
    #     ack_msg = ack_msg.to_dict()
    #     print("takeoff ack:", ack_msg)
    #     if ack_msg['command'] != mavutil.mavlink.MAV_CMD_NAV_TAKEOFF:
    #         continue
    #     print(mavutil.mavlink.enums['MAV_RESULT'][ack_msg['result']].description)
    #     break
    # print("takeoff command acked")
    # time.sleep(25)

    # Keep the process (and the MAVLink link) alive.
    while True:
        print("blocking")
        time.sleep(2)
| sslab-gatech/RoboFuzz | src/ros_to_mav.py | ros_to_mav.py | py | 5,941 | python | en | code | 13 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pymavlink.mavutil.mavlink_connection",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "pymavlink.mavutil",
"line_number": 57,
"usage_type": "name"
},
{
"api_name": ... |
40894326962 | # -*- coding: utf-8 -*-
'''---------------------------------------------------------------------------------------------------------------------------------------
version date author memo
------------------------------------------------------------------------------------------------------------------------------------------
------------------------------------------------------------------------------------------------------------------------------------------
non-function requirement:
*
*
*
------------------------------------------------------------------------------------------------------------------------------------------
feature list:
*
*
*
---------------------------------------------------------------------------------------------------------------------------------------'''
import re
import petl as etl
import time
import json
import logging
import webTableCrawler as webCrawler
from utility import date_util
from utility import web_util
from lxml import html
# Cell values treated as "no data" by the exchanges, normalised to '0'.
# NOTE(review): '---' appears twice — one entry was probably a full-width
# dash variant that lost its encoding; verify against the source pages.
_CONVERT_ZERO = ['', '--', '---', '---', 'x', 'X', 'null', 'NULL'] # convert illegal value into 0
# Raw TWSE column layout (unused below; kept for reference).
_ENGLISH_HEADER = 'symbol_id,name,volume,trans,amount,open,high,low,close,sign,change,af_buy,af_buy_amount,af_sell, af_sell_amout,pe'.split(
    ',')
# Compact header shared by both crawlers' short-form output.
_HEADER = 'symbol_id,trade_date,volume,amount,open,high,low,close,change,trans'.split(',')
class tseCrawler(webCrawler.webHtmlTableCrawler):
    """Scrapes daily closing quotes for all TWSE-listed stocks.

    Downloads the exchange's MI_INDEX page for *trade_date* and, when
    *short* is True, reshapes each row into the compact _HEADER layout.
    Crawling itself (fetch, parse, run, rows) is inherited from the base
    class in webTableCrawler.
    """
    def __init__(self, trade_date='20160701', short=True):
        # trade_date is Gregorian 'YYYYMMDD'; the TWSE site wants ROC dates.
        self.trade_date = trade_date
        self._taiwan_date = date_util.to_taiwan_date(trade_date)
        self.url = "http://www.twse.com.tw/ch/trading/exchange/MI_INDEX/MI_INDEX.php?download=&qdate={}&selectType=ALL".format(self._taiwan_date)
        self.postfix = 't'  # 't' marks TWSE output files
        outfile = ('{}-{}.csv').format(trade_date, self.postfix) # outdate.replace('/', ''))
        xheader = '//*[@id="main-content"]/table[2]/thead/tr[2]/td/text()' # '//*[@id="main-content"]/table[2]/thead/tr[2]'
        xbody = '//table[2]/tbody/tr' # loop for td to get the table content
        self.short = short
        fn_transform = self._transform if short else None
        super(tseCrawler, self).__init__(url=self.url, xheader=xheader, xbody=xbody, outfile=outfile,
                                         fn_clean=self._clean, fn_transform=fn_transform)
    def _clean(self, x):
        """Strip whitespace; map 'no data' markers to '0'; drop thousands commas."""
        x=x.strip()
        return '0' if (x in _CONVERT_ZERO) else re.sub(",", "", x)
    def _transform(self, row=None): # , date_str=None):
        """Reorder a raw TWSE row into the _HEADER layout, signing the change column."""
        # to-do: use dynamic arguments
        # Column 9 holds the +/- sign glyph; a lone '-' marks a price drop.
        sign = '-' if len(row[9]) == 1 and row[9] in ['-', u'-'] else ''
        change = sign + row[10]
        return (row[0], self.trade_date, row[2], row[4], row[5], row[6], row[7], row[8], change, row[3])
    def get_header(self):
        # Short form uses the fixed compact header; otherwise fall back to
        # the base class, which reads it from the page.
        if (self.short):
            # self.doc is populated lazily by the base class.
            if (self.doc is None): self.get_doc()
            self.header = _HEADER
        else:
            super(tseCrawler, self).get_header()
class otcCrawler(webCrawler.webJsonTableCarwler):
    """Scrapes daily closing quotes for OTC (TPEx) stocks from the JSON API.

    Crawling itself (fetch, parse, run, rows) is inherited from the base
    class in webTableCrawler.
    """
    def __init__(self, trade_date='20160701', short=True):
        self.trade_date = trade_date
        self._taiwan_date = date_util.to_taiwan_date(trade_date)
        self.postfix = 'o'  # 'o' marks OTC output files
        # Cache-busting timestamp query parameter, mimicking the site's own JS.
        ttime = str(int(time.time() * 100))
        self.url = 'http://www.tpex.org.tw/web/stock/aftertrading/daily_close_quotes/stk_quote_result.php?l=zh-tw&d={}&_={}'.format(self._taiwan_date, ttime)
        outfile = ('{}-{}.csv').format(trade_date, self.postfix) # outdate.replace('/', ''))
        xheader = None  # the JSON payload carries no separate header row
        # Keys into the JSON document holding the quote rows.
        xbody = ['mmData', 'aaData']
        self.short = short
        fn_transform = self._transform if short else None
        super(otcCrawler, self).__init__(url=self.url, xheader=xheader, xbody=xbody, outfile=outfile,
                                         fn_clean=self._clean, fn_transform=fn_transform)
    def _clean(self, x):
        """Strip whitespace; map 'no data' markers to '0'; drop thousands commas."""
        x=x.strip()
        return '0' if (x in _CONVERT_ZERO) else re.sub(",", "", x)
    def _transform(self, row=None): # , date_str=None):
        """Reorder a raw TPEx row into the shared _HEADER layout."""
        return (row[0], self.trade_date, row[8], row[9], row[4], row[5], row[6], row[2], row[3], row[10])
    def get_header(self):
        # Always use the compact header; the JSON body has none of its own.
        if (self.doc is None): self.get_doc()
        self.header = _HEADER
def get_historical_quotes_tse(trade_date= '20160701'):
    """Fetch TWSE closing quotes for *trade_date* and return the crawled rows."""
    crawler = tseCrawler(trade_date=trade_date)
    crawler.run()
    return crawler.rows
def get_historical_quotes_otc(trade_date= '20160701'):
    """Fetch OTC (TPEx) closing quotes for *trade_date* and return the crawled rows."""
    crawler = otcCrawler(trade_date=trade_date)
    crawler.run()
    return crawler.rows
def get_historical_quotes(trade_date= '20160701'):
    """Fetch TWSE and OTC quotes for *trade_date*, vertically stacked into one table."""
    listed = get_historical_quotes_tse(trade_date=trade_date)
    otc_rows = get_historical_quotes_otc(trade_date=trade_date)
    return etl.stack(listed, otc_rows)
def main():
    # Smoke test: fetch and print both markets for the default trade date.
    print(get_historical_quotes_tse())
    print(get_historical_quotes_otc())

if __name__ == '__main__':
    main()
| Why-Not-Sky/hunting | webTableCrawler/stockCrawler.py | stockCrawler.py | py | 4,823 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "webTableCrawler.webHtmlTableCrawler",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "utility.date_util.to_taiwan_date",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "utility.date_util",
"line_number": 37,
"usage_type": "name"
}... |
27052130514 | import mock
from dtat.models.player import Player
# Fixture: rockbite guild-search-by-name response — three guilds whose names
# match "test"; guild_id "1" is the one the test then updates by id.
data1 = {
    "status": "ok",
    "result": [
        {
            "guild_id": "1",
            "guild_name": "Testtttttt",
            "level": 10
        },
        {
            "guild_id": "2",
            "guild_name": "testytesty",
            "level": 0
        },
        {
            "guild_id": "3",
            "guild_name": "test",
            "level": 0
        },
    ],
    "message": 'placeholder',
    "server_time": "2019-04-28T16:49:29.538Z"
}

# Fixture: rockbite guild-by-id response for guild "1" with two members —
# these member entries become the Player rows the update endpoint creates.
data2 = {
    "status": "ok",
    "result": {
        "guild_id": "1",
        "guild_name": "Zion England",
        "guild_level": 16,
        "total_donations": 6284013.165,
        "total_level": 10453,
        "average_level": 209.06,
        "members": [
            {
                "user_id": "100333100473279671201",
                "user_name": "ChestnutSprite9338",
                "donations": 0,
                "last_online": "2019-04-17T22:51:08.064Z",
                "level": 69,
                "depth": 208,
                "smelters_count": 2,
                "crafters_count": 2,
                "miners_count": 13,
                "chemistry_mining_station_count": 4,
                "green_house_building_slot_count": 1,
                "chemistry_building_slot_count": 1
            },
            {
                "user_id": "100438959092138638847",
                "user_name": "raphaelbüchinger12",
                "donations": 236,
                "received_donation": 13,
                "last_event_donation": 0,
                "last_online": "2019-04-30T15:46:20.064Z",
                "level": 139,
                "depth": 387,
                "smelters_count": 2,
                "crafters_count": 2,
                "miners_count": 19,
                "chemistry_mining_station_count": 5,
                "green_house_building_slot_count": 1,
                "chemistry_building_slot_count": 2
            },
        ]
    },
    "message": 'placeholder'
}
@mock.patch('dtat.services.rockbite.rockbiteGuildById.requests')
@mock.patch('dtat.services.rockbite.rockbiteGuildByName.requests')
def test_updateId(mReqName, mReqId, client, app, session):
    """Exercise /data/update/id/<id>: unknown guild, upstream error, success.

    Decorators apply bottom-up, so mReqName mocks the guild-by-name
    service's requests module and mReqId the guild-by-id service's.
    """
    # Clean slate: no players imported yet.
    assert len(Player.query.all()) == 0
    # Guild 1 is not known to the app yet, so updating by id must 404.
    mReqId.get.return_value.json.return_value = data2
    res = client.get('/data/update/id/1')
    assert res.get_json()['message'] == 'Guild was not found'
    assert res.status_code == 404
    assert len(Player.query.all()) == 0
    # Register the guild through a name search first.
    mReqName.get.return_value.json.return_value = data1
    res = client.get('/data/update/name/test')
    # An upstream 'nok' status must surface as a 404 with an explanation.
    # NOTE(review): this mutates the shared module-level data2 dict; it is
    # restored below, but other tests would see the change if this one fails.
    data2['status'] = 'nok'
    mReqId.get.return_value.json.return_value = data2
    res = client.get('/data/update/id/1')
    assert res.status_code == 404
    assert res.get_json()['message'] == 'Api response was not ok.'
    # Happy path: both guild members are imported as players.
    data2['status'] = 'ok'
    mReqId.get.return_value.json.return_value = data2
    res = client.get('/data/update/id/1')
    assert res.get_json()['result'] == 'ok'
    assert res.status_code == 200
    assert len(Player.query.all()) == 2
| deeptownadmintools/main-server | tests/00_integration/test_data_update_id.py | test_data_update_id.py | py | 3,152 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "dtat.models.player.Player.query.all",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "dtat.models.player.Player.query",
"line_number": 78,
"usage_type": "attribute"
},
{
"api_name": "dtat.models.player.Player",
"line_number": 78,
"usage_type": "na... |
26074180933 | #%% # 1
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import xgboost as xgb
from sklearn import linear_model
from sklearn.model_selection import ShuffleSplit, cross_val_score
from sklearn.cluster import MiniBatchKMeans
from sklearn.decomposition import PCA
from sklearn.model_selection import train_test_split
#%% # 2
# read data and store in a dataframe
train_data = pd.read_csv('train.zip', compression = 'zip')
test_data = pd.read_csv('test.zip', compression = 'zip')
train_data.head()

#%% # 3
# converting to respective dtypes
train_data['pickup_datetime'] = pd.to_datetime(train_data['pickup_datetime'])
train_data['dropoff_datetime'] = pd.to_datetime(train_data['dropoff_datetime'])
# store_and_fwd_flag: 'Y'/'N' -> 1/0
train_data['store_and_fwd_flag'] = 1 * (train_data['store_and_fwd_flag'] == 'Y')
test_data['store_and_fwd_flag'] = 1 * (test_data['store_and_fwd_flag'] == 'Y')

#%% # 4
# data exploration: sanity-check uniqueness and completeness
print('Ids are unique') if train_data['id'].nunique() == len(train_data['id']) else print('Ids not unique')
print('No missing values') if train_data.count().min() == len(train_data['id']) and test_data.count().min() == len(test_data['id']) else print('There are missing values')

#%% # 5
# plotting the geographical data
N = int(len(train_data) / 10) # since the data is too large. Plot only for 1/10th of data
fig, ax = plt.subplots(ncols = 1, nrows = 1,figsize = (12,10))
plt.xlim(-74.1,-73.7) # longitude
plt.ylim(40.6, 40.9) # latitude
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
ax.set_facecolor('k')
ax.scatter(train_data['pickup_longitude'].values[:N],train_data['pickup_latitude'].values[:N], c = 'y', s = 0.0009, alpha = 1)

#%% # 6
# removing outliers in trip duration: keep trips between the 1st and 99th
# percentile, comparing log-scaled boxplots before and after
fig, ax = plt.subplots(ncols = 2)
ax[0].set_title('With outliers')
ax[0].boxplot(np.log(train_data.trip_duration + 1))
q = train_data['trip_duration'].quantile([0.01, 0.99])
train_data = train_data[train_data['trip_duration'] > q.iloc[0]]
train_data = train_data[train_data['trip_duration'] < q.iloc[1]]
ax[1].set_title('Without outliers')
ax[1].boxplot(np.log(train_data.trip_duration + 1)) # transform into log scale
plt.show()
#%% # 7
# distance between pickup and dropoff points can be calculated using haversine formula
def haversine_distance(lat1, lon1, lat2, lon2):
    """Great-circle distance in km between two points given in degrees.

    Vectorised: accepts scalars or equal-shaped array-likes. Uses the
    haversine formula with an Earth radius of 6367 km.
    """
    lat1, lon1, lat2, lon2 = (np.radians(v) for v in (lat1, lon1, lat2, lon2))
    half_dlat = (lat2 - lat1) / 2.0
    half_dlon = (lon2 - lon1) / 2.0
    # haversine formula
    hav = np.sin(half_dlat)**2 + np.cos(lat1) * np.cos(lat2) * np.sin(half_dlon)**2
    central_angle = 2 * np.arcsin(np.sqrt(hav))
    return 6367 * central_angle  # 6367 for radius of earth in km
# Great-circle distance of each trip.
# Bug fix: haversine_distance is declared (lat1, lon1, lat2, lon2) but was
# called with longitude/latitude swapped, corrupting every distance (and the
# speeds derived from it) — the formula is not symmetric in lat/lon.
train_data.loc[:, 'distance'] = haversine_distance(train_data['pickup_latitude'], train_data['pickup_longitude'],
                                                   train_data['dropoff_latitude'], train_data['dropoff_longitude'])
test_data.loc[:, 'distance'] = haversine_distance(test_data['pickup_latitude'], test_data['pickup_longitude'],
                                                  test_data['dropoff_latitude'], test_data['dropoff_longitude'])
# finding average speed (m/s) for each trip using distance and trip duration
train_data.loc[:, 'avg speed'] = 1000 * train_data['distance'] / train_data['trip_duration']
#%% # 8
# finding direction of each trip using basic trigonometry
def find_direction(lat1, lon1, lat2, lon2):
    """Initial compass bearing, in degrees (-180, 180], from point 1 to point 2.

    Inputs are latitudes/longitudes in degrees (scalars or array-likes);
    0 = north, 90 = east.
    """
    dlon = np.radians(lon2 - lon1)
    phi1 = np.radians(lat1)
    phi2 = np.radians(lat2)
    east = np.sin(dlon) * np.cos(phi2)
    north = np.cos(phi1) * np.sin(phi2) - np.sin(phi1) * np.cos(phi2) * np.cos(dlon)
    return np.degrees(np.arctan2(east, north))
# Initial bearing of each trip.
train_data.loc[:, 'direction'] = find_direction(train_data['pickup_latitude'], train_data['pickup_longitude'],
                                                train_data['dropoff_latitude'], train_data['dropoff_longitude'])
test_data.loc[:, 'direction'] = find_direction(test_data['pickup_latitude'], test_data['pickup_longitude'],
                                               test_data['dropoff_latitude'], test_data['dropoff_longitude'])

#%% # 9
# trying with k-fold cross validation for linear regression. Here we choose k = 5
feature_df = train_data[['pickup_latitude', 'pickup_longitude', 'passenger_count', 'distance']]
target_df = train_data[['trip_duration']]
regression = linear_model.LinearRegression()
cv = ShuffleSplit(n_splits = 5, test_size = 0.25, random_state = False)
print(cross_val_score(regression, feature_df, target_df, cv = cv))
# score for linear regression is squared coefficient of determination (R^2).
# we can see that the validation score is bad with linear regression

#%% # 10
# try to fit with ridge regression
test_feature_df = test_data[['pickup_latitude', 'pickup_longitude', 'passenger_count', 'distance']]
ridge_reg = linear_model.Ridge(alpha = 0.5)
ridge_reg.fit(feature_df, target_df)
pred = ridge_reg.predict(test_feature_df)
# NOTE(review): test_feature_df is a slice of test_data — assigning into it
# triggers pandas' SettingWithCopy warning; use an explicit .copy() upstream.
test_feature_df.loc[:, 'trip_duration'] = pred.astype(int)

#%% # 11
# try with k-means clustering
# since #of data is large, we can use minibatch k-means
coordinates = np.vstack((train_data[['pickup_latitude', 'pickup_longitude']],
                         train_data[['dropoff_latitude', 'dropoff_longitude']],
                         test_data[['pickup_latitude', 'pickup_longitude']],
                         test_data[['dropoff_latitude', 'dropoff_longitude']]))
# take some sample from population and cluseterd it
sample_index = np.random.permutation(len(coordinates))[:500000]
kmeans = MiniBatchKMeans(n_clusters = 80, batch_size = 10000).fit(coordinates[sample_index])
cx = [c[0] for c in kmeans.cluster_centers_]  # centre latitudes
cy = [c[1] for c in kmeans.cluster_centers_]  # centre longitudes

#%% # 12
# predict with the fitted k-means clustering. Predict the cluster centers
train_data.loc[:, 'pickup_cluster'] = kmeans.predict(train_data[['pickup_latitude', 'pickup_longitude']])
train_data.loc[:, 'dropoff_cluster'] = kmeans.predict(train_data[['dropoff_latitude', 'dropoff_longitude']])
test_data.loc[:, 'pickup_cluster'] = kmeans.predict(test_data[['pickup_latitude', 'pickup_longitude']])
test_data.loc[:, 'dropoff_cluster'] = kmeans.predict(test_data[['dropoff_latitude', 'dropoff_longitude']])

#%% # 13
# visualize the clusters
fig, ax = plt.subplots(ncols=1, nrows=1)
plt.xlim(-74.1,-73.7) # longitude
plt.ylim(40.6, 40.9) # latitude
ax.set_xlabel('Longitude')
ax.set_ylabel('Latitude')
# shading clusters
ax.scatter(train_data['pickup_longitude'].values[:N], train_data['pickup_latitude'].values[:N],
           s=0.02, c=train_data['pickup_cluster'].values[:N], alpha=0.2)
# plotting cluster centers'
ax.scatter(cy, cx, color = 'Black', s = 5, alpha = 1)
plt.show()

#%% # 14
# some area are always in traffic and some others are crowded. So, it'be helpful if we find avg speed and
# taxi's count at each unique latitude and longitude values
grpby_cols = ['pickup_latitude', 'pickup_longitude']
stats_in_coords = train_data.groupby(grpby_cols)[['avg speed']].mean()
count_in_coords = train_data.groupby(grpby_cols)['id'].count()
stats_in_coords.loc[:, 'count'] = count_in_coords
# NOTE(review): reset_index() is not in-place and its result is discarded —
# assign it back if the flat index is actually needed.
stats_in_coords.reset_index()

#%% # 15
# use PCA to transform our latitude and longitude data independent ones
# here we're getting two compoenents from PCA say comp1, comp2 one for latitude and so on...
pca = PCA().fit(coordinates) # coordinates having only latitude and longitude which vertically stacked
train_data.loc[:, 'comp0_pickup'] = pca.transform(train_data[['pickup_latitude', 'pickup_longitude']])[:, 0]
train_data.loc[:, 'comp1_pickup'] = pca.transform(train_data[['pickup_latitude', 'pickup_longitude']])[:, 1]
train_data.loc[:, 'comp0_dropoff'] = pca.transform(train_data[['dropoff_latitude', 'dropoff_longitude']])[:, 0]
train_data.loc[:, 'comp1_dropoff'] = pca.transform(train_data[['dropoff_latitude', 'dropoff_longitude']])[:, 1]
test_data.loc[:, 'comp0_pickup'] = pca.transform(test_data[['pickup_latitude', 'pickup_longitude']])[:, 0]
test_data.loc[:, 'comp1_pickup'] = pca.transform(test_data[['pickup_latitude', 'pickup_longitude']])[:, 1]
test_data.loc[:, 'comp0_dropoff'] = pca.transform(test_data[['dropoff_latitude', 'dropoff_longitude']])[:, 0]
test_data.loc[:, 'comp1_dropoff'] = pca.transform(test_data[['dropoff_latitude', 'dropoff_longitude']])[:, 1]
# also find manhatten distance for the pca components
train_data.loc[:, 'manhatten_dis_pca'] = np.abs(train_data['comp0_dropoff'] - train_data['comp0_pickup']) + np.abs(train_data['comp1_dropoff'] - train_data['comp1_pickup'])
test_data.loc[:, 'manhatten_dis_pca'] = np.abs(test_data['comp0_dropoff'] - test_data['comp0_pickup']) + np.abs(test_data['comp1_dropoff'] - test_data['comp1_pickup'])

#%% # 16
# By using Open Source Routing Machine (OSRM) of Newyork routes, we can find shortest distance between two points.
# OSRM data are stored in different file. WE can read and use it our model
fast_rout1 = pd.read_csv('C:\\Users\\sivaram\\Documents\\Packages\\Predictive package\\fastest_routes_train_part_1.csv',
                         usecols = ['id', 'total_distance', 'total_travel_time', 'number_of_steps'])
fast_rout2 = pd.read_csv('C:\\Users\\sivaram\\Documents\\Packages\\Predictive package\\fastest_routes_train_part_2.csv',
                         usecols = ['id', 'total_distance', 'total_travel_time', 'number_of_steps'])
test_street_info = pd.read_csv('C:\\Users\\sivaram\\Documents\\Packages\\Predictive package\\fastest_routes_test.csv',
                               usecols = ['id', 'total_distance', 'total_travel_time', 'number_of_steps'])
train_street_info = pd.concat((fast_rout1, fast_rout2))
train_data = train_data.merge(train_street_info, how = 'left', on = 'id')
test_data = test_data.merge(test_street_info, how = 'left', on = 'id')
train_street_info.head()

#%% # 17
# prepare for modelling
feature_names = ['pickup_cluster', 'dropoff_cluster', 'manhatten_dis_pca', 'comp1_pickup',
                 'total_distance', 'pickup_longitude', 'pickup_latitude', 'distance', 'dropoff_latitude',
                 'total_travel_time', 'direction', 'dropoff_longitude', 'comp1_dropoff', 'comp0_dropoff', 'comp0_pickup']
# logistic transform of trip duraion which is the target variable
y = np.log(train_data['trip_duration'].values + 1)

#%% # 18
# modelling using xgboost...
Xtrain, Xvalid, ytrain, yvalid = train_test_split(train_data[feature_names].values, y, test_size = 0.2)
dtrain = xgb.DMatrix(Xtrain, label = ytrain)
dvalid = xgb.DMatrix(Xvalid, label = yvalid)
#feature_names.remove('avg speed')
dtest = xgb.DMatrix(test_data[feature_names].values)
watchlist = [(dtrain, 'train'), (dvalid, 'valid')]

#%% # 19
# We can change xgboost's parameters so that get minimal RMSE value
# NOTE(review): 'reg:linear' is deprecated in newer xgboost — 'reg:squarederror'.
xgb_pars = {'min_child_weight': 50, 'eta': 0.3, 'colsample_bytree': 0.3, 'max_depth': 10,
            'subsample': 0.8, 'lambda': 1., 'nthread': 4, 'booster' : 'gbtree', 'silent': 1,
            'eval_metric': 'rmse', 'objective': 'reg:linear'}

#%% # 20
# finding best RMSE value.
model = xgb.train(xgb_pars, dtrain, 60, watchlist, early_stopping_rounds=50,
                  maximize=False, verbose_eval=10)
print('Modeling RMSLE %.5f' % model.best_score)

#%% # 21
# Now we can use our model to predict trip duration for test dataset which is our ultimate goal
ytest = model.predict(dtest)
# Since we've log transformed our trip duration for model fitting, it is necessary to inverse transform
# to get original trip duraion for test_data
test_data['trip_duration'] = np.exp(ytest) - 1
print('We predict the trip duration for test_data with RMSE error of', model.best_score)
| Sivaram46/NYC-taxi-trip-duration-prediction | taxi_trip_duration.py | taxi_trip_duration.py | py | 11,553 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime... |
29229830776 | from github import Github
import time
import schedule
import requests
import json
import logging
import os
# Log to debug.log with timestamped, level-tagged lines.
logging.basicConfig(filename='debug.log', format='%(asctime)s %(levelname)-8s %(message)s',
                    level=logging.INFO, datefmt='%Y-%m-%d %H:%M:%S')

"""
GITHUB CREDENTIALS
"""
GITHUB_TOKEN = os.environ.get('GITHUB_TOKEN')
g = Github(GITHUB_TOKEN)
github_user = g.get_user()

"""
ZENDESK CREDENTIALS
"""
ZENDESK_URL = os.environ.get('ZENDESK_URL')
# Zendesk API-token authentication requires the '/token' suffix on the email.
ZENDESK_EMAIL = os.environ.get('ZENDESK_EMAIL') + '/token'
ZENDESK_TOKEN = os.environ.get('ZENDESK_TOKEN')

"""
CRON JOB
"""
# Poll interval for the schedule loop at the bottom of the file.
SCHEDULE_INTERVAL_MINUTES = int(1)
def notification_count():
    """Return the current number of unread GitHub notifications (also logs it)."""
    count = github_user.get_notifications().totalCount
    logging.info("You have {} notifications.".format(count))
    return count
def notification_parser():
    """File a Zendesk ticket for each notification carrying a comment, then mark it read."""
    for notif in github_user.get_notifications():
        # Notifications without a latest comment (e.g. bare issue events)
        # carry nothing to ticket — just clear them.
        if notif.subject.latest_comment_url is not None:
            create_zendesk_ticket(get_comment_data(notif))
        mark_notification_read(notif.id)
def mark_notification_read(notification_id):
    """PATCH the notification thread as read, then log the remaining count."""
    auth_headers = {'Authorization': 'token ' + GITHUB_TOKEN}
    thread_url = "https://api.github.com/notifications/threads/{}".format(notification_id)
    requests.patch(thread_url, headers=auth_headers)
    notification_count()
def get_comment_data(notif):
    """Fetch the latest comment for *notif* and return its subject, URL and body.

    Returns {"url": comment html URL, "comment": comment body,
             "subject": issue/PR title}.
    """
    # Fetch once instead of issuing the identical GET twice (the original hit
    # the API two times per comment, doubling latency and rate-limit usage).
    comment = requests.get(notif.subject.latest_comment_url).json()
    return {"url": comment['html_url'], "comment": comment['body'],
            "subject": notif.subject.title}
def create_zendesk_ticket(comment_content):
    """Create a Zendesk ticket pointing at a GitHub comment.

    comment_content: dict with 'subject', 'url' and 'comment' keys,
    as produced by get_comment_data().
    """
    # New ticket info
    subject = 'Comment on Github ' + comment_content['subject']
    body = "Kindly review the comment on {}".format(comment_content['url']) + \
        "\n" + "Comment" + "\n" + comment_content['comment']
    # Package the data in a dictionary matching the expected JSON
    data = {'ticket': {'subject': subject, 'comment': {'body': body}}}
    # Encode the data to create a JSON payload
    payload = json.dumps(data)
    # Set the request parameters
    headers = {'content-type': 'application/json'}
    # Do the HTTP post request
    response = requests.post(ZENDESK_URL, data=payload, auth=(ZENDESK_EMAIL, ZENDESK_TOKEN), headers=headers)
    # Check for HTTP codes other than 201 (Created)
    if response.status_code != 201:
        # Bug fix: logging.error() is not print() — extra positional args are
        # %-format parameters, so the original call raised a formatting error
        # inside the logging machinery and lost the message entirely.
        logging.error('Status: %s. Problem with the request. Exiting.', response.status_code)
        # Bug fix: the success message below was logged unconditionally,
        # even after a failed request.
        return
    # Report success
    logging.info('Successfully created the ticket.')
def main():
    """Cron entry point: process notifications only when any are unread."""
    if notification_count():
        notification_parser()
# Executes a function at every X minutes. Reference - https://stackoverflow.com/a/55756963
schedule.every(SCHEDULE_INTERVAL_MINUTES).minutes.do(main)
# schedule only fires jobs when run_pending() is called, so poll once a second.
while True:
    schedule.run_pending()
    time.sleep(1)
| mukeshtiwari1987/ghub_watcher | gtoz.py | gtoz.py | py | 3,062 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.environ",
... |
73403156193 | import webapp2
from twython import *
import json
# Twitter OAuth credentials — must be filled in before deployment.
TWITTER_APP_KEY = '' #supply the appropriate value
TWITTER_APP_KEY_SECRET = ''
TWITTER_ACCESS_TOKEN = ''
TWITTER_ACCESS_TOKEN_SECRET = ''
class MainHandler(webapp2.RequestHandler):
    """Root route handler: responds with a plain greeting (smoke test)."""
    def get(self):
        self.response.write('Hello world!')
class GetTweets(webapp2.RequestHandler):
    """GET /tweets/<hashtag>/<no>: search Twitter for <hashtag> and return up
    to <no> tweets as a JSON list of {"text", "handle"} objects."""

    def get(self, hashtag, no):
        no = int(no)
        # Fix: the original bound the Twython client to "t" and then reused
        # "t" as the loop variable, shadowing the client.
        client = Twython(app_key=TWITTER_APP_KEY,
                         app_secret=TWITTER_APP_KEY_SECRET,
                         oauth_token=TWITTER_ACCESS_TOKEN,
                         oauth_token_secret=TWITTER_ACCESS_TOKEN_SECRET)
        search = client.search(q=hashtag, count=no)
        responses = [
            {"text": tweet['text'], "handle": tweet['user']['screen_name']}
            for tweet in search['statuses']
        ]
        self.response.write(json.dumps(responses))
# WSGI route table: "/" → greeting, "/tweets/<hashtag>/<count>" → search API.
app = webapp2.WSGIApplication([
    ('/', MainHandler),
    ('/tweets/(.*)/(.*)', GetTweets)
], debug=True)
| aneesh-neelam/TwitterSearch-GAE | main.py | main.py | py | 943 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "webapp2.RequestHandler",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "webapp2.RequestHandler",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "json.dumps",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "web... |
42468668289 | import cv2
import numpy as np
from matplotlib import pyplot as plt
def Thresholding():
    """Demo the five basic OpenCV global-threshold modes on a gradient image
    and show them side by side with matplotlib."""
    img = cv2.imread('/home/mark/Desktop/gradient.png',0)
    ret, thresh1 = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
    ret,thresh2 = cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
    ret,thresh3 = cv2.threshold(img,127,255,cv2.THRESH_TRUNC)
    ret,thresh4 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO)
    ret,thresh5 = cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV)
    titles = ['Original Image','BINARY','BINARY_INV','TRUNC','TOZERO','TOZERO_INV']
    images = [img, thresh1, thresh2, thresh3, thresh4, thresh5]
    # Fix: xrange is Python 2 only and raises NameError on Python 3.
    for i in range(6):
        plt.subplot(2,3,i+1),plt.imshow(images[i],'gray')
        plt.title(titles[i])
        plt.xticks([]),plt.yticks([])
    plt.show()
def Convolution():
    """Apply a normalized 5x5 box kernel with cv2.filter2D and show the
    original and filtered images side by side."""
    img = cv2.imread('/home/mark/Desktop/Apple.jpg')
    kernel = np.ones((5, 5), np.float32) / 25
    dst = cv2.filter2D(img, -1, kernel)
    for position, (image, label) in enumerate([(img, 'Original'), (dst, 'Averaging')], start=121):
        plt.subplot(position)
        plt.imshow(image)
        plt.title(label)
        plt.xticks([])
        plt.yticks([])
    plt.show()
def Averaging():
    """Blur an image with a 5x5 mean filter (cv2.blur) and show before/after."""
    img = cv2.imread('/home/mark/Desktop/Apple.jpg')
    blur = cv2.blur(img, (5, 5))
    for position, (image, label) in enumerate([(img, 'Original'), (blur, 'Blurred')], start=121):
        plt.subplot(position)
        plt.imshow(image)
        plt.title(label)
        plt.xticks([])
        plt.yticks([])
    plt.show()
def GaussianBlur():
    """Blur an image with a 5x5 Gaussian kernel and show before/after."""
    img = cv2.imread('/home/mark/Desktop/RMills.jpg')
    blur = cv2.GaussianBlur(img, (5, 5), 0)
    for position, (image, label) in enumerate([(img, 'Original'), (blur, 'Blurred')], start=121):
        plt.subplot(position)
        plt.imshow(image)
        plt.title(label)
        plt.xticks([])
        plt.yticks([])
    plt.show()
def MedianFilter():
    """Denoise an image with a 5-pixel median filter and show before/after."""
    img = cv2.imread('/home/mark/Desktop/Pic.jpg')
    median = cv2.medianBlur(img, 5)
    for position, (image, label) in enumerate([(img, 'Original'), (median, 'Filtered')], start=121):
        plt.subplot(position)
        plt.imshow(image)
        plt.title(label)
        plt.xticks([])
        plt.yticks([])
    plt.show()
def BilateralFilter():
    """Apply an edge-preserving bilateral filter and show before/after."""
    img = cv2.imread('/home/mark/Desktop/RMills.jpg')
    blur = cv2.bilateralFilter(img, 9, 75, 75)
    for position, (image, label) in enumerate([(img, 'Original'), (blur, 'Filtered')], start=121):
        plt.subplot(position)
        plt.imshow(image)
        plt.title(label)
        plt.xticks([])
        plt.yticks([])
    plt.show()
"""Morphological Transformations"""
def Erosion():
    """Erode the binary 'j' test image with a 5x5 kernel and display both
    images in OpenCV windows."""
    source = cv2.imread('/home/mark/Desktop/j.png', 0)
    kernel = np.ones((5, 5), np.uint8)
    eroded = cv2.erode(source, kernel, iterations=1)
    cv2.imshow('Original', source)
    cv2.imshow('Erosion', eroded)
    cv2.waitKey(0)
def Dilation():
    """Dilate the binary 'j' test image with a 5x5 kernel and display both
    images in OpenCV windows."""
    source = cv2.imread('/home/mark/Desktop/j.png', 0)
    kernel = np.ones((5, 5), np.uint8)
    dilated = cv2.dilate(source, kernel, iterations=1)
    cv2.imshow('Original', source)
    cv2.imshow('Dilation', dilated)
    cv2.waitKey(0)
def MorphGradient():
    """Show the morphological gradient (dilation minus erosion) of the 'j'
    test image.

    Fix: the original named the result "dilation" and titled its window
    "Dilation" — a copy-paste mislabel for a gradient output.
    """
    img = cv2.imread('/home/mark/Desktop/j.png', 0)
    kernel = np.ones((5, 5), np.uint8)
    gradient = cv2.morphologyEx(img, cv2.MORPH_GRADIENT, kernel)
    cv2.imshow('Original', img)
    cv2.imshow('Gradient', gradient)
    cv2.waitKey(0)
# Script entry: only the Gaussian blur example runs when the module executes.
GaussianBlur()
| olinrobotics/irl | irl_archive/Fall_2017/button_game/Practice/Image_Processing.py | Image_Processing.py | py | 3,235 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.threshold",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.THRESH_BINARY",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "cv2.threshold",
"lin... |
30560767372 | from enum import Enum
import glm
import numpy as np
import math
from Ray import *
class Plane():
    """An infinite plane defined by a point on it and a normal vector."""

    def __init__(self, pointOnPlane=glm.vec3(0.0, 0.0, 0.0), normal=glm.vec3(0.0, -1.0, 0.0)):
        self.pointOnPlane = pointOnPlane
        self.normal = normal

    def isPointOnPlane(self, testpoint):
        """Return True when testpoint (glm.vec3) lies numerically on the plane."""
        offset = glm.dot(testpoint - self.pointOnPlane, self.normal)
        return glm.abs(offset) < 0.0001

    def testRay(self, camRay=Ray()):
        """Intersect camRay with this plane.

        Returns the intersection point (glm.vec3) and stores the ray
        parameter in camRay.t, or returns False when the ray is parallel
        to the plane.
        """
        denom = glm.dot(self.normal, camRay.rayDirection)
        if glm.abs(denom) < 0.0001:
            return False
        camRay.t = glm.dot(self.normal, self.pointOnPlane - camRay.startPoint) / denom
        return camRay.startPoint + camRay.t * camRay.rayDirection
#class TriangleTest():
# # glm vec3 for pointA pointB pointC
# def __init__(self,Point_A = glm.vec3( 1.0,1.0,0.0),Point_B = glm.vec3(-1.0,1.0,0.0),Point_C = glm.vec3( 0.0,0.0,0.0)):
# self.triPoint_A = Point_A
# self.triPoint_B = Point_B
# self.triPoint_C = Point_C
# self.normal = glm.cross(Point_B - Point_A,Point_C - Point_A)
## self.triPoint_A = glm.vec3( 1.0,1.0,0.0)
## self.triPoint_B = glm.vec3(-1.0,1.0,0.0)
## self.triPoint_C = glm.vec3( 0.0,0.0,0.0)
#
# def testRay(self , camRay = Ray()):
# plane = Plane(self.triPoint_A,self.normal)
# n_dot_d = glm.dot(self.normal,camRay.rayDirection)
# if glm.abs(n_dot_d) < 0.0001:
# return False
# n_dot_ps = glm.dot(self.normal,self.triPoint_A - camRay.startPoint)
# camRay.t = n_dot_ps / n_dot_d
## planePoint = camRay.startPoint + camRay.t * camRay.rayDirection
# planePoint = camRay.pointFromRay()
# AtoB_Edge = triPoint_B - triPoint_A
# BtoC_Edge = triPoint_C - triPoint_B
# CtoA_Edge = triPoint_A - triPoint_C
# AtoPoint = planePoint - triPoint_A
# BtoPoint = planePoint - triPoint_B
# CtoPoint = planePoint - triPoint_C
# ATestVec = glm.cross(AtoB_Edge,AtoPoint)
# BTestVec = glm.cross(BtoC_Edge,BtoPoint)
# CTestVec = glm.cross(CtoA_Edge,CtoPoint)
# AtestVecMatchNormal = glm.dot(ATestVec,self.normal) > 0.0
# BtestVecMatchNormal = glm.dot(BTestVec,self.normal) > 0.0
# CtestVecMatchNormal = glm.dot(CTestVec,self.normal) > 0.0
# hitTriangle = AtestVecMatchNormal and BtestVecMatchNormal and CtestVecMatchNormal
# return hitTriangle
class CameraBehavior(Enum):
    """Camera control modes supported by Camera."""
    FIRST_PERSON = 1
    SPECTATOR = 2
    FLIGHT = 3
    ORBIT = 4
# --- Camera tuning constants -------------------------------------------------
# Default movement speed (world units per second).
SPEED = 4.0
# SENSITIVITY and DEFAULT_ROTATION_SPEED have the same objective for rotation with mouse
SENSITIVITY = 0.01
DEFAULT_ROTATION_SPEED = 0.3
# Default horizontal field of view (degrees) and clip-plane distances.
DEFAULT_FOVX = 70.0
DEFAULT_ZNEAR = 0.1
DEFAULT_ZFAR = 500.0
# Orbit-mode zoom limits derived from the clip planes.
DEFAULT_ORBIT_MIN_ZOOM = DEFAULT_ZNEAR + 1.0
DEFAULT_ORBIT_MAX_ZOOM = DEFAULT_ZFAR * 0.5
DEFAULT_ORBIT_OFFSET_DISTANCE = DEFAULT_ORBIT_MIN_ZOOM + (DEFAULT_ORBIT_MAX_ZOOM - DEFAULT_ORBIT_MIN_ZOOM) * 0.25
# World-space basis vectors.
WORLD_XAXIS = glm.vec3(1.0, 0.0, 0.0);
WORLD_YAXIS = glm.vec3(0.0, 1.0, 0.0);
WORLD_ZAXIS = glm.vec3(0.0, 0.0, 1.0);
CAMERA_ZOOM_MAX = 5.0
CAMERA_ZOOM_MIN = 1.5
CAMERA_SPEED_FLIGHT_YAW = 100.0
CAMERA_SPEED_ORBIT_ROLL = 100.0
CAMERA_ACCELERATION = glm.vec3(4.0, 4.0, 4.0);
CAMERA_VELOCITY = glm.vec3(1.0, 1.0, 1.0);
class Cameras(dict):
    """Singleton registry mapping camera names to Camera instances.

    Use Cameras.inst(oglFrame) to obtain the shared instance; direct
    construction is forbidden.
    """

    _instance = None
    mainCamera = None

    def __init__(self):
        raise RuntimeError('Call instance() instead')

    @classmethod
    def inst(cls, oglFrame=None):
        """Return the shared registry, creating it (with a default camera)
        on first use."""
        if cls._instance is None:
            cls._instance = cls.__new__(cls)
            cls.oglFrame = oglFrame
            cls._instance['_Default_'] = Camera(cls.oglFrame, position=(5, 5, 40), pitch=-5, yaw=-90)
            cls.mainCamera = cls._instance['_Default_']
        return cls._instance

    def newCamera(self, name):
        """Register a fresh camera under the given name."""
        self[name] = Camera(self.oglFrame, position=(5, 5, 40), pitch=-5, yaw=-90)

    def setCamera(self, name):
        """Make the named camera current, falling back to the default."""
        self.mainCamera = self._instance[name] if name in self else self._instance['_Default_']

    def getMainCamera(self):
        """Return the currently active camera."""
        return self.mainCamera
class Camera:
    """Free-look camera with first-person / spectator / flight / orbit modes.

    Owns the view, projection and orthographic matrices, converts mouse and
    keyboard input into position and yaw/pitch/roll, and provides picking
    helpers (ray casting, project / unproject against axis-aligned planes).

    Fixes: rotateOrbit was missing ``self`` in its signature, so any instance
    call bound the instance to headingDegrees and crashed; unused local
    removed from setBehavior; dead commented-out code deleted.
    """

    def __init__(self, oglFrame, position=(0, 0, 20), yaw=-90, pitch=0, roll=0):
        self.oglFrame = oglFrame
        self.m_behavior = CameraBehavior.FIRST_PERSON
        self.m_accumPitchDegrees = 0.0
        self.m_savedAccumPitchDegrees = 0.0
        self.m_rotationSpeed = DEFAULT_ROTATION_SPEED
        self.m_fovx = DEFAULT_FOVX
        self.m_aspectRatio = 0.0
        self.m_znear = DEFAULT_ZNEAR
        self.m_zfar = DEFAULT_ZFAR
        self.m_orbitMinZoom = DEFAULT_ORBIT_MIN_ZOOM
        self.m_orbitMaxZoom = DEFAULT_ORBIT_MAX_ZOOM
        self.m_orbitOffsetDistance = DEFAULT_ORBIT_OFFSET_DISTANCE
        # position of the camera, plus a saved copy used while orbiting
        self.m_eye = glm.vec3(position)
        self.m_savedEye = glm.vec3(position)
        # position of the object the camera is looking at or orbiting around
        self.m_target = glm.vec3(0.0, 0.0, 0.0)
        # the camera basis: right (X), up (Y), forward (-Z)
        self.right = glm.vec3(1.0, 0.0, 0.0)
        self.up = glm.vec3(0.0, 1.0, 0.0)
        self.forward = glm.vec3(0.0, 0.0, -1.0)
        # axis of the target for orbiting
        self.m_targetYAxis = glm.vec3(0.0, 1.0, 0.0)
        # the view direction (negative of the camera z axis)
        self.m_viewDir = glm.vec3(0.0, 0.0, -1.0)
        self.yaw = yaw      # rotation around the up vector
        self.pitch = pitch  # rotation around the right vector
        self.roll = roll    # rotation around the forward vector
        # movement acceleration and velocity
        self.m_acceleration = glm.vec3(0.0, 0.0, 0.0)
        self.m_currentVelocity = glm.vec3(0.0, 0.0, 0.0)
        self.m_velocity = glm.vec3(0.0, 0.0, 0.0)
        self.speed = SPEED
        # orientation quaternions (current and saved)
        self.m_orientation = glm.quat()
        self.m_savedOrientation = glm.quat()
        # matrices
        self.m_viewMatrix = glm.mat4(1.0)
        self.m_projMatrix = glm.mat4(0.0)
        self.m_viewProjMatrix = glm.mat4(1.0)
        self.m_orthoMatrix = glm.mat4(1.0)
        self.ortho(-1.0, 1.0, -1.0, 1.0, -1.0, 1.0)
        self.viewWidth = oglFrame.size().width()
        self.viewHeight = oglFrame.size().height()
        self.setAspectRatio(oglFrame.size().width(), oglFrame.size().height())
        self.lookAt(self.m_eye, self.m_eye + self.forward, self.up)
        self.m_viewProjMatrix = self.m_viewMatrix * self.m_projMatrix
        # axis-aligned picking planes through the origin
        self.planeXY = Plane(pointOnPlane=glm.vec3(0.0, 0.0, 0.0), normal=glm.vec3(0.0, 0.0, 1.0))
        self.planeYZ = Plane(pointOnPlane=glm.vec3(0.0, 0.0, 0.0), normal=glm.vec3(1.0, 0.0, 0.0))
        self.planeXZ = Plane(pointOnPlane=glm.vec3(0.0, 0.0, 0.0), normal=glm.vec3(0.0, 1.0, 0.0))

    def lookAt(self, eye, target, up):
        """Build a right-handed view matrix from eye/target/up and derive the
        accumulated pitch and orientation quaternion from it."""
        self.m_eye = eye
        self.m_target = target
        # camera z axis points from the target back to the eye
        m_zAxis = eye - target
        m_zAxis = glm.normalize(m_zAxis)
        self.m_viewDir = -m_zAxis
        m_xAxis = glm.cross(up, m_zAxis)
        m_xAxis = glm.normalize(m_xAxis)
        m_yAxis = glm.cross(m_zAxis, m_xAxis)
        m_yAxis = glm.normalize(m_yAxis)
        self.m_viewMatrix[0][0] = m_xAxis.x
        self.m_viewMatrix[1][0] = m_xAxis.y
        self.m_viewMatrix[2][0] = m_xAxis.z
        self.m_viewMatrix[3][0] = -glm.dot(m_xAxis, eye)
        self.m_viewMatrix[0][1] = m_yAxis.x
        self.m_viewMatrix[1][1] = m_yAxis.y
        self.m_viewMatrix[2][1] = m_yAxis.z
        self.m_viewMatrix[3][1] = -glm.dot(m_yAxis, eye)
        self.m_viewMatrix[0][2] = m_zAxis.x
        self.m_viewMatrix[1][2] = m_zAxis.y
        self.m_viewMatrix[2][2] = m_zAxis.z
        self.m_viewMatrix[3][2] = -glm.dot(m_zAxis, eye)
        # Extract the pitch angle from the view matrix.
        self.m_accumPitchDegrees = glm.degrees(glm.asin(self.m_viewMatrix[1][2]))
        self.m_orientation = glm.quat(self.m_viewMatrix)

    # perspective Right Handed
    def perspective(self, fovx, aspect, znear, zfar):
        """Set a right-handed perspective projection (fovx in radians)."""
        cotangent = 1.0 / glm.tan(fovx / 2.0)
        self.m_projMatrix = glm.mat4(0)
        self.m_projMatrix[0][0] = cotangent / aspect
        self.m_projMatrix[1][1] = cotangent
        self.m_projMatrix[2][2] = -(zfar + znear) / (zfar - znear)
        self.m_projMatrix[2][3] = -1.0
        self.m_projMatrix[3][2] = -(2.0 * zfar * znear) / (zfar - znear)

    # ortho Right Handed
    def ortho(self, left, right, bottom, top, zNear, zFar):
        """Set a right-handed orthographic projection matrix."""
        self.m_orthoMatrix = glm.mat4(0)
        self.m_orthoMatrix[0][0] = 2.0 / (right - left)
        self.m_orthoMatrix[1][1] = 2.0 / (top - bottom)
        self.m_orthoMatrix[2][2] = -2.0 / (zFar - zNear)
        self.m_orthoMatrix[3][0] = -(right + left) / (right - left)
        self.m_orthoMatrix[3][1] = -(top + bottom) / (top - bottom)
        self.m_orthoMatrix[3][2] = -(zFar + zNear) / (zFar - zNear)

    def update(self):
        """Per-frame update: apply keyboard movement, mouse rotation, rebuild
        the camera basis / view matrix, and cache the inverse view matrix
        used for ray picking."""
        self.move()
        self.rotate()
        self.update_camera_vectors()
        self.invertedViewMatrix = glm.inverse(self.m_viewMatrix)

    def updateViewMatrix(self):
        """Refresh the translation row of the view matrix from the current
        axes; in ORBIT mode first re-derive the eye from the target."""
        m_xAxis = glm.vec3(self.m_viewMatrix[0][0], self.m_viewMatrix[1][0], self.m_viewMatrix[2][0])
        m_yAxis = glm.vec3(self.m_viewMatrix[0][1], self.m_viewMatrix[1][1], self.m_viewMatrix[2][1])
        m_zAxis = glm.vec3(self.m_viewMatrix[0][2], self.m_viewMatrix[1][2], self.m_viewMatrix[2][2])
        self.m_viewDir = -m_zAxis
        if self.m_behavior == CameraBehavior.ORBIT:
            # Keep the camera m_orbitOffsetDistance units behind the target
            # along the camera's local z axis (third-person view).
            self.m_eye = self.m_target + m_zAxis * self.m_orbitOffsetDistance
        self.m_viewMatrix[3][0] = -glm.dot(m_xAxis, self.m_eye)
        self.m_viewMatrix[3][1] = -glm.dot(m_yAxis, self.m_eye)
        self.m_viewMatrix[3][2] = -glm.dot(m_zAxis, self.m_eye)

    def setAspectRatio(self, width, height):
        """Record the viewport size and rebuild the (inverse) projection."""
        self.viewWidth = width
        self.viewHeight = height
        self.m_aspectRatio = width / height
        self.perspective(glm.radians(self.m_fovx), self.m_aspectRatio, self.m_znear, self.m_zfar)
        self.invertedProjectionMatrix = glm.inverse(self.m_projMatrix)

    def getFovx(self):
        return self.m_fovx

    def setFovx(self, fovx):
        """Set the horizontal field of view (degrees) and rebuild projection."""
        self.m_fovx = fovx
        self.perspective(glm.radians(self.m_fovx), self.m_aspectRatio, self.m_znear, self.m_zfar)
        self.invertedProjectionMatrix = glm.inverse(self.m_projMatrix)

    def get_view_matrix(self):
        return self.m_viewMatrix

    def get_view(self):
        return self.m_viewMatrix

    def get_projection(self):
        return self.m_projMatrix

    def get_Ortho(self):
        return self.m_orthoMatrix

    def setBehavior(self, m_behavior):
        """Switch the camera control mode (no-op when unchanged)."""
        if self.m_behavior == m_behavior:
            return
        self.m_behavior = m_behavior

    def rotate(self):
        """Accumulate mouse deltas into yaw/pitch (roll instead of yaw in
        FLIGHT mode), clamping pitch to avoid gimbal flip at the poles."""
        if self.oglFrame.moveCamera:
            # All four behaviors apply rel_x to yaw except FLIGHT (roll).
            if self.m_behavior == CameraBehavior.FLIGHT:
                self.roll += self.oglFrame.rel_x * SENSITIVITY
            else:
                self.yaw += self.oglFrame.rel_x * SENSITIVITY
            self.pitch -= self.oglFrame.rel_y * SENSITIVITY
            self.pitch = max(-89, min(89, self.pitch))

    def move(self):
        """Translate the eye along the camera basis according to the pressed
        keys.

        keysPress indices: 0 'w' forward, 1 's' back, 2 'a' left, 3 'd'
        right, 4 up, 5 down, 6 speed up, 7 slow down.
        """
        velocity = self.speed * self.oglFrame.delta_time
        if self.oglFrame.keysPress[0]:  # 'w'
            self.m_eye += self.forward * velocity
        if self.oglFrame.keysPress[1]:  # 's'
            self.m_eye -= self.forward * velocity
        if self.oglFrame.keysPress[2]:  # 'a'
            self.m_eye -= self.right * velocity
        if self.oglFrame.keysPress[3]:  # 'd'
            self.m_eye += self.right * velocity
        if self.oglFrame.keysPress[4]:
            self.m_eye += self.up * velocity
        if self.oglFrame.keysPress[5]:
            self.m_eye -= self.up * velocity
        if self.oglFrame.keysPress[6]:
            self.speed += 0.1
            print(self.speed)
        if self.oglFrame.keysPress[7]:
            self.speed -= 0.1
            print(self.speed)

    def update_camera_vectors(self):
        """Rebuild forward/right/up from the Euler yaw/pitch angles and
        refresh the view matrix via lookAt."""
        yaw, pitch = glm.radians(self.yaw), glm.radians(self.pitch)
        self.forward.x = glm.cos(yaw) * glm.cos(pitch)
        self.forward.y = glm.sin(pitch)
        self.forward.z = glm.sin(yaw) * glm.cos(pitch)
        self.forward = glm.normalize(self.forward)
        self.right = glm.normalize(glm.cross(self.forward, glm.vec3(0, 1, 0)))
        self.up = glm.normalize(glm.cross(self.right, self.forward))
        self.lookAt(self.m_eye, self.m_eye + self.forward, self.up)

    def rotateOrbit(self, headingDegrees, pitchDegrees, rollDegrees):
        """Orbit-mode rotation: fold the given Euler deltas (degrees) into
        the orientation quaternion.

        Fix: the original signature was missing ``self``, so calling the
        method on an instance bound the instance to headingDegrees and
        crashed inside glm.vec3.
        """
        rot = glm.quat(glm.radians(glm.vec3(headingDegrees, pitchDegrees, rollDegrees)))
        self.m_orientation = self.m_orientation * rot

    def setYaw(self, yaw):
        """Set yaw (degrees) and rebuild the camera basis."""
        self.yaw = yaw
        self.update_camera_vectors()

    def getYaw(self):
        return self.yaw

    def getPitch(self):
        return self.pitch

    def setPitch(self, pitch):
        """Set pitch (degrees), clamped to [-89, 89], and rebuild the basis."""
        self.pitch = max(-89, min(89, pitch))
        self.update_camera_vectors()

    def getRoll(self):
        return self.roll

    def getPosition(self):
        return self.m_eye

    def setPosition(self, position):
        self.m_eye = position

    def setTarget(self, target):
        self.m_target = target

    def getTarget(self):
        return self.m_target

    def getNormalizedDeviceCoord(self, m_PosX, m_PosY):
        """Window pixel -> normalized device coordinates (clip space, z=-1)."""
        x = (2.0 * m_PosX) / self.viewWidth - 1.0
        y = 1.0 - (2.0 * m_PosY) / self.viewHeight
        return glm.vec4(x, y, -1.0, 1.0)

    def toEyeCoords(self, clipCoords):
        """Clip space -> eye-space direction (w forced to 0)."""
        eyeCoord = self.invertedProjectionMatrix * clipCoords
        return glm.vec4(eyeCoord.x, eyeCoord.y, -1.0, 0.0)

    def toWorldCoords(self, eyeCoord):
        """Eye-space direction -> normalized world-space ray direction."""
        rayWorld = glm.inverse(self.m_viewMatrix) * eyeCoord
        rayWorld = glm.vec3(round(rayWorld.x, 5), round(rayWorld.y, 5), round(rayWorld.z, 5))
        rayWorld = glm.normalize(rayWorld)
        return rayWorld

    def get_Ray(self, pos):
        """Build a world-space picking Ray through window position pos."""
        self.m_Pos = (pos[0], pos[1])
        normalizeDeviceCoord = self.getNormalizedDeviceCoord(pos[0], pos[1])
        clipCoord = normalizeDeviceCoord
        eyeCoord = self.toEyeCoords(clipCoord)
        rayWorld = self.toWorldCoords(eyeCoord)
        return Ray(self.m_eye, rayWorld)

    def testRay(self, mPos):
        """Intersect the picking ray through mPos with the ground (XZ) plane."""
        return self.planeXZ.testRay(self.get_Ray(mPos))

    def glUnProject(self, point1):
        """Unproject a window point onto the XZ plane (keeps the ray in
        self.ray1)."""
        self.ray1 = self.get_Ray(point1)
        return self.planeXZ.testRay(self.ray1)

    def glProject(self, pos1):
        """Project a world-space point to window coordinates (manual
        gluProject equivalent)."""
        windowCoordinate = [0, 0]
        # Modelview transform
        fTx = self.m_viewMatrix[0][0]*pos1.x+self.m_viewMatrix[1][0]*pos1.y+self.m_viewMatrix[2][0]*pos1.z+self.m_viewMatrix[3][0]  # w is always 1
        fTy = self.m_viewMatrix[0][1]*pos1.x+self.m_viewMatrix[1][1]*pos1.y+self.m_viewMatrix[2][1]*pos1.z+self.m_viewMatrix[3][1]
        fTz = self.m_viewMatrix[0][2]*pos1.x+self.m_viewMatrix[1][2]*pos1.y+self.m_viewMatrix[2][2]*pos1.z+self.m_viewMatrix[3][2]
        fTw = self.m_viewMatrix[0][3]*pos1.x+self.m_viewMatrix[1][3]*pos1.y+self.m_viewMatrix[2][3]*pos1.z+self.m_viewMatrix[3][3]
        # Projection transform; the final row of the projection matrix is
        # always [0 0 -1 0], so the w output is optimized to -fTz below.
        fTOx = self.m_projMatrix[0][0]*fTx+self.m_projMatrix[1][0]*fTy+self.m_projMatrix[2][0]*fTz+self.m_projMatrix[3][0]*fTw
        fTOy = self.m_projMatrix[0][1]*fTx+self.m_projMatrix[1][1]*fTy+self.m_projMatrix[2][1]*fTz+self.m_projMatrix[3][1]*fTw
        fTOz = self.m_projMatrix[0][2]*fTx+self.m_projMatrix[1][2]*fTy+self.m_projMatrix[2][2]*fTz+self.m_projMatrix[3][2]*fTw
        fTOw = -fTz
        # The result normalizes between -1 and 1
        if fTOw != 0.0:  # the w value
            fTOw = 1.0 / fTOw
        # Perspective division
        fTOx *= fTOw
        fTOy *= fTOw
        fTOz *= fTOw
        # Window coordinates: map x, y to pixel range (y is flipped)
        windowCoordinate[0] = np.round(((fTOx*0.5+0.5)*self.viewWidth)+0, 0)
        windowCoordinate[1] = np.round(self.viewHeight - ((fTOy*0.5+0.5)*self.viewHeight)+0, 0)
        return windowCoordinate[0], windowCoordinate[1]
#// pseudo code found at:
#// http://www.gamedev.net/topic/221071-simple-raysphere-collision/
#Vec3d ClosestPoint(const Vec3d A, const Vec3d B,
# const Vec3d P, double *t)
#{
# Vec3d AB = B - A;
# double ab_square = DotProduct(AB, AB);
# Vec3d AP = P - A;
# double ap_dot_ab = DotProduct(AP, AB);
# // t is a projection param when we project vector AP onto AB
# *t = ap_dot_ab / ab_square;
# // calculate the closest point
# Vec3d Q = A + AB * (*t);
# return Q;
#}
#bool RayTest(const Vec3d, const Vec3d start, const Vec3d end,
# Vec3d *pt, double *t, double epsilon)
#{
# *pt = ClosestPoint(start, end, center, t);
# double len = Distance(*pt, m_pos);
# return len < (m_radius+epsilon);
#}
#// note that "t" param can be used further
#// the same is with "pt" | Gaterman007/PythonPyQt | src/Camera.py | Camera.py | py | 23,068 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "glm.vec3",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "glm.dot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "glm.abs",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "glm.dot",
"line_number": 23,
"usage_t... |
10389850703 | import matplotlib.pyplot as plt
import numpy as np
from math import factorial, sqrt
from scipy.misc import derivative
def f(x, l):
    """Base polynomial (x^2 - 1)^l used by the Rodrigues-style derivative."""
    base = x * x - 1
    return base ** l
def der_f(x, l, m):
    """Numerical (l + |m|)-th derivative of f at x via central differences.

    NOTE(review): scipy.misc.derivative is deprecated and removed in modern
    SciPy releases — confirm the pinned SciPy version or replace with a local
    finite-difference implementation.
    """
    order = l + abs(m)
    return derivative(f, x0=x, n=order, args=[l], order=2 * order + 1)
def calculate_func(theta, phi, l, m):
    """Squared real spherical-harmonic-like amplitude for degree l, order m
    at angles (theta, phi); accepts scalars or numpy arrays."""
    # normalization constant of the associated Legendre expansion
    a = sqrt(factorial(l - abs(m)) * (2 * l + 1) / (factorial(l + abs(m)) * 4 * np.pi))
    # Rodrigues-formula prefactor 1 / (2^l l!)
    factor = 1 / 2 ** l * 1 / factorial(l)
    cos_theta = np.cos(theta)
    legendre_part = (1 - cos_theta ** 2) ** (abs(m) / 2) * der_f(cos_theta, l, m)
    return (a * np.cos(phi * m) * factor * legendre_part) ** 2
def draw_axis(fig, y_lim: list):
    """Overlay a frameless polar axes on a stripped-down linear axes that
    provides the radial scale; return the polar axes."""
    rect = [0.1, 0.1, 0.8, 0.8]
    linear_axes = fig.add_axes(rect)
    linear_axes.axes.get_xaxis().set_visible(False)
    for side in ("right", "top", "bottom"):
        linear_axes.spines[side].set_visible(False)
    linear_axes.set_ylim(y_lim)
    polar_axes = fig.add_axes(rect, polar=True, frameon=False)
    polar_axes.set_theta_zero_location("N")
    # angular ticks roughly every pi/6 over the full circle
    polar_axes.set_xticks([i / 10000 for i in range(0, 2 * 31415 + 1, 5236)])
    return polar_axes
def draw_tick_circles(polar_subplot, max_r):
    """Draw alternating green/blue reference circles out to max_r, spaced at
    most 0.05 apart.

    NOTE(review): relies on the module-level ``theta`` array being defined
    before this is called — confirm against the __main__ block.
    """
    colors = ["green", "blue"]
    circle_amount = 1
    while max_r / circle_amount > 0.05:
        circle_amount += 1
    for index in range(circle_amount):
        radius = (index + 1) * max_r / circle_amount
        polar_subplot.plot(theta,
                           [radius for _ in range(len(theta))],
                           color=colors[index % 2],
                           linewidth=0.5)
def draw_func_plot(theta, r, polar_subplot):
    """Plot r(theta) as a thin black curve on the given polar axes."""
    polar_subplot.plot(theta, r, color="black", linewidth=0.5)
def draw_func_plot_3d(fig, theta, phi, r):
    """Render the function surface as a 3D wireframe plus a bounding-sphere
    contour (radius = max(r)) on a fresh 3D subplot.

    Fix: the original had a stray line-continuation backslash after the
    plot_wireframe call, joining it with the next statement and making the
    module a SyntaxError.
    """
    ax = fig.add_subplot(1, 1, 1, projection='3d')
    # spherical -> Cartesian conversion of the function surface
    func_x = r * np.sin(phi) * np.cos(theta)
    func_y = r * np.sin(phi) * np.sin(theta)
    func_z = r * np.cos(phi)
    plot_func = ax.plot_wireframe(func_x, func_y, func_z, color='blue')
    # reference sphere with constant radius max(r)
    r_sphere = np.empty((len(theta), len(phi)))
    a = np.max(r)
    r_sphere.fill(a)
    x_sphere = r_sphere * np.sin(phi) * np.cos(theta)
    y_sphere = r_sphere * np.sin(phi) * np.sin(theta)
    z_sphere = r_sphere * np.cos(phi)
    plot_sphere = ax.contour3D(x_sphere, y_sphere, z_sphere, 20, cmap='binary')
if __name__ == '__main__':
    fig_3d = plt.figure()
    fig_2d = plt.figure()
    np.set_printoptions(threshold=np.inf, linewidth=np.inf)
    # full-circle samples for both angles
    theta, phi = np.linspace(0, 2 * np.pi, 360), np.linspace(0, 2 * np.pi, 360)
    # NOTE(review): np.meshgrid(theta, phi) returns the theta grid first, so
    # these names look swapped; harmless here because both ranges are equal,
    # but confirm the intended order.
    tuple_phi, tuple_theta = np.meshgrid(theta, phi)
    # degree l=10, order m=-8
    r = calculate_func(tuple_theta, tuple_phi, 10, -8)
    max_r = np.max(r)
    polar_subplot = draw_axis(fig_2d, [-max_r, max_r])
    draw_tick_circles(polar_subplot, max_r)
    # 2D polar view uses the maximum over phi for each theta
    draw_func_plot(theta, np.amax(r, axis=1), polar_subplot)
    draw_func_plot_3d(fig_3d, tuple_theta, tuple_phi, r)
    plt.yticks([])
    plt.show()
| Mihinator3000/Group-Projects | Physics/Modeling5/main.py | main.py | py | 2,850 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scipy.misc.derivative",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "math.sqrt",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "math.factorial",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_n... |
34000015765 | from collections import deque
import sys
# NOTE(review): the BFS below is iterative; the raised recursion limit looks
# like leftover template code.
sys.setrecursionlimit(10**8)
# N obstacles; (X, Y) is the goal cell relative to the start at the origin.
N, X, Y = map(int, input().split())
OXY = [list(map(int, input().split())) for _ in range(N)]
# The six legal single-step moves on this grid.
D_POS = [(1, 1), (0, 1), (-1, 1), (1, 0), (-1, 0), (0, -1)]
# 410x410 board with the origin shifted to (205, 205) so coordinates in
# [-205, 204] stay in range; "#" marks obstacles and doubles as "visited".
grid = [["." for _ in range(410)] for _ in range(410)]
grid[205+Y][205+X] = "G"
for ox, oy in OXY:
    ox, oy = ox+205, oy+205
    grid[oy][ox] = "#"
grid[205][205] = "#"
# BFS distance table, initialized to an unreachable sentinel.
min_routes = [[10**18 for _ in range(410)] for _ in range(410)]
min_routes[205][205] = 0
q = deque([(205, 205)])
while q:
    # print(q)
    tx, ty = q.popleft()
    t_mr = min_routes[ty][tx]
    # print(tx, ty, t_mr)
    # NOTE(review): once the goal is enqueued its "G" marker is overwritten
    # with "#" below, so this early exit never fires; the final answer is
    # still correct because min_routes keeps the BFS distance.
    if grid[ty][tx] == "G":
        break
    for dx, dy in D_POS:
        x, y = tx+dx, ty+dy
        if not (0 <= x < 410 and 0 <= y < 410):
            continue
        if grid[y][x] == "#":
            continue
        if min_routes[y][x] <= t_mr + 1:
            continue
        grid[y][x] = "#"
        min_routes[y][x] = t_mr + 1
        q.append((x, y))
# A goal still holding the sentinel distance is unreachable: print -1.
print(-1 if min_routes[205+Y][205+X] >= 10**18 else min_routes[205+Y][205+X])
| yojiyama7/python_competitive_programming | atcoder/_old/past_3/g_.py | g_.py | py | 1,061 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.setrecursionlimit",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 20,
"usage_type": "call"
}
] |
8971658666 | from kafka import KafkaConsumer
from kpong.serde import ping_pong_deserializer
# Consumer subscribed to the "pingpong" topic on a local broker; a new group
# starts from the earliest offset, and values are decoded with the project's
# ping/pong deserializer.
consumer = KafkaConsumer(
    "pingpong",
    group_id="kpong-1",
    client_id="kpong",
    bootstrap_servers='localhost:9092',
    auto_offset_reset='earliest',
    value_deserializer=ping_pong_deserializer
)
def consume_ping_pong():
    """Block forever, printing every deserialized message from the topic."""
    print("reading...")
    for record in consumer:
        print(record.value)
# Run the (blocking) consumer loop when executed as a script.
if __name__ == '__main__':
    consume_ping_pong()
| apmaros/kpong | src/kpong/consumer.py | consumer.py | py | 446 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "kafka.KafkaConsumer",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "kpong.serde.ping_pong_deserializer",
"line_number": 11,
"usage_type": "name"
}
] |
42917236725 | """A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
# from codecs import open
from os import path
# from agentml import __version__
# Absolute path of the directory containing this setup.py.
here = path.abspath(path.dirname(__file__))
# Get the long description from the relevant file
# with open(path.join(here, 'DESCRIPTION.rst'), encoding='utf-8') as f:
# long_description = f.read()
def readme():
    """Return the contents of README.rst for use as the long description."""
    with open('README.rst') as readme_file:
        return readme_file.read()
# Package metadata and build configuration for the AgentML distribution.
setup(
    name='AgentML',
    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.3.1',
    description='An XML dialect for creating natural language software agents',
    long_description=readme(),
    # The project's main homepage.
    url='https://github.com/FujiMakoto/AgentML',
    # Author details
    author='Makoto Fujimoto',
    author_email='makoto@makoto.io',
    # License
    license='MIT',
    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: MIT License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
        'Topic :: Scientific/Engineering :: Artificial Intelligence',
        'Topic :: Text Processing :: Markup :: XML'
    ],
    keywords=['bot', 'chatbot', 'chatterbot', 'ai', 'aiml', 'rivescript'],
    # Ship every package except the test suite and the demo app.
    packages=find_packages(exclude=['tests', 'demo']),
    install_requires=['lxml>=3.4.4,<3.5', 'six>=1.10.0,<1.11'],
    # Bundle the AgentML schemas and stock intelligence files.
    package_data={
        'agentml': ['intelligence/*.aml', 'schemas/*.rng', 'schemas/*.xsd', 'schemas/tags/*.rng'],
    },
)
| rainyDayDevs/AgentML | setup.py | setup.py | py | 2,107 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "os.path.abspath",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "os.path.dirname",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_... |
42488837913 | from spylls.hunspell import Dictionary
import pandas as pd
import unidecode
if __name__ == '__main__':
target_length = 5
list_of_words = []
dictionary = Dictionary.from_files('/Users/aitoriraolagalarza/Desktop/pycharm_projects/wordle_dictionary_builder/data/hunspell-cat/catalan')
for word in dictionary.dic.words:
if str(word.captype) != 'Type.NO': # Don't include words with no capital letters
continue
if len(word.stem) == target_length:
list_of_words.append(unidecode.unidecode(word.stem))
df = pd.DataFrame(list_of_words)
# Filter duplicates
df = df.drop_duplicates()
df.to_csv('catalan_dictionary.csv', index=False, header=False)
| aitirga/wordle_dictionary_builder | tasks/generate_catalan_dictionary/generate_catalan_dictionary.py | generate_catalan_dictionary.py | py | 713 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "spylls.hunspell.Dictionary.from_files",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "spylls.hunspell.Dictionary",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "unidecode.unidecode",
"line_number": 15,
"usage_type": "call"
},
{
... |
41277479802 | import pandas as pd
import plot_likert as plot_likert
import numpy as np
import matplotlib.pyplot as plt
import pylab as p
import streamlit as st
st.set_option('deprecation.showPyplotGlobalUse', False)
st.title('Visualisierung der Seminarevaluation')
st.text('Hier kannst du deine Seminarevaluation in Histogrammen anzeigen lassen')
uploaded_file = st.file_uploader('Hier die CSV-Datei aus dem Moodle-Kurs hochladen:')
if uploaded_file:
df2 = pd.read_csv(uploaded_file)
df2 = df2.replace('Nicht beantwortbar', np.NaN)
scale = ['Trifft nicht zu', 'Trifft eher nicht zu', 'Teils/Teils', 'Trifft eher zu', 'Trifft voll und ganz zu']
data_1 = df2[['1.1) Aufbau und Gliederung der Veranstaltung waren klar. ',
'1.2) Die Lehrveranstaltung hat mir viele neue inhaltliche Erkenntnisse gebracht.',
'1.3) Die Leistungsanforderungen wurden transparent dargelegt.',
'1.4) Die zu Beginn der Veranstaltung beschriebenen Lernziele wurden bisher erfüllt.',
'1.5) Die veranstaltungsbegleitenden Materialien erleichterten das Verständnis des Seminarinhalts/-stoffes.',
'1.6) Die digitalen Formate unterstützten den Lernprozess.']]
data_2 = df2[['2.1) Die/der Dozent_in war es wichtig, dass die Studierenden etwas lernen',
'2.2) Die Lehrveranstaltung/Aufgaben trugen zum Verständnis von Theorie und Praxis bei.',
'2.3) Die Lerninhalte wurden mit Beispielen aus der Praxis veranschaulicht.',
'2.4) Die/Der Dozent_in folgte immer einem klar nachvollziehbarem roten Faden.',
'2.5) Die/Der Dozent_in stellte Verbindungen zu bereits besprochenem Stoff aus der Veranstaltung her.',
'2.6) Die/Der Dozent_in hat klar und deutlich gesprochen.',
'2.7) Die/Der Dozent_in antwortete verständlich auf die Fragen der Studierenden.',
'2.8) Die Aufgaben trugen zum Verständnis der Veranstaltung bei.',
'2.9) Die Lehrformen waren abwechslungsreich gestaltet.']]
data_3 = df2[[
'3.1 Die/der Dozent_in schuf eine Atmosphäre, in der Studierende Fragen und Kommentare zum Stoff ohne Scheu äußerten.',
'3.2) Die/der Dozent_in trug zu einem respektvollen Lehr-Lernklima in der Veranstaltung bei.',
'3.3) Die Studierenden trugen zu einem respektvollen Lehr-Lernklima in der Veranstaltung bei.',
'3.4) Die Studierenden wurden zur kritischen Auseinandersetzung mit den Inhalten der Veranstaltung angeregt.']]
data_4 = df2[[
'4.1) Die/Der Dozent_in achtete darauf, dass in ihren Ausführungen Menschen nicht in stereotypen/diskriminierenden Bildern beschrieben wurden.',
'4.2) Wenn Inhalte erläutert wurden, wurde die Vielfalt der Erfahrungen der Studierenden berücksichtigt.']]
#st.write(df2.head())
plot_likert.plot_likert(data_1, scale, plot_percentage=True,
bar_labels=True, bar_labels_color="snow",
colors=plot_likert.colors.default_with_darker_neutral, figsize=(8, 11))
st.pyplot()
plot_likert.plot_likert(data_2, scale, plot_percentage=True, bar_labels=True, bar_labels_color="snow",
colors=plot_likert.colors.default_with_darker_neutral, figsize=(8, 13))
st.pyplot()
plot_likert.plot_likert(data_3, scale, plot_percentage=True,
bar_labels=True, bar_labels_color="snow",
colors=plot_likert.colors.default_with_darker_neutral, figsize=(8, 6))
st.pyplot()
plot_likert.plot_likert(data_4, scale, plot_percentage=True,
bar_labels=True, bar_labels_color="snow",
colors=plot_likert.colors.default_with_darker_neutral, figsize=(8, 3))
st.pyplot()
st.markdown('## Was hat Ihnen an der Lehrveranstaltung besonders gut gefallen?')
df3 = df2['Was hat Ihnen an der Lehrveranstaltung besonders gut gefallen (Stichpunkte)'].dropna()
st.markdown(df3.values)
df4 = df2['Welche Verbesserungsvorschläge haben Sie? (Stichpunkte)'].dropna()
st.markdown('## Welche Verbesserungsvorschläge haben Sie?')
st.markdown(df4.values)
| larspelz/semev | webapp.py | webapp.py | py | 4,263 | python | de | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.set_option",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "streamlit.title",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "streamlit.text",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "streamlit.file_upl... |
4313095146 | # -*- coding: utf-8 -*-
"""
Created on Sun Apr 12 16:45:32 2020
@author: hitar
"""
import collections
nums = [0,0,1]
c = 0
l = len(nums)
co = collections.Counter(nums)
for i in range(co[0]):
nums.remove(0)
for i in range(co[0]):
nums.append(0)
print(nums) | smarthitarth/python-scripts | MoveZeroes.py | MoveZeroes.py | py | 265 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 12,
"usage_type": "call"
}
] |
33764452546 | from easyprocess import EasyProcess
from pyvirtualdisplay.abstractdisplay import AbstractDisplay
import logging
log = logging.getLogger(__name__)
PROGRAM = 'Xvfb'
URL = None
PACKAGE = 'xvfb'
class XvfbDisplay(AbstractDisplay):
'''
Xvfb wrapper
Xvfb is an X server that can run on machines with no display
hardware and no physical input devices. It emulates a dumb
framebuffer using virtual memory.
'''
def __init__(self, size=(1024, 768), color_depth=24, bgcolor='black', fbdir=None, dpi=None, randomizer=None):
'''
:param bgcolor: 'black' or 'white'
:param fbdir: If non-null, the virtual screen is memory-mapped
to a file in the given directory ('-fbdir' option)
:param dpi: screen resolution in dots per inch if not None
'''
self.screen = 0
self.size = size
self.color_depth = color_depth
self.process = None
self.bgcolor = bgcolor
self.display = None
self.fbdir = fbdir
self.dpi = dpi
AbstractDisplay.__init__(self, randomizer=randomizer)
@classmethod
def check_installed(cls):
EasyProcess([PROGRAM, '-help'], url=URL,
ubuntu_package=PACKAGE).check_installed()
@property
def _cmd(self):
cmd = [
dict(black='-br', white='-wr')[self.bgcolor],
'-nolisten',
'tcp',
'-screen',
str(self.screen),
'x'.join(map(str, list(self.size) + [self.color_depth])),
self.new_display_var,
]
if self.fbdir:
cmd += ['-fbdir', self.fbdir]
if self.dpi is not None:
cmd += ['-dpi', str(self.dpi)]
if self.check_startup:
cmd += ['-displayfd', str(self.check_startup_fd)]
return [PROGRAM] + cmd
| tawfiqul-islam/RM_DeepRL | venv/lib/python3.6/site-packages/pyvirtualdisplay/xvfb.py | xvfb.py | py | 1,872 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pyvirtualdisplay.abstractdisplay.AbstractDisplay",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pyvirtualdisplay.abstractdisplay.AbstractDisplay.__init__",
"line_number": ... |
16817075767 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Thu Mar 15 04:18:20 2018
@author: sadievrenseker
"""
#1. kutuphaneler
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
#2. Veri Onisleme
#2.1. Veri Yukleme
veriler = pd.read_csv('veriler.csv')
#encoder: Kategorik -> Numeric
ulke = veriler.iloc[:,0:1].values
from sklearn.preprocessing import LabelEncoder
le = LabelEncoder()
ulke[:,0] = le.fit_transform(ulke[:,0])
from sklearn.preprocessing import OneHotEncoder
ohe = OneHotEncoder(categorical_features='all')
ulke=ohe.fit_transform(ulke).toarray()
# ----
cinsiyet = veriler.iloc[:,-1:].values
le = LabelEncoder()
cinsiyet[:,0] = le.fit_transform(cinsiyet[:,0])
ohe = OneHotEncoder(categorical_features='all')
cinsiyet = ohe.fit_transform(cinsiyet).toarray()
#numpy dizileri dataframe donusumu
cinsiyet_erkek_dataframe = pd.DataFrame(data=cinsiyet[:,0], index=range(22), columns=['cinsiyet'])
cinsiyet_kadin_dataframe = pd.DataFrame(data=cinsiyet[:,1], index=range(22), columns=['cinsiyet'])
ulke_dataframe = pd.DataFrame(data=ulke, index=range(22), columns=['us', 'tr', 'fr'])
boy_dataframe = pd.DataFrame(veriler.iloc[:, 1:2])
kilo_dataframe = pd.DataFrame(veriler.iloc[:, 2:3])
yas_dataframe = pd.DataFrame(veriler.iloc[:, 3:4])
#dataframe birlestirme islemi
sonuc = pd.concat([ulke_dataframe, kilo_dataframe, yas_dataframe, cinsiyet_erkek_dataframe], axis=1)
#verilerin egitim ve test icin bolunmesi
from sklearn.cross_validation import train_test_split
x_train, x_test, y_train, y_test = train_test_split(sonuc, boy_dataframe, test_size=0.33, random_state=0)
'''
Modelleme, tahmin etme ve skorlama
'''
from sklearn.linear_model import LinearRegression
lr = LinearRegression()
lr.fit(x_train, y_train)
predict = lr.predict(x_test) | erkanzileli/learning-ml | cokluveriler.py | cokluveriler.py | py | 1,789 | python | tr | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.LabelEncoder",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.OneHotEncoder",
"line_number": 26,
"usage_type": "call"
},
{... |
27833537769 | from pyzbar import pyzbar
import cv2
import time
import argparse
import keras_ocr
#
# import os
# os.environ["CUDA_VISIBLE_DEVICES"] = "-1"
class NpImageBarcode:
def predict(self, path):
image = cv2.imread(path)
barcodes = pyzbar.decode(image)
if len(barcodes) == 0:
return None
data = barcodes[0].data.decode("utf-8")
return data.upper()
def parse_int(self, s):
if s is None or len(s) < 12:
return None
res = 0
nbl = 0
for i in range(len(s)):
try:
nb = int(s[i])
except:
nb = 0
nbl += 1
if nbl > 3:
return None
res = res * 10 + nb
return res
# images/chuv/Articles/Image/07323190073177_BOITE_01.JPG 0.035s
if __name__ == '__main__':
parser = argparse.ArgumentParser(description="Barcode and OCR reader")
parser.add_argument("path", help="Image path")
args = parser.parse_args()
np = NpImageBarcode()
t = time.perf_counter()
res = np.predict(args.path)
print(res)
print(f"Found in {time.perf_counter() - t:.3f}s")
| cyrilvincent/3CE | np_image_barcode.py | np_image_barcode.py | py | 1,182 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pyzbar.pyzbar.decode",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyzbar.pyzbar",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser... |
71936376673 | import json
import sys
from colorcet import bmw
import dask
import pandas as pd
import datashader as ds
from datashader import transfer_functions as tf
from datashader.utils import lnglat_to_meters as webm
from datashader_fix.tiles import render_tiles # use version of render tiles with this fix: https://github.com/holoviz/datashader/pull/874
# The threads scheduler is more efficient than the multiprocessor one (which is the default for dask.bag)
# See https://docs.dask.org/en/latest/setup/single-machine.html
dask.config.set(scheduler='threads')
df = pd.read_csv('./data/inaturalist.csv')
def get_extents(df, x, y):
return df[x].min(), df[y].min(), df[x].max(), df[y].max()
def load_data_func(x_range, y_range):
return df.loc[df['x'].between(*x_range) & df['y'].between(*y_range)]
def rasterize_func(df, x_range, y_range, height, width):
cvs = ds.Canvas(x_range=x_range, y_range=y_range,
plot_height=height, plot_width=width)
agg = cvs.points(df, 'x', 'y')
return agg
def shader_func(agg, span=None):
img = tf.shade(agg, cmap=bmw, how='log', span=span)
return img
def post_render_func(img, **kwargs):
return img
if __name__ == '__main__':
output_path = 'tiles'
full_extent_of_data = get_extents(df, 'x', 'y')
results = render_tiles(full_extent_of_data,
range(1, 7),
load_data_func=load_data_func,
rasterize_func=rasterize_func,
shader_func=shader_func,
post_render_func=post_render_func,
output_path=output_path)
| tomwhite/inaturalist-datashader-map | generate_tiles.py | generate_tiles.py | py | 1,636 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "dask.config.set",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "dask.config",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datashader.Canvas",
... |
34519823291 | from random import choice
from sys import argv
from base64 import b64encode
b = 22
def dwfregrgre(x, z):
wdef = []
for a in range(x, z + 1):
for i in range(2, a):
if (a % i) == 0:
break
else:
wdef.append(a)
return wdef
def sdsd(edefefef):
fvfegve = [x for x in range(2, edefefef)]
x = 2
rrerrrr = True
while rrerrrr:
for i in range(x * x, edefefef, x):
if i in fvfegve:
fvfegve.remove(i)
rrerrrr = False
for i in fvfegve:
if i > x:
x = i
rrerrrr = True
break
return fvfegve
def swsdwd(a, b):
if a == 0:
return (b, 0, 1)
else:
g, y, x = swsdwd(b % a, a)
return (g, x - (b // a) * y, y)
def swsdwdwdwa(a, m):
g, x, y = swsdwd(a, m)
if g != 1:
raise Exception('Oops! Error!')
else:
return x % m
def L(u, n):
return (u - 1) // n
if __name__ == '__main__':
print("Key cryptor v1.0")
if len(argv) != 2:
print("Start script like: python crypt.py <YourOwnPasswordString>")
if (not str(argv[1]).startswith("KLCTF{")) or (not str(argv[1]).endswith("}")):
print("Error! Password must starts with KLCTF")
exit()
p = choice(dwfregrgre(100, 1000))
q = choice(dwfregrgre(200, 1000))
print("Waiting for encryption...")
n = p * q
g = None
for i in range(n + 1, n * n):
if ((i % p) == 0) or ((i % q) == 0) or ((i % n) == 0):
continue
g = i
break
if g is None:
print("Error! Can't find g!")
exit()
lamb = (p - 1) * (q - 1)
mu = swsdwdwdwa(L(pow(g, lamb, n * n), n), n) % n
rc = sdsd(n - 1)
if len(rc) == 0:
print("Error! Candidates for r not found!")
exit()
if p in rc:
rc.remove(p)
if q in rc:
rc.remove(q)
r = choice(rc)
wdwfewgwggrgrg = [ord(x) for x in argv[1][6:-1]]
dcew = (pow(g, b, (n * n)) * pow(r, n, (n * n))) % (n * n)
for i in range(len(wdwfewgwggrgrg)):
wdwfewgwggrgrg[i] = (((pow(g, wdwfewgwggrgrg[i], (n * n)) * pow(r, n, (n * n))) % (n * n)) * dcew) % (n * n)
wdwfewgwggrgrg[i] = (L(pow(wdwfewgwggrgrg[i], lamb, (n * n)), n) * mu) % n
wdwfewgwggrgrg = b64encode(bytearray(wdwfewgwggrgrg))
print(str(wdwfewgwggrgrg)[2:-1])
| p4-team/ctf | 2017-10-06-klctf/bad_computations/crypt.py | crypt.py | py | 2,426 | python | en | code | 1,716 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 62,
"usage_type": "argument"
},
{
"api_name": "sys.argv",
"line_number": 65,
"usage_type": "name"
},
{
"api_name": "random.choice",
"line_number": 69,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number"... |
269449406 | from collections import defaultdict
import os
from pathlib import Path
from urllib.request import urlretrieve
import xml.etree.ElementTree as ET
# import the countries xml file
tmp = Path(os.getenv("TMP", "/tmp"))
countries = tmp / 'countries.xml'
if not countries.exists():
urlretrieve(
'https://bites-data.s3.us-east-2.amazonaws.com/countries.xml',
countries
)
def get_income_distribution(xml=countries):
"""
- Read in the countries xml as stored in countries variable.
- Parse the XML
- Return a dict of:
- keys = incomes (wb:incomeLevel)
- values = list of country names (wb:name)
"""
country_dict = defaultdict(list)
countries_data = ET.parse(countries).getroot()
for country in countries_data:
country_dict[country[4].text].append(country[1].text)
return country_dict
if __name__ == "__main__":
data = get_income_distribution()
print(data)
| rhelmstedter/pybites | 190/income.py | income.py | py | 975 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "urllib.request.urlretrieve",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.defaultdi... |
38951988250 | from flask import Flask, request, jsonify
from flask_sqlalchemy import SQLAlchemy
from flask_marshmallow import Marshmallow
import os
from flask_cors import CORS
app = Flask(__name__)
CORS(app)
basedir = os.path.abspath(os.path.dirname(__file__))
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///' + \
os.path.join(basedir, 'app.sqlite')
db = SQLAlchemy(app)
ma = Marshmallow(app)
class Professor(db.Model):
id = db.Column(db.Integer, primary_key=True)
name = db.Column(db.String(100), unique=False)
school = db.Column(db.String(144), unique=False)
def __init__(self, name, school):
self.name = name
self.school = school
class ProfessorSchema(ma.Schema):
class Meta:
fields = ('name', 'school')
professor_schema = ProfessorSchema()
professors_schema = ProfessorSchema(many=True)
# Endpoint to create a new professor
@app.route('/professor', methods=["POST"])
def add_professor():
name = request.json['name']
school = request.json['school']
new_professor = Professor(name, school)
db.session.add(new_professor)
db.session.commit()
professor = Professor.query.get(new_professor.id)
return professor_schema.jsonify(professor)
# Endpoint to query all professors
@app.route("/professors", methods=["GET"])
def get_professors():
all_professors = Professor.query.all()
result = professors_schema.dump(all_professors)
return jsonify(result.data)
# Endpoint to query a single professors
@app.route("/professor/<id>", methods=["GET"])
def get_professor(id):
professor = Professor.query.get(id)
return professor_schema.jsonify(professor)
if __name__ == '__main__':
app.run(debug=True)
| AgentIsaacson/rate-my-professor | app.py | app.py | py | 1,697 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path.abspath",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
17561869695 | from fastapi import FastAPI
import random
from typing import Optional, List
from models import User, Gender, Role
from uuid import uuid4
app = FastAPI()
db: List[User] = [
User(
id=uuid4(),
first_name="Daniel",
last_name="Villery",
gender=Gender.male,
roles=[Role.user, Role.admin],
)
]
@app.get("/")
async def root():
return {"example": "this is an example", "data": 0}
@app.get("/users")
async def get_users():
return db
@app.post("/users/new")
async def register_user(user: User):
db.append(user)
return {"id": user.id}
@app.get("/random")
def get_random():
rn: int = random.randint(0, 100)
return {"number": rn, "limit": 100}
@app.get("/random/{limit}")
async def get_random(limit: int):
rn: int = random.randint(0, limit)
return {"number": rn, "limit": limit}
@app.get("/beats")
def get_beats():
name: str = "name"
artist: str = "artist"
url: Optional[str] = None
return {"name": name, "artist": artist, "url": url}
| villeryd/beatsAPI | main.py | main.py | py | 1,031 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.FastAPI",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "models.User",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "models.User",
"line_number":... |
36061072150 | import functools
import uuid
import datetime
from dataclasses import asdict
from flask import (
Blueprint,
current_app,
flash,
redirect,
render_template,
session,
url_for,
request,
)
from movie_library.forms import LoginForm, RegisterForm, MovieForm, ExtendedMovieForm
from movie_library.models import User, Movie
from passlib.hash import pbkdf2_sha256
pages = Blueprint(
"pages", __name__, template_folder="templates", static_folder="static"
)
def login_required(route):
@functools.wraps(route)
def route_wrapper(*args, **kwargs):
if session.get("email") is None:
return redirect(url_for(".login"))
return route(*args, **kwargs)
return route_wrapper
#base route
@pages.route("/")
@login_required#user must be logged in to see this page
def index():
#get current user data and make a User object with it
user_data = current_app.db.user.find_one({"email": session["email"]})
user = User(**user_data)
#get movie data that is in the current users list of movies and make a list of Movie objects
movie_data = current_app.db.movie.find({"_id": {"$in": user.movies}})
movies = [Movie(**movie) for movie in movie_data]
return render_template(
"index.html",
title="Movies Watchlist",
movies_data=movies,
)
#route for registering users
@pages.route("/register", methods=["POST", "GET"])
def register():
#if session already has a logged in email redirect to base route
if session.get("email"):
return redirect(url_for(".index"))
form = RegisterForm()
#if form is submitted and validated then get the data from it and save it as user
if form.validate_on_submit():
user = User(
_id=uuid.uuid4().hex,
email=form.email.data,
password=pbkdf2_sha256.hash(form.password.data),
)
#add user to the database of users
current_app.db.user.insert_one(asdict(user))
#flash a success message
flash("User registered successfully", "success")
#redirect user to login page
return redirect(url_for(".login"))
return render_template(
"register.html", title="Movies Watchlist - Register", form=form
)
#route for logging in
@pages.route("/login", methods=["GET", "POST"])
def login():
#if user is already signed in redirect to base
if session.get("email"):
return redirect(url_for(".index"))
#create form and check validation
form = LoginForm()
if form.validate_on_submit():
#try to find the user in the db using the email from the form
user_data = current_app.db.user.find_one({"email": form.email.data})
#if couldnt find user data using the email flash message
if not user_data:
flash("Login credentials not correct", category="danger")
return redirect(url_for(".login"))
#create a user object using user_data that we got using the email
user = User(**user_data)
# check if the form password equals the user password
if user and pbkdf2_sha256.verify(form.password.data, user.password):
#populate the session with anything that we need and redirect to base
session["user_id"] = user._id
session["email"] = user.email
return redirect(url_for(".index"))
flash("Login credentials not correct", category="danger")
#if user couldnt be verified return to login page
return render_template("login.html", title="Movies Watchlist - Login", form=form)
#logout route
@pages.route("/logout")
def logout():
#clear everything from session except the theme
del session["email"]
del session["user_id"]
return redirect(url_for(".login"))
#route for adding movies using the form
@pages.route("/add", methods=["GET", "POST"])
@login_required
def add_movie():
form = MovieForm()
if form.validate_on_submit():
movie = Movie(
_id=uuid.uuid4().hex,
title=form.title.data,
director=form.director.data,
year=form.year.data,
)
current_app.db.movie.insert_one(asdict(movie))
current_app.db.user.update_one(
{"_id": session["user_id"]}, {"$push": {"movies": movie._id}}
)
return redirect(url_for(".movie", _id=movie._id))
return render_template(
"new_movie.html", title="Movies Watchlist - Add Movie", form=form
)
@pages.route("/edit/<string:_id>", methods=["GET", "POST"])
@login_required#user must be logged in
def edit_movie(_id: str):
#get movie class data
movie = Movie(**current_app.db.movie.find_one({"_id": _id}))
#create a form using our extended form class passing our movie object
form = ExtendedMovieForm(obj=movie)
if form.validate_on_submit():
#populate all the fields for the movie class
movie.title = form.title.data
movie.description = form.description.data
movie.year = form.year.data
movie.cast = form.cast.data
movie.series = form.series.data
movie.tags = form.tags.data
movie.video_link = form.video_link.data
#update the movie passing it as a dictionary so that mongodb can use it
current_app.db.movie.update_one(
{"_id": movie._id},
{"$set": asdict(movie)}
)
return redirect(url_for(".movie", _id=movie._id))
return render_template("movie_form.html", movie=movie, form=form)
#route for displaying a given movies details
@pages.get("/movie/<string:_id>")
def movie(_id: str):
#create a Movie class using the info that we get from a given movie using .find_one(_id)
movie = Movie(**current_app.db.movie.find_one({"_id": _id}))
return render_template("movie_details.html", movie=movie)
#route for changing rating of a movie
@pages.get("/movie/<string:_id>/rate")
@login_required#user must be logged in
def rate_movie(_id):
#get the new rating
rating = int(request.args.get("rating"))
#update the rating of the movie with the new rating
current_app.db.movie.update_one({"_id": _id}, {"$set": {"rating": rating}})
return redirect(url_for(".movie", _id=_id))
#for marking a movie as watched today
@pages.get("/movie/<string:_id>/watch")
@login_required#user must be logged in
def watch_today(_id):
#update the last_watched parameter with todays date
current_app.db.movie.update_one(
{"_id": _id},
{"$set": {"last_watched": datetime.datetime.today()}})
return redirect(url_for(".movie", _id=_id))
#route for choosing theme if this route gets called the theme switches
@pages.get("/toggle-theme")
def toggle_theme():
current_theme = session.get("theme")
if current_theme is None:# set the default theme if it doesn't exist
session["theme"] = "light"
elif current_theme == "dark":
session["theme"] = "light"
else:
session["theme"] = "dark"
# return the current page after switching themes
return redirect(request.args.get("current_page")) | ashereth/Movie-Watchlist | movie_library/routes.py | routes.py | py | 7,094 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask.session.get",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "flask.redirect",
... |
20520643564 | import argparse
import os
import PyInstaller.building.makespec
import PyInstaller.log
try:
from argcomplete import autocomplete
except ImportError:
def autocomplete(parser):
return None
def generate_parser():
p = argparse.ArgumentParser()
PyInstaller.building.makespec.__add_options(p)
PyInstaller.log.__add_options(p)
p.add_argument(
'scriptname',
nargs='+',
)
return p
def run():
p = generate_parser()
autocomplete(p)
args = p.parse_args()
PyInstaller.log.__process_options(p, args)
# Split pathex by using the path separator.
temppaths = args.pathex[:]
args.pathex = []
for p in temppaths:
args.pathex.extend(p.split(os.pathsep))
try:
name = PyInstaller.building.makespec.main(args.scriptname, **vars(args))
print('Wrote %s.' % name)
print('Now run pyinstaller.py to build the executable.')
except KeyboardInterrupt:
raise SystemExit("Aborted by user request.")
if __name__ == '__main__':
run()
| pyinstaller/pyinstaller | PyInstaller/utils/cliutils/makespec.py | makespec.py | py | 1,049 | python | en | code | 10,769 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "PyInstaller.building.makespec.building.makespec.__add_options",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "PyInstaller.building.makespec.building",
"line_number":... |
35235288051 | __author__ = "Rohan Pandit"
from algo import algorithm, triTueAlgo, withDelay
import numpy as np
from time import time
from random import randint
from flask import Flask, abort, jsonify, request
from flask_cors import CORS
screenSize = 700
app = Flask(__name__)
CORS(app)
@app.route('/optimize_route', methods=['POST'])
def optimize():
data = request.get_json(force=True)
cities = data['matrix']
path, length = triTueAlgo(cities)
return jsonify(length = length, path = path)
@app.route('/optimize_with_time', methods=['POST'])
def optimize_with_time():
data = request.get_json(force = True)
cities = data['matrix']
maximumTime = data['maximum_time']
path, length = withDelay(cities, maximumTime)
return jsonify(found = not (len(path) == 0), length = length, path = path)
app.run(host='0.0.0.0', port = 8888)
| petrpan26/ShipDirect | server/salesman.py | salesman.py | py | 826 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request.get_json",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request",
... |
20128360861 | import pybg.ql
import pybg.curves as curves
import pybg.instruments.bulletbond as bb
import pybg.instruments.sinkingfundbond as sf
from pybg.enums import (
DayCounters, Frequencies, BusinessDayConventions, Calendars
)
from datetime import date
dt0 = date(2008, 9, 15)
print("\nSetting eval date: %s" % dt0)
pybg.ql.set_eval_date(dt0)
govbondcurve = curves.CurveBase(
Calendars.UnitedStates(Calendars.GOVERNMENTBOND),
3,
DayCounters.Actual360(),
Frequencies.Semiannual,
BusinessDayConventions.Unadjusted,
DayCounters.ActualActual(DayCounters.Bond),
DayCounters.ActualActual(DayCounters.ISDA)
)
bcrv = curves.BondCurve(govbondcurve)
dated = [date(2005, 3, 15),
date(2005, 6, 15),
date(2006, 6, 30),
date(2002, 11, 15),
date(1987, 5, 15)
]
maturities = [
date(2010, 8, 31),
date(2011, 8, 31),
date(2013, 8, 31),
date(2018, 8, 15),
date(2038, 5, 15)
]
couponRates = [
0.02375,
0.04625,
0.03125,
0.04000,
0.04500
]
marketQuotes = [
100.390625,
106.21875,
100.59375,
101.6875,
102.140625
]
bond_ids = [
"B1", "B2", "B3", "B4", "B5"
]
depos = {
"3M": 0.0096,
"6M": 0.0145,
"1Y": 0.0194}
q = zip(marketQuotes, maturities, couponRates, dated)
bndcrv = dict(zip(bond_ids, q))
print("build bond curve...")
bcrv.update(bndcrv, depos)
output_line1 = "bond: {}, price/yield: {:7.3f}/{:6.3f}%"
output_line2 = " check {:7.3f} vs {:7.3f}"
for id, bndrow in bndcrv.items():
qt, mty, cpn, dtd = bndrow
b = bb.BulletBond(cpn, mty, dtd, Calendars.UnitedStates(Calendars.GOVERNMENTBOND))
print(output_line1.format(id, qt, 100.0*b.toYield(qt)))
b.setEngine(bcrv)
print(output_line2.format(qt, b.toPrice()))
#bulletbond
print("test bond")
dated = date(2003, 5, 15)
mty = date(2027, 5, 15)
bnd1 = bb.BulletBond(.06, mty, dated, Calendars.UnitedStates(Calendars.GOVERNMENTBOND))
#sinker
print("sinking fund bond")
sfbnd = sf.SinkingFundBond(.06,
mty,
(40., 40., 40.),
Frequencies.Annual,
dated)
| bondgeek/pybg | pybg_examples/demos/sinker.py | sinker.py | py | 2,295 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "datetime.date",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pybg.ql.ql.set_eval_date",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pybg.ql.ql",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pybg.ql",
... |
74807329952 | from playwright.sync_api import Playwright, sync_playwright
from main.pages.app import App
app = App()
def run(playwright: Playwright) -> None:
browser = playwright.chromium.launch(headless=False, slow_mo=1000)
context = browser.new_context()
page = context.new_page()
app.login_ui(page)
page.close()
context.close()
browser.close()
with sync_playwright() as playwright:
run(playwright)
| Lexamenrf44/ABarashkov_Python_Playwright_SauceDemo_project | main/specs/smoke/e2e.py | e2e.py | py | 424 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "main.pages.app.App",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "playwright.sync_api.Playwright",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "playwright.sync_api.chromium.launch",
"line_number": 8,
"usage_type": "call"
},
{
... |
38681629975 | import re
import itertools
import collections
reg = re.compile("(\\w+) would (\\w+) (\\d+) happiness units by sitting next to (\\w+).")
people = collections.defaultdict(dict)
arrangement_happiness = []
def parse_data(line):
    """Parse one input line and record attendee -> neighbour -> happiness delta.

    Lines that do not match the expected pattern are ignored.
    """
    match = re.match(reg, line)
    if not match:
        return
    attendee = match.group(1)
    action = match.group(2)
    seated_next = match.group(4)
    # "gain" adds happiness units, "lose" subtracts them.
    happiness = int(match.group(3)) if action == "gain" else -int(match.group(3))
    people[attendee][seated_next] = happiness
def get_arrangements():
    """Score every circular seating order; append each total to arrangement_happiness."""
    for seating in itertools.permutations(people.keys()):
        total = 0
        # Mutual happiness for each adjacent pair along the row...
        for left, right in zip(seating, seating[1:]):
            total += people[left][right] + people[right][left]
        # ...then close the circle between the last and the first guest.
        total += people[seating[-1]][seating[0]] + people[seating[0]][seating[-1]]
        arrangement_happiness.append(total)
def main():
    """Solve both puzzle parts: best seating score, then again with 'Me' added."""
    # Part one: total happiness of the best circular seating of the listed guests.
    with open("input.txt") as f:
        contents = f.readlines()
    for line in contents:
        parse_data(line)
    get_arrangements()
    print("Part one: ", max(arrangement_happiness))
    # Part two: seat an apathetic "Me" (0 happiness in both directions) and rescore.
    for person in people:
        people[person]["Me"] = 0
    people["Me"] = {person: 0 for person in people.keys()}
    arrangement_happiness.clear()
    get_arrangements()
    print("Part two: ", max(arrangement_happiness))
if __name__ == "__main__":
main() | nemo-0/advent-of-code | 2015/day13/solution.py | solution.py | py | 1,436 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "collections.defaultdict",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
... |
28452183977 | from PIL import Image
from requests import get # to make GET request
from torch.utils.data import DataLoader
import codecs
import copy
import errno
import gzip
import hashlib
import numpy as np
import os
import os.path
import torch
import torch.utils.data as data
import torchvision.transforms as transforms
import scipy.io
class NORB(data.Dataset):
    """NYU NORB dataset downloader/loader skeleton.

    On first use, downloads the gzipped .mat archives into <root>/raw/ and
    saves processed (images, labels) tuples with torch.save under
    <root>/processed/.  Item access is not implemented yet.
    """

    BASE_URL = "https://cs.nyu.edu/~ylclab/data/norb-v1.0/"
    TRAINING_FILE = 'training.pt'
    TEST_FILE = 'test.pt'

    def __init__(self, root="./data/", transform=None):
        """Create the folder layout and trigger a download when needed.

        Args:
            root: base directory holding the raw/ and processed/ subfolders.
            transform: optional per-sample transform; unused until
                __getitem__ is implemented.
        """
        self.root = os.path.expanduser(root)
        self.raw_folder = os.path.join(self.root, "raw/")
        self.processed_folder = os.path.join(self.root, "processed/")

        for directory in (self.root, self.raw_folder, self.processed_folder):
            os.makedirs(directory, exist_ok=True)

        if not self._check_exists():
            self.download()

    def __getitem__(self, index):
        """
        Args:
            index (int): Index

        Returns:
            tuple: (image, target) where target is index of the target class.
        """
        pass

    def __len__(self):
        # NOTE(review): self.data is never assigned anywhere in this class, so
        # len() raises AttributeError until loading is implemented — confirm.
        return len(self.data)

    def _check_exists(self):
        """Return True when both processed tensor files are already on disk."""
        return all(
            os.path.exists(os.path.join(self.processed_folder, name))
            for name in (self.TRAINING_FILE, self.TEST_FILE))

    def _download_part(self, archive_name):
        """Download one .mat.gz archive (unless its .mat exists) and extract it."""
        url = self.BASE_URL + archive_name
        fpath = os.path.join(self.raw_folder, archive_name)
        if not os.path.exists(fpath.replace(".gz", "")):
            download_url(url, fpath)
            self.extract_gzip(gzip_path=fpath, remove_finished=True)

    def download(self):
        """Fetch every raw archive, then convert and save the train/test sets."""
        # Each split ships as two numbered parts (01, 02) for both the images
        # ("dat") and the labels ("cat"); the four templates cover all of them.
        # This replaces four copy-pasted loops from the original.
        templates = [
            "norb-5x46789x9x18x6x2x108x108-training-{}-dat.mat.gz",
            "norb-5x46789x9x18x6x2x108x108-training-{}-cat.mat.gz",
            "norb-5x46789x9x18x6x2x108x108-testing-{}-dat.mat.gz",
            "norb-5x46789x9x18x6x2x108x108-testing-{}-cat.mat.gz",
        ]
        for template in templates:
            for i in range(1, 3):
                self._download_part(template.format("{0:0=2d}".format(i)))

        # process and save as torch files
        print('Processing...')
        training_set = (
            read_image_files(
                os.path.join(
                    self.raw_folder,
                    "norb-5x46789x9x18x6x2x108x108-training-{}-dat.mat")),
            read_label_files(
                os.path.join(
                    self.raw_folder,
                    "norb-5x46789x9x18x6x2x108x108-training-{}-cat.mat")))
        test_set = (
            read_image_files(
                os.path.join(
                    self.raw_folder,
                    "norb-5x46789x9x18x6x2x108x108-testing-{}-dat.mat")),
            read_label_files(
                os.path.join(
                    self.raw_folder,
                    "norb-5x46789x9x18x6x2x108x108-testing-{}-cat.mat")))

        # BUG FIX: the original wrote via self.training_file / self.test_file,
        # which do not exist (the class attributes are TRAINING_FILE /
        # TEST_FILE), so saving always raised AttributeError.
        with open(
                os.path.join(self.processed_folder, self.TRAINING_FILE),
                'wb') as f:
            torch.save(training_set, f)
        with open(os.path.join(self.processed_folder, self.TEST_FILE),
                  'wb') as f:
            torch.save(test_set, f)
        print('Done!')

    @staticmethod
    def extract_gzip(gzip_path, remove_finished=False):
        """Inflate gzip_path next to itself; optionally delete the archive."""
        print('Extracting {}'.format(gzip_path))
        with open(gzip_path.replace('.gz', ''), 'wb') as out_f, \
                gzip.GzipFile(gzip_path) as zip_f:
            out_f.write(zip_f.read())
        if remove_finished:
            os.unlink(gzip_path)
def read_label_files(path_template):
    """Parse the numbered '-cat.mat' label archives named by path_template.

    NOTE(review): unimplemented stub — always returns None, so the label half
    of the tuples saved by NORB.download() is currently None.
    """
    pass
def read_image_files(path_template):
    """Load the two numbered '-dat.mat' image archives named by path_template.

    NOTE(review): work in progress — this only prints each loaded mat dict and
    returns None, so NORB.download() currently saves (None, None) tuples.
    """
    # Get training images
    # NOTE(review): download() extracts files numbered '01'/'02' but this
    # formats plain '1'/'2' into the template — likely FileNotFoundError;
    # confirm against the files actually on disk.
    for i in range(1, 3):
        fpath = path_template.format(i)
        mat = scipy.io.loadmat(fpath)
        print(mat)
def download_url(url, file_name):
    """Download url to file_name, failing loudly on HTTP errors.

    Args:
        url: remote file to fetch.
        file_name: local path the bytes are written to.

    Raises:
        requests.HTTPError: on a 4xx/5xx response, so a server error page is
            never silently written out as dataset bytes (the original wrote
            response.content unconditionally).
    """
    print("Downloading " + url + " to " + file_name)
    response = get(url)
    response.raise_for_status()
    with open(file_name, "wb") as file:
        file.write(response.content)
| lokhande-vishnu/DeepHermites | Code/3-semisupervised_setting/aws_costestimates/epoch_measurements/norb/4hermites_v2l/lib/datasets/norb.py | norb.py | py | 5,587 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "torch.utils.data",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "os.path.expanduser",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.... |
7300614348 | from http import HTTPStatus
from uuid import UUID
from fastapi import APIRouter, Depends, HTTPException
from core.constants.exception_details import GENRE_NOT_FOUND
from core.utils import verify_auth_tokens
from models.genre import Genre
from services.genre import GenreService, get_genre_service
router = APIRouter()
@router.get('/',
            response_model=list[Genre | None],
            description='Список всех жанров',
            summary='Endpoint позволяет получить список жанров',
            response_description='Лист объектов Genre',
            tags=['Доступ ко всем элементам'],
            dependencies=[Depends(verify_auth_tokens)])
async def all_genres(genre_service: GenreService = Depends(get_genre_service)) -> list[Genre]:
    """Return every genre known to the service, or an empty list when there are none."""
    return await genre_service.get_list() or []
@router.get('/{genre_id}',
            response_model=Genre,
            description='Детальная информация по жанру',
            summary='Endpoint позволяет получить детальную информацию по жанру',
            response_description='Объект Genre',
            tags=['Доступ ко всем элементам'],
            dependencies=[Depends(verify_auth_tokens)])
async def genre_details(genre_id: UUID, genre_service: GenreService = Depends(get_genre_service)) -> Genre:
    """Return a single genre by id, answering 404 when it is unknown."""
    found = await genre_service.get_by_id(genre_id)
    if found:
        return found
    raise HTTPException(status_code=HTTPStatus.NOT_FOUND, detail=GENRE_NOT_FOUND)
| Moralex45/middle-python | asyncapi-service/src/api/v1/genres.py | genres.py | py | 1,661 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "fastapi.APIRouter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "services.genre.GenreService",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "fastapi.Depends",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "service... |
25463876505 | import unittest
import gflags as flags
import unittest as googletest
from closure_linter import errors
from closure_linter import runner
from closure_linter.common import erroraccumulator
flags.FLAGS.strict = True
class StrictTest(unittest.TestCase):
  """Tests scenarios where strict generates warnings."""
  def testUnclosedString(self):
    """Tests warnings are reported when nothing is disabled.
    b/11450054.
    """
    # A string literal that is never closed: the file fails to parse and the
    # linter should surface all three related error codes.
    original = [
        'bug = function() {',
        '  (\'foo\'\');',
        '};',
        '',
    ]
    expected = [errors.FILE_DOES_NOT_PARSE, errors.MULTI_LINE_STRING,
                errors.FILE_IN_BLOCK]
    self._AssertErrors(original, expected)
  def _AssertErrors(self, original, expected_errors):
    """Asserts that linting original reports exactly expected_errors codes."""
    # Trap gjslint's output and parse it to get the messages added.
    error_accumulator = erroraccumulator.ErrorAccumulator()
    runner.Run('testing.js', error_accumulator, source=original)
    error_nums = [e.code for e in error_accumulator.GetErrors()]
    # Order of reported errors is irrelevant; compare as sorted lists.
    error_nums.sort()
    expected_errors.sort()
    self.assertListEqual(error_nums, expected_errors)
if __name__ == '__main__':
googletest.main()
| hanpfei/chromium-net | third_party/catapult/third_party/closure_linter/closure_linter/strict_test.py | strict_test.py | py | 1,225 | python | en | code | 289 | github-code | 1 | [
{
"api_name": "gflags.FLAGS",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "closure_linter.errors.FILE_DOES_NOT_PARSE",
"line_number": 28,
"usage_type": "attribute"
},
... |
14656758384 | from jax_sandbox.common.dataset import TransitionBatch
import jax
import jax.numpy as jnp
@jax.jit
def returns_to_go(batch: "TransitionBatch", gamma: float = 1.0) -> jnp.ndarray:
    """Compute (optionally discounted) returns-to-go for a batch of rewards.

    Args:
        batch: transition batch whose ``rewards`` field has shape (B,).
        gamma: discount factor; 1.0 leaves the rewards undiscounted.

    Returns:
        jnp.ndarray of shape (B,): cumulative sums of ``gamma**t * r_t``.
    """
    rewards = batch.rewards
    num_steps = rewards.shape[0]
    # BUG FIX: the original branched with `if gamma < 1.0`, but under @jax.jit
    # gamma is a tracer, so that Python branch raises a concretization error
    # on every call.  Building the discount vector unconditionally is
    # trace-safe and degrades to all-ones when gamma == 1.0.
    discounts = gamma ** jnp.arange(num_steps)
    return jnp.cumsum(rewards * discounts)
{
"api_name": "jax_sandbox.common.dataset.TransitionBatch",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "jax.numpy.geomspace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 15,
"usage_type": "name"
},
{
"api_name"... |
28106266111 | import os
os.environ['CUDA_VISIBLE_DEVICES'] = '0' #'3,2' #'3,2,1,0'
import numpy as np
import pickle
import cv2
import time
from timeit import default_timer as timer
# torch libs
import torch
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torch.utils.data.sampler import SequentialSampler, RandomSampler
import torch.optim as optim
from tensorboardX import SummaryWriter
from common import RESULTS_DIR, IDENTIFIER, SEED, PROJECT_PATH
from utility.file import Logger, time_to_str
from net.rate import get_learning_rate, adjust_learning_rate
from net.resnet50_mask_rcnn.configuration import Configuration
from net.resnet50_mask_rcnn.model import MaskRcnnNet
from dataset.reader import ScienceDataset, multi_mask_to_annotation
import dataset.transform as tr
WIDTH, HEIGHT = 256, 256
OUT_DIR = RESULTS_DIR + '/mask-rcnn-50-gray500-02'
tb_log = SummaryWriter(OUT_DIR + '/tb_logs/train/' + IDENTIFIER)
def train_augment(image, multi_mask, meta, index):
    """Random geometric + photometric augmentation for one training sample.

    Returns (input, box, label, instance, meta, index) where input is the
    image as a CHW float tensor scaled to [0, 1].
    """
    # Geometric jitter: shift/scale/rotate, then random-crop to network size.
    image, multi_mask = tr.random_shift_scale_rotate_transform2(
        image,
        multi_mask,
        shift_limit=[0, 0],
        scale_limit=[1 / 2, 2],
        rotate_limit=[-45, 45],
        borderMode=cv2.BORDER_REFLECT_101,
        u=0.5)  #borderMode=cv2.BORDER_CONSTANT
    image, multi_mask = tr.random_crop_transform2(image, multi_mask, WIDTH, HEIGHT, u=0.5)
    # Flips / 90-degree rotation, each applied with probability 0.5.
    image, multi_mask = tr.random_horizontal_flip_transform2(image, multi_mask, 0.5)
    image, multi_mask = tr.random_vertical_flip_transform2(image, multi_mask, 0.5)
    image, multi_mask = tr.random_rotate90_transform2(image, multi_mask, 0.5)
    # Photometric jitter (hue / saturation / brightness).
    image = tr.random_hue_transform(image, u=0.5)
    image = tr.random_saturation_transform(image, u=0.5)
    image = tr.random_brightness_transform(image, u=0.5)
    image = tr.random_brightness_shift_transform(image, u=0.5)
    # HWC uint8 -> CHW float in [0, 1].
    input = torch.from_numpy(image.transpose((2, 0, 1))).float().div(255)
    box, label, instance = multi_mask_to_annotation(multi_mask)
    return input, box, label, instance, meta, index
def valid_augment(image, multi_mask, meta, index):
    """Deterministic validation-time preparation: fixed crop + tensor conversion."""
    image, multi_mask = tr.fix_crop_transform2(image, multi_mask, -1, -1, WIDTH, HEIGHT)
    # HWC -> CHW, then scale to [0, 1].
    chw_image = image.transpose((2, 0, 1))
    input = torch.from_numpy(chw_image).float().div(255)
    box, label, instance = multi_mask_to_annotation(multi_mask)
    return input, box, label, instance, meta, index
def train_collate(batch):
    """Collate (input, box, label, instance, meta, index) samples into one batch.

    The image tensors are stacked into a single tensor; every other field
    stays a plain list because boxes/labels/instances vary in size per image.
    """
    images, boxes, labels, instances, metas, indices = zip(*batch)
    return [
        torch.stack(images, 0),
        list(boxes),
        list(labels),
        list(instances),
        list(metas),
        list(indices),
    ]
def evaluate(net, test_loader):
    """Run net over test_loader and return the per-sample mean of each loss term.

    Returns:
        np.ndarray of shape (6,): [total, rpn_cls, rpn_reg, rcnn_cls,
        rcnn_reg, mask_cls] losses averaged over all samples.
    """
    test_num = 0
    test_loss = np.zeros(6, np.float32)
    for inputs, truth_boxes, truth_labels, truth_instances, metas, indices in test_loader:
        # No gradients are needed during evaluation.
        with torch.no_grad():
            inputs = Variable(inputs).cuda()
            # Forward pass populates the per-component loss attributes on net.
            net(inputs, truth_boxes, truth_labels, truth_instances)
            loss = net.loss(inputs, truth_boxes, truth_labels, truth_instances)
        # Weight each batch by its size so the final average is per sample.
        batch_size = len(indices)
        test_loss += batch_size * np.array((
            loss.cpu().data.numpy(),
            net.rpn_cls_loss.cpu().data.numpy(),
            net.rpn_reg_loss.cpu().data.numpy(),
            net.rcnn_cls_loss.cpu().data.numpy(),
            net.rcnn_reg_loss.cpu().data.numpy(),
            net.mask_cls_loss.cpu().data.numpy(),
        ))
        test_num += batch_size
    # Sanity check: every sample of the loader was consumed exactly once.
    assert (test_num == len(test_loader.sampler))
    return test_loss / test_num
def log_losses(train_loss, valid_loss, step):
    """Write paired train/valid scalars for every loss component to TensorBoard."""
    loss_titles = (
        'total_loss',
        'rpn_cls_loss',
        'rpn_reg_loss',
        'rcnn_cls_loss',
        'rcnn_reg_loss',
        'mask_cls_loss',
    )
    # Component i of both loss vectors corresponds to loss_titles[i].
    for position, title in enumerate(loss_titles):
        tb_log.add_scalars(
            title, {
                'train': train_loss[position],
                'valid': valid_loss[position]
            },
            global_step=step)
def run_train():
    """Full Mask R-CNN training loop: setup, periodic validation, TB logging,
    and checkpointing, resuming from best_model.pth when present."""
    out_dir = RESULTS_DIR + '/mask-rcnn-50-gray500-02'
    initial_checkpoint = RESULTS_DIR + '/mask-rcnn-50-gray500-02/checkpoint/best_model.pth'
    pretrain_file = RESULTS_DIR + '/mask-rcnn-50-gray500-02/checkpoint/best_model.pth'
    #None #RESULTS_DIR + '/mask-single-shot-dummy-1a/checkpoint/00028000_model.pth'
    # Parameter-name substrings excluded when loading pretrained weights.
    skip = ['crop', 'mask']
    ## setup -----------------
    os.makedirs(out_dir + '/checkpoint', exist_ok=True)
    os.makedirs(out_dir + '/train', exist_ok=True)
    log = Logger()
    log.open(out_dir + '/log.train.txt', mode='a')
    log.write('\n--- [START %s] %s\n\n' % (IDENTIFIER, '-' * 64))
    log.write('** some experiment setting **\n')
    log.write('\tSEED = %u\n' % SEED)
    log.write('\tPROJECT_PATH = %s\n' % PROJECT_PATH)
    log.write('\tout_dir = %s\n' % out_dir)
    log.write('\n')
    ## net ----------------------
    log.write('** net setting **\n')
    cfg = Configuration()
    net = MaskRcnnNet(cfg).cuda()
    if initial_checkpoint is not None:
        log.write('\tinitial_checkpoint = %s\n' % initial_checkpoint)
        # map_location keeps the load CPU-side; .cuda() above moves the net.
        net.load_state_dict(
            torch.load(initial_checkpoint, map_location=lambda storage, loc: storage))
        #with open(out_dir +'/checkpoint/configuration.pkl','rb') as pickle_file:
        #    cfg = pickle.load(pickle_file)
    if pretrain_file is not None:
        log.write('\tpretrain_file = %s\n' % pretrain_file)
        net.load_pretrain(pretrain_file, skip)
    log.write('%s\n\n' % (type(net)))
    log.write('%s\n' % (net.version))
    log.write('\n')
    ## optimiser ----------------------------------
    # iter_accum > 1 would accumulate gradients over several batches before
    # each optimizer step (effective batch = batch_size * iter_accum).
    iter_accum = 1
    batch_size = 8
    num_iters = 1000 * 1000
    iter_smooth = 20
    iter_log = 50
    iter_valid = 100
    iter_save = [0, num_iters - 1] + list(range(0, num_iters, 500))
    LR = None  #LR = StepLR([ (0, 0.01), (200, 0.001), (300, -1)])
    optimizer = optim.SGD(
        filter(lambda p: p.requires_grad, net.parameters()),
        lr=0.01 / iter_accum,
        momentum=0.9,
        weight_decay=0.0001)
    start_iter = 0
    start_epoch = 0.
    log.write('** dataset setting **\n')
    train_dataset = ScienceDataset(
        'train1_ids_gray2_500',
        mode='train',
        #'debug1_ids_gray_only_10', mode='train',
        #'disk0_ids_dummy_9', mode='train', #12
        #'train1_ids_purple_only1_101', mode='train', #12
        #'merge1_1', mode='train',
        transform=train_augment)
    train_loader = DataLoader(
        train_dataset,
        sampler=RandomSampler(train_dataset),
        batch_size=batch_size,
        drop_last=True,
        num_workers=4,
        pin_memory=True,
        collate_fn=train_collate)
    valid_dataset = ScienceDataset(
        'valid1_ids_gray2_43',
        mode='train',
        #'debug1_ids_gray_only_10', mode='train',
        #'disk0_ids_dummy_9', mode='train',
        #'train1_ids_purple_only1_101', mode='train', #12
        #'merge1_1', mode='train',
        transform=valid_augment)
    valid_loader = DataLoader(
        valid_dataset,
        sampler=SequentialSampler(valid_dataset),
        batch_size=batch_size,
        drop_last=False,
        num_workers=4,
        pin_memory=True,
        collate_fn=train_collate)
    log.write('\tWIDTH, HEIGHT = %d, %d\n' % (WIDTH, HEIGHT))
    log.write('\ttrain_dataset.split = %s\n' % (train_dataset.split))
    log.write('\tvalid_dataset.split = %s\n' % (valid_dataset.split))
    log.write('\tlen(train_dataset) = %d\n' % (len(train_dataset)))
    log.write('\tlen(valid_dataset) = %d\n' % (len(valid_dataset)))
    log.write('\tlen(train_loader) = %d\n' % (len(train_loader)))
    log.write('\tlen(valid_loader) = %d\n' % (len(valid_loader)))
    log.write('\tbatch_size = %d\n' % (batch_size))
    log.write('\titer_accum = %d\n' % (iter_accum))
    log.write('\tbatch_size*iter_accum = %d\n' % (batch_size * iter_accum))
    log.write('\n')
    log.write('** start training here! **\n')
    log.write(' optimizer=%s\n' % str(optimizer))
    log.write(' momentum=%f\n' % optimizer.param_groups[0]['momentum'])
    log.write(' LR=%s\n\n' % str(LR))
    log.write(' images_per_epoch = %d\n\n' % len(train_dataset))
    log.write(
        ' rate current_iter epoch num | valid_loss | train_loss | batch_loss | time \n'
    )
    log.write(
        '-------------------------------------------------------------------------------------------------------------------------------\n'
    )
    # Running statistics: train_loss is a smoothed window average, batch_loss
    # is the most recent batch, valid_loss is refreshed every iter_valid steps.
    train_loss = np.zeros(6, np.float32)
    train_acc = 0.0
    valid_loss = np.zeros(6, np.float32)
    batch_loss = np.zeros(6, np.float32)
    batch_acc = 0.0
    rate = 0
    start = timer()
    j = 0
    current_iter = 0
    last_saved_model_filepath = None
    while current_iter < num_iters:  # loop over the dataset multiple times
        sum_train_loss = np.zeros(6, np.float32)
        sum_train_acc = 0.0
        sum = 0
        net.set_mode('train')
        optimizer.zero_grad()
        for inputs, truth_boxes, truth_labels, truth_instances, metas, indices in train_loader:
            # Skip batches where no image contains any ground-truth box.
            if all(len(b) == 0 for b in truth_boxes): continue
            batch_size = len(indices)
            current_iter = j / iter_accum + start_iter
            epoch = (current_iter - start_iter
                     ) * batch_size * iter_accum / len(train_dataset) + start_epoch
            num_products = epoch * len(train_dataset)
            # Periodic validation pass + logging.
            if current_iter % iter_valid == 0:
                net.set_mode('valid')
                valid_loss = evaluate(net, valid_loader)
                net.set_mode('train')
                print('\r', end='', flush=True)
                log.write('%0.4f %5.1f k %6.1f %4.1f m | %0.3f %0.2f %0.2f %0.2f %0.2f %0.2f | %0.3f %0.2f %0.2f %0.2f %0.2f %0.2f | %0.3f %0.2f %0.2f %0.2f %0.2f %0.2f | %s\n' % (\
                         rate, current_iter/1000, epoch, num_products/1000000,
                         valid_loss[0], valid_loss[1], valid_loss[2], valid_loss[3], valid_loss[4], valid_loss[5],#valid_acc,
                         train_loss[0], train_loss[1], train_loss[2], train_loss[3], train_loss[4], train_loss[5],#train_acc,
                         batch_loss[0], batch_loss[1], batch_loss[2], batch_loss[3], batch_loss[4], batch_loss[5],#batch_acc,
                         time_to_str((timer() - start)/60)))
                log_losses(train_loss=train_loss, valid_loss=valid_loss, step=current_iter)
                time.sleep(0.01)
            # Periodic checkpointing of model weights and configuration.
            if current_iter in iter_save:
                torch.save(net.state_dict(),
                           out_dir + '/checkpoint/%08d_model.pth' % (current_iter))
                """
                torch.save({
                    'optimizer': optimizer.state_dict(),
                    'current_iter': current_iter,
                    'epoch': epoch,
                }, out_dir + '/checkpoint/%08d_optimizer.pth' % (current_iter))
                """
                with open(out_dir + '/checkpoint/configuration.pkl', 'wb') as pickle_file:
                    pickle.dump(cfg, pickle_file, pickle.HIGHEST_PROTOCOL)
            # learning rate scheduler -------------
            if LR is not None:
                lr = LR.get_rate(current_iter)
                if lr < 0: break
                adjust_learning_rate(optimizer, lr / iter_accum)
            rate = get_learning_rate(optimizer) * iter_accum
            # one current_iter update -------------
            inputs = Variable(inputs).cuda()
            net(inputs, truth_boxes, truth_labels, truth_instances)
            loss = net.loss(inputs, truth_boxes, truth_labels, truth_instances)
            # accumulated update
            loss.backward()
            if j % iter_accum == 0:
                #torch.nn.utils.clip_grad_norm(net.parameters(), 1)
                optimizer.step()
                optimizer.zero_grad()
            # print statistics ------------
            batch_acc = 0 #acc[0][0]
            batch_loss = np.array((
                loss.cpu().data.numpy(),
                net.rpn_cls_loss.cpu().data.numpy(),
                net.rpn_reg_loss.cpu().data.numpy(),
                net.rcnn_cls_loss.cpu().data.numpy(),
                net.rcnn_reg_loss.cpu().data.numpy(),
                net.mask_cls_loss.cpu().data.numpy(),
            ))
            sum_train_loss += batch_loss
            sum_train_acc += batch_acc
            sum += 1
            # Refresh the smoothed training stats every iter_smooth iterations.
            if current_iter % iter_smooth == 0:
                train_loss = sum_train_loss / sum
                train_acc = sum_train_acc / sum
                sum_train_loss = np.zeros(6, np.float32)
                sum_train_acc = 0.
                sum = 0
            print('\r%0.4f %5.1f k %6.1f %4.1f m | %0.3f %0.2f %0.2f %0.2f %0.2f %0.2f | %0.3f %0.2f %0.2f %0.2f %0.2f %0.2f | %0.3f %0.2f %0.2f %0.2f %0.2f %0.2f | %s %d,%d,%s' % (\
                     rate, current_iter/1000, epoch, num_products/1000000,
                     valid_loss[0], valid_loss[1], valid_loss[2], valid_loss[3], valid_loss[4], valid_loss[5],#valid_acc,
                     train_loss[0], train_loss[1], train_loss[2], train_loss[3], train_loss[4], train_loss[5],#train_acc,
                     batch_loss[0], batch_loss[1], batch_loss[2], batch_loss[3], batch_loss[4], batch_loss[5],#batch_acc,
                     time_to_str((timer() - start)/60) ,current_iter,j, ''), end='',flush=True)#str(inputs.size()))
            j = j + 1
        pass  #-- end of one data loader --
    pass  #-- end of all iterations --
    if 1:  #save last
        torch.save(net.state_dict(), out_dir + '/checkpoint/%d_model.pth' % (current_iter))
        """
        torch.save({
            'optimizer': optimizer.state_dict(),
            'current_iter': current_iter,
            'epoch': epoch,
        }, out_dir + '/checkpoint/%d_optimizer.pth' % (current_iter))
        """
    log.write('\n')
# main #################################################################
if __name__ == '__main__':
print('%s: calling main function ... ' % os.path.basename(__file__))
run_train()
print('\nsucess!')
| shvetsiya/mask-rcnn | train.py | train.py | py | 14,440 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "common.RESULTS_DIR",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "tensorboardX.SummaryWriter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "common.I... |
1061377237 | import os
import pandas as pd
import numpy as np
from sklearn.impute import KNNImputer
from sklearn.preprocessing import OrdinalEncoder
from sklearn.pipeline import Pipeline
from sklearn.compose import ColumnTransformer
from imblearn.over_sampling import RandomOverSampler
import warnings
warnings.filterwarnings("ignore")
import logging
os.makedirs("Application_Logs", exist_ok=True)
logging.basicConfig(
filename=os.path.join("Application_Logs", 'running_logs.log'),
level=logging.INFO,
format="[%(asctime)s: %(levelname)s: %(module)s]: %(message)s",
filemode="a"
)
class Preprocessor:
    """End-to-end preparation of the thyroid dataset: null-value audit,
    encoding, imputation, class balancing, and the train/valid pipeline."""
    def __init__(self):
        """
        Initializing the log file object
        """
        pass
    def check_null_values(self, data):
        """
        Checking the null values in the Training Data and storing
        the features in the DataWithMissingValues.CSV
        """
        feature_with_null = [feature for feature in data.columns if data[feature].isnull().sum() > 0]
        logging.info("Start of Train Data Preprocessing. checking the null values of the training data")
        if len(feature_with_null) > 0:
            # Persist per-feature missing-value counts for later inspection.
            dataframe_with_null = data[feature_with_null].isnull().sum().to_frame().reset_index()
            dataframe_with_null.columns = ["Feature Name", "Number of Missing Values"]
            Missing_Values = "MissingValues"
            os.makedirs(Missing_Values, exist_ok=True)
            dataframe_with_null.to_csv("MissingValues/DataWithMissingValues.CSV", index=False)
            logging.info("Feature Have Some Missing Values.Check Missing Values folder for features having missing values.Exiting the function")
        else:
            logging.info("No Missing Values in any feature.Exiting the function")
    def encode_and_impute_data(self, data):
        """
        Encoding some of the features of the training data and
        dividing the features into numerical and categorical
        :return: training data
        :rtype: DataFrame
        """
        logging.info("Entered the Encode and Impute method function of training")
        data["sex"] = np.where(data["sex"] == "F", 0, 1)
        data["referral_source"] = data["referral_source"].map({"other": 0, "SVI": 1, "SVHC": 2, "STMW": 4, "SVHD": 5})
        data["Class"] = data["Class"].map({"negative": 0
                                              , "compensated_hypothyroid": 1
                                              , "primary_hypothyroid": 2
                                              , "secondary_hypothyroid": 3})
        # Heuristic: fewer than 10 distinct values -> treat as categorical.
        self.categorical_features = [feature for feature in data.columns if len(data[feature].unique()) < 10
                                     and feature not in ["Class", "sex", "referral_source"]]
        self.numerical_features = [feature for feature in data.columns if feature not in self.categorical_features
                                   and feature not in ["Class"]]
        logging.info("Exited the Encode and Impute method of training")
        return data
    def separate_label_feature(self, data, label_name):
        """
        Separating features into dependent and independent features
        :return: X,y
        :rtype: DataFrame, Series
        """
        logging.info("Entered the separate_label_feature function of training")
        self.X = data.drop(label_name, axis=1)
        self.y = data[label_name]
        logging.info("Labels are separated into dependent and independent.Exiting the function of training")
        return self.X, self.y
    def handle_imbalance_data(self, x_train, y_train, x_valid, y_valid):
        """
        Handling the imbalanceness of the training data
        :return: x_train_sampled, y_train_sampled, x_valid_sampled, y_valid_sampled
        :rtype: DataFrame and Series
        """
        logging.info("Entered the handle_imbalance_data function of training")
        # NOTE(review): random oversampling is applied to the validation split
        # too, which can inflate validation metrics — confirm this is intended.
        rdsample = RandomOverSampler()
        x_train_sampled, y_train_sampled = rdsample.fit_resample(x_train, y_train)
        x_valid_sampled, y_valid_sampled = rdsample.fit_resample(x_valid, y_valid)
        logging.info("Balancing of data is done.Exiting the function of training")
        return x_train_sampled, y_train_sampled, x_valid_sampled, y_valid_sampled
    def preprocessor_pipeline(self, x_train_sampled, x_valid_sampled):
        """
        Creating a pipeline to encode and impute the features of the training data
        :return: x_train_processed, x_valid_processed
        :rtype: Numpy Array
        """
        logging.info("Entered the preprocessor_pipeline function of training ")
        # Numeric columns: KNN imputation; categorical columns: ordinal-encode
        # first, then KNN-impute the resulting codes.
        numerical_transformer = KNNImputer(n_neighbors=2, weights='uniform', missing_values=np.nan)
        categorical_transformer = Pipeline(steps=[
            ('encoder', OrdinalEncoder()),
            ('imputer', KNNImputer(n_neighbors=2, weights='uniform', missing_values=np.nan))
        ])
        preprocessor = ColumnTransformer(
            transformers=[
                ('num', numerical_transformer, self.numerical_features),
                ('cat', categorical_transformer, self.categorical_features)
            ])
        # Fit on train only; validation is transformed with train statistics.
        x_train_processed = preprocessor.fit_transform(x_train_sampled)
        x_valid_processed = preprocessor.transform(x_valid_sampled)
        x_train_processed = pd.DataFrame(x_train_processed, columns=x_train_sampled.columns)
        x_valid_processed = pd.DataFrame(x_valid_processed, columns=x_valid_sampled.columns)
        logging.info("All the features are encoded and imputed of training data. Exiting the module")
        return x_train_processed, x_valid_processed
| guptadikshant/DetectionOfThyroid | DataPreprocessing/data_preprocess.py | data_preprocess.py | py | 5,631 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.basicConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join"... |
26407571543 | import discord
import logging
from dotenv import load_dotenv
import os
load_dotenv()
KEY = os.getenv('DISCORD_KEY')
#logging set up
logger = logging.getLogger('discord')
logger.setLevel(logging.DEBUG)
handler = logging.FileHandler(filename='discord.log', encoding='utf-8', mode='w')
handler.setFormatter(logging.Formatter('%(asctime)s:%(levelname)s:%(name)s: %(message)s'))
logger.addHandler(handler)
class MyClient(discord.Client):
    """Minimal Discord client that logs its own login and every visible message."""
    async def on_ready(self):
        # Fired once the gateway connection is up and the bot is logged in.
        print('Logged on as {0}!'.format(self.user))
    async def on_message(self, message):
        # Fired for every message the bot can see (including its own).
        print('Message from {0.author}: {0.content}'.format(message))
client = MyClient()
client.run(f'{KEY}')
| julianjohnson10/Discord-Bot | DiscordBot.py | DiscordBot.py | py | 674 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.getLogger",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"lin... |
210457700 | from django.shortcuts import render
from django.contrib import messages
from polls.forms import RegistrationForm
def index(request):
    """Render the registration page; on POST, validate the form and flash the outcome.

    GET shows an unbound form, POST a bound one; any other method renders
    with form=None, matching the original behavior.
    """
    shown_form = None
    if request.method == 'GET':
        shown_form = RegistrationForm()
    elif request.method == 'POST':
        shown_form = RegistrationForm(request.POST)
        if shown_form.is_valid():
            cleaned_data = shown_form.cleaned_data
            print(cleaned_data)
            messages.success(request, 'Your data has been submitted')
        else:
            messages.error(request, 'Something is wrong in form.')
    return render(request, 'polls/index.html', {'form': shown_form})
| slow999/DjangoAndReactComponentForm | polls/views.py | views.py | py | 704 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "polls.forms.RegistrationForm",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "polls.forms.RegistrationForm",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.contrib.messages.success",
"line_number": 18,
"usage_type": "call"
},
... |
27277520150 | #!/usr/bin/env python3
import requests
from bs4 import BeautifulSoup
from urllib.parse import urlparse
import sys
# crawling
def download_page(url):
    """Fetch url, retrying on non-200 responses with a short pause.

    The original retried forever with no delay (a hot spin against the
    server); the retry semantics are kept but bounded.

    Returns:
        str: the response body text.

    Raises:
        RuntimeError: if the page still is not 200 after max_attempts tries.
    """
    import time  # local import: this dump's module imports do not include time
    max_attempts = 10
    for _ in range(max_attempts):
        resp = requests.get(url)
        if resp.status_code == 200:
            return resp.text
        time.sleep(1)  # back off instead of hammering the server
    raise RuntimeError("failed to fetch {} after {} attempts".format(url, max_attempts))
def parse_html(url, html):
    """Extract the self-post body text from a Reddit thread page.

    The post id is the third-from-last path segment of the thread URL; the
    body text lives under div#thing_t3_<id> div.md.
    """
    path_segments = urlparse(url).path.split('/')
    post_id = path_segments[-3]
    document = BeautifulSoup(html, 'html.parser')
    body_div = document.select('div#thing_t3_{0} div.md'.format(post_id))[0]
    return body_div.get_text()
if __name__ == '__main__':
url = sys.argv[1]
html = download_page(url)
content = parse_html(url, html)
print(content)
| TeddyHartanto/searchreddit | search_engine.py | search_engine.py | py | 643 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "urllib.parse.urlparse",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
... |
15189440155 | from typing import Any, Callable, Dict, List, Optional, Tuple, Union
import numpy as np
from Orange.data import ContinuousVariable, Domain, Table, Variable
from Orange.misc.utils.embedder_utils import EmbedderCache
from Orange.util import dummy_callback
from orangecontrib.imageanalytics.local_embedder import LocalEmbedder
from orangecontrib.imageanalytics.server_embedder import ServerEmbedder
from orangecontrib.imageanalytics.squeezenet_model import SqueezenetModel
from orangecontrib.imageanalytics.utils.image_utils import extract_paths
MODELS = {
"inception-v3": {
"name": "Inception v3",
"description": "Google's Inception v3 model trained on ImageNet.",
"target_image_size": (299, 299),
"layers": ["penultimate"],
"order": 0,
# batch size tell how many images we send in parallel, this number is
# high for inception since it has many workers, but other embedders
# send less images since bottleneck are workers, this way we avoid
# ReadTimeout because of images waiting in a queue at the server
"batch_size": 500,
},
"painters": {
"name": "Painters",
"description": "A model trained to predict painters from artwork\nimages.",
"target_image_size": (256, 256),
"layers": ["penultimate"],
"order": 4,
"batch_size": 500,
},
"deeploc": {
"name": "DeepLoc",
"description": "A model trained to analyze yeast cell images.",
"target_image_size": (64, 64),
"layers": ["penultimate"],
"order": 5,
"batch_size": 500,
},
"vgg16": {
"name": "VGG-16",
"description": "16-layer image recognition model trained on\nImageNet.",
"target_image_size": (224, 224),
"layers": ["penultimate"],
"order": 2,
"batch_size": 500,
},
"vgg19": {
"name": "VGG-19",
"description": "19-layer image recognition model trained on\nImageNet.",
"target_image_size": (224, 224),
"layers": ["penultimate"],
"order": 3,
"batch_size": 500,
},
"openface": {
"name": "openface",
"description": "Face recognition model trained on FaceScrub and\n"
"CASIA-WebFace datasets.",
"target_image_size": (256, 256),
"layers": ["penultimate"],
"order": 6,
"batch_size": 500,
},
"squeezenet": {
"name": "SqueezeNet",
"description": "Deep model for image recognition that achieves \n"
"AlexNet-level accuracy on ImageNet with \n"
"50x fewer parameters.",
"target_image_size": (227, 227),
"layers": ["penultimate"],
"order": 1,
"is_local": True,
"batch_size": 16,
"model": SqueezenetModel,
},
}
class ImageEmbedder:
    """
    Client side functionality for accessing a remote image embedding backend.
    Attributes
    ----------
    model
        Name of the model, must be one from MODELS dictionary
    server_url
        The url of the server with embedding backend.
    Examples
    --------
    >>> import Orange
    >>> from orangecontrib.imageanalytics.image_embedder import ImageEmbedder
    >>> # embedding from list of paths
    >>> image_file_paths = ['image001.jpg', 'image001.jpg']
    >>> with ImageEmbedder(model='model_name') as emb:
    ...    embeddings = emb(image_file_paths)
    >>> # embedding from orange table
    >>> table = Orange.data.Table('Table_with_image_path.csv')
    >>> with ImageEmbedder(model='model_name') as emb:
    ...    embeddings = emb(table, col="image_path_column")
    """
    # Lazily created by _init_embedder() on the first __call__;
    # stays None until then so clear_cache() can work without a network hit.
    _embedder = None
    def __init__(
        self,
        model: str = "inception-v3",
        server_url: str = "https://api.garaza.io/",
    ):
        """Store the model name and server URL; validates the model name
        against MODELS (raises ValueError for an unknown model)."""
        self.server_url = server_url
        self.model = model
        self._model_settings = self._get_model_settings_confidently()
    def is_local_embedder(self) -> bool:
        """
        Tells whether selected embedder is local or not.
        """
        # Only models whose MODELS entry sets "is_local" (e.g. squeezenet)
        # run locally; everything else goes through the server.
        return self._model_settings.get("is_local", False)
    def _get_model_settings_confidently(self) -> Dict[str, Any]:
        """
        Returns the dictionary with model settings.
        Raises ValueError when self.model is not a key of MODELS.
        Returns
        -------
        The dictionary with model settings
        """
        if self.model not in MODELS.keys():
            model_error = "'{:s}' is not a valid model, should be one of: {:s}"
            available_models = ", ".join(MODELS.keys())
            raise ValueError(model_error.format(self.model, available_models))
        return MODELS[self.model]
    def _init_embedder(self) -> None:
        """
        Init local or server embedder.
        """
        if self.is_local_embedder():
            self._embedder = LocalEmbedder(self.model, self._model_settings)
        else:
            self._embedder = ServerEmbedder(
                self.model,
                self._model_settings["batch_size"],
                self.server_url,
                "image",
                self._model_settings["target_image_size"]
            )
    def __call__(
        self,
        data: Union[Table, List[str], np.ndarray],
        col: Optional[Union[str, Variable]] = None,
        callback: Optional[Callable] = dummy_callback,
    ) -> Union[Tuple[Table, Table, int], List[List[float]]]:
        """
        Embed images.
        Parameters
        ----------
        data
            Data contains the path to images (locally or online). It can be
            Orange data table or list/array. When data table on input col
            parameter must define which column in the table contains images.
        col
            The column with images in Orange data table. It is not required
            when data are list or array.
        callback
            Optional callback - function that is called for every embedded
            image and is used to report the progress.
        Returns
        -------
        Embedded images. When data is Table it returns a tuple:
        1) original table with embedded images appended to it, 2) table with
        skipped images, 3) number of skipped images.
        When data is array/list it returns the list of list with embeddings,
        each image is represented with vector of numbers.
        """
        assert data is not None
        assert isinstance(data, (np.ndarray, list, Table))
        self._init_embedder()
        if isinstance(data, Table):
            assert col is not None, "Please provide a column for image path"
            # if table on input tables on output
            return self.from_table(data, col=col, callback=callback)
        elif isinstance(data, (np.ndarray, list)):
            # if array-like on input array-like on output
            return self._embedder.embedd_data(data, callback=callback)
    def from_table(
        self,
        data: Table,
        col: Union[str, Variable] = "image",
        callback: Callable = None,
    ) -> Tuple[Table, Table, int]:
        """
        Calls embedding when data are provided as a Orange Table.
        Parameters
        ----------
        data
            Data table with image paths
        col
            The column with image paths
        callback
            Optional callback - function that is called for every embedded
            image and is used to report the progress.
        """
        file_paths = extract_paths(data, data.domain[col])
        embeddings_ = self._embedder.embedd_data(file_paths, callback=callback)
        return ImageEmbedder.prepare_output_data(data, embeddings_)
    def __enter__(self) -> "ImageEmbedder":
        return self
    def __exit__(self, _, __, ___) -> None:
        # No resources to release here; kept so the class supports the
        # `with ImageEmbedder(...) as emb:` idiom shown in the docstring.
        pass
    def __del__(self) -> None:
        # Route destruction through __exit__ so both cleanup paths stay in sync.
        self.__exit__(None, None, None)
    @staticmethod
    def construct_output_data_table(
        embedded_images: Table, embeddings_: np.ndarray
    ) -> Table:
        """
        Join the orange table with embeddings.
        Parameters
        ----------
        embedded_images
            Table with images that were successfully embedded
        embeddings_
            Embeddings for images from table
        Returns
        -------
        Table with added embeddings to data.
        """
        # One continuous attribute per embedding dimension, named n0, n1, ...
        new_attributes = [
            ContinuousVariable("n{:d}".format(d)) for d in range(embeddings_.shape[1])
        ]
        # prevent embeddings to be shown in long drop-downs in e.g. scatterplot
        for a in new_attributes:
            a.attributes["hidden"] = True
        domain_new = Domain(
            list(embedded_images.domain.attributes) + new_attributes,
            embedded_images.domain.class_vars,
            embedded_images.domain.metas,
        )
        table = embedded_images.transform(domain_new)
        with table.unlocked(table.X):  # writing to fresh part, can be unlocked
            table[:, new_attributes] = embeddings_
        return table
    @staticmethod
    def prepare_output_data(
        input_data: Table, embeddings_: List[List[float]]
    ) -> Tuple[Table, Table, int]:
        """
        Prepare output data when data table on input.
        Parameters
        ----------
        input_data
            The table with original data that are joined with embeddings
        embeddings_
            List with embeddings
        Returns
        -------
        Tuple where first parameter is table with embedded images, the second
        table with skipped images and third the number of skipped images.
        """
        # An image is "skipped" when the embedder returned None or an
        # empty vector for it.
        skipped_images_bool = [x is None or len(x) == 0 for x in embeddings_]
        if np.any(skipped_images_bool):
            skipped_images = input_data[skipped_images_bool].copy()
            skipped_images.name = "Skipped images"
            num_skipped = len(skipped_images)
        else:
            num_skipped = 0
            skipped_images = None
        embedded_images_bool = np.logical_not(skipped_images_bool)
        if np.any(embedded_images_bool):
            embedded_images = input_data[embedded_images_bool]
            embeddings_ = [
                e for e, b in zip(embeddings_, embedded_images_bool) if b
            ]
            embeddings_ = np.vstack(embeddings_)
            embedded_images = ImageEmbedder.construct_output_data_table(
                embedded_images, embeddings_
            )
            # keep the original row ids so results can be joined back
            embedded_images.ids = input_data.ids[embedded_images_bool]
            embedded_images.name = "Embedded images"
        else:
            embedded_images = None
        return embedded_images, skipped_images, num_skipped
    def clear_cache(self) -> None:
        """
        Function clear cache for the selected embedder. If embedder is loaded
        cache is cleaned from its dict otherwise we load cache and clean it
        from file.
        """
        if self._embedder:
            # embedder is loaded so we clean its cache
            self._embedder.clear_cache()
        else:
            # embedder is not initialized yet - clear it cache from file
            cache = EmbedderCache(self.model)
            cache.clear_cache()
if __name__ == "__main__":
    # Smoke test: embed one bundled test image with the local SqueezeNet
    # model (no server round-trip needed), after clearing any stale cache.
    image_file_paths = ["tests/test_images/example_image_0.jpg"]
    # with ImageEmbedder(model='inception-v3') as embedder:
    with ImageEmbedder(model="squeezenet") as embedder:
        embedder.clear_cache()
        print(embedder(image_file_paths))
| biolab/orange3-imageanalytics | orangecontrib/imageanalytics/image_embedder.py | image_embedder.py | py | 11,435 | python | en | code | 32 | github-code | 1 | [
{
"api_name": "orangecontrib.imageanalytics.squeezenet_model.SqueezenetModel",
"line_number": 77,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 126,
"usage_type": "name"
},
{
"api_name": "typing.Any",
"line_number": 126,
"usage_type": "name"
},
{... |
36159117274 | import os
import json
class Config:
    """Tiny JSON-file-backed configuration helper.

    A configuration is a single JSON document on disk, addressed by its
    file path.  All methods are static, so the class is just a namespace.
    """
    @staticmethod
    def getConfig(name: str):
        """Load and return the JSON document stored at *name*.

        Returns None when the file does not exist.
        """
        if not os.path.exists(name):
            return None
        with open(name, "r") as fp:
            return json.loads(fp.read())
    @staticmethod
    def saveConfig(name: str, conf: dict):
        """Serialize *conf* as JSON to the file *name*.

        Missing parent directories are created first.  The previous
        implementation used os.mkdir, which fails both for nested missing
        directories and for a bare filename whose dirname is "".
        """
        dirname = os.path.dirname(name)
        if dirname:
            # makedirs handles nested paths; exist_ok avoids a race with
            # a concurrent creator.
            os.makedirs(dirname, exist_ok=True)
        with open(name, "w") as fp:
            fp.write(json.dumps(conf))
    @staticmethod
    def getValue(confname: str, key: str):
        """Return conf[key] from the config file *confname*.

        Returns None when the file is missing or the key is absent.
        """
        conf = Config.getConfig(confname)
        if conf is None:
            return None
        if key in conf:
            return conf[key]
        return None
    @staticmethod
    def setValue(confname: str, key: str, val):
        """Set conf[key] = val in *confname* and write the file back.

        Silently does nothing when the file does not exist (matches the
        original behaviour: setValue never creates a new config).
        """
        conf = Config.getConfig(confname)
        if conf is None:
            return
        conf[key] = val
        # Reuse the single write path instead of duplicating open/dump here.
        Config.saveConfig(confname, conf)
| dhy2000/CO_Judger | configs/config.py | config.py | py | 1,085 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_numb... |
2422070701 | from tensorboard.backend.event_processing import event_accumulator
import matplotlib.pyplot as plt
from os import listdir
from os.path import isfile, join
import math
import numpy as np
import sys
import tuneConfigurations
import warnings
from ray.tune import Analysis
plt.rcParams['figure.dpi'] = 200
def plot(configs, sets='test', save=False, colorsFirst=False, title="", limits=None):
    """Plot loss and metric curves for one or more tune configurations.

    For each (set, config) pair the best trial is picked from the Ray Tune
    analysis (max/min of ``bestKey`` depending on ``bestSign``), its
    TensorBoard event files are read, and loss/metric curves are drawn on
    two stacked axes.  ``limits`` optionally slices the curves to a
    [start, end] epoch window; ``save`` writes the figure to img/plot.eps.
    Missing configurations are warned about and skipped.
    """
    keyLoss = 'loss'
    lineStyles = ['solid', 'dashed', 'dotted', 'dashdot']
    lineColors = ['C0', 'C1', 'C2', 'C3', 'C4', 'C5', 'C6', 'C7', 'C8', 'C9']
    # accept a single config / set name as well as lists
    if not type(configs) is list:
        configs = [configs]
    if not type(sets) is list:
        sets = [sets]
    # each curve needs a unique (style, color) combination
    if len(configs)*len(sets) > len(lineStyles)*len(lineColors):
        raise ValueError("Too many curves to plot, {} of max {}.".format(len(configs)*len(sets), len(lineStyles)*len(lineColors)))
    fig = plt.figure(figsize=(8,9))
    #gs = fig.add_gridspec(2,1)
    #axLoss = fig.add_subplot(gs[0, 0])
    #axAcc = fig.add_subplot(gs[1, 0])
    axLoss = fig.add_axes([0.07, 0.53, 0.6, 0.42])
    axAcc = fig.add_axes([0.07, 0.05, 0.6, 0.42])
    #fig, (axLoss, axAcc) = plt.subplots(2)
    metrics = []
    i = 0
    # NOTE(review): `set` shadows the builtin here; left unchanged.
    for set in sets:
        for config in configs:
            # config is the *name* of an object in tuneConfigurations
            myConfig = getattr(sys.modules['tuneConfigurations'], config)
            metrics.append(myConfig.trackMetric)
            #use tune to pick best in run
            analysis = Analysis(join("tuneOutput", myConfig.path))
            mode = ("max" if myConfig.bestSign == '>' else "min")
            #print("best hyperparameters for {}: {}".format(config, analysis.get_best_config(metric=myConfig.bestKey, mode=mode)))
            tunePath = analysis.get_best_logdir(metric=myConfig.bestKey, mode=mode)
            expPath = join(tunePath,'files', 'tensorBoard')
            # scalar tag convention: "<metric>_<metric>"
            keyAcc = "{}_{}".format(metrics[-1], metrics[-1])
            # metrics.append(getattr(sys.modules['configurations'], config).trackMetric)
            # keyAcc = "{}_{}".format(metrics[-1], metrics[-1])
            # expPath = join('files', getattr(sys.modules['configurations'], config).path, 'tensorBoard')
            try:
                # sub-directories of expPath are the scalar tags
                keys = [f for f in listdir(expPath) if not isfile(join(expPath, f))]
            except FileNotFoundError:
                warnings.warn("Configuration {} not present. Skipping.".format(config))
                continue
            points = {}
            for k in keys:
                eventPathPart = join(expPath, k, set)
                # concatenate all event files of this tag/set, in name order
                for runPath in sorted([f for f in listdir(eventPathPart) if isfile(join(eventPathPart, f))]):
                    eventPath = join(eventPathPart, runPath)
                    ea = event_accumulator.EventAccumulator(eventPath)
                    ea.Reload()
                    if not k in points:
                        points[k] = [[v.step for v in ea.Scalars(k)], [v.value for v in ea.Scalars(k)]]
                    else:
                        points[k][0].extend([v.step for v in ea.Scalars(k)])
                        points[k][1].extend([v.value for v in ea.Scalars(k)])
            if limits is None:
                valuesLoss = points[keyLoss]
                valuesAcc = points[keyAcc]
            else:
                # slice both steps and values to the requested window
                valuesLoss = [points[keyLoss][i][limits[0]:limits[1]] for i in [0,1]]
                valuesAcc = [points[keyAcc][i][limits[0]:limits[1]] for i in [0,1]]
            linesLoss = axLoss.plot(valuesLoss[0], valuesLoss[1], label="{} {}".format(config, set))
            linesAcc = axAcc.plot(valuesAcc[0], valuesAcc[1], label="{} {}".format(config, set))
            # colorsFirst cycles colors before line styles, otherwise styles first
            if colorsFirst:
                linesLoss[0].set_color(lineColors[i%len(lineColors)])
                linesLoss[0].set_linestyle(lineStyles[(i//len(lineColors))%len(lineStyles)])
                linesAcc[0].set_color(lineColors[i%len(lineColors)])
                linesAcc[0].set_linestyle(lineStyles[(i//len(lineColors))%len(lineStyles)])
            else:
                linesLoss[0].set_linestyle(lineStyles[i%len(lineStyles)])
                linesLoss[0].set_color(lineColors[(i//len(lineStyles))%len(lineColors)])
                linesAcc[0].set_linestyle(lineStyles[i%len(lineStyles)])
                linesAcc[0].set_color(lineColors[(i//len(lineStyles))%len(lineColors)])
            i += 1
    # axLoss.legend(loc='upper left', bbox_to_anchor=(1, 1),
    #           ncol=math.ceil(len(configs)/20), fancybox=True, shadow=True)
    #axLoss.set_title(title)
    axLoss.set_xlabel("Epoch")
    axLoss.set_ylabel("Loss")
    # axLoss.set_xticks(np.arange(0, round(axLoss.get_xlim()[1])+10, 10))
    # axLoss.set_xticks(np.arange(round(axLoss.get_xlim()[0]), round(axLoss.get_xlim()[1])+1, 1), minor=True)
    # axLoss.set_yticks(np.arange(0, round(axLoss.get_ylim()[1])+0.05, 0.05))
    # axLoss.set_yticks(np.arange(0, round(axLoss.get_ylim()[1])+0.01, 0.01), minor=True)
    axLoss.grid(which='both')
    axLoss.grid(which='minor', alpha=0.2)
    axLoss.grid(which='major', alpha=0.5)
    # axAcc.legend(loc='upper left', bbox_to_anchor=(1, 1),
    #           ncol=math.ceil(len(configs)/20), fancybox=True, shadow=True)
    #axAcc.set_title(title)
    axAcc.set_xlabel("Epoch")
    # dict.fromkeys keeps order while de-duplicating metric names
    axAcc.set_ylabel("Metric ({})".format(list(dict.fromkeys(metrics))))
    #axAcc.set_ylim(0.49,1.)
    #axAcc.set_xticks(np.arange(0, round(axAcc.get_xlim()[1])+10, 10))
    #axAcc.set_xticks(np.arange(round(axAcc.get_xlim()[0]), round(axAcc.get_xlim()[1])+1, 1), minor=True)
    axAcc.set_yticks(np.arange(0.5, 1.01, 0.1))
    axAcc.set_yticks(np.arange(0.49, 1.01, 0.01), minor=True)
    axAcc.grid(which='both')
    axAcc.grid(which='minor', alpha=0.2)
    axAcc.grid(which='major', alpha=0.5)
    fig.suptitle(title)
    handles, labels = axAcc.get_legend_handles_labels()
    fig.legend(handles, labels, bbox_to_anchor=(0.68, 0.95), loc=2, borderaxespad=0.)
    # fig.legend(handles, labels, loc='upper left', bbox_to_anchor=(1, 1),
    #            bbox_transform = plt.gcf().transFigure,
    #           ncol=math.ceil(len(configs)/20), fancybox=True, shadow=True)
    # plt.legend( handles, labels, loc = 'upper left', bbox_to_anchor = (0.9,-0.1,2,2),
    #            bbox_transform = plt.gcf().transFigure )
    # plt.figlegend(handles, labels, loc='upper center', bbox_to_anchor=(0.5, 0), bbox_transform=plt.gcf().transFigure)
    # fig.subplots_adjust(wspace=2, hspace=2,left=0,top=2,right=2,bottom=0)
    #fig.tight_layout()
    #fig.subplots_adjust(right=2)
    fig.show()
    if save:
        fig.savefig('img/plot.eps')#, bbox_inches = 'tight')#, pad_inches = 0)
    plt.close()
def printMetrics(configs, printAllConfigs=False):
    """Print train/valid/test metric values at the best validation epoch.

    For each configuration name the best Ray Tune trial is located, its
    TensorBoard scalars for 'train', 'valid' and 'test' are collected, and
    the epoch with the best validation metric (argmax or argmin depending
    on ``bestSign``) is reported.  The overall best configuration (by
    validation value) is printed at the end.  Missing configurations are
    warned about and skipped.
    """
    sets = ['train', 'valid', 'test']
    if not type(configs) is list:
        configs = [configs]
    bestConfig = None
    points = {}
    for config in configs:
        # config is the *name* of an object in tuneConfigurations
        myConfig = getattr(sys.modules['tuneConfigurations'], config)
        metric = myConfig.trackMetric
        try:
            #use tune to pick best in run
            analysis = Analysis(join("tuneOutput", myConfig.path))
        except ValueError:
            warnings.warn("Configuration {} not present. Skipping.".format(config))
            continue
        mode = ("max" if myConfig.bestSign == '>' else "min")
        print("best hyperparameters for {}: {}".format(config, analysis.get_best_config(metric=myConfig.bestKey, mode=mode)))
        tunePath = analysis.get_best_logdir(metric=myConfig.bestKey, mode=mode)
        expPath = join(tunePath,'files', 'tensorBoard')
        # scalar tag convention: "<metric>_<metric>"
        keyAcc = "{}_{}".format(metric, metric)
        try:
            keys = [f for f in listdir(expPath) if not isfile(join(expPath, f))]
        except FileNotFoundError:
            warnings.warn("Configuration {} not present. Skipping.".format(config))
            continue
        points[config] = {}
        # NOTE(review): `set` shadows the builtin here; left unchanged.
        for set in sets:
            points[config][set] = {}
            for k in keys:
                eventPathPart = join(expPath, k, set)
                # concatenate all event files of this tag/set, in name order
                for runPath in sorted([f for f in listdir(eventPathPart) if isfile(join(eventPathPart, f))]):
                    eventPath = join(eventPathPart, runPath)
                    ea = event_accumulator.EventAccumulator(eventPath)
                    ea.Reload()
                    if not k in points[config][set]:
                        points[config][set][k] = [[v.step for v in ea.Scalars(k)], [v.value for v in ea.Scalars(k)]]
                    else:
                        points[config][set][k][0].extend([v.step for v in ea.Scalars(k)])
                        points[config][set][k][1].extend([v.value for v in ea.Scalars(k)])
        bestSign = myConfig.bestSign
        if bestSign == '>':
            bestI = np.argmax(points[config]['valid'][keyAcc][1]) #point where better metric
        else:
            bestI = np.argmin(points[config]['valid'][keyAcc][1]) #point where better metric
        thisConfig = {
            'name': config,
            'epoch': bestI+1,
            'train': points[config]['train'][keyAcc][1][bestI],
            'valid': points[config]['valid'][keyAcc][1][bestI],
            'test': points[config]['test'][keyAcc][1][bestI],
        }
        if printAllConfigs:
            print("{} (epoch {}):\ttrain {:.3};\tvalid {:.3};\ttest {:.3}".format(thisConfig['name'], thisConfig['epoch'], thisConfig['train'], thisConfig['valid'], thisConfig['test']))
        # keep the configuration with the best validation value so far
        if bestConfig is None or (bestSign == '>' and thisConfig['valid']>bestConfig['valid']) or (bestSign == '<' and thisConfig['valid']<bestConfig['valid']):
            bestConfig = thisConfig
    if not bestConfig is None:
        print("BEST ==== {} (epoch {}):\ttrain {:.3};\tvalid {:.3};\ttest {:.3}".format(bestConfig['name'], bestConfig['epoch'], bestConfig['train'], bestConfig['valid'], bestConfig['test']))
| trianam/quantumNoiseClassification | funPlot.py | funPlot.py | py | 9,824 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 29,
"usage_type": "call"
},
{
"api_na... |
72989251553 | import cv2
import os
import numpy as np
import multiprocessing
from concurrent.futures import ThreadPoolExecutor
def apply_clahe(image):
    """Equalize local contrast of a BGR image with CLAHE.

    Works on the LAB lightness channel only, so the colour channels
    are left untouched; returns a BGR image again.
    """
    lightness, chan_a, chan_b = cv2.split(
        cv2.cvtColor(image, cv2.COLOR_BGR2LAB))
    equalizer = cv2.createCLAHE(clipLimit=4.0, tileGridSize=(8, 8))
    equalized = equalizer.apply(lightness)
    lab_out = cv2.merge([equalized, chan_a, chan_b])
    return cv2.cvtColor(lab_out, cv2.COLOR_LAB2BGR)
def gamma_correction(image: np.ndarray, gamma):
    """Apply power-law (gamma) correction to an image.

    Computes out = (in / 255) ** gamma * 255 and truncates the result
    back to uint8.
    """
    normalized = image / 255.0
    corrected = (normalized ** gamma) * 255.0
    return corrected.astype(np.uint8)
# Collect the full paths of every image in each class' "Images/" folder.
root_dir = "Queensland Dataset CE42/"
classes = ["BCC/", "IEC/", "SCC/"]
images_list = []
io_files = []
for idx, obj in enumerate(classes):
    image_file = os.listdir(root_dir + obj + "Images/")
    for _, image in enumerate(image_file):
        img = root_dir + obj + "Images/" + image
        images_list.append(img)
# Informational only: the executor below uses a fixed max_workers=4.
num_cores = multiprocessing.cpu_count()
print(f"Number of cores: {num_cores}" )
# Preprocessing the image with CLAHE and Gamma-correction
def preprocess_image(image_path):
    """Read an image from disk and run the preprocessing chain:
    CLAHE contrast equalization -> gamma correction (gamma=3) ->
    resize to 256x256.
    """
    raw = cv2.imread(image_path)
    enhanced = gamma_correction(apply_clahe(raw), 3)
    return cv2.resize(enhanced, (256, 256))
# Processing image in parallel using ThreadPoolExecutor
def process_image_parallel(image):
    # Thin top-level wrapper so the preprocessing step can be handed to
    # executor.map as a plain callable.
    return preprocess_image(image)
batch_size = 50
preprocessed_image = []
with ThreadPoolExecutor(max_workers=4) as executor:
    for i in range(0, len(images_list), batch_size):
        batch_images = images_list[i : i + batch_size]
        # Using ThreadPoolExecutor.map to preprocess images in parallel
        preprocess_batch = list(executor.map(process_image_parallel, batch_images))
        preprocessed_image.extend(preprocess_batch)
# Pair each preprocessed image side-by-side with its resized mask.
# NOTE(review): `idx` here is the per-class mask index, so
# preprocessed_image[idx] apparently re-reads the first masks' positions for
# every class instead of pairing globally — verify the intended image/mask
# alignment before relying on the saved array.
for idx, obj in enumerate(classes):
    mask_files = os.listdir(root_dir + obj + "Masks/")
    for idx, mask in enumerate(mask_files):
        mask = cv2.imread(root_dir + obj + "Masks/" + mask)
        r_mask = cv2.resize(mask, (256, 256))
        io_files.append(cv2.hconcat([preprocessed_image[idx], r_mask]))
# Persist the concatenated (image | mask) pairs for the pix2pix pipeline.
np.save("Preprocessed_data", np.array(io_files))
cv2.imshow("Image + mask: ", io_files[0])
cv2.waitKey(0) | Muawizodux/Multi-class-Segmentation-and-Classification-for-Skin-Disease | pix2pix-GANs/Data-Preprocessing(Non-Melanoma).py | Data-Preprocessing(Non-Melanoma).py | py | 2,364 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "cv2.cvtColor",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2LAB",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "cv2.split",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "cv2.createCLAHE",
"li... |
13415632812 | import copy
from SPARQLWrapper import SPARQLWrapper, JSON
from slot_recognition import *
from SPARQL_generation import *
class QueryManager:
    """Answer natural-language questions by slot recognition + SPARQL.

    A question is matched against templates via SlotRecognizer, turned
    into a SPARQL query, executed against ENDPOINT_URL, and the result
    is rendered into a reply string from the matched template.
    """
    def __init__(self, verbose=False):
        # One SPARQLWrapper connection is reused for all queries.
        self.__conn = SPARQLWrapper(ENDPOINT_URL)
        self.__verbose = verbose
    def set_verbose(self, verbose):
        # Only accept an actual bool; any other type is silently ignored.
        if type(verbose) == bool:
            self.__verbose = verbose
    def ask(self, ques):
        """Answer the question *ques*.

        Returns a dict with keys: success, question, result, reply and —
        when recognition succeeded — template_id, arguments, SPARQL.
        """
        query_success = True
        # normalize full-width quotation marks before recognition
        sentence = ques.replace('“', '"').replace('”', '"')
        recognition_result = SlotRecognizer.recognize(sentence)
        # Handle the case where the question could not be understood
        if recognition_result is None:
            result_value = None
            result_reply = '对不起,我无法理解您的问题,请换种说法问我吧!'
            query_success = False
            if self.__verbose:
                print('*' * 13)
                print('答案内容:', result_value)
                print('答案回复:', result_reply)
                print('*' * 13)
            query_note = {'success': query_success,
                          'question': ques,
                          'result': result_value,
                          'reply': result_reply, }
            return query_note
        # recognition result = (template id, recognized slot arguments)
        tid = recognition_result[0]
        arguments = recognition_result[1]
        template = templates[tid]
        temp_type = template['type']
        sparql = SparqlGenerator.generate_sparql_from_recognition_result(recognition_result)
        query_result = self.query(sparql)
        result_list = self.parse_result(query_result, temp_type)
        result_value = '、'.join(result_list)
        # deep-copy so the reply formatting does not mutate `arguments`
        result_arguments = copy.deepcopy(arguments)
        result_arguments['value'] = result_value
        result_reply = None
        # Handle the empty-result case
        if (temp_type == 'select' and result_value == '') or \
                (temp_type == 'count' and result_value == '0'):  # follow-up checks: was e.g. an event name mistyped?
            checks = template['checks']
            checks_note = []
            for check in checks:
                check_template = checks_templates[check]
                check_temp_type = check_template['type']
                check_sparql = SparqlGenerator.generate_sparql_from_check_template(check_template, arguments)
                check_query_result = self.query(check_sparql)
                check_result_list = self.parse_result(check_query_result, check_temp_type)
                check_result_value = '、'.join(check_result_list)
                # an empty check result means that slot value does not exist
                if (check_temp_type == 'count' and check_result_value == '0') or \
                        (check_temp_type == 'select' and check_result_value == ''):
                    note = check_template['if_none_reply'].format(**arguments)
                    checks_note.append(note)
            checks_reply = ','.join(checks_note)
            if checks_reply != '':
                checks_reply += ','
                result_reply = '对不起,没有查询到结果。' + checks_reply + '请检查您的提问是否有误。'
                query_success = False
            else:
                # nothing suspicious found: use the template's "no result" reply
                result_reply = template['none_reply'].format(**arguments)
                query_success = True
        else:
            result_reply = template['reply'].format(**result_arguments)
        if self.__verbose:
            print('*' * 13)
            print('问题:', ques)
            print('模板ID:', tid)
            print('槽识别结果:', arguments)
            print('SPARQL:\n', sparql)
            print('请求返回结果:', query_result)
            print('答案内容:', result_value)
            print('答案回复:', result_reply)
            print('*' * 13)
        query_note = {'success': query_success,
                      'question': ques,
                      'template_id': tid,
                      'arguments': arguments,
                      'SPARQL': sparql,
                      'result': result_value,
                      'reply': result_reply, }
        return query_note
    def query(self, sparql, format=JSON):
        """Run *sparql* against the endpoint and return the converted result."""
        self.__conn.setQuery(sparql)
        self.__conn.setReturnFormat(format)
        query_result = self.__conn.query().convert()
        return query_result
    @staticmethod
    def parse_result(query_result, type):
        """Extract a flat list of values from a SPARQL JSON result.

        'count' templates read the virtuoso aggregate binding 'callret-0';
        'select' templates read variable 'x' and strip everything up to
        and including the '#' from each URI.
        """
        bindings = query_result['results']['bindings']
        result_list = []
        key_val = 'x'
        if type == 'count':
            key_val = 'callret-0'
        for item in bindings:
            value = item[key_val]['value']
            if type == 'select':
                index = value.find('#')
                value = value[index + 1:]
            result_list.append(value)
        return result_list
| btyu/R3K_KBQA | R3K-KBQA/query_management.py | query_management.py | py | 4,858 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "SPARQLWrapper.SPARQLWrapper",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "copy.deepcopy",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "SPARQLWrapper.JSON",
"line_number": 101,
"usage_type": "name"
}
] |
36064808560 | import FeatureProject
from sklearn.linear_model import LogisticRegression
import os
import ROCX
from sklearn.model_selection import cross_val_score
from sklearn.metrics import classification_report, confusion_matrix
import pandas as pd
from sklearn.decomposition import PCA
import time
import numpy as np
'''进行模型的保存和加载'''
import joblib
import csv
def n_components_analysis(n, x_train, y_train, x_val, y_val,result_file,resultPath,classifier,splitFile): #
    """PCA-reduce the features to *n* components, fit a logistic regression,
    and write predictions, cross-validation scores, classification report,
    confusion matrix and a ROC plot to *resultPath* / *result_file*.

    classifier is 2 or 3 (binary vs three-class) and controls the output
    column names and which ROC helper is used.
    """
    # NOTE(review): `start` is never used afterwards — timing was apparently
    # intended but not reported.
    start = time.time()
    pca = PCA(n_components=n)
    result_file.write("特征降维,传递参数为{}\n".format(n))
    # print("特征降维,传递参数为{}".format(n))
    pca.fit(x_train)
    x_train_pca = pca.transform(x_train)
    x_val_pca = pca.transform(x_val)
    result_file.write("开始进行SVM训练")
    # print("开始进行SVM训练")
    ss = LogisticRegression(class_weight='balanced', penalty='l1', solver='liblinear')
    ss.fit(x_train_pca, y_train)
    ## predict class probabilities / labels on the validation set
    # NOTE(review): fit() is called three times with identical data; the
    # two extra fits below are redundant (kept unchanged here).
    y_pred_prob = ss.fit(x_train_pca, y_train).predict_proba(x_val_pca)
    y_pred = ss.fit(x_train_pca, y_train).predict(x_val_pca)
    # str1 = 'classification_report:\n', classification_report(y_val, y_pred,digits=3)
    # result_file.writelines(str1)
    # str1 = 'confusion_matrix:\n',confusion_matrix(y_val, y_pred)
    # result_file.writelines(str1)
    """
    主成分分析的模型结果也需要进行存储
    """
    # Persist predictions, then rebuild a side-by-side comparison with the
    # ground-truth labels from the split directory.
    pd.DataFrame(y_pred_prob).to_csv(resultPath + '/pred_proba.csv')
    pd.DataFrame(y_pred).to_csv(resultPath + '/pred.csv')
    df1 = pd.read_csv(splitFile + '/y_test.csv')
    df2 = pd.read_csv(resultPath + '/pred.csv', index_col='Unnamed: 0')
    df3 = pd.read_csv(resultPath + '/pred_proba.csv', index_col='Unnamed: 0')
    df4 = pd.concat(objs=[df1, df2, df3], axis=1)
    if classifier == 2:
        df4.columns = ['SubjID', 'Group', 'pred', 'pred_prob1', 'pred_prob2']
    if classifier == 3:
        df4.columns = ['SubjID', 'Group', 'pred', 'pred_prob1', 'pred_prob2', 'pred_prob3']
    df4.to_csv(resultPath + '/' + str(n) + 'result_compare.csv')
    '''将交叉验证结果写入文件'''
    # 5-fold cross-validation accuracy on the (PCA-reduced) training set
    scores = cross_val_score(ss, x_train_pca, y_train, cv=5, scoring='accuracy')
    result_file.write(str('\n训练集交叉验证结果:\n'))
    result_file.writelines(str(scores))
    '''将模型测试评分结果写入文件'''
    score = ss.score(x_val_pca, y_val)  # a single score is not ideal for multi-class; see the per-class report below
    result_file.write(str('\n模型测试评分结果:'))
    result_file.writelines(str(score))
    '''将分类报告写入文件'''
    clf_rep = classification_report(y_val, y_pred, digits=3)
    result_file.write(str('\n分类报告:\n'))
    result_file.write(clf_rep)
    '''将混淆矩阵写入文件'''
    cfu_mx = confusion_matrix(y_val, y_pred)
    result_file.write(str('混淆矩阵:\n'))
    result_file.writelines(str(cfu_mx))
    result_file.write('\n-----------------------------------------------------------------------------\n')
    # ## plot the ROC curve (three-class vs binary helper)
    picName = '选择' + str(n) + '的主成分ClassifierAuc.png'
    if classifier == 3:
        ROCX.three_auc_report(n, classifier,y_val,y_pred_prob,resultPath+'/ROC', picName=picName)
    else:
        ROCX.auc_report(n, y_val, y_pred_prob[:, 1], resultPath+'/ROC', picName=picName)
'''
函数参数说明:
classifer:用于说明是进行二分类还是进行三分类
splitFile:存放测试集、训练集的文件路径
resultPath:用于保存结果的文件路径,比如SVM就写到 ...... /result/SVM
'''
def logistic_reg(classifer,splitFile,resultPath, tp):
    """Train an L1 logistic regression on the pre-split data in *splitFile*
    and write predictions, weights and evaluation reports to *resultPath*.

    Parameters
    ----------
    classifer : int
        2 for binary, 3 for three-class; controls output column names and
        whether the model is pickled.
    splitFile : str
        Directory with X_train/X_test/y_train/y_test CSV files.
    resultPath : str
        Output directory (created if missing).
    tp
        Passed through to the (currently commented-out) ROC helper only.
    """
    if os.path.exists(resultPath) == False:  # create the result directory when missing
        os.makedirs(resultPath)
    # first two CSV columns are SubjID/Group bookkeeping, features start at 2;
    # y files keep the label in column 1
    X_train = pd.read_csv(splitFile + '/X_train.csv').iloc[:, 2:].values
    X_test = pd.read_csv(splitFile + '/X_test.csv').iloc[:, 2:].values
    y_train = pd.read_csv(splitFile + '/y_train.csv').iloc[:, 1].values
    y_test = pd.read_csv(splitFile + '/y_test.csv').iloc[:, 1].values
    estimator = LogisticRegression(class_weight='balanced', penalty='l1', solver='liblinear')
    estimator.fit(X_train, y_train)
    y_pred_prob = estimator.predict_proba(X_test)
    y_pred = estimator.predict(X_test)
    '''训练集同样当成测试集输入并且进行测试'''
    # also score the training set itself, for overfitting inspection
    train_pred_prob = estimator.predict_proba(X_train)
    train_pred = estimator.predict(X_train)
    pd.DataFrame(train_pred_prob).to_csv(resultPath + '/train_pred_proba.csv')
    pd.DataFrame(train_pred).to_csv(resultPath + '/train_pred.csv')
    df1 = pd.read_csv(splitFile + '/y_train.csv')
    df2 = pd.read_csv(resultPath + '/train_pred.csv', index_col='Unnamed: 0')
    df3 = pd.read_csv(resultPath + '/train_pred_proba.csv', index_col='Unnamed: 0')
    df4 = pd.concat(objs=[df1, df2, df3], axis=1)
    if classifer == 2:
        df4.columns = ['SubjID', 'Group', 'pred', 'train_pred_prob1', 'train_pred_prob2']
    if classifer == 3:
        df4.columns = ['SubjID', 'Group', 'pred', 'train_pred_prob1', 'train_pred_prob2', 'train_pred_prob3']
    df4.to_csv(resultPath + '/train_result_compare.csv')
    """
    将预测结果、预测结果概率存入/result/logisticRegression/result_compare.csv文件
    """
    # same comparison table, but for the held-out test set
    pd.DataFrame(y_pred_prob).to_csv(resultPath + '/pred_proba.csv')
    pd.DataFrame(y_pred).to_csv(resultPath + '/pred.csv')
    df1 = pd.read_csv(splitFile + '/y_test.csv')
    df2 = pd.read_csv(resultPath + '/pred.csv', index_col='Unnamed: 0')
    df3 = pd.read_csv(resultPath + '/pred_proba.csv', index_col='Unnamed: 0')
    df4 = pd.concat(objs=[df1, df2, df3], axis=1)
    if classifer == 2:
        df4.columns = ['SubjID', 'Group', 'pred', 'pred_prob1', 'pred_prob2']
    if classifer == 3:
        df4.columns = ['SubjID', 'Group', 'pred', 'pred_prob1', 'pred_prob2', 'pred_prob3']
    df4.to_csv(resultPath + '/result_compare.csv')
    """
    权重保存
    """
    # pair each feature name (CSV header, columns 2+) with its coefficient
    # NOTE(review): coef_.tolist()[0] takes only the first class' weights,
    # which is complete for binary but partial for the 3-class case.
    weights = estimator.coef_.tolist()[0]
    with open(splitFile + '/X_train.csv', 'r') as f:
        reader = csv.reader(f)
        feature = next(reader)
        feature = feature[2:]
    # print('feature:', feature)
    # print('weights:', weights)
    dataframe = pd.DataFrame({'feature': feature, 'weight': weights})
    # print('path:', resultPath + '/Logitic_weights.csv')
    dataframe.to_csv(resultPath + '/Logitic_weights.csv', index=False, sep=',')
    """
    模型得分
    """
    '''打开文件'''
    proba_result = open(resultPath + '/model_score.txt', mode='w')
    '''将模型得出的权重系数写入到文件中'''
    # proba_result.write('逻辑回归得到的权重系数:' + estimator.)
    '''将交叉验证结果写入文件'''
    # 5-fold cross-validation accuracy on the training set
    scores = cross_val_score(estimator, X_train, y_train, cv=5, scoring='accuracy')
    proba_result.write(str('训练集交叉验证结果:\n'))
    proba_result.writelines(str(scores))
    '''将模型测试评分结果写入文件'''
    score = estimator.score(X_test, y_test)  # a single score is not ideal for multi-class; see the per-class report below
    proba_result.write('测试集:\n')
    proba_result.write(str('\n模型测试评分结果:'))
    proba_result.writelines(str(score))
    y_pred = estimator.predict(X_test)
    '''将分类报告写入文件'''
    clf_rep = classification_report(y_test, y_pred, digits=6)
    proba_result.write(str('\n分类报告:\n'))
    proba_result.write(clf_rep)
    '''将混淆矩阵写入文件'''
    cfu_mx = confusion_matrix(y_test, y_pred)
    proba_result.write(str('混淆矩阵:\n'))
    proba_result.writelines(str(cfu_mx))
    '''关闭文件'''
    '''训练集评分保存至文件'''
    # repeat the same report block for the training set
    score = estimator.score(X_train, y_train)  # a single score is not ideal for multi-class; see the per-class report below
    proba_result.write(str('\n测试集模型测试评分结果:'))
    proba_result.writelines(str(score))
    y_pred = estimator.predict(X_train)
    '''将分类报告写入文件'''
    clf_rep = classification_report(y_train, y_pred, digits=6)
    proba_result.write(str('\n分类报告:\n'))
    proba_result.write(clf_rep)
    '''将混淆矩阵写入文件'''
    cfu_mx = confusion_matrix(y_train, y_pred)
    proba_result.write(str('混淆矩阵:\n'))
    proba_result.writelines(str(cfu_mx))
    '''关闭文件'''
    proba_result.close()
    if classifer == 3:
        # ROCX.three_auc_report(1, classifer, y_test, y_pred_prob, resultPath+'/ROC', picName='logisticRegression.png', tp=tp)
        joblib.dump(estimator, resultPath + '/model.pkl')
    # else:
    #     ROCX.auc_report(1, y_test, y_pred_prob[:,1], resultPath+'/ROC', picName='logisticRegression.png')
    #     joblib.dump(estimator, resultPath + '/model.pkl')
    #
    """
    PCA
    """
    # if os.path.exists(resultPath+'/PCA') == False:
    #     os.makedirs(resultPath+'/PCA')
    #
    # result_file = open(resultPath + '/PCA' + '/result.txt', mode='w')
    # n_s = np.linspace(0.6, 0.8, num=5)
    #
    # accuracy = []
    # for n in n_s:
    #     tmp = n_components_analysis(n, X_train, y_train, X_test,
    #                                 y_test, result_file, resultPath + '/PCA',
    #                                 classifier=classifer,splitFile=splitFile)  # 使用原始数据直接进行主成分分析,8-2分
    #
    #     accuracy.append(tmp)
    #     # acc = 'total accurcy:\n', accuracy
    #     # result_file.writelines(acc)
    # result_file.close()
    return
{
"api_name": "time.time",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LogisticRegression",
"line_number": 29,
"usage_type": "call"
},
{
"api_n... |
5130228201 |
# coding: utf-8
"""Plot DWD and Netatmo station locations over the BW boundary shapefile.

Reads two coordinate CSVs (Netatmo and DWD stations), draws the state
border from a lon/lat shapefile, and saves the resulting map as a PNG.
"""
import shapefile
import matplotlib.pyplot as plt
import numpy as np

import pandas as pd

path_to_netatmo_coords_df = (r'X:\hiwi\ElHachem\Prof_Bardossy\Extremes'
                             r'\NetAtmo_BW'
                             r'\rain_bw_1hour'
                             r'\netatmo_bw_1hour_coords.csv')
# Netatmo station coordinates; the column names carry a leading space
# (' lon', ' lat') exactly as written in the CSV header.
df_c = pd.read_csv(path_to_netatmo_coords_df, sep=';', index_col=0)

plt.ioff()  # non-interactive backend use: the figure is only saved to disk
fig = plt.figure(figsize=(15, 15), dpi=200)
ax = fig.add_subplot(111)

path_to_shpfile = (
    r"X:\exchange\ElHachem\Netatmo\Landesgrenze_ETRS89\Landesgrenze_10000_ETRS89_lon_lat.shp")
shp_de = shapefile.Reader(path_to_shpfile)
# read and plot shapefile (BW or Germany) should be lon lat
for shape_ in shp_de.shapeRecords():
    lon = [i[0] for i in shape_.shape.points[:][::-1]]
    lat = [i[1] for i in shape_.shape.points[:][::-1]]
    ax.scatter(lon, lat, marker='.', c='lightgrey',
               alpha=0.25, s=1)

df_coords = pd.read_csv(
    r"X:\hiwi\ElHachem\Prof_Bardossy\Extremes\DWD_coords_BW.csv", sep=',', index_col=0)

ax.scatter(df_c[' lon'], df_c[' lat'], c='b', alpha=0.85,
           marker='o', s=25, label='Netatmo stations')
ax.scatter(df_coords['lon'], df_coords['lat'], c='r', alpha=0.85,
           s=25, marker='d', label='DWD stations')

plt.axis('equal')
plt.grid(alpha=.05)
plt.legend(loc=0, fontsize=12)
plt.xlabel('Longitude', fontsize=10)
plt.ylabel('Latitude', fontsize=10)
plt.xticks(fontsize=10)
plt.yticks(fontsize=10)
plt.tight_layout()
# Fix: the original called savefig twice in a row with identical arguments,
# writing the exact same file to the same path twice; one call suffices.
plt.savefig(r"X:\hiwi\ElHachem\Prof_Bardossy\Extremes\stations.png",
            frameon=True, papertype='a4',
            bbox_inches='tight', pad_inches=.2)
| AbbasElHachem/extremes | _05_plot_ppt_dwd_netatmo_stations.py | _05_plot_ppt_dwd_netatmo_stations.py | py | 1,849 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.ioff",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "matplotlib.p... |
13149683279 | from src.datamodules.common.generic_datamodule import GenericDatamodule
from src.utils.hydra import instantiate_delayed
import os
from torchvision.utils import save_image
import torch
from src.utils.audio import save_mp3_to_tensor
class AudioDataModule(GenericDatamodule):
    """Datamodule for audio datasets.

    Wraps dataset preparation (via ``preparers``), conversion of raw audio
    files into serialized torch tensors, and generation of spectrogram PNGs
    from the configured datasets.

    Note: ``interval_length``, ``extensions`` and ``transform`` are accepted
    for config compatibility but are currently unused.
    """

    def __init__(
        self,
        batch_size=64,
        num_workers: int = 0,
        pin_memory: bool = False,
        train_ratio=0.85,
        val_ratio=0.15,
        sr=44100,
        interval_length=20,
        extensions=[],
        loader_type="torch",
        transform=None,
        preparers=None,
        train_datasets=None,
        test_datasets=None,
        images_preparers=None,
        images_dir="",
        torch_preparers=None,
        torch_dir="",
        audio_dir="",
        device="gpu",
    ):
        super().__init__(
            batch_size,
            num_workers,
            pin_memory,
            train_ratio,
            val_ratio,
            train_datasets,
            test_datasets,
        )
        # Fix: the preparer collections are iterated with .values() in the
        # methods below, so the empty fallback must be a dict — the previous
        # [] fallback raised AttributeError as soon as the hooks ran.
        self.preparers = preparers if preparers is not None else {}
        self.images_preparers = images_preparers if images_preparers is not None else {}
        self.images_dir = images_dir
        self.torch_preparers = torch_preparers if torch_preparers is not None else {}
        self.torch_dir = torch_dir
        self.audio_dir = audio_dir
        self.sr = sr
        self.loader_type = loader_type
        # Map the config-friendly name "gpu" onto the torch device string.
        self.device = "cuda" if device == "gpu" else device

    def prepare_data(self):
        """Run every configured preparer (e.g. download/unpack datasets)."""
        print("Audio data module prepare start...")
        for preparer in self.preparers.values():
            preparer.prepare()
        print("Audio data module prepare finished.")

    def create_audio_tensors(self):
        """Mirror ``audio_dir`` into ``torch_dir``, converting each .mp3/.wav
        file into a serialized tensor saved with a ``.pt`` suffix."""
        if not os.path.exists(self.torch_dir):
            os.mkdir(self.torch_dir)
        for (
            root,
            dirs,
            files,
        ) in os.walk(self.audio_dir):
            destination_dir = root.replace(self.audio_dir, self.torch_dir)
            if not os.path.exists(destination_dir):
                os.makedirs(destination_dir)
            for file in files:
                if file.endswith(".mp3") or file.endswith(".wav"):
                    source_path = os.path.join(root, file)
                    # Fix: derive the output name with splitext so .wav
                    # inputs also get the .pt suffix; the previous
                    # .replace(".mp3", ".pt") left wav tensors with a
                    # misleading .wav extension.
                    base, _ = os.path.splitext(file)
                    destination_path = os.path.join(destination_dir, base + ".pt")
                    save_mp3_to_tensor(
                        source_path,
                        destination_path,
                        self.sr,
                        self.loader_type,
                        self.device,
                    )

    def create_spectrograms(self):
        """Render every dataset sample to a PNG spectrogram, grouped into
        per-class sub-directories of each preparer's image directory."""
        if not os.path.exists(self.images_dir):
            os.mkdir(self.images_dir)
        # NOTE(review): *_datasets_configs are assumed to be provided by
        # GenericDatamodule — confirm against the parent class.
        datasets = [
            *self.train_datasets_configs,
            *self.test_datasets_configs,
        ]
        for image_preparer in self.images_preparers.values():
            dataset_images_dir = image_preparer.images_dir
            if not os.path.exists(dataset_images_dir):
                os.makedirs(dataset_images_dir, exist_ok=True)
            dataset_name = image_preparer.dataset_name
            dataset_config = next(filter(lambda d: d["name"] == dataset_name, datasets))
            dataset = instantiate_delayed(dataset_config)
            idx_to_class = {v: k for k, v in dataset.class_to_idx.items()}
            for index, entry in enumerate(dataset):
                # Skip samples the loader failed to decode.
                if entry is None or entry[0] is None:
                    continue
                sample, label = entry
                image = sample[0]
                key_dir = idx_to_class[label]
                # Fix: use splitext instead of the hard-coded [:-4] slice,
                # which mangled names with 4-character extensions (e.g. .webm).
                stem = os.path.splitext(os.path.basename(dataset.samples[index][0]))[0]
                filename = stem + ".png"
                full_dir = os.path.join(dataset_images_dir, key_dir)
                full_path = os.path.join(full_dir, filename)
                if not os.path.exists(full_dir):
                    os.mkdir(full_dir)
                save_image(image, full_path)
| radziminski/audio-key-classification | src/datamodules/audio_datamodule.py | audio_datamodule.py | py | 4,037 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "src.datamodules.common.generic_datamodule.GenericDatamodule",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "os.path.exists",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 58,
"usage_type": "attribute"
},
{... |
12060803554 | """
Hi, here's your problem today. This problem was recently asked by Microsoft:
Given the root of a binary tree, print its level-order traversal. For example:
1
/ \
2 3
/ \
4 5
The following tree should output 1, 2, 3, 4, 5.
class Node:
def __init__(self, val, left=None, right=None):
self.val = val
self.left = left
self.right = right
def print_level_order(root):
# Fill this in.
root = Node(1, Node(2), Node(3, Node(4), Node(5)))
print_level_order(root)
# 1 2 3 4 5
"""
from collections import deque
class Node:
    """A binary-tree node: a payload value plus optional child links."""

    def __init__(self, val, left=None, right=None):
        """Store the value and the (possibly absent) left/right children."""
        self.val, self.left, self.right = val, left, right
def print_level_order(root):
    """Print the values of a binary tree in level (BFS) order on one line.

    Values are separated by single spaces. Fix: an empty tree
    (``root is None``) now prints an empty line instead of raising
    ``AttributeError`` as the original implementation did.
    """
    order = []
    if root is not None:
        queue = deque([root])
        while queue:
            node = queue.popleft()  # FIFO: dequeue from the left ...
            order.append(node.val)
            if node.left:
                queue.append(node.left)  # ... enqueue children on the right
            if node.right:
                queue.append(node.right)
    print(" ".join(str(x) for x in order))
root = Node(1, Node(2), Node(3, Node(4), Node(5)))
print_level_order(root)
# 1 2 3 4 5
| winkitee/coding-interview-problems | 81-90/87_level_order_traversal_of_binary_tree.py | 87_level_order_traversal_of_binary_tree.py | py | 1,096 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 39,
"usage_type": "call"
}
] |
36510945058 | from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from dao.character_dao import CharacterDao
from dao.weapon_dao import WeaponDao
class Genshin:
    """Singleton-style facade over the genshin SQLite database.

    Lazily constructs DAO objects that all share one SQLAlchemy session.
    """

    # Lazily created DAOs, cached after first access.
    character_dao = None
    weapon_dao = None
    # Cached singleton instance used by get_instance().
    __instance = None

    @staticmethod
    def get_instance():
        """Static method access"""
        # NOTE(review): __init__ itself is not guarded, so calling Genshin()
        # directly still creates additional instances/engines — confirm all
        # callers go through get_instance().
        if Genshin.__instance is None:
            Genshin.__instance = Genshin()
        return Genshin.__instance

    def __init__(self, connection_url="sqlite:///genshin-data.db"):
        # echo=True makes SQLAlchemy log every emitted SQL statement.
        engine = create_engine(connection_url, echo=True)
        Session = sessionmaker(bind=engine)
        self.__db_session = Session()

    def get_character_dao(self):
        """Get character dao."""
        if self.character_dao is None:
            self.character_dao = CharacterDao(session=self.__db_session)
        return self.character_dao

    def get_weapon_dao(self):
        """Get weapon dao."""
        if self.weapon_dao is None:
            self.weapon_dao = WeaponDao(session=self.__db_session)
        return self.weapon_dao

    def close_db(self):
        """Close all the database"""
        self.__db_session.close()
{
"api_name": "sqlalchemy.create_engine",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "dao.character_dao.CharacterDao",
"line_number": 27,
"usage_type": "call"
},
{
... |
32937853207 | import decimal
import re
import lxml.html
class Base(object):
    """Base scraper for a MyAnimeList entry page.

    Subclasses are expected to provide ``base_url`` (formatted with
    ``mal_id``). ``parse`` fills title, synopsis, cover, info,
    alternative_titles, statistics, related and reviews from the page HTML.
    """

    # Set to True once parse() has completed successfully.
    fetched = False

    def __init__(self, mal_id, mal):
        self.mal_id = mal_id
        self.mal = mal  # owning MAL client; provides _fetch/_handle_related

    def _get_url(self):
        # base_url is supplied by the concrete subclass.
        return self.base_url % self.mal_id

    def fetch(self):
        """Fetch and parse this entry unless it was already parsed."""
        if not self.fetched:
            return self.mal._fetch(self)

    def parse(self, html):
        """Populate this object's attributes from the raw page bytes."""
        # Ignoring errors here because MAL allows users to use their own encodings
        # Without testing, probably allows the users to store pictures from their latest vacation as a review
        # Anyways, anything we need is (hopefully) in utf-8
        tree = lxml.html.fromstring(html.decode('utf-8', errors='ignore'))
        schema = tree.xpath('//div[@id="contentWrapper"]')[0]
        self.title = schema.xpath('.//span[@itemprop="name"]/text()')[0].strip()
        synopsis = schema.xpath('.//span[@itemprop="description"]/text()')
        if synopsis:
            self.synopsis = synopsis[0].strip()
        else:
            self.synopsis = ''
        # Cover image: lazy-loaded pages put the URL in data-src.
        self.cover = schema.xpath('.//img[@itemprop="image"]')[0]
        if 'data-src' in self.cover.attrib:
            self.cover = self.cover.attrib['data-src']
        else:
            self.cover = self.cover.attrib['src']
        # Local aliases double as the instance attributes filled below.
        self.info = info = {}
        self.alternative_titles = alternative_titles = {}
        self.statistics = statistics = {}
        self.related = related = {}
        self.reviews = []

        def duration2int(x):
            # Convert e.g. "1 hr. 24 min." into total minutes.
            runtime = 0
            hours = re.findall(r'(\d+) hr', x)
            minutes = re.findall(r'(\d+) min', x)
            if hours:
                try:
                    runtime += int(hours[0])*60
                except ValueError:
                    pass
            if minutes:
                try:
                    runtime += int(minutes[0])
                except ValueError:
                    pass
            return runtime

        def num2int(x):
            # "12,345" -> 12345; None when not numeric.
            try:
                return int(x.replace(',', ''))
            except ValueError:
                return None

        def num2dec(x):
            try:
                return decimal.Decimal(x)
            except decimal.InvalidOperation:
                return None

        # "#123" -> 123; 'N/A' -> None.
        strip2int = lambda x: x != 'N/A' and int(x.strip('#')) or None

        # (section header, split comma lists, link-valued keys, target dict,
        #  per-key postprocessors)
        loop_elements = [
            ('Alternative Titles', True, [], alternative_titles, {}),
            ('Information', False, ['Producers', 'Genres', 'Authors', 'Serialization', 'Licensors', 'Studios'], info, {'Episodes': num2int, 'Duration': duration2int, 'Volumes': num2int, 'Chapters': num2int}),
            ('Statistics', False, [], statistics, {'Favorites': num2int, 'Members': num2int, 'Popularity': strip2int, 'Ranked': strip2int}),
        ]
        for block, splitlist, linklist, save_target, postprocess in loop_elements:
            for el in tree.xpath('//h2[text()="%s"]/following-sibling::*' % block):
                # Stop at the first sibling that is not a "<span>Key:</span> value" row.
                if el.tag != 'div' or not el.xpath('span') or ':' not in el.xpath('span/text()')[0]:
                    break
                text = ''.join(el.xpath('text()')).strip()
                info_type = el.xpath('span/text()')[0].strip(':')
                if info_type in linklist:
                    # Link-valued keys become lists of {'id', 'name'} dicts.
                    save_target[info_type] = []
                    if 'None found' not in text:
                        for a in el.xpath('a'):
                            save_target[info_type].append({
                                'id': int(re.findall('\d+', a.attrib['href'])[-1]),
                                'name': a.text
                            })
                elif info_type == 'Type':
                    # The longest text node is the type label itself.
                    save_target[info_type] = str(sorted(el.xpath('.//text()'), key=lambda x:len(x))[-1])
                elif info_type == 'Premiered':
                    # e.g. "Fall 2016" -> {'season': 'Fall', 'year': 2016}.
                    premiered = el.xpath('./a/text()')[0].split(' ')
                    if premiered:
                        year = premiered[1]
                        try:
                            year = int(premiered[1])
                        except ValueError:
                            pass
                        save_target[info_type] = {
                            'season': premiered[0],
                            'year': year,
                        }
                else:
                    save_target[info_type] = text.strip()
                    if splitlist:
                        save_target[info_type] = map(lambda x:x.strip(), save_target[info_type].split(','))
                    elif info_type in postprocess:
                        save_target[info_type] = postprocess[info_type](save_target[info_type])
        # Score/votes: prefer the itemprop microdata, fall back to the score box.
        score_box = tree.xpath('//div[./span[text()="Score:"]]/span')
        votes = tree.xpath('//span[@itemprop="ratingCount"]/text()')
        if votes:
            statistics['Votes'] = votes[0]
        else:
            statistics['Votes'] = score_box[2].xpath('./text()')[0]
        if 'Votes' in statistics:
            statistics['Votes'] = int(statistics['Votes'].replace(',', ''))
        score = tree.xpath('//span[@itemprop="ratingValue"]/text()')
        if score:
            statistics['Score'] = score[0]
        else:
            statistics['Score'] = score_box[1].xpath('./text()')[0]
        if 'Score' in statistics:
            statistics['Score'] = num2dec(statistics['Score'])
        # NOTE(review): found_h2/tags/current_tag appear to be leftovers from
        # an earlier related-entries parser and are never used below.
        found_h2 = False
        tags = iter(filter(lambda x:x, map(lambda x:x.strip(': ,'), tree.xpath('//h2[starts-with(text(), "Related ")]/../text()'))))
        current_tag = None
        for el in tree.xpath('//table[@class="anime_detail_related_anime"]/tr'):
            # Each row: "<relation>: <links to related entries>".
            name, relationships = el.xpath('./td')
            name = name.text.strip(':')
            related[name] = []
            for r in relationships.xpath('./a'):
                # href looks like "/anime/12345/..." -> type, id.
                url = r.attrib['href'].split('/')
                tag_type = url[1]
                tag_id = url[2]
                related[name].append({'type': tag_type, 'id': int(tag_id)})
        self.mal._handle_related(self)
        for review in tree.xpath('//h2[contains(text(), "Reviews")]/following-sibling::*//div[contains(@class, "borderLight")]'):
            rating = int(review.xpath('.//a[text()="Overall Rating"]/../text()')[0].strip(': '))
            review = ''.join(review.xpath('following-sibling::div/text()')).strip() + '\n'.join(review.xpath('following-sibling::div/span/text()')).strip()
            review = review.replace('\n\n', '\n')
            self.reviews.append({
                'rating': rating,
                'review': review
            })
        self.fetched = True
{
"api_name": "lxml.html.html.fromstring",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "lxml.html.html",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "lxml.html",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "re.findall",
... |
42603908137 | from django import forms
from .models import Post
from django.core.validators import FileExtensionValidator
class PostForm(forms.ModelForm):
    """Form for creating/editing a crowdfunding Post, with optional
    thumbnail and video uploads."""

    thumbnail = forms.FileField(required=False, widget=forms.ClearableFileInput(attrs={'class': 'input'}))
    # Restrict uploads to common video container formats.
    video = forms.FileField(required=False, validators=[FileExtensionValidator(allowed_extensions=['MOV','avi','mp4','webm','mkv'])], widget=forms.ClearableFileInput(attrs={'class': 'input'}))

    class Meta:
        model = Post
        fields = ['title', 'intro', 'body', 'contribution_amount', 'thumbnail', 'video']
        widgets = {
            'title': forms.TextInput(attrs={'class': 'input', 'placeholder': 'The title of your post', 'required': True}),
            'intro': forms.TextInput(attrs={'class': 'input', 'placeholder': 'The intro of your post', 'required': True}),
            'body': forms.Textarea(attrs={'class': 'textarea', 'placeholder': 'Include details about your cause'}),
            'contribution_amount': forms.TextInput(attrs={'class': 'input', 'placeholder': 'Contribution amount'}),
        }
        # Fix: removed the previous ``validators`` entry here — ``validators``
        # is not a recognized ModelForm Meta option, so Django silently
        # ignored it; the extension check is already applied on the ``video``
        # field declaration above.
| Varad-13/django-crowdfund | crowdfunding/forms.py | forms.py | py | 1,207 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "django.forms.FileField",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.f... |
23070045502 | import os
import time
from flask import Flask, render_template, request
from flask_socketio import SocketIO, emit, disconnect
from collections import deque
app = Flask(__name__)

# socket-io configure
app.config["SECRET_KEY"] = os.getenv("SECRET_KEY")
socketio = SocketIO(app)

# in-memory data
# USERS maps a username to its most recent socket session id (sid).
USERS = {}
# CHANNELS maps a channel name to a bounded history of its last 100 messages.
CHANNELS = {"general": deque([], maxlen=100)}


@app.route("/")
def index():
    """Serve the single-page chat client."""
    return render_template("index.html")


@socketio.on('connect')
def connection():
    # User bookkeeping happens in the 'userdata' event; this just logs.
    print("new user connected")


@socketio.on('userdata')
def user_data(data):
    """Remember the socket session id for the announced username."""
    if 'username' in data:
        USERS[data['username']] = request.sid


@socketio.on('new channel')
def new_channel(data):
    """Create a channel and broadcast it to all clients; reject duplicates."""
    if data['name'] in CHANNELS:
        return False
    else:
        CHANNELS[data['name']] = deque(maxlen=100)
        emit('new channel', { "name" : data['name']}, broadcast=True)


@socketio.on('new msg')
def new_msg(data):
    """Timestamp an incoming message, store it in channel history, broadcast."""
    if 'channel' in data:
        data['created_at'] = int(time.time())
        CHANNELS[data['channel']].append(data)
        emit('msg', data, broadcast=True)


@socketio.on('get channels')
def get_channels():
    """Send the list of known channel names back to the requester."""
    emit('channels', list(CHANNELS.keys()))


@socketio.on('get msgs')
def get_msgs(data):
    """Send the stored message history of one channel back to the requester."""
    if 'name' in data:
        emit('msgs', list(CHANNELS[data['name']]))


if __name__ == "__main__":
    socketio.run(app, host="0.0.0.0", port=int(os.environ.get("PORT", 5000)))
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "flask_socketio.SocketIO",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "collections.deque",
... |
19093601252 | import pygame
import config
from CentipedeComponent import CentipedeComponent
from Direction import Direction
from threading import Timer
from PendingMovement import PendingMovement
def backwards(dir):
    """Return the opposite of *dir*.

    Fix: the original returned ``Direction.left`` for ``Direction.left``
    (a copy/paste slip), so a left-moving segment never reversed.
    """
    if dir == Direction.up: return Direction.down
    elif dir == Direction.right: return Direction.left
    elif dir == Direction.down: return Direction.up
    elif dir == Direction.left: return Direction.right
class Centipede():
    """A multi-segment centipede made of CentipedeComponent sprites.

    The trailing segment is moved toward the head each frame; the remaining
    segments follow via a queue of PendingMovement objects.
    """

    def __init__(self, controller, initAmount = 10, initX = 20 + (5 * 10), initY = 20, initMove = 0, newG = None):
        # Fix: the original used pygame.sprite.Group() as the default value
        # of newG. Python evaluates defaults once at definition time, so
        # every centipede created without an explicit group silently SHARED
        # one sprite group. A None sentinel gives each instance its own.
        self.centipede_group = newG if newG is not None else pygame.sprite.Group()
        self.moveAmount = initMove
        self.facingRight = True
        self.speed = 2
        self.pendingMovements = []
        self.controller = controller
        self.headX = initX
        self.headY = initY
        if initAmount > 0:
            for x in range(0, initAmount):
                self.centipede_group.add(CentipedeComponent(self))
        # Negative initX marks a split-off body (see divide()); its segments
        # keep their current positions instead of being laid out in a row.
        if initX > 0:
            self.initPos()

    def update(self, screen, background, collideWith, bullets):
        """Advance all segments one frame and draw them."""
        self.updatePos(collideWith, bullets)
        self.centipede_group.draw(screen)

    def divide(self, at):
        """Split this centipede at segment index *at*, spawning a new
        centipede from the detached tail segments."""
        try:
            origLen = len(self.centipede_group)
            if origLen == 0:
                return
            atX = self.centipede_group.sprites()[at].rect.x
            atY = self.centipede_group.sprites()[at].rect.y
            rem = 0
            newG = pygame.sprite.Group()
            # Detach segments from the tail end down to the split point.
            while len(self.centipede_group) >= at and len(self.centipede_group) >= 1:
                obj = self.centipede_group.sprites()[len(self.centipede_group.sprites()) - 1]
                rem += 1
                if rem != 0:
                    newG.add(obj)
                    self.centipede_group.remove(obj)
            # A 0/1-segment remainder is no longer a viable centipede.
            if len(self.centipede_group) <= 1:
                self.controller.centipedes.remove(self)
            if len(newG) <= 2:
                return
            # initX=-1 keeps the detached segments where they are.
            temp = Centipede(self.controller, 0, -1, -1, self.moveAmount, newG)
            for x in temp.centipede_group:
                x.parent = temp
                x.direction = backwards(x.direction)
                x.updateDirection()
            self.controller.centipedes.append(temp)
        except Exception as e:
            # Fix: Python 3 exceptions have no .message attribute, so the
            # original print(e.message) raised AttributeError and masked the
            # real error.
            print(e)

    def initPos(self):
        """Lay the segments out in a horizontal row starting at the head."""
        count = 0
        for x in self.centipede_group:
            x.rect.x = self.headX + (20 * count)
            x.rect.y = self.headY + 20
            count += 1

    def updatePos(self, collideWith, bullets):
        """Move the trailing segment and queue follow-up movements."""
        count = 0
        trail = self.centipede_group.sprites()[len(self.centipede_group.sprites()) - 1]
        trail.move(self.headX, self.headY, self.speed, collideWith, bullets, count, self.centipede_group)
        newX = trail.rect.x
        newY = trail.rect.y
        self.pendingMovements.append(PendingMovement((7 / 2) * self.speed, self.centipede_group, newX, newY, collideWith, bullets, trail.direction))
        # Fix: the original removed items while iterating the same list,
        # which skips the element after every removal; rebuild instead.
        self.pendingMovements = [m for m in self.pendingMovements if not m.tick()]
{
"api_name": "Direction.Direction.up",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "Direction.Direction",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "Direction.Direction.down",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_na... |
21749274335 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
.. _log
Basic stdout log classes: Error, Verbose and Warning.
**Content**
"""
# *credits*: `gjacopo <jacopo.grazzini@ec.europa.eu>`_
# *since*: Fri May 8 15:21:31 2020
#%% Settings
import os, sys, warnings
import inspect
import functools
import six
DEFVERBOSE = False # True
REDUCE_ANSWER = False # ! used for testing purpose: do not change !
EXCLUSIVE_ARGUMENTS = False # ! used for settings: do not change !
#%% Core functions/classes
#==============================================================================
# Class Warning
#==============================================================================
class Warnings(Warning):
    """Dummy class for warnings in this package.

    Instantiating the class prints the warning immediately, formatted as
    ``! <msg> <expr> !``.

    Arguments
    ---------
    warnmsg : str
        warning message to display.

    Keyword arguments
    -----------------
    expr : str
        input expression in which the warning occurs; default: :data:`expr`
        is :data:`None`.

    Example
    -------
    >>> Warnings('This is a very interesting warning');
        Warnings: ! This is a very interesting warning !
    """
    def __init__(self, msg='', **kwargs):
        self.msg = msg
        expr = kwargs.pop('expr', None)
        self.expr = '' if expr is None else expr
        # Echo the warning as soon as it is constructed.
        print(self)

    def __repr__(self):
        return self.msg

    def __str__(self):
        separator = ' ' if (self.msg and self.expr) else ''
        return "! {}{}{} !".format(self.msg, separator, self.expr)
#==============================================================================
# Class Verbose
#==============================================================================
class Verbose(object):
    """Dummy class for verbose printing mode in this package.

    When the :data:`verb` flag is on, the message is echoed at construction
    time with a ``[verbose] -`` prefix.

    Arguments
    ---------
    msg : str
        verbose message to display.

    Keyword arguments
    -----------------
    verb : bool
        flag enabling the immediate printing; defaults to the module-level
        :data:`DEFVERBOSE` setting.
    expr : str
        input expression in which the verbose mode is called (optional).

    Example
    -------
    >>> Verbose('The more we talk, we less we do...', verb=True);
        [verbose] - The more we talk, we less we do...
    """
    def __init__(self, msg='', **kwargs):
        self.msg = msg
        expr = kwargs.pop('expr', '')
        if kwargs.pop('verb', DEFVERBOSE) is True:
            print('\n! [verbose] - {} !'.format(self.msg))
        if expr is not None:
            self.expr = expr

    def __str__(self):
        return repr(self.msg)
#==============================================================================
# Class Error
#==============================================================================
class Error(Exception):
    """Dummy class for exception raising in this package.

        >>> raise Error(msg, type=None, code=None, expr='')

    Arguments
    ---------
    errmsg : str
        message -- explanation of the error.

    Keyword arguments
    -----------------
    type : object
        error type; when left to :data:`None`, the current exception type is
        looked up through :data:`sys.exc_info()`.
    code : (float,int)
        error code; default: :data:`None`.
    expr : str
        input expression in which the error occurred; default: :data:`None`.

    Example
    -------
    >>> try:
            assert False
        except:
            raise Error('It is False')
        Traceback ...
        ...
        Error: !!! AssertionError: It is False !!!
    """
    def __init__(self, msg='', **kwargs):
        self.msg = msg
        typ = kwargs.pop('type', None)
        code = kwargs.pop('code', None)
        expr = kwargs.pop('expr', '')
        self.expr = expr if expr is not None else ''
        if typ is None:
            # Fall back on the exception currently being handled, if any.
            try:
                typ = sys.exc_info()[0]
            except:
                pass
        if inspect.isclass(typ):
            self.type = typ.__name__
        elif isinstance(typ, (int, float)):
            self.type = str(typ)
        else:
            self.type = typ
        self.code = str(code) if code is not None else ''

    def __str__(self):
        # Assemble "<type> <code>: <msg> <expr>" with separators only where
        # both neighbours are non-empty, then wrap in "!!! ... !!!".
        head_sep = ' ' if (self.type and self.code) else ''
        colon = ': ' if (self.type or self.code) and (self.msg or self.expr) else ''
        body_sep = ' ' if (self.msg and self.expr) else ''
        core = '{}{}{}{}{}{}{}'.format(
            self.type or '', head_sep, self.code or '', colon,
            self.msg or '', body_sep, self.expr or '')
        prefix = '' if core.startswith('!!!') else '!!! '
        suffix = '' if core.endswith('!!!') else ' !!!'
        return prefix + core + suffix
#==============================================================================
# Method deprecated
#==============================================================================
def deprecated(reason, run=True):
    """This is a decorator which can be used to mark functions as deprecated.

        >>> new = deprecated(reason)

    Arguments
    ---------
    reason : str
        optional string explaining the deprecation.

    Keywords arguments
    ------------------
    run : bool
        set to run the function/method/... despite being deprecated; default:
        :data:`True` and the decorated method/function/... is still run.

    Examples
    --------
    The deprecated function can be used to decorate different objects:

    >>> @deprecated("use another function")
    ... def old_function(x, y):
    ...     return x + y
    >>> old_function(1, 2)
        __main__:1: DeprecationWarning: Call to deprecated function old_function (use another function).
        3
    >>> class SomeClass(object):
    ...     @deprecated("use another method", run=False)
    ...     def old_method(self, x, y):
    ...         return x + y
    >>> SomeClass().old_method(1, 2)
        __main__:1: DeprecationWarning: Call to deprecated function old_method (use another method).
    >>> @deprecated("use another class")
    ... class OldClass(object):
    ...     pass
    >>> OldClass()
        __main__:1: DeprecationWarning: Call to deprecated class OldClass (use another class).
        <__main__.OldClass at 0x311e410f0>

    Note
    ----
    It will result in a warning being emitted when the function is used and when
    a :data:`reason` is passed.
    """
    # see https://stackoverflow.com/questions/2536307/decorators-in-the-python-standard-lib-deprecated-specifically
    # Branch 1: used WITH a reason string, i.e. @deprecated("why").
    if isinstance(reason, six.string_types): # happyType.isstring(reason):
        def decorator(func1):
            if inspect.isclass(func1):
                fmt1 = "Call to deprecated class {name} ({reason})."
            else:
                fmt1 = "Call to deprecated function {name} ({reason})."
            @functools.wraps(func1)
            def new_func1(*args, **kwargs):
                # Temporarily force the warning to show, then restore the
                # default filter so callers' warning config is not changed.
                warnings.simplefilter('always', DeprecationWarning)
                warnings.warn(
                    fmt1.format(name=func1.__name__, reason=reason),
                    category=DeprecationWarning,
                    stacklevel=2
                )
                warnings.simplefilter('default', DeprecationWarning)
                if run is True:
                    return func1(*args, **kwargs)
            return new_func1
        return decorator
    # Branch 2: used BARE, i.e. @deprecated — 'reason' is the decorated object.
    elif inspect.isclass(reason) or inspect.isfunction(reason):
        func2 = reason
        if inspect.isclass(func2):
            fmt2 = "Call to deprecated class {name}."
        else:
            fmt2 = "Call to deprecated function {name}."
        @functools.wraps(func2)
        def new_func2(*args, **kwargs):
            warnings.simplefilter('always', DeprecationWarning)
            warnings.warn(
                fmt2.format(name=func2.__name__),
                category=DeprecationWarning,
                stacklevel=2
            )
            warnings.simplefilter('default', DeprecationWarning)
            if run is True:
                return func2(*args, **kwargs)
        return new_func2
    else:
        raise Error('wrong type for input reason - %s not supported' % repr(type(reason)))
| eurostat/pyDatUtils | pydatutils/log.py | log.py | py | 9,469 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.exc_info",
"line_number": 163,
"usage_type": "call"
},
{
"api_name": "inspect.isclass",
"line_number": 165,
"usage_type": "call"
},
{
"api_name": "six.string_types",
"line_number": 241,
"usage_type": "attribute"
},
{
"api_name": "inspect.isclass... |
28151218606 | # -*- coding: utf-8 -*-
import scrapy
from scrapy.linkextractors import LinkExtractor
from scrapy.spiders import CrawlSpider, Rule
from ncar.items import NcarItem
class GetsomeSpider(CrawlSpider):
    """Crawl bbs.ncar.cc forum 129 and collect per-thread attachment links."""

    # NOTE(review): ``li`` is declared global and created at class-definition
    # time, so it is one module-level list shared by every spider instance
    # and run — confirm this is intentional.
    global li
    li = list()
    name = 'getsome'
    allowed_domains = ['ncar.cc']
    start_urls = ['http://bbs.ncar.cc/forum.php?mod=forumdisplay&fid=129']
    rules = (
        Rule(LinkExtractor(restrict_xpaths="//div[@class='xbs xbs_4 block move-span']//li/a"), callback='parse_item', follow=True),
    )

    def parse_start_url(self,response):
        """Queue one NcarItem per thread link found on the listing page."""
        for sel in response.xpath("//div[@class='xbs xbs_4 block move-span']//li/a"):
            item = NcarItem()
            item['name'] = sel.xpath("text()").extract()
            item['url'] = sel.xpath("@href").extract()
            item['links'] = dict()
            li.append(item)
        # print li

    def parse_item(self, response):
        """Attach the download links found on a thread page to its item."""
        for l in li:
            # print response.url.find(l['url'][0]),l['url'][0],response.url,type(l['url']),type(response.url.find(l['url'][0]))
            #if(l['url']==response.url):
            # Match the queued item whose URL is a substring of this page URL.
            if(response.url.find(l['url'][0])!=-1):
                for sel in response.xpath("//td[@class='t_f']/div[3]/a"):
                    l['links'][sel.xpath("text()").extract()[0]]=sel.xpath("@href").extract()[0]
                yield l
| sv2sv/ncar | ncar/spiders/getsome.py | getsome.py | py | 1,337 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scrapy.spiders.CrawlSpider",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "scrapy.spiders.Rule",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scrapy.linkextractors.LinkExtractor",
"line_number": 16,
"usage_type": "call"
},
{
... |
32017566370 | from django.contrib.auth.models import User
from django.http import HttpResponse
from django.shortcuts import redirect, render
from .HospitalDBConnect import *
def home(request):
    ''' the home for hosptial admins '''
    # Dashboard summary counts; each helper returns a one-row result whose
    # single aggregate column is unpacked below.
    appt_count = view_appt_count()
    doc_count = view_doc_count()
    room_count = view_room_count()
    appt_count = appt_count[0]['count(PatientID)']
    doc_count = doc_count[0]['count(DocID)']
    room_count = room_count[0]['count(RoomNumber)']
    context = {
        "appt_count": appt_count,
        'doc_count': doc_count,
        'room_count': room_count
    }
    # Fix: render() already returns an HttpResponse; re-wrapping it in
    # HttpResponse(...) discards its headers/status, so return it directly.
    return render(request, 'Hosptial/home.html', context)
def search(request):
    ''' search for firstname, lastname, email '''
    if request.method == 'POST':
        first_name = request.POST.get("firstName", "")
        last_name = request.POST.get("lastName", "")
        matching_patients = view_person_search_count(first_name, last_name)
        context = {'patients': matching_patients}
        # Fix: render() already returns an HttpResponse — no extra wrapping.
        return render(request, 'Hosptial/search.html', context)
    else:
        return render(request, 'Hosptial/search.html')
def appointments(request):
    ''' lists all appointments '''
    all_appointments = view_Appointments()
    context = {'appts': all_appointments}
    # Fix: render() already returns an HttpResponse — no extra wrapping.
    return render(request, 'Hosptial/appointments.html', context)
def profile(request, patient_id):
    ''' shows a patient profile (visit history) '''
    patient_profile = view_history(patient_id)
    context = {"profile": patient_profile}
    # Fix: render() already returns an HttpResponse — no extra wrapping.
    return render(request, 'Hosptial/profile.html', context)
def treatment(request, patient_id):
    ''' creates a treatment for a patient, then redirects to the profile '''
    if request.method == 'POST':
        doc_id = request.POST.get("doctorName", "")
        aliment = request.POST.get("aliment", "")
        pre_date = request.POST.get("pdate", "")
        expected = request.POST.get("expected", "")
        warnings = request.POST.get("warnings", "")
        # NOTE(review): doc_id is read but never passed to InsertTreatment —
        # confirm whether the treatment should record the prescribing doctor.
        InsertTreatment(aliment, str(pre_date), str(expected), str(warnings))
        return redirect('/hosptial/profile/' + patient_id)
    else:
        # Latest profile entry for the sidebar plus the doctor drop-down.
        patient_profile = view_history(patient_id)
        all_doctors = view_Doctors()
        context = {"profile": patient_profile[-1], "doctors": all_doctors}
        # Fix: render() already returns an HttpResponse — no extra wrapping.
        return render(request, 'Hosptial/create_treatment.html', context)
def update_appointment(request, patient_id, doc_id):
    ''' handles appointments update '''
    if request.method == 'POST':
        prefered_doctor = request.POST.get("preferedDoctor", "")
        prefered_date = request.POST.get("preferedDate", "")
        reason = request.POST.get("reason", "")
        Update_Appointment(
            int(patient_id), int(doc_id.strip('/')), int(prefered_doctor),
            str(prefered_date), reason)
        return redirect('/hosptial/appointments/')
    else:
        # Doctor drop-down plus the appointment key for the form action.
        all_doctors = view_Doctors()
        context = {
            'doctors': all_doctors,
            'PatientID': patient_id,
            'DocID': doc_id
        }
        # Fix: render() already returns an HttpResponse — no extra wrapping.
        return render(request, 'Hosptial/update_appt.html', context)
def patients(request):
    ''' shows all patients and leads to their profiles '''
    all_patients = view_Patients()
    context = {'patients': all_patients}
    # Fix: render() already returns an HttpResponse — no extra wrapping.
    return render(request, 'Hosptial/patients.html', context)
def doctors(request):
    ''' shows all doctors and their info '''
    all_doctors = view_Doctors()
    context = {'doctors': all_doctors}
    # Fix: render() already returns an HttpResponse — no extra wrapping.
    return render(request, 'Hosptial/doctors.html', context)
def nurses(request):
    ''' shows all nurses and their info '''
    all_nurses = view_Nurses()
    context = {'nurses': all_nurses}
    # Fix: render() already returns an HttpResponse — no extra wrapping.
    return render(request, 'Hosptial/nurses.html', context)
def employees(request):
    ''' shows all employees and their info '''
    all_employees = view_Employees()
    context = {'employees': all_employees}
    # Fix: render() already returns an HttpResponse — no extra wrapping.
    return render(request, 'Hosptial/employees.html', context)
def rooms(request):
    '''Show information about rooms (GET only).'''
    context = {'rooms': view_Rooms()}
    return HttpResponse(render(request, 'Hosptial/rooms.html', context))
def departments(request):
    '''Show department information (GET only).'''
    context = {'departments': view_Departments()}
    return HttpResponse(render(request, 'Hosptial/departments.html', context))
def bills(request):
    '''Render the list of all bills.'''
    context = {'bills': view_Bills()}
    return HttpResponse(render(request, 'Hosptial/bills.html', context))
def bills_more(request, billNumber):
    '''Show the detail page for a single bill.

    Looks the bill up by its number and renders the first matching
    record.  (Original docstring wrongly said "shows the user profile";
    a leftover debug ``print(context)`` has also been removed.)
    '''
    matching_bills = view_Bill_more(billNumber)
    # view_Bill_more returns a sequence; only the first record is shown.
    context = {'bill': matching_bills[0]}
    return HttpResponse(render(request, 'Hosptial/bills_more.html', context))
def create_bill(request, patient_id):
    '''Create a bill for a patient (POST) or show the creation form (GET).'''
    if request.method == 'POST':
        posted = request.POST
        # Argument order matches InsertBill: patient, release date,
        # amount, description, due date.
        InsertBill(
            int(patient_id),
            str(posted.get("reDate", "")),
            int(posted.get("amount", "")),
            str(posted.get("description", "")),
            str(posted.get("dueDate", "")))
        return redirect('/hosptial/bills')
    form_context = {'PatientID': patient_id}
    return HttpResponse(
        render(request, 'Hosptial/create_bill.html', form_context))
def delete_bill(request, bill_num):
    '''Delete the bill identified by ``bill_num`` and return to the list.'''
    # Remove the record, then send the user back to the overview page.
    DeleteBill(bill_num)
    return redirect('/hosptial/bills')
| yonathanF/Hospital_Management | HospitalManagement/Hospital/views.py | views.py | py | 6,376 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 42,
"usage_type": "call"
},
{
"api_nam... |
36502272504 | from __future__ import print_function
import argparse
import os
import random
import torch
import torch.nn as nn
import torch.nn.parallel
import torch.backends.cudnn as cudnn
import torch.optim as optim
import torch.utils.data
import torchvision.datasets as dset
import torchvision.transforms as transforms
import torchvision.utils as vutils
from models import *
from my_utils import *
import matplotlib.pyplot as plt
def train_and_validation(opt,dataloader):
    """Run the DCGAN training loop over ``dataloader``.

    ``opt`` is an argparse-style namespace; the fields read here are
    cuda, ngpu, nz, ngf, ndf, imageSize, netG, netD, lr, beta1,
    batchSize, nepo and outf.  Sample image grids and model checkpoints
    are written under ``opt.outf``.
    """
    #=== parse opt =============
    device = torch.device("cuda:0" if opt.cuda else "cpu")
    ngpu = int(opt.ngpu)
    nz = int(opt.nz)    # size of the latent noise vector
    ngf = int(opt.ngf)  # generator feature-map width
    ndf = int(opt.ndf)  # discriminator feature-map width
    nc = 3 #num channel
    imagesize = opt.imageSize
    #load checkpoint by inline code
    #opt.netG = './saves/lfw/netG_epoch_12.pth'
    #opt.netD = ''
    #============================
    #====== model and optimizer =========
    # Build the generator; resume from a checkpoint when a path is given.
    netG = Generator(ngpu,nz = nz, ngf = ngf, nc = nc , imagesize = imagesize).to(device)
    netG.apply(weights_init)
    if opt.netG != '':
        netG.load_state_dict(torch.load(opt.netG))
    print(netG)
    # Build the discriminator the same way.
    netD = Discriminator(ngpu,nc = nc , ndf= ndf,imagesize = imagesize).to(device)
    netD.apply(weights_init)
    if opt.netD != '':
        netD.load_state_dict(torch.load(opt.netD))
    print(netD)
    criterion = nn.BCELoss()
    optimizer_netD = torch.optim.Adam(netD.parameters(),lr = opt.lr,betas = (opt.beta1,0.9999))
    optimizer_netG = torch.optim.Adam(netG.parameters(),lr = opt.lr, betas = (opt.beta1,0.9999))
    # Fixed noise so the periodic sample grids are comparable across epochs.
    fixed_noise = torch.randn(opt.batchSize, nz, 1, 1, device=device)
    for epo in range(opt.nepo):
        for itr, (data,label) in enumerate(dataloader):
            #=================================
            # (1) update Discriminator :
            #     maximize log D(x)  + log (1 - D(G(z))
            #   = minimize -1* (log D(x) + log( 1- D(G(z)))
            #=================================
            optimizer_netD.zero_grad()
            real_data_cpu = data
            real_data = data.to(device)
            # Real batch gets target label 1.  NOTE(review): the dataset
            # label from ``dataloader`` is discarded and overwritten here.
            label = torch.full((opt.batchSize,), 1, device=device)
            output = netD(real_data)
            loss_Real = criterion(output,label)
            loss_Real.backward()
            # Fake batch gets target label 0; detach so G gets no gradient.
            label = label.fill_(0)
            noise = torch.randn((opt.batchSize,nz,1,1),device = device)
            fake = netG(noise)
            output_fake = netD(fake.detach())
            loss_Fake = criterion(output_fake,label)
            loss_Fake.backward()
            optimizer_netD.step()
            loss_D = (loss_Real + loss_Fake).item()
            # =================================
            # (1) update Generator :
            #     maximize log (D(G(z))
            #   = minimize -1* (log( D(G(z)))
            # =================================
            optimizer_netG.zero_grad()
            noise = torch.randn((opt.batchSize,nz,1,1,), device = device)
            fake = netG(noise)
            output_fake = netD(fake)
            label = label.fill_(1)
            # Two candidate generator losses: the saturating
            # "minimise log(1-D)" form negated (loss1) and the usual
            # non-saturating form (loss2); the larger-magnitude one is
            # backpropagated.  NOTE(review): this is non-standard DCGAN
            # practice -- confirm it is intentional.
            loss1 = -1 * criterion(output_fake, torch.full((opt.batchSize,), 0, device=device))
            loss2 = criterion(output_fake, label.fill_(1))
            if(torch.abs(loss1) < torch.abs(loss2)):
                loss = loss2
                print(round(loss1.item(),4), round(loss2.item(),4))
            else:
                loss = loss1
                print(round(loss1.item(), 4), round(loss2.item(), 4))
            #loss = criterion(output_fake,label.fill_(0))
            loss.backward()
            optimizer_netG.step()
            loss_G = loss.item()
            print("itr:",itr ,"loss_D : ", round(loss_D,4), "loss_G : ", round(loss_G,4) , "D(fake) :", round(torch.mean(output_fake).detach().item(),4))
            # Every 50 iterations, dump a real-sample grid and a grid
            # generated from the fixed noise vector.
            if itr% 50 == 0:
                ''' for display
                imshow_with_tensor(vutils.make_grid(fake.detach()).cpu())
                if not os.path.isdir(os.path.join(opt.outf)):
                    os.makedirs(os.path.join(opt.outf), exist_ok=True)
                vutils.save_image(fake.detach(),'%s/fake_samples_epoch_%03d.png' % (opt.outf, epo))
                '''
                if not os.path.isdir(os.path.join(opt.outf)):
                    os.makedirs(os.path.join(opt.outf), exist_ok=True)
                vutils.save_image(real_data_cpu, '%s/real_samples.png' % opt.outf, normalize=True)
                fake = netG(fixed_noise)
                if not os.path.isdir(os.path.join(opt.outf)):
                    os.makedirs(os.path.join(opt.outf), exist_ok=True)
                vutils.save_image(fake.detach(), '%s/fake_samples_epoch_%03d.png' % (opt.outf, epo),
                                  normalize=True)
        # do checkpointing
        torch.save(netG.state_dict(), '%s/netG_epoch_%d.pth' % (opt.outf, epo))
        torch.save(netD.state_dict(), '%s/netD_epoch_%d.pth' % (opt.outf, epo))
| ppooiiuuyh/-PyTorch-implementations | DCGAN/train_and_valiate.py | train_and_valiate.py | py | 4,890 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "torch.load",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "torch.nn.BCELoss",
"line_numbe... |
10823546127 | """ Python3: Save single page of pdf as a new pdf file """
import PyPDF2
# Initialize input pdf file
in_pdf = open(r'/path/to/input.pdf', 'rb')
pdf_reader = PyPDF2.PdfFileReader(in_pdf) # Reader element
pdf_writer = PyPDF2.PdfFileWriter() # Writer element
pdf_writer.addPage(pdf_reader.getPage(n)) # Choose n-th page number
# Initialize output pdf file
out_pdf = open(r'/path/to/output.pdf', 'wb')
pdf_writer.write(out_pdf)
# Close I/O pdf files
out_pdf.close()
in_pdf.close()
| CRTejaswi/Python3 | Text Processing/PyPDF2/1.py | 1.py | py | 502 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyPDF2.PdfFileReader",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "PyPDF2.PdfFileWriter",
"line_number": 6,
"usage_type": "call"
}
] |
import xml.etree.ElementTree as ET
import json

# KML elements live in this XML namespace.
KML_NS = '{http://www.opengis.net/kml/2.2}'

# Parse the KML document and walk every Placemark element.
root = ET.parse('tests/places.kml').getroot()

data = []
for placemark in root.findall('.//' + KML_NS + 'Placemark'):
    name = placemark.find('.//' + KML_NS + 'name').text.strip()
    coordinates = placemark.find('.//' + KML_NS + 'coordinates').text.strip()
    # Keep only longitude/latitude (drop the altitude component) as floats.
    data.append({
        "name": name,
        "coordinates": [float(part) for part in coordinates.split(",")[:2]],
    })

# Dump the collected placemarks as JSON.
with open("tests/print.json", "w") as f:
    json.dump(data, f)
| bipinkrish/campusmap | tests/places.py | places.py | py | 731 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 25,
"usage_type": "call"
}
] |
import itertools

def solution(users, emoticons):
    """Choose per-emoticon discount rates (10/20/30/40%) that maximise
    the number of Plus subscribers first and sale revenue second.

    Each user is ``[min_rate, budget]``: the user buys every emoticon
    discounted at least ``min_rate`` percent, but subscribes to Plus
    instead of buying once the total reaches ``budget``.

    Returns ``[subscribers, revenue]`` for the best rate assignment.
    (Leftover debug ``print`` calls removed; exact integer arithmetic
    replaces float division to avoid truncation surprises.)
    """
    DISCOUNT_RATES = (10, 20, 30, 40)
    best_subscribers, best_revenue = 0, 0
    # Brute-force every rate assignment over the emoticons.
    for rates in itertools.product(DISCOUNT_RATES, repeat=len(emoticons)):
        subscribers = 0
        revenue = 0
        for min_rate, budget in users:
            spent = sum(
                price * (100 - rate) // 100
                for price, rate in zip(emoticons, rates)
                if rate >= min_rate)
            if spent >= budget:
                subscribers += 1
            else:
                revenue += spent
        # Lexicographic comparison: subscribers first, then revenue.
        if (subscribers, revenue) > (best_subscribers, best_revenue):
            best_subscribers, best_revenue = subscribers, revenue
    return [best_subscribers, best_revenue]
| SunghunKim98/Algorithm_Study | sprint10/KMS/실시간/이모티콘 할인행사.py | 이모티콘 할인행사.py | py | 839 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.product",
"line_number": 7,
"usage_type": "call"
}
] |
29728640167 | import numpy as np
from PIL import Image
from tqdm import tqdm
from modules.Zest.Zest_Network import Zest_Network
class Zest_ImageProcessing(Zest_Network):
    """Upscales images tile-by-tile with the Zest network.

    Each 16x16 low-quality tile is flattened, fed through the network,
    and the resulting 32x32 high-quality tile is written into an output
    image twice the size of the (cropped) input.
    """

    # Edge length of one network output tile, in pixels.
    square_size_HQ = 32
    # Edge length of one network input tile, in pixels.
    square_size_LQ = 16

    def __init__(self, x, y) -> None:
        super().__init__(x, y)

    def process_image(self, image_path = None, image_pillow_obj = None):
        """Upscale an image given by file path or as a Pillow object.

        Returns the upscaled ``PIL.Image`` instance, or ``None`` when no
        usable input was supplied.
        """
        # Prepare the input image object.
        if image_path is not None:
            image_to_process = Image.open(image_path)
        elif image_pillow_obj is not None:
            # BUG FIX: ``Image`` is the PIL *module*, so the original
            # ``isinstance(obj, Image)`` raised TypeError at runtime.
            # The image base class is ``Image.Image``.
            if isinstance(image_pillow_obj, Image.Image):
                image_to_process = image_pillow_obj
            else:
                # Not a Pillow image object.
                return None
        else:
            # Nothing to work on.
            return None
        # Crop so both dimensions are whole multiples of the 16px tiles.
        image_to_process = image_to_process.crop(
            (
                0,
                0,
                (image_to_process.size[0] // 16) * 16,
                (image_to_process.size[1] // 16) * 16
            )
        )
        # Blank output image at twice the input resolution.
        image_to_output = Image.new(
            'RGB',
            (
                image_to_process.size[0] * 2,
                image_to_process.size[1] * 2
            )
        )
        for img_x in range(0, image_to_process.size[0], self.square_size_LQ):
            for img_y in range(0, image_to_process.size[1], self.square_size_LQ):
                # Flatten the 16x16 RGB tile into a [0, 1]-normalised
                # vector in the order the network expects.
                cache_square_LQ = []
                for square_x in range(img_x, (img_x) + 16):
                    for square_y in range(img_y, (img_y) + 16):
                        for color_value in image_to_process.getpixel((square_x, square_y)):
                            cache_square_LQ.append(color_value / 255)
                self.feed_forward(cache_square_LQ)
                # Write the network output (a flat RGB vector) into the
                # corresponding 32x32 tile of the output image.
                for pixel_color in range(0, len(self.output), 3):
                    # Recover (x, y) within the tile from the flat index.
                    pixel_y = ((pixel_color / 3) // self.square_size_HQ)
                    pixel_x = (pixel_color // 3) - (pixel_y * self.square_size_HQ)
                    image_to_output.putpixel(
                        (int(img_x * 2 + pixel_x), int(img_y * 2 + pixel_y)),
                        (
                            int(self.output[pixel_color] * 255),
                            int(self.output[pixel_color + 1] * 255),
                            int(self.output[pixel_color + 2] * 255)
                        )
                    )
        return image_to_output
| XOYZ69/Zest | modules/Zest/Zest_ImageProcessing.py | Zest_ImageProcessing.py | py | 3,094 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "modules.Zest.Zest_Network.Zest_Network",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "PIL.Im... |
25784868828 | from __future__ import print_function
import asyncio
import random
import threading
import time
import numpy as np
import websockets
from websocket import create_connection
from robot_dqnagent import DQNAgent
from robot_arena import Arena
class MSGWorker (threading.Thread):
    """Bridges a websocket server to a DQN agent driving a robot.

    ``handler`` receives "direction,speed" messages, feeds them to the
    agent via ``act``, and ``act`` forwards the chosen command to the
    robot over a separate websocket connection.
    """
    def __init__(self):
        # Last known robot coordinates (unused by the visible code paths).
        self.coords = [100, 100]
        state_size = 2
        action_size = 5
        # Last command sent, used to avoid re-sending the same command.
        self.currentAction = ""
        self.agent = DQNAgent(state_size, action_size)
        self.agent.load('trekker_5000_refactored.h5')
        threading.Thread.__init__(self)
        self.connected = set()
    def run(self):
        """Thread body: idle loop; all real work happens in the asyncio
        handler on the main thread."""
        while True:
            time.sleep(0.001)
    def act(self,x,y):
        """Feed state (x, y) to the agent and send the chosen command to
        the robot at 192.168.178.61.

        NOTE(review): ``handler`` passes string slices here, so
        ``np.array([x,y])`` becomes a string array -- confirm the agent
        accepts that, or convert to float first.
        """
        state = np.array([x,y])
        #arena.drawRobot(state)
        #arena.setPos(np.array_str(state))
        mops= np.reshape(state, [1, 2])
        # ``arena`` is the module-level Arena created in __main__.
        arena.drawRobot(state)
        action = self.agent.act_execute(mops)
        # Each branch opens a fresh connection, sends one command and
        # closes; commands are only sent when the action changed.
        if action == 0 and self.currentAction != 0: # up
            print("up")
            self.currentAction = 0
            ws = create_connection("ws://192.168.178.61:8080")
            ws.send("camup")
            ws.close()
        if action == 1 and self.currentAction != 1: # down
            print("down")
            self.currentAction = 1
            ws = create_connection("ws://192.168.178.61:8080")
            ws.send("camdown")
            ws.close()
        if action == 2 and self.currentAction != 2: # left
            print("left")
            self.currentAction = 2
            ws = create_connection("ws://192.168.178.61:8080")
            ws.send("left,1")
            ws.close()
        if action == 3 and self.currentAction != 3: # right
            print("right")
            self.currentAction = 3
            ws = create_connection("ws://192.168.178.61:8080")
            ws.send("right,1")
            ws.close()
    # NOTE(review): @asyncio.coroutine is deprecated (removed in
    # Python 3.11); migrate to ``async def`` + ``await``.
    @asyncio.coroutine
    def handler(self, websocket, path):
        """Per-connection coroutine: parse one "direction,speed" message
        and forward it to ``act``."""
        self.connected.add(websocket)
        try:
            name = yield from websocket.recv()
            commaindex = name.find(",")
            commandlength = len(name)
            direction = name[0:commaindex]
            self.speed = name[commaindex+1:commandlength]
            self.act(direction,self.speed)
            #print(direction+','+self.speed)
            # here are the coordinates coming -> handled to the message worker !
        except websockets.exceptions.ConnectionClosed:
            pass
        finally:
            self.connected.remove(websocket)
    def sendData(self, data):
        """Broadcast ``data`` to every connected client.

        NOTE(review): ``yield from`` makes this a generator that is
        never driven, and ``loop`` relies on the module-level name from
        __main__ -- this method looks non-functional as written; verify
        before relying on it.
        """
        for websocket in self.connected.copy():
            #print("Sending data: %s" % data)
            coro =yield from websocket.send(data)
            future = asyncio.run_coroutine_threadsafe(coro, loop)
if __name__ == "__main__":
print('AI Server')
msgWorker = MSGWorker()
arena = Arena()
try:
msgWorker.start()
ws_server = websockets.serve(msgWorker.handler, '192.168.178.67', 8080)
loop = asyncio.get_event_loop()
loop.run_until_complete(ws_server)
loop.run_forever()
except KeyboardInterrupt:
stopFlag = True
#TODO: close ws server and loop correctely
print("Exiting program...")
| SundayLab/robot_dqn | robot_server_execute.py | robot_server_execute.py | py | 3,002 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "threading.Thread",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "robot_dqnagent.DQNAgent",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 26,
"usage_type": "call"
},
{
"api_name"... |
22254776273 | import math
from PyQt5.QtWidgets import QGraphicsView, QGraphicsLineItem, QApplication, QMenu, QAction
from PyQt5.QtGui import QColor, QBrush, QPen
from PyQt5.QtCore import pyqtSlot, QLineF, QRectF, QPoint, QPointF, Qt
from pyqtgraph import GraphicsLayoutWidget, PlotItem, ViewBox, GraphicsItem, GraphicsView, PlotDataItem, TextItem, ButtonItem
from orbit import Orbit, BPM
from bpm_line_item import BPMLineItem
from magnet_view import MagnetView
from PyQt5.QtCore import QTimer
class OrbitView(GraphicsLayoutWidget):
    """Live plot of a BPM orbit (x, y or tmit axis).

    Draws one vertical line per BPM, refreshed on ``draw_timer`` ticks,
    with optional upstream/downstream magnet strips, sector tick labels,
    and an overlaid fit curve.
    """
    def __init__(self, orbit=None, axis="X", use_sector_ticks=True, parent=None, ymin=-1.0, ymax=1.0, name=None, label=None, units=None, draw_timer=None, magnet_list=None):
        super(OrbitView, self).__init__(parent=parent)
        axis = axis.lower()
        if axis not in ["x", "y", "tmit"]:
            raise Exception("Axis must be 'x', 'y', or 'tmit'")
        self.axis = axis
        self.use_sector_ticks = use_sector_ticks
        self.sector_ticks = [[],[]]
        self.ci.layout.setSpacing(0.0)
        # Rotated label on the far left identifies this plot.
        self.plotLabel = self.addLabel(text=name, row=0, col=0, rowspan=3, angle=-90)
        # Magnet strips above (row 0 after layout) and below the plot.
        self.up_magnet_view = MagnetView(magnet_list=orbit, direction="up")
        self.up_magnet_view.hideAxis('left')
        self.up_magnet_view.hideAxis('bottom')
        self.ci.layout.setRowStretchFactor(0,3)
        self.plotItem = self.addPlot(name=name, row=0, col=1)
        self.ci.layout.setRowStretchFactor(1,0)
        self.down_magnet_view = MagnetView(magnet_list=orbit, direction="down")
        self.down_magnet_view.hideAxis('left')
        # Magnet strips pan/zoom with the main plot's X axis.
        self.up_magnet_view.setXLink(self.plotItem)
        self.down_magnet_view.setXLink(self.plotItem)
        self.ci.layout.setRowStretchFactor(2,0)
        self.show_magnet_buttons = False
        # TMIT plots never show magnet strips.
        if axis != "tmit" and magnet_list is not None:
            self.show_magnet_views(True)
        self.setViewportUpdateMode(QGraphicsView.BoundingRectViewportUpdate) # Greatly improves drawing performance.
        self.plotItem.setMouseEnabled(y=False)
        #Customize the right-click menu.
        self.plotItem.setMenuEnabled(enableMenu=False, enableViewBoxMenu=None)
        reset_view_range = QAction("Reset View Range", self.plotItem.vb.menu)
        reset_view_range.triggered.connect(self.reset_range)
        self.plotItem.vb.scene().contextMenu = []
        existing_menu_actions = self.plotItem.vb.menu.actions()
        self.plotItem.vb.menu.insertAction(existing_menu_actions[0], reset_view_range)
        # Replace pyqtgraph's "View All" with our own reset action.
        for action in existing_menu_actions:
            if str(action.text()) == "View All":
                self.plotItem.vb.menu.removeAction(action)
        self.plotItem.showGrid(y=True)
        #self.plotItem.getAxis('left').setStyle(tickTextWidth=60)
        #self.plotItem.getAxis('left').setStyle(autoExpandTextSpace=False)
        #if label is not None:
        #	self.plotItem.getAxis('left').enableAutoSIPrefix(enable=False)
        #	self.plotItem.getAxis('left').setLabel(text=label, units=units)
        # Pens/brushes: green for normal BPMs, blue for energy BPMs,
        # translucent green for no-beam, white for axis and fit curve.
        self.bpm_brush = QBrush(QColor(0,255,0))
        self.energy_bpm_brush = QBrush(QColor(100,200,255))
        self.ymin = ymin #Y axis goes from self.ymin to self.ymax by default.
        self.ymax = ymax
        self.yminlimit = 10.0*ymin #This is the limit on the Y axis range.
        self.ymaxlimit = 10.0*ymax #This is the upper limit on the Y axis range.
        self.plotItem.setLimits(minYRange=0.04, maxYRange=abs(self.ymaxlimit - self.yminlimit))
        self.axis_pen = QPen(QBrush(QColor(255,255,255)), 0)
        self.axis_pen.setCapStyle(Qt.FlatCap)
        self.bpm_pen = QPen(self.bpm_brush, 2)
        self.bpm_pen.setCosmetic(True)
        self.bpm_pen.setCapStyle(Qt.FlatCap)
        self.no_beam_brush = QBrush(QColor(0,255,0,45))
        self.no_beam_pen = QPen(self.no_beam_brush, 2)
        self.no_beam_pen.setCosmetic(True)
        self.no_beam_pen.setCapStyle(Qt.FlatCap)
        self.energy_bpm_pen = QPen(self.energy_bpm_brush, 2)
        self.energy_bpm_pen.setCosmetic(True)
        self.energy_bpm_pen.setCapStyle(Qt.FlatCap)
        self.fit_brush = QBrush(QColor(255,255,255,255))
        self.fit_pen = QPen(self.fit_brush, 0)
        self.fit_pen.setCosmetic(True)
        self.fit_pen.setCapStyle(Qt.FlatCap)
        # Horizontal zero line spanning the orbit's Z extent.
        self.axis_line = QGraphicsLineItem(0.0,0.0,1.0,0.0)
        self.axis_line.setPen(self.axis_pen)
        self.plotItem.addItem(self.axis_line, ignoreBounds=True)
        # Maps BPM name -> BPMLineItem for fast per-tick updates.
        self.lines = {}
        self.orbit = None
        self.needs_initial_range = True
        self.set_draw_timer(draw_timer)
        self._display_fit = False
        self.fit_data_item = None
        self.fit_options = {}
        if orbit is not None:
            self.set_orbit(orbit)
    def make_right_click_menu(self):
        """Return an empty context menu (placeholder)."""
        menu = QMenu(self)
        return menu
    def display_fit(self, enabled=True):
        """Show or hide the fit curve, creating/destroying its data item."""
        if enabled and self.fit_data_item is None:
            self.fit_data_item = PlotDataItem(pen=self.fit_pen)
            self.plotItem.addItem(self.fit_data_item)
        elif not enabled:
            self.plotItem.removeItem(self.fit_data_item)
            self.fit_data_item = None
        self._display_fit = enabled
    def set_draw_timer(self, new_timer, start=False):
        """Attach a QTimer that drives redraws; defaults to ~60 Hz."""
        # Detach the previous timer if one was connected.
        try:
            self.draw_timer.timeout.disconnect(self.redraw_bpms)
        except:
            pass
        if new_timer is None:
            new_timer = QTimer(self)
            new_timer.setInterval(int(1000/60))
        self.draw_timer = new_timer
        self.draw_timer.timeout.connect(self.redraw_bpms)
        if start:
            self.draw_timer.start()
    def set_orbit(self, orbit, reset_range=True):
        """Display ``orbit``: rebuild the per-BPM line items, X limits
        and sector ticks, preserving the old view range when asked."""
        if self.orbit == orbit:
            return
        old_range = None
        old_zmax = None
        old_zmin = None
        if self.orbit is not None:
            old_range = self.plotItem.viewRect()
            old_zmax = self.orbit.zmax()
            old_zmin = self.orbit.zmin()
        self.clear_orbit()
        self.orbit = orbit
        # Pad the X limits by 2% of the orbit's extent on each side.
        extent = self.orbit.zmax() - self.orbit.zmin()
        self.plotItem.setLimits(xMin=self.orbit.zmin()-(0.02*extent), xMax=self.orbit.zmax()+(0.02*extent))
        self.plotItem.enableAutoRange(enable=False)
        self.axis_line.setLine(self.orbit.zmin(),0.0,self.orbit.zmax(),0.0)
        for bpm in self.orbit:
            line = BPMLineItem(bpm)
            self.lines[bpm.name] = line
            self.set_pen_for_bpm(bpm)
            self.plotItem.addItem(self.lines[bpm.name])
        # Rebuild sector tick labels only when the Z extent changed.
        if self.use_sector_ticks and (old_zmax != orbit.zmax() or old_zmin != orbit.zmin()):
            self.sector_ticks = [[],[]]
            self.sector_ticks[0] = self.orbit.sector_locations()
            unit_nums = [name.split(":")[-1] for name in self.orbit.names()]
            self.sector_ticks[1] = zip(self.orbit.z_vals(), unit_nums)
            self.plotItem.getAxis('bottom').setTicks(self.sector_ticks)
            self.plotItem.getAxis('bottom').setStyle(textFillLimits=[(0,0.72)])
            self.plotItem.showGrid(x=True)
        if reset_range or self.needs_initial_range:
            self.reset_range()
            self.needs_initial_range = False
        else:
            self.plotItem.setRange(old_range, padding=0.0, update=False)
        self.draw_timer.start()
    def show_magnet_views(self, enabled):
        """Add or remove the magnet strips above/below the orbit plot."""
        if enabled == self.show_magnet_buttons:
            return
        self.show_magnet_buttons = enabled
        if enabled:
            self.addItem(self.up_magnet_view, row=1, col=1)
            self.addItem(self.down_magnet_view, row=2, col=1)
            self.up_magnet_view.setXLink(self.plotItem)
            self.down_magnet_view.setXLink(self.plotItem)
        else:
            self.removeItem(self.up_magnet_view)
            self.removeItem(self.down_magnet_view)
    def set_magnet_list(self, magnet_list):
        """Point both magnet strips at a new magnet list."""
        self.up_magnet_view.set_magnets(magnet_list, reset_range=False)
        self.down_magnet_view.set_magnets(magnet_list, reset_range=False)
    @pyqtSlot(bool)
    def reset_range(self, checked=False):
        """Auto-range X and restore the default Y range."""
        self.plotItem.enableAutoRange(axis=ViewBox.XAxis)
        self.plotItem.setYRange(self.ymin, self.ymax)
    def wheelEvent(self, event):
        """Shift+wheel zooms the Y axis only; plain wheel behaves normally."""
        if event.modifiers() == Qt.ShiftModifier:
            numPixels = event.pixelDelta()
            numDegrees = event.angleDelta()
            # Prefer pixel deltas (high-resolution devices); fall back
            # to the classic 1/8-degree wheel steps.
            if not numPixels.isNull():
                s = (1.005) ** (numPixels.y())
            else:
                s = (1.005) ** (numDegrees.y() * (-1.0/8.0))
            self.plotItem.vb.scaleBy(y=s)
        else:
            super(OrbitView, self).wheelEvent(event)
    def clear_orbit(self):
        """Stop redraws and remove all BPM line items from the plot."""
        self.draw_timer.stop()
        # Preserve the auto-range flags across the bulk removal.
        auto_range_x_enabled = self.plotItem.vb.state['autoRange'][0]
        auto_range_y_enabled = self.plotItem.vb.state['autoRange'][1]
        self.plotItem.enableAutoRange(enable=False)
        if self.orbit is None:
            return
        for bpm in self.orbit:
            self.plotItem.removeItem(self.lines[bpm.name])
        self.plotItem.enableAutoRange(x=auto_range_x_enabled, y=auto_range_y_enabled)
        self.lines = {}
    @pyqtSlot()
    def redraw_bpms(self):
        """Timer slot: refresh every BPM line and the fit curve."""
        for bpm in self.orbit:
            self.set_pen_for_bpm(bpm)
            self.lines[bpm.name].setLine(bpm.z,0.0,bpm.z,bpm[self.axis])
        self.update_fit()
    def set_pen_for_bpm(self, bpm):
        """Pick the pen by BPM state: no-beam, energy BPM, or normal."""
        if bpm.severity(self.axis) != 0:
            self.lines[bpm.name].setPen(self.no_beam_pen)
        else:
            if bpm.is_energy_bpm:
                self.lines[bpm.name].setPen(self.energy_bpm_pen)
            else:
                self.lines[bpm.name].setPen(self.bpm_pen)
    def update_fit(self):
        """Refresh the fit curve from the orbit's fit data, if shown."""
        if not self._display_fit:
            return
        if self.orbit.fit_data is None:
            if self.fit_data_item is not None:
                self.fit_data_item.hide()
            return
        fit_data = None
        # NOTE(review): no fit data is selected for axis == 'tmit'.
        if self.axis == 'x':
            fit_data = self.orbit.fit_data['xpos']
        elif self.axis == 'y':
            fit_data = self.orbit.fit_data['ypos']
        self.fit_data_item.show()
        self.fit_data_item.setData(x=self.orbit.fit_data['zs'], y=fit_data)
    @pyqtSlot()
    def stop(self):
        """Pause periodic redraws."""
        self.draw_timer.stop()
    @pyqtSlot()
    def start(self):
        """Resume periodic redraws (only when an orbit is loaded)."""
        if self.orbit is not None:
            self.draw_timer.start()
    def setXLink(self, view):
        """Link this plot's X axis to another OrbitView's plot."""
        return self.plotItem.setXLink(view.plotItem)
    def setYLink(self, view):
        """Link this plot's Y axis to another OrbitView's plot."""
        return self.plotItem.setYLink(view.plotItem)
| mattgibbs/simui | steering/orbit_view.py | orbit_view.py | py | 10,566 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pyqtgraph.GraphicsLayoutWidget",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "magnet_view.MagnetView",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "magnet_view.MagnetView",
"line_number": 28,
"usage_type": "call"
},
{
"api_... |
38127398338 | import requests
import discord
from webdriver import keep_alive
from bs4 import BeautifulSoup
import pandas as pd
from discord.ext import commands
bot = commands.Bot(command_prefix='!')
bot.remove_command("help")
@bot.event
async def on_ready():
await bot.change_presence(status=discord.Status.online, activity=discord.Activity(type=discord.ActivityType.watching, name=""))
print(f'Logged in as {bot.user.name}')
@commands.command(name="walmart")
async def walmart(ctx, SKU, ZIP):
url = 'https://brickseek.com/walmart-inventory-checker/'
payload = {'search_method': 'sku', 'sku': SKU, 'zip': ZIP, 'sort': 'distance'}
df_record = pd.DataFrame(columns=['Store','City','Availability','Quantity'])
r = requests.post(url, data=payload).text # Make a POST request with data
bs = BeautifulSoup(r, 'html.parser')
j=0
a = bs.find_all('div', class_='table__body')
if a == []:
print('No results found in the searched area.')
else:
store=[]
city=[]
q=[]
stock=[]
for tag in bs.find_all('div', class_='table__body'):
for i in range(20):
m_Store = tag.findAll('strong', class_='address-location-name')
m=str(m_Store)
if i < m.count('/strong'):
m_s= m_Store[i].get_text().replace("\nWalmart","")
m_add = tag.findAll('address',class_="address")
m_Address = m_add[i].contents[2]
m_Availability = tag.findAll('span',class_="availability-status-indicator__text")
m_a = m_Availability[i].get_text()
if m_a =='Out of Stock'or m_a == 'Limited Stock':
m_q = str(0)
j=j-1
else:
m_Quantity = tag.findAll('span',class_="table__cell-quantity")
m_q = m_Quantity[j].get_text()[9:]
j=j+1
df_record = df_record.append({'Store':m_s, 'City':m_Address, 'Availability':m_a, 'Quantity':m_q }, ignore_index=True)
store.append(str(m_s))
city.append(str(m_Address))
q.append(str(m_a))
stock.append(str(m_q))
else:
# df_record=str(df_record)
# df_record=str(df_record)
# df_record=str(df_record)
# df_record=str(df_record)
# df_record=str(df_record)
# df_record=str(df_record)
# df_record=str(df_record)
# df_record=str(df_record)
print(store)
print(city)
print(q)
print(str(df_record))
print()
break
s='\n'
store = s.join(store)
city = s.join(city)
q = s.join(q)
stock = s.join(stock)
embed1 = discord.Embed(title='Walmart Stock Checker', color=3447003)
embed1.add_field(name = 'Store', value=store , inline = True)
embed1.add_field(name = 'City', value=city , inline = True)
embed1.add_field(name = 'Availability', value=q , inline = True)
# embed1.add_field(name = 'Quantity', value=stock , inline = True)
r = requests.get("https://brickseek.com/walmart-inventory-checker/?sku={}".format(SKU))
soup = BeautifulSoup(r.content, 'html.parser')
for tag in soup.find_all("div", "item-overview__image-wrap"):
link = tag.img.get("src")
s=str(link)
embed1.set_thumbnail(url=s)
embed1.set_footer(text='odin#9999')
await ctx.channel.send(embed=embed1)
@commands.command(name="target")
async def target(ctx, SKU, ZIP):
SKU=str(SKU)
if '-' not in SKU:
SKU = ('{}-{}-{}'.format(SKU[0:3], SKU[3:5], SKU[5:9]))
url = 'https://brickseek.com/target-inventory-checker/'
payload = {'search_method': 'sku', 'sku': SKU, 'zip': ZIP, 'sort': 'distance'}
r = requests.post(url, data=payload).text
bs = BeautifulSoup(r, 'html.parser')
print(" Store Availability Quantity ")
j=0
list1=[]
for tag in bs.find_all('div', class_='table__body'):
for i in range(10):
#print(tag)
m_Store = tag.findAll('strong', class_='address-location-name')
m=str(m_Store)
if i < m.count('/strong'):
m_s= m_Store[i].get_text()
m_add = tag.findAll('address',class_="address")
m_Address = m_add[i].contents[0]
m_Availability = tag.findAll('span',class_="availability-status-indicator__text")
m_a = m_Availability[i].get_text()
m_q = 'Unknown'
j=j+1
embeder = m_s+" " + m_Address + " " + m_a + " " + m_q
list1.append(embeder)
else:break
s='\n'
targ = s.join(list1)
r = requests.get("https://brickseek.com/target-inventory-checker/?sku={}".format(SKU))
soup = BeautifulSoup(r.content, 'html.parser')
for tag in soup.find_all("div", "item-overview__image-wrap"):
link = tag.img.get("src")
link=str(link)
embed1 = discord.Embed(title='Target Stock Checker',description=targ,color=3447003)
embed1.set_thumbnail(url=link)
embed1.set_footer(text='odin#9999')
# embed1.set_image("https://images-ext-2.discordapp.net/external/tKWiJKemxuuUUMoiRarsbbJGCABzsHXGHGFnkzBF5_g/%3Fwidth%3D608%26height%3D612/https/media.discordapp.net/attachments/765387136122880021/783219912453128202/13753__3_.png")
await ctx.channel.send(embed=embed1)
@commands.command(name="checking")
async def checking(ctx):
embed = discord.Embed(title='Stock Checker Instructions', description='**Requesting Channel**: <#783828598129295452>\n\n**__Walmart Stock Checker__**\n\nUsage:\n```!walmart sku ZIP```\n\nExample: ```!walmart 781200042 95928 ```\n\n**__Target Stock Checker__**\n\nUsage\n```!target DPCI Zip```\n\nExample: ```!target 057-10-0162 95928```', color=0x32a852)
embed.set_thumbnail(url="https://media.discordapp.net/attachments/765387136122880021/783219912453128202/13753__3_.png?width=608&height=612")
await ctx.send(embed=embed)
bot.add_command(walmart)
bot.add_command(target)
bot.add_command(checking)
keep_alive()
bot.run('token')
| mukuln-official/Target-and-walmart-stock-check | main.py | main.py | py | 6,458 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "discord.ext.commands.Bot",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "discord.Status",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "disco... |
70122758115 | """mysite URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.2/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
# -*- coding: utf-8 -*-
from django.urls import path
from . import views
from main.api import api_views
from django.conf import settings
from django.conf.urls.static import static
from rest_framework_simplejwt.views import (
TokenObtainPairView,
TokenRefreshView,
)
urlpatterns = [
    path('', views.MainMenuView.as_view(), name="index"),
    path('main_menu/', views.MainMenuView.as_view(), name="main_menu"),
    path('login/', views.CustomLoginView.as_view(), name='login'),
    path('logout/', views.CustomLogoutView.as_view(), name='logout'),
    path('registration/', views.CustomRegistrationView.as_view(), name='registration'),
    # objects urls
    path('objects/add', views.ObjectCreateView.as_view(), name='objects_add'),
    path('objects/<int:pk>', views.ObjectDetailView.as_view(), name='objects_detail'),
    path('objects/edit/<int:pk>', views.ObjectEditView.as_view(), name='objects_edit'),
    # tasks urls
    path('tasks/add', views.TaskCreateView.as_view(), name='tasks_add'),
    path('tasks/<int:pk>', views.TaskDetailView.as_view(), name='tasks_detail'),
    path('tasks/edit/<int:pk>', views.TaskEditView.as_view(), name='tasks_edit'),
    path('tasks/<int:task_id>/image/delete/<int:pk>', views.TaskImageRemoveView.as_view(), name='tasks_image_delete'),
    # users urls
    path('users/add', views.CustomUserCreateView.as_view(), name='users_add'),
    path('users/<int:pk>', views.CustomUserDetailView.as_view(), name='users_detail'),
    path('users/edit/<int:pk>', views.CustomUserEditView.as_view(), name='users_edit'),
    path('users/edit/password/<int:pk>', views.CustomUserPasswordChangeView.as_view(), name='users_change_password'),
    # watercourses urls
    path('watercourses/add/<int:license_id>', views.WaterCourseCreateView.as_view(), name='watercourses_add'),
    path('watercourses/children/<int:pk>', api_views.WaterCourseChildrenDetailView.as_view(), name='watercourse_children'),
    path('objects/set_watercourses/<int:pk>', views.LicenseWaterCourseCreateView.as_view(), name='license_watercourse_add'),
    path('objects/unset_watercourses/<int:pk>', views.LicenseWaterCourseRemoveListView.as_view(), name='license_watercourse_remove'),
    path('objects/unset_watercourse/<int:license_id>/<int:pk>', views.LicenseWaterCourseRemoveView.as_view(), name='license_watercourse_remove_single'),
    # NOTE(review): 'waterourse_by_license' is misspelled, but renaming it would
    # break templates that reverse it by name — fix together with the templates.
    path('watercourses_by_license/<int:license_id>', api_views.WaterCourseListAPIView.as_view(), name='waterourse_by_license'),
    # lines urls
    path('lines/add/<int:license_id>', views.LineCreateView.as_view(), name='lines_add'),
    path('objects/set_lines/<int:pk>', views.LineLicenseWaterCourseCreateView.as_view(), name='line_license_watercourse_add'),
    path('objects/unset_lines/<int:pk>', views.LineLicenseWaterCourseRemoveListView.as_view(), name='line_watercourse_remove'),
    # Renamed from the duplicate 'line_watercourse_remove' so reverse() can
    # reach both routes (mirrors 'license_watercourse_remove_single' above).
    path('objects/unset_line/<int:license_id>/<int:pk>', views.LineLicenseWaterCourseRemoveView.as_view(), name='line_watercourse_remove_single'),
    path('lines/<int:watercourse_id>', api_views.LineListAPIView.as_view(), name='lines_list_by_watercourses'),
    # wells urls
    path('wells/add', views.WellCreateView.as_view(), name='wells_add'),
    path('wells/<int:task_id>/<int:pk>', views.WellDetailView.as_view(), name='wells_detail'),
    path('wells/edit/<int:task_id>/<int:pk>', views.WellEditView.as_view(), name='wells_edit'),
    path('wells/set_welltasks/<int:pk>', views.WellTaskCreateView.as_view(), name='wells_task_add'),
    path('wells_by_line/<int:line_id>', api_views.WellListAPIView.as_view(), name='wells_list_by_line'),
    # layers urls
    path('layers/add', views.LayerCreateView.as_view(), name='layers_add'),
    path('layers/<int:pk>', views.LayerDetailView.as_view(), name='layers_detail'),
    path('layers/edit/<int:pk>', views.LayerUpdateView.as_view(), name='layers_edit'),
    # api urls
    path('api/token/', TokenObtainPairView.as_view(), name='token_obtain_pair'),
    path('api/token/refresh/', TokenRefreshView.as_view(), name='token_refresh'),
    path('api/tasks/', api_views.TaskListView.as_view(), name='task_list'),
    path('api/layer/add', api_views.LayerCreateAPIView.as_view(), name='layer_add'),
    path('api/well/add', api_views.WellCreateAPIView.as_view(), name='well_add'),
    path('api/layer_materials/', api_views.LayerMaterialsListAPIView.as_view(), name='layer_materials_list'),
    path('api/synchronize/', api_views.SyncronizeViewSet.as_view({'post': 'create'}), name='synchronize'),
    # documentation urls
    # Renamed from the duplicate 'layers_add' (copy-paste from the layers
    # section) which shadowed the real layers_add route in reverse().
    path('documents/add', views.DocumentationCreateView.as_view(), name='documents_add'),
    path('documents/<int:pk>', views.DocumentationDetailView.as_view(), name='documentation_detail'),
    path('documents/edit/<int:pk>', views.DocumentationUpdateView.as_view(), name='documentation_edit'),
    # mine urls
    # Renamed from the duplicate 'mine_edit'; the edit route below keeps that name.
    path('mine/add', views.MineCreateView.as_view(), name='mine_add'),
    # NOTE(review): 'mineetail' looks like a typo for 'mine_detail' — rename
    # together with the templates that reverse it.
    path('mine/<int:pk>', views.MineDetailView.as_view(), name='mineetail'),
    path('mine/edit/<int:pk>', views.MineUpdateView.as_view(), name='mine_edit'),
    path('mine/images/add', api_views.MineImageCreateAPIView.as_view(), name='mine_image_add'),
    path('asd', views.the_view)
]
# Serve static and media files through Django (development convenience).
urlpatterns += static(settings.STATIC_URL, document_root=settings.STATIC_ROOT)
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| Lifanna/geology_proj | geology_proj/main/urls.py | urls.py | py | 5,981 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "django.urls.path",... |
37297581932 | from abc import ABC, abstractmethod
import json
import os
from datetime import datetime
import requests
from utils import get_USD_conversion_rate, get_from_and_up_salary
class JobSiteAPI(ABC):
    """Abstract interface for a job-board API client."""

    @abstractmethod
    def get_vacancies(self, filter_word):
        # Return the decoded JSON search response for `filter_word`.
        pass
class HeadHunterAPI(JobSiteAPI):
    """Client for the public HeadHunter (hh.ru) vacancy-search API."""

    def get_vacancies(self, filter_word):
        """Return up to 50 Russian vacancies whose name matches `filter_word`.

        Only vacancies that state a salary are requested
        (`only_with_salary`); returns the decoded JSON payload.
        """
        params = {
            'text': f'NAME:{filter_word}',
            'per_page': 50,
            'only_with_salary': True,
            'area': 113,  # 113 is HeadHunter's area id for Russia
        }
        # A timeout keeps a stalled connection from hanging the caller forever.
        req = requests.get('https://api.hh.ru/vacancies', params, timeout=30)
        try:
            # Response.json() decodes using the declared charset for us.
            return req.json()
        finally:
            # Close even when json() raises, so the connection is not leaked.
            req.close()
class SuperJobAPI(JobSiteAPI):
    """Client for the SuperJob vacancy-search API (requires SJ_API_KEY env var)."""

    # Read once at import time; None if the variable is not set.
    __api_key = os.getenv('SJ_API_KEY')

    def get_vacancies(self, filter_word):
        """Return up to 50 Russian vacancies matching `filter_word` with a stated salary."""
        headers = {
            'X-Api-App-Id': self.__api_key
        }
        params = {
            'keyword': filter_word,
            'no_agreement': 1,  # skip "by agreement" (no salary) postings
            'country_id': 1,    # Russia
            'count': 50
        }
        # Timeout prevents a stalled connection from hanging the caller.
        req = requests.get('https://api.superjob.ru/2.0/vacancies/',
                           headers=headers, params=params, timeout=30)
        try:
            return req.json()
        finally:
            req.close()
class Vacancy:
    """A single vacancy record with salary helpers and rich comparisons.

    Attributes are created dynamically from the source dictionary; all
    comparisons order vacancies by ``approximate_salary``.
    """

    def __init__(self, dictionary):
        # Every key of the source dict becomes an attribute of the instance.
        for key, value in dictionary.items():
            setattr(self, key, value)

    @property
    def published_at(self):
        """Publication date formatted as 'D-M-Y'.

        HeadHunter stores an ISO date string, SuperJob a unix timestamp;
        any unparsable string is returned unchanged.
        """
        try:
            date = datetime.strptime(self.published[:10], '%Y-%m-%d')
            return f'{date.day}-{date.month}-{date.year}'
        except ValueError:
            return self.published
        except TypeError:
            # self.published is a number (unix timestamp), not a string.
            date = datetime.fromtimestamp(int(self.published))
            return f'{date.day}-{date.month}-{date.year}'

    @property
    def approximate_salary(self):
        """Midpoint of the salary range (or the single stated bound), in rubles."""
        if self.salary_from and self.salary_to:
            mean_salary = (self.salary_from + self.salary_to) // 2
        else:
            mean_salary = max(self.salary_from, self.salary_to)
        if 'usd' == self.currency.lower():
            return mean_salary * get_USD_conversion_rate()
        else:
            return mean_salary

    def __str__(self):
        response_0 = f'Вакансия на должность: {self.name} в компанию {self.employer} в г. {self.area}\n'
        if self.salary_from and self.salary_to:
            response_1 = f'Зарплата от {self.salary_from} до {self.salary_to} {self.currency}\n'
        elif self.salary_from:
            response_1 = f'Зарплата от {self.salary_from} {self.currency}\n'
        else:
            response_1 = f'Зарплата до {self.salary_to} {self.currency}\n'
        # '\\' keeps the same literal backslash without relying on the
        # deprecated unrecognized-escape behaviour of '\О'.
        response_2 = f'Требования\\Описание:\n{self.requirements}\nТип занятости: {self.employment_type}\n' \
                     f'Вакансия опубликована {self.published_at}\n{self.url}\n{"-" * 80}'
        return response_0 + response_1 + response_2

    def __le__(self, other):
        # NotImplemented (instead of None) lets Python fall back to the
        # reflected operation or raise a clear TypeError for bad operands;
        # returning None made every mixed comparison silently falsy.
        if isinstance(other, Vacancy):
            return self.approximate_salary <= other.approximate_salary
        return NotImplemented

    def __lt__(self, other):
        if isinstance(other, Vacancy):
            return self.approximate_salary < other.approximate_salary
        return NotImplemented

    def __ge__(self, other):
        if isinstance(other, Vacancy):
            return self.approximate_salary >= other.approximate_salary
        return NotImplemented

    def __gt__(self, other):
        if isinstance(other, Vacancy):
            return self.approximate_salary > other.approximate_salary
        return NotImplemented
class Saver:
    """In-memory store of Vacancy objects collected from the job-board APIs."""

    def __init__(self):
        # All accepted Vacancy instances, in insertion order.
        self.vacancies = []

    def add_vacancy(self, vacancy):
        # Accept only genuine Vacancy instances; anything else is reported and dropped.
        if isinstance(vacancy, Vacancy):
            self.vacancies.append(vacancy)
        else:
            print('Error, next time input valid vacancy')

    def add_hh_vacancies(self, search_query):
        """Fetch HeadHunter vacancies for `search_query` and store them as Vacancy objects."""
        vacancies_hh = HeadHunterAPI().get_vacancies(search_query)
        for vacancy_hh in vacancies_hh['items']:
            try:
                # Flatten the nested hh.ru record into the attribute dict Vacancy
                # expects; a record missing any key is skipped via KeyError below.
                # NOTE(review): assumes the documented hh.ru response schema.
                dic = {
                    'name': vacancy_hh['name'],
                    'employer': vacancy_hh['employer']['name'],
                    'url': vacancy_hh['alternate_url'],
                    'area': vacancy_hh['area']['name'],
                    'salary_from': vacancy_hh['salary']['from'] if vacancy_hh['salary']['from'] else 0,
                    'salary_to': vacancy_hh['salary']['to'] if vacancy_hh['salary']['to'] else 0,
                    'currency': vacancy_hh['salary']['currency'],
                    'requirements': vacancy_hh['snippet']['requirement'],
                    'published': vacancy_hh['published_at'],
                    'employment_type': vacancy_hh['employment']['name']
                }
            except KeyError:
                continue
            vacancy = Vacancy(dic)
            self.add_vacancy(vacancy)

    def add_sj_vacancies(self, search_query):
        """Fetch SuperJob vacancies for `search_query` and store them as Vacancy objects."""
        vacancies_sj = SuperJobAPI().get_vacancies(search_query)
        for vacancy_sj in vacancies_sj['objects']:
            try:
                # Same normalization as above, for the SuperJob response shape.
                dic = {
                    'name': vacancy_sj['profession'],
                    'employer': vacancy_sj['client']['title'],
                    'url': vacancy_sj['link'],
                    'area': vacancy_sj['town']['title'],
                    'salary_from': vacancy_sj['payment_from'] if vacancy_sj['payment_from'] else 0,
                    'salary_to': vacancy_sj['payment_to'] if vacancy_sj['payment_to'] else 0,
                    'currency': vacancy_sj['currency'],
                    'requirements': vacancy_sj['candidat'],
                    'published': vacancy_sj['date_published'],
                    'employment_type': vacancy_sj['type_of_work']['title']
                }
            except KeyError:
                continue
            vacancy = Vacancy(dic)
            self.add_vacancy(vacancy)

    def delete_vacancy(self, vacancy):
        # Removing a vacancy that is not stored is reported, not raised.
        try:
            self.vacancies.remove(vacancy)
        except ValueError:
            print('Vacancy does not exist')

    def get_vacancies_by_salary(self, salary: str):
        """Return stored vacancies whose approximate salary matches `salary`.

        `salary` is a textual range such as "50 000-100 000 руб." or a single
        value such as "100_000 руб."; parsing is delegated to
        get_from_and_up_salary.
        """
        try:
            salary_ = get_from_and_up_salary(salary)
        except ValueError:
            raise ValueError(
                'Введите запрос в одном из форматов "50 000- 100 000 руб." "1000-2000 USD" "100_000 руб."')
        filtered_vacancies = []
        if len(salary_) == 2:
            # Range query: keep salaries strictly between the bounds.
            from_, up_to = salary_
            for vacancy in self.vacancies:
                if from_ < vacancy.approximate_salary < up_to:
                    filtered_vacancies.append(vacancy)
        else:
            # Single value: keep exact matches only.
            for vacancy in self.vacancies:
                if vacancy.approximate_salary == salary_[0]:
                    filtered_vacancies.append(vacancy)
        return filtered_vacancies
class JSONSaver(Saver):
    """Saver that can round-trip its vacancy list through a JSON file."""

    def __init__(self):
        super().__init__()

    def save_to_json(self, file_name):
        """Dump all stored vacancies to `file_name` as a JSON array."""
        payload = [vacancy.__dict__ for vacancy in self.vacancies]
        with open(file_name, 'w', encoding='utf-8') as fp:
            json.dump(payload, fp, ensure_ascii=False)

    def get_from_json(self, file_name):
        """Replace the stored vacancies with the ones read from `file_name`."""
        self.vacancies.clear()
        with open(file_name, 'r', encoding='utf-8') as fp:
            records = json.load(fp)
        for record in records:
            self.add_vacancy(Vacancy(record))
        return
| SkyLanser/vacancy_parser | classes.py | classes.py | py | 7,485 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "abc.ABC",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number... |
37469284557 | import csv
import logging
import json
import math
import random
import re
import time
import urllib.request
from pathlib import Path
import sys
from bs4 import BeautifulSoup
import requests
import get_edgar.common.my_csv as mc
import get_edgar.common.utils as utils
logger = logging.getLogger(__name__)
EDGAR_PREFIX = "https://www.sec.gov/Archives/"
SEC_PREFIX = "https://www.sec.gov"
def save_file_info(csv_in, folder):
    """From a csv of EDGAR records, build a csv with record + file information.

    The new csv file gets the prefix "info_" replacing the original prefix.

    Arguments:
        csv_in {Path} -- the csv file that contains EDGAR records
        folder {Path} -- the folder to save the new csv file in

    Returns:
        Path -- the path of the csv created (or already existing)
    """
    csv_save = folder / f'info_{csv_in.name[6:]}'
    # `not .exists()` instead of `== False` — idiomatic truth test.
    if not csv_save.exists():
        logger.info(f"start save file info to {csv_in.name}")
        new_rows = get_file_info(csv_in)
        # Stable ordering by company then filing date for downstream merges.
        sorted_rows = mc.multikeysort_int(new_rows, 'cik', 'filing_date')
        mc.save_dict_csv(sorted_rows, csv_save)
        logger.info(f"{csv_save.name} created")
    else:
        logger.info(f"{csv_save} already exists")
    return csv_save
## Add file information and htm link to the index downloaded
def get_file_info(csv_in):
    """Collect form and filer information for every EDGAR record in a csv file.

    Arguments:
        csv_in {Path} -- csv whose columns include "cik", "filing_date",
            and "html_index"

    Returns:
        list -- dictionaries combining the original record with the htm
            links, form information, and filer information
    """
    logger.debug(f"start add file info to {csv_in.name}")
    enriched = []
    with open(csv_in, 'r', newline='') as handle:
        for record in csv.DictReader(handle):
            time.sleep(1)  # stay polite with SEC rate limits
            index_soup = get_isoup(record.get('html_index'))
            if index_soup is None:
                continue
            for num, link in get_htm_links(index_soup, record.get('html_index')):
                record[f'htm_link_{num}'] = link
            record.update(get_form_info(index_soup, record.get('html_index')))
            record.update(get_filer_info(index_soup, record))
            # record['year'] = int(f'{csv_in.name[-8:-4]}')
            enriched.append(record)
    logger.debug(f"list of records with file info created for {csv_in.name}")
    return enriched
def get_isoup(page):
    """Fetch the EDGAR index page for a filing and parse it with BeautifulSoup.

    Arguments:
        page {str} -- webpage address

    Returns:
        BeautifulSoup | None -- the parsed page, or None after repeated
            download failures or a parse failure
    """
    attempts = 0
    while True:
        try:
            html_index = requests.get(page, headers=utils.headers)
            break
        except requests.exceptions.HTTPError:
            logger.error(f'try wait a minute to reopen {page}')
            # Count HTTP errors too — previously only the generic branch
            # incremented the counter, so a persistent HTTPError looped forever.
            attempts += 1
            if attempts > 10:
                return None
            time.sleep(70)
        except Exception:
            logger.error(f"cannot download {page}", exc_info=True)
            attempts += 1
            if attempts > 10:
                return None
            time.sleep(70)
    try:
        return BeautifulSoup(html_index.text, 'lxml')
    except Exception:
        logger.error(f'Cannot make soup for {page}', exc_info=True)
        return None
def get_htm_links(soup, index_path):
    """Return enumerated (position, url) pairs for every .htm link on an index page.

    Arguments:
        soup {BeautifulSoup} -- parsed EDGAR file index page
        index_path {str} -- address of the index page (for logging)

    Returns:
        list[(int, str)] | None -- 1-based positions with absolute urls,
            or None when extraction fails
    """
    try:
        links = soup.find_all(href=re.compile(r"Archives.*\.htm"))
        page_links = enumerate([SEC_PREFIX + link['href'] for link in links], 1)
        # EDGAR sometimes routes documents through the inline-XBRL viewer; strip it.
        return [(n, re.sub(r'/ix\?doc=/', '/', link)) for (n, link) in page_links]
    except Exception:
        # Narrowed from a bare `except:`, which would also swallow
        # SystemExit and KeyboardInterrupt.
        logger.exception(f'Unable to get htm pages for {index_path}')
        return None
# Regexes that extract form-level facts from the index page text;
# the dict keys become column names in the output csv.
form_headers = {
    "type_description": re.compile(r'Form\s(.*)(?=:)', re.IGNORECASE),
    "report_period": re.compile(r'\sPeriod of Report\s(\d{4}-\d{2}-\d{2})', re.IGNORECASE),
    "file_date": re.compile(r'\sFiling Date\s(\d{4}-\d{2}-\d{2})', re.IGNORECASE),
    "accepted_time": re.compile(r'\sAccepted\s(\d{4}-\d{2}-\d{2}\s\d{2}:\d{2}:\d{2})', re.IGNORECASE),
    "accession_num": re.compile(r'SEC Accession No.\s(\d{10}-\d{2}-\d{6})', re.IGNORECASE),
    "items": re.compile(r'Items\s((Item.*))', re.IGNORECASE),
}
def get_form_info(soup, index_path, headers=form_headers):
    """Extract form information from an EDGAR file index page.

    The fields collected are defined by the `headers` regex mapping
    (default: form_headers).

    Arguments:
        soup {BeautifulSoup} -- parsed EDGAR file index page
        index_path {str} -- address of the index page (for logging)

    Returns:
        dict -- form information found on the page (possibly incomplete)
    """
    form_infos = {}
    # Gather every text section that may carry form facts.
    sections = [
        soup.find(id="formName").get_text(),
        soup.find(id="secNum").get_text(),
    ]
    for group in soup.find_all("div", {"class": "formGrouping"}):
        sections.append(group.get_text())
    patterns = headers.items()
    for text in sections:
        for field, pattern in patterns:
            found = pattern.search(text)
            if found:
                form_infos[field] = found.group(1)
    if len(form_infos) == 0:
        logger.warning(f'No form infos for {index_path}')
    elif len(form_infos) < len(headers):
        logger.debug(f'Incomplete form infos for {index_path}')
    return form_infos
# Regexes that extract filer-level facts; the dict keys become output columns.
filer_headers = {
    "cname": re.compile(r'\n(.+)\(Filer\)', re.IGNORECASE),
    "fcik": re.compile(r'CIK:\s(\d{10})', re.IGNORECASE),
    "sic": re.compile(r'SIC:\s(\d{4})', re.IGNORECASE),
    "irs_num": re.compile(r'IRS No.:\s(\d+)\s', re.IGNORECASE),
    "year_end": re.compile(r'Fiscal Year End:\s(\d{4})', re.IGNORECASE),
    "state_incorp": re.compile(r'State of Incorp.:\s(\w{2})', re.IGNORECASE),
    "type": re.compile(r'Type:\s(.+?)\s(?=\|)', re.IGNORECASE)
}
# Shortcuts used by get_filer_info to tell co-filers apart from the main filer.
fcik_pattern = filer_headers.get("fcik")
fcname_pattern = filer_headers.get("cname")
def get_filer_info(soup, record):
    """Get filer information from an EDGAR file index page.

    The fields collected are defined by "filer_headers". A filer whose CIK
    matches the record's cik is the main filer; others are co-filers.

    Arguments:
        soup {BeautifulSoup} -- the soup for the EDGAR file index page
        record {dict} -- dictionary representing the EDGAR file record

    Returns:
        dict -- filer information found on the EDGAR file index page
    """
    filer_infos = {}
    co_filers_cik = []
    co_filers_cname = []
    all_filer_texts = soup.find_all("div", id="filerDiv")
    tems = filer_headers.items()
    for filer_texts in all_filer_texts:
        filer_text = filer_texts.get_text()
        fcik_info = fcik_pattern.search(filer_text)
        if fcik_info:
            fcik = fcik_info.group(1)
            # A filer whose CIK differs from the record's CIK is a co-filer;
            # collect its CIK/name and skip the main-filer field extraction.
            if str(int(fcik)) != record.get("cik"):
                logger.debug(f'found co-filer in {record.get("html_index")}')
                co_filers_cik.append(fcik)
                cname_info = fcname_pattern.search(filer_text)
                if cname_info:
                    cname = cname_info.group(1)
                    co_filers_cname.append(cname.strip())
                continue
        for k, v in tems:
            matches = v.search(filer_text)
            if matches:
                info_raw = matches.group(1)
                # Collapse embedded newlines so csv rows stay single-line.
                info = info_raw.replace('\n', ' ').replace('\r', ' ')
                filer_infos[k] = info.strip()
        # The main filer's mailing/business address lives in separate divs.
        filer_infos = {**filer_infos, **extract_address(filer_texts)}
    if len(filer_infos) == 0:
        logger.warning(f'No filer infos for {record.get("html_index")}')
    if co_filers_cik:
        # Flatten co-filers into numbered columns (cname may be missing).
        for i in range(len(co_filers_cik)):
            filer_infos[f'co_filers_cik_{i}'] = co_filers_cik[i]
            try:
                filer_infos[f'co_filers_cname_{i}'] = co_filers_cname[i]
            except IndexError:
                filer_infos[f'co_filers_cname_{i}'] = None
    return filer_infos
def select_items(csv_in, filters):
    """Filter an info csv by its 'items' column into a sibling item_*.csv.

    Arguments:
        csv_in {Path} -- input csv (name expected to start with "info_")
        filters -- filter values passed through to mc.text_filter
    """
    csv_out = csv_in.resolve().parent / f'item_{csv_in.name[5:]}'
    # `not .exists()` instead of `== False` — idiomatic truth test.
    if not csv_out.exists():
        mc.text_filter(csv_in, csv_out, 'items', filters)
        logger.info(f'{csv_out} created')
    else:
        logger.info(f'{csv_out} already exists')
def extract_address(filer_info):
    """Pull mailing/business addresses and phone out of a filer <div> soup.

    Returns a dict with 'mail_add', 'busi_add', 'busi_phone'; any part not
    present on the page is None.  (Previously, if the page listed only one
    address type, the other names were never bound and the final return
    raised UnboundLocalError.)
    """
    mail_add = busi_add = busi_phone = None
    addresses = filer_info.find_all('div', 'mailer')
    if addresses:
        for address in addresses:
            add_des = address.contents[0].strip()
            mailer_address = []
            phone = None
            adds = address.find_all('span', class_='mailerAddress')
            if adds:
                for add in adds:
                    add_text = add.get_text().strip()
                    if any(c.isalpha() for c in add_text):
                        mailer_address.append(add_text)
                    else:
                        # A digits-only span is the phone number.
                        phone = ''.join([c for c in add_text if c.isdigit()])
            if add_des == 'Mailing Address':
                mail_add = ','.join(mailer_address)
            elif add_des == 'Business Address':
                busi_add = ','.join(mailer_address)
                busi_phone = phone
    return {'mail_add': mail_add, 'busi_add': busi_add, 'busi_phone': busi_phone}
| linbaiwh/Get_EDGAR | get_edgar/extractor/fileinfo_extractor.py | fileinfo_extractor.py | py | 9,826 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "get_edgar.common.my_csv.multikeysort_int",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "get_edgar.common.my_csv",
"line_number": 38,
"usage_type": "name"
},
{
... |
74502512992 | from django.contrib.auth.decorators import login_required
from django.core.paginator import Paginator
from django.shortcuts import get_object_or_404, redirect, render
from .forms import CommentForm, PostForm
from .models import Follow, Group, Post, User
NUMBER_OF_POSTS: int = 10
def index(request):
    """Front page: all posts, paginated."""
    paginator = Paginator(Post.objects.all(), NUMBER_OF_POSTS)
    page_obj = paginator.get_page(request.GET.get('page'))
    return render(request, 'posts/index.html', {'page_obj': page_obj})
def group_posts(request, slug):
    """All posts belonging to one group, paginated."""
    group = get_object_or_404(Group, slug=slug)
    paginator = Paginator(group.posts.all(), NUMBER_OF_POSTS)
    page_obj = paginator.get_page(request.GET.get('page'))
    return render(request, 'posts/group_list.html', {
        'group': group,
        'page_obj': page_obj,
    })
def profile(request, username):
    """A user's profile page: their posts plus the viewer's follow status."""
    profile_user = get_object_or_404(User, username=username)
    user = request.user
    posts_count = profile_user.posts.count()
    posts_list_username = profile_user.posts.all()
    paginator = Paginator(posts_list_username, NUMBER_OF_POSTS)
    page_obj = paginator.get_page(request.GET.get('page'))
    # True only when the *current* user follows this profile. The previous
    # check (`profile_user.following.exists()`) reported True whenever anyone
    # at all followed the profile, regardless of who was looking.
    following = (
        user.is_authenticated
        and profile_user != user
        and profile_user.following.filter(user=user).exists()
    )
    context = {
        'user': user,
        'following': following,
        'profile_user': profile_user,
        'page_obj': page_obj,
        'posts_count': posts_count,
    }
    return render(request, 'posts/profile.html', context)
@login_required
def add_comment(request, post_id):
    """Attach a comment from the current user to the given post."""
    target_post = get_object_or_404(Post, id=post_id)
    form = CommentForm(request.POST or None)
    if form.is_valid():
        new_comment = form.save(commit=False)
        new_comment.author = request.user
        new_comment.post = target_post
        new_comment.save()
    return redirect('posts:post_detail', post_id)
def post_detail(request, post_id):
    """Single post page with its comments and a blank comment form."""
    post_user = get_object_or_404(Post, id=post_id)
    context = {
        'post_user': post_user,
        'posts_count': post_user.author.posts.count(),
        'comments': post_user.comments.filter(post_id=post_id),
        'form_comments': CommentForm(request.POST or None),
    }
    return render(request, 'posts/post_detail.html', context)
@login_required
def post_create(request):
    """Create a new post authored by the current user."""
    # post_edit already passes request.FILES; without it here an uploaded
    # image was silently dropped when creating a post.
    form = PostForm(request.POST or None, files=request.FILES or None)
    if form.is_valid():
        post = form.save(commit=False)
        post.author = request.user
        post.save()
        return redirect('posts:profile', request.user)
    return render(request, 'posts/create_post.html', {'form': form})
@login_required
def post_edit(request, post_id):
    """Edit an existing post; only its author may do so."""
    post = get_object_or_404(Post, id=post_id)
    if post.author != request.user:
        # Non-authors are bounced back to the read-only view.
        return redirect('posts:post_detail', post_id=post_id)
    form = PostForm(
        request.POST or None,
        files=request.FILES or None,
        instance=post,
    )
    if form.is_valid():
        edited = form.save(commit=False)
        edited.author = request.user
        edited.save()
        return redirect('posts:post_detail', edited.id)
    return render(request, 'posts/create_post.html', {
        'is_edit': post.text,
        'form': form,
        'post': post,
    })
@login_required
def follow_index(request):
    """Feed of posts written by authors the current user follows."""
    followed_ids = Follow.objects.filter(user=request.user).values_list(
        'author_id',
        flat=True
    )
    feed = Post.objects.filter(author_id__in=followed_ids)
    paginator = Paginator(feed, NUMBER_OF_POSTS)
    page_obj = paginator.get_page(request.GET.get('page'))
    return render(request, 'posts/follow.html', {'page_obj': page_obj})
@login_required
def profile_follow(request, username):
    """Subscribe the current user to `username` (self-follow is ignored)."""
    author = get_object_or_404(User, username=username)
    if author != request.user:
        # get_or_create keeps the subscription unique.
        Follow.objects.get_or_create(user=request.user, author=author)
    # Both branches of the original redirected identically; merged here.
    return redirect('posts:profile', author.username)
@login_required
def profile_unfollow(request, username):
    """Remove the current user's subscription to `username`, if any."""
    target = get_object_or_404(User, username=username)
    subscription = Follow.objects.filter(user=request.user, author=target)
    subscription.delete()
    return redirect('posts:profile', target.username)
| KseniyaGurevich/hw05_final | yatube/posts/views.py | views.py | py | 4,737 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "models.Post.objects.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Post.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "models.Post",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "django.c... |
6307202826 | import numpy as np
import torch
import torch.nn as nn
class RNNBaseSTFTMask(nn.Module):
    """RNN-based per-speaker mask estimator operating on STFT features.

    Input: (batch, channel, freq, frame, 2) with the trailing axis holding
    (real, imag) — assumed from the forward() indexing; confirm with caller.
    Output: one masked copy of the input per speaker,
    (batch, num_spk, channel, freq, frame, 2).
    """

    def __init__(self,
                 num_spk=2,
                 audio_channels=2,
                 n_fft=512,
                 hop_length=256,
                 sample_rate=16000,
                 rnn_hidden=256,
                 rnn_layer=2,
                 rnn_type="rnn",
                 drop_out=0.5,
                 activation="relu",
                 bidirectional=False,
                 *args,
                 **kwarg):
        super(RNNBaseSTFTMask, self).__init__()
        self.audio_channels = audio_channels
        self.num_spk = num_spk
        self.n_fft = n_fft
        self.hop_length = hop_length
        self.sample_rate = sample_rate
        n_features = n_fft // 2 + 1  # rfft bin count
        self.ampltude = Amplitude()
        self.phase = Phase()
        # torch rnn batch_first
        # https://discuss.pytorch.org/t/could-someone-explain-batch-first-true-in-lstm/15402/9
        # With batch_first=False (used here) the RNN expects/returns the
        # sequence dimension first only when inputs are not batch-major;
        # forward() feeds (batch*ch, T, F), matching the original behaviour.
        rnn_kwargs = dict(input_size=n_features,
                          hidden_size=rnn_hidden,
                          num_layers=rnn_layer,
                          bias=False,
                          dropout=drop_out,
                          batch_first=False,
                          bidirectional=bidirectional)
        if rnn_type == "rnn":
            self.rnn = nn.RNN(**rnn_kwargs)
        elif rnn_type == "lstm":
            self.rnn = nn.LSTM(**rnn_kwargs)
        elif rnn_type == "gru":
            self.rnn = nn.GRU(**rnn_kwargs)
        else:
            # Previously an unknown type left self.rnn undefined and failed
            # later with an opaque AttributeError in forward().
            raise ValueError(f"unsupported rnn_type: {rnn_type!r}")
        rnn_out = rnn_hidden if not bidirectional else rnn_hidden * 2
        self.batchnorm = nn.BatchNorm1d(num_features=rnn_out)
        linear = nn.Linear(in_features=rnn_out,
                           out_features=n_features * num_spk,
                           bias=True)
        if activation == "relu":
            activation = nn.ReLU()
        else:
            # Previously a non-"relu" string went straight into nn.Sequential
            # and blew up there; fail fast with a clear message instead.
            raise ValueError(f"unsupported activation: {activation!r}")
        self.fc_layers = nn.Sequential(
            linear,
            activation,
        )

    def forward(self, inputs):
        """Estimate per-speaker masks and apply them to the complex input."""
        # Magnitude-like features drive the mask estimation.
        mask = self.ampltude(inputs)
        batch, nchannel, nfeature, nframe = mask.shape
        # Fold channels into the batch axis: (batch*ch, F, T).
        mask = torch.reshape(mask, shape=(batch * nchannel, nfeature, nframe))
        mask = torch.transpose(mask, 1, 2)
        mask, _ = self.rnn(mask)  # (batch*ch, T, hidden)
        mask = torch.transpose(mask, 1, 2)
        mask = self.batchnorm(mask)  # BatchNorm1d expects (N, C, L)
        mask = torch.transpose(mask, 1, 2)
        mask = self.fc_layers(mask)  # (batch*ch, T, F*num_spk)
        mask = torch.transpose(mask, -1, -2)
        batch, nfeature, nframe = mask.shape
        # Split the feature axis into (num_spk, F), then unfold the channel axis.
        mask = torch.reshape(mask, shape=(batch, self.num_spk, int(nfeature // self.num_spk), nframe))
        mask = torch.reshape(mask, shape=(batch // nchannel, nchannel, self.num_spk, int(nfeature // self.num_spk), nframe))
        mask = torch.transpose(mask, 1, 2)
        mask = torch.unsqueeze(mask, dim=-1)  # broadcast over (real, imag)
        out = mask * torch.unsqueeze(inputs, dim=1)
        return out
class Amplitude(nn.Module):
    """Computes |real^2 - imag^2| over the trailing (real, imag) pair.

    NOTE(review): a power spectrum is usually real^2 + imag^2; the minus
    sign here is preserved as-is — confirm it is intentional.
    """

    def __init__(self,
                 *args,
                 **kwarg):
        super(Amplitude, self).__init__()

    def forward(self, inputs):
        assert inputs.size()[-1] == 2, f"Tensor needs real and imag in the last rank..."
        real = inputs[..., 0]
        imag = inputs[..., 1]
        return torch.abs(real ** 2 - imag ** 2)
class Phase(nn.Module):
    """Returns the phase angle atan2(imag + eps, real) of the trailing (real, imag) pair.

    The tiny eps nudges the imaginary part away from the degenerate
    atan2(0, 0) case.
    """

    def __init__(
        self,
        *args,
        **kwargs,
    ):
        super().__init__(**kwargs)
        self.eps = torch.tensor(np.ones(1, dtype=np.float32) * 1e-5, dtype=torch.float32)

    def forward(self, inputs):
        # Renamed from `call` to `forward` so nn.Module.__call__ actually
        # dispatches here (with `call` the module was uncallable). Also
        # avoid the previous in-place mutation of the caller's tensor.
        imag = inputs[..., 1] + self.eps
        outputs = torch.angle(torch.complex(real=inputs[..., 0], imag=imag))
        return outputs
if __name__ == "__main__":
    # Smoke test: build the model from CLI arguments and push one random
    # STFT-shaped batch through it, then report the parameter footprint.
    train_on_gpu=torch.cuda.is_available()
    def get_model():
        # Indirection kept so the model class can be swapped in one place.
        return RNNBaseSTFTMask
    if(train_on_gpu):
        print('Training on GPU.')
    else:
        print('No GPU available, training on CPU.')
    device = torch.device('cuda' if train_on_gpu else 'cpu')
    import argparse
    parser = argparse.ArgumentParser(
        "denoiser.demucs",
        description="Benchmark the streaming Demucs implementation, "
        "as well as checking the delta with the offline implementation.")
    parser.add_argument("--sample_rate", default=16000, type=int)
    parser.add_argument("--segment", default=1.024, type=float)
    parser.add_argument("--audio_channels", default=2, type=int)
    parser.add_argument("--num_spk", default=2, type=int)
    parser.add_argument("--n_fft", default=512, type=int)
    parser.add_argument("--hop_length", default=256, type=int)
    parser.add_argument("--rnn_hidden", default=896, type=int)
    parser.add_argument("--rnn_layer", default=3, type=int)
    parser.add_argument("--rnn_type", default="lstm", type=str)
    parser.add_argument("--activation", default="relu", type=str)
    parser.add_argument("--bidirectional", default=True, type=bool)
    parser.add_argument("--drop_out", default=0.5, type=float)
    parser.add_argument("--device", default="cpu", type=str)
    args = parser.parse_args()
    model = get_model()(num_spk=args.num_spk,
                        audio_channels=args.audio_channels,
                        n_fft=args.n_fft,
                        hop_length=args.hop_length,
                        sample_rate=args.sample_rate,
                        rnn_hidden=args.rnn_hidden,
                        rnn_layer=args.rnn_layer,
                        rnn_type=args.rnn_type,
                        drop_out=args.drop_out,
                        activation=args.activation,
                        bidirectional=args.bidirectional,
                        ).to(args.device)
    # Frames per segment and frequency bins per frame for the random input.
    nframe = int(int(args.sample_rate*args.segment) // args.hop_length) + 1
    nfeature = int(args.n_fft//2)+1
    x = torch.randn(args.audio_channels, nfeature, nframe, 2).to(args.device) # channel, T, F, real/imag
    out = model(x[None])[0]
    # Parameter count in MB assuming 4 bytes per float32 parameter.
    model_size = sum(p.numel() for p in model.parameters()) * 4 / 2**20
    print(f"model size: {model_size:.1f}MB")
| ooshyun/Speech-Enhancement-Pytorch | src/model/stft_rnn.py | stft_rnn.py | py | 7,628 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.RNN",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number"... |
11831798394 | from flask import Flask
from flask import render_template,request
from pymongo import MongoClient
import json
from bson import json_util
from bson.json_util import dumps
# NOTE(review): the absolute template path is machine-specific — consider a
# path relative to this file instead.
app = Flask(__name__, template_folder='/home/sri/Downloads/AAL-94_dataset/AALtorch/template')
print(app)
# MongoDB connection settings.
MONGOD_HOST = 'localhost'
MONGOD_PORT = 27017
DBS_NAME = 'pymongo_test'
COLLECTION_NAME = 'posts'
# Projection applied by the /pymongo_test/posts endpoint.
FIELDS = {'Class': True, 'Conf': True, 'Dates': True}
#var date,timestamp
@app.route("/")
def demo1():
    """Serve the dashboard template at the site root."""
    return render_template("demo.html")
@app.route("/pymongo_test/posts")
def donor_projects():
    """Return all posts (Class/Conf/Dates fields only) as a JSON array."""
    connection = MongoClient(MONGOD_HOST, MONGOD_PORT)
    try:
        collection = connection[DBS_NAME][COLLECTION_NAME]
        projects = collection.find(projection=FIELDS)
        # Per-class record counts; currently only computed for debugging —
        # the endpoint response is the raw projected documents below.
        cursor = collection.aggregate([{"$group": {"_id": "$Class", "Count": {"$sum": 1}}}])
        count = [document for document in cursor]
        json_projects = [project for project in projects]
        # bson's json_util handles ObjectId/date types plain json can't.
        json_projects = json.dumps(json_projects, default=json_util.default)
        count = json.dumps(count, default=json_util.default)
        return json_projects
    finally:
        # Close even if the query raises, so connections don't leak.
        connection.close()
if __name__ == '__main__':
    # 0.0.0.0 exposes the server on all interfaces; debug=True must be
    # disabled in production.
    app.run(host='0.0.0.0',port=8000,debug=True)
| srinidhi17/HealthMonitoring-System- | test.py | test.py | py | 1,299 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "json.dumps",
... |
8607037151 |
import os
import pygame
from battle2.model import Direction
from battle2.model import State
import battle2.eventmanager as evm
SCREENWIDTH = 1600
SCREENHEIGHT = 800 # 1600, 800 # 1920, 1080
WINDOWWIDTH = 800
WINDOWHEIGHT = 600
WINDOWPOS = 100, 100
TILESIZE = 32
PLAYERLAYER = 2
GRIDLAYER = 4
FPS = 60
BLACK = pygame.Color("black")
WHITE = pygame.Color("white")
GREEN = pygame.Color("green")
GRAY = pygame.Color("gray38")
DARKGRAY = pygame.Color("gray12")
BLUE = pygame.Color("blue")
PURPLE = pygame.Color("purple")
class GraphicalView(object):
"""
Draws the model state onto the screen.
"""
def __init__(self, ev_manager, model):
"""
ev_manager (EventManager): Allows posting messages to the event queue.
model (GameEngine): a strong reference to the game Model.
Attributes:
isinitialized (bool): pygame is ready to draw.
screen (pygame.Surface): the screen surface.
clock (pygame.time.Clock): keeps the fps constant.
fonts (pygame.Font): a small font.
"""
self.ev_manager = ev_manager
ev_manager.register_listener(self)
self.model = model
self.isinitialized = False
def notify(self, event):
"""
Receive events posted to the message queue.
"""
if isinstance(event, evm.InitializeEvent):
self._initialize()
elif isinstance(event, evm.DrawMapEvent):
self._draw_map()
elif isinstance(event, evm.QuitEvent):
self.isinitialized = False
pygame.quit()
elif isinstance(event, evm.CharUpdateEvent):
self.current_player.rect.topleft = self.model.current_character.new_position
self.detect_collision()
self.current_player.updatespeed = self.model.current_character.movespeed
self.current_player.update(event)
elif isinstance(event, evm.InputEvent):
if event.key == pygame.K_c:
self.model.current_character.align_to_grid()
# voeg de rect van de huidige speler toe aan de lists en verwijder de rect van start_pos
self.model.map.add_rect_to_list(self.current_player.rect, self.model.map.heroes)
self.model.map.add_rect_to_list(self.current_player.rect, self.model.map.obstacles)
self.model.map.start_pos = None
self.model.current_character = self.model.characters[1]
self.current_player = self.players[1]
# de rect van de player die aan de beurt is weer verwijderen en ken de start_pos toe
self.model.map.del_rect_from_list(self.current_player.rect, self.model.map.heroes)
self.model.map.del_rect_from_list(self.current_player.rect, self.model.map.obstacles)
self.model.map.start_pos = self.current_player.rect.copy()
# voeg de info sprites toe aan de mapview vanuit de mapdata
self.map1.info.append(InfoSprite(self.model.map.start_pos, 'start', GRIDLAYER))
for rect in self.model.map.trees:
self.map1.info.append(InfoSprite(pygame.Rect(rect), 'tree', GRIDLAYER))
for rect in self.model.map.waters:
self.map1.info.append(InfoSprite(pygame.Rect(rect), 'water', GRIDLAYER))
for rect in self.model.map.heroes:
self.map1.info.append(InfoSprite(pygame.Rect(rect), 'hero', GRIDLAYER))
# voeg de obstacle waarden toe aan de mapview vanuit de mapdata
for rect in self.model.map.obstacles:
self.map1.obstacles.append(pygame.Rect(rect))
for rect in self.model.map.low_obst:
self.map1.low_obst.append(pygame.Rect(rect))
if event.key == pygame.K_F10:
self.map1.grid.show ^= True
if event.key == pygame.K_F11:
self.info ^= True
if event.key == pygame.K_F12:
self.debug ^= True # simple boolean swith
elif isinstance(event, evm.TickEvent):
if not self.isinitialized:
return
currentstate = self.model.state.peek()
if currentstate == State.Intro:
self.render_intro()
if currentstate == State.Menu:
self.render_menu()
if currentstate == State.Play:
self.render_play()
if currentstate == State.Help:
self.render_help()
self.clock.tick(FPS) # limit the redraw speed to 60 frames per second
def _initialize(self):
"""
Set up the pygame graphical display and loads graphical resources.
"""
os.environ['SDL_VIDEO_CENTERED'] = '1'
pygame.init()
self.screen = pygame.display.set_mode((SCREENWIDTH, SCREENHEIGHT), pygame.DOUBLEBUF) # | pygame.FULLSCREEN)
self.background = pygame.Surface(self.screen.get_size())
self.background.fill(BLACK)
self.background = self.background.convert()
self.window = pygame.Surface((WINDOWWIDTH, WINDOWHEIGHT))
self.window.fill(DARKGRAY)
self.window = self.window.convert()
self.clock = pygame.time.Clock()
self.debugfont = pygame.font.SysFont('courier', 11)
self.titlefont = pygame.font.SysFont('sans', 25, True)
self._init_buttons()
self.players = []
for character in self.model.characters:
self.players.append(CharSprite(character.bmp))
self.current_player = None
self.info = False
self.debug = False
self.isinitialized = True
def _draw_map(self):
import pyscroll
map_layer = pyscroll.BufferedRenderer(self.model.map.map_data, (WINDOWWIDTH, WINDOWHEIGHT), clamp_camera=True)
self.group = pyscroll.PyscrollGroup(map_layer=map_layer, default_layer=PLAYERLAYER)
# zet alle players sprites op de juiste posities
for count, player in enumerate(self.players):
player.rect.topleft = self.model.characters[count].new_position
self.current_player = self.players[0]
self.group.add(self.players)
# voeg de posities van alles heroes aan de mapdata toe en ook als obstacle
for char in self.model.characters:
rect = pygame.Rect(char.new_position[0], char.new_position[1], char.width, char.height)
self.model.map.add_rect_to_list(rect, self.model.map.heroes)
self.model.map.add_rect_to_list(rect, self.model.map.obstacles)
# de rect van de player die aan de beurt is weer verwijderen en ken de start_pos toe
self.model.map.del_rect_from_list(self.current_player.rect, self.model.map.heroes)
self.model.map.del_rect_from_list(self.current_player.rect, self.model.map.obstacles)
self.model.map.start_pos = self.current_player.rect.copy()
# creeer een mapview
self.map1 = MapView(self.model.map.width, self.model.map.height)
# maak van het current hokje een hero-info sprite
self.map1.current = InfoSprite(self.current_player.rect, 'hero', GRIDLAYER)
# voeg de info sprites toe aan de mapview vanuit de mapdata
self.map1.info.append(InfoSprite(self.model.map.start_pos, 'start', GRIDLAYER))
for rect in self.model.map.trees:
self.map1.info.append(InfoSprite(pygame.Rect(rect), 'tree', GRIDLAYER))
for rect in self.model.map.waters:
self.map1.info.append(InfoSprite(pygame.Rect(rect), 'water', GRIDLAYER))
for rect in self.model.map.heroes:
self.map1.info.append(InfoSprite(pygame.Rect(rect), 'hero', GRIDLAYER))
# voeg de obstacle waarden toe aan de mapview vanuit de mapdata
for rect in self.model.map.obstacles:
self.map1.obstacles.append(pygame.Rect(rect))
for rect in self.model.map.low_obst:
self.map1.low_obst.append(pygame.Rect(rect))
def render_intro(self):
"""
Render the game intro.
"""
bg_rect = self.background.get_rect()
somewords = self.titlefont.render('Battle...!', True, GREEN)
text_rect = somewords.get_rect()
text_rect.center = bg_rect.width/2, bg_rect.height/2
self.screen.blit(somewords, text_rect.topleft)
pygame.display.flip()
self.screen.blit(self.background, (0, 0))
def render_play(self):
"""
Render the game play.
"""
self.draw_grid()
self.draw_info()
self.show_window()
self.show_debug()
self.show_buttons()
pygame.display.flip()
self.screen.blit(self.background, (0, 0))
def draw_grid(self):
if self.map1.grid.show:
self.group.add(self.map1.grid)
else:
self.group.remove(self.map1.grid)
def draw_info(self):
if self.info:
self.map1.current.rect.topleft = self.model.current_character.new_position
self.group.add(self.map1.current)
self.group.add(self.map1.info)
else:
self.group.remove(self.map1.current)
self.group.remove(self.map1.info)
def show_window(self):
self.group.center(self.current_player.rect.center)
self.group.draw(self.window)
self.screen.blit(self.window, WINDOWPOS)
def show_debug(self):
if self.debug:
text = ("FPS: {}".format(int(self.clock.get_fps())),
"step_north: {}".format(self.model.current_character.step_north),
"step_south: {}".format(self.model.current_character.step_south),
"step_west: {}".format(self.model.current_character.step_west),
"step_east: {}".format(self.model.current_character.step_east),
"step_delay: {}".format(self.model.current_character.step_delay),
"last_direction: {}".format(self.model.current_character.last_direction),
"move_direction: {}".format(self.model.current_character.move_direction),
"movespeed: {}".format(self.model.current_character.movespeed),
"start_pos.x: {}".format(self.model.map.start_pos[0]
if self.model.map.start_pos is not None else "None"),
"old_position.x: {}".format(self.model.current_character.old_position[0]),
"new_position.x: {}".format(self.model.current_character.new_position[0]),
"start_pos.y {}".format(self.model.map.start_pos[1]
if self.model.map.start_pos is not None else "None"),
"old_position.y {}".format(self.model.current_character.old_position[1]),
"new_position.y {}".format(self.model.current_character.new_position[1]),
"step_count: {}".format(self.current_player.step_count),
"step_animation: {}".format(self.current_player.step_animation),
)
for count, line in enumerate(text):
self.screen.blit(self.debugfont.render(line, True, WHITE), (0, count * 10))
def show_buttons(self):
for button in self.buttons:
button.draw(self.screen)
def render_menu(self):
"""
Render the game menu.
"""
somewords = self.titlefont.render('You are in the Menu. Space to play. Esc exits.', True, GREEN)
self.screen.blit(somewords, (0, 0))
pygame.display.flip()
self.screen.blit(self.background, (0, 0))
def render_help(self):
"""
Render the help screen.
"""
somewords = self.titlefont.render('Help is here. space, escape or return.', True, GREEN)
self.screen.blit(somewords, (0, 0))
pygame.display.flip()
self.screen.blit(self.background, (0, 0))
def _init_buttons(self):
self.button_view = ButtonSprite((SCREENWIDTH-200, SCREENHEIGHT-300), "V")
self.button_up = ButtonSprite((SCREENWIDTH-150, SCREENHEIGHT-300), "Up")
self.button_down = ButtonSprite((SCREENWIDTH-150, SCREENHEIGHT-250), "Down")
self.button_left = ButtonSprite((SCREENWIDTH-200, SCREENHEIGHT-250), "Left")
self.button_right = ButtonSprite((SCREENWIDTH-100, SCREENHEIGHT-250), "Right")
self.button_cancel = ButtonSprite((SCREENWIDTH-100, SCREENHEIGHT-200), "C")
self.buttons = [self.button_view, self.button_up, self.button_down, self.button_left, self.button_right,
self.button_cancel]
def detect_collision(self):
# loop tegen de rand van een obstacle aan
# er mag maar 1 obstacle in deze lijst zijn
if len(self.current_player.rect.collidelistall(self.map1.obstacles)) == 1:
# obj_nr is het nummer van de betreffende obstacle
obj_nr = self.current_player.rect.collidelist(self.map1.obstacles)
self.model.current_character.move_side(self.map1.obstacles[obj_nr])
self.current_player.rect.topleft = self.model.current_character.new_position
# loop tegen de rand van een low obstacle aan, bijv water
if len(self.current_player.rect.collidelistall(self.map1.low_obst)) == 1:
obj_nr = self.current_player.rect.collidelist(self.map1.low_obst)
self.model.current_character.move_side(self.map1.low_obst[obj_nr])
self.current_player.rect.topleft = self.model.current_character.new_position
# loop tegen een obstacle of low_obst aan
while self.current_player.rect.collidelist(self.map1.obstacles) > -1 or \
self.current_player.rect.collidelist(self.map1.low_obst) > -1:
self.model.current_character.move_back()
self.current_player.rect.topleft = self.model.current_character.new_position
class MapView(object):
    """View-side state for one map: grid overlay, info markers and obstacle rects."""
    def __init__(self, map_width, map_height):
        # Toggleable tile-grid overlay covering the whole map surface.
        self.grid = Grid(map_width, map_height, TILESIZE, GRIDLAYER)
        # InfoSprite markers (start/hero/tree/water outlines) shown in info mode.
        self.info = []
        # Marker that follows the character whose turn it is (set by the view).
        self.current = None
        # Solid collision rects, and low obstacles (e.g. water), as pygame.Rects.
        self.obstacles = []
        self.low_obst = []
class CharSprite(pygame.sprite.Sprite):
    """Animated character sprite backed by a spritesheet.

    Each facing direction has four 32x32 frames; the ``*_states`` dicts map
    animation step (0-3) to an (x, y, w, h) clip rectangle on the sheet.
    """
    # CharSprite extends the pygame.sprite.Sprite class
    def __init__(self, spritesheet):
        pygame.sprite.Sprite.__init__(self)
        # Frame clip rects per direction: key 0 is the standing pose,
        # 1-3 form the walk cycle (frames 0 and 2 reuse the same clip).
        self.west_states = {0: (32, 32, 32, 32), 1: (0, 32, 32, 32), 2: (32, 32, 32, 32), 3: (64, 32, 32, 32)}
        self.east_states = {0: (32, 64, 32, 32), 1: (0, 64, 32, 32), 2: (32, 64, 32, 32), 3: (64, 64, 32, 32)}
        self.north_states = {0: (32, 96, 32, 32), 1: (0, 96, 32, 32), 2: (32, 96, 32, 32), 3: (64, 96, 32, 32)}
        self.south_states = {0: (32, 0, 32, 32), 1: (0, 0, 32, 32), 2: (32, 0, 32, 32), 3: (64, 0, 32, 32)}
        # Assign the spritesheet to self.full_sprite
        self.full_sprite = pygame.image.load(spritesheet)
        # 'Clip' the sheet so that only one frame is displayed (the first frame of north_states)
        self.full_sprite.set_clip(pygame.Rect(self.north_states[0]))
        # Create a rect to animate around the screen
        self.image = self.full_sprite.subsurface(self.full_sprite.get_clip())
        self.rect = self.image.get_rect()
        self.mask = pygame.mask.from_surface(self.image)
        # Movement speed fed in by the view from the model; 0 while standing.
        self.updatespeed = 0
        self.step_count = 0
        self.step_animation = 0
    def update(self, event):
        """Select the frame to show based on a CharUpdateEvent.

        When ``event.movespeed`` is None the character is idle: show the
        standing frame for the last direction faced. Otherwise advance the
        walk animation for the current movement direction.
        """
        if event.movespeed is None:
            if event.last_dir == Direction.North:
                self._clip(self.north_states[0])
            if event.last_dir == Direction.South:
                self._clip(self.south_states[0])
            if event.last_dir == Direction.West:
                self._clip(self.west_states[0])
            if event.last_dir == Direction.East:
                self._clip(self.east_states[0])
        else:
            if event.move_dir == Direction.North:
                self._clip(self.north_states)
            if event.move_dir == Direction.South:
                self._clip(self.south_states)
            if event.move_dir == Direction.West:
                self._clip(self.west_states)
            if event.move_dir == Direction.East:
                self._clip(self.east_states)
        # Update the image for each pass
        self.image = self.full_sprite.subsurface(self.full_sprite.get_clip())
    def _clip(self, clipped_rect):
        # A dict means "animate" (pick the next walk frame); a plain tuple
        # means "stand still", which also resets the animation counters.
        if type(clipped_rect) is dict:
            self.full_sprite.set_clip(pygame.Rect(self._get_frame(clipped_rect)))
        else:
            self.step_count = 0
            self.step_animation = 0
            self.full_sprite.set_clip(pygame.Rect(clipped_rect))
        return clipped_rect
    def _get_frame(self, frame_set):
        """Advance the walk cycle and return the clip rect for the new frame."""
        self.step_count += 1
        # Higher movespeed -> fewer ticks per frame. NOTE(review): this divides
        # by self.updatespeed, which starts at 0 — assumed to be non-zero by
        # the time a movement dict reaches _clip; confirm against the model.
        if self.step_count % (24 / self.updatespeed) == 1: # 24 is divisible by all movespeeds
            self.step_animation += 1
        if self.step_animation > 3:
            self.step_animation = 0
        return frame_set[self.step_animation]
class ButtonSprite(pygame.sprite.Sprite):
    """Simple rectangular on-screen button with a centered text caption."""
    def __init__(self, position, caption):
        pygame.sprite.Sprite.__init__(self)
        self.width = 40
        self.height = 40
        self._bgcolor = BLACK
        self._visible = True
        self.image = pygame.Surface((self.width, self.height))
        self.image = self.image.convert()
        self.rect = self.image.get_rect()
        self.rect.topleft = position
        self.font = pygame.font.SysFont('sans', 14)
        self.caption = self.font.render(caption, True, WHITE)
        self.caprect = self.caption.get_rect()
        # Center the caption inside the button surface.
        self.caprect.center = self.rect.width / 2, self.rect.height / 2
        self._update()
    def draw(self, surface):
        """Blit the button onto *surface* unless it is hidden."""
        if self._visible:
            surface.blit(self.image, self.rect.topleft)
    def _update(self):
        # Re-render background, 1px white border and caption; called after
        # any bgcolor/visibility change.
        self.image.fill(self.bgcolor)
        pygame.draw.rect(self.image, WHITE, (0, 0, self.width, self.height), 1)
        self.image.blit(self.caption, self.caprect)
    @property
    def bgcolor(self):
        """Background color; assigning it redraws the button."""
        return self._bgcolor
    @bgcolor.setter
    def bgcolor(self, value):
        self._bgcolor = value
        self._update()
    @property
    def visible(self):
        """Visibility flag; assigning it redraws the button."""
        return self._visible
    @visible.setter
    def visible(self, value):
        self._visible = value
        self._update()
class InfoSprite(pygame.sprite.Sprite):
    """Transparent overlay sprite that outlines one tile of interest.

    rect: pygame.Rect giving position and size of the marked tile.
    rect_type: one of 'start', 'hero', 'tree', 'water' (selects the color).
    layer: sprite layer used by the layered sprite group.
    """
    # Outline color per marker type. Unknown types fall back to WHITE instead
    # of the old behavior of returning None, which crashed pygame.draw.rect.
    _COLORS = {'start': GREEN, 'hero': BLUE, 'tree': PURPLE, 'water': BLUE}
    def __init__(self, rect, rect_type, layer):
        pygame.sprite.Sprite.__init__(self)
        self._layer = layer
        self.image = pygame.Surface((rect.width, rect.height))
        self.image.fill(BLACK)
        # BLACK is the colorkey, so only the drawn outline is visible.
        self.image.set_colorkey(BLACK)
        self.rect_type = rect_type
        pygame.draw.rect(self.image, self._color, (0, 0, rect.width, rect.height), 1)
        self.image = self.image.convert()
        self.rect = self.image.get_rect()
        self.rect.topleft = rect.topleft
    @property
    def _color(self):
        """Outline color for this marker's type (WHITE for unknown types)."""
        return InfoSprite._COLORS.get(self.rect_type, WHITE)
class Grid(pygame.sprite.Sprite):
    """Transparent overlay sprite that draws tile grid lines over the map.

    BUGFIX: the original loops were swapped — horizontal lines stepped over
    map_width and vertical lines over map_height, which draws a wrong grid on
    any non-square map. Horizontal lines must step down the height; vertical
    lines must step across the width.
    """
    def __init__(self, map_width, map_height, tile_size, layer):
        pygame.sprite.Sprite.__init__(self)
        self._layer = layer
        self.image = pygame.Surface((map_width, map_height))
        self.image.fill(BLACK)
        # BLACK is the colorkey, so only the grid lines are visible.
        self.image.set_colorkey(BLACK)
        # Horizontal lines: one per tile row (y = 0, tile_size, ...).
        for y in range(0, map_height, tile_size):
            pygame.draw.line(self.image, GRAY, (0, y), (map_width, y))
        # Vertical lines: one per tile column (x = 0, tile_size, ...).
        for x in range(0, map_width, tile_size):
            pygame.draw.line(self.image, GRAY, (x, 0), (x, map_height))
        self.image = self.image.convert()
        self.rect = self.image.get_rect()
        # Toggled by the view (F10) to show/hide the grid.
        self.show = False
| henkburgstra/pyRPG | battle2/view.py | view.py | py | 19,976 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.Color",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.Color",
"line_numbe... |
72964937634 | #%%
import numpy as np
import pandas as pd
from pandas import DataFrame
from retrying import retry
import random
import pickle
class Player(object):
    """Base class for game participants; stores only the display name."""

    def __init__(self, name):
        self.name = name
#%%
class CpuPlayer(Player):
    """Q-learning style CPU player for the take-1-to-3-stones game.

    ``table`` maps (remaining stone count 1..max_num) x (action 1/2/3) to a
    learned value; get_taken_num exploits the table or explores randomly.
    """
    # The three legal moves: take 1, 2 or 3 stones.
    taken_choice=[1,2,3]
    def __init__(self,name='cpu',learning_mode=True, learning_rate=0.1,searching_rate=0.9,estimated_rate=0.8,max_num=50):
        super(CpuPlayer,self).__init__(name)
        self.learning_rate=learning_rate
        # Epsilon-greedy threshold: exploit with this probability while learning.
        self.searching_rate=searching_rate
        # Discount factor applied to the estimated value of the next state.
        self.estimated_rate=estimated_rate
        self.learning_mode=learning_mode
        self.max_num=max_num
        self._bulid_table()
    def _bulid_table(self):
        # NOTE(review): name keeps the original "bulid" typo since external
        # code may already call it. Table starts all-zero (untrained).
        self.table=DataFrame(
            data=np.zeros((self.max_num,3)),
            columns=CpuPlayer.taken_choice,
            index=range(1,self.max_num+1)
        )
    def get_taken_num(self, current_num):
        """Choose how many stones (1-3) to take when *current_num* remain."""
        row_value=self.table.loc[current_num,:]
        # Explore (random move) while learning if the row looks untrained or
        # with probability 1 - searching_rate; otherwise exploit the max value.
        # NOTE(review): row_value.all()==0 is true when *any* cell is 0, not
        # only when all cells are 0 — confirm this is the intended trigger.
        if self.learning_mode and (row_value.all()==0 or np.random.uniform()>self.searching_rate):
            selected=np.random.choice(CpuPlayer.taken_choice)
        else:
            index=row_value.values.argmax()
            selected=CpuPlayer.taken_choice[index]
        return selected
    def learning(self, current,taken_num,state):
        '''
        Temporal-difference update for the (current, taken_num) table cell.

        state: 1 means won, 0 means lost, None means the game is undecided.
        '''
        reward=self._get_reward(state)
        # Estimate the value of the three states reachable after this choice.
        next_range_value = self._get_next_range_value(current,taken_num, state)
        em_value=reward+self.estimated_rate * next_range_value
        # print(current,taken_num)
        select_value=self.table.loc[current,taken_num]
        # Move the stored value toward the estimate by learning_rate.
        self.table.loc[current,taken_num]+=self.learning_rate*(em_value-select_value)
    def _get_reward(self, state):
        # Terminal rewards only; undecided states (None) get 0.
        reward=0
        if state==1:
            reward=10 # big reward for winning
        elif state==0:
            reward=-10 # big penalty for losing
        return reward
    def _get_next_range_value(self, current,taken_num, state):
        # If the game is already decided there is no next state to estimate.
        if state is not None:
            return 0
        # Value the three board positions this choice can lead to next.
        end=current-taken_num-1
        start=end-2
        range_df=self.table.loc[start:end,:].copy()
        range_df['all_less']=range_df.apply(lambda x:(x<0).all(),axis=1)
        range_df['all_zero']=range_df.apply(lambda x:(x==0).all(),axis=1)
        # If any of the three rows is entirely negative, this choice may lose.
        if range_df['all_less'].any():
            return -2
        # Any fully-untrained row: no information, so value 0.
        if range_df['all_zero'].any():
            return 0
        return range_df.iloc[:,:3].max().max()
    def save_model(self,filename='cpu_play.m'):
        """Pickle this player (including its learned table); return the path."""
        with open(filename,'wb') as f:
            pickle.dump(self,f)
            # json.dumps()
            res=f.name
        return res
    @staticmethod
    def load_from_file(filename='cpu_play.m'):
        """Load a previously pickled CpuPlayer. Only unpickle trusted files."""
        with open(filename,'rb') as f:
            return pickle.load(f)
#%%
class Referee(object):
    """Tracks the remaining stones and judges wins/losses.

    Convention: whoever takes the last stone loses, so leaving exactly one
    stone for the opponent is a win for the mover.
    """
    def __init__(self):
        pass
    def ready(self, start_num):
        """Reset the pile to *start_num* stones for a new game."""
        self.current_num = start_num
    def is_end_game(self):
        """True once the pile is exhausted."""
        return self.current_num <= 0
    def get_state(self, taken_num):
        """Judge a prospective move: 1 = win, 0 = loss, None = undecided."""
        remaining = self.current_num - taken_num
        if remaining <= 0:
            # Taking the last stone (or more) loses.
            return 0
        # Leaving a single stone forces the opponent to take it: a win.
        return 1 if remaining == 1 else None
    def take_away(self, taken_num):
        """Apply the move, removing *taken_num* stones from the pile."""
        self.current_num -= taken_num
class UserPlayer(Player):
    """Human player: reads the number of stones to take from stdin."""
    def __init__(self,name='user'):
        super(UserPlayer,self).__init__(name)
    # @retry with no arguments re-invokes the method until it returns
    # without raising, so any invalid input simply re-prompts forever.
    @retry
    def get_taken_num(self, current_num):
        """Prompt for 1-3 stones and return the validated integer choice."""
        ipt=input(f'当前剩余{current_num}个石子,输入你要拿取的数量(1-3):')
        try:
            num=int(ipt)
        except ValueError as ex :
            # Non-numeric input: tell the user, then let @retry re-prompt.
            print('哥,输入数字呀!')
            raise ex
        if num<1 or num>3:
            # Out-of-range input: same deal — message, raise, retry.
            print('你要输入1到3的数值')
            raise Exception()
        return num
| CrystalWindSnake/Creative | python/rl_learning_stone/models.py | models.py | py | 3,830 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.random",
... |
6479965465 | from django.conf.urls import url
from ..views.oj import (SearchGroupByKeyWordAPI,
JoinGroupBySearchAPI,
GroupListAndDetailAPI,
HomeWorkListAndDetailAPI
)
# Group-related endpoints for the OJ (online judge) front end.
urlpatterns = [
    # Keyword search over groups.
    url(r"^search_group/?$", SearchGroupByKeyWordAPI.as_view(), name="search_group_api"),
    # Join a group found via search.
    url(r"^join_group/?$", JoinGroupBySearchAPI.as_view(), name="join_group_api"),
    # Group list / detail.
    url(r"^groups/?$", GroupListAndDetailAPI.as_view(), name="group_api"),
    # Homework list / detail.
    url(r"^homework/?$", HomeWorkListAndDetailAPI.as_view(), name="homework_api"),
]
{
"api_name": "django.conf.urls.url",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.oj.SearchGroupByKeyWordAPI.as_view",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "views.oj.SearchGroupByKeyWordAPI",
"line_number": 10,
"usage_type": "name"
... |
3194291784 | import bibtexparser
import re
import os
from os import path
# Extract all titles from the BibTeX library into a plain-text listing.
with open('MyCollection.bib') as bibtex_file:
    bib_database = bibtexparser.load(bibtex_file)

# Mode "w" truncates any previous run's output, replacing the old
# path.exists() + os.remove() + append dance with one race-free open.
with open('mytitles.txt', 'w') as out_file:
    for entry in bib_database.entries:
        # BibTeX titles are often wrapped in {} to protect capitalization;
        # strip the braces for the plain-text listing.
        title = entry['title'].replace('{', '').replace('}', '')
        print(title)
        out_file.write('%s\n' % title)
| ShenWang9202/bibfileGenerator | getTitles.py | getTitles.py | py | 463 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.exists",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "os.remove",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "bibtexparser.load",
"line_number": ... |
43064648639 | from django.test import TestCase, tag
from django.urls.base import reverse
from edc_model_wrapper import ModelWrapper
from ..models import ActionItem, ActionType
from ..templatetags.action_item_extras import add_action_item_popover
from ..view_mixins import ActionItemViewMixin
from .models import SubjectIdentifierModel
class MyModelWrapper(ModelWrapper):
    """Test-only wrapper that redirects back to the dashboard after edits."""
    next_url_name = 'dashboard_url'
class TestAction(TestCase):
    """Tests for ActionItemViewMixin and the action-item template tag."""
    def setUp(self):
        # Point ActionItem at the lightweight test subject-identifier model.
        # NOTE(review): the original value is stashed but never restored.
        self.subject_identifier_model = ActionItem.subject_identifier_model
        ActionItem.subject_identifier_model = 'edc_action_item.subjectidentifiermodel'
        self.subject_identifier = '12345'
        SubjectIdentifierModel.objects.create(
            subject_identifier=self.subject_identifier)
        ActionItemViewMixin.action_item_model_wrapper_cls = MyModelWrapper
    def test_view_populates_action_type(self):
        """Instantiating the mixin creates the default ActionType rows."""
        self.assertEqual(ActionType.objects.all().count(), 0)
        ActionItemViewMixin()
        self.assertGreater(ActionType.objects.all().count(), 0)
        # A second instantiation must not fail or corrupt the table.
        ActionItemViewMixin()
        self.assertGreater(ActionType.objects.all().count(), 0)
    def test_view_context(self):
        """Context lists one entry per open ActionItem for the subject."""
        view = ActionItemViewMixin()
        view.kwargs = dict(subject_identifier=self.subject_identifier)
        context = view.get_context_data()
        self.assertEqual(context.get('open_action_items'), [])
        # Create one ActionItem per known ActionType, then re-query.
        for action_type in ActionType.objects.all():
            ActionItem.objects.create(
                subject_identifier=self.subject_identifier,
                action_type=action_type)
        view = ActionItemViewMixin()
        view.kwargs = dict(subject_identifier=self.subject_identifier)
        context = view.get_context_data()
        self.assertEqual(len(context.get('open_action_items')),
                         ActionItem.objects.all().count())
    def test_templatetag(self):
        """The popover tag yields a context whose add-URL actually reverses."""
        context = add_action_item_popover(
            self.subject_identifier, 'subject_dashboard_url')
        reverse(context.get('action_item_add_url'))
| botswana-harvard/edc-action-item | edc_action_item/tests/test_view.py | test_view.py | py | 2,034 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "edc_model_wrapper.ModelWrapper",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.test.TestCase",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "models.ActionItem.subject_identifier_model",
"line_number": 18,
"usage_type": "attrib... |
71607967713 | from selenium import webdriver
import pandas as pd
from IPython.display import display
from selenium.webdriver.chrome.options import Options
# Run the browser headless (no visible window), in the background.
chrome_options = Options()
chrome_options.headless = True
# Open the browser with the options defined above.
navegador = webdriver.Chrome(options=chrome_options)
# Step 1: scrape the currency quotes.
# US dollar
navegador.get('https://www.google.com/search?q=cotacao+dolar&oq=cotacao+dolar&aqs=chrome..69i57j35i39j0i512l3j0i433i512j0i512l4.1498j0j9&sourceid=chrome&ie=UTF-8')
cot_dolar = navegador.find_element('xpath', '//*[@id="knowledge-currency__updatable-data-column"]/div[1]/div[2]/span[1]').get_attribute('data-value')
# euro
navegador.get('https://www.google.com/search?q=cotacao+euro&oq=cotacao+euro&aqs=chrome..69i57j0i512l4j0i10i512j0i512l4.1450j0j7&sourceid=chrome&ie=UTF-8')
cot_euro = navegador.find_element('xpath', '//*[@id="knowledge-currency__updatable-data-column"]/div[1]/div[2]/span[1]').get_attribute('data-value')
# gold
navegador.get('https://www.melhorcambio.com/ouro-hoje#:~:text=O%20valor%20do%20grama%20do,em%20R%24%20314%2C92.')
cot_ouro = navegador.find_element('xpath', '//*[@id="comercial"]').get_attribute('value')
# Convert Brazilian decimal comma to a dot. NOTE(review): assumes the site
# never includes a thousands separator in this field — confirm.
cot_ouro = cot_ouro.replace(',', '.')
# Close the browser after collecting the quotes.
navegador.quit()
# Step 2: load and update the product database.
df = pd.read_excel('Produtos.xlsx')
# Update the currency quote column in the dataframe.
df.loc[df['Moeda'] == 'Dólar', 'Cotação'] = float(cot_dolar) # loc[row, column]
df.loc[df['Moeda'] == 'Euro', 'Cotação'] = float(cot_euro)
df.loc[df['Moeda'] == 'Ouro', 'Cotação'] = float(cot_ouro)
# Recompute purchase price and sale price.
df['Preço de Compra'] = df['Preço Original'] * df['Cotação']
df['Preço de Venda'] = df['Preço de Compra'] * df['Margem']
# Format the prices as BRL currency strings.
df['Preço de Compra'], df['Preço de Venda'] = df['Preço de Compra'].map('R${:.2f}'.format), df['Preço de Venda'].map('R${:.2f}'.format)
df['Cotação'] = df['Cotação'].map('R${:.2f}'.format)
# Step 3: export the updated database.
df.to_excel('Produtos Novo.xlsx', index=False) # output file name | index=False so the index is not exported
df_novo = pd.read_excel('Produtos Novo.xlsx')
display(df_novo) | jpc963/cotacao-e-automacao | main.py | main.py | py | 2,323 | python | pt | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 11,
"usage_type": "name"
},
{
... |
75136158434 | import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import statistics as st
from math import pow
import math
import os
import numpy as np
from scipy.stats import norm
FILE_PATH=str(os.getcwd()) + '\Data\heart.csv'
df = pd.read_csv(FILE_PATH)
#list of all the column headings
col_heads = df.columns
#dictionary containing column heading as key and mean of all values in that column as value
col_means = dict([(col, df[col].mean()) for col in col_heads])
print("Means of respective columns: ", col_means)
#dictionary containing column heading as key and variance of all values in that column as value
col_variances = dict([(col, pow(df[col].std(), 2)) for col in col_heads])
print("Variances of respective columns: ",col_variances)
#normalisation/ standardisation of all numeric data (using cumulative distribution for z-scores)
normalized_data = []
for col in col_heads:
row = [((X - col_means[col]) / pow(col_variances[col], 0.5)) for X in df[col].tolist()]
normalized_data.append(row)
df_normal = pd.DataFrame(normalized_data, columns = list(range(0,1025)))
df_normal = df_normal.transpose()
df_normal.columns = col_heads
#graphical visualisation of normalized data
def plot_normal(df, col):
    """Plot the standard-normal pdf over the sorted values of df[col].

    Assumes *df* holds z-scored data (mean 0, std 1) — as built above — so
    N(0, 1) is the appropriate reference density for the feature.
    """
    feature=[i for i in df[col]]
    # Sort so the curve is drawn left-to-right along the x axis.
    feature.sort()
    plt.plot(feature, norm.pdf(feature, 0, 1), color = 'green', label = 'Normal curve')
    # title and axis labels
    plt.xlabel(col)
    plt.ylabel('Frequency')
    plt.title('Normal curve (bell shaped) for feature data')
    plt.show()
#normal plots for normalized data in each column
for columns in col_heads:
plot_normal(df_normal, columns)
#find the categorical variables from the histograms
categorical_vars = ['sex', 'cp', 'fbs', 'restecg', 'exang', 'slope', 'thal', 'target']
#plot normal curves only for continuous variables
for columns in col_heads:
if(columns not in categorical_vars):
plot_normal(df_normal, columns)
#age, trestbps, chol and thalach are approximately normally distributed, trestbps and chol having slightly extended tails
| hrishitchaudhuri/sds | scripts/normalisation.py | normalisation.py | py | 2,076 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 28,
... |
28323298008 | from glob import glob
from langchain.agents import load_tools
from langchain.agents import initialize_agent
from langchain.agents import AgentType
from langchain.llms import OpenAI
import os, sys, openai
from dotenv import load_dotenv
from langchain.tools import tool, Tool
from retrieval import Retrieval
from pyepsilla import cloud
load_dotenv()
retrieval = Retrieval()
client = cloud.Client(
project_id=os.getenv("PROJECT_ID"),
api_key=os.getenv("EPSILLA_API_KEY")
)
db = client.vectordb(db_id=os.getenv("DB_ID"))
@tool
def search_api(question: str) -> str:
    """Searches the relevant information from the document set to answer the question."""
    # NOTE: the docstring above doubles as the LangChain tool description —
    # the agent reads it to decide when to invoke this tool; keep it stable.
    # Multi-query retrieval: search with the original question plus several
    # LLM-generated rephrasings.
    qs = retrieval.rephrase(question=question)
    query_score_dict = {}
    item = retrieval.vector_search(db, question)
    # print(item)
    query_score_dict[question] = item
    for q in qs:
        item = retrieval.vector_search(db, q)
        query_score_dict[q] = item
    # print(query_score_dict)
    # Fuse the per-query result lists into one ranking (see
    # Retrieval.ranking_fusion), then assemble the context text from it.
    ranking_result = retrieval.ranking_fusion(original_query=question, query_score_dict=query_score_dict)
    final_result = retrieval.generate_content_based_on_ranking(ranking_result)
    return final_result
class DocAgent:
    """LangChain-based agent that answers questions over local PDF documents.

    Combines a ReAct tool agent (search + llm-math) with a retrieval-augmented
    GPT-4 pipeline.  Relies on module-level names defined earlier in this file
    (not visible here): `retrieval`, `db`, `search_api`, `openai`, `glob`, and
    the OPENAI_KEY environment variable — presumably set via dotenv.
    """
    def __init__(self):
        # Deterministic LLM (temperature=0) driving a zero-shot ReAct agent
        # with a custom search tool plus the built-in llm-math tool.
        api_key = os.getenv("OPENAI_KEY")
        llm = OpenAI(temperature=0, openai_api_key=api_key)
        tools = [search_api]
        tools = tools + load_tools(["llm-math"], llm=llm)
        self.agent_executor = initialize_agent(
            tools, llm, agent=AgentType.ZERO_SHOT_REACT_DESCRIPTION, verbose=True
        )
    def list_docs(self):
        """Return the base names of all PDF files under ./documents/."""
        # List PDF files under ./documents/ folder
        ret = []
        files = glob("./documents/*.pdf")
        for pdf in files:
            ret.append(os.path.basename(pdf))
        return ret
    def solve(self, question):
        """Answer `question` via the ReAct tool agent; returns the output text."""
        response = self.agent_executor.invoke(
            {
                "input": question
            }
        )
        return response['output']
    def rephrase(self, question):
        """Delegate query rephrasing to the retrieval module."""
        return retrieval.rephrase(question=question)
    def solve_one(self, file, question, questions):
        """Answer `question` against a single document `file`.

        `questions` are additional rephrasings; their per-query search hits are
        fused by rank before prompting GPT-4 with the resulting context.
        """
        # Step 1. Search the relevant information from the document to answer the question.
        query_score_dict = {}
        item = retrieval.vector_search_one_doc(db, question, file)
        # print(item)
        query_score_dict[question] = item
        for q in questions:
            item = retrieval.vector_search_one_doc(db, q, file)
            query_score_dict[q] = item
        # print(query_score_dict)
        ranking_result = retrieval.ranking_fusion(original_query=question, query_score_dict=query_score_dict)
        context = retrieval.generate_content_based_on_ranking(ranking_result)
        # Step 2. Use the prompt to answer the question for the document.
        openai.api_key = os.getenv("OPENAI_KEY")
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[
                {
                    "role": "system",
                    "content": "You are an assistant answering questions for a given document."
                },
                {
                    "role": "user",
                    "content": f'''
                    Answer the Question based on the given Context. Please don't make things up. Ask for more information when needed.
                    Context:
                    {context}
                    Question:
                    {question}
                    Answer:
                    Let's work this out in a step by step way to be sure we have the right answer.
                    '''
                }
            ],
            temperature=0,
            max_tokens=256,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        return response['choices'][0]['message']['content']
    def summary(self, question, concated):
        """Fuse per-document answers (`concated`) into one final GPT-4 answer."""
        openai.api_key = os.getenv("OPENAI_KEY")
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[
                {
                    "role": "system",
                    "content": "You are an assistant answering questions for a given document."
                },
                {
                    "role": "user",
                    "content": f'''
                    Answer the Question based on the Analysis Of Each Document. If some documents are not related to the question, please ignore them.
                    Analysis Of Each Document:
                    {concated}
                    Question:
                    {question}
                    Answer:
                    '''
                }
            ],
            temperature=0,
            max_tokens=256,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        return response['choices'][0]['message']['content']
    def can_loop(self, question):
        """Ask GPT-4 whether `question` can be answered file-by-file.

        Returns True only when the reply is exactly "YES"; any other text
        (including "YES." with punctuation or extra words) counts as False.
        """
        openai.api_key = os.getenv("OPENAI_KEY")
        response = openai.ChatCompletion.create(
            model="gpt-4",
            messages=[
                {
                    "role": "system",
                    "content": "You are an assistant answering questions for a large set of documents."
                },
                {
                    "role": "user",
                    "content": "For the provided question, determine if we can check the documents one by one and make the judgement and answer it purely based on facts from this file, or we need to cross validate with other files. If former, response \"YES\"; if later, response \"NO\"\n\nQuestion: " + question
                }
            ],
            temperature=0,
            max_tokens=256,
            top_p=1,
            frequency_penalty=0,
            presence_penalty=0
        )
        # Fall back to 'YES' if the API returned an empty string.
        decision = response['choices'][0]['message']['content'] or 'YES'
        return decision == 'YES'
# agent = DocAgent()
# print(agent.list_docs())
| epsilla-cloud/app-gallery | documents-agent/docagent.py | docagent.py | py | 5,739 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "retrieval.Retrieval",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pyepsilla.cloud.Client",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pyepsil... |
13617183674 | import os
from yt_dlp import YoutubeDL
def download_url(path, URL):
    """Download the video at URL with yt-dlp into directory `path`.

    Returns the yt-dlp output template for the download (yt-dlp substitutes
    %(title)s/%(ext)s itself), not the concrete file path on disk.
    """
    # Bug fix: join with the OS path separator.  The original code did
    # f"{path}" + "%(title)s.%(ext)s", so with path like "/home/user" the
    # file was written to "/home/user%(title)s..." (parent dir, name prefix).
    outtmpl = os.path.join(path, "%(title)s.%(ext)s")
    option = {
        "outtmpl": outtmpl
    }
    ydl = YoutubeDL(option)
    ydl.download([URL])  # blocking download; raises on failure
    return outtmpl
download_url(os.getcwd(),f"https://www.youtube.com/watch?v=WEYCz553Sto&ab_channel=S2P%5BThaiLyrics%5D%3Ainfinity")
| haru-mikann/DiscordBot | test.py | test.py | py | 429 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "yt_dlp.YoutubeDL",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number": 13,
"usage_type": "call"
}
] |
16166429334 | # coding: utf-8
from abc import ABCMeta
import asyncio
import json
from aiohttp import web, MsgType
from bson import json_util
from django.conf import settings
from django.utils.timezone import now
from parkkeeper import models
from parkkeeper.event import async_recv_event, get_sub_socket
from parkkeeper.const import MONIT_SCHEDULE_EVENT, WORK_SCHEDULE_EVENT
from parkworker.const import MONIT_STATUS_EVENT, TASK_EVENT, WORKER_EVENT, WORK_STATUS_EVENT
def start_server():
    """Create the aiohttp app and serve websocket clients until interrupted.

    Binds 0.0.0.0:settings.WEB_SOCKET_SERVER_PORT and blocks on the asyncio
    loop; Ctrl-C triggers a graceful shutdown (drain connections, close the
    listening socket, finish the app, close the loop).
    NOTE(review): uses the legacy pre-aiohttp-3 API
    (make_handler / finish_connections / app.finish) — confirm aiohttp version.
    """
    app = web.Application()
    add_routes(app)
    loop = asyncio.get_event_loop()
    handler = app.make_handler()
    f = loop.create_server(handler, '0.0.0.0', settings.WEB_SOCKET_SERVER_PORT)
    srv = loop.run_until_complete(f)
    print('serving on', srv.sockets[0].getsockname())
    try:
        loop.run_forever()
    except KeyboardInterrupt:
        pass
    finally:
        # Give in-flight websocket connections up to 1s to finish cleanly.
        loop.run_until_complete(handler.finish_connections(1.0))
        srv.close()
        loop.run_until_complete(srv.wait_closed())
        loop.run_until_complete(app.finish())
        loop.close()
def add_routes(app):
    """Register the websocket endpoints on the aiohttp application.

    Each route gets a fresh handler instance whose `get_handler` upgrades the
    request to a websocket and streams the corresponding event feed.
    """
    app.router.add_route('GET', '/monit_schedules', MonitSchedulesHandler().get_handler)
    app.router.add_route('GET', '/work_schedules', WorkSchedulesHandler().get_handler)
    app.router.add_route('GET', '/monits', MonitResultHandler().get_handler)
    # Bug fix: '/works' previously pointed at MonitResultHandler, which left
    # WorkResultHandler unused and streamed monitor (not work) statuses.
    app.router.add_route('GET', '/works', WorkResultHandler().get_handler)
    app.router.add_route('GET', '/waiting_tasks', MonitWaitingTaskHandler().get_handler)
    app.router.add_route('GET', '/current_workers', MonitCurrentWorkerHandler().get_handler)
class WebSocketHandler(metaclass=ABCMeta):
    """Base class for websocket endpoints.

    Subclasses set `need_background = True` and override `background` to push
    server-side events to the client.  Incoming text frames go to
    `process_msg`; the literal payload 'close_ws' closes the socket.
    """
    # Client sends this exact text to request a server-side close.
    stop_msg = 'close_ws'
    # Subclasses opt in to a background push coroutine.
    need_background = False
    stop_background_timeout = 1
    async def process_msg(self, msg_text: str):
        """Default inbound-message handler: echo the text to stdout."""
        print(msg_text)
    async def background(self, ws: web.WebSocketResponse):
        """Demo background task: print the current time once a second."""
        while not ws.closed:
            print(now())
            await asyncio.sleep(1)
        print('Close background')
    async def get_handler(self, request):
        """aiohttp request handler: upgrade to websocket and pump messages.

        Starts the background pusher (if enabled), loops receiving frames
        until the socket closes, then cancels the background task.
        """
        ws = web.WebSocketResponse()
        await ws.prepare(request)
        # start background process if needed
        background_task = None
        if self.need_background:
            loop = asyncio.get_event_loop()
            background_task = loop.create_task(self.background(ws))
        # process ws messages
        while not ws.closed:
            await self._receive_msg(ws)
        # stop background
        if background_task:
            background_task.cancel()
        return ws
    async def _receive_msg(self, ws: web.WebSocketResponse):
        """Receive one websocket frame and dispatch on its message type."""
        msg = await ws.receive()
        if msg.tp == MsgType.text:
            if msg.data == self.stop_msg:
                print('Got stop msg')
                await ws.close()
            else:
                await self.process_msg(msg.data)
        elif msg.tp == MsgType.close:
            print('websocket connection closed')
        elif msg.tp == MsgType.error:
            print('ws connection closed with exception %s' %
                  ws.exception())
class MonitSchedulesHandler(WebSocketHandler):
    """Streams monitoring-schedule events to the websocket client verbatim."""
    need_background = True
    stop_background_timeout = 0.1
    async def background(self, ws):
        # Subscribe to schedule events and forward each JSON payload as-is.
        subscriber_socket = get_sub_socket(MONIT_SCHEDULE_EVENT)
        try:
            while True:
                monit_schedule_json = await async_recv_event(subscriber_socket)
                # print('monit_schedule', monit_schedule_json)
                ws.send_str(monit_schedule_json)
        finally:
            # Always release the subscriber socket, even on cancellation.
            subscriber_socket.close()
class WorkSchedulesHandler(WebSocketHandler):
    """Streams work-schedule events to the websocket client verbatim."""
    need_background = True
    stop_background_timeout = 0.1
    async def background(self, ws):
        # Subscribe to work-schedule events and forward each JSON payload as-is.
        subscriber_socket = get_sub_socket(WORK_SCHEDULE_EVENT)
        try:
            while True:
                work_schedule_json = await async_recv_event(subscriber_socket)
                # print('work_schedule', work_schedule_json)
                ws.send_str(work_schedule_json)
        finally:
            # Always release the subscriber socket, even on cancellation.
            subscriber_socket.close()
class MonitResultHandler(WebSocketHandler):
    """Streams monitoring task-status events, reshaped via _get_task_represent."""
    need_background = True
    stop_background_timeout = 0.1
    async def background(self, ws):
        subscriber_socket = get_sub_socket(MONIT_STATUS_EVENT)
        try:
            while True:
                task_json = await async_recv_event(subscriber_socket)
                # BSON-aware decode (datetimes etc. via json_util).
                task_data = json.loads(task_json, object_hook=json_util.object_hook)
                response = _get_task_represent(task_data)
                ws.send_str(json.dumps(response, default=json_util.default))
        finally:
            subscriber_socket.close()
class WorkResultHandler(WebSocketHandler):
    """Streams work task-status events, reshaped via _get_task_represent."""
    need_background = True
    stop_background_timeout = 0.1
    async def background(self, ws):
        subscriber_socket = get_sub_socket(WORK_STATUS_EVENT)
        try:
            while True:
                task_json = await async_recv_event(subscriber_socket)
                # BSON-aware decode (datetimes etc. via json_util).
                task_data = json.loads(task_json, object_hook=json_util.object_hook)
                response = _get_task_represent(task_data)
                ws.send_str(json.dumps(response, default=json_util.default))
        finally:
            subscriber_socket.close()
class MonitWaitingTaskHandler(WebSocketHandler):
    """Pushes the full waiting-task list to the client on every task event."""
    need_background = True
    stop_background_timeout = 0.1
    async def background(self, ws):
        subscriber_socket = get_sub_socket(TASK_EVENT)
        try:
            while True:
                # Send an initial snapshot immediately, then re-send after
                # each task event arrives on the subscriber socket.
                response = {'waiting_tasks': []}
                waiting_tasks = models.MonitTask.get_waiting_tasks()
                for task in waiting_tasks:
                    task_data = task.get_data()['task']
                    response['waiting_tasks'].append(_get_task_represent(task_data))
                # print('waiting_tasks count', len(response['waiting_tasks']))
                ws.send_str(json.dumps(response, default=json_util.default))
                # waiting new events
                await async_recv_event(subscriber_socket)
        finally:
            subscriber_socket.close()
class MonitCurrentWorkerHandler(WebSocketHandler):
    """Pushes the list of currently active workers on every worker event."""
    need_background = True
    stop_background_timeout = 0.1
    async def background(self, ws):
        subscriber_socket = get_sub_socket(WORKER_EVENT)
        try:
            while True:
                # Send an initial snapshot immediately, then re-send after
                # each worker event arrives on the subscriber socket.
                response = {'current_workers': []}
                workers = models.CurrentWorker.objects.all()
                for worker in workers:
                    response['current_workers'].append(
                        _get_worker_represent(worker)
                    )
                # print('current_workers count', len(response['current_workers']))
                # print(response)
                ws.send_str(json.dumps(response, default=json_util.default))
                # waiting new events
                await async_recv_event(subscriber_socket)
        finally:
            subscriber_socket.close()
def _get_worker_represent(worker: models.CurrentWorker) -> dict:
    """Serialize a CurrentWorker (and its tasks) into a JSON-friendly dict."""
    main = worker.main
    return {
        'uuid': str(main.uuid),
        'id': main.id,
        'created_dt': main.created_dt.isoformat(sep=' '),
        'host_name': main.host_name,
        'type': main.type,
        # Each task document is flattened with the shared task serializer.
        'tasks': [
            _get_task_represent(task.get_data()['task'])
            for task in worker.get_tasks()
        ],
    }
def _get_task_represent(task: dict) -> dict:
task_data = {
'id': task['id'],
'host_address': task['host_address'],
'schedule_id': task['schedule_id'],
'start_dt': None,
'result_dt': None,
'extra': None,
'level': None,
'worker': None,
}
if 'monit_name' in task:
task_data['monit_name'] = task['monit_name'],
if 'work_name' in task:
task_data['work_name'] = task['work_name'],
if 'start_dt' in task:
task_data['start_dt'] = task['start_dt'].replace(microsecond=0).isoformat(sep=' ')
if 'result' in task:
task_data['result_dt'] = task['result']['dt'].replace(microsecond=0).isoformat(sep=' ')
task_data['extra'] = task['result']['extra']
task_data['level'] = task['result']['level']
if 'worker' in task:
task_data['worker'] = task['worker']
task_data['worker']['created_dt'] = task['worker']['created_dt'].replace(microsecond=0).isoformat(sep=' ')
return task_data
| telminov/django-park-keeper | parkkeeper/ws.py | ws.py | py | 8,471 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "aiohttp.web.Application",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "aiohttp.web",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "asyncio.get_event_loop",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "django.con... |
10557505124 |
# coding: utf-8
# In[34]:
#Mandelbrot fractal creation program
#e.g. complex fractal shapes with recursive detail at increasing magnifications
#code adapted from example found @ docs.scipy.org/doc/numpy/user/quickstart.html
import matplotlib.pyplot as plt
import numpy as np
def mbrot(h, w, maxit=125):
    """Return an (h, w) float array of Mandelbrot escape times.

    Samples c over real in [-1.5, 1.5] (h rows) and imaginary in [-2, 2.1]
    (w columns).  Cells that never escape within `maxit` iterations keep the
    value `maxit`; higher maxit gives more edge detail but runs longer.
    """
    re_grid, im_grid = np.mgrid[-1.5:1.5:h * 1j, -2:2.1:w * 1j]
    c = re_grid + im_grid * 1j
    z = c
    # Start every cell at maxit; escaped cells get overwritten below.
    escape_time = maxit + np.zeros(z.shape, dtype=float)
    for step in range(maxit):
        z = z ** 2 + c
        # |z|^2 > 4 <=> |z| > 2: the orbit has diverged.
        escaped = z * np.conj(z) > 2 ** 2
        newly_escaped = escaped & (escape_time == maxit)
        escape_time[newly_escaped] = step
        # Clamp escaped points so they stop growing (avoids overflow).
        z[escaped] = 1
    return escape_time
#sizing, visualization
# Render a 1500x1500 escape-time image; colormap maps escape speed to color.
plt.imshow(mbrot(1500,1500),cmap='winter') #cmap options: matplotlib.org/tutorials/colors/colormaps.html
plt.show()
| NathanNYC/Mandelbrot-Variations | Mbot.py.py | Mbot.py.py | py | 850 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.mgrid",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.conj",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.imshow",
... |
7074745807 | from django.conf.urls import include
from utils.urls import cbv_url_helper as url
from . import views
# Route table for the super-admin section.  `url` here is the project helper
# utils.urls.cbv_url_helper, which wires class-based views directly.
urlpatterns = [
    url(r'^$', views.SuperAdminHomeView),
    url(r'^stats/$', views.StatsView),
    url(r'^stats/initial/$', views.InitialStatsView),
    url(r'^stats/get/$', views.GetStatsView),
    # Sub-apps mounted under their own prefixes.
    url(r'^admins/', include('accounts.urls')),
    url(r'^users/', include('users.root_urls')),
    url(r'^clusters/', include('clusters.urls')),
    # Django's built-in i18n views (set_language etc.) at the root.
    url(r'^', include('django.conf.urls.i18n')),
]
| s3vdev/sxconsole-lite | sxconsole-lite/sxconsole/urls.py | urls.py | py | 502 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.urls.cbv_url_helper",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "utils.urls.cbv_url_helper",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "utils.urls.cbv_url_helper",
"line_number": 10,
"usage_type": "call"
},
{
"api_n... |
74726242592 | import logging
from dataclasses import dataclass
from http import HTTPStatus
from paddington import (
Joint, Track, ErrorEvent, ErrorTypeSwitch, RouteNotFound, SequentialSwitch,
)
from web_framework.app import App, WsgiContext
from web_framework.rest_view import RestWheelSet, HttpResponse
from web_framework.wsgi_switch import WsgiSwitch
error_router = ErrorTypeSwitch(default=SequentialSwitch())
router = WsgiSwitch(error_track=error_router)
@dataclass
class User:
    """Transport record returned by the /users endpoints."""
    id: int
    name: str
@dataclass
class Error:
    """Error payload returned by the HTTP error handlers."""
    error: str
class UserManager:
    """In-memory user store; state lives only for the process lifetime."""
    def __init__(self):
        self.users = []
class ManagerJoint(Joint):
    """Middleware joint that injects a shared UserManager into the context.

    The manager is created once per joint instance, so user state persists
    across all requests routed through this track.
    """
    def __init__(self, track: Track):
        super().__init__(track)
        self.user_manager = UserManager()
    def __call__(self, event, context: WsgiContext):
        # Expose the shared manager to downstream handlers, then delegate.
        context.data["user_manager"] = self.user_manager
        return self.track(event, context)
@router.track("/", methods=["GET"])
def index(environ, context: WsgiContext) -> HttpResponse[dict[str, str]]:
    """Landing endpoint: always returns {"ok": "Index"}."""
    return HttpResponse(
        body={"ok": "Index"}
    )
@router.track("/users", methods=["GET"])
def get_users(environ, context: WsgiContext) -> HttpResponse[list[User]]:
    """Return every user currently held by the shared UserManager."""
    manager: UserManager = context.data["user_manager"]
    return HttpResponse(body=manager.users)
@router.track("/users", methods=["POST"])
def add_user(environ, context: WsgiContext) -> HttpResponse[User]:
    """Create, store and return a new sequentially-numbered user."""
    manager: UserManager = context.data["user_manager"]
    # The next id equals the current user count; the name mirrors it.
    next_id = len(manager.users)
    new_user = User(id=next_id, name=f"User {next_id}")
    manager.users.append(new_user)
    return HttpResponse(body=new_user)
@error_router.track(RouteNotFound)
def handle_not_found_error(
    environ: ErrorEvent, context: WsgiContext,
) -> HttpResponse[Error]:
    """Map RouteNotFound errors to a 404 response with a JSON error body."""
    logging.error(f"Resource {environ.event['PATH_INFO']} not found")
    return HttpResponse(
        status=HTTPStatus.NOT_FOUND,
        body=Error(error=f"Resource {environ.event['PATH_INFO']} not found"),
    )
@error_router.default.track()
def handle_any_error(
    environ: ErrorEvent, context: WsgiContext,
) -> HttpResponse[Error]:
    """Catch-all error handler: log and return a 500 with the exception text."""
    logging.error("Unhandled error in HTTP")
    return HttpResponse(
        status=HTTPStatus.INTERNAL_SERVER_ERROR,
        body=Error(error=str(environ.exception)),
    )
router = ManagerJoint(router)
app = App(RestWheelSet(router))
| Tishka17/paddington | examples/web_app/app.py | app.py | py | 2,384 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "paddington.ErrorTypeSwitch",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "paddington.SequentialSwitch",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "web_framework.wsgi_switch.WsgiSwitch",
"line_number": 13,
"usage_type": "call"
}... |
11732136587 | import keras
import numpy as np
from keras.layers import Input, Dense
from keras.models import Model
from keras.optimizers import Adam
from sklearn.cluster import KMeans
from keras.models import load_model
import csv
import sys
path_train = sys.argv[1]
path_test = sys.argv[2]
path_out = sys.argv[3]
def load_data():
    """Read the test-pair CSV (module-level `path_test`).

    Each data row contributes [int(col1), int(col2)] — a pair of image
    indices; the header row is skipped.  Returns an (n, 2) int ndarray.
    """
    with open(path_test, 'r') as file:
        reader = csv.reader(file, delimiter=',')
        # Idiomatic header skip (replaces the original manual `iter` counter).
        next(reader)
        test_x = [[int(line[1]), int(line[2])] for line in reader]
    test_x = np.array(test_x)
    print(test_x)
    return test_x
def build_model():
    """Build a symmetric 784->128->64->32->64->128->784 dense autoencoder.

    Returns (encoder, autoencoder).  The encoder shares layers with the
    autoencoder, so fitting the autoencoder also trains the encoder.
    NOTE(review): uses the legacy Keras `Model(input=..., output=...)`
    keyword spelling, rejected by modern Keras (`inputs`/`outputs`) — confirm
    the pinned Keras version before upgrading.
    """
    input_img = Input(shape=(784, ))
    encoded = Dense(128, activation='relu')(input_img)
    encoded = Dense(64, activation='relu')(encoded)
    encoded = Dense(32, activation='relu')(encoded)
    decoded = Dense(64, activation='relu')(encoded)
    decoded = Dense(128, activation='relu')(decoded)
    decoded = Dense(784, activation='sigmoid')(decoded)
    encoder = Model(input = input_img, output=encoded)
    adam = Adam(lr=5e-4)
    autoencoder = Model(input=input_img, output=decoded)
    autoencoder.compile(optimizer=adam, loss='mse')
    autoencoder.summary()
    return encoder, autoencoder
def main():
    """Embed images with a pre-trained encoder, cluster them with KMeans,
    and label each test pair as same-cluster (1) or different (0)."""
    # Images arrive as uint8 pixels; normalize to [0, 1].
    x = np.load(path_train)
    x = x.astype('float32') / 255.
    #train_num = 130000
    #print(x.shape)
    #train_x = x[:train_num]
    #valid_x = x[train_num:]
    """encoder, autoencoder = build_model()
    autoencoder.fit(train_x, train_x, epochs=1000, batch_size=256, shuffle=True, validation_data=(valid_x, valid_x))
    autoencoder.save('autoencoder.h5')
    encoder.save('encoder.h5')"""
    # Training above is disabled; load the previously saved encoder instead.
    encoder = load_model('encoder.h5')
    encoder_imgs = encoder.predict(x)
    encoder_imgs = encoder_imgs.reshape(encoder_imgs.shape[0], -1)
    # Two clusters: a pair in the same cluster is predicted "same" (label 1).
    kmeans = KMeans(n_clusters=2, random_state=0).fit(encoder_imgs)
    test_x = load_data()
    same = []
    test_y = np.zeros(shape=test_x.shape)
    for i in range(test_x.shape[0]):
        test_y[i][0] = i
        a = kmeans.labels_[test_x[i][0]]
        b = kmeans.labels_[test_x[i][1]]
        if a == b:
            test_y[i][1] = 1
        else:
            test_y[i][1] = 0
    # Write submission file: ID, Ans (0/1).
    with open(path_out, 'w') as file:
        writer = csv.writer(file, delimiter=',')
        writer.writerow(['ID', 'Ans'])
        for i in range(test_y.shape[0]):
            writer.writerow([int(test_y[i][0]), int(test_y[i][1])])
# Script entry point.
if __name__ == "__main__":
    main()
| hungchingliu/ML2018SPRING | hw4/autoencoder.py | autoencoder.py | py | 2,623 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "csv.reader",
"line_numb... |
26297959246 | from kafka import KafkaProducer
import sys
msg = str(sys.argv[1])
def run():
    """Publish the module-level `msg` to the 'Users' topic.

    Partitioning: messages whose first character sorts before 'N' go to
    partition 0, the rest to partition 1.  The process always exits at the
    end (this is a one-shot CLI script).
    """
    try:
        producer = KafkaProducer(
            bootstrap_servers = ['localhost:9093','localhost:9094','localhost:9095']
        )
        def message() -> dict:
            # Route by first character: anything before 'N' -> partition 0.
            partition = 0 if msg[0] < "N" else 1
            return {
                "value": msg,
                "partition": partition,
            }
        message_sent = message()
        # Bug fix: the computed partition was previously never passed to
        # send(), so every record went to the broker-chosen partition.
        producer.send(
            "Users",
            value=bytes(str(msg), 'utf-8'),
            partition=message_sent["partition"],
        )
        print("Done! Created Successfully! " + str(message_sent))
        producer.flush()
        producer.close()
    except Exception as e:
        print("Something bad happened " + str(e))
    finally:
        # Deliberate: terminate the process whether the publish succeeded or not.
        sys.exit()
run()
| fzayed/Project-Milestone-Group-11 | Lab 2/Ireni_100657302/kafka-python/producer.py | producer.py | py | 837 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "kafka.KafkaProducer",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.exit",
"line_number": 43,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.