seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
75002646183 | from concurrent.futures import ThreadPoolExecutor,wait,as_completed
from socket import timeout
from turtle import done
from unittest.result import failfast
import requests
import re
import warnings
import os
import traceback
import importlib
warnings.filterwarnings('ignore')
url='https://e-hentai.org'
slist=[]
cookie=input("input your login cookie if any:")
head={
"Connection": '''keep-alive''',
"Pragma": '''no-cache''',
"Cache-Control": '''no-cache''',
"sec-ch-ua": '''" Not A;Brand";v="99", "Chromium";v="98", "Microsoft Edge";v="98"''',
"sec-ch-ua-mobile": '''?0''',
"sec-ch-ua-platform": '''"Windows"''',
"DNT": '''1''',
"Upgrade-Insecure-Requests": '''1''',
"User-Agent": '''Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/98.0.4758.80 Safari/537.36 Edg/98.0.1108.43''',
"Accept": '''text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9''',
"Sec-Fetch-Site": '''same-origin''',
"Sec-Fetch-Mode": '''navigate''',
"Sec-Fetch-User": '''?1''',
"Sec-Fetch-Dest": '''document''',
"Referer": '''https://e-hentai.org/home.php''',
"Accept-Encoding": '''gzip, deflate, br''',
"Accept-Language": '''zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6''',
"Cookie": f"{cookie}"
}
# Ask for the gallery id and prepare an output directory named after it.
gid = input("example url:https://e-hentai.org/g/2134055/c28c645647/?p=1\nexample gallery id:2134055/c28c645647\ninput the e-hentai gallery id to download:")
# BUGFIX: the original wrapped makedirs in a bare try/except that hid real
# errors (permissions, bad path); exist_ok covers the only expected failure.
os.makedirs(gid, exist_ok=True)
res = requests.get(f"{url}/g/{gid}", verify=False, headers=head)
# Locate the page-count cell in the gallery's pager table and parse the number.
endp = res.text.rfind(f'''</a></td><td onclick="document.location=this.firstChild.href"><a href="{url}/g/{gid}/?p=1" onclick="return false">''')
count = res.text[:endp]
try:
    count = int(count[count.rfind(">") + 1:])
except ValueError:
    # Single-page galleries have no pager cell, so the slice is not a number.
    count = 1
print("pages:", count)
# Collect the image-page tokens found on page 0 of the gallery.
reslist = re.findall('''<a href="https:\/\/e-hentai\.org\/s\/([a-z\d\-/]+?)">''', res.text)
slist.extend(reslist)
def get_limit():
    """Return the image-limit usage counter scraped from the logged-in home page."""
    page = requests.get(f"{url}/home.php", verify=False, headers=head)
    matches = re.findall('''<p>You are currently at <strong>(\d+?)<\/strong>''', page.text)
    return matches[0]
if cookie:
print("limit used:",get_limit())
def fetch_urls(pid):
    """Scrape gallery page *pid* and append its image-page tokens to slist."""
    global count, slist
    print(f"fetching page {pid}/{count}...")
    page = requests.get(f"{url}/g/{gid}/?p={pid}", verify=False, headers=head)
    tokens = re.findall('''<a href="https:\/\/e-hentai\.org\/s\/([a-z\d\-/]+?)">''', page.text)
    slist.extend(tokens)
# Fetch all remaining gallery pages concurrently (page 0 was read above).
pool = ThreadPoolExecutor(20)
for page_index in range(1, count):
    pool.submit(fetch_urls, page_index)
pool.shutdown(True)
# Per-token cache of resolved image URLs, filled lazily by fetch_images.
sdict = {each: None for each in slist}
num = len(slist)
print("total images:", num)
def fetch_images(i, key):
    """Download image *key* (the i-th of num) into the gallery folder.

    On retries, reuses the real image URL cached in sdict. With a login
    cookie the original full-size image is fetched via fullimg.php;
    otherwise the 1280px sample shown on the page is saved.
    """
    global num, sdict, slist
    print(f"fetching image {i}/{num}...")
    if sdict[key]:
        # A real image URL was already resolved on a previous (timed-out) attempt.
        print(f"cache {key} found!")
        res = requests.get(sdict[key], verify=False, headers=head, timeout=600)
        # BUGFIX: use a context manager instead of open().write() leaking the handle.
        with open(f"{gid}/{key[key.rfind('/')+1:]}.jpg", "wb") as f:
            f.write(res.content)
    else:
        res = requests.get(f"{url}/s/{key}", verify=False, headers=head)
        if cookie:
            # Logged in: resolve the fullimg.php redirect to the real file URL.
            # BUGFIX: the original called .replace("&", "&") — a no-op; the
            # intent is clearly to unescape HTML "&amp;" entities in the href.
            ourl = re.findall('''<a href="(https:\/\/e-hentai\.org\/fullimg\.php\?[\w=;&\-]+?)">''', res.text)[0].replace("&amp;", "&")
            res = requests.get(ourl, verify=False, headers=head, allow_redirects=False)
            rurl = res.headers["location"]
            print(f"resolving real img url of {key}...", rurl)
            sdict[key] = rurl  # cache so a retry skips the resolution round-trip
            res = requests.get(rurl, verify=False, headers=head, timeout=600)
            with open(f"{gid}/{key[key.rfind('/')+1:]}.jpg", "wb") as f:
                f.write(res.content)
            print("limit used:", get_limit())
        else:
            murl = re.findall('''<img id="img" src="([\w:/;=\.\-]+?)"''', res.text)[0]
            res = requests.get(murl, verify=False, headers=head)
            print(f"not login! download 1280 img url of {key}...", murl)
            with open(f"{gid}/{key[key.rfind('/')+1:]}_1280.jpg", "wb") as f:
                f.write(res.content)
    # Mark this token as done so the retry bookkeeping in the main loop sees
    # progress (presumably intended for both branches — TODO confirm).
    slist.remove(key)
# Download images in batches of 60 workers; re-wait until the set of
# unfinished futures stops shrinking (i.e. no further progress).
with ThreadPoolExecutor(max_workers=60) as t:
    for j in range(int((num + 59) // 60)):
        all_task = [t.submit(fetch_images, i + j * 60, each)
                    for i, each in enumerate(slist[j * 60:(j + 1) * 60])]
        lastundo = []
        undo = all_task
        while len(lastundo) != len(undo):
            lastundo = undo
            done, undo = wait(all_task, timeout=300)
# Persist the resolved-URL cache and any leftover tokens for post-mortem use.
# BUGFIX: close the file via a context manager instead of leaking the handle.
with open(f"{gid}/info.py", "w") as f:
    f.write(f"{sdict}\n{slist}")
9856058749 | import re
import csv
import os
import sys
import pickle
from pprint import pprint
from enum import Enum
sys.path.insert(0, '../heatmap')
sys.path.insert(0, '../tests')
from stat_type_lookups import *
from tester import *
# Types of files:
# Fundamental files
# - PlayByPlay.csv, GameIDs.csv, BoxscoreStats.csv
# - Run after scraper
# - Generate pickles right after
# Enriched
# - PlayByPlay_enriched.csv
# - Run during enrich
# - Generate pickle right after
# Heatmap Stats
# - *_heatmap.csv
# - Run for relevent stats (can be all), for specific games and teams
# - Generate pickles right after
# Lineups
# - lineups_*.csv
# - That's up to Rossdan (assume it's his problem)
class CSVTypes(Enum):
    """Selector for which family of CSV files an operation applies to."""
    Fundamentals = 0  # PlayByPlay.csv, GameIDs.csv, BoxscoreStats.csv
    Enriched = 1      # PlayByPlay_enriched*.csv
    Heatmap = 2       # *_heatmap.csv
    All = 3           # every family above
class PickleParser:
    """Converts the project's CSV data files into pickles and loads them back.

    Knows three CSV families (see CSVTypes): fundamental files, enriched
    play-by-play files, and per-stat heatmap files, all living under *root*.
    """

    def __init__(self, root, verbose=False):
        self.root = root        # directory holding all CSV/pickle files
        self.verbose = verbose
        # Lists of filenames per family.
        self.fundamentalFiles = ['PlayByPlay.csv', 'GameIDs.csv', 'BoxscoreStats.csv']
        self.enrichedFiles = ['PlayByPlay_enriched.csv']
        self.statFiles = list(stat_filenames.values())
        for i in range(len(self.statFiles)):
            self.statFiles[i] += '_heatmap.csv'

    def IterateOverPickles(self, task):
        """Walk root: task 'save' serializes every CSV, 'remove' deletes every pickle."""
        for subdir, dirs, fs in os.walk(self.root):
            for f in fs:
                filename = os.path.join(subdir, f)
                if task == 'save' and filename.endswith('.csv'):
                    self.Serialize(filename)
                elif task == 'remove' and filename.endswith('.pickle'):
                    if self.verbose: print("Removing file: {}".format(filename))
                    os.remove(filename)

    def SavePicklesInRoot(self):
        """Serialize every CSV found anywhere under root."""
        self.IterateOverPickles('save')

    def RemovePicklesFromRoot(self):
        """Delete every pickle found anywhere under root."""
        self.IterateOverPickles('remove')

    def GetAllEnrichedFiles(self):
        """Return the base enriched filenames plus any per-game enriched CSVs on disk."""
        allEnrichedFiles = list(self.enrichedFiles)
        for subdir, dirs, items in os.walk(self.root):
            for item in items:
                if re.match(".*PlayByPlay_enriched_game[\d]+\.csv", item):
                    allEnrichedFiles.append(item)
        return allEnrichedFiles

    def GetAllHeatmapFiles(self):
        """Return the known stat filenames plus any *_heatmap.csv found on disk."""
        allHeatmapFiles = list(self.statFiles)
        for subdir, dirs, items in os.walk(self.root):
            for item in items:
                if item.endswith("_heatmap.csv"):
                    allHeatmapFiles.append(item)
        return allHeatmapFiles

    def SaveTypesToPickles(self, types=None):
        """Serialize the CSVs of the given CSVTypes values (default/All: everything)."""
        # BUGFIX: avoid a mutable default argument; None/[] both mean "all".
        if types is None: types = []
        if type(types) != list: types = [types]
        if types == [] or types == [CSVTypes.All.value]:
            self.SaveTypesToPickles([CSVTypes.Fundamentals.value,
                                     CSVTypes.Enriched.value,
                                     CSVTypes.Heatmap.value])
        if CSVTypes.Fundamentals.value in types:
            self.Remove(self.fundamentalFiles, '.pickle')
            self.Serialize(self.fundamentalFiles)
        if CSVTypes.Enriched.value in types:
            allEnrichedFiles = self.GetAllEnrichedFiles()
            self.Remove(allEnrichedFiles, '.pickle')
            self.Serialize(allEnrichedFiles)
        if CSVTypes.Heatmap.value in types:
            allHeatmapFiles = self.GetAllHeatmapFiles()
            self.Remove(allHeatmapFiles, '.pickle')
            self.Serialize(allHeatmapFiles)

    def RemovePicklesOfType(self, csvType, fileType):
        """Remove the pickles (or CSVs, if fileType == '.csv') of one CSV family."""
        if fileType != '.csv': fileType = '.pickle'
        if csvType == CSVTypes.All.value:
            # BUGFIX: the original passed the enum *values* to Remove() as if
            # they were filenames (and dropped fileType), which crashed in
            # os.path.join; recurse per concrete type instead.
            for t in (CSVTypes.Fundamentals.value,
                      CSVTypes.Enriched.value,
                      CSVTypes.Heatmap.value):
                self.RemovePicklesOfType(t, fileType)
        if csvType == CSVTypes.Fundamentals.value:
            self.Remove(self.fundamentalFiles, fileType)
        if csvType == CSVTypes.Enriched.value:
            self.Remove(self.GetAllEnrichedFiles(), fileType)
        if csvType == CSVTypes.Heatmap.value:
            self.Remove(self.GetAllHeatmapFiles(), fileType)

    def Remove(self, files=None, fileType='.pickle'):
        """Delete root-relative *files* after swapping their extension for fileType."""
        if files is None: files = []
        if type(files) == str: files = [files]
        for csvF in files:
            csvfilename = os.path.join(self.root, csvF)
            filename = str(os.path.splitext(csvfilename)[0]) + fileType
            if os.path.isfile(filename):
                if self.verbose: print("Removing file: {}".format(filename))
                os.remove(filename)

    def RemovePickles(self, inVal=None):
        """Remove all pickles under root, or just one CSVTypes family if given."""
        if inVal == None:
            self.RemovePicklesFromRoot()
        else:
            self.RemovePicklesOfType(inVal, '.pickle')

    def Serialize(self, files=None):
        """Convert each root-relative CSV in *files* into a same-named pickle.

        Play-by-play / enriched / heatmap CSVs are stored as
        {gameID: [rows...]}; everything else as {first_col: {header: value}}.
        """
        if files is None: files = []
        if type(files) == str: files = [files]
        for csvF in files:
            csvfilename = os.path.join(self.root, csvF)
            if not csvfilename.endswith('.csv') or not os.path.isfile(csvfilename):
                continue
            # Delete any stale pickle of the same name first.
            filename = str(os.path.splitext(csvfilename)[0]) + '.pickle'
            if os.path.isfile(filename): os.remove(filename)
            x = {}
            header_flag = True
            keys = []
            last_gameID = -1
            # These CSVs have multiple rows per gameID and keep list-of-rows form.
            manualAddID = "PlayByPlay.pickle" in filename or \
                          os.path.basename(csvfilename) in self.GetAllEnrichedFiles() or \
                          os.path.basename(csvfilename) in self.GetAllHeatmapFiles()
            with open(csvfilename, 'rt') as f:
                reader = csv.reader(f, delimiter=',')
                for line in reader:
                    if header_flag:
                        header_flag = False
                        keys = line
                    elif manualAddID:
                        if line[0] != last_gameID:  # new gameID starts a fresh list
                            last_gameID = line[0]
                            x[line[0]] = []
                        x[line[0]].append(line[1:])
                    else:
                        x[line[0]] = {}
                        for i in range(1, len(line)):
                            x[line[0]][keys[i]] = line[i]
            if self.verbose: print("Dumping {} data, hold on".format(filename))
            with open(filename, 'wb') as f:
                pickle.dump(x, f, pickle.HIGHEST_PROTOCOL)

    def LoadPickle(self, file=''):
        """Load and return the root-relative pickle *file*; {} when missing."""
        filename = os.path.join(self.root, file)
        if not os.path.isfile(filename) or not filename.endswith('.pickle'):
            if self.verbose: print("Unable to find pickle file {}".format(filename))
            return dict()
        if self.verbose: print("Loading data from {}, hold on".format(filename))
        # BUGFIX: close the file via a context manager instead of leaking it.
        with open(filename, "rb") as f:
            return pickle.load(f)
if __name__ == '__main__':
    # Init
    pickleParser = PickleParser('../data', False)

    #################################################################
    ######################### Tests + demos #########################
    #################################################################
    t = Tester()

    # Load empty pickle
    print("Test 1")
    t.Assert("Load empty pickle", pickleParser.LoadPickle('') == {})

    # Remove empty pickle
    print("Test 2")
    pickleParser.Remove('')
    t.Assert("Delete empty pickle", True)

    # Create and remove a file (csv and pickle)
    print("Test 3")
    tempFileName = os.path.join(pickleParser.root, 'test.csv')
    with open(tempFileName, 'w') as f: f.write('')
    pickleParser.Remove(tempFileName, '.csv')
    t.Assert("Delete csv", not os.path.isfile(tempFileName))
    tempFileName = os.path.join(pickleParser.root, 'test.pickle')
    with open(tempFileName, 'w') as f: f.write('')
    pickleParser.Remove(tempFileName)
    t.Assert("Delete pickle", not os.path.isfile(tempFileName))

    # Load == Serialize for a file
    print("Test 4")
    serializeTestFile = 'turnovers_avg_heatmap'
    initialRead = pickleParser.LoadPickle(serializeTestFile + '.pickle')
    pickleParser.Serialize(serializeTestFile + '.csv')
    rewriteAndRead = pickleParser.LoadPickle(serializeTestFile + '.pickle')
    t.Assert("Seralize == Load for pickle reading", initialRead == rewriteAndRead)

    # Delete stats pickles
    print("Test 5")
    pickleParser.RemovePicklesOfType(CSVTypes.Heatmap.value, '.pickle')
    t.Assert("Delete stats pickles",
             all(not os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.statFiles))

    # Delete enriched pickle
    print("Test 6")
    pickleParser.RemovePicklesOfType(CSVTypes.Enriched.value, '.pickle')
    t.Assert("Delete enriched pickles",
             all(not os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.GetAllEnrichedFiles()))

    # Delete fundamental pickles
    print("Test 7")
    pickleParser.RemovePicklesOfType(CSVTypes.Fundamentals.value, '.pickle')
    t.Assert("Delete fundamental pickles",
             all(not os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.fundamentalFiles))

    # Pickles created during fundamentals serialization
    print("Test 8")
    pickleParser.SaveTypesToPickles(CSVTypes.Fundamentals.value)
    t.Assert("Pickles created during fundamentals serialization",
             all(os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.fundamentalFiles))

    # Pickles created during enriched serialization
    print("Test 9")
    pickleParser.SaveTypesToPickles(CSVTypes.Enriched.value)
    t.Assert("Pickles created during enriched serialization",
             all(os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.GetAllEnrichedFiles()))

    # Pickles created during stat serialization
    print("Test 10")
    pickleParser.SaveTypesToPickles(CSVTypes.Heatmap.value)
    t.Assert("Pickles created during stat serialization",
             all(os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle')) for x in pickleParser.GetAllHeatmapFiles()))

    # Delete all pickles from dir
    print("Test 11")
    pickleParser.RemovePicklesFromRoot()
    t.Assert("Delete all pickles from dir",
             all(not os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle'))
                 for x in pickleParser.statFiles + pickleParser.enrichedFiles + pickleParser.fundamentalFiles))

    # All pickles created from dir
    print("Test 12")
    pickleParser.SavePicklesInRoot()
    t.Assert("All pickles created from dir",
             all(os.path.isfile(os.path.join(pickleParser.root, str(os.path.splitext(x)[0]) + '.pickle'))
                 for x in pickleParser.GetAllHeatmapFiles() + pickleParser.GetAllEnrichedFiles() + pickleParser.fundamentalFiles))

    print('\n')
    t.ShowResults()
    print("\nNote: There is a chance this failed because of setup. \n" + \
          "I'm very lazy and don't want to create files for the sole purpose of testing," + \
          "so just make sure to copy the files from ../_backup_data into ../data" + \
          "and see if it still works then.")
| AdamCharron/CanadaBasketballStats | enrich/parse_to_yaml.py | parse_to_yaml.py | py | 11,609 | python | en | code | 0 | github-code | 36 |
38164556201 | import glob
import importlib
import io
import logging
import os
import shlex
import subprocess
import time
import cornet
import numpy as np
import pandas
import torch
import torch.nn as nn
import torch.utils.model_zoo
import torchvision
import tqdm
from PIL import Image
from torch.nn import Module
Image.warnings.simplefilter('ignore')
logger = logging.getLogger(__name__)
torch.backends.cudnn.benchmark = False
normalize = torchvision.transforms.Normalize(mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225])
ngpus = 2
epochs = 1
output_path = '/braintree/home/fgeiger/weight_initialization/base_models/model_weights/' # os.path.join(os.path.dirname(__file__), 'model_weights/')
data_path = '/braintree/data2/active/common/imagenet_raw/' if 'IMAGENET' not in os.environ else \
os.environ['IMAGENET']
batch_size = 256
weight_decay = 1e-4
momentum = .9
step_size = 20
lr = .1
workers = 20
if ngpus > 0:
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
def set_gpus(n=2):
    """
    Finds all GPUs on the system and restricts to n of them that have the most
    free memory.
    """
    # BUGFIX: shell=True combined with an argument *list* makes the shell run
    # only "nvidia-smi" and silently drops the query flags; execute directly.
    gpus = subprocess.run(shlex.split(
        'nvidia-smi --query-gpu=index,memory.free,memory.total --format=csv,nounits'),
        check=True, stdout=subprocess.PIPE).stdout
    gpus = pandas.read_csv(io.BytesIO(gpus), sep=', ', engine='python')
    print(gpus)
    gpus = gpus[gpus['memory.total [MiB]'] > 10000]  # only above 10 GB
    if os.environ.get('CUDA_VISIBLE_DEVICES') is not None:
        visible = [int(i)
                   for i in os.environ['CUDA_VISIBLE_DEVICES'].split(',')]
        gpus = gpus[gpus['index'].isin(visible)]
    print(f'GPUs {gpus}')
    gpus = gpus.sort_values(by='memory.free [MiB]', ascending=False)
    # making sure GPUs are numbered the same way as in nvidia_smi
    os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
    os.environ['CUDA_VISIBLE_DEVICES'] = ','.join(
        [str(i) for i in gpus['index'].iloc[:n]])
def get_model(pretrained=False):
    """Build a CORnet-S model; moves it to the GPU when ngpus > 0."""
    map_location = None if ngpus > 0 else 'cpu'
    constructor = getattr(cornet, 'cornet_S')
    net = constructor(pretrained=pretrained, map_location=map_location)
    if ngpus == 0:
        net = net.module  # remove DataParallel
    if ngpus > 0:
        net = net.cuda()
    return net
def train(identifier,
          model,
          restore_path=None,  # useful when you want to restart training
          save_train_epochs=1,  # how often save output during training
          save_val_epochs=.5,  # how often save output during validation
          save_model_epochs=1,  # how often save model weigths
          save_model_secs=60 * 10,  # how often save model (in sec)
          areas=None
          ):
    """Train *model* for the module-level number of epochs.

    Returns {'time': wall-clock seconds spent in the training loop}.
    Note: restore_path is overwritten with output_path (original behavior).
    """
    restore_path = output_path
    logger.info('We start training the model')
    if ngpus > 1 and torch.cuda.device_count() > 1:
        logger.info('We have multiple GPUs detected')
        model = nn.DataParallel(model)
        model = model.to(device)
    # BUGFIX: the original used `torch.cuda.device_count() is 1` — identity
    # comparison against an int literal; use == for value equality.
    elif ngpus > 0 and torch.cuda.device_count() == 1:
        logger.info('We run on one GPU')
        model = model.to(device)
    else:
        logger.info('No GPU detected!')
    trainer = ImageNetTrain(model, areas)
    validator = ImageNetVal(model)
    start_epoch = 0
    recent_time = time.time()
    for epoch in tqdm.trange(start_epoch, epochs, initial=start_epoch, desc='epoch'):
        data_load_start = np.nan
        for step, data in enumerate(tqdm.tqdm(trainer.data_loader, desc=trainer.name)):
            data_load_time = time.time() - data_load_start
            global_step = epoch * len(trainer.data_loader) + step
            trainer.model.train()
            frac_epoch = (global_step + 1) / len(trainer.data_loader)
            trainer(frac_epoch, *data)
            data_load_start = time.time()
    duration = time.time() - recent_time
    return {'time': duration}
def test(layer='decoder', sublayer='avgpool', time_step=0, imsize=224):
    """
    Suitable for small image sets. If you have thousands of images or it is
    taking too long to extract features, consider using
    `torchvision.datasets.ImageFolder`, using `ImageNetVal` as an example.

    Kwargs:
        - layers (choose from: V1, V2, V4, IT, decoder)
        - sublayer (e.g., output, conv1, avgpool)
        - time_step (which time step to use for storing features)
        - imsize (resize image to how many pixels, default: 224)
    """
    model = get_model(pretrained=True)
    transform = torchvision.transforms.Compose([
        torchvision.transforms.Resize((imsize, imsize)),
        torchvision.transforms.ToTensor(),
        normalize,
    ])
    model.eval()

    def _store_feats(layer, inp, output):
        """An ugly but effective way of accessing intermediate model features
        """
        _model_feats.append(np.reshape(output, (len(output), -1)).numpy())

    # Unwrap DataParallel when present so named attributes resolve.
    try:
        core = model.module
    except:
        core = model
    hooked_layer = getattr(getattr(core, layer), sublayer)
    hooked_layer.register_forward_hook(_store_feats)

    model_feats = []
    with torch.no_grad():
        model_feats = []
        fnames = sorted(glob.glob(os.path.join(data_path, '*.*')))
        if len(fnames) == 0:
            raise Exception(f'No files found in {data_path}')
        for fname in tqdm.tqdm(fnames):
            try:
                im = Image.open(fname).convert('RGB')
            except:
                raise Exception(f'Unable to load {fname}')
            im = transform(im)
            im = im.unsqueeze(0)  # adding extra dimension for batch size of 1
            _model_feats = []     # reset; hook appends one entry per time step
            model(im)
            model_feats.append(_model_feats[time_step])
        model_feats = np.concatenate(model_feats)

    if output_path is not None:
        fname = f'CORnet-{model}_{layer}_{sublayer}_feats.npy'
        np.save(os.path.join(output_path, fname), model_feats)
class ImageNetTrain(object):
    """One training pass helper: owns the loader, SGD optimizer and loss.

    Calling the instance runs a single optimization step on one batch and
    returns a record dict with loss, top-1/top-5 accuracy, lr and duration.
    """

    def __init__(self, model, config):
        self.name = 'train'
        self.model = model
        self.data_loader = self.data()
        # Optimize only parameters that require gradients.
        self.optimizer = torch.optim.SGD(filter(lambda p: p.requires_grad, model.parameters()),
                                         lr,
                                         momentum=momentum,
                                         weight_decay=weight_decay)
        self.lr = torch.optim.lr_scheduler.StepLR(self.optimizer, step_size=step_size)
        self.loss = nn.CrossEntropyLoss()
        if ngpus > 0:
            self.loss = self.loss.cuda()

    def data(self):
        """Build the augmented ImageNet train loader."""
        dataset = torchvision.datasets.ImageFolder(
            os.path.join(data_path, 'train'),
            torchvision.transforms.Compose([
                torchvision.transforms.RandomResizedCrop(224),
                torchvision.transforms.RandomHorizontalFlip(),
                torchvision.transforms.ToTensor(),
                normalize,
            ]))
        return torch.utils.data.DataLoader(dataset,
                                           batch_size=batch_size,
                                           shuffle=True,
                                           num_workers=workers,
                                           pin_memory=True)

    def __call__(self, frac_epoch, inp, target):
        start = time.time()
        self.lr.step(epoch=frac_epoch)  # fractional-epoch LR schedule (old API)
        with torch.autograd.detect_anomaly():
            if ngpus > 0:
                inp = inp.to(device)
                target = target.cuda(non_blocking=True)
            output = self.model(inp)
            record = {}
            loss = self.loss(output, target)
            record['loss'] = loss.item()
            record['top1'], record['top5'] = accuracy(output, target, topk=(1, 5))
            record['top1'] /= len(output)
            record['top5'] /= len(output)
            record['learning_rate'] = self.lr.get_lr()[0]
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
        record['dur'] = time.time() - start
        return record
class ImageNetVal(object):
    """Full validation pass over the ImageNet val split.

    Calling the instance evaluates the model on every batch and returns a
    record dict with mean loss, top-1/top-5 accuracy and per-batch duration.
    """

    def __init__(self, model):
        self.name = 'val'
        self.model = model
        self.data_loader = self.data()
        # Summed (not averaged) loss so it can be normalized by sample count.
        self.loss = nn.CrossEntropyLoss(size_average=False)
        if ngpus > 0:
            self.loss = self.loss.cuda()

    def data(self):
        """Build the deterministic (center-crop, unshuffled) val loader."""
        dataset = torchvision.datasets.ImageFolder(
            os.path.join(data_path, 'val'),
            torchvision.transforms.Compose([
                torchvision.transforms.Resize(256),
                torchvision.transforms.CenterCrop(224),
                torchvision.transforms.ToTensor(),
                normalize,
            ]))
        return torch.utils.data.DataLoader(dataset,
                                           batch_size=batch_size,
                                           shuffle=False,
                                           num_workers=workers,
                                           pin_memory=True)

    def __call__(self):
        self.model.eval()
        start = time.time()
        record = {'loss': 0, 'top1': 0, 'top5': 0}
        with torch.no_grad():
            for (inp, target) in tqdm.tqdm(self.data_loader, desc=self.name):
                if ngpus > 0:
                    inp = inp.to(device)
                    target = target.to(device)
                output = self.model(inp)
                record['loss'] += self.loss(output, target).item()
                p1, p5 = accuracy(output, target, topk=(1, 5))
                record['top1'] += p1
                record['top5'] += p5
        # Normalize the accumulated sums by the dataset size.
        for key in record:
            record[key] /= len(self.data_loader.dataset.samples)
        record['dur'] = (time.time() - start) / len(self.data_loader)
        print(f'Validation accuracy: Top1 {record["top1"]}, Top5 {record["top5"]}\n')
        return record
def accuracy(output, target, topk=(1,)):
    """Return, for each k in *topk*, how many targets appear in the top-k predictions."""
    with torch.no_grad():
        # (batch, maxk) indices of the best classes, transposed to (maxk, batch).
        ranked = output.topk(max(topk), dim=1, largest=True, sorted=True)[1].t()
        hits = ranked.eq(target.view(1, -1).expand_as(ranked))
        return [hits[:k].sum().item() for k in topk]
if __name__ == '__main__':
    # Compare stored checkpoint weights between two differently-seeded runs.
    identifier = 'CORnet-S_cluster2_v2_IT_trconv3_bi_epoch_00'
    mod = importlib.import_module('cornet.cornet_s')
    model_ctr = getattr(mod, 'CORnet_S')
    model = model_ctr()
    model3 = cornet.cornet_s(False)
    model2 = model_ctr()
    if os.path.exists(output_path + f'{identifier}.pth.tar'):
        logger.info('Resore weights from stored results')
        checkpoint = torch.load(output_path + f'{identifier}.pth.tar',
                                map_location=lambda storage, loc: storage)

        class Wrapper(Module):
            # Minimal shim exposing a .module attribute like nn.DataParallel.
            def __init__(self, model):
                super(Wrapper, self).__init__()
                self.module = model

        model.load_state_dict(checkpoint['state_dict'])
    if os.path.exists(output_path + 'CORnet-S_cluster2_IT_full_train_epoch_00.pth.tar'):
        logger.info('Resore weights from stored results')
        checkpoint2 = torch.load(
            output_path + 'CORnet-S_cluster2_v2_IT_trconv3_bi_seed31_epoch_00.pth.tar',
            map_location=lambda storage, loc: storage)
        checkpoint3 = torch.load(
            output_path + 'CORnet-S_cluster2_v2_IT_trconv3_bi_seed42_epoch_00.pth.tar',
            map_location=lambda storage, loc: storage)  # map onto cpu
        model2.load_state_dict(checkpoint2['state_dict'])
        # Print True/False per parameter depending on whether weights match.
        for name, m in model2.module.named_parameters():
            for name2, m2 in model3.named_parameters():
                if name == name2:
                    print(name)
                    value1 = m.data.cpu().numpy()
                    value2 = m2.data.cpu().numpy()
                    print((value1 == value2).all())
| franzigeiger/training_reductions | base_models/trainer_performance.py | trainer_performance.py | py | 12,170 | python | en | code | 3 | github-code | 36 |
13262505124 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import rospy,roslib
from std_msgs.msg import Bool
from ardrone_autonomy.msg import Navdata
from drone_controller import droneStatus
from ardrone_project.msg import ImageCalc
controlStatus = {
0:None,
1:"Take_Off_Unit",
2:"Land_Unit",
3:"Follow_controller",
}
class Central_Control_Unit:
    """ROS node that arbitrates which sub-controller currently flies the drone."""

    def __init__(self):
        rospy.init_node('central_control_unit', anonymous=False)
        # Enable/disable switches toward the three sub-controllers.
        self.pubEnableControl = rospy.Publisher('follow_controller/enable_control', Bool, queue_size=3)
        self.pubEnableTakeOff = rospy.Publisher('take_off_unit/enable_control', Bool, queue_size=3)
        self.pubEnableLand = rospy.Publisher('land_unit/enable_control', Bool, queue_size=3)
        rospy.Subscriber('Central_Control_Unit/enable', Bool, self.callbacEnable)
        rospy.Subscriber('ardrone/navdata', Navdata, self.callbackNavdata)
        rospy.Subscriber('image_converter/calc', ImageCalc, self.callbackCalc)
        rospy.Subscriber('end_detection/is_end', Bool, self.callbackEnd)
        self.who_in_control = controlStatus[0]
        self.enable_toggle_control = False
        self.end_visible = False
        self.path_visible = False
        # elements in msg definition are assigned zero values by the default constructor.
        self.img_calc = ImageCalc()
        self.droneStatus = "Unknown"
        self.altitude = None

    def callbackNavdata(self, navdata):
        self.droneStatus = droneStatus[navdata.state]
        self.altitude = navdata.altd  # not clear from documentation if [cm] or [mm]

    def callbacEnable(self, msg):
        enable_flag = msg.data
        # NOTE(review): the original comment claimed a 10-second delay, but the
        # code sleeps 1 second before honoring an enable request — confirm intent.
        if enable_flag: rospy.sleep(1)
        self.enable_toggle_control = enable_flag

    def callbackCalc(self, data):
        """
        :type data: ImageCalc object
        """
        self.path_visible = data.is_visible
        self.img_calc = data

    def callbackEnd(self, msg):
        self.end_visible = msg.data

    def cleanup(self):
        print("central_control_unit cleanup method")
if __name__ == "__main__":
    ccu = Central_Control_Unit()
    try:
        while not rospy.is_shutdown():
            if ccu.enable_toggle_control:
                # toggle control block
                if ccu.droneStatus == "Landed":
                    # On the ground: only the take-off unit may act.
                    ccu.pubEnableControl.publish(False)
                    ccu.pubEnableLand.publish(False)
                    ccu.pubEnableTakeOff.publish(True)
                    if ccu.who_in_control is not controlStatus[1]:
                        ccu.who_in_control = controlStatus[1]
                        print("Central Unit: I gave control to Take Off Unit!")
                elif ccu.end_visible:
                    # End marker detected: hand over to the landing unit.
                    ccu.pubEnableControl.publish(False)
                    ccu.pubEnableLand.publish(True)
                    ccu.pubEnableTakeOff.publish(False)
                    if ccu.who_in_control is not controlStatus[2]:
                        ccu.who_in_control = controlStatus[2]
                        print("Central Unit: I gave control to Land Unit!")
                elif ccu.path_visible and ccu.altitude > 1500 and ccu.img_calc.angle < 10 and ccu.img_calc.distance < 100:
                    # condition to give controol to follow controller
                    # in case drone sees the path and is well stabilized above it
                    ccu.pubEnableControl.publish(True)
                    ccu.pubEnableLand.publish(False)
                    ccu.pubEnableTakeOff.publish(False)
                    if ccu.who_in_control is not controlStatus[3]:
                        ccu.who_in_control = controlStatus[3]
                        print("Central Unit: I gave control to Follow Controller!")
                # end of toggle control block
            else:
                rospy.sleep(0.1)
    except rospy.ROSInterruptException:
        print("central_control_unit: ROSInterruptException")
    finally:
        ccu.cleanup()
74582104744 | from rest_framework.decorators import api_view, permission_classes
from rest_framework.permissions import IsAuthenticated
from rest_framework.permissions import AllowAny
from rest_framework.response import Response
from .models import File
from .models import Folder
from .serializers import FileSerializer
from .serializers import FolderSerializer
import os
from django.conf import settings
#####################################################################
###################### Get All Files of a User ######################
#####################################################################
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_all_files(request):
    """List every file owned by the requesting user."""
    owned = File.objects.filter(user=request.user)
    serializer = FileSerializer(owned, many=True, context={'request': request})
    return Response(serializer.data)
#######################################################################
###################### Get All Folders of a User ######################
#######################################################################
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_all_folders(request):
    """List every folder owned by the requesting user."""
    owned = Folder.objects.filter(user=request.user)
    return Response(FolderSerializer(owned, many=True).data)
#####################################################################
#####################################################################
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_files(request, pk):
    """List the user's files in folder *pk*; the literal 'null' means the root level."""
    folder_id = None if pk == 'null' else pk
    matches = File.objects.filter(user=request.user, folder_id=folder_id)
    serializer = FileSerializer(matches, many=True, context={'request': request})
    return Response(serializer.data)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def upload_file(request):
    """Store an uploaded file for the requesting user, optionally inside a folder."""
    serializer = FileSerializer(data=request.data, context={'request': request})
    if not serializer.is_valid():
        return Response(serializer.errors, status=400)
    serializer.save(user=request.user,
                    filename=request.data['file'].name,
                    folder_id=request.data.get('folder_id', None))
    return Response(serializer.data, status=201)
@api_view(['DELETE'])
@permission_classes([IsAuthenticated])
def delete_file(request, pk):
    """Delete file *pk* (must belong to the requester) and its bytes on disk."""
    try:
        record = File.objects.get(id=pk, user=request.user)
    except File.DoesNotExist:
        return Response({"detail": "File not found."}, status=404)
    stored_path = os.path.join(settings.MEDIA_ROOT, str(record.file))
    # Remove the underlying file from storage before dropping the DB row.
    if os.path.exists(stored_path):
        os.remove(stored_path)
    record.delete()
    return Response(status=204)
@api_view(['POST'])
@permission_classes([IsAuthenticated])
def create_folder(request):
    """Create a folder for the authenticated user, optionally under a parent.

    'null', a missing id, or an unknown id all result in a top-level folder.
    """
    user = request.user
    # BUGFIX: the original issued Folder.objects.get(id=None) for the 'null'
    # case and relied on the raised exception; an invalid pk could also raise
    # ValueError and surface as a 500. Resolve the parent explicitly instead.
    parent_folder = None
    parent_folder_id = request.data.get('parent_folder_id')
    if parent_folder_id not in (None, 'null'):
        try:
            parent_folder = Folder.objects.get(user=user, id=parent_folder_id)
        except Folder.DoesNotExist:
            parent_folder = None
    serializer = FolderSerializer(data=request.data)
    if serializer.is_valid():
        serializer.save(user=user, parentFolder=parent_folder)
        return Response(serializer.data, status=201)
    return Response(serializer.errors, status=400)
@api_view(['GET'])
@permission_classes([IsAuthenticated])
def get_folders(request, pk):
    """List the user's folders whose parent is *pk*; 'null' means top level."""
    parent = None if pk == 'null' else pk
    matches = Folder.objects.filter(user=request.user, parentFolder=parent)
    return Response(FolderSerializer(matches, many=True).data)
@api_view(['PUT'])
@permission_classes([IsAuthenticated])
def updated_folder_title(request, pk):
    """Rename folder *pk*; the folder must belong to the requesting user."""
    # BUGFIX: a missing folder previously raised Folder.DoesNotExist and
    # surfaced as a 500; return a 404 like the other folder endpoints do.
    try:
        folder = Folder.objects.get(user=request.user, id=pk)
    except Folder.DoesNotExist:
        return Response({"detail": "Folder not found."}, status=404)
    newTitle = request.data.get('title', None)
    if newTitle:
        folder.title = newTitle
        folder.save()
        serializer = FolderSerializer(folder)
        return Response(serializer.data)
    return Response({"error": "Title is required"}, status=400)
@api_view(['DELETE'])
@permission_classes([IsAuthenticated])
def delete_folder(request, pk):
    """Delete folder *pk* owned by the requester; 404 when it does not exist."""
    try:
        Folder.objects.get(id=pk, user=request.user).delete()
        return Response(status=204)
    except Folder.DoesNotExist:
        return Response({"detail": "Folder not found."}, status=404)
7870435087 | from bs4 import BeautifulSoup
from uk_bin_collection.uk_bin_collection.common import *
from uk_bin_collection.uk_bin_collection.get_bin_data import \
AbstractGetBinDataClass
# import the wonderful Beautiful Soup and the URL grabber
class CouncilClass(AbstractGetBinDataClass):
    """
    Concrete classes have to implement all abstract operations of the
    base class. They can also override some operations with a default
    implementation.
    """
    def parse_data(self, page: str, **kwargs) -> dict:
        """Scrape Vale of Glamorgan bin-collection dates for one property.

        Queries the council's LocalInfo endpoint for the property's refuse
        round and weekly recycling type, generates weekly dates from the
        round's weekday, scrapes the black-bag calendar page for that round,
        and merges both into the standard ``{"bins": [...]}`` result sorted
        by date (future dates only).

        kwargs: ``uprn`` -- the property reference, validated by check_uprn.
        """
        requests.packages.urllib3.disable_warnings()
        user_uprn = kwargs.get("uprn")
        check_uprn(user_uprn)
        # Browser-like headers; the endpoint is fronted by the council's map
        # service and appears to expect these (copied from a real session).
        headers = {
            "Accept": "*/*",
            "Accept-Language": "en-GB,en;q=0.6",
            "Connection": "keep-alive",
            "Referer": "https://www.valeofglamorgan.gov.uk/",
            "Sec-Fetch-Dest": "script",
            "Sec-Fetch-Mode": "no-cors",
            "Sec-Fetch-Site": "same-site",
            "Sec-GPC": "1",
            "sec-ch-ua": '"Not?A_Brand";v="8", "Chromium";v="108", "Brave";v="108"',
            "sec-ch-ua-mobile": "?0",
            "sec-ch-ua-platform": '"Windows"',
        }
        params = {
            "RequestType": "LocalInfo",
            "ms": "ValeOfGlamorgan/AllMaps",
            "group": "Community and Living|Refuse HIDE2",
            "type": "json",
            "callback": "AddressInfoCallback",
            "uid": user_uprn,
            # NOTE(review): these look like captured jQuery JSONP/cache-buster
            # values from a recorded request -- confirm they can be static.
            "import": "jQuery35108514154283927682_1673022974838",
            "_": "1673022974840",
        }
        # Get a response from the council
        response = requests.get(
            "https://myvale.valeofglamorgan.gov.uk/getdata.aspx",
            params=params,
            headers=headers,
        ).text
        # Load the JSON and seek out the bin week text, then add it to the calendar URL. Also take the weekly
        # collection type and generate dates for it. Then make a GET request for the calendar
        bin_week = str(
            json.loads(response)["Results"]["Refuse_HIDE2"]["Your_Refuse_round_is"]
        ).replace(" ", "-")
        weekly_collection = str(
            json.loads(response)["Results"]["Refuse_HIDE2"]["Recycling__type"]
        ).capitalize()
        # First word of the round (e.g. "Monday") drives the weekly schedule.
        weekly_dates = get_weekday_dates_in_period(
            datetime.now(), days_of_week.get(bin_week.split("-")[0].strip()), amount=48
        )
        schedule_url = f"https://www.valeofglamorgan.gov.uk/en/living/Recycling-and-Waste/collections/Black-Bag-Collections/{bin_week}.aspx"
        response = requests.get(schedule_url, verify=False)
        # BS4 parses the calendar
        soup = BeautifulSoup(response.text, features="html.parser")
        soup.prettify()
        # Some scraper variables
        collections = []
        # Get the calendar table and find the headers
        table = soup.find("table", {"class": "TableStyle_Activities"}).find("tbody")
        table_headers = table.find("tr").find_all("th")
        # For all rows below the header, find all details in th next row
        # NOTE(review): this iterates every <tr> on the page rather than just
        # rows of `table` found above -- presumably the calendar table is the
        # only one present; confirm against the live page.
        for tr in soup.find_all("tr")[1:]:
            row = tr.find_all("td")
            # Parse month and year - month needs converting from text to number
            month_and_year = row[0].text.split()
            if month_and_year[0] in list(calendar.month_abbr):
                collection_month = datetime.strptime(month_and_year[0], "%b").month
            elif month_and_year[0] == "Sept":
                # "Sept" is neither the %b abbreviation ("Sep") nor the full
                # name, so it is special-cased to month 9.
                collection_month = int(9)
            else:
                collection_month = datetime.strptime(month_and_year[0], "%B").month
            collection_year = datetime.strptime(month_and_year[1], "%Y").year
            # Get the collection dates column, remove anything that's not a number or space and then convert to dates
            for day in remove_alpha_characters(row[1].text.strip()).split():
                try:
                    bin_date = datetime(collection_year, collection_month, int(day))
                    collections.append((table_headers[1].text.strip().replace(" collection date", ""), bin_date))
                except Exception as ex:
                    # Malformed day tokens are skipped rather than failing the run.
                    continue
        # Add in weekly dates to the tuple
        for date in weekly_dates:
            collections.append(
                (weekly_collection, datetime.strptime(date, date_format))
            )
        # Order all the data, only including future dates
        ordered_data = sorted(collections, key=lambda x: x[1])
        data = {"bins": []}
        for item in ordered_data:
            collection_date = item[1]
            if collection_date.date() >= datetime.now().date():
                dict_data = {
                    "type": item[0],
                    "collectionDate": collection_date.strftime(date_format),
                }
                data["bins"].append(dict_data)
        return data
| robbrad/UKBinCollectionData | uk_bin_collection/uk_bin_collection/councils/ValeofGlamorganCouncil.py | ValeofGlamorganCouncil.py | py | 4,863 | python | en | code | 51 | github-code | 36 |
37208016314 | from datetime import datetime, timedelta
# Ask for a birth date and draw an ASCII birthday cake whose candle count is
# the last digit of the person's age.
birthdate = input("Tell us your bidrthay in DD.MM.YYYY format ")
print(birthdate)
date_obj = datetime.strptime(birthdate, '%d.%m.%Y').date()
print(date_obj)
# NOTE(review): computed but never used afterwards.
time_difference = datetime.now().date() - date_obj
time_now = datetime.now()
# NOTE(review): only the year is compared, so a future date within the
# current year slips through -- confirm whether that is acceptable.
if (time_now.year < date_obj.year):
    print("You aren't even born yet.")
    exit()
# Age in whole years: subtract one if this year's birthday hasn't happened yet.
age = time_now.year - date_obj.year
if (time_now.month < date_obj.month):
    age -= 1
elif (time_now.month == date_obj.month):
    if (time_now.day < date_obj.day):
        age -= 1
# Number of candles = last digit of the age.
needed_age = age % 10
print(needed_age)
top = "     ______"
top2 = "   |:H:a:p:p:y:|"
top3 = " __|___________|__"
bottom1 = " |^^^^^^^^^^^^^^^^^|"
bottom2 = " |:B:i:r:t:h:d:a:y:|"
bottom3 = " |                 |"
bottom4 = " ~~~~~~~~~~~~~~~~~~~"
# Splice `needed_age` candle characters ("i") into the cake's top edge,
# centring them over the 5-underscore span; fewer than 5 candles keeps the
# leftover underscores, more than 5 widens the row.
if (needed_age < 5):
    top = top[:(10+((5-needed_age)//2))] + "i"*needed_age + "_"*(5-needed_age) + top[(10+((5-needed_age)//2)):]
elif(needed_age == 5):
    top = top[:10] + "i"*needed_age + "_"*(5-needed_age) + top[10:]
else :
    top = top[:(10-((needed_age-5)// 2))] + "i"*needed_age + top[(10-((needed_age-5)// 2)):-(needed_age-5)]
print(top)
print(top2)
print(top3)
print(bottom1)
print(bottom2)
print(bottom3)
print(bottom4)
| KyleKiske/DI-Bootcamp | Week2/Day2/ChallengeGold.py | ChallengeGold.py | py | 1,248 | python | en | code | 0 | github-code | 36 |
10828719550 | import math
def solution(w, h):
    """Count the unit squares of a w x h grid that a corner-to-corner
    diagonal does NOT pass through.

    The diagonal crosses exactly w + h - gcd(w, h) unit squares, so the
    answer is the total cell count minus that amount.  (For w == h the
    formula reduces to w*h - w, so squares need no special case.)
    """
    crossed = w + h - math.gcd(w, h)
    return w * h - crossed
| choijaehoon1/programmers_level | src/test07.py | test07.py | py | 447 | python | ko | code | 0 | github-code | 36 |
28067670792 | # 2021-03-12
# Source: https://www.acmicpc.net/problem/1009  ("Distributed processing")
# Ten computers are numbered 1..10 and data item k goes to computer
# ((k - 1) % 10) + 1.  Given a and b per test case, report which computer
# handles the last of a**b data items -- i.e. the last digit of a**b,
# with 0 mapped to 10.
n=int(input())
# Method 1) compute a**b directly -- correct but receives "time limit
# exceeded" on the judge because the big-integer power is expensive.
# NOTE(review): as written, BOTH loops execute in sequence, so the second
# one tries to read n more input lines after this one has consumed them.
# In practice only one of the two methods should be left active.
for _ in range(n):
    a,b=map(int,input().split())
    print(int(str(a**b)[-1]))
# Method 2) case analysis on the last digit, whose powers cycle with
# period 1 (0,1,5,6), 2 (4,9) or 4 (2,3,7,8).
for _ in range(n):
    a,b = map(int, input().split())
    a=a%10
    if a==1 or a==5 or a==6:
        # Fixed-point digits: a**b always ends in a itself.
        print(a)
    elif a==0:
        # Last digit 0 means computer number 10, not 0.
        print(10)
    elif a==4 or a==9:
        # Period-2 digits alternate between a and (a*a) % 10.
        if (b%2)==1:
            print(a)
        else:
            print((a*a)%10)
    else:
        # Period-4 digits (2, 3, 7, 8); b % 4 == 0 uses the 4th power.
        if (b%4)==0:
            print(int(str(a**4)[-1]))
        else:
            print(int(str(a**(b%4))[-1]))
| hwanginbeom/algorithm_study | 1.algorithm_question/2.implemented/88.Implemented_kyounglin.py | 88.Implemented_kyounglin.py | py | 1,361 | python | ko | code | 3 | github-code | 36 |
38916812011 | import discord
from discord.ext.commands import Bot
from discord.ext import commands
import asyncio
import time
import random
from discord import Game
# Module-level client objects.  NOTE(review): `Client` and `Clientdiscord`
# are created but never used below; all handlers are registered on `client`.
Client = discord.client
client = commands.Bot(command_prefix = '-')
Clientdiscord = discord.Client()
# SECURITY: a live bot token is hardcoded in source.  It should be revoked
# and loaded from an environment variable or config file instead.
TOKEN = ("NTczNDkxODgzOTc2ODE4Njk4.XMroNg.Pzwl-RFnKN_qTQFcqeFmllynIj0")
@client.event
async def on_member_join(member):
    """DM a welcome message (pointing at `-pomoc`) to every joining member."""
    greeting = 'Welcome in Official MajklCraft discord for help write "-pomoc"'
    print(f'Recognised that a member called {member.name} joined')
    await client.send_message(member, greeting)
    print(f'Sent message to {member.name}')
@client.event
async def on_ready():
    """Once connected, advertise the help command via the bot's presence."""
    # Shows up as "Playing -pomoc" next to the bot in the member list.
    await client.change_presence(game=Game(name='-pomoc'))
    print('Bot Is Running Sucesfully')
# NOTE(review): this coroutine is never registered with @client.event and the
# name is rebound by the decorated handler below -- effectively dead code.
async def on_message(message):
    author = message.author
    content = message.content
    print('{}: {}'.format(author, content))
@client.event
async def on_message(message):
    """Dispatch chat commands.

    Plain-text commands are looked up in a reply table; `-cheers` (embed)
    and `-coinflip` (random pick, prefix match) are handled specially.
    At most one branch fires per message, so dispatch order is irrelevant.
    """
    text_replies = {
        '-web': 'www.futurik.majklcraft.eu',
        '-prikazy': '-web,-cheers,-coinflip,-vyhody,-pomoc,-ts',
        '-vyhody': 'http://futurik.buycraft.net/',
        '-pomoc': 'Pro pomoc kontaktujte kohokoli z AT.',
        '-ts': '81.0.217.180:7399',
    }
    if message.content == '-cheers':
        em = discord.Embed(description='Cheers')
        em.set_image(url='https://cdn.discordapp.com/attachments/528194410924605440/529441936323510273/download_1.jpg')
        await client.send_message(message.channel, embed=em)
    if message.content.startswith('-coinflip'):
        await client.send_message(message.channel, random.choice(['head', 'tail']))
    reply = text_replies.get(message.content)
    if reply is not None:
        await client.send_message(message.channel, reply)
# NOTE(review): duplicate, undecorated definition.  @client.event above
# already registered the command handler on the client; this def presumably
# only rebinds the module-level name afterwards -- confirm, then remove.
async def on_message(message):
    author = message.author
    content = message.content
    print('{}: {}'.format(author, content))
# Blocks until the bot disconnects; uses the hardcoded TOKEN above.
client.run(TOKEN)
| ANATLANTIDA/BOT | Bot.py | Bot.py | py | 2,165 | python | en | code | 0 | github-code | 36 |
36829740540 | import pytest
from launch_jenkins import launch_jenkins
from launch_jenkins import log
from launch_jenkins import errlog
from launch_jenkins import CaseInsensitiveDict
def test_log(monkeypatch, capsys):
    """log() writes to stderr normally and is silenced by CONFIG['quiet']."""
    monkeypatch.setitem(launch_jenkins.CONFIG, 'quiet', False)
    log('hello', 'world')
    captured = capsys.readouterr()
    assert not captured[0]
    assert captured[1] == 'hello world\n'
    monkeypatch.setitem(launch_jenkins.CONFIG, 'quiet', True)
    log('hello', 'world')
    captured = capsys.readouterr()
    assert not captured[0]
    assert not captured[1]
def test_errlog(monkeypatch, capsys):
    """errlog() writes to stderr regardless of the CONFIG['quiet'] flag."""
    for quiet in (False, True):
        monkeypatch.setitem(launch_jenkins.CONFIG, 'quiet', quiet)
        errlog('hello', 'world')
        out, err = capsys.readouterr()
        assert not out
        assert err == 'hello world\n'
def test_caseinsensitivedict():
    """Exercise the CaseInsensitiveDict mapping protocol end to end."""
    cid = CaseInsensitiveDict()
    cid['key'] = 'value'
    cid['other'] = 'othervalue'
    del cid['other']
    # Lookups must ignore case while iteration preserves the stored key.
    assert cid['key'] == cid['KEY']
    assert list(cid) == ['key']
    assert len(cid) == 1
    # Equality against plain dicts, copies, and unrelated types.
    assert cid == {'key': 'value'}
    assert cid.copy() == cid
    assert cid != 'somethingelse'
    assert repr(cid)
# Table of (milliseconds, expected MM:SS / H:MM:SS string) pairs; hours are
# only shown once the duration reaches a full hour.
@pytest.mark.parametrize('millis,expect', [
    (0, '00:00'),
    (1000, '00:01'),
    (60000, '01:00'),
    (61000, '01:01'),
    (120000, '02:00'),
    (630000, '10:30'),
    (3599000, '59:59'),
    (3600000, '1:00:00'),
    (3661000, '1:01:01'),
    (36061000, '10:01:01'),
])
def test_format_millis(millis, expect):
    """format_millis() renders durations with minute/second zero-padding."""
    assert launch_jenkins.format_millis(millis) == expect
| ocaballeror/jenkins-launch | tests/test_misc.py | test_misc.py | py | 1,681 | python | en | code | 0 | github-code | 36 |
def coach_data(file_name):
    """Read the first line of a coach's data file and split it on commas.

    Returns the list of fields, or None (after printing the error) when the
    file cannot be opened.
    """
    try:
        with open(file_name) as handle:
            first_line = handle.readline()
    except IOError as err:
        print('File error:', str(err))
        return None
    return first_line.strip().split(',')
def sanitize(time_string):
    """Normalise a time like '2-58' or '2:58' to the dotted form '2.58'.

    Strings containing neither separator are returned unchanged; '-' takes
    precedence when both separators appear.
    """
    for separator in ('-', ':'):
        if separator in time_string:
            minutes, seconds = time_string.split(separator)
            return minutes + '.' + seconds
    return time_string
def get_sort(file_name):
    """Return the file's first-line times, sanitized and string-sorted.

    Returns an empty list when the data file could not be read, instead of
    crashing with TypeError on the None that coach_data() returns then.
    """
    items = coach_data(file_name)
    if items is None:
        return []
    return sorted(sanitize(t) for t in items)
# Demo: print each athlete's sanitized, lexicographically sorted times.
print(get_sort('james.txt'))
print(get_sort('julie.txt'))
print(get_sort('mikey.txt'))
print(get_sort('sarah.txt'))
| duheng18/python-study | headfirst/example/example15.py | example15.py | py | 722 | python | en | code | 0 | github-code | 36 |
44095045713 | from test_framework import generic_test
def closest_int_same_bit_count(x: int) -> int:
    """Return the integer closest to x that has the same number of set bits,
    assuming x fits in an unsigned 64-bit word.

    Swapping the two rightmost consecutive bits that differ preserves the
    bit count while changing the value by the smallest possible amount.

    Raises:
        ValueError: if the low 64 bits are all identical (x is 0 or
            2**64 - 1), since no same-weight neighbour exists.  The
            original silently returned None in that case.
    """
    NUM_UNSIGNED_BITS = 64
    for i in range(NUM_UNSIGNED_BITS - 1):
        # Compare bit i with bit i+1; the first mismatch is the cheapest swap.
        if (x >> i) & 1 != (x >> (i + 1)) & 1:
            return x ^ ((1 << i) | (1 << (i + 1)))
    raise ValueError('All bits of x are 0 or 1')
if __name__ == '__main__':
    # EPI judge harness: replays recorded test cases from the .tsv file and
    # exits with the number of failures.
    exit(
        generic_test.generic_test_main('closest_int_same_weight.py',
                                       'closest_int_same_weight.tsv',
                                       closest_int_same_bit_count))
| kchen1025/Python-EPI | epi_judge_python/closest_int_same_weight.py | closest_int_same_weight.py | py | 1,001 | python | en | code | 0 | github-code | 36 |
21594616095 | import spacy
import plac
import numpy as np
import time
import re
import os
import sys
import argparse
from sklearn.metrics import accuracy_score
from conllToSpacy import main
# Command-line arguments: test data path and model path are both required in
# practice; missing either one aborts the run.
parser = argparse.ArgumentParser(description="Testing an NER model with SpaCy.")
parser.add_argument("-tp", "--test_path", help="Path to CoNLL test dataset.")
parser.add_argument("-model", help="Path to the model.")
args = parser.parse_args()
if args.test_path:
    # NOTE(review): this `main` is conllToSpacy.main (imported above); the
    # local `def main()` further down shadows the name only after this runs.
    testing, ent_true = main(args.test_path, test='test')
    print("Got test data at", args.test_path)
else:
    print("No test data path given ! Interrupting the script...")
    sys.exit()
if args.model:
    model = args.model
    print("Model loaded at", model)
else:
    print("No model path given !")
    sys.exit()
# Whitespace-tokenised copy of each test sentence, for length checks later.
testing_tokens = [x.split() for x in testing]
print('Length testing sentences: ', len(testing))
print('Length testing tokens: ', len(testing_tokens))
def compute_score(L1, L2):
    """Fraction of L1's non-'O' labels that match L2 at the same index.

    Used both ways: compute_score(pred, true) gives precision over the
    predicted entities and compute_score(true, pred) gives recall.

    Returns 0.0 instead of dividing by zero when L1 holds no entity labels.
    """
    correct_answers = 0
    nb_answers = 0
    for i in range(len(L1)):
        if L1[i] != 'O':
            nb_answers += 1
            if L1[i] == L2[i]:
                correct_answers += 1
    print('%d correct out of %d answers.' % (correct_answers, nb_answers))
    if nb_answers == 0:
        return 0.0
    return correct_answers / nb_answers
def main():
    """Load the saved spaCy model, tag every test sentence, then report
    precision / recall / F1 against the gold CoNLL labels."""
    # test the saved model
    print("Loading from", model)
    nlp2 = spacy.load(model)
    ent_pred = []
    testing_pred = []
    print("Start predicttion...")
    count = 1
    # NOTE(review): `k` is assigned but never used.
    k = 1
    for text in testing:
        start = time.time()
        doc = nlp2(text)
        # One 'O' per whitespace token; entity labels overwrite positions.
        entities = ['O'] * len(text.split())
        for ent in doc.ents:
            try:
                # NOTE(review): .index() finds only the FIRST occurrence of
                # the entity text and assumes single-token entities -- repeated
                # or multi-word entities may be mis-assigned; confirm.
                entities[text.split().index(ent.text)] = ent.label_
            except IndexError:
                print('Index Error! Ent:', list(ent.text), '. Text:', text)
            except ValueError:
                print('Value Error! Ent:', list(ent.text), '. Text:', text)
        ent_pred.append(entities)
        testing_pred.append([t.text for t in doc])
        print(str(count)+'/'+str(len(testing))+' done in %fs' % (time.time()-start))
        count += 1
    # Check whether there are the same number of sentences, and the same number of words in each sentence.
    print('Length pred sentences: ', len(testing_pred))
    for i in range(len(testing_tokens)):
        if len(ent_true[i]) != len(ent_pred[i]):
            print("NOT THE SAME LENGTH !")
            print("True Text: ", testing_tokens[i])
            print("Pred Text: ", testing_pred[i])
            print("Entities true: ", ent_true[i])
            print("Entities pred: ", ent_pred[i])
    print('Pred Entity: ', set([x for y in ent_pred for x in y]))
    print('True Entity: ', set([x for y in ent_true for x in y]))
    # Flatten sentence-wise labels into single token-level sequences.
    y_pred = [x for y in ent_pred for x in y]
    y_true = [x for y in ent_true for x in y]
    Precision = compute_score(y_pred, y_true)
    Recall = compute_score(y_true, y_pred)
    # NOTE(review): divides by zero when Precision + Recall == 0.
    F1_score = 2*(Recall * Precision) / (Recall + Precision)
    print("Random accuracy: %0.2f" % (accuracy_score(y_true, ['O']*len(y_true))))
    print("Accuracy score: %0.2f" % (accuracy_score(y_true, y_pred)))
    print('Precision: %0.2f' % (Precision))
    print('Recall: %0.2f' % (Recall))
    print('F1 score: %0.2f' % (F1_score))
    # with open('score.csv', 'a', encoding='utf-8') as f:
    #     f.write(os.path.basename(model) + ',' + str(Precision) + ',' + str(Recall) + ',' + str(F1_score) + '\n')
if __name__ == '__main__':
    main()
| Djia09/Named-Entity-Recognition-spaCy | test_ner_spacy.py | test_ner_spacy.py | py | 3,496 | python | en | code | 3 | github-code | 36 |
42576581221 | """ Exercício para mostras as faces encontradas com variação de parâmetro """
import cv2
# Load the frontal-face Haar cascade shipped with OpenCV.
classificador = cv2.CascadeClassifier('cascades\\haarcascade_frontalface_default.xml')
imagem = cv2.imread('pessoas\\pessoas3.jpg')
# Haar detection works on grayscale input.
imagemcinza = cv2.cvtColor(imagem, cv2.COLOR_BGR2GRAY)
# Detect faces: 20% scale steps, at least 3 neighbour hits, minimum 35x35 px.
facesdetectadas = classificador.detectMultiScale(imagemcinza, scaleFactor = 1.2, minNeighbors=3, minSize= (35,35))
print(len(facesdetectadas))
# Draw a 2px red (BGR) rectangle around each detected face.
for (x, y, l, a) in facesdetectadas:
    cv2.rectangle(imagem, (x, y), (x + l, y + a), (0, 0, 255), 2)
cv2.imshow('Faces Detectadas', imagem)
cv2.waitKey() | alans96/PythonProject | Computer Vision/1 Detecção de Faces com Python e OpenCV/3 exe.py | 3 exe.py | py | 583 | python | pt | code | 0 | github-code | 36
42603056105 | import os
import json
import shutil
import time
import traceback
__author__ = 'Michael Ryan Harlich'
def update_paths(paths):
    """Derive the pipeline's output-file paths from paths['output'].

    Mutates ``paths`` in place, adding the three derived entries.  (The
    'partial_predication' filename spelling is reproduced as-is since the
    rest of the pipeline uses the same name.)
    """
    out = paths['output']
    paths['partial_prediction'] = out + 'partial_predication.ent'
    paths['partial_ground_truth'] = out + 'partial_ground_truth.ent'
    paths['aligned_prediction'] = out + 'aligned_prediction.pdb'
def execute(paths):
    """Run the pipeline: align the prediction to the ground truth, then
    write the ground-truth subset covering the selected residue range.
    (Dead trailing `pass` from the original removed.)"""
    align(paths)
    start_residue, end_residue = get_start_and_end_residue(paths)
    save_partial_protein(start_residue, end_residue, paths)
def save_partial_protein(start_residue, end_residue, paths):
    """Write the ground-truth structure restricted to a residue range.

    When no range is selected (both bounds None), paths['ground_truth'] is
    copied verbatim to paths['partial_ground_truth']; otherwise only ATOM
    records with residue numbers in [start_residue, end_residue] are kept.
    """
    if start_residue is None and end_residue is None:
        shutil.copyfile(paths['ground_truth'], paths['partial_ground_truth'])
        return
    # Context managers guarantee both handles close even if filtering
    # raises (the original leaked them on error).
    with open(paths['ground_truth'], 'r') as gt_file:
        with open(paths['partial_ground_truth'], 'w') as gt_partial_file:
            save_partial_file(start_residue, end_residue, gt_file, gt_partial_file)
def save_partial_file(start_residue, end_residue, src_file, des_file):
    """Copy ATOM records whose residue number lies in the closed range
    [start_residue, end_residue] from src_file to des_file."""
    low = int(start_residue)
    high = int(end_residue)
    for line in src_file:
        fields = line.split()
        if not fields or fields[0] != 'ATOM':
            continue
        # Sixth whitespace-separated field (index 5) is the residue number.
        if low <= int(fields[5]) <= high:
            des_file.write(line)
def get_start_and_end_residue(paths):
    """Look up the residue selection for this prediction's EMDB id.

    Returns the [start, end] pair from the selections JSON, or (None, None)
    when no selections file is configured or the id has no entry.
    """
    if 'selections_file' not in paths:
        return (None, None)
    # Prediction path is .../<emdb_id>/<file>, so the id is the
    # second-to-last path component.
    emdb_id = paths['prediction'].split('/')[-2]
    with open(paths['selections_file']) as handle:
        selections = json.load(handle)
    if emdb_id not in selections:
        return (None, None)
    return selections[emdb_id]
def align(paths):
    """Align the prediction to the ground truth with TM-align, then extract
    the aligned-prediction ATOM records from the TM.sup_all output.

    Records between an "Aligned" header row and the next TER record are
    copied to paths['aligned_prediction'].  Errors are reported (traceback
    printed) but deliberately not re-raised, preserving the original
    best-effort behaviour.
    """
    try:
        os.system(paths['tmalign_path'] + ' ' + paths['prediction'] + ' ' + paths['ground_truth'] + ' -o ' + paths['output'] + 'TM.sup')
        # Give TM-align a moment to flush its output files to disk.
        time.sleep(1)
        with open(paths['aligned_prediction'], 'w') as ap_file, \
                open(paths['output'] + 'TM.sup_all', 'r') as tm_sup:
            on = False
            for line in tm_sup:
                tokens = line.split()
                if len(tokens) > 0:
                    if tokens[0] == 'TER':
                        on = False
                    if on == True:
                        ap_file.write(line)
                    if len(tokens) > 1:
                        if tokens[1] == 'Aligned':
                            on = True
    except Exception:
        # BUG FIX: the original called sys.exc_info() but `sys` is never
        # imported in this module, so the handler itself raised NameError.
        # print_exc() reports the same information without that dependency.
        traceback.print_exc()
pass | RyanHarlich/Ca-Prediction-Automated-Testing-Quick-Tools | segments_rmsd/partial_protein/partial_protein.py | partial_protein.py | py | 2,534 | python | en | code | 0 | github-code | 36 |
7143422402 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
__author__ = 'wangzhefeng'
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
# Load the CMB (China Merchants Bank) wealth-management product spreadsheet.
cmb = pd.read_excel("/home/wangzhefeng/project/python/projects/zhaohanglicai/CMB_Finance.xlsx")
print(cmb.head())
print(cmb.shape)
print(cmb.dtypes)
# Type conversions: strip the trailing unit character from FinDate (term)
# and NetValue (yield).  NOTE(review): the numeric casts are commented out,
# so both columns remain strings and sort lexicographically below.
cmb.FinDate = cmb.FinDate.str[:-1]#.astype('int')
cmb.NetValue = cmb.NetValue.str[:-1]#.astype('float') / 100
print(cmb.head())
# Top 3 products by expected yield (duplicates on yield removed).
NetValue_sort_desc = cmb[['PrdCode', 'NetValue']].sort_values(by = 'NetValue', ascending = False)
NetValue_duplicate_top = NetValue_sort_desc.drop_duplicates(subset = 'NetValue').head(3)
print(NetValue_duplicate_top)
# Bottom 3 products by expected yield.
NetValue_sort_asc = cmb[['PrdCode', 'NetValue']].sort_values(by = 'NetValue', ascending = True)
NetValue_duplicate_last = NetValue_sort_asc.drop_duplicates(subset = 'NetValue').head(3)
print(NetValue_duplicate_last)
# Sample counts per risk category.
stats = cmb.Risk.value_counts()
print(stats)
# Descriptive statistics of the product term.
print(cmb.FinDate.describe())
# Collect the FinDate series for each risk category.
FinDate = []
Risks = cmb.Risk.unique()
# NOTE(review): ndarray.sort() sorts in place and returns None, so this
# prints None -- presumably `print(sorted(Risks))` was intended; confirm.
print(Risks.sort())
for Risk in Risks:
    FinDate.append(cmb.loc[cmb.Risk == Risk, 'FinDate'])
print(FinDate)
| wangzhefeng/DataSpider | projects/zhaohanglicai/zhlc_analysis.py | zhlc_analysis.py | py | 1,439 | python | en | code | 0 | github-code | 36 |
17300324237 | from django.conf.urls import url
from django.views.generic import TemplateView
from .views import (
klasses_list_view,
klasses_detail_view,
klasses_create_view,
klasses_delete_view,
klasses_update_view,
)
# Klass CRUD routes.  The fixed 'list/' and 'create/' patterns are declared
# before the catch-all '(?P<id>...)' patterns, so they match first.
urlpatterns =[
    # This is Klasses pages
    url(r'^list/$', klasses_list_view, name='klasses_list_view'),
    url(r'^create/$', klasses_create_view, name='klasses_create_view'),
    url(r'^(?P<id>[\w-]+)/$', klasses_detail_view, name='klasses_detail_view'),
    url(r'^(?P<id>[\w-]+)/delete$', klasses_delete_view, name='klasses_delete_view'),
    url(r'^(?P<id>[\w-]+)/edit$', klasses_update_view, name='klasses_update_view'),
    #End klasses pages
]
| SaramCodes/School-Management-System | klass/urls.py | urls.py | py | 683 | python | en | code | 1 | github-code | 36 |
15637256017 | import os
import csv
import sys
import fnmatch
import shutil
import time
import re
import config as cfg
import numpy as np
import pandas as pd
import mysql.connector as mysql
import sqlalchemy
from datetime import datetime
from dateutil.parser import parse
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import Select
from selenium.webdriver.chrome.options import Options
pathOfFilesDled = r'C:\\Users\\ChadBot\\Downloads\\'
pathToMoveDLStocks = r'C:\Users\\ChadBot\\Desktop\\barchartdata\\Stocks\\'
pathToMoveDLETF = r'C:\Users\\ChadBot\\Desktop\\barchartdata\\ETF\\'
pathToMoveDLIndices= r'C:\Users\\ChadBot\\Desktop\\barchartdata\\Indices\\'
def dlData():
    """Log into barchart.com with Selenium and download the three
    unusual-options-activity CSVs (stocks, ETFs, indices).

    Credentials come from config.py; files land in Chrome's default
    download directory.  The fixed 5-second sleeps assume each download
    completes in time -- NOTE(review): race-prone on slow connections.
    """
    chrome_options = Options()
    # chrome_options.add_argument("start-minimized")
    driver = webdriver.Chrome(r'C:\chromedriver.exe', options=chrome_options)
    driver.get("https://www.barchart.com/login")
    element = driver.find_element_by_name("email")
    element.send_keys(cfg.login['user'])
    element = driver.find_element_by_name("password")
    element.send_keys(cfg.login['pass'])
    element.send_keys(Keys.RETURN)
    driver.get("https://www.barchart.com/options/unusual-activity/stocks")
    print("stocks")
    driver.find_element_by_xpath("//span[contains(.,'download')]").click()
    time.sleep(5)
    driver.get("https://www.barchart.com/options/unusual-activity/etfs")
    print("etfs")
    driver.find_element_by_xpath("//span[contains(.,'download')]").click()
    time.sleep(5)
    driver.get("https://www.barchart.com/options/unusual-activity/indices")
    print("Indices")
    driver.find_element_by_xpath("//span[contains(.,'download')]").click()
    time.sleep(5)
    driver.quit()
'''
This function has been deprecated
Bot will not sort csv files and save them in folders or upload to git
New functions implemented to clean up data and push to MySQL DB instead
'''
def sortData():
    """Move each downloaded barchart CSV into its category folder.

    Deprecated (see note above) but still called from __main__.  Scans the
    download directory once and routes each file by its etfs/indices/stocks
    filename pattern; a failed move prints an error and exits the process
    (original behaviour, preserved).  The three copy-pasted branches of the
    original are collapsed into a data-driven loop.
    """
    # filename pattern -> destination folder
    routes = (
        ('*-etfs-*-*-*-*-*.csv', pathToMoveDLETF),
        ('*-indices-*-*-*-*-*.csv', pathToMoveDLIndices),
        ('*-stocks-*-*-*-*-*.csv', pathToMoveDLStocks),
    )
    for f_name in os.listdir(pathOfFilesDled):
        for pattern, destination in routes:
            if fnmatch.fnmatch(f_name, pattern):
                try:
                    shutil.move(pathOfFilesDled + f_name, destination)
                    print("File Moved: " + f_name)
                except IOError:
                    print("Could not move files")
                    sys.exit()
'''
Function also deprecated after cleaning past data
'''
def cleanData(dataPath):
    """Normalise one downloaded barchart CSV in place.

    Strips stray quotes, prefixes a 'Date Inserted' index column parsed
    from the MM-DD-YY fragment of the filename, renames 'Last Trade' to
    'Time', converts IV from '12.34%' text to float, reformats the date
    and time columns, and drops the trailing footer row before writing
    the file back to the same path.
    """
    df = pd.read_csv(dataPath).replace('"', '', regex=True)
    # BUG FIX: raw string -- '\d' inside a plain string literal is an
    # invalid escape sequence (DeprecationWarning, a SyntaxError in
    # future Python versions).
    dateRgx = re.compile(r'(\d{2}-\d{2}-\d{2})')
    dateList = dateRgx.findall(dataPath)
    dateStr = str(dateList[0])
    dateT = datetime.strftime(parse(dateStr), '%Y-%m-%d')
    df.insert(0, 'Date Inserted', dateT)
    df = df.set_index('Date Inserted')
    df.rename(columns={'Last Trade':'Time'}, inplace=True)
    # e.g. '45.10%' -> 45.10
    df['IV'] = df['IV'].astype(str).str.rstrip('%').astype(float)
    df['Exp Date'] = pd.to_datetime(df['Exp Date'])
    df['Exp Date'] = df['Exp Date'].dt.strftime('%Y-%m-%d')
    df['Time'] = pd.to_datetime(df['Time'])
    df['Time'] = df['Time'].dt.strftime('%H:%M')
    # Last row of the download is a disclaimer/footer, not data.
    df = df[:-1]
    # NOTE(review): prints the bound method object (missing parentheses) --
    # presumably print(df.head()) was intended; kept to preserve output.
    print(df.head)
    df.to_csv(dataPath)
'''
This function is used to clean existing data that was already scraped
No need to use this function again because new data downloaded will be cleaned
and pushed to MySQL DB
'''
def cleanUpExistingData():
    """One-off pass that runs cleanData over every already-downloaded CSV.

    The three copy-pasted per-category loops of the original are collapsed
    into a single loop over (folder, pattern, success message) triples;
    ETF, then Indices, then Stocks -- the original order.  A ValueError
    from cleanData is reported and the run continues, as before.
    """
    targets = (
        (r"A:\\git\\ChadBot\\barchart\\ETF\\", '*-etfs-*-*-*-*-*.csv', "ETFs Cleaned"),
        (r"A:\\git\\ChadBot\\barchart\\Indices\\", '*-indices-*-*-*-*-*.csv', "Indices Cleaned"),
        (r"A:\\git\\ChadBot\\barchart\\Stocks\\", '*-stocks-*-*-*-*-*.csv', "Stocks Cleaned"),
    )
    for folder, pattern, message in targets:
        for f_name in os.listdir(folder):
            if fnmatch.fnmatch(f_name, pattern):
                try:
                    cleanData(folder + f_name)
                    print(message)
                except ValueError as e:
                    print(e)
def POSTtoDB():
    """Insert the cleaned stocks CSV into the barchart.stocks MySQL table.

    NOTE(review): still hardcoded to one dated file, like the original;
    parameterising the path would be the natural next step.
    """
    stockPath = r"A:\\git\\ChadBot\\barchart\\Stocks\\"
    db = mysql.connect(
        host = cfg.dbLogin['host'],
        user = cfg.dbLogin['user'],
        password = cfg.dbLogin['pass'],
        database = 'barchart'
    )
    cursor = db.cursor()
    cursor.execute("SHOW TABLES")
    databases = cursor.fetchall()
    print(databases)
    with open(stockPath + 'unusual-stocks-options-activity-02-14-2020.csv') as csv_file:
        csv_reader = csv.reader(csv_file, delimiter=',')
        # BUG FIX: skip the header row -- the original inserted the column
        # names as a data row.
        next(csv_reader, None)
        for row in csv_reader:
            print(row)
            # The 16 CSV columns map positionally onto the table columns, so
            # the row is passed straight through as the parameter sequence.
            cursor.execute('''INSERT INTO stocks(Date_Inserted, Symbol, Price, Type, Strike, Exp_Date, DTE, Bid, Midpoint, Ask, Last, Volume, Open_Int, Vol_OI, IV, Time) VALUES (%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)''', (row))
    # BUG FIX: commit() lives on the connection, not the cursor (the
    # original's cursor.commit() raised AttributeError); one commit after
    # the loop is sufficient.
    db.commit()
if __name__ == "__main__":
dlData()
sortData()
sys.exit()
# POSTtoDB()
# cleanUpExistingData()
| xxwikkixx/ChadBot | barchart/barchartDl.py | barchartDl.py | py | 6,546 | python | en | code | 16 | github-code | 36 |
4640657000 | from django.http import Http404
from rest_framework.views import APIView
from rest_framework.response import Response
from rest_framework import status, permissions
from .models import CustomUser
from .serializers import CustomUserSerializer
# Create your views here.
class CustomUserList(APIView):
    """List all users (GET) or create a new one (POST).

    Authentication is required for both verbs.
    """
    permission_classes = [permissions.IsAuthenticated]
    def get(self, request):
        queryset = CustomUser.objects.all()
        return Response(CustomUserSerializer(queryset, many=True).data)
    def post(self, request):
        serializer = CustomUserSerializer(data=request.data)
        if not serializer.is_valid():
            return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
        serializer.save()
        return Response(serializer.data, status=status.HTTP_201_CREATED)
class CustomUserDetail(APIView):
    """Retrieve, partially update, or delete a single user by primary key."""
    permission_classes = [permissions.IsAuthenticated]
    def get_object(self, pk):
        # Shared lookup for all verbs: 404 on a missing user, and DRF's
        # object-level permission check runs before the object is returned.
        try:
            user = CustomUser.objects.get(pk=pk)
            self.check_object_permissions(self.request, user)
            return user
        except CustomUser.DoesNotExist:
            raise Http404
    def get(self, request, pk):
        user = self.get_object(pk)
        serializer = CustomUserSerializer(user)
        return Response(serializer.data)
    def put(self, request, pk):
        user = self.get_object(pk)
        # partial=True makes this behave like PATCH: only supplied fields
        # are validated and updated.
        serializer = CustomUserSerializer(
            instance=user, data=request.data, partial=True
        )
        if serializer.is_valid():
            serializer.save()
            return Response(serializer.data, status=status.HTTP_200_OK)
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
    def delete(self, request, pk):
        user = self.get_object(pk)
        user.delete()
        return Response(
            status=status.HTTP_204_NO_CONTENT
        )
| SheCodesAus/heading_for_success_backend_bris_2023 | SheFunds_backend/users/views.py | views.py | py | 1,915 | python | en | code | 1 | github-code | 36 |
44145316591 | import json
import logging
import typing
class JSONDumpReader(typing.Iterator[dict]):
def __init__(self, dump_path: str):
self.__dump_path = dump_path
def __iter__(self):
with open(self.__dump_path) as f:
for l in f:
l = JSONDumpReader.__clean_line(l)
try:
yield json.loads(l)
except ValueError:
logging.log(level=logging.DEBUG, msg="encountered illegal string while parsing JSON dump")
@staticmethod
def __clean_line(l: str)->str:
return l.strip().rstrip(',')
class JSONDumpWriter(object):
    """Write an iterable of JSON-serialisable objects as a bracketed,
    newline-separated dump, flushing to disk in batches.
    """
    def __init__(self, output_path: str, batch_size: int=500):
        self.__output_path = output_path
        self.__batch_size = batch_size
    def write(self, objects: typing.Iterable[dict]):
        """Serialise ``objects`` to the output path, one object per line."""
        with open(self.__output_path, mode='w') as out:
            out.write('[\n')
            pending = []
            for idx, obj in enumerate(objects):
                pending.append(json.dumps(obj))
                # Flush a full batch (never triggered on the first object).
                if idx and idx % self.__batch_size == 0:
                    logging.log(level=logging.INFO, msg='wrote {} objects'.format(idx + 1))
                    out.write('\n'.join(pending) + '\n')
                    pending = []
            # Whatever remains -- possibly nothing, in which case just a
            # bare newline, matching the original behaviour.
            out.write('\n'.join(pending) + '\n')
            out.write(']\n')
| AlexandraBaier/bachelorthesis | data_analysis/dumpio.py | dumpio.py | py | 1,344 | python | en | code | 0 | github-code | 36 |
28930913211 | #!/usr/bin/python3
# -*- coding: utf8 -*-
# Code from here:
# https://stackoverflow.com/a/26289475
import psutil
import subprocess
import time
import os
class SSHTunnel(object):
    """
    A context manager implementation of an ssh tunnel opened from python
    """
    def __init__(self, tunnel_command):
        # -fN makes ssh fork to the background with no remote command, so
        # create_tunnel() can poll for the parent's immediate exit status.
        assert "-fN" in tunnel_command, "need to open the tunnel with -fN"
        self._tunnel_command = tunnel_command
        self._delay = 0.1
        self.ssh_tunnel = None
    def create_tunnel(self):
        """Spawn the tunnel and return the backgrounded ssh psutil.Process.

        Raises RuntimeError if ssh exits non-zero or if the forked process
        cannot be uniquely identified afterwards.
        """
        tunnel_cmd = self._tunnel_command
        ssh_process = subprocess.Popen(tunnel_cmd, universal_newlines=True,
                shell=True, stdout=subprocess.PIPE, stderr=subprocess.STDOUT,
                stdin=subprocess.PIPE)
        # Assuming that the tunnel command has "-f" and "ExitOnForwardFailure=yes", then the
        # command will return immediately so we can check the return status with a poll().
        while True:
            p = ssh_process.poll()
            if p is not None: break
            time.sleep(self._delay)
        if p == 0:
            # Unfortunately there is no direct way to get the pid of the spawned ssh process, so we'll find it
            # by finding a matching process using psutil.
            # NOTE(review): matching on exact cmdline + username assumes no
            # other identical tunnel is running for this user.
            current_username = psutil.Process(os.getpid()).username()
            ssh_processes = [proc for proc in psutil.process_iter() if proc.cmdline() == tunnel_cmd.split() and proc.username() == current_username]
            if len(ssh_processes) == 1:
                self.ssh_tunnel = ssh_processes[0]
                return ssh_processes[0]
            else:
                raise RuntimeError('multiple (or zero?) tunnel ssh processes found: ' + str(ssh_processes))
        else:
            raise RuntimeError('Error creating tunnel: ' + str(p) + ' :: ' + str(ssh_process.stdout.readlines()))
    def release(self):
        """ Get rid of the tunnel by killin the pid
        """
        if self.ssh_tunnel:
            self.ssh_tunnel.terminate()
    def __enter__(self):
        self.create_tunnel()
        return self
    def __exit__(self, type, value, traceback):
        self.release()
    def __del__(self):
        # Best-effort cleanup if the object is garbage-collected without
        # having been used as a context manager.
        self.release()
| Vasilesk/quotes-posting | sshtunnel.py | sshtunnel.py | py | 2,205 | python | en | code | 0 | github-code | 36 |
37955494687 | from flask import Flask, request, jsonify
import util
app = Flask(__name__)
# @app.route decorator exposes the HTTP endpoint
@app.route("/hello")
def test():
    """Health-check endpoint: GET /hello returns a static string."""
    return "hello world"
@app.route("/get-locations")
def get_locations():
response = jsonify(
{
"locations": util.get_locations()
}
)
response.headers.add("Access-Control-Allow-Origin", "*")
return response
@app.route("/predict-price", methods=['POST'])
def predict_price():
loc = request.form["loc"]
sqft = float(request.form["sft"])
bedrooms = int(request.form["bedrooms"])
bath = int(request.form["bath"])
response = jsonify(
{
"approximate_price": util.get_approx_price(loc, sqft, bedrooms, bath)
}
)
response.headers.add("Access-Control-Allow-Origin", "*")
return response
if __name__ == "__main__":
print("python flask server started...")
util.load_artifacts()
app.run() | Chiemerie1/house_prices_ML_model_deployment | server/server.py | server.py | py | 963 | python | en | code | 0 | github-code | 36 |
44310767729 | import pygame, colors, random, time, sideclass, draw, timer
from random import randint
def collision(player, enemy, player1, screen, WIDTH, HEIGHT):
    """On any player/enemy overlap, show the lose screen and reset the score.

    Colliding enemies are removed from their sprite group (dokill2=True);
    the player sprite is kept.
    """
    hits = pygame.sprite.groupcollide(player, enemy, False, True)
    if not hits:
        return
    draw.drawlose(enemy, screen, WIDTH, HEIGHT)
    player1.score = 0
def side(screen, WIDTH, HEIGHT, clock, timer1, mode):
    """Run the side-scroller game loop until the player wins or quits.

    :param mode: True when run as a stand-alone minigame (shows the end
        screen with the timer); False otherwise (shows the win screen).
    """
    FPS = 60
    # Initializes the groups of objects
    player1 = sideclass.player(WIDTH, HEIGHT)
    all_players = pygame.sprite.Group()
    all_players.add(player1)
    all_enemy = pygame.sprite.Group()
    enemy1 = sideclass.Enemy(WIDTH, HEIGHT, 5)
    all_enemy.add(enemy1)
    # Loads 2 instances of the same background for scrolling
    background1 = pygame.image.load("backgrounds/jumpback.jpg")
    background2 = pygame.image.load("backgrounds/jumpback.jpg")
    # Displays info to the user playing the game
    draw.info(screen, WIDTH, HEIGHT, 'Score 15 to Move on', 100)
    running = True
    move1 = 800
    move2 = 0
    while running:
        clock.tick(FPS)
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                running = False
                pygame.quit()
        # Draws the background
        screen.fill(colors.black)
        screen.blit(background1,(move1,0))
        screen.blit(background2,(move2,0))
        # Randomly spawns enemies: ~1% chance per frame when fewer than two
        # are alive, and always respawn when none are left.
        if (random.randrange(0,100)<1 and len(all_enemy) < 2) or len(all_enemy) == 0:
            enemy = sideclass.Enemy(WIDTH, HEIGHT, random.randint(5,8))
            all_enemy.add(enemy)
        # Displays the timer and score
        draw.drawtime(timer1, screen)
        draw.displayscore(screen,WIDTH,HEIGHT, player1.score)
        # Updates player and enemies
        all_players.update(WIDTH, HEIGHT)
        all_enemy.update(all_enemy, player1)
        all_players.draw(screen)
        all_enemy.draw(screen)
        # Detects collision between enemies and players
        collision(all_players, all_enemy, player1, screen, WIDTH, HEIGHT)
        # Sees if the player has reached the limit
        if player1.score == 15:
            if mode: # If in minigame mode
                timer1.pause()
                draw.drawEnd(screen, WIDTH, HEIGHT, timer1)
            else:
                draw.drawWin(screen, WIDTH, HEIGHT)
            break
        # Controls movement of the background to scroll (both copies wrap
        # back to x=800 after scrolling one full screen width off-screen).
        move1 -= 1
        move2 -= 1
        if move2 == -800:
            move2 = 800
        if move1 == -800:
            move1 = 800
        pygame.display.flip()
def main():
    """Stand-alone entry point: create the window, clock and timer, then
    run the side-scroller in minigame mode."""
    # Driver for minigame mode
    WIDTH = 800
    HEIGHT = 600
    pygame.init()
    screen = pygame.display.set_mode((WIDTH, HEIGHT))
    pygame.display.set_caption("Jump! Minigame Mode")
    clock = pygame.time.Clock()
    timer1 = timer.Timer()
    timer1.start()
    side(screen, WIDTH, HEIGHT, clock, timer1, True)
# Run the minigame directly when this module is executed as a script.
if __name__=="__main__":
    main()
| RamboTheGreat/Minigame-Race | sidescroll.py | sidescroll.py | py | 2,909 | python | en | code | 0 | github-code | 36 |
21136786262 | # 动态规划
class Solution:
    """LeetCode 53 - Maximum Subarray, solved with Kadane's algorithm."""

    def maxSubArray(self, nums: "List[int]") -> int:
        """Return the largest sum of any non-empty contiguous subarray.

        Dynamic programming (Kadane): ``pre`` holds the best sum of a
        subarray ending at the current element; ``ans`` holds the best
        seen so far. O(n) time, O(1) extra space. Returns 0 for an
        empty input list.

        Note: the annotation is a string so the class imports cleanly
        even when ``typing.List`` has not been imported.
        """
        if not nums:
            return 0
        pre = nums[0]
        ans = pre
        for x in nums[1:]:
            # Extend the previous run only if it contributes positively;
            # otherwise start a fresh run at the current element.
            pre = pre + x if pre > 0 else x
            ans = max(ans, pre)
        return ans
42754051183 | from datetime import datetime
from elasticsearch import Elasticsearch
from elasticsearch import helpers
import json
import time
# Bulk-load newline-delimited JSON documents from yt_data.rst into the
# local Elasticsearch "youtube" index, flushing every 20000 documents.
es = Elasticsearch()
f = open("yt_data.rst")
lines = f.readlines()
cnt = 1          # running document counter, also used as the ES _id
data_cnt = 0     # documents accumulated since the last bulk flush
actions = []
s = time.time()
for line in lines:
    data = json.loads(line)
    action = {
        "_index": "youtube",
        "_id":cnt,
        "_source": data
    }
    actions.append(action)
    data_cnt+=1
    if data_cnt == 20000:
        # Flush a full batch; helpers.bulk returns (success_count, errors).
        a = helpers.bulk(es, actions)
        actions = []
        data_cnt = 0
    cnt+=1
    print(cnt)
# Flush the final partial batch left over after the loop.
a = helpers.bulk(es, actions)
e = time.time()
print("{}s".format(e-s))
f.close() | timothyliu0912/db_project | db/c.py | c.py | py | 641 | python | en | code | 0 | github-code | 36 |
12338769948 | ################011011100110010101101111####
### neo Command Line #######################
############################################
def getcmdlist():
    """Return the mapping of neoCL command codes to their descriptions."""
    return {
        "os": "Open active Schedule View in Excel.",
        "ov": "Open selected views in Project Browser.",
    }
def runcmd(cmd, msg, recallCL=False):
    """Dispatch a neoCL command code to its implementation.

    :param cmd: two-letter command code (see getcmdlist()).
    :param msg: message text; not used by this dispatcher itself.
    :param recallCL: forwarded to the unknown-command handler so the
        command line can be shown again.
    """
    if cmd == 'os':
        # Imported lazily so only the module for the chosen command loads.
        from lib.views import neocl_open_schedule_xl as os
        os.ExportActiveScheduleViewToExcel()
    elif cmd == 'ov':
        from lib.views import neocl_views_open as ov
        ov.OpenSelectedViews()
    else:
        from neocl import unknowncmd
unknowncmd(cmd, recallCL, getcmdlist()) | 0neo/pyRevit.neoCL | neoCL.extension/neocl_o.py | neocl_o.py | py | 700 | python | en | code | 7 | github-code | 36 |
40554209630 | """
Stack-In-A-Box: Stack Management
"""
import logging
import re
import threading
import uuid
import six
logger = logging.getLogger(__name__)
class ServiceAlreadyRegisteredError(Exception):
    """Raised when a StackInABoxService with a duplicate name is registered."""
class StackInABox(object):
    """Stack-In-A-Box Testing Service.
    StackInABox provides a testing framework for RESTful APIs
    The framework provides a thread-local instance holding the
    StackInABoxService objects that are representing the
    RESTful APIs.
    The StackInABox object provides a means of accessing it
    from anywhere in a thread; however, it is not necessarily
    thread-safe at this time. If one is careful to setup StackInABox
    and write StackInABoxService's that are thread-safe
    themselves, then there is no reason it could not be used in a
    multi-threaded or multi-processed test.
    """
    @classmethod
    def get_thread_instance(cls):
        """
        Interface to the thread storage to ensure the instance properly exists
        """
        create = False
        # if the `instance` property doesn't exist
        if not hasattr(local_store, 'instance'):
            local_store.instance = None
            create = True
        # if the instance doesn't exist at all
        elif local_store.instance is None:
            create = True
        # if it's something else entirely...
        elif not isinstance(local_store.instance, cls):
            local_store.instance = None
            create = True
        # if the above conditions are met, create it
        if create:
            logger.debug('Creating new StackInABox instance...')
            local_store.instance = cls()
            logger.debug(
                'Created StackInABox({0})'.format(local_store.instance.__id)
            )
        return local_store.instance
    @classmethod
    def reset_services(cls):
        """Reset the thread's StackInABox instance."""
        logger.debug('Resetting services')
        return cls.get_thread_instance().reset()
    @classmethod
    def register_service(cls, service):
        """Add a service to the thread's StackInABox instance.
        :param service: StackInABoxService instance to add to the test
        For return value and errors see StackInABox.register()
        """
        logger.debug('Registering service {0}'.format(service.name))
        return cls.get_thread_instance().register(service)
    @classmethod
    def call_into(cls, method, request, uri, headers):
        """Make a call into the thread's StackInABox instance.
        :param method: HTTP Method (e.g GET, POST)
        :param request: a Request object containing the request data
        :param uri: the URI of the request submitted with the method
        :param headers: the return headers in a Case-Insensitive dict
        For return value and errors see StackInABox.call()
        """
        logger.debug('Request: {0} - {1}'.format(method, uri))
        return cls.get_thread_instance().call(method,
                                              request,
                                              uri,
                                              headers)
    @classmethod
    def hold_onto(cls, name, obj):
        """Add data into the a storage area provided by the framework.
        Note: The data is stored with the thread local instance.
        :param name: name of the data to be stored
        :param obj: data to be stored
        For return value and errors see StackInABox.into_hold()
        """
        logger.debug('Holding on {0} of type {1} with id {2}'
                     .format(name, type(obj), id(obj)))
        cls.get_thread_instance().into_hold(name, obj)
    @classmethod
    def hold_out(cls, name):
        """Get data from the storage area provided by the framework.
        Note: The data is retrieved from the thread local instance.
        :param name: name of the data to be retrieved
        :returns: The data associated with the specified name.
        For errors see StackInABox.from_hold()
        """
        logger.debug('Retreiving {0} from hold'
                     .format(name))
        obj = cls.get_thread_instance().from_hold(name)
        logger.debug('Retrieved {0} of type {1} with id {2} from hold'
                     .format(name, type(obj), id(obj)))
        return obj
    @classmethod
    def update_uri(cls, uri):
        """Set the URI of the StackInABox framework.
        :param uri: the base URI used to match the service.
        """
        logger.debug('Request: Update URI to {0}'.format(uri))
        cls.get_thread_instance().base_url = uri
    def __init__(self):
        """Initialize the StackInABox instance.
        Default Base URI is '/'.
        There are no services registered, and the storage hold
        is a basic dictionary object used as a key-value store.
        """
        self.__id = uuid.uuid4()
        self.__base_url = '/'
        self.services = {
        }
        self.holds = {
        }
    @staticmethod
    def __get_service_url(base_url, service_name):
        """Get the URI for a given StackInABoxService.
        Note: this is an internal function
        :param base_url: base URL to use
        :param service_name: name of the service the URI is for
        """
        return '{0}/{1}'.format(base_url, service_name)
    @staticmethod
    def get_services_url(url, base_url):
        """Get the URI from a given URL.
        :returns: URI within the URL
        """
        # Skip past the scheme (when present) plus the base URL to leave
        # only the service-relative part of the URL.
        length = len(base_url)
        checks = ['http://', 'https://']
        for check in checks:
            if url.startswith(check):
                length = length + len(check)
                break
        result = url[length:]
        logger.debug('{0} from {1} equals {2}'
                     .format(base_url, url, result))
        return result
    @property
    def base_url(self):
        """Base URL property."""
        return self.__base_url
    @base_url.setter
    def base_url(self, value):
        """Set the Base URL property, updating all associated services."""
        logger.debug('StackInABox({0}): Updating URL from {1} to {2}'
                     .format(self.__id, self.__base_url, value))
        self.__base_url = value
        for k, v in six.iteritems(self.services):
            matcher, service = v
            service.base_url = StackInABox.__get_service_url(value,
                                                             service.name)
            logger.debug('StackInABox({0}): Service {1} has url {2}'
                         .format(self.__id, service.name, service.base_url))
    def reset(self):
        """Reset StackInABox to a like-new state."""
        logger.debug('StackInABox({0}): Resetting...'
                     .format(self.__id))
        for k, v in six.iteritems(self.services):
            matcher, service = v
            logger.debug('StackInABox({0}): Resetting Service {1}'
                         .format(self.__id, service.name))
            service.reset()
        self.services = {}
        self.holds = {}
        logger.debug('StackInABox({0}): Reset Complete'
                     .format(self.__id))
    def register(self, service):
        """Add a service to the thread's StackInABox instance.
        :param service: StackInABoxService instance to add to the test
        :returns: None
        :raises: ServiceAlreadyRegisteredError if the service already exists
        """
        if service.name not in self.services.keys():
            logger.debug('StackInABox({0}): Registering Service {1}'
                         .format(self.__id, service.name))
            # Anchor the match at '^/<service-name>/' so requests are
            # routed to services by URI prefix.
            regex = '^/{0}/'.format(service.name)
            self.services[service.name] = [
                re.compile(regex),
                service
            ]
            service.base_url = StackInABox.__get_service_url(self.base_url,
                                                             service.name)
            logger.debug('StackInABox({0}): Service {1} has url {2}'
                         .format(self.__id, service.name, service.base_url))
        else:
            raise ServiceAlreadyRegisteredError(
                'Service {0} is already registered'.format(service.name))
    def call(self, method, request, uri, headers):
        """Make a call into the thread's StackInABox instance.
        :param method: HTTP Method (e.g GET, POST)
        :param request: a Request object containing the request data
        :param uri: the URI of the request submitted with the method
        :param headers: the return headers in a Case-Insensitive dict
        :returns: A tuple containing - (i) the Status Code, (ii) the response
                  headers, and (iii) the response body data
        This function should not emit any Exceptions
        """
        logger.debug('StackInABox({0}): Received call to {1} - {2}'
                     .format(self.__id, method, uri))
        service_uri = StackInABox.get_services_url(uri, self.base_url)
        for k, v in six.iteritems(self.services):
            matcher, service = v
            logger.debug('StackInABox({0}): Checking if Service {1} handles...'
                         .format(self.__id, service.name))
            logger.debug('StackInABox({0}): ...using regex pattern {1} '
                         'against {2}'
                         .format(self.__id, matcher.pattern, service_uri))
            if matcher.match(service_uri):
                logger.debug('StackInABox({0}): Trying Service {1} handler...'
                             .format(self.__id, service.name))
                try:
                    service_caller_uri = service_uri[(len(service.name) + 1):]
                    return service.request(method,
                                           request,
                                           service_caller_uri,
                                           headers)
                except Exception as ex:
                    # 596: the matched service raised while handling the
                    # request (framework-specific pseudo status code).
                    logger.exception('StackInABox({0}): Service {1} - '
                                     'Internal Failure'
                                     .format(self.__id, service.name))
                    return (596,
                            headers,
                            'Service Handler had an error: {0}'.format(ex))
        # 597: no registered service matched this URI.
        return (597, headers, 'Unknown service - {0}'.format(service_uri))
    def into_hold(self, name, obj):
        """Add data into the a storage area provided by the framework.
        Note: The data is stored with the thread local instance.
        :param name: name of the data to be stored
        :param obj: data to be stored
        :returns: N/A
        :raises: N/A
        """
        logger.debug('StackInABox({0}): Holding onto {1} of type {2} '
                     'with id {3}'
                     .format(self.__id, name, type(obj), id(obj)))
        self.holds[name] = obj
    def from_hold(self, name):
        """Get data from the storage area provided by the framework.
        Note: The data is retrieved from the thread local instance.
        :param name: name of the data to be retrieved
        :returns: The data associated with the specified name.
        :raises: Lookup/KeyError error if the name does not match
                 a value in the storage
        """
        logger.debug('StackInABox({0}): Retreiving {1} from the hold'
                     .format(self.__id, name))
        obj = self.holds[name]
        logger.debug('StackInABox({0}): Retrieved {1} of type {2} with id {3}'
                     .format(self.__id, name, type(obj), id(obj)))
        return obj
# Thread local instance of StackInABox
# Each thread gets its own instance via StackInABox.get_thread_instance().
local_store = threading.local()
| TestInABox/stackInABox | stackinabox/stack.py | stack.py | py | 11,760 | python | en | code | 7 | github-code | 36 |
36570577773 | from haystack.forms import SearchForm
from django import forms
from haystack.query import SearchQuerySet
from haystack.query import SQ
from peeldb.models import City
valid_time_formats = ["%Y-%m-%d 00:00:00"]
class job_searchForm(SearchForm):
    """Haystack search form for job posts with status 'Live'.

    All filter fields are optional; search() composes SQ filters over the
    live index based on whichever fields were supplied.
    """
    q = forms.CharField(max_length=200, required=False)
    location = forms.CharField(required=False)
    experience = forms.IntegerField(required=False)
    salary = forms.IntegerField(required=False)
    job_type = forms.CharField(required=False)
    industry = forms.CharField(required=False)
    functional_area = forms.CharField(required=False)
    walkin_from_date = forms.DateField(required=False)
    walkin_to_date = forms.DateField(required=False)
    walkin_type = forms.CharField(required=False)
    refine_location = forms.CharField(required=False)
    def search(self):
        """Build the filtered SearchQuerySet.

        Returns all 'Live' results when the form is invalid, a filtered
        queryset when both q and location were given, and [] otherwise.
        """
        # sqs = SearchQuerySet().models(JobPost).filter(status='Live')
        sqs = SearchQuerySet()
        sqs = sqs.filter_and(status="Live")
        if not self.is_valid():
            return sqs
        if self.cleaned_data["q"] and self.cleaned_data["location"]:
            # Strip list-literal punctuation so a value like "['java','sql']"
            # becomes a plain comma-separated string before splitting.
            term = self.cleaned_data["q"]
            term = term.replace("[", "")
            term = term.replace("]", "")
            term = term.replace("'", "")
            # sqs = sqs.filter_and(SQ(title=term) | SQ(designation=term)| SQ(skills=term))
            terms = [t.strip() for t in term.split(",")]
            sqs = sqs.filter_and(
                SQ(title__in=terms) | SQ(designation__in=terms) | SQ(skills__in=terms)
            )
            # sqs = sqs.filter_or(SQ(designation__in=terms))
            # sqs = sqs.filter_or(SQ(skills__in=terms))
            location = self.cleaned_data["location"]
            location = location.replace("[", "")
            location = location.replace("]", "")
            location = location.replace("'", "")
            locations = [t.strip() for t in location.split(",")]
            # Also match on parent cities so a suburb search hits the metro.
            other_cities = City.objects.filter(name__in=locations).values_list(
                "parent_city__name", flat=True
            )
            sqs = sqs.filter_and(
                SQ(location__in=locations)
                | SQ(location__startswith=self.cleaned_data["location"])
                | SQ(location__in=other_cities)
            )
            if self.cleaned_data["job_type"]:
                sqs = sqs.filter_and(job_type=self.cleaned_data["job_type"])
            if self.cleaned_data["industry"]:
                term = self.cleaned_data["industry"]
                # sqs = sqs.filter_and(SQ(title=term) | SQ(designation=term)| SQ(skills=term))
                terms = [t.strip() for t in term.split(",")]
                sqs = sqs.filter_or(industry__in=terms)
            if self.cleaned_data["functional_area"]:
                term = self.cleaned_data["functional_area"]
                # sqs = sqs.filter_and(SQ(title=term) | SQ(designation=term)| SQ(skills=term))
                terms = [t.strip() for t in term.split(",")]
                sqs = sqs.filter_or(functional_area__in=terms)
            # NOTE(review): the filter_or calls below widen rather than
            # narrow the result set -- confirm that OR semantics (and not
            # filter_and) are intended for experience/salary.
            if self.cleaned_data["experience"] or self.cleaned_data["experience"] == 0:
                sqs = sqs.filter_or(
                    SQ(max_experience__gte=self.cleaned_data["experience"])
                    & SQ(min_experience__lte=self.cleaned_data["experience"])
                )
            if self.cleaned_data["salary"]:
                sqs = sqs.filter_or(
                    SQ(max_salary__gte=self.cleaned_data["salary"])
                    & SQ(min_salary__lte=self.cleaned_data["salary"])
                )
            if self.cleaned_data["walkin_type"]:
                import datetime
                if self.cleaned_data["walkin_type"] == "this_week":
                    # Week runs Sunday..Saturday (weekday() minus one day).
                    date = datetime.date.today()
                    start_week = (
                        date
                        - datetime.timedelta(date.weekday())
                        - datetime.timedelta(1)
                    )
                    end_week = start_week + datetime.timedelta(6)
                    start_week = datetime.datetime.strptime(
                        str(start_week), "%Y-%m-%d"
                    ).strftime("%Y-%m-%d")
                    end_week = datetime.datetime.strptime(
                        str(end_week), "%Y-%m-%d"
                    ).strftime("%Y-%m-%d")
                    sqs = sqs.filter_and(
                        SQ(walkin_from_date__range=[start_week, end_week])
                        | SQ(walkin_to_date__range=[start_week, end_week])
                    )
                if self.cleaned_data["walkin_type"] == "next_week":
                    date = datetime.date.today()
                    start_week = (
                        date
                        - datetime.timedelta(date.isoweekday())
                        + datetime.timedelta(7)
                    )
                    end_week = start_week + datetime.timedelta(6)
                    start_week = datetime.datetime.strptime(
                        str(start_week), "%Y-%m-%d"
                    ).strftime("%Y-%m-%d")
                    end_week = datetime.datetime.strptime(
                        str(end_week), "%Y-%m-%d"
                    ).strftime("%Y-%m-%d")
                    sqs = sqs.filter_and(
                        SQ(walkin_from_date__range=[start_week, end_week])
                        | SQ(walkin_to_date__range=[start_week, end_week])
                    )
                    # sqs = sqs.filter_and(SQ(walkin_from_date__range=[start_week, end_week]) | SQ(walkin_to_date__range=[start_week, end_week]))
                if self.cleaned_data["walkin_type"] == "this_month":
                    current_date = datetime.date.today()
                    from dateutil.relativedelta import relativedelta
                    from datetime import date
                    # relativedelta(day=31) clamps to the month's last day.
                    start_week = date(current_date.year, current_date.month, 1)
                    end_week = start_week + relativedelta(day=31)
                    start_week = datetime.datetime.strptime(
                        str(start_week), "%Y-%m-%d"
                    ).strftime("%Y-%m-%d")
                    end_week = datetime.datetime.strptime(
                        str(end_week), "%Y-%m-%d"
                    ).strftime("%Y-%m-%d")
                    sqs = sqs.filter_and(
                        SQ(walkin_from_date__range=[start_week, end_week])
                        | SQ(walkin_to_date__range=[start_week, end_week])
                    )
                # if self.cleaned_data['walkin_type'] == 'next_month':
                #     pass
                if self.cleaned_data["walkin_type"] == "custom_range":
                    if self.cleaned_data["walkin_from_date"]:
                        walkin_from_date = datetime.datetime.strptime(
                            str(self.cleaned_data["walkin_from_date"]), "%Y-%m-%d"
                        ).strftime("%Y-%m-%d")
                        sqs = sqs.filter_and(
                            SQ(walkin_from_date__gte=walkin_from_date)
                            | SQ(walkin_to_date__gte=walkin_from_date)
                        )
                    if self.cleaned_data["walkin_to_date"]:
                        # NOTE(review): walkin_from_date__gte compared with
                        # the *to* date looks inverted -- confirm intent.
                        walkin_to_date = datetime.datetime.strptime(
                            str(self.cleaned_data["walkin_to_date"]), "%Y-%m-%d"
                        ).strftime("%Y-%m-%d")
                        sqs = sqs.filter_and(
                            SQ(walkin_from_date__gte=walkin_to_date)
                            | SQ(walkin_to_date__lte=walkin_to_date)
                        )
            return sqs
        else:
            return []
    def query(self):
        """Return the raw search term, or None when it was not supplied."""
        if self.cleaned_data["q"]:
            return self.cleaned_data["q"]
        return None
# 13-11-2014
# 20-11-2014 29-11-2014
| MicroPyramid/opensource-job-portal | search/forms.py | forms.py | py | 7,863 | python | en | code | 336 | github-code | 36 |
22234739193 | from pykinect2 import PyKinectV2
from pykinect2.PyKinectV2 import *
from pykinect2 import PyKinectRuntime
import numpy as np
import cv2
import time
# Earlier Kinect color-frame experiment, kept for reference:
# kinect = PyKinectRuntime.PyKinectRuntime(PyKinectV2.FrameSourceTypes_Color)
# while True:
#     if kinect.has_new_color_frame():
#         frame = kinect.get_last_color_frame()
#         print(np.shape(frame))
#     time.sleep(0.5)
# Live webcam loop: detect a 9x6 circle calibration grid and display a
# thresholded view with the detected grid overlaid. ESC (key 27) exits.
cap = cv2.VideoCapture(0)
while True:
    updated,frame = cap.read()
    gray = cv2.cvtColor(frame,cv2.COLOR_BGR2GRAY)
    cols = 9
    rows = 6
    # findCirclesGrid takes (num_cols,num_rows)
    ret,corners = cv2.findCirclesGrid(gray,(cols,rows),None)
    # once the grid is found, press a button to start tracking and make everything around it dark
    # if it's not a dark point, then make it white
    # THRESH_TOZERO keeps pixel values above 100 and zeroes the rest.
    ret1,thresh = cv2.threshold(gray,100,255,cv2.THRESH_TOZERO)
    cv2.drawChessboardCorners(thresh,(cols,rows),corners,ret)
    cv2.imshow('thresh',thresh)
    k = cv2.waitKey(1)
    if k == 27:
        break
cv2.destroyAllWindows() | zachvin/KinectImaging | tests.py | tests.py | py | 996 | python | en | code | 0 | github-code | 36 |
5577508746 | import json
with open('firm.txt', 'r', encoding='utf-8') as f:
    # Each line is a space-separated record; keep the split fields.
    data = []
    for line in f:
        line = line.replace("\n", "")
        string = line.split(" ")
        data.append(string)
average = 0
avg_firms = 0
diction = {}
for el in data:
    # Profit = field 2 minus field 3 (presumably revenue and costs --
    # TODO confirm against the firm.txt column layout).
    profit = int(el[2]) - int(el[3])
    diction.update({el[0]:profit})
    if profit > 0:
        average = average + profit
        avg_firms += 1
# NOTE(review): raises ZeroDivisionError when no firm is profitable.
average = average / avg_firms
avg_dict = {"Average profit": average}
summary = [diction, avg_dict]
with open('firm.json', 'w', encoding='utf-8') as f:
    json.dump(summary, f)
| Ilyagradoboev/geekproject | lesson_5.7.py | lesson_5.7.py | py | 580 | python | en | code | 0 | github-code | 36 |
26285333278 | import os, requests, colorama
from colorama import Fore
green = Fore.GREEN
red = Fore.RED
yellow = Fore.YELLOW
reset = Fore.RESET
#banner
banner = """
__ __ __
/ / / /___ _____/ /_
/ /_/ / __ \/ ___/ __/
/ __ / /_/ (__ ) /_
/_/_/_/\____/____/\__/_ ____ ___ _____
/ ___/ ___/ __ `/ __ \/ __ \/ _ \/ ___/
(__ ) /__/ /_/ / / / / / / / __/ /
/____/\___/\__,_/_/ /_/_/ /_/\___/_/ v.1.0
"""
os.system("clear")
print(green + banner + reset)
print(green + "[1]" + reset + "Dialog")
print(green + "[2]" + reset + "Mobitel")
print(green + "[3]" + reset + "Airtel")
# Prompt until the user picks a valid ISP number (1-3).
while True:
    try:
        isp = int(input(yellow + "Please select your ISP " + green + ">>>" + reset))
        if 0 < isp <= 3:
            break
        else:
            print(red+"Invalid value, please try agein!!"+reset)
            continue
    except:
        # NOTE(review): bare except also swallows KeyboardInterrupt.
        print(red+"Invalid value, please try agein!!"+reset)
        continue
# Each ISP has a known always-reachable ("zero balance") probe host.
isp_selfcare = ""
if isp == 1:
    isp_selfcare = "www.dialog.lk"
elif isp == 2:
    isp_selfcare = "202.129.235.210"
elif isp == 3:
    isp_selfcare = "staysafe.gov.lk"
else:
    isp_selfcare = "none"
pms = False
host_list = []
# Connectivity check: if the probe host is unreachable, abort scanning.
try :
    requests.post(f"http://{isp_selfcare}", timeout=2)
    pms = True
except:
    pms = False
    print(red + "OOPs...Your internet connection is not stable, Please Try agein!" + reset)
if pms == True:
    # Ask for an input host list until the file can be opened.
    while True:
        try:
            ask_host_list = input(yellow + "Enter host list " + green + ">>>" + reset)
            h_list = open(str(ask_host_list), "r")
            new_file = input(yellow + "Enter name of output file " + green + ">>>" + reset)
            break
        except:
            print(red + "Please check your host list and try agein!" + reset)
            continue
    # Keep only hosts that answer an HTTP POST within 5 seconds.
    for x in h_list:
        try:
            requests.post(f"http://{x.strip()}", timeout=5)
            host_list.append(x)
        except:
            pass
    # Write the reachable hosts (lines keep their original newlines).
    with open(f"{new_file}.txt", "w+") as file1:
        for x in host_list:
            file1.writelines(x)
    print(green + "done" + reset)
else:
    print("Fuck")
| Nadeesha-Prasad/Zero-Balance-Host-Scanner-For-Linux | hscan.py | hscan.py | py | 2,271 | python | en | code | 1 | github-code | 36 |
20466647981 | """
왕실의 나이트
1) x, y축 범위 벗어나는지 체크
2) 2가지 경우의 수로 이동해보기
3) 이동 가능한 count 출력
"""
import sys
# Read a chessboard square like "a1": file letter a-h followed by rank 1-8.
location = sys.stdin.readline()
# ord('a') == 97, so subtracting 96 maps 'a'..'h' onto 1..8.
# NOTE(review): assumes lowercase input; 'A1' would map off the board.
x, y = (ord(location[0]) - 96), int(location[1])
count = 0
def search(current_x, current_y, dx, dy):
    # Try the four offsets (dx[i], dy[i]) and count destinations that stay
    # on the 8x8 board. Accumulates into the module-level `count`.
    global count
    for i in range(4):
        nx = current_x + dx[i]
        ny = current_y + dy[i]
        if 0 < nx <= 8 and 0 < ny <= 8:
            count += 1
    return count
# The two calls of four offsets each cover all 8 possible knight moves.
search(x, y, [2, 2, -2, -2], [1, -1, 1, -1])
search(x, y, [1, -1, 1, -1], [2, 2, -2, -2])
print(count) | roum02/algorithm | implementation/practice4-2.py | practice4-2.py | py | 580 | python | ko | code | 0 | github-code | 36 |
30351631272 |
def load_and_get_stats(filename):
    """Reads .wav file and returns data, sampling frequency, and length (time) of audio clip."""
    import scipy.io.wavfile as siow
    sampling_rate, amplitude_vector = siow.read(filename)
    # Frame count / frames-per-second gives clip duration in seconds.
    # NOTE(review): for stereo files amplitude_vector is 2-D; shape[0] is
    # still the frame count, so the duration stays correct.
    wav_length = amplitude_vector.shape[0] / sampling_rate
    return sampling_rate, amplitude_vector, wav_length
def plot_wav_curve(filename, sampling_rate, amplitude_vector, wav_length):
    """Plots amplitude curve for a particular audio clip."""
    import matplotlib.pyplot as plt
    import numpy as np
    # One time tick per sample, evenly spread over the clip's duration.
    time = np.linspace(0, wav_length, amplitude_vector.shape[0])
    plt.plot(time, amplitude_vector)
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude')
    # NOTE(review): `filename` is accepted but not used in the title.
    plt.title(f'(unknown) - viewed at {sampling_rate} samples/sec')
    plt.show()
def split_audio_into_chunks(sampling_rate, amplitude_vector, chunk_size):
    """Group the amplitude vector into rows of chunk_size milliseconds each.

    Returns a (n_chunks, samples_per_chunk) reshaped array plus the trailing
    samples that did not fill a whole chunk.
    """
    # One sample lasts (1 / sampling_rate) * 1000 ms, so a chunk holds:
    samples_per_chunk = int(chunk_size / ((1 / sampling_rate) * 1000))
    n_chunks = len(amplitude_vector) // samples_per_chunk
    split_at = n_chunks * samples_per_chunk
    chunked = amplitude_vector[:split_at].reshape((n_chunks, samples_per_chunk))
    return chunked, amplitude_vector[split_at:]
def apply_fourier_transform(chunked_audio):
    """Apply fourier transform to chunked audio snippets to break up each chunk into vector of scores for each frequency band. Aggregates score vectors for each snippet into spectogram to be fed into neural network."""
    # TODO: not implemented yet -- placeholder only.
    pass
if __name__ == '__main__':
sampling_rate, amplitude_vector, wav_length = load_and_get_stats('hello.wav')
data, leftovers = split_audio_into_chunks(sampling_rate, amplitude_vector, 20) | Sychee/Piano-Audio-Classifier | audio_to_spectogram.py | audio_to_spectogram.py | py | 1,820 | python | en | code | 0 | github-code | 36 |
41203564707 | import cv2
import numpy as np
from PIL import Image
# Live face-recognition loop: detect faces with a Haar cascade, identify
# them with a trained LBPH recognizer, and label the video feed.
# NOTE(review): createLBPHFaceRecognizer/load are OpenCV 2.x API names;
# OpenCV 3+ renamed them (cv2.face.LBPHFaceRecognizer_create / read).
facedetect = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')#create a cascade classifier using haar cascade
cam = cv2.VideoCapture(0)#creates a video capture object
rec=cv2.createLBPHFaceRecognizer()#create a recognizer object
rec.load("test_trainingdata.yml")#load the training data
id=0
fontFace = cv2.FONT_HERSHEY_SIMPLEX#font to write the name of the person in the image
fontscale = 1
fontcolor = (255, 255, 255)
while(True):
    ret, img= cam.read() #capture the frames from the camera object
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)#convert the frame into grayscale
    faces = facedetect.detectMultiScale(gray,1.3,5)#detect and extract faces from images
    for (x,y,w,h) in faces:
        cv2.rectangle(img,(x,y),(x+w,y+h),(0,0,255),2)
        id,conf=rec.predict(gray[y:y+h,x:x+w])#Recognize the Id of the user
        # Map the recognizer's numeric label to a person's name.
        if(id==8):
            id="Saurav"
        elif(id == 1):
            id = "Upasana"
        elif(id == 3):
            id = "Nayan Sir"
        elif(id == 4):
            id = "Arnab Sir"
        elif(id == 5):
            id = "kabir"
        elif(id == 6):
            id = "Aakangsha"
        elif (id==7):
            id = "Anish"
        else:
            id="unknown"
        cv2.putText(img,str(id),(x,y+h),fontFace,fontscale,fontcolor)#Put predicted Id/Name and rectangle on detected face
    cv2.imshow('img',img)
    # 'q' quits the loop.
    if(cv2.waitKey(1) ==ord('q')):
        break;
    cam.release() #close the camera
cv2.destroyAllWindows() #close all windows | UPASANANAG/Face-Recognizer | facedetector.py | facedetector.py | py | 1,437 | python | en | code | 0 | github-code | 36 |
22868444012 | import pandas as pd
import numpy as np
from nltk.corpus import stopwords
nltk_stopwords = stopwords.words('english')
# Sklearn TF-IDF Libraries
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.feature_extraction.text import CountVectorizer
from sklearn.metrics.pairwise import cosine_similarity
# Module-level corpus: loaded once at import time so every search() call
# reuses the same DataFrame.
df_dataset = pd.read_csv("../crawler/layer_three_data.csv")
print("Database loaded in search function")
df_dataset = df_dataset.drop_duplicates(subset=['df_paper_title']) # remove duplicates
def search(keyword):
    """Return the 100 papers whose titles best match `keyword`.

    Builds a TF-IDF index over the paper titles, ranks rows by cosine
    similarity to the query, and returns the top 100 as a DataFrame with
    Title/URL/Abstract/Author/Date columns (best match first).
    """
    vectorizer = TfidfVectorizer()
    # Index paper titles
    X = vectorizer.fit_transform(df_dataset['df_paper_title'])
    query_vec = vectorizer.transform([keyword]) # Ip -- (n_docs,x), Op -- (n_docs,n_Feats)
    results = cosine_similarity(X, query_vec).reshape((-1,))
    search_result = []
    # Print Top 100 results
    # argsort()[-100:][::-1] yields indices of the 100 highest scores,
    # highest first.
    data = {}
    df_data = pd.DataFrame(columns=["Title", "URL", "Abstract", "Author", "Date"])
    for i in results.argsort()[-100:][::-1]:
        data["Title"] = df_dataset.iloc[i, 0]
        data["URL"] = df_dataset.iloc[i, 1]
        data["Abstract"] = df_dataset.iloc[i, 2]
        data["Author"] = df_dataset.iloc[i, 3]
        data["Date"] = df_dataset.iloc[i, 4]
        # NOTE(review): DataFrame.append is deprecated in modern pandas;
        # collecting dicts and building the frame once would be faster.
        df_data = df_data.append(data, ignore_index=True)
    # df_data = df_data.to_numpy()
    print(df_data)
    return df_data
| chois11/7071CEM-R | resources/backend/search_engine.py | search_engine.py | py | 1,376 | python | en | code | 0 | github-code | 36 |
26009651155 | import argparse
import os
import random
import re
import subprocess
import time
# Command-line options for the E-prover batch runner.
parser = argparse.ArgumentParser()
parser.add_argument(
    "-n", "--number", help="max number of problems to attempt", type=int
)
parser.add_argument(
    "-r", "--random", help="attempt problems in random order", action="store_true"
)
parser.add_argument("-s", "--seed", help="random number seed", type=int)
parser.add_argument(
    "-t", "--time", help="time limit per problem", type=float, default=60.0
)
parser.add_argument("files", nargs="*")
args = parser.parse_args()
# Supplying a seed implies random order.
if args.seed is not None:
    args.random = 1
    random.seed(args.seed)
if not args.files:
    args.files = ["tptp"]
tptp = os.getenv("TPTP")
if not tptp:
    raise Exception("TPTP environment variable not set")
# Expand each argument into concrete problem files. Accepted forms:
# "tptp" (whole library), a 3-letter domain code ("GRP"), a full problem
# name ("GRP001-1"), a directory, a .lst list file, or a literal path.
problems = []
for arg in args.files:
    if arg.lower() == "tptp":
        arg = tptp
    elif re.match(r"[A-Za-z][A-Za-z][A-Za-z]$", arg):
        arg = arg.upper()
        arg = os.path.join(tptp, "Problems", arg)
    elif re.match(r"[A-Za-z][A-Za-z][A-Za-z]\d\d\d.\d+$", arg):
        arg = arg.upper()
        arg = os.path.join(tptp, "Problems", arg[:3], arg + ".p")
    if os.path.isdir(arg):
        # "^" marks higher-order problems and "_" marks typed variants;
        # both are skipped.
        for root, dirs, files in os.walk(arg):
            for file in files:
                ext = os.path.splitext(file)[1]
                if ext == ".p" and "^" not in file and "_" not in file:
                    problems.append(os.path.join(root, file))
        continue
    if arg.endswith(".lst"):
        for s in open(arg):
            if "^" not in s:
                problems.append(s.rstrip())
        continue
    problems.append(arg)
if args.random:
    random.shuffle(problems)
if args.number:
    problems = problems[0 : args.number]
def difficulty(file):
    """Return a TPTP problem's difficulty rating as a string, or "?".

    Scans the header comments for the "% Rating" field. TPTP headers pad
    the field name to align the colons (e.g. "% Rating   : 0.43 v8.1.0"),
    so the whitespace before the colon must be matched flexibly; the old
    single-space pattern never matched real headers.
    """
    with open(file) as f:
        for s in f:
            m = re.match(r"% Rating\s*: (\d+\.\d+)", s)
            if m:
                return m[1]
    return "?"
# Run E on each problem and print one tab-separated row per problem:
# name, rating, elapsed seconds, output line count, SZS status.
for file in problems:
    print(os.path.basename(file), end="\t")
    print(difficulty(file), end="\t", flush=True)
    # --auto makes a big difference to performance
    # don't use --auto-schedule
    # for some reason, it breaks the subprocess timeout feature
    cmd = "bin/eprover", "--auto", "-p", file
    t = time.time()
    try:
        p = subprocess.run(
            cmd, capture_output=True, encoding="utf-8", timeout=args.time
        )
        # if p.returncode == 3:
        #     print()
        #     continue
        # 0/1/9 are treated as normal E exits; anything else is an error.
        if p.returncode not in (0, 1, 9):
            raise Exception(p.returncode)
        print("%0.3f" % (time.time() - t), end="\t")
        print(len(p.stdout.splitlines()), end="\t")
        m = re.search(r"SZS status (\w+)", p.stdout)
        r = m[1]
    except subprocess.TimeoutExpired:
        print("%0.3f" % (time.time() - t), end="\t")
        print(0, end="\t")
        r = "Timeout"
    print(r)
| russellw/ayane | script/e.py | e.py | py | 2,820 | python | en | code | 0 | github-code | 36 |
4005001933 | #!/usr/local/bin/python
from config import *
class Primer:
    """
    Primer is an object representing a primer either left or right.
    Two primers are equal if their sequence are the same and their TFGP are equal.
    :param target: the target instance where the primer come from.
    :param sequence: sequence of the primer
    :param left_or_right: left if it's a left primer, right otherwise
    :param relative_pos: relative position (get from compute_optimal_primers_pairs() from target.py)
    :param range_left: get from compute_optimal_primers_pairs() from target.py
    :param penalty: describes how much differ the characteristic of this primer upon optimal characteristics.
    :ivar TFGP: the last position of the primer if left\
    or the first position of the primer if right.
    :ivar primer_hybridization_sites: a dic with chromosome number as key and a list of hybridisation sites as value.
    """
    def __init__(self, sequence, left_or_right, relative_pos, range_left, target, penalty=0):
        self.target = target
        self.sequence = sequence
        self.left_or_right = left_or_right
        self.relative_pos = relative_pos
        self.range_left = range_left
        self.penalty = penalty
        self.TFGP = self.compute_thermofisher_good_pos()
        self.primer_hybridization_sites = {}
    def __str__(self):
        return "sequence: " + self.sequence +\
               "\nleft or right? " + self.left_or_right + \
               "\nThermoFisher Good Position: " + str(self.TFGP) + \
               "\nhybridization sites: " + str(self.primer_hybridization_sites)
    def __eq__(self, other):
        """
        Two primers are equal if their sequence are the same and their TFGP are equal.
        :param other: an another primer instance.
        :return: True if the two primers instance are equal
                 False otherwise
        """
        return (self.sequence == other.sequence) and (self.TFGP == other.TFGP)
    def __hash__(self):
        # NOTE(review): the hash is derived from str(self), which includes
        # the mutable primer_hybridization_sites dict, while __eq__ ignores
        # it -- so the hash can change after construction and equal primers
        # may hash differently. Confirm primers are not mutated while held
        # in sets/dicts.
        return hash(str(self))
    def __ne__(self, other):
        return not self.__eq__(other)
    def reverse_complement(self):
        """
        :return: reverse complement of the primer sequence
        """
        # Raises KeyError if the sequence contains anything but A/C/G/T.
        reverse_seq = self.sequence[::-1]
        complement = {'A': 'T', 'C': 'G', 'G': 'C', 'T': 'A'}
        res = ''
        for base in reverse_seq:
            res += complement[base]
        return res
    def compute_thermofisher_good_pos(self):
        """
        :return: the last position of the left primer or the first position of the right one
        """
        # Converts the primer's window-relative coordinates into a genomic
        # position anchored on the target's mutation position.
        if self.left_or_right == "left":
            first_pos = self.relative_pos
            last_pos = int(first_pos) + len(self.sequence)
            scaled_pos = self.range_left + 1 - last_pos
            pos_primer_scaled = int(self.target.mutation_pos) - int(scaled_pos)
            return pos_primer_scaled
        else:
            last_pos = self.relative_pos
            first_pos = int(last_pos) - len(self.sequence)
            scaled_pos = int(first_pos) - self.range_left + 1
            pos_primer_scaled = int(self.target.mutation_pos) + int(scaled_pos)
            return pos_primer_scaled
    def check_snp_in_primer(self, snp_in_target):
        """
        Checks if a snp is in the primer
        :param snp_in_target: list containing all snp of the target\
        get by get_snp() from target.py
        :return: * -1 if a snp is in the primer sequence
                 * 0 otherwise
        """
        if self.left_or_right == "left":
            primer_interval = [self.TFGP - len(self.sequence), self.TFGP]
        else:
            primer_interval = [self.TFGP, self.TFGP + len(self.sequence)]
        # Only SNPs above the configured gmaf_limit (from config) count;
        # element layout assumed (name?, position, gmaf) -- TODO confirm.
        for element in snp_in_target:
            if primer_interval[0] < element[1] < primer_interval[1] and element[2] > gmaf_limit:
                return -1
        return 0
    def dinucleotides_repeats(self):
        """
        Checks if there is 5 or more consecutive dinucleotides repeats in the primer sequence.
        No more than 4 consecutive repeats.
        :return: * True if the sequence containing such repeats
                 * False otherwise
        """
        # All 12 distinct dinucleotides, each repeated 5 times.
        possible_repeats = ["ATATATATAT", "ACACACACAC", "AGAGAGAGAG",
                            "TATATATATA", "TCTCTCTCTC", "TGTGTGTGTG",
                            "GAGAGAGAGA", "GCGCGCGCGC", "GTGTGTGTGT",
                            "CGCGCGCGCG", "CACACACACA", "CTCTCTCTCT"]
        for repeat in possible_repeats:
            if repeat in self.sequence:
                return True
        return False
| gloubsi/oncodna_primers_design | code/primer.py | primer.py | py | 4,684 | python | en | code | 0 | github-code | 36 |
36060957275 | # list of registered users - pdf - full format
# list of users who availed book - name, ISBN, borrowDate and returnDate
# list of users with fine amount - name and fee pending
# send notification about the due submit and late fee - sends notification
from db_read import database
from fpdf import FPDF
from tkinter import *
from tkinter import messagebox
from twilio.rest import Client
def registered_users_list():
    """Export every registered user to ``Users.pdf`` (one page per user).

    Reads the ``Users`` node from the database and writes each field as a
    ``key->value`` line.  Reports the outcome in a messagebox.
    """
    try:
        users = database.child("Users").get().val()
        pdf = FPDF()
        # Select the font once instead of once per cell.
        pdf.set_font("Arial", size=15)
        for username in users:
            pdf.add_page()
            for field in users[username]:
                pdf.cell(200, 10, txt=field + "->" + users[username][field], ln=1, align='L')
        pdf.output("Users.pdf")
        messagebox.showinfo('Success', "PDF saved Successfully")
    except Exception:
        # `users` is None when the node is empty, making the loop raise.
        # Narrowed from a bare `except:` which also swallowed
        # SystemExit/KeyboardInterrupt.
        messagebox.showerror('Error', "No Users.")
def borrower():
    """Open an 'Add Borrower' window and record a book loan in the database.

    On submit the book's stock count is decremented and the loan is stored
    under BorrowerList/<username>/<isbn>.

    NOTE(review): `name`, `isbn`, `title`, `date` and `due_date` are each
    rebound three times below (Label -> StringVar -> Entry); the handler
    therefore reads the Entry widgets directly and the StringVars are
    effectively unused.  Works, but worth untangling.
    """
    def borrower_add():
        # Validate the form, then persist the loan and decrement the stock.
        if name.get() == "" or isbn.get() == "" or title.get() == "" or date.get() == "" or due_date.get() == "":
            messagebox.showerror('Error', "All fields are required", parent=borrower_window)
        else:
            # NOTE(review): `A and B is not None` only checks B when A is
            # truthy; a falsy-but-not-None A would slip through.
            if (database.child("Users").child(name.get()).get().val() and database.child("Books").child(isbn.get()).get().val()) is not None:
                try:
                    quantity = int(database.child("Books").child(isbn.get()).child("Quantity").get().val())
                    if quantity > 0:
                        # One copy leaves the shelf.
                        database.child("Books").child(isbn.get()).update({
                            "Quantity": str(quantity - 1)
                        })
                        data = {
                            "Username": name.get(),
                            "ISBN": isbn.get(),
                            "Title": title.get(),
                            "Date": date.get(),
                            "Due Date": due_date.get()
                        }
                        database.child("BorrowerList").child(name.get()).child(isbn.get()).set(data)
                        messagebox.showinfo('Success', "Data Updated Successfully", parent=borrower_window)
                        borrower_window.destroy()
                    else:
                        messagebox.showerror('Error', "Book currently unavailable.", parent=borrower_window)
                except:
                    # NOTE(review): bare except hides the real failure reason.
                    messagebox.showerror('Error', "Try again later", parent=borrower_window)
                    borrower_window.destroy()
            else:
                messagebox.showerror('Error', "Invalid ISBN or User.", parent=borrower_window)
                borrower_window.destroy()
    # --- window layout -------------------------------------------------
    borrower_window = Tk()
    borrower_window.title('Add Borrower')
    borrower_window.geometry('500x600')
    heading = Label(borrower_window, text="Add Borrower", font=('Times New Roman', 20, 'bold'))
    heading.place(x=80, y=60)
    name = Label(borrower_window, text="Username :", font='Verdana 10 bold')
    name.place(x=80, y=160)
    isbn = Label(borrower_window, text="ISBN :", font='Verdana 10 bold')
    isbn.place(x=80, y=190)
    title = Label(borrower_window, text="Title :", font='Verdana 10 bold')
    title.place(x=80, y=220)
    date = Label(borrower_window, text="Date Borrowed:", font='Verdana 10 bold')
    date.place(x=80, y=250)
    due_date = Label(borrower_window, text="Due Date :", font='Verdana 10 bold')
    due_date.place(x=80, y=280)
    # The StringVars below immediately shadow the Labels above, and are in
    # turn shadowed by the Entry widgets.
    name = StringVar()
    isbn = StringVar()
    title = StringVar()
    date = StringVar()
    due_date = StringVar()
    name = Entry(borrower_window, width=40, textvariable=name)
    name.place(x=200, y=163)
    isbn = Entry(borrower_window, width=40, textvariable=isbn)
    isbn.place(x=200, y=193)
    title = Entry(borrower_window, width=40, textvariable=title)
    title.place(x=200, y=223)
    date = Entry(borrower_window, width=40, textvariable=date)
    date.place(x=200, y=253)
    due_date = Entry(borrower_window, width=40, textvariable=due_date)
    due_date.place(x=200, y=283)
    btn_signup = Button(borrower_window, text="  Update", font=('Bookman antiqua', 12, 'bold'), command=borrower_add,
                        bg='#2176F2',
                        fg='white')
    btn_signup.place(x=200, y=313)
    # Pressing Enter submits the form as well.
    borrower_window.bind('<Return>', lambda event: borrower_add())
    borrower_window.mainloop()
def return_book():
    """Open a 'Return Window' to register a book return.

    On submit the book's stock is incremented, the user's due amount is
    increased by the entered late fee, the loan is removed from
    BorrowerList and an entry is added under ReturnerList/<user>/<isbn>.

    NOTE(review): the quantity update happens *before* the DueList lookup;
    if the user has no DueList entry the later access raises and the stock
    has already been incremented (no transaction / rollback).
    NOTE(review): `name`/`isbn`/`title`/`date`/`late_fees` are each rebound
    Label -> StringVar/IntVar -> Entry; the handler reads the Entry widgets.
    """
    def return_add():
        # Validate, then move the loan from BorrowerList to ReturnerList.
        if name.get() == "" or isbn.get() == "" or title.get() == "" or date.get() == "":
            messagebox.showerror('Error', "All fields are required", parent=return_window)
        else:
            if (database.child("BorrowerList").child(name.get()).child(isbn.get()).get().val()) is not None:
                try:
                    quantity = int(database.child("Books").child(isbn.get()).child("Quantity").get().val())
                    database.child("Books").child(isbn.get()).update({
                        "Quantity": str(quantity + 1)
                    })
                    # Accumulate the late fee onto the user's running total.
                    due_amount = (database.child("DueList").child(name.get()).get().val())
                    amount = int(due_amount['Due Amount'])
                    database.child("DueList").child(name.get()).update({
                        "Due Amount": str(amount + int(late_fees.get()))
                    })
                    data = {
                        "Username": name.get(),
                        "ISBN": isbn.get(),
                        "Title": title.get(),
                        "Date": date.get(),
                        "Due amount": late_fees.get()
                    }
                    database.child("BorrowerList").child(name.get()).child(isbn.get()).remove()
                    database.child("ReturnerList").child(name.get()).child(isbn.get()).set(data)
                    messagebox.showinfo('Success', "Data Updated Successfully", parent=return_window)
                    return_window.destroy()
                except:
                    # NOTE(review): bare except hides the real failure reason.
                    messagebox.showerror('Error', "Try again later", parent=return_window)
                    return_window.destroy()
            else:
                messagebox.showerror('Error', "User haven't borrowed yet.", parent=return_window)
                return_window.destroy()
    # --- window layout -------------------------------------------------
    return_window = Tk()
    return_window.title('Return Window')
    return_window.geometry('500x600')
    heading = Label(return_window, text="Add Returner", font=('Times New Roman', 20, 'bold'))
    heading.place(x=80, y=60)
    name = Label(return_window, text="Username :", font='Verdana 10 bold')
    name.place(x=80, y=160)
    isbn = Label(return_window, text="ISBN :", font='Verdana 10 bold')
    isbn.place(x=80, y=190)
    title = Label(return_window, text="Title :", font='Verdana 10 bold')
    title.place(x=80, y=220)
    date = Label(return_window, text="Return Date:", font='Verdana 10 bold')
    date.place(x=80, y=250)
    late_fees = Label(return_window, text="Due amount :", font='Verdana 10 bold')
    late_fees.place(x=80, y=280)
    # Shadowing: variables replace the Labels, Entries replace the variables.
    name = StringVar()
    isbn = StringVar()
    title = StringVar()
    date = StringVar()
    late_fees = IntVar(return_window, value=0)
    name = Entry(return_window, width=40, textvariable=name)
    name.place(x=200, y=163)
    isbn = Entry(return_window, width=40, textvariable=isbn)
    isbn.place(x=200, y=193)
    title = Entry(return_window, width=40, textvariable=title)
    title.place(x=200, y=223)
    date = Entry(return_window, width=40, textvariable=date)
    date.place(x=200, y=253)
    late_fees = Entry(return_window, width=40, textvariable=late_fees)
    late_fees.place(x=200, y=283)
    btn_signup = Button(return_window, text="  Update", font=('Bookman antiqua', 12, 'bold'), command=return_add,
                        bg='#2176F2',
                        fg='white')
    btn_signup.place(x=200, y=313)
    # Pressing Enter submits the form as well.
    return_window.bind('<Return>', lambda event: return_add())
    return_window.mainloop()
def pdf_borrower():
    """Export all current borrowers to ``BorrowedUsers.pdf``.

    One page per user: the phone number followed by every field of every
    borrowed book.  Reports the outcome in a messagebox.
    """
    try:
        borrowers = database.child("BorrowerList").get().val()
        pdf = FPDF()
        # Select the font once instead of once per cell; also removed a
        # stray debug print of the raw database payload.
        pdf.set_font("Arial", size=15)
        for username in borrowers:
            contact = database.child("Users").child(username).child("PhoneNumber").get().val()
            books = database.child("BorrowerList").child(username).get().val()
            pdf.add_page()
            pdf.cell(200, 10, txt=f"Phone Number -> {contact}", ln=1, align='L')
            for isbn in books:
                for field in books[isbn]:
                    pdf.cell(200, 10, txt=f"{field} -> {books[isbn][field]}", ln=1, align='L')
        pdf.output("BorrowedUsers.pdf")
        messagebox.showinfo('Success', "PDF saved Successfully")
    except Exception:
        # BorrowerList is None when nobody has borrowed; iterating raises.
        # Narrowed from a bare `except:`.
        messagebox.showerror('Error', "No Borrowers.")
def pdf_returner():
    """Export all users who returned books to ``ReturnedUsers.pdf``.

    One page per user: the phone number followed by every field of every
    returned book.  Reports the outcome in a messagebox.
    """
    try:
        returners = database.child("ReturnerList").get().val()
        pdf = FPDF()
        # Select the font once instead of once per cell; also removed a
        # stray debug print of the raw database payload.
        pdf.set_font("Arial", size=15)
        for username in returners:
            contact = database.child("Users").child(username).child("PhoneNumber").get().val()
            books = database.child("ReturnerList").child(username).get().val()
            pdf.add_page()
            pdf.cell(200, 10, txt=f"Phone Number -> {contact}", ln=1, align='L')
            for isbn in books:
                for field in books[isbn]:
                    pdf.cell(200, 10, txt=f"{field} -> {books[isbn][field]}", ln=1, align='L')
        pdf.output("ReturnedUsers.pdf")
        messagebox.showinfo('Success', "PDF saved Successfully")
    except Exception:
        # ReturnerList is None when nobody has returned; iterating raises.
        # Narrowed from a bare `except:`.
        messagebox.showerror('Error', "No Returners.")
def sends_notification():
    """Open a 'Notification Center' window to SMS a registered user via Twilio."""
    # SECURITY: live Twilio credentials are hard-coded in the repository.
    # They should be rotated immediately and loaded from the environment or
    # a config file instead of source control.
    account_sid = "ACed4fd4cfe8ff5ff41c72977ac2366eb4"
    auth_token = "a7dac4f9f6a0f0f74b2ed4f874d92cb8"
    client = Client(account_sid, auth_token)
    def sendSMS(msg, phone):
        # Send `msg` to an Indian (+91-prefixed) number through Twilio.
        message = client.messages.create(
            body=msg,
            from_="+15673131780",
            to="+91" + str(phone)
        )
        print(message.sid)
        messagebox.showinfo('Success', "Message Sent Successfully")
    def send_data():
        # Look up the user's phone number and forward the typed message.
        if name.get() == "" or message.get() == "":
            messagebox.showerror('Error', "All fields are required")
        else:
            try:
                contact = database.child("Users").child(name.get()).child("PhoneNumber").get().val()
                if contact is not None:
                    msg = message.get()
                    sendSMS(msg, contact)
                else:
                    messagebox.showerror('Error', "No username matches the Database.", parent=send_window)
            except:
                # NOTE(review): bare except hides the real failure reason.
                messagebox.showerror('Error', "Cannot send the message right now.", parent=send_window)
            finally:
                send_window.destroy()
    # --- window layout (Label -> StringVar -> Entry rebinding, as in the
    # other windows of this module) --------------------------------------
    send_window = Tk()
    send_window.title('Send Notifications')
    send_window.geometry('500x600')
    heading = Label(send_window, text="Notification Center", font=('Times New Roman', 20, 'bold'))
    heading.place(x=80, y=60)
    name = Label(send_window, text="Username :", font='Verdana 10 bold')
    name.place(x=80, y=160)
    message = Label(send_window, text="message :", font='Verdana 10 bold')
    message.place(x=80, y=190)
    name = StringVar()
    message = StringVar()
    name = Entry(send_window, width=40, textvariable=name)
    name.place(x=200, y=163)
    message = Entry(send_window, width=40, textvariable=message)
    message.place(x=200, y=193)
    btn_signup = Button(send_window, text="  Send", font=('Bookman antiqua', 12, 'bold'), command=send_data,
                        bg='#2176F2',
                        fg='white')
    btn_signup.place(x=200, y=313)
    # Pressing Enter submits the form as well.
    send_window.bind('<Return>', lambda event: send_data())
    send_window.mainloop()
| sridamul/BBMS | userManagement.py | userManagement.py | py | 11,961 | python | en | code | 0 | github-code | 36 |
40746622543 | import numpy as np
import gzip
from ase import Atom, Atoms
import gzip
import io
import os
from ase.io import write, read
import pyscal3.formats.ase as ptase
import warnings
def read_snap(infile, compressed = False):
    """
    Function to read a POSCAR format.

    Parameters
    ----------
    infile : string
        name of the input file

    compressed : bool, optional
        force to read a `gz` zipped file. If the filename ends with `.gz`, use of this keyword is not
        necessary, Default False
        NOTE(review): this flag is currently ignored -- the file is handed
        straight to ase.io.read, which infers compression on its own.

    Returns
    -------
    atoms : list of `Atom` objects
        list of all atoms as created by user input

    box : list of list of floats
        list of the type `[[xlow, xhigh], [ylow, yhigh], [zlow, zhigh]]` where each of them are the lower
        and upper limits of the simulation box in x, y and z directions respectively.

    Examples
    --------
    >>> atoms, box = read_poscar('POSCAR')
    >>> atoms, box = read_poscar('POSCAR.gz')
    >>> atoms, box = read_poscar('POSCAR.dat', compressed=True)
    """
    # Delegate parsing to ASE, then convert to pyscal's (atoms, box) pair.
    aseobj = read(infile, format="vasp")
    atoms, box = ptase.read_snap(aseobj)
    return atoms, box
def write_snap(sys, outfile, comments="pyscal", species=None):
    """
    Write a system snapshot to a POSCAR file.

    Parameters
    ----------
    sys : System
        system to be written out
    outfile : string
        name of the output file
    comments : string, optional
        comment line for the POSCAR header (legacy writer only)
    species : list of str, optional
        chemical species; when given, the ASE backend is used
    """
    if species is not None:
        # Species are known: convert to an ASE object and let ASE write it.
        aseobj = ptase.convert_snap(sys, species=species)
        write(outfile, aseobj, format="vasp")
        return
    warnings.warn("Using legacy poscar writer, to use ASE backend specify species")
    write_poscar(sys, outfile, comments=comments)
def split_snaps(**kwargs):
    """Unsupported operation for this backend."""
    raise NotImplementedError("split method for mdtraj is not implemented")
def convert_snap(**kwargs):
    """Unsupported operation for this backend."""
    raise NotImplementedError("convert method for mdtraj is not implemented")
def write_poscar(sys, outfile, comments="pyscal", species=None):
    """
    Write a system to a file in POSCAR format.

    Parameters
    ----------
    sys : System
        system to be exported
    outfile : string
        name of the output file
    comments : string, optional
        comment line written to the POSCAR header
    species : list of str, optional
        chemical species mapped onto the integer types of the system;
        required when the system carries no species information itself

    Raises
    ------
    ValueError
        if the system has no species and none are supplied, or if the
        number of supplied species does not match the number of types
    """
    #get element strings
    if 'species' not in sys.atoms.keys():
        sys.atoms["species"] = [None for x in range(sys.atoms.ntotal)]

    if sys.atoms.species[0] is None:
        # BUG FIX: `species` used to be read from an undefined global here,
        # raising NameError; it is now an explicit keyword argument.
        if species is None:
            raise ValueError("Species was not known! To convert to ase, species need to be provided using the species keyword")
        #otherwise we know the species
        types = sys.atoms.types
        unique_types = np.unique(types)
        if not (len(unique_types) == len(species)):
            raise ValueError("Length of species and number of types found in system are different. Maybe you specified \"Au\" instead of [\"Au\"]")
        #now assign the species to custom
        atomspecies = []
        for cc, typ in enumerate(types):
            atomspecies.append(species[int(typ-1)])
    else:
        atomspecies = sys.atoms.species

    # group positions by species so each species block is contiguous
    tt, cc = np.unique(atomspecies, return_counts=True)
    atomgroups = [[] for x in range(len(tt))]
    for count, t in enumerate(tt):
        for ccount, pos in enumerate(sys.atoms.positions):
            if atomspecies[ccount] == t:
                atomgroups[count].append(pos)

    # a context manager guarantees the file is closed even if writing fails
    with open(outfile, 'w') as fout:
        fout.write(comments+"\n")
        fout.write(" 1.00000000000000\n")
        #write box
        vecs = sys.box
        fout.write(" %1.14f %1.14f %1.14f\n"%(vecs[0][0], vecs[0][1], vecs[0][2]))
        fout.write(" %1.14f %1.14f %1.14f\n"%(vecs[1][0], vecs[1][1], vecs[1][2]))
        fout.write(" %1.14f %1.14f %1.14f\n"%(vecs[2][0], vecs[2][1], vecs[2][2]))
        fout.write(" ")
        for c in cc:
            fout.write("%d "%int(c))
        fout.write("\n")
        fout.write("Cartesian\n")
        for i in range(len(atomgroups)):
            for pos in atomgroups[i]:
                fout.write(" %1.14f %1.14f %1.14f\n"%(pos[0], pos[1], pos[2]))
| pyscal/pyscal3 | src/pyscal3/formats/vasp.py | vasp.py | py | 3,869 | python | en | code | 2 | github-code | 36 |
33052012137 | from sqlalchemy.orm import Session
import curd, cloud, orm
def policy_with_projects(yun, projects):
    """Build a write policy scoped by Project tag; None when no projects."""
    if not projects:
        return None
    quoted = ('"' + entry.project.name + '"' for entry in projects)
    return yun.CloudIAM.policy_gen_write_with_tag("Project", ','.join(quoted))
def policy_with_teams(yun, teams):
    """Build a write policy scoped by Team tag; None when no teams."""
    if not teams:
        return None
    quoted = ('"' + entry.team.name + '"' for entry in teams)
    return yun.CloudIAM.policy_gen_write_with_tag("Team", ','.join(quoted))
def policy_with_repos_read(yun, repos):
    """Build a read policy over the given repo ARNs; None when no repos."""
    if not repos:
        return None
    quoted_arns = ('"' + entry.repo.arn + '"' for entry in repos)
    return yun.CloudIAM.get_by_user_read(','.join(quoted_arns))
def policy_with_repos_write(yun, repos):
    """Build a write policy over the given repo ARNs; None when no repos."""
    if not repos:
        return None
    quoted_arns = ('"' + entry.repo.arn + '"' for entry in repos)
    return yun.CloudIAM.get_by_user_write(','.join(quoted_arns))
def update_user_policy(db: Session, user: orm.User, newly=False):
    """Rebuild the cloud IAM policy for *user* from their current grants.

    Collects tag-based statements for the user's projects and teams plus
    read/write statements for individually granted repos, joins them into
    one IAM policy document, then creates it (``newly=True``) or updates
    the user's existing policy (``user.ccpolicy``).
    """
    name = user.name
    projects = curd.ProjectAdmin.get_all_by_user(db, user.id)
    teams = curd.TeamAdmin.get_all_by_user(db, user.id)
    repos_read = curd.Perm.get_by_user_read(db, user.id)
    repos_write = curd.Perm.get_by_user_write(db, user.id)

    yun = cloud.get()
    # Each helper returns None (or empty) when there is nothing to grant;
    # keep only the real statement fragments.
    actions = [fragment for fragment in (
        policy_with_projects(yun, projects),
        policy_with_teams(yun, teams),
        policy_with_repos_read(yun, repos_read),
        policy_with_repos_write(yun, repos_write),
    ) if fragment]

    rules = ','.join(actions)
    # BUG FIX: "Statement" was previously emitted as {[...]}, which is not
    # valid JSON -- an IAM Statement must be an object or an array.
    policy = '{"Version": "2012-10-17","Statement": ['+rules+']}'
    if newly:
        return yun.CloudIAM.policy_create(name, policy)
    else:
        return yun.CloudIAM.policy_update(user.ccpolicy, policy)
def refresh_policy_with_uid(db: Session, userid: int):
    """Recompute the IAM policy for the user with primary key *userid*."""
    target = curd.User.get(db, userid)
    return update_user_policy(db, target)
def refresh_policy_with_uname(db: Session, username: str):
    """Recompute the IAM policy for the user named *username*."""
    target = curd.User.get_by_name(db, username)
    return update_user_policy(db, target)
| kealiu/codecommitter | app/iam.py | iam.py | py | 2,267 | python | en | code | 0 | github-code | 36 |
2109558152 | import numpy as np
import pandas as pd
from math import factorial, pi
import scipy.optimize
import scipy.misc
import os
import re
import argparse
from sklearn.gaussian_process import GaussianProcessRegressor
from sklearn.gaussian_process.kernels import RBF,ConstantKernel
# for tests
#import matplotlib.pyplot as pl
# directory where the fit_lattice_test.py is located
dir_path = os.path.dirname(os.path.realpath(__file__))
########################################################################
if __name__ == "__main__":
    # Running this file directly only exposes the usage text below through
    # argparse (--help); all functionality is meant to be imported.
    __doc__="""Construct a parametrization (from PhysRevC.100.064910) of the lattice QCD equation of state
(P/T^4, n/T^3, s/T^3, e/T^4) by calling function:
- param(T,muB,muQ,muS)
  input: temperature and chemical potentials in [GeV]
  output: dictionnary of all quantities ['T','P','s','n_B','n_Q','n_S','e']

Produces lattice data for P/T^4, nB/T^3, s/T^3, e/T^4 as a function of T for a single value of muB:
- lattice_data(EoS,muB)
  input: - EoS: - 'muB' refers to the EoS with the condition \mu_Q = \mu_S = 0
                - 'nS0' refers to the EoS with the condition <n_S> = 0 & <n_Q> = 0.4 <n_B>
         - muB: baryon chemical potential in [GeV]
  output: dictionnary of all quantities + error ['T','P','s','n_B','e']

Calculation of the equation of state under the conditions: <n_S> = 0 ; <n_Q> = factQB*<n_B>:
- EoS_nS0(fun,T,muB,**kwargs)
  input: - fun: any function which calculate an EoS (by ex: param, HRG, full_EoS)
         - T,muB: temperature and baryon chemical potential in [GeV]
  output: dictionnary of all quantities ['T','P','s','n_B','e']
"""
    parser = argparse.ArgumentParser(
        description=__doc__,
        formatter_class=argparse.RawTextHelpFormatter
    )
    args = parser.parse_args()
###############################################################################
# Lattice inputs from J. Phys.: Conf. Ser. 1602 012011:
# pseudo-critical temperature at vanishing baryon chemical potential [GeV]
Tc0 = 0.158
# curvature coefficients of the expansion of T_c(\mu_B)
kappa2 = 0.0153
kappa4 = 0.00032

###############################################################################
def Tc_lattice(muB):
    """
    lQCD pseudo-critical temperature T_c(\mu_B) in GeV
    (curvature expansion from J. Phys.: Conf. Ser. 1602 012011)
    """
    x = muB/Tc0
    return Tc0*(1.-kappa2*x**2.-kappa4*x**4.)
###############################################################################
def dTcdmuB_lattice(muB):
    """
    Slope dT_c/d\mu_B of the lQCD critical line at the given \mu_B
    """
    slope = -2.*muB*kappa2/Tc0 -4.*(muB**3.)*kappa4/Tc0**3.
    return slope
def Tc_lattice_muBoT(muBoT):
    """
    Critical temperature along a line of fixed ratio muB/T.
    Solves muB/Tc_lattice(muB) = muBoT for muB, then evaluates Tc there.
    """
    if muBoT == 0:
        return Tc_lattice(0.)
    residual = lambda muB: muB/Tc_lattice(muB)-muBoT
    muB_c = scipy.optimize.root(residual, [muBoT*Tc0], method='lm').x[0]
    return Tc_lattice(muB_c)
###############################################################################
# SB limits
###############################################################################
def SB_lim(T,muB,muQ,muS,Nf=3):
    """
    Stefan-Boltzmann (massless ideal gas) limit of the QCD EoS.

    Returns a dict with P/T^4, s/T^3, the densities n_X/T^3, e/T^4,
    the trace anomaly I = e-3P and cs^2 = 1/3, for Nf = 0, 2 or 3 flavors.
    T may be a float or an array/list; the chemical potentials may then be
    scalars or per-temperature arrays.  All inputs in GeV.
    """
    n_colors = 3                       # number of colors
    dof_quark = 2.*n_colors            # spin x color per quark flavor
    dof_gluon = 2.*(n_colors**2.-1.)   # polarization x adjoint color
    # single temperature value
    if isinstance(T, float):
        # quark chemical potentials in terms of the conserved charges B,Q,S
        mu_up = 1./3.*muB + 2./3.*muQ
        mu_down = 1./3.*muB - 1./3.*muQ
        mu_strange = 1./3.*muB - 1./3.*muQ - muS
        # active flavors with their chemical potentials and B,Q,S charges
        if Nf == 0:
            flavor_mu = []
            flavor_B = []
            flavor_Q = []
            flavor_S = []
        elif Nf == 2:
            flavor_mu = [mu_up, mu_down]
            flavor_B = [1./3., 1./3.]
            flavor_Q = [2./3., -1./3.]
            flavor_S = [0., 0.]
        elif Nf == 3:
            flavor_mu = [mu_up, mu_down, mu_strange]
            flavor_B = [1./3., 1./3., 1./3.]
            flavor_Q = [2./3., -1./3., -1./3.]
            flavor_S = [0., 0., -1.]
        gluon_P = (pi**2)/90.*dof_gluon
        quark_P = dof_quark*sum([(7*pi**2)/360. + ((mu/T)**2)/12. + ((mu/T)**4)/(24*pi**2) for mu in flavor_mu])
        P = gluon_P + quark_P
        gluon_s = 4*(pi**2)/90.*dof_gluon
        quark_s = dof_quark*sum([(4*7*pi**2)/360. + 2*((mu/T)**2)/12. for mu in flavor_mu])
        s = gluon_s + quark_s
        # conserved-charge densities: charge-weighted d(P/T^4)/d(mu/T)
        nB = dof_quark*sum([qB*((2*(mu/T))/12. + (4*(mu/T)**3)/(24*pi**2)) for qB, mu in zip(flavor_B, flavor_mu)])
        nQ = dof_quark*sum([qQ*((2*(mu/T))/12. + (4*(mu/T)**3)/(24*pi**2)) for qQ, mu in zip(flavor_Q, flavor_mu)])
        nS = dof_quark*sum([qS*((2*(mu/T))/12. + (4*(mu/T)**3)/(24*pi**2)) for qS, mu in zip(flavor_S, flavor_mu)])
        e = s-P+muB/T*nB+muQ/T*nQ+muS/T*nS
        cs2 = 1./3.
    # array of temperature values: evaluate pointwise
    elif isinstance(T, np.ndarray) or isinstance(T, list):
        P = np.zeros_like(T)
        s = np.zeros_like(T)
        nB = np.zeros_like(T)
        nQ = np.zeros_like(T)
        nS = np.zeros_like(T)
        e = np.zeros_like(T)
        cs2 = 1./3.*np.ones_like(T)
        for idx, temp in enumerate(T):
            # chemical potentials may be given per-temperature as well
            try:
                bmu = muB[idx]
            except:
                bmu = muB
            try:
                qmu = muQ[idx]
            except:
                qmu = muQ
            try:
                smu = muS[idx]
            except:
                smu = muS
            single = SB_lim(temp, bmu, qmu, smu, Nf=Nf)
            P[idx] = single['P']
            s[idx] = single['s']
            nB[idx] = single['n_B']
            nQ[idx] = single['n_Q']
            nS[idx] = single['n_S']
            e[idx] = single['e']
    else:
        raise Exception('Problem with input')
    return {'P':P,'s':s,'n_B':nB,'n_Q':nQ,'n_S':nS,'e':e,'I':e-3.*P,'cs^2':cs2}
###############################################################################
# import data for the parametrization of susceptibilities
###############################################################################
# Numerator coefficients a_i of the rational parametrization, keyed by
# susceptibility name ('chi0', 'chiB2', ...).  The nS0-constrained tables
# are loaded first, then the general ones.
chi_a = {}
for chi_file in ["/data/chi_a_nS0.csv","/data/chi_a.csv"]:
    param_chi_a = pd.read_csv(dir_path+chi_file).to_dict(orient='list')
    # scan rows for each chi
    for i,chi in enumerate(param_chi_a['chi']):
        values = []
        # scan columns with coefficients
        for j,coeff in enumerate(param_chi_a):
            # skip first column which is chi string
            if(coeff=='chi'):
                continue
            # append values
            values.append(param_chi_a[coeff][i])
        chi_a.update({chi:values})
# Denominator coefficients b_i, same layout as chi_a.
chi_b = {}
for chi_file in ["/data/chi_b_nS0.csv","/data/chi_b.csv"]:
    param_chi_b = pd.read_csv(dir_path+chi_file).to_dict(orient='list')
    # scan rows for each chi
    for i,chi in enumerate(param_chi_b['chi']):
        values = []
        # scan columns with coefficients
        for j,coeff in enumerate(param_chi_b):
            # skip first column which is chi string
            if(coeff=='chi'):
                continue
            # append values
            values.append(param_chi_b[coeff][i])
        chi_b.update({chi:values})
# list of all susceptibilities
list_chi = list(param_chi_a['chi'])
list_chi_nS0 = ['chiB2_nS0','chiB4_nS0']

########################################################################
# Stefan Boltzmann limit for the susceptibilities
# can be found in PhysRevC.100.064910
chi_SB = dict(zip(list_chi,[19.*pi**2./36.,\
    1./3.,2./3.,1.,\
    0.,-1./3.,1./3.,\
    2./(9.*pi**2.),4./(3*pi**2.),6./pi**2.,\
    0.,-2./(9.*pi**2.),2./(9.*pi**2.),\
    4./(9.*pi**2.),-2./pi**2.,2./pi**2.,\
    4./(9.*pi**2.),2./(3.*pi**2.),2./(3.*pi**2.),\
    2./(9.*pi**2.),-2./(9.*pi**2.),-2./(3.*pi**2.)]))
chi_SB.update(dict(zip(list_chi_nS0,[0.1067856506125367,0.0006673764465596013])))
########################################################################
def param_chi(T,quant):
    """
    Rational-polynomial parametrization of a lQCD susceptibility as a
    function of temperature (PhysRevC.100.064910).

    Ex: param_chi(T,'chiBQS121')
    input quant is a string with the format: chiBQS121
    input T being a list or a float (temperature in GeV)
    """
    t_red = T/Tc_lattice(0.)  # reduced temperature T/T_c(0)
    num = sum(a/(t_red)**k for k, a in enumerate(chi_a[quant]))
    den = sum(b/(t_red)**k for k, b in enumerate(chi_b[quant]))
    # constant offset enforces the Stefan-Boltzmann limit at large T
    offset = chi_SB[quant]-chi_a[quant][0]/chi_b[quant][0]
    return num/den + offset
########################################################################
# for each susceptibility, get the order of the derivative wrt B,Q,S
########################################################################
# For each susceptibility, the order of the derivative wrt B, Q and S;
# e.g. BQS['chiBQS121'] == {'B': 1, 'Q': 2, 'S': 1}.
BQS = dict(zip(list_chi,[{'B': 0, 'Q': 0, 'S': 0} for i in range(len(list_chi))]))
# LaTeX labels for plotting, keyed by susceptibility name.
chi_latex = {'chi0':r'$\chi_0$'}
for chi in list_chi:
    # derivatives wrt to each charge
    if(chi!='chi0'):
        # decompose chiBQS234 in [B,Q,S] and [2,3,4]
        chi_match = re.match('chi([A-Z]+)([0-9]+)', chi)
        list_charge = list(chi_match.group(1)) # contains the charges
        list_der = list(chi_match.group(2)) # contains the derivatives
        chi_latex.update({chi:r'$\chi^{'+"".join(list_charge)+'}_{'+"".join(list_der)+'}$'})
        for ich,xcharge in enumerate(list_charge):
            BQS[chi][xcharge] = int(list_der[ich]) # match each charge to its derivative
chi_latex.update({'chiB2_nS0':r'$c_2$', 'chiB4_nS0':r'$c_4$'})
########################################################################
def param(T,muB,muQ,muS):
    """
    Parametrization of thermodynamic quantities from lQCD
    as a function of T, \mu_B, \mu_Q, \mu_S (all in GeV).

    Taylor expansion of P/T^4 in the chemical potentials, with the
    parametrized susceptibilities param_chi as coefficients
    (PhysRevC.100.064910).  T may be a float or an array/list; the
    chemical potentials may then be scalars or arrays of matching length.
    Returns a dict of dimensionless P/T^4, s/T^3, n_X/T^3, e/T^4 and
    the trace anomaly I = e-3P.
    """
    # if input is a single temperature value T
    if(isinstance(T,float)):
        p = 0.
        nB = 0.
        nQ = 0.
        nS = 0.
        s = 0.
        e = 0.
        if(muB==0. and muQ==0. and muS==0.):
            # at vanishing chemical potentials only chi0 contributes
            p = param_chi(T,'chi0')
            der = scipy.misc.derivative(param_chi,T,dx=1e-5,args=('chi0',))
            s = T*der
        else:
            # full Taylor sum over all susceptibilities
            for chi in list_chi:
                i = BQS[chi]['B']
                j = BQS[chi]['Q']
                k = BQS[chi]['S']
                fact = 1./(factorial(i)*factorial(j)*factorial(k))
                xchi = param_chi(T,chi)
                pow_muB = ((muB/T)**i)
                pow_muQ = ((muQ/T)**j)
                pow_muS = ((muS/T)**k)
                # pressure P/T^4
                p += fact*xchi*pow_muB*pow_muQ*pow_muS
                # baryon density n_B/T^3 when i > 1
                if(i >= 1):
                    nB += fact*xchi*i*((muB/T)**(i-1.))*pow_muQ*pow_muS
                # charge density n_Q/T^3 when i > 1
                if(j >= 1):
                    nQ += fact*xchi*pow_muB*j*((muQ/T)**(j-1.))*pow_muS
                # strangeness density n_S/T^3 when k > 1
                if(k >= 1):
                    nS += fact*xchi*pow_muB*pow_muQ*k*((muS/T)**(k-1.))
                # derivative of the susceptibility wrt temperature
                der = scipy.misc.derivative(param_chi,T,dx=1e-5,args=(chi,))
                # s/T^3 = T d(P/T^4)/d(T) + 4 P/T^4
                # here we add just the 1st part
                s += fact*(T*der-(i+j+k)*xchi)*pow_muB*pow_muQ*pow_muS
        # add 2nd piece to s/T^3
        s += 4.*p
        # energy density e/T^4
        e = s-p+(muB/T)*nB+(muQ/T)*nQ+(muS/T)*nS
    # if the input is a list of temperature values
    elif(isinstance(T,np.ndarray) or isinstance(T,list)):
        p = np.zeros_like(T)
        s = np.zeros_like(T)
        nB = np.zeros_like(T)
        nQ = np.zeros_like(T)
        nS = np.zeros_like(T)
        e = np.zeros_like(T)
        for i,xT in enumerate(T):
            # see if arrays are also given for chemical potentials
            try:
                xmuB = muB[i]
            except:
                xmuB = muB
            try:
                xmuQ = muQ[i]
            except:
                xmuQ = muQ
            try:
                xmuS = muS[i]
            except:
                xmuS = muS
            # delegate to the scalar branch point by point
            result = param(xT,xmuB,xmuQ,xmuS)
            p[i] = result['P']
            s[i] = result['s']
            nB[i] = result['n_B']
            nQ[i] = result['n_Q']
            nS[i] = result['n_S']
            e[i] = result['e']
    else:
        raise Exception('Problem with input')
    return {'T': T,'P':p, 's':s, 'n_B':nB, 'n_Q':nQ, 'n_S':nS, 'e':e, 'I':e-3*p}
########################################################################
def param_nS0(T,muB):
    """
    Parametrization of thermodynamic quantities from lQCD
    as a function of T, \mu_B for the case <n_S>=0 & <n_Q>=0.4<n_B>.

    Uses the dedicated expansion coefficients chiB2_nS0/chiB4_nS0 (even
    powers of muB/T only).  T may be a float or an array/list; muB may
    then be a scalar or an array of matching length.
    """
    # if input is a single temperature value T
    if(isinstance(T,float)):
        p = 0.
        nB = 0.
        nQ = 0.
        nS = 0.
        s = 0.
        e = 0.
        # muB-independent part from chi0
        p = param_chi(T,'chi0')
        der = scipy.misc.derivative(param_chi,T,dx=1e-5,args=('chi0',))
        s = T*der
        if(muB!=0.):
            # add the even-order muB/T corrections (i = 2, 4)
            for ichi,chi in enumerate(list_chi_nS0):
                i = 2*(ichi+1)
                xchi = param_chi(T,chi)
                pow_muB = ((muB/T)**i)
                # pressure P/T^4
                p += xchi*pow_muB
                # baryon density n_B/T^3 when i > 1
                nB += xchi*i*((muB/T)**(i-1.))
                # derivative of the susceptibility wrt temperature
                der = scipy.misc.derivative(param_chi,T,dx=1e-5,args=(chi,))
                # s/T^3 = T d(P/T^4)/d(T) + 4 P/T^4
                # here we add just the 1st part
                s += (T*der-(i)*xchi)*pow_muB
        # add 2nd piece to s/T^3
        s += 4.*p
        # energy density e/T^4
        e = s-p+(muB/T)*nB
        # charge density fixed by the <n_Q> = 0.4 <n_B> constraint
        nQ = 0.4*nB
    # if the input is a list of temperature values
    elif(isinstance(T,np.ndarray) or isinstance(T,list)):
        p = np.zeros_like(T)
        s = np.zeros_like(T)
        nB = np.zeros_like(T)
        nQ = np.zeros_like(T)
        nS = np.zeros_like(T)
        e = np.zeros_like(T)
        for i,xT in enumerate(T):
            # see if arrays are also given for chemical potentials
            try:
                xmuB = muB[i]
            except:
                xmuB = muB
            # delegate to the scalar branch point by point
            result = param_nS0(xT,xmuB)
            p[i] = result['P']
            s[i] = result['s']
            nB[i] = result['n_B']
            nQ[i] = result['n_Q']
            nS[i] = result['n_S']
            e[i] = result['e']
    else:
        raise Exception('Problem with input')
    return {'T': T,'P':p, 's':s, 'n_B':nB, 'n_Q':nQ, 'n_S':nS, 'e':e, 'I':e-3*p}
###############################################################################
# import data from lattice at muB = 0
###############################################################################
# read chi0
# Wuppertal-Budapest EoS at muB = 0; chi0 is stored as (T, P, P_err) triples.
WB_EoS0 = pd.read_csv(dir_path+"/data/WB-EoS_muB0_j.physletb.2014.01.007.csv").to_dict(orient='list')
chi_lattice2014 = {'chi0':np.array(list(zip(WB_EoS0['T'],WB_EoS0['P'],WB_EoS0['P_err'])))}
# save all other thermodynamic quantities
for quant in WB_EoS0:
    WB_EoS0[quant] = np.array(WB_EoS0[quant])
# Each block below builds {chi_name: array of (T, value, error)} from a CSV;
# the try/except silently skips datasets whose file is missing.
# NOTE(review): the bare except also hides CSV parse errors.
# read data from 2012 (chiB2,chiQ2,chiS2)
chi_lattice2012 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB_chi_T_JHEP01(2012)138.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        chi_lattice2012.update({entry:np.array(list(zip(df['T'],df[entry],df[entry+'_err'])))})
except:
    pass
# read data from 2015 (chiB2,chiB4,chiS2)
chi_lattice2015 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB_chi_T_PhysRevD.92.114505.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        # rows with NaN values are dropped for this dataset
        chi_lattice2015.update({entry:np.array([[df['T'][iT],df[entry][iT],df[entry+'_err'][iT]] for iT,_ in enumerate(df[entry]) if np.logical_not(np.isnan(df[entry][iT]))])})
except:
    pass
# read data from 2017 (chiB2,chiB4,chiB2) for <nS>=0 & <nQ>=0.4<nB>
chi_lattice2017 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB_chi_nS0_T_EPJWebConf.137(2017)07008.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        chi_lattice2017.update({entry:np.array([[df['T'][iT],df[entry][iT],df[entry+'_err'][iT]] for iT,_ in enumerate(df[entry]) if np.logical_not(np.isnan(df[entry][iT]))])})
except:
    pass
# read data from 2018
chi_lattice2018 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB_chi_T_JHEP10(2018)205.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        chi_lattice2018.update({entry:np.array(list(zip(df['T'],df[entry],df[entry+'_err'])))})
except:
    pass
# read data from 2020 (chiBQ11,chiBS11,chiQS11)
chi_lattice2020 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB_chi_T_PhysRevD.101.034506.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        chi_lattice2020.update({entry:np.array(list(zip(df['T'],df[entry],df[entry+'_err'])))})
except:
    pass
# read data from 2021
WB_EoS_muBoT2021 = {}
try:
    df = pd.read_csv(dir_path+"/data/WB-EoS_muBoT_2102.06660.csv").to_dict(orient='list')
    for entry in df:
        if(entry=='T' or '_err' in entry):
            continue
        WB_EoS_muBoT2021.update({entry:np.array(list(zip(df['T'],df[entry],df[entry+'_err'])))})
except:
    pass
###############################################################################
def EoS_nS0(fun,T,muB,**kwargs):
    """
    Calculation of the EoS defined by the input function at (T,muB) with the conditions:
    <n_S> = 0
    <n_Q> = factQB*<n_B>

    ``fun`` must have the signature fun(T,muB,muQ,muS,**kwargs) and return a
    dict with at least 'P','s','n_B','n_Q','n_S','e' (e.g. param, HRG,
    full_EoS).  The matching muQ and muS are found numerically and returned
    alongside the thermodynamic quantities.
    """
    factQB = 0.4
    if(isinstance(T,float)):
        p = 0.
        nB = 0.
        nQ = 0.
        nS = 0.
        s = 0.
        e = 0.
        n = 0.
        chi = np.zeros(len(list_chi))
        def system(mu):
            """
            Define the system to be solved
            <n_S> = 0
            <n_Q> = factQB * <n_B>
            """
            thermo = fun(T,muB,mu[0],mu[1],**kwargs)
            return [thermo['n_S']*T**3, thermo['n_Q']*T**3-factQB*thermo['n_B']*T**3]
        # initial guess: muQ ~ -0.08 muB, muS ~ 0.03 muB
        # NOTE(review): assumed empirical starting point -- no reference given.
        solution = scipy.optimize.root(system,[-0.08*muB,0.03*muB],method='lm').x
        muQ = solution[0]
        muS = solution[1]
        # evaluate the EoS at the constrained chemical potentials
        result = fun(T,muB,muQ,muS,**kwargs)
        p = result['P']
        s = result['s']
        nB = result['n_B']
        nQ = factQB*nB
        nS = 0.
        e = result['e']
        # some extra quantities are calculated within HRG function
        try:
            n = result['n']
            chi = result['chi']
        except:
            pass
    elif(isinstance(T,np.ndarray) or isinstance(T,list)):
        p = np.zeros_like(T)
        s = np.zeros_like(T)
        nB = np.zeros_like(T)
        nQ = np.zeros_like(T)
        nS = np.zeros_like(T)
        n = np.zeros_like(T)
        e = np.zeros_like(T)
        muQ = np.zeros_like(T)
        muS = np.zeros_like(T)
        chi = np.zeros((len(list_chi),len(T)))
        for i,xT in enumerate(T):
            # see if arrays are also given for chemical potentials
            try:
                xmuB = muB[i]
            except:
                xmuB = muB
            # delegate to the scalar branch point by point
            result = EoS_nS0(fun,xT,xmuB,**kwargs)
            p[i] = result['P']
            s[i] = result['s']
            nB[i] = result['n_B']
            nQ[i] = result['n_Q']
            nS[i] = result['n_S']
            n[i] = result['n']
            e[i] = result['e']
            muQ[i] = result['muQ']
            muS[i] = result['muS']
            chi[:,i] = result['chi']
    else:
        raise Exception('Problem with input')
    return {'T':T, 'muQ': muQ, 'muS': muS, 'P':p, 's':s, 'n_B':nB, 'n_Q':nQ, 'n_S':nS, 'n':n, 'e':e, 'chi':chi, 'I':e-3*p}
| pierre-moreau/EoS_HRG | EoS_HRG/fit_lattice.py | fit_lattice.py | py | 20,400 | python | en | code | 0 | github-code | 36 |
36929730434 | import exdir
import quantities as pq
import numpy as np
def convert_from_list(data):
    """Recursively turn lists back into numpy arrays.

    Lists are replaced by ``np.array`` equivalents; dict values are
    converted in place (the same dict object is returned). Any other
    value is passed through unchanged.
    """
    if isinstance(data, list):
        return np.array(data)
    if isinstance(data, dict):
        try:
            for key in data:
                data[key] = convert_from_list(data[key])
        except AttributeError:
            pass
    return data
def convert_to_list(data):
    """Recursively convert numpy containers/scalars to plain Python values.

    Arrays become nested lists, integer scalars become ``int``, floating
    scalars become ``float``, and dicts are rebuilt with both keys and
    values converted. Anything else is returned unchanged.
    """
    if isinstance(data, np.ndarray):
        return data.tolist()
    elif isinstance(data, np.integer):
        return int(data)
    elif isinstance(data, np.floating):
        # generalized from np.float64 so np.float32/np.float16 scalars
        # are converted as well (np.float64 is a subclass of np.floating)
        return float(data)
    else:
        try:
            new_result = {}
            for key, val in data.items():
                new_key = convert_to_list(key)
                new_result[new_key] = convert_to_list(val)
            return new_result
        except AttributeError:
            # not dict-like: pass the value through unchanged
            pass
    return data
class AttributePlugin(exdir.plugin_interface.Attribute):
    """Exdir attribute plugin that round-trips numpy values.

    Numpy arrays/scalars are converted to plain Python values before
    attributes are written (so they serialize cleanly) and lists are
    converted back to numpy arrays when attributes are read.
    """
    def prepare_write(self, attribute_data):
        # numpy -> plain Python (lists/ints/floats) before serialization
        attribute_data.attrs = convert_to_list(attribute_data.attrs)
        return attribute_data

    def prepare_read(self, attribute_data):
        # plain Python lists -> numpy arrays after deserialization
        attribute_data.attrs = convert_from_list(attribute_data.attrs)
        return attribute_data
def plugins():
    """Return the exdir plugin list for numpy attribute conversion.

    Ordered after the "quantities" plugin so unit handling happens first
    on both the read and write paths.
    """
    return [exdir.plugin_interface.Plugin(
        "numpy_attributes",
        attribute_plugins=[AttributePlugin()],
        read_after=["quantities"],
        write_after=["quantities"]
    )]
| CINPLA/exdir | exdir/plugins/numpy_attributes.py | numpy_attributes.py | py | 1,435 | python | en | code | 69 | github-code | 36 |
24644699429 | import io
import itertools
def part_one():
    """Sum every frequency change in the input and print the total.

    Each line of the input file holds one signed integer change.
    """
    # use a context manager so the file handle is always closed
    # (the original left it open)
    with open('inputs\\day_1_part_1.txt', 'r') as file:
        total = sum(int(line) for line in file)
    print(f'Part 1 Total {total}')
def part_two():
    """Find and print the first cumulative frequency reached twice.

    The list of changes is applied repeatedly (cycled) until some running
    total repeats.
    """
    observed_frequencies = {0}
    total = 0
    # read all lines up front inside a context manager so the file handle
    # is closed (the original cycled the open file object and never closed it;
    # itertools.cycle caches items, so reading once is equivalent)
    with open('inputs\\day_1_part_1.txt', 'r') as file:
        changes = [str.strip(line) for line in file]
    for line in itertools.cycle(changes):
        total = total + int(line)
        if total in observed_frequencies:
            print(f"Frequency {total} has been seen twice.")
            break
        observed_frequencies.add(total)
part_two()
| mruston0/AdventOfCode2018 | day_1_chronal_calibration.py | day_1_chronal_calibration.py | py | 639 | python | en | code | 0 | github-code | 36 |
31515158551 | """Basic state machine implementation."""
# pylint: disable=unnecessary-pass, too-many-instance-attributes
from typing import Iterable, Union
from rclpy import logging
from rclpy.node import Node
from rclpy.time import Time, Duration
LOGGER = logging.get_logger("behavior")
class Resource:
    """The resource class is used to track access to shared resources.

    When a state starts, it attempts to acquire all the resources it needs.
    Resources are automatically freed when the state stops. By default,
    multiple states can access the same resource, but a UniqueResource will
    throw an error if a state tries to acquire a resource that is already owned
    by another state.

    The registry is a class variable that is used to keep track of all the
    resources and their owners.
    """
    # class-level map: resource name -> set of owning State objects
    _registry = {}
    def __init__(self, name: str):
        self._name = name
        # duplicate names are logged (not raised); the registry entry is
        # still overwritten below, clearing any previous owners
        if name in Resource._registry:
            LOGGER.error(f"Resource {name} already exists. "
                         "Different resources must have unique names.")
        Resource._registry[name] = set()
    def __del__(self):
        # remove the registry entry when the resource object is collected
        Resource._registry.pop(self._name)
    def get_name(self) -> str:
        """Get the name of the resource"""
        return self._name
    def get_owner(self):
        """Get the owner (set of States) of the resource"""
        return Resource._registry[self._name]
    def is_free(self) -> bool:
        """Check if the resource has no owner"""
        return len(Resource._registry[self._name]) == 0
    def assign(self, owner: 'State') -> 'Resource':
        """Assign the resource to an owner (shared: many owners allowed)."""
        if owner in Resource._registry[self._name]:
            LOGGER.error(f"Resource {self._name} already owned by {owner}.")
        Resource._registry[self._name].add(owner)
        return self
    def transfer(self, old_owner: 'State', new_owner: 'State') -> 'Resource':
        """Transfer ownership of the resource from old_owner to new_owner"""
        if old_owner not in Resource._registry[self._name]:
            LOGGER.error(f"Resource {self._name} not owned by {old_owner}.")
        Resource._registry[self._name].remove(old_owner)
        Resource._registry[self._name].add(new_owner)
        return self
    def free(self, owner: 'State'):
        """Free the resource: drop owner from the owner set"""
        Resource._registry[self._name].remove(owner)
    @staticmethod
    def validate(*resources: str):
        """Check that every given resource name has been registered"""
        for resource in resources:
            if resource not in Resource._registry:
                LOGGER.error(f"Resource {resource} does not exist. Invalid name.")
class UniqueResource(Resource):
    """A resource that at most one state may own at any given time."""

    def assign(self, owner: 'State') -> Resource:
        """Claim the resource for *owner*; log an error if already taken."""
        if not self.is_free():
            current_owners = Resource._registry[self._name]
            LOGGER.error(f"Resource {self._name} already owned by "
                         f"{current_owners}.")
        Resource._registry[self._name].add(owner)
        return self
class State:
    """Superclass for states.

    A state tracks its run status and the shared resources it needs.
    Resources are acquired from the parent state machine on start() and
    released back to it on stop().
    """
    def __init__(self, name,
                 node:Node = None,
                 resources:Iterable[str] = None,
                 parent:'StateMachine' = None):
        """Create a state.

        name      -- base name (the full name is prefixed by the parent's name)
        node      -- ROS2 node used for clock access
        resources -- names of resources this state must acquire when started
        parent    -- enclosing state machine, if any
        """
        self._base_name = name
        self._name = name
        self._node = None
        self._running = False
        self._start_time = None
        self._required_resources = set() if resources is None else set(resources)
        self._parent = None
        self._resources = {}  # name -> Resource currently held by this state
        if node is not None:
            self._node = node
        if parent is not None:
            self.set_parent(parent)
    def set_node(self, node:Node):
        """Set the ROS2 node for the state"""
        self._node = node
    def set_parent(self, parent:'StateMachine'):
        """Set the parent state machine and derive the fully-qualified name"""
        if self._parent is not None:
            LOGGER.error(f"State {self._name} already has parent.")
        if self._running:
            LOGGER.error(f"State {self._name} already running. "
                         "Cannot change parent.")
        self._parent = parent
        self._name = f"{parent.get_name()}/{self._base_name}"
    def get_base_name(self):
        """Get the base (unqualified) name of the state"""
        return self._base_name
    def get_name(self):
        """Get the fully-qualified name of the state"""
        return self._name
    def get_required_resources(self) -> Iterable[str]:
        """Get the set of resource names this state requires"""
        return self._required_resources
    def add_required_resource(self, resource: Union[str, Iterable[str]]):
        """Add a required resource name (or an iterable of names).

        Requirements propagate upwards so the parent can provide them.
        """
        if self._running:
            LOGGER.error(f"State {self._name} already running. "
                         "Cannot add required resources.")
        if isinstance(resource, str):
            self._required_resources.add(resource)
        else:
            for res in resource:
                self._required_resources.add(res)
        if self._parent is not None:
            self._parent.add_required_resource(resource)
    def get_start_time(self) -> Time:
        """Get the time the state started"""
        return self._start_time
    def get_current_time(self) -> Time:
        """Get the current time from the node clock"""
        return self._node.get_clock().now()
    def get_elapsed_time(self) -> Duration:
        """Get the time the state has been running"""
        return self._node.get_clock().now() - self._start_time
    def get_node(self) -> Node:
        """Get the node for the state"""
        return self._node
    def start(self):
        """Start the state: acquire all required resources and stamp the clock"""
        if self._node is None:
            LOGGER.error(f"State {self._name} must be run within a ros2 node. "
                         "Use set_node() to set the node.")
        if self._running:
            LOGGER.warning(f"State {self._name} already running")
        for resource in self._required_resources:
            self._acquire_resource(resource)
        self._running = True
        self._start_time = self._node.get_clock().now()
    def stop(self):
        """Stop the state and release every held resource"""
        if not self._running:
            LOGGER.warning(f"State {self._name} not running")
            return
        self._release_all_resources()
        self._running = False
    def step(self):
        """Do one step of the state (subclasses add behavior)"""
        if not self._running:
            LOGGER.error(f"State {self._name} has not been started. Cannot step.")
    def get_resource(self, resource: str) -> Resource:
        """Get a reference to a currently-held resource"""
        return self._resources[resource]
    def _acquire_resource(self, resource_name: str):
        """Acquire a resource from the parent state"""
        if resource_name in self._resources:
            LOGGER.warning(f"Resource {resource_name} already acquired")
        resource = self._parent.get_resource(resource_name)
        if isinstance(resource, UniqueResource):
            # unique resources change hands; shared ones just add an owner
            self._resources[resource_name] = resource.transfer(self._parent, self)
        else:
            self._resources[resource_name] = resource.assign(self)
    def _release_resource(self, resource_name: str):
        """Release a single resource back to the parent state"""
        if resource_name not in self._resources:
            LOGGER.error(f"Resource {resource_name} not in resource list for "
                         f"{self.get_name()}.")
        resource = self._resources[resource_name]
        if isinstance(resource, UniqueResource):
            resource.transfer(self, self._parent)
        else:
            resource.free(self)
        # BUG FIX: previously indexed/popped self._resources with the Resource
        # *object* (self._resources[resource]) instead of its name, which
        # raised KeyError and transferred the resource a second time.
        self._resources.pop(resource_name)
    def _release_all_resources(self):
        """Release all held resources back to the parent state"""
        for resource in self._resources.values():
            if isinstance(resource, UniqueResource):
                resource.transfer(self, self._parent)
            else:
                resource.free(self)
        self._resources.clear()
    def validate(self):
        """Validate the state configuration"""
        if self._parent is None:
            # Only need to validate resource names at global level
            Resource.validate(*self._required_resources)
class Event:
    """An event that signals a transition between states.

    An event is enabled for exactly one state at a time; its boolean
    status is refreshed via update() and combined with other events
    through the ~, & and | operators.
    """
    def __init__(self, name):
        self._name = name
        self._status = False
        self._enabled = False
        # the state this event is currently enabled for (at most one)
        self._state = None
        self._required_resources = set()
        self.initialized = True # Some events might need additional info before
                                # being initialized such as subscriber events
    def get_name(self) -> str:
        """Get the name of the event"""
        return self._name
    def reset(self):
        """Reset the event status to False"""
        self._status = False
    def get_status(self) -> bool:
        """Get the status of the event without updating"""
        return self._status
    def update(self) -> bool:
        """Update the event and get status (base class: no-op)"""
        return self._status
    def add_required_resource(self, resource: Union[str, Iterable[str]]):
        """Add a required resource name (or an iterable of names)"""
        if isinstance(resource, str):
            self._required_resources.add(resource)
        else:
            for res in resource:
                self._required_resources.add(res)
    def get_required_resources(self) -> Iterable[str]:
        """Get the set of required resource names"""
        return self._required_resources
    def enable(self, state:State):
        """Called when corresponding state is started"""
        if self._state is not None:
            LOGGER.error(f"Event {self._name} already enabled for state "
                         f"{self._state.get_name()}")
        self._state = state
        self._enabled = True
    def is_enabled(self) -> bool:
        """Check if event is enabled"""
        return self._enabled
    def disable(self):
        """Called when corresponding state is stopped"""
        self.reset()
        self._state = None
        self._enabled = False
    def __invert__(self) -> 'Event':
        # ~~e collapses back to e instead of wrapping twice
        if isinstance(self, NotEvent):
            return self._e1
        return NotEvent(self)
    def __and__(self, other:'Event') -> 'Event':
        return AndEvent(self, other)
    def __or__(self, other:'Event') -> 'Event':
        return OrEvent(self, other)
class CompositeEvent(Event):
    """An event that is a combination of other events.

    Keeps a flattened set of constituent events so enabling/resetting/
    updating can be done in one pass; subclasses combine the children's
    statuses into self._status.
    """
    def __init__(self, name):
        super().__init__(name)
        self._events = set()
    def get_base_events(self) -> Iterable[Event]:
        """Returns the base (non-composite) events"""
        return self._events
    def add_event(self, event:'Event'):
        """Add an event to the set.

        Composites of the same class are flattened: their base events are
        absorbed in addition to the composite itself.
        """
        if isinstance(event, self.__class__):
            self._events |= event.get_base_events()
        self._events.add(event)
        # inherit the child's resource requirements
        self._required_resources |= event.get_required_resources()
    def reset(self):
        # reset every constituent before clearing our own status
        for event in self._events:
            event.reset()
        self._status = False
    def update(self) -> bool:
        # refresh constituents; subclasses recompute self._status from them
        for event in self._events:
            event.update()
        return self._status
    def enable(self, state:State):
        super().enable(state)
        for event in self._events:
            if not event.is_enabled():
                event.enable(state)
class NotEvent(CompositeEvent):
    """Logical negation of a single event."""

    def __init__(self, event, name=None):
        if name is None:
            name = f"not_{event.get_name()}"
        super().__init__(name)
        self.add_event(event)
        self._e1 = event

    def update(self) -> bool:
        # an uninitialized base event cannot be meaningfully negated yet
        if not self._e1.initialized:
            return False
        self._status = not self._e1.update()
        return self._status
class AndEvent(CompositeEvent):
    """True only when both constituent events are true."""

    def __init__(self, event1, event2, name=None):
        if name is None:
            name = f"{event1.get_name()}_and_{event2.get_name()}"
        super().__init__(name)
        for evt in (event1, event2):
            self.add_event(evt)
        self._e1 = event1
        self._e2 = event2

    def update(self) -> bool:
        # refresh every constituent, then combine the two top-level statuses
        super().update()
        first = self._e1.get_status()
        second = self._e2.get_status()
        self._status = first and second
        return self._status
class OrEvent(CompositeEvent):
    """True when at least one of its two constituent events is true."""

    def __init__(self, event1, event2, name=None):
        if name is None:
            name = f"{event1.get_name()}_or_{event2.get_name()}"
        super().__init__(name)
        for evt in (event1, event2):
            self.add_event(evt)
        self._e1 = event1
        self._e2 = event2

    def update(self) -> bool:
        # refresh every constituent, then combine the two top-level statuses
        super().update()
        first = self._e1.get_status()
        second = self._e2.get_status()
        self._status = first or second
        return self._status
class StateMachine(State):
    """A basic state machine.

    Holds child states, events, and a transition table mapping
    from-state -> event name -> to-state. Exactly one child state is
    active at a time; each step() checks the active state's outgoing
    events and transitions when one fires.
    """
    def __init__(self, name, node:Node = None):
        super().__init__(name, node=node)
        self._states = {} # type: Dict[str,State]
        self._events = {} # type: Dict[str,Event]  # keyed by event NAME
        self._transitions = {} # type: Dict[str,Dict[str,str]] # from -> event -> to
        self._current_state = None
        self._start_state = None
    def set_node(self, node:Node):
        """Set the node for this machine and all child states"""
        super().set_node(node)
        for state in self._states.values():
            state.set_node(node)
    def add_state(self, state:State):
        """Add a state to the state machine"""
        if state.get_base_name() in self._states:
            LOGGER.warning(f"State {state.get_base_name()} already in state "
                           "machine. Skipping.")
            return
        state.set_node(self._node)
        state.set_parent(self)
        self._states[state.get_base_name()] = state
        self._required_resources |= state.get_required_resources()
        # By default, the start state is the first state added
        if self._start_state is None:
            self._start_state = state.get_base_name()
    def add_transition(self,
                       from_state:Union[str,State],
                       event:Event,
                       to_state:Union[str,State]):
        """Add a transition from from_state to to_state, triggered by event"""
        if isinstance(from_state, State):
            from_state = from_state.get_base_name()
        if isinstance(to_state, State):
            to_state = to_state.get_base_name()
        if from_state not in self._states:
            LOGGER.error(f"State {from_state} not in state machine. Invalid transition.")
        if to_state not in self._states:
            LOGGER.error(f"State {to_state} not in state machine. Invalid transition.")
        # BUG FIX: the duplicate check used to insert an empty dict into
        # self._events keyed by the *state* name (self._events is keyed by
        # event name everywhere else) and compared the Event object against
        # that dict. Check the transition table instead.
        if (from_state in self._transitions
                and event.get_name() in self._transitions[from_state]):
            LOGGER.warning(f"Transition from {from_state} on event {event} already exists. "
                           "Overwriting.")
        self._events[event.get_name()] = event
        if from_state not in self._transitions:
            self._transitions[from_state] = {}
        self._transitions[from_state][event.get_name()] = to_state
        # the source state must hold whatever resources the event needs
        self._states[from_state].add_required_resource(event.get_required_resources())
    def set_start(self, state:Union[str,State]):
        """Set the start state"""
        if isinstance(state, State):
            state = state.get_base_name()
        if state not in self._states:
            LOGGER.error(f"State {state} not in state machine. Invalid start state.")
        self._start_state = state
    def _transition(self, state:Union[str,State]):
        """Transition to a new state: stop the old one, start the new one,
        and (re)enable the new state's outgoing events."""
        if isinstance(state, State):
            state = state.get_base_name()
        if state not in self._states:
            LOGGER.error(f"State {state} not in state machine. Invalid transition.")
        if self._current_state is not None:
            for event in self.get_all_base_events():
                event.disable()
            self._current_state.stop()
        self._current_state = self._states[state]
        self._current_state.start()
        if self._current_state.get_base_name() in self._transitions:
            for event_name in self._transitions[self._current_state.get_base_name()]:
                event = self._events[event_name]
                event.enable(self._current_state)
        LOGGER.debug(f"Transitioned to state {state}")
        print (f"======Transitioned to state {state}======")
    def get_all_base_events(self, state=None) -> Iterable[Event]:
        """Recursively gets a set of all base events for a state"""
        if state is None:
            state = self._current_state
        events = set()
        if state.get_base_name() not in self._transitions:
            return events
        for event_name in self._transitions[state.get_base_name()]:
            event = self._events[event_name]
            if isinstance(event, CompositeEvent):
                events |= self._events[event_name].get_base_events()
            events.add(event)
        return events
    def start(self):
        """Start the state machine at its start state"""
        super().start()
        if self._start_state is None:
            return
        self._transition(self._start_state)
        LOGGER.debug(f"Started state machine {self._name}")
    def stop(self):
        """Stop the state machine and its active state"""
        if self._current_state is not None:
            for event in self.get_all_base_events():
                event.disable()
            self._current_state.stop()
        super().stop()
        LOGGER.debug(f"Stopped state machine {self._name}")
    def step(self):
        """Do one step: fire at most one outgoing transition, then step
        the (possibly new) active state."""
        if self._current_state is None:
            return
        if self._current_state.get_base_name() in self._transitions:
            for event_name, to_state in self._transitions[
                    self._current_state.get_base_name()].items():
                if self._events[event_name].update():
                    self._node.get_logger().info(
                        f"{event_name}: {self._current_state.get_base_name()} -> {to_state}")
                    self._transition(to_state)
                    break
        self._current_state.step()
    def _check_reachability(self, start_state:str, reachable:set):
        """Recursively collect states reachable from start_state"""
        if start_state in reachable:
            return
        reachable.add(start_state)
        if start_state not in self._transitions:
            return
        for to_state in self._transitions[start_state].values():
            self._check_reachability(to_state, reachable)
    def validate(self):
        """Validates State machine construction to check for errors at initialization"""
        super().validate()
        if self._start_state is None:
            return
        # Recursively validate child state machines
        for state in self._states.values():
            state.validate()
        # Check reachability
        reachable = set()
        self._check_reachability(self._start_state, reachable)
        unreachable = set(self._states.keys()) - reachable
        if len(unreachable) > 0:
            LOGGER.warning(f"State machine {self._name} has unreachable states: {unreachable}")
        # Check resource sharing
        for state in self._states.values():
            missing_resources = state.get_required_resources() - self._required_resources
            if len(missing_resources) > 0:
                LOGGER.error(f"State {state.get_name()} requires resources {missing_resources} "
                             f"that are not provided by state machine {self._name}")
        for state, events in self._transitions.items():
            for event in events:
                missing_resources = (self._events[event].get_required_resources() -
                                     self._states[state].get_required_resources())
                if len(missing_resources) > 0:
                    # BUG FIX: `state` is a dict key (str) here; calling
                    # state.get_name() raised AttributeError on this path.
                    LOGGER.error(f"Event {event} requires resources {missing_resources} "
                                 f"that are not provided by state {self._states[state].get_name()}")
class StateMachineRoot(StateMachine):
    """A State Machine with no parent.

    The root owns every resource directly (via new_resource), so resource
    acquisition/release from a parent are no-ops.
    """
    def __init__(self, node:Node):
        super().__init__("/", node)
    def new_resource(self, resource:Resource):
        """Register a new resource, owned by the root for its lifetime"""
        self.add_required_resource(resource.get_name())
        self._resources[resource.get_name()] = resource.assign(self)
    def set_parent(self, parent):
        # the root can never be re-parented
        LOGGER.error("Cannot set parent of root state machine")
    def _acquire_resource(self, resource_name):
        # no parent to acquire from: resources are registered via new_resource
        pass
    def _release_resource(self, resource_name):
        # no parent to release to: the root keeps ownership
        pass
    def _release_all_resources(self):
        # no parent to release to: the root keeps ownership
        pass
class ParallelState(State):
    """A state that runs multiple child states in parallel.

    All children are started/stopped/stepped together in lock-step.
    """
    def __init__(self, name, node:Node = None, states:Iterable[State] = None):
        super().__init__(name, node)
        self._states = []
        if states is not None:
            for state in states:
                self.add_state(state)
    def add_state(self, state):
        """Add a child state; it is started immediately if we are running"""
        self._states.append(state)
        state.set_node(self._node)
        # BUG FIX: set_parent() expects the parent State, not its resource
        # dict; passing self._resources broke naming and resource transfer.
        state.set_parent(self)
        self._required_resources |= state.get_required_resources()
        if self._running:
            state.start()
    def start(self):
        """Start the parallel state and all of its children"""
        super().start()
        for state in self._states:
            state.start()
    def stop(self):
        """Stop all children, then the parallel state itself"""
        for state in self._states:
            state.stop()
        super().stop()
    def step(self):
        """Do one step of every child state"""
        super().step()
        for state in self._states:
            state.step()
    def validate(self):
        super().validate()
        # Validate child states
        for state in self._states:
            state.validate()
        # Check resource sharing
        resource_counts = {}
        for state in self._states:
            for resource in state.get_required_resources():
                if resource not in resource_counts:
                    resource_counts[resource] = 0
                resource_counts[resource] += 1
        for resource, count in resource_counts.items():
            # NOTE(review): `resource` here is a resource *name* (str), so the
            # isinstance check below can never be true; detecting shared
            # unique resources would need a name -> Resource lookup.
            # TODO confirm intended behavior before changing it.
            if count > 1 and isinstance(resource, UniqueResource):
                LOGGER.warning(f"UniqueResource {resource} is shared by {count} states in"
                               f"parallel state {self._name}. This may cause runtime "
                               "errors or unexpected behavior.")
| LARG/spl-release | src/behavior/behavior/state_machine.py | state_machine.py | py | 22,719 | python | en | code | 1 | github-code | 36 |
12171164046 | # 어린 왕자
import sys
t = int(input()) # 테스트 케이스
for i in range(t):
x1, y1, x2, y2 = map(int, sys.stdin.readline().split()) # 출발점, 도착점 좌표
n = int(input()) # 행성의 개수
stars = []
for i in range(n): # 행성의 중심과 반지름
cx, cy, r = map(int, sys.stdin.readline().split())
stars.append([cx, cy, r])
# 행성의 중심 좌표와 출발점, 도착점 좌표 사이의 거리
go = 0
fin = 0
for i in range(n):
if (stars[i][0] - x1) ** 2 + (stars[i][1] - y1) ** 2 < stars[i][2] ** 2 < (stars[i][0] - x2) ** 2 + (stars[i][1] - y2) ** 2:
go += 1
for i in range(n):
if (stars[i][0] - x2) ** 2 + (stars[i][1] - y2) ** 2 < stars[i][2] ** 2 < (stars[i][0] - x1) ** 2 + (stars[i][1] - y1) ** 2:
fin += 1
print(go+fin)
| hi-rev/TIL | Baekjoon/기하1/little_prince.py | little_prince.py | py | 862 | python | ko | code | 0 | github-code | 36 |
35056080810 | import cantera as ct
import numpy as np
from typing import List, Tuple
from scipy import integrate
from copy import copy
"""
Present a simple implementation of IDT reactors and the
cantera implementation of a LFS reactor.
Each model can be called as:
IDT, all_conditions = idt_reactor.solve(gas, flag='T', temp_rise=400)
IDT, all_conditions = idt_reactor.solve(gas, path_to_save=dir_to_save, phi_for_file_name=phi_value)
WARNINGS: idt_reactor uses only the Temperarure rise. The species peak is still under developement.
LFS, all_conditions = lfs_reactor.solve(gas)
LFS, all_conditions = lfs_reactor.solve(gas, path_to_save=dir_to_save, phi_for_file_name=phi_value)
The saved file will be named as:
<model_type>_<TEMP in K>_<PRESSURE in atm>_<PHI, default is 1.0>.csv
The first line will contain the propertie value (IDT or LFS).
The second line will be a header containing specifics and the conditions.
The rest of the file will present the values in a csv format.
"""
# --------------------------------------------------------------------------------------------------
# utils for save
def save_solution_to_file(file_path: str,
                          model_type: str,
                          model_value: float,
                          header: str,
                          states: np.ndarray,) -> None:
    """
    Write the computed states to a csv file.

    The first line records the model property (``MODEL_<type>=<value>``,
    e.g. ``MODEL_IDT_T_400`` for an IDT model with a 400 K temperature
    flag, or ``MODEL_LFS`` for an LFS model), the second line is the csv
    header, and each remaining line holds one state row.
    """
    lines = [f"MODEL_{model_type}={model_value}", header]
    for state_row in states:
        lines.append(",".join(str(value) for value in state_row))
    with open(file_path, 'w') as f:
        f.write("\n".join(lines))
def create_solution_file_name(model_type: str, temp: float, press: float, phi: float) -> str:
    """
    Creates the file name considering the initial state.
    <model_type>_<temp>_<press>_<phi>.csv
    temp -> Kelvin
    press -> Pa (written in atm)
    If no phi is provided (phi is None), use 1.0 as default.
    """
    f_name = f"{model_type}_{temp:.1f}_{press/ct.one_atm:.2f}_"
    # BUG FIX: test against None instead of truthiness so that an explicit
    # phi = 0.0 is written as "0.0" rather than silently becoming the
    # "1.0" default (matching the documented "no phi provided" contract)
    if phi is not None:
        f_name += f"{phi:.1f}"
    else:
        f_name += "1.0"
    f_name += ".csv"
    return f_name
# --------------------------------------------------------------------------------------------------
# IDT implementation
class ConstantMassPressureODE:
    """Implement the 0D, constant mass, constant pressure reactor.

    Instances are callables with the signature expected by
    scipy.integrate.ode: f(t, y) -> dy/dt, where the state vector is
    y = [T, Y_1, ..., Y_n] (temperature followed by species mass fractions).
    """
    def __init__(self, gas: ct.Solution) -> None:
        self.gas: ct.Solution = gas
        # pressure is frozen at the initial value (constant-pressure reactor)
        self._pressure: float = gas.P
    def __call__(self, t: float, y: np.ndarray) -> np.ndarray:
        """return the set of EDO. See Turns to understand.

        Returns [dT/dt, dY_1/dt, ..., dY_n/dt]. Raises ValueError if the
        integrator hands us a non-positive temperature so the caller can
        detect solver breakdown.
        """
        # Set the gas conditions
        if y[0] <= 0:
            raise ValueError(f"Negative value found for temperature.")
        self.gas.set_unnormalized_mass_fractions(y[1:])
        self.gas.TP = y[0], self._pressure
        # calculates all the values
        rho = self.gas.density
        wdot = self.gas.net_production_rates
        # energy equation: dT/dt = -sum(h_k * wdot_k) / (rho * cp)
        dTdt = - (np.dot(self.gas.partial_molar_enthalpies, wdot) / (rho * self.gas.cp))
        # species equations: dY_k/dt = wdot_k * W_k / rho
        dYdt = wdot * self.gas.molecular_weights / rho
        return np.hstack((dTdt, dYdt))
class IDTReactor:
    """
    Class implementation of an 0D reactor to obtain the IDT value.

    The ignition delay time (IDT) is detected as the instant the
    temperature rises by `temp_rise` above the initial temperature.
    """
    def __init__(self, rtol: float = 10**-6, atol: float = 10**-9, n_iter: int = 10000) -> None:
        # relative/absolute integrator tolerances and max internal steps
        self.rtol = rtol
        self.atol = atol
        self.n_iter = n_iter
    def _get_solver(self, gas: ct.Solution, inital_condition: np.ndarray) -> integrate.ode:
        """
        Set the solver to run the IDT cases.

        inital_condition is [T, Y_1, ..., Y_n] at t = 0.
        NOTE(review): parameter name has a typo ("inital"); internal only.
        """
        ODE = ConstantMassPressureODE(gas)
        solver = integrate.ode(ODE)
        # LSODA handles the stiff induction chemistry
        solver.set_integrator('lsoda', method='bdf', rtol=self.rtol, atol=self.atol, with_jacobian=False, nsteps=self.n_iter)
        solver.set_initial_value(inital_condition, 0.0)
        return solver
    def _get_header(self, gas: ct.Solution) -> str:
        # csv header matching the columns of get_norm_states output
        return "time(s),T,P," + ",".join(["X_" + spc.name for spc in gas.species()])
    def get_idt(self, gas: ct.Solution,
                max_time: float = 5.0,
                dt: float = 10**-7,
                flag: str = 'T',
                temp_rise: float = 400.0) -> float:
        """
        Find the idt time.
        This returns a float. If an error is raised by solver problems, it returns a -1.0,
        if no IDT is found in the time window, returns -2.0.

        Only the temperature-rise criterion is implemented; `flag` is
        currently unused (see TODO below).
        """
        # TODO: Add the species flag. For a peak, run up to the max time.
        # Prepare the initial conditions
        initial_condition = np.array([gas.T, *gas.Y])
        temperature_flag = temp_rise + initial_condition[0]
        # Set the solver
        solver = self._get_solver(gas, initial_condition)
        # solve
        try:
            while solver.successful() and solver.t <= max_time:
                if solver.y[0] >= temperature_flag:
                    return solver.t
                solver.integrate(solver.t + dt)
        # catch any temperature problem
        except:
            return -1.0
        # if we do not find a IDT in the max_time
        return -2.0
    def get_norm_states(self, gas: ct.Solution, idt: float,
                        norm_dt: float = 0.01,
                        max_norm_time: float = 2.0) -> Tuple[np.ndarray, str]:
        """
        Solve the idt reactor at every norm dt and return the real time conditions.
        Returns a np.ndarray containing the values and a str containig the header:
        time(s), T, P, Y_<spc names>

        Time is normalized by the IDT: samples are taken every norm_dt*idt
        seconds up to max_norm_time*idt.
        """
        initial_condition = np.array([gas.T, *gas.Y])
        const_pressure = copy(gas.P)
        n_points = int(max_norm_time / norm_dt + 1)
        # columns: time, T, P, then one mole fraction per species
        out_solution = np.zeros([n_points, 3 + gas.n_species])
        out_solution[0, 1], out_solution[0, 2] = gas.T, gas.P
        out_solution[0, 3:] = gas.X
        # Set the solver
        solver = self._get_solver(gas, initial_condition)
        # set control parameters
        dt = norm_dt * idt
        # NOTE(review): max_time is computed but never used; the loop is
        # bounded by n_points instead — TODO confirm this is intended.
        max_time = max_norm_time * idt
        cur_point = 0
        try:
            while solver.successful():
                cur_point += 1
                solver.integrate(solver.t + dt)
                # for the output to be in molar fraction
                gas.TPY = solver.y[0], const_pressure, solver.y[1:]
                out_solution[cur_point, 0] = solver.t
                out_solution[cur_point, 1] = gas.T
                out_solution[cur_point, 2] = gas.P
                out_solution[cur_point, 3:] = gas.X
                if cur_point == n_points - 1:
                    break
            return out_solution, self._get_header(gas)
        except:
            raise Exception("Failed to solve the ODE. Try a different set of tolerances.")
    def solve(self, gas: ct.Solution,
              path_to_save: str = "",
              phi_for_file_name: float = None,
              norm_dt: float = 0.01,
              max_norm_time: float = 2.0,
              max_time_for_idt: float = 5.0,
              dt_for_idt: float = 10**-7,
              idt_flag: str = 'T',
              idt_temp_rise: float = 400.0) -> Tuple[float, np.ndarray, str]:
        """
        Solve the reactor, returning a IDT value, a np.ndarray with all conditions and the corresponding header.
        If a directory is passed as input, save the conditions to the file.
        The condition in the gas solution passed to this method is consdered the initial condition.

        Raises Exception when no IDT is found in the time window or when
        the integrator fails.
        """
        # remember the initial state: get_idt advances `gas` in place
        init_TPY = copy(gas.TPY)
        idt = self.get_idt(gas, max_time=max_time_for_idt, dt=dt_for_idt, flag=idt_flag, temp_rise=idt_temp_rise)
        if idt <= 0.0:
            # -2.0 -> no ignition within the window; -1.0 -> solver failure
            if idt == -2.0:
                raise Exception(f"It was not possble to obtain IDT. No IDT found in the time window.")
            raise Exception(f"It was not possble to obtain IDT. Solver problems found.")
        # restore the initial state before re-running for the sampled states
        gas.TPY = init_TPY
        states, header = self.get_norm_states(gas, idt, norm_dt=norm_dt, max_norm_time=max_norm_time)
        # check for save flag:
        if path_to_save != "":
            f_name = create_solution_file_name("IDT", init_TPY[0], init_TPY[1], phi_for_file_name)
            save_solution_to_file(path_to_save + f_name, f"IDT_{idt_flag}_{idt_temp_rise:.2f}", idt, header, states)
        return idt, states, header
# --------------------------------------------------------------------------------------------------
# LFS implementation
class LFSReactor:
    """
    Class implementation of an 1D reactor to obtain the LFS value.

    Wraps cantera's freely-propagating flat flame (FreeFlame); the laminar
    flame speed (LFS) is taken as the inlet velocity of the converged flame.
    """
    def __init__(self, width: float = 0.014,
                 ratio: float=3,
                 slope: float=0.1,
                 curve: float=0.1,
                 max_time_step_count: int = 5000,
                 loglevel: int = 0) -> None:
        # domain width (m) and cantera grid-refinement criteria
        self.width = width
        self.ratio = ratio
        self.slope = slope
        self.curve = curve
        self.max_time_step_count = max_time_step_count
        self.loglevel = loglevel
    def _get_header(self, gas: ct.Solution) -> str:
        # csv header matching the columns of _get_states output
        return "grid(s),T,P," + ",".join(["X_" + spc.name for spc in gas.species()])
    def _get_states(self, flame: ct.FreeFlame) -> np.ndarray:
        """
        Return the states of the current flame.
        grid(m), T, P, X_<species>

        One row per grid point; pressure is constant along the flame.
        """
        out_data = np.zeros([len(flame.T),len(flame.X) + 3])
        out_data[:,0] = flame.grid
        out_data[:,1] = flame.T
        out_data[:,2] = flame.P
        for i in range(len(flame.X)):
            out_data[:,3 + i] = flame.X[i]
        return out_data
    def solve(self, gas: ct.Solution,
              path_to_save: str = "",
              phi_for_file_name: float = None) -> Tuple[float, np.ndarray, str]:
        """
        Solve the reactor, returning a IDT value, a np.ndarray with all conditions and the corresponding header.
        If a directory is passed as input, save the conditions to the file.
        The condition in the gas solution passed to this method is consdered the initial condition.

        The returned float is the laminar flame speed (m/s).
        """
        init_TPY = copy(gas.TPY)
        # Create the flame object
        flame = ct.FreeFlame(gas, width=self.width)
        # flame.transport_model = 'Mix'
        # Define tolerances for the solver
        flame.set_refine_criteria(ratio=self.ratio, slope=self.slope, curve=self.curve)
        flame.max_time_step_count = self.max_time_step_count
        # Define logging level
        flame.solve(loglevel=self.loglevel, auto=True)
        # inlet velocity of the converged flame == laminar flame speed
        Su0 = flame.velocity[0]
        states = self._get_states(flame)
        header = self._get_header(gas)
        # check for save flag:
        if path_to_save != "":
            f_name = create_solution_file_name("LFS", init_TPY[0], init_TPY[1], phi_for_file_name)
            save_solution_to_file(path_to_save + f_name, f"LFS_m_s", Su0, header, states)
        return Su0, states, header
| fingeraugusto/red_app | src/reactors.py | reactors.py | py | 11,199 | python | en | code | 0 | github-code | 36 |
13412389294 | import pandas as pd
import numpy as np
from training_utils import train_eval_model, store_model
### 1. Load data from data.csv file
# Feature columns (0-3): Gender, Age, Competitionage, 50m time.
# Target columns (4-5): 100m and 200m times.
data_train = pd.read_csv("data_train.csv", sep=';', header=0, dtype={'Gender': int, 'Age': int, 'Competitionage': int,
                                                                     '50m': np.float64, '100m': np.float64,
                                                                     '200m': np.float64})
X_train = data_train.iloc[:, 0:4]
y_train = data_train.iloc[:, 4:6]
data_val = pd.read_csv("data_val.csv", sep=';', header=0, dtype={'Gender': int, 'Age': int, 'Competitionage': int,
                                                                 '50m': np.float64, '100m': np.float64,
                                                                 '200m': np.float64})
X_test = data_val.iloc[:, 0:4]
y_test = data_val.iloc[:, 4:6]
# Convert pandas dataframes into numpy arrays (it is needed for the fitting)
X_train = X_train.values
X_test = X_test.values
y_train100m = y_train.values[:, 0]
y_train200m = y_train.values[:, 1]
y_test100m = y_test.values[:, 0]
y_test200m = y_test.values[:, 1]
### 2. Define model, train and evaluate
# One independent regressor per target distance; `units` sizes the hidden layer.
print("Train 100m times")
model100m, _, _ = train_eval_model(X_train, y_train100m, X_test, y_test100m, units=32)
print("Train 200m times")
model200m, _, _ = train_eval_model(X_train, y_train200m, X_test, y_test200m, units=16)
### 3. Store model
store_model(model100m, 'model100m')
store_model(model200m, 'model200m')
### Include this to train 200m model that uses 100m times
# Variant A: append the *measured* 100m time as an extra input feature.
print("Train 200m times using 100m training data")
X_train_int = np.c_[X_train, y_train100m]
X_test_int = np.c_[X_test, y_test100m]
model200m_train_100, _, _ = train_eval_model(X_train_int, y_train200m, X_test_int, y_test200m)
# Variant B: append the 100m time *predicted* by model100m instead,
# so the 200m model can be used when no measured 100m time exists.
print("Train 200m times using 100m prediction data")
pred_100_train = model100m.predict(X_train)
pred_100_test = model100m.predict(X_test)
X_train_int = np.concatenate((X_train, pred_100_train), axis=1)
X_test_int = np.concatenate((X_test, pred_100_test), axis=1)
model200m_itermediate, _, _ = train_eval_model(X_train_int, y_train200m, X_test_int, y_test200m)
store_model(model200m_train_100, 'model200m_using_100m_orig')
store_model(model200m_itermediate, 'model200m_using_100m')
| MiriUll/Swim-result-prediction | machine_learning/train_tf_model.py | train_tf_model.py | py | 2,277 | python | en | code | 0 | github-code | 36 |
42259381708 | import numpy as np
from ochre.datagen import DataGenerator
def dgen():
    """Build a small, deterministic DataGenerator fixture (3-char vocabulary)."""
    pad, oov = 'P', '@'
    char_to_int = {'a': 0, 'b': 1, 'c': 2, pad: 3, oov: 4}
    return DataGenerator(
        xData=['abc', 'ab', 'ca8'],   # OCR (noisy) sequences
        yData=['abc', 'bb', 'ca'],    # gold-standard sequences
        char_to_int=char_to_int,
        seq_length=3,
        padding_char=pad,
        oov_char=oov,
        batch_size=1,
        shuffle=False,
    )
def test_dg():
    """The first batch maps 'abc' to indices with one-hot targets."""
    dg = dgen()
    assert dg.n_vocab == len(dg.char_to_int)
    assert len(dg) == 3
    x, y = dg[0]
    print(x)
    print(y)
    expected_x = np.array([0, 1, 2])
    expected_y = np.eye(5)[:3]   # one-hot rows for 'a', 'b', 'c'
    assert np.array_equal(x[0], expected_x)
    assert np.array_equal(y[0], expected_y)
def test_convert_sample():
    """_convert_sample pads short inputs and maps unknown chars to OOV."""
    dg = dgen()
    cases = {
        'aaa': np.array([0, 0, 0]),   # full length, all known chars
        'a': np.array([0, 3, 3]),     # short input padded with 'P' (3)
        'b8': np.array([1, 4, 3]),    # '8' -> OOV (4), then one pad
    }
    for sample, expected in cases.items():
        assert np.array_equal(dg._convert_sample(sample), expected)
| KBNLresearch/ochre | tests/test_datagen.py | test_datagen.py | py | 1,111 | python | en | code | 119 | github-code | 36 |
15539017308 | from functools import reduce
from decimal import Decimal
# HackerRank "10 Days of Statistics", Day 0: print mean, median and mode.
# From stdin:
# num_of_elem=int(input())
# elements=list(map(int,input().split()))
# From a file:
num_of_elem = 0
elements = ""
with open('input/input03.txt', 'r') as file_in:
    file_lines = file_in.readlines()
    num_of_elem = int(file_lines[0])
    elements = file_lines[1]
elements = list(map(int, elements.split()))

# Mean: arithmetic average of all elements.
mean = Decimal(sum(elements) / num_of_elem)

# Median requires sorted data regardless of parity.
# (Bug fix: the odd-count branch previously indexed the *unsorted* list;
# sorting now happens once, before either branch.)
elements.sort()
if num_of_elem % 2 == 1:
    # Odd count: the single middle element.
    median = elements[(num_of_elem + 1) // 2 - 1]
else:
    # Even count: average of the two middle elements.
    middle_left = elements[num_of_elem // 2 - 1]
    middle_right = elements[num_of_elem // 2]
    median = Decimal((middle_left + middle_right) / 2.0)

# Mode: the most frequent value, ties broken by the smallest value.
# Scanning the sorted distinct values with a strict '>' keeps the smallest
# value on a tie, so the old dead 'elif count == mode' branch is gone.
elements_set = sorted(set(elements))
mode = elements.count(elements_set[0])
mode_index_number = elements_set[0]
for value in elements_set[1:]:
    count = elements.count(value)
    if count > mode:
        mode = count
        mode_index_number = value

print(round(mean, 1))
print(round(median, 1))
print(mode_index_number)
28230488326 | from requests import Request, Session
import config
import json
__all__=['SendBookClass', 'SendFindClass']

# Endpoint/header/payload presets come from the project-local config module.
config_FindClass = config.FindClass()
config_BookClass = config.BookClass()

# All traffic is routed through a local intercepting proxy on port 8888
# (presumably Fiddler, given the commented certificate below — confirm);
# CultSend sends with verify=False for the same reason.
http_proxy = "http://localhost:8888"
https_proxy = "https://localhost:8888"
ftp_proxy = "ftp://10.10.1.10:3128"
#cafile = 'FiddlerRoot.cer'

proxyDict = {
    "http" : http_proxy,
    "https" : https_proxy,
    "ftp" : ftp_proxy
}
#print(config_FindClass.headers)
class CultSend:
    """Base HTTP sender: builds a prepared request from the stored settings
    and ships it through the local debugging proxy (TLS verification off).
    """

    def __init__(self, url=None, headers=None, type=None, payload=None):
        self.url = url
        self.headers = headers
        self.type = type          # HTTP verb, e.g. 'GET' or 'POST'
        self.payload = payload    # JSON-serialisable body for non-GET calls

    def send_request(self):
        """Prepare and send the configured request; return the Response."""
        session, prepared = self.__prepare_session()
        print(prepared.headers)
        return session.send(prepared, proxies=proxyDict, verify=False)

    def __prepare_session(self):
        # Build the Request, echo its URL for debugging, then prepare it.
        session = Session()
        request = self.__utility_params()
        print(request.url)
        return session, request.prepare()

    def __utility_headers(self, prepped):
        # Currently-unused hook: GET requests drop Accept-Encoding so the
        # proxied response stays uncompressed.
        if self.type == 'GET':
            del prepped.headers['Accept-Encoding']
        return prepped

    def __utility_params(self):
        # GET carries no body; everything else sends the payload as JSON.
        if self.type == 'GET':
            return Request(self.type, self.url, headers=self.headers)
        print(self.payload)
        return Request(self.type, self.url, data=json.dumps(self.payload),
                       headers=self.headers)
class SendFindClass(CultSend):
    """Request pre-configured from config.FindClass (class search, no body)."""

    def __init__(self):
        cfg = config_FindClass
        super().__init__(cfg.url, cfg.headers, cfg.type)
class SendBookClass(CultSend):
    """Request pre-configured from config.BookClass (books one class)."""

    def __init__(self, book_id):
        cfg = config_BookClass
        # The booking endpoint embeds the class id in the URL template.
        super().__init__(cfg.url.format(book_id), cfg.headers, cfg.type,
                         cfg.payload)
| akhildevelops/cult-fitness-auto-book | cult_network.py | cult_network.py | py | 2,133 | python | en | code | 1 | github-code | 36 |
35515239669 | import argparse
from datetime import datetime
import os
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.utils.data
from model import Model
from dataset import Dataset
from tqdm import tqdm
from sklearn.metrics import confusion_matrix, roc_curve, auc
import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics
# Evaluation script: runs a trained four-resolution gland classifier over the
# test split, writes per-image scores plus a summary file, and saves ROC plots.
parser = argparse.ArgumentParser(description='Test a trained multi-resolution gland classification model.')
parser.add_argument('--init_model_file', default='',help='Initial model file (optional)', dest='init_model_file')
parser.add_argument('--image_dir_high', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_25_512', help='Image directory', dest='image_dir_high')
parser.add_argument('--image_dir_medium', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_50_512', help='Image directory', dest='image_dir_medium')
parser.add_argument('--image_dir_low', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_100_512', help='Image directory', dest='image_dir_low')
parser.add_argument('--image_dir_low2', default='../../Images/gland_classification/cropped_patches__complete_and_partial_glands_50_200_512', help='Image directory', dest='image_dir_low2')
parser.add_argument('--slide_list_filename_test', default='../dataset/slide_ids_list_gland_classification_46_slides_test_saved.txt', help='slide list test', dest='slide_list_filename_test')
parser.add_argument('--dataset_type', default='test', help='', dest='dataset_type')
parser.add_argument('--num_classes', default='2', type=int, help='Number of classes', dest='num_classes')
parser.add_argument('--batch_size', default='32', type=int, help='Batch size', dest='batch_size')
parser.add_argument('--metrics_file', default='test_metrics', help='Text file to write test metrics', dest='metrics_file')
FLAGS = parser.parse_args()
# NOTE(review): the [15:-4] slice assumes a fixed checkpoint file-name prefix
# and a 4-char extension — confirm against the training script's naming.
model_name = FLAGS.init_model_file.split('/')[-1][15:-4]
out_dir = '{}/{}/{}'.format(FLAGS.metrics_file,model_name,FLAGS.dataset_type)
if not os.path.exists(out_dir):
    os.makedirs(out_dir)
# Per-image score file: one header block, then one row per test image.
test_metrics_file = 'https://fal.aimuseum.ru/hf-datasets/mirror/test_scores__{}.txt'.format(out_dir,model_name)
with open(test_metrics_file, 'w') as f:
    f.write('# model_name: {}\n'.format(model_name))
    f.write('# init_model_file: {}\n'.format(FLAGS.init_model_file))
    f.write('# image_dir_high: {}\n'.format(FLAGS.image_dir_high))
    f.write('# image_dir_medium: {}\n'.format(FLAGS.image_dir_medium))
    f.write('# image_dir_low: {}\n'.format(FLAGS.image_dir_low))
    f.write('# image_dir_low2: {}\n'.format(FLAGS.image_dir_low2))
    f.write('# slide_list_filename_test: {}\n'.format(FLAGS.slide_list_filename_test))
    f.write('# num_classes: {}\n'.format(FLAGS.num_classes))
    f.write('# batch_size: {}\n'.format(FLAGS.batch_size))
    f.write('# metrics_file: {}\n'.format(test_metrics_file))
    f.write('# patient_id\tslide_id\timage_id\tlabel\tprediction\tscore_benign\tscore_malignant\n')
print('model_name: {}'.format(model_name))
print('init_model_file: {}'.format(FLAGS.init_model_file))
print('image_dir_high: {}'.format(FLAGS.image_dir_high))
print('image_dir_medium: {}'.format(FLAGS.image_dir_medium))
print('image_dir_low: {}'.format(FLAGS.image_dir_low))
print('image_dir_low2: {}'.format(FLAGS.image_dir_low2))
print('slide_list_filename_test: {}'.format(FLAGS.slide_list_filename_test))
print('num_classes: {}'.format(FLAGS.num_classes))
print('batch_size: {}'.format(FLAGS.batch_size))
print('metrics_file: {}'.format(test_metrics_file))
test_dataset = Dataset(img_dir_high=FLAGS.image_dir_high, img_dir_medium=FLAGS.image_dir_medium, img_dir_low=FLAGS.image_dir_low, img_dir_low2=FLAGS.image_dir_low2, slide_list_filename=FLAGS.slide_list_filename_test, transforms=False)
num_imgs_test = test_dataset.num_imgs
print("Test Data - num_imgs: {}".format(test_dataset.num_imgs))
# define data loaders
data_loader_test = torch.utils.data.DataLoader(test_dataset, batch_size=FLAGS.batch_size, shuffle=False, num_workers=1)
device = torch.device('cuda') if torch.cuda.is_available() else torch.device('cpu')
# get the model using our helper function
model = Model(pretrained=False, num_classes=FLAGS.num_classes, num_intermediate_features=64)
# move model to the right device
model.to(device)
# Optionally restore trained weights (CPU map_location so CUDA checkpoints
# load on CPU-only machines as well).
if FLAGS.init_model_file:
    if os.path.isfile(FLAGS.init_model_file):
        state_dict = torch.load(FLAGS.init_model_file, map_location=lambda storage, loc: storage)
        model.load_state_dict(state_dict['model_state_dict'])
        print("Model weights loaded successfully from file: ", FLAGS.init_model_file)
print('******************** testing ********************')
pbar = tqdm(total=len(data_loader_test))
num_predictions = 0
running_correct_result = 0
label_list = []
predicted_result_list = []
probs_result_list = []
model.eval()
# Inference loop: no gradients, accumulate labels/predictions/probabilities
# and append one scored row per image to the metrics file.
with torch.no_grad():
    for i, (img_paths, img_high, img_medium, img_low, img_low2, label) in enumerate(data_loader_test):
        img_high, img_medium, img_low, img_low2, label = img_high.to(device), img_medium.to(device), img_low.to(device), img_low2.to(device), label.to(device)
        # get logits from the model
        output_high, output_medium, output_low, output_low2, output_result = model(img_high, img_medium, img_low, img_low2)
        # obtain probs
        probs_result = F.softmax(output_result, dim=1)
        # obtain predictions
        _, predicted_result = torch.max(output_result, 1)
        correct_result = (predicted_result == label).sum().item()
        running_correct_result += correct_result
        label_arr = label.cpu().numpy()
        predicted_result_arr = predicted_result.cpu().numpy()
        probs_result_arr = probs_result.cpu().numpy()
        temp_num_predictions = label_arr.shape[0]
        num_predictions += temp_num_predictions
        label_list += list(label_arr)
        predicted_result_list += list(predicted_result_arr)
        probs_result_list += list(probs_result_arr)
        # NOTE(review): patient/slide ids are parsed from the directory name
        # (.../patient_<id>_slide_<id>/...) — confirm against Dataset's layout.
        for idx in range(temp_num_predictions):
            with open(test_metrics_file, 'a') as f:
                temp_img_path = img_paths[0][idx]
                patient_id = temp_img_path.split('/')[-3].split('_')[1]
                slide_id = temp_img_path.split('/')[-3].split('_')[3]
                img_id = temp_img_path.split('/')[-1].split('.')[0]
                f.write('{}\t{}\t{}\t{}\t{}\t{:0.4f}\t{:.4f}\n'.format(patient_id, slide_id, img_id, label_arr[idx], predicted_result_arr[idx], probs_result_arr[idx, 0], probs_result_arr[idx, 1]))
        pbar.update(1)
pbar.close()
test_acc_result = running_correct_result / num_predictions
print('test_acc_result:{:.4f}'.format(test_acc_result))
# confusion matrix
cm_test = confusion_matrix(label_list, predicted_result_list, labels=[0,1])
print('cm_test:{}'.format(cm_test))
# per-class accuracy: TPR and TNR
class_acc_test = cm_test.diagonal() / cm_test.sum(1)
print('TNR:{:.4f}, TPR:{:.4f}'.format(class_acc_test[0],class_acc_test[1]))
# Receiver operating chracteristic curve and area under curve value
label_arr = np.array(label_list)
probs_result_arr = np.vstack(probs_result_list)
fpr, tpr, _ = roc_curve(label_arr, probs_result_arr[:,1])
roc_auc = auc(fpr, tpr)
# Summary file: run configuration plus the aggregate metrics.
test_metrics_summary_file = 'https://fal.aimuseum.ru/hf-datasets/mirror/test_metrics_summary__{}.txt'.format(out_dir,model_name)
with open(test_metrics_summary_file, 'w') as f:
    f.write('# model_name: {}\n'.format(model_name))
    f.write('# init_model_file: {}\n'.format(FLAGS.init_model_file))
    f.write('# image_dir_high: {}\n'.format(FLAGS.image_dir_high))
    f.write('# image_dir_medium: {}\n'.format(FLAGS.image_dir_medium))
    f.write('# image_dir_low: {}\n'.format(FLAGS.image_dir_low))
    f.write('# image_dir_low2: {}\n'.format(FLAGS.image_dir_low2))
    f.write('# slide_list_filename_test: {}\n'.format(FLAGS.slide_list_filename_test))
    f.write('# num_classes: {}\n'.format(FLAGS.num_classes))
    f.write('# batch_size: {}\n'.format(FLAGS.batch_size))
    f.write('# test_metrics_summary_file: {}\n'.format(test_metrics_summary_file))
    f.write('# test_acc_result\n')
    f.write('{:.4f}\n'.format(test_acc_result))
    f.write('# cm_test: cm_test[0,0]\tcm_test[0,1]\tcm_test[1,0]\tcm_test[1,1]\n')
    f.write('{:d}\t{:d}\t{:d}\t{:d}\n'.format(cm_test[0,0],cm_test[0,1],cm_test[1,0],cm_test[1,1]))
    f.write('# TNR\tTPR\n')
    f.write('{:.4f}\t{:.4f}\n'.format(class_acc_test[0],class_acc_test[1]))
    f.write('# roc_auc\n')
    f.write('{:.4f}\n'.format(roc_auc))
# ROC curve figure, written as both PNG and PDF next to the metric files.
plt.rcParams.update({'font.size':12,'axes.labelsize':12})
fig,ax = plt.subplots(figsize=(3,3))
lw = 2
ax.plot(fpr, tpr, color='darkorange', lw=lw, label='AUROC = %0.2f' % roc_auc)
ax.plot([0, 1], [0, 1], color='navy', lw=lw, linestyle='--')
ax.set_xlim([-0.05, 1.05])
ax.set_xticks(np.arange(0,1.05,0.2))
ax.set_ylim([-0.05, 1.05])
ax.set_yticks(np.arange(0,1.05,0.2))
ax.set_xlabel('FPR')
ax.set_ylabel('TPR')
ax.set_title('AUROC = %0.4f' % roc_auc)
# ax.legend(loc='lower right')
ax.grid()
fig.tight_layout()
fig_filename = 'https://fal.aimuseum.ru/hf-datasets/mirror/ROC__{}.png'.format(out_dir,model_name)
fig.savefig(fig_filename, dpi=200)
fig_filename = 'https://fal.aimuseum.ru/hf-datasets/mirror/ROC__{}.pdf'.format(out_dir,model_name)
fig.savefig(fig_filename, dpi=200)
# plt.show()
plt.close('all')
| onermustafaumit/MLNM | gland_classification/four_resolutions_model/test.py | test.py | py | 9,294 | python | en | code | 4 | github-code | 36 |
37770543831 | import sys
def search_next_router(start, end):
    """Count how many routers fit when adjacent routers must be more than
    (end - start) // 2 apart, scanning the sorted module-level ``houses``.

    NOTE(review): this looks like the feasibility check for a binary search
    over the answer (BOJ 2110); the search loop itself is not present yet,
    and C (the required router count) is read but unused so far.
    """
    mid_distance = (end - start) // 2
    count = 1  # a router is always placed at the first house
    for idx, a_house in enumerate(houses[1:]):
        # enumerate over houses[1:] makes houses[idx] the previous house,
        # so this tests the gap between consecutive houses.
        if (a_house - houses[idx]) > mid_distance:
            count += 1
    return count


N, C = map(int, input().split())
# Bug fix: list.sort() returns None, so the previous
# ``houses = [...].sort()`` bound None and the next line crashed.
houses = sorted(int(sys.stdin.readline()) for _ in range(N))
start, end = houses[0], houses[-1]
| TB2715/python-for-coding-test | BaekJoon/2110.py | 2110.py | py | 374 | python | en | code | 0 | github-code | 36 |
72694778664 | # Definition for a binary tree node.
class TreeNode:
    """Minimal binary-tree node: a value plus optional left/right children."""

    def __init__(self, x):
        self.val = x
        # Children start empty; the caller wires up the tree.
        self.left = self.right = None
class Solution:
    def deepestLeavesSum(self, root: 'TreeNode') -> int:
        """Return the sum of the node values on the deepest level of the tree.

        Iterative level-order BFS: the whole current level is processed per
        outer iteration, so the running sum of the last processed level is
        the answer.  Uses a deque because list.pop(0) is O(n) per pop, which
        made the original loop accidentally quadratic.
        """
        from collections import deque
        level = deque([root])
        level_sum = 0
        while level:
            level_sum = 0  # restart the sum for each new level
            for _ in range(len(level)):
                node = level.popleft()
                level_sum += node.val
                if node.left:
                    level.append(node.left)
                if node.right:
                    level.append(node.right)
        return level_sum
| githubli97/leetcode-python | 202012/20201211/q1302.py | q1302.py | py | 637 | python | en | code | 0 | github-code | 36 |
41510394343 | # -*- coding: utf-8 -*-
# project 1
import pandas as pd
import numpy as np
import matplotlib
import warnings
import matplotlib.pyplot as plt
import os
import seaborn as sns
from scipy import stats as st
from scipy.linalg import svd
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from sklearn.neighbors import KNeighborsClassifier
from sklearn.model_selection import train_test_split
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import RandomForestClassifier
from scipy.stats import norm
# Silence Matplotlib deprecation chatter triggered by the plotting code below.
warnings.filterwarnings("ignore", category=matplotlib.MatplotlibDeprecationWarning)
### inspect data ###
# finding NaN or no values
# looking for duplicates
def inspect_data(data):
    """Report missing/duplicated entries and write per-column summary
    statistics to data_measures.txt in the current working directory."""
    # check for missing data
    print("Is there missing Data?: ", data.isnull().sum().sum())
    # check for duplicated data
    print("Is there duplicated data?:", data.duplicated().sum())
    # count, mean, std, min, 25, 50(median), 75, max
    #with open(os.path.join(os.getcwd(), "data_measures.txt"), 'w') as f:
    #    f.write(data.describe().round(2).to_string())
    # calculation of simple summary statistics
    # Rows are statistics, columns mirror the input columns.
    stat_df = pd.DataFrame(columns=data.columns, index=(
        "mean", "std","var","min", "25%-percentile", "median", "75%-percentile", "max", "std (N-1)", "var (N-1)",
        "mode"))
    # NOTE(review): stat_df[column][row] = ... is pandas chained indexing;
    # it works here because stat_df[column] returns the underlying Series,
    # but stat_df.loc[row, column] would be the safer spelling.
    for column in data.columns:
        stat_df[column]["mean"] = round(np.mean(data[column]),2)
        stat_df[column]["median"] = round(np.median(data[column]),2)
        stat_df[column]["min"] = round(np.min(data[column]),2)
        stat_df[column]["max"] = round(np.max(data[column]),2)
        # np.std/np.var default to ddof=0 (population); the (N-1) rows
        # below store the sample versions.
        stat_df[column]["std"] = round(np.std(data[column]),2)
        stat_df[column]["std (N-1)"] = round(np.std(data[column], ddof=1),2)
        stat_df[column]["var"] = round(np.var(data[column]),2)
        stat_df[column]["var (N-1)"] = round(np.var(data[column], ddof=1),2)
        # NOTE(review): st.mode(...) stores a scipy ModeResult object, not a
        # scalar — confirm that is intended for the written table.
        stat_df[column]["mode"] = st.mode(data[column])
        stat_df[column]["25%-percentile"] = round(np.quantile(data[column], 0.25),2)
        stat_df[column]["75%-percentile"] = round(np.quantile(data[column], 0.75),2)
    # write summary statistics to file
    with open(os.path.join(os.getcwd(), "data_measures.txt"), 'w') as f:
        f.write(stat_df.to_string())
# Data Visualisation
def data_visualisation(data):
    """Show the exploratory plots for the ozone data set: standardized
    boxplots, per-feature histograms, correlation heat maps (pandas and
    hand-computed), temperature scatter plots and season overlays.

    Each figure is displayed with plt.show() (blocking in interactive
    backends); nothing is returned.
    """
    ### plot boxplots/distribution of features ###
    # Standardize so all features share one axis.
    plt.figure(figsize=(10, 8))
    plt.boxplot((data - data.mean()) / data.std(ddof=1), labels=data.columns)
    plt.title("Boxplots of all Features")
    plt.xlabel("Features")
    plt.ylabel("Data values")
    plt.xticks(rotation=90)
    plt.show()
    ### plot histogramms ###
    plt.figure(figsize=(15, 12))
    sns.set_style("whitegrid")
    # One subplot per column; "season" is categorical and plotted elsewhere.
    for i, column in enumerate(data.columns.drop("season"), 1):
        plt.subplot(3, 4, i)
        sns.histplot(data[column], kde=True)
        plt.title(f'Distribution of {column}')
    plt.tight_layout()
    plt.show()
    ### plot correlations ###
    plt.figure(figsize=(10,8))
    sns.heatmap(data.corr(), cmap="RdBu")
    plt.xticks(rotation=90)
    plt.yticks(rotation=0)
    # Bug fix: plt.tight_layout was referenced without parentheses (a no-op
    # statement) — it is now actually called.
    plt.title("Correlation Heat Map")
    plt.tight_layout()
    plt.show()
    print(data.corr())
    # calculate empirical covariance and derive empirical correlation
    cov_mat = np.cov(data, rowvar=False, ddof=1)
    print(cov_mat)
    cor_mat = np.zeros((data.shape[1],data.shape[1]))
    for i in range(data.shape[1]):
        for j in range(data.shape[1]):
            cor_mat[i][j] = cov_mat[i][j]/(np.std(data.iloc[:,i],ddof=1)*np.std(data.iloc[:,j],ddof=1))
    fig, ax = plt.subplots(figsize=(10,8))
    sns.heatmap(cor_mat, cmap="RdBu")
    plt.xticks(rotation=90)
    ax.set_xticklabels(data.columns)
    ax.set_yticklabels(data.columns)
    plt.yticks(rotation=0)
    plt.title("Empirical Correlation Heat Map")
    plt.tight_layout()  # same missing-parentheses fix as above
    plt.show()
    ### plot scatter for temperature ###
    # Temp - IBH IBT
    fig, ax = plt.subplots(1, 2, figsize=(14, 6))
    ax[0].scatter(data["temp"], data["ibh"], color='blue', label='temp vs ibh')
    ax[0].set_title('Temperature vs IBH')
    ax[0].set_xlabel('Temperature')
    ax[0].set_ylabel('IBH')
    ax[1].scatter(data["temp"], data["ibt"], color='red', label='temp vs ibt')
    ax[1].set_title('Temperature vs IBT')
    ax[1].set_xlabel('Temperature')
    ax[1].set_ylabel('IBT')
    plt.show()
    ### Mapping season to temperature ####
    # 1-D strip plot: one point per row, coloured by season.
    plt.figure(figsize=(10, 6))
    colors = ['green', "red", "blue", "orange"]
    plt.axhline(y=1, color='grey', linestyle='--', lw=0.5)
    for i, row in data.iterrows():
        plt.scatter(row['temp'], 1, color=colors[data["season"][i]])
    plt.title("Temperature with Season Symbols")
    plt.xlabel("Temperature (°C)")
    plt.yticks([])  # Hide y-ticks as it's a 1D plot
    plt.legend()
    plt.grid(True, which='both', linestyle='--', linewidth=0.5, axis='x')
    plt.tight_layout()
    plt.show()
    ### Mapping season to temperature and visibility ###
    for c in range(4):
        # select indices belonging to class c:
        class_mask = data["season"] == c
        plt.plot(data["temp"][class_mask], data["vis"][class_mask], 'o', alpha=.3)
    plt.legend(["winter", "spring", "summer", "fall"])
    plt.show()
def pca(data):
    """Run a PCA (via SVD) on the standardized features (doy/season dropped),
    plot explained variance, projections coloured by season, a scatter-matrix
    of the leading components and the component loadings."""
    ### transform data ###
    # standardize (zero mean, unit sample std per column)
    data_pca = data.drop(["doy", "season"], axis=1)
    mean = data_pca.mean()
    std = data_pca.std(ddof=1)
    data_pca_scaled = np.asarray((data_pca - mean) / std)
    ### PCA ###
    # SVD of the standardized data; singular values S give the variance split.
    U, S, V = svd(data_pca_scaled, full_matrices=False)
    # Compute variance explained by principal components
    rho = (S * S) / (S * S).sum()
    threshold = 0.9
    ### plot explained variance ###
    plt.figure()
    plt.plot(range(1, len(rho) + 1), rho, 'x-', color='red')
    plt.plot(range(1, len(rho) + 1), np.cumsum(rho), 'o-', color='blue')
    plt.plot([1,len(rho)],[threshold, threshold],'k--')
    plt.title('Variance explained by principal components');
    plt.xlabel('Principal component');
    plt.ylabel('Variance explained');
    plt.legend(['Individual', 'Cumulative', 'Threshold'])
    plt.grid()
    plt.show()
    ### transform data onto pca components ###
    # Rows of V are components; V_real holds them as columns for projection.
    V_real = V.T
    Z = data_pca_scaled @ V_real
    ### Plot PCA projection ###
    # pca component indices (zero-based pairs to plot against each other)
    pca_idx = [[0, 1], [1, 4]]
    for idx in pca_idx:
        plt.figure()
        plt.title('Los Angeles Ozone: PCA')
        for c in range(len(sorted(set(data["season"])))):
            # select indices belonging to class c:
            class_mask = data["season"] == c
            plt.plot(Z[class_mask, idx[0]], Z[class_mask, idx[1]], 'o', alpha=.5)
        plt.legend(["winter", "spring", "summer", "fall"])
        plt.xlabel('PC{0}'.format(idx[0] + 1))
        plt.ylabel('PC{0}'.format(idx[1] + 1))
        plt.show()
    ### further analysis of most important pca components ###
    # number of pca components to be analysed further
    max_pca = 5
    # plot matrix scatter pca plot for max_pca components
    fig, ax = plt.subplots(max_pca, max_pca, figsize=(20, 10))
    plt.suptitle(f'Los Angeles Ozone: PCA for {max_pca} components')
    for i in range(max_pca):
        for j in range(max_pca):
            for c in range(len(sorted(set(data["season"])))):
                # select indices belonging to class c:
                class_mask = data["season"] == c
                ax[i][j].plot(Z[class_mask, i], Z[class_mask, j], 'o', alpha=.5)
            ax[i][j].set_xlabel('PC{0}'.format(i + 1))
            ax[i][j].set_ylabel('PC{0}'.format(j + 1))
    plt.legend(["winter", "spring", "summer", "fall"])
    plt.tight_layout()
    plt.show()
    ### plot for pca contribution ###
    fig, ax = plt.subplots(figsize=(14, 8))
    for i in range(max_pca):
        ax.plot(data_pca.columns, V_real[:,i], label=f'Component {i + 1}', marker='o')
    for i in range(max_pca):
        print(V_real[:,i])
    ax.set_xticks(data_pca.columns)
    ax.set_xticklabels(data_pca.columns, rotation=45)
    ax.set_ylabel('Loading')
    ax.set_title('PCA Component Loadings for Each Feature')
    ax.grid(True)
    plt.show()
    ### pca heatmap ###
    fig, ax = plt.subplots(figsize=(14, 8))
    im = ax.imshow(V_real[:,0:max_pca], cmap="RdBu")
    # NOTE(review): ax.legend() here has no labelled artists and only emits a
    # warning — probably left over; the colorbar carries the scale.
    ax.legend()
    plt.colorbar(im)
    ax.set_yticks(np.arange(len(data_pca.columns)), labels=data_pca.columns)
    ax.set_xticks(np.arange(max_pca), labels=np.arange(max_pca)+1)
    ax.set_ylabel('Feature')
    ax.set_xlabel('PCA component')
    ax.set_title('PCA Component Loadings for Each Feature')
    plt.show()
def main():
    """Load the LA ozone data, derive a categorical season feature from the
    day-of-year column, then run the inspection/visualisation/PCA pipeline."""
    ### load data ###
    data_path = os.path.join(os.getcwd(), "LAozone.csv")
    data = pd.read_csv(data_path)

    ### add additional feature ###
    # decoding seasons from doy:
    #   0 = winter (december, january, february)
    #   1 = spring (march, april, may)
    #   2 = summer (june, july, august)
    #   3 = autumn (september, october, november)
    # Vectorised with np.select instead of the previous per-row loop, which
    # used chained assignment (data["season"][row] = ...) and triggered
    # SettingWithCopy-style warnings; the boundaries are unchanged.
    doy = data["doy"]
    conditions = [
        (doy <= 60) | (doy > 335),    # winter
        (doy > 60) & (doy <= 152),    # spring
        (doy > 152) & (doy <= 244),   # summer
        (doy > 244) & (doy <= 335),   # autumn
    ]
    data["season"] = np.select(conditions, [0, 1, 2, 3])

    inspect_data(data)
    data_visualisation(data)
    pca(data)


if __name__ == "__main__":
    main()
| tirohweder/into_ml_dm_project_1 | main.py | main.py | py | 11,707 | python | en | code | 3 | github-code | 36 |
38028426057 | """
Dmytro Mishagli, UCD
04 Dec 2019 -- the script was created.
"""
import numpy as np
def basis(x, n, L):
    """n-th normalised eigenfunction of an infinite square well of width L,
    evaluated at position x: sqrt(2/L) * sin(n*pi*x/L)."""
    normalisation = np.sqrt(2 / L)
    return normalisation * np.sin(x * n * np.pi / L)
def integ(n, m, lower_limit, upper_limit, L):
    """
    Analytic integral of basis_n * basis_m over [lower_limit, upper_limit]
    for an infinite square well of width L (used for the matrix elements of
    a piecewise-constant potential).
    """
    alpha = n * np.pi / L
    beta = m * np.pi / L

    if n != m:
        # Off-diagonal case: product-to-sum expansion of sin*sin.
        def antiderivative(x):
            return 0.5 * (np.sin((alpha - beta) * x) / (alpha - beta)
                          - np.sin((alpha + beta) * x) / (alpha + beta))
    else:
        # Diagonal case: sin^2 integrates to (x - sin(2*alpha*x)/(2*alpha))/2.
        def antiderivative(x):
            return 0.5 * (x - np.sin((alpha + beta) * x) / (alpha + beta))

    return 2 / L * (antiderivative(upper_limit) - antiderivative(lower_limit))
def potential_piecewise(n,m,Vext,Vint,wells,barriers):
    '''
    Matrix element of a piecewise-constant multi-well potential between the
    n-th and m-th sine basis functions of the embedding well (width L = 3*h).

    Vext - list, left and right (exterior) potential wells' heights
    Vint - list, interatomic wells heights
    wells - list, iterior wells' widths
    barriers - list, distances between the wells
    '''
    # number of wells
    nWells = len(wells)
    # add zero element to the begin of a list of barriers
    # (rebinds the local name only; the caller's list is not modified)
    barriers = [0] + barriers
    # size of shift from 0 (total extent of the interior structure)
    h = np.sum(wells) + np.sum(barriers)
    # width of an infinite square well
    L = 3*h
    # initialise variables
    s = 0
    lower_limit = h
    upper_limit = h
    # iterate through the square well (sequence of wells)
    # NOTE(review): the loop starts at i=1, so wells[0] never advances the
    # integration limits and each Vint[i-1] multiplies the segment
    # [lower_limit, upper_limit] built from wells[i] and barriers[i-1]/[i] —
    # confirm these bounds against the intended well/barrier geometry.
    for i in range(1,nWells):
        lower_limit += wells[i] + barriers[i-1]
        upper_limit += wells[i] + barriers[i]
        s += Vint[i-1] * integ( n,m, lower_limit, upper_limit, L )
    # Exterior walls occupy [0, h] and [L-h, L]; interior terms accumulated in s.
    return Vext[0]*integ(n, m, 0, h, L) + Vext[1]*integ(n, m, L-h, L, L) + s
def get_solution(Vext, Vint, wells, barriers, num_bas_funs):
    """Diagonalise H0 + V in the sine basis of an infinite well of width
    L = 3*h (h = total extent of the well/barrier structure) and return the
    bound-state energies with their rows of basis coefficients.
    """
    h = np.sum(wells) + np.sum(barriers)   # extent of the interior structure
    L = 3 * h                              # embedding infinite-well width

    modes = range(1, num_bas_funs + 1)
    # Potential matrix elements in the sine basis.
    V = [[potential_piecewise(n, m, Vext, Vint, wells, barriers) for n in modes]
         for m in modes]
    # Unperturbed infinite-well energies form the diagonal of H0.
    H0 = np.diag([n ** 2 * np.pi ** 2 / (L ** 2) for n in modes])

    energies, coefficients = np.linalg.eigh(H0 + V)
    # Bound states lie below the lower of the two exterior walls.
    energies = energies[energies < min(Vext)]
    # eigh returns eigenvectors as columns; keep one row per bound state.
    coefficients = coefficients.T[:len(energies)]
    return energies, coefficients
| mishagli/qsol | qsol.py | qsol.py | py | 2,359 | python | en | code | 1 | github-code | 36 |
12316087861 | import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
import os
import glob
import cv2
import math
import csv
import re
import pandas as pd
import matplotlib.pyplot as plt
import numpy as np
from tensorflow.keras.applications.imagenet_utils import preprocess_input, decode_predictions
from tensorflow.keras.models import load_model
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.resnet50 import ResNet50
from keras.utils import np_utils
from skimage.transform import resize
from sklearn.model_selection import train_test_split
from tensorflow.keras.layers import Dense, InputLayer, Dropout
from tensorflow.keras.models import Sequential
def load_train_tab(tab_train, root):
def browse_directory():
global selected_directory
filepath = filedialog.askdirectory(title="Select a directory")
if filepath:
print("Selected file:", filepath)
selected_directory = filepath # Save the directory name to the global variable
directory_entry.delete(0, tk.END)
directory_entry.insert(0, filepath)
# Update the video files list
video_files.config(state="normal") # Set the state to normal before inserting text
video_files.delete(1.0, tk.END)
for file in os.listdir(filepath):
if file.endswith(".mp4") or file.endswith(".avi") or file.endswith(".mkv"):
video_files.insert(tk.END, file + "\n")
video_files.config(state="disabled") # Set the state back to disabled after inserting text
def frame_split():
# Create a progress bar popup window
progress_window = tk.Toplevel(root)
progress_window.title("Splitting Videos")
progress_label = ttk.Label(progress_window, text="Splitting video into frames...")
progress_label.pack(padx=10, pady=(10, 0))
progress = ttk.Progressbar(progress_window, mode="indeterminate", length=300)
progress.pack(padx=10, pady=(5, 10))
progress.start(10)
progress_window.update()
#check if train/frames path exists, if not, create it
if os.path.exists(selected_directory + "/train/frames/") == False:
print("/train/frames folder does not exist. Creating...")
os.makedirs(selected_directory + "/train/frames/")
else:
print("train/frames folder already exists")
#capture video files in chosen directory
count = 0
cap = [cv2.VideoCapture(videoFile) for videoFile in glob.glob(os.path.join(selected_directory, "*.mp4"))] # capturing the video from the given path
#split the frames from each video then output to train/frames folder
for i in cap:
print(str(i))
frameRate = i.get(5)
while (i.isOpened()):
frameID = i.get(1)
ret, frame = i.read()
if (ret != True):
break
if (frameID % math.floor(frameRate) == 0):
filename = selected_directory + "/train/frames/frame%d.jpg" % (count); count +=1
cv2.imwrite(filename, frame)
i.release()
#create the excel file from split frames
print("Creating excel file for classification...")
header = ['Image_ID', 'Class']
data = []
for i in os.listdir(selected_directory + "/train/frames"):
data.append(str(i))
data.sort(key=lambda f: int(re.sub('\D', '', f)))
data2 = []
for i in data:
data2.append([i])
with open(selected_directory + '/train/frames.csv', 'w', encoding='UTF8', newline='') as f:
writer = csv.writer(f)
writer.writerow(header)
writer.writerows(data2)
print("Done! Please label frames accordingly in 'frames.csv' ")
# Close the progress bar window
progress.stop()
progress_window.destroy()
# Show a new popup window that says "frame split complete"
complete_window = tk.Toplevel(root)
complete_window.title("Complete")
complete_label = ttk.Label(complete_window, text="Frame splitting complete. \nYour training frames are located in /train/frames/ in your selected directory. \nPlease update your excel file located in the /train/ folder with the necessary labels")
complete_label.pack(padx=10, pady=(10, 0))
ok_button = ttk.Button(complete_window, text="OK", command=complete_window.destroy)
ok_button.pack(padx=10, pady=(5, 10))
# Update the main window
root.update()
def begin_training():
    """Train a frame classifier from the labelled frames CSV.

    Reads <selected_directory>/train/frames.csv (columns Image_ID, Class),
    extracts ResNet50 features for each frame, trains a small dense
    classifier on top, and saves it to <selected_directory>/train/model.
    Shows an indeterminate progress popup while training runs.
    """
    # Create a progress bar popup window
    progress_window = tk.Toplevel(root)
    # Bug fix: this dialog belongs to model training, not video splitting.
    progress_window.title("Training Model")
    progress_label = ttk.Label(progress_window, text="Training model...")
    progress_label.pack(padx=10, pady=(10, 0))
    progress = ttk.Progressbar(progress_window, mode="indeterminate", length=300)
    progress.pack(padx=10, pady=(5, 10))
    progress.start(10)
    progress_window.update()
    # Load training excel file
    data = pd.read_csv(selected_directory + '/train/frames.csv')
    # Count the number of distinct class labels in the CSV
    cnt = 0
    visited = []
    for i in range(0, len(data['Class'])):
        if data['Class'][i] not in visited:
            visited.append(data['Class'][i])
            cnt += 1
    X = []  # one entry per training frame
    for img_name in data.Image_ID:
        img = plt.imread(selected_directory + '/train/frames/' + img_name)
        X.append(img)  # storing each image in array X
    X = np.array(X)
    y = data.Class
    dummy_y = np_utils.to_categorical(y)  # one-hot encode the labels
    image = []
    for i in range(0, X.shape[0]):
        a = resize(X[i], preserve_range=True, output_shape=(224, 224)).astype(int)  # reshaping to 224*224*3
        image.append(a)
    X = np.array(image)
    X = preprocess_input(X, mode='caffe')  # ResNet50 (caffe-style) preprocessing
    X_train, X_valid, y_train, y_valid = train_test_split(X, dummy_y, test_size=0.3, random_state=42)
    # Extract fixed ResNet50 features; only the dense head below is trained.
    base_model = ResNet50(weights='imagenet', include_top=False, input_shape=(224, 224, 3))
    X_train = base_model.predict(X_train)
    X_valid = base_model.predict(X_valid)
    X_train = X_train.reshape(X_train.shape[0], 7*7*2048)  # converting to 1-D
    X_valid = X_valid.reshape(X_valid.shape[0], 7*7*2048)
    # Scale both splits by the training maximum (deliberately the same divisor).
    train = X_train/X_train.max()
    X_valid = X_valid/X_train.max()
    model = Sequential()
    model.add(InputLayer((7*7*2048,)))  # input layer
    model.add(Dense(units=2048, activation='sigmoid'))  # hidden layer
    model.add(Dropout(0.5))  # adding dropout
    model.add(Dense(units=1024, activation='sigmoid'))  # hidden layer
    model.add(Dropout(0.5))  # adding dropout
    model.add(Dense(units=512, activation='sigmoid'))  # hidden layer
    model.add(Dropout(0.5))  # adding dropout
    model.add(Dense(units=256, activation='sigmoid'))  # hidden layer
    model.add(Dropout(0.5))  # adding dropout
    model.add(Dense(cnt, activation='softmax'))  # output layer, one unit per class
    model.summary()
    model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
    model.fit(train, y_train, epochs=15, validation_data=(X_valid, y_valid))
    model.save(selected_directory + '/train/model')
    # Close the progress bar window
    progress.stop()
    progress_window.destroy()
    # Show a new popup window that says "model training complete"
    complete_window = tk.Toplevel(root)
    complete_window.title("Complete")
    complete_label = ttk.Label(complete_window, text="Model training complete. Model has been saved to /train/model/.\nYou may begin classification of new videos in the classify tab")
    complete_label.pack(padx=10, pady=(10, 0))
    ok_button = ttk.Button(complete_window, text="OK", command=complete_window.destroy)
    ok_button.pack(padx=10, pady=(5, 10))
    # Update the main window
    root.update()
# --- "Train Model" tab layout: directory picker, video list, split and train buttons ---
# Add description text above the browse button
description_label = ttk.Label(tab_train, text="Select user video folder:")
description_label.pack(padx=10, pady=(10, 0))
# Create a frame to hold the directory entry and browse button
entry_browse_frame = ttk.Frame(tab_train)
entry_browse_frame.pack(padx=10, pady=(5, 10))
# Add an empty text box for manual directory input
directory_entry = ttk.Entry(entry_browse_frame, width=50)
directory_entry.pack(side="left")
# Add a browse button to the "Train Model" tab
browse_button = ttk.Button(entry_browse_frame, text="Browse", command=browse_directory)
browse_button.pack(side="left", padx=(10, 0))
# Create a text box to show a list of video files in the chosen directory
video_files = tk.Text(tab_train, wrap="none", width=50, height=10, state="normal")
video_files.pack(padx=10, pady=(5, 10))
# Add split text description above the split button
split_description_label = ttk.Label(tab_train, text="Split videos into frames and output to a train folder:")
split_description_label.pack(padx=10, pady=(10, 0))
# Add a split button to the "Train Model" tab
split_button = ttk.Button(tab_train, text="Split", command=frame_split)
split_button.pack(padx=10, pady=(5, 10))
#Add train text description above train button
train_description_label = ttk.Label(tab_train, text="Begin training model (please make sure your excel file is properly filled out)")
train_description_label.pack(padx=10, pady=(10, 0))
# Add a train button to the "Train Model" tab
train_button = ttk.Button(tab_train, text="Train", command=begin_training)
train_button.pack(padx=10, pady=(5, 10))
| NoahSCode/EDUSIM | app_train.py | app_train.py | py | 10,305 | python | en | code | 0 | github-code | 36 |
34100493282 | # Practice python, assignment 3
# linked list
# bring codes for the single linked list and stack from the lab and lecture slides.
class LList:
    """Singly linked list with O(1) access to both the front and the back."""

    class Node:
        """One list cell: a stored value plus a link to the next cell."""
        def __init__(self, val, next=None):
            self.val = val
            self.next = next

    def __init__(self):
        self.head = None   # first node, or None when empty
        self.tail = None   # last node, or None when empty
        self.nVals = 0     # number of stored values

    def addFront(self, val):
        """Insert val at the head of the list."""
        new_node = self.Node(val, self.head)
        if self.head is None:
            # Empty list: the single node is both head and tail.
            self.tail = new_node
        self.head = new_node
        self.nVals += 1

    def getFront(self):
        """Return the first value, or None if the list is empty."""
        if self.head is None:
            return None
        return self.head.val

    def removeFront(self):
        """Remove and return the first value; return None if the list is empty."""
        if self.head is None:
            # Bug fix: the original fell through and dereferenced None
            # (`returned_value.val`), raising AttributeError on an empty list.
            return None
        removed = self.head
        if self.tail is self.head:
            # Single node: clear both ends.
            self.head = self.tail = None
        else:
            self.head = self.head.next
        self.nVals -= 1
        return removed.val

    def toList(self):
        """Return the values as a plain Python list, front to back."""
        result_lst = []
        node = self.head
        while node is not None:
            result_lst.append(node.val)
            node = node.next
        return result_lst

    def addBack(self, val):
        """Append val at the tail in O(1) (no traversal needed thanks to `tail`)."""
        if self.head is None:
            self.addFront(val)
        else:
            self.tail.next = self.Node(val)
            # The tail must move to the node just appended.
            self.tail = self.tail.next
            self.nVals += 1

    def getBack(self):
        """Return the last value, or None if the list is empty."""
        if self.tail is None:
            return None
        return self.tail.val

    def count(self):
        """Return the number of stored values."""
        return self.nVals

    ##### Part 1: Creating Two Singly Linked List Functions #####

    def printSLL(self):
        """Print the list as [v1,v2,...] or 'Empty Linked List' when empty."""
        node = self.head
        node_lst = []
        if node is None:
            print("Empty Linked List")
        else:
            while node is not None:
                node_lst.append(str(node.val))
                node = node.next
            node_lst_joined = ",".join(node_lst)
            print("[" + node_lst_joined + "]")

    def locate(self, item):
        """Print whether item occurs anywhere in the list."""
        node = self.head
        while node is not None:
            if node.val == item:
                print("Item was located")
                return
            node = node.next
        print("Item was not located")
# lst = LList()
# lst.addFront(2)
# lst.addFront(5)
# lst.addFront(1)
# lst.printSLL()
#
# lst.locate(2)
#
##### Part 2: Evaluating Postfix Expressions #####
class Stack:
    """LIFO stack backed by the front of a singly linked list."""

    def __init__(self):
        self.llist = LList()

    def __len__(self):
        # Bug fix: the original defined __len__ twice; the first definition
        # (returning self.llist.nVals) was silently shadowed. Keep a single
        # definition delegating to count().
        return self.llist.count()

    def push(self, val):
        """Push val on top of the stack."""
        self.llist.addFront(val)

    def pop(self):
        """Remove and return the top value."""
        return self.llist.removeFront()

    def peek(self):
        """Return the top value without removing it (None if empty)."""
        return self.llist.getFront()

    def getBack(self):
        """Return the bottom value (None if empty)."""
        return self.llist.getBack()

    def toList(self):
        """Return the values bottom-to-top as a plain list."""
        return list(reversed(self.llist.toList()))
def evalPostfix(e):  # e -> string
    """Evaluate a postfix (RPN) expression and print its value.

    e: whitespace-separated tokens, e.g. "20 5 - 3 / 2 *".
    Prints "Error: <current stack>" when an operator lacks two operands
    or when the expression leaves more than one value behind.
    """
    tokens = e.split()
    s = Stack()
    operators = ["+", "-", "*", "/"]
    for tok in tokens:
        if tok not in operators:
            s.push(int(tok))
        else:
            # An operator needs two operands on the stack.
            if len(s) < 2:
                current_stack = s.toList()
                print(f'Error: {current_stack}')
                return
            right = s.pop()   # pushed last -> right-hand operand
            left = s.pop()
            if tok == "+":
                s.push(left + right)
            elif tok == "-":
                s.push(left - right)
            elif tok == "*":
                s.push(left * right)
            else:
                s.push(left / right)
    # A valid expression leaves exactly one value on the stack.
    if len(s) != 1:
        current_stack = s.toList()
        print(f'Error: {current_stack}')
    else:
        result = s.peek()
        # Print whole-number float results as ints (e.g. 2.0 -> 2).
        # Fix: replaces a bare `except:` that silently swallowed every
        # exception just to handle ints lacking .is_integer() on old Pythons.
        if isinstance(result, float) and result.is_integer():
            print(int(result))
        else:
            print(result)
# evalPostfix("1 2 3 4 * - /")
# evalPostfix("20 5 - 3 / 2 *")
# evalPostfix("+")
# evalPostfix("1")
| angie0bb/python-practice | practice_python_p3.py | practice_python_p3.py | py | 5,153 | python | en | code | 0 | github-code | 36 |
43103024978 | from celery import shared_task
from celery.utils.log import get_task_logger
from decouple import config
logger = get_task_logger("tasks")
# Lifetime of a QR code in seconds, read from the environment (default 30 min).
expiration_time = config("EXPIRATION_TIME", default=1800, cast=int)
@shared_task(
    bind=True,
    default_retry_delay=3,
    # NOTE(review): `eta` is a per-call apply_async option, not a task
    # decorator option -- it is most likely ignored here; confirm intent.
    eta=expiration_time,
    retry_kwargs={"max_retries": 5},
)
def delete_qrcode_task(self, qrcode_id):
    """Deactivate the QR code with the given id and delete its image file.

    Retries up to 5 times, 3 s apart, when anything fails.
    """
    # Imported inside the task, presumably to avoid a circular import -- confirm.
    from qrcode_api.apps.api.models import QrCode

    try:
        qrcode = QrCode.objects.get(id=qrcode_id)
        logger.info(f"Deactivating qrcode {qrcode_id}")
        qrcode.active = False
        qrcode.image.delete()
        qrcode.save()
    except Exception as e:
        # NOTE(review): retrying on *any* exception includes
        # QrCode.DoesNotExist, which can never succeed -- confirm.
        logger.error(e)
        self.retry(exc=e)
| guilhermehgbrito/qrcode-api | qrcode_api/apps/api/tasks.py | tasks.py | py | 721 | python | en | code | 0 | github-code | 36 |
74339948582 | # Module importieren
from machine import Pin
import time
# Configure the GPIO output pins driving the red/yellow/green LEDs
rot = Pin(14, Pin.OUT)
gelb = Pin(12, Pin.OUT)
gruen = Pin(13, Pin.OUT)
# Traffic-light helper: pulse a single LED
def ampel(led):
    """Switch the given LED on for 3 seconds, then off again (1 ms settle)."""
    on_ms = 3000
    off_ms = 1
    led(1)
    time.sleep_ms(on_ms)
    led(0)
    time.sleep_ms(off_ms)
# Run the traffic-light sequence five times: red -> yellow -> green -> yellow
for i in range(5):
    ampel(rot)
    ampel(gelb)
    ampel(gruen)
    ampel(gelb)
| kvogl/MicroPython | MicroPython_Ampel/extern_led.py | extern_led.py | py | 444 | python | de | code | 0 | github-code | 36 |
17880038233 | from tkinter import *
# Minimal tkinter grid-geometry demo: a label + entry on row 0, a button on row 1.
window = Tk()
window.title("Grid Geometry")
lblNumYears = Label(window, text = "Number of Years:")
lblNumYears.grid(row = 0, column = 0, pady = 20) #top left
entNumYears = Entry(window, width =5)
entNumYears.grid(row = 0, column = 1, sticky = S)
# NOTE(review): "Montly" in the button text looks like a typo for "Monthly".
btnCalculate = Button(window, text = "Calculate Montly Payment")
btnCalculate.grid(row = 1, column = 0, columnspan = 2, padx = 10, pady = 10)
# Enter the tk event loop (blocks until the window is closed).
window.mainloop()
| ES21215/my_awesome_repository | Python/GUI/Gui4.py | Gui4.py | py | 448 | python | en | code | 0 | github-code | 36 |
1063868906 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from PIL import Image
from matplotlib import pyplot as plt
import numpy as np
import pytesseract
import cv2
import tkinter as tk
import logging
import time
import re
import threading
# Dimensioning values
# We are defining global variables based on match data in order to isolate the scoreboard
# NOTE(review): these are assumed to be pixel coordinates of the scoreboard
# overlay in the source frames -- confirm against the broadcast layout.
left_x = 170
upper_y = 50
right_x = 540
lower_y = 80
time_divide = 230
time_width = 60
time_position = 'left'
# If time-position = right : scoreboard is on the left and time on the right
# Else if time position = left : scoreboard is on the right and time on the left
# To deal with time.sleep() and effectively end the threads
#time_value = 0
class ImageHandler(object):
    """Extracts one frame per second from a match video and prepares
    scoreboard crops (time and teams/goals) for Tesseract OCR.

    Shares state with the analyzer thread through attributes of the
    module-level `ocr` function (ocr.count, ocr.video_length,
    ocr.eImageExported).
    """

    def __init__(self, export_path, filename_in):
        self.scoreboard_image = None      # grayscale crop of the whole scoreboard
        self.time_image = None            # crop holding the match clock
        self.time_text = None             # OCR result for the clock
        self.teams_goals_image = None     # crop holding team names and score
        self.teams_goals_text = None      # OCR result for teams/score
        self.video_source_path = filename_in
        self.export_image_path = export_path + '/football.jpg'
        self.export_path = export_path
        logging.basicConfig(level=logging.WARNING)

    def extract_image_from_video(self):
        """
        Extracts one image per video-second and saves it on disk as
        'football.jpg', signalling ocr.eImageExported after each export.
        Stops when the same frame is returned twice in a row (end of video)
        or when ocr.count reaches ocr.video_length.
        :return: -
        """
        vidcap = cv2.VideoCapture(self.video_source_path)
        count = 0
        #success = True
        image_lst = []
        while(True):
            # Seek to the count-th second before grabbing the frame.
            vidcap.set(cv2.CAP_PROP_POS_MSEC, (count * 1000))
            success, image = vidcap.read()
            image_lst.append(image)
            # Stop when last frame is identified
            if count > 1:
                if np.array_equal(image, image_lst[1]):
                    break
                image_lst.pop(0) # Clean the list
            # save frame as PNG file
            if(ocr.count < ocr.video_length):
                try:
                    cv2.imwrite(self.export_image_path, image)
                    print('{}.sec reading a new frame: {} '.format(count, success))
                    count += 1
                    ocr.count += 1
                    # Wake the analyzer thread waiting on this event.
                    ocr.eImageExported.set()
                    time.sleep(1)
                except Exception as e:
                    pass

    def localize_scoreboard_image(self):
        """
        Crops the scoreboard region (module-level pixel bounds) from the
        latest exported snapshot, sets scoreboard_image and exports the
        picture as 'scoreboard_table.jpg'.
        :return: -
        """
        # Read a snapshot image from the video and convert to gray
        snapshot_image = cv2.imread(self.export_image_path)
        grayscale_image = cv2.cvtColor(snapshot_image, cv2.COLOR_BGR2GRAY)
        self.scoreboard_image = grayscale_image[upper_y:lower_y,
                                                left_x:right_x]
        cv2.imwrite(self.export_path + '/scoreboard_table.jpg',
                    self.scoreboard_image)

    def split_scoreboard_image(self):
        """
        Splits the scoeboard image into two parts, sets 'time_image' and 'teams_goals_image'
        and exports as 'time_table.jpg' and 'teams_goals_table.jpg'
        Which side holds the clock is controlled by the module-level
        `time_position` ('left' or 'right').
        :return: -
        """
        '''
        self.time_image = self.scoreboard_image[:, 0:175]
        cv2.imwrite('ocr/img/time_table.jpg', self.time_image)
        self.teams_goals_image = self.scoreboard_image[:, 175:]
        cv2.imwrite('ocr/img/teams_goals_table.jpg', self.teams_goals_image)
        '''
        # Split position relative to the cropped scoreboard, not the full frame.
        relative_time_divide = time_divide-left_x
        time_end = relative_time_divide + time_width
        if(time_position == 'right'):
            self.time_image = self.scoreboard_image[:,
                                                    relative_time_divide:time_end]
            cv2.imwrite(self.export_path + '/time_table.jpg', self.time_image)
            self.teams_goals_image = self.scoreboard_image[:,
                                                           0:relative_time_divide]
            cv2.imwrite(self.export_path + '/teams_goals_table.jpg',
                        self.teams_goals_image)
        else:
            self.time_image = self.scoreboard_image[:, 0:relative_time_divide]
            cv2.imwrite(self.export_path + '/time_table.jpg', self.time_image)
            self.teams_goals_image = self.scoreboard_image[:,
                                                           relative_time_divide:]
            cv2.imwrite(self.export_path + '/teams_goals_table.jpg',
                        self.teams_goals_image)

    def enlarge_scoreboard_images(self, enlarge_ratio):
        """
        Enlarges 'time_table.jpg' and 'teams_goals_table.jpg'
        :param enlarge_ratio: Defines the enlarging size (e.g 2-3x)
        :return: -
        """
        self.time_image = cv2.resize(
            self.time_image, (0, 0), fx=enlarge_ratio, fy=enlarge_ratio)
        self.teams_goals_image = cv2.resize(
            self.teams_goals_image, (0, 0), fx=enlarge_ratio, fy=enlarge_ratio)

    def _get_time_from_image(self):
        """
        Preprocesses time_image transformations for OCR.
        Exports 'time_ocr_ready.jpg' after the manipulations.
        Reads match time from 'time_ocr_ready.jpg' using Tesseract.
        Applies result to time_text.
        :return: True: string is found
                 False: string is not found
        """
        # Count nonzero to determine contrast type
        ret, threshed_img = cv2.threshold(
            self.time_image, 0, 255, cv2.THRESH_OTSU+cv2.THRESH_BINARY_INV)
        # Blur + erode + dilate to clean up speckle noise before OCR.
        self.time_image = cv2.GaussianBlur(self.time_image, (3, 3), 0)
        kernel = np.ones((3, 3), np.uint8)
        self.time_image = cv2.erode(self.time_image, kernel, iterations=1)
        self.time_image = cv2.dilate(self.time_image, kernel, iterations=1)
        cv2.imwrite(self.export_path + '/time_ocr_ready.jpg', self.time_image)
        self.time_text = pytesseract.image_to_string(
            Image.open(self.export_path + '/time_ocr_ready.jpg'), config="--psm 6")
        logging.info('Time OCR text: {}'.format(self.time_text))
        if self.time_text is not None:
            return True
        return False

    def _get_teams_goals_from_image(self):
        """
        Preprocesses teams_goals_image with transformations for OCR.
        Exports 'teams_goals_ocr_ready.jpg' after the manipulations.
        Reads teams and goals information from 'teams_goals_ocr_ready.jpg' using Tesseract.
        Applies result to teams_goals_text.
        :return: True: string is found
                 False: string is not found
        """
        # Applying Thresholding for Teams goals OCR preprocess
        ret, self.teams_goals_image = cv2.threshold(
            self.teams_goals_image, 0, 255, cv2.THRESH_OTSU+cv2.THRESH_BINARY_INV)
        self.teams_goals_image = cv2.GaussianBlur(
            self.teams_goals_image, (3, 3), 0)
        kernel = np.ones((3, 3), np.uint8)
        #self.teams_goals_image = cv2.erode(self.teams_goals_image, kernel, iterations=1)
        self.teams_goals_image = cv2.dilate(
            self.teams_goals_image, kernel, iterations=1)
        cv2.imwrite(self.export_path + '/teams_goals_ocr_ready.jpg',
                    self.teams_goals_image)
        self.teams_goals_text = pytesseract.image_to_string(
            Image.open(self.export_path + '/teams_goals_ocr_ready.jpg'))
        logging.info('Teams and goals OCR text: {}'.format(
            self.teams_goals_text))
        if self.teams_goals_text is not None:
            return True
        return False

    def get_scoreboard_texts(self):
        """
        Returns an array of strings including OCR read time, teams and goals texts.
        :return: numpy array 'scoreboard_texts'
                 scoreboard_texts[0] : time text value
                 scoreboard_texts[1] : teams and goals text value
        """
        # Read text values using Tesseract OCR
        time_text_exists = self._get_time_from_image()
        teams_goals_text_exists = self._get_teams_goals_from_image()
        scoreboard_texts = []
        # Use values on successful read
        if time_text_exists and teams_goals_text_exists:
            scoreboard_texts.append(self.time_text)
            scoreboard_texts.append(self.teams_goals_text)
        scoreboard_texts = np.array(scoreboard_texts)
        return scoreboard_texts

    def play_match_video(self):
        """Play the source video back in grayscale, one frame per second."""
        cap = cv2.VideoCapture(self.video_source_path)
        count = 0
        # NOTE(review): ocr.time_value is never assigned anywhere (the
        # module-level `time_value = 0` is commented out) -- this lookup
        # would raise AttributeError; confirm whether this method is used.
        if(ocr.time_value < ocr.video_length):
            while (cap.isOpened()):
                cap.set(cv2.CAP_PROP_POS_MSEC, (count * 1000))
                ret, frame = cap.read()
                gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
                cv2.imshow('frame', gray)
                time.sleep(1)
                count += 1
                if cv2.waitKey(1) & 0xFF == ord('q'):
                    break
        cap.release()
        cv2.destroyAllWindows()
class Match(object):
    """Holds the parsed match state (teams, score, clock) and updates it
    from the OCR text produced by ImageHandler.

    Runs as the consumer side of the pipeline: analize_scoreboard() loops
    on the shared `ocr` function attributes set by the extractor thread.
    """

    def __init__(self, export_path, filename_out):
        self.scoreboard_text_values = None   # latest [time_text, teams_goals_text]
        self.home_score = 0
        self.home_score_temp = 0             # *_temp fields hold un-validated OCR values
        self.home_team = None
        self.home_team_temp = 0
        self.home_team_fullname = None
        self.home_team_identified = False
        self.opponent_score = 0
        self.opponent_score_temp = 0
        self.opponent_team = None
        self.opponent_team_temp = None
        self.opponent_team_fullname = None
        self.opponent_team_identified = False
        self.match_time = None
        self.match_time_temp = None
        self._match_time_prev = []           # recent valid clock values, for sanity checks
        self.export_path = export_path
        self.filename_out = filename_out

    def analize_scoreboard(self):
        """Consumer loop: wait for each exported frame, OCR it and fold the
        result into the match state, until the frame budget is exhausted."""
        while (ocr.count < ocr.video_length):
            try:
                ocr.eImageExported.wait()
                ocr.scoreboard.localize_scoreboard_image()
                ocr.scoreboard.split_scoreboard_image()
                ocr.scoreboard.enlarge_scoreboard_images(3)
                OCR_text = ocr.scoreboard.get_scoreboard_texts()
                ocr.football_match.provide_scoreboard_text_values(OCR_text)
                ocr.football_match.update_all_match_info()
                ocr.football_match.print_all_match_info()
                ocr.eImageExported.clear()
            except Exception as e:
                # Any OCR/parse failure skips this frame and keeps the loop alive.
                logging.warning(e)

    def provide_scoreboard_text_values(self, scoreboard_text_values):
        """Store the latest OCR output ([time_text, teams_goals_text])."""
        self.scoreboard_text_values = scoreboard_text_values

    def cleanse_match_score(self):
        """
        Cleanse home_score_temp and opponent_score_temp values and removes
        noisy starters and enders if present
        :return: -
        """
        # Middle token of "TEAM d-d TEAM"; keep only its digits.
        score_string = self.scoreboard_text_values[1].split(' ')[1]
        result = []
        for letter in score_string:
            if letter.isdigit():
                result += letter
        self.home_score_temp = result[0]
        self.opponent_score_temp = result[1]

    def cleanse_match_teams(self):
        """
        Cleanse home_team_temp and opponent_team_temp values and removes
        noisy starter or ender if present
        :return: -
        """
        self.home_team_temp = self.scoreboard_text_values[1].split(' ')[0]
        self.opponent_team_temp = self.scoreboard_text_values[1].split(' ')[2]
        # Check and remove noisy starters and enders
        if not self.home_team_temp[0].isalpha():
            self.home_team_temp = self.home_team_temp[1:4]
        elif not self.opponent_team_temp[-1].isalpha():
            self.opponent_team_temp = self.opponent_team_temp[0:3]

    def cleanse_match_time(self):
        """
        Cleanse match_time_temp, and removes noisy starter or ender if present
        :return: -
        """
        self.match_time_temp = self.scoreboard_text_values[0]
        # Check for noisy starters and ender and clean if present
        letter_ptr = 0
        if not self.match_time_temp[letter_ptr].isdigit():
            letter_ptr += 1
            # Up to two leading junk characters are stripped.
            if not self.match_time_temp[letter_ptr].isdigit():
                letter_ptr += 1
            self.match_time_temp = self.match_time_temp[letter_ptr:]
            logging.info("Time text noisy starter removed.")
        elif not self.match_time_temp[-1].isdigit():
            self.match_time_temp = self.match_time_temp[0:-1]
            logging.info("Time text noisy ender removed.")

    def update_match_time(self):
        """
        Validates cleansed match_time_temp with regular expression and sets match_time if valid value exists
        :return: True: time has been updated
                 False: time has not been updated
        """
        # Check if the OCR read value is valid
        time_expr = re.compile('\d\d:\d\d')
        res = time_expr.search(self.match_time_temp)
        if res is None:
            return False
        last_valid_timeval = self.match_time_temp[res.start():res.end()]
        self._match_time_prev.append(last_valid_timeval)
        # Check validity between last time values
        # (the clock must never run backwards; string comparison works
        # because the format is fixed-width MM:SS)
        if last_valid_timeval < self._match_time_prev[len(self._match_time_prev)-2]:
            # Minute error occured - minute remain unchanged
            if last_valid_timeval[0:2] < self._match_time_prev[len(self._match_time_prev)-2][0:2]:
                logging.warning(
                    "Minute error occured: minute remain unchanged!")
                fixed_minutes = self._match_time_prev[len(
                    self._match_time_prev)-2][0:2]
                last_valid_timeval = fixed_minutes + last_valid_timeval[2:]
            else:
                # Second error occured - auto increment second
                logging.warning(
                    "Second error occured: auto incremented second!")
                seconds = self._match_time_prev[len(
                    self._match_time_prev)-2][-2:]
                fixed_seconds = str(int(seconds)+1)
                last_valid_timeval = last_valid_timeval[:-2] + fixed_seconds
        # Free unnecessary time values
        if len(self._match_time_prev) > 2:
            self._match_time_prev.pop(0)
        # Write all valid values to a text file for analysis
        self.match_time = last_valid_timeval
        with open(self.export_path + '/' + self.filename_out, 'a') as f:
            f.write("%s,%s\n" % (self.match_time, ocr.count))
        return True

    def update_match_score(self):
        """
        Validates cleansed score with regular expression
        :return: True: score matches the regexp
                 False: score does not match the regexp
        """
        score_expr = re.compile('\d-\d')
        res = score_expr.search(self.scoreboard_text_values[1])
        if res is None:
            return False
        self.home_score = self.home_score_temp
        self.opponent_score = self.opponent_score_temp
        return True

    def update_match_team(self, selected_team):
        """
        Sets cleansed home_team or opponent_team values if not set before
        :param selected_team: 'home' or 'opponent'
        :return: -
        """
        if selected_team == 'home':
            self.home_team = self.home_team_temp
            self.home_team_identified = True
        elif selected_team == 'opponent':
            self.opponent_team = self.opponent_team_temp
            self.opponent_team_identified = True

    def update_all_match_info(self):
        """
        Attempts to update match infos:
        time, teams, score
        :return: True: update succeed
                 False: update failed
        """
        if len(self.scoreboard_text_values[0]) > 0 and len(self.scoreboard_text_values[1]) > 0:
            try:
                # Clean OCR read time value and update time if valid
                self.cleanse_match_time()
                self.update_match_time()
                # Clean OCR read score value and update score if valid
                self.cleanse_match_score()
                self.update_match_score()
                # Clean OCR read team values and set teams if valid and necessary
                self.cleanse_match_teams()
                if self.home_team_identified is False:
                    self.update_match_team('home')
                if self.opponent_team_identified is False:
                    self.update_match_team('opponent')
            except Exception as e:
                logging.info(e)
                logging.info("Unable to update match info for some reason")
        else:
            logging.info("Unable to update match info: no text received!")

    def print_all_match_info(self):
        """Print a one-line summary: time, teams (full names when known), score."""
        home_team_name = self.home_team
        opponent_team_name = self.opponent_team
        if self.home_team_fullname is not None and self.opponent_team_fullname is not None:
            home_team_name = self.home_team_fullname
            opponent_team_name = self.opponent_team_fullname
        print('{} {} {}-{} {}'.format(self.match_time,
                                      home_team_name,
                                      self.home_score,
                                      self.opponent_score,
                                      opponent_team_name))
# MAIN
# Empty times.txt file
def ocr(export_path, filename_in, filename_out, video_length):
    """Run the scoreboard OCR pipeline on a match video.

    Starts two threads -- one exporting a frame per video second, one
    OCR-ing each exported frame -- and blocks until both finish.
    Function attributes (ocr.count, ocr.video_length, ocr.eImageExported,
    ocr.scoreboard, ocr.football_match) act as shared state between them.

    :param export_path: directory for intermediate images and the times file
    :param filename_in: path to the match video
    :param filename_out: name of the CSV-like times file written under export_path
    :param video_length: maximum number of frames (seconds) to process
    """
    ocr.count = 0
    ocr.video_length = video_length
    # Truncate the output times file before the run.
    open(export_path+'/' + filename_out, 'w').close()
    ocr.eImageExported = threading.Event()
    # Create objects and threads
    ocr.scoreboard = ImageHandler(export_path, filename_in)
    ocr.football_match = Match(export_path, filename_out)
    ocr.tImageExtractor = threading.Thread(
        None, ocr.scoreboard.extract_image_from_video, name="ImageExtractor")
    ocr.tScoreboardAnalyzer = threading.Thread(
        None, ocr.football_match.analize_scoreboard, name="ScoreboardAnalyzer")
    ocr.tImageExtractor.start()
    ocr.tScoreboardAnalyzer.start()
    ocr.tImageExtractor.join()
    ocr.tScoreboardAnalyzer.join()
if __name__ == '__main__':
    # Demo entry point: OCR a local recording for video_length seconds.
    filename_in = 'ocr/tmp/secondmatch.mkv'
    export_path = 'ocr/img'
    filename_out = 'times.txt'
    video_length = 1080
    # Bug fix: pass the configured variable instead of a duplicated literal
    # 1080, so changing video_length above actually takes effect.
    ocr(export_path, filename_in, filename_out, video_length) | BrunoSader/An-emotional-sports-highlight-generator | ocr/final_ocr.py | final_ocr.py | py | 18,525 | python | en | code | 4 | github-code | 36
41865132711 | from __future__ import absolute_import, print_function
import os
import numpy as np
import pyopencl as cl
os.environ['PYOPENCL_COMPILER_OUTPUT']='1'
modulepath=os.path.dirname(os.path.abspath(__file__))
class Particles(object):
    """Flat float64 storage for a particle bunch.

    `data` holds nparticles * ndim coordinates in one contiguous array,
    initialised to zero.
    """

    def __init__(self, nparticles=1, ndim=10):
        self.nparticles = nparticles
        self.ndim = ndim
        total = nparticles * ndim
        self.data = np.zeros(total, dtype=np.float64)
class Elements(object):
    """Packed buffer of beamline element records.

    Each element occupies a run of 8-byte slots in `data`; `value_t` is a
    union dtype so every slot can be viewed as float64, int64 or uint64.
    `elements` records the start offset of each element and `last` is the
    next free slot.
    """

    # Union view: one 8-byte slot, readable as f64 / i64 / u64.
    value_t = np.dtype({'names': ['f64', 'i64', 'u64'],
                        'formats': ['<f8', '<i8', '<u8'],
                        'offsets': [0, 0, 0],
                        'itemsize': 8})

    DriftId = 1
    MultipoleId = 2

    def __init__(self, size=40000):
        self.size = size
        self.data = np.zeros(size, dtype=self.value_t)
        self.last = 0
        self.elements = []

    def add_drift(self, length=0.0):
        """Append a drift record: [DriftId, length]."""
        self.elements.append(self.last)
        self.data['u64'][self.last] = self.DriftId
        self.data['f64'][self.last + 1] = length
        self.last += 2

    def add_multipole(self, knl=None, ksl=None, length=0.0, hxl=0.0, hyl=0.0):
        """Append a multipole record:
        [MultipoleId, order, length, hxl, hyl,
         knl[0]/0!, ksl[0]/0!, knl[1]/1!, ksl[1]/1!, ...].

        Coefficients are stored divided by the factorial of their index.
        """
        # Fix: avoid mutable default arguments ([]), keeping call sites unchanged.
        knl = [] if knl is None else knl
        ksl = [] if ksl is None else ksl
        self.elements.append(self.last)
        order = max(len(knl), len(ksl))
        self.data['u64'][self.last + 0] = self.MultipoleId
        self.data['u64'][self.last + 1] = order
        # Bug fix: length/hxl/hyl are floats and must be written through the
        # 'f64' view -- the original wrote them into 'u64', truncating them
        # to integers (add_drift already uses 'f64' for length).
        self.data['f64'][self.last + 2] = length
        self.data['f64'][self.last + 3] = hxl
        self.data['f64'][self.last + 4] = hyl
        fact = 1
        for nn in range(len(knl)):
            self.data['f64'][self.last + 5 + nn * 2] = knl[nn] / fact
            fact *= nn + 1
        fact = 1
        for nn in range(len(ksl)):
            self.data['f64'][self.last + 5 + nn * 2 + 1] = ksl[nn] / fact
            fact *= nn + 1
        # Bug fix: a record is 5 header slots + 2 slots per order; the
        # original advanced by 5*2*order, corrupting the buffer layout
        # (and advancing by 0 for order 0).
        self.last += 5 + 2 * order
# Module-level demo instances used by the tracking example below.
particles=Particles(nparticles=2560,ndim=10)
elements=Elements()
class SixTrackCL(object):
    """Thin OpenCL driver: uploads particle and element buffers and runs
    the `elements_track` kernel from sixtracklib_cl.c."""

    ro = cl.mem_flags.READ_ONLY | cl.mem_flags.COPY_HOST_PTR
    rw = cl.mem_flags.READ_WRITE | cl.mem_flags.COPY_HOST_PTR

    def __init__(self, particles, elements, device=["0"]):
        self.particles = particles
        self.elements = elements
        srcpath = '-I%s' % modulepath
        self.ctx = cl.create_some_context(answers=device)
        self.queue = cl.CommandQueue(self.ctx)
        self.particles_g = cl.Buffer(self.ctx, self.rw, hostbuf=particles.data)
        self.elements_g = cl.Buffer(self.ctx, self.ro, hostbuf=elements.data)
        # Fix: close the kernel-source file handle (the original leaked it).
        with open(os.path.join(modulepath, 'sixtracklib_cl.c')) as src_file:
            src = src_file.read()
        self.prg = cl.Program(self.ctx, src).build(options=[srcpath])

    def track(self, nturns, elemids):
        """Track the particle bunch for `nturns` turns through the
        elements whose buffer offsets are listed in `elemids`, then copy
        the particle data back to the host.

        Bug fixes vs. the original: it referenced the undefined names
        `self.elemids`, `elem_ids`, `queue` and `npart`, so it could
        never run.
        """
        elemids = np.array(elemids, dtype='uint64')
        elemids_g = cl.Buffer(self.ctx, self.rw, hostbuf=elemids)
        nelems = np.int64(len(elemids))
        nturns = np.int64(nturns)
        npart = self.particles.nparticles  # one work item per particle
        self.prg.elements_track(self.queue, [npart], None,
                                self.elements_g, elemids_g, nelems,
                                nturns,
                                self.particles_g)
        cl.enqueue_copy(self.queue, self.particles.data, self.particles_g)
| rdemaria/sixtracklib_gsoc18 | studies/study1/sixtracklib.py | sixtracklib.py | py | 2,937 | python | en | code | 0 | github-code | 36 |
72806130663 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 2 17:51:41 2018
@author: USER
"""
import sys
sys.path.append('..')
import os
import torch
import torch.nn as nn
import numpy as np
import utils.general as utils
import utils.adversarial_ae as ae_utils
from adverse_AE import Adversarial_AE, Discriminator
import torchvision
import torch.optim as optim
if __name__ == '__main__':
    # --- Hyper-parameters ---
    epochs = 50
    batch_size = 100
    latent_dim = 2
    reg = True

    dataloader = utils.get_dataloader(batch_size, pad = False)
    device = utils.get_device()
    step_per_epoch = np.ceil(dataloader.dataset.__len__() / batch_size)

    sample_dir = './samples'
    checkpoint_dir = './checkpoints'
    utils.makedirs(sample_dir, checkpoint_dir)

    AE = Adversarial_AE(latent_dim = latent_dim).to(device)
    D = Discriminator(latent_dim = latent_dim).to(device)
    ae_optim = optim.Adam(AE.parameters())
    d_optim = optim.Adam(D.parameters())

    rec_log = []   # reconstruction-loss history
    d_log = []     # discriminator-loss history
    rec_criterion = nn.MSELoss().to(device)
    discrim_criterion = nn.BCELoss().to(device)
    result = None  # first batch, kept as a fixed reconstruction sample

    for epoch_i in range(1, epochs + 1):
        for step_i, (img, _) in enumerate(dataloader):
            N = img.shape[0]
            real_label = torch.ones(N).to(device)
            fake_label = torch.zeros(N).to(device)
            # Bug fix: size the soft labels by the actual batch size N, not
            # batch_size -- the final batch of an epoch can be smaller, which
            # made BCELoss fail on a shape mismatch.
            soft_label = torch.Tensor(N).uniform_(0.9, 1).to(device)
            img = img.view(N, -1).to(device)
            if result is None:
                result = img
            # --- Reconstruction phase: train AE to reproduce the input ---
            reconstructed = AE(img)
            loss = rec_criterion(reconstructed, img)
            ae_optim.zero_grad()
            loss.backward()
            ae_optim.step()
            rec_log.append(loss.item())
            # --- Discriminator phase: real prior codes vs. encoder codes ---
            z = torch.randn(N, latent_dim).to(device)
            code = AE.encoder(img)
            fake_score = D(code)
            real_score = D(z)
            real_loss = discrim_criterion(real_score, soft_label)
            fake_loss = discrim_criterion(fake_score, fake_label)
            loss = real_loss + fake_loss
            d_optim.zero_grad()
            loss.backward()
            d_optim.step()
            d_log.append(loss.item())
            # --- Generator phase: push encoder codes towards "real" ---
            code = AE.encoder(img)
            fake_score = D(code)
            loss = discrim_criterion(fake_score, real_label)
            ae_optim.zero_grad()
            loss.backward()
            ae_optim.step()
        utils.show_process(epoch_i, step_i + 1, step_per_epoch, rec_log, d_log)
        if epoch_i == 1:
            # Save the untouched originals once for visual comparison.
            torchvision.utils.save_image(result.reshape(-1, 1, 28, 28),
                                         os.path.join(sample_dir, 'orig.png'),
                                         nrow = 10)
        reconstructed = AE(result)
        utils.save_image(reconstructed.reshape(-1, 1, 28, 28), 10, epoch_i,
                         step_i + 1, sample_dir)
        utils.save_model(AE, ae_optim, rec_log, checkpoint_dir, 'AE.ckpt')
        utils.save_model(D, d_optim, d_log, checkpoint_dir, 'D.ckpt')
    ae_utils.plot_manifold(AE.encoder, device, dataloader.dataset,
                           dataloader.dataset.__len__(), sample_dir)
| bchao1/Fun-with-MNIST | Adversarial_Autoencoder/train.py | train.py | py | 3,559 | python | en | code | 23 | github-code | 36 |
70712255464 |
# Read the puzzle input; note this shadows the builtin `input`.
with open('input.txt', 'r') as f:
    input = f.readlines()
def findmax(lines=None):
    """Print (and return) the combined total of the three largest groups.

    Groups of integers are separated by blank lines (Advent of Code 2022
    day 1 part 2). `lines` defaults to the module-level `input` list read
    from input.txt, preserving the original call signature.
    """
    if lines is None:
        lines = input
    totals = []
    current = 0
    for raw in lines:
        stripped = raw.strip()
        if stripped:
            current += int(stripped)
        else:
            totals.append(current)
            current = 0
    # Bug fix: the original dropped the final group whenever the input did
    # not end with a blank line. Appending unconditionally is safe: a
    # trailing blank line just adds a 0 that can never enter the top three.
    totals.append(current)
    totals.sort()
    top_three = sum(totals[-3:])
    print(top_three)
    return top_three
# Run the solver against input.txt (prints the top-three total).
findmax() | bg-gif/Advent-of-Code-2022 | day_one_a.py | day_one_a.py | py | 304 | python | en | code | 0 | github-code | 36
4969851196 | import math
import itertools
def klauber(x):
    """Evaluate Euler's prime-generating polynomial x^2 - x + 41 at x."""
    return x * (x - 1) + 41
def isPrime(x):
    """Return True when x is prime, by trial division up to sqrt(x).

    Robustness fix: the original returned True for x < 2 (0, 1 and
    negatives are not prime). Callers here only pass Klauber values
    (>= 41), so their behaviour is unchanged.
    """
    if x < 2:
        return False
    for i in range(2, 1 + int(math.sqrt(x))):
        if x % i == 0:
            return False
    return True
def klauberNotPrime(r):
    """Return the indices i in [1, r) whose Klauber value is composite."""
    return [i for i in range(1, r) if not isPrime(klauber(i))]
class Quadratic:
    """A quadratic polynomial: square*x^2 + linear*x + constant."""

    def __init__(self, square, linear, constant):
        self.square = square
        self.linear = linear
        self.constant = constant

    def __call__(self, x):
        """Evaluate the polynomial at x."""
        square_term = self.square * x * x
        linear_term = self.linear * x
        return square_term + linear_term + self.constant

    def __repr__(self):
        return "Quadratic(%d,%d,%d)" % (self.square, self.linear, self.constant)
# Find best ploynomial which matches all terms in array
def bestPoly(array, cRange, lRange=None, sRange=None):
    """Exhaustively search Quadratic(s, l, c) coefficients and return the
    quadratic whose testPoly score against `array` is highest.

    lRange defaults to cRange, sRange to lRange. Constant-only
    candidates (s == 0 and l == 0) are skipped.
    """
    if lRange is None: lRange = cRange
    if sRange is None: sRange = lRange
    bestScore = -1
    bestQuadratic = None
    # Consistency fix: use range (Python 3) like the rest of the file;
    # the original used the Python-2-only xrange.
    for s in range(sRange):
        for l in range(lRange):
            if s == 0 and l == 0:
                continue
            for c in range(cRange):
                q = Quadratic(s, l, c)
                score = testPoly(q, array)
                if score > bestScore:
                    bestScore = score
                    bestQuadratic = q
    print("best Score %d for %s" % (bestScore, bestQuadratic))
    return bestQuadratic
def testPoly(q,array):
maxArray = array[-1]
score = 0
for x in xrange(1,len(array)):
value = q(x)
if value>maxArray:
break
elif value in array:
score += 1
else:
return -1
return score
#Calculates a string of the best polynomials in sequence - similar to repRunEx but without replacement/supplementation of the array passed
def bestPolySet(array, cRange, lRange, sRange):
    """Greedily cover `array` with best-scoring quadratics.

    Repeatedly searches for the best polynomial over the remaining
    values, removes every value it hits, and records it. Mutates and
    drains `array`; returns the list of chosen quadratics.

    NOTE(review): if a chosen polynomial removes nothing (or bestPoly
    returns None), this loop never terminates -- confirm inputs.
    """
    polySet = []
    while len(array) > 0:
        q = bestPoly(array, cRange, lRange, sRange)
        polySet.append(q)
        # Consistency fix: range instead of the Python-2-only xrange.
        for x in range(1, len(array)):
            v = q(x)
            if v in array:
                array.remove(v)
        print("New Array length %d" % len(array))
    return polySet
#Subtracts a given polynomial from a given array, and returns the result
def subPoly(array, poly):
    """Remove from array every value poly(i) for i in [0, len(array)).

    The range bound is captured once, before removals shrink the list
    (matching the original xrange semantics).  Values beyond the
    current last element are skipped.  Mutates and returns `array`.

    Bug fixes: xrange() -> range() for Python 3; guard against an
    IndexError on array[-1] if every element gets removed mid-loop.
    """
    currentVal = None
    for i in range(0, len(array)):
        if not array:
            # Robustness: the original raised IndexError below once the
            # array had been emptied.
            break
        currentVal = poly(i)
        if not currentVal > array[len(array) - 1]:
            if currentVal in array:
                array.remove(currentVal)
    return array
def kNPsubIP(number, IP):
    """Build klauberNotPrime(number), then strip each polynomial in IP."""
    array = klauberNotPrime(number)
    for poly in IP:
        array = subPoly(array, poly)
    return array
def repRunEx(number, it):
    """Iteratively discover `it` best polynomials.

    Each pass re-derives the residual array (klauberNotPrime values not
    yet covered by the polynomials found so far, over a window growing
    by 41 per iteration) and stores the next best polynomial.

    Bug fix: xrange() -> range() for Python 3 compatibility.
    """
    tedCruz = [Quadratic(0, 0, 0)] * it
    for i in range(it):
        print(kNPsubIP(number + (i * 41), tedCruz))
        tedCruz[i] = bestPoly(kNPsubIP(number + (i * 41), tedCruz), 2000, 200, 20)
    print(tedCruz)
#The following two arrays are for use by the BestList
# Module-level scratch state: bestList()/bestPolyLoop() keep the five
# highest-scoring quadratics (and their scores) here.
bestQuadList = [Quadratic(0,0,0)] * 5
bestScoreList = [0] * 5
#Quadratics for testing functions
# Ad-hoc fixtures for interactive experimentation; not used by the
# search algorithms themselves.
a = Quadratic(1,1,1)
b = Quadratic(2,2,2)
c = Quadratic(3,3,3)
d = Quadratic(4,4,4)
e = Quadratic(5,5,5)
def bestList(quadratic, score):
    """Insert (quadratic, score) into the module-level top-5 lists.

    Finds the lowest-scoring slot in bestScoreList and, if `score`
    beats it, replaces that slot in both lists.  Returns bestQuadList.

    Bug fixes: xrange() -> range() for Python 3; removed the redundant
    duplicated return statement inside the if-block (both returned the
    same value, so behavior is unchanged).
    """
    lowestScore = 100000
    lowestIndex = 100000
    for i in range(len(bestScoreList)):
        if lowestScore > bestScoreList[i]:
            lowestScore = bestScoreList[i]
            lowestIndex = i
    if score > lowestScore:
        bestScoreList[lowestIndex] = score
        bestQuadList[lowestIndex] = quadratic
    return bestQuadList
def bestPolyLoop(kArray, cRange, lRange=None, sRange=None):
    """Search all quadratics and return the top five scorers.

    Like bestPoly(), but records the best five candidates (via
    bestList) instead of the single best.  Resets the module-level
    top-5 state before the search and returns bestQuadList.

    Bug fixes: xrange() -> range() for Python 3; `== None` replaced
    with the idiomatic `is None` (identical behavior here).
    """
    global bestQuadList
    global bestScoreList
    bestQuadList = [Quadratic(0, 0, 0)] * 5
    bestScoreList = [0] * 5
    bestListReceived = []
    if lRange is None:
        lRange = cRange
    if sRange is None:
        sRange = lRange
    for s in range(sRange):
        for l in range(lRange):
            if s == 0 and l == 0:
                continue
            for c in range(cRange):
                q = Quadratic(s, l, c)
                score = testPoly(q, kArray)
                bestListReceived = bestList(q, score)
    return bestListReceived
#def pseudoGenetics(layers, krange, kInc, cRange, lRange, sRange, currentlyExcludedQuads):
# tL = []
# for i in xRange(layers):
# tL += bestPolyLoop(krange, cRange, lRange, sRange)
def layerScore(path, krange, cRange, lRange, sRange):
    """Score one `path` through successive top-5 polynomial choices.

    Starting from klauberNotPrime(krange), each step computes the top
    five polynomials for the current residual array (bestPolyLoop),
    picks the one indexed by path[i], and subtracts its values
    (subPoly).  Returns the number of values left uncovered — lower is
    better.

    Bug fix: xrange() -> range() for Python 3; commented-out debug
    prints removed.
    """
    arrayToTrim = klauberNotPrime(krange)
    importantPolyList = []
    for i in range(len(path)):
        regularPolyList = bestPolyLoop(arrayToTrim, cRange, lRange, sRange)
        importantPoly = regularPolyList[path[i]]
        print(importantPoly)
        arrayToTrim = subPoly(arrayToTrim, importantPoly)
        importantPolyList.append(importantPoly)
    return len(arrayToTrim)
def fixedPseudoGenetics(krange, cRange, lRange, sRange, recWidth):
    """Brute-force the best 5-step path of top-5 indices.

    Tries every path [a, b, c, d, e] with each index in [0, recWidth)
    and keeps the path whose layerScore leaves the fewest uncovered
    values.  Returns the best path as a list of five indices.

    Changes: xrange() -> range() for Python 3; the five hand-written
    nested loops are replaced by itertools.product (already imported at
    module level), which yields the exact same paths in the same order.
    """
    lowestScore = 1000000000
    bestTriple = [9999] * 5
    for path in itertools.product(range(recWidth), repeat=5):
        path = list(path)
        print(path)
        tScore = layerScore(path, krange, cRange, lRange, sRange)
        if lowestScore > tScore:
            print(lowestScore)
            print(tScore)
            lowestScore = tScore
            bestTriple = list(path)
    return bestTriple
#Make an array of the klauberNotPrimes up to the krange
#Find the top polynomials with bestPolyLoop
#Call self with first element of array as currentlyExcludedQuads, and with layers = layers - 1
#If layers = 0 print the string of
| AlJinni/Genetic-Primes | Recovery.py | Recovery.py | py | 6,166 | python | en | code | 0 | github-code | 36 |
18626454048 | #
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 675 Mass Ave, Cambridge, MA 02139, USA.
#
"""
Operation recorder interface and implementations.
"""
from __future__ import print_function, absolute_import
from collections import namedtuple
try:
from collections import OrderedDict # pylint: disable=import-error
except ImportError:
from ordereddict import OrderedDict # pylint: disable=import-error
from datetime import datetime, timedelta
import logging
import yaml
from yaml.representer import RepresenterError
import six
from .cim_obj import CIMInstance, CIMInstanceName, CIMClass, CIMClassName, \
CIMProperty, CIMMethod, CIMParameter, CIMQualifier, \
CIMQualifierDeclaration, NocaseDict
from .cim_types import CIMInt, CIMFloat, CIMDateTime
from .exceptions import CIMError
from ._logging import PywbemLoggers, LOG_OPS_CALLS_NAME, LOG_HTTP_NAME
from .config import DEFAULT_MAX_LOG_ENTRY_SIZE
if six.PY2:
import codecs # pylint: disable=wrong-import-order
# Public API of this module.
__all__ = ['BaseOperationRecorder', 'TestClientRecorder',
           'LogOperationRecorder',
           'OpArgs', 'OpResult', 'HttpRequest', 'HttpResponse']

# Integer type wide enough for CIM 64-bit values: Python 2 needs the
# separate 'long' type; the Python 3 int is unbounded.
if six.PY2:
    _Longint = long  # noqa: F821
else:
    _Longint = int

# Underlying namedtuple for the OpArgs class defined below.
OpArgsTuple = namedtuple("OpArgsTuple", ["method", "args"])
def _represent_ordereddict(dump, tag, mapping, flow_style=None):
    """PyYAML representer function for OrderedDict.
    This is needed for yaml.safe_dump() to support OrderedDict.
    Courtesy:
    http://blog.elsdoerfer.name/2012/07/26/make-pyyaml-output-an-ordereddict/
    """
    value = []
    node = yaml.MappingNode(tag, value, flow_style=flow_style)
    # Register the node before recursing into its items so that aliased
    # (shared/recursive) objects resolve to this mapping node.
    if dump.alias_key is not None:
        dump.represented_objects[dump.alias_key] = node
    best_style = True
    if hasattr(mapping, 'items'):
        mapping = mapping.items()
    for item_key, item_value in mapping:
        node_key = dump.represent_data(item_key)
        node_value = dump.represent_data(item_value)
        # Any non-plain-scalar key or value forces block style.
        if not (isinstance(node_key, yaml.ScalarNode) and
                not node_key.style):
            best_style = False  # pylint: disable=bad-indentation
        if not (isinstance(node_value, yaml.ScalarNode) and
                not node_value.style):
            best_style = False  # pylint: disable=bad-indentation
        value.append((node_key, node_value))
    if flow_style is None:
        # Honor the dumper's configured default; otherwise use the
        # style inferred from the contents above.
        if dump.default_flow_style is not None:
            node.flow_style = dump.default_flow_style
        else:
            node.flow_style = best_style
    return node
# Register the representer so yaml.safe_dump() emits OrderedDict as a
# plain YAML mapping (preserving insertion order).
yaml.SafeDumper.add_representer(
    OrderedDict,
    lambda dumper, value:
    _represent_ordereddict(dumper, u'tag:yaml.org,2002:map', value))
# Some monkey-patching for better diagnostics:
def _represent_undefined(self, data):
    """Raises flag for objects that cannot be represented"""
    # Include the registered representers in the message so the failing
    # type can be diagnosed, instead of PyYAML's terse default error.
    raise RepresenterError("cannot represent an object: %s of type: %s; "
                           "yaml_representers: %r, "
                           "yaml_multi_representers: %r" %
                           (data, type(data), self.yaml_representers.keys(),
                            self.yaml_multi_representers.keys()))

yaml.SafeDumper.represent_undefined = _represent_undefined
class OpArgs(OpArgsTuple):
    """
    A named tuple representing the name and input arguments of the invocation
    of a :class:`~pywbem.WBEMConnection` method, with the following named fields
    and attributes:

    Attributes:

      method (:term:`unicode string`):
        Name of the :class:`~pywbem.WBEMConnection` method.

      args (:class:`py:dict`):
        Dictionary of input arguments (both positional and keyword-based).
    """

    __slots__ = ()

    def __repr__(self):
        # %r yields the same text as the {…!r} format used previously.
        return "OpArgs(method=%r, args=%r)" % (self.method, self.args)
OpResultTuple = namedtuple("OpResultTuple", ["ret", "exc"])


class OpResult(OpResultTuple):
    """
    A named tuple representing the result of the invocation of a
    :class:`~pywbem.WBEMConnection` method, with the following named fields
    and attributes:

    Attributes:

      ret (:class:`py:object`):
        Return value, if the method returned.
        `None`, if the method raised an exception.
        Note that `None` may be a legitimate return value, so the test for
        exceptions should be done based upon the :attr:`exc` variable.

      exc (:exc:`~py:exceptions.Exception`):
        Exception object, if the method raised an exception.
        `None`, if the method returned.
    """

    __slots__ = ()

    def __repr__(self):
        # %r yields the same text as the {…!r} format used previously.
        return "OpResult(ret=%r, exc=%r)" % (self.ret, self.exc)
HttpRequestTuple = namedtuple("HttpRequestTuple",
                              ["version", "url", "target", "method", "headers",
                               "payload"])


class HttpRequest(HttpRequestTuple):
    """
    A named tuple representing the HTTP request sent by the WBEM client, with
    the following named fields and attributes:

    Attributes:

      version (:term:`number`):
        HTTP version from the request line (10 for HTTP/1.0, 11 for HTTP/1.1).

      url (:term:`unicode string`):
        URL of the WBEM server (e.g. 'https://myserver.acme.com:15989').

      target (:term:`unicode string`):
        Target URL segment as stated in request line (e.g. '/cimom').

      method (:term:`unicode string`):
        HTTP method as stated in the request line (e.g. "POST").

      headers (:class:`py:dict`):
        A dictionary of all HTTP header fields:
        * key (:term:`unicode string`): Name of the header field
        * value (:term:`unicode string`): Value of the header field

      payload (:term:`unicode string`):
        HTTP payload, i.e. the CIM-XML string.
    """

    __slots__ = ()

    def __repr__(self):
        # %r yields the same text as the {…!r} format used previously.
        return ("HttpRequest(version=%r, url=%r, target=%r, method=%r, "
                "headers=%r, payload=%r)" %
                (self.version, self.url, self.target, self.method,
                 self.headers, self.payload))
HttpResponseTuple = namedtuple("HttpResponseTuple",
                               ["version", "status", "reason", "headers",
                                "payload"])


class HttpResponse(HttpResponseTuple):
    """
    A named tuple representing the HTTP response received by the WBEM client,
    with the following named fields and attributes:

    Attributes:

      version (:term:`number`):
        HTTP version from the response line (10 for HTTP/1.0, 11 for HTTP/1.1).

      status (:term:`number`):
        HTTP status code from the response line (e.g. 200).

      reason (:term:`unicode string`):
        HTTP reason phrase from the response line (e.g. "OK").

      headers (:class:`py:dict`):
        A dictionary of all HTTP header fields:
        * key (:term:`unicode string`): Name of the header field
        * value (:term:`unicode string`): Value of the header field

      payload (:term:`unicode string`):
        HTTP payload, i.e. the CIM-XML string.
    """

    __slots__ = ()

    def __repr__(self):
        # %r yields the same text as the {…!r} format used previously.
        return ("HttpResponse(version=%r, status=%r, reason=%r, headers=%r, "
                "payload=%r)" %
                (self.version, self.status, self.reason, self.headers,
                 self.payload))
class BaseOperationRecorder(object):
    # pylint: disable=too-many-instance-attributes
    """
    Abstract base class defining the interface to an operation recorder,
    that records the WBEM operations executed in a connection to a WBEM
    server.

    An operation recorder can be registered by setting the
    :attr:`~pywbem.WBEMConnection.operation_recorder` instance
    attribute of the :class:`~pywbem.WBEMConnection` object to an
    object of a subclass of this base class.

    When an operation recorder is registered on a connection, each operation
    that is executed on the connection will cause the :meth:`record`
    method of the operation recorder object to be called, if the recorder is
    enabled.

    The operation recorder is by default enabled, and can be disabled and
    re-enabled using the :meth:`~pywbem.BaseOperationRecorder.disable` and
    :meth:`~pywbem.BaseOperationRecorder.enable` methods, respectively.
    This can be used to temporarily pause the recorder.
    """

    def __init__(self):
        # Recorders start out enabled; _conn_id is filled in later by
        # stage_wbem_connection() and lives for the whole connection.
        self._enabled = True
        self._conn_id = None
        self.reset()

    def enable(self):
        """Enable the recorder."""
        self._enabled = True

    def disable(self):
        """Disable the recorder."""
        self._enabled = False

    @property
    def enabled(self):
        """Indicate whether the recorder is enabled."""
        return self._enabled

    @staticmethod
    def open_file(filename, file_mode='w'):
        """
        A static convience function that performs the open of the recorder file
        correctly for different versions of python. This covers the
        issue where the file should be opened in text mode but that is
        done differently in python 2 and python 3.

        Parameters:

          filename(:term:`string`):
            Name of the file where the recorder output will be written

          file_mode(:term:`string`):
            Optional file mode.  The default is 'w' which overwrites any
            existing file.  if 'a' is used, the data is appended to any
            existing file.

        Example::

            recorder = TestClientRecorder(
                BaseOperationRecorder.open_file('recorder.log'))
        """
        if six.PY2:
            # Open with codecs to define text mode
            return codecs.open(filename, mode=file_mode, encoding='utf-8')
        return open(filename, file_mode, encoding='utf8')

    def reset(self, pull_op=None):
        """Reset all the attributes in the class. This also allows setting
        the pull_op attribute that defines whether the operation is to be
        a traditional or pull operation.
        This does NOT reset _conn.id as that exists through the life of
        the connection.
        """
        # Staged pywbem-level call data (set by stage_pywbem_args/result).
        self._pywbem_method = None
        self._pywbem_args = None
        self._pywbem_result_ret = None
        self._pywbem_result_exc = None
        # Staged HTTP request data (set by stage_http_request).
        self._http_request_version = None
        self._http_request_url = None
        self._http_request_target = None
        self._http_request_method = None
        self._http_request_headers = None
        self._http_request_payload = None
        # Staged HTTP response data (set by stage_http_response1/2).
        self._http_response_version = None
        self._http_response_status = None
        self._http_response_reason = None
        self._http_response_headers = None
        self._http_response_payload = None
        self._pull_op = pull_op

    def stage_wbem_connection(self, wbem_connection):
        """
        Stage information about the connection. Used only by
        LogOperationRecorder.
        """
        pass

    def stage_pywbem_args(self, method, **kwargs):
        """
        Set requst method and all args.
        Normally called before the cmd is executed to record request
        parameters
        """
        # pylint: disable=attribute-defined-outside-init
        self._pywbem_method = method
        self._pywbem_args = kwargs

    def stage_pywbem_result(self, ret, exc):
        """ Set Result return info or exception info"""
        # pylint: disable=attribute-defined-outside-init
        self._pywbem_result_ret = ret
        self._pywbem_result_exc = exc

    def stage_http_request(self, conn_id, version, url, target, method, headers,
                           payload):
        """Set request HTTP information including url, headers, etc."""
        # pylint: disable=attribute-defined-outside-init
        # NOTE(review): _http_request_conn_id is not cleared by reset() —
        # confirm that is intended.
        self._http_request_version = version
        self._http_request_conn_id = conn_id
        self._http_request_url = url
        self._http_request_target = target
        self._http_request_method = method
        self._http_request_headers = headers
        self._http_request_payload = payload

    # pylint: disable=unused-argument
    def stage_http_response1(self, conn_id, version, status, reason, headers):
        """Set response http info including headers, status, etc.
        conn_id unused here. Used in log"""
        # pylint: disable=attribute-defined-outside-init
        self._http_response_version = version
        self._http_response_status = status
        self._http_response_reason = reason
        self._http_response_headers = headers

    def stage_http_response2(self, payload):
        """Stage second part of http response, the payload"""
        # pylint: disable=attribute-defined-outside-init
        self._http_response_payload = payload

    def record_staged(self):
        """Encode staged information on request and result to output"""
        # Bundle everything staged so far into the named tuples and hand
        # them to the subclass's record() implementation.
        if self.enabled:
            pwargs = OpArgs(
                self._pywbem_method,
                self._pywbem_args)
            pwresult = OpResult(
                self._pywbem_result_ret,
                self._pywbem_result_exc)
            httpreq = HttpRequest(
                self._http_request_version,
                self._http_request_url,
                self._http_request_target,
                self._http_request_method,
                self._http_request_headers,
                self._http_request_payload)
            httpresp = HttpResponse(
                self._http_response_version,
                self._http_response_status,
                self._http_response_reason,
                self._http_response_headers,
                self._http_response_payload)
            self.record(pwargs, pwresult, httpreq, httpresp)

    def record(self, pywbem_args, pywbem_result, http_request, http_response):
        """
        Function that is called to record a single WBEM operation, i.e. the
        invocation of a single :class:`~pywbem.WBEMConnection` method.

        This function is called only when the recorder is enabled, i.e. it
        does not need to check for recorder enablement.

        Parameters:

          pywbem_args (:class:`~pywbem.OpArgs`):
            The name and input arguments of the :class:`~pywbem.WBEMConnection`
            method that is recorded.

          pywbem_result (:class:`~pywbem.OpResult`):
            The result (return value or exception) of the
            :class:`~pywbem.WBEMConnection` method that is recorded.

          http_request (:class:`~pywbem.HttpRequest`):
            The HTTP request sent by the :class:`~pywbem.WBEMConnection` method
            that is recorded.
            `None`, if no HTTP request had been sent (e.g. because an exception
            was raised before getting there).

          http_response (:class:`~pywbem.HttpResponse`):
            The HTTP response received by the :class:`~pywbem.WBEMConnection`
            method that is recorded.
            `None`, if no HTTP response had been received (e.g. because an
            exception was raised before getting there).
        """
        raise NotImplementedError
class LogOperationRecorder(BaseOperationRecorder):
    """
    An Operation Recorder that logs the information to a set of named logs.
    This recorder uses two named logs:

    LOG_OPS_CALLS_NAME - Logger for cim_operations method calls and responses

    LOG_HTTP_NAME - Logger for http_requests and responses

    This also implements a method to log information on each connection.

    All logging calls are at the debug level.
    """

    def __init__(self, max_log_entry_size=None):
        """
        Creates the the loggers and sets the max_log_size for each if
        the input parameter max_log_entry_size is not `None`.

        Parameters: (:term:`integer`)

          max_log_entry_size(:term:`integer`)
            The maximum size of each log entry. This is primarily to limit
            response sizes since they could be enormous.
            If `None`, no size limit and the full request or response is logged.
        """
        super(LogOperationRecorder, self).__init__()
        # compute max entry size for each logger
        max_sz = max_log_entry_size if max_log_entry_size \
            else DEFAULT_MAX_LOG_ENTRY_SIZE
        self.opslogger = logging.getLogger(LOG_OPS_CALLS_NAME)
        ops_logger_info = PywbemLoggers.get_logger_info(LOG_OPS_CALLS_NAME)
        opsdetaillevel = ops_logger_info[0] if ops_logger_info else None
        # Size clipping only applies at the 'min' detail level; any other
        # level logs the full entry.
        self.ops_max_log_size = max_sz if opsdetaillevel == 'min' \
            else None
        self.httplogger = logging.getLogger(LOG_HTTP_NAME)
        http_logger_info = PywbemLoggers.get_logger_info(LOG_HTTP_NAME)
        httpdetaillevel = http_logger_info[0] if http_logger_info else None
        self.http_max_log_size = max_sz if httpdetaillevel == 'min' \
            else None

    def stage_wbem_connection(self, wbem_connection):
        """
        Log connection information. This includes the connection id
        that should remain throught the life of the connection.
        """
        self._conn_id = wbem_connection.conn_id
        if self.enabled:
            self.opslogger.debug('Connection:%s %r', self._conn_id,
                                 wbem_connection)

    def stage_pywbem_args(self, method, **kwargs):
        """
        Log request method and all args.
        Normally called before the cmd is executed to record request
        parameters.
        This method does not limit size of log record.
        """
        # pylint: disable=attribute-defined-outside-init
        self._pywbem_method = method
        if self.enabled and self.opslogger.isEnabledFor(logging.DEBUG):
            # Order kwargs. Note that this is done automatically starting
            # with python 3.6
            kwstr = ', '.join([('{0}={1!r}'.format(key, kwargs[key]))
                               for key in sorted(six.iterkeys(kwargs))])
            self.opslogger.debug('Request:%s %s(%s)', self._conn_id, method,
                                 kwstr)

    def stage_pywbem_result(self, ret, exc):
        """
        Log result return or exception parameter. This function allows
        setting maximum size on the result parameter logged because response
        information can be very large
        .
        """
        def format_result(ret, max_len):
            """ format ret as repr while clipping it to max_len if
            max_len is not None.
            """
            result = '{0!r}'.format(ret)
            if max_len and (len(result) > max_len):
                result = (result[:max_len] + '...')
            return result

        if self.enabled and self.opslogger.isEnabledFor(logging.DEBUG):
            if exc:  # format exception
                result = format_result(
                    '%s(%s)' % (exc.__class__.__name__, exc),
                    self.ops_max_log_size)
            else:  # format result
                # test if type is tuple (subclass of tuple but not type tuple)
                # i.e. a pull-operation result object such as the named
                # tuples returned by Open.../Pull... operations.
                # pylint: disable=unidiomatic-typecheck
                if isinstance(ret, tuple) and \
                        type(ret) is not tuple:  # pylint: disable=C0123
                    try:  # test if field instances or paths
                        rtn_data = ret.instances
                        data_str = 'instances'
                    except AttributeError:
                        rtn_data = ret.paths
                        data_str = 'paths'
                    rtn_data = format_result(rtn_data, self.ops_max_log_size)
                    try:  # test for query_result_class
                        qrc = ', query_result_class=%s' % ret.query_result_class
                    except AttributeError:
                        qrc = ""
                    result = "{0.__name__}(context={1}, eos={2}{3}, {4}={5})" \
                        .format(type(ret), ret.context, ret.eos, qrc,
                                data_str, rtn_data)
                else:
                    result = format_result(ret, self.ops_max_log_size)
            return_type = 'Exception' if exc else 'Return'
            self.opslogger.debug('%s:%s %s(%s)', return_type, self._conn_id,
                                 self._pywbem_method,
                                 result)

    def stage_http_request(self, conn_id, version, url, target, method, headers,
                           payload):
        """Log request HTTP information including url, headers, etc."""
        if self.enabled and self.httplogger.isEnabledFor(logging.DEBUG):
            # pylint: disable=attribute-defined-outside-init
            # if Auth header, mask data so credentials never reach the log
            if 'Authorization' in headers:
                authtype, cred = headers['Authorization'].split(' ')
                headers['Authorization'] = '%s %s' % (authtype, 'X' * len(cred))
            header_str = ' '.join('{0}:{1!r}'.format(k, v)
                                  for k, v in headers.items())
            self.httplogger.debug('Request:%s %s %s %s %s %s\n    %s',
                                  conn_id, method, target, version, url,
                                  header_str, payload)

    def stage_http_response1(self, conn_id, version, status, reason, headers):
        """Set response http info including headers, status, etc. """
        # Only staged here; the actual log record is emitted by
        # stage_http_response2() once the payload is available.
        # pylint: disable=attribute-defined-outside-init
        self._http_response_version = version
        self._http_response_status = status
        self._http_response_reason = reason
        self._http_response_headers = headers
        self._http_response_conn_id = conn_id

    def stage_http_response2(self, payload):
        """Log complete http response, including response1 and payload"""
        # required because http code uses sending all None to reset
        # parameters. We ignore that
        if not self._http_response_version and not payload:
            return
        if self.enabled and self.httplogger.isEnabledFor(logging.DEBUG):
            if self._http_response_headers:
                header_str = \
                    ' '.join('{0}:{1!r}'.format(k, v)
                             for k, v in self._http_response_headers.items())
            else:
                header_str = ''
            # format the payload possibly with max size limit
            payload = '%r' % payload.decode('utf-8')
            if self.http_max_log_size and \
                    (len(payload) > self.http_max_log_size):
                payload = (payload[:self.http_max_log_size] + '...')
            self.httplogger.debug('Response:%s %s:%s %s %s\n    %s',
                                  self._http_response_conn_id,
                                  self._http_response_status,
                                  self._http_response_reason,
                                  self._http_response_version,
                                  header_str,
                                  payload)

    def record_staged(self):
        """Not used for logging"""
        pass

    def record(self, pywbem_args, pywbem_result, http_request, http_response):
        """Not used for logging"""
        pass
class TestClientRecorder(BaseOperationRecorder):
    """
    An operation recorder that generates test cases for each recorded
    operation. The test cases are in the YAML format suitable for the
    `test_client` unit test module of the pywbem project.
    """

    # HTTP header fields to exclude when creating the testcase
    # (in lower case)
    EXCLUDE_REQUEST_HEADERS = [
        'authorization',
        'content-length',
        'content-type',
    ]
    EXCLUDE_RESPONSE_HEADERS = [
        'content-length',
        'content-type',
    ]

    # Dummy server URL and credentials for use in generated test case
    TESTCASE_URL = 'http://acme.com:80'
    TESTCASE_USER = 'username'
    TESTCASE_PASSWORD = 'password'

    def __init__(self, fp):
        """
        Parameters:

          fp (file):
            An open file that each test case will be written to.  This file
            should have been opened in text mode.
            Since there are differences between python 2 and 3 in opening
            files in text mode, the static method
            :meth:`~pywbem.BaseOperationRecorder.open_file`
            can be used to open the file or python 2/3 compatible open::

                from io import open
                f = open('blah.log', encoding='utf-8')

        Example::

            recorder = TestClientRecorder(
                BaseOperationRecorder.open_file('recorder.log'))
        """
        super(TestClientRecorder, self).__init__()
        self._fp = fp

    def record(self, pywbem_args, pywbem_result, http_request, http_response):
        """
        Function that records the invocation of a single
        :class:`~pywbem.WBEMConnection` method, by appending a corresponding
        test case to the file.

        Parameters: See :meth:`pywbem.BaseOperationRecorder.record`.
        """
        testcase = OrderedDict()
        testcase['name'] = pywbem_args.method
        testcase['description'] = 'Generated by TestClientRecorder'

        # The recorded request uses dummy URL/credentials so the testcase
        # never leaks real server details.
        tc_pywbem_request = OrderedDict()
        tc_pywbem_request['url'] = TestClientRecorder.TESTCASE_URL
        tc_pywbem_request['creds'] = [TestClientRecorder.TESTCASE_USER,
                                      TestClientRecorder.TESTCASE_PASSWORD]
        tc_pywbem_request['namespace'] = 'root/cimv2'
        tc_pywbem_request['timeout'] = 10
        tc_pywbem_request['debug'] = False
        tc_operation = OrderedDict()
        tc_operation['pywbem_method'] = pywbem_args.method
        for arg_name in pywbem_args.args:
            tc_operation[arg_name] = self.toyaml(pywbem_args.args[arg_name])
        tc_pywbem_request['operation'] = tc_operation
        testcase['pywbem_request'] = tc_pywbem_request

        tc_pywbem_response = OrderedDict()
        if pywbem_result.ret is not None:
            # Pull operations use a distinct result key in the testcase.
            yaml_txt = 'pullresult' if self._pull_op else 'result'
            tc_pywbem_response[yaml_txt] = self.toyaml(pywbem_result.ret)
        if pywbem_result.exc is not None:
            exc = pywbem_result.exc
            if isinstance(exc, CIMError):
                tc_pywbem_response['cim_status'] = self.toyaml(exc.status_code)
            else:
                tc_pywbem_response['exception'] = self.toyaml(
                    exc.__class__.__name__)
        testcase['pywbem_response'] = tc_pywbem_response

        tc_http_request = OrderedDict()
        if http_request is not None:
            tc_http_request['verb'] = http_request.method
            tc_http_request['url'] = TestClientRecorder.TESTCASE_URL
            if http_request.target:
                tc_http_request['url'] += http_request.target
            tc_request_headers = OrderedDict()
            if http_request.headers is not None:
                for hdr_name in http_request.headers:
                    if hdr_name.lower() not in \
                            TestClientRecorder.EXCLUDE_REQUEST_HEADERS:
                        tc_request_headers[hdr_name] = \
                            http_request.headers[hdr_name]
            tc_http_request['headers'] = tc_request_headers
            if http_request.payload is not None:
                data = http_request.payload.decode('utf-8')
                # One XML element per line for readability of the testcase.
                data = data.replace('><', '>\n<').strip()
            else:
                data = None
            tc_http_request['data'] = data
        testcase['http_request'] = tc_http_request

        tc_http_response = OrderedDict()
        if http_response is not None:
            tc_http_response['status'] = http_response.status
            tc_response_headers = OrderedDict()
            if http_response.headers is not None:
                for hdr_name in http_response.headers:
                    if hdr_name.lower() not in \
                            TestClientRecorder.EXCLUDE_RESPONSE_HEADERS:
                        tc_response_headers[hdr_name] = \
                            http_response.headers[hdr_name]
            tc_http_response['headers'] = tc_response_headers
            if http_response.payload is not None:
                data = http_response.payload.decode('utf-8')
                data = data.replace('><', '>\n<').strip()
            else:
                data = None
            tc_http_response['data'] = data
        else:
            tc_http_response['exception'] = "# Change this to a callback " \
                                            "function that causes this " \
                                            "condition."
        testcase['http_response'] = tc_http_response

        testcases = []
        testcases.append(testcase)
        # The file is open in text mode, so we produce a unicode string
        data = yaml.safe_dump(testcases, encoding=None, allow_unicode=True,
                              default_flow_style=False, indent=4)
        data = data.replace('\n\n', '\n')  # YAML dump duplicates newlines
        self._fp.write(data)
        self._fp.flush()

    def toyaml(self, obj):
        """
        Convert any allowable input argument to or return value from an
        operation method to an object that is ready for serialization into
        test_client yaml format.
        """
        # namedtuple is subclass of tuple so it is instance of tuple but
        # not type tuple. Cvt to dictionary and cvt dict to yaml.
        # pylint: disable=unidiomatic-typecheck
        if isinstance(obj, tuple) and type(obj) is not tuple:
            ret_dict = obj._asdict()
            return self.toyaml(ret_dict)
        if isinstance(obj, (list, tuple)):
            ret = []
            # This does not handle namedtuple
            for item in obj:
                ret.append(self.toyaml(item))
            return ret
        elif isinstance(obj, (dict, NocaseDict)):
            ret_dict = OrderedDict()
            for key in obj.keys():  # get keys in original case
                ret_dict[key] = self.toyaml(obj[key])
            return ret_dict
        elif obj is None:
            return obj
        elif isinstance(obj, six.binary_type):
            return obj.decode("utf-8")
        elif isinstance(obj, six.text_type):
            return obj
        elif isinstance(obj, CIMInt):
            return _Longint(obj)
        elif isinstance(obj, (bool, int)):
            # TODO ks jun 17 should the above be six.integertypes???
            # The check for int must be after CIMInt, because CIMInt is int.
            return obj
        elif isinstance(obj, CIMFloat):
            return float(obj)
        elif isinstance(obj, CIMDateTime):
            return str(obj)
        elif isinstance(obj, datetime):
            return CIMDateTime(obj)
        elif isinstance(obj, timedelta):
            return CIMDateTime(obj)
        elif isinstance(obj, CIMInstance):
            # CIM objects below are flattened into ordered dicts tagged
            # with a 'pywbem_object' discriminator for reconstruction.
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMInstance'
            ret_dict['classname'] = self.toyaml(obj.classname)
            ret_dict['properties'] = self.toyaml(obj.properties)
            ret_dict['path'] = self.toyaml(obj.path)
            return ret_dict
        elif isinstance(obj, CIMInstanceName):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMInstanceName'
            ret_dict['classname'] = self.toyaml(obj.classname)
            ret_dict['namespace'] = self.toyaml(obj.namespace)
            ret_dict['keybindings'] = self.toyaml(obj.keybindings)
            return ret_dict
        elif isinstance(obj, CIMClass):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMClass'
            ret_dict['classname'] = self.toyaml(obj.classname)
            ret_dict['superclass'] = self.toyaml(obj.superclass)
            ret_dict['properties'] = self.toyaml(obj.properties)
            ret_dict['methods'] = self.toyaml(obj.methods)
            ret_dict['qualifiers'] = self.toyaml(obj.qualifiers)
            return ret_dict
        elif isinstance(obj, CIMClassName):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMClassName'
            ret_dict['classname'] = self.toyaml(obj.classname)
            ret_dict['host'] = self.toyaml(obj.host)
            ret_dict['namespace'] = self.toyaml(obj.namespace)
            return ret_dict
        elif isinstance(obj, CIMProperty):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMProperty'
            ret_dict['name'] = self.toyaml(obj.name)
            ret_dict['value'] = self.toyaml(obj.value)
            ret_dict['type'] = self.toyaml(obj.type)
            ret_dict['reference_class'] = self.toyaml(obj.reference_class)
            ret_dict['embedded_object'] = self.toyaml(obj.embedded_object)
            ret_dict['is_array'] = self.toyaml(obj.is_array)
            ret_dict['array_size'] = self.toyaml(obj.array_size)
            ret_dict['class_origin'] = self.toyaml(obj.class_origin)
            ret_dict['propagated'] = self.toyaml(obj.propagated)
            ret_dict['qualifiers'] = self.toyaml(obj.qualifiers)
            return ret_dict
        elif isinstance(obj, CIMMethod):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMMethod'
            ret_dict['name'] = self.toyaml(obj.name)
            ret_dict['return_type'] = self.toyaml(obj.return_type)
            ret_dict['class_origin'] = self.toyaml(obj.class_origin)
            ret_dict['propagated'] = self.toyaml(obj.propagated)
            ret_dict['parameters'] = self.toyaml(obj.parameters)
            ret_dict['qualifiers'] = self.toyaml(obj.qualifiers)
            return ret_dict
        elif isinstance(obj, CIMParameter):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMParameter'
            ret_dict['name'] = self.toyaml(obj.name)
            ret_dict['type'] = self.toyaml(obj.type)
            ret_dict['reference_class'] = self.toyaml(obj.reference_class)
            ret_dict['is_array'] = self.toyaml(obj.is_array)
            ret_dict['array_size'] = self.toyaml(obj.array_size)
            ret_dict['qualifiers'] = self.toyaml(obj.qualifiers)
            return ret_dict
        elif isinstance(obj, CIMQualifier):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMQualifier'
            ret_dict['name'] = self.toyaml(obj.name)
            ret_dict['value'] = self.toyaml(obj.value)
            ret_dict['type'] = self.toyaml(obj.type)
            ret_dict['propagated'] = self.toyaml(obj.propagated)
            ret_dict['tosubclass'] = self.toyaml(obj.tosubclass)
            ret_dict['toinstance'] = self.toyaml(obj.toinstance)
            ret_dict['overridable'] = self.toyaml(obj.overridable)
            ret_dict['translatable'] = self.toyaml(obj.translatable)
            return ret_dict
        elif isinstance(obj, CIMQualifierDeclaration):
            ret_dict = OrderedDict()
            ret_dict['pywbem_object'] = 'CIMQualifierDeclaration'
            ret_dict['name'] = self.toyaml(obj.name)
            ret_dict['type'] = self.toyaml(obj.type)
            ret_dict['value'] = self.toyaml(obj.value)
            ret_dict['is_array'] = self.toyaml(obj.is_array)
            ret_dict['array_size'] = self.toyaml(obj.array_size)
            ret_dict['scopes'] = self.toyaml(obj.scopes)
            ret_dict['tosubclass'] = self.toyaml(obj.tosubclass)
            ret_dict['toinstance'] = self.toyaml(obj.toinstance)
            ret_dict['overridable'] = self.toyaml(obj.overridable)
            ret_dict['translatable'] = self.toyaml(obj.translatable)
            return ret_dict
        else:
            raise TypeError("Invalid type in TestClientRecorder.toyaml(): "
                            "%s %s" % (obj.__class__.__name__, type(obj)))
| ssOleg/pywbem | pywbem/_recorder.py | _recorder.py | py | 36,379 | python | en | code | null | github-code | 36 |
12852390478 | # An implementation of the three-body problem by Logan Schmalz
# https://github.com/LoganSchmalz/threebody/
# MIT License
import numpy as np
import scipy as sci
import scipy.integrate
import scipy.linalg
import matplotlib.pyplot as plt
# As astronomers, we like to normalize values to scales that make sense
# So that's what we'll do here
# Using these norm values, we will compute some constants that make multiplying easier
# This will also speed up calculations, since we will use floating-point numbers
# as opposed to fixed point, which are slower with big differences in exponents
# Physical constants and normalisation scales (SI units).
G = 6.67408e-11 #Newton-meters^2/kg^2, Gravitational Constant
m_norm = 1.989e30 #kg, mass of Sun
r_norm = 1.496e11 #meters, 1 AU
v_norm = 29780 #meters/sec, speed of Earth around Sun
t_norm = 1*365*24*3600 #sec, orbital period of Earth
# Dimensionless coupling constants; pre-computing them keeps the RHS cheap
# and the floating-point exponents well scaled.
K1 = G * t_norm * m_norm / (r_norm**2 * v_norm)
K2 = v_norm * t_norm / r_norm
def body_calc(rvs, t, m1, m2, m3):
    """Right-hand side of the normalised three-body ODE system.

    rvs : flat array of 18 floats — positions r1, r2, r3 followed by
          velocities v1, v2, v3 (3 components each), normalised units.
    t   : current time (unused; required by the odeint callback signature).
    m1, m2, m3 : body masses in units of m_norm.

    Returns the flat array of 18 time-derivatives (dr's then dv's).
    """
    # Split the state vector into per-body position/velocity triples.
    r1, r2, r3 = rvs[:3], rvs[3:6], rvs[6:9]
    v1, v2, v3 = rvs[9:12], rvs[12:15], rvs[15:18]
    # Pairwise separations between the bodies.
    sep12 = sci.linalg.norm(r1 - r2)
    sep23 = sci.linalg.norm(r2 - r3)
    sep13 = sci.linalg.norm(r1 - r3)
    # Normalised Newtonian accelerations from the two other bodies.
    acc1 = K1 * (m2 * (r2 - r1) / sep12**3 + m3 * (r3 - r1) / sep13**3)
    acc2 = K1 * (m1 * (r1 - r2) / sep12**3 + m3 * (r3 - r2) / sep23**3)
    acc3 = K1 * (m1 * (r1 - r3) / sep13**3 + m2 * (r2 - r3) / sep23**3)
    # Position derivatives are the (rescaled) velocities; pack everything
    # back into one flat 18-vector for the integrator.
    return np.concatenate((K2 * v1, K2 * v2, K2 * v3, acc1, acc2, acc3))
# Initial conditions (normalised units: AU, Earth orbital speed, solar masses).
# Sun
r1 = np.array([0,0,0])
v1 = np.array([0,0,0])
m1 = 1
# Venus
r2 = np.array([0,0.723332,0])
v2 = np.array([1.176,0,0])
#v2=np.array([2.352,0,0]) # twice Venus' normal velocity
m2 = 2.4472e-6
# Earth
r3 = np.array([1,0,0])
v3 = np.array([0,1,0])
m3 = 3.00269e-6
# Alternative scenario: equal masses at the vertices of an equilateral
# triangle with velocities directed along the edges.
#m1 = m2 = m3 = 1
#r1 = np.array([0,0,0])
#r2 = np.array([1,0,0])
#r3 = np.array([0.5,np.sqrt(1-0.5**2),0])
#v1 = np.array([.5,0,0])
#v2 = np.array([-0.25,np.sqrt(3)/4,0])
#v3 = np.array([-0.25,-np.sqrt(3)/4,0])
# Flatten all state into the single 18-vector layout expected by body_calc.
init_rvs = np.concatenate((r1, r2, r3, v1, v2, v3))
# 10,000 evenly spaced sample times spanning 20 normalised years
# (steps of 0.002 years, roughly 17.5 hours apart).
times = np.linspace(0,20,10000)
# Integrate body_calc over `times`; odeint is used for speed over the newer
# scipy.integrate APIs (per the original author's note).
solution = sci.integrate.odeint(body_calc, init_rvs, times, args=(m1,m2,m3))
# Here we want to extract out position values at each time step
#
# Explanation:
#
# Solutions is a multidimensional array, we can think of it as
# a Cx6 matrix, where C is some constant for how many time steps we have
# the 6 comes from our 6 values (r1, r2, r3, v1, v2, and v3)
# these values themselves are 3-dimensional vectors
# In reality, the 6 dimensions and 3 dimensions are actually 'flattened' into
# one 18-dimensional vector.
#
# So for r1_sol for example:
# we want the first 3 values of our 18-dimensional vector
# which correspond to x1,y1,z1
# and these values at each timestep appear in all C
# so we use " : " to say that we want to be checking every C timestep
# and we use " :3" to say we want the first 3 values (again, x1,y1,z1).
#
# for r2_sol:
# we again want every value at each timestep, so we start with " : "
# and we use "3:6" to say we want the 4th, 5th, and 6th values (x2,y2,z2)
#
# similarly for r3_sol, we use "6:9" to get 7th, 8th, and 9th values
# if we wanted v1, we could use "9:12", but that's not very useful for us
#
# (note: in Python, arrays begin indexing at 0, thus for example the value
# in index 2 is the third value.
# in this sense, we can say " :3" is the same as writing "0:3", with the end
# being non-inclusive, so we get a0,a1,a2
# and for "3:6", we get a3,a4,a5)
# (extra note: the technical reason that it makes sense to allow a comma here
# is that numpy arrays can actually take a "tuple" of slice boundaries)
# Per-body position histories: state columns 0-2 are body 1, 3-5 body 2,
# 6-8 body 3 (columns 9-17 would be the velocities).
r1_sol = solution[ : , :3]
r2_sol = solution[ : , 3:6]
r3_sol = solution[ : , 6:9]
fig = plt.figure()
axs = fig.add_subplot(111)
# Trajectories in the x-y plane (coordinate index 0 = x, 1 = y).
axs.plot(r1_sol[ : , 0], r1_sol[ : , 1], color="#ffa500")
axs.plot(r2_sol[ : , 0], r2_sol[ : , 1], color="#808080")
axs.plot(r3_sol[ : , 0], r3_sol[ : , 1], color="b")
# Final positions as markers (index -1 = last timestep).
axs.scatter(r1_sol[-1,0], r1_sol[-1,1], color="#ffa500")
axs.scatter(r2_sol[-1,0], r2_sol[-1,1], color="#808080")
axs.scatter(r3_sol[-1,0], r3_sol[-1,1], color="b")
plt.show()
| LoganSchmalz/threebody | threebody.py | threebody.py | py | 5,543 | python | en | code | 0 | github-code | 36 |
23229025125 | import tensorflow as tf
from tools.tools import count
from tools.tools import indicator
class Loss(object):
    """Collection of L2 loss helpers for TensorFlow (1.x) graphs."""

    def __init__(self):
        pass

    @staticmethod
    def loss_l2(estimated, target):
        """Sum of squared differences between two dense tensors."""
        with tf.name_scope('l2_loss'):
            residuals = estimated - target
            squared = tf.pow(residuals, 2, name='squared_difference')
            return tf.reduce_sum(squared, name='summing_square_errors')

    @staticmethod  # Unchecked
    def l2_regularisation(regularisation, weights_list):
        """L2 weight penalty with strength `regularisation` over weights_list."""
        with tf.name_scope('regularisation'):
            regularizer = tf.contrib.layers.l2_regularizer(regularisation)
            return tf.contrib.layers.apply_regularization(regularizer=regularizer,
                                                          weights_list=weights_list)

    def full_l2_loss(self, regularisation, weights_list, prediction, target):  # Unchecked
        """Data L2 loss plus the L2 weight penalty, combined as 'full_loss'."""
        with tf.name_scope('loss'):
            penalty = self.l2_regularisation(regularisation, weights_list)
            data_term = self.loss_l2(estimated=prediction, target=target)
            return tf.add(penalty, data_term, name='full_loss')
| MehdiAbbanaBennani/Neural-Networks-for-Collaborative-Filtering | autoencoder/Loss.py | Loss.py | py | 1,406 | python | en | code | 35 | github-code | 36 |
23314260402 | import logging
import os
from malware_extractor import MalwareExtractor
logger = logging.getLogger(__name__)
class VXVaultExtractor(MalwareExtractor):
    """Extractor for the VXVault feed.

    VXVault samples already arrive as zip files, so "extraction" is a
    plain copy from the collector directory to the extractor directory.
    """

    def process_input(self):
        # files are just zip files, so can simply copy those across
        self.copy_files()
if __name__ == "__main__":
logger.info("VXVault Extractor starting up.")
# assume mounted path is path to directory of files, so skipping any need for sub-directories.
collector_path = os.environ.get("COLLECTOR_PATH")
extractor_path = os.environ.get("EXTRACTOR_PATH")
extractor = VXVaultExtractor(collector_path, extractor_path)
extractor.process_input()
logger.info("VXVault Extractor completed.")
| g-clef/malware_extractor | VXVaultExtractor.py | VXVaultExtractor.py | py | 708 | python | en | code | 0 | github-code | 36 |
def method1(arr, n, k):
    """Return the k-th smallest element of arr (k is 1-based).

    Fix: sorts a *copy* (O(n log n)) — the original called arr.sort(),
    silently mutating the caller's list as a side effect.

    n is the length of arr; it is unused but kept for interface
    compatibility with existing callers.
    """
    return sorted(arr)[k - 1]
def method2(arr, k):
    """Return the k-th smallest element of arr, or None if arr has fewer
    than k elements.

    Keeps a size-k max-heap of the smallest values seen so far (values are
    stored negated because heapq is a min-heap); the heap root is then the
    k-th smallest. O(n log k).
    """
    import heapq
    k_smallest = []  # negated values; root == -(largest of the k smallest)
    for value in arr:
        heapq.heappush(k_smallest, -value)
        if len(k_smallest) > k:
            heapq.heappop(k_smallest)
    if len(k_smallest) < k:
        return None
    return -k_smallest[0]
if __name__ == "__main__":
"""
from timeit import timeit
arr = [12, 3, 5, 7, 19]
n = len(arr)
k = 2
print(timeit(lambda: method1(arr, n, k), number=10000)) # 0.0018417680003040005
print(timeit(lambda: method2(arr, k), number=10000)) # 0.009863699999186792
"""
| thisisshub/DSA | F_sorting/problems/D_kth_smallest_element.py | D_kth_smallest_element.py | py | 645 | python | en | code | 71 | github-code | 36 |
20238646277 | from numpy import *
import operator
import matplotlib
import matplotlib.pyplot as plt
from os import listdir
def classify0(inX, dataSet, labels, k):
    """k-nearest-neighbour vote for the class of inX.

    inX      -- feature vector to classify (length matches dataSet columns)
    dataSet  -- (m, n) array of training vectors
    labels   -- length-m sequence of training class labels
    k        -- number of neighbours to poll

    Returns the label with the most votes among the k nearest training
    vectors (Euclidean distance). Fix: removed a leftover debug print of
    the dataset size that ran on every call.
    """
    dataSetSize = dataSet.shape[0]
    # Broadcast inX against every training row; Euclidean distances.
    diffMat = tile(inX, (dataSetSize, 1)) - dataSet
    distances = ((diffMat ** 2).sum(axis=1)) ** 0.5
    sortedDistIndices = distances.argsort()  # indices, nearest to farthest
    # Tally votes among the k nearest neighbours.
    classCount = {}
    for i in range(k):
        voteLabel = labels[sortedDistIndices[i]]
        classCount[voteLabel] = classCount.get(voteLabel, 0) + 1
    sortedClassCount = sorted(classCount.items(), key=operator.itemgetter(1), reverse=True)
    return sortedClassCount[0][0]
def createDataSet():
    """Return a tiny toy dataset: a (4, 2) feature array and its labels."""
    samples = array([[1.0, 1.1],
                     [1.0, 1.0],
                     [0, 0],
                     [0, 0.1]])  # numpy array, not a plain Python list
    tags = ['A', 'A', 'B', 'B']
    return samples, tags
def file2matrix(filename):
    """Parse a tab-separated dating-data file into features and labels.

    Each line holds three float features followed by an integer class
    label. Returns (returnMat, classLabelVector) where returnMat is a
    (numLines, 3) float array.

    Fixes: the file handle was never closed (now a `with` block) and a
    debug print of the line count ran on every call.
    """
    with open(filename) as fr:
        arrayOLines = fr.readlines()
    numberOfLines = len(arrayOLines)
    returnMat = zeros((numberOfLines, 3))
    classLabelVector = []
    for index, line in enumerate(arrayOLines):
        listFromLine = line.strip().split('\t')
        returnMat[index, :] = listFromLine[0:3]   # numpy coerces str -> float
        classLabelVector.append(int(listFromLine[-1]))
    return returnMat, classLabelVector
def show(datingDataMat,datingLabels):
    """Scatter-plot dating features 1 vs 2, coloured and sized by class label."""
    fig = plt.figure()
    ax = fig.add_subplot(111)
    # Marker size and colour both scale with the integer class label.
    ax.scatter(datingDataMat[:, 1], datingDataMat[:, 2],15.0*array(datingLabels),15.0*array(datingLabels))
    plt.show()
def autoNorm(dataSet):
    """Min-max normalise each feature column of dataSet into [0, 1].

    Returns (normDataSet, ranges, minVals) so callers can apply the same
    scaling to new samples. NOTE: a constant column yields range 0 and
    therefore NaNs (unchanged from the original behaviour).
    """
    minVals = dataSet.min(0)   # per-column minimum
    maxVals = dataSet.max(0)   # per-column maximum
    ranges = maxVals - minVals
    m = dataSet.shape[0]
    shifted = dataSet - tile(minVals, (m, 1))
    normDataSet = shifted / tile(ranges, (m, 1))
    return normDataSet, ranges, minVals
def datingClassTest():
    """Hold-out evaluation of classify0 on 'datingTestSet2.txt'.

    Expects the data file in the working directory; prints each prediction
    against its true label and the overall error rate.
    """
    hoRatio = 0.50  # fraction held out for testing (comment said 10%, value is 50%)
    datingDataMat, datingLabels = file2matrix('datingTestSet2.txt')  # load data set from file
    normMat, ranges, minVals = autoNorm(datingDataMat)
    m = normMat.shape[0]
    numTestVecs = int(m * hoRatio)
    errorCount = 0.0
    for i in range(numTestVecs):
        # Classify each held-out sample against the remaining samples.
        classifierResult = classify0(normMat[i, :], normMat[numTestVecs:m, :], datingLabels[numTestVecs:m], 3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, datingLabels[i]))
        if (classifierResult != datingLabels[i]):
            errorCount += 1.0
    print( "the total error rate is: %f" % (errorCount / float(numTestVecs)))
    # print(errorCount)
def img2vector(filename):
    """Read a 32x32 text image of '0'/'1' characters into a 1x1024 vector.

    Fixes: the original `print("returnVect\n"+returnVect)` raised
    TypeError (cannot concatenate str and ndarray) on every call, and the
    file handle was never closed.
    """
    returnVect = zeros((1, 1024))
    with open(filename) as fr:
        for i in range(32):
            lineStr = fr.readline()
            for j in range(32):
                # Row i of the image maps to columns 32*i .. 32*i+31.
                returnVect[0, 32 * i + j] = int(lineStr[j])
    return returnVect
def handwritingClassTest():
    """Digit-recognition benchmark: train on trainingDigits/, test on testDigits/.

    File names encode the true class as '<digit>_<sample>.txt'. Prints each
    prediction plus the total error count and error rate.
    """
    hwLabels = []
    trainingFileList = listdir('trainingDigits')  # load the training set
    m = len(trainingFileList)
    trainingMat = zeros((m, 1024))
    print(trainingMat)  # NOTE(review): debug print of the full matrix — very noisy for large m
    for i in range(m):
        fileNameStr = trainingFileList[i]
        fileStr = fileNameStr.split('.')[0]  # take off .txt
        classNumStr = int(fileStr.split('_')[0])  # leading digit is the label
        hwLabels.append(classNumStr)
        trainingMat[i, :] = img2vector('trainingDigits/%s' % fileNameStr)
    testFileList = listdir('testDigits')  # iterate through the test set
    errorCount = 0.0
    mTest = len(testFileList)
    for i in range(mTest):
        fileNameStr = testFileList[i]
        fileStr = fileNameStr.split('.')[0]  # take off .txt
        classNumStr = int(fileStr.split('_')[0])
        vectorUnderTest = img2vector('testDigits/%s' % fileNameStr)
        classifierResult = classify0(vectorUnderTest, trainingMat, hwLabels, 3)
        print("the classifier came back with: %d, the real answer is: %d" % (classifierResult, classNumStr))
        if (classifierResult != classNumStr): errorCount += 1.0
    print("\nthe total number of errors is: %d" % errorCount)
    print("\nthe total error rate is: %f" % (errorCount / float(mTest)))
if __name__ == "__main__":
group,labels = createDataSet()
classer=classify0([0,0],group,labels,3)
handwritingClassTest()
# datingDataMat, datingLabels=file2matrix('datingTestSet2.txt')
# show(datingDataMat,datingLabels)
| geroge-gao/MachineLeaning | kNN/kNN.py | kNN.py | py | 4,622 | python | en | code | 4 | github-code | 36 |
22264732686 | from excepciones_estrellas import RutaPeligrosa
# No modificar esta función
def verificar_condiciones_estrella(estrella):
    """Raise RutaPeligrosa if the star is too bright, too large or too hot
    to route past; return None when it is safe.

    Thresholds: luminosidad > 15500 -> "luz" (light);
    magnitud > 4 -> "tamaño" (size); temperatura > 7200 -> "calor" (heat).
    """
    if estrella.luminosidad > 15500:
        raise RutaPeligrosa("luz", estrella.nombre)
    elif estrella.magnitud > 4:
        raise RutaPeligrosa("tamaño", estrella.nombre)
    elif estrella.temperatura > 7200:
        raise RutaPeligrosa("calor", estrella.nombre)
# Completar
def generar_ruta_estrellas(estrellas):
    """Build a travel route from the safe stars in `estrellas`.

    Dangerous stars (those for which verificar_condiciones_estrella raises
    RutaPeligrosa) are reported and skipped; safe ones are announced and
    added. Returns the list of accepted star names.
    """
    ruta = []
    for estrella in estrellas:
        try:
            verificar_condiciones_estrella(estrella)
        except RutaPeligrosa as error:
            print(error)
            error.dar_alerta_peligro()
            continue
        ruta.append(estrella.nombre)
        print(f'¡La estrella {estrella.nombre} se ha agregado a tu ruta!' + u'\x02' + '\n')
    return ruta
| Alzvil/IIC2233-Progra-Avanzada-Tareas-2021-1 | Actividades/AF2/calcular_ruta.py | calcular_ruta.py | py | 842 | python | es | code | 0 | github-code | 36 |
35386932044 | #!/usr/bin/env python3
from sys import stderr, exit
import random
from multilanguage import Env, Lang, TALcolors
from TALinputs import TALinput
from TALfiles import TALfilesHelper
import triangle_lib as tl
# METADATA OF THIS TAL_SERVICE:
# Declared service parameters (name, type) to be parsed from the TAL request.
args_list = [
    ('source',str),
    ('instance_id',int),
    ('instance_format',str),
    ('n',int),
    ('MIN_VAL',int),
    ('MAX_VAL',int),
    ('seed',str),
    ('path',str),
    ('display',bool),
    ('silent',bool),
]
# Service environment, coloured printer, localisation helper (render_feedback
# evaluates message templates as f-strings) and file helper.
ENV =Env(args_list)
TAc =TALcolors(ENV)
LANG=Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))
TALf = TALfilesHelper(TAc, ENV)
# START CODING YOUR SERVICE:
# CHECK MIN_VAL <= MAX_VAL
# Reject an empty value range up front.
# Fix: the feedback f-string referenced bare MIN_VAL/MAX_VAL, which are not
# defined at module level — triggering this branch raised NameError instead
# of printing the message. They must be read from ENV.
if ENV['MIN_VAL'] > ENV['MAX_VAL']:
    TAc.NO()
    TAc.print(LANG.render_feedback("range-is-empty", f"Error: I can not choose the integers for the triangle from the range [{ENV['MIN_VAL']},{ENV['MAX_VAL']}] since this range is empty.", {"MIN_VAL":ENV['MIN_VAL'], "MAX_VAL":ENV['MAX_VAL']}), "red", ["bold"])
    exit(0)
# TRIANGLE GENERATION
if TALf.exists_input_file('instance'):
instance = tl.get_instance_from_str(TALf.input_file_as_str('instance'), instance_format_name=ENV["instance_format"])
TAc.print(LANG.render_feedback("successful-load", 'The file you have associated to `instance` filehandler has been successfully loaded.'), "yellow", ["bold"])
elif ENV["source"] == 'terminal':
instance = {}
TAc.print(LANG.render_feedback("waiting-lines", f'#? waiting for the first string of the triangle.\nFormat: the i-th line contains i elements\n'), "yellow")
triangle = []
for i in range(ENV['n']):
TAc.print(LANG.render_feedback("insert-line", f'Enter line n. {i+1} containing {i+1} elements:'), "yellow", ["bold"])
l = TALinput(str, line_recognizer=lambda val,TAc, LANG: True, TAc=TAc, LANG=LANG)
l = [int(x) for x in l]
for el in l:
if el < ENV['MIN_VAL'] or el > ENV['MAX_VAL']:
TAc.NO()
TAc.print(LANG.render_feedback("val-out-of-range", f"The value {el} falls outside the valid range [{ENV['MIN_VAL']},{ENV['MAX_VAL']}]."), "red", ["bold"])
exit(0)
if len(l) != i+1:
TAc.NO()
TAc.print(LANG.render_feedback("wrong-elements-number", f"Expected {i+1} elements for line {i+1}, but received {len(l)}."), "red", ["bold"])
exit(0)
triangle.append(l)
instance['triangle'] = triangle
instance['n'] = ENV['n']
instance_str = tl.instance_to_str(instance, format_name=ENV['instance_format'])
output_filename = f"terminal_instance.{ENV['instance_format']}.txt"
elif ENV["source"] == 'randgen_1':
# Get random instance
if ENV['seed'] == "random_seed":
seed = random.randint(100000,999999)
else:
seed = int(ENV['seed'])
instance = tl.instances_generator(1, 1, ENV['MIN_VAL'], ENV['MAX_VAL'], ENV['n'], ENV['n'],seed=seed)[0]
TAc.print(LANG.render_feedback("instance-generation-successful", f'The instance has been successfully generated by the pseudo-random generator {ENV["source"]} called with arguments:\nn={instance["n"]},\nMIN_VAL={instance["MIN_VAL"]},\nMAX_VAL={instance["MAX_VAL"]},\nseed={instance["seed"]}'), "yellow", ["bold"])
else: # take instance from catalogue
instance_str = TALf.get_catalogue_instancefile_as_str_from_id_and_ext(ENV["instance_id"], format_extension=tl.format_name_to_file_extension(ENV["instance_format"],'instance'))
instance = tl.get_instance_from_str(instance_str, instance_format_name=ENV["instance_format"])
TAc.print(LANG.render_feedback("instance-from-catalogue-successful", f'The instance with instance_id={ENV["instance_id"]} has been successfully retrieved from the catalogue.'), "yellow", ["bold"])
if ENV['path'] == "my_path":
TAc.print(LANG.render_feedback("enter-path", f'Enter the string encoding the path. E.G. LRRLR:'), "yellow", ["bold"])
path = TALinput(str, line_recognizer=lambda val,TAc, LANG: tl.check_path(val, TAc=TAc,LANG=LANG), TAc=TAc, LANG=LANG)[0]
else:
path = ENV['path']
if len(path) != instance['n']-1:
TAc.NO()
if len(path) < instance['n']-1:
TAc.print(LANG.render_feedback("path-too-short", f'The string of the L/R choices encoding your path is too short for a triangle with n={instance["n"]} rows.'), "red", ["bold"])
if len(path) > instance['n']-1:
TAc.print(LANG.render_feedback("path-too-long", f'The string of the L/R choices encoding your path is too long for a triangle with n={instance["n"]} rows.'), "red", ["bold"])
TAc.print(LANG.render_feedback("wrong-path-length", f'The true number of required choices is n-1={instance["n"]-1} instead of {len(path)}.'), "red", ["bold"])
exit(0)
if not ENV['silent']:
TAc.print(LANG.render_feedback("feasible-path", f'Your solution path ({path}) is a feasible one for this problem since it comprises {instance["n"]-1} subsequent choices of directions (the correct number).'), "green", ["bold"])
if ENV['display']:
TAc.print(LANG.render_feedback("this-is-the-instance", 'This is the instance:'), "white", ["bold"])
tl.print_path(instance["triangle"],path,ENV['instance_format'])
TAc.print(LANG.render_feedback("path-reward", f'The total reward collected by your path is {tl.calculate_path(instance["triangle"],path)}.'), "green", ["bold"])
exit(0)
| romeorizzi/TALight | example_problems/tutorial/triangle/services/check_and_reward_one_sol_driver.py | check_and_reward_one_sol_driver.py | py | 5,352 | python | en | code | 11 | github-code | 36 |
42412749367 | from flask import Flask, Response, jsonify
from Flask_PoolMysql import func
# 实例化flask对象
app = Flask(__name__)
app.config.from_pyfile('config.py')
class JsonResponse(Response):
    """Response subclass that auto-converts dict view returns to JSON."""
    @classmethod
    def force_type(cls, response, environ=None):
        """Invoked by Flask only when a view returns something that is not
        a string, tuple or Response object.

        :param response: the raw value returned by the view function
        :param environ: WSGI environ, passed through unchanged
        :return: a proper Response object
        """
        # Convert a returned dict into JSON.
        if isinstance(response, dict):
            # jsonify serialises the dict to a JSON body and already wraps
            # it in a Response object.
            response = jsonify(response)
        return super(JsonResponse, cls).force_type(response, environ)
# Use JsonResponse so views may return plain dicts.
app.response_class = JsonResponse
# Register the '/' -> index mapping in the routing table.
@app.route('/')
def index():
    # Two queries: the full book table, plus per-subgroup row counts.
    result_a = func('select * from book')
    result_b = func('select subgroup,count(*) from book group by subgroup')
    return {'first': result_a, 'twice': result_b}
@app.route('/get')
def get():
    # Pick 15 random questions from `study` and join them with their
    # content and answers from `question`.
    result = func('select s.questionid,s.level,content,answer from question r join (select questionid,level from study order by RAND() limit 15) s on r.questionid = s.questionid')
    return {'topic': result}
if __name__ == '__main__':
    # Start the development server; each incoming request is dispatched
    # through app.__call__.
    app.run()
| loingjuzy/learn-flask | Flask_T1.py | Flask_T1.py | py | 1,397 | python | en | code | 0 | github-code | 36 |
25667125459 | import os
import sys
import math
import socket
import random
import threading
from cv2 import aruco
from threading import Thread
from collections import namedtuple
from gps.Address import *
from gps.ServerThreadManager import *
from gps.SimulatorClient import *
class SimulatedGps:
    '''
    Simulated GPS service based on Aruco marker detection: owns the
    simulator client plus the server-thread manager that communicates
    with subscribed cars.
    '''
    #================================ INIT ====================================
    def __init__(self):
        '''Create the log writer, read the configuration and build the
        simulator client and server thread manager (nothing is started yet).
        '''
        # Fix: dropped the redundant `logFile = ...` double-binding; the
        # attribute is the only name that was ever used afterwards.
        self.logFile = ThreadSafeFileWriter('log.txt')
        # Unpack the full configuration tuple; the image/marker values are
        # currently unused here but kept named for documentation.
        ( bcast_ip, host_ip, negotiation_port,
            subscription_port, car_subscription_port, car_com_port,
            image_dimensions, frame_rate, image_brightness,
            marker_width
        ) = SimulatedGps.init_parameters()
        self.server_address = Address("",0)
        self.simulatorClient = SimulatorClient(
            threadID = 1,
            server_address = self.server_address,
            logFile = self.logFile
        )
        self.serverThreadManager = ServerThreadManager(
            threadID = 2,
            server_address = self.server_address,
            broadcast_ip = bcast_ip,
            host_ip = host_ip,
            negotiation_port = negotiation_port,
            subscription_port = subscription_port,
            car_subscription_port = car_subscription_port,
            car_communication_port = car_com_port,
            max_wait_time_for_server = 10,
            logFile = self.logFile
        )
    #================================ INIT PARAM ==============================
    @staticmethod
    def init_parameters():
        '''Discover local network settings and return the configuration tuple
        (bcast_ip, host_ip, ports..., image settings, marker width).

        Fix: declared @staticmethod — the method takes no self and was
        already invoked as SimulatedGps.init_parameters(); without the
        decorator an instance-bound call would have failed.
        '''
        # Default-route gateway (Linux-specific: parses `ip -4 route`); a
        # throwaway UDP "connect" reveals the local outbound IP.
        gw = os.popen("ip -4 route show default").read().split()
        s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        s.connect((gw[2], 0))
        gateway = gw[2]
        HOST_NAME = socket.gethostname()
        HOST_IP = s.getsockname()[0]
        s.close()
        print ("IP:", HOST_IP, " GW:", gateway, " Host:", HOST_NAME)
        # Port layout: negotiation, then subscription ports, then car comms.
        NEGOTIATION_PORT = 12346
        SUBSCRITPION_PORT = NEGOTIATION_PORT + 1
        CAR_SUBSCRITPION_PORT = NEGOTIATION_PORT + 2
        CAR_COMMUNICATION_PORT = CAR_SUBSCRITPION_PORT + 2
        BCAST_IP = '<broadcast>'#"172.24.1.255"
        IMAGE_DIMMENSIONS = (1648,1232) # (px,px)
        FRAME_RATE = 15 # fps
        IMAGE_BRIGHTNESS = 50 # %
        MARKER_WIDTH = 100 # mm
        return (BCAST_IP, HOST_IP, NEGOTIATION_PORT, SUBSCRITPION_PORT,
                CAR_SUBSCRITPION_PORT,CAR_COMMUNICATION_PORT,
                IMAGE_DIMMENSIONS,FRAME_RATE,IMAGE_BRIGHTNESS,MARKER_WIDTH
                )
    #================================ RUN =====================================
    def run(self):
        '''Start all threads and block until KeyboardInterrupt, then stop
        and join every thread and close the log file.'''
        try:
            self.logFile.open()
            try:
                self.serverThreadManager.NegotiateServer()
                self.serverThreadManager.start()
                self.simulatorClient.start()
                # Busy-wait until Ctrl-C. NOTE(review): this spins a CPU
                # core; a sleep/Event-based wait would be gentler, kept
                # as-is to avoid timing changes.
                while(True):
                    s=0
            except KeyboardInterrupt:
                print('KeyboardInterrupt')
                pass
            # Orderly shutdown: client first, then the server manager.
            self.simulatorClient.stop()
            self.simulatorClient.join()
            self.serverThreadManager.stop()
            self.serverThreadManager.join()
            self.serverThreadManager.stopAllThread()
            print("Active thread",threading.enumerate())
            self.logFile.close()
        except BaseException as e:
            print("[Exception Thrown] %s"%e)
| AlexPirciu/BFMC | BFMC_GPS/gps/SimulatedGps.py | SimulatedGps.py | py | 4,156 | python | en | code | 0 | github-code | 36 |
72809319784 | from typing import TYPE_CHECKING, Any, Dict, List, Type, TypeVar, Union
import attr
from ..models.reference_type import ReferenceType
from ..types import UNSET, Unset
if TYPE_CHECKING:
from ..models.key import Key
T = TypeVar("T", bound="Reference")
# NOTE(review): this model looks auto-generated (openapi-python-client
# style); prefer regenerating from the API spec over hand-editing.
@attr.s(auto_attribs=True)
class Reference:
    """AAS Reference model.

    Attributes:
        referred_semantic_id (Union[Unset, Reference]):
        type (Union[Unset, ReferenceType]):
        keys (Union[Unset, List['Key']]):
    """

    referred_semantic_id: Union[Unset, "Reference"] = UNSET
    type: Union[Unset, ReferenceType] = UNSET
    keys: Union[Unset, List["Key"]] = UNSET
    # Captures any extra JSON properties not declared above.
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)
    def to_dict(self) -> Dict[str, Any]:
        """Serialise to a plain dict for JSON encoding; UNSET fields are omitted."""
        referred_semantic_id: Union[Unset, Dict[str, Any]] = UNSET
        if not isinstance(self.referred_semantic_id, Unset):
            referred_semantic_id = self.referred_semantic_id.to_dict()
        type: Union[Unset, str] = UNSET
        if not isinstance(self.type, Unset):
            type = self.type.value
        keys: Union[Unset, List[Dict[str, Any]]] = UNSET
        if not isinstance(self.keys, Unset):
            keys = []
            for keys_item_data in self.keys:
                keys_item = keys_item_data.to_dict()
                keys.append(keys_item)
        # Undeclared pass-through properties are emitted first, then the
        # declared fields (which therefore win on key collisions).
        field_dict: Dict[str, Any] = {}
        field_dict.update(self.additional_properties)
        field_dict.update({})
        if referred_semantic_id is not UNSET:
            field_dict["referredSemanticId"] = referred_semantic_id
        if type is not UNSET:
            field_dict["type"] = type
        if keys is not UNSET:
            field_dict["keys"] = keys
        return field_dict
    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build a Reference from a decoded-JSON dict (inverse of to_dict)."""
        from ..models.key import Key
        d = src_dict.copy()
        _referred_semantic_id = d.pop("referredSemanticId", UNSET)
        referred_semantic_id: Union[Unset, Reference]
        if isinstance(_referred_semantic_id, Unset):
            referred_semantic_id = UNSET
        else:
            referred_semantic_id = Reference.from_dict(_referred_semantic_id)
        _type = d.pop("type", UNSET)
        type: Union[Unset, ReferenceType]
        if isinstance(_type, Unset):
            type = UNSET
        else:
            type = ReferenceType(_type)
        keys = []
        _keys = d.pop("keys", UNSET)
        for keys_item_data in _keys or []:
            keys_item = Key.from_dict(keys_item_data)
            keys.append(keys_item)
        reference = cls(
            referred_semantic_id=referred_semantic_id,
            type=type,
            keys=keys,
        )
        # Whatever was not consumed above is preserved verbatim.
        reference.additional_properties = d
        return reference
    @property
    def additional_keys(self) -> List[str]:
        """Names of the undeclared (pass-through) properties."""
        return list(self.additional_properties.keys())
    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]
    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value
    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]
    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| sdm4fzi/aas2openapi | ba-syx-submodel-repository-client/ba_syx_submodel_repository_client/models/reference.py | reference.py | py | 3,280 | python | en | code | 7 | github-code | 36 |
72515328423 |
"""Scan ../<case>/Simulation/*/MD_R*.out files and report, per IFD case and
in total, how many MD simulations finished, succeeded or failed.

A run is "finished" when its tail contains a 'Root mean squared' line; it
"failed" when the last reported Temperature exceeds 500 K or when no
Temperature line is found in the last 36 lines.
"""
import glob, os, sys

# Each top-level directory next to this one (except helper "script" dirs)
# is one IFD case.
lig_set = [x for x in os.listdir('../') if 'script' not in x]
print(f'\n\nThere are {len(lig_set)} IFD cases.\n\n')

total_sim = 0
total_failed_sim = 0
total_success_sim = 0
for lig in lig_set:
    num_sim = 0
    failed_sim = 0
    success_sim = 0
    finished_sim = 0
    lig_sim_list = glob.glob(f'../{lig}/Simulation/*/MD_R*.out')
    for sim in lig_sim_list:
        with open(sim, 'r') as f:
            cont = f.readlines()
        finished = False
        # Only the tail of the log matters for the final status.
        for line in cont[-36:]:
            if 'Root mean squared' in line:
                finished = True
                finished_sim += 1
            if 'Temperature' in line:
                # Blown-up simulations report runaway temperatures.
                if float(line.split(':')[-1]) > 500.0:
                    failed_sim += 1
                elif finished:
                    success_sim += 1
                else:
                    failed_sim += 1
                break
        else:
            # No Temperature line at all in the tail -> count as failed.
            failed_sim += 1
        num_sim += 1
    total_sim += num_sim
    total_failed_sim += failed_sim
    total_success_sim += success_sim
    print(f'Ligand: {lig} | {num_sim} simulations | {finished_sim} finished | {success_sim} succeeded |', end=' ')
    if num_sim > 0:
        print(f'{failed_sim} failed ({failed_sim / num_sim * 100:.1f} %)')
    else:
        print(' ')
print(f'Total simulations: {total_sim}')
print(f'Total failed simulations: {total_failed_sim}')
print(f'Total success simulations: {total_success_sim}')
# Fix: guard against ZeroDivisionError when no simulations were found
# (the per-ligand division above was guarded, this one was not).
if total_sim > 0:
    print(f'Success percentage: {total_success_sim / total_sim}')
else:
    print('Success percentage: n/a (no simulations found)')
| darrenjhsu/tiny_IFD | 01_Workflow/utilities/check_sims.py | check_sims.py | py | 1,869 | python | en | code | 12 | github-code | 36 |
16272104179 | import torch
import torch.nn.functional as F
# Focal Loss with alpha=0.25 and gamma=2 (standard)
class FocalLoss(torch.nn.Module):
    """Binary focal loss on logits (Lin et al., 2017).

    Down-weights easy examples: each element's BCE term is scaled by
    alpha * (1 - p_t) ** gamma, where p_t is the model's probability of
    the true class. Returns the mean over all elements.
    """

    def __init__(self, alpha=0.25, gamma=2):
        super().__init__()
        self.alpha = alpha  # class-balance weight
        self.gamma = gamma  # focusing exponent

    def forward(self, pred, targets):
        """pred: raw logits; targets: same-shape {0, 1} labels."""
        per_elem_bce = F.binary_cross_entropy_with_logits(pred, targets, reduction='none')
        # exp(-BCE) recovers p_t, the probability assigned to the true class.
        prob_true = torch.exp(-per_elem_bce)
        focal_weight = self.alpha * (1 - prob_true) ** self.gamma
        return (focal_weight * per_elem_bce).mean()
# Label Smoothing with smoothing=0.1
class LabelSmoothingLoss(torch.nn.Module):
    """Cross-entropy with label smoothing.

    The target distribution puts `1 - smoothing` on the true class and
    spreads `smoothing` uniformly over the remaining `classes - 1`.

    Fix: removed four leftover debug print() calls from forward() (they
    spammed every training step; the pasted traceback at the bottom of
    this file shows them mid-debug).
    """
    def __init__(self, classes=2, smoothing=0.1, dim=-1, weight = None):
        """if smoothing == 0, it's one-hot method
        if 0 < smoothing < 1, it's smooth method
        """
        super(LabelSmoothingLoss, self).__init__()
        self.confidence = 1.0 - smoothing  # mass kept on the true class
        self.smoothing = smoothing
        self.weight = weight   # optional per-class weights, shape (classes,)
        self.cls = classes
        self.dim = dim
    def forward(self, pred, target):
        """pred: (N, classes) raw logits; target: (N,) integer class indices."""
        assert 0 <= self.smoothing < 1
        pred = pred.log_softmax(dim=self.dim)
        if self.weight is not None:
            # Apply optional per-class weights to the log-probabilities.
            pred = pred * self.weight.unsqueeze(0)
        with torch.no_grad():
            # Build the smoothed target distribution (no gradient needed).
            true_dist = torch.zeros_like(pred)
            true_dist.fill_(self.smoothing / (self.cls - 1))
            true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
        return torch.mean(torch.sum(-true_dist * pred, dim=self.dim))
# NOTE(review): recorded crash trace from a previous training run, kept for
# reference; as a bare string expression it is a no-op at runtime. It shows
# the IndexError raised when forward() receives 1-D (squeezed) predictions
# instead of the expected (N, classes) logits.
'''
torch.Size([1])
torch.Size([1, 1])
torch.Size([1])
torch.Size([1])
Traceback (most recent call last):
  File "main.py", line 15, in <module>
    main()
  File "main.py", line 12, in main
    losses,accs,testResults = train(trainArgs)
  File "/Midgard/home/martinig/adv-comp-bio/trainAndTest.py", line 54, in train
    loss = criterion(y_pred.type(torch.DoubleTensor).squeeze(1),y.type(torch.DoubleTensor))
  File "/Midgard/home/martinig/miniconda3/envs/drugVQA/lib/python3.8/site-packages/torch/nn/modules/module.py", line 1501, in _call_impl
    return forward_call(*args, **kwargs)
  File "/Midgard/home/martinig/adv-comp-bio/loss.py", line 44, in forward
    true_dist.scatter_(1, target.data.unsqueeze(1), self.confidence)
IndexError: Dimension out of range (expected to be in range of [-1, 0], but got 1)
'''
35675762725 | """
*Kind*
Second kind.
In HTML, instances include element, class, ident, pseudo-class, pseudo-element.
Importantly, this allows the instantiation of custom XML types.
"""
from abc import ABCMeta
__all__ = ["Kind"]
class Kind:
__metaclass__ = ABCMeta
| jedhsu/text | text/_form/cascade/_kind/_kind.py | _kind.py | py | 274 | python | en | code | 0 | github-code | 36 |
"""Interactive tool for triaging candidate words into the game pool.

Reads the master word list (words.js) plus bookkeeping JSON files, shows
unreviewed words one at a time ('a' = accept, Enter = reject, 'q' = quit),
lets the user amend the accepted batch, then persists pool.json and
rejected.json.
"""
import json
import random

words = []
unavailableWordIndices = set()
rejectedWordIndices = set()

# words.js is JavaScript; slice out the array literal (from the first '['
# to the last ',') so it parses as JSON.
with open("words.js", "r") as f:
    s = f.read()
    s = s[s.find("["):s.rfind(",")] + "]"
    words = json.loads(s)

# Words already used in past games.
with open("history.json", "r") as f:
    history = json.load(f)
    for item in history:
        index = item.get("index")
        if index is not None:
            unavailableWordIndices.add(index)

# Words already accepted into the pool.
with open("pool.json", "r") as f:
    pool = json.load(f)
    unavailableWordIndices.update(pool)

# Words scheduled as upcoming solutions.
with open("solution.json", "r") as f:
    solution = json.load(f)
    for item in solution:
        index = item.get("index")
        if index is not None:
            unavailableWordIndices.add(index)

# Words previously rejected by a reviewer.
with open("rejected.json") as f:
    rejected = json.load(f)
    rejectedWordIndices.update(rejected)

unavailableWordIndices.update(rejectedWordIndices)
availableWordIndices = [i for i in range(
    len(words)) if i not in unavailableWordIndices]
random.shuffle(availableWordIndices)

# Triage loop: accept ('a'), reject (Enter) or quit ('q') each candidate.
poolAdditions = []
for index in availableWordIndices:
    print(words[index])
    wants_quit = False  # renamed from 'quit', which shadowed the builtin
    while True:
        c = input("Verdict: ")
        if c == "a":
            poolAdditions.append(index)
            break
        elif c == "":
            rejectedWordIndices.add(index)
            break
        elif c == "q":
            wants_quit = True
            break
    if wants_quit:
        break

# Review loop: show the accepted batch and let the user strike entries by
# listing their displayed numbers (space-separated). Empty input confirms.
reviewed = False
while not reviewed:
    print("\n=====================================\n")
    print("Review additions: ")
    for i in range(len(poolAdditions)):
        index = poolAdditions[i]
        p = "X" if index in rejectedWordIndices else i
        print(p, ": ", words[index])
    amendments = input("Amendments: ")
    if amendments == "":
        reviewed = True
    else:
        try:
            ais = [int(tok) for tok in amendments.split(' ')]
            for ai in ais:
                rejectedWordIndices.add(poolAdditions[ai])
        except (ValueError, IndexError):  # fix: was a bare except (swallowed Ctrl-C too)
            print("Invalid input")
            continue

# Persist: accepted-and-not-struck words join the pool; rejects are sorted.
pool.extend([i for i in poolAdditions if i not in rejectedWordIndices])
with open("pool.json", "w") as f:
    json.dump(pool, f, indent=2)
orderedRejected = sorted(rejectedWordIndices)
with open("rejected.json", "w") as f:
    json.dump(orderedRejected, f, indent=2)
| mkacz91/slowle | picker.py | picker.py | py | 2,289 | python | en | code | 1 | github-code | 36 |
21417420352 | import pandas as pd
import numpy as np
import requests
from textblob import TextBlob as tb
from bs4 import BeautifulSoup as bs
from matplotlib import pyplot as plt
import time
import nltk
import re
from IPython.display import clear_output
import matplotlib.pyplot as plt
import seaborn as sns
stopwords = nltk.corpus.stopwords.words("english")
def ruku_likn_de(url , pg = 20):
    """Dispatch a product URL to the Amazon or Flipkart scraper.

    NOTE(review): url[12:18] assumes the URL starts with exactly
    'https://www.' — any other prefix silently routes the link to the
    Flipkart branch. Confirm whether a substring check was intended.
    """
    if url[12:12+6] == "amazon":
        print("Amazon Link Detected")
        return find_amazon_data_ruku(url , pg)
    else:
        print("FLipkart Link Detected")
        return find_Flip_data_ruku(url , pg)
def mood(t):
    """Classify text *t* by TextBlob polarity: "Happy", "Sad" or "No Mood"."""
    polarity = tb(t).sentiment.polarity
    if polarity > 0:
        return "Happy"
    if polarity < 0:
        return "Sad"
    return "No Mood"
#Amazon Website
def find_amazon_data_ruku(link , pg = 10 ):
    """Scrape up to *pg* pages of Amazon reviews for the product at *link*.

    Builds the review-pagination URL from the product URL, parses each page
    with BeautifulSoup, collects Rating/Title/Content/Polarity per review,
    then renders the summary charts.  Returns the raw list of review dicts.
    """
    raw = link
    last = pg
    code = 0  # count of pages that returned HTTP "OK"
    review = []
    for p in range(1,last+1):
        # Rewrite ".../dp/...ref..." into the paginated product-reviews URL.
        num = raw.find("ref")
        url_1 = raw[0:num]
        url_2 = f"ref=cm_cr_arp_d_paging_btm_next_{p}?ie=UTF8&reviewerType=all_reviews&pageNumber={p}"
        finalurl = url_1+url_2
        finalurl = finalurl.replace("/dp/","/product-reviews/")
        data = requests.get(finalurl)
        print("amazon Link Detected")
        if (data.reason) == "OK" :
            code = code+1
            data = bs(data.content ,"html.parser")
            data = data.find_all(class_= "aok-relative")
            print(int(p/last *100) , "% Completion")
            print(int(code/last * 100) , "% Success Rate")
            clear_output(wait=True)
            for d in data:
                # NOTE(review): assumes Amazon's current CSS class names;
                # any markup change will raise AttributeError here.
                d = {
                    "Rating" : float(d.find(class_="a-link-normal").text[0:3]),
                    "Title" : tb(d.find(class_="review-title-content").text).correct(),
                    "Content" : (d.find(class_="review-text-content").text),
                    "Polarity": mood(d.find(class_="review-text-content").text)
                }
                review.append(d)
    print((code/last) * 100 ,"% is the Sucess rate")
    data = pd.DataFrame(review)
    data.replace("\n","",regex=True,inplace=True)
    # Recompute sentiment into the (misspelled) "Polartiy" column used by the charts.
    data["Polartiy"] = data["Content"].apply(mood)
    # Lower-case every column that supports it; non-string columns fail silently.
    for d in data.columns:
        try:
            data[d] = data[d].apply(low)
        except:
            pass
    show_rating_bar(data)
    show_pie_chart(data)
    show_Sad_chart(data , n = 1)
    show_Happy_chart(data, n = 1)
    return review
#flipkart
def find_Flip_data_ruku(link , pg = 50):
    """Scrape up to *pg* pages of Flipkart reviews for the product at *link*.

    Rewrites the product URL into its product-reviews pagination, parses each
    page with BeautifulSoup, then computes sentiment and renders the charts.
    Returns the raw list of review dicts.
    """
    raw = link
    last = pg
    code = 0  # count of pages that returned HTTP "OK"
    review = []
    for p in range(1,last+1):
        num = raw.find("&")
        url_1 = raw[0:num+1]+f"page={p}"
        url_1 = url_1.replace("/p/","/product-reviews/")
        data = requests.get(url_1)
        if (data.reason) == "OK" :
            code = code+1
            data = bs(data.content,"html.parser")
            # NOTE(review): Flipkart's obfuscated CSS class names change
            # frequently; these selectors are markup-version specific.
            data = data.find_all(class_= "col _2wzgFH K0kLPL")
            print(int(p/last *100) , "% Completion")
            print(int(code/last * 100) , "% Sucess Rate")
            clear_output(wait=True)
            for d in data:
                d = {
                    "Rating" : float(d.find(class_="_1BLPMq").text),
                    "Title" : d.find(class_="_2-N8zT").text,
                    "Content" : d.find(class_="t-ZTKy").text
                }
                review.append(d)
    print((code/last) * 100 ,"% is the Sucess rate")
    data = pd.DataFrame(review)
    data.replace("\n","",regex=True,inplace=True)
    # Local re-definition shadowing the module-level mood(); same logic.
    def mood(t):
        mood = tb(t).sentiment.polarity
        if mood > 0:
            return "Happy"
        elif mood == 0:
            return "No Mood"
        else:
            return "Sad"
    data["Polartiy"] = data["Content"].apply(mood)
    # Lower-case every column that supports it; non-string columns fail silently.
    for d in data.columns:
        try:
            data[d] = data[d].apply(low)
        except:
            pass
    show_rating_bar(data)
    plt.close()
    show_pie_chart(data)
    plt.close()
    show_Sad_chart(data , n = 2)
    plt.close()
    show_Happy_chart(data, n = 2)
    plt.close()
    return review
def low(text):
    """Return *text* lower-cased (helper applied to DataFrame columns)."""
    lowered = text.lower()
    return lowered
def show_rating_bar(data):
    """Save a bar chart of review counts per star rating to static/rate.png."""
    rating = data.groupby(by="Rating")[["Title"]].count()
    sns.barplot(y=rating.Title,x = rating.index)
    plt.savefig("static/rate.png")
    plt.clf()
    # time.sleep(1)
def show_pie_chart(data):
    """Save a pie chart of the sentiment ("Polartiy") split to static/pie.png.

    Any failure (e.g. missing column or empty DataFrame) is swallowed so the
    scraping pipeline keeps going.
    """
    try:
        x = data.groupby(by="Polartiy")[["Content"]].count()
        plt.pie(x = x.Content,autopct='%.2f',shadow=True,labels=x.index)
        plt.savefig("static/pie.png")
        plt.clf()
        # time.sleep(1)
    except:
        pass
def show_Happy_chart(data, n = 1):
    """Save a bar chart of the top-10 n-grams in happy reviews to static/hapy.png."""
    sad_data = data[data["Polartiy"] == "happy"]
    words = []
    for i in range(0,len(sad_data)):
        # NOTE(review): reads data.Content[i], not sad_data — so this walks the
        # first len(sad_data) rows of the FULL frame, not the happy subset.
        # Looks like a bug; confirm intent before changing.
        a = data.Content[i]
        a = re.sub("[', ),:,(,.,!,&,]"," ",a)
        a = re.sub("[0-9]"," ",a)
        a = " ".join(a.split())
        a = nltk.word_tokenize(a)
        a = nltk.ngrams(a,n)
        for m in a:
            # NOTE(review): m is an n-gram TUPLE, so it is never "in" the
            # stopwords list of strings — this filter has no effect.
            if m not in stopwords:
                words.append(m)
    val = nltk.FreqDist(words).values()
    key = nltk.FreqDist(words).keys()
    data_1 = pd.DataFrame(data={"Key":key, "val": val})
    data_1= data_1.sort_values(by = "val",ascending=False)[0:10]
    plt.figure(figsize=(8,8))
    sns.barplot(x = data_1.val, y = data_1.Key,orient="h")
    plt.savefig("static/hapy.png")
    plt.clf()
    # time.sleep(1)
def show_Sad_chart(data , n = 1):
    """Save a bar chart of the top-10 n-grams in sad reviews to static/sad.png."""
    sad_data = data[data["Polartiy"] == "sad"]
    words = []
    for i in range(0,len(sad_data)):
        # NOTE(review): reads data.Content[i], not sad_data — this walks the
        # first len(sad_data) rows of the FULL frame, not the sad subset.
        # Looks like a bug; confirm intent before changing.
        a = data.Content[i]
        a = re.sub("[', ),:,(,.,!,&,]"," ",a)
        a = re.sub("[0-9]"," ",a)
        a = " ".join(a.split())
        a = nltk.word_tokenize(a)
        a = nltk.ngrams(a,n)
        for m in a:
            # NOTE(review): m is an n-gram tuple, never "in" the stopwords
            # list of strings — this filter has no effect.
            if m not in stopwords:
                words.append(m)
    val = nltk.FreqDist(words).values()
    key = nltk.FreqDist(words).keys()
    data_1 = pd.DataFrame(data={"Key":key, "val": val})
    data_1= data_1.sort_values(by = "val",ascending=False)[0:10]
    sns.barplot(x = data_1.val, y = data_1.Key,orient="h")
    plt.savefig("static/sad.png")
    plt.clf()
    # time.sleep(0)
def low(text):
    """Lower-case *text*; duplicate of the helper defined earlier in this module."""
    result = text.lower()
    return result
| Ruksana-Kauser/NLP_Final_Project | reviews.py | reviews.py | py | 6,211 | python | en | code | 0 | github-code | 36 |
# Read n (number of devices) and k (number of tasks) from stdin.
n,k = [int(x) for x in input().split()]
work = {} # work[i]: set of tasks (0-based) that device i can perform
price = {} # price[i]: cost of device i
sumprice = 0
for i in range(n):
    # Each line: price followed by k flags (1 = device handles that task).
    data = [int(x) for x in input().split()]
    work[i] =set([j-1 for j in range(1,k+1) if data[j] == 1])
    price[i] = data[0]
    sumprice += data[0]
check = set([i for i in range(k)]) # check: the set of all tasks (0,1,2,..,k-1)
def indexing(ch):
    """Encode a set of task indices as a bitmask integer (bit i set iff i in ch)."""
    return sum(2 ** i for i in ch)
# Memo table: tar[i][mask] caches equip(i+1, mask).
# The original `[[0]*(2**k)]*n` replicated ONE row object n times, so a
# write for one device silently updated the cache of every device and
# produced wrong minima; build n independent rows instead.
tar = [[0] * (2 ** k) for _ in range(n)]
def equip(n,ch):
    """Minimum total price to cover task-set *ch* using only the first *n* devices.

    Memoized on (n, bitmask-of-ch) via the module-level table `tar`; 0 means
    "not computed yet".  Returns sumprice+1 (an impossible sentinel cost)
    when no devices remain but tasks are uncovered.
    Note: the parameter n shadows the module-level n.
    """
    if ch == set():
        return 0
    elif n == 0:
        return sumprice+1
    elif tar[n-1][indexing(ch)] != 0:
        return tar[n-1][indexing(ch)]
    else:
        # Either buy device n-1 (removes its tasks) or skip it.
        tar[n-1][indexing(ch)] = min(equip(n-1,ch-work[n-1])+price[n-1] , equip(n-1,ch))
        return tar[n-1][indexing(ch)]
# Minimum total price of a device subset covering all k tasks.
print(equip(n,check))
34203757063 | import torch
import torch.nn as nn
from math import sin, cos
import models
from models.base import BaseModel
from models.utils import chunk_batch
from systems.utils import update_module_step
from nerfacc import ContractionType, OccupancyGrid, ray_marching
from nerfacc.vol_rendering import render_transmittance_from_alpha, rendering
from utils.rotation import R_from_quaternions
@models.register('se3')
class SE3Model(BaseModel):
    """Two-branch NeRF (static + dynamic) whose dynamic part is related to a
    canonical pose by a single learnable SE(3) transform (quaternion rotation
    plus translation).  Rays are rendered by compositing the densities of
    both branches along shared samples.
    """
    def setup(self):
        """Build both geometry/texture branches, the learnable SE(3) parameters,
        the scene AABB, and (optionally) the nerfacc occupancy grid."""
        self.static_geometry = models.make(self.config.geometry.name, self.config.geometry)
        self.static_texture = models.make(self.config.texture.name, self.config.texture)
        self.dynamic_geometry = models.make(self.config.geometry.name, self.config.geometry)
        self.dynamic_texture = models.make(self.config.texture.name, self.config.texture)
        init_angle = self.config.get('init_angle', 0.1)
        init_dir = self.config.get('init_dir', [1., 1., 1.])
        # Learnable rigid motion: unit-ish quaternion + small translation.
        self.quaternions = nn.Parameter(self.init_quaternions(half_angle=init_angle, init_dir=init_dir), requires_grad=True)
        self.translation = nn.Parameter(torch.tensor([0.001, 0.001, 0.001], dtype=torch.float32), requires_grad=True)
        # State value of the canonical pose; states 0 and 1 map to scaling ±1.
        self.canonical = 0.5
        self.register_buffer('scene_aabb', torch.as_tensor([-self.config.radius, -self.config.radius, -self.config.radius, self.config.radius, self.config.radius, self.config.radius], dtype=torch.float32))
        if self.config.grid_prune:
            self.grid_warmup = self.config['grid_warmup']
            self.occupancy_grid = OccupancyGrid(
                roi_aabb=self.scene_aabb,
                resolution=128, # the resolution is open to discuss
                contraction_type=ContractionType.AABB
            )
        self.randomized = self.config.randomized
        if self.config.white_bkgd:
            self.register_buffer('background_color', torch.as_tensor([1.0, 1.0, 1.0], dtype=torch.float32), persistent=False)
            self.background_color.to(self.rank)
        # Step chosen so ~num_samples_per_ray samples span the AABB diagonal.
        self.render_step_size = 1.732 * 2 * self.config.radius / self.config.num_samples_per_ray
    def update_step(self, epoch, global_step):
        """Per-step hook: update texture schedules and refresh the occupancy
        grid from the composite (static + transformed-dynamic) density."""
        update_module_step(self.static_texture, epoch, global_step)
        update_module_step(self.dynamic_texture, epoch, global_step)
        def occ_eval_fn(x):
            sigma_s, _ = self.static_geometry(x)
            x_d = self.rigid_transform(x)
            sigma_d, _ = self.dynamic_geometry(x_d)
            sigma = sigma_s + sigma_d
            return sigma[...,None] * self.render_step_size
        if self.training and self.config.grid_prune:
            self.occupancy_grid.every_n_step(step=global_step, occ_eval_fn=occ_eval_fn, occ_thre=1e-4, warmup_steps=self.grid_warmup)
    def isosurface(self):
        """Extract meshes for both branches; returns {'static': ..., 'dynamic': ...}."""
        mesh_s = self.static_geometry.isosurface()
        mesh_d = self.dynamic_geometry.isosurface()
        return {'static': mesh_s, 'dynamic': mesh_d}
    def init_quaternions(self, half_angle, init_dir):
        """Build the initial quaternion [r, i, j, k] for a rotation of
        2*half_angle radians around the normalized axis *init_dir*."""
        a = torch.tensor([init_dir[0], init_dir[1], init_dir[2]], dtype=torch.float32)
        a = torch.nn.functional.normalize(a, p=2., dim=0)
        sin_ = sin(half_angle)
        cos_ = cos(half_angle)
        r = cos_
        i = a[0] * sin_
        j = a[1] * sin_
        k = a[2] * sin_
        q = torch.tensor([r, i, j, k], dtype=torch.float32)
        return q
    def rigid_transform(self, positions, state=0.):
        '''
        Perform the rigid transformation: R_axis_d,rot_angle(center=axis_o) @ x + t

        state 0 (scaling +1) applies R @ x + t; state 1 (scaling -1) applies
        the inverse transform R^-1 @ (x - t).  Only those two states are
        supported; anything else raises NotImplementedError.
        '''
        scaling = (self.canonical - state) / self.canonical
        if scaling == 1.:
            R = R_from_quaternions(self.quaternions)
            positions = torch.matmul(R, positions.T).T
            positions = positions + self.translation
        elif scaling == -1.:
            positions = positions - self.translation
            # Conjugate quaternion gives the inverse rotation (assumes unit norm).
            inv_sc = torch.tensor([1., -1., -1., -1]).to(self.quaternions)
            inv_q = inv_sc * self.quaternions
            R = R_from_quaternions(inv_q)
            positions = torch.matmul(R, positions.T).T
        else:
            raise NotImplementedError
        return positions
    def forward_(self, rays, scene_state):
        """Render a batch of rays at articulation state *scene_state*.

        rays: (N_rays, 6) origins+directions.  Samples are generated once by
        nerfacc ray marching against the composite density, then both
        branches are composited with shared transmittance.
        """
        rays_o, rays_d = rays[:, 0:3], rays[:, 3:6] # both (N_rays, 3)
        def sigma_fn_composite(t_starts, t_ends, ray_indices):
            # Density used for ray marching: static + (transformed) dynamic.
            ray_indices = ray_indices.long()
            t_origins = rays_o[ray_indices]
            t_dirs = rays_d[ray_indices]
            positions = t_origins + t_dirs * (t_starts + t_ends) / 2.
            sigma_s, _ = self.static_geometry(positions)
            positions = self.rigid_transform(positions, scene_state)
            sigma_d, _ = self.dynamic_geometry(positions)
            sigma = sigma_s + sigma_d
            return sigma[...,None]
        def rgb_sigma_fn_static(t_starts, t_ends, ray_indices):
            ray_indices = ray_indices.long()
            t_origins = rays_o[ray_indices]
            t_dirs = rays_d[ray_indices]
            positions = t_origins + t_dirs * (t_starts + t_ends) / 2.
            density, feature = self.static_geometry(positions)
            rgb = self.static_texture(feature, t_dirs)
            return rgb, density[...,None]
        def rgb_sigma_fn_dynamic(t_starts, t_ends, ray_indices):
            ray_indices = ray_indices.long()
            t_origins = rays_o[ray_indices]
            t_dirs = rays_d[ray_indices]
            positions = t_origins + t_dirs * (t_starts + t_ends) / 2.
            positions = self.rigid_transform(positions, scene_state)
            density, feature = self.dynamic_geometry(positions)
            # View directions are rotated into the canonical frame too.
            dirs_d = self.rigid_transform(t_dirs, scene_state)
            rgb = self.dynamic_texture(feature, dirs_d)
            return rgb, density[...,None]
        def composite_rendering(ray_indices, t_starts, t_ends):
            # Composite both branches under a single shared transmittance.
            n_rays = rays_o.shape[0]
            rgb_s, sigma_s = rgb_sigma_fn_static(t_starts, t_ends, ray_indices)
            rgb_d, sigma_d = rgb_sigma_fn_dynamic(t_starts, t_ends, ray_indices)
            dists = t_ends - t_starts
            alpha_s = 1. - torch.exp(-sigma_s * dists)
            alpha_d = 1. - torch.exp(-sigma_d * dists)
            # Union of the two occupancies at each sample.
            alpha_add = 1. - (1. - alpha_s) * (1. - alpha_d)
            Ts = render_transmittance_from_alpha(alpha_add, ray_indices=ray_indices)
            weights_s = alpha_s * Ts
            weights_d = alpha_d * Ts
            weights = weights_s + weights_d
            # opacity
            # NOTE(review): acc_along_rays is expected from BaseModel — confirm.
            opacity = self.acc_along_rays(weights, ray_indices, n_rays)
            opacity = opacity.squeeze(-1)
            # acc color
            rgb = weights_s * rgb_s + weights_d * rgb_d
            rgb = self.acc_along_rays(rgb, ray_indices, n_rays)
            # Background composition.
            if self.config.white_bkgd:
                rgb = rgb + self.background_color * (1. - opacity[..., None])
            # validation and testing
            if not self.training:
                # depth
                depth = weights * ((t_starts + t_ends) * 0.5)
                depth = self.acc_along_rays(depth, ray_indices, n_rays)
                depth = depth.squeeze(-1)
                # Per-branch renders for visualization/diagnostics.
                rgb_s_only, opacity_s, depth_s_only = rendering(t_starts, t_ends, ray_indices, n_rays,
                                                            rgb_sigma_fn=rgb_sigma_fn_static,
                                                            render_bkgd=self.background_color)
                rgb_d_only, opacity_d, depth_d_only = rendering(t_starts, t_ends, ray_indices, n_rays,
                                                            rgb_sigma_fn=rgb_sigma_fn_dynamic,
                                                            render_bkgd=self.background_color)
                return {
                    'rgb': rgb,
                    'opacity': opacity,
                    'depth': depth,
                    'rgb_s': rgb_s_only,
                    'rgb_d': rgb_d_only,
                    'depth_s': depth_s_only,
                    'depth_d': depth_d_only,
                    'opacity_s': opacity_s,
                    'opacity_d': opacity_d,
                }
            return {
                'rgb': rgb,
                'rays_valid': opacity > 0,
                'opacity': opacity,
            }
        with torch.no_grad():
            ray_indices, t_starts, t_ends = ray_marching(
                rays_o, rays_d,
                scene_aabb=self.scene_aabb,
                grid=self.occupancy_grid if self.config.grid_prune else None,
                sigma_fn=sigma_fn_composite,
                render_step_size=self.render_step_size,
                stratified=self.randomized,
            )
        render_out = composite_rendering(ray_indices, t_starts, t_ends)
        if self.training:
            return {
                'comp_rgb': render_out['rgb'],
                'opacity': render_out['opacity'],
                'rays_valid': render_out['rays_valid'],
                'num_samples': torch.as_tensor([len(t_starts)], dtype=torch.int32, device=rays.device),
            }
        return {
            'comp_rgb': render_out['rgb'],
            'opacity': render_out['opacity'],
            'depth': render_out['depth'],
            'rgb_s': render_out['rgb_s'],
            'rgb_d': render_out['rgb_d'],
            'depth_s': render_out['depth_s'],
            'depth_d': render_out['depth_d'],
            'opacity_s': render_out['opacity_s'],
            'opacity_d': render_out['opacity_d'],
        }
    def forward(self, rays_0, rays_1):
        """Render the same scene at both articulation states (0 and 1);
        returns a two-element list of per-state output dicts."""
        if self.training:
            out_0 = self.forward_(rays_0, scene_state=0.)
            out_1 = self.forward_(rays_1, scene_state=1.)
        else:
            # Chunked at eval time to bound peak memory.
            out_0 = chunk_batch(self.forward_, self.config.ray_chunk, rays_0, scene_state=0.)
            out_1 = chunk_batch(self.forward_, self.config.ray_chunk, rays_1, scene_state=1.)
        del rays_0, rays_1
        return [{**out_0}, {**out_1}]
    def train(self, mode=True):
        """Enable/disable stratified sampling together with train mode."""
        self.randomized = mode and self.config.randomized
        return super().train(mode=mode)
    def eval(self):
        """Deterministic sampling in eval mode."""
        self.randomized = False
        return super().eval()
    def regularizations(self, outs):
        """No extra regularization losses for this model."""
        losses = {}
        return losses
42632745572 | from setuptools import setup, find_packages
version = '0.1'
long_description = (
open('README.rst').read()
+ '\n' +
'Contributors\n'
'============\n'
+ '\n' +
open('CONTRIBUTORS.rst').read()
+ '\n' +
open('CHANGES.rst').read()
+ '\n')
setup(
name='imio.dms.ws',
version=version,
description="",
long_description=long_description,
classifiers=[
"Environment :: Web Environment",
"Framework :: Plone",
"Operating System :: OS Independent",
"Programming Language :: Python",
"Programming Language :: Python :: 2.7",
"Topic :: Software Development :: Libraries :: Python Modules",
],
keywords='',
author='',
author_email='',
url='https://github.com/IMIO/imio.dms.ws',
license='gpl',
packages=find_packages(exclude=['ez_setup']),
namespace_packages=['imio', 'imio.dms'],
include_package_data=True,
zip_safe=False,
install_requires=[
'Plone',
'imio.wsrequest.core',
'plone.api',
'setuptools',
],
extras_require={'test': ['plone.app.testing']},
entry_points="""
# -*- Entry points: -*-
""",
)
| IMIO/imio.dms.ws | setup.py | setup.py | py | 1,189 | python | en | code | 0 | github-code | 36 |
31059838395 | import ampalibe
from views import app_view
from ampalibe import Payload
from .base import chat, query
from response import BackAndMenuButton
from applicative.contre_vote import ContreVote
from applicative import Participant, Vote, Voter
@ampalibe.command("/vote")
def vote(sender_id, participant_id, **ext):
    """Handle the "/vote" command: register the sender as a voter if needed,
    then either start a vote for *participant_id*, confirm an existing
    identical vote, or ask whether to change the current vote."""
    voter = Voter.from_fb_id(sender_id)
    if not voter:
        # First contact: create the voter from their Messenger profile,
        # falling back to a placeholder name when the profile is unavailable.
        profil = chat.get_user_profile(sender_id)
        if profil:
            voter = Voter.new(
                profil["id"],
                f"{profil['first_name']} {profil['last_name']}",
                profil["profile_pic"],
            )
        else:
            voter = Voter.new(sender_id, "User", "")
    if not voter.vote:
        chat.send_quick_reply(
            sender_id,
            app_view.is_yes("/comment_vote", participant_id=participant_id),
            "Hanampy teny fanohanana?",
        )
    elif voter.vote.id == participant_id:
        # Already voting for this participant — just acknowledge.
        chat.send_text(
            sender_id,
            "Efa io indrindra ny safidinao 💥 \n\nMisaotra anao, tokam-po tsy"
            " miala amn'ny ekipa: " + voter.vote.univ_name,
        )
        return BackAndMenuButton(Payload("/participant"))
    else:
        chat.send_quick_reply(
            sender_id,
            app_view.is_yes("/vote_change", participant_id=participant_id),
            "Efa manana ekipa tohanana enao... ovaina ? 😱",
        )
else:
chat.send_quick_reply(
sender_id,
app_view.is_yes("/vote_change", participant_id=participant_id),
"Efa manana ekipa tohanana enao... ovaina ? 😱",
)
@ampalibe.command("/vote_change")
def vote_change(sender_id, participant_id, yes, **ext):
    """Handle the yes/no answer to "change your vote?".

    On yes: refuse if the target team is already in the sender's
    contre-vote list, otherwise ask for an optional support message.
    On no: keep the current vote and acknowledge.
    """
    if yes:
        voter = Voter.from_fb_id(sender_id)
        contre_participants_id = tuple(
            map(lambda x: x.participant.id, ContreVote.from_fb_id(sender_id))
        )
        if participant_id in contre_participants_id:
            participant = Participant.from_id(participant_id)
            chat.send_text(
                sender_id,
                f"Miala tsiny 😌, Efa anatiny lisitry ny ekipa zakanao ny ekipan'i {participant.univ_name} 😶😶",
            )
            return BackAndMenuButton(Payload("/participants"))
        chat.send_quick_reply(
            sender_id,
            app_view.is_yes(
                "/comment_vote", participant_id=participant_id, update=True
            ),
            "Hanampy teny fanohanana?",
        )
    else:
        voter = Voter.from_fb_id(sender_id)
        participant = voter.vote if voter else None
        if participant:
            chat.send_text(
                sender_id,
                "Misaotra anao tokam-po, tsy miala amn'ny ekipa: "
                + participant.univ_name,
            )
        return BackAndMenuButton(Payload("/participants"))
@ampalibe.command("/comment_vote")
def comment_vote(sender_id, yes, participant_id, update=False, **ext):
    """Handle "add a support message?" for a (new or changed) vote.

    On yes: prompt for the message and defer saving to the /save_vote action.
    On no: save (or update) the vote immediately with a placeholder comment.
    """
    participant = Participant.from_id(participant_id)
    if yes:
        chat.send_text(sender_id, "Misaotra anao, Sorato ny teny fanohananao")
        query.set_action(
            sender_id,
            Payload("/save_vote", participant_id=participant.id, update=update),
        )
    else:
        voter = Voter.from_fb_id(sender_id)
        vote = Vote(voter, participant)
        if update:
            vote.refresh()
            vote.change_vote(participant, "...")
        else:
            vote.save()
        chat.send_text(
            sender_id,
            "Misaotra anao, tontosa ny fanohananao an'i:" f" {participant.univ_name}",
        )
        return BackAndMenuButton(Payload("/participants"))
@ampalibe.action("/save_vote")
def save_vote(sender_id, cmd, participant_id, update=False, **ext):
    """Persist the vote with the free-text support message *cmd* typed by the
    user, then echo a truncated copy of the message back."""
    query.set_action(sender_id, None)
    participant = Participant.from_id(participant_id)
    voter = Voter.from_fb_id(sender_id)
    vote = Vote(voter, participant, comment=cmd)
    if update:
        vote.refresh()
        vote.change_vote(participant, cmd)
    else:
        vote.save()
    chat.send_text(
        sender_id,
        f"Misaotra anao, tontosa ny fanohananao an'i: {participant.univ_name}",
    )
    # Messenger caps message length, hence the 990-character truncation.
    chat.send_text(sender_id, "Ny teny fanohananao dia: \n\n" + cmd[:990] + "...")
    return BackAndMenuButton(Payload("/participants"))
@ampalibe.command("/contre_vote")
def contre_vote(sender_id, participant_id, **ext):
    """Handle the "/contre_vote" command (vote AGAINST a team).

    Requires an existing supporting vote, rejects duplicates and the
    sender's own supported team, and enforces the three-team limit before
    asking for an optional comment.
    """
    voter = Voter.from_fb_id(sender_id)
    if not voter or not voter.vote:
        chat.send_text(
            sender_id,
            "Mila misafidy ekipa tohanina aloha vao afaka mazaka ny ekipa" " hafa...",
        )
        return BackAndMenuButton(Payload("/participant"))
    participant = Participant.from_id(participant_id)
    contre_participants_id = tuple(
        map(lambda x: x.participant.id, ContreVote.from_fb_id(sender_id))
    )
    if participant.id in contre_participants_id:
        # Already in the sender's contre-vote list.
        chat.send_text(
            sender_id,
            f"Miala tsiny 😌, Efa anatiny lisitry ny ekipa zakanao ny ekipan'i {participant.univ_name} 😶😶",
        )
        return BackAndMenuButton(Payload("/participant"))
    contre_vote = ContreVote(voter, participant, "")
    if contre_vote.can_vote:
        if voter.vote and voter.vote.id == participant.id:
            # Cannot contre-vote the team you support.
            chat.send_text(
                sender_id,
                "Efa io ny ekipa alainao 💥 \n\n Manasa anao isafidy ekipa hafa"
                " ho 'zakaina'",
            )
            return BackAndMenuButton(Payload("/participant"))
        chat.send_quick_reply(
            sender_id,
            app_view.is_yes(
                "/comment_contre_vote",
                participant_id=participant_id,
                contre_participants_id=contre_participants_id,
            ),
            "Hanisy sira?",
        )
        return
    else:
        # Three contre-votes maximum.
        chat.send_text(
            sender_id,
            f"Aoka zay 😌 Efa miotrin'ny telo ny ekipa zakanareo 🙃",
        )
        return BackAndMenuButton(Payload("/participant"))
def save(sender_id, participant_id, contre_participants_id, comment):
    """Persist a contre-vote with *comment* and tell the sender how many of
    the three allowed contre-votes remain."""
    voter = Voter.from_fb_id(sender_id)
    participant = Participant.from_id(participant_id)
    contre_vote = ContreVote(voter, participant, comment)
    contre_vote.save()
    chat.send_text(
        sender_id,
        "Misaotra anao, zakanareo ny ekipa an'i:"
        f" {participant.univ_name} 🙀 \n\n {comment[:990]}...",
    )
    if len(contre_participants_id) != 2:
        chat.send_text(
            sender_id,
            f"Mbola afaka misafidy ekipa { 3 - (len(contre_participants_id) + 1) } hafa ho 'zakaina' ianao 🙃 ",
        )
@ampalibe.command("/comment_contre_vote")
def comment_contre_vote(
    sender_id, yes, participant_id, contre_participants_id, comment="", **ext
):
    """Handle "add a comment?" for a contre-vote: either prompt for free text
    (deferring to /save_contre_vote) or save immediately with an empty one."""
    if yes:
        chat.send_text(sender_id, "Sorato ny teny fanampin'ny safidinao...")
        query.set_action(
            sender_id,
            Payload(
                "/save_contre_vote",
                contre_participants_id=contre_participants_id,
                participant_id=participant_id,
            ),
        )
        return
    save(sender_id, participant_id, contre_participants_id, comment)
@ampalibe.action("/save_contre_vote")
def save_contre_vote(sender_id, cmd, contre_participants_id, participant_id, **ext):
    """Persist the contre-vote with the typed comment *cmd*, clearing the
    pending action first."""
    query.set_action(sender_id, None)
    save(sender_id, participant_id, contre_participants_id, cmd)
@ampalibe.command("/description")
def description(sender_id, participant_id, **ext):
    """Send the participant's description text back to the sender."""
    participant = Participant.from_id(participant_id)
    chat.send_text(sender_id, participant.description)
    return BackAndMenuButton()
@ampalibe.command("/get_votes")
def get_vote_and_contre_vote(sender_id, **ext):
    """Summarize the sender's current supporting vote and contre-vote list,
    handling the not-yet-voted cases with explanatory messages."""
    voter = Voter.from_fb_id(sender_id)
    if not voter:
        chat.send_text(
            sender_id,
            "Mbola tsy nisafidy ekipa tohanana ianao",
        )
        return BackAndMenuButton()
    participant = voter.vote
    if not participant:
        chat.send_text(
            sender_id,
            "Mbola tsy nisafidy ekipa tohanana ianao",
        )
        return BackAndMenuButton()
    chat.send_text(
        sender_id,
        f"Ny ekipa tohananao amin'izao dia: 🔥 {participant.univ_name} 🔥 ",
    )
    contre_votes = ContreVote.from_fb_id(sender_id)
    if not contre_votes:
        chat.send_text(
            sender_id,
            "Mbola tsy nisafidy ekipa 'zakanao' ianao. \nMarihina fa afaka mahazaka"
            " ekipa telo(03) ianao.",
        )
    else:
        data = "\n- ".join([c.participant.univ_name + " 🙀" for c in contre_votes])
        chat.send_text(
            sender_id,
            f"Ireto avy ny ekipa zakanao: \n- {data} \nMarihina fa afaka mahazaka"
            " ekipa telo(03) ianao.",
        )
    return BackAndMenuButton()
| iTeam-S/hiu-vote-bot | controllers/voting.py | voting.py | py | 8,636 | python | en | code | 7 | github-code | 36 |
18206090350 | from scrapy.selector import HtmlXPathSelector
from scrapy.contrib.spiders import CrawlSpider # Rule
from scrapy.http.request import Request
import html2text
import time
import re
import dateutil.parser
import datetime
import urlparse
from buzz_crawler.items import BuzzCrawlerItem
from markdown import markdown
class WozSpider(CrawlSpider):
    """Scrapy spider for woz.ch: `parse` discovers article links on the front
    page and `handle_blog` extracts each article into a BuzzCrawlerItem."""
    name = 'woz'
    allowed_domains = ['www.woz.ch']
    start_urls = ['http://www.woz.ch/']

    def handle_blog(self, response):
        """Extract one article page into a BuzzCrawlerItem (title, blurb,
        markdown-ified body)."""
        hxs = HtmlXPathSelector(response)
        item = BuzzCrawlerItem()
        item['url'] = response.url
        # Crawl time, not the article's publication date.
        item['date'] = datetime.datetime.now()
        item['title'] = hxs.xpath(".//*[@id='container']/div/div/article/header/h1/text()").extract()[0].strip()
        item['blurb'] = hxs.xpath(".//*[@id='container']/div/div/article/header/h2/text()").extract()[0].strip()
        unprocessed_content = hxs.xpath("//div[@class='article-content']").extract()[0].strip()
        # HTML -> plain text (links/images stripped) -> back to HTML via markdown.
        h = html2text.HTML2Text()
        h.ignore_links = True
        h.ignore_images = True
        processed_content = h.handle(unprocessed_content)
        item['content'] = markdown(processed_content)
        item['source'] = 'woz.ch'
        yield item

    def parse(self, response):
        """Find article teasers on the landing page and schedule each one."""
        hxs = HtmlXPathSelector(response)
        posts = hxs.xpath(".//*[@id='container']/div/div/article")
        for post in posts:
            post_link = post.xpath("a/@href").extract()[0]
            post_absolute_url = urlparse.urljoin(response.url, post_link.strip())
            yield Request(post_absolute_url, self.handle_blog)
71845863784 | from django.http import HttpResponse
from django.template import loader
def index(request):
    """Render the site landing page."""
    context = {}
    template = loader.get_template('pages/page_index.html')
    return HttpResponse(template.render(context, request))
def page(request):
    """Render a single page identified by the ``page_id`` query parameter.

    A missing ``page_id`` raises Django's MultiValueDictKeyError (HTTP 500),
    same as the original.  The stray debug ``print`` that leaked every
    requested page_id to stdout has been removed.
    """
    template = loader.get_template('pages/page_display.html')
    context = {
        'page_id': request.GET['page_id']
    }
    return HttpResponse(template.render(context, request))
| craig-glass/epic_django | pages/views.py | views.py | py | 466 | python | en | code | 0 | github-code | 36 |
70434845545 | #coding = utf-8
# Select the 10 feature dimensions with the largest information gain
import numpy as np
from InfoGain import choose_best_feature
data_path = "C:\\Users\\TJM\\OneDrive\\graduated\\研①\\人工智能算法与实践\\homework\\分享\\Test\\1-kddcup.data_10_percent_corrected"
test_path = "C:\\Users\\TJM\\OneDrive\\graduated\\研①\\人工智能算法与实践\\homework\\分享\\Test\\3-corrected.txt"
feature1 = []
feature2 = []
feature3 = []
label = []
def read_data(path):
    """Parse a KDD-Cup CSV file into rows of 42 string fields.

    Side effect: extends the module-level vocabulary lists feature1
    (protocol), feature2 (service), feature3 (flag) and label with any
    values not seen before.
    """
    rows = []
    with open(path) as fh:
        for line in fh.readlines():
            fields = line.strip('.\n').split(',')
            for value, vocab in ((fields[1], feature1),
                                 (fields[2], feature2),
                                 (fields[3], feature3),
                                 (fields[41], label)):
                if value not in vocab:
                    vocab.append(value)
            rows.append(fields)
    return rows
def split_data(data):
    """Split raw rows into (features, labels): columns 0-40 vs column 41."""
    arr = np.array(data)
    return arr[:, :41], arr[:, 41]
def quantify(feature, Label):
    """Encode the categorical columns of *feature* and binarize *Label*.

    Columns 1/2/3 (protocol, service, flag) are replaced by their 1-based
    index in the fixed vocabularies below; Label becomes 1 for "normal"
    and 0 otherwise.  Both numpy string arrays are mutated in place and
    returned converted to float.

    Bug fixed: the original tested `feature[j][2] not in feature1` (the
    protocol list) while appending to feature2, so it appended a duplicate
    service entry for almost every row (quadratic growth + slow .index()
    scans) and would crash with ValueError on a service that happened to be
    in feature1 but not feature2.  The test now targets feature2, which is
    what the append clearly intends.
    """
    feature1 = ['udp', 'tcp', 'icmp']
    feature2 = ['private', 'domain_u', 'http', 'smtp', 'ftp_data', 'ftp', 'eco_i', 'other', 'auth', 'ecr_i', 'IRC', 'X11', 'finger', 'time', 'domain', 'telnet', 'pop_3', 'ldap', 'login', 'name', 'ntp_u', 'http_443', 'sunrpc', 'printer', 'systat', 'tim_i', 'netstat', 'remote_job', 'link', 'urp_i', 'sql_net', 'bgp', 'pop_2', 'tftp_u', 'uucp', 'imap4', 'pm_dump', 'nnsp', 'courier', 'daytime', 'iso_tsap', 'echo', 'discard', 'ssh', 'whois', 'mtp', 'gopher', 'rje', 'ctf', 'supdup', 'hostnames', 'csnet_ns', 'uucp_path', 'nntp', 'netbios_ns', 'netbios_dgm', 'netbios_ssn', 'vmnet', 'Z39_50', 'exec', 'shell', 'efs', 'klogin', 'kshell', 'icmp']
    feature3 = ['SF', 'RSTR', 'S1', 'REJ', 'S3', 'RSTO', 'S0', 'S2', 'RSTOS0', 'SH', 'OTH']
    for j in range(len(feature)):
        # Register previously unseen service names so .index() cannot fail.
        if feature[j][2] not in feature2:
            feature2.append(feature[j][2])
        feature[j][1] = feature1.index(feature[j][1]) + 1
        feature[j][2] = feature2.index(feature[j][2]) + 1
        feature[j][3] = feature3.index(feature[j][3]) + 1
        Label[j] = 1 if Label[j] == "normal" else 0
    feature = feature.astype(float)
    Label = Label.astype(float)
    return feature, Label
if __name__ == "__main__":
    # trainset = 1 processes the training file, 0 the test file.
    trainset = 1
    if trainset:#
        d = read_data(data_path)
        #print(d[0])
        print("lable种类:",label)
        features,labels = split_data(d)
        print(feature1)
        print(feature2)
        print(feature3)
        print(len(label))
        q_feature ,q_label= quantify(features,labels)
        new_data = np.array(np.column_stack((q_feature,q_label)))
        #np.savetxt("./dataset/train_data_41_bin.txt", new_data,fmt = '%f',delimiter=',')
        #np.savetxt("./dataset/train_data_41.txt",new_data)# save 10-dim training data (save format is a bit off)
        h= len(new_data)
        '''best_fearture_index,best_10_index = choose_best_feature(q_feature,q_label)#计算信息增益,取最大的前十个特征
        print("最佳特征:",best_fearture_index)
        print("最佳前10特征:",best_10_index)'''
        best_10_index = [5, 23, 3, 24, 36, 2, 33, 35, 34, 30]# precomputed best feature dimensions
        best_5_index = [5, 23, 3, 24, 36]# top-5 features kept for a comparison run
        #new_data1 = np.zeros((h,11))
        #new_data1 = np.zeros((h, 6))
        new_data1 = np.zeros((h, 11))
        print(new_data.shape)
        print(new_data1.shape)
        # Column 42 is the label; append it so it is copied along.
        best_10_index.append(42)
        best_5_index.append(42)
        '''for i in best_5_index:
            j=i-1
            new_data1[:,best_5_index.index(i)] = new_data[:,j]'''
        # Copy the selected (1-based) columns into the reduced matrix.
        for i in best_10_index:
            j=i-1
            new_data1[:,best_10_index.index(i)] = new_data[:,j]
        np.savetxt("./dataset/train_data_10_bin.txt",new_data1,fmt='%f',delimiter=',')# save the 10-dim training data
        #np.savetxt("./dataset/train_data_4.txt", new_data1, fmt='%f', delimiter=',')
    else:
        d = read_data(test_path)
        # print(d[0])
        features, labels = split_data(d)
        print(feature1)
        print(feature2)
        print(feature3)
        print(len(label))
        q_feature, q_label = quantify(features, labels)
        new_data = np.column_stack((q_feature, q_label))
        h = len(new_data)
        #np.savetxt("./dataset/test_data_41_bin.txt", new_data,fmt = "%f",delimiter=',')# save the 41-dim binary-class test set
        #best_fearture_index, best_10_index = choose_best_feature(q_feature, q_label)
        best_10_index = [5, 23, 3, 24, 36, 2, 33, 35, 34, 30]
        #print("最佳特征:", best_fearture_index)
        #print("最佳前10特征:", best_10_index)
        #new_data1 = np.zeros((h, 11))
        #np.savetxt("./dataset/test_data_41.txt", new_data)# save 41-dim test data
        best_5_index = [5, 23, 3, 24, 36] # top-5 features kept for a comparison run
        #new_data1 = np.zeros((h, 6))
        new_data1 = np.zeros((h, 11))
        print(new_data.shape)
        print(new_data1.shape)
        best_10_index.append(42)
        #best_5_index.append(42)
        '''for i in best_5_index:
            j = i - 1
            new_data1[:, best_5_index.index(i)] = new_data[:, j]'''
        #np.savetxt("./dataset/test_data_4.txt", new_data1, fmt='%f', delimiter=',')
        for i in best_10_index:
            j = i - 1
            new_data1[:, best_10_index.index(i)] = new_data[:, j]
        np.savetxt("./dataset/test_data_10_bin.txt", new_data1, fmt='%f', delimiter=',')# save the 10-dim test data
| JimmyTang178/ArtificialIntelligenceProject | data_process.py | data_process.py | py | 5,843 | python | en | code | 1 | github-code | 36 |
31805232109 | # /usr/bin/python3.6
# -*- coding:utf-8 -*-
# 超时
class Solution(object):
    def minKBitFlips(self, A, K):
        """
        Minimum number of length-K subarray flips to make every bit of A
        equal 1, or -1 if impossible (LeetCode 995).

        Greedy left-to-right with a difference array tracking how many
        flips currently cover each index: O(n) time, O(n) extra space.
        The original re-scanned with A.index(0) and re-flipped K elements
        per operation (O(n*K)), which times out on large inputs.

        :type A: List[int]
        :type K: int
        :rtype: int
        """
        n = len(A)
        diff = [0] * (n + 1)  # diff[i]: change in active-flip count at i
        active = 0            # flips covering the current index
        ops = 0
        for i in range(n):
            active += diff[i]
            # Effective bit is A[i] xor (active % 2); flip when it is 0.
            if (A[i] + active) % 2 == 0:
                if i + K > n:
                    # A flip starting here would run past the end.
                    return -1
                ops += 1
                active += 1
                diff[i + K] -= 1
        return ops
def main():
    """Exercise Solution.minKBitFlips on two sample inputs."""
    solver = Solution()
    print(solver.minKBitFlips([1, 1, 0], 2))
    print(solver.minKBitFlips(A=[0, 0, 0, 1, 0, 1, 1, 0], K=3))


if __name__ == "__main__":
    main()
| bobcaoge/my-code | python/leetcode_bak/995_Minimum_Number_of_K_Consecutive_Bit_Flips.py | 995_Minimum_Number_of_K_Consecutive_Bit_Flips.py | py | 901 | python | en | code | 0 | github-code | 36 |
42194340126 | import datetime
import math
from sqlalchemy import desc, asc
from app.main import db
from app.main.model.unit import Unit
from app.main.service.language_helper import LanguageHelper
def save_unit(data, args):
    """Create a new Unit from *data* (expects 'name' and 'description').

    Validates that the name is non-empty and unique; returns a
    (response_dict, http_status) pair — 201 on success, 200 with
    status 'FAILED' on validation errors.
    """
    errors = {}
    language_data = LanguageHelper(args)
    # Check unique field is null or not
    if data['name'] == "":
        errors['name'] = [language_data.get_message(
            'unit.save.no_unit_name_message')]
    if len(errors) > 0:
        response_object = {
            'status': 'FAILED',
            'message': language_data.get_message('unit.save.failed_message'),
            'errors': errors
        }
        return response_object, 200
    else:
        # Enforce name uniqueness at the application level.
        unit = Unit.query.filter_by(
            name=data['name']).first()
        if unit:
            errors['name'] = [language_data.get_message(
                'unit.save.existed_unit_name_message')]
            response_object = {
                'status': 'FAILED',
                'message': language_data.get_message('unit.save.failed_message'),
                'errors': errors
            }
            return response_object, 200
        else:
            new_unit = Unit(
                name=data['name'],
                description=data['description'],
                created_on=datetime.datetime.utcnow(),
                updated_on=datetime.datetime.utcnow()
            )
            save_changes(new_unit)
            output = {}
            output['name'] = new_unit.name
            output['description'] = new_unit.description
            output['created_on'] = str(new_unit.created_on)
            output['updated_on'] = str(new_unit.updated_on)
            response_object = {
                'status': 'SUCCESS',
                'message': language_data.get_message('unit.save.success_message'),
                'data': output
            }
            return response_object, 201
def update_unit(id, data, args):
    """Update the Unit with primary key *id* from *data*.

    Validates existence, non-empty name and name uniqueness; only touches
    updated_on when something actually changed.  Returns
    (response_dict, 200).
    """
    unit = Unit.query.filter_by(id=id).first()
    is_updated = False
    errors = {}
    language_data = LanguageHelper(args)
    # Check if ID is valid or not
    if not unit:
        # NOTE(review): hard-coded English here, unlike the localized
        # messages everywhere else — confirm whether a message key exists.
        errors['id'] = ["Unit ID does not exist!"]
        response_object = {
            'status': 'FAILED',
            'message': language_data.get_message('unit.update.failed_message'),
            'errors': errors
        }
        return response_object, 200
    else:
        # Check null
        if data['name'] == "":
            errors['name'] = [language_data.get_message(
                'unit.update.no_unit_message')]
        if (len(errors) > 0):
            response_object = {
                'status': 'FAILED',
                'message': language_data.get_message('unit.update.failed_message'),
                'errors': errors
            }
            return response_object, 200
        else:
            if data['name'] != unit.name:
                # Check if unit name is existed or not
                updated_unit = Unit.query.filter_by(name=data['name']).first()
                if updated_unit:
                    errors['name'] = [language_data.get_message(
                        'unit.update.existed_unit_name_message')]
                    response_object = {
                        'status': 'FAILED',
                        'message': language_data.get_message('unit.update.failed_message'),
                        'errors': errors
                    }
                    return response_object, 200
                else:
                    is_updated = True
                    unit.name = data['name']
            if data['description'] != unit.description:
                is_updated = True
                unit.description = data['description']
            if is_updated is True:
                unit.updated_on = datetime.datetime.utcnow()
                db.session.commit()
            unit_data = {}
            unit_data['id'] = str(unit.id)
            unit_data['name'] = unit.name
            unit_data['description'] = unit.description
            unit_data['created_on'] = str(unit.created_on)
            unit_data['updated_on'] = str(unit.updated_on)
            respone_object = {
                'status': 'SUCCESS',
                'message': language_data.get_message('unit.update.success_message'),
                'data': unit_data
            }
            return respone_object, 200
def get_all_units(args):
    """Return every unit in the database as a SUCCESS response.

    Args:
        args: request args forwarded to LanguageHelper for localization.

    Returns:
        Tuple of (response dict, 200). 'data.units' is a list of serialized
        unit dicts with stringified timestamps.
    """
    language = LanguageHelper(args)
    units = [
        {
            'id': u.id,
            'name': u.name,
            'description': u.description,
            'created_on': str(u.created_on),
            'updated_on': str(u.updated_on),
        }
        for u in Unit.query.all()
    ]
    return {
        'status': 'SUCCESS',
        'message': language.get_message('unit.get_all.success_message'),
        'data': {'units': units},
    }, 200
def get_unit(id, args):
    """Fetch a single unit by ID.

    Args:
        id: primary key of the unit to fetch.
        args: request args forwarded to LanguageHelper for localization.

    Returns:
        Tuple of (response dict, 200). On an unknown ID the status is
        'ERROR' (still HTTP 200, matching the sibling endpoints).
    """
    unit = Unit.query.filter_by(id=id).first()
    languages_data = LanguageHelper(args)
    if not unit:
        return {
            'status': 'ERROR',
            'message': languages_data.get_message('unit.get.no_unit_message')
        }, 200
    unit_data = {
        'id': unit.id,
        'name': unit.name,
        'description': unit.description,
        'created_on': str(unit.created_on),
        'updated_on': str(unit.updated_on),
    }
    return {
        'status': 'SUCCESS',
        # Fixed copy-paste bug: previously used 'unit.delete.success_message'.
        # NOTE(review): confirm 'unit.get.success_message' exists in the
        # language resource files; the delete key was clearly wrong here.
        'message': languages_data.get_message('unit.get.success_message'),
        'data': unit_data
    }, 200
def delete_unit(id, args):
    """Delete a unit by ID.

    Args:
        id: primary key of the unit to delete.
        args: request args forwarded to LanguageHelper for localization.

    Returns:
        Tuple of (response dict, 200). Unknown IDs produce a 'FAILED'
        response carrying an (empty) errors dict, as the callers expect.
    """
    languages_data = LanguageHelper(args)
    unit = Unit.query.filter_by(id=id).first()

    if not unit:
        return {
            'status': 'FAILED',
            'message': languages_data.get_message('unit.delete.no_unit_message'),
            'errors': {}
        }, 200

    db.session.delete(unit)
    db.session.commit()
    return {
        'status': 'SUCCESS',
        'message': languages_data.get_message('unit.delete.success_message')
    }, 200
def save_changes(data):
    """Add a model instance to the current DB session and commit immediately."""
    db.session.add(data)
    db.session.commit()
def get_all_units_with_pagination(args):
    """Return one page of units, optionally filtered and sorted.

    Supported query params (all optional):
        page_size    -- items per page (int, default 10)
        current_page -- 1-based page index (int, default 1)
        key_word     -- case-insensitive substring filter on name/description
        sort_field   -- column name to sort by
        sort_order   -- -1 for descending (default), anything else ascending

    Returns:
        Tuple of (response dict, 200) with units, total_pages, current_page
        and has_next_page in 'data'.
    """
    # Query-param parsing with defaults.
    page_size = int(args['page_size']) if 'page_size' in args else 10
    current_page = int(args['current_page']) if 'current_page' in args else 1
    key_word = args['key_word'] if 'key_word' in args else None
    sort_field = args['sort_field'] if 'sort_field' in args else None
    sort_order = int(args['sort_order']) if 'sort_order' in args else -1

    languages_data = LanguageHelper(args)

    # Sorting: default to newest-first when no sort field was supplied.
    # (The old `sort_order is None` check was dead code: sort_order always
    # holds an int here.)
    if sort_field is None:
        units = Unit.query.order_by(Unit.created_on.desc())
    elif sort_order == -1:
        units = Unit.query.order_by(desc(sort_field))
    else:
        units = Unit.query.order_by(asc(sort_field))

    # Hoist the count: the old code issued the COUNT query twice.
    total_count = units.count()
    units_on_page = units.limit(page_size).offset(
        (current_page - 1) * page_size)
    total_pages = math.ceil(total_count / page_size)
    # Fixed: previous code wrapped this boolean comparison in math.ceil(),
    # which only worked by accident (ceil(True) == 1, ceil(False) == 0).
    next_page = total_count - page_size * current_page > 0

    def _serialize(unit):
        # One unit row of the response payload.
        return {
            'id': unit.id,
            'name': unit.name,
            'description': unit.description,
            'created_on': str(unit.created_on),
            'updated_on': str(unit.updated_on),
        }

    # NOTE(review): the keyword filter runs AFTER pagination, so filtered
    # pages may contain fewer than page_size items and total_pages ignores
    # the filter. Kept as-is to preserve the existing API behaviour --
    # confirm whether filtering should move before limit/offset.
    if key_word is not None:
        # Fixed: lowercase the needle too; the old code compared the raw
        # key_word against lowered fields, so uppercase queries never matched.
        needle = key_word.lower()
        output = [
            _serialize(unit)
            for unit in units_on_page
            if needle in unit.name.lower() or needle in unit.description.lower()
        ]
    else:
        output = [_serialize(unit) for unit in units_on_page]

    data = {
        'units': output,
        'total_pages': total_pages,
        'current_page': current_page,
        'has_next_page': next_page,
    }
    return {
        'status': 'SUCCESS',
        'message': languages_data.get_message('unit.get_all_with_pagination.success_message'),
        'data': data
    }, 200
| viettiennguyen029/recommendation-system-api | app/main/service/unit_service.py | unit_service.py | py | 9,022 | python | en | code | 0 | github-code | 36 |
35153410551 | import torch
import torch.nn as nn
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.fc1 = nn.Linear(784, 256, bias = False)
self.bn1 = nn.BatchNorm1d(256)
self.relu1 = nn.ReLU()
self.fc2 = nn.Linear(256, 128)
self.bn2 = nn.BatchNorm1d(128)
self.relu2 = nn.ReLU()
self.fc3 = nn.Linear(128, 10)
def forward(self, x):
x = torch.flatten(x, 1)
x = self.fc1(x)
x = self.bn1(x)
x = self.relu1(x)
x = self.fc2(x)
x = self.bn2(x)
x = self.relu2(x)
x = self.fc3(x)
return x | Sachi-27/WiDS--Image-Captioning | Week 2/model.py | model.py | py | 649 | python | en | code | 0 | github-code | 36 |
18913307423 | class Solution:
def maxArea(self, height: list[int]) -> int:
left = 0
right = len(height) - 1
biggest_area = 0
while left < right:
left_bar = height[left]
right_bar = height[right]
current_area = min(left_bar, right_bar) * (right - left)
biggest_area = max(biggest_area, current_area)
if left_bar < right_bar:
left += 1
else:
right -= 1
return biggest_area
| lancelote/leetcode | src/container_with_most_water.py | container_with_most_water.py | py | 508 | python | en | code | 3 | github-code | 36 |
40285478633 | # %%
import logging
import os.path
import shutil
import sys
from typing import Optional
import matplotlib.pyplot as plt
import pandas as pd
import pytorch_lightning as pl
import torch
import torch.nn.functional as F
import torchaudio
from icecream import ic
from torch.utils.data import Dataset, DataLoader
import matplotlib.pyplot as plt
from io import StringIO
try:
from hubert.clustering.filter_dataframe import clean_data_parczech
except ModuleNotFoundError:
from filter_dataframe import clean_data_parczech
class ParCzechDataset(Dataset):
def __init__(self, df_path, resample_rate=16000, df_filters=None, sep='\t', sort=True, train_flag=True, iloc=True, *args, **kwargs):
super(ParCzechDataset, self).__init__()
self.df = pd.read_csv(df_path, sep=sep)
self.filter_df(df_filters)
if train_flag:
self.df = self.df[(self.df.type == 'train') | (self.df.type == 'other')]
if sort:
self.df = self.df.sort_values(by=['duration__segments'], ascending=False).copy().reset_index(drop=True)
self.new_sr = resample_rate
self.resample_transform = None
# this configures the __getitem__ method, when self.iloc is true index in __getitem__ is interpreted as integer location in dataframe
# when self.iloc is False index in __getitem__ is interpreted as an element in self.df.index
self.iloc = iloc
def index_df(self, i, column_name=None):
if self.iloc:
row = self.df.iloc[i]
else:
row = self.df.loc[i]
if column_name is not None:
return row[column_name]
return row
def get_mp3_name(self, i):
row = self.index_df(i)
try:
# need to remove prefix 'sentences_'
mp3_name = row.mp3_name.split('_')[-1]
except:
ic(row)
raise ValueError(f'can not find row by index {i}')
return mp3_name
def extract_path(self, i):
row = self.index_df(i)
mp3_name = self.get_mp3_name(i)
return os.path.join(row.segment_path, mp3_name)
def get_gold_transcript(self, path):
with open(f'{path}.prt', 'r') as f:
return f.read().rstrip()
def get_asr_transcript(self, path):
with open(f'{path}.asr', 'r') as f:
return f.read().rstrip()
def _safe_read_df(self, path, names, header, sep, dtypes, na_values):
if not os.path.isfile(path):
print(f'{path} does not exist')
replace_dict = {
'"': "__double_quotes__",
}
with open(path, 'r') as f:
src = ''.join(f.readlines())
for k, v in replace_dict.items():
src = src.replace(k, v)
df = pd.read_csv(StringIO(src), names=names, header=header, sep=sep, dtype=dtypes, na_values=na_values)
return df
def get_recognized_df(self, path, i):
# will extract recognized based on word ids
header = ['word', 'word_id', 'start_time', 'end_time', 'XXX', 'avg_char_duration', 'speaker']
try:
words_df = self._safe_read_df(
f'{path}.words',
names=header,
header=None,
sep='\t',
# replace_col=['word'],
dtypes=None,
na_values=None,
)
except Exception as e:
ic(path)
ic(e)
raise ValueError(f'Can not read file {path}')
word_ids = words_df['word_id'].values.tolist()
# read aligned file
path_aligned = f"/lnet/express/work/people/stankov/alignment/results/full/words-aligned/jan/words_{self.get_mp3_name(i)}.tsv"
# normalization is done by the length of the "true_word"
header_aligned = ['true_w', 'trans_w', 'joined', 'id', 'recognized', 'dist', 'dist_norm', 'start', 'end', 'time_len_ms', 'time_len_norm']
dtypes = dict(
true_w=str,
trans_w=str,
joined=bool,
id=str,
recognized=bool
)
for name in header_aligned:
if name not in dtypes:
dtypes[name] = float
aligned_df = self._safe_read_df(
path_aligned,
header_aligned,
sep='\t',
dtypes=dtypes,
na_values='-',
header=0
)
aligned_df.trans_w = aligned_df.trans_w.fillna('-')
aligned_df = aligned_df[aligned_df['id'].isin(word_ids)]
return aligned_df
def get_recognized_transcript(self, path, i):
aligned_df = self.get_recognized_df(path, i)
# from miliseconds to seconds
start_time = aligned_df['start'].min() / 1000
end_time = aligned_df['end'].max() / 1000
mp3_name = self.get_mp3_name(i)
path_transcribed = f'/lnet/express/work/people/stankov/alignment/results/full/scrapping/jan/{mp3_name}/{mp3_name}.tsv'
if not os.path.isfile(path_transcribed):
# print(f'{mp3_name} is not in scraping')
path_transcribed = f'/lnet/express/work/people/stankov/alignment/results/full/time-extracted/jan/{mp3_name}.tsv'
header_transcribed = ['start', 'end', 'recognized', 'true_word', 'cnt', 'dist']
transcribed_df = pd.read_csv(path_transcribed, names=header_transcribed, header=None, sep='\t')
# ic(start_time, end_time, transcribed_df.head())
transcript = transcribed_df[(transcribed_df.start >= start_time) & (transcribed_df.end <= end_time)].recognized.values.tolist()
return ' '.join(transcript)
def resample(self, sr, wav):
if self.resample_transform is None:
self.resample_transform = torchaudio.transforms.Resample(orig_freq=sr, new_freq=self.new_sr)
return self.resample_transform(wav)
def get_wav(self, path):
wav, sr = torchaudio.load(f'{path}.wav', normalize=True)
# stereo to mono if needed
if wav.size(0) == 2:
wav = torch.mean(wav, dim=0).unsqueeze(0)
return self.resample(sr, wav)
def duration_hours(self, filters=None):
if filters is not None:
df = clean_data_parczech(self.df, filters)
else:
df = self.df
return df.duration__segments.sum() / 3600
def plot_stat(self, col_name, filters=None):
if filters is not None:
df = clean_data_parczech(self.df, filters)
else:
df = self.df
# plt.boxplot(dataset.df.avg_char_duration__segments, vert=False)
print(df.sort_values(by=[col_name])[col_name])
plt.plot(range(len(df)), df.sort_values(by=[col_name])[col_name])
plt.title(f'{col_name} sorted, {self.duration_hours(filters):.2f}h')
plt.xlabel('segments')
plt.ylabel(col_name)
plt.show()
def filter_df(self, filters, reset_index=False):
if filters is None:
return
self.df = clean_data_parczech(self.df, filters)
if reset_index:
self.df.reset_index(drop=True, inplace=True)
def get_columns(self):
return self.df.columns.values
def __getitem__(self, i):
path = self.extract_path(i)
return dict(
gold_transcript=self.get_gold_transcript(path),
asr_transcript=self.get_asr_transcript(path),
wav=self.get_wav(path),
path=os.path.dirname(path)
)
def __len__(self):
return len(self.df)
def clean_data(df, params):
# thresholds were selected based on the plot
df = df[(df.type == 'train') | (df.type == 'other')]
df = df[df.recognized_sound_coverage__segments > params['recognized_sound_coverage__segments_lb']]
df = df[df.recognized_sound_coverage__segments < params['recognized_sound_coverage__segments_ub']]
# removed 404.5 hours
# use only long enough segments
ic(df.duration__segments.sum() / 3600)
if 'duration__segments_lb' in params:
df = df[df.duration__segments > params['duration__segments_lb']]
if 'duration__segments_ub' in params:
df = df[df.duration__segments < params['duration__segments_ub']]
ic(df.duration__segments.sum() / 3600)
return df
class CommonVoiceDataset(Dataset):
def __init__(self, base_dir, type, resample_rate=16000):
self.data_path = os.path.join(base_dir, 'clips')
self.df = pd.read_csv(os.path.join(base_dir, f'{type}.tsv'), sep='\t')
self.resample_rate = resample_rate
self.resample_transform = None
def resample(self, waveform, sr):
if self.resample_transform is None:
self.resample_transform = torchaudio.transforms.Resample(sr, self.resample_rate)
return self.resample_transform(waveform)
def __getitem__(self, i):
if torch.is_tensor(i):
i = i.item()
waveform, sample_rate = torchaudio.load(os.path.join(self.data_path, self.df.path[i]))
return dict(
wav=self.resample(waveform, sample_rate),
path=self.df.path[i]
)
def __len__(self):
return len(self.df)
class MFCCExtractorPL(pl.LightningModule):
def __init__(self, n_mffcs, n_mels, f_max, resample_rate, output_dir, n_fft=400):
super(MFCCExtractorPL, self).__init__()
self.output_dir = output_dir
self.n_fft = n_fft
self.sr = resample_rate
self.MFCC_transform = torchaudio.transforms.MFCC(
resample_rate,
n_mfcc=n_mffcs,
melkwargs=dict(
n_mels=n_mels,
n_fft=n_fft, # default
hop_length=n_fft // 2, # default
f_max=f_max,
)
)
self.delta_transform = torchaudio.transforms.ComputeDeltas()
def prepare_data(self):
if os.path.exists(self.output_dir):
shutil.rmtree(self.output_dir)
os.makedirs(self.output_dir)
def forward(self, batch):
wavs, _, lens = batch
mfccs_batch = self.MFCC_transform(wavs)
deltas_batch = self.delta_transform(mfccs_batch)
deltas2_batch = self.delta_transform(deltas_batch)
# all shapes [batch_size, 1, 13, max_n_frames]
# stacking features
output = torch.cat([mfccs_batch, deltas_batch, deltas2_batch], dim=2).squeeze().permute(0, 2, 1)
# [batch_size, max_n_frames, 13 * 3]
n_frames = torch.tensor([compute_frames(l, self.sr) for l in lens], device=self.device)
return output, n_frames
def compute_frames(wave_len, sample_rate):
ms_int = int(wave_len / sample_rate * 1000)
# these "random" operations mimic how hubert.feature extractor counts frames in the audio
new_ms = (ms_int - (ms_int % 5) - 1) // 20
return new_ms
class SaveResultsCB(pl.Callback):
def __init__(self, target_path, n_fft, buffer_size, df_type, total_batches, resample_rate=16000, frame_length=20):
self.df_type = df_type
self.output_dir = target_path
self.n_fft = n_fft
# number of frames to store at one csv
self.buffer_size = buffer_size
self.frame_length = frame_length
self.dataframes = []
self.current_buffer = 0
# count how many df written to disk
self.cnt = 0
self.resample_rate = resample_rate
self.total_duration_sec = 0
self.loggers = {}
self.total_batches = total_batches
def extract_name(self, path):
if self.df_type == 'common_voice':
return path
elif self.df_type == 'parczech':
return '/'.join(path.split('/')[-2:])
else:
raise NotImplementedError(f'{self.df_type} is not supported')
def write_df(self, trainer):
output_path = os.path.join(self.output_dir, f'{trainer.global_rank:02}-{self.cnt:04}.csv')
result = pd.concat(self.dataframes).reset_index()
result['path'] = result['path'] + '/' + result['index'].astype(str)
result.drop('index', axis=1).to_csv(output_path, index=False)
self.current_buffer = 0
self.dataframes = []
self.cnt += 1
def on_predict_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx):
_, paths, wave_lens = batch
self.total_duration_sec += sum(w_len / self.resample_rate for w_len in wave_lens)
mfcc_features, frames_cnt = outputs[0].cpu().numpy(), outputs[1].cpu().numpy()
for n_frames, features, path in zip(frames_cnt, mfcc_features, paths):
self.current_buffer += n_frames
# select only useful frames without padding
features = features[:n_frames]
features_df = pd.DataFrame(data=features)
features_df['path'] = self.extract_name(path)
self.dataframes.append(features_df)
if self.current_buffer >= self.buffer_size:
self.write_df(trainer)
if batch_idx % 50 == 0:
logger = self.loggers[pl_module.global_rank]
logger.debug(f'gpu={pl_module.global_rank:2} batches processed {batch_idx:4}/{self.total_batches} ... {batch_idx / self.total_batches:.4f}')
def setup(self, trainer: "pl.Trainer", pl_module: "pl.LightningModule", stage: Optional[str] = None) -> None:
# setup loggers for each gpu
# logging.basicConfig(filename=logging_file, filemode='a', level=logging.DEBUG, format='%(asctime)s - %(message)s', datefmt='%H:%M:%S %d.%m.%Y')
handler = logging.FileHandler(f'gpu-{pl_module.global_rank}.log')
formatter = logging.Formatter(fmt='%(asctime)s - %(message)s', datefmt='%H:%M:%S %d.%m.%Y')
handler.setFormatter(formatter)
logger = logging.getLogger(f'{pl_module.global_rank}')
logger.setLevel(logging.DEBUG)
logger.addHandler(handler)
self.loggers[pl_module.global_rank] = logger
def on_predict_epoch_end(self, trainer, pl_module, outputs):
if self.dataframes != []:
self.write_df(trainer)
logger = self.loggers[pl_module.global_rank]
total_duration_hours = int(self.total_duration_sec // 3600)
remaining_seconds = int(self.total_duration_sec % 3600)
total_duration_mins = int(remaining_seconds // 60)
total_duration_secs = int(remaining_seconds % 60)
logger.debug(f'gpu={pl_module.global_rank:2} finished, {total_duration_hours:3}:{total_duration_mins:2}:{total_duration_secs:.3f} or'
f' {self.total_duration_sec:.3f} seconds')
def collate_fn(batch):
M = max([x['wav'].size(-1) for x in batch])
wavs = []
paths = []
for x in batch:
padded = F.pad(x['wav'], (0, M - x['wav'].size(-1)))
wavs.append(padded)
paths.append(x['path'])
# save lengths of waveforms, will be used to cut the padding from spectrogram
lengths = [x['wav'].size(-1) for x in batch]
return torch.stack(wavs, dim=0), paths, lengths
def plot_spectrogram(spec, title=None, ylabel='freq_bin', aspect='auto', xmax=None, lim=None):
fig, axs = plt.subplots(1, 1)
axs.set_title(title or 'Spectrogram (db)')
axs.set_ylabel(ylabel)
axs.set_xlabel('frame')
im = axs.imshow(spec, origin='lower', aspect=aspect)
if xmax:
axs.set_xlim((0, xmax))
fig.colorbar(im, ax=axs)
if lim is not None:
plt.axvline(x=lim, color='red')
plt.show(block=False)
# %%
if __name__ == '__main__':
# %%
# logging.basicConfig(filename=logging_file, filemode='a', level=logging.DEBUG, format='%(asctime)s - %(message)s', datefmt='%H:%M:%S %d.%m.%Y')
params = dict(
resample_rate=16000,
batch_size=70,
n_mffcs=13,
n_mels=40,
n_fft=640,
buffer_size=130000,
df_type='parczech',
frame_length_ms=20,
data_type='validated'
)
parczech_clean_params = dict(
recognized_sound_coverage__segments_lb=0.45,
recognized_sound_coverage__segments_ub=0.93,
duration__segments_lb=0.5,
)
if 'lnet' in os.getcwd():
df_path = '/lnet/express/work/people/stankov/alignment/Thesis/clean_with_path_large.csv'
# df = pd.read_csv(df_path, sep='\t')
# directory where mfccs will be stored
output_dir = '/lnet/express/work/people/stankov/alignment/mfcc'
dataset = ParCzechDataset(df_path, resample_rate=params['resample_rate'], clean_params=parczech_clean_params)
else:
# under base dir there are tsv file and clips/ folder
base_dir = '/root/common_voice_data/cv-corpus-7.0-2021-07-21/cs'
# directory where mfccs will be stored
output_dir = os.path.join(base_dir, 'mffcs')
dataset = CommonVoiceDataset(base_dir, params['data_type'], params['resample_rate'])
# %%
dataloader = DataLoader(dataset, batch_size=params['batch_size'], shuffle=False, collate_fn=collate_fn, num_workers=os.cpu_count() // 4, pin_memory=True)
extractor = MFCCExtractorPL(n_mffcs=params['n_mffcs'], n_mels=params['n_mels'], n_fft=params['n_fft'], f_max=params['resample_rate'] // 2,
output_dir=output_dir, resample_rate=params['resample_rate'])
cb = SaveResultsCB(output_dir, params['n_fft'], buffer_size=params['buffer_size'], df_type=params['df_type'], frame_length=params['frame_length_ms'],
total_batches=len(dataloader))
trainer = pl.Trainer(gpus=-1, strategy='ddp', num_sanity_val_steps=0, callbacks=cb, deterministic=True, progress_bar_refresh_rate=0)
# trainer = pl.Trainer(gpus=1, num_sanity_val_steps=0, callbacks=cb, precision=16, deterministic=True, limit_predict_batches=10)
trainer.predict(extractor, dataloader)
ic('done')
| Stanvla/Thesis | hubert/clustering/torch_mffc_extract.py | torch_mffc_extract.py | py | 17,709 | python | en | code | 0 | github-code | 36 |
42493659115 | """
Helper function to safely convert an array to a new data type.
"""
from __future__ import absolute_import, print_function, division
import numpy as np
import theano
__docformat__ = "restructuredtext en"
def _asarray(a, dtype, order=None):
"""Convert the input to a Numpy array.
This function is almost identical to ``numpy.asarray``, but it should be
used instead of its numpy counterpart when a data type is provided in
order to perform type conversion if required.
The reason is that ``numpy.asarray`` may not actually update the array's
data type to the user-provided type. For more information see ticket
http://projects.scipy.org/numpy/ticket/870.
In that case, we check that both dtype have the same string
description (byte order, basic type, and number of bytes), and
return a view with the desired dtype.
This function's name starts with a '_' to indicate that it is meant to be
used internally. It is imported so as to be available directly through
theano._asarray
"""
if str(dtype) == 'floatX':
dtype = theano.config.floatX
dtype = np.dtype(dtype) # Convert into dtype object.
rval = np.asarray(a, dtype=dtype, order=order)
# Note that dtype comparison must be done by comparing their `num`
# attribute. One cannot assume that two identical data types are pointers
# towards the same object (e.g. under Windows this appears not to be the
# case).
if rval.dtype.num != dtype.num:
# Type mismatch between the data type we asked for, and the one
# returned by numpy.asarray.
# If both types have the same string description (byte order, basic
# type, and number of bytes), then it is safe to return a view.
if (dtype.str == rval.dtype.str):
# Silent fix.
return rval.view(dtype=dtype)
else:
# Unexpected mismatch: better know what is going on!
raise TypeError(
'numpy.array did not return the data type we '
'asked for (%s %s #%s), instead it returned type '
'%s %s #%s: function '
'theano._asarray may need to be modified to handle this '
'data type.' %
(dtype, dtype.str, dtype.num, rval.dtype, rval.str,
rval.dtype.num))
else:
return rval
| Theano/Theano | theano/misc/safe_asarray.py | safe_asarray.py | py | 2,384 | python | en | code | 9,807 | github-code | 36 |
11686617200 | import psutil
import time
import sys
# Nav : gzserver, move_base, amcl, robo state pub, rosout, mapsrv
# ObjTrack : gzserver, subscribr, objdetector, objtracker, controller
# Nav2D : stage, navigator, operator, mapper, rviz, joy, controller
cpu_util = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
mem_util = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
print(sys.argv)
# t = int(sys.argv[1])
# freq = int(sys.argv[2])
# tim = int(sys.argv[3])
# r = int(sys.argv[5])
sleep_time = 0.4
# n_o = tim/sleep_time
count = 0
def is_illixr_proc(proc) -> bool:
try:
exe = proc.exe()
except psutil.AccessDenied:
exe = ""
return "main.opt.exe" in exe
def is_running():
return any(map(is_illixr_proc, psutil.process_iter()))
def get_cpu_mem_nav2d():
for proc in filter(is_illixr_proc, psutil.process_iter()):
cpu_util[0] += proc.cpu_percent()
ts_arr = []
while not is_running():
time.sleep(0.01)
print("Detected process launch")
while is_running():
get_cpu_mem_nav2d()
count += 1
# print once every 10s i.e. 25*0.4s.
if (count % 25 == 15):
cpu = [x/count for x in cpu_util]
mem = [x/count for x in mem_util]
cms = "###Count: " + str(count) + "Avg CPU: " + str(cpu) + ", Mem: " + str(mem)
print(cms)
ts_arr.append(cms)
time.sleep(sleep_time)
print("ADDED all observations", count)
cpu_txt = ""
mem_txt = ""
for i in range(len(cpu_util)):
cpu_util[i] /= count
cpu_txt += str(cpu_util[i]) + ", "
mem_util[i] /= count
mem_txt += str(mem_util[i]) + ", "
# fname = "%s_cpu_mem.txt"% (sys.argv[4])
f = sys.stdout
# f = open(fname, "w")
for i in sys.argv:
f.write(i + ", ")
for j in ts_arr:
f.write(j)
f.write("\n")
f.write(str(count) + ", ")
f.write(cpu_txt)
f.write(mem_txt)
f.write("\n")
print(sys.argv)
print(cpu_util)
print(mem_util)
'''
if sys.argv[8] == 'yes':
with open('cpu_time_series_%s_%s%s.txt'% (sys.argv[3], sys.argv[4], sys.argv[9]), 'a') as fw:
print "Writing to file for ", sys.argv[3], sys.argv[4], sys.argv[9]
for i in move_base_cpu_arr:
fw.write(str(i) + ', ')
fw.write('\n')
for i in amcl_cpu_arr:
fw.write(str(i) + ', ')
fw.write('\n')
'''
| aditi741997/robotics_project | measure_cpu.py | measure_cpu.py | py | 2,264 | python | en | code | 1 | github-code | 36 |
17643428428 | import argparse
import os
import torch
import torch.nn as nn
import torch.optim as optim
from tqdm import tqdm
from torchvision.utils import save_image
from torch.utils.data import Dataset, DataLoader
import albumentations
from albumentations.pytorch import ToTensorV2
from PIL import Image
import numpy as np
torch.backends.cudnn.benchmark = True
# Dicriminator model definition
class Discriminator(nn.Module):
def __init__(self, in_channels=3) -> None:
super().__init__()
self.convlayers = nn.Sequential(
nn.Conv2d(in_channels=in_channels*2, out_channels=64,
kernel_size=4, stride=2, padding=1, padding_mode="reflect",),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=64, out_channels=128, kernel_size=4,
stride=2, bias=False, padding_mode="reflect",),
nn.BatchNorm2d(128),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=128, out_channels=256, kernel_size=4,
stride=2, bias=False, padding_mode="reflect",),
nn.BatchNorm2d(256),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=256, out_channels=512, kernel_size=4,
stride=1, bias=False, padding_mode="reflect",),
nn.BatchNorm2d(512),
nn.LeakyReLU(0.2),
nn.Conv2d(in_channels=512, out_channels=1, kernel_size=4,
stride=1, padding=1, padding_mode="reflect",),
nn.LeakyReLU(0.2),
)
def forward(self, x, y) -> None:
out = torch.cat([x, y], dim=1)
out = self.convlayers(out)
return out
# generator class definition
class Generator(nn.Module):
# U-NET encoder section
def encoder(self, in_channels, out_channel, is_relu=False, need_batch_norm=True):
x = nn.Sequential(
nn.Conv2d(in_channels, out_channel, 4, 2, 1,
bias=False, padding_mode="reflect"),
nn.BatchNorm2d(out_channel) if need_batch_norm else None,
nn.ReLU() if is_relu else nn.LeakyReLU(),
)
return x
# # U-NET decoder section
def decoder(self, in_channels, out_channel, is_relu=False, need_batch_norm=True, need_dropout=True, ):
return nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channel, 4, 2, 1, bias=False),
nn.BatchNorm2d(out_channel),
nn.ReLU() if is_relu else nn.LeakyReLU(),
) if not need_dropout else nn.Sequential(
nn.ConvTranspose2d(in_channels, out_channel, 4, 2, 1, bias=False),
nn.BatchNorm2d(out_channel),
nn.ReLU() if is_relu else nn.LeakyReLU(),
nn.Dropout(0.5),
)
def __init__(self, in_channels=3, features=64):
super().__init__()
self.layer1 = nn.Sequential(
nn.Conv2d(in_channels, features, kernel_size=4,
stride=2, padding=1, padding_mode="reflect"),
nn.LeakyReLU(0.2),
)
self.layer2 = self.encoder(
in_channels=features, out_channel=features * 2)
self.layer3 = self.encoder(features * 2, features * 4)
self.layer4 = self.encoder(features * 4, features * 8)
self.layer5 = self.encoder(features * 8, features * 8)
self.layer6 = self.encoder(features * 8, features * 8)
self.layer7 = self.encoder(features * 8, features * 8)
# self.latent = self.encoder(
# features * 8, features * 8, need_batch_norm=False)
self.latent = nn.Sequential(
nn.Conv2d(features * 8, features * 8, kernel_size=4,
stride=2, padding=1),
nn.ReLU(),
)
self.layer8 = self.decoder(features * 8, features * 8, is_relu=True)
self.layer9 = self.decoder(
features * 8 * 2, features * 8, is_relu=True)
self.layer10 = self.decoder(
features * 8 * 2, features * 8, is_relu=True)
self.layer11 = self.decoder(
features * 8 * 2, features * 8, is_relu=True, need_dropout=False)
self.layer12 = self.decoder(
features * 8 * 2, features * 4, is_relu=True, need_dropout=False)
self.layer13 = self.decoder(
features * 4 * 2, features * 2, is_relu=True, need_dropout=False)
self.layer14 = self.decoder(
features * 2 * 2, features, is_relu=True, need_dropout=False)
self.layer15 = nn.Sequential(
nn.ConvTranspose2d(features*2, in_channels, 4, 2, 1, bias=False),
nn.Tanh(),
)
def forward(self, x):
layer1 = self.layer1(x)
layer2 = self.layer2(layer1)
layer3 = self.layer3(layer2)
layer4 = self.layer4(layer3)
layer5 = self.layer5(layer4)
layer6 = self.layer6(layer5)
layer7 = self.layer7(layer6)
latent = self.latent(layer7)
layer8 = self.layer8(latent)
layer9 = self.layer9(torch.cat([layer8, layer7], 1))
layer10 = self.layer10(torch.cat([layer9, layer6], 1))
layer11 = self.layer11(torch.cat([layer10, layer5], 1))
layer12 = self.layer12(torch.cat([layer11, layer4], 1))
layer13 = self.layer13(torch.cat([layer12, layer3], 1))
layer14 = self.layer14(torch.cat([layer13, layer2], 1))
return self.layer15(torch.cat([layer14, layer1], 1))
# global class for constants and hyperparameters
class config:
DEVICE = "cuda:0" if torch.cuda.is_available() else "cpu"
TRAIN_DIR = "data/daynight/train"
VAL_DIR = "data/daynight/val"
LEARNING_RATE = 0.0002
BATCH_SIZE = 16
NUM_WORKERS = 2
LAMBDA = 100
NUM_EPOCHS = 50
LOAD_MODEL = False
SAVE_MODEL = True
FLIP_TRAIN = False
CHECKPOINT_DISC = "disc.pth.tar"
CHECKPOINT_GEN = "gen.pth.tar"
MODEL_DEFAULT = 'maps'
MODEL_ANIME = 'anime'
MODEL_DAYNIGHT = 'daynight'
MODE = 'train'
class DataTransformation:
resize = albumentations.Compose(
[albumentations.Resize(width=256, height=256), ], additional_targets={"image0": "image"},
)
transform = albumentations.Compose(
[
albumentations.HorizontalFlip(p=0.5),
albumentations.ColorJitter(p=0.2),
albumentations.Normalize(mean=[0.5, 0.5, 0.5], std=[
0.5, 0.5, 0.5], max_pixel_value=255.0,),
ToTensorV2(),
]
)
tranform_mask = albumentations.Compose(
[
albumentations.Normalize(mean=[0.5, 0.5, 0.5], std=[
0.5, 0.5, 0.5], max_pixel_value=255.0,),
ToTensorV2(),
]
)
def save_checkpoint(model, optimizer, filename="my_checkpoint.pth.tar"):
print("=> Saving checkpoint")
torch.save({
"state_dict": model.state_dict(),
"optimizer": optimizer.state_dict(),
}, filename)
def load_checkpoint(checkpoint_file, model, optimizer, lr):
print("=> Loading checkpoint")
checkpoint = torch.load(checkpoint_file, map_location=config.DEVICE)
model.load_state_dict(checkpoint["state_dict"])
optimizer.load_state_dict(checkpoint["optimizer"])
for param_group in optimizer.param_groups:
param_group["lr"] = lr
'''
This class extends the pytorch Dataset class
'''
class SplitData(Dataset):
def __init__(self, root_dir) -> None:
self.root_dir = root_dir
self.list_files = os.listdir(self.root_dir)
def __len__(self) -> None:
return len(self.list_files)
def __getitem__(self, index) -> None:
img_file = self.list_files[index]
img_path = os.path.join(self.root_dir, img_file)
image = np.array(Image.open(img_path))
# get the image shape
image_dim = int(image.shape[1]/2)
# print('image shape: ', image_dim)
flip = config.FLIP_TRAIN
# print('flip: ', flip)
if flip:
target_image = image[:, :image_dim, :]
input_image = image[:, image_dim:, :]
else:
input_image = image[:, :image_dim, :]
target_image = image[:, image_dim:, :]
augmentations = DataTransformation.resize(
image=input_image, image0=target_image)
input_image = augmentations["image"]
target_image = augmentations["image0"]
input_image = DataTransformation.transform(image=input_image)["image"]
target_image = DataTransformation.tranform_mask(image=target_image)[
"image"]
return input_image, target_image
def get_l1_loss(weights) -> torch.Tensor:
return torch.abs(weights).sum()
def get_l2_loss(weights) -> torch.Tensor:
return torch.square(weights).sum()
def train_fn(
disc, gen, loader, opt_disc, opt_gen, l1_loss, bce, gen_scaler, disc_scaler,
) -> None:
loop = tqdm(loader, leave=True)
for idx, (x, y) in enumerate(loop):
x = x.to(config.DEVICE)
y = y.to(config.DEVICE)
# Train Discriminator
with torch.cuda.amp.autocast():
y_fake = gen(x)
Disc_real = disc(x, y)
Disc_real_loss = bce(Disc_real, torch.ones_like(Disc_real))
Disc_fake = disc(x, y_fake.detach())
Disc_fake_loss = bce(Disc_fake, torch.zeros_like(Disc_fake))
Disc_loss = (Disc_real_loss + Disc_fake_loss) / 2
disc.zero_grad()
disc_scaler.scale(Disc_loss).backward()
disc_scaler.step(opt_disc)
disc_scaler.update()
# Train generator
with torch.cuda.amp.autocast():
Disc_fake = disc(x, y_fake)
Gen_fake_loss = bce(Disc_fake, torch.ones_like(Disc_fake))
l1 = l1_loss(y_fake, y) * config.LAMBDA
params = []
for param in disc.parameters():
params.append(param.view(-1))
# l1 = config.LAMBDA * get_l1_loss(torch.cat(params))
l2 = config.LAMBDA * get_l2_loss(torch.cat(params))
Gen_loss = Gen_fake_loss + l1 + l2
opt_gen.zero_grad()
gen_scaler.scale(Gen_loss).backward()
gen_scaler.step(opt_gen)
gen_scaler.update()
if idx % 10 == 0:
loop.set_postfix(
Disc_real=torch.sigmoid(Disc_real).mean().item(),
Disc_fake=torch.sigmoid(Disc_fake).mean().item(),
)
# helper functions
def _getTrainDirectoryPath(modelname):
return 'data/'+modelname+'/train' if modelname != None or modelname != '' else 'data/maps/train'
def _getValDirectoryPath(modelname):
return 'data/'+modelname+'/val' if modelname != None or modelname != '' else 'data/maps/val'
def _getDiscCheckpointPath(modelname):
    """Return the discriminator checkpoint filename for *modelname*,
    using the ``maps_`` prefix for a ``None`` or empty model name.

    Bug fixed: ``modelname != None or modelname != ''`` is always True, so
    the fallback was unreachable and ``None`` crashed on concatenation.
    """
    prefix = modelname if modelname else 'maps'
    return prefix + '_' + config.CHECKPOINT_DISC
def _getGenCheckpointPath(modelname):
    """Return the generator checkpoint filename for *modelname*,
    using the ``maps_`` prefix for a ``None`` or empty model name.

    Bug fixed: ``modelname != None or modelname != ''`` is always True, so
    the fallback was unreachable and ``None`` crashed on concatenation.
    """
    prefix = modelname if modelname else 'maps'
    return prefix + '_' + config.CHECKPOINT_GEN
def main(args) -> None:
    """Entry point: apply CLI arguments to the global config, build the
    Pix2Pix models/optimizers, optionally restore checkpoints, then run the
    train/evaluation loop for ``config.NUM_EPOCHS`` epochs.
    """
    # get data from the command line arguments
    config.LOAD_MODEL = True if str(args.loadmodel).lower() == 'true' else False
    config.FLIP_TRAIN = True if str(args.flip).lower() == 'true' else False
    config.NUM_EPOCHS = int(
        args.epochs) if args.epochs != None else config.NUM_EPOCHS
    config.MODE = args.mode if args.mode != None else config.MODE
    # Discriminator and generator both operate on 3-channel (RGB) images.
    disc = Discriminator(in_channels=3).to(config.DEVICE)
    gen = Generator(in_channels=3, features=64).to(config.DEVICE)
    # betas=(0.5, 0.999): Adam momentum settings commonly used for GANs.
    opt_disc = optim.Adam(
        disc.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999),)
    opt_gen = optim.Adam(
        gen.parameters(), lr=config.LEARNING_RATE, betas=(0.5, 0.999))
    BCE = nn.BCEWithLogitsLoss()
    L1_LOSS = nn.L1Loss()
    print('saved gen checkpoint path: ', _getGenCheckpointPath(args.modelname))
    print('saved disc checkpoint path: ',
          _getDiscCheckpointPath(args.modelname))
    print('Load model value: ', config.LOAD_MODEL, type(config.LOAD_MODEL))
    if config.LOAD_MODEL:
        # Resume both networks (and their optimizer states) from disk.
        load_checkpoint(
            _getGenCheckpointPath(
                args.modelname), gen, opt_gen, config.LEARNING_RATE,
        )
        load_checkpoint(
            _getDiscCheckpointPath(
                args.modelname), disc, opt_disc, config.LEARNING_RATE,
        )
    train_dataset = SplitData(root_dir=_getTrainDirectoryPath(args.modelname))
    train_loader = DataLoader(
        train_dataset,
        batch_size=config.BATCH_SIZE,
        shuffle=True,
        num_workers=config.NUM_WORKERS,
    )
    # Separate GradScalers so generator and discriminator keep independent
    # mixed-precision loss-scaling state.
    gen_scaler = torch.cuda.amp.GradScaler()
    disc_scaler = torch.cuda.amp.GradScaler()
    val_dataset = SplitData(root_dir=_getValDirectoryPath(args.modelname))
    val_loader = DataLoader(val_dataset, batch_size=1, shuffle=False)
    # One shared iterator: each epoch consumes the next validation sample.
    val_itr = iter(val_loader)
    for epoch in range(1, config.NUM_EPOCHS+1):
        if(config.MODE == 'train'):
            print('Epoch: {}/{}'.format(epoch, config.NUM_EPOCHS))
            train_fn(
                disc, gen, train_loader, opt_disc, opt_gen, L1_LOSS, BCE, gen_scaler, disc_scaler,
            )
            # Checkpoint every 5 epochs when saving is enabled.
            if config.SAVE_MODEL and epoch % 5 == 0:
                save_checkpoint(
                    gen, opt_gen, filename=_getGenCheckpointPath(args.modelname))
                save_checkpoint(
                    disc, opt_disc, filename=_getDiscCheckpointPath(args.modelname))
        try:
            x, y = next(val_itr)
            # get_test_samples(gen, x, y, epoch, folder="evaluation")
            x, y = x.to(config.DEVICE), y.to(config.DEVICE)
            folder = "evaluation"
            gen.eval()
            with torch.no_grad():
                y_fake = gen(x)
                # *0.5 + 0.5 maps outputs to [0, 1] for saving — assumes
                # tensors were normalised to [-1, 1]; TODO confirm upstream.
                y_fake = y_fake * 0.5 + 0.5
                save_image(y_fake, folder + f"/y_gen_{epoch}.png")
                save_image(x * 0.5 + 0.5, folder + f"/input_{epoch}.png")
                save_image(y * 0.5 + 0.5, folder + f"/label_{epoch}.png")
            gen.train()
        except:
            # NOTE(review): bare except silently swallows *all* errors here —
            # StopIteration when the val iterator is exhausted, but also a
            # missing "evaluation" folder or CUDA failures. Consider
            # narrowing to StopIteration.
            pass
if __name__ == "__main__":
    # Build the command-line interface for the training/testing script.
    parser = argparse.ArgumentParser()
    parser.add_argument("--flip", default='false',
                        help="learn the left side of the image")
    parser.add_argument("--modelname", default=config.MODEL_DEFAULT,
                        help="which model to load")
    parser.add_argument("--mode", default='test',
                        help='start in train or test mode')
    parser.add_argument("--epochs", default=50,
                        help="number of epochs to train")
    parser.add_argument("--loadmodel", default='false',
                        help='load model or not')
    args = parser.parse_args()
    # Echo the parsed arguments for debugging.
    print(args)
    # Hand everything over to the main training/evaluation routine.
    main(args)
| ishon19/CSE676-FinalProject | Pix2Pix.py | Pix2Pix.py | py | 14,819 | python | en | code | 1 | github-code | 36 |
17849611547 | from ..config import np, Vector, DataName, MetaboliteConfig, ParameterName, LegendConfig
from ..metabolic_network_contents.metabolite import Metabolite
from ..metabolic_network_contents.reaction import Reaction
# Shared patch width for the legend: reaction (flux) arrows below reuse the
# configured metabolite width so all patches line up in the legend column.
metabolite_width = MetaboliteConfig.width
class NormalLegendConfig(object):
    """Legend content for the normal (and horizontal) network figure.

    The three dicts share keys; insertion order defines the top-to-bottom
    order of legend rows (metabolites first, then reactions).

    Fix: the ``\\mathregular`` literal previously used a single backslash in
    a non-raw string (``'\\m'`` is an invalid escape sequence and raises a
    SyntaxWarning/DeprecationWarning on modern Python); it is now escaped,
    producing the identical runtime string.
    """
    # Example metabolite patches, each demonstrating one display state.
    metabolite_content_dict = {
        'G6P': Metabolite('G6P'),
        'LAC': Metabolite('LAC').set_mid_data_state(True),
        'MAL': Metabolite('MAL').set_mid_data_state(True).set_mixed_mid_data_state(True),
        'GLU': Metabolite('GLU').set_biomass_flux_state(True),
        'GLY': Metabolite('GLY').set_input_state(True),
        'GLC': Metabolite('GLC').set_input_state(True).set_c13_labeling_state(True),
    }
    # Example reaction arrows; a tuple groups several arrows into one row.
    reaction_content_dict = {
        'fluxes': (Reaction('unidirectional'), Reaction('bidirectional', reversible=True)),
        'boundary_flux': Reaction('boundary_flux').set_boundary_flux(True),
    }
    # Explanatory text for each legend entry, keyed like the dicts above.
    text_content_dict = {
        'G6P': 'Normal metabolites',
        'LAC': 'Metabolites with mass spec data',
        'MAL': 'Metabolites with mixed mass spec\ndata (mitochondria and cytosol)',
        'GLU': 'Metabolites with biomass flux',
        'GLY': 'Input or output metabolites\nwith fixed MID',
        'GLC': 'Input metabolites with $\\mathregular{^{13}}$C\nlabelled',
        'fluxes': 'Normal fluxes (unidirectional\nor reversible)',
        'boundary_flux': 'Boundary fluxes with fixed value',
    }
class SmallerSizeLegendConfig(object):
    """Legend content for the data-size sensitivity figure.

    The dicts share keys; insertion order defines the top-to-bottom legend
    order. Each metabolite demonstrates which (progressively smaller) data
    set still contains its MID measurement; there are no reaction rows.
    """
    metabolite_content_dict = {
        'GLN': Metabolite('GLN').set_input_state(True),
        'OAC': Metabolite('OAC'),
        'MAL': Metabolite('MAL').set_data_sensitivity_state(DataName.raw_model_raw_data),
        '3PG': Metabolite('3PG').set_data_sensitivity_state(DataName.medium_data),
        'GLC': Metabolite('GLC').set_data_sensitivity_state(DataName.few_data),
    }
    # This legend has no reaction-arrow entries.
    reaction_content_dict = {}
    # Explanatory text for each legend entry, keyed like the dict above.
    text_content_dict = {
        'GLN': 'Input or output metabolites\nwith fixed MID',
        'OAC': 'With MID data in all data set',
        'MAL': 'With MID data in all + experimental\ndata set',
        '3PG': 'With MID data in all + experimental\n+ medium data set',
        'GLC': 'With MID data in all + experimental\n+ medium + small data set',
    }
class RemovePathwayLegendConfig(object):
    """Legend content for the removed-pathway sensitivity figure.

    The dicts share keys; insertion order defines the top-to-bottom legend
    order. Each metabolite marks a data set with one pathway's MID data
    removed (PPP, TCA or amino acids); there are no reaction rows.
    """
    metabolite_content_dict = {
        'LAC': Metabolite('LAC').set_mid_data_state(True),
        'R5P': Metabolite('R5P').set_data_sensitivity_state(DataName.data_without_ppp),
        'MAL': Metabolite('MAL').set_data_sensitivity_state(DataName.data_without_tca),
        'GLU': Metabolite('GLU').set_data_sensitivity_state(DataName.data_without_aa),
        # 'CIT': Metabolite('CIT').set_data_sensitivity_state(DataName.medium_data_without_combination),
    }
    # This legend has no reaction-arrow entries.
    reaction_content_dict = {}
    # Explanatory text for each legend entry, keyed like the dict above.
    text_content_dict = {
        'LAC': 'Experimental data set',
        'R5P': 'Removed MID data of PPP metabolites',
        'MAL': 'Removed MID data of TCA metabolites',
        'GLU': 'Removed MID data of AA metabolites',
        # 'CIT': 'Added compartmental MID',
    }
class ConstantFluxLegendConfig(object):
    """Legend content for the different-constant-flux figure.

    Reaction-only legend: one row with the normal flux arrows and one row
    for preset fixed boundary fluxes; no metabolite entries.
    """
    metabolite_content_dict = {}
    # A tuple groups several arrows into a single legend row.
    reaction_content_dict = {
        'fluxes': (Reaction('unidirectional'), Reaction('bidirectional', reversible=True)),
        'boundary_fluxes': Reaction('boundary_flux').set_boundary_flux(True),
    }
    # Explanatory text for each legend entry, keyed like the dict above.
    text_content_dict = {
        'fluxes': 'Normal fluxes (unidirectional\nor reversible)',
        'boundary_fluxes': 'Preset fixed boundary fluxes',
    }
def legend_layout_generator(mode=ParameterName.normal):
    """Lay out the metabolic-network legend for the given figure *mode*.

    Selects a legend content configuration based on *mode*, then computes a
    position for every legend patch (metabolite or reaction arrow) and its
    explanatory text — either a single column (default) or two rows filled
    column-by-column (horizontal mode).

    Returns:
        ``(patch_raw_obj_dict, text_param_dict, total_width, total_height)``
        where ``patch_raw_obj_dict`` maps item keys to positioned
        Metabolite/Reaction objects and ``text_param_dict`` maps item keys
        to text-drawing parameter dicts.

    Raises:
        ValueError: if *mode* is not one of the recognised modes, or a
            reaction entry is neither a Reaction nor a tuple of Reactions.

    Cleanup: removed ~45 lines of commented-out code duplicating the
    reaction branch, and a vestigial ``row_index += 1`` whose value was
    overwritten by the loop on every iteration.
    """
    # Pick the legend content set for the requested figure mode.
    if mode == ParameterName.normal or mode == ParameterName.horizontal:
        legend_config = NormalLegendConfig
    elif mode == DataName.smaller_data_size:
        legend_config = SmallerSizeLegendConfig
    elif mode == DataName.data_without_pathway:
        legend_config = RemovePathwayLegendConfig
    elif mode == DataName.different_constant_flux:
        legend_config = ConstantFluxLegendConfig
    else:
        raise ValueError()
    metabolite_content_dict, reaction_content_dict, text_content_dict = \
        legend_config.metabolite_content_dict, legend_config.reaction_content_dict, legend_config.text_content_dict
    # Flatten metabolites first, then reactions, into a uniform list of
    # (item_type, key, content_object, legend_text) tuples.
    total_item_list = []
    for metabolite_key, metabolite_content in metabolite_content_dict.items():
        text_content = text_content_dict[metabolite_key]
        total_item_list.append((ParameterName.metabolite, metabolite_key, metabolite_content, text_content))
    for reaction_key, reaction_content in reaction_content_dict.items():
        text_content = text_content_dict[reaction_key]
        total_item_list.append((ParameterName.reaction, reaction_key, reaction_content, text_content))
    total_item_num = len(total_item_list)
    each_row_height = LegendConfig.legend_each_row_height
    if mode == ParameterName.horizontal:
        # Horizontal legend: two rows; items fill top-to-bottom, then move
        # to the next column.
        total_row_num = 2
        total_col_num = np.ceil(total_item_num / 2)
        total_width = LegendConfig.legend_horizontal_width
    else:
        # Vertical legend: one column, one row per item.
        total_row_num = total_item_num
        total_col_num = 1
        total_width = LegendConfig.legend_width
    layout_index_list = [
        (item_index % total_row_num, item_index // total_row_num) for item_index in range(total_item_num)]
    total_height = total_row_num * each_row_height
    each_col_width = total_width / total_col_num
    multiple_reaction_up_down_distance = 0.005
    # Reaction arrows reuse the metabolite patch width so columns line up.
    flux_width = metabolite_width
    # Within each column: patch centred at 15% of the column width, text
    # starting at 30% and filling the remainder.
    base_patch_center_x_axis = 0.15 * each_col_width
    base_text_left_x_axis = 0.3 * each_col_width
    text_width = each_col_width - base_text_left_x_axis
    patch_raw_obj_dict = {}
    text_param_dict = {}
    for (row_index, col_index), (item_type, item_key, item_content, text_content) \
            in zip(layout_index_list, total_item_list):
        patch_center_x_axis = col_index * each_col_width + base_patch_center_x_axis
        text_left_x_axis = col_index * each_col_width + base_text_left_x_axis
        flux_left_x_value, flux_right_x_value = (
            patch_center_x_axis - flux_width / 2, patch_center_x_axis + flux_width / 2)
        # Irreversible arrows end 3% short on the right — presumably to fit
        # the single arrow head inside the same patch width; confirm.
        irreversible_flux_right_x_value = 0.03 * flux_left_x_value + 0.97 * flux_right_x_value
        # Rows are numbered from the top while the y-axis grows upwards.
        current_row_center_y_value = (total_row_num - row_index - 0.5) * each_row_height
        text_param_dict[item_key] = {
            ParameterName.center: Vector(text_left_x_axis + text_width / 2, current_row_center_y_value),
            ParameterName.string: text_content,
            ParameterName.width: text_width,
            ParameterName.height: each_row_height,
        }
        if item_type == ParameterName.metabolite:
            item_content.set_center(Vector(patch_center_x_axis, current_row_center_y_value))
            patch_raw_obj_dict[item_key] = item_content
        elif item_type == ParameterName.reaction:
            if isinstance(item_content, tuple):
                # Several reactions share one legend row: stack them in
                # equal-height sub-rows with a small top/bottom margin.
                reaction_num = len(item_content)
                reaction_subrow_height = (each_row_height - 2 * multiple_reaction_up_down_distance) / reaction_num
                for reaction_subindex, reaction_obj in enumerate(item_content):
                    current_subrow_y_value = (
                        current_row_center_y_value + each_row_height / 2 - multiple_reaction_up_down_distance -
                        (reaction_subindex + 0.5) * reaction_subrow_height)
                    if reaction_obj.reversible:
                        current_flux_right_x_value = flux_right_x_value
                    else:
                        current_flux_right_x_value = irreversible_flux_right_x_value
                    reaction_obj.extend_reaction_start_end_list([
                        (
                            ParameterName.normal,
                            Vector(current_flux_right_x_value, current_subrow_y_value),
                            Vector(flux_left_x_value, current_subrow_y_value),
                            {}
                        )
                    ])
                    patch_raw_obj_dict[reaction_obj.reaction_name] = reaction_obj
            elif isinstance(item_content, Reaction):
                if item_content.reversible:
                    current_flux_right_x_value = flux_right_x_value
                else:
                    current_flux_right_x_value = irreversible_flux_right_x_value
                item_content.extend_reaction_start_end_list([
                    (
                        ParameterName.normal,
                        Vector(current_flux_right_x_value, current_row_center_y_value),
                        Vector(flux_left_x_value, current_row_center_y_value),
                        {}
                    )
                ])
                patch_raw_obj_dict[item_key] = item_content
            else:
                raise ValueError()
    return patch_raw_obj_dict, text_param_dict, total_width, total_height
| LocasaleLab/Automated-MFA-2023 | figures/figure_plotting/figure_elements/metabolic_network/layout_generator_functions/legend_layout_generator.py | legend_layout_generator.py | py | 11,543 | python | en | code | 0 | github-code | 36 |
30326237829 | import sys
import time
from threading import Thread
class ProgressThread(Thread):
    """Background thread that renders a one-line console spinner.

    The line has the form ``<info> <elapsed>s <cursor>`` and is redrawn
    roughly every 0.4 s until :meth:`stop_progress` is called.

    Improvements: modern ``super().__init__()``, modulo-based cursor
    cycling, f-string formatting and a ``while not`` loop — all producing
    byte-identical output to the original.
    """

    def __init__(self):
        super().__init__()
        self.is_stop = False       # set True to ask run() to exit
        self.cursor_index = 0      # current position in the spinner cycle
        self.cursor_str = '|/-\\'  # spinner animation frames
        self.now = None            # start timestamp, set when run() begins
        self.info = ""             # text shown before the elapsed time

    def set_progress_info(self, info):
        """Replace the text displayed in front of the elapsed time."""
        self.info = info

    def get_progress_text(self):
        """Build the next spinner line and advance the animation frame."""
        cursor = self.cursor_str[self.cursor_index]
        # Wrap around with modulo instead of the manual reset-to-zero.
        self.cursor_index = (self.cursor_index + 1) % len(self.cursor_str)
        time_second = str(int(time.time() - self.now))
        return f"{self.info} {time_second}s {cursor}"

    def stop_progress(self):
        """Request the thread to stop, then wait one redraw period.

        The 0.6 s sleep exceeds the 0.4 s redraw interval — presumably so
        the final frame settles before the caller prints; confirm.
        """
        self.is_stop = True
        time.sleep(0.6)

    def run(self):
        self.now = time.time()
        # Redraw the spinner until asked to stop; '\r' moves the cursor back
        # to the start of the line for the next frame.
        while not self.is_stop:
            sys.stdout.write(self.get_progress_text())
            sys.stdout.flush()
            time.sleep(0.4)
            sys.stdout.write('\r')
class Progress:
    """Facade that manages a single :class:`ProgressThread` at a time.

    Fixes: removed a dead ``pass`` statement in ``__init__``; guarded
    ``show_progress`` against being called before ``start_progress``
    (previously raised ``AttributeError`` on the ``None`` thread, unlike
    ``stop_progress`` which already checked).
    """

    def __init__(self):
        # The currently running progress thread, or None when idle.
        self.current_thread = None

    def start_progress(self, progress_info):
        """Stop any running spinner and start a new one showing *progress_info*."""
        if self.current_thread is not None:
            self.current_thread.stop_progress()
        self.current_thread = ProgressThread()
        # Daemon thread so an abrupt interpreter exit is not blocked.
        self.current_thread.daemon = True
        self.current_thread.set_progress_info(progress_info)
        self.current_thread.start()

    def stop_progress(self):
        """Stop the running spinner, if any, and forget the thread."""
        if self.current_thread is not None:
            self.current_thread.stop_progress()
            self.current_thread = None

    def show_progress(self, info):
        """Update the spinner text; a no-op when no spinner is running."""
        if self.current_thread is not None:
            self.current_thread.set_progress_info(info)
if __name__ == '__main__':
    # Manual demo: start a spinner, update its message a few times while
    # pretending to do work, then stop it.
    progress = Progress()
    progress.start_progress("开始上传文件")  # "starting file upload"
    for i in range(10):
        time.sleep(0.5)
        progress.show_progress("文件上传中")  # "file uploading"
    progress.stop_progress()
| Whale-lyi/simple-predict | progress.py | progress.py | py | 2,068 | python | en | code | 0 | github-code | 36 |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# filename: const.py
# modified: 2019-03-30

"""
Constants table: project directory layout, HTTP-client settings and
image-host (Tietuku / elimage / sm.ms) configuration.
"""

# Public API of this module.
__all__ = [

    "PROJECT_DIR",
    "PACKAGE_DIR",
    "CACHE_DIR",
    "CONFIG_DIR",
    "STATIC_DIR",
    "LOG_DIR",
    "INPUT_DIR",
    "OUTPUT_DIR",
    "OUTPUT_SRC_DIR",

    "STYLE_CSS",

    "CLIENT_DEFAULT_TIMEOUT",
    "CLIENT_USER_AGENT",

    "TIETUKU_TOKEN",
    "TIETUKU_AID",
    "TIETUKU_CACHE_EXPIRED",

    "TIETUKU_LINKS_CACHE_JSON",
    "ELIMAGE_LINKS_CACHE_JSON",
    "SMMS_LINKS_CACHE_JSON",

]
import os
def __mkdir(path):
    """Create *path* as a directory if it does not exist yet.

    Uses ``os.makedirs(..., exist_ok=True)`` instead of the original
    ``os.path.exists`` + ``os.mkdir`` pair, which had a check-then-create
    race; this also creates any missing parent directories.
    """
    os.makedirs(path, exist_ok=True)
# Directory of this file; all project paths below are resolved relative to it.
__Base_dir = os.path.dirname(__file__)
# Join path components onto __Base_dir and return an absolute path.
__absP = lambda *path: os.path.abspath(os.path.join(__Base_dir, *path))

# Project directory layout.
PROJECT_DIR = __absP("../../")
PACKAGE_DIR = __absP("../")
CACHE_DIR = __absP("../cache/")
CONFIG_DIR = __absP("../config/")
STATIC_DIR = __absP("../static/")
LOG_DIR = __absP("../../logs/")
INPUT_DIR = __absP("../../input/")
OUTPUT_DIR = __absP("../../output/")
OUTPUT_SRC_DIR = __absP("../../output/src/")

# CONFIG_DIR is already absolute, so os.path.join ignores the base here.
STYLE_CSS = __absP(CONFIG_DIR, "style.css")

# Ensure the writable working directories exist at import time.
__mkdir(LOG_DIR)
__mkdir(CACHE_DIR)
__mkdir(INPUT_DIR)
__mkdir(OUTPUT_DIR)
__mkdir(OUTPUT_SRC_DIR)

# HTTP client defaults: request timeout in seconds and the User-Agent sent.
CLIENT_DEFAULT_TIMEOUT = 15
CLIENT_USER_AGENT = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/72.0.3626.122 Safari/537.36"

# Tietuku image-host credentials and settings.
# NOTE(review): this API token is hardcoded in source control — consider
# loading it from a config file or environment variable instead.
TIETUKU_TOKEN = "305b1ec69ae2dfd54076a3f648931b9ac51a414b:EkuHTpQkLlK07Ocf69_VxR3anu8=:eyJkZWFkbGluZSI6MTUzMzI4ODMyNCwiYWN0aW9uIjoiZ2V0IiwidWlkIjoiNjU2NTU0IiwiYWlkIjoiMTQ3ODU1NSIsImZyb20iOiJmaWxlIn0="
TIETUKU_AID = 1478555
TIETUKU_CACHE_EXPIRED = 12*60*60 # 12 h

# Filenames of the per-host uploaded-link caches (stored under CACHE_DIR).
TIETUKU_LINKS_CACHE_JSON = "tietuku.links.json"
ELIMAGE_LINKS_CACHE_JSON = "elimage.links.json"
SMMS_LINKS_CACHE_JSON = "sm.ms.links.json"
| pkuyouth/pkuyouth-html-coder | htmlcoder/core/const.py | const.py | py | 1,832 | python | en | code | 5 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.