seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
9927108927 |
# coding: utf-8
# In[89]:
import tarfile
import xml.etree.ElementTree as ET
import tqdm
import codecs
# In[87]:
# NOTE(review): 'members' is never used afterwards — kept for compatibility.
members = []

# Stream the gzipped tar; each regular member is expected to be an XML document.
tar = tarfile.open("unlabeled.tar.gz", "r:gz")
outfile = codecs.open("unlabeled.txt", 'w', 'utf-8')
try:
    for member in tar:
        f = tar.extractfile(member)
        # Directories and special entries yield None; skip them.
        if f is None:
            continue
        print(member.name)
        root = ET.fromstring(f.read())
        # One output line per <p> element: its own leading text plus the
        # text and tail of every direct child (inline markup).
        for para in root.iter('p'):
            parts = []
            if para.text is not None:
                parts.append(para.text)
            for child in para:
                if child.text is not None:
                    parts.append(child.text)
                if child.tail is not None:
                    parts.append(child.tail)
            outfile.write(''.join(parts) + "\n")
        outfile.flush()
finally:
    # Always release the file handles, even if XML parsing fails mid-archive.
    outfile.close()
    tar.close()
| peteykun/NLU-Assignment3 | tar2txt.py | tar2txt.py | py | 874 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tarfile.open",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "xml.etree.ElementTree.fromstring",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "xml.etree.E... |
26728719706 | import math
import os
import random
import re
import warnings
from typing import Dict, List, Tuple, Union
import cv2
import numpy as np
import torch
import torch.distributed as dist
import torch.multiprocessing as mp
import torch.optim as optim
from torch import nn
from torch.nn.parallel import DistributedDataParallel as DDP
from torch.utils.data import DataLoader
from torch.utils.data.distributed import DistributedSampler
from tqdm.auto import tqdm
from .data import LDRDataset
from .models import EncoderDecoderModel, ImageSpaceLoss
from .tools import helper_functions
warnings.simplefilter("ignore")
TORCH_MAJOR = int(torch.__version__.split(".")[0])
TORCH_MINOR = int(torch.__version__.split(".")[1])
if TORCH_MAJOR == 1 and TORCH_MINOR < 8:
from torch._six import container_abcs, int_classes, string_classes
else:
import collections.abc as container_abcs
int_classes = int
string_classes = str
def setup(rank, world_size):
os.environ["MASTER_ADDR"] = "localhost"
os.environ["MASTER_PORT"] = "12355"
# initialize the process group
dist.init_process_group("gloo", rank=rank, world_size=world_size)
def cleanup():
dist.destroy_process_group()
def seed_worker(worker_id):
worker_seed = torch.initial_seed() % 2**32
np.random.seed(worker_seed)
random.seed(worker_seed)
class UnsupervisedHDRModel:
def __init__(
self,
video_path: str,
checkpoint_path: str = None,
encoder: str = "SimpleEncoder",
decoder: str = "SimpleDecoder",
encoder_pretrained: bool = None,
encoder_lr: float = 1e-4,
decoder_lr: float = 1e-4,
num_worker: int = 4,
device_ids: Union[str, int, List[int]] = None,
output_dir: str = "./",
seed: int = 0,
) -> None:
self.video_path = video_path
self.encoder_lr = encoder_lr
self.decoder_lr = decoder_lr
self.num_worker = num_worker
self.output_dir = output_dir
self.seed = seed
self.max_epoch = None
self.iterator = 0
self.epoch = 0
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
g = torch.Generator()
g.manual_seed(seed)
if device_ids is None:
self.device_ids = list(range(torch.cuda.device_count()))
self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
else:
if device_ids == "cpu":
self.device_ids = list()
self.device = torch.device("cpu")
else:
self.device_ids = helper_functions.input2list(device_ids)
self.device = torch.device(f"cuda:{self.device_ids[0]}")
self.build_model(encoder, decoder, encoder_pretrained)
self.configure_optimizers()
self.train_dataloader = None
self.predict_dataloader = None
self.initialize_logger()
if checkpoint_path:
self.load_checkpoint(checkpoint_path)
def build_model(
self, encoder: str, decoder: str, encoder_pretrained: bool = False
) -> None:
self.model = EncoderDecoderModel(encoder, decoder, encoder_pretrained)
self.mse_loss = nn.MSELoss()
self.image_space_loss = ImageSpaceLoss()
def build_dataset(
self,
rank: int,
world_size: int,
train: bool,
batch_size: int = 1,
image_size: Tuple[int, int] = (512, 512),
) -> None:
# TODO: dataloader
dataset = LDRDataset(self.video_path, train=train, image_size=image_size)
dataloader = DataLoader(
dataset,
batch_size=batch_size,
shuffle=(rank == -1 and train),
sampler=None
if rank == -1
else DistributedSampler(dataset, rank=rank, num_replicas=world_size),
num_workers=self.num_worker,
worker_init_fn=seed_worker,
pin_memory=True,
drop_last=train,
)
return dataloader
def initialize_logger(self) -> None:
self.train_results = {}
self.lr = {}
if not os.path.exists(self.output_dir):
os.makedirs(self.output_dir)
if not os.path.exists(os.path.join(self.output_dir, "checkpoints")):
os.makedirs(os.path.join(self.output_dir, "checkpoints"))
# TODO: save hyper parameters
self.train_results["best_loss"] = float("inf")
self.result_info = ""
def forward(self, image: torch.Tensor) -> Dict[str, torch.Tensor]:
"""Forward pass. Returns logits."""
outputs = {}
outputs["pred_delta"] = self.model(image)
outputs["pred_Ib"] = image * outputs["pred_delta"]
return outputs
def loss(
self, outputs: Dict[str, torch.Tensor], batch: Dict[str, torch.Tensor]
) -> Dict[str, torch.Tensor]:
losses = {}
losses["loss_delta"] = self.mse_loss(
(batch["Ib"] + 1e-6) / (batch["Ih"] + 1e-6), outputs["pred_delta"]
)
losses["loss_image"] = self.image_space_loss(batch["Ib"], outputs["pred_Ib"])
losses["loss"] = losses["loss_delta"] + losses["loss_image"]
return losses
def training_step(
self, batch: Dict[str, torch.Tensor], batch_idx: int = None
) -> Dict[str, torch.Tensor]:
step_output = {}
outputs = self.forward(batch["Ih"])
train_loss = self.loss(outputs, batch)
step_output.update(train_loss)
train_loss["loss"].backward()
self.optimizer.step()
return step_output
def training_one_epoch(self, rank=-1, device=None) -> Dict[str, float]:
self.model.train()
losses = {}
with tqdm(
self.train_dataloader,
position=0,
leave=True,
ascii=" ##",
dynamic_ncols=True,
disable=rank > 0,
) as t:
for batch_idx, batch_data in enumerate(t):
batch_data = self.cuda(batch_data, device=device if rank < 0 else rank)
step_output = self.training_step(batch_data)
t.set_description("Epoch %i Training" % self.epoch)
print_losses = {}
for key in step_output:
print_losses[key] = step_output[key].item()
if key in losses.keys():
losses[key].append(step_output[key].item())
else:
losses[key] = [step_output[key].item()]
t.set_postfix(ordered_dict=dict(**print_losses))
self.iterator += 1
if rank <= 0:
for key in losses:
losses[key] = sum(losses[key]) / len(losses[key])
if len(losses.values()) > 1 and not ("loss" in losses.keys()):
losses["loss"] = sum(losses.values())
for key in losses:
self.train_results[key] = losses[key]
if self.train_results["best_loss"] >= self.train_results["loss"]:
self.train_results["best_loss"] = self.train_results["loss"]
self.save_checkpoint(metric="loss")
if self.epoch != 0:
self.save_checkpoint()
self.lr = {}
self.lr["lr"] = [group["lr"] for group in self.optimizer.param_groups][0]
self.result_info = ""
for result_key, result_value in zip(
self.train_results.keys(), self.train_results.values()
):
self.result_info = (
self.result_info
+ result_key
+ ":"
+ str(round(result_value, 4))
+ " "
)
for lr_key, lr_value in zip(self.lr.keys(), self.lr.values()):
self.result_info = (
self.result_info + lr_key + ":" + str(round(lr_value, 4)) + " "
)
print("Epoch %i" % self.epoch, self.result_info)
self.epoch += 1
return losses
def fit_single(
self, rank: int, max_epoch: int, world_size: int, batch_size: int = 1
) -> None:
# TODO: early stopping
self.train_dataloader = self.build_dataset(
rank=rank,
world_size=world_size,
train=True,
batch_size=batch_size,
)
use_ddp = world_size > 1
if use_ddp:
setup(rank, world_size)
self.model = self.model.to(rank)
self.model = DDP(self.model, device_ids=[rank])
else:
self.model = self.model.to(self.device)
self.max_epoch = max_epoch
for _ in range(max_epoch):
self.training_one_epoch(rank=rank, device=self.device)
if use_ddp:
cleanup()
def fit(self, max_epoch: int, batch_size: int = 1) -> None:
if len(self.device_ids) > 1:
mp.spawn(
self.fit_single,
args=(max_epoch, len(self.device_ids), batch_size),
nprocs=len(self.device_ids),
join=True,
)
else:
self.fit_single(-1, max_epoch, len(self.device_ids), batch_size)
def predict_step(
self, batch: Dict[str, torch.Tensor], batch_idx: int = None
) -> Dict[str, np.ndarray]:
with torch.no_grad():
delta = self.model(batch["Ib"]) # (bs, 3, h, w)
Il_2 = batch["Ib"] * delta
delta_2l = self.model(Il_2)
Il_4 = Il_2 * delta_2l
Ih_2 = batch["Ib"] / delta
delta_2h = self.model(Ih_2)
Ih_4 = Ih_2 / delta_2h
exposure_list = [
Il_4,
Il_2,
batch["Ib"],
Ih_2,
Ih_4,
]
exposure_list = np.stack(
[
(img.clone().detach().cpu().numpy().clip(0, 1) * 255).astype(np.uint8)
for img in exposure_list
]
).transpose(
1, 0, 3, 4, 2
) # (bs, 5, h, w, 3)
merge_mertens = cv2.createMergeMertens()
hdr_image = [merge_mertens.process(img_set) for img_set in exposure_list]
output = {"exposure_list": exposure_list, "hdr_image": hdr_image}
return output # BGR
def predict(
self,
frame_idx: Union[int, List[int]] = None,
batch_size: int = 1,
image_size=None,
) -> Dict[str, np.ndarray]:
self.predict_dataloader = self.build_dataset(
rank=-1,
world_size=1,
train=False,
batch_size=batch_size,
image_size=image_size,
)
best_checkpoint = os.path.join(self.output_dir, "checkpoints", "best_loss.pth")
print(f"Start loading best checkpoint from {best_checkpoint}")
self.load_checkpoint(best_checkpoint)
self.cuda(self.model, device=self.device)
self.model.eval()
print("Finish loading!")
output = {"exposure_list": [], "hdr_image": []}
if frame_idx is None:
for batch in tqdm(self.predict_dataloader):
batch = self.cuda(batch, self.device)
output_batch = self.predict_step(batch)
output["exposure_list"].append(output_batch["exposure_list"])
output["hdr_image"].append(output_batch["hdr_image"])
else:
frame_idx = helper_functions.input2list(frame_idx)
for i in tqdm(
range(math.ceil(len(frame_idx) / self.predict_dataloader.batch_size))
):
batch = []
for f in frame_idx[
i
* self.predict_dataloader.batch_size : (i + 1)
* self.predict_dataloader.batch_size
]:
batch.append(self.predict_dataloader.dataset[f])
batch = self.cuda(batch, self.device)
if len(batch) == 1:
batch = {k: b.unsqueeze(0) for k, b in batch[0].items()}
else:
batch = helper_functions.concat_data(batch)
output_batch = self.predict_step(batch)
output["exposure_list"].append(output_batch["exposure_list"])
output["hdr_image"].append(output_batch["hdr_image"])
output["exposure_list"] = np.concatenate(output["exposure_list"])
output["hdr_image"] = np.concatenate(output["hdr_image"])
return output
def load_checkpoint(self, checkpoint_path):
self.model.load_state_dict(
torch.load(
checkpoint_path,
map_location=torch.device("cpu"),
)["model_state_dict"]
)
def save_checkpoint(self, metric=None):
checkpoint = {}
if metric is None:
file_path = os.path.join(self.output_dir, "checkpoints", "last.pth")
else:
file_path = os.path.join(
self.output_dir, "checkpoints", "best_" + metric + ".pth"
)
checkpoint = {
"best_epoch": self.epoch,
"best_" + metric: self.train_results["best_" + metric],
}
checkpoint["model_state_dict"] = (
self.model.module.state_dict()
if isinstance(self.model, DDP)
else self.model.state_dict()
)
torch.save(checkpoint, file_path)
def configure_optimizers(self) -> optim.Optimizer:
self.optimizer = optim.Adam(
[
{"params": self.model.encoder.parameters(), "lr": self.encoder_lr},
{"params": self.model.decoder.parameters(), "lr": self.decoder_lr},
],
lr=self.decoder_lr,
weight_decay=0.0001,
)
def cuda(self, x, device=None):
np_str_obj_array_pattern = re.compile(r"[SaUO]")
if torch.cuda.is_available():
if isinstance(x, torch.Tensor):
x = x.cuda(non_blocking=True, device=device)
return x
elif isinstance(x, nn.Module):
x = x.cuda(device=device)
return x
elif isinstance(x, np.ndarray):
if x.shape == ():
if np_str_obj_array_pattern.search(x.dtype.str) is not None:
return x
return self.cuda(torch.as_tensor(x), device=device)
return self.cuda(torch.from_numpy(x), device=device)
elif isinstance(x, float):
return self.cuda(torch.tensor(x, dtype=torch.float64), device=device)
elif isinstance(x, int_classes):
return self.cuda(torch.tensor(x), device=device)
elif isinstance(x, string_classes):
return x
elif isinstance(x, container_abcs.Mapping):
return {key: self.cuda(x[key], device=device) for key in x}
elif isinstance(x, container_abcs.Sequence):
return [
self.cuda(np.array(xi), device=device)
if isinstance(xi, container_abcs.Sequence)
else self.cuda(xi, device=device)
for xi in x
]
def to_cpu(self, x):
if isinstance(x, torch.Tensor) and x.device != "cpu":
return x.clone().detach().cpu()
elif isinstance(x, np.ndarray):
return x
elif isinstance(x, container_abcs.Mapping):
return {key: self.to_cpu(x[key]) for key in x}
elif isinstance(x, container_abcs.Sequence):
return [self.to_cpu(xi) for xi in x]
else:
return x
| tattaka/unsupervised-hdr-imaging | unsupervised_hdr/core.py | core.py | py | 15,835 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "warnings.simplefilter",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "torch.__version__.split",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "torch.__version__",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "... |
# http://localhost:3000/objects
import requests

# requests needs an explicit scheme; a bare "localhost:3000/objects"
# raises requests.exceptions.MissingSchema before any request is sent.
url = "http://localhost:3000/objects"

# Pre-serialized JSON body for the artwork record to create.
payload = "{\n \"id\": 5,\n \"item\": \"The Fiancรฉs\",\n \"artist\": \"Pierre Auguste Renoir\",\n \"collection\": \"WallrafโRichartz Museum, Cologne, Germany\",\n \"date\": \"1868\"\n }"
headers = {
    'content-type': 'application/json'
}

response = requests.request("POST", url, headers=headers, data=payload)

print(response.text.encode('utf8'))
{
"api_name": "requests.request",
"line_number": 11,
"usage_type": "call"
}
] |
21402710703 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2018/11/30 14:47
# @Author : qingping.niu
# @File : StartTime.py
# @desc :
import os,time,datetime
import uiautomator2 as u2
def getdevices():
    """Return serial numbers of usable devices listed by ``adb devices``.

    Skips the "List of devices attached" header, blank lines, and devices
    reported as unauthorized ("?") or offline.

    Returns:
        list[str]: device serials (possibly empty). On a parsing error the
        devices collected so far are returned instead of an implicit None,
        so callers indexing the result fail clearly (IndexError) rather
        than with a TypeError on None.
    """
    devices = []
    result = os.popen("adb devices").readlines()
    result.reverse()
    try:
        for line in result:
            li = line.strip()
            # Skip header, empty lines and unusable device states.
            if not len(li) or "attached" in li or "?" in li or "offline" in li:
                continue
            devices.append(li.split()[0])
    except Exception as e:
        print(e)
    return devices
def start(devices, appName):
    """Launch the Notes app on the device and print the measured start time.

    Args:
        devices: serial number of the device to connect to via uiautomator2.
        appName: unused here — the launched app is hard-coded to "Notes".
            TODO confirm whether it was meant to parameterize the target app.
    """
    d = u2.connect(devices)
    # d(scrollable=True).scroll.to(text=u"Notes")
    # Swipe the launcher to the page holding the icon, then tap it.
    d(scrollable=True).fling.horiz.forward()
    d(text=u"Notes").click()
    begin = datetime.datetime.now()
    # end = None
    # NOTE(review): .exists is evaluated immediately after the tap, so the
    # measured interval may be near zero rather than the true launch time.
    if d(text="Notes").exists:
        end = datetime.datetime.now()
        k = (end - begin)
        print(k.total_seconds())
        print("dddd")
if __name__=="__main__":
    # Measure the app start time on the first attached device.
    devices = getdevices()
    print(devices[0])
    start(devices[0],appName="Notes")
{
"api_name": "os.popen",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "uiautomator2.connect",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "datetime.datetime... |
27689203044 | import discord
import yaml
# Load the user-role mapping once at import time. BaseLoader keeps every
# scalar as a plain string (no implicit bool/int conversion).
with open("data/users.yaml", "r") as ymlfile:
    users = yaml.load(ymlfile, Loader=yaml.BaseLoader)
def find(ctx, typ):
    """Return True if the message author appears in the ``typ`` user list."""
    author = str(ctx.message.author)
    return any(str(entry) == author for entry in users[typ])
async def check_admin(ctx):
    """Check admin membership; notify the channel and deny if missing."""
    if not find(ctx, "admin"):
        await ctx.send("You can't use **admin** commands.")
        return False
    return True
async def admin(ctx):
    """Convenience wrapper around check_admin() (e.g. for use as a command check)."""
    return await check_admin(ctx)
| Kattulel/DisneyBot | config/usercontrol.py | usercontrol.py | py | 513 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "yaml.load",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "yaml.BaseLoader",
"line_number": 5,
"usage_type": "attribute"
}
] |
73631959395 | """Utility file to seed kindred database from Native-Land data"""
from sqlalchemy import func
from models import Tribe, Language, connect_to_db, db
from routes import app
import json
def json_reader(file_path):
    """Read the JSON document at ``file_path`` and return the parsed object."""
    with open(file_path) as handle:
        return json.load(handle)
def load_tribes():
    """Load Tribe rows from indigenousTerritories.json into the database.

    Clears the existing tribes table first, then inserts one Tribe per
    GeoJSON feature. Region and description are not present in the source
    data, so they are stored as None.
    """
    print('Tribes')
    # Start from a clean table so reseeding does not duplicate rows.
    Tribe.query.delete()

    tribe_file = "seed_data/indigenousTerritories.json"
    tribe_dict = json_reader(tribe_file)

    # Iterate the GeoJSON features directly instead of tracking a manual
    # index next to an unused loop variable (the old loop never used `key`).
    for feature in tribe_dict['features']:
        tribe = Tribe(name=feature['properties']['Name'],
                      region=None,
                      description=None)
        db.session.add(tribe)

    db.session.commit()
def load_languages():
    """Load Language rows from indigenousLanguages.json into the database.

    Clears the existing languages table first, then inserts one Language
    per GeoJSON feature.
    """
    print('Languages')
    # Start from a clean table so reseeding does not duplicate rows.
    Language.query.delete()

    lang_file = "seed_data/indigenousLanguages.json"
    lang_dict = json_reader(lang_file)

    # Iterate the GeoJSON features directly instead of tracking a manual
    # index next to an unused loop variable (the old loop never used `key`).
    for feature in lang_dict['features']:
        language = Language(language_name=feature['properties']['Name'])
        db.session.add(language)

    db.session.commit()
def set_val_tribe_id():
    """Set value for the next tribe_id after seeding database."""
    # Highest tribe_id currently in the table.
    result = db.session.query(func.max(Tribe.tribe_id)).one()
    max_id = int(result[0])
    # Bump the PostgreSQL sequence so the next INSERT gets max_id + 1.
    # NOTE(review): assumes the sequence is named 'tribes_tribe_id_seq' —
    # the default for a 'tribes' table with a 'tribe_id' serial column;
    # verify if the table or column was renamed.
    query = "SELECT setval('tribes_tribe_id_seq', :new_id)"
    db.session.execute(query, {'new_id': max_id + 1})
    db.session.commit()
if __name__ == "__main__":
    connect_to_db(app)
    # In case tables haven't been created, create them
    db.create_all()
    # Seed each table, then fix up the tribe_id sequence.
    load_tribes()
    load_languages()
    set_val_tribe_id()
| bsmejkal/kindred-culture | seed.py | seed.py | py | 1,824 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Tribe.query.delete",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "models.Tribe.query",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "models.Tr... |
31486116618 | from numpy import linspace
from xspec import *
from cstat_deviation import *
import matplotlib.pyplot as plt
import numpy as np
def compute_deviation(file_name):
    """
    This function computes the cstat deviation from an xcm file.

    Args:
        file_name (.xcm): xcm file (saved XSPEC session)

    Returns:
        the cstat deviation (in sigma) for the given xcm file
    """
    # Restore the saved XSPEC state (data + model) and refit.
    Xset.restore(file_name)
    Fit.perform()
    xspec_cstat = Fit.statistic
    dof = Fit.dof
    # data values should be in counts and not counts/s, so scale by exposure
    t_e = AllData(1).exposure
    data_values = []
    for i in range (len(AllData(1).values)):
        # AllData(1).values returns a tuple; build a list of count values
        data_values.append(AllData(1).values[i]*t_e)
    # model values should be in counts and limited to noticed bins;
    # plotting to the null device just populates Plot.model().
    Plot.device = '/null'
    Plot.xAxis = 'channel'
    Plot('counts')
    model_values = Plot.model()
    return db_compute_goodness_of_the_fit_from_cstat_v1(data_values,model_values,dof,xspec_cstat, verbose=False)
def select_models(file_names_list, deviation_min):
    """
    This function selects the models among a list that have a
    deviation >= deviation_min.

    Args:
        file_names_list (list): array of xcm file names to be loaded
        deviation_min (float): minimum deviation in unit of sigma

    Returns:
        A list of xcm file names where deviation >= deviation_min
    """
    return [
        file_name
        for file_name in file_names_list
        if compute_deviation(file_name) >= deviation_min
    ]
def select_models_plot(file_names_list, deviation_min):
    """Plot each model's cstat deviation with the threshold line.

    Args:
        file_names_list (list): xcm file names to evaluate.
        deviation_min (float): threshold deviation (sigma) drawn as a line.
    """
    # Collect per-file deviations and the numeric model IDs for the x axis.
    cstat_deviations = []
    file_numbers = []
    for file in file_names_list:
        cstat_deviations.append(compute_deviation(file))
        # NOTE(review): assumes the file name encodes a 3-digit model ID in
        # characters 1-4 (e.g. 'm042_...') — confirm against the naming scheme.
        file_numbers.append(int(file[1:4]))
    # Horizontal threshold line at deviation_min.
    dev_min = [deviation_min]*len(file_names_list)
    # The 'science' style requires the SciencePlots package to be installed.
    plt.style.use(['science','no-latex'])
    plt.xlabel('model ID')
    plt.ylabel('deviation / sigma')
    plt.plot(file_numbers, cstat_deviations, 'ro')
    plt.plot(file_numbers, dev_min, '-b')
    plt.show()
| Lucas-Dfr/CrossCorrelationSearch-v1 | model_selection/model_selection.py | model_selection.py | py | 2,421 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.style.use",
"line_number": 77,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.style",
"line_number": 77,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 77,
"usage_type": "name"
},
{
"api_na... |
7417438363 |
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
import torch
import torch.nn as nn
import torch.optim as optim
from pathlib import Path
from .model import EncoderModel, DecoderModel
class Modelrunner():
    """Coordinates per-device VAE encoders and a shared decoder.

    One encoder is trained per device id, plus a "metaencoder" that is
    trained on every device's data; all of them feed one shared decoder.
    Checkpoints live under ``self.models_root``, with per-device encoders
    grouped into numbered ``Session_NNNN`` directories.

    NOTE(review): every tensor is moved with ``.cuda()``, so this class
    assumes a CUDA-capable GPU — confirm before running on CPU-only hosts.
    """
    def __init__(self, model_def, load_latest = False ):
        """Create decoder + metaencoder and pick the session number.

        Args:
            model_def: definition object providing ``sequence_length`` and
                ``num_channels``.
            load_latest: if True, reuse the most recent session directory
                on disk; otherwise start a new session (last + 1).
        """
        self.models_root = "models"
        self.model_def = model_def
        last_session = self.getLastSession()
        if(load_latest):
            self.session = last_session
        else:
            self.session = last_session + 1
        # Per-device encoders, created lazily in run_step()/evaluate().
        self.encoders = {}
        self.metaencoder = self.setupEncoder()
        self.decoder = self.setupDecoder()
        # Summed (not averaged) smooth-L1 reconstruction loss.
        self.loss_function = nn.SmoothL1Loss(reduction='sum')
        # self.fig, self.inputPlots, self.outputPlots = self.setup_plot()
    def save_all(self):
        """Persist every per-device encoder, the metaencoder and the decoder."""
        print("Saving all models...")
        for encoder in self.encoders.values():
            encoder.save()
        self.metaencoder.save()
        self.decoder.save()
        # NOTE(review): message typo ("saves") left unchanged.
        print("All models saves")
    def run_step(self, sequences):
        """Run one optimisation step over a batch of per-device sequences.

        Args:
            sequences: mapping of device id -> numpy sequence, either 2-D
                (sequence_length, num_channels) or already batched 3-D.

        Returns:
            dict: device id -> (loss, latent z) for real devices, or
            (meta loss, None) for pseudo-device 0 (metaencoder only).
        """
        # if( self.n_iter % 50 == 0):
        #     print("Epoc: {:06d}-{:02d} Loss: {}".format(self.n_iter, self.name, self.losses[-1]))
        #     self.writer.add_scalar(f"Loss/train_{self.name}", self.losses[-1], self.n_iter)
        #     self.plot_graph(batch, self.output)
        #     self.writer.add_figure(f"Samples/train_{self.name}", self.fig, self.n_iter)
        outputs = {}
        decoder = self.decoder
        metaencoder = self.metaencoder
        decoder.model.train()
        metaencoder.model.train()
        decoder.zero_grad()
        metaencoder.zero_grad()
        for device, sequence in sequences.items():
            batch_t = torch.from_numpy(sequence.astype(np.float32)).cuda()
            # 2-D input is a single unbatched sequence; insert the batch dim.
            if(batch_t.ndim == 2):
                batch_t = batch_t.reshape(
                    (self.model_def.sequence_length, 1, self.model_def.num_channels)
                )
            if( device != 0):
                # Lazily create this device's encoder inside the session dir.
                if( device not in self.encoders):
                    self.encoders[device] = self.setupEncoder(f"encoder_{device:02d}")
                encoder = self.encoders[device]
                encoder.model.train()
                # Encoder: its optimizer is stepped immediately per device.
                encoder.zero_grad()
                loss, z, _ = self.run_model(encoder, decoder, batch_t)
                loss.backward()
                encoder.optimizer.step()
                # Metaencoder: gradients accumulate across all devices and
                # are applied once after the loop (see below).
                meta_loss, _, _ = self.run_model(metaencoder, decoder, batch_t)
                meta_loss.backward()
                z = z.squeeze().detach().cpu().numpy()
                loss = loss.item()
                outputs[device] = (loss, z)
            else:
                # Pseudo-device 0 trains the metaencoder only.
                meta_loss, meta_z, _ = self.run_model(metaencoder, decoder, batch_t)
                meta_loss.backward()
                loss = meta_loss.item()
                outputs[device] = (loss, None)
        # Apply the gradients accumulated over the whole device loop.
        metaencoder.optimizer.step()
        decoder.optimizer.step()
        return outputs
    def evaluate(self, sequences):
        """Run each device's encoder and the metaencoder without optimizing.

        Returns:
            tuple: (outputs, meta_outputs) where outputs maps device id to
            (loss, z, reconstruction) and meta_outputs maps device id to
            (encoder loss, meta loss, meta z).

        NOTE(review): runs without ``torch.no_grad()``/``model.eval()``, so
        autograd graphs are still built and any train-mode layers stay in
        training behaviour — confirm this is intended.
        """
        outputs = {}
        meta_outputs = {}
        decoder = self.decoder
        metaencoder = self.metaencoder
        for device, sequence in sequences.items():
            batch_t = torch.from_numpy(sequence.astype(np.float32)).cuda()
            if(batch_t.ndim == 2):
                batch_t = batch_t.reshape(
                    (self.model_def.sequence_length, 1, self.model_def.num_channels)
                )
            if( device not in self.encoders):
                self.encoders[device] = self.setupEncoder(f"encoder_{device:02d}")
            encoder = self.encoders[device]
            # Encoder
            loss, z, output = self.run_model(encoder, decoder, batch_t)
            z = z.squeeze().detach().cpu().numpy()
            output = output.squeeze().detach().cpu().numpy()
            loss = loss.item()
            outputs[device] = (loss, z, output)
            # Metaencoder
            meta_loss, meta_z, _ = self.run_model(metaencoder, decoder, batch_t)
            meta_z = meta_z.squeeze().detach().cpu().numpy()
            meta_loss = meta_loss.item()
            meta_outputs[device] = (loss, meta_loss, meta_z)
        return outputs, meta_outputs
    def run_model(self, encoder, decoder, batch_t):
        """Encode, sample a latent, decode, and score the reconstruction."""
        mu, logvar = encoder.model(batch_t)
        z = self.reparameterize(mu, logvar)
        output = decoder.model(z)
        loss = self.loss_function(output, batch_t)
        return loss, z, output
    def reparameterize(self, mu, logvar):
        """VAE reparameterization trick: z = mu + sigma * eps, eps ~ N(0, I)."""
        std = logvar.mul(0.5).exp_()
        # return torch.normal(mu, std)
        esp = torch.randn(*mu.size()).cuda()
        z = mu + std * esp
        return z
    def setupEncoder(self, name = None):
        """Build (and load, if a checkpoint exists) an encoder model.

        Args:
            name: per-device encoder name; when None the shared metaencoder
                path is used instead of a session-scoped one.
        """
        if(name):
            path = Path(self.models_root, f"Session_{self.session:04d}", name)
        else:
            path = Path(self.models_root, "metaencoder")
        model = EncoderModel(path, self.model_def)
        model.load()
        return model
    def setupDecoder(self):
        """Build (and load, if a checkpoint exists) the shared decoder."""
        path = Path(self.models_root, "decoder")
        model = DecoderModel(path, self.model_def)
        model.load()
        return model
    def getLastSession(self):
        """Return the highest Session_NNNN number on disk, or 0 if none."""
        path = Path(self.models_root)
        sessions = []
        for p in path.glob('Session_*'):
            if(p.is_dir()):
                try:
                    sessions.append(int(p.stem[-4:]))
                # Ignore directories whose 4-char suffix is not numeric.
                except:
                    pass
        if(len(sessions)):
            return max(sessions)
        else:
            return 0
    # (Dead plotting code kept for reference — not currently wired up.)
    # def setup_plot(self):
    #     fig = plt.figure()
    #     gs = gridspec.GridSpec(1, 2, figure=fig)
    #     inputPlot = gs[0].subgridspec(self.num_channels, 1)
    #     inputPlots = [0.5] * self.num_channels
    #     for ch in range(self.num_channels):
    #         inputPlots[ch] = fig.add_subplot(inputPlot[ch,:])
    #         inputPlots[ch].set_ylim(0.0, 1.0)
    #         inputPlots[ch].plot(range(self.seq_length ), np.zeros(self.seq_length ), 'b-')
    #     outputPlot = gs[1].subgridspec(self.num_channels, 1)
    #     outputPlots = [0.5] * self.num_channels
    #     for ch in range(self.num_channels):
    #         outputPlots[ch] = fig.add_subplot(outputPlot[ch,:])
    #         outputPlots[ch].set_ylim(0.0, 1.0)
    #         outputPlots[ch].plot(range(self.seq_length ), np.zeros(self.seq_length ), 'r-')
    #     # zAx = fig.add_subplot(gs[3])
    #     return fig, inputPlots, outputPlots
    # def plot_graph(self, batch, output ):
    #     for ch in range(self.num_channels):
    #         input_y = batch[:,0,ch]
    #         line = self.inputPlots[ch].get_lines()[0]
    #         line.set_ydata(input_y)
    #         output_y = output[:,0,ch]
    #         line = self.outputPlots[ch].get_lines()[0]
    #         line.set_ydata(output_y)
{
"api_name": "torch.nn.SmoothL1Loss",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "torch.from_numpy",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "numpy.float32",
"... |
2949776160 | from selenium.webdriver.firefox.options import Options as FirefoxOptions
import filesHandler as fh
import lpCrawler as lp
def scrape_charts(filepath):
    """Scrape the data of all the charts of Polish Radio Program 3 and
    write it to *filepath* as JSON."""
    # Run Firefox headless so no browser window is opened.
    ff_options = FirefoxOptions()
    ff_options.add_argument('--headless')
    driver = lp.LPCrawler(options=ff_options)
    data = driver.get_charts()
    # Persist the scraped chart data before tearing the driver down.
    fh.write_json(filepath, data)
    driver.quit()
| magdalena-natalia/PR3TimeMachine | lpJson.py | lpJson.py | py | 434 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.firefox.options.Options",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "lpCrawler.LPCrawler",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "filesHandler.write_json",
"line_number": 12,
"usage_type": "call"
}
] |
19360145371 | import logging
import pandas as pd
from zenml import step
class IngestData:
    """Load a CSV dataset from a filesystem path."""

    def __init__(self, path: str):
        """
        Args:
            path: Location of the CSV file to read.
        """
        self.path = path

    def get_data(self) -> pd.DataFrame:
        """Read the CSV at ``self.path`` into a pandas DataFrame.

        Returns:
            pd.DataFrame: The parsed dataset.
        """
        # Lazy %-style args avoid formatting when the log level is disabled.
        logging.info("Getting data from %s", self.path)
        return pd.read_csv(self.path)
@step
def ingest_df(data_path: str) -> pd.DataFrame:
    """ZenML pipeline step: load the dataset at ``data_path``.

    Args:
        data_path: Path to the data file (CSV).

    Returns:
        pd.DataFrame: The ingested data.

    Raises:
        Exception: any failure from reading the file is logged, then
        re-raised so the pipeline run fails visibly.
    """
    try:
        data = IngestData(data_path)
        df = data.get_data()
        return df
    except Exception as e:
        logging.error(f"Error while ingesting data: {e}")
        raise e
| MaruthiKo/customer-satisfaction_mlops | steps/ingest_data.py | ingest_data.py | py | 987 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.info",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "logging.error",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "zenml.step",
"line_num... |
71873135395 | import re
import sys
from deep_translator import GoogleTranslator
def split_text(text, max_len=4500):
    """Split *text* into chunks of roughly at most ``max_len`` characters
    without breaking lines.

    Each chunk ends with a newline after every line it contains (so joining
    the chunks yields the original text plus one trailing newline).  A
    single line longer than ``max_len`` becomes its own oversized chunk;
    unlike the previous version, no empty chunk is emitted before it.
    """
    chunks = []
    chunk = ""
    for line in text.split("\n"):
        # Flush the current chunk only if it is non-empty and adding this
        # line would push it past the limit.
        if chunk and len(chunk + line) > max_len:
            chunks.append(chunk)
            chunk = ""
        chunk += line + "\n"
    # Flush whatever remains.
    if chunk:
        chunks.append(chunk)
    return chunks
def translate(text, source, target):
    """Translate *text* chunk by chunk via Google Translate.

    Args:
        text: source text (any length; split to fit the API limit).
        source: source language code, or None for auto-detection.
        target: target language code.

    Returns:
        str: the translated text with runs of 3+ newlines collapsed to 2.
    """
    # Split into chunks under the ~5000 character API limit, on line
    # boundaries only.
    chunks = split_text(text)
    # Translate the text
    translated_text = ''
    for chunk in chunks:
        # NOTE(review): this normalization could be hoisted out of the
        # loop; it only has an effect on the first iteration.
        if source is None:
            source = 'auto'
        translated = GoogleTranslator(source=source, target=target).translate(text=chunk)
        translated_text += translated + "\n"
    # Remove extra new lines
    translated_text = re.sub(r'[\n]{3,}', '\n\n', translated_text.strip())
    translated_text = translated_text.strip()
    return translated_text
if __name__ == "__main__":
    args = sys.argv
    # Handling the case when no file is passed
    if len(args) == 1:
        print("Please pass a file to translate")
        sys.exit(1)
    elif len(args) == 2:
        print("Please pass a target language")
        sys.exit(1)
    elif len(args) >= 3:
        # NOTE(review): the file handle is never closed explicitly; fine
        # for a short-lived CLI, but a `with` block would be cleaner.
        text = str(open(args[1], encoding='utf8', newline='\n').read()).strip()
        # Handling `<filename> <target>` (auto-detect source language)
        if len(args) == 3:
            translated = translate(text, None, args[2])
            print(translated)
        # Handling `<filename> <from> <target>`
        elif len(args) == 4:
            translated = translate(text, args[2], args[3])
            print(translated)
| BaseMax/UnlimitedAutoTranslate | UnlimitedAutoTranslate.py | UnlimitedAutoTranslate.py | py | 2,207 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "deep_translator.GoogleTranslator",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
... |
39546171687 | '''
็ฌ่ซไธป็จๅบ
ไปๅทฅไฟก้จ่ทๅๅนถไธ่ฝฝใๅ
ๅพ่ฝฆ่พ่ดญ็ฝฎ็จ็ๆฐ่ฝๆบๆฑฝ่ฝฆ่ฝฆๅ็ฎๅฝใ๏ผใๆฐ่ฝๆบๆฑฝ่ฝฆๆจๅนฟๅบ็จๆจ่่ฝฆๅ็ฎๅฝใ๏ผใ้่ทฏๆบๅจ่ฝฆ่พ็ไบงไผไธๅไบงๅใๆไปถ๏ผๆไปถๅจๅญ่ณ './data/origin_data'
@Author: KivenChen
@Date: 2019-04-10
'''
import os
import asyncio
from . import output
from .spider import spider_1, spider_2
def main():
    """Crawl both catalogue sources and dump the collected links to files
    under <repo>/data/origin_data."""
    # <repo root> = parent of this module's package directory.
    base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
    data_path = os.path.join(base_path, 'data')
    data = {}
    loop = asyncio.get_event_loop()
    data.update(spider_1(loop, pageNum=8))
    # Data corrections: some links on the crawled pages are broken, so the
    # URLs below are patched in by hand (corrected URLs found by pattern).
    data[
        'ๆฐ่ฝๆบๆฑฝ่ฝฆๆจๅนฟๅบ็จๆจ่่ฝฆๅ็ฎๅฝ๏ผ2018ๅนด็ฌฌ1ๆน๏ผ'] = 'http://123.127.164.29:18082/CVT/Jsp/zjgl/nerds/201801.html'
    data[
        'ๆฐ่ฝๆบๆฑฝ่ฝฆๆจๅนฟๅบ็จๆจ่่ฝฆๅ็ฎๅฝ๏ผ2018ๅนด็ฌฌ4ๆน๏ผ'] = 'http://123.127.164.29:18082/CVT/Jsp/zjgl/nerds/201804.html'
    data[
        'ๆฐ่ฝๆบๆฑฝ่ฝฆๆจๅนฟๅบ็จๆจ่่ฝฆๅ็ฎๅฝ๏ผ2017ๅนด็ฌฌ1ๆน๏ผ'] = 'http://123.127.164.29:18082/CVT/Jsp/zjgl/nerds/201701.htm'
    data[
        'ๆฐ่ฝๆบๆฑฝ่ฝฆๆจๅนฟๅบ็จๆจ่่ฝฆๅ็ฎๅฝ๏ผ2017ๅนด็ฌฌ2ๆน๏ผ'] = 'http://123.127.164.29:18082/CVT/Jsp/zjgl/nerds/201702.htm'
    print('Spider 1 is done!')
    data.update(spider_2(loop, pageSize=1000))
    print('Spider 2 is done!')
    output.to_files(data, os.path.join(data_path, 'origin_data'))
    loop.close()
    print('=' * 30)
    print('้พๆฅ็ฌๅๅทฒๅฎๆ!')
    print('=' * 30)
if __name__ == "__main__":
    main()
| KivenCkl/New_Energy_Vehicles_Info_Crawler | Spider/main.py | main.py | py | 1,597 | python | zh | code | 28 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line... |
5216154869 | import pygame
import time
import random
pygame.init()
white = (255, 255, 255)
black = (0, 0, 0)
red = (255, 0, 0)
blue = (0, 0, 255)
dis_width = 800
dis_height = 600
dis = pygame.display.set_mode((dis_width, dis_height))
pygame.display.set_caption('collect the trash')
background = pygame.image.load("naturebackground.jpg")
nature = pygame.transform.rotozoom(background,0,1.75)
recycling = pygame.image.load("recycling.jpg")
soda = pygame.image.load("crushedcan.jpg")
banana = pygame.image.load("bananapeel.png")
##compost =
bag = pygame.image.load("trash.png")
compost = pygame.image.load("compost.png")
##candy =
##garbage =
Bin = pygame.image.load("trashBin.jpg")
clock = pygame.time.Clock()
bin_block = 25
bin_speed = 15
font_style = pygame.font.SysFont(None, 30)
enviro_facts = ["The average college student produces 640 pounds of solid waste each year :o",
"By 2050, the ocean will contain more plastic by weight than fish",
"In 2018, Americans disposed of 146.2 million tons of trash - 24% was food waste",
"The Great Pacific Garbage Patch contains almost 3.5 million tons of trash :'(",
"Roughly 80% of the items in landfill could be recycled :o",
"The average person generates over 4 pounds of trash every day :(",
"1/3 of all the food produced globally goes to waste :("]
def message(msg, color):
mesg = font_style.render(msg, True, color)
dis.blit(mesg, [dis_width/3, dis_height/3])
def dis_score(score):
value = font_style.render("Score: " + str(score), True, red)
dis.blit(value, [0,0])
def instruct():
instructions = font_style.render("W = trash, E = compost, R = recycling", True, red)
dis.blit(instructions, [0,20])
def recyclebin(x,y):
binimage = pygame.transform.rotozoom(recycling,0,0.075)
dis.blit(binimage,(x,y))
def sodacan(x,y):
crushsoda = pygame.transform.rotozoom(soda,0,0.050)
dis.blit(crushsoda,(x,y))
def bananapeel(x,y):
bananaskin = pygame.transform.rotozoom(banana,0,0.037)
dis.blit(bananaskin,(x,y))
def garbage_bag(x,y):
trash_bag = pygame.transform.rotozoom(bag,0,0.1)
dis.blit(trash_bag,(x,y))
def garbageBin(x,y):
garbage = pygame.transform.rotozoom(Bin, 0, 0.1)
dis.blit(garbage,(x,y))
def compostBin(x,y):
compBin = pygame.transform.rotozoom(compost, 0, 0.25)
dis.blit(compBin,(x,y))
def gameLoop(): # creating a function
game_over = False
game_close = False
x1 = dis_width / 2
y1 = dis_height - 100
x1_change = 0
trash_change = 0
trash_x = round(random.randrange(0, dis_width - bin_block) / 10.0) * 10.0
trashy = 0
bintype = 0 ##0 for recycling, 1 for compost, 2 for garbage
trashtype = 0
score = 1
while not game_over:
if game_close == True:
index = random.randint(0,6)
while game_close == True:
dis.fill(white)
dis.blit(nature,(0,0))
end = font_style.render("You Lost! Press Q-Quit or C-Play Again", True, red)
pygame.draw.rect(dis,red,[0,0,800,80])
fact = font_style.render(enviro_facts[index], True, white)
end_score = font_style.render("Final score: " +str(score-1), True, red)
dis.blit(end, [270,420])
dis.blit(end_score, [270,450])
dis.blit(fact, [0,30])
pygame.display.update()
for event in pygame.event.get():
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_q:
game_over = True
game_close = False
if event.key == pygame.K_c:
gameLoop()
for event in pygame.event.get():
if event.type == pygame.QUIT:
game_over = True
if event.type == pygame.KEYDOWN:
if event.key == pygame.K_r:
##change bin
bintype = 0
elif event.key == pygame.K_e:
bintype = 1
elif event.key == pygame.K_w:
bintype = 2
if event.key == pygame.K_LEFT:
x1_change = -bin_block
trash_change = 10*score**0.2
elif event.key == pygame.K_RIGHT:
x1_change = bin_block
trash_change = 10*score**0.2
if event.type == pygame.KEYUP:
x1_change = 0
trash_change = 10*score**0.2
if x1 >= dis_width :
x1_change = -bin_block
if x1<0:
x1_change = bin_block
if trashy >= dis_height:
game_close = True
x1 += x1_change
trashy += trash_change
dis.fill(white)
dis.blit(nature,(0,0))
if bintype == 0:
recyclebin(x1,y1)
elif bintype == 1:
compostBin(x1,y1)
elif bintype == 2:
garbageBin(x1, y1)
if trashtype == 0:
sodacan(trash_x,trashy)
elif trashtype == 1:
bananapeel(trash_x,trashy)
elif trashtype == 2:
garbage_bag(trash_x,trashy)
dis_score(score-1)
instruct()
pygame.display.update()
if abs(x1 - trash_x)<=25 and abs(y1-trashy)<= 15 and trashtype == bintype:
trash_x = round(random.randrange(100, dis_width - bin_block - 100) / 10.0) * 10.0
trashy = 0
trashtype = random.randrange(0,3)
score +=1
clock.tick(bin_speed)
pygame.quit()
quit()
gameLoop() | evanse10/Shehacks2021 | game.py | game.py | py | 5,694 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pygame.init",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "pygame.display.set_mode",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pygame.display",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "pygame.display... |
38920515345 | from flask import Flask
from flask import request # `request` is a global object that can be used to access the query string of a url
# Create a new Flask instance (passing the current namespace to the Flask initializer)
app = Flask(__name__)
# Create a route
# Note this uses 'pie syntax' to implement a decorator
# A decorator is a way of wrapping a function (here `app.route('/')`) around another function
# (here `index()`) in order to modify the latter's behaviour.
# It replaces the latter function with the result of calling the former function and passing the latter function to it.
#
# We can have multiple decorators for a single function.
# Flask is designed to be able to handle multiple routes (using multiple decorators) with a single view
#
# In the second decorator we have created a capture group, called `name`.
# What this means is that Flask will take whatever the string after the `/` is and put it into the `name` variable.
@app.route('/')
@app.route('/<name>')
def index(name='Default Person'):
# request is a global object and represents the query string (if any) included in the url
# the components of the query string are in a dictionary-like structure
# and we can access these using the .get(key, default=None) method
#
# Here, if there is a 'name' argument, we take that, otherwise we get the existing name variable
# and put that into the name variable.
#
# So if we access the page 127.0.0.1:localhost without a query string, we'll get 'Hello Default Person'
# If we access the page 127.0.0.1:localhost/?name=SpecialPerson, we'll get 'Hello SpecialPerson'
name = request.args.get('name', name)
pet = request.args.get('pet', None)
first_line = "Hello {}.".format(name)
second_line = None
if pet == None:
second_line = "I understand you don't have a pet at the moment, have you considered getting a labrador? They are the best."
elif pet == 'labrador':
second_line = "You have a labrador! What a great choice! They are the best, aren't they?"
else:
second_line = "Ah, so you have a {}.".format(pet)
return first_line + "\n" + second_line
# run the app
# `debug=True`: automatically restart the server if there is an error
# `port=8000`: listen on port 8000 (a common port for web servers)
# `host='0.0.0.0': listen on all addresses that can reach this app
app.run(debug=True, port=8000, host='0.0.0.0')
| Crossroadsman/FlaskProject | simple_app.py | simple_app.py | py | 2,440 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "flask.request.args.get",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "flask.reque... |
15871446797 | # coding=utf-8
from openpyxl import Workbook
#ๅจๅ
ๅญไธญๅๅปบไธไธชworkbookๅฏน่ฑก
wb = Workbook()
ws = wb.get_active_sheet()
print(ws.title)
ws.title = u'99ไนๆณ่กจ'
#่ฎพ็ฝฎๅๅ
ๆ ผ็ๅผ
for row in range(1,10):
for col in range(1,10):
ws.cell(row = row, column = col).value = row * col
#ๆๅไฟๅญ
wb.save(filename='99ไนๆณ่กจ.xls')
| stella-chmt/daily_scripts | python/practise_for_yingl_lessons/lesson_1/multiply_sheet.py | multiply_sheet.py | py | 359 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "openpyxl.Workbook",
"line_number": 5,
"usage_type": "call"
}
] |
18096921378 | import requests
from bs4 import BeautifulSoup as bs
import arrow
import csv
import pandas as pd
from time import sleep
def forager():
venue = "Forager"
prices = "NaN"
month = input("Please enter the number of the month in the form '1': ")
for l in range(1, 13):
if month == str(l):
month_name = arrow.get(month, "M").format("MMMMM")
month_name = month_name.replace(month, "")
break
elif (l == 12) & (str(month) != l):
print("Please enter a valid option")
forager()
year = input("Please enter a year in the form '18'")
for yr in range(18, 100):
if year == str(yr):
year_name = arrow.get(year, 'YY').format('YYYY')
break
elif (yr == 99) & (str(yr) != year):
print("Please enter a valid input")
forager()
site_url = "https://www.sjforager.com/new-events/?view=calendar&month=" + month_name + "-" + year_name
site = requests.get(site_url)
soup = bs(site.content, 'html.parser')
events = [x.text for x in soup.find_all('h1')]
links = soup.find_all('h1')
base_link = site_url.replace("/new-events/?view=calendar&month=" + month_name + "-" + year_name, "")
dates = []
times = []
for i in links:
sleep(1)
a = str(i.find('a'))
stripped = a.replace('<a href="', '')
stripped = stripped.split('"')
stripped = stripped[0]
full_link = base_link + stripped
sleep(1)
site = requests.get(full_link)
item_info = bs(site.content, 'lxml')
try:
event_date = (item_info.find(class_="event-date")).text
event_date = event_date.split(',')
year = event_date[-1]
mday = event_date[1]
date = mday.lstrip() + year
dates.append(arrow.get(date, "MMMM D YYYY").format("MM/DD/YY"))
except AttributeError:
dates.append('NaN')
try:
start_time = (item_info.find(class_="event-time-12hr-start")).text
end_time = (item_info.find(class_="event-time-12hr-end")).text
times.append(start_time + " - " + end_time)
except AttributeError:
times.append('NaN')
# t = ""
# # tries to read csv, if not creates or empty them headers are added
# try:
# df = pd.read_csv('forager.csv')
# except FileNotFoundError:
# t = "NaN"
# except pd.errors.EmptyDataError:
# t = 'NaN'
# if t == 'NaN':
with open('forager.csv', 'w') as ttable:
filewriter = csv.writer(ttable)
filewriter.writerow(["Venue", "Event", "Date", "Time", "Price"])
# Appends all of the elements in our lists to the csv
with open('forager.csv', 'a') as ttable:
filewriter = csv.writer(ttable)
for i in range(0, len(dates)):
filewriter.writerow([venue, events[i], dates[i], times[i], prices])
forager()
| Astatham98/EventWebScrape | webscrape1/forager.py | forager.py | py | 2,955 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "arrow.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "arrow.get",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number... |
27970246169 | # coding=utf-8
import logging
import os
import uuid
import xlwt
from django.core.cache import cache
from django.core.urlresolvers import reverse
from django.db import IntegrityError
from django.db import transaction
from BanBanTong import constants
from BanBanTong.db import models
from BanBanTong.db import utils as db_utils
from BanBanTong.forms.lesson_teacher import LessonTeacherForm
from BanBanTong.forms.lesson_teacher import LessonTeacherUploadForm
from BanBanTong.forms.lesson_teacher import LessonTeacherUploadVerifyForm
from BanBanTong.utils import create_failure_dict
from BanBanTong.utils import create_success_dict
from BanBanTong.utils import decorator
from BanBanTong.utils import get_page_info
from BanBanTong.utils import model_list_to_dict
from BanBanTong.utils import model_to_dict
from BanBanTong.utils import simplecache
logger = logging.getLogger(__name__)
@decorator.authorized_user_with_redirect
@decorator.authorized_privilege('system_class_lesson')
@transaction.atomic
def add(request, *args, **kwargs):
if request.method == 'POST':
try:
f = LessonTeacherForm(request.POST)
if f.is_valid():
lesson_teacher = f.save()
simplecache.LessonTeacher.update(lesson_teacher.class_uuid.uuid)
data = model_to_dict(lesson_teacher)
school_year = lesson_teacher.class_uuid.grade.term.school_year
data['class_uuid__grade__term__school_year'] = school_year
term_type = lesson_teacher.class_uuid.grade.term.term_type
data['class_uuid__grade__term__term_type'] = term_type
grade_name = lesson_teacher.class_uuid.grade.name
data['class_uuid__grade__name'] = grade_name
data['class_uuid__name'] = lesson_teacher.class_uuid.name
data['lesson_name__name'] = lesson_teacher.lesson_name.name
data['teacher__name'] = lesson_teacher.teacher.name
try:
stat_obj = models.Statistic.objects.get(key=lesson_teacher.class_uuid.pk)
stat_obj.cala('teacher_num', True)
except Exception as e:
logger.exception(e)
return create_success_dict(msg='ๆทปๅ ็ญ็บง่ฏพ็จๆ่ฏพๆๅธ่ฎฐๅฝๆๅ๏ผ',
data=data)
return create_failure_dict(msg='ๆทปๅ ็ญ็บง่ฏพ็จๆ่ฏพๆๅธ่ฎฐๅฝๅคฑ่ดฅ๏ผ',
errors=f.errors)
except IntegrityError:
msg = 'ๅทฒๆ็ธๅ็ๅนด็บง็ญ็บง-่ฏพ็จ-ๆๅธๅงๅ๏ผๆ ๆณ้ๅคๆทปๅ ๏ผ'
return create_failure_dict(msg=msg)
except Exception as e:
logger.exception(e)
return create_failure_dict(msg=u'ๆทปๅ ็ญ็บง่ฏพ็จๆ่ฏพๆๅธ่ฎฐๅฝๅคฑ่ดฅ๏ผ', debug=str(e))
@decorator.authorized_user_with_redirect
@decorator.authorized_privilege('system_class_lesson')
def delete(request, *args, **kwargs):
if request.method == 'POST':
if not cache.get('sudo'):
return create_failure_dict(msg=u'่ฏท่พๅ
ฅๆญฃ็กฎ็่ถ
็บง็ฎก็ๅadminๅฏ็ ๏ผ')
uu = request.POST.get('uuid')
if uu:
try:
obj = models.LessonTeacher.objects.get(uuid=uu)
objs = obj.teacherloginlog_set.count()
if objs > 0:
return create_failure_dict(msg=u'่ฏฅๆๅธๅทฒ็ปไบง็็ปๅฝๆ่ฏพ่ฎฐๅฝ,ไป
ๆฏๆ็ผ่พ.')
except:
return create_failure_dict(msg=u'้่ฏฏ็uuid๏ผ')
try:
if not obj.class_uuid.grade.term.allow_import_lesson():
return create_failure_dict(msg=u'่ฏฅไฟกๆฏๅญฆๅนดๅญฆๆๅทฒ่ฟๆ,ไธ่ฝๅ ้ค')
obj.delete()
simplecache.LessonTeacher.update(obj.class_uuid.uuid)
except Exception as e:
logger.exception('')
return create_failure_dict(msg=u'ๅ
้จ้่ฏฏ', debug=str(e))
return create_success_dict(msg=u'ๅ ้ค็ญ็บง่ฏพ็จๆ่ฏพๆๅธ่ฎฐๅฝๆๅ๏ผ')
@decorator.authorized_user_with_redirect
@decorator.authorized_privilege('system_class_lesson')
@transaction.atomic
def edit(request):
if request.method == 'POST':
uu = request.POST.get('uuid')
try:
l = models.LessonTeacher.objects.get(uuid=uu)
except:
return create_failure_dict(msg='้่ฏฏ็uuid๏ผ')
if not l.class_uuid.grade.term.allow_import_lesson():
return create_failure_dict(msg='่ฏฅไฟกๆฏๅญฆๅนดๅญฆๆๅทฒ่ฟๆ,ไธ่ฝ็ผ่พ')
form = LessonTeacherForm(request.POST, instance=l)
if form.is_valid():
form.save()
simplecache.LessonTeacher.update(l.class_uuid.uuid)
return create_success_dict(msg='็ผ่พ็ญ็บง่ฏพ็จๆ่ฏพๆๅธ่ฎฐๅฝๆๅ๏ผ')
return create_failure_dict(msg='็ผ่พ็ญ็บง่ฏพ็จๆ่ฏพๆๅธ่ฎฐๅฝๅคฑ่ดฅ๏ผ',
errors=form.errors)
@decorator.authorized_user_with_redirect
@decorator.authorized_privilege('system_class_lesson')
def export(request, *args, **kwargs):
'''ๅฏผๅบๆๆ็ญ็บง่ฏพ็จๆ่ฏพๆๅธไฟกๆฏ'''
xls = xlwt.Workbook(encoding='utf8')
title = u'ๅญฆๆ็ญ็บง่ฏพ็จๆ่ฏพ่ๅธไฟกๆฏ'
sheet = xls.add_sheet(title)
header = [u'ๅงๅ', u'็ๆฅ', u'ๆ่ฏพๅนด็บง', u'ๆ่ฏพ็ญ็บง',
u'ๆ่ฏพ่ฏพ็จ', u'่ฎกๅ่ฏพๆถ']
for i in range(len(header)):
sheet.write(0, i, header[i])
row = 1
try:
t = models.Term.get_current_term_list()[0]
except:
return create_failure_dict(msg='ๅฝๅๆถ้ดไธๅจไปปไฝๅญฆๆๅ
')
q = models.LessonTeacher.objects.filter(class_uuid__grade__term=t)
grade_name = request.REQUEST.get('grade_name', None)
class_name = request.REQUEST.get('class_name', None)
lesson_name = request.REQUEST.get('lesson_name', None)
teacher_name = request.REQUEST.get('teacher_name', None)
if grade_name:
q = q.filter(class_uuid__grade__name=grade_name)
if class_name:
q = q.filter(class_uuid__name=class_name)
if lesson_name:
q = q.filter(lesson_name__name=lesson_name)
if teacher_name:
q = q.filter(teacher__name__contains=teacher_name)
q = q.values('teacher__name', 'teacher__birthday',
'class_uuid__grade__name', 'class_uuid__name',
'lesson_name__name', 'schedule_time')
for i in q:
sheet.write(row, 0, i['teacher__name'])
sheet.write(row, 1, str(i['teacher__birthday']).replace('-', ''))
sheet.write(row, 2, i['class_uuid__grade__name'])
sheet.write(row, 3, i['class_uuid__name'])
sheet.write(row, 4, i['lesson_name__name'])
sheet.write(row, 5, i['schedule_time'])
row += 1
cached_id = str(uuid.uuid1())
tmp_file = os.path.join(constants.CACHE_TMP_ROOT, cached_id)
xls.save(tmp_file)
filename = u'%s.xls' % title
return create_success_dict(url=reverse('base:xls_download',
kwargs={'cached_id': cached_id,
'name': filename}))
@decorator.authorized_user_with_redirect
@decorator.authorized_privilege('system_class_lesson')
def import_from(request, *args, **kwargs):
if request.method == 'POST':
f = LessonTeacherUploadForm(request.POST, request.FILES)
if f.is_valid():
objs = f.save()
simplecache.LessonTeacher.update()
# ๆดๆฐไธไธStatistic่กจไธญ็็ป่ฎฐๆๅธๆปๆฐ
klasses = map(lambda i: i.class_uuid.uuid, objs)
klasses = models.Class.objects.filter(uuid__in=klasses)
for cls in klasses:
try:
stat_obj = models.Statistic.objects.get(key=cls.pk)
stat_obj.cala('teacher_num', True)
except Exception as e:
logger.exception(e)
return create_success_dict(data=model_list_to_dict(objs))
return create_failure_dict(msg='ๅฏผๅ
ฅ็ญ็บง่ฏพ็จๆ่ฏพ่ๅธๅคฑ่ดฅ๏ผ',
errors=f.errors)
@decorator.authorized_user_with_redirect
@decorator.authorized_privilege('system_class_lesson')
def list_current(request, *args, **kwargs):
page_info = get_page_info(request)
grade_name = request.GET.get('grade_name')
class_name = request.GET.get('class_name')
lesson_name = request.GET.get('lesson_name')
teacher_name = request.GET.get('teacher_name')
terms = models.Term.objects.filter(deleted=False)
q = models.LessonTeacher.objects.filter(class_uuid__grade__term__in=terms)
if grade_name:
q = q.filter(class_uuid__grade__name=grade_name)
if class_name:
q = q.filter(class_uuid__name=class_name)
if lesson_name:
q = q.filter(lesson_name__name=lesson_name)
if teacher_name:
q = q.filter(teacher__name__contains=teacher_name)
order_lst = (
'class_uuid__grade__number',
'class_uuid__number',
'lesson_name__name'
)
q = q.order_by(*order_lst)
q = q.values('uuid', 'class_uuid__grade__term__school_year',
'class_uuid__grade__term__term_type',
'class_uuid__grade__name', 'class_uuid__name',
'lesson_name__name', 'teacher__name', 'teacher__uuid',
'teacher__birthday',
'schedule_time')
grade_class_assigned_time_dict = {}
q = list(q)
for one in q:
key = u'%s_%s' % (one['class_uuid__grade__name'], one['class_uuid__name'])
if not grade_class_assigned_time_dict.has_key(key):
uuid = one.get('uuid')
one_class = models.LessonTeacher.objects.get(uuid=uuid).class_uuid
one['remain_time'] = int(one_class.grade.term.schedule_time) - int(one_class.cala_assigned_time())
grade_class_assigned_time_dict[key] = one['remain_time']
else:
one['remain_time'] = grade_class_assigned_time_dict[key]
page_data = db_utils.pagination(q, **page_info)
return create_success_dict(data={
'records': model_list_to_dict(page_data['records']),
'page': page_data['page_num'],
'page_size': page_data['page_size'],
'record_count': page_data['record_count'],
'page_count': page_data['page_count'],
})
@decorator.authorized_user_with_redirect
@decorator.authorized_privilege('system_class_lesson')
def verify(request, *args, **kwargs):
if request.method == 'POST':
f = LessonTeacherUploadVerifyForm(request.POST, request.FILES)
if f.is_valid():
objs = f.save()
return create_success_dict(data={'records': model_list_to_dict(objs)})
return create_failure_dict(msg='้ช่ฏ็ญ็บง่ฏพ็จๆ่ฏพ่ๅธๅคฑ่ดฅ๏ผ',
errors=f.errors)
@decorator.authorized_user_with_redirect
@decorator.authorized_privilege('system_class_lesson')
def get_remain_time(request):
'''่ทๅๆฏไธช็ญ็บง็ๅฉไฝๅฏๅ้
่ฏพๆถ'''
if request.method == 'GET':
data = request.GET
else:
data = request.POST
grade_name = data.get('grade_name')
class_name = data.get('class_name')
try:
term = models.Term.get_current_term_list()[0]
class_obj = models.Class.objects.get(name=class_name, grade__name=grade_name, grade__term=term)
except Exception as e:
logger.exception(e)
return create_failure_dict(msg='้่ฏฏ็็ญ็บงๅ็งฐ')
remain_time = class_obj.cala_remain_time()
return create_success_dict(data={'grade_name': class_obj.grade.name, 'class_name': class_obj.name, 'remain_time': remain_time})
| xiaolin0199/bbt | apps/BanBanTong/views/system/lesson_teacher.py | lesson_teacher.py | py | 11,751 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "BanBanTong.forms.lesson_teacher.LessonTeacherForm",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "BanBanTong.utils.simplecache.LessonTeacher.update",
"line_number": 36,
... |
3028681836 | import numpy as np
import tensorflow as tf
import extra_keras_datasets.emnist as centr_emnist # See https://github.com/machinecurve/extra_keras_datasets
import nest_asyncio
nest_asyncio.apply()
tf.compat.v1.enable_v2_behavior()
tf.compat.v1.graph_util.extract_sub_graph
# Final evaluation in test dataset
print("Evaluating the entire test dataset:")
# Load the model
final_model = tf.keras.models.load_model('./models/model_EMNIST_100_0.75.h5')
print(final_model.get_weights())
# Load the centralized version of the EMNIST dataset
(x_train, y_train), (x_test, y_test) = centr_emnist.load_data(type='digits')
print(f"Evaluating the model in {x_test.shape[0]} test samples")
# Evaluate the final model on the test dataset
x_train = x_train.reshape(-1, 784).astype('float32') / 255
x_test = x_test.reshape(-1, 784).astype('float32') / 255
final_eval = final_model.evaluate(x_test, y_test)
# print(final_eval)
# print('Test loss:', final_eval[0])
# print('Test accuracy:', final_eval[1])
# SAVE THE RESULTS
# # Final accuracy on the entire test dataset
# np.savetxt('final_test_loss_K' + str(m) + '_' + str(percentage) + '.txt', np.reshape(final_eval[0], (1,1)))
# np.savetxt('final_test_accuracy_K' + str(m) + '_' + str(percentage) + '.txt', np.reshape(final_eval[1], (1,1)))
| fwilhelmi/blockchain_enabled_federated_learning | Code & Results/TensorFlow code/evaluate_models_test_dataset.py | evaluate_models_test_dataset.py | py | 1,279 | python | en | code | 27 | github-code | 1 | [
{
"api_name": "nest_asyncio.apply",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat.v1.enable_v2_behavior",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tensorflow.compat",
"line_number": 8,
"usage_type": "attribute"
},
{
"ap... |
20059712231 | import os
from setuptools import setup, find_packages
exec(open('hops/version.py').read())
def read(fname):
try:
return open(os.path.join(os.path.dirname(__file__), fname), encoding='utf8').read() #python3
except:
return open(os.path.join(os.path.dirname(__file__), fname)).read() #python2
setup(
name='hopsworks-cloud-sdk',
version=__version__,
install_requires=[
'requests',
'numpy',
'pandas',
'pyhopshive[thrift]',
'boto3>=1.9.226',
'SQLAlchemy',
'PyMySQL',
'pyopenssl',
'idna'
],
extras_require={
'docs': [
'sphinx',
'sphinx-autobuild',
'recommonmark',
'sphinx_rtd_theme',
'jupyter_sphinx_theme'
],
'test': [
'mock',
'pytest',
],
'plotting': ['matplotlib', 'seaborn']
},
author='Steffen Grohsschmiedt',
author_email='steffen@logicalclocks.com',
description='An SDK to integrate cloud solutions such as SageMaker and Databricks with Hopsworks.',
license='Apache License 2.0',
keywords='Hopsworks, SageMaker, Databricks',
url='https://github.com/logicalclocks/hopsworks-cloud-sdk',
download_url='http://snurran.sics.se/hops/hopsworks-cloud-sdk/hops-' + __version__ + '.tar.gz',
packages=find_packages(exclude=['tests']),
long_description=read('README.rst'),
classifiers=[
'Development Status :: 5 - Production/Stable',
'Topic :: Utilities',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 3',
]
)
| logicalclocks/hopsworks-cloud-sdk | setup.py | setup.py | py | 1,664 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
13261468913 | import websocket, json, pprint,talib, numpy
SOCKET = "wss://stream.binance.com:9443/ws/ethusdt@kline_1m"
avg = 0
open_total = []
closed_total = []
high_total = []
low_total = []
SSL_PERIOD = 14
STOCK = "ETHUSD"
PAY = 100.00
POSTION = False
TRADE_QUANTITY = 0
profit = 0
def on_open(webS):
print(" opened connection")
def on_close(webS):
print(" closed connection")
def on_message(webS,message):
print("received connection")
json_message = json.loads(message)
# pprint.pprint(json_message)
# global closed_total
candle = json_message["k"]
final_value_tick = candle["x"]
open_price = candle["o"]# Open price
closed_price = candle["c"]#Close price
high_price = candle["h"]#High price
low_price = candle["l"]#Low price
base_vol = candle["v"]#Base_asset_volume
num_trades = candle["n"]#Number_of_trades
quote_vol = candle["q"]#Quote_asset_volume
taker_base = candle["V"]#Taker buy base asset volume
taker_quote = candle["Q"]#Taker_buy_quote_asset_volume
if final_value_tick:
open_total.append(float(open_price))
high_total.append(float(high_price))
low_total.append(float(low_price))
print("closed at this price {}".format(closed_price))
closed_total.append(float(closed_price))
print("Open: {}".format(open_price))
print('------')
print("High: {}".format(high_price))
print('------')
print("Low: {}".format(low_price))
print('------')
if len(closed_total) > SSL_PERIOD:
np_highs = numpy.array(high_total)
np_lows = numpy.array(low_total)
np_closes = numpy.array(closed_total)
np_opens = numpy.array(open_total)
rsi = talib.RSI(np_closes,SSL_PERIOD)
atr = talib.ATR(np_highs,np_lows,np_closes,SSL_PERIOD)
avg = talib.AVGPRICE(np_opens,np_highs,np_lows,np_closes)
ema = talib.EMA(np_closes,SSL_PERIOD)
dp = talib.PLUS_DM(np_highs,np_lows,SSL_PERIOD)
dm = talib.MINUS_DM(np_highs,np_lows,SSL_PERIOD)
adx = talib.ADX(np_highs,np_lows,np_closes,SSL_PERIOD)
ema += atr
print('---------')
print("The last rsi is {}".format(rsi[-1]))
print('----')
print("The last atr is {}".format(atr[-1]))
print('----')
print("last Average price is {}".format(avg[-1]))
print('----')
print("last true EMA is {}".format(ema[-1]))
print('----')
print("last DMI+ is {}".format(dp[-1]))
print('----')
print("last DMI- is {}".format(dm[-1]))
print('----')
print("last ADX- is {}".format(adx[-1]))
# print('--RSI--')
# print(rsi)
# print('--ATR--')
# print(atr)
# print('--AVG--')
# print(avg)
# print('----')
# print('--EMA--')
# print(ema)
# print('----')
# print('--DM(+)--')
# print(dp)
# print('----')
# print('--DM(-)--')
# print(dm)
# print('--ADX--')
# print(adx)
print('----')
if dp[-1] > dm[-1]:
total = float(dp[-1] - dm[-1]) * 100
print("Uptrending: % {:.3f}".format(total))
if (90 > rsi[-1] > 60) and (adx[-1] >25):
print("now we invest.Calculate price to buy")
if((ema[-1]) < closed_total[-1]):
profit += ema[-1]
print("BUY!!!")
print("Bought: ${}".format(ema[-1]))
print("------")
else:
print('Current RSI: {:.2}\nCurrent ADX: {:.2}\nCurrent true EMA: ${}'.format(rsi[-1],adx[-1],ema[-1]))
# if dp[-1] < dm[-1]:
# print("downtrend")
if dm[-1] > dp[-1]:
print("downtrend")
print("Current closed price: {}".format(closed_total[-1]))
print("update")
profit -= closed_total[-1]
print("Profit: ${}".format(profit))
# def ema(webS):
# day = 50
webS = websocket.WebSocketApp(SOCKET,on_open=on_open,on_close=on_close, on_message=on_message)
webS.run_forever()
| TDysart1/botbot | ethrium.py | ethrium.py | py | 4,606 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.loads",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 68,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 6... |
20920450281 | #!/usr/bin/env python3
# publisher for a specific img folder
import rospy
from std_msgs.msg import String
from detection_only.msg import Bbox_6, Bbox6Array
from sensor_msgs.msg import Image
import json
import time
import cv2
from os import listdir
from os.path import isfile, join
from cv_bridge import CvBridge
import pdb
def img_list(img_folder):
file_list = []
img_name_list = []
for image in listdir(img_folder):
img_name = int(image.split('.')[0])
# while len(img_name) != 4:
# img_name = '0' + img_name
img_name_list.append(img_name)
# pdb.set_trace()
img_name_list = sorted(img_name_list)
for n in img_name_list:
f = str(n) + '.png'
if isfile(join(img_folder, f)):
file_list.append(join(img_folder, f))
# for f in listdir(img_folder):
# if isfile(join(img_folder, f)):
# file_list.append(join(img_folder, f))
# pdb.set_trace()
return file_list
if __name__ == '__main__':
try:
rospy.init_node('img_test_node', anonymous=False)
freq = 30
timer = rospy.timer.Rate(freq)
# source_folder = "/home/ziyan/Yolov5_DeepSort_Pytorch_ros/Yolov5_DeepSort_Pytorch/test_imgs_less"
source_folder = "/home/ziyan/Downloads/exp1"
img_list = img_list(source_folder)
pdb.set_trace()
img_pub = rospy.Publisher('raw_image', Image, queue_size=1)
br = CvBridge()
n = len(img_list)
# while not rospy.is_shutdown():
for i in range(len(img_list)):
img = cv2.imread(img_list[i])
img = img[:,:,[2,1,0]]
data = Image()
data = br.cv2_to_imgmsg(img)
img_pub.publish(data)
print("image publish!")
# if i > 85:
# pdb.set_trace()
timer.sleep()
except RuntimeError:
rospy.logfatal("get runtime error")
except rospy.ROSInterruptException:
pass
| ziyan0302/Yolov5_DeepSort_Pytorch_ros | src/detection_only/src/img_pubtest.py | img_pubtest.py | py | 1,994 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "os.listdir",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
11274134402 | import z3
import random
special_var_table = {}
_var_count = 0
def new_variable(var_type):
global _var_count
_var_count += 1
if var_type == "Bool":
return z3.Bool("Bool" + str(_var_count))
elif var_type == "Int":
return z3.Int("Int" + str(_var_count))
else:
assert False
def _recursively_build_only_one(variable_list, constraint_list):
if len(variable_list) == 1:
return variable_list[0]
mid = len(variable_list) // 2
left = _recursively_build_only_one(variable_list[:mid], constraint_list)
right = _recursively_build_only_one(variable_list[mid:], constraint_list)
constraint_list.append(z3.Not(z3.And(left, right)))
return_value = new_variable("Bool")
constraint_list.append(return_value == z3.Or(left, right))
return return_value
def build_only_one(variable_list, constraint_list, one_value=True):
assert len(variable_list) > 0
result = _recursively_build_only_one(variable_list, constraint_list)
constraint_list.append(result == one_value)
def get_random(variable_type):
if variable_type == "Int":
return random.randint(-10 ** 8, 10 ** 8)
elif variable_type == "Bool":
return random.randint(0, 1) == 0
else:
assert False
def _list_to_string(expr):
if type(expr) == str:
return expr
if type(expr) == tuple:
return str(expr[1])
if type(expr) == list:
string_expr = list(map(lambda x: _list_to_string(x), expr))
return "(" + " ".join(string_expr) + ")"
assert False
def print_function(function_result, function_info):
arg_list = list(map(lambda x: [x.name, x.type], function_info.arg_list))
result = ["define-fun", function_info.name, arg_list, function_info.return_type, function_result]
print(_list_to_string(result))
def get_new_symbolic_input(input_list):
inp = {}
for var in input_list:
variable_name = var.name
variable_type = var.type
inp[variable_name] = new_variable(variable_type)
return inp
def parse_input_from_model(arg_list, symbolic_inp, model):
    """Extract a concrete input assignment from a z3 model.

    Args:
        arg_list: variable descriptors with ``.name`` and ``.type``.
        symbolic_inp: name -> z3 symbol used in the solver query.
        model: a z3 model (e.g. from ``Solver.model()``).

    Returns:
        dict mapping variable name to a concrete Python int/bool.
        Variables the model leaves unconstrained get random values.
    """
    inp = {}
    for var_info in arg_list:
        symbolic_var = symbolic_inp[var_info.name]
        if model[symbolic_var] is not None:
            if var_info.type == "Int":
                inp[var_info.name] = model[symbolic_var].as_long()
            elif var_info.type == "Bool":
                inp[var_info.name] = z3.is_true(model[symbolic_var])
            else:
                assert False
        else:
            # The model does not constrain this variable: any value satisfies it.
            inp[var_info.name] = get_random(var_info.type)
    return inp
| hsh814/ExtractFix | src/synthesis/second_order/util/common.py | common.py | py | 2,587 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "z3.Bool",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "z3.Int",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "z3.Not",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "z3.And",
"line_number": 23,
"usage_type... |
1972302698 | # This demo shows how to plot the contour plot for given function, with resultant of the partial derivatives displayed
# as arrows
import numpy as np
import matplotlib.pyplot as plt
import math
def func(x, y):
    """Scalar field f(x, y) = y - x - 2x^2 - 2xy - y^2 (works elementwise
    on numpy arrays as well as plain numbers)."""
    return y - x - 2 * x ** 2 - 2 * x * y - y ** 2
# Sample the rectangle [-2, 0] x [1, 3] on a 20x20 grid.
x = np.linspace(-2, 0, 20)
y = np.linspace(1, 3, 20)
[X, Y] = np.meshgrid(x, y)
Z = func(X, Y)
# Numerical partial derivatives with spacing 0.25.
# NOTE(review): linspace above implies spacing 2/(20-1) ~ 0.105, not 0.25 —
# the gradient magnitudes are therefore scaled; confirm the intended spacing.
# NOTE(review): np.gradient returns the row (y) derivative first, so the
# fx/fy naming may be swapped — confirm.
[fx, fy] = np.gradient(Z, 0.25)
vs1 = plt.contour(x, y, Z)
# Arrows point along the negative gradient (steepest descent).
vs2 = plt.quiver(x, y, -fx, -fy)
plt.title("Contour plot & partial derivatives")
plt.show() | Homingdung/numerical_analysis_py | fieldsVisualize.py | fieldsVisualize.py | py | 521 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.meshgrid",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.gradient",
"li... |
29539726668 | from traceback import print_tb
from matplotlib import pyplot as pl
import numpy as np
import re
def poly_regression(dataset1, num_points=50):
    """Fit an exponential trend y = a * exp(b * x) to a positive series.

    The fit is a linear least-squares fit in log space
    (log y = b*x + log a), evaluated on x = 0 .. num_points-1.

    Args:
        dataset1: 1-D array-like of positive values (previously had to be a
            numpy array; array-likes such as lists are now accepted too).
        num_points: number of points of the fitted curve to return
            (default 50, matching the original hard-coded value).

    Returns:
        list of the fitted curve values a * exp(b * x) for x in
        range(num_points).
    """
    data = np.asarray(dataset1, dtype=float)
    x1 = np.arange(data.size)
    # polyfit returns coefficients highest-degree first: [slope, intercept].
    slope, intercept = np.polyfit(x1, np.log(data), 1)
    amplitude = np.exp(intercept)
    return [amplitude * np.exp(slope * x) for x in range(num_points)]
# Y = reg.predict(X)
# pl.plot(X,Y, color='r')
def readfiles(dir_name, num_runs, num_eps):
    """Parse per-run experiment logs into step-count and map-state arrays.

    For each run file ``<dir_name><run>.txt`` (runs numbered 1..num_runs)
    this scans for:
      * the line marking the last episode (``EPISODE: <num_eps-1>``), after
        which the per-episode ``NUM STEPS TO GOAL:`` values are collected;
      * ``number of nodes: 16`` — records the episode at which the full map
        (16 nodes, hard-coded) was first acquired;
      * every ``number of nodes:`` line — the map size over time.

    Returns:
        (stepsToGoalMatrix, numStates, lastStateFounded) where
        stepsToGoalMatrix has shape (num_runs, num_eps); the other two are
        flat arrays accumulated across all runs.
    """
    stepsToGoalMatrix = np.array([[]])
    numStates = np.array([])
    lastStateFounded = np.array([])
    # Verbose-mode regex matching signed integers/floats with optional exponent.
    numeric_const_pattern = r"""
    [-+]? # optional sign
    (?:
        (?: \d* \. \d+ ) # .1 .12 .123 etc 9.1 etc 98.1 etc
        |
        (?: \d+ \.? ) # 1. 12. 123. etc 1 12 123 etc
    )
    # followed by optional exponent part if desired
    (?: [Ee] [+-]? \d+ ) ?
    """
    rx = re.compile(numeric_const_pattern, re.VERBOSE)
    for x in range(0,num_runs):
        file_name = dir_name + str(x+1) + '.txt'
        print(file_name)
        try:
            with open(file_name, 'r') as f:
                datafile = f.readlines()
            stepsToGoal=[]
            file_size = len(datafile)
            founded_episode = False
            target_episode = 'EPISODE: ' + str(num_eps - 1) + '\n'
            target_eps_to_states = 'number of nodes: 16'
            last_episode = 0
            for x in range(file_size):
                # Remember where the final episode's section starts.
                if datafile[x] == target_episode:
                    new_index = x
                # finding the last episode where the whole map was acquired
                if 'EPISODE:' in datafile[x] and founded_episode==False:
                    last_episode = [int(s) for s in datafile[x].split() if s.isdigit()][0]
                if target_eps_to_states in datafile[x]:
                    lastStateFounded = np.append(lastStateFounded, last_episode)
                    founded_episode = True
                #finding the number of states aquired
                if 'number of nodes:' in datafile[x]:
                    num_states = [int(s) for s in datafile[x].split() if s.isdigit()][0]
                    numStates = np.append(numStates, num_states)
            # Collect the step counts listed after the last episode marker.
            # NOTE(review): new_index is unbound if the marker is missing —
            # the resulting NameError is swallowed by the bare except below.
            for y in range(new_index+1, file_size):
                if 'found bundled python' in datafile[y]:
                    break
                if 'NUM STEPS TO GOAL:' in datafile[y]:
                    stepsToGoal.append(rx.findall(datafile[y]))
            # print(stepsToGoal)
            # Flatten the matched number strings into a float array.
            aux_list = np.array([])
            for line in stepsToGoal:
                # x = float(x)
                for x in line:
                    x = float(x)
                    aux_list = np.append(aux_list, x)
            stepsToGoalMatrix = np.append(stepsToGoalMatrix, aux_list)
        except:
            # NOTE(review): bare except silently skips unreadable or
            # malformed files, which then breaks the reshape below.
            print('could not open file')
    stepsToGoalMatrix=stepsToGoalMatrix.reshape(num_runs,num_eps)
    return stepsToGoalMatrix, numStates, lastStateFounded
def readfiles_grid(dir_name, ini_run, num_runs, num_eps):
    """Collect per-episode step counts from grid-run log files.

    Reads ``<dir_name><run>.txt`` for runs ``ini_run+1 .. num_runs`` and
    extracts every integer on the *last* line of each file, which is
    expected to hold ``num_eps`` step counts.

    Args:
        dir_name: path prefix (run number and ``.txt`` are appended).
        ini_run: first run index to read (0-based offset).
        num_runs: last run number to read (exclusive upper bound of the
            0-based range, i.e. files 1..num_runs are read when ini_run=0).
        num_eps: number of episodes recorded per run.

    Returns:
        numpy array of shape ``(num_runs - ini_run, num_eps)``.
    """
    steps_per_run = np.array([[]])
    for run in range(ini_run, num_runs):
        file_name = dir_name + str(run + 1) + '.txt'
        print(file_name)
        try:
            with open(file_name, 'r') as f:
                datafile = f.readlines()
            # The last line of the log holds the per-episode step counts.
            numbers = [int(n) for n in re.findall(r'\d+', str(datafile[-1]))]
            steps_per_run = np.append(steps_per_run, [numbers])
        except Exception:
            # Best-effort, as in the original: report and skip bad files
            # (note a skipped file makes the reshape below fail).
            print('could not open file')
    # BUG FIX: the original reshaped with (ini_run - num_runs), which is
    # negative and makes numpy raise ValueError; the row count is
    # num_runs - ini_run (one row per run actually read).
    steps_per_run = steps_per_run.reshape(num_runs - ini_run, num_eps)
    print(len(steps_per_run))
    return steps_per_run
def readfiles_ratslam(dir_name, num_runs, num_eps):
    """Read RatSLAM-style logs: per-episode step counts for each run.

    For each ``<dir_name><run>.txt`` this locates the line *after* the last
    occurrence of ``EPISODE: <num_eps-1>`` and parses every integer on it.

    Returns:
        numpy array of shape (num_runs, num_eps).
    """
    stepsToGoalMatrix = np.array([[]])
    for x in range(0,num_runs):
        file_name = dir_name + str(x+1) + '.txt'
        print(file_name)
        try:
            with open(file_name, 'r') as f:
                datafile = f.readlines()
            stepsToGoal=[]
            file_size = len(datafile)
            # founded_episode = False
            target_episode = 'EPISODE: ' + str(num_eps - 1) + '\n'
            # target_eps_to_states = 'number of nodes: 16'
            # last_episode = 0
            # The step counts sit on the line right after the marker.
            for x in range(file_size):
                if datafile[x] == target_episode:
                    new_index = x+1
            # NOTE(review): new_index is unbound if the marker never occurs;
            # the NameError is swallowed by the bare except below.
            line = str(datafile[new_index])
            numbers = re.findall(r'\d+', line)
            for x in range(len(numbers)):
                numbers[x] = int(numbers[x])
            stepsToGoal.append(numbers)
            print(stepsToGoal)
            stepsToGoalMatrix = np.append(stepsToGoalMatrix, stepsToGoal)
        except:
            print('could not open file')
    stepsToGoalMatrix=stepsToGoalMatrix.reshape(num_runs,num_eps)
    return stepsToGoalMatrix
def getPercetange(dir_name, num_runs, num_eps):
    """Compute per-episode success percentages across runs.

    (Name typo kept for caller compatibility.) Each run file is scanned for
    its last ``SUCCESS TO GOAL: `` line, whose integers (presumably 0/1
    success flags per episode — confirm against the log writer) are
    averaged over runs and scaled to percent.

    Returns:
        numpy array of length num_eps with values in [0, 100].
    """
    suss_runs = np.array([[]])
    for x in range(0,num_runs):
        file_name = dir_name + str(x+1) + '.txt'
        print(file_name)
        try:
            with open(file_name, 'r') as f:
                datafile = f.readlines()
            stepsToGoal=[]
            file_size = len(datafile)
            # founded_episode = False
            target_episode = "SUCCESS TO GOAL: "
            # target_eps_to_states = 'number of nodes: 16'
            # last_episode = 0
            # Keep the index of the *last* matching line in the file.
            for x in range(file_size):
                if target_episode in datafile[x]:
                    new_index = x
            line = str(datafile[new_index])
            numbers = re.findall(r'\d+', line)
            for x in range(len(numbers)):
                numbers[x] = int(numbers[x])
            stepsToGoal.append(numbers)
            suss_runs = np.append(suss_runs, stepsToGoal)
        except:
            # Best-effort: unreadable/malformed files are skipped.
            print('could not open file')
    suss_runs=suss_runs.reshape(num_runs,num_eps)
    # Fraction of successful runs per episode, as a percentage.
    suss_runs = 100*suss_runs.sum(0)/num_runs
    print(suss_runs)
    return suss_runs
def get_full_map_episode(dir_name, num_runs, num_eps):
    """Return, per run, the episode at which the full map was acquired.

    Scans each run file for its last ``FULL MAP AT EPISODE`` line and takes
    the first integer on it. ``num_eps`` is accepted for signature symmetry
    with the other readers but is not used here.

    Returns:
        numpy array of shape (num_runs, 1).
    """
    fullMap = np.array([[]])
    for x in range(0,num_runs):
        file_name = dir_name + str(x+1) + '.txt'
        print(file_name)
        try:
            with open(file_name, 'r') as f:
                datafile = f.readlines()
            stepsToGoal=[]
            file_size = len(datafile)
            # founded_episode = False
            target_episode = 'FULL MAP AT EPISODE'
            # Keep the index of the last matching line.
            for x in range(file_size):
                if target_episode in datafile[x]:
                    new_index = x
            line = str(datafile[new_index])
            numbers = re.findall(r'\d+', line)
            numbers[0] = int(numbers[0])
            stepsToGoal.append(numbers)
            fullMap = np.append(fullMap, stepsToGoal)
        except:
            print('could not open file')
    fullMap=fullMap.reshape(num_runs,1)
    # print(stepsToGoalMatrix)
    # print(stepsToGoalMatrix)
    # print('\n')
    # print(rlRewardTraceRefinedMatrix)
    return fullMap
# pl.clf()
# Global plot styling: large fonts for a publication-sized figure.
font = {'size': 40}
pl.rc('font', **font)
fig1, ax1 = pl.subplots()
fig1.set_size_inches(17,10)
# x1 = range(0,75)
# x2 = range(-100,75)
# x3 = range(0,57)
# Trial axes: direct runs span 100 trials; latent runs have 200 episodes
# and are plotted starting at trial -100 so both curves align at 0.
x1 = range(0,100)
x2 = range(-100,100)
exp = 'dsr_grid_blodgett_cobel_paper_greedy_no_penalty'
num_exp_latent = 100
num_exp_init = 100
path_latent = "grids/"+exp+"/latent/run-simul-"
path_init = "grids/"+exp+"/init/run-simul-"
# Load escape-latency matrices (runs x episodes) for both conditions.
normal_latent = readfiles_ratslam('/home/matheus/project/application2/demo/logs/' + path_latent, num_exp_latent, 200)
normal_init = readfiles_ratslam('/home/matheus/project/application2/demo/logs/' + path_init, num_exp_init, 100)
# normal_latent = readfiles_ratslam('/home/matheus/project/application2/demo/logs/ratslam/new_experiments_bochum/exploration_experiments/blodgett/'+ exp +'/latent/run-simul-', num_exp_latent, 100)
# normal_init = readfiles_ratslam('/home/matheus/project/application2/demo/logs/ratslam/new_experiments_bochum/exploration_experiments/blodgett/'+ exp +'/init/run-simul-', num_exp_init, 50)
# perc_normal_latent = getPercetange('/home/matheus/project/application2/demo/logs/ratslam/new_experiments_bochum/exploration_experiments/blodgett/'+ exp +'/latent/run-simul-', num_exp_latent, 100)
# perc_normal_init = getPercetange('/home/matheus/project/application2/demo/logs/ratslam/new_experiments_bochum/exploration_experiments/blodgett/'+ exp +'/init/run-simul-', num_exp_init, 50)
# exp2 = 'replace_inhib'
# num_exp_latent = 20
# num_exp_init = 20
# doors_latent = readfiles_ratslam('/home/matheus/project/application2/demo/logs/ratslam/new_experiments_bochum/exploration_experiments/blodgett/'+ exp2 +'/latent/run-simul-', num_exp_latent, 100)
# doors_init = readfiles_ratslam('/home/matheus/project/application2/demo/logs/ratslam/new_experiments_bochum/exploration_experiments/blodgett/'+ exp2 +'/init/run-simul-', num_exp_init, 50)
# perc_doors_latent = getPercetange('/home/matheus/project/application2/demo/logs/ratslam/new_experiments_bochum/exploration_experiments/blodgett/'+ exp2 +'/latent/run-simul-', num_exp_latent, 100)
# perc_doors_init = getPercetange('/home/matheus/project/application2/demo/logs/ratslam/new_experiments_bochum/exploration_experiments/blodgett/'+ exp2 +'/init/run-simul-', num_exp_init, 50)
# fm_latent=fm_latent.mean(0)
# fm_init=fm_init.mean(0)
# print(fm_init)
# print(fm_latent)
# Average (and spread) over runs, per episode.
normal_latent_mean=normal_latent.mean(0)
normal_latent_std=normal_latent.std(0)
normal_init_mean=normal_init.mean(0)
normal_init_std=normal_init.std(0)
# doors_latent_mean=doors_latent.mean(0)
# doors_latent_std=doors_latent.std(0)
# doors_init_mean=doors_init.mean(0)
# doors_init_std=doors_init.std(0)
# p1 = poly_regression(normal_init_mean)
# p2 = poly_regression(normal_latent_mean[50:])
# pl.plot(x1, p1, color='red', linewidth=3)
# pl.plot(x1, p2, color='green', linewidth=3)
# pl.plot(range(-100,-74), normal_latent_mean[0:26], 'k', color='purple', linewidth=3)
# pl.plot(range(-100,75), normal_latent_mean[:175], 'k', color='purple', linewidth=3, label='latent learning')
# pl.plot(x1, normal_latent_mean[50:], 'k', color='purple', linestyle='--', linewidth=3, label='w door inhib latent')
# pl.fill_between(x2, normal_latent_mean-normal_latent_std, normal_latent_mean+normal_latent_std,
#                 alpha=0.3, edgecolor='purple', facecolor='purple',
#                 linewidth=0)
ax1.grid(axis='both', color='gray', linestyle='-', linewidth=0.5)
# pl.fill_between(x2, normal_init_mean-normal_init_std, normal_init_mean+normal_init_std,
#                 alpha=0.3, edgecolor='orange', facecolor='orange',
#                 linewidth=0)
# Mean escape latency: latent learning (200 episodes, from trial -100)
# versus direct learning (100 episodes, from trial 0).
ax1.plot(x2, normal_latent_mean, color='purple', linewidth=4, label='Latent Learning')
# ax1.plot(x2, doors_latent_mean, color='limegreen', linewidth=3, label='placed latent')
ax1.plot(x1, normal_init_mean, color='orange', linewidth=4, label='Direct Learning')
# ax1.plot(x1, doors_init_mean, color='firebrick', linewidth=3, label='placed direct')
ax1.set_ylabel('Escape Latency')
ax1.set_xlabel('Trial')
legend = ax1.legend(loc='upper right', shadow=True, fontsize='medium')
# inhib_latent = readfiles_ratslam('/home/matheus/project/application2/demo/logs/ratslam/dsr-ratslam-ste_map-with_doors/dsr_ratslam_ste-map_w-doors-inhib_latent_t50_rs20/run-simul-', 10, 100)
# inhib_init = readfiles_ratslam('/home/matheus/project/application2/demo/logs/ratslam/dsr-ratslam-ste_map-with_doors/dsr_ratslam_ste-map_w-doors-inhib_init_t50_rs20/run-simul-', 10, 100)
# fm_init = get_full_map_episode('/home/matheus/project/application2/demo/logs/ratslam/dsr-ratslam-ste_map-with_doors/dsr_ratslam_ste-map_w-doors-inhib_latent_t50_rs20/run-simul-', 10, 100)
# fm_latent = get_full_map_episode('/home/matheus/project/application2/demo/logs/ratslam/dsr-ratslam-ste_map-with_doors/dsr_ratslam_ste-map_w-doors-inhib_init_t50_rs20/run-simul-', 10, 100)
# fm_latent=fm_latent.mean(0)
# fm_init=fm_init.mean(0)
# fig2, ax2 = pl.subplots()
# fig2.set_size_inches(12,6)
# ax2.grid(axis='both', color='gray', linestyle='-', linewidth=0.5)
# ax2.plot(x2, perc_normal_latent, color='purple', linewidth=3,label='latent learning')
# # ax2.plot(x2, perc_doors_latent, color='limegreen', linewidth=3, label='placed latent')
# ax2.plot(x1, perc_normal_init, color='orange', linewidth=3,label='direct learning')
# # ax2.plot(x1, perc_doors_init, color='firebrick', linewidth=3, label='placed latent')
# ax2.set_xlabel('trial')
# ax2.set_ylabel('successful runs to goal location (%)')
# legend = ax2.legend(loc='lower right', shadow=True, fontsize='medium')
# # print(fm_init)
# # print(fm_latent)
# inhib_latent_mean=inhib_latent.mean(0)
# inhib_latent_std=inhib_latent.std(0)
# inhib_init_mean=inhib_init.mean(0)
# inhib_init_std=inhib_init.std(0)
# pl.plot(x2, inhib_latent_mean, 'k', color='tomato', linewidth=3, label='w door inhib latent')
# pl.plot(x1, inhib_latent_mean[50:], 'k', color='tomato', linestyle='--', linewidth=3, label='w door inhib latent')
# # pl.fill_between(x2, inhib_latent_mean-inhib_latent_std, inhib_latent_mean+inhib_latent_std,
# # alpha=0.3, edgecolor='tomato', facecolor='tomato',
# # linewidth=0)
# pl.plot(x2, inhib_init_mean, 'k', color='green', linewidth=3, label='w door inhib init')
# # pl.fill_between(x2, inhib_init_mean-inhib_init_std, inhib_init_mean+inhib_init_std,
# # alpha=0.3, edgecolor='green', facecolor='green',
# # linewidth=0)
# name = 'fewer_replace_inhib'
# Persist the escape-latency figure; the success-percentage figure (fig2)
# is currently disabled along with the code that would build it.
fig1.savefig('/home/matheus/project/application2/figs/experiments-08-2022/'+exp+'_escape.svg')
# fig2.savefig('/home/matheus/project/application2/figs/experiments-08-2022/'+exp+'_percec')
pl.close('all')
pl.show() | MatheusMenezes/latent_learning_app | plot_codes/plot_grid.py | plot_grid.py | py | 14,212 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.arange",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.polyfit",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.log",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.exp",
"line_number": 1... |
74081283232 | from unittest.mock import patch, Mock
import pytest
from item_db import ItemDB
from shopping_cart import ShoppingCart
@pytest.fixture
def cart():
    # Fresh cart per test; 4 is presumably the maximum item capacity
    # (the overflow test below adds 4 items, then expects the 5th to fail).
    return ShoppingCart(4)
def mock_get_price(item: str):
    """Stand-in for ItemDB.get with fixed prices for the two known items.

    Returns None for any other item, mirroring the original fall-through.
    """
    known_prices = {'pear': 2, 'banana': 3}
    return known_prices.get(item)
def test_can_add_item_to_card(cart):
    """Adding one item grows the cart size to 1."""
    cart.add('pear')
    assert cart.size() == 1

def test_when_item_added_then_cart_contains_item(cart):
    """An added item is visible via get_items()."""
    cart.add('raspberry')
    assert 'raspberry' in cart.get_items()

def test_when_more_than_max_size_should_fail(cart):
    """Exceeding the cart capacity (4) raises OverflowError."""
    [cart.add('pear') for x in range(4)]
    with pytest.raises(OverflowError):
        cart.add('pear')

def test_can_get_total_price(cart):
    """Total price is summed from a plain dict price map."""
    cart.add('pear')
    cart.add('banana')
    price_map = {
        'pear': 2,
        'banana': 3
    }
    assert cart.get_total_price(price_map=price_map) == 5

def test_can_get_price_using_db(cart):
    """Total price also works against an ItemDB whose get() is mocked."""
    cart.add('pear')
    cart.add('banana')
    item_db = ItemDB()
    # Replace the real lookup with the deterministic mock price table.
    item_db.get = Mock(side_effect=mock_get_price)
    assert cart.get_total_price(price_map=item_db) == 5
| lorencmateusz/learning-tests | test_shopping_cart.py | test_shopping_cart.py | py | 1,091 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "shopping_cart.ShoppingCart",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pytest.fixture",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "pytest.raises",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "item_db.I... |
34013737364 | from heapq import *
from collections import deque
from collections import Counter
def schedule_tasks(tasks, k):
    """Return the minimum number of CPU intervals needed to run *tasks*
    when two identical tasks must be at least *k* intervals apart.

    Greedy simulation: each round fills up to (k + 1) slots with the
    distinct tasks that have the highest remaining counts; if work remains
    but a round could not be filled, the unfilled slots count as idle time.
    """
    # Max-heap of (negated remaining count, task name).
    remaining = [(-cnt, name) for name, cnt in Counter(tasks).items()]
    heapify(remaining)

    intervals = 0
    while remaining:
        slots_left = k + 1
        paused = []
        # Execute up to (k + 1) distinct tasks this round, most frequent first.
        while remaining and slots_left > 0:
            neg_cnt, name = heappop(remaining)
            if neg_cnt + 1 < 0:
                # Task still has occurrences left; park it until next round.
                paused.append((neg_cnt + 1, name))
            slots_left -= 1
            intervals += 1
        # Re-enqueue everything that still needs to run.
        for entry in paused:
            heappush(remaining, entry)
        # Work remains but the round was short: the rest of it is idle time.
        if remaining:
            intervals += slots_left
    return intervals
def main():
    """Exercise schedule_tasks on the three demo inputs and print results."""
    demo_cases = (
        (['a', 'a', 'a', 'b', 'c', 'c'], 2),
        (['a', 'b', 'a'], 3),
        (['a', 'a', 'b', 'b', 'c', 'c'], 2),
    )
    for task_list, cooldown in demo_cases:
        print("Minimum intervals needed to execute all tasks: " +
              str(schedule_tasks(task_list, cooldown)))
main()
| kjingers/Leetcode | Problems/TaskScheduler/TaskScheduler.py | TaskScheduler.py | py | 1,856 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 19,
"usage_type": "call"
}
] |
22861823283 | import json
import requests
import argparse
import sys
import os
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
# Headless Chrome instance shared by every BotVictim in this process.
chrome_options = Options()
chrome_options.add_argument("--disable-extensions")
# --no-sandbox is typically needed when running as root inside a container.
chrome_options.add_argument('--no-sandbox')
#chrome_options.add_argument("--disable-gpu")
chrome_options.add_argument("--headless")
driver = webdriver.Chrome(options=chrome_options)
class BotVictim:
    """Simulated victim: logs into the target app, then opens an
    attacker-supplied URL in a headless browser carrying its session token
    (presumably to trigger client-side attack scenarios — confirm against
    the surrounding platform code).
    """

    def __init__(self, url):
        # URL the bot will visit with its authenticated session.
        self.url_to_connect = url
        # Hard-coded lab values; the commented alternatives read env vars instead.
        self.my_server_ip_address = str(os.getenv("SERVER_IP_ADDRESS"))
        self.url_server = "http://10.0.0.3/" # "http://" + self.my_server_ip_address + "/"
        self.my_user = "local@host.gouv" # str(os.getenv("BOT_ID"))
        self.my_pw = "kikou" # str(os.getenv("BOT_PW"))

    def login(self):
        """Authenticate against the app server and cache the session token.

        Raises if the response is not JSON or lacks an 'Authorization' key.
        """
        ses = requests.Session()
        res = ses.post(
            url=self.url_server + "auth/login",
            json = {
                "email": self.my_user,
                "password": self.my_pw
            }
        )
        json_data = res.json()
        self.token = str(json_data['Authorization'])

    def go_to_the_url(self):
        """Open the target URL with the victim's token set as a cookie.

        Selenium only allows add_cookie() for the currently loaded domain,
        hence the initial get() before setting the cookie and reloading.
        Waits up to 10 s for an element with id "trigger-bot" to appear.
        """
        driver.get(self.url_to_connect)
        cookies_dict = {
            "name": 'wowo',
            'value': self.token
        }
        driver.add_cookie(cookies_dict)
        driver.get(self.url_to_connect)
        elem = WebDriverWait(driver, 10).until(
            EC.presence_of_element_located((By.ID, "trigger-bot"))
        )
        print(driver.page_source)
if __name__ == "__main__":
    # CLI entry point: one positional argument, the URL the bot should visit.
    parser = argparse.ArgumentParser()
    parser.add_argument("url", type=str, help="the scenario/config name")
    args = parser.parse_args()
    if args.url:
        bot = BotVictim(args.url)
        bot.login()
        bot.go_to_the_url()
    else:
        # NOTE(review): effectively unreachable — argparse exits on a missing
        # positional argument, and any provided string is truthy.
        print("You must mention an url.")
sys.exit(1) | PFE-Attack-client/PFE-Attaques-client | platform/my-vuln-app/bot-victim/main.py | main.py | py | 2,034 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 18,
"usage_type": "name"
},
{... |
40086162188 | import argparse
import pprint
import shutil
from pathlib import Path
import tensorflow as tf
import yaml
from tensorflow import keras
from dataset_factory import DatasetBuilder
from losses import CTCLoss
from metrics import SequenceAccuracy
from models import build_model
# ---- CLI: config file plus a fresh output directory ----
parser = argparse.ArgumentParser()
parser.add_argument(
    "--config", type=Path, required=True, help="The config file path."
)
parser.add_argument(
    "--save_dir",
    type=Path,
    required=True,
    help="The path to save the models, logs, etc.",
)
args = parser.parse_args()

# Load the "train" section of the YAML config and echo it for the run log.
with args.config.open() as f:
    config = yaml.load(f, Loader=yaml.Loader)["train"]
pprint.pprint(config)

# Refuse to reuse a non-empty experiment directory, then archive the config
# alongside the run artifacts for reproducibility.
args.save_dir.mkdir(exist_ok=True)
if list(args.save_dir.iterdir()):
    raise ValueError(f"{args.save_dir} is not a empty folder")
shutil.copy(args.config, args.save_dir / args.config.name)

# Multi-GPU data parallelism; effective batch size scales with replica count.
strategy = tf.distribute.MirroredStrategy()
batch_size = config["batch_size_per_replica"] * strategy.num_replicas_in_sync

dataset_builder = DatasetBuilder(**config["dataset_builder"])
train_ds = dataset_builder(config["train_ann_paths"], batch_size, True)
val_ds = dataset_builder(config["val_ann_paths"], batch_size, False)

# Model, optimizer and metrics must be created inside the strategy scope.
with strategy.scope():
    lr_schedule = keras.optimizers.schedules.CosineDecay(
        **config["lr_schedule"]
    )
    model = build_model(
        dataset_builder.num_classes,
        weight=config.get("weight"),
        img_shape=config["dataset_builder"]["img_shape"],
    )
    model.compile(
        optimizer=keras.optimizers.Adam(lr_schedule),
        loss=CTCLoss(),
        metrics=[SequenceAccuracy()],
    )
model.summary()

# Checkpoint filenames embed epoch, validation loss and sequence accuracy.
model_prefix = "{epoch}_{val_loss:.4f}_{val_sequence_accuracy:.4f}"
model_path = f"{args.save_dir}/{model_prefix}.h5"
callbacks = [
    keras.callbacks.ModelCheckpoint(model_path, save_weights_only=True),
    keras.callbacks.TensorBoard(
        log_dir=f"{args.save_dir}/logs", **config["tensorboard"]
    ),
]

model.fit(
    train_ds,
    epochs=config["epochs"],
    callbacks=callbacks,
    validation_data=val_ds,
)
| FLming/CRNN.tf2 | crnn/train.py | train.py | py | 2,049 | python | en | code | 144 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "yaml.load",
"li... |
16538322007 | import plotly.express as px
import pandas as pd
from datastructure import *
from sklearn.decomposition import PCA
import plotly.graph_objects as go
def createSPLOM(data, idx, indexRange, maxComp):
    """Build a scatter-plot matrix (SPLOM) of the leading PCA components of
    one series' particle orientations.

    Args:
        data: object holding ``series`` (each with ``orientations``),
            ``particleNumber`` and ``fontSize``. Orientations appear to be
            shaped (particles, time, 3) before the transpose — TODO confirm.
        idx: index of the series to analyze.
        indexRange: (start, end) slice of time steps to include.
        maxComp: upper bound on the number of PCA components displayed.

    Returns:
        a plotly scatter-matrix figure with shared axis ranges.
    """
    # Reorder to (time, particles, 3) and keep the requested time window.
    O = np.transpose(data.series[idx].orientations, (1, 0, 2))[indexRange[0]:indexRange[1]]
    # Flatten each time step into one row of length particleNumber * 3.
    O = O.reshape(O.shape[0], data.particleNumber*3)#[-1000:]
    pca = PCA()
    pca_data = pca.fit_transform(O)
    var = pca.explained_variance_ratio_
    # Keep components explaining > 0.1% variance, capped at maxComp.
    numDim = np.count_nonzero(var > 0.001)
    numDim = min(maxComp, numDim)
    pca_data = pca_data[:,:numDim]
    # Common axis range (with padding) so all panels are comparable.
    minVal = np.min(pca_data)-0.2
    maxVal = np.max(pca_data)+0.2
    df = pd.DataFrame(data=pca_data, index=[(""+str(i)) for i in range(len(pca_data))])
    splom = px.scatter_matrix(df,
                              labels={i:(""+str(i)) for i in range(numDim)})
    splom.update_traces(diagonal_visible=False)
    splom.update_layout(template = 'simple_white')
    # Plotly numbers axes from 1: xaxis1, xaxis2, ...
    splom.update_layout({"xaxis"+str(i+1): dict(range = [minVal, maxVal]) for i in range(numDim)})
    splom.update_layout({"yaxis"+str(i+1): dict(range = [minVal, maxVal]) for i in range(numDim)})
    splom.update_layout(font=dict(
        size=data.fontSize
    ))
    return splom
| marinaevers/asevis | Components/splom.py | splom.py | py | 1,208 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.decomposition.PCA",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "plotly.express.scatter_matrix",
"line_number": 20,
"usage_type": "call"
},
{
"api_name... |
22720982859 | #!/usr/bin/env python3
from __future__ import annotations
import torch
from .. import settings
from ..utils.broadcasting import _matmul_broadcast_shape, _mul_broadcast_shape
from ..utils.deprecation import bool_compat
from ..utils.getitem import _noop_index
from .dense_linear_operator import DenseLinearOperator, to_linear_operator
from .linear_operator import LinearOperator, to_dense
# TODO implement this as a __torch_function__
def cat(inputs, dim=0, output_device=None):
    """Concatenate tensors and/or LinearOperators along ``dim``.

    Plain tensors go straight to ``torch.cat``; all-dense operators are
    concatenated densely to avoid the overhead of a CatLinearOperator.
    Otherwise a lazy CatLinearOperator is returned. ``output_device`` is
    required when the inputs live on different devices.
    """
    if all(torch.is_tensor(i) for i in inputs):
        return torch.cat(inputs, dim=dim)
    inputs = [to_linear_operator(i) for i in inputs]
    if all(isinstance(i, DenseLinearOperator) for i in inputs):
        # Dont form a CatLinearOperator if all tensors are DenseLinearOperator
        return to_linear_operator(torch.cat([to_dense(i) for i in inputs], dim=dim))
    if output_device is None and all(i.device == inputs[0].device for i in inputs):
        output_device = inputs[0].device
    elif output_device is None:
        raise RuntimeError(
            "Trying to concat linear operators on different devices without specifying an output device."
        )
    return CatLinearOperator(*inputs, dim=dim, output_device=output_device)
class CatLinearOperator(LinearOperator):
r"""
A `LinearOperator` that represents the concatenation of other linear operators.
Each LinearOperator must have the same shape except in the concatenating
dimension.
Args:
- :attr:`linear_operators` (list of LinearOperators):
A list of LinearOperators whose sizes are the same except in
concatenating dimension :attr:`dim`
- :attr:`dim` (int):
The concatenating dimension which can be a batch dimension.
- :attr:`output_device` (torch.device):
The CatLinearOperator will appear to appear on :attr:`output_device`
and place any output `torch.Tensors` on :attr:`output_device`
"""
    def _check_args(self, *linear_operators, dim=0, output_device=None):
        """Validate constructor arguments: at least two operators, all of
        type LinearOperator, and identical shapes outside the concatenation
        dimension ``dim``."""
        if len(linear_operators) == 0:
            raise RuntimeError("List of LinearOperators must be non-empty")
        elif len(linear_operators) == 1:
            raise RuntimeError("Why are we trying to concatenate a single LinearOperator?")
        if not all([isinstance(t, LinearOperator) for t in linear_operators]):
            raise RuntimeError("CatLinearOperator requires a list of all LinearOperators")
        # Shape of the first operator with the concat dimension removed;
        # every other operator must match it exactly.
        rep_tensor = linear_operators[0]
        rep_tensor_noncat_shape = list(rep_tensor.shape)
        del rep_tensor_noncat_shape[dim]
        for t in linear_operators:
            if t.dim() != rep_tensor.dim():
                raise RuntimeError("All tensors must have the same number of dimensions")
            t_noncat_shape = list(t.shape)
            del t_noncat_shape[dim]
            if t_noncat_shape != rep_tensor_noncat_shape:
                raise RuntimeError("All LinearOperators must have the same size in " "the non-concatenation dimension")
    def __init__(self, *linear_operators, dim=0, output_device=None):
        # Make sure index is negative index
        # (self.cat_dim is always stored negative so it is insensitive to
        # batch-dimension broadcasting; positive_dim is kept for _shape).
        rep_tensor = linear_operators[0]
        ndims = rep_tensor.ndimension()
        if dim >= 0:
            positive_dim = dim
            dim = dim - ndims
        else:
            positive_dim = ndims + dim
        # Standard initialization
        super().__init__(*linear_operators, dim=dim, output_device=output_device)
        self.linear_operators = linear_operators
        self.cat_dim = dim
        self.output_device = output_device
        # Helpers for _getitem
        # cat_dim_sizes[i]     : size of operator i along the concat dim
        # cat_dim_cum_sizes[i] : start offset of operator i (exclusive prefix sum)
        # idx_to_tensor_idx[j] : which operator owns global concat index j
        cat_dim_sizes = torch.tensor([t.size(dim) for t in linear_operators], device=output_device)
        cat_dim_cum_sizes = torch.zeros(len(linear_operators) + 1, dtype=torch.long, device=output_device)
        torch.cumsum(cat_dim_sizes, dim=-1, out=cat_dim_cum_sizes[1:])
        idx_to_tensor_idx = torch.empty(cat_dim_cum_sizes[-1].item(), dtype=torch.long, device=output_device)
        for tsr_idx, (start_idx, end_idx) in enumerate(zip(cat_dim_cum_sizes[:-1], cat_dim_cum_sizes[1:])):
            idx_to_tensor_idx[start_idx.item() : end_idx.item()].fill_(tsr_idx)
        self.cat_dim_sizes = cat_dim_sizes
        self.cat_dim_cum_sizes = cat_dim_cum_sizes
        self.idx_to_tensor_idx = idx_to_tensor_idx
        # Overall shape: the representative shape with the concat dimension
        # replaced by the summed size of all constituents.
        self._shape = torch.Size(
            (*rep_tensor.shape[:positive_dim], cat_dim_cum_sizes[-1].item(), *rep_tensor.shape[positive_dim + 1 :])
        )
    def _split_slice(self, slice_idx):
        """
        Splits a slice(a, b, None) in to a list of slices [slice(a1, b1, None), slice(a2, b2, None), ...]
        so that each slice in the list slices in to a single tensor that we have concatenated with this LinearOperator.

        Returns a pair (tensor indices, per-tensor slices) of equal length.
        """
        if slice_idx.step is not None:
            # TODO: Add support for this eventually.
            raise RuntimeError("Slicing a CatLinearOperator with a step is not currently supported!")
        start_idx = slice_idx.start if slice_idx.start is not None else 0
        stop_idx = slice_idx.stop if slice_idx.stop is not None else self.size(self.cat_dim)
        # Which constituent operators hold the first and last requested element.
        first_tensor_idx = self.idx_to_tensor_idx[start_idx].item()
        last_tensor_idx = self.idx_to_tensor_idx[stop_idx - 1].item()
        # Translate the global offsets into each operator's local coordinates.
        first_tensor_start_index = start_idx - self.cat_dim_cum_sizes[first_tensor_idx].item()
        last_tensor_stop_index = stop_idx - self.cat_dim_cum_sizes[last_tensor_idx].item()
        if first_tensor_idx == last_tensor_idx:
            return [first_tensor_idx], [slice(first_tensor_start_index, last_tensor_stop_index, None)]
        else:
            # Operators strictly between the first and last are taken whole.
            num_middle_tensors = last_tensor_idx - first_tensor_idx - 1
            first_slice = slice(first_tensor_start_index, None, None)
            last_slice = slice(None, last_tensor_stop_index, None)
            return (
                list(range(first_tensor_idx, last_tensor_idx + 1)),
                [first_slice] + [_noop_index] * num_middle_tensors + [last_slice],
            )
    def _expand_batch(self, batch_shape):
        """Expand batch dimensions; the concatenated batch dimension itself
        cannot be expanded and must keep its current size."""
        # cat_dim is negative; +2 skips the two matrix dims, so batch_dim < 0
        # means we are concatenated along a batch dimension.
        batch_dim = self.cat_dim + 2
        if batch_dim < 0:
            if batch_shape[batch_dim] != self.batch_shape[batch_dim]:
                raise RuntimeError(
                    f"Trying to expand a CatLinearOperator in dimension {self.cat_dim}, but this is the concatenated "
                    f"dimension.\nCurrent shape: {self.shape} - expanded shape: {batch_shape + self.matrix_shape}."
                )
            linear_operators = []
            for linear_operator in self.linear_operators:
                # Each constituent keeps its own slice size along the concat dim.
                sub_batch_shape = list(batch_shape).copy()
                sub_batch_shape[batch_dim] = linear_operator.shape[self.cat_dim]
                linear_operators.append(linear_operator._expand_batch(sub_batch_shape))
        else:
            # Concatenated along a matrix dim: expand all constituents uniformly.
            linear_operators = [linear_operator._expand_batch(batch_shape) for linear_operator in self.linear_operators]
        res = self.__class__(*linear_operators, dim=self.cat_dim, output_device=self.output_device)
        return res
    def _get_indices(self, row_index, col_index, *batch_indices):
        """Fancy-index the operator elementwise: broadcast all index tensors
        to a common shape, route each flattened position to the constituent
        operator that owns it along the concat dimension, and stitch the
        per-constituent results back together in order."""
        indices = [*batch_indices, row_index, col_index]
        target_shape = _mul_broadcast_shape(*[index.shape for index in indices])
        indices = [index.expand(target_shape).reshape(-1) for index in indices]
        cat_dim_indices = indices[self.cat_dim]
        # Find out for which indices we switch to different tensors
        # (does_switch_tensor[i] is True at the start of each run of indices
        # owned by the same constituent; sentinels at both ends).
        target_tensors = self.idx_to_tensor_idx[cat_dim_indices]
        does_switch_tensor = torch.ones(target_tensors.numel() + 1, dtype=bool_compat, device=self.device)
        torch.ne(target_tensors[:-1], target_tensors[1:], out=does_switch_tensor[1:-1])
        # Get the LinearOperators that will comprise the new LinearOperator
        linear_operator_indices = target_tensors[does_switch_tensor[:-1]].tolist()
        linear_operators = [self.linear_operators[idx] for idx in linear_operator_indices]
        # Get the new set of indices for each of the LinearOperators
        # (split every index tensor at the run boundaries computed above).
        switch_tensor = does_switch_tensor.nonzero(as_tuple=False).squeeze(-1)
        split_sizes = (switch_tensor[1:] - switch_tensor[:-1]).tolist()
        sub_indices = zip(
            *[
                list(index.split(split_sizes)) if torch.is_tensor(index) else [index] * len(split_sizes)
                for index in indices
            ]
        )
        # Make everything a list
        sub_indices = [list(sub_index) for sub_index in sub_indices]
        # Make sure that we have adjusted the start and ends of the indices that correspond to the cat dim
        for linear_operator_idx, sub_index in zip(linear_operator_indices, sub_indices):
            sub_index[self.cat_dim] = sub_index[self.cat_dim] - self.cat_dim_cum_sizes[linear_operator_idx]
        res_list = [
            linear_operator._get_indices(sub_index[-2], sub_index[-1], *sub_index[:-2])
            for linear_operator, sub_index in zip(linear_operators, sub_indices)
        ]
        if len(res_list) == 1:
            return res_list[0].view(target_shape).to(self.device)
        else:
            return torch.cat(res_list).view(target_shape).to(self.device)
def _getitem(self, row_index, col_index, *batch_indices):
    """Index/slice the concatenated operator.

    The index that falls on the cat dimension determines the routing: a slice
    is split across the constituent operators, a tensor index is chunked into
    runs that land in the same operator, and a plain int (batch-cat only)
    selects a single operator. Results with more than one piece are wrapped
    back into a CatLinearOperator.
    """
    indices = [*batch_indices, row_index, col_index]
    cat_dim_indices = indices[self.cat_dim]
    if isinstance(cat_dim_indices, slice):
        if cat_dim_indices == _noop_index:
            # Full slice along the cat dim: index every operator the same way.
            res_list = [
                linear_operator._getitem(row_index, col_index, *batch_indices)
                for linear_operator in self.linear_operators
            ]
        else:
            # Partial slice: split it into per-operator sub-slices.
            res_list = []
            tensor_idxs, target_slices = self._split_slice(cat_dim_indices)
            for tensor_idx, target_slice in zip(tensor_idxs, target_slices):
                indices[self.cat_dim] = target_slice
                res = self.linear_operators[tensor_idx]._getitem(indices[-2], indices[-1], *indices[:-2])
                res_list.append(res)
    elif torch.is_tensor(cat_dim_indices):
        # Find out for which indices we switch to different tensors
        target_tensors = self.idx_to_tensor_idx[cat_dim_indices]
        does_switch_tensor = torch.ones(target_tensors.numel() + 1, dtype=bool_compat, device=self.device)
        torch.ne(target_tensors[:-1], target_tensors[1:], out=does_switch_tensor[1:-1])
        # Get the LinearOperators that will comprise the new LinearOperator
        linear_operator_indices = target_tensors[does_switch_tensor[:-1]].tolist()
        linear_operators = [self.linear_operators[idx] for idx in linear_operator_indices]
        # Get the new set of indices for each of the LinearOperators
        switch_tensor = does_switch_tensor.nonzero(as_tuple=False).squeeze(-1)
        split_sizes = (switch_tensor[1:] - switch_tensor[:-1]).tolist()
        sub_indices = zip(
            *[
                list(index.split(split_sizes)) if torch.is_tensor(index) else [index] * len(split_sizes)
                for index in indices
            ]
        )
        # Make everything a list
        sub_indices = [list(sub_index) for sub_index in sub_indices]
        # Make sure that we have adjusted the start and ends of the indices that correspond to the cat dim
        for linear_operator_idx, sub_index in zip(linear_operator_indices, sub_indices):
            sub_index[self.cat_dim] = sub_index[self.cat_dim] - self.cat_dim_cum_sizes[linear_operator_idx]
        res_list = [
            linear_operator._getitem(sub_index[-2], sub_index[-1], *sub_index[:-2])
            for linear_operator, sub_index in zip(linear_operators, sub_indices)
        ]
    elif isinstance(cat_dim_indices, int):  # Should only happen for cat on batch dim
        # Offset the int into the selected operator's local coordinates.
        target_tensor = self.idx_to_tensor_idx[cat_dim_indices].item()
        cat_dim_indices = cat_dim_indices - self.cat_dim_cum_sizes[target_tensor]
        indices[self.cat_dim] = cat_dim_indices
        res_list = [self.linear_operators[target_tensor]._getitem(indices[-2], indices[-1], *indices[:-2])]
    # Process the list
    if len(res_list) == 1:
        return res_list[0].to(self.output_device)
    else:
        res = self.__class__(*res_list, dim=self.cat_dim, output_device=self.output_device)
        return res
def _matmul(self, rhs):
    """Matrix-multiply the concatenated operator with ``rhs``.

    ``rhs`` is first replicated on every constituent device. Depending on the
    cat dimension the per-operator results are concatenated (row cat / batch
    cat) or summed (column cat, since each block multiplies a disjoint slab of
    ``rhs``), always on the output device.
    """
    output_device = self.device if self.device is not None else rhs.device
    # make a copy of `rhs` on each device
    rhs_ = []
    for d in self.devices:
        if d != rhs.device:
            rhs_.append(rhs.to(d))
        else:
            rhs_.append(rhs)
    if self.cat_dim == -2:
        # Row-wise concatenation: each block produces a horizontal slab of the result.
        res_list = [t._matmul(rhs) for t, rhs in zip(self.linear_operators, rhs_)]
        # copy result back to output device
        res_list = [x.to(output_device) for x in res_list]
        res = torch.cat(res_list, dim=-2)
    elif self.cat_dim == -1:
        # Column-wise concatenation: slice rhs row-blocks and sum partial products.
        curr_idx = 0
        res_list = []
        index = [slice(None, None, None) for _ in range(rhs.ndimension())]
        # NOTE: the loop variable `rhs` deliberately rebinds to the per-device copy.
        for t, size, rhs in zip(self.linear_operators, self.cat_dim_sizes, rhs_):
            index[-2] = slice(curr_idx, curr_idx + size, None)
            res_list.append(t._matmul(rhs[index]))
            curr_idx += size
        # copy result back to output device and sum
        res_list = [x.to(output_device) for x in res_list]
        res = 0.0
        for x in res_list:
            res = res + x
    else:
        # Batch concatenation: narrow rhs along the cat dim and concat the products.
        output_shape = _matmul_broadcast_shape(self.shape, rhs.shape)
        rhs = rhs.expand(*output_shape[:-2], *rhs.shape[-2:])
        curr_idx = 0
        res_list = []
        for t, size in zip(self.linear_operators, self.cat_dim_sizes):
            sub_rhs = rhs.narrow(self.cat_dim, curr_idx, size)
            res_list.append(t._matmul(sub_rhs))
            curr_idx += size
        # copy result back to output device
        res_list = [x.to(output_device) for x in res_list]
        res = torch.cat(res_list, dim=self.cat_dim)
    return res
def _permute_batch(self, *dims):
    """Permute the batch dimensions of every constituent operator.

    When the cat dimension is itself a batch dimension, its new position is
    looked up in the permutation; otherwise it is a matrix dim and unaffected.
    """
    permuted_ops = [op._permute_batch(*dims) for op in self.linear_operators]
    if self.cat_dim < -2:
        # cat dim is a batch dim: find where it lands under the permutation.
        new_cat_dim = dims.index(self.dim() + self.cat_dim)
    else:
        new_cat_dim = self.cat_dim
    return self.__class__(*permuted_ops, dim=new_cat_dim, output_device=self.output_device)
def _size(self):
    # Shape was precomputed at construction time from the constituent operators.
    return self._shape
def _transpose_nonbatch(self):
    """Transpose the matrix dimensions of the concatenated operator.

    Transposing swaps the roles of row-cat (-2) and column-cat (-1); a batch
    cat dimension is unaffected.
    """
    swap = {-2: -1, -1: -2}
    new_dim = swap.get(self.cat_dim, self.cat_dim)
    transposed_ops = [op._transpose_nonbatch() for op in self.linear_operators]
    return self.__class__(*transposed_ops, dim=new_dim, output_device=self.output_device)
def _unsqueeze_batch(self, dim):
    """Insert a size-one batch dimension at ``dim`` in every constituent operator.

    The (positive) cat dimension shifts right by one when the new dimension is
    inserted at or before it.
    """
    positive_cat_dim = self.dim() + self.cat_dim
    unsqueezed_ops = [op._unsqueeze_batch(dim) for op in self.linear_operators]
    new_cat_dim = positive_cat_dim + 1 if dim <= positive_cat_dim else positive_cat_dim
    return self.__class__(*unsqueezed_ops, dim=new_cat_dim, output_device=self.output_device)
def diag(self):
    """Return the diagonal of the (square) concatenated operator.

    For row/column concatenation each block contributes the stretch of the
    global diagonal that passes through it, selected via paired row/col index
    tensors with a running offset; for batch concatenation the per-block
    diagonals are simply concatenated along the corresponding batch dim.
    """
    if settings.debug.on():
        if not self.is_square:
            raise RuntimeError("Diag works on square matrices (or batches)")
    if self.cat_dim == -2:
        # Row cat: block t covers global rows [curr_col, curr_col + n_rows),
        # so its diagonal entries sit at columns offset by curr_col.
        res = []
        curr_col = 0
        for t in self.linear_operators:
            n_rows, n_cols = t.shape[-2:]
            rows = torch.arange(0, n_rows, dtype=torch.long, device=t.device)
            cols = torch.arange(curr_col, curr_col + n_rows, dtype=torch.long, device=t.device)
            res.append(t[..., rows, cols].to(self.device))
            curr_col += n_rows
        res = torch.cat(res, dim=-1)
    elif self.cat_dim == -1:
        # Column cat: symmetric case, the running offset applies to the rows.
        res = []
        curr_row = 0
        for t in self.linear_operators:
            n_rows, n_cols = t.shape[-2:]
            rows = torch.arange(curr_row, curr_row + n_cols, dtype=torch.long, device=t.device)
            cols = torch.arange(0, n_cols, dtype=torch.long, device=t.device)
            curr_row += n_cols
            res.append(t[..., rows, cols].to(self.device))
        res = torch.cat(res, dim=-1)
    else:
        # Batch cat: diag() drops one matrix dim, so the cat dim shifts by one.
        res = torch.cat([t.diag().to(self.device) for t in self.linear_operators], dim=self.cat_dim + 1)
    return res
def inv_quad_logdet(self, inv_quad_rhs=None, logdet=False, reduce_inv_quad=True):
    """Delegate to the parent implementation, moving non-None results to the output device."""
    outputs = super().inv_quad_logdet(inv_quad_rhs, logdet, reduce_inv_quad)
    return tuple(None if out is None else out.to(self.device) for out in outputs)
@property
def device(self):
    # The canonical device of a CatLinearOperator is where results are returned.
    return self.output_device
@property
def devices(self):
    # Devices of the constituent operators (may contain duplicates).
    return [t.device for t in self.linear_operators]
@property
def device_count(self):
    # Number of distinct devices among the constituent operators.
    return len(set(self.devices))
def to(self, device_id):
    """
    returns a new CatLinearOperator with device_id as the output_device
    Warning: this does not move the LinearOperators in this CatLinearOperator to
    device_id
    """
    kwargs = {**self._kwargs, "output_device": device_id}
    return self.__class__(*self._args, **kwargs)
def all_to(self, device_id):
    """
    Create a new CatLinearOperator with all LinearOperators in CatLinearOperator moved
    to one device device. The new CatLinearOperator also has device_id as the
    output_device.
    """
    def _moved(value):
        # Move anything that supports .to(); pass other values through untouched.
        return value.to(device_id) if hasattr(value, "to") else value

    new_args = [_moved(arg) for arg in self._args]
    new_kwargs = {name: _moved(val) for name, val in self._kwargs.items()}
    new_kwargs["output_device"] = device_id
    return self.__class__(*new_args, **new_kwargs)
| cornellius-gp/linear_operator.old | linear_operator/operators/cat_linear_operator.py | cat_linear_operator.py | py | 18,489 | python | en | code | 18 | github-code | 1 | [
{
"api_name": "torch.is_tensor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.cat",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "dense_linear_operator.to_linear_operator",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "de... |
35483658265 | from nltk.compat import gutenberg
# Exploratory script over the Gutenberg corpus (REPL-style: the bare
# expressions below have no effect when run as a script, only interactively).
gutenberg.fileids()
dir(gutenberg)
# Load Milton's "Paradise Lost" as a list of word tokens.
JM_book = gutenberg.words('milton-paradise.txt')
len(JM_book)
# Take a 50-token sample and examine its unique words.
sample_JM = JM_book[100:150]
sample_JM
len(sample_JM)
set_JM = set(sample_JM)
set_JM
len(set(set_JM))
type(set_JM)
# Sort the unique tokens alphabetically.
sorteSet_JM = sorted(set_JM)
sorteSet_JM
len(sorteSet_JM)
print("milton-paradise.txt has: ", len(JM_book))
len(set(JM_book)) | FernandoMartinezJara/Nltk | curso2/2 Organizing Words by Set and Sorted.py | 2 Organizing Words by Set and Sorted.py | py | 376 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "nltk.compat.gutenberg.fileids",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "nltk.compat.gutenberg",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "nltk.compat.gutenberg",
"line_number": 5,
"usage_type": "argument"
},
{
"api_na... |
70852654755 | import csv
from collections import OrderedDict
from statistics import mean
def calculate_averages(input_file_name, output_file_name):
    """Write one ``name,average`` row per student.

    Reads CSV rows of the form ``name,grade1,grade2,...`` from
    ``input_file_name`` and appends ``(name, mean_of_grades)`` rows to
    ``output_file_name``.

    Fixes over the original: the output file is opened once (the original
    reopened it in append mode for every row, leaked the handles on error,
    and raised NameError on an empty input file); both files are managed
    with context managers so they are always closed.
    """
    with open(input_file_name, newline='') as src, \
            open(output_file_name, 'a', newline='') as dst:
        writer = csv.writer(dst)
        for row in csv.reader(src):
            name = row[0]
            grades = [float(grade) for grade in row[1:]]
            writer.writerow((name, mean(grades)))
def calculate_sorted_averages(input_file_name, output_file_name):
    """Write ``name,average`` rows sorted by ascending average.

    Fixes over the original: averages are sorted numerically (the original
    stringified them first and sorted lexicographically, so e.g. "100.0"
    sorted before "50.0"), and duplicate student names are no longer
    silently collapsed by an intermediate dict.
    """
    with open(input_file_name, newline='') as src:
        averages = [(row[0], mean(float(grade) for grade in row[1:]))
                    for row in csv.reader(src)]
    # Stable sort by numeric average; ties keep input order.
    averages.sort(key=lambda item: item[1])
    with open(output_file_name, 'a', newline='') as dst:
        writer = csv.writer(dst)
        for name, avg in averages:
            # str() matches the original output format for the mean.
            writer.writerow((name, str(avg)))
def calculate_three_best(input_file_name, output_file_name):
    """Write the three students with the highest averages, best first.

    Ties on the average are broken by name (ascending), matching the
    intent of the original's secondary sort by name.

    Fixes over the original: the selection sorted stringified averages
    lexicographically, so the wrong students could be picked; here the
    averages are compared numerically in one sort.
    """
    with open(input_file_name, newline='') as src:
        averages = [(row[0], mean(float(grade) for grade in row[1:]))
                    for row in csv.reader(src)]
    # Descending by average, ascending by name for ties.
    averages.sort(key=lambda item: (-item[1], item[0]))
    with open(output_file_name, 'a', newline='') as dst:
        writer = csv.writer(dst)
        for name, avg in averages[:3]:
            writer.writerow((name, str(avg)))
def calculate_three_worst(input_file_name, output_file_name):
    """Write the three lowest averages (values only), worst first.

    Output format preserved from the original: each row contains only the
    average (the original wrote ``row[1:]`` of its ``(name, mean)`` tuples).

    Fix over the original: the averages are sorted numerically instead of
    as strings, so the true three worst are selected.
    """
    with open(input_file_name, newline='') as src:
        averages = [(row[0], mean(float(grade) for grade in row[1:]))
                    for row in csv.reader(src)]
    averages.sort(key=lambda item: item[1])
    with open(output_file_name, 'a', newline='') as dst:
        writer = csv.writer(dst)
        for _, avg in averages[:3]:
            writer.writerow([str(avg)])
def calculate_average_of_averages(input_file_name, output_file_name):
    """Write a single row holding the mean of all per-student averages.

    Fixes over the original: per-student averages are kept as floats
    end-to-end (the original round-tripped them through strings) and
    duplicate student names are no longer collapsed by an intermediate
    dict, so every input row contributes to the overall mean.
    """
    with open(input_file_name, newline='') as src:
        averages = [mean(float(grade) for grade in row[1:])
                    for row in csv.reader(src)]
    with open(output_file_name, 'a', newline='') as dst:
        # str() matches the original single-value output format.
        csv.writer(dst).writerow([str(mean(averages))])
| NafisehAbrishami/Python_and_C_Projects | Project-Source1.py | Project-Source1.py | py | 3,671 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "csv.reader",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "statistics.mean",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": ... |
27701560902 | import json
import argparse
from typing import Text, Dict, List
def render_embedding(active: List[int], height: int, width: int) -> None:
    """Print a ``height`` x ``width`` ASCII grid of the embedding.

    Cell ``row * width + col`` is drawn as ``X`` when its flat index is in
    ``active``, otherwise as a space; each row is framed by ``|``.

    Improvement: membership is tested against a set (the original did an
    O(len(active)) list scan for every one of the height*width cells).
    """
    active_cells = set(active)
    for row in range(height):
        print("|", end="")
        for col in range(width):
            print("X" if row * width + col in active_cells else " ", end="")
        print("|")
def run(semantic_map_file: Text) -> None:
    """Load a semantic-map JSON file and interactively render term embeddings.

    Repeatedly prompts for a vocabulary term and prints its embedding grid
    until the user enters an empty line.
    """
    with open(semantic_map_file, "r") as file:
        data = json.load(file)
    # Metadata fields (kept for completeness; only Height/Width/Embeddings are used below).
    note = data.get("Note", "")
    height = data.get("Height")
    width = data.get("Width")
    assume_lower_case = data.get("AssumeLowerCase")
    global_topology = data.get("GlobalTopology")
    local_topology = data.get("LocalTopology")
    readme = data.get("CreationReadme")
    creation_options = data.get("CreationOptions")
    embeddings: Dict[Text, List[int]] = data.get("Embeddings")
    # Required fields; fail fast on a malformed map file.
    assert embeddings
    assert height
    assert width
    print(f"Height: {height}")
    print(f"Width: {width}")
    query = input("Term to lookup: ")
    while query:
        active = embeddings.get(query.strip())
        if active:
            print(active)
            render_embedding(active, height, width)
        else:
            print(f"The term '{query}' is not in the vocabulary of this map.")
        print()
        query = input("Term to lookup: ")
if __name__ == "__main__":
    # CLI entry point: view a semantic map stored as JSON.
    parser = argparse.ArgumentParser(
        description="View a semantic map (given as json)"
    )
    parser.add_argument(
        "--smap",
        type=str,
        help="Path to json file that represents the map",
    )
    args = parser.parse_args()
    run(
        args.smap,
    )
| RasaHQ/semantic-map-embedding | scripts/view_smap.py | view_smap.py | py | 1,651 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "typing.Text",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "json.load",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "typing.Dict",
"line_number": 27,... |
17977450408 | # implementation based on DeepLTL https://github.com/reactive-systems/deepltl
import sys
import importlib
import os.path as path
import subprocess
import re
from timeit import default_timer as timer
from typing import Tuple
import multiexit
from dlsgs.utils import ltl_parser
DEFAULT_BINARY_PATH = "bin"
def solve_ltl(formula_obj, tool, worker, timeout=None, simplify=False, no_disjunctions=False, witness=True, binary_path=DEFAULT_BINARY_PATH):
    """Check satisfiability of an LTL formula with the selected backend.

    Args:
        formula_obj: parsed LTL formula (ltl_parser object).
        tool: one of 'spot', 'aalta', 'leviathan'.
        worker: helper process used to run spot/leviathan with a timeout.
        timeout: seconds before giving up (None = no limit).
        simplify: simplify the witness trace (spot only).
        no_disjunctions: resolve disjunctions in the witness (spot only).
        witness: for aalta, whether to request an explicit witness trace.
        binary_path: directory containing the solver binaries.

    Returns:
        (sat, trace, info) — sat is True/False or None on timeout; trace is an
        ltl_parser trace object for satisfiable formulas, else None; info is a
        dict of solver statistics.
    """
    if tool == 'spot':
        formula_str = formula_obj.to_str('spot', spacing='all ops', full_parens=True)  # unambiguous form
        finished, res = worker.call(_solve_spot, (formula_str, simplify), timeout)
        if not finished:
            return None, None, {}  # timeout
        sat, trace, d = res
        assert sat is not None
        if sat:
            trace = ltl_parser.ltl_trace(trace, 'spot')
            if no_disjunctions:
                trace.decide_disjunctions()
        else:
            trace = None
        return sat, trace, d
    elif tool == 'aalta':
        # aalta has no weak-until operator; rewrite W before printing.
        formula_str = formula_obj.rewrite(ltl_parser.Token.WEAKUNTIL).to_str('spot', spacing='all ops', full_parens=True)  # unambiguous form
        assert not simplify
        if witness:
            sat, trace = aalta_sat_with_evidence(formula_str, timeout=timeout, binary_path=binary_path)
            return sat, ltl_parser.ltl_trace(trace, 'spot') if sat else None, {}
        else:
            try:
                sat = aalta_sat(formula_str, timeout=timeout)
            except RuntimeError:
                sat = None
            return sat, None, {}
    elif tool == 'leviathan':
        formula_str = formula_obj.to_str('spot', spacing='all ops', full_parens=True)  # unambiguous form
        assert not simplify
        worker.register_close_function(_close_leviathan)
        finished, res = worker.call(_solve_leviathan, (formula_str, binary_path), timeout)
        if not finished:
            return None, None, {}  # timeout
        sat, trace, d = res
        # leviathan traces are not symbolic (explicit sets of literals per step).
        return sat, ltl_parser.ltl_trace(trace, 'spot', symbolic=False) if sat else None, d
    else:
        raise ValueError("Unknown tool")
def _solve_leviathan(formula_str, binary_path=DEFAULT_BINARY_PATH):
    """Solve one formula with a persistent leviathan subprocess.

    The subprocess is started lazily on first use and kept in the module-global
    ``proc`` so subsequent calls reuse it. Returns (sat, trace_str, stats).
    """
    if not 'proc' in globals():
        global proc
        lev_path = path.join(binary_path, 'leviathan')
        # -m: model output, -p: parsable output, -v 4: verbose stats; read formulas from stdin.
        proc = subprocess.Popen([lev_path, '-m', '-p', '-v', '4', '/dev/stdin'], stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.STDOUT, universal_newlines=True, bufsize=1)
        multiexit.register(proc.kill)
    t_start = timer()
    #print(formula_str)
    sys.stdout.flush()
    proc.stdin.write(formula_str + '\n')
    # Collect verbose output until the SAT/UNSAT verdict line appears.
    stuff = ''
    while True:
        lastline = proc.stdout.readline()
        if lastline.startswith('SAT;') or lastline.startswith('UNSAT'):
            break
        else:
            stuff += lastline
    d = {'solve_time' : (timer() - t_start) * 1000 }
    # Optional statistics parsed from the verbose output (omitted when absent).
    d.update(find_int_in_output(r'Found (\d+) eventualities\n', stuff, 'lev_evtls', None))
    d.update(find_int_in_output(r'Total frames: (\d+)\n', stuff, 'lev_frames', None))
    d.update(find_int_in_output(r'Total STEPs: (\d+)\n', stuff, 'lev_steps', None))
    d.update(find_int_in_output(r'Maximum model size: (\d+)\n', stuff, 'lev_max_model_size', None))
    d.update(find_int_in_output(r'Maximum depth: (\d+)\n', stuff, 'lev_max_depth', None))
    if lastline.startswith('SAT;'):
        sat = True
        lastline = lastline[4:]
        # Model format: {lits} -> {lits} -> ... -> #k, where #k is the loop-back step.
        parts = lastline.strip().split(' -> ')
        if len(parts) == 1:
            assert parts[0] == '{}'
            trace = 'cycle{1}'
        else:
            loop_to = re.match(r'#(\d+)', parts[-1])
            assert loop_to
            loop_to = int(loop_to.groups()[0])
            parts_ = []
            for p in parts[:-1]:
                assert (p.startswith('{') and p.endswith('}'))
                p = p[1:-1]
                if p:
                    parts_.append(p)
                else:
                    # Empty step: any assignment works, encode as 'true'.
                    parts_.append('1')
            # Steps before loop_to form the prefix; the rest form the cycle.
            prefix = '; '.join(parts_[:loop_to])
            cycle = '{' + '; '.join(parts_[loop_to:]) + '}'
            trace = prefix + ('; ' if prefix else '') + 'cycle' + cycle
    else: # unsat
        sat = False
        trace = None
    return sat, trace, d
def find_int_in_output(regex, output, name, default):
    """Search ``output`` for ``regex`` and return its first group as an int.

    Returns ``{name: value}`` on a match, ``{name: default}`` when there is no
    match but a non-None default is given, and ``{}`` otherwise.
    """
    match = re.search(regex, output)
    if match:
        return {name: int(match.group(1))}
    return {name: default} if default is not None else {}
def _close_leviathan():
    """Shut down the persistent leviathan subprocess, if one was started."""
    if 'proc' in globals():
        # Drain any remaining output before killing the process.
        stdout, _ = proc.communicate(timeout=1)
        if stdout != '':
            print('Leviathan had trailing output:', stdout)
        proc.kill()
def _solve_spot(formula_str, simplify):
    """Check satisfiability with spot by translating to an automaton.

    Imports spot lazily into a module global (this function runs inside a
    worker process). Returns (sat, trace_str_or_None, stats).
    """
    if not 'spot' in globals():
        global spot
        spot = importlib.import_module('spot')
    t_start = timer()
    spot_formula = spot.formula(formula_str)
    automaton = spot_formula.translate()
    automaton.merge_edges()
    # The formula is satisfiable iff the automaton has an accepting run.
    acc_run = automaton.accepting_run()
    d = {'solve_time' : (timer() - t_start) * 1000 }
    if acc_run is None:
        return False, None, d
    else:
        trace = spot.twa_word(acc_run)
        if simplify:
            trace.simplify()
        return True, str(trace), d
def aalta_sat_with_evidence(formula: str, timeout=None, binary_path=DEFAULT_BINARY_PATH) -> Tuple[bool, str]:
    """Calls aalta to check if the provided formula is satisfiable and returns a witness if so.

    Returns (sat, trace): sat is True/False, or None on timeout (with trace
    None); for satisfiable formulas the trace is converted to spot syntax.

    Fix: the success path previously returned a 3-tuple ``(True, trace, {})``,
    which crashed the caller's 2-value unpacking and contradicted the declared
    return type.
    """
    full_aalta_path = path.join(binary_path, 'aalta')
    try:
        # arguments -l and -c do not seem to be necessary, but include them, for future versions
        res = subprocess.run([full_aalta_path, '-l', '-c', '-e'], input=formula, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=timeout, check=True, universal_newlines=True)
    except subprocess.TimeoutExpired:
        print('aalta timed out after {:.2f}s'.format(timeout))
        return None, None
    except subprocess.CalledProcessError as e:
        raise RuntimeError("aalta threw an error: " + e.stderr)
    m = re.fullmatch('please input the formula:\n((?:un)?sat)\n(.*)', res.stdout, re.MULTILINE | re.DOTALL)
    if not m:
        raise RuntimeError("Regular expression for aalta output did not match. Output: '" + res.stdout + "'")
    res_sat, res_trace = m.groups()
    if res_sat == 'unsat':
        return False, None
    # convert aalta trace to spot trace
    assert res_sat == 'sat'
    m = re.fullmatch('(.*[(]\n.*\n[)])\\^w\n', res_trace, re.MULTILINE | re.DOTALL)  # not really necessary, more as check
    if not m:
        raise RuntimeError("Regular expression for aalta trace did not match. Trace output: '" + res_trace + "'")
    trace_str = m.groups()[0]
    trace_str = re.sub('[{][}]\n', '1; ', trace_str)  # special case, yaay! convert {} directly to 1;
    trace_str = trace_str.replace('{', '').replace(',}', '').replace(',', ' & ')  # convert set {a, !b, c} to formula a & !b & c
    trace_str = trace_str.replace('true', '1')  # convert true literal to 1
    trace_str = re.sub('[(]\n', 'cycle{', trace_str)  # convert ( ... ) to cycle{...}
    trace_str = re.sub('\n[)]$', '}', trace_str)
    trace_str = re.sub('\n', '; ', trace_str)  # convert newlines to ;
    return True, trace_str
def aalta_sat(formula: str, timeout=None, binary_path=DEFAULT_BINARY_PATH, mute=False) -> bool:
    """Calls aalta to check if the provided formula is satisfiable.

    Returns True/False for sat/unsat, or None on timeout or solver error
    (errors are printed unless ``mute`` is set).
    """
    aalta_binary = path.join(binary_path, 'aalta')
    try:
        completed = subprocess.run(
            [aalta_binary],
            input=formula,
            stdout=subprocess.PIPE,
            stderr=subprocess.PIPE,
            timeout=timeout,
            check=True,
            universal_newlines=True,
        )
    except subprocess.TimeoutExpired:
        return None
    except subprocess.CalledProcessError as e:
        if not mute:
            print("!! aalta threw an error: " + e.stderr)
            print('for input formula', formula)
        return None
    match = re.fullmatch('please input the formula:\n((?:un)?sat)\n', completed.stdout, re.MULTILINE | re.DOTALL)
    if not match:
        raise RuntimeError("Regular expression for aalta output did not match. Output: '" + completed.stdout + "'")
    verdict = match.group(1)
    assert verdict in ('sat', 'unsat')
    return verdict == 'sat'
| ju-kreber/Transformers-and-GANs-for-LTL-sat | impl/dlsgs/data_generation/ltl.py | ltl.py | py | 8,473 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "dlsgs.utils.ltl_parser.ltl_trace",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "dlsgs.utils.ltl_parser",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "dlsgs.utils.ltl_parser.Token",
"line_number": 34,
"usage_type": "attribute"
},
... |
15554588830 | import os
import smtplib
import ssl
from email.message import EmailMessage
from email.utils import formataddr
from dotenv import load_dotenv
from pathlib import Path
# Load the environment variables
# Resolve this file's directory (falling back to the CWD when __file__ is
# unavailable, e.g. in an interactive session) and load the .env beside it.
current_dir = Path(__file__).resolve().parent if "__file__" in locals() else Path.cwd()
envars = current_dir / ".env"
load_dotenv(envars)
# Define email sender and receiver
# Credentials come from the environment; these are None if the vars are unset.
email_sender = os.getenv("EMAIL")
email_password = os.getenv("PASSWORD")
def send_email(subject, email_receiver, name, due_date, invoice_no, amount):
    """Send an invoice-reminder email (plain text + HTML alternative).

    Args:
        subject: email subject line.
        email_receiver: recipient address.
        name: recipient's name used in the salutation.
        due_date: human-readable payment due date.
        invoice_no: invoice identifier.
        amount: amount due (USD, as a string).

    Fix: the original set a ``BCC`` header but then called
    ``smtp.sendmail(email_sender, email_receiver, ...)``, which never
    delivered the BCC copy and leaked the Bcc header to the recipient.
    ``send_message`` delivers to To/Cc/Bcc and strips Bcc headers.
    """
    # Set the subject and body of the email
    em = EmailMessage()
    em["From"] = formataddr(("Email Testing.", f"{email_sender}"))
    em['To'] = email_receiver
    em['Subject'] = subject
    em["BCC"] = email_sender
    em.set_content(
        f"""\
Hi {name},

I hope you are well.

I just wanted to drop you a quick note to remind you that {amount} USD in respect of our invoice {invoice_no} is due for payment on {due_date}.

I would be really grateful if you could confirm that everything is on track for payment.

Best regards

YOUR NAME
"""
    )
    # Add the html version. This converts the message into a multipart/alternative
    # container, with the original text message as the first part and the new html
    # message as the second part.
    em.add_alternative(
        f"""\
<html>
  <body>
    <p>Hi {name},</p>
    <p>I hope you are well.</p>
    <p>I just wanted to drop you a quick note to remind you that <strong>{amount} USD</strong> in respect of our invoice {invoice_no} is due for payment on <strong>{due_date}</strong>.</p>
    <p>I would be really grateful if you could confirm that everything is on track for payment.</p>
    <p>Best regards</p>
    <p>YOUR NAME</p>
  </body>
</html>
""",
        subtype="html",
    )
    # Add SSL (layer of security)
    context = ssl.create_default_context()
    # Log in and send the email
    with smtplib.SMTP_SSL('smtp.gmail.com', 465, context=context) as smtp:
        smtp.login(email_sender, email_password)
        # send_message delivers to all To/Cc/Bcc recipients and removes
        # the Bcc header from the transmitted message.
        smtp.send_message(em)
if __name__ == "__main__":
    # Demo invocation with hard-coded example values.
    send_email(
        subject="Invoice Reminder",
        name="Mart Skuthi",
        email_receiver = 'martskuthi@gmail.com',
        due_date="11, Aug 2022",
        invoice_no="INV-21-12-009",
        amount="5",
) | martgjepali/E-mail-Automation | send_email.py | send_email.py | py | 2,464 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pathlib.Path.cwd",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "dotenv.load_dotenv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"lin... |
13967642156 | import boto3
from colorama import Fore, Back, Style
from functions import create_client
def help():
    """Print a colorized description of this enumeration module."""
    print(Fore.YELLOW + "\n================================================================================================" + Style.RESET_ALL)
    print("[+] Module Description:\n")
    print("\tThis module will enumerate the S3 buckets available in the account.")
    print("\tIt will print the found buckets and give you the option to enumerate.")
    print("\tthe objects on those buckets.\n")
    print("\tIf you choose to enumerate objects on the found buckets, it will")
    print("\tprint the list of objects from each bucket.")
    print(Fore.YELLOW + "================================================================================================" + Style.RESET_ALL)
def create_s3_client(botoconfig, session):
    """Build and return a boto3 S3 client via the project's Client wrapper."""
    return create_client.Client(botoconfig, session, 's3').create_aws_client()
def list_buckets(client):
    """Return the raw ListBuckets response for the account."""
    return client.list_buckets()
def parse_bucket_data(bucket_data):
    """Extract the bucket names from a ListBuckets response dict."""
    return [bucket['Name'] for bucket in bucket_data['Buckets']]
def list_bucket_objects(client, bucket_names):
    """List every object in each named bucket, following pagination.

    Returns a dict mapping bucket name -> list of object dicts (empty list
    for empty buckets).

    Fix: the original pagination loop assigned the continuation response to
    a misspelled variable (``reponse``), so ``response['IsTruncated']`` never
    changed and any bucket with more than one page looped forever while
    re-appending the first page's contents. Continuation pages are now also
    guarded against a missing 'Contents' key.
    """
    bucket_objects = {}
    for bucket in bucket_names:
        response = client.list_objects_v2(Bucket=bucket, MaxKeys=1000)
        if not response.get('Contents'):
            bucket_objects[bucket] = []
            continue
        bucket_objects[bucket] = list(response['Contents'])
        while response.get('IsTruncated'):
            response = client.list_objects_v2(
                Bucket=bucket, MaxKeys=1000,
                ContinuationToken=response['NextContinuationToken'])
            bucket_objects[bucket].extend(response.get('Contents', []))
    return bucket_objects
def main(botoconfig, session):
    """Enumerate S3 buckets and, on confirmation, the objects they contain.

    Prints all bucket names, then interactively asks whether to list objects
    (which may generate significant API traffic). Returns the raw ListBuckets
    response.
    """
    client = create_s3_client(botoconfig, session)
    print("\n[+] Starting Bucket Enumeration...\n")
    bucket_data = list_buckets(client)
    bucket_names = parse_bucket_data(bucket_data)
    for bucket in bucket_names:
        print("[+] Bucket Name: "+Fore.GREEN+"{}".format(bucket)+Style.RESET_ALL)
    print("\n[-] Do you want to enumerate objects in those buckets?")
    enumerate_objects = input("[-] WARNING: This could generate a lot of traffic [N/y]: ")
    if enumerate_objects.lower() == "y" or enumerate_objects.lower() == "yes":
        print("\n[+] Starting Bucket Objects Enumeration...")
        bucket_objects = list_bucket_objects(client, bucket_names)
        for bucket in bucket_names:
            print(Fore.GREEN + "\n[+] Objects in bucket: " + Style.RESET_ALL + "{}\n".format(bucket))
            for object in bucket_objects.get(bucket):
                print("- {}".format(object.get('Key')))
    return bucket_data
{
"api_name": "colorama.Fore.YELLOW",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "colorama.Fore",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "colorama.Style.RESET_ALL",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "co... |
75108619553 | #!/usr/bin/python3
"""
ๆฑ่ฅฟๆฐดๅบไฟกๆฏ
Author: cg
Date: 2020/7/11 15:58
"""
import json
import urllib.request
from crawl.spiders.base_spider import BaseSpider
from operate.schedulermgr import SchedulerMgr
from db.mapping.jiangxi_reservoir.jiangxi_reservoir import JiangxiReservoir
from db.mapping.jiangxi_reservoir.jiangxi_reservoir_item import JiangxiReservoirItem
from db.mapping.jiangxi_reservoir.jiangxi_reservoir_item_place import JiangxiReservoirItemPlace
from util import time_util
from sanic.log import logger
from sanic.log import error_logger
class JiangxiReservoirSpider(BaseSpider):
    """Spider that periodically scrapes Jiangxi province reservoir data.

    Fix: the original comments and log-message string literals were
    mojibake-corrupted (one literal was even broken across source lines);
    they are restored as English text. All scraping logic is unchanged.
    """

    # Endpoint serving the reservoir map data.
    _base_url = "http://111.75.205.67:7080/syq/reservoirmap/reservoirMapHandler"

    # key: spider timestamp (ms); value: the data from that scrape,
    # wrapped in a JiangxiReservoirItem.
    _data_ = dict()

    def __init__(self):
        super().__init__(self._base_url, True)

    def run(self):
        # Run at minute 15 and 45 (second 30) of every hour, every day.
        SchedulerMgr.add_job_cron(self.request_begin, day_of_week='*', hour='*', minute='15,45', second='30')

    def request(self):
        """Fetch the endpoint, decode the JSON payload and store the result."""
        res = urllib.request.urlopen(self.client)
        res_str = res.read().decode("utf-8")
        res.close()
        res_dict = json.loads(res_str)
        logger.info("received response data: " + str(res_dict))
        suc = self.operate_data(res_dict)
        if not suc:
            error_logger.error("failed to process response from " + self._base_url)

    def operate_data(self, res_dict):
        """Validate the payload and cache one parsed item keyed by scrape time."""
        if 'data' not in res_dict:
            logger.error("response does not contain a 'data' field")
            return False
        dict_data = res_dict['data']
        cur_ts = time_util.getcurrent_ts_millis()
        data_item = self.operate_item_data(cur_ts, dict_data)
        self._data_[cur_ts] = data_item
        return True

    def operate_item_data(self, cur_ts, dict_data):
        """Map the raw 'data' dict onto a JiangxiReservoirItem."""
        item = JiangxiReservoirItem()
        item.spiderTs = cur_ts
        item.upCodes = dict_data['upCodes']
        item.warnStatNum = dict_data['overTopFLZSize']
        item.warnStatInfo = self.operate_item_space(dict_data['overTopFLZ'])
        item.desc = dict_data['summarize']
        item.totalStatInfo = self.operate_item_space(dict_data['rows'])
        return item

    def operate_item_space(self, warn_list):
        """Convert raw station dicts into plain dicts of place items."""
        res = list()
        for item in warn_list:
            data = self.build_space_info(item)
            res.append(data.__dict__)
        return res

    @staticmethod
    def build_space_info(dict_info):
        """Build a JiangxiReservoirItemPlace from one raw station record."""
        item_data = JiangxiReservoirItemPlace()
        item_data.hTM = dict_info['hTM']
        item_data.county = dict_info['county']
        item_data.level = dict_info['rz']
        item_data.enterCapacity = dict_info['inq']
        item_data.outCapacity = dict_info['otq']
        item_data.beginLevel = dict_info['ffsltdz']
        item_data.afterLevel = dict_info['fsltdz']
        item_data.exceedLevel = dict_info['cfsltdz']
        item_data.stcd = dict_info['stcd']
        item_data.stnm = dict_info['stnm']
        item_data.tm = dict_info['tm']
        item_data.style = dict_info['style']
        item_data.w = dict_info['w']
        item_data.blrz = dict_info['blrz']
        item_data.fsltdz = dict_info['fsltdz']
        return item_data

    def get_collection(self):
        # DB mapping class used to persist the scraped documents.
        return JiangxiReservoir
| 0827cg/cgspiders | crawl/spiders/jiangxi_reservoir.py | jiangxi_reservoir.py | py | 3,313 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "crawl.spiders.base_spider.BaseSpider",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "operate.schedulermgr.SchedulerMgr.add_job_cron",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "operate.schedulermgr.SchedulerMgr",
"line_number": 33,
... |
38812425841 | import numpy as np
import cv2
img=cv2.imread('img4.png',0)  # load the input image as grayscale
alto,ancho=img.shape[:2]  # image height (alto) and width (ancho)
# Working buffers for the successive filter stages.
img1=np.zeros([alto, ancho], np.uint8)
img2=np.zeros([alto, ancho], np.uint8)
img3=np.zeros([alto, ancho], np.uint8)
# 3x3 smoothing kernel (weights sum to 1) and 3x3 sharpening kernel.
k = np.array([[0, 1, 0], [1, 6, 1], [0, 1, 0]])/10
k1 = np.array([[0, -1, 0], [-1, 5, -1], [0, -1, 0]])
img1[:,:] = img[:,:]  # initialize img1 as a copy of the source image
def filtros(img, kern, kern1):
    """Run the filter pipeline: two 3x3 median passes, a manual 3x3
    convolution with ``kern``, and a final sharpening pass with ``kern1``.

    NOTE(review): this function reads and writes the module-level buffers
    img1/img2 and the globals alto/ancho — it is not self-contained.

    Fix: the manual convolution's inner loop used ``range(0, 2)`` and so
    accumulated only 2 of the 3 kernel columns; it now covers the full 3x3
    window, with the column range tightened so the window stays in bounds.
    """
    # First median pass: read from the input image, write into img1.
    for x in range(1, alto - 1):
        for y in range(1, ancho - 1):
            window = np.array([[img[x-1, y-1], img[x-1, y], img[x-1, y+1]],
                               [img[x, y-1], img[x, y], img[x, y+1]],
                               [img[x+1, y-1], img[x+1, y], img[x+1, y+1]]])
            img1[x, y] = int(np.median(window))
    # Second median pass over img1, in place (as in the original: already
    # filtered neighbours feed into later pixels).
    for x in range(1, alto - 1):
        for y in range(1, ancho - 1):
            window = np.array([[img1[x-1, y-1], img1[x-1, y], img1[x-1, y+1]],
                               [img1[x, y-1], img1[x, y], img1[x, y+1]],
                               [img1[x+1, y-1], img1[x+1, y], img1[x+1, y+1]]])
            img1[x, y] = int(np.median(window))
    # Manual 3x3 convolution of img1 with `kern`, anchored at the window's
    # top-left corner (original behavior), written into img2.
    for x in range(0, alto - 2):
        for y in range(0, ancho - 2):
            acc = 0
            for i in range(3):
                for j in range(3):
                    acc += img1[x + i, y + j] * kern[i, j]
            img2[x, y] = acc
    # Sharpen img2 with kern1 using OpenCV's optimized convolution.
    return cv2.filter2D(img2, ddepth=-1, kernel=kern1, anchor=(-1, -1))
img3=filtros(img, k, k1)  # run the full filter pipeline
cv2.imshow('Imagen original Imagen con filtro', np.hstack([img,img1,img2,img3 ])) # Show the images side by side
cv2.waitKey(0) # Wait for any key press before closing the window
cv2.destroyAllWindows() # Cierre de ventanas | jaime1315321/VisionArtificial2022_2 | andres_carpeta/MATERAIAL COMPARTIDO/vision/sebas/Punto_e.py | Punto_e.py | py | 1,991 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "numpy.uint8",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number":... |
2371194732 | import json
import os
from datetime import timedelta, date, datetime
from flask import render_template, request, redirect, Response, send_from_directory, session, url_for, abort
from flask.views import MethodView
from flask_admin.contrib import sqla
from flask_security import current_user
from clocking.models import db, Person, Entry, Address
from clocking.api.forms import PersonForm, MacForm, SelectForm, LoginForm
from clocking.api.generate_report import generate_report
from instance.settings import REPORT_DIR, REPORT_FILE
class ProtectedModelView(sqla.ModelView):
def is_accessible(self):
if not current_user.is_active or not current_user.is_authenticated:
return False
if current_user.has_role('superuser'):
return True
return False
def _handle_view(self, name, **kwargs):
"""
Override builtin _handle_view in order to redirect users when a view is not accessible.
"""
if not self.is_accessible():
if current_user.is_authenticated:
abort(403)
else:
return redirect(url_for('security.login', next=request.url))
class PersonAddView(MethodView):
def get(self):
form = PersonForm()
return render_template('add.html', form=form)
def post(self):
data = {}
form = PersonForm(request.form)
if form.validate():
person = form.save()
data['html'] = render_template('bits/mac_form.html', person=person)
data['status'] = 'success'
else:
data['html'] = render_template('bits/form_errors.html', form=form)
data['status'] = 'error'
return Response(json.dumps(data), content_type='application/json')
class MacAddView(MethodView):
def post(self):
data = {}
form = MacForm(request.form)
if form.validate():
address = form.save()
data['html'] = render_template('bits/mac_listing.html',
address=address)
data['status'] = 'success'
else:
data['html'] = render_template('bits/form_errors.html',
form=form)
data['status'] = 'error'
return Response(json.dumps(data), content_type='application/json')
class MacDeleteView(MethodView):
def get(self, mac_address):
address = db.session.query(Address).get(mac_address)
return render_template('delete.html', address=address)
def post(self, mac_address):
address = db.session.query(Address).get(mac_address)
address.deleted = True
try:
db.session.commit()
except:
db.session.rollback()
persons = filter_persons_addresses()
return render_template('people.html', persons=persons)
class PersonEditView(MethodView):
def get(self, person_id):
person = db.session.query(Person).get(person_id)
person_form = PersonForm()
mac_form = MacForm()
return render_template('edit.html', person_form=person_form,
mac_form=mac_form, person=person)
def post(self, person_id):
data = {}
form = PersonForm(request.form)
if form.validate():
person = form.save(person_id)
data['html'] = render_template('bits/person_edit.html',
person=person)
data['status'] = 'success'
else:
data['html'] = render_template('bits/person_edit.html',
form=form)
data['status'] = 'error'
return Response(json.dumps(data), content_type='application/json')
class PersonListView(MethodView):
def get(self):
persons = filter_persons_addresses()
return render_template('people.html', persons=persons)
class PersonClockingView(MethodView):
def get(self):
form = SelectForm()
start_date, end_date = date.today(), date.today()
if request.args:
form = SelectForm(request.args)
if form.validate():
start_date = form.data.get('start_date')
end_date = form.data.get('end_date')
days = get_days(start_date, end_date)
entries = get_all_entries(days)
return render_template('clocking.html', form=form, entries=entries,
start_date=start_date, end_date=end_date)
class DownloadView(MethodView):
def get(self, start_date, end_date):
start_date = datetime.strptime(start_date, '%Y-%m-%d')
end_date = datetime.strptime(end_date, '%Y-%m-%d')
days = get_days(start_date, end_date)
entries = get_all_entries(days)
generate_report(entries)
file = send_from_directory(directory=REPORT_DIR,
filename=REPORT_FILE,
as_attachment=True)
os.remove(REPORT_DIR + REPORT_FILE)
return file
class AboutView(MethodView):
def get(self):
return render_template('about.html')
def filter_persons_addresses():
persons = Person.query.join(Address).filter(
Address.deleted==False).order_by(Person.first_name)
for person in persons:
person.addresses = [address for address in person.addresses if
address.deleted is False]
return persons
def get_days(start_date, end_date):
interval = end_date - start_date
days = []
for i in range(interval.days + 1):
days.append(start_date + timedelta(days=i))
return days
def get_all_entries(days):
entries = []
for day in days:
entry = {}
entry['day'] = day
entry['entries_by_day'] = get_entries_by_day(day).all()
if entry['entries_by_day']:
entries.append(entry)
return entries
def get_entries_by_day(day):
entries = Entry.query
return entries.filter(Entry.startdate >= day).filter(
Entry.startdate <= day + timedelta(hours=24)).order_by(
Entry.startdate)
| eaudeweb/mac-logging | clocking/api/view.py | view.py | py | 6,160 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask_admin.contrib.sqla.ModelView",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask_admin.contrib.sqla",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask_security.current_user.is_active",
"line_number": 20,
"usage_type":... |
25191387496 | import os
import pathlib
import gzip
from itertools import groupby
from typing import List, Dict, TextIO, Tuple, Union
import numpy as np
import pandas as pd
import plot
# type aliases
NX_DICT = Dict[str, List[int]]
AUN_DICT = Dict[str, float]
class AssemblyCollection:
def __init__(
self,
paths: List[Union[pathlib.Path, str]],
reference_path: Union[pathlib.Path, str] = None,
genome_size: int = None,
out_name: str = 'pauNy'
):
# make sure that input paths are pathlib.Paths
self._assembly_paths = convert_paths(paths)
self.ref_name = ''
self.out_name = out_name
if reference_path:
# if reference file is given, save its name and load the genome size
self.ref_path = pathlib.Path(reference_path)
self.ref_name = self.ref_path.name
self._assembly_paths.append(self.ref_path)
self.genome_size = Reference(path=self.ref_path).genome_size()
elif genome_size:
# if genome size estimate is given
self.genome_size = genome_size
else:
self.genome_size = 0
# initialise an assembly object for all input files
self.assemblies = self._initialise_assemblies(assembly_paths=self._assembly_paths)
def calculate_metrics(self) -> None:
"""
Calculate Nx and auN values for all assemblies in the collection
:return: None
"""
self.nx_values = self._calculate_Nx()
self.aun_values = self._calculate_auN()
# generate pandas frames and write to file
self.nx_frame = self._generate_nx_frame(self.nx_values)
self.aun_frame = self._generate_aun_frame(self.aun_values)
# write to csv files
print_frame(df=self.nx_frame, out_file=f'{self.out_name}.nx.csv')
print_frame(df=self.aun_frame, out_file=f'{self.out_name}.aun.csv')
def plot(self, format: str = 'pdf') -> None:
"""
Generate a plot of Nx curves and auN values from all input assemblies
:param format: Format passed to plotnine's save function
:return: None
"""
try:
getattr(self, 'nx_frame')
except AttributeError:
self.calculate_metrics()
plot.save_plot(
plot=plot.plot_nx(self.nx_frame),
type="nx",
out_name=self.out_name,
format=format
)
plot.save_plot(
plot=plot.plot_aun(self.aun_frame),
type="aun",
out_name=self.out_name,
format=format
)
def _calculate_Nx(self) -> NX_DICT:
"""
Calculate Nx values for each assembly
:return: Dict of Nx values per assembly
"""
nx_values = dict()
for asm in self.assemblies:
nx_vector = asm.calculate_Nx()
nx_values[asm.name] = nx_vector
return nx_values
def _calculate_auN(self) -> AUN_DICT:
"""
Calculate auN values for each assembly
:return: Dict of auN values per assembly
"""
aun_values = dict()
for asm in self.assemblies:
aun = asm.calculate_auN()
aun_values[asm.name] = aun
return aun_values
def _generate_nx_frame(self, nx_values: NX_DICT) -> pd.DataFrame:
"""
Generate pandas frame from values of all assemblies
:param nx_values: Dict of Nx values per assembly
:return: Dataframe of Nx values for all assemblies
"""
ndf = {'Nx': [], 'val': [], 'name': [], 'reference': []}
for name, vector in nx_values.items():
ndf['Nx'].extend(np.arange(1, 101))
ndf['val'].extend(vector)
ndf['name'].extend([name] * 100)
# mark the reference assembly for downstream analyses
if name == self.ref_name:
refv = np.ones(100, dtype="bool")
else:
refv = np.zeros(100, dtype="bool")
ndf['reference'].extend(refv)
# transform to dataframe
nx_df = pd.DataFrame(ndf)
return nx_df
def _generate_aun_frame(self, aun_values: AUN_DICT) -> pd.DataFrame:
"""
Generate pandas frame from values of all assemblies
:param aun_values: Dict of auN values per assembly
:return: Dataframe of auN values for all assemblies
"""
ndf = {'auN': [], 'name': [], 'reference': []}
for name, value in aun_values.items():
ndf['auN'].append(value)
ndf['name'].append(name)
# mark the reference assembly for downstream analyses
refi = True if name == self.ref_name else False
ndf['reference'].append(refi)
# transform to dataframe
aun_df = pd.DataFrame(ndf)
return aun_df
def _initialise_assemblies(self, assembly_paths: List[pathlib.Path]) -> List['Assembly']:
"""
Load an Assembly object for each input file
:param assembly_paths: List of input Paths
:return: List of initialised Assembly objects
"""
assemblies = []
for ap in assembly_paths:
assemblies.append(Assembly(path=ap, gsize=self.genome_size))
return assemblies
class Assembly:
def __init__(self, path: Union[pathlib.Path, str], gsize: int = None):
self.path = pathlib.Path(path)
self.name = self.path.name
self.gsize = 0 if not gsize else gsize
self.gzipped = is_gzipped(self.path)
self.open_func = gzip.open if self.gzipped else open
self.symbol = self._which_fx()
self.lengths = self._get_sequence_lengths()
def _which_fx(self) -> str:
"""
read first byte to determine file type
:return: header symbol of filetype
"""
with self.open_func(self.path, 'rb') as f:
symbol = str(f.read(1), 'utf-8')
if symbol not in {'@', '>'}:
raise FileNotFoundError("Input is neither fa nor fq")
return symbol
def _get_sequence_lengths(self) -> np.ndarray:
"""
Load the sequence lengths from a file
:return: array of sequence lengths
"""
assert os.path.getsize(self.path) != 0
seq_lengths = []
with self.open_func(self.path, 'r') as fx:
for header, seq in read_fx(fx, self.gzipped, self.symbol):
seq_lengths.append(len(seq))
# get lengths of all sequences
lengths = np.array(seq_lengths)
return lengths
def calculate_Nx(self) -> List[int]:
"""
Calculate Nx values for this assembly
:return: Nx values of the assembly
"""
if not self.gsize:
gsize = np.sum(self.lengths)
else:
gsize = self.gsize
nx = []
# sort sequence length and calc cumulative sum
seq_lengths_sorted = np.sort(self.lengths)[::-1]
seq_lengths_sorted_cuml = np.cumsum(seq_lengths_sorted)
asm_perc = np.arange(0.01, 1.01, 0.01)
# multiply either by total contig length
# or by reference length/genome estimate
asm_p = asm_perc * gsize
for i in range(len(asm_p)):
j = 0
try:
while seq_lengths_sorted_cuml[j] < asm_p[i]:
j += 1
nx.append(seq_lengths_sorted[j])
except IndexError:
nx.append(0)
return nx
def calculate_auN(self) -> float:
"""
Calculate area under Nx for the assembly
:return: auN value
"""
if not self.gsize:
aun = np.sum(np.power(self.lengths, 2)) / np.sum(self.lengths)
else:
aun = np.sum(self.lengths * (self.lengths / self.gsize))
return aun
class Reference(Assembly):
def genome_size(self) -> np.ndarray:
"""
Sum of reference sequence lengths used for NGx and auNG values
:return: Total sequence length of reference file
"""
return np.sum(self.lengths)
def print_frame(df: pd.DataFrame, out_file: str = None) -> None:
"""
Print a dataframe in csv format either to stdout or to file
:param df: Pandas data frame
:param out_file: name of an output file
:return: None
"""
if not out_file:
print(df.to_csv())
else:
df.to_csv(out_file)
def read_fx(fh: TextIO, gz: bool, symbol: str = '>') -> Tuple[str, str]:
"""
Yield headers and sequences of a fasta file
:param fh: File handle of an open file connection
:param gz: is file gzipped
:param symbol: header symbol to determine fq or fa
:return: Tuple of fasta header and sequence
"""
if not gz:
faiter = (x[1] for x in groupby(fh, lambda line: line[0] == symbol))
for header in faiter:
headerStr = header.__next__().strip().replace(symbol, '').split(' ')[0]
seq = "".join(s.strip() for s in faiter.__next__())
yield headerStr, seq
else:
faiter = (x[1] for x in groupby(fh, lambda line: str(line, 'utf-8')[0] == symbol))
for header in faiter:
headerStr = str(header.__next__(), 'utf-8').strip().replace(symbol, '').split(' ')[0]
seq = "".join(str(s, 'utf-8').strip() for s in faiter.__next__())
yield headerStr, seq
def convert_paths(paths: List[Union[pathlib.Path, str]]) -> List[pathlib.Path]:
"""
Make sure that all paths in a list are pathlib.Path objects
:param paths: List of paths
:return: List of pathlib.Path objects
"""
conv_paths = []
for p in paths:
conv_paths.append(pathlib.Path(p))
return conv_paths
def is_gzipped(f: Union[pathlib.Path, str]) -> bool:
"""
Check if a file is gzipped
:param f: File path
:return: bool
"""
isgz = True
with gzip.open(f, 'r') as fh:
try:
fh.read(1)
except gzip.BadGzipFile:
isgz = False
return isgz
| W-L/pauNy | pauNy/lib.py | lib.py | py | 10,096 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.Dict",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": ... |
72357287713 | import beneath
from config import SUBREDDIT
from generators import posts, comments
with open("schemas/post.graphql", "r") as file:
POSTS_SCHEMA = file.read()
with open("schemas/comment.graphql", "r") as file:
COMMENTS_SCHEMA = file.read()
def make_table_name(subreddit, kind):
name = subreddit.replace("+", "-")
return f"r-{name}-{kind}"
def make_subreddit_description(subreddit, kind):
subs = [f"/r/{sub}" for sub in subreddit.split("+")]
names = subs[0]
if len(subs) > 1:
names = ", ".join(subs[:-1]) + " and " + subs[-1]
return (
f"Reddit {kind} scraped in real-time from {names}. Some {kind} may be missing."
)
if __name__ == "__main__":
p = beneath.Pipeline(parse_args=True, disable_checkpoints=True)
p.description = "Scrapes posts and comments from Reddit"
posts = p.generate(posts.generate_posts)
p.write_table(
posts,
make_table_name(SUBREDDIT, "posts"),
schema=POSTS_SCHEMA,
description=make_subreddit_description(SUBREDDIT, "posts"),
)
comments = p.generate(comments.generate_comments)
p.write_table(
comments,
make_table_name(SUBREDDIT, "comments"),
schema=COMMENTS_SCHEMA,
description=make_subreddit_description(SUBREDDIT, "comments"),
)
p.main()
| beneath-hq/beneath | examples/reddit/main.py | main.py | py | 1,323 | python | en | code | 75 | github-code | 1 | [
{
"api_name": "beneath.Pipeline",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "generators.posts",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "generators.posts.generate_posts",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name":... |
16735200242 | # coding=utf-8
from charm.toolbox.pairinggroup import *
from utils.newsecretutils import Utils
from charm.toolbox.ABEncMultiAuth import ABEncMultiAuth
import re
# from newjson import ElementEncoder, ElementDecoder
import utils.newjson as newjson
import queue
import time
import threading
debug = False
import newma
import sys
import setting
class Alice:
def __init__(self):
self.groupObj = PairingGroup('SS512')
self.dabe = newma.Dabe(self.groupObj)
self.GP={}
self.pks={}
self.sks={}
self.GP= newjson.loads(open("global_parameters.json","r").read())
self.n=self.GP["n"]
self.t=int(self.n/2+1)
self.pks=self.GP["pks"]
self.sks= newjson.loads(open("secretKeys.json","r").read())
def send_ciphertext(self):
m = self.groupObj.random(ZR)
nattributes = ["ATTR@AUTH"+str(j) for j in range(1, self.n+1)]
policy = '(2 of (%d of (%s), ATTR@ALICE, ATTR@BOB))' % (self.n/2+1, ", ".join(nattributes))
# print(policy)
# print('Acces Control Policy: %s' % policy)
print("There are %d TTPs" % self.n)
ts=time.time()
CT = self.dabe.encrypt(self.GP, self.pks, m, policy)
# print("encryp time:",time.time()-ts)
open("x.txt","w").write(newjson.dumps({"ct":CT,"m":m}))
print('Alice\'s ciphertext has been written to x.txt')
def verify_ciphertext(self):
egg=pair(self.GP['g'],self.GP['g'])
CT_BOB=newjson.loads(open("y.txt","r").read())["ct"]
return self.dabe.isValid(CT_BOB, self.GP,self.GP["pks"])
# return True
def getDHKey(self):
gt=eval(str(newjson.loads(open("x.txt","r").read())["m"]))
D=newjson.loads(open("y.txt","r").read())["ct"]["D"]
return (D**gt[0])**gt[1]
def ElgamalEnc(self, K, pk):
l=self.groupObj.random()
# print(K["K"]pk**l)
EK1=K["K"]* (pk**l)
EK2=self.GP['g']**l
EK3=K["KP"]
return {"EK1":EK1,"EK2":EK2,"EK3":EK3}
def ElgamalDec(self, EK, sk):
return {"K":EK["EK1"]/(EK["EK2"]**sk),"KP":EK["EK3"]}
def send_decryptionkey(self):
gid, abeKey = "EXid", {}
abeKey["ALICE"] = self.dabe.keygen(self.GP, self.sks["ALICE"], gid, "ATTR@ALICE")
encKey=self.ElgamalEnc(abeKey['ALICE'],self.pks["BOB"]["gz"])
open("K_ALICE.txt","w").write(newjson.dumps(encKey))
print("Alice sends decryption key using ElGamal encryption")
def decrypt_CT(self):
decKey = {'GID': "EXid", 'keys': {}}
BOBEK=newjson.loads(open("K_BOB.txt","r").read())
BOBK=self.ElgamalDec(BOBEK,self.sks["ALICE"]["z"])
# print(BOBK)
ALICEK = self.dabe.keygen(self.GP, self.sks["ALICE"], decKey["GID"], "ATTR@ALICE")
decKey['keys']["ATTR@BOB"]=BOBK
decKey['keys']["ATTR@ALICE"]=ALICEK
CT_BOB=newjson.loads(open("y.txt","r").read())["ct"]
m_BOB=pair(self.GP['g'],self.GP['g']) ** newjson.loads(open("y.txt","r").read())["m"]
y = self.dabe.decrypt(self.GP, decKey, CT_BOB)
assert(y==m_BOB)
return True
if __name__ == '__main__':
# main(int(sys.argv[1]))
print('Alice: Optimistic Fair Exchange of x')
print()
print('Commands:')
print(' [1] Publish Ciphertext of secret x [2] Verify ciphertext of y')
print(' [3] Transfer decryption key [4] Decrypt the Ciphertext to get y')
print()
alice = Alice()
N = setting.N
t=setting.t
alice.n=N
while True:
choice = int(input('Enter your choice: '))
if choice == 1:
alice.send_ciphertext()
elif choice == 2:
if alice.verify_ciphertext():
print("Bob's ciphertext is correct")
elif choice == 3:
alice.send_decryptionkey()
elif choice == 4:
if alice.decrypt_CT():
print("Alice obtains Bob's secret")
break
elif choice == 0:
print('Quitting.\n')
break
else:
print('Invalid choice. Valid chocices are 0 to 4.\n')
| scottocs/OFE-ABE | Alice.py | Alice.py | py | 4,244 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "newma.Dabe",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "utils.newjson.loads",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "utils.newjson",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "utils.newjson.loads",
... |
23885295653 | from functools import partial
from numbers import Number
from typing import Iterable
import numpy as np
from ...mat_gen import zeros
from ...node import Node
from .base import (
_assemble_wout,
_compute_error,
_initialize_readout,
_prepare_inputs_for_learning,
_split_and_save_wout,
readout_forward,
)
def _lms(alpha, r, e):
"""Least Mean Squares learning rule."""
# learning rate is a generator to allow scheduling
dw = -next(alpha) * np.outer(e, r)
return dw
def train(node: "LMS", x, y=None):
"""Train a readout using LMS learning rule."""
x, y = _prepare_inputs_for_learning(x, y, bias=node.input_bias, allow_reshape=True)
error, r = _compute_error(node, x, y)
alpha = node._alpha_gen
dw = _lms(alpha, r, error)
wo = _assemble_wout(node.Wout, node.bias, node.input_bias)
wo = wo + dw.T
_split_and_save_wout(node, wo)
def initialize(
readout: "LMS", x=None, y=None, init_func=None, bias_init=None, bias=None
):
_initialize_readout(readout, x, y, init_func, bias_init, bias)
class LMS(Node):
"""Single layer of neurons learning connections using Least Mean Squares
algorithm.
The learning rules is well described in [1]_.
:py:attr:`LMS.params` **list**
================== =================================================================
``Wout`` Learned output weights (:math:`\\mathbf{W}_{out}`).
``bias`` Learned bias (:math:`\\mathbf{b}`).
``P`` Matrix :math:`\\mathbf{P}` of RLS rule.
================== =================================================================
:py:attr:`LMS.hypers` **list**
================== =================================================================
``alpha`` Learning rate (:math:`\\alpha`) (:math:`1\\cdot 10^{-6}` by default).
``input_bias`` If True, learn a bias term (True by default).
================== =================================================================
Parameters
----------
output_dim : int, optional
Number of units in the readout, can be inferred at first call.
alpha : float or Python generator or iterable, default to 1e-6
Learning rate. If an iterable or a generator is provided, the learning rate can
be changed at each timestep of training. A new learning rate will be drawn from
the iterable or generator at each timestep.
Wout : callable or array-like of shape (units, targets), default to :py:func:`~reservoirpy.mat_gen.zeros`
Output weights matrix or initializer. If a callable (like a function) is
used, then this function should accept any keywords
parameters and at least two parameters that will be used to define the shape of
the returned weight matrix.
bias : callable or array-like of shape (units, 1), default to :py:func:`~reservoirpy.mat_gen.zeros`
Bias weights vector or initializer. If a callable (like a function) is
used, then this function should accept any keywords
parameters and at least two parameters that will be used to define the shape of
the returned weight matrix.
input_bias : bool, default to True
If True, then a bias parameter will be learned along with output weights.
name : str, optional
Node name.
References
----------
.. [1] Sussillo, D., & Abbott, L. F. (2009). Generating Coherent Patterns of
Activity from Chaotic Neural Networks. Neuron, 63(4), 544โ557.
https://doi.org/10.1016/j.neuron.2009.07.018
"""
def __init__(
self,
output_dim=None,
alpha=1e-6,
Wout=zeros,
bias=zeros,
input_bias=True,
name=None,
):
if isinstance(alpha, Number):
def _alpha_gen():
while True:
yield alpha
alpha_gen = _alpha_gen()
elif isinstance(alpha, Iterable):
alpha_gen = alpha
else:
raise TypeError(
"'alpha' parameter should be a float or an iterable yielding floats."
)
super(LMS, self).__init__(
params={"Wout": None, "bias": None},
hypers={
"alpha": alpha,
"_alpha_gen": alpha_gen,
"input_bias": input_bias,
},
forward=readout_forward,
train=train,
initializer=partial(
initialize, init_func=Wout, bias_init=bias, bias=input_bias
),
output_dim=output_dim,
name=name,
)
| reservoirpy/reservoirpy | reservoirpy/nodes/readouts/lms.py | lms.py | py | 4,648 | python | en | code | 296 | github-code | 1 | [
{
"api_name": "numpy.outer",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "base._prepare_inputs_for_learning",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "node.input_bias",
"line_number": 28,
"usage_type": "attribute"
},
{
"api_name": "ba... |
6127715262 | from bs4 import BeautifulSoup
import re
from random_uk_bank_account.client.vocalink import VocalinkApi
from random_uk_bank_account.utils.exceptions import ErrorInferringVocalinkVersions
SORT_CODE_SUB_LINK_TEXT = "Sorting Code Substitution Data"
MODULUS_WEIGHT_TABLE_LINK_TEXT = "Modulus weight table data"
def _extract_version(html: BeautifulSoup, link_text: str, logger):
try:
sort_code_link = html.find_all("a", string=link_text)[0].attrs['href']
return re.findall(r'/media/(.+?).txt', sort_code_link)[0]
except Exception as e:
raise ErrorInferringVocalinkVersions(logger, e)
def get_inferred_latest_versions(logger):
html = BeautifulSoup(VocalinkApi().get_vocalink_modulus_checking_page(), features="html.parser")
modulus_version = _extract_version(html, MODULUS_WEIGHT_TABLE_LINK_TEXT, logger)
sort_code_substitution = _extract_version(html, SORT_CODE_SUB_LINK_TEXT, logger)
return modulus_version, sort_code_substitution
| j-puri/random-uk-bank-account | random_uk_bank_account/vocalink/vocalink_version.py | vocalink_version.py | py | 978 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "bs4.BeautifulSoup",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "re.findall",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "random_uk_bank_account.utils.exceptions.ErrorInferringVocalinkVersions",
"line_number": 15,
"usage_type": "c... |
28602926196 | '''
ใใฎใณใผใใฏimabariใใใฎใณใผใใๅ
ใซไฝๆใใฆใใพใใ
https://github.com/imabari/covid19-data/blob/master/aichi/aichi_ocr.ipynb
'''
import pathlib
import re
import requests
from bs4 import BeautifulSoup
from urllib.parse import urljoin
import pytesseract
import csv
import recognize_main_summary_date_1 as date_pattern1
import recognize_main_summary_date_2 as date_pattern2
import recognize_main_summary_table_1 as table_pattern1
import recognize_main_summary_table_2 as table_pattern2
import recognize_main_summary_table_3 as table_pattern3
import recognize_main_summary_remarks_1 as remarks_pattern1
import recognize_main_summary_remarks_2 as remarks_pattern2
headers = {
"User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64; Trident/7.0; rv:11.0) like Gecko",
}
def get_file(url, dir="."):
r = requests.get(url, headers=headers)
p = pathlib.Path(dir, "main_summary" + pathlib.PurePath(url).suffix)
p.parent.mkdir(parents=True, exist_ok=True)
with p.open(mode='wb') as fw:
fw.write(r.content)
return p
def recognize_date_patterns(reconize_funcs):
pattern = 0
for reconize_date in reconize_funcs:
try:
pattern = pattern + 1
print("Pattern" + str(pattern) + " Start")
data = reconize_date(jpg_path)
return pattern, data
except Exception as e:
print(e)
return None
def recognize_remarks_patterns(reconize_funcs):
pattern = 0
for reconize_remarks in reconize_funcs:
try:
pattern = pattern + 1
print("Pattern" + str(pattern) + " Start")
data = reconize_remarks(jpg_path)
# Validation
if len(data) <= 0:
raise ValueError("OCR Failed. ๆณจ่จใๅๅพใงใใพใใใงใใใ")
return pattern, data
except Exception as e:
print(e)
return None
def recognize_table_patterns(reconize_funcs):
pattern = 0
for reconize_table in reconize_funcs:
try:
pattern = pattern + 1
print("Pattern" + str(pattern) + " Start")
row = reconize_table(jpg_path)
# dataใฎๅ
้ ญใใ [ๆคๆปๅฎๆฝไบบๆฐ,้ฝๆงๆฃ่
ๆฐ,ๅ
ฅ้ข,ๅ
ฅ้ข_่ปฝ็็ก็็ถ,ไธญ็ญ็,้็,ๅ
ฅ้ข่ชฟๆด,ๆฝ่จญๅ
ฅๆ,่ชๅฎ
็้ค,่ชฟๆด,้้ข,ๆญปไบก] ใงใใใจๆฑบใๆใก
data = row[0:12]
# Valiadation
# ๅ
ฅ้ข่
ๆฐใฎๅ่จๅใจ่ฆ็ด ๅ็พคๅคใฎๅ่จใไธ่ดใใใ๏ผ
if data[2] != sum(i for i in data[3:6]):
raise ValueError("OCR Failed. ๅ
ฅ้ข่
ๆฐใไธ่ดใใพใใใงใใใ")
# ้ฝๆง่
ๆฐใฎๅ่จๅใจ่ฆ็ด ๅ็พคๅคใฎๅ่จใไธ่ดใใใ๏ผ
if data[1] != sum(i for i in data[3:]):
raise ValueError("OCR Failed. ้ฝๆง่
ๆฐใไธ่ดใใพใใใงใใใ")
return pattern, data
except Exception as e:
print(e)
return None
def to_csv(dt, row, remarks, dir):
p = pathlib.Path(dir, 'main_summary_recognized.csv')
with p.open(mode='w') as fw:
writer = csv.writer(fw)
writer.writerow(["ๆดๆฐๆฅๆ","ๆคๆปๅฎๆฝไบบๆฐ","้ฝๆงๆฃ่
ๆฐ","ๅ
ฅ้ข","่ปฝ็็ก็็ถ","ไธญ็ญ็","้็","ๅ
ฅ้ข่ชฟๆด","ๆฝ่จญๅ
ฅๆ","่ชๅฎ
็้ค","่ชฟๆด","้้ข","ๆญปไบก","ๅ
ฅ้ขไธญ","่ปฝ็ไธญ็ญ็","่ปข้ข","ๅ่"])
writer.writerow([dt] + row + ["", "", ""] + ["".join(remarks)])
if __name__ == "__main__":
url = "https://www.pref.aichi.jp/site/covid19-aichi/"
r = requests.get(url, headers=headers)
r.raise_for_status()
soup = BeautifulSoup(r.content, "html5lib")
src = soup.find("img", alt=re.compile("ๆคๆป้ฝๆง่
$")).get("src")
link = urljoin(url, src)
jpg_path = get_file(link, "./data")
# jpg_path = "./data/main_summary0826.jpg"
print("ๆดๆฐๆฅใๆฝๅบ")
hit_date_pattern, date = recognize_date_patterns([
(lambda path: date_pattern1.recognize(path)),
(lambda path: date_pattern2.recognize(path)),
])
if date is None:
raise ValueError("OCR Failed. ๆดๆฐๆฅใๆฝๅบใงใใพใใใงใใใ")
print("ๆดๆฐๆฅใๆฝๅบ -> Pattern" + str(hit_date_pattern) + "ใงๆๅ")
print("ๆฐๅคใใผใฟใๆฝๅบ")
hit_table_pattern, nums = recognize_table_patterns([
(lambda path: table_pattern1.recognize(path)),
(lambda path: table_pattern2.recognize(path)),
(lambda path: table_pattern3.recognize(path)),
])
if nums is None:
raise ValueError("OCR Failed. ่กจใใๆฐๅคใใผใฟใๆฝๅบใงใใพใใใงใใใ")
print("ๆฐๅคใใผใฟใๆฝๅบ -> Pattern" + str(hit_table_pattern) + "ใงๆๅ")
print("ๆณจ่จใใผใฟใๆฝๅบ")
hit_remarks_pattern, remarks = recognize_remarks_patterns([
(lambda path: remarks_pattern2.recognize(path)),
# (lambda path: remarks_pattern1.recognize(path)),
])
if remarks is None:
raise ValueError("OCR Failed. ๆณจ่จใใผใฟใๆฝๅบใงใใพใใใงใใใ")
print("ๆณจ่จใใผใฟใๆฝๅบ -> Pattern" + str(hit_remarks_pattern) + "ใงๆๅ")
print("ๆดๆฐๆฅๆ", date)
print("ๆฐๅค็พค", nums)
print("ๆณจ่จ", remarks)
to_csv(date, nums, remarks, "./data")
print("Wrote to main_summary_recognized.csv")
| code4nagoya/covid19-aichi-tools | scrape_main_summary.py | scrape_main_summary.py | py | 5,379 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pathlib.PurePath",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_n... |
7065938845 | """
Pลฏvodnรญ nรกvrh:
class Board
- grid
* reset
* draw
* score - z pohledu hrรกฤe
* score_wnd5
* wintie - globรกlnฤ, 0 = jedeme dรกl, 1/-1 win hrรกฤ, jinak = tie
* wintie_wnd5
"""
import pygame
import random
import math
from pyshqorky.players import *
class Board:
""" Tลรญda pro prรกci s hernรญ deskou. Jsou zde informace o rozmรญstฤnรญ kamenลฏ na desce, vyhodnocenรญ stavu desky a jejรญ vykreslovรกnรญ."""
#: Obsah prรกzdnรฉho pole na desce.
CELL_EMPTY = 0
#: Vรญtฤzstvรญ nebo remรญza? Nic, nejde urฤit.
WT_NONE = 0
#: Vรญtฤzstvรญ nebo remรญza? Jsem vรญtฤz.
WT_WINNER = 1
#: Vรญtฤzstvรญ nebo remรญza? Nejsem vรญtฤz.
WT_LOSER = 2
#: Vรญtฤzstvรญ nebo remรญza? Je to remรญza.
WT_TIE = 3
def __init__(self, rows: int, cols: int, width: int, height: int, x_offset: int, y_offset: int) -> None:
#: Poฤet ลรกdkลฏ.
self.rows = rows
#: Poฤet sloupcลฏ.
self.cols = cols
#: Velikost na ose x v px.
self.width = width
#: Velikost na ose y v px.
self.height = height
#: Velikost strany jednoho hracรญho pole.
self.sqsize = width // cols
#: Posun na ose x, kam se bude deska vykreslovat.
self.x_offset = x_offset
#: Posun na ose y, kam se bude deska vykreslovat.
self.y_offset = y_offset
#: Seznam seznamลฏ (2D pole), kde je umรญstฤnรญ kamenลฏ na desce.
self.grid = [[0 for col in range(self.cols)] for row in range(self.rows)]
#: Seznam seznamลฏ (2D pole), kde je info o hracรญch polรญch, kterรฉ pouลพijeme pro vyhodnocenรญ interakce s hrรกฤem.
self.tiles = [[pygame.Rect for col in range(self.cols)] for row in range(self.rows)]
def draw(self, screen, players: Players | None = None, mark_coords: list[tuple[int, int]] | None = None) -> None:
"""
Vykreslenรญ hernรญ plochy. Nejdลรญv nakreslรญme pole `pygame.Rect`. Kaลพdรฝ v poli o velikosti o jedna menลกรญ, neลพ velikost `pyshqorky.board.Board.sqsize`. To pro zobrazenรญ mลรญลพky.
Potom, pokud jsme dostali v parametru i seznam hrรกฤลฏ, nakreslรญme jejich kameny na kaลพdรฉ pole, kde se nachรกzejรญ.
Nakonec nakreslรญme zvรฝraznฤnรญ, pokud je mรกme.
"""
# vykreslenรญ hernรญ plochy
for r in range (self.rows):
for c in range(self.cols):
# Vykreslenรญ jednoho hernรญho pole
self.tiles[r][c] = pygame.draw.rect(screen, (0x40,0x40,0x40), #type: ignore
pygame.Rect(self.sqsize*r+self.x_offset, self.sqsize*c+self.y_offset, self.sqsize-1, self.sqsize-1))
# Pokud jsme dostali i seznam hrรกฤลฏ
if players is not None:
# projedem oba
for id, pl in players.items():
# a pokud je tenhle na tomto poli
if self.grid[r][c] == pl.id:
# tak ho nakreslรญme
pl.draw(screen, r, c, self.x_offset, self.y_offset, self.sqsize)
# nakonec vykreslenรญ zvรฝraznฤnรญ, pokud je pลรญtomno
if mark_coords is not None:
# pro kaลพdรฉ pole k zvรฝraznฤnรญ
for (r, c) in mark_coords:
# pokud mรกme hrรกฤe
if players is not None:
# projedem oba
for id, pl in players.items():
# a pokud je tenhle na tomto poli
if self.grid[r][c] == pl.id:
# tak ho obtรกhneme
pl.draw(screen, r, c, self.x_offset, self.y_offset, self.sqsize, True)
# jinak udฤlรกme pro zvรฝraznฤnรญ teฤku uprostลed
else:
pygame.draw.circle(screen, (255, 255, 0), (self.sqsize*r+self.x_offset+(self.sqsize/2),
self.sqsize*c+self.y_offset+(self.sqsize/2)),
self.sqsize*0.15)
def score_wnd5(self, window: list, player: Player) -> int:
    """
    Score a 1x5 slice of the board from the (computer) player's point of view.
    An empty slice, or one containing both opponents, scores zero. A slice held
    by the player alone adds to the score; one held only by the opponent
    subtracts from it. How much is added or subtracted depends on the AI level,
    on whose stones they are, and on how many stones sit in this five, looked up
    in `pyshqorky.player.Player.AI_VALUES`.
    """
    own_stones = window.count(player.id)
    enemy_stones = window.count(player.oponent_id)
    # completely empty slice: nothing to score
    if window.count(Board.CELL_EMPTY) == 5:
        return 0
    # both sides present: neither can ever complete a five here
    if own_stones and enemy_stones:
        return 0
    level_values = Player.AI_VALUES[player.ai_level]
    if own_stones:
        # only our stones: add the value matching their count
        return level_values[Player.AI_SCORE_MY][own_stones]
    # only the opponent here: subtract the value matching their count
    return -level_values[Player.AI_SCORE_OPONENT][enemy_stones]
def score_board(self, player: Player) -> int:
    """
    Slide a 1x5 window over every position on the board. For each such window we
    call `pyshqorky.board.Board.score_wnd5`. All the scores that come back are
    summed up and the total is returned as the evaluation of the whole board.
    Inspired by: http://blog.trixi.cz/2013/02/popis-piskvorkoveho-algoritmu/
    """
    score = 0
    #"""
    # score the rows
    for r in range(self.rows):
        row_array = [int(i) for i in list(self.grid[r][:])]
        for c in range(self.cols-4):
            window = row_array[c:c+5]
            score += self.score_wnd5(window, player)
    # score the columns
    for c in range(self.cols):
        col_array = [i[:][c] for i in self.grid]
        for r in range(self.rows-4):
            window = col_array[r:r+5]
            score += self.score_wnd5(window, player)
    # score the diagonals (both \ and /)
    for r in range(self.rows-4):
        for c in range(self.cols-4):
            # this one is \
            window = [self.grid[r+i][c+i] for i in range(5)]
            score += self.score_wnd5(window, player)
            # and this one is /
            window = [self.grid[r+4-i][c+i] for i in range(5)]
            score += self.score_wnd5(window, player)
    # NOTE(review): the block below is a deliberately disabled draft (it is a bare
    # string literal, so it never executes) of an optimised algorithm which, per the
    # original author's note, does not yet behave identically. Left untouched.
    """
    # optimalizovanรฝ algoritmus, jenom nefunguje stejnฤ
    for r in range(2, self.rows-2):
        for c in range(2, self.cols-2):
            # ohodnotรญme ลรกdky
            window = [self.grid[r-2+i][c] for i in range(5)]
            # budeme hodnotit pouze pokud tu nenรญ prรกzdno
            if window.count(Board.CELL_EMPTY != 5):
                score += self.score_wnd5(window, player)
            # sloupce
            window = [self.grid[r][c-2+i] for i in range(5)]
            # budeme hodnotit pouze pokud tu nenรญ prรกzdno
            if window.count(Board.CELL_EMPTY != 5):
                score += self.score_wnd5(window, player)
            # diagonรกla \
            window = [self.grid[r-2+i][c-2+i] for i in range(5)]
            # budeme hodnotit pouze pokud tu nenรญ prรกzdno
            if window.count(Board.CELL_EMPTY != 5):
                score += self.score_wnd5(window, player)
            # diagonรกla /
            window = [self.grid[r+2-i][c-2+i] for i in range(5)]
            # budeme hodnotit pouze pokud tu nenรญ prรกzdno
            if window.count(Board.CELL_EMPTY != 5):
                score += self.score_wnd5(window, player)
    """
    return score
# vracรญme WT_NONE, kdyลพ je moลพnรฉ hrรกt dรกl
# WT_WINNER, pokud je vรญtฤzem player
# WT_LOSER, pokud je vรญtฤzem oponent
# WT_TIE, pokud v tรฉto pฤtici nejde dosรกhnout vรญtฤzstvรญ
def win_tie_wnd5(self, window: list[int], player: Player) -> int:
    """
    Judge one 1x5 slice of the board from the player's point of view for a possible
    win, loss or tie. If only the player sits there, they won; if only the opponent,
    the player lost. If both are present, nobody can make a five here any more.
    If none of that holds, play can continue.
    Returns WT_WINNER when the player won, WT_LOSER when they lost, WT_TIE when a
    win is no longer possible here, or WT_NONE while winning here is still possible.
    """
    own = window.count(player.id)
    opp = window.count(player.oponent_id)
    # five of ours: victory
    if own == 5:
        return self.WT_WINNER
    # five of the opponent's: defeat
    if opp == 5:
        return self.WT_LOSER
    # mixed slice: no five can ever be completed here
    if own and opp:
        return self.WT_TIE
    # otherwise the game can still go on
    return self.WT_NONE
def win_tie(self, player: Player) -> tuple[int, list[tuple[int, int]]]:
    """
    Walk the whole board and, from the player's point of view, decide whether they
    won or lost, whether it is a tie, or whether play continues.
    Technically this slides every 1 x 5 window over the board and judges each one in
    the dedicated win_tie_wnd5 function, whose verdicts are propagated from here.
    On a win the coordinates of the winning five are also returned for highlighting.
    The main integer is WT_WINNER if the player won, WT_LOSER if they lost, WT_TIE
    on a tie, or WT_NONE while play can continue.
    """
    wt5 = None
    # start pessimistic: stays WT_TIE unless some window is still open (WT_NONE)
    tie = self.WT_TIE
    # judge the rows
    for r in range(self.rows):
        row_array = [int(i) for i in list(self.grid[r][:])]
        for c in range(self.cols-4):
            window = row_array[c:c+5]
            wt5 = self.win_tie_wnd5(window, player)
            if (wt5 in (self.WT_WINNER, self.WT_LOSER)):
                return (wt5, [(r, c+i) for i in range(5)])
            elif wt5 == self.WT_NONE:
                tie = wt5
    # judge the columns
    for c in range(self.cols):
        col_array = [i[:][c] for i in self.grid]
        for r in range(self.rows-4):
            window = col_array[r:r+5]
            wt5 = self.win_tie_wnd5(window, player)
            if (wt5 in (self.WT_WINNER, self.WT_LOSER)):
                return (wt5, [(r+i,c) for i in range(5)])
            elif wt5 == self.WT_NONE:
                tie = wt5
    # judge the diagonals (both \ and /)
    for r in range(self.rows-4):
        for c in range(self.cols-4):
            # this one is \
            window = [self.grid[r+i][c+i] for i in range(5)]
            wt5 = self.win_tie_wnd5(window, player)
            if (wt5 in (self.WT_WINNER, self.WT_LOSER)):
                return (wt5, [(r+i, c+i) for i in range(5)])
            elif wt5 == self.WT_NONE:
                tie = wt5
            # and this one is /
            window = [self.grid[r+4-i][c+i] for i in range(5)]
            wt5 = self.win_tie_wnd5(window, player)
            if (wt5 in (self.WT_WINNER, self.WT_LOSER)):
                return (wt5, [(r+4-i, c+i) for i in range(5)])
            elif wt5 == self.WT_NONE:
                tie = wt5
    # nobody has won yet, so report whether it is a tie or not
    return (tie, list())
def best_move(self, player: Player) -> tuple:
    """
    Find which of the player's possible moves is rated with the highest score.
    All moves sharing that top score are collected in a list and one of them is
    picked at random.
    """
    # best score so far; start sufficiently small (minus infinity)
    top_score = -math.inf
    candidates = []
    # walk every cell of the board
    for row in range(self.rows):
        for col in range(self.cols):
            # only empty cells are playable
            if self.grid[row][col] != Board.CELL_EMPTY:
                continue
            # tentatively claim the cell, rate the board, then undo the move
            self.grid[row][col] = player.id
            move_score = self.score_board(player)
            self.grid[row][col] = Board.CELL_EMPTY
            if move_score > top_score:
                # strictly better: restart the candidate list with this move
                candidates = [(row, col)]
                top_score = move_score
            elif move_score == top_score:
                # equally good: remember this move as well
                candidates.append((row, col))
    # return one of the best moves, chosen at random
    return random.choice(candidates)
def make_move(self, player: Player, coord: tuple) -> None:
    """
    Place the mark of the `pyshqorky.player.Player` on the cell at coordinates
    `coord` (row, column).
    """
    row, col = coord[0], coord[1]
    self.grid[row][col] = player.id
def reset(self) -> None:
    """
    Initialise the playing board - remove all player marks by rebuilding the grid
    with zeroes (one fresh row list per row, so rows never alias each other).
    """
    self.grid = [[0] * self.cols for _ in range(self.rows)]
| zdenekkhol/pyshqorky | pyshqorky/board.py | board.py | py | 14,134 | python | cs | code | 0 | github-code | 1 | [
{
"api_name": "pygame.Rect",
"line_number": 51,
"usage_type": "attribute"
},
{
"api_name": "pygame.draw.rect",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": "pygame.draw",
"line_number": 63,
"usage_type": "attribute"
},
{
"api_name": "pygame.Rect",
... |
26443195131 | import pymongo
import os
from datetime import datetime
import math
import re
import pymongo
import keys # .gitgnored file
from bson.objectid import ObjectId
from bson.son import SON
import atexit
import math
CONNECTION = None
PAGESIZE = 10
def date_now():
    """
    Needed to keep the same date in python and mongo, as mongo rounds to millisecond.

    Returns the current UTC time with the sub-millisecond part truncated so the
    value round-trips through MongoDB unchanged.
    """
    now = datetime.utcnow()
    # integer floor-division replaces math.floor(now.microsecond / 1000) * 1000
    return now.replace(microsecond=now.microsecond // 1000 * 1000)
# NOTE(review): CONNECTION is already initialised to None further up this module;
# this reassignment is redundant (nothing sets it in between) but harmless.
CONNECTION = None
def close_connection():
    """Close the cached MongoDB client, if one was ever opened."""
    global CONNECTION
    if CONNECTION is not None:
        CONNECTION.close()
# make sure the cached connection is closed when the interpreter exits
atexit.register(close_connection)
def get_connection():
    """
    Return the cached MongoDB client, creating it on first use.

    The connection string is read from the LOCAL_DB_KEY environment variable;
    when it is not set the process exits with an error message.

    (The old body carried the intended docstring as a stray no-op string literal
    in the middle of the function; it is now a real docstring.)
    """
    global CONNECTION
    if CONNECTION is not None:
        return CONNECTION
    mongo_db_key = os.getenv("LOCAL_DB_KEY", None)
    if mongo_db_key is None:
        exit("LOCAL_DB_KEY env variable is not set.")
    CONNECTION = pymongo.MongoClient(mongo_db_key)
    return CONNECTION
def check_run_name(name):
    """Return True when a run with the given name exists in the database."""
    connection = get_connection()
    db = connection.get_database()
    # limit(1) + count(True) yields 0 or 1 - fastest existence check
    run = db.runs.find({"name": name}).limit(1).count(True)
    # BUGFIX: `run is not 0` compared identity, not value (SyntaxWarning on
    # CPython >= 3.8 and implementation-dependent); use a value comparison.
    return run != 0
def get_run_list():
    """Return all runs (name + samples only), newest first."""
    connection = get_connection()
    # NOTE(review): this hard-codes the 'bifrost_upgrade_test' database while most
    # other helpers in this module use connection.get_database() - confirm intent.
    db = connection['bifrost_upgrade_test']
    # Fastest.
    runs = list(db.runs.find( {},#{"type": "routine"}, #Leave in routine
                             {"name": 1,
                              "_id": 0,
                              "samples": 1}).sort([['metadata.created_at', pymongo.DESCENDING]]))
    return runs
def get_group_list(run_name=None):
    """
    Aggregate sample counts per group.

    When run_name is given, only the samples referenced by that run are counted;
    otherwise the whole samples collection is grouped. Returns the raw list of
    aggregation documents ({"_id": <group>, "count": <n>}).
    """
    db = get_connection().get_database()
    # the $group stage is identical for both variants
    group_stage = {
        "$group": {
            "_id": "$properties.sample_info.summary.group",
            "count": {"$sum": 1}
        }
    }
    if run_name is None:
        return list(db.samples.aggregate([group_stage]))
    # restrict the aggregation to the samples of the named run
    run = db.runs.find_one(
        {"name": run_name},
        {
            "_id": 0,
            "samples._id": 1
        }
    )
    run_samples = [] if run is None else run["samples"]
    sample_ids = [s["_id"] for s in run_samples]
    match_stage = {
        "$match": {
            "_id": {"$in": sample_ids},
        }
    }
    return list(db.samples.aggregate([match_stage, group_stage]))
def get_species_list(species_source, run_name=None):
    """
    Aggregate sample counts per species, sorted by species name.

    species_source == "provided" groups on the user-supplied species field,
    anything else on the detected species. When run_name is given, only the
    samples that belong to that run are counted.
    """
    connection = get_connection()
    db = connection.get_database()
    # pick the field to group on
    if species_source == "provided":
        spe_field = "properties.sample_info.summary.provided_species"
    else:
        spe_field = "properties.species_detection.summary.detected_species"
    if run_name is not None:
        # fetch the ids of the samples belonging to this run
        run = db.runs.find_one(
            {"name": run_name},
            {
                "_id": 0,
                "samples._id": 1
            }
        )
        if run is None:
            run_samples = []
        else:
            run_samples = run["samples"]
        sample_ids = [s["_id"] for s in run_samples]
        species = list(db.samples.aggregate([
            {
                "$match": {
                    "_id": {"$in": sample_ids}
                }
            },
            {
                "$group": {
                    "_id": "$" + spe_field,
                    "count": {"$sum": 1}
                }
            },
            {
                "$sort": {"_id": 1}
            }
        ]))
    else:
        # no run filter: group the entire samples collection
        species = list(db.samples.aggregate([
            {
                "$group": {
                    "_id": "$" + spe_field,
                    "count": {"$sum": 1}
                }
            },
            {
                "$sort": {"_id": 1}
            }
        ]))
    return species
def filter_qc(qc_list):
    """
    Translate a list of QC states into a MongoDB $match stage.

    Returns None when no states were selected. "Not checked" and "core facility"
    expand into compound conditions; any other value matches the stamp value
    directly. The resulting stage requires all selected conditions via $and.
    """
    if not qc_list:
        return None

    def _clause(state):
        # one sub-condition per selected QC state
        if state == "Not checked":
            return {"$and": [
                {"properties.datafiles.summary.paired_reads": {"$exists": True}},
                {"properties.stamper.summary.stamp.value": {"$exists": False}}
            ]}
        if state == "core facility":
            return {"$or": [
                {"properties.datafiles.summary.paired_reads": {"$exists": False}},
                {"properties.stamper.summary.stamp.value": "core facility"}
            ]}
        return {"properties.stamper.summary.stamp.value": state}

    return {"$match": {"$and": [_clause(state) for state in qc_list]}}
def filter(run_names=None,
           species=None, species_source="species", group=None,
           qc_list=None, samples=None, pagination=None,
           sample_names=None,
           projection=None):
    """
    Query the samples collection with any combination of filters and return the
    matching documents sorted by name.

    run_names: restrict to samples referenced by these runs.
    species / species_source: filter on the provided, detected or merged field.
    group: sample group values ("Not defined" also matches missing/None).
    qc_list: QC states, translated by filter_qc().
    samples: explicit sample id strings.
    sample_names: exact names, or regex patterns when wrapped in /slashes/.
    pagination: dict with page_size and current_page; defaults to first 1000 hits.
    projection: optional MongoDB projection for the returned documents.

    NOTE: the function name shadows the builtin filter(); kept for compatibility.
    """
    # which species field the filter applies to
    if species_source == "provided":
        spe_field = "properties.provided_species"
    elif species_source == "detected":
        spe_field = "properties.detected_species"
    else:
        spe_field = "properties.species"
    connection = get_connection()
    db = connection['bifrost_upgrade_test']
    query = []
    sample_set = set()
    if sample_names is not None and len(sample_names) != 0:
        sample_names_query = []
        for s_n in sample_names:
            # names wrapped in slashes are treated as regular expressions
            if s_n.startswith("/") and s_n.endswith("/"):
                sample_names_query.append(re.compile(s_n[1:-1]))
            else:
                sample_names_query.append(s_n)
        query.append({"name": {"$in": sample_names_query}})
    if samples is not None and len(samples) != 0:
        sample_set = {ObjectId(id) for id in samples}
        query.append({"_id": {"$in": list(sample_set)}})
    if run_names is not None and len(run_names) != 0:
        runs = list(db.runs.find(
            {"name": {"$in": run_names}},
            {
                "_id": 0,
                "samples._id": 1
            }
        ))
        if runs is None:
            run_sample_set = set()
        else:
            run_sample_set = {s["_id"] for run in runs for s in run['samples']}

        if len(sample_set):
            # BUGFIX: set has no .intersect() method; the old call raised
            # AttributeError whenever both `samples` and `run_names` were given.
            inter = run_sample_set.intersection(sample_set)
            query.append({"_id": {"$in": list(inter)}})
        else:
            query.append({"_id": {"$in": list(run_sample_set)}})
    if species is not None and len(species) != 0:
        if "Not classified" in species:
            # also match samples whose species field is missing or None
            query.append({"$or":
                [
                    {spe_field: None},
                    {spe_field: {"$in": species}},
                    {spe_field: {"$exists": False}}
                ]
            })
        else:
            query.append({spe_field: {"$in": species}})
    if group is not None and len(group) != 0:
        if "Not defined" in group:
            # also match samples whose group field is missing or None
            query.append({"$or":
                [
                    {"properties.sample_info.summary.group": None},
                    {"properties.sample_info.summary.group": {"$in": group}},
                    {"properties.sample_info.summary.group": {"$exists": False}}
                ]
            })
        else:
            query.append(
                {"properties.sample_info.summary.group": {"$in": group}})

    if pagination is not None:
        p_limit = pagination['page_size']
        p_skip = pagination['page_size'] * pagination['current_page']
    else:
        p_limit = 1000
        p_skip = 0
    # (the old unused skip_limit_steps list was removed; skip/limit are applied
    # directly on the cursor below)

    qc_query = filter_qc(qc_list)

    if len(query) == 0:
        if qc_query is None:
            match_query = {}
        else:
            match_query = qc_query["$match"]
    else:
        if qc_query is None:
            match_query = {"$and": query}
        else:
            match_query = {"$and": query + qc_query["$match"]["$and"]}

    query_result = list(db.samples.find(
        match_query, projection).sort([('name', pymongo.ASCENDING)]).skip(p_skip).limit(p_limit))

    return query_result
def get_species_QC_values(ncbi_species):
    """
    Look up the min/max genome length QC thresholds for a species.

    Tries, in order, a match on ncbi_species, organism and group in the
    bifrost_species database, finally falling back to the "default" organism entry.
    """
    db = get_connection().get_database('bifrost_species')
    projection = {"min_length": 1, "max_length": 1}
    # first match wins, mirroring the original lookup order
    for lookup_field in ("ncbi_species", "organism", "group"):
        species = db.species.find_one({lookup_field: ncbi_species}, projection)
        if species is not None:
            return species
    # nothing matched: return the defaults
    return db.species.find_one({"organism": "default"}, projection)
| ssi-dk/BeONE-old-version | bifrost/bifrost_mongo_interface.py | bifrost_mongo_interface.py | py | 8,965 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.utcnow",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "math.floor",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "atexit.register... |
18709087270 | # Dictionary and counter in Python to find winner of election
print("======== Counter + Sorted + Slicing ========")
from collections import Counter
def winner(votes):
    """
    Print the name of the election winner.

    The winner is the candidate with the most votes; when several candidates tie
    for the top count, the lexicographically smallest name wins.

    (The old implementation shadowed the builtin `input` with its parameter name
    and built an inverted votes->candidates dict plus a sort just to find the
    maximum; Counter + max + min does the same job directly.)
    """
    counts = Counter(votes)
    max_votes = max(counts.values())
    # among all candidates with the top count, print the alphabetically first
    print(min(name for name, n in counts.items() if n == max_votes))
# sample ballot: each entry is one vote for the named candidate
# NOTE(review): this variable shadows the builtin input(); consider renaming
input = ['john','johnny','jackie','johnny',
        'john','jackie','jamie','jamie',
        'john','johnny','jamie','johnny',
        'john']
winner(input)

print('\n====== shorter method using counter + max + list comp =======')
from collections import Counter
votes = ['john','johnny','jackie','johnny',
        'john','jackie','jamie','jamie',
        'john','johnny','jamie','johnny',
        'john']
# output: Counter({'john': 4, 'johnny': 4, 'jamie': 3, 'jackie': 2})
votes_count = Counter(votes)
# output : 4  (highest number of votes any candidate received)
max_vote = max(votes_count.values())
# output : ['john', 'johnny']  (all candidates tied at the top)
lst = [ k for k,v in votes_count.items() if v == max_vote ]
# output: john  (alphabetically first among the tied candidates)
print(sorted(lst)[0])
| dilipksahu/Python-Programming-Example | Dictionary Programs/findElectionWinner.py | findElectionWinner.py | py | 1,507 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "collections.Counter",
"line_number": 48,
"usage_type": "call"
}
] |
from django.urls import path

from . import views

# URL namespace for reversing these routes, e.g. "pages:index"
app_name = 'pages'

urlpatterns = [
    # landing page
    path('', views.index, name='index'),
    path('fare/', views.fare, name='fare'),
    path('about-us/', views.about, name='aboutus'),
    path('contact-us/', views.contactus, name='contactus'),
    path('complaints/', views.complaints, name='complaints'),
]
| saidulroney/nuist-pms | pages/urls.py | urls.py | py | 347 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
import sqlalchemy
from sqlalchemy.ext.declarative import declarative_base

# Create the engine (SQLite file database)
engine = sqlalchemy.create_engine("sqlite:///slangs2.db")
Base = declarative_base()

# Dictionary table: one row per slang word and its meaning
class Diccionario(Base):
    __tablename__ = "diccionario"
    # surrogate primary key
    id = sqlalchemy.Column(sqlalchemy.Integer, primary_key=True)
    # the slang word itself
    palabra = sqlalchemy.Column(sqlalchemy.String(length=100))
    # its meaning
    significado = sqlalchemy.Column(sqlalchemy.String(length=100))

# create the table if it does not exist yet
Base.metadata.create_all(engine)
# Create and start the session
Session = sqlalchemy.orm.sessionmaker()
Session.configure(bind=engine)
session = Session()
#Funciones
def addSlang(palabra, significado):
    """Insert a new slang word with its meaning and commit the transaction."""
    nuevo = Diccionario(palabra=palabra, significado=significado)
    session.add(nuevo)
    session.commit()
def editSlang(palabra, nuevo_significado):
    """Replace the meaning of an existing slang word and commit.

    The old update payload also re-assigned `palabra` to the very value the
    filter already pins, which was redundant; only the meaning is updated now.
    """
    session.query(Diccionario).filter(
        Diccionario.palabra == palabra
    ).update({
        Diccionario.significado: nuevo_significado
    })
    session.commit()
def delSlang(palabra):
    """Delete the row(s) whose word equals `palabra` and commit."""
    consulta = session.query(Diccionario).filter(Diccionario.palabra == palabra)
    consulta.delete()
    session.commit()
def get_slangs():
    """Return every row of the slang dictionary."""
    todos = session.query(Diccionario)
    return todos.all()
def buscar_def(palabra):
    """Return a query over the rows whose word equals `palabra`."""
    return session.query(Diccionario).filter(Diccionario.palabra == palabra)
#Funciรณn principal de menรบ
def menu():
    """Interactive console loop: add, edit, delete, list and look up slang entries."""
    # NOTE(review): this local string shadows the function name inside its scope
    menu = """
    1) Agregar nuevo slang
    2) Editar slang
    3) Eliminar palabra existente
    4) Ver diccionario
    5) Buscar definiciรณn
    6) Salir
    Selecciona una opciรณn: """
    opt = ""
    while opt != "6": # keep showing the menu until the user picks 6 to quit
        opt = input(menu)
        if opt == "1": # check whether the slang already exists; add it if not
            palabra = input("\nIngrese un slang: ")
            posible_significado = buscar_def(palabra).count()
            if posible_significado > 0:
                print(f"El slang '{palabra}' ya existe")
            else:
                significado = input("Ingrese el significado: ")
                addSlang(palabra, significado)
                print("ยกSlang agregado con รฉxito!")
        if opt == "2": # find the slang to edit and change its meaning; offer to add it when missing
            palabra = input("\nยฟQuรฉ slang desea editar?: ")
            posible_significado = buscar_def(palabra).count()
            if posible_significado > 0:
                nuevo_significado = input("Ingrese el nuevo significado: ")
                editSlang(palabra, nuevo_significado)
                print("ยกSlang actualizado exitosamente!")
            else:
                yn=input(f"El slang '{palabra}' no existe. ยฟDeseas agregarlo? Y/N: ")
                if yn == 'Y' or yn == 'y':
                    significado = input("Ingrese el significado: ")
                    addSlang(palabra, significado)
                    print("ยกSlang agregado con รฉxito!")
                else:
                    continue
        if opt == "3": # delete a slang
            palabra = input("\nยฟQuรฉ slang desea eliminar?: ")
            delSlang(palabra)
        if opt == "4": # list every slang and its definition with a for loop
            palabras = get_slangs()
            print("\n--------Diccionario de Slangs--------\n")
            for palabra in palabras:
                print(palabra.palabra + ": " + palabra.significado)
        if opt == "5": # show the chosen slang's meaning; offer to add it when missing
            palabra = input(
                "\nยฟQuรฉ significado deseas saber?: ")
            significado = buscar_def(palabra)
            if significado.count() > 0:
                print(f"El significado de '{palabra}' es:\n{significado[0].significado}")
            else:
                yn=input(f"El slang '{palabra}' no existe. ยฟDeseas agregarlo? Y/N: ")
                if yn == 'Y' or yn == 'y':
                    significado = input("Ingrese el significado: ")
                    addSlang(palabra, significado)
                    print("ยกSlang agregado con รฉxito!")
                else:
                    continue
#Inicializaciรณn del programa
# script entry point
if __name__ == '__main__':
    menu()
{
"api_name": "sqlalchemy.create_engine",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 14,
"usage_type": "call"
},
{
... |
2347070900 | from django.core.exceptions import ValidationError
from django.db import transaction
from rest_framework import exceptions, status, viewsets
from rest_framework.decorators import action
from rest_framework.response import Response
from ESSArch_Core.ip.models import Agent, InformationPackage
from ESSArch_Core.ip.permissions import CanLockSA
from ESSArch_Core.profiles.models import SubmissionAgreement, Profile, ProfileSA
from ESSArch_Core.profiles.serializers import ProfileSerializer, ProfileSASerializer, SubmissionAgreementSerializer
from ESSArch_Core.profiles.views import (
ProfileViewSet as ProfileViewSetCore,
SubmissionAgreementViewSet as SAViewSetCore,
)
class SubmissionAgreementViewSet(SAViewSetCore):
    """Extends the core SA viewset with type include/exclude, save-as-new and IP locking."""

    @action(detail=True, methods=['post'], url_path='include-type')
    def include_type(self, request, pk=None):
        """Enable a profile type (request.data['type']) on the submission agreement."""
        sa = SubmissionAgreement.objects.get(pk=pk)
        ptype = request.data["type"]
        setattr(sa, "include_profile_%s" % ptype, True)
        sa.save()
        return Response({
            'status': 'Including profile type %s in SA %s' % (ptype, sa)
        })

    @action(detail=True, methods=['post'], url_path='exclude-type')
    def exclude_type(self, request, pk=None):
        """Disable a profile type on the submission agreement."""
        sa = SubmissionAgreement.objects.get(pk=pk)
        ptype = request.data["type"]
        setattr(sa, "include_profile_%s" % ptype, False)
        sa.save()
        return Response({
            'status': 'Excluding profile type %s in SA %s' % (ptype, sa)
        })

    @action(detail=True, methods=['post'])
    def save(self, request, pk=None):
        """Save the SA as a new generation under a new name (requires permission)."""
        if not request.user.has_perm('profiles.create_new_sa_generation'):
            raise exceptions.PermissionDenied

        sa = self.get_object()

        try:
            new_name = request.data["new_name"]
        except KeyError:
            new_name = ''

        if not new_name:
            return Response(
                {'status': 'No name specified'},
                status=status.HTTP_400_BAD_REQUEST
            )

        new_data = request.data.get("data", {})
        changed_data = False

        # every template field marked required must be present in the new data
        for field in sa.template:
            if field.get('templateOptions', {}).get('required', False):
                if not new_data.get(field['key'], None):
                    # BUGFIX: the quotes were mismatched here, which turned the
                    # response body into a *set* literal instead of the intended
                    # {'status': ...} dict (and would break JSON rendering).
                    return Response(
                        {'status': "missing required field '%s'" % field['key']},
                        status=status.HTTP_400_BAD_REQUEST
                    )

        for k, v in new_data.items():
            if v != getattr(sa, k):
                changed_data = True
                break

        if not changed_data:
            return Response({'status': 'no changes, not saving'}, status=status.HTTP_400_BAD_REQUEST)

        new_sa = sa.copy(new_data=new_data, new_name=new_name,)

        serializer = SubmissionAgreementSerializer(
            new_sa, context={'request': request}
        )
        return Response(serializer.data)

    def get_profile_types(self):
        """Profile types that get profile relations when an IP locks this SA."""
        return 'sip', 'transfer_project', 'submit_description', 'preservation_metadata'

    @transaction.atomic
    @action(detail=True, methods=["post"])
    def lock(self, request, pk=None):
        """Lock this SA to an information package, adding agents and profile relations."""
        sa = self.get_object()
        ip_id = request.data.get("ip")
        permission = CanLockSA()

        try:
            ip = InformationPackage.objects.get(pk=ip_id)
        except InformationPackage.DoesNotExist:
            # BUGFIX: the %s placeholder was never filled in the old message
            raise exceptions.ParseError('Information Package with id %s does not exist' % ip_id)

        if ip.submission_agreement_locked:
            raise exceptions.ParseError('IP already has a locked SA')

        if not permission.has_object_permission(request, self, ip):
            self.permission_denied(request, message=getattr(permission, 'message', None))

        if ip.submission_agreement != sa:
            raise exceptions.ParseError('This SA is not connected to the selected IP')

        ip.submission_agreement_locked = True

        if sa.archivist_organization:
            existing_agents_with_notes = Agent.objects.all().with_notes([])
            ao_agent, _ = Agent.objects.get_or_create(
                role='ARCHIVIST', type='ORGANIZATION',
                name=sa.archivist_organization, pk__in=existing_agents_with_notes
            )
            ip.agents.add(ao_agent)
        ip.save()

        ip.create_profile_rels(self.get_profile_types(), request.user)

        return Response({'status': 'Locked submission agreement'})
class ProfileSAViewSet(viewsets.ModelViewSet):
    """Plain CRUD endpoint for the Profile <-> SubmissionAgreement through model."""
    queryset = ProfileSA.objects.all()
    serializer_class = ProfileSASerializer
class ProfileViewSet(ProfileViewSetCore):
    """Extends the core profile viewset with a save-as-new-profile action."""

    @action(detail=True, methods=['post'])
    def save(self, request, pk=None):
        """Create a copy of the profile when its data or structure changed."""
        profile = Profile.objects.get(pk=pk)
        new_data = request.data.get("specification_data", {})
        new_structure = request.data.get("structure", {})

        # BUGFIX: dict.keys() has no .sort() on Python 3 (AttributeError), and on
        # Python 2 list.sort() returned None so the comparison was vacuously True.
        # Compare sorted key lists as the code clearly intended.
        changed_data = (sorted(profile.specification_data.keys()) == sorted(new_data.keys()) and
                        profile.specification_data != new_data)
        changed_structure = profile.structure != new_structure

        if (changed_data or changed_structure):
            try:
                new_profile = profile.copy(
                    specification_data=new_data,
                    new_name=request.data["new_name"],
                    structure=new_structure,
                )
            except ValidationError as e:
                raise exceptions.ParseError(e)

            serializer = ProfileSerializer(
                new_profile, context={'request': request}
            )
            return Response(serializer.data)

        return Response({'status': 'no changes, not saving'}, status=status.HTTP_400_BAD_REQUEST)
| ESSolutions/ESSArch_Tools_Producer | ESSArch_TP/profiles/views.py | views.py | py | 5,722 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "ESSArch_Core.profiles.views.SubmissionAgreementViewSet",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "ESSArch_Core.profiles.models.SubmissionAgreement.objects.get",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "ESSArch_Core.profiles.models.... |
26813527429 | import os
import numpy as np
import pandas as pd
from tqdm import *
def label2csv(input_file, output_file, labels_file, train_dir):
    """
    Convert a tab-separated annotation file into a one-hot labelled CSV.

    Each input line is "<file>\\t<age>\\t<sex>\\t<label>...". Lines carrying no
    known label (none appears in labels_file) are dropped. The output columns are
    ['file', 'ecg_dir', 'age', 'sex'] followed by one 0/1 column per known label,
    with `train_dir` written into the ecg_dir column of every row.
    """
    with open(labels_file, 'r', encoding='utf-8') as fh:
        known_labels = [line.strip() for line in fh.readlines()]
    # map label name -> one-hot column position (first occurrence wins)
    label_pos = {}
    for idx, name in enumerate(known_labels):
        label_pos.setdefault(name, idx)
    with open(input_file, 'r', encoding='utf-8') as fh:
        lines = fh.readlines()
    rows = []
    for line in lines:
        fields = line.strip().split('\t')
        one_hot = [0] * len(known_labels)
        for tag in fields[3:]:
            if tag in label_pos:
                one_hot[label_pos[tag]] = 1
        # keep only samples that carry at least one known label
        if any(one_hot):
            rows.append([fields[0], train_dir, fields[1], fields[2]] + one_hot)
    header = ['file', 'ecg_dir', 'age', 'sex'] + known_labels
    pd.DataFrame(data=rows, columns=header).to_csv(output_file, index=False)
    return
def merge_round1_train_testA(train_csv, testA_csv, output_csv):
    """
    Concatenate the round-1 train and testA label tables and write the result.

    When the same file (first column) appears in both tables, the train row is
    kept and the testA row is skipped.
    """
    train_df = pd.read_csv(train_csv)
    testA_df = pd.read_csv(testA_csv)
    header = train_df.columns.tolist()
    merged_rows = train_df.values.tolist()
    # files already present in train take precedence over testA duplicates
    train_file_names = [row[0] for row in merged_rows]
    for row in testA_df.values.tolist():
        if row[0] not in train_file_names:
            merged_rows.append(row)
    pd.DataFrame(data=merged_rows, columns=header).to_csv(output_csv, index=False)
    return
def remove_round1_duplicates(train_testA_csv, output_csv):
    """
    Drop samples whose raw ECG file content is byte-identical to that of an
    earlier sample (keeping the first occurrence) and write the de-duplicated
    table to output_csv. ECG files are located at <ecg_dir>/<file> per row.
    """
    def load_ecg(ecg_path):
        # raw text lines of one ECG record
        with open(ecg_path, 'r') as fh:
            return fh.readlines()

    table = pd.read_csv(train_testA_csv)
    header = table.columns.tolist()
    records = table.values.tolist()
    ids = [rec[0] for rec in records]
    print('Loading ECG ...')
    contents = []
    for rec in tqdm(records, ncols=75):
        contents.append(load_ecg(os.path.join(rec[1], rec[0])))
    print('Removing duplicates ...')
    dup_ids, dup_count = [], 0
    # O(n^2) pairwise comparison, skipping ids already marked as duplicates
    for i in tqdm(range(len(ids)), ncols=75):
        if ids[i] in dup_ids:
            continue
        first = contents[i]
        for j in range(i + 1, len(ids)):
            if first == contents[j]:
                if ids[j] not in dup_ids:
                    dup_count += 1
                    dup_ids.append(ids[j])
    kept = [rec for rec in records if rec[0] not in dup_ids]
    pd.DataFrame(data=kept, columns=header).to_csv(output_csv, index=False)
    return
def generate_sample_weights(round1_csv, round2_csv, round1_output_csv, round2_output_csv):
    """
    Re-weight round-1 samples towards the round-2 label distribution.

    For every label the ratio (round-2 frequency / round-1 frequency) is computed.
    A round-1 sample's weight is the product of the ratios of its positive labels,
    clipped to at most 1; samples whose weight falls below 0.3 are dropped.
    Every round-2 sample keeps weight 1. Both weighted tables are written to the
    given output CSV paths with a 'sample_weight' column inserted at position 1.
    """
    round1 = pd.read_csv(round1_csv)
    round2 = pd.read_csv(round2_csv)
    # columns 0-3 are file/ecg_dir/age/sex; everything after is a 0/1 label column
    round1_labels = round1.iloc[:, 4:]
    round2_labels = round2.iloc[:, 4:]
    # per-label positive frequency in each round
    round1_freq = round1_labels.sum() / len(round1_labels)
    round2_freq = round2_labels.sum() / len(round2_labels)
    round2_vs_round1 = round2_freq / round1_freq
    round1_labels_weights = round1_labels * round2_vs_round1
    round1_sample_weights = []
    for i, row in round1_labels_weights.iterrows():
        row_list = row.tolist()
        # keep only the ratios of labels this sample actually carries
        row_list = [item for item in row_list if item != 0.0]
        # NOTE(review): np.prod([]) is 1.0, so a row with no positive labels would
        # get weight 1 - presumably such rows cannot occur (label2csv drops them);
        # verify if the input ever comes from elsewhere.
        sample_weight = np.prod(row_list)
        round1_sample_weights.append(sample_weight)
    round1_sample_weights = np.array(round1_sample_weights)
    # clip over-represented samples to 1 and discard clearly under-weighted ones
    round1_sample_weights[round1_sample_weights > 1] = 1
    round1_sample_weights[round1_sample_weights < 0.3] = 0
    round1.insert(1, 'sample_weight', round1_sample_weights)
    drop_idxs = round1.loc[round1['sample_weight'] == 0.0].index.tolist()
    round1.drop(index=drop_idxs, inplace=True)
    # every round-2 sample keeps full weight
    round2_sample_weights = [1] * len(round2)
    round2.insert(1, 'sample_weight', round2_sample_weights)
    round1.to_csv(round1_output_csv, index=False)
    round2.to_csv(round2_output_csv, index=False)
    return
def main(args):
    """Run the whole round-2 preprocessing pipeline from args.input_dir to args.output_dir."""
    print('=' * 100)
    print('Preprocessing on train set')
    print('-' * 100)
    # raw data locations inside the input directory
    round1_testA_dir = os.path.join(args.input_dir, 'hf_round1_testA')
    round1_train_dir = os.path.join(args.input_dir, 'hf_round1_train')
    round2_train_dir = os.path.join(args.input_dir, 'hf_round2_train')
    round1_testA_txt = os.path.join(args.input_dir, 'hefei_round1_ansA_20191008.txt')
    round1_train_txt = os.path.join(args.input_dir, 'hf_round1_label.txt')
    round2_train_txt = os.path.join(args.input_dir, 'hf_round2_train.txt')
    # round1_arrythmia_txt = os.path.join(args.input_dir, 'hf_round1_arrythmia.txt')
    round2_arrythmia_txt = os.path.join(args.input_dir, 'hf_round2_arrythmia.txt')
    # -1- Convert labels of dataset to csv (round-2 label vocabulary throughout)
    print('-1- Convert labels of dataset to csv')
    round1_testA_csv = os.path.join(args.output_dir, 'round1_testA.csv')
    label2csv(round1_testA_txt, round1_testA_csv, round2_arrythmia_txt, round1_testA_dir)
    round1_train_csv = os.path.join(args.output_dir, 'round1_train.csv')
    label2csv(round1_train_txt, round1_train_csv, round2_arrythmia_txt, round1_train_dir)
    round2_train_csv = os.path.join(args.output_dir, 'round2_train.csv')
    label2csv(round2_train_txt, round2_train_csv, round2_arrythmia_txt, round2_train_dir)
    # -2- Merge train and testA of Round1
    print('-2- Merge train and testA of Round1')
    round1_merge_csv = os.path.join(args.output_dir, 'round1_merge.csv')
    merge_round1_train_testA(round1_train_csv, round1_testA_csv, round1_merge_csv)
    # -3- Remove duplicates in train and testA
    print('-3- Remove duplicates in train and testA of Round1')
    round1_merge_noDup_csv = os.path.join(args.output_dir, 'round1_merge_noDup.csv')
    remove_round1_duplicates(round1_merge_csv, round1_merge_noDup_csv)
    # -4- Generate sample weights for sampling
    print('-4- Generate sample weights for sampling')
    round1_sample_weights_csv = os.path.join(args.output_dir, 'round1_merge_noDup_weighted.csv')
    round2_sample_weights_csv = os.path.join(args.output_dir, 'round2_train_weighted.csv')
    generate_sample_weights(round1_merge_noDup_csv, round2_train_csv,
                            round1_sample_weights_csv, round2_sample_weights_csv)
    print('=' * 100, '\n')
    return
if __name__ == '__main__':
    import warnings
    import argparse
    # keep third-party deprecation noise out of the console
    warnings.filterwarnings('ignore')
    parser = argparse.ArgumentParser(
        description='HFECG Competition -Round 2- Preprocessing Pipeline'
    )
    parser.add_argument('--input-dir', '-i', type=str,
                        action='store', dest='input_dir',
                        help='Directory of input data')
    parser.add_argument('--output-dir', '-o', type=str,
                        action='store', dest='output_dir',
                        help='Directory to save preprocessed data')
    args = parser.parse_args()
    main(args)
| gen0924/ECG-HEFEI | round2/code/prep/ecg_prep.py | ecg_prep.py | py | 7,039 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "pandas.DataFrame",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
... |
15948730497 | from datetime import datetime
from django.contrib import messages
from django.contrib.auth import get_user_model
from django.db.models import F, Subquery, Count, Q
from django.http.response import JsonResponse
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib.auth.decorators import login_required
from django.views.decorators.http import require_POST
from game.models import (
Challenge,
ChallengeFlag,
Game,
UserChallengeRecord,
UserParticipateGame,
)
from core.models import ScoreHistory, User
def populate_game_challenges(game, user_id):
challenges = tuple(
Challenge.objects.filter(game=game)
.prefetch_related("flags")
.annotate(
docker_image=F("docker__docker"),
url=F("docker__url"),
)
)
# find all cleared flags
user_records = UserChallengeRecord.objects.filter(
challenge__in=challenges, participated_user__user_id=user_id
)
flags = []
count_answered = 0
for c in challenges:
for flag in c.flags.all():
# find if user already answer the flag
flag.status = "Not Solved " + "\u2715"
flag.answered = False
for record in user_records:
if record.challenge_flag_id == flag.id:
flag.status = "Solved " + "\u2713"
flag.answered = True
flag.points_gained = record.points_gained
count_answered += 1
break
flags.append(flag)
return {
"game": game,
"challenges": challenges,
"flags": flags,
"count_answered": count_answered,
}
def update_score_process(game, flag, user_id, username):
already_answered = UserChallengeRecord.objects.filter(
participated_user__user_id=user_id,
participated_user__game=game,
challenge_flag=flag,
).exists()
if already_answered:
return {"message": "เธเธญเธ flag เธเธตเนเนเธเนเธฅเนเธง", "correct": False}
else:
points_gained = flag.point
try:
if game.period:
remaingin_time = game.period.get_remaining_time_percentage()
points_gained = round(points_gained * remaingin_time)
except Game.period.RelatedObjectDoesNotExist:
pass
participation = UserParticipateGame.objects.filter(user=user_id, game=game)
user_challenges_record = UserChallengeRecord(
participated_user_id=Subquery(participation.values("id")),
challenge=flag.challenge,
challenge_flag=flag,
points_gained=points_gained,
)
user_challenges_record.save()
# add score
participation.update(game_score=F("game_score") + points_gained)
ScoreHistory.objects.create(
gained=points_gained,
type="challenge",
object_id=flag.id,
group_id=game.id,
user_id=user_id,
)
return {
"message": "เธเธนเธเธเนเธญเธ",
"correct": True,
"points_gained": points_gained,
"answered_at": user_challenges_record.answered_at,
"flagid": user_challenges_record.challenge_flag_id,
}
def get_top10_score(game_id):
top10 = UserParticipateGame.objects.filter(game_id=game_id).values_list("id")[:10]
unique_user = (
UserParticipateGame.objects.filter(game_id=game_id)
.extra(select={"points_gained": 0})
.annotate(answered_at=F("participate_at"), username=F("user__username"))
)
score = (
UserChallengeRecord.objects.filter(participated_user_id__in=top10)
.values(
"points_gained",
"answered_at",
username=F("participated_user__user__username"),
)
.union(unique_user)
.order_by("answered_at")
)
return tuple(score)
def index(request):
now = datetime.now()
games = Game.objects.select_related("period").filter(is_archive=False)
context = {"games": games}
return render(request, "game/index.html", context)
@login_required
def game_view(request, game_slug):
game = get_object_or_404(Game.objects.select_related("period"), slug=game_slug)
try:
remaining_time = game.period.get_remaining_time_percentage()
if remaining_time > 1:
messages.warning(request, "เธขเธฑเธเนเธกเนเธเธถเธเนเธงเธฅเธฒเนเธฃเธดเนเธกเนเธเธก")
return redirect("game_index")
game.participants.add(request.user)
except Game.period.RelatedObjectDoesNotExist:
# no periods game always ongoning
game.period = None
game.participants.add(request.user)
context = populate_game_challenges(game, request.user.id)
return render(request, "game/game.html", context)
@login_required
@require_POST
def enter_challenge_flag(request, game_id):
game = get_object_or_404(Game.objects.select_related("period"), id=game_id)
try:
# check if game has period and started yet...
if not game.period.is_game_start():
return JsonResponse({"message": "เนเธเธกเธขเธฑเธเนเธกเนเนเธฃเธดเนเธก", "correct": False})
except Game.period.RelatedObjectDoesNotExist:
# no periods game always ongoning
game.period = None
try:
right_flag = ChallengeFlag.objects.select_related("challenge").get(
flag__iexact=request.POST.get("flag", "").strip(),
challenge__game_id=game_id,
)
result = update_score_process(
game, right_flag, request.user.id, request.user.username
)
return JsonResponse(result)
except ChallengeFlag.DoesNotExist:
return JsonResponse({"message": "เธเธดเธ", "correct": False})
@login_required
def get_current_top10_score(request, game_id):
scores = get_top10_score(game_id)
return JsonResponse({"data": scores}) | NirushaaaR/CTF-platform-final-project | src/game/views.py | views.py | py | 5,996 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "game.models.Challenge.objects.filter",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "game.models.Challenge.objects",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "game.models.Challenge",
"line_number": 23,
"usage_type": "name"
... |
16572439195 | import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import seaborn as sns
sns.set(style="whitegrid")
sns.set_context("paper", font_scale=1.5)
# read entire results
data = pd.read_csv("../CSV/jmh-result.csv")
# filter Benchmarks
data = data.loc[data['Benchmark'].str.contains("OneShot")]
# names are long like absda.asddsa.casdas.yield -> filter to yield
data['Benchmark'] = data['Benchmark'].apply(lambda x: x.split(".")[-1])
# round relevant cols to ints
cols = ['Param: paramCount', 'Param: stackDepth', 'Score']
data[cols] = data[cols].applymap(np.int64)
g = sns.catplot(x="Param: paramCount", y="Score", hue="Benchmark", data=data,
hue_order=["noYield", "yield", "yieldThenContinue", "yieldAfterEachCall", "yieldBeforeEachCall",
"yieldBeforeAndAfterEachCall"],
col="Param: stackDepth", col_wrap=4, kind="bar", sharey=False, legend=False)
g.despine(left=True)
g.set_ylabels("Time in nanoseconds")
g.set_xlabels("paramCount")
g.set_titles("stackDepth: {col_name}")
plt.legend(loc='center left', bbox_to_anchor=(1.01, 0.5), frameon=False)
# plt.show()
plot_folder = "../Plots/"
plt.savefig(plot_folder + "OneShot-stackDepths-as-columns.png", bbox_inches='tight')
| huti26/project-loom-analysis | Plots/Seaborn/JMH/Scripts/OneShot-stackDepths-as-columns.py | OneShot-stackDepths-as-columns.py | py | 1,252 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "seaborn.set",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "seaborn.set_context",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.int64",
"line... |
28096738039 | import requests
from text2speech.modules import RemoteTTS, TTSValidator
class MaryTTS(RemoteTTS):
PARAMS = {
'INPUT_TYPE': 'TEXT',
'AUDIO': 'WAVE_FILE',
'OUTPUT_TYPE': 'AUDIO'
}
def __init__(self, config=None):
config = config or {"lang": "en-us",
"url": "http://mary.dfki.de:59125/",
"voice": 'cmu-bdl-hsmm'
}
super(MaryTTS, self).__init__(config, api_path='/process',
validator=MaryTTSValidator(self))
if self.lang.lower() in ["en-uk", "en-gb"]:
self.lang = "en_GB"
elif self.lang.lower().startswith("en"):
self.lang = "en_US"
def build_request_params(self, sentence):
params = self.PARAMS.copy()
params['LOCALE'] = self.lang
params['VOICE'] = self.voice
params['INPUT_TEXT'] = sentence.encode('utf-8')
return params
def describe_voices(self):
voices = {}
locales = requests.get(self.url + "/locales").text.split()
for l in locales:
voices[l] = []
voice_data = requests.get(self.url + "/voices").text.split("\n")
for v in voice_data:
if not v.strip():
continue
voice, lang_code, gender = v.split()[:3]
voices[lang_code] += [voice]
return voices
class MaryTTSValidator(TTSValidator):
def __init__(self, tts):
super(MaryTTSValidator, self).__init__(tts)
def validate_connection(self):
try:
resp = requests.get(self.tts.url + "/version", verify=False)
if not resp.text.startswith("Mary TTS server"):
raise Exception('Invalid MaryTTS server.')
except:
raise Exception(
'MaryTTS server could not be verified. Check your connection '
'to the server: ' + self.tts.url)
def get_tts_class(self):
return MaryTTS
| HelloChatterbox/text2speech | text2speech/modules/mary_tts.py | mary_tts.py | py | 2,017 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "text2speech.modules.RemoteTTS",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "text2speech.mo... |
31551591291 | # This code is written to extract electronic band structure data from V.A.S.P. calculation results.
import codecs
import re
import linecache # ่ฟไธชๆจกๅ้ๅธธ้ๅๆ นๆฎ็ปๅบ็่กๅท๏ผ ไปๆๆฌๆไปถไธญ่ฏปๅๆๅฎ่ก็ๆฐๆฎ
import numpy as np
class vasp:
def __init__(self):
self.name = vasp
##############################################################################################################
# ไธไบ้็จๅฝๆฐ
# ่ฟไธชๅฝๆฐๅฉ็จlinecacheๆจกๅ๏ผๅฏไปฅไปๆฐๆฎๆไปถไธญ่ฏปๅบๆๅฎ่ก็ไฟกๆฏ๏ผๅนถไปฅๅญ็ฌฆไธฒๅฝขๅผ่ฟๅ
# ๅบๆณจๆ๏ผ่ฟไธชๅฝๆฐ็่กๆฐไป1ๅผๅง๏ผๅณline_index=5ๆ่ฆ่ฏปๆไปถไธญ็็ฌฌไบ่ก
def GrepLineContent(self,file,line_index):
return linecache.getline(file,line_index).strip()
# ๆญคๅฝๆฐๅฏๅฐ่ดน็ฑณ้ข่ฐๆดไธบ้ถ๏ผ้็จไบenergyไธบๅ่กจ๏ผไธ็ปดๆฐๆฎ๏ผไบ็ปดๆฐ็ปไปฅๅ็ฉ้ตๅฝขๅผ็ๆฐๆฎ็ๆ
ๅต
# ่ฅ่พๅ
ฅ็energyๆฏไธ็ปดๆฐ็ปๆๅ่กจ๏ผๅฏนๅบไบ่ฝ้ๅผๆฏไธ็ปดๅบๅ็ๆ
ๅต๏ผๅบ็จไบ่ฐๆดๆๅฏๅบฆDOS็่ชๅ้
# ่ฅ่พๅ
ฅ็energyๆฏๅตๅฅๅ่กจ๏ผไบ็ปดๆฐ็ปๆ็ฉ้ต๏ผๅฏนๅบไบ่ฝ้ๆฏไบ็ปด่ฝ้้ข็ๆ
ๅต๏ผๅบ็จไบ่ฐๆด่ฝๅธฆๅพbands็้ซๅบฆ
def ShiftFermiSurface(self, energy, fermi_energy):
energy_array = np.array(energy) # ๅฐ่พๅ
ฅ่ฝฌๆขไธบๆฐ็ป๏ผ็กฎไฟไธไธๆญฅ่ฎก็ฎไธญ็่พๅ
ฅๆฏๆฐ็ป๏ผไธ็ปดๆไบ็ปด๏ผๅฝขๅผ็ๆฐๆฎ
return energy_array-fermi_energy
##############################################################################################################
# ๆๅฏๅบฆ๏ผDOS๏ผๆๅๆจกๅ
# This function is designed to read out DOS data from DOSCAR
def ReadDOSCAR(self, DOSCAR):
file = codecs.open(DOSCAR, 'rb', 'utf-8', 'ignore') # Open file, using codecs to uniform coding type
line = file.readline()
lindex = 0 # line index
data = [] # DOSๆฐๆฎ
while line:
if lindex <= 4:
pass # The first five lines not terribly useful
else:
value = line.split() # ไปฅ็ฉบๅญ็ฌฆ๏ผ็ฉบๆ ผ๏ผๆข่ก'\n'๏ผๅถ่กจ็ฌฆ'\t'็ญ๏ผไธบๅ้็ฌฆๅฏนๅญ็ฌฆไธฒๅๅ็ ๏ผไปไน้ฝไธๅกซ้ป่ฎคไธบ็ฉบๅญ็ฌฆ๏ผ
value = list(map(float, value))
data.append(value)
line = file.readline()
lindex += 1
file.close()
npoints = int(data[0][2]) # number of gridpoints on which the DOS is evaluated
separated_data = [] # ๅ้ๅ็ๆฐๆฎๆป้
nrows = npoints + 1 # number of line in one data subset (ไธไธชๆฐๆฎๅญ้็่กๆฐ)
nsubset = int(len(data) / nrows) # number of data subsets (ๆฐๆฎๅญ้็ไธชๆฐ)
for i in range(nsubset):
data_subset = [] # ๆฐๆฎๅญ้๏ผๆฏๆฌกๅพช็ฏๅผๅง้ๆฐๅฎไนๆ็ฉบๅ่กจ
for j in range(nrows):
data_subset.append(data[i * nrows + j]) # iๆ ๅฎไบๆฐๆฎๅญ้็ๅบๅท๏ผjๆ ๅฎไบๅจ่ฟไธชๅญ้ไธญๆฐๆฎ่ก็ๅบๅท
separated_data.append(data_subset) # ๅฐๅญ้่กฅๅ
ๅฐๅ้ๅ็ๆฐๆฎๆป้ๅฝไธญ
return separated_data
# ๅฉ็จReadDOSCARๅฝๆฐๆด็ๅบๆๅฏๅบฆ(DOS)
def GetData(self, DOSCAR, spin_polarized='False'):
data = self.ReadDOSCAR(DOSCAR)[0] # ้ๆๅฝฑ็ๆๅฏๅบฆๅจ็ฌฌไธไธชๅญ้ๅฝไธญ
npoints = int(data[0][2]) # number of gridpoints on which the DOS is evaluated
Efermi = float(data[0][3]) # The Fermi energy
if spin_polarized == 'False':
key = ['energy', 'DOS', 'integrated DOS']
else:
key = ['energy', 'DOS-spin up', 'DOS-spin down', 'integrated DOS-spin up', 'intergrated DOS-spin down']
DOS = dict.fromkeys(key) # ๆ นๆฎๅ่กจkeyไธญ็้ฎ็ๆDOSๅญๅ
ธ๏ผๅฏนๅบ้ฎๅผไธบNone
for n in key:
DOS[n] = [] # ๅฐๆไปฅๅ่กจkeyไธญๅฏนไบ็้ฎๅผๆนไธบ็ฉบๅ่กจ๏ผ็จไบๅญๆพๆฐๆฎ
DOS.update({'number': npoints, 'Efermi': Efermi}) # ๅฐ่ฎก็ฎ็่ฝ้็น็ไธชๆฐ่ท่ดน็ฑณ่ฝๆดๆฐๅฐDOSๅญๅ
ธๅฝไธญ
for i in range(len(data)):
if i == 0: # The first line is parameters (็ฌฌไธ่ก้ฝๆฏๅๆฐ)
pass
else:
for j in key:
DOS[j].append(data[i][key.index(j)]) # key.index(j) - ๅฉ็จjๅจๅ่กจkeyไธญ็ไฝ็ฝฎๆฅๅ้
ๅฏนๅบๆฐๆฎๅฐๅญๅ
ธDOSไธญ
return DOS
# ๅฉ็จReadDOSCARๅฝๆฐๆด็ๅบๆๅฝฑๆๅฏๅบฆ(projected DOS)
def GetProjectedData(self, DOSCAR, **kwargs):
data_total = self.ReadDOSCAR(DOSCAR)
natom = len(data_total) - 1 # ๅไธชๅๅญ็ๆๅฏๅบฆไผๅๅพไธไธชๅญ้๏ผๆปDOSไผๅๅพไธไธชๅญ้๏ผๆไปฅๅญ้ๆปๆฐๅ1ๅณไธบๅๅญๆปๆฐ
spin = kwargs['spin_polarized'] if 'spin_polarized' in kwargs else 'False'
lm_decomposed = kwargs['lm_decomposed'] if 'lm_decomposed' in kwargs else 'False'
# decomposition of azimuthal quantum number (่ง้ๅญๆฐ) l and magnetic quantum number (็ฃ้ๅญๆฐ) m
if spin == 'False':
if lm_decomposed == 'False':
key = ['energy', 's', 'p', 'd'] # s, p, d indicate the atomic orbital
else:
key = ['energy', 's', 'p_{y}', 'p_{z}', 'p_{x}', 'd_{xy}', 'd_{yz}', 'd_{z^2}', 'd_{xz}',
'd_{x^2-y^2}']
else:
key = ['energy', 's-spin up', 's-spin down', 'p-spin up', 'p-spin down', 'd-spin up', 'd-spin down']
atom_list = kwargs['atom_list'] if 'atom_list' in kwargs else ['atom'+str(n+1) for n in range(natom)]
# ๅฆๆ ๆๅฎ๏ผๅๆ นๆฎๅๅญไธชๆฐ็ๆๅๅญๅ็งฐ
DOS_projected = dict.fromkeys(atom_list) # ๆ นๆฎatom_listๅๅปบไธไธชๅญๅ
ธ๏ผๆฏไธชatomๅฏนๅบๅพ้ฎๅผ้ฝๆฏไธไธช็ฉบๅญๅ
ธ
for i in atom_list:
DOS_projected[i] = dict.fromkeys(key)
for j in key:
DOS_projected[i][j] = [] # ๅฐๅฏนๅบๅๅญไธญ็ๅฏนๅบ้ฎ็ๅผๆนไธบ็ฉบๅ่กจ๏ผ็จไบๅญๆพๆฐๆฎ
nrow_subset = len(data_total[0]) # ไธไธชๆฐๆฎๅญ้็่กๆฐ
for i in atom_list:
subset_index = atom_list.index(i) + 1 # ็ฑไบๆปๆฐๆฎ็็ฌฌไธไธชๅญ้ๆฏๆปๆๅฏๅบฆ๏ผๆไปฅatom_list.index(i)่ฆๅ 1๏ผๅณไป็ฌฌไบไธชๅญ้่ฏป่ตท
data_subset = data_total[subset_index]
for j in range(nrow_subset): # jๆ ็คบ็่ฏป่ฟไธชๆฐๆฎๅญ้็ๅ
ทไฝ่กๆฐ
if j == 0: # The first line is parameters (็ฌฌไธ่ก้ฝๆฏๅๆฐ)
pass
else:
for k in key:
key_index = key.index(k) # key.index(k) - ๅฉ็จkๅจๅ่กจkeyไธญ็ไฝ็ฝฎๆฅๅ้
ๅฏนๅบๆฐๆฎๅฐๅญๅ
ธไธญ็ๅ
ทไฝไฝ็ฝฎ
DOS_projected[i][k].append(data_subset[j][key_index])
return DOS_projected
##############################################################################################################
# ่ฝๅธฆๆๅๆจกๅ
# This function is designed to extract electronic bands data from file EIGENVAL.
def GetEbands(self,EIGENVAL):
pattern_int = re.compile(r'-?\d+') # ๅน้
ๆดๆฐ๏ผ็จไบๆๅๅๆฐ
# pattern_float = re.compile(r'-?\d+\.\d+?[Ee]?-?\d+') # ๅน้
ๅฏ่ฝๆ็งๅญฆ่ฎกๆฐๆณ็ๆตฎ็นๆฐ็ๆญฃๅ่กจ่พพๅผ, ็จไบๆๅๆฐๆฎ
# ๆๅ่ฝๅธฆ่ฎก็ฎๆถ็ไธไบๅ
จๅฑๅๆฐ
parameter_string = self.GrepLineContent(EIGENVAL,6)
parameter = pattern_int.findall(parameter_string) # ๅฉ็จๆญฃๅ่กจ่พพๅผๆๅๅบ่ฟไธ่กๆๆ็ๆดๆฐ
num_valence_electrons, num_kpoints, num_bands = parameter
# ่ฟ่ก็ฌฌไธไธชๆฐไธบไปท็ตๅญๆปๆฐ๏ผ็ฌฌไบไธชไธบk็น่ทฏๅพๆป็นๆฐ๏ผ็ฌฌไธไธชไธบ่ฝๅธฆๆปๆฐ
num_valence_electrons = int(num_valence_electrons) # ไปๆไปถไธญ่ฏปๅๅฎ็ๆฐๆฎๅฝขๅผไธบๅญ็ฌฆไธฒ๏ผ้่ฆ่ฝฌๆขไธบๆดๅๆ่ฝ่ฟ่กๅ็ปญๅค็
num_kpoints = int(num_kpoints)
num_bands = int(num_bands)
# ้่ฟๅพช็ฏ่ฏปๅEIGENVALไธญ็ๆฐๆฎ
file = codecs.open(EIGENVAL,'rb','utf-8','ignore') # Open file, using codecs to uniform coding type
line = file.readline()
line_index = 1 # ่ฏปๅๆไปถๆถ๏ผไธบไบๆนไพฟ๏ผๆไปฌ่ฎพๅฎไปไธๅผๅง๏ผ้ฃไนline_index=1ๆๆไปถ็็ฌฌไธ่ก
raw_data = [] # ๅญๆพๅๅงๆฐๆฎ็ๅ่กจ
while line:
if line_index >= 7: # ไธบไบๆนไพฟๅ็ปญ็็ผ็จ่ทๆฐๆฎๅค็๏ผๆไปฌไปEIGENVAL็็ฌฌ7่กๅผๅง่ฏป
value = line.split() # ไปฅ็ฉบๅญ็ฌฆ๏ผ็ฉบๆ ผ๏ผๆข่ก'\n'๏ผๅถ่กจ็ฌฆ'\t'็ญ๏ผไธบๅ้็ฌฆๅฏนๅญ็ฌฆไธฒๅๅ็ ๏ผไปไน้ฝไธๅกซ้ป่ฎคไธบ็ฉบๅญ็ฌฆ๏ผ
value = list(map(float,value))
raw_data.append(value)
line = file.readline()
line_index += 1
file.close()
bands = np.zeros((num_bands,num_kpoints)) # ๅฎไนไธไธชnum_bands่กnum_kpointsๅ็็ฉ้ต็จไบๅญๆพ่ฝๅธฆๆฐๆฎ
occupation = np.zeros((num_bands, num_kpoints)) # ๅ็๏ผๅฎไนไธไธช็ฉ้ตๅญๆพ็ตๅญๅ ๆฎๆฐๆฎ๏ผๆญค็ฉ้ต่ท่ฝๅธฆ็ฉ้ตๅ
ทๆ็ธๅ็็ปดๅบฆ
# ๅจEIGENVALไธญ๏ผๆฐๆฎไผๆ็
งK็น่ทฏๅพ้กบๅบๆๅธ๏ผๆฏไธชK็น็่ฝๅธฆๆฐๆฎๅฏไปฅ็ไฝไธไธชๅพช็ฏ๏ผ้ฃไนๆไปฌไพฟๅฏไปฅ้่ฟไธๆญloopๅพช็ฏๆฅๅฐๆฐๆฎ่ฟ่กๅ็ฑปๆด็
nrows_cycle = num_bands+2 # ๆฏไธชๅพช็ฏ็ๆฐๆฎ่กๆฐไธบ๏ผ่ฝๅธฆๆฐ+K็นๅๆ ่ก+็ฉบ็ฝๅๅฒ่ก = ่ฝๅธฆๆฐ+2่ก
Kpath, Kweight = [[],[]] # ๆน้ๅฎไน็ฉบๅ่กจๅๅคๅจๅญๆฐๆฎ๏ผk_pathๆฏK็น่ทฏๅพ๏ผk_weightๆฏK็นๅฏนๅบ็ๆ้
for i in range(num_kpoints):
# ็กฎๅฎๅไธชๆฐๆฎๅฏนๅบ็ๅ่กจๅผ็ดข
Kindex, band_data_starting = [i*nrows_cycle+1, i*nrows_cycle+2]
Kpath.append([raw_data[Kindex][0],raw_data[Kindex][1],raw_data[Kindex][2]]) # K็น่ทฏๅพ
Kweight.append(raw_data[Kindex][3])
# ้่ฟๅพช็ฏๅฐ่ฝๅธฆๆฐๆฎ่ตๅผๅฐๅๅๅๅปบ็็ฉ้ตไธญ
for j in range(num_bands):
bands[j,i] = raw_data[band_data_starting+j][1]
occupation[j,i] = raw_data[band_data_starting+j][2]
data_dict = {'num_kpoints': num_kpoints, # K็น่ทฏๅพไธ็ๅ็นๆฐ
'num_bands': num_bands, # ่ฝๅธฆๆฐ็ฎ
'Kpath': Kpath, # K็น่ทฏๅพ
'Kweight': Kweight, # ๆฏไธชK็นๅฏนๅบ็ๆ้
'bands': np.array(bands), # ่ฝๅธฆๆฐๆฎ๏ผๆๅ็่พๅบไธบไบ็ปดๆฐ็ป็่ฏ๏ผๆไฝ็ฉบ้ดไผๆดๅคง๏ผ
'occupation': np.array(occupation)} # ่ฝจ้๏ผ่ฝๅธฆ๏ผๅ ๆฎๆ
ๅต}
return data_dict
###############################################################################################################
# ่ฝๅธฆๅๆๆจกๅ
# ่ฟไธชๆจกๅ็ๅ่ฝๅ
ๆฌไฝไธ้ไบไป้ๆ่ชๆดฝ่ฎก็ฎ๏ผSCF๏ผ็ปๆไธญ่ทๅ่ดน็ฑณ่ฝ็บง๏ผๆ่
้่ฟ่ฝๅธฆๅ ๆฎๆ
ๅต็กฎๅฎ่ดน็ฑณ่ฝ็บงไฝ็ฝฎ
# ไปฅๅ่ฎก็ฎ็ฆๅธฆๅฎฝๅบฆ๏ผๅคๆญๅ
ถไธบ็ดๆฅ่ฟๆฏ้ดๆฅๅธฆ้็ญ็ญ
# ๆญคๅฝๆฐๅฏไปฅ็ดๆฅไปSCF่ฎก็ฎไธญๅพๅฐ็OUTCARๆไปถไธญ่ฏปๅ่ดน็ฑณ่ฝ็บง
def GetFermiEnergy(self,OUTCAR):
return
# ่ฅไฝฟ็จMajestyV็่ๆฌ่ฎก็ฎ็ตๅญ่ฝๅธฆ๏ผSCF่ฎก็ฎ็ปๆไธญ้ๅธธไผ็ๆไธไธชๆป็ปๆไปถMarkdown_SCF๏ผๅ
ถไธญ่ฎฐ่ฝฝ็ๅ็กฎ็่ดน็ฑณ่ฝ็บง๏ผๆฒกๆๅๆฐๅ ๆฎ็ๆ
ๅตไธ๏ผ
# MajestyV็github๏ผhttps://github.com/MajestyV
# ๆญคๅฝๆฐๅฏไปฅไปMarkdownๆไปถไธญๆๅ่ดน็ฑณ่ฝ็บง็่ฎก็ฎ็ปๆ
def GetFermiEnergy_Markdown(self, Markdown_SCF):
pattern = re.compile(r'-?\d+\.?\d+') # ๅน้
ๆตฎ็นๆฐ็ๆญฃๅ่กจ่พพๅผ
file = codecs.open(Markdown_SCF, 'rb', 'utf-8', 'ignore')
line = file.readline()
energy = pattern.findall(line)
Efermi = float(energy[0])
return Efermi
# ๆญคๅฝๆฐๅฏ้่ฟ่ฝๅธฆๅ ๆฎๆ
ๅตๅบๅๅฏผๅธฆ่ทไปทๅธฆ๏ผๆๅ่ฝๅธฆ่พน็ผ๏ผๅธฆ่พน๏ผband edges๏ผ
# ็ฑไบDFT็ฎๆณๆฌ่บซ็ๅๅ ๏ผไผๅบ็ฐๅๅญ่ฝจ้๏ผ่ฝๅธฆ๏ผ่ขซๅๆฐ็ตๅญๅ ๆฎ็ๆ
ๅต๏ผๆไปฅๆไปฌไฝฟ็จOrderPrecisionๆงๅถๅ ๆฎๆ
ๅต็็ฒพ็กฎๅบฆ
# ๅฆOrderPrecision=2๏ผๅ็ฒพ็กฎๅฐๅฐๆฐ็นๅไธคไฝ
def GetBandEdges(self, EIGENVAL, accuracy_order=0):
data_dict = self.GetEbands(EIGENVAL) # ๅฉ็จGetEbandsๅฝๆฐไปEIGENVALๆไปถๆๅ่ฝๅธฆๆฐๆฎ
num_bands = data_dict['num_bands'] # ๆๅ่ฝๅธฆๆปๆฐ
num_kpoints = data_dict['num_kpoints'] # ๆๅK็นๆปๆฐ
bands = data_dict['bands'] # ่ฝๅธฆๅ
ทไฝ็่ฝ้ๅผ
occupation = data_dict['occupation'] # ่ฝๅธฆ็ๅ ๆฎๆ
ๅต
unoccupied = [] # ่ฟไธชๅ่กจ็จไบๅญๆพๆๆๆช่ขซๅ ๆฎ็่ฝๅธฆๆฐๆฎ
occupied = [] # ่ฟไธชๅ่กจ็จไบๅญๆพๆๆๅทฒ่ขซๅ ๆฎ็่ฝๅธฆๆฐๆฎ
for n in range(num_kpoints):
energy_unoccupied = []
energy_occupied = []
for m in range(num_bands):
energy = bands[m,n]
filling_condition = occupation[m][n] # ็ตๅญ็ๅกซๅ
ๆ
ๅต
if round(filling_condition, accuracy_order) == 0: # ้่ฟๅคๆญ่ฝๅธฆ็ๅ ๆฎๆ
ๅตๆฅ็กฎๅฎ่ฝๅธฆๅจ่ดน็ฑณ้ขไนไธ่ฟๆฏ่ดน็ฑณ้ขไนไธ
energy_unoccupied.append(energy) # 0ๅไธบๆช่ขซๅ ๆฎ็่ฝๅธฆ
else:
energy_occupied.append(energy) # 1ๅไธบ่ขซๅ ๆฎ็่ฝๅธฆ
unoccupied.append(energy_unoccupied)
occupied.append(energy_occupied)
conduction_band = [min(unoccupied[i]) for i in range(len(unoccupied))] # ๆไฝๆชๅ ๆฎ่ฝๅธฆ๏ผๅฏผๅธฆ๏ผ๏ผLowest unoccupied band
valence_band = [max(occupied[i]) for i in range(len(occupied))] # ๆ้ซๅทฒๅ ๆฎ่ฝๅธฆ๏ผไปทๅธฆ๏ผ๏ผHighest occupied band
return valence_band, conduction_band
# ๆญคๅฝๆฐ้่ฟ่ดน็ฑณ่ฝ็บง็ไฝ็ฝฎๅบๅๅฏผๅธฆ่ทไปทๅธฆ๏ผๅฆๆๅบ็ฐๅๆฐ็ตๅญๅ ๆฎ็ๆ
ๅต๏ผๅพๅฐ็็ปๆๅฏ่ฝไธๅคชๅ็กฎ๏ผ
def GetBandEdges_Fermi(self, EIGENVAL, Efermi=0):
data_dict = self.GetEbands(EIGENVAL) # ๅฉ็จGetEbandsๅฝๆฐไปEIGENVALๆไปถๆๅ่ฝๅธฆๆฐๆฎ
num_bands = data_dict['num_bands'] # ๆๅ่ฝๅธฆๆปๆฐ
num_kpoints = data_dict['num_kpoints'] # ๆๅK็นๆปๆฐ
bands = data_dict['bands'] # ่ฝๅธฆๅ
ทไฝ็่ฝ้ๅผ
unoccupied = [] # ่ฟไธชๅ่กจ็จไบๅญๆพๆๆๆช่ขซๅ ๆฎ็่ฝๅธฆๆฐๆฎ
occupied = [] # ่ฟไธชๅ่กจ็จไบๅญๆพๆๆๅทฒ่ขซๅ ๆฎ็่ฝๅธฆๆฐๆฎ
for n in range(num_kpoints):
energy_unoccupied = []
energy_occupied = []
for m in range(num_bands):
energy = bands[m,n]
if energy >= Efermi: # ้่ฟ่ฝ้Eๆฏๅฆๅคงไบ็ปๅฎ็่ดน็ฑณ่ฝ็บงๅคๆญ่ฝๅธฆๅจ่ดน็ฑณ้ขไนไธ่ฟๆฏ่ดน็ฑณ้ขไนไธ
energy_unoccupied.append(energy)
else:
energy_occupied.append(energy)
unoccupied.append(energy_unoccupied)
occupied.append(energy_occupied)
conduction_band = [min(unoccupied[i]) for i in range(len(unoccupied))] # ๆไฝๆชๅ ๆฎ่ฝๅธฆ๏ผๅฏผๅธฆ๏ผ๏ผLowest unoccupied band
valence_band = [max(occupied[i]) for i in range(len(occupied))] # ๆ้ซๅทฒๅ ๆฎ่ฝๅธฆ๏ผไปทๅธฆ๏ผ๏ผHighest occupied band
return valence_band, conduction_band
# ๆญคๅฝๆฐๅฏไปฅ่ฎก็ฎๅธฆ้๏ผBandgap๏ผ๏ผๅๆถๅๆๆๆๆฏ็ดๆฅๅธฆ้่ฟๆฏ้ดๆฅๅธฆ้
def GetBandgap(self, EIGENVAL, **kwargs):
mode = kwargs['mode'] if 'mode' in kwargs else 'Occupation' # ้ป่ฎค้่ฟ็ตๅญๅ ๆฎๆ
ๅตๅๆๅธฆ้
accuracy_order = kwargs['accuracy_order'] if 'accuracy_order' in kwargs else 0 # ๅๆ็ตๅญๅ ๆฎๆ
ๅตๆถๆ็จ็็ฒพ็กฎๅบฆ
Efermi = kwargs['Efermi'] if 'Efermi' in kwargs else 0.0 # ้ๅธธไธไฝฟ็จๆญคๆจกๅผ๏ผๆ
่ฎพEfermiไธบ0
if mode == 'occupation':
valence_band, conduction_band = self.GetBandEdges(EIGENVAL, accuracy_order=accuracy_order)
elif mode == 'Efermi':
valence_band, conduction_band = self.GetBandEdges_Fermi(EIGENVAL, Efermi=Efermi)
else:
print(r'ERROR: The mode of this function could only be "Occupation" or "Efermi".')
return
Ev_max = max(valence_band) # ไปทๅธฆ้กถ
Ec_min = min(conduction_band) # ๅฏผๅธฆๅบ
Eg = Ec_min - Ev_max # ๅธฆ้
# ๅๆไปทๅธฆ้กถ่ทๅฏผๅธฆๅบ็ไฝ็ฝฎ
extremum_location = (valence_band.index(Ev_max), conduction_band.index(Ec_min))
return Eg, Ev_max, Ec_min, extremum_location
if __name__=='__main__':
EIGENVAL = 'D:/MaterialsGallery/Testing/MoS2_pawlda/MoS2_2H/1/result/EIGENVAL'
ebands = vasp()
# Gamma-M-K-Gamma-A-L-H-A
a = ebands.GetEbands(EIGENVAL)
#print(len(a['energy'][0]))
#print(len(a['occupation'][31]))
print(a)
#kpath.GetKpath(saving_directory,path,59) | MajestyV/VASPWheels | VaspWheels/GetElectronicBands.py | GetElectronicBands.py | py | 17,172 | python | zh | code | 5 | github-code | 1 | [
{
"api_name": "linecache.getline",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "codecs.open",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_numb... |
19595095260 | from django.forms import ModelForm
from product.models import Product
from django import forms
class ProductForm(ModelForm):
class Meta:
model = Product
exclude = ["modified", "created"]
def __init__(self, **kwargs):
super().__init__(**kwargs)
ignore_fields = ["image"]
for field in self.fields:
if field in ignore_fields:
continue
self.fields[field].widget.attrs.update({
'class': 'form-control'
})
class ImportForm(forms.Form):
excel_field = forms.FileField(label="Excel-Datei")
| mbijou92/erp | gallery_backend/forms.py | forms.py | py | 606 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "product.models.Product",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.forms.Form",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "dj... |
34541456179 | import praw
import gspread
from oauth2client.service_account import ServiceAccountCredentials
import bmemcached
import re
import time
import os
import json
import threading
ARTIST_COL = 0
ALBUM_COL = 1
SCORE_COL = 2
SUBREDDITS = 'fantanoforever+hiphopheads'
COMMAND = re.compile('!fantanobot (.*)', re.IGNORECASE)
URL = 'https://docs.google.com/spreadsheets/d/1GbGyWVtePH8RZCZd7N3RPDh8m-K6hgO6AyKsAHZpbeQ/edit#gid=0'
ACCOUNT = 'fantanobot@fantanobot.iam.gserviceaccount.com'
print('INITIALISING ...')
# Connect to google spreadsheet
scope = ['https://spreadsheets.google.com/feeds', 'https://www.googleapis.com/auth/drive']
credentials = ServiceAccountCredentials.from_json_keyfile_dict(
json.loads(os.environ['CREDENTIALS']),
scope
)
gc = gspread.authorize(credentials)
sheet = gc.open_by_url(URL).worksheet('All Reviews')
data = sheet.get_all_values()
# Connect to Memcached DB (stores comment IDs)
db = bmemcached.Client(
os.environ['MEMCACHEDCLOUD_SERVERS'].split(','),
os.environ['MEMCACHEDCLOUD_USERNAME'],
os.environ['MEMCACHEDCLOUD_PASSWORD']
)
FOOTER = (
"\n\nAll scores sourced from [here]({data_link}).\n\n"
"---\n"
"^(I am a bot and this action was performed automatically) \n"
"[^Send ^my ^creater ^a ^PM]({pm_link}) ^(to provide feedback)"
).format(data_link = URL, pm_link = "https://www.reddit.com/message/compose/?to=NobleLordOfLaziness")
# Try album then artist
def retrieve(term):
try:
regex = re.compile(term, re.IGNORECASE)
except:
return None
print('retrieving album', term, '...')
response = retrieve_album(regex)
if response is None:
print('retrieving artist', term, '...')
response = retrieve_artist(regex)
return response
def retrieve_album(album_name):
global data
try:
values = None
for album in data:
if album_name.match(album[1]):
values = album
assert(values is not None)
print('success')
return "Artist: *{artist}* \nAlbum: {album} \nScore: **{score}**".format(
artist = values[ARTIST_COL],
album = values[ALBUM_COL],
score = values[SCORE_COL]
)
except Exception as e:
print('fail')
print(e)
return None
def retrieve_artist(artist_name):
global data
try:
albums = []
artist = None
for album in data:
if artist_name.match(album[ARTIST_COL]):
albums.append('{album} - **{score}**'.format(
album = album[ALBUM_COL],
score = album[SCORE_COL]
))
temp_artist = album[ARTIST_COL]
if artist is None or len(temp_artist) < len(artist):
artist = temp_artist
assert(artist is not None and len(albums) > 0)
print('success')
return "Fantano's album scores for *{artist}*:\n\n{albums}".format(
artist = artist,
albums = ' \n'.join(albums)
)
except Exception as e:
print('fail')
print(e)
return None
def ampersand_replacement(term):
# Make replacements for ampersand usage
if 'and' in term:
term = term.replace('and', '(and|&)')
elif '&' in term:
term = term.replace('&', '(and|&)')
return term
def check_comments(client):
for comment in client.subreddit(SUBREDDITS).stream.comments():
# Check if replied to
if db.get(str(comment.id)) is not None or comment.author == client.user.me():
continue
# search for bot command in comment
bot_call = COMMAND.search(comment.body)
if bot_call is None:
continue
print('found comment: https://reddit.com' + comment.permalink)
print('term:', bot_call.group(1))
term = bot_call.group(1).strip()
new_term = ampersand_replacement(term)
response = retrieve(new_term)
if response is None:
response = "Could not find anything for *{term}*".format(term = term)
print(response)
comment.reply(response + FOOTER)
db.set(str(comment.id), 'True')
print('replied')
def check_messages(client):
for item in client.inbox.stream():
if db.get(str(item.id)) is not None:
continue
if type(item) == praw.models.Message and '!fantanobot' in item.subject:
print("Message found: **{subject}** - {body}".format(
subject = item.subject,
body = item.body
))
term = ampersand_replacement(item.body)
response = retrieve(term)
if response is None:
response = "Could not find anything for *{term}*".format(term = item.body)
print(response)
item.reply(response + FOOTER)
db.set(str(item.id), 'True')
print('replied')
def login():
print('logging in ...')
client = praw.Reddit(
username = os.environ['REDDIT_USER'],
password = os.environ['REDDIT_PASS'],
client_id = os.environ['CLIENT_ID'],
client_secret = os.environ['CLIENT_SECRET'],
user_agent = 'FantanoBot responder'
)
return client
def run(client):
print('running ...')
comment_thread = threading.Thread(target=check_comments, args=(client,))
message_thread = threading.Thread(target=check_messages, args=(client,))
comment_thread.start()
message_thread.start()
if __name__ == '__main__':
client = login()
run(client)
| itsjackgardner/FantanoBot | responder.py | responder.py | py | 5,602 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "re.IGNORECASE",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "oauth2client.service_account.ServiceAccountCredentials.from_json_keyfile_dict",
"line_number": 25,
"usage_t... |
10867699789 | import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
from sklearn.cluster import KMeans
df = pd.read_csv('dataset.csv')
X = np.array(df)
c_x = np.array([0.1,0.3])
c_y = np.array([0.6,0.2])
centroids = np.array(list(zip(c_x,c_y)))
print(centroids)
model = KMeans()
model.fit(X, centroids)
plt.figure()
plt.scatter(X[:,0],X[:,1],alpha=0.3)
plt.show()
plt.figure()
plt.scatter(X[:,0],X[:,1],alpha=0.3)
plt.scatter(c_x,c_y, marker='o', c='black', s=150)
plt.show() | yogeshwarram/LP3-2 | Kmeans.py | Kmeans.py | py | 491 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number"... |
11344865437 | from itertools import permutations
import math
# ์์ ํ๋ณ ํจ์
def is_prime_number(x):
if x < 2:
return False
for i in range(2, int(math.sqrt(x)) + 1):
if x % i == 0:
return False
return True
def solution(numbers):
answer = set()
for length in range(1, len(numbers) + 1):
for p in permutations(numbers, length):
num = int("".join(p))
if is_prime_number(num):
answer.add(num)
return len(answer)
# Test
print(solution("011"))
# print(solution("17"))
| CHOJUNGHO96/algorithm | ํ๋ก๊ทธ๋๋จธ์ค_๊ณ ๋์ kit/์์ ํ์/2๋จ๊ณ_์์์ฐพ๊ธฐ2.py | 2๋จ๊ณ_์์์ฐพ๊ธฐ2.py | py | 560 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "math.sqrt",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 17,
"usage_type": "call"
}
] |
36390024180 | import os
import numpy as np
import torch
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
from torchvision import transforms
from tqdm.notebook import tqdm
from dataset import MolecularDataset
from pointnet import PointNetCls
writer = SummaryWriter()
# hyper parameters
LR = 0.001
EPOCH = 500
BATCH_SIZE = 128
# load dataset
dataset = MolecularDataset(transform=transforms.ToTensor(), data_augmentation=True)
train_size = int(0.8 * len(dataset))
test_size = int(len(dataset) - train_size)
train_data, test_data = data.random_split(dataset, [train_size, test_size])
train_loader = data.DataLoader(dataset=train_data, batch_size=BATCH_SIZE, shuffle=True,)
test_loader = data.DataLoader(dataset=test_data, batch_size=BATCH_SIZE,)
# define something about training...
mynet = PointNetCls()
optimizer = torch.optim.Adam(mynet.parameters(), lr=LR)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=30, gamma=0.8)
loss_func = torch.nn.MSELoss()
# train
myepoch = tqdm(range(1,500))
for epoch in myepoch:
loss_list = []
valid_loss_list = []
for step, (features, targets) in enumerate(train_loader):
mynet.cuda()
mynet.train()
features = features.transpose(2, 1)
features, targets = features.cuda(), targets.cuda()
predicted_targets, feature_transform_matrix = mynet(features)
loss = loss_func(targets, predicted_targets)
loss = (
loss + mynet.feature_transform_regularizer(feature_transform_matrix) * 0.001
)
optimizer.zero_grad()
loss.backward()
optimizer.step()
loss_list.append(loss.cpu().data.numpy())
ave_loss = np.array(loss_list).mean()
writer.add_scalar("loss", ave_loss, epoch)
if epoch % 10 == 0:
for step, (features, targets) in enumerate(test_loader):
mynet.cpu()
mynet.eval()
features = features.transpose(2, 1)
predicted_targets, feature_transform_matrix = mynet(features)
valid_loss_list.append(loss_func(targets, predicted_targets).cpu().data.numpy())
ave_valid_loss = np.array(valid_loss_list).mean()
writer.add_scalar("valid_loss", ave_valid_loss, epoch)
myepoch.set_description("loss:{:.2f}#####".format(ave_loss))
scheduler.step()
mynet.eval()
torch.save(mynet, "mynet.pkl")
writer.close()
train_loss_list = []
for step, (features, targets) in enumerate(test_loader):
features = features.transpose(2, 1)
predicted_targets, feature_transform_matrix = mynet(features)
train_loss_list.append((torch.abs(predicted_targets.data-targets)/targets*100).mean())
train_loss = -np.array(train_loss_list).mean()
valid_loss_list = []
for step, (features, targets) in enumerate(test_loader):
features = features.transpose(2, 1)
predicted_targets, feature_transform_matrix = mynet(features)
valid_loss_list.append((torch.abs(predicted_targets.data-targets)/targets*100).mean())
valid_loss = -np.array(valid_loss_list).mean()
print('่ฎญ็ป้่ฏฏๅทฎ๏ผ{:.4f}%'.format(train_loss))
print('ๆต่ฏ้่ฏฏๅทฎ๏ผ{:.4f}%'.format(valid_loss))
| HeatEnergists/PointNet | regression/train.py | train.py | py | 3,150 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "torch.utils.tensorboard.SummaryWriter",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "dataset.MolecularDataset",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms.ToTensor",
"line_number": 23,
"usage_type": "call"
... |
36571795647 | from flask import jsonify, request
from flask_restful import Resource
from Model import db, VistorLevel, LevelOptionsSchema, Level2OptionsSchema, Vistor, LocationOptionSchema
from webargs import fields, validate
from webargs.flaskparser import use_args, use_kwargs, parser, abort
# Marshmallow schema singletons used to serialise query results.
# The original assigned each name twice (first the bare class, then the
# many=True instance); the first assignments were dead stores and have
# been removed.
level_schema = LevelOptionsSchema(many=True)
level2_schema = Level2OptionsSchema(many=True)
location_schema = LocationOptionSchema(many=True)
class Level1Resource(Resource):
    """REST resource exposing visitor level/location option lists.

    GET behaviour depends on the supplied query arguments:
      * level + sublevel -> distinct locations for that level pair
      * level only       -> distinct second-level options under the level
      * neither          -> distinct first-level options
    """

    # webargs schema for the supported query-string parameters.
    queryArgs = {
        "level": fields.Str(),
        "sublevel": fields.Str()
    }

    @use_args(queryArgs)
    def get(self, args):
        # BUG FIX: the original condition `'level' and 'sublevel' in args`
        # evaluated as `'level' and ('sublevel' in args)` -- the string
        # literal is always truthy -- so a request carrying only 'sublevel'
        # crashed on args['level'].  Both keys are now checked explicitly.
        if 'level' in args and 'sublevel' in args:
            if args['sublevel']:
                location = Vistor.query.filter_by(
                    Level_1=args['level'], Level_2=args['sublevel']).distinct(Vistor.LOCATION).all()
            else:
                # An empty sublevel means "all locations under the level".
                location = Vistor.query.filter_by(
                    Level_1=args['level']).distinct(Vistor.LOCATION).all()
            location = location_schema.dump(location).data
            return {"message": "Success", 'data': location}, 200
        elif 'level' in args:
            levels = VistorLevel.query.filter_by(
                Level_1=args['level']).distinct(VistorLevel.Level_2).all()
            levels = level2_schema.dump(levels).data
            return {"message": "Success", 'data': levels}, 200
        else:
            levels = VistorLevel.query.distinct(VistorLevel.Level_1).all()
            levels = level_schema.dump(levels).data
            return {"message": "Success", 'data': levels}, 200
| donc310/WidgetApi | resources/Levels.py | Levels.py | py | 1,692 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Model.LevelOptionsSchema",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "Model.LevelOptionsSchema",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "Model.Level2OptionsSchema",
"line_number": 11,
"usage_type": "name"
},
{
"api_nam... |
33520509892 | import os
import sys
import time
import pytest
def test_modules():
    """Yield the paths of all test modules found under this file's directory."""
    root = os.path.dirname(__file__)
    for folder, _, names in os.walk(root):
        for name in names:
            if _is_test_module(name):
                yield os.path.join(folder, name)
def _is_test_module(fname):
    """Return True when *fname* looks like a test module (test*.py)."""
    looks_like_test = fname.startswith('test')
    is_python_file = fname.endswith('.py')
    return looks_like_test and is_python_file
def collect_execution_times(test_modules):
    """Yield (module_path, seconds) pairs, running each module under pytest."""
    # One-off pytest options shared by every run (applied lazily, on the
    # first advance of the generator, just like the original).
    sys.argv.append('--match=^test')
    sys.argv.append('-q')
    for module_path in test_modules:
        yield module_path, _test_module_execution_time(module_path)
def _test_module_execution_time(tmodule):
    """Run one test module under pytest and return the wall-clock seconds."""
    started = time.time()
    pytest.main(args=[tmodule])
    elapsed = time.time() - started
    return elapsed
def write_results(exectimes, write):
    """Write per-module timings (slowest first) plus a grand total.

    Args:
        exectimes: Iterable of (module_path, seconds) pairs.
        write: Callable accepting one string (e.g. file.write).
    """
    # The original also created an unused `writes = []` list; removed.
    total = 0.0
    # Slowest modules first; `total` shows a running cumulative time per row.
    for record in reversed(sorted(exectimes, key=lambda record: record[1])):
        total += record[1]
        write('%s%.02f s (%.02f s)\n' % (record[0].ljust(70), record[1], total))
    write('\nTotal test execution time: %.02f seconds\n' % total)
def main():
    """Collect per-module test timings and write them to testtimes.robot."""
    timings = collect_execution_times(test_modules())
    with open('testtimes.robot', 'w') as output:
        def tee(txt):
            # Mirror each report line to both the file and stdout.
            output.write(txt)
            sys.stdout.write(txt)
        write_results(timings, tee)
if __name__ == '__main__':
    # Script entry point: time every test module and write the report.
    main()
| robotframework/RIDE | utest/time_tests.py | time_tests.py | py | 1,321 | python | en | code | 910 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 1... |
6908770297 | import json
from django.http import HttpResponse
from django.shortcuts import render
from django.template.loader import render_to_string
from django.views.decorators.csrf import csrf_exempt
from django.views.generic.base import View
from hipsterboard.board.models import Player
from hipsterboard.board.models import Item
class BoardView(View):
    """Single-page board view: GET renders the board, POST mutates items."""

    def get(self, request):
        return render(request, 'index.html', {'players': Player.objects.all()})

    @csrf_exempt
    def dispatch(self, *args, **kwargs):
        # CSRF is disabled for the whole view so the JS client can POST
        # without a token.
        return super(BoardView, self).dispatch(*args, **kwargs)

    @csrf_exempt
    def post(self, request):
        data = request.POST
        player = Player.get(data.get('player'))

        if 'create' in data:
            item, was_created = Item.objects.get_or_create(
                player=player,
                target=data.get('create')
            )
            payload = {
                'html': render_to_string('item.html', {'item': item}),
                'count': Item.objects.filter(player=player).count(),
                'created': was_created,
            }
        elif 'delete' in data:
            # Element ids look like "<prefix>-<pk>"; the pk is the 2nd part.
            item_id = int(data.get('delete').split('-')[1])
            Item.objects.get(id=item_id).delete()
            payload = {
                'killed_in_fire': True,
                'count': Item.objects.filter(player=player).count()
            }
        else:
            payload = {'wat': True}

        return HttpResponse(json.dumps(payload))
| Olivia5k/hipsterboard | hipsterboard/board/views.py | views.py | py | 1,530 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.views.generic.base.View",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "hipsterboard.board.models.Player.objects.all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "hipsterboard.board.models.Player.objects",
"line_number": 16,
... |
26355834775 | import json, os, platform, subprocess, sys, time
from datetime import datetime, timedelta
from operator import itemgetter
# Historical py2/py3 marker; the code below assumes Python 3.
isPy3 = True

# Enable ANSI colour handling (needed on Windows) when colorama is
# available; silently continue without it elsewhere.
try:
    from colorama import init
    init()
except ImportError as e:
    pass

# Make the bundled, per-platform library directory importable before
# pulling in the project's `util` module.
scripts_lib_path = os.path.join(os.path.dirname(__file__), "lib")
this_platform_system = str(platform.system())
platform_lib_path = os.path.join(scripts_lib_path, this_platform_system)
if os.path.exists(platform_lib_path):
    if platform_lib_path not in sys.path:
        sys.path.append(platform_lib_path)

import util

# Interpreter details re-used by info()/top() below.
python_exe = sys.executable
python_ver = platform.python_version()
class bcolors:
    # ANSI SGR escape sequences used to style terminal output.
    HEADER = "\033[95m"      # bright magenta
    OKBLUE = "\033[94m"      # bright blue
    OKGREEN = "\033[92m"     # bright green
    WARNING = "\033[93m"     # bright yellow
    FAIL = "\033[91m"        # bright red
    ENDC = "\033[0m"         # reset all attributes
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
    BACKGROUND = "\033[42m"  # green background
    ITALIC = "\033[3m"
# Convenience aliases used by the formatting helpers below.
# NOTE: the *_end aliases all map to the ANSI reset code, so "ending"
# bold or italic actually clears every active attribute.
bold_start = bcolors.BOLD
bold_end = bcolors.ENDC
italic_start = bcolors.ITALIC
italic_end = bcolors.ENDC
table_header_style = bcolors.BOLD + bcolors.BACKGROUND
error_start = bcolors.FAIL
def format_help(p_input):
    """Convert one markdown-ish help line into ANSI-styled terminal text.

    Heading lines ('#', '##', '###') become bold upper-case text, a bare
    code fence ('```') is dropped (returns None), and every other line is
    padded and has any inline " # " comment italicised.
    """
    text = str(p_input)
    tokens = text.split()
    first = str(tokens[0]) if tokens else None

    if first in ("#", "##", "###"):
        # Strip the heading marker plus its trailing space, drop backticks.
        body = text[len(first) + 1:].replace("`", "")
        return bold_start + str(body.upper()) + bold_end
    if text == "```":
        return None
    styled = text.replace(" # ", italic_start + " # ")
    return " " + styled + " " + italic_end
def get_pip_ver():
    """Return the installed pip version string, or "None" when absent."""
    try:
        import pip
    except ImportError:
        return "None"
    return pip.__version__
def cli_unicode(p_str, p_encoding, errors="ignore"):
    """Python-3 stand-in for the py2 `unicode` builtin.

    The encoding/errors arguments are accepted only for signature
    compatibility; the value is simply coerced with str().
    """
    return str(p_str)


try:
    # Python 2: the `unicode` builtin exists and is used as-is.
    test_unicode = unicode("test")
except NameError as e:
    # Python 3: alias `unicode` to the str()-based shim above.
    unicode = cli_unicode
def check_output_wmic(p_cmds):
    """Run a command and return the second line of its output, stripped.

    Used for `wmic`-style queries whose first output line is a header row.

    Args:
        p_cmds: Argument list passed to subprocess.check_output.

    Returns:
        The second line of the command's whitespace-stripped output.

    Raises:
        subprocess.CalledProcessError: If the command exits non-zero.
    """
    raw = subprocess.check_output(p_cmds)
    try:
        decoded = str(raw, "utf-8")
    # Narrowed from a bare `except:`: only decoding problems (py2's
    # one-argument str(), or non-UTF-8 bytes) should trigger the fallback.
    except (TypeError, UnicodeDecodeError):
        decoded = str(raw)
    return decoded.strip().split("\n")[1]
def top(display=True, isJson=False):
    """Print a `top`-like system snapshot (CPU, disk, load, top processes).

    Args:
        display: When False, only the process sampling pass runs (which
            primes psutil's cpu_percent counters) and nothing is printed.
        isJson: When True, emit one JSON document instead of plain text.
    """
    try:
        import psutil
    except ImportError as e:
        util.exit_message("Missing psutil module", 1)
    current_timestamp = int(time.mktime(datetime.utcnow().timetuple()))
    jsonDict = {}
    procs = []
    # Snapshot every process; ones that vanish mid-iteration are skipped.
    for p in psutil.process_iter():
        try:
            p = p.as_dict(
                attrs=[
                    "pid",
                    "username",
                    "cpu_percent",
                    "memory_percent",
                    "cpu_times",
                    "name",
                ]
            )
        except (psutil.NoSuchProcess, IOError, OSError) as e:
            pass
        else:
            procs.append(p)
    if not display:
        return
    # Busiest processes first.
    processes = sorted(procs, key=lambda p: p["cpu_percent"], reverse=True)
    network_usage = psutil.net_io_counters()
    jsonDict["kb_sent"] = network_usage.bytes_sent / 1024
    jsonDict["kb_recv"] = network_usage.bytes_recv / 1024
    cpu = psutil.cpu_times_percent(percpu=False)
    iowait = ""
    # iowait is only reported by psutil on Linux.
    if util.get_platform() == "Linux":
        jsonDict["iowait"] = str(cpu.iowait)
        iowait = "," + str(cpu.iowait).rjust(5) + "%wa"
    jsonDict["current_timestamp"] = current_timestamp
    jsonDict["cpu_user"] = str(cpu.user)
    jsonDict["cpu_system"] = str(cpu.system)
    jsonDict["cpu_idle"] = str(cpu.idle)
    if not isJson:
        print(
            "CPU(s):"
            + str(cpu.user).rjust(5)
            + "%us,"
            + str(cpu.system).rjust(5)
            + "%sy,"
            + str(cpu.idle).rjust(5)
            + "%id"
            + iowait
        )
    disk = psutil.disk_io_counters(perdisk=False)
    read_kb = disk.read_bytes / 1024
    write_kb = disk.write_bytes / 1024
    jsonDict["kb_read"] = str(read_kb)
    jsonDict["kb_write"] = str(write_kb)
    if not isJson:
        print("DISK: kB_read " + str(read_kb) + ", kB_written " + str(write_kb))
    uptime = datetime.now() - datetime.fromtimestamp(psutil.boot_time())
    # Drop the sub-second fraction from the uptime display.
    str_uptime = str(uptime).split(".")[0]
    line = ""
    uname_len = 8
    # os.getloadavg() is Unix-only -- TODO confirm Windows is handled upstream.
    av1, av2, av3 = os.getloadavg()
    str_loadavg = "%.2f %.2f %.2f " % (av1, av2, av3)
    line = bold_start + "Load average: " + bold_end + str_loadavg
    jsonDict["load_avg"] = str(str_loadavg)
    line = line + bold_start + "Uptime:" + bold_end + " " + str_uptime
    jsonDict["uptime"] = str(str_uptime)
    if not isJson:
        print(line)
    i = 0
    my_pid = os.getpid()
    if not isJson:
        print("")
        print(
            bold_start
            + "    PID "
            + "USER".ljust(uname_len)
            + " %CPU %MEM TIME+ COMMAND"
            + bold_end
        )
    jsonList = []
    # Show at most the 10 busiest processes, excluding this process itself.
    for pp in processes:
        if pp["pid"] == my_pid:
            continue
        i += 1
        if i > 10:
            break
        # TIME+ column shows process CPU cumulative time and it
        # is expressed as: "mm:ss.ms"
        ctime = timedelta(seconds=sum(pp["cpu_times"]))
        ctime_mm = str(ctime.seconds // 60 % 60)
        ctime_ss = str(int(ctime.seconds % 60)).zfill(2)
        ctime_ms = str(ctime.microseconds)[:2].ljust(2, str(0))
        ctime = "{0}:{1}.{2}".format(ctime_mm, ctime_ss, ctime_ms)
        username = pp["username"][:uname_len]
        if isJson:
            pp["username"] = username
            pp["ctime"] = ctime
            pp["cpu_percent"] = float(pp["cpu_percent"])
            pp["memory_percent"] = float(round(pp["memory_percent"], 1))
            jsonList.append(pp)
        else:
            print(
                str(pp["pid"]).rjust(7)
                + " "
                + username.ljust(uname_len)
                + " "
                + str(pp["cpu_percent"]).rjust(6)
                + " "
                + str(round(pp["memory_percent"], 1)).rjust(4)
                + " "
                + str(ctime).rjust(10)
                + " "
                + pp["name"]
            )
    if isJson:
        jsonDict["top"] = jsonList
        print(json.dumps([jsonDict]))
    else:
        print("")
def list(p_json, p_cat, p_comp, p_ver, p_port, p_status, p_kount):
    """Print one component row, as plain text or a JSON array fragment.

    NOTE: intentionally shadows the `list` builtin inside this module;
    the name is kept for backward compatibility with existing callers.
    """
    if not p_json:
        print(p_comp + " " + p_ver + " " + p_port + " " + p_status)
        return
    # Rows after the first are comma-prefixed so the caller can stream
    # them into a JSON array.
    prefix = "," if p_kount > 1 else " "
    row = (
        prefix
        + '{"category": "' + p_cat.rstrip() + '",'
        + ' "component": "' + p_comp.rstrip() + '",'
        + ' "version": "' + p_ver.rstrip() + '",'
        + ' "port": "' + p_port.rstrip() + '",'
        + ' "status": "' + p_status.rstrip() + '"}'
    )
    print(row)
def status(p_json, p_comp, p_ver, p_state, p_port, p_kount):
    """Print one component's status, as plain text or a JSON array fragment.

    Args:
        p_json: When True, emit a JSON fragment (comma-prefixed after the
            first row) instead of plain text.
        p_comp: Component name.
        p_ver: Component version string.
        p_state: State string such as "Running" or "Stopped".
        p_port: Port number as a string; may be empty.
        p_kount: 1-based row counter used to pick the JSON separator.
    """
    if p_json:
        sep = "," if p_kount > 1 else " "
        jsonStatus = {}
        jsonStatus["component"] = p_comp
        jsonStatus["version"] = p_ver
        jsonStatus["state"] = p_state
        if p_port != "" and int(p_port) > 1:
            jsonStatus["port"] = p_port
        category = util.get_comp_category(p_comp)
        if category:
            jsonStatus["category"] = category
        elif p_comp.startswith("pgdg"):
            # BUG FIX: the original wrote `p_comp.startswith == "pgdg"`,
            # comparing the bound method itself to a string -- always False,
            # so pgdg components never received the default category.
            jsonStatus["category"] = 1
        print(sep + json.dumps(jsonStatus))
        return
    # Plain-text branch.  (Dead `app_ver` padding that only fed an old,
    # commented-out print statement has been removed.)
    # Guard against empty p_port before int() -- mirrors the JSON branch.
    if p_state in ("Running", "Stopped") and p_port != "" and int(p_port) > 1:
        on_port = " on port " + p_port
    else:
        on_port = ""
    print(p_comp + " " + p_state.lower() + on_port)
def info(p_json, p_home, p_repo, print_flag=True):
    """Gather and display host/environment information.

    Args:
        p_json: When True, build a JSON document instead of printing text.
        p_home: Installation home directory shown in the banner.
        p_repo: Repository URL shown in the banner.
        print_flag: In JSON mode, print the document when True; when False
            return the dict instead.

    Returns:
        None in text mode (and in JSON mode with print_flag=True);
        the info dict in JSON mode with print_flag=False.
    """
    (
        cloud_name,
        cloud_platform,
        instance_id,
        flavor,
        region,
        az,
        private_ip,
    ) = util.get_cloud_info()
    p_user = util.get_user()
    p_is_admin = util.is_admin()
    pip_ver = get_pip_ver()
    os_arch = util.get_arch()
    this_os = ""
    this_uname = str(platform.system())[0:7]
    # Prefer the cloud-reported private IP over a local lookup.
    if private_ip > "":
        host_ip = private_ip
    else:
        host_ip = util.get_host_ip()
    # NOTE(review): wmic_path is built but never used in this function.
    wmic_path = (
        os.getenv("SYSTEMROOT", "")
        + os.sep
        + "System32"
        + os.sep
        + "wbem"
        + os.sep
        + "wmic"
    )
    host_display = util.get_host_short()
    ## Check the OS & Resources ########################################
    plat = util.get_os()
    glibcV = util.get_glibc_version()
    os_major_ver = ""
    java_major_ver = ""
    java_ver = ""
    # NOTE(review): only Darwin and Linux set the memory/cpu variables;
    # other platforms would hit an UnboundLocalError below -- confirm
    # Windows is excluded upstream.
    if this_uname == "Darwin":
        mem_mb = util.get_mem_mb()
        system_memory_in_kbytes = mem_mb * 1024
        system_memory_in_gb = mem_mb / 1024.0
        system_cpu_cores = util.get_cpu_cores()
        cpu_model = util.getoutput("/usr/sbin/sysctl -n machdep.cpu.brand_string")
        prod_name = util.getoutput("sw_vers -productName")
        prod_version = util.getoutput("sw_vers -productVersion")
        this_os = prod_name + " " + prod_version
    elif this_uname == "Linux":
        mem_mb = util.get_mem_mb()
        system_memory_in_kbytes = mem_mb * 1024
        system_memory_in_gb = mem_mb / 1024.0
        system_cpu_cores = util.get_cpu_cores()
        cpu_model = util.getoutput(
            "grep 'model name' /proc/cpuinfo | head -1 | cut -d':' -f2"
        )
        os_major_ver = util.getoutput(
            "cat /etc/os-release | grep VERSION_ID | cut -d= -f2 | tr -d '\"'"
        )
        # /proc/cpuinfo reports no "model name" on some ARM systems.
        if cpu_model == "":
            cpu_model = "ARM"
        # Probe distro release files from most to least specific.
        if os.path.exists("/etc/redhat-release"):
            this_os = util.getoutput("cat /etc/redhat-release")
        elif os.path.exists("/etc/system-release"):
            this_os = util.getoutput("cat /etc/system-release")
        elif os.path.exists("/etc/lsb-release"):
            this_os = util.getoutput(
                "cat /etc/lsb-release | grep DISTRIB_DESCRIPTION | cut -d= -f2 | tr -d '\"'"
            )
        elif os.path.exists("/etc/os-release"):
            this_os = util.getoutput(
                "cat /etc/os-release | grep PRETTY_NAME | cut -d= -f2 | tr -d '\"'"
            )
    [java_major_ver, java_ver] = util.get_java_ver()
    # Small machines get one decimal of precision; larger ones are rounded
    # to whole GB.
    if system_memory_in_gb > 0.6:
        round_mem = round(system_memory_in_gb)
    else:
        round_mem = round(system_memory_in_gb, 1)
    mem = str(round_mem) + " GB"
    cores = str(system_cpu_cores)
    # Strip vendor noise from the CPU model string.
    cpu = cpu_model.strip()
    cpu = cpu.replace("(R)", "")
    cpu = cpu.replace("(TM)", "")
    cpu = cpu.replace(" CPU ", " ")
    os2 = this_os.replace(" release ", " ")
    os2 = os2.replace(" (Final)", "")
    os2 = os2.replace(" (Core)", "")
    ver = util.get_version()
    [last_update_utc, last_update_local, unique_id] = util.read_hosts("localhost")
    if last_update_local:
        last_upd_dt = datetime.strptime(last_update_local, "%Y-%m-%d %H:%M:%S")
        time_diff = int(util.timedelta_total_seconds(datetime.now() - last_upd_dt))
        last_update_readable = util.get_readable_time_diff(str(time_diff), precision=2)
    versions_sql = util.get_versions_sql()
    perl_ver = util.get_perl_ver()
    os_pkg_mgr = util.get_pkg_mgr()
    if p_json:
        infoJsonArray = []
        infoJson = {}
        infoJson["version"] = ver
        infoJson["home"] = p_home
        infoJson["user"] = p_user
        infoJson["host"] = host_display
        infoJson["host_short"] = util.get_host_short()
        infoJson["host_long"] = util.get_host()
        infoJson["host_ip"] = host_ip
        infoJson["os"] = unicode(
            str(os2), sys.getdefaultencoding(), errors="ignore"
        ).strip()
        infoJson["os_pkg_mgr"] = os_pkg_mgr
        infoJson["os_major_ver"] = os_major_ver
        infoJson["platform"] = unicode(
            str(plat), sys.getdefaultencoding(), errors="ignore"
        ).strip()
        infoJson["arch"] = os_arch
        infoJson["mem"] = round_mem
        infoJson["cores"] = system_cpu_cores
        infoJson["cpu"] = cpu
        infoJson["last_update_utc"] = last_update_utc
        if last_update_local:
            infoJson["last_update_readable"] = last_update_readable
        infoJson["unique_id"] = unique_id
        infoJson["repo"] = p_repo
        infoJson["versions_sql"] = versions_sql
        infoJson["system_memory_in_kb"] = system_memory_in_kbytes
        infoJson["python_ver"] = python_ver
        infoJson["python_exe"] = python_exe
        if pip_ver != "None":
            infoJson["pip_ver"] = pip_ver
        infoJson["perl_ver"] = perl_ver
        infoJson["java_ver"] = java_ver
        infoJson["java_major_ver"] = java_major_ver
        infoJson["glibc_ver"] = glibcV
        infoJson["region"] = region
        infoJson["az"] = az
        infoJson["instance_id"] = instance_id
        infoJson["flavor"] = flavor
        infoJson["private_ip"] = private_ip
        infoJsonArray.append(infoJson)
        if print_flag:
            print(json.dumps(infoJsonArray, sort_keys=True, indent=2))
            return
        else:
            return infoJson
    if p_is_admin:
        admin_display = " (Admin)"
    else:
        admin_display = ""
    langs = "Python v" + python_ver
    if perl_ver > "":
        langs = langs + " | Perl v" + perl_ver
    if java_ver > "":
        langs = langs + " | Java v" + java_ver
    ## util.validate_distutils_click(False)
    if glibcV <= " ":
        glibc_v_display = ""
    else:
        glibc_v_display = " glibc-" + glibcV + "-"
    # Text banner output.
    print(bold_start + ("#" * 70) + bold_end)
    print(bold_start + "# pgEdge CTL: " + bold_end + "v" + ver + " " + p_home)
    print(
        bold_start
        + "# User & Host: "
        + bold_end
        + p_user
        + admin_display
        + " "
        + host_display
        + " "
        + host_ip
    )
    print(
        bold_start
        + "# OS: "
        + bold_end
        + os2.rstrip()
        + " "
        + glibc_v_display
        + os_arch
    )
    print(
        bold_start + "# Machine: " + bold_end + mem + ", " + cores + " vCPU, " + cpu
    )
    if instance_id > "" and not cloud_name == "unknown":
        print(
            bold_start
            + "# Cloud Info: "
            + bold_end
            + f"{cloud_name} {cloud_platform} {instance_id} {flavor} {az}"
        )
    print(bold_start + "# Langs: " + bold_end + langs)
    print(bold_start + "# Repo URL: " + bold_end + p_repo)
    if not last_update_local:
        last_update_local = "None"
    print(bold_start + "# Last Update: " + bold_end + str(last_update_local))
    print(bold_start + ("#" * 70) + bold_end)
def info_component(p_comp_dict, p_kount):
    """Print one component's metadata block to the terminal.

    Args:
        p_comp_dict: Component metadata dict -- assumes keys such as
            'project', 'component', 'version', 'port', etc. are present
            (TODO confirm against the caller that builds it).
        p_kount: 1-based counter; a divider is printed before every
            component after the first.
    """
    if p_kount > 1:
        print(bold_start + ("-" * 90) + bold_end)
    print(
        bold_start
        + " Project: "
        + bold_end
        + p_comp_dict["project"]
        + " ("
        + p_comp_dict["project_url"]
        + ")"
    )
    print(
        bold_start
        + " Component: "
        + bold_end
        + p_comp_dict["component"]
        + " "
        + p_comp_dict["version"]
        + " ("
        + p_comp_dict["proj_description"]
        + ")"
    )
    # Optional attributes are only printed when meaningfully set.
    if p_comp_dict["port"] > 1:
        print(bold_start + " port: " + bold_end + str(p_comp_dict["port"]))
    if p_comp_dict["datadir"] > "":
        print(bold_start + " datadir: " + bold_end + p_comp_dict["datadir"])
    if p_comp_dict["logdir"] > "":
        print(bold_start + " logdir: " + bold_end + p_comp_dict["logdir"])
    if p_comp_dict["autostart"] == "on":
        print(bold_start + " autostart: " + bold_end + p_comp_dict["autostart"])
    if p_comp_dict["svcuser"] > "" and util.get_platform() == "Linux":
        print(bold_start + " svcuser: " + bold_end + p_comp_dict["svcuser"])
    # Combine status and uptime on one line when both are known.
    if ("status" in p_comp_dict) and ("up_time" in p_comp_dict):
        print(
            bold_start
            + " status: "
            + bold_end
            + p_comp_dict["status"]
            + bold_start
            + " for "
            + bold_end
            + p_comp_dict["up_time"]
        )
    else:
        if "status" in p_comp_dict:
            print(bold_start + " status: " + bold_end + p_comp_dict["status"])
        if "up_time" in p_comp_dict:
            print(bold_start + " up since: " + bold_end + p_comp_dict["up_time"])
    if "data_size" in p_comp_dict:
        print(bold_start + " data size: " + bold_end + p_comp_dict["data_size"])
    if "connections" in p_comp_dict:
        print(bold_start + " connections: " + bold_end + p_comp_dict["connections"])
    print(
        bold_start
        + "Release Date: "
        + bold_end
        + p_comp_dict["release_date"]
        + bold_start
        + " Stage: "
        + bold_end
        + p_comp_dict["stage"]
    )
    if p_comp_dict["platform"] > "":
        print(
            bold_start
            + "Supported On: "
            + bold_end
            + "["
            + p_comp_dict["platform"]
            + "]"
        )
    if p_comp_dict["pre_reqs"] > "":
        print(bold_start + " Pre Req's: " + bold_end + p_comp_dict["pre_reqs"])
    print(bold_start + " License: " + bold_end + p_comp_dict["license"])
    # Display "NO" rather than the raw "0" flag value.
    is_installed = str(p_comp_dict["is_installed"])
    if str(is_installed) == "0":
        is_installed = "NO"
    print(
        bold_start
        + " IsCurrent: "
        + bold_end
        + str(p_comp_dict["is_current"])
        + bold_start
        + " IsInstalled: "
        + bold_end
        + is_installed
    )
def format_data_to_table(
    data,
    keys,
    header=None,
    error_key=None,
    error_msg_column=None,
    sort_by_key=None,
    sort_order_reverse=False,
):
    """Takes a list of dictionaries, formats the data, and returns
    the formatted data as a text table.

    Required Parameters:
        data - Data to process (list of dictionaries). (Type: List)
        keys - List of keys in the dictionary. (Type: List)

    Optional Parameters:
        header - The table header. (Type: List)
        error_key - (key, value) pair; rows where row[key] == value get
            the columns named in error_msg_column highlighted. (Type: Tuple)
        error_msg_column - Column names to highlight on error rows. (Type: List)
        sort_by_key - The key to sort by. (Type: String)
        sort_order_reverse - Default sort order is ascending, if
        True sort order will change to descending. (Type: Boolean)
    """
    # Sort into a new list (ascending by default).
    if sort_by_key:
        data = sorted(data, key=itemgetter(sort_by_key), reverse=sort_order_reverse)
    header_row = None
    if header:
        # Map the header labels onto the data keys and prepend them as a
        # pseudo-row.  Work on a copy: the original mutated the caller's
        # list via data.insert(0, ...).  (The never-used header_divider
        # scaffolding was also dropped.)
        header_row = dict(zip(keys, header))
        data = [header_row] + list(data)
    # Widest value per column (+2 padding) determines each column width.
    column_widths = []
    for key in keys:
        column_widths.append(max(len(str(column[key])) for column in data) + 2)
    key_length = len(keys)
    str_format = ("%-*s " * len(keys)).strip() + "\n"
    formatted_data = ""
    for element in data:
        data_to_format = []
        s = 0
        # Fresh (key, width) pairing for every row.
        for pair in zip(keys, column_widths):
            dataStr = str(element[pair[0]])
            spaces = " " * ((int(float(pair[1])) - len(dataStr)) - 2)
            if s < key_length - 1:
                spaces = spaces + " |"
            # BUG FIX: guard on header_row -- the original dereferenced
            # header.values() even when no header was supplied, raising
            # AttributeError for header=None.
            if header_row and dataStr in header_row.values():
                if s == 0:
                    dataStr = table_header_style + dataStr
                dataStr = dataStr + spaces
                if s == key_length - 1:
                    dataStr = dataStr + bold_end
                s = s + 1
            elif error_key and error_msg_column:
                if (
                    pair[0] in error_msg_column
                    and element.get(error_key[0]) == error_key[1]
                ):
                    dataStr = error_start + dataStr + bold_end
            data_to_format.append(pair[1])
            data_to_format.append(dataStr)
        formatted_data += str_format % tuple(data_to_format)
    return formatted_data
| pgEdge/nodectl | cli/scripts/api.py | api.py | py | 20,709 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "colorama.init",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_n... |
33310802794 | # Command tool to view system statisitics
from commands import Command
import psutil
import math
def convert_size(size_bytes):
    """Render a byte count as a human-readable string (e.g. "1.5 KB")."""
    if size_bytes == 0:
        return "0B"
    units = ("B", "KB", "MB", "GB", "TB", "PB", "EB", "ZB", "YB")
    # Pick the largest unit whose divisor does not exceed the value.
    exponent = int(math.floor(math.log(size_bytes, 1024)))
    scaled = round(size_bytes / math.pow(1024, exponent), 2)
    return "%s %s" % (scaled, units[exponent])
class Stats(Command):
    """Shell command that reports system statistics (CPU, memory, disk)."""

    command_name = "stats"

    def command(self, shell, args):
        """Dispatch: no args -> one-line overview; `cpu`/`memory` -> detail.

        Args:
            shell: The owning shell instance (unused here).
            args: Sub-command argument list; may be empty or None.
        """
        try:
            command = args[0].lower()
        # Narrowed from a bare `except:` (which also swallowed SystemExit
        # and KeyboardInterrupt) to the failures "no usable sub-command"
        # can actually raise.
        except (IndexError, TypeError, AttributeError):
            # No args were passed
            print("Current System Stats:")
            print(f"CPU Usage: {psutil.cpu_percent(interval=1)}%, Memory Usage: {psutil.virtual_memory().percent}%, Disk Usage: {psutil.disk_usage('/').percent}%")
        else:
            if command == "cpu":
                cpu_speed = psutil.cpu_freq(percpu=True)[0].current
                cpu_percents = psutil.cpu_percent(interval=1, percpu=True)
                print(f"Current CPU Speed ({cpu_speed / 1000} GHZ):")
                # NOTE(review): iterates physical cores but indexes a
                # per-logical-CPU list, so hyper-threaded siblings beyond
                # the physical count are not shown.
                for i in range(psutil.cpu_count(logical=False)):
                    current_percent = cpu_percents[i]
                    print(f"CPU {i}: {current_percent}%", end=" ")
                print()
            elif command == "memory":
                memory_stats = psutil.virtual_memory()
                print("Current memory statistics:")
                print(f"Current Memory Usage: {convert_size(memory_stats.used)}, Current Memory Free: {convert_size(memory_stats.free)}, Current Memory Usage(%): {memory_stats.percent}, Total memory available: {convert_size(memory_stats.total)}")
            else:
                print("Unknown subcommand")

    def help(self):
        """Print usage information for the stats command."""
        print("stats [SUBCOMMAND]")
        print("Allows you to view hardware stats.")
        print("\tstats cpu: Shows detailed info about CPU utilization")
        print("\tstats memory: Shows detailed information about RAM")
{
"api_name": "math.floor",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "math.log",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "math.pow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "commands.Command",
"line_number": 16... |
75249726112 |
# coding: utf-8
# In[4]:
import sys
# Notebook-export artefact: shells out to pip at import time via the
# IPython `get_ipython()` hook (only works inside IPython/Jupyter).
get_ipython().system('{sys.executable} -m pip install tweepy')
#Import the necessary methods from tweepy library
from tweepy.streaming import StreamListener
from tweepy import OAuthHandler
from tweepy import Stream
# Variables that contain the user credentials to access the Twitter API.
# SECURITY: these credentials are hard-coded and committed to source
# control -- they should be revoked and moved to environment variables.
access_token = "1662638953-hbd3Cut04XJRIIsmST9Mj1Mp3adD5aRw2uk8Z0T"
access_token_secret = "IJ51hhcnRXSFPxbqaSpTZ9wwoJdiL5Rp6MNfnUU0LsQxT"
consumer_key = "5r2VpgUBh0RRQ007rGuBL2H6D"
consumer_secret = "GisYWJDPCMtNRUUq4Ou2MSkfUtgkfSKiuxyCXQVZP0t46bfjgl"
#This is a basic listener that just prints received tweets to stdout.
class StdOutListener(StreamListener):
    """Basic tweepy stream listener that echoes raw tweet JSON to stdout."""

    def on_data(self, data):
        # Print the raw payload; returning True keeps the stream alive.
        print (data)
        return True

    def on_error(self, status):
        # Report the error status code received from the stream.
        print (status)
if __name__ == '__main__':
    # This handles Twitter authentication and the connection to the
    # Twitter Streaming API.
    l = StdOutListener()
    auth = OAuthHandler(consumer_key, consumer_secret)
    auth.set_access_token(access_token, access_token_secret)
    stream = Stream(auth, l)
    # Filter the Twitter stream to capture tweets matching the keywords
    # 'Cambridge Analytica'.
    stream.filter(track=['Cambridge Analytica'])
| hameedmf/news_app | TwitterExtraction.py | TwitterExtraction.py | py | 1,245 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tweepy.streaming.StreamListener",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "tweepy.OAuthHandler",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "tweepy.Stream",
"line_number": 39,
"usage_type": "call"
}
] |
72799954595 | import unittest
from django.contrib.auth.models import User
from django_reputation.models import (Reputation, ReputationAction, UserReputationAction,
Permission, ReputationContent)
import django_reputation.config as config
from django_reputation.decorators import ReputationRequired, reputation_required
import django.http as http
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.contenttypes.models import ContentType
class Tests(unittest.TestCase):
    """Reputation model tests: creation, action logging, daily totals."""

    def setUp(self):
        # Two users plus a shared 'vote' action used by every test.
        user_1 = User.objects.create_user(username = 'Test User',
                                          email = 'test_user@gmail.com')
        self.user_1 = user_1
        user_2 = User.objects.create_user(username = 'Test User 2',
                                          email = 'test_user2@gmail.com')
        self.user_2 = user_2
        reputation_action, created = ReputationAction.objects.get_or_create(name = 'vote', description = '')
        self.reputation_action = reputation_action

    def tearDown(self):
        # Remove all fixtures plus any reputation rows created as a side
        # effect of the test bodies.
        self.user_1.delete()
        self.user_2.delete()
        self.reputation_action.delete()
        for reputation in Reputation.objects.all():
            reputation.delete()
        for action in UserReputationAction.objects.all():
            action.delete()

    def test_reputation_for_user(self):
        """
        Tests retrieval of the reputation associated with a user.
        """
        reputation_object = Reputation.objects.reputation_for_user(self.user_1)
        self.assertTrue(reputation_object)
        self.assertEqual(reputation_object.reputation, config.BASE_REPUTATION)

    def test_log_reputation_action(self):
        """
        Tests creating a new UserReputationAction.
        """
        Reputation.objects.log_reputation_action(user = self.user_1,
                                                 originating_user = self.user_2,
                                                 action_name = 'vote',
                                                 action_value = 100,
                                                 target_object = self.user_1)
        # Logging the action should raise user_1's reputation by its value.
        reputation_object = Reputation.objects.reputation_for_user(self.user_1)
        self.assertEqual(reputation_object.reputation, 100 + config.BASE_REPUTATION)
        reputation_actions = UserReputationAction.objects.all()
        self.assertTrue(reputation_actions)
        self.assertEqual(reputation_actions[0].action, self.reputation_action)

    def test_calculate_reputation_for_today(self):
        """
        Tests calculation of total reputation gain in a day.
        """
        Reputation.objects.log_reputation_action(user = self.user_1,
                                                 originating_user = self.user_2,
                                                 action_name = 'vote',
                                                 action_value = 100,
                                                 target_object = self.user_1)
        delta = Reputation.objects.calculate_reputation_for_today(self.user_1)
        self.assertEqual(delta, 100)
class DummyRequest(object):
    """Minimal stand-in for an HTTP request: just carries a saved user."""

    def __init__(self):
        request_user = User(username='Test Request User')
        request_user.save()
        # Make sure a Reputation record exists for the fake user.
        Reputation.objects.reputation_for_user(request_user)
        self.user = request_user
class DummyDecClass(object):
    """Fixture exercising ReputationRequired applied to a class's __call__."""

    def __init__(self):
        pass

    def call(self, request):
        return 'YO'

    # Bind the decorated method as the instance-call hook: the decorator
    # wraps `call` and enforces 'test permission' before delegating.
    __call__ = ReputationRequired(call, 'test permission')
class ReputationDecoratorTests(unittest.TestCase):
    """Tests for the ReputationRequired decorator (pass/deny paths)."""

    def setUp(self):
        user_1 = User.objects.create_user(username = 'Test User',
                                          email = 'test_user@gmail.com')
        self.user_1 = user_1
        user_2 = User.objects.create_user(username = 'Test User 2',
                                          email = 'test_user2@gmail.com')
        self.user_2 = user_2
        Reputation.objects.reputation_for_user(self.user_1)
        Reputation.objects.reputation_for_user(self.user_2)
        # user_1 starts above the 4500 reputation the permission requires.
        Reputation.objects.update_user_reputation(self.user_1, 5000)
        test_permission, created = Permission.objects.get_or_create(name = 'test permission',
                                                                    description = '',
                                                                    required_reputation = 4500)
        self.test_permission = test_permission
        request = DummyRequest()
        self.request = request

    def tearDown(self):
        self.user_1.delete()
        self.user_2.delete()
        self.test_permission.delete()

    def test_reputation_required(self):
        """
        Tests ReputationRequired decorator.
        """
        # Sufficient reputation: the wrapped method runs and returns 'YO'.
        Reputation.objects.update_user_reputation(self.user_1, 5000)
        dummy_class = DummyDecClass()
        status = dummy_class(self.request)
        self.assertEqual(status, 'YO')
        # Insufficient reputation: the decorator redirects instead.
        Reputation.objects.update_user_reputation(self.user_1, 2000)
        self.request.user = self.user_1
        dummy_class = DummyDecClass()
        status = dummy_class(self.request)
        self.assertEqual(status.__class__, http.HttpResponseRedirect)
class ReputationRegistrationTests(unittest.TestCase):
    """Tests that reputation handlers register for the User content type."""

    def setUp(self):
        user_1 = User.objects.create_user(username = 'Test User',
                                          email = 'test_user@gmail.com')
        self.user_1 = user_1
        user_2 = User.objects.create_user(username = 'Test User 2',
                                          email = 'test_user2@gmail.com')
        self.user_2 = user_2
        Reputation.objects.reputation_for_user(self.user_1)
        Reputation.objects.reputation_for_user(self.user_2)
        # Register the User model as reputation-enabled content.
        user_reputation_content, created = ReputationContent.objects.get_or_create(content_type = ContentType.objects.get_for_model(User))
        self.user_reputation_content = user_reputation_content

    def tearDown(self):
        self.user_1.delete()
        self.user_2.delete()
        self.user_reputation_content.delete()

    def test_handler_registration(self):
        """
        Tests registration of handlers for reputation post_save signals.
        """
        import django_reputation.handlers as handlers
        from django_reputation.reputation import reputation_registry, ReputationRegistry
        reputation_registry = ReputationRegistry()
        # Handlers are keyed by "<app_label>_<model>".
        user_content_name = "%s_%s" % (self.user_reputation_content.content_type.app_label,
                                       self.user_reputation_content.content_type.model)
        self.assertEqual(reputation_registry._handlers.get(user_content_name).__class__, handlers.BaseReputationHandler)
self.assertEqual(reputation_registry._handlers.get(user_content_name).__class__, handlers.BaseReputationHandler) | genghisu/eruditio | eruditio/shared_apps/django_reputation/tests.py | tests.py | py | 6,802 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.contrib.auth.models.User.objects.create_user",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.contrib.auth.models.User.objects",
"line_number": 14,
"... |
13734252379 | # -*- coding: utf-8 -*-
from django.db import models, migrations
import accounts.models
from django.conf import settings
class Migration(migrations.Migration):
    # NOTE: historical migration -- Django replays this file verbatim when
    # building fresh databases, so field definitions must not be edited.

    dependencies = [
        ('base', '0001_initial'),
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
    ]

    operations = [
        # One-to-one profile extension of the auth user model.
        migrations.CreateModel(
            name='AerolithProfile',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('coins', models.IntegerField(default=0)),
                ('profile', models.CharField(max_length=2000, blank=True)),
                ('rating', models.IntegerField(default=0)),
                ('member', models.BooleanField(default=False)),
                ('membershipType', models.IntegerField(default=0, choices=[(0, b'None'), (1, b'Bronze'), (2, b'Silver'), (3, b'Gold')])),
                ('membershipExpiry', models.DateTimeField(null=True, blank=True)),
                ('customWordwallsStyle', models.CharField(max_length=1000, blank=True)),
                ('wordwallsSaveListSize', models.IntegerField(default=0)),
                ('wordwallsMedals', models.TextField(null=True, blank=True)),
                ('avatarUrl', models.CharField(max_length=512, null=True, blank=True)),
                ('additional_data', models.TextField(default=b'{}', blank=True)),
                ('defaultLexicon', models.ForeignKey(default=accounts.models.getLexicon, to='base.Lexicon', on_delete=models.SET_DEFAULT)),
                ('user', models.OneToOneField(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
            ],
        ),
    ]
| domino14/Webolith | djAerolith/accounts/migrations/0001_initial.py | 0001_initial.py | py | 1,684 | python | en | code | 32 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.swappable_dependency",
"line_number": 13,
"usage_type": "call... |
39884501056 | """Flame graph module."""
import inspect
import runpy
import signal
import time
from collections import defaultdict
from vprof import base_profiler
_SAMPLE_INTERVAL = 0.001
class _StatProfiler:
"""Statistical profiler.
Samples call stack at regular intervals specified by _SAMPLE_INTERVAL.
"""
def __init__(self):
self._stats = defaultdict(int)
self._start_time = None
self.base_frame = None
self.run_time = None
def __enter__(self):
"""Enables statistical profiler."""
signal.signal(signal.SIGPROF, self.sample)
signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL)
self._start_time = time.time()
return self
def __exit__(self, exc_type, exc_val, exc_tbf):
"""Disables statistical profiler."""
self.run_time = time.time() - self._start_time
signal.setitimer(signal.ITIMER_PROF, 0)
def sample(self, signum, frame): #pylint: disable=unused-argument
"""Samples current stack and writes result in self._stats.
Args:
signum: Signal that activates handler.
frame: Frame on top of the stack when signal is handled.
"""
stack = []
while frame and frame != self.base_frame:
stack.append((
frame.f_code.co_name,
frame.f_code.co_filename,
frame.f_code.co_firstlineno))
frame = frame.f_back
self._stats[tuple(stack)] += 1
signal.setitimer(signal.ITIMER_PROF, _SAMPLE_INTERVAL)
@staticmethod
def _insert_stack(stack, sample_count, call_tree):
"""Inserts a stack into the call tree.
Args:
stack: Call stack.
sample_count: Sample count of call stack.
call_tree: Call tree.
"""
curr_level = call_tree
for func in stack:
next_level_index = {
node['stack']: node for node in curr_level['children']}
if func not in next_level_index:
new_node = {'stack': func, 'children': [], 'sampleCount': 0}
curr_level['children'].append(new_node)
curr_level = new_node
else:
curr_level = next_level_index[func]
curr_level['sampleCount'] = sample_count
def _fill_sample_count(self, node):
"""Counts and fills sample counts inside call tree."""
node['sampleCount'] += sum(
self._fill_sample_count(child) for child in node['children'])
return node['sampleCount']
@staticmethod
def _get_percentage(sample_count, total_samples):
"""Return percentage of sample_count in total_samples."""
if total_samples != 0:
return 100 * round(float(sample_count) / total_samples, 3)
return 0.0
def _format_tree(self, node, total_samples):
"""Reformats call tree for the UI."""
funcname, filename, _ = node['stack']
sample_percent = self._get_percentage(
node['sampleCount'], total_samples)
color_hash = base_profiler.hash_name('%s @ %s' % (funcname, filename))
return {
'stack': node['stack'],
'children': [self._format_tree(child, total_samples)
for child in node['children']],
'sampleCount': node['sampleCount'],
'samplePercentage': sample_percent,
'colorHash': color_hash
}
@property
def call_tree(self):
"""Returns call tree."""
call_tree = {'stack': 'base', 'sampleCount': 0, 'children': []}
for stack, sample_count in self._stats.items():
self._insert_stack(reversed(stack), sample_count, call_tree)
self._fill_sample_count(call_tree)
if not call_tree['children']:
return {}
return self._format_tree(
call_tree['children'][0], call_tree['sampleCount'])
class FlameGraphProfiler(base_profiler.BaseProfiler):
"""Statistical profiler wrapper.
Runs statistical profiler and returns collected stats.
"""
def _profile_package(self):
"""Runs statistical profiler on a package."""
with _StatProfiler() as prof:
prof.base_frame = inspect.currentframe()
try:
runpy.run_path(self._run_object, run_name='__main__')
except SystemExit:
pass
call_tree = prof.call_tree
return {
'objectName': self._object_name,
'sampleInterval': _SAMPLE_INTERVAL,
'runTime': prof.run_time,
'callStats': call_tree,
'totalSamples': call_tree.get('sampleCount', 0),
'timestamp': int(time.time())
}
def profile_package(self):
"""Runs package profiler in a separate process."""
return base_profiler.run_in_separate_process(self._profile_package)
def _profile_module(self):
"""Runs statistical profiler on a module."""
with open(self._run_object, 'rb') as srcfile, _StatProfiler() as prof:
code = compile(srcfile.read(), self._run_object, 'exec')
prof.base_frame = inspect.currentframe()
try:
exec(code, self._globs, None)
except SystemExit:
pass
call_tree = prof.call_tree
return {
'objectName': self._object_name,
'sampleInterval': _SAMPLE_INTERVAL,
'runTime': prof.run_time,
'callStats': call_tree,
'totalSamples': call_tree.get('sampleCount', 0),
'timestamp': int(time.time())
}
def profile_module(self):
"""Runs module profiler in a separate process."""
return base_profiler.run_in_separate_process(self._profile_module)
def profile_function(self):
"""Runs statistical profiler on a function."""
with _StatProfiler() as prof:
result = self._run_object(*self._run_args, **self._run_kwargs)
call_tree = prof.call_tree
return {
'objectName': self._object_name,
'sampleInterval': _SAMPLE_INTERVAL,
'runTime': prof.run_time,
'callStats': call_tree,
'totalSamples': call_tree.get('sampleCount', 0),
'result': result,
'timestamp': int(time.time())
}
| nvdv/vprof | vprof/flame_graph.py | flame_graph.py | py | 6,376 | python | en | code | 3,934 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "signal.signal",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "signal.SIGPROF",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "signal.seti... |
20248326794 | import os
import h3.api.numpy_int as h3
import numpy as np
from scipy import stats
from grids_toolbox import H3Grids
from indicator_toolbox import Indicator, dist_unit_converter
class ProximityIndicator(Indicator):
def __init__(self, H3, name='proximity', Table=None):
super().__init__(H3, name, Table)
def kde(self, name, target_classes, attr_name='LBCS', item_name='area', usage_name='usage',
minimum_ratio_th=0.0, bandwidth_multiplier=None, normalization=True,
required_cells=None, round_digits=3, Table_to_map=None, self_update=True):
target_h3_cells = self.get_target_h3_cells(target_classes, attr_name, item_name, usage_name, minimum_ratio_th)
sim_points = []
for h3_cell in target_h3_cells:
lat, lon = h3.h3_to_geo(h3_cell)
sim_points.append([lon, lat])
sim_points = np.asarray(sim_points).T
kernel = stats.gaussian_kde(sim_points, 'scott')
if bandwidth_multiplier is not None:
kernel.set_bandwidth(bw_method=kernel.factor * bandwidth_multiplier)
kde_rst = {}
if not required_cells:
required_cells = self.H3.required_cells
for h3_cell in required_cells:
lat, lon = h3.h3_to_geo(h3_cell)
kde_rst[h3_cell] = kernel([lon, lat])[0]
if self_update:
self.H3.values[name] = kde_rst
if Table_to_map:
kde_rst = Table_to_map.get_grid_value_from_h3_cells(self.H3.resolution, name, self_update)
if normalization:
normalized_rst = self.normalization(kde_rst, minV='auto', maxV='auto', better='high')
to_frontend_rst = ' '.join([str(round(d, round_digits)) for d in normalized_rst])
else:
normalized_rst = None
to_frontend_rst = ' '.join([str(round(d, round_digits)) for d in kde_rst])
return {'name': name,
'raw': kde_rst,
'normalized': normalized_rst,
'to_frontend': to_frontend_rst}
def closeness(self, name, target_classes, attr_name='LBCS', item_name='area', usage_name='usage',
minimum_ratio_th=0.0, power=1.0, nearest_k=None, normalization=True,
required_cells=None, round_digits=3, Table_to_map=None, self_update=True):
target_h3_cells = self.get_target_h3_cells(target_classes, attr_name, item_name, usage_name, minimum_ratio_th)
closeness_rst = {}
if not required_cells:
required_cells = self.H3.required_cells
for start_h3_cell in required_cells:
dist_list = self._get_straight_line_dist_to_h3_cells(start_h3_cell, target_h3_cells)
if nearest_k:
dist_list.sort()
dist_list = dist_list[:nearest_k]
if len(dist_list) == 0:
closeness_rst[start_h3_cell] = 0
else:
closeness_rst[start_h3_cell] = sum([1/(d**power) for d in dist_list]) / len(dist_list)
if self_update:
self.H3.values[name] = closeness_rst
if Table_to_map:
closeness_rst = Table_to_map.get_grid_value_from_h3_cells(self.H3.resolution, name, self_update)
if normalization:
normalized_rst = self.normalization(closeness_rst, minV='auto', maxV='auto', better='high')
to_frontend_rst = ' '.join([str(round(d, round_digits)) for d in normalized_rst])
else:
normalized_rst = None
to_frontend_rst = ' '.join([str(round(d, round_digits)) for d in closeness_rst])
return {'name': name,
'raw': closeness_rst,
'normalized': normalized_rst,
'to_frontend': to_frontend_rst}
def nearest_dist(self, name, target_classes, attr_name='LBCS', item_name='area', usage_name='usage',
minimum_ratio_th=0.0, kth=1, dist_method='straight_line', normalization=True,
required_cells=None, round_digits=3, Table_to_map=None, self_update=True):
target_h3_cells = self.get_target_h3_cells(target_classes, attr_name, item_name, usage_name, minimum_ratio_th)
nearest_dist_rst = {}
if not required_cells:
required_cells = self.H3.required_cells
for start_h3_cell in required_cells:
if dist_method == 'straight_line':
dist_list = self._get_straight_line_dist_to_h3_cells(start_h3_cell, target_h3_cells)
elif dist_method == 'network':
dist_list = self._get_network_dist_to_h3_cells(start_h3_cell, target_h3_cells)
dist_list.sort()
if len(dist_list) == 0:
nearest_dist_rst[start_h3_cell] = -1
else:
nearest_dist_rst[start_h3_cell] = dist_list[kth-1]
if self_update:
self.H3.values[name] = nearest_dist_rst
if Table_to_map:
nearest_dist_rst = Table_to_map.get_grid_value_from_h3_cells(self.H3.resolution, name, self_update)
if normalization:
normalized_rst = self.normalization(nearest_dist_rst, minV='auto', maxV='auto', better='high')
to_frontend_rst = ' '.join([str(round(d, round_digits)) for d in normalized_rst])
else:
normalized_rst = None
to_frontend_rst = ' '.join([str(round(d, round_digits)) for d in nearest_dist_rst])
return {'name': name,
'raw': nearest_dist_rst,
'normalized': normalized_rst,
'to_frontend': to_frontend_rst}
def return_accessibility_within_dist(self, name, population_attr,
target_classes, attr_name='LBCS', item_name='area', usage_name='usage',
minimum_ratio_th=0.0, kth=1,
dist_threshold=500.0, dist_unit='m', speed=3.6,
dist_method='straight_line'):
required_cells = [
h3_cell for h3_cell, h3_attrs in self.H3.h3_stats.items()
if h3_attrs.get(population_attr, -1) > 0
]
nearest_dist_rst = self.nearest_dist(name, target_classes, attr_name, item_name, usage_name,
minimum_ratio_th, kth, dist_method,
required_cells=required_cells, Table_to_map=None,
normalization=False, self_update=False)['raw']
if dist_unit != 'km':
nearest_dist_rst = {
h3_cell: dist_unit_converter(raw_value, 'km', return_unit=dist_unit, speed=speed)
for h3_cell, raw_value in nearest_dist_rst.items()
}
tt_pop, accessibile_pop = 0, 0
for h3_cell in required_cells:
this_pop = self.H3.h3_stats[h3_cell][population_attr]
tt_pop += this_pop
if nearest_dist_rst[h3_cell] <= dist_threshold:
accessibile_pop += this_pop
rst = {
'name': name,
'raw': accessibile_pop,
'normalized': accessibile_pop/tt_pop,
'to_frontend': accessibile_pop/tt_pop
}
return rst
def test(num_trials=3):
import pickle, time
import matplotlib.pyplot as plt
vars = pickle.load(open('cities/shenzhen/clean/base_data.p', 'rb'))
Table, H3 = vars['Table'], vars['H3']
P = ProximityIndicator(H3, Table=Table)
for trial in range(num_trials):
print(f'\n\nTrial {trial + 1}\n' + '==' * 30)
layout_str, layout_ratio = Table.update_randomly(zone=1)
t0 = time.time()
park_proximity = P.return_accessibility_within_dist('park_proximity',
population_attr='tt_pop',
target_classes=5500,
kth=1,
dist_threshold=500 / 1.5)
t1 = time.time()
print('{:4.4} seconds elapsed for computing a proximity indicator'.format(t1-t0))
print(park_proximity)
park_proximity_heatmap = P.closeness('park_proximity_heatmap',
target_classes=5500,
minimum_ratio_th=0.25,
power=0.75)
t2 = time.time()
print('{:4.4} seconds elapsed for computing a proximity heatmap'.format(t2-t1))
P.verify_heatmap(Table, name='park_proximity_heatmap',
target_classes=5500,
minimum_ratio_th=0.25,
focus_table_grid_code=38)
plt.show()
if __name__ == '__main__':
test(num_trials=3) | csl-hcmc/SaiGon-Peninsula | Software/L3_SZ_CityScope-cw_dev/backend/proximity_indicator.py | proximity_indicator.py | py | 8,841 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "indicator_toolbox.Indicator",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "h3.api.numpy_int.h3_to_geo",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "h3.api.numpy_int",
"line_number": 19,
"usage_type": "name"
},
{
"api_name":... |
43513717152 | # Get light values from ESP32 at regular intervals and store in tLightValues
import mysql.connector
import logging
import requests
import time
from datetime import datetime
# Set up logging
logging.basicConfig(filename="/home/jkumar/Projects/logs/lightTracker/lightTracker.log", level=logging.INFO,format="%(asctime)s:[%(levelname)s]:%(message)s")
logging.info('------------------------------------')
logging.info('lightTracker Application starting up')
logging.info('------------------------------------')
# Create DB Connection
logging.info('Attempting to connect to DB lighttracker')
try:
mydb = mysql.connector.connect(
host="localhost",
user="jkumar",
password="system64",
database="lighttracker"
)
mycursor = mydb.cursor()
except:
logging.critical('Unable to connect to DB')
logging.critical('Program will exit')
quit()
logging.info('Connection to DB successful')
# Setup ESP32 details
lightURL = "http://192.168.1.128/light"
# Time interval at which readings will be taken (in seconds)
sleepTime = 300
# Time interval after which a failed attempt will be re-tried (in seconds)
retryTime = 5
# Setup consequtive errors after which program should quit
errorTimeout = 20
consErrors = 0
# Setup http request timeout (in seconds)
httpTimeout = 5
logging.info("Setup Complete")
logging.info("ESP32 endpoint: %s",lightURL)
logging.info("Time interval : %s seconds",sleepTime)
logging.info("Retry interval: %s seconds",retryTime)
logging.info("Error timeout : %s",errorTimeout)
logging.info("HTTP timeout : %s seconds",httpTimeout)
# Create new run in DB
now = datetime.now()
#crDate = now.strftime("%Y-%m-%d %H:%M:%S")
#sql = "insert into tLightRuns (cr_date) values (%s)"
#val = (str(crDate))
#try:
# mycursor.execute(sql, val)
# mydb.commit()
# rowID = mycursor.lastrowid
#except Exception as e:
# logging.error(str(e))
# logging.error("Unable to add run to DB, exiting")
# quit()
#
#logger.info("Starting run %s",rowID)
# HACK
# Create output file
filenameDate = now.strftime("%Y-%m-%d_%H-%M-%S")
filename = "/home/jkumar/Projects/logs/lightTracker/data/output."+filenameDate+".data"
fh = open(filename,'w')
fh.close()
while consErrors < errorTimeout:
try:
r = requests.get(lightURL, timeout=httpTimeout)
except:
consErrors = consErrors + 1
logging.error("Unable to connect to %s retries %s/%s",lightURL,consErrors,errorTimeout)
time.sleep(retryTime)
continue
respStatus = r.status_code
if (respStatus != 200):
consErrors = consErrors + 1
logging.error("Unsuccessful response %s retries %s/%s",respStatus,consErrors,errorTimeout)
time.sleep(retryTime)
continue
lightValue = r.text
logging.info("Got light value %s",lightValue)
# Success reset consErrors
consErrors = 0
# Insert value into DB
# HACK
# Insert value into file
currentTime = datetime.now()
currentTimeFormat = currentTime.strftime("%Y-%m-%d %H:%M")
fh = open(filename,'a')
writeLine = currentTimeFormat+","+lightValue+"\n"
fh.write(writeLine)
fh.close()
# Set to sleep before next request
time.sleep(sleepTime)
# Error timeout has been reached, exit application
logging.info('Application exiting')
| jitadityakumar/home-automation | lightTracker/python/getLightValues.py | getLightValues.py | py | 3,301 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "logging.info",
... |
17418906876 | import collections
import math
class Graph:
''' graph class inspired by https://gist.github.com/econchick/4666413
'''
def __init__(self):
self.vertices = set()
# makes the default value for all vertices an empty list
self.edges = collections.defaultdict(list)
self.weights = {}
def add_vertex(self, value):
self.vertices.add(value)
def add_edge(self, from_vertex, to_vertex, distance):
if from_vertex == to_vertex: pass # no cycles allowed
self.edges[from_vertex].append(to_vertex)
self.weights[(from_vertex, to_vertex)] = distance
def __str__(self):
string = "Vertices: " + str(self.vertices) + "\n"
string += "Edges: " + str(self.edges) + "\n"
string += "Weights: " + str(self.weights)
return string
def dijkstra(graph, start):
# initializations
S = set()
# delta represents the length shortest distance paths from start -> v, for v in delta.
# We initialize it so that every vertex has a path of infinity (this line will break if you run python 2)
delta = dict.fromkeys(list(graph.vertices), math.inf)
previous = dict.fromkeys(list(graph.vertices), None)
# then we set the path length of the start vertex to 0
delta[start] = 0
# while there exists a vertex v not in S
while S != graph.vertices:
# let v be the closest vertex that has not been visited...it will begin at 'start'
v = min((set(delta.keys()) - S), key=delta.get)
# for each neighbor of v not in S
for neighbor in set(graph.edges[v]) - S:
new_path = delta[v] + graph.weights[v, neighbor]
# is the new path from neighbor through
if new_path < delta[neighbor]:
# since it's optimal, update the shortest path for neighbor
delta[neighbor] = new_path
# set the previous vertex of neighbor to v
previous[neighbor] = v
S.add(v)
return (delta, previous)
def shortest_path(graph, start, end):
'''Uses dijkstra function in order to output the shortest path from start to end
'''
delta, previous = dijkstra(graph, start)
path = []
vertex = end
while vertex is not None:
path.append(vertex)
vertex = previous[vertex]
path.reverse()
return path
if __name__ == '__main__':
graph = Graph()
with open('input.txt') as read_file:
n = int(read_file.readline())
for i in range(n):
node_list = read_file.readline().split()
graph.add_ve(node_list[0])
graph.add_edge(node_list[1])
graph.add_edge(node_list[0], node_list[1], int(node_list[2]))
points = read_file.readline().split()
start_point = points[0]
end_point = points[1]
print( shortest_path(graph, start_point, end_point))
| nzavarinsky/Algorhitms | LABA4(dijkstra-algo)/dijkstra-v2.py | dijkstra-v2.py | py | 2,905 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "collections.defaultdict",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "math.inf",
"line_number": 37,
"usage_type": "attribute"
}
] |
21429960495 | import os
import mock
import yaml
import unittest
import tempfile
from pathlib import Path
import setuppath
from ops.testing import Harness
from src.charm import AlgorandCharm
class BaseTestAlgoCharm(unittest.TestCase):
@classmethod
def setUpClass(cls):
"""Setup class fixture."""
# Setup a tmpdir
cls.tmpdir = tempfile.TemporaryDirectory()
# Create test Genesis Config Files
for algonet in ["betanet", "devnet", "mainnet", "testnet"]:
with open(os.path.join(cls.tmpdir.name, "test_genesis_{}".format(algonet)), "w") as f:
f.write(algonet)
# Stop unit test from calling fchown
fchown_patcher = mock.patch("os.fchown")
cls.mock_fchown = fchown_patcher.start()
chown_patcher = mock.patch("os.chown")
cls.mock_chown = chown_patcher.start()
# Stop charmhelpers host from logging via debug log
host_log_patcher = mock.patch("charmhelpers.core.host.log")
cls.mock_juju_log = host_log_patcher.start()
# Stop charmhelpers snap from logging via debug log
snap_log_patcher = mock.patch("charmhelpers.fetch.snap.log")
cls.mock_snap_log = snap_log_patcher.start()
# Prevent charmhelpers from calling systemctl
host_service_patcher = mock.patch("charmhelpers.core.host.service_stop")
cls.mock_service_stop = host_service_patcher.start()
host_service_patcher = mock.patch("charmhelpers.core.host.service_start")
cls.mock_service_start = host_service_patcher.start()
host_service_patcher = mock.patch("charmhelpers.core.host.service_restart")
cls.mock_service_restart = host_service_patcher.start()
add_source_patcher = mock.patch("src.charm.add_source")
cls.mock_add_source = add_source_patcher.start()
# Setup mock JUJU Environment variables
os.environ["JUJU_UNIT_NAME"] = "mock/0"
os.environ["JUJU_CHARM_DIR"] = "."
@classmethod
def tearDownClass(cls):
"""Tear down class fixture."""
mock.patch.stopall()
cls.tmpdir.cleanup()
def setUp(self):
"""Set up tests."""
self.harness = Harness(AlgorandCharm)
# Make default config available
with open(Path("./config.yaml"), "r") as config_file:
config = yaml.safe_load(config_file)
self.charm_config = {}
for key, _ in config["options"].items():
self.charm_config[key] = config["options"][key]["default"]
# Disable snap retry delay for testing
self.apt_retry_patcher = mock.patch(
"charmhelpers.fetch.ubuntu.CMD_RETRY_DELAY", 0
)
self.addCleanup(self.apt_retry_patcher.stop)
# Setup mock JUJU Environment variables
os.environ["JUJU_UNIT_NAME"] = "mock/0"
os.environ["JUJU_CHARM_DIR"] = "."
# Load config defaults
self.harness.update_config(self.charm_config)
def get_notice_count(self, hook):
"""Return the notice count for a given charm hook."""
notice_count = 0
handle = "AlgorandCharm/on/{}".format(hook)
for event_path, _, _ in self.harness.charm.framework._storage.notices(None):
if event_path.startswith(handle):
notice_count += 1
return notice_count
| ZestBloom/charm-algorand-node | tests/unit/unittest_base.py | unittest_base.py | py | 3,335 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "tempfile.TemporaryDirectory",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.pa... |
14851062460 | import os
import sys
import numpy as np
import cv2 as cv
import matplotlib.pyplot as plt
def homography_function(x, y, H): # x&y-> N-by-1, H-> 3-by-3, return-> (N-by-1,N-by-1)
D = (H[2,0] * x + H[2,1] * y + 1)
xs = (H[0,0] * x + H[0,1] * y + H[0,2]) / D
ys = (H[1,0] * x + H[1,1] * y + H[1,2]) / D
return xs, ys
def homography_jacobian(x, y, H): # x&y-> N-by-1, H-> 3-by-3, J-> 2N-by-8
N = np.shape(x)[0]
J = np.zeros(shape=(2*N, 8), dtype=float)
D = (H[2,0] * x + H[2,1] * y + 1)
xs = (H[0,0] * x + H[0,1] * y + H[0,2]) / D
ys = (H[1,0] * x + H[1,1] * y + H[1,2]) / D
J[0:N,0:1] = x / D
J[0:N,1:2] = y / D
J[0:N,2:3] = 1 / D
J[0:N,6:7] = - x * xs
J[0:N,7:8] = - y * ys
J[N:2*N,3:4] = x / D
J[N:2*N,4:5] = y / D
J[N:2*N,5:6] = 1 / D
J[N:2*N,6:7] = - x * xs
J[N:2*N,7:8] = - y * ys
return J
def find_homography(x, y, xp, yp, iters): # x,y,xp&yp-> N-by-1
# define H here
H = np.zeros(shape=(3,3), dtype=float)
H[2,2] = 1
for _ in range(iters):
# Compute homography coordinates
xs, ys = homography_function(x, y, H)
# Compute residuals
rx, ry = xs - xp, ys - yp
r = np.vstack((rx,ry))
# Compute Jacobian
J = homography_jacobian(x,y,H)
# Compute update via linear least squares
delta = np.linalg.pinv(J.transpose() @ J) @ J.transpose() @ r
# Update homography matrix
delta = np.resize(np.vstack((delta,0)), (3,3))
H = H - delta
return H
def get_match_coordinates(sift_keypoints1, sift_keypoints2, matches):
N = len(matches)
x = np.zeros(shape=(N, 1), dtype=float)
y = np.zeros(shape=(N, 1), dtype=float)
xp = np.zeros(shape=(N, 1), dtype=float)
yp = np.zeros(shape=(N, 1), dtype=float)
for k in range(N):
index1, index2, _, _ = matches[k]
x[k,0] = np.floor(sift_keypoints1[index1].pt[0])
y[k,0] = np.floor(sift_keypoints1[index1].pt[1])
xp[k,0] = np.floor(sift_keypoints2[index2].pt[0])
yp[k,0] = np.floor(sift_keypoints2[index2].pt[1])
return x,y,xp,yp
def get_random_inliers(x, y, xp, yp, n):
inliers_idx = np.random.choice(np.arange(len(x)), n, replace=False)
outliers_idx = np.ones(shape=len(x), dtype=bool)
outliers_idx[inliers_idx] = 0
outliers_idx = np.nonzero(outliers_idx)[0]
inliers = (x[inliers_idx], y[inliers_idx], xp[inliers_idx], yp[inliers_idx])
outliers = (x[outliers_idx], y[outliers_idx], xp[outliers_idx], yp[outliers_idx])
return inliers, inliers_idx, outliers, outliers_idx
def get_random_inliers_with_index(x, y, xp, yp, idx, n):
inliers_idx = np.random.choice(idx, n, replace=False)
outliers_idx = np.ones(shape=len(x), dtype=bool)
outliers_idx[inliers_idx] = 0
outliers_idx = np.nonzero(outliers_idx)[0]
inliers = (x[inliers_idx], y[inliers_idx], xp[inliers_idx], yp[inliers_idx])
outliers = (x[outliers_idx], y[outliers_idx], xp[outliers_idx], yp[outliers_idx])
return inliers, inliers_idx, outliers, outliers_idx
def ransac(sift_keypoints1, sift_keypoints2, matches, min_points, req_points, gn_iters, max_iters, ransac_threshold):
H_best = None
err_best = np.inf
x_best, y_best, xp_best, yp_best, idx_best = None, None, None, None, None
print(' Extracting matched feature point coordinates...')
x, y, xp, yp = get_match_coordinates(sift_keypoints1, sift_keypoints2, matches)
print(' Running RANSAC iterations...')
for num_iter in range(max_iters):
if not idx_best is None:
# Get 'num_inliers/2' random inliers from the best set
inliers, idx_inl, outliers, idx_oth = get_random_inliers_with_index(x, y, xp, yp, idx_best, int(idx_best.shape[0] / 2))
else:
# Get 'min_points' random inliers from the set
inliers, idx_inl, outliers, idx_oth = get_random_inliers(x, y, xp, yp, min_points)
x_inl, y_inl, xp_inl, yp_inl = inliers
x_oth, y_oth, xp_oth, yp_oth = outliers
# Fit a homography to randomly selected inliers
H = find_homography(x_inl, y_inl, xp_inl, yp_inl, gn_iters)
# Evaluate homography on the rest of the set
xs_oth, ys_oth = homography_function(x_oth, y_oth, H)
r_oth = np.sqrt(np.square(xs_oth - xp_oth) + np.square(ys_oth - yp_oth))
# Add good fit points to inliers
idx = np.where(r_oth < ransac_threshold)[0]
if idx.shape[0] > 0:
x_inl = np.concatenate((x_inl, x_oth[idx, :]))
y_inl = np.concatenate((y_inl, y_oth[idx, :]))
xp_inl = np.concatenate((xp_inl, xp_oth[idx, :]))
yp_inl = np.concatenate((yp_inl, yp_oth[idx, :]))
idx_inl = np.concatenate((idx_inl, idx_oth[idx]))
# Check if found model has enough inliers
if x_inl.shape[0] >= req_points:
# Fit a homography again to all found inliers
H = find_homography(x_inl, y_inl, xp_inl, yp_inl, gn_iters)
# Evaluate homography on all found inliers
xs_inl, ys_inl = homography_function(x_inl, y_inl, H)
err = np.mean(np.sqrt(np.square(xs_inl - xp_inl) + np.square(ys_inl - yp_inl)))
# Check if found error is better than the best model
if err < err_best:
# Update best homography, error and inlier points
H_best = H
err_best = err
x_best, y_best, xp_best, yp_best, idx_best = x_inl, y_inl, xp_inl, yp_inl, idx_inl
return H_best, x_best, y_best, xp_best, yp_best, idx_best
def match_feature_points(descriptors1, descriptors2, threshold):
num_keypoints1, vec_dim = descriptors1.shape
num_keypoints2, _ = descriptors2.shape
matches = []
for index1 in range(num_keypoints1):
desc1 = descriptors1[index1,:]
nearest_n1 = np.inf * np.ones(shape=(1, vec_dim), dtype=float)
nearest_n1_index = np.inf
nearest_n1_dist = np.inf
nearest_n2 = np.inf * np.ones(shape=(1, vec_dim), dtype=float)
nearest_n2_index = np.inf
nearest_n2_dist = np.inf
for index2 in range(num_keypoints2):
desc2 = descriptors2[index2,:]
temp_dist = np.linalg.norm(desc1 - desc2)
if temp_dist < nearest_n1_dist:
nearest_n2 = nearest_n1
nearest_n2_index = nearest_n1_index
nearest_n2_dist = nearest_n1_dist
nearest_n1 = desc2
nearest_n1_index = index2
nearest_n1_dist = temp_dist
elif temp_dist < nearest_n2_dist:
nearest_n2 = desc2
nearest_n2_index = index2
nearest_n2_dist = temp_dist
nndr = nearest_n1_dist / nearest_n2_dist
if nndr < threshold:
matches.append((index1, nearest_n1_index, nearest_n1_dist, nndr))
return matches
def stitch(img1, img2, H, r_shift_prev, c_shift_prev, estimation_iters):
img1_rows, img1_cols, _ = img1.shape
img2_rows, img2_cols, _ = img2.shape
img1_transformed_coordinates = np.zeros(shape=(img1_rows, img1_cols), dtype="i,i")
r_min, c_min = np.inf, np.inf
r_max, c_max = -np.inf, -np.inf
# Transform image one
for r in range(img1_rows):
for c in range(img1_cols):
xs, ys = c, r
for H_i in reversed(H):
xs, ys = homography_function(xs, ys, H_i)
xs, ys = xs + c_shift_prev, ys + r_shift_prev
xs, ys = int(xs), int(ys)
if ys < r_min:
r_min = ys
if ys > r_max:
r_max = ys
if xs < c_min:
c_min = xs
if xs > c_max:
c_max = xs
img1_transformed_coordinates[r,c] = (ys,xs)
# Calculate the size of the stitched image
out_rows, out_cols = img2_rows, img2_cols
if (r_min < 0):
out_rows = out_rows - r_min
if (r_max > img2_rows - 1):
out_rows = out_rows + (r_max - img2_rows + 1)
if (c_min < 0):
out_cols = out_cols - c_min
if (c_max > img2_cols - 1):
out_cols = out_cols + (c_max - img2_cols + 1)
out = np.zeros(shape=(out_rows, out_cols, 3), dtype=np.uint8)
out_temp = np.zeros(shape=(out_rows, out_cols, 3), dtype=np.uint8)
out_temp_map = np.zeros(shape=(out_rows, out_cols), dtype=bool)
out_map1 = np.zeros(shape=(out_rows, out_cols), dtype=bool)
out_map2 = np.zeros(shape=(out_rows, out_cols), dtype=bool)
r_shift = 0
if r_min < 0:
r_shift = - r_min
c_shift = 0
if c_min < 0:
c_shift = - c_min
# Insert image one
for r in range(img1_rows):
for c in range(img1_cols):
rt, ct = img1_transformed_coordinates[r,c]
rt, ct = rt + r_shift, ct + c_shift
out_temp[rt,ct,:] = img1[r,c,:]
out_temp_map[rt,ct] = True
# Estimate missing pixel values in image one
for _ in range(estimation_iters):
for r in range(1,out_rows-1):
for c in range(1,out_cols-1):
patch = out_temp[r-1:r+2,c-1:c+2,:]
patch_map = out_temp_map[r-1:r+2,c-1:c+2]
if not out_temp_map[r,c] and not np.all(patch_map == False):
out_temp[r,c] = np.median(patch[patch_map], axis=0)
out_map1[r,c] = True
out_temp_map = np.logical_or(out_temp_map, out_map1)
out_map1 = out_temp_map
# Insert image two
for r in range(img2_rows):
for c in range(img2_cols):
rt, ct = r + r_shift, c + c_shift
if not np.all(img2[r,c,:] == 0):
out[rt,ct,:] = img2[r,c,:]
out_map2[rt,ct] = True
# Merge two maps
for r in range(out_rows):
for c in range(out_cols):
if out_map1[r,c]:
if out_map2[r,c]:
out[r,c,:] = ((out[r,c,:].astype(float) + out_temp[r,c,:].astype(float)) / 2).astype(np.uint8)
else:
out[r,c,:] = out_temp[r,c,:]
return out, r_shift_prev + r_shift, c_shift_prev + c_shift
# ***** DEBUG *****
'''
def matches_to_opencv_matches(sift_descriptors1, sift_descriptors2, matches):
opencv_matches = []
for k in range(len(matches)):
index1, index2, _, _ = matches[k]
opencv_matches.append([cv.DMatch(index1,index2,np.linalg.norm(sift_descriptors1[index1,:] - sift_descriptors2[index2,:]))])
return opencv_matches
'''
# ***** DEBUG *****
def image_stitching(image_paths, out_file, nndr_threshold, min_points, req_points, gn_iters, max_iters, ransac_threshold, estimation_iters):
images = []
sift_keypoints = []
sift_descriptors = []
for image_path in image_paths:
# Read image parts
image = cv.imread(image_path)
# Create a SIFT instance
sift = cv.SIFT_create()
# Detect SIFT points
sift_keypoints_i, sift_descriptors_i = sift.detectAndCompute(image,None)
images.append(image)
sift_keypoints.append(sift_keypoints_i)
sift_descriptors.append(sift_descriptors_i)
out = images[0]
H = []
r_shift = 0
c_shift = 0
for i in range(len(images)-1):
print(' Finding feature match points... [' + str(i) + ']')
matches = match_feature_points(sift_descriptors[i+1], sift_descriptors[i], 0.8)
# ***** DEBUG *****
"""
matches = match_feature_points(sift_descriptors[i+1], sift_descriptors[i], 1)
matches = matches_to_opencv_matches(sift_descriptors[i+1], sift_descriptors[i], matches)
img_matches = cv.drawMatchesKnn(images[i+1], sift_keypoints[i+1], images[i], sift_keypoints[i], matches, None, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv.imwrite('out/matches10.jpg',img_matches)
matches = match_feature_points(sift_descriptors[i+1], sift_descriptors[i], 0.8)
matches = matches_to_opencv_matches(sift_descriptors[i+1], sift_descriptors[i], matches)
img_matches = cv.drawMatchesKnn(images[i+1], sift_keypoints[i+1], images[i], sift_keypoints[i], matches, None, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv.imwrite('out/matches08.jpg',img_matches)
matches = match_feature_points(sift_descriptors[i+1], sift_descriptors[i], 0.8)
H_i, x, y, xp, yp, idx = ransac(sift_keypoints[i+1], sift_keypoints[i], matches, min_points=min_points, req_points=req_points, gn_iters=gn_iters, max_iters=max_iters, ransac_threshold=ransac_threshold)
matches = [matches[i] for i in idx]
matches = matches_to_opencv_matches(sift_descriptors[i+1], sift_descriptors[i], matches)
img_matches = cv.drawMatchesKnn(images[i+1], sift_keypoints[i+1], images[i], sift_keypoints[i], matches, None, flags=cv.DrawMatchesFlags_NOT_DRAW_SINGLE_POINTS)
cv.imwrite('out/matches08_ransac.jpg',img_matches)
"""
# ***** DEBUG *****
print(' Running RANSAC algorithm to fit a homography on matched feature points... [' + str(i) + ']')
H_i, x, y, xp, yp, idx = ransac(sift_keypoints[i+1], sift_keypoints[i], matches, min_points=min_points, req_points=req_points, gn_iters=gn_iters, max_iters=max_iters, ransac_threshold=ransac_threshold)
H.append(H_i)
print(' Stitcing images... [' + str(i) + ']')
out, r_shift, c_shift = stitch(images[i+1], out, H, r_shift, c_shift, estimation_iters=estimation_iters)
cv.imwrite(out_file, out)
out_disp = cv.cvtColor(out, cv.COLOR_BGR2RGB)
plt.imshow(out_disp)
plt.show()
def main():
image_stitching(sys.argv[1:], 'stitched_image.jpg', nndr_threshold=0.8, min_points=10, req_points=20, gn_iters=100, max_iters=1000, ransac_threshold=3, estimation_iters=1)
if __name__ == "__main__":
main()
| burakkunkcu/image-stitching | image-stitching.py | image-stitching.py | py | 12,882 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "numpy.shape",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "numpy.vstack",
"line_number":... |
23164289569 | from pathlib import Path
from math import ceil, log2
from progress.bar import Bar
import numpy as np
import pandas as pd
import rasterio
from rasterio.windows import get_data_window
import geopandas as gp
import shapely
from analysis.constants import INDICATORS, CORRIDORS
from analysis.lib.raster import write_raster
data_dir = Path("data/inputs")
indicators_dir = data_dir / "indicators/base"
out_dir = Path("data/for_tiles")
# NOTE: all tiles are limited to extent of base blueprint (indicators not available elsewhere)
blueprint_filename = indicators_dir / "base_blueprint.tif"
corridors_filename = indicators_dir / "corridors.tif"
urban_filename = data_dir / "threats/urban/urban_2060_binned.tif"
slr_filename = data_dir / "threats/slr/slr.tif"
bnd_filename = data_dir / "boundaries/input_areas.feather"
# very small amount added to numbers to make sure that log2 gives us current number of bytes
EPS = 1e-6
# only base has indicators
INDICATORS = INDICATORS["base"]
### Create dataframe with info about bits required, groups, etc
indicators = pd.DataFrame(
[
[
e["id"].split("_")[0].replace("base:", ""),
e["id"],
indicators_dir / e["filename"],
min([v["value"] for v in e["values"]]),
max([v["value"] for v in e["values"]]),
]
for e in INDICATORS
],
columns=["ecosystem", "id", "filename", "min_value", "max_value"],
)
core = pd.DataFrame(
[
# blueprint is included so that it can be rendered after applying filters in UI
{
"ecosystem": "",
"id": "blueprint",
"filename": blueprint_filename,
"min_value": 0,
"max_value": 4,
},
{
"ecosystem": "",
"id": "corridors",
"filename": corridors_filename,
"min_value": 0,
"max_value": 4,
},
{
"ecosystem": "threat",
"id": "urban",
"filename": urban_filename,
"min_value": 1,
"max_value": 5,
},
{
"ecosystem": "threat",
"id": "slr",
"filename": slr_filename,
"min_value": 0,
"max_value": 13,
},
]
)
df = pd.concat(
[
core,
indicators,
]
).set_index("id")
df["src"] = df.filename.apply(lambda x: rasterio.open(x))
df["nodata"] = df.src.apply(lambda src: int(src.nodata))
# any indicators that have listed 0 values need to be shifted up 1
df["value_shift"] = (df.min_value == 0).astype("uint8")
df["max_value"] += df.value_shift
df["bits"] = df.max_value.apply(lambda x: ceil(log2(max(x, 2) + EPS)))
# # export for manual review and assignment of groups
# tmp = df[["bits"]].copy()
# tmp["group"] = ""
# tmp.to_csv(out_dir / "layers.csv", index=True, index_label="id")
# read manually assigned groups that are up to 24 bits each
# Note: these are based loosely on overlapping spatial extent
grouped = pd.read_csv(out_dir / "layers.csv").set_index("id")
print("Groups:")
print(grouped.groupby("group", dropna=False).bits.sum())
if grouped.group.isnull().max():
raise ValueError("All layers must be assigned to a group")
df = df.join(grouped.group)
df["orig_pos"] = np.arange(len(df))
df = df.sort_values(by=["group", "orig_pos"])
groups = sorted(df.group.unique())
# calculate position and bit offsets for each entity within each group
df["position"] = 0
df["offset"] = 0
for group in groups:
ix = df.group == group
df.loc[ix, "position"] = np.arange(ix.sum())
df.loc[ix, "offset"] = np.cumsum(df.loc[ix].bits) - df.loc[ix].bits
for col in ["group", "position", "bits", "offset", "min_value", "max_value"]:
df[col] = df[col].astype("uint8")
# NOTE: groups must be stored in encoding definition
# in exactly the same order they are encoded
df[["group", "position", "offset", "bits", "value_shift"]].reset_index().to_feather(
out_dir / "encoding.feather"
)
# save encoding JSON for frontend
for group in groups:
with open(out_dir / f"se_pixel_layers_{group}.json", "w") as out:
_ = out.write(
df.loc[df.group == group, ["offset", "bits", "value_shift"]]
.rename(columns={"value_shift": "valueShift"})
.reset_index()
.to_json(orient="records")
)
### determine the block windows that overlap bounds
# everything else will be filled with 0
print("Calculating overlapping windows")
bnd_df = gp.read_feather(bnd_filename)
bnd = bnd_df.loc[bnd_df.id == "base"].geometry.values[0]
blueprint = rasterio.open(blueprint_filename)
windows = np.array([w for _, w in blueprint.block_windows(1)])
bounds = np.array([blueprint.window_bounds(w) for w in windows]).T
bounds = shapely.box(*bounds)
tree = shapely.STRtree(bounds)
ix = tree.query(bnd, predicate="intersects")
ix.sort()
windows = windows[ix]
for group in groups:
rows = df.loc[df.group == group]
total_bits = rows.bits.sum()
# tile creation pipeline expects uint32 for creating RGB PNGs
out = np.zeros(shape=blueprint.shape, dtype="uint32")
# process each stack of layers by window to avoid running out of memory
for window in Bar(
f"Processing group {group} ({total_bits} bits)", max=len(windows)
).iter(windows):
window_shape = (window.height, window.width)
ix = window.toslices()
has_data = False
layer_bits = []
for id in rows.index:
row = rows.loc[id]
data = row.src.read(1, window=window)
# shift values up if needed
if row.value_shift:
data[data != row.nodata] += 1
# set nodata pixels to 0 (combined with existing 0 values that are below row.min_value)
data[data == row.nodata] = 0
if data.max() > 0:
out[ix] = np.bitwise_or(
np.left_shift(data.astype("uint32"), row.offset), out[ix]
)
# determine the window where data are available, and write out a smaller output
print("Calculating data window...")
data_window = get_data_window(out, nodata=0)
out = out[data_window.toslices()]
transform = blueprint.window_transform(data_window)
print(f"Data window: {data_window}")
print("Writing GeoTIFF...")
outfilename = out_dir / f"se_pixel_layers_{group}.tif"
write_raster(outfilename, out, transform=transform, crs=blueprint.crs, nodata=0)
# NOTE: we intentionally don't create overviews because this messes up the
# data when converting to WGS84
#### Notes
# to verify that values are encoded correctly
# 1. cast encoded values to correct type (e.g., uint16): value = encoded[106,107].view('uint16')
# 2. use bit shifting and bit AND logic to extract value, based on offset and nbits:
# (value >> offset) & ((2**nbits)-1) # => original value
| astutespruce/secas-blueprint | analysis/prep/tiles/encode_pixel_layers.py | encode_pixel_layers.py | py | 6,876 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "analysis.constants.INDICATORS",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "pandas.DataFr... |
26647008274 | from newspaper import Article
from splitText import SplitText
from summarizer import Summarizer
from summarizingFuncs import naiveTextRank
TEST_ARTICLE = "http://www.lefigaro.fr/vie-bureau/2017/10/06/09008-20171006ARTFIG00032-japon-une-journaliste-meurt-apres-159-heures-sup-en-un-mois.php"
TEST_ARTICLE2 = "http://www.lemonde.fr/international/article/2017/10/07/prix-nobel-de-la-paix-le-combat-tres-symbolique-de-l-ican_5197546_3210.html"
def main():
test_article = Article(url=TEST_ARTICLE2)
test_article.download()
test_article.parse()
text = SplitText(test_article.text)
text.splitIntoParts("french")
tldr = Summarizer(text)
tldr.summarize(naiveTextRank)
print(test_article.title)
print(tldr.summary)
if __name__ == '__main__':
main()
| AelHenri/TLDR-bot | TLDR/main.py | main.py | py | 784 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "newspaper.Article",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "splitText.SplitText",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "summarizer.Summarizer",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "summarizi... |
42373937252 | import itertools as it
from functools import reduce
import numpy as np
from qiskit import QuantumCircuit
from libbench.ibm import Job as IBMJob
class IBMSchroedingerMicroscopeJob(IBMJob):
@staticmethod
def job_factory(
num_post_selections, num_pixels, num_shots, xmin, xmax, ymin, ymax, add_measurements
):
xs = np.linspace(xmin, xmax, num_pixels + 1)
xs = 0.5 * (xs[:-1] + xs[1:])
ys = np.linspace(ymin, ymax, num_pixels + 1)
ys = 0.5 * (ys[:-1] + ys[1:])
for (i, x), (j, y) in it.product(enumerate(xs), enumerate(ys)):
z = x + 1j * y
yield IBMSchroedingerMicroscopeJob(
num_post_selections, z, add_measurements, i, j, num_shots
)
def __init__(self, num_post_selections, z, add_measurements, i, j, num_shots):
super().__init__()
self.num_post_selections = num_post_selections
self.add_measurements = add_measurements
self.z = z
self.i = i
self.j = j
self.num_shots = num_shots
# Calculate some parameters
theta = 2 * np.arccos(abs(z) / np.sqrt(1 + abs(z) ** 2))
phi = np.angle(z)
# Build the circuit
circuit = (
QuantumCircuit(2 ** num_post_selections, 2 ** num_post_selections)
if add_measurements
else QuantumCircuit(2 ** num_post_selections)
)
for k in range(2 ** num_post_selections):
circuit.ry(theta, k)
circuit.rz(-phi, k)
for k in range(num_post_selections):
for l in range(0, 2 ** num_post_selections, 2 ** (k + 1)):
circuit.cx(l, l + 2 ** k)
circuit.s(l)
circuit.h(l)
circuit.s(l)
if add_measurements:
circuit.measure(
list(range(2 ** num_post_selections)), list(range(2 ** num_post_selections))
)
# store the resulting circuit
self.circuit = circuit
def run(self, device):
super().run(device)
return device.execute(self.circuit, num_shots=self.num_shots)
def __str__(self):
return f"QiskitSchroedingerMicroscopeJob-{self.i}-{self.j}"
| rumschuettel/quantum-benchmarks | benchmarks/Schroedinger-Microscope/ibm/job.py | job.py | py | 2,226 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "libbench.ibm.Job",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.linspace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.linspace",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "itertools.product",
... |
3294429630 | import time
from matplotlib import pyplot as plt
from util import make_random
import algorithms
class Config:
SORTING_ALGORITHMS = algorithms.__all__
# SORTING_ALGORITHMS = [algorithms.bubble_sort, algorithms.insertion_sort, algorithms.selection_sort, algorithms.merge_sort, algorithms.quick_sort]
NUM_RANGE = (-100000, 100000)
LENGTHS = [int(1.5 ** i) for i in range(18)]
# LENGTHS = [(2 ** i) for i in range(13)]
# LENGTHS = list(range(0, 10)) + list(range(10, 1000, 10))
def main() -> None:
arrays = [make_random(length, Config.NUM_RANGE) for length in Config.LENGTHS]
times = {algorithm: [] for algorithm in Config.SORTING_ALGORITHMS}
for i, array in enumerate(arrays):
print(f"#{i + 1}: Sorting array of length {len(array)}")
for algorithm in Config.SORTING_ALGORITHMS:
array_copy = array.copy()
start_time = time.perf_counter()
algorithm(array_copy)
end_time = time.perf_counter()
total_time = end_time - start_time
times[algorithm].append(total_time)
print(f"{algorithm.__name__} took {total_time} seconds")
print()
for key, times in times.items():
plt.plot(Config.LENGTHS, times, label=key.__name__)
plt.title("Times for different sorts")
plt.xlabel("Array Length")
plt.ylabel("Time In Seconds")
plt.legend()
plt.show()
if __name__ == "__main__":
main() | michael-lesirge/intermediate-programming-class | sorting/time_complexity_graph.py | time_complexity_graph.py | py | 1,565 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "algorithms.__all__",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "util.make_random",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "time.perf_counter",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "time.perf_c... |
15749350207 | from pymongo import MongoClient
import pandas as pd
client = MongoClient()
db = client['Capstone']
parcels = db["ParcelsWithVariables"]
def TransformData(Xin):
X = Xin
# Do a one-hot encoding of the nhood ids into seperate variables
# to prevent them being treated numerically when they are categorical
nhood = pd.get_dummies(X['nhood_id'], prefix='nhood')
X = X.drop('nhood_id', axis=1)
X = pd.concat([X, nhood], axis=1)
# Do the same one-hot-enconding for the zoning categeories
zone = pd.get_dummies(X['zone'], prefix='zone')
X = X.drop('zone', axis=1)
X = pd.concat([X, zone], axis=1)
# remove the id and parcel num from training data
X = X.drop('parcelnum', axis=1)
X = X.drop('_id', axis=1)
return X
def GetData():
# First get all the blighted buildings
blighted = parcels.aggregate([
{
"$match": { "blighted" : 1 }
}
])
trainingData = list(blighted)
numBlighted = len(trainingData)
# get a random number of non-blighted buildings
nonBlighted = parcels.aggregate([
{
"$match": { "blighted" : 0 }
},
{
"$sample": {"size": numBlighted}
}
])
trainingData.extend(list(nonBlighted))
df = pd.DataFrame(trainingData)
Y = df[['blighted']].values.ravel()
X = df.drop('blighted', axis=1)
return (X, Y)
def GetAllData():
df = pd.DataFrame(list(parcels.find()))
Y = df[['blighted']].values.ravel()
X = df.drop('blighted', axis=1)
return (X, Y) | IvoDonev/DSCapstone | GetTrainingData.py | GetTrainingData.py | py | 1,401 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummies",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.get_dummie... |
71312255075 | from PyQt6.QtWidgets import QDialog, QPushButton, QLineEdit, QRadioButton, QComboBox, QListWidget, QFileDialog, QMessageBox
from PyQt6 import uic
import sys
import time
import os
from absPath import resource_path
from LMSdataBackend import schoolClass_CRUD
from LMSdataBackend import gradeJHS_CRUD
from LMSdataBackend import gradeSHS_CRUD
from LMSdataBackend import mainDb_Create
from LMSdataBackend import cardExcelExport
from LMSUiFrontend import loadingWindow
class CardExportWindow(QDialog):
def __init__(self, parent=None):
super(CardExportWindow, self).__init__(parent)
uic.loadUi(resource_path('ui\\cardExportDialog.ui'), self)
self.setWindowTitle("Export to excel card")
# Initialize widgets
self.btnFilePath = self.findChild(QPushButton, "btnFilePath")
self.txtFilePath = self.findChild(QLineEdit, "txtFilePath")
self.radioBtnSelectAll = self.findChild(QRadioButton, "radioBtnSelectAll")
self.radioBtnSelectFew = self.findChild(QRadioButton, "radioBtnSelectFew")
self.expBtn = self.findChild(QPushButton, "expBtn")
self.activeClass = mainDb_Create.viewDbNames(1)[0]
self.className = self.activeClass[1]
self.classData = schoolClass_CRUD.viewClassData(self.className)[0]
self.gradesData = []
# Events
self.btnFilePath.clicked.connect(self.openFilePath)
self.expBtn.clicked.connect(self.exportCard)
self.radioBtnSelectAll.toggled.connect(lambda: self.radioState(self.radioBtnSelectAll))
self.radioBtnSelectFew.toggled.connect(lambda: self.radioState(self.radioBtnSelectFew))
def radioState(self, r):
if r.isChecked() and r.text() == "SELECT ALL LEARNERS":
print("all selected")
elif r.isChecked() and r.text() == "SELECT LEARNERS FROM LIST":
print("few selected")
def openFilePath(self):
folder = str(QFileDialog.getExistingDirectory(self, "Select Directory to save"))
self.txtFilePath.setText(folder)
def getGrades(self):
if self.classData[1] == 'JUNIOR HIGH SCHOOL':
self.gradesData = gradeJHS_CRUD.viewGradesJHSData(self.className)
else:
shsGradesSem1 = gradeSHS_CRUD.viewGradesSHSData(self.className, 1)
shsGradesSem2 = gradeSHS_CRUD.viewGradesSHSData(self.className, 2)
self.gradesData = self.joinSem1Sem2Data(shsGradesSem1, shsGradesSem2)
return self.gradesData
def joinSem1Sem2Data(self, sem1Grade, sem2Grade):
sem1GradesBasic = []
sem2GradesBasic = []
for s in sem1Grade:
sList = list(s)
sTuple = tuple(sList[:5])
sem1GradesBasic.append(sTuple)
for r in sem2Grade:
rList = list(r)
rTuple = tuple(rList[:5])
sem2GradesBasic.append(rTuple)
sem1Set = set(sem1GradesBasic)
sem2Set = set(sem2GradesBasic)
sem1sem2Datas = list(sem1Set.union(sem2Set))
sem1sem2Sorted = sorted(sem1sem2Datas, key=lambda item: item[1])
return sem1sem2Sorted
def checkTemplate(self):
jhsTemplate = resource_path("templates\\card_temp_JHSver2.xlsx")
shsTemplate = resource_path("templates\\card_temp_SHSver2.xlsx")
if os.path.exists(jhsTemplate) and os.path.exists(shsTemplate):
return True
else:
return False
def exportCard(self):
filePath = self.txtFilePath.text().strip()
if filePath != "" and self.checkTemplate():
if os.path.exists(filePath) and os.path.isdir(filePath):
self.loading = loadingWindow.LoadingWindow(filePath, self.getGrades())
self.loading.show()
self.loading.doneLoading.connect(lambda: self.close())
self.loading.processLoading()
self.expBtn.setEnabled(False)
self.btnFilePath.setEnabled(False)
else:
self.showMessage("Not Valid Path", "The selected folder is not valid", QMessageBox.Icon.Warning)
elif filePath == "":
self.showMessage("No Directory", "Please select a directory/folder to save", QMessageBox.Icon.Warning)
elif not self.checkTemplate():
self.showMessage("No Template", "Excel template does not found or has been moved.", QMessageBox.Icon.Warning)
def showMessage(self, title, message, icon): # warning message box
msgBox = QMessageBox(text=message, parent=self)
msgBox.setWindowTitle(title)
# icons (QMessageBox.Icon.Question, QMessageBox.Icon.Information,
# QMessageBox.Icon.Warning, QMessageBox.Icon.Critical)
msgBox.setIcon(icon)
msgBox.setDefaultButton(QMessageBox.StandardButton.Ok)
msgBox.exec()
| jpcanas/School_LMSv2 | LMS_v2.1/LMSUiFrontend/cardExportWindow.py | cardExportWindow.py | py | 4,799 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt6.QtWidgets.QDialog",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "PyQt6.uic.loadUi",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PyQt6.uic",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "absPath.resource_p... |
75267266272 | from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1 import ImageGrid
import numpy as np
import os
import tensorflow as tf
from model import Unrolled_GAN
from data_utils import Processor
flags = tf.app.flags
FLAGS = flags.FLAGS
# Else, you won't see anything in stderr with --alsologtostderr
tf.logging.set_verbosity(tf.logging.INFO)
flags.DEFINE_string("data_dir", None, "The location of the dataset")
flags.DEFINE_string("output_dir", None, "Where should the outputs be stored")
flags.DEFINE_integer("save_every", 500, "Save checkpoints every N steps")
flags.DEFINE_integer("eval_every", 50, "Generate images every N steps")
flags.DEFINE_integer("eval_images", 100,
"Images to generate at eval, must be a perfect square")
flags.DEFINE_integer("num_steps", 1000, "Number of batchs to train on")
flags.DEFINE_integer("batch_size", 100, "Batch size")
flags.DEFINE_integer("unroll_steps", 3, "Number of steps to unroll")
def maybe_create_output_dir():
if os.path.exists(FLAGS.output_dir):
tf.logging.info("data_dir already exists, not creating again")
return
os.mkdir(FLAGS.output_dir)
os.mkdir(os.path.join(FLAGS.output_dir, "ckpts"))
os.mkdir(os.path.join(FLAGS.output_dir, "summaries"))
os.mkdir(os.path.join(FLAGS.output_dir, "images"))
tf.logging.info("All paths created!")
def create_collage(images, step):
n_cols = np.sqrt(FLAGS.eval_images).astype(np.int32)
fig = plt.figure()
grid = ImageGrid(fig, 111, nrows_ncols=(n_cols, n_cols), axes_pad=0)
for i in range(FLAGS.eval_images):
grid[i].imshow(images[i])
grid[i].set_xticks([])
grid[i].set_yticks([])
plt.savefig(
os.path.join(FLAGS.output_dir, "images", "step_{}.png".format(step)))
plt.close()
tf.logging.info("Saved generated images")
if __name__ == "__main__":
tf.logging.info("Starting training for %d steps", FLAGS.num_steps)
tf.logging.info("Passed flags: %s", FLAGS.__flags)
unrolled_gan = Unrolled_GAN(FLAGS.unroll_steps)
processor = Processor(data_dir=FLAGS.data_dir, batch_size=FLAGS.batch_size)
data_yielder = processor.get_batch()
saver = tf.train.Saver(max_to_keep=None)
maybe_create_output_dir()
with tf.Session() as sess:
summary_writer = tf.summary.FileWriter(
os.path.join(FLAGS.output_dir, "summaries"), sess.graph)
sess.run(tf.global_variables_initializer())
for i in range(FLAGS.num_steps):
train_batch = data_yielder.next()
# len(...) because we can get smaller batches at file edges
noise = np.random.randn(len(train_batch), unrolled_gan.noise_size)
fetches = [
unrolled_gan.d_train, unrolled_gan.g_train,
unrolled_gan.d_loss, unrolled_gan.unrolled_loss,
unrolled_gan.summary_op
]
feed_dict = {
unrolled_gan.input_images: train_batch,
unrolled_gan.input_noise: noise
}
_, _, d_loss, unrolled_loss, summary = sess.run(
fetches, feed_dict=feed_dict)
tf.logging.log_every_n(
tf.logging.INFO,
"Step {}, D Loss: {}, Unrolled Loss: {}".format(
i, d_loss, unrolled_loss), FLAGS.eval_every)
if i % FLAGS.eval_every == 0:
# Let's generate some images!
tf.logging.info("Running evaluation")
feed_dict = {
unrolled_gan.input_noise:
np.random.randn(FLAGS.eval_images, unrolled_gan.noise_size)
}
gen_output = sess.run(
unrolled_gan.generator_output, feed_dict=feed_dict)
gen_output = (gen_output * 127) + 127.0
gen_output = gen_output.astype(np.uint8)
create_collage(gen_output, i)
summary_writer.add_summary(summary, i)
if i % FLAGS.save_every == 0:
# Save the trained model
filename = saver.save(
sess,
os.path.join(FLAGS.output_dir,
"ckpts/step_{}.ckpt".format(i)))
tf.logging.info(
"Saved trained model after %d steps with filename %s", i,
filename)
| gokul-uf/TF-Unrolled-GAN | main.py | main.py | py | 4,491 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "tensorflow.app",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.logging.set_verbosity",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorflow.logging",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_... |
21531841556 | import pandas as pd
from joblib import dump, load
from sklearn.ensemble import RandomForestClassifier, GradientBoostingClassifier, AdaBoostClassifier, VotingClassifier
from sklearn.neural_network import MLPClassifier
from sklearn.linear_model import SGDClassifier
from sklearn.metrics import f1_score
from utils import load_folder
def main(train_path, model_path='models/final.joblib'):
print("Training..")
train_X, train_y = load_folder(train_path)
# multiply class 1
logic = train_y == 1
new_X = [train_X] + [train_X[logic].copy() for _ in range (3)]
new_y = [train_y] + [train_y[logic].copy() for _ in range (3)]
train_X = pd.concat(new_X)
train_y = pd.concat(new_y)
models = [
RandomForestClassifier(),
GradientBoostingClassifier(),
AdaBoostClassifier(),
RandomForestClassifier(min_samples_leaf=5),
GradientBoostingClassifier(min_samples_leaf=5),
MLPClassifier(),
SGDClassifier(),
]
ensembled = VotingClassifier(estimators=[(str(i), model) for i, model in enumerate(models)], voting='hard')
ensembled.fit(X=train_X, y=train_y)
dump(ensembled, model_path)
if __name__ == "__main__":
train_path = 'data/train'
main(train_path) | daniel-yehezkel/DS.DPA.HW1 | train.py | train.py | py | 1,254 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "utils.load_folder",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "pandas.concat",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "sklearn.ensemble.Random... |
8702400345 | import streamlit as st
import altair as alt
import inspect
from vega_datasets import data
@st.experimental_memo
def get_chart_72043(use_container_width: bool):
import altair as alt
import pandas as pd
import numpy as np
np.random.seed(1)
source = pd.DataFrame({
'x': np.arange(100),
'A': np.random.randn(100).cumsum(),
'B': np.random.randn(100).cumsum(),
'C': np.random.randn(100).cumsum(),
})
base = alt.Chart(source).mark_circle(opacity=0.5).transform_fold(
fold=['A', 'B', 'C'],
as_=['category', 'y']
).encode(
alt.X('x:Q'),
alt.Y('y:Q'),
alt.Color('category:N')
)
chart = base + base.transform_loess('x', 'y', groupby=['category']).mark_line(size=4)
tab1, tab2 = st.tabs(["Streamlit theme (default)", "Altair native theme"])
with tab1:
st.altair_chart(chart, theme="streamlit", use_container_width=True)
with tab2:
st.altair_chart(chart, theme=None, use_container_width=True)
try:
st.expander("See code").code(inspect.getsource(get_chart_72043))
get_chart_72043(use_container_width=True)
except Exception as e:
st.exception(e)
| streamlit/release-demos | 1.16.0/demo_app_altair/pages/71_Scatter_With_Loess.py | 71_Scatter_With_Loess.py | py | 1,216 | python | en | code | 78 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pandas.DataFrame",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
... |
31765432845 | import cv2
import time
class Camera():
def __init__(self):
self.capture = cv2.VideoCapture('resource/capture.mp4')
# cv2.namedWindow('test')
def get_image(self, t):
t=1000
ret = self.capture.set(cv2.CAP_PROP_POS_FRAMES, t)
ret, frame = self.capture.read()
if ret == True :
frame = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
# cv2.imshow('test',frame)
image_bytes = cv2.imencode('.bmp', frame)[1].tobytes()
return image_bytes
else :
return b'\x00' | cande-cansat/SatSAT | SocketTest/satellite_camera.py | satellite_camera.py | py | 568 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.CAP_PROP_POS_FRAMES",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "cv2.cvtColor",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_B... |
11371082097 | #!/usr/bin/env python
from collections import OrderedDict
import rows
class BrazilianMoneyField(rows.fields.DecimalField):
"""Parser for money in Brazilian notation
"1.234,56" -> Decimal("1234.56")
"""
@classmethod
def deserialize(cls, value):
value = (value or "").replace(".", "").replace(",", ".")
return super().deserialize(value)
PDF_FIELD_TYPES = OrderedDict(
[
("nome_lotacao", rows.fields.TextField),
("cargo", rows.fields.TextField),
("funcao_vinculo", rows.fields.TextField),
("remuneracao_legal_total", BrazilianMoneyField),
("desc_teto", BrazilianMoneyField),
("remuneracao_legal_devida", BrazilianMoneyField),
("descontos_legais", BrazilianMoneyField),
("liquido_disponivel", BrazilianMoneyField),
]
)
def convert_row(row):
"""Generate the final dict based on data from the PDF"""
row = row._asdict()
nome_lotacao = row.pop("nome_lotacao").splitlines()
funcao_vinculo = row.pop("funcao_vinculo").splitlines()
row["funcao"] = funcao_vinculo[0]
row["lotacao"] = " ".join(nome_lotacao[1:])
row["nome"] = nome_lotacao[0]
row["vinculo"] = " ".join(funcao_vinculo[1:])
return row
def parse_file(filename):
"""Parse Amazonas' PDF file containing state employee information"""
total_pages = rows.plugins.pdf.number_of_pages(filename)
result = []
for page in range(1, total_pages + 1):
table = rows.import_from_pdf(
filename,
page_numbers=(page,),
starts_after="NOME",
fields=PDF_FIELD_TYPES,
skip_header=True,
)
for row in table:
result.append(convert_row(row))
return rows.import_from_dicts(result)
if __name__ == "__main__":
from argparse import ArgumentParser
from pathlib import Path
from rows.utils import download_file
parser = ArgumentParser()
parser.add_argument(
"--url",
default="http://www.transparencia.am.gov.br/arquivos/2014/158_201404.pdf",
)
args = parser.parse_args()
url = args.url
pdf_filename = Path(url).name
csv_filename = pdf_filename.replace(".pdf", ".csv")
download_file(url, pdf_filename, progress=True)
print("Parsing PDF...")
table = parse_file(pdf_filename)
print("Exporting to CSV...")
rows.export_to_csv(table, csv_filename)
| julianyraiol/portal_transparencia_am | antigo/pdf_parser.py | pdf_parser.py | py | 2,419 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "rows.fields",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "collections.OrderedDict",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "rows.fields",
"line_number": 21,
"usage_type": "attribute"
},
{
"api_name": "rows.fields"... |
28917567329 | import healpy as hp
from astropy import units as u
from astropy.coordinates import SkyCoord
from numpy import *
import numpy as np
import matplotlib.pyplot as plt
import healpy as hp
from astropy.io import fits
with fits.open('gsm_182mhz_Jysr_nomono_nogalaxy_2048.fits') as hdu:
data = hdu[0].data
with fits.open('gsm2016_182MHz_nomono_nogalaxyinterp2048_subset_try2_inds.fits') as hdu:
pix_inds = hdu[0].data
nside=2048
#pix_inds = arange(hp.nside2npix(nside))
l, b = hp.pix2ang(nside,pix_inds,lonlat=True)
gal_coords = SkyCoord(l*u.deg, b*u.deg, frame='galactic')
ra = gal_coords.icrs.ra.value
dec = gal_coords.icrs.dec.value
#conversion = hp.nside2pixarea(nside)
with fits.open('byrne_interp2048_I.fits') as hdu:
data_b = hdu[0].data
with fits.open('byrne_interp2048_inds.fits') as hdu:
pix_inds_b = hdu[0].data
l, b = hp.pix2ang(nside,pix_inds_b,lonlat=True)
cel_coords = SkyCoord(l*u.deg, b*u.deg, frame='icrs')
ra_b = cel_coords.icrs.ra.value
dec_b = cel_coords.icrs.dec.value
#k_boltz = 1.38064852e-23
#vel_c = 299792458.0
#freq=182e6
#Jystr = (1e23*2*freq**2*k_boltz) / vel_c**2
conversion = hp.nside2pixarea(nside)# * Jystr
fluxes = data*conversion
fluxes_b = data_b*conversion
print(np.abs(fluxes).max(),np.abs(fluxes).mean())
fig = plt.figure(figsize=(10,10))
vmin = -0.1
vmax = 0.1
hp.mollview(fluxes, sub=(2,1,1), fig=fig,title='Galactic (Jy / pixel)',min=vmin,max=vmax)
ax2 = fig.add_subplot(2,1,2)
ax2.scatter(ra,dec,c=fluxes[pix_inds],marker='.',vmin=vmin,vmax=vmax)
ax2.scatter(ra_b,dec_b,c=fluxes_b,marker='.',vmin=vmin,vmax=vmax)
ax2.plot(266.416833333,-29.0078055556,'ro',mfc='none',label="Where gal centre should be")
ax2.set_xlabel('RA (deg)')
ax2.set_ylabel('Dec (deg)')
ax2.legend(loc='upper left')
fig.savefig('pygdsm_nomono_nogalaxy_n2048.png',bbox_inches='tight')
plt.close()
#inds = np.argwhere(b > 0)
#false_b = b.copy()
#false_b[inds] = (b[inds]-180.)
#false_b = false_b + 90.
longitude = 0 * u.deg
latitude = -90 * u.deg
rot_custom = hp.Rotator(rot=[longitude.to_value(u.deg), latitude.to_value(u.deg)], inv=True)
fluxes_rotated_alms = rot_custom.rotate_map_alms(fluxes)
hp.mollview(fluxes_rotated_alms, sub=(2,1,1), fig=fig,title='Galactic (Jy / pixel)',min=vmin,max=vmax)
ax2 = fig.add_subplot(2,1,2)
ax2.scatter(l,b,c=fluxes_rotated_alms,marker='.',vmin=vmin,vmax=vmax)
ax2.set_xlabel('Lat (deg)')
ax2.set_ylabel('90deg rotated Lon (deg)')
fig.savefig('falselon_pygdsm_nomono_nogalaxy_n2048.png',bbox_inches='tight')
plt.close()
center_patch_inds = np.argwhere((l > 170) & (l < 190) & (b > -10) & (b < 10))
center_patch = fluxes_rotated_alms[center_patch_inds]
rotated_lonlat = rot_custom(l,b,lonlat=True)
center_patch_latlon = rotated_lonlat[:,center_patch_inds]
hdu = fits.PrimaryHDU(center_patch)
hdu.writeto('center_patch_1024.fits', clobber=True)
hdu = fits.PrimaryHDU(l[center_patch_inds])
hdu.writeto('center_patch_lat_1024.fits', clobber=True)
hdu = fits.PrimaryHDU(b[center_patch_inds])
hdu.writeto('center_patch_lon_1024.fits', clobber=True)
| nicholebarry/gar_scripts | woden_scripts/temp_plotter.py | temp_plotter.py | py | 3,024 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "astropy.io.fits.open",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "astropy.io.fits",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "astropy.io.fits.open",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "astropy.io.... |
72198180835 | from sklearn.metrics import accuracy_score
from sklearn.model_selection import train_test_split
from sklearn.neighbors import KNeighborsClassifier
from sklearn import preprocessing
from sklearn.neighbors import NearestCentroid
def cv2NN(X_train, X_test, y_train, y_test, kneighbors, metric='euclidean', scalling=False):
    """2-fold cross-validated accuracy of a k-nearest-neighbours classifier.

    Fold 1 trains on (X_train, y_train) and scores on (X_test, y_test);
    fold 2 swaps the roles. The mean of the two fold accuracies is returned.

    Bug fix vs the original: when ``scalling`` was set, only the fit data was
    min-max scaled while predictions ran on the raw-scale other fold. The
    scaler is now fitted on the fit fold and applied to both folds.

    :param X_train: feature matrix of the first fold
    :param X_test: feature matrix of the second fold
    :param y_train: labels of the first fold
    :param y_test: labels of the second fold
    :param kneighbors: number of neighbours (k) for the classifier
    :param metric: distance metric passed to KNeighborsClassifier
    :param scalling: if True, min-max scale the features before fitting
                     (parameter name kept for backward compatibility)
    :return: mean accuracy over the two folds
    """
    def one_fold(fit_x, fit_y, eval_x, eval_y):
        # Fit the scaler on the training fold only, then transform both
        # folds so train and evaluation data live on the same scale.
        if scalling:
            scaler = preprocessing.MinMaxScaler().fit(fit_x)
            fit_x = scaler.transform(fit_x)
            eval_x = scaler.transform(eval_x)
        knn = KNeighborsClassifier(n_neighbors=kneighbors, metric=metric)
        knn.fit(fit_x, fit_y)
        return accuracy_score(eval_y, knn.predict(eval_x))

    score1 = one_fold(X_train, y_train, X_test, y_test)
    score2 = one_fold(X_test, y_test, X_train, y_train)
    return (score2 + score1) / 2
def cv2NM(X_train, X_test, y_train, y_test, metric='euclidean', scalling=True):
    """2-fold cross-validated accuracy of a nearest-centroid classifier.

    Fold 1 trains on (X_train, y_train) and scores on (X_test, y_test);
    fold 2 swaps the roles. The mean of the two fold accuracies is returned.

    Bug fix vs the original: when ``scalling`` was set, only the fit data was
    min-max scaled while predictions ran on the raw-scale other fold. The
    scaler is now fitted on the fit fold and applied to both folds.

    :param X_train: feature matrix of the first fold
    :param X_test: feature matrix of the second fold
    :param y_train: labels of the first fold
    :param y_test: labels of the second fold
    :param metric: distance metric passed to NearestCentroid
    :param scalling: if True, min-max scale the features before fitting
                     (parameter name kept for backward compatibility)
    :return: mean accuracy over the two folds
    """
    def one_fold(fit_x, fit_y, eval_x, eval_y):
        # Fit the scaler on the training fold only, then transform both
        # folds so train and evaluation data live on the same scale.
        if scalling:
            scaler = preprocessing.MinMaxScaler().fit(fit_x)
            fit_x = scaler.transform(fit_x)
            eval_x = scaler.transform(eval_x)
        nm = NearestCentroid(metric=metric)
        nm.fit(fit_x, fit_y)
        return accuracy_score(eval_y, nm.predict(eval_x))

    score1 = one_fold(X_train, y_train, X_test, y_test)
    score2 = one_fold(X_test, y_test, X_train, y_train)
    return (score2 + score1) / 2
| karmelowsky/AcuteInflammations | myFunctions.py | myFunctions.py | py | 1,517 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.preprocessing.minmax_scale",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sklearn.neighbors.KNeighborsClassifier",
"line_number": 13,
"usage_type": "call"... |
26579217754 | # Data Sonification Project - LITR 0110D
import csv
from datetime import datetime
from miditime.miditime import MIDITime
from scipy import stats
import math
# instantiate the MITITime class with tempo 120 and 5sec/year
mymidi = MIDITime(120, 'data_sonfication.mid', 1, 5, 1)
# load in climate data as dictionary
climate_data = []
with open('GLB.Ts+dSST.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
# skip headers (first 2 rows)
next(readCSV)
next(readCSV)
for row in readCSV:
climate_data.append(
{'days_since_epoch': mymidi.days_since_epoch(datetime.strptime(row[0], '%Y')),
'magnitude_change': row[17]
})
my_data_timed = [
{'beat': mymidi.beat(d['days_since_epoch']),
'magnitude_change': float(d['magnitude_change'])} for d in climate_data]
start_time = my_data_timed[0]['beat']
data_list = [d['magnitude_change'] for d in my_data_timed]
def mag_to_pitch_tuned(magnitude):
    """Map a data-point magnitude onto a MIDI pitch in the key of C major.

    The magnitude is normalised linearly against the full range of the
    module-level ``data_list``, snapped to the nearest note of the C-major
    scale, and converted to a MIDI pitch number via the module-level
    ``mymidi`` helper.

    :param magnitude: int or float magnitude of one data point
    :return: MIDI pitch corresponding to the normalised value
    """
    # Where this value sits in the observed data range, as a 0-1 fraction.
    pct = mymidi.linear_scale_pct(min(data_list), max(data_list), magnitude)
    # Restrict the output to notes of C major so the melody stays in key.
    key_notes = ['C', 'D', 'E', 'F', 'G', 'A', 'B']
    note = mymidi.scale_to_note(pct, key_notes)
    return mymidi.note_to_midi_pitch(note)
note_list = []
z_scores = stats.zscore(data_list)
exp_score = [math.ceil(math.exp(x)*4)/4 for x in z_scores]
i = 0
for d in my_data_timed:
note_list.append([
d['beat'] - start_time,
mag_to_pitch_tuned(d['magnitude_change']),
100, # velocity
exp_score[i] # duration, in beats
])
i += 1
# Add a track with those notes
mymidi.add_track(note_list)
# Output the .mid file
mymidi.save_midi()
#sum = sum(exp_score)
#softmax_score = [x / sum for x in exp_score]
print(exp_score)
| pattwm16/climate_sonification | data_sonification.py | data_sonification.py | py | 2,655 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "miditime.miditime.MIDITime",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "csv.reader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datet... |
8125849858 | #!/usr/bin/env python
"""Apply a threshold to an image for background subtraction."""
__author__ = "Anas Abou Allaban"
__maintainer__ = "Anas Abou Allaban"
__email__ = "anas@abouallaban.info"
import cv2
import numpy as np
def printImage(image):
    """Display *image* in an OpenCV window titled 'Test' and block until a key is pressed."""
    cv2.imshow('Test', image)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
# Read in image, convert to grayscale and threshold
img = cv2.imread('image085447.jpg')
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, thresh = cv2.threshold(gray, 127, 255, cv2.THRESH_BINARY_INV)
printImage(thresh)
# Remove Noise
# MORPH_OPEN removes small 'white noise'
# MORPH_CLOSE removes holes, cleans edges
kernel = np.ones((3, 3), np.uint8)
opening = cv2.morphologyEx(thresh, cv2.MORPH_CLOSE, kernel, iterations=2)
# Get confirmed BG area
sureBG = cv2.dilate(opening, kernel, iterations=3)
# Get confirmed FG area
distTransform = cv2.distanceTransform(opening, cv2.DIST_L2, 5)
ret, sureFG = cv2.threshold(distTransform, 0.7*distTransform.max(), 255, 0)
# Find unknown region (subtract FG from BG)
sureFG = np.uint8(sureFG)
unknown = cv2.subtract(sureBG, sureFG)
# Marker labelling
ret, markers = cv2.connectedComponents(sureFG)
markers = markers + 1
markers[unknown == 255] = 0
markers = cv2.watershed(img, markers)
img[markers == -1] = [255, 0, 0]
printImage(img)
| piraka9011/EECE5550_MobileRobotics | mobile_robotics_utilities/scripts/threshold_image.py | threshold_image.py | py | 1,348 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imshow",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.waitKey",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "cv2.destroyAllWindows",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "cv2.imread",
"line_n... |
#!/usr/bin/python3
# Resize the image given as the first CLI argument to (at most) 200px wide,
# preserving aspect ratio, and save it into a 'p_i/' subdirectory next to
# the original file with a '_200' suffix.
import sys, os
from PIL import Image
#import tinify
#tinify.key = "<REDACTED>"  # SECURITY(review): a real API key was committed in this comment; rotate it and keep secrets out of source control
origin_file = sys.argv[1]  # path of the image to shrink (first CLI argument)
dst_path = str(os.path.dirname(origin_file)) + '/p_i/'  # output directory; NOTE(review): assumed to already exist -- save() fails otherwise
base_name = str(os.path.basename(origin_file))
file_name, file_extension = os.path.splitext(base_name)
# Debug output describing how the input path was decomposed.
print( 'File:', str(origin_file))
print( 'Path:', dst_path)
print( 'Base:', base_name)
print( "FN: %s FE:%s " % (file_name, file_extension) )
opt_origin = dst_path + base_name
#tinify.from_file(origin_file).to_file(opt_origin)
#tinify.from_file(opt_origin).resize(method="scale",height=180).to_file(dst_path + file_name + "_180" + file_extension)
width=200  # target width in pixels
im = Image.open(origin_file)
xsize, ysize = im.size
if xsize < 200:
    # Image is already narrower than the target: keep its original width.
    ratio = 1
    width = xsize
else:
    ratio = xsize / width  # downscale factor mapping xsize -> 200
# Note: opt_origin is rebuilt here, so the earlier base_name value is discarded.
opt_origin = dst_path + file_name + "_200" + file_extension
y = int(ysize / ratio)  # scale height by the same factor to keep the aspect ratio
if file_extension is None or file_extension == '':
    # No extension to infer the output format from, so force JPEG.
    im.resize((width, y)).save(opt_origin, "JPEG")
else:
    im.resize((width, y)).save(opt_origin)
| mijkenator/muploader | tinify/tf_mbd180.py | tf_mbd180.py | py | 1,043 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.basename",
"line_... |
2129904597 | #!/usr/bin/env python3
import sys
from heapq import nlargest
import json
from math import *
ratings = {}
rest = {}
visited = []
#function jaccard
#calculate the similarity index between two teammated by their ID's
#parameters- ratings, id1, id2
#ratings - the dictionary of ratings that got established in init()
#id1 - id of the teammate we are recommending for
#id2 - if of the teammate we are comparing against
def jaccard(ratings, id1, id2):
    """Signed Jaccard-style similarity between two teammates' ratings.

    Agreements (shared likes and shared dislikes) count positively,
    disagreements (one teammate likes what the other dislikes) count
    negatively, and the total is normalised by the number of restaurants
    either teammate has rated, giving a score in [-1, 1].

    :param ratings: dict mapping teammate id -> [list_of_liked_ids, list_of_disliked_ids]
    :param id1: id of the teammate we are recommending for
    :param id2: id of the teammate we are comparing against
    :return: similarity score in [-1.0, 1.0]; 0.0 when neither has any ratings
    """
    likes1, dislikes1 = (set(r) for r in ratings[id1])
    likes2, dislikes2 = (set(r) for r in ratings[id2])

    # Union of everything either teammate rated (the normaliser).
    rated_by_either = likes1 | dislikes1 | likes2 | dislikes2
    if not rated_by_either:
        # Neither teammate rated anything: no evidence either way.
        # (The original code raised ZeroDivisionError in this case.)
        return 0.0

    agreements = len(likes1 & likes2) + len(dislikes1 & dislikes2)
    disagreements = len(likes1 & dislikes2) + len(dislikes1 & likes2)
    return (agreements - disagreements) / len(rated_by_either)
#function init
#init is going to build a couple of data structures that are needed for this procedure
#ratings: dictionary - key is the teammate id, value is a list.
# the value is a list of two lists where [0] is the list of likes and [1] is the list of dislikes
# [[likes],[dislikes]]
#rest: dictionary - key is the restauraunt id, value is a list
# the value is a list of two lists where [0] is the list of teammates who liked that restauraunt and [1] is vice versa
# [[teammates who like rest], [teammates who dislike rest]]
#visited- list - list of restaurants that the 'current' teammate has already visited
#parameters current- current is the id of the teammate that we are trying to recommend for
def init(current):
    """Populate the module-level ratings/rest/visited structures from the seed JSON files.

    After this runs:
      ratings: teammate id -> [[liked restaurant ids], [disliked restaurant ids]]
      rest:    restaurant id -> [[ids of teammates who liked it], [ids who disliked it]]
      visited: restaurant ids that *current* has already rated

    Note: any rating whose value is not "LIKE" is counted as a dislike.

    :param current: id of the teammate we are recommending for
    """
    #init the ratings to be empty
    with open("../seed/out/teammates.json") as teammates:
        data = json.load(teammates)
        for row in data:
            ratings[row["id"]] = [[], []]
    #init the rest to be empty
    with open("../seed/out/restaurants.json") as restaurants:
        data = json.load(restaurants)
        for row in data:
            rest[row["id"]] = [[], []]
    #begin processing the ratings
    with open("../seed/out/ratings.json") as ratings_data:
        data = json.load(ratings_data)
        for item in data:
            #keep track of which restaurants the 'current' has already visited
            if item["teammateId"] == current:
                visited.append(item["restaurantId"])
            #if the current rating is a like, add it to ratings and rest
            if item["rating"] == "LIKE":
                #print(ratings[item["teammateId"]])
                ratings[item["teammateId"]][0].append(item["restaurantId"])
                rest[item["restaurantId"]][0].append(item["teammateId"])
            #current rating is a dislike
            else:
                ratings[item["teammateId"]][1].append(item["restaurantId"])
                rest[item["restaurantId"]][1].append(item["teammateId"])
#function predict- this function is what is going to calculate the prediction algorithm
def predict():
    """Score every restaurant the global teammate ``current`` has not visited.

    For each restaurant the prediction is a similarity-weighted vote: the
    sum of Jaccard similarities (to ``current``) of the teammates who liked
    it, minus the same sum for the teammates who disliked it, normalised by
    the restaurant's total number of ratings and scaled by 100.

    Relies on the module-level ``ratings``, ``rest`` and ``visited``
    structures built by init(), and on the global ``current`` id set in
    ``__main__``.

    :return: dict of the top-3 restaurant ids -> predicted score
    """
    scores = {}
    for rest_id in rest:
        # only consider restaurants that 'current' hasn't already visited
        if rest_id in visited:
            continue
        likers, dislikers = rest[rest_id]
        total_ratings = len(likers) + len(dislikers)
        if total_ratings == 0:
            # Nobody rated this restaurant: no basis for a prediction.
            # (The original code divided by zero here.)
            continue
        # Bug fix: the original reset sum_liked/sum_disliked INSIDE the
        # accumulation loops, so each "sum" held only the last teammate's
        # similarity (and was stale/undefined when a rating list was empty).
        sum_liked = sum(jaccard(ratings, current, t) for t in likers if t != current)
        sum_disliked = sum(jaccard(ratings, current, t) for t in dislikers if t != current)
        # normalize the prediction by 100
        scores[rest_id] = 100 * (sum_liked - sum_disliked) / total_ratings
    # Keep only the three highest-scoring restaurants. Looking each value up
    # by id (instead of zipping nlargest ids with independently sorted
    # values, as before) cannot mis-pair tied scores.
    return {rest_id: scores[rest_id] for rest_id in nlargest(3, scores, key=scores.get)}
#function get_rating - gets the rating of a restauraunt by id
def get_rating(rest_id):
    """Return the stored rating of the restaurant with id *rest_id*.

    Returns None when no restaurant matches.
    NOTE(review): re-parses restaurants.json on every call (it is called
    once per candidate by print_largest); fine for small seed data.

    :param rest_id: id of the restaurant to look up
    :return: the restaurant's "rating" value, or None if not found
    """
    with open("../seed/out/restaurants.json") as restaurants:
        data = json.load(restaurants)
        for item in data:
            if item['id'] == rest_id:
                return item["rating"]
#function print_rest - prints a restauraunt by id
def print_rest(rest_id):
    """Print the name, price and rating of the restaurant with id *rest_id*.

    Prints an empty line when the id is not found in restaurants.json.

    :param rest_id: id of the restaurant to display
    """
    string = ""
    with open("../seed/out/restaurants.json") as restaurants:
        data = json.load(restaurants)
        for item in data:
            if item['id'] == rest_id:
                string += "Name: " + item['name'] + "\nPrice: " + item['price'] + "\nRating:" + str(item['rating'])
    print(string)
#print_largest - prints out the top 3 matched restauraunts in descending rating order
def print_largest(largest):
    """Print the matched restaurants in descending order of their stored rating.

    :param largest: dict of restaurant ids (the top predictions) -> score
    """
    # Look up each candidate's rating, then rank best-rated first.
    by_rating = {rest_id: get_rating(rest_id) for rest_id in largest}
    ranked = sorted(by_rating.items(), key=lambda kv: kv[1], reverse=True)
    for rank, (rest_id, _rating) in enumerate(ranked, start=1):
        print("Ranking:", rank)
        print_rest(rest_id)
        print()
if __name__ == "__main__":
if len(sys.argv) < 2:
print("need to supply a teammate id to suggest to!")
sys.exit()
elif len(sys.argv) > 2:
print("too many arguments supplied")
sys.exit()
#get the user id we are interested in
current = sys.argv[1]
#initialize the system
init(current)
#get the largest probabilities
largest = predict()
#print the results
print_largest(largest)
| dgeorge10/suitable-puzzles | recommendation/solution.py | solution.py | py | 6,538 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.load",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 55,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 62,
"usage_type": "call"
},
{
"api_name": "heapq.nlargest",
"line_number": 114... |
18468558231 | import numpy as np
import pylab as pl
from sklearn import mixture
np.random.seed(0)
#C1 = np.array([[3, -2.7], [1.5, 2.7]])
#C2 = np.array([[1, 2.0], [-1.5, 1.7]])
#
#X_train = np.r_[
# np.random.multivariate_normal((-7, -7), C1, size=7),
# np.random.multivariate_normal((7, 7), C2, size=7),
#]
X_train = np.r_[
np.array([[0,0],[0,1],[2,0],[3,2],[3,3],[2,2],[2,0]]),
np.array([[7,7],[8,6],[9,7],[8,10],[7,10],[8,9],[7,11]]),
]
print(X_train)
clf = mixture.GaussianMixture(n_components=2, covariance_type='full')
clf.weights_ = [2,1]
clf.fit(X_train)
#define g1(x, y) and g2(x, y)
def g1(x, y):
    """Posterior probability of mixture component 0 at each point (x[i], y[i]).

    Relies on the module-level fitted GaussianMixture ``clf``; x and y are
    1-D coordinate arrays of equal length. The print is debug output.
    """
    print("x = {},y = {} for g1".format(x,y))
    return clf.predict_proba(np.column_stack((x, y)))[:, 0]
def g2(x, y):
    """Posterior probability of mixture component 1 at each point (x[i], y[i]).

    Relies on the module-level fitted GaussianMixture ``clf``; x and y are
    1-D coordinate arrays of equal length. The print is debug output.
    """
    print("x = {},y = {} for g2".format(x,y))
    return clf.predict_proba(np.column_stack((x, y)))[:, 1]
X, Y = np.mgrid[-15:13:500j, -15:13:500j]
x = X.ravel()
y = Y.ravel()
p = (g1(x, y) - g2(x, y)).reshape(X.shape)
pl.scatter(X_train[:, 0], X_train[:, 1])
pl.contour(X, Y, p, levels=[0])
pl.show() | sum-coderepo/Optimization-Python | Assignments_SMAI/BayesianClassifier.py | BayesianClassifier.py | py | 1,043 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "numpy.r_",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.array",
"li... |
24870806650 | #!/usr/bin/env python3
import lib.exitcode
import lib.utilities as utilities
import os
import shutil
import sys
from lib.database import Database
from lib.database_lib.config import Config
from lib.lorisgetopt import LorisGetOpt
__license__ = 'GPLv3'
def main():
    """Entry point: parse CLI options, connect to the LORIS database, and
    recompute the MD5/BLAKE2b hashes stored for all imaging and
    physiological files, cleaning up the temporary directory afterwards."""
    usage = (
        "\n"
        "********************************************************************\n"
        " CORRECT BLAKE2b AND MD5 HASHES STORED IN DATABASE SCRIPT\n"
        "********************************************************************\n"
        "The program will fetch the list of files stored in the database and update the hashes associated"
        " to them according to the correct algorithm to compute those hashes. (Before, the python scripts"
        " use to hash the path of the file instead of the data content).\n\n"
        "usage : correct_blake2b_and_md5_hashes_in_database.py -p <profile> ...\n\n"
        "options: \n"
        "\t-p, --profile : Name of the python database config file in dicom-archive/.loris_mri\n"
        "\t-v, --verbose : If set, be verbose\n\n"
        "required options are: \n"
        "\t--profile\n"
    )
    options_dict = {
        "profile": {
            "value": None, "required": True, "expect_arg": True, "short_opt": "p", "is_path": False
        },
        "verbose": {
            "value": False, "required": False, "expect_arg": False, "short_opt": "v", "is_path": False
        },
        "help": {
            "value": False, "required": False, "expect_arg": False, "short_opt": "h", "is_path": False
        },
    }
    # get the options provided by the user
    # (__file__[:-3] strips the trailing '.py' before taking the basename)
    loris_getopt_obj = LorisGetOpt(usage, options_dict, os.path.basename(__file__[:-3]))
    # establish database connection
    verbose = loris_getopt_obj.options_dict['verbose']['value']
    db = Database(loris_getopt_obj.config_info.mysql, verbose)
    db.connect()
    # get data_dir path config
    config_db_obj = Config(db, verbose)
    data_dir = config_db_obj.get_config('dataDirBasepath')
    # get tmp dir path from loris_getopt object
    tmp_dir = loris_getopt_obj.tmp_dir
    # get S3 object from loris_getopt object
    s3_obj = loris_getopt_obj.s3_obj
    # handle imaging files to update their hashes values
    handle_imaging_files(db, data_dir, tmp_dir, s3_obj)
    # handle physiological files to update their hashes values
    handle_physiological_files(db, data_dir, tmp_dir, s3_obj)
    # delete temporary directory
    if os.path.exists(tmp_dir):
        shutil.rmtree(tmp_dir)
    # exit with SUCCESS exit code
    sys.exit(lib.exitcode.SUCCESS)
def handle_imaging_files(db, data_dir, tmp_dir, s3_obj):
    """
    Recomputes the hashes stored in parameter_file for every FileID present in
    the files table: the MD5 and BLAKE2b hashes of the imaging file itself,
    plus the BLAKE2b hashes of its associated BIDS JSON / bval / bvec files
    when those entries exist in the database.
    Note: files stored on S3 are downloaded first so that the hash is computed
    on the data content, and the local copy is removed afterwards.

    :param db: database object from the database.py class
    :type db: Database
    :param data_dir: path of the data_dir
    :type data_dir: str
    :param tmp_dir: path to a temporary directory for processing
    :type tmp_dir: str
    :param s3_obj: AWS S3 object from the aws_s3.py class
    :type s3_obj: AwsS3
    """
    # (hash parameter key, associated file entry key) pairs populated by
    # query_hashes_and_associated_files_to_file_id()
    associated_pairs = (
        ('bids_json_file_blake2b_hash', 'bids_json_file'),
        ('check_bval_filename_blake2b_hash', 'check_bval_filename'),
        ('check_bvec_filename_blake2b_hash', 'check_bvec_filename'),
    )

    # query list of FileIDs to process
    files_results = db.pselect('SELECT FileID, File AS FilePath FROM files', ())

    # loop through FileIDs and get all associated files and hashes stored in parameter_file
    for file_dict in files_results:
        query_hashes_and_associated_files_to_file_id(db, file_dict, s3_obj, tmp_dir, data_dir)

        # update the imaging file's own MD5 and blake2b hashes
        file_full_path = determine_file_full_path(file_dict['FilePath'], s3_obj, tmp_dir, data_dir)
        if 'md5hash' in file_dict.keys():
            update_parameter_file_hash(
                db,
                file_dict['md5hash']['ParameterFileID'],
                utilities.compute_md5_hash(file_full_path)
            )
        if 'file_blake2b_hash' in file_dict.keys():
            update_parameter_file_hash(
                db,
                file_dict['file_blake2b_hash']['ParameterFileID'],
                utilities.compute_blake2b_hash(file_full_path)
            )
        if file_dict['FilePath'].startswith('s3://') and os.path.exists(file_full_path):
            os.remove(file_full_path)

        # update the blake2b hash of every associated BIDS file present in the DB
        for hash_key, file_key in associated_pairs:
            _update_imaging_associated_file_hash(db, file_dict, hash_key, file_key)


def _update_imaging_associated_file_hash(db, file_dict, hash_key, file_key):
    """
    Recomputes and stores the blake2b hash of one file associated to an imaging
    file (identified by its file entry key and hash entry key in file_dict),
    deleting the temporary local copy when the file came from S3.
    No-op when either key is missing from file_dict.

    :param db: database object from the database.py class
    :type db: Database
    :param file_dict: dictionary with the imaging file information
    :type file_dict: dict
    :param hash_key: file_dict key of the stored hash entry to update
    :type hash_key: str
    :param file_key: file_dict key of the associated file entry
    :type file_key: str
    """
    if file_key not in file_dict.keys() or hash_key not in file_dict.keys():
        return
    new_blake2b_hash = utilities.compute_blake2b_hash(file_dict[file_key]['FullFilePath'])
    update_parameter_file_hash(db, file_dict[hash_key]['ParameterFileID'], new_blake2b_hash)
    if file_dict[file_key]['Value'].startswith('s3://') \
            and os.path.exists(file_dict[file_key]['FullFilePath']):
        os.remove(file_dict[file_key]['FullFilePath'])
def query_hashes_and_associated_files_to_file_id(db, file_dict, s3_obj, tmp_dir, data_dir):
    """
    Looks up, for one FileID, every hash and associated-file entry stored in
    parameter_file and stashes each row into file_dict under its parameter
    type name. For the three path-valued parameter types (BIDS JSON, bval,
    bvec files) a 'FullFilePath' key is added to the row, downloading the
    file from S3 first when necessary.

    :param db: database object from the database.py class
    :type db: Database
    :param file_dict: dictionary with file information (mutated in place)
    :type file_dict: dict
    :param s3_obj: AWS S3 object from the aws_s3.py class
    :type s3_obj: AwsS3
    :param tmp_dir: path to a temporary directory for processing
    :type tmp_dir: str
    :param data_dir: path of the data_dir
    :type data_dir: str
    """
    # Parameter types whose Value is a file path (rather than a hash);
    # these additionally get resolved to a full local path.
    path_valued_types = ('bids_json_file', 'check_bval_filename', 'check_bvec_filename')
    parameter_types = (
        'md5hash',
        'file_blake2b_hash',
        'bids_json_file',
        'bids_json_file_blake2b_hash',
        'check_bval_filename',
        'check_bval_filename_blake2b_hash',
        'check_bvec_filename',
        'check_bvec_filename_blake2b_hash',
    )
    query = 'SELECT pf.ParameterFileID, pf.Value' \
            ' FROM parameter_file pf JOIN parameter_type pt USING (ParameterTypeID)' \
            ' WHERE pt.Name=%s AND pf.FileID=%s'
    for name in parameter_types:
        rows = db.pselect(query, (name, file_dict['FileID']))
        if rows:
            entry = rows[0]
            if name in path_valued_types:
                entry['FullFilePath'] = determine_file_full_path(entry['Value'], s3_obj, tmp_dir, data_dir)
            file_dict[name] = entry
def determine_file_full_path(file_rel_path, s3_obj, tmp_dir, data_dir):
    """
    Resolves a (possibly S3-hosted) relative file path to a full local path.

    Paths starting with 's3://' are downloaded into tmp_dir and the local
    copy's path is returned (on download failure a warning is printed and
    the would-be local path is still returned); any other path is resolved
    against data_dir.

    :param file_rel_path: relative file path to data_dir, or an s3:// URL
    :type file_rel_path: str
    :param s3_obj: AWS S3 object from the aws_s3.py class
    :type s3_obj: AwsS3
    :param tmp_dir: path to a temporary directory for processing
    :type tmp_dir: str
    :param data_dir: path of the data_dir
    :type data_dir: str
    :return: the full path to the file
    :rtype: str
    """
    if not file_rel_path.startswith('s3://'):
        return os.path.join(data_dir, file_rel_path)

    # S3-hosted file: fetch a local copy into the temporary directory.
    full_file_path = os.path.join(tmp_dir, os.path.basename(file_rel_path))
    try:
        s3_obj.download_file(file_rel_path, full_file_path)
    except Exception as err:
        print(
            f"[WARNING ] {file_rel_path} could not be downloaded from S3 bucket."
            f" Error was\n{err}"
        )
    return full_file_path
def update_parameter_file_hash(db, param_file_id, new_hash):
    """
    Stores a newly computed hash in the parameter_file table. Does nothing
    when either the row ID or the hash value is missing/empty.

    :param db: database object
    :type db: Database
    :param param_file_id: ParameterFileID of the row to update
    :type param_file_id: str
    :param new_hash: hash value to store
    :type new_hash: str
    """
    if param_file_id and new_hash:
        db.update(
            "UPDATE parameter_file SET Value=%s WHERE ParameterFileID=%s",
            (new_hash, param_file_id),
        )
def handle_physiological_files(db, data_dir, tmp_dir, s3_obj):
    """
    Recomputes the BLAKE2b hashes stored in physiological_parameter_file for
    every PhysiologicalFileID in the physiological_file table: the hash of the
    physiological file itself plus the hashes of its associated BIDS files
    (EEG JSON, channel, electrode, event and scans.tsv files) when present.
    Note: files stored on S3 are downloaded first so that the hash is computed
    on the data content, and the local copy is removed afterwards.

    :param db: database object from the database.py class
    :type db: Database
    :param data_dir: path of the data_dir
    :type data_dir: str
    :param tmp_dir: path to a temporary directory for processing
    :type tmp_dir: str
    :param s3_obj: AWS S3 object from the aws_s3.py class
    :type s3_obj: AwsS3
    """
    # (hash parameter key, associated file entry key) pairs populated by
    # query_hashes_and_associated_files_to_physiological_file_id()
    associated_pairs = (
        ('physiological_json_file_blake2b_hash', 'eegjson_file'),
        ('channel_file_blake2b_hash', 'channel_file'),
        ('electrode_file_blake2b_hash', 'electrode_file'),
        ('event_file_blake2b_hash', 'event_file'),
        # NOTE: 'bake2hash' (sic) is the actual parameter_type name in the DB
        ('physiological_scans_tsv_file_bake2hash', 'scans_tsv_file'),
    )

    # query list of PhysiologicalFileIDs to process
    phys_files_results = db.pselect('SELECT PhysiologicalFileID, FilePath FROM physiological_file', ())

    # loop through PhysiologicalFileIDs and get all associated files and hashes
    # stored in physiological_parameter_file
    for file_dict in phys_files_results:
        query_hashes_and_associated_files_to_physiological_file_id(db, file_dict, s3_obj, tmp_dir, data_dir)

        # update the physiological file's own blake2b hash
        file_full_path = determine_file_full_path(file_dict['FilePath'], s3_obj, tmp_dir, data_dir)
        if 'physiological_file_blake2b_hash' in file_dict.keys():
            update_phys_parameter_file_hash(
                db,
                file_dict['physiological_file_blake2b_hash']['PhysiologicalParameterFileID'],
                utilities.compute_blake2b_hash(file_full_path)
            )
            if file_dict['FilePath'].startswith('s3://') and os.path.exists(file_full_path):
                os.remove(file_full_path)

        # update the blake2b hash of every associated BIDS file present in the DB
        for hash_key, file_key in associated_pairs:
            _update_phys_associated_file_hash(db, file_dict, hash_key, file_key)


def _update_phys_associated_file_hash(db, file_dict, hash_key, file_key):
    """
    Recomputes and stores the blake2b hash of one file associated to a
    physiological file (identified by its file entry key and hash entry key
    in file_dict), deleting the temporary local copy when the file came from
    S3. No-op when either key is missing from file_dict.

    :param db: database object from the database.py class
    :type db: Database
    :param file_dict: dictionary with the physiological file information
    :type file_dict: dict
    :param hash_key: file_dict key of the stored hash entry to update
    :type hash_key: str
    :param file_key: file_dict key of the associated file entry
    :type file_key: str
    """
    if hash_key not in file_dict.keys() or file_key not in file_dict.keys():
        return
    new_blake2b_hash = utilities.compute_blake2b_hash(file_dict[file_key]['FullFilePath'])
    update_phys_parameter_file_hash(db, file_dict[hash_key]['PhysiologicalParameterFileID'], new_blake2b_hash)
    if file_dict[file_key]['Value'].startswith('s3://') \
            and os.path.exists(file_dict[file_key]['FullFilePath']):
        os.remove(file_dict[file_key]['FullFilePath'])
def query_hashes_and_associated_files_to_physiological_file_id(db, file_dict, s3_obj, tmp_dir, data_dir):
    """
    Queries physiological_parameter_file table for the different file paths and hashes stored associated to a given
    PhysiologicalFileID. Will also query tables physiological_channel, physiological_electrode and
    physiological_task_event to get the file path of those files as well. All results are stashed into
    file_dict (mutated in place).
    Note: if file is on S3, the file will be downloaded from S3 before computing the hash.

    :param db: database object from the database.py class
    :type db: Database
    :param file_dict: dictionary with the physiological file information (mutated in place)
    :type file_dict: dict
    :param data_dir: path of the data_dir
    :type data_dir: str
    :param tmp_dir: path to a temporary directory for processing
    :type tmp_dir: str
    :param s3_obj: AWS S3 object from the aws_s3.py class
    :type s3_obj: AwsS3
    """
    # NOTE: 'bake2hash' (sic) is the actual parameter_type name stored in the DB.
    list_of_parameter_type_to_query = [
        'channel_file_blake2b_hash',
        'electrode_file_blake2b_hash',
        'event_file_blake2b_hash',
        'physiological_file_blake2b_hash',
        'eegjson_file',
        'physiological_json_file_blake2b_hash',
        'scans_tsv_file',
        'physiological_scans_tsv_file_bake2hash'
    ]
    query = 'SELECT ppf.PhysiologicalParameterFileID, ppf.Value' \
            ' FROM physiological_parameter_file ppf JOIN parameter_type pt USING (ParameterTypeID)' \
            ' WHERE pt.Name=%s AND ppf.PhysiologicalFileID=%s'
    for param_type in list_of_parameter_type_to_query:
        results = db.pselect(query, (param_type, file_dict['PhysiologicalFileID']))
        if not results:
            continue
        # path-valued parameters additionally get resolved to a full local path
        if param_type in ['scans_tsv_file', 'eegjson_file']:
            results[0]['FullFilePath'] = determine_file_full_path(results[0]['Value'], s3_obj, tmp_dir, data_dir)
        file_dict[param_type] = results[0]
    # channel/electrode/event file paths live in their own tables, not in
    # physiological_parameter_file; fetch each one when it exists
    channel_file_results = db.pselect(
        "SELECT DISTINCT(FilePath) FROM physiological_channel WHERE PhysiologicalFileID=%s",
        (file_dict['PhysiologicalFileID'],)
    )
    if channel_file_results:
        file_dict['channel_file'] = {
            'Value': channel_file_results[0]['FilePath'],
            'FullFilePath': determine_file_full_path(channel_file_results[0]['FilePath'], s3_obj, tmp_dir, data_dir)
        }
    electrode_file_results = db.pselect(
        "SELECT DISTINCT(FilePath) FROM physiological_electrode WHERE PhysiologicalFileID=%s",
        (file_dict['PhysiologicalFileID'],)
    )
    if electrode_file_results:
        file_dict['electrode_file'] = {
            'Value': electrode_file_results[0]['FilePath'],
            'FullFilePath': determine_file_full_path(electrode_file_results[0]['FilePath'], s3_obj, tmp_dir, data_dir)
        }
    event_file_results = db.pselect(
        "SELECT DISTINCT(FilePath) FROM physiological_task_event WHERE PhysiologicalFileID=%s",
        (file_dict['PhysiologicalFileID'],)
    )
    if event_file_results:
        file_dict['event_file'] = {
            'Value': event_file_results[0]['FilePath'],
            'FullFilePath': determine_file_full_path(event_file_results[0]['FilePath'], s3_obj, tmp_dir, data_dir)
        }
def update_phys_parameter_file_hash(db, phys_param_file_id, new_hash):
    """
    Stores a newly computed hash in the physiological_parameter_file table.
    Does nothing when either the row ID or the hash value is missing/empty.

    :param db: database object
    :type db: Database
    :param phys_param_file_id: PhysiologicalParameterFileID of the row to update
    :type phys_param_file_id: str
    :param new_hash: hash value to store
    :type new_hash: str
    """
    if phys_param_file_id and new_hash:
        db.update(
            "UPDATE physiological_parameter_file SET Value=%s WHERE PhysiologicalParameterFileID=%s",
            (new_hash, phys_param_file_id),
        )
if __name__ == "__main__":
main()
| aces/Loris-MRI | tools/correct_blake2b_and_md5_hashes_in_database.py | correct_blake2b_and_md5_hashes_in_database.py | py | 18,913 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "lib.lorisgetopt.LorisGetOpt",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path.basename",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 50,
"usage_type": "attribute"
},
{
"api_name": "lib.databas... |
72340313954 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('portfolios', '0002_auto_20170514_1729'),
]
operations = [
migrations.AlterField(
model_name='portfolioprovider',
name='type',
field=models.IntegerField(choices=[(0, 'BetaSmartz'), (1, 'Aon'), (2, 'Krane'), (3, 'Lee')]),
),
]
| zakvan2022/Betasmartz | portfolios/migrations/0003_auto_20170519_0144.py | 0003_auto_20170519_0144.py | py | 469 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.db.migrations.Migration",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.migrations",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.db.migrations.AlterField",
"line_number": 14,
"usage_type": "call"
},
{... |
43495756114 | import numpy as np
from numpy import ndarray
from classes.utils import r2oos
from classes.data_loader import DataLoader
from sklearn.linear_model import ElasticNet
from sklearn.model_selection import GridSearchCV
class ElasticNet_Model(object):
def __init__(self, data_loader: DataLoader, alpha: float = 1.0, l1_ratio: float = 0.5):
self.data_loader = data_loader
self.alpha = alpha
self.l1_ratio = l1_ratio
self.cols = ["be_me", "ret_12_1", "market_equity", "ret_1_0", "rvol_252d", "beta_252d", "qmj_safety", "rmax1_21d", "chcsho_12m",
"ni_me", "eq_dur", "ret_60_12", "ope_be", "gp_at", "ebit_sale", "at_gr1", "sale_gr1", "at_be", "cash_at", "age", "z_score"]
self.beta_list = np.zeros((len(self.cols), 3))
self.intercept_list = np.zeros(3)
self.objective_list = np.zeros(3)
self.sklearn_model = None
# # train with sklearn
# def fit(self, start: int, end: int) -> None:
# df = self.data_loader.slice(start, end)
# x_train = self.data_loader.get_x(df)
# y_train = self.data_loader.get_y(df)
# model = ElasticNet(alpha=self.alpha, l1_ratio=self.l1_ratio)
# model.fit(x_train, y_train)
# self.sklearn_model = model
# self.beta_list[:, 0] = model.coef_
# self.intercept_list[0] = model.intercept_
# self.objective_list[0] = np.linalg.norm(model.coef_, ord=1)
@classmethod
def validate(self, data_loader: DataLoader,
train_start: int, train_end: int,
validate_start: int, validate_end: int,
alpha_values: list,
l1_ratio_values: list):
"""
Perform Grid search on hyperparameters alpha and l1_ration using 5-fold cross validation
on training and validation set, using r2 score as the metric
:param data_loader: DataLoader object
:param train_start:
:param validate_end:
:param alpha_values: List of alpha values to conduct grid search
:param l1_ratio_values: List of l1 ratio values to conduct grid search
"""
validate_df = data_loader.slice(validate_start, validate_end)
train_df = data_loader.slice(train_start, train_end)
x_validate = data_loader.get_x(validate_df)
y_validate = data_loader.get_y(validate_df)
x_train = data_loader.get_x(train_df)
y_train = data_loader.get_y(train_df)
best_r2, best_model, best_alpha, best_l1 = -1, None, None, None
for alpha in alpha_values:
for l1 in l1_ratio_values:
model = ElasticNet(alpha=alpha, l1_ratio=l1)
model.fit(x_train, y_train)
preds = model.predict(x_validate)
r2 = r2oos(preds, y_validate)
if r2 > best_r2:
best_r2 = r2
best_model, best_alpha, best_l1 = model, alpha, l1
return best_model, best_r2, best_alpha, best_l1
# def predict(self, start: int, end: int) -> ndarray:
# # Slice the data for the prediction period
# df = self.data_loader.slice(start, end)
# x_pred = self.data_loader.get_x(df)
# return self.sklearn_model.predict(x_pred)
@staticmethod
def evaluate(data: DataLoader, best_model, start: int, end: int) -> tuple:
"""
Give evaluation metric of a trained/fitted model on a given test/validation period
:param data: Data loader
:param start: period start year
:param end: period end year
:return: an evaluation metric as a list of floating numbers
"""
monthly_r2_scores = []
start_year, end_year = start // 10000, end // 10000
monthly_predictions = []
for year in range(start_year, end_year):
for month in range(1, 13):
start = int(f"{year}{month:02d}01")
if month == 12:
end = int(f"{year + 1}0101")
else:
end = int(f"{year}{month + 1:02d}01")
df = data.slice(start, end)
x_test = data.get_x(df)
y_actual = data.get_y(df)
y_pred = best_model.predict(x_test)
monthly_predictions.append(y_pred)
# Calculate R-squared for the month
r2 = r2oos(y_pred, y_actual)
monthly_r2_scores.append(r2)
return monthly_r2_scores, monthly_predictions
| Sho-Shoo/36490-F23-Group1 | classes/elasticNet_model.py | elasticNet_model.py | py | 4,506 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "classes.data_loader.DataLoader",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
... |
18794965698 | import os
import sys
import transaction
import json
from pyramid.paster import (
get_appsettings,
setup_logging,
)
from pyramid.scripts.common import parse_vars
from ..models.meta import Base
from ..models import (
get_engine,
get_session_factory,
get_tm_session,
)
from ..models import Song, Lyric, Tag, Page, User, Funds
def usage(argv):
cmd = os.path.basename(argv[0])
print('usage: %s <config_uri> [var=value]\n'
'(example: "%s development.ini")' % (cmd, cmd))
sys.exit(1)
def main(argv=sys.argv):
if len(argv) < 2:
usage(argv)
config_uri = argv[1]
options = parse_vars(argv[2:])
setup_logging(config_uri)
settings = get_appsettings(config_uri, options=options)
engine = get_engine(settings)
Base.metadata.create_all(engine)
session_factory = get_session_factory(engine)
with transaction.manager:
dbsession = get_tm_session(session_factory, transaction.manager)
editor = User(name='editor', role='editor')
editor.set_password('editor')
dbsession.add(editor)
basic = User(name='basic', role='basic')
basic.set_password('basic')
dbsession.add(basic)
page = Page(
name='FrontPage',
title='Prva Stran',
subtitle='Dobrodosli',
creator=editor,
data='Novice o Kripto Kojnih.',
)
dbsession.add(page)
about = Page(
name='About',
title='O Nas',
subtitle='O Nas',
creator=basic,
data='GoOpen',
)
dbsession.add(about)
with open('./cryptodokladi/scripts/capoeiralyrics_2017-12-18.json') as data_file:
data = json.load(data_file)
for song in data:
s = Song(title = song['title'],
subtitle = song['subtitle'])
if song['ytplayer'] is not None:
s.ytplayer = song['ytplayer']
dbsession.add(s)
for key in song['lyrics'].keys():
l = Lyric(language=key, text=song['lyrics'][key].replace('\n', '<br />'), song=s)
dbsession.add(l)
with open('./cryptodokladi/scripts/tags_2017-12-18.json') as data_file:
data = json.load(data_file)
for s in data:
song = dbsession.query(Song).filter_by(title=s['title']).first()
if song is not None:
for t in s['tags']:
tag = dbsession.query(Tag).filter_by(text=t).first()
if tag is None:
tag = Tag(text = t)
dbsession.add(tag)
song.tags.append(tag)
| alko89/cryptodokladi | cryptodokladi/scripts/initializedb.py | initializedb.py | py | 2,872 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.basename",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "sys.exit",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sys.argv",
"line_number": ... |
40061188327 | # https://www.acmicpc.net/problem/18352
# N๊ฐ์ ๋์, M๊ฐ์ ๋๋ก
# ๋ชจ๋ ๋๋ก์ ๊ฑฐ๋ฆฌ 1
# ํน์ ๋์ X๋ฅด ๋ถํฐ ์ถ๋ฐํ์ฌ ๋๋ฌํ ์ ์๋ ๋ชจ๋ ๋์ ์ค์ ์ต๋จ ๊ฑฐ๋ฆฌ๊ฐ K์ธ ๋์ ๋ฒํธ ์ถ๋ ฅ
import sys
from collections import defaultdict
from collections import deque
def BFS(X):
qu = deque()
qu.append(X)
dist[X] = 0
while qu:
node = qu.popleft()
for next_node in graph[node]:
if dist[next_node] == -1:
dist[next_node] = dist[node]+1
qu.append(next_node)
# ์
๋ ฅ
N, M, K, X = map(int, sys.stdin.readline().split())
graph = defaultdict(list)
for _ in range(M):
A, B = map(int, sys.stdin.readline().split())
graph[A].append(B)
# X๋ก ๋ถํฐ ์ต๋จ ๊ฑฐ๋ฆฌ ๊ณ์ฐ
dist = [-1 for _ in range(N + 1)] # dist[i]: i๊น์ง์ ์ต๋จ๊ฑฐ๋ฆฌ
BFS(X)
# ์ถ๋ ฅ
exist = False
for i in range(1, N+1):
if dist[i] == K:
print(i)
exist = True
if not exist:
print(-1) | hyein99/Algorithm_python_for_coding_test | Part3/ch13_DFS BFS ๋ฌธ์ /15_ํน์ ๊ฑฐ๋ฆฌ์ ๋์ ์ฐพ๊ธฐ.py | 15_ํน์ ๊ฑฐ๋ฆฌ์ ๋์ ์ฐพ๊ธฐ.py | py | 999 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "collections.deque",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.stdin.readline",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sys.stdin",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "collections.defau... |
4966009254 | from gtts import gTTS
from playsound import playsound
import os
import queue
import threading
import logging
logging.basicConfig(level=logging.INFO)
class AudioPlayer:
def __init__(self):
self.audio_queue = queue.Queue()
def play_audio(self, file_path):
"""
Play the audio and signal completion
:param file_path: path to the audio file
:return: None
"""
try:
playsound(file_path)
except Exception as e:
raise ValueError("\nError playing audio: " + str(e))
self.audio_queue.put(True)
class TextToSpeechConverter:
def __init__(self, audio_player):
self.audio_player = audio_player
def convert_text_audio(self, text:str):
"""
Convert text to audio
:param text: text to be converted
:return: None
"""
try:
# Language in which you want to convert
language:str = 'ca'
# Passing the text and language to the engine
myobj = gTTS(text=text, lang=language, slow=False)
# Saving the converted audio in an mp3 file named "audio"
myobj.save("audio.mp3")
except Exception as e:
raise ValueError("\nError converting text to audio: " + str(e))
def text_to_speech(self):
while True:
try:
# The text that you want to convert to audio
mytext:str = input("Sentence:\n")
# Create separate threads to save and play the audio
t1 = threading.Thread(target=self.convert_text_audio, args=(mytext,))
t2 = threading.Thread(target=self.audio_player.play_audio, args=("audio.mp3",))
# Start the threads
t1.start()
t1.join()
t2.start()
t2.join()
# Wait for the audio to finish playing
self.audio_player.audio_queue.get()
self.audio_player.audio_queue.task_done()
# Remove the audio file
os.remove("audio.mp3")
except KeyboardInterrupt:
logging.info("\nExiting text_to_speech...")
break
except ValueError as ve:
logging.error(ve)
except Exception as e:
logging.error("An error occurred: " + str(e))
# Create an instance of the AudioPlayer class
audio_player = AudioPlayer()
# Create an instance of the TextToSpeechConverter class
text_to_speech_converter = TextToSpeechConverter(audio_player)
# Call the text_to_speech method
text_to_speech_converter.text_to_speech()
| TheoTime01/ChatMoov | text_to_speech/text_to_speech.py | text_to_speech.py | py | 2,681 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "queue.Queue",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "playsound.playsound",... |
17684234926 | # -*- coding: utf-8 -*-
"""
Created on Thu Jan 5 17:12:07 2017
@author: Aniket
"""
import gym
import universe
import random
def determine_turn(turn, observation_n, j, total_sum, prev_total_sum, reward_n):
if(j>=15):
if(total_sum/j) == 0:
turn = True
else:
turn = False
#total_sum = 0
j = 0
prev_total_sum = total_sum
total_sum = 0
else:
turn = False
if(observation_n != None):
j += 1
total_sum += reward_n
return(turn, j, total_sum, prev_total_sum)
def main():
env = gym.make('flashgames.CoasterRacer-v0')
observation_n = env.reset()
n = 0
j = 0
total_sum = 0
prev_total_sum = 0
turn= False
left = [('KeyEvent', 'ArrowUp', True) ,('KeyEvent', 'ArrowLeft', True), ('KeyEvent', 'ArrowRight', False)]
right = [('KeyEvent', 'ArrowUp', True) ,('KeyEvent', 'ArrowLeft', False), ('KeyEvent', 'ArrowRight', True)]
forward = [('KeyEvent', 'ArrowUp', True) ,('KeyEvent', 'ArrowLeft', False), ('KeyEvent', 'ArrowRight', False)]
while True:
n += 1
if(n > 1):
if(observation_n[0] != None):
prev_score = reward_n[0]
if(turn):
event = random.choice([left,right])
action_n = [event for ob in observation_n]
turn = False
elif(~turn):
action_n = [forward for ob in observation_n]
if(observation_n[0] != None):
turn,j,total_sum,prev_total_sum = determine_turn(turn, observation_n[0], j, total_sum, prev_total_sum, reward_n[0])
observation_n,reward_n,done_n,info = env.step(action_n)
env.render()
if __name__ == '__main__':
main() | aniketparsewar/Machine-Learning | OpenAI_Universe.py | OpenAI_Universe.py | py | 1,961 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gym.make",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "random.choice",
"line_number": 63,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.