seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
27806818049 | from typing import Self, Any
from dataclasses import dataclass, field
from hashlib import md5
import numpy as np
import numpy.typing as npt
from collections import deque
from queue import PriorityQueue
# Puzzle input (passcode) and the goal cell: bottom-right of the 4x4 vault.
input = "pxxbnzuo"  # NOTE: intentionally shadows the builtin; kept for compatibility
target = np.array([3, 3], dtype=np.int8)
# Each entry: (direction letter, position delta, index of the door character
# in the md5 hash).  Annotation fixed: this is a list of tuples, not a tuple.
movements: list[tuple[str, npt.NDArray[np.int_], int]] = [
    ("U", np.array([0, -1], dtype=np.int8), 0),
    ("D", np.array([0, 1], dtype=np.int8), 1),
    ("L", np.array([-1, 0], dtype=np.int8), 2),
    ("R", np.array([1, 0], dtype=np.int8), 3),
]


@dataclass
class Path:
    """A partial walk through the vault: passcode, moves taken so far, and
    the current grid position."""

    input: str = input
    path: str = ""
    position: npt.NDArray[np.int_] = field(default_factory=lambda: np.zeros(2, dtype=np.int8))

    def next(self) -> list["Path"]:
        """Return every legal one-step extension of this path.

        A door is open when the corresponding hash character is one of
        'b'..'f' (the hash is hex, so `> "a"` is equivalent).  A path that
        has already reached the target is never extended.
        """
        if self.is_target():
            return []
        hash = md5((self.input + self.path).encode()).hexdigest()[:4]
        result: list[Path] = []
        for direction, movement, h_index in movements:
            new_pos = self.position + movement
            if 0 <= new_pos[0] < 4 and 0 <= new_pos[1] < 4 and hash[h_index] > "a":
                result.append(Path(self.input, self.path + direction, new_pos))
        return result

    def is_target(self) -> bool:
        """True when the position equals the target cell.

        Bug fix: the previous `(position - target).sum() == 0` also matched
        positions whose coordinate differences cancel out (e.g. [2, 4] vs
        [3, 3]); element-wise equality is required.
        """
        return bool(np.array_equal(self.position, target))
def find_shortest_path(input: str = input) -> Path:
    """Breadth-first search for the shortest path to the target.

    BFS explores paths in order of increasing length, so the first path
    that reaches the target is a shortest one.

    Raises:
        ValueError: if the search space is exhausted without reaching the
            target.  (Previously the function fell off the end and returned
            None implicitly, which crashed the caller with AttributeError
            when it accessed `.path`.)
    """
    queue: deque[Path] = deque([Path(input)])
    while queue:
        current = queue.popleft()
        if current.is_target():
            return current
        queue.extend(current.next())
    raise ValueError(f"no path to target for passcode {input!r}")
def find_longest_path(input: str = input) -> int:
    """Exhaustively walk every path and return the length of the longest
    one that ends on the target, or -1 if no path reaches it."""
    pending: deque[Path] = deque([Path(input)])
    best = -1
    while pending:
        candidate = pending.popleft()
        if candidate.is_target():
            best = max(best, len(candidate.path))
        # next() yields nothing for paths already on the target, so this
        # never extends a finished path.
        pending.extend(candidate.next())
    return best
# Part 1: shortest door sequence; Part 2: length of the longest valid path.
print("Solution 1:", find_shortest_path().path)
print("Solution 2:", find_longest_path())
| matthiasBender/adventofcode_python | matthias/2016/day17.py | day17.py | py | 1,913 | python | en | code | 0 | github-code | 13 |
39133022700 | # training worker, accepts input from the oracle cacher
# this is baseline no caching and no prefetch
# Standard library
import argparse
import copy
import logging
import os
import queue
import sys
import threading
import time
from collections import defaultdict
from operator import itemgetter

# Third-party
import numpy as np
import torch
import torch.distributed.rpc as rpc
import torch.nn as nn
import torch.optim as optim

# Local
import utils
class DistTrainModel(nn.Module):
    """Baseline distributed training worker model: no caching, no prefetch.

    Dense features pass through a bottom MLP; sparse features are resolved
    to embeddings fetched over RPC from the embedding server (worker_2);
    the two are combined by a feature-interaction op and scored by a top
    MLP whose output feeds the loss.
    """

    def __init__(
        self,
        emb_size=1,
        ln_top=None,
        ln_bot=None,
        sigmoid_bot=-1,
        sigmoid_top=-1,
        feature_interaction="dot",
        interact_itself=False,
        loss_function="bce",
        worker_id=0,
        lookahead_value=200,
        device="cuda:0",
    ):
        """
        Args:
            emb_size: Size of each sparse embedding
            ln_top (np.array): Structure of top MLP
            ln_bot (np.array): Structure of bottom MLP
            sigmoid_bot (int): Layer index receiving a Sigmoid in the bottom MLP
            sigmoid_top (int): Layer index receiving a Sigmoid in the top MLP
            feature_interaction (str): "dot" or "cat"
            interact_itself (bool): include self-interaction pairs for "dot"
            loss_function (str): "bce" or "mse"
            worker_id (int): rank of this worker in the RPC world
            lookahead_value (int): how many batches ahead prefetching may run
            device (str): torch device the MLPs and loss live on
        """
        super(DistTrainModel, self).__init__()
        self.emb_size = emb_size
        self.ln_top = ln_top
        self.ln_bot = ln_bot
        self.sigmoid_bot = sigmoid_bot
        self.sigmoid_top = sigmoid_top
        self.feature_interaction = feature_interaction
        self.interact_itself = interact_itself
        self.lookahead_value = lookahead_value
        self.device = device
        self.bot_mlp = self.create_mlp(self.ln_bot, self.sigmoid_bot)
        self.top_mlp = self.create_mlp(self.ln_top, self.sigmoid_top)
        self.top_mlp.to(self.device)
        self.bot_mlp.to(self.device)
        if loss_function == "bce":
            self.loss_fn = torch.nn.BCELoss(reduction="mean")
        elif loss_function == "mse":
            # Bug fix: this branch previously read `loss.function`, an
            # undefined name, so selecting --loss-function mse raised
            # NameError before training started.
            self.loss_fn = torch.nn.MSELoss(reduction="mean")
        else:
            # Previously an unknown value silently left self.loss_fn unset
            # and crashed later; fail fast instead (argparse restricts the
            # choices, so this is only reachable from direct construction).
            sys.exit("Unsupported loss function")
        self.loss_fn.to(self.device)
        # this will hold the cache
        # self.local_cache = nn.ParameterDict({})
        # self.local_cache_ttl = dict()
        # this will hold the prefetch values
        # self.prefetch_cache = nn.ParameterDict({})
        # self.prefetch_cache_ttl = dict()
        # self.relevant_local_cache = nn.ParameterDict({})
        self.relevant_prefetch_cache = nn.ParameterDict({})
        self.relevant_prefetch_cache_ids = dict()
        self.train_queue = queue.Queue(200)
        self.prefetch_queue = queue.Queue()
        self.prefetch_futures_queue = queue.Queue()
        self.prefetch_queue_ttl = queue.Queue()
        self.delete_element_queue = queue.Queue()
        self.worker_id = worker_id
        self.worker_name = f"worker_{worker_id}"
        self.current_train_epoch = 0

    def fetch_embs(self, iter_to_fetch, emb_to_fetch):
        """
        Asynchronously fetch the embeddings for one iteration from the
        embedding server (worker_2); returns the RPC future.
        """
        prefetch_structure = dict()
        prefetch_structure[iter_to_fetch] = emb_to_fetch
        # need to clean up all these RPC calls
        fut = rpc.rpc_async("worker_2", get_embedding, args=(prefetch_structure,))
        return fut

    def create_mlp(self, ln, sigmoid_layer):
        """Build an MLP from the layer sizes in `ln`.

        Layer index `sigmoid_layer` is followed by a Sigmoid; every other
        layer gets a ReLU.  Weights use the Xavier-style init from the
        original DLRM reference code.
        """
        layers = nn.ModuleList()
        for i in range(0, ln.size - 1):
            n = ln[i]
            m = ln[i + 1]
            LL = nn.Linear(int(n), int(m), bias=True)
            # some xavier stuff the original pytorch code was doing
            mean = 0.0
            std_dev = np.sqrt(2 / (m + n))
            W = np.random.normal(mean, std_dev, size=(m, n)).astype(np.float32)
            std_dev = np.sqrt(1 / m)
            bt = np.random.normal(mean, std_dev, size=m).astype(np.float32)
            LL.weight.data = torch.tensor(W, requires_grad=True)
            LL.bias.data = torch.tensor(bt, requires_grad=True)
            layers.append(LL)
            if i == sigmoid_layer:
                layers.append(nn.Sigmoid())
            else:
                layers.append(nn.ReLU())
        return torch.nn.Sequential(*layers)

    def apply_mlp(self, dense_x, mlp_network):
        """
        Apply MLP on the features
        """
        return mlp_network(dense_x)

    def apply_emb(self, lS_i):
        """
        Resolve every (table, embedding-id) pair in `lS_i` against the
        prefetch cache; exits the process on a cache miss.
        """
        fetched_embeddings = list()
        for table_id, emb_id in enumerate(lS_i):
            # this outer for loop can be parallelized
            # we will optimize this for loop some other day
            # this has branching and if checks, I really don't like it
            # but I am commited to this at this point. So not doing anything
            # about this
            emb_by_id = list()
            emb_found = False
            for embs in emb_id:
                lookup_id = (table_id, embs.item()).__str__()
                # first look it up in the cache
                if not emb_found:
                    try:
                        emb_fetched = self.relevant_prefetch_cache.get_parameter(
                            lookup_id
                        )
                        emb_found = True
                    except AttributeError:
                        # if embedding is not found
                        emb_found = False
                # if not found look it up the prefetech
                if not emb_found:
                    # element not found
                    # NOTE(review): `logger` is created in the __main__ block,
                    # so this path fails if the module is imported elsewhere.
                    logger.info(f"Lookup ID not found {lookup_id}")
                    sys.exit("Embedding not found in prefetch nor in local cache")
                emb_by_id.append(emb_fetched)
            concatenated_emb = torch.cat(emb_by_id)
            concatenated_emb = concatenated_emb.reshape(len(emb_id), -1)
            fetched_embeddings.append(concatenated_emb)
        return fetched_embeddings

    def interact_features(self, x, ly):
        """
        Interaction between dense and embeddings
        """
        # Copied from interact features function of original code
        if self.feature_interaction == "dot":
            (batch_size, d) = x.shape
            T = torch.cat([x] + ly, dim=1).view((batch_size, -1, d))
            Z = torch.bmm(T, torch.transpose(T, 1, 2))
            _, ni, nj = Z.shape
            offset = 1 if self.interact_itself else 0
            li = torch.tensor([i for i in range(ni) for j in range(i + offset)])
            lj = torch.tensor([j for i in range(nj) for j in range(i + offset)])
            Zflat = Z[:, li, lj]
            R = torch.cat([x] + [Zflat], dim=1)
        elif self.feature_interaction == "cat":
            R = torch.cat([x] + ly, dim=1)
        else:
            sys.exit("Unsupported feature interaction")
        return R

    def forward(self, dense_x, lS_i, target, emb_to_fetch):
        """
        Forward pass of the training.

        Overlaps the async embedding fetch with the bottom-MLP compute,
        then interacts dense and sparse features and scores them with the
        top MLP.  Returns the scalar loss.
        """
        # first we perform bottom MLP
        fut = self.fetch_embs(self.current_train_epoch, emb_to_fetch)
        x = self.apply_mlp(dense_x, self.bot_mlp)
        val = fut.wait()
        self.relevant_prefetch_cache_ids = list(val.keys())
        val_str = {k.__str__(): nn.Parameter(val[k].to(self.device)) for k in val}
        self.relevant_prefetch_cache.update(val_str)
        # need to fetch the embeddings
        # at this point we will either have embeddings in the local cache or
        # global cache
        # TODO: In future include more complicated processing
        ly = self.apply_emb(lS_i)
        # feature interaction
        z = self.interact_features(x, ly)
        # pass through top mlp
        p = self.apply_mlp(z, self.top_mlp)
        loss = self.loss_fn(p, target)
        return loss
def update_train_queue(input_dict):
    # RPC endpoint: the oracle cacher pushes one training example here.
    # train_queue is bounded (size 200), so this blocks when the worker
    # falls behind, throttling the sender.
    comp_intensive_model.train_queue.put(input_dict)
    return 1
def fill_prefetch_cache():
    """Prefetch loop: keep up to `lookahead_value` embedding fetches in
    flight, resolving the oldest future on every iteration.

    NOTE(review): this writes to comp_intensive_model.prefetch_cache and
    prefetch_cache_ttl, which are commented out in __init__ in this
    baseline file -- confirm this function is unused (dead code) in the
    no-cache configuration.
    """
    num_times_run = 0
    try:
        while num_times_run <= comp_intensive_model.lookahead_value:
            # Issue a new async fetch for the next queued request.
            val = comp_intensive_model.prefetch_queue.get(block=True)
            fut = rpc.rpc_async("worker_2", get_embedding, args=(val,))
            ttl_val = list(val.keys())[0]
            comp_intensive_model.prefetch_futures_queue.put(fut)
            comp_intensive_model.prefetch_queue_ttl.put(ttl_val)
            # keep getting prefetch queue
            fut = comp_intensive_model.prefetch_futures_queue.get(block=True)
            ttl_val = comp_intensive_model.prefetch_queue_ttl.get(block=True)
            val = fut.wait()
            val_str = {
                k.__str__(): nn.Parameter(val[k].to(comp_intensive_model.device))
                for k in val
            }
            val_ttl = {k: ttl_val for k in val}
            print("Added prefetch cache {}".format(ttl_val))
            comp_intensive_model.prefetch_cache.update(val_str)
            comp_intensive_model.prefetch_cache_ttl.update(val_ttl)
            num_times_run += 1
    except queue.Empty:
        # NOTE(review): a blocking get() never raises queue.Empty, so this
        # handler is unreachable -- confirm.
        pass
def update_prefetch_queue(input_dict):
    # RPC endpoint: enqueue a {iteration: embedding-ids} prefetch request.
    comp_intensive_model.prefetch_queue.put(input_dict, block=True)
    # qsize is only read for the (commented-out) debug print below.
    size_of_queue = comp_intensive_model.prefetch_queue.qsize()
    # print("Prefetch queue size at insertion {}".format(size_of_queue))
    return 1
def launch_cache_cleanup():
    """
    Launch cache cleanup

    Blocks forever, draining iteration numbers from delete_element_queue.
    NOTE(review): DistTrainModel defines no clean_up_caches method in this
    file, and a blocking get() never raises queue.Empty -- confirm this
    loop is dead code in the baseline configuration.
    """
    print("Cache cleanup launched")
    while True:
        try:
            iter_to_cleanup = comp_intensive_model.delete_element_queue.get(block=True)
            # print("iter to cleanup {}".format(iter_to_cleanup))
            comp_intensive_model.clean_up_caches(iter_to_cleanup)
        except queue.Empty:
            pass
def exit_worker(input_dict):
    # RPC endpoint: tear down this worker's RPC participation.
    # `input_dict` is unused; the signature matches the other endpoints.
    rpc.shutdown()
    return 1
def cache_eviction_update(update_dict):
    # This is dummy function real one is in embedding server
    """
    Group evicted embeddings by table id and push them back to the
    embedding server.

    Args:
        update_dict: key - (table_id, emb_id): tensor to store
    """
    emb_grouped_by_table_id = defaultdict(list)
    emb_id_grouped_by_table_id = defaultdict(list)
    for key in update_dict:
        table_id, emb_id = key
        emb_grouped_by_table_id[table_id].append(update_dict[key])
        emb_id_grouped_by_table_id[table_id].append(emb_id)
    for key in emb_grouped_by_table_id:
        # Bug fix: this previously assigned to `grouped_by_table_id`, an
        # undefined name, raising NameError on first use.
        emb_grouped_by_table_id[key] = torch.tensor(emb_grouped_by_table_id[key])
        emb_id_grouped_by_table_id[key] = torch.tensor(emb_id_grouped_by_table_id[key])
    embedding_object.update_embeddings(
        emb_grouped_by_table_id, emb_id_grouped_by_table_id
    )
    return 1
def get_embedding(input_list):
    # This is dummy function real one is in embedding server
    """
    These are prefetch embeddings

    Args:
        input_list (list(tuples)): List of tuples, tuples(table_id, emb_id)
    """
    emb_decompressed = defaultdict(list)
    # Bug fix: this loop previously iterated the freshly-created (empty)
    # `emb_decompressed` dict instead of `input_list`, so nothing was ever
    # grouped and the server always received an empty request.
    for table_id, emb_id in input_list:
        emb_decompressed[table_id].append(emb_id)
    fetched_embeddings = embedding_object.get_embeddings(emb_decompressed)
    return fetched_embeddings
def main(args):
    """Worker entry point: build the model, join the RPC world, and run the
    training loop forever, consuming examples pushed into train_queue via
    the update_train_queue RPC endpoint.
    """
    expected_iter = 0
    iter_overflow = dict()  # holds examples that arrived out of order
    os.environ["MASTER_ADDR"] = args.master_ip
    os.environ["MASTER_PORT"] = args.master_port
    # NOTE(review): np.fromstring is deprecated for text parsing in recent
    # NumPy releases -- confirm the pinned NumPy version still supports it.
    ln_bot = np.fromstring(args.arch_mlp_bot, dtype=int, sep="-")
    # The first top-MLP layer size depends on the interaction op's output
    # width, so it is computed and prepended to the user-supplied spec.
    arch_mlp_top_adjusted = (
        str(
            utils.get_first_layer_size_top_mlp(
                args.arch_interaction_op,
                args.arch_interaction_itself,
                ln_bot,
                args.ln_emb,
            )
        )
        + "-"
        + args.arch_mlp_top
    )
    ln_top = np.fromstring(arch_mlp_top_adjusted, dtype=int, sep="-")
    # Module-global so the RPC endpoint functions above can reach the model.
    global comp_intensive_model
    comp_intensive_model = DistTrainModel(
        emb_size=args.emb_size,
        ln_bot=ln_bot,
        ln_top=ln_top,
        sigmoid_bot=-1,
        sigmoid_top=ln_top.size - 2,
        loss_function=args.loss_function,
        feature_interaction=args.arch_interaction_op,
        worker_id=args.worker_id,
        lookahead_value=args.lookahead_value,
        device=args.device,
    )
    # rpc fuctions
    # rpc setup
    rpc.init_rpc(
        comp_intensive_model.worker_name,
        rank=args.worker_id,
        world_size=args.world_size,
    )
    while True:
        try:
            total_start = time.time()
            train_example = comp_intensive_model.train_queue.get(block=True)
            current_epoch = list(train_example.keys())[0]
            # handling rpc potential reordering
            if current_epoch != expected_iter:
                # move current train example to the dictionary
                iter_overflow[current_epoch] = copy.deepcopy(train_example)
                # check if we have the expected iter in the overflow
                if expected_iter in iter_overflow:
                    train_example = iter_overflow.pop(expected_iter)
                    current_epoch = list(train_example.keys())[0]
                    expected_iter += 1
                else:
                    # pop more and see if we find what we want
                    continue
            else:
                expected_iter += 1
            print("Current Iter {}".format(current_epoch))
            # logger.info(f"Current Iter {current_epoch}")
            comp_intensive_model.current_train_epoch = current_epoch
            # logger.info(f"Size local cache {len(comp_intensive_model.local_cache)}")
            # logger.info(
            #     f"Size prefetch cache {len(comp_intensive_model.prefetch_cache)}"
            # )
            # logger.info(
            #     f"State of Prefetch Cache {comp_intensive_model.prefetch_cache}"
            # )
            # TODO: I think this should deeply simplify a lot of things
            # logger.info(
            #     "Elements from Prefetch Cache {}".format(
            #         train_example[current_epoch]["train_data"][
            #             "elements_from_prefetch"
            #         ]
            #     )
            # )
            # logger.info(
            #     "Elements from Local Cache {}".format(
            #         train_example[current_epoch]["train_data"][
            #             "elements_from_cache"
            #         ]
            #     )
            # )
            # logger.info(
            #     "Prefetch Cache {}".format(comp_intensive_model.prefetch_cache.keys())
            # )
            # )
            # logger.info(
            #     "Prefetch Cache TTL {}".format(
            #         comp_intensive_model.prefetch_cache_ttl
            #     )
            # )
            # logger.info(
            #     "Local Cache {}".format(comp_intensive_model.local_cache.keys())
            # )
            # logger.info(
            #     "Local Cache TTL {}".format(comp_intensive_model.local_cache_ttl)
            # )
            # comp_intensive_model.fetch_elements(
            #     train_example[current_epoch]["train_data"]["elements_from_prefetch"],
            #     train_example[current_epoch]["train_data"]["elements_from_cache"],
            # )
            forward_start = time.time()
            # print(train_example[current_epoch]["train_data"]["emb_to_fetch"])
            loss = comp_intensive_model.forward(
                train_example[current_epoch]["train_data"]["dense_x"].to(
                    comp_intensive_model.device
                ),
                train_example[current_epoch]["train_data"]["sparse_vector"],
                train_example[current_epoch]["train_data"]["target"].to(
                    comp_intensive_model.device
                ),
                train_example[current_epoch]["train_data"]["emb_to_fetch"],
            )
            print("loss {}".format(loss))
            forward_stop = time.time()
            print("Time for forward {}".format(forward_stop - forward_start))
            # NOTE(review): a fresh optimizer is built every iteration
            # because the set of cached embedding parameters changes per
            # batch; plain SGD holds no state, so this is correct, just
            # wasteful -- confirm before adding momentum.
            optimizer = optim.SGD(
                [
                    # {
                    #     "params": comp_intensive_model.relevant_local_cache.parameters(),
                    #     "lr": 0.01,
                    # },
                    {
                        "params": comp_intensive_model.relevant_prefetch_cache.parameters(),
                        "lr": 0.01,
                    },
                    {
                        "params": comp_intensive_model.top_mlp.parameters(),
                        "lr": 0.01,
                    },
                    {
                        "params": comp_intensive_model.bot_mlp.parameters(),
                        "lr": 0.01,
                    },
                ]
            )
            backward_start = time.time()
            loss.backward()
            backward_stop = time.time()
            print("Time for backward {}".format(backward_stop - backward_start))
            optimizer.step()
            optimizer.zero_grad()
            # Ship the updated embedding parameters back to the embedding
            # server and drop them from the local parameter dict.
            dict_to_update = dict()
            with torch.no_grad():
                for keys in comp_intensive_model.relevant_prefetch_cache_ids:
                    dict_to_update[keys] = comp_intensive_model.relevant_prefetch_cache[
                        keys.__str__()
                    ].cpu()
                    del comp_intensive_model.relevant_prefetch_cache[keys.__str__()]
                rpc.rpc_sync(
                    "worker_2", cache_eviction_update, args=((dict_to_update,))
                )
            # moving elements from prefetch cache to local cache
            # elements_to_cache = train_example[current_epoch]["cache_elements"]
            # with torch.no_grad():
            #     temp_dict = dict()
            #     for elem in elements_to_cache:
            #         # moving the tensor
            #         comp_intensive_model.local_cache[
            #             elem[0].__str__()
            #         ] = comp_intensive_model.relevant_prefetch_cache[elem[0].__str__()]
            #         # moving the ttl
            #         comp_intensive_model.local_cache_ttl[elem[0]] = elem[1]
            # TODO: Move to a thread
            # update TTL
            # lease_extensions = train_example[current_epoch]["lease_extensions"]
            # for elem, new_ttl in lease_extensions:
            #     comp_intensive_model.local_cache_ttl[elem] = new_ttl
            # # evict from caches
            # comp_intensive_model.delete_element_queue.put(current_epoch)
            del loss
            total_end = time.time()
            print("Total end to end time {}".format(total_end - total_start))
            logger.info("Total end to end time {}".format(total_end - total_start))
            # copy elements from prefetch cache to local cache
            # need to perform cache evictions
            # comp_intensive_model.zero_grad()
            # for name, params in comp_intensive_model.named_parameters():
            # print(name)
            # print(params.grad)
            # embeddings to check grad
            # print(
            #     "Elements to cache {}".format(
            #         train_example[current_epoch]["cache_elements"]
            #     )
            # )
            # for update_emb in train_example[current_epoch]["train_data"][
            #     "list_tuple_embedding"
            # ]:
            #     print(update_emb)
            #     print(
            #         comp_intensive_model.prefetch_cache[update_emb.__str__()].grad
            #     )
            # got an example to train
        except queue.Empty:
            # NOTE(review): a blocking get() never raises queue.Empty, so
            # this handler is unreachable -- confirm.
            pass
def parse_args(parser):
    """Attach all DLRM worker CLI options to `parser` and parse sys.argv.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser.add_argument(
        "--arch-mlp-bot",
        type=utils.dash_separated_ints,
        help="dimensions of the bottom mlp",
    )
    parser.add_argument(
        "--arch-mlp-top", type=utils.dash_separated_ints, help="dimensions of top mlp"
    )
    parser.add_argument(
        "--emb-size",
        type=int,
        default=16,
        help="size of the embedding for each sparse feature",
    )
    parser.add_argument(
        "--arch-interaction-op", type=str, choices=["dot", "cat"], default="dot"
    )
    parser.add_argument(
        "--lookahead-value",
        type=int,
        default=200,
        help="The number of batches further to look ahead for getting cache",
    )
    parser.add_argument("--device", type=str, default="cpu")
    parser.add_argument("--arch-interaction-itself", action="store_true", default=False)
    parser.add_argument("--loss-function", choices=["mse", "bce"], default="bce")
    # NOTE(review): the default below is already a list of ints (Criteo
    # table sizes); argparse only applies dash_separated_ints to
    # user-supplied values, so the two representations differ -- confirm
    # downstream code accepts both.
    parser.add_argument(
        "--ln-emb",
        type=utils.dash_separated_ints,
        help="embedding table sizes in the right order",
        default=[
            1460,
            583,
            10131227,
            2202608,
            305,
            24,
            12517,
            633,
            3,
            93145,
            5683,
            8351593,
            3194,
            27,
            14992,
            5461306,
            10,
            5652,
            2173,
            4,
            7046547,
            18,
            15,
            286181,
            105,
            142572,
        ],
    )
    parser.add_argument("--worker-id", type=int, required=True)
    parser.add_argument("--world-size", type=int, required=True)
    parser.add_argument("--master-ip", type=str, default="localhost")
    parser.add_argument("--master-port", type=str, default="18000")
    args = parser.parse_args()
    return args
if __name__ == "__main__":
    # Parse CLI args, configure file logging, then run the training loop.
    args = parse_args(argparse.ArgumentParser(description="Arguments for DLRM"))
    logging.basicConfig(filename="distributed_trainer_no_cache.log")
    logger = logging.getLogger()  # module-global: functions above rely on it
    logger.setLevel(logging.INFO)
    main(args)
| uw-mad-dash/bagpipe | distributed_trainer_baseline.py | distributed_trainer_baseline.py | py | 21,335 | python | en | code | 1 | github-code | 13 |
23551211156 | # You are climbing a staircase. It takes n steps to reach the top.
# Each time you can either climb 1 or 2 steps.
# In how many distinct ways can you climb to the top?
class Solution:
    """Count the distinct ways to climb an n-step staircase taking steps of
    size 1 or 2 (the answer is the (n+1)-th Fibonacci number)."""

    # Memo table shared by all instances; safe because climbStairs(n) is a
    # pure function of n, so cached values are always valid.
    memory = {0: 0}

    def climbStairs(self, n: int) -> int:
        """Top-down memoized count; O(n) time after the cache warms up."""
        if n in self.memory:
            return self.memory[n]
        elif n - 1 == 0 or n - 2 == 0:
            # Base cases: 1 way for one step, 2 ways for two steps.
            return n
        else:
            self.memory[n] = self.climbStairs(n - 1) + self.climbStairs(n - 2)
            return self.memory[n]

    # Running total used by the brute-force counter below.
    paths = 0

    def climbStairs_inefficient(self, n: int) -> int:
        """Brute-force O(2^n) enumeration of every step sequence.

        Bug fix: the counter is now reset on every call; previously
        `paths` accumulated across calls, so a second invocation returned
        the sum of both runs.
        """
        self.paths = 0
        self._enumerate_paths(n)
        return self.paths

    def _enumerate_paths(self, n: int) -> None:
        """Recursive helper: bump self.paths once per exact landing on 0."""
        if n < 0:
            # Overshot the top: this sequence is invalid.
            return
        if n == 0:
            self.paths += 1
            return
        self._enumerate_paths(n - 1)
        self._enumerate_paths(n - 2)

    def climbStairs2(self, n: int):  # -> int:
        """Prints a greedy descent to 0: "-2" while two steps fit, then
        "-1", then "0".

        Args:
            n (int): how many stairs to climb
        """
        if n == 0:
            print("0")
        elif n - 2 >= 0:
            print("-2")
            self.climbStairs2(n - 2)
        elif n - 1 >= 0:
            print("-1")
            self.climbStairs2(n - 1)
if __name__ == '__main__':
    # Smoke run: prints the number of ways to climb 38 stairs.
    solution = Solution()
print(solution.climbStairs(38)) | nichitatrifan/leet_code_python | easy/climbing_stairs.py | climbing_stairs.py | py | 1,382 | python | en | code | 0 | github-code | 13 |
34375114796 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import argparse
import json
import sys
__version__ = '0.1.0'
def decode_switchy_omega(backup_text):
    """Pretty-print a SwitchyOmega backup as sorted, 4-space-indented JSON.

    :type backup_text: unicode
    :rtype: unicode
    """
    # Bug fix: json.loads no longer accepts an `encoding` keyword (it was
    # deprecated and then removed in Python 3.9); the argument is already
    # decoded text, so the keyword was never needed.
    data = json.loads(backup_text)
    return json.dumps(data, indent=4, separators=(',', ': '), sort_keys=True)
def main():
    # Build the CLI: in/out files default to stdin/stdout so the tool can
    # be used in a shell pipeline.
    parser = argparse.ArgumentParser(
        description='Clean SwitchyOmega settings backup file (.bak) to well formatted JSON (.json) file.')
    parser.add_argument('in_file', nargs='?', type=argparse.FileType('r'), default=sys.stdin,
                        help='input file (default: stdin)')
    parser.add_argument('out_file', nargs='?', type=argparse.FileType('w'), default=sys.stdout,
                        help='output file (default: stdout)')
    parser.add_argument('-v', '--version', action='version', version=__version__)
    # NOTE(review): `unicode` and subscripting the result of map() make this
    # function Python-2-only; it raises NameError/TypeError under Python 3
    # -- confirm that is intended.
    unicode_args = map(lambda s: unicode(s, sys.getfilesystemencoding()), sys.argv)
    args = parser.parse_args(unicode_args[1:])
    try:
        input_text = args.in_file.read()
        output_text = decode_switchy_omega(input_text)
        args.out_file.write(output_text)
    finally:
        # Always close both files, even if reading or decoding failed.
        args.out_file.close()
        args.in_file.close()
| Nemoleoliu/dotfiles | switchy-sharp/clean_omega.py | clean_omega.py | py | 1,331 | python | en | code | null | github-code | 13 |
74697023376 | import unittest
from datCrawl import *
from test.requirements import *
# Live Wikipedia page used as a fixture by the crawler tests below.
URL = 'http://en.wikipedia.org/wiki/Python'
class datCrawlBaseTests(unittest.TestCase):
    """Smoke tests for the datCrawl core.

    NOTE(review): test_running_full_crawler fetches the live URL above, so
    these tests need network access and the crawler classes star-imported
    from test.requirements -- confirm before running in CI.
    """
    def test_instance_check(self):
        # The bare constructor yields a datCrawl instance.
        core = datCrawl()
        self.assertTrue(isinstance(core, datCrawl))
    def test_register_urls(self):
        # register_url stores (action, url, crawler-name) tuples in order.
        core = datCrawl()
        data = ('action', 'http://www.google.es/', 'AwesomeGoogleCrawler')
        core.register_url(data[0], data[1], data[2])
        self.assertEqual(core.urls[0], data)
    def test_running_full_crawler(self):
        # End-to-end: a registered crawler extracts the page title.
        core = datCrawl()
        core.register_crawler(AwesomeWikipediaTitleCrawler)
        result = core.run(URL)
        self.assertEqual(result['title'], 'Python')
    def test_worker_instance(self):
        # worker() wraps a URL in a datCrawlWorker bound to that URL.
        core = datCrawl()
        core.register_crawler(AwesomeWikipediaTitleCrawler)
        worker = core.worker(URL)
        self.assertTrue(isinstance(worker, datCrawlWorker))
        self.assertEqual(URL, worker.url)
if __name__ == '__main__':
    # Allow running this test module directly.
    unittest.main()
| fmartingr/datCrawl | test/test_base.py | test_base.py | py | 1,028 | python | en | code | 19 | github-code | 13 |
8384447684 | import os
import sys
import argparse
from statistics import mean
from prettytable import PrettyTable
from elasticsearch6 import Elasticsearch
# NOTE(review): Elasticsearch index names must be lowercase; 'LR3' will be
# rejected by the server with invalid_index_name_exception -- confirm.
INDEX_NAME = 'LR3'
def arg_parse():
    """Parse the tool's command-line arguments.

    Returns:
        argparse.Namespace with the parsed options (command, optional
        second_command, connection settings, and record filters).
    """
    parser = argparse.ArgumentParser()
    # Positional arguments: a required command and an optional sub-command.
    parser.add_argument("command")
    parser.add_argument("second_command", nargs='?', default=None)
    # Elasticsearch connection settings.
    parser.add_argument("-p", "--port", type=int, default=9200)
    parser.add_argument("-s", "--host", type=str, default='localhost')
    # Optional record filters (all default to None).
    for short_flag, long_flag in (
        ("-a", "--author"),
        ("-y", "--year"),
        ("-n", "--name"),
        ("-f", "--from_date"),
        ("-u", "--until_date"),
    ):
        parser.add_argument(short_flag, long_flag)
    return parser.parse_args()
def connect_elasticsearch(host, port):
    """Connect to Elasticsearch.

    Args:
        host: host name to connect to
        port: port to connect to

    Returns:
        elastic: the client object.  Note it is returned even when ping()
            fails; only the printed message differs, so callers should
            verify connectivity themselves if it matters.
    """
    elastic = Elasticsearch([{'host': host, 'port': port}])
    if elastic.ping():
        print('Connect')
    else:
        print('Not connect!')
    return elastic
def create_index(es_object):
    """Create the book index with a custom Russian text analyzer.

    Args:
        es_object: the Elasticsearch client connection

    Returns:
        created: whether a new index was created (True/False)
    """
    created = False
    # Index body: a custom analyzer that lowercases, removes standard
    # Russian stopwords, and additionally drops three book-specific words;
    # the `text` field uses it, the other fields use the standard analyzer.
    body_books = {
        "settings": {
            "analysis": {
                "filter": {
                    "russian_stop": {
                        "type": "stop",
                        "stopwords": "_russian_"
                    },
                    "russian_keywords": {
                        "type": "stop",
                        "stopwords": ["князь", "повезет", "сорок"]
                    }
                },
                "analyzer": {
                    "custom_analyzer": {
                        "type": "custom",
                        "tokenizer": "standard",
                        "filter": [
                            "lowercase",
                            "russian_stop",
                            "russian_keywords"
                        ]
                    }
                }
            }
        },
        "mappings": {
            "document": {
                "properties": {
                    "title": {
                        "type": "text",
                        "analyzer": "standard",
                        "search_analyzer": "standard"
                    },
                    "author": {
                        "type": "text",
                        "analyzer": "standard",
                        "search_analyzer": "standard"
                    },
                    "year_publication": {
                        "type": "date",
                        "format": "yyyy"
                    },
                    "text": {
                        "type": "text",
                        "analyzer": "custom_analyzer",
                        "search_analyzer": "custom_analyzer"
                    }
                }
            }
        }
    }
    try:
        # ignore=400 suppresses "already exists" races at the HTTP level.
        if not es_object.indices.exists(INDEX_NAME):
            es_object.indices.create(index=INDEX_NAME, ignore=400, body=body_books)
            print(f"Индекс: '{INDEX_NAME}' успешно создан!")
            created = True
        else:
            print(f"Индекс: '{INDEX_NAME}' уже существует!")
    except TypeError as ex:
        # NOTE(review): only TypeError is caught here; connection and
        # transport errors from the client propagate -- confirm intended.
        print(str(ex))
return created | Docik99/ot1p | LR_3/LR3.py | LR3.py | py | 3,895 | python | ru | code | 0 | github-code | 13 |
1491844447 | import os
import tictactoe
from datetime import datetime
from flask import (
Flask, flash, render_template,
redirect, request, session, url_for)
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
from werkzeug.security import generate_password_hash, check_password_hash
if os.path.exists("env.py"):
import env
app = Flask(__name__)
# Configuring Database
# Credentials come from environment variables (env.py when present);
# SECRET_KEY signs the session cookie used for login state.
app.config["MONGO_DBNAME"] = os.environ.get("MONGO_DBNAME")
app.config["MONGO_URI"] = os.environ.get("MONGO_URI")
app.secret_key = os.environ.get("SECRET_KEY")
mongo = PyMongo(app)
# For non existant pages
@app.errorhandler(404)
def not_found(e):
    # Flash a friendly message and fall back to the landing page instead
    # of rendering a dedicated 404 template.
    flash("""If you are looking for an extension to this website the page you
    were looking for doesn't exist. Have a look in our menu and register, if
    you haven't.""")
    return redirect(url_for("discussion"))
# For server errors
@app.errorhandler(500)
def not_signedin(e):
    # 500s here are most often KeyError on session["user"] from the views
    # below, hence the "please sign in" wording.
    flash("""Oops! Sorry about this. An internal server error occured. Please
    sign in or, if you are already, try the action again.""")
    return redirect(url_for("discussion"))
# Landing Page with Top 10 and Comments
@app.route("/")
@app.route("/discussion", methods=["GET", "POST"])
def discussion():
    """Landing page: top-10 leaderboard plus the comment wall; POST adds a
    comment for the signed-in user."""
    # If posting to DB insert the comment posted
    # with timestamp and other details
    # NOTE(review): session["user"] raises KeyError (-> 500 handler) when an
    # anonymous visitor POSTs -- confirm the form is only shown when logged in.
    if request.method == "POST":
        new_comment = {
            "name": session["user"],
            "message": request.form.get("message"),
            "date": datetime.now().strftime("%d-%m-%Y"),
            "timestamp": datetime.now()
        }
        mongo.db.comments.insert_one(new_comment)
    # Get contenders and comments from DB for display on discussion page
    contenders = mongo.db.users.find().sort('score', -1)[:10]
    comments = mongo.db.comments.find().sort('timestamp', -1)
    return render_template("discussion.html",
                           contenders=contenders,
                           comments=comments)
@app.route("/leaderboard")
def leaderboard():
    """Full leaderboard; the signed-in user's row is highlighted so they can
    find their own position easily.  Anonymous visitors are sent to sign in."""
    # Bug fix: session["user"] raised KeyError (-> 500) when nobody was
    # signed in; session.get returns None so the redirect branch is reached.
    username = session.get("user")
    if username:
        contenders = mongo.db.users.find().sort('score', -1)
        return render_template("leaderboard.html",
                               contenders=contenders,
                               username=username)
    return redirect(url_for("sign_in"))
# Variables for game
# NOTE(review): module-level mutable state is shared by every request the
# server handles; per-user separation relies on the username-keyed entries
# in playerCoordinates, while player_turn/opponent/result are truly global
# -- confirm this is acceptable for concurrent users.
player_turn = "player1"
opponent = "player2"
player1coordinates = []
player2coordinates = []
playerCoordinates = {}
partial_runsP1 = []
partial_runsP2 = []
spent_runs = []
dimensions = 4
width = 4
result = "Set Board"
@app.route("/play", methods=["GET", "POST"])
def play():
    """Main game view for n-dimensional tic-tac-toe.

    State machine driven by the module-level globals above: the first POST
    configures the board (width/dimensions/opponent), subsequent POSTs
    submit comma-separated coordinates alternating between player1 and
    either the computer or a local player2.  Per-user state is kept in
    playerCoordinates under keys derived from the session username.
    """
    # Bring variables in for use
    global player_turn, dimensions, width, opponent
    global player1coordinates, player2coordinates, playerCoordinates
    global partial_runsP1, partial_runsP2, spent_runs
    global result
    # Create a set of lists for each user if there are
    # mutliple users playing at once. A dictionary holds the lists
    # with each corresponding to the user's username.
    # NOTE(review): session["user"] raises KeyError for anonymous visitors,
    # which is caught by the 500 handler -- confirm that is intended.
    username = session["user"]
    state = username + "State"
    opposition = username + "Opp"
    partialp1 = username + "Partialp1"
    partialp2 = username + "Partialp2"
    depth = username + "Depth"
    dimensional = username + "Dimensions"
    spent = username + "Spent"
    new_coordinate = ""
    comp_coordinate = ""
    # Asign each list to its corresponding user-list in the dictionary
    if state in playerCoordinates:
        result = playerCoordinates[state]
    if username in playerCoordinates:
        player1coordinates = playerCoordinates[username]
        partial_runsP1 = playerCoordinates[partialp1]
        spent_runs = playerCoordinates[spent]
    else:
        player1coordinates = []
        partial_runsP1 = []
        spent_runs = []
    if opposition in playerCoordinates:
        player2coordinates = playerCoordinates[opposition]
        partial_runsP2 = playerCoordinates[partialp2]
    else:
        player2coordinates = []
        partial_runsP2 = []
    # If there is a post then begin the game checking
    if request.method == "POST":
        # A non-empty result ("Set Board" or a win message) means this POST
        # carries the board-configuration form, not a move.
        if result != "":
            # Get game settings
            width = int(request.form.get('width'))
            dimensions = int(request.form.get('dimensions'))
            opponent = request.form.get('opponent')
            playerCoordinates[state] = ""
            playerCoordinates[depth] = width
            playerCoordinates[dimensional] = dimensions
            result = playerCoordinates[state]
        elif player_turn == "player1":
            # Get the coordinate that is inputted by player 1 once boad is set
            new_coordinate = list(map(int, request.form.get(
                'coordinate').split(',')))
            # If the gameresult function returns a win scenraio then do things
            if tictactoe.GameResult(
                    player1coordinates,
                    new_coordinate,
                    player_turn,
                    dimensions,
                    width,
                    partial_runsP1) == player_turn:
                # Set result message for page and clear variables and lists
                result = f"""{session["user"].upper()} wins!!
                {width**dimensions}pts"""
                playerCoordinates[username] = []
                playerCoordinates[opposition] = []
                playerCoordinates[partialp1] = []
                playerCoordinates[partialp2] = []
                playerCoordinates[spent] = []
                playerCoordinates[state] = "Set Board"
                # Get the players score and add on to it the win score
                player_file = mongo.db.users.find_one(
                    {"username": session["user"]})
                player_score = player_file['score']
                new_score = ((width**dimensions) + player_score)
                player_update = {
                    "score": new_score
                }
                mongo.db.users.update_one(player_file, {"$set": player_update})
            # When the computer is set as the opponent
            elif opponent == "computer":
                player1coordinates.append(new_coordinate)
                if username in playerCoordinates:
                    playerCoordinates[username].append(new_coordinate)
                else:
                    playerCoordinates[username] = []
                    playerCoordinates[partialp1] = []
                    playerCoordinates[spent] = []
                    playerCoordinates[username].append(new_coordinate)
                # Get the computers move from its function
                comp_coordinate = tictactoe.CompPlay(partial_runsP1,
                                                     spent_runs,
                                                     player1coordinates,
                                                     player2coordinates,
                                                     width,
                                                     dimensions)
                # If there is a win scenario for the computer perform reset
                if tictactoe.GameResult(
                        player2coordinates,
                        comp_coordinate,
                        "Computer",
                        dimensions,
                        width,
                        partial_runsP2) == "Computer":
                    result = "c0mPuTer WiN!"
                    # Clear the variables and lists
                    playerCoordinates[username] = []
                    playerCoordinates[opposition] = []
                    playerCoordinates[partialp1] = []
                    playerCoordinates[partialp2] = []
                    playerCoordinates[spent] = []
                    playerCoordinates[state] = "Set Board"
                    player_turn = "player1"
                else:
                    # else just add the coordinate to the list
                    player2coordinates.append(comp_coordinate)
                    if opposition in playerCoordinates:
                        playerCoordinates[opposition].append(comp_coordinate)
                    else:
                        playerCoordinates[opposition] = []
                        playerCoordinates[partialp2] = []
                        playerCoordinates[spent] = []
                        playerCoordinates[opposition].append(comp_coordinate)
        else:
                # If the opponent is set to local just add the coord
                # and change turn to player 2.
                player1coordinates.append(new_coordinate)
                if username in playerCoordinates:
                    playerCoordinates[username].append(new_coordinate)
                else:
                    playerCoordinates[username] = []
                    playerCoordinates[partialp1] = []
                    playerCoordinates[spent] = []
                    playerCoordinates[username].append(new_coordinate)
                player_turn = "player2"
        elif player_turn == "player2":
            # Get the coordinate that is inputted by player 2
            new_coordinate = list(map(int, request.form.get(
                'coordinate').split(',')))
            if tictactoe.GameResult(
                    player2coordinates,
                    new_coordinate,
                    player_turn,
                    dimensions,
                    width,
                    partial_runsP2) == player_turn:
                result = "The Guest wins!!"
                # Clear the variables and lists
                playerCoordinates[username] = []
                playerCoordinates[opposition] = []
                playerCoordinates[partialp1] = []
                playerCoordinates[partialp2] = []
                playerCoordinates[spent] = []
                playerCoordinates[state] = "Set Board"
                player_turn = "player1"
            else:
                # Else add the coordinates to the list and
                # change back to player one
                player2coordinates.append(new_coordinate)
                if opposition in playerCoordinates:
                    playerCoordinates[opposition].append(new_coordinate)
                else:
                    playerCoordinates[opposition] = []
                    playerCoordinates[partialp2] = []
                    playerCoordinates[spent] = []
                    playerCoordinates[opposition].append(new_coordinate)
                player_turn = "player1"
    # Asign the width and dimensions the user values from the dictionary
    if depth in playerCoordinates:
        width = playerCoordinates[depth]
        dimensions = playerCoordinates[dimensional]
    else:
        result = "Set Board"
    # If there is a user logged-in move the variables
    # to the front end and render the page, otherwise redirect to log-in.
    if session["user"]:
        return render_template("play.html",
                               username=username,
                               result=result,
                               width=width,
                               dimensions=dimensions,
                               player1coordinates=player1coordinates,
                               player2coordinates=player2coordinates,
                               player_turn=player_turn)
    else:
        return redirect(url_for("sign_in"))
# If the user resets the board then the lists
# are cleared and they are redirected back to the play page
@app.route("/reset_board")
def reset_board():
    """Clear every move list for the logged-in user and restart play.

    Resets the turn to player one and empties each per-user coordinate
    list (own moves, opponent moves, partial runs and spent runs), then
    redirects back to the play page.
    """
    global player_turn, playerCoordinates
    player_turn = "player1"
    username = session['user']
    # Every per-user list is keyed by the username plus a fixed suffix.
    for suffix in ("", "Opp", "Partialp1", "Partialp2", "Spent"):
        playerCoordinates[username + suffix] = []
    return redirect(url_for("play"))
# If the user sets new board then the lists
# are cleared and the variables too
# and they are redirected back to the play page to set new board.
@app.route("/set_new_board")
def set_new_board():
    """Clear all game state for the logged-in user so a new board can be set.

    Empties every per-user coordinate list, resets the turn to player one
    and flags the board state as "Set Board", then redirects back to the
    play page.
    """
    global player_turn, playerCoordinates
    player_turn = "player1"
    username = session['user']
    state = username + "State"
    # Every per-user list is keyed by the username plus a fixed suffix.
    for suffix in ("", "Opp", "Partialp1", "Partialp2", "Spent"):
        playerCoordinates[username + suffix] = []
    playerCoordinates[state] = "Set Board"
    # Bug fix: the original redirected with `result=result`, but no
    # `result` variable is defined in this function, raising a NameError
    # at request time. The play view derives its own result, so just
    # redirect plainly.
    return redirect(url_for("play"))
# Register page
@app.route("/register", methods=["GET", "POST"])
def register():
    """Create a new account, or render the registration form on GET."""
    if request.method != "POST":
        return render_template("register.html")
    username = request.form.get("username").lower()
    # Reject the registration if the username is already taken.
    if mongo.db.users.find_one({"username": username}):
        flash("Username already exists")
        return redirect(url_for("register"))
    # Store the new account with a hashed password and a zero score.
    mongo.db.users.insert_one({
        "username": username,
        "password": generate_password_hash(request.form.get("password")),
        "score": 0
    })
    # put the new user into 'session' cookie
    session["user"] = username
    flash("Registration Successful!")
    return redirect(url_for(
        "play", username=session["user"]))
@app.route("/sign_in", methods=["GET", "POST"])
def sign_in():
    """Authenticate an existing user, or render the sign-in form on GET."""
    if request.method == "POST":
        username = request.form.get("username").lower()
        existing_user = mongo.db.users.find_one({"username": username})
        # An unknown user and a wrong password get the same vague message
        # so usernames cannot be probed.
        if not existing_user or not check_password_hash(
                existing_user["password"], request.form.get("password")):
            flash("Incorrect Username and/or Password")
            return redirect(url_for("sign_in"))
        session["user"] = username
        flash("Welcome, {}".format(
            request.form.get("username")))
        return redirect(url_for(
            "play", username=session["user"]))
    return render_template("signin.html")
@app.route("/sign_out")
def sign_out():
    """Sign the current user out and send them back to the sign-in page."""
    # remove user from session cookie
    flash("You have signed out")
    session.pop("user")
    return redirect(url_for("sign_in"))
# Edit a comment from the modal
@app.route("/edit_comment/<comment_id>/<comment_date>",
           methods=["GET", "POST"])
def edit_comment(comment_id, comment_date):
    """Overwrite comment `comment_id` with the edited text from the modal.

    The original date travels in the URL and is preserved; the message
    and timestamp are replaced. Bug fix: the original returned None for
    GET requests (the route allows GET), which Flask rejects with a 500;
    non-POST requests now just return to the discussion page.
    """
    if request.method == "POST":
        # Take the editted version and put in object for DB
        edited = {
            "name": session["user"],
            "message": request.form.get("edited_message"),
            "date": comment_date,
            "timestamp": datetime.now()
        }
        # replace_one supersedes the deprecated Collection.update for
        # whole-document replacement in pymongo 3.x.
        mongo.db.comments.replace_one({"_id": ObjectId(comment_id)}, edited)
        flash("Comment Successfully Edited")
    return redirect(url_for("discussion"))
# Delete comment by comment id
@app.route("/delete_comment/<comment_id>")
def delete_comment(comment_id):
    """Delete the comment with the given id and return to the discussion."""
    # delete_one supersedes the deprecated Collection.remove in pymongo 3.x.
    mongo.db.comments.delete_one({"_id": ObjectId(comment_id)})
    flash("Comment Successfully Deleted")
    return redirect(url_for("discussion"))
if __name__ == "__main__":
    # Robustness fix: int(os.environ.get("PORT")) raised TypeError when the
    # PORT env var was unset; default to Flask's conventional 5000.
    # debug stays False so tracebacks are never exposed in production.
    app.run(host=os.environ.get("IP"),
            port=int(os.environ.get("PORT", "5000")),
            debug=False)
| JonathanDelaney/TryTrickThatThough | app.py | app.py | py | 16,230 | python | en | code | 0 | github-code | 13 |
6636332254 | import base64
import logging
import os
from aiohttp import web
from marshmallow.exceptions import ValidationError
from ..helpers.api_schema import (
normalize_message,
UploadNew,
UploadStatus,
VersionMinimized,
)
from ..helpers.enums import Status
from ..helpers.web_routes import (
in_header_authorization,
in_path_file_uuid,
in_path_upload_token,
JSONException,
)
from ..new_upload.exceptions import ValidationException
from ..new_upload.session import (
add_file,
create_token,
get_session,
get_session_by_token,
publish_session,
update_session,
validate_session,
)
log = logging.getLogger(__name__)
routes = web.RouteTableDef()
@routes.post("/new-package/tusd-internal")
async def tusd_handler(request):
    """Handle hook callbacks from the tusd resumable-upload daemon.

    tusd POSTs a JSON payload here for its hooks (selected by the
    "Hook-Name" request header):
      - "pre-create": validate the upload metadata and upload token
        before the upload is allowed to start.
      - "post-create" / "post-finish": register the file with the upload
        session (still "announcing" until the upload finishes).
    Any other hook name is logged and rejected with 404.
    """
    # Only tusd itself, running on this same host, may call this endpoint.
    if request.remote != "127.0.0.1":
        return web.HTTPNotFound()
    payload = await request.json()
    headers = payload["HTTPRequest"]["Header"]
    # 'headers' is a dict of lists, where aiohttp only shows the last value.
    # So flatten the headers, and only pick the last header per key given.
    # Example: { "Upload-Length": [ 12 ] } becomes { "Upload-Length": 12 }.
    headers = {k: v[-1] for k, v in headers.items()}
    hook_name = request.headers.get("Hook-Name")
    if hook_name == "pre-create":
        if "Upload-Metadata" not in headers:
            return web.json_response({"message": "no filename given in metadata"}, status=400)
        # MetaData is stored overly complex: in a single header, comma
        # separated per key-value pair, which is stored space separated. On
        # top of that, the value is base64 encoded. In other words:
        # "key base64-value,key base64-value,.."
        try:
            metadata = dict([e.split(" ") for e in headers.get("Upload-Metadata", "").split(",")])
            for key, value in metadata.items():
                metadata[key] = base64.b64decode(value).decode()
        except Exception:
            raise JSONException({"message": "Upload-Metadata header is invalid"})
        if not metadata.get("filename"):
            return web.json_response({"message": "no filename given in metadata"}, status=400)
        if not metadata.get("upload-token"):
            return web.json_response({"message": "no upload-token given in metadata"}, status=400)
        # The token must reference a live upload session or the upload is
        # refused before any bytes are transferred.
        upload_token = in_path_upload_token(metadata.get("upload-token"))
        session = get_session_by_token(upload_token)
        if session is None:
            return web.HTTPNotFound()
        return web.HTTPOk()
    if hook_name in ("post-create", "post-finish"):
        # These hooks carry the already-decoded metadata in the payload,
        # so no base64 handling is needed here.
        payload = await request.json()
        upload_token = in_path_upload_token(payload["Upload"]["MetaData"]["upload-token"])
        session = get_session_by_token(upload_token)
        if session is None:
            return web.HTTPNotFound()
        # post-create fires when the upload starts: the file is only being
        # "announced". post-finish fires when all bytes have arrived.
        if hook_name == "post-create":
            announcing = True
        else:
            announcing = False
        add_file(
            session,
            payload["Upload"]["ID"],
            payload["Upload"]["MetaData"]["filename"],
            payload["Upload"]["Size"],
            payload["Upload"]["Storage"]["Path"],
            announcing=announcing,
        )
        return web.HTTPOk()
    log.warning("Unexpected hook-name: %s", hook_name)
    return web.HTTPNotFound()
@routes.post("/new-package")
async def new_start(request):
    """Start a new upload session and hand the client its upload token."""
    user = in_header_authorization(request.headers)
    upload_token = create_token(user)
    body = UploadNew().dump({"upload_token": str(upload_token)})
    return web.json_response(body)
@routes.get("/new-package/{upload_token}")
async def new_status(request):
    """Report the current status of an upload session."""
    token = in_path_upload_token(request.match_info["upload_token"])
    user = in_header_authorization(request.headers)
    session = get_session(user, token)
    if session is None:
        return web.HTTPNotFound()
    # Refresh the validation state before serializing it for the client.
    validate_session(session)
    return web.json_response(UploadStatus().dump(session))
@routes.put("/new-package/{upload_token}")
async def new_update(request):
    """Apply client-supplied metadata changes to an upload session."""
    token = in_path_upload_token(request.match_info["upload_token"])
    user = in_header_authorization(request.headers)
    session = get_session(user, token)
    if session is None:
        return web.HTTPNotFound()
    body = await request.json()
    # Schema validation (shape/type errors) and session validation
    # (semantic errors) are reported with the same 400 envelope.
    try:
        data = VersionMinimized(dump_only=VersionMinimized.read_only_for_new).load(body)
    except ValidationError as e:
        return web.json_response(
            {"message": "request body failed validation", "errors": normalize_message(e)}, status=400
        )
    try:
        update_session(session, data)
    except ValidationException as e:
        return web.json_response({"message": "request body failed validation", "errors": e.args[0]}, status=400)
    return web.HTTPNoContent()
@routes.delete("/new-package/{upload_token}/{file_uuid}")
async def new_delete_file(request):
    """Remove a previously uploaded file from an upload session.

    Deletes both the session entry and the file on disk; 404 if the
    session or the file uuid is unknown.
    """
    upload_token = in_path_upload_token(request.match_info["upload_token"])
    file_uuid = in_path_file_uuid(request.match_info["file_uuid"])
    user = in_header_authorization(request.headers)
    session = get_session(user, upload_token)
    if session is None:
        return web.HTTPNotFound()
    # Iterate over a copy so the matching entry can be removed mid-loop.
    for file_info in list(session["files"]):
        if file_info["uuid"] == file_uuid:
            session["files"].remove(file_info)
            internal_filename = file_info["internal_filename"]
            os.remove(internal_filename)
            # NOTE(review): files outside data/tar/ appear to be tus
            # uploads that carry a companion ".info" sidecar — confirm
            # against the upload flow.
            if not internal_filename.startswith("data/tar/"):
                os.remove(f"{internal_filename}.info")
            break
    else:
        # for/else: the loop found no file with this uuid.
        return web.HTTPNotFound()
    return web.HTTPNoContent()
@routes.post("/new-package/{upload_token}/publish")
async def new_publish(request):
    """Validate and publish an upload session, returning its final status."""
    token = in_path_upload_token(request.match_info["upload_token"])
    user = in_header_authorization(request.headers)
    session = get_session(user, token)
    if session is None:
        return web.HTTPNotFound()
    # Run validation first; a session with errors must not be published.
    validate_session(session)
    if session["status"] == Status.ERRORS:
        return web.json_response(
            {"message": "package has validation errors", "errors": session["errors"]},
            status=400)
    publish_session(session)
    return web.json_response(UploadStatus().dump(session), status=201)
| OpenTTD/bananas-api | bananas_api/web_routes/new.py | new.py | py | 6,372 | python | en | code | 1 | github-code | 13 |
30589894742 | #!/usr/bin/env python
import cv2
import math
import numpy as np
from matplotlib import pyplot as plt
def getRGBS(img, PLOT=False):
    """Extract a 32-dim grayscale/edge histogram feature vector from a BGR image.

    Computes a 16-bin normalized grayscale intensity histogram plus a
    16-bin normalized histogram of the Sobel gradient magnitude.

    Args:
        img: BGR image as a numpy array (as returned by cv2.imread).
        PLOT: unused; kept for backward compatibility with existing callers.

    Returns:
        (features, Fnames): a list of 32 floats and the matching list of
        32 feature names.
    """
    features = []
    # Grayscale histogram, normalized to sum to 1.
    # cv2.cv was removed in OpenCV 3; use cv2.COLOR_BGR2GRAY, consistent
    # with the cv2.COLOR_* constants already used in this file.
    Grayscale = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    histG = cv2.calcHist([Grayscale], [0], None, [16], [0, 256])
    histG = histG / histG.sum()
    features.extend(histG[:, 0].tolist())
    # Sobel gradient magnitude (approximated as 0.5*|gx| + 0.5*|gy|),
    # histogrammed the same way.
    grad_x = np.abs(cv2.Sobel(Grayscale, cv2.CV_16S, 1, 0, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT))
    grad_y = np.abs(cv2.Sobel(Grayscale, cv2.CV_16S, 0, 1, ksize=3, scale=1, delta=0, borderType=cv2.BORDER_DEFAULT))
    abs_grad_x = cv2.convertScaleAbs(grad_x)
    abs_grad_y = cv2.convertScaleAbs(grad_y)
    dst = cv2.addWeighted(abs_grad_x, 0.5, abs_grad_y, 0.5, 0)
    histSobel = cv2.calcHist([dst], [0], None, [16], [0, 256])
    histSobel = histSobel / histSobel.sum()
    features.extend(histSobel[:, 0].tolist())
    # Bug fix: each histogram contributes 16 features, but the original
    # generated only 8 names per histogram (range(8)), so the name list
    # did not line up with the feature vector.
    Fnames = []
    Fnames.extend(["Color-Gray" + str(i) for i in range(16)])
    Fnames.extend(["Color-GraySobel" + str(i) for i in range(16)])
    return features, Fnames
| tyiannak/recognizeFitExercise | featuresColor.py | featuresColor.py | py | 1,193 | python | en | code | 9 | github-code | 13 |
72026462738 | import abc
import collections
import inspect
import json
import os
import re
from absl import logging
import gin
import numpy as np
from t5.data import sentencepiece_vocabulary
import tensorflow.compat.v1 as tf
import tensorflow_datasets as tfds
from mesh_tensorflow.transformer.utils import _filter_features
_DEFAULT_FEATURE_KEYS = ["inputs", "targets"]
_VALID_TASK_NAME_REGEX = re.compile(r"^[\w\d\._]+$")
_INFO_FILENAME = "info.{split}.json"
_STATS_FILENAME = "stats.{split}.json"
_TFRECORD_PREFIX = "{split}.tfrecord"
_MAX_EXAMPLES_TO_MEM_CACHE = 10000
SHUFFLE_BUFFER_SIZE = 1000
_TFDS_DATA_DIR_OVERRIDE = None
_GLOBAL_CACHE_DIRECTORIES = []
DEFAULT_SPM_PATH = "gs://t5-data/vocabs/cc_all.32000/sentencepiece.model" # GCS
DEFAULT_EXTRA_IDS = 100
def get_default_vocabulary():
  """Returns the default SentencePiece vocabulary (DEFAULT_SPM_PATH) with
  DEFAULT_EXTRA_IDS extra ids."""
  return sentencepiece_vocabulary.SentencePieceVocabulary(
      DEFAULT_SPM_PATH, DEFAULT_EXTRA_IDS)
def set_tfds_data_dir_override(tfds_data_dir):
  """Overrides the data directory used by all `LazyTfdsLoader`s.

  Pass None to clear the override.
  """
  global _TFDS_DATA_DIR_OVERRIDE
  _TFDS_DATA_DIR_OVERRIDE = tfds_data_dir
def set_global_cache_dirs(global_cache_dirs):
  """Replaces the list of global cache directories searched by Tasks."""
  global _GLOBAL_CACHE_DIRECTORIES
  _GLOBAL_CACHE_DIRECTORIES = global_cache_dirs
def add_global_cache_dirs(global_cache_dirs):
  """Appends directories to the list of global cache directories."""
  global _GLOBAL_CACHE_DIRECTORIES
  _GLOBAL_CACHE_DIRECTORIES += global_cache_dirs
# Bug fix: the py2-style `__metaclass__` class attribute is ignored by
# Python 3, which silently disabled abstract-method enforcement; the
# metaclass must be passed as a class keyword argument instead. The
# deprecated `abc.abstractproperty` is replaced by the equivalent
# @property + @abc.abstractmethod stack.
class DatasetProviderBase(object, metaclass=abc.ABCMeta):
  """Abstract base for classes that provide a tf.data.Dataset."""

  @property
  @abc.abstractmethod
  def output_features(self):
    """Returns the dict of output Features for this provider."""
    raise NotImplementedError

  @abc.abstractmethod
  def get_vocabulary(self):
    """Returns the vocabulary used by this provider's features."""
    raise NotImplementedError

  @abc.abstractmethod
  def get_dataset(
      self, sequence_length, split, use_cached=False, shuffle=True):
    """Returns a tf.data.Dataset for the given split."""
    raise NotImplementedError

  @abc.abstractmethod
  def num_input_examples(self, split):
    """Returns the number of input examples for `split` (or None)."""
    raise NotImplementedError
class DatasetProviderRegistry(object):
  """Base for registry of data providers.

  Child classes must implement a _REGISTRY dict.
  """
  _PROVIDER_TYPE = DatasetProviderBase

  @classmethod
  def add(cls, name, provider_cls, *provider_args, **provider_kwargs):
    """Instantiates `provider_cls` and adds it to the registry under `name`.

    Raises:
      ValueError: if `name` is already registered or the instantiated
        provider is not a `_PROVIDER_TYPE`.
    """
    if name in cls._REGISTRY:
      raise ValueError("Attempting to register duplicate provider: %s" % name)
    provider = provider_cls(*provider_args, **provider_kwargs)
    if not isinstance(provider, cls._PROVIDER_TYPE):
      # Bug fix: the original message read "a class not of an invalid
      # type", which says the opposite of what is meant.
      raise ValueError(
          "Attempting to register a class of an invalid type. "
          "Expecting instance of %s, got %s" %
          (cls._PROVIDER_TYPE, provider_cls))
    cls._REGISTRY[name] = provider

  @classmethod
  def remove(cls, name):
    """Remove provider from the registry, if it exists."""
    if name in cls._REGISTRY:
      del cls._REGISTRY[name]

  @classmethod
  def get(cls, name):
    """Returns provider from the registry."""
    if name not in cls._REGISTRY:
      raise ValueError("Provider name not registered: %s" % name)
    return cls._REGISTRY[name]

  @classmethod
  def names(cls):
    """Returns all provider names in registry."""
    return cls._REGISTRY.keys()

  @classmethod
  def get_dataset(
      cls, name, sequence_length, split, use_cached=False, shuffle=True):
    """Looks up provider `name` and returns its dataset for `split`."""
    return cls.get(name).get_dataset(
        sequence_length=sequence_length, split=split, use_cached=use_cached,
        shuffle=shuffle)
class LazyTfdsLoader(object):
  """Wrapper for TFDS datasets with memoization and additional functionality.

  Lazily loads info from TFDS and provides memoization to avoid expensive hidden
  file operations. Also provides additional utility methods.
  """

  # Class-level cache shared by all instances, keyed by (name, data_dir).
  _MEMOIZED_BUILDERS = {}

  def __init__(self, name, data_dir=None, split_map=None):
    """LazyTfdsLoader constructor.

    Args:
      name: str, the name of the TFDS dataset.
      data_dir: str (optional), directory to read/write TFDS data.
      split_map: dict (optional), mapping from canonical splits
        (e.g., 'validation') to TFDS splits or slices
        (e.g., 'train[':1%']).
    """
    self._name = name
    self._data_dir = data_dir
    self._split_map = split_map

  @property
  def name(self):
    """str, the TFDS dataset name given at construction."""
    return self._name

  @property
  def data_dir(self):
    """The effective data directory, honoring the module-level override."""
    if _TFDS_DATA_DIR_OVERRIDE:
      if self._data_dir:
        # The override wins even if this loader has its own data_dir.
        logging.warning(
            "Overriding TFDS data directory '%s' with '%s' for dataset '%s'.",
            self._data_dir, _TFDS_DATA_DIR_OVERRIDE, self.name)
      return _TFDS_DATA_DIR_OVERRIDE
    return self._data_dir

  @property
  def builder(self):
    """The tfds.core.DatasetBuilder, memoized across all instances."""
    builder_key = (self.name, self.data_dir)
    if builder_key not in LazyTfdsLoader._MEMOIZED_BUILDERS:
      LazyTfdsLoader._MEMOIZED_BUILDERS[builder_key] = tfds.builder(
          self.name, data_dir=self.data_dir)
    return LazyTfdsLoader._MEMOIZED_BUILDERS[builder_key]

  @property
  def info(self):
    """The tfds.core.DatasetInfo for this dataset."""
    return self.builder.info

  def _map_split(self, split):
    # Translate a canonical split name through the optional split_map.
    return self._split_map[split] if self._split_map else split

  def files(self, split):
    """Returns set of instructions for reading TFDS files for the dataset."""
    split = self._map_split(split)
    if "/" not in self.name and self.builder.BUILDER_CONFIGS:
      # If builder has multiple configs, and no particular config was
      # requested, raise an error.
      raise ValueError("Dataset '%s' has multiple configs." % self.name)
    split_info = self.builder.info.splits[split]
    files = split_info.file_instructions
    if not files:
      logging.fatal("No TFRecord files found for dataset: %s", self.name)
    return files

  def load(self, split, shuffle_files):
    """Returns a tf.data.Dataset for the given split."""
    split = self._map_split(split)
    return tfds.load(
        self._name,
        split=split,
        data_dir=self.data_dir,
        shuffle_files=shuffle_files,
        download=True,
        try_gcs=True)

  def load_shard(self, file_instruction):
    """Returns a dataset for a single shard of the TFDS TFRecord files."""
    ds = self.builder._tfrecords_reader.read_files( # pylint:disable=protected-access
        [file_instruction],
        read_config=tfds.ReadConfig(),
        shuffle_files=False)
    return ds

  def size(self, split):
    """Returns the number of examples in the split."""
    split = self._map_split(split)
    ds_splits = self.info.splits
    dataset_size = ds_splits[split].num_examples
    # Very large datasets have num_examples = 0; default instead to np.inf
    dataset_size = dataset_size if dataset_size > 0 else np.inf
    return dataset_size
def encode_string_features(
    dataset, output_features, keys, copy_plaintext=False):
  """Encode specified string features.

  Tokenizes the string features named in `keys` using the corresponding
  vocabulary from `output_features`, casting the result to int64.
  Non-string features pass through unchanged. When `copy_plaintext` is
  True, the original string is also kept under "<key>_plaintext".

  Args:
    dataset: a tf.data.Dataset
    output_features: a dict of Feature objects; their vocabulary attribute will
      be used to tokenize the specified features.
    keys: list of strings, keys of features to encode.
    copy_plaintext: bool, whether to pass through copies of plaintext strings
      with a "_plaintext" suffix added to the key.

  Returns:
    a tf.data.Dataset
  """
  keys = set(keys)

  def _encode(features):
    encoded = {}
    for name, value in features.items():
      if name in keys and value.dtype == tf.string:
        if copy_plaintext:
          encoded["%s_plaintext" % name] = value
        value = tf.cast(
            output_features[name].vocabulary.encode_tf(value), tf.int64)
      encoded[name] = value
    return encoded

  return dataset.map(_encode, num_parallel_calls=tf.data.experimental.AUTOTUNE)
def dict_to_tfexample(ex):
  """Convert example dictionary to tf.train.Example proto.

  Scalars are wrapped into single-element lists; rank-1 values are used
  as-is. Only string and int32/int64 values of rank <= 1 are supported.

  Raises:
    ValueError: if a value has rank > 1 or an unsupported dtype.
  """
  feature_dict = {}
  for k, v in ex.items():
    t = tf.constant(v)
    if len(t.shape) == 0:  # pylint:disable=g-explicit-length-test
      v = [v]
    elif len(t.shape) == 1:
      v = list(v)
    else:
      # Bug fix: the original interpolated the module attribute `tf.shape`
      # instead of the offending value's shape `t.shape`.
      raise ValueError(
          "Unsupported shape (%s) for '%s' value: %s" %
          (t.shape, k, v))
    if t.dtype == tf.string and len(t.shape) <= 1:
      feature_dict[k] = tf.train.Feature(
          bytes_list=tf.train.BytesList(
              value=[tf.compat.as_bytes(t) for t in v]))
    elif t.dtype in (tf.int32, tf.int64) and len(t.shape) <= 1:
      feature_dict[k] = tf.train.Feature(
          int64_list=tf.train.Int64List(value=v))
    else:
      # Bug fix: same here — report the actual dtype/shape (`t.dtype`,
      # `t.shape`) rather than the `tf.dtype`/`tf.shape` module attributes.
      raise ValueError(
          "Unsupported type (%s) and shape (%s) for '%s' value: %s" %
          (t.dtype, t.shape, k, v))
  return tf.train.Example(features=tf.train.Features(feature=feature_dict))
# ================================ Tasks =======================================
def get_info_path(data_dir, split):
  """Returns the path of the cached-info JSON file for `split` in `data_dir`."""
  filename = _INFO_FILENAME.format(split=split)
  return os.path.join(data_dir, filename)
def get_tfrecord_prefix(data_dir, split):
  """Returns the filename prefix of the cached TFRecords for `split`."""
  prefix = _TFRECORD_PREFIX.format(split=split)
  return os.path.join(data_dir, prefix)
def get_stats_path(data_dir, split):
  """Returns the path of the cached-stats JSON file for `split` in `data_dir`."""
  filename = _STATS_FILENAME.format(split=split)
  return os.path.join(data_dir, filename)
class Feature(object):
  """A container for attributes of output features of data providers."""

  def __init__(self, vocabulary, add_eos=True, required=True):
    """Create a Feature instance.

    Args:
      vocabulary: vocabularies.Vocabulary object to use for tokenization,
        or a zero-argument callable returning one, which is resolved
        lazily on first access.
      add_eos: bool, whether an EOS token should be added to this Feature.
      required: Whether or not this feature must exist in the final outputs
        of the Task.
    """
    self._vocabulary = vocabulary
    self.add_eos = add_eos
    self.required = required

  @property
  def vocabulary(self):
    # A callable is a lazy factory: resolve it once and cache the result
    # so later accesses return the same vocabulary object.
    vocab = self._vocabulary
    if callable(vocab):
      vocab = vocab()
      self._vocabulary = vocab
    return vocab
def print_dataset(dataset):
  """tf.Print dataset fields for debugging purposes."""
  def _tap(example):
    return {key: tf.Print(value, [value], key + ": ")
            for key, value in example.items()}
  return dataset.map(_tap)
@gin.configurable
def maybe_print_dataset(dataset, should_print=False):
  """tf.Print dataset for debugging purposes (no-op unless enabled)."""
  if should_print:
    return print_dataset(dataset)
  return dataset
class Task(DatasetProviderBase):
"""A wrapper for a `tf.data.Dataset` along with preprocessing information.
Tasks handle preprocessing (via arbitrary TF function) and tokenization
(via SentencePiece). Non-train splits also pass through the original
plaintext strings with a "_plaintext" suffix added to the key.
"""
def __init__(self,
name,
dataset_fn,
splits,
text_preprocessor,
metric_fns=None,
postprocess_fn=None,
token_preprocessor=None,
output_features=None,
num_input_examples=None,
supports_caching=True,
sentencepiece_model_path=None,
shuffle_buffer_size=SHUFFLE_BUFFER_SIZE):
"""Task constructor.
Attributes of output features, including the vocabulary used for
tokenization, should be provided via the `output_features` argument.
Args:
name: string, a unique name for the Task. A ValueError will be raised if
another task with this name is already registered.
dataset_fn: callable, a function with the signature
`dataset_fn(split, shuffle_files)' that returns a `tf.data.Dataset`.
splits: list(string), a list of allowable splits to request from the
`dataset_fn`.
text_preprocessor: a function (or list of functions) that (each) takes in
a tf.data.Dataset of string features and returns a tf.data.Dataset of
string features. Can be set to None as a no-op. If a list is given,
they will be executed sequentially.
metric_fns: list(callable), list of metric functions with the signature
`metric_fn(targets, predictions)` to use during evaluation. By default
(None), an empty list will be used, resulting in no evaluation on this
task.
postprocess_fn: function (or list of functions) that (each) takes in
decoded model outputs (strings) and returns a string which is ready
for evaluation using the metric functions in `metric_fns`. Can be
set to None as a no-op. If a list is given, functions will be executed
sequentially.
token_preprocessor: an optional function (or list of functions) that
(each) takes in a tf.data.Dataset of token features and returns a
tf.data.Dataset of token features.
Can be set to None as a no-op. If a list is given, they will be
executed sequentially.
The functions are also passed `sequence_length` and `vocabulary`
keyword arguments.
output_features: dict(str, Feature), list(str), Feature, or None. Output
features of the Task. If list(str) is provided, a default `Feature` will
be constructed for each provided feature name. If a `Feature` is
provided, it will be used for the default feature names ('inputs' and
'targets'). When None (default), a default `Feature` will be constructed
for the default feature names.
num_input_examples: dict(string: int) or None, a dictionary mapping split
to its size in number of input examples (before preprocessing). The
`num_input_examples` method will return None if not provided.
supports_caching: bool, whether or not this task supports offline caching.
sentencepiece_model_path: DEPRECATED use `output_features` to specify a
non-default vocabulary.
shuffle_buffer_size: an optional integer
"""
if not _VALID_TASK_NAME_REGEX.match(name):
raise ValueError(
"Task name '%s' contains invalid characters. Must match regex: %s" % (
name, _VALID_TASK_NAME_REGEX.pattern))
_validate_args(dataset_fn, ["split", "shuffle_files"])
metric_fns = metric_fns or []
for metric_fn in metric_fns:
_validate_args(metric_fn, ["targets", "predictions"])
self._name = name
self._dataset_fn = dataset_fn
self._text_preprocessor = (
[] if text_preprocessor is None else text_preprocessor)
self._token_preprocessor = (
[] if token_preprocessor is None else token_preprocessor)
self._metric_fns = metric_fns
# Use a pass-through if postprocess_fn is not provided
self._postprocess_fn = (
[(lambda x, **unused_kwargs: x)]
if postprocess_fn is None else postprocess_fn)
self._cache_dir = None
self._stats = {}
self._shuffle_buffer_size = shuffle_buffer_size
if sentencepiece_model_path == DEFAULT_SPM_PATH:
logging.warn(
"`sentencepiece_model_path` is deprecated and is ignored. Please "
"update your code as this will cause a failure in future versions.")
elif sentencepiece_model_path:
raise ValueError(
"`sentencepiece_model_path` is deprecated. Please use "
"`output_features` to specify a non-default vocabulary.")
if hasattr(output_features, "__len__") and not output_features:
raise ValueError("output_features must be non-empty.")
if output_features is None:
output_features = Feature(get_default_vocabulary())
if isinstance(output_features, dict):
pass
elif isinstance(output_features, Feature):
output_features = {k: output_features for k in _DEFAULT_FEATURE_KEYS}
elif isinstance(output_features, list) and all(
isinstance(f, str) for f in output_features):
output_features = {
k: Feature(get_default_vocabulary()) for k in output_features
}
else:
raise ValueError(
"output_features must be a dict, Feature, list of str, or None")
self._output_features = collections.OrderedDict(
sorted(list(output_features.items()))
)
self._splits = splits
self._num_input_examples = num_input_examples
self._supports_caching = supports_caching
@property
def name(self):
return self._name
@property
def metric_fns(self):
return self._metric_fns
@property
def output_features(self):
return self._output_features
@property
def token_preprocessor(self):
return self._token_preprocessor
@property
def splits(self):
return self._splits
def num_input_examples(self, split):
if self._num_input_examples is None:
return None
return self._num_input_examples[split]
def _preprocess_dataset(self, dataset, preprocessors, **preprocess_kwargs):
if not hasattr(preprocessors, "__iter__"):
preprocessors = [preprocessors]
for prep_fn in preprocessors:
dataset = prep_fn(dataset, **preprocess_kwargs)
return dataset
def _validate_dataset(
self,
dataset,
expected_output_type,
expected_output_rank,
error_label,
ensure_no_eos=False):
"""Validates properties of a tf.data.Dataset, raising Exceptions if needed.
Args:
dataset: a tf.data.Dataset to validate.
expected_output_type: a tf.dtype, the expected type of the model features.
expected_output_rank: an int, the expected rank of the model features.
error_label: a string, an identifier for the previous processing step to
report in raised ValueErrors.
ensure_no_eos: a bool, whether or not to verify that the model features
contain no EOS tokens.
Returns:
a validated tf.data.Dataset.
"""
types = tf.data.get_output_types(dataset)
shapes = tf.data.get_output_shapes(dataset)
for feat in self.output_features:
if feat not in types:
if self.output_features[feat].required:
raise ValueError(
"Task dataset is missing expected output feature after {label}: "
"{feat}".format(label=error_label, feat=feat))
else:
# It's ok that this feature does not exist.
continue
if expected_output_type != types[feat]:
raise ValueError(
"Task dataset has incorrect type for feature '{feat}' after "
"{label}: Got {actual}, expected {expected}".format(
feat=feat, label=error_label, actual=types[feat].name,
expected=expected_output_type.name))
if expected_output_rank != len(shapes[feat]):
raise ValueError(
"Task dataset has incorrect rank for feature '{feat}' after "
"{label}: Got {actual}, expected {expected}".format(
feat=feat, label=error_label, actual=len(shapes[feat]),
expected=expected_output_rank))
def _ensure_no_eos(feat, v):
if feat not in self.output_features:
return v
with tf.control_dependencies([
tf.assert_none_equal(
v, tf.constant(1, tf.int64),
message="Feature '{feat}' unexpectedly contains EOS=1 token "
"after {label}.".format(feat=feat, label=error_label))
]):
return v
if ensure_no_eos:
dataset = dataset.map(
lambda ex: {k: _ensure_no_eos(k, v) for k, v in ex.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
return dataset
def preprocess_text(self, dataset):
"""Preprocessed text dataset."""
dataset = self._preprocess_dataset(dataset, self._text_preprocessor)
dataset = self._validate_dataset(
dataset, expected_output_type=tf.string, expected_output_rank=0,
error_label="text preprocessing")
return dataset
def preprocess_tokens(self, dataset, sequence_length):
"""Preprocesses tokenized dataset.
Args:
dataset: a tf.data.Dataset
sequence_length: dict mapping feature key to int length for that feature
Returns:
a tf.data.Dataset
"""
dataset = self._preprocess_dataset(
dataset, self._token_preprocessor,
sequence_length=sequence_length,
output_features=self.output_features)
dataset = self._validate_dataset(
dataset,
expected_output_type=tf.int64,
expected_output_rank=1,
error_label="token preprocessing",
ensure_no_eos=False) # TODO: debug
# Trim and append EOS=1 token to model features.
def _trim_and_append_eos(feat, v):
if feat not in self.output_features:
return v
if self.output_features[feat].add_eos:
return tf.concat([v[:sequence_length[feat]-1], [1]], axis=0)
else:
return v[:sequence_length[feat]]
return dataset.map(
lambda ex: {k: _trim_and_append_eos(k, v) for k, v in ex.items()},
num_parallel_calls=tf.data.experimental.AUTOTUNE)
@property
def cache_dir(self):
"""Returns the cache directory (or None), initializing if needed."""
if not self._cache_dir:
# See if cached data exists in any of the cache directories.
potential_cache_dirs = [
os.path.join(d, self.name) for d in _GLOBAL_CACHE_DIRECTORIES]
for cache_dir in potential_cache_dirs:
if tf.io.gfile.exists(os.path.join(cache_dir, "COMPLETED")):
self._cache_dir = cache_dir
logging.info("'%s' is cached at %s.", self.name, self.cache_dir)
break
if not self._cache_dir:
logging.info(
"'%s' does not exist in any task cache directories (searched %s).",
self.name,
potential_cache_dirs,
)
return self._cache_dir
@property
def supports_caching(self):
"""Wether or not this task supports offline caching."""
return self._supports_caching
def assert_cached(self):
"""Raises an assertion error if cached dataset does not exist."""
assert self.cache_dir, (
"'%s' does not exist in any of the task cache directories" % self.name)
def get_cached_stats(self, split=tfds.Split.TRAIN):
"""Returns basic statistics for cached dataset."""
self.assert_cached()
if split not in self._stats:
stats_path = get_stats_path(self.cache_dir, split)
if not tf.io.gfile.exists(stats_path):
raise ValueError(
"Stats do not exist for '%s' split: %s" % (self.name, split))
with tf.io.gfile.GFile(stats_path) as f:
self._stats[split] = json.load(f)
return self._stats[split]
def get_vocabulary(self, feature_name=None):
"""Returns a Vocabulary object for the provided feature.
Args:
feature_name: str or None, the name of the output feature to get the
Vocabulary for. If None is provided, then this function will first check
that all features have the same Vocabulary and then return that
Vocabulary.
Returns: a Vocabulary object.
"""
if feature_name is None:
vocabulary = list(self.output_features.values())[0].vocabulary
for feature in self.output_features.values():
if feature.vocabulary != vocabulary:
raise ValueError(
"No feature_name was provided to get_vocabulary, but "
"output_features have different vocabularies."
)
else:
vocabulary = self.output_features[feature_name].vocabulary
return vocabulary
def get_dataset(
self,
sequence_length,
split=tfds.Split.TRAIN,
use_cached=False,
shuffle=True,
shuffle_buffer_size=None,
copy_plaintext=True,
):
"""Returns a tf.data.Dataset from cache or generated on the fly.
Args:
sequence_length: dict mapping feature key to int length for that feature
split: string, the split to return.
use_cached: bool, whether to use the cached dataset instead of processing
it on the fly. Defaults to False.
shuffle: bool, whether to shuffle the dataset. Only used when generating
on the fly (use_cached=False).
shuffle_buffer_size: an integer or None to use task-specific buffer size.
copy_plaintext: bool, whether to pass through copies of plaintext strings
with a "_plaintext" suffix added to the key.
Returns:
A mixed tf.data.Dataset.
"""
if use_cached and not self.supports_caching:
logging.warning(
"Task '%s' does not support caching. Switching to on-the-fly "
"preprocessing.", self.name)
use_cached = False
if use_cached:
ds = self._get_cached_dataset(split, shuffle)
else:
ds = self._dataset_fn(split=split, shuffle_files=shuffle)
ds = self.preprocess_text(ds)
ds = maybe_print_dataset(ds)
# Tokenize
ds = encode_string_features(
ds, self.output_features, keys=self.output_features,
copy_plaintext=copy_plaintext)
if (not use_cached and self.num_input_examples(split) and
self.num_input_examples(split) < _MAX_EXAMPLES_TO_MEM_CACHE):
ds = ds.cache()
# Post tokenization processing.
ds = self.preprocess_tokens(ds, sequence_length)
ds = maybe_print_dataset(ds)
if shuffle:
# Shuffle before mixing since preprocessor can output multiple
# (correlated) examples per input.
ds = ds.shuffle(shuffle_buffer_size or self._shuffle_buffer_size)
return ds
  def _get_cached_dataset(self, split=tfds.Split.TRAIN, shuffle=True):
    """Returns a tf.data.Dataset read from cached files."""
    self.assert_cached()
    # Each cached split ships a JSON info file describing its feature specs
    # and number of TFRecord shards.
    with tf.io.gfile.GFile(get_info_path(self.cache_dir, split)) as f:
      split_info = json.load(f)
    # Use `FixedLenSequenceFeature` for sequences with variable length.
    def _feature_config(shape, dtype):
      if shape and shape[0] is None:
        return tf.io.FixedLenSequenceFeature(
            shape[1:], dtype, allow_missing=True)
      return tf.io.FixedLenFeature(shape, dtype)
    feature_desc = {
        feat: _feature_config(**desc)
        for feat, desc in split_info["features"].items()}
    ds = tf.data.Dataset.list_files(
        "%s-*-of-*%d" % (
            get_tfrecord_prefix(self.cache_dir, split),
            split_info["num_shards"]),
        shuffle=shuffle)
    # Read shards in parallel, interleaving records across files.
    ds = ds.interleave(
        tf.data.TFRecordDataset,
        cycle_length=16, block_length=16,
        num_parallel_calls=tf.data.experimental.AUTOTUNE)
    ds = ds.map(lambda ex: tf.parse_single_example(ex, feature_desc),
                num_parallel_calls=tf.data.experimental.AUTOTUNE)
    # Keep small cached datasets in memory after the first pass.
    if self.get_cached_stats(split)["examples"] <= _MAX_EXAMPLES_TO_MEM_CACHE:
      ds = ds.cache()
    return ds
def postprocess_fn(self, string, **postprocess_kwargs):
"""Returns the processed string after applying postprocess function(s)."""
postprocessors = self._postprocess_fn
if not hasattr(postprocessors, "__iter__"):
postprocessors = [self._postprocess_fn]
for post_fn in postprocessors:
string = post_fn(string, **postprocess_kwargs)
return string
class TfdsTask(Task):
  """A `Task` that uses TensorFlow Datasets to provide the input dataset."""
  def __init__(
      self,
      name,
      tfds_name,
      text_preprocessor,
      metric_fns,
      tfds_data_dir=None,
      splits=None,
      **task_kwargs):
    """TfdsTask constructor.
    Args:
      name: string, a unique name for the Task. A ValueError will be raised if
        another task with this name is already registered.
      tfds_name: string, the name and version number of a TFDS dataset,
        optionally with a config.
      text_preprocessor: a function (or list of functions) that (each) takes in
        a tf.data.Dataset of string features and returns a tf.data.Dataset of
        string features. Can be set to None as a no-op. If a list is given,
        they will be executed sequentially.
      metric_fns: list(callable), list of metric functions with the signature
        metric_fn(targets, predictions) to use during evaluation.
      tfds_data_dir: string, an optional path to a specific TFDS data directory
        to use.
      splits: a list(string) of allowable splits to load, a dict mapping
        allowable canonical splits (e.g., 'validation') to TFDS splits or slices
        (e.g., 'train[':1%']), or None. The default, None, uses all available
        splits from the TFDS dataset info.
      **task_kwargs: dict, additional keyword arguments for the parent `Task`
        class.
    """
    if ":" not in tfds_name:
      raise ValueError(
          "TFDS name must contain a version number, got: %s" % tfds_name)
    # Defer loading TFDS metadata/data until the dataset is actually needed.
    self._tfds_dataset = LazyTfdsLoader(
        tfds_name,
        data_dir=tfds_data_dir,
        split_map=splits if isinstance(splits, dict) else None)
    def dataset_fn(split, shuffle_files):
      return self._tfds_dataset.load(split, shuffle_files)
    super().__init__(
        name,
        dataset_fn=dataset_fn,
        splits=list(splits) if splits else None,
        text_preprocessor=text_preprocessor,
        metric_fns=metric_fns,
        **task_kwargs)
  @property
  def splits(self):
    """Override since we can't call `info.splits` until after init."""
    return self._splits or self._tfds_dataset.info.splits
  @property
  def tfds_dataset(self):
    # The underlying `LazyTfdsLoader`.
    return self._tfds_dataset
  def num_input_examples(self, split):
    # Uses the TFDS metadata rather than iterating the dataset.
    return self.tfds_dataset.size(split)
class TextLineTask(Task):
  """A `Task` that reads text lines as input.
  Requires a text_processor to be passed that takes a tf.data.Dataset of
  strings and returns a tf.data.Dataset of feature dictionaries.
  e.g. preprocessors.preprocess_tsv()
  """
  def __init__(
      self,
      name,
      split_to_filepattern,
      text_preprocessor,
      metric_fns,
      skip_header_lines=0,
      **task_kwargs):
    """TextLineTask constructor.
    Args:
      name: string, a unique name for the Task. A ValueError will be raised if
        another task with this name is already registered.
      split_to_filepattern: dict of string (split name) to string (filename or
        filepattern).
      text_preprocessor: a function (or list of functions) that (each) takes in
        a tf.data.Dataset of string features and returns a tf.data.Dataset of
        string features. Can be set to None as a no-op. If a list is given,
        they will be executed sequentially.
      metric_fns: list(callable), list of metric functions with the signature
        metric_fn(targets, predictions) to use during evaluation.
      skip_header_lines: int, number of header lines to skip in each source
        file.
      **task_kwargs: dict, additional keyword arguments for the parent `Task`
        class.
    """
    self._split_to_filepattern = split_to_filepattern
    self._skip_header_lines = skip_header_lines
    def dataset_fn(split, shuffle_files):
      filepattern = split_to_filepattern[split]
      def _read_file(fname):
        # Drop per-file header lines before emitting text lines.
        return tf.data.TextLineDataset(fname).skip(skip_header_lines)
      files = tf.data.Dataset.list_files(filepattern, shuffle=shuffle_files)
      # Read matching files in parallel, interleaving their lines.
      return files.interleave(
          _read_file,
          cycle_length=16, block_length=16,
          num_parallel_calls=tf.data.experimental.AUTOTUNE)
    super().__init__(
        name,
        dataset_fn=dataset_fn,
        splits=list(split_to_filepattern.keys()),
        text_preprocessor=text_preprocessor,
        metric_fns=metric_fns,
        **task_kwargs)
class TaskRegistry(DatasetProviderRegistry):
  """Registry of `Task`s, keyed by their unique names."""
  _REGISTRY = {}
  _PROVIDER_TYPE = Task
  @classmethod
  def add(cls, name, task_cls=Task, **kwargs):
    """Registers `task_cls(name, **kwargs)` under `name`."""
    # Zero-argument super() for consistency with the rest of the file
    # (see TfdsTask/TextLineTask constructors).
    super().add(name, task_cls, name, **kwargs)
# ================================ Mixtures ====================================
class Mixture(DatasetProviderBase):
  """Class for mixing multiple tasks."""
  def __init__(self, tasks, default_rate=None):
    """Mixture constructor.
    A mixture specifies a set of tasks with associated mixing rates.
    Mixing happens on preprocessed tokenized examples.
    The mixing rates represent relative numbers of examples to use from their
    associated tasks. Setting the mixing rates to be equal to the numbers of
    examples in the tasks will result in each task going through an epoch in
    about the same amount of time - i.e. all examples are sampled equally across
    all tasks.
    Rates can be expressed either as absolute numbers or as functions that
    receive the Task as an argument.
    Args:
      tasks: a list where each element is either a string (task name) or a
        pair whose first element is the task name and whose second element
        is either a float (rate) or a function from Task to float.
      default_rate: a float or a function from Task to float. This specifies the
        default rate if rates are not provided in the `tasks` argument.
    """
    self._task_to_rate = {}
    self._tasks = []
    for t in tasks:
      if isinstance(t, str):
        task_name = t
        rate = default_rate
        if default_rate is None:
          raise ValueError("need a rate for each task")
      else:
        task_name, rate = t
      self._tasks.append(TaskRegistry.get(task_name))
      self._task_to_rate[task_name] = rate
    # Mixing only makes sense if all tasks produce the same features.
    if len(set(tuple(t.output_features) for t in self._tasks)) != 1:
      raise ValueError(
          "All Tasks in a Mixture must have the same output features."
      )
  @property
  def tasks(self):
    return self._tasks
  def get_rate(self, task):
    """Returns the mixing rate for `task`, evaluating callable rates."""
    rate = self._task_to_rate[task.name]
    return float(rate(task) if callable(rate) else rate)
  def num_input_examples(self, split):
    return sum(t.num_input_examples(split) for t in self.tasks)
  @property
  def output_features(self):
    # We require all tasks to have the same output_features in __init__
    # so we can just get the output_features for the 0th task
    return self._tasks[0].output_features
  def _check_same_vocabularies(self):
    """Throw an Exception if features across tasks have different vocabs."""
    for name, feature in self._tasks[0].output_features.items():
      for task in self._tasks[1:]:
        if task.output_features[name].vocabulary != feature.vocabulary:
          raise ValueError(
              "Features across tasks in a mixture must use the same vocabulary."
          )
  def get_vocabulary(self, feature_name=None):
    """Returns a Vocabulary object using the Tasks' model.
    Args:
      feature_name: str or None, the name of the output feature to get the
        Vocabulary for. If None is provided, then this function will first check
        that all features have the same Vocabulary and then return that
        Vocabulary.
    Returns: a Vocabulary object.
    """
    self._check_same_vocabularies()
    return self._tasks[0].get_vocabulary(feature_name=feature_name)
  def get_dataset_in_order(self, sequence_length, split, use_cached=False, shuffle=True):
    """Yields (task_name, example) pairs task by task, without mixing."""
    for task in self.tasks:
      for ex in task.get_dataset(sequence_length, split=split, use_cached=use_cached, shuffle=shuffle):
        yield task.name, ex
  def get_dataset(
      self,
      sequence_length,
      split=tfds.Split.TRAIN,
      use_cached=False,
      shuffle=True,
      compute_stats_empirically=False,
      use_filter=True
  ):
    """Returns the dataset of mixed tasks using the object-specified rates.
    Args:
      sequence_length: dict mapping feature key to int length for that feature
      split: string, the split to return for all tasks.
      use_cached: bool, whether to use the cached dataset instead of processing
        it on the fly. Defaults to False.
      shuffle: bool, whether to shuffle the dataset. Only used when generating
        on the fly (use_cached=False).
      compute_stats_empirically: a boolean - does not work on TPU
      use_filter: use filter to remove unnecessary keys
    """
    self._check_same_vocabularies()
    tasks = []
    for task in self.tasks:
      if split not in task.splits:
        logging.info(
            "Task %s has no '%s' split, skipping.", task.name, split
        )
        continue
      tasks.append(task)
    if not tasks:
      raise ValueError("No datasets have a '{}' split".format(split))
    def filter_features(ex):
      # Drop keys that are not declared output features.
      return {k: v for k, v in ex.items() if k in self.output_features}
    datasets = [
        task.get_dataset(sequence_length, split, use_cached, shuffle=shuffle)  # pylint:disable=g-complex-comprehension
        for task in tasks]
    if use_filter:  # TODO: debug
      # BUG FIX: this previously mapped the undefined name `_filter_features`,
      # raising a NameError whenever use_filter=True; the local function
      # defined above is `filter_features`.
      datasets = [
          ds.repeat().map(filter_features, num_parallel_calls=tf.data.experimental.AUTOTUNE)
          for ds in datasets]
    rates = [self.get_rate(task) for task in tasks]
    # Sample from the datasets at the given rates.
    seed = None if shuffle else 42
    dataset = tf.data.experimental.sample_from_datasets(datasets, rates, seed)
    if (split == "train" and use_cached and
        all(t.supports_caching for t in tasks)):
      _log_mixing_proportions(tasks, datasets, rates, dataset, sequence_length,
                              compute_stats_empirically)
    return dataset
# Functions to be used as mixing rates:
@gin.configurable
def rate_num_examples(
    task, maximum=None, temperature=1.0, scale=1.0,
    fallback_to_num_input_examples=True):
  """Mixing rate equal to the number of examples for the task.

  The count is scaled by `scale`, optionally capped at `maximum`, and
  optionally temperature-flattened (rate ** (1 / temperature)).
  """
  if task.cache_dir or not fallback_to_num_input_examples:
    rate = task.get_cached_stats("train")["examples"]
  else:
    logging.warning(
        "Task '%s' not cached so using number of input examples instead of "
        "preprocessed examples to compute rate.",
        task.name)
    rate = task.num_input_examples("train")
  rate *= scale
  rate = min(rate, maximum) if maximum else rate
  return rate if temperature == 1.0 else rate ** (1.0 / temperature)
@gin.configurable
def rate_unsupervised(task, value=1e6):
  """Gin-configurable mixing rate for the unsupervised co-training task.

  Returns the constant `value`; `task` is accepted only to match the
  rate-function signature and is ignored.
  """
  del task  # unused
  return value
def _log_padding_fractions(dataset, sequence_length, num_examples=100):
  """Empirically compute the fraction of padding - log the results.
  Args:
    dataset: a tf.data.Dataset
    sequence_length: dict from string to int (packed lengths)
    num_examples: an integer
  """
  logging.info("computing padding fractions")
  padding_frac = dict.fromkeys(sequence_length, 0)
  for ex in tfds.as_numpy(dataset.take(num_examples)):
    for key, packed_length in sequence_length.items():
      padding_frac[key] += 1 - (packed_length / len(ex[key]))
  for key, frac in padding_frac.items():
    logging.info("%s padding fraction = %g", key, frac)
def _log_mixing_proportions(
    tasks, datasets, rates, mixed_dataset,
    sequence_length, compute_stats_empirically):
  """Log information about the mixing proportions.
  Called from Mixture.get_dataset.
  Args:
    tasks: a list of Task
    datasets: a list of tf.data.Dataset
    rates: a list of floats
    mixed_dataset: a tf.data.Dataset
    sequence_length: dict from string to int (packed lengths)
    compute_stats_empirically: a boolean - does not work on TPU
  """
  def _normalize(l):
    denom = sum(l)
    return [x / denom for x in l]
  # compute some stats about the mixture
  examples_fraction = _normalize(rates)
  if compute_stats_empirically:
    # Sample a fixed number of examples per task and average their sizes.
    stats_examples = 100
    mean_inputs_length = []
    mean_targets_length = []
    for dataset in datasets:
      inputs_sum = 0
      targets_sum = 0
      for ex in tfds.as_numpy(dataset.take(stats_examples)):
        inputs_sum += ex["inputs"].size
        targets_sum += ex["targets"].size
      mean_inputs_length.append(inputs_sum / float(stats_examples))
      mean_targets_length.append(targets_sum / float(stats_examples))
  else:
    def _estimated_mean_length(task, key):
      # A token preprocessor may change lengths arbitrarily, so assume the
      # full packed length; otherwise estimate from the cached token stats.
      if task.token_preprocessor:
        return sequence_length[key]
      else:
        return min(sequence_length[key],
                   (task.get_cached_stats("train")[key + "_tokens"] /
                    task.get_cached_stats("train")["examples"]))
    mean_inputs_length = [_estimated_mean_length(task, "inputs")
                          for task in tasks]
    mean_targets_length = [_estimated_mean_length(task, "targets")
                           for task in tasks]
  # Token fractions weight the example rates by mean sequence lengths.
  inputs_fraction = _normalize(
      [l * r for l, r in zip(mean_inputs_length, rates)])
  targets_fraction = _normalize(
      [l * r for l, r in zip(mean_targets_length, rates)])
  logging.info("%12s %12s %12s %12s %12s %12s %s",
               "rate", "ex.frac.", "inp.frac.", "tgt.frac.",
               "inp.len.", "tgt.len", "task")
  for i in range(len(rates)):
    logging.info("%12g %12g %12g %12g %12g %12g %s",
                 rates[i], examples_fraction[i],
                 inputs_fraction[i], targets_fraction[i],
                 mean_inputs_length[i], mean_targets_length[i],
                 tasks[i].name)
  if compute_stats_empirically:
    _log_padding_fractions(mixed_dataset, sequence_length)
class MixtureRegistry(DatasetProviderRegistry):
  """Registry of `Mixture`s, keyed by their unique names."""
  _REGISTRY = {}
  _PROVIDER_TYPE = Mixture
  @classmethod
  def add(cls, name, tasks, default_rate=None):
    """Registers `Mixture(tasks, default_rate)` under `name`."""
    # Zero-argument super() for consistency with the rest of the file.
    super().add(name, Mixture, tasks, default_rate)
def get_mixture_or_task(task_or_mixture_name):
  """Return the Task or Mixture from the appropriate registry.

  Mixtures take precedence when a name is registered as both.
  """
  is_mixture = task_or_mixture_name in MixtureRegistry.names()
  is_task = task_or_mixture_name in TaskRegistry.names()
  if is_mixture:
    if is_task:
      logging.warning("%s is both a Task and a Mixture, returning Mixture",
                      task_or_mixture_name)
    return MixtureRegistry.get(task_or_mixture_name)
  if is_task:
    return TaskRegistry.get(task_or_mixture_name)
  raise ValueError("No Task or Mixture found with name: %s" %
                   task_or_mixture_name)
def get_subtasks(task_or_mixture):
  """Returns all the Tasks in a Mixture as a list or the Task itself."""
  is_single_task = isinstance(task_or_mixture, Task)
  return [task_or_mixture] if is_single_task else task_or_mixture.tasks
def _validate_args(fn, expected_pos_args):
"""Ensure function has exactly expected positional args."""
argspec = inspect.getfullargspec(fn)
expected_pos_args = tuple(expected_pos_args)
actual_args = tuple(argspec.args)
if actual_args[:len(expected_pos_args)] != expected_pos_args:
raise ValueError(
"'%s' must have positional args %s, got: %s" % (
fn.__name__, expected_pos_args, actual_args))
actual_pos_args = tuple(
argspec.args[:-len(argspec.defaults)]
if argspec.defaults else argspec.args)
if actual_pos_args != expected_pos_args[:len(actual_pos_args)]:
raise ValueError(
"'%s' may only have positional args %s, got: %s" % (
fn.__name__, expected_pos_args, actual_pos_args))
| jzbjyb/lm-calibration | t5/data/utils.py | utils.py | py | 42,503 | python | en | code | 26 | github-code | 13 |
40038159199 | import tensorflow as tf
import tensorbayes as tb
import numpy as np
from codebase.args import args
from tensorbayes.tfutils import softmax_cross_entropy_with_two_logits as softmax_xent_two
from tensorflow.contrib.framework import add_arg_scope
@add_arg_scope
def normalize_perturbation(d, scope=None):
    """L2-normalizes `d` over all non-batch axes."""
    with tf.name_scope(scope, 'norm_pert'):
        # Materialize the range: `axis` should be an int or list of ints,
        # and a bare range object is not accepted by all TF versions.
        output = tf.nn.l2_normalize(d, axis=list(range(1, len(d.shape))))
    return output
@add_arg_scope
def scale_gradient(x, scale, scope=None, reuse=None):
    """Identity in the forward pass; scales the gradient by `scale`.

    The (1 - scale) fraction of x is stop_gradient'd, so the forward value
    is exactly x while d(output)/dx == scale.
    """
    # BUG FIX: honor the `scope` argument (it was previously ignored),
    # matching the tf.name_scope(scope, default) pattern of the sibling ops.
    with tf.name_scope(scope, 'scale_grad'):
        output = (1 - scale) * tf.stop_gradient(x) + scale * x
    return output
@add_arg_scope
def noise(x, std, phase, scope=None, reuse=None):
    """Adds Gaussian noise with stddev `std` to x when `phase` is true."""
    with tf.name_scope(scope, 'noise'):
        perturbation = tf.random_normal(tf.shape(x), 0.0, std)
        # Noise is applied only in the training phase; otherwise pass through.
        output = tf.where(phase, x + perturbation, x)
    return output
@add_arg_scope
def leaky_relu(x, a=0.2, name=None):
    """Leaky ReLU activation: elementwise max(x, a * x)."""
    with tf.name_scope(name, 'leaky_relu'):
        return tf.maximum(a * x, x)
@add_arg_scope
def basic_accuracy(a, b, scope=None):
    """Fraction of rows where argmax(a) agrees with argmax(b)."""
    with tf.name_scope(scope, 'basic_acc'):
        pred_a = tf.argmax(a, 1)
        pred_b = tf.argmax(b, 1)
        matches = tf.cast(tf.equal(pred_a, pred_b), 'float32')
        output = tf.reduce_mean(matches)
    return output
@add_arg_scope
def perturb_image(x, p, classifier, pert='vat', scope=None):
    """Returns a virtual-adversarially perturbed copy of `x`.

    Finds the direction (w.r.t. a tiny random perturbation) that most
    increases the classifier's cross-entropy against the reference
    prediction `p`, normalizes it, and moves `x` by `args.radius` along it.
    The result is stop_gradient'd so no gradient flows through the
    perturbation construction. NOTE: the `pert` argument is currently
    unused ('vat' is the only implemented mode).
    """
    with tf.name_scope(scope, 'perturb_image'):
        eps = 1e-6 * normalize_perturbation(tf.random_normal(shape=tf.shape(x)))
        # Predict on randomly perturbed image
        eps_p = classifier(x + eps, phase=True, reuse=True)
        loss = softmax_xent_two(labels=p, logits=eps_p)
        # Based on perturbed image, get direction of greatest error
        eps_adv = tf.gradients(loss, [eps], aggregation_method=2)[0]
        # Use that direction as adversarial perturbation
        eps_adv = normalize_perturbation(eps_adv)
        x_adv = tf.stop_gradient(x + args.radius * eps_adv)
    return x_adv
@add_arg_scope
def vat_loss(x, p, classifier, scope=None):
    """VAT smoothing loss: cross-entropy between `p` and the prediction on
    an adversarially perturbed copy of `x`."""
    with tf.name_scope(scope, 'smoothing_loss'):
        adversarial_x = perturb_image(x, p, classifier)
        adversarial_p = classifier(adversarial_x, phase=True, reuse=True)
        xent = softmax_xent_two(labels=tf.stop_gradient(p), logits=adversarial_p)
        loss = tf.reduce_mean(xent)
    return loss
| RuiShu/dirt-t | codebase/models/extra_layers.py | extra_layers.py | py | 2,249 | python | en | code | 173 | github-code | 13 |
32771984992 | # -*- coding: utf-8 -*-
import os
import sys
import h5py
import yaml
import numpy as np
from PIL import Image
import matplotlib.pyplot as plt
import matplotlib as mpl
import cartopy.crs as ccrs
# import cartopy.feature as cfeature
# from mpl_toolkits.axes_grid1 import make_axes_locatable, axes_size
from cartopy.mpl.ticker import LongitudeFormatter, LatitudeFormatter
# 必须加这个字段,否则引用 pyplot 服务器会报错,服务器上面没有 TK
mpl.use("Agg")
'''
Created on 2019年12月20日
@author: wape2
'''
main_path, main_file = os.path.split(os.path.realpath(__file__))
plot_map_use_scatter = True
class ReadPlotYaml():
    """Placeholder for a plot-configuration reader (not implemented)."""
    pass
class ReadModeYaml():
    """Placeholder for a mode-configuration reader (not implemented)."""
    pass
class ReadInPutYaml():
    """Reads the job configuration from a YAML interface file."""
    def __init__(self, in_file):
        """
        Loads the YAML config file and exposes its fields as attributes.
        Exits the process if the file does not exist.
        """
        if not os.path.isfile(in_file):
            print('Not Found %s' % in_file)
            sys.exit(-1)
        with open(in_file, 'r') as stream:
            # safe_load refuses arbitrary Python object tags from the config
            # file; calling yaml.load without an explicit Loader is also
            # deprecated since PyYAML 5.1.
            cfg = yaml.safe_load(stream)
        self.job_name = cfg['INFO']['job_name']
        self.ymd = cfg['INFO']['ymd']
        self.ipath1 = cfg['PATH']['ipath1']
        self.ipath2 = cfg['PATH']['ipath2']
        self.opath = cfg['PATH']['opath']
color = {
    'invalid': (0., 0., 0.),  # 255 - no data - black
    'same': (0., 255., 127.),  # 0 - identical pixels - green
    'diff': (205., 38., 38.)  # 1 - differing pixels - red
}
def main(yaml_file):
    """Compares the SNC datasets of two HDF5 files and saves two PNG plots."""
    # Read the interface (job configuration) file.
    yaml1 = ReadInPutYaml(yaml_file)
    ipath1 = yaml1.ipath1
    ipath2 = yaml1.ipath2
    opath = yaml1.opath
    data1 = read_hdf5(ipath1)
    data2 = read_hdf5(ipath2)
    same_idx = data1 == data2
    diff_idx = data1 != data2
    diff_value = data1 - data2
    # Code matrix: 0 = identical, 1 = different (-1 only as the initial fill).
    diff_data = np.full(data1.shape, fill_value=-1, dtype='i1')
    diff_data[same_idx] = 0
    diff_data[diff_idx] = 1
    oname_t = os.path.basename(ipath1)
    ofig1 = os.path.join(opath, oname_t.split('.')[0] + '_difference.png')
    ofig2 = os.path.join(opath, oname_t.split('.')[0] + '_statistics.png')
    plot_image_difference(diff_value, ofig1)
    idx1 = np.where(diff_data == 0)  # identical pixels
    idx2 = np.where(diff_data == 1)  # differing pixels
    # BUG FIX: the two percentages were swapped - idx1 counts *same* pixels
    # and idx2 counts *diff* pixels, but they were assigned the other way
    # round, so the plot annotated each with the other's value.
    same_percent = len(idx1[0]) / data1.size * 100
    diff_percent = len(idx2[0]) / data1.size * 100
    plot_image_statistics(diff_value, ofig2, diff_percent, same_percent)
def read_hdf5(ifile):
    """Reads the full 'SNC' dataset from the HDF5 file at `ifile`."""
    # Context manager guarantees the file handle is released even if the
    # read raises (the original left the file open on error).
    with h5py.File(ifile, 'r') as h5r:
        # [:] materializes the dataset into memory before the file closes.
        return h5r.get('SNC')[:]
def plot_image_difference(r, out_file):
    """Saves the difference matrix `r` as a full-resolution jet-colormap PNG."""
    row, col = np.shape(r)
    # One figure pixel per matrix cell (figsize is in inches at dpi=100).
    width = col / 100.
    length = row / 100.
    dpi = 100
    fig = plt.figure(figsize=(width, length), dpi=dpi)
    plt.imshow(r, cmap='jet')
    plt.axis('off')
    # Strip all ticks and margins so the image fills the canvas exactly.
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    plt.margins(0, 0)
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    fig.savefig(out_file, dpi=dpi)
    fig.clear()
    plt.close()
    print('>>> {}'.format(out_file))
def plot_image_statistics(r, out_file, diff_p, same_p):
    """Plots `r` with a 3-color map and annotates the same/diff percentages."""
    color_list = ['#00FF7F', '#CD2626', '#000000']
    # Set the colorbar's minimum and maximum values.
    # norm = mpl.colors.Normalize(vmin = 0, vmax = 255)
    mycolormap = mpl.colors.ListedColormap(color_list, 'indexed')
    # Bins: [0, 0.1) -> green, [0.1, 1.1) -> red, [1.1, 255.1) -> black.
    bounds = [0, 0.1, 1.1, 255.1]
    norm = mpl.colors.BoundaryNorm(bounds, mycolormap.N)
    row, col = np.shape(r)
    width = col / 100.
    length = row / 100.
    dpi = 100
    print(width, length)
    fig = plt.figure(figsize=(6, 6), dpi=dpi)
    plt.imshow(r, cmap=mycolormap, norm=norm)
    plt.text(30, 30, 'same point: %0.1f %%' %
             same_p, ha='left', va='center', fontsize=8)
    plt.text(30, 60, 'diff point: %0.1f %%' %
             diff_p, ha='left', va='center', fontsize=8)
    plt.axis('off')
    # Strip ticks and margins so the image fills the canvas.
    plt.gca().xaxis.set_major_locator(plt.NullLocator())
    plt.gca().yaxis.set_major_locator(plt.NullLocator())
    plt.subplots_adjust(top=1, bottom=0, right=1, left=0, hspace=0, wspace=0)
    plt.margins(0, 0)
    out_dir = os.path.dirname(out_file)
    if not os.path.isdir(out_dir):
        os.makedirs(out_dir)
    fig.savefig(out_file, dpi=dpi)
    fig.clear()
    plt.close()
    print('>>> {}'.format(out_file))
def rgb_image(rgb_data, out_fig):
    """Maps the 255/0/1 difference codes to colors and saves an RGB image."""
    # Plotting: start from all-white channel planes.
    dshape = rgb_data.shape
    arryR = np.full(dshape, 255, dtype='uint8')
    arryG = np.full(dshape, 255, dtype='uint8')
    arryB = np.full(dshape, 255, dtype='uint8')
    print(type(rgb_data[0, 0]))
    # Paint each code with its color from the module-level `color` table.
    mask = (rgb_data == 255)
    arryR[mask], arryG[mask], arryB[mask] = color['invalid']
    mask = (rgb_data == 0)
    arryR[mask], arryG[mask], arryB[mask] = color['same']
    mask = (rgb_data == 1)
    arryR[mask], arryG[mask], arryB[mask] = color['diff']
    # Merge the three channel planes into one color image.
    imr = Image.fromarray(arryR.astype('uint8'))
    img = Image.fromarray(arryG.astype('uint8'))
    imb = Image.fromarray(arryB.astype('uint8'))
    im = Image.merge('RGB', (imr, img, imb))  # color image
    print(out_fig)
    im.save(out_fig)
if __name__ == '__main__':
    # The single CLI argument is the path to the YAML job configuration.
    main(sys.argv[1])
| NingAnMe/snow_cover_of_remote_sensing | ndsi_b01_check_orbit.py | ndsi_b01_check_orbit.py | py | 5,366 | python | en | code | 0 | github-code | 13 |
1448183161 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
# Author: Zhoutao
#create_date:2017-02-05-13:26
# Python 3.5
def person(name, age, sex, job):
    """Create a dict-based 'person' record bundling data with a walk behavior."""
    def walk(p):
        # Announce that the person named in record `p` is walking.
        print('person %s is walking...' % p['name'])
    return {'name': name, 'age': age, 'sex': sex, 'job': job, 'walk': walk}
def dog(name, dog_type):
    """Create a dict-based 'dog' record bundling data with a bark behavior."""
    def bark(d):
        # Announce the bark of the dog named in record `d`.
        print('dog %s: wang wang wang...' % d['name'])
    return {'name': name, 'dog_type': dog_type, 'bark': bark}
# Demo: build one dog and one person record, then invoke their behaviors.
d1 = dog("李闯","京巴")
p1 = person("孙海涛",36,"F","运维")
d1['bark'](d1)
p1['walk'](p1) | 248808194/python | M3/笔记例子等/class引子.py | class引子.py | py | 644 | python | en | code | 0 | github-code | 13 |
36916551284 | # coding: utf8
from __future__ import unicode_literals, print_function, division
from collections import defaultdict
from lingpy3.ops.base import operation
from lingpy3.interfaces import IWordlist
from lingpy3.util import product2, chained_values
from lingpy3 import log
def get_score(wl, ref, mode, taxA, taxB, ignore_missing=False):
    """Scores the pair of languages (taxA, taxB) on the `ref` column.

    Modes:
    - 'shared': number of shared `ref` values across all concepts.
    - 'jaccard': 1 - Jaccard similarity of the two sets of `ref` values.
    - 'swadesh': 1 - proportion of (non-missing) concepts on which the two
      languages share at least one `ref` value.
    """
    refsA, refsB = defaultdict(list), defaultdict(list)
    for concept, items in wl.iter_by_concept('language', ref):
        # [0] is a "missing" placeholder when a language lacks the concept.
        refsA[concept] = [r for lang, r in items if lang == taxA] or [0]
        refsB[concept] = [r for lang, r in items if lang == taxB] or [0]
    if mode in ['shared', 'jaccard']:
        listA = chained_values(refsA)
        listB = chained_values(refsB)
        shared = [x for x in listA if x in listB]
        if mode == 'jaccard':
            return 1 - len(set(shared)) / len(set(listA + listB))
        return len(shared)
    assert mode == 'swadesh'
    # count amount of shared concepts
    shared, missing = 0, 0
    for concept in wl.concepts:
        # NOTE(review): concepts never yielded by iter_by_concept are counted
        # as neither shared nor missing (the defaultdict returns [], not the
        # [0] placeholder) - confirm this is intended.
        if refsA.get(concept) == [0] or refsB.get(concept) == [0]:
            missing += 1 if not ignore_missing else 0
        elif [k for k in refsA[concept] if k in refsB[concept]]:
            shared += 1
    try:
        return 1 - shared / (len(wl.concepts) - missing)
    except ZeroDivisionError:
        # Every concept was missing in one of the languages.
        log.get_logger().exception(
            "Zero-division error encountered in '{0}' and '{1}'.".format(taxA, taxB))
        return 1.0
@operation(IWordlist)
def distances(wl, ref='cogid', refB='', mode='swadesh', ignore_missing=False, **kw):
    """
    Compute a pairwise language distance matrix from a wordlist.
    :param ref: column holding the primary partition (default: cognate IDs).
    :param refB: optional second partition column; when given, the lower
        triangle is scored against it instead of mirroring the upper one.
    :param mode: 'swadesh', 'shared', or 'jaccard' (see `get_score`).
    :param ignore_missing: forwarded to `get_score`; if True, concepts
        missing from one language are not discounted in 'swadesh' mode.
    :param kw: ignored extra keyword arguments.
    :return: nested list of shape (len(languages), len(languages)).
    """
    dists = [[0 for _ in range(len(wl.languages))] for _ in range(len(wl.languages))]
    for (i, taxA), (j, taxB) in product2(enumerate(wl.languages)):
        if i < j:
            score = get_score(wl, ref, mode, taxA, taxB, ignore_missing=ignore_missing)
            dists[i][j] = score
            # Without a second reference the matrix is symmetric.
            if not refB:
                dists[j][i] = score
        elif i == j:
            # Diagonal: in 'shared' mode, store the language's own entry count.
            if mode == 'shared':
                dists[i][j] = len(chained_values(wl.get_dict_by_concept(language=taxA)))
        elif i > j and refB:
            # Lower triangle scored against the alternative reference column.
            dists[i][j] = get_score(
                wl, refB, mode, taxA, taxB, ignore_missing=ignore_missing)
    return dists
| lingpy/lingpy3 | lingpy3/ops/wordlist.py | wordlist.py | py | 2,401 | python | en | code | 0 | github-code | 13 |
# Count distinct strings formed by concatenating k of the n input strings
# (order matters, each input string used at most once).
n = int(input())
k = int(input())
a = [input() for _ in range(n)]
from itertools import permutations
# Collect the concatenations in a set: O(1) membership instead of the
# original O(len(b)) list scan, and str.join instead of repeated +=.
b = set()
for v in permutations(range(n), k):
    b.add("".join(a[i] for i in v))
print(len(b))
| syagi/atcoder_training | ant/17_jol2020d.py | 17_jol2020d.py | py | 278 | python | en | code | 0 | github-code | 13 |
36297671050 | from flask_app.config.mysqlconnection import connectToMySQL
import re
from flask import flash
EMAIL_REGEX = re.compile(r'^[a-zA-Z0-9.+_-]+@[a-zA-Z0-9._-]+\.[a-zA-Z]+$')
class User:
db = "email_validation"
    def __init__(self,data):
        # `data` is a dict-like DB row from the `users` table.
        self.id = data["id"]
        self.first_name = data["first_name"]
        self.last_name = data["last_name"]
        self.email = data["email"]
        self.created_at = data["created_at"]
        self.updated_at = data["updated_at"]
@classmethod
def get_all(cls):
query = "SELECT * FROM users;"
results = connectToMySQL(cls.db).query_db(query)
user = []
for row in results:
user.append(cls(row))
return user
    @classmethod
    def save(cls,data):
        """Inserts a new user row; returns the new row id from the driver."""
        # Parameterized INSERT: values are bound from `data` by the driver.
        query = "INSERT INTO users(first_name,last_name,email) VALUES(%(first_name)s,%(last_name)s,%(email)s);"
        return connectToMySQL(cls.db).query_db(query,data)
    @classmethod
    def delete(cls,data):
        """Deletes the user row whose id is data['id']."""
        query = "DELETE FROM users WHERE id = %(id)s;"
        return connectToMySQL(cls.db).query_db(query,data)
    @classmethod
    def get_by_email(cls,data):
        """Returns the User matching data['email'], or False if none exists."""
        query = "SELECT * FROM users WHERE email = %(email)s;"
        results = connectToMySQL(cls.db).query_db(query,data)
        print(f'Here are the results:{results}')
        if len(results) < 1:
            return False
        # Email is expected to be unique, so take the first (only) row.
        row = results[0]
        user = cls(row)
        return user
@staticmethod
def validate_user(user):
is_valid = True
query = "SELECT * FROM users WHERE email = %(email)s;"
results = connectToMySQL(User.db).query_db(query,user)
if len(results) > 0:
flash("Email has already been entered.")
is_valid = False
if not EMAIL_REGEX.match(user["email"]):
flash("Invalid Email Address")
is_valid = False
if len(user["first_name"]) < 3:
flash("First name must be at least 3 characters.")
is_valid = False
if len(user["last_name"]) < 3:
flash("Last name must be at least 3 characters.")
is_valid = False
return is_valid | Matthew-Luk/Python-Bootcamp | Flask_MySQL/Validation/email_validation/flask_app/models/user.py | user.py | py | 2,159 | python | en | code | 0 | github-code | 13 |
42804426077 |
import os
from setuptools import setup, find_packages

# This reads the __version__ variable. All three reads below use context
# managers so the file handles are closed deterministically (the original
# left them to the garbage collector).
with open('src/qclassify/_version.py') as version_file:
    exec(version_file.read())

# README file as long_description
with open('README.md') as readme_file:
    long_description = readme_file.read()

# Read in requirements.txt
with open('requirements.txt') as requirements_file:
    requirements = [r.strip() for r in requirements_file.readlines()]

setup(
    name='qclassify',
    version=__version__,
    description='A Python framework for the variational quantum classifier',
    long_description=long_description,
    install_requires=requirements,
    url='https://github.com/zapatacomputing/QClassify',
    author='caoyudong',
    author_email='yudong@zapatacomputing.com',
    license='Apache-2.0',
    packages=find_packages(where='src'),
    package_dir={'': 'src'},
    zip_safe=False,
    include_package_data=True,
    package_data={
        '': [os.path.join('images', '*.png'),
             os.path.join('images', '*.py')]
    },
    python_requires=">=3.6"
)
| zapatacomputing/QClassify | setup.py | setup.py | py | 980 | python | en | code | 26 | github-code | 13 |
5395795712 | import argparse
import lab.torch as B
import matplotlib.pyplot as plt
import numpy as np
import torch
from wbml.experiment import WorkingDirectory
from wbml.plot import tweak
from convcnp import DualConvCNP, GPGenerator
# Enable GPU if it is available.
if torch.cuda.is_available():
    device = "cuda"  # place model and batches on the GPU
else:
    device = "cpu"
def split_off_classification(batch):
    """Split a batch into a classification part and a regression part.

    A random prefix of the context/target points becomes a binary
    classification task (sign of y mapped into {0, 1}); the remaining
    points stay a regression task.
    """
    num_context = B.shape(batch["x_context"], 1)
    split_point = np.random.randint(low=1, high=num_context - 1)

    def binarize(y):
        # sign in {-1, 0, 1} maps to {0, 0.5, 1}.
        return (B.sign(y) + 1) / 2

    class_part = {
        "x_context_class": batch["x_context"][:, :split_point, :],
        "y_context_class": binarize(batch["y_context"][:, :split_point, :]),
        "x_target_class": batch["x_target"][:, :split_point, :],
        "y_target_class": binarize(batch["y_target"][:, :split_point, :]),
    }
    reg_part = {
        "x_context_reg": batch["x_context"][:, split_point:, :],
        "y_context_reg": batch["y_context"][:, split_point:, :],
        "x_target_reg": batch["x_target"][:, split_point:, :],
        "y_target_reg": batch["y_target"][:, split_point:, :],
    }
    return {**class_part, **reg_part}
def compute_loss(model, batch):
    """Compute the sum of the classification and regression loss functions."""
    class_prob, (reg_mean, reg_std) = model(batch)
    # Clamp the classification probabilities to prevent the loss for NaNing out.
    class_prob = class_prob.clamp(1e-4, 1 - 1e-4)
    # Bernoulli negative log-likelihood of the classification targets.
    class_loss = -B.sum(
        batch["y_target_class"] * B.log(class_prob)
        + (1 - batch["y_target_class"]) * B.log(1 - class_prob)
    )
    # Gaussian-style negative log-likelihood of the regression targets.
    # NOTE(review): a standard Gaussian NLL has log(reg_std) outside the 0.5
    # factor (or log-variance inside); here the log-std term is halved -
    # confirm this is intended.
    reg_loss = 0.5 * B.sum(
        B.log_2_pi
        + B.log(reg_std)
        + ((reg_mean - batch["y_target_reg"]) / reg_std) ** 2
    )
    # `args.alpha` (CLI flag) trades off the two objectives.
    return args.alpha * class_loss + (1 - args.alpha) * reg_loss
def take_first(x):
    """Return the first batch element of `x` as a NumPy array.

    Inputs of rank <= 1 are converted as-is; higher-rank inputs are reduced
    to the first batch element's first channel.
    """
    selected = x[0, :, 0] if B.rank(x) > 1 else x
    return B.to_numpy(selected)
# Parse command line arguments.
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument(
    "--root",
    type=str,
    default="_experiments/experiment",
    help="Directory to store output of experiment.",
)
parser.add_argument(
    "--epochs",
    type=int,
    default=100,
    help="Number of epochs to run for.",
)
parser.add_argument(
    "--tasks_per_epoch",
    type=int,
    default=16_384,
    help="Number of tasks per epoch.",
)
parser.add_argument(
    "--small",
    action="store_true",
    help="Use a small CNN architecture.",
)
parser.add_argument(
    "--rate",
    type=float,
    default=5e-3,
    help="Learning rate.",
)
# `alpha` weights the classification loss versus the regression loss in
# compute_loss.
parser.add_argument(
    "--alpha",
    type=float,
    default=0.5,
    help="Weight assigned to the classification loss.",
)
args = parser.parse_args()
# Setup working directory.
wd = WorkingDirectory(args.root, seed=0, override=True)
# Setup data generator.
gen_train = GPGenerator(num_tasks=args.tasks_per_epoch)
gen_test = GPGenerator(num_tasks=64)
# Construct model.
model = DualConvCNP(small=args.small).to(device)
# Construct optimiser.
opt = torch.optim.Adam(params=model.parameters(), lr=args.rate)
# Run training loop.
for epoch in range(args.epochs):
    print(f"Starting epoch {epoch + 1}")
    # Run training epoch.
    print("Training...")
    for batch in gen_train.epoch(device):
        batch = split_off_classification(batch)
        loss = compute_loss(model, batch)
        # Perform gradient step.
        loss.backward()
        opt.step()
        opt.zero_grad()
    with torch.no_grad():
        # Compute eval loss.
        print("Evaluating...")
        losses = []
        for batch in gen_test.epoch(device):
            batch = split_off_classification(batch)
            losses.append(compute_loss(model, batch))
        losses = B.to_numpy(losses)
        # 95% normal-approximation confidence interval on the mean loss.
        error = 1.96 * np.std(losses) / np.sqrt(len(losses))
        print(f"Loss: {np.mean(losses):6.2f} +- {error:6.2f}")
        # Produce some plots.
        print("Plotting...")
        batch = gen_test.generate_batch(device)
        batch = split_off_classification(batch)
        with B.on_device(device):
            # Set `x_target` to a dense linspace for the plots, but save the original
            # target inputs.
            x_target_class = batch["x_target_class"]
            x_target_reg = batch["x_target_reg"]
            batch["x_target_class"] = B.linspace(torch.float32, *gen_test.x_range, 200)
            batch["x_target_reg"] = B.linspace(torch.float32, *gen_test.x_range, 200)
            class_prob, (reg_mean, reg_std) = model(batch)
        # Plot for classification:
        plt.figure()
        plt.title(f"Classification (Epoch {epoch + 1})")
        plt.scatter(
            take_first(batch["x_context_class"]),
            take_first(batch["y_context_class"]),
            style="train",
            label="Context",
        )
        plt.scatter(
            take_first(x_target_class),
            take_first(batch["y_target_class"]),
            style="test",
            label="Target",
        )
        plt.plot(
            take_first(batch["x_target_class"]),
            take_first(class_prob),
            style="pred",
            label="Prediction",
        )
        tweak(legend_loc="best")
        plt.savefig(wd.file(f"epoch{epoch + 1}_classification.pdf"))
        plt.close()
        # Plot for regression:
        plt.figure()
        plt.title(f"Regression (Epoch {epoch + 1})")
        plt.scatter(
            take_first(batch["x_context_reg"]),
            take_first(batch["y_context_reg"]),
            style="train",
            label="Context",
        )
        plt.scatter(
            take_first(x_target_reg),
            take_first(batch["y_target_reg"]),
            style="test",
            label="Target",
        )
        plt.plot(
            take_first(batch["x_target_reg"]),
            take_first(reg_mean),
            style="pred",
            label="Prediction",
        )
        # Shade the 95% predictive credible band.
        plt.fill_between(
            take_first(batch["x_target_reg"]),
            take_first(reg_mean - 1.96 * reg_std),
            take_first(reg_mean + 1.96 * reg_std),
            style="pred",
        )
        tweak(legend_loc="best")
        plt.savefig(wd.file(f"epoch{epoch + 1}_regression.pdf"))
        plt.close()
| wesselb/gabriel-convcnp | train.py | train.py | py | 6,214 | python | en | code | 0 | github-code | 13 |
28233761216 | import json
import datetime
import natsort
import os
from django.shortcuts import render
from django.db import transaction
from django.http import HttpResponse, JsonResponse
from django.core.exceptions import ObjectDoesNotExist
from django.contrib.auth.mixins import LoginRequiredMixin
from django.db.models import Q
from django.views.generic.base import View
from backups.models import Backup, BackupFile
from backups.backupper import Backupper
from becky.utils import join_file_path, unix_timestamp_to_dt
from settings.models import GlobalParameter
from logs.models import BackupLogger
class BackupView(View):
    """ Handles showing, adding and editing the Backups and their parameters. """
    def get(self, request, **kwargs):
        # Detail view when a backup_id was captured from the URL;
        # otherwise list every backup.
        if 'backup_id' in kwargs:
            return self._get_single_backup(request, kwargs.get('backup_id'))
        else:
            return self._list_backups(request)
    def post(self, request, backup_id):
        # backup_id == -1 is the sentinel for "create a new backup".
        return self._add_backup(request, int(backup_id))
    def _add_backup(self, request, backup_id):
        """
        Adds a new one or edits an old backup model.
        The request body is JSON carrying global fields, file selections
        and provider-specific parameters (consumed by _update_backup).
        """
        backup_data = json.loads(request.body)
        if backup_id == -1: # New Backup!
            backup = Backup()
            backup.save()
            # New backups inherit the globally configured filesystem root.
            fs_root = GlobalParameter.get_global_parameter(key='fs_root')
            backup.add_parameter(key='fs_root', value=fs_root)
        else:
            backup = Backup.objects.get(pk=backup_id)
        # NOTE(review): not wrapped in a transaction -- a failure inside
        # _update_backup can leave a half-initialised Backup behind.
        self._update_backup(request, backup, backup_data)
        return HttpResponse(status=200)
    def _add_global_values(self, backup_model, backup_data):
        """
        Adds 'global' values directly to the Backup model.
        These are values that all Backup models have and are directly
        part of the model, instead of being a generic parameter value.
        Consumed keys are removed from backup_data so the remaining keys
        can later be treated as provider-specific parameters.
        """
        for field in ['name', 'scanner', 'provider', 'running']:
            if field in backup_data:
                setattr(backup_model, field, backup_data[field])
                del backup_data[field]
    def _add_files_to_backup(self, backup_model, backup_data):
        """
        Goes through all user provided selections and updates the files to backup
        accordingly. User may send completely new selections or old ones that
        turn off (i.e. delete a BackupFile model) a backup.
        """
        for selection_path, selection_state in backup_data['selections'].items():
            if selection_state:
                backup_model.add_backup_file(selection_path)
            else:
                backup_model.delete_backup_file(selection_path)
        del backup_data['selections']
    def _add_provider_specific_values(self, backup_model, backup_data):
        """
        Adds provider specific / non-global values as BackupParameters.
        We assume that all global values have already been deleted from the
        backup_data dictionary.
        """
        for key, value in backup_data.items():
            param, created = backup_model.parameters.get_or_create(key=key)
            param.value = value
            param.save()
    def _get_single_backup(self, request, backup_id):
        """
        Returns all data from a single backup model.
        """
        backup = Backup.objects.get(pk=backup_id)
        return JsonResponse({'backup': backup.to_detailed_json()})
    def _list_backups(self, request):
        """
        Returns all Backups as a list of dictionaries.
        Each entry is the detailed JSON representation (the original
        comment promised a slimmer payload, but to_detailed_json is used).
        """
        backups = [backup.to_detailed_json() for backup in Backup.objects.all()]
        return JsonResponse({'backups': backups})
    def _update_backup(self, request, backup_model, backup_data):
        """
        Updates the given backup model with the new data.
        Order matters: global fields and file selections are popped off
        backup_data first, so everything left is a provider parameter.
        """
        # NOTE(review): raises KeyError if the client omits 'id' -- confirm
        # the frontend always sends it.
        del backup_data['id'] # Let's not even attempt to change Django IDs
        self._add_global_values(backup_model, backup_data)
        self._add_files_to_backup(backup_model, backup_data)
        self._add_provider_specific_values(backup_model, backup_data)
        backup_model.save()
        return HttpResponse(status=200)
class DeleteView(View):
    """Deletes a single backup inside an atomic transaction."""
    def post(self, request, backup_id):
        with transaction.atomic():
            Backup.objects.get(pk=backup_id).delete()
        return HttpResponse(status=200)
class BackupRunnerView(View):
    """Starts the backup process for a single backup (GET-triggered)."""
    def get(self, request, backup_id):
        backup = Backup.objects.get(pk=backup_id)
        try:
            backup.run_backup()
        except Exception as err:
            # Record the failure in the backup's status before re-raising.
            print(err)
            backup.set_status('Idle due to an error {}'.format(err), percentage=0, running=0)
            raise err
        return HttpResponse(status=200)
class LogsView(View):
    """ In charge of returning any logs saved during the backup process. """
    def get(self, request, backup_id, **kwargs):
        """
        Returns one page of log rows for the given backup, newest first,
        filtered to the log levels the client enabled.
        Query params: current_page, rows_per_page (ints) and
        levels_to_show (JSON object mapping level name -> bool).
        """
        current_page = int(request.GET.get('current_page'))
        rows_per_page = int(request.GET.get('rows_per_page'))
        levels_to_show = json.loads(request.GET.get('levels_to_show'))
        # Keep only the enabled levels, upper-cased to match stored values.
        levels_to_show = [key.upper() for key, value in levels_to_show.items() if value == True]
        if levels_to_show:
            backup_model = Backup.objects.get(pk=backup_id)
            # OR all requested levels together into a single query filter.
            q = Q()
            for level_to_show in levels_to_show:
                q.add(Q(level=level_to_show), Q.OR)
            logs = backup_model.log_rows.filter(q).order_by('-timestamp')
            # Manual pagination over the ordered queryset.
            start_index = current_page*rows_per_page
            end_index = (current_page+1)*rows_per_page
            sliced_logs = logs[start_index:end_index]
            json_logs = [log.to_json() for log in sliced_logs]
        else:
            # No levels enabled -> nothing to show.
            json_logs = []
        return JsonResponse({'logs': json_logs})
class FilesView(View):
    """ Allows the UI to query found files at a certain path. """

    def get(self, request, backup_id, **kwargs):
        """Return the directory listing at ?path=... as JSON file objects."""
        path = request.GET.get('path')
        backup_model = self._get_backup_model(backup_id)
        files = os.listdir(path)
        file_objects = [self._generate_file_object(path, f, backup_model) for f in files]
        # Natural sort (file2 before file10) on the file name.
        file_objects.sort(key=natsort.natsort_keygen(key=lambda x: x['filename']))
        return JsonResponse({'files': file_objects})

    def _get_backup_model(self, backup_id):
        """
        Returns a backup model for the current backup ID.
        A backup_id of '-1' (a new, not-yet-saved Backup) yields a
        transient in-memory model; anything else fetches the real row.
        """
        if backup_id == '-1':
            return Backup()
        return Backup.objects.get(pk=backup_id)

    def _generate_file_object(self, directory, filename, backup_model):
        """Build the JSON-serialisable description of one file/directory."""
        level = self._calculate_directory_level(join_file_path(directory, filename))
        obj = {
            'filename': filename,
            'directory': directory,
            'level': level,
            # Mark entries the user has already selected for backing up.
            'selected': self._check_file_selection(directory, filename, backup_model),
        }
        if self._path_is_directory(directory, filename, backup_model):
            obj['file_type'] = 'directory'
            obj['files'] = []
        else:
            obj['file_type'] = 'file'
        return obj

    def _calculate_directory_level(self, path):
        """
        Returns how many folders deep *path* is from the filesystem root:
        "/" -> 0, "/a" -> 1, "/a/b" -> 2.  Assumes an absolute path.
        """
        # Drop everything up to (and including) the leading "/".
        path = path.split("/", 1)[1]
        if not path:
            return 0
        return len(path.split("/"))

    def _check_file_selection(self, directory, filename, backup_model):
        """True if the given file/folder is already saved for backing up."""
        path = join_file_path(directory, filename)
        return bool(backup_model.get_backup_file(path))

    def _path_is_directory(self, directory, filename, backup_model):
        """True if the named entry inside *directory* is a directory."""
        # backup_model is unused here but kept so subclasses can override
        # with backend-aware logic.
        return os.path.isdir(join_file_path(directory, filename))
class RestoreFilesView(FilesView):
    """Browse and restore files of a backup at a given point in time."""
    def get(self, request, backup_id, **kwargs):
        """List the files stored remotely at ?path=... for the requested
        backup timestamp (unix epoch in the query string)."""
        path = request.GET.get('path')
        backup_timestamp = request.GET.get('backup_timestamp')
        backup_timestamp = unix_timestamp_to_dt(backup_timestamp)
        backup_model = self._get_backup_model(backup_id)
        # Remote file objects (with .filename / .file_type attributes).
        files = backup_model.get_remote_files(path, backup_timestamp)
        objects = [self._generate_file_object(path, f, backup_model) for f in files]
        objects = natsort.natsorted(objects, key=lambda x: x['filename'])
        return JsonResponse({'files': objects})
    def post(self, request, backup_id, **kwargs):
        """Restore the selected paths to restore_path at the given
        backup timestamp. Body: JSON with selections/restore_path/
        backup_timestamp."""
        data = json.loads(request.body)
        selections = data['selections']
        # Only the selected paths (dict keys) matter, not their values.
        selections = list(selections.keys())
        restore_path = data['restore_path']
        timestamp = unix_timestamp_to_dt(data['backup_timestamp'])
        backup_model = self._get_backup_model(backup_id)
        backup_model.restore_files(selections, restore_path, timestamp)
        return HttpResponse(status=200)
    def _generate_file_object(self, directory, f, backup_model):
        """
        Given a remote file object *f*, creates a JSON-serialisable file
        object. Unlike the parent class, the file type comes from the
        remote metadata instead of the local filesystem.
        """
        level = self._calculate_directory_level(join_file_path(directory, f.filename))
        obj = {'filename': f.filename, 'selected': False, 'directory': directory, 'level': level}
        obj['selected'] = self._check_file_selection(directory, f.filename, backup_model)
        obj['file_type'] = f.file_type
        if f.file_type == 'directory':
            obj['files'] = []
        return obj
    def _check_file_selection(self, directory, filename, backup_model):
        """
        Nothing should ever be checked by default during the restore process, so always return False here.
        """
        return False
class StatusView(View):
    """Reports overall status: the last running backup's status, else Idle."""
    def get(self, request, **kwargs):
        result = {'status_message': 'Idle', 'percentage': '100'}
        for candidate in Backup.objects.all():
            if candidate.is_running():
                # Later running backups overwrite earlier ones, keeping the
                # original "last one wins" behaviour.
                result = candidate.get_status()
        return JsonResponse(result)
| avjves/becky_gui | backend/backups/views.py | views.py | py | 10,897 | python | en | code | 0 | github-code | 13 |
73737688656 | import cv2
import os
import random
def GetFileList(dir, fileList):
    """Recursively collect every file path under *dir* into *fileList*.

    Args:
        dir: File or directory path to scan. A path that is neither a
            file nor a directory is silently ignored.
        fileList: List that accumulates discovered file paths; it is
            mutated in place.

    Returns:
        The same *fileList*, for call-chaining convenience.
    """
    if os.path.isfile(dir):
        fileList.append(dir)
    elif os.path.isdir(dir):
        for entry in os.listdir(dir):
            GetFileList(os.path.join(dir, entry), fileList)
    return fileList
# Gather every image path under ./all, then visit them in shuffled order.
namelist = GetFileList('./all',[]) # len 110000
list_len = len(namelist)
index_queue = [i for i in range(0,list_len)]
random.shuffle(index_queue)
# Running counter used to split the shuffled images into train/valid/test.
count = 0
def save2file(file_name, context):
    """Append *context* to *file_name*, creating the file if needed.

    Uses a ``with`` block so the handle is closed even if the write fails.
    """
    with open(file_name, 'a') as fh:
        fh.write(context)
# Split the shuffled images 90000/10000/10000 into train/valid/test.
# Each image name encodes its haze parameter after the last '_' (a
# multiple of 0.05); the integer class label is parameter/0.05.
for index in index_queue:
    im_pth = namelist[index]
    underlineindex = im_pth.rfind('_')
    level = im_pth[underlineindex+1:-4]
    level = int(float(level)/0.05)
    name = im_pth[6:]  # strip the './all/' prefix
    if count < 90000:  # train
        save2file('train.txt','/train/'+ name +'\n')
        save2file('train_labels.txt',str(level)+'\n')
        save2file('train_paras.txt',im_pth[underlineindex+1:-4]+'\n')
        img = cv2.imread(im_pth)
        im_savepth = './train/'+ name
        cv2.imwrite(im_savepth,img)
    elif count < 100000:  # valid
        # Bug fix: the original condition was `count>=9000 and count<10000`,
        # which is unreachable after `count<90000` -- the validation split
        # was always empty and those images leaked into the test split.
        save2file('valid.txt','/valid/'+ name +'\n')
        save2file('valid_labels.txt',str(level)+'\n')
        save2file('valid_paras.txt',im_pth[underlineindex+1:-4]+'\n')
        img = cv2.imread(im_pth)
        im_savepth = './valid/' + name
        cv2.imwrite(im_savepth, img)
    else:  # test
        save2file('test.txt','/test/'+ name +'\n')
        save2file('test_labels.txt',str(level)+'\n')
        save2file('test_paras.txt',im_pth[underlineindex+1:-4]+'\n')
        img = cv2.imread(im_pth)
        im_savepth = './test/' + name
        cv2.imwrite(im_savepth, img)
    print("process: {} / {} ".format(count,list_len))
    count+=1
| UMJCS/NTU-recipeAndhaze | haze/divide.py | divide.py | py | 1,938 | python | en | code | 0 | github-code | 13 |
18862508788 | from logging import getLogger
from io import BytesIO
from base64 import b64encode
from pandas import read_csv, to_datetime
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot
from flask import Flask, render_template, request
from ..modeling.predictor import Predictor
L = getLogger(__name__)
pyplot.style.use('fivethirtyeight')
app = Flask(__name__, template_folder='ui')
@app.route('/', methods=['GET', 'POST'])
def index():
    """Render the upload form (GET) or run stock-split prediction on an
    uploaded OHLCV CSV file (POST) and render the result page."""
    if request.method == 'GET':
        return render_template('index.html')
    ctx = {}
    # Columns the uploaded CSV must contain.
    cols = ['date', 'open', 'close', 'low', 'high', 'volume']
    try:
        data = request.files['data']
        data = read_csv(data)
        missing_cols = sorted(set(cols) - set(data.columns.tolist()))
        if missing_cols:
            ctx['error'] = 'Missing required column(s): {}'.format(', '.join(missing_cols))
            return render_template('index.html', **ctx)
        data['date'] = to_datetime(data['date'])
        data.dropna(inplace=True)
        p = Predictor()
        # One prediction per row; 1 marks a suspected stock split.
        predicted = p.predict(data)
        positive = data.loc[predicted == 1]
        if len(positive):
            # Plot the close price, circle the predicted split dates, and
            # embed the rendered PNG into the page as base64.
            fig, ax = pyplot.subplots(figsize=(11, 9))
            ax.plot(data['date'], data['close'])
            ax.plot(positive['date'], positive['close'], 'o', ms=10 * 2, mec='r', mfc='none', mew=2)
            pyplot.xticks(fontsize=10)
            pyplot.title('Predicted stock splits')
            pyplot.xlabel('Date')
            pyplot.ylabel('Price')
            img_file = BytesIO()
            pyplot.savefig(img_file, format='png')
            pyplot.clf()
            img_file.seek(0)
            ctx['image'] = b64encode(img_file.read()).decode()
            ctx['dates'] = positive['date'].dt.strftime('%Y-%m-%d').tolist()
        else:
            ctx['message'] = 'No possible stock splits detected'
    except Exception as e:
        # Any failure (bad CSV, model error) is reported back to the user.
        ctx['error'] = str(e)
        L.exception('Error during predict method')
    return render_template('index.html', **ctx)
| vayesy/stock_split_test | src/stocks/server/app.py | app.py | py | 2,012 | python | en | code | 0 | github-code | 13 |
36267847213 | import requests, json
url = 'https://data.epa.gov.tw/api/v2/aqx_p_432?api_key=e8dd42e6-9b8b-43f8-991e-b3dee723a52d&limit=1000&sort=ImportDate%20desc&format=JSON'
# Fetch the latest air-quality records from the Taiwan EPA open-data API.
data = requests.get(url).json()
# The city/site could be read interactively (a = input(...), b = input(...))
# but they are hard-coded to Tainan City / Tainan station below.
for i in data['records']:
    if i['county'] == "臺南市" and i['sitename'] == "臺南" :
        # Print the AQI value followed by its status description.
        x = i['aqi'] + " " + i['status']
        print(x)
| kunglin930111/pm2.5_line | 2.Filter specific data.py | 2.Filter specific data.py | py | 416 | python | en | code | 0 | github-code | 13 |
import tkinter as tk

# Demo window: two checkboxes update a label with the current selection.
window = tk.Tk()
window.title('my window')
window.geometry('500x300')

var = tk.StringVar()  # NOTE(review): unused -- kept for compatibility
label = tk.Label(window, bg='yellow', width=40, text='empty')
label.pack()

# One IntVar per checkbox: 1 when checked, 0 when unchecked.
var1 = tk.IntVar()
var2 = tk.IntVar()

def print_selection():
    """Update the label text to reflect the current checkbox states."""
    python_checked = var1.get() == 1
    cpp_checked = var2.get() == 1
    # Use boolean `and`/`not` instead of the original bitwise `&` on
    # comparison results (same truth table here, clearer intent).
    if python_checked and not cpp_checked:
        label.config(text='I love only python')
    elif cpp_checked and not python_checked:
        label.config(text='I love only c++')
    elif not python_checked and not cpp_checked:
        label.config(text='I do not love either')  # fixed typo: "don not"
    else:
        label.config(text='I love both')

c1 = tk.Checkbutton(window, text='Python', variable=var1, onvalue=1, offvalue=0,
                    command=print_selection)
c2 = tk.Checkbutton(window, text='C++', variable=var2, onvalue=1, offvalue=0,
                    command=print_selection)
c1.pack()
c2.pack()

window.mainloop()
from requests_tor import RequestsTor
import sys
# Route all requests through a local Tor SOCKS proxy (port 9050,
# control port 9051).
rt = RequestsTor(tor_ports=(9050,), tor_cport=9051)
# argv[1] is a file with one hostname per line; results are written to
# "<input>.csv" and echoed to stdout.
with open(sys.argv[1], "r") as fh:
    with open(f"{sys.argv[1]}.csv", "w") as fw:
        for line in fh:
            line = line.rstrip()
            # NOTE(review): bare except hides the failure reason (DNS,
            # timeout, Tor down) -- consider catching requests exceptions.
            try:
                http = str(rt.get(f'http://{line}'))
            except:
                http = '<Unreachable>'
            try:
                https = str(rt.get(f'https://{line}'))
            except:
                https = '<Unreachable>'
            fw.write(f'----------------------------------\n')
            fw.write(f'http://{line}-{http}\n')
            fw.write(f'https://{line}-{https}\n')
            print(f'http://{line}-{http}')
            print(f'https://{line}-{https}')
##Name: Shezan Alam
#Email: shezan.alam48@myhunter.cuny.edu
#Date: September 4th, 2019
#This program draws a red equilateral triangle and a black square on a blue background.
import turtle

wn = turtle.Screen()  # window with a blue background
wn.bgcolor("blue")

tess = turtle.Turtle()  # red, thick-penned turtle for the triangle
tess.color("red")
tess.pensize(5)

alex = turtle.Turtle()  # default (black) turtle for the square

# tess draws an equilateral triangle (three sides, 120-degree turns) ...
for _ in range(3):
    tess.forward(80)
    tess.left(120)
# ... then turns around and moves away from the origin.
tess.right(180)
tess.forward(80)

# alex draws a square (four sides, 90-degree turns).
for _ in range(4):
    alex.forward(50)
    alex.left(90)

wn.exitonclick()
43298823869 | # OpenGL procedural texture shader explanation
# https://stackoverflow.com/questions/67672873/opengl-procedural-texture-shader-explanation
from OpenGL.GLUT import *
from OpenGL.GLU import *
from OpenGL.GL import *
import OpenGL.GL.shaders
import numpy as np
from ctypes import c_void_p
import glm
import math
sh_vert = """
#version 460 core
layout (location = 0) in vec4 a_position;
layout (location = 1) in vec3 a_normal;
layout (location = 2) in vec2 a_uv;
out vec3 v_pos;
out vec3 v_nv;
out vec2 v_uv;
layout (location = 0) uniform mat4 u_projection;
layout (location = 1) uniform mat4 u_view;
layout (location = 2) uniform mat4 u_model;
void main()
{
v_pos = vec3(u_model * a_position);
v_nv = inverse(transpose(mat3(u_model))) * a_normal;
v_uv = a_uv;
gl_Position = u_projection * u_view * u_model * a_position;
}
"""
sh_frag = """
#version 460 core
out vec4 frag_color;
in vec3 v_pos;
in vec3 v_nv;
in vec2 v_uv;
layout (location = 10) uniform vec3 color1;
layout (location = 11) uniform vec3 color2;
layout (location = 12) uniform float frequency;
layout (location = 13) uniform float noiseScale;
layout (location = 14) uniform float ringScale;
layout (location = 15) uniform float contrast;
vec3 mod289(vec3 x) {
return x - floor(x * (1.0 / 289.0)) * 289.0;
}
vec4 mod289(vec4 x) {
return x - floor(x * (1.0 / 289.0)) * 289.0;
}
vec4 permute(vec4 x) {
return mod289(((x*34.0)+1.0)*x);
}
vec4 taylorInvSqrt(vec4 r) {
return 1.79284291400159 - 0.85373472095314 * r;
}
float snoise(vec3 v) {
const vec2 C = vec2(1.0/6.0, 1.0/3.0) ;
const vec4 D = vec4(0.0, 0.5, 1.0, 2.0);
// First corner
vec3 i = floor(v + dot(v, C.yyy) );
vec3 x0 = v - i + dot(i, C.xxx) ;
// Other corners
vec3 g = step(x0.yzx, x0.xyz);
vec3 l = 1.0 - g;
vec3 i1 = min( g.xyz, l.zxy );
vec3 i2 = max( g.xyz, l.zxy );
// x0 = x0 - 0.0 + 0.0 * C.xxx;
// x1 = x0 - i1 + 1.0 * C.xxx;
// x2 = x0 - i2 + 2.0 * C.xxx;
// x3 = x0 - 1.0 + 3.0 * C.xxx;
vec3 x1 = x0 - i1 + C.xxx;
vec3 x2 = x0 - i2 + C.yyy; // 2.0*C.x = 1/3 = C.y
vec3 x3 = x0 - D.yyy; // -1.0+3.0*C.x = -0.5 = -D.y
// Permutations
i = mod289(i);
vec4 p = permute( permute( permute(
i.z + vec4(0.0, i1.z, i2.z, 1.0 ))
+ i.y + vec4(0.0, i1.y, i2.y, 1.0 ))
+ i.x + vec4(0.0, i1.x, i2.x, 1.0 ));
// Gradients: 7x7 points over a square, mapped onto an octahedron.
// The ring size 17*17 = 289 is close to a multiple of 49 (49*6 = 294)
float n_ = 0.142857142857; // 1.0/7.0
vec3 ns = n_ * D.wyz - D.xzx;
vec4 j = p - 49.0 * floor(p * ns.z * ns.z); // mod(p,7*7)
vec4 x_ = floor(j * ns.z);
vec4 y_ = floor(j - 7.0 * x_ ); // mod(j,N)
vec4 x = x_ *ns.x + ns.yyyy;
vec4 y = y_ *ns.x + ns.yyyy;
vec4 h = 1.0 - abs(x) - abs(y);
vec4 b0 = vec4( x.xy, y.xy );
vec4 b1 = vec4( x.zw, y.zw );
vec4 s0 = floor(b0)*2.0 + 1.0;
vec4 s1 = floor(b1)*2.0 + 1.0;
vec4 sh = -step(h, vec4(0.0));
vec4 a0 = b0.xzyw + s0.xzyw*sh.xxyy ;
vec4 a1 = b1.xzyw + s1.xzyw*sh.zzww ;
vec3 p0 = vec3(a0.xy,h.x);
vec3 p1 = vec3(a0.zw,h.y);
vec3 p2 = vec3(a1.xy,h.z);
vec3 p3 = vec3(a1.zw,h.w);
// Normalise gradients
vec4 norm = taylorInvSqrt(vec4(dot(p0,p0), dot(p1,p1), dot(p2, p2), dot(p3,p3)));
p0 *= norm.x;
p1 *= norm.y;
p2 *= norm.z;
p3 *= norm.w;
// Mix final noise value
vec4 m = max(0.6 - vec4(dot(x0,x0), dot(x1,x1), dot(x2,x2), dot(x3,x3)), 0.0);
m = m * m;
return 42.0 * dot( m*m, vec4( dot(p0,x0), dot(p1,x1),
dot(p2,x2), dot(p3,x3) ) );
}
void main() {
//texture change on movement
//float n = snoise(v_pos);
//texture fixed on movement
float n = snoise(vec3(v_uv.x,-0.68, v_uv.y));
float ring = fract(frequency * v_uv.y + noiseScale * n);
ring *= contrast * (1.0 - ring);
// Adjust ring smoothness and shape, and add some noise
float lerp = pow(ring, ringScale) + n;
vec3 base = mix(color1, color2, lerp);
frag_color = vec4(base, 1.0);
}
"""
def display():
    """GLUT display callback: draw the procedurally textured box."""
    glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
    elapsed_ms = glutGet(GLUT_ELAPSED_TIME)
    # Camera: 60-degree perspective projection and a fixed look-at view.
    projection = glm.perspective(glm.radians(60), aspect, 0.1, 20.0)
    view = glm.lookAt(glm.vec3(-1, -8, 4), glm.vec3(-1, 0, -1), glm.vec3(0, 0, 1))
    angle = 0 # animation disabled; was: elapsed_ms * math.pi * 2 / 10000.0
    # Model: tilt the unit cube and stretch it into a long, thin plank.
    model = glm.rotate(glm.mat4(1), glm.radians(-30), glm.vec3(0, 0, 1))
    model = glm.rotate(model, angle, glm.vec3(0, 1, 0))
    model = glm.scale(model, glm.vec3(1, 5, 0.2))
    # Matrix uniforms live at explicit locations 0/1/2 (see vertex shader).
    glUniformMatrix4fv(0, 1, GL_FALSE, glm.value_ptr(projection))
    glUniformMatrix4fv(1, 1, GL_FALSE, glm.value_ptr(view))
    glUniformMatrix4fv(2, 1, GL_FALSE, glm.value_ptr(model))
    # Procedural ring-texture parameters consumed by the fragment shader
    # (locations 10-15): two base colors plus shaping constants.
    glUniform3f(10, 50/255, 40/255, 30/255)
    glUniform3f(11, 200/255, 150/255, 100/255)
    glUniform1f(12, 1.0) # frequency
    glUniform1f(13, 10.0) # noiseScale
    glUniform1f(14, 0.1) # ringScale
    glUniform1f(15, 1.0) # contrast
    glDrawElements(GL_TRIANGLES, 36, GL_UNSIGNED_INT, None)
    glutSwapBuffers()
    glutPostRedisplay() # request continuous redraws
def reshape(width, height):
    """GLUT reshape callback: track the viewport and the aspect ratio
    used by display() when building the projection matrix."""
    global aspect
    glViewport(0, 0, width, height)
    aspect = width / height
# --- GLUT / window setup -------------------------------------------------
glutInit(sys.argv)
glutInitDisplayMode(GLUT_DOUBLE | GLUT_RGBA | GLUT_DEPTH | GLUT_MULTISAMPLE)
glutSetOption(GLUT_MULTISAMPLE, 8)
glutInitWindowSize(640, 480)
glutCreateWindow(b"OpenGL Window")
glutDisplayFunc(display)
glutReshapeFunc(reshape)
# --- cube geometry -------------------------------------------------------
# 8 corner positions and a texture coordinate per corner.
vertices = [(-1,-1,-1), (1,-1,-1), (1, 1,-1), (-1, 1,-1), (-1,-1, 1), (1,-1, 1), (1, 1, 1), (-1, 1, 1)]
uv = [(0,0), (1,0), (1,1), (0,1), (0,0), (1,0), (1,1), (0,1)]
# 6 quad faces as corner indices, plus one outward normal per face.
faces = [[0,1,2,3], [1,5,6,2], [5,4,7,6], [4,0,3,7], [3,2,6,7], [1,0,4,5]]
normals = [(0,0,-1), (1,0,0), (0,0,1), (-1,0,0), (0,1,0), (0,-1,0)]
# Interleave position/normal/uv per face-vertex (8 floats each) and emit
# two triangles (6 indices) per quad face.
attributes = []
indices = []
for si, f in enumerate(faces):
    for i in f:
        attributes.append(list(vertices[i]) + list(normals[si]) + list(uv[i]))
    indices.append([4*si, 4*si+1, 4*si+2, 4*si, 4*si+2, 4*si+3])
attributes = np.array(attributes, dtype=np.float32)
indices = np.array(indices, dtype=np.uint32)
# --- upload geometry to the GPU ------------------------------------------
vao = glGenVertexArrays(1)
vbo = glGenBuffers(1)
ebo = glGenBuffers(1)
glBindVertexArray(vao)
glBindBuffer(GL_ARRAY_BUFFER, vbo)
glBufferData(GL_ARRAY_BUFFER, attributes, GL_STATIC_DRAW)
glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, ebo)
glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices, GL_STATIC_DRAW)
# Attribute layout: location 0 = position (3f), 1 = normal (3f), 2 = uv (2f).
glVertexAttribPointer(0, 3, GL_FLOAT, False, 8 * attributes.itemsize, None)
glVertexAttribPointer(1, 3, GL_FLOAT, False, 8 * attributes.itemsize, c_void_p(3 * attributes.itemsize))
glVertexAttribPointer(2, 2, GL_FLOAT, False, 8 * attributes.itemsize, c_void_p(6 * attributes.itemsize))
glEnableVertexAttribArray(0)
glEnableVertexAttribArray(1)
glEnableVertexAttribArray(2)
# --- shaders and global GL state ------------------------------------------
program = OpenGL.GL.shaders.compileProgram(
    OpenGL.GL.shaders.compileShader(sh_vert, GL_VERTEX_SHADER),
    OpenGL.GL.shaders.compileShader(sh_frag, GL_FRAGMENT_SHADER)
)
glUseProgram(program)
glEnable(GL_MULTISAMPLE) # default
glEnable(GL_DEPTH_TEST)
glClearColor(0.0, 0.0, 0.0, 0.0)
glutMainLoop()
from easydict import EasyDict as edict
# make training faster
# our RAM is 256G
# mount -t tmpfs -o size=140G tmpfs /train_tmp  # mount a 140GB RAM-backed tmpfs at /train_tmp
config = edict()  # dot-notation access to config entries
config.margin_list = (1.0, 0.0, 0.4)  # combined-margin parameters (m1, m2, m3)
config.network = "vit_b_dp005_mask_005"  # ViT-B backbone variant
config.resume = False  # whether to resume training from a previous checkpoint
config.output = None
config.embedding_size = 512
config.sample_rate = 0.3  # class sampling rate (partial FC)
config.fp16 = True  # half-precision (mixed) training
config.weight_decay = 0.1  # L2-style regularisation strength
config.batch_size = 256
config.gradient_acc = 12  # total batchsize is 256 * 12 (gradient accumulation steps)
config.optimizer = "adamw"  # AdamW, an Adam variant with decoupled weight decay
config.lr = 0.001
config.verbose = 2000  # logging/evaluation frequency, in steps
config.dali = False
config.rec = "/train_tmp/WebFace42M"  # dataset path
config.num_classes = 2059906  # number of identity classes
config.num_image = 42474557  # number of images in the training set
config.num_epoch = 40  # total training epochs
config.warmup_epoch = config.num_epoch // 10  # epochs spent ramping the LR up from a small value
config.val_targets = []  # no validation targets configured
| chenqian57/insightface1 | recognition/arcface_torch/configs/wf42m_pfc03_40epoch_8gpu_vit_b.py | wf42m_pfc03_40epoch_8gpu_vit_b.py | py | 1,418 | python | zh | code | 0 | github-code | 13 |
30230069494 | # To-Do List application in Python using a graphical user interface (GUI) with the Tkinter library:
import tkinter as tk
from tkinter import messagebox
# Function to create a new task
def create_task():
    """Read the three entry fields and append a new task to the list box."""
    name = task_name_entry.get()
    due = due_date_entry.get()
    prio = priority_entry.get()
    # All three fields are required.
    if not (name and due and prio):
        messagebox.showerror("Error", "Please fill in all fields.")
        return
    task_list.insert(tk.END, {"name": name, "due_date": due, "priority": prio})
    clear_entries()
# Function to clear input entries
def clear_entries():
    """Reset all three input fields to empty."""
    for entry in (task_name_entry, due_date_entry, priority_entry):
        entry.delete(0, tk.END)
# Function to delete a task
def delete_task():
    """Delete the selected task(s) after asking for confirmation."""
    selected = task_list.curselection()
    if not selected:
        messagebox.showinfo("Information", "Please select a task to delete.")
        return
    if messagebox.askyesno(
            "Confirm", "Are you sure you want to delete the selected task?"):
        # Delete from the highest index down so indices stay valid.
        for idx in sorted(selected, reverse=True):
            task_list.delete(idx)
# Function to initialize the GUI
def initialize_gui():
    """Build the main window and start the Tk event loop.

    Publishes the entry widgets and the list box as module globals so the
    create/delete handlers can reach them. Blocks in mainloop().
    """
    global task_name_entry, due_date_entry, priority_entry, task_list
    root = tk.Tk()
    root.title("To-Do List")
    # Task Entry Frame: labelled entries laid out on a grid.
    entry_frame = tk.Frame(root)
    entry_frame.pack(pady=10)
    tk.Label(entry_frame, text="Task Name:").grid(row=0, column=0, sticky="e")
    task_name_entry = tk.Entry(entry_frame)
    task_name_entry.grid(row=0, column=1, padx=10, pady=5)
    tk.Label(entry_frame, text="Due Date:").grid(row=1, column=0, sticky="e")
    due_date_entry = tk.Entry(entry_frame)
    due_date_entry.grid(row=1, column=1, padx=10, pady=5)
    tk.Label(entry_frame, text="Priority:").grid(row=2, column=0, sticky="e")
    priority_entry = tk.Entry(entry_frame)
    priority_entry.grid(row=2, column=1, padx=10, pady=5)
    add_button = tk.Button(entry_frame, text="Add Task", command=create_task)
    add_button.grid(row=3, columnspan=2, pady=10)
    # Task List Frame: scrollable list box holding the task dicts.
    list_frame = tk.Frame(root)
    list_frame.pack(pady=10)
    scrollbar = tk.Scrollbar(list_frame)
    scrollbar.pack(side=tk.RIGHT, fill=tk.Y)
    task_list = tk.Listbox(list_frame, width=50, yscrollcommand=scrollbar.set)
    task_list.pack(side=tk.LEFT, fill=tk.BOTH)
    scrollbar.config(command=task_list.yview)
    delete_button = tk.Button(root, text="Delete Task", command=delete_task)
    delete_button.pack(pady=10)
    root.mainloop()
# Run the GUI application
if __name__ == "__main__":
initialize_gui()
| Godfaithpython/CODSOFT | To_do_List1.py | To_do_List1.py | py | 2,827 | python | en | code | 0 | github-code | 13 |
71497031699 | # 언어 : Python
# 날짜 : 2022.2.19
# 문제 : BOJ > 지뢰 찾기 (https://www.acmicpc.net/problem/4396)
# 티어 : 실버 5
# =========================================================================
moves = [[0, 1], [1, 0], [-1, 0], [0, -1], [-1, -1], [-1, 1], [1, -1], [1, 1]]


def solution():
    """Build the revealed minesweeper board.

    Reads the padded globals ``N``, ``board``, ``open_graph`` and
    ``land_mines``. Opened safe cells get their adjacent-mine count; if
    any opened cell is a mine, every mine is revealed as "*".
    """
    revealed = [["." for _ in range(N + 2)] for _ in range(N + 2)]
    exploded = False
    for r in range(1, N + 1):
        for c in range(1, N + 1):
            if open_graph[r][c] != "x":
                continue
            if board[r][c] == "*":
                exploded = True
                continue
            # Count mines in the eight neighbouring cells.
            revealed[r][c] = sum(
                1 for dr, dc in moves if board[r + dr][c + dc] == "*"
            )
    if exploded:
        for mr, mc in land_mines:
            revealed[mr][mc] = "*"
    return revealed
# Input and output
N = int(input())
# Read the mine board with a one-cell "." border so neighbour checks in
# solution() never go out of range.
board = [["." for _ in range(N + 2)]]
for _ in range(N):
    board.append(["."] + list(input()) + ["."])
board.append(["." for _ in range(N + 2)])
# Remember every mine position (1-based coordinates inside the border).
land_mines = []
for r in range(1, N + 1):
    for c in range(1, N + 1):
        if board[r][c] == "*":
            land_mines.append([r, c])
# Read the open/closed grid, padded the same way ("x" marks opened cells).
open_graph = [["." for _ in range(N + 2)]]
for _ in range(N):
    open_graph.append(["."] + list(input()) + ["."])
open_graph.append(["." for _ in range(N + 2)])
result = solution()
for row in range(1, N + 1):
    for col in range(1, N + 1):
        print(result[row][col], end="")
    print()
12502208123 | #! /usr/bin/env python3
import sys
import html
import requests
import re
import time
def get_html():
    """Return the contents of the sample HTML page used for offline testing."""
    with open('page-examples/istituto.html') as source:
        return source.read()
def get_word_definition(word):
    """Scrape dizionari.corriere.it for the definition of *word*.

    Returns the first definition longer than 25 characters (falling back
    to the last one found), or '' when the page is missing/malformed or
    the request fails. Raises IndexError only for an empty *word*.
    """
    first_letter = word[0].upper()
    try:
        url = f'https://dizionari.corriere.it/dizionario_italiano/{first_letter}/{word}.shtml'
        # `page` instead of `html` so the imported html module isn't shadowed.
        page = requests.get(url).text
        # Flatten the page so each </dd> ends a line; definition entries
        # can then be matched one per line.
        page = re.sub(r'\n', '', page)
        page = re.sub(r'\s\s+', ' ', page)
        page = re.sub(r'</dd>', '</dd>\n', page)
        candidates = re.findall(r'<dd><p.*</dd>', page)
        # Prefer the first stripped definition longer than 25 chars;
        # otherwise keep the last candidate ('' when none matched).
        definition = ''
        for candidate in candidates:
            definition = re.sub(r'<.*?>', '', candidate)
            if len(definition) > 25:
                break
        return definition
    except Exception:
        # Network failures / missing pages yield an empty definition.
        # (Was a bare `except:`; narrowed so Ctrl-C isn't swallowed. The
        # original also had an unreachable trailing `return ''`.)
        return ''
def ordinary(word):
    """Look the word up exactly as given."""
    return get_word_definition(word)
def disambiguation(word):
    """Look up the disambiguation entry (the site suffixes it with _1)."""
    return get_word_definition(f"{word}_1")
def singular_form(word,last_char_replace):
    """Retry with the final letter replaced (crude singular/gender fallback)."""
    word = word[:-1] + last_char_replace
    return get_word_definition(f"{word}")
def main():
    """Print the definition of the word given as the first CLI argument.

    Tries lookup variations in order (exact, disambiguation, singular
    forms ending in a/o/u/e) until one returns a non-empty definition,
    sleeping briefly between attempts to be polite to the server.
    """
    list_of_variations = [
        ordinary,
        disambiguation,
        lambda w: singular_form(w,'a'),
        lambda w: singular_form(w,'o'),
        lambda w: singular_form(w,'u'),
        lambda w: singular_form(w,'e')
    ]
    word = sys.argv[1]
    definition = ""
    for index,variation in enumerate(list_of_variations):
        definition = variation(word)
        if len(definition)!=0:
            break
        time.sleep(0.5)
    # Decode HTML entities (&egrave; etc.) before printing.
    print(html.unescape(definition))
if __name__=='__main__':
main() | danoan/word-detective | source/dictionaries/it/italian_dictionary.py | italian_dictionary.py | py | 1,663 | python | en | code | 0 | github-code | 13 |
73781036498 | # -*- coding: utf-8 -*-
"""Check Python.
This module will run on changed files to check for Python linting.
"""
# Add Native Libraries
import subprocess
import shlex
def check_pycodestyle(python_files_changed):
    """Run pycodestyle on each changed Python file and fail on findings.

    Args:
        python_files_changed: Iterable of paths to Python files changed on
            the current branch (e.g. from ``git diff``).

    Returns:
        None when every file is clean.

    Raises:
        Exception: If pycodestyle reports any error/warning; the message
            contains pycodestyle's diagnostic output.
    """
    for file_changed in python_files_changed:
        # Pass argv as a list so paths containing spaces survive intact
        # (shlex.split on an interpolated f-string would break them).
        pycodestyle_report = subprocess.run(
            ["pycodestyle", "--show-source", file_changed],
            stdout=subprocess.PIPE,
        )
        # pycodestyle exits with 1 when it found style problems.
        if pycodestyle_report.returncode == 1:
            raise Exception(pycodestyle_report.stdout.decode("utf-8"))
| anirudhmungre/sneaky-lessons | .travis/checks/pycodestyle.py | pycodestyle.py | py | 946 | python | en | code | 1 | github-code | 13 |
from setuptools import setup, find_packages
# used by meta.yaml, do not forget space
requirements = [
    "geopandas >=0.10.2",
    "requests >=2.26.0",
    "numpy >=1.21.3",
    "geojson >=2.5.0",
    "python-dateutil >=2.8.2",
    "graph-tool >=2.43",
    "cairo >=1.16.0",
    "scipy >=1.7.1",
    "more-itertools >=8.10.0",
    "numba >=0.53.1",
    "requests-futures >=1.0.0",
    "pygeos >=0.10.2"
]
# Placeholders kept for the conventional setup.py layout (currently unused).
setup_requirements = []
test_requirements = []
setup(
    author="amauryval",
    author_email='amauryval@gmail.com',
    url="https://github.com/amauryval/osmgt",
    version='0.8.14',
    description="A library to play with OSM roads (and POIs) data using graph tool network library",
    entry_points={},
    install_requires=requirements,
    license="GPL3",
    long_description="",
    include_package_data=True,
    keywords='network POIS roads shortest_path isochrone',
    name='osmgt',
    packages=find_packages(include=["osmgt", "osmgt.*"]),
    # setup_requires=setup_requirements,
    test_suite='tests',
    # tests_require=test_requirements,
    zip_safe=False,
    python_requires=">=3.9",
)
| amauryval/OsmGT | setup.py | setup.py | py | 1,119 | python | en | code | 4 | github-code | 13 |
#!/usr/bin/env python
# coding: utf-8
"""Fit a least-squares line to synthetic data and report its fit quality."""

import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.metrics import mean_squared_error, r2_score

# Fixed seed so the synthetic x/y data are reproducible across runs.
np.random.seed(0)
x = np.random.rand(100, 1)
print(x)

# Ground truth y = 2 + 3x plus uniform noise in [0, 1).
y = 2 + 3 * x + np.random.rand(100, 1)

# Fit an ordinary least-squares model; sklearn solves it directly, so no
# manual cost function or gradient descent is needed.
lr = LinearRegression()
lr.fit(x, y)
y_predicted = lr.predict(x)

# Model evaluation.
mse = mean_squared_error(y, y_predicted)
r2 = r2_score(y, y_predicted)

print("Slope:", lr.coef_)
print("Intercept:", lr.intercept_)
# Label fixed: mean_squared_error returns the MSE, not its square root,
# so the original "Root mean squared error" label was wrong.
print("Mean squared error:", mse)
print("R2 score:", r2)

# Plot the data points and the fitted line.
plt.figure(figsize=(10, 5))
plt.scatter(x, y, s=10)
plt.xlabel("x")
plt.ylabel("y")
plt.plot(x, y_predicted, color="r")
plt.show()
| sawantprajakta/Machine_Learning | MachineLearning/Supervised_Algorithms/Linear_Regression_Sklearn_withoutscratch.py | Linear_Regression_Sklearn_withoutscratch.py | py | 1,335 | python | en | code | 0 | github-code | 13 |
32059734761 | import csv
from abbreviations import us_state_abbrev
# Aggregate confirmed COVID-19 case counts per US state, keyed by the
# two-letter state code from us_state_abbrev.
states = {}
with open('covid-19-dataset-1.csv') as f:
    reader = csv.reader(f)
    next(reader)  # skip the first line with the column heads
    for row in reader:
        # NOTE(review): assumes column 2 holds the state name and column 7
        # the confirmed-case count — confirm against the dataset schema.
        state_name = row[2]
        confirmed = int(row[7])
        # Rows whose state name has no known abbreviation are dropped.
        if state_name in us_state_abbrev.keys():
            if us_state_abbrev[state_name] in states:
                states[us_state_abbrev[state_name]] = states[us_state_abbrev[state_name]] + confirmed
            else:
                states[us_state_abbrev[state_name]] = confirmed
# Write the aggregated totals as StateCode,Confirmed rows.
with open('data-edited.csv', mode='w', newline='') as data_edited:
    state_writer = csv.writer(data_edited, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
    state_writer.writerow(["StateCode", "Confirmed"])
    for state in states:
        state_writer.writerow([state, states[state]])
| 1000monkeys/CoronaDashboard | edit_data.py | edit_data.py | py | 892 | python | en | code | 0 | github-code | 13 |
42804436837 |
from pyquil.gates import *
from pyquil.quil import Program
def x_product(input_vec, qubits_chosen):
    """Encode a classical vector as an n-qubit product state.

    Maps (t1, t2, ..., tn) to the product state
    Rx(t1)|0> Rx(t2)|0> ... Rx(tn)|0>
    by applying one Rx rotation per chosen qubit.

    Args:
        input_vec: list[float]
            Classical input vector; entry i is the rotation angle applied
            to the i-th chosen qubit.
        qubits_chosen: list[int]
            Indices of the qubits the circuit acts on.

    Returns:
        A pyquil Program representing the encoding circuit.
    """
    circuit = Program()
    for idx in range(len(qubits_chosen)):
        circuit += Program(RX(input_vec[idx], qubits_chosen[idx]))
    return circuit
| zapatacomputing/QClassify | src/qclassify/encoding_circ.py | encoding_circ.py | py | 601 | python | en | code | 26 | github-code | 13 |
39315003703 | """
DefaultDotDict - Yet another dictionary with dot notation.
Supports loading from JSON. Acts as defaultdict recursively
adding more DefaultDotDicts when non-existent items are requested.
See main() for a demo.
Master copy lives here:
https://gist.github.com/tbnorth/61d3b75f26637d9f26c1678c5d94cb8e
Terry N. Brown, terrynbrown@gmail.com, Wed Mar 01 09:44:29 2017
"""
import json
class DefaultDotDict(dict):
    """Allow a.x as well as a['x'] for dicts.

    Missing keys are auto-created as empty DefaultDotDicts (for both
    attribute and item access), so deep paths like ``d.a.b.c = 1`` work
    without pre-creating intermediate dicts.  Note this means merely
    *reading* a missing key mutates the dict.
    """

    def __getattr__(self, item):
        # Only called when normal attribute lookup fails, so real dict
        # methods are unaffected; auto-create the key for chained access.
        if item not in self:
            self[item] = DefaultDotDict()
        return self[item]

    def __getitem__(self, item):
        # Same auto-creation behaviour for d[item] access.
        if item not in self:
            self[item] = DefaultDotDict()
        return super(DefaultDotDict, self).__getitem__(item)

    def __setattr__(self, key, value):
        # Attribute assignment writes straight into the dict.
        self[key] = value

    @staticmethod
    def json_object_hook(dct):
        """for JSON's object_hook argument, convert dicts to DefaultDotDicts"""
        return DefaultDotDict(dct)

    @staticmethod
    def json_load(fileobj):
        """used like json.load, but uses DefaultDotDict.json_object_hook"""
        return json.load(fileobj, object_hook=DefaultDotDict.json_object_hook)

    def key_tree(self, aDict=None, level=0):
        """Return an indented tree of keys (a content map).

        Leaf entries show the value's type name; empty dicts are marked
        "(empty)".  At level 0 the result is a single string, at deeper
        (recursive) levels a list of lines.
        """
        if aDict is None:
            aDict = self
        aList = []
        for key in sorted(aDict):
            aList.append(' '*level + key + ':')  # this key
            is_dict = isinstance(aDict[key], dict)  # is a dict of some sort
            # keys in child dict, if it is a dict
            extra = self.key_tree(aDict[key], level+1) if is_dict else []
            aList.extend(extra)
            if not extra:  # otherwise show type
                aList[-1] += ' %s' % type(aDict[key]).__name__
                if is_dict:
                    aList[-1] += " (empty)"
        if level == 0:
            return '\n'.join(aList)
        else:
            return aList


def main():
    """simple test / demo of DefaultDotDict"""
    import os, pprint, tempfile
    a = DefaultDotDict(o=1)
    a.update({'ans': 42})
    a.b[2] = [1, 2]
    a.c.d.e = 'nested'
    # save to a temporary JSON file
    handle, filename = tempfile.mkstemp()
    os.close(handle)
    # BUG FIX: json.dump emits str, so the file must be opened in text
    # mode; the original open(filename, 'wb') raised TypeError on Python 3.
    with open(filename, 'w') as f:
        json.dump(a, f)
    with open(filename) as f:
        new_a = DefaultDotDict.json_load(f)
    os.unlink(filename)
    # loaded as nested DefaultDotDict, not dict
    new_a.point.x = 1.2
    new_a.point.y = 2.1
    # note the difference between `key in new_a` and hasattr(new_a, key):
    # hasattr triggers __getattr__, which auto-creates the key.
    pprint.pprint(a)
    pprint.pprint(new_a)
    print('test' in new_a)         # False
    print(hasattr(new_a, 'test'))  # True (and auto-creates 'test')
    print('test' in new_a)         # now it's True
    print(new_a.key_tree())


if __name__ == '__main__':
    main()
| tbnorth/defaultdotdict | defaultdotdict.py | defaultdotdict.py | py | 2,947 | python | en | code | 0 | github-code | 13 |
10374867540 | import datetime as dt
from typing import Any, Text, Dict, List
import pymongo
import re
from sklearn.cluster import KMeans
from io import BytesIO
import base64
import numpy as np
import difflib
import nltk
from nltk.sentiment import SentimentIntensityAnalyzer
import numpy as np
from scipy import stats
from pymongo import MongoClient, UpdateOne
import pandas as pd
import json
from datetime import datetime, timedelta
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
import os
import matplotlib.pyplot as plt
import math
from sklearn.metrics import mean_squared_error, r2_score
from keras.models import Sequential
from keras.layers import Dense
from keras.layers import LSTM, Flatten
from sklearn.preprocessing import MinMaxScaler
#from keras.callbacks import EarlyStopping
from keras.layers import ConvLSTM2D
from rasa_sdk import Action, Tracker, FormValidationAction
from rasa_sdk.executor import CollectingDispatcher
from rasa_sdk.types import DomainDict
from rasa_sdk.events import AllSlotsReset
from rasa_sdk.events import SlotSet
# setting up the connection to the database
client = pymongo.MongoClient("mongodb://localhost:27017/")
# Get the database
db = client["rasa"]
def create_bigram(w):
    """Return the list of adjacent character bigrams of w.

    E.g. "abc" -> ["ab", "bc"]; strings shorter than 2 chars yield [].
    """
    return [w[i]+w[i+1] for i in range(len(w)-1)]


def get_simularity(w1, w2):
    """Bigram-overlap similarity between two words, in [0.0, 1.0].

    Counts how many of w1's bigrams also occur in w2 (case-insensitive)
    and normalises by the longer bigram list.  The max(..., 1) floor
    makes empty/1-char inputs return 0.0 instead of dividing by zero.
    """
    w1, w2 = w1.lower(), w2.lower()
    bigram1, bigram2 = create_bigram(w1), create_bigram(w2)
    # BUG FIX: the original called bigram2.index(...) inside a bare
    # try/except purely as a membership test (and never used the result);
    # use `in` directly — same counts, no swallowed exceptions.
    common = [bg for bg in bigram1 if bg in bigram2]
    return len(common)/max(len(bigram1), len(bigram2), 1)
from scipy.spatial.distance import euclidean
from sklearn.feature_extraction.text import CountVectorizer
def autocorrect(input_word, coll,search,k=1):
    """Fuzzy-match input_word against words stored in MongoDB collection coll.

    search selects which fields are scanned:
      1 -> glossary fields ('abbrv', 'fullname')
      2 -> physical-network fields (Gouvernorat, Site, Site_Code, ...)
      3 -> 'ERBS Id' field, matched with difflib; returns a *list* of up
           to k close matches instead of a single word

    For search 1/2: returns the bigram-similarity best match when its
    score reaches the 0.6 threshold, otherwise the string "none".
    """
    threshold = 0.6
    if search == 1:
        fields = ['abbrv', 'fullname']
    if search == 2:
        fields = ['Gouvernorat', 'Site', 'Site_Code',
                  "Type d'Installation", 'Longitude', 'Latitude', 'LAC', 'Identifiant']
    # Full collection scan: every selected field of every document is
    # tokenised into the candidate vocabulary.
    cursor = coll.find({})
    distances = []  # NOTE(review): never used — candidate for removal
    dbs = set()
    if search!= 3:
        for document in cursor:
            for field in fields:
                if field in document:
                    word_list = set(str(document[field]).split())
                    dbs.update(word_list)
        # Score every candidate word against the input word.
        similar_words = [(w, get_simularity(input_word, w)) for w in dbs]
        # Candidates below the threshold are demoted to -1, so the winner
        # must genuinely clear the threshold to be accepted.
        # NOTE(review): max() raises ValueError when the collection yields
        # no candidate words — confirm callers guard against empty data.
        best_word = max(similar_words, key=lambda x: x[1] if x[1] >= threshold else -1)
        if best_word[1] < threshold:
            return "none"
        return best_word[0]
    if search == 3:
        fields = ['ERBS Id']
        dbs=[]
        for document in cursor:
            for field in fields:
                if field in document:
                    word_list = str(document[field]).upper()
                    dbs.append(word_list)
        return difflib.get_close_matches(input_word, dbs, n=k)
# Filler tokens stripped from free-text user queries before fuzzy matching.
# Hoisted to a module-level frozenset so the table is built once and each
# membership test is O(1) instead of scanning a freshly built list per word.
_FILLER_WORDS = frozenset(['ARE','GIVE','ME','INFORMATION','INFO','INFOS','WHAT', 'WAS','I','IS','IN','ID','IDENTIFIED','BY','WITH','INCLUDING','AND','FOR','WANT','SITES','FEATURES'])


def remove_words(s):
    """Return s with known filler words removed (case-insensitive).

    Kept words preserve their original casing and order; the result is
    re-joined with single spaces.
    """
    return ' '.join(word for word in s.split() if word.upper() not in _FILLER_WORDS)
class ValidateDefForm(Action):
    """Rasa action: look up network-feature definitions in the glossary.

    Fuzzy-matches each word of the user's message against the "chap1"
    collection and replies with matching abbreviation/definition entries.
    More than 10 hits are written to a text file on the Desktop instead.
    """

    def name(self) -> Text:
        return "Action_Validate_feature"

    def run(
        self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict
    ) -> list[Dict[Text, Any]]:
        # Raw user text; filler words are stripped before matching.
        slot_values = tracker.latest_message.get("text")
        search = 1
        aff = "Sorry, feature is yet to be added"
        if slot_values == None:
            dispatcher.utter_message("No terms were added")
            return {}
        else:
            # Get the collection
            slot_values= remove_words(slot_values)
            collection = db["chap1"]
            slot_value = "none"
            # Keep the last token that fuzzy-matches something in the DB.
            # NOTE(review): autocorrect is called up to three times per
            # token (once with x.upper(), twice with x) — each call scans
            # the whole collection; confirm this is acceptable, and note
            # the upper/lower inconsistency between the calls.
            for x in slot_values.split():
                if autocorrect(x.upper(), collection, search) != None:
                    if autocorrect(x, collection, search) != "none":
                        slot_value = autocorrect(x, collection, search)
            print(slot_value)
            if slot_value == "none":
                dispatcher.utter_message(
                    "Please verify the value sent")
                return {}
            else:
                # Substring match on either the abbreviation or full name.
                query = {
                    "$or": [
                        {"abbrv": {"$regex": slot_value}},
                        {"fullname": {"$regex": slot_value}}
                    ]
                }
                documents = collection.find(query)
                # loop through the matching documents and print their fields
                i = 1
                if collection.count_documents(query) > 0:
                    # NOTE(review): Cursor.count() is removed in pymongo 4;
                    # confirm the pinned pymongo version or switch to
                    # collection.count_documents(query).
                    if documents.count()<=10:
                        for document in documents:
                            aff = str(i) + " - " + document['abbrv'] + " : " + document['fullname'] + " " + document['definition']
                            if document['others'] != "":
                                aff = str(i) + " - " + document['abbrv'] + " : " + document['fullname'] + " " + document['definition'] +" for more information: " + document['others']
                            print(aff)
                            dispatcher.utter_message(aff)
                            i += 1
                        dispatcher.utter_message(
                            "number of features found with "+slot_value+" is "+str(i-1))
                    else:
                        # Too many hits for chat: dump them to a text file
                        # in "<home>/Desktop/save folder" instead.
                        names=[]
                        i = 1
                        folder_path = os.path.join(os.path.expanduser("~"), "Desktop", "save folder")
                        os.makedirs(folder_path, exist_ok=True)
                        folder_path=folder_path+'/features_'+slot_value+'.txt'
                        file= open(folder_path, 'w')
                        for document in documents:
                            aff = str(i) + " - " + document['abbrv'] + " : " + document['fullname'] + " " + document['definition']+"\n"
                            if document['others'] != "":
                                aff = str(i) + " - " + document['abbrv'] + " : " + document['fullname'] + " " + document['definition'] +" for more information: " + document['others']+"\n"
                            file.write(aff)
                            names.append(document['abbrv'])
                            i += 1
                        file.close()
                        dispatcher.utter_message("The file is saved in "+ folder_path)
                        my_string = ' - '.join(names)
                        dispatcher.utter_message(my_string)
                        dispatcher.utter_message(
                            "number of features found with "+slot_value+" is "+str(i-1))
            return {}
# the reset all slots action
class ActionResetAllSlots(Action):
    """Rasa action that clears every slot in the conversation tracker."""

    def name(self):
        return "action_reset_all_slots"

    def run(self, dispatcher, tracker, domain):
        # Emitting AllSlotsReset tells Rasa to wipe all slot values.
        return [AllSlotsReset()]
# sending definition
class ActionSiteInfo(Action):
    """Rasa action: return physical-network records for a requested site.

    Fuzzy-matches the user's words against the "reseau-physique"
    collection and utters the matching documents; more than 10 matches
    are exported to a CSV on the Desktop instead.
    """

    def name(self) -> Text:
        return "action_site_info"

    def run(
        self, dispatcher: CollectingDispatcher, tracker: Tracker, domain: Dict
    ) -> list[Dict[Text, Any]]:
        slot_values = tracker.latest_message.get("text")
        search = 2
        aff = "Sorry, physical is yet to be added"
        if slot_values == None:
            dispatcher.utter_message("No terms were added")
            return {}
        else:
            slot_values= remove_words(slot_values)
            # Get the collection
            collection = db["reseau-physique"]
            slot_value = "none"
            # Keep the last token that fuzzy-matches a known value.
            for x in slot_values.split():
                print(x)
                print(autocorrect(x.upper(), collection, search))
                if autocorrect(x.upper(), collection, search) != None:
                    if autocorrect(x.upper(), collection, search) != "none":
                        slot_value = autocorrect(x.upper(), collection, search)
            if slot_value == "none":
                dispatcher.utter_message("Physical network can't be found")
                return {}
            else:
                # Exact match on any of the known site columns.
                documents = collection.find({"$or": [{"Site": slot_value},
                                                     {"Site_Code": slot_value}, {"Identifiant": slot_value}, {"BSC": slot_value}, {"Bande de fréquences": slot_value}, {"Gouvernorat": slot_value}, {"HBA(m)": slot_value}, {"LAC": slot_value}, {"Latitude": slot_value}, {"Longitude": slot_value}, {"Puissance isotrope rayonnée équivalente (PIRE) dans chaque secteur": slot_value}, {"Secteur": slot_value}, {"Type d'Installation": slot_value}, {"azimut du rayonnement maximum dans chaque secteur": slot_value}]})
                print(documents)
                # NOTE(review): find() never returns None, so this test is
                # always true; also Cursor.count() is removed in pymongo 4.
                if documents != None:
                    if documents.count()<=10:
                        i = 0
                        aff = ""
                        for document in documents:
                            i += 1
                            # Remove the _id field from the document.
                            document = {**document, "_id": None}
                            aff = str(document)
                            dispatcher.utter_message(aff)
                        dispatcher.utter_message(
                            "the number of sites with "+slot_value+" is "+str(i))
                    else:
                        # Too many hits for chat: export to CSV instead.
                        aff=[]
                        liste=[]
                        for document in documents:
                            aff.append(document['Site_Code'])
                            # Remove the _id field from the document.
                            document = {**document, "_id": None}
                            liste.append(document)
                        my_string = ' - '.join(aff)
                        dispatcher.utter_message(my_string)
                        df = pd.DataFrame(liste)
                        folder_path = os.path.join(os.path.expanduser("~"), "Desktop", "save folder")
                        os.makedirs(folder_path, exist_ok=True)
                        # Save the DataFrame as a CSV file in the new folder.
                        df.to_csv(os.path.join(folder_path, slot_value+'_result.csv'), index=False)
                        dispatcher.utter_message("The file is saved in "+ folder_path)
                        dispatcher.utter_message(
                            "the number of sites with "+slot_value+" is "+str(documents.count()))
            return {}
def findsolution(dispatcher, x):
    """KPI rule engine: evaluate counter formulas and attach solutions.

    Reads rule rows from the "Chap2" collection (category, next-steps,
    solution text, formula, counters) and KPI counters from "Compteurs",
    evaluates each applicable formula per site, builds a per-site
    solution string for every failed check, exports a validation CSV to
    the Desktop and writes the solution back to MongoDB ("Solution").

    NOTE(review): the `x` parameter is unused — confirm before removing.
    """
    client = pymongo.MongoClient("mongodb://localhost:27017/")
    db = client["rasa"]
    collection = db["Chap2"]
    data = list(collection.find())
    # create a Pandas DataFrame from the list of dictionaries
    Xdf = pd.DataFrame(data)
    if Xdf.empty:
        print("please add data to the database")
        return {}
    Xdf = Xdf.drop('_id', axis=1)
    paramlist = Xdf.values.tolist()
    solutionslist = []
    # NOTE(review): `next` shadows the builtin of the same name inside
    # this function.
    next = []
    collection = db['Compteurs']
    # retrieve documents with specified columns
    documents = collection.find({})
    df = pd.DataFrame(list(documents))
    # Normalise counter column names: strip "(%)" characters and spaces
    # so they can be referenced from df.eval() formulas.
    new_column_names = df.columns.str.strip("(%)")
    df.columns = new_column_names
    new_column_names = df.columns.str.replace(" ",'')
    df.columns = new_column_names
    # NOTE(review): the next line is a no-op expression statement.
    new_column_names
    k = 0
    j = 0
    # Rule row layout:
    # param[0] == category
    # param[1] == next (comma-separated follow-up categories)
    # param[2] == solution text ('not ready' means non-terminal)
    # param[3] == formula evaluated over the counter columns
    # param[4] == list of counters used by the formula ('none' if none)
    for param in paramlist:
        param[1] = param[1].replace(' ', '')
        param[0] = param[0].replace(' ', '')
        # param[2] = param[2].replace(' ', '')
        if param[2] == 'not ready' and param[4] == 'none':
            next = param[1].split(",")
        if param[0] in next:
            if param[4] == 'none' and param[2] != 'not ready':
                solutionslist.append(param[4])
        if param[2] == 'not ready' and param[4] != 'none':
            # Evaluate the rule's formula when its counters exist in df;
            # the boolean result lands in a "resultat <category>" column.
            for x in param[4]:
                if x.replace(" ", "") in df.columns:
                    k = 1
            if k == 1:
                i = param[4]
                i = [s.replace(' ', '') for s in i]
                if (i[0] in df.columns) :
                    j += 1
                    result = df.eval(param[3])
                    df["resultat "+param[0]] = result
    s = []
    for j in range(len(df)):
        s.append('')
    solution = []
    sol = ""
    solist = []
    # Determine a solution to the problems: for every site row, collect
    # the solution texts of rules reachable from each failed check.
    next = ""
    for i in range(len(df)):
        for col in df.columns:
            if df[col][i] == False:
                if "resultat" in col:
                    solist = []
                    x = col.replace("resultat ", "")
                    my_string = ''
                    for param in paramlist:
                        if param[0] == x:
                            next = param[1]
                        if next != "":
                            for y in next.split(","):
                                if y == param[0]:
                                    if param[2] not in solution:
                                        solution.append(param[2])
                                    my_string = ' '.join(solution)
                                    my_string = 'for '+x+' : '+my_string
                    if my_string not in solution:
                        solist.append(my_string)
                    sol = ''.join(solist)
                    if sol not in s[i]:
                        s[i] = s[i]+sol+' ;\n '
                    next = ""
    df['solution'] = s
    # select columns A and C by name using loc, and convert to a list
    selected_columns = df.loc[:, ['ERBSId', 'solution']].values.tolist()
    i=0
    s=[]
    for col in selected_columns:
        if col[1] != '':
            i+=1
            s.append(col[0])
    my_string = ' , '.join(s)
    dispatcher.utter_message("there are problems in "+str(i)+" sites : "+my_string)
    j=0
    s=df['solution']
    id=df['ERBSId']
    folder_path = os.path.join(os.path.expanduser("~"), "Desktop", "save folder")
    os.makedirs(folder_path, exist_ok=True)
    # Save the DataFrame as a CSV file in the new folder.
    df=df.drop("_id",axis=1)
    df.to_csv(os.path.join(folder_path, 'KPI_Validation.csv'), index=False)
    dispatcher.utter_message("The validation file is saved in "+folder_path)
    # Persist each site's solution text back to MongoDB.
    # NOTE(review): written under key 'Solution' (capital S) while the
    # DataFrame column is 'solution' — other actions read both spellings;
    # confirm which is canonical.
    for i in id:
        collection.update_many(
            {'ERBS Id':i},
            {"$set":
                {
                    "Solution": s[j]
                }
            })
        j+=1
# the reset all slots action
class ActionProblemSolve(Action):
    """Rasa action: run the KPI rule engine (findsolution) over all sites."""

    def name(self):
        return "action_problem_solve"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # The second argument of findsolution is unused by the engine.
        findsolution(dispatcher,0)
        return {}
# the reset all slots action
class ActionInfoProblem(Action):
    """Rasa action: utter the stored solution text for every problem site."""

    def name(self):
        return "action_Info_Problem"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        collection = db['Compteurs']
        # retrieve documents with specified columns
        documents = collection.find({})
        df = pd.DataFrame(list(documents))
        # NOTE(review): reads column 'solution' (lowercase) although
        # findsolution persists 'Solution' (capital S) to MongoDB — this
        # .loc raises KeyError unless both fields exist; confirm.
        selected_columns = df.loc[:, ['ERBS Id', 'solution']].values.tolist()
        for col in selected_columns:
            if col[1] != '':
                dispatcher.utter_message(col[0]+':\n'+col[1])
        return{}
class ActionSiteProblem(Action):
    """Rasa action: report the KPI solution for one site named by the user.

    Extracts the site token after a delimiter word ("site"/"for"/...),
    prefixes it with "4G", fuzzy-matches it against stored 'ERBS Id'
    values and utters the persisted solution for the matched site.
    """

    def name(self):
        return "action_Site_Problem"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        client = pymongo.MongoClient("mongodb://localhost:27017/")
        db = client["rasa"]
        collection = db['Compteurs']
        msg=tracker.latest_message.get("text")
        print(msg)
        # Split the message on the first delimiter and keep what follows.
        # NOTE(review): [1] raises IndexError when no delimiter occurs in
        # the message — confirm intent-level guarantees.
        delimiters = [" site "," for ", " of "," to "," in "]
        regex_pattern = '|'.join(map(re.escape, delimiters))
        split_string = re.split(regex_pattern, msg)[1]
        print(re.split(regex_pattern, msg))
        split_string='4G'+split_string
        split_string=split_string.upper()
        # search=3 -> difflib match against 'ERBS Id'; returns a list.
        site=autocorrect(split_string,collection,3)
        print(site)
        if len(site)!=0:
            site=site[0]
        # retrieve documents with specified columns
        documents = collection.find({})
        df = pd.DataFrame(list(documents))
        # Lazily run the rule engine if solutions were never computed.
        if 'Solution' not in df.columns:
            findsolution(dispatcher,0)
        selected_columns = df.loc[:, ['ERBS Id', 'Solution']].values.tolist()
        # for/else: the else branch runs only when no row matched and
        # returned (there is no break in the loop body).
        for col in selected_columns:
            if col[0].upper() == site:
                if col[1]=="":
                    dispatcher.utter_message("there's no problem with "+col[0])
                else:
                    dispatcher.utter_message('the solution for '+col[0]+' is : '+ col[1])
                return [SlotSet("site", col[0])]
        else:
            dispatcher.utter_message('please verify input')
            return [SlotSet("site", site)]
sia = SentimentIntensityAnalyzer()
def sentiment_analysis(sentence):
    """Map a sentence's VADER compound score to a coarse profitability label.

    Returns 'good' for compound > 0.3, 'low' for compound < 0.01,
    'normal' otherwise (uses the module-level analyzer `sia`).
    """
    compound = sia.polarity_scores(sentence)['compound']
    if compound > 0.3:
        return 'good'
    if compound < 0.01:
        return 'low'
    return 'normal'
class ActionClassifySiteML(Action):
    """Rasa action: cluster sites by traffic and report degraded KPIs.

    Maps the user's sentence to a profitability level ('good'/'normal'/
    'low') via sentiment analysis, clusters sites into 3 traffic tiers
    with KMeans, then lists sites of the requested tier whose KPIs look
    degraded; results are also exported to a Desktop CSV.
    """

    def name(self):
        return "action_classify_site_ML"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        # NOTE(review): this local analyzer is unused — sentiment_analysis
        # uses the module-level `sia`.
        sia = SentimentIntensityAnalyzer()
        sentence=tracker.latest_message.get("text")
        resultsent = sentiment_analysis(sentence)
        print(resultsent)
        client = pymongo.MongoClient("mongodb://localhost:27017/")
        db = client["rasa"]
        collection = db["Traffic"]
        data = list(collection.find())
        # create a Pandas DataFrame from the list of dictionaries
        result_traffic = pd.DataFrame(data)
        # KPI counters per cell
        mycol = db["KPIs"]
        # Retrieve all data from the collection
        data = mycol.find()
        result= pd.DataFrame(list(data))
        result=result.fillna(0)
        result_traffic=result_traffic.drop(['Hour','_id','EUtranCell Id','Date'],axis=1)
        result=result.fillna(0)
        # NOTE(review): .mean() without axis=1 returns per-COLUMN means
        # (a Series indexed by column names); assigning it row-aligned
        # likely yields NaN — probably intended .mean(axis=1). Confirm.
        result['Accessibility'] = result[['S1 Sig Succ Rate', 'RRC Setup Succ Rate', 'E-RAB Estab Succ Rate']].mean()
        print(result['Accessibility'])
        result = result.drop(['_id','S1 Sig Succ Rate', 'RRC Setup Succ Rate','EUtranCell Id', 'E-RAB Estab Succ Rate'], axis=1)
        # Aggregate per site: mean KPIs, total traffic.
        grouped = result.groupby('ERBS Id').mean()
        grouped_traffic = result_traffic.groupby('ERBS Id').sum()
        df = pd.concat([grouped, grouped_traffic], axis=1)
        df=df.sort_values(['Trafic PS (Gb)'])
        df=df.reindex()
        X = df[['Trafic PS (Gb)']].values
        # Cluster the sites into 3 traffic tiers.
        model = KMeans(n_clusters=3)
        model.fit(X)
        labels = model.labels_
        df['cluster'] = labels
        # Relabel clusters 0..2 in increasing order of mean traffic so the
        # mapping to 'low'/'normal'/'good' below is deterministic.
        cluster_stats = df.groupby('cluster')['Trafic PS (Gb)'].mean()
        label_map = dict(zip(cluster_stats.sort_values().index, range(3)))
        df['cluster'] = df['cluster'].map(label_map)
        df['profitability'] = df['cluster'].map({0: 'low', 1: 'normal',2: 'good'})
        # Keep the requested tier, then only sites with degraded KPIs.
        selected_df = df[df['profitability'] == resultsent]
        selected_df=selected_df[(selected_df['Call Drop Rate']<0.5) | (selected_df['Accessibility'] <98)]
        # Create a new folder on the desktop
        folder_path = os.path.join(os.path.expanduser("~"), "Desktop", "save folder")
        os.makedirs(folder_path, exist_ok=True)
        # Save the DataFrame as a CSV file in the new folder.
        selected_df.to_csv(os.path.join(folder_path, 'Degraded KPIs + '+resultsent+' Traffic.csv'), index=False)
        selected_df =selected_df.reset_index('ERBS Id')
        name_string = selected_df['ERBS Id'].to_string(index=False)
        print(name_string)
        dispatcher.utter_message("the sites with a "+resultsent+" profitability and a degraded KPI are : ")
        dispatcher.utter_message(name_string)
        dispatcher.utter_message("the csv file containing the results is in "+ folder_path)
        return{}
class ActionPredictTraffic(Action):
    """Rasa action: train a ConvLSTM model and forecast 24h of traffic.

    Pipeline: extract the site name from the message -> load its hourly
    'Trafic PS (Gb)' series from MongoDB -> drop z-score outliers ->
    min-max scale -> train a small ConvLSTM2D network on 24-step windows
    (retraining up to 4 times while R2 < 0.65) -> roll the model forward
    24 hours, save a CSV + plot, and reply with the base64 plot image.

    NOTE(review): training a Keras model synchronously inside a chat
    action blocks the action server for the whole fit — confirm this is
    acceptable for the deployment.
    """

    def name(self):
        return "action_predict_traffic_ML"

    def run(self, dispatcher: CollectingDispatcher,
            tracker: Tracker,
            domain: Dict[Text, Any]) -> List[Dict[Text, Any]]:
        client = pymongo.MongoClient("mongodb://localhost:27017/")
        db = client["rasa"]
        collection = db["Traffic"]
        msg=tracker.latest_message.get("text")
        # Take the text after the first delimiter word as the site name.
        # NOTE(review): [1] raises IndexError when no delimiter matches.
        delimiters = ["site","for", "of","to","in"]
        regex_pattern = '|'.join(map(re.escape, delimiters))
        split_string = re.split(regex_pattern, msg)[1]
        split_string='4G'+split_string
        split_string=split_string.upper()
        # search=3 -> difflib match against 'ERBS Id'; returns a list.
        site=autocorrect(split_string,collection,3)
        print(site)
        if len(site)!=0:
            site=site[0]
        print(site)
        dispatcher.utter_message("Predictions for : "+site)
        # Connect to MongoDB
        client = pymongo.MongoClient("mongodb://localhost:27017/")
        db = client["rasa"]
        mycol = db["Traffic"]
        # Retrieve all data from the collection
        data = mycol.find()
        result_traffic= pd.DataFrame(list(data))
        result_traffic['ERBS Id']=result_traffic['ERBS Id'].str.upper()
        site_data = result_traffic[result_traffic['ERBS Id'] == site]
        data_to_sum = site_data[['Date', 'Hour','Trafic PS (Gb)']]
        # Hourly totals for the site, indexed by Date after dropping Hour.
        grouped_data = data_to_sum.groupby(['Date', 'Hour']).sum()
        grouped_data=grouped_data.reset_index('Hour')
        grouped_data=grouped_data.drop("Hour",axis=1)
        # Drop outliers more than 3 standard deviations from the mean.
        grouped_data = grouped_data[(np.abs(stats.zscore(grouped_data['Trafic PS (Gb)'])) < 3)]
        # Convert pandas dataframe to numpy array and scale to [0, 1].
        dataset = grouped_data.values
        scaler = MinMaxScaler(feature_range=(0, 1))
        dataset = scaler.fit_transform(dataset)
        # 66/34 chronological train/test split (no shuffling).
        train_size = int(len(dataset) * 0.66)
        test_size = len(dataset) - train_size
        train, test = dataset[0:train_size,:], dataset[train_size:len(dataset),:]
        # Build supervised windows: X is the seq_size previous values,
        # Y the value immediately after the window.
        def to_sequences(dataset, seq_size=1):
            x = []
            y = []
            for i in range(len(dataset)-seq_size-1):
                window = dataset[i:(i+seq_size), 0]
                x.append(window)
                y.append(dataset[i+seq_size, 0])
            return np.array(x),np.array(y)
        seq_size = 24  # Number of time steps to look back
        # Larger sequences (look further back) may improve forecasting.
        model = Sequential()
        model.add(ConvLSTM2D(filters=64, kernel_size=(1,1), activation='relu', input_shape=(1, 1, 1, seq_size)))
        model.add(Flatten())
        model.add(Dense(1))
        model.add(Dense(32))
        model.add(Dense(1))
        model.add(Dense(32))
        model.add(Dense(1))
        model.add(Flatten())
        model.compile(optimizer='Nadam', loss='mean_squared_error')
        print(model.summary())
        # Retrain up to 4 times, stopping early once R2 reaches 0.65.
        r2=0.5
        i=0
        while(i<4):
            i+=1
            if(r2<0.65):
                trainX, trainY = to_sequences(train, seq_size)
                testX, testY = to_sequences(test, seq_size)
                print("Shape of training set: {}".format(trainX.shape))
                print("Shape of test set: {}".format(testX.shape))
                # ConvLSTM2D expects (samples, time, rows, cols, channels).
                trainX = trainX.reshape((trainX.shape[0], 1, 1, 1, seq_size))
                testX = testX.reshape((testX.shape[0], 1, 1, 1, seq_size))
                model.fit(trainX, trainY, validation_data=(testX, testY),
                          verbose=2, epochs=50)
                # make predictions
                trainPredict = model.predict(trainX)
                testPredict = model.predict(testX)
                # Invert the min-max scaling so scores are in real units.
                trainPredict = scaler.inverse_transform(trainPredict)
                trainY = scaler.inverse_transform([trainY])
                testPredict = scaler.inverse_transform(testPredict)
                testY = scaler.inverse_transform([testY])
                # calculate root mean squared error
                trainScore = math.sqrt(mean_squared_error(trainY[0], trainPredict[:,0]))
                print('Train Score: %.2f RMSE' % (trainScore))
                testScore = math.sqrt(mean_squared_error(testY[0], testPredict[:,0]))
                print('Test Score: %.2f RMSE' % (testScore))
                r2 = r2_score(testY[0], testPredict[:,0])
                print('R2 score:', r2)
        # Only report/forecast when the model explains some variance.
        if(r2>0):
            dispatcher.utter_message('Train Score par RMSE: '+str(trainScore))
            dispatcher.utter_message('Test Score par RMSE: ' + str(testScore))
            dataset=scaler.inverse_transform(dataset)
            dispatcher.utter_message('R2 score: '+ str(r2))
            # Forecast: roll the model forward one step at a time, feeding
            # each prediction back into the input window.
            prediction = []  # predictions (scaled) accumulated here
            dates = []  # matching future timestamps
            current_batch = test[-seq_size:,0]  # Final data points in test
            last_datetime = grouped_data.index[-1]
            current_batch = current_batch.reshape(1, 1, 1, 1, seq_size)  # Reshape
            # Predict future, beyond test dates
            future = 24  # hours ahead
            for i in range( future):
                current_pred = model.predict(current_batch)[0]
                prediction.append(current_pred)
                new_datetime = last_datetime + pd.DateOffset(hours=i+1)
                dates.append(new_datetime)
                new_value = np.array([[[[current_pred]]]])
                # remove the first value
                current_batch = current_batch[:, :, :, :, 1:]
                # add the new value at the end
                current_batch = np.concatenate((current_batch, new_value), axis=4)
            prediction= scaler.inverse_transform(prediction)  # back to real units
            s1 = pd.DataFrame(dataset, index=grouped_data.index)
            df=pd.DataFrame(prediction,index=dates)
            # Plot history + forecast and ship it as a base64 PNG.
            plt.figure(figsize=(17,10))
            plt.plot(s1)
            plt.plot(df)
            plt.title("Traffic Prediction")
            plt.xlabel("Datetime")
            plt.ylabel("Traffic Volume")
            plt.legend(('Actual', 'Predicted'))
            buf = BytesIO()
            plt.savefig(buf, format='png')
            buf.seek(0)
            # Encode the image as base64
            image = base64.b64encode(buf.read()).decode('utf-8')
            folder_path = os.path.join(os.path.expanduser("~"), "Desktop", "save folder")
            os.makedirs(folder_path, exist_ok=True)
            # Save the DataFrame as a CSV file in the new folder.
            df=df.reset_index()
            df.to_csv(os.path.join(folder_path, site+'Prediction_result.csv'), index=False)
            dispatcher.utter_message("the site predictions are saved in : "+ folder_path)
            # Send the image using the dispatcher
            dispatcher.utter_message(image=f"data:image/png;base64,{image}")
        return [SlotSet("site", site)]
73726270418 | # dead simple file-as-nosql thingie.
import json
import logging
logger = logging.getLogger(__name__)
class JsonFile(object):
    """Dead-simple file-backed "NoSQL" store: a JSON list of dicts.

    Records are deduplicated by a single key (``_cmp_key``); existing
    records are never updated, and ``save()`` rewrites the whole file.
    """

    def __init__(self, _json, _existing_ids, _cmp_key, _filename):
        self._json = _json                  # list: all records
        self._existing_ids = _existing_ids  # set: cmp_key values already stored
        self._cmp_key = _cmp_key            # string: name of the dedup key
        self._filename = _filename          # path of the backing file
        self._new = []                      # records added since the last save

    @classmethod
    def open(cls, jsonfile, cmp_key="link"):
        """Load ``jsonfile``; return an empty store if it does not exist."""
        try:
            with open(jsonfile, "r") as f:
                j = json.loads(f.read())
                e = set([a[cmp_key] for a in j])
                logger.debug("Loaded {}, {} records".format(jsonfile, len(j)))
                return cls(j, e, cmp_key, jsonfile)
        except IOError:
            # CONSISTENCY FIX: log through the module logger; the original
            # called logging.info() on the root logger only in this branch.
            logger.info("New jsonfile: {}".format(jsonfile))
            return cls([], set(), cmp_key, jsonfile)

    def has_item(self, item):
        """True if a record with the same cmp_key value is already stored."""
        return item[self._cmp_key] in self._existing_ids

    def add_item(self, item):
        """Add ``item`` unless its key is already present.

        Does not write to disk, and never updates an existing record.
        Returns True if the item was added, False for a duplicate key.
        """
        if self.has_item(item):
            return False
        self._json.append(item)
        self._existing_ids.add(item[self._cmp_key])
        self._new.append(item)
        return True

    def add_list(self, newlist):
        """Add every item of ``newlist`` (duplicate keys are skipped)."""
        for item in newlist:
            self.add_item(item)

    @property
    def new_item_count(self):
        """Number of records added since the last save."""
        return len(self._new)

    @property
    def new_items(self):
        """The records added since the last save."""
        return self._new

    def save(self):
        """Overwrite the backing file with all records (no-op when clean)."""
        if self.new_item_count > 0:
            try:
                with open(self._filename, 'w') as f:
                    f.write(json.dumps(self._json, indent=4, separators=(',', ': ')))
                logger.info("Wrote {} new items to {}".format(self.new_item_count, self._filename))
                # Clear in place so previously handed-out `new_items`
                # references observe the reset as well.
                self._new[:] = []
            except IOError as e:
                logger.error(e)
        else:
            logger.debug("No new items")
| benediktkr/wohnen | jsonfile.py | jsonfile.py | py | 2,333 | python | en | code | 1 | github-code | 13 |
32591181921 | import datetime
import requests
from tqdm import tqdm
from typing import Dict
from .globals import SPARK_URL, SPARK_TOKEN, bitcoin
from .onchain import onopen
def listchannels(db):
    """Sync the channels table with the node's `listchannels` RPC output.

    Backfills missing on-chain data for known channels, inserts newly
    seen public channels (via onopen), refreshes fee policies whose
    gossip timestamp advanced, then stamps every row's last_update.
    """
    # Timestamp taken before the sync so nothing observed later is missed.
    now = int(datetime.datetime.now().timestamp())
    r = requests.post(
        SPARK_URL, headers={"X-Access": SPARK_TOKEN}, json={"method": "listchannels"}
    )
    db.execute("SELECT short_channel_id, last_update, open FROM channels")
    rows = db.fetchall()
    channel_last_update_by_scid: Dict[str, int] = {
        scid: int(last_update.timestamp()) for scid, last_update, _ in rows
    }
    # Pass 1: backfill on-chain data for rows with incomplete open info.
    for scid, _, ch_open in tqdm(rows, leave=True, desc="filling blanks"):
        if (
            not ch_open["block"]
            or not ch_open["fee"]
            or not ch_open["txid"]
            or not ch_open["time"]
        ):
            # channel with insufficient onchain data; the scid encodes
            # block x tx-index x output-index.
            blockheight, tx_index, out_n = map(int, scid.split("x"))
            block = bitcoin.getblock(bitcoin.getblockhash(blockheight))
            tx = bitcoin.getrawtransaction(block["tx"][tx_index], True)
            onopen(db, blockheight, block["time"], tx, tx["vout"][out_n], scid, None)
    # Pass 2: walk the gossip channel list from the node.
    pbar = tqdm(r.json()["channels"], leave=True, desc="listchannels")
    for ch in pbar:
        pbar.set_description("list " + ch["short_channel_id"])
        if ch["public"] == False:
            continue
        last_update = channel_last_update_by_scid.get(ch["short_channel_id"], 0)
        if not last_update:
            # channel not known, gather onchain data
            blockheight, tx_index, out_n = map(int, ch["short_channel_id"].split("x"))
            block = bitcoin.getblock(bitcoin.getblockhash(blockheight))
            tx = bitcoin.getrawtransaction(block["tx"][tx_index], True)
            onopen(
                db,
                blockheight,
                block["time"],
                tx,
                tx["vout"][out_n],
                ch["short_channel_id"],
                ch,
            )
        if last_update < ch["last_update"]:
            # gossip is newer than what we stored: refresh fee policies
            save_fee_policies(db, ch)
    pbar.close()
    db.execute("""UPDATE channels SET last_update = to_timestamp(%s)""", (now,))
def save_fee_policies(db, ch):
    """Insert a new fee-policy row for a channel unless it is unchanged.

    Direction is canonicalised by node-id ordering: 1 when source <
    destination, else 0.  A new row is appended (history is kept) only
    when the latest stored policy differs from the gossip values.
    """
    # NOTE(review): node0/node1 are computed but never used — only the
    # `towards` direction flag matters here; confirm before removing.
    node0, node1, towards = (
        (ch["source"], ch["destination"], 1)
        if ch["source"] < ch["destination"]
        else (ch["destination"], ch["source"], 0)
    )
    # Compare the gossip values against the most recent stored policy.
    db.execute(
        """
        SELECT
          CASE WHEN base_fee_millisatoshi = %s AND fee_per_millionth = %s AND delay = %s
            THEN 1
            ELSE 0
          END
        FROM policies
        WHERE short_channel_id = %s AND direction = %s
        ORDER BY update_time DESC
        LIMIT 1
    """,
        (
            ch["base_fee_millisatoshi"],
            ch["fee_per_millionth"],
            ch["delay"],
            ch["short_channel_id"],
            towards,
        ),
    )
    row = db.fetchone()
    # No stored policy at all counts as "not up to date".
    isfeepolicyuptodate = row[0] if row else False
    if not isfeepolicyuptodate:
        db.execute(
            """
            INSERT INTO policies
            (short_channel_id, direction,
             base_fee_millisatoshi, fee_per_millionth, delay,
             update_time)
            VALUES (%s, %s, %s, %s, %s, to_timestamp(%s))
        """,
            (
                ch["short_channel_id"],
                towards,
                ch["base_fee_millisatoshi"],
                ch["fee_per_millionth"],
                ch["delay"],
                ch["last_update"],
            ),
        )
| fiatjaf/lnchannels | getdata/listchannels.py | listchannels.py | py | 3,489 | python | en | code | 23 | github-code | 13 |
17919050365 | import json
import requests
from ._config import QueryOptions
from requests.models import HTTPError
from skyflow.errors._skyflow_errors import SkyflowError, SkyflowErrorCodes, SkyflowErrorMessages
from skyflow._utils import InterfaceName
interface = InterfaceName.QUERY.value
def getQueryRequestBody(data, options):
    """Validate a query payload and serialise it to a JSON request body.

    `data` must be a mapping holding a non-empty string under "query";
    `options` is accepted for interface compatibility but unused here.
    Raises SkyflowError when the key is missing, the query is not a string,
    the query is blank, or the payload cannot be JSON-serialised.
    """
    try:
        query = data["query"]
    except KeyError:
        raise SkyflowError(SkyflowErrorCodes.INVALID_INPUT,
                           SkyflowErrorMessages.QUERY_KEY_ERROR, interface=interface)
    if not isinstance(query, str):
        queryType = str(type(query))
        raise SkyflowError(SkyflowErrorCodes.INVALID_INPUT, SkyflowErrorMessages.INVALID_QUERY_TYPE.value % queryType, interface=interface)
    if not query.strip():
        raise SkyflowError(SkyflowErrorCodes.INVALID_INPUT, SkyflowErrorMessages.EMPTY_QUERY.value, interface=interface)
    try:
        return json.dumps({"query": query})
    except Exception:
        raise SkyflowError(SkyflowErrorCodes.INVALID_INPUT, SkyflowErrorMessages.INVALID_JSON.value % (
            'query payload'), interface=interface)
def getQueryResponse(response: requests.Response, interface=interface):
    """Parse a Skyflow query HTTP response.

    On a 2xx response, returns the decoded JSON body.  On an HTTP error,
    raises a SkyflowError built from the server's error payload when one is
    present, otherwise a generic SERVER_ERROR (with the x-request-id header
    appended to the message when available).
    """
    statusCode = response.status_code
    content = response.content.decode('utf-8')
    try:
        response.raise_for_status()
        try:
            return json.loads(content)
        # bare except: any decode failure is reported as RESPONSE_NOT_JSON
        except:
            raise SkyflowError(
                statusCode, SkyflowErrorMessages.RESPONSE_NOT_JSON.value % content, interface=interface)
    except HTTPError:
        message = SkyflowErrorMessages.API_ERROR.value % statusCode
        # NOTE(review): `response.content` is normally bytes (possibly b""),
        # so this condition is effectively always true here and the
        # SERVER_ERROR fallback below looks unreachable — confirm intent.
        if response != None and response.content != None:
            try:
                errorResponse = json.loads(content)
                if 'error' in errorResponse and type(errorResponse['error']) == type({}) and 'message' in errorResponse['error']:
                    message = errorResponse['error']['message']
            except:
                message = SkyflowErrorMessages.RESPONSE_NOT_JSON.value % content
            # NOTE(review): INVALID_INDEX looks odd for an HTTP-error path —
            # verify this should not be a different SkyflowErrorCodes member.
            raise SkyflowError(SkyflowErrorCodes.INVALID_INDEX, message, interface=interface)
        error = {"error": {}}
        if 'x-request-id' in response.headers:
            message += ' - request id: ' + response.headers['x-request-id']
        error['error'].update({"code": statusCode, "description": message})
        raise SkyflowError(SkyflowErrorCodes.SERVER_ERROR, SkyflowErrorMessages.SERVER_ERROR.value, error, interface=interface)
"""
GenomicVariantSet
===================
GenomicVariantSet represents a list of GenomicVariant objects.
"""
from __future__ import print_function
from .GenomicVariant import GenomicVariant
from .GenomicRegionSet import GenomicRegionSet
import vcf
class GenomicVariantSet(GenomicRegionSet):
    """*Keyword arguments:*
    - vcf_path -- VCF file
    - name -- name
    """
    # NOTE(review): this class uses Python-2-only idioms (`list.sort(cmp=...)`,
    # `iterator.next()`, `filter()` treated as a list); it will not run
    # unchanged under Python 3.
    def __init__(self, vcf_path=None, name='GenomicVariantSet'):
        """Initialize; when `vcf_path` is given the file is read immediately."""
        GenomicRegionSet.__init__(self, name=name)
        if vcf_path:
            self.read_vcf(vcf_path)
    def sort(self):
        """Sort elements by criteria defined by GenomicVariant.
        .. note:: By default, the genomic position is used as sorting criteria.
        """
        # Python 2 only: the `cmp` keyword was removed in Python 3.
        self.sequences.sort(cmp=GenomicVariant.__cmp__)
        self.sorted = True
    def read_vcf(self, vcf_path):
        """
        Read SNPs and InDels from a VCF file.
        *Keyword arguments:*
        - vcf_path -- VCF file
        .. note:: vcf_path can also be defined in the initialization.
        *Example:*
        We load a VCF file::
        >>>from rgt.GenomicVariantSet import GenomicVariantSet
        >>>snps_sample1 = GenomicVariantSet('snps.vcf', name='sample1')
        """
        # The reader is kept on self so write_vcf() can reuse its header/metadata.
        self.reader = vcf.Reader(open(vcf_path), 'r')
        self.metadata = self.reader.metadata
        self.infos = self.reader.infos
        self.filters = self.reader.filters
        self.formats = self.reader.formats
        self.samples = self.reader.samples
        for v in self.reader:
            variant = GenomicVariant(v.CHROM, v.POS, v.REF, v.ALT, v.QUAL, filter=v.FILTER, id=v.ID, \
                                     info=v.INFO, format=v.FORMAT, genotype=v.genotype, samples=v.samples)
            self.add(variant)
    def write_vcf(self, vcf_path):
        """
        Write VCF file.
        *Keyword arguments:*
        - vcf_path -- VCF file
        """
        # NOTE(review): if read_vcf() was never called, `self.reader` does not
        # exist and this raises AttributeError instead of the intended Exception.
        if not self.reader:
            raise Exception("No data available")
        writer = vcf.Writer(open(vcf_path, 'w'), self.reader)
        for v in self.sequences:
            record = vcf.model._Record(v.chrom, v.pos, v.id, v.ref, v.alt, v.qual, [], v.info, v.format, [], v.samples)
            writer.write_record(record)
    def filter_dbSNP(self):
        """Filter for dbSNP.
        .. note:: the vcf file must already contain the dbSNP annotation.
        """
        # Python 2 only: filter() must yield a list for len()/indexing elsewhere.
        self.sequences = filter(lambda x: 'DB' not in x.info.keys(), self.sequences)
    def filter(self, at, op, t):
        """
        Filter for attributes.
        *Keyword arguments:*
        - at -- VCF file
        - op -- operation to perform
        - t -- threshold
        *Example:*
        We load a VCF file::
        >>>from rgt.GenomicVariantSet import GenomicVariantSet
        >>>snps_sample1 = GenomicVariantSet('snps.vcf', name='sample1')
        And we filter by the mapping quality::
        >>>snps_sample1.filter(at='MQ', op'>', t=30)
        The mapping quality is tagged as MQ in the VCF file. We only want to keep SNPs that have a mapping quality higher than 30.
        .. note:: operation <op> and threhold <t> depend on the filtering tag <at>
        """
        # CAUTION: builds and eval()s "<value><op><threshold>" — only call with
        # trusted `at`/`op`/`t` arguments, never user-supplied input.
        self.sequences = filter(lambda x: eval(str(x.info[at]) + op + str(t)), self.sequences)
    def _reconstruct_info(self, GenomicRegionSet):
        """Reconstruct all information for GenomicVariantSet that get lost when using a GenomicRegionSet method"""
        # Matches each plain region back to the variant with the same
        # chromosome/position; assumes positions are unique per chromosome.
        tmp_sequences = []
        for genomic_region in GenomicRegionSet:
            c, p = genomic_region.chrom, genomic_region.initial
            for var in self.sequences:
                if var.chrom == c and var.pos == p: # 1:1 mapping
                    tmp_sequences.append(var)
                    break
        return tmp_sequences
    def subtract(self, x):
        """
        Subtract GenomicVariantSet.
        *Keyword arguments:*
        - x -- instance of GenomicVariantSet which is subtracted
        """
        tmp = GenomicRegionSet.subtract(self, x, whole_region=False)
        self.sequences = self._reconstruct_info(tmp)
    def intersect(self, x):
        """
        Intersect GenomicVariantSet.
        *Keyword arguments:*
        - x -- instance of GenomicVariantSet
        """
        tmp = self._intersect(x)
        self.sequences = self._reconstruct_info(tmp)
    def _intersect(self, y, rm_duplicates=False):
        """Return the overlapping regions with three different modes.
        (mode = OverlapType.ORIGINAL)
        Return the regions of original GenomicRegionSet which have any intersections with y.
        Keyword arguments:
        y -- the GenomicRegionSet which to compare with
        Return:
        z -- the regions of original GenomicRegionSet which have any intersections with y
        Graphical explanation:
        self ---------- ------
        y ---------- ----
        Result ----------
        """
        a = self
        b = y
        z = GenomicRegionSet(a.name + ' + ' + b.name)
        # XXX - someone putted an special symbol and spaces in the name! this is used as file name, never use strange characters.
        if len(a) == 0 or len(b) == 0:
            return z
        else:
            # If there is overlap within self or y, they should be merged first.
            if a.sorted == False: a.sort()
            if b.sorted == False: b.sort()
            # Python 2 only: `.next()` is `next(iter_a)` in Python 3.
            iter_a = iter(a)
            s = iter_a.next()
            last_j = len(b) - 1
            j = 0
            cont_loop = True
            ########################### OverlapType.ORIGINAL ###################################
            # Merge-style sweep over the two sorted sets; a bare `except` ends
            # the loop when iter_a is exhausted (StopIteration).
            while cont_loop:
                # print(str(s),"\t",str(b[j]))
                # When the regions overlap
                if s.overlap(b[j]):
                    z.add(s)
                    try:
                        s = iter_a.next()
                    except:
                        cont_loop = False
                elif s < b[j]:
                    try:
                        s = iter_a.next()
                    except:
                        cont_loop = False
                elif s > b[j]:
                    if j == last_j:
                        cont_loop = False
                    else:
                        j = j + 1
                else:
                    try:
                        s = iter_a.next()
                    except:
                        cont_loop = False
            if rm_duplicates:
                z.remove_duplicates()
            return z
if __name__ == '__main__':
    # Ad-hoc smoke test with hard-coded developer-machine paths.
    s = GenomicVariantSet('/home/manuel/data/humangenetics/01_S1_L001_R1_001.filtered.vcf')
    b = GenomicVariantSet('/home/manuel/data/humangenetics/K28_S8_L001_R1_001.filtered.vcf')
    print(len(s))
    s.subtract(b)
    print(len(s))
    # print(b.sequences[:10])
    # print(c.sequences[:10])
    print(len(s))
    s.filter('MQ', '>=', 40.0)
    print(len(s))
    # s.filter_DB()
    # print(len(s))
from django.shortcuts import render,redirect
from home.models import Blog
from django.contrib import messages
from django.contrib.auth.decorators import login_required
# Create your views here.
########################################################################
@login_required
def blog(request):
    """Show the add-blog form (GET) or create a new Blog entry (POST)."""
    if request.method == 'POST':
        # NOTE(review): the form is expected to always upload an image — a
        # POST without one raises MultiValueDictKeyError; confirm the template.
        entry = Blog(
            Title=request.POST.get('title'),
            Description=request.POST.get('description'),
            Image=request.FILES['image'],
            Url=request.POST.get('url'),
            SMTitle=request.POST.get('smtitle'),
            SMDescription=request.POST.get('smdescription'),
            SMKeywords=request.POST.get('smkeywords'),
        )
        entry.save()
        messages.success(request, 'new blog added successfully.....!')
        return redirect('blog')
    return render(request, 'blog.html')
########################################################################
@login_required
def manage_blog(request):
    """Render the management page listing every blog post."""
    context = {'blogs': Blog.objects.all()}
    return render(request, 'manage_blog.html', context)
########################################################################
@login_required
def edit_blog(request, bid):
    """Edit an existing blog post: GET shows the form, POST saves changes."""
    blog = Blog.objects.get(id=bid)
    if request.method == 'POST':
        # A new image is optional; keep the old one when none was uploaded.
        if request.FILES:
            blog.Image = request.FILES['image']
        blog.Title = request.POST.get('title')
        blog.Description = request.POST.get('description')
        blog.Url = request.POST.get('url')
        blog.SMTitle = request.POST.get('smtitle')
        blog.SMDescription = request.POST.get('smdescription')
        blog.SMKeywords = request.POST.get('smkeywords')
        blog.save()
        messages.success(request, 'blog edited successfull...!')
        return redirect('.')
    return render(request, 'edit_blog.html', {'blog': blog})
########################################################################
@login_required
def remove_blog(request, bid):
    """Delete the blog post with primary key `bid`, then return to the list."""
    Blog.objects.get(id=bid).delete()
    messages.error(request, 'blog deleted')
    return redirect('manage_blog')
########################################################################
@login_required
def remove_blog_img(request, bid):
    """Delete only the image attached to a blog post, then reopen its edit page."""
    blog = Blog.objects.get(id=bid)
    blog.Image.delete(save=True)
    blog.save()
    # NOTE(review): this builds a path string instead of reversing a URL name —
    # confirm 'edit_blog/<id>' actually resolves as intended.
    return redirect('edit_blog/%s' % blog.id)
########################################################################
##!/usr/bin/env python
#Python 3.2 on Microsoft OS
#Also for Python 3.3
# CLASSES
#global variables
max_indid = [-1]  # highest panelist id handed out so far (1-element list so callers can rebind it)
divisor_beta_1 = [-1,-1]  # NOTE(review): divisors applied to entry response rates in new_recruits_f;
divisor_beta_2 = [-1,-1]  # never reassigned in this file, so dividing by -1 yields negative rr — confirm where these are set
amount_topurge = -1  # panelists retired per purge cycle (must be set before running a simulation)
wished_rr = .28  # target average response rate for the universe
univ = [None]  # NOTE(review): seeded with a None placeholder — code iterating this assumes panelists only
purged = [None]  # retired panelists (same None-placeholder caveat)
week = 0  # current simulation week
exposure_rate = 0  # probability a sampled panelist sees the invite on a given day
pbe = 0  # proportion of recruits drawn from the beta_1 pool
class Panelist3(object):
    '''
    # Definition of attributes of a "panelist"
    # Attributes are defined in terms of response and panel life cycle characterisation
    # Additional attributes could be added
    '''
    # Each "setter" below overwrites itself: calling e.g. p.recruit(5) rebinds
    # the *instance* attribute `recruit` to 5, so afterwards p.recruit is data,
    # not a method.  The rest of the simulation relies on this by testing
    # inspect.ismethod(p.attr) to mean "this attribute was never set".
    def __init__(self, indid = -1):
        self.indid = indid
    def recruit (self, recruit = None):
        self.recruit = recruit
    def pstatus(self, pstatus = None):
        self.pstatus = pstatus
    def qstatus(self, qstatus = None):
        self.qstatus = qstatus
    def participation(self, participation = {}):
        # NOTE(review): mutable default — every panelist initialised without an
        # explicit dict shares the same {} object; confirm this is intended.
        self.participation = participation
    def rr(self, rr = None):
        self.rr = rr
    def beta(self, beta):
        self.beta = beta
    def elimstatus(self, elimstatus):
        self.elimstatus = elimstatus
    def purged(self, purged = None):
        self.purged = purged
    def batch(self, batch):
        # NOTE(review): the `batch` argument is ignored — the attribute is
        # always set to the -77 sentinel; callers assign p.batch directly.
        self.batch = -77
class counter():
    '''
    One-shot countdown used to time panelist elimination periods.
    `state` holds the remaining number of periods; once it reaches zero it is
    replaced by None, which callers treat as "no elimination in effect".
    '''
    def __init__(self, start):
        self.state = start
    def __call__(self, week):
        # A falsy state (None or 0) means the countdown is already inactive.
        if not self.state:
            return
        self.state -= 1
        if self.state == 0:
            self.state = None
import sys, os
import random
import inspect
import csv
import sqlite3
#
#
# File preparation area, databases are not included here
#filename = 'db_test_01'
#db_test = connect(os.getcwd()+'/'+filename)
#cursor = db_test.cursor()
# Space-delimited CSV logs for the simulation, created relative to the current
# working directory; each writer gets its header row immediately.
# NOTE(review): the file handles stay open for the whole run and are never
# explicitly closed or flushed here.
file_exposure = open(os.getcwd()+'/simulation/files/exposure01.csv','w',newline = '')
csv_exposure = csv.writer(file_exposure, delimiter=' ')
csv_exposure.writerow(['iteration', 'exposure_rate', 'final_prop_qf', 'final_count_qf'])
file_universe = open(os.getcwd()+'/simulation/files/universe01.csv', 'w', newline = '')
csv_universe = csv.writer(file_universe, delimiter=' ')
csv_universe.writerow(['experiment', 'iteration','indid', 'pan_rr', 'beta_origin', 'recruiting','week','qstatus','elimstatus.state'])
file_eligible = open(os.getcwd()+'/simulation/files/eligible01.csv', 'w', newline = '')
csv_eligible = csv.writer(file_eligible, delimiter=' ')
csv_eligible.writerow(['experiment', 'iteration','indid','week', 'sample_pull','qstatus','elimstatus.state'])
file_fieldwork = open(os.getcwd()+'/simulation/files/fieldwork01.csv', 'w', newline = '')
csv_fieldwork = csv.writer(file_fieldwork, delimiter=' ')
csv_fieldwork.writerow(['experiment', 'iteration', 'indid','week', 'sample_pull', 'batch', 'day', 'qstatus','elimstatus.state','participation','purging'])
file_summaries = open(os.getcwd()+'/simulation/files/summary01.csv', 'w', newline = '')
csv_summaries = csv.writer(file_summaries, delimiter=' ')
csv_summaries.writerow(['experiment', 'iteration', 'week', 'universe', 'rr_univ', 'rr_sample_previous', 'beta1uni', 'beta2uni', 'olduni', 'newuni', 'leguni', 'nonleguni', 'eligible', 'rr_eli', 'beta1eli', 'beta2eli', 'oldeli', 'neweli', 'legeli', 'nonlegeli', 'target', 'sample', 'rr_sample', 'sample_pull', 'daysinfieldwork', 'completes', 'quotaful', 'other', 'beta1samp', 'beta2samp', 'oldsamp', 'newsamp', 'legsamp', 'nonlegsamp'])
file_summaries2 = open(os.getcwd()+'/simulation/files/summary02.csv', 'w', newline = '')
csv_summaries2 = csv.writer(file_summaries2, delimiter=' ')
csv_summaries2.writerow(['experiment', 'iteration', 'week', 'universe', 'rr_univ', 'rr_sample_previous', 'rr_BOR', 'rr_average_fw', 'beta1uni', 'beta2uni', 'olduni', 'newuni', 'leguni', 'nonleguni', 'eligible', 'rr_eli', 'beta1eli', 'beta2eli', 'oldeli', 'neweli', 'legeli', 'nonlegeli', 'target', 'sample', 'rr_sample', 'rr_sample_2', 'sample_pull', 'daysinfieldwork', 'completes', 'quotaful', 'other', 'beta1samp', 'beta2samp', 'oldsamp', 'newsamp', 'legsamp', 'nonlegsamp'])
### TODO? COUNT IN interviewing_sample the number of quotafulls and other statuses too? Add to summaries
# FUNCTIONS
def rr_data_preparation(filename):
    '''
    Build two complementary response-rate pools from a distribution file.

    Each non-blank line of the file is whitespace-separated; column 1
    (0-based) holds a response-rate probability and column 2 a frequency
    written as a decimal whose first three characters are scaled by 100
    (e.g. "1.50" -> 150).  Returns (beta_1, beta_2) where
    beta_2[i] == 1 - beta_1[i]; each probability is repeated according to
    its frequency so random.choice() over a pool samples the distribution.

    Fixes over the original: the file is opened with a context manager (the
    old handle leaked on a parse error), blank lines are skipped instead of
    crashing, and zero-frequency rows can no longer run past the pool bounds.
    '''
    # Read the file once; keep the lines so both passes of the original
    # two-pass algorithm can run without re-reading.
    with open(filename) as in_file:
        lines = [line for line in in_file if line.strip()]
    freqs = []
    total_length = 0
    for line in lines:
        # Truncating to 3 chars before scaling is the historical file
        # convention ("1.50"[:3] == "1.5" -> 150); kept as-is.
        freq = int(float(line.split()[2][:3])*100)
        freqs.append(freq)
        total_length += freq
    probs = [float(line.split()[1]) for line in lines]
    beta_1 = [0]*total_length
    beta_2 = [0]*total_length
    beta_index = 0
    for index, freq in enumerate(freqs):
        if freq != 0:
            count = 0
            # NOTE(review): `<=` writes freq+1 entries per bucket (off-by-one
            # inherited from the original); the total_length guard caps it.
            while count <= freq:
                if beta_index == total_length: break
                beta_1[beta_index] = probs[index]
                beta_2[beta_index] = 1-probs[index]
                beta_index += 1
                count += 1
        elif beta_index < total_length:
            # Zero-frequency rows used to write unconditionally, risking an
            # IndexError once the pools were full.
            beta_1[beta_index] = 0
            beta_2[beta_index] = 0
            beta_index += 1
    return beta_1, beta_2
### GLOBAL FILE BETA rr
# Module-level response-rate pools shared by the whole simulation;
# new_recruits_f() samples from them.  The path is resolved against the
# current working directory, so the script must be launched from the repo root.
in_file = os.getcwd()+'/simulation/lower_rr.txt'
beta_1, beta_2 = rr_data_preparation(in_file)
## The following algorithm keeps the same amount of panelists all the time
def purging_f(univ, purged, week, amount=None):
    '''
    Remove panelists from the live universe (simple garbage collection).

    Walks the universe in order and retires a panelist whenever a uniform
    draw is >= that panelist's response rate, so low-rr panelists are the
    most likely to go.  If that pass retires fewer than the requested
    amount, the shortfall is filled with a plain random sample.

    *Keyword arguments:*
    - univ   -- live panelist list; purged members are removed in place
    - purged -- list that receives the retired panelists
    - week   -- current simulation week, recorded via panelist.purged(week)
    - amount -- how many panelists to retire; defaults to the module-level
                ``amount_topurge`` for backward compatibility

    Fixes over the original: iterate over snapshots so removing from `univ`
    mid-iteration no longer skips the following panelist, and the fallback
    branch appends the correct object (the old code referenced the
    undefined name `topurged`, a NameError).
    '''
    if amount is None:
        amount = amount_topurge
    count_purging = 0
    # Snapshot: `univ` is mutated (remove) inside the loop body.
    for topurge in list(univ):
        if count_purging == amount:
            break
        if random.random() >= topurge.rr:
            topurge.purged(week)
            purged.append(topurge)
            univ.remove(topurge)
            count_purging += 1
    # Shortfall: top up with a simple random sample of the survivors.
    if count_purging < amount:
        for topurge in random.sample(univ, amount - count_purging):
            topurge.purged(week)
            purged.append(topurge)
            univ.remove(topurge)
def new_recruits_f(univ, size, week, proportion_beta = 0.85):
    '''
    Append `size` freshly recruited panelists to `univ`.

    Ids continue from the module-level ``max_indid`` marker (callers refresh
    that marker afterwards).  With probability `proportion_beta` a recruit's
    response rate is drawn from the ``beta_1`` pool, otherwise from the
    complementary ``beta_2`` pool; either draw is divided by the matching
    divisor list to lower entry response rates.
    '''
    first_id = max_indid[0] + 1
    for new_id in range(first_id, first_id + size):
        recruit = Panelist3(new_id)
        if random.random() <= proportion_beta:
            recruit.rr(random.choice(beta_1)/random.choice(divisor_beta_1))
            recruit.beta(1)
        else:
            recruit.rr(random.choice(beta_2)/random.choice(divisor_beta_2))
            recruit.beta(2)
        recruit.recruit(week)
        univ.append(recruit)
def rr_calc(univ):
    '''
    Return the mean of the `.rr` attribute over `univ` (0 for an empty list).
    '''
    if not univ:
        return 0
    return sum(panelist.rr for panelist in univ) / len(univ)
#Will refer to the rr distributions of the new recruitments! so we can get an expected wished average rr!!!!
def parameters_univ(weeks, proportion_beta_est):
    '''
    # This function will calculate the initial parameters of entry so the baseline could reach the required overal response rate at the end of a fixed period
    # There were also other fixed parameters to facilitate the processing of data
    # In fact this is bit more complex than a baseline: an iteration process was proposed to find the values of the composition of response rates attributed to Panelist3 instances until reaching a composition which in aggregate reflect the expected response rate of the population;
    # Each searching iteration consists of 200 ones of purging/recruitment
    # The process seems robust but slow and takes time for low expected values of desired (universe) response rate
    # IMPORTANT: The first is to estimate a desired overall response rate for a universe
    # This will affect the composition of the panel, as we require to reach an equilibrium of recruitment/purging
    # values so the resulting response rate keeps the same for subsequent updates of the universe when starting the sampling
    # The baseline function will run until reaching the expected value by vary the values of recruitment and purging
    # It is an (brute force) optimisation procedure
    # See the rationale in the functions above
    '''
    while True:
        amount_topurge = 1300  # NOTE(review): local only — purging_f() reads the *module-level* amount_topurge, so this value never reaches it; confirm a `global` statement is not missing
        univ = []
        purged = []
        max_indid = [-1]  # NOTE(review): also shadows the module-level max_indid that new_recruits_f() reads
        week = int(weeks)*(-1) - 200
        replica_data = []
        numavgweek = 0
        #first universe
        new_recruits_f(univ, 6700, week, proportion_beta = proportion_beta_est)
        max_indid = [univ[-1].indid]
        pre_univ_rr = rr_calc(univ)
        print(len(univ), pre_univ_rr)
        sum_rr_calc = 0
        ave_rr_calc = 0
        initial_week = abs(week)
        for count_week in range(initial_week): # NOTE: parameter for deciding how long the baselining lasts
            week += 1
            purging_f(univ, purged, week)
            new_recruits_f(univ, amount_topurge, week, proportion_beta = proportion_beta_est)
            max_indid = [univ[-1].indid]
            replica_data.append((len(univ),rr_calc(univ), week))
            if int(week*0.1) == 0:
                numavgweek = -1
            else:
                numavgweek = int(week*0.1) #NOTE: to work correctly here, the value of week must be <= 0!!!
        # Average the universe rr over the last |numavgweek| recorded weeks.
        for data in replica_data[numavgweek:]:
            sum_rr_calc += data[1]
        ave_rr_calc = sum_rr_calc/abs(numavgweek)
        print('ave_rr_calc: ', str(ave_rr_calc)[:5], 'wished_rr: ', str(wished_rr)[:5], 'ave_rr_calc - wished_rr: ', str(ave_rr_calc - wished_rr)[:5])
        # NOTE: the calculation of this if...else reminds a Markov inequality: P(k<|X|) <= E[X]/k, creating a superior bound...
        if abs(ave_rr_calc - wished_rr) <= wished_rr*.05: break
        else:
            # in this case: p(X) related to Uni(beta_1)*Ber(proportion_beta_est)/[(wished_rr-0.285)*6/-0.21]+Uni(beta_2)*Ber(1-proportion_beta_est)
            # NOTE: the following if...elif is Newton-like method, but with y2 = y1+y1*(mhu(X) - E(X))
            # I know also that |ave_rr_calc - wished_rr| has a min when equal 0, having wished_rr as mhu
            # The objective is to calculate the value of the Ber function
            # OBS: this is a TIME dependent function!!!
            # NOTE(review): both branches apply the identical update, so the
            # if/elif could collapse to a single statement — confirm intent.
            if ave_rr_calc - wished_rr < 0: proportion_beta_est += proportion_beta_est*(ave_rr_calc - wished_rr)
            elif ave_rr_calc - wished_rr > 0: proportion_beta_est += proportion_beta_est*(ave_rr_calc - wished_rr)
    print(len(univ), rr_calc(univ))
    return proportion_beta_est, ave_rr_calc, (wished_rr-0.285)*6/-0.21
def creating_baseline_univ(week, pbe, allow_unireport = None):
    '''
    # This section works a baseline pre-simulation of a panel formation in a fixed period of time
    # The underlying assumption is that we would be better off by creating the current universe (the panel) by a repeated process of recruitments and purgings, such as it happened for the large majority of consumer panels
    # In this way we look for guaranteeing a panel composition equivalent to its equivalent empirical form
    # In particular, one important issue here was how to model the longevity component (a pseudo-definition of loyalty)
    '''
    # Baselining runs over negative week numbers so the "real" simulation can
    # start at week 0 with an already-aged panel.
    if week > 0: week *= -1
    # NOTE(review): operates on the module-level `univ`/`purged`, which are
    # seeded with a None placeholder — the report loop below would fail on it.
    new_recruits_f(univ, 6700, week, proportion_beta = pbe)
    if allow_unireport:
        #UNI_Report
        for panelist in univ:
            csv_universe.writerow([-77, -77, panelist.indid, str(panelist.rr)[:5],panelist.beta, panelist.recruit, week, -77, -77])
    #capturing max_indid
    # NOTE(review): this rebinds a *local* max_indid; the module-level marker
    # read by new_recruits_f() is not updated here — confirm intent.
    max_indid = [univ[-1].indid]
    pre_univ_rr = rr_calc(univ)
    print(len(univ), pre_univ_rr)
    initial_week = abs(week)
    for count_week in range(initial_week): # NOTE: parameter for deciding how long the baselining lasts
        week += 1
        purging_f(univ, purged, week)
        new_recruits_f(univ, amount_topurge, week, proportion_beta = pbe)
        max_indid = [univ[-1].indid]
    print(len(univ), rr_calc(univ))
#start sampling - run elimination
def eligible(univ):
    '''
    Return the panelists currently open to sampling.

    A panelist is eligible while its `qstatus` was never set (still a bound
    method on the instance — see Panelist3) or was reset to None by the
    elimination countdown.
    '''
    return [panelist for panelist in univ
            if inspect.ismethod(panelist.qstatus) or panelist.qstatus is None]
def sampling(rr_type, eli, univ, target, rr, ir, batching = 0):
    '''
    Draw the panelists to invite for one sample pull.

    The draw size inflates `target` by the expected response rate `rr`
    (net of the incidence rate `ir`, which is ignored when rr_type == 2);
    very small targets (< 5) use a further-inflated corrected target.
    When the inflated size reaches the whole eligible pool, the pool itself
    is returned.  With `batching` > 0, each drawn panelist gets a random
    batch number in 1..batching.
    '''
    if rr_type == 2:
        ir = 0
    effective_rr = rr - rr * ir
    corrected_target = int(target + target / effective_rr)
    if not eli:
        sample = eli
    elif target >= 5:
        draw = int(target / effective_rr)
        sample = random.sample(eli, draw) if draw < len(eli) else eli
    else:
        draw = int(corrected_target / effective_rr)
        sample = random.sample(eli, draw) if draw < len(eli) else eli
    if batching:
        for panelist in sample:
            panelist.batch = random.choice([x + 1 for x in range(batching)])
    return sample
def run_elimination(univ, week):
    '''
    Advance every panelist's elimination state for the current week.

    A never-set `recruit`/`elimstatus` attribute is still a bound method
    (see Panelist3) and gets a concrete value first.  A countdown whose
    state has expired (None) clears `qstatus`, re-opening the panelist for
    sampling; panelists still carrying a status tick their countdown.
    '''
    for panelist in univ:
        if inspect.ismethod(panelist.recruit):
            panelist.recruit = week
        if inspect.ismethod(panelist.elimstatus):
            panelist.elimstatus = counter(None)
        if panelist.elimstatus.state is None:
            panelist.qstatus = None
        if panelist.qstatus:
            panelist.elimstatus(week)
def test_participation(test):
x = -77
if not inspect.ismethod(test.participation):
x = test.participation['day']
return x##!/usr/bin/env python
def numeric_qststatus(test):
    '''
    Translate a panelist's questionnaire status into a numeric code for the
    CSV reports: L (loaded) -> 1, O (other) -> 2, C (complete) -> 3,
    QF (quota full) -> 4.  Returns None when the status was never set
    (still a bound method) or holds any other value.
    '''
    if inspect.ismethod(test.qstatus):
        return None
    return {'L': 1, 'O': 2, 'C': 3, 'QF': 4}.get(test.qstatus)
# start sampling - using the last universe for sampling, using the same rr of the universe to calculate size
def interviewing_sample(week, sample_pull, day, remaining_interviews, exposure_rate, ir, interviews, values = [4,28,8]):
    '''
    Simulate one fieldwork day over the panelists in `interviews`.

    Every panelist is first marked 'L' (loaded).  A chain of Bernoulli draws
    then decides the outcome: exposure (did the panelist see the invite
    today), response (against the panelist's own rr) and, for responders,
    'O' (other status, probability `ir`), 'C' (complete, while quota
    remains) or 'QF' (quota full).  Each outcome starts an elimination
    countdown (`values` = [loaded, complete/other, quota-full] periods) and
    is recorded in the panelist's participation dict.  Panelists with a
    final status are removed from `interviews` in place, so repeated daily
    calls work through the remainder of the sample.

    Returns the number of completed interviews ('C') for this day.

    NOTE: `values` is a mutable default but is only read, never mutated.
    Fix over the original: iterate over a snapshot of `interviews` — removing
    an element from the list being iterated silently skipped the panelist
    right after every removal.
    '''
    count_interviews = 0
    for interviewed in list(interviews):  # snapshot: interviews is mutated below
        interviewed.qstatus = 'L'
        value = values[0]
        # Draws are intentionally lazy: the response draw only happens for
        # exposed panelists, the status draw only for responders.
        if random.random() <= exposure_rate:
            if random.random() <= interviewed.rr:
                bernoulli_otherstatus = random.random()
                if bernoulli_otherstatus <= ir:
                    interviewed.qstatus = 'O'
                    value = values[1]
                elif count_interviews < remaining_interviews:
                    interviewed.qstatus = 'C'
                    value = values[1]
                    count_interviews += 1
                else:
                    interviewed.qstatus = 'QF'
                    value = values[2]
        # Start (or restart) the elimination countdown for this panelist.
        interviewed.elimstatus = counter(value)
        # A still-unset participation attribute is a bound method; calling it
        # replaces it with a dict (see Panelist3).
        if inspect.ismethod(interviewed.participation):
            interviewed.participation()
        interview_st = dict(week=week, sample_pull=sample_pull, day=day, status=interviewed.qstatus)
        interviewed.participation.update(interview_st)
        if interviewed.qstatus in ('O','C','QF'):
            interviews.remove(interviewed)
    return count_interviews
def trackering2(experiment, iteration, num_weeks, week_target, ir, dist_fw, corrector = 1, rr_sample = 0, daysinfieldwork_rr = [], rr_type = 1, no_extra = None, batches = None):
    '''
    # This is a simulation of a suggested longitudinal study fieldwork process based on simple rules obtained from empirical data
    # The trackering function will
    - update the universe at any stage (including purging),
    - update the eligible,
    - sample from that eligible (including some exception handling),
    - update the counter attributes when applicable
    # Process was designed to respond to a few demo cases as explained later ("Experiments", read main())
    '''
    # NOTE(review): `corrector`, `daysinfieldwork_rr`, `samples_total` and
    # `samples` are never used below, and `daysinfieldwork_rr=[]` is a mutable
    # default.  Kept for interface compatibility — confirm before removing.
    # rr_sampling[d] is a running response-rate estimate per fieldwork length
    # d+1; rr_sample_2 carries [last completes/sample ratio, length index].
    rr_sampling = [0 for x in range(max(dist_fw))]
    rr_sample_2 = [0,0]
    for week in range(num_weeks):
        run_elimination(univ, week)
        #UNI_Report
        for panelist in univ:
            csv_universe.writerow([experiment, iteration, panelist.indid,str(panelist.rr)[:5],panelist.beta,panelist.recruit,week,numeric_qststatus(panelist),panelist.elimstatus.state])
        complete_interviews = 0
        sample_pull = 1
        daysinfieldwork = random.sample(dist_fw,1)[0]
        daysinfieldworkfixed = daysinfieldwork
        samples_total = []
        samples = []
        # Keep pulling (each pull with a shorter fieldwork window) until the
        # weekly completes target is met or the eligible pool is exhausted.
        while complete_interviews < week_target:
            eli = eligible(univ)
            #ELI_Report
            # for panelist in eli:
            #     csv_eligible.writerow([experiment, iteration,panelist.indid,week,sample_pull,numeric_qststatus(panelist),panelist.elimstatus.state])
            if len(eli) == 0: break
            # Fold the previous pull's observed response rate into the running
            # per-fieldwork-length estimate.
            if rr_sample_2[0] > 0 and rr_sample_2[1] >= 0:
                if rr_sampling[rr_sample_2[1]]:
                    # NOTE: converts a numeric problem of infinite expectation into a fractional partition with a discrete tendency in the completes/sample-size proportion (a problem caused by sample-size variability) — running average with the previous estimate
                    rr_sampling[rr_sample_2[1]] = (rr_sampling[rr_sample_2[1]]+rr_sample_2[0])/2
#                    rr_sampling[rr_sample_2[1]] = (3*rr_sampling[rr_sample_2[1]]+rr_sample_2[0])/4
                else:
                    rr_sampling[rr_sample_2[1]] = rr_sample_2[0]
            if rr_type == 1: # Using fixed rr universe mean as measure
                sample = sampling(rr_type, eli, univ, week_target-complete_interviews, rr_calc(univ)/(daysinfieldworkfixed-(daysinfieldwork-1.0)), ir, batches) #target - complete_interviews (count_interviews) = remaining_interviews
            if rr_type == 2: # Using variable average rr previous sample as measure
                if rr_sampling[daysinfieldwork-1]:
                    sample = sampling(rr_type, eli, univ, week_target-complete_interviews, rr_sampling[daysinfieldwork-1], ir, batches)
                else:
                    # No estimate for this fieldwork length yet — fall back to
                    # the universe-mean method for this pull.
                    change_rr_type = 1
                    sample = sampling(change_rr_type, eli, univ, week_target-complete_interviews, rr_calc(univ)/(daysinfieldworkfixed-(daysinfieldwork-1.0)), ir, batches)
            # Pre-fieldwork composition counts for the summary report.
            partial_summary = []
            count_beta1_uni = 0
            count_beta2_uni = 0
            count_beta1_eli = 0
            count_beta2_eli = 0
            count_newpan_uni = 0
            count_oldpan_uni = 0
            count_newpan_eli = 0
            count_oldpan_eli = 0
            legacy_uni = 0
            nonlegacy_uni = 0
            legacy_eli = 0
            nonlegacy_eli = 0
            for test in univ:
                if test.beta == 1:
                    count_beta1_uni += 1
                else:
                    count_beta2_uni += 1
                if test.recruit < week - 12:
                    count_oldpan_uni += 1
                else:
                    count_newpan_uni += 1
                if test.recruit < -100:
                    legacy_uni += 1
                else:
                    nonlegacy_uni += 1
            for test in eli:
                if test.beta == 1:
                    count_beta1_eli += 1
                else:
                    count_beta2_eli += 1
                if test.recruit < week - 12:
                    count_oldpan_eli += 1
                else:
                    count_newpan_eli += 1
                if test.recruit < -100:
                    legacy_eli += 1
                else:
                    nonlegacy_eli += 1
            partial_summary = [experiment, iteration, week, len(univ), str(rr_calc(univ))[:5], str(rr_sample)[:5], str(rr_calc(univ)/(daysinfieldworkfixed-(daysinfieldwork-1.0))*(1-ir))[:5], str(rr_sampling[daysinfieldwork-1])[:5], count_beta1_uni, count_beta2_uni, count_oldpan_uni, count_newpan_uni, legacy_uni, nonlegacy_uni, len(eli), str(rr_calc(eli))[:5], count_beta1_eli, count_beta2_eli, count_oldpan_eli, count_newpan_eli, legacy_eli, nonlegacy_eli, week_target - complete_interviews, len(sample), sample_pull, daysinfieldwork]
            # everyone in the sample get status Loaded right from the start
            random.shuffle(sample)
            # The day_batch wouldn't take the last day of fieldwork of the previous sample, but start all over (in this case, easy if we consider sample_pull)
            # Additionally there is no currently an implementation for overlapping samples (to be done in the near future)
            interviews = sample[:]
            day_batch = 1
            while day_batch <= daysinfieldwork:
                if len(interviews) == 0: break
                if day_batch > daysinfieldwork: break
                complete_interviews += interviewing_sample(week, sample_pull, day_batch, week_target-complete_interviews, exposure_rate, ir, interviews)
                day_batch += 1
                interviews = interviews[:]
            rr_sample = rr_calc(sample)
            # Post-fieldwork status / composition counts for this sample.
            c, qf, o, b1, b2 = 0, 0, 0, 0, 0
            new, old, legacy, nonlegacy = 0, 0, 0, 0
            for test in sample:
                #FW_Report and more
                csv_fieldwork.writerow([experiment, iteration, test.indid, week, sample_pull, test.batch, test_participation(test), numeric_qststatus(test), -77, -77, -77])
#                csv_fieldwork.writerow([experiment, iteration, test.indid, week, sample_pull, test.batch, test_participation(test), numeric_qststatus(test), test.elimstatus.state, test.participation, test.purged])
                if test.qstatus == 'C': c += 1
                if test.qstatus == 'QF': qf += 1
                if test.qstatus == 'O': o += 1
                if test.beta == 1:
                    b1 += 1
                else:
                    b2 += 1
                if test.recruit < week - 12:
                    old += 1
                else:
                    new += 1
                if test.recruit < -100:
                    legacy += 1
                else:
                    nonlegacy += 1
            # print(c, qf, o)
            # print(len(partial_summary))
            rr_sample_2 = [(c+qf)/len(sample), daysinfieldwork-1]
            csv_summaries2.writerow([partial_summary[0], partial_summary[1], partial_summary[2], partial_summary[3], partial_summary[4], partial_summary[5], partial_summary[6], partial_summary[7], partial_summary[8], partial_summary[9], partial_summary[10], partial_summary[11], partial_summary[12], partial_summary[13], partial_summary[14], partial_summary[15], partial_summary[16], partial_summary[17], partial_summary[18], partial_summary[19], partial_summary[20], partial_summary[21], partial_summary[22], partial_summary[23], str(rr_sample)[:5], str(rr_sample_2[0])[:5], partial_summary[24], partial_summary[25], c, qf, o, b1, b2, old, new, legacy, nonlegacy])
            if no_extra: break
            sample_pull += 1
            # Each extra pull halves the remaining fieldwork window (min 1 day).
            daysinfieldwork = int(daysinfieldwork/2)
            if daysinfieldwork == 0: daysinfieldwork = 1
        # Now that exists a notion of DAYS and that there is also extras, what about recruitment and purging on a daily basis?
        purging_f(univ, purged, week)
        new_recruits_f(univ, amount_topurge, week, proportion_beta = pbe)
        max_indid = [univ[-1].indid]
#
# Following code is to find the exposure rate after a x number of iterations
# The exposure rate is found for a arbitrary ideal sample
# CHECK THE FOLLOWING (wording...): The factor that manage the limit conditions at which the exposure rate is asymptotic is the percentage of quotafuls at the end of the selected fieldwork period
def calc_exposure():
    '''
    Calibrate the "exposure rate" against an arbitrary "typical" sample.

    # This is a suggested function based on empirical evidence
    # The assumption behind this function is that not all panelists will come to respond the survey just after the invitation (some of them will come later)
    # Therefore it is suggested that there is at least a fixed percentage of remaining sample at any day that will come to answer the questionnaire
    # Probabilistically the response of panelists to an open questionnaire might resemble a Poisson distribution or a bit more complex one
    # Here the use of the nested Bernoullis is again applied
    # Because it is a measuring based on empirical evidence, the procedure involved the definition of an (arbitrary) typical sample
    # There is evidence of the existence of these typical samples in practice as it was later evaluated by other colleagues

    Reads module globals: univ, week, sampling, rr_calc, interviewing_sample,
    csv_exposure, file_exposure.  Writes one row per iteration to csv_exposure
    and closes file_exposure before returning the calibrated rate (float).
    '''
    exposure_rate = 1.0
    avg_exposure_rate = 0
    avg_qf_proportion = 0
    avg_count_qf = 0
    # 1000 calibration rounds: each round resets the universe, fields one
    # sample, measures the quotafull proportion and nudges exposure_rate.
    for i in range(1000):
        for exposured in univ:
            # NOTE(review): presumably skips individuals whose qstatus is
            # still an (unbound) method rather than a value -- confirm.
            if inspect.ismethod(exposured.qstatus):
                continue
            else:
                # Reset questionnaire/elimination state for a fresh round.
                exposured.qstatus = None
                exposured.elimstatus.state = None
        week_target = 35
        complete_interviews = 0
        count_qf = 0
        count_total = 0
        sample_pull = 1
        daysinfieldwork = 4
        ir = 0.15
        while complete_interviews < week_target:
            # Only the main pull is fielded here -- no extra samples.
            if sample_pull > 1: break
            # IMPORTANT!! My following calculation assumes that the rr_calc(univ) is CORRECT to estimate 1 sample and to get even QF!!!
            sample = sampling(1, univ, univ, week_target-complete_interviews, rr_calc(univ), ir)
            interviews = sample[:]
            day_batch = 1
            while day_batch <= daysinfieldwork:
                if len(interviews) == 0: break
                if day_batch > daysinfieldwork: break
                complete_interviews += interviewing_sample(week, sample_pull, day_batch, week_target-complete_interviews, exposure_rate, ir, interviews)
                day_batch += 1
                interviews = interviews[:]
            sample_pull += 1
        # Tally quotafulls vs. everyone who left the 'Loaded' state.
        for interviewed in sample:
            if interviewed.qstatus == 'QF':
                count_qf += 1
            if interviewed.qstatus != 'L':
                count_total += 1
        if count_total == 0:
            qf_proportion = 0
        else:
            qf_proportion = count_qf/count_total
        # Nudge the rate until the QF proportion lands in [0.2, 0.25];
        # inside the band the current values are recorded as the answer.
        if qf_proportion > 0.25:
            exposure_rate -= 0.001
        elif qf_proportion < 0.2:
            exposure_rate += 0.001
        else:
            avg_exposure_rate = exposure_rate
            avg_qf_proportion = qf_proportion
            avg_count_qf = count_qf
        csv_exposure.writerow([i, str(exposure_rate)[:5], str(qf_proportion)[:5], count_qf])
    file_exposure.close()
    print('\n','--> exposure_rate: ', str(avg_exposure_rate)[:5], 'qf_proportion: ', str(avg_qf_proportion)[:5], 'last count qf: ', avg_count_qf, 'after iteration ', i+1)
    return exposure_rate
def main():
    '''
    # The main:
    - collect the desired parameters as entered by the user
    - calculate the parameters required for estimating the composition of the panel corresponding to those desired parameters
    - run a baseline universe
    - calculate an "exposure rate" based on a typical sample
    - run the Experiments
    # The Experiments were designed to run different sampling programs and their effects on efficiencies in sampling
    # Main variables that were evaluated were:
    - The choice of the rr of calculate the sample size (either as given by the current universe or based on remaining eligible
    - The effect of the start days at each sampling effort (weekly) assuming a fixed end date of delivery
    # After this, the effect on size of sampling effort (as number of total samples at the end of the project) and number of
    individuals having a status different to "complete" were analysed and presented
    # Setups are:
    - Experiment1:
    --- sampling within a fixed fieldwork period with extras;
    --- no overlapping of extras (i.e. additional samples until completing the target for that period);
    --- the response rate used to calculate sample size is the response rate of the universe
    - Experiment2:
    --- sampling within a fixed fieldwork period with extras;
    --- no overlapping of extras (i.e. additional samples until completing the target for that period);
    --- the response rate used to calculate sample size is re-estimate of the left eligible after main and extra samples
    - Experiment3:
    --- sampling with different fieldworks periods with a random distribution;
    --- no overlapping of extras (i.e. additional samples until completing the target for that period);
    --- the response rate used to calculate sample size is the response rate of the universe
    - Experiment4:
    --- sampling with different fieldworks periods with a random distribution;
    --- no overlapping of extras (i.e. additional samples until completing the target for that period);
    --- the response rate used to calculate sample size is re-estimate of the left eligible after main and extra samples
    # Results of these experiments were analysed (with R) and presented
    '''
    global wished_rr, max_indid, divisor_beta_1, divisor_beta_2, amount_topurge, \
           univ, purged, week
    try:
        # Asking the user to decide the desired rr
        while True:
            wished_rr = input('please input desired rr (0.000 < rr <= 0.280; 0 to terminate): ')
            weeks = input('please input desired number of weeks of duration of project: ')
            print('\nWARNING: any estimation of rr will always start at a lower value and (slowly) increase to the desired one at the end of the desired duration (in weeks)', '\n')
            if float(wished_rr) > 0.0 and float(wished_rr) <= 0.28:
                print('the given value of rr is '+wished_rr)
                print('the given value of duration of project in weeks is ', weeks,'\n'*2)
                wished_rr = float(wished_rr)
                break
            # NOTE(review): sys.exit() already raises SystemExit itself, so
            # the leading 'raise' never executes -- harmless but redundant.
            elif float(wished_rr) == 0: raise sys.exit()
    except SystemExit:
        print('leaving')
        time.sleep(3)
        sys.exit(0)
    # The following optimisation constraint was arbitrarily included and it is ONLY FOR DEMOSTRATION purposes!!!
    # (an hyperbolic function makes more sense?... better a polynomial one?...)
    divisor_beta = (wished_rr-0.285)*6/-0.21 # Arbitrary function based on manual adjustment
    divisor_beta_1 = [divisor_beta+0.5, divisor_beta+0.45] # Depends on divisor_beta; affect the bad respondents
    divisor_beta_2 = [1.0, 1.15] # Given as fixed; affect the good respondents
    proportion_beta_est = 0.85 # Starting point
    amount_topurge = 1300 # Currently fixed value
    max_indid = [-1] # Currently NO IDEA why I have to define this here...
    # FUNCTIONS FOR PARAMETER CALCULATIONS: proportion of the bad vs good panelists; exposure_rate
    print('-'*10,'\n'*2)
    print('function parameters_univ() starts now!', '\n')
    # instead of weeks, argument week of the function set to 200 from now on for parameter calculation and baseline
    pbe, arcalc, m = parameters_univ(weeks, proportion_beta_est)
    print('\n')
    print('--> the proportion of beta_1 is {0:.2f}, its rr size divided times {1:.3f}, and ave_rr_calc is {2:.2f}'.format(pbe, abs(m), arcalc))
    print('--> function parameters_univ() was run successfully', '\n'*2, '-'*10, '\n')
    print('function calc_exposure() starts now!', '\n')
    amount_topurge = 1300
    univ = []
    purged = []
    max_indid = [-1]
    week = -200 # OJO: parameter for deciding how many baselines for universes to create
    #print("num_week ",week)
    ##actioning creating universe; set to fixed 200
    creating_baseline_univ(week, pbe, allow_unireport = 1)
    print('a baseline univ has been created for calc_exposure')
    exposure_rate = 0
    # NOTE(review): exposure_rate is computed but never passed to
    # trackering2() below -- confirm trackering2 reads it via a global.
    exposure_rate = calc_exposure()
    print('--> function calc_exposure() was run successfully', '\n'*2, '-'*10, '\n')
    # Five independent repetitions of the four experiments, each on a
    # freshly created baseline universe.
    for i in range(5):
        # univ = []
        # purged = []
        # max_indid = [-1]
        # week = 0
        # creating_baseline_univ(week, pbe)
        experiment1 = 1
        experiment2 = 1
        experiment3 = 1
        experiment4 = 1
        exp = [1,2,3,4]
        if experiment1:
            # Experiment 1: sampling with a fixed fieldwork period; no overlapping of extras; use the overall rr of the universe for calculating the sample size
            print('a workable baseline_univ() starts now! for exp ',exp[0],' iteration ', i, '\n')
            amount_topurge = 1300
            univ = []
            purged = []
            max_indid = [-1]
            week = -200 # OJO: parameter for deciding how many baselines for universes to create
            creating_baseline_univ(week, pbe)
            print('\n', '--> a workable baseline_univ was run successfully', '\n'*2, '-'*10, '\n'*2)
            trackering2(experiment = exp[0], iteration = i+1, num_weeks = int(weeks), week_target = 100, ir = .35, dist_fw = [4])
        if experiment2:
            # Experiment 2: sampling with a fixed fieldwork period; no overlapping of extras; use the rr of the xxxx for calculating the sample size
            print('a workable baseline_univ() starts now! for exp ',exp[1],' iteration ', i, '\n')
            amount_topurge = 1300
            univ = []
            purged = []
            max_indid = [-1]
            week = -200 # OJO: parameter for deciding how many baselines for universes to create
            creating_baseline_univ(week, pbe)
            print('\n', '--> a workable baseline_univ was run successfully', '\n'*2, '-'*10, '\n'*2)
            trackering2(experiment = exp[1], iteration = i+1, num_weeks = int(weeks), week_target = 100, ir = .35, dist_fw = [4], rr_type = 2)
        if experiment3:
            # Experiment 3: sampling with different fieldworks periods with a random distribution; no overlapping of extras
            print('a workable baseline_univ() starts now! for exp ',exp[2],' iteration ', i, '\n')
            amount_topurge = 1300
            univ = []
            purged = []
            max_indid = [-1]
            week = -200 # OJO: parameter for deciding how many baselines for universes to create
            creating_baseline_univ(week, pbe)
            print('\n', '--> a workable baseline_univ was run successfully', '\n'*2, '-'*10, '\n'*2)
            trackering2(experiment = exp[2], iteration = i+1, num_weeks = int(weeks), week_target = 100, ir = .35, dist_fw = [4,4,4,4,4,4,3,3,3,3,2,2,1])
        if experiment4:
            # Experiment 4: sampling with different fieldworks periods with a random distribution; no overlapping of extras
            print('a workable baseline_univ() starts now! for exp ',exp[3],' iteration ', i, '\n')
            amount_topurge = 1300
            univ = []
            purged = []
            max_indid = [-1]
            week = -200 # OJO: parameter for deciding how many baselines for universes to create
            creating_baseline_univ(week, pbe)
            print('\n', '--> a workable baseline_univ was run successfully', '\n'*2, '-'*10, '\n'*2)
            trackering2(experiment = exp[3], iteration = i+1, num_weeks = int(weeks), week_target = 100, ir = .35, dist_fw = [4,4,4,4,4,4,3,3,3,3,2,2,1], rr_type = 2)
    # Close the report files opened at module level.
    file_universe.close()
    file_eligible.close()
    file_fieldwork.close()
    file_summaries.close()
    file_summaries2.close()


if __name__ == "__main__":
    main()
| evaristoc/Online_Panel_Simulation | phase01/panelsimulation_dem_v1.2.py | panelsimulation_dem_v1.2.py | py | 45,181 | python | en | code | 0 | github-code | 13 |
74675936658 | from collections import namedtuple
from decimal import Decimal
from unittest.mock import patch
from django.test import TestCase
from factory.fuzzy import FuzzyText, FuzzyInteger, FuzzyDecimal
from oauth2_provider.contrib.rest_framework import OAuth2Authentication
from oauth2_provider.models import Application
from rest_framework.test import APITestCase, APIClient
from store.api.permissions import TokenHasAdminScope, UserReadsAdminWrites
from store.api.serializers import ProductVariantSerializer, ProductDetailSerializer, SimpleProductSerializer
from store.api.viewsets import ProductViewSet
from store.factories import ProductVariantFactory, PriceHistoryFactory, CustomerRatingFactory, ProductFactory, \
TagFactory
from store.models import Tag, PriceHistory
class TagAPITestCase(APITestCase):
    """API tests for the Tag endpoint."""

    def test_name_should_be_unique(self):
        """Posting a second tag with an existing name must be rejected."""
        first = {"name": "ofertas"}
        duplicate = {"name": "ofertas", "description": "Promoções"}
        created = self.client.post('/api/tags/', first, format='json')
        self.assertEqual(created.status_code, 201)
        rejected = self.client.post('/api/tags/', duplicate, format='json')
        self.assertEqual(rejected.status_code, 400)
        self.assertEqual(Tag.objects.count(), 1)
class TokenHasAdminScopeTestCase(TestCase):
    """Unit test for the TokenHasAdminScope permission class."""

    def test_get_scopes_was_admin_only(self):
        """The permission must require exactly the 'admin' scope."""
        scopes = TokenHasAdminScope().get_scopes(None, None)
        self.assertEqual(scopes, ['admin'])
class UserReadsAdminWritesTestCase(TestCase):
    """Unit tests for the UserReadsAdminWrites permission class."""

    def setUp(self):
        self.request = namedtuple('Request', ['method', 'successful_authenticator', 'auth'])

    def _build(self, method=None, authenticator=None):
        """Return a minimal request stub carrying *method* and *authenticator*."""
        return self.request(method=method, successful_authenticator=authenticator, auth=None)

    def test_is_oauth2_authenticated_false_if_not_successful_authenticator(self):
        request = self._build()
        self.assertFalse(UserReadsAdminWrites().is_oauth2_authenticated(request))

    def test_is_oauth2_authenticated_false_if_not_OAuth2Authentication(self):
        request = self._build(authenticator=object())
        self.assertFalse(UserReadsAdminWrites().is_oauth2_authenticated(request))

    def test_is_oauth2_authenticated_true_if_OAuth2Authentication(self):
        request = self._build(authenticator=OAuth2Authentication())
        self.assertTrue(UserReadsAdminWrites().is_oauth2_authenticated(request))

    @patch('store.api.permissions.TokenHasAdminScope.has_permission')
    def test_has_permission_false_if_is_post_patch_put_delete(self, mock):
        """Without the admin scope, every write verb is denied."""
        mock.return_value = False
        authenticator = OAuth2Authentication()
        for method in ['POST', 'PATCH', 'PUT', 'DELETE']:
            request = self._build(method=method, authenticator=authenticator)
            self.assertFalse(UserReadsAdminWrites().has_permission(request, None))

    @patch('store.api.permissions.TokenHasAdminScope.has_permission')
    def test_has_permission_true_if_is_get_head_options(self, mock):
        """Read-only verbs are allowed even without the admin scope."""
        mock.return_value = False
        authenticator = OAuth2Authentication()
        for method in ['GET', 'HEAD', 'OPTIONS']:
            request = self._build(method=method, authenticator=authenticator)
            self.assertTrue(UserReadsAdminWrites().has_permission(request, None))

    @patch('store.api.permissions.TokenHasAdminScope.has_permission')
    def test_has_permission_true_if_was_admin(self, mock):
        """With the admin scope, every verb is allowed."""
        mock.return_value = True
        authenticator = OAuth2Authentication()
        for method in ['GET', 'HEAD', 'OPTIONS', 'POST', 'PATCH', 'PUT', 'DELETE']:
            request = self._build(method=method, authenticator=authenticator)
            self.assertTrue(UserReadsAdminWrites().has_permission(request, None))
class ProductVariantSerializerTestCase(TestCase):
    """Unit tests for ProductVariantSerializer."""

    def test_get_price_history_returns_up_to_10_prices(self):
        """Even with 20 recorded prices, at most 10 may be serialized."""
        variant = ProductVariantFactory()
        PriceHistoryFactory.create_batch(20, product_variant=variant)
        history = ProductVariantSerializer().get_price_history(variant)
        self.assertLessEqual(len(history), 10)
class ProductDetailSerializerTestCase(TestCase):
    """Unit tests for ProductDetailSerializer."""

    def test_get_ratings_returns_up_to_10_prices(self):
        """Even with 20 stored ratings, at most 10 may be serialized."""
        product = ProductFactory()
        CustomerRatingFactory.create_batch(20, product=product)
        ratings = ProductDetailSerializer().get_ratings(product)
        self.assertLessEqual(len(ratings), 10)

    def test_get_related_products_uses_SimpleProductSerializer(self):
        """Related products must be rendered with the lightweight serializer."""
        product = ProductFactory()
        related = ProductDetailSerializer().get_related_products(product)
        self.assertIsInstance(related.serializer.child, SimpleProductSerializer)
class ProductViewSetTestCase(TestCase):
    """Unit tests for ProductViewSet."""

    def test_detailed_uses_ProductDetailSerializer(self):
        """The /detailed/ action must serialize with ProductDetailSerializer."""
        product = ProductFactory()
        FakeRequest = namedtuple('Request', ['query_params'])
        fake_request = FakeRequest(query_params={})
        viewset = ProductViewSet()
        viewset.request = fake_request
        viewset.kwargs = {'pk': product.id}
        response = viewset.detailed(fake_request, pk=product.id)
        self.assertIsInstance(response.data.serializer, ProductDetailSerializer)
class OAuth2AuthMixin:
    """Mixin that provisions an OAuth2 application and hands out tokens/headers."""

    def get_oauth_token(self, admin=False):
        """Create a confidential client-credentials app and return a fresh access token.

        When *admin* is True the token is requested with the 'admin' scope.
        """
        client = getattr(self, 'client', APIClient())
        credentials = {
            'client_id': FuzzyText().fuzz(),
            'client_secret': FuzzyText().fuzz(),
        }
        Application.objects.create(
            client_type=Application.CLIENT_CONFIDENTIAL,
            authorization_grant_type=Application.GRANT_CLIENT_CREDENTIALS,
            name=FuzzyText().fuzz(),
            **credentials,
        )
        payload = dict(credentials, grant_type='client_credentials')
        if admin:
            payload['scope'] = 'admin'
        response = client.post('/oauth2/token/', data=payload)
        return response.json()['access_token']

    def get_oauth_headers(self, admin=False):
        """Return an Authorization header dict carrying a fresh bearer token."""
        token = self.get_oauth_token(admin)
        return {'Authorization': f'Bearer {token}'}
class ProductDetailTestCase(OAuth2AuthMixin, APITestCase):
    """Integration tests for the product /detailed/ endpoint."""

    def setUp(self):
        self.client = APIClient()
        self.oauth_headers = self.get_oauth_headers()

    def test_product_detailed_has_related_products_and_ratings(self):
        """The detailed payload must expose related products and ratings."""
        tag = TagFactory()
        products = ProductFactory.create_batch(100, tags=[tag])
        target = products[0]
        response = self.client.get(f'/api/products/{target.id}/detailed/', headers=self.oauth_headers)
        self.assertIn('related_products', response.data)
        self.assertIn('ratings', response.data)
class CustomerRatingsTestCase(OAuth2AuthMixin, APITestCase):
    """Integration tests for customer ratings and product rating aggregation."""

    def setUp(self):
        self.client = APIClient()
        self.oauth_headers = self.get_oauth_headers()

    def _post_rating(self, product, rating):
        """POST a single rating for *product* and return the response."""
        payload = {
            'product': product.id,
            'rating': rating,
            'description': FuzzyText().fuzz(),
        }
        return self.client.post('/api/products/rating/', headers=self.oauth_headers, data=payload)

    def test_customer_rating_must_be_between_1_and_5(self):
        product = ProductFactory()
        self.assertIsNone(product.rating)
        # Out-of-range values are rejected and leave the product untouched.
        for invalid in (-1, 0, 6, 7):
            response = self._post_rating(product, invalid)
            self.assertEqual(response.status_code, 400)
            self.assertIn('rating', response.json())
            product.refresh_from_db()
            self.assertIsNone(product.rating)
        # Every value from 1 to 5 is accepted; their average is 3.0.
        for valid in range(1, 6):
            response = self._post_rating(product, valid)
            self.assertEqual(response.status_code, 201)
        product.refresh_from_db()
        self.assertAlmostEqual(product.rating, 3.0)

    def test_new_customer_rating_updates_product_rating(self):
        product = ProductFactory()
        self.assertIsNone(product.rating)
        value = FuzzyInteger(1, 5).fuzz()
        response = self._post_rating(product, value)
        self.assertEqual(response.status_code, 201)
        product.refresh_from_db()
        self.assertAlmostEqual(product.rating, value)

    def test_multiple_customer_rating_updates_product_rating(self):
        product = ProductFactory()
        self.assertIsNone(product.rating)
        ratings = [FuzzyInteger(1, 5).fuzz() for _ in range(15)]
        expected = sum(ratings) / len(ratings)
        for value in ratings:
            response = self._post_rating(product, value)
            self.assertEqual(response.status_code, 201)
        product.refresh_from_db()
        self.assertAlmostEqual(product.rating, expected)
class PriceHistoryTestCase(OAuth2AuthMixin, APITestCase):
    """Integration tests for price-history tracking on product variants."""

    def setUp(self):
        self.client = APIClient()
        self.oauth_headers = self.get_oauth_headers()

    def _change_price(self, variant, price):
        """PATCH a new *price* onto *variant* using an admin token; assert 200."""
        response = self.client.patch(
            f'/api/products/variants/{variant.id}/',
            headers=self.get_oauth_headers(admin=True),
            data={'price': price},
        )
        self.assertEqual(response.status_code, 200)
        return response

    def test_product_variant_price_changes_create_price_history(self):
        variant = ProductVariantFactory()
        # Creating the variant itself records the first price entry.
        self.assertEqual(PriceHistory.objects.filter(product_variant=variant.id).count(), 1)
        self._change_price(variant, FuzzyDecimal(999999.99).fuzz())
        response = self.client.get(f'/api/products/variants/{variant.id}/', headers=self.oauth_headers)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data['price_history']), 2)

    def test_product_variant_price_changes_retuns_up_to_10_price_histories(self):
        variant = ProductVariantFactory()
        for _ in range(15):
            self._change_price(variant, FuzzyDecimal(999999.99).fuzz())
        response = self.client.get(f'/api/products/variants/{variant.id}/', headers=self.oauth_headers)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.data['price_history']), 10)

    def test_price_history_desc_order(self):
        variant = ProductVariantFactory()
        price_changes = [variant.price]
        for _ in range(5):
            new_price = FuzzyDecimal(999999.99).fuzz()
            price_changes.append(new_price)
            self._change_price(variant, new_price)
        response = self.client.get(f'/api/products/variants/{variant.id}/', headers=self.oauth_headers)
        self.assertEqual(response.status_code, 200)
        listed = [Decimal(entry['price']) for entry in response.data['price_history']]
        # Newest price first.
        self.assertEqual(listed, list(reversed(price_changes)))
| diegorocha/bringel | src/store/api/tests.py | tests.py | py | 12,467 | python | en | code | 0 | github-code | 13 |
41599596598 | import os
import ast
def unique_variables(directory):
    """Print, for each .py file in *directory*, the unique variable names assigned in it.

    A "variable" is any name appearing in a store context (assignment target,
    for-loop target, etc.).  Files that fail to parse are silently skipped.

    :param directory: path of the directory to scan (non-recursive).
    """
    for filename in os.listdir(directory):
        if not filename.endswith(".py"):
            continue
        path = os.path.join(directory, filename)
        with open(path, "r", encoding="utf-8") as f:
            code = f.read()
        try:
            tree = ast.parse(code)
        except SyntaxError:
            # Unparseable file (e.g. Python 2 or a broken snippet): skip it.
            continue
        variables = {
            node.id
            for node in ast.walk(tree)
            if isinstance(node, ast.Name) and isinstance(node.ctx, ast.Store)
        }
        # Sort for deterministic output -- sets have no stable order.
        print("Unique variables in {}: {}".format(filename, ", ".join(sorted(variables))))
unique_variables("D:/Hacathon/permisson less/finyash/Finereview/review/codes") | ranjit7858/Fine_Code | Finereview/review/unique.py | unique.py | py | 784 | python | en | code | 0 | github-code | 13 |
30795286917 | """empty message
Revision ID: e596d1ca7402
Revises: 5c4d7e80737e
Create Date: 2023-07-26 00:13:11.241798
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'e596d1ca7402'  # this migration's id
down_revision = '5c4d7e80737e'  # migration this one applies on top of
branch_labels = None
depends_on = None
def upgrade() -> None:
    """Widen ``subscription.channel_id`` from INTEGER to BIGINT.

    NOT NULL and the server default of 0 are preserved.  (Presumably channel
    ids can exceed the signed 32-bit range -- confirm against the bot code.)
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('subscription', 'channel_id',
               existing_type=sa.INTEGER(),
               type_=sa.BigInteger(),
               existing_nullable=False,
               existing_server_default=sa.text('0'))
    # ### end Alembic commands ###
def downgrade() -> None:
    """Revert ``subscription.channel_id`` from BIGINT back to INTEGER.

    Values outside the 32-bit range would make this downgrade fail or
    truncate, depending on the backend.
    """
    # ### commands auto generated by Alembic - please adjust! ###
    op.alter_column('subscription', 'channel_id',
               existing_type=sa.BigInteger(),
               type_=sa.INTEGER(),
               existing_nullable=False,
               existing_server_default=sa.text('0'))
    # ### end Alembic commands ###
| bullbulk/vk-notifications | alembic/versions/e596d1ca7402_.py | e596d1ca7402_.py | py | 999 | python | en | code | 0 | github-code | 13 |
10313306705 | from itertools import permutations
# Load the puzzle program: a single line of comma-separated Intcode integers.
f = open('day7input.txt', "r")
lines = f.readlines()
numbers = list(map(int, lines[0].split(',')))
def getValues(input, pos, opcode, mode1, mode2, mode3):
    """Resolve the *read* parameters of the instruction at *pos*.

    An Intcode instruction is a zero-padded 5-digit string "ABCDE": DE is
    the opcode, C (``mode3`` here) is parameter 1's mode, B (``mode2``)
    parameter 2's and A (``mode1``) parameter 3's.  Mode "0" is position
    mode (dereference the value), "1" is immediate mode (use it directly).

    Parameters an instruction writes to are never fetched here -- write
    targets are always treated as addresses by the caller -- so ``mode1``
    is accepted only for interface compatibility.  (The original had a dead
    ``if opcode in []:`` branch for it, removed as unreachable.)
    """
    values = []
    # Parameter 1 is read by every opcode except input (03).
    if opcode in ["01", "02", "04", "05", "06", "07", "08"]:
        if mode3 == "0":
            values.append(input[input[pos + 1]])
        else:
            values.append(input[pos + 1])
    # Parameter 2 is read by the arithmetic, jump and comparison opcodes.
    if opcode in ["01", "02", "05", "06", "07", "08"]:
        if mode2 == "0":
            values.append(input[input[pos + 2]])
        else:
            values.append(input[pos + 2])
    return values
def IntcodeComputer(numbers, input, j, halt):
    """Run the Intcode program in *numbers* (mutated in place) and return its last output.

    The first input instruction (opcode 03) consumes *j* (the phase
    setting); every later one consumes *input* (the previous amplifier's
    output).  *halt* is accepted for interface compatibility but is not
    used by the computation.  The stray debug ``print(opcode)`` from the
    original has been removed.
    """

    def read_param(pos, offset, mode):
        # Position mode ("0") dereferences; immediate mode ("1") uses the value.
        value = numbers[pos + offset]
        return numbers[value] if mode == "0" else value

    i = 0
    inputs = 0
    while numbers[i] != 99:
        # Instruction is "ABCDE": DE = opcode, C/B = modes of params 1/2.
        instruction = f"{numbers[i]:05}"
        opcode = instruction[3:]
        mode_p1 = instruction[2]
        mode_p2 = instruction[1]
        if opcode == "01":  # add
            numbers[numbers[i + 3]] = read_param(i, 1, mode_p1) + read_param(i, 2, mode_p2)
            i += 4
        elif opcode == "02":  # multiply
            numbers[numbers[i + 3]] = read_param(i, 1, mode_p1) * read_param(i, 2, mode_p2)
            i += 4
        elif opcode == "03":  # input: phase setting first, then the chained signal
            numbers[numbers[i + 1]] = j if inputs == 0 else input
            inputs += 1
            i += 2
        elif opcode == "04":  # output
            output = read_param(i, 1, mode_p1)
            i += 2
        elif opcode == "05":  # jump-if-true
            if read_param(i, 1, mode_p1):
                i = read_param(i, 2, mode_p2)
            else:
                i += 3
        elif opcode == "06":  # jump-if-false
            if not read_param(i, 1, mode_p1):
                i = read_param(i, 2, mode_p2)
            else:
                i += 3
        elif opcode == "07":  # less-than
            numbers[numbers[i + 3]] = 1 if read_param(i, 1, mode_p1) < read_param(i, 2, mode_p2) else 0
            i += 4
        elif opcode == "08":  # equals
            numbers[numbers[i + 3]] = 1 if read_param(i, 1, mode_p1) == read_param(i, 2, mode_p2) else 0
            i += 4
    # NameError here would mean the program halted without ever producing
    # output -- same behavior as the original.
    return output
# Part 1: try every permutation of phase settings 0-4, chaining the five
# amplifiers so each one's output feeds the next one's input.
maximum = 0
halt = False
for phases in permutations(range(5), 5):
    output = 0
    for phase in phases:
        # BUG FIX: run each amplifier on a fresh copy of the program --
        # IntcodeComputer mutates its memory in place, so reusing the same
        # list corrupts later runs.  (Also renamed the loop variable from
        # `list`, which shadowed the builtin.)
        output = IntcodeComputer(numbers[:], output, phase, halt)
    maximum = max(maximum, output)
print("The largest possible thrust (part 1) is: ", maximum)
f.close()
| wolframalexa/AdventOfCode | 2019/day7.py | day7.py | py | 2,328 | python | en | code | 1 | github-code | 13 |
39686428332 | """Base class for constructing an analytic engine with analytics."""
from collections import namedtuple
from .schema import EVENT_TYPE_GENERIC
from .utils import is_string
class Event(namedtuple('Event', ['type', 'time', 'data'])):
    """A single (type, time, data) event consumed by the EQL python engine."""

    @classmethod
    def from_data(cls, data):
        """Load an event from a dictionary.

        :param dict data: Dictionary with the event type, time, and keys.
        """
        data = data.get('data_buffer', data)
        timestamp = data.get('timestamp', 0)
        # Default to the generic type, then prefer an explicit string
        # 'event_type', then fall back to 'event_type_full' minus any
        # trailing '_event' suffix.
        event_type = EVENT_TYPE_GENERIC
        if is_string(data.get('event_type')):
            event_type = data['event_type']
        elif 'event_type_full' in data:
            event_type = data['event_type_full']
            if event_type.endswith('_event'):
                event_type = event_type[:-len('_event')]
        return cls(event_type, timestamp, data)

    def copy(self):
        """Return a copy of this event whose data dict is independently mutable."""
        return Event(self.type, self.time, self.data.copy())
class AnalyticOutput(namedtuple('AnalyticOutput', ['analytic_id', 'events'])):
    """Result of running an analytic: its id plus the matched events."""

    @classmethod
    def from_data(cls, events, analytic_id=None):  # type: (list[dict], str) -> AnalyticOutput
        """Build an AnalyticOutput, converting each raw dict into an Event."""
        converted = [Event.from_data(event) for event in events]
        return cls(analytic_id, converted)
| endgameinc/eql | eql/events.py | events.py | py | 1,443 | python | en | code | 203 | github-code | 13 |
16022213005 | import pathlib, subprocess, os, psutil
from tbtamr.CustomLog import logger
class Tbtamr(object):
    """
    Base class for tbtamr pipeline steps: captures system load, core count
    and the working directory, and provides shared helpers for running
    shell commands and validating expected tb-profiler output files.
    """
    def __init__(self):
        # 1/5/15-minute system load averages, used later to size concurrency.
        self.one,self.five,self.fifteen = psutil.getloadavg()
        self.total_cores = os.cpu_count()
        self._cwd = pathlib.Path.cwd()

    def _run_cmd(self, cmd):
        """
        Use subprocess to run the command for tb-profiler.
        Returns True on success; logs and exits the program on failure.
        """
        logger.info(f"Now running : {cmd}")
        p = subprocess.run(cmd, shell = True, capture_output = True, encoding = "utf-8")
        if p.returncode == 0:
            logger.info(f"{cmd} completed successfully. Will now move on to phenotype inferrence.")
            return True
        else:
            logger.critical(f"There appears to have been a problem with running {cmd}. The following error has been reported : \n {p.stderr}")
            raise SystemExit

    def _check_output_file(self, seq_id, step):
        """
        Return the path (str) of the expected output file for *seq_id* at
        *step*, or False when it does not exist yet.  tb-profiler writes
        <seq_id>/results/<seq_id>.results.json during 'profile' and
        <seq_id>/tb-profiler_report.json for any other step.
        """
        wldcrd = f"{seq_id}/results/{seq_id}.results.json" if step == 'profile' else f"{seq_id}/tb-profiler_report.json"
        p = sorted(self._cwd.glob(wldcrd))
        if p != []:
            logger.info(f"{p[0]} has been found")
            return f"{p[0]}"
        else:
            return False

    def _check_output(self, isolates, step = 'profile'):
        """
        Verify the expected output file exists for every isolate and record
        its path under isolates[iso][step]; exits if any file is missing.
        """
        for iso in isolates:
            present = self._check_output_file(seq_id= iso, step = step)
            if present:
                # NOTE(review): the glob is evaluated a second time here;
                # `present` already holds the same path.
                isolates[iso][step] = self._check_output_file(seq_id= iso, step = step)
            else:
                logger.critical(f"There seems to be a serious problem - files {iso} were not created. Please check logs and try again.")
                raise SystemExit
        logger.info(f"All files for step : {step} have been created.")
        return isolates

    def _set_threads(self, jobs):
        """
        Decide how many tb-profiler jobs to run, given the requested *jobs*
        and the current system load.  Returns an int job count.
        """
        # NOTE(review): jobs is halved before every comparison below --
        # looks like each job is budgeted at two cores, confirm intended.
        jobs = int(jobs)/2
        # Headroom: total cores minus the highest recent load average.
        max_tbjob = self.total_cores - max(self.one,self.five,self.fifteen)
        logger.info(f"The available cores is : {max_tbjob}")
        if int(jobs) == 0:
            logger.info(f"Number of TB-profiler jobs to run {max_tbjob}")
            return int(max_tbjob/2)
        elif int(jobs) < max_tbjob/2:
            logger.info(f"Number of TB-profiler jobs to run {jobs}")
            return int(jobs)
        else:
            logger.info(f"Number of TB-profiler jobs to run {max_tbjob}")
            return int(max_tbjob/2)

    def _clean_cmd(self, path):
        """Return a shell command string that removes *path* recursively."""
        cmd = f"rm -rf {path}"
        return cmd

    def _file_present(self, name):
        """
        check file is present
        :name is the path of the file to check
        Returns True when *name* is a non-empty path that exists on disk.
        """
        if name == "":
            return False
        elif pathlib.Path(name).exists():
            logger.info(f"Checking if file {name} exists")
            return True
        else:
            return False
| MDU-PHL/tbtamr | tbtamr/TbTamr.py | TbTamr.py | py | 3,002 | python | en | code | 2 | github-code | 13 |
6758081757 | import csv
import re
#Create a List of Dicts containing Mac and Port
def get_switch_arp(switch_arp):
    """Read a switch MAC/ARP-table CSV and return a list of row dicts.

    The first CSV row is treated as the header.  MAC addresses given as
    xx-xx-xx-xx-xx-xx or as 12 contiguous hex characters are normalised to
    Cisco dotted format (xxxx.xxxx.xxxx); anything else is left untouched.

    :param switch_arp: path of the CSV file exported from the switch.
    :return: list of dicts keyed by the CSV header (e.g. Mac_Address, Port).
    """
    with open(switch_arp) as file:
        reader = csv.reader(file, skipinitialspace=True)
        header = next(reader)
        connects = [dict(zip(header, row)) for row in reader]
    for connect in connects:
        mac = connect['Mac_Address']
        if re.fullmatch(r'\w\w-\w\w-\w\w-\w\w-\w\w-\w\w', mac):
            digits = mac.replace('-', '')
        elif re.fullmatch(r'\w{12}', mac):
            digits = mac
        else:
            continue
        connect['Mac_Address'] = f"{digits[0:4]}.{digits[4:8]}.{digits[8:12]}"
    # BUG FIX: the original built `connects` but never returned it, so the
    # caller (update_lines) received None.
    return connects
#Add line names to connects
def add_line_names(connects):
    """Interactively collect a line name for each distinct port, then tag every row.

    Mutates each dict in *connects* by adding a 'Line' key; also returns the
    same list for convenience.
    """
    ports = {connect['Port'] for connect in connects}
    lines = []
    for port in ports:
        line_name = input('Name of line for port - ' + port + ' - ')
        lines.append({'Port': port, 'Line_Name': line_name})
    for connect in connects:
        for line in lines:
            if connect['Port'] == line['Port']:
                connect['Line'] = line['Line_Name']
    return connects
def add_ips():
    """Fill in the 'IP' key on every row in ``connects`` from the master ARP export.

    NOTE(review): ``connects`` is a free variable here -- update_lines()
    creates it as a *local*, so calling this function raises NameError
    unless a global ``connects`` exists.  Confirm and consider passing it
    in as a parameter instead.
    """
    # Match rows against the master ARP dump by MAC address.
    with open(r'c:\Users\Kyle\Desktop\master_arp.csv') as file:
        arp_reader = csv.DictReader(file)
        for line in arp_reader:
            for connect in connects:
                if connect['Mac_Address'] == line['MAC ADDRESS']:
                    connect['IP'] = line['IP Address']
    # Any row still missing an IP (and not marked Ignore) is resolved by hand.
    for connect in connects:
        if connect['Line'] == 'Ignore':
            continue
        elif 'IP' not in connect:
            connect['IP'] = input(connect['Mac_Address'] + ' - Not Found, Enter IP - ')
def update_lines():
    """Interactive entry point: load a switch ARP CSV, name each port,
    then resolve IP addresses for every record.

    NOTE(review): ``add_ips()`` expects a module-global ``connects`` that
    this function only creates locally -- verify how this script was
    actually run.
    """
    switch_arp = input('Enter Csv filename for switch arp - ')
    #Create List of Dicts containing Mac and Port
    connects = get_switch_arp(switch_arp)
    add_line_names(connects)
    add_ips()
if __name__ == '__main__':
    update_lines()
| kysevenle/work_package | scripts/update_lines.py | update_lines.py | py | 2,425 | python | en | code | 0 | github-code | 13 |
16847351670 | # -*- coding: utf-8 -*-
# @Time : 2019/05
# @Author : XiaoXi
# @PROJECT : Aff_service
# @File : read_param.py
import json
from json import JSONDecodeError
from bin.unit.replaceRelevance import replace
def read_param(test_name, param, _path, relevance=None):
    """Resolve the ``parameter`` entry of a test case.

    A dict or list is returned after correlation substitution; ``None`` is
    passed through untouched; any other value is treated as a JSON file
    name (relative to ``_path``) from which the matching test case's
    ``parameter`` is loaded and substituted.

    :param test_name: name of the test case to look up in the JSON file
    :param param: inline parameter value, or a JSON case-file name
    :param _path: directory containing the case file
    :param relevance: correlation object used for placeholder substitution
    :return: the resolved parameter value
    """
    if isinstance(param, (dict, list)):
        return replace(param, relevance)
    if param is None:
        return param
    try:
        with open(_path + "/" + param, "r", encoding="utf-8") as fp:
            cases = json.load(fp)
    except FileNotFoundError:
        raise Exception("用例关联文件不存在\n文件路径: %s" % param)
    except JSONDecodeError:
        raise Exception("用例关联的参数文件有误\n文件路径: %s" % param)
    for case in cases:
        if case["test_name"] == test_name:
            param = case["parameter"]
            break
    if not isinstance(param, dict):
        raise Exception("未能找到用例关联的参数\n文件路径:%s\n索引:%s" % (param, _path))
    return replace(param, relevance)
| wangxiaoxi3/API_service | bin/unit/readParameter.py | readParameter.py | py | 1,490 | python | en | code | 156 | github-code | 13 |
15912035985 | # -*- coding: utf-8 -*-
# (c) 2017 Andreas Motl <andreas@ip-tools.org>
import os
import sys
import logging
import slugify
import pathvalidate
from datetime import datetime
def to_list(obj):
    """Wrap *obj* in a list unless it is already a list or tuple."""
    # stolen from cornice.util
    if isinstance(obj, (list, tuple)):
        return obj
    return [obj]
def read_list(data, separator=u','):
    """Split *data* on *separator* and strip each item.

    ``None`` and the empty string both yield an empty list; interior
    empty items are preserved.
    """
    if data is None:
        return []
    items = [token.strip() for token in data.split(separator)]
    if len(items) == 1 and not items[0]:
        return []
    return items
def boot_logging(options=None):
    """Initialise logging; a truthy '--debug' option switches to DEBUG."""
    wants_debug = bool(options and options.get('--debug'))
    setup_logging(level=logging.DEBUG if wants_debug else logging.INFO)
def setup_logging(level=logging.INFO):
    """Configure root logging to stderr with a uniform record format."""
    logging.basicConfig(
        format='%(asctime)-15s [%(name)-20s] %(levelname)-7s: %(message)s',
        stream=sys.stderr,
        level=level)
def normalize_options(options):
    """Strip docopt-style decoration ('--flag', '<arg>') from option keys."""
    return {key.strip('--<>'): value for key, value in options.items()}
def get_document_path(directory, name, format, source=None):
    """Build a sanitized document file path like ``DIR/NAME.source.fmt``.

    :param directory: base directory for the file
    :param name: document name (upper-cased in the file name)
    :param format: file suffix (lower-cased)
    :param source: optional source tag inserted before the suffix
    :return: full path string

    The original implementation crashed with AttributeError when *source*
    was omitted (it unconditionally called ``source.lower()``); an absent
    source now simply yields ``NAME.fmt``-style names.
    """
    source = source.lower() + '.' if source else ''
    filename = pathvalidate.sanitize_filename('{name}.{source}{suffix}'.format(
        name=name.upper(), source=source, suffix=format.lower()))
    filepath = os.path.join(directory, filename)
    return filepath
def get_archive_path(directory, name, format, source=None):
    """Build a timestamped, slugified archive path like
    ``DIR/uspto-src_YYYYMMDDTHHMMSS_name.fmt.zip``.

    :param directory: base directory for the archive
    :param name: archive name (sanitized and slugified)
    :param format: payload format tag (lower-cased)
    :param source: optional source tag appended to the 'uspto' prefix
    :return: full path string

    The original implementation crashed with AttributeError when *source*
    was omitted; an absent source now just drops the '-source' segment.
    """
    source = '-' + source.lower() if source else ''
    timestamp = datetime.utcnow().strftime('%Y%m%dT%H%M%S')
    name = pathvalidate.sanitize_filename(name)
    name = slugify.slugify_filename(name)
    filename = 'uspto{source}_{timestamp}_{name}.{format}.zip'.format(
        name=name, timestamp=timestamp, source=source, format=format.lower())
    filepath = os.path.join(directory, filename)
    return filepath
def read_numbersfile(filename):
    """Read one document number per line from *filename*.

    Lines are stripped of surrounding whitespace and lines starting with
    '#' are skipped.  Returns a list (the original version leaked the
    open file handle and returned a lazy ``filter`` object).
    """
    with open(filename, 'r') as stream:
        stripped = [line.strip() for line in stream]
    return [number for number in stripped if not number.startswith('#')]
class SmartException(Exception):
    """Exception that carries arbitrary extra context in ``more_info``."""

    def __init__(self, message, **kwargs):
        """Store *message* as the exception text and *kwargs* as metadata."""
        super(SmartException, self).__init__(message)
        # Keep any extra keyword context for callers to inspect later.
        self.more_info = kwargs
17052319104 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class Gavintest(object):
    """Alipay API domain model with a single ``newid`` field."""

    def __init__(self):
        self._newid = None

    @property
    def newid(self):
        """The ``newid`` field value (any JSON-serialisable object)."""
        return self._newid

    @newid.setter
    def newid(self, value):
        self._newid = value

    def to_alipay_dict(self):
        """Serialise this object to a plain dict for the Alipay gateway."""
        params = dict()
        if self.newid:
            # Nested API objects expose their own serialiser; plain
            # values are passed through unchanged.
            value = self.newid
            params['newid'] = value.to_alipay_dict() if hasattr(value, 'to_alipay_dict') else value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build a Gavintest from a response dict; a falsy dict yields None."""
        if not d:
            return None
        instance = Gavintest()
        if 'newid' in d:
            instance.newid = d['newid']
        return instance
372056814 | from triangle import Triangle
import genetic_algorithm
from PIL import Image
import sys
import os
import gc
def command_line_arg(target_img, population_size, num_of_triangles, crossover_rate, mutation_rate, mutation_amount):
    """Validate the GA rate arguments and load the target image.

    Prints a message and exits with status 1 when any rate falls outside
    [0.0, 1.0]; otherwise returns the argument list with the target image
    opened via PIL.
    """
    bounds = [
        (crossover_rate, "Crossover rate must be between 0.0 and 1.0"),
        (mutation_rate, "Mutation rate must be between 0.0 and 1.0"),
        (mutation_amount, "Mutation amount must be between 0.0 and 1.0"),
    ]
    for value, message in bounds:
        rate = float(value)
        if rate > 1.0 or rate < 0.0:
            print(message)
            exit(1)
    return [Image.open(target_img), population_size, num_of_triangles, crossover_rate, mutation_rate, mutation_amount]
if __name__ == "__main__":
# Retrieve the target image, initial population size, and
# number of triangles for each individual.
target_img, population_size, num_of_triangles, crossover_rate, mutation_rate, mutation_amount = command_line_arg(
sys.argv[1], int(sys.argv[2]), int(sys.argv[3]), float(sys.argv[4]), float(sys.argv[5]), float(sys.argv[6]))
img_width, img_height = target_img.size
if not os.path.exists("generated_images"):
os.makedirs("generated_images")
# Get the initial population of individuals.
individuals = genetic_algorithm.initial_population(
img_width, img_height, population_size, num_of_triangles)
# Initialize a variable for the next individual ID.
next_id = len(individuals) + 1
# Get the parents selected for reproduction.
parent_1, parent_2 = genetic_algorithm.selection(target_img, individuals)
parent_1.image.save(f"generated_images/{parent_1.id}.jpg", 'JPEG')
parent_2.image.save(f"generated_images/{parent_2.id}.jpg", 'JPEG')
# Remove the individuals from memory to speed up the program.
del individuals
gc.collect()
while True:
# Retrieve the children individuals using the reproduction funciton call.
children = genetic_algorithm.reproduction(
parent_1, parent_2, population_size, num_of_triangles, next_id, crossover_rate, mutation_rate,
mutation_amount, img_width, img_height)
parent_1, parent_2 = genetic_algorithm.selection(target_img, children)
print(parent_1.fitness, parent_2.fitness)
# Save every 20 individuals as images.
if next_id % 20 == 0:
parent_1.image.save(f"generated_images/{parent_1.id}.jpg", 'JPEG')
parent_2.image.save(f"generated_images/{parent_2.id}.jpg", 'JPEG')
# Remove the children from memory to speed up the program.
del children
gc.collect()
# Increment the ID.
next_id += 1
| WinstonShields/Genetic_Algorithm | main.py | main.py | py | 2,758 | python | en | code | 0 | github-code | 13 |
def main(a=None, b=None):
    """Print B/A rounded half-up to three decimals (AtCoder ABC274 A).

    :param a: denominator A; defaults to the module-global ``A``
    :param b: numerator B; defaults to the module-global ``B``

    The original implementation printed '0.1000' for ratios in
    [0.9995, 1) because the carry out of the thousandths digit was never
    propagated into the integer part; the fixed-point arithmetic below
    handles the carry (and subsumes the old ``A == B`` special case).
    """
    if a is None:
        a = A
    if b is None:
        b = B
    # Work in units of 1/10000 so we can round half-up on the last digit.
    scaled = 10000 * b // a
    if scaled % 10 > 4:
        scaled += 10
    scaled //= 10
    # scaled is now the ratio in thousandths, including any carry to 1.000.
    print('{}.{:03d}'.format(scaled // 1000, scaled % 1000))
if __name__ == '__main__':
    # AtCoder input: two integers A and B on one line.
    A, B = map(int, input().split())
    main()
| Shirohi-git/AtCoder | abc271-/abc274_a.py | abc274_a.py | py | 245 | python | en | code | 2 | github-code | 13 |
#Sum of all multiples of 3 or 5 below 1000 (Project Euler #1).
#A single comprehension replaces the two loops with a divisibility
#cross-check; the resulting set of numbers is identical.
sumlist = [n for n in range(1000) if n % 3 == 0 or n % 5 == 0]
#Print the sum of the list
print(sum(sumlist))
6798469502 | import xcp_get
if __name__ == "__main__":
# My wallet address
pubkey = "1EWFR9dMzM2JtrXeqwVCY1LW6KMZ1iRhJ5"
# Get wallet contents
wallet_data = xcp_get.address(pubkey)
# Create wallet asset list
wallet = []
for asset in wallet_data:
# Check for dispenser
disp_result = xcp_get.dispenser(asset["asset"])
if len(disp_result) > 0:
print(disp_result) | burstMembrane/Counterview | json_updater/OG_PEPES/wallet_check.py | wallet_check.py | py | 415 | python | en | code | 0 | github-code | 13 |
15266818152 | from django.http import HttpResponseRedirect, HttpResponse
from django.shortcuts import render, get_object_or_404
from django.views.decorators.csrf import csrf_exempt
from django.utils import timezone
from sys import stdout
import datetime
from pastebin.models import Paste, Lang, Ban
from pastebin.forms import PasteForm
from pastebin.lib import genUrlid
def show(request, urlid):
    """Render a single paste looked up by its short URL id (404 if absent)."""
    paste_obj = get_object_or_404(Paste, urlid=urlid)
    return render(request, 'show.html', {'paste': paste_obj})
def showraw(request, urlid):
    """Return the paste body as a bare text response (404 if absent)."""
    return HttpResponse(get_object_or_404(Paste, urlid=urlid).text)
def index(request):
    """Render the landing page: an empty paste form plus the 100 newest
    public pastes."""
    recent = Paste.objects.all().filter(private=False).order_by('-time')[:100]
    return render(request, 'index.html', {'pastes': recent, 'form': PasteForm()})
def isBanned(ip):
    """Return the active Ban record for *ip*, or None when the IP is not
    banned or the ban has expired."""
    try:
        ban = Ban.objects.get(ip=ip)
    except Ban.DoesNotExist:
        return None
    # A ban with an end date in the past no longer applies;
    # a null end date means the ban is permanent.
    if ban.end and ban.end < timezone.now():
        return None
    return ban
#@csrf_exempt
def create(request):
    """Create a new paste.

    GET renders an empty form; POST validates it, refuses banned IPs and
    redirects to the new paste on success.  An invalid POST falls through
    to re-render the bound form with its errors.
    """
    if request.method == 'POST':
        ip = request.META.get('REMOTE_ADDR')
        ban = isBanned(ip)
        if ban:
            # Count the blocked attempt before showing the ban page.
            ban.hits += 1
            ban.save()
            return render(request, 'banned.html', {'ban': ban})
        f = PasteForm(request.POST)
        if f.is_valid():
            p = Paste()
            p.private = f.cleaned_data['private']
            p.text = f.cleaned_data['text']
            p.lang = f.cleaned_data['lang']
            # Short random id used in the paste's URL.
            p.urlid = genUrlid()
            p.ip = ip
            p.save()
            return HttpResponseRedirect('/paste/' + p.urlid)
    else:
        f = PasteForm()
    return render(request, 'create.html', {'form':f})
#@csrf_exempt
def reply(request, urlid):
    """Reply to an existing paste.

    GET pre-fills the form with the original paste's content; POST
    validates, refuses banned IPs, saves the reply linked to the original
    via ``replyto`` and redirects to it.  An invalid POST falls through
    to re-render the bound form with its errors.
    """
    replyto = get_object_or_404(Paste, urlid=urlid)
    if request.method == 'POST':
        ip = request.META.get('REMOTE_ADDR')
        ban = isBanned(ip)
        if ban:
            # Count the blocked attempt before showing the ban page.
            ban.hits += 1
            ban.save()
            return render(request, 'banned.html', {'ban': ban})
        f = PasteForm(request.POST)
        if f.is_valid():
            p = Paste()
            p.private = f.cleaned_data['private']
            p.text = f.cleaned_data['text']
            p.lang = f.cleaned_data['lang']
            # Short random id used in the paste's URL.
            p.urlid = genUrlid()
            p.ip = ip
            p.replyto = replyto
            p.save()
            return HttpResponseRedirect('/paste/' + p.urlid)
    else:
        f = PasteForm(initial={
            'private' : replyto.private,
            'text' : replyto.text,
            'lang' : replyto.lang,
        })
    return render(request, 'reply.html', {
        'form' : f,
        'reply' : replyto,
    })
| oddstr13-openshell-no/django-app-pastebin | views.py | views.py | py | 2,705 | python | en | code | 0 | github-code | 13 |
# Write a python program to print all prime numbers between given range
def _is_prime(n):
    """Return True when n is a prime number."""
    if n < 2:
        return False
    # Trial division up to sqrt(n) is sufficient.
    for divisor in range(2, int(n ** 0.5) + 1):
        if n % divisor == 0:
            return False
    return True

start = int(input("Enter the starting number : "))
end = int(input("Enter the ending number : "))
# The original printed a verdict for every trial divisor (repeating
# "not a prime" lines and echoing composite numbers); each number in the
# inclusive range is now printed exactly once, and only when it is prime.
for i in range(start, end + 1):
    if _is_prime(i):
        print(i)
1467384379 | #%%
from airflow import DAG
from airflow.operators.python import PythonOperator
from airflow.operators.bash import BashOperator
from datetime import datetime
import numpy as np
from pathlib import Path
# Directory where dice-roll logs are written: <dag folder parent>/data/dice_simulations.
simulation_data_path = Path(__file__).parents[1] / "data" /"dice_simulations"
#%%
# https://betterdatascience.com/apache-airflow-xcoms/
def _dice_rolls(number_rolls):
return list(np.random.randint(1,7, size = number_rolls))
def _save_dice_experiment(task_instance):
    """Append the dice rolls pulled from XCom to the simulation log file.

    :param task_instance: Airflow TaskInstance used to pull the
        'dice_roll' task's pushed return value.
    :raises ValueError: when the XCom pull yields nothing.
    """
    # NOTE(review): task_ids is a list, so xcom_pull returns a list of
    # results -- the log line therefore records a nested list; confirm
    # that is intended.
    simulation_data = task_instance.xcom_pull(task_ids = ["dice_roll"])
    if not simulation_data:
        raise ValueError("No value stored in XComs")
    # Append mode keeps a running history across DAG runs.
    with open(simulation_data_path / "dice_rolls.txt", "a") as file:
        file.write(f"Dice rolls {datetime.now()} \n")
        file.write(f"{simulation_data}\n\n")
with DAG(dag_id="dice_simulator", start_date=datetime(2023,5,1), schedule = "0 8 * * *",catchup= True):
    # Runs daily at 08:00; catchup=True backfills every run since start_date.
    # XComs lets tasks talk to each other: dice_roll pushes its return
    # value, and save_dice pulls it to append to the log file.
    setup_directories = BashOperator(task_id = "setup_directories", bash_command = f"mkdir -p {simulation_data_path.as_posix()}")
    dice_roll = PythonOperator(task_id = "dice_roll", do_xcom_push = True, python_callable = _dice_rolls, op_args=[10])
    save_dice_experiment = PythonOperator(task_id = "save_dice", python_callable=_save_dice_experiment)
    setup_directories >> dice_roll >> save_dice_experiment
| kokchun/Data-engineering-AI22 | Lecture-code/Lec4-Airflow_intro/dags/4.1_python_operator.py | 4.1_python_operator.py | py | 1,450 | python | en | code | 1 | github-code | 13 |
14327572288 | import re
from AE.Display.time import *
from AE.Display.Animation.Animation_2d import *
from AE.Display.Animation.Items_2d import *
class Information(Animation_2d):
  """Side panel displaying run information (currently the simulation time).

  Occupies a thin vertical strip (x in [0, 0.2]) of the scene; presumably
  shown next to the main animation -- layout is handled by Animation_2d.
  """
  # ========================================================================
  def __init__(self, disp_time=True):
    """Build the panel; *disp_time* toggles the step/seconds readout."""
    # Parent constructor
    super().__init__(boundaries=[[0, 0.2], [0, 1]], disp_boundaries=True, boundaries_color=Qt.black)
    # --- Optional display
    # Time string
    self.disp_time = disp_time
    if self.disp_time:
      self.add(text, 'Time',
        stack = True,
        string = self.time_str(time(0,0)),
        color = 'white',
        fontsize = 12,
      )
  # ========================================================================
  def time_str(self, t):
    '''
    Format time string for display: an HTML snippet with the zero-padded
    step count and elapsed seconds.
    '''
    s = '<p>step {:06d}</p><font size=2> {:06.02f} sec</font>'.format(t.step, t.time)
    # Grey zeros (runs of zeros directly after a space are dimmed)
    s = re.sub(r'( )([0]+)', r'\1<span style="color:grey;">\2</span>', s)
    return s
  # ========================================================================
  def update(self, t):
    """Refresh the time readout, then delegate repainting to the parent."""
    if self.disp_time:
      self.item['Time'].string = self.time_str(t)
    # Repaint & confirm
super().update(t) | CandelierLab/Toolbox_AE | AE/Display/Animation/Information.py | Information.py | py | 1,240 | python | en | code | 0 | github-code | 13 |
18074346749 | import inspect
import string
from django.conf import settings
from django.test import TestCase
from django.test.utils import override_settings
from django.core.exceptions import FieldError
from django.db import IntegrityError
from django.core.management import call_command
from fixtureless import Factory
import haystack
from haystack.query import SearchQuerySet
from tastypie.api import Api
from tastypie.test import ResourceTestCaseMixin
from organisms.models import Organism
from genes.models import Gene, CrossRef, CrossRefDB
from genes.utils import translate_genes
from genes.search_indexes import GeneIndex
from genes.app_settings import GENES_API_RESULT_LIMIT
# Shared fixtureless factory used by every test case below.
factory = Factory()
# REQUIRES ELASTICSEARCH TO BE SETUP AS THE HAYSTACK PROVIDER.
# Haystack connection settings pointing at a throwaway local index so the
# tests never touch the real search index.
TEST_INDEX = {
    'default': {
        'ENGINE': 'haystack.backends.elasticsearch_backend.'
        'ElasticsearchSearchEngine',
        'URL': 'http://127.0.0.1:9200/',
        'TIMEOUT': 60 * 10,
        'INDEX_NAME': 'test_index',
    },
}
# Project URLconf module name; used to discover the tastypie Api instance.
ROOT_URLCONF = getattr(settings, 'ROOT_URLCONF', None)
class GeneDBConstraintsTestCase(TestCase):
    """
    Checks that new Genes can be created in the database under different
    circumstances. Also checks that the exceptions we are looking for are
    raised when the Genes that are trying to be created do not comply with
    database constraints.
    """
    def test_std_and_sys_name_present(self):
        """
        Check that this throws no errors.
        """
        factory.create(Gene, {'standard_name': 'A1',
                              'systematic_name': 'a12'})
    def test_only_sys_name_present(self):
        """
        Check that this throws no errors.
        """
        factory.create(Gene, {'standard_name': None,
                              'systematic_name': 'b34'})
    def test_only_std_name_present(self):
        """
        Check that this throws an IntegrityError from the database when
        trying to create a Gene with a null value for systematic_name.
        """
        # Database-level NOT NULL constraint, hence IntegrityError rather
        # than the model-level ValueError used for the blank-name cases.
        with self.assertRaises(IntegrityError):
            factory.create(Gene, {'standard_name': 'C5',
                                  'systematic_name': None})
    def test_both_names_absent(self):
        """
        Check that the Gene.save() method throws a ValueError when
        trying to create a Gene with a null value for standard_name AND
        systematic_name.
        """
        with self.assertRaises(ValueError):
            factory.create(Gene, {'standard_name': None,
                                  'systematic_name': None})
    def test_only_sys_name_blank_space(self):
        """
        Check that the Gene.save() method throws a ValueError if there
        is no standard_name and a systematic_name is passed but it is
        a blank string.
        """
        with self.assertRaises(ValueError):
            factory.create(Gene, {'standard_name': None,
                                  'systematic_name': ' '})
    def test_good_std_name_blank_sys_name(self):
        """
        Check that this throws no errors even though the systematic_name
        passed is a blank string (as in the previous test), since a
        non-blank standard_name is passed.
        """
        factory.create(Gene, {'standard_name': 'D7',
                              'systematic_name': ' '})
    def test_std_name_and_sys_name_both_blank_space(self):
        """
        Check that the Gene.save() method throws a ValueError if
        both the standard_name and systematic_name are passed but are
        blank strings.
        """
        with self.assertRaises(ValueError):
            factory.create(Gene, {'standard_name': ' ',
                                  'systematic_name': ' '})
class TranslateTestCase(TestCase):
    """
    Exercises genes.utils.translate_genes() across identifier types
    (Entrez, Symbol, cross-reference databases), multiple organisms, and
    missing-value handling.
    """
    def setUp(self):
        # Two cross-reference databases and genes g1/g2 with xrefs in them.
        org = factory.create(Organism)
        xrdb1 = CrossRefDB(name="ASDF", url="http://www.example.com")
        xrdb1.save()
        xrdb2 = CrossRefDB(name="XRDB2", url="http://www.example.com/2")
        xrdb2.save()
        # g1 and g2 have both standard and systematic names.
        g1 = Gene(entrezid=1, systematic_name="g1", standard_name="G1",
                  description="asdf", organism=org, aliases="gee1 GEE1")
        g1.save()
        g2 = Gene(entrezid=2, systematic_name="g2", standard_name="G2",
                  description="asdf", organism=org, aliases="gee2 GEE2")
        g2.save()
        # Note: 'XRID1' deliberately exists in both xrdbs (for different
        # genes), and g1 carries two ids within xrdb1.
        xref1 = CrossRef(crossrefdb=xrdb1, gene=g1, xrid="XRID1")
        xref1.save()
        xref2 = CrossRef(crossrefdb=xrdb2, gene=g2, xrid="XRID1")
        xref2.save()
        xref3 = CrossRef(crossrefdb=xrdb1, gene=g1, xrid="XRRID1")
        xref3.save()
        xref4 = CrossRef(crossrefdb=xrdb1, gene=g2, xrid="XRID2")
        xref4.save()
        org2 = Organism(taxonomy_id=1234, common_name="Computer mouse",
                        scientific_name="Mus computurus",
                        slug="mus-computurus")
        org2.save()
        org3 = Organism(taxonomy_id=4321, common_name="Computer screen",
                        scientific_name="Monitorus computurus",
                        slug="monitorus-computurus")
        org3.save()
        # Make systematic and standard name the same for the following genes,
        # but make organisms different. Skip entrezid 3 since that is used by
        # other tests.
        g4 = Gene(entrezid=4, systematic_name="acdc", standard_name="ACDC",
                  description="asdf", organism=org2, aliases="gee4 GEE4")
        g4.save()
        g5 = Gene(entrezid=5, systematic_name="acdc", standard_name="ACDC",
                  description="asdf", organism=org3, aliases="gee5 GEE5")
        g5.save()
        # g101 has standard name, but no systematic name.
        g101 = Gene(entrezid=101, standard_name="std_101", organism=org2)
        g101.save()
        # g102 has systematic name, but no standard name.
        g102 = Gene(entrezid=102, systematic_name="sys_102", organism=org2)
        g102.save()
    def test_translate_symbol_entrez_diff_organisms(self):
        """
        translate_genes() should be able to differentiate between different
        organism genes when passed identical symbols.
        """
        # This test also confirmed that when both standard name and systematic
        # name are available, the sysmbol will be standard name.
        translation = translate_genes(id_list=['ACDC'],
                                      from_id="Symbol", to_id="Entrez",
                                      organism="Mus computurus")
        self.assertEqual(translation, {'ACDC': [4], 'not_found': []})
    def test_translate_symbol_entrez_diff_organisms2(self):
        """
        Same as previous test, but uses the other organism as input.
        """
        translation = translate_genes(id_list=['ACDC'],
                                      from_id="Symbol", to_id="Entrez",
                                      organism="Monitorus computurus")
        self.assertEqual(translation, {'ACDC': [5], 'not_found': []})
    def test_translate_entrez_entrez(self):
        """
        Test translation from entrez to entrez.
        """
        translation = translate_genes(id_list=[1, 2],
                                      from_id="Entrez", to_id="Entrez")
        self.assertEqual(translation, {1: [1, ], 2: [2, ], 'not_found': []})
    def test_translate_entrez_standard_name(self):
        """
        Test translation from entrez to standard names.
        """
        translation = translate_genes(id_list=[1, 2],
                                      from_id="Entrez",
                                      to_id="Standard name")
        self.assertEqual(translation,
                         {1: ['G1', ], 2: ['G2', ], 'not_found': []})
    def test_translate_entrez_systematic_name(self):
        """
        Test translation from entrez to systematic names.
        """
        translation = translate_genes(id_list=[1, 2],
                                      from_id="Entrez",
                                      to_id="Systematic name")
        self.assertEqual(translation,
                         {1: ['g1', ], 2: ['g2', ], 'not_found': []})
    def test_translate_entrez_xrdb(self):
        """
        Test translation from entrez to ASDF.
        """
        translation = translate_genes(id_list=[1, 2],
                                      from_id="Entrez", to_id="ASDF")
        self.assertEqual(translation, {1: ['XRID1', 'XRRID1', ],
                                       2: ['XRID2', ], 'not_found': []})
    def test_translate_xrdb_entrez(self):
        """
        Test translation from ASDF to entrez.
        """
        translation = translate_genes(id_list=['XRID1', 'XRRID1', 'XRID2'],
                                      from_id="ASDF", to_id="Entrez")
        self.assertEqual(translation, {'XRID1': [1, ], 'XRRID1': [1, ],
                                       'XRID2': [2, ], 'not_found': []})
    def test_translate_entrez_entrez_missing(self):
        """
        Test translation from entrez to entrez with a missing value.
        """
        translation = translate_genes(id_list=[1, 2, 3],
                                      from_id="Entrez", to_id="Entrez")
        self.assertEqual(translation, {1: [1, ], 2: [2, ], 'not_found': [3]})
    def test_translate_entrez_standard_name_missing(self):
        """
        Test translation from entrez to standard names with a missing value.
        """
        translation = translate_genes(id_list=[1, 2, 3],
                                      from_id="Entrez", to_id="Standard name")
        self.assertEqual(translation,
                         {1: ['G1', ], 2: ['G2', ], 'not_found': [3]})
    def test_translate_symbol_entrez(self):
        """
        Test translation from symbol to entrez when either standard name or
        systematic name is null.
        """
        # Test the gene that has standard name.
        translation = translate_genes(id_list=['std_101'],
                                      from_id="Symbol", to_id="Entrez",
                                      organism="Mus computurus")
        self.assertEqual(translation, {'std_101': [101], 'not_found': []})
        # Test the gene that does NOT have standard name.
        translation = translate_genes(id_list=['sys_102'],
                                      from_id="Symbol", to_id="Entrez",
                                      organism="Mus computurus")
        self.assertEqual(translation, {'sys_102': [102], 'not_found': []})
    def test_translate_entrez_symbol(self):
        """
        Test translation from entrez to symbol when either standard name or
        systematic name is null.
        """
        # Test the gene that has standard name.
        translation = translate_genes(id_list=[101],
                                      from_id="Entrez", to_id="Symbol",
                                      organism="Mus computurus")
        self.assertEqual(translation, {101: ['std_101'], 'not_found': []})
        # Test the gene that does NOT have standard name.
        translation = translate_genes(id_list=[102],
                                      from_id="Entrez", to_id="Symbol",
                                      organism="Mus computurus")
        self.assertEqual(translation, {102: ['sys_102'], 'not_found': []})
    def test_empty_standard_and_systematic_names(self):
        """
        Test that a ValueError exception will be raised when we try to create a
        gene whose standard and systematic names are both empty or null, or
        ONLY consist of space characters (such as space, tab, new line, etc).
        """
        org = factory.create(Organism)
        # Neither standard_name nor systematic_name is set explicitly.
        unnamed_gene = Gene(entrezid=999, organism=org)
        self.assertRaises(ValueError, unnamed_gene.save)
        # standard_name consists of only space characters.
        # systematic_name is u'' here, because it is not set explicitly, and
        # by default "null=False" for this field in the model.
        unnamed_gene = Gene(entrezid=999, standard_name="\t \n", organism=org)
        self.assertRaises(ValueError, unnamed_gene.save)
        # Both standard_name and systematic_name are empty strings.
        unnamed_gene = Gene(entrezid=999, standard_name="", systematic_name="",
                            organism=org)
        self.assertRaises(ValueError, unnamed_gene.save)
        # Both standard_name and systematic_name consist of space characters
        # only.
        unnamed_gene = Gene(entrezid=999, standard_name=" ",
                            systematic_name="\t \n ", organism=org)
        self.assertRaises(ValueError, unnamed_gene.save)
    def tearDown(self):
        Organism.objects.all().delete()  # Remove Organism objects.
        Gene.objects.all().delete()  # Remove Gene objects.
        CrossRef.objects.all().delete()  # Remove CrossRef objects.
        CrossRefDB.objects.all().delete()  # Remove CrossRefDB objects.
class PrepareNameLengthTestCase(TestCase):
    """
    This TestCase tests the prepare_name_length() method in
    search_indexes.GeneIndex, which should report the length of the
    standard name when present, otherwise of the systematic name.
    """
    def setUp(self):
        self.g1 = factory.create(Gene, {'standard_name': 'A1',
                                        'systematic_name': 'a12'})
        self.g2 = factory.create(Gene, {'standard_name': None,
                                        'systematic_name': 'b12'})
        self.gene_index = GeneIndex()
    def test_std_and_sys_name_present(self):
        """
        Test that name_length is 2, the length of 'A1'.
        """
        name_length = self.gene_index.prepare_name_length(self.g1)
        self.assertEqual(name_length, 2)
    def test_only_sys_name_present(self):
        """
        Test that name_length is 3, the length of 'b12'.
        """
        name_length = self.gene_index.prepare_name_length(self.g2)
        self.assertEqual(name_length, 3)
# We use @override_settings here so that the tests use the TEST_INDEX
# when building/rebuilding the search indexes, and not our real Database
# search indexes.
@override_settings(HAYSTACK_CONNECTIONS=TEST_INDEX)
class BuildingGeneIndexTestCase(TestCase):
    """
    This TestCase tests the ability to build search indexes under certain
    corner cases (missing names, empty descriptions).
    """
    def setUp(self):
        haystack.connections.reload('default')
        # As per this documented issue in Haystack,
        # https://github.com/django-haystack/django-haystack/issues/704
        # we need to call 'rebuild_index' at the beginning to get
        # consistency of data and structure. Otherwise, the
        # 'test_std_and_sys_name_present' *sometimes* yielded a None
        # result
        call_command('rebuild_index', interactive=False, verbosity=0)
    def test_factory_gene_creation(self):
        """
        Create a gene using the factory, without any specified fields.
        Call command to build search index and then try to find the gene
        using this search index.
        """
        gene = factory.create(Gene)
        call_command('update_index', interactive=False, verbosity=0)
        sqs = SearchQuerySet().models(Gene)
        sqs = sqs.filter(content=gene.systematic_name).load_all()
        self.assertEqual(sqs[0].object, gene)
    def test_std_and_sys_name_present(self):
        """
        Create a gene using the factory, but specify both standard_
        and systematic_ names.
        Call command to build search index and then try to find the gene
        using this search index.
        """
        gene = factory.create(Gene, {'standard_name': 'A1',
                                     'systematic_name': 'a12'})
        call_command('update_index', interactive=False, verbosity=0)
        sqs = SearchQuerySet().models(Gene)
        sqs = sqs.filter(content=gene.systematic_name).load_all()
        self.assertEqual(sqs[0].object, gene)
    def test_only_sys_name_present(self):
        """
        Create a gene using the factory, specify systematic_ name and
        make standard_name explicitly None.
        Call command to build search index and then try to find the gene
        using this search index.
        """
        gene = factory.create(Gene, {'standard_name': None,
                                     'systematic_name': 'b34'})
        call_command('update_index', interactive=False, verbosity=0)
        sqs = SearchQuerySet().models(Gene)
        sqs = sqs.filter(content=gene.systematic_name).load_all()
        self.assertEqual(sqs[0].object, gene)
    def test_no_description(self):
        """
        Create a gene using the factory, specify description to be an
        empty string.
        Call command to build search index and then try to find the gene
        using this search index.
        """
        gene = factory.create(Gene, {'description': ''})
        call_command('update_index', interactive=False, verbosity=0)
        sqs = SearchQuerySet().models(Gene)
        sqs = sqs.filter(content=gene.systematic_name).load_all()
        self.assertEqual(sqs[0].object, gene)
    def tearDown(self):
        # Drop the throwaway test index so runs stay independent.
        call_command('clear_index', interactive=False, verbosity=0)
# We use @override_settings here so that the tests use the TEST_INDEX
# when building/rebuilding the search indexes, and not our real Database
# search indexes.
@override_settings(HAYSTACK_CONNECTIONS=TEST_INDEX)
class APIResourceTestCase(ResourceTestCaseMixin, TestCase):
"""
Test API endpoints for retrieving and searching gene data, using both
GET and POST requests.
"""
def get_api_name(self):
"""
Utility function to get the name of the tastypie REST API in
whatever Django project is using django-genes.
"""
if not ROOT_URLCONF:
return None
proj_urls = __import__(ROOT_URLCONF)
url_members = inspect.getmembers(proj_urls.urls)
api_name = None
for k, v in url_members:
if isinstance(v, Api):
api_name = v.api_name
return api_name
def create_many_genes(self, organism, num_genes):
"""
Helper function to generate a large number of genes
"""
# Create genes:
for i in range(num_genes):
Gene.objects.create(entrezid=(i + 1),
systematic_name="sys_name #" + str(i + 1),
standard_name="std_name #" + str(i + 1),
organism=organism)
def setUp(self):
haystack.connections.reload('default')
# This line is important to set up the test case!
super(APIResourceTestCase, self).setUp()
self.gene1 = factory.create(Gene, {'standard_name': 'A1',
'systematic_name': 'a12'})
self.gene2 = factory.create(Gene, {'standard_name': None,
'systematic_name': 'b34'})
standard_name_prefix = 'ans'
factory.create(Gene, {'standard_name': standard_name_prefix})
# Create 26 more gene names that start with 'ans' and then have
# an uppercase letter appended to it.
for letter in string.ascii_uppercase:
factory.create(
Gene,
{'standard_name': standard_name_prefix + letter}
)
call_command('rebuild_index', interactive=False, verbosity=0)
def test_gene_get_search(self):
"""
Tests API gene search when searching with a GET request
"""
api_name = self.get_api_name()
response = self.api_client.get(
'/api/{}/gene/search/'.format(api_name),
data={'query': self.gene1.standard_name}
)
self.assertValidJSONResponse(response)
found_results = self.deserialize(response)[0]['found']
best_gene_result = found_results[0]
self.assertEqual(best_gene_result['standard_name'],
self.gene1.standard_name)
self.assertEqual(best_gene_result['systematic_name'],
self.gene1.systematic_name)
def test_gene_post_search(self):
"""
Tests API gene search when searching with a POST request
"""
api_name = self.get_api_name()
response = self.api_client.post(
'/api/{}/gene/search/'.format(api_name),
data={'query': self.gene2.systematic_name}
)
self.assertValidJSONResponse(response)
found_results = self.deserialize(response)[0]['found']
best_gene_result = found_results[0]
self.assertEqual(best_gene_result['standard_name'],
self.gene2.standard_name)
self.assertEqual(best_gene_result['systematic_name'],
self.gene2.systematic_name)
def test_gene_list_endpt_large_post(self):
"""
Test that we can do a big POST request to get information back
for a lot of genes (more than are allowed through the ~4k
character limit for GET).
We will set the gene_num to 1100 because
9 * 1 = 9 chars of single-digit IDs (1-9)
90 * 2 = 180 chars of double-digit IDs (10-99)
900 * 3 = 2700 chars of triple-digit IDs (100-999)
101 * 4 = 404 chars of four-digit IDs (1000-1100)
(1100 - 1) * 1 chars of delimiters (',')
TOTAL = 9 + 180 + 2700 + 404 + 1099 = 4392 chars.
This is based on the
APIResourceTestCase.test_expressionvalue_big_post() test in
https://github.com/greenelab/adage-server/blob/master/adage/analyze/tests.py
"""
organism = factory.create(Organism)
gene_num = 1100
self.create_many_genes(organism, gene_num)
gene_ids = ",".join([str(g.id) for g
in Gene.objects.filter(organism=organism)])
api_name = self.get_api_name()
resp = self.client.post('/api/{}/gene/'.format(api_name),
data={'pk__in': gene_ids})
self.assertValidJSONResponse(resp)
self.assertEqual(
self.deserialize(resp)['meta']['total_count'],
gene_num
)
def test_gene_autocomplete_search(self):
"""
Tests API gene autocomplete search. In the setUp method, we
created 27 genes that start with 'ans', but this should only
return 15 results, or however many were set in the
GENES_API_RESULT_LIMIT setting.
"""
api_name = self.get_api_name()
response = self.api_client.get(
'/api/{}/gene/autocomplete/'.format(api_name),
data={'query': 'ans'}
)
self.assertValidJSONResponse(response)
found_results = self.deserialize(response)['results']
self.assertEqual(len(found_results), GENES_API_RESULT_LIMIT)
def tearDown(self):
    """Wipe the haystack search index built up during the tests."""
    call_command('clear_index', verbosity=0, interactive=False)
class CrossRefDBTestCase(TestCase):
    """Validation rules for the CrossRefDB model's ``name`` field."""

    def test_saving_xrdb(self):
        """A CrossRefDB with a normal name saves without error."""
        factory.create(CrossRefDB, {"name": "XRDB1"})

    def test_saving_xrdb_no_name(self):
        """A null name is rejected at the database level (IntegrityError)."""
        with self.assertRaises(IntegrityError):
            factory.create(CrossRefDB, {"name": None})

    def test_saving_xrdb_blank_name(self):
        """An empty-string name is rejected by validation (FieldError)."""
        with self.assertRaises(FieldError):
            factory.create(CrossRefDB, {"name": ""})
class LoadCrossRefsTestCase(TestCase):
    """End-to-end check of the genes_load_uniprot management command."""

    def setUp(self):
        ensembl = CrossRefDB.objects.create(
            name="Ensembl", url="http://www.ensembl.org/Gene/Summary?g=_REPL_")
        CrossRefDB.objects.create(
            name="UniProtKB", url="http://www.uniprot.org/uniprot/_REPL_")
        ensembl_ids = ('ENSG00000166503', 'ENSG00000214575',
                       'ENSG00000170312', 'ENSG00000172053')
        # Only the first gene carries a known entrezid; the command should
        # match the others through their Ensembl identifiers.
        genes = [factory.create(Gene, {'entrezid': 50810})]
        genes.extend(factory.create(Gene) for _ in range(3))
        for gene, xrid in zip(genes, ensembl_ids):
            factory.create(CrossRef, {'crossrefdb': ensembl, 'gene': gene,
                                      'xrid': xrid})

    def test_load_uniprot_mgmt_command(self):
        """
        genes_load_uniprot should create UniProtKB cross-references
        (matched via Entrez and Ensembl) and link each one to the gene
        that already carries the corresponding Ensembl identifier.
        """
        call_command('genes_load_uniprot',
                     uniprot='genes/test_files/test_uniprot_entrez_ensembl.txt')
        uniprot1 = CrossRef.objects.get(xrid='A0A024R216')
        self.assertEqual(uniprot1.gene.entrezid, 50810)
        expected_pairs = (('A0A024R216', 'ENSG00000166503'),
                          ('A0A024R214', 'ENSG00000214575'),
                          ('A0A024QZP7', 'ENSG00000170312'),
                          ('A0A0U1RQX9', 'ENSG00000172053'))
        for uniprot_xrid, ensembl_xrid in expected_pairs:
            uniprot = CrossRef.objects.get(xrid=uniprot_xrid)
            ensembl = CrossRef.objects.filter(
                crossrefdb__name='Ensembl').get(gene=uniprot.gene)
            self.assertEqual(ensembl.xrid, ensembl_xrid)
| greenelab/django-genes | genes/tests.py | tests.py | py | 26,144 | python | en | code | 2 | github-code | 13 |
17763097802 | from config import Chats
from functools import wraps
from fuzzywuzzy import process
def restricted(func):
    """Decorator: silently drop Telegram updates from users not in Chats."""
    @wraps(func)
    def inner(update, context, *args, **kwargs):
        if update.effective_user.id in Chats:
            return func(update, context, *args, **kwargs)
        # Unknown user: ignore the update entirely.
        return None
    return inner
def search_item(items, search_query):
    """Fuzzy-search *items* by their ``title`` attribute.

    Runs fuzzywuzzy's process.extract over the item titles and returns
    the matching item objects, best matches first (at most 30).

    Replaces the previous O(len(matches) * len(items)) nested scan with
    a single title->item lookup table. When several items share a title
    the first occurrence wins, matching the old first-match behaviour.
    """
    matches = process.extract(search_query, [item.title for item in items],
                              limit=30)
    by_title = {}
    for item in items:
        by_title.setdefault(item.title, item)
    # Every matched title came from the items themselves, so the lookup
    # can never miss.
    return [by_title[title] for title, _score in matches]
| Derafino/goods_scrap | methods.py | methods.py | py | 685 | python | en | code | 0 | github-code | 13 |
19702063255 | #!/usr/local/bin/python3
# coding: utf-8
import voluptuous as vol
from homeassistant.config_entries import ConfigEntry
from homeassistant.core import HomeAssistant
from homeassistant.helpers import entity_platform
from homeassistant.helpers.entity_platform import AddEntitiesCallback
from . import (
DOMAIN
)
from .water_heaters.cooker import RedmondCooker
from .water_heaters.kettle import RedmondKettle
async def async_setup_entry(
    hass: HomeAssistant,
    config_entry: ConfigEntry,
    async_add_entities: AddEntitiesCallback
) -> None:
    """Set up water_heater entities for a ready4sky config entry.

    Device ``_type`` 0-2 creates a RedmondKettle entity; type 5 creates a
    RedmondCooker. Also registers the custom ``set_timer`` and
    ``set_manual_program`` entity services with voluptuous validation.
    """
    # Device object stored under this entry's id -- presumably placed
    # there by the integration's __init__ setup; verify against __init__.
    kettle = hass.data[DOMAIN][config_entry.entry_id]
    if kettle._type in [0, 1, 2]:
        async_add_entities([RedmondKettle(kettle)])
    elif kettle._type == 5:
        async_add_entities([RedmondCooker(kettle)])
    platform = entity_platform.current_platform.get()
    # Delayed-start timer: hours/minutes only.
    platform.async_register_entity_service(
        "set_timer",
        {
            vol.Required("hours"): vol.All(vol.Coerce(int), vol.Range(min=0, max=23)),
            vol.Required("minutes"): vol.All(vol.Coerce(int), vol.Range(min=0, max=59))
        },
        "async_set_timer"
    )
    # Full manual program: program/subprogram, temperature, cook time
    # (hours/minutes), delay (dhours/dminutes) and keep-warm flag.
    platform.async_register_entity_service(
        "set_manual_program",
        {
            vol.Required("prog"): vol.All(vol.Coerce(int), vol.Range(min=0, max=12)),
            vol.Required("subprog"): vol.All(vol.Coerce(int), vol.Range(min=0, max=3)),
            vol.Required("temp"): vol.All(vol.Coerce(int), vol.Range(min=30, max=180)),
            vol.Required("hours"): vol.All(vol.Coerce(int), vol.Range(min=0, max=23)),
            vol.Required("minutes"): vol.All(vol.Coerce(int), vol.Range(min=0, max=59)),
            vol.Required("dhours"): vol.All(vol.Coerce(int), vol.Range(min=0, max=23)),
            vol.Required("dminutes"): vol.All(vol.Coerce(int), vol.Range(min=0, max=59)),
            vol.Required("heat"): vol.All(vol.Coerce(int), vol.Range(min=0, max=1))
        },
        "async_set_manual_program"
    )
| Nemiroff/hassio-r4s | custom_components/ready4sky/water_heater.py | water_heater.py | py | 2,052 | python | en | code | null | github-code | 13 |
34090413265 | #!/usr/bin/env python
import rospy
from mavros_msgs.msg import State
from mavros_msgs.srv import CommandBool, SetMode, CommandBoolRequest, SetModeRequest, SetModeResponse
from mavros_msgs.srv import CommandBoolResponse
from mavros_msgs.msg import GlobalPositionTarget
from sensor_msgs.msg import NavSatFix
# Latest data received from the MAVROS topics (updated by the callbacks
# below) plus reusable request/message objects for mode, arming and
# position commands.
current_state = State()
current_global_pos = NavSatFix()
offb_set_mode = SetModeRequest()
arm_cmd = CommandBoolRequest()
global_pos = GlobalPositionTarget()
def state_cb(state):
    """mavros/state subscriber callback: cache the latest FCU state."""
    global current_state
    current_state = state


def global_pos_cb(global_pos):
    """mavros/global_position/global callback: cache the latest GPS fix.

    NOTE(review): the parameter name shadows the module-level global_pos
    setpoint message; only the module global current_global_pos is rebound.
    """
    global current_global_pos
    current_global_pos = global_pos
def main():
    """Arm the vehicle in GUIDED mode and hold position.

    Publishes a global-relative-altitude setpoint at the craft's current
    lat/lon once per second.
    """
    rospy.init_node("offb_node", anonymous=True)
    # Subscribers
    rospy.Subscriber("mavros/state", State, state_cb)
    rospy.Subscriber("mavros/global_position/global", NavSatFix, global_pos_cb)
    # Publishers
    local_pos_pub = rospy.Publisher("mavros/setpoint_position/global", GlobalPositionTarget, queue_size=10)
    # Services
    arming_client = rospy.ServiceProxy("mavros/cmd/arming", CommandBool)
    set_mode_client = rospy.ServiceProxy("mavros/set_mode", SetMode)
    # The desired global position for the UAV
    global_pos.coordinate_frame = GlobalPositionTarget.FRAME_GLOBAL_REL_ALT
    global_pos.altitude = 2.0  # Takeoff to 2 meters (overwritten to 1.0 in the loop below)
    # Wait for FCU connection
    while not rospy.is_shutdown() and not current_state.connected:
        rospy.sleep(1)
    # Set the UAV into GUIDED mode and arm the motors
    offb_set_mode.custom_mode = "GUIDED"
    arm_cmd.value = True
    # NOTE(review): last_request is only used by the commented-out rate
    # guards below; with them disabled, the set_mode and arming service
    # calls are re-sent on every loop iteration (once per second).
    last_request = rospy.Time.now()
    while not rospy.is_shutdown():
        # if current_state.mode != "GUIDED" and rospy.Time.now() - last_request > rospy.Duration(5.0):
        set_mode_response = set_mode_client(offb_set_mode)
        if set_mode_response.mode_sent:
            rospy.loginfo("GUIDED enabled")
        # last_request = rospy.Time.now()
        # else:
        # if not current_state.armed and rospy.Time.now() - last_request > rospy.Duration(5.0):
        arm_response = arming_client(arm_cmd)
        if arm_response.success:
            rospy.loginfo("Vehicle armed")
        # last_request = rospy.Time.now()
        # Set the current global position as the desired position
        global_pos.latitude = current_global_pos.latitude
        global_pos.longitude = current_global_pos.longitude
        global_pos.altitude = 1.0  # Hover at 1 meter
        local_pos_pub.publish(global_pos)
        rospy.sleep(1)
if __name__ == '__main__':
main() | pranitzope24/Flipkart-Grid-4.0-Robogenerals | scripts/sex.py | sex.py | py | 2,549 | python | en | code | 0 | github-code | 13 |
30979794105 | from flask import Flask
from flask_cors import CORS
from flask_jwt_extended import JWTManager
import logging
import os
from uCube_interface import uCube_interface
from Store import Store, MockStore
class PrefixMiddleware(object):
    """WSGI middleware that mounts the wrapped app under a URL prefix.

    Requests whose PATH_INFO starts with *prefix* are forwarded with the
    prefix moved into SCRIPT_NAME; any other path gets a 404 response.
    """

    def __init__(self, app, prefix=''):
        self.app = app
        self.prefix = prefix

    def __call__(self, environ, start_response):
        if environ['PATH_INFO'].startswith(self.prefix):
            # Shift the prefix from PATH_INFO into SCRIPT_NAME so the
            # wrapped app sees paths relative to its mount point.
            environ['PATH_INFO'] = environ['PATH_INFO'][len(self.prefix):]
            environ['SCRIPT_NAME'] = self.prefix
            return self.app(environ, start_response)
        else:
            # PEP 3333 requires the status string to be
            # "<code> <reason phrase>", not a bare "404".
            start_response('404 Not Found', [('Content-Type', 'text/plain')])
            return ["This url does not belong to the app.".encode()]
def create_app():
    """Application factory: build and configure the uScope Flask app.

    Wires the hardware interface, the persistent store, all manager
    objects and every API blueprint onto the app, which is mounted
    under the /uscope URL prefix.
    """
    driver_host = os.environ.get('DRIVER_HOST')
    app = Flask(__name__)
    app.config.from_object("server_config")
    # JWTManager binds itself to the app; the local name is unused.
    jwt = JWTManager(app)
    # Allow cross-origin requests only in debug builds.
    if os.environ.get("DEBUG") == "TRUE":
        CORS(app)
    logging.getLogger('werkzeug').setLevel(logging.WARNING)
    logging.basicConfig(level=logging.WARNING)
    logging.getLogger("sqlitedict").setLevel(logging.WARNING)
    # Serve the whole API under the /uscope prefix.
    app.wsgi_app = PrefixMiddleware(app.wsgi_app, prefix='/uscope')
    interface = uCube_interface.uCube_interface(driver_host, 6666)
    # Use the mock store when running the test suite.
    if os.environ.get("TESTING") == "TRUE":
        store = MockStore()
    else:
        store = Store()
    app.store = store
    with app.app_context():
        # Include our Routes
        from uScopeBackend.application_manager import application_manager_bp, ApplicationManager
        from uScopeBackend.plot_manager import plot_manager_bp, PlotManager
        from uScopeBackend.registers_manager import registers_manager_bp, RegistersManager
        from uScopeBackend.peripheral_manager import peripheral_manager_bp, PeripheralManager
        from uScopeBackend.scripts_manager import scripts_manager_bp, ScriptManager
        from uScopeBackend.programs_manager import programs_manager_bp, ProgramsManager
        from uScopeBackend.db_manager import database_manager_bp, DatabaseManager
        from uScopeBackend.auth_manager import auth_manager_bp, AuthManager
        from uScopeBackend.bitstream_manager import bitstream_manager_bp, BitstreamManager
        from uScopeBackend.filters_manager import filters_manager_bp, FilterManager
        # Managers are attached to the app so route handlers can reach
        # them through current_app.
        app.interface = interface
        app.app_mgr = ApplicationManager(interface, store)
        app.plot_mgr = PlotManager(interface, store)
        app.register_mgr = RegistersManager(interface, store)
        app.programs_mgr = ProgramsManager(interface, store)
        app.peripheral_mgr = PeripheralManager(store)
        app.script_mgr = ScriptManager(store)
        app.db_mgr = DatabaseManager(store, interface)
        app.auth_mgr = AuthManager(store)
        app.bitstream_mgr = BitstreamManager(store)
        app.filter_mgr = FilterManager(store)
        # Register Blueprints
        app.register_blueprint(application_manager_bp)
        app.register_blueprint(plot_manager_bp)
        app.register_blueprint(peripheral_manager_bp)
        app.register_blueprint(registers_manager_bp)
        app.register_blueprint(programs_manager_bp)
        app.register_blueprint(scripts_manager_bp)
        app.register_blueprint(database_manager_bp)
        app.register_blueprint(auth_manager_bp)
        app.register_blueprint(bitstream_manager_bp)
        app.register_blueprint(filters_manager_bp)
    return app
| uscope-platform/uscope_server | app_factory.py | app_factory.py | py | 3,479 | python | en | code | 0 | github-code | 13 |
38011373208 | import AthenaPoolCnvSvc.ReadAthenaPool
# ATLAS Athena jobOptions: re-run MET association/reconstruction over an
# AOD and optionally write the results out to a new xAOD.
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
from AthenaCommon.AppMgr import ServiceMgr
from AthenaCommon import CfgMgr
from RecExConfig.RecFlags import rec
from glob import glob
# Input AOD files (Ztautau validation sample).
filelist = glob("/atlas/data1/userdata/khoo/Data16/AOD_r21/valid1.361108.PowhegPythia8EvtGen_AZNLOCTEQ6L1_Ztautau.recon.AOD.e5112_s2887_r8609/*")
from AthenaCommon.AthenaCommonFlags import athenaCommonFlags
athenaCommonFlags.FilesInput = filelist
ServiceMgr.EventSelector.InputCollections = athenaCommonFlags.FilesInput()
############################################################################
# Set up detector description for cell access
from AthenaCommon.GlobalFlags import globalflags
globalflags.DetGeo = 'atlas'
from AthenaCommon.DetFlags import DetFlags
DetFlags.detdescr.all_setOff()
DetFlags.detdescr.Calo_setOn()
if hasattr(DetFlags, 'BField_on'): DetFlags.BField_setOn()
from TrkDetDescrSvc.AtlasTrackingGeometrySvc import AtlasTrackingGeometrySvc
AtlasTrackingGeometrySvc = svcMgr.AtlasTrackingGeometrySvc
include('RecExCond/AllDet_detDescr.py')
from RecExConfig.InputFilePeeker import inputFileSummary
#print inputFileSummary
# Choose the data source flag based on the input file's event type.
if inputFileSummary['evt_type'][0] == 'IS_DATA':
    globalflags.DataSource = 'data'
else:
    globalflags.DataSource = 'geant4'
from AthenaCommon.AlgSequence import AlgSequence
topSequence = AlgSequence()
############################################################################
# Set up an extra associator for testing
from METReconstruction.METRecoFlags import metFlags
from METReconstruction.METAssocConfig import AssocConfig, METAssocConfig
# associators = [AssocConfig('PFlowJet'),
#                AssocConfig('Muon'),
#                AssocConfig('Ele'),
#                AssocConfig('Gamma'),
#                AssocConfig('Tau'),
#                AssocConfig('Soft')]
# cfg_akt4em = METAssocConfig('NewAntiKt4EMPFlow',
#                             associators,
#                             doPFlow=True
#                             )
# metFlags.METAssocConfigs()[cfg_akt4em.suffix] = cfg_akt4em
# cfg_akt4em.associators["Tau"].OutputLevel = VERBOSE
# metFlags.METAssocOutputList().append(cfg_akt4em.suffix)
# Get the configuration directly from METRecoFlags
# Can also provide a dict of configurations or list of RecoTools or both
from METReconstruction.METAssocConfig import getMETAssocAlg
metAlg = getMETAssocAlg('METAssociation')
topSequence += metAlg
# Build MET maker algorithms for the PFlow and EMTopo jet collections.
from METUtilities.METMakerConfig import getMETMakerAlg
makerAlgPF = getMETMakerAlg('AntiKt4EMPFlow', jetSelection='Default')
makerAlgPF.METName = "MET_Reco_AntiKt4EMPFlow"
#makerAlgPF.OutputLevel=VERBOSE
#ToolSvc.METMaker_AntiKt4EMPFlow.OutputLevel=VERBOSE
topSequence += makerAlgPF
makerAlgEM = getMETMakerAlg('AntiKt4EMTopo', jetSelection='Default')
makerAlgEM.METName = "MET_Reco_AntiKt4EMTopo"
topSequence += makerAlgEM
# topSequence += CfgMgr.met__METAssocTestAlg("TestMETAssocNewEMPFlow",
#                                            OutputLevel=VERBOSE,
#                                            FailOnInconsistency=True,
#                                            METMapSuffix="NewAntiKt4EMPFlow")
# topSequence += CfgMgr.met__METAssocTestAlg("TestMETAssocEMTopo",
#                                            OutputLevel=VERBOSE,
#                                            FailOnInconsistency=True,
#                                            METMapSuffix="AntiKt4EMTopo")
# from Valkyrie.JobOptCfg import ValgrindSvc
# svcMgr += ValgrindSvc( OutputLevel = INFO,
#                        ProfiledAlgs = ["METAssociation"],
#                        ProfiledIntervals = ["METAssociation.execute"])
from PerfMonComps.PerfMonFlags import jobproperties as pmon_properties
pmon_properties.PerfMonFlags.doSemiDetailedMonitoring = True
# Write the recomputed MET containers/maps to an output xAOD.
write_xAOD = True
if write_xAOD:
    # The list of output containers/maps is autogenerated and stored in metFlags
    # This jO extracts them with the appropriate formatting
    from AthenaCommon.Resilience import protectedInclude
    protectedInclude("METReconstruction/METReconstructionOutputAODList_jobOptions.py")
    svcMgr.AthenaPoolCnvSvc.PoolAttributes += ["DEFAULT_SPLITLEVEL ='99'"]
    from OutputStreamAthenaPool.MultipleStreamManager import MSMgr
    xaodStream = MSMgr.NewPoolRootStream("StreamAOD", "xAOD.pool.root")
    xaodStream.AddItem("xAOD::EventInfo#EventInfo")
    xaodStream.AddItem("xAOD::EventAuxInfo#EventInfoAux.")
    xaodStream.AddItem("xAOD::ElectronContainer#Electrons")
    xaodStream.AddItem("xAOD::ElectronAuxContainer#ElectronsAux.")
    xaodStream.AddItem("xAOD::PhotonContainer#Photons")
    xaodStream.AddItem("xAOD::PhotonAuxContainer#PhotonsAux.")
    xaodStream.AddItem("xAOD::MuonContainer#Muons")
    xaodStream.AddItem("xAOD::MuonAuxContainer#MuonsAux.")
    xaodStream.AddItem("xAOD::TauJetContainer#TauJets")
    xaodStream.AddItem("xAOD::TauJetAuxContainer#TauJetsAux.")
    xaodStream.AddItem("xAOD::JetContainer#AntiKt4EMTopoJets")
    xaodStream.AddItem("xAOD::JetAuxContainer#AntiKt4EMTopoJetsAux.")
    xaodStream.AddItem("xAOD::JetContainer#AntiKt4EMPFlowJets")
    xaodStream.AddItem("xAOD::JetAuxContainer#AntiKt4EMPFlowJetsAux.")
    xaodStream.AddItem('xAOD::MissingETAssociationMap#METAssoc_AntiKt4EMTopo')
    xaodStream.AddItem('xAOD::MissingETAuxAssociationMap#METAssoc_AntiKt4EMTopoAux.')
    xaodStream.AddItem('xAOD::MissingETAssociationMap#METAssoc_AntiKt4EMPFlow')
    xaodStream.AddItem('xAOD::MissingETAuxAssociationMap#METAssoc_AntiKt4EMPFlowAux.')
    xaodStream.AddItem('xAOD::MissingETContainer#MET_Reco_AntiKt4EMTopo')
    xaodStream.AddItem('xAOD::MissingETAuxContainer#MET_Reco_AntiKt4EMTopoAux.')
    xaodStream.AddItem('xAOD::MissingETContainer#MET_Reco_AntiKt4EMPFlow')
    xaodStream.AddItem('xAOD::MissingETAuxContainer#MET_Reco_AntiKt4EMPFlowAux.')
    xaodStream.AddItem('xAOD::MissingETContainer#MET_Truth')
    xaodStream.AddItem('xAOD::MissingETAuxContainer#MET_TruthAux.')
#from Valkyrie.JobOptCfg import ValgrindSvc
#svcMgr += ValgrindSvc( OutputLevel = VERBOSE,
#                       ProfiledAlgs = ["jetalg","METAssociation"] )
# Process every event; start at the beginning; raise the message limit.
theApp.EvtMax = -1
ServiceMgr.EventSelector.SkipEvents = 0
ServiceMgr.MessageSvc.defaultLimit = 9999
| rushioda/PIXELVALID_athena | athena/Reconstruction/MET/METReconstruction/share/RunMETReco_Associator.py | RunMETReco_Associator.py | py | 6,245 | python | en | code | 1 | github-code | 13 |
24550793443 | import os
import sys, time
import argparse
import logging
import importlib
import threading
import Airplane, Globals
import pyavtools.fix as fix
# Parsed command-line namespace when run as a script (None when imported).
args = None
# Wall-clock time of the most recent waypoint edit; the worker thread
# reloads the route shortly after edits stop arriving.
wpchanged_time = None


# FIX database change callbacks: mirror panel selections into the global
# Airplane object ("craft", set up by start()).

def WAYPOINTS_changed(v):
    """Any waypoint field changed: note when, so the route is reloaded."""
    global wpchanged_time
    wpchanged_time = time.time()


def SELECTED_WAYPOINT_changed(v):
    """Jump the FMS to the newly selected waypoint index."""
    craft.SetWaypointNumber(v)


def SELECTED_ALTITUDE_changed(v):
    craft.SelectedAltitude = v


def SELECTED_AIRSPEED_changed(v):
    craft.DesiredAirSpeed = v


def SELECTED_HEADING_changed(v):
    # The selected heading is magnetic; convert to true by adding the
    # local magnetic declination.
    craft.DesiredTrueHeading = v + craft.Sensors().MagneticDeclination()


def AP_ON_changed(v):
    """Engage or disengage the autopilot servos."""
    craft.SetServoEngagedState(v)


def HNAV_MODE_changed(v):
    craft.HnavMode = v


def VNAV_MODE_changed(v):
    craft.VnavMode = v


def START_STRATEGY_changed(v):
    craft.StartStrategy = v


def ALTITUDE_SOURCE_changed(v):
    craft.AltitudeSource = v


def SELECTED_CLIMB_RATE_changed(v):
    craft.DesiredClimbRate = v


def SELECTED_PITCH_changed(v):
    craft.SelectedPitch = v
def ReadWaypoints():
    """Collect the configured route from the FIX database.

    Returns (waypoints, altitudes): waypoints is a list of
    (longitude, latitude) tuples; slots with an empty ID are skipped.
    """
    waypoints = list()
    altitudes = list()
    for index in range(256):
        ident = fix.db.get_item(Globals.WAYPOINT_ID_KEY + str(index))
        if not ident.value:
            continue
        lat_item = fix.db.get_item(Globals.WAYPOINT_LAT_KEY + str(index))
        lng_item = fix.db.get_item(Globals.WAYPOINT_LNG_KEY + str(index))
        alt_item = fix.db.get_item(Globals.WAYPOINT_ALT_KEY + str(index))
        waypoints.append((lng_item.value, lat_item.value))
        altitudes.append(alt_item.value)
    return waypoints, altitudes
def CreateWaypoints():
    """Ensure all 256 waypoint slots exist in the FIX database with the
    proper dtypes and value ranges."""
    field_specs = (
        (Globals.WAYPOINT_ID_KEY, 'str', None, None),
        (Globals.WAYPOINT_LAT_KEY, 'float', -90.0, 90.0),
        (Globals.WAYPOINT_LNG_KEY, 'float', -180.0, 180.0),
        (Globals.WAYPOINT_ALT_KEY, 'float', -1000.0, 60000.0),
    )
    for index in range(256):
        for key_prefix, dtype, low, high in field_specs:
            item = fix.db.get_item(key_prefix + str(index), create=True)
            item.dtype = dtype
            if low is not None:
                item.min = low
                item.max = high
def ConnectWaypoints():
    """Subscribe WAYPOINTS_changed to every waypoint field so that route
    edits are noticed (and later reloaded) by the FMS thread."""
    typed_keys = (
        (Globals.WAYPOINT_ID_KEY, str),
        (Globals.WAYPOINT_LAT_KEY, float),
        (Globals.WAYPOINT_LNG_KEY, float),
        (Globals.WAYPOINT_ALT_KEY, float),
    )
    for index in range(256):
        for key_prefix, value_type in typed_keys:
            item = fix.db.get_item(key_prefix + str(index))
            item.valueChanged[value_type].connect(WAYPOINTS_changed)
# Script entry: parse arguments, set up logging under
# Logs/<airplane>/<datestamp>/ (or --log-prefix), then start the FMS.
if '__main__' == __name__:
    opt = argparse.ArgumentParser(description='FMS interface to a FIX system')
    opt.add_argument('airplane_config', help='The airplane configuration')
    opt.add_argument('--log-prefix', default=None, help = 'Over-ride logging prefix')
    opt.add_argument('-l', '--log-level', type=int, default=logging.INFO, help = '1 = Maximum Logging. 100 = Absolute Silence. 40 = Errors only. 10 = Basic Debug')
    opt.add_argument('-f', '--fix-int-module', default='pyEfis.fix', help = 'The python module name to load for the FIX interface module')
    args = opt.parse_args()
    rootlogger = logging.getLogger()
    rootlogger.setLevel(args.log_level)
    Globals.SimulationMode = Globals.LIVE_LOGGING
    airplane = args.airplane_config.replace('.cfg', '')
    datestamp = Globals.datestamp()
    if args.log_prefix is not None:
        Globals.LoggingPrefix = args.log_prefix
    else:
        Globals.LoggingPrefix = os.path.join('Logs', airplane, datestamp)
    log_start = "airplane %s beginning on %s with logging prefix %s"%(airplane, datestamp, Globals.LoggingPrefix)
    try:
        os.makedirs(Globals.LoggingPrefix)
    except:
        # Best effort: the log directory may already exist.
        pass
    rootlogger.addHandler(logging.FileHandler(os.path.join(Globals.LoggingPrefix, 'info.log')))
    console_handler = logging.StreamHandler(sys.stdout)
    console_handler.setLevel(logging.DEBUG)
    rootlogger.addHandler(console_handler)
    rootlogger.log(99, log_start)
    # FIXME(review): start() is defined further down this module, so when
    # the file runs as a script this call raises NameError before start()
    # exists. This guard block should be moved below the definitions.
    start (args.airplane_config)
# Module state shared between start()/stop() and the FMS worker thread.
fms_thread = None          # threading.Thread running thread_run()
craft = None               # Airplane.Airplane instance created by start()
run_fms = True             # cleared by stop() to terminate the thread loop
send_back_sensors = True   # mirror sensor readings back into the FIX db
def start(airplane_config, snd_bk_snsrs=True):
    """Initialize the Airplane from its config file, create and wire all
    FIX database items the FMS uses, and launch the worker thread.

    Args:
        airplane_config: path of the airplane .cfg file.
        snd_bk_snsrs: when True the worker thread mirrors sensor readings
            back into the FIX database (see sensors_to_fix).
    """
    global fms_thread, craft, send_back_sensors
    send_back_sensors = snd_bk_snsrs
    rcfg = open (airplane_config, 'r')
    rlines = rcfg.readlines()
    rcfg.close()
    if not rlines:
        # NOTE(review): the message reports sys.argv[1] rather than the
        # airplane_config argument actually passed in -- confirm intent.
        raise RuntimeError ('Empty config file: %s'%sys.argv[1])
    craft = Airplane.Airplane()
    craft.initialize(rlines)
    # Flight director command bars.
    pitchdb = fix.db.get_item(Globals.FD_PITCH_KEY, create=True)
    pitchdb.min = -90
    pitchdb.max = 90
    rolldb = fix.db.get_item(Globals.FD_ROLL_KEY, create=True)
    rolldb.min = -90
    rolldb.max = 90
    # Panel selections: each item is created with its dtype/range, its
    # current value captured for the worker thread's initial state, and
    # its change signal hooked to the matching *_changed callback above.
    altitude_source_db = fix.db.get_item(Globals.ALTITUDE_SOURCE_KEY, create=True)
    altitude_source_db.dtype = 'int'
    altitude_source = altitude_source_db.value
    altitude_source_db.valueChanged[int].connect(ALTITUDE_SOURCE_changed)
    selected_altitude_db = fix.db.get_item(Globals.SELECTED_ALTITUDE_KEY, create=True)
    selected_altitude_db.dtype = 'int'
    selected_altitude_db.min = -1000
    selected_altitude_db.max = 60000
    selected_altitude_db.value = 10000
    selected_altitude = selected_altitude_db.value
    selected_altitude_db.valueChanged[int].connect(SELECTED_ALTITUDE_changed)
    selected_airspeed_db = fix.db.get_item(Globals.SELECTED_AIRSPEED_KEY, create=True)
    selected_airspeed_db.dtype = 'int'
    selected_airspeed_db.min = 10
    selected_airspeed_db.max = 1000
    selected_airspeed_db.value = 120
    selected_airspeed = selected_airspeed_db.value
    selected_airspeed_db.valueChanged[int].connect(SELECTED_AIRSPEED_changed)
    selected_heading_db = fix.db.get_item(Globals.SELECTED_HEADING_KEY, create=True)
    selected_heading_db.dtype = 'int'
    selected_heading_db.min = 0
    selected_heading_db.max = 359
    selected_heading = selected_heading_db.value
    selected_heading_db.valueChanged[int].connect(SELECTED_HEADING_changed)
    selected_climb_rate_db = fix.db.get_item(Globals.SELECTED_CLIMB_RATE_KEY, create=True)
    selected_climb_rate_db.dtype = 'int'
    selected_climb_rate_db.min = -10000
    selected_climb_rate_db.max = 10000
    selected_climb_rate_db.value = 500
    selected_climb_rate = selected_climb_rate_db.value
    selected_climb_rate_db.valueChanged[int].connect(SELECTED_CLIMB_RATE_changed)
    ap_on_db = fix.db.get_item(Globals.AP_ON_KEY, create=True)
    ap_on_db.dtype = 'bool'
    fd_on_db = fix.db.get_item(Globals.FD_ON_KEY, create=True)
    fd_on_db.dtype = 'bool'
    ap_on_db.valueChanged[bool].connect(AP_ON_changed)
    hnav_mode_db = fix.db.get_item(Globals.HNAV_MODE_KEY, create=True)
    hnav_mode_db.dtype = 'int'
    hnav_mode = hnav_mode_db.value
    hnav_mode_db.valueChanged[int].connect(HNAV_MODE_changed)
    vnav_mode_db = fix.db.get_item(Globals.VNAV_MODE_KEY, create=True)
    vnav_mode_db.dtype = 'int'
    vnav_mode = vnav_mode_db.value
    vnav_mode_db.valueChanged[int].connect(VNAV_MODE_changed)
    start_strategy_db = fix.db.get_item(Globals.START_STRATEGY_KEY, create=True)
    start_strategy_db.dtype = 'int'
    start_strategy = start_strategy_db.value
    start_strategy_db.valueChanged[int].connect(START_STRATEGY_changed)
    selected_turn_rate_db = fix.db.get_item(Globals.SELECTED_TURN_RATE_KEY, create=True)
    selected_turn_rate = selected_turn_rate_db.value
    selected_pitch_db = fix.db.get_item(Globals.SELECTED_PITCH_KEY, create=True)
    selected_pitch_db.dtype = 'int'
    selected_pitch_db.min = 0
    selected_pitch_db.max = 40
    selected_pitch_db.value = 5
    selected_pitch = selected_pitch_db.value
    selected_pitch_db.valueChanged[int].connect(SELECTED_PITCH_changed)
    selected_glideslope_db = fix.db.get_item(Globals.SELECTED_GLIDESLOPE_KEY, create=True)
    selected_glideslope = selected_glideslope_db.value
    selected_waypoint_db = fix.db.get_item(Globals.SELECTED_WAYPOINT_KEY, create=True)
    selected_waypoint_db.dtype = 'int'
    selected_waypoint_db.min = 0
    selected_waypoint_db.max = 256
    selected_waypoint_db.value = 0
    selected_waypoint_db.valueChanged[int].connect(SELECTED_WAYPOINT_changed)
    # Momentary "zero attitude" / "zero airspeed" buttons, consumed (and
    # reset) by sensors_to_fix().
    set0att = fix.db.get_item(Globals.SET_0ATTITUDE_KEY, create=True)
    set0att.dtype = 'bool'
    set0att.value = False
    set0asp = fix.db.get_item(Globals.SET_0AIRSPEED_KEY, create=True)
    set0asp.dtype = 'bool'
    set0asp.value = False
    CreateWaypoints()
    waypoints, WP_altitudes = ReadWaypoints()
    ConnectWaypoints()
    # Hand the initial selections to the worker thread; later changes
    # arrive through the valueChanged callbacks.
    fms_thread = threading.Thread(target=thread_run, args=(
        selected_altitude
        ,selected_airspeed
        ,selected_heading
        ,selected_climb_rate
        ,hnav_mode
        ,vnav_mode
        ,altitude_source
        ,start_strategy
        ,waypoints
        ,WP_altitudes
        ,selected_turn_rate
        ,selected_pitch
        ,selected_glideslope))
    fms_thread.start()
def stop():
    """Signal the FMS worker thread to exit and wait for it to finish."""
    global run_fms, fms_thread
    run_fms = False
    fms_thread.join()
def thread_run( selected_altitude
                ,selected_airspeed
                ,selected_heading
                ,selected_climb_rate
                ,hnav_mode
                ,vnav_mode
                ,altitude_source
                ,start_strategy
                ,waypoints
                ,WP_altitudes
                ,selected_turn_rate
                ,selected_pitch
                ,selected_glideslope):
    """FMS worker loop (runs in its own thread; see start()).

    Waits for the sensor stack to become ready, pushes the initial panel
    selections into the Airplane, then at ~10 Hz: updates the autopilot
    when engaged, mirrors sensors into the FIX db, and reloads the
    waypoint route 0.3 s after the last waypoint edit.
    """
    global craft, run_fms, send_back_sensors, wpchanged_time
    pitchdb = fix.db.get_item(Globals.FD_PITCH_KEY)
    rolldb = fix.db.get_item(Globals.FD_ROLL_KEY)
    sensors = craft.Sensors()
    # Keep feeding the barometer setting until the sensors come up.
    while not sensors.Ready():
        sensors.SendBarometer (fix.db.get_item("BARO").value)
        #print ("Sensors Not ready. Barometer %g"%(fix.db.get_item("BARO").value))
        time.sleep(1)
        # Bail out if stop() was called while we were still waiting.
        if not run_fms:
            return
    # Selected heading is magnetic; the FMS wants true heading.
    craft.initialize_input(
        selected_altitude
        ,selected_airspeed
        ,selected_heading + craft.Sensors().MagneticDeclination()
        ,selected_climb_rate
        ,hnav_mode
        ,vnav_mode
        ,altitude_source
        ,start_strategy
        ,waypoints
        ,WP_altitudes
        ,selected_turn_rate
        ,selected_pitch
        ,selected_glideslope)
    ap_on_db = fix.db.get_item(Globals.AP_ON_KEY)
    while run_fms:
        if ap_on_db.value:
            # Update() returns (pitch, roll) flight-director commands
            # when it has a solution.
            attitude = craft.Update()
            if isinstance(attitude,tuple):
                pitch,roll = attitude
                pitchdb.value = pitch
                rolldb.value = roll
        if send_back_sensors:
            sensors_to_fix(sensors)
        time.sleep(.1)
        # Debounced route reload: wait 0.3 s after the last edit so a
        # burst of waypoint changes is picked up only once.
        if wpchanged_time is not None and time.time() > wpchanged_time+.3:
            waypoints,alts = ReadWaypoints()
            craft.SetWaypoints (waypoints, alts)
            craft.SetWaypointNumber(0)
            wpchanged_time = None
# Resend the barometer setting at least once per second even if unchanged.
BARO_UPDATE_PERIOD = 1.0
last_baro_time = time.time() - BARO_UPDATE_PERIOD
last_baro = 0
# Confidence thresholds: readings below BAD_THRESHOLD are flagged bad,
# below FAIL_THRESHOLD failed (confidence scale assumed -- TODO confirm
# against the Sensors implementation).
BAD_THRESHOLD = 5.0
FAIL_THRESHOLD = 2.0
def _publish_reading(key, value, confidence):
    """Write one sensor reading into FIX database item *key*, flagging it
    bad/failed when *confidence* drops below the module thresholds."""
    item = fix.db.get_item(key)
    item.value = value
    item.bad = confidence < BAD_THRESHOLD
    item.fail = confidence < FAIL_THRESHOLD
    item.old = False


def sensors_to_fix(sensors):
    """Mirror the current sensor state into the FIX database.

    Publishes attitude, position, speeds and rates with quality flags,
    forwards the barometer setting (rate-limited), and services the
    momentary zero-attitude / zero-airspeed buttons.

    Refactor notes: the repeated get/set/flag sequences now go through
    _publish_reading(); the unused 'givenbarometer' global declaration
    was dropped.
    """
    global last_baro_time, last_baro, craft
    _publish_reading("PITCH", sensors.Pitch(), sensors.PitchConfidence())
    _publish_reading("ROLL", sensors.Roll(), sensors.RollConfidence())
    _publish_reading("ALAT", sensors.Yaw(), sensors.YawConfidence())
    # GPS-derived values are only published when a fix is available; they
    # all share the GPS signal quality as their confidence.
    lng, lat = sensors.Position()
    if lng is not None:
        gsq = sensors.GPSSignalQuality()
        _publish_reading("LAT", lat, gsq)
        _publish_reading("LONG", lng, gsq)
        _publish_reading("TRACK", sensors.GroundTrack(), gsq)
        _publish_reading("GS", sensors.GroundSpeed(), gsq)
    _publish_reading("ALT", sensors.Altitude(), sensors.AltitudeConfidence())
    airspeed = sensors.AirSpeed()
    if airspeed is not None:
        _publish_reading("IAS", airspeed, sensors.AirSpeedConfidence())
    _publish_reading("VS", sensors.ClimbRate(), sensors.ClimbRateConfidence())
    _publish_reading("HEAD", sensors.Heading(), sensors.HeadingConfidence())
    _publish_reading("ROT", sensors.HeadingRateChange(),
                     sensors.HeadingRateChangeConfidence())
    # Forward the barometer setting when it changes, and at least once
    # per BARO_UPDATE_PERIOD even when unchanged.
    baro = fix.db.get_item("BARO").value
    if last_baro_time + BARO_UPDATE_PERIOD < time.time() or baro != last_baro:
        last_baro_time = time.time()
        last_baro = baro
        sensors.SendBarometer(baro)
    # Momentary "zero" buttons: act once, then reset the flag.
    set0asp = fix.db.get_item(Globals.SET_0AIRSPEED_KEY)
    if set0asp.value:
        craft.Set0AirSpeed()
        set0asp.value = False
    set0att = fix.db.get_item(Globals.SET_0ATTITUDE_KEY)
    if set0att.value:
        craft.Set0Attitude()
        set0att.value = False
| Maker42/openEFIS | FixIntf.py | FixIntf.py | py | 15,437 | python | en | code | 13 | github-code | 13 |
5915716414 | import tensorflow as tf
from Dataset import ImageDataset
import os
from FileManagement import *
import time
class SwapModel(tf.keras.Model):
    """CNN classifier whose middle convolution can be swapped out for a
    wider separable convolution after construction (see swap())."""

    def __init__(self):
        super().__init__()
        self.const_layer_1 = tf.keras.layers.Conv2D(
            64, 3, 1, 'same', activation='relu', name='const1')
        self.swap_layer_2 = tf.keras.layers.Conv2D(
            64, 3, 1, 'same', activation='relu', name='swap2')
        self.const_layer_3 = tf.keras.layers.Conv2D(
            64, 3, 1, 'same', activation='relu', name='const3')
        self.flatten_layer_4 = tf.keras.layers.Flatten()
        self.dense_5 = tf.keras.layers.Dense(64)
        self.dense_6 = tf.keras.layers.Dense(10)

    def call(self, inputs, training=False, mask=None):
        # Run the fixed pipeline conv -> conv -> conv -> flatten -> dense -> dense.
        outputs = inputs
        for stage in (self.const_layer_1, self.swap_layer_2,
                      self.const_layer_3, self.flatten_layer_4,
                      self.dense_5, self.dense_6):
            outputs = stage(outputs)
        return outputs

    def swap(self):
        # Replace the middle layer with a wider separable convolution and
        # rebuild it (and its downstream neighbour) for the new widths.
        new_size = 128
        self.swap_layer_2 = tf.keras.layers.SeparableConv2D(
            new_size, 3, 1, 'same', activation='relu', name='swapped2')
        self.swap_layer_2.build([None, None, None, 64])
        self.const_layer_3.build([None, None, None, new_size])
def test1():
    """Train SwapModel briefly on CIFAR-10, checkpoint it, swap its middle
    layer, then reload the pre-swap checkpoint and continue training."""
    dataset = ImageDataset.get_cifar10()
    model = SwapModel()
    optimizer = tf.keras.optimizers.Adam(0.001)
    model.compile(optimizer=optimizer, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
    model.fit(dataset.train_images, dataset.train_labels, epochs=1)
    model.save('test_save')
    model.evaluate(dataset.test_images, dataset.test_labels)
    model.summary()
    # Swap the middle layer and recompile so the summary reflects it.
    model.swap()
    model.compile(optimizer=optimizer, loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True), metrics=['accuracy'])
    model.summary()
    print(model.layers)
    # Reload the pre-swap checkpoint and confirm it still trains/evaluates.
    model = tf.keras.models.load_model('test_save')
    model.summary()
    model.evaluate(dataset.test_images, dataset.test_labels)
    model.fit(dataset.train_images, dataset.train_labels, epochs=1)
    model.evaluate(dataset.test_images, dataset.test_labels)
class ModelHolder:
    """Holds Keras layers outside a Model so the middle conv layer can be
    swapped and the functional graph rebuilt around the new layer."""

    def __init__(self):
        with tf.name_scope('conv_layers'):
            self.lay1 = tf.keras.layers.Conv2D(4, 1, 1, 'same', activation='relu', name='orig1')
            self.lay2 = tf.keras.layers.Conv2D(4, 1, 1, 'same', activation='relu', name='orig2')
            self.lay3 = tf.keras.layers.Conv2D(4, 1, 1, 'same', activation='relu', name='orig3')
            self.dense = tf.keras.layers.Dense(10, name='dense')

    def swap(self):
        """Widen the middle layer from 4 to 8 filters and rebuild the
        weights of the swapped layer and its downstream neighbour."""
        new_size = 8
        old_shape = self.lay2.get_input_shape_at(0)
        # lay3's input channel count must match lay2's new filter count.
        new_shape = [x for x in old_shape]
        new_shape[-1] = new_size
        self.lay2 = tf.keras.layers.Conv2D(new_size, 3, 1, 'same', activation='relu', name='swapped2')
        self.lay2.build(old_shape)
        self.lay3.build(new_shape)

    def build(self, model_input):
        """Wire the held layers into a functional tf.keras.Model."""
        with tf.name_scope('conv_layers'):
            layer = self.lay1(model_input)
            layer = self.lay2(layer)
            layer = self.lay3(layer)
            layer = tf.keras.layers.Flatten()(layer)
            layer = self.dense(layer)
        keras_model = tf.keras.Model(inputs=model_input, outputs=layer)
        return keras_model
def test2():
    """Build, swap, rebuild and train via ModelHolder; checks whether the
    swapped layer's weights survive a second functional build."""
    dataset = ImageDataset.get_build_set()
    model_holder = ModelHolder()
    model_input = tf.keras.Input([16, 16, 3])
    keras_model = model_holder.build(model_input)
    print(model_holder.lay2.get_weights())
    model_holder.swap()
    # Second build wires the swapped lay2 into a new Model.
    keras_model = model_holder.build(model_input)
    print(model_holder.lay2.get_weights())
    # model_output = keras_model(model_input)
    # temp_model = tf.keras.Model(inputs=model_input, outputs=model_output)
    keras_model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
    # logdir = os.path.join(tensorboard_dir, 'sandbox_test_' + str(time.time()))
    # writer = tf.summary.create_file_writer(logdir)
    # tf.summary.trace_on(graph=True, profiler=False)
    keras_model.fit(dataset.train_images, dataset.train_labels, epochs=1)
    tf.keras.utils.plot_model(keras_model, 'model_image.png', expand_nested=True, show_layer_names=True, show_shapes=True)
    # with writer.as_default():
    #     tf.summary.trace_export(name='sandbox_model_trace', step=0, profiler_outdir=logdir)
def test3():
    """Train the subclassed SwapModel directly and try to plot it.

    HACK: `_is_graph_network` is a private Keras attribute; forcing it True
    coaxes plot_model into treating a subclassed model as a graph network.
    Presumably this breaks on other Keras versions — verify before reuse.
    """
    dataset = ImageDataset.get_build_set()
    keras_model = SwapModel()
    keras_model._is_graph_network = True
    # model_output = keras_model(model_input)
    # temp_model = tf.keras.Model(inputs=model_input, outputs=model_output)
    keras_model.compile(optimizer='adam', loss=tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True))
    # logdir = os.path.join(tensorboard_dir, 'sandbox_test_' + str(time.time()))
    # writer = tf.summary.create_file_writer(logdir)
    # tf.summary.trace_on(graph=True, profiler=False)
    keras_model.fit(dataset.train_images, dataset.train_labels, epochs=1)
    tf.keras.utils.plot_model(keras_model, 'model_image.png', expand_nested=True, show_layer_names=True, show_shapes=True)
    # with writer.as_default():
    #     tf.summary.trace_export(name='sandbox_model_trace', step=0, profiler_outdir=logdir)
if __name__ == '__main__':
test2() | dkoleber/nas | src/scripts/sandbox.py | sandbox.py | py | 5,370 | python | en | code | 0 | github-code | 13 |
18744030323 | # THIS IS IN PROD ENVIRONMENT
from flask import Flask, request
import boto3
import os
import time
import redis
import subprocess
import psycopg2
# We used this in the DEV and not for PROD
#ACCESS_KEY = os.environ['AWS_ACCESS_KEY_ID'] # This needed for the Dev testing, and not with elasticbeanstalk
#SECRET_KEY = os.environ['AWS_SECRET_ACCESS_KEY']
REDIS_HOST = os.environ['REDIS_HOST']
REDIS_PORT = os.environ['REDIS_PORT']
def client_method(service, region):
    """Return the number of S3 buckets visible to a fresh boto3 client.

    Credentials come from the environment / instance role (the explicit
    key arguments used in DEV are intentionally not passed in PROD).
    """
    aws_client = boto3.client(service, region_name=region)
    buckets = aws_client.list_buckets()['Buckets']
    return len(buckets)
def bucket_details(service, region, len_buckets):
    """Return the names of the first `len_buckets` S3 buckets.

    `service` and `region` are currently unused (kept for interface
    compatibility); the module-level `client` is used for the call.

    Fix: the original called client.list_buckets() once per bucket
    inside the loop — one network round-trip per name. A single call
    now fetches the whole list.
    """
    buckets = client.list_buckets()['Buckets']
    return [bucket['Name'] for bucket in buckets[:len_buckets]]
client=boto3.client(
's3', region_name='us-east-1') # ,'''aws_access_key_id=ACCESS_KEY,aws_secret_access_key=SECRET_KEY,''' region_name='us-east-1')
client_ec2=boto3.client(
'ec2', region_name='us-east-1') # ,'''aws_access_key_id=ACCESS_KEY,aws_secret_access_key=SECRET_KEY,''' region_name='us-east-1')
# print(client)
app=Flask(__name__)
cache=redis.Redis(host=REDIS_HOST, port=REDIS_PORT)
print(time.time())
cache.set('users:test', 'lang: python, born:1990')
# print(cache.get('WS39d962103'))
def get_hit_count():
    """Increment and return the Redis-backed page-hit counter.

    Retries up to 5 times on connection errors (0.5 s apart), then
    re-raises the last error.

    Fix: the exception was bound to the name `exec`, shadowing the
    builtin; renamed to `err`.
    """
    tries = 5
    while True:
        try:
            return cache.incr('hits')
        except redis.exceptions.ConnectionError as err:
            if tries == 0:
                raise err
            tries -= 1
            time.sleep(0.5)
def set_chache_data(ws_no, service, region):
    """Cache the service/region pair under the workstation id.

    (The "chache" typo in the name is kept for caller compatibility.)
    """
    cache.set(ws_no, f'service: {service}, region:{region}')
def get_cached_data(ws_no):
    """Return the cached value for `ws_no` (bytes) or None if absent."""
    return cache.get(ws_no)
@app.route("/requests", methods=['GET', 'POST'])
def ws_requests():
    """Register a workstation (POST) or run a shell command (GET).

    SECURITY WARNING: the GET branch executes a caller-supplied command
    with shell=True — this is remote command injection by design and
    must never be exposed beyond a trusted network.

    Fixes: the GET branch built an `output` string and then discarded
    it, returning repr(Popen) instead of the command output.
    """
    ws_no = str(request.args.get('ws_no'))
    service = str(request.args.get('service'))
    region = str(request.args.get('region'))
    command = str(request.args.get('command'))
    data = get_cached_data(ws_no)
    if request.method == 'POST':
        if data is None:
            set_chache_data(ws_no, service, region)
            return "POST Method RECEIVED for " + ws_no
        else:
            return "Data exists for {} with details {}".format(ws_no, data)
    if request.method == 'GET':
        process = subprocess.Popen(command, shell=True, stdout=subprocess.PIPE)
        stdout, _ = process.communicate()  # wait and collect output
        output = ws_no + " has used the service " + service + " in the region " + region + "\n"
        return output + stdout.decode(errors='replace')
@app.route("/", methods=['GET', 'POST'])
def hello():
    """Landing page; every request (GET or POST) bumps the hit counter."""
    count = get_hit_count()
    if request.method == 'POST':
        return "POST METHOD RECEIVED"
    return "Created the initial docker setup for exposing the code as REST API , and I have seen it {} times".format(count)
@app.route("/admin")
def admin():
    """Debug endpoint: echo the query params and the bucket count they yield."""
    service=request.args.get('service')
    region=request.args.get('region')
    print("type service ", type(service))
    # Bucket count via a throwaway client for the requested service/region.
    my=client_method(service, region)
    return 'Values returned ' + str(service) + " " + str(region)+" " + str(my)
@app.route("/list-buckets")
def buckets():
    """List the account's S3 buckets (count plus names) as plain text."""
    print("Inside List buckets")
    service=request.args.get('service')
    region=request.args.get('region')
    # Total bucket count, then the individual names up to that count.
    response=client_method(service, region)
    total_buckets=bucket_details(service, region, response)
    print("#####################")
    return "Total Number of buckets in the account are {}. \n The list of buckets are :- \n {}".format(response, total_buckets)
@app.route("/compute-details")
def ec2():
    """Dump the raw describe_instances() response for the module-level EC2 client."""
    response=client_ec2.describe_instances()
    return str(response)
@app.route("/execute-query")
def psql():
    """Fetch up to 10 rows from fs_user and return them as a string.

    SECURITY WARNING: database credentials are hard-coded in source;
    move them to environment variables / a secrets manager.

    Fixes: the original returned the literal string "rows" instead of
    the fetched data, used a bare `except:`, and never closed the
    connection.
    """
    rows = []
    connection = None
    try:
        connection = psycopg2.connect(user = "admin",
                                      password = "@normanschwar34",
                                      host = "dev-samsung-db.cyt6tb8cfudw.us-east-1.rds.amazonaws.com",
                                      port = "5432",
                                      database = "wh_samsung")
        cursor = connection.cursor()
        print("Connection established ")
        fs_contact_select_Query = "select * from fs_user limit 10;"
        cursor.execute(fs_contact_select_Query)
        record = cursor.fetchall()
        print("Print each row and it's columns values")
        for row in record:
            print("The row data is: ----- {}".format(row))
            rows.append(row)
    except Exception:
        # Route-level boundary: report instead of crashing the app, but
        # don't swallow silently without a trace.
        print("The connection with the postgres could not be made")
    finally:
        if connection is not None:
            connection.close()
    return str(rows)
# print(str(buckets()))
if __name__ == "__main__":
app.run(debug=True, host='0.0.0.0')
| kunwarluthera/jenkins-python | app.py | app.py | py | 5,251 | python | en | code | 2 | github-code | 13 |
17115402100 | import numpy as np
import plotly.express as px
import pandas as pd
rng = np.random.default_rng()
def bubble_diameter():
    """Sample fluidized-bed operating conditions and plot bubble-diameter
    correlations (Mori-Wen, Park, Whitehead, Geldart) against bed height.

    NOTE(review): with n_samples > 1 only the LAST sample's correlations
    would be written into `vals` (the .loc assignments sit outside the
    loop) — currently harmless because n_samples == 1, but confirm before
    raising it.
    """
    n_samples = 1
    n_points = 100
    u_mf = rng.normal(1.8, 0.7, n_samples)  # minimum fluidization velocity, clamped to 0.5-20 cm/s below
    # u_mf = np.array([1.8, 1.8])
    u_mf[u_mf < 0.5] = 0.5
    u_mf[u_mf > 20] = 20
    u_0 = u_mf + rng.normal(24, 24/2, n_samples)  # superficial velocity, cm/s
    # u_0 = np.array([9,9])
    u_0[u_0 < u_mf] = 10
    D_t = rng.uniform(20, 500, n_samples)  # bed diameter in cm; A_t = pi*D_t^2
    # D_t = np.array([20,100])
    A_t = np.pi*(D_t**2)  # cross-sectional area, cm^2
    nd = 3000  # number of distributor orifices (assumed — TODO confirm)
    # Initial and maximum bubble diameters for the Mori-Wen correlation.
    D_b0 = 0.347*np.power((A_t*(u_0 - u_mf)/nd), 2/5)
    D_bM = 0.347*1.87*np.power((A_t*(u_0 - u_mf)), 2/5)
    d_p = rng.normal(25.5, 15.6)/1000  # particle diameter (scalar sample), cm
    h = rng.uniform(0, 30, n_points)  # heights along the bed, cm
    corrs = ['moriwen', 'park', 'whitehead', 'geldart']
    vals = pd.DataFrame(columns=['height']+corrs)
    vals.loc[:, 'height'] = h
    conditions = []
    for i in range(n_samples):
        # Mori and Wen: exponential growth from D_b0 towards D_bM with height.
        d_b_moriwen = D_bM[i] - (D_bM[i] - D_b0[i])*np.exp(-0.3*h/D_t[i])
        print(d_b_moriwen.mean())
        istr = f"u_mf={u_mf[i]:.2f}, u_0={u_0[i]:.2f}, D_t={D_t[i]:.2f}, D_b0={D_b0[i]:.2f}, D_bM={D_bM[i]:.2f}"
        # d_b += rng.normal(0,0.02*d_b.mean(), n_points)
        print(istr)
        conditions.append(istr)
        # Park et al.
        d_b_park = 33.3*(d_p**1.5)*(u_0/u_mf - 1)**0.77 *h
        # Whitehead et al.
        d_b_wht = 9.76*(u_0/u_mf)**(0.33*(0.032*h)**0.54)
        # Geldart
        d_b_gld = 0.027*(u_0 - u_mf)**0.94 *h
    # These capture only the final loop iteration's arrays (see docstring).
    vals.loc[:, 'moriwen'] = d_b_moriwen
    vals.loc[:, 'park'] = d_b_park
    vals.loc[:, 'whitehead'] = d_b_wht
    vals.loc[:, 'geldart'] = d_b_gld
    fig = px.scatter(vals, x='height', y=corrs, title="Bubble Size correlation", trendline='lowess')
    fig.update_layout(
        yaxis_title='Bubble Size distribution (cm)', xaxis_title = 'height (cm)', legend_title='Runs', font=dict(size=22), width=1200, height=750)
    # fig.show()
    fig.update_traces(line=dict(dash="dash"))
    fig.show()
bubble_diameter() | plutonium-239/btp-project | main.py | main.py | py | 1,916 | python | en | code | 0 | github-code | 13 |
41847297173 | import pygame
import time
import random
# Inicializar o Pygame
pygame.init()
# Definir as cores RGB
white = (255, 255, 255)
yellow = (255, 255, 102)
black = (0, 0, 0)
red = (213, 50, 80)
green = (0, 255, 0)
blue = (50, 153, 213)
# Configurações da tela
dis_width = 600
dis_height = 400
dis = pygame.display.set_mode((dis_width, dis_height))
pygame.display.set_caption('Jogo da Cobrinha')
# Define o relógio do jogo
clock = pygame.time.Clock()
# Define o tamanho da cobra
snake_block = 10
# Define a velocidade da cobra
snake_speed = 15
# Define a fonte do texto
font_style = pygame.font.SysFont(None, 30)
# Função para mostrar a pontuação
def Your_score(score):
    """Render the current score in the top-left corner of the display."""
    text_surface = font_style.render("Pontuação: " + str(score), True, yellow)
    dis.blit(text_surface, [0, 0])
# Função para desenhar a cobra na tela
def our_snake(snake_block, snake_List):
    """Draw each snake segment as a filled square on the display."""
    for segment in snake_List:
        pygame.draw.rect(dis, blue, [segment[0], segment[1], snake_block, snake_block])
# Função do jogo principal
def gameLoop():
    """Main game loop: handle input, move the snake, detect wall collisions.

    NOTE(review): this function appears truncated in this copy of the file —
    no drawing, food-eating or snake-growth logic is visible after the wall
    check; confirm against the full source before relying on it.
    """
    # Snake's initial position: the centre of the display.
    x1 = dis_width / 2
    y1 = dis_height / 2
    # Per-frame movement deltas (0 until a key is pressed).
    x1_change = 0
    y1_change = 0
    # Snake body segments and starting length.
    snake_List = []
    Length_of_snake = 1
    # Food position, snapped to the 10-pixel grid.
    foodx = round(random.randrange(0, dis_width - snake_block) / 10.0) * 10.0
    foody = round(random.randrange(0, dis_height - snake_block) / 10.0) * 10.0
    # Flag controlling whether the game has ended.
    game_over = False
    # Main loop.
    while not game_over:
        # Handle pending events.
        for event in pygame.event.get():
            # Window close button ends the game.
            if event.type == pygame.QUIT:
                game_over = True
            # Arrow keys change the movement direction.
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_LEFT:
                    x1_change = -snake_block
                    y1_change = 0
                elif event.key == pygame.K_RIGHT:
                    x1_change = snake_block
                    y1_change = 0
                elif event.key == pygame.K_UP:
                    y1_change = -snake_block
                    x1_change = 0
                elif event.key == pygame.K_DOWN:
                    y1_change = snake_block
                    x1_change = 0
        # Hitting any screen edge ends the game.
        if x1 >= dis_width or x1 < 0 or y1 >= dis_height or y1 < 0:
            game_over = True
| gabfguimaraes/Jogo-da-Cobrinha | cobrinha.py | cobrinha.py | py | 2,768 | python | pt | code | 0 | github-code | 13 |
32501900929 | notas_100 = notas_50 = notas_20 = notas_10 = notas_5 = notas_2 = moeda_1 = 0
# ATM note breakdown: read an amount between R$10 and R$600 and print which
# notes/coins the bank hands out, fewest pieces first.
# Fix: the original duplicated the same 15-line print block once per
# denomination; the logic is collapsed into one loop with identical output
# (including the original's quirk of printing a leading separator when the
# first denominations yield zero notes).
saque = int(input('Digite a quantia em R$ do saque: R$'))
if saque < 10 or saque > 600:
    print('Valor inválido, Digite um Valor entre R$10,00 e R$600,00')
    exit()
else:
    print(f'Para receber o valor de \033[32mR${saque}\033[m, o banco lhe dará ', end='')
    # R$100 notes are special-cased: no separator is ever printed before them.
    notas_100 = saque // 100
    saque = saque % 100
    if notas_100 > 0:
        if notas_100 == 1:
            print('\033[33m1 nota\033[m de \033[32mR$100\033[m', end='')
        else:
            print(f'\033[33m{notas_100} notas\033[m de \033[32mR$100\033[m', end='')
    # Remaining denominations share one separator / singular / plural pattern.
    for valor in (50, 20, 10, 5, 2, 1):
        qtd = saque // valor
        saque = saque % valor
        if qtd == 0:
            print('', end='')
            continue
        # ', ' while money remains to break down, ' e ' before the last group.
        print(', ' if saque != 0 else ' e ', end='')
        if qtd == 1:
            unidade = 'nota' if valor != 1 else 'moeda'
            print(f'\033[33m1 {unidade}\033[m de \033[32mR${valor}\033[m', end='')
        elif valor != 1:
            print(f'\033[33m{qtd} notas\033[m de \033[32mR${valor}\033[m', end='')
        else:
            # Plural coins: colour reset after the number, as in the original.
            print(f'\033[33m{qtd}\033[m moedas de \033[32mR$1\033[m', end='')
| Elton-Gustavo/PythonBrasilExercicios- | Estruturas de Decisão/21 - caixa eletrônico.py | 21 - caixa eletrônico.py | py | 3,036 | python | es | code | 0 | github-code | 13 |
41057954425 | '''
本节视频
https://www.bilibili.com/video/BV18M4y1k7H8/ “Python”高级教程 类的静态字段的作用是什么?如何定义和使用类的静态字段
本节文章
https://learnscript.net/zh/python/advanced/define-and-access-class-static-fields/ 如何定义和访问类的静态字段
'''
###
import random
class Unit:
    """A game unit; the class-level `count` tracks how many units are alive."""

    # Static field: number of currently living units across all instances.
    count = 0

    def __init__(self, hp):
        """Create a living unit with the given hit points and bump the
        class-wide alive counter."""
        self.hp = hp
        self.dead = False
        Unit.count += 1

    def hurt(self, damage):
        """Apply `damage`; mark the unit dead (and decrement the alive
        counter) once its hit points drop to zero or below."""
        self.hp -= damage
        print(f'一个单位受到 {damage} 点伤害')
        if self.hp <= 0:
            self.dead = True
            Unit.count -= 1
            print(f'一个单位死亡,剩余 {Unit.count} 个单位')
# Create three living game units.
units = [Unit(10), Unit(15), Unit(20)]
game_over = False
while not game_over:
    # Damage every living unit in turn until only one survivor remains.
    for unit in units:
        if not unit.dead:
            # Damage amount is random.
            damage = random.randint(0, 10)
            unit.hurt(damage)
            # One survivor left: break out, the game is over.
            if Unit.count == 1:
                game_over = True
                break
print('游戏结束')
# Attach a static field `type_name` describing the Unit class after the fact.
Unit.type_name = '单位'
print(f'Unit 的类别名称为:{Unit.type_name}')
| codebeatme/python | src/zh/advanced/static_fields.py | static_fields.py | py | 1,791 | python | zh | code | 1 | github-code | 13 |
31726059094 | import pandas as pd
from kabutobashi.domain.errors import KabutobashiMethodError
from .method import Method, MethodType, ProcessMethod
class IndustryCategoriesProcess(ProcessMethod):
    """
    One-hot encodes a stock's industry category for parameterization.

    Only used through the PARAMETERIZE method type; _apply and _signal
    are effectively no-op placeholders required by the base class.
    """

    method_name: str = "industry_categories"
    method_type: MethodType = MethodType.PARAMETERIZE

    def _apply(self, df: pd.DataFrame) -> pd.DataFrame:
        # No transformation needed for category extraction.
        return df

    def _signal(self, df: pd.DataFrame) -> pd.DataFrame:
        # Placeholder signal: constant -1 "diff" means _cross never fires.
        df_ = df.copy()
        df_["diff"] = -1
        # Points where the sign crosses zero (base-class helper).
        df_ = df_.join(self._cross(df_["diff"]))
        df_ = df_.rename(columns={"to_plus": "buy_signal", "to_minus": "sell_signal"})
        return df_

    def _processed_columns(self) -> list:
        # No additional processed columns are produced.
        return []

    def _parameterize(self, df_x: pd.DataFrame, df_p: pd.DataFrame) -> dict:
        """Return a one-hot dict mapping every industry key to 0/1.

        Raises KabutobashiMethodError if df_x mixes more than one
        industry_type. Unknown industry names yield an all-zero dict.
        """
        # Japanese industry name -> parameter key (keys are runtime data;
        # do not re-spell).
        industry_type_mapping = {
            "水産・農林業": "industry_fisheries_agriculture",
            "鉱業": "industry_mining",
            "建設業": "industry_construction",
            "食料品": "industry_food",
            "繊維製品": "industry_fiber",
            "パルプ・紙": "industry_pulp_paper",
            "化学": "industry_chemistry",
            "医薬品": "industry_pharmaceuticals",
            "石油・石炭製品": "industry_oil_coal",
            "ゴム製品": "industry_rubber",
            "ガラス・土石製品": "industry_glass",
            "鉄鋼": "industry_steel",
            "非鉄金属": "industry_non_ferrous_metals",
            "金属製品": "industry_metal_products",
            "機械": "industry_machine",
            "電気機器": "industry_electric",
            "輸送用機器": "industry_transportation",
            "精密機器": "industry_mechanical_equipment",
            "その他製品": "industry_other",
            "電気・ガス業": "industry_electricity_gas",
            "陸運業": "industry_land_transportation",
            "海運業": "industry_shipping",
            "空運業": "industry_air_freight",
            "倉庫・運輸関連業": "industry_warehouse",
            "情報・通信業": "industry_information",
            "卸売業": "industry_wholesale",
            "小売業": "industry_retail",
            "銀行業": "industry_back",
            "証券、商品先物取引業": "industry_stock_future",
            "保険業": "industry_insurance",
            "その他金融業": "industry_financial",
            "不動産業": "industry_real_state",
            "サービス業": "industry_service",
        }
        # Start all-zero, then flip the matching category to 1.
        params = {v: 0 for v in industry_type_mapping.values()}
        industry_type_list = list(df_x["industry_type"].unique())
        if len(industry_type_list) > 1:
            raise KabutobashiMethodError("industry type should be 1")
        key = industry_type_list[0]
        if key in industry_type_mapping.keys():
            params.update({industry_type_mapping[key]: 1})
        return params
industry_categories = Method.of(process_method=IndustryCategoriesProcess(), visualize_method=None)
| gsy0911/kabutobashi | kabutobashi/domain/services/method/industry_cat.py | industry_cat.py | py | 3,182 | python | en | code | 0 | github-code | 13 |
11349338875 | # PyTorch has two primitives to work with data: torch.utils.data.DataLoader and torch.utils.data.Dataset.
# Dataset stores the samples and their corresponding labels, and DataLoader wraps an iterable around the Dataset.
import torch
from torch import nn
from torch.utils.data import DataLoader
from torchvision import transforms
# pip install torch-summary
from torchsummary import summary
import logging
logging.getLogger('matplotlib').setLevel(logging.WARNING)
# pip install matplotlib
import matplotlib.pyplot as plt
# matplotlib.use("Agg")
import seaborn as sns
import numpy as np
import time
import shutil
# pip install pandas
import pandas as pd
# OMP: Error #15: Initializing libiomp5md.dll, but found libiomp5md.dll already initialized.
import os
# os.environ['KMP_DUPLICATE_LIB_OK'] = 'TRUE'
import sys
sys.path.append('./src')
from preprocessing import DataPreprocessor
from CustomImageDataset_Pickle import CustomImageDataset_Pickle
import checkpoints
from logger import double_logger
import trainer
import tester
from NetworkModels import Resnet18
class gtNormalize(object):
    """Affine-map ground-truth vectors into [0.01, 1] and back.

    The non-zero lower bound keeps relative-error computations away
    from division by near-zero values.
    """

    def __init__(self, minV, maxV):
        self.minV = torch.tensor(minV)
        self.maxV = torch.tensor(maxV)

    def _scale(self):
        # Slope of the affine map [minV, maxV] -> [0.01, 1].
        return torch.div(1.0 - 0.01, self.maxV - self.minV)

    def __call__(self, gt):
        """Normalize gt element-wise into [0.01, 1]."""
        return 0.01 + self._scale() * (gt - self.minV)

    def restore(self, gt):
        """Invert __call__: map normalized values back to raw units."""
        return (gt - 0.01) / self._scale() + self.minV
# Optimizing the Model Parameters
# To train a model, we need a loss function and an optimizer.
def kl_divergence(dis_a, dis_b):
    """Batch-mean KL(dis_a || dis_b), summed along dim 1.

    A small epsilon keeps log() finite for zero-probability bins.
    """
    eps = 1e-6
    pa = dis_a + eps
    pb = dis_b + eps
    per_sample = torch.sum(pa * (torch.log(pa) - torch.log(pb)), dim=1)
    result = torch.mean(per_sample)
    assert torch.isnan(result).sum() == 0
    return result
def HG_theta(g, theta):
    """Evaluate the Henyey-Greenstein phase density, weighted by sin(theta).

    Args:
        g: (B,) tensor of anisotropy factors.
        theta: (T,) tensor of angles in radians.
    Returns:
        (B, T) tensor: 0.5*(1-g^2)/(1+g^2-2g*cos(theta))^(3/2) * sin(theta),
        with a 1e-6 offset in the denominator for stability.

    Fix: the result was allocated with a hard-coded .cuda(), which crashed
    on CPU-only machines; it now follows the input's device.
    """
    bSize = g.size()[0]
    p = torch.zeros(bSize, theta.size()[0], device=g.device)
    for i in range(bSize):
        p[i, :] = 0.5 * (1 - g[i] * g[i]) / ((1 + g[i] * g[i] - 2 * g[i] * torch.cos(theta)) ** (3.0 / 2.0) + 1e-6)
        p[i, :] *= torch.sin(theta)
    return p
def normfun(x, mean, sigma):
    """Gaussian probability density at x with the given mean and std.

    A 1e-6 offset on sigma avoids division by zero for degenerate stds.
    """
    std = sigma + 1e-6
    two_pi = torch.tensor(np.pi) * 2
    return torch.exp(-((x - mean) ** 2) / (2 * std ** 2)) / (std * torch.sqrt(two_pi))
def GMM(nnOut, theta):
    """Build per-sample Gaussian-mixture densities over theta.

    nnOut packs, per sample: num_of_Gaussian weights in [0,1], means in
    [0,1] (scaled to [0,pi]), then stds. Relies on the module-level
    `num_of_Gaussian`. Each mixture is renormalized so its sum over
    0.01-radian bins equals 1.

    Fix: the output was allocated with a hard-coded .cuda(), which
    crashed on CPU-only machines; it now follows nnOut's device.
    """
    pi = torch.tensor(np.pi)
    w = nnOut[:, 0:num_of_Gaussian]                          # mixture weights
    w_sum = torch.sum(w, dim=1)
    m = nnOut[:, num_of_Gaussian:num_of_Gaussian * 2] * pi   # means in [0, pi]
    d = nnOut[:, num_of_Gaussian * 2:num_of_Gaussian * 3]    # stds
    bSize = nnOut.size()[0]
    gmm = torch.zeros(bSize, theta.size()[0], device=nnOut.device)
    for i in range(bSize):
        for j in range(num_of_Gaussian):
            gmm[i, :] += (w[i, j] / w_sum[i]) * normfun(theta, m[i, j], d[i, j])
        sumGmm = torch.sum(gmm[i, :]) * 0.01  # 0.01-radian discretization bin
        gmm[i, :] /= sumGmm  # normalize so the discrete sum is 1
    return gmm
def loss_func_mse(prediction, gt):
    """MSE between the predicted GMM phase function and the HG reference.

    gt[:, 2] is the anisotropy factor g. Uses the module-level `theta`
    grid. The KL-divergence variants and the ua/us MSE term from earlier
    experiments were removed as dead, commented-out code — only the
    phase-function MSE remains, as before.
    """
    gmm = GMM(prediction, theta)
    g = gt[:, 2]
    p_theta = HG_theta(g, theta)
    loss_phase = nn.MSELoss()(gmm, p_theta)
    return loss_phase
# ==============================================================================================================
if __name__=='__main__':
torch.backends.cudnn.benchmark = True
# Need to calculate the mean and std of the dataset first.
# imageCW, 500x500, g=0.5:0.01:0.95, training number = 70, mean = 0.0050, std = 0.3737
# imageCW, 500x500, g=-1:0.025:1, training number = 100, mean = 0.0068, std = 1.2836
# imageCW, 500*500, 14 materials, training number = 500, mean = 0.0040, sta = 0.4645
# imageCW, 500*500, 12 materials, training number = 500, mean = 0.0047, sta = 0.5010
# gt = [ua, us, g], min = [0.0010, 0.0150, 0.1550], max = [0.2750, 100.92, 0.9550]
# imageCW_v3, 500x500, training number = 80, mean = 0.0026, std = 0.9595
# imageCW_v4, 500x500, training number = 50, mean = 0.0026, std = 0.9595
# trainDataCW_v3_ExcludeExtremes, 500x500, training number = 80, mean = 0.0028, std = 0.8302
# imageCW_v4, 500x500, training number = 200, mean = 0.0045, std = 0.3633
# imageCW_v4_fat, 500x500, training number = 200, mean = 0.0068, std = 0.3823
# imageCW_v5, 500x500, number=1000, mean=0.0035, std=0.2197
# 2021-09-16
# Dataset V5, large phantom, mean = 0.0039, std = 0.2198
meanPixelVal = 0.0039 # using statistics of all v5 data
stdPixelVal = 0.2198
img_path = "ImageCW_v5"
DataListFile = "TrainDataCW_v5_Results.csv"
tmp_processed_data_dir = "temp_processed_data"
checkpoint_path = 'CrossVal_results_v5Data'
temp_test_pickle_file_name = 'test.pkl'
if not os.path.exists(checkpoint_path):
os.mkdir(checkpoint_path)
logger = double_logger(log_path=checkpoint_path).getLogger()
preprocessing_transformer = transforms.Normalize(meanPixelVal, stdPixelVal)
inverse_preprocessing_transformer = transforms.Normalize(-meanPixelVal, 1.0/stdPixelVal)
train_transformer = transforms.Compose([transforms.RandomHorizontalFlip(0.5),
transforms.RandomVerticalFlip(0.5)
])
device = "cuda" if torch.cuda.is_available() else "cpu"
logger.info(f"Using {device} device")
theta = np.arange(0, np.pi, 0.01)
theta = torch.from_numpy(theta).to(device)
labels = pd.read_csv(os.path.join(img_path, DataListFile))
tissues = pd.unique(labels['Tissue'])
for fold, val_tissue in enumerate(tissues):
logger.info(f'Fold: {fold}, val: {val_tissue}')
train_labels = labels.iloc[(labels['Tissue']!=val_tissue).tolist()]
val_labels = labels.iloc[(labels['Tissue']==val_tissue).tolist()]
if os.path.exists(tmp_processed_data_dir):
shutil.rmtree( tmp_processed_data_dir )
os.makedirs(tmp_processed_data_dir, exist_ok=False)
temp_train_pickle_file_name = 'train.pkl'
temp_val_pickle_file_name = 'val.pkl'
print('Preprocessing...')
DataPreprocessor().dump(train_labels, img_path, tmp_processed_data_dir, temp_train_pickle_file_name, preprocessing_transformer)
DataPreprocessor().dump(val_labels, img_path, tmp_processed_data_dir, temp_val_pickle_file_name, preprocessing_transformer)
print('Preprocessing finished')
train_data = CustomImageDataset_Pickle(
img_labels = train_labels,
file_preprocessed = os.path.join(tmp_processed_data_dir, temp_train_pickle_file_name),
transform = train_transformer
)
val_data = CustomImageDataset_Pickle(
img_labels = val_labels,
file_preprocessed = os.path.join(tmp_processed_data_dir, temp_val_pickle_file_name)
)
# Create data loaders.
batch_size = 160
train_dataloader = DataLoader(train_data, batch_size=batch_size, shuffle=True, pin_memory=True, num_workers=8)
val_dataloader = DataLoader(val_data, batch_size=batch_size, pin_memory=True, num_workers=8)
df_loss_best = pd.DataFrame(columns=['NoG', 'Events', 'Fold', 'Error']) # record num_of_Gaussian and the best model's train and validation error
for num_of_Gaussian in range(2, 11):
# Define model
model = Resnet18(num_classes=num_of_Gaussian*3)
# model_struct = summary(model, (1, 500, 500), verbose=0)
# model_struct_str = str(model_struct)
# logger.info('Model structure:\n {}'.format(model_struct_str))
# optimizer = torch.optim.SGD(model.parameters(), lr=1e-3, momentum=0.9, weight_decay=5e-3)
optimizer = torch.optim.Adam(model.parameters(), lr=5e-4, weight_decay=5e-3)
scheduler = torch.optim.lr_scheduler.StepLR(optimizer, step_size=10, gamma=0.1)
Trn = trainer.Trainer()
bestmodel_name = f'best_model_NoG_{num_of_Gaussian}'
logger.info(f'Fold: {fold}, NoG: {num_of_Gaussian}, Training {bestmodel_name}')
val_loss_min, train_loss, df_loss = Trn.run(train_dataloader, val_dataloader, model, loss_func_mse, optimizer, scheduler, num_epochs=30)
train_result = {'NoG':num_of_Gaussian, 'Events':'Train', 'Fold':val_tissue, 'Error':train_loss}
val_result = {'NoG':num_of_Gaussian, 'Events':'Validation', 'Fold':val_tissue, 'Error':val_loss_min}
df_loss_best = df_loss_best.append(train_result, ignore_index=True)
df_loss_best = df_loss_best.append(val_result, ignore_index=True)
#---end of for num_of_Gaussian
print(df_loss_best)
df_loss_best.to_csv(os.path.join(checkpoint_path, f'Results_Fold_{fold}.csv'), index=False)
#---end of for cross-validation
print('Done') | liangyuxuan1/phasefunction2 | Code_Using_MOSE/Step6_Regression_PhaseOnly_ResNet18_v5Data_CrossVal.py | Step6_Regression_PhaseOnly_ResNet18_v5Data_CrossVal.py | py | 9,539 | python | en | code | 3 | github-code | 13 |
31342397838 | import sqlite3
import requests
import logging
import os

# Tunables: how many quotes to keep cached locally, and where.
num_quotes_in_db = 10
log_to_file = True
log_location = "chuck_logs.log"
db_location = "quotes.db"

logger = logging.getLogger(str(os.getpid()))
if log_to_file:
    logging.basicConfig(format=f'%(asctime)s: [%(name)s] %(message)s', datefmt='%d/%m/%y %H:%M:%S',
                        filename=log_location, level=logging.INFO)
else:
    # Fix: logging.NullHandler() alone creates a handler and throws it
    # away; it must be attached to the logger to silence output.
    logger.addHandler(logging.NullHandler())

db_connection = sqlite3.connect(db_location)
with db_connection:
    cursor = db_connection.cursor()
    cursor.execute("CREATE table IF NOT EXISTS quotes (quote text)")
    cursor.execute("SELECT * FROM quotes")
    quotes_list = cursor.fetchall() or []
    new_entries = []
    missing = num_quotes_in_db - len(quotes_list)
    if missing > 0:
        logger.info(f"Retrieving {missing} new quote{'s' if missing > 1 else ''}")
    # Top up the cache from the public API until we hold num_quotes_in_db.
    while len(quotes_list) + len(new_entries) < num_quotes_in_db:
        try:
            new_quote = requests.get("https://api.chucknorris.io/jokes/random").json()["value"]
        except requests.ConnectionError as error_message:
            logger.critical(error_message)
            # Fix: the original fell through and appended an undefined /
            # stale new_quote after a failed request; abort instead.
            raise
        new_entries.append((new_quote,))
    cursor.executemany("INSERT INTO quotes VALUES (?)", new_entries)
    logger.info(f"{len(new_entries)} quote{'s' if len(new_entries) > 1 else ''} added to database")
db_connection.close()
| nickhendo/chuck-terminal | runner.py | runner.py | py | 1,472 | python | en | code | 1 | github-code | 13 |
20358894000 | from celluloid import Camera
import matplotlib.pyplot as plt
def visualize_simulation(
        simulator, savename='particles.gif', timesteps=10, fps=10, dpi=400):
    """Animate `simulator`'s particles over `timesteps` steps and save a GIF.

    Each frame scatters the particle positions and overlays each particle's
    ideal circular orbit (dashed) so Euler-method divergence is visible.

    NOTE(review): ax.annotate adds a new artist every frame without removing
    the previous one; with celluloid this makes later frames show stacked
    "t=" labels — confirm whether that overlap is intended.
    """
    # True orbit radius per particle, keyed by object identity.
    paths = {id(p): (p.x ** 2 + p.y ** 2) ** 0.5 for p in simulator.particles}
    fig, ax = plt.subplots(1, 1, figsize=(6, 6), dpi=dpi)
    ax.set_xlim(-1, 1)
    ax.set_ylim(-1, 1)
    camera = Camera(fig)
    for t in range(timesteps):
        ax.annotate(f"t={t}", [0.8, 0.8])
        X = [p.x for p in simulator.particles]
        Y = [p.y for p in simulator.particles]
        colors = [f"C{j}" for j in range(len(X))]
        ax.scatter(X, Y, edgecolors="k", color=colors, zorder=2);
        # Plot true paths (shows divergence with large delta in Euler-method).
        for p in simulator.particles:
            r = paths[id(p)]
            c = plt.Circle((0, 0), r, facecolor='None', edgecolor='black', linestyle='dashed', zorder=1)
            ax.add_patch(c);
        camera.snap()
        simulator.evolve(1)  # advance the simulation by 1 time unit
    animation = camera.animate()
    animation.save(savename, fps=fps)
| particle1331/high-performance-python | hp/utils.py | utils.py | py | 1,151 | python | en | code | 0 | github-code | 13 |
70141781137 | # Group: Michael Phelps
# Name: Ethan Lansangan
# Name: Jake Pielage
# Name: James Keen
# Name: Branden McKinney
# Assignment Title: Assignment 10
# Course: IS 4010
# Semester/Year: Spring 2023
# Brief Description: This demonstrates our ability to use APIs
# Citations:
# Anything else that's relevant: data from https://api.fda.gov
# main.py
import json
import requests

# Query the FDA National Drug Code Directory for the first 30 drug products
# listing ibuprofen as an active ingredient.
# SECURITY NOTE(review): the API key is embedded in the URL / source
# control — move it to an environment variable before sharing.
response = requests.get('https://api.fda.gov/drug/ndc.json?api_key=M3dA9e76eGod4aA3MmvFs4DwgdblAf85ZFtkcodp&search=active_ingredients.name:"ibuprofen"&limit=30')
json_string = response.content

# Parse the response body into a dictionary.
parsed_json = json.loads(json_string)

# Map each labeler to the dosage form of its product. Later entries with
# the same labeler overwrite earlier ones, exactly as the original index
# loop did; insertion order (and thus print order) is preserved.
mnfgDict = {entry['labeler_name']: entry['dosage_form']
            for entry in parsed_json['results']}

# Print each labeler/dosage-form pair on its own line.
for labeler, dosage_form in mnfgDict.items():
    print(labeler, '...', dosage_form)
# This shows an easy-to-read list of different Labelers, including a few
# recognizable sellers (Sam's Club, CVS) alongside unfamiliar companies.
# Only a handful of Ibuprofen forms appear: coated tablets, suspension,
# capsules, chewables, and liquid-filled capsules.
| eLasagna/Michael_Phelps_Assignment10 | Michael_Phelps_Assignment10/mainPackage/main.py | main.py | py | 1,926 | python | en | code | 0 | github-code | 13 |
11340295093 | # -*- coding: utf-8 -*-
import re
from datetime import datetime
from scrapy import Spider, Request
from kylx.items import KylxItem
class HuakeSpider(Spider):
    """Crawl job postings from job.hust.edu.cn and yield KylxItem objects."""
    name = "huake"
    allowed_domains = ["job.hust.edu.cn"]
    start_urls = ['http://job.hust.edu.cn/']
    pre_url = 'https://job.hust.edu.cn/searchJob_'
    suf_url = '.jspx?type=2&fbsj='
    root_url = "https://job.hust.edu.cn"
    max_page = 25
    # Pre-compiled patterns for detail links, titles and posting dates.
    relative_url_pattern = re.compile(r'/.{7}/\d{4,6}\.htm')
    title_pattern = re.compile(r'<a href="/.{7}/\d{4,6}\.htm" .*?>(.*?)</a>')
    time_pattern = re.compile(r'\d{4}-\d{1,2}-\d{1,2}')

    def start_requests(self):
        """Request listing pages 1..max_page-1."""
        for page in range(1, self.max_page):
            url = ''.join([self.pre_url, str(page), self.suf_url])
            yield Request(url, callback=self.parse)

    def parse(self, response):
        """Extract (url, title, date) triples from a listing page and
        schedule the detail pages.

        Fix: the original indexed three parallel regex-result lists by
        range(len(times_)), raising IndexError whenever the lists were
        of different lengths; zip() stops safely at the shortest.
        """
        if response.text is not None:
            relative_urls = re.findall(self.relative_url_pattern, response.text)
            titles = re.findall(self.title_pattern, response.text)
            times_ = re.findall(self.time_pattern, response.text)
            for rel_url, title, time_ in zip(relative_urls, titles, times_):
                sub_url = ''.join([self.root_url, rel_url])
                yield Request(sub_url, callback=self.parse_page,
                              meta={'url': sub_url, 'title': title, 'time': time_})

    def parse_page(self, response):
        """Build a KylxItem from a posting's detail page."""
        kylxitem = KylxItem()
        content = response.xpath('//div[@class="Content"]').extract()[0]
        kylxitem['content'] = content
        kylxitem['url'] = response.meta['url']
        kylxitem['title'] = response.meta['title']
        _time = response.meta['time']
        kylxitem['start_date'] = datetime.strptime(_time, '%Y-%m-%d')
        kylxitem['start_milli'] = kylxitem['start_date'].timestamp()
        yield kylxitem
| jinyaozhuzhu/kylx-crawl | kylx/spiders/huake.py | huake.py | py | 1,886 | python | en | code | 0 | github-code | 13 |
41161863744 | string_list = input().split()
# Read commands until "3:1": "merge i j" concatenates the slice of words
# [i..j] into one element; "divide i n" splits word i into n parts (the last
# part absorbs the remainder when the length is not divisible by n).
while True:
    command = input()
    list_to_add = []
    if command == "3:1":
        break
    command_list = command.split()
    if command_list[0] == "merge":
        start_index = int(command_list[1])
        end_index = int(command_list[2])
        # Clamp the end index to the last valid position.
        if len(string_list) - 1 < end_index:
            end_index = len(string_list) - 1
        # Repeatedly pop the element after start_index and append its text.
        for i in range(start_index + 1, end_index + 1):
            string_list[start_index] += string_list.pop(start_index + 1)
    elif command_list[0] == "divide":
        divide_index = int(command_list[1])
        divide_times = int(command_list[2])
        divide_string = string_list.pop(divide_index)
        if len(divide_string) % divide_times == 0:
            # Evenly divisible: cut into equal chunks and re-insert in order.
            divide_per = len(divide_string) // divide_times
            for i in range(0, len(divide_string), divide_per):
                list_to_add.append(divide_string[i:i + divide_per])
            for element in reversed(list_to_add):
                string_list.insert(divide_index, element)
        else:
            # Not divisible: the final part keeps the leftover characters.
            # NOTE(review): len_remainder is remainder + 1 and the tail is
            # prepended to the last chunk — presumably matching the exercise
            # spec; verify against the SoftUni task description.
            len_remainder = (len(divide_string) % divide_times) + 1
            last_part = divide_string[-len_remainder:]
            divide_per = len(divide_string) // divide_times
            divide_string = divide_string[:-len_remainder]
            for i in range(0, len(divide_string), divide_per):
                list_to_add.append(divide_string[i:i + divide_per])
            last_part += list_to_add.pop(-1)
            string_list.insert(divide_index, last_part)
            for element in reversed(list_to_add):
                string_list.insert(divide_index, element)
print(" ".join(string_list))
| lefcho/SoftUni | Python/SoftUni - Python Fundamentals/Lists_Advanced/anonymous_threat.py | anonymous_threat.py | py | 1,654 | python | en | code | 0 | github-code | 13 |
26648267244 | # coding: utf-8
import copy
import tushare as ts
import numpy as np
import wbdata
import pandas as pd
import math
from datetime import datetime, timedelta
import requests
import json
import random
import scipy.stats as stats
import matplotlib.pyplot as plt
from datetime import datetime
from dateutil.relativedelta import relativedelta
import time
import os
# Show all columns
pd.set_option('display.max_columns', None)
# Show all rows
pd.set_option('display.max_rows', None)
# Set the display width for values to 100 (default is 50)
pd.set_option('max_colwidth',100)
# SECURITY NOTE(review): hard-coded tushare API token checked into source;
# it should be loaded from an environment variable or config file instead.
ts.set_token('ae0addf484ab2b76fe78cdb46b10165b48d31748a48202f9df193951')
pro = ts.pro_api()
# Global date window used by all data-download helpers below (YYYYMMDD).
start_date = '19990101'
end_date = '20200601'
# Fetch daily stock prices
def get_stock_price(ticker):
    """Download daily price bars for `ticker` over the global date window,
    save them to price_chinese/<ticker>.csv and return the DataFrame."""
    df = pro.daily(ts_code=ticker, start_date=start_date, end_date=end_date)
    df.to_csv('price_chinese/' + str(ticker) + '.csv',index=0)
    return df
# # Financial statement data
def get_financialstatement_data(ticker):
    """Download income / balance-sheet / cashflow fields for `ticker`,
    merge them on end_date, save to fsa_chinese/<ticker>.csv and return
    the merged DataFrame.  Uses the module-level `pro` client and the
    global start_date/end_date window."""
    # income statement fields
    income_indicator = ['end_date','ann_date','f_ann_date', 'total_revenue', 'total_cogs', 'int_exp', 'oper_exp', 'n_income','ebit','ebitda','basic_eps','operate_profit']
    ds1 = pro.income(ts_code=ticker, start_date=start_date, end_date=end_date, fields=','.join(income_indicator))
    # One row per reporting period: keep the max of duplicated end_dates.
    ds1 = ds1.groupby('end_date').max()
    ds1.sort_values(by='end_date', inplace=True, ascending=False)
    # balance sheet fields
    balance_indicator = ['end_date','accounts_receiv','acct_payable','inventories','amor_exp','total_cur_assets','intan_assets', 'r_and_d','goodwill','total_assets','total_cur_liab','total_liab']
    ds2 = pro.balancesheet(ts_code=ticker, start_date=start_date, end_date=end_date, fields=','.join(balance_indicator))
    ds2 = ds2.groupby('end_date').max()
    ds2.sort_values(by='end_date', inplace=True, ascending=False)
    # cashflow (cash at end of period only)
    ds3 = pro.cashflow(ts_code=ticker, start_date=start_date, end_date=end_date, fields='end_date,c_cash_equ_end_period')
    ds3 = ds3.groupby('end_date').max()
    ds3.sort_values(by='end_date', inplace=True, ascending=False)
    # Left-join the three statements on the reporting period.
    df = pd.merge(ds1.drop_duplicates(),ds2.drop_duplicates(),how='left',left_on=['end_date'],right_on=['end_date'])
    df = pd.merge(df.drop_duplicates(),ds3.drop_duplicates(),how='left',left_on=['end_date'],right_on=['end_date'])
    df.to_csv('fsa_chinese/' + str(ticker) + '.csv')
    return df
# Delete extra stock files
def delete(folder, reference='train'):
    """Remove every file in `folder` whose stem has no matching '.SH' file
    in `reference` (default 'train', matching the original behaviour).

    A stem is the file name minus its 4-character extension, e.g.
    '600000.SH.csv' -> '600000.SH'.  Destructive: removed files are gone.
    """
    stock_name = set()
    for root, dirs, files in os.walk(reference):
        for f in files:
            if '.SH' in f:
                stock_name.add(f[:-4])
    for root, dirs, files in os.walk(folder):
        for f in files:
            if f[:-4] not in stock_name:
                # Bug fix: join with the directory being walked; the original
                # used folder + '/' + f, which fails for nested subfolders.
                os.remove(os.path.join(root, f))
# Check the data-missing rate
def fsa_check():
    """Concatenate every CSV in fsa_chinese/ (periods >= 2010-01-01),
    treat zeros as missing, and print per-column counts and null totals."""
    for root, dirs, files in os.walk('fsa_chinese'):
        df_total = pd.read_csv('fsa_chinese/' + files[0])
        df_total = df_total.loc[df_total['end_date']>=20100101]
        for f in files[1:]:
            df = pd.read_csv('fsa_chinese/' + f)
            df = df.loc[df['end_date']>=20100101]
            df_total = pd.concat([df_total, df])
        # Zeros are treated as missing values for the coverage statistics.
        df_total = df_total.replace(0, np.nan)
        print(df_total.count())
        print(df_total.isnull().sum(axis=0).tolist())
# >= 70% ['basic_eps','total_revenue','total_cogs','operate_profit','n_income','ebit','accounts_receiv,'inventories',\
# 'total_cur_assets','intan_assets','total_assets','acct_payable','total_cur_liab','total_liab','c_cash_equ_end_period']
if __name__ == '__main__':
# get stock fs
# data = pro.stock_basic(exchange='', list_status='L', fields='ts_code,list_date')
# data = data.values.tolist()
# count = 0
# for x in data:
# if x[1] <= '20100101':
# get_financialstatement_data(x[0])
# time.sleep(1) # 50/sec
# count += 1
# if count % 100 == 0:
# print(count,len(data))
#delete
# delete('fsa_chinese')
# fsa_check()
get_financialstatement_data('600589.SH')
| patrickying/long_term_stock_prediction | data/get_data.py | get_data.py | py | 4,001 | python | en | code | 1 | github-code | 13 |
16277408115 | import random
Deckplayer=[1,2,3,4,5,6,7,8,9,10,11,12,13,14]
Deckpc=[1,2,3,4,5,6,7,8,9,10,11,12,13,14]
deck2=(Deckplayer, Deckpc)
handPlayer= (str(Deckplayer[-1]) + str(Deckplayer[-2]))
handNPC= (str(Deckpc[-1]) + str(Deckpc[-2]))
def translateDeck(deck):
    """Return display names for the given cards: 11-14 become J/Q/K/A,
    everything else is just the number as a string."""
    face_names = {11: "J", 12: "Q", 13: "K", 14: "A"}
    return [face_names.get(card, str(card)) for card in deck]
def shuffle(deck):
    # Shuffle the deck in place (thin wrapper around random.shuffle; returns None).
    random.shuffle(deck)
def fish(deck):
    """Draw the top two cards: last element first, then the second-to-last."""
    return [deck[i] for i in (-1, -2)]
def sum(deck):
    """Return the total of all card values in the deck.

    NOTE: intentionally shadows the builtin sum() to keep the original
    script's interface; behaviour for iterables of numbers is the same.
    """
    total = 0
    for value in deck:
        total = total + value
    return total
def discard (deck):
    """Ask the player whether to discard cards and print the resulting hands.

    NOTE(review): the `deck` parameter is never used — the nested fish2/3/4
    helpers read the global Deckplayer/Deckpc instead, and only the offsets
    into the player's deck change (skip one or two top cards).
    """
    n=input("Do you want to discard your cards? ")
    if (n == "yes 1" or n == "yes 1 card"):
        # Discard one card: deal from positions -2 and -3.
        def fish2 (deck):
            return[ deck[-2], deck[-3] ]
        print("Player cards " + str(fish2(translateDeck(Deckplayer))))
        print("PC cards " + str(fish(translateDeck(Deckpc))))
    elif (n == "yes 2" or n == "yes 2 cards"):
        # Discard two cards: deal from positions -3 and -4.
        def fish3 (deck):
            return[ deck[-3], deck[-4] ]
        print("Player cards " + str(fish3(translateDeck(Deckplayer))))
        print("PC cards " + str(fish(translateDeck(Deckpc))))
    elif (n == "no" or n == "0"):
        # Keep the original top two cards.
        def fish4 (deck):
            return[ deck[-1], deck[-2] ]
        print("Player cards " + str(fish4(translateDeck(Deckplayer))))
        print("PC cards " + str(fish(translateDeck(Deckpc))))
def compare(handPlayer, handNPC):
    """Print which hand wins based on the larger card sum (returns None)."""
    player_total = sum(handPlayer)
    npc_total = sum(handNPC)
    if player_total > npc_total:
        print("Player wins! Because it has a bigger sum of cards compared to PC.")
    elif player_total < npc_total:
        print("PC wins! Because it has a bigger sum of cards compared to the player.")
    else:
        print("DRAWWWWWWWWWWW!!!!!!!!!!!")
# Single round: show both decks, shuffle, deal two cards each, offer a
# discard, then compare the drawn hands.  The trailing break makes the
# while-True run exactly once (kept from the original design).
while True:
    print("Player deck " + str(translateDeck(Deckplayer)))
    # Bug fix: the PC line previously printed the *player's* deck.
    print("PC deck " + str(translateDeck(Deckpc)))
    print("Shuffling decks...")
    shuffle(Deckplayer)
    shuffle(Deckpc)
    print("Player shuffled deck " + str(translateDeck(Deckplayer)))
    print("PC shuffled deck " + str(translateDeck(Deckpc)))
    print("Fishing two cards...")
    print("Player cards " + str(fish(translateDeck(Deckplayer))))
    print("PC cards " + str(fish(translateDeck(Deckpc))))
    discard(str(translateDeck(Deckplayer)))
    compare(fish(Deckplayer), fish(Deckpc))
    break
| DanF04/Card-Game | Cardgame.py | Cardgame.py | py | 2,656 | python | en | code | 0 | github-code | 13 |
22195601602 | import os
import pytest
import yaml
from linkml.generators.sssomgen import SSSOMGenerator
@pytest.fixture
def schema_path(input_path) -> str:
    """Absolute path of the kitchen-sink schema used as SSSOM generator input."""
    return str(input_path("kitchen_sink_sssom.yaml"))
@pytest.fixture
def sssom_path(schema_path, tmp_path) -> str:
    """Run the SSSOM generator once and return the path of the TSV it wrote."""
    output_path = str(tmp_path / "test_sssom.tsv")
    gen = SSSOMGenerator(schema_path, output=output_path)
    gen.serialize()
    return output_path
def test_sssomgen(sssom_path):
    """Smoke test: serialization must produce a file on disk."""
    # Test if the generator actually created the output file
    assert os.path.exists(sssom_path)
def test_sssom_metadata(schema_path, sssom_path):
    """Parse the generated SSSOM TSV by hand and check its three sections:
    '#key: value' metadata, the '#curie_map' prefix block, and the
    tab-separated mapping table that follows."""
    meta = {}
    curie_map = {}
    # Flips to True once the '#curie_map' header line is seen; from then on
    # commented lines are prefix entries rather than plain metadata.
    curie_flag = False
    msdf_as_dict = {}
    # Read Input file
    with open(schema_path, "r") as input_yaml:
        try:
            input_data = yaml.safe_load(input_yaml)
        except yaml.YAMLError as exc:
            print(exc)
    # Read output files
    with open(sssom_path) as sssom_file:
        row_count = -1
        for ln in sssom_file:
            if ln.startswith("#"):
                if "curie_map" in ln:
                    curie_flag = True
                if not curie_flag:
                    # Plain metadata line: '#key: value'.
                    clean_ln_list = ln.lstrip("#").rstrip("\n").split(": ")
                    meta[clean_ln_list[0]] = clean_ln_list[1]
                else:
                    if "curie_map" not in ln:
                        # Prefix entry inside the curie_map block.
                        curie_ln = ln.lstrip("#").rstrip("\n").split(": ")
                        curie_map[curie_ln[0]] = curie_ln[1]
            else:
                # This is the MappingSetDataFrame
                row_count += 1
                ln = ln.split("\t")
                ln[-1] = ln[-1].strip()
                if row_count == 0:
                    # First data row is the header; prime one list per column.
                    msdf_columns = ln
                    for col in msdf_columns:
                        msdf_as_dict[col] = []
                else:
                    for idx, value in enumerate(msdf_columns):
                        msdf_as_dict[value].append(ln[idx])
    # Assertions
    assert len(meta) == 5
    assert len(curie_map) == len(input_data["prefixes"])
    assert " " not in msdf_as_dict["subject_id"]
| linkml/linkml | tests/test_generators/test_sssomgen.py | test_sssomgen.py | py | 2,118 | python | en | code | 228 | github-code | 13 |
31436647479 | class Solution:
def gameOfLife(self, board: List[List[int]]) -> None:
"""
Do not return anything, modify board in-place instead.
"""
changed = []
for r in range(len(board)):
for c in range(len(board[0])):
cnt = 0
# 1
if r-1 >= 0 and c-1>=0 and board[r-1][c-1]==1:
cnt += 1
if r-1>=0 and board[r-1][c] ==1:
cnt +=1
if r-1>=0 and c+1 <len(board[0]) and board[r-1][c+1]==1:
cnt += 1
if c-1 >=0 and board[r][c-1] == 1:
cnt += 1
if c+1 < len(board[0]) and board[r][c+1] == 1:
cnt += 1
if r+1 <len(board) and c-1>=0 and board[r+1][c-1] == 1:
cnt += 1
if r+1 < len(board) and board[r+1][c]==1:
cnt+=1
if r+1 < len(board) and c+1 < len(board[0]) and board[r+1][c+1]==1:
cnt+=1
if board[r][c] ==1:
if cnt < 2 or cnt > 3:
changed.append((r,c))
if board[r][c] == 0:
if cnt ==3:
changed.append((r,c))
for e in changed:
board[e[0]][e[1]] = abs(board[e[0]][e[1]]-1) | wangjue2020/LeetCode | 289-game-of-life/289-game-of-life.py | 289-game-of-life.py | py | 1,367 | python | en | code | 0 | github-code | 13 |
19964615234 | import csv
from random import randrange
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives.asymmetric import rsa, dsa
import pandas as pd
# Generate 10 RSA key pairs and append each (private, public) pair as a CSV
# row, then check the file for duplicate rows with pandas.
# SECURITY NOTE(review): key_size=512 is trivially factorable today — use at
# least 2048 bits for anything other than a toy experiment.
for c in range(10):
    cle = rsa.generate_private_key(backend=default_backend(),public_exponent=65537, key_size=512)
    # Public key in OpenSSH text form.
    clePublique = cle.public_key().public_bytes(serialization.Encoding.OpenSSH,serialization.PublicFormat.OpenSSH)
    # Private key as an unencrypted PEM blob.
    pem = cle.private_bytes(
        format = serialization.PrivateFormat.TraditionalOpenSSL,
        encoding=serialization.Encoding.PEM,
        encryption_algorithm = serialization.NoEncryption())
    clePubliqueString = clePublique.decode('utf-8')
    clePriveeString = pem.decode('utf-8')
    # NOTE(review): csv files should be opened with newline='' to avoid
    # platform-dependent blank rows; kept as-is to preserve behaviour.
    with open ("test.csv", "a", encoding = 'utf-8') as file:
        cSV = csv.writer(file)
        cSV.writerow ([clePriveeString,clePubliqueString])
    print(c, "512 RSA clés")
df=pd.read_csv('test.csv')
# Report how many duplicate rows (identical key pairs) exist.
print ("Doublons : " + str(df.duplicated().sum()))
| Jennyyyfer/CHEN_RMILI-RIOU | code_cryptosujet3.py | code_cryptosujet3.py | py | 1,014 | python | en | code | 0 | github-code | 13 |
32336686508 | """
Project Euler Problem 31
========================
In England the currency is made up of pound, -L-, and pence, p, and there
are eight coins in general circulation:
1p, 2p, 5p, 10p, 20p, 50p, -L-1 (100p) and -L-2 (200p).
It is possible to make -L-2 in the following way:
1 * -L-1 + 1 * 50p + 2 * 20p + 1 * 5p + 1 * 2p + 3 * 1p
How many different ways can -L-2 be made using any number of coins?
"""
coins = [1, 2, 5, 10, 20, 50, 100, 200]
limit = 200

# bottom-up dynamic programming over the coin denominations
def ways(amount, c):
    """Count the ways to make `amount` using coins[0..c]."""
    if c <= 0:
        return 1  # only the 1p coin remains: exactly one way
    if amount < 0:
        return 0
    counts = [1] + [0] * amount  # counts[v] = ways to form value v
    for coin in coins[:c + 1]:
        for value in range(coin, amount + 1):
            counts[value] += counts[value - coin]
    return counts[amount]
print(ways(limit, 7))
29861388857 | import json
import unittest
import requests_mock
import intelmq.lib.test as test
from intelmq.bots.outputs.restapi.output import RestAPIOutputBot
def request_callback(expected):
    """Build a requests_mock callback: it answers 200 when the request body
    JSON-decodes to `expected`, 400 otherwise, always returning 'ok'."""
    def callback(request, context):
        matches = json.loads(request.text) == expected
        context.status_code = 200 if matches else 400
        return 'ok'
    return callback
class TestRestAPIOutputBot(test.BotTestCase, unittest.TestCase):
    """Integration tests for RestAPIOutputBot using a mocked HTTP endpoint."""
    @classmethod
    def set_bot(cls):
        # Configure the bot under test: HTTP basic auth, JSON body,
        # hierarchical event output, posting to localhost.
        cls.bot_reference = RestAPIOutputBot
        cls.sysconfig = {"hierarchical_output": True,
                         "auth_token_name": "username",
                         "auth_token": "password",
                         "auth_type": "http_basic_auth",
                         "use_json": True,
                         "host": "http://localhost/"}
        cls.default_input_message = {'__type': 'Event',
                                     'source.ip': '10.0.0.1'}
    @requests_mock.Mocker()
    def test_event(self, mocker):
        """
        Test if data is posted correctly to webserver.
        """
        # request_callback answers 200 only when the posted JSON matches the
        # hierarchical form of the default input message.
        mocker.post('http://localhost/',
                    text=request_callback({'source': {'ip': '10.0.0.1'}}),
                    request_headers={'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                                     'Content-Type': 'application/json; charset=utf-8'})
        self.run_bot()
    @requests_mock.Mocker()
    def test_status_check(self, mocker):
        """
        Test if response from webserver is correctly validated.
        """
        # A 500 answer must surface as a logged HTTPError (one allowed error).
        mocker.post('http://localhost/',
                    status_code=500,
                    request_headers={'Authorization': 'Basic dXNlcm5hbWU6cGFzc3dvcmQ=',
                                     'Content-Type': 'application/json; charset=utf-8'})
        self.run_bot(allowed_error_count=1)
        self.assertLogMatches('requests.exceptions.HTTPError: 500 Server Error: None for url: http://localhost/',
                              'ERROR')
if __name__ == '__main__': # pragma: no cover
unittest.main()
| certtools/intelmq | intelmq/tests/bots/outputs/restapi/test_output.py | test_output.py | py | 2,119 | python | en | code | 856 | github-code | 13 |
18697810843 | __author__ = 'John Buttigieg'
from kivy.uix.button import Button
from kivy.properties import StringProperty
from kivy.app import App
from kivy.lang import Builder
import csv
### This program is incomplete but loads data from a csv file
### and displays the data in buttons
### new entries are able to be writting from the add new items popup
### and is saved back into the csv file.
###github URL: https://github.com/johnbuttigieg
class Main(App):
    """Equipment-hire GUI application (Kivy).

    Loads hire items from items2.csv, shows them as buttons, and lets the
    user add new items through a popup, persisting them back to the file.
    """
    # Status line shown in the GUI (bound from hireGUI.kv).
    status_text = StringProperty()
    def __init__(self, **kwargs):
        super(Main, self).__init__(**kwargs)
        # loads datalines from the load items function
        self.data_lines = self.load_items()
    def build(self):
        """Load the .kv layout, create one button per item, return the root widget."""
        self.status_text = "Choose action from the left menu, then select items on the right"
        self.title = "Equipment Hire"
        # load file
        self.root = Builder.load_file('hireGUI.kv')
        # loads data_lines and creates buttons
        data_lines = self.load_items()
        self.create_buttons(data_lines)
        # testing code
        print('build')
        return self.root
    def load_items(self):
        """Read items2.csv and return a list of [name, description, price, status] rows."""
        return_array = []
        # Open with a context manager so the handle is always closed
        # (the original leaked the file object).
        with open("items2.csv") as data_file:
            self.count = 0
            for line in data_file:
                values = line.split(",")
                return_array.append([values[0], values[1], values[2], values[3]])
                self.count = self.count + 1
        print('loaditems')
        print(return_array)
        return return_array
    def create_buttons(self, data_lines):
        """Add one button per item row to the entryBox grid."""
        for line in data_lines:
            # creates a button for each line in datalines (csv)
            new_button = Button(text=line[0])
            new_button.bind(on_release=self.entry)
            self.root.ids.entryBox.add_widget(new_button)
            print(line)
    def entry(self, instance):
        """Show the clicked item's price in the status line."""
        name = instance.text
        tempprice = ""  # robustness: avoid UnboundLocalError when no row matches
        for line in self.data_lines:
            if line[0] == instance.text:
                tempprice = line[2]
        self.status_text = "{} ${}".format(name, tempprice)
    def add_items(self):
        """Open the 'add new item' popup."""
        self.status_text = "Enter details for a new hire item"
        self.root.ids.addItems.open()
    def save(self, name_field, price_field, description_field):
        """Append the new entry, rewrite items2.csv once, and add its button."""
        self.data_lines.append([name_field, description_field, price_field, 'in \n'])
        print(self.data_lines)
        # Bug fix: csv.writer requires a text-mode file in Python 3 ('wb'
        # raises TypeError), and the file only needs to be written once —
        # the original reopened and rewrote it on every loop iteration.
        with open('items2.csv', 'w', newline='') as f:
            writer = csv.writer(f, delimiter=",", lineterminator='')
            writer.writerows(self.data_lines)
        for line in self.data_lines:
            newitems = '{} {} {} {}'.format(line[0], line[1], line[2], line[3])
            # testing code
            print(newitems)
        self.root.ids.entryBox.cols = 5 // 5 + 1
        new_button = Button(text=name_field.text)
        new_button.bind(on_release=self.entry)
        self.root.ids.entryBox.add_widget(new_button)
        self.root.ids.addItems.dismiss()
        self.clear_text()
    def clear_text(self):
        """Clear the three input fields of the add-item popup."""
        self.root.ids.name_field.text = ""
        self.root.ids.price_field.text = ""
        self.root.ids.description_field.text = ""
    def cancel(self):
        """Dismiss the popup without saving and reset the fields."""
        self.root.ids.addItems.dismiss()
        self.clear_text()
        self.status_text = ""
    def hire(self):
        """Highlight items whose status column contains 'in' (available)."""
        data_lines = self.load_items()
        for line in data_lines:
            # create a button for each available entry
            if 'in' in line[3]:
                self.new_button = Button(text=line[0])
                self.new_button.bind(on_release=self.entry)
                self.new_button.background_color = [0, 3, 6, 22]
                self.root.ids.entryBox.add_widget(self.new_button)
                print('testrun')
        # new_button.bind(on_release=self.changecolor(data_lines))
        self.status_text = "Choose the items you would like to hire, then press confirm."
Main().run()
| johnbuttigieg/Code | Assignment 2 GUI ItemsForHire/JohnButtigiegA2.py | JohnButtigiegA2.py | py | 4,737 | python | en | code | 0 | github-code | 13 |
72478845137 | from tkinter import *
from DAO_Module.CustomerDAO import CustomerDAO
from DAO_Module.DeviceDAO import DeviceDAO
from Models import Customer, Device
def registerCustomer():
    """Open a Tk window that collects customer details and saves them via
    CustomerDAO.  All fields are required; the phone must be numeric with
    at least 9 digits.  On success the fields flash green and the window
    closes after one second."""
    def save():
        # Collect all form values, build the Customer, validate, persist.
        company = companyEntry.get()
        street = streetEntry.get()
        location = locationEntry.get()
        postal_code = postalEntry.get()
        phone = phoneEntry.get()
        email = emailEntry.get()
        name = nameEntry.get()
        surname = surnameEntry.get()
        customer = Customer(
            name.capitalize(),
            surname.capitalize(),
            email.lower(),
            phone,
            company,
            street.capitalize(),
            location.capitalize(),
            postal_code)
        if (phone != "" and name != "" and surname != "" and email != "" and postal_code != "" and company != "" and street != "" and location != ""):
            # err flags a non-numeric phone number.
            try:
                phone = int(phone)
            except:
                err = 1
            else:
                err = 0
            if (err == 1 or len(str(phone)) < 9):
                message.configure(text="The phone number is invalid: It must be a set of at least 9 digits !")
            else:
                customerDAO = CustomerDAO()
                customerDAO.register_customer(customer)
                print("Saved !")
                # Visual confirmation: turn every field green, then close.
                companyEntry.config(bg="green")
                streetEntry.config(bg="green")
                locationEntry.config(bg="green")
                postalEntry.config(bg="green")
                phoneEntry.config(bg="green")
                emailEntry.config(bg="green")
                nameEntry.config(bg="green")
                surnameEntry.config(bg="green")
                window.after(1000, window.destroy)
        else:
            message.configure(text="All information must be provided !")
    # --- window and form layout ---
    window = Tk()
    window.title("Customer Registration")
    frame = Frame(window, background="lightpink", height=800, width=700, highlightbackground="black", highlightthickness=3)
    frame.pack(ipadx=5, ipady=20)
    title = Label(frame, text="Customer Registration", background="red", foreground="white", highlightbackground="black", highlightthickness=3,
                  width=40, height=3, font="sans-serif 18 bold")
    title.grid(row=0, column=0, padx=20, pady=20)
    formFrame = Frame(frame, width=600, height=600, background="red", highlightbackground="black", highlightthickness=3)
    formFrame.grid(row=1, column=0, padx=10, pady=10)
    # One label + entry pair per required field, stacked in grid rows.
    companyLabel = Label(formFrame, text="Company*", background="red", foreground="white", font="sans-serif 15 bold underline")
    companyLabel.grid(row=0, padx=20, pady=10, sticky=W)
    companyEntry = Entry(formFrame, width=50)
    companyEntry.grid(row=0, column=1, padx=20)
    streetLabel = Label(formFrame, text="Street*", background="red", foreground="white", font="sans-serif 15 bold underline")
    streetLabel.grid(row=1, padx=20, pady=10, sticky=W)
    streetEntry = Entry(formFrame, width=50)
    streetEntry.grid(row=1, column=1, padx=20)
    locationLabel = Label(formFrame, text="Location*", background="red", foreground="white", font="sans-serif 15 bold underline")
    locationLabel.grid(row=2, padx=20, pady=10, sticky=W)
    locationEntry = Entry(formFrame, width=50)
    locationEntry.grid(row=2, column=1, padx=20)
    postalLabel = Label(formFrame, text="Postal Code*", background="red", foreground="white", font="sans-serif 15 bold underline")
    postalLabel.grid(row=3, padx=20, pady=10, sticky=W)
    postalEntry = Entry(formFrame, width=50)
    postalEntry.grid(row=3, column=1, padx=20)
    phoneLabel = Label(formFrame, text="Phone*", background="red", foreground="white", font="sans-serif 15 bold underline")
    phoneLabel.grid(row=4, padx=20, pady=10, sticky=W)
    phoneEntry = Entry(formFrame, width=50)
    phoneEntry.grid(row=4, column=1, padx=20)
    emailLabel = Label(formFrame, text="Email*", background="red", foreground="white", font="sans-serif 15 bold underline")
    emailLabel.grid(row=5, padx=20, pady=10, sticky=W)
    emailEntry = Entry(formFrame, width=50)
    emailEntry.grid(row=5, column=1, padx=20)
    nameLabel = Label(formFrame, text="Name*", background="red", foreground="white", font="sans-serif 15 bold underline")
    nameLabel.grid(row=6, padx=20, pady=10, sticky=W)
    nameEntry = Entry(formFrame, width=50)
    nameEntry.grid(row=6, column=1, padx=20)
    surnameLabel = Label(formFrame, text="Surname*", background="red", foreground="white", font="sans-serif 15 bold underline")
    surnameLabel.grid(row=7, padx=20, pady=10, sticky=W)
    surnameEntry = Entry(formFrame, width=50)
    surnameEntry.grid(row=7, column=1, padx=20)
    # Validation / feedback message area and action buttons.
    message = Label(formFrame, text="", font="sans-serif 10", foreground="black", background="red")
    message.grid(row=8, column=1, padx=5, pady=5)
    saveButton = Button(formFrame, text="Save", command=save, width=10, background="green", foreground="white", font="Sans-Serif 15 bold")
    saveButton.grid(row=9, column=0, padx=30, pady=20)
    cancelButton = Button(formFrame, text="Cancel", command=window.destroy, width=10, background="darkred", foreground="white", font="Sans-Serif 15 bold")
    cancelButton.grid(row=9, column=1, padx=10, pady=20)
    window.mainloop()
def registerDevice():
    """Open a Tk window that collects device details and saves them via
    DeviceDAO.register_device, which looks up the owner by name/surname.
    All fields are required; inductance must parse as a float."""
    def save():
        # Collect all form values, build the Device, validate, persist.
        device_manufacturer = manufacturerEntry.get()
        type = typeEntry.get()
        inductance = inductanceEntry.get()
        dimensions = dimensionsEntry.get()
        name = nameEntry.get()
        surname = surnameEntry.get()
        device = Device(
            device_manufacturer.capitalize(),
            type.capitalize(),
            inductance,
            dimensions,
            name.capitalize(),
            surname.capitalize())
        if (device_manufacturer != "" and type != "" and inductance != "" and dimensions != "" and name != "" and surname != ""):
            # err flags a non-numeric inductance value.
            try:
                inductance = float(inductance)
            except:
                err = 1
            else:
                err = 0
            if (err == 1):
                message.configure(text="Enter a numeric for inductance")
            else:
                deviceDAO = DeviceDAO()
                # register_device returns falsy when the owner lookup fails.
                result = deviceDAO.register_device(device)
                if (result):
                    print("Saved!")
                    # Visual confirmation: turn every field green, then close.
                    manufacturerEntry.config(bg="green")
                    typeEntry.config(bg="green")
                    inductanceEntry.config(bg="green")
                    dimensionsEntry.config(bg="green")
                    nameEntry.config(bg="green")
                    surnameEntry.config(bg="green")
                    window.after(1000, window.destroy)
                else:
                    message.configure(text="Customer information not found")
        else:
            message.configure(text="All information must be provided !")
    # --- window and form layout ---
    window = Tk()
    window.title("Device Registration")
    frame = Frame(window, background="lightpink", height=800, width=700, highlightbackground="black", highlightthickness=3)
    frame.pack(ipadx=5, ipady=20)
    title = Label(frame, text="Device Registration", background="red", foreground="white", highlightbackground="black", highlightthickness=3,
                  width=40, height=3, font="sans-serif 18 bold")
    title.grid(row=0, column=0, padx=20, pady=20)
    formFrame = Frame(frame, width=600, height=600, background="red", highlightbackground="black", highlightthickness=3)
    formFrame.grid(row=1, column=0, padx=10, pady=10)
    # One label + entry pair per required field, stacked in grid rows.
    manufacturerLabel = Label(formFrame, text="Device Manufacturer*", background="red", foreground="white", font="sans-serif 15 bold underline")
    manufacturerLabel.grid(row=0, padx=20, pady=10, sticky=W)
    manufacturerEntry = Entry(formFrame, width=50)
    manufacturerEntry.grid(row=0, column=1, padx=20)
    typeLabel = Label(formFrame, text="Type*", background="red", foreground="white", font="sans-serif 15 bold underline")
    typeLabel.grid(row=1, padx=20, pady=10, sticky=W)
    typeEntry = Entry(formFrame, width=50)
    typeEntry.grid(row=1, column=1, padx=20)
    inductanceLabel = Label(formFrame, text="Inductance*", background="red", foreground="white", font="sans-serif 15 bold underline")
    inductanceLabel.grid(row=2, padx=20, pady=10, sticky=W)
    inductanceEntry = Entry(formFrame, width=50)
    inductanceEntry.grid(row=2, column=1, padx=20)
    dimensionsLabel = Label(formFrame, text="Dimensions*", background="red", foreground="white", font="sans-serif 15 bold underline")
    dimensionsLabel.grid(row=3, padx=20, pady=10, sticky=W)
    dimensionsEntry = Entry(formFrame, width=50)
    dimensionsEntry.grid(row=3, column=1, padx=20)
    nameLabel = Label(formFrame, text="Name*", background="red", foreground="white", font="sans-serif 15 bold underline")
    nameLabel.grid(row=4, padx=20, pady=10, sticky=W)
    nameEntry = Entry(formFrame, width=50)
    nameEntry.grid(row=4, column=1, padx=20)
    surnameLabel = Label(formFrame, text="Surname*", background="red", foreground="white", font="sans-serif 15 bold underline")
    surnameLabel.grid(row=5, padx=20, pady=10, sticky=W)
    surnameEntry = Entry(formFrame, width=50)
    surnameEntry.grid(row=5, column=1, padx=20)
    # Validation / feedback message area and action buttons.
    message = Label(formFrame, text="", font="sans-serif 10", foreground="black", background="red")
    message.grid(row=6, column=1, padx=5, pady=5)
    saveButton = Button(formFrame, text="Save", command=save, width=10, background="green", foreground="white", font="Sans-Serif 15 bold")
    saveButton.grid(row=7, column=0, padx=30, pady=20)
    cancelButton = Button(formFrame, text="Cancel", command=window.destroy, width=10, background="darkred", foreground="white", font="Sans-Serif 15 bold")
    cancelButton.grid(row=7, column=1, padx=10, pady=20)
    window.mainloop()
38591350662 | import turtle
import sys
print("Let's play pool!")
wn=turtle.Screen()
wn.bgcolor("DarkSeaGreen4")
wn.title("Pool!")
start = 0
from tkinter import * # Importing gui module
def button_function():  # Quit button callback: exit the program
    sys.exit()
def play():  # Play button callback: record that the player chose to start
    global start
    start = 1  # bug fix: 'start == 1' was a comparison, not an assignment
screen = Tk()  # Creating a screen
# bug fix: 'command=lambda:play' only returned the function without calling it
button_quit = Button(screen, text="Play", command=play)  # Creating a button
button_quit.pack()  # Putting the button on the screen
button2 = Button(screen, text="Quit", command=button_function)  # Creating another button
button2.pack()  # Putting that button on the screen
# bug fix: 'screen.mainloop' without parentheses never ran the menu; note
# that mainloop() now blocks until the menu window is closed.
screen.mainloop()  # Opening the screen
import pygame
from math import *
import random
pygame.init()
width = 660
height = 360
outerHeight = 400
margin = 30
display = pygame.display.set_mode((width, outerHeight))
pygame.display.set_caption("8 Ball Pool")
clock = pygame.time.Clock()
background = ("DarkSeaGreen1")
# Ball Class
stickColor = (249, 231, 159)
colors = ["yellow", "blue", "red", "purple", "orange", "green", "brown", "black", "yellow", "blue", "red", "purple", "orange", "green", "brown","CornflowerBlue"]
balls = []
noBalls = 15
radius = 10
friction = 0.1
# Ball Class
class Ball:
    """A pool ball: position (centre), heading angle in degrees, speed,
    colour and the number rendered on top of it."""
    def __init__(self, x, y, speed, color, angle, ballNum):
        # Incoming x/y are the top-left corner; store the centre instead.
        self.x = x + radius
        self.y = y + radius
        self.color = color
        self.angle = angle
        self.speed = speed
        self.ballNum = ballNum
        self.font = pygame.font.SysFont("Agency FB", 10)
    # Draws Balls on Display Window
    def draw(self, x, y):
        pygame.draw.ellipse(display, self.color, (x - radius, y - radius, radius*2, radius*2))
        if self.color == "black" or self.ballNum == "cue":
            ballNo = self.font.render(str(self.ballNum), True, "CornflowerBlue")
            display.blit(ballNo, (x - 5, y - 5))
        else:
            ballNo = self.font.render(str(self.ballNum), True, "CornflowerBlue")
            # Two-digit numbers get a slightly wider offset to stay centred.
            if self.ballNum > 9:
                display.blit(ballNo, (x - 6, y - 5))
            else:
                display.blit(ballNo, (x - 5, y - 5))
    # Moves the Ball around the Screen
    def move(self):
        # Apply friction each frame; the ball stops when speed hits zero.
        self.speed -= friction
        if self.speed <= 0:
            self.speed = 0
        self.x = self.x + self.speed*cos(radians(self.angle))
        self.y = self.y + self.speed*sin(radians(self.angle))
        # Cushion bounces: clamp to the rail and mirror the heading
        # (180 - angle for vertical rails, 360 - angle for horizontal ones).
        if not (self.x < width - radius - margin):
            self.x = width - radius - margin
            self.angle = 180 - self.angle
        if not(radius + margin < self.x):
            self.x = radius + margin
            self.angle = 180 - self.angle
        if not (self.y < height - radius - margin):
            self.y = height - radius - margin
            self.angle = 360 - self.angle
        if not(radius + margin < self.y):
            self.y = radius + margin
            self.angle = 360 - self.angle
# Pocket Class
class Pockets:
    """A table pocket: a circle that removes balls falling inside it."""
    def __init__(self, x, y, color):
        self.r = margin/2
        self.x = x + self.r + 10
        self.y = y + self.r + 10
        self.color = color
    # Draws the Pockets on Pygame Window
    def draw(self):
        pygame.draw.ellipse(display, self.color, (self.x - self.r, self.y - self.r, self.r*2, self.r*2))
    # Checks if ball has entered the Hole
    def checkPut(self):
        # Rebinds the module-level ball list after removing potted balls.
        global balls
        ballsCopy = balls[:]
        for i in range(len(balls)):
            dist = ((self.x - balls[i].x)**2 + (self.y - balls[i].y)**2)**0.5
            if dist < self.r + radius:
                if balls[i] in ballsCopy:
                    if balls[i].ballNum == 8:
                        # Potting the 8-ball ends the game.
                        # NOTE(review): gameOver() is not defined in this
                        # part of the file — confirm it exists elsewhere.
                        gameOver()
                    else:
                        ballsCopy.remove(balls[i])
        balls = ballsCopy[:]
balls = ballsCopy[:]
#NEW CODE
def checkCueCollision(cueBall):
    """Elastic-collision response between the cue ball and every other ball.

    NOTE(review): this whole function is shadowed by the identically-named
    module-level definition further down the file, and the nested
    applyForce/draw defs below are dead code — they look like cue-stick
    class methods pasted at the wrong indentation.  Confirm intent before
    relying on any of this block.
    """
    for i in range(len(balls)):
        if collision(cueBall, balls[i]):
            if balls[i].x == cueBall.x:
                # Vertical line of centres; angleIncline is computed but unused.
                angleIncline = 2*90
            else:
                # Exchange velocity components along the collision tangent.
                u1 = balls[i].speed
                u2 = cueBall.speed
                balls[i].speed = ((u1*cos(radians(balls[i].angle)))**2 + (u2*sin(radians(cueBall.angle)))**2)**0.5
                cueBall.speed = ((u2*cos(radians(cueBall.angle)))**2 + (u1*sin(radians(balls[i].angle)))**2)**0.5
                tangent = degrees((atan((balls[i].y - cueBall.y)/(balls[i].x - cueBall.x)))) + 90
                angle = tangent + 90
                balls[i].angle = (2*tangent - balls[i].angle)
                cueBall.angle = (2*tangent - cueBall.angle)
                # Separate the balls so they do not immediately re-collide.
                balls[i].x += (balls[i].speed)*sin(radians(angle))
                balls[i].y -= (balls[i].speed)*cos(radians(angle))
                cueBall.x -= (cueBall.speed)*sin(radians(angle))
                cueBall.y += (cueBall.speed)*cos(radians(angle))
    # Applies force to Cue Ball
    # NOTE(review): nested (never-called) defs begin here; applyForce is
    # even defined twice.  Presumably intended as methods of a cue-stick
    # class that is missing from this file.
    def applyForce(self, cueBall, force):
        cueBall.angle = self.tangent
        cueBall.speed = force
    # Applies force to Cue Ball
    def applyForce(self, cueBall, force):
        cueBall.angle = self.tangent
        cueBall.speed = force
    # Draws Cue Stick on Pygame Window
    def draw(self, cuex, cuey):
        self.x, self.y = pygame.mouse.get_pos()
        self.tangent = (degrees(atan2((cuey - self.y), (cuex - self.x))))
        pygame.draw.line(display, "brown", (cuex + self.length*cos(radians(self.tangent)), cuey + self.length*sin(radians(self.tangent))), (cuex, cuey), 1)
        pygame.draw.line(display, self.color, (self.x, self.y), (cuex, cuey), 3)
# Checks Collision
def collision(ball1, ball2):
    """Return True when the two balls overlap (centre distance <= 2 * radius)."""
    dist = ((ball1.x - ball2.x)**2 + (ball1.y - ball2.y)**2)**0.5
    # Idiom fix: return the comparison directly instead of if/else True/False.
    return dist <= radius*2
# Checks if Cue Ball hits any Ball
def checkCueCollision(cueBall):
    """Collision response between the cue ball and every ball in `balls`.

    NOTE(review): this definition shadows the earlier module-level
    checkCueCollision; this later one is the version actually in effect.
    """
    for i in range(len(balls)):
        if collision(cueBall, balls[i]):
            if balls[i].x == cueBall.x:
                # Vertical line of centres; angleIncline is computed but unused.
                angleIncline = 2*90
            else:
                # Exchange velocity components along the collision tangent.
                u1 = balls[i].speed
                u2 = cueBall.speed
                balls[i].speed = ((u1*cos(radians(balls[i].angle)))**2 + (u2*sin(radians(cueBall.angle)))**2)**0.5
                cueBall.speed = ((u2*cos(radians(cueBall.angle)))**2 + (u1*sin(radians(balls[i].angle)))**2)**0.5
                tangent = degrees((atan((balls[i].y - cueBall.y)/(balls[i].x - cueBall.x)))) + 90
                angle = tangent + 90
                balls[i].angle = (2*tangent - balls[i].angle)
                cueBall.angle = (2*tangent - cueBall.angle)
                # Separate the balls so they do not immediately re-collide.
                balls[i].x += (balls[i].speed)*sin(radians(angle))
                balls[i].y -= (balls[i].speed)*cos(radians(angle))
                cueBall.x -= (cueBall.speed)*sin(radians(angle))
                cueBall.y += (cueBall.speed)*cos(radians(angle))
# Checks Collision Between Balls
def checkCollision():
    """Pairwise collision response between all object balls.

    Each unordered pair (i, j) is examined once (inner loop runs j > i).
    Response mirrors checkCueCollision: recompute speeds, reflect headings
    about the collision tangent, and nudge the pair apart.
    """
    for i in range(len(balls)):
        for j in range(len(balls) - 1, i, -1):
            if collision(balls[i], balls[j]):
                if balls[i].x == balls[j].x:
                    # Vertical line of centers: atan() below would divide by zero.
                    # NOTE(review): angleIncline is never used, so this case
                    # gets no collision response -- confirm intent.
                    angleIncline = 2*90
                else:
                    u1 = balls[i].speed
                    u2 = balls[j].speed
                    # New speeds from cos/sin components of the old speeds
                    balls[i].speed = ((u1*cos(radians(balls[i].angle)))**2 + (u2*sin(radians(balls[j].angle)))**2)**0.5
                    balls[j].speed = ((u2*cos(radians(balls[j].angle)))**2 + (u1*sin(radians(balls[i].angle)))**2)**0.5
                    tangent = degrees((atan((balls[i].y - balls[j].y)/(balls[i].x - balls[j].x)))) + 90
                    angle = tangent + 90
                    # Reflect both headings about the tangent
                    balls[i].angle = (2*tangent - balls[i].angle)
                    balls[j].angle = (2*tangent - balls[j].angle)
                    # Separate the pair along the collision normal
                    balls[i].x += (balls[i].speed)*sin(radians(angle))
                    balls[i].y -= (balls[i].speed)*cos(radians(angle))
                    balls[j].x -= (balls[j].speed)*sin(radians(angle))
                    balls[j].y += (balls[j].speed)*cos(radians(angle))
def border():
    """Draw the four brown rails framing the playing field."""
    rails = (
        (0, 0, width, 30),                # top rail
        (0, 0, 30, height),               # left rail
        (width - 30, 0, width, height),   # right rail
        (0, height - 30, width, height),  # bottom rail
    )
    for rail in rails:
        pygame.draw.rect(display, "brown", rail)
def score():
    """Render the score strip below the table: a swatch of every remaining
    ball plus a text counter."""
    font = pygame.font.SysFont("Agency FB", 30)
    # Dark background strip under the playing field
    pygame.draw.rect(display, (51, 51, 51), (0, height, width, outerHeight))
    # One miniature per remaining ball, laid out left to right
    for idx, ball in enumerate(balls):
        ball.draw((idx + 1)*2*(radius + 1), height + radius + 10)
    text = font.render("Remaining Balls: " + str(len(balls)), True, stickColor)
    display.blit(text, (width/2 + 50, height + radius/2))
def reset():
    """Re-rack: rebuild the global ball list (balls 1..15) in the standard
    triangle layout and reset the remaining-ball counter."""
    global balls, noBalls
    noBalls = 15
    s = 70  # x coordinate of the rack's leftmost column
    # (dx, dy) offset of each ball in units of the ball radius, relative to
    # (s, height/2); listed in ball-number order 1..15.
    layout = [
        (0, -4), (2, -3), (0, -2), (4, -2), (2, -1),
        (0, 0), (6, -1), (4, 0), (10, 0), (6, 1),
        (2, 1), (0, 2), (4, 2), (2, 3), (0, 4),
    ]
    balls = [
        Ball(s + dx*radius, height/2 + dy*radius, 0, colors[num], 0, num + 1)
        for num, (dx, dy) in enumerate(layout)
    ]
def gameOver():
    """End-of-game screen shown when the 8-ball is potted.

    If only one ball remains (presumably just the 8-ball), the player won;
    otherwise the 8-ball went down early and the player lost. Blocks in its
    own event loop forever: Q or window-close quits, R restarts by calling
    poolTable() (note: restarts recurse rather than return, so each restart
    adds a stack frame).
    """
    font = pygame.font.SysFont("Agency FB", 75)
    if len(balls) == 1:
        text = font.render("You Won!", True, (133, 193, 233))
    else:
        text = font.render("You Lost! Black in Hole!", True, (241, 148, 138))
    while True:
        for event in pygame.event.get():
            if event.type == pygame.QUIT:
                close()
            if event.type == pygame.KEYDOWN:
                if event.key == pygame.K_q:
                    close()
                if event.key == pygame.K_r:
                    poolTable()
        display.blit(text, (50, height/2))
        pygame.display.update()
        clock.tick()
def close():
    """Shut down pygame and terminate the process."""
    pygame.quit()
    sys.exit()
# Main Function
def poolTable():
    """Start a fresh game: re-rack the balls and build the six pockets
    (four corners plus the two side pockets).

    NOTE(review): in this excerpt the function ends right after building
    the pocket list; the main draw/event loop presumably follows in the
    full file -- confirm before editing.
    """
    loop = True
    reset()
    noPockets = 6
    pockets = []
    # Top row: left corner, middle, right corner
    p1 = Pockets(0, 0, "black")
    p2 = Pockets(width/2 - p1.r*2, 0, "black")
    p3 = Pockets(width - p1.r - margin - 4, 0, "black")
    # Bottom row: left corner, middle, right corner
    p4 = Pockets(0, height - margin - 5 - p1.r, "black")
    p5 = Pockets(width/2 - p1.r*2, height - margin - 5 - p1.r, "black")
    p6 = Pockets(width - p1.r - margin - 5, height - margin - 5 - p1.r, "black")
    pockets.append(p1)
    pockets.append(p2)
    pockets.append(p3)
    pockets.append(p4)
    pockets.append(p5)
    pockets.append(p6)
# Entry point: launch the game; gameOver()/close() handle restart and exit.
poolTable()
wn.mainloop()
20807981243 | import logging
from datetime import datetime
from db.models import LogMessage
import traceback
from sqlalchemy.orm import sessionmaker
class LogDBHandler(logging.Handler):
    """Logging handler that persists each record as a LogMessage row through
    the provided SQLAlchemy session."""

    def __init__(self, session):
        """Store the session and configure formatter and level.

        The format string expects an ``AmzAccount_Group`` key supplied via
        the logging ``extra`` dict (see create_db_logger).
        """
        super().__init__()
        self.setFormatter(logging.Formatter(
            '%(AmzAccount_Group)s - %(asctime)s - %(pathname)s - %(levelname)s - %(message)s'))
        self.session = session
        self.setLevel(logging.DEBUG)

    def emit(self, record):
        """Write one log record to the database.

        Commits immediately so the row survives even if the process dies.
        """
        # Side effect of format(): populates record.message (and asctime).
        self.format(record)
        trace = None
        exc = record.exc_info
        if exc:
            # Bug fix: traceback.format_exc() formats the *currently handled*
            # exception and yields 'NoneType: None' whenever emit() runs
            # outside an except block (e.g. logger.error(..., exc_info=saved)).
            # Format the exc_info captured on the record instead.
            trace = ''.join(traceback.format_exception(*exc))
        message = LogMessage(
            AmzAccount_Group=record.__dict__['AmzAccount_Group'],
            AmzAccount_ID_Internal=record.__dict__['AmzAccount_ID_Internal'],
            Timestamp=datetime.fromtimestamp(record.__dict__['created']),
            SenderModule=record.__dict__['pathname'],
            MessageType=record.__dict__['levelname'],
            MessageBody=record.__dict__['message'],
            Traceback=trace
        )
        try:
            self.session.add(message)
            self.session.commit()
        except Exception:
            # A logging handler must never let logging crash the application;
            # delegate to the standard error hook (prints to stderr unless
            # logging.raiseExceptions is False).
            self.handleError(record)
def create_db_logger(db_engine, extra):
    """Build a logger whose records are written to the given database engine.

    Returns a LoggerAdapter that injects ``extra`` (e.g. AmzAccount_Group)
    into every record, as required by LogDBHandler's format string.
    """
    session = sessionmaker(bind=db_engine)()
    db_logger = logging.getLogger('LogDBHandler')
    db_logger.setLevel(logging.DEBUG)
    db_logger.addHandler(LogDBHandler(session))
    return logging.LoggerAdapter(logger=db_logger, extra=extra)
74675206736 | import itertools
import copy
import numpy as np
from mstk import logger
from mstk.chem.rdkit import create_mol_from_smiles
from mstk.chem.element import Element
from mstk.forcefield.ffterm import *
from .atom import Atom
from .virtualsite import *
from .connectivity import *
from .unitcell import UnitCell
from .residue import Residue
from .geometry import find_clusters, find_clusters_consecutive
class Molecule():
    '''
    A molecule is defined as atoms and the connectivity between them.

    The term `molecule` is not strictly a chemical molecule.
    Some atoms may not be connected to any other atoms in the same molecule.
    However, there can not be bonds connecting atoms that belong to different molecules.
    Drude particles and virtual sites are also considered as atoms.
    All bonds, angles, dihedrals and impropers should be defined explicitly.

    Parameters
    ----------
    name : str

    Attributes
    ----------
    id : int
        Index of this molecule in the topology. -1 means the information
        hasn't been updated by the topology yet.
    name : str
        Name of the molecule, not necessarily unique
    '''
def __init__(self, name='UNK'):
    self.id = -1  # index in the parent topology; -1 until assigned
    self._name = name
    self._topology = None  # set when the molecule is added to a Topology
    self._atoms: [Atom] = []
    self._bonds: [Bond] = []
    self._angles: [Angle] = []
    self._dihedrals: [Dihedral] = []
    self._impropers: [Improper] = []
    self._rdmol = None  # cached RDKit Mol, used for SMARTS-based typing
    self._is_rdmol_valid = False  # invalidated whenever atoms or bonds change
    self._default_residue = Residue(name)  # implicit residue holding unassigned atoms
    self._added_residues = []  # residues explicitly created via add_residue()
def __repr__(self):
    return f'<Molecule: {self.name} {self.id}>'

@property
def name(self):
    '''Name of the molecule (str); not necessarily unique.'''
    return self._name

@name.setter
def name(self, val):
    # Keep the implicit default residue's name in sync with the molecule name
    self._name = val
    self._default_residue.name = val
def __deepcopy__(self, memodict={}):
    '''
    Deep-copy this molecule: atoms, bonds, angles, dihedrals, impropers,
    virtual sites and residues are all rebuilt on fresh objects.

    The topology will not be copied, and `id` will not be copied because it
    relies on other molecules in the topology.
    '''
    # NOTE: `memodict={}` is the conventional __deepcopy__ signature; the
    # dict is never read here, so the mutable-default pitfall is moot.
    mol = Molecule(self.name)
    for atom in self._atoms:
        mol.add_atom(copy.deepcopy(atom))
    # Rebuild connectivities by index, pointing at the copied atoms
    for bond in self._bonds:
        idx1 = bond.atom1.id_in_mol
        idx2 = bond.atom2.id_in_mol
        mol.add_bond(mol._atoms[idx1], mol._atoms[idx2], bond.order)
    for angle in self._angles:
        idx1 = angle.atom1.id_in_mol
        idx2 = angle.atom2.id_in_mol
        idx3 = angle.atom3.id_in_mol
        mol.add_angle(mol._atoms[idx1], mol._atoms[idx2], mol._atoms[idx3])
    for dihedral in self._dihedrals:
        idx1 = dihedral.atom1.id_in_mol
        idx2 = dihedral.atom2.id_in_mol
        idx3 = dihedral.atom3.id_in_mol
        idx4 = dihedral.atom4.id_in_mol
        mol.add_dihedral(mol._atoms[idx1], mol._atoms[idx2], mol._atoms[idx3], mol._atoms[idx4])
    for improper in self._impropers:
        idx1 = improper.atom1.id_in_mol
        idx2 = improper.atom2.id_in_mol
        idx3 = improper.atom3.id_in_mol
        idx4 = improper.atom4.id_in_mol
        mol.add_improper(mol._atoms[idx1], mol._atoms[idx2], mol._atoms[idx3], mol._atoms[idx4])
    # add_atom and add_bond invalidated the copy's rdmol cache; restore it
    # directly from this molecule when our cached copy is valid
    if self._is_rdmol_valid:
        from rdkit import Chem
        mol._rdmol = Chem.Mol(self._rdmol)
        mol._is_rdmol_valid = True
    # Recreate virtual sites so they reference the copied parent atoms
    for i, atom in enumerate(self._atoms):
        vsite = atom.virtual_site
        if vsite is not None:
            new_atom = mol.atoms[i]
            new_parents = [mol.atoms[p.id_in_mol] for p in vsite.parents]
            new_atom.virtual_site = VirtualSite.create(vsite.__class__.__name__, new_parents, vsite.parameters)
    # Recreate explicitly-added residues with the copied atoms
    for residue in self._added_residues:
        atoms = [mol.atoms[atom.id_in_mol] for atom in residue.atoms]
        mol.add_residue(residue.name, atoms)
    return mol
@staticmethod
def from_smiles(smiles):
    '''
    Initialize a molecule from a SMILES string.

    RDKit is used for parsing. Hydrogen atoms are created and positions
    for all atoms are generated automatically. An optional second token
    names the molecule, e.g. 'CCCC butane'; otherwise the SMILES itself
    is used as the name.

    Parameters
    ----------
    smiles : str

    Returns
    -------
    molecule : Molecule
    '''
    parts = smiles.strip().split()
    name = parts[1] if len(parts) > 1 else parts[0]
    rdmol = create_mol_from_smiles(parts[0])
    return Molecule.from_rdmol(rdmol, name)
@staticmethod
def from_rdmol(rdmol, name=None):
    '''
    Initialize a molecule from a RDKit Mol object.

    If the RDKit Mol has conformers, the positions of the first conformer
    will be assigned to the atoms.

    Parameters
    ----------
    rdmol : rdkit.Chem.Mol
    name : str
        The name of the molecule. If not provided, the formula will be used as the name.

    Returns
    -------
    molecule : Molecule
    '''
    try:
        from rdkit import Chem
        from rdkit.Chem.rdMolDescriptors import CalcMolFormula
    except ImportError:
        raise ImportError('RDKit not found')

    # Work on a copy so the caller's Mol is left untouched
    rdmol = Chem.Mol(rdmol)
    # don't set aromaticity, kekulized bonds are easier to manipulate
    Chem.SanitizeMol(rdmol, Chem.SANITIZE_ALL ^ Chem.SANITIZE_SETAROMATICITY)
    mol = Molecule()
    for i, a in enumerate(rdmol.GetAtoms()):
        atom = Atom()
        element = Element(a.GetAtomicNum())
        # Name atoms symbol + 1-based index, e.g. C1, H2, ...
        atom.name = element.symbol + str(i + 1)
        atom.symbol = element.symbol
        atom.mass = element.mass
        atom.formal_charge = a.GetFormalCharge()
        mol.add_atom(atom)
    if rdmol.GetNumConformers() > 0:
        for atom, pos in zip(mol.atoms, rdmol.GetConformer().GetPositions()):
            atom.position = pos / 10  # convert A to nm
    for b in rdmol.GetBonds():
        atom1 = mol.atoms[b.GetBeginAtomIdx()]
        atom2 = mol.atoms[b.GetEndAtomIdx()]
        # Map RDKit bond types onto mstk bond orders
        d_bond_order = {
            Chem.rdchem.BondType.UNSPECIFIED: Bond.Order.UNSPECIFIED,
            Chem.rdchem.BondType.SINGLE    : Bond.Order.SINGLE,
            Chem.rdchem.BondType.DOUBLE    : Bond.Order.DOUBLE,
            Chem.rdchem.BondType.TRIPLE    : Bond.Order.TRIPLE,
        }
        try:
            order = d_bond_order[b.GetBondType()]
        except KeyError:
            logger.warning('Only single/double/triple/aromatic bond supported. Will discard bond order')
            order = Bond.Order.UNSPECIFIED
        mol.add_bond(atom1, atom2, order)
    mol.generate_angle_dihedral_improper()
    # set aromaticity so SMARTS matching works correctly for aromatic bonds
    Chem.SetAromaticity(rdmol)
    mol._rdmol = rdmol
    mol._is_rdmol_valid = True
    if name is not None:
        mol.name = name
    else:
        mol.name = CalcMolFormula(rdmol)
    return mol
@property
def rdmol(self):
    '''
    The `rdkit.Chem.Mol` object associated with this molecule.

    It is required by the ZftTyper typing engine, which performs SMARTS
    matching on the molecule. The cached Mol is valid if the molecule was
    initialized from SMILES or a RDKit Mol; otherwise it is (re)built from
    the atoms and bonds on first access.

    Returns
    -------
    rdmol : rdkit.Chem.Mol
    '''
    if not self._is_rdmol_valid:
        self._construct_rdmol()
    return self._rdmol
def _construct_rdmol(self):
    '''
    Construct a RDKit molecule from atoms and bonds. The positions will not be preserved.
    '''
    try:
        from rdkit import Chem
    except ImportError:
        raise ImportError('RDKit not found')

    if any(b.order == Bond.Order.UNSPECIFIED for b in self.bonds):
        logger.warning(f'Not all bond orders are specified in {self}')
    rwmol = Chem.RWMol()
    for atom in self.atoms:
        rdatom = Chem.Atom(atom.symbol)
        rdatom.SetFormalCharge(atom.formal_charge)
        rdatom.SetNoImplicit(True)  # disable implicit Hs. Otherwise cannot handle radicals
        rwmol.AddAtom(rdatom)
    for bond in self.bonds:
        # Map mstk bond orders onto RDKit bond types
        d_bond_order = {
            Bond.Order.UNSPECIFIED: Chem.rdchem.BondType.UNSPECIFIED,
            Bond.Order.SINGLE     : Chem.rdchem.BondType.SINGLE,
            Bond.Order.DOUBLE     : Chem.rdchem.BondType.DOUBLE,
            Bond.Order.TRIPLE     : Chem.rdchem.BondType.TRIPLE,
        }
        rwmol.AddBond(bond.atom1.id_in_mol, bond.atom2.id_in_mol, d_bond_order[bond.order])
    Chem.SanitizeMol(rwmol)
    self._rdmol = rwmol.GetMol()
    self._is_rdmol_valid = True
@property
def topology(self):
    '''
    The topology this molecule belongs to, or None if unattached.

    Returns
    -------
    topology : Topology
    '''
    return self._topology
def add_atom(self, atom, residue=None, index=None, update_topology=True):
    '''
    Add an atom to this molecule.

    The id_in_mol attribute of all atoms will be updated after insertion.
    TODO Make residue assignment more robust

    Parameters
    ----------
    atom : Atom
    residue : Residue, Optional
        Add the atom to this residue.
        Make sure the residue belongs to this molecule. For performance concern, this is not checked.
        If set to None, the atom will be added to the default residue.
    index : int, Optional
        If None, the new atom will be the last atom. Otherwise, it will be inserted in front of index-th atom.
    update_topology : bool
        If True, the topology this molecule belongs to will update its atom list and assign id for all atoms and residues.
        Otherwise, you have to re-init the topology manually so that the topological information is correct.
    '''
    atom._molecule = self
    if index is None:
        # Appending: only the new atom needs an index
        self._atoms.append(atom)
        atom.id_in_mol = len(self._atoms) - 1
    else:
        # Inserting shifts everything after `index`; renumber all atoms
        self._atoms.insert(index, atom)
        for i, at in enumerate(self._atoms):
            at.id_in_mol = i
    self._is_rdmol_valid = False  # atom list changed, cached RDKit mol is stale
    residue = residue or self._default_residue
    residue._add_atom(atom)
    # re-index residues because the residue starts to count
    if residue.n_atom == 1:
        self._refresh_residues(update_topology)
    if self._topology is not None and update_topology:
        self._topology.update_molecules(self._topology.molecules, deepcopy=False)
def remove_atom(self, atom, update_topology=True):
    '''
    Remove an atom and all the bonds connected to the atom from this molecule.

    The atom will also be removed from its residue.
    The id_in_mol attribute of all atoms will be updated after removal.
    The angles, dihedrals and impropers involving this atom are untouched.
    Therefore, you may call `generate_angle_dihedral_improper` to refresh the connectivity.

    # TODO This operation is extremely slow

    Parameters
    ----------
    atom : Atom
    update_topology : bool
        If update_topology is True, the topology this molecule belongs to will update its atom list and assign id for all atoms and residues.
        Otherwise, you have to re-init the topology manually so that the topological information is correct.
    '''
    # Iterate over a copy: remove_connectivity mutates atom._bonds
    for bond in atom._bonds[:]:
        self.remove_connectivity(bond)
    self._atoms.remove(atom)
    atom._molecule = None
    for i, at in enumerate(self._atoms):
        at.id_in_mol = i
    self._is_rdmol_valid = False  # atom list changed, cached RDKit mol is stale
    residue = atom.residue
    residue._remove_atom(atom)
    # re-index residues because the residue this atom belongs to becomes empty
    if residue.n_atom == 0:
        self._refresh_residues(update_topology)
    if self._topology is not None and update_topology:
        self._topology.update_molecules(self._topology.molecules, deepcopy=False)
def remove_non_polar_hydrogens(self, update_topology=True):
    '''
    Remove single-coordinated hydrogen atoms bonded to C or Si atoms.

    The mass and charge of each removed hydrogen are folded into its heavy
    neighbor (united-atom style), and every bond/angle/dihedral/improper
    involving a removed hydrogen is dropped.

    # TODO This operation is extremely slow

    Parameters
    ----------
    update_topology : bool
        If True, the topology this molecule belongs to will update its atom list and assign id for all atoms and residues.
        Otherwise, you have to re-init the topology manually so that the topological information is correct.

    Returns
    -------
    ids_removed : list of int
        The `id_in_mol` (prior to removal) of the removed hydrogen atoms.
        (Docstring fix: this was previously described as the *number* of
        atoms removed, but a list of ids has always been returned.)
    '''
    hydrogens = []
    for atom in self.atoms[:]:
        if atom.symbol != 'H' or len(atom.bonds) != 1:
            continue
        neigh = atom.bond_partners[0]
        if neigh.symbol not in ('C', 'Si'):
            continue
        # Fold the hydrogen's mass and charge into the heavy atom
        neigh.mass += atom.mass
        neigh.charge += atom.charge
        hydrogens.append(atom)
    ids_hydrogens = [atom.id_in_mol for atom in hydrogens]
    # Perf fix: use a set for membership while scanning every connectivity,
    # instead of intersecting against a list for each one.
    ids_hydrogens_set = set(ids_hydrogens)
    for conn in self.bonds[:] + self.angles[:] + self.dihedrals[:] + self.impropers[:]:
        if any(a.id_in_mol in ids_hydrogens_set for a in conn.atoms):
            self.remove_connectivity(conn)
    for atom in hydrogens:
        self.remove_atom(atom, update_topology=False)
    if self._topology is not None and update_topology:
        self._topology.update_molecules(self._topology.molecules, deepcopy=False)
    return ids_hydrogens
def add_residue(self, name, atoms, update_topology=True):
    '''
    Group the given atoms into a brand-new residue named `name`.

    Each atom is detached from whatever residue it currently belongs to.
    The atoms must belong to this molecule; for performance reasons this
    is not checked.

    Parameters
    ----------
    name : str
    atoms : list of Atom
    update_topology : bool
        If True, the topology this molecule belongs to will assign `id` for all residues.
        Otherwise, you have to assign the `id` of all residues in the topology manually.

    Returns
    -------
    residue : Residue
    '''
    new_residue = Residue(name)
    for atom in atoms:
        atom.residue._remove_atom(atom)
        new_residue._add_atom(atom)
    self._added_residues.append(new_residue)
    # Drop any residues this emptied out and renumber
    self._refresh_residues(update_topology)
    return new_residue
def remove_residue(self, residue, update_topology=True):
    '''
    Remove a residue from this molecule and move its atoms back into the
    default residue.

    The residue must belong to this molecule; for performance reasons this
    is not checked.

    Parameters
    ----------
    residue : Residue
    update_topology : bool
        If True, the topology this molecule belongs to will assign `id` for all residues.
        Otherwise, you have to assign the `id` of all residues in the topology manually.
    '''
    # Iterate a copy; _add_atom presumably re-homes each atom, which can
    # mutate residue.atoms while we loop.
    for member in residue.atoms[:]:
        self._default_residue._add_atom(member)
    self._added_residues.remove(residue)
    self._refresh_residues(update_topology)
def _refresh_residues(self, update_topology=True):
    '''
    Drop empty residues and renumber `id_in_mol` for the survivors.

    Parameters
    ----------
    update_topology : bool
        If True, the topology this molecule belongs to will assign `id` for all residues.
    '''
    # Filter in place so existing references to the list stay valid
    self._added_residues[:] = [r for r in self._added_residues if r.n_atom > 0]
    for idx, res in enumerate(self.residues):
        res.id_in_mol = idx
    if self._topology is not None and update_topology:
        for idx, res in enumerate(self._topology.residues):
            res.id = idx
def add_bond(self, atom1, atom2, order=Bond.Order.UNSPECIFIED, check_existence=False):
    '''
    Add a bond between two atoms.

    Make sure that both these two atoms belong to this molecule.
    For performance issue, this is not checked.

    Parameters
    ----------
    atom1 : Atom
    atom2 : Atom
    order : Bond.Order
        Bond order; defaults to UNSPECIFIED.
    check_existence : bool
        If set to True and there is already a bond between these two atoms, then do nothing and return None

    Returns
    -------
    bond : [Bond, None]
    '''
    bond = Bond(atom1, atom2, order)
    if check_existence and any(b.equals(bond) for b in self._bonds):
        return None

    self._bonds.append(bond)
    # Each atom also keeps its own list of the bonds it participates in
    atom1._bonds.append(bond)
    atom2._bonds.append(bond)
    self._is_rdmol_valid = False  # bond list changed, cached RDKit mol is stale
    return bond
def add_angle(self, atom1, atom2, atom3, check_existence=False):
    '''
    Add an angle between three atoms. The second atom is the central atom.

    Make sure that these three atoms belong to this molecule.
    For performance issue, this is not checked.

    Parameters
    ----------
    atom1 : Atom
    atom2 : Atom
        The central atom of the angle.
    atom3 : Atom
    check_existence : bool
        If set to True and there is already an angle between these three atoms, then do nothing and return None

    Returns
    -------
    angle : [Angle, None]
    '''
    angle = Angle(atom1, atom2, atom3)
    if check_existence and any(a.equals(angle) for a in self._angles):
        return None

    self._angles.append(angle)
    return angle
def add_dihedral(self, atom1, atom2, atom3, atom4, check_existence=False):
    '''
    Add a dihedral between four atoms (connected in the order given).

    Make sure that these four atoms belong to this molecule.
    For performance issue, this is not checked.

    Parameters
    ----------
    atom1 : Atom
    atom2 : Atom
    atom3 : Atom
    atom4 : Atom
    check_existence : bool
        If set to True and there is already a dihedral between these four atoms, then do nothing and return None

    Returns
    -------
    dihedral : [Dihedral, None]
    '''
    dihedral = Dihedral(atom1, atom2, atom3, atom4)
    if check_existence and any(d.equals(dihedral) for d in self._dihedrals):
        return None

    self._dihedrals.append(dihedral)
    return dihedral
def add_improper(self, atom1, atom2, atom3, atom4, check_existence=False):
    '''
    Add an improper between four atoms. The first atom is the central atom.

    Make sure that these four atoms belong to this molecule.
    For performance issue, this is not checked.

    Parameters
    ----------
    atom1 : Atom
        The central atom of the improper.
    atom2 : Atom
    atom3 : Atom
    atom4 : Atom
    check_existence : bool
        If set to True and there is already an improper between these four atoms, then do nothing and return None

    Returns
    -------
    improper : [Improper, None]
    '''
    improper = Improper(atom1, atom2, atom3, atom4)
    if check_existence and any(i.equals(improper) for i in self._impropers):
        return None

    self._impropers.append(improper)
    return improper
def remove_connectivity(self, connectivity):
    '''
    Remove a bond, angle, dihedral or improper from this molecule.

    The connectivity must belong to this molecule; for performance reasons
    this is not checked. Note that removing a bond leaves the derived
    angles/dihedrals/impropers in place -- call
    `generate_angle_dihedral_improper` afterwards to rebuild them.

    # TODO This operation is extremely slow

    Parameters
    ----------
    connectivity : [Bond, Angle, Dihedral, Improper]
    '''
    kind = type(connectivity)
    if kind is Bond:
        self._bonds.remove(connectivity)
        connectivity.atom1._bonds.remove(connectivity)
        connectivity.atom2._bonds.remove(connectivity)
        self._is_rdmol_valid = False  # bond list changed, cached RDKit mol is stale
    elif kind is Angle:
        self._angles.remove(connectivity)
    elif kind is Dihedral:
        self._dihedrals.remove(connectivity)
    elif kind is Improper:
        self._impropers.remove(connectivity)
    else:
        raise Exception('Invalid connectivity')
def is_similar_to(self, other):
    '''
    Check whether this molecule is similar to another molecule.

    Both molecules must contain the same number of atoms and bonds; the
    corresponding atoms must share symbol, type and charge, and each pair
    of corresponding atoms must be bonded to the same partner indices.
    Angles, dihedrals and impropers are not compared.

    Parameters
    ----------
    other : Molecule

    Returns
    -------
    is : bool
    '''
    if self.n_atom != other.n_atom:
        return False
    if self.n_bond != other.n_bond:
        return False
    for mine, theirs in zip(self._atoms, other._atoms):
        if mine.symbol != theirs.symbol or mine.type != theirs.type or mine.charge != theirs.charge:
            return False
        if len(mine._bonds) != len(theirs._bonds):
            return False
        partners_mine = {p.id_in_mol for p in mine.bond_partners}
        partners_theirs = {p.id_in_mol for p in theirs.bond_partners}
        if partners_mine != partners_theirs:
            return False
    return True
def get_adjacency_matrix(self):
    '''
    Build a symmetric boolean adjacency matrix: entry (i, j) is True when
    atoms i and j are bonded, or when one of them is a virtual site and
    the other its first parent atom (virtual sites carry no explicit bond).

    Returns
    -------
    matrix : np.ndarray of bool, shape (n_atom, n_atom)
    '''
    matrix = np.zeros((self.n_atom, self.n_atom), dtype=bool)
    linked = [(bond.atom1, bond.atom2) for bond in self.bonds]
    # there is no bond between a virtual site and its parent; add it here
    linked.extend(self.get_virtual_site_pairs())
    for first, second in linked:
        matrix[first.id_in_mol][second.id_in_mol] = True
        matrix[second.id_in_mol][first.id_in_mol] = True
    return matrix
def get_distance_matrix(self, max_bond=None):
    '''
    Topological distance matrix: entry (i, j) is the number of bonds on the
    shortest path between atoms i and j.

    Parameters
    ----------
    max_bond : int, optional
        If given, stop expanding beyond this many bonds; pairs farther
        apart keep distance 0.
        NOTE(review): without max_bond, an atom with no bonds in a
        multi-atom molecule makes fill_matrix recurse with an empty
        frontier until the recursion limit -- confirm callers never hit
        disconnected atoms, or always pass max_bond.

    Returns
    -------
    mat : np.ndarray of int, shape (n_atom, n_atom)
        Diagonal (and unreached pairs) are 0.
    '''
    # Neighbor sets indexed by atom id
    connections = [set() for _ in range(self.n_atom)]
    for bond in self._bonds:
        a1, a2 = bond.atom1.id_in_mol, bond.atom2.id_in_mol
        connections[a1].add(a2)
        connections[a2].add(a1)

    mat = np.zeros((self.n_atom, self.n_atom), dtype=int)

    # Breadth-first expansion implemented recursively: `neighbors` is the
    # frontier at `level` bonds from `center`; `flags` marks visited atoms.
    def fill_matrix(level, center, neighbors, flags):
        if max_bond and level > max_bond:
            return
        if all(flags):
            return

        for j in neighbors:
            if not flags[j]:
                mat[center][j] = level
                mat[j][center] = level
                flags[j] = True

        neighbors_deeper = set()
        for j in neighbors:
            neighbors_deeper.update(connections[j])
        fill_matrix(level + 1, center, neighbors_deeper, flags)

    for i in range(self.n_atom):
        fill_matrix(1, i, connections[i], [j == i for j in range(self.n_atom)])

    return mat
@property
def n_atom(self):
    '''
    Number of atoms in this molecule.

    Returns
    -------
    n : int
    '''
    return len(self._atoms)

@property
def n_bond(self):
    '''
    Number of bonds in this molecule.

    Returns
    -------
    n : int
    '''
    return len(self._bonds)

@property
def n_angle(self):
    '''
    Number of angles in this molecule.

    Returns
    -------
    n : int
    '''
    return len(self._angles)

@property
def n_dihedral(self):
    '''
    Number of dihedrals in this molecule.

    Returns
    -------
    n : int
    '''
    return len(self._dihedrals)

@property
def n_improper(self):
    '''
    Number of impropers in this molecule.

    Returns
    -------
    n : int
    '''
    return len(self._impropers)

@property
def n_residue(self):
    '''
    Number of residues in this molecule (including the default residue
    when it holds any atom).

    Returns
    -------
    n : int
    '''
    return len(self.residues)
@property
def atoms(self):
    '''
    List of atoms in this molecule.

    Returns
    -------
    atoms : list of Atom
    '''
    return self._atoms

@property
def bonds(self):
    '''
    List of bonds in this molecule.

    Returns
    -------
    bonds : list of Bond
    '''
    return self._bonds

@property
def angles(self):
    '''
    List of angles in this molecule.

    Returns
    -------
    angles : list of Angle
    '''
    return self._angles

@property
def dihedrals(self):
    '''
    List of dihedrals in this molecule.

    Returns
    -------
    dihedrals : list of Dihedral
    '''
    return self._dihedrals

@property
def impropers(self):
    '''
    List of impropers in this molecule.

    Returns
    -------
    impropers : list of Improper
    '''
    return self._impropers

@property
def residues(self):
    '''
    All the residues in this molecule. The implicit default residue is
    included only when it still holds atoms.

    Returns
    -------
    residues : list of Residue
    '''
    if len(self._default_residue.atoms) == 0:
        return self._added_residues
    else:
        return [self._default_residue] + self._added_residues

@property
def has_position(self):
    '''
    Whether or not all the atoms in the molecule have positions.

    Returns
    -------
    has : bool
    '''
    return all(atom.has_position for atom in self.atoms)
def get_drude_pairs(self):
    '''
    Retrieve all the Drude dipole pairs in this molecule.

    A pair is any bond where one endpoint is a Drude particle; the tuple
    is always ordered (parent, drude).

    Returns
    -------
    pairs : list of tuple of Atom
        [(parent, drude)]
    '''
    pairs = []
    for bond in self._bonds:
        first, second = bond.atom1, bond.atom2
        if first.is_drude:
            pairs.append((second, first))
        elif second.is_drude:
            pairs.append((first, second))
    return pairs
def get_virtual_site_pairs(self):
    '''
    Retrieve all the virtual site pairs in this molecule.

    For every atom carrying a virtual site, yields a tuple of the site's
    first parent atom and the virtual-site atom itself.

    Returns
    -------
    pairs : list of tuple of Atom
        [(parent, atom_virtual_site)]
    '''
    return [(atom.virtual_site.parents[0], atom)
            for atom in self._atoms
            if atom.virtual_site is not None]
def get_12_13_14_pairs(self):
    '''
    Retrieve all the 1-2, 1-3 and 1-4 pairs based on the bond information.

    The pairs only concern real atoms; Drude particles are ignored.
    Each pair is sorted internally, and in small rings a pair is reported
    only once at its shortest separation (1-3 pairs exclude 1-2 pairs,
    1-4 pairs exclude both).
    NOTE(review): `sorted` on Atom objects presumably relies on Atom
    defining an ordering -- confirm in atom.py.

    Returns
    -------
    pairs12 : list of tuple of Atom
    pairs13 : list of tuple of Atom
    pairs14 : list of tuple of Atom
    '''
    pair_12_set = set()
    pair_13_set = set()
    pair_14_set = set()

    # 1-3 pairs: the two outer atoms of every angle around a central atom
    for atom in [a for a in self._atoms if not a.is_drude]:
        partners = [p for p in atom.bond_partners if not p.is_drude]
        for a1, a3 in itertools.combinations(partners, 2):
            pair = tuple(sorted([a1, a3]))
            pair_13_set.add(pair)

    # 1-2 pairs: the bonds themselves; 1-4 pairs: outer atoms of dihedrals
    for bond in filter(lambda x: not x.is_drude, self.bonds):
        a2, a3 = bond.atom1, bond.atom2
        pair = tuple(sorted([a2, a3]))
        pair_12_set.add(pair)
        for a1 in [p for p in a2.bond_partners if not p.is_drude]:
            for a4 in [p for p in a3.bond_partners if not p.is_drude]:
                if a1 != a3 and a2 != a4 and a1 != a4:
                    pair = tuple(sorted([a1, a4]))
                    pair_14_set.add(pair)

    # De-duplicate: keep each pair only at its shortest separation
    pair_12_list = list(sorted(pair_12_set))
    pair_13_list = list(sorted(pair_13_set - pair_12_set))
    pair_14_list = list(sorted(pair_14_set - pair_13_set.union(pair_12_set)))
    return pair_12_list, pair_13_list, pair_14_list
def generate_angle_dihedral_improper(self, dihedral=True, improper=True):
    '''
    Generate angles, dihedrals and impropers from bonds.

    The existing angles, dihedrals and impropers will be removed first.
    The atoms and bonds concerning Drude particles will be ignored.

    Parameters
    ----------
    dihedral : bool
        Whether or not to generate dihedrals based on bonds
    improper : bool
        Whether or not to generate impropers based on bonds
    '''
    self._angles = []
    self._dihedrals = []
    self._impropers = []

    # Angles: every pair of partners around a central atom.
    # Impropers: only for exactly three-coordinated centers.
    for atom in [a for a in self._atoms if not a.is_drude]:
        partners = [p for p in atom.bond_partners if not p.is_drude]
        for p1, p2 in itertools.combinations(partners, 2):
            self.add_angle(p1, atom, p2)
        if improper and len(partners) == 3:
            self.add_improper(atom, *sorted(partners))

    if dihedral:
        # Dihedrals: extend every bond (atom2-atom3) by one partner on each
        # side, skipping three-membered back-references.
        for bond in filter(lambda x: not x.is_drude, self._bonds):
            atom2 = bond.atom1
            atom3 = bond.atom2
            partners2 = [p for p in atom2.bond_partners if not p.is_drude]
            partners3 = [p for p in atom3.bond_partners if not p.is_drude]
            for atom1, atom4 in itertools.product(partners2, partners3):
                if atom1 != atom3 and atom2 != atom4 and atom1 != atom4:
                    self.add_dihedral(atom1, atom2, atom3, atom4)
def guess_connectivity_from_ff(self, ff, bond_limit=0.25, bond_tolerance=0.025, angle_tolerance=None,
                               pbc='', cell=None):
    '''
    Guess bonds, angles, dihedrals and impropers from force field.

    It requires that atom types are defined and positions are available.
    The distance between nearby atoms will be calculated.
    If it's smaller than bond_limit, then it will be compared with the equilibrium length in FF.
    The bond will be added if a BondTerm is found in FF and the deviation is smaller than bond_tolerance.
    Then angles will be constructed from bonds. If angle_tolerance is None, all angles will be added.
    If angle_tolerance is set (as degree), then AngleTerm must be provided for these angles.
    The angle will be added only if the deviation between angle and equilibrium value in FF is smaller than angle_tolerance.
    Dihedrals and impropers will be constructed from bonds and be added if relevant terms are presented in FF.

    PBC is supported for determining bonds across the periodic cell.
    pbc can be '', 'x', 'y', 'xy', 'xz', 'xyz', which means check bonds cross specific boundaries.
    cell should also be provided if pbc is not ''.
    TODO Add support for triclinic cell

    Parameters
    ----------
    ff : ForceField
    bond_limit : float
    bond_tolerance : float
    angle_tolerance : float
    pbc : str
    cell : UnitCell
    '''
    if not self.has_position:
        raise Exception('Positions are required for guessing connectivity')
    if any(atom.is_drude for atom in self._atoms):
        raise Exception('Drude particles should be removed before guess connectivity')
    if pbc != '':
        if cell is None or cell.volume == 0:
            raise Exception('PBC required but valid cell not provided')
        elif not cell.is_rectangular:
            raise Exception('Triclinic cell haven\'t been implemented')
        else:
            box = cell.size
    # rebuild the whole connectivity from scratch
    self._bonds = []
    self._angles = []
    self._dihedrals = []
    self._impropers = []
    for i in range(self.n_atom):
        atom1 = self.atoms[i]
        try:
            at1 = ff.atom_types[atom1.type].eqt_bond
        except:
            raise Exception(f'AtomType {atom1.type} not found in FF')
        # NOTE(review): j starts at i, so each atom is also tested against itself;
        # presumably harmless because |delta| == 0 never matches a BondTerm length,
        # but range(i + 1, ...) would be cleaner -- confirm before changing.
        for j in range(i, self.n_atom):
            atom2 = self.atoms[j]
            try:
                at2 = ff.atom_types[atom2.type].eqt_bond
            except:
                raise Exception(f'AtomType {atom2.type} not found in FF')
            delta = atom2.position - atom1.position
            if pbc != '':
                # too far apart in some dimension even after wrapping -> not bonded
                if any((np.abs(delta) > bond_limit) & (np.abs(delta) < box - bond_limit)):
                    continue
                # apply minimum-image convention per requested dimension
                if 'x' in pbc:
                    delta[0] -= math.ceil(delta[0] / box[0] - 0.5) * box[0]
                if 'y' in pbc:
                    delta[1] -= math.ceil(delta[1] / box[1] - 0.5) * box[1]
                if 'z' in pbc:
                    delta[2] -= math.ceil(delta[2] / box[2] - 0.5) * box[2]
            if any(np.abs(delta) > bond_limit):
                continue
            # build a throwaway term only to derive the canonical lookup name
            bterm = BondTerm(at1, at2, 0)
            if bterm.name not in ff.bond_terms.keys():
                continue
            bterm: BondTerm = ff.bond_terms[bterm.name]
            # cheap component-wise pre-filter before the exact distance check below
            if any(delta - bterm.length > bond_tolerance):
                continue
            if abs(np.sqrt(delta.dot(delta)) - bterm.length) <= bond_tolerance:
                self.add_bond(atom1, atom2)
    # generate angles etc..., and then remove them if requirements are not satisfied
    self.generate_angle_dihedral_improper()
    angles_removed = []
    dihedrals_removed = []
    impropers_removed = []
    if angle_tolerance is not None:
        # iterate over a copy because remove_connectivity mutates self._angles
        for angle in self._angles[:]:
            at1 = ff.atom_types[angle.atom1.type].eqt_ang_s
            at2 = ff.atom_types[angle.atom2.type].eqt_ang_c
            at3 = ff.atom_types[angle.atom3.type].eqt_ang_s
            aterm = AngleTerm(at1, at2, at3, 0)
            if aterm.name not in ff.angle_terms.keys():
                raise Exception(
                    f'{str(angle)} constructed but {str(aterm)} not found in FF')
            aterm: AngleTerm = ff.angle_terms[aterm.name]
            delta21 = angle.atom1.position - angle.atom2.position
            delta23 = angle.atom3.position - angle.atom2.position
            if 'x' in pbc:
                delta21[0] -= math.ceil(delta21[0] / box[0] - 0.5) * box[0]
                delta23[0] -= math.ceil(delta23[0] / box[0] - 0.5) * box[0]
            if 'y' in pbc:
                delta21[1] -= math.ceil(delta21[1] / box[1] - 0.5) * box[1]
                delta23[1] -= math.ceil(delta23[1] / box[1] - 0.5) * box[1]
            if 'z' in pbc:
                delta21[2] -= math.ceil(delta21[2] / box[2] - 0.5) * box[2]
                delta23[2] -= math.ceil(delta23[2] / box[2] - 0.5) * box[2]
            # angle between the two bond vectors; clip guards against rounding
            cos = delta21.dot(delta23) / math.sqrt(delta21.dot(delta21) * delta23.dot(delta23))
            theta = np.arccos(np.clip(cos, -1, 1))
            if abs(theta - aterm.theta) > angle_tolerance * DEG2RAD:
                self.remove_connectivity(angle)
                angles_removed.append(angle)
    for dihedral in self._dihedrals[:]:
        # consider wildcards in force field
        ats_list = ff.get_eqt_for_dihedral(dihedral)
        for ats in ats_list:
            dterm = DihedralTerm(*ats)
            if dterm.name in ff.dihedral_terms.keys():
                break
        else:
            # no matching term found -> drop the dihedral
            self.remove_connectivity(dihedral)
            dihedrals_removed.append(dihedral)
    for improper in self._impropers[:]:
        # consider wildcards in force field
        ats_list = ff.get_eqt_for_improper(improper)
        for ats in ats_list:
            iterm = ImproperTerm(*ats)
            if iterm.name in ff.improper_terms.keys():
                break
        else:
            self.remove_connectivity(improper)
            impropers_removed.append(improper)
    # report what was dropped (only the first 10 names, to keep logs short)
    if angles_removed != []:
        msg = '%i angles not added because value far from equilibrium: ' \
              % len(angles_removed) \
              + ' '.join([i.name for i in angles_removed[:10]])
        if len(angles_removed) > 10:
            msg += ' and more ...'
        logger.warning(msg)
    if dihedrals_removed != []:
        msg = '%i dihedrals not added because parameters not found in FF: ' \
              % len(dihedrals_removed) \
              + ' '.join([i.name for i in dihedrals_removed[:10]])
        if len(dihedrals_removed) > 10:
            msg += ' and more ...'
        logger.warning(msg)
    if impropers_removed != []:
        msg = '%i impropers not added because parameters not found in FF: ' \
              % len(impropers_removed) \
              + ' '.join([i.name for i in impropers_removed[:10]])
        if len(impropers_removed) > 10:
            msg += ' and more ...'
        logger.warning(msg)
def generate_drude_particles(self, ff, type_drude='DP_', seed=1, update_topology=True):
    '''
    Generate Drude particles from DrudeTerms in force field.

    The atom types should have been defined already.
    A Drude particle will not be generated if a DrudeTerm for its atom type can not be found in the FF.
    Note that the existing Drude particles will be removed before generating.
    The mass defined in the DrudeTerm will be transferred from parent atom to the Drude particle.
    The Drude charge will be calculated from the DrudeTerm and transferred from parent atom to the Drude particle.
    Bonds between parent-Drude will be generated and added to the topology.
    If AtomType and VdwTerm for generated Drude particles are not found in FF, these terms will be created and added to the FF.

    Parameters
    ----------
    ff : ForceField
    type_drude : str
    seed : int
        Seed for the random displacement of the Drude particles
    update_topology : bool
    '''
    if len(ff.polarizable_terms) == 0:
        raise Exception('Polarizable terms not found in force field')
    np.random.seed(seed)
    self.remove_drude_particles(update_topology=False)
    _atype_not_found = set()
    drude_pairs = {}
    for parent in self._atoms:
        atype = ff.atom_types.get(parent.type)
        if atype is None:
            # collect all missing types first, report them together below
            _atype_not_found.add(parent.type)
            continue
        pterm = ff.polarizable_terms.get(atype.eqt_polar)
        if pterm is None:
            continue
        if type(pterm) is not DrudeTerm:
            raise Exception('Polarizable terms other than DrudeTerm haven\'t been implemented')
        drude = Atom()
        drude.is_drude = True
        drude.type = type_drude
        # add Drude particles after all been generated so the name of them are in sequence
        drude.name = 'DP' + str(parent.id_in_mol + 1)
        drude.symbol = 'DP'
        drude.mass = pterm.mass
        # mass is transferred, so the parent+Drude total is conserved
        parent.mass -= drude.mass
        n_H = len([atom for atom in parent.bond_partners if atom.symbol == 'H'])
        alpha = pterm.alpha + n_H * pterm.merge_alpha_H
        # charge is transferred as well: parent gains what the Drude loses
        drude.charge = - pterm.get_charge(alpha)
        parent.charge += pterm.get_charge(alpha)
        # update alpha and thole for Drude parent particle
        parent.alpha = alpha
        parent.thole = pterm.thole
        if parent.has_position:
            # make sure Drude and parent atom do not overlap. max deviation 0.005 nm
            drude.position = parent.position + (np.random.random(3) - 0.5) / 100
        drude_pairs[parent] = drude
    if _atype_not_found != set():
        logger.error('%i atom types not found in FF: %s' % (
            len(_atype_not_found), ' '.join(_atype_not_found)))
        raise Exception(f'Generating Drude particles {str(self)} failed')
    for parent, drude in drude_pairs.items():
        # insert each Drude particle right after its parent atom
        self.add_atom(drude, index=self._atoms.index(parent) + 1, update_topology=False)
        self.add_bond(parent, drude)
    if self._topology is not None and update_topology:
        self._topology.update_molecules(self._topology.molecules, deepcopy=False)
    # make sure the FF has an AtomType and a (zero) vdW term for the Drude type
    dtype = ff.atom_types.get(type_drude)
    if dtype is None:
        dtype = AtomType(type_drude)
        ff.add_term(dtype)
        logger.warning(f'AtomType for Drude particle not found in FF. '
                       f'{str(dtype)} is added to the FF')
    vdw = LJ126Term(dtype.eqt_vdw, dtype.eqt_vdw, 0.0, 0.0)
    if ff.vdw_terms.get(vdw.name) is None:
        ff.add_term(vdw)
        logger.warning(f'VdwTerm for Drude particle not found in FF. '
                       f'{str(vdw)} with zero interactions is added to the FF')
    # sanity check: warn once if any heavy atom ended up without a Drude particle
    for atom in self._atoms:
        if not atom.is_drude and atom.symbol != 'H' and atom not in drude_pairs:
            logger.warning(f'Not all heavy atoms in {str(self)} carry Drude particles')
            break
def remove_drude_particles(self, update_topology=True):
    '''
    Remove all Drude particles and bonds belonging to Drude particles.

    The charges and masses carried by Drude particles will be transferred back to parent atoms.

    Parameters
    ----------
    update_topology : bool
    '''
    for parent, drude in self.get_drude_pairs():
        # give the mass and charge back so the totals are conserved
        parent.mass += drude.mass
        parent.charge += drude.charge
        # a Drude particle carries exactly one bond: the one to its parent
        self.remove_connectivity(drude._bonds[0])
        self.remove_atom(drude, update_topology=False)
    if self._topology is not None and update_topology:
        self._topology.update_molecules(self._topology.molecules, deepcopy=False)
def generate_virtual_sites(self, ff, update_topology=True):
    '''
    Generate virtual sites from VirtualSiteTerms in force field.

    The atom types should have been defined already.
    Note that the existing virtual sites will be removed before generating.
    The charge won't be assigned by this method.
    Therefore `assign_charge_from_ff` should be called to assign the charges on virtual sites.
    Currently, only TIP4PSiteTerm has been implemented.
    TODO Support other virtual site terms

    Parameters
    ----------
    ff : ForceField
    update_topology : bool
    '''
    if len(ff.virtual_site_terms) == 0:
        raise Exception('Virtual site terms not found in force field')
    self.remove_virtual_sites(update_topology=False)
    # validate all terms up front so we fail before touching the molecule
    for term in ff.virtual_site_terms.values():
        if type(term) is not TIP4PSiteTerm:
            raise Exception('Virtual sites terms other than TIP4PSiteTerm haven\'t been implemented')
    for term in ff.virtual_site_terms.values():
        for angle in self._angles:
            # only H-O-H angles matching this term get a TIP4P site
            if angle.atom2.type != term.type_O or angle.atom1.type != term.type_H or angle.atom3.type != term.type_H:
                continue
            atom_vsite = Atom('VS' + str(angle.atom2.id_in_mol + 1))
            atom_vsite.symbol = 'VS'
            atom_vsite.type = term.type
            atom_vsite.virtual_site = TIP4PSite([angle.atom2, angle.atom1, angle.atom3], [term.d])
            atom_vsite.position = atom_vsite.virtual_site.calc_position()
            self.add_atom(atom_vsite, update_topology=False)
    if self._topology is not None and update_topology:
        self._topology.update_molecules(self._topology.molecules, deepcopy=False)
def remove_virtual_sites(self, update_topology=True):
    '''
    Remove all virtual sites.

    Parameters
    ----------
    update_topology : bool
        Whether or not to refresh the parent topology after removal
    '''
    # Bug fix: iterate over a copy of the atom list. remove_atom() mutates
    # self._atoms, and deleting elements from a list while iterating it makes
    # the iterator skip the element following each removed one, so consecutive
    # virtual sites were left behind.
    for atom in self._atoms[:]:
        if atom.virtual_site is not None:
            self.remove_atom(atom, update_topology=False)
    if self._topology is not None and update_topology:
        self._topology.update_molecules(self._topology.molecules, deepcopy=False)
def assign_mass_from_ff(self, ff):
    '''
    Assign masses for all atoms and Drude particles from the force field.

    Thin delegation to :meth:`ForceField.assign_mass`.

    Parameters
    ----------
    ff : ForceField

    See Also
    --------
    ForceField.assign_mass
    '''
    ff.assign_mass(self)
def assign_charge_from_ff(self, ff, transfer_qinc_terms=False):
    '''
    Assign charges for all atoms and Drude particles from the force field.

    Thin delegation to :meth:`ForceField.assign_charge`.

    Parameters
    ----------
    ff : ForceField
    transfer_qinc_terms : bool, optional

    See Also
    --------
    ForceField.assign_charge
    '''
    ff.assign_charge(self, transfer_qinc_terms)
def get_sub_molecule(self, indexes, deepcopy=True):
    '''
    Extract a substructure from this molecule by indexes of atoms.

    The substructure will not contain any bond, angle, dihedral or improper between
    atoms in the substructure and the remaining parts.
    Residue information will be reconstructed.
    TODO Fix performance issue

    Parameters
    ----------
    indexes : list of int
        The atoms in the substructure will be in the same order as in indexes
    deepcopy : bool
        If set to False, then the atoms and connections in the substructure will be the identical objects as the atoms and connections in this molecule.
        The data structure in this molecule will be messed up, and should not be accessed later.

    Returns
    -------
    substructure : Molecule
    '''
    # dict.fromkeys deduplicates while preserving the first-seen order
    indexes = list(dict.fromkeys(indexes))
    if deepcopy:
        mol = copy.deepcopy(self)
    else:
        mol = self
    # assign atom id so that we can access id_atoms for connectivities
    for i, atom in enumerate(mol.atoms):
        atom.id = i
    # store residue information
    residues = list(dict.fromkeys(mol.atoms[i].residue for i in indexes))
    residue_name_atoms = []
    if len(residues) > 1:
        for residue in residues:
            atoms = [atom for atom in residue.atoms if atom.id in indexes]
            residue_name_atoms.append((residue.name, atoms))
    sub = Molecule()
    for i in indexes:
        sub.add_atom(mol.atoms[i], update_topology=False)
    ids_set = set(indexes)
    # keep only connectivities whose atoms are entirely inside the substructure
    for conn in mol.bonds:
        if {a.id for a in conn.atoms} <= ids_set:
            sub._bonds.append(conn)
    # drop dangling bond references on the kept atoms (iterate in reverse so
    # pop(i) does not shift the indices still to be visited)
    for atom in sub.atoms:
        for i in reversed(range(len(atom.bonds))):
            if not {a.id for a in atom.bonds[i].atoms} <= ids_set:
                atom._bonds.pop(i)
    for conn in mol.angles:
        if {a.id for a in conn.atoms} <= ids_set:
            sub._angles.append(conn)
    for conn in mol.dihedrals:
        if {a.id for a in conn.atoms} <= ids_set:
            sub._dihedrals.append(conn)
    for conn in mol.impropers:
        if {a.id for a in conn.atoms} <= ids_set:
            sub._impropers.append(conn)
    # reconstruct residues
    if len(residues) > 1:
        for resname, atoms in residue_name_atoms:
            sub.add_residue(resname, atoms)
    else:
        # single-residue substructure: name the molecule after that residue
        sub.name = residues[0].name
    return sub
@staticmethod
def merge(molecules):
    '''
    Merge several molecules into a single molecule.

    The molecules will be deep-copied before the mergence.

    Parameters
    ----------
    molecules : list of Molecule

    Returns
    -------
    merged : Molecule
    '''
    merged = Molecule()
    for mol in molecules:
        m_copy = copy.deepcopy(mol)
        # should always call `add_atom()` instead of manipulating `_atoms` directly
        for atom in m_copy.atoms:
            merged.add_atom(atom)
        merged._bonds.extend(m_copy._bonds)
        merged._angles.extend(m_copy._angles)
        merged._dihedrals.extend(m_copy._dihedrals)
        merged._impropers.extend(m_copy._impropers)
        # all atoms goes to the default residue after `add_atom`, but the old residue still holds the atom list
        for residue in m_copy.residues:
            merged.add_residue(residue.name, residue.atoms)
    return merged
def split(self, consecutive=False):
    '''
    Split the molecule into smaller pieces based on bond network.

    The atoms in each piece will preserve the original order.
    However, the atoms at the end of the original molecule may end up in a piece in the beginning,
    causing the order of all atoms in all the pieces different from original order.
    To avoid this, set consecutive to True.
    In this case, it will make sure all atoms in front pieces will have atom id smaller than atoms in back pieces.
    Residue information will be reconstructed for each piece.

    Parameters
    ----------
    consecutive : bool

    Returns
    -------
    molecules : list of Molecule
    '''
    adjacency = self.get_adjacency_matrix()
    # two atoms belong to the same cluster iff they are connected in the bond graph
    if consecutive:
        clusters = find_clusters_consecutive(self.atoms, lambda a1, a2: adjacency[a1.id_in_mol][a2.id_in_mol])
    else:
        clusters = find_clusters(self.atoms, lambda a1, a2: adjacency[a1.id_in_mol][a2.id_in_mol])
    # sort inside each cluster and between clusters for a deterministic output order
    for cluster in clusters:
        cluster.sort()
    clusters.sort()
    # work on a copy; get_sub_molecule(deepcopy=False) invalidates its input
    mol = copy.deepcopy(self)
    pieces = [mol.get_sub_molecule(ids, deepcopy=False) for ids in clusters]
    return pieces
def split_residues(self):
    '''
    Split the molecule into smaller pieces. Each piece will be made of one residue.

    Make sure that there is no inter-residue bonds/angles/dihedrals/impropers,
    otherwise an Exception is raised.

    Returns
    -------
    molecules : list of Molecule
    '''
    pieces = []
    mol = copy.deepcopy(self)
    # one new Molecule per residue; add_atom() re-parents each atom to its piece
    for residue in mol.residues:
        sub = Molecule(residue.name)
        for atom in residue.atoms:
            sub.add_atom(atom)
        pieces.append(sub)
    # each connectivity must live entirely inside one piece; attach it there
    for conn in mol.bonds:
        if len({atom.molecule for atom in conn.atoms}) > 1:
            raise Exception(f'Inter-residue bond {conn} in {self}')
        conn.atom1.molecule._bonds.append(conn)
    for conn in mol.angles:
        if len({atom.molecule for atom in conn.atoms}) > 1:
            raise Exception(f'Inter-residue angle {conn} in {self}')
        conn.atom1.molecule._angles.append(conn)
    for conn in mol.dihedrals:
        if len({atom.molecule for atom in conn.atoms}) > 1:
            raise Exception(f'Inter-residue dihedral {conn} in {self}')
        conn.atom1.molecule._dihedrals.append(conn)
    for conn in mol.impropers:
        if len({atom.molecule for atom in conn.atoms}) > 1:
            raise Exception(f'Inter-residue improper {conn} in {self}')
        conn.atom1.molecule._impropers.append(conn)
    return pieces
| z-gong/mstk | mstk/topology/molecule.py | molecule.py | py | 52,348 | python | en | code | 7 | github-code | 13 |
from sqlalchemy import ARRAY, Column, Identity, Integer, String, Table

from . import metadata

# SQLAlchemy Core table mirroring the parameters of a Stripe financial report run.
FinancialReportingFinanceReportRunRunParametersJson = Table(
    "financial_reporting_finance_report_run_run_parametersjson",
    metadata,
    Column(
        "columns",
        ARRAY(String),
        comment="The set of output columns requested for inclusion in the report run",
        nullable=True,
    ),
    Column(
        "connected_account",
        String,
        comment="Connected account ID by which to filter the report run",
        nullable=True,
    ),
    Column(
        "currency",
        String,
        comment="Currency of objects to be included in the report run",
        nullable=True,
    ),
    Column(
        "interval_end",
        Integer,
        comment="Ending timestamp of data to be included in the report run. Can be any UTC timestamp between 1 second after the user specified `interval_start` and 1 second before this report's last `data_available_end` value",
        nullable=True,
    ),
    Column(
        "interval_start",
        Integer,
        comment="Starting timestamp of data to be included in the report run. Can be any UTC timestamp between 1 second after this report's `data_available_start` and 1 second before the user specified `interval_end` value",
        nullable=True,
    ),
    Column(
        "payout",
        String,
        comment="Payout ID by which to filter the report run",
        nullable=True,
    ),
    Column(
        "reporting_category",
        String,
        comment="Category of balance transactions to be included in the report run",
        nullable=True,
    ),
    Column(
        "timezone",
        String,
        comment="Defaults to `Etc/UTC`. The output timezone for all timestamps in the report. A list of possible time zone values is maintained at the [IANA Time Zone Database](http://www.iana.org/time-zones). Has no effect on `interval_start` or `interval_end`",
        nullable=True,
    ),
    Column("id", Integer, primary_key=True, server_default=Identity()),
)

# Bug fix: __all__ previously listed the string
# "financial_reporting_finance_report_run_run_parameters.json", which is not a
# valid Python identifier and matches no name defined in this module, so
# `from ... import *` exposed nothing (and would raise AttributeError).
__all__ = ["FinancialReportingFinanceReportRunRunParametersJson"]
| offscale/stripe-sql | stripe_openapi/financial_reporting_finance_report_run_run_parameters.py | financial_reporting_finance_report_run_run_parameters.py | py | 2,140 | python | en | code | 1 | github-code | 13 |
# 1. Use the global / nonlocal keywords when an inner scope needs to
#    modify a variable that lives in an outer scope.

# Modifying the global variable num
# !/usr/bin/python3
num = 1


def fun1():
    global num  # must declare num as global before assigning to it
    print(num)
    num = 123
    print(num)


fun1()
print(num)
# Output:
# 1
# 123
# 123
print('\n')


# 2. Modifying a variable in an enclosing (non-global) scope requires the
#    nonlocal keyword instead.
def outer():
    num = 10

    def inner():
        nonlocal num  # bind num to the enclosing outer() scope
        num = 100
        print(num)
    inner()
    print(num)


outer()
# Output:
# 100
# 100
print('\n')

# 3. Passing the value in through a function parameter (the global is untouched)
a = 10


def test(a):
    a = a + 1
    print(a)


test(a)
# Output:
# 11
print('\n')

# 4. Declaring a as global inside the function instead
a = 10


def test():
    global a
    a = a + 1
    print(a)


test()
# Output:
# 11
| zhengjiaao/Python3-Basic | 24 Python3 命名空间和作用域/4 Python3 (global 和 nonlocal关键字).py | 4 Python3 (global 和 nonlocal关键字).py | py | 927 | python | zh | code | 0 | github-code | 13 |
'''
Created on Nov 13, 2014

@author: mikael
'''
from scripts_inhibition import effect_dopamine

# Configuration for the dopamine-effect analysis of beta-activation simulations:
# where the simulation results live, and where/under what title to store output.
# The data subdirectory is derived from this script's file name.
kwargs = {'data_path': ('/home/mikael/results/papers/inhibition/network/'
                        + 'supermicro/simulate_beta_ZZZ_dop_effect_perturb/'),
          'from_diks': 1,
          'script_name': (__file__.split('/')[-1][0:-3] + '/data'),
          'title': 'Activation (beta)'}
obj = effect_dopamine.Main(**kwargs)
obj.do()
from decimal import Decimal

# Read the number of drinks N and the alcohol limit X, then accumulate
# V * P / 100 (the amount of pure alcohol) drink by drink.  Decimal is used
# to avoid binary floating-point rounding on the percentages.  Print the
# 1-based index of the first drink that pushes the total strictly over X,
# or -1 if the limit is never exceeded.
n_drinks, limit = (int(token) for token in input().split())
limit = Decimal(limit)
total_alcohol = Decimal(0)
answer = -1
for index in range(n_drinks):
    volume, percent = (Decimal(token) for token in input().split())
    total_alcohol += volume * (percent / Decimal(100))
    if total_alcohol > limit:
        answer = index + 1
        break
print(answer)
| tenten0727/AtCoder | AtCoder Beginner Contest 189/B.py | B.py | py | 275 | python | en | code | 0 | github-code | 13 |
from aip import AipOcr
import json

# NOTE(security): real Baidu OCR credentials are hardcoded below -- they should
# be loaded from environment variables or a config file, not committed in source.
""" 你的 APPID AK SK """
APP_ID = '24057924'
API_KEY = 'Yrj7dIv17nHQ9hy23KZ1XWTT'
SECRET_KEY = 'iEK567uchrXyrTeFZCj7wHhsO8f1rucu'
client = AipOcr(APP_ID, API_KEY, SECRET_KEY)


""" 读取图片 """
def get_file_content(filePath):
    # Read an image file as raw bytes for the OCR client.
    with open(filePath, 'rb') as fp:
        return fp.read()


image = get_file_content('1.png')

""" 调用通用文字识别, 图片参数为本地图片 """
# First call: generic OCR on a local image with default options.
client.basicGeneral(image)

""" 如果有可选参数 """
options = {}
"""检测语言类型"""
options["language_type"] = "CHN_ENG"  # language type: Chinese + English
"""检测图片朝向"""
options["detect_direction"] = "true"  # detect image orientation
"""检测语言"""
options["detect_language"] = "true"  # detect the language of the text
"""置信度"""
options["probability"] = "true"  # return per-line confidence

""" 带参数调用通用文字识别, 图片参数为本地图片 """
# Second call: generic OCR with the options above.
result = client.basicGeneral(image, options)
print(result)
# result is a dict
# print(type(result))
# Print each recognized line of text
for i in result["words_result"]:
    print(i["words"])
    # each line is a str
    # print(type(i["words"]))
"""
# sort_keys 如果是字典对象,选择True的话,会按照键的ASCII码来排序
# indent缩进
# separators 对象分隔符,默认为,
# ensure_ascii:默认值True,如果dict内含有non-ASCII的字符,
# 则会类似\\uXXXX的显示数据,设置成False后,就能正常显示
"""
# Pretty-print the full result; ensure_ascii=False keeps Chinese characters readable
print(json.dumps(result, sort_keys=True, indent=4, separators=(',', ': '), ensure_ascii=False))
| CUG-LXY/undergraduateproject | pyfor软件工程/a.py | a.py | py | 1,506 | python | zh | code | 0 | github-code | 13 |
import math
import operator
import re
import string
import urllib.request
from collections import Counter

from bs4 import BeautifulSoup
def freqFourLetterWords(url):
    """Return "word,count" lines for the four-letter words on a web page.

    Fetches *url*, extracts the text of all <p> elements, lower-cases the
    words and counts those that are exactly four characters long.  The
    output has one "word,count" line per word, sorted by descending count.

    Parameters
    ----------
    url : str

    Returns
    -------
    str
    """
    html = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(html, "lxml")
    # compile the non-word pattern once instead of once per paragraph
    non_word = re.compile(r"\W")
    counts = Counter()
    for p in soup.find_all('p'):
        text = non_word.sub(" ", p.get_text())
        counts.update(w.lower() for w in text.split() if len(w) == 4)
    # most_common() yields (word, count) sorted by descending count,
    # matching the original sorted(..., reverse=True) output
    return "".join(f"{word},{count}\n" for word, count in counts.most_common())


print(freqFourLetterWords('https://en.wikipedia.org/wiki/Data_science'))
def freqFourLetterWordsNoStopWords(url):
    """Return "word,count" lines for four-letter words on a page, excluding stop words.

    Stop words are read from ``stop_words.txt`` (one per line) and stripped
    from the page text before counting.  Output is sorted by descending count.

    Parameters
    ----------
    url : str

    Returns
    -------
    str
    """
    # Bug fix: the original opened stop_words.txt without ever closing it;
    # a context manager releases the handle deterministically.
    with open("stop_words.txt", "r") as f:
        stopWords = f.read().split("\n")
    r = urllib.request.urlopen(url).read()
    soup = BeautifulSoup(r, "lxml")
    text = ""
    for p in soup.find_all('p'):
        # replace every non-word character with a space before concatenating
        text += re.sub(r"\W", " ", p.get_text())
    for word in stopWords:
        word = word.lower()
        # NOTE(review): the "\s word \s" pattern misses words at the very
        # start/end of the text and one of two adjacent stop words (the shared
        # space is consumed by the first match).  Kept as-is to preserve the
        # original output; re.escape only hardens against non-word characters.
        pattern = re.compile(r"\s" + re.escape(word) + r"\s", re.IGNORECASE)
        text = re.sub(pattern, " ", text)
    fourWordDict = {}
    for word in text.split():
        word = word.translate(str.maketrans('', '', string.punctuation)).lower()
        if len(word) == 4:
            fourWordDict[word] = fourWordDict.get(word, 0) + 1
    sortedDict = sorted(fourWordDict.items(), key=operator.itemgetter(1), reverse=True)
    return "".join(word + "," + str(count) + "\n" for word, count in sortedDict)


print(freqFourLetterWordsNoStopWords('https://en.wikipedia.org/wiki/Data_science'))
def linkTexts(url):
    """Print the URL and link text of every absolute http:// link on a page.

    Duplicate URLs are skipped; an empty link text is printed as a blank line.

    Parameters
    ----------
    url : str
    """
    html_page = urllib.request.urlopen(url)
    soup = BeautifulSoup(html_page, "lxml")
    seen_urls = []
    entries = []  # (url, text) pairs, kept in lockstep
    for link in soup.findAll('a', attrs={'href': re.compile("^http://")}):
        link_url = link["href"]  # filter for html pages with absolute path
        if link_url in seen_urls:
            continue
        seen_urls.append(link_url)
        text = link.get_text().strip()
        # Bug fix: the original appended BOTH "\n" and "" for an empty link
        # text, and silently dropped duplicate texts, so its parallel url/text
        # lists fell out of sync and the final printing loop paired the wrong
        # text with each URL (or raised IndexError).  Pairing them in a single
        # list keeps them aligned by construction.
        entries.append((link_url, text if text else "\n"))
    print("URL")
    print("URL Text")
    for link_url, text in entries:
        print(link_url)
        print(text)


linkTexts("http://google.com")
def pageStats(urls):
    """Compute word stats, tf, idf and tf-idf for the terms
    'statistics', 'analytics', 'data' and 'science' over the first three
    URLs listed in ``urls.txt``.

    NOTE(review): the *urls* parameter is immediately shadowed by re-reading
    ``urls.txt`` below, so whatever the caller passes is ignored -- confirm
    whether the parameter or the file is the intended source.

    Returns
    -------
    str
        A multi-line report of unique/word counts, tf, idf and tf-idf values.
    """
    # document-frequency counters for the idf computation
    containsStatistics = 0
    containsAnalytics = 0
    containsData = 0
    containsScience = 0
    # NOTE(review): the file handle is never closed; shadows the parameter
    urls = open("urls.txt", "r")
    urls = urls.readlines()
    urls = urls[0:3]
    ##First loop solely for calculating IDFS
    for url in urls:
        r = urllib.request.urlopen(url).read()
        soup = BeautifulSoup(r, "lxml")
        paragraphs = soup.find_all('p')
        text = ""
        # Creates a string with all paragraph text
        for p in paragraphs:
            pText = p.get_text()
            text += pText
        text = text.lower()
        # Replaces all punctuation with a space
        pattern = re.compile("[\W ]+")
        text = re.sub(pattern, " ", text)
        words = text.split(' ')
        frequencies = {"statistics": 0, "analytics": 0, "data": 0, "science": 0}
        for word in words:
            if word in frequencies.keys():
                frequencies[word] += 1
        # a document counts once toward each term it contains at least once
        if frequencies["statistics"] >= 1:
            containsStatistics += 1
        if frequencies["analytics"] >= 1:
            containsAnalytics += 1
        if frequencies["data"] >= 1:
            containsData += 1
        if frequencies["science"] >= 1:
            containsScience += 1
    # idf = log(N / df).  NOTE(review): raises ZeroDivisionError if a term
    # appears on none of the pages -- confirm the inputs guarantee df >= 1.
    IDFstatistics = math.log(len(urls) / containsStatistics)
    IDFanalytics = math.log(len(urls) / containsAnalytics)
    IDFdata = math.log(len(urls) / containsData)
    IDFscience = math.log(len(urls) / containsScience)
    # per-document accumulators, one entry per URL
    uniqueCounts = []
    wordCounts = []
    tfStats = []
    tfAnaly = []
    tfData = []
    tfSci = []
    tfIDFStats = []
    tfIDFAnaly = []
    tfIDFData = []
    tfIDFSci = []
    ##Second loop for the rest of the stats
    for url in urls:
        r = urllib.request.urlopen(url).read()
        soup = BeautifulSoup(r, "lxml")
        paragraphs = soup.find_all('p')
        text = ""
        # Creates a string with all paragraph text
        for p in paragraphs:
            pText = p.get_text()
            text += pText
        text = text.lower()
        # Replaces all punctuation with a space
        pattern = re.compile("[\W ]+")
        text = re.sub(pattern, " ", text)
        words = text.split(" ")
        # drop empty strings produced by splitting on single spaces
        # NOTE(review): `i = i - 1` has no effect on a range() loop, and
        # list.remove() deletes the first matching element; the reversed
        # iteration keeps this from skipping entries, but it is fragile.
        for i in range(len(words) - 1, -1, -1):
            if words[i] == "":
                words.remove(words[i])
                i = i - 1
        wordCounts.append(len(words))
        # Counting unique words
        uniqueWords = []
        for word in words:
            if (word) not in uniqueWords:
                uniqueWords.append(word)
        uniqueCounts.append(len(uniqueWords))
        # Calculating freqs
        frequencies = {"statistics": 0, "analytics": 0, "data": 0, "science": 0}
        for word in words:
            if word in frequencies.keys():
                frequencies[word] += 1
        # NOTE(review): these increments repeat the first loop's counting AFTER
        # the idfs are already computed, leaving the contains* counters
        # inflated; they are unused afterwards, so the output is unaffected.
        if frequencies["statistics"] >= 1:
            containsStatistics += 1
        if frequencies["analytics"] >= 1:
            containsAnalytics += 1
        if frequencies["data"] >= 1:
            containsData += 1
        if frequencies["science"] >= 1:
            containsScience += 1
        # The term frequency (tf) of a term (word) is defined as the number of times that term
        # t occurs in document d, divided by the total number of words in the document. The tf
        # of a word depends on the document under consideration.
        tf_statistics = frequencies["statistics"] / len(words)
        tfStats.append(tf_statistics)
        tf_analytics = frequencies["analytics"] / len(words)
        tfAnaly.append(tf_analytics)
        tf_data = frequencies["data"] / len(words)
        tfData.append(tf_data)
        tf_science = frequencies["science"] / len(words)
        tfSci.append(tf_science)
        # Find tf-idf. The tf-idf of a word is the product of the term frequency of the word in
        # document d, and its inverse document frequency. The tf-idf of a word depends on the
        # document under consideration.
        tfIDFstatistics = tf_statistics * IDFstatistics
        tfIDFStats.append(tfIDFstatistics)
        tfIDFanalytics = tf_analytics * IDFanalytics
        tfIDFAnaly.append(tfIDFanalytics)
        tfIDFdata = tf_data * IDFdata
        tfIDFData.append(tfIDFdata)
        tfIDFscience = tf_science * IDFscience
        tfIDFSci.append(tfIDFscience)
    # assemble the human-readable report
    mystring = ""
    mystring += "unique: " + str(uniqueCounts) + "\n"
    mystring += "words: " + str(wordCounts) + "\n"
    mystring += "tf statistics: " + str(tfStats) + "\n"
    mystring += "tf analytics: " + str(tfAnaly) + "\n"
    mystring += "tf data: " + str(tfData) + "\n"
    mystring += "tf science: " + str(tfSci) + "\n"
    mystring += "idf statistics: " + str(IDFstatistics) + "\n"
    mystring += "idf analytics: " + str(IDFanalytics) + "\n"
    mystring += "idf data: " + str(IDFdata) + "\n"
    mystring += "idfscience: " + str(IDFscience) + "\n"
    mystring += "tf-idf statistics: " + str(tfIDFStats) + "\n"
    mystring += "tf-idf analytics: " + str(tfIDFAnaly) + "\n"
    mystring += "tf-idf data: " + str(tfIDFData) + "\n"
    mystring += "tf-idf science: " + str(tfIDFSci) + "\n"
    # CODE USED FOR OUTPUT FILE
    # outfile = open("Q3_Part1.txt","w")
    # outfile.write(mystring)
    return mystring
5795629246 | """
Calculate edge betweenness
Jin Sun
"""
import random
import sys
# function to perform BFS, from a selected root
# [INPUT] edges: network stored in edge list format
# r: root node
# [OUTPUT] Np, parents, d as in lecture example
def bfs(edges, r):
N = len(edges)
d = [-1]*N # all nodes are unassigned by default
d[r] = 0 # init root node to have distance 0
Q = [r] # a queue, initially only contains r
Np = [0]*N # number of geodesic paths
Np[r] = 1
parents = {} # list of parents
for n in range(N):
parents[n] = []
# for each element in Q
while len(Q)>0:
u = Q.pop(0) # get first element
# for each vertex reachable by u
for v in edges[u]:
# if not valid
if v==-1:
continue
# if the node is unassigned
if d[v] == -1:
# assign v's distance
d[v] = d[u]+1
# u is parent of v
parents[v].append(u)
# add v to the end of Q
Q.append(v)
Np[v] = Np[u]
else:
if d[v] == d[u]+1:
# if the node is already assigned
parents[v].append(u)
Np[v] = Np[v] + Np[u]
return Np, parents, d
# calculate betweenness
# [INPUT] edges: network in edge list format
# [OUTPUT] bl: betweenness for each edge, stored in NxN matrix
def calBetweenness(edges):
    """Accumulate edge betweenness by running one BFS per root node.

    For every root, node credits of 1 are propagated from the farthest
    nodes back toward the root along the BFS predecessor lists, split in
    proportion to the shortest-path counts; each share is added to the
    (symmetric) entry of the traversed edge.
    """
    n_nodes = len(edges)
    betweenness = [[0 for _ in range(n_nodes)] for _ in range(n_nodes)]
    for root in range(n_nodes):
        n_paths, preds, dist = bfs(edges, root)
        credit = [1] * n_nodes                       # per-node credit, 1 each
        # nodes ordered by distance; walk them farthest-first
        order = sorted(range(len(dist)), key=lambda k: dist[k])
        for pos in range(n_nodes - 1, 0, -1):
            node = order[pos]
            for parent in preds[node]:
                # split the node's credit among parents by path count
                share = credit[node] * n_paths[parent] / (n_paths[node] * 1.0)
                credit[parent] = credit[parent] + share
                betweenness[node][parent] = betweenness[node][parent] + share
                betweenness[parent][node] = betweenness[parent][node] + share
    return betweenness
# Disabled smoke test on a 5-node example graph (flip to True to run).
if False:
    # simple test
    edges = {}
    edges[0] = [1, 2]
    edges[1] = [0, 3]
    edges[2] = [0, 3]
    edges[3] = [1, 2, 4]
    edges[4] = [3]
    Np, parents, d = bfs(edges, 0)
    bl = calBetweenness(edges)
    print(bl)
| jinsungit/PHYS615 | ps3/calBetweenness.py | calBetweenness.py | py | 2,436 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.