seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
from flask import Flask, request, jsonify
import requests
from flask_cors import CORS

app = Flask(__name__)
CORS(app)

# Upstream public COVID-19 statistics API that this service proxies.
base_url = 'https://coronavirus-19-api.herokuapp.com'


def _fetch_country(country):
    """Fetch and decode the upstream stats dict for a single country."""
    return requests.get(base_url + '/countries/' + country).json()


@app.route('/get-all-cases')
def get_all_cases():
    """Return the statistics of every country as a JSON array."""
    all_cases = requests.get(base_url + '/countries').json()
    return jsonify(all_cases)


@app.route('/get-cases-by-country/<country>')
def get_cases_by_country(country):
    """Return the full statistics dict for one country."""
    return _fetch_country(country)


@app.route('/<country>/active')
def get_active_cases(country):
    """Return only the number of currently active cases."""
    return jsonify(_fetch_country(country)['active'])


@app.route('/<country>/cases')
def get_cases(country):
    """Return only the total number of cases."""
    return jsonify(_fetch_country(country)['cases'])


@app.route('/<country>/recovered')
def get_recovered_cases(country):
    """Return only the number of recovered cases."""
    return jsonify(_fetch_country(country)['recovered'])


@app.route('/<country>/deaths')
def get_deaths_cases(country):
    """Return only the number of deaths."""
    return jsonify(_fetch_country(country)['deaths'])
| Rhukie/hw4_9pk6vl43kh | covid-api/app.py | app.py | py | 1,178 | python | en | code | 1 | github-code | 36 |
"""
Law of Large Numbers (greedy).

Idea: perform m additions to build the largest possible sum, but the
same number may not be added more than k times in a row.  So add the
largest number k times, then the second-largest once, and repeat this
pattern for all m additions.
"""
import sys

read = sys.stdin.readline

n, m, k = map(int, read().split(' '))
numbers = list(map(int, read().split(' ')))
numbers.sort(reverse=True)

total = 0
streak = 0  # consecutive uses of the largest number so far
for _ in range(m):
    if streak < k:
        total += numbers[0]
        streak += 1
    else:
        # k-th consecutive use reached: take the runner-up once and reset.
        total += numbers[1]
        streak = 0
print(total)
def bubbleSort(L: list) -> list:
    """Sort *L* in place with bubble sort and return it.

    After each pass the largest remaining element has bubbled to the end,
    so the scanned range shrinks by one.  The loop stops early as soon as
    a full pass performs no swap (the list is already sorted).
    """
    end = len(L) - 1
    while True:
        swapped = False
        for j in range(end):
            if L[j] > L[j + 1]:
                # Idiomatic tuple swap instead of a temporary variable.
                L[j], L[j + 1] = L[j + 1], L[j]
                swapped = True
        end -= 1
        if not swapped:
            break
    return L


x = [4, 3, 5, 12, 3, 2]
print(bubbleSort(x))
# -*- coding: utf-8 -*-
"""Propensity estimation (without condition) for the arxiv click dataset.

Trains an MLP on click / non-click counts with full-batch gradient steps,
keeps the checkpoint with the best validation loss, then writes the final
validation and test losses to text files in the output directory.
"""
import os
import sys
import math
import copy
import random
import timeit
import argparse
import numpy as np
import tensorflow as tf

from ..model import mlp_rel
from ..lib.data_utils import load_prop
from ..lib.utils import makedirs

if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='propensity estimation w/o condition for arxiv')
    parser.add_argument('-m', default=21, type=int,
                        help='number of top positions for which estimates are desired')
    parser.add_argument('-d', default=1, type=int,
                        help='dimension of feature')
    parser.add_argument('-i', '--inference_version', default=0, type=int,
                        help='inference version')
    parser.add_argument('--epoch', default=10000, type=int,
                        help='#epoch')
    parser.add_argument('-n1', default=32, type=int,
                        help='number of propensity hidden layer')
    parser.add_argument('-n2', default=32, type=int,
                        help='number of relevance hidden layer')
    parser.add_argument('feat_type', help='feat type')
    parser.add_argument('data_dir', help='data dir')
    parser.add_argument('output_dir', help='output dir')
    args = parser.parse_args()

    start = timeit.default_timer()
    M = args.m  # number of top ranking positions
    D = args.d  # feature dimension
    makedirs(args.output_dir)

    # TF1-style graph/session API.
    with tf.Session() as sess:
        model = mlp_rel.MLP(D, M, args.n1, args.n2, 0.1)
        # Each split provides (click, not-click) count arrays plus a feature matrix.
        train_click_path = os.path.join(args.data_dir, 'train.click.npy')
        train_c, train_not_c = np.load(train_click_path)
        train_feat_path = os.path.join(args.data_dir, 'train.{}.feat.npy'.format(args.feat_type))
        valid_click_path = os.path.join(args.data_dir, 'valid.click.npy')
        valid_c, valid_not_c = np.load(valid_click_path)
        valid_feat_path = os.path.join(args.data_dir, 'valid.{}.feat.npy'.format(args.feat_type))
        X_valid = np.load(valid_feat_path)
        valid_loss_path = os.path.join(args.output_dir, 'valid_loss.txt')
        test_click_path = os.path.join(args.data_dir, 'test.click.npy')
        test_c, test_not_c = np.load(test_click_path)
        test_feat_path = os.path.join(args.data_dir, 'test.{}.feat.npy'.format(args.feat_type))
        X_test = np.load(test_feat_path)
        test_loss_path = os.path.join(args.output_dir, 'test_loss.txt')
        X_train = np.load(train_feat_path)

        # Resume from an existing checkpoint when one is present.
        if tf.train.get_checkpoint_state(args.output_dir):
            model.saver.restore(sess, tf.train.latest_checkpoint(args.output_dir))
        else:
            tf.global_variables_initializer().run()

        # Full-batch training; checkpoint whenever validation loss improves.
        best_loss = math.inf
        for epoch in range(args.epoch):
            train_loss, _ = sess.run([model.loss, model.train_op],
                                     feed_dict={model.x: X_train, model.c: train_c,
                                                model.not_c: train_not_c})
            valid_loss = sess.run([model.loss],
                                  feed_dict={model.x: X_valid, model.c: valid_c,
                                             model.not_c: valid_not_c})[0]
            if valid_loss < best_loss:
                best_loss = valid_loss
                model.saver.save(sess, '{}/checkpoint'.format(args.output_dir), global_step=model.global_step)
            if epoch % 100 == 0:
                # NOTE(review): the label says "Best Valid Loss" but the *current*
                # valid_loss is printed — confirm which one was intended.
                print('{}\tTrain Loss: {:.4f} Best Valid Loss: {:.4f}'.format(epoch, train_loss, valid_loss))

        # Reload the best checkpoint before reporting final losses.
        model.saver.restore(sess, tf.train.latest_checkpoint(args.output_dir))
        with open(valid_loss_path, 'w') as fout:
            fout.write('Loss: {}'.format(valid_loss))
        test_loss = sess.run([model.loss],
                             feed_dict={model.x: X_test, model.c: test_c,
                                        model.not_c: test_not_c})[0]
        with open(test_loss_path, 'w') as fout:
            fout.write('Loss: {}'.format(test_loss))

    end = timeit.default_timer()
    print('Running time: {:.3f}s.'.format(end - start))
| fzc621/CondPropEst | src/arxiv_obj/cpbm.py | cpbm.py | py | 3,915 | python | en | code | 2 | github-code | 36 |
8531279483 | import numpy as np
import galois
from nacl.public import PrivateKey, Box
from io import BytesIO
import cProfile
import re
# A library for Shamir sharing arrays of field elements
# - An ArrayShare object is one share of the whole array
# - ArrayShare objects have encrypt and decrypt methods
# - share_array secret shares an array, returning a dict mapping x coordinates
# to their corresponding ArrayShare objects
# - reconstruct_array reconstructs the original array, given a list of
# ArrayShare objects
# - sum_share_array performs a column-wise sum of a list of ArrayShare objects
# that all agree on the x coordinate
def array_to_bytes(x: np.ndarray) -> bytes:
    """Serialize a numpy array to bytes in the .npy format."""
    buffer = BytesIO()
    np.save(buffer, x, allow_pickle=True)
    return buffer.getvalue()
def bytes_to_array(b: bytes) -> np.ndarray:
    """Deserialize a numpy array from .npy bytes (inverse of array_to_bytes)."""
    return np.load(BytesIO(b), allow_pickle=True)
class ArrayShare:
    """One Shamir share of an array. Stores its x coordinate, and an array of y coordinates,
    one y coordinate per element of the original array. All of the x coordinates must match."""

    def __init__(self, x, ys, T, GF, K=1, encrypted=False):
        self.x = x
        self.ys = ys
        self.GF = GF
        self.T = T
        self.K = K
        self.encrypted = encrypted

    def encrypt(self, sk, pk):
        """Return an encrypted copy of this share (NaCl box, sk -> pk)."""
        assert not self.encrypted
        ciphertext = Box(sk, pk).encrypt(array_to_bytes(self.ys))
        return ArrayShare(self.x, ciphertext, self.T, self.GF, K=self.K, encrypted=True)

    def decrypt(self, sk, pk):
        """Return a decrypted copy of this share (inverse of encrypt)."""
        assert self.encrypted
        plaintext = Box(sk, pk).decrypt(self.ys)
        return ArrayShare(self.x, bytes_to_array(plaintext), self.T, self.GF, K=self.K, encrypted=False)

    def __str__(self):
        return f'ArrayShare(x={self.x}, len={len(self.ys)}, T={self.T}, K={self.K}, enc={self.encrypted})'

    __repr__ = __str__
def reshape(secrets, K, GF):
    """Reshape a flat array into rows of K entries, zero-padding the
    final row when the length is not a multiple of K."""
    count = len(secrets)
    if count % K == 0:
        return secrets.reshape((count // K, K))
    rows = count // K + 1
    padded = GF.Zeros((rows * K))
    padded[:count] = secrets
    return padded.reshape(rows, K)
def share_packed(secrets, range_shares, T, K, GF):
    """Packed Shamir sharing of a flat array of secrets.

    Each row of K secrets becomes the first K interpolation points of a
    random polynomial; shares are evaluations at x+K for each requested x.
    Returns a dict mapping each x in range_shares to its ArrayShare.
    """
    rows = np.atleast_2d(reshape(secrets, K, GF))
    num_points = K + T - 1
    # First K interpolation points carry the secrets; the rest are random.
    points = GF.Random((rows.shape[0], num_points))
    points[:, :K] = rows
    sample_xs = GF.Range(0, num_points)
    polys = [galois.lagrange_poly(sample_xs, row) for row in points]
    result = {}
    for x in range_shares:
        ys = GF(np.array([poly(x + K) for poly in polys]))
        result[x] = ArrayShare(x + K, ys, T, GF, K=K)
    return result
def share_array(secrets, range_shares, T, GF, K=1):
    """Secret-share an array of secrets among the given x coordinates.

    Thin wrapper around share_packed; returns a dict mapping each share's
    x coordinate to its ArrayShare object.
    """
    return share_packed(secrets, range_shares, T, K, GF)
def reconstruct_array(array_shares):
    """Given a list of ArrayShare objects, reconstructs the original array"""
    assert len(array_shares) > 0
    first = array_shares[0]
    array_len = len(first.ys)
    GF = first.GF
    T = first.T
    K = first.K
    assert len(array_shares) >= T + K, f'we have {len(array_shares)} shares, and we need {T + K}'
    # Every share must agree on length and sharing parameters.
    for share in array_shares:
        assert len(share.ys) == array_len
        assert share.GF == GF
        assert share.T == T
        assert share.K == K
    # Interpolate one polynomial per array position; its values at
    # x = 0..K-1 are the K packed secrets stored at that position.
    xs = GF([share.x for share in array_shares])
    secrets = []
    for pos in range(array_len):
        # TODO: check T
        ys = GF([share.ys[pos] for share in array_shares])
        poly = galois.lagrange_poly(xs, ys)
        secrets.extend(poly(j) for j in range(K))
    return GF(secrets)
def sum_share_array(shares):
    """Given a list of ArrayShare objects with matching x coordinates, returns a new
    ArrayShare object representing the column-wise sum of the input shares"""
    assert len(shares) > 0
    first = shares[0]
    x, GF, T, K = first.x, first.GF, first.T, first.K
    for share in shares:
        assert not share.encrypted
        assert share.x == x
        assert share.GF == GF
        assert share.T == T
        assert share.K == K
    # Summing shares point-wise yields a share of the element-wise sum.
    stacked = GF([share.ys for share in shares])
    return ArrayShare(x, stacked.sum(axis=0), T, GF, K=K, encrypted=False)
def prof():
    """Smoke test / micro-benchmark: share 500 field elements into 64
    packed shares (K=50, T=4) and reconstruct them."""
    for _ in range(1):
        GF = galois.GF(2**31-1)
        vals = GF(np.random.randint(5, 6, 500))
        shares = share_array(vals, range(1,65), 4, GF, K=50)
        #print(shares)
        r = reconstruct_array(list(shares.values()))
        print(r)


if __name__ == '__main__':
    prof()
    #cProfile.run('prof()', sort='cumtime')
| uvm-plaid/olympia | util/shamir_sharing.py | shamir_sharing.py | py | 4,835 | python | en | code | 2 | github-code | 36 |
#!/usr/bin/env python
# encoding: utf-8
"""Add-on registration settings for OSF Storage (the built-in storage add-on)."""

from . import routes, views, model
from . import listeners  # noqa

# Models provided by this add-on; OsfStorageNodeSettings holds per-node state.
MODELS = [model.OsfStorageNodeSettings]
NODE_SETTINGS_MODEL = model.OsfStorageNodeSettings

ROUTES = [
    routes.api_routes
]

SHORT_NAME = 'osfstorage'
FULL_NAME = 'OSF Storage'

OWNERS = ['node']

# Enabled on every node by default and cannot be removed.
ADDED_DEFAULT = ['node']
ADDED_MANDATORY = ['node']

VIEWS = []
CONFIGS = []

CATEGORIES = ['storage']

INCLUDE_JS = {
    'widget': [],
    'page': [],
    'files': [],
}

HAS_HGRID_FILES = True
GET_HGRID_DATA = views.osf_storage_root

# NOTE(review): the trailing comments say "5 GB" but the value is 5 * 1024;
# presumably the unit consumed elsewhere is MB — confirm against the consumer.
MAX_FILE_SIZE = 5 * 1024  # 5 GB
HIGH_MAX_FILE_SIZE = 5 * 1024  # 5 GB

# HERE = os.path.dirname(os.path.abspath(__file__))
NODE_SETTINGS_TEMPLATE = None  # no node settings view
USER_SETTINGS_TEMPLATE = None  # no user settings view
| karenhanson/osf.io_rmap_integration_old | website/addons/osfstorage/__init__.py | __init__.py | py | 780 | python | en | code | 0 | github-code | 36 |
jumlahHari = int(input("Masukkan Jumlah Hari : "))

# Peel off whole years (365 days each), then whole months (30 days each);
# whatever remains is reported as days.
jumlahTahun = 0
jumlahBulan = 0
while jumlahHari >= 365:
    jumlahHari -= 365
    jumlahTahun += 1
while jumlahHari >= 30:
    jumlahHari -= 30
    jumlahBulan += 1
print(jumlahTahun, "Tahun", jumlahBulan, "Bulan", jumlahHari, "Hari")
| KiritoEdward/LatihanIntroductionToPythonNiomic | PART2.py | PART2.py | py | 343 | python | id | code | 0 | github-code | 36 |
# Static Class demo: the data is stored on the class object itself, so every
# reference to `student` shares the same name/rollno/fee.
class student:
    def stuinput(name, rollno, fee):
        # No `self` parameter: intended to be called as student.stuinput(...),
        # which writes the values onto the class, not onto an instance.
        student.name = name
        student.rollno = rollno
        student.fee = fee

    def stuoutput():
        # Reads back the shared class attributes set by stuinput.
        print("Student Name=", student.name)
        print("Student Rollno=", student.rollno)
        print("Student Fee=", student.fee)


# Demo: store one student's data on the class and print it back.
student.stuinput("Gagan",1,12.12)
student.stuoutput()
| Karan-Johly/Python_journey | d13_staticclass_3.py | d13_staticclass_3.py | py | 377 | python | en | code | 0 | github-code | 36 |
from collections import deque


class Solution:
    def networkDelayTime(self, times: list[list[int]], n: int, k: int) -> int:
        """Return the time for a signal sent from node k to reach all n
        nodes, or -1 if some node is unreachable.

        SPFA-style relaxation: repeatedly pop a node and relax its outgoing
        edges, re-queueing any node whose distance improved.
        """
        # Adjacency map: source -> list of (travel time, target).
        # (setdefault avoids the explicit membership check of the original.)
        reach = {}
        for u, v, w in times:
            reach.setdefault(u, []).append((w, v))

        # float('inf') instead of the bare name `inf`, which is undefined
        # outside the LeetCode harness (only deque is imported here).
        INF = float('inf')
        dist = [INF] * (n + 1)
        dist[0] = dist[k] = 0  # index 0 is unused (nodes are 1-based)

        queue = deque([k])
        while queue:
            node = queue.popleft()
            for time, nxt in reach.get(node, ()):
                if dist[node] + time < dist[nxt]:
                    dist[nxt] = dist[node] + time
                    queue.append(nxt)

        delay = max(dist)
        return -1 if delay == INF else delay
# Operators in Python
# Arithmetic Operators
# Assignment Operators
# Comparison Operators
# Logical Operators
# Identity Operators
# Membership Operators
# Bitwise Operators

# Arithmetic Operators
# print("5+6 is",5+6)
# print("5-6 is",5-6)
# print("5*6 is",5*6)
# print("5/6 is",5/6)
# print("5**3 is",5**3)   # exponentiation
# print("5 % 5 is",5%5)   # remainder
# print("15//6 is",15//6) # floor division
# print("Assignement")
# # Assignment Operators
# x=5
# print(x)
# x%=7 #x=x%7
# print(x)

# Comparison Operators
i=5
# print(i and 5)

# Logical Operators
a=True
b=False

# Identity Operators
# print(5 is not 5)

# Membership Operators
# NOTE(review): the name `list` shadows the built-in list type.
list=[3,3,2,2,39,33,35,32]
# print(324 not in list)

# Bitwise Operators (binary representations)
# 0-00
# 1-01
# 2- 10
# 3- 11
print(0&2)   # bitwise AND: 00 & 10 -> 0
print(0 | 3) # bitwise OR:  00 | 11 -> 3
from typing import Any, Callable, Dict, Optional

import torch
import torch.nn as nn

from .gaussian_diffusion import GaussianDiffusion
from .k_diffusion import karras_sample

# Default hyperparameters for the Karras sampler (see k_diffusion.karras_sample).
DEFAULT_KARRAS_STEPS = 64
DEFAULT_KARRAS_SIGMA_MIN = 1e-3
DEFAULT_KARRAS_SIGMA_MAX = 160
DEFAULT_KARRAS_S_CHURN = 0.0
def uncond_guide_model(
    model: Callable[..., torch.Tensor], scale: float
) -> Callable[..., torch.Tensor]:
    """Wrap *model* for classifier-free guidance.

    The returned function expects a batch whose two halves are identical
    noisy inputs (conditional first, unconditional second in kwargs); it
    mixes the two epsilon predictions with the given guidance scale and
    leaves the remaining output channels untouched.
    """
    def model_fn(x_t, ts, **kwargs):
        cond_half = x_t[: len(x_t) // 2]
        batch = torch.cat([cond_half, cond_half], dim=0)
        out = model(batch, ts, **kwargs)
        # First three channels are epsilon; the rest pass through unchanged.
        eps = out[:, :3]
        rest = out[:, 3:]
        cond_eps, uncond_eps = torch.chunk(eps, 2, dim=0)
        guided = uncond_eps + scale * (cond_eps - uncond_eps)
        guided = torch.cat([guided, guided], dim=0)
        return torch.cat([guided, rest], dim=1)

    return model_fn
def sample_latents(
    *,
    batch_size: int,
    model: nn.Module,
    diffusion: GaussianDiffusion,
    model_kwargs: Dict[str, Any],
    guidance_scale: float,
    clip_denoised: bool,
    use_fp16: bool,
    use_karras: bool,
    karras_steps: int,
    sigma_min: float,
    sigma_max: float,
    s_churn: float,
    device: Optional[torch.device] = None,
    progress: bool = False,
) -> torch.Tensor:
    """Draw `batch_size` latent samples of dimension `model.d_latent`.

    Uses the Karras sampler when `use_karras` is set, otherwise the plain
    ancestral `diffusion.p_sample_loop`.  A `guidance_scale` other than
    0 or 1 enables classifier-free guidance by appending a zeroed copy of
    every conditioning tensor.
    """
    sample_shape = (batch_size, model.d_latent)
    if device is None:
        device = next(model.parameters()).device

    if hasattr(model, "cached_model_kwargs"):
        # Let the model pre-compute/expand conditioning tensors for the batch.
        model_kwargs = model.cached_model_kwargs(batch_size, model_kwargs)
    if guidance_scale != 1.0 and guidance_scale != 0.0:
        # Classifier-free guidance: append an unconditional (zeroed) copy of
        # every conditioning tensor along the batch dimension.
        for k, v in model_kwargs.copy().items():
            model_kwargs[k] = torch.cat([v, torch.zeros_like(v)], dim=0)

    # NOTE(review): duplicate of the assignment above — harmless but redundant.
    sample_shape = (batch_size, model.d_latent)
    with torch.autocast(device_type=device.type, enabled=use_fp16):
        if use_karras:
            samples = karras_sample(
                diffusion=diffusion,
                model=model,
                shape=sample_shape,
                steps=karras_steps,
                clip_denoised=clip_denoised,
                model_kwargs=model_kwargs,
                device=device,
                sigma_min=sigma_min,
                sigma_max=sigma_max,
                s_churn=s_churn,
                guidance_scale=guidance_scale,
                progress=progress,
            )
        else:
            # The ancestral sampler handles guidance by doubling the batch
            # and wrapping the model (see uncond_guide_model).
            internal_batch_size = batch_size
            if guidance_scale != 1.0:
                model = uncond_guide_model(model, guidance_scale)
                internal_batch_size *= 2
            samples = diffusion.p_sample_loop(
                model,
                shape=(internal_batch_size, *sample_shape[1:]),
                model_kwargs=model_kwargs,
                device=device,
                clip_denoised=clip_denoised,
                progress=progress,
            )

    return samples
| openai/shap-e | shap_e/diffusion/sample.py | sample.py | py | 2,871 | python | en | code | 10,619 | github-code | 36 |
"""
main.py

train the deep image prior model and get the denoised figure, calculate PSNR when required.
"""
import hydra
from pytorch_lightning import Trainer, seed_everything

from src.conf import Config
from src.data.datamodule import DeepImagePriorDataModule
from src.model.model import DeepImagePriorModel

import logging

# Silence Lightning's per-epoch INFO chatter.
logging.getLogger("lightning").setLevel(logging.ERROR)


@hydra.main(config_path=".", config_name="base_config", version_base="1.2")
def train_app(conf: Config) -> None:
    """
    The main train loop: fit one Deep-Image-Prior model per image directory
    under conf.data.root_dir, stopping after total_run_figs_number figures.
    """
    # * seed
    if conf.train.random_seed:
        seed_everything(conf.train.random_seed)

    for idx, fig_dir in enumerate(conf.data.root_dir.iterdir()):
        if fig_dir.is_dir():
            light_data = DeepImagePriorDataModule(conf.data, img_dir=fig_dir)
            light_data.setup(stage="fit")
            print(f"train fig{idx}, key: {light_data.dataset[0]['key']}")
            light_model = DeepImagePriorModel(conf)
            train_conf = conf.train
            trainer = Trainer(
                accelerator=train_conf.accelerator,
                devices=(
                    train_conf.distributed_devices if train_conf.accelerator == "gpu" else None),
                max_epochs=train_conf.epochs,
                num_sanity_val_steps=0,
                check_val_every_n_epoch=train_conf.check_val_every_n_epoch,
                enable_progress_bar=True
            )
            trainer.fit(light_model, light_data)
        # NOTE(review): `idx` counts every entry of root_dir, including
        # non-directories — confirm that is the intended stop condition.
        if idx == conf.data.total_run_figs_number-1:
            break


if __name__ == "__main__":
    train_app()  # pylint: disable=no-value-for-parameter
| ziyixi/Deep-Image-Prior-Pytorch-Lightning | main.py | main.py | py | 1,640 | python | en | code | 0 | github-code | 36 |
1700457476 | #!/usr/bin/python3
"""
Given an integer, convert it into a binary string
Assume integer >= 0
Would ask interviewer max int size (in bits) - for today will assume 32
"""
def intToBinary(i):
    """Return the binary string of a non-negative int (no '0b' prefix).

    Walks the powers of two from the most significant bit downward,
    emitting '1' and subtracting when a power fits, '0' otherwise.
    Runs in O(D) where D is the number of binary digits.
    """
    if i == 0:
        return '0'
    digits = ''
    # Highest set bit index == most significant binary digit.
    power = i.bit_length() - 1
    while power >= 0:
        if 2 ** power <= i:
            digits += '1'
            i -= 2 ** power
        else:
            digits += '0'
        power -= 1
    return digits
| phibzy/InterviewQPractice | Solutions/IntToBinary/intToBinary.py | intToBinary.py | py | 1,334 | python | en | code | 0 | github-code | 36 |
import asyncio
import aiohttp
from warnings import warn

# The VK API token is kept out of the repo in a plain text file.
with open("vk_access_token.txt", mode="r") as file:
    vk_access_token = file.read()

vk_api_version = "5.154"
owner_id = "-160464793"  # wall to watch (negative id denotes a community)
# wall.get request for just the single newest post on that wall.
url = f"https://api.vk.ru/method/wall.get?v={vk_api_version}&owner_id={owner_id}&count=1&access_token={vk_access_token}"
async def get_last_text_message():
    """Fetch the text of the newest post on the watched wall.

    Returns None when the response carries no post text (API error
    payload, empty wall, missing field).
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            payload = await response.json()
    # Narrowed from a bare `except:`, which also swallowed cancellation
    # and unrelated programming errors.
    try:
        return payload["response"]["items"][0]["text"]
    except (KeyError, IndexError, TypeError):
        return None
async def ticker(delay):
    """Async generator: yield the newest wall post text every `delay` seconds."""
    while True:
        yield await get_last_text_message()
        await asyncio.sleep(delay)


async def set_morning_trigger(trigger_function, delay=30):
    """Call `trigger_function(text)` whenever the newest wall post changes.

    `delay` is the polling period in seconds; values below 28 may exceed
    the daily wall.get usage limit, hence the warning.
    """
    if delay < 28:
        warn("The delay is too small. Limit of wall.get usage per day may be exceeded.")
    generator = ticker(delay)

    async def get_last_text():
        return await generator.asend(None)

    text = ""
    while True:
        new_text = await get_last_text()
        # Fire only on a real change; None means the API returned no post.
        if new_text != None and text != new_text:
            text = new_text
            # Schedule the callback without blocking the polling loop.
            asyncio.gather(trigger_function(text))


if __name__ == "__main__":
    # Demo trigger: just echo the post text.
    async def function(string):
        print(string)

    asyncio.run(set_morning_trigger(function))
| SaGiMan6/sesc-nsu-assistant-bot | scripts/morning_exercise_operations.py | morning_exercise_operations.py | py | 1,353 | python | en | code | 0 | github-code | 36 |
def sortSplit(array):
    """Merge sort: return a new sorted list built from *array*.

    Recursively splits the input in half, sorts both halves, then merges
    them stably (ties keep the left half's elements first).
    """
    size = len(array)
    if size <= 1:
        return array
    mid = size // 2
    left = sortSplit(array[:mid])
    right = sortSplit(array[mid:])

    merged = [0] * (len(left) + len(right))
    li = ri = pos = 0
    # Merge while both halves still have elements.
    while li < len(left) and ri < len(right):
        if left[li] <= right[ri]:
            merged[pos] = left[li]
            li += 1
        else:
            merged[pos] = right[ri]
            ri += 1
        pos += 1
    # Copy whichever half still has leftovers.
    while li < len(left):
        merged[pos] = left[li]
        li += 1
        pos += 1
    while ri < len(right):
        merged[pos] = right[ri]
        ri += 1
        pos += 1
    return merged


print(sortSplit([3, 6, 1, 8, 9, 8, 8]))
# encoding:utf-8
__author__ = 'shiliang'
__date__ = '2019/4/9 21:12'

import requests
from lxml import etree
import pandas as pd
import xlrd
import time
import re
import aiohttp
import asyncio

# Global state
headers = {
    'Cookie': 'OCSSID=sfg10a19had6hfavkctd32otf6',
    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
                  '(KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
}
sem = asyncio.Semaphore(20)  # caps concurrent coroutines so we do not crawl too fast
oneSheetList = []  # accumulates the metadata rows for one whole sheet
def getURLlist(openFile, sheetNum, colx, start_rowx):
    '''
    Read one column (from a given start row onward) of a given sheet.
    :param openFile: path of the excel workbook
    :param sheetNum: sheet index
    :param colx: column index to read
    :param start_rowx: first row to read (0-based)
    :return urls: list containing every URL in that column
    '''
    # Open the workbook
    data = xlrd.open_workbook(openFile)
    # Select the sheet
    table = data.sheets()[sheetNum]
    # start_rowx=1 means reading begins at the second row (0-based)
    urls = table.col_values(colx=colx, start_rowx=start_rowx)
    return urls
def cleanStr(string):
    """Clean a string: trim leading/trailing newlines and tabs, then strip
    HTML tags (<...>) and C-style comment blocks (/*...*/)."""
    trimmed = string.strip('\n\t')
    return re.sub(r'\<.*?\>|\/\*.*?\*\/', '', trimmed)
async def getOneURLMetaData(number, url):
    '''
    Coroutine: fetch and parse the metadata of one URL.
    :param number: index of this URL (int), used to order the final result set
    :param url: the URL to scrape (str)
    :return [number, url]: index and URL whose crawl + parse + clean finished
    '''
    with(await sem):
        # `async with` is an asynchronous context manager
        async with aiohttp.ClientSession() as session:  # open a session
            async with session.request('GET', url, headers=headers, timeout=20) as resp:  # send the request
                html = await resp.read()  # raw response bytes
                # Parse the HTML table that holds the metadata
                xml = etree.HTML(html)
                content = xml.xpath('//*[@id="content"]/table/tr/td/text()')
                # Normalize the extracted cells
                oneURLList = []  # all metadata for the current URL
                creatorList = []  # author / affiliation / country for this URL, '0' when missing
                title = ''  # title of the current URL
                abstract = ''  # abstract of the current URL
                keywords = '0'  # keywords of the current URL
                for index, text in enumerate(content):
                    # '\xa0' marks the first/last cell of a table row
                    if text == '\xa0':
                        # 1. "Title" row
                        if content[index+2] == 'Title':
                            title = content[index + 4]  # store the Title
                            title = cleanStr(title)  # clean the title
                            continue
                        if content[index+3] == 'Abstract':
                            abstract = content[index + 4]  # store the Abstract
                            continue
                        if content[index+3] == 'Keyword(s)':
                            # keep the default '0' when Keyword(s) is empty
                            if content[index+4] != '\xa0':
                                keywords = content[index + 4]  # store the Keyword(s)
                                keywords = cleanStr(keywords)  # clean the keywords
                            continue
                        if content[index+2] == 'Creator':
                            clean_creator = cleanStr(content[index + 4])
                            # split on "; " into name / affiliation / country
                            lst = clean_creator.split('; ')
                            for num, info in enumerate(lst):  # missing fields default to '0'
                                # site sometimes records too many fields: stop after three
                                if num > 2:
                                    break
                                creatorList.append(info)  # author name, affiliation, country
                                # site sometimes records too few fields: pad the last with '0'
                                if len(lst) < 3 and num == 1:
                                    creatorList.append('0')
                            continue
                oneURLList.append(number)
                oneURLList.append(title)
                oneURLList.append(abstract)
                oneURLList.append(keywords)
                oneURLList.append(creatorList)
                # take the creator list back off the end of oneURLList
                creatorList = oneURLList[-1]
                # drop the last element: slice keeps [0, -1)
                oneURLList = oneURLList[:-1]
                # ...and append its entries one by one (flatten the row)
                for info in creatorList:
                    oneURLList.append(info)
                oneSheetList.append(oneURLList)
                print('已完成第'+str(number)+'个url的爬取+解析+清洗')
                return [number,url]
async def main(urls):
    '''
    Coroutine driver: fetch the metadata of every URL concurrently.
    :param urls: list of URLs
    '''
    # one task per URL; numbering starts at 1
    tasks = [getOneURLMetaData(number+1, url) for number, url in enumerate(urls)]
    done,pending = await asyncio.wait(tasks)  # sub-generator
    for r in pending:  # done/pending hold tasks, so fetch each result() individually
        print('爬取失败的url:'+r.result())


def coroutine_getMetaData(urls, topCount=None):
    '''
    Entry point: run the event loop that scrapes all URLs.
    :param urls: list of URLs
    :param topCount: only scrape the first topCount URLs (optional)
    '''
    urlsList = []
    if topCount is not None:
        for i in range(topCount):
            urlsList.append(urls[i])
    else:
        urlsList = urls
    # Drive the coroutines with a dedicated event loop.
    loop = asyncio.get_event_loop()  # create an event loop object
    try:
        loop.run_until_complete(main(urlsList))  # run until the last task finishes
    finally:
        loop.close()  # shut the loop down
def list2excel(saveFile, oneSheetList, startrow, startcol=2, sheet_name='Sheet1'):
    '''
    Write a list of rows into a given position of an excel sheet.
    :param saveFile: path of the output excel file
    :param oneSheetList: list holding one sheet's worth of metadata rows
    :param startrow: first row of the target range
    :param startcol: first column of the target range
    :param sheet_name: name of the sheet to write into
    :return:
    '''
    df = pd.DataFrame(oneSheetList)
    # df = df.T  # matrix transpose into a row vector (unused)
    # na_rep would fill missing values; index=False drops the row index and
    # header=False drops the column header
    df.to_excel(saveFile, sheet_name=sheet_name, startrow=startrow, startcol=startcol,
                index=False, header=False)
    print('数据写入excel成功.')


if __name__ == '__main__':
    openFile = 'C:\\Users\\Administrator\\Desktop\\2014-2017.xlsx'
    saveFile = 'C:\\Users\\Administrator\\Desktop\\2017.xlsx'
    # Read the URL column from the input workbook.
    urls = getURLlist(openFile, sheetNum=0, colx=1, start_rowx=1)
    # Scrape every URL concurrently; results accumulate in oneSheetList.
    start = time.time()
    coroutine_getMetaData(urls)
    print('总爬取+解析耗时:%.5f秒' % float(time.time() - start))
    # Sort the nested result list by its first element (the URL index).
    oneSheetList.sort(key=lambda item: item[0], reverse=False)
    # Write everything into the output workbook.
    list2excel(saveFile, oneSheetList, startrow=0, startcol=0, sheet_name='Sheet1')
| SparksFly8/DataMingingPaper | spider/metadata_Coroutine_Spider.py | metadata_Coroutine_Spider.py | py | 8,375 | python | zh | code | 69 | github-code | 36 |
def fact(n):
    """Print n! computed iteratively; for n < 2 this prints 1."""
    res = 1
    for k in range(n, 1, -1):
        res *= k
    print(res)
def recurFact(n):
    """Return n! recursively; the base case is 0! == 1."""
    return 1 if n == 0 else n * recurFact(n - 1)
# Read n from stdin and show both implementations side by side.
n = int(input())
fact(n)
print(recurFact(n))
| asu2sh/dev | DSA_SPy/1.Maths/3_factorial.py | 3_factorial.py | py | 224 | python | en | code | 3 | github-code | 36 |
3097533124 | import rospy, math, numpy, tf
from collections import deque
from spencer_bagfile_tools.msg import AdditionalOdometryData
from dynamic_reconfigure.server import Server
from spencer_bagfile_tools.cfg import ReconstructOdometryConfig
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point, Quaternion
from std_msgs.msg import ColorRGBA
from nav_msgs.msg import Odometry
class State(object):
    """One reconstructed odometry sample: planar pose (x, y, theta),
    cumulative travelled distance, and its ROS timestamp."""

    def __init__(self):
        self.x = 0
        self.y = 0
        self.theta = 0
        self.totalDistance = 0
        self.stamp = rospy.Time(0)
class OdometryController(object):
    def __init__(self):
        self.msgHistory = []  # every AdditionalOdometryData message received (used for replays)
        self.stateHistory = self.emptyStateHistory()  # bounded trail of reconstructed poses
        self.previousMsg = self.previousState = None
        self.rebuildingEntirePath = False
        self.zeroPosition()
        # Platform geometry and encoder calibration constants.
        self.WHEEL_BASE = 0.665
        self.TICKS_PER_METER_LEFT = 56263.5
        self.TICKS_PER_METER_RIGHT = 57099.7
        self.previousTimestampMarkerCount = 0  # for deleting stale timestamp markers
def zeroPosition(self):
self.stateHistory.append(State())
self.previousState = self.stateHistory[0]
    def run(self):
        """Set up publishers, the dynamic-reconfigure server and the odometry
        subscriber, then spin until shutdown."""
        self.markerArrayPublisher = rospy.Publisher("/spencer_bagfile_tools/reconstructed_odom_path", MarkerArray, queue_size=1)
        self.odomPublisher = rospy.Publisher("/spencer/sensors/odom", Odometry, queue_size=3)
        reconfigureServer = Server(ReconstructOdometryConfig, self.reconfigure)
        topicName = "/spencer/sensors/additional_odom_data"
        self.subscriber = rospy.Subscriber(topicName, AdditionalOdometryData, self.additionalOdometryDataCallback)
        rospy.loginfo("Reconstructing odometry from " + topicName + ", now listening for messages...")
        rospy.spin()
    def additionalOdometryDataCallback(self, msg):
        """Integrate one live encoder message and refresh the outputs.

        NOTE(review): the original indentation was lost; this reconstruction
        guards all four calls, assuming live messages are ignored while a
        reconfigure-triggered rebuild is replaying msgHistory — confirm.
        """
        if not self.rebuildingEntirePath:
            self.updateState(msg)
            self.msgHistory.append(msg)
            self.publishOdom()
            self.visualizePath()
    def reconfigure(self, config, level):
        """Dynamic-reconfigure callback: cache the tuning parameters and
        optionally replay the whole message history with the new calibration."""
        self.extraCalibOverallMultiplier = config["extra_calib_overall_multiplier"]
        self.extraCalibLeftMultiplier = config["extra_calib_left_multiplier"]
        self.lineWidth = config["line_width"]
        self.arrowLength = config["arrow_length"]
        self.showWaypoints = config["show_waypoints"]
        self.recalculatePath = config["recalculate_path"]
        # NOTE(review): assumes level > 0 only for user-initiated parameter
        # changes (not the initial callback) — confirm against the cfg levels.
        if level > 0 and self.recalculatePath:
            self.rebuildEntirePath()
        return config
def emptyStateHistory(self):
# Limit max. state history length to prevent bad performance after driving for a while
# NOTE: msgHistory might still grow unboundedly, but there's no way of avoiding that...
# However, that is mainly a memory issue as the whole history is only processed in rebuildEntirePath()
return deque(maxlen=5000)
    def rebuildEntirePath(self):
        """Recompute the whole path by replaying msgHistory with the
        currently configured calibration multipliers."""
        rospy.loginfo("Odometry parameters have changed! Rebuilding entire path!")
        # Guard against re-entrant rebuilds.
        if self.rebuildingEntirePath:
            return
        self.rebuildingEntirePath = True
        self.stateHistory = self.emptyStateHistory()
        self.zeroPosition()
        self.previousMsg = None
        # Replay every recorded message through the normal integration step.
        for msg in self.msgHistory:
            self.updateState(msg)
        self.rebuildingEntirePath = False
        self.publishOdom()
        self.visualizePath()
    def updateState(self, msg):
        """Integrate one encoder message into a new State via differential-drive
        dead reckoning (tick deltas -> wheel distances -> pose update)."""
        newState = State()
        newState.stamp = msg.header.stamp
        # First message: use its own ticks so the deltas start at zero.
        previousLeftTicks = self.previousMsg.ticksLeft if self.previousMsg else msg.ticksLeft
        previousRightTicks = self.previousMsg.ticksRight if self.previousMsg else msg.ticksRight
        leftDiff = msg.ticksLeft - previousLeftTicks
        rightDiff = msg.ticksRight - previousRightTicks
        # Calculate metric travelled distances of both wheels and the base
        metersTravelledLeft = leftDiff * msg.calibOverallMultiplier * self.extraCalibOverallMultiplier * msg.calibLeftEncMultiplier * self.extraCalibLeftMultiplier / self.TICKS_PER_METER_LEFT
        metersTravelledRight = rightDiff * msg.calibOverallMultiplier * self.extraCalibOverallMultiplier / self.TICKS_PER_METER_RIGHT
        distance = (metersTravelledLeft + metersTravelledRight) / 2.0
        # Update position and bearing; with this convention theta = 0 points
        # along +y (x uses sin, y uses cos below).
        newState.theta = self.previousState.theta + (metersTravelledLeft - metersTravelledRight) / self.WHEEL_BASE
        newState.theta -= (int((newState.theta/(2*math.pi) ))) * 2*math.pi # clip to 2pi
        newState.totalDistance = self.previousState.totalDistance + math.fabs(distance)
        newState.x = self.previousState.x + distance * math.sin(newState.theta)
        newState.y = self.previousState.y + distance * math.cos(newState.theta)
        positionTolerance = 0.1 # in meters
        if math.hypot(newState.x - self.stateHistory[-1].x, newState.y - self.stateHistory[-1].y) > positionTolerance:
            # Do not cache every single state if the change in position is minimal, otherwise we'll soon run
            # out of memory (note we still store previousState, since it is needed by publishOdom() and updateState())
            self.stateHistory.append(newState)
        self.previousState = newState # FIXME
        self.previousMsg = msg
    def publishOdom(self):
        """Publish the latest reconstructed pose as nav_msgs/Odometry in the
        'odom' frame (Python 2 / rospy code — note the xrange calls)."""
        odom = Odometry()
        odom.header.stamp = self.previousMsg.header.stamp if self.previousMsg else rospy.Time.now()
        odom.header.frame_id = "odom"
        odom.pose.pose.position.x = self.previousState.x
        odom.pose.pose.position.y = self.previousState.y
        # Diagonal covariances: moderate confidence in pose (0.1), effectively
        # none in twist (999999).
        for row in xrange(0, 6):
            for col in xrange(0, 6):
                odom.pose.covariance[6*row+col] = 0 if row != col else 0.1
                odom.twist.covariance[6*row+col] = 0 if row != col else 999999
        # theta = 0 points along +y in updateState's convention, so convert to
        # a standard ROS yaw with -theta + pi/2.
        q = tf.transformations.quaternion_from_euler(0, 0, -self.previousState.theta + math.pi/2)
        odom.pose.pose.orientation = Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])
        if len(self.stateHistory) >= 2:
            # Displacement since the last cached state, used as a rough velocity.
            # NOTE(review): this is not divided by the elapsed time — confirm units.
            odom.twist.twist.linear.x = odom.pose.pose.position.x - self.stateHistory[-2].x
            odom.twist.twist.linear.y = odom.pose.pose.position.y - self.stateHistory[-2].y
        self.odomPublisher.publish(odom)
    def visualizePath(self):
        """Publish an RViz MarkerArray showing the travelled path, waypoint
        spheres with timestamp labels, and a velocity arrow head.

        Skips all work when nobody subscribes to the marker topic.
        """
        if self.markerArrayPublisher.get_num_connections() <= 0:
            return
        markerArray = MarkerArray()
        # Line strip tracing the full state history.
        pathMarker = Marker()
        pathMarker.header.stamp = rospy.Time.now()
        pathMarker.header.frame_id = "odom"
        pathMarker.ns = "Path"
        pathMarker.type = Marker.LINE_STRIP
        pathMarker.id = 0
        pathMarker.color = ColorRGBA(r=1, g=1, a=1)
        pathMarker.scale.x = 0.05 * self.lineWidth
        # Sphere list for the sparse waypoints.
        waypointMarker = Marker()
        waypointMarker.header = pathMarker.header
        waypointMarker.ns = "Waypoints"
        waypointMarker.type = Marker.SPHERE_LIST
        waypointMarker.id = 1
        waypointMarker.color = ColorRGBA(r=1, g=1, a=1)
        waypointMarker.scale.x = waypointMarker.scale.y = 0.1 * self.lineWidth
        lastWaypointTime = float("-inf")
        lastWaypointPos = (float("99999"), float("99999"))
        # Generate path and waypoints: a waypoint is dropped at most every 5 s
        # and only after moving more than 1 m from the previous waypoint.
        for state in self.stateHistory:
            pathMarker.points.append(Point(x=state.x, y=state.y))
            if state.stamp.to_sec() - lastWaypointTime > 5 and self.showWaypoints:
                dx = state.x - lastWaypointPos[0]
                dy = state.y - lastWaypointPos[1]
                if math.sqrt(dx*dx + dy*dy) > 1:
                    lastWaypointTime = state.stamp.to_sec()
                    lastWaypointPos = (state.x, state.y)
                    waypointMarker.points.append(Point(x=state.x, y=state.y))
                    # One floating text label per waypoint; ids start at 3 so
                    # they never collide with markers 0-2 used below.
                    timestampMarker = Marker()
                    timestampMarker.header = waypointMarker.header
                    timestampMarker.ns = "Timestamps"
                    timestampMarker.type = Marker.TEXT_VIEW_FACING
                    timestampMarker.id = 3 + len(markerArray.markers)
                    timestampMarker.color = ColorRGBA(r=0.6, a=1)
                    timestampMarker.scale.z = 0.1 * self.lineWidth
                    timestampMarker.pose.position.x = state.x
                    timestampMarker.pose.position.y = state.y
                    timestampMarker.text = "%.1f" % state.stamp.to_sec()
                    markerArray.markers.append(timestampMarker)
        # Delete old markers: if fewer timestamps exist this cycle than last
        # cycle, explicitly DELETE the now-stale ids so RViz drops them.
        currentTimestampMarkerCount = len(markerArray.markers)
        for i in xrange(0, self.previousTimestampMarkerCount - currentTimestampMarkerCount):
            timestampMarker = Marker()
            timestampMarker.header = waypointMarker.header
            timestampMarker.ns = "Timestamps"
            timestampMarker.action = Marker.DELETE
            timestampMarker.id = 3 + currentTimestampMarkerCount + i
            markerArray.markers.append(timestampMarker)
        self.previousTimestampMarkerCount = currentTimestampMarkerCount
        # Velocity arrow: direction smoothed over the last few path points,
        # arrow-head opening angle scaled with speed via interpolation.
        velocitySmoothingNoPoints = 5
        if len(pathMarker.points) > velocitySmoothingNoPoints:
            arrowHeadMarker = Marker()
            arrowHeadMarker.header = pathMarker.header
            arrowHeadMarker.ns = "Path-ArrowHead"
            arrowHeadMarker.type = Marker.LINE_STRIP
            arrowHeadMarker.id = 2
            arrowHeadMarker.color = ColorRGBA(r=1, g=1, a=1)
            arrowHeadMarker.scale.x = arrowHeadMarker.scale.y = 0.1 * self.lineWidth
            pointTip = numpy.array([pathMarker.points[-1].x, pathMarker.points[-1].y])
            lastVelocity = numpy.array([pathMarker.points[-1].x - pathMarker.points[-velocitySmoothingNoPoints].x,
                                        pathMarker.points[-1].y - pathMarker.points[-velocitySmoothingNoPoints].y])
            speed = numpy.linalg.norm(lastVelocity)
            # NOTE(review): divides by speed without a zero guard — a history
            # that ends with >5 identical points would divide by zero; confirm.
            lastVelocity /= speed
            lastVelocity *= 0.3 * self.arrowLength
            steepnessAngle = numpy.interp(speed, [0.03, 0.3], [0, 75])
            pointLeft = pointTip + self.rotateVector(lastVelocity, 90 + steepnessAngle )
            pointRight = pointTip + self.rotateVector(lastVelocity, -(90 + steepnessAngle) )
            arrowHeadMarker.points.append(Point(x=pointLeft[0], y=pointLeft[1]))
            arrowHeadMarker.points.append(Point(x=pointTip[0], y=pointTip[1]))
            arrowHeadMarker.points.append(Point(x=pointRight[0], y=pointRight[1]))
            markerArray.markers.append(arrowHeadMarker)
        markerArray.markers.append(pathMarker)
        markerArray.markers.append(waypointMarker)
        self.markerArrayPublisher.publish(markerArray)
def rotateVector(self, vector, angleDeg):
theta = (angleDeg/180.) * numpy.pi
rotMatrix = numpy.array([[numpy.cos(theta), -numpy.sin(theta)],
[numpy.sin(theta), numpy.cos(theta)]])
return numpy.dot(rotMatrix, vector)
if __name__ == '__main__':
    # Start the ROS node and hand control to the odometry reconstruction loop.
    rospy.init_node("reconstruct_odometry")
    odometryController = OdometryController()
    odometryController.run()
| spencer-project/spencer_people_tracking | utils/spencer_bagfile_tools/scripts/reconstruct_odometry.py | reconstruct_odometry.py | py | 11,138 | python | en | code | 620 | github-code | 36 |
18400663422 | import os
from fastapi import status, HTTPException
from pydantic import BaseModel, validator
from typing import Union
ROOT_DIR = os.path.abspath('.')
ROOT_DIR = os.path.join(ROOT_DIR, 'assets')
PATH_REGEX = r'^(?![0-9._/])(?!.*[._]$)(?!.*\d_)(?!.*_\d)[a-zA-Z0-9_/]+$'
class Predicted(BaseModel):
    """Response schema for a face-recognition prediction."""
    recognized: Union[list, None] = []   # names/labels of recognized people
    unknowns: Union[int, None] = 0       # count of unrecognized faces
    peoples: Union[int, None] = 0        # total faces detected in the image
    id_img: str                          # identifier of the processed image
    spend_time: float                    # processing time in seconds
class SelectModel(BaseModel):
    """Request schema selecting a trained model file under assets/trained_models.

    The validator resolves the bare file name to an absolute path (appending
    '.pk' when missing) and rejects unknown files with HTTP 400.
    """
    model_file: str = 'example_model.pk'
    @validator('model_file', pre=True)
    def validate_trained_model_folder(cls, value):
        """Resolve *value* to an existing .pk file path or raise HTTP 400."""
        directory_path = os.path.join(ROOT_DIR, 'trained_models')
        file_path = os.path.join(directory_path, value)
        split_current = os.path.split(file_path)[1]
        # NOTE(review): the 20-char limit is measured on the full file name
        # including the '.pk' extension — confirm that is intended.
        if len(split_current) > 20:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                detail='Name cannot be longer than 20 characters')
        if not value.endswith('.pk'):
            # Caller omitted the extension: try again with '.pk' appended.
            join_pk = f'{file_path}.pk'
            if not os.path.isfile(join_pk):
                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                    detail=f'Not found path file {join_pk}')
            return join_pk
        if not os.path.isfile(file_path):
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                detail=f'Not found path file {file_path}')
        return file_path
    class Config:
        # Example payload shown in the OpenAPI docs.
        schema_extra = {
            'example': {
                'model_file': 'example_model.pk'
            }
        }
| watcharap0n/api-facial-recognition-dlib | service-rec/server/schemas/predict.py | predict.py | py | 1,636 | python | en | code | 1 | github-code | 36 |
13463267955 | # -*- coding: utf-8 -*-
from clean_access import clean_access
from ip_user_follow import region_follow,city_follow,visit_time_follow
from in_excel_openpyxl import in_excel,in_excel2,in_excel3
from refer_user_follow import user_follow1,user_follow2,user_follow3,user_follow4
from in_dataframe import in_dataframe
# clean_access('access_20180528.log')
# Parse the raw Apache access log into result.txt, then load it as a DataFrame.
clean_access('access2.log')
result_file = "user_analysis.xlsx"
filename = 'result.txt'
df = in_dataframe(filename)
# --- Sheets aggregating visitors by region, city and visit time ---
sheet_name1 = 'region'
region_list = region_follow(df)
in_excel(region_list, result_file, sheet_name1,2,'A','B',1)
sheet_name2 = 'city'
city_list = city_follow(df)
in_excel(city_list, result_file, sheet_name2,2,'A','B',1)
sheet_name3 = 'visit_time'
visit_time_list = visit_time_follow(df)
in_excel(visit_time_list, result_file, sheet_name3,2,'A','B',1)
# result_file = "user_analysis.xlsx"
# filename='result.txt'
# df = in_dataframe(filename)
#request
# --- Request sheet: referers and the requests that followed them ---
index0 = ('referer', 'referer_num')
sheet_tite = 'request'
data_list = user_follow3(df, index0)
# print data_list
in_excel3(['referer'], result_file, sheet_tite)
in_excel2(data_list, result_file, sheet_tite)
index2 = ('referer', 'request_next', 'request_num')
sheet_tite2 = 'request'
data_list2 = user_follow2(df, index2)
in_excel(data_list2, result_file, sheet_tite2, 3, 'B', 'C', 2)
#referer
index1 = ('referer', 'referer_num')
sheet_tite1 = 'referer'
data_list1 = user_follow1(df, index1)
in_excel(data_list1, result_file,sheet_tite1,2,'A','B',1)
# utm
index3 = ('utm', 'utm_num')
sheet_tite3 = 'utm'
data_list3 = user_follow4(df, index3)
in_excel(data_list3, result_file, sheet_tite3, 2, 'A', 'B', 1)
| zhitie/py_apache_access | main.py | main.py | py | 1,632 | python | en | code | 0 | github-code | 36 |
71295078185 | import os
import re
# CTF helper: concatenate the stringified contents of every file under
# ./bottles and print the offset of the flag prefix "ritsec".
with open('answer.txt', 'w'):
    # Preserve the original side effect: create/truncate answer.txt.
    # (The original opened it, never wrote to it, and leaked the handle.)
    pass
c = ''
for name in os.listdir('./bottles'):
    # Context manager replaces the manual open/close pair.
    with open(os.path.join('./bottles', name), 'rb') as fh:
        # NOTE(review): str() on bytes yields the "b'...'" repr under
        # Python 3, so the printed index counts those repr characters too —
        # kept as in the original.
        c += str(fh.read())
print(c.index('ritsec'))
| akashsuper2000/ctf-archive | Ritsec 2019/bottles.py | bottles.py | py | 210 | python | en | code | 0 | github-code | 36 |
2107349661 | #!/usr/bin/env python3
##
## EPITECH PROJECT, 2021
## B-MAT-500-PAR-5-1-308reedpipes-zhiwen.wang
## File description:
## multigrains_30711
##
import sys, os
import time
import math
import numpy as np
def printUsage():
    """Print the command-line help text for ./309pollution."""
    usage = ("USAGE\n"
             "\t./309pollution n file x y\n\n"
             "DESCRIPTION\n"
             "\tn\tnumber of points on the grid axis\n"
             "\tfile\tcsv file containing the data points x;y;p\n"
             "\tx\tabscissa of the point whose pollution level we want to know\n"
             "\ty\tordinate of the point whose pollution level we want to know\n")
    print(usage)
class Pollution:
    """Interpolate a pollution level on an n x n grid via Bezier curves.

    Workflow: initValue() parses sys.argv, initMatrix() fills the grid from
    the csv data points, calculateBezierSurface() prints the interpolated
    value at (xToPrint, yToPrint) with two decimals.
    """
    def __init__(self):
        # Bug fix: was named `_init`, so it never ran as the constructor and
        # the attributes only existed after initValue().
        self.n = None            # grid axis size
        self.dataPoints = None   # list of {'x','y','p'} dicts from the csv
        self.xToPrint = None
        self.yToPrint = None
        self.matrix = None       # n x n grid of pollution values
    def initValue(self):
        """Parse and validate sys.argv into the instance attributes."""
        try :
            self.n = int(sys.argv[1])
            if (self.n <= 0):
                raise ValueError("Error : The number of points must be positive")
            self.dataPoints = self.convertDataToFloat(self.readFile(sys.argv[2]))
            if (self.dataPoints is None):
                raise ValueError("Error : error when reading file")
            self.xToPrint = float(sys.argv[3])
            self.yToPrint = float(sys.argv[4])
            # Bug fix: both coordinates must lie inside [0, n-1]; the original
            # only rejected x < 0 and y > n-1, contradicting its own message.
            if not (0 <= self.xToPrint <= self.n - 1 and 0 <= self.yToPrint <= self.n - 1):
                raise ValueError("Error : given x, y need must be between 0 to n - 1")
        except ValueError as ve:
            raise ve
    def readFile(self, fileName):
        """Return the non-empty lines of *fileName*, or raise ValueError."""
        try :
            newfile = open(fileName, 'r')
            contents = newfile.read().splitlines()
            newfile.close()
            while "" in contents:
                contents.remove("")
            return contents
        except:
            raise ValueError("Error : error when reading file")
    def convertDataToFloat(self, contents):
        """Parse 'x;y;p' lines into [{'x': int, 'y': int, 'p': float}, ...]."""
        try :
            dataPoints = [None] * len(contents)
            i = 0
            for element in contents:
                line = element.split(";")
                dataPoints[i] = {'x' : int(line[0]), 'y' : int(line[1]), 'p' : float(line[2])}
                i += 1
            return dataPoints
        except:
            raise ValueError("Error : cannot convert file value into float")
    def printMatrix(self, tab):
        """Debug helper: print each row of *tab*."""
        for element in tab:
            print(element)
    def initMatrix(self):
        """Fill self.matrix with known data-point values; 0 elsewhere."""
        self.matrix = [[None for x in range(self.n)] for y in range(self.n)]
        for x in range(0, self.n):
            for y in range(0, self.n):
                givenTmp = self.checkInfoDataPoints(x, y)
                if (givenTmp != None):
                    self.matrix[x][y] = givenTmp
                else:
                    self.matrix[x][y] = 0
    def checkInfoDataPoints(self, x, y):
        """Return the pollution value at (x, y) or None when unknown."""
        for element in self.dataPoints:
            if (element["x"] == x and element["y"] == y):
                return element["p"]
        return None
    def run(self, av):
        """Full pipeline: parse args, build grid, print the interpolation."""
        self.initValue()
        self.initMatrix()
        self.calculateBezierSurface(self.xToPrint, self.yToPrint)
    def calculateBezierSurface(self, u, v):
        """Print the Bezier-surface value at (self.xToPrint, self.yToPrint).

        NOTE(review): indices x/y assume coordinates in tenths (value * 10)
        against 21-point curves; values above 2.0 would overrun — confirm
        against the project statement.
        """
        y = int(self.yToPrint * 10)
        x = int(self.xToPrint * 10)
        xResult = []
        # One Bezier curve per grid row, closed with the row's last control point.
        for i in range(self.n):
            tmp = self.bezierCurve(self.matrix[i], 20)
            tmp.append(self.matrix[i][self.n - 1])
            xResult.append(tmp)
        res = []
        last = None
        # Sample each row curve at the y index, then run a curve across rows.
        for j in range(self.n):
            last = xResult[j][y]
            res.append(xResult[j][y])
        finalRes = self.bezierCurve(res, 20)
        finalRes.append(last)
        print("{:.2f}".format(finalRes[x]))
    def bezierCurve(self, controlPoints, nbOfCurve):
        """Sample the Bezier curve at t = i/nbOfCurve for i in [0, nbOfCurve)."""
        res = []
        for i in range(nbOfCurve):
            res.append(self.bezierPoint(controlPoints, i / nbOfCurve))
        return res
    def bezierPoint(self, controlPoints, t):
        """Evaluate the Bezier curve at *t* by De Casteljau recursion."""
        if len(controlPoints) == 1:
            return round(controlPoints[0], 2)
        control_linestring = zip(controlPoints[:-1], controlPoints[1:])
        return self.bezierPoint([(1 - t) * p1 + t * p2 for p1, p2 in control_linestring], t)
def main():
    """Dispatch on argv: '-h' prints usage, four args run the solver,
    anything else raises a ValueError caught by the entry guard."""
    argc = len(sys.argv)
    if argc == 2 and sys.argv[1] == "-h":
        printUsage()
        return
    if argc != 5:
        raise ValueError("Error: please look -h for using 309pollution")
    solver = Pollution()
    solver.run(sys.argv)
if __name__ == "__main__":
    try:
        main()
        sys.exit(0)
    except ValueError as ve:
        # EPITECH convention: print the error and exit with status 84.
        print(ve)
        sys.exit(84)
| Zippee0709/Tek3-Project | Maths/309Pollution/pollution_309.py | pollution_309.py | py | 4,575 | python | en | code | 0 | github-code | 36 |
4401153009 | import requests
from bs4 import BeautifulSoup
import re
from googlesearch import search
def remove_tags(text):
    """Return *text* with every <...> markup tag stripped out."""
    return re.sub(r'<[^>]+>', '', text)
def spamcalls(num):
    """Scrape spamcalls.net for reports about phone number *num*.

    Returns {"spamcalls": joined report fields} on success, {"err": exception}
    on a parse failure, or None (implicitly) when the HTTP status is not 200.
    """
    lists = []
    r = requests.get("https://spamcalls.net/en/search?q={}".format(num))
    if r.status_code == 200:
        try:
            parse = BeautifulSoup(r.content.decode('utf-8'), 'html.parser')
            name = parse.findAll('strong')
            for names in name:
                lists.append(str(names))
            # Drops the first <strong> — presumably a page heading rather than
            # a result; confirm against the live page layout.
            lists.pop(0)
            return{"spamcalls" : remove_tags(", ".join(lists))}
        except Exception as e:
            # NOTE(review): returns the Exception object itself, not str(e).
            return{'err' : e}
def scamcallfighters(num):
    """Scrape scamcallfighters.com for reports about phone number *num*.

    Returns {"scamcallfighters": text of the first matching block},
    {"err": exception} on a parse failure, or None (implicitly) when the HTTP
    status is not 200 or no matching div exists.
    """
    r = requests.get("http://www.scamcallfighters.com/search-phone-{}.html".format(num.replace("+", "")))
    if r.status_code == 200:
        try:
            parse = BeautifulSoup(r.content.decode('utf-8'), 'html.parser')
            for g in parse.find_all('div', class_='nrp_headmat1'):
                records = g.find_all('p')
                # Returns on the first div, so only one block is ever reported.
                return{"scamcallfighters" : remove_tags(str(records))}
        except Exception as e:
            return{'err' : e}
def urls(num, countrycode, localnumber):
    """Build reverse-lookup URLs for a phone number on several sites.

    num: full number, e.g. "+1 555 0100"; countrycode: country code used by
    truecaller; localnumber: national part used by truecaller.
    Returns a dict mapping site name -> lookup URL.

    Note: an unreachable googlesearch fallback that followed this return in
    the original was removed — it could never execute.
    """
    return {
        "fouroneone": "https://www.411.com/phone/{}".format(num.replace('+', '').replace(' ', '-')),
        "truecaller": "https://www.truecaller.com/{}/{}".format(countrycode, localnumber),
        'truepeoplesearch': "https://www.truepeoplesearch.com/results?phoneno={}".format(num.replace(' ', '')),
        'syncme': "https://sync.me/search/?number={}".format(num.replace("+", "")),
    }
| 742fool/DeadTrapv2 | website/backend/scanners/fraud.py | fraud.py | py | 1,708 | python | en | code | null | github-code | 36 |
42248140291 | import json
from rdflib import Graph, Namespace, Literal, URIRef, XSD
from rdflib.namespace import XSD
# Mapping of codes to (image) annotation types
annotation_codes_classes = [("evoked_clusters", "ACVisualEvocation"), ("as", "ArtStyle"), ("act", "Action"), ("age","Age"), ("color", "Color"), ("em", "Emotion"), ("ic", "ImageCaption"), ("hp", "HumanPresence"), ("od", "Object")]
annotation_codes_jsonnames = [("evoked_clusters", "evoked_abstract_concept"), ("as", "art_style"), ("act", "action_label"), ("age","age_tier"), ("color", "webcolor_name"), ("em", "emotion"), ("ic", "image_description"), ("hp", "human_presence"), ("od", "detected_object")]
annotation_codes_roles = [("acve", "evoked_abstract_concept"), ("as", "detected_art_style"), ("act", "detected_action"), ("age","detected_age"), ("color", "detected_color"), ("em", "detected_emotion"), ("ic", "detected_image_caption"), ("hp", "detected_human_presence"), ("od", "detected_object")]
# Define namespaces for your prefixes
base = "https://w3id.org/situannotate#"
rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
rdfs = Namespace("http://www.w3.org/2000/01/rdf-schema#")
xsd = Namespace("http://www.w3.org/2001/XMLSchema#")
situannotate = Namespace("https://w3id.org/situannotate#")
conceptnet = Namespace("http://etna.istc.cnr.it/framester2/conceptnet/5.7.0/c/en/")
# Create an RDF graph
g = Graph()
dataset = "ARTstract"
### Create triples for Annotation Situations
with open('input/real-img-data.json', 'r') as json_file:
    # Load the JSON data into a Python dictionary
    data = json.load(json_file)
    for image_id, details in data.items():
        # One Image individual per entry, named "<dataset>_<image_id>".
        image_instance = str(dataset + "_" + image_id)
        g.add((situannotate[image_instance], rdf.type, situannotate.Image))
        source_dataset = details["source_dataset"]
        source_id = details["source_id"]
        evoked_clusters = details['evoked_clusters']
        # Only the first evoked cluster is materialized — presumably by
        # design for this KG slice; confirm if all clusters are needed.
        first_cluster = next(iter(evoked_clusters.values()))
        annotation_class = "ACVisualEvocation"
        annotation_class = str(annotation_class) + "Annotation"
        situation_name = source_dataset + "_acve"
        annotation_role = "evoked_abstract_concept"
        annotation_id = image_instance + "_" + situation_name
        cluster_name = first_cluster["cluster_name"]
        evocation_context = first_cluster["evocation_context"]
        # declare triple between the image and the annotation situation
        g.add((situannotate[image_instance], situannotate.isInvolvedInAnnotationSituation, situannotate[situation_name]))
        # triples for each annotation
        g.add((situannotate[annotation_id], rdf.type, situannotate[annotation_class]))
        g.add((situannotate[annotation_id], situannotate.isAnnotationInvolvedInSituation, situannotate[situation_name]))
        g.add((situannotate[annotation_id], situannotate.isClassifiedBy, situannotate[annotation_role]))
        g.add((situannotate[annotation_id], situannotate.aboutAnnotatedEntity, situannotate[image_instance]))
        g.add((situannotate[annotation_id], situannotate.typedByConcept, conceptnet[cluster_name]))
        g.add((situannotate[annotation_id], situannotate.annotationWithLexicalEntry, situannotate[cluster_name]))
        g.add((situannotate[annotation_id], situannotate.annotationWithEvocationContext, Literal(evocation_context, datatype=XSD.string)))
        # triples for each lexical entry
        g.add((situannotate[cluster_name], rdf.type, situannotate.LexicalEntry))
        g.add((situannotate[cluster_name], situannotate.typedByConcept, conceptnet[cluster_name]))
        g.add((situannotate[cluster_name], rdfs.label, Literal(cluster_name, datatype=XSD.string)))
        # triples for image in relation to annotation
        g.add((situannotate[image_instance], situannotate.isAnnotatedWithLexicalEntry, situannotate[cluster_name]))
        g.add((situannotate[image_instance], situannotate.hasImageLabelTypedBy, conceptnet[cluster_name]))
# Serialize the RDF graph to Turtle format
turtle_data = g.serialize(format="turtle")
# Print the Turtle data
print(turtle_data)
# Save the Turtle RDF data to a file
with open("output/real_images_acve_kg.ttl", "w") as outfile: # Open in regular text mode (not binary mode)
    outfile.write(turtle_data)
| delfimpandiani/ARTstract-KG | ARTstract-KG_creation/ARTstract_kg_construction/real_kg_construction/img_acve.py | img_acve.py | py | 4,284 | python | en | code | 0 | github-code | 36 |
36408696267 | from django.urls import path
from . import views
# Namespace used for reversing, e.g. reverse('assure:detail', args=[pk]).
app_name = 'assure'
urlpatterns = [
    path('', views.IndexView.as_view(), name='index'),
    path('<int:pk>/detail/', views.DetailView.as_view(), name='detail'),
    path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
    # Function-based view handling a posted comment for the given site.
    path('<int:site_id>/comment/', views.comment, name='comment'),
]
| chadwickcheney/SeleniumTests | assure/urls.py | urls.py | py | 360 | python | en | code | 0 | github-code | 36 |
41612108456 | ###################################################################
###################################################################
#
# DISCLAIMER:
# THIS IS A PROOF OF CONCEPT AND AS A RESULT, IS AN UGLY, HACKED TOGETHER MESS.
# IN NO WAY SHOULD THIS BE CONFUSED WITH 'GOOD' CODE.
#
# SORRY.
# -Devey
# 9 March 16
###################################################################
###################################################################
import os
from PIL import Image
import imgHash # https://pypi.python.org/pypi/imgHash
import itertools
from collections import defaultdict
import time
###################################################################################
## - - - Average HASH - - - ##
## ##
## ##
## ##
## ##
###################################################################################
def ahashes(image): # returns 64 byte average hash, 4 byte, 16 byte
    """Open the file at path *image* and return its average hash(es).

    NOTE(review): the inline comment claims three hash sizes and callers index
    the result as a 3-tuple — confirm against the imgHash library's API.
    """
    return imgHash.average_hash(Image.open(image))
###################################################################################
## - - - GET DATA - - - ##
## ##
## As of right now, returns the image name and Date/Time. ##
## Should be customized for different applications of this library. ##
## ##
###################################################################################
def getData(image):
    """Return (image_path, date_added) metadata for a newly hashed image.

    The timestamp format is "HH:MM dd/mm/YYYY"; customize this function for
    other applications (see the banner comment above).
    """
    dateAdded = time.strftime("%H:%M %d/%m/%Y").strip()
    return image, dateAdded # HUGE CHANGE AGHHH
###################################################################################
## - - - GET HAMMING DISTANCE - - - ##
## ##
## ##
## ##
## ##
###################################################################################
def hamming1(str1, str2):
    """Return the Hamming distance between two strings.

    Compares position-by-position up to the shorter string's length (same
    truncating semantics as the original itertools.imap on equal-length
    inputs). Fixed for Python 3, where itertools.imap no longer exists.
    """
    return sum(c1 != c2 for c1, c2 in zip(str1, str2))
###################################################################################
## - - - GET HASHES - - - ##
## ##
## returns all hashes in format: ##
## [(64byte, 4byte, 16byte),(data)] ##
## ##
###################################################################################
def getHashes(image):
    """Return the full record for *image*: [hash_tuple, (path, date_added)]."""
    return [ahashes(image), getData(image)]
def bulkLoader(listOfFiles):
    """Return the full hash record (see getHashes) for each file path given."""
    return [getHashes(fileName) for fileName in listOfFiles]
def dbBuilder(hashList): # Database Builder
    """Index full hash records into the module-level tables a32 / aBuckets.

    a32 maps the 64-byte average hash to its metadata; aBuckets maps the short
    4-byte hash to (16-byte hash, 64-byte hash) pairs for staggered lookup.
    """
    for i in hashList:
        a32[i[0][0]].append(list(i[1]))
        aBuckets[i[0][1]].append((i[0][2], i[0][0]))
def readHashes(fileName):
    """Parse a flat file of 'h64, h4, h16, name, date' rows into
    [(h64, h4, h16), (name, date)] records."""
    records = []
    with open(fileName, 'r') as flat:
        for row in flat:
            fields = row.split(", ")
            records.append([(fields[0], fields[1], fields[2]),
                            (fields[3], fields[4].strip())])
    return records
def writeHashes(hashes, fileName):
    """Append one 'h64, h4, h16, name, date' row for *hashes* to *fileName*.

    hashes is [(h64, h4, h16), (name, date)].  Returns (hash_tuple, data).
    Uses a context manager so the handle is closed even on write errors.
    """
    with open(fileName, 'a') as flat:
        flat.write('%s, %s, %s, %s, %s\n' % (hashes[0][0], hashes[0][1], hashes[0][2],
                                             hashes[1][0], hashes[1][1]))
    return hashes[0], hashes[1]
def writeMassHashes(listOfHashes, fileName):
    """Append one flat-file row per record in *listOfHashes* to *fileName*.

    Each record is [(h64, h4, h16), (name, date)], matching writeHashes().
    """
    rows = ['%s, %s, %s, %s, %s\n' % (hashes[0][0], hashes[0][1], hashes[0][2],
                                      hashes[1][0], hashes[1][1])
            for hashes in listOfHashes]
    with open(fileName, 'a') as flat:
        flat.writelines(rows)
def checkHashes(imgHashes, fileName):
    """Look up *imgHashes* in the in-memory tables without modifying them.

    Returns ("a32", hash, metadata) on an exact 64-byte match,
    ("aBk", h16, metadata) on a near match (Hamming distance < 3) via the
    4-byte bucket, or False when the image is unknown.
    """
    if imgHashes[0][0] in a32: # Check average hashtable for hash
        return "a32", imgHashes[0][0], a32[imgHashes[0][0]]
    elif imgHashes[0][1] in aBuckets: # If 4 byte hash in aBuckets
        bucket = aBuckets[imgHashes[0][1]]
        for i in bucket: # Will eventually be a k-d tree
            h1 = hamming1(imgHashes[0][2], i[0])
            if h1 < 3:
                a = ("aBk", i[0], a32[i[1]])
                return(a)
            else: # Image not in database
                # NOTE(review): returning inside the loop means only the FIRST
                # bucket entry is ever compared — looks unintentional; confirm.
                return False
    else: # Does not match any buckets
        return False
def checkHashesAdd(imgHashes, fileName):
    """Same lookup as checkHashes, but on a near match the new hashes are also
    appended to the flat file *fileName* before returning.

    NOTE(review): duplicates checkHashes almost line for line, including the
    first-bucket-entry-only comparison — consider sharing one implementation.
    """
    if imgHashes[0][0] in a32: # Check average hashtable for hash
        return "a32", imgHashes[0][0], a32[imgHashes[0][0]]
    elif imgHashes[0][1] in aBuckets: # If 4 byte hash in aBuckets
        bucket = aBuckets[imgHashes[0][1]]
        for i in bucket: # Will eventually be a k-d tree
            h1 = hamming1(imgHashes[0][2], i[0])
            if h1 < 3:
                a = ("aBk", i[0], a32[i[1]])
                writeHashes(imgHashes, fileName) # Add hash to databases
                return(a)
            else: # Image not in database
                return False
    else: # Does not match any buckets
        return False
def directoryEater(directoryName):
    """Sanitize filenames inside *directoryName* (spaces -> dashes) and return
    the resulting file paths, ready to feed into bulkLoader().

    Bug fix: the original renamed files in the current working directory
    (os.getcwd()) instead of the requested directory.
    """
    for filename in os.listdir(directoryName):
        sanitized = filename.replace(" ", "-")
        if sanitized != filename:
            os.rename(os.path.join(directoryName, filename),
                      os.path.join(directoryName, sanitized))
    # Preserve the original "dir/name" path shape expected by callers.
    return [directoryName + "/" + name for name in os.listdir(directoryName)]
def flatFileLoad(fileName): # Given the name of a flat file, enters them into the database
    """Load every record from flat file *fileName* into the global tables."""
    dbBuilder(readHashes(fileName))
def bulkFlatFileWrite(dbName, listOfFiles): # Given a list of files, write full hashes to specified flat file
    """Hash each image in *listOfFiles* and append all records to *dbName*."""
    listOfHashes = []
    for i in listOfFiles:
        listOfHashes.append(getHashes(i))
    writeMassHashes(listOfHashes, dbName)
def newFile(directoryName, fileName): # Create a new flatFile from a directory of images
    """Hash every image under *directoryName* and append to flat file *fileName*."""
    listOfFiles = directoryEater(directoryName)
    bulkFlatFileWrite(fileName, listOfFiles)
def checkImage(image, dbName):
    """Hash *image* and look it up in the in-memory tables (read-only)."""
    return checkHashes(getHashes(image), dbName)
def checkImageAdd(image, dbName):
    """Hash *image*, look it up, and persist new near-matches to *dbName*."""
    return checkHashesAdd(getHashes(image), dbName)
##########################
## Globals ##
## Don't Touch! ##
##########################
p32 = defaultdict(list) # 32 byte discrete cosine transform hash table (currently unused here)
a32 = defaultdict(list) # 32 byte gradient (average) hash table: hash -> metadata lists
pBuckets = defaultdict(list) # Staggered(4 byte -> 16 byte) dct hash table (currently unused here)
aBuckets = defaultdict(list) # Staggered(4 byte -> 16 byte) gradient hash table: h4 -> (h16, h64) pairs
########################################################################################
| deveyNull/phist | hashFunk.py | hashFunk.py | py | 7,822 | python | en | code | 0 | github-code | 36 |
12198099018 | #!/usr/bin/python
import os
import sqlite3, time, re
import subprocess
from random import randint
from Scan_lib import Scan_Receive_sms, Scan_Smstome_sms
def Scansione(conn):
    """Run the matching site scraper for every number stored in Anagrafica."""
    cursor = conn.execute("SELECT Subdomain, Number FROM Anagrafica")
    # NOTE(review): this discards the first result row before the loop —
    # looks unintentional; confirm whether the first record should be scanned.
    cursor.fetchone()
    for row in cursor:
        if "receive-smss.com" in row:
            Scan_Receive_sms(conn,row[1].split("+")[1])
        if "smstome.com" in row:
            Scan_Smstome_sms(conn,row[1]) #### CHECK WHY ONLY ONE PASS (orig. Italian: CONTROLLARE PERCHE SOLO UN GIRO)
def DB_Ana(conn, Subdomain, Number, Alive, Nation):
    """Insert a phone-number record into Anagrafica unless (Number, Nation)
    already exists, committing on insert.

    Security fix: the original built SQL by string concatenation from values
    scraped off third-party web pages (injectable, and broken by any quote);
    parameterized queries are used instead.
    """
    cursor = conn.execute(
        "SELECT * FROM Anagrafica WHERE Number = ? AND Nation = ?",
        (Number, Nation))
    if cursor.fetchone() is None:
        conn.execute(
            "INSERT INTO Anagrafica (Subdomain, Number, Alive, Nation) VALUES (?, ?, ?, ?)",
            (Subdomain, Number, Alive, Nation))
        conn.commit()
        print("New finding: " + Number + " [" + Nation + "] - Records created successfully")
def Ana_Receive_smss():
    """Crawl receive-smss.com's front page and register every listed number.

    Downloads the page with wget into a temp file, pairs each number with its
    country (flag counts the two matched divs), records them via DB_Ana, then
    rescans all known numbers.
    """
    print ("ANAGRAFICA Receive-smss.com");
    conn = sqlite3.connect('SMS_DB.db')
    sup_file= 'receive-smss'
    os.system("wget -O " + sup_file + " " + 'https://receive-smss.com/')
    subdomain = "receive-smss.com"
    flag = 0
    with open(sup_file) as file:
        for line in file:
            if '<div class="number-boxes-itemm-number" style="color:black">' in line:
                number = line.split('<div class="number-boxes-itemm-number" style="color:black">')[1].split('</div>')[0]
                flag = flag+1
            if '<div class="number-boxes-item-country number-boxess-item-country">' in line:
                nation = line.split('<div class="number-boxes-item-country number-boxess-item-country">')[1].split('</div>')[0]
                flag = flag+1
            # Both halves seen: store the (number, nation) pair and reset.
            if flag > 1:
                alive = "none"
                DB_Ana(conn, subdomain, number, alive, nation)
                flag = 0
                number = "NULL"
                nation = "NULL"
    os.system("rm "+sup_file)
    Scansione(conn)
    conn.close()
def Ana_SMStome():
    """Crawl smstome.com: for each country link on the front page, fetch a
    random listing page and register every number link found via DB_Ana.
    """
    print ("ANAGRAFICA smstome.com");
    conn = sqlite3.connect('SMS_DB.db')
    sup_file= 'SMStome'
    os.system("wget -O " + sup_file + " " + 'https://smstome.com/')
    subdomain = "smstome.com"
    flag = 0
    flag2 = 0
    with open(sup_file) as file:
        for line in file:
            if ' <a href="' in line and '/country/' in line:
                sup_2 = line.split(' <a href="')[1].split('" class="button button-clear">')[0]
                nation = sup_2.split('/country/')[1].split('/')[0]
                flag = flag+1
            # NOTE(review): flag only increments once per country link, so this
            # branch is first entered on the SECOND match — confirm intent.
            if flag > 1:
                flag = 0
                sup_file2 = "SMStome_"+nation
                # Pick a random listing page (1-30) for this country.
                os.system("wget -O " + sup_file2 + " " + 'https://smstome.com'+sup_2+"?page="+str(randint(1, 30)))
                with open(sup_file2) as file:
                    for line2 in file:
                        if 'button button-outline button-small numbutton' in line2:
                            number_link = line2.split('<a href="https://smstome.com')[1].split('" class=')[0]
                            flag2 = flag2+1
                        if flag2 > 1:
                            alive = "none"
                            DB_Ana(conn, subdomain, number_link, alive, nation)
                            flag2 = 0
                os.system("rm "+sup_file2)
                Scansione(conn)
    os.system("rm "+sup_file)
    conn.close()
while True:
    # Endless crawl loop: rebuild the number registry from both sites,
    # scan for new messages, then sleep three minutes.
    Ana_Receive_smss()
    Ana_SMStome()
    print("---- Execution Hold ---- at time: ")
    # NOTE(review): os.system returns the exit status; the str(...).strip()
    # result is discarded — the date itself is printed by the shell command.
    str(os.system("date +%k:%M.%S")).strip()
    time.sleep(180)
| fulgid0/ASMS_discovery | ASMS_discover.py | ASMS_discover.py | py | 3,154 | python | en | code | 0 | github-code | 36 |
29620338632 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from lxml import etree
import time
import xlsxwriter
options = webdriver.ChromeOptions()
# Attach to a locally running Chrome started with remote debugging enabled.
# --user-data-dir is set so the session does not touch your normal profile:
# chrome.exe --remote-debugging-port=9222 --user-data-dir="D:\Program File\chromeUserData"
options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
base_url = "https://www.baidu.com/"
driver = webdriver.Chrome(options=options)
wait = WebDriverWait(driver, 10, 0.5)
driver.implicitly_wait(3)
driver.get(base_url)
# Print the page title (the Baidu home page title)
print(driver.title)
# Take and save a screenshot of the current page
# driver.save_screenshot("baidu.png")
wait.until(EC.presence_of_element_located((By.ID, 'kw')))
# Type the query into the search box ("kw") and click the search button ("su").
driver.find_element_by_id("kw").click()
driver.find_element_by_id("kw").send_keys("taobao")
driver.find_element_by_id("su").click()
# Print the rendered page source
# print(driver.page_source)
# Print the current URL
print(driver.current_url)
wait.until(EC.presence_of_element_located((By.ID, 'content_left')))
time.sleep(1)
# First organic search result link.
firstElem = driver.find_element_by_xpath(
    '//div[@id="content_left"]//div[contains(@class,"result")][1]/h3/a')
print(firstElem.text)
firstElem.click()
# All open browser window handles
windowstabs = driver.window_handles
print(windowstabs)
# The currently focused window handle
currenttab = driver.current_window_handle
print(currenttab)
# Switch to the newly opened tab
driver.switch_to.window(windowstabs[1])
print(driver.current_url)
time.sleep(1)
driver.close()
driver.switch_to.window(windowstabs[0])
print(driver.current_url)
# html_str = driver.page_source
# obj_list = etree.HTML(html_str).xpath(
#     '//div[@id="content_left"]//div[contains(@class,"result")]/h3/a')
# result = ['Title']
# for obj in obj_list:
#     title = obj.xpath('string(.)').replace('\n', '').strip()
#     print(title)
#     result.append(title)
# workbook = xlsxwriter.Workbook('baidu.xlsx') # create an Excel file
# worksheet = workbook.add_worksheet() # create a sheet
# # column width
# worksheet.set_column('A:J', 20)
# # write the data into the sheet
# worksheet.write_column('A1',result)
# workbook.close()
# Close the current page; if it is the only one, this closes the browser.
# driver.close()
# # Quit the browser entirely
# driver.quit()
| hua345/myBlog | python/selenium/baidu.py | baidu.py | py | 2,515 | python | en | code | 0 | github-code | 36 |
29909656261 | # -*- coding: utf-8 -*-
import os
import sys
import csv
import random
import timeit
import numpy as np
import argparse
import multiprocessing as mp
from ..lib.utils import makedirs
# Column layouts of the tab-separated click / query log files (csv.DictReader
# fieldnames below).
click_field_name = ["date", "format", "paper", "ip", "mode", "uid", "session",
                    "port", "id", "useragent", "usercookies"]
query_field_name = ["date", "query", "ip", "referer", "mode", "num_results",
                    "results", "uid", "session", "port", "overlength", "id",
                    "useragent", "usercookies"]
class EstWorker(mp.Process):
    """Worker process: estimates rank propensities from one bootstrap sample.

    Pulls query sets from a JoinableQueue until a None sentinel arrives; for
    each sample it counts shows/clicks per swapped-in rank and appends the
    propensity estimate (CTR ratio to rank 0) to the shared result list.
    """
    def __init__(self, task_queue, M, click_set, res_list):
        super(EstWorker, self).__init__()
        self._task_queue = task_queue
        self._M = M                    # number of top ranks to estimate
        self._click_set = click_set    # set of (uid, paper) click pairs
        self._res_list = res_list      # manager list collecting estimates
    def run(self):
        # Bind members to locals once for the hot loop.
        task_queue = self._task_queue
        click_set = self._click_set
        res_list = self._res_list
        M = self._M
        name = self.name
        cnt = 0
        while True:
            task = task_queue.get()
            if task is None:
                # Sentinel: acknowledge and shut down.
                task_queue.task_done()
                print('{}: Processed {} tasks'.format(name, cnt))
                break
            query_set = task
            top2k_shown = np.zeros(M)
            top2k_click = np.zeros(M)
            for uid, results in query_set:
                # Each result is "rank_before*rank_after*paper".
                for result in results.split(','):
                    toks = result.split('*')
                    rk_before = int(toks[0])
                    rk_after = int(toks[1])
                    paper = toks[2]
                    # Only count results swapped FROM rank 0 into the top M.
                    if rk_before == 0 and rk_after < M:
                        top2k_shown[rk_after] += 1
                        if (uid, paper) in click_set:
                            top2k_click[rk_after] += 1
            swap_ctr = np.zeros(M)
            prop_est = np.zeros(M)
            # NOTE(review): divides by top2k_shown[i] / swap_ctr[0] without a
            # zero guard — a rank never shown in a sample would crash; confirm
            # the data guarantees coverage.
            for i in range(M):
                swap_ctr[i] = top2k_click[i] / top2k_shown[i]
            for i in range(M):
                prop_est[i] = swap_ctr[i] / swap_ctr[0]
            res_list.append(prop_est)
            task_queue.task_done()
            cnt += 1
def bootstrap(M, n_samples, query_list, click_set, n_workers):
    """Run *n_samples* bootstrap resamples of *query_list* across *n_workers*
    EstWorker processes; return the shared list of propensity estimates.
    """
    task_queue = mp.JoinableQueue()
    manager = mp.Manager()
    res_list = manager.list()
    workers = []
    for _ in range(n_workers):
        w = EstWorker(task_queue, M, click_set, res_list)
        w.daemon = True
        w.start()
        workers.append(w)
    # Each task is a with-replacement resample of the full query list.
    for _ in range(n_samples):
        sample = random.choices(query_list, k=len(query_list))
        task_queue.put(sample)
    # One None sentinel per worker triggers shutdown.
    for _ in range(n_workers):
        task_queue.put(None)
    task_queue.close()
    task_queue.join()
    for w in workers:
        w.join()
    return res_list
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Propensity Estimation via swap intervention')
    parser.add_argument('-m', type=int, help='max pos to be estimated')
    parser.add_argument('-n', type=int, default=1000, help='num of bootstrap samples')
    parser.add_argument('-p', type=float, default=0.95, help='confdence probability')
    parser.add_argument('--n_workers', default=mp.cpu_count(), type=int,
                        help='number of workers')
    parser.add_argument('query_path', help='query path')
    parser.add_argument('click_path', help='click path')
    parser.add_argument('output_path', help='output path')
    args = parser.parse_args()
    start = timeit.default_timer()
    M = args.m
    n_bootstrap = args.n
    n_workers = min(mp.cpu_count(), args.n_workers)
    conf_prop = args.p
    n_samples = args.n
    # NOTE(review): this overwrites the cpu-count clamp two lines above
    # (and n_bootstrap/conf_prop are never used) — looks like leftovers.
    n_workers = args.n_workers
    query_path = args.query_path
    click_path = args.click_path
    random.seed()
    # Build the (uid, paper) click set from the tab-separated click log.
    click_set = set()
    with open(click_path, 'r') as fin:
        reader = csv.DictReader(fin, delimiter='\t', fieldnames=click_field_name)
        for line in reader:
            click_set.add((line['uid'], line['paper']))
    # Keep only queries with at least M results; use the last result list.
    query_list = []
    with open(query_path, 'r') as fin:
        reader = csv.DictReader(fin, delimiter='\t', quotechar="'", fieldnames=query_field_name)
        for line in reader:
            uid = line['uid']
            num_results = int(line['num_results'])
            if num_results < M:
                continue
            results = line['results'].split('|')[-1]
            query_list.append((uid, results))
    prop_list = bootstrap(M, n_samples, query_list, click_set, n_workers)
    # Percentile confidence interval: lower bound, median, upper bound.
    lo = int(n_samples * ((1 - args.p) / 2))
    mi = int(n_samples * 0.5)
    hi = n_samples - lo
    perc_conf = np.zeros((M, 3))
    for i in range(M):
        p = []
        for prop in prop_list:
            p.append(prop[i])
        p.sort()
        perc_conf[i][0] = p[lo]
        perc_conf[i][1] = p[mi]
        perc_conf[i][2] = p[hi]
    makedirs(os.path.dirname(args.output_path))
    np.savetxt(args.output_path, perc_conf)
    end = timeit.default_timer()
    print('Running time: {:.3f}s.'.format(end - start))
| fzc621/CondPropEst | src/arxiv_match/bootstrap_swap.py | bootstrap_swap.py | py | 4,987 | python | en | code | 2 | github-code | 36 |
26071445837 | import numpy as np
def modified_black_body(wl, TEMPSN, RADIUSSN, TEMPDUST, MDUST):
    """Two-component (SN photosphere + dust) modified black-body spectrum.

    Parameters
    ----------
    wl : array_like
        Wavelengths in Angstrom.
    TEMPSN : float
        Black-body temperature of the SN photosphere [K].
    RADIUSSN : float
        Photospheric radius [cm].
    TEMPDUST : float
        Dust temperature [K].
    MDUST : float
        Dust mass in solar masses.

    Returns
    -------
    numpy.ndarray
        Total flux density (SN + dust components) at the hard-coded
        distance D, in erg s^-1 cm^-2 A^-1.
    """
    h = 6.626076e-27     # Planck constant (erg s)
    k = 1.38066e-16      # Boltzmann constant (erg/K)
    BETAL = 1.5          # emissivity index (slope) for the dust opacity kappa
    MSUN = 1.98892e+33   # solar mass (g)
    CC = 2.99792458E+10  # speed of light (cm/s)

    wlCM = wl * 1e-8  # wavelength from Angstrom to cm
    B1 = 2 * h * (CC ** 2)  # erg cm^2 s^-1
    B2 = h * CC / k         # K cm
    BX = (B1 / wlCM ** 5)   # erg s^-1 cm^-3

    # Change D to the distance of the source from the observer.
    D = 45.7  # Mpc
    LDS = D * 3.086e+18 * 1e+6  # luminosity distance from Mpc to cm

    # Opacity normalised to 1e4 cm^2 g^-1 at 1000 nm (1e-4 cm).
    KAPPASIN = 1e+4 * (wlCM / 1e-4) ** (-BETAL)  # cm^2 g^-1

    # SN photosphere: black body scaled by the solid angle (R/d)^2.
    flux_sn = BX / ((np.exp(B2 / (wlCM * TEMPSN))) - 1)  # erg s^-1 cm^-3
    flux_snA = np.pi * flux_sn * ((RADIUSSN ** 2) / (LDS ** 2))
    flux_sn_erg = flux_snA * 1e-8  # per-cm -> per-Angstrom

    # Dust: optically thin modified black body, M * kappa / d^2 scaling.
    flux_d = BX / ((np.exp(B2 / (wlCM * TEMPDUST))) - 1)
    flux_dA = flux_d * KAPPASIN * ((MDUST * MSUN) / (LDS ** 2))
    flux_d_erg = flux_dA * 1e-8  # per-cm -> per-Angstrom

    # The original also computed max_flux = np.max(flux) but never used or
    # returned it; that dead code has been removed.
    return flux_sn_erg + flux_d_erg
15521733904 | '''
43. Multiply Strings
Given two non-negative integers num1 and num2 represented as strings, return the product of num1 and num2, also represented as a string.
Example 1:
Input: num1 = "2", num2 = "3"
Output: "6"
Example 2:
Input: num1 = "123", num2 = "456"
Output: "56088"
Note:
The length of both num1 and num2 is < 110.
Both num1 and num2 contain only digits 0-9.
Both num1 and num2 do not contain any leading zero, except the number 0 itself.
You must not use any built-in BigInteger library or convert the inputs to integer directly.
'''
class Solution:
    def add(self, num1, num2, num3, num4):
        """Sum four little-endian (reversed) digit strings.

        Returns the total as a normal most-significant-first digit string.
        """
        pieces = (num1, num2, num3, num4)
        out = []
        carry = 0
        pos = 0
        while True:
            column = [int(s[pos]) for s in pieces if pos < len(s)]
            if not column:
                # Every string is exhausted; flush any remaining carry.
                if carry:
                    out.append(str(carry))
                return ''.join(reversed(out))
            total = sum(column) + carry
            out.append(str(total % 10))
            carry = total // 10
            pos += 1

    def multiply(self, num1, num2):
        """
        :type num1: str
        :type num2: str
        :rtype: str

        Divide-and-conquer product of two decimal strings: split each
        operand in half, recurse on the four partial products and add
        them, never converting the full inputs to integers.
        """
        a, b = num1, num2
        if len(b) < len(a):
            a, b = b, a
        if len(a) == 1:
            # Base case: single digit times b, school-style.
            digit = int(a)
            if digit == 0:
                return '0'
            out = []
            carry = 0
            for ch in reversed(b):
                prod = int(ch) * digit + carry
                out.append(str(prod % 10))
                carry = prod // 10
            if carry:
                out.append(str(carry))
            return ''.join(reversed(out))
        # Recursive case: shift each partial product by trailing zeros
        # according to the lengths of the low halves it skips.
        cut_a, cut_b = len(a) // 2, len(b) // 2
        a_hi, a_lo = a[:cut_a], a[cut_a:]
        b_hi, b_lo = b[:cut_b], b[cut_b:]
        p1 = self.multiply(a_hi, b_hi) + '0' * (len(a_lo) + len(b_lo))
        p2 = self.multiply(a_hi, b_lo) + '0' * len(a_lo)
        p3 = self.multiply(a_lo, b_hi) + '0' * len(b_lo)
        p4 = self.multiply(a_lo, b_lo)
        # add() expects reversed (little-endian) digit strings.
        return self.add(p1[::-1], p2[::-1], p3[::-1], p4[::-1])
if __name__ == '__main__':
#print(Solution().multiply("123","456"))
print(Solution().multiply("999","999")) | MarshalLeeeeee/myLeetCodes | 43-multiply.py | 43-multiply.py | py | 2,510 | python | en | code | 0 | github-code | 36 |
2848097490 | import tensorflow as tf
import argparse
import pandas as pd
import numpy as np
from PIL import Image, ImageDraw, ImageEnhance
from tqdm import tqdm
from model import *
from losses import *
import albumentations as albu
# Command-line interface: image dir, model dir, target size, labels, epochs.
args = argparse.ArgumentParser(description='Process Training model')
args.add_argument('-i','--img_dir', type=str, help='images_directory', required=True)
args.add_argument('-m','--model_dir', type=str, help='model_directory', required=True)
args.add_argument('-s','--resized_size', type=int,help='resized_size', required=True)
args.add_argument('-a','--annotations', type=str,help='annotations_file', required=True)
args.add_argument('-e','--epochs', type=int,help='epochs', required=True)
argumens = args.parse_args()
# Global configuration derived from the CLI arguments.
class config:
    annotations_file = argumens.annotations
    image_dir = argumens.img_dir + '/'
    image_size = 1000  # original image edge length in px (used to rescale boxes)
    resized_size = argumens.resized_size
    train_ratio = 0.8  # 80/20 train/validation split
    checkpoint = argumens.model_dir + '/'
    saved_model = argumens.model_dir + '/object_detection_model.h5'
# Load the bounding-box annotations (one row per box).
labels = pd.read_csv(config.annotations_file)
print(labels.head())
# Group bbox label strings per image id (applied below via DataFrame.groupby).
def group_boxes(group):
    """Parse one image's '[x,y,w,h]' bbox strings into an (n, 4) float array."""
    coords = group['yolo_bbox'].str.split(',', expand=True)
    # Drop the surrounding brackets left over from the string representation.
    coords[0] = coords[0].str[1:]
    coords[3] = coords[3].str[:-1]
    return coords.astype(float).values
labels = labels.groupby('image_id').apply(group_boxes)
# Split image ids into train and validation sets (first train_ratio fraction
# of the unique, sorted ids goes to training).
train_idx = round(len(np.unique(labels.index.values)) * config.train_ratio)
train_image_ids = np.unique(labels.index.values)[0: train_idx]
val_image_ids = np.unique(labels.index.values)[train_idx:]
def load_image(image_id):
    """Read an image from config.image_dir and return it resized as a numpy array.

    Uses a context manager so the underlying file handle is closed
    deterministically; the original left it for the garbage collector.
    """
    with Image.open(config.image_dir + image_id) as image:
        resized = image.resize((config.resized_size, config.resized_size))
        return np.asarray(resized)
# Load all training images into memory and rescale their boxes from the
# original image frame to the resized frame.
print("Loading Training data")
train_pixels = {}
train_labels = {}
for image_id in tqdm(train_image_ids):
    train_pixels[image_id] = load_image(image_id)
    train_labels[image_id] = labels[image_id].copy() * (config.resized_size/config.image_size)
# Same for the validation split.
print("Loading Validation data data")
val_pixels = {}
val_labels = {}
for image_id in tqdm(val_image_ids):
    val_pixels[image_id] = load_image(image_id)
    val_labels[image_id] = labels[image_id].copy() * (config.resized_size/config.image_size)
model = build_model(config.resized_size,config.resized_size)
print(model.summary())
# Keras Sequence yielding (image batch, YOLO label-grid batch) pairs.
class DataGenerator(tf.keras.utils.Sequence):
    def __init__(self, image_ids, image_pixels, labels=None, batch_size=1, shuffle=False, augment=False):
        """Store dataset references and precompute the cell grid once."""
        self.image_ids = image_ids
        self.image_pixels = image_pixels
        self.labels = labels
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.augment = augment
        self.on_epoch_end()
        self.image_grid = self.form_image_grid()
    def form_image_grid(self):
        """Return an (H, W, 4) array of [x, y, w, h] cells tiling the image,
        where H/W come from the model's output feature map."""
        image_grid = np.zeros((model.output_shape[1], model.output_shape[2], 4))
        # x, y, width, height
        cell = [0, 0, config.resized_size / model.output_shape[1], config.resized_size / model.output_shape[2]]
        for i in range(0, model.output_shape[1]):
            for j in range(0, model.output_shape[2]):
                image_grid[i ,j] = cell
                cell[0] = cell[0] + cell[2]
            cell[0] = 0
            cell[1] = cell[1] + cell[3]
        return image_grid
    def __len__(self):
        """Number of full batches per epoch."""
        return int(np.floor(len(self.image_ids) / self.batch_size))
    def on_epoch_end(self):
        """Rebuild (and optionally reshuffle) the sample order."""
        self.indexes = np.arange(len(self.image_ids))
        if self.shuffle == True:
            np.random.shuffle(self.indexes)
# NOTE(review): these rebindings look notebook-derived; `__len__` and
# `on_epoch_end` are defined inside the class body above, so module-level
# names of that spelling may not exist here -- confirm this script runs as-is.
DataGenerator.__len__ = __len__
DataGenerator.on_epoch_end = on_epoch_end
# Training-time augmentations: random crop, flips/rotations, colour jitter,
# noise, cutout, then CLAHE + grayscale; boxes follow in COCO format.
DataGenerator.train_augmentations = albu.Compose([albu.RandomSizedCrop(
    min_max_height=(config.resized_size, config.resized_size),
    height=config.resized_size, width=config.resized_size, p=0.8),
    albu.OneOf([
        albu.Flip(),
        albu.RandomRotate90()], p=1),
    albu.OneOf([
        albu.HueSaturationValue(),
        albu.RandomBrightnessContrast()], p=1),
    albu.OneOf([
        albu.GaussNoise()], p=0.5),
    albu.Cutout(
        num_holes=8,
        max_h_size=16,
        max_w_size=16,
        p=0.5
    ),
    albu.CLAHE(p=1),
    albu.ToGray(p=1),
], bbox_params={'format': 'coco', 'label_fields': ['labels']})
# Validation-time pipeline: contrast equalisation and grayscale only.
DataGenerator.val_augmentations = albu.Compose([
    albu.CLAHE(p=1),
    albu.ToGray(p=1),
])
def __getitem__(self, index):
    """Return batch `index` as (X, y); attached to DataGenerator below."""
    indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
    batch_ids = [self.image_ids[i] for i in indexes]
    X, y = self.__data_generation(batch_ids)
    return X, y
def __data_generation(self, batch_ids):
    """Build normalised pixel and label-grid arrays for the given image ids."""
    X, y = [], []
    # Generate data
    for i, image_id in enumerate(batch_ids):
        pixels = self.image_pixels[image_id]
        bboxes = self.labels[image_id]
        if self.augment:
            # augment_image also converts the boxes to a label grid.
            pixels, bboxes = self.augment_image(pixels, bboxes)
        else:
            pixels = self.contrast_image(pixels)
            bboxes = self.form_label_grid(bboxes)
        X.append(pixels)
        y.append(bboxes)
    return np.array(X), np.array(y)
def augment_image(self, pixels, bboxes):
    """Apply the training augmentations, then grid-encode the moved boxes."""
    bbox_labels = np.ones(len(bboxes))
    aug_result = self.train_augmentations(image=pixels, bboxes=bboxes, labels=bbox_labels)
    bboxes = self.form_label_grid(aug_result['bboxes'])
    return np.array(aug_result['image']) / 255, bboxes
def contrast_image(self, pixels):
    """Run the validation-time augmentation pipeline and rescale pixels to [0, 1]."""
    augmented = self.val_augmentations(image=pixels)['image']
    return np.array(augmented) / 255
def form_label_grid(self, bboxes):
    """Encode bboxes into an (H, W, 10) YOLO target grid: 2 anchors x 5 values."""
    label_grid = np.zeros((model.output_shape[1], model.output_shape[2], 10))
    for i in range(0, model.output_shape[1]):
        for j in range(0, model.output_shape[2]):
            cell = self.image_grid[i, j]
            label_grid[i, j] = self.rect_intersect(cell, bboxes)
    return label_grid
def rect_intersect(self, cell, bboxes):
    """Fill up to two anchor slots with boxes whose centre falls inside `cell`."""
    cell_x, cell_y, cell_width, cell_height = cell
    cell_x_max = cell_x + cell_width
    cell_y_max = cell_y + cell_height
    # [objectness, x, y, w, h]; a leading 0 marks the slot as free.
    anchor_one = np.array([0, 0, 0, 0, 0])
    anchor_two = np.array([0, 0, 0, 0, 0])
    # check all boxes
    for bbox in bboxes:
        box_x, box_y, box_width, box_height = bbox
        box_x_centre = box_x + (box_width / 2)
        box_y_centre = box_y + (box_height / 2)
        if (box_x_centre >= cell_x and box_x_centre < cell_x_max and box_y_centre >= cell_y and box_y_centre < cell_y_max):
            if anchor_one[0] == 0:
                anchor_one = self.yolo_shape(
                    [box_x, box_y, box_width, box_height],
                    [cell_x, cell_y, cell_width, cell_height]
                )
            elif anchor_two[0] == 0:
                anchor_two = self.yolo_shape(
                    [box_x, box_y, box_width, box_height],
                    [cell_x, cell_y, cell_width, cell_height]
                )
            else:
                # Both anchors taken: remaining boxes for this cell are dropped.
                break
    return np.concatenate((anchor_one, anchor_two), axis=None)
def yolo_shape(self, box, cell):
    """Encode a COCO [x, y, w, h] box as a YOLO target relative to `cell`."""
    box_x, box_y, box_width, box_height = box
    cell_x, cell_y, cell_width, cell_height = cell
    # Box centre in absolute image coordinates.
    centre_x = box_x + box_width / 2
    centre_y = box_y + box_height / 2
    # Centre offset expressed as a fraction of the cell.
    rel_x = (centre_x - cell_x) / cell_width
    rel_y = (centre_y - cell_y) / cell_height
    # Width/height as a fraction of the whole (resized) image.
    rel_w = box_width / config.resized_size
    rel_h = box_height / config.resized_size
    # Leading 1 marks the anchor slot as occupied.
    return [1, rel_x, rel_y, rel_w, rel_h]
# Attach the module-level helpers as DataGenerator methods.
DataGenerator.augment_image = augment_image
DataGenerator.contrast_image = contrast_image
DataGenerator.form_label_grid = form_label_grid
DataGenerator.rect_intersect = rect_intersect
DataGenerator.yolo_shape = yolo_shape
DataGenerator.__getitem__ = __getitem__
DataGenerator.__data_generation = __data_generation
# Training generator: shuffled and augmented.
train_generator = DataGenerator(
    train_image_ids,
    train_pixels,
    train_labels,
    batch_size=1,
    shuffle=True,
    augment=True
)
# Validation generator: deterministic, no augmentation.
val_generator = DataGenerator(
    val_image_ids,
    val_pixels,
    val_labels,
    batch_size=1,
    shuffle=False,
    augment=False
)
image_grid = train_generator.image_grid
# Compile with the custom YOLO loss (imported from losses) and train with
# checkpoint / LR-reduction / early-stopping callbacks.
optimiser = tf.keras.optimizers.Adam(learning_rate=0.0001)
model.compile(
    optimizer=optimiser,
    loss=custom_loss
)
callbacks = [tf.keras.callbacks.ModelCheckpoint(config.checkpoint + '/object_detection_ckpt.weights.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', verbose=1, save_best_only=False, mode='auto', save_weights_only=True), \
             tf.keras.callbacks.ReduceLROnPlateau(monitor='loss', patience=3, verbose=1), \
             tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True), \
             ]
history = model.fit(train_generator,validation_data=val_generator, epochs=argumens.epochs, callbacks=callbacks)
model.save(config.saved_model) | SandiRizqi/OBJECT-DETECTION-YOLO-ALGORITHM-FOR-AERIAL-IMAGERY_FROM-SCRATCH | train_yolo.py | train_yolo.py | py | 9,124 | python | en | code | 1 | github-code | 36 |
23216640300 | #!/usr/bin/env python
# coding: utf-8
"""
module: some tf modules from classic networks
"""
import os
import numpy as np
import tensorflow as tf
from . import TF_Ops as tfops
def fcn_pipe(input, conv_struct, use_batchnorm=False, is_training=None, scope='pipe'):
    """
    parse conv_struct: e.g. 3-16;5-8;1-32 | 3-8;1-16 | 1
    => concat[ 3x3 out_channel=16, 5x5 out_channel=8, 1x1 out_channel=32]
    => followed by inception concat [3x3 out_channel=8, 1x1 out_channel=16] and so on ...
    => output with a 1x1 conv
    """
    # NOTE(review): only the first len(conv_struct)-1 entries are consumed
    # here; the final 1x1 output conv mentioned above is presumably applied
    # by the caller -- confirm.
    with tf.variable_scope(scope):
        net = input
        if len(conv_struct) > 1: # if any hidden layer
            for layer_cnt in range(len(conv_struct) - 1):
                layer_cfg = conv_struct[layer_cnt]
                with tf.variable_scope('incep_%d' % layer_cnt):
                    # kernel/bias initializer: default to xavier/zeros
                    if len(layer_cfg) > 1:
                        # Inception-style: parallel convs concatenated on channels.
                        net = tf.concat([tfops.conv_layer(net, out_channels=cfg[1], filter_size=cfg[0], padding='SAME',
                                                          activation=None, scope='conv%d-%d' % (cfg[0], cfg[1]))
                                         for cfg in layer_cfg], axis=-1)
                    else:
                        cfg = layer_cfg[0]
                        net = tfops.conv_layer(net, out_channels=cfg[1], filter_size=cfg[0], padding='SAME',
                                               activation=None, scope='conv%d-%d' % (cfg[0], cfg[1]))
                    # it seems bn before activation is generally better
                    if use_batchnorm:
                        bn_layer = tf.keras.layers.BatchNormalization(name='bn')
                        assert is_training is not None
                        net = bn_layer(net, training=is_training)
                    net = tf.nn.relu(net)
    return net
def alexnet_conv_layers(input, auxilary_input=None, prelu_initializer=tf.constant_initializer(0.25), fuse_type='flat'):
    """
    self-implemented AlexNet, with skip-connection
    input: images, expected to be of [batch, width, height, channel]

    fuse_type controls how the skip features are fused at the end:
    'flat' (flatten each), 'spp' (spatial pyramid pooling) or
    'resize' (resize to fixed sizes, then flatten).
    """
    def flatten(feat_map):
        # [B, H, W, C] -> [B, C, H, W] -> [B, C*H*W] (via tfops.remove_axis).
        feat_map = tf.transpose(feat_map, perm=[0, 3, 1, 2])
        feat_map = tfops.remove_axis(feat_map, [2, 3])
        return feat_map
    assert fuse_type in ['flat', 'spp', 'resize']
    with tf.variable_scope('conv1'):
        if auxilary_input is not None:
            conv1 = tfops.conv_layer(input, 96, filter_size=11, stride=4, padding='VALID', activation=None)
            conv1 = conv1 + auxilary_input # join before activation
            conv1 = tf.nn.relu(conv1)
        else:
            conv1 = tfops.conv_layer(input, 96, filter_size=11, stride=4, padding='VALID')
    pool1 = tf.nn.max_pool2d(conv1, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool1')
    lrn1 = tf.nn.local_response_normalization(pool1, depth_radius=2, alpha=2e-5, beta=0.75, bias=1.0, name='norm1')
    with tf.variable_scope('conv1_skip'):
        # Skip branch: 1x1 conv + PReLU off the normalised conv1 features.
        conv1_skip = tfops.conv_layer(lrn1, 16, filter_size=1, activation=None)
        conv1_skip = tfops.prelu(conv1_skip, initializer=prelu_initializer)
        if fuse_type == 'flat': # each img flatten into 1-D
            conv1_skip_flat = flatten(conv1_skip)
    with tf.variable_scope('conv2'):
        # 2-branch by num_groups=2
        conv2 = tfops.conv_layer(lrn1, 256, filter_size=5, num_groups=2, padding='SAME')
    pool2 = tf.nn.max_pool2d(conv2, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool2')
    lrn2 = tf.nn.local_response_normalization(pool2, depth_radius=2, alpha=2e-5, beta=0.75, bias=1.0, name='norm2')
    with tf.variable_scope('conv2_skip'):
        conv2_skip = tfops.conv_layer(lrn2, 32, filter_size=1, activation=None)
        conv2_skip = tfops.prelu(conv2_skip)
        if fuse_type == 'flat':
            conv2_skip_flat = flatten(conv2_skip)
    with tf.variable_scope('conv3'):
        conv3 = tfops.conv_layer(lrn2, 384, filter_size=3, padding='SAME')
    with tf.variable_scope('conv4'):
        conv4 = tfops.conv_layer(conv3, 384, filter_size=3, num_groups=2, padding='SAME')
    with tf.variable_scope('conv5'):
        conv5 = tfops.conv_layer(conv4, 256, filter_size=3, num_groups=2, padding='SAME')
    pool5 = tf.nn.max_pool2d(conv5, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='VALID', name='pool5')
    if fuse_type == 'flat':
        pool5_flat = flatten(pool5)
    with tf.variable_scope('conv5_skip'):
        conv5_skip = tfops.conv_layer(conv5, 64, filter_size=1, activation=None)
        conv5_skip = tfops.prelu(conv5_skip)
        if fuse_type == 'flat':
            conv5_skip_flat = flatten(conv5_skip)
    with tf.variable_scope('big_concat'):
        # concat all skip layers
        if fuse_type == 'flat':
            feat = [conv1_skip_flat, conv2_skip_flat, conv5_skip_flat, pool5_flat]
        elif fuse_type == 'spp':
            feat = [conv1_skip, conv2_skip, conv5_skip, pool5]
            spp_bin_list = [[27], [13], [13], [6]] # pool size on pixel = [[4x2, 4x2, 4x2, 5x3]]
            for i, bins in enumerate(spp_bin_list): # a spp for each layer
                feat[i] = tfops.spatial_pyramid_pooling(feat[i], bins)
        else: # resize as image
            feat = [conv1_skip, conv2_skip, conv5_skip, pool5]
            size_list = [27, 13, 13, 6]
            for i, sz in enumerate(size_list):
                feat[i] = flatten(tf.image.resize(feat[i], (sz, sz)))
        feat_concat = tf.concat(feat, 1)
    return feat_concat
def re3_lstm_tracker(input, num_unrolls, batch_size, prev_state=None, lstm_size=512, rnn_type='lstm'):
    """
    input: object features in time sequence, expected to be [batch, time, feat_t + feat_t-1], with time = num_unrolls
    prev_state: the initial state for RNN cell, set to placeholder to enable single-step inference
    TODO: migrate to TF 2.0:
        contrib.rnn.LSTMCell -> keras.layers.LSTMCell
        contrib.rnn.LSTMStateTuple -> get_initial_tuple
        dynamic_rnn -> keras.layers.RNN
    """
    assert rnn_type in ['lstm']
    with tf.variable_scope('lstm1'):
        lstm1 = tf.contrib.rnn.LSTMCell(lstm_size)
        # cell state
        if prev_state is not None: # if traker already running
            state1 = tf.contrib.rnn.LSTMStateTuple(prev_state[0], prev_state[1])
        else:
            state1 = lstm1.zero_state(batch_size, dtype=tf.float32)
        # unroll
        lstm1_outputs, state1 = tf.nn.dynamic_rnn(lstm1, input, initial_state=state1, swap_memory=True)
    with tf.variable_scope('lstm2'):
        lstm2 = tf.contrib.rnn.LSTMCell(lstm_size)
        # cell state
        if prev_state is not None: # if still one video (traker already running)
            state2 = tf.contrib.rnn.LSTMStateTuple(prev_state[2], prev_state[3])
        else:
            state2 = lstm2.zero_state(batch_size, dtype=tf.float32)
        # unroll
        # Second LSTM sees the raw features concatenated with lstm1's output.
        lstm2_inputs = tf.concat([input, lstm1_outputs], -1)
        lstm2_outputs, state2 = tf.nn.dynamic_rnn(lstm2, lstm2_inputs, initial_state=state2, swap_memory=True)
    flatten_out = tf.reshape(lstm2_outputs, [-1, lstm2_outputs.get_shape().as_list()[-1]]) # flatten as [batch x time, feat]
    # final dense layer.
    with tf.variable_scope('fc_output'):
        fc_output = tfops.dense_layer(flatten_out, 4, activation=None, weight_name='W_fc', bias_name='b_fc') # [batch x time, 4]
        fc_output = tf.reshape(fc_output, [-1, num_unrolls, 4]) # [batch, time, 4]
return fc_output, (state1, state2) | LiyaoTang/Research-Lib | Models/TF_Models/TF_Modules.py | TF_Modules.py | py | 7,634 | python | en | code | 1 | github-code | 36 |
29471509063 | import math
from pcbflow import *
if __name__ == "__main__":
    # Demo: place text on both sides and several layers of a 40x30 board.
    brd = Board((40, 30))
    brd.DC((10, 10)).text("Top Text", side="top")
    brd.DC((30, 10)).text("Bottom Text", side="bottom")
    # Silkscreen (GTO) and copper (GTL/GBL) text, with optional keepout
    # and soldermask boxes around the copper text.
    brd.add_text((10, 15), "Test Text 1", scale=1.0, layer="GTO")
    brd.add_text((10, 20), "Copper Text 1", scale=1.0, layer="GTL")
    brd.add_text((10, 25), "Copper Text 2", scale=2.0, layer="GTL", keepout_box=True)
    brd.add_text((20, 10), "Copper Text 3", side="bottom", scale=2.0, layer="GBL")
    brd.add_text(
        (20, 15),
        "Copper Text 4",
        side="bottom",
        scale=2.0,
        layer="GBL",
        keepout_box=True,
    )
    brd.add_text(
        (20, 25),
        "Copper Text 5",
        side="bottom",
        scale=2.0,
        layer="GBL",
        keepout_box=True,
        soldermask_box=True,
    )
    # Outline the board, pour ground fills and write the output files.
    brd.add_outline()
    brd.fill_layer("GTL", "GND")
    brd.fill_layer("GBL", "GND")
    brd.save("%s" % (__file__[:-3]))
| michaelgale/pcbflow | examples/basic/text.py | text.py | py | 965 | python | en | code | 93 | github-code | 36 |
72721098023 | import base64
import binascii
import random
from utilities import util
# Challenge 16
def unpadding_validation(string):
    """Validate and strip PKCS#7 padding from a decrypted string.

    Returns the string without its padding, or raises PaddingError if the
    padding is malformed.
    """
    k = string[-1]
    pad_len = ord(k)
    # A valid PKCS#7 pad value is at least 1 and never longer than the data.
    # The original accepted a zero (or oversized) pad byte, which defeats
    # the padding check entirely.
    if pad_len == 0 or pad_len > len(string):
        raise PaddingError('Inappropriate padding detected')
    for i in range(len(string) - 1, len(string) - 1 - pad_len, -1):
        if string[i] != k:
            raise PaddingError('Inappropriate padding detected')
    return string[0:len(string) - pad_len]
class PaddingError(Exception):
    """Raised when PKCS#7 padding fails validation."""

    def __init__(self, value):
        # Forward the message to Exception so .args and pickling work;
        # the original skipped super().__init__, leaving args empty.
        super().__init__(value)
        self.value = value

    def __str__(self):
        return repr(self.value)
# Fixed AES-128 key shared by the two CBC oracles below.
key = b'\x01\x1f\x89\x94\x85{\x8e\xa4\xfa\x8e\xc9\xc3{\x1dz\x06'
def cbc_encrypt_surround(chosen):
    """Encryption oracle: strip ';' and '=' from the user data, sandwich it
    between fixed prefix/suffix strings and CBC-encrypt under `key`
    (util is a project helper module)."""
    prefix = b'comment1=cooking%20MCs;userdata='
    secret = b';comment2=%20like%20a%20pound%20of%20bacon'
    chosen = chosen.replace(b';', b'').replace(b'=',b'')
    return util.cbc_encrypt(prefix + chosen + secret, key)
def cbc_decrypt_surround(ciphertext):
    """Decryption oracle: return True iff ';admin=true;' appears in the
    decrypted plaintext (and print that plaintext)."""
    plaintext = util.cbc_decrypt(ciphertext, key)
    if plaintext.find(b';admin=true;') != -1:
        print(plaintext)
        return True
    return False
def cbc_bitflipping_attack():
    """CBC bit-flipping: XOR the first ciphertext block so the second
    plaintext block decrypts to ';admin=true;' (padded); garbling the
    first plaintext block is an accepted side effect."""
    ciphertext = cbc_encrypt_surround(b'')
    BLOCK_SIZE = 16
    # NOTE(review): num_blocks is computed but unused.
    num_blocks = len(ciphertext)//BLOCK_SIZE
    first_block_ciphertext = util.get_ith_block(ciphertext, 0, BLOCK_SIZE)
    # Known plaintext of block 1: the tail of the fixed 32-byte prefix.
    second_block_plaintext = b'%20MCs;userdata='
    desired_text = util.padding(b';admin=true;', BLOCK_SIZE)
    fixed_first_block = b''
    for i in range(BLOCK_SIZE):
        # C0' = C0 ^ P1 ^ target, so that D(C1) ^ C0' = target.
        fixed_first_block += bytes([second_block_plaintext[i]^first_block_ciphertext[i]^desired_text[i]])
    fixed_ciphertext = fixed_first_block + ciphertext[BLOCK_SIZE:]
    print(cbc_decrypt_surround(fixed_ciphertext))
if __name__ == '__main__':
    # Run the challenge demo.
    cbc_bitflipping_attack()
| fortenforge/cryptopals | challenges/CBC_bitflipping_attack.py | CBC_bitflipping_attack.py | py | 1,597 | python | en | code | 13 | github-code | 36 |
42915655723 | """Balanced Parentheses Program
This program is used to check whether user given arithmetic expression is balanced or not
Example:
    Balanced Expression :: {{a+b}*[a-b]}
    Unbalanced Expression :: {{a+b}*[a-b]
Author:
Saurabh <singh.saurabh3333@gmail.com>
Since:
20 Nov,2018
"""
from com.bridgelabz.util.datastructure_util import *
from com.bridgelabz.util.utility import *
def balance_parentheses():
    """
    Prompt for an arithmetic expression and check whether its
    parentheses are balanced (runner for Stack.balanced_parentheses).

    The original fell through after a failed read and then used the
    unbound name `string`, raising NameError; it now returns early.
    :return: nothing
    """
    utility_obj = Utility()
    stack = Stack()
    print("Enter Expression to check for balanced Parentheses")
    try:
        string = utility_obj.get_string()
    except Exception as e:
        print(e)
        print("Enter String")
        return
    stack.balanced_parentheses(string)
if __name__ == "__main__":
    # Script entry point.
    balance_parentheses()
| Saurabh323351/PythonPrograms | balanced_parentheses.py | balanced_parentheses.py | py | 861 | python | en | code | 0 | github-code | 36 |
21413366584 | import pydot
# Default Graphviz styling applied to every token node; individual tokens
# can override entries via the custom extension `token._.plot`.
DEFAULT_NODE_ATTRS = {
    'color': 'cyan',
    'shape': 'box',
    'style': 'rounded',
    'fontname': 'palatino',
    'fontsize': 10,
    'penwidth': 2
}
def node_label(token):
    """Build the display label for a token's graph node.

    Uses the custom ``token._.plot['label']`` override when present;
    otherwise falls back to "<text> [<index>]\\n(<pos> / <tag>)".
    """
    try:
        label = token._.plot['label']
    except (AttributeError, KeyError, TypeError):
        # No usable custom label: the original used a bare `except:`, which
        # also swallowed KeyboardInterrupt/SystemExit; catch only the
        # failures a missing/ill-formed override can produce.
        label = '{0} [{1}]\n({2} / {3})'.format(
            token.orth_,
            token.i,
            token.pos_,
            token.tag_
        )
    return label
def get_edge_label(from_token, to_token):
    """Label an edge with the dependent token's dependency relation."""
    return f' {from_token.dep_}'
def to_pydot(tokens, get_edge_label=get_edge_label):
    """Build a pydot graph with one node per token and one edge per
    dependency arc; edges to heads outside `tokens` are skipped."""
    graph = pydot.Dot(graph_type='graph')
    # Add nodes to graph
    idx2node = {}
    for token in tokens:
        # Per-token style overrides live on the custom extension `token._.plot`.
        try:
            plot_attrs = token._.plot
        except AttributeError:
            plot_attrs = {}
        for attr, val in DEFAULT_NODE_ATTRS.items():
            if attr not in plot_attrs:
                plot_attrs[attr] = val
        label = node_label(token)
        plot_attrs['name'] = token.i
        plot_attrs['label'] = label
        node = pydot.Node(**plot_attrs)
        idx2node[token.i] = node
        graph.add_node(node)
    '''Add edges'''
    for token in tokens:
        if token.dep_ == 'ROOT':
            continue
        if token.head not in tokens:
            continue
        # Edge runs from the head down to the dependent token.
        from_token = token
        to_token = token.head
        from_node = idx2node[from_token.i]
        to_node = idx2node[to_token.i]
        label = get_edge_label(from_token, to_token)
        edge = pydot.Edge(
            to_node, from_node, label=label,
            fontsize=12
        )
        graph.add_edge(edge)
    return graph
def create_png(tokens, prog=None):
    """Render the tokens' dependency graph and return raw PNG bytes."""
    return to_pydot(tokens).create_png(prog=prog)
| cyclecycle/visualise-spacy-tree | visualise_spacy_tree/visualise_spacy_tree.py | visualise_spacy_tree.py | py | 1,757 | python | en | code | 7 | github-code | 36 |
21743085266 | a = [27,3,-91,2,99,52,1,-10]
def merge_sort(lst):
    """Return a new list with the elements of `lst` in ascending order.

    Classic top-down merge sort; the input list is never mutated.
    """
    # A list of length 0 or 1 is already sorted -- hand back a copy.
    if len(lst) <= 1:
        return lst[:]
    # Sort each half recursively.
    mid = len(lst) // 2
    left_sorted = merge_sort(lst[:mid])
    right_sorted = merge_sort(lst[mid:])
    # Merge the two sorted halves into a single sorted list.
    merged = []
    i = j = 0
    while i < len(left_sorted) and j < len(right_sorted):
        if left_sorted[i] < right_sorted[j]:
            merged.append(left_sorted[i])
            i += 1
        else:
            merged.append(right_sorted[j])
            j += 1
    # At most one half still has elements; slicing an exhausted half is a no-op.
    merged.extend(left_sorted[i:])
    merged.extend(right_sorted[j:])
    return merged
print(a)
print(merge_sort(a)) | CUNY-CISC1215-Fall2021/sorting | merge_sort.py | merge_sort.py | py | 1,474 | python | en | code | 0 | github-code | 36 |
19406462590 | #
# @lc app=leetcode id=583 lang=python3
#
# [583] Delete Operation for Two Strings
#
# @lc code=start
class Solution:
    def minDistance(self, word1: str, word2: str) -> int:
        """Minimum number of single-character deletions (from either word)
        needed to make the two words equal.

        Rolling 1-D dynamic programming over word2; current[j] holds the
        answer for word1[:i] versus word2[:j].
        """
        rows, cols = len(word1), len(word2)
        # Base row: deleting every character of word2's prefix.
        current = list(range(cols + 1))
        for i in range(1, rows + 1):
            previous = current[:]
            current[0] = i  # delete all i characters of word1's prefix
            for j in range(1, cols + 1):
                if word1[i - 1] == word2[j - 1]:
                    current[j] = previous[j - 1]
                else:
                    # Delete both, delete from word1, or delete from word2.
                    current[j] = min(previous[j - 1] + 2,
                                     previous[j] + 1,
                                     current[j - 1] + 1)
        return current[-1]
Solution().minDistance("sea", "eat")
# @lc code=end
| Matthewow/Leetcode | vscode_extension/583.delete-operation-for-two-strings.py | 583.delete-operation-for-two-strings.py | py | 631 | python | en | code | 2 | github-code | 36 |
24327847015 | import tensorflow as tf
from pdb import set_trace as st
from dovebirdia.deeplearning.networks.base import AbstractNetwork
from dovebirdia.deeplearning.networks.base import FeedForwardNetwork
from dovebirdia.deeplearning.networks.autoencoder import Autoencoder
from dovebirdia.datasets.ccdc_mixtures import ccdcMixturesDataset
from dovebirdia.datasets.mnist import MNISTDataset
# load MNIST
mnist_params = {
#'dataset_dir':'/home/mlweiss/Documents/wpi/research/code/sensors/mixtures/datasets/02_05_19-0905144322/',
'val_size':0.1,
'supervised':True,
'with_val':True,
'onehot':False,
#'resistance_type':'resistance_z',
#'labels':None,
#'sensors':None,
#'with_synthetic':True,
}
ccdc_params = {
'dataset_dir':'/home/mlweiss/Documents/wpi/research/data/ccdc/dvd_dump_clark_3/split/07_12_19-1203141455/',
'with_val':True,
'resistance_type':'resistance',
'labels':None,
'sensors':None,
'with_synthetic':True,
}
dataset = MNISTDataset(params=mnist_params).getDataset()
for k,v in dataset.items():
print(k,v.shape)
# parameters dictionary
params = dict()
# network params
params['input_dim'] = 784
params['output_dim'] = 10
params['hidden_dims'] = [ 128, 64, 16 ]
params['output_activation'] = tf.nn.sigmoid
params['activation'] = tf.nn.sigmoid
params['use_bias'] = True
params['kernel_initializer'] = 'glorot_normal'
params['weight_initializer'] = tf.initializers.glorot_uniform
params['bias_initializer'] = tf.initializers.zeros #'zeros'
params['kernel_regularizer'] = None
params['weight_regularizer'] = None
params['bias_regularizer'] = None
params['activity_regularizer'] = None
params['kernel_constraint'] = None
params['bias_constraint'] = None
# loss
params['loss'] = tf.losses.mean_squared_error
#tf.keras.losses.SparseCategoricalCrossentropy(from_logits=True)
params['history_size'] = 100
# training
params['epochs'] = 2
params['mbsize'] = 32
params['optimizer'] = tf.train.AdamOptimizer
params['learning_rate'] = 1e-3
params['res_dir'] = 'results/'
# Network
nn = FeedForwardNetwork(params)
print(nn.__class__)
nn.fit(dataset)
| mattweiss/public | examples/fftest.py | fftest.py | py | 2,114 | python | en | code | 0 | github-code | 36 |
19262622912 | from datetime import datetime, timedelta
from pokemongo_bot import inventory
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.tree_config_builder import ConfigException
class ShowBestPokemon(BaseTask):
"""
Periodically displays the user best pokemon in the terminal.
Example config :
{
"type": "ShowBestPokemon",
"config": {
"enabled": true,
"min_interval": 60,
"amount": 5,
"order_by": "cp",
"info_to_show": ["cp", "ivcp", "dps"]
}
}
min_interval : The minimum interval at which the pokemon are displayed,
in seconds (defaults to 120 seconds).
The update interval cannot be accurate as workers run synchronously.
amount : Amount of pokemon to show
order_by : Stat that will be used to get best pokemons
Available Stats: 'cp', 'iv', 'ivcp', 'ncp', 'dps', 'hp', 'level'
info_to_show : Info to show for each pokemon
Available info_to_show :
'cp',
'iv_ads',
'iv_pct',
'ivcp',
'ncp',
'level',
'hp',
'moveset',
'dps'
"""
SUPPORTED_TASK_API_VERSION = 1
def initialize(self):
self.next_update = None
self.min_interval = self.config.get('min_interval', 120)
self.amount = self.config.get('amount', 3)
self.order_by = self.config.get('order_by', 'cp')
self.info_to_show = self.config.get('info_to_show', [])
def work(self):
"""
Displays the pokemon if necessary.
:return: Always returns WorkerResult.SUCCESS.
:rtype: WorkerResult
"""
if not self.info_to_show or not self.amount or not self._should_print():
return WorkerResult.SUCCESS
self.pokemons = inventory.pokemons().all()
line = self._get_pokemons_line()
if not line:
return WorkerResult.SUCCESS
self.print_pokemons(line)
return WorkerResult.SUCCESS
def _should_print(self):
"""
Returns a value indicating whether the pokemon should be displayed.
:return: True if the stats should be displayed; otherwise, False.
:rtype: bool
"""
return self.next_update is None or datetime.now() >= self.next_update
def _compute_next_update(self):
"""
Computes the next update datetime based on the minimum update interval.
:return: Nothing.
:rtype: None
"""
self.next_update = datetime.now() + timedelta(seconds=self.min_interval)
def print_pokemons(self, pokemons):
"""
Logs the pokemon into the terminal using an event.
:param pokemons: The pokemon to display.
:type pokemons: string
:return: Nothing.
:rtype: None
"""
self.emit_event(
'show_best_pokemon',
formatted="*Best Pokemons* {pokemons}",
data={
'pokemons': pokemons
}
)
self._compute_next_update()
def _get_pokemons_line(self):
"""
Generates a string according to the configuration.
:return: A string containing pokemons and their info, ready to be displayed.
:rtype: string
"""
def get_poke_info(info, pokemon):
poke_info = {
'cp': pokemon.cp,
'iv': pokemon.iv,
'ivcp': pokemon.ivcp,
'ncp': pokemon.cp_percent,
'level': pokemon.level,
'hp': pokemon.hp,
'dps': pokemon.moveset.dps
}
if info not in poke_info:
raise ConfigException("order by {}' isn't available".format(self.order_by))
return poke_info[info]
def get_poke_info_formatted(info, pokemon):
poke_info = {
'name': pokemon.name,
'cp': 'CP {}'.format(pokemon.cp),
'iv_ads': 'A/D/S {}/{}/{}'.format(pokemon.iv_attack, pokemon.iv_defense, pokemon.iv_stamina),
'iv_pct': 'IV {}'.format(pokemon.iv),
'ivcp': 'IVCP {}'.format(round(pokemon.ivcp,2)),
'ncp': 'NCP {}'.format(round(pokemon.cp_percent,2)),
'level': "Level {}".format(pokemon.level),
'hp': 'HP {}/{}'.format(pokemon.hp, pokemon.hp_max),
'moveset': 'Moves: {}'.format(pokemon.moveset),
'dps': 'DPS {}'.format(round(pokemon.moveset.dps, 2))
}
if info not in poke_info:
raise ConfigException("info '{}' isn't available for displaying".format(info))
return poke_info[info]
info_to_show = ['name'] + self.info_to_show
pokemons_ordered = sorted(self.pokemons, key=lambda x: get_poke_info(self.order_by, x), reverse=True)
pokemons_ordered = pokemons_ordered[:self.amount]
poke_info = ['({})'.format(', '.join([get_poke_info_formatted(x, p) for x in info_to_show])) for p in pokemons_ordered]
line = ' | '.join(poke_info)
return line
| PokemonGoF/PokemonGo-Bot | pokemongo_bot/cell_workers/show_best_pokemon.py | show_best_pokemon.py | py | 5,191 | python | en | code | 3,815 | github-code | 36 |
553309764 | """序列化练习"""
# pickle
import json
import pickle
d = dict(name='Bob', age=20, acore=80)
f = open('dump.txt', 'wb')
pickle.dump(d, f)
f.close()
f = open('dump.txt', 'rb')
d = pickle.load(f)
print(d)
# json
d = dict(name='Bob', age=20, acore=80)
print(json.dumps(d))
# JSON进阶
# class序列化和反序列化
class Student(object):
    """Plain value object used to demonstrate JSON (de)serialization."""

    def __init__(self, name, age, sex):
        """
        :param name: student name (may contain non-ASCII characters)
        :param age: age in years
        :param sex: sex/gender label
        """
        self.name = name
        self.age = age
        self.sex = sex
# Instance under test; the non-ASCII name exercises ensure_ascii=False handling.
s = Student('徐新宇', 20, 'female')
def stu2dict(std):
    """Convert a Student-like object into a JSON-serializable dict."""
    fields = ('name', 'age', 'sex')
    return {field: getattr(std, field) for field in fields}
# Serialize the Student via a converter function supplied through `default`.
print(json.dumps(s,default=stu2dict,ensure_ascii=False)) # {"name": "xuxin", "age": 20, "sex": "female"}
# `__dict__` works for any plain object (a "universal" converter).
print(json.dumps(s,default=lambda obj:obj.__dict__,ensure_ascii=False)) # universal formula
# Deserialization
json_str = '{"age": 20, "score": 88, "name": "Bob"}'
def dict2stu(d):
    """Rebuild a Student from a decoded JSON dict (for json.loads object_hook).

    Note: the JSON sample carries 'score' rather than 'sex'; that value is
    passed through as the third constructor argument.
    """
    # Fix: the original called Student(d['name'], d['score'], d['age']),
    # putting the score into the age slot and the age into the third slot.
    return Student(d['name'], d['age'], d['score'])
# object_hook turns each decoded dict into a Student instance.
print(json.loads(json_str,object_hook=dict2stu)) #<__main__.Student object at 0x000002063CC369E8>
| xuxinyu2020/my-python-work | practice/24pickle_json.py | 24pickle_json.py | py | 1,040 | python | en | code | 0 | github-code | 36 |
32199916820 | import muesli_functions as mf
import scipy as sp
# Load samples
# NOTE(review): X appears to be a list of per-field pixel arrays with two
# bands in columns 0 (red?) and 1 (NIR?) and Y their field ids — confirm
# against muesli_functions.read2bands.
X,Y = mf.read2bands("../Data/grassland_id_2m.sqlite",70,106)
ID = []
# Compute NDVI
# NOTE(review): xrange implies this script targets Python 2.
NDVI = []
for i in xrange(len(X)):
    X_ = X[i]
    # Compute safe version of NDVI
    # Only pixels with a positive denominator are kept (avoids divide-by-zero).
    DENOM = (X_[:,1]+X_[:,0])
    t = sp.where(DENOM>0)[0]
    NDVI_ = (X_[t,1]-X_[t,0])/DENOM[t]
    if len(NDVI_) > 0:
        NDVI.append(NDVI_)
# Scan Grasslands
# Keep fields whose mean NDVI exceeds 0.6.
# NOTE(review): if any field was skipped above (no valid pixels), NDVI[i]
# no longer aligns with Y[i] — the reported ids would be shifted. Verify.
for i in xrange(len(NDVI)):
    m = sp.mean(NDVI[i][:,sp.newaxis])
    if m > 0.6:
        ID.append(Y[i])
        print("ID {} and mean NDVI {}".format(Y[i],m))
print("Number of selected grasslands: {}".format(len(ID)))
sp.savetxt("id_grasslands.csv",ID,delimiter=',')
| mfauvel/GrasslandsSympa | Codes/filter_id.py | filter_id.py | py | 672 | python | en | code | 0 | github-code | 36 |
30372123851 | import sys
import cv2
import numpy as np
import Analyzer
from learning import Parameters
import FeatureDebug
# Trackbar window title and per-slider labels (numbered to fix display order).
WINDOW = 'Options'
PARAM1 = '1) Param 1'
PARAM2 = '2) Param 2'
MIN_RAD = '3) Minimum Radius'
MAX_RAD = '4) Maximum Radius'
WINDOW_BOUND = '5) Top Left Window Px'
WINDOW_BOUND2 = '6) Top Right Window px'
# Hough-circle detection limits.
HOUGH_PARAM1 = 1
HOUGH_MAX_PARAM2 = 300
HOUGH_MIN_RADIUS = 0
HOUGH_MAX_RADIUS = 40
HOUGH_MIN_DIST = 20 # the minimum distance two detected circles can be from one another
HOUGH_MAX_ATTEMPTS = 100 #define the number of attempts to find at least one circle
# Canny edge detector slider labels.
CANNY_LOW = '7) Canny LB'
CANNY_HIGH = '8) Canny UP'
# Last-seen slider values; module-level state updated by initHoughOptions.
p1 = 0
p2 = 0
minR = 0
maxR = 0
cannyLb = 0
cannyUb = 0
def nothing(dummyVar = None):
    """No-op callback: cv2.createTrackbar requires a callable even when
    slider positions are polled elsewhere."""
    return None
def initHoughOptions(cameraType, callback):
    """Create the OpenCV trackbar window and poll it in a blocking loop,
    invoking *callback* whenever a Hough / Canny / adaptive-threshold
    value changes.

    Runs only when FeatureDebug.TRACKBAR is enabled.  The loop exits when
    the user presses 'q' (clean close) or 'e' (hard process exit).
    Updates the module-level slider-state globals as a side effect.
    """
    if FeatureDebug.TRACKBAR:
        global p1, p2, minR, maxR, cannyUb, cannyLb, adaptive1
        #get default start values
        p1, p2, minR, maxR = Parameters.HoughParamaters.getParams(cameraType)
        cannyLb, cannyUb = Parameters.Canny.getParams(cameraType)
        adaptive1 = 11
        # Create a black image, a window
        img = np.zeros((200,300,3), np.uint8)
        cv2.namedWindow(WINDOW)
        # One trackbar per tunable parameter; `nothing` is a dummy callback,
        # positions are polled in the loop below instead.
        # NOTE(review): no trackbar is ever created for WINDOW_BOUND2.
        cv2.createTrackbar(PARAM1, WINDOW, 0, HOUGH_PARAM1, nothing)
        cv2.createTrackbar(MIN_RAD, WINDOW, 0, 255, nothing)
        cv2.createTrackbar(PARAM2, WINDOW, 0, HOUGH_MAX_PARAM2, nothing)
        cv2.createTrackbar(MAX_RAD, WINDOW, 0, HOUGH_MAX_RADIUS, nothing)
        cv2.createTrackbar(WINDOW_BOUND, WINDOW, 0, 100, nothing)
        cv2.createTrackbar(CANNY_LOW, WINDOW, 0, 255, nothing)
        cv2.createTrackbar(CANNY_HIGH, WINDOW, 0, 255, nothing)
        cv2.createTrackbar('Block Size', WINDOW, -21, 21, nothing)
        # Seed slider positions with the camera defaults.
        # NOTE(review): Canny sliders are forced to 35/150 here, overriding
        # the Parameters.Canny defaults read above — confirm intentional.
        cv2.setTrackbarPos(PARAM1, WINDOW, p1)
        cv2.setTrackbarPos(PARAM2, WINDOW, p2)
        cv2.setTrackbarPos(MIN_RAD, WINDOW, minR)
        cv2.setTrackbarPos(MAX_RAD, WINDOW, maxR)
        cv2.setTrackbarPos(CANNY_LOW, WINDOW, 35)
        cv2.setTrackbarPos(CANNY_HIGH, WINDOW, 150)
        cv2.setTrackbarPos('Block Size', WINDOW, 11)
        while(1):
            cv2.imshow(WINDOW,img)
            cv2.moveWindow(WINDOW, 0, 500)
            k = cv2.waitKey(1) & 0xFF
            if k == ord('q'):
                Analyzer.close()
                break
            elif k == ord('e'):
                sys.exit('Force Close')
            # Poll every slider once per frame.
            p1Temp = cv2.getTrackbarPos(PARAM1, WINDOW)
            p2Temp = cv2.getTrackbarPos(PARAM2, WINDOW)
            minRTemp = cv2.getTrackbarPos(MIN_RAD, WINDOW)
            maxRTemp = cv2.getTrackbarPos(MAX_RAD, WINDOW)
            cannyLbTemp = cv2.getTrackbarPos(CANNY_LOW, WINDOW)
            cannyUbTemp = cv2.getTrackbarPos(CANNY_HIGH, WINDOW)
            adaptive1Temp = cv2.getTrackbarPos('Block Size', WINDOW)
            updatedHoughCircle = False
            updatedCanny = False
            updatedAdaptive = False
            # Detect changes against the cached globals and record which
            # parameter group needs a callback.
            if p1Temp != p1:
                p1 = p1Temp
                updatedHoughCircle = True
            if p2Temp != p2:
                p2 = p2Temp
                updatedHoughCircle = True
            if minRTemp != minR:
                minR = minRTemp
                updatedHoughCircle = True
            if maxRTemp != maxR:
                maxR = maxRTemp
                updatedHoughCircle = True
            if cannyLbTemp != cannyLb:
                cannyLb = cannyLbTemp
                updatedCanny = True
            if cannyUbTemp != cannyUb:
                cannyUb = cannyUbTemp
                updatedCanny = True
            if adaptive1Temp != adaptive1:
                adaptive1 = adaptive1Temp
                updatedAdaptive = True
            # Fire at most one callback per changed parameter group.
            if updatedHoughCircle:
                callback(Parameters.Trackbar.Hough, param1 = p1, param2 = p2, minRadius = minR, maxRadius = maxR)
                pass
            if updatedCanny:
                callback(Parameters.Trackbar.Canny, cannyLb = cannyLb, cannyUb = cannyUb)
                pass
            if updatedAdaptive:
                callback(Parameters.Trackbar.AdaptiveThreshold, blockSize = adaptive1)
        cv2.destroyWindow(WINDOW)
| vicidroiddev/eyeTracking | Fokus/debug/DebugOptions.py | DebugOptions.py | py | 4,149 | python | en | code | 0 | github-code | 36 |
74339290984 | # Type all other functions here
def main():
    """Prompt for a sample text, echo it back, then start the menu loop."""
    sample_text = input("Enter a sample text:")
    print("You entered:", sample_text)
    print_menu(sample_text)
def print_menu(usrStr):
    """Show the analysis menu in a loop and run the chosen operation on usrStr.

    Loops until the user chooses 'q'; unknown options simply redisplay
    the menu.
    """
    while True: #Must loop this or else it will not print menu again when done
        menuOp = input('''MENU
c - Number of non-whitespace characters
w - Number of words
f - Fix capitalization
r - Replace punctuation
s - Shorten spaces
q - Quit
Choose an option:
''') #This is where we call everything
        # Dispatch on the single-letter option; each handler prints its own
        # output, so return values are intentionally ignored here.
        if menuOp== "c":
            get_num_of_non_WS_characters(usrStr)
        if menuOp== "w":
            get_num_of_words(usrStr)
        if menuOp== "f":
            fix_capitalization(usrStr)
        if menuOp== "r":
            replace_punctuation(usrStr)
        if menuOp== "s":
            shorten_space(usrStr)
        if menuOp== "q":
            break #if user hits q it will break out of loop and be done since everything else is a function
        print() #Newline
def get_num_of_non_WS_characters(usrStr):
    """Count, print and return the number of non-whitespace characters."""
    non_ws_count = sum(1 for ch in usrStr if not ch.isspace())
    print("Number of non-whitespace characters:", non_ws_count)
    return non_ws_count
def get_num_of_words(usrStr):
    """Count, print and return the number of whitespace-separated words."""
    words = usrStr.split()
    word_total = len(words)
    print("Number of words:", word_total)
    return word_total
def shorten_space(usrStr):
    """Collapse runs of whitespace into single spaces; print and return the result.

    split() with no argument drops leading/trailing whitespace and splits on
    any whitespace run, so joining with one space normalizes the text.
    """
    words = usrStr.split()
    condensed = " ".join(words)
    print("Edited text:", condensed)
    print()
    return condensed
def fix_capitalization(usrStr):
    """Capitalize the first letter and any letter that starts a new sentence.

    A letter starts a new sentence when the most recent non-space character
    before it is '.', '!' or '?'.  Prints a summary and returns the edited
    text together with the number of letters that were capitalized.

    :param usrStr: text to fix
    :return: (edited_text, letters_capitalized) tuple
    """
    letters_capitalized = 0
    new_string = ""
    if usrStr:  # guard: the original raised IndexError on an empty string
        punc_before = usrStr[0]
        if usrStr[0].islower():
            new_string += usrStr[0].upper()
            letters_capitalized += 1
        else:
            # Bug fix: the original dropped the first character entirely
            # whenever it was not a lowercase letter (e.g. "Hello" -> "ello").
            new_string += usrStr[0]
        for letter in usrStr[1:]:
            # Spaces never update punc_before, so "x. y" still capitalizes 'y'.
            if letter.islower() and punc_before in ".!?":
                new_string += letter.capitalize()
                letters_capitalized += 1
            else:
                new_string += letter
            if letter != " ":
                punc_before = letter
    print("Number of letters capitalized:", letters_capitalized, "\n")
    print("Edited text:", new_string, "\n\n")
    return new_string, letters_capitalized
def replace_punctuation(usrStr,exclamationCount = 0, semicolonCount = 0):
    """Replace ';' with ',' and '!' with '.'; print counts and the edited text.

    The count parameters act as starting offsets and are incremented by the
    number of matching characters found in the original string.  Returns the
    edited string.
    """
    semicolonCount += usrStr.count(";")
    exclamationCount += usrStr.count("!")
    usrStr = usrStr.replace(";", ",").replace("!", ".")
    print("Punctuation replaced")
    print("exclamationCount:", exclamationCount)
    print("semicolonCount:", semicolonCount)
    print("Edited text:", usrStr)
    return usrStr
# Script entry point: run the interactive analyzer only when executed directly.
if __name__ == '__main__': #calls main function
    main()
| Jatt530/Text-Analyzer- | zyLAB 6.19.py | zyLAB 6.19.py | py | 4,163 | python | en | code | 0 | github-code | 36 |
14991188051 | #
# Copyright (C) 2012 ESIROI. All rights reserved.
# Dynamote is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Dynamote is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Dynamote. If not, see <http://www.gnu.org/licenses/>.
#
import msgpack
import time
import sys
import zmq
from random import choice
class stb():
    """Set-top-box middleware node built on ZeroMQ (REP server + SUB clients).

    Class attributes list the allowed values for each setting; instance
    attributes hold the current state as strings.
    """
    mode = ["mode_radio", "mode_tv", "mode_vod", "mode_rss", "mode_dvd"]
    Channel = ["channel1", "channel2", "channel3", "channel4"]
    subtitle = ["on", "off"]
    power = ["on", "off"]
    # Attribut for parental controlling
    lock = ["false", "true"]
    mute = ["false", "true" ]
    info_bar = ["false", "true"]
    def __init__(self, mode, channel, subtitle):
        """Initialise default state and create the shared ZeroMQ sockets.

        NOTE(review): the mode/channel/subtitle parameters are ignored;
        the hard-coded defaults below are always used.
        """
        self.mode = "mode_tv"
        self.channel = "channel1"
        self.subtitle = "off"
        self.power = "on"
        self.lock = "false"
        self.mute = "false"
        self.info_bar = "true"
        # Context and sockets are shared module-wide through globals.
        global dynamote
        global stb_rep
        global stb_pub
        global stb_sub
        global stb_pull
        global stb_push
        global port
        dynamote = zmq.Context()
        stb_pub = dynamote.socket(zmq.PUB)
        stb_sub = dynamote.socket(zmq.SUB)
        stb_pull = dynamote.socket(zmq.PULL)
        stb_push = dynamote.socket(zmq.PUSH)
        default_port = "5000"
        ################################ MODE SERVEUR ###############################
        # Autoconfiguration ip_address for connect is 169.254.1.2 on the port 5000
        ip_address = str("169.254.1.2")
        # Provide port as command line argument to run server at two different ports
        # NOTE(review): the int(...) results are discarded, so these lines only
        # raise on a non-numeric argument; port stays a string.
        if len(sys.argv)> 1:
            port = sys.argv[1]
            int(port)
        if len(sys.argv) > 2:
            port1 = sys.argv[2]
            int(port1)
        if len(sys.argv) > 3:
            port2 = sys.argv[3]
            int(port2)
        global stb_description
        # NOTE(review): this builds a tuple, not a formatted string.
        stb_description ="Set top box is on this",ip_address,"and",default_port
        print(stb_description)
        # Bind link to dynamic discovery
    ################################### Description #######################################
    def ready_to_process(self,port):
        """Serve the device description on a REP socket bound to *port*.

        NOTE(review): msgpack.packb(str(fichier_description)) packs the repr
        of the file object, not the file contents — probably meant
        fichier_description.read().  The file is also never closed.
        """
        stb_rep = dynamote.socket(zmq.REP)
        stb_rep.bind("tcp://127.0.0.1:%s" %port)
        fichier_description = open ( "stb-device-api-description.json", "r")
        msg_packed = msgpack.packb(str(fichier_description))
        while True:
            msg = stb_rep.recv()
            print( "Got",msg)
            time.sleep(1)
            msg_description = str(stb_description)
            stb_rep.send(msg_packed)
        ############################### MODE CLIENT ###################################
        # NOTE(review): the serve loop above never breaks, so this REQ client
        # section appears unreachable as written.
        stb_req = dynamote.socket(zmq.REQ)
        print ("Attempting to connect to other process ......")
        #Connect link
        stb_req.connect("tcp://localhost:5001")
        for request in range(2):
            stb_req.send(msg_description)
            print ("Sending message")
            # Get the reply.
            message = stb_req.recv()
            print ("Received reply ", request, "[", message, "]")
    ####################### Publish_subscribe ######################################
    def subscribe_to_dvd(self,port):
        """Subscribe to DVD publications on localhost:*port* and switch to DVD mode."""
        print ( " Middleware waiting for publish ....")
        stb_sub.connect("tcp://localhost:%s"%port)
        stb_sub.setsockopt_unicode(zmq.SUBSCRIBE, "on")
        self.mode = "mode_dvd"
        self.channel = "null"
        self.subtitle = "on"
        self.power = "on"
        self.lock = "false"
        self.mute = "false"
        # NOTE(review): 'infobar' (no underscore) does not match the
        # 'info_bar' attribute set in __init__.
        self.infobar = "false"
        for i in range (3):
            print("............There is a DVD disc in the device", stb_sub.recv())
    def subscribe_to_tv(self, port):
        """Subscribe to TV publications.

        NOTE(review): the connect address is missing '% port', so this tries
        to connect to the literal string 'tcp://localhost:%s' (compare
        subscribe_to_dvd above).  Also, on Python 3 pyzmq setsockopt expects
        a bytes topic; the sibling method uses setsockopt_unicode instead.
        """
        print ( "Middleware waiting for publish ...")
        stb_sub.connect("tcp://localhost:%s")
        stb_sub.setsockopt(zmq.SUBSCRIBE, "")
        for i in range (3):
            print("............There is a TV which is sense", stb_sub.recv())
| maxajeanaimee/Domotique_multimedia | stb_process.py | stb_process.py | py | 4,488 | python | en | code | 0 | github-code | 36 |
21393799623 | """
21.vek API Client
"""
from typing import Optional, Tuple
from bgd.constants import TWENTYFIRSTVEK
from bgd.responses import GameSearchResult, Price
from bgd.services.api_clients import JsonHttpApiClient
from bgd.services.base import GameSearchService
from bgd.services.constants import GET
from bgd.services.responses import APIResponse
class TwentyFirstVekApiClient(JsonHttpApiClient):
    """HTTP API client for the 21vek.by suggest-search endpoint."""
    BASE_SEARCH_URL = "https://search.21vek.by/api/v1.0"
    SEARCH_PATH = "/search/suggest"
    async def search(self, query: str, _: Optional[dict] = None) -> APIResponse:
        """Issue a GET suggest request for *query* and return the raw response."""
        relative_url = "{}?q={}".format(self.SEARCH_PATH, query)
        return await self.connect(GET, self.BASE_SEARCH_URL, relative_url)
class TwentyFirstVekSearchService(GameSearchService):
    """Board-game search backed by the 21vek.by API."""
    def _is_available_game(self, product: dict) -> bool:
        """True for an in-stock product whose URL is in the board-games section."""
        if product["type"] != "product":
            return False
        if product["price"] == "нет на складе":
            return False
        return "board_games" in product["url"]
    async def do_search(self, query: str, *args, **kwargs) -> Tuple[GameSearchResult]:
        """Query the API, keep available board games and build search results."""
        response = await self._client.search(query, **kwargs)
        available = self.filter_results(response.response["items"], self._is_available_game)
        return self.build_results(available)
class TwentyFirstVekGameSearchResultFactory:
    """Builds GameSearchResult objects from raw 21vek.by search items."""
    BASE_URL = "https://21vek.by"
    def create(self, search_result: dict) -> GameSearchResult:
        """Assemble a GameSearchResult from one raw search item."""
        return GameSearchResult(
            description=search_result["highlighted"],
            images=self._extract_images(search_result),
            location=None,
            owner=None,
            prices=[self._extract_price(search_result)],
            source=TWENTYFIRSTVEK,
            subject=search_result["name"],
            url=self._extract_url(search_result),
        )
    @staticmethod
    def _extract_price(product: dict) -> Price:
        """Parse a price string such as "60,00 р." into a Price."""
        # Keep the numeric token and drop the decimal comma: "60,00" -> 6000.
        amount_text = product["price"].split(" ")[0]
        return Price(amount=int(amount_text.replace(",", "")))
    def _extract_url(self, product: dict) -> str:
        """Absolute product URL on 21vek.by."""
        return self.BASE_URL + product["url"]
    @staticmethod
    def _extract_images(product: dict) -> list[str]:
        """Product images, upgraded from the small to the big preview variant."""
        return [product["picture"].replace("preview_s", "preview_b")]
| ar0ne/bg_deal | bgd/services/apis/twenty_first_vek.py | twenty_first_vek.py | py | 2,730 | python | en | code | 0 | github-code | 36 |
27574693160 | def koGiam(xau):
    # Convert each character of the digit string to an int for comparison.
    xau = [int(i) for i in xau]
    # Any adjacent decrease means the sequence is not non-decreasing.
    for i in range(len(xau) - 1):
        if xau[i] > xau[i+1]:
            return "NO"
    return "YES"
# Read the number of test cases, then one digit string per case.
test = int(input())
for t in range(test):
    xau = input()
    print(koGiam(xau))
43843944966 | from dylan.payoff import VanillaPayoff, call_payoff, put_payoff
from dylan.engine import MonteCarloPricingEngine, NaiveMonteCarloPricer
from dylan.marketdata import MarketData
from dylan.option import Option
def main():
    """Price a vanilla call with a naive Monte Carlo engine and print the result."""
    # Market and contract parameters.
    spot, strike = 41.0, 40.0
    rate, volatility, dividend = 0.08, 0.30, 0.0
    expiry = 1.0
    # Simulation settings: number of replications and time steps.
    reps, steps = 100000, 1
    payoff = VanillaPayoff(expiry, strike, call_payoff)
    engine = MonteCarloPricingEngine(reps, steps, NaiveMonteCarloPricer)
    market_data = MarketData(rate, spot, volatility, dividend)
    the_option = Option(payoff, engine, market_data)
    fmt = "The call option price is {0:0.3f}"
    print(fmt.format(the_option.price()))
# Script entry point: run the pricing demo when executed directly.
if __name__ == "__main__":
    main()
| broughtj/dylan | test_naivemc.py | test_naivemc.py | py | 737 | python | en | code | 0 | github-code | 36 |
20407406212 | import csv
def clean_csv(data: list[list]):
"""
Removes trailing empty strings from CSVs that ovvur when extra commas exist
:param data: list of lists returned from from_csv function
:type data: list of lists
:rtype: list[list]
"""
while data[0][-1] == "":
for _ in data:
_.pop()
return data
def from_csv(file: str) -> list[list]:
"""
Read CSV file into a list of lists.
Takes a CSV file path, opens the file, and returns the
contents parsed into a list of rows, where each row
is a list of the data cells.
:param file: Path to CSV file
:type file: str
:returns: Parsed CSV data
:rtype: list[list]
"""
with open(file) as f:
csv_file = csv.reader(f)
data = [row for row in csv_file]
return clean_csv(data)
| jrey999/toRST | formats/csv2rst.py | csv2rst.py | py | 836 | python | en | code | 2 | github-code | 36 |
38243268133 | import turtle
import random
# Khởi tạo cửa sổ
window = turtle.Screen()
window.title("Trò chơi đá bóng sử dụng Turtle Python")
window.bgcolor("white")
window.setup(width=800, height=600)
# Khởi tạo cầu môn
goal = turtle.Turtle()
goal.penup()
goal.goto(250, 200)
goal.pendown()
goal.forward(100)
goal.right(90)
goal.forward(400)
goal.right(90)
goal.forward(100)
# Khởi tạo quả bóng
ball = turtle.Turtle()
ball.shape("circle")
ball.color("red")
ball.penup()
ball.goto(-350, 0)
# khoi tao text
text_display = turtle.Turtle()
text_display.hideturtle()
text_display.penup()
text_display.color("black")
text_display.goto(0, 260)
# Hàm di chuyển bóng đến vị trí ngẫu nhiên
def move_ball():
ball.goto(300,random.randint(-290, 290))
# Hàm kiểm tra xem quả bóng có vào cầu môn hay không
def check_goal():
if ball.xcor() >= 250 and -200 < ball.ycor() < 200:
text_display.write("Goal!", align="center", font=("Arial", 30, "bold"))
else:
text_display.write("Miss!", align="center", font=("Arial", 30, "bold"))
# Thiết lập sự kiện khi ấn phím Space
def on_space():
move_ball()
check_goal()
def on_enter():
prepare_restart()
# Hàm chuẩn bị chơi lại trò chơi
def prepare_restart():
ball.goto(-350, 0)
text_display.clear()
# Kết nối sự kiện với phím Space
window.listen()
window.onkeypress(on_space, "space")
window.onkeypress(on_enter, "Return")
# Khởi chạy chương trình
window.mainloop()
| tungday/html | plú.py | plú.py | py | 1,525 | python | vi | code | 0 | github-code | 36 |
71903386024 | from typing import Tuple, Union, Dict
import numpy as np
import torch as th
from gym import spaces
from torch.nn import functional as F
def preprocess_obs(obs: Union[th.Tensor, Dict, Tuple], observation_space: spaces.Space,
normalize_images: bool = True, allow_unexpected: bool = True) -> th.Tensor:
"""
Preprocess observation to be to a neural network.
For images, it normalizes the values by dividing them by 255 (to have values in [0, 1])
For discrete observations, it create a one hot vector.
:param obs: (th.Tensor) Observation
:param observation_space: (spaces.Space)
:param normalize_images: (bool) Whether to normalize images or not
(True by default)
:param allow_unexpected: allow keys that's not present in observation space, for dict obs only
:return: (th.Tensor)
"""
if isinstance(observation_space, spaces.Box):
if observation_space.dtype == np.uint8 and normalize_images:
return obs.float() / 255.0
return obs.float()
elif isinstance(observation_space, spaces.Discrete):
# One hot encoding and convert to float to avoid errors
return F.one_hot(obs.long(), num_classes=observation_space.n).float()
elif isinstance(observation_space, spaces.MultiDiscrete):
# Tensor concatenation of one hot encodings of each Categorical sub-space
return th.cat(
[
F.one_hot(obs_.long(), num_classes=int(observation_space.nvec[idx])).float()
for idx, obs_ in enumerate(th.split(obs.long(), 1, dim=1))
],
dim=-1,
).view(obs.shape[0], sum(observation_space.nvec))
elif isinstance(observation_space, spaces.MultiBinary):
return obs.float()
elif isinstance(observation_space, spaces.Dict):
processed_obs = {}
for k, o in obs.items():
if k in observation_space.spaces:
processed_obs[k] = preprocess_obs(o, observation_space.spaces[k], normalize_images)
elif allow_unexpected:
if o.dtype == th.uint8:
o = o / 255.0
processed_obs[k] = o.float()
else:
raise AttributeError('key {} not in observation space, set allow_unexpected=True to override'.format(k))
return processed_obs
elif isinstance(observation_space, spaces.Tuple):
return tuple(preprocess_obs(o, os, normalize_images) for o, os in zip(obs, observation_space.spaces))
else:
raise NotImplementedError() | buoyancy99/unsup-3d-keypoints | algorithms/common/utils.py | utils.py | py | 2,559 | python | en | code | 38 | github-code | 36 |
16412379448 | import math
def ciclearea(r):
result1 = math.pi * r * r
return result1
r = 10
print("半径为", r, "的圆的面积为:", ciclearea(r))
# **************************lambda 匿名函数***************************** #
r = 10
result2 = lambda r: math.pi * r * r
print("半径为", r, "的圆的面积为:", result2)
| zhangxinzhou/PythonLearn | helloworld/chapter06/demo03.01.py | demo03.01.py | py | 327 | python | en | code | 0 | github-code | 36 |
37634701363 | from turtle import Screen, Turtle
from typing import Sized
import random
t = Turtle()
t.pensize(6)
def shape(side):
angle = 360/side
for i in range(side):
t.forward(100)
t.right(angle)
colours = ["red", "orange", "green","pink","coral","blue","violet","black","cyan"]
for side in range (3,11):
t.color(random.choice(colours))
shape(side)
s = Screen()
s.exitonclick() | anchalsinghrajput/python | turtle/shape.py | shape.py | py | 424 | python | en | code | 0 | github-code | 36 |
27248275172 | import os
os.environ["TF_CPP_MIN_LOG_LEVEL"] = "3"
import tensorflow as tf
import pandas as pd
import numpy as np
def getThePrediction(tempIn, codeIn, UsedTime):
model = tf.keras.models.load_model("data/model.h5")
converted = tf.Variable(
[
np.array(
[
tempIn,
codeIn,
UsedTime
]
)
],
trainable=True,
dtype=tf.float32
)
result = model(converted)
percentage = round(list(result.numpy())[0][0] * 100, 2)
yesNo = True if percentage >= 50 else False
print("\n50%以上は許容範囲以外になります")
print("誤差が発生する確率:" + str(percentage) + "%")
return percentage, yesNo
if __name__ == "__main__":
getThePrediction(37, 950, 80)
getThePrediction(-20, 10, 0)
getThePrediction(0, 0, 0) | hafiz-kamilin/exercise_cncAccuracyPredictor | c_testModelAccuracy.py | c_testModelAccuracy.py | py | 916 | python | en | code | 0 | github-code | 36 |
26420734429 | import re
formula_easy = '(formula(1,2,3) + formulax(1,2,3) - formulaz(1,suma(1,2),3)) * formula(1,2,3)'
formula_complex = 'Monto() + Suma(1,2,3) + Si(3>Suma(1,2),2,3) - Suma(1,2)'
formula_real = 'Si(EnLista(EmpleadoEmpresa(), 34, 88, 89),TotalHaberes() * 0.01, 0)'
formula_real_complex = """Max(SMVM()/200*iif(EmpJorParc()>0,EmpJorParc()/100,1),Monto(SDOBAS)*
(1+(iif(EmpEsqLiq()=29,Monto(AANTUT),Monto(AANTIG))*Monto(PORANT)+
iif(EmpEsqLiq()=33,10,0))/100)*iif(EmpEsqLiq()=24,1.2,1))"""
def is_formula(formula: str) -> bool:
pattern = r'\w+\('
match = re.match(pattern, formula)
if not match or match.start() != 0:
return False
return True
def it_includes_a_formula(formula: str) -> bool:
pattern = r'(?<=.)[a-zA-Z]+\('
match = re.search(pattern, formula)
if not match:
return False
return True
def has_internal_formula(formula_str: str) -> bool:
"""Check if the formula has another formula inside the arguments
"""
pattern = r'\w+\('
pattern = r'[a-zA-Z]+\('
first_parenthesis_inside = formula_str.find('(') + 1
inside_formula = formula_str[first_parenthesis_inside:-1]
resp = re.search(pattern, inside_formula)
return resp is not None
def find_closing_parenthesis_position(text: str, start_pos: int) -> int:
""" Function find where the parenthesis closes, adding (open) and
substracting (close). The parenthesis closes when it reachs to zero.
The function returns the position of the closing parenthesis
example = "(kaka(aalal(lla,2,3)))
Expected result = 22
Args:
text (_type_): Text to search for the closing parenthesis
start_pos (_type_): Position where the parenthesis starts
Returns:
int: Position of the closing parenthesis
"""
count = 0
for idx in range(start_pos, len(text)):
if text[idx] == '(':
count += 1
elif text[idx] == ')':
count -= 1
if count == 0:
return idx
# Returns -1 if the closing parenthesis is not found
return -1
def get_key_from_dict(formula_dict: dict, value: str) -> str:
""" Get the key from the dictionary for the given value
"""
for key, value2 in formula_dict.items():
if value2 == value:
return key
return None
def get_formula_dict(formula_str: str, formula_dict: dict = {}) -> dict:
""" Function to get the formulas inside a string and return a dictionary
this function read character by character and when it finds a letter + '('
if a new letter + '(' is found, the level will be increased and the
function will read until it finds the closing parenthesis ')', if the
replace the formula with the key of the dictionary
And recursively call the function to get the formulas inside the formula.
For this example:
example_formula = 'formula(1,2,3) + formula2(1,2,3) - formula3(1,suma(1, 4),3)'
The expected result is a dictionary with the formulas and the string
like this:
{
'001': 'formula(1,2,3)',
'002': 'formula2(1,2,3)',
'003': 'formula3(1,|Formula_004|,3)',
'004': 'suma(1, 4)',
}
"""
# First clean the string
# formula_str = clean_formula_str(formula_str)
# idx shows where the formula starts
idx = 1
separators = ['+', '-', '*', '/', '(', ')', ',', ' ', '<', '>', '=']
i = 1
while i < len(formula_str):
# Find a formula by the letter + '('
if re.match(r'[a-zA-Z]\(', formula_str[i-1:i+1]):
end = find_closing_parenthesis_position(formula_str, i)
i = end
this_formula = formula_str[idx-1:end+1]
if has_internal_formula(this_formula):
this_formula_args = this_formula[this_formula.find('(')+1:-1]
formula_dict = get_formula_dict(this_formula_args, formula_dict)
# Now I need to replace the formula with the key '|Formula_00x|'
for key, value in formula_dict.items():
this_formula = this_formula.replace(value, f"|Formula_{key}|")
if has_internal_formula(this_formula):
return get_formula_dict(this_formula, formula_dict)
else:
return formula_dict
else:
this_key = get_key_from_dict(formula_dict, this_formula)
if this_key is None:
this_key = f'{len(formula_dict)+1:03d}'
formula_dict[this_key] = this_formula
# restart the idx if this is a separator
if formula_str[i-1] in separators:
idx = i + 1
i += 1
return formula_dict
def clean_formula_str(input_string: str) -> str:
"""Remove everything different [a-zA-Z0-9] or any of these values
["(", ")", ",", "+", "/", ".", "*", "-", "<", ">", "="]
"""
# Define the regular expression pattern
pattern = r"[^a-zA-Z0-9(),+/*.=<>\-]"
# Use the 're.sub' function to replace all occurrences of unwanted characters with an empty string
cleaned_string = re.sub(pattern, "", input_string)
return cleaned_string
def replace_formula_string(formula_str: str, formula_dict: dict) -> str:
"""Replace the formulas inside the string with the key of the dictionary
"""
for key in sorted(formula_dict.keys(), reverse=True):
value = formula_dict[key]
formula_str = formula_str.replace(value, f"|Formula_{key}|")
return formula_str
def update_formula_string(formula_str: str, formula_dict: dict = {}) -> tuple:
"""Replace the formulas inside the string with the key of the dictionary
It returns a tuple with the formula_str and the formula_dict
"""
formula_dict_resp = get_formula_dict(formula_str, formula_dict)
formula_resp = replace_formula_string(formula_str, formula_dict_resp)
if is_formula(formula_resp) or has_internal_formula(formula_resp) or it_includes_a_formula(formula_resp):
return update_formula_string(formula_resp, formula_dict_resp)
return formula_resp, formula_dict_resp
test_formula = clean_formula_str(formula_real_complex)
result_formula, result_dict = update_formula_string(test_formula)
print("Final result: ", result_formula)
print("*"*200)
print("Final dict: ", result_dict)
| lugezz/repo_testing | regex/regex_formulas_9.py | regex_formulas_9.py | py | 6,432 | python | en | code | 0 | github-code | 36 |
40193585375 | import xlrd
from account.backend.services import StateService
def read_data_from_excel(excel_file):
# reads data from an excel_file
file_path = str(excel_file)
# create a workbook using the excel file received
w_book = xlrd.open_workbook(file_path)
# open the excel_sheet with the data
sheet = w_book.sheet_by_index(0)
# import the database model Albums
from music.models import Album
# instantiate a state
state = StateService().get(name = 'Active')
# loop through the data printing all the data
for row in range(1, sheet.nrows):
# print (str(sheet.cell_value(row, col))),
obj = Album(
artist = sheet.cell_value(row, 0),
album_title = sheet.cell_value(row, 1),
genre = sheet.cell_value(row, 2),
state = state)
print('album added')
obj.save()
return 'Success'
| Trojkev/kev-music | music/backend/albums_script.py | albums_script.py | py | 811 | python | en | code | 1 | github-code | 36 |
34976365352 | #!/usr/bin/env python3
import requests
url = "http://10.10.90.182:8000"
url_= "https://10.10.90.182:1443/index.php"
header={'User-Agent':'<?php echo system($_REQUEST["c"];) ?>'}
r = requests.get(url_ + "?c=id", headers=header, verify=False)
print(r.text)
| lodwig/TryHackMe | Probe/check.py | check.py | py | 258 | python | en | code | 0 | github-code | 36 |
850417471 | #pylint:disable=no-member
import cv2 as cv
# Blurring is used to smooth the image by removing noice from the image
img = cv.imread('../Resources/Photos/cats.jpg')
cv.imshow('Cats', img)
# kernel window size (ksize) ask for rows and columns and the blurring algo work on that kernal window through the whole image
# Averaging blur
average = cv.blur(img, (3,3))
cv.imshow('Average Blur', average)
# Gaussian Blur
gauss = cv.GaussianBlur(img, (3,3), 0)
cv.imshow('Gaussian Blur', gauss)
# Median Blur
median = cv.medianBlur(img, 3)
cv.imshow('Median Blur', median)
# Bilateral
bilateral = cv.bilateralFilter(img, 10, 35, 25)
cv.imshow('Bilateral', bilateral)
cv.waitKey(0) | dheeraj120501/Lets-Code | 06-Cool Things Computer Can't Do/03-Computer Vision with OpenCV/2-Advanced/03-blurring.py | 03-blurring.py | py | 677 | python | en | code | 3 | github-code | 36 |
7542349397 | from strategy.models import ohlc
from strategy.base_strategy import Strategy, BUY, SELL, NO_ENTRY
PARAM_INCREASE_RATE = 0.0005
PARAM_REALBODY_RATE = 0.5
class Sanpei(Strategy):
signal: int
def __init__(self, client, logger):
super().__init__(client, logger)
self.signal = 0
def check_candle(self, data: ohlc.Ohlc):
realbody_rate = 0 if data.high - \
data.low == 0 else abs(data.close - data.open)/(data.high - data.low)
increase_rate = abs(data.close / data.open - 1)
if increase_rate < PARAM_INCREASE_RATE:
return False
elif realbody_rate < PARAM_REALBODY_RATE:
return False
else:
return True
def check_ascend(self, data: ohlc.Ohlc, last_data: ohlc.Ohlc):
if data.open > last_data.open and data.close > last_data.close:
return 1
elif data.open < last_data.open and data.close < last_data.close:
return -1
else:
return 0
def entrySignal(self, data, l_ohlc_list):
if not self.check_candle(data):
self.signal = 0
return NO_ENTRY
ascend_param = self.check_ascend(data, l_ohlc_list[-1])
if ascend_param == 0:
self.signal = 0
return NO_ENTRY
self.signal += ascend_param
if self.signal == 3:
print("3本連続陽線です。買いシグナル点灯しました。")
return BUY
if self.signal == -3:
print("3本連続陰線です。売りシグナル点灯しました。")
return SELL
return ""
def closeSignal(self, data, l_ohlc_list):
if not abs(self.signal) == 3:
raise EnvironmentError
l_ohlc = l_ohlc_list[-1]
if self.signal == 3 and (data.close - l_ohlc.close < 0):
self.signal = 0
return True, False
if self.signal == -3 and (data.close - l_ohlc.close > 0):
self.signal = 0
return True, False
return False, False
if __name__ == '__main__':
pass
| TakuNyan007/pythonTrading | strategy/sanpei.py | sanpei.py | py | 2,108 | python | en | code | 0 | github-code | 36 |
11379198461 | from django.views.generic import ListView
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.db.models import Count
from guesswho.core.models import (Game, Question, Trait, TraitValue, Player,
all_people)
from guesswho.core.logic import (get_game_opponent, is_game_complete,
rule_out_candidates)
from guesswho.core.forms import QuestionForm
class ListGames(ListView):
    """List every game in which the logged-in user participates."""
    template_name = "core/list_games.html"
    def get_queryset(self):
        # Only games containing a Player owned by the current user.
        return Game.objects.filter(players__user=self.request.user)
def create_game(request):
    """Create a new game with the current user as player 1 (every person
    still a candidate) and redirect to the join list."""
    game = Game.objects.create()
    player1 = Player.objects.create(user=request.user)
    player1.candidates.add(*all_people())
    game.players.add(player1)
    game.save()
    return HttpResponseRedirect(reverse('games_to_join'))
def join_game(request):
    """List games awaiting a second player; on POST, join one as player 2.

    NOTE(review): the POSTed game_id is trusted -- a stale id (game deleted
    or already full) raises instead of showing an error; confirm whether
    that path needs handling.
    """
    ctx = {
        'games': Game.objects.annotate(player_count=Count('players'))
        .filter(player_count=1)
    }
    if request.method == 'POST':
        game_id = request.POST.get('game_id')
        game = Game.objects.get(pk=int(game_id))
        player2 = Player.objects.create(user=request.user)
        player2.candidates.add(*all_people())
        game.players.add(player2)
        game.save()
        return HttpResponseRedirect(reverse('play_game', args=(game.pk,)))
    return render_to_response('core/games_to_join.html', ctx,
                              context_instance=RequestContext(request))
def play_game(request, game_id):
    """Render the game board and process question submissions.

    On POST, a valid question (encoded ``trait_id:value_id``) is applied via
    rule_out_candidates; the context then records whether the game ended and
    whether the current player won.
    """
    game = Game.objects.get(pk=int(game_id))
    player = game.players.filter(user=request.user)[0]
    candidates = player.candidates.all()
    ctx = {
        'opponent': get_game_opponent(game, player),
        'person': player.person,
        'num_candidates': candidates.count(),
        'candidates': candidates
    }
    if request.method == 'POST':
        form = QuestionForm(game, player, request.POST)
        if form.is_valid():
            custom_key = form.cleaned_data.get('question')
            trait_id, value_id = custom_key.split(':')
            question = Question(
                game=game,
                player=player,
                trait=Trait.objects.get(pk=trait_id),
                value=TraitValue.objects.get(pk=value_id),
            )
            # NOTE(review): the question is applied but never .save()d --
            # confirm that not persisting a history is intentional.
            rule_out_candidates(question)
            winner = is_game_complete(game)
            if winner:
                ctx.update({
                    'game_over': True,
                    # Bug fix: compare primary keys by value.  ``is`` tested
                    # object identity and only appeared to work through
                    # CPython's small-integer caching.
                    'user_won': winner.pk == player.pk
                })
    else:
        form = QuestionForm(game, player)
    ctx['form'] = form
    return render_to_response('core/play_game.html', ctx,
                              context_instance=RequestContext(request))
| schallis/guesswho | guesswho/core/views.py | views.py | py | 2,911 | python | en | code | 0 | github-code | 36 |
19213479137 |
import os.path
from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import cv2
from sklearn.svm import SVC
import hog
def _list_files(directory):
    """Return the full paths of the plain files directly inside *directory*
    (subdirectories are skipped)."""
    return [join(directory, name) for name in listdir(directory)
            if isfile(join(directory, name))]


def get_good_train_set(directory="./NICTA/TrainSet/PositiveSamples"):
    """Paths of the positive (pedestrian) training images."""
    return _list_files(directory)


def get_bad_train_set(directory="./NICTA/TrainSet/NegativeSamples"):
    """Paths of the negative (background) training images."""
    return _list_files(directory)


def get_good_test_set(directory="./NICTA/TestSet/PositiveSamples"):
    """Paths of the positive (pedestrian) test images."""
    return _list_files(directory)


def get_bad_test_set(directory="./NICTA/TestSet/NegativeSamples"):
    """Paths of the negative (background) test images."""
    return _list_files(directory)
def get_hog_descriptor(image):
    """Resize *image* to the canonical 64x128 detection window, grayscale it,
    and return the flattened HOG descriptor (the __main__ block assumes 3780
    values per image).

    NOTE(review): depends on the module-global ``gamma_value``, which is only
    defined inside the __main__ block -- calling this function from an import
    raises NameError; confirm before reusing this module as a library.
    """
    image = cv2.resize(image, (64, 128))
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    image = hog.gamma_correction(image, gamma_value)
    gradient = hog.compute_gradients(image)
    cell_histograms, _ = hog.compute_weighted_vote(gradient)
    hog_blocks, _ = hog.normalize_blocks(cell_histograms)
    return hog_blocks.ravel()
if __name__ == '__main__':
    # Train three SVM kernels on HOG descriptors of the NICTA pedestrian set
    # and report per-class accuracy on the held-out test set.
    gamma_value = 1.0
    # Positive training descriptors (label 1).
    good_set = get_good_train_set()
    image_count = len(good_set)
    good_set_hog = np.empty((image_count, 3780))
    image_index = 0
    for image_file in good_set:
        test_image = cv2.imread(image_file)
        good_set_hog[image_index] = get_hog_descriptor(test_image)
        image_index += 1
    good_set_tag = np.ones(image_count)
    # Negative training descriptors (label 0).
    bad_set = get_bad_train_set()
    image_count = len(bad_set)
    bad_set_hog = np.empty((image_count, 3780))
    image_index = 0
    for image_file in bad_set:
        test_image = cv2.imread(image_file)
        bad_set_hog[image_index] = get_hog_descriptor(test_image)
        image_index += 1
    bad_set_tag = np.zeros(image_count)
    # Test-set descriptors, positive then negative.
    good_test_set = get_good_test_set()
    good_test_image_count = len(good_test_set)
    good_test_set_hog = np.empty((good_test_image_count, 3780))
    image_index = 0
    for image_file in good_test_set:
        test_image = cv2.imread(image_file)
        good_test_set_hog[image_index] = get_hog_descriptor(test_image)
        image_index += 1
    bad_test_set = get_bad_test_set()
    bad_test_image_count = len(bad_test_set)
    bad_test_set_hog = np.empty((bad_test_image_count, 3780))
    image_index = 0
    for image_file in bad_test_set:
        test_image = cv2.imread(image_file)
        bad_test_set_hog[image_index] = get_hog_descriptor(test_image)
        image_index += 1
    train_data = np.concatenate((good_set_hog, bad_set_hog))
    tag_data = np.concatenate((good_set_tag, bad_set_tag))
    C = 1.0 # SVM regularization parameter
    lin_svc = SVC(kernel='linear', C=C).fit(train_data, tag_data)
    rbf_svc = SVC(kernel='rbf', C=C).fit(train_data, tag_data)
    poly_svc = SVC(kernel='poly', C=C, degree=2).fit(train_data, tag_data)
    # title for the classifiers
    titles = ['SVC with linear kernel',
              'SVC with RBF kernel',
              'SVC with polynomial kernel']
    for i, clf in enumerate((lin_svc, rbf_svc, poly_svc)):
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        # NOTE(review): the subplot calls below are vestigial -- nothing is
        # ever drawn or shown; only the printed accuracies matter.
        plt.subplot(2, 2, i + 1)
        plt.subplots_adjust(wspace=0.4, hspace=0.4)
        good_test_results = clf.predict(good_test_set_hog)
        #print(good_test_results)
        bad_test_results = clf.predict(bad_test_set_hog)
        #print(bad_test_results)
        print("Results for {}".format(titles[i]))
        print("Accuracy for Positive Cases: {}".format(np.sum(good_test_results) / good_test_image_count * 100))
        print("Accuracy for Negative Cases: {}".format(100 - (np.sum(bad_test_results) / bad_test_image_count * 100)))
        del good_test_results, bad_test_results
| insomaniacvenkat/HOG | svm_train.py | svm_train.py | py | 4,128 | python | en | code | 0 | github-code | 36 |
13143028771 | """
Example 1:
Input: s = "abcabcbb"
Output: 3
Explanation: The answer is "abc", with the length of 3.
Example 2:
Input: s = "bbbbb"
Output: 1
Explanation: The answer is "b", with the length of 1.
Example 3:
Input: s = "pwwkew"
Output: 3
Explanation: The answer is "wke", with the length of 3.
Notice that the answer must be a substring, "pwke" is a subsequence and not a substring.
"""
class Solution:
    def lengthOfLongestSubstring(self, s: str) -> int:
        """Return the length of the longest substring of *s* with no
        repeating characters.

        Sliding-window scan: ``start`` marks the left edge of the current
        duplicate-free window, ``last_seen`` maps each character to its most
        recent index.  O(n) time / O(min(n, alphabet)) space, replacing the
        original O(n^2) restart-at-every-index scan.
        """
        last_seen = {}  # char -> index of its most recent occurrence
        start = 0       # left edge of the current window
        best = 0
        for i, ch in enumerate(s):
            # A repeat inside the window moves the left edge just past it.
            if ch in last_seen and last_seen[ch] >= start:
                start = last_seen[ch] + 1
            last_seen[ch] = i
            best = max(best, i - start + 1)
        return best
# Quick manual smoke test when run as a script.
s = Solution()
res = s.lengthOfLongestSubstring('abcabcbb')
print(res) | michsanya/Leetcode | Longest Substring Without Repeating Characters.py | Longest Substring Without Repeating Characters.py | py | 956 | python | en | code | 0 | github-code | 36 |
34620777008 | import json
def make_func(name, inputs, outputs, mutability):
# For now, pass all hints, and I'll manually drop those that aren't needed.
header = f"""
@{mutability}
func {name[0:-1]}{{syscall_ptr: felt*, pedersen_ptr: HashBuiltin*, bitwise_ptr: BitwiseBuiltin*, range_check_ptr
}} ({', '.join([f"{inp['name']}: {inp['type']}" for inp in inputs])}) -> ({', '.join([f"{outp['name']}: {outp['type']}" for outp in outputs])}):"""
# There are outputs: store them.
if len(outputs) > 0:
return header + f"""
let ({', '.join([outp['name'] for outp in outputs])}) = {name}({', '.join([inp['name'] for inp in inputs])})
return ({', '.join([outp['name'] for outp in outputs])})
end"""
else:
return header + f"""
{name}({', '.join([inp['name'] for inp in inputs])})
return ()
end"""
def generate(input_contract, output_path):
    """Read the compiled ABI of *input_contract* and write a Cairo interface
    file wrapping every function entry to *output_path*.

    NOTE(review): the ABI file handle from ``open`` is never closed, and
    repeated struct names are appended without deduplication -- confirm both
    are acceptable for this one-shot codegen tool.
    """
    abi_path = f"artifacts/abis/{input_contract}.json"
    abi = json.load(open(abi_path, "r"))
    codeparts = []
    imports = []
    structs = []
    for part in abi:
        # Collect custom struct names (Uint256 comes from the stdlib import).
        if part["type"] == "struct" and part["name"] != 'Uint256':
            structs.append(part["name"])
        if part["type"] != "function":
            continue
        # Entries without stateMutability are treated as @external.
        if "stateMutability" not in part:
            codeparts.append(make_func(part["name"], part["inputs"], part["outputs"], "external"))
        else:
            codeparts.append(make_func(part["name"], part["inputs"], part["outputs"], part["stateMutability"]))
        imports.append(part["name"])
    with open(output_path, "w") as f:
        f.write("""
%lang starknet
from starkware.cairo.common.cairo_builtins import HashBuiltin, SignatureBuiltin, BitwiseBuiltin
""")
        f.write(f"from contracts.{input_contract} import (\n\t" + ',\n\t'.join(imports) + '\n)\n')
        f.write("from contracts.types import (\n\t" + ',\n\t'.join(structs) + '\n)\n')
        for part in codeparts:
            f.write(part)
            f.write("\n")
    print("Wrote to ", output_path)
| briqNFT/briq-protocol | briq_protocol/generate_interface.py | generate_interface.py | py | 2,004 | python | en | code | 63 | github-code | 36 |
22783391588 | #
# @lc app=leetcode id=712 lang=python3
#
# [712] Minimum ASCII Delete Sum for Two Strings
#
# https://leetcode.com/problems/minimum-ascii-delete-sum-for-two-strings/description/
#
# algorithms
# Medium (59.39%)
# Likes: 1275
# Dislikes: 54
# Total Accepted: 44.9K
# Total Submissions: 75.3K
# Testcase Example: '"sea"\n"eat"'
#
# Given two strings s1, s2, find the lowest ASCII sum of deleted characters to
# make two strings equal.
#
# Example 1:
#
# Input: s1 = "sea", s2 = "eat"
# Output: 231
# Explanation: Deleting "s" from "sea" adds the ASCII value of "s" (115) to the
# sum.
# Deleting "t" from "eat" adds 116 to the sum.
# At the end, both strings are equal, and 115 + 116 = 231 is the minimum sum
# possible to achieve this.
#
#
#
# Example 2:
#
# Input: s1 = "delete", s2 = "leet"
# Output: 403
# Explanation: Deleting "dee" from "delete" to turn the string into "let",
# adds 100[d]+101[e]+101[e] to the sum. Deleting "e" from "leet" adds 101[e]
# to the sum.
# At the end, both strings are equal to "let", and the answer is
# 100+101+101+101 = 403.
# If instead we turned both strings into "lee" or "eet", we would get answers
# of 433 or 417, which are higher.
#
#
#
# Note:
# 0 < s1.length, s2.length .
# All elements of each string will have an ASCII value in [97, 122].
#
#
# @lc code=start
class Solution:
    """LC 712: minimum ASCII sum of deletions that make two strings equal.

    The kept characters form a common subsequence, so the answer is
    (total ASCII of both strings) - 2 * (maximum-ASCII-sum common
    subsequence), found with an LCS-style DP.
    """
    def __init__(self):
        # Distinct maximal common subsequences collected by _get_all_lcs
        # (debugging helper; unused by minimumDeleteSum).
        self.lcs = set()

    def minimumDeleteSum(self, s1: str, s2: str) -> int:
        """Return the lowest ASCII sum of characters deleted from s1 and s2
        so that the remainders are equal."""
        if not s1:
            return sum(ord(char) for char in s2)
        if not s2:
            return sum(ord(char) for char in s1)
        total_s1 = sum(ord(char) for char in s1)
        total_s2 = sum(ord(char) for char in s2)
        l1, l2 = len(s1), len(s2)
        s1 = "#" + s1  # shift to 1-based indexing for the DP
        s2 = "#" + s2
        # f[i][j] = max ASCII-sum common subsequence of s1[:i] and s2[:j].
        f = [[0] * (l2 + 1) for _ in range(l1 + 1)]
        for i in range(1, l1 + 1):
            for j in range(1, l2 + 1):
                if s1[i] == s2[j]:
                    f[i][j] = f[i - 1][j - 1] + ord(s1[i])
                else:
                    f[i][j] = max(f[i - 1][j - 1], f[i - 1][j], f[i][j - 1])
        return total_s1 + total_s2 - 2 * f[l1][l2]

    def _get_all_lcs(self, f, s1, s2, l1, l2, curr):
        """Backtrack through the DP table collecting every maximal-ASCII
        common subsequence into ``self.lcs``.

        Bug fixes versus the original: the recursive calls targeted a
        non-existent ``self._get_lcs`` (NameError on first use), and the
        collected value was a list, which is unhashable and cannot go into
        a set -- it is now joined into a string.  (Memoization would speed
        this up, as the original comment noted.)
        """
        if l1 <= 0 or l2 <= 0:
            # curr was built backwards, so reverse before recording.
            self.lcs.add(''.join(curr)[::-1])
            return
        if s1[l1] == s2[l2]:
            curr.append(s1[l1])
            self._get_all_lcs(f, s1, s2, l1 - 1, l2 - 1, curr)
            curr.pop()
        else:
            if f[l1 - 1][l2] > f[l1][l2 - 1]:
                self._get_all_lcs(f, s1, s2, l1 - 1, l2, curr)
            elif f[l1 - 1][l2] < f[l1][l2 - 1]:
                self._get_all_lcs(f, s1, s2, l1, l2 - 1, curr)
            else:
                self._get_all_lcs(f, s1, s2, l1 - 1, l2, curr)
                self._get_all_lcs(f, s1, s2, l1, l2 - 1, curr)
# @lc code=end
| Zhenye-Na/leetcode | python/712.minimum-ascii-delete-sum-for-two-strings.py | 712.minimum-ascii-delete-sum-for-two-strings.py | py | 3,225 | python | en | code | 17 | github-code | 36 |
32704563511 | from scipy.signal import hilbert
import numpy as np
import matplotlib.pyplot as plt
def compare_elements(array1, array2):
    """Element-wise three-way comparison of two equal-length sequences.

    Returns a float array holding 1.0 where array1 > array2, -1.0 where
    array1 < array2 and 0.0 where equal -- i.e. sign(array1 - array2),
    computed in one vectorised pass instead of the original Python loop.
    """
    return np.sign(np.subtract(array1, array2)).astype(float)
def phase_locked_matrix(all_bands_eeg):
    """Build a per-band channel-by-channel phase-locking matrix.

    *all_bands_eeg* has shape (bands, channels, points), e.g. 4 x 32 x 8064:
    4 frequency bands, 32 EEG electrodes, 8064 samples per electrode.
    Returns a (bands, channels, channels) matrix with 1 on the diagonal.

    NOTE(review): the off-diagonal entry is |sum(sign(phase_i - phase_j))| /
    points, not the standard complex-exponential PLV -- confirm this is the
    intended phase-locking measure.
    """
    # Number of bands, electrode channels, and samples per channel.
    bands, channels, points = all_bands_eeg.shape
    # Instantaneous phase per band/channel via the analytic (Hilbert) signal.
    eeg_instantaneous_phase = np.zeros_like(all_bands_eeg)
    for band, signal_band_eeg in enumerate(all_bands_eeg):
        for channel, single_channel_eeg in enumerate(signal_band_eeg):
            analytic_signal = hilbert(single_channel_eeg)
            instantaneous_phase = np.unwrap(np.angle(analytic_signal))
            eeg_instantaneous_phase[band, channel] = instantaneous_phase
    # Phase-locking matrix, shape (bands, channels, channels).
    matrix = np.zeros(shape=[bands, channels, channels])
    for band in range(bands):
        for i in range(channels):
            for j in range(channels):
                if i == j:
                    matrix[band][i][j] = 1
                else:
                    matrix[band][i][j] = np.abs((compare_elements(eeg_instantaneous_phase[band][i], eeg_instantaneous_phase[band][j])).sum()) / points
    return matrix
if __name__ == '__main__':
    # Manual check: compute the matrix for one recording and show a heatmap.
    import pandas as pd
    import matplotlib.pyplot as plt
    import seaborn as sns
    import data_dir
    eeg = pd.read_csv(data_dir.preprocess_dir + r'\level1\8.csv')
    # print(phase_locked_matrix(eeg.values[:30, 1:]))
    # First 30 rows as channels; column 0 (index/label) is dropped.
    m = phase_locked_matrix(eeg.values[:30, 1:])
    fig, ax = plt.subplots(figsize=(15, 15))
    sns.heatmap(pd.DataFrame(m),vmax=1,vmin = 0, xticklabels= True, yticklabels= True, square=True)
    plt.show()
| sheep9159/click_number | function_connective.py | function_connective.py | py | 2,127 | python | en | code | 0 | github-code | 36 |
30755666741 | age = 21
name = "tornike"
my_text = "my name is {} and i am {} years old"
# str.format fills the {} placeholders in order.
print(my_text.format(name, age))
# count -- counting occurrences of a substring
surname = "tbelishvili"
print(surname.count("i"))
age = 1999
# Convert to str so digit characters can be counted.
age = str(age)
print(age.count("9"))
| Tbelo111/IT-step1 | strings3.py | strings3.py | py | 250 | python | en | code | 0 | github-code | 36 |
26236843242 | from django.contrib.auth.models import Group
from django.core.checks import messages
from django.core.files.images import ImageFile
from django.shortcuts import redirect, render
from django.http import HttpResponse, JsonResponse
from core.models import *
from core.forms import *
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.contrib.auth import authenticate, login, logout
from django.db.models import Q
import json
from core.decorators import *
import random
# Create your views here.
def home_page(request):
    """Storefront landing page; includes the open cart summary when the
    visitor is a logged-in client."""
    context = {}
    try:
        cliente = request.user.cliente
        # The single uncompleted Compra acts as the shopping cart.
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Anonymous visitor / no Cliente profile: render without cart keys.
        # (Fix: narrowed the bare `except:`, which also swallowed SystemExit
        # and KeyboardInterrupt, and dropped the dead carro/items = None.)
        pass
    return render(request, 'pages/home.html', context)
def mujer_page(request):
    """Women's catalogue page, with the open cart summary when available."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Fix: bare `except:` narrowed; dead carro/items = None removed.
        pass
    context['nombre'] = 'Mujer'
    return render(request, 'pages/categoria.html', context)
def hombre_page(request):
    """Men's catalogue page, with the open cart summary when available."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Fix: bare `except:` narrowed; dead carro/items = None removed.
        pass
    context['nombre'] = 'Hombre'
    return render(request, 'pages/categoria.html', context)
def nino_page(request):
    """Children's catalogue page, with the open cart summary when available."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Fix: bare `except:` narrowed; dead carro/items = None removed.
        pass
    context['nombre'] = 'Niños'
    return render(request, 'pages/categoria.html', context)
def producto_page(request, pk):
    """Detail page for product *pk*, with the open cart summary when available."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Fix: bare `except:` narrowed; dead carro/items = None removed.
        pass
    context['producto'] = Producto.objects.get(id=pk)
    return render(request, 'pages/producto.html', context)
# Clientes
def registrarse_page(request):
    """Sign-up page: creates the auth User, assigns the 'cliente' group and
    creates the linked Cliente profile.

    NOTE(review): form2 (ClienteForm) is instantiated but never validated --
    the extra fields are read straight from POST; confirm whether its
    validation should gate account creation.
    """
    form1 = CreateUserForm()
    form2 = ClienteForm()
    if request.method == 'POST':
        form1 = CreateUserForm(request.POST)
        form2 = ClienteForm(request.POST)
        if form1.is_valid():
            user = form1.save()
            apellido_paterno = request.POST.get('apellido_paterno')
            apellido_materno = request.POST.get('apellido_materno')
            telefono = request.POST.get('telefono')
            group = Group.objects.get(name='cliente')
            user.groups.add(group)
            Cliente.objects.create(
                usuario = user,
                apellido_paterno=apellido_paterno,
                apellido_materno=apellido_materno,
                telefono=telefono
            )
            messages.success(request, 'Cuenta creada con exito')
            return redirect('login_page')
        else:
            messages.error(request, 'La cuenta no pudo ser creada')
    context = {'formUser': form1, 'formCliente': form2}
    return render(request, 'pages/register.html', context)
@usuario_identificado
def login_page(request):
    """E-mail/password login; redirects to the home page on success."""
    context = {}
    if request.method == 'POST':
        correo = request.POST.get('email')
        password = request.POST.get('password')
        try:
            # Robustness fix: an unknown e-mail used to raise DoesNotExist
            # and produce a 500 instead of the form error message.  (Also
            # removed a leftover debug print of the username.)
            usuario = User.objects.get(email=correo)
        except User.DoesNotExist:
            usuario = None
        user = None
        if usuario is not None:
            user = authenticate(request, username=usuario.username, password=password)
        if user is not None:
            login(request, user)
            return redirect('home_page')
        messages.error(request, 'Usuario o contraseña incorrecto')
    return render(request, 'pages/login.html', context)
def logout_user(request):
    """End the session and send the visitor to the login page."""
    logout(request)
    return redirect('login_page')
#TO-DO: Agregar condición para logeado y para clientes con decoradores
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['cliente', 'admin'])
def carro_page(request):
    """Shopping-cart page for the logged-in client.

    Fix: the original fetched the cliente/compra/items triple twice (once
    before and once inside the try block); each query now runs once.
    """
    cliente = request.user.cliente
    compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
    items = compra.productocompra_set.all()
    try:
        carro = compra.get_comprar_productos
    except Exception:
        carro = None
    context = {'items': items, 'compra': compra, 'carro': carro}
    return render(request, 'pages/carro.html', context)
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['cliente'])
def direccion_page(request, pk):
    """Attach a shipping address to purchase *pk*; on success continue to
    the payment page."""
    form = DireeccionForm()
    compra = Compra.objects.get(id=pk)
    cliente = request.user.cliente
    if request.method == 'POST':
        form = DireeccionForm(request.POST)
        if form.is_valid():
            # Bind the address to the current client and purchase before saving.
            form.instance.cliente = cliente
            form.instance.compra = compra
            form.save()
            messages.success(request, 'Direccion agregada')
            return redirect('pagar_page')
        else:
            messages.error(request, 'No se pudo agregar la dirección')
    context = {'form': form}
    return render(request, 'pages/direccion.html', context)
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['cliente'])
def pagar_page(request):
    """Checkout page: POST marks the open cart as completed and returns home."""
    cliente = request.user.cliente
    compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
    items = compra.productocompra_set.all()
    if request.method == 'POST':
        # Fix: dropped the unused `compra_comp` binding -- the update itself
        # is what completes the purchase.
        Compra.objects.filter(id=compra.id).update(completado=True)
        messages.success(request, 'Producto comprado')
        return redirect('home_page')
    context = {'items': items, 'compra': compra}
    return render(request, 'pages/pagar.html', context)
def vision_page(request):
    """Company-vision static page, with the open cart summary when available."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Fix: bare `except:` narrowed; dead carro/items = None removed.
        pass
    return render(request, 'pages/vision.html', context)
#TO-DO: datos de formularios para Empleo y Contacto
def contacto_page(request):
    """Contact page: shows the cart summary and processes the contact form."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Fix: bare `except:` narrowed; dead carro/items = None removed.
        pass
    form = ContactoForm()
    if request.method == 'POST':
        form = ContactoForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Aplicación laboral hecha')
        else:
            messages.error(request, 'La aplicación no pudo ser grabada')
    # Fix: expose the *bound* form after a POST so validation errors render;
    # the old code stored the empty form in the context before rebinding.
    context['form'] = form
    return render(request, 'pages/contacto.html', context)
def cambios_page(request):
    """Returns/exchanges policy page, with the open cart summary when available."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Fix: bare `except:` narrowed; dead carro/items = None removed.
        pass
    return render(request, 'pages/cambios.html', context)
def empleo_page(request):
    """Job-application page: shows the cart summary and processes the form."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Fix: bare `except:` narrowed; dead carro/items = None removed.
        pass
    form = EmpleoForm()
    if request.method == 'POST':
        form = EmpleoForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Aplicación laboral hecha')
        else:
            messages.error(request, 'La aplicación no pudo ser grabada')
    # Fix: expose the *bound* form after a POST so validation errors render.
    context['form'] = form
    return render(request, 'pages/empleo.html', context)
def updateItem(request):
    """AJAX endpoint that adds or removes one unit of a product in the
    client's open cart, deleting the line when its quantity reaches zero."""
    payload = json.loads(request.body)
    accion = payload['action']
    cliente = request.user.cliente
    articulo = Producto.objects.get(id=payload['productId'])
    compra, _creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
    linea, _creada = ProductoCompra.objects.get_or_create(compra=compra, producto=articulo)
    if accion == 'add':
        linea.cantidad += 1
    elif accion == 'remove':
        linea.cantidad -= 1
    linea.save()
    if linea.cantidad <= 0:
        linea.delete()
    return JsonResponse('Item fue añadido', safe=False)
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['cliente'])
def user_page(request, action):
    """Client profile page: profile, completed purchases, addresses, cart.

    NOTE(review): *action* is accepted (presumably from the URLconf) but
    never used.  Each section is wrapped in its own try/except so a missing
    Cliente profile degrades to None values -- note that if the first block
    fails, `cliente` is undefined and the later blocks fail too by design.
    """
    context = {}
    try:
        cliente = request.user.cliente
        context['cliente'] = cliente
    except:
        context['cliente'] = None
    try:
        compras = Compra.objects.all().filter(cliente=cliente, completado=True)
        context['compras'] = compras
    except:
        context['compras'] = None
    try:
        envios = DireccionEnvio.objects.all().filter(cliente=cliente)
        context['envios'] = envios
    except:
        context['envios'] = None
    # mecanica carro
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except:
        carro = None
        items = None
    try:
        compras_completas = DireccionEnvio.objects.all().filter(cliente=cliente,entregado=True)
        context['compras_completas'] = compras_completas
    except:
        context['compras_completas'] = None
    return render(request, 'pages/user.html', context)
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['admin'])
def admin_page(request, action):
    """Back-office dashboard: pending shipments, completed purchases and the
    product catalogue.  *action* selects the tab title shown by the template."""
    from types import SimpleNamespace  # placeholder objects for the fallbacks
    context = {}
    try:
        context['envios'] = DireccionEnvio.objects.all().filter(entregado=False)
    except Exception:
        context['envios'] = 'Sin Envios'
    try:
        context['compras'] = Compra.objects.all().filter(completado=True)
    except Exception:
        # Bug fix: the original assigned attributes onto the str
        # 'Sin compras', which raises AttributeError inside the handler.
        # A namespace keeps the attributes the template reads.
        context['compras'] = SimpleNamespace(
            get_comprar_total=0, get_comprar_productos='None', get_productos='None')
    try:
        context['productos'] = Producto.objects.all()
    except Exception:
        # Same str-attribute bug fixed for the product fallback.
        context['productos'] = SimpleNamespace(get_total=0, ret_nombre='None')
    titulos = {'inicio': 'Inicio', 'productos': 'Productos',
               'envios': 'Envíos', 'compras': 'Compras'}
    if action in titulos:
        context['nombre'] = titulos[action]
    return render(request, 'pages/funcionarios.html', context)
def preguntas_frecuentes(request):
    """FAQ page, with the open cart summary when the visitor is logged in."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Fix: bare `except:` narrowed; dead carro/items = None removed.
        pass
    return render(request, 'pages/preguntas_frecuentes.html', context)
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['admin'])
def crud_producto(request, pk):
    """Create or edit a product: edit mode when *pk* matches an existing
    Producto, create mode otherwise.

    Fix: the original duplicated the whole form-handling branch inside a
    bare try/except; a failure in the *edit* POST path (e.g. during save)
    fell into the except and re-processed the POST as a *create*.  The
    lookup is now the only guarded statement, and ``instance=None`` makes
    ProductoForm behave exactly like the no-instance constructor.
    """
    context = {}
    try:
        producto = Producto.objects.get(id=pk)
    except Exception:
        producto = None
    form = ProductoForm(instance=producto)
    context['form'] = form
    if request.method == 'POST':
        form = ProductoForm(request.POST, request.FILES, instance=producto)
        if form.is_valid():
            form.save()
            messages.success(request, 'Producto agregado')
        else:
            messages.error(request, 'Error al guardar el producto')
    return render(request, 'pages/func-produc.html', context)
def poblar_bd(request):
    """Seed view: wipe all products and recreate the demo catalogue, then
    redirect to the home page.  Intended for development/demo use only."""
    # Delete the existing products.
    Producto.objects.all().delete()
    # Add the demo products to the database.
    Producto.objects.create(titulo="Camisa Hombre Negra Dorada", precio='21000', categoria="HM", descripcion="Camisa de vestir de colores negro y dorado. Diseño oriental.", imagen="h-camisa.jpg")
    Producto.objects.create(titulo="Pantalones Cuero Hombre Negros", precio='32000', categoria="HM", descripcion="Pantalones de cuero color negro. Cinturon no incluido.", imagen="h-pantalones.jpg")
    Producto.objects.create(titulo="Zapatos Cuero Cafe", precio='45000', categoria="HM", descripcion="Zapatos de cuero color marron. Hebilla de plata. Disponible en todas tallas.", imagen="h-zapato.jpg")
    Producto.objects.create(titulo="Blusa Multicolor Sparkle", precio='42000', categoria="MJ", descripcion="Top tipo blusa multicolor, refleja la luz. Spaghetti strap.", imagen="m-blusa.jpg")
    Producto.objects.create(titulo="Vestido Mujer de Una Pieza", precio='15000', categoria="MJ", descripcion="Vestido negro y azul. Una pieza, disponible en todas las tallas.", imagen="m-vestido.jpg")
    Producto.objects.create(titulo="Flats Negros Mujer", precio='66000', categoria="MJ", descripcion="Zapatos Flat de mujer, disponibles en Negro y Blanco. Taco bajo.", imagen="m-zapato.jpg")
    Producto.objects.create(titulo="Buso Oso de Niño", precio='12500', categoria="NN", descripcion="Buso de niño unisex. Diseño de oso, disponible en verde, rojo y azul.", imagen="n-buso.jpg")
    Producto.objects.create(titulo="Pantalones Dinosario de Niño", precio='14000', categoria="NN", descripcion="Pantalones de buso unisex para niños, diseño de dinosaurio, disponible en gris y negro.", imagen="n-pantalones.jpg")
    Producto.objects.create(titulo="Zapatillas con Luces de Niño", precio='27000', categoria="NN", descripcion="Zapatillas unisex para niños, con luces fluorecentes en la suela. Baterias incluidas.", imagen="n-zapatilla.jpg")
    # Redirect to the home page.
    return redirect('home_page')
def formapago_page(request):
    """Payment-methods info page, with the open cart summary when available."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Fix: bare `except:` narrowed; dead carro/items = None removed.
        pass
    return render(request, 'pages/formapago.html', context)
| felipe-quirozlara/changewear-django | changeWear/pages/views.py | views.py | py | 16,865 | python | es | code | 0 | github-code | 36 |
43916018301 | from functools import lru_cache
MOD = 10 ** 9 + 7
class Solution:
def findPaths(self, m, n, maxMove, startRow, startColumn):
@lru_cache(None)
def rec(sr, sc, mm):
if sr < 0 or sr >= m or sc < 0 or sc >= n:
return 1
if mm == 0: return 0
return (
rec(sr + 1, sc, mm - 1) +
rec(sr, sc + 1, mm - 1) +
rec(sr - 1, sc, mm - 1) +
rec(sr, sc - 1, mm - 1)
) % MOD
return rec(startRow, startColumn, maxMove)
| robinsdeepak/leetcode | 576-out-of-boundary-paths/576-out-of-boundary-paths.py | 576-out-of-boundary-paths.py | py | 604 | python | en | code | 0 | github-code | 36 |
5353638973 | # coding: utf-8
"""
NGSI-LD metamodel and Sensor NGSI-LD custom model
ETSI GS CIM 009 V1.6.1 cross-cutting Context Information Management (CIM); NGSI-LD API; NGSI-LD metamodel and Sensor NGSI-LD custom model. # noqa: E501
The version of the OpenAPI document: 1.6.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
from inspect import getfullargspec
import json
import pprint
import re # noqa: F401
from typing import Any, List, Optional
from pydantic import BaseModel, Field, StrictStr, ValidationError, validator
from ngsi_ld_models.models.geo_property_fragment_input import GeoPropertyFragmentInput
from ngsi_ld_models.models.language_property_fragment_input import LanguagePropertyFragmentInput
from ngsi_ld_models.models.property_fragment_input import PropertyFragmentInput
from ngsi_ld_models.models.relationship_fragment_input import RelationshipFragmentInput
from typing import Any, List
from pydantic import StrictStr, Field
REPLACEATTRSREQUEST_ONE_OF_SCHEMAS = ["GeoPropertyFragmentInput", "LanguagePropertyFragmentInput", "PropertyFragmentInput", "RelationshipFragmentInput"]
class ReplaceAttrsRequest(BaseModel):
    """oneOf wrapper for the NGSI-LD "replace attributes" request body.

    Generated by OpenAPI Generator -- do not edit manually.  Exactly one of
    the four attribute-fragment schemas must match; the matching value is
    stored in ``actual_instance``.
    """
    # Per-schema slots used only for static typing of the oneOf alternatives;
    # the real payload lives in actual_instance.
    # data type: PropertyFragmentInput
    oneof_schema_1_validator: Optional[PropertyFragmentInput] = None
    # data type: RelationshipFragmentInput
    oneof_schema_2_validator: Optional[RelationshipFragmentInput] = None
    # data type: GeoPropertyFragmentInput
    oneof_schema_3_validator: Optional[GeoPropertyFragmentInput] = None
    # data type: LanguagePropertyFragmentInput
    oneof_schema_4_validator: Optional[LanguagePropertyFragmentInput] = None
    # The validated fragment (exactly one of the four schemas above).
    actual_instance: Any
    one_of_schemas: List[str] = Field(REPLACEATTRSREQUEST_ONE_OF_SCHEMAS, const=True)
    class Config:
        validate_assignment = True
    def __init__(self, *args, **kwargs):
        """Accept either one positional argument (the actual instance) or
        keyword arguments, but not both."""
        if args:
            if len(args) > 1:
                raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
            if kwargs:
                raise ValueError("If a position argument is used, keyword arguments cannot be used.")
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)
    @validator('actual_instance')
    def actual_instance_must_validate_oneof(cls, v):
        """Accept *v* only when it is an instance of exactly one of the four
        oneOf fragment types; raise ValueError for zero or multiple matches."""
        instance = ReplaceAttrsRequest.construct()
        error_messages = []
        match = 0
        # validate data type: PropertyFragmentInput
        if not isinstance(v, PropertyFragmentInput):
            error_messages.append(f"Error! Input type `{type(v)}` is not `PropertyFragmentInput`")
        else:
            match += 1
        # validate data type: RelationshipFragmentInput
        if not isinstance(v, RelationshipFragmentInput):
            error_messages.append(f"Error! Input type `{type(v)}` is not `RelationshipFragmentInput`")
        else:
            match += 1
        # validate data type: GeoPropertyFragmentInput
        if not isinstance(v, GeoPropertyFragmentInput):
            error_messages.append(f"Error! Input type `{type(v)}` is not `GeoPropertyFragmentInput`")
        else:
            match += 1
        # validate data type: LanguagePropertyFragmentInput
        if not isinstance(v, LanguagePropertyFragmentInput):
            error_messages.append(f"Error! Input type `{type(v)}` is not `LanguagePropertyFragmentInput`")
        else:
            match += 1
        if match > 1:
            # more than 1 match
            raise ValueError("Multiple matches found when setting `actual_instance` in ReplaceAttrsRequest with oneOf schemas: GeoPropertyFragmentInput, LanguagePropertyFragmentInput, PropertyFragmentInput, RelationshipFragmentInput. Details: " + ", ".join(error_messages))
        elif match == 0:
            # no match
            raise ValueError("No match found when setting `actual_instance` in ReplaceAttrsRequest with oneOf schemas: GeoPropertyFragmentInput, LanguagePropertyFragmentInput, PropertyFragmentInput, RelationshipFragmentInput. Details: " + ", ".join(error_messages))
        else:
            return v
@classmethod
def from_dict(cls, obj: dict) -> ReplaceAttrsRequest:
    """Build an instance from a plain dict by round-tripping through JSON."""
    json_repr = json.dumps(obj)
    return cls.from_json(json_repr)
@classmethod
def from_json(cls, json_str: str) -> ReplaceAttrsRequest:
    """Returns the object represented by the json string.

    Tries to deserialize the payload as each oneOf schema in turn; exactly
    one attempt must succeed, otherwise a ValueError is raised.
    """
    instance = ReplaceAttrsRequest.construct()
    error_messages = []
    match = 0
    # Attempt deserialization against every allowed schema in the generated
    # order; on success the (last) parsed object becomes the actual instance.
    for schema in (PropertyFragmentInput, RelationshipFragmentInput,
                   GeoPropertyFragmentInput, LanguagePropertyFragmentInput):
        try:
            instance.actual_instance = schema.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
    if match > 1:
        # more than 1 match
        raise ValueError("Multiple matches found when deserializing the JSON string into ReplaceAttrsRequest with oneOf schemas: GeoPropertyFragmentInput, LanguagePropertyFragmentInput, PropertyFragmentInput, RelationshipFragmentInput. Details: " + ", ".join(error_messages))
    elif match == 0:
        # no match
        raise ValueError("No match found when deserializing the JSON string into ReplaceAttrsRequest with oneOf schemas: GeoPropertyFragmentInput, LanguagePropertyFragmentInput, PropertyFragmentInput, RelationshipFragmentInput. Details: " + ", ".join(error_messages))
    else:
        return instance
def to_json(self) -> str:
    """Return the JSON representation of the wrapped instance."""
    inner = self.actual_instance
    if inner is None:
        return "null"
    if callable(getattr(inner, "to_json", None)):
        return inner.to_json()
    # Primitive payload: serialize it directly.
    return json.dumps(inner)
def to_dict(self) -> dict:
    """Return the dict representation of the wrapped instance."""
    inner = self.actual_instance
    if inner is None:
        return None
    converter = getattr(inner, "to_dict", None)
    if callable(converter):
        return converter()
    # Primitive payload: return it unchanged.
    return inner
def to_str(self) -> str:
    """Return the pretty-printed string form of this model's dict."""
    state = self.dict()
    return pprint.pformat(state)
| daniel-gonzalez-sanchez/ngsi-ld-client-tester | ngsi-ld-models/ngsi_ld_models/models/replace_attrs_request.py | replace_attrs_request.py | py | 7,277 | python | en | code | 0 | github-code | 36 |
# 1. Create ten numbered text files on the desktop.
def create_file(base_dir='/home/zhoud/Desktop/', count=10):
    """Create `count` empty files named 1.txt .. <count>.txt under `base_dir`.

    Both parameters default to the original hard-coded values, so a plain
    create_file() call behaves exactly as before.
    """
    for i in range(1, count + 1):
        filename = base_dir + str(i) + ".txt"
        # `with` guarantees the handle is closed, even if a later write fails.
        with open(filename, "w"):
            pass
    print("Done")
#create_file()
def count_money(amount, rate, time):
    """Print yearly compound-interest balances and return the final amount.

    amount: starting principal.
    rate: annual interest rate (e.g. 0.05 for 5%).
    time: number of years.
    """
    print("amount is " + str(amount))
    growth = rate + 1  # per-year multiplier
    balance = amount
    for year in range(1, time + 1):
        balance *= growth
        print("year {}: ${}".format(year, balance))
    # Returning the final balance lets callers reuse the result; printing
    # behavior is unchanged.
    return balance


count_money(100, 0.05, 8)
69833300903 | import tkinter as tk
class Calculator:
    """A small Tkinter calculator with basic arithmetic and trig functions."""

    def __init__(self, master):
        self.master = master
        master.title("Calculator")
        self.display = tk.Entry(master, width=30, bg='#5689c0', fg='#eaebed',
                                borderwidth=3, font=('Arial', 14))
        self.display.grid(row=0, column=0, columnspan=5, padx=10, pady=15)
        # Digit/function pad. NOTE: the original labels ' 1' and ' 2' carried
        # stray spaces that were inserted into the expression; trimmed here.
        digits = ['7', '8', '9', '4', '5', '6', '1', '2', '3', '0', '.', '+/-',
                  'sin', 'cos', 'tan']
        for i, digit in enumerate(digits):
            button = tk.Button(master, text=digit, width=5, height=2, bg='#46a094',
                               fg='#c4e8c2', relief="groove", font=('Arial', 12),
                               command=lambda digit=digit: self.append_digit(digit))
            button.grid(row=i // 3 + 1, column=i % 3, padx=5, pady=5)
        # Operator pad.
        operators = ['+', '-', '*', '/', '(', ')', '%', '=', 'AC', 'DEL']
        for i, operator in enumerate(operators):
            button = tk.Button(master, text=operator, width=5, height=2, bg='#46a094',
                               fg='#c4e8c2', font=('Arial', 12),
                               command=lambda operator=operator: self.handle_operator(operator))
            button.grid(row=i // 2 + 1, column=i % 2 + 3, padx=5, pady=5)

    def append_digit(self, digit):
        """Insert a digit/function token at the end of the display."""
        if digit in ('sin', 'cos', 'tan'):
            # Insert a callable form so '=' can actually evaluate it
            # (the original inserted the bare name, which always errored).
            self.display.insert(tk.END, digit + '(')
        elif digit == '+/-':
            self._toggle_sign()
        else:
            self.display.insert(tk.END, digit)

    def _toggle_sign(self):
        """Negate the current expression by wrapping/unwrapping a leading '-(...)'."""
        expr = self.display.get()
        self.display.delete(0, tk.END)
        if expr.startswith('-(') and expr.endswith(')'):
            self.display.insert(tk.END, expr[2:-1])
        elif expr:
            self.display.insert(tk.END, '-(' + expr + ')')

    def handle_operator(self, operator):
        """Dispatch operator buttons: clear, evaluate, delete, or insert."""
        if operator == 'AC':
            self.display.delete(0, tk.END)
        elif operator == '=':
            self._evaluate()
        elif operator == 'DEL':
            self.display.delete(len(self.display.get()) - 1)
        else:
            self.display.insert(tk.END, operator)

    def _evaluate(self):
        """Evaluate the displayed expression; show the result or 'Syntax Error'."""
        import math
        # SECURITY: eval of user input — restricted to trig functions with no
        # builtins, instead of the original unrestricted eval.
        allowed = {'sin': math.sin, 'cos': math.cos, 'tan': math.tan,
                   '__builtins__': {}}
        try:
            result = eval(self.display.get(), allowed)
            self.display.delete(0, tk.END)
            self.display.insert(tk.END, str(result))
        except Exception:
            self.display.delete(0, tk.END)
            self.display.insert(tk.END, 'Syntax Error')
if __name__ == "__main__":
    # Only start the GUI when run as a script, not when imported.
    root = tk.Tk()
    calculator = Calculator(root)
    root.mainloop()
| Adarsh1o1/python-initials | guicalc.py | guicalc.py | py | 1,908 | python | en | code | 1 | github-code | 36 |
7182580635 | #!/usr/bin/env python3
"""Gradient descent with momentum"""
def update_variables_momentum(alpha, beta1, var, grad, v):
    """Update a variable with gradient descent plus momentum.

    alpha: learning rate.
    beta1: momentum weight.
    var: variable to update (number or np.ndarray).
    grad: gradient of var (number or np.ndarray, same shape as var).
    v: previous first moment of var.

    Returns: (updated var, new moment).
    """
    # The exponential moving average works elementwise for both scalars and
    # ndarrays, so no type-based branching is needed. (The original
    # except-TypeError branch returned from inside its loop after the first
    # element, which was a bug.)
    v = beta1 * v + (1 - beta1) * grad
    var = var - alpha * v
    return var, v
| JohnCook17/holbertonschool-machine_learning | supervised_learning/0x03-optimization/5-momentum.py | 5-momentum.py | py | 705 | python | en | code | 3 | github-code | 36 |
# Standardize a time series (zero mean, unit variance) with StandardScaler.
from math import sqrt

from pandas import read_csv
from sklearn.preprocessing import StandardScaler

# Load the dataset (date index in the first column) and print the first 5 rows.
# NOTE: Series.from_csv was removed from pandas; read_csv + squeeze replaces it.
series = read_csv('daily-minimum-temperatures-in-me.csv',
                  header=0, index_col=0, parse_dates=True).squeeze('columns')
print(series.head())
# Reshape the observations into the (n_samples, 1) column the scaler expects.
values = series.values.reshape((len(series), 1))
# Fit the standardization model on the data.
scaler = StandardScaler()
scaler = scaler.fit(values)
print('Mean: %f, StandardDeviation: %f' % (scaler.mean_, sqrt(scaler.var_)))
# Standardize and print the first 5 transformed rows.
normalized = scaler.transform(values)
for i in range(5):
    print(normalized[i])
# Invert the transform to recover the original scale.
inversed = scaler.inverse_transform(normalized)
for i in range(5):
    print(inversed[i])
4000749197 | # -*- coding: utf-8 -*-
"""Dataset methods for natural language inference.
Tokenization -> lower casing -> stop words removal -> lemmatization
Authors:
Fangzhou Li - fzli@ucdavis.edu
Todo:
* TODOs
"""
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from transformers import PreTrainedTokenizerBase
import pandas as pd
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
    """Truncate a token-list pair in place until their combined length fits.

    One token is removed at a time from whichever list is currently longer,
    which keeps more information than trimming both by an equal fraction.

    Reference: https://github.com/huggingface/transformers/blob/main/examples/
    legacy/run_swag.py

    Args:
        tokens_a: A list of tokens (mutated in place).
        tokens_b: A list of tokens (mutated in place).
        max_length: Maximum combined length of the two lists.
    """
    while len(tokens_a) + len(tokens_b) > max_length:
        # Drop from the longer list; ties shrink tokens_b.
        longer = tokens_a if len(tokens_a) > len(tokens_b) else tokens_b
        longer.pop()
class FoodAtlasNLIDataset(Dataset):
    """NLI dataset of (premise, hypothesis) pairs encoded for a BERT-style model.

    Each pair is encoded as [CLS] premise [SEP] hypothesis [SEP].
    Reference: https://www.kaggle.com/code/tks0123456789/nli-by-bert-pytorch.

    Args:
        premises: list of premise strings
        hypotheses: list of hypothesis strings
        tokenizer: tokenizer used to encode the text
        labels: optional list of label strings; None for unlabeled data
        label_mapper: mapping from label string to integer class
        max_seq_len: maximum encoded sequence length, special tokens included
    """
    def __init__(
            self,
            premises: list[str],
            hypotheses: list[str],
            tokenizer: PreTrainedTokenizerBase,
            labels: list[str] = None,
            label_mapper: dict = {
                'Entails': 1, 'Does not entail': 0
            },
            max_seq_len: int = 512):
        if labels is not None:
            self.labels = torch.LongTensor(
                [label_mapper[label] for label in labels]
            )
        else:
            self.labels = None
        self.max_tokens = 0  # length of the longest encoded sequence seen
        self.inputs = []     # per-sample [input_ids, attention_mask, token_type_ids]
        for p, h in zip(premises, hypotheses):
            p_ids = tokenizer.encode(p, add_special_tokens=False)
            h_ids = tokenizer.encode(h, add_special_tokens=False)
            # Reserve 3 positions for [CLS] and the two [SEP] tokens.
            _truncate_seq_pair(p_ids, h_ids, max_seq_len - 3)
            input_ids = [tokenizer.cls_token_id] \
                + p_ids \
                + [tokenizer.sep_token_id] \
                + h_ids \
                + [tokenizer.sep_token_id]
            attention_mask = [1] * len(input_ids)
            # Segment ids: 0 for [CLS]+premise+[SEP], 1 for hypothesis+[SEP].
            token_type_ids = [0] * (len(p_ids) + 2) + [1] * (len(h_ids) + 1)
            self.inputs.append([
                torch.LongTensor(input_ids),
                torch.IntTensor(attention_mask),
                torch.IntTensor(token_type_ids)
            ])
            self.max_tokens = max(self.max_tokens, len(input_ids))
        print("Longest Sequence Length:", self.max_tokens)

    def __len__(self):
        return len(self.inputs)

    def __getitem__(self, idx):
        # Returns (inputs, label); the label is None for unlabeled datasets.
        if self.labels is not None:
            return self.inputs[idx], self.labels[idx]
        else:
            return self.inputs[idx], None
def collate_fn_padding(batch):
    """Pad a batch of variable-length samples to a common length.

    Args:
        batch: list of (inputs, label) pairs, where inputs is
            (input_ids, attention_mask, token_type_ids).

    Returns:
        ((input_ids, attention_mask, token_type_ids), labels) where each
        tensor has shape (batch_size, max_seq_len_in_batch); labels is None
        for unlabeled batches.
    """
    inputs, labels = zip(*batch)
    ids_seqs, mask_seqs, type_seqs = zip(*inputs)
    # Pad ids and attention masks with 0; token_type_ids are padded with 1.
    ids_batch = pad_sequence(ids_seqs, batch_first=True, padding_value=0)
    mask_batch = pad_sequence(mask_seqs, batch_first=True, padding_value=0)
    type_batch = pad_sequence(type_seqs, batch_first=True, padding_value=1)
    if labels[0] is None:
        return (ids_batch, mask_batch, type_batch), None
    return (ids_batch, mask_batch, type_batch), torch.stack(labels, dim=0)
# def get_food_atlas_data_loaders(
# path_data_train: str,
# tokenizer: PreTrainedTokenizerBase,
# path_data_test: str = None,
# max_seq_len: int = 512,
# batch_size: int = 1,
# shuffle: bool = True,
# num_workers: int = 0,
# collate_fn: callable = collate_fn_padding,
# verbose: bool = True):
# """Get data loader for food atlas dataset.
# Args:
# path_data_train: path to the training data
# tokenizer: tokenizer
# path_data_test: path to the testing data
# max_seq_len: maximum sequence length
# batch_size: batch size
# shuffle: whether to shuffle the data
# num_workers: number of workers
# collate_fn: collate function
# verbose: whether to print out the information
# Returns:
# data loaders for training and testing
# """
# data_loaders = []
# for path, name in zip(
# [path_data_train, path_data_test], ['train', 'test']):
# if path is not None:
# data = pd.read_csv(path, sep='\t')
# data = data[['premise', 'hypothesis_string', 'answer']]
# data = data.rename(
# {'hypothesis_string': 'hypothesis'}, axis=1
# )
# data = data[~(data['answer'] == 'Skip')]
# if verbose:
# print(f"==={name} set info start===")
# print(data['answer'].value_counts())
# print(f"===={name} set info end====")
# dataset = FoodAtlasNLIDataset(
# premises=data['premise'].tolist(),
# hypotheses=data['hypothesis'].tolist(),
# labels=data['answer'].tolist(),
# tokenizer=tokenizer,
# max_seq_len=max_seq_len
# )
# data_loader = DataLoader(
# dataset=dataset,
# batch_size=batch_size,
# shuffle=shuffle,
# num_workers=num_workers,
# collate_fn=collate_fn
# )
# else:
# data_loader = None
# data_loaders += [data_loader]
# data_loader_train, data_loader_test = data_loaders
# return data_loader_train, data_loader_test
def get_food_atlas_data_loader(
        path_data: str,
        tokenizer: PreTrainedTokenizerBase,
        train: bool = True,
        max_seq_len: int = 512,
        batch_size: int = 1,
        shuffle: bool = True,
        num_workers: int = 0,
        collate_fn: callable = collate_fn_padding,
        verbose: bool = True):
    """Build a DataLoader over a food atlas TSV file.

    Args:
        path_data: path to the TSV data file.
        tokenizer: tokenizer used to encode premise/hypothesis pairs.
        train: if True the loader carries labels and rows answered 'Skip'
            are dropped; if False the dataset is unlabeled.
        max_seq_len: maximum token sequence length.
        batch_size: batch size.
        shuffle: whether to shuffle the data.
        num_workers: number of worker processes.
        collate_fn: collate function used for padding.
        verbose: whether to print dataset statistics.

    Returns:
        A DataLoader over the parsed dataset.
    """
    frame = pd.read_csv(path_data, sep='\t')
    if train:
        # Keep only answered rows; 'Skip' rows carry no usable label.
        frame = frame[['premise', 'hypothesis_string', 'answer']]
        frame = frame[~(frame['answer'] == 'Skip')]
    else:
        frame = frame[['premise', 'hypothesis_string']]
    if verbose:
        print()
        print(f'Number of samples: {frame.shape[0]}')
        print()
        if train:
            print(frame['answer'].value_counts())
            print()
    dataset = FoodAtlasNLIDataset(
        premises=frame['premise'].tolist(),
        hypotheses=frame['hypothesis_string'].tolist(),
        labels=frame['answer'].tolist() if train else None,
        tokenizer=tokenizer,
        max_seq_len=max_seq_len,
    )
    return DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        collate_fn=collate_fn,
    )
| IBPA/SemiAutomatedFoodKBC | src/entailment/_dataset.py | _dataset.py | py | 8,558 | python | en | code | 1 | github-code | 36 |
10021006698 | """
data_metrics_calculation_ingestion.py
=====================================
This module contains code to fetch weather data records and calculate relevant analytics and save it to the database.
"""
import argparse
from typing import Any
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from data_metrics_model import Base, WeatherStats
from data_models import WeatherData
def get_weather_data(session: Any) -> pd.DataFrame:
    """Fetch all WeatherData rows and return them as a pandas DataFrame.

    Drops SQLAlchemy bookkeeping columns, parses dates, and adds a derived
    `year` column used by the aggregation step.

    Parameters:
        session (Any): SQLAlchemy session.

    Returns:
        pd.DataFrame: one row per WeatherData record.
    """
    rows = [record.__dict__ for record in session.query(WeatherData).all()]
    frame = pd.DataFrame(rows).drop(["_sa_instance_state", "id"], axis=1)
    frame["date"] = pd.to_datetime(frame["date"])
    frame["year"] = frame["date"].dt.year
    return frame
def calculate_analytics(result_df: pd.DataFrame) -> pd.DataFrame:
    """Aggregate weather records into yearly statistics.

    Parameters:
        result_df (pd.DataFrame): weather records with a `year` column.

    Returns:
        pd.DataFrame: one row per year with avg_max_temp, avg_min_temp and
        total_precipitation columns.
    """
    renames = {
        "max_temp": "avg_max_temp",
        "min_temp": "avg_min_temp",
        "precipitation": "total_precipitation",
    }
    yearly = result_df.groupby("year").agg(
        {"max_temp": "mean", "min_temp": "mean", "precipitation": "sum"}
    )
    return yearly.rename(columns=renames).reset_index()
if __name__ == "__main__":
    # Parse the target SQLite database path from the command line.
    parser = argparse.ArgumentParser(
        description="Argumennts to analyse data and save it in database."
    )
    parser.add_argument(
        "-db", "--db_path", type=str, required=True, help="Path of sqlite3 database."
    )
    args = parser.parse_args()
    # Create the database engine and an ORM session.
    engine = create_engine(f"sqlite:///{args.db_path}")
    Session = sessionmaker(bind=engine)
    session = Session()
    # Open a raw connection (used only for the table-existence check below).
    conn = engine.connect()
    # Create the weather-stats table if it does not exist yet.
    if not engine.dialect.has_table(conn, WeatherStats.__tablename__):
        Base.metadata.create_all(bind=engine)
    # Fetch weather records as a DataFrame.
    weather_data_df = get_weather_data(session=session)
    # print(f"Weather Data:")
    # print(weather_data_df.head())
    # Calculate yearly aggregate metrics.
    result_df_grouped = calculate_analytics(weather_data_df)
    result_df_grouped_dict = result_df_grouped.to_records(index=False)
    # print(result_df_grouped_dict)
    # Persist one WeatherStats row per year. Records are positional:
    # (year, avg_max_temp, avg_min_temp, total_precipitation).
    for item in result_df_grouped_dict:
        year = int(item[0])
        avg_max_temp = item[1]
        avg_min_temp = item[2]
        total_precipitation = item[3]
        weather_stats_data = WeatherStats(
            year=year,
            avg_max_temp=avg_max_temp,
            avg_min_temp=avg_min_temp,
            total_precipitation=total_precipitation,
        )
        session.add(weather_stats_data)
        session.commit()
    # Close the session and the raw connection.
    session.close()
    conn.close()
| pri2si17-1997/weather_data_processing | src/data_metrics_calculation_ingestion.py | data_metrics_calculation_ingestion.py | py | 3,490 | python | en | code | 0 | github-code | 36 |
13989867732 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import aiomysql
from webapp.www.fields import Field
logging.basicConfig(level=logging.INFO)
__pool = None
def log(sql, args=None):
    """Log a SQL statement and its bound parameters at INFO level."""
    shown_args = args if args else []
    logging.info('SQL: [%s] args: %s' % (sql, shown_args))
# 创建全局连接池__pool,缺省情况下将编码设置为utf8,自动提交事务
# 每个HTTP请求都可以从连接池中直接获取数据库连接,而不必频繁地打开和关闭数据库连接
async def create_pool(loop, **kw):
    """Create the module-wide aiomysql connection pool.

    Defaults: utf8 charset, autocommit on, pool size 1-10. HTTP handlers
    reuse pooled connections instead of opening/closing one per request.
    `user`, `password` and `db` are required keyword arguments.
    """
    logging.info('create database connection pool...')
    global __pool
    # Create the aiomysql pool (coroutine).
    __pool = await aiomysql.create_pool(
        host=kw.get('host', 'localhost'),
        port=kw.get('port', 3306),
        user=kw['user'],          # required; no default
        password=kw['password'],  # required; no default
        db=kw['db'],              # required; no default
        charset=kw.get('charset', 'utf8'),
        autocommit=kw.get('autocommit', True),
        maxsize=kw.get('maxsize', 10),
        minsize=kw.get('minsize', 1),
        loop=loop
    )
# 若传入size参数,就通过fetchmany()获取最多指定数量的记录,否则通过fetchall()获取所有记录。
async def select(sql, args, size=None):
    """Run a SELECT and return the rows as dicts.

    If `size` is given, fetch at most that many rows; otherwise fetch all.
    '?' placeholders in `sql` are rewritten to MySQL's '%s' style.
    """
    log(sql, args)
    # Borrow a pooled connection; the `with` blocks handle cleanup/errors.
    async with __pool.get() as conn:
        async with conn.cursor(aiomysql.DictCursor) as cur:
            await cur.execute(sql.replace('?', '%s'), args)
            if size:
                results = await cur.fetchmany(size)  # at most `size` rows
            else:
                results = await cur.fetchall()       # the whole result set
            logging.info('return rows: %s' % len(results))
            return results
# 用于SQL的Insert/Update/Delete语句,只返回影响的操作行数
async def execute(sql, args, autocommit=True):
    """Run an INSERT/UPDATE/DELETE and return the affected row count.

    When `autocommit` is False an explicit transaction is opened, committed
    on success and rolled back on any failure.
    """
    log(sql, args)
    global __pool
    async with __pool.get() as conn:
        if not autocommit:  # open an explicit transaction
            await conn.begin()
        try:
            # DictCursor returns rows as dicts rather than tuples.
            async with conn.cursor(aiomysql.DictCursor) as cur:
                await cur.execute(sql.replace('?', '%s'), args)
                affected = cur.rowcount  # rows touched by the statement
            if not autocommit:  # commit the explicit transaction
                await conn.commit()
        except BaseException as e:
            if not autocommit:  # undo the transaction on any failure
                await conn.rollback()
            raise e
        return affected
def create_args_string(num):
    """Return `num` comma-separated '?' SQL placeholders, e.g. 3 -> '?, ?, ?'."""
    return ', '.join(['?'] * num)
# 创建基类Model的元类
# 任何继承自Model的类(如User),会自动通过ModelMetaclass扫描映射关系,
# 并存储到自身的类属性如__table__和__mappings__中。
# 这是一个元类,它定义了如何来构造一个类,任何定义了__metaclass__属性或指定了metaclass的都会通过元类定义的构造方法构造类
# 任何继承自Model的类,都会自动通过ModelMetaclass扫描映射关系,并存储到自身的类属性
class ModelMetaclass(type):
    # Metaclass for Model: scans Field attributes on every subclass, records
    # the attribute->column mapping, and precomputes default CRUD SQL.
    #
    # mcs: the metaclass itself; name: name of the class being created
    # (e.g. 'User'); bases: tuple of parent classes; attrs: dict of class
    # attributes and methods.
    def __new__(mcs, name, bases, attrs):
        if name == 'Model':  # Model itself maps to no table; create it as-is
            return type.__new__(mcs, name, bases, attrs)
        table = attrs.get('__table__', name)  # table name defaults to the class name
        logging.info('found model: %s (table: %s)' % (name, table))
        # Collect all Fields and locate the primary key.
        mappings = dict()
        fields = []
        primary_key = None
        for k, v in attrs.items():
            if isinstance(v, Field):
                logging.info('--found mapping: %s ==> %s' % (k, v))
                mappings[k] = v  # record attribute -> Field
                if v.primary_key:
                    if primary_key:  # a second primary key is an error
                        raise RuntimeError('Duplicate primary key for field: %s' % k)
                    primary_key = k  # remember the first primary key found
                else:
                    fields.append(k)  # non-primary-key columns
        if not primary_key:
            raise RuntimeError('Primary key not found.')
        for k in mappings.keys():  # drop the Field descriptors from class attrs
            attrs.pop(k)
        escaped_fields = list(map(lambda f: '`%s`' % f, fields))
        # Build default select/insert/update/delete statements. Backquotes
        # avoid collisions with SQL keywords (e.g. a table named `select`).
        sql_select = 'select `%s`, %s from `%s`' % \
            (primary_key, ', '.join(escaped_fields), table)
        sql_insert = 'insert into `%s` (%s, `%s`) values (%s)' % \
            (table, ', '.join(escaped_fields), primary_key, create_args_string(len(escaped_fields) + 1))
        sql_update = 'update `%s` set %s where `%s`=?' % \
            (table, ', '.join(map(lambda f: '`%s`=?' % (mappings.get(f).name or f), fields)), primary_key)
        sql_delete = 'delete from `%s` where `%s`=?' % \
            (table, primary_key)
        attrs['__mappings__'] = mappings        # attribute -> column mapping
        attrs['__table__'] = table              # table name
        attrs['__primary_key__'] = primary_key  # primary-key attribute name
        attrs['__fields__'] = fields            # attribute names other than the primary key
        attrs['__select__'] = sql_select
        attrs['__insert__'] = sql_insert
        attrs['__update__'] = sql_update
        attrs['__delete__'] = sql_delete
        return type.__new__(mcs, name, bases, attrs)
# 定义所有ORM映射的基类Model
# 继承自dict的Model具备所有dict的功能,同时又实现__getattr__()和__setattr__()方法,
# 可以使用print(user.id)的方法直接引用属性
class Model(dict, metaclass=ModelMetaclass):
    """Base class for all ORM-mapped models.

    Inherits dict (so instances behave like dicts) and adds attribute-style
    access plus async CRUD helpers built from the SQL prepared by
    ModelMetaclass.
    """

    def __init__(self, **kw):
        super(Model, self).__init__(**kw)

    def __getattr__(self, key):
        # Attribute access falls back to dict lookup: user.id == user['id'].
        try:
            return self[key]
        except KeyError:
            raise AttributeError("'Model' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # Attribute assignment writes through to the dict: user.id = 1.
        self[key] = value

    def getvalue(self, key):
        """Return the current value for `key`, or None if unset."""
        return getattr(self, key, None)

    def getvalueordefault(self, key):
        """Return the value for `key`, filling in the Field default if unset."""
        value = getattr(self, key, None)
        if value is None:
            field = self.__mappings__[key]
            if field.default is not None:
                # default may be a callable (e.g. id/time generators) or a constant
                value = field.default() if callable(field.default) else field.default
                logging.debug('using default value for %s: %s' % (key, str(value)))
                # Cache the computed default as the current value.
                setattr(self, key, value)
        return value

    # Query helpers are classmethods so callers can query without an instance.
    @classmethod
    async def findall(cls, where=None, args=None, **kw):
        """find objects by where clause"""
        sql = [cls.__select__]
        if where:
            sql.append('where')
            sql.append(where)
        if args is None:
            args = []
        order_by = kw.get('orderBy', None)
        if order_by:
            sql.append('order by')
            sql.append(order_by)
        limit = kw.get('limit', None)
        if limit is not None:
            sql.append('limit')
            if isinstance(limit, int):
                sql.append('?')
                # BUGFIX: the limit value must be passed as a query arg,
                # otherwise the '?' placeholder has nothing to bind to.
                args.append(limit)
            elif isinstance(limit, tuple) and len(limit) == 2:
                sql.append('?, ?')
                args.extend(limit)
            else:
                raise ValueError('Invalid limit values: %s' % str(limit))
        rs = await select(' '.join(sql), args)
        return [cls(**r) for r in rs]

    @classmethod
    async def findnum(cls, select_field, where=None, args=None):
        """find number by select_field and where"""
        sql = ['select %s _num_ from `%s`' % (select_field, cls.__table__)]
        if where:
            sql.append('where')
            sql.append(where)
        rs = await select(' '.join(sql), args, 1)
        if len(rs) == 0:
            return None
        return rs[0]['_num_']

    @classmethod
    async def find(cls, pk):
        """find object by primary key"""
        rs = await select('%s where `%s`=?' % (cls.__select__, cls.__primary_key__), [pk], 1)
        if len(rs) == 0:
            return None
        return cls(**rs[0])

    async def save(self):
        """Insert this instance as a new row."""
        args = list(map(self.getvalueordefault, self.__fields__))
        args.append(self.getvalueordefault(self.__primary_key__))
        rows = await execute(self.__insert__, args)
        if rows != 1:
            logging.warning('failed to insert record: affected rows: %s' % rows)

    async def update(self):
        """Persist this instance's current values to its row."""
        args = list(map(self.getvalue, self.__fields__))
        args.append(self.getvalue(self.__primary_key__))
        rows = await execute(self.__update__, args)
        if rows != 1:
            logging.warning('failed to update by primary key: affected rows: %s' % rows)

    async def remove(self):
        """Delete this instance's row by primary key."""
        args = [self.getvalue(self.__primary_key__)]
        rows = await execute(self.__delete__, args)
        if rows != 1:
            logging.warning('failed to remove by primary key: affected rows: %s' % rows)
| shellever/Python3Learning | webapp/www/orm.py | orm.py | py | 11,391 | python | zh | code | 0 | github-code | 36 |
7876035828 | from __future__ import absolute_import, division, print_function, unicode_literals
import unittest
import pkg_resources
from ..load import load_all_manifests, patch_loader
from ..manifest import ManifestParser
class ManifestTest(unittest.TestCase):
    """Parsing and validation tests for ManifestParser."""

    def test_missing_section(self):
        # An empty manifest must fail: the [manifest] section is mandatory.
        with self.assertRaisesRegex(
            Exception, "manifest file test is missing required section manifest"
        ):
            ManifestParser("test", "")

    def test_missing_name(self):
        # A [manifest] section without a name field is rejected.
        with self.assertRaisesRegex(
            Exception,
            "manifest file test section 'manifest' is missing required field 'name'",
        ):
            ManifestParser(
                "test",
                """
[manifest]
""",
            )

    def test_minimal(self):
        # The smallest valid manifest: just a name.
        p = ManifestParser(
            "test",
            """
[manifest]
name = foo
""",
        )
        self.assertEqual(p.name, "foo")
        self.assertEqual(p.fbsource_path, None)

    def test_minimal_with_fbsource_path(self):
        p = ManifestParser(
            "test",
            """
[manifest]
name = foo
fbsource_path = fbcode/wat
""",
        )
        self.assertEqual(p.name, "foo")
        self.assertEqual(p.fbsource_path, "fbcode/wat")

    def test_unknown_field(self):
        # Unknown fields inside a known section are rejected.
        with self.assertRaisesRegex(
            Exception,
            (
                "manifest file test section 'manifest' contains "
                "unknown field 'invalid.field'"
            ),
        ):
            ManifestParser(
                "test",
                """
[manifest]
name = foo
invalid.field = woot
""",
            )

    def test_invalid_section_name(self):
        # Unknown section names are rejected.
        with self.assertRaisesRegex(
            Exception, "manifest file test contains unknown section 'invalid.section'"
        ):
            ManifestParser(
                "test",
                """
[manifest]
name = foo
[invalid.section]
foo = bar
""",
            )

    def test_value_in_dependencies_section(self):
        # [dependencies] entries are bare names; key=value pairs are rejected.
        with self.assertRaisesRegex(
            Exception,
            (
                "manifest file test section 'dependencies' has "
                "'foo = bar' but this section doesn't allow "
                "specifying values for its entries"
            ),
        ):
            ManifestParser(
                "test",
                """
[manifest]
name = foo
[dependencies]
foo = bar
""",
            )

    def test_invalid_conditional_section_name(self):
        # Conditional section suffixes must be identifier=value expressions.
        with self.assertRaisesRegex(
            Exception,
            (
                "manifest file test section 'dependencies.=' "
                "has invalid conditional: expected "
                "identifier found ="
            ),
        ):
            ManifestParser(
                "test",
                """
[manifest]
name = foo
[dependencies.=]
""",
            )

    def test_section_as_args(self):
        # Conditional sections contribute entries only when their condition
        # matches the supplied context dict.
        p = ManifestParser(
            "test",
            """
[manifest]
name = foo
[dependencies]
a
b
c
[dependencies.foo=bar]
foo
""",
        )
        self.assertEqual(p.get_section_as_args("dependencies"), ["a", "b", "c"])
        self.assertEqual(
            p.get_section_as_args("dependencies", {"foo": "not-bar"}), ["a", "b", "c"]
        )
        self.assertEqual(
            p.get_section_as_args("dependencies", {"foo": "bar"}),
            ["a", "b", "c", "foo"],
        )
        p2 = ManifestParser(
            "test",
            """
[manifest]
name = foo
[autoconf.args]
--prefix=/foo
--with-woot
""",
        )
        self.assertEqual(
            p2.get_section_as_args("autoconf.args"), ["--prefix=/foo", "--with-woot"]
        )

    def test_section_as_dict(self):
        p = ManifestParser(
            "test",
            """
[manifest]
name = foo
[cmake.defines]
foo = bar
[cmake.defines.bar=baz]
foo = baz
""",
        )
        self.assertEqual(p.get_section_as_dict("cmake.defines"), {"foo": "bar"})
        self.assertEqual(
            p.get_section_as_dict("cmake.defines", {"bar": "baz"}), {"foo": "baz"}
        )
        p2 = ManifestParser(
            "test",
            """
[manifest]
name = foo
[cmake.defines.bar=baz]
foo = baz
[cmake.defines]
foo = bar
""",
        )
        self.assertEqual(
            p2.get_section_as_dict("cmake.defines", {"bar": "baz"}),
            {"foo": "bar"},
            msg="sections cascade in the order they appear in the manifest",
        )

    def test_parse_common_manifests(self):
        # Smoke-test every manifest bundled with the build via the patched loader.
        patch_loader(__name__)
        manifests = load_all_manifests(None)
        self.assertNotEqual(0, len(manifests), msg="parsed some number of manifests")
| supreme-core/rsocket-cpp-pybind | build/fbcode_builder/getdeps/test/manifest_test.py | manifest_test.py | py | 4,630 | python | en | code | 3 | github-code | 36 |
72135965225 | import numpy as np
# Guessing game over draws from a Bernoulli distribution (scipy.stats).
from scipy.stats import bernoulli, binom

# Bernoulli parameter: probability of drawing a 1.
p = 0.5
# Number of rounds in the game and the player's running score.
num_rodadas = 5
pontuacao = 0
print("Bem-vindo ao jogo de adivinhação!")
print(f"Você tem {num_rodadas} rodadas para adivinhar o resultado de uma distribuição de Bernoulli (1 ou 0).")
for rodada in range(num_rodadas):
    # Draw one Bernoulli sample; this is the value the player must guess.
    resultado_real = bernoulli.rvs(p, size=1)[0]
    # Ask the player for a guess and validate that it is 0 or 1.
    palpite = input(f"Rodada {rodada + 1}: Adivinhe 0 ou 1: ")
    try:
        palpite = int(palpite)
        if palpite != 0 and palpite != 1:
            print("Insira 0 ou 1 como seu palpite.")
            continue
    except ValueError:
        print("Insira 0 ou 1 como seu palpite.")
        continue
    if palpite == resultado_real:
        print("Você acertou!")
        pontuacao += 1
    else:
        print(f"Você errou. O resultado real era {resultado_real}.")
print(f"Jogo encerrado. Sua pontuação final é {pontuacao} pontos.")
# Probability of this exact score under Binomial(n, p): how likely the
# final score would be by pure chance.
n = num_rodadas  # number of trials
p_acerto = p  # per-round success probability
pontuacao_final = binom.pmf(pontuacao, n, p_acerto)
print(f"Sua pontuação final é estatisticamente significativa? ({pontuacao_final:.2%} de chance de obtê-la por acaso)")
| Dhisting1/Estatisca-Python | Estatistica-Python/gameDIstribuiçãoBernoulli.py | gameDIstribuiçãoBernoulli.py | py | 1,430 | python | pt | code | 0 | github-code | 36 |
29260257988 | import unittest
from animal import Animal
class TestAnimal(unittest.TestCase):
    def test_datosDeUnAnimal(self):
        """An animal can describe its own data."""
        cat = Animal("gato", 4, "miau")
        expected = 'Soy gato tengo 4 patas y hago miau.'
        self.assertEqual(cat.datos(), expected)
# Allow running this test file directly with `python animal_test.py`.
if __name__ == '__main__':
    unittest.main()
def check_snacks(order):
    """Decide whether students can be served snacks in order 1..n.

    `order` lists student numbers as they stand in line; a student who
    cannot be served yet steps aside onto a stack, and only the stack top
    may rejoin. Returns "Nice" if everyone can be served in increasing
    order, otherwise "Sad".

    (Extracted from the original flat script so the logic is testable; the
    original also ranged over the stack's initial length while popping it.)
    """
    stack = []
    expected = 1
    for student in order:
        # First serve anyone waiting on the stack who is now due.
        while stack and stack[-1] == expected:
            stack.pop()
            expected += 1
        if student == expected:
            expected += 1
        elif not stack or student < stack[-1]:
            stack.append(student)
        else:
            # Student would be buried under a smaller number: impossible.
            return "Sad"
    # Drain the stack; it must come off in exact increasing order.
    while stack and stack[-1] == expected:
        stack.pop()
        expected += 1
    return "Nice" if not stack else "Sad"


if __name__ == "__main__":
    input()  # first line is n; it is implied by the list length
    order = list(map(int, input().split()))
    print(check_snacks(order))
27103951389 | from flask import (
Blueprint, redirect, url_for
)
from Glastore.models.product import Product, product_heads
from Glastore.models.window import Window
from Glastore.views.auth import login_required
bp = Blueprint('product', __name__, url_prefix='/product')
@bp.route('/select_next_window/<int:id>')
@login_required
def select_next_window(id):
    """Advance the product's orientation to its next window, then return to the quote editor."""
    target = Product.get(id)
    target.orientation.select_next_window()
    edit_url = url_for('quote.edit', id=target.quote_id)
    return redirect(edit_url)
@bp.route('/rotate_window/<int:id>')
@login_required
def rotate_window(id):
    """Rotate the product's current window, then return to the quote editor."""
    target = Product.get(id)
    target.orientation.rotate_window()
    edit_url = url_for('quote.edit', id=target.quote_id)
    return redirect(edit_url)
@bp.route('/delete/<int:id>')
@login_required
def delete(id):
    """Delete the product, then return to its quote's edit page."""
    doomed = Product.get(id)
    quote_id = doomed.quote.id  # capture before the product is deleted
    doomed.delete()
    return redirect(url_for('quote.edit', id=quote_id))
| ChrisPoul/Glastore | Glastore/views/product.py | product.py | py | 950 | python | en | code | 2 | github-code | 36 |
25161881681 | import base64
import binascii
from typing import List
import falcon
import hashlib
import hmac
import json
import logging
from botocore.exceptions import ClientError
from dacite import Config, from_dict
from dataclasses import asdict
from enum import Enum
from adyen_gift_card.api.adyen_notifications.request import Notification, EventCode, NotificationRequestItem
from adyen_gift_card.api.adyen_notifications.resources import QueuesName, NotificationsCredentials
from adyen_gift_card.util.dictionary_keys_transformation import transform_dict
LOGGER = logging.getLogger()
class AdyenNotifications:
def __init__(self, notifications_auth: NotificationsCredentials, queues_name: QueuesName,
             notifications_to_process: List[str], sqs_client):
    """Store webhook credentials, target queue names, the event codes to forward, and the SQS client."""
    self.notifications_auth = notifications_auth
    self.queues_name = queues_name
    self.notifications_to_process = notifications_to_process
    self.sqs_client = sqs_client
def on_post(self, req, resp):
LOGGER.info(req.media)
if not self._validate_authorization(req.get_header("Authorization")):
resp.status = falcon.HTTP_403
resp.media = "[rejected]"
return resp
formatted_request = transform_dict(req.media)
LOGGER.info(formatted_request)
notification = from_dict(data_class=Notification, data=formatted_request, config=Config(cast=[Enum]))
if len(notification.notification_items) == 0:
resp.status = falcon.HTTP_400
resp.media = "[rejected]"
return resp
notification_item = notification.notification_items[0].notification_request_item
if not self._validate_hmac_signature(notification_item):
LOGGER.info("HMAC signature validation failed")
resp.status = falcon.HTTP_403
resp.media = "[rejected]"
return resp
if notification_item.event_code in self.notifications_to_process and notification_item.event_code == EventCode.CAPTURE:
msg = self._send_sqs_message(self.queues_name.payments, notification)
elif notification_item.event_code in self.notifications_to_process:
msg = self._send_sqs_message(self.queues_name.refunds, notification)
else:
# ignore notification, it is not in our defined notification lists
resp.status = falcon.HTTP_200
resp.media = "[accepted]"
return resp
if msg is None:
resp.status = falcon.HTTP_400
resp.media = "[rejected]"
return resp
LOGGER.info(msg)
resp.status = falcon.HTTP_200
resp.media = "[accepted]"
return resp
def _send_sqs_message(self, sqs_queue_name: str, notification: Notification):
try:
queue_url = self.sqs_client.get_queue_url(QueueName=sqs_queue_name)["QueueUrl"]
msg = self.sqs_client.send_message(QueueUrl=queue_url,
MessageBody=json.dumps(asdict(notification), default=lambda x: x.value))
LOGGER.info("Notification sent to sqs queue")
except ClientError as e:
LOGGER.error(e)
return None
return msg
def _validate_authorization(self, auth_header: str) -> bool:
auth = f'{self.notifications_auth.username}:{self.notifications_auth.password}'
base64_auth = base64.b64encode(auth.encode()).decode()
if auth_header != f'Basic {base64_auth}':
return False
return True
def _validate_hmac_signature(self, notification: NotificationRequestItem) -> bool:
original_reference = notification.original_reference if notification.original_reference is not None else ""
message = f'{notification.psp_reference}:{original_reference}:{notification.merchant_account_code}:' \
f'{notification.merchant_reference}:{notification.amount.value}:{notification.amount.currency}:{notification.event_code.value}:' \
f'{notification.success}'
LOGGER.info(f'String to validate message integrity: {message}')
hmac_key = binascii.a2b_hex(self.notifications_auth.hmac_key)
hashed_msg = base64.b64encode(hmac.new(hmac_key, msg=message.encode("utf-8"), digestmod=hashlib.sha256).digest())
return hashed_msg.decode() == notification.additional_data.get("hmac_signature", None)
| NewStore/int-cinori | integrations/adyen_gift_card/adyen_gift_card/api/adyen_notifications/adyen_notifications.py | adyen_notifications.py | py | 4,418 | python | en | code | 0 | github-code | 36 |
31694767263 | # Program that prints the number of lines of hashes
# doubles the number of hashes on each line
# Ask for a line count, then print 1, 2, 4, ... hashes — doubling per line.
num = int(input("How many lines: "))
for line_index in range(0, num):
    hash_count = 2 ** line_index
    print("#" * hash_count)
| namntran/2021_python_principles | workshops/4_exponentialGrowth.py | 4_exponentialGrowth.py | py | 213 | python | en | code | 0 | github-code | 36 |
61098782 | from collections import deque
from typing import Union
import numpy as np
from stlpy.STL import STLTree, STLFormula, LinearPredicate
# Toggle ANSI-coloured operator rendering in STL.__repr__.
COLORED = False
if COLORED:
    from termcolor import colored
else:
    def colored(text, color):
        # Fallback used when termcolor is disabled: return the text unchanged,
        # keeping the same call signature as termcolor.colored.
        return text
class STL:
    """Wrapper around an STL (Signal Temporal Logic) abstract syntax tree.

    The AST is a nested list whose head element is an operator string
    ("~", "G", "F", "&", "|", "->", "U"); leaves are stlpy formula objects.
    The class offers operator-overloaded formula construction, lazy
    conversion to an stlpy formula (``get_stlpy_form``) and a pretty
    ``__repr__``.
    """

    def __init__(self, ast: Union[list, str, STLTree, STLFormula, LinearPredicate]):
        self.ast = ast
        self.single_operators = ("~", "G", "F")
        self.binary_operators = ("&", "|", "->", "U")
        self.sequence_operators = ("G", "F", "U")
        # Caches filled lazily by get_stlpy_form() / __repr__().
        self.stlpy_form = None
        self.expr_repr = None

    """
    Syntax Functions
    """

    def __and__(self, other: 'STL') -> 'STL':
        ast = ["&", self.ast, other.ast]
        return STL(ast)

    def __or__(self, other: 'STL') -> 'STL':
        ast = ["|", self.ast, other.ast]
        return STL(ast)

    def __invert__(self) -> 'STL':
        ast = ["~", self.ast]
        return STL(ast)

    def implies(self, other: 'STL') -> 'STL':
        ast = ["->", self.ast, other.ast]
        return STL(ast)

    def eventually(self, start: int = 0, end: int = None):
        # Time-bounded operators carry (start, end) as the last two AST slots.
        ast = ["F", self.ast, start, end]
        return STL(ast)

    def always(self, start: int = 0, end: int = None) -> 'STL':
        ast = ["G", self.ast, start, end]
        return STL(ast)

    def until(self, other: 'STL', start: int = 0, end: int = None) -> 'STL':
        ast = ["U", self.ast, other.ast, start, end]
        return STL(ast)

    def get_stlpy_form(self):
        """Convert (once) and return the equivalent stlpy formula."""
        # catch already converted form
        if self.stlpy_form is None:
            self.stlpy_form = self._to_stlpy(self.ast)
        return self.stlpy_form

    def _to_stlpy(self, ast) -> STLTree:
        """Recursively translate an AST node into an stlpy formula."""
        if self._is_leaf(ast):
            if isinstance(ast, str):
                raise ValueError(f"str variable {ast} not supported")
            self.stlpy_form = ast
            return ast
        if ast[0] == "~":
            self.stlpy_form = self._handle_not(ast)
        elif ast[0] == "G":
            self.stlpy_form = self._handle_always(ast)
        elif ast[0] == "F":
            self.stlpy_form = self._handle_eventually(ast)
        elif ast[0] == "&":
            self.stlpy_form = self._handle_and(ast)
        elif ast[0] == "|":
            self.stlpy_form = self._handle_or(ast)
        elif ast[0] == "->":
            self.stlpy_form = self._handle_implies(ast)
        elif ast[0] == "U":
            self.stlpy_form = self._handle_until(ast)
        else:
            raise ValueError(f"Unknown operator {ast[0]}")
        return self.stlpy_form

    def _handle_not(self, ast):
        sub_form = self._to_stlpy(ast[1])
        return sub_form.negation()

    def _handle_and(self, ast):
        sub_form_1 = self._to_stlpy(ast[1])
        sub_form_2 = self._to_stlpy(ast[2])
        return sub_form_1 & sub_form_2

    def _handle_or(self, ast):
        sub_form_1 = self._to_stlpy(ast[1])
        sub_form_2 = self._to_stlpy(ast[2])
        return sub_form_1 | sub_form_2

    def _handle_implies(self, ast):
        # a -> b is rewritten as (~a) | b.
        sub_form_1 = self._to_stlpy(ast[1])
        sub_form_2 = self._to_stlpy(ast[2])
        return sub_form_1.negation() | sub_form_2

    def _handle_eventually(self, ast):
        sub_form = self._to_stlpy(ast[1])
        return sub_form.eventually(ast[2], ast[3])

    def _handle_always(self, ast):
        sub_form = self._to_stlpy(ast[1])
        return sub_form.always(ast[2], ast[3])

    def _handle_until(self, ast):
        sub_form_1 = self._to_stlpy(ast[1])
        sub_form_2 = self._to_stlpy(ast[2])
        return sub_form_1.until(sub_form_2, ast[3], ast[4])

    @staticmethod
    def _is_leaf(ast):
        # Leaves are stlpy formulas or raw variable names (strings).
        return issubclass(type(ast), STLFormula) or isinstance(ast, str)

    def simplify(self):
        """Simplify the cached stlpy form in place (converting first if needed)."""
        if self.stlpy_form is None:
            self.get_stlpy_form()
        self.stlpy_form.simplify()

    def __repr__(self):
        # Iterative (explicit-stack) pretty printer; the result is cached.
        if self.expr_repr is not None:
            return self.expr_repr
        single_operators = ("~", "G", "F")
        binary_operators = ("&", "|", "->", "U")
        time_bounded_operators = ("G", "F", "U")
        # traverse ast
        operator_stack = [self.ast]
        expr = ""
        cur = self.ast

        def push_stack(ast):
            # NOTE: closes over the loop variable `cur` — when a time-bounded
            # operator string is pushed, its "[start, end]" window is read from
            # the node currently being expanded.
            if isinstance(ast, str) and ast in time_bounded_operators:
                time_window = f"[{cur[-2]}, {cur[-1]}]"
                operator_stack.append(time_window)
            operator_stack.append(ast)

        while operator_stack:
            cur = operator_stack.pop()
            if self._is_leaf(cur):
                # Explicit __str__() call so the per-instance overrides set by
                # inside/outside_rectangle_formula take effect.
                expr += cur.__str__()
            elif isinstance(cur, str):
                if cur == "(" or cur == ")":
                    expr += cur
                elif cur.startswith("["):
                    # Time window, e.g. "[0, 10]".
                    expr += colored(cur, "yellow") + " "
                else:
                    if cur in ("G", "F"):
                        if cur == "F":
                            expr += colored("F", "magenta")
                        else:
                            expr += colored(cur, "magenta")
                    elif cur in ("&", "|", "->", "U"):
                        expr += " " + colored(cur, "magenta")
                        if cur != "U":
                            expr += " "
                    elif cur in ("~",):
                        expr += colored(cur, "magenta")
            elif cur[0] in single_operators:
                # single operator
                if not self._is_leaf(cur[1]):
                    push_stack(")")
                push_stack(cur[1])
                if not self._is_leaf(cur[1]):
                    push_stack("(")
                push_stack(cur[0])
            elif cur[0] in binary_operators:
                # binary operator — parenthesize nested binary sub-expressions
                # only (operands pushed right-to-left, popped left-to-right).
                if not self._is_leaf(cur[2]) and cur[2][0] in binary_operators:
                    push_stack(")")
                    push_stack(cur[2])
                    push_stack("(")
                else:
                    push_stack(cur[2])
                push_stack(cur[0])
                if not self._is_leaf(cur[1]) and cur[1][0] in binary_operators:
                    push_stack(")")
                    push_stack(cur[1])
                    push_stack("(")
                else:
                    push_stack(cur[1])
        self.expr_repr = expr
        return expr

    def get_all_predicates(self):
        """Return all leaf predicates of the AST (breadth-first order)."""
        all_preds = []
        queue = deque([self.ast])
        while queue:
            cur = queue.popleft()
            if self._is_leaf(cur):
                all_preds.append(cur)
            elif cur[0] in self.single_operators:
                queue.append(cur[1])
            elif cur[0] in self.binary_operators:
                queue.append(cur[1])
                queue.append(cur[2])
            else:
                raise RuntimeError("Should never visit here")
        return all_preds
def inside_rectangle_formula(bounds, y1_index, y2_index, d, name=None):
    """
    Create an STL formula representing being inside a
    rectangle with the given bounds:

    ::

       y2_max   +-------------------+
                |                   |
                |                   |
                |                   |
       y2_min   +-------------------+
                y1_min              y1_max

    :param bounds:      Tuple ``(y1_min, y1_max, y2_min, y2_max)`` containing
                        the bounds of the rectangle.
    :param y1_index:    index of the first (``y1``) dimension
    :param y2_index:    index of the second (``y2``) dimension
    :param d:           dimension of the overall signal
    :param name:        (optional) string describing this formula

    :return inside_rectangle:   An ``STLFormula`` specifying being inside the
                                rectangle at time zero.
    """
    assert y1_index < d, "index must be less than signal dimension"
    assert y2_index < d, "index must be less than signal dimension"
    # Unpack the bounds
    y1_min, y1_max, y2_min, y2_max = bounds
    # Create predicates a*y >= b for each side of the rectangle
    a1 = np.zeros((1, d));
    a1[:, y1_index] = 1
    right = LinearPredicate(a1, y1_min)
    left = LinearPredicate(-a1, -y1_max)
    a2 = np.zeros((1, d));
    a2[:, y2_index] = 1
    top = LinearPredicate(a2, y2_min)
    bottom = LinearPredicate(-a2, -y2_max)
    # Take the conjuction across all the sides
    inside_rectangle = right & left & top & bottom
    # set the names
    # NOTE(review): assigning __str__/__repr__ on the *instance* does not
    # change what str()/repr() print (those are looked up on the type);
    # it only affects explicit ``obj.__str__()`` calls — which is how this
    # codebase renders leaves.
    if name is not None:
        inside_rectangle.__str__ = lambda: str(name)
        inside_rectangle.__repr__ = lambda: str(name)
    return inside_rectangle
def outside_rectangle_formula(bounds, y1_index, y2_index, d, name=None):
    """
    Create an STL formula representing being outside a
    rectangle with the given bounds:

    ::

       y2_max   +-------------------+
                |                   |
                |                   |
                |                   |
       y2_min   +-------------------+
                y1_min              y1_max

    :param bounds:      Tuple ``(y1_min, y1_max, y2_min, y2_max)`` containing
                        the bounds of the rectangle.
    :param y1_index:    index of the first (``y1``) dimension
    :param y2_index:    index of the second (``y2``) dimension
    :param d:           dimension of the overall signal
    :param name:        (optional) string describing this formula

    :return outside_rectangle:   An ``STLFormula`` specifying being outside the
                                 rectangle at time zero.
    """
    assert y1_index < d, "index must be less than signal dimension"
    assert y2_index < d, "index must be less than signal dimension"
    # Unpack the bounds
    y1_min, y1_max, y2_min, y2_max = bounds
    # Create predicates a*y >= b for each side of the rectangle
    a1 = np.zeros((1, d))
    a1[:, y1_index] = 1
    right = LinearPredicate(a1, y1_max)
    left = LinearPredicate(-a1, -y1_min)
    a2 = np.zeros((1, d))
    a2[:, y2_index] = 1
    top = LinearPredicate(a2, y2_max)
    bottom = LinearPredicate(-a2, -y2_min)
    # Take the disjuction across all the sides
    outside_rectangle = right | left | top | bottom
    # set the names
    # NOTE(review): instance-level __str__/__repr__ assignment only affects
    # explicit ``obj.__str__()`` calls, not builtin str()/repr() — see the
    # matching note in inside_rectangle_formula.
    if name is not None:
        outside_rectangle.__str__ = lambda: str(name)
        outside_rectangle.__repr__ = lambda: str(name)
    return outside_rectangle
| ZikangXiong/STL-Mobile-Robot | src/stl_mob/stl/stl.py | stl.py | py | 10,389 | python | en | code | 4 | github-code | 36 |
759288278 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import datetime
from sqlalchemy import Column, String, create_engine, Integer, TIMESTAMP, func, Float, desc, Boolean
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from flask import Flask, render_template, request
from flask_script import Manager
# 创建对象的基类:
Base = declarative_base()
app = Flask(__name__)
manager = Manager(app)
class Event(Base):
    """Calendar event row (table ``event``)."""
    __tablename__ = 'event'

    id = Column(Integer, primary_key=True)
    userid = Column(String(1024))  # presumably the owning user's id — confirm against callers
    title = Column(String(1024))
    description = Column(String(1024))
    forecolor = Column(String(256))  # presumably a display colour — confirm
    icon = Column(String(256))
    location = Column(String(256))
    calendar = Column(String(256))
    busy = Column(Boolean)
    # Creation/modification timestamps, maintained automatically by SQLAlchemy
    # defaults (gmt_modify also refreshes on UPDATE).
    gmt_create = Column(TIMESTAMP, default=datetime.datetime.now)
    gmt_modify = Column(TIMESTAMP, default=datetime.datetime.now, onupdate=datetime.datetime.now)
class Schedule(Base):
    """Schedule row pointing at an Event by id (table ``schedule``)."""
    __tablename__ = 'schedule'

    id = Column(Integer,primary_key=True)
    # NOTE(review): plain Integer with no ForeignKey('event.id') constraint —
    # referential integrity is not enforced by the database.
    event_id = Column(Integer)
# Database engine + session factory.
# NOTE(review): credentials are hard-coded — move to config/environment
# variables before deploying.
engine = create_engine('mysql+pymysql://root:123456@127.0.0.1:3306/calendar?charset=utf8')
DBSession = sessionmaker(bind=engine)
@app.route('/')
def index():
    """Render the calendar front page."""
    return render_template('index.html')
@app.route('/api')
def data():
    """Placeholder API endpoint — not implemented yet (returns None)."""
    pass
@manager.command
def run():
    """Start the development server on all interfaces (port 8080).

    NOTE(review): debug=True must not be used in production.
    """
    app.run(host='0.0.0.0', port=8080, threaded=True, debug=True)
@manager.command
def initdb():
    """Create all tables declared on Base's metadata."""
    Base.metadata.create_all(engine)
if __name__ == '__main__':
manager.run()
| DxfAndCxx/calendar | app.py | app.py | py | 1,565 | python | en | code | 0 | github-code | 36 |
30857649928 | import json
import shutil
import hashlib
import os
def get_hash_md5(filename):
    """Return the hex MD5 digest of the file at *filename*, read in 8 KiB chunks."""
    digest = hashlib.md5()
    with open(filename, 'rb') as stream:
        for chunk in iter(lambda: stream.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
with open('sourse.json', 'r', encoding='utf-8') as f:
data = json.load(f)
ORIGINAL_PATH, COPY_PATH = data["OriginalPath"], data["CopyPath"]
def del_item(path):
    """Remove *path* (file or directory tree) from the backup copy.

    Bug fix: the original used os.rmdir(), which raises OSError for any
    non-empty directory — but the sync (see run()) must delete whole
    directory trees that disappeared from the source.  shutil.rmtree()
    removes the tree recursively.
    """
    if os.path.isdir(path):
        shutil.rmtree(path)
    else:
        os.remove(path)
def copy_item(org_path, cp_path, item):
    """Copy *item* from the original directory into the copy directory.

    Directories are copied recursively (copytree); plain files are copied
    into the destination directory.
    """
    source = org_path + item
    if os.path.isdir(source):
        shutil.copytree(source, cp_path + item)
    else:
        shutil.copy(source, cp_path)
def run(dop_path=''):
    """Mirror ORIGINAL_PATH into COPY_PATH for the relative subdirectory *dop_path*.

    Items present only in the copy are deleted; items present only in the
    original are copied over; items present in both are recursed into
    (directories) or re-copied when their MD5 digests differ (files).
    Paths use Windows-style backslash separators, matching the config.
    """
    Org_dir_set = set(os.listdir(ORIGINAL_PATH + dop_path))
    Cp_dir_set = set(os.listdir(COPY_PATH + dop_path)) - {"System Volume Information"}
    for item in Cp_dir_set - Org_dir_set: del_item(COPY_PATH + dop_path + item)
    for item in Org_dir_set - Cp_dir_set: copy_item(ORIGINAL_PATH + dop_path, COPY_PATH + dop_path, item)
    for item in Org_dir_set & Cp_dir_set:
        if os.path.isdir(ORIGINAL_PATH + dop_path + item):
            # Bug fix: recurse with the *accumulated* relative path.  The
            # original passed only f'{item}\\', so directories nested two or
            # more levels deep were compared against the wrong locations.
            run(dop_path=f'{dop_path}{item}\\')
        elif get_hash_md5(ORIGINAL_PATH + dop_path + item) != get_hash_md5(COPY_PATH + dop_path + item):
            shutil.copyfile(ORIGINAL_PATH + dop_path + item, COPY_PATH + dop_path + item)
run() | AlexVorobushek/UpdateFilesOnFlashDrive | main.py | main.py | py | 1,422 | python | en | code | 0 | github-code | 36 |
12559352729 | """7-9: No Pastrami"""
"""Using the list sandwich_orders from Exercise 7-8, make sure the sandwich 'pastrami'
appears in the list at least three times. Add code near the beginning of your program
to print a message saying the deli has run out of pastrami, and then use a while loop
to remove all occurences of 'pastrami' from sandwich_orders. Make sure no pastrami sandwiches
end up in finished_sandiches."""
# Order queue containing three pastrami orders; the deli is out of pastrami.
sandwich_orders = [
    'pastrami', 'veggie', 'grilled cheese', 'pastrami',
    'turkey', 'roast beef', 'pastrami']
finished_sandwiches = []

print(sandwich_orders)
print(f"I'm sorry, we're all out of pastrami today.")

# Drop every pastrami order, showing the queue after each removal.
while True:
    try:
        sandwich_orders.remove('pastrami')
    except ValueError:
        break
    print(sandwich_orders)

# Work through the remaining orders from the back of the queue.
while sandwich_orders:
    sandwich = sandwich_orders.pop()
    print(f"I'm working on your {sandwich} sandwich")
    finished_sandwiches.append(sandwich)

print(finished_sandwiches)
for sandwich in finished_sandwiches:
    print(f"I made a {sandwich} sandwich.")
print(f"************************************************************************************")
"""7-10: Dream Vacation"""
"""Write a program that polls users about their dream vacation. Write a prompt similar to
If you could visit one place in the world, where would you go? Include a block of code that prints
the results of the poll."""
name_prompt = f"What's your name??"
place_prompt = f"If you could visit one place in the world, where would it be? "
continue_prompt = f"Would you like to let someone else respond? (yes/no) "

# Responses are stored as {name: place}.
responses = {}

polling_active = True
while polling_active:
    # Ask the current user where they'd like to go.
    respondent = input(name_prompt)
    destination = input(place_prompt)
    responses[respondent] = destination

    # Any answer other than 'yes' ends the poll.
    if input(continue_prompt) != 'yes':
        polling_active = False

# Show the results of the survey.
print(f"************************SURVEY RESULTS*********************************")
for name, place in responses.items():
    print(f"{name.title()} would like to visit {place.title()}.")
print(f'***********************************************************************')
14002518900 | import requests
import os
import re
from lxml import etree
def ParseHTML(url):
    """Fetch *url* and return the response body parsed into an lxml HTML tree."""
    document_text = requests.get(url).text
    return etree.HTML(document_text)
class Comic():
    """Scraper for the 'Attack On Titan' comic hosted on manhua.fzdm.com."""

    def __init__(self):
        self.baseurl = "https://manhua.fzdm.com/39/"
        self.baseimgurl = "https://p5.manhuapan.com/"
        self.name = "Attack On Titan"
        self.chapters = []  # filled by GetChapters(): [{'title': ..., 'url': ...}]

    def GetChapters(self):
        """Populate self.chapters from the comic index page and return self."""
        html = ParseHTML(self.baseurl)
        nodes = html.xpath('//div[@id="content"]/li/a')
        for node in nodes:
            title = node.text
            url = self.baseurl + node.attrib['href']
            self.chapters.append({'title': title, 'url': url})
        return self

    def GetImgIter(self, url):
        """Yield ``(page_index, image_url)`` pairs for a chapter until a page 404s.

        Bug fix: the original yielded the SET ``{img_url, idx}``, so the
        tuple unpacking in SaveChapter received the two values in arbitrary
        order.  An ordered tuple is yielded instead.
        """
        idx = 0
        while True:
            pageurl = f"{url}index_{str(idx)}.html"
            try:
                # ExtractImg raises IndexError on HTTP 404 (past last page).
                yield idx, self.ExtractImg(pageurl)
            except IndexError:
                return
            idx += 1

    def ExtractImg(self, pageurl):
        """Return the full image URL embedded in a chapter page.

        Raises IndexError when the page does not exist (HTTP 404).
        """
        res = requests.get(pageurl)
        if res.status_code == 404:
            raise IndexError
        # The image path is assigned to the JS variable `mhurl` in the page.
        regexp = re.compile(r'(?<=mhurl=").*?(?=";)')
        rawDoc = res.text
        imgurl = self.baseimgurl + regexp.search(rawDoc).group(0)
        return imgurl

    def SaveChapter(self, chapter):
        """Download every page image of *chapter* into '<name>/<title>/'."""
        title = chapter['title']
        url = chapter['url']
        path = f"{self.name}/{title}/"
        os.makedirs(path, exist_ok=True)
        imgurls = self.GetImgIter(url)
        for idx, imgurl in imgurls:
            print(imgurl)
            # with requests.get(imgurl, stream=True) as res:
            #     with open(f"{path}{str(idx)}.jpg", "wb") as pic:
            #         for chunk in res.iter_content():
            #             pic.write(chunk)

    def Run(self):
        """Download all chapters."""
        self.GetChapters()
        for chapter in self.chapters:
            self.SaveChapter(chapter)
# Smoke test: dump the image URLs of chapter 001 into "Attack On Titan/test/".
c = Comic()
c.SaveChapter({'title': "test", 'url': "https://manhua.fzdm.com/39/001/"})
# c.ExtractImg("https://manhua.fzdm.com/39/001/index_100.html")
# img = c.ExtractImg("https://manhua.fzdm.com/39/001/index_1.html")
# print(img)
# for chp in c.GetChapters().chapters:
#     print(chp)
| Rickenbacker620/Codes | Python/Comic/comic.py | comic.py | py | 2,192 | python | en | code | 0 | github-code | 36 |
74353820583 | import torch
from numbers import Number
import numpy as np
class RandomMasking(torch.nn.Module):
    """
    Random Masking from the paper "Hide-and-Seek: Forcing a Network to be Meticulous for
    Weakly-supervised Object and Action Localization"
    """

    def __init__(self, p_mask, patch_size, value):
        """
        Arguments:
            p_mask: float (0.0-1.0) - probabilty that a patch gets masked
            patch_size: int/tuple/list - size of the patches (must fit into the image)
            value: number or list of three numbers - value of the patches
        """
        super().__init__()
        if not isinstance(value, (Number, list)):
            raise TypeError("Argument value should be a number or list of numbers.")
        if not isinstance(patch_size, (int, tuple, list)):
            raise TypeError("Argument patch_size should be an int, tuple or list.")
        if not isinstance(p_mask, Number):
            raise TypeError("Argument p_mask should be a number.")
        if p_mask < 0 or p_mask > 1:
            # NOTE(review): arguably a ValueError, kept as TypeError for
            # backward compatibility with existing callers.
            raise TypeError("Masking probability should be between 0 and 1.")

        self.p_mask = p_mask
        # Normalize patch_size to a (height, width) pair.
        if isinstance(patch_size, (tuple, list)):
            self.patch_size = patch_size
        else:
            self.patch_size = (patch_size, patch_size)
        self.value = value

    def forward(self, img):
        """
        Args:
            img (Tensor): Tensor image to be masked; 3-D inputs get a batch
                dimension prepended. A size-3 second axis is treated as the
                channel axis (B, C, H, W) and moved to the back for masking,
                then restored — ambiguous when H happens to be 3.
        Returns:
            img (Tensor): Masked 4-D Tensor image (masking happens in place
                on the input's storage).
        """
        size = img.shape
        if len(size) == 3:
            img = img.unsqueeze(0)
            size = img.shape
        elif len(size) < 3:
            raise TypeError("Tensor must have 3 or 4 dimensions.")
        reshape = False
        if size[1] == 3:
            reshape = True
            img = torch.permute(img, (0, 2, 3, 1))
            size = img.shape
        B, H, W = size[0:-1]
        if not (H % self.patch_size[0] == 0 and W % self.patch_size[1] == 0):
            raise TypeError("Patch size must fit perfectly in image size.")
        n_vert = H // self.patch_size[0]
        n_hor = W // self.patch_size[1]
        n_patches = (B, n_vert, n_hor)
        # Independent Bernoulli(p_mask) draw per patch.
        masked = torch.from_numpy(np.random.binomial(1, self.p_mask, n_patches).astype(bool))
        blocks = img.view(B, n_vert, self.patch_size[0], n_hor, self.patch_size[1],
                          3).swapaxes(2, 3)
        # Bug fix: `torch.Tensor(self.value)` treats a scalar as a *size*
        # argument (or raises for floats), even though __init__ accepts
        # plain numbers.  `torch.as_tensor` converts the value itself, so
        # both scalars and 3-element lists broadcast into the patches.
        blocks[masked] = torch.as_tensor(self.value, dtype=img.dtype)
        img = blocks.swapaxes(2, 3).view(size)
        if reshape:
            img = torch.permute(img, (0, 3, 1, 2))
        return img
15860181793 | # -*- coding: utf-8 -*-
"""
=============================
Plot temporal clustering
=============================
This example plots temporal clustering, the extent to which subject tend to
recall neighboring items sequentially.
"""
# Code source: Andrew Heusser
# License: MIT
# Third-party: quail (free-recall analysis toolbox).
import quail

# Load the bundled example egg (recall dataset).
egg = quail.load('example')

# Compute temporal clustering — labelling the first four lists "early" and
# the last four "late" — then plot the result.
fegg = egg.analyze('temporal', listgroup=['early']*4+['late']*4)
fegg.plot(title='Temporal Clustering')
| ContextLab/quail | examples/plot_temporal.py | plot_temporal.py | py | 467 | python | en | code | 18 | github-code | 36 |
27293090578 | from django.conf import settings
from django.core.exceptions import ValidationError
# from django.core.validators import MinValueValidator
from django.db import models
from trip.validators import validator_datetime
from users.models import User
class Company(models.Model):
    """An airline company (unique by name)."""
    name = models.CharField(
        max_length=64,
        unique=True,
        verbose_name='Компания',
        help_text='Введите название компании'
    )

    class Meta:
        verbose_name = 'Компания'
        verbose_name_plural = 'Компании'

    def __str__(self):
        return self.name
class Plane(models.Model):
    """An aircraft, identified by a unique name, with a seat capacity."""
    name = models.CharField(
        max_length=128,
        unique=True,
        verbose_name='Название самолета',
        help_text='Введите название самолета'
    )
    number = models.PositiveIntegerField(
        verbose_name='Номер самолета',
        help_text='Введите номер самолета'
    )
    # Readiness flag; defaults to "not ready".
    ready = models.BooleanField(
        default=False,
        verbose_name='Готовность самолета',
        help_text='Измените готовность самолета'
    )
    # Seat capacity; used by Pass_in_trip.clean() to validate seat numbers.
    capacity = models.PositiveIntegerField(
        verbose_name='Количество мест',
        help_text='Введите кол-во мест в самолете'
    )

    class Meta:
        verbose_name = 'Самолет'
        verbose_name_plural = 'Самолеты'

    def __str__(self):
        return self.name
class Airport(models.Model):
    """An airport with an associated IANA time zone."""
    # Allowed time-zone identifiers for airports.
    TZ_CHOICES = [
        ("UTC", "UTC"),
        ("Europe/Moscow", "Europe/Moscow"),
        ("Asia/Kamchatka", "Asia/Kamchatka")
    ]
    name = models.CharField(
        max_length=128,
        unique=True,
        verbose_name='Название аэропорта',
        help_text='Введите название аэропорта'
    )
    # Defaults to the project-wide TIME_ZONE setting.
    ap_time_zone = models.CharField(
        max_length=128,
        verbose_name='Таймзона аэропорта',
        help_text='Введите таймзону аэропорта',
        choices=TZ_CHOICES,
        default=settings.TIME_ZONE
    )

    class Meta:
        verbose_name = 'Аэропорт'
        verbose_name_plural = 'Аэропорты'

    def __str__(self):
        return self.name
class Trip(models.Model):
    """A flight: company + plane + route + departure/arrival times."""
    company = models.ForeignKey(
        Company,
        on_delete=models.CASCADE,
        related_name='trips',
        verbose_name='Компания',
        help_text='Компания'
    )
    plane = models.ForeignKey(
        Plane,
        on_delete=models.CASCADE,
        related_name='trips',
        verbose_name='Самолет',
        help_text='Самолет'
    )
    airport_from = models.ForeignKey(
        Airport,
        on_delete=models.CASCADE,
        related_name='trips_from',
        verbose_name='Из аэропорта',
        help_text='Из аэропорта'
    )
    airport_to = models.ForeignKey(
        Airport,
        on_delete=models.CASCADE,
        related_name='trips_to',
        verbose_name='В аэропорт',
        help_text='В аэропорт'
    )
    time_out = models.DateTimeField(
        validators=[validator_datetime, ],
        verbose_name='Дата/Время вылета',
    )
    time_in = models.DateTimeField(
        verbose_name='Дата/Время прилета',
        validators=[validator_datetime, ],
    )

    class Meta:
        verbose_name = 'Перелет'
        verbose_name_plural = 'Перелеты'

    def __str__(self):
        return f'id: {self.id}, по маршруту: {self.airport_from} - {self.airport_to}, вылет {self.time_out}, прибытие {self.time_in}'

    def clean(self):
        """Reject a departure scheduled before the plane's latest known arrival.

        Bug fix: ``aggregate(Max(...))`` returns ``{'time_in__max': None}``
        when the plane has no trips yet, and comparing a datetime with None
        raises TypeError — guard against the empty case.
        """
        board_buse = self.plane.trips.all().aggregate(models.Max('time_in'))
        latest_arrival = board_buse['time_in__max']
        if latest_arrival is not None and self.time_out <= latest_arrival:
            raise ValidationError('В это время самолет еще в полете.')
class Pass_in_trip(models.Model):
    """A passenger's seat assignment on a specific trip."""
    passenger = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='pass_in_trip',
        verbose_name='Пассажир',
        help_text='Пассажир'
    )
    # NOTE(review): unique=True makes the seat number unique across the WHOLE
    # table (all trips), not per trip — presumably unintended; consider a
    # UniqueConstraint on (trip, place) instead.
    place = models.PositiveIntegerField(
        unique=True,
        verbose_name='Номер места',
        help_text='Введите номер места',
        # validators=[MinValueValidator(1, 'Место не может быть менее 1.'),]
    )
    trip = models.ForeignKey(
        Trip,
        on_delete=models.CASCADE,
        related_name='pass_in_trips',
        verbose_name='Пассажиры в рейсе',
        help_text='Пассажиры в рейсе',
    )

    class Meta:
        verbose_name = 'Пассажир_место'
        verbose_name_plural = 'Пассажиры_места'

    def __str__(self):
        return f'Пассажир - {self.passenger.first_name} {self.passenger.last_name} место - {self.place} рейс ID -{self.trip.id}'

    def clean(self):
        """Reject seat numbers exceeding the plane's capacity for this trip."""
        if self.place > self.trip.plane.capacity:
            raise ValidationError('Место не может быть больше, чем мест в самолете.')
| ZOMini/avia_trip | avia/trip/models.py | models.py | py | 5,272 | python | ru | code | 0 | github-code | 36 |
6187622265 | # coding: utf-8
from __future__ import absolute_import
from datetime import date, datetime # noqa: F401
from typing import List, Dict # noqa: F401
from swagger_server.models.base_model_ import Model
from swagger_server import util
class Error(Model):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    # NOTE(review): 'massage' is a typo for 'message', but it is part of the
    # generated API surface (see attribute_map), so it must stay as-is.
    def __init__(self, code: int=None, massage: str=None):  # noqa: E501
        """Error - a model defined in Swagger

        :param code: The code of this Error.  # noqa: E501
        :type code: int
        :param massage: The massage of this Error.  # noqa: E501
        :type massage: str
        """
        self.swagger_types = {
            'code': int,
            'massage': str
        }

        self.attribute_map = {
            'code': 'code',
            'massage': 'massage'
        }
        self._code = code
        self._massage = massage

    @classmethod
    def from_dict(cls, dikt) -> 'Error':
        """Returns the dict as a model

        :param dikt: A dict.
        :type: dict
        :return: The Error of this Error.  # noqa: E501
        :rtype: Error
        """
        return util.deserialize_model(dikt, cls)

    @property
    def code(self) -> int:
        """Gets the code of this Error.


        :return: The code of this Error.
        :rtype: int
        """
        return self._code

    @code.setter
    def code(self, code: int):
        """Sets the code of this Error.


        :param code: The code of this Error.
        :type code: int
        """
        # `code` is required by the API schema; reject None explicitly.
        if code is None:
            raise ValueError("Invalid value for `code`, must not be `None`")  # noqa: E501

        self._code = code

    @property
    def massage(self) -> str:
        """Gets the massage of this Error.


        :return: The massage of this Error.
        :rtype: str
        """
        return self._massage

    @massage.setter
    def massage(self, massage: str):
        """Sets the massage of this Error.


        :param massage: The massage of this Error.
        :type massage: str
        """
        # Unlike `code`, this field is optional — no None check is generated.
        self._massage = massage
| ArinaYuhimenko/Swagger | error.py | error.py | py | 2,151 | python | en | code | 0 | github-code | 36 |
29423648678 | import logging
from collections import namedtuple, defaultdict
from copy import deepcopy
from dataclasses import dataclass, field
from typing import List, Tuple
import numpy as np
import networkx as nx
import parmed as pm
from IPython.display import display, SVG
from rdkit import Chem
from rdkit.Chem import AllChem, Draw, rdFMCS, rdCoordGen
from rdkit.Chem.Draw import rdMolDraw2D
from rdkit.Chem.Draw import IPythonConsole
IPythonConsole.molSize = (900, 900) # Change image size
IPythonConsole.ipython_useSVG = True # Change output to SVG
from transformato.system import SystemStructure
from transformato.annihilation import calculate_order_of_LJ_mutations_asfe
logger = logging.getLogger(__name__)
def _flattened(list_of_lists: list) -> list:
return [item for sublist in list_of_lists for item in sublist]
def _performe_linear_charge_scaling(
nr_of_steps: int,
intermediate_factory,
mutation,
):
for lambda_value in np.linspace(1, 0, nr_of_steps + 1)[1:]:
print("####################")
print(
f"Coulomb scaling in step: {intermediate_factory.current_step} with lamb: {lambda_value}"
)
print("####################")
intermediate_factory.write_state(
mutation_conf=mutation,
lambda_value_electrostatic=lambda_value,
)
def _performe_linear_cc_scaling(
nr_of_steps: int,
intermediate_factory,
mutation,
) -> int:
for lambda_value in np.linspace(1, 0, nr_of_steps + 1)[1:]:
print("####################")
print(
f"Perform paramteter scaling on cc in step: {intermediate_factory.current_step} with lamb: {lambda_value}"
)
print("####################")
intermediate_factory.write_state(
mutation_conf=mutation,
common_core_transformation=lambda_value,
)
def perform_mutations(
    configuration: dict,
    i,
    mutation_list: list,
    list_of_heavy_atoms_to_be_mutated: list = [],
    nr_of_mutation_steps_charge: int = 5,
    nr_of_mutation_steps_lj_of_hydrogens: int = 1,
    nr_of_mutation_steps_lj_of_heavy_atoms: int = 1,
    nr_of_mutation_steps_cc: int = 5,
    endstate_correction: bool = False,
):
    """Performs the mutations necessary to mutate the physical endstate to the defined common core.

    States are written in a fixed order: physical endstate, charge scaling,
    hydrogen LJ scaling, heavy-atom LJ scaling, terminal default-LJ particle
    (not for asfe), and finally the common-core parameter interpolation.

    Args:
        configuration (dict): A configuration dictionary.
        i ([type]): IntermediateState instance
        mutation_list (list): list of mutation objects
        list_of_heavy_atoms_to_be_mutated (list, optional): A list of atom indices that define the order in which the vdw parameters of the heavy atoms are turned off. Defaults to [].
        nr_of_mutation_steps_charge (int, optional): Nr of steps to turne of the charges. Defaults to 5.
        nr_of_mutation_steps_lj_of_hydrogens (int, optional): Nr of steps to turne of lj of hydrogens. Only needed for systems with many hydrogens in dummy region
        nr_of_mutation_steps_lj_of_heavy_atoms (int, optional): Nr of steps to turne of the lj of heavy atoms
        nr_of_mutation_steps_cc (int, optional): Nr of steps to interpolate between the common core parameters. Defaults to 5.
        endstate_correction (bool, optional): if True, ``i.endstate_correction()`` is called after all states are written. Defaults to False.
    """
    # NOTE(review): list_of_heavy_atoms_to_be_mutated has a mutable default
    # argument; it is rebound (never mutated) below, so this is benign, but a
    # ``None`` default would be more idiomatic.
    from transformato.utils import map_lj_mutations_to_atom_idx

    ######################################
    # write endpoint mutation
    ######################################
    print("####################")
    print(f"Physical endstate in step: 1")
    print("####################")
    i.write_state(mutation_conf=[])
    ######################################
    # turn off electrostatics
    ######################################
    m = mutation_list["charge"]
    # turn off charges
    # if number of charge mutation steps are defined in config file overwrite default or passed value
    try:
        nr_of_mutation_steps_charge = configuration["system"][i.system.structure][
            "mutation"
        ]["steps_charge"]
        print("Using number of steps for charge mutattions as defined in config file")
    except KeyError:
        pass
    _performe_linear_charge_scaling(
        nr_of_steps=nr_of_mutation_steps_charge,
        intermediate_factory=i,
        mutation=m,
    )
    ######################################
    # turn off LJ
    ######################################
    ######################################
    # Turn off hydrogens
    if nr_of_mutation_steps_lj_of_hydrogens == 1:
        # single step: hydrogens of the dummy region are switched off at once
        if mutation_list["hydrogen-lj"]:
            print("####################")
            print(f"Hydrogen vdW scaling in step: {i.current_step} with lamb: {0.0}")
            print("####################")
            i.write_state(
                mutation_conf=mutation_list["hydrogen-lj"],
                lambda_value_vdw=0.0,
            )
    else:
        # Scaling lj-parameters in multiple steps
        # NOTE(review): this linspace includes both endpoints (0.75 and 0.0),
        # so n + 1 states are written -- confirm that is intended.
        if mutation_list["hydrogen-lj"]:
            for lambda_value in np.linspace(
                0.75, 0, nr_of_mutation_steps_lj_of_hydrogens + 1
            ):
                print("####################")
                print(
                    f"Hydrogen vdW scaling in step: {i.current_step} with lamb: {lambda_value}"
                )
                print("####################")
                i.write_state(
                    mutation_conf=mutation_list["hydrogen-lj"],
                    lambda_value_vdw=lambda_value,
                )
    ######################################
    # turn off lj of heavy atoms
    # take the order from either config file, passed to this function or the default ordering
    try:
        list_of_heavy_atoms_to_be_mutated = configuration["system"][i.system.structure][
            "mutation"
        ]["heavy_atoms"]
        print("Using ordering of LJ mutations as defined in config file.")
    except KeyError:
        if not list_of_heavy_atoms_to_be_mutated:
            # Use the ordering provided by _calculate_order_of_LJ_mutations
            list_of_heavy_atoms_to_be_mutated = [
                lj.vdw_atom_idx[0] for lj in (mutation_list["lj"])
            ]
            print("Using calculated ordering of LJ mutations.")
        else:
            print("Using passed ordering of LJ mutations.")
    mapping_of_atom_idx_to_mutation = map_lj_mutations_to_atom_idx(mutation_list["lj"])
    for heavy_atoms_to_turn_off_in_a_single_step in list_of_heavy_atoms_to_be_mutated:
        logger.info(
            f"turning off lj of heavy atom: {heavy_atoms_to_turn_off_in_a_single_step}"
        )
        try:  # heavy_atoms_to_turn_off_in_a_single_step can be a tuple or an integer
            mutations = [
                mapping_of_atom_idx_to_mutation[heavy_atom_idx]
                for heavy_atom_idx in heavy_atoms_to_turn_off_in_a_single_step
            ]
        except TypeError:
            # not iterable -> a single atom index
            mutations = [
                mapping_of_atom_idx_to_mutation[
                    heavy_atoms_to_turn_off_in_a_single_step
                ]
            ]
        # only used in asfe to ensure that last atom is
        # turned off in two steps
        if (
            heavy_atoms_to_turn_off_in_a_single_step
            == list_of_heavy_atoms_to_be_mutated[-1]
            and configuration["simulation"]["free-energy-type"] == "asfe"
        ):
            for lambda_value in np.linspace(
                0.75, 0, nr_of_mutation_steps_lj_of_heavy_atoms + 1
            ):
                print("####################")
                print(
                    f"Turn off last heavy atom vdW parameter in: {i.current_step} on atoms: {heavy_atoms_to_turn_off_in_a_single_step} with lambda {lambda_value}"
                )
                print("####################")
                i.write_state(
                    mutation_conf=mutations,
                    lambda_value_vdw=lambda_value,
                )
        elif nr_of_mutation_steps_lj_of_heavy_atoms == 1:
            print("####################")
            print(
                f"Turn off heavy atom vdW parameter in: {i.current_step} on atoms: {heavy_atoms_to_turn_off_in_a_single_step}"
            )
            print("####################")
            i.write_state(
                mutation_conf=mutations,
                lambda_value_vdw=0.0,
            )
        else:
            for lambda_value in np.linspace(
                0.75, 0, nr_of_mutation_steps_lj_of_heavy_atoms + 1
            ):
                print("####################")
                print(
                    f"Turn off heavy atom vdW parameter in: {i.current_step} on atoms: {heavy_atoms_to_turn_off_in_a_single_step} with lambda {lambda_value}"
                )
                print("####################")
                i.write_state(
                    mutation_conf=mutations,
                    lambda_value_vdw=lambda_value,
                )
    ######################################
    # generate terminal LJ
    ######################################
    if not configuration["simulation"]["free-energy-type"] == "asfe":
        print("####################")
        print(
            f"Generate terminal LJ particle in step: {i.current_step} on atoms: {[v.vdw_atom_idx for v in mutation_list['default-lj']]}"
        )
        print("####################")
        i.write_state(
            mutation_conf=mutation_list["default-lj"],
            lambda_value_vdw=0.0,
        )
    ######################################
    # mutate common core
    ######################################
    if mutation_list["transform"]:
        try:
            nr_of_mutation_steps_cc = configuration["system"][i.system.structure][
                "mutation"
            ]["steps_common_core"]
        except KeyError:
            # keep the default / passed value
            nr_of_mutation_steps_cc = nr_of_mutation_steps_cc
        # change bonded parameters on common core
        _performe_linear_cc_scaling(
            nr_of_steps=nr_of_mutation_steps_cc,
            intermediate_factory=i,
            mutation=mutation_list["transform"],
        )
    if endstate_correction:
        i.endstate_correction()
@dataclass
class DummyRegion:
    """Describes the dummy (non common-core) region of one molecule."""

    mol_name: str
    match_termin_real_and_dummy_atoms: dict
    connected_dummy_regions: list
    tlc: str
    lj_default: list

    def return_connecting_real_atom(self, dummy_atoms: list):
        """Return the real atom bonded to any atom in *dummy_atoms*.

        Logs and returns ``None`` when no connecting real atom exists.
        """
        wanted = set(dummy_atoms)
        for real_atom, attached_dummies in self.match_termin_real_and_dummy_atoms.items():
            if wanted.intersection(attached_dummies):
                logger.debug(f"Connecting real atom: {real_atom}")
                return real_atom
        logger.critical("No connecting real atom was found!")
        return None
@dataclass
class MutationDefinition:
    """Describes one mutation step: which atoms are affected and on which common core."""

    # atoms_to_be_mutated: indices of the atoms this mutation acts on
    atoms_to_be_mutated: List[int]
    # common_core: indices of the common-core atoms of the molecule
    common_core: List[int]
    # dummy_region: the DummyRegion this mutation belongs to
    dummy_region: DummyRegion
    # vdw_atom_idx: atoms whose vdW parameters are decoupled (may be empty)
    vdw_atom_idx: List[int] = field(default_factory=list)
    # steric_mutation_to_default: presumably switches sterics to the default
    # LJ particle -- TODO confirm against the Mutation implementation
    steric_mutation_to_default: bool = False

    def print_details(self):
        """Print a short human-readable summary of this mutation definition."""
        print("####################")
        print(f"Atoms to be mutated: {self.atoms_to_be_mutated}")
        print(f"Mutated on common core: {self.common_core}")
        if self.vdw_atom_idx:
            print(f"VDW atoms to be decoupled: {self.vdw_atom_idx}")
class ProposeMutationRoute(object):
def __init__(
self,
s1: SystemStructure,
s2: SystemStructure = None,
):
"""
A class that proposes the mutation route between two molecules with a
common core (same atom types) based on two mols and generates the mutation
objects to perform the mutation on the psf objects.
Parameters
----------
mol1: Chem.Mol
mol2: Chem.Mol
"""
try:
mol1_name: str = "m1"
mol2_name: str = "m2"
self.system: dict = {"system1": s1, "system2": s2}
self.mols: dict = {mol1_name: s1.mol, mol2_name: s2.mol}
self.graphs: dict = {mol1_name: s1.graph, mol2_name: s2.graph}
# psfs for reference of only ligand
self.psfs: dict = {
mol1_name: s1.psfs["waterbox"][f":{s1.tlc}"],
mol2_name: s2.psfs["waterbox"][f":{s2.tlc}"],
}
self.psf1: pm.charmm.CharmmPsfFile = s1.psfs
self.psf2: pm.charmm.CharmmPsfFile = s2.psfs
self._substructure_match: dict = {mol1_name: [], mol2_name: []}
self.removed_indeces: dict = {mol1_name: [], mol2_name: []}
self.added_indeces: dict = {mol1_name: [], mol2_name: []}
self.s1_tlc = s1.tlc
self.s2_tlc = s2.tlc
self.terminal_real_atom_cc1: list = []
self.terminal_real_atom_cc2: list = []
self.terminal_dummy_atom_cc1: list = []
self.terminal_dummy_atom_cc2: list = []
self.bondCompare = rdFMCS.BondCompare.CompareAny
self.atomCompare = rdFMCS.AtomCompare.CompareElements
self.maximizeBonds: bool = True
self.matchValences: bool = False
self.completeRingsOnly: bool = False
self.ringMatchesRingOnly: bool = True
self.dummy_region_cc1: DummyRegion
self.dummy_region_cc2: DummyRegion
self.asfe: bool = False
self._check_cgenff_versions()
except:
logger.info(
"Only information about one structure, assume an ASFE simulation is requested"
)
mol1_name: str = "m1"
self.system: dict = {"system1": s1}
self.mols: dict = {mol1_name: s1.mol}
self.graphs: dict = {mol1_name: s1.graph}
# psfs for reference of only ligand
self.psfs: dict = {s1.psfs["waterbox"][f":{s1.tlc}"]}
self.psf1: pm.charmm.CharmmPsfFile = s1.psfs
self._substructure_match: dict = {mol1_name: []}
self.removed_indeces: dict = {mol1_name: []}
self.added_indeces: dict = {mol1_name: []}
self.s1_tlc = s1.tlc
self.asfe: bool = True
self.dummy_region_cc1: DummyRegion
def _check_cgenff_versions(self):
cgenff_sys1 = self.system["system1"].cgenff_version
cgenff_sys2 = self.system["system2"].cgenff_version
if cgenff_sys1 == cgenff_sys2:
pass
else:
raise RuntimeError(
f"CGenFF compatibility error. CGenFF: {cgenff_sys1} and CGenFF: {cgenff_sys2} are combined."
)
def _match_terminal_real_and_dummy_atoms_for_mol1(self):
"""
Matches the terminal real and dummy atoms and returns a dict with real atom idx as key and a set of dummy atoms that connect
to this real atom as a set
"""
return self._match_terminal_real_and_dummy_atoms(
self.mols["m1"], self.terminal_real_atom_cc1, self.terminal_dummy_atom_cc1
)
def _match_terminal_real_and_dummy_atoms_for_mol2(self) -> dict:
"""
Matches the terminal real and dummy atoms and returns a dict with real atom idx as key and a set of dummy atoms that connect
to this real atom as a set
"""
return self._match_terminal_real_and_dummy_atoms(
self.mols["m2"], self.terminal_real_atom_cc2, self.terminal_dummy_atom_cc2
)
@staticmethod
def _match_terminal_real_and_dummy_atoms(
mol, real_atoms_cc: list, dummy_atoms_cc: list
) -> dict:
"""
Matches the terminal real and dummy atoms and returns a dict with real atom idx as key and a set of dummy atoms that connect
to this real atom as a set
Parameters
----------
mol : [Chem.Mol]
The mol object with the real and dummy atoms
real_atoms_cc : list
list of real atom idx
dummy_atoms_cc : list
list of dummy atom idx
Returns
-------
[type]
[description]
"""
from collections import defaultdict
real_atom_match_dummy_atom = defaultdict(set)
for real_atom_idx in real_atoms_cc:
real_atom = mol.GetAtomWithIdx(real_atom_idx)
real_neighbors = [x.GetIdx() for x in real_atom.GetNeighbors()]
for dummy_atoms_idx in dummy_atoms_cc:
if dummy_atoms_idx in real_neighbors:
real_atom_match_dummy_atom[real_atom_idx].add(dummy_atoms_idx)
return real_atom_match_dummy_atom
    def _set_common_core_parameters(self):
        """Find the terminal dummy/real atoms of both common cores and match the
        terminal real atoms between cc1 and cc2.

        Populates ``terminal_dummy_atom_cc1/2``, ``terminal_real_atom_cc1/2``
        and ``matching_terminal_atoms_between_cc``.

        Raises:
            RuntimeError: if no terminal real atoms could be matched between
                the two common cores.
        """
        # find terminal atoms
        (
            self.terminal_dummy_atom_cc1,
            self.terminal_real_atom_cc1,
        ) = self._find_terminal_atom(self.get_common_core_idx_mol1(), self.mols["m1"])
        (
            self.terminal_dummy_atom_cc2,
            self.terminal_real_atom_cc2,
        ) = self._find_terminal_atom(self.get_common_core_idx_mol2(), self.mols["m2"])
        # match terminal real atoms between cc1 and cc2 that connect dummy atoms
        # (cc_idx_mol1[i] corresponds to cc_idx_mol2[i] -- ordering encodes the match)
        cc_idx_mol1 = self.get_common_core_idx_mol1()
        cc_idx_mol2 = self.get_common_core_idx_mol2()
        matching_terminal_atoms_between_cc = list()
        for cc1_idx, cc2_idx in zip(cc_idx_mol1, cc_idx_mol2):
            # both atoms are terminal -> a dummy region hangs off both sides
            if (
                cc1_idx in self.terminal_real_atom_cc1
                and cc2_idx in self.terminal_real_atom_cc2
            ):
                logger.info(
                    f"Dummy regions connect on the same terminal atoms. cc1: {cc1_idx} : cc2: {cc2_idx}"
                )
                matching_terminal_atoms_between_cc.append((cc1_idx, cc2_idx))
            # exactly one side is terminal -> a single dummy region connects here
            elif (
                cc1_idx in self.terminal_real_atom_cc1
                and cc2_idx not in self.terminal_real_atom_cc2
            ) or (
                cc1_idx not in self.terminal_real_atom_cc1
                and cc2_idx in self.terminal_real_atom_cc2
            ):
                logger.info(
                    f"Single dummy region connects on terminal atom. cc1: {cc1_idx} : cc2: {cc2_idx}"
                )
                matching_terminal_atoms_between_cc.append((cc1_idx, cc2_idx))
            else:
                pass
        if not matching_terminal_atoms_between_cc:
            raise RuntimeError(
                "No terminal real atoms were matched between the common cores. Aborting."
            )
        self.matching_terminal_atoms_between_cc = matching_terminal_atoms_between_cc
    def _match_terminal_dummy_atoms_between_common_cores(
        self,
        match_terminal_atoms_cc1: dict,
        match_terminal_atoms_cc2: dict,
    ) -> Tuple[list, list]:
        """Select, for each matched pair of terminal real atoms, the dummy atom
        that becomes the default LJ particle on each side.

        Args:
            match_terminal_atoms_cc1: terminal real atom idx -> bonded dummy atoms (mol1)
            match_terminal_atoms_cc2: terminal real atom idx -> bonded dummy atoms (mol2)

        Returns:
            Tuple of (lj_default atoms of cc1, lj_default atoms of cc2).

        Raises:
            NotImplementedError: if one side has no bonded dummy atom
                (hydrogen-to-dummy mutation is not supported).
        """
        cc1_idx = self._substructure_match["m1"]
        cc2_idx = self._substructure_match["m2"]
        lj_default_cc1 = []
        lj_default_cc2 = []
        # iterate through the common core substructure match (the order represents the matched atoms)
        for idx1, idx2 in zip(cc1_idx, cc2_idx):
            # if both atoms are terminal atoms connected dummy regions can be identified
            if (
                idx1 in match_terminal_atoms_cc1.keys()
                and idx2 in match_terminal_atoms_cc2.keys()
            ):
                connected_dummy_cc1 = list(match_terminal_atoms_cc1[idx1])
                connected_dummy_cc2 = list(match_terminal_atoms_cc2[idx2])
                if len(connected_dummy_cc1) == 1 and len(connected_dummy_cc2) == 1:
                    pass
                # multiple, possible dummy regions
                elif len(connected_dummy_cc1) > 1 or len(connected_dummy_cc2) > 1:
                    logger.critical("There is a dual junction. Be careful.")
                    # NOTE: For now we are just taking the non hydrogen atom
                    for atom_idx in connected_dummy_cc1:
                        if self.mols["m1"].GetAtomWithIdx(atom_idx).GetSymbol() != "H":
                            connected_dummy_cc1 = [atom_idx]
                            break
                    for atom_idx in connected_dummy_cc2:
                        if self.mols["m2"].GetAtomWithIdx(atom_idx).GetSymbol() != "H":
                            connected_dummy_cc2 = [atom_idx]
                            break
                # hydrogen mutates to dummy atom (but not a LJ particle)
                elif len(connected_dummy_cc1) == 0 or len(connected_dummy_cc2) == 0:
                    logger.debug("Hydrogen to dummy mutation")
                    raise NotImplementedError()
                lj_default_cc1.append(connected_dummy_cc1[0])
                lj_default_cc2.append(connected_dummy_cc2[0])
        return (lj_default_cc1, lj_default_cc2)
    @staticmethod
    def _calculate_order_of_LJ_mutations(
        connected_dummy_regions: list,
        match_terminal_atoms: dict,
        G: nx.Graph,
    ) -> list:
        """Determine the order in which LJ parameters of dummy atoms are turned off.

        Prefers the BFS-based implementation from the optional ``tf_routes``
        package; falls back to a DFS over each connected dummy region, rooted
        at the dummy atom that connects it to the real region, with the node
        order reversed (outermost atoms are mutated first).

        Returns:
            list of lists of atom indices, one per connected dummy region.
        """
        try:
            from tf_routes.routes import (
                _calculate_order_of_LJ_mutations_new as _calculate_order_of_LJ_mutations_with_bfs,
            )

            return _calculate_order_of_LJ_mutations_with_bfs(
                connected_dummy_regions, match_terminal_atoms, G
            )
        except ModuleNotFoundError:
            # tf_routes not installed -> DFS-based fallback
            ordered_LJ_mutations = []
            for real_atom in match_terminal_atoms:
                for dummy_atom in match_terminal_atoms[real_atom]:
                    for connected_dummy_region in connected_dummy_regions:
                        # stop at connected dummy region with specific dummy_atom in it
                        if dummy_atom not in connected_dummy_region:
                            continue

                        G_dummy = G.copy()
                        # delete all nodes not in dummy region
                        remove_nodes = [
                            node
                            for node in G.nodes()
                            if node not in connected_dummy_region
                        ]
                        for remove_node in remove_nodes:
                            G_dummy.remove_node(remove_node)

                        # root is the dummy atom that connects the real region with the dummy region
                        root = dummy_atom
                        edges = list(nx.dfs_edges(G_dummy, source=root))
                        nodes = [root] + [v for u, v in edges]
                        nodes.reverse()  # NOTE: reverse the mutation
                        ordered_LJ_mutations.append(nodes)

            return ordered_LJ_mutations
def _check_for_lp(
self,
odered_connected_dummy_regions_cc_with_lp: list,
psf: pm.charmm.CharmmPsfFile,
tlc: str,
name: str,
) -> list:
"""
With the help of parmed this function will look in the ordered_connected_dummy_regions list if
there is a atom which has lonepairs. It will check wheather the lp belongs to the common core or
to the dummy region and assign it into the sorted list accordingly.
"""
flat_ordered_connected_dummy_regions = [
item
for sublist in odered_connected_dummy_regions_cc_with_lp
for item in sublist
]
lp_dict_dummy_region = defaultdict(list)
lp_dict_common_core = defaultdict(list)
for atom in psf.view[f":{tlc}"].atoms:
if atom.name.find("LP") == False:
print(f"die Atome {atom}")
if atom.frame_type.atom1.idx in flat_ordered_connected_dummy_regions:
lp_dict_dummy_region[atom.frame_type.atom1.idx].append(atom.idx)
elif (
atom.frame_type.atom1.idx not in lp_dict_common_core
and name == "m1"
):
logger.info(f"Adding atom {atom.idx} to the common core of mol1")
self.add_idx_to_common_core_of_mol1([atom.idx])
elif (
atom.frame_type.atom1.idx not in lp_dict_common_core
and name == "m2"
):
logger.info(f"Adding atom {atom.idx} to the common core of mol1")
self.add_idx_to_common_core_of_mol2([atom.idx])
if lp_dict_dummy_region:
for i in odered_connected_dummy_regions_cc_with_lp:
lp_to_insert = []
for atom in i:
if atom in lp_dict_dummy_region.keys():
lp_to_insert.extend(lp_dict_dummy_region[atom])
for lp_num in reversed(lp_to_insert):
i.insert(0, lp_num)
logger.debug(
f"Orderd connected dummy atoms containing the lp {odered_connected_dummy_regions_cc_with_lp}"
)
return odered_connected_dummy_regions_cc_with_lp
def get_idx_of_all_atoms(
self,
mol1_name: str,
):
"""
Iterates over all atoms of the molecule and saves them as a list
----------
mol1_name: str
"""
s1 = []
for atom in self.psf1["waterbox"][f":{self.s1_tlc}"].atoms:
s1.append(atom.idx)
self._substructure_match[mol1_name] = list(s1)
def propose_common_core(self):
"""
Searches for the common core using the rdkit module, in case of asfe only a list of
atoms of the ligand is created
"""
if self.asfe:
self.get_idx_of_all_atoms("m1")
else:
# System for RBFE/RSFE contains two mols
mcs = self._find_mcs("m1", "m2")
return mcs
    def finish_common_core(
        self,
        connected_dummy_regions_cc1: list = [],
        connected_dummy_regions_cc2: list = [],
        odered_connected_dummy_regions_cc1: list = [],
        odered_connected_dummy_regions_cc2: list = [],
    ):
        """
        The dummy region is created and the final atoms connected to the CC are collected. It is possible
        to define a dummy region on its own or to change the ordering how the lj parameters of the
        heavy atoms in the dummy region are turned off.

        Any argument left empty is computed automatically; passing a value
        overrides the corresponding automatic step.

        Args:
            connected_dummy_regions_cc1 (list, optional): connected dummy regions of mol1.
            connected_dummy_regions_cc2 (list, optional): connected dummy regions of mol2.
            odered_connected_dummy_regions_cc1 (list, optional): mutation ordering for mol1.
            odered_connected_dummy_regions_cc2 (list, optional): mutation ordering for mol2.
        """
        # NOTE(review): mutable default arguments; they are only rebound below,
        # so this is benign, but None defaults would be safer.
        if not self.asfe:
            # set the terminal real/dummy atom indices
            self._set_common_core_parameters()
            # match the real/dummy atoms
            match_terminal_atoms_cc1 = (
                self._match_terminal_real_and_dummy_atoms_for_mol1()
            )
            match_terminal_atoms_cc2 = (
                self._match_terminal_real_and_dummy_atoms_for_mol2()
            )
            logger.info("Find connected dummy regions")
            # define connected dummy regions
            if not connected_dummy_regions_cc1:
                connected_dummy_regions_cc1 = self._find_connected_dummy_regions(
                    mol_name="m1",
                )
            if not connected_dummy_regions_cc2:
                connected_dummy_regions_cc2 = self._find_connected_dummy_regions(
                    mol_name="m2",
                )
            logger.debug(
                f"connected dummy regions for mol1: {connected_dummy_regions_cc1}"
            )
            logger.debug(
                f"connected dummy regions for mol2: {connected_dummy_regions_cc2}"
            )
            # calculate the ordering of LJ mutations
            if not odered_connected_dummy_regions_cc1:
                odered_connected_dummy_regions_cc1 = (
                    self._calculate_order_of_LJ_mutations(
                        connected_dummy_regions_cc1,
                        match_terminal_atoms_cc1,
                        self.graphs["m1"].copy(),
                    )
                )
            if not odered_connected_dummy_regions_cc2:
                odered_connected_dummy_regions_cc2 = (
                    self._calculate_order_of_LJ_mutations(
                        connected_dummy_regions_cc2,
                        match_terminal_atoms_cc2,
                        self.graphs["m2"].copy(),
                    )
                )
            logger.info(
                f"sorted connected dummy regions for mol1: {odered_connected_dummy_regions_cc1}"
            )
            logger.info(
                f"sorted connected dummy regions for mol2: {odered_connected_dummy_regions_cc2}"
            )
            # sort lone pairs into the dummy regions / common cores
            if odered_connected_dummy_regions_cc1:
                odered_connected_dummy_regions_cc1 = self._check_for_lp(
                    odered_connected_dummy_regions_cc1,
                    self.psf1["waterbox"],
                    self.s1_tlc,
                    "m1",
                )
            if odered_connected_dummy_regions_cc2:
                odered_connected_dummy_regions_cc2 = self._check_for_lp(
                    odered_connected_dummy_regions_cc2,
                    self.psf2["waterbox"],
                    self.s2_tlc,
                    "m2",
                )
            # find the atoms from dummy_region in s1 that needs to become lj default
            (
                lj_default_cc1,
                lj_default_cc2,
            ) = self._match_terminal_dummy_atoms_between_common_cores(
                match_terminal_atoms_cc1, match_terminal_atoms_cc2
            )
            self.dummy_region_cc1 = DummyRegion(
                mol_name="m1",
                tlc=self.s1_tlc,
                match_termin_real_and_dummy_atoms=match_terminal_atoms_cc1,
                connected_dummy_regions=odered_connected_dummy_regions_cc1,
                lj_default=lj_default_cc1,
            )
            self.dummy_region_cc2 = DummyRegion(
                mol_name="m2",
                tlc=self.s2_tlc,
                match_termin_real_and_dummy_atoms=match_terminal_atoms_cc2,
                connected_dummy_regions=odered_connected_dummy_regions_cc2,
                lj_default=lj_default_cc2,
            )
            # generate charge compensated psfs
            psf1, psf2 = self._prepare_cc_for_charge_transfer()
            self.charge_compensated_ligand1_psf = psf1
            self.charge_compensated_ligand2_psf = psf2
        else:
            # all atoms should become dummy atoms in the end
            central_atoms = nx.center(self.graphs["m1"])
            # Assure, that the central atom is no hydrogen
            for atom in self.psf1["waterbox"][f":{self.s1_tlc}"].atoms:
                if atom.idx in central_atoms:
                    if atom.name.startswith("H") == True:
                        raise RuntimeError(
                            f"One of the central atoms seems to be a hydrogen atom"
                        )
            # calculate the ordering of LJ mutations
            if not odered_connected_dummy_regions_cc1:
                odered_connected_dummy_regions_cc1 = (
                    calculate_order_of_LJ_mutations_asfe(
                        central_atoms,
                        self.graphs["m1"].copy(),
                    )
                )
            if odered_connected_dummy_regions_cc1:
                odered_connected_dummy_regions_cc1 = self._check_for_lp(
                    odered_connected_dummy_regions_cc1,
                    self.psf1["waterbox"],
                    self.s1_tlc,
                    "m1",
                )
            self.dummy_region_cc1 = DummyRegion(
                mol_name="m1",
                tlc=self.s1_tlc,
                match_termin_real_and_dummy_atoms=[],
                connected_dummy_regions=odered_connected_dummy_regions_cc1,
                lj_default=[],
            )
def calculate_common_core(self):
self.propose_common_core()
self.finish_common_core()
    def _prepare_cc_for_charge_transfer(self):
        """Return copies of both ligand psfs with all dummy-region charges zeroed.

        Runs the same electrostatic mutation (lambda = 0) that is later applied
        to the dummy regions, so that the resulting psfs carry the charge
        distribution AFTER the full mutation.

        Returns:
            tuple: (charge-compensated psf of mol1, charge-compensated psf of mol2)
        """
        # we have to run the same charge mutation that will be run on cc2 to get the
        # charge distribution AFTER the full mutation

        # make a copy of the full psf (parmed slice -> deep copy of the structure)
        m2_psf = self.psfs["m2"][:, :, :]
        m1_psf = self.psfs["m1"][:, :, :]
        charge_transformed_psfs = []
        for psf, tlc, cc_idx, dummy_region in zip(
            [m1_psf, m2_psf],
            [self.s1_tlc, self.s2_tlc],
            [self.get_common_core_idx_mol1(), self.get_common_core_idx_mol2()],
            [self.dummy_region_cc1, self.dummy_region_cc2],
        ):
            # set `initial_charge` parameter for Mutation
            for atom in psf.view[f":{tlc}"].atoms:
                # charge, epsilon and rmin are directly modified
                atom.initial_charge = atom.charge
            # atom indices in the psf are global; offset maps them to 0-based ligand indices
            offset = min([atom.idx for atom in psf.view[f":{tlc}"].atoms])
            # getting copy of the atoms
            atoms_to_be_mutated = []
            for atom in psf.view[f":{tlc}"].atoms:
                idx = atom.idx - offset
                if idx not in cc_idx:
                    atoms_to_be_mutated.append(idx)
            logger.debug("############################")
            logger.debug("Preparing cc2 for charge transfer")
            logger.debug(
                f"Atoms for which charge is set to zero: {atoms_to_be_mutated}"
            )
            logger.debug("############################")
            m = Mutation(
                atoms_to_be_mutated=atoms_to_be_mutated, dummy_region=dummy_region
            )
            # zero out the electrostatics of all dummy atoms
            m.mutate(psf, lambda_value_electrostatic=0.0)
            charge_transformed_psfs.append(psf)
        return charge_transformed_psfs[0], charge_transformed_psfs[1]
def remove_idx_from_common_core_of_mol1(self, idx_list: list):
for idx in idx_list:
self._remove_idx_from_common_core("m1", idx)
def remove_idx_from_common_core_of_mol2(self, idx_list: list):
for idx in idx_list:
self._remove_idx_from_common_core("m2", idx)
def _remove_idx_from_common_core(self, name: str, idx: int):
if idx in self.added_indeces[name] or idx in self._get_common_core(name):
if idx in self.removed_indeces[name]:
print(f"Idx: {idx} already removed from common core.")
return
self.removed_indeces[name].append(idx)
else:
print(f"Idx: {idx} not in common core.")
def add_idx_to_common_core_of_mol1(self, idx_list: list):
"""Adds a list of atoms to the common core of molecule 1
.. caution::
Be aware of the ordering! Atom idx need to be added to match the ordering of the atom idx of common core 2
Args:
idx_list: Array of atom idxs to add
"""
for idx in idx_list:
self._add_common_core_atom("m1", idx)
logger.warning(
f"ATTENTION: Be aware of the ordering! Atom idx need to be added to match the ordering of the atom idx of common core 2"
)
logger.info(
f"Atom idx of the new common core: {self.get_common_core_idx_mol1()}"
)
def add_idx_to_common_core_of_mol2(self, idx_list: list):
"""Adds a list of atoms to the common core of molecule 1
.. caution::
Be aware of the ordering! Atom idx need to be added to match the ordering of the atom idx of common core 2
Args:
idx_list: Array of atom idxs to add
"""
for idx in idx_list:
self._add_common_core_atom("m2", idx)
logger.warning(
f"ATTENTION: Be aware of the ordering! Atom idx need to be added to match the ordering of the atom idx of common core 1"
)
logger.info(
f" Atom idx of the new common core: {self.get_common_core_idx_mol2()}"
)
def _add_common_core_atom(self, name: str, idx: int):
if idx in self.added_indeces[name] or idx in self._get_common_core(name):
print(f"Idx: {idx} already in common core.")
return
self.added_indeces[name].append(idx)
def get_idx_not_in_common_core_for_mol1(self) -> list:
return self._get_idx_not_in_common_core_for_mol("m1")
def get_idx_not_in_common_core_for_mol2(self) -> list:
return self._get_idx_not_in_common_core_for_mol("m2")
def _get_idx_not_in_common_core_for_mol(self, mol_name: str) -> list:
dummy_list_mol = [
atom.GetIdx()
for atom in self.mols[mol_name].GetAtoms()
if atom.GetIdx() not in self._get_common_core(mol_name)
]
return dummy_list_mol
def get_common_core_idx_mol1(self) -> list:
"""
Returns the common core of mol1.
"""
return self._get_common_core("m1")
def get_common_core_idx_mol2(self) -> list:
"""
Returns the common core of mol2.
"""
return self._get_common_core("m2")
def _get_common_core(self, name: str) -> list:
"""
Helper Function - should not be called directly.
Returns the common core.
"""
keep_idx = []
# BEWARE: the ordering is important - don't cast set!
for idx in self._substructure_match[name] + self.added_indeces[name]:
if idx not in self.removed_indeces[name]:
keep_idx.append(idx)
return keep_idx
    def _find_mcs(
        self,
        mol1_name: str,
        mol2_name: str,
        iterate_over_matches: bool = False,
        max_matches: int = 10,
    ):
        """Find the maximum common substructure (MCS) between the two molecules
        and store the per-molecule atom mappings in ``self._substructure_match``.

        Hydrogens are stripped before the MCS search; afterwards, for every
        matched heavy-atom pair the minimum number of attached hydrogens on
        either side is added back to both common cores.

        Parameters
        ----------
        mol1_name: str
        mol2_name: str
        iterate_over_matches: bool, optional
            If True, iterate over up to ``max_matches`` substructure matches per
            molecule and keep the pair yielding the largest common core (i.e.
            the one with the most hydrogens).
        max_matches: int, optional
            Upper bound on substructure matches considered per molecule.

        Returns
        -------
        The rdkit MCS result object.
        """
        logger.info("MCS starting ...")
        logger.debug(f"bondCompare: {self.bondCompare}")
        logger.debug(f"atomCompare: {self.atomCompare}")
        logger.debug(f"maximizeBonds: {self.maximizeBonds}")
        logger.debug(f"matchValences: {self.matchValences} ")
        logger.debug(f"ringMatchesRingOnly: {self.ringMatchesRingOnly} ")
        logger.debug(f"completeRingsOnly: {self.completeRingsOnly} ")
        m1, m2 = [deepcopy(self.mols[mol1_name]), deepcopy(self.mols[mol2_name])]
        # second copy of mols - to use as representation with removed hydrogens
        remmol1 = deepcopy(m1)
        remmol2 = deepcopy(m2)
        # removal of hydrogens - if not removed, common core for molecule + hydrogens is computed!
        remmol1 = Chem.rdmolops.RemoveAllHs(remmol1)
        remmol2 = Chem.rdmolops.RemoveAllHs(remmol2)
        # remmols contains both molecules with removed hydrogens
        remmols = [remmol1, remmol2]
        for m in [m1, m2]:
            logger.debug("Mol in SMILES format: {}.".format(Chem.MolToSmiles(m, True)))
        # make copy of mols
        # NOTE(review): changed_mols is no longer used by the active FindMCS
        # call below (kept for reference with the commented-out variant)
        changed_mols = [Chem.Mol(x) for x in [m1, m2]]
        # find substructure match (ignore bond order but enforce element matching)
        # findmcs-function is called for mol-objects with removed hydrogens
        # original Transformato-parameters (yield bad / for Transformato not usable results for molecules with cyclic structures, e.g., ccores between 2-CPI and 7-CPI)
        # especially because completeRingsOnly is set to False
        """
        mcs = rdFMCS.FindMCS(
            #changed_mols,
            remmols,
            bondCompare=self.bondCompare,
            timeout=120,
            atomCompare=self.atomCompare,
            maximizeBonds=self.maximizeBonds,
            matchValences=self.matchValences,
            completeRingsOnly=self.completeRingsOnly,
            ringMatchesRingOnly=self.ringMatchesRingOnly,
        )
        """
        # find_mcs-function from tf_routes:
        # yields more reasonable common cores (e.g. for 2-CPI/7-CPI )
        # in particular, completeRingsOnly=True is important
        mcs = rdFMCS.FindMCS(
            remmols,
            timeout=120,
            ringMatchesRingOnly=True,
            completeRingsOnly=True,
            ringCompare=Chem.rdFMCS.RingCompare.StrictRingFusion,
            bondCompare=rdFMCS.BondCompare.CompareAny,
            matchValences=False,
        )
        logger.debug("Substructure match: {}".format(mcs.smartsString))
        # convert from SMARTS
        mcsp = Chem.MolFromSmarts(mcs.smartsString, False)
        # iterate_over_matches == False: the common core atoms for a single stubstructure match are determined
        # possibly a different match yields a bigger ccore - i.e. a ccore with more hydrogens (neopentane - methane)
        if iterate_over_matches == False:
            s1 = m1.GetSubstructMatch(mcsp)
            logger.debug("Substructere match idx: {}".format(s1))
            self._show_common_core(
                m1, self.get_common_core_idx_mol1(), show_atom_type=False, internal=True
            )
            s2 = m2.GetSubstructMatch(mcsp)
            logger.debug("Substructere match idx: {}".format(s2))
            self._show_common_core(
                m2, self.get_common_core_idx_mol2(), show_atom_type=False, internal=True
            )
            # new code: add hydrogens to both common-core-on-molecule-projections
            # set with all common core atom indices for both molecules
            hit_ats1_compl = list(s1)
            hit_ats2_compl = list(s2)
            # check for each common core atom whether hydrogen atoms are in its neighbourhood
            # s1/s2 contain the mapping of the common core (without hydrogens) to both molecules
            # iterating over all mapped atoms, the number of hydrogens attached to the common core atom is determined
            # the minimum number (i.e. if the atom of molecule 1 has one hydrogen bond, the atom of molecule 2 zero hydrogen bonds, it is zero) gives the number of hydrogen atoms to add to the common core
            for indexpos, indexnr in enumerate(s1):
                # get mapped atoms
                atom1 = m1.GetAtomWithIdx(s1[indexpos])
                atom2 = m2.GetAtomWithIdx(s2[indexpos])
                # determine number of hydrogens in the neighbourhood of the atom from molecule1
                h_atoms1 = 0
                for x in atom1.GetNeighbors():
                    if x.GetSymbol() == "H":
                        h_atoms1 = h_atoms1 + 1
                # determine number of hydrogens in the neighbourhood of the atom from molecule2
                h_atoms2 = 0
                for x in atom2.GetNeighbors():
                    if x.GetSymbol() == "H":
                        h_atoms2 = h_atoms2 + 1
                # find minimum number of hydrogens
                min_h_atoms = min(h_atoms1, h_atoms2)
                # add minimum number of hydrogens to the ccore for molecule1
                h_atoms1 = 0
                for x in atom1.GetNeighbors():
                    if x.GetSymbol() == "H" and h_atoms1 < min_h_atoms:
                        hit_ats1_compl.append(x.GetIdx())
                        h_atoms1 = h_atoms1 + 1
                # add minimum number of hydrogens to the ccore for molecule2
                h_atoms2 = 0
                for x in atom2.GetNeighbors():
                    if x.GetSymbol() == "H" and h_atoms2 < min_h_atoms:
                        hit_ats2_compl.append(x.GetIdx())
                        h_atoms2 = h_atoms2 + 1
            # create new tuple of common core atom indices with additional hydrogens (molecule 1)
            hit_ats1 = tuple(hit_ats1_compl)
            # create new tuple of common core atom indices with additional hydrogens (molecule 2)
            hit_ats2 = tuple(hit_ats2_compl)
            self._substructure_match[mol1_name] = list(hit_ats1)
            self._substructure_match[mol2_name] = list(hit_ats2)
            # self._substructure_match[mol1_name] = list(s1)
            # self._substructure_match[mol2_name] = list(s2)
            return mcs
        # iterate_over_matches == True: it is iterated over all pairs of substructure matches
        # the substructure matches with the biggest emering common cores are finally chosen
        # the common cores for different substructure match pairs contain the same heavy atoms, but differ in the number of hydrogens, i.e. the finally chosen matches have the common cores with most hydrogens
        else:
            s1s = m1.GetSubstructMatches(mcsp, maxMatches=max_matches)
            logger.debug("Substructere match idx: {}".format(s1s))
            self._show_common_core(
                m1, self.get_common_core_idx_mol1(), show_atom_type=False, internal=True
            )
            s2s = m2.GetSubstructMatches(mcsp, maxMatches=max_matches)
            logger.debug("Substructere match idx: {}".format(s2s))
            self._show_common_core(
                m2, self.get_common_core_idx_mol2(), show_atom_type=False, internal=True
            )
            curr_size_of_ccores = 0
            for s1 in s1s:
                for s2 in s2s:
                    # new code: add hydrogens to both common-core-on-molecule-projections
                    # set with all common core atom indices for both molecules
                    hit_ats1_compl = list(s1)
                    hit_ats2_compl = list(s2)
                    # check for each common core atom whether hydrogen atoms are in its neighbourhood
                    # s1/s2 contain the mapping of the common core (without hydrogens) to both molecules
                    # iterating over all mapped atoms, the number of hydrogens attached to the common core atom is determined
                    # the minimum number (i.e. if the atom of molecule 1 has one hydrogen bond, the atom of molecule 2 zero hydrogen bonds, it is zero) gives the number of hydrogen atoms to add to the common core
                    for indexpos, indexnr in enumerate(s1):
                        # get mapped atoms
                        atom1 = m1.GetAtomWithIdx(s1[indexpos])
                        atom2 = m2.GetAtomWithIdx(s2[indexpos])
                        # determine number of hydrogens in the neighbourhood of the atom from molecule1
                        h_atoms1 = 0
                        for x in atom1.GetNeighbors():
                            if x.GetSymbol() == "H":
                                h_atoms1 = h_atoms1 + 1
                        # determine number of hydrogens in the neighbourhood of the atom from molecule2
                        h_atoms2 = 0
                        for x in atom2.GetNeighbors():
                            if x.GetSymbol() == "H":
                                h_atoms2 = h_atoms2 + 1
                        # find minimum number of hydrogens
                        min_h_atoms = min(h_atoms1, h_atoms2)
                        # add minimum number of hydrogens to the ccore for molecule1
                        h_atoms1 = 0
                        for x in atom1.GetNeighbors():
                            if x.GetSymbol() == "H" and h_atoms1 < min_h_atoms:
                                hit_ats1_compl.append(x.GetIdx())
                                h_atoms1 = h_atoms1 + 1
                        # add minimum number of hydrogens to the ccore for molecule2
                        h_atoms2 = 0
                        for x in atom2.GetNeighbors():
                            if x.GetSymbol() == "H" and h_atoms2 < min_h_atoms:
                                hit_ats2_compl.append(x.GetIdx())
                                h_atoms2 = h_atoms2 + 1
                    # count whether the new common cores are bigger (i.e. contain more hydrogens) than the previous common cores
                    # if this is the case, the current substructure matches are chosen
                    if len(hit_ats1_compl) > curr_size_of_ccores:
                        curr_size_of_ccores = len(hit_ats1_compl)
                        hit_ats1_compl_final = hit_ats1_compl
                        hit_ats2_compl_final = hit_ats2_compl
            # NOTE(review): if no substructure match exists, hit_ats1_compl_final
            # is never bound and the next line raises NameError -- verify callers
            # guarantee a non-empty match.
            # create new tuple of common core atom indices with additional hydrogens (molecule 1)
            hit_ats1 = tuple(hit_ats1_compl_final)
            # create new tuple of common core atom indices with additional hydrogens (molecule 2)
            hit_ats2 = tuple(hit_ats2_compl_final)
            self._substructure_match[mol1_name] = list(hit_ats1)
            self._substructure_match[mol2_name] = list(hit_ats2)
            # self._substructure_match[mol1_name] = list(s1)
            # self._substructure_match[mol2_name] = list(s2)
            return mcs
def _return_atom_idx_from_bond_idx(self, mol: Chem.Mol, bond_idx: int):
return (
mol.GetBondWithIdx(bond_idx).GetBeginAtomIdx(),
mol.GetBondWithIdx(bond_idx).GetEndAtomIdx(),
)
def _find_connected_dummy_regions(self, mol_name: str) -> List[set]:
sub = self._get_common_core(mol_name)
#############################
# start
#############################
mol = self.mols[mol_name]
G = self.graphs[mol_name].copy()
# find all dummy atoms
list_of_dummy_atoms_idx = [
atom.GetIdx() for atom in mol.GetAtoms() if atom.GetIdx() not in sub
]
nr_of_dummy_atoms = len(list_of_dummy_atoms_idx) + 1
list_of_real_atoms_idx = [
atom.GetIdx() for atom in mol.GetAtoms() if atom.GetIdx() in sub
]
# remove real atoms from graph to obtain multiple connected compounds
for real_atom_idx in list_of_real_atoms_idx:
G.remove_node(real_atom_idx)
# find these connected compounds
from networkx.algorithms.components import connected_components
unique_subgraphs = [
c for c in sorted(nx.connected_components(G), key=len, reverse=True)
]
return unique_subgraphs
def show_common_core_on_mol1(self, show_atom_types: bool = False):
"""
Shows common core on mol1
"""
return self._show_common_core(
self.mols["m1"],
self.get_common_core_idx_mol1(),
show_atom_types,
internal=False,
)
def show_common_core_on_mol2(self, show_atom_types: bool = False):
"""
Shows common core on mol2
"""
return self._show_common_core(
self.mols["m2"],
self.get_common_core_idx_mol2(),
show_atom_types,
internal=False,
)
    def _show_common_core(
        self, mol, highlight: list, show_atom_type: bool, internal: bool
    ):
        """
        Helper function - do not call directly.
        Draw *mol* as an SVG with the atoms in *highlight* marked.

        Parameters
        ----------
        mol : rdkit molecule to draw (a deep copy is made; the input is not modified)
        highlight : list
            atom indices to highlight (the common core)
        show_atom_type : bool
            if True, label every atom with "atom_index:atom_type"
        internal : bool
            if True, additionally display the SVG inline (notebook use)

        Returns
        -------
        str : the SVG drawing text
        """
        # https://rdkit.blogspot.com/2015/02/new-drawing-code.html
        mol = deepcopy(mol)
        drawer = rdMolDraw2D.MolDraw2DSVG(500, 500)
        drawer.SetFontSize(6)
        opts = drawer.drawOptions()
        if show_atom_type:
            # label: "atom_index:atom_type"
            for i in mol.GetAtoms():
                opts.atomLabels[i.GetIdx()] = (
                    str(i.GetProp("atom_index")) + ":" + i.GetProp("atom_type")
                )
        elif mol.GetNumAtoms() < 30:
            # for small molecules, label with "atom_index:atom_name" instead
            for i in mol.GetAtoms():
                opts.atomLabels[i.GetIdx()] = (
                    str(i.GetProp("atom_index")) + ":" + i.GetProp("atom_name")
                )
        rdCoordGen.AddCoords(mol)  # create 2D coordinates for depiction
        drawer.DrawMolecule(mol, highlightAtoms=highlight)
        drawer.FinishDrawing()
        # strip the "svg:" namespace prefix so the text renders in notebooks
        svg = drawer.GetDrawingText().replace("svg:", "")
        if internal:
            display(SVG(svg))
        return svg
    def generate_mutations_to_common_core_for_mol1(self) -> dict:
        """
        Generates the mutation route to the common core for mol1.

        Returns
        ----------
        mutations: dict
            maps mutation category (e.g. "charge", "lj", "transform") to the
            corresponding mutation definitions
        """
        m = self._mutate_to_common_core(
            self.dummy_region_cc1, self.get_common_core_idx_mol1(), mol_name="m1"
        )
        # the common-core transformation is only needed for relative free
        # energies, not for absolute solvation free energies (asfe)
        if not self.asfe:
            m["transform"] = self._transform_common_core()
        return m
    def generate_mutations_to_common_core_for_mol2(self) -> dict:
        """
        Generates the mutation route to the common core for mol2.

        Returns
        ----------
        mutations: dict
            maps mutation category (e.g. "charge", "lj") to the corresponding
            mutation definitions
        """
        # terminal_real_atom_cc1 is only populated after the MCS search ran
        if not self.terminal_real_atom_cc1:
            raise RuntimeError("First generate the MCS")
        m = self._mutate_to_common_core(
            self.dummy_region_cc2, self.get_common_core_idx_mol2(), mol_name="m2"
        )
        return m
    def _transform_common_core(self) -> list:
        """
        Common Core 1 is transformed to Common core 2. Bonded parameters and charges are adjusted.

        Returns
        ----------
        transformations : list
            a single-element list with a CommonCoreTransformation, or an empty
            list if neither charges nor bonded terms differ between the cores
        """
        transformations = []
        logger.warning("##############################")
        logger.warning("##############################")
        logger.warning("Transform common core")
        logger.warning("##############################")
        logger.warning("##############################")
        # test if bonded mutations are necessary
        bonded_terms_mutation = False
        charge_mutation = False
        # compare atom types of the matched common-core (plus default-LJ) atoms
        for cc1, cc2 in zip(
            self.get_common_core_idx_mol1() + self.dummy_region_cc1.lj_default,
            self.get_common_core_idx_mol2() + self.dummy_region_cc2.lj_default,
        ):
            # did atom type change? if not don't add BondedMutations
            atom1 = self.psfs["m1"][cc1]
            atom2 = self.psfs["m2"][cc2]
            if atom1.type != atom2.type:
                logger.warning("##############################")
                logger.warning("Atom type transformation")
                logger.warning(f"Atom that needs to be transformed: {atom1}.")
                logger.warning(f"Atom type of atom in cc1: {atom1.type}.")
                logger.warning(f"Template atom: {atom2}.")
                logger.warning(f"Atom type of atom in cc2: {atom2.type}.")
                bonded_terms_mutation = True
        # compare charges of the matched common-core atoms (using the
        # charge-compensated psfs)
        for cc1, cc2 in zip(
            self.get_common_core_idx_mol1(), self.get_common_core_idx_mol2()
        ):
            atom1 = self.charge_compensated_ligand1_psf[cc1]
            atom2 = self.charge_compensated_ligand2_psf[cc2]
            if atom1.charge != atom2.charge:
                logger.warning("##############################")
                logger.warning("Charge transformation")
                logger.warning("Charge needs to be transformed on common core")
                logger.warning(f"Atom that needs to be transformed: {atom1}.")
                logger.warning(f"Atom charge of atom in cc1: {atom1.charge}.")
                logger.warning(f"Template atom: {atom2}.")
                logger.warning(f"Atom charge of atom in cc2: {atom2.charge}.")
                charge_mutation = True
        # if necessary transform bonded parameters
        if bonded_terms_mutation or charge_mutation:
            logger.warning(f"Bonded parameters mutation: {bonded_terms_mutation}.")
            logger.warning(f"Charge parameters mutation: {charge_mutation}.")
            t = CommonCoreTransformation(
                self.get_common_core_idx_mol1() + self.dummy_region_cc1.lj_default,
                self.get_common_core_idx_mol2() + self.dummy_region_cc2.lj_default,
                self.psfs["m1"],
                self.psfs["m2"],
                self.s1_tlc,
                self.s2_tlc,
                self.charge_compensated_ligand2_psf,
                charge_mutation=charge_mutation,
                bonded_terms_mutation=bonded_terms_mutation,
            )
            transformations.append(t)
        else:
            logger.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            logger.info("No transformations needed.")
            logger.info("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            transformations = []
        return transformations
@staticmethod
def _find_terminal_atom(cc_idx: list, mol: Chem.Mol) -> Tuple[list, list]:
"""
Find atoms that connect the molecule to the common core.
Args:
cc_idx (list): common core index atoms
mol ([type]): rdkit mol object
"""
terminal_dummy_atoms = []
terminal_real_atoms = []
for atom in mol.GetAtoms():
idx = atom.GetIdx()
if idx not in cc_idx:
neighbors = [x.GetIdx() for x in atom.GetNeighbors()]
if any([n in cc_idx for n in neighbors]):
terminal_dummy_atoms.append(idx)
if idx in cc_idx:
neighbors = [x.GetIdx() for x in atom.GetNeighbors()]
if any([n not in cc_idx for n in neighbors]):
terminal_real_atoms.append(idx)
logger.info(f"Terminal dummy atoms: {str(list(set(terminal_dummy_atoms)))}")
logger.info(f"Terminal real atoms: {str(list(set(terminal_real_atoms)))}")
return (list(set(terminal_dummy_atoms)), list(set(terminal_real_atoms)))
def _mutate_to_common_core(
self, dummy_region: DummyRegion, cc_idx: list, mol_name: str
) -> dict:
"""
Helper function - do not call directly.
Generates the mutation route to the common fore for mol.
"""
mutations = defaultdict(list)
tlc = self.s1_tlc
if self.asfe:
psf = self.psf1["waterbox"]
cc_idx = [] # no CC in ASFE
list_termin_dummy_atoms = []
else:
# copy of the currently used psf
psf = self.psfs[f"{mol_name}"][:, :, :]
# only necessary for relative binding/solvation free energies
# get the atom that connects the common core to the dummy regiom
match_termin_real_and_dummy_atoms = (
dummy_region.match_termin_real_and_dummy_atoms
)
# get the terminal dummy atoms
list_termin_dummy_atoms = []
for m in match_termin_real_and_dummy_atoms.values():
list_termin_dummy_atoms.extend(list(m))
logger.info(f"Terminal dummy atoms: {list_termin_dummy_atoms}")
if mol_name == "m2":
tlc = self.s2_tlc
# iterate through atoms and select atoms that need to be mutated
atoms_to_be_mutated = []
hydrogens = []
for atom in psf.view[f":{tlc}"].atoms:
# idx = atom.idx - self.offset
idx = atom.idx
if idx not in cc_idx:
if atom.name.find("H") == False and idx not in list_termin_dummy_atoms:
hydrogens.append(idx)
atoms_to_be_mutated.append(idx)
logger.info(
"Will be decoupled: Idx:{} Element:{}".format(idx, atom.name)
)
if atoms_to_be_mutated:
############################################
############################################
# charge mutation
############################################
############################################
m = MutationDefinition(
atoms_to_be_mutated=atoms_to_be_mutated,
common_core=cc_idx,
dummy_region=dummy_region,
vdw_atom_idx=[],
steric_mutation_to_default=False,
)
mutations["charge"].append(m)
############################################
############################################
# LJ mutation
############################################
############################################
# start with mutation of LJ of hydrogens
# Only take hydrogens that are not terminal hydrogens
if hydrogens:
m = MutationDefinition(
atoms_to_be_mutated=atoms_to_be_mutated,
common_core=cc_idx,
dummy_region=dummy_region,
vdw_atom_idx=hydrogens,
steric_mutation_to_default=False,
)
mutations["hydrogen-lj"].append(m)
for region in dummy_region.connected_dummy_regions:
for atom_idx in region:
if (
atom_idx in list_termin_dummy_atoms
and atom_idx in dummy_region.lj_default
):
# test if atom is a terminal atom and there is a corresponding atom on the other cc
# in this case the atom needs to become a default lj particle
m = MutationDefinition(
atoms_to_be_mutated=atoms_to_be_mutated,
common_core=cc_idx,
dummy_region=dummy_region,
vdw_atom_idx=[atom_idx],
steric_mutation_to_default=True,
)
mutations["default-lj"].append(m)
elif atom_idx in hydrogens or psf[atom_idx].type == "LPH":
# already mutated
continue
else:
# normal lj mutation
m = MutationDefinition(
atoms_to_be_mutated=atoms_to_be_mutated,
common_core=cc_idx,
dummy_region=dummy_region,
vdw_atom_idx=[atom_idx],
steric_mutation_to_default=False,
)
mutations["lj"].append(m)
else:
logger.critical("No atoms will be decoupled.")
mutations = defaultdict()
return mutations
class CommonCoreTransformation(object):
    """
    Interpolates the charges and/or bonded parameters of common core 1
    towards the corresponding parameters of common core 2 as a function of
    a lambda value.
    """

    def __init__(
        self,
        cc1_indicies: list,
        cc2_indicies: list,
        ligand1_psf: pm.charmm.CharmmPsfFile,
        ligand2_psf: pm.charmm.CharmmPsfFile,
        tlc_cc1: str,
        tlc_cc2: str,
        charge_compensated_ligand2_psf: pm.charmm.CharmmPsfFile,
        charge_mutation: bool,
        bonded_terms_mutation: bool,
    ):
        """
        Scale the bonded parameters inside the common core.
        Parameters
        ----------
        cc1_indicies : list
            indices of cc1
        cc2_indicies : list
            indices of cc2 (in the same order as cc1)
        ligand1_psf : pm.charmm.CharmmPsfFile (copy of only ligand)
        ligand2_psf : pm.charmm.CharmmPsfFile (copy of only ligand)
            the target psf that is used to generate the new bonded parameters
        tlc_cc1 : str
            three letter code of ligand in cc1
        tlc_cc2 : str
            three letter code of ligand in cc2
        charge_compensated_ligand2_psf : pm.charmm.CharmmPsfFile
            ligand 2 psf with charges compensated for the dummy region
        charge_mutation : bool
            interpolate charges between the common cores
        bonded_terms_mutation : bool
            interpolate atom/bond/angle/torsion parameters between the cores
        """
        self.cc1_indicies: list = cc1_indicies
        self.cc2_indicies: list = cc2_indicies
        self.ligand2_psf: pm.charmm.CharmmPsfFile = ligand2_psf
        self.ligand1_psf: pm.charmm.CharmmPsfFile = ligand1_psf
        self.tlc_cc1: str = tlc_cc1
        self.tlc_cc2: str = tlc_cc2
        self.atom_names_mapping = self._get_atom_mapping()
        self.charge_mutation: bool = charge_mutation
        self.bonded_terms_mutation: bool = bonded_terms_mutation
        self.charge_compensated_ligand2_psf: pm.charmm.CharmmPsfFile = (
            charge_compensated_ligand2_psf
        )
        logger.info(f"Bonded terms mutation: {bonded_terms_mutation}")
        logger.info(f"Charge mutation: {charge_mutation}")

    def _get_atom_mapping(self) -> dict:
        """
        _get_atom_mapping -- match the atom names of the common cores
        Returns
        -------
        [dict]
            matched common core atom names (cc1 name -> cc2 name)
        """
        # Prepare Variables to use for restraint cc checks
        # NOTE(review): these module-level globals are presumably read by the
        # restraint code elsewhere -- verify before changing
        global cc_names_struc1, cc_names_struc2
        cc_names_struc1 = []
        cc_names_struc2 = []
        # match atoms in common cores (cc1 and cc2 indices are paired by order)
        match_atom_names_cc1_to_cc2 = {}
        for cc1_idx, cc2_idx in zip(self.cc1_indicies, self.cc2_indicies):
            ligand1_atom = self.ligand1_psf[cc1_idx]
            ligand2_atom = self.ligand2_psf[cc2_idx]
            match_atom_names_cc1_to_cc2[ligand1_atom.name] = ligand2_atom.name
            cc_names_struc1.append(ligand1_atom.name)
            cc_names_struc2.append(ligand2_atom.name)
        print(f"CC Struc1: {cc_names_struc1}")
        print(f"CC Struc2: {cc_names_struc2}")
        return match_atom_names_cc1_to_cc2

    def _mutate_charges(self, psf: pm.charmm.CharmmPsfFile, scale: float):
        """Interpolate cc1 charges towards the charge-compensated cc2 charges."""
        # common core of psf 1 is transformed to psf 2
        for ligand1_atom in psf.view[f":{self.tlc_cc1}"]:
            if ligand1_atom.name not in self.atom_names_mapping:
                continue
            found = False
            # compare to charge compensated psf 2
            for ligand2_atom in self.charge_compensated_ligand2_psf:
                if self.atom_names_mapping[ligand1_atom.name] == ligand2_atom.name:
                    found = True
                    # are the atoms different?
                    logger.debug(f"Modifying atom: {ligand1_atom}")
                    logger.debug(f"Template atom: {ligand2_atom}")
                    # linear interpolation: scale=1 keeps cc1, scale=0 yields cc2
                    modified_charge = (
                        scale * ligand1_atom.charge + (1 - scale) * ligand2_atom.charge
                    )
                    logger.debug(
                        f"Current charge: {ligand1_atom.charge}; target charge: {ligand2_atom.charge}; modified charge: {modified_charge}"
                    )
                    ligand1_atom.charge = modified_charge
            if not found:
                raise RuntimeError("No corresponding atom in cc2 found")

    def _mutate_atoms(self, psf: pm.charmm.CharmmPsfFile, lambda_value: float):
        """
        mutate atom types.
        Raises
        ------
        RuntimeError
            if common core atoms can not be matched
        """
        # what will be changed
        mod_type = namedtuple("Atom", "epsilon, rmin")
        logger.debug("#######################")
        logger.debug("mutate_atoms")
        # iterate through the atoms of the ligand of system1
        for ligand1_atom in psf.view[f":{self.tlc_cc1}"]:
            # continue if not in atom_names_mapping
            if ligand1_atom.name not in self.atom_names_mapping:
                continue
            found = False
            # iterate through the atoms the ligand of system2
            for ligand2_atom in self.ligand2_psf:
                # is there a match up?
                if self.atom_names_mapping[ligand1_atom.name] == ligand2_atom.name:
                    found = True
                    # are the atoms different?
                    if ligand1_atom.type != ligand2_atom.type:
                        if "DDX" in ligand1_atom.type:
                            logger.warning(
                                "This is the terminal LJ atom. If everything went correct, this does not have to change atom types."
                            )
                        else:
                            self._modify_type_in_cc(ligand1_atom, psf)
                        logger.debug(f"Modifying atom: {ligand1_atom}")
                        logger.debug(f"Template atom: {ligand2_atom}")
                        # scale epsilon
                        modified_epsilon = (
                            lambda_value * ligand1_atom.epsilon
                            + (1.0 - lambda_value) * ligand2_atom.epsilon
                        )
                        # scale rmin
                        modified_rmin = (
                            lambda_value * ligand1_atom.rmin
                            + (1.0 - lambda_value) * ligand2_atom.rmin
                        )
                        logger.debug(
                            f"Original LJ: eps: {ligand1_atom.epsilon}; rmin: {ligand1_atom.rmin}"
                        )
                        logger.debug(
                            f"New LJ: eps: {modified_epsilon}; rmin: {modified_rmin}"
                        )
                        ligand1_atom.mod_type = mod_type(
                            modified_epsilon, modified_rmin
                        )
            if not found:
                raise RuntimeError("No corresponding atom in cc2 found")

    def _mutate_bonds(self, psf: pm.charmm.CharmmPsfFile, lambda_value: float):
        """Interpolate cc1 bond parameters (k, req) towards cc2."""
        logger.debug("#######################")
        logger.debug("mutate_bonds")
        mod_type = namedtuple("Bond", "k, req")
        for ligand1_bond in psf.view[f":{self.tlc_cc1}"].bonds:
            ligand1_atom1_name = ligand1_bond.atom1.name
            ligand1_atom2_name = ligand1_bond.atom2.name
            # all atoms of the bond must be in cc
            # everything outside the cc are bonded terms between dummies or
            # between real atoms and dummies and we can ignore them for now
            if not all(
                elem in self.atom_names_mapping
                for elem in [ligand1_atom1_name, ligand1_atom2_name]
            ):
                continue
            found = False
            for ligand2_bond in self.ligand2_psf.bonds:
                ligand2_atom1_name = ligand2_bond.atom1.name
                ligand2_atom2_name = ligand2_bond.atom2.name
                # all atoms of the bond must be in cc
                if not all(
                    elem in self.atom_names_mapping.values()
                    for elem in [ligand2_atom1_name, ligand2_atom2_name]
                ):
                    continue
                # match the two bonds
                if sorted(
                    [
                        self.atom_names_mapping[e]
                        for e in [ligand1_atom1_name, ligand1_atom2_name]
                    ]
                ) == sorted([ligand2_atom1_name, ligand2_atom2_name]):
                    found = True
                    # are the bonds different?
                    if sorted(
                        [ligand1_bond.atom1.type, ligand1_bond.atom2.type]
                    ) == sorted([ligand2_bond.atom1.type, ligand2_bond.atom2.type]):
                        continue
                    logger.debug(f"Modifying bond: {ligand1_bond}")
                    logger.debug(f"Template bond: {ligand2_bond}")
                    modified_k = (lambda_value * ligand1_bond.type.k) + (
                        (1.0 - lambda_value) * ligand2_bond.type.k
                    )
                    logger.debug(
                        f"Current k: {ligand1_bond.type.k}; target k: {ligand2_bond.type.k}; new k: {modified_k}"
                    )
                    # interpolating from ligand1 (original) to ligand2 (new) bond parameters
                    modified_req = (lambda_value * ligand1_bond.type.req) + (
                        (1.0 - lambda_value) * ligand2_bond.type.req
                    )
                    logger.debug(
                        f"Current req: {ligand1_bond.type.req}; target req: {ligand2_bond.type.req}; new req: {modified_req}"
                    )
                    ligand1_bond.mod_type = mod_type(modified_k, modified_req)
                    logger.debug(ligand1_bond.mod_type)
            if not found:
                logger.critical(ligand1_bond)
                raise RuntimeError(
                    "No corresponding bond in cc2 found: {}".format(ligand1_bond)
                )

    def _mutate_angles(self, psf: pm.charmm.CharmmPsfFile, lambda_value: float):
        """Interpolate cc1 angle parameters (k, theteq) towards cc2."""
        mod_type = namedtuple("Angle", "k, theteq")
        for cc1_angle in psf.view[f":{self.tlc_cc1}"].angles:
            ligand1_atom1_name = cc1_angle.atom1.name
            ligand1_atom2_name = cc1_angle.atom2.name
            cc1_a3 = cc1_angle.atom3.name
            # only angles in cc
            if not all(
                elem in self.atom_names_mapping
                for elem in [ligand1_atom1_name, ligand1_atom2_name, cc1_a3]
            ):
                continue
            found = False
            for cc2_angle in self.ligand2_psf.angles:
                ligand2_atom1_name = cc2_angle.atom1.name
                ligand2_atom2_name = cc2_angle.atom2.name
                cc2_a3 = cc2_angle.atom3.name
                # only angles in cc
                if not all(
                    elem in self.atom_names_mapping.values()
                    for elem in [ligand2_atom1_name, ligand2_atom2_name, cc2_a3]
                ):
                    continue
                if sorted(
                    [
                        self.atom_names_mapping[e]
                        for e in [ligand1_atom1_name, ligand1_atom2_name, cc1_a3]
                    ]
                ) == sorted([ligand2_atom1_name, ligand2_atom2_name, cc2_a3]):
                    found = True
                    if sorted(
                        [
                            cc1_angle.atom1.type,
                            cc1_angle.atom2.type,
                            cc1_angle.atom3.type,
                        ]
                    ) == sorted(
                        [
                            cc2_angle.atom1.type,
                            cc2_angle.atom2.type,
                            cc2_angle.atom3.type,
                        ]
                    ):
                        continue
                    logger.debug(f"Modifying angle: {cc1_angle}")
                    logger.debug(f"Template bond: {cc2_angle}")
                    logger.debug("Scaling k and theteq")
                    logger.debug(f"Old k: {cc1_angle.type.k}")
                    modified_k = (
                        lambda_value * cc1_angle.type.k
                        + (1.0 - lambda_value) * cc2_angle.type.k
                    )
                    logger.debug(f"New k: {modified_k}")
                    # fixed: these two messages were mislabeled as "Old k"/"New k"
                    logger.debug(f"Old theteq: {cc1_angle.type.theteq}")
                    modified_theteq = (
                        lambda_value * cc1_angle.type.theteq
                        + (1.0 - lambda_value) * cc2_angle.type.theteq
                    )
                    # fixed: use the module logger instead of the root logging module
                    logger.debug(f"New theteq: {modified_theteq}")
                    cc1_angle.mod_type = mod_type(modified_k, modified_theteq)
            if not found:
                logger.critical(cc1_angle)
                raise RuntimeError("No corresponding angle in cc2 found")

    def _mutate_torsions(self, psf: pm.charmm.CharmmPsfFile, lambda_value: float):
        """Cross-fade torsions: cc1 terms are turned off while cc2 terms are turned on."""
        mod_type = namedtuple("Torsion", "phi_k, per, phase, scee, scnb")
        # get all torsions present in initial topology
        for original_torsion in psf.view[f":{self.tlc_cc1}"].dihedrals:
            found: bool = False
            original_atom1_name = original_torsion.atom1.name
            original_atom2_name = original_torsion.atom2.name
            original_atom3_name = original_torsion.atom3.name
            original_atom4_name = original_torsion.atom4.name
            # all atoms must be in the cc
            if not all(
                elem in self.atom_names_mapping
                for elem in [
                    original_atom1_name,
                    original_atom2_name,
                    original_atom3_name,
                    original_atom4_name,
                ]
            ):
                continue
            # get corresponding torsion types in the new topology
            for new_torsion in self.ligand2_psf.dihedrals:
                new_atom1_name = new_torsion.atom1.name
                new_atom2_name = new_torsion.atom2.name
                new_atom3_name = new_torsion.atom3.name
                new_atom4_name = new_torsion.atom4.name
                # only torsion in cc
                if not all(
                    elem in self.atom_names_mapping.values()
                    for elem in [
                        new_atom1_name,
                        new_atom2_name,
                        new_atom3_name,
                        new_atom4_name,
                    ]
                ):
                    continue
                if sorted(
                    [
                        self.atom_names_mapping[e]
                        for e in [
                            original_atom1_name,
                            original_atom2_name,
                            original_atom3_name,
                            original_atom4_name,
                        ]
                    ]
                ) == sorted(
                    [new_atom1_name, new_atom2_name, new_atom3_name, new_atom4_name]
                ):
                    found = True
                    if sorted(
                        [
                            original_torsion.atom1.type,
                            original_torsion.atom2.type,
                            original_torsion.atom3.type,
                            original_torsion.atom4.type,
                        ]
                    ) == sorted(
                        [
                            new_torsion.atom1.type,
                            new_torsion.atom2.type,
                            new_torsion.atom3.type,
                            new_torsion.atom4.type,
                        ]
                    ):
                        continue
                    mod_types = []
                    # torsion present at cc1 needs to be turned fully off starting at lambda_value == 1.
                    f = max((1 - ((1 - lambda_value) * 2)), 0.0)
                    if f > 0.0 or lambda_value == 0.5:
                        for torsion_t in original_torsion.type:
                            modified_phi_k = torsion_t.phi_k * f
                            mod_types.append(
                                mod_type(
                                    modified_phi_k,
                                    torsion_t.per,
                                    torsion_t.phase,
                                    torsion_t.scee,
                                    torsion_t.scnb,
                                )
                            )
                    # torsion present at cc2 needs to be fully turned on at lambda_value == 0.0
                    f = 1 - min((lambda_value) * 2, 1.0)
                    if f > 0.0:
                        for torsion_t in new_torsion.type:
                            modified_phi_k = torsion_t.phi_k * f
                            if modified_phi_k >= 0.0:
                                mod_types.append(
                                    mod_type(
                                        modified_phi_k,
                                        torsion_t.per,
                                        torsion_t.phase,
                                        torsion_t.scee,
                                        torsion_t.scnb,
                                    )
                                )
                    original_torsion.mod_type = mod_types
            if not found:
                logger.critical(original_torsion)
                raise RuntimeError("No corresponding torsion in cc2 found")

    def mutate(self, psf: pm.charmm.CharmmPsfFile, lambda_value: float):
        """
        Mutates the bonded parameters of cc1 to cc2.
        Parameters
        ----------
        psf : pm.charmm.CharmmPsfFile
            psf that gets mutated
        lambda_value : float
            lambda_value
        """
        # fixed: isinstance instead of type() == comparison
        assert isinstance(psf, pm.charmm.CharmmPsfFile)
        if self.charge_mutation:
            logger.info(f" -- Charge parameters from cc1 are transformed to cc2.")
            logger.info(f"Lambda value:{lambda_value}")
            # scale charge
            self._mutate_charges(psf, lambda_value)
        if self.bonded_terms_mutation:
            logger.info(
                f" -- Atom/Bond/Angle/Torsion parameters from cc1 are transformed to cc2."
            )
            logger.info(f"Lambda value:{lambda_value}")
            # scale atoms
            self._mutate_atoms(psf, lambda_value)
            # scale bonds
            self._mutate_bonds(psf, lambda_value)
            # scale angles
            self._mutate_angles(psf, lambda_value)
            # scale torsions
            self._mutate_torsions(psf, lambda_value)

    @staticmethod
    def _modify_type_in_cc(atom: pm.Atom, psf: pm.charmm.CharmmPsfFile):
        """Assign a fresh RRR dummy atom type (once) and remember the original type."""
        if hasattr(atom, "initial_type"):
            # only change parameters
            pass
        else:
            logger.info(f"Setting RRR atomtype for atom: {atom}.")
            atom.initial_type = atom.type
            psf.number_of_dummys += 1
            atom.type = f"RRR{psf.number_of_dummys}"
class Mutation(object):
    """Decouples the charge and vdW parameters of a dummy region from the system."""

    def __init__(self, atoms_to_be_mutated: list, dummy_region: DummyRegion):
        # fixed: isinstance instead of type() == comparison
        assert isinstance(atoms_to_be_mutated, list)
        self.atoms_to_be_mutated = atoms_to_be_mutated
        self.dummy_region = dummy_region
        self.tlc = dummy_region.tlc

    def _mutate_charge(
        self, psf: pm.charmm.CharmmPsfFile, lambda_value: float, offset: int
    ):
        """Scale the charges of all atoms to be mutated by *lambda_value*."""
        total_charge = int(
            round(sum([atom.initial_charge for atom in psf.view[f":{self.tlc}"].atoms]))
        )
        # scale the charge of all atoms
        print(f"Scaling charge on: {self.atoms_to_be_mutated}")
        for idx in self.atoms_to_be_mutated:
            odx = idx + offset
            atom = psf[odx]
            logger.debug(f"Scale charge on {atom}")
            logger.debug(f"Scaling charge with: {lambda_value}")
            logger.debug(f"Old charge: {atom.charge}")
            atom.charge = atom.initial_charge * lambda_value
            logger.debug(f"New charge: {atom.charge}")
        # check to avoid compensating charges when doing asfe
        if (
            lambda_value != 1
            and len(self.dummy_region.match_termin_real_and_dummy_atoms) != 0
        ):
            # compensate for the total change in charge the terminal atom
            self._compensate_charge(psf, total_charge, offset)

    def _mutate_vdw(
        self,
        psf: pm.charmm.CharmmPsfFile,
        lambda_value: float,
        vdw_atom_idx: List[int],
        offset: int,
        to_default: bool,
    ):
        """Scale (or default) the LJ parameters of the atoms in *vdw_atom_idx*."""
        if not set(vdw_atom_idx).issubset(set(self.atoms_to_be_mutated)):
            raise RuntimeError(
                f"Specified atom {vdw_atom_idx} is not in atom_idx list {self.atoms_to_be_mutated}. Aborting."
            )
        logger.info(f"Acting on atoms: {vdw_atom_idx}")
        # NOTE(review): this recomputation shadows the offset parameter, which
        # is therefore ignored -- confirm whether any caller relies on passing
        # a different offset before removing it
        offset = min([a.idx for a in psf.view[f":{self.tlc.upper()}"].atoms])
        for i in vdw_atom_idx:
            atom = psf[i + offset]
            if to_default:
                logger.info("Mutate to default")
                atom_type_suffix = "DDX"
                atom.rmin = 1.5
                atom.epsilon = -0.15
            else:
                logger.info("Mutate to dummy")
                # fixed: plain string literal instead of a placeholder-free f-string
                atom_type_suffix = "DDD"
                self._scale_epsilon(atom, lambda_value)
                self._scale_rmin(atom, lambda_value)
            # NOTE: there is always a type change
            self._modify_type(atom, psf, atom_type_suffix)

    def mutate(
        self,
        psf: pm.charmm.CharmmPsfFile,
        lambda_value_electrostatic: float = 1.0,
        lambda_value_vdw: float = 1.0,
        vdw_atom_idx: List[int] = [],
        steric_mutation_to_default: bool = False,
    ):
        """Performs the mutation"""
        if lambda_value_electrostatic < 0.0 or lambda_value_electrostatic > 1.0:
            raise RuntimeError("Lambda value for LJ needs to be between 0.0 and 1.0.")
        if lambda_value_vdw < 0.0 or lambda_value_vdw > 1.0:
            raise RuntimeError("Lambda value for vdw needs to be between 0.0 and 1.0.")
        logger.debug(f"LJ scaling factor: {lambda_value_electrostatic}")
        logger.debug(f"VDW scaling factor: {lambda_value_vdw}")
        offset = min([a.idx for a in psf.view[f":{self.tlc.upper()}"].atoms])
        if lambda_value_electrostatic < 1.0:
            self._mutate_charge(psf, lambda_value_electrostatic, offset)
        if lambda_value_vdw < 1.0:
            self._mutate_vdw(
                psf, lambda_value_vdw, vdw_atom_idx, offset, steric_mutation_to_default
            )

    def _compensate_charge(
        self, psf: pm.charmm.CharmmPsfFile, total_charge: int, offset: int
    ):
        """
        _compensate_charge This function compensates the charge changes of a dummy region on the terminal real atom
        that connects the specific dummy group to the real region.
        Parameters
        ----------
        psf : pm.charmm.CharmmPsfFile
            the psf whose charges are adjusted in place
        total_charge : int
            the integer total charge the ligand must keep
        offset : int
            index offset of the ligand atoms within the psf
        Raises
        ------
        RuntimeError
            if no connecting real atom is found or the total charge is not preserved
        """
        # get dummy regions
        connected_dummy_regions = self.dummy_region.connected_dummy_regions
        logger.debug(f"Compensating charge ...")
        # save the atoms that are used for charge compensation. This is done because if two regions
        # use the same atom, a special handling needs to be invoked
        compensating_on_this_real_atom = []
        # check for each dummy region how much charge has changed and compensate on atom that connects
        # the real region with specific dummy regions
        for dummy_idx in connected_dummy_regions:
            logger.debug(f"Dummy idx region: {dummy_idx}")
            connecting_real_atom_for_this_dummy_region = (
                self.dummy_region.return_connecting_real_atom(dummy_idx)
            )
            logger.debug(
                f"Connecting atom: {connecting_real_atom_for_this_dummy_region}"
            )
            # fixed: identity comparison with None
            if connecting_real_atom_for_this_dummy_region is None:
                raise RuntimeError(
                    "Something went wrong with the charge compensation. Aborting."
                )
            charge_acceptor = psf[connecting_real_atom_for_this_dummy_region + offset]
            charge_to_compensate_for_region = 0.0
            for atom_idx in dummy_idx:
                charge_to_compensate_for_region += (
                    psf[atom_idx + offset].initial_charge
                    - psf[atom_idx + offset].charge
                )
            logger.debug(f"Charge to compensate: {charge_to_compensate_for_region}")
            # adding charge difference to initial charge on real terminal atom;
            # if the same real atom compensates for several regions, accumulate
            # on its current charge instead of resetting to the initial charge
            if (
                connecting_real_atom_for_this_dummy_region
                in compensating_on_this_real_atom
            ):
                charge_acceptor.charge = (
                    charge_acceptor.charge + charge_to_compensate_for_region
                )
            else:
                charge_acceptor.charge = (
                    charge_acceptor.initial_charge + charge_to_compensate_for_region
                )
            compensating_on_this_real_atom.append(
                connecting_real_atom_for_this_dummy_region
            )
        # check if rest charge is missing
        new_charge = sum(
            [atom.charge for atom in psf.view[f":{self.tlc.upper()}"].atoms]
        )
        if not (np.isclose(new_charge, total_charge, rtol=1e-4)):
            raise RuntimeError(
                f"Charge compensation failed. Introducing non integer total charge: {new_charge}. Target total charge: {total_charge}."
            )

    @staticmethod
    def _scale_epsilon(atom, lambda_value: float):
        """Scale the LJ epsilon of *atom* relative to its initial value."""
        logger.debug(atom)
        logger.debug(atom.initial_epsilon)
        atom.epsilon = atom.initial_epsilon * lambda_value

    @staticmethod
    def _scale_rmin(atom, lambda_value: float):
        """Scale the LJ rmin of *atom* relative to its initial value."""
        logger.debug(atom)
        logger.debug(atom.initial_rmin)
        atom.rmin = atom.initial_rmin * lambda_value

    @staticmethod
    def _modify_type(atom, psf, atom_type_suffix: str):
        """Assign a fresh dummy atom type (once) and remember the original type."""
        if hasattr(atom, "initial_type"):
            # only change parameters
            pass
        else:
            atom.initial_type = atom.type
            if atom_type_suffix == "DDD":
                psf.number_of_dummys += 1
                new_type = f"{atom_type_suffix}{psf.number_of_dummys}"
            elif atom_type_suffix == "DDX":
                psf.mutations_to_default += 1
                new_type = f"{atom_type_suffix}{psf.mutations_to_default}"
            atom.type = new_type
def mutate_pure_tautomers(
    s1_to_s2: ProposeMutationRoute,
    system1: SystemStructure,
    system2: SystemStructure,
    configuration,
    single_state=False,
    nr_of_bonded_windows: int = 4,
):
    """
    Write out the intermediate states for a tautomer pair.

    For tautomer 1: turn off charges, turn off the LJ terms, then transform
    the common core over ``nr_of_bonded_windows`` lambda windows.
    For tautomer 2: turn off charges and LJ terms only.
    If ``single_state`` is True, only the charge states of tautomer 1 are
    written and an empty list is returned for tautomer 2.

    Returns
    -------
    tuple : (output files of tautomer 1, output files of tautomer 2)
    """
    from transformato import (
        IntermediateStateFactory,
    )
    # setup mutation and StateFactory
    mutation_list = s1_to_s2.generate_mutations_to_common_core_for_mol1()
    i_tautomer1 = IntermediateStateFactory(
        system=system1,
        configuration=configuration,
    )
    # write out states
    # start with charge
    charges = mutation_list["charge"]
    for lambda_value in np.linspace(1, 0, 2):
        # turn off charges
        i_tautomer1.write_state(
            mutation_conf=charges,
            lambda_value_electrostatic=lambda_value,
        )
    if single_state:
        return (i_tautomer1.output_files, [])
    # turn off the lj of the hydrogen
    lj = mutation_list["lj"]
    i_tautomer1.write_state(
        mutation_conf=lj,
        lambda_value_vdw=0.0,
    )
    # transform common core (skip lambda == 1, which is the starting state)
    for lambda_value in np.linspace(1, 0, nr_of_bonded_windows + 1)[1:]:
        # turn off charges
        i_tautomer1.write_state(
            mutation_conf=mutation_list["transform"],
            common_core_transformation=lambda_value,
        )
    # setup other tautomer
    mutation_list = s1_to_s2.generate_mutations_to_common_core_for_mol2()
    i_tautomer2 = IntermediateStateFactory(
        system=system2,
        configuration=configuration,
    )
    # write out states
    # start with charge
    charges = mutation_list["charge"]
    for lambda_value in np.linspace(1, 0, 2):
        # turn off charges
        i_tautomer2.write_state(
            mutation_conf=charges,
            lambda_value_electrostatic=lambda_value,
        )
    # turn off the lj of the hydrogen
    lj = mutation_list["lj"]
    i_tautomer2.write_state(
        mutation_conf=lj,
        lambda_value_vdw=0.0,
    )
    return (i_tautomer1.output_files, i_tautomer2.output_files)
| wiederm/transformato | transformato/mutate.py | mutate.py | py | 90,744 | python | en | code | 16 | github-code | 36 |
33420840732 | # -*- coding: utf-8 -*-
# Juego del Ahorcado
# UD 3. Diseño de programas
# Tecnologías de la Información y de la Comunicación II - 2º BTO
# IES José Marin - Curso 2022 / 2023
# Módulo encargado de gestionar a los jugadores, sus puntuaciones, y el proceso de guardar
# y cargar sus datos en el programa.
from persistencia import f_cargar, f_guardar
def f_sumar_score(jugador):
    '''Add one point to the score of *jugador* in the datos.json file.'''
    jugadores = f_cargar("datos.json")
    for usuario in jugadores:
        if usuario["nombre"] == jugador["nombre"]:
            # Increment in place. The previous remove/append while iterating
            # revisited the re-appended entry and added the point twice
            # whenever the player was not the last element of the list.
            usuario["score"] += 1
            break
    f_guardar("datos.json", jugadores)
| jatovich/ahorcado | score.py | score.py | py | 758 | python | es | code | 0 | github-code | 36 |
32673819660 | import pandas as pd
import numpy as np
import re
import os
import pyperclip
def find_project_basin(list_of_valid_basins):
    """Prompt until the user enters one of the valid basin names; return it upper-cased."""
    cur_play = input('Please enter the name of the basin you would like to gather data for\n').upper()
    # keep re-prompting with the error message until the entry is valid
    while cur_play not in list_of_valid_basins:
        cur_play = input('Invalid basin name was entered. Please enter one of the following items:\n {}\n'
                         .format(list_of_valid_basins)).upper()
    return cur_play
def survey_to_set(file_path):
    """Read a directional-survey CSV and return its well IDs (APIs) as a set of strings.

    The first four lines of the file are header/metadata and are skipped.
    """
    # Well_ID is an API
    column_names = ['Well_ID', 'DX-ft', 'DY-ft', 'TVD-ft', 'MD-ft', 'Azimuth-TrueNorth-Deg',
                    'Azimuth-GridNorth-Deg', 'Inclination_Deg']
    df = pd.read_csv(file_path, skiprows=4, names=column_names)
    # normalize the IDs to strings so they compare cleanly with other sources
    return {str(well_id) for well_id in df['Well_ID']}
def query_to_set(conn, query, play):
    """Run *query* formatted with *play* and return the first column of every row as a set."""
    cursor = conn.cursor()
    cursor.execute(query.format(play))
    return {row[0] for row in cursor.fetchall()}
def query_to_dict(conn, query, play):
    """Run *query* formatted with *play* and map column 0 to column 1 for every row."""
    cursor = conn.cursor()
    cursor.execute(query.format(play))
    return {row[0]: row[1] for row in cursor.fetchall()}
def get_difference(ds9set, projectset):
    """Return the elements of *ds9set* that are not in *projectset*, as strings."""
    return {str(item) for item in ds9set - projectset}
def remove_duplicate_well_numbers(row):
    """Blank out row['WellNumber'] when it merely repeats the trailing number of row['WellName']."""
    if not (row['WellName'] and row['WellNumber']):
        return row
    # trailing token such as "12H" at the end of the well name
    trailing = re.search(r'\d+[a-zA-Z]*?\s*?$', row['WellName'])
    if trailing and trailing[0].strip() == row['WellNumber']:
        row['WellNumber'] = np.nan
    return row
def wrap_column_values_for_xlsx(df):
    """Convert every value of every column to a string with a trailing space appended."""
    for column in df.columns:
        df[column] = df[column].apply(lambda value: str(value) + " ")
    return df
def remove_errors(df_errors, df_qc, apiLevel):
    """Drop wells with disqualifying survey errors from *df_qc*.

    Parameters
    ----------
    df_errors : pandas.DataFrame
        Error log with an ``Error`` column and an ``API{apiLevel}`` column.
    df_qc : pandas.DataFrame
        Survey data to filter; must carry the same ``API{apiLevel}`` column.
    apiLevel : int
        API identifier length in use (10, 12 or 14).

    Returns
    -------
    pandas.DataFrame
        *df_qc* without any rows whose API carries a disqualifying error.
    """
    # Only these error types disqualify a survey outright.
    err_to_remove_here = ['Inclination is greater than 130',
                          'Inclination skips 45 deg or more',
                          'Azimuth delta is at least 30',
                          'MD skips 1000 ft or more']
    df_err_to_remove = df_errors[df_errors['Error'].isin(err_to_remove_here)]
    api_column = 'API{}'.format(apiLevel)
    # The original code printed only for the known API levels; the three
    # branches differed solely in the column name, so they are collapsed here.
    if apiLevel in (10, 12, 14):
        print(f'Removing {df_err_to_remove[api_column].nunique()} APIs with bad surveys...'
              '\nThese are logged to a file named "Survey errors"')
    df_qc = df_qc[~df_qc[api_column].isin(df_err_to_remove[api_column])]
    # make unique path name
    return df_qc
def is_outlier(points, thresh=3.5):
    """Flag outliers using the modified z-score (median absolute deviation).

    Implementation adapted from
    https://stackoverflow.com/questions/22354094/pythonic-way-of-detecting-outliers-in-one-dimensional-observation-data
    following Boris Iglewicz and David Hoaglin (1993), "Volume 16: How to
    Detect and Handle Outliers", The ASQC Basic References in Quality
    Control: Statistical Techniques.

    Parameters
    ----------
    points : numpy.ndarray
        Observations, shape (n,) or (n, d).
    thresh : float
        Modified z-score above which a point is flagged.

    Returns
    -------
    numpy.ndarray
        Boolean mask of length n; True marks an outlier.
    """
    if points.ndim == 1:
        points = points[:, None]
    median = np.nanmedian(points, axis=0)
    # Euclidean distance of each observation from the per-column median.
    distance = np.sqrt(np.sum((points - median) ** 2, axis=-1))
    med_abs_deviation = np.nanmedian(distance)
    modified_z_score = 0.6745 * distance / med_abs_deviation
    return modified_z_score > thresh
def haversine_distance(lat1, lon1, lat2, lon2):
    """Great-circle (haversine) distance between two lat/lon points, in feet.

    Formula adapted from
    https://stackoverflow.com/questions/27928/calculate-distance-between-two-latitude-longitude-points-haversine-formula
    """
    from math import cos, asin, sqrt
    deg_to_rad = 0.017453292519943295  # Pi/180
    hav = (0.5 - cos((lat2 - lat1) * deg_to_rad) / 2
           + cos(lat1 * deg_to_rad) * cos(lat2 * deg_to_rad)
           * (1 - cos((lon2 - lon1) * deg_to_rad)) / 2)
    kilometres = 12756.2 * asin(sqrt(hav))  # 2*R*asin(sqrt(a))
    return kilometres * 3280.84  # kilometres -> feet
def format_df_columns(df):
    """Rename raw export columns to the project's canonical column names."""
    rename_map = {
        'api10': 'API10',
        'api12': 'API12',
        'wellid': 'API14',
        'kb_elevation_(ft)': 'proj_kb',
    }
    return df.rename(columns=rename_map)
def update_data_type(df, column, out_dtype):
    """Cast *column* of *df* to *out_dtype* unless it already matches.

    Returns a new DataFrame when a conversion happens, otherwise the original
    object untouched.
    """
    if df[column].dtypes != out_dtype:
        df = df.astype({column: out_dtype})
    return df
def filter_apis(filter_q, file_path, ds9apis):
    """Optionally remove APIs already covered by the project survey file.

    Prompts until *filter_q* is 'y' or 'n' (case-insensitive): 'n' keeps the
    full *ds9apis* set; 'y' subtracts the APIs found in *file_path*.
    """
    while True:
        answer = filter_q.lower()
        if answer == 'n':
            return ds9apis
        if answer == 'y':
            return get_difference(ds9apis, survey_to_set(file_path))
        filter_q = input('You must input a valid option[Y/N].\n')
def get_api_list_from_folder(folder_path):
    """Collect API identifiers from file names beneath *folder_path*.

    The first ten characters of every file name (at any depth) are assumed to
    be the well API.
    """
    return {file_name[:10]
            for _, _, file_names in os.walk(folder_path)
            for file_name in file_names}
def get_api_level_from_list(api_list):
    """Return the API level (identifier length) implied by the first entry."""
    return len(api_list[0])
def get_columns_to_drop(df, keep_list):
    """List the columns of *df* that are absent from *keep_list*."""
    keep = set(keep_list)
    return [column for column in df.columns if column not in keep]
def multi_input(prompt):
    """Read lines from stdin until a blank line; return them as a list.

    The accumulated text is split on newlines, so the returned list always
    ends with an empty string (preserved from the original behaviour).
    """
    print('{}'.format(prompt))
    collected = ""
    while True:
        line = input()
        if line.strip() == "":
            break
        collected += "%s\n" % line
    return collected.split("\n")
| gilliganne/update-wells | utils/functions.py | functions.py | py | 6,696 | python | en | code | 0 | github-code | 36 |
29557430446 | import os
import sys
from typing import Optional
from brownie import network, accounts
def network_name() -> Optional[str]:
    """Resolve the target network name.

    Prefers brownie's active network; otherwise falls back to the value
    following a ``--network`` CLI argument, and finally to ``"mainnet"``.

    Returns
    -------
    Optional[str]
        The resolved network name (never None in practice, since "mainnet"
        is the final fallback).
    """
    # Call show_active() once instead of twice (the original re-queried it).
    active = network.show_active()
    if active is not None:
        return active
    cli_args = sys.argv[1:]
    try:
        # Value following "--network"; absent or dangling flag -> fallback.
        net_name = cli_args[cli_args.index("--network") + 1]
    except (ValueError, IndexError):
        net_name = None
    return "mainnet" if net_name is None else net_name
# Select the chain-specific address book at import time, based on the active
# (or CLI-requested) network; any other network is rejected immediately.
if network_name() in ("optimism-main", "optimism-fork"):
    print(f"Using config_optimism.py addresses")
    from utils.config_optimism import *
elif network_name() in ("arbitrum-main", "arbitrum-fork"):
    print(f"Using arbitrum.py addresses")
    from utils.config_arbitrum import *
else:
    raise EnvironmentError(f"{network_name()} is not supported")
# 3000 tokens at 18 decimals — presumably the minimum rewards amount the
# scripts expect to operate with; TODO confirm against callers.
min_rewards_amount = 3000 * 10**18
def get_is_live():
    """Return True when not running against the local development network."""
    active = network.show_active()
    return active != "development"
def get_env(name, is_required=True, message=None, default=None):
    """Fetch environment variable *name*.

    When the variable is unset: raise ``EnvironmentError`` (with *message* or
    a generic prompt) if *is_required* is true, otherwise return *default*.
    """
    if name in os.environ:
        return os.environ[name]
    if is_required:
        raise EnvironmentError(message or f"Please set {name} env variable")
    return default
def get_deployer_account(is_live):
    """Return the brownie account used for deployment.

    Loads the account named by the ``DEPLOYER`` env variable whenever it is
    set; otherwise falls back to the first local test account. Raises
    ``EnvironmentError`` when *is_live* is true and ``DEPLOYER`` is missing.
    """
    deployer_name = os.environ.get("DEPLOYER")
    if is_live and deployer_name is None:
        raise EnvironmentError(
            "Please set DEPLOYER env variable to the deployer account name"
        )
    if deployer_name is not None:
        return accounts.load(deployer_name)
    return accounts[0]
def prompt_bool():
    """Read a yes/no answer from stdin.

    Returns True for "yes"/"y" and False for "no"/"n" (case-insensitive);
    anything else prints a reminder and returns None.
    """
    answer = input().lower()
    if answer in ("yes", "y"):
        return True
    if answer in ("no", "n"):
        return False
    sys.stdout.write("Please respond with 'yes' or 'no'")
| lidofinance/curve-rewards-manager | utils/config.py | config.py | py | 1,846 | python | en | code | 0 | github-code | 36 |
74027964263 | from mysql_connect import MysqlConnect
from s_config import config
import requests
import re
import json
import csv
import time
import random
def get_video_type(video_name):
    """Extract the token wrapped in dashes or underscores from *video_name*.

    Returns the first non-empty capture of ``-...-`` or ``_..._``, or None
    when neither delimiter pair is present (or both captures are empty).
    """
    matches = re.findall(r'-(.*)-|_(.*)_', video_name)
    if not matches:
        return None
    for captured in matches[0]:
        if captured:
            return captured
def get_greater_30(v_id, error_file):
    """Fetch the episode play list for video *v_id* from the QQ playsource API.

    Parameters
    ----------
    v_id : str
        Video id inserted into the request URL.
    error_file : file object
        Open text file; failing ids are appended, one per line.

    Returns
    -------
    list or None
        The ``videoPlayList`` payload on success; None on any failure (no
        JSONP payload, missing keys, empty playlist), after logging *v_id*.
    """
    url = "http://s.video.qq.com/get_playsource?id=" + v_id + "&type=4&range=1-10000&otype=json"
    session = requests.session()
    res = session.get(url).text

    def _log_failure():
        # Single failure path replacing the original's triplicated
        # write-and-return blocks.
        error_file.write(v_id + "\n")
        return None

    # The endpoint returns JSONP of the form "QZOutputJson=<json>;".
    # The original called .groups() on a possibly-None match, which crashed
    # with AttributeError on unexpected responses; log and bail instead.
    json_re = re.match("QZOutputJson=(.*)", res)
    if not json_re:
        return _log_failure()
    json_res = json.loads(json_re.groups()[0][:-1])
    print(url, json_res)
    # The original used a bare `except:` here; only a missing key can occur
    # on a parsed JSON object, so .get() covers it (and the empty case).
    playlist_item = json_res.get('PlaylistItem')
    if not playlist_item:
        return _log_failure()
    if 'videoPlayList' not in playlist_item:
        return _log_failure()
    return playlist_item['videoPlayList']
def main():
    """Crawl episode play lists for shows with more than 30 episodes.

    Reads show ids for a fixed date from MySQL, queries the QQ playsource
    endpoint for each, and writes two CSVs (episodes beyond #30, and show
    title -> type token) plus an error log of ids that could not be fetched.
    """
    # Date of the crawl batch; used in both the SQL filter and file names.
    video_time = "20180518"
    mc = MysqlConnect(config)
    # Output 1: episodes numbered above 30 (or with non-numeric numbers).
    csv_file_1 = open('data/' + video_time + '_video_greater_30.csv', 'w', newline='', encoding="utf-8")
    csv_writer_1 = csv.writer(csv_file_1)
    # Output 2: show title and its extracted type token.
    csv_file_2 = open('data/' + video_time + '_video_type.csv', 'w', newline='', encoding="utf-8")
    csv_writer_2 = csv.writer(csv_file_2)
    # Append-mode log of video ids whose playsource request failed.
    error_file = open('data/' + video_time + 'error_item', 'a', encoding='utf-8')
    sql = """select detail_title,detail_pid from tx_jieshaoye where update_date = """ + video_time + """ and episodes > 30"""
    res = mc.exec_query(sql)
    for item in res:
        # item: (detail_title, detail_pid)
        re_json = get_greater_30(item[1], error_file)
        if not re_json:
            time.sleep(3)
            re_json = get_greater_30(item[1], error_file)  # retry once
        if not re_json:
            print("error: ", item[0])
        else:
            csv_writer_2.writerow([item[0], get_video_type(item[0])])
            for ep_item in re_json:
                # print(item[0], get_video_type(item[0]), re_json)
                # if "番外" in ep_item['episode_number'] or int(ep_item['episode_number']) > 30:
                # print(ep_item['title'], ep_item['playUrl'], ep_item['episode_number'])
                # Non-digit characters mark specials/extras rather than a
                # plain episode number.
                has_no_num = re.findall(r"[^\d]+", ep_item['episode_number'])
                if len(has_no_num) or int(ep_item['episode_number']) > 30:
                    # print(ep_item['title'], ep_item['playUrl'])
                    if len(has_no_num):
                        # Specials: prefix with the show title for context.
                        csv_writer_1.writerow([item[0] + ep_item['title'], ep_item['playUrl']])
                    else:
                        csv_writer_1.writerow([ep_item['title'], ep_item['playUrl']])
        # Throttle requests to avoid hammering the endpoint.
        time.sleep(random.randint(1, 3))
    csv_file_1.close()
    csv_file_2.close()
    error_file.close()
    mc.close()
# Allow the crawler to be run directly as a script.
if __name__ == '__main__':
    main()
| jercheng/js_video_scrapy | crawl/v_qq_com/data_base/t_main2.py | t_main2.py | py | 3,091 | python | en | code | 0 | github-code | 36 |
69826427303 | import setuptools
# Read the long description shown on PyPI from the README. An explicit
# encoding keeps the build from failing on platforms whose default locale
# encoding is not UTF-8 (the original relied on the platform default).
with open('README.md', 'r', encoding='utf-8') as f:
    long_description = f.read()

setuptools.setup(
    name='coropy',
    version='0.0.1',
    author='Ante Lojic Kapetanovic',
    author_email='alojic00@fesb.hr',
    description='A set of Python modules for COVID-19 epidemics modeling',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/antelk/coropy',
    packages=setuptools.find_packages(),
    install_requires=[
        'numpy', 'scipy', 'scikit-learn', 'matplotlib', 'setuptools'],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Topic :: Scientific/Engineering :: Epidemiology',
        'Intended Audience :: Science/Research',
    ],
    python_requires='>=3.6',
)
| akapet00/coropy | setup.py | setup.py | py | 860 | python | en | code | 2 | github-code | 36 |
3731695404 | from sqlalchemy import (
Boolean,
Column,
DateTime,
Integer,
String,
ForeignKey,
)
from sqlalchemy import exc as sqlalchemy_exc
from sqlalchemy.dialects.postgresql import (
JSONB,
UUID,
ARRAY,
)
from sqlalchemy.sql.expression import false, null
from sqlalchemy.orm import relationship
from abenga_site.py.lib.models.base import Base
class Person(Base):
    """ORM model for ``core.people``: a user identity with auth and contact data."""

    __tablename__ = "people"
    __table_args__ = {"schema": "core"}

    # Identity.
    id = Column(Integer, primary_key=True, name="id", quote=False)
    uid = Column(UUID, unique=True, nullable=False, name="uid", quote=False)
    username = Column(String, unique=True, name="username", quote=False)
    email = Column(String, unique=True, nullable=False, name="email", quote=False)
    primary_phone_number = Column(String(32), name="primary_phone_number", quote=False)
    # Authentication: either a local password or an OAuth provider/token pair,
    # selected by login_type.
    login_type = Column(String(255), name="login_type", quote=False)
    # NOTE(review): presumably stores a password hash, not plaintext — confirm
    # against the code that writes this column.
    password = Column(String, name="password", quote=False)
    oauth_provider = Column(String, name="oauth_provider", quote=False)
    oauth_token = Column(String, name="oauth_token", quote=False)
    # Personal details.
    first_name = Column(String, name="first_name", quote=False)
    last_name = Column(String, name="last_name", quote=False)
    other_names = Column(String, name="other_names", quote=False)
    date_added = Column(DateTime, name="date_added", quote=False)
    # Contact info; JSONB columns hold loosely structured data.
    contact_email = Column(String, name="contact_email", quote=False)
    other_phone_numbers = Column(JSONB, name="other_phone_numbers", quote=False)
    postal_address = Column(JSONB, name="postal_address", quote=False)
    physical_address = Column(JSONB, name="physical_address", quote=False)
    active = Column(Boolean, name="active", quote=False)

    def __repr__(self):
        # e.g. "Jane Doe<jane@example.com>"
        return f"{self.first_name} {self.last_name}<{self.email}>"
class LoginSession(Base):
    """ORM model for ``core.login_sessions``: one authenticated session of a Person."""

    __tablename__ = "login_sessions"
    __table_args__ = {"schema": "core"}

    id = Column(Integer, primary_key=True, name="id", quote=False)
    # Owning person (FK to core.people.id).
    person_id = Column(
        Integer, ForeignKey("core.people.id"), name="person_id", quote=False
    )
    # Opaque session token; unique per session.
    session_id = Column(
        String(128), unique=True, nullable=False, name="session_id", quote=False
    )
    time_started = Column(DateTime, nullable=False, name="time_started", quote=False)
    # NOTE(review): presumably refreshed on each request to expire idle
    # sessions — confirm against the session-handling code.
    last_action_time = Column(
        DateTime, nullable=False, name="last_action_time", quote=False
    )
    # Defaults to false ("f") at the database level.
    ended = Column(Boolean, name="ended", server_default="f", quote=False)
    time_ended = Column(DateTime, nullable=False, name="time_ended", quote=False)

    def __repr__(self):
        return f"LoginSession<{self.person_id}:{self.session_id}:{self.time_started}>"
| abenga/abenga.com | py/lib/models/core.py | core.py | py | 2,678 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.