seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
38695485940 | import json
from kafka import KafkaConsumer
# Kafka consumer that tails the 'orders' topic and prints any order whose
# task_status has moved past "pending".
consumer = KafkaConsumer('orders',
                         group_id="console",
                         bootstrap_servers='localhost:9092')
# Ansible Tower/AWX connection settings (not referenced below in this file).
# NOTE(review): a credential token is hardcoded in source control — rotate it
# and load secrets from the environment instead.
tower_host = "http://localhost"
tower_user = "admin"
tower_token = "Mu21PadLaHLU3fUmh4IbM4vabs5bqx"
print("Console - Consumer now listening...")
# Iterating a KafkaConsumer blocks for new messages; the outer while keeps
# the process alive if the iterator ever returns.
while True:
    for message in consumer:
        # message.value is the raw bytes of a JSON document
        consumed_message = json.loads(message.value.decode())
        # Only act once the order has left the "pending" state.
        if consumed_message["task_status"] != "pending":
            print("-------------------")
            print("Consuming message...")
            print(message)
            print("")
            print("Processing message as task_state is {} for order {}.".format(consumed_message["task_status"], consumed_message["order_id"]))
            print("")
| dovastbe/kafka_poc | console_update_order.py | console_update_order.py | py | 846 | python | en | code | 0 | github-code | 13 |
72255300499 | import torch
import torch.nn.functional as F
from pytorch_lightning import LightningModule
from torchmetrics.classification.accuracy import Accuracy
from typing import Any
import torch.nn as nn
from src.models.modules.tcn import MS_TCN2
class MSTCNLitModel(LightningModule):
    """LightningModule wrapping MS-TCN2 for frame-wise action segmentation.

    The training objective follows the MS-TCN papers: a cross-entropy
    classification loss summed over every refinement stage, plus a
    truncated-MSE smoothing term (weight 0.15, clamped at 16) on consecutive
    frames' log-probabilities to discourage over-segmentation.
    """

    def __init__(self, num_layers_PG, num_layers_R, num_R, num_f_maps, dim, num_classes, lr):
        super().__init__()
        # model hyper-parameters
        self.num_layers_PG = num_layers_PG
        self.num_layers_R = num_layers_R
        self.num_R = num_R
        self.num_f_maps = num_f_maps
        self.dim = dim
        self.num_classes = num_classes
        self.lr = lr
        # model
        self.model = MS_TCN2(num_layers_PG, num_layers_R, num_R, num_f_maps, dim, num_classes)
        self.model.float()
        # losses: CE over frame labels (-100 marks padded/ignored frames) and
        # an element-wise MSE used by the smoothing term below
        self.ce = nn.CrossEntropyLoss(ignore_index=-100)
        self.mse = nn.MSELoss(reduction='none')
        # this line ensures params passed to LightningModule will be saved to ckpt
        # it also allows to access params with 'self.hparams' attribute
        self.save_hyperparameters()
        # kept for backward compatibility; self.ce is the loss actually used
        self.criterion = torch.nn.CrossEntropyLoss()
        # use separate metric instance for train, val and test step
        # to ensure a proper reduction over the epoch
        self.train_accuracy = Accuracy()
        self.val_accuracy = Accuracy()
        self.test_accuracy = Accuracy()

    def forward(self, x: torch.Tensor):
        """Run MS-TCN2 on the (float-cast) input; returns per-stage logits."""
        return self.model(x.float())

    def step(self, batch: Any):
        """Shared logic: compute the multi-stage loss and frame predictions."""
        x, key, y = batch
        loss = 0
        logits = self.forward(x)
        for p in logits:
            # frame-wise classification loss for this refinement stage
            loss += self.ce(p.transpose(2, 1).contiguous().view(-1, self.num_classes), y.view(-1))
            # smoothing loss: clamped MSE between the log-probabilities of
            # adjacent frames (the shifted operand is detached so gradients
            # only flow through the current frame)
            loss += 0.15 * torch.mean(
                torch.clamp(self.mse(F.log_softmax(p[:, :, 1:], dim=1), F.log_softmax(p.detach()[:, :, :-1], dim=1)),
                            min=0, max=16))
        # predictions are taken from the last refinement stage only
        preds = torch.argmax(logits[-1].data, 1)
        return loss, preds, y

    def training_step(self, batch: Any, batch_idx: int):
        loss, preds, y = self.step(batch)
        acc = self.train_accuracy(preds, y)
        self.log("train/loss", loss, on_step=True, on_epoch=True, prog_bar=False)
        self.log("train/acc", acc, on_step=True, on_epoch=True, prog_bar=True)
        # BUG FIX: the loss must be returned — Lightning treats a None return
        # as "skip this batch", so without it no backpropagation happens.
        return loss

    def configure_optimizers(self):
        """Plain Adam over all parameters at the configured learning rate."""
        return torch.optim.Adam(
            params=self.parameters(), lr=self.lr)
| Jaakik/hydra-ml | src/models/ms_tcn.py | ms_tcn.py | py | 2,428 | python | en | code | 0 | github-code | 13 |
24342496083 | class Solution:
def characterReplacement(self, s: str, k: int) -> int:
    # Sliding window with two same-direction pointers (LeetCode 424):
    # longest substring that can be made uniform with at most k replacements.
    n = len(s)
    if n < 2:
        return n
    left = right = res = maxCount = 0
    # freq[i] counts occurrences of chr(i + 65) inside the current window;
    # assumes s contains only uppercase ASCII letters 'A'..'Z'.
    freq = [0] * 26
    while right < n:
        freq[ord(s[right])-65] += 1
        # maxCount is the historical maximum frequency; it never decreases,
        # which keeps the window size monotonic (the answer can only grow)
        # and the whole scan O(n) while still producing the correct result.
        maxCount = max(maxCount, freq[ord(s[right])-65])
        right += 1
        # A window larger than maxCount + k cannot be fixed with k edits:
        # slide the left edge forward by one (never shrink below the best).
        if right - left > maxCount + k:
            freq[ord(s[left])-65] -= 1
            left += 1
        res = max(res, right - left)
    return res | yuhangzheng/leetcode | 双指针法-70/同向双指针、滑动窗口-34/592.py | 592.py | py | 554 | python | en | code | 0 | github-code | 13
29141149423 | import json
import os
import re
import subprocess
import sys
import tabulate
from .module_manager import ModuleManager
from .utils.exp_util import get_relative_imports
from .utils.git_util import parse_url_from_git
from .sources.remote import RemoteDataSource
from .sources.local.local import LocalDataSource
from pipreqs import pipreqs
from .package import Package
from typing import Optional
import ipdb
class ModuleRepository:
def __init__(self, config, run_local_api_server: bool = False):
self.config = config
self.__load_git_dependencies()
self.package_manager = ModuleManager(config)
self.remote = RemoteDataSource()
self.local = LocalDataSource(config)
# self.__generate_pip_requirements(self.config.project)
@staticmethod
def init_project(project_name: str):
    """Scaffold a new mate project.

    Creates <project_name>/mate.json plus an inner Python package of the
    same name containing experiments/, models/, data/ and trainers/
    sub-packages (each with an empty __init__.py). Exits the process if
    the project directory or mate.json already exists.
    """
    if not os.path.exists(project_name):
        os.mkdir(project_name)
        os.chdir(project_name)
    else:
        print("Project already exists")
        sys.exit(1)
    mate_json = os.path.join("mate.json")
    if not os.path.exists(mate_json):
        dic = {
            "project": project_name,
        }
        # create mate.json
        with open(mate_json, "w") as f:
            json.dump(dic, f, indent=4)
    else:
        print("Project already exists")
        sys.exit(1)
    # Inner package directory shares the project name (we chdir'd above,
    # so this is <project_name>/<project_name>).
    if not os.path.exists(project_name):
        os.mkdir(project_name)
    init__file = os.path.join(project_name, "__init__.py")
    open(init__file, "a").close()
    try:
        folders = ["experiments", "models", "data", "trainers"]
        for folder in folders:
            os.makedirs(os.path.join(project_name, folder), exist_ok=True)
            init__file = os.path.join(project_name, folder, "__init__.py")
            if not os.path.exists(init__file):
                # touch an empty __init__.py so the folder is importable
                open(init__file, "a").close()
        print(
            "Project {} created, run `cd {}` to enter the project folder".format(
                project_name, project_name
            )
        )
    except Exception as e:
        print(e)
def install_url(self, url: str, *args, **kwargs):
self.package_manager.install_package(url, *args, **kwargs)
def auto(self, command: str, *args):
if command == "export":
self.__export()
elif command in ["init", "fix", "i"]:
self.__generate__init__(self.config.project)
def __generate__init__(self, root: str = None):
init__py = os.path.join(root, "__init__.py")
if not os.path.exists(init__py):
with open(init__py, "w") as f:
f.write("")
print(f"Created {init__py}")
for folder in os.listdir(root):
path = os.path.join(root, folder)
if not os.path.isdir(path) or folder == "__pycache__" or "." in folder:
continue
init__py = os.path.join(path, "__init__.py")
if not os.path.exists(init__py):
with open(init__py, "w") as f:
f.write("")
print(f"Created {init__py}")
self.__generate__init__(path)
def __parse_index_urls(self, reqs: list[str]):
    """Return the set of extra pip index URLs needed by the requirements.

    Any requirement line mentioning "torch" or "jax" contributes the
    matching wheel-index URL; all other lines contribute nothing.
    """
    extra_indexes = (
        ("torch", "https://download.pytorch.org/whl/torch_stable.html"),
        ("jax", "https://storage.googleapis.com/jax-releases/jax_releases.html"),
    )
    return {url for marker, url in extra_indexes
            for line in reqs if marker in line}
def __load_git_dependencies(self):
try:
result = subprocess.run(
["pip", "freeze", "-l"], capture_output=True, text=True
)
output = result.stdout.strip().split("\n")
# only +git packages
output = [line for line in output if "git+" in line]
self._git_deps = output
except Exception as e:
print(e)
print("Failed to read git dependencies")
self._git_deps = []
def __add_index_url_to_requirements(self, path: str):
with open(os.path.join(path), "r") as f:
lines = f.readlines()
linecount = len(lines)
yerbamate_is_req = False
for line in lines:
if "yerbamate" in line:
yerbamate_is_req = True
break
# lines = [
# line
# for line in lines
# if not ".egg>=info" in line
# and not ".egg==info" in line
# and not ".egg>=info" in line
# ]
# remove +cu{numbers} version form lines
# regex for numbers with at least 1 digit
regex = re.compile(r"\+cu\d+")
lines = [regex.sub("", line) for line in lines]
# Check if package versions need to be updated using pip freeze
for i, line in enumerate(lines):
package_name = line.strip().split("==")[0] if "==" in line else line.strip() # type: ignore
# check if >= or ~= is used
if ">=" in package_name:
package_name = package_name.split(">=")[0]
if "~=" in package_name:
package_name = package_name.split("~=")[0]
if package_name == "":
continue
# if package_name == "snscrape":
# ipdb.set_trace()
if package_name.endswith(".egg"):
package_name = package_name.split(".egg")[0]
for freeze_line in self._git_deps:
if freeze_line.lower().endswith(package_name.lower()):
lines[i] = freeze_line.replace("ssh://", "https://") + "\n" # auto replace ssh with https
urls = self.__parse_index_urls(lines)
lines = set(lines)
# if yerbamate_is_req and not "yerbamate" in lines:
# lines.append("yerbamate\n")
if len(urls) > 0:
with open(os.path.join(path), "w") as f:
for url in urls:
f.write(f"--extra-index-url {url}\n")
for line in lines:
f.write(line)
else:
with open(os.path.join(path), "w") as f:
for line in lines:
f.write(line)
def __generate_deps_in_depth(self, root_path):
# init__path = os.path.join(path, "__init__.py")
for dir in os.listdir(root_path):
if dir.startswith(".") or dir.startswith("__"):
continue
path = os.path.join(root_path, dir)
if os.path.isdir(path):
# check if this is a python module
init__path = os.path.join(root_path, dir, "__init__.py")
if not os.path.exists(init__path):
continue
# if dir in ["trainers", "experiments", "models", "data"] and
if not (
dir in ["trainers", "experiments", "models", "data"]
and self.config.project in root_path
):
self.__generate_pip_requirements(path)
self.__generate_deps_in_depth(path)
def __export(self, *args, **kwargs):
self.__generate_sub_pip_reqs()
modules = self.list()
table = []
for key, value in modules.items():
if type(value) is list:
table.append([{"type": key, "name": name} for name in value])
# if empty list, type and name are the same
if len(value) == 0:
table.append([{"type": key, "name": key}])
elif type(value) is dict:
table.append([{"type": key, "name": name} for name in value.keys()])
# ipdb.set_trace()
table = [item for sublist in table for item in sublist]
# add url to each item in table
deps = set()
base_url = parse_url_from_git()
user_name = base_url.split("/")[3]
repo_name = base_url.split("/")[4]
for item in table:
item[
"url"
] = f"{base_url}{self.config.project}/{item['type']}/{item['name']}"
item[
"short_url"
] = f"{user_name}/{repo_name}/{self.config.project}/{item['type']}/{item['name']}"
# if repo name is same as project name
if repo_name == self.config.project:
# item["url"] = f"{base_url}/{item['type']}/{item['name']}"
# get user name from url
item[
"short_url"
] = f"{user_name}/{repo_name}/{item['type']}/{item['name']}"
# read dependencies
for item in table:
path = os.path.join(
self.config.project, item["type"], item["name"], "requirements.txt"
)
dep_path = os.path.join(
self.config.project, item["type"], item["name"], "dependencies.json"
)
root_dep_path = os.path.join(
self.config.project, item["type"], "requirements.txt"
)
if os.path.exists(path):
with open(path, "r") as f:
item["dependencies"] = f.readlines()
if os.path.exists(dep_path):
with open(dep_path, "r") as f:
if "dependencies" in item:
item["dependencies"] += json.load(f)["dependencies"]
else:
item["dependencies"] = json.load(f)["dependencies"]
# item["module_dependencies"] = json.load(f)
if os.path.exists(root_dep_path):
with open(root_dep_path, "r") as f:
item["dependencies"] = f.readlines()
if "dependencies" in item:
item["dependencies"] = [
dep.replace("\n", "") for dep in item["dependencies"]
]
deps.update(item["dependencies"])
# remove github urls from dependencies if it
deps = [
dep for dep in deps if not ("https://github" in dep and not "+git" in dep)
]
# remove .egg>=info from dependencies
# deps = [
# dep for dep in deps if not ".egg>=info" in dep and not ".egg==info" in dep
# ]
# set index urls should be on top, sort so that --extra-index-url is on top
deps = sorted(deps, key=lambda x: "--extra-index-url" in x, reverse=True)
# remove empty lines
deps = [dep for dep in deps if dep != "\n" or dep != " " or dep != ""]
# add yerbamate to deps if not already there
# if not "yerbamate" in deps:
# deps.append("yerbamate")
# save deps in requirements.txt
with open("requirements.txt", "w") as f:
for dep in deps:
f.write(dep + "\n")
# create latex table
# ipdb.set_trace()
# l_table = [t for t in table if t["type"] == "models"]
# remove url from table
ltable = table
# for item in ltable:
# del item["url"]
# for dep in item["dependencies"]:
# if "--extra" in dep:
# item["dependencies"].remove(dep)
# # if "https" in dep:
# create latex table
# recreate table to remove url
ltable = []
# combine dependenices, make a set, remove urls, and save as requirements.txt
with open("exports.json", "w") as f:
json.dump(table, f, indent=4)
for item in table:
# remove --extra from dep
if "dependencies" in item:
# if --extra in dep
new_dep = []
for dep in item["dependencies"]:
if "--extra" in dep:
continue
new_dep.append(dep)
ltable.append(
{
"name": item["name"],
"type": item["type"],
"short_url": item["short_url"],
"dependencies": new_dep,
}
)
else:
ltable.append(
{
"name": item["name"],
"type": item["type"],
"short_url": item["short_url"],
"dependencies": item["dependencies"],
}
)
# ipdb.set_trace()
latex_table = tabulate.tabulate(
ltable,
headers="keys",
tablefmt="latex",
showindex="never"
# disable_numparse=False,
)
table = tabulate.tabulate(
table,
headers="keys",
tablefmt="github",
showindex="always",
disable_numparse=True,
)
# save table to export.md
with open("export.md", "w") as f:
f.write(table)
with open("exports.tex", "w") as f:
f.write(latex_table)
print("Exported to export.md")
def __generate_sub_pip_reqs(self):
root_path = self.config.project
self.__generate_deps_in_depth(root_path)
for dir in os.listdir("."):
if (
dir.startswith(".")
or dir.startswith("__")
or dir == self.config.project
):
continue
path = os.path.join(".", dir)
if os.path.isdir(path):
# check if this is a python module
init__path = os.path.join(".", dir, "__init__.py")
if not os.path.exists(init__path):
continue
self.__generate_pip_requirements(path)
def __generate_mate_dependencies(self, path):
# ipdb.set_trace()
files = [f for f in os.listdir(path) if f.endswith(".py") and "__" not in f]
original_files = [file.replace(".py", "") for file in files] + [
f for f in os.listdir(path) if "__" not in f
]
relative_imports = [get_relative_imports(os.path.join(path, f)) for f in files]
# flatten array to unique set
relative_imports = set(
[item for sublist in relative_imports for item in sublist]
)
relative_imports = [
module
for module in relative_imports
if not any([file in module for file in original_files])
]
url_git = parse_url_from_git()
if url_git is None:
print("No git url found, skipping dependencies.json")
return
deps = set()
for module in relative_imports:
if module.endswith(".py"):
continue
# if its a python file, return parent module
tpath = [self.config.project, *module.split(".")]
tpath[-1] = tpath[-1] + ".py"
sister_module_path = [*module.split(".")]
if os.path.exists(os.path.join(*tpath)):
# module = parent
url = "/".join(tpath[:-1])
elif os.path.exists(os.path.join(*sister_module_path)):
url = sister_module_path[0] + "/"
else:
url = self.config.project + "/" + module.replace(".", "/")
if url_git:
url = url_git + url
deps.add(url)
if len(deps) == 0:
return
try:
deps_json = os.path.join(path, "dependencies.json")
if os.path.exists(deps_json):
with open(deps_json, "r") as f:
# ipdb.set_trace()
deps_json = json.load(f)
if "env" in deps_json:
env = deps_json["env"]
else:
env = {}
else:
env = {}
except Exception as e:
print(f"Error reading {path}/dependencies.json, skipping env")
env = {}
with open(os.path.join(path, "dependencies.json"), "w") as f:
deps = {"dependencies": list(deps), "env": env}
json.dump(deps, f, indent=4)
print(f"Generated dependencies.json for {path}")
def __generate_pip_requirements(self, path):
# ipdb.set_trace()
try:
imports = pipreqs.get_all_imports(path)
# # import_info_remote = pipreqs.get_imports_info(imports)
# ipdb.set_trace()
import_info_local = pipreqs.get_import_local(imports)
except Exception as e:
print(f"Error generating requirements.txt for {path}")
print(e)
# raise e
return {}
self.__generate_mate_dependencies(path)
import_info = []
if path == self.config.project:
pipreqs.generate_requirements_file(
"requirements.txt", import_info_local, ">="
)
self.__add_index_url_to_requirements("requirements.txt")
else:
pipreqs.generate_requirements_file(
os.path.join(path, "requirements.txt"), import_info_local, ">="
)
self.__add_index_url_to_requirements(os.path.join(path, "requirements.txt"))
print(f"Generated requirements.txt for {path}")
# ipdb.set_trace()
for im in import_info_local:
name = im["name"]
version = im["version"]
res = {
"name": name,
"version": version,
}
import_info.append(res)
return {"pip": import_info}
def list(self, module: Optional[str] = None):
    """List local modules.

    :param module: specific module name to list; when None (the default),
        a summary of every local module is returned instead.
    :return: whatever the local data source returns for the query.
    """
    # idiom fix: identity comparison with None instead of '=='
    if module is None:
        return self.local.summary()
    return self.local.list(module)
def get_mate_summary(self):
return self.local.summary()
def install_package(self, package: Package):
self.local.install_package(package)
| ilex-paraguariensis/yerbamate | packages/yerbamate/api/data/module_repository.py | module_repository.py | py | 18,207 | python | en | code | 10 | github-code | 13 |
34030471989 | # Filename: q08_top2_scores.py
# Author: Justin Leow
# Created: 29/1/2013
# Modified: 29/1/2013
# Description: prompts the user to enter the number of students and each student's name and score,
# and finally displays the student with the highest score and the student with the second-highest score.
#input functions
def newFloat(inputString):
    """Prompt with inputString and return the response as a float.

    Typing "quit" exits the program; non-numeric input falls back to a
    default value of 75.
    """
    tempInput = input(inputString)
    if tempInput == "quit":
        quit()
    try:
        # float() raises ValueError for non-numeric text; catch only that —
        # the original bare `except:` would also swallow KeyboardInterrupt
        # and SystemExit. Also convert once instead of twice.
        return float(tempInput)
    except ValueError:
        print("Input is not a number. Utilizing default value of 75")
        return 75
def newInt(inputString):
    """Prompt with inputString and return the response as an int (>= 3).

    Typing "quit" exits the program. Non-integer input falls back to a
    default of 3; values of 2 or less are rejected (a class must have at
    least three students) and also yield 3.
    """
    tempInput = input(inputString)
    if tempInput == "quit":
        quit()
    try:
        # int() raises ValueError for non-integer text; catch only that
        # instead of a bare `except:`, and convert once instead of twice.
        value = int(tempInput)
    except ValueError:
        print("Input is not an integer. Utilizing default value of 3")
        return 3
    if value <= 2:
        print("A class must consist of three or more students.")
        return 3
    return value
def newString(inputString):
    """Prompt with inputString and return the entered text.

    Typing "quit" exits the program; "egg" triggers an easter-egg name.
    """
    response = input(inputString)
    if response == "quit":
        quit()
    if response == "egg":
        return "Tan Di Sheng the strong black woman who don't need no man"
    return response
# main loop: read a class roster, then report the top two scorers.
print("\ntype 'quit' to quit program at anytime.\n")
while True:
    studentNames = []
    studentScores = []
    # get user input
    numStudents = newInt("Enter number of students in class: ")
    for i in range(numStudents):
        studentNames.append(newString("Input name of student " + str(i+1) + ": "))
        studentScores.append(newFloat("Input " + studentNames[i] + "'s score: "))
    # Seed best/second-best from the first two students (newInt guarantees
    # at least three students, so indices 0 and 1 always exist).
    if studentScores[1] > studentScores[0]:
        highestStudent = [studentScores[1], studentNames[1]]
        secondStudent = [studentScores[0], studentNames[0]]
    else:
        highestStudent = [studentScores[0], studentNames[0]]
        secondStudent = [studentScores[1], studentNames[1]]
    # Scan the remaining students, starting at index 2 since the first two
    # are already accounted for. BUG FIX: the original only demoted the
    # leader, so a score falling strictly between second place and first
    # place was never recorded as the new second-highest.
    for i in range(2, numStudents):
        if studentScores[i] > highestStudent[0]:
            secondStudent = highestStudent
            highestStudent = [studentScores[i], studentNames[i]]
        elif studentScores[i] > secondStudent[0]:
            secondStudent = [studentScores[i], studentNames[i]]
    # output
    print("\nThe student with the highest score is {0} with a score of {1:.1f}".format(highestStudent[1], highestStudent[0]))
    print("The student with the second highest score is {0} with a score of {1:.1f} \n".format(secondStudent[1], secondStudent[0]))
| JLtheking/cpy5python | practical02/q08_top2_scores.py | q08_top2_scores.py | py | 2,574 | python | en | code | 0 | github-code | 13 |
9543288927 | # Uses model to predict bbox from image
from src.EldenRing.boss_detection.inference import BossDetectionReturn
# Get resized image dimensions for scaling purposes in the display
from src.EldenRing.boss_detection.config import RESIZE_WIDTH, RESIZE_HEIGHT
# Get path to images for testing purposes
from src.EldenRing.boss_detection.config import TRAIN_PATH
# Model retrieval
from src.EldenRing.boss_detection.config import OUT_DIR
# path definition and file retrieval
import os
# Handles imaging
import cv2
# yolo bbox translation for image display
from src.EldenRing.boss_detection.util import bbox_yolo_translation
# finds the corresponding bounding box Yolo file for a given image
def image2label_path(image_path, labels_path):
image_filename = image_path.split("/")[-1]
label_filename = image_filename.replace("png", "txt")
return os.path.join(labels_path, label_filename).replace("\\", "/")
# This will handle image outputs for display
def pred_boxes(pred_img, pred_output):
    """Draw predicted bounding boxes and confidence scores onto pred_img.

    :param pred_img: image array the boxes are drawn on (modified in place
        and also returned).
    :param pred_output: model output; a list whose first element is a dict
        with 'boxes' and 'scores' entries (torchvision detection format —
        TODO confirm against the model's inference code).
    :return: pred_img with green rectangles and score labels drawn on it.
    """
    # output is a list
    outputs = pred_output[0]
    boxes = outputs['boxes']
    # define image size
    image_width = pred_img.shape[1]
    image_height = pred_img.shape[0]
    # Scale factors from the model's resized input space back to the
    # original image resolution.
    image_scale_width = image_width / RESIZE_WIDTH
    image_scale_height = image_height / RESIZE_HEIGHT
    for i in range(len(boxes)):
        # grabs box dimensions and associated score
        box = [dim for dim in boxes[i]]
        score = outputs['scores'][i].item()
        # Limit length of score displayed (truncate, not round)
        score = str(score)[:5] if len(str(score)) > 5 else str(score)
        # Skips if no boss is found
        if len(box) < 1:
            continue
        # Grab bbox dimensions; box order is (left, top, right, bottom),
        # rescaled to original image coordinates.
        l = int(box[0] * image_scale_width)
        t = int(box[1] * image_scale_height)
        r = int(box[2] * image_scale_width)
        b = int(box[3] * image_scale_height)
        # BBox on image
        cv2.rectangle(pred_img, (l, t), (r, b), (0, 255, 0), 3)
        # Display confidence score
        cv2.putText(pred_img, str(score), (l, b), cv2.FONT_HERSHEY_SIMPLEX,
                    4.0, (0, 255, 0), 2, lineType=cv2.LINE_AA)
    return pred_img
# Adds true bounding box to image
def img_for_display(img_bbox_pair):
    """Return the image with its ground-truth bounding box drawn on it.

    :param img_bbox_pair: (image_path, yolo_label_path) pair.
    :return: RGB image array; a magenta rectangle is drawn when the label
        file contains at least one line.
    """
    # This will convert the image file to an image for display
    img_out = cv2.cvtColor(cv2.imread(img_bbox_pair[0], -1), cv2.COLOR_BGR2RGB)
    # Properly retrieve and format bounding box(es)
    with open(img_bbox_pair[1], 'r') as f:
        data = f.readlines()
    for dt in data:
        _, l, t, r, b = bbox_yolo_translation(dt, img_out.shape[0], img_out.shape[1])
    if len(data) > 0:
        # NOTE(review): l/t/r/b are overwritten on each loop iteration, so
        # only the LAST label line is drawn even if several boxes exist —
        # confirm whether multi-box labels are expected here.
        # noinspection PyUnboundLocalVariable
        cv2.rectangle(img_out, (l, t), (r, b), (255, 0, 255), 3)
        return img_out
    elif len(data) == 0:
        return img_out
    else:
        raise RuntimeError
# Find root folder
project_root = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
TRAIN_PATH = os.path.join(project_root, TRAIN_PATH).replace("..", '')
OUT_DIR = os.path.join(project_root, OUT_DIR).replace("..", '')
# Define the path for needed components
TRAIN_IMAGES_PATH = os.path.join(TRAIN_PATH, 'images').replace("\\", "/")
TRAIN_LABELS_PATH = os.path.join(TRAIN_PATH, 'labels').replace("\\", "/")
MODEL_PATH = os.path.join(OUT_DIR, 'model100.pth').replace("\\", "/")
# Creates list of image paths
train_images = [f for f in os.listdir(TRAIN_IMAGES_PATH) if os.path.isfile(os.path.join(TRAIN_IMAGES_PATH, f))]
train_image_paths = [os.path.join(TRAIN_IMAGES_PATH, f).replace("\\", "/") for f in train_images]
# Model Retrieval
detection_model = BossDetectionReturn(MODEL_PATH)
# Grab image and labels
img1_path = train_image_paths[0]
img1_true = image2label_path(img1_path, TRAIN_LABELS_PATH)
img2_path = train_image_paths[205]
img2_true = image2label_path(img2_path, TRAIN_LABELS_PATH)
img3_path = train_image_paths[361]
img3_true = image2label_path(img3_path, TRAIN_LABELS_PATH)
# convert image_path to image
img1 = cv2.cvtColor(cv2.imread(img1_path, -1), cv2.COLOR_BGR2RGB)
img2 = cv2.cvtColor(cv2.imread(img2_path, -1), cv2.COLOR_BGR2RGB)
img3 = cv2.cvtColor(cv2.imread(img3_path, -1), cv2.COLOR_BGR2RGB)
# run image through the model
img1_resized, img1_pred = detection_model.boss_detection(img1)
img2_resized, img2_pred = detection_model.boss_detection(img2)
img3_resized, img3_pred = detection_model.boss_detection(img3)
# Returns the image with predicted bbox
pred1_img = pred_boxes(img1.copy(), img1_pred)
pred2_img = pred_boxes(img2.copy(), img2_pred)
pred3_img = pred_boxes(img3.copy(), img3_pred)
# Put bounding box on image
img1 = img_for_display((img1_path, img1_true))
img2 = img_for_display((img2_path, img2_true))
img3 = img_for_display((img3_path, img3_true))
# Place Images for display in a list
img_display = [img1, pred1_img, img2, pred2_img, img3, pred3_img]
# Display images for checking it
for img in img_display:
cv2.imshow("image", cv2.resize(cv2.cvtColor(img, cv2.COLOR_RGB2BGR), (RESIZE_WIDTH, RESIZE_HEIGHT)))
cv2.waitKey(0)
| akingsley319/AI_Plays_DarkSouls | tests/EldenRing/boss_detection/boss_detection.py | boss_detection.py | py | 5,092 | python | en | code | 1 | github-code | 13 |
36470667831 | import re
import preprocessor as p
import re
from spacy.lang.en import English
from spacy.lang.en.stop_words import STOP_WORDS
def remove_stopword(text):
    """Return text with spaCy English stop words removed.

    Tokenises with a blank English pipeline and keeps only tokens whose
    lexeme is not flagged as a stop word, re-joined with single spaces.
    """
    # PERF FIX: building a spaCy pipeline is expensive; create it once and
    # reuse it across calls instead of re-instantiating per tweet.
    nlp = getattr(remove_stopword, "_nlp", None)
    if nlp is None:
        nlp = English()
        remove_stopword._nlp = nlp
    doc = nlp(text)
    filtered_sentence = [token.text for token in doc
                         if not nlp.vocab[token.text].is_stop]
    return " ".join(filtered_sentence)
# PERF FIX: compile the (large) emoji character-class once at import time
# instead of on every call. Matches emoji, pictographs, flags and a few
# stray formatting code points so they can be stripped from tweets.
_EMOJI_PATTERN = re.compile("["
                            u"\U0001F600-\U0001F64F"  # emoticons
                            u"\U0001F300-\U0001F5FF"  # symbols & pictographs
                            u"\U0001F680-\U0001F6FF"  # transport & map symbols
                            u"\U0001F1E0-\U0001F1FF"  # flags (iOS)
                            u"\U00002500-\U00002BEF"  # chinese char
                            u"\U00002702-\U000027B0"
                            u"\U00002702-\U000027B0"  # (duplicate range kept from original)
                            u"\U000024C2-\U0001F251"
                            u"\U0001f926-\U0001f937"
                            u"\U00010000-\U0010ffff"
                            u"\u2640-\u2642"
                            u"\u2600-\u2B55"
                            u"\u200d"
                            u"\u23cf"
                            u"\u23e9"
                            u"\u231a"
                            u"\ufe0f"  # dingbats
                            u"\u3030"
                            "‘"
                            "]+", flags=re.UNICODE)


def remove_emoji(string):
    """Return string with all emoji and related symbols removed."""
    return _EMOJI_PATTERN.sub(r'', string)
def strip_links(text):
    """Remove every http(s) URL from text and return the result."""
    pattern = re.compile('((https?):((//)|(\\\\))+([\w\d:#@%/;$()~_?\+-=\\\.&](#!)?)*)', re.DOTALL)
    for match in pattern.findall(text):
        # findall yields one tuple per match; element 0 is the full URL
        text = text.replace(match[0], '')
    return text
def strip_all_entities(text):
    """Drop hashtag (#...) and pipe (|...) tokens and collapse whitespace."""
    entity_prefixes = ('#', '|')
    kept = []
    for token in text.split():
        token = token.strip()
        if token and not token.startswith(entity_prefixes):
            kept.append(token)
    return ' '.join(kept)
def get_clean_tweet(tweet):
    """Run the full cleaning pipeline: stop words, links/entities, emoji."""
    cleaned = remove_stopword(tweet)
    cleaned = strip_all_entities(strip_links(cleaned))
    cleaned = remove_emoji(cleaned)
    # collapse any leftover runs of whitespace into single spaces
    return " ".join(cleaned.split())
8614117093 | from flask import Flask, request, jsonify, send_from_directory
from flask_cors import CORS
import openai
import os
from OpenSSL import SSL
from flask_limiter import Limiter
app = Flask(__name__, static_url_path="", static_folder="/srv/http/jb-gpt")
# Initialize the Limiter
""" limiter = Limiter(
app,
key_func=lambda: request.remote_addr, # Use the user's IP address as the key
default_limits=[["100 per day"], ["10 per minute"]], # Limit requests per user
strategy="fixed-window"
) """
CORS(app, origins=["https://bonewitz.net"])
openai.api_key = os.getenv("OPENAI_API_KEY")
@app.route('/chat', methods=['POST'])
def chat():
    """Proxy a chat request to the OpenAI chat-completions API.

    Expects a JSON body of the form {"messages": [...]} (OpenAI chat
    message format) and responds with {"message": <assistant reply>}.
    Returns 400 when the messages list is missing or empty.
    """
    data = request.json
    messages = data.get("messages")
    # NOTE(review): debug prints leak request/response contents to the
    # server log — consider removing or switching to logging at DEBUG level.
    print(request.json)
    if not messages:
        return jsonify({"error": "Missing messages parameter"}), 400
    response = openai.ChatCompletion.create(
        model="gpt-3.5-turbo",
        messages=messages,
        temperature=0.7
    )
    print(response)
    return jsonify({"message": response.choices[0].message.content if hasattr(response.choices[0].message, 'content') else "Error: Text not found in response"})
@app.route('/')
def index():
return send_from_directory("/srv/http/jb-gpt", "index.html")
if __name__ == '__main__':
app.run(host='127.0.0.1', port=2087)
| jbfly/jb-gpt | app.py | app.py | py | 1,315 | python | en | code | 0 | github-code | 13 |
327179541 | import pybio
import os
import sys
class Gff3():
    """Minimal GFF3 parser keeping a gene -> mRNA -> exon/CDS structure.

    Only 'gene', 'mRNA', 'exon' and 'CDS' records are interpreted; the
    optional ##FASTA section at the end of a GFF3 file is skipped.

    Attributes:
        genes: gene ID -> {'chromosome', 'strand', 'data', 'attributes'},
            where 'data' maps mRNA ID -> {'exons', 'CDS', 'attributes'}.
        mRNA_genes: mRNA ID -> parent gene ID.
    """

    def __init__(self, filename):
        """Parse the GFF3 file at filename into self.genes / self.mRNA_genes."""
        fasta_part = 0   # set once the ##FASTA section starts
        mRNA_part = 0    # are we inside an mRNA's feature block?
        self.genes = {}
        self.mRNA_genes = {}
        with open(filename, "rt") as f:  # FIX: close the handle (was leaked)
            r = f.readline()
            while r:
                # Everything after ##FASTA is sequence data, not features.
                if r.startswith("##FASTA") or fasta_part == 1:
                    fasta_part = 1
                    r = f.readline()
                    continue
                # comments and pragmas (including the ##gff-version header)
                if r.startswith("#"):
                    r = f.readline()
                    continue
                r = r.rstrip("\r\n").split("\t")
                if len(r) == 1:  # blank or malformed line
                    r = f.readline()
                    continue
                chromosome = r[0]
                feature_type = r[2]  # renamed from 'type' (shadowed builtin)
                start = int(r[3])
                stop = int(r[4])
                strand = r[6]
                # column 9: semicolon-separated key=value attribute pairs
                attributes = {}
                for att in r[-1].split(";"):
                    att = att.split("=")
                    attributes[att[0]] = att[1]
                if feature_type == "gene":
                    mRNA_part = 0
                    self.genes[attributes["ID"]] = {'chromosome': chromosome, 'strand': strand, 'data': {}, 'attributes': attributes}
                if feature_type == "mRNA":
                    mRNA_part = 1
                    gene_id = attributes["Parent"]
                    mRNA_id = attributes["ID"]
                    gene_data = self.genes.get(gene_id)["data"]
                    gene_data[mRNA_id] = {'exons': [], 'CDS': [], 'attributes': attributes}
                    self.genes[gene_id]["data"] = gene_data
                    self.mRNA_genes[mRNA_id] = gene_id
                if feature_type == "pseudogene" or feature_type == "tRNA":
                    mRNA_part = 0
                if mRNA_part == 0:
                    r = f.readline()
                    continue
                # BUG FIX: the original ignored 'exon' records entirely and
                # appended CDS coordinates to the exon list (leaving 'CDS'
                # always empty). Store each feature in its own list so
                # write_gtf() emits correct 'exon' and 'CDS' rows.
                if feature_type == "exon":
                    gene_id = self.mRNA_genes[attributes["Parent"]]
                    self.genes[gene_id]["data"][attributes["Parent"]]["exons"].append((start, stop))
                if feature_type == "CDS":
                    gene_id = self.mRNA_genes[attributes["Parent"]]
                    self.genes[gene_id]["data"][attributes["Parent"]]["CDS"].append((start, stop))
                r = f.readline()

    def write_gtf(self, filename):
        """Write the parsed genes to filename in GTF format.

        Emits one 'exon' row per exon and one 'CDS' row per coding segment,
        each tagged with gene_id/transcript_id attributes.
        """
        with open(filename, "wt") as f:  # FIX: close the handle (was leaked)
            for gene_id, gene_data in self.genes.items():
                gene_strand = gene_data["strand"]
                gene_chromosome = gene_data["chromosome"]
                transcripts = gene_data["data"]
                for mRNA_id, mRNA_data in transcripts.items():
                    for (exon_start, exon_stop) in mRNA_data["exons"]:
                        f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (gene_chromosome, "", "exon", exon_start, exon_stop, ".", gene_strand, ".", "gene_id \"%s\"; transcript_id \"%s\";" % (gene_id, mRNA_id)))
                for mRNA_id, mRNA_data in transcripts.items():
                    for (CDS_start, CDS_stop) in mRNA_data["CDS"]:
                        f.write("%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\t%s\n" % (gene_chromosome, "", "CDS", CDS_start, CDS_stop, ".", gene_strand, ".", "gene_id \"%s\"; transcript_id \"%s\";" % (gene_id, mRNA_id)))

    def return_genes(self):
        """Placeholder — not implemented in the original either."""
        pass
| grexor/pybio | pybio/data/Gff3.py | Gff3.py | py | 3,238 | python | en | code | 7 | github-code | 13 |
39762124742 | import _thread
import os
import time
from datetime import datetime
from queue import Queue
from shutil import rmtree
from typing import Dict, List
from watchdog.events import FileSystemEventHandler
from watchdog.observers import Observer
from .. import logger
from ..authentication.auth import Auth
from ..user_config import EngineConfig
from .engine import Engine
class FileBasedEngine(Engine):
"""
This class is a specialisation of the Engine class. It implements a file-based server to be used for testing
"""
def __init__(self, config: EngineConfig, auth: Auth):
super(FileBasedEngine, self).__init__(config, auth)
self._listening_list = []
logger.warning("TEST MODE")
self._polling_interval = 1 # for testing we can do a much faster polling time
self._host = "localhost"
self._port = ""
def pull(
self,
key: str,
key_only: bool = False,
rev: int = None,
prefix: bool = True,
min_rev: int = None,
max_rev: int = None,
) -> List[Dict[str, any]]:
"""
This method implements a query to the notification server for all the key-values associated to the key as input.
This key by default is a prefix, it can therefore return a set of key-values
:param key: input in the query
:param key_only: ignored for TestEngine
:param rev: ignored for TestEngine
:param prefix: if true the function will retrieve all the KV pairs starting with the key passed
:param min_rev: ignored for TestEngine
:param max_rev: ignored for TestEngine
:return: List of key-value pairs formatted as dictionary
"""
if key_only:
logger.warning("key_only option is disabled in TestMode")
if rev:
logger.warning("rev option is disabled in TestMode")
if min_rev:
logger.warning("min_rev option is disabled in TestMode")
if max_rev:
logger.warning("max_rev option is disabled in TestMode")
def read_key(k):
try:
with open(k, "r") as f:
v = f.read()
except Exception:
logger.warning(f"Reading of the {k} has failed")
logger.debug("", exc_info=True)
return
new_kv = {"key": k, "value": v.encode()}
new_kvs.append(new_kv)
logger.debug(f"Key: {k} pulled successfully")
logger.debug(f"Calling pull for {key}...")
new_kvs: List[Dict[str, bytes]] = []
if os.path.exists(key):
if os.path.isdir(key):
# first list the directory
for x in os.walk(key):
for fp in x[2]: # any file
kk: str = os.path.join(x[0], fp)
read_key(kk)
if not prefix: # we only look at the current directory, nothing deeper
break
else:
read_key(key)
logger.debug(f"Query for {key} completed")
logger.debug(f"{len(new_kvs)} keys found")
return new_kvs
def delete(self, key: str, prefix: bool = True) -> List[Dict[str, bytes]]:
"""
This method deletes all the keys associated to this key, the key is a prefix as default
:param key: key prefix to delete
:param prefix: if true the function will delete all the KV pairs starting with the key passed
:return: kvs deleted
"""
logger.debug(f"Calling delete for {key}...")
del_kvs: List[Dict[str, bytes]] = []
if os.path.exists(key):
if os.path.isdir(key):
# first list the directory
for x in os.walk(key):
for fp in x[2]: # any file
k: str = os.path.join(x[0], fp)
new_kv = {"key": k}
del_kvs.append(new_kv)
else:
new_kv = {"key": key}
del_kvs.append(new_kv)
# now the delete the directory or file
try:
if os.path.isdir(key):
rmtree(key)
else:
os.remove(key)
except Exception as e:
logger.warning(f"Cannot delete the key {key}, {e}")
logger.debug("", exc_info=True)
logger.debug(f"Delete request for key {key} completed")
return del_kvs
def push(self, kvs: List[Dict[str, any]], ks_delete: List[str] = None, ttl: int = None) -> bool:
"""
Method to submit a list of key-value pairs and delete a list of keys from the server as a single transaction
:param kvs: List of KV pair
:param ks_delete: List of keys to delete before the push of the new ones. Note that each key is read as a folder
:param ttl: Not supported in this implementation
:return: True if successful
"""
logger.debug("Calling push...")
# first delete the keys requested
if ks_delete is not None and len(ks_delete) != 0:
for kd in ks_delete:
if os.path.exists(kd):
try:
if os.path.isdir(kd):
rmtree(kd)
else:
os.remove(kd)
except Exception as e:
logger.warning(f"Cannot delete the key {kd}, {e}")
logger.debug("", exc_info=True)
# save the keys to files
for kv in kvs:
k = kv["key"]
v = kv["value"]
file_name: str = k.split("/").pop()
if not file_name == "":
folder_path = k[: -len(file_name)]
else: # if k ends in / it means it the base directory, this is used to saved the status
folder_path = k
k += "status"
if not os.path.exists(folder_path):
try:
os.makedirs(folder_path, exist_ok=True)
except OSError:
logger.warning(f"Cannot create the directory: {folder_path}")
logger.debug("", exc_info=True)
return False
try:
with open(k, "w+") as f:
f.write(v)
except Exception:
logger.warning(f"Saving of the {k} has failed")
logger.debug("", exc_info=True)
return False
logger.debug("Transaction completed")
return True
    def _polling(
        self,
        key: str,
        callback: callable([str, str]),
        channel: Queue,
        from_date: datetime = None,
        to_date: datetime = None,
    ):
        """
        This method implements the active polling by watching a directory
        with a filesystem observer instead of querying a remote server.

        NOTE(review): the annotation ``callable([str, str])`` evaluates to
        ``False`` at definition time; the intent is presumably
        ``Callable[[str, str], None]`` — left unchanged to avoid touching
        module imports.

        :param key: key (directory path) to watch as a prefix
        :param callback: function to call if any change happen
        :param channel: global communication channel among threads
        :param from_date: ignored for TestMode
        :param to_date: ignored for TestMode
        :return:
        """
        if from_date:
            logger.warning("from_date option is disabled in TestMode")
        if to_date:
            logger.warning("to_date option is disabled in TestMode")
        try:
            # first create the directory to watch
            if not os.path.exists(key):
                try:
                    os.makedirs(key, exist_ok=True)
                except OSError:
                    logger.warning(f"Cannot create the directory: {key}")
                    logger.debug("", exc_info=True)
                    return False

            # define a class to handle the new events
            class WatchdogHandler(FileSystemEventHandler):
                def __init__(self, engine, key, callback):
                    super().__init__()
                    self._engine = engine
                    self._key = key
                    self._callback = callback

                def on_modified(self, event):
                    # only react to file changes whose path ends with the watched key
                    if not event.is_directory and event.src_path.endswith(self._key):
                        kvs = self._engine.pull(key=event.src_path)
                        for kv in kvs:
                            k = kv["key"]
                            v = kv["value"].decode()
                            # skip the status file, it is bookkeeping, not a notification
                            if kv["key"].endswith("status"):
                                continue
                            logger.debug(f"Notification received for key {k}")
                            try:
                                # execute the trigger; a failing callback must
                                # not kill the observer thread
                                self._callback(k, v)
                            except Exception as ee:
                                logger.error(f"Error with notification trigger, exception: {type(ee)} {ee}")
                                logger.debug("", exc_info=True)

            # define the event handler
            event_handler = WatchdogHandler(engine=self, key=key, callback=callback)
            # create an observer and schedule the event handler
            observer = Observer()
            observer.schedule(event_handler, path=key, recursive=True)
            # start the observer in a daemon thread so we can stop it
            observer.start()
            # stop condition: keep polling while the key is still registered
            # (self._listeners is presumably maintained by the base Engine —
            # TODO confirm, it is not defined in this class)
            while key in self._listeners:
                time.sleep(0.1)
            # stop the observer
            observer.stop()
            observer.join()
        except Exception as e:
            logger.error(f"Error occurred during polling: {e}")
            logger.debug("", exc_info=True)
            # propagate the failure to the main thread so it does not hang silently
            _thread.interrupt_main()
| ecmwf/aviso | pyaviso/engine/file_based_engine.py | file_based_engine.py | py | 9,784 | python | en | code | 9 | github-code | 13 |
import cv2
import cv2
# NOTE: kept from the original file even though it is unused here.
from random import randrange

# Pre-trained OpenCV Haar-cascade data for frontal faces.
# (Fixed: the original file name was misspelled "defalut", so the classifier
# silently loaded nothing and could never detect a face.)
trained_face_data = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')

# To capture video from the default webcam (device 0).
webcam = cv2.VideoCapture(0)

# Iterate over frames until the user presses "q" or the camera stops.
while True:
    # read the current frame; the flag is False when no frame was grabbed
    successful_frame_read, frame = webcam.read()
    if not successful_frame_read:
        break

    # Haar cascades operate on grayscale images
    gray_img = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Detect faces => list of bounding boxes [x, y, w, h] -> can detect multiple
    face_location = trained_face_data.detectMultiScale(gray_img)

    # Draw a green rectangle around each detected face
    for (x, y, w, h) in face_location:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 255, 0), 2)

    # Show the annotated frame
    cv2.imshow('Clever Face Detector', frame)

    # waitKey(1) pumps the GUI event loop; exit cleanly on "q"
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

# Release the camera and close the window so the device is freed.
webcam.release()
cv2.destroyAllWindows()
| jspark3/Face-Detection | Active_Face_Detector.py | Active_Face_Detector.py | py | 855 | python | en | code | 1 | github-code | 13 |
import os
import random
import pandas as pd
import numpy as np
from enum import IntEnum
from scipy import stats
class Specialties(IntEnum):
    """Career-interest specialties used to weight course sampling.

    IntEnum so each member doubles as a stable integer id; the explicit
    numbering keeps the mapping order-independent.
    """
    SECURITY = 0
    BACKEND = 1
    FRONTEND = 2
    GRAPHICS = 3
    LOWLEVEL = 4
    ML = 5
def getEnrollmentProbabilities():
    """Load the enrollment tables for the six archived terms.

    Returns
    -------
    list of pandas.DataFrame
        One (Course, Description, Enrl) table per term, in chronological
        order: F18, W19, S19, F19, W20, S20.
    """
    # All schedules live next to this module; build each path once in a loop
    # instead of six copy-pasted calls.
    base = os.path.dirname(os.path.realpath(__file__))
    terms = ["fall2018", "winter2019", "spring2019",
             "fall2019", "winter2020", "spring2020"]
    return [getEnrollments(os.path.join(base, "schedules/{}.csv".format(term)))
            for term in terms]
def getEnrollments(filename):
    """Read a schedule CSV and return lecture rows as (Course, Description, Enrl)."""
    # Paths are resolved relative to this module; os.path.join leaves an
    # already-absolute filename unchanged, so absolute paths also work.
    full_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), filename)
    table = pd.read_csv(full_path, usecols=["Course", "Description", "Type", "Enrl"])
    # drop lab sections and the now-redundant Type column
    lectures = table[table.Type != "Lab"]
    return lectures.reset_index(drop=True).drop("Type", axis=1)
class ScheduleGenerator:
    """Randomly generate plausible course schedules per class year/specialty.

    Historic enrollment counts are re-weighted per class year and per
    specialty, then used as a discrete probability distribution to sample
    course choices.
    """
    def __init__(self):
        # one DataFrame per term: F18, W19, S19, F19, W20, S20
        self.enrollments = getEnrollmentProbabilities()
        # aggregated, re-weighted enrollment tables per class year
        self.third_years = self.thirdYearClasses()
        self.fourth_years = self.fourthYearClasses()
        # courses considered characteristic of each specialty
        self.specializations = {Specialties.SECURITY : ["CPE 321", "CSC 424", "CSC 429"],
                                Specialties.BACKEND : ["CSC 349", "CSC 365", "CSC 366", "CSC 468", "CSC 369"],
                                Specialties.FRONTEND : ["CSC 437", "CSC 436", "CSC 484", "CSC 486"],
                                Specialties.GRAPHICS : ["CSC 471", "CSC 476", "CSC 473", "CSC 474", "CSC 478", "CSC 371", "CSC 378", "CSC 377"],
                                Specialties.LOWLEVEL : ["CSC 453", "CPE 357", "CSC 431", "CPE 315"],
                                Specialties.ML : ["CSC 480", "CSC 481", "CSC 466", "CSC 482", "CSC 487"]}
        # human-readable specialty labels -> enum members
        self.spec_mappings = {"Back end" : Specialties.BACKEND,
                              "Front end" : Specialties.FRONTEND,
                              "Graphics/Games" : Specialties.GRAPHICS,
                              "Low level" : Specialties.LOWLEVEL,
                              "Security" : Specialties.SECURITY,
                              "Machine Learning" : Specialties.ML}
    def getSchedule(self, year, specialization):
        """Return a list of course strings for the given year/specialty.

        :param year: "first", "second", "third" or "fourth" (case-insensitive)
        :param specialization: a key of self.spec_mappings; only used for
            third/fourth years
        """
        specialization = self.spec_mappings[specialization]
        if year.lower() == "fourth":
            return self.getUpperClassSchedule(self.fourth_years.copy(deep=True), specialization)
        elif year.lower() == "third":
            return self.getUpperClassSchedule(self.third_years.copy(deep=True), specialization)
        elif year.lower() == "second":
            return self.getSecondYearSchedule()
        else:
            # anything else is treated as a first year
            return self.getFirstYearSchedule()
    def thirdYearClasses(self):
        """Aggregate enrollments re-weighted for a typical third year.

        Fall/Winter/Spring of the first academic year (index 0-2) zero out
        100/300/400-level courses; the later terms (index 3-5) zero out
        100/200-level plus a few senior-only courses and down-weight
        400-level courses to 30%.
        """
        modified = []
        for df in self.enrollments[:3]:
            new_df = df.copy(deep=True)
            # mask: zero enrollment for 100-, 400- and 300-level courses
            new_df['Enrl'] = new_df['Enrl'].mask(new_df['Course'].str.match(pat="[A-Z]{3} [143]"), 0)
            modified.append(new_df)
        for df in self.enrollments[3:]:
            new_df = df.copy(deep=True)
            new_df['Enrl'] = new_df['Enrl'].mask(new_df['Course'].str.match(pat="[A-Z]{3} [12]"), 0)
            # these senior courses are assumed out of reach for third years
            new_df['Enrl'] = new_df['Enrl'].mask(new_df['Course'].str.match(pat="CSC 430|CSC 431|CSC 445|CSC 453"), 0)
            new_df['Enrl'] = new_df['Enrl'].mask(new_df['Course'].str.match(pat="[A-Z]{3} 4"), round(new_df['Enrl'] * 0.3))
            modified.append(new_df)
        # sum the per-term counts into one table keyed by (Course, Description)
        return pd.concat(modified).groupby(['Course', 'Description'], as_index=False)['Enrl'].sum().reset_index(drop=True)
    def fourthYearClasses(self):
        """Aggregate enrollments re-weighted for a typical fourth year.

        Early terms down-weight 400-level courses to 30%; later terms
        down-weight 300-level courses to 50%. 100/200-level are zeroed
        everywhere.
        """
        modified = []
        for df in self.enrollments[:3]:
            new_df = df.copy(deep=True)
            new_df['Enrl'] = new_df['Enrl'].mask(new_df['Course'].str.match(pat="[A-Z]{3} [12]"), 0)
            new_df['Enrl'] = new_df['Enrl'].mask(new_df['Course'].str.match(pat="[A-Z]{3} 4"), round(new_df['Enrl'] * 0.3))
            modified.append(new_df)
        for df in self.enrollments[3:]:
            new_df = df.copy(deep=True)
            new_df['Enrl'] = new_df['Enrl'].mask(new_df['Course'].str.match(pat="[A-Z]{3} [12]"), 0)
            new_df['Enrl'] = new_df['Enrl'].mask(new_df['Course'].str.match(pat="[A-Z]{3} 3"), round(new_df['Enrl'] * 0.5))
            modified.append(new_df)
        return pd.concat(modified).groupby(['Course', 'Description'], as_index=False)['Enrl'].sum().reset_index(drop=True)
    def getFirstYearSchedule(self):
        # Fixed intro sequence.
        # NOTE(review): the first entry lacks the " - " separator used by
        # every other course string — confirm whether callers rely on it.
        return ["CPE 101 Fundamentals of Computer Science", "CPE 202 - Data Structures", "CPE 123 - Introduction to Computing"]
    def getSecondYearSchedule(self):
        """Sample up to 4 distinct courses from a hand-weighted list."""
        classes = ["CPE 202 - Data Structures", "CPE 203 - Project-Based Object-Oriented Programming and Design",
                   "CSC 225 - Introduction to Computer Organization", "CPE 101 - Fundamentals of Computer Science",
                   "CPE 123 - Introduction to Computing", "CSC 348 - Discrete Structures",
                   "CPE 315 - Computer Architecture"]
        # relative weights, normalized below into probabilities
        probs = [3, 3, 3, 3, 3, 1, 1]
        total_enroll = sum(probs)
        probs = [p / total_enroll for p in probs]
        # draw 4 with replacement, then dedupe — so fewer than 4 may remain
        indices = list(set(stats.rv_discrete(values=(np.arange(len(probs)), probs)).rvs(size=4)))
        return [classes[i] for i in indices]
    def getUpperClassSchedule(self, df, specialization):
        """Sample 4-9 courses, boosting the specialty's courses 12.5x."""
        spec_multipliers_regex = "|".join(self.specializations[specialization])
        df['Enrl'] = df['Enrl'].mask(df['Course'].str.match(pat=spec_multipliers_regex), round(df['Enrl'] * 12.5))
        df = self.normalize(df)
        # draw with replacement then dedupe, so the final count may be < 9
        choices = list(set(stats.rv_discrete(values=(np.arange(len(df)), df['Enrl'].tolist())).rvs(size=random.randint(4,9))))
        class_choices = df.iloc[list(choices), :]
        # choices = list(set(stats.rv_discrete(values=(np.arange(len(df)), df['Enrl'].tolist())).rvs(size=random.randint(4,9))))
        # courses = df["Course"].tolist()
        # desc = df["Description"].tolist()
        # final_choices = [courses[i] + " - " + desc[i] for i in choices]
        choices = []
        for index, row in class_choices.iterrows():
            choices.append(row["Course"] + " - " + row["Description"])
        return choices
    def normalize(self, df):
        """Scale the Enrl column in place so it sums to 1 (a distribution)."""
        total_enroll = df['Enrl'].sum()
        df['Enrl'] = df['Enrl'] / total_enroll
        return df
| Morgan-Swanson/StudentGenerator | backend/student/generateSchedule.py | generateSchedule.py | py | 6,705 | python | en | code | 0 | github-code | 13 |
# Structure as presented in CTCI
# 12 Oct 2020
# Revisited 27 Dec 2020
# Linked-List Structure
# - access to linked list via reference to the head node
class Node:
    """Singly linked list node; the list is accessed via a reference to its head."""

    def __init__(self, data=None):
        # payload and pointer to the following node
        self.data = data
        self.next = None

    def append_to_tail(self, data):
        """Walk to the last node and hang a new node holding `data` off it."""
        tail = self
        while tail.next is not None:
            tail = tail.next
        tail.next = Node(data)

    def __str__(self):
        """Render the list as 'a -> b -> ... -> NONE'."""
        parts = [str(self.data)]
        node = self.next
        while node is not None:
            parts.append(str(node.data))
            node = node.next
        return " -> ".join(parts) + " -> NONE"
# outside user-defined functions
def delete_node(head: "Node", data: int) -> "Node":
    """Remove the first node holding `data` and return the (possibly new) head.

    Returns None for an empty list and the unchanged head when `data` is
    absent. The annotations are string forward references so the function no
    longer evaluates `Node` at definition time (robust to import/definition
    order).
    """
    if head is None:
        return None
    if head.data == data:
        return head.next  # head itself matched: the list starts one node later
    cursor = head
    while cursor.next is not None:
        if cursor.next.data == data:
            cursor.next = cursor.next.next  # splice the matching node out
            return head  # head didn't change
        cursor = cursor.next
    return head  # data not found
if __name__ == "__main__":
ll = Node(1)
ll.append_to_tail(2)
ll.append_to_tail(3)
print(ll)
print('-'*10)
delete_node(ll, 2)
print(ll)
##############
# ADVICE #
##############
# Runner Technique (aka second pointer technique)
# one fast pointer and one slow pointer to iterate through
# ex: a fast pointer that moves 2 at a time will reach the end
# when slow pointer is midway. Now you know mid-way node!
# Having trouble solving a LL problem? try recursion my friend!
# Recursive Algos take at LEAST O(n) space
# All recursive algos CAN be implemented iteratively, although the iterative version may be more complex
student_db = [
    {'surname': 'Ivanov', 'name': 'Ivan', 'gender': 'male', 'age': '21'},
    {'surname': 'Petrov', 'name': 'Ivan', 'gender': 'male', 'age': '31'},
    {'surname': 'Sidorov', 'name': 'Pavel', 'gender': 'male', 'age': '25'},
    {'surname': 'Prokova', 'name': 'Alyona', 'gender': 'female', 'age': '21'},
    {'surname': 'Prokova', 'name': 'Karina', 'gender': 'female', 'age': '20'}
]  # in-memory "database": one dict per student, all values kept as strings

# Criteria are read as one line; '!' separates multiple values
# (e.g. "Ivan!male"). Note: a lone criterion with no '!' works too.
criteria_inp = input(
    'Enter criteria to search:\n'
    '(if not one , enter !): '
).split('!')
def search_stu(database, search):
    """Return students whose values contain every search term.

    :param database: list of student dicts (all values are strings)
    :param search: iterable of criteria; a record matches when every
        criterion appears among its values
    :return: list of matching records, copied and extended with a 1-based 'id'
        (the record's position in the database); the source dicts stay untouched
    """
    criteria = set(search)
    matches = []
    # enumerate instead of range(len(...)); copy only records that match
    for position, record in enumerate(database, start=1):
        if criteria.issubset(record.values()):
            found = record.copy()
            found['id'] = position
            matches.append(found)
    return matches
def print_search(result_list):
    """Pretty-print the found students, or '404' when nothing matched."""
    if not result_list:
        print('404')
        return
    for student in result_list:
        print(
            '\nStudent № {id}: {surname} {name} {gender} {age}'.format(**student)
        )
# Run the search against the in-memory database and show the results.
print_search(search_stu(student_db, criteria_inp))
| sudoom/Python_study | IT-Academy/Lesson 5/5.3.py | 5.3.py | py | 1,088 | python | en | code | 0 | github-code | 13 |
1626597497 |
import torch
import torch.nn as nn
import torch.nn.functional as F
def get_sbm_gated_gcn_dgl_encoder(params):
    """Factory: build a GatedGcnDglEncoder from a net_params dict."""
    return GatedGcnDglEncoder(net_params=params)
class GatedGcnDglEncoder(nn.Module):
    """Residual GatedGCN encoder.

    Adapted from https://github.com/graphdeeplearning/benchmarking-gnns

    ResGatedGCN: Residual Gated Graph ConvNets
    An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
    https://arxiv.org/pdf/1711.07553v2.pdf
    """
    def __init__(self, net_params):
        super().__init__()
        # node features may be one-hot and/or dense; edges carry a scalar feature
        node_in_dim = net_params['enc_in_dim']
        edge_in_dim = 1
        hidden_dim = net_params['enc_hidden_dim']
        out_dim = net_params.get('enc_out_dim', hidden_dim)
        n_layers = net_params['enc_layers']
        dropout = net_params['enc_dropout']

        self.embedding_h = nn.Linear(node_in_dim, hidden_dim)
        self.embedding_e = nn.Linear(edge_in_dim, hidden_dim)
        self.layers = nn.ModuleList([
            GatedGCNLayer(
                input_dim=hidden_dim,
                output_dim=hidden_dim,
                dropout=dropout,
                batch_norm=True,
                residual=True)
            for _ in range(n_layers)])
        # optional projection, only when the requested output width differs
        self.fc_out = nn.Linear(hidden_dim, out_dim) if out_dim != hidden_dim else None

    def forward(self, g):
        # embed raw node and edge features into the hidden dimension
        h = self.embedding_h(g.ndata['feat'])
        e = self.embedding_e(g.edata['feat'])
        # stack of residual gated graph convolutions
        for conv in self.layers:
            h, e = conv(g, h, e)
        return h if self.fc_out is None else self.fc_out(h)
class GatedGCNLayer(nn.Module):
    """GatedGCN Layer

    From https://github.com/graphdeeplearning/benchmarking-gnns

    ResGatedGCN: Residual Gated Graph ConvNets
    An Experimental Study of Neural Networks for Variable Graphs (Xavier Bresson and Thomas Laurent, ICLR 2018)
    https://arxiv.org/pdf/1711.07553v2.pdf

    Computes h_i' = A*h_i + sum_j sigma(e_ij) * B*h_j / (sum_j sigma(e_ij) + eps)
    with e_ij' = C*e_ij + D*h_j + E*h_i, using DGL's message passing.
    """
    def __init__(self, input_dim, output_dim, dropout, batch_norm, residual=False):
        super().__init__()
        self.in_channels = input_dim
        self.out_channels = output_dim
        self.dropout = dropout
        self.batch_norm = batch_norm
        self.residual = residual

        # a residual connection needs matching widths
        if input_dim != output_dim:
            self.residual = False

        # five linear maps: A (self), B (neighbor), C (edge), D (src), E (dst)
        self.A = nn.Linear(input_dim, output_dim, bias=True)
        self.B = nn.Linear(input_dim, output_dim, bias=True)
        self.C = nn.Linear(input_dim, output_dim, bias=True)
        self.D = nn.Linear(input_dim, output_dim, bias=True)
        self.E = nn.Linear(input_dim, output_dim, bias=True)
        self.bn_node_h = nn.BatchNorm1d(output_dim)
        self.bn_node_e = nn.BatchNorm1d(output_dim)

    def message_func(self, edges):
        # neighbor features and the new (pre-activation) edge features;
        # e_ij is also written back so `forward` can read g.edata['e']
        Bh_j = edges.src['Bh']
        e_ij = edges.data['Ce'] + edges.src['Dh'] + edges.dst['Eh']
        edges.data['e'] = e_ij
        return {'Bh_j': Bh_j, 'e_ij': e_ij}

    def reduce_func(self, nodes):
        # gated mean over incoming neighbors: sigma(e_ij) acts as a soft
        # attention weight, normalized by its sum (+1e-6 to avoid div by 0)
        Ah_i = nodes.data['Ah']
        Bh_j = nodes.mailbox['Bh_j']
        e = nodes.mailbox['e_ij']
        sigma_ij = torch.sigmoid(e)
        h = Ah_i + torch.sum(sigma_ij * Bh_j, dim=1) / \
            (torch.sum(sigma_ij, dim=1) + 1e-6)
        return {'h': h}

    def forward(self, g, h, e):
        # keep inputs for the residual connection
        h_in = h
        e_in = e

        # stage the five projections on the graph for message passing
        g.ndata['h'] = h
        g.ndata['Ah'] = self.A(h)
        g.ndata['Bh'] = self.B(h)
        g.ndata['Dh'] = self.D(h)
        g.ndata['Eh'] = self.E(h)
        g.edata['e'] = e
        g.edata['Ce'] = self.C(e)
        g.update_all(self.message_func, self.reduce_func)
        h = g.ndata['h']
        e = g.edata['e']

        # order matters: BN -> ReLU -> residual -> dropout
        if self.batch_norm:
            h = self.bn_node_h(h)
            e = self.bn_node_e(e)

        h = F.relu(h)
        e = F.relu(e)

        if self.residual:
            h = h_in + h
            e = e_in + e

        h = F.dropout(h, self.dropout, training=self.training)
        e = F.dropout(e, self.dropout, training=self.training)

        return h, e

    def __repr__(self):
        return '{}(in_channels={}, out_channels={})'.format(
            self.__class__.__name__,
            self.in_channels,
            self.out_channels)
| aripakman/amortized_community_detection | acp/encoders/sbm_gatedgcn_dgl_encoder.py | sbm_gatedgcn_dgl_encoder.py | py | 4,342 | python | en | code | 8 | github-code | 13 |
import ee
# import geopandas as gpd

# QGIS plug-in for Google Earth Engine; Map renders layers inside QGIS
from ee_plugin import Map

# The region outline was originally read from a shapefile (kept for reference):
# region_outline = gpd.read_file('/Users/siyuyang/Source/temp_data/WCS_land_use/outline/Orinoquia_outline.shp')
# region_outline_coords = list(region_outline.geometry[0].exterior.coords) # geometry object to list of coords
# ee_region_outline = ee.Geometry.Polygon(region_outline_coords)

# Hard-coded lon/lat bounding polygon used instead of the shapefile above
# (presumably approximates the Orinoquia outline — see commented path).
region_outline = ee.Geometry.Polygon([
    [-71.63069929490757, 8.096518229530101],
    [-67.04344372975483, 8.110020085498173],
    [-67.06369651370694, 4.221485566693199],
    [-71.63407475889959, 4.164102678828891],
    [-71.63069929490757, 8.096518229530101]])
def mask_S2_clouds(image):
    """Mask out cloudy and cirrus pixels using the Sentinel-2 QA60 band."""
    qa_band = image.select('QA60')

    # QA60 bits 10 and 11 flag opaque clouds and cirrus, respectively;
    # a clear pixel has both bits unset (zero).
    opaque_cloud_bit = 1 << 10
    cirrus_bit = 1 << 11

    clear = qa_band.bitwiseAnd(opaque_cloud_bit).eq(0).And(
        qa_band.bitwiseAnd(cirrus_bit).eq(0))
    return image.updateMask(clear)
# Sentinel-2 surface reflectance, restricted to the RGB + cloud-QA bands
# and to scenes intersecting the region of interest.
sentinel2_aoi = ee.ImageCollection('COPERNICUS/S2_SR')\
    .select(['B2', 'B3', 'B4', 'QA60'])\
    .filterBounds(region_outline)

# Cloud-free median composite over ~1.5 years: drop scenes >20% cloudy,
# mask remaining cloud/cirrus pixels, then take the per-pixel median.
sentinel2_median_image = sentinel2_aoi.filterDate('2019-01-01', '2020-06-26')\
    .filter(ee.Filter.lt('CLOUDY_PIXEL_PERCENTAGE', 20))\
    .map(mask_S2_clouds)\
    .median()

# Visualization parameters for a true-color (R=B4, G=B3, B=B2) rendering.
rgb_vis = {
    'min': 0.0,
    'max': 3000,
    'gamma': 1.3,
    'bands': ['B4', 'B3', 'B2']}

Map.setCenter(-68.6345, 6.0289, 10)
Map.addLayer(sentinel2_median_image, rgb_vis, 'Sentinel2 2019 - 2020 June RGB')

# Optional DEM layer, kept for reference:
# image = ee.Image('USGS/SRTMGL1_003')
# Map.addLayer(image, {'palette': ['black', 'white'], 'min': 0, 'max': 5000}, 'DEM')
from typing import Tuple, List
from scipy.spatial.distance import pdist, cdist, squareform
from scipy.spatial import cKDTree
from scipy import sparse
import numpy as np
import multiprocessing as mp
def _sparse_dok_get(m, fill_value=np.NaN):
"""Like m.toarray(), but setting empty values to `fill_value`, by
default `np.NaN`, rather than 0.0.
Parameters
----------
m : scipy.sparse.dok_matrix
fill_value : float
"""
mm = np.full(m.shape, fill_value)
for (x, y), value in m.items():
mm[x, y] = value
return mm
class DistanceMethods(object):
    """Mixin providing neighbor queries on top of a distance matrix.

    Subclasses must expose ``self.dists`` (a dense ndarray or a scipy sparse
    matrix) and ``self.max_dist``.
    """
    def find_closest(self, idx, max_dist=None, N=None):
        """find neighbors

        Find the (N) closest points (in the right set) to the point with
        index idx (in the left set).

        Parameters
        ----------
        idx : int
            Index of the point that the N closest neighbors
            are searched for.
        max_dist : float
            Maximum distance at which other points are searched
        N : int
            Number of points searched. If None, all neighbors within
            ``max_dist`` are returned.

        Returns
        -------
        ridx : numpy.ndarray
            Indices of the N closeset points to idx

        """
        if max_dist is None:
            max_dist = self.max_dist
        else:
            # a caller-supplied max_dist must agree with the instance setting
            if self.max_dist is not None and max_dist != self.max_dist:
                raise AttributeError(
                    "max_dist specified and max_dist != self.max_dist"
                )

        if isinstance(self.dists, sparse.spmatrix):
            dists = self.dists.getrow(idx)
        else:
            dists = self.dists[idx, :]

        if isinstance(dists, sparse.spmatrix):
            # sparse rows only store entries within max_dist by construction
            ridx = np.array([k[1] for k in dists.todok().keys()])
        elif max_dist is not None:
            ridx = np.where(dists <= max_dist)[0]
        else:
            ridx = np.arange(len(dists))

        # BUG FIX: N defaults to None, and `ridx.size > None` raised a
        # TypeError on Python 3 — only truncate when N was actually given.
        if N is not None and ridx.size > N:
            if isinstance(dists, sparse.spmatrix):
                selected_dists = dists[0, ridx].toarray()[0, :]
            else:
                selected_dists = dists[ridx]
            # stable sort keeps the original order among equal distances
            sorted_ridx = np.argsort(selected_dists, kind="stable")
            ridx = ridx[sorted_ridx][:N]

        return ridx
class MetricSpace(DistanceMethods):
    """
    A MetricSpace represents a point cloud together with a distance
    metric and possibly a maximum distance. It efficiently provides
    the distances between each point pair (when shorter than the
    maximum distance).

    Note: If a max_dist is specified a sparse matrix representation is
    used for the distances, which saves space and calculation time for
    large datasets, especially where max_dist << the size of the point
    cloud in space. However, it slows things down for small datasets.
    """

    def __init__(self, coords, dist_metric="euclidean", max_dist=None):
        """ProbabalisticMetricSpace class

        Parameters
        ----------
        coords : numpy.ndarray
            Coordinate array of shape (Npoints, Ndim)
        dist_metric : str
            Distance metric names as used by scipy.spatial.distance.pdist
        max_dist : float
            Maximum distance between points after which the distance
            is considered infinite and not calculated.
        """
        self.coords = coords.copy()
        self.dist_metric = dist_metric
        self.max_dist = max_dist
        # caches, filled lazily by the properties below
        self._tree = None
        self._dists = None

        # Fail fast on an invalid metric name. Mahalanobis needs more
        # observations than dimensions for its internal covariance estimate,
        # so it gets a larger probe slice.
        if self.dist_metric == 'mahalanobis':
            probe_rows = self.coords.shape[1] + 1
        else:
            probe_rows = 1
        pdist(self.coords[:probe_rows, :], metric=self.dist_metric)

    @property
    def tree(self):
        """If `self.dist_metric` is `euclidean`, a `scipy.spatial.cKDTree`
        instance of `self.coords`. Undefined otherwise."""
        if self.dist_metric != "euclidean":
            raise ValueError((
                "A coordinate tree can only be constructed "
                "for an euclidean space"
            ))

        # build lazily and cache
        if self._tree is None:
            self._tree = cKDTree(self.coords)
        return self._tree

    @property
    def dists(self):
        """A distance matrix of all point pairs. If `self.max_dist` is
        not `None` and `self.dist_metric` is set to `euclidean`, a
        `scipy.sparse.csr_matrix` sparse matrix is returned.
        """
        if self._dists is not None:
            return self._dists

        if self.max_dist is not None and self.dist_metric == "euclidean":
            # sparse representation: only pairs within max_dist are stored
            self._dists = self.tree.sparse_distance_matrix(
                self.tree,
                self.max_dist,
                output_type="coo_matrix"
            ).tocsr()
        else:
            self._dists = squareform(
                pdist(self.coords, metric=self.dist_metric)
            )
        return self._dists

    def diagonal(self, idx=None):
        """
        Return a diagonal matrix (as per
        :func:`squareform <scipy.spatial.distance.squareform>`),
        optionally for a subset of the points

        Parameters
        ----------
        idx : list
            list of indices that the diagonal matrix is calculated for.

        Returns
        -------
        diagonal : numpy.ndarray
            squareform matrix of the subset of coordinates
        """
        mat = self.dists
        if idx is not None:
            mat = mat[idx, :][:, idx]
        if isinstance(self.dists, sparse.spmatrix):
            # densify: missing pairs become inf, except the diagonal which
            # squareform requires to be 0
            mat = _sparse_dok_get(mat.todok(), np.inf)
            np.fill_diagonal(mat, 0)
        return squareform(mat)

    def __len__(self):
        return len(self.coords)
class MetricSpacePair(DistanceMethods):
    """
    A MetricSpacePair represents a set of point clouds (MetricSpaces).
    It efficiently provides the distances between each point in one
    point cloud and each point in the other point cloud (when shorter
    than the maximum distance). The two point clouds are required to
    have the same distance metric as well as maximum distance.
    """
    def __init__(self, ms1, ms2):
        """
        Parameters
        ----------
        ms1 : MetricSpace
        ms2 : MetricSpace

        Note: `ms1` and `ms2` need to have the same `max_dist` and
        `distance_metric`.
        """
        # validate that the two spaces are compatible
        if ms1.dist_metric != ms2.dist_metric:
            raise ValueError(
                "Both MetricSpaces need to have the same distance metric"
            )
        if ms1.max_dist != ms2.max_dist:
            raise ValueError(
                "Both MetricSpaces need to have the same max_dist"
            )
        self.ms1 = ms1
        self.ms2 = ms2
        self._dists = None  # lazy cache for the cross-distance matrix

    @property
    def dist_metric(self):
        # both spaces share one metric (validated in __init__)
        return self.ms1.dist_metric

    @property
    def max_dist(self):
        # both spaces share one max_dist (validated in __init__)
        return self.ms1.max_dist

    @property
    def dists(self):
        """A distance matrix of all point pairs. If `self.max_dist` is
        not `None` and `self.dist_metric` is set to `euclidean`, a
        `scipy.sparse.csr_matrix` sparse matrix is returned.
        """
        if self._dists is not None:
            return self._dists

        if self.max_dist is not None and self.dist_metric == "euclidean":
            # sparse cross-distance matrix via the two KD-trees
            self._dists = self.ms1.tree.sparse_distance_matrix(
                self.ms2.tree,
                self.max_dist,
                output_type="coo_matrix"
            ).tocsr()
        else:
            self._dists = cdist(
                self.ms1.coords,
                self.ms2.coords,
                metric=self.ms1.dist_metric
            )
        return self._dists
class ProbabalisticMetricSpace(MetricSpace):
    """Like MetricSpace but samples the distance pairs only returning a
    `samples` sized subset. `samples` can either be a fraction of
    the total number of pairs (float < 1), or an integer count.
    """
    def __init__(
            self,
            coords,
            dist_metric="euclidean",
            max_dist=None,
            samples=0.5,
            rnd=None
    ):
        """ProbabalisticMetricSpace class

        Parameters
        ----------
        coords : numpy.ndarray
            Coordinate array of shape (Npoints, Ndim)
        dist_metric : str
            Distance metric names as used by scipy.spatial.distance.pdist
        max_dist : float
            Maximum distance between points after which the distance
            is considered infinite and not calculated.
        samples : float, int
            Number of samples (int) or fraction of coords to sample (float < 1).
        rnd : numpy.random.RandomState, int
            Random state to use for the sampling.
        """
        self.coords = coords.copy()
        self.dist_metric = dist_metric
        self.max_dist = max_dist
        self.samples = samples
        # accept no RNG (module-level), a ready RandomState, or a seed
        if rnd is None:
            self.rnd = np.random
        elif isinstance(rnd, np.random.RandomState):
            self.rnd = rnd
        else:
            self.rnd = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(rnd)))

        # lazy caches for the two index samples, their trees, and distances
        self._lidx = None
        self._ridx = None
        self._ltree = None
        self._rtree = None
        self._dists = None
        # Do a very quick check to see throw exceptions
        # if self.dist_metric is invalid...
        pdist(self.coords[:1, :], metric=self.dist_metric)

    @property
    def sample_count(self):
        # int -> absolute count; float -> fraction of the point cloud
        if isinstance(self.samples, int):
            return self.samples
        return int(self.samples * len(self.coords))

    @property
    def lidx(self):
        """The sampled indices into `self.coords` for the left sample."""
        if self._lidx is None:
            self._lidx = self.rnd.choice(len(self.coords), size=self.sample_count, replace=False)
        return self._lidx

    @property
    def ridx(self):
        """The sampled indices into `self.coords` for the right sample."""
        if self._ridx is None:
            self._ridx = self.rnd.choice(len(self.coords), size=self.sample_count, replace=False)
        return self._ridx

    @property
    def ltree(self):
        """If `self.dist_metric` is `euclidean`, a `scipy.spatial.cKDTree`
        instance of the left sample of `self.coords`. Undefined otherwise."""

        # only Euclidean supported
        if self.dist_metric != "euclidean":
            raise ValueError((
                "A coordinate tree can only be constructed "
                "for an euclidean space"
            ))

        if self._ltree is None:
            self._ltree = cKDTree(self.coords[self.lidx, :])
        return self._ltree

    @property
    def rtree(self):
        """If `self.dist_metric` is `euclidean`, a `scipy.spatial.cKDTree`
        instance of the right sample of `self.coords`. Undefined otherwise."""

        # only Euclidean supported
        if self.dist_metric != "euclidean":
            raise ValueError((
                "A coordinate tree can only be constructed "
                "for an euclidean space"
            ))

        if self._rtree is None:
            self._rtree = cKDTree(self.coords[self.ridx, :])
        return self._rtree

    @property
    def dists(self):
        """A distance matrix of the sampled point pairs as a
        `scipy.sparse.csr_matrix` sparse matrix. """
        if self._dists is None:
            max_dist = self.max_dist
            if max_dist is None:
                # no cutoff requested: use the largest representable float
                max_dist = np.finfo(float).max
            # distances between the two samples, indexed in sample space
            dists = self.ltree.sparse_distance_matrix(
                self.rtree,
                max_dist,
                output_type="coo_matrix"
            ).tocsr()
            # Remap from sample indices back to original coordinate indices:
            # grow the matrix to full size first, then rewrite CSR `indices`
            # (columns) through ridx, convert to CSC so `indices` become rows
            # and rewrite those through lidx, and return to CSR. The order of
            # these steps is essential.
            dists.resize((len(self.coords), len(self.coords)))
            dists.indices = self.ridx[dists.indices]
            dists = dists.tocsc()
            dists.indices = self.lidx[dists.indices]
            dists = dists.tocsr()
            self._dists = dists
        return self._dists
# Subfunctions used in RasterEquidistantMetricSpace
# (outside class so that they can be pickled by multiprocessing)
def _get_disk_sample(
coords: np.ndarray,
center: Tuple[float, float],
center_radius: float,
rnd_func: np.random.RandomState,
sample_count: int
):
"""
Subfunction for RasterEquidistantMetricSpace.
Calculates the indexes of a subsample in a disk "center sample".
Same parameters as in the class.
"""
# First index: preselect samples in a disk of certain radius
dist_center = np.sqrt((coords[:, 0] - center[0]) ** 2 + (
coords[:, 1] - center[1]) ** 2)
idx1 = dist_center < center_radius
count = np.count_nonzero(idx1)
indices1 = np.argwhere(idx1)
# Second index: randomly select half of the valid pixels,
# so that the other half can be used by the equidist
# sample for low distances
indices2 = rnd_func.choice(count, size=min(count, sample_count), replace=False)
if count != 1:
return indices1[indices2].squeeze()
else:
return indices1[indices2][0]
def _get_successive_ring_samples(
coords: np.ndarray,
center: Tuple[float, float],
equidistant_radii: List[float],
rnd_func: np.random.RandomState, sample_count: int
):
"""
Subfunction for RasterEquidistantMetricSpace.
Calculates the indexes of several subsamples within disks,
"equidistant sample". Same parameters as in the class.
"""
# First index: preselect samples in a ring of certain inside radius and outside radius
dist_center = np.sqrt((coords[:, 0] - center[0]) ** 2 + (coords[:, 1] - center[1]) ** 2)
idx = np.logical_and(
dist_center[None, :] >= np.array(equidistant_radii[:-1])[:, None],
dist_center[None, :] < np.array(equidistant_radii[1:])[:, None]
)
# Loop over an iterative sampling in rings
list_idx = []
for i in range(len(equidistant_radii) - 1):
idx1 = idx[i, :]
count = np.count_nonzero(idx1)
indices1 = np.argwhere(idx1)
# Second index: randomly select half of the valid pixels, so that the other half can be used by the equidist
# sample for low distances
indices2 = rnd_func.choice(count, size=min(count, sample_count), replace=False)
sub_idx = indices1[indices2]
if count > 1:
list_idx.append(sub_idx.squeeze())
elif count == 1:
list_idx.append(sub_idx[0])
return np.concatenate(list_idx)
def _get_idx_dists(
        coords: np.ndarray,
        center: Tuple[float, float],
        center_radius: float,
        equidistant_radii: List[float],
        rnd_func: np.random.RandomState,
        sample_count: int,
        max_dist: float,
        i: int,
        imax: int,
        verbose: bool
):
    """
    Subfunction for RasterEquidistantMetricSpace.
    Calculates the pairwise distances between a list of pairs of "center" and "equidistant" ensembles.
    Same parameters as in the class.
    """
    if verbose:
        print('Working on subsample ' + str(i+1) + ' out of ' + str(imax))

    # draw the two index ensembles: a disk around the center and a set of
    # concentric rings of increasing radius
    center_idx = _get_disk_sample(
        coords=coords, center=center,
        center_radius=center_radius,
        rnd_func=rnd_func,
        sample_count=sample_count
    )
    rings_idx = _get_successive_ring_samples(
        coords=coords,
        center=center,
        equidistant_radii=equidistant_radii,
        rnd_func=rnd_func,
        sample_count=sample_count
    )

    # pairwise distances (up to max_dist) between the two ensembles
    center_tree = cKDTree(coords[center_idx, :])
    rings_tree = cKDTree(coords[rings_idx, :])
    pairwise = center_tree.sparse_distance_matrix(
        rings_tree,
        max_dist,
        output_type="coo_matrix"
    )

    # map the COO row/col positions back to indices into `coords`
    return pairwise.data, center_idx[pairwise.row], rings_idx[pairwise.col]
def _mp_wrapper_get_idx_dists(argdict: dict):
    """
    Multiprocessing wrapper for get_idx_dists.
    """
    # Pool.map passes a single positional argument to the worker, so the
    # keyword arguments of _get_idx_dists are bundled into one dict.
    return _get_idx_dists(**argdict)
class RasterEquidistantMetricSpace(MetricSpace):
    """Like ProbabilisticMetricSpace but only applies to Raster data (2D gridded data) and
    samples iteratively an `equidistant` subset within distances to a 'center' subset.
    Subsets can either be a fraction of the total number of pairs (float < 1), or an integer count.
    The 'center' subset corresponds to a disk centered on a point of the grid for which the location
    randomly varies and can be redrawn and aggregated for several runs. The corresponding 'equidistant'
    subset consists of a concatenation of subsets drawn from rings with radius gradually increasing
    until the maximum extent of the grid is reached.
    To define the subsampling, several parameters are available:
    - The raw number of samples corresponds to the samples that will be drawn in each central disk.
    Along with the ratio of samples drawn (see below), it will automatically define the radius
    of the disk and rings for subsampling.
    Note that the number of samples drawn will be repeatedly drawn for each equidistant rings
    at a given radius, resulting in a several-fold amount of total samples for the equidistant
    subset.
    - The ratio of subsample defines the density of point sampled within each subset. It
    defaults to 20%.
    - The number of runs corresponds to the number of random center points repeated during the
    subsampling. It defaults to a sampling of 1% of the grid with center subsets.
    Alternatively, one can supply:
    - The multiplicative factor to derive increasing rings radii, set as squareroot of 2 by
    default in order to conserve a similar area for each ring and verify the sampling ratio.
    Or directly:
    - The radius of the central disk subset.
    - A list of radii for the equidistant ring subsets.
    When providing those spatial parameters, all other sampling parameters will be ignored
    except for the raw number of samples to draw in each subset.
    """

    def __init__(
            self,
            coords,
            shape,
            extent,
            samples=100,
            ratio_subsample=0.2,
            runs=None,
            n_jobs=1,
            exp_increase_fac=np.sqrt(2),
            center_radius=None,
            equidistant_radii=None,
            max_dist=None,
            dist_metric="euclidean",
            rnd=None,
            verbose=False
    ):
        """RasterEquidistantMetricSpace class

        Parameters
        ----------
        coords : numpy.ndarray
            Coordinate array of shape (Npoints, 2)
        shape : tuple[int, int]
            Shape of raster (X, Y)
        extent : tuple[float, float, float, float]
            Extent of raster (Xmin, Xmax, Ymin, Ymax)
        samples : float, int
            Number of samples (int) or fraction of coords to sample (float < 1).
        ratio_subsample:
            Ratio of samples drawn within each subsample.
        runs : int
            Number of subsamplings based on a random center point
        n_jobs : int
            Number of jobs to use in multiprocessing for the subsamplings.
        exp_increase_fac : float
            Multiplicative factor of increasing radius for ring subsets
        center_radius: float
            Radius of center subset, overrides other sampling parameters.
        equidistant_radii: list
            List of radii of ring subset, overrides other sampling parameters.
        dist_metric : str
            Distance metric names as used by scipy.spatial.distance.pdist
        max_dist : float
            Maximum distance between points after which the distance
            is considered infinite and not calculated.
        verbose : bool
            Whether to print statements in the console
        rnd : numpy.random.RandomState, int
            Random state to use for the sampling.
        """
        if dist_metric != "euclidean":
            raise ValueError((
                "A RasterEquidistantMetricSpace class can only be constructed "
                "for an euclidean space"
            ))

        self.coords = coords.copy()
        self.dist_metric = dist_metric
        self.shape = shape
        self.extent = extent
        # Mean grid resolution, averaged over the X and Y pixel sizes.
        self.res = np.mean([(extent[1] - extent[0])/(shape[0]-1),(extent[3] - extent[2])/(shape[1]-1)])

        # if the maximum distance is not specified, find the maximum possible distance from the extent
        if max_dist is None:
            max_dist = np.sqrt((extent[1] - extent[0])**2 + (extent[3] - extent[2])**2)
        self.max_dist = max_dist

        self.samples = samples

        if runs is None:
            # If None is provided, try to sample center samples for about one percent of the area
            runs = int((self.shape[0] * self.shape[1]) / self.samples * 1/100.)
        self.runs = runs

        self.n_jobs = n_jobs

        # NOTE(review): the default branch yields a numpy Generator while the
        # seeded branch yields a RandomState; both expose .choice, but the two
        # APIs are not identical — confirm the mix is intended.
        if rnd is None:
            self.rnd = np.random.default_rng()
        elif isinstance(rnd, np.random.RandomState):
            self.rnd = rnd
        else:
            self.rnd = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(rnd)))

        # Radius of center subsample, based on sample count
        # If None is provided, the disk is defined with the exact size to hold the number of percentage of samples
        # defined by the user
        if center_radius is None:
            center_radius = np.sqrt(1. / ratio_subsample * self.sample_count / np.pi) * self.res
            if verbose:
                print('Radius of center disk sample for sample count of '+str(self.sample_count)+ ' and subsampling ratio'
                      ' of '+str(ratio_subsample)+': '+str(center_radius))
        self._center_radius = center_radius

        # Radii of equidistant ring subsamples
        # If None is provided, the rings are defined with exponentially increasing radii with a factor sqrt(2), which
        # means each ring will have just enough area to sample at least the number of samples desired, and same
        # for each of the following, due to:
        # (sqrt(2)R)**2 - R**2 = R**2
        if equidistant_radii is None:
            equidistant_radii = [0.]
            increasing_rad = self._center_radius
            while increasing_rad < self.max_dist:
                equidistant_radii.append(increasing_rad)
                increasing_rad *= exp_increase_fac
            equidistant_radii.append(self.max_dist)
            if verbose:
                print('Radii of equidistant ring samples for increasing factor of ' + str(exp_increase_fac) + ': ')
                print(equidistant_radii)
        self._equidistant_radii = equidistant_radii

        self.verbose = verbose

        # Index and KDTree of center sample
        self._cidx = None
        self._ctree = None

        # Index and KDTree of equidistant sample
        self._eqidx = None
        self._eqtree = None

        self._centers = None
        self._dists = None
        # Do a very quick check to see throw exceptions
        # if self.dist_metric is invalid...
        pdist(self.coords[:1, :], metric=self.dist_metric)

    @property
    def sample_count(self):
        # Integer sample count, or a fraction of the total point count.
        if isinstance(self.samples, int):
            return self.samples
        return int(self.samples * len(self.coords))

    @property
    def cidx(self):
        """The sampled indices into `self.coords` for the center sample."""
        return self._cidx

    @property
    def ctree(self):
        """If `self.dist_metric` is `euclidean`, a `scipy.spatial.cKDTree`
        instance of the center sample of `self.coords`. Undefined otherwise."""

        # only Euclidean supported
        if self.dist_metric != "euclidean":
            raise ValueError((
                "A coordinate tree can only be constructed "
                "for an euclidean space"
            ))

        # Lazily built: one tree per run's center subsample.
        if self._ctree is None:
            self._ctree = [cKDTree(self.coords[self.cidx[i], :]) for i in range(len(self.cidx))]
        return self._ctree

    @property
    def eqidx(self):
        """The sampled indices into `self.coords` for the equidistant sample."""
        return self._eqidx

    @property
    def eqtree(self):
        """If `self.dist_metric` is `euclidean`, a `scipy.spatial.cKDTree`
        instance of the equidistant sample of `self.coords`. Undefined otherwise."""

        # only Euclidean supported
        if self._eqtree is None:
            self._eqtree = [cKDTree(self.coords[self.eqidx[i], :]) for i in range(len(self.eqidx))]
        return self._eqtree

    @property
    def dists(self):
        """A distance matrix of the sampled point pairs as a
        `scipy.sparse.csr_matrix` sparse matrix. """

        # Derive distances
        if self._dists is None:

            # NOTE(review): at most len(coords) centers are drawn here, but the
            # loops below iterate range(self.runs) — if runs > len(coords) this
            # would index past `centers`; confirm runs is always smaller.
            idx_center = self.rnd.choice(len(self.coords), size=min(self.runs, len(self.coords)), replace=False)

            # Each run has a different center
            centers = self.coords[idx_center]

            # Running on a single core: for loop
            if self.n_jobs == 1:

                list_dists, list_cidx, list_eqidx = ([] for i in range(3))

                for i in range(self.runs):

                    center = centers[i]
                    dists, cidx, eqidx = _get_idx_dists(self.coords, center=center, center_radius=self._center_radius,
                                                        equidistant_radii=self._equidistant_radii, rnd_func=self.rnd,
                                                        sample_count=self.sample_count, max_dist=self.max_dist, i=i,
                                                        imax=self.runs, verbose=self.verbose)
                    list_dists.append(dists)
                    list_cidx.append(cidx)
                    list_eqidx.append(eqidx)

            # Running on several cores: multiprocessing
            else:
                # Arguments to pass: only centers and loop index for verbose are changing
                argsin = [{'center': centers[i], 'coords': self.coords, 'center_radius': self._center_radius,
                           'equidistant_radii': self._equidistant_radii, 'rnd_func': self.rnd,
                           'sample_count': self.sample_count, 'max_dist': self.max_dist, 'i': i, 'imax': self.runs,
                           'verbose': self.verbose} for i in range(self.runs)]

                # Process in parallel
                pool = mp.Pool(self.n_jobs, maxtasksperchild=1)
                outputs = pool.map(_mp_wrapper_get_idx_dists, argsin, chunksize=1)
                pool.close()
                pool.join()

                # Get lists of outputs
                list_dists, list_cidx, list_eqidx = list(zip(*outputs))

            # Define class objects
            self._centers = centers
            self._cidx = list_cidx
            self._eqidx = list_eqidx

            # concatenate the coo matrixes
            d = np.concatenate(list_dists)
            c = np.concatenate(list_cidx)
            eq = np.concatenate(list_eqidx)

            # remove possible duplicates (that would be summed by default)
            # from https://stackoverflow.com/questions/28677162/ignoring-duplicate-entries-in-sparse-matrix

            # Stable solution but a bit slow
            # c, eq, d = zip(*set(zip(c, eq, d)))
            # dists = sparse.csr_matrix((d, (c, eq)), shape=(len(self.coords), len(self.coords)))

            # Solution 5+ times faster than the preceding, but relies on _update() which might change in scipy (which
            # only has an implemented method for summing duplicates, and not ignoring them yet)
            dok = sparse.dok_matrix((len(self.coords), len(self.coords)))
            dok._update(zip(zip(c, eq), d))
            dists = dok.tocsr()

            self._dists = dists

        return self._dists
| mmaelicke/scikit-gstat | skgstat/MetricSpace.py | MetricSpace.py | py | 28,273 | python | en | code | 201 | github-code | 13 |
25650519241 | def load_input() -> str:
    """Read the whole puzzle input from stdin (fd 0) and strip outer whitespace."""
    with open(0) as src_file:
        return src_file.read().strip()
def solve(signal: str, dist: int) -> int:
    """Return the position just past the first run of `dist` distinct characters.

    Scans every window of length `dist` in `signal` and returns the 1-based
    position of the last character of the first all-distinct window, or -1 if
    no such window exists.

    Bug fix: the previous sliding-marker loop stopped before testing the final
    window (e.g. solve("abcd", 4) returned -1 instead of 4); iterating the
    window end up to len(signal) inclusive covers it.
    """
    for end in range(dist, len(signal) + 1):
        # Window is signal[end - dist:end]; a set collapses repeated characters.
        if len(set(signal[end - dist:end])) == dist:
            return end
    return -1
if __name__ == "__main__":
    # Advent of Code day 6: part 1 uses 4-character markers, part 2 uses 14.
    signal = load_input()
    print(solve(signal, 4))
    print(solve(signal, 14))
| MrRys/AoC-2022 | d6/d6.py | d6.py | py | 462 | python | en | code | 0 | github-code | 13 |
13247397496 | import random
def choose_numbers():
    """Prompt the player until six distinct numbers in range 1-49 are collected.

    Re-prompts on non-numeric input, out-of-range values and repeated numbers;
    the previous version consumed exactly six prompts and silently returned
    fewer than six numbers when any of them was invalid.

    Returns
    -------
    list
        The six chosen numbers, sorted ascending.
    """
    list_of_user_numbers = []
    while len(list_of_user_numbers) < 6:
        try:
            a = int(input("Choose number: "))
        except ValueError:
            print("This is not a number")
            continue
        if a not in range(1, 50):
            print("Choose number from range 1-49")
        elif a in list_of_user_numbers:
            print("You already picked that number")
        else:
            list_of_user_numbers.append(a)
    return sorted(list_of_user_numbers)
def drawn_numbers():
    """Return six unique lottery numbers drawn from 1-49, sorted ascending."""
    return sorted(random.sample(range(1, 50), 6))
def comparison():
    """Compare the player's numbers against the computer's draw.

    Prints both lists and returns a message with the number of matches.
    """
    my_numbers = choose_numbers()
    computer_numbers = drawn_numbers()
    print("Drawn numbers are: " + str(computer_numbers))
    print("Your numbers are: " + str(my_numbers))
    # Keep every player number that also appears in the draw.
    common_numbers = [item for item in my_numbers if item in computer_numbers]
    return "You guessed: " + str(len(common_numbers)) + " numbers"
# Script entry point: play one round and print how many numbers were guessed.
print(comparison())
| agnieszka2201pn/lotto | app.py | app.py | py | 1,214 | python | en | code | 0 | github-code | 13 |
9474110282 | # File: test_linkedlist.py
# Author: Chad Palmer
# Date: May 2020
# Description:
# This file tests the LinkedList class with Test Driven Development
# in mind. Linked lists are retrieved as a python list for easy
# value comparisions. The __str__ dunder method in the Node class
# makes it possible to run these test cases against classes with
# any primative data type stored in the list. For future improvement,
# tests should be isolated ( count() is a perfect example of this).
import unittest
import numpy as np
from linkedlist import LinkedList
class TestLinkedList(unittest.TestCase):
    """Unit tests for the LinkedList class (append, push, pop, count, getList)."""

    # NOTE(review): decorating setUp/tearDown with @classmethod is unusual for
    # unittest — the fixtures then live on the class rather than the instance;
    # confirm per-test isolation is not required here.
    @classmethod
    def setUp(self):
        self.appendList = LinkedList()
        self.countList = LinkedList()

    @classmethod
    def tearDown(self):
        pass

    def test_append(self):
        # Mixed types are appended; np.array coerces the retrieved list to a
        # common string dtype, hence the string-valued expectations below.
        self.appendList.append(5)
        self.appendList.append('a')
        self.appendList.append(5.5)
        self.appendList.append(-3)
        self.appendList.append('test string')
        arrayList = np.array(self.appendList.getList())
        cnt = self.appendList.count()
        self.assertEqual(arrayList[cnt - 5], '5')
        self.assertEqual(arrayList[cnt - 4], 'a')
        self.assertEqual(arrayList[cnt - 3], '5.5')
        self.assertEqual(arrayList[cnt - 2], '-3')
        self.assertEqual(arrayList[cnt - 1], 'test string')

    def test_count(self):
        # Two pushes + two appends - one pop should leave three elements.
        self.countList.push(1)
        self.countList.push(2)
        self.countList.append(3)
        self.countList.append(4)
        self.countList.pop()
        self.assertEqual(self.countList.count(), 3)
# Run the test suite when executed directly.
if __name__ == '__main__':
    unittest.main()
| cpalmer-atx/python-data-structures | test_linkedlist.py | test_linkedlist.py | py | 1,667 | python | en | code | 0 | github-code | 13 |
22268909587 | import os
import cv2
from camera_generator import BaseCamera
class Camera(BaseCamera):
    """OpenCV-backed camera source producing raw BGR frames for BaseCamera."""

    # Default OpenCV device index; overridable via OPENCV_CAMERA_SOURCE.
    video_source = 0
    # GStreamer pipeline for a Jetson CSI camera (currently unused — see the
    # commented-out VideoCapture call in frames()).
    stream = """
        nvarguscamerasrc !
        video/x-raw(memory:NVMM), width=(int)640, height=(int)640, framerate=(fraction)60/1 !
        nvvidconv flip-method=0 !
        video/x-raw, width=(int)640, height=(int)640, format=(string)BGRx !
        videoconvert !
        video/x-raw, format=(string)BGR ! appsink
        """

    def __init__(self):
        # Allow selecting the capture device through an environment variable.
        if os.environ.get('OPENCV_CAMERA_SOURCE'):
            Camera.set_video_source(int(os.environ['OPENCV_CAMERA_SOURCE']))
        super(Camera, self).__init__()

    @staticmethod
    def set_video_source(source):
        # Stored on the class so all Camera instances share the same source.
        Camera.video_source = source

    @staticmethod
    def frames():
        """Generator yielding frames from the camera indefinitely."""
        #camera = cv2.VideoCapture(Camera.stream, cv2.CAP_GSTREAMER)
        camera = cv2.VideoCapture(Camera.video_source)
        if not camera.isOpened():
            raise RuntimeError('Could not start camera.')

        while True:
            # NOTE(review): the read() success flag is discarded — a failed
            # read yields None; confirm downstream tolerates that.
            _, img = camera.read()
            yield img
| alicamdal/yolov5_object_detection | camera_opencv.py | camera_opencv.py | py | 1,059 | python | en | code | 1 | github-code | 13 |
37192055889 | import json
from dataclasses import dataclass
from enum import Enum
import pyrebase
from pyrebase.pyrebase import Auth, Database
@dataclass
class UserAuth:
    """Authentication triple extracted from a Firebase sign-in/sign-up response."""
    uuid: str
    token: str
    refresh_token: str

    # Explicit __init__ overrides the dataclass-generated one: the object is
    # built from the raw Firebase response dict, not from the three fields.
    def __init__(self, user_auth: dict):
        self.uuid = user_auth["localId"]
        self.token = user_auth["idToken"]
        self.refresh_token = user_auth["refreshToken"]
class Serializable:
    """Mixin providing JSON round-tripping based on the instance ``__dict__``."""

    def to_json(self) -> any:
        """Serialize this object to a pretty-printed, key-sorted JSON string."""
        return json.dumps(
            self,
            default=lambda obj: obj.__dict__,
            sort_keys=True,
            indent=2,
        )

    @classmethod
    def from_json(cls, raw: any):
        """Build an instance from a JSON string of constructor keyword arguments."""
        fields = json.loads(raw)
        return cls(**fields)
class DogSex(str, Enum):
    """Dog sex values; str-backed so members serialize directly to JSON."""
    FEMALE: str = "female"
    MALE: str = "male"
@dataclass
class DogModel(Serializable):
    """A single dog record as stored under a user in the database."""
    uuid: str
    name: str
    race: str
    age: int
    sex: str
    last_out: float = .0

    # Overrides Serializable.from_json: here `raw` is an already-parsed dict
    # (the base class expects a JSON string and calls json.loads on it).
    @classmethod
    def from_json(cls, raw: any):
        return cls(**raw)
@dataclass
class UserModel(Serializable):
    """A user record as stored in the database; `dogs` holds DogModel entries."""
    uuid: str
    username: str
    email_address: str
    phone_number: str
    dogs: list
@dataclass
class Datasource:
    """Firebase-backed data access layer for user accounts and their dogs.

    The @dataclass-declared fields below are populated by the explicit
    __init__ (auth, db) and by login_user (user_auth, user) — the generated
    constructor is suppressed by the hand-written one.
    """
    auth: Auth
    db: Database
    user_auth: UserAuth
    user: UserModel

    def __init__(self):
        # Firebase credentials are read from a local, uncommitted config file.
        with open("config.local.json") as f:
            config = json.load(f)
        firebase = pyrebase.initialize_app(config)
        self.auth = firebase.auth()
        self.db = firebase.database()

    def create_user(self, username: str, email_address: str, phone_number: str, password: str) -> bool:
        """Register a new account, send the verification mail and persist the profile.

        Returns True on success, False on any failure (the error is printed).
        """
        try:
            user_auth = UserAuth(self.auth.create_user_with_email_and_password(email_address, password))
            self.auth.send_email_verification(user_auth.token)
            user_model = UserModel(user_auth.uuid, username, email_address, phone_number, [])
            self.db.child("users").child(user_model.uuid).set(user_model.to_json(), user_auth.token)
            return True
        except Exception as e:
            print(e)
            return False

    def login_user(self, email_address: str, password: str) -> bool:
        """Sign in and load the matching profile; returns True when found.

        NOTE(review): this downloads ALL user records and scans them for the
        e-mail address — querying by uuid/e-mail would avoid the full fetch.
        """
        try:
            user_auth = self.auth.sign_in_with_email_and_password(email_address, password)
            self.user_auth = UserAuth(user_auth)
            users = self.db.child("users").get(self.user_auth.token).val()
            for u in users.values():
                um = UserModel.from_json(u)
                if email_address == um.email_address:
                    # Nested dog dicts are rehydrated into DogModel objects.
                    um.dogs = [DogModel.from_json(dog) for dog in um.dogs]
                    self.user = um
                    return True
            return False
        except Exception as e:
            print(e)
            return False

    def update_user(self) -> bool:
        """Persist the in-memory user profile back to the database.

        NOTE(review): unlike create_user, no auth token is passed to set() —
        confirm whether the id token should be supplied here as well.
        """
        try:
            j = self.user.to_json()
            self.db.child("users").child(self.user.uuid).set(j)
            return True
        except Exception as e:
            print(e)
            return False

    def refresh_session(self) -> bool:
        """Exchange the refresh token for a fresh id token; True on success."""
        try:
            user_auth = self.auth.refresh(self.user_auth.refresh_token)
            self.user_auth = UserAuth(user_auth)
            return True
        except Exception as e:
            print(e)
            return False
# Module-level singleton; note this reads config.local.json and contacts
# Firebase as a side effect of importing this module.
DS = Datasource()
| nieomylnieja/dogOut | app/datasource.py | datasource.py | py | 3,194 | python | en | code | 0 | github-code | 13 |
39243221049 | # -*- coding: utf-8 -*-
class Solution:
    # Note: find ANY duplicated value and store it in duplication[0];
    # the function returns True/False.
    def duplicate(self, numbers, duplication):
        """Find the first value (in iteration order) that repeats in `numbers`.

        The repeated value is written to duplication[0]. Returns True when a
        duplicate exists, otherwise False. Uses a set for O(1) membership
        tests instead of the previous O(n) list scan per element.
        """
        seen = set()
        for value in numbers:
            if value in seen:
                duplication[0] = value
                return True
            seen.add(value)
        return False
if __name__=="__main__":
    s=Solution()
    ss=[2,1,3,1,4]
    # Expect True: the value 1 appears twice in ss.
    print(s.duplicate(ss,[0]))
35726221095 |
# %%
import pandas as pd
from pathlib import Path
from anytree import Node, RenderTree
import anytree
import itertools
# %%
# Load the branch scheme spreadsheet that defines the tree hierarchy.
# NOTE(review): absolute Windows path — breaks on any other machine; consider
# a path relative to the project root.
df = pd.read_excel(
    Path("C:/Code/bio-economy-cluster/backend/database/excel/Search_scheme/branchen_scheme.xlsx")
)
# %%
def create_root_node(name: str) -> anytree.Node:
    """Create and return a parentless anytree node to serve as the tree root."""
    root = Node(name=name)
    return root
def mark_last_added_node(parent: anytree.Node, node_name: str, **kwargs):
    """Attach `node_name` under `parent` unless an equally-named node exists.

    Returns the newly created node, or None when the search already finds a
    node with that name (search is limited to maxlevel=1 — NOTE(review):
    verify against the anytree docs whether this inspects `parent` itself or
    its direct children). `**kwargs` is accepted for call-site flexibility
    but currently unused.
    """
    existing = anytree.search.find(
        node=parent,
        filter_=lambda node: node.name == node_name,
        maxlevel=1)
    if existing is None:
        return Node(
            name=node_name, parent=parent, value=333)
def create_radial_tree_datastructure_from_df(
    df: pd.DataFrame,
    root: anytree.Node
) -> anytree.Node:
    '''
    Creates a tree-like datastructure, using the anytree-library and an input dataframe with single index and several columns. Each column of the dataframe represents a set of nodes, starting with the first set of children nodes in the most left column. Hence, the most right column only consists of leaf nodes, without further descendents.

    Parameter
    -------
    df: pd.DataFrame
        Inherits the tree data structure
    root: anytree.Node
        Named root node

    Returns
    -------
    anytree.Node
        Filled up tree structure.
    '''
    column_names_except_last = [val for val in df.columns[:-1].values]
    print('column_names_except_last: ', column_names_except_last)

    # Set all columns except last as index, then slice groups out of them
    groups = df.set_index(column_names_except_last).groupby(
        column_names_except_last)

    # Iter over groups and build up tree structure
    for group_names, group_df in groups:
        print('group_names: ', group_names)
        # Use this dummy to search for existing
        last_added_node = None
        # Connect first child to root and every next child to last added child
        for enum, name in enumerate(group_names):
            print(enum)
            # print(RenderTree(root))
            # If first entry of group_names tuple is not a child of root, add it
            # otherwise skip
            if enum == 0:
                last_added_node = mark_last_added_node(
                    parent=root, node_name=group_names[enum],
                )
                # if anytree.search.find(
                #         node=root,
                #         filter_=lambda node: node.name == group_names[enum],
                #         maxlevel=1) is None:
                #     last_added_node = Node(
                #         name=group_names[enum], parent=root, value=333)
            else:
                # Only add new children if you can't find one with the same
                # name
                # NOTE(review): mark_last_added_node returns None when the name
                # already exists, so last_added_node can become None here and
                # the next iteration would parent nodes to None — confirm.
                last_added_node = mark_last_added_node(
                    parent=last_added_node, node_name=group_names[enum],
                )
                # if anytree.search.find(
                #         node=last_added_node,
                #         filter_=lambda node: node.name == group_names[enum],
                #         maxlevel=1) is None:
                #     last_added_node = Node(
                #         name=group_names[enum], parent=last_added_node, value=10)

        # Add all entries from last column to the corresponding parents
        for entry in list(itertools.chain.from_iterable(group_df.values)):
            # Only add a new children if you can't find one with the same name
            if anytree.search.find(
                    node=last_added_node,
                    filter_=lambda node: node.name == entry,
                    maxlevel=1) is None:
                _ = Node(
                    name=entry, parent=last_added_node, value=99999)
    return root
# %%
# NOTE(review): this call omits the required `df` argument and discards the
# return value, so `root` below is undefined at module level — looks like
# leftover notebook state; confirm before running the file top to bottom.
create_radial_tree_datastructure_from_df(
    root=create_root_node(name="Bioökonomie"))
# %%
print(RenderTree(root))
# %%
# for col in column_names_except_last:
# print("-" * 23)
# print('col: ', col)
# g = _df.groupby(level=col)
# for name, subgroup in g:
# node = Node(name=name, parent=root)
# print("+" * 23)
# print()
# print('subgroup: ', subgroup["layer_3"])
# print()
# print('name: ', name)
# for group in subgroup["layer_3"]:
# print()
# print('group: ', group)
| w0L-g0R/bio-cluster | backend/bio_cluster/src/data/DEVELOPMENT/create_radial_tree_datastructure_v1.py | create_radial_tree_datastructure_v1.py | py | 4,569 | python | en | code | 0 | github-code | 13 |
16511497104 | import os
import sys
from pathlib import Path
from typing import Optional
from typing import Text
import toml
# Locations of the in-code version module and the poetry build configuration.
version_file_path = Path("questionary/version.py")
pyproject_file_path = Path("pyproject.toml")
def get_pyproject_version():
    """Return the project version specified in the poetry build configuration."""
    poetry_config = toml.load(pyproject_file_path)["tool"]["poetry"]
    return poetry_config["version"]
def get_current_version() -> Text:
    """Return the current library version as specified in the code.

    Executes questionary/version.py and reads the ``__version__`` it defines.

    Raises
    ------
    FileNotFoundError
        If the version file does not exist.
    """
    if not version_file_path.is_file():
        # Bug fix: `version_file_path` is a Path object, not a callable — the
        # previous f-string called it (`version_file_path()`), raising a
        # TypeError instead of the intended FileNotFoundError message.
        raise FileNotFoundError(
            f"Failed to find version file at {version_file_path.absolute()}"
        )

    # context in which we evaluate the version py -
    # to be able to access the defined version, it already needs to live in the
    # context passed to exec
    _globals = {"__version__": ""}
    with open(version_file_path) as f:
        exec(f.read(), _globals)

    return _globals["__version__"]
def get_tagged_version() -> Optional[Text]:
    """Return the version from a tagged CI commit, or None when untagged."""
    return os.getenv("TRAVIS_TAG")
if __name__ == "__main__":
    # CI gate: the code version, the pyproject version and (if present) the
    # git tag must all agree, otherwise exit non-zero to fail the build.
    if get_pyproject_version() != get_current_version():
        print(
            f"Version in {pyproject_file_path} does not correspond "
            f"to the version in {version_file_path}! The version needs to be "
            f"set to the same value in both places."
        )
        sys.exit(1)
    elif get_tagged_version() and get_tagged_version() != get_current_version():
        print(
            f"Tagged version does not correspond to the version "
            f"in {version_file_path}!"
        )
        sys.exit(1)
    elif get_tagged_version() and get_tagged_version() != get_pyproject_version():
        print(
            f"Tagged version does not correspond to the version "
            f"in {pyproject_file_path}!"
        )
        sys.exit(1)
    else:
        print("Versions look good!")
| tmbo/questionary | scripts/validate_version.py | validate_version.py | py | 1,979 | python | en | code | 1,270 | github-code | 13 |
17124400151 | import os
import sys
import datetime
import numpy as np
import matplotlib.pyplot as plt
from scipy.integrate import solve_ivp
from time import time
from numba import jit
@jit(nopython=True)
def DVTH(Fai, Theta):
    # Derivative of the coupling potential with respect to the platform angle
    # Theta, accumulated over all coupled pairs (Aij[i, j] != 0).
    # Relies on module-level globals: n, s, lp2, l, r, alpha, kfai, Aij.
    dvth = 0
    for i in range(n):
        for j in range(n):
            # hs: current separation between attachment points i and j
            # (s[i, j] is the rest separation for that pair).
            hs = np.sqrt(s[i, j] * s[i, j] + 2 *
                         lp2 * (1 - np.cos(Fai[i] - Fai[j])) + 8 * l * r * np.sin((Fai[i] - Fai[j]) / 2) * np.sin(
                (alpha[i] - alpha[j]) / 2) * np.sin((Fai[i] + Fai[j] - alpha[i] - alpha[j]) / 2 - Theta))
            # print(hs)
            if Aij[i, j] != 0:
                dvth += 2 * l * r * kfai * Aij[i, j] * (1 - s[i, j] / hs) * np.sin(
                    (Fai[i] - Fai[j]) / 2) * np.sin((alpha[j] - alpha[i]) / 2) * np.cos(
                    ((Fai[i] + Fai[j] - alpha[i] - alpha[j]) / 2 - Theta))
    return dvth
@jit(nopython=True)
def DVFAI(Fai, Theta, ith):
    # Derivative of the coupling potential with respect to pendulum angle
    # Fai[ith], summed over partners j with Aij[ith, j] != 0.
    # Relies on module-level globals: n, s, lp2, l, r, alpha, kfai, Aij.
    dvfai = 0
    for j in range(n):
        # hs: current separation between attachment points ith and j.
        hs = np.sqrt(s[ith, j] * s[ith, j] + 2 *
                     lp2 * (1 - np.cos(Fai[ith] - Fai[j])) + 8 * l * r * np.sin((Fai[ith] - Fai[j]) / 2) * np.sin(
            (alpha[ith] - alpha[j]) / 2) * np.sin((Fai[ith] + Fai[j] - alpha[ith] - alpha[j]) / 2 - Theta))
        # print(hs)
        if Aij[ith, j] != 0:
            dvfai += Aij[ith, j] * kfai * l * (1 - s[ith, j] / hs) * (
                l * np.sin(Fai[ith] - Fai[j]) + 2 * r * np.sin((alpha[ith] - alpha[j]) / 2) *
                np.sin(Fai[ith] - (alpha[ith] + alpha[j]) / 2 - Theta))
    return dvfai
def dXdt(t, X):
    # Right-hand side for solve_ivp. State layout:
    # x[0] = 0  # th
    # x[1:(n + 1)] = fai  # fai
    # x[n + 1] = 0  # omega
    # x[(n + 2):] = w  # w
    # NOTE: fills the module-level matrices A and B in place every call
    # (avoids reallocation, but makes this function non-reentrant).
    A[:(n + 1), :(n + 1)] = np.eye(n + 1)
    A[n + 1, n + 1] = B0 + n * m * rp2
    A[n + 1, 0] = cth
    A[(n + 2):, 1:(n + 1)] = np.eye(n) * cfai
    A[(n + 1), (n + 2):] = np.array([m * r * l * np.sin(X[i + 1] - X[0] - alpha[i])
                                     for i in range(n)])
    A[(n + 2):, (n + 1)] = A[(n + 1), (n + 2):]
    A[(n + 2):, (n + 2):] = np.eye(n) * m * lp2
    B[0] = X[n + 1]
    B[1:(n + 1)] = X[(n + 2):]
    b_sum = 0
    b_rest = np.zeros(n)
    for i in range(n):
        b_sum += m * r * l * np.power(X[n + 2 + i], 2) * np.cos(X[i + 1] -
                                                                X[0] - alpha[i]) + m * r * g * np.cos(alpha[i] + X[0])
        b_rest[i] = -m * g * l * np.sin(X[i + 1]) + m * r * l * np.power(
            X[n + 1], 2) * np.cos(X[i + 1] - X[0] - alpha[i]) - DVFAI(X[1:(n + 1)], X[0], i) + ME[i]
    B[n + 1] = -kth * X[0] - b_sum - \
        DVTH(X[1:(n + 1)], X[0])  # DVTH needs all fai
    B[(n + 2):] = b_rest
    # make sure A is not singular
    # NOTE(review): np.linalg.solve(A, B) would be cheaper and numerically
    # preferable to forming the explicit inverse here.
    return np.linalg.inv(A).dot(B)
def positive_zero(i, Flag):
    """Event factory: pendulum i's wrapped angle crosses zero while increasing.

    The returned callable maps X[i + 1] into a wrapped angle so solve_ivp can
    detect the upward zero crossing; `Flag` sets whether the event terminates
    the integration.
    """
    def event(t, X):
        two_pi = 2 * np.pi
        wrapped = X[i + 1] % two_pi
        return wrapped - (wrapped // np.pi) * two_pi

    event.terminal = Flag
    event.direction = 1
    return event
def negative_zero(i, Flag):
    """Event factory: pendulum i's wrapped angle crosses zero while decreasing.

    Identical wrapping to positive_zero, but solve_ivp triggers on the
    downward crossing (direction = -1).
    """
    def event(t, X):
        two_pi = 2 * np.pi
        wrapped = X[i + 1] % two_pi
        return wrapped - (wrapped // np.pi) * two_pi

    event.terminal = Flag
    event.direction = -1
    return event
def positive_epsilon(i, Flag):
    """Event factory: pendulum i's wrapped angle rises through +epsilon.

    Uses the module-level `epsilon` (escapement threshold); direction = 1
    means solve_ivp fires only on the upward crossing.
    """
    def event(t, X):
        two_pi = 2 * np.pi
        wrapped = X[i + 1] % two_pi
        return wrapped - (wrapped // np.pi) * two_pi - epsilon

    event.terminal = Flag
    event.direction = 1
    return event
def negative_epsilon(i, Flag):
    """Event factory: pendulum i's wrapped angle falls through -epsilon.

    Uses the module-level `epsilon`; direction = -1 means solve_ivp fires
    only on the downward crossing.
    """
    def event(t, X):
        two_pi = 2 * np.pi
        wrapped = X[i + 1] % two_pi
        return wrapped - (wrapped // np.pi) * two_pi + epsilon

    event.terminal = Flag
    event.direction = -1
    return event
def positive_poincare(i, Flag):
    """Event factory for the Poincare section: state component i crosses zero upward.

    Unlike the pendulum events, this wraps X[i] directly (not X[i + 1]).
    """
    def event(t, X):
        two_pi = 2 * np.pi
        wrapped = X[i] % two_pi
        return wrapped - (wrapped // np.pi) * two_pi

    event.terminal = Flag
    event.direction = 1
    return event
def solution(p_init, p_ptr):
    """
    Integrate the coupled pendulum/platform ODE with escapement events.

    p_init : initial pendulum angles (length n).
    p_ptr  : index of the state component used for the Poincare section.

    Returns (init_fai, y, pocr_y): the initial angles, the recorded
    trajectory (state rows, time in the last row, trimmed to c_y columns)
    and the Poincare-section samples (trimmed to c_pocr columns).

    NOTE: mutates the module-level `sigma` and `ME` arrays (escapement
    state and impulse torques read by dXdt).
    """
    init_fai = p_init
    x = np.zeros(d)

    # events detection
    find_y = []
    for i in range(n):
        find_y.append(positive_epsilon(i, True))
    for i in range(n):
        find_y.append(negative_epsilon(i, True))
    for i in range(n):
        find_y.append(positive_zero(i, True))
    for i in range(n):
        find_y.append(negative_zero(i, True))
    find_y.append(positive_poincare(p_ptr, True))

    # for iterating
    iteration = 0
    mini = 0.01  # NOTE(review): appears unused
    c_y = 0
    c_pocr = 0
    ini_t = 0
    end_t = TIME
    interval = STEPS
    te_ttl = np.linspace(ini_t, end_t, interval)
    # Pre-allocated output buffers (trimmed on return).
    y = np.zeros((d + 1, 10000000))  # don't use float32, otherwise
    pocr_y = np.zeros((d + 1, 10000000))  # to [-π, π] does not
    t1 = time()
    while True:
        iteration += 1
        # initialization =====================
        if iteration == 1:
            # First pass: build the initial state vector and escapement flags.
            x[0] = 0.01  # th
            x[1: n + 1] = init_fai
            x[n + 1] = 0  # omega
            x[(n + 2):] = 0  # w
            for i in range(n):
                # Wrap the angle into (-π, π].
                fai = (x[i + 1] % (2 * np.pi)) - \
                    ((x[i + 1] % (2 * np.pi)) // np.pi) * (2 * np.pi)
                while abs(fai) > np.pi:
                    fai = (fai % (2 * np.pi)) - \
                        ((fai % (2 * np.pi)) // np.pi) * (2 * np.pi)
                if fai >= epsilon:
                    sigma[i] = 2
                elif fai <= -epsilon:
                    sigma[i] = 1
                else:
                    sigma[i] = 0
                if sigma[i] == 1 and 0 < fai < epsilon:
                    ME[i] = M
                elif sigma[i] == 2 and -epsilon < fai < 0:
                    ME[i] = -M
                else:
                    ME[i] = 0
        else:
            # Subsequent passes: update escapement state from the event point.
            for i in range(n):
                fai = (x[i + 1] % (2 * np.pi)) - \
                    ((x[i + 1] % (2 * np.pi)) // np.pi) * (2 * np.pi)
                while abs(fai) > np.pi:
                    fai = (fai % (2 * np.pi)) - \
                        ((fai % (2 * np.pi)) // np.pi) * (2 * np.pi)
                if fai > epsilon and x[n + 2 + i] > 0:
                    sigma[i] = 2
                    ME[i] = 0
                elif fai < -epsilon and x[n + 2 + i] < 0:
                    sigma[i] = 1
                    ME[i] = 0
                elif fai > 0 and x[n + 2 + i] > 0:
                    if sigma[i] == 1:
                        ME[i] = M
                elif fai < 0 and x[n + 2 + i] < 0:
                    if sigma[i] == 2:
                        ME[i] = -M
        # modeling ===========================
        ts_solm = [ini_t, end_t]
        te_solm = te_ttl[(te_ttl - ini_t) >= 0]
        solm = solve_ivp(dXdt,
                         t_span=ts_solm,
                         y0=x,
                         t_eval=te_solm,
                         events=find_y)
        lt = solm.t.shape[0]
        y[-1, c_y:c_y + lt] = solm.t
        print(solm.t[-1])
        y[:d, c_y:c_y + lt] = solm.y
        c_y += lt
        if solm.status == 1:
            # A terminal event fired: restart from the event state.
            # the current position ================
            et = solm.t_events
            ey = solm.y_events
            for ei, v in enumerate(et[:-1]):
                if v.shape[0] != 0:
                    ini_t = v[0]
                    x = ey[ei][0]
                    break
            pocr_fai = (x[p_ptr] % (2 * np.pi)) - \
                ((x[p_ptr] % (2 * np.pi)) // np.pi) * (2 * np.pi)
            if abs(pocr_fai) < 0.000001 and x[n + p_ptr + 1] > 0:
                pocr_y[-1, c_pocr:c_pocr + 1] = ini_t
                pocr_y[:d, c_pocr:c_pocr + 1] = x.reshape(d, 1)
                c_pocr += 1
            elif et[-1].shape[0] != 0 and et[-1][0] != 0:
                ini_t = et[-1][0]
                x = ey[-1][0]
                pocr_y[-1, c_pocr:c_pocr + 1] = ini_t
                pocr_y[:d, c_pocr:c_pocr + 1] = x.reshape(d, 1)
                c_pocr += 1
            # forward a few steps ================
            # Step past the event so the same event does not retrigger.
            t_fwd = te_ttl[(te_ttl - ini_t) > 0][0]
            ts_fwd = [ini_t, t_fwd]
            te_fwd = np.linspace(ini_t, t_fwd, 2)
            sol_fwd = solve_ivp(dXdt,
                                t_span=ts_fwd,
                                y0=x,
                                t_eval=te_fwd)
            lt_fwd = sol_fwd.t.shape[0]
            y[-1, c_y:c_y + lt_fwd] = sol_fwd.t
            y[:d, c_y:c_y + lt_fwd] = sol_fwd.y
            c_y += lt_fwd
            ini_t = sol_fwd.t[-1]
            x = y[:d, c_y - 1]
            if ini_t == end_t:  # to avoid ini_t == end_t
                break
        if solm.status == 0:
            break
        if solm.status == -1:
            print("Integration step failed")
    print(time() - t1)
    return init_fai, y[:, :c_y], pocr_y[:, :c_pocr]
# When events == True ===========================================
# ===============================================================
n = 4
d = 2 * n + 2  # state dimension: theta, n angles, omega, n angular rates
TIME, STEPS = 100, 10000

# variables ================================
Aij = np.ones((n, n))  # coupling matrix
np.fill_diagonal(Aij, 0)
# NOTE: the all-to-all matrix above is immediately replaced by this
# 1-3 / 2-4 pairwise coupling.
Aij = np.array([[0, 0, 1, 0],
                [0, 0, 0, 1],
                [1, 0, 0, 0],
                [0, 1, 0, 0]])
# ==========================================

# constant parameters ======================
B0 = 5.115  # 5.115
r = 1.0
m = 1.0
l = 0.24849
g = 9.81
kth = 34  # 34 # 3
cth = np.log(2)
kfai = 17.75  # 17.75 1 # not too big
cfai = 0.01
epsilon = 5 * np.pi / 180
M = 0.075  # 0.3 # 0.075, for discontinuty
# ==========================================

# constant matrix ==========================
alpha = np.pi / 2 + 2 * np.pi / n * np.arange(n)
sigma = np.zeros(n)
ME = np.zeros(n)
# s[i, j]: rest separation between attachment points i and j.
s = np.zeros((n, n))
for i in range(n):
    for j in range(n):
        s[i, j] = r * \
            np.sqrt(2 * (1 - np.cos(alpha[i] - alpha[j])))
rp2 = np.power(r, 2)
lp2 = np.power(l, 2)
# Work buffers filled in place by dXdt on every call.
A = np.zeros((d, d))
B = np.zeros(d)
# ==========================================
# ===============================================================

# initialization, for n=4 =======================================
init_fai = np.array([-np.pi / 4,
                     np.pi / 4,
                     -np.pi / 4,
                     np.pi / 4])
ptr = 1
# ptr for poincare, for example using fai_1=0 and \dot{fai_1} > 0
fai, y, pocr_y = solution(init_fai, ptr)

# Wrap recorded angle rows back into (-π, π] for plotting.
for i in range(1, n + 1):
    if y[i, 0] >= np.pi or y[i, 0] <= -np.pi:
        y[i, :] = (y[i, :] % (2 * np.pi)) - ((y[i, :] %
                                              (2 * np.pi)) // np.pi) * (2 * np.pi)
    if pocr_y[i, 0] >= np.pi or pocr_y[i, 0] <= -np.pi:
        pocr_y[i, :] = (pocr_y[i, :] % (2 * np.pi)) - ((pocr_y[i, :] %
                                                        (2 * np.pi)) // np.pi) * (2 * np.pi)
t = y[-1, :]
plt.plot(t, y[0, :], label=r'$\Theta$')
plt.plot(t, y[1, :], label=r'$\phi_1$')
plt.plot(t, y[2, :], label=r'$\phi_2$')
plt.plot(t, y[3, :], label=r'$\phi_3$')
# NOTE(review): the label below repeats $\phi_3$ for the fourth pendulum —
# likely intended to be $\phi_4$.
plt.plot(t, y[4, :], label=r'$\phi_3$')
plt.legend()
plt.show()

np.save('./Submission/CHAOS/Multistability/~Metadata/y.npy', y)
np.save('./Submission/CHAOS/Multistability/~Metadata/py.npy', pocr_y)
| Zsstarry/EM_Clocks | Coupled4_2L_13_24_MC_And_Poincare.py | Coupled4_2L_13_24_MC_And_Poincare.py | py | 11,232 | python | en | code | 0 | github-code | 13 |
42300344180 | colour = ["blue","pink","red","orange","yellow",17]
#print(colour)
#print(colour[0])
#print(colour[1])
#print(colour[2])
#print(colour[3])
#print(colour[4])
#print(colour[5])
# Sample list used by the commented-out list/slice exercises below.
numbers = [2,7,15,3,10]
#numbers.sort() # sorts the list
#numbers.reverse() # reverses the order of the list
#print(numbers)
#print(numbers[4])
#print(numbers[:])
#print(numbers[:5])
#print(numbers[0:5])
#print(numbers[1:])
#print(numbers[1:4])
#print(numbers[::])
#print(numbers[::2])
#print(numbers[::3])
#print(numbers[::-1])
#print(numbers[::-2])
#print(numbers[1:4:-1])
#print(len(numbers))
#print(min(numbers))
#print(max(numbers))
#print(numbers[-5:-1])
#print(numbers[-5:-1:2])
#print(numbers[-5:-2])
#print(numbers[-5:1:2])
#print(numbers[-5:4:1])
#print(numbers[-4:3:1])
#print(numbers[0:-4])
#print(numbers[2:-2])
#numbers.append(8) # add an element at the end of the list
#numbers.append(45)
#numbers.append(3)
#numbers.sort()
#print(numbers)
'''numbers2 = []
numbers2.append(2)
numbers2.append(67)
numbers2.append(34)
print(numbers2)'''
# insert() : - adds an element at the specified position
#numbers.insert(1,9) # yha pr 1 index value hai aur 9 vo value hai jisse hume insert karna hai. to humari 9 value ,index 1 pe insert hojayegi..
#numbers.insert(2,78) # ase hi 78 index value 2 pe insert hogi..
#print(numbers)
#numbers.remove(15) # it removes the element you want to remove.It takes an argument
# pop() :- removes the element at the specified position
#numbers.pop() # it removes last element
#numbers.pop(4) # it removes value present at index 4
#print(numbers)
'''numbers[1] = 67 # list ki value change ho sakti hai i.e list is mutable
print(numbers)'''
""" Mutable - can change
Immutable - cannot change
"""
#tupple = (1,2,3)
#tupple[1] = 8 # tupple ki value change nhi hoti i.e it is immutable
#tupple =(1) # yha humara brackets nhi ayenge mtlb tupple nhi bnega
# uske liye hume extra comma dena hoga
'''tupple = (1,) # ab tupple ban jayega
print(tupple)'''
'''a=1
b=8 # swapping of two numbers
a,b = b,a
print(b)'''
'''numbers.clear() # removes all the elements from the list
print(numbers)
'''
'''x=numbers.count(7) # returns the number of elements with the specified value
print(x)
'''
'''x = numbers.copy() # returns a copy of the list
print(x)'''
'''x=numbers.index(3) # returns the positon at the first occurrence of the specified value.what is the position of the 3
print(x)'''
'''cars = ['ford','bmw','volvo']
numbers.extend(cars) # it adds the elements of the any iterable(list or tupple or set),to the end of the current list
print(numbers)'''
| ItsVishesh/PYTHON-PROJECTS | LIST.py | LIST.py | py | 2,658 | python | en | code | 0 | github-code | 13 |
72915375698 | import dataclasses
import traceback
from typing import Any, Callable, Iterable, List, Union, Optional
from qutebrowser.qt.core import pyqtSignal, pyqtBoundSignal, QObject
from qutebrowser.utils import usertypes, log
@dataclasses.dataclass
class MessageInfo:
"""Information associated with a message to be displayed."""
level: usertypes.MessageLevel
text: str
replace: Optional[str] = None
rich: bool = False
def _log_stack(typ: str, stack: str) -> None:
    """Log the given message stacktrace.

    Args:
        typ: The type of the message.
        stack: An optional stacktrace.
    """
    # Drop trailing whitespace from every line before logging the trace.
    stripped = [line.rstrip() for line in stack.splitlines()]
    log.message.debug("Stack for {} message:\n{}".format(typ, '\n'.join(stripped)))
def error(
    message: str, *,
    stack: Optional[str] = None,
    replace: Optional[str] = None,
    rich: bool = False,
) -> None:
    """Display an error message.
    Args:
        message: The message to show.
        stack: The stack trace to show (if any).
        replace: Replace existing messages which are still being shown.
        rich: Show message as rich text.
    """
    if stack is None:
        # No caller-supplied trace: capture the current call stack instead.
        stack = ''.join(traceback.format_stack())
        typ = 'error'
    else:
        typ = 'error (from exception)'
    _log_stack(typ, stack)
    log.message.error(message)
    global_bridge.show(
        level=usertypes.MessageLevel.error,
        text=message,
        replace=replace,
        rich=rich,
    )
def warning(message: str, *, replace: Optional[str] = None, rich: bool = False) -> None:
    """Display a warning message.
    Args:
        message: The message to show.
        replace: Replace existing messages which are still being shown.
        rich: Show message as rich text.
    """
    # Warnings always log the caller's stack to ease tracking down the source.
    _log_stack('warning', ''.join(traceback.format_stack()))
    log.message.warning(message)
    global_bridge.show(
        level=usertypes.MessageLevel.warning,
        text=message,
        replace=replace,
        rich=rich,
    )
def info(message: str, *, replace: Optional[str] = None, rich: bool = False) -> None:
    """Display an info message.
    Args:
        message: The message to show.
        replace: Replace existing messages which are still being shown.
        rich: Show message as rich text.
    """
    # Info messages are not considered noteworthy enough to log a stack trace.
    log.message.info(message)
    global_bridge.show(
        level=usertypes.MessageLevel.info,
        text=message,
        replace=replace,
        rich=rich,
    )
def _build_question(title: str,
                    text: Optional[str] = None, *,
                    mode: usertypes.PromptMode,
                    default: Union[None, bool, str] = None,
                    abort_on: Iterable[pyqtBoundSignal] = (),
                    url: Optional[str] = None,
                    option: Optional[bool] = None) -> usertypes.Question:
    """Common function for ask/ask_async: build and wire up a Question."""
    question = usertypes.Question()
    question.title = title
    question.text = text
    question.mode = mode
    question.default = default
    question.url = url
    if option is not None:
        # 'option' (always/never answers) is only valid for yes/no prompts
        # that are tied to a specific URL.
        if mode != usertypes.PromptMode.yesno:
            raise ValueError("Can only 'option' with PromptMode.yesno")
        if url is None:
            raise ValueError("Need 'url' given when 'option' is given")
        question.option = option
    # Abort the question whenever any of the given signals fires.
    for sig in abort_on:
        sig.connect(question.abort)
    return question
def ask(*args: Any, **kwargs: Any) -> Any:
    """Ask a modular question in the statusbar (blocking).
    Args:
        message: The message to display to the user.
        mode: A PromptMode.
        default: The default value to display.
        text: Additional text to show
        option: The option for always/never question answers.
                Only available with PromptMode.yesno.
        abort_on: A list of signals which abort the question if emitted.
    Return:
        The answer the user gave or None if the prompt was cancelled.
    """
    question = _build_question(*args, **kwargs)
    # blocking=True: this returns only after the user answered or aborted.
    global_bridge.ask(question, blocking=True)
    answer = question.answer
    # The Question is a QObject; schedule its deletion once we have the answer.
    question.deleteLater()
    return answer
def ask_async(title: str,
              mode: usertypes.PromptMode,
              handler: Callable[[Any], None],
              **kwargs: Any) -> None:
    """Ask an async question in the statusbar.
    Args:
        title: The message to display to the user.
        mode: A PromptMode.
        handler: The function to get called with the answer as argument.
        default: The default value to display.
        text: Additional text to show.
    """
    question = _build_question(title, mode=mode, **kwargs)
    question.answered.connect(handler)
    # Clean up the QObject once the prompt has finished.
    question.completed.connect(question.deleteLater)
    global_bridge.ask(question, blocking=False)
_ActionType = Callable[[], Any]
def confirm_async(*, yes_action: _ActionType,
                  no_action: Optional[_ActionType] = None,
                  cancel_action: Optional[_ActionType] = None,
                  **kwargs: Any) -> usertypes.Question:
    """Ask a yes/no question to the user and execute the given actions.
    Args:
        message: The message to display to the user.
        yes_action: Callable to be called when the user answered yes.
        no_action: Callable to be called when the user answered no.
        cancel_action: Callable to be called when the user cancelled the
                       question.
        default: True/False to set a default value, or None.
        option: The option for always/never question answers.
        text: Additional text to show.
    Return:
        The question object.
    """
    # Force yes/no mode regardless of what the caller passed.
    kwargs['mode'] = usertypes.PromptMode.yesno
    question = _build_question(**kwargs)
    question.answered_yes.connect(yes_action)
    if no_action is not None:
        question.answered_no.connect(no_action)
    if cancel_action is not None:
        question.cancelled.connect(cancel_action)
    question.completed.connect(question.deleteLater)
    global_bridge.ask(question, blocking=False)
    return question
class GlobalMessageBridge(QObject):
    """Global (not per-window) message bridge for errors/infos/warnings.
    Attributes:
        _connected: Whether a slot is connected and we can show messages.
        _cache: Messages shown while we were not connected.
    Signals:
        show_message: Show a message
                      arg 0: A MessageLevel member
                      arg 1: The text to show
                      arg 2: A message ID (as string) to replace, or None.
        prompt_done: Emitted when a prompt was answered somewhere.
        ask_question: Ask a question to the user.
                      arg 0: The Question object to ask.
                      arg 1: Whether to block (True) or ask async (False).
                      IMPORTANT: Slots need to be connected to this signal via
                      a Qt.ConnectionType.DirectConnection!
        mode_left: Emitted when a keymode was left in any window.
    """
    show_message = pyqtSignal(MessageInfo)
    prompt_done = pyqtSignal(usertypes.KeyMode)
    ask_question = pyqtSignal(usertypes.Question, bool)
    mode_left = pyqtSignal(usertypes.KeyMode)
    clear_messages = pyqtSignal()
    def __init__(self, parent: Optional[QObject] = None) -> None:
        super().__init__(parent)
        self._connected = False
        self._cache: List[MessageInfo] = []
    def ask(self, question: usertypes.Question,
            blocking: bool, *,
            log_stack: bool = False) -> None:
        """Ask a question to the user.
        Note this method doesn't return the answer, it only blocks. The caller
        needs to construct a Question object and get the answer.
        Args:
            question: A Question object.
            blocking: Whether to return immediately or wait until the
                      question is answered.
            log_stack: ignored
        """
        self.ask_question.emit(question, blocking)
    def show(
        self,
        level: usertypes.MessageLevel,
        text: str,
        replace: Optional[str] = None,
        rich: bool = False,
    ) -> None:
        """Show the given message."""
        msg = MessageInfo(level=level, text=text, replace=replace, rich=rich)
        # Buffer messages until a UI handler has connected; flush() replays them.
        if self._connected:
            self.show_message.emit(msg)
        else:
            self._cache.append(msg)
    def flush(self) -> None:
        """Flush messages which accumulated while no handler was connected.
        This is so we don't miss messages shown during some early init phase.
        It needs to be called once the show_message signal is connected.
        """
        self._connected = True
        for msg in self._cache:
            self.show(**dataclasses.asdict(msg))
        self._cache = []
global_bridge = GlobalMessageBridge()
| qutebrowser/qutebrowser | qutebrowser/utils/message.py | message.py | py | 8,783 | python | en | code | 9,084 | github-code | 13 |
71216344017 | import os
from skimage.transform import resize
from tensorflow.compat.v1.keras.models import load_model
import numpy as np
#Loading pretrained Tensorflow model
model = load_model('models/2nd_model.h5')
def prediction(image, filename):
    """Classify an uploaded image with the pretrained CIFAR-10-style model.

    Args:
        image: the raw image array to classify.
        filename: the uploaded file's name, used to build the served path.

    Returns:
        (predictions, image_path) where predictions maps class1..class3 /
        prob1..prob3 to the top-3 class names and integer-percent scores.
    """
    # The model expects 32x32 RGB input (third dimension = RGB channels).
    resized = resize(image, (32, 32, 3))
    # predict() needs a batch dimension, hence wrapping in a one-element array;
    # [0, :] then extracts the single row of class probabilities.
    probabilities = model.predict(np.array([resized, ]))[0, :]
    # Indices of classes sorted from most to least probable.
    top = np.argsort(probabilities)[::-1]
    # Human-readable class names (used for the prediction output text).
    class_names = ['Airplane', 'Car', 'Bird',
                   'Cat', 'Deer', 'Dog', 'Frog', 'Horse', 'Ship', 'Truck']
    # Probabilities are converted to integer percentages (0.6789 -> 68).
    predictions = {
        "class1": class_names[top[0]],
        "class2": class_names[top[1]],
        "class3": class_names[top[2]],
        "prob1": int(round(probabilities[top[0]] * 100, 0)),
        "prob2": int(round(probabilities[top[1]] * 100, 0)),
        "prob3": int(round(probabilities[top[2]] * 100, 0))
    }
    return predictions, os.path.join('../static/uploads', filename)
| roxxuz/blue-ml-predict | prediction.py | prediction.py | py | 1,690 | python | en | code | 0 | github-code | 13 |
71899421138 | from ..core.abstractcontroller import AbstractBaseController
from ..resources.strings import strings, prompts, flag_text
from ..core import fileoperations, io
from ..lib import utils
from ..objects.exceptions import NoKeypairError, InvalidOptionsError
from ..operations import commonops, sshops
class SSHController(AbstractBaseController):
    """Implements the 'eb ssh' command: open an SSH session to an instance."""
    class Meta:
        label = 'ssh'
        description = strings['ssh.info']
        usage = AbstractBaseController.Meta.usage.replace('{cmd}', label)
        arguments = AbstractBaseController.Meta.arguments + [
            (['-n', '--number'], dict(help=flag_text['ssh.number'], type=int)),
            (['-i', '--instance'], dict(help=flag_text['ssh.instance'])),
            (['-o', '--keep_open'], dict(
                action='store_true', help=flag_text['ssh.keepopen'])),
            (['--force'], dict(
                action='store_true', help=flag_text['ssh.force'])),
            (['--setup'], dict(
                action='store_true', help=flag_text['ssh.setup']))
        ]
    def do_command(self):
        """Resolve the target instance and SSH into it.

        --setup configures an EC2 keypair and returns early. --instance and
        --number are mutually exclusive ways to pick the instance; with
        neither, a single-instance environment is used directly and a
        multi-instance one triggers an interactive prompt.
        """
        app_name = self.get_app_name()
        number = self.app.pargs.number
        env_name = self.get_env_name()
        instance = self.app.pargs.instance
        keep_open = self.app.pargs.keep_open
        force = self.app.pargs.force
        setup = self.app.pargs.setup
        if setup:
            self.setup_ssh(env_name)
            return
        if instance and number:
            raise InvalidOptionsError(strings['ssh.instanceandnumber'])
        if not instance:
            instances = commonops.get_instance_ids(app_name, env_name)
            if number is not None:
                # --number is 1-based; validate against the instance count.
                if number > len(instances) or number < 1:
                    raise InvalidOptionsError(
                        'Invalid index number (' + str(number) +
                        ') for environment with ' + str(len(instances)) +
                        ' instances')
                else:
                    instance = instances[number - 1]
            elif len(instances) == 1:
                instance = instances[0]
            else:
                io.echo()
                io.echo('Select an instance to ssh into')
                instance = utils.prompt_for_item_in_list(instances)
        try:
            sshops.ssh_into_instance(instance, keep_open=keep_open,
                                     force_open=force)
        except NoKeypairError:
            io.log_error(prompts['ssh.nokey'])
    def setup_ssh(self, env_name):
        """Prompt for an EC2 keypair and attach it to the environment."""
        # Instance does not have a keypair
        io.log_warning(prompts['ssh.setupwarn'].replace('{env-name}',
                                                        env_name))
        keyname = sshops.prompt_for_ec2_keyname(env_name=env_name)
        if keyname:
            options = [
                {'Namespace': 'aws:autoscaling:launchconfiguration',
                 'OptionName': 'EC2KeyName',
                 'Value': keyname}
            ]
            commonops.update_environment(env_name, options, False)
    def complete_command(self, commands):
        """Shell-completion hook: offer environment names as candidates."""
        if not self.complete_region(commands):
            # Environment names are the second positional argument in this
            ## controller, so we only complete if its the second
            if len(commands) == 2 and commands[-1].startswith('-'):
                app_name = fileoperations.get_application_name()
                io.echo(commonops.get_env_names(app_name))
18387968661 | from pylab import plot,show
from numpy import vstack,array
from numpy.random import rand
from scipy.cluster.vq import kmeans,vq
import csv
# data generation
#data = vstack((rand(150,2) + array([.5,.5]),rand(150,2)))
filename = 'C:/Users/Corey/Desktop/CSCI/Senior Project/samples/sample2.txt'
data = csv.reader(open(filename, 'r'), delimiter = ",", quotechar = '|')
xi = []
for row in data:
xi += [[float(row[0]), float(row[1])]]
x = vstack(xi)
# computing K-Means with K = 2 (2 clusters)
#centroids,_ = kmeans(x,3)
# assign each sample to a cluster
#idx,_ = vq(x,centroids)
# some plotting using numpy's logical indexing
def predict_cluster(xaxis, yaxis):
    """Return the k-means centroid (k=3) closest to the point (xaxis, yaxis).

    Closeness is measured as the sum of absolute coordinate differences
    (L1 distance), matching the original implementation. Note the centroids
    are re-fit on the module-level data `x` on every call.
    """
    centroids, _ = kmeans(x, 3)
    # assign each sample to a cluster (kept for parity with the original flow)
    idx, _ = vq(x, centroids)
    point = array([xaxis, yaxis])

    def l1_distance(centroid):
        delta = abs(centroid - point)
        return delta[0] + delta[1]

    # min() keeps the first centroid on ties, same as the original loop.
    return min(centroids, key=l1_distance)
def plot_cluster():
    """Fit k-means (k=3) on the module-level data `x` and plot the clusters.

    Each cluster is drawn in a different colour; centroids are green squares.
    Blocks until the plot window is closed.
    """
    centroids,_ = kmeans(x,3)
    # assign each sample to a cluster
    idx,_ = vq(x,centroids)
    plot(x[idx==0,0],x[idx==0,1],'ob',
         x[idx==1,0],x[idx==1,1],'or',
         x[idx==2,0],x[idx==2,1],'oy')
    plot(centroids[:,0],centroids[:,1],'sg',markersize=8)
    show()
14202129518 | #! /usr/bin/env python3
import cgi
import csv
import sqlite3
import pprint
# FieldStorageクラスのインスタンス化で、フォームの内容を取得
# Instantiate FieldStorage to read the submitted form contents.
form = cgi.FieldStorage()
title_str = form["query"].value
db_path = "bookdb.db" # database file name
con = sqlite3.connect(db_path) # connect to the database
cur = con.cursor() # obtain a cursor
# Table definition (one-time setup, kept commented for reference)
#cur.execute("""create table BOOKLIST
#             (ID int primary key,
#             AUTHOR varchar(256),
#             TITLE varchar(512),
#             PUBLISHER varchar(256),
#             PRICE int,
#             ISBN char(10));""")
# Load the CSV file and insert its rows (one-time setup)
#with open('cgi-bin/BookList.csv') as f:
#    reader = csv.reader(f)
#    for row in reader:
#        # insert each row's data into the table
#        cur.execute('insert into BOOKLIST values (?,?,?,?,?,?);', row)
print("Content-type: text/json; charset=utf-8\n")
book_list = []
try:
    # Execute the SQL statement (partial title match via LIKE)
    cur.execute("select * from BOOKLIST where TITLE like ?", ('%'+title_str+'%',))
    rows = cur.fetchall()
    if not rows:
        print("No books you looked for")
    else:
        for row in rows:
            book_dict = {'ID':str(row[0]), 'AUTHOR':str(row[1]), 'TITLE':str(row[2]), 'PUBLISHER':str(row[3]), 'PRICE':str(row[4]), 'ISBN':str(row[5])}
            book_list.append(book_dict)
        print(book_list)
except sqlite3.Error as e: # error handling
    print("Error occurred:", e.args[0])
con.commit() # commit database updates
con.close() # close the database
7416052965 | from flask import Flask, jsonify, request
import Xlib.threaded
from flask_socketio import SocketIO, send, emit, disconnect
from flask_cors import CORS
from model import Users
from secrets import token_hex
from uuid import uuid4
from engineio.payload import Payload
app = Flask(__name__)
app.config['SECRET_KEY'] = uuid4().hex + token_hex(32)
cors = CORS(app)
Payload.max_decode_packets = 500
socket = SocketIO(app, async_mode='gevent', engineio_logger=False, cors_allowed_origins=['file://', "null"])
users = Users()
@socket.on("message")
def message(payload):
check_user = users.get_devices_by_rid(payload['id'])
if not check_user:
users.create(
sid = request.sid,
typ = payload.get('type'),
pwd = payload.get('pass'),
rid = payload.get("id") if payload.get("id") else "12345"
)
send({
"user" : users.return_desktop_type(),
"message": "users list, message",
"status": 200,
"status_id": payload.get("id")
}, broadcast=True)
@socket.on('disconnect')
def disconnect():
    """Remove the disconnecting socket and broadcast the updated user list.

    NOTE(review): this handler shadows `disconnect` imported from
    flask_socketio at the top of the file -- confirm the import is unused.
    """
    users.remove_by_sid(request.sid)
    print(f'{request.sid}, disconnected')
    send({
        "user" : users.return_desktop_type(),
        "message": "users list for disconnection",
        "status": 200
    }, broadcast=True)
@socket.on('connect_users')
def connect_users(payload):
    """Validate the target device's id/password and answer only the requester.

    Args:
        payload: dict with 'id' (target rid) and 'pwd' keys.
    """
    check = users.check_user(payload.get('id'), payload.get('pwd'))
    if check:
        # Credentials OK: tell the requester which rid to connect to.
        emit("establish_connection", {
            "status": 200,
            "msg": f"establishing connection to user with rid: {check.rid}",
            'user': check.rid
        }, room=request.sid)
    else:
        emit("establish_connection",{
            "status": 400,
            "msg": "Password entered is wrong please try again"
        }, room=request.sid)
@socket.on('trigger_desktop')
def trigger_desktop(user):
    """Tell the desktop client identified by `user` to connect back to the requester.

    Args:
        user: rid of the target desktop device.
    """
    # The requester is looked up by its socket id, the target by its rid.
    client = users.get_devices_by_sid(request.sid)
    dev_client = users.get_devices_by_rid(user)
    emit('establish_connection', {
        "status": 200,
        "msg": f"establishing connection to user with rid: {client.rid}",
        "user": client.rid
    }, room=dev_client.sid)
@socket.on('streamer')
def streamer(payload):
    """Forward one frame of screen data to the target client's room.

    Args:
        payload: dict with 'user' (target rid) and 'data' (the frame payload).
    """
    client = users.get_devices_by_rid(payload['user'])
    if client:
        emit("img_stream", payload['data'], room=client.sid)
    else:
        # Bug fix: this message used to print unconditionally, i.e. on every
        # forwarded frame even when the device WAS found. Only report when
        # the rid lookup actually fails.
        print('device not found')
@socket.on('received_signal')
def received_signal(data):
    """Acknowledgement event from clients; intentionally a no-op."""
    pass
@app.errorhandler(Exception)
def error(err):
    """Return any unhandled exception to the client as JSON.

    Werkzeug HTTP exceptions carry a `.code` attribute which is reused as
    the HTTP status; anything without `.code` falls back to 500.
    """
    try:
        return jsonify({
            'message': str(err),
            'status' : err.code
        }), err.code
    except:
        # Non-HTTP exceptions have no .code attribute.
        return jsonify({
            'message': str(err),
            'status' : 500
        }), 500
if __name__ == "__main__":
socket.run(app, host="0.0.0.0", debug=True, port=3001) | MrJaysa/python-rdp | Server_Main/app.py | app.py | py | 2,813 | python | en | code | 2 | github-code | 13 |
8562391084 | #!/usr/bin/env python3
import argparse
import glob
import os
import subprocess
from pathlib import Path
from zipfile import ZipFile
def parse_arguments():
    """Parse CLI arguments; exits with an error if the schema file is missing.

    Returns:
        argparse.Namespace with .schemafile and .clkpath attributes.
    """
    parser = argparse.ArgumentParser(
        description="Tool for garbling PII for PPRL purposes in the CODI project"
    )
    parser.add_argument(
        "--schemafile",
        default="example-schema/blocking-schema/lambda.json",
        help="Path to blocking schema."
        " Default: example-schema/blocking-schema/lambda.json",
    )
    parser.add_argument(
        "--clkpath",
        default="output",
        help="Specify a folder containing clks. Default is 'output' folder",
    )
    args = parser.parse_args()
    if not Path(args.schemafile).exists():
        # parser.error() prints the message and exits with status 2.
        parser.error("Unable to find schema file: " + args.schemafile)
    return args
def block_individuals(args):
    """Run `anonlink block` on every CLK JSON file under args.clkpath.

    Args:
        args: parsed namespace with .schemafile and .clkpath attributes.

    Returns:
        List of Path objects for the blocked output files in temp-data/.

    Raises:
        subprocess.CalledProcessError: if any anonlink invocation fails
        (check=True).
    """
    os.makedirs("temp-data", exist_ok=True)
    os.makedirs("output", exist_ok=True)
    schema_file = Path(args.schemafile)
    clk_files = glob.glob(os.path.join(args.clkpath, "*.json"))
    blocked_files = []
    for clk in clk_files:
        clk_path = Path(clk)
        # Bug fix: derive the file name via Path.name instead of
        # clk.split("/")[-1], which breaks on Windows-style path separators.
        temp_file = Path("temp-data", clk_path.name)
        subprocess.run(
            ["anonlink", "block", str(clk_path), str(schema_file), str(temp_file)],
            check=True,
        )
        blocked_files.append(temp_file)
    return blocked_files
def zip_blocked_files(blocked_files):
    """Bundle all blocked CLK files into output/garbled_blocked.zip."""
    archive_path = "output/garbled_blocked.zip"
    with ZipFile(archive_path, "w") as archive:
        for path in blocked_files:
            archive.write(path)
def main():
    """Entry point: parse args, block every CLK file, then zip the results."""
    args = parse_arguments()
    zip_blocked_files(block_individuals(args))
if __name__ == "__main__":
main()
| mitre/data-owner-tools | block.py | block.py | py | 1,743 | python | en | code | 5 | github-code | 13 |
21565547304 | import warnings
import matplotlib.pyplot as plt
import seaborn as sns
from sklearn.manifold import TSNE
warnings.filterwarnings("ignore")
def print_init_stats(name, df):
    """Print basic dataset stats: shape, NaN presence, subjects, activities.

    Expects `df` to have at least a 'user' and an 'activity' column.
    """
    print("\t\t- Shape of '", name, "':", df.shape)
    print("\t\t- Has NaN '", name, "':", df.isnull().values.any())
    unique_activities = df["activity"].unique()
    n_subjects = len(df["user"].unique())
    print("\t\t- Qta subjects in '", name, "':", n_subjects)
    print("\t\t- Qta activities in '", name, "':", unique_activities)
# --- Plot functions ---
def plot_count_per_subject(df):
    """Bar chart of sample counts per user; blocks until the window closes."""
    plt.figure(figsize=(15, 8))
    plt.title('Data distribution per user')
    sns.countplot(x='user', data=df)
    plt.show()
def plot_samplings_per_class(df):
    """Bar chart of sample counts per activity class; blocks until closed."""
    plt.figure(figsize=(12, 8))
    plt.title('Number of sampling per class')
    sns.countplot(x='activity', data=df)
    plt.show()
def plot_sampling_per_class_per_user(df):
    """Grouped bar chart: per-user sample counts, split by activity class."""
    plt.figure(figsize=(12, 8))
    plt.title('Number of sampling per class collected by users')
    sns.countplot(hue='activity', x='user', data=df)
    plt.show()
def plot_activity(activity, df):
    """Plot the first 200 accelerometer samples (x/y/z) for one activity."""
    data = df[df['activity'] == activity][['x-acc', 'y-acc', 'z-acc']][:200]
    axis = data.plot(subplots=True, figsize=(16, 12),
                     title=activity)
    for ax in axis:
        # Place each subplot's legend outside the axes, to the right.
        ax.legend(loc='lower left', bbox_to_anchor=(1.0, 0.5))
    plt.show()
# -----------------
def plot_tsne(x_train, y_train):
    """Project x_train to 2-D with t-SNE and scatter-plot it coloured by label."""
    tsne = TSNE(random_state=42, n_components=2, verbose=1, perplexity=50, n_iter=1000).fit_transform(x_train)
    plt.figure(figsize=(12, 8))
    sns.scatterplot(x=tsne[:, 0], y=tsne[:, 1], hue=y_train, palette="bright")
    plt.show()
29660281766 |
FOURSQUARE_PLACES_V3_MOCK_200 = {
"results":[
{
"fsq_id":"53146e95498e242a07e892b4",
"categories":[
{
"id":13027,
"name":"Bistro",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/food/default_",
"suffix":".png"
}
}
],
"chains":[
],
"distance":62,
"geocodes":{
"main":{
"latitude":50.110542,
"longitude":8.676527
},
"roof":{
"latitude":50.110542,
"longitude":8.676527
}
},
"link":"/v3/places/53146e95498e242a07e892b4",
"location":{
"address":"Bethmannstraße 58",
"country":"DE",
"cross_street":"",
"formatted_address":"Bethmannstraße 58, 60311 Frankfurt am Main",
"locality":"Frankfurt am Main",
"postcode":"60311",
"region":"Hesse"
},
"name":"Baguetterie & Cafébar Strahmann",
"related_places":{
},
"timezone":"Europe/Berlin"
},
{
"fsq_id":"568c338d498e0b9ee997a939",
"categories":[
{
"id":13034,
"name":"Café",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/food/cafe_",
"suffix":".png"
}
},
{
"id":13035,
"name":"Coffee Shop",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/food/coffeeshop_",
"suffix":".png"
}
}
],
"chains":[
],
"distance":180,
"geocodes":{
"main":{
"latitude":50.11099,
"longitude":8.675335
},
"roof":{
"latitude":50.11099,
"longitude":8.675335
}
},
"link":"/v3/places/568c338d498e0b9ee997a939",
"location":{
"address":"Kirchnerstraße 4",
"country":"DE",
"cross_street":"",
"formatted_address":"Kirchnerstraße 4, 60311 Frankfurt am Main",
"locality":"Frankfurt am Main",
"postcode":"60311",
"region":"Hesse"
},
"name":"Bunca Barista & Caterer",
"related_places":{
},
"timezone":"Europe/Berlin"
},
{
"fsq_id":"4cfd2a882c1aa090410e057a",
"categories":[
{
"id":13165,
"name":"German Restaurant",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/food/german_",
"suffix":".png"
}
}
],
"chains":[
],
"distance":139,
"geocodes":{
"main":{
"latitude":50.111151,
"longitude":8.678412
},
"roof":{
"latitude":50.111151,
"longitude":8.678412
}
},
"link":"/v3/places/4cfd2a882c1aa090410e057a",
"location":{
"address":"Berliner Straße 70",
"country":"DE",
"cross_street":"Großer Hirschgraben",
"formatted_address":"Berliner Straße 70 (Großer Hirschgraben), 60311 Frankfurt am Main",
"locality":"Frankfurt am Main",
"postcode":"60311",
"region":"Hesse"
},
"name":"Heimat",
"related_places":{
},
"timezone":"Europe/Berlin"
},
{
"fsq_id":"4b068327f964a52089ec22e3",
"categories":[
{
"id":13035,
"name":"Coffee Shop",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/food/coffeeshop_",
"suffix":".png"
}
}
],
"chains":[
],
"distance":272,
"geocodes":{
"main":{
"latitude":50.112069,
"longitude":8.67927
},
"roof":{
"latitude":50.112069,
"longitude":8.67927
}
},
"link":"/v3/places/4b068327f964a52089ec22e3",
"location":{
"address":"Kornmarkt 9",
"country":"DE",
"formatted_address":"Kornmarkt 9, 60311 Frankfurt am Main",
"locality":"Frankfurt am Main",
"postcode":"60311",
"region":"Hesse"
},
"name":"Wackers Kaffee",
"related_places":{
},
"timezone":"Europe/Berlin"
},
{
"fsq_id":"57b34f2e498edc52148534fb",
"categories":[
{
"id":13379,
"name":"Vietnamese Restaurant",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/food/vietnamese_",
"suffix":".png"
}
}
],
"chains":[
],
"distance":497,
"geocodes":{
"main":{
"latitude":50.113529,
"longitude":8.681874
},
"roof":{
"latitude":50.113529,
"longitude":8.681874
}
},
"link":"/v3/places/57b34f2e498edc52148534fb",
"location":{
"address":"Schärfengäßchen 6",
"country":"DE",
"cross_street":"Holzgraben",
"formatted_address":"Schärfengäßchen 6 (Holzgraben), 60311 Frankfurt am Main",
"locality":"Frankfurt am Main",
"neighborhood":[
"Zeil"
],
"postcode":"60311",
"region":"Hesse"
},
"name":"Goc Pho",
"related_places":{
},
"timezone":"Europe/Berlin"
},
{
"fsq_id":"4dc97e9cd4c0abe9b63152f9",
"categories":[
{
"id":13025,
"name":"Wine Bar",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/food/winery_",
"suffix":".png"
}
}
],
"chains":[
],
"distance":443,
"geocodes":{
"main":{
"latitude":50.11249,
"longitude":8.682361
},
"roof":{
"latitude":50.11249,
"longitude":8.682361
}
},
"link":"/v3/places/4dc97e9cd4c0abe9b63152f9",
"location":{
"address":"Hasengasse 5-7",
"country":"DE",
"cross_street":"",
"formatted_address":"Hasengasse 5-7, Frankfurt am Main",
"locality":"Frankfurt am Main",
"postcode":"",
"region":"Hesse"
},
"name":"Weinterasse Rollanderhof",
"related_places":{
"parent":{
"fsq_id":"4b058852f964a520bfbe22e3",
"name":"Kleinmarkthalle"
}
},
"timezone":"Europe/Berlin"
},
{
"fsq_id":"4b058852f964a520bfbe22e3",
"categories":[
{
"id":17069,
"name":"Grocery Store / Supermarket",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/shops/food_grocery_",
"suffix":".png"
}
}
],
"chains":[
],
"distance":509,
"geocodes":{
"main":{
"latitude":50.112798,
"longitude":8.683843
},
"roof":{
"latitude":50.112798,
"longitude":8.683843
}
},
"link":"/v3/places/4b058852f964a520bfbe22e3",
"location":{
"address":"Hasengasse 5-7",
"country":"DE",
"formatted_address":"Hasengasse 5-7, 60311 Frankfurt am Main",
"locality":"Frankfurt am Main",
"postcode":"60311",
"region":"Hesse"
},
"name":"Kleinmarkthalle",
"related_places":{
"children":[
{
"fsq_id":"4b9f7a6ef964a5204c2537e3",
"name":"Worscht Schreiber"
},
{
"fsq_id":"4e216f05e4cdf6859185064f",
"name":"Käse Thomas"
},
{
"fsq_id":"4dc97e9cd4c0abe9b63152f9",
"name":"Weinterasse Rollanderhof"
},
{
"fsq_id":"4da9504a4b22f071ea9bf0e3",
"name":"Fischmarkt"
},
{
"fsq_id":"5bf430a5fdb9a7002ca44d95",
"name":"Die Praline"
},
{
"fsq_id":"4da94cda43a1128196d9dcb4",
"name":"Wurst Dey"
},
{
"fsq_id":"5c35d9d89ba3e5002ced9618",
"name":"Biometzgerei Schick"
},
{
"fsq_id":"5346a2ea498e5a0d4d9da38f",
"name":"Arkade Café&Shop"
},
{
"fsq_id":"4da94f456a2303012ef18b92",
"name":"Geflügel Dietrich"
},
{
"fsq_id":"4cf0fb7d1c158cfaa8b6cdb5",
"name":"Alasti’s Valentino"
}
]
},
"timezone":"Europe/Berlin"
},
{
"fsq_id":"4bcb0d76511f952175acb0c7",
"categories":[
{
"id":16053,
"name":"Waterfront",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/parks_outdoors/river_",
"suffix":".png"
}
}
],
"chains":[
],
"distance":479,
"geocodes":{
"main":{
"latitude":50.10643,
"longitude":8.673844
},
"roof":{
"latitude":50.10643,
"longitude":8.673844
}
},
"link":"/v3/places/4bcb0d76511f952175acb0c7",
"location":{
"address":"Untermainkai",
"country":"DE",
"cross_street":"",
"formatted_address":"Untermainkai, 60594 Frankfurt am Main",
"locality":"Frankfurt am Main",
"neighborhood":[
"Innenstadt"
],
"postcode":"60594",
"region":"Hesse"
},
"name":"Main Riverside (Mainufer)",
"related_places":{
"children":[
{
"fsq_id":"4d30406b789a8cfab1032dc6",
"name":"Liegewiese am Mainufer"
}
]
},
"timezone":"Europe/Berlin"
},
{
"fsq_id":"4b058851f964a5208abe22e3",
"categories":[
{
"id":10042,
"name":"Opera House",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/arts_entertainment/performingarts_operahouse_",
"suffix":".png"
}
},
{
"id":10043,
"name":"Theater",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/arts_entertainment/performingarts_theater_",
"suffix":".png"
}
}
],
"chains":[
],
"distance":313,
"geocodes":{
"main":{
"latitude":50.108142,
"longitude":8.673855
},
"roof":{
"latitude":50.108142,
"longitude":8.673855
}
},
"link":"/v3/places/4b058851f964a5208abe22e3",
"location":{
"address":"Untermainanlage 11",
"country":"DE",
"cross_street":"Willy-Brandt-Platz",
"formatted_address":"Untermainanlage 11 (Willy-Brandt-Platz), 60311 Frankfurt am Main",
"locality":"Frankfurt am Main",
"postcode":"60311",
"region":"Hesse"
},
"name":"Oper Frankfurt",
"related_places":{
},
"timezone":"Europe/Berlin"
},
{
"fsq_id":"4c21dafb9390c9b60894c9cd",
"categories":[
{
"id":13236,
"name":"Italian Restaurant",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/food/italian_",
"suffix":".png"
}
},
{
"id":13302,
"name":"Mediterranean Restaurant",
"icon":{
"prefix":"https://ss3.4sqi.net/img/categories_v2/food/mediterranean_",
"suffix":".png"
}
}
],
"chains":[
],
"distance":224,
"geocodes":{
"main":{
"latitude":50.11187,
"longitude":8.678854
},
"roof":{
"latitude":50.11187,
"longitude":8.678854
}
},
"link":"/v3/places/4c21dafb9390c9b60894c9cd",
"location":{
"address":"Weißadlergasse 2",
"country":"DE",
"cross_street":"",
"formatted_address":"Weißadlergasse 2, 60311 Frankfurt am Main",
"locality":"Frankfurt am Main",
"postcode":"60311",
"region":"Hesse"
},
"name":"Medici",
"related_places":{
},
"timezone":"Europe/Berlin"
}
],
"context":{
"geo_bounds":{
"circle":{
"center":{
"latitude":50.1101038,
"longitude":8.6771586
},
"radius":22000
}
}
}
}
FOURSQUARE_PLACES_V3_MOCK_500 = {
"error": "Internal Server Error"
} | junior92jr/location-advisor-backend | recommendations/mocks/foursquare_places_v3_mock.py | foursquare_places_v3_mock.py | py | 15,211 | python | en | code | 0 | github-code | 13 |
10290065291 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.shortcuts import render,redirect
from django.views.generic import TemplateView
from django.contrib import messages
from django.http import HttpResponse
from django.db.models import Q
from ..forms import *
import json
from django.core.serializers.json import DjangoJSONEncoder
from ....request_session import OKbodega,getPerfil,OKconta,OKadmin
from ....sistema.usuarios.models import Perfil,DOCUMENTO_POR_TIENDA,USUARIO_TIENDA
from ....cliente_proveedor.proveedor.tasks import crear_proveedor
from ....cliente_proveedor.persona.models import PERSONA
from ..historial.models import HISTORIAL,LISTA_PRODUCTO
from .tasks import historial_compras
# Create your views here.
class detallar_compra(TemplateView):
    """AJAX endpoint: return the product lines of one purchase document as JSON."""
    def get(self,request,*args,**kwargs):
        # Only accounting users (OKconta) may see purchase details.
        if OKconta(request):
            doc= request.GET['documento']
            i = int(request.GET['pag'])*10  # NOTE(review): computed but never used
            l =DOCUMENTO_POR_TIENDA.objects.get(id=doc)
            lp=LISTA_PRODUCTO.objects.filter(lista=l).values("producto__codigo","producto__descripcion","producto__marca","cantidad","unitario")
            lp=json.dumps(list(lp),cls=DjangoJSONEncoder)
            return HttpResponse(lp,content_type='application/json')
        else:
            # Unauthorized callers get an empty JSON object.
            return HttpResponse("{}",content_type='application/json')
class documento(TemplateView):
    """Search purchase documents (paginated, 10 per page) and return JSON.

    GET params: ``busca`` (search term), ``index`` ("1" = search by supplier
    NIT, anything else = search by document number) and ``pag`` (page).
    """
    def get(self, request, *args, **kwargs):
        ok = OKconta(request)
        if not (OKbodega(request) or ok):
            return HttpResponse("{}", content_type='application/json')
        term = request.GET["busca"].upper()
        mode = request.GET["index"]
        offset = int(request.GET["pag"]) * 10
        purchases_only = Q(lista__tipo_doc__icontains="C")
        if mode == "1":
            supplier = PERSONA.objects.get(nit=term)
            page = HISTORIAL.objects.filter(
                cliente_proveedor=supplier).filter(purchases_only)[offset:offset + 10]
        else:
            page = HISTORIAL.objects.filter(
                Q(documento__icontains=term)).filter(purchases_only)[offset:offset + 10]
        page = page.values("documento", "cliente_proveedor__nit", "lista__id",
                           "ingresa__usuario__username", "lista__total")
        return HttpResponse(json.dumps(list(page), cls=DjangoJSONEncoder),
                            content_type='application/json')
class ver_compras(TemplateView):
    """Render the purchases screen for warehouse users; everyone else is
    redirected to the site root."""
    template_name = "productos/inventario/compras/compras.html"

    def get(self, request, *args, **kwargs):
        if not OKbodega(request):
            return redirect("/")
        store_name = getPerfil(request).tienda.nombre
        return render(request, self.template_name, {"tienda": store_name})
class inv_local(TemplateView):
    """Return a product's set/location identifier as plain text.

    GET param: ``producto`` (PRODUCTO pk). When the product has no
    ``id_set`` the user's ``Perfil.documento4`` is returned instead
    (presumably a per-user default location — TODO confirm).
    Non-warehouse users get "{}".
    """
    def get(self, request, *args, **kwargs):
        if not OKbodega(request):
            return HttpResponse("{}", content_type='text')
        producto_id = int(request.GET['producto'])
        qs = PRODUCTO.objects.get(id=producto_id).id_set
        if qs is None:  # idiom fix: identity test instead of '== None'
            qs = Perfil.objects.get(usuario=request.user).documento4
        return HttpResponse(qs, content_type='text')
class cargar_factura(TemplateView):
    # Prepares (or reuses) the current user's open purchase document and
    # renders the invoice-upload form. Only warehouse users may enter.
    template_name="productos/inventario/compras/cargar.html"
    formU = FormPersona
    url = "/proveedores/nit"
    initial={'key':'value'}
    formulario=Form_registrar
    def get(self,request,*args,**kwargs):
        if OKbodega(request):
            usu= getPerfil(request)
            # The user's assignment to their current store.
            ubicacion = USUARIO_TIENDA.objects.filter(usuario=usu).filter(tienda=usu.tienda)
            lis=0
            if not ubicacion.exists():
                # First visit: create the store assignment and a fresh
                # purchase document (tipo_doc "C") attached to it.
                ubicacion=USUARIO_TIENDA()
                ubicacion.usuario=usu
                ubicacion.tienda=usu.tienda
                ubicacion.save()
                lpt=DOCUMENTO_POR_TIENDA()
                lpt.ubicado=ubicacion
                lpt.tipo_doc="C"
                lpt.save()
                lis=lpt.id
            else:
                # Reuse the open purchase document for the assignment's
                # current correlative number; create it if missing.
                lpt = DOCUMENTO_POR_TIENDA.objects.filter(ubicado=ubicacion[0]).filter(tipo_doc="C").filter(correlativo=ubicacion[0].orden)
                if not lpt.exists():
                    lpt=DOCUMENTO_POR_TIENDA()
                    lpt.ubicado=ubicacion[0]
                    lpt.tipo_doc="C"
                    lpt.correlativo=ubicacion[0].orden
                    lpt.save()
                    lis=lpt.id
                else:
                    lis=lpt[0].id
            tienda=usu.tienda
            form=self.formU(initial=self.initial)
            fm = self.formulario(initial=self.initial)
            # NOTE(review): 'tienda' is immediately reassigned here, making
            # the assignment above redundant — confirm before cleaning up.
            tienda=getPerfil(request).tienda
            context={
                "tienda":tienda.nombre,
                "store":tienda.id,
                "form":form,
                "formulario":fm,
                "url":self.url,
                "accion":"cargar compra",
                "lista":lis
            }
            return render(request, self.template_name, context)
        return redirect("/")
class registrar_compra(TemplateView):
    # Registers a purchase (POST) for the warehouse user's open document.
    # Responds with a plain-text status message ("V" on success).
    def post(self,request,*args,**kwargs):
        if OKbodega(request):
            doc=request.POST["documento"].upper()
            nnit=request.POST["nit"].upper()
            fecha=request.POST["fecha"]
            cr=request.POST["credito"]
            # 'credito' arrives as the JS string "true"/"false".
            credito=False
            if cr=="true":
                credito=True
            mensaje=""
            nit=PERSONA.objects.get(nit=nnit)
            # Reject duplicates: same document number + supplier + type "C".
            his = HISTORIAL.objects.filter(documento=doc).filter(cliente_proveedor=nit).filter(lista__tipo_doc="C")
            if his.exists():
                mensaje="Un archivo similar existe ya registrado,favor revisar"
            else:
                usu=getPerfil(request)
                dpt =DOCUMENTO_POR_TIENDA.objects.filter(ubicado__usuario=usu).filter(ubicado__tienda=usu.tienda).filter(tipo_doc="C")
                if dpt.exists():
                    dpt=dpt[0]
                    lp = LISTA_PRODUCTO.objects.filter(lista=dpt)
                    if lp.exists():
                        # Asynchronous Celery task does the actual recording.
                        cargar=historial_compras.delay(doc,nnit,credito,dpt.id,fecha)
                        # Advance the user's correlative so the next purchase
                        # gets a fresh document.
                        ut = USUARIO_TIENDA.objects.get(id=dpt.ubicado.id)
                        ut.orden=int(ut.orden)+1
                        ut.save()
                        mensaje="V"
                    else:
                        mensaje="la lista parece estar vacia"
                else:
                    mensaje="la lista parece estar vacia"
            return HttpResponse(mensaje,content_type='text')
        else:
            return HttpResponse("no tienes permisos para registrar una compra",content_type='text')
| corporacionrst/software_RST | app/productos/inventario/compras/views.py | views.py | py | 5,385 | python | es | code | 0 | github-code | 13 |
7960406739 | from django.shortcuts import render
from .models import CricketTeamModel
from django.views.generic import View
from django.http import HttpResponse
# Create your views here.
from django.core.serializers import serialize
import json
from .mixins import SerializeMixin,HttpResponseMixin
class CricketTeamsView(View):
    """Return every CricketTeamModel row, serialized with Django's JSON
    serializer (includes model/pk envelope around each object)."""
    def get(self, request, *args, **kwargs):
        teams = CricketTeamModel.objects.all()
        json_data = serialize('json', teams)
        return HttpResponse(json_data, content_type='application/json')
class CricketTeamsViewX(View):
    """Return all teams as a flat JSON list.

    Unlike CricketTeamsView, this strips the serializer's envelope
    (model name / pk) and returns only each object's ``fields`` dict.
    """
    def get(self, request, *args, **kwargs):
        teams = CricketTeamModel.objects.all()
        # Round-trip through the serializer to get plain dicts, then keep
        # just the fields (the debug print of the parsed data was removed).
        parsed = json.loads(serialize('json', teams))
        final_data = [obj['fields'] for obj in parsed]
        return HttpResponse(json.dumps(final_data), content_type='application/json')
class CricketTeamsViewXJ(View, SerializeMixin):
    """Serve all teams as JSON via the project's SerializeMixin helper."""
    def get(self, request, *args, **kwargs):
        queryset = CricketTeamModel.objects.all()
        payload = self.serialize_cricket_teams(queryset)
        return HttpResponse(payload, content_type='application/json')
class CricketTeamView(View, SerializeMixin, HttpResponseMixin):
    """Return one team (looked up by ``team_id``) as JSON.

    Missing teams yield a 404 with a ``{"msg": ...}`` payload.
    """
    def get(self, request, id, *args, **kwargs):
        try:
            team = CricketTeamModel.objects.get(team_id=id)
        except CricketTeamModel.DoesNotExist:
            # Keep the payload shape existing clients expect.
            json_data = json.dumps({'msg': 'The requested resource not available'})
            return self.render_to_http_response(json_data, status=404)
        else:
            json_data = self.serialize_cricket_team(team)
            return self.render_to_http_response(json_data)
from django.utils.decorators import method_decorator
from django.views.decorators.csrf import csrf_exempt
from .utils import is_json
@method_decorator(csrf_exempt, name='dispatch')
class CricketTeamsCBV(View, SerializeMixin):
    """CSRF-exempt endpoint: list teams on GET, validate a JSON body on POST."""
    def get(self, request, *args, **kwargs):
        teams = CricketTeamModel.objects.all()
        json_data = self.serialize_cricket_teams(teams)
        return HttpResponse(json_data, content_type='application/json')

    def post(self, request, *args, **kwargs):
        json_data = request.body
        # BUGFIX: the original called the undefined name ``is_valid`` (the
        # helper imported from .utils is ``is_json``) and, on valid input,
        # only printed "True" and fell through returning None — a 500 in
        # Django. We now acknowledge valid JSON with a response; adjust the
        # success message if the intended behavior differs.
        if not is_json(json_data):
            resp = json.dumps({'msg': 'Please send valid json only'})
            return HttpResponse(resp, content_type='application/json')
        resp = json.dumps({'msg': 'Valid json received'})
        return HttpResponse(resp, content_type='application/json')
12332157078 | import json
from channels.generic.websocket import WebsocketConsumer
from channels.generic.websocket import AsyncWebsocketConsumer
from asgiref.sync import sync_to_async,async_to_sync
from base.models import Room,Message
from django.contrib.auth.models import User
class ChatConsumer(WebsocketConsumer):
    """Synchronous consumer for one chat room (group ``chat_<room_id>``)."""
    def connect(self):
        self.room_id = self.scope['url_route']['kwargs']['room_id']
        self.room_group_name = "chat_%s" % self.room_id
        async_to_sync(self.channel_layer.group_add)(
            self.room_group_name,
            self.channel_name
        )
        self.accept()

    def disconnect(self, event):
        # BUGFIX: channel-layer methods are coroutines. The original called
        # group_discard directly, producing a never-awaited coroutine, so the
        # channel was never actually removed from the group (connect() above
        # shows the correct async_to_sync pattern).
        async_to_sync(self.channel_layer.group_discard)(
            self.room_group_name,
            self.channel_name
        )

    def broadcaste(self, data):
        """Group-event handler: forward ``data['message']`` to this socket."""
        message = data['message']
        self.send(json.dumps({'message': message}))
class Groupchat(AsyncWebsocketConsumer):
    """Async group-chat consumer: relays each message to the whole room."""
    async def connect(self):
        self.roomid = self.scope['url_route']['kwargs']['room_id']
        self.room_group_name = "chat_%s" % self.roomid
        await self.channel_layer.group_add(
            self.room_group_name, self.channel_name)
        await self.accept()

    async def disconnect(self, event):
        await self.channel_layer.group_discard(
            self.room_group_name, self.channel_name)

    async def receive(self, text_data):
        """Parse an incoming frame and fan it out to the room group."""
        payload = json.loads(text_data)
        await self.channel_layer.group_send(
            self.room_group_name,
            {
                'type': 'chat_message',
                'message': str(payload["message"]),
                'username': payload["username"],
                'room': payload["room"],
            }
        )

    async def chat_message(self, event):
        """Group-event handler: push the relayed message to this socket."""
        await self.send(text_data=json.dumps({
            'message': str(event["message"]),
            'username': event["username"],
            'room': event["room"],
        }))
| gopalareddy329/Notify | sockets/client.py | client.py | py | 2,162 | python | en | code | 0 | github-code | 13 |
72190591377 | from PvsRMeasurement import RecSystem
from math import sqrt
class RecommendationSystem(RecSystem):
    """User-based collaborative-filtering recommender (MovieLens-style data).

    ``trainSet`` rows are (rating, user, movie) triples. ``self.votes`` maps
    user -> {movie: rating, plus per-user keys 'age', 'gender', 'occupation',
    'zip_code', 'ratings_sum', 'ratings_num', 'ratings_avg'}.
    """
    def __init__(self, trainSet):
        self.trainSet = trainSet
        self.users = set()
        self.movies = set()
        self.votes = {}
        self.inputDataProcessed = False
    def processInputArray(self):
        """Build all lookup structures from the train set (call once)."""
        self.load_users_and_movies()
        self.create_votes_dict()
        self.load_ratings_to_votes_dict()
        self.calculate_users_avg_rating()
        self.load_extra_users_info()
        self.inputDataProcessed = True
    def getQueryFloatResult(self, queryTuple):
        """RecSystem hook: predict the rating for a (user, movie) tuple."""
        user, movie = queryTuple
        return self.recomendation(user, movie)
    def load_users_and_movies(self):
        """Collect the distinct user and movie ids seen in the train set."""
        for row in self.trainSet:
            _, user, movie = row
            self.users.add(user)
            self.movies.add(movie)
    def create_votes_dict(self):
        """Initialise every (user, movie) rating to 0 (meaning 'unrated')."""
        for user in self.users:
            self.votes[user] = {}
            for movie in self.movies:
                self.votes[user][movie] = 0
    def load_ratings_to_votes_dict(self):
        """Fill in the actual integer ratings from the train set."""
        for row in self.trainSet:
            rating, user, movie = row
            self.votes[user][movie] = int(rating)
    def load_extra_users_info(self):
        """Attach demographic data from the pipe-delimited 'data/u.user' file."""
        with open('data/u.user', 'r') as file:
            for row in file:
                user, age, gender, occupation, zip_code = row.split('|')
                if user in self.users:
                    self.votes[user]['age'] = int(age)
                    self.votes[user]['gender'] = gender
                    self.votes[user]['occupation'] = occupation
                    self.votes[user]['zip_code'] = zip_code
    def calculate_users_avg_rating(self):
        """Compute each user's mean rating over the movies they rated."""
        for user in self.users:
            self.votes[user]['ratings_sum'] = 0
            self.votes[user]['ratings_num'] = 0
            for movie in self.movies:
                rating = self.votes[user][movie]
                # 0 means 'not rated', so it is excluded from the average.
                if rating:
                    self.votes[user]['ratings_sum'] += rating
                    self.votes[user]['ratings_num'] += 1
            try:
                self.votes[user]['ratings_avg'] = \
                    self.votes[user]['ratings_sum'] / self.votes[user]['ratings_num']
            except ZeroDivisionError:
                self.votes[user]['ratings_avg'] = 0
    def get_movies_rated_by_both(self, user_x, user_y):
        """Return the movies rated (non-zero) by both users."""
        user_x_movies = [movie for movie in self.movies if self.votes[user_x][movie]]
        user_y_movies = [movie for movie in self.movies if self.votes[user_y][movie]]
        return list(set(user_x_movies) & set(user_y_movies))
    def data_based_similarity(self, user_x, user_y):
        """Heuristic demographic similarity (currently unused by recomendation)."""
        similarity = 0
        if abs(self.votes[user_x]['age'] - self.votes[user_y]['age']) < 5:
            similarity += 0.1
        elif abs(self.votes[user_x]['age'] - self.votes[user_y]['age']) < 10:
            similarity += 0.05
        else:
            similarity -= 0.1
        if self.votes[user_x]['gender'] == self.votes[user_y]['gender']:
            similarity += 0.15
        else:
            similarity -= 0.15
        if self.votes[user_x]['occupation'] == self.votes[user_y]['occupation']:
            similarity += 0.2
        else:
            similarity -= 0.2
        if self.votes[user_x]['zip_code'].startswith(self.votes[user_y]['zip_code'][:2]):
            similarity += 0.1
        else:
            similarity -= 0.1
        return similarity
    def pearson_correlation_similarity(self, user_x, user_y):
        """Pearson correlation over the users' commonly rated movies (0 when undefined)."""
        movies_rated_by_both = self.get_movies_rated_by_both(user_x, user_y)
        numerator = 0
        denominator_one = 0
        denominator_two = 0
        for movie in movies_rated_by_both:
            numerator += (self.votes[user_x][movie] - self.votes[user_x]['ratings_avg']) * \
                         (self.votes[user_y][movie] - self.votes[user_y]['ratings_avg'])
            denominator_one += (self.votes[user_x][movie] - self.votes[user_x]['ratings_avg']) ** 2
            denominator_two += (self.votes[user_y][movie] - self.votes[user_y]['ratings_avg']) ** 2
        denominator = sqrt(denominator_one) * sqrt(denominator_two)
        if denominator:
            return numerator / denominator
        return 0
    def recomendation(self, user, movie):
        """Predict user's rating for movie: the user's average plus the
        similarity-weighted deviations of all other users (Resnick formula).
        Returns 0 for unknown users or movies."""
        if movie not in self.movies or user not in self.users:
            return 0
        k_denominator = 0
        summation = 0
        users_set_without_user = self.users - {user}
        for other_user in users_set_without_user:
            similarity = self.pearson_correlation_similarity(user, other_user) # + self.data_based_similarity(user, other_user)
            if similarity:
                k_denominator += abs(similarity)
                summation += similarity * (self.votes[other_user][movie] - self.votes[other_user]['ratings_avg'])
        if k_denominator:
            k = 1 / k_denominator
        else:
            k = 0
        # print('user={}, movie={}, user_avg_rate={}, k={}, summation={}, result={}'.format(user, movie, self.votes[user]['ratings_avg'], k, summation, self.votes[user]['ratings_avg'] + k * summation))
        return self.votes[user]['ratings_avg'] + k * summation
| Akus93/systemy_rekomendacyjne | recommendation_system.py | recommendation_system.py | py | 5,180 | python | en | code | 0 | github-code | 13 |
4087250591 | import keras
from keras.datasets import cifar10
from keras.layers import Activation, Conv2D, Dense, Dropout, Flatten, MaxPooling2D
from keras.models import Sequential, load_model
from keras.utils.np_utils import to_categorical
import numpy as np
import matplotlib.pyplot as plt
# Load the data
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
# Of the full dataset, use only 300 samples for training and 100 for testing
X_train = X_train[:300]
X_test = X_test[:100]
y_train = to_categorical(y_train)[:300]
y_test = to_categorical(y_test)[:100]
# Model definition
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
                 input_shape=X_train.shape[1:]))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
# --------------------------------------------------------------
# Exercise section: fill in this part
model.add(Conv2D(64, (3,3), padding="same"))
model.add(Activation("relu"))
model.add(Conv2D(64, (3,3)))
model.add(Activation("relu"))
model.add(MaxPooling2D(pool_size=(2,2)))
model.add(Dropout(0.25))
# --------------------------------------------------------------
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(10))
model.add(Activation('softmax'))
# Compile
opt = keras.optimizers.rmsprop(lr=0.0001, decay=1e-6)
model.compile(loss='categorical_crossentropy',
              optimizer=opt,
              metrics=['accuracy'])
# Training takes several minutes, so load weights obtained from prior training
model.load_weights('param_cifar10.hdf5')
# Training
model.fit(X_train, y_train, batch_size=32, epochs=1)
# To save the weights, use the line below (it cannot be run here).
# model.save_weights('param_cifar10.hdf5')
# Evaluate accuracy
scores = model.evaluate(X_test, y_test, verbose=1)
print('Test loss:', scores[0])
print('Test accuracy:', scores[1])
# Visualize the data (first 10 test images)
for i in range(10):
    plt.subplot(2, 5, i+1)
    plt.imshow(X_test[i])
plt.suptitle("テストデータの先頭の10枚",fontsize=20)
plt.show()
# Prediction (first 10 test images)
pred = np.argmax(model.predict(X_test[0:10]), axis=1)
print(pred)
model.summary() | yasuno0327/LearnCNN | aidemy/cnn/task5.py | task5.py | py | 2,418 | python | ja | code | 1 | github-code | 13 |
38462190419 | """ Test the DFT example *examples/DFT and iDFT with PyDynamic...ipynb*."""
import numpy as np
from matplotlib.pyplot import (
errorbar,
figure,
plot,
subplot,
subplots_adjust,
xlabel,
xlim,
xticks,
ylabel,
)
from numpy import fft, random, sqrt
from numpy.ma import arange, sin
from scipy.constants import pi
from PyDynamic import GUM_DFT
def test_run_copy_of_notebook_code():
    """Smoke test: the DFT notebook example runs end to end without error.

    Builds a noisy two-tone signal, propagates its uncertainty through
    GUM_DFT and draws the same figures as the notebook. No numeric
    assertions are made; pyplot call order matters for the figure state.
    """
    np.random.seed(123)
    Fs = 100  # sampling frequency in Hz
    Ts = 1 / Fs  # sampling interval in s
    N = 1024  # number of samples
    time = arange(0, N * Ts, Ts)  # time instants
    noise_std = 0.1  # signal noise standard deviation
    # time domain signal
    x = (
        sin(2 * pi * Fs / 10 * time)
        + sin(2 * pi * Fs / 5 * time)
        + random.randn(len(time)) * noise_std
    )
    # Apply DFT with propagation of uncertainties
    X, UX = GUM_DFT(x, noise_std**2)
    f = fft.rfftfreq(N, Ts)  # frequency values
    figure()
    plot(time, x)
    xlim(time[0], time[-1])
    xlabel("time / s", fontsize=18)
    ylabel("signal amplitude / au", fontsize=18)
    # Real/imaginary spectra with error bars (first/second half of X, UX).
    figure()
    subplot(211)
    errorbar(f, X[: len(f)], sqrt(UX[: len(f)]))
    ylabel("real part", fontsize=18)
    xticks([])
    subplot(212)
    errorbar(f, X[len(f) :], sqrt(UX[len(f) :]))
    ylabel("imaginary part", fontsize=18)
    xlabel("frequency / Hz", fontsize=18)
    subplots_adjust(hspace=0.05)
| Met4FoF/Code | PyDynamic/test/test_execution_of_dft_notebook_example.py | test_execution_of_dft_notebook_example.py | py | 1,421 | python | en | code | 0 | github-code | 13 |
41847283723 | '''
Naive Solution O(N): going thru the whole array to check for duplicates
'''
class Solution(object):
    def containsDuplicate(self, nums):
        """
        :type nums: List[int]
        :rtype: bool

        Single pass with a set of previously seen values: report a
        duplicate as soon as a value repeats.
        """
        seen = set()
        for value in nums:
            if value in seen:
                return True
            seen.add(value)
        return False
| gabeyong4/Gabe-LeetCode | contains-duplicate/contains-duplicate.py | contains-duplicate.py | py | 396 | python | en | code | 0 | github-code | 13 |
33997215333 | from typing import List
import numpy as np
class TPTZController:
    """Discrete two-pole/two-zero (2P2Z) IIR compensator.

    ``tptz_buffer`` holds the coefficients in the order
    [a1, a2, b0, b1, b2]; the implemented difference equation is

        y[n] = b0*x[n] + b1*x[n-1] + b2*x[n-2] - a1*y[n-1] - a2*y[n-2]

    The bare ``None`` expression statements of the original (dead no-ops,
    likely stripped docstrings) were removed, as was the re-zeroing of
    arrays already created with ``np.zeros``.
    """

    def __init__(self, tptz_buffer):
        self.buffer = tptz_buffer
        # Two-deep input/output histories; index 0 holds n-1, index 1 holds n-2.
        self.x = np.zeros(2, dtype=np.float64)
        self.y = np.zeros(2, dtype=np.float64)
        # Named indices into self.buffer for the five coefficients.
        self.a1 = 0
        self.a2 = 1
        self.b0 = 2
        self.b1 = 3
        self.b2 = 4
        # Named indices into the history arrays.
        self.n_1 = 0
        self.n_2 = 1
        # Scratch values from the latest evaluation (kept for inspection).
        self.center = 0
        self._y = 0

    def get_output(self, input_error: float) -> float:
        """Advance the filter by one sample and return the new output."""
        return self._2p2z(input_error)

    def set_initial(self, setter: float) -> None:
        """Preload both output-history taps (e.g. for bumpless start-up)."""
        self.y[self.n_1] = setter
        self.y[self.n_2] = setter

    def _2p2z(self, _x: float) -> float:
        """Evaluate the difference equation and shift the history taps."""
        self.center = (
            _x * self.buffer[self.b0]
            + self.buffer[self.b1] * self.x[self.n_1]
            + self.buffer[self.b2] * self.x[self.n_2]
        )
        self._y = (
            self.center
            - self.buffer[self.a1] * self.y[self.n_1]
            - self.buffer[self.a2] * self.y[self.n_2]
        )
        # Shift histories: n-1 becomes n-2, the new sample becomes n-1.
        self.x[self.n_2] = self.x[self.n_1]
        self.x[self.n_1] = _x
        self.y[self.n_2] = self.y[self.n_1]
        self.y[self.n_1] = self._y
        return self._y

    def set_tptz_coefficients(self, tptz_buffer: List[float]) -> None:
        """Replace the coefficient buffer; histories are preserved."""
        self.buffer = tptz_buffer
| SummersEdge23/mnapy | TPTZController.py | TPTZController.py | py | 1,463 | python | en | code | 0 | github-code | 13 |
41407490142 | import view
import model_menu
from tkinter import *
from tkinter import ttk
def click_button_count_days():
    # Placeholder handler: prints a blank line until the calculation is wired up.
    print()
def click_button_calculate():
    # Placeholder handler: prints a blank line until the calculation is wired up.
    print()
def start():
    """Build and run the calculator's tkinter main window.

    Labels/buttons are user-facing Russian strings; the button commands
    other than the first are not wired to handlers yet.
    """
    # view.create_menu()
    # select()
    root = Tk()
    frm = ttk.Frame(root, padding=30)
    frm.grid()
    ttk.Label(frm, text='Калькулятор').grid(row=0, column=0)
    ttk.Button(frm, text='Вычислить сколько дней до начала лета', command=click_button_count_days).grid(
        row=1, column=1)
    ttk.Button(frm, text='Вычислить 2 + 2').grid(row=2, column=1)
    ttk.Button(frm, text='Рандомное число').grid(row=3, column=1)
    ttk.Button(frm, text='Выход').grid(row=0, column=1)
    root.mainloop()
def select():
    """Console fallback: show the menu, read a choice and dispatch it."""
    view.input_item()
    number_item = int(input())
    model_menu.select(number_item)
# Launch the GUI when the module is executed.
start()
| dvoroshin/python_edu | seminar_7/controller.py | controller.py | py | 874 | python | ru | code | 0 | github-code | 13 |
10399470088 | import logging
import logging.handlers
from pathlib import Path
def set_logging():
    '''Configure root logging with rotating debug and info log files.

    Run once at the beginning of main() for uniform logging formatting.
    Creates ``urban-garden.debug.log`` (DEBUG and up) and
    ``urban-garden.info.log`` (INFO and up) in the user's home directory,
    each capped at 64 KiB with 5 rotated backups.

    NOTE(review): calling this more than once attaches duplicate handlers,
    and the '/dev/null' basicConfig target is POSIX-only — confirm whether
    either matters for this deployment.
    '''
    # basicConfig sets the root level; the real output goes to the handlers below.
    logging.basicConfig(filename='/dev/null', level=logging.DEBUG)
    log_formatter = logging.Formatter(fmt='%(asctime)s [%(levelname)s]: %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
    # pathlib join instead of string concatenation (same path on POSIX).
    debug_log = logging.handlers.RotatingFileHandler(str(Path.home() / 'urban-garden.debug.log'), maxBytes=65536, backupCount=5)
    debug_log.setLevel(logging.DEBUG)
    debug_log.setFormatter(log_formatter)
    info_log = logging.handlers.RotatingFileHandler(str(Path.home() / 'urban-garden.info.log'), maxBytes=65536, backupCount=5)
    info_log.setLevel(logging.INFO)
    info_log.setFormatter(log_formatter)
    logging.getLogger('').addHandler(debug_log)
    logging.getLogger('').addHandler(info_log)
| Urban-Garden/dynamo-db-adapter | ez_logging/ez_logging.py | ez_logging.py | py | 880 | python | en | code | 0 | github-code | 13 |
12757421973 | # --------------
# Data loading and splitting
#The first step - you know the drill by now - load the dataset and see how it looks like. Additionally, split it into train and test set.
# NOTE: this is a graded-exercise script; the variable `path` is injected by
# the grading environment, and variable names (df, X_train, accuracy, ...)
# are checked by the grader — do not rename them.
# import the libraries
import numpy as np
import pandas as pd
import seaborn as sns
from sklearn.model_selection import train_test_split
import warnings
import matplotlib.pyplot as plt
import matplotlib.pyplot as plt
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
from sklearn.metrics import roc_auc_score
from sklearn import metrics
warnings.filterwarnings('ignore')
# Code starts here
# Load dataset using pandas read_csv api in variable df and give file path as path.
file_path = path
print(file_path)
df = pd.read_csv(path)
print(df)
# Display first 5 columns of dataframe df.
df.head(5)
# Store all the features(independent values) in a variable called X
X = df[["age" , "sex" , "bmi" , "children" , "smoker" , "region" , "charges" ]]
print(X)
# Store the target variable (dependent value) in a variable called y
y = df["insuranceclaim"]
print(y)
# Split the dataframe into X_train,X_test,y_train,y_test using train_test_split() function. Use test_size = 0.2 and random_state = 6
train , test = train_test_split(df , test_size = 0.2 , random_state = 6)
X_train = train.drop(["insuranceclaim"] , axis = 1)
y_train = train["insuranceclaim"]
X_test = test.drop(["insuranceclaim"] , axis = 1)
y_test = test["insuranceclaim"]
# Code ends here
# --------------
# Outlier Detection
# Let's plot the box plot to check for the outlier.
import matplotlib.pyplot as plt
# Code starts here
# Plot the boxplot for X_train['bmi'].
plt.boxplot(X_train["bmi"])
# Set quantile equal to 0.95 for X_train['bmi'] and store it in variable q_value.
q_value = X_train["bmi"].quantile(0.95)
print(q_value)
# Check the value counts of the y_train
y_train.value_counts()
# Code ends here
# --------------
# Code starts here
# Correlation Check !
#Let's check the pair_plot for feature vs feature. This tells us which features are highly correlated with the other feature and help us predict its better logistic regression model.
# Find the correlation between the features which are stored in 'X_train' and store the result in a variable called 'relation'.
relation = X_train.corr()
print(relation)
# plot pairplot for X_train.
sns.pairplot(X_train)
# Code ends here
# --------------
import seaborn as sns
import matplotlib.pyplot as plt
# Predictor check!
#Let's check the count_plot for different features vs target variable insuranceclaim. This tells us which features are highly correlated with the target variable insuranceclaim and help us predict it better.
# Code starts here
# Create a list cols store the columns 'children','sex','region','smoker' in it.
cols = ['children','sex','region','smoker']
print(cols)
type(cols)
# Create subplot with (nrows = 2 , ncols = 2) and store it in variable's fig ,axes
fig , axes = plt.subplots(nrows=2 , ncols=2 , figsize=(30,30))
# Create for loop to iterate through row.
# Create another for loop inside for to access column.
# create variable col and pass cols[ i * 2 + j].
# Using seaborn plot the countplot where x=X_train[col], hue=y_train, ax=axes[i,j]
for i in range(0,2):
    for j in range(0,2):
        col = cols[i * 2 + j]
        sns.countplot(x=X_train[col],hue=y_train,ax=axes[i,j])
# Code ends here
# --------------
from sklearn.model_selection import GridSearchCV, RandomizedSearchCV
from sklearn.linear_model import LogisticRegression
from sklearn.metrics import accuracy_score
# Is my Insurance claim prediction right?
# Now let's come to the actual task, using logistic regression to predict the insuranceclaim. We will select the best model by cross-validation using Grid Search.
# You are given a list of values for regularization parameters for the logistic regression model.
# parameters for grid search
parameters = {'C':[0.1,0.5,1,5]}
print(parameters)
# Instantiate a logistic regression model with LogisticRegression() and pass the parameter as random_state=9 and save it to a variable called 'lr'.
lr = LogisticRegression(random_state=9)
# Inside GridSearchCV() pass estimator as the logistic model, param_grid=parameters. to do grid search on the logistic regression model store the result in variable grid.
grid = GridSearchCV(estimator=lr , param_grid=parameters)
# Fit the model on the training data X_train and y_train.
grid.fit(X_train,y_train)
# Make predictions on the X_test features and save the results in a variable called 'y_pred'.
y_pred = grid.predict(X_test)
# Calculate accuracy for grid and store the result in the variable accuracy
accuracy = accuracy_score(y_test , y_pred)
# print accuracy
print(accuracy)
# Code starts here
# Code ends here
# --------------
# Performance of a classifier !
# Now let's visualize the performance of a binary classifier. Check the performance of the classifier using roc auc curve.
from sklearn.metrics import roc_auc_score
from sklearn import metrics
# Calculate the roc_auc_score and store the result in variable score.
score = roc_auc_score(y_test , y_pred)
print(score)
# Predict the probability using grid.predict_proba on X_test and take the second column and store the result in y_pred_proba.
y_pred_proba = grid.predict_proba(X_test)
print(y_pred_proba)
y_pred_proba = y_pred_proba[:,1]
print(y_pred_proba)
# Use metrics.roc_curve to calculate the fpr and tpr and store the result in variables fpr, tpr, _.
fpr , tpr , _ = metrics.roc_curve(y_test , y_pred_proba)
# Calculate the roc_auc score of y_test and y_pred_proba and store it in variable called roc_auc.
roc_auc = roc_auc_score(y_test , y_pred_proba)
print(roc_auc)
# Plot auc curve of 'roc_auc' using the line plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc)).
plt.plot(fpr,tpr,label="Logistic model, auc="+str(roc_auc))
plt.legend(loc = 4)
plt.show()
# Code starts here
# Code ends here
| Niteshnupur/nlp-dl-prework | Nitesh-Bhosle-:---Insurance-claim-prediction/code.py | code.py | py | 6,133 | python | en | code | 0 | github-code | 13 |
4833437869 | import NlpUtils
import jsondiff
import collections
# Language/encoding table used when emitting raw NLP text files.
# Maps language id -> (language macro written into the file header,
# tuple of output encodings). The debugging build only emits the
# English template.
if NlpUtils.g_EnableDebugging:
    g_SupportedEncoding = {
        'template': ('English', ('windows-1252', ), )
    }
else:
    g_SupportedEncoding = {
        'zh-cn': ('Chinese', ('utf-8', 'gb2312', ), )
    }
# Bundle of per-version translation artefact paths/data.
VtTrDataTuple = collections.namedtuple(
    'VtTrDataTuple', ('rawNlp', 'trTemplate', 'trDiff', 'trIndex'))

def GetNlpJsonPath(ver: str, lang: str) -> str:
    """Output path of the generated NLP JSON for one version/language."""
    return '../NlpTr/out/VT{}.{}.json'.format(ver, lang)

def GetRawNlpPath(ver: str, lang: str, enc: str) -> str:
    """Output path of the raw NLP text file for one version/language/encoding."""
    return '../NlpTr/out/VT{}.{}.{}.txt'.format(ver, lang, enc)

def GetTrPath(ver: str, lang: str) -> str:
    """Path of the translation template for one version/language."""
    return '../NlpTr/VT{}.{}.json'.format(ver, lang)

def GetTrDiffPath(ver: str) -> str:
    """Path of the translation diff file for one version."""
    return '../NlpTr/VT{}.diff'.format(ver)

def GetTrIndexPath(ver: str) -> str:
    """Path of the translation index file for one version."""
    return '../NlpTr/VT{}.index'.format(ver)
# Fields whose values must never be altered by translators.
# Key format: "Section[/Subsection]/<entry index>" into the NLP JSON tree.
g_CriticalFields: dict[str, str] = {
    'Common/Registry/0': 'Software\\\\Virtools\\\\Global',
    'Common/Registry/1': 'Usage Count',
    'Common/Timebomb/0': 'Key1',
    'Common/Timebomb/1': 'Key2',
    'Common/Timebomb/2': 'Key3',
    'Common/Timebomb/3': 'SYSINFO.SysInfo32\\\\CLSID',
    'Common/Timebomb/4': '\\\\csrsrv32.dll',
    '3D Layout/Registry/0': 'Software\\\\NeMo\\\\3D Layout',
}

def CriticalFieldChecker(nlpJson: dict):
    """Force every critical field in *nlpJson* back to its canonical value,
    warning on stdout if any had been changed by a translation."""
    corrected: bool = False
    for field_path, expected in g_CriticalFields.items():
        # Split "Section/.../<index>" into the key path and the entry index.
        *sections, index_str = field_path.split('/')
        assert index_str.isdecimal()
        entry_index = int(index_str)
        # Walk down the key_map tree to the owning node.
        node = nlpJson
        for section in sections:
            node = node['key_map'][section]
        # Restore the canonical value if the translation touched it.
        if node['entries'][entry_index] != expected:
            node['entries'][entry_index] = expected
            corrected = True
    if corrected:
        print('Some critical filed was changed in tr by accident. We have corrected them, but please check tr carefully')
if __name__ == "__main__":
# load each version's diff data and patch data for conventient using
PreLoadedDiffIdxTuple = collections.namedtuple('PreLoadedDiffIndexTuple', ('insertedKey', 'deletedKey', 'plainKeys'))
preLoadedData: dict[str, PreLoadedDiffIdxTuple] = {}
for ver in NlpUtils.g_VirtoolsVersion:
# load diff and index data
insertedKey, deletedKey = NlpUtils.LoadTrDiff(GetTrDiffPath(ver))
plainKeys = NlpUtils.LoadTrIndex(GetTrIndexPath(ver))
# insert to dict
preLoadedData[ver] = PreLoadedDiffIdxTuple._make((insertedKey, deletedKey, plainKeys))
# iterate lang first
# because we use progressive patch. we need iterate vt ver in order for each single languages
for lang in NlpUtils.g_SupportedLangs:
prevPlainValues: list[str] = None
for ver in NlpUtils.g_VirtoolsVersion:
print(f'Loading {ver}.{lang}...')
# pick data from pre-loaded dict
diffIdxData = preLoadedData[ver]
plainKeys = diffIdxData.plainKeys
# load lang file
# and only keeps its value.
trFull = NlpUtils.LoadTrTemplate(GetTrPath(ver, lang))
_, plainValues = zip(*trFull.items())
# patch it if needed
if prevPlainValues is not None:
# patch needed load
# load patch part first
trPart = NlpUtils.LoadTrTemplate(GetTrPath(ver, lang))
# re-construct the diff structure understood by jsondiff
cmpResult = NlpUtils.CombinePlainJsonDiff(diffIdxData.insertedKey, diffIdxData.deletedKey, plainValues)
# patch data
plainValues = jsondiff.patch(prevPlainValues, cmpResult)
# convert plain json to nlp json
nlpJson = NlpUtils.PlainJson2NlpJson(plainKeys, plainValues)
# check some critical fields
CriticalFieldChecker(nlpJson)
if NlpUtils.g_EnableDebugging:
NlpUtils.RemoveKeyMapInGeneratedNlpJson(nlpJson)
NlpUtils.DumpJson(GetNlpJsonPath(ver, lang), nlpJson)
# write into file with different encoding
lang_macro, encs = g_SupportedEncoding[lang]
for enc in encs:
print(f'Processing {ver}.{lang}.{enc}...')
NlpUtils.DumpNlpJson(GetRawNlpPath(ver, lang, enc), enc, lang_macro, nlpJson)
# assign prev json
prevPlainValues = plainValues | yyc12345/VirtoolsTranslation | NlpProc/NlpJsonEncoder.py | NlpJsonEncoder.py | py | 4,456 | python | en | code | 2 | github-code | 13 |
32067638085 | """
Tool for PySimpleGUI
Author - Jason Yang
Date - 2020/05/12
Version - 0.0.3
History
- 2020/05/08
- New Tree class for more methods and functions, but with only name and
one text value for each node.
- 2020/05/10
- New Button class for stadium shape background
- 2020/05/11
- Revised for auto_size_button in class Button.
- 2020/05/12
    - Revised so that button_color may be a single color string such as 'black'.
    - Revised len of button_text to account for halfwidth and fullwidth characters.
"""
from io import BytesIO
from unicodedata import east_asian_width
import PySimpleGUI as sg
from PIL import Image, ImageDraw
class Button(sg.Button):
    """
    New Button class of PySimpleGUI with a stadium-shaped (pill) background.

    The rounded background is rendered with PIL into PNG bytes and handed to
    sg.Button as ``image_data``. Disabled state is not shown well.
    """

    def __init__(
            self, button_text='', button_type=sg.BUTTON_TYPE_READ_FORM,
            target=(None, None), tooltip=None, file_types=(("ALL Files", "*.*"), ),
            initial_folder=None, disabled=False, enable_events=False, font=None,
            size=(None, None), auto_size_button=True, button_color=None, pad=None,
            disabled_button_color=None, focus=False, key=None, visible=True,
            bind_return_key=False, metadata=None, min_size=False):
        """
        Initial Button class; options removed relative to sg.Button:
        - image_file, image_data, image_size
        - use_ttk_buttons
        - change_submits (also removed in all related functions)
        - border_width
        : Parameters - Please refer to sg.Button
            min_size - Bool, True to set size to width of button_text.
              NOTE(review): currently accepted but never used — confirm intent.
        : Return Instance of new Button class
        """
        # Pre-render the stadium background for the given text/font/colors.
        data = self._image(
            button_text, font, button_color, size, auto_size_button)
        # Text color from the caller (or theme default); the widget's own
        # background matches the window so only the drawn pill is visible.
        if button_color:
            color = [button_color[0], sg.theme_background_color()]
        else:
            color = [sg.DEFAULT_BUTTON_COLOR[0], sg.theme_background_color()]
        super().__init__(
            button_text=button_text, button_type=button_type, image_data=data,
            target=target, tooltip=tooltip, file_types=file_types, pad=pad,
            initial_folder=initial_folder, disabled=disabled, size=size,
            enable_events=enable_events, font=font, button_color=color,
            auto_size_button=auto_size_button, focus=focus, key=key,
            disabled_button_color=disabled_button_color, visible=visible,
            bind_return_key=bind_return_key, border_width=0, metadata=metadata)

    def _font(self, font):
        """
        Convert a string or sequence font spec to family, size and style.
        : Parameters
            font - str like "Helvetica 12 bold", or a (family, size[, style])
                   list/tuple
        : Return
            (family (str), size (int), style (str))
        """
        # BUGFIX: for string fonts the original did ``lst = list(font)``,
        # which splits the string into single characters, and bound the
        # result to the misspelled, never-used name ``faimly`` — so any
        # string font crashed (ValueError/NameError). Split on whitespace
        # instead, which matches the tkinter font-string format.
        if isinstance(font, str):
            lst = font.split()
        else:
            lst = font
        family, size = lst[0], int(lst[1])
        style = lst[2] if len(lst) > 2 else ''
        return (family, size, style)

    def _image(self, button_text, font, button_color, size, auto_size_button):
        """
        Create stadium-shaped PNG image data for PySimpleGUI.
        : Parameter
            font - None, str, list or tuple, tkinter font
            button_color - None, tuple(text_color, background_color)
            size - None, (int, int), size (width, height) in chars
        : Return
            data - PNG image bytes for PySimpleGUI.
        """
        color = button_color if button_color else ('white', 'blue')
        s1 = size[0] if size[0] is not None else sg.DEFAULT_BUTTON_ELEMENT_SIZE[0]
        if auto_size_button:
            # Width follows the text's display width (CJK chars count double).
            s1 = self._len(button_text)
        text, background = color
        font = font if font else sg.DEFAULT_FONT
        family, s2, style = self._font(font)
        # Pixel geometry derived from font size: 0.7*size per char column,
        # pill height of three font sizes, radius = half the height.
        width, height = int(s1*s2*0.7), s2*3
        radius = height//2
        im = Image.new(
            mode='RGBA', size=(width+height, height), color=(255, 255, 255, 0))
        image = ImageDraw.Draw(im, mode='RGBA')
        # Two end caps plus a central rectangle form the stadium shape.
        image.ellipse((0, 0, height, height), fill=background)
        image.ellipse((width, 0, width+height, height), fill=background)
        image.rectangle((radius, 0, radius+width, height), fill=background)
        with BytesIO() as output:
            im.save(output, format="PNG")
            data = output.getvalue()
        return data

    def _len(self, text):
        """Display width of *text*: fullwidth/ambiguous East-Asian
        characters count as two columns, everything else as one."""
        length = 0
        for char in text:
            length += 2 if east_asian_width(char) in 'AFW' else 1
        return length
def FileBrowse(
        button_text='Browse', target=(sg.ThisRow, -1), pad=None, key=None,
        file_types=(("ALL Files", "*.*"),), initial_folder=None, tooltip=None,
        size=(None, None), auto_size_button=None, button_color=None,
        enable_events=False, font=None, disabled=False, metadata=None):
    """
    Stadium-shaped button that opens a file-open dialog.
    All parameters mirror sg.FileBrowse; see the PySimpleGUI docs.
    """
    options = dict(
        button_text=button_text,
        button_type=sg.BUTTON_TYPE_BROWSE_FILE,
        target=target,
        file_types=file_types,
        initial_folder=initial_folder,
        tooltip=tooltip,
        size=size,
        auto_size_button=auto_size_button,
        enable_events=enable_events,
        disabled=disabled,
        pad=pad,
        key=key,
        button_color=button_color,
        font=font,
        metadata=metadata,
    )
    return Button(**options)
def FileSaveAs(
        button_text='Save As...', target=(sg.ThisRow, -1), enable_events=False,
        file_types=(("ALL Files", "*.*"),), initial_folder=None, font=None,
        disabled=False, tooltip=None, size=(None, None), auto_size_button=None,
        button_color=None, pad=None, key=None, metadata=None):
    """
    Stadium-shaped button that opens a file save-as dialog.
    All parameters mirror sg.FileSaveAs; see the PySimpleGUI docs.
    """
    options = dict(
        button_text=button_text,
        button_type=sg.BUTTON_TYPE_SAVEAS_FILE,
        target=target,
        file_types=file_types,
        initial_folder=initial_folder,
        tooltip=tooltip,
        size=size,
        disabled=disabled,
        font=font,
        pad=pad,
        auto_size_button=auto_size_button,
        button_color=button_color,
        enable_events=enable_events,
        key=key,
        metadata=metadata,
    )
    return Button(**options)
def FolderBrowse(
        button_text='Browse', target=(sg.ThisRow, -1), initial_folder=None,
        tooltip=None, size=(None, None), auto_size_button=None,
        button_color=None, disabled=False, enable_events=False, font=None,
        pad=None, key=None, metadata=None):
    """
    Stadium-shaped button that opens a folder-selection dialog.
    All parameters mirror sg.FolderBrowse; see the PySimpleGUI docs.
    """
    options = dict(
        button_text=button_text,
        button_type=sg.BUTTON_TYPE_BROWSE_FOLDER,
        target=target,
        initial_folder=initial_folder,
        tooltip=tooltip,
        size=size,
        auto_size_button=auto_size_button,
        disabled=disabled,
        button_color=button_color,
        enable_events=enable_events,
        font=font,
        pad=pad,
        key=key,
        metadata=metadata,
    )
    return Button(**options)
class Tree(sg.Tree):
    """
    Tree for node name shown only, with load from dictionary, dump tree to
    dictionary, delete node, rename node, move node up, move node down,
    where the selection, set node text, read node text, set node value,
    read node value, set select, hide_header, sort nodes
    ** Must call hide_header(window) after window finalized !!!
    """
    def __init__(self, column_width=30, font=('Courier New', 12), key='TREE',
                 text_color='black', background_color='white', num_rows=25,
                 row_height=28):
        """
        Tree is a subclass of sg.Tree with more methods and functions.
        : Parameters
            column_width     - int, width of tree in chars.
            font             - font for character style in tree view.
            key              - str, tree reference key in PySimpleGUI.
            text_color       - color, text color.
            background_color - color, background color.
            num_rows         - int, height of tree view in lines.
            row_height       - int, height of line in pixels.
        : Return
            Instance of Tree
        """
        self.key = key
        self.text = None        # lower-cased search text of the last search
        self.list = []          # flat list of node keys, refreshed by _all_nodes
        self.treedata = sg.TreeData()
        self._init(lines=num_rows, width=column_width, row_height=row_height,
                   text=text_color, background=background_color, font=font,
                   key=key)

    def delete_all_nodes(self):
        """
        Delete all nodes in Tree.
        """
        keys = [tag.key for tag in self.treedata.tree_dict[''].children]
        self.delete_nodes(keys)

    def delete_node(self, key, update=True):
        """
        Delete node 'key' from tree. After delete, selection will move up.
        : Parameters
            key    - str, node key to remove
            update - bool, True to refresh the tree widget afterwards.
        """
        self._all_nodes()
        if key and key in self.list:
            pre_key = self._previous_key(key)
            node = self.treedata.tree_dict[key]
            self.treedata.tree_dict[node.parent].children.remove(node)
            # Breadth-first removal of the node and its whole subtree from
            # the dictionary, so no orphan entries are left behind.
            node_list = [node]
            while node_list != []:
                temp = []
                for item in node_list:
                    temp += item.children
                    del self.treedata.tree_dict[item.key]
                    del item
                node_list = temp
            if update:
                self.tree.update(values=self.treedata)
                self.select(pre_key)

    def delete_nodes(self, keys):
        """
        Delete all nodes with key in keys.
        : Parameters
            keys - sequence of key
        """
        for key in keys:
            self.delete_node(key, update=False)
        self.tree.update(values=self.treedata)
        # NOTE(review): '0' is never a generated node key (_new_key starts at
        # '1'), so this effectively clears the selection - confirm intent.
        self.select('0')

    def dump_tree(self):
        """
        Save treedata to dictionary
        Dictionary pairs in key: [parent, children, text, values]
        : Return
            dictionary for treedata
        """
        dictionary = {}
        for key, node in self.treedata.tree_dict.items():
            children = [n.key for n in node.children]
            dictionary[key] = [node.parent, children, node.text, node.values]
        return dictionary

    def get_text(self, key):
        """
        Get node name
        : Parameters
            key - str, key of node
        : Return
            str, name text of node
        """
        return self.treedata.tree_dict[key].text

    def get_value(self, key):
        """
        Get values[0] of node.
        : Parameters
            key - str, key of node
        : Return
            str, value of node, '' if the node has no values.
        """
        values = self.treedata.tree_dict[key].values
        return values[0] if values else ''

    def hide_header(self, window):
        """
        Hide header of tree. Must be called once after the window is
        finalized; it also caches the underlying element in self.tree.
        : Parameters
            window - instance of sg.Window
        """
        self.tree = window[self.key]
        self.tree.Widget.configure(show='tree')

    def insert_node(self, parent, name, text, update=True):
        """
        Insert a new node under parent, by name and text
        : Parameters
            parent - str, key of parent node, '' for root.
            name   - str, name of new node
            text   - str, value of node
            update - bool, True to update treedata into tree.
        : return
            None
        """
        if name:
            key = self._new_key()
            self.treedata.Insert(parent, key, name, [text])
            if update:
                self.tree.update(values=self.treedata)

    def load_tree(self, dictionary):
        """
        Load dictionary into self.treedata and update self.tree
        : Parameters
            dictionary - data for treedata in Tree.
                Dictionary pairs in key: [parent, children, text, values]
                parent, children are key of nodes, values in [str]
        """
        # Breadth-first walk from the root; 'table' maps the keys stored in
        # the dictionary to freshly generated keys in this tree.
        children = dictionary[''][1]
        table = {'': ''}
        while children != []:
            temp = []
            for child in children:
                node = dictionary[child]
                table[child] = self._new_key()
                self.treedata.Insert(
                    table[node[0]], table[child], node[2], node[3])
                temp += node[1]
            children = temp
        self.tree.update(values=self.treedata)

    def move_node_up(self, key):
        """
        Move node up in tree structure, not position only.
        : Parameters
            key - str, key of node
        """
        if not key:
            return
        pre = self._previous_key(key)
        if not pre:
            # First node in the flattened tree list: nothing above it.
            # (BUGFIX: the old code had a duplicated dead 'if not key' guard
            # and looked up tree_dict[pre] before validating pre.)
            return
        node = self.treedata.tree_dict[key]
        pre_node = self.treedata.tree_dict[pre]
        if pre == node.parent:
            # Node is the first child of its parent: promote it to be a
            # sibling of the parent, inserted just before the parent.
            pre_parent_node = self.treedata.tree_dict[pre_node.parent]
            index = pre_parent_node.children.index(pre_node)
            pre_parent_node.children = (pre_parent_node.children[:index] +
                                        [node] + pre_parent_node.children[index:])
            self.treedata.tree_dict[node.parent].children.remove(node)
            node.parent = pre_parent_node.key
        else:
            if node.parent == pre_node.parent:
                # Same parent: swap positions with the previous sibling.
                parent_node = self.treedata.tree_dict[node.parent]
                index = parent_node.children.index(pre_node)
                parent_node.children.remove(node)
                parent_node.children = (parent_node.children[:index] +
                                        [node] + parent_node.children[index:])
            else:
                # Previous node lives deeper in another branch: append this
                # node as the last child of that node's parent.
                pre_parent_node = self.treedata.tree_dict[pre_node.parent]
                pre_parent_node.children.append(node)
                self.treedata.tree_dict[node.parent].children.remove(node)
                node.parent = pre_parent_node.key
        self.tree.update(values=self.treedata)
        self.select(key)

    def move_node_down(self, key):
        """
        Move node down in tree structure, not position only.
        : Parameters
            key - str, key of node
        """
        if not key:
            return
        nxt = self._next_not_children(key)
        if not nxt:
            return
        node = self.treedata.tree_dict[key]
        nxt_node = self.treedata.tree_dict[nxt]
        if nxt_node.children == []:
            # Next node is a leaf: insert this node right after it among
            # the leaf's siblings.
            self.treedata.tree_dict[node.parent].children.remove(node)
            parent_node = self.treedata.tree_dict[nxt_node.parent]
            index = parent_node.children.index(nxt_node)
            parent_node.children = (parent_node.children[:index+1] +
                                    [node] + parent_node.children[index+1:])
            node.parent = nxt_node.parent
        else:
            # Next node has children: make this node its first child.
            self.treedata.tree_dict[node.parent].children.remove(node)
            nxt_node.children = [node] + nxt_node.children
            node.parent = nxt_node.key
        self.tree.update(values=self.treedata)
        self.select(key)

    def rename(self, key, text):
        """
        Rename node text
        : Parameters
            key  - str, key of node
            text - str, new text for node
        """
        if key and text:
            self.set_text(key, text)

    def search(self, text=None, mode='New'):
        """
        Search name in tree.
        :Parameters
            text - str, name of node.
            mode - str, 'New' for new search, 'Previous' for previous node,
                   'Next' for next node.
        :Return
            key of node, None if not found.
        """
        if len(self.treedata.tree_dict) < 2 or (mode == 'New' and not text):
            return None
        self._all_nodes()
        where = self.where()
        index = self.list.index(where) if where else -1
        if mode == 'New':
            self.text = text.lower()
            return self._search_next_node(-1)
        if mode == 'Previous':
            return self._search_previous_node(index)
        elif mode == 'Next':
            return self._search_next_node(index)
        return None

    def select(self, key=''):
        """
        Move the selection of node to node key.
        : Parameters
            key - str, key of node.
        """
        iid = self._key_to_id(key)
        if iid:
            self.tree.Widget.see(iid)
            self.tree.Widget.selection_set(iid)

    def set_text(self, key, text):
        """
        Set new node name
        : Parameters
            key  - str, key of node.
            text - str, new name of node.
        """
        self.treedata.tree_dict[key].text = text
        self.tree.update(key=key, text=text)

    def set_value(self, key, text):
        """
        Set values[0] of node to new value 'text'.
        : Parameters
            key  - str, key of node.
            text - str, new value of node.
        """
        values = self.treedata.tree_dict[key].values
        if values:
            values[0] = text
        else:
            # BUGFIX: a node created without values (e.g. the root) used to
            # raise IndexError here; create the slot instead, mirroring the
            # empty-values handling in get_value().
            values.append(text)

    def sort_tree(self, func=None):
        """
        Sort children list of all nodes by node name.
        : Parameter
            func - function name to process text for sorting key.
                def func(text):
                    ...
                    return new_text
                called by tree.sort_tree(func)
        : Return
            None, result updated into Tree.
        """
        pre_select_key = self.where()
        for key, node in self.treedata.tree_dict.items():
            children = node.children
            if func:
                node.children = sorted(
                    children, key=lambda child: func(child.text))
            else:
                node.children = sorted(children, key=lambda child: child.text)
        self.tree.update(values=self.treedata)
        self.select(pre_select_key)

    def where(self):
        """
        Get where the selection
        : Return
            str, key of node, '' for root node
        """
        item = self.tree.Widget.selection()
        return '' if len(item) == 0 else self.tree.IdToKey[item[0]]

    def _all_nodes(self, parent='', new=True):
        """
        Get all keys of nodes in list (depth-first) order.
        : Parameter
            parent - str, key of starting node.
            new    - True for beginning of search (resets self.list).
        : Return
            None, result in self.list
        """
        if new:
            self.list = []
        children = self.treedata.tree_dict[parent].children
        for child in children:
            self.list.append(child.key)
            self._all_nodes(parent=child.key, new=False)

    def _init(self, lines=25, width=30, row_height=28, text='black',
              background='white', font=('Courier New', 12), key='TREE'):
        """
        Initialization for sg.Tree
        : Parameter
            lines      - int, lines of tree view
            width      - int, width of tree view in chars.
            row_height - int, line height of tree view in pixels.
            text       - color for text.
            background - color of background.
            font       - font of text
            key        - str, key of element in PySimpleGUI.
        : return
            None
        """
        super().__init__(data=self.treedata, headings=['Notes', ], pad=(0, 0),
            show_expanded=False, col0_width=width, auto_size_columns=False,
            visible_column_map=[False, ], select_mode=sg.TABLE_SELECT_MODE_BROWSE,
            enable_events=True, text_color=text, background_color=background,
            font=font, num_rows=lines, row_height=row_height, key=key)

    def _key_to_id(self, key):
        """
        Convert PySimpleGUI element key to tkinter widget id.
        : Parameter
            key - str, key of PySimpleGUI element.
        : Return
            id of tkinter widget, None if not found.
        """
        for k, v in self.tree.IdToKey.items():
            if v == key:
                return k
        return None

    def _new_key(self):
        """
        Find a unique key for new node, start from '1' and not in node list.
        : Return
            str, unique key of new node.
        """
        i = 0
        while True:
            i += 1
            if str(i) not in self.treedata.tree_dict:
                return str(i)

    def _previous_key(self, key):
        """
        Find the previous node key in tree list.
        : Parameter
            key - str, key of node.
        : Return
            str, key of previous node, '' if key is the first node.
        """
        self._all_nodes('')
        index = self.list.index(key)
        result = '' if index == 0 else self.list[index-1]
        return result

    def _next_not_children(self, key):
        """
        Find next node key, where node is not a descendant of node 'key'.
        : Parameter
            key - str, key of node.
        : Return
            str, key of next node, None if none exists.
        """
        self._all_nodes('')
        index = self.list.index(key) + 1
        while index < len(self.list):
            # Collect the ancestor chain of the candidate; skip it while the
            # candidate is still inside the subtree rooted at 'key'.
            parent = []
            p = self.treedata.tree_dict[self.list[index]].parent
            while True:
                parent.append(p)
                p = self.treedata.tree_dict[p].parent
                if p == '': break
            if key in parent:
                index += 1
            else:
                return self.list[index]
        return None

    def _search_next_node(self, index):
        """
        Search next one node matching self.text (case-insensitive substring).
        :Return
            key of next node, None for not found.
        """
        if not self.text:
            return None
        length = len(self.list)
        for i in range(index+1, length):
            key = self.list[i]
            if self.text in self.treedata.tree_dict[key].text.lower():
                return key
        return None

    def _search_previous_node(self, index):
        """
        Search previous one node matching self.text (case-insensitive substring).
        :Return
            key of previous node, None for not found.
        """
        if not self.text:
            return None
        for i in range(index-1, -1, -1):
            key = self.list[i]
            if self.text in self.treedata.tree_dict[key].text.lower():
                return key
        return None
| jason990420/jason990420-outlook.com | PySimpleGUI_Tool.py | PySimpleGUI_Tool.py | py | 22,159 | python | en | code | 2 | github-code | 13 |
5947845688 | # -*- coding: utf-8 -*-
""" KnobScripter Prefs: Preferences widget (PrefsWidget) and utility function to load all preferences.
The load_prefs function will load all preferences relative to the KnobScripter, both stored
as variables in the config.py module and saved in the KS preferences json file.
adrianpueyo.com
"""
import json
import os
import nuke
from KnobScripter.info import __version__, __author__, __date__
from KnobScripter import config, widgets, utils
try:
if nuke.NUKE_VERSION_MAJOR < 11:
from PySide import QtCore, QtGui, QtGui as QtWidgets
from PySide.QtCore import Qt
else:
from PySide2 import QtWidgets, QtGui, QtCore
from PySide2.QtCore import Qt
except ImportError:
from Qt import QtCore, QtGui, QtWidgets
def load_prefs():
    """ Load prefs json file and overwrite config.prefs """
    # Resolve every KnobScripter file location under ~/.nuke, from the
    # directory/file names declared in config.prefs.
    config.ks_directory = os.path.join(
        os.path.expanduser("~"), ".nuke", config.prefs["ks_directory"])
    path_table = [
        ("py_scripts_dir", "ks_py_scripts_directory"),
        ("blink_dir", "ks_blink_directory"),
        ("codegallery_user_txt_path", "ks_codegallery_file"),
        ("snippets_txt_path", "ks_snippets_file"),
        ("prefs_txt_path", "ks_prefs_file"),
        ("py_state_txt_path", "ks_py_state_file"),
        ("knob_state_txt_path", "ks_knob_state_file"),
    ]
    for attr, pref_key in path_table:
        setattr(config, attr,
                os.path.join(config.ks_directory, config.prefs[pref_key]))

    # Default editor font (overridden below if saved prefs exist).
    config.script_editor_font = QtGui.QFont()
    config.script_editor_font.setStyleHint(QtGui.QFont.Monospace)
    config.script_editor_font.setFixedPitch(True)
    config.script_editor_font.setFamily("Monospace")
    config.script_editor_font.setPointSize(10)

    if not os.path.isfile(config.prefs_txt_path):
        return None
    with open(config.prefs_txt_path, "r") as f:
        prefs = json.load(f)
    config.prefs.update(prefs)
    config.script_editor_font.setFamily(config.prefs["se_font_family"])
    config.script_editor_font.setPointSize(config.prefs["se_font_size"])
    return prefs
def clear_knob_state_history():
    """Wipe the stored knob editor states, in memory and on disk, after
    asking the user for confirmation."""
    if not nuke.ask("Are you sure you want to clear all history of knob states?"):
        return
    # In-memory history (per-instance state is intentionally left alone).
    config.knob_state_dict = {}
    # On-disk history: overwrite the state file with an empty json object.
    with open(config.knob_state_txt_path, "w") as f:
        json.dump({}, f)
def clear_py_state_history():
    """Wipe the stored .py editor states, in memory and on disk, after
    asking the user for confirmation."""
    if not nuke.ask("Are you sure you want to clear all history of .py states?"):
        return
    # In-memory history.
    config.py_state_dict = {}
    # On-disk history: overwrite the state file with an empty json object.
    with open(config.py_state_txt_path, "w") as f:
        json.dump({}, f)
class PrefsWidget(QtWidgets.QWidget):
    """Preferences panel for the KnobScripter: builds the form, and loads,
    applies and saves the preferences json."""

    def __init__(self, knob_scripter="", _parent=QtWidgets.QApplication.activeWindow()):
        """
        : Parameters
            knob_scripter - the KnobScripter instance this panel belongs to
                            (may be "" and resolved later via utils).
            _parent       - parent Qt widget.
        """
        super(PrefsWidget, self).__init__(_parent)
        self.knob_scripter = knob_scripter
        self.initUI()
        self.refresh_prefs()

    def initUI(self):
        """Build the whole UI: title header, scrollable form and the
        Save/Apply/Cancel button row."""
        self.layout = QtWidgets.QVBoxLayout()

        # 1. Title (name, version)
        self.title_widget = QtWidgets.QWidget()
        self.title_layout = QtWidgets.QHBoxLayout()
        self.title_layout.setMargin(0)
        title_label = QtWidgets.QLabel("KnobScripter v" + __version__)
        title_label.setStyleSheet("font-weight:bold;color:#CCCCCC;font-size:20px;")
        built_label = QtWidgets.QLabel('<i style="color:#777">Built {0}</i>'.format(__date__))
        built_label.setStyleSheet("color:#555;font-size:9px;padding-top:10px;")
        subtitle_label = QtWidgets.QLabel("Script editor for python and callback knobs")
        subtitle_label.setStyleSheet("color:#999")
        line1 = widgets.HLine()

        img_ap = QtWidgets.QLabel()
        pixmap = QtGui.QPixmap(os.path.join(config.ICONS_DIR, "ap_tools.png"))
        img_ap.setPixmap(pixmap)
        img_ap.resize(pixmap.width(), pixmap.height())
        img_ap.setStyleSheet("padding-top: 3px;")

        signature = QtWidgets.QLabel('<a href="http://www.adrianpueyo.com/" style="color:#888;text-decoration:none">'
                                     '<b>adrianpueyo.com</b></a>, 2016-{0}'.format(__date__.split(" ")[-1]))
        signature.setOpenExternalLinks(True)
        # signature.setStyleSheet('''color:#555;font-size:9px;padding-left: {}px;'''.format(pixmap.width()+4))
        signature.setStyleSheet('''color:#555;font-size:9px;''')
        signature.setAlignment(QtCore.Qt.AlignLeft)

        img_ks = QtWidgets.QLabel()
        pixmap = QtGui.QPixmap(os.path.join(config.ICONS_DIR, "knob_scripter.png"))
        img_ks.setPixmap(pixmap)
        img_ks.resize(pixmap.width(), pixmap.height())

        # self.title_layout.addWidget(img_ks)
        self.title_layout.addWidget(img_ap)
        self.title_layout.addSpacing(2)
        self.title_layout.addWidget(title_label)
        self.title_layout.addWidget(built_label)
        self.title_layout.addStretch()
        self.title_widget.setLayout(self.title_layout)

        self.layout.addWidget(self.title_widget)
        self.layout.addWidget(signature)
        self.layout.addWidget(line1)

        # 2. Scroll Area
        # 2.1. Inner scroll content
        self.scroll_content = QtWidgets.QWidget()
        self.scroll_layout = QtWidgets.QVBoxLayout()
        self.scroll_layout.setMargin(0)
        self.scroll_content.setLayout(self.scroll_layout)
        self.scroll_content.setContentsMargins(0, 0, 8, 0)

        # 2.2. External Scroll Area
        self.scroll = QtWidgets.QScrollArea()
        self.scroll.setVerticalScrollBarPolicy(Qt.ScrollBarAsNeeded)
        self.scroll.setHorizontalScrollBarPolicy(Qt.ScrollBarAlwaysOff)
        self.scroll.setWidgetResizable(True)
        self.scroll.setWidget(self.scroll_content)
        self.scroll.setSizePolicy(QtWidgets.QSizePolicy.MinimumExpanding, QtWidgets.QSizePolicy.MinimumExpanding)
        self.layout.addWidget(self.scroll)

        # 3. Build prefs inside scroll layout
        self.form_layout = QtWidgets.QFormLayout()
        self.scroll_layout.addLayout(self.form_layout)
        self.scroll_layout.addStretch()

        # 3.1. General
        self.form_layout.addRow("<b>General</b>", QtWidgets.QWidget())

        # Font
        self.font_box = QtWidgets.QFontComboBox()
        self.font_box.currentFontChanged.connect(self.font_changed)
        self.form_layout.addRow("Font:", self.font_box)

        # Font size
        self.font_size_box = QtWidgets.QSpinBox()
        self.font_size_box.setMinimum(6)
        self.font_size_box.setMaximum(100)
        self.font_size_box.setFixedHeight(24)
        self.font_size_box.valueChanged.connect(self.font_size_changed)
        self.form_layout.addRow("Font size:", self.font_size_box)

        # Window size
        self.window_size_box = QtWidgets.QFrame()
        self.window_size_box.setContentsMargins(0, 0, 0, 0)
        window_size_layout = QtWidgets.QHBoxLayout()
        window_size_layout.setMargin(0)
        self.window_size_w_box = QtWidgets.QSpinBox()
        self.window_size_w_box.setValue(config.prefs["ks_default_size"][0])
        self.window_size_w_box.setMinimum(200)
        self.window_size_w_box.setMaximum(4000)
        self.window_size_w_box.setFixedHeight(24)
        self.window_size_w_box.setToolTip("Default window width in pixels")
        window_size_layout.addWidget(self.window_size_w_box)
        window_size_layout.addWidget(QtWidgets.QLabel("x"))
        self.window_size_h_box = QtWidgets.QSpinBox()
        self.window_size_h_box.setValue(config.prefs["ks_default_size"][1])
        self.window_size_h_box.setMinimum(100)
        self.window_size_h_box.setMaximum(2000)
        self.window_size_h_box.setFixedHeight(24)
        self.window_size_h_box.setToolTip("Default window height in pixels")
        window_size_layout.addWidget(self.window_size_h_box)
        self.window_size_box.setLayout(window_size_layout)
        self.form_layout.addRow("Floating window:", self.window_size_box)

        self.grab_dimensions_button = QtWidgets.QPushButton("Grab current dimensions")
        self.grab_dimensions_button.clicked.connect(self.grab_dimensions)
        self.form_layout.addRow("", self.grab_dimensions_button)

        # Save knob editor state
        self.knob_editor_state_box = QtWidgets.QFrame()
        self.knob_editor_state_box.setContentsMargins(0, 0, 0, 0)
        knob_editor_state_layout = QtWidgets.QHBoxLayout()
        knob_editor_state_layout.setMargin(0)
        self.save_knob_editor_state_combobox = QtWidgets.QComboBox()
        self.save_knob_editor_state_combobox.setToolTip("Save script editor state on knobs? "
                                                        "(which knob is open in editor, cursor pos, scroll values)\n"
                                                        " - Save in memory = active session only\n"
                                                        " - Save to disk = active between sessions")
        self.save_knob_editor_state_combobox.addItem("Do not save", 0)
        self.save_knob_editor_state_combobox.addItem("Save in memory", 1)
        self.save_knob_editor_state_combobox.addItem("Save to disk", 2)
        knob_editor_state_layout.addWidget(self.save_knob_editor_state_combobox)
        self.clear_knob_history_button = QtWidgets.QPushButton("Clear history")
        self.clear_knob_history_button.clicked.connect(clear_knob_state_history)
        knob_editor_state_layout.addWidget(self.clear_knob_history_button)
        self.knob_editor_state_box.setLayout(knob_editor_state_layout)
        self.form_layout.addRow("Knob Editor State:", self.knob_editor_state_box)

        # Save .py editor state
        self.py_editor_state_box = QtWidgets.QFrame()
        self.py_editor_state_box.setContentsMargins(0, 0, 0, 0)
        py_editor_state_layout = QtWidgets.QHBoxLayout()
        py_editor_state_layout.setMargin(0)
        self.save_py_editor_state_combobox = QtWidgets.QComboBox()
        self.save_py_editor_state_combobox.setToolTip("Save script editor state on .py scripts? "
                                                      "(which script is open in editor, cursor pos, scroll values)\n"
                                                      " - Save in memory = active session only\n"
                                                      " - Save to disk = active between sessions")
        self.save_py_editor_state_combobox.addItem("Do not save", 0)
        self.save_py_editor_state_combobox.addItem("Save in memory", 1)
        self.save_py_editor_state_combobox.addItem("Save to disk", 2)
        py_editor_state_layout.addWidget(self.save_py_editor_state_combobox)
        self.clear_py_history_button = QtWidgets.QPushButton("Clear history")
        self.clear_py_history_button.clicked.connect(clear_py_state_history)
        py_editor_state_layout.addWidget(self.clear_py_history_button)
        self.py_editor_state_box.setLayout(py_editor_state_layout)
        self.form_layout.addRow(".py Editor State:", self.py_editor_state_box)

        # 3.2. Python
        self.form_layout.addRow(" ", None)
        self.form_layout.addRow("<b>Python</b>", QtWidgets.QWidget())

        # Tab spaces
        self.tab_spaces_combobox = QtWidgets.QComboBox()
        self.tab_spaces_combobox.addItem("2", 2)
        self.tab_spaces_combobox.addItem("4", 4)
        self.tab_spaces_combobox.currentIndexChanged.connect(self.tab_spaces_changed)
        self.form_layout.addRow("Tab spaces:", self.tab_spaces_combobox)

        # Color scheme
        self.python_color_scheme_combobox = QtWidgets.QComboBox()
        self.python_color_scheme_combobox.addItem("nuke", "nuke")
        self.python_color_scheme_combobox.addItem("monokai", "monokai")
        self.python_color_scheme_combobox.currentIndexChanged.connect(self.color_scheme_changed)
        self.form_layout.addRow("Color scheme:", self.python_color_scheme_combobox)

        # Run in context
        self.run_in_context_checkbox = QtWidgets.QCheckBox("Run in context")
        self.run_in_context_checkbox.setToolTip("Default mode for running code in context (when in node mode).")
        # self.run_in_context_checkbox.stateChanged.connect(self.run_in_context_changed)
        self.form_layout.addRow("", self.run_in_context_checkbox)

        # Show labels
        self.show_knob_labels_checkbox = QtWidgets.QCheckBox("Show knob labels")
        self.show_knob_labels_checkbox.setToolTip("Display knob labels on the knob dropdown\n"
                                                  "Otherwise, show the internal name only.")
        self.form_layout.addRow("", self.show_knob_labels_checkbox)

        # 3.3. Blink
        self.form_layout.addRow(" ", None)
        self.form_layout.addRow("<b>Blink</b>", QtWidgets.QWidget())

        # Color scheme
        # self.blink_color_scheme_combobox = QtWidgets.QComboBox()
        # self.blink_color_scheme_combobox.addItem("nuke default")
        # self.blink_color_scheme_combobox.addItem("adrians flavour")
        # self.form_layout.addRow("Tab spaces:", self.blink_color_scheme_combobox)
        self.autosave_on_compile_checkbox = QtWidgets.QCheckBox("Auto-save to disk on compile")
        self.autosave_on_compile_checkbox.setToolTip("Set the default value for <b>Auto-save to disk on compile</b>.")
        self.form_layout.addRow("", self.autosave_on_compile_checkbox)

        # 4. Lower buttons
        self.lower_buttons_layout = QtWidgets.QHBoxLayout()
        self.lower_buttons_layout.addStretch()

        self.save_prefs_button = QtWidgets.QPushButton("Save")
        self.save_prefs_button.clicked.connect(self.save_prefs)
        self.lower_buttons_layout.addWidget(self.save_prefs_button)
        self.apply_prefs_button = QtWidgets.QPushButton("Apply")
        self.apply_prefs_button.clicked.connect(self.apply_prefs)
        self.lower_buttons_layout.addWidget(self.apply_prefs_button)
        self.cancel_prefs_button = QtWidgets.QPushButton("Cancel")
        self.cancel_prefs_button.clicked.connect(self.cancel_prefs)
        self.lower_buttons_layout.addWidget(self.cancel_prefs_button)

        self.layout.addLayout(self.lower_buttons_layout)
        self.setLayout(self.layout)

    def font_size_changed(self):
        """Live-apply the new font size to all open KnobScripter editors."""
        config.script_editor_font.setPointSize(self.font_size_box.value())
        for ks in config.all_knobscripters:
            if hasattr(ks, 'script_editor'):
                ks.script_editor.setFont(config.script_editor_font)

    def font_changed(self):
        """Live-apply the new font family to all open KnobScripter editors."""
        self.font = self.font_box.currentFont().family()
        config.script_editor_font.setFamily(self.font)
        for ks in config.all_knobscripters:
            if hasattr(ks, 'script_editor'):
                ks.script_editor.setFont(config.script_editor_font)

    def tab_spaces_changed(self):
        """Live-apply the new tab width and rehighlight all editors."""
        config.prefs["se_tab_spaces"] = self.tab_spaces_combobox.currentData()
        for ks in config.all_knobscripters:
            if hasattr(ks, 'highlighter'):
                ks.highlighter.rehighlight()
        return

    def color_scheme_changed(self):
        """Live-apply the new python color scheme to all python editors."""
        config.prefs["code_style_python"] = self.python_color_scheme_combobox.currentData()
        for ks in config.all_knobscripters:
            if hasattr(ks, 'script_editor'):
                if ks.script_editor.code_language == "python":
                    ks.script_editor.highlighter.setStyle(config.prefs["code_style_python"])
                    ks.script_editor.highlighter.rehighlight()
        return

    def grab_dimensions(self):
        """Copy the current KnobScripter window size into the size spinboxes."""
        self.knob_scripter = utils.getKnobScripter(self.knob_scripter)
        self.window_size_w_box.setValue(self.knob_scripter.width())
        self.window_size_h_box.setValue(self.knob_scripter.height())

    def refresh_prefs(self):
        """ Reload the json prefs, apply them on config.prefs, and repopulate the knobs """
        load_prefs()

        self.font_box.setCurrentFont(QtGui.QFont(config.prefs["se_font_family"]))
        self.font_size_box.setValue(config.prefs["se_font_size"])

        self.window_size_w_box.setValue(config.prefs["ks_default_size"][0])
        self.window_size_h_box.setValue(config.prefs["ks_default_size"][1])

        self.show_knob_labels_checkbox.setChecked(config.prefs["ks_show_knob_labels"] is True)
        self.run_in_context_checkbox.setChecked(config.prefs["ks_run_in_context"] is True)

        self.save_knob_editor_state_combobox.setCurrentIndex(config.prefs["ks_save_knob_state"])
        self.save_py_editor_state_combobox.setCurrentIndex(config.prefs["ks_save_py_state"])

        i = self.python_color_scheme_combobox.findData(config.prefs["code_style_python"])
        if i != -1:
            self.python_color_scheme_combobox.setCurrentIndex(i)

        i = self.tab_spaces_combobox.findData(config.prefs["se_tab_spaces"])
        if i != -1:
            self.tab_spaces_combobox.setCurrentIndex(i)

        self.autosave_on_compile_checkbox.setChecked(config.prefs["ks_blink_autosave_on_compile"])

    def get_prefs_dict(self):
        """ Return a dictionary with the prefs from the current knob state """
        ks_prefs = {
            "ks_default_size": [self.window_size_w_box.value(), self.window_size_h_box.value()],
            "ks_run_in_context": self.run_in_context_checkbox.isChecked(),
            "ks_show_knob_labels": self.show_knob_labels_checkbox.isChecked(),
            "ks_blink_autosave_on_compile": self.autosave_on_compile_checkbox.isChecked(),
            "ks_save_knob_state": self.save_knob_editor_state_combobox.currentData(),
            "ks_save_py_state": self.save_py_editor_state_combobox.currentData(),
            "code_style_python": self.python_color_scheme_combobox.currentData(),
            "se_font_family": self.font_box.currentFont().family(),
            "se_font_size": self.font_size_box.value(),
            "se_tab_spaces": self.tab_spaces_combobox.currentData(),
        }
        return ks_prefs

    def save_config(self, prefs=None):
        """ Save the given prefs dict in config.prefs """
        if not prefs:
            prefs = self.get_prefs_dict()
        for pref in prefs:
            config.prefs[pref] = prefs[pref]
        config.script_editor_font.setFamily(config.prefs["se_font_family"])
        config.script_editor_font.setPointSize(config.prefs["se_font_size"])

    def save_prefs(self):
        """ Save current prefs on json, config, and apply on KnobScripters """
        # 1. Save json
        ks_prefs = self.get_prefs_dict()
        with open(config.prefs_txt_path, "w") as f:
            json.dump(ks_prefs, f, sort_keys=True, indent=4)
            nuke.message("Preferences saved!")

        # 2. Save config
        self.save_config(ks_prefs)

        # 3. Apply on KnobScripters
        self.apply_prefs()

    def apply_prefs(self):
        """ Apply the current knob values to the KnobScripters """
        self.save_config()
        for ks in config.all_knobscripters:
            # BUGFIX: guard the script_editor access like the other live-update
            # callbacks (font_changed etc.) do, so a partially-built panel in
            # config.all_knobscripters cannot raise AttributeError here.
            if hasattr(ks, 'script_editor'):
                ks.script_editor.setFont(config.script_editor_font)
                ks.script_editor.tab_spaces = config.prefs["se_tab_spaces"]
                ks.script_editor.highlighter.rehighlight()
            ks.runInContext = config.prefs["ks_run_in_context"]
            ks.runInContextAct.setChecked(config.prefs["ks_run_in_context"])
            ks.show_labels = config.prefs["ks_show_knob_labels"]
            ks.blink_autoSave_act.setChecked(config.prefs["ks_blink_autosave_on_compile"])
            # TODO Apply the "ks_save_py_state" and "ks_save_knob_state" here too
            if ks.nodeMode:
                ks.refreshClicked()

    def cancel_prefs(self):
        """ Revert to saved json prefs """
        # 1. Reload json and populate knobs
        self.refresh_prefs()
        # 2. Apply values to KnobScripters
        self.apply_prefs()
        # 3. If this is a floating panel, close it??
| adrianpueyo/KnobScripter | KnobScripter/prefs.py | prefs.py | py | 20,250 | python | en | code | 65 | github-code | 13 |
31037486532 | import tkinter as tk
def add_phonenumber_func():
    """Callback for the 'Add phone number' button: copy the typed name onto
    btn_2 so a later click on btn_2 can put it back into the entry."""
    name = entry_name.get()
    # NOTE(review): the phone-number entry and the file-append logic that
    # consumed it were commented out in the original; only the name is used.
    btn_2.config(text=name)
# def insert_info_from_dict(tk.Button(): button_clicked):
# temp_text = button_clicked.cget('text')
# print(temp_text)
# # entry_msg_out.insert(0,temp_text)
def on_click(text):
    """Replace the contents of the name entry with *text*."""
    entry_name.delete(0,tk.END)
    entry_name.insert(0,text)
# Build the main window; title and widget labels are intentionally Vietnamese.
window = tk.Tk()
window.title("Tiêu đề")
# window.geometry("600x600")
window.resizable(width=0, height=0) # fixed-size window (cannot be resized)
frame = tk.Frame(window,height=400, width=400, bg="green")
# frame.place(
# relx=0.5,
# rely=0.5,
# anchor=tk.CENTER
# )
frame.grid(row=0,column=0)
frame2 = tk.Frame(window,height=400, width=400, bg='pink')
frame2.grid(row=1,column=0)
# Declare widgets
lbl_name = tk.Label(frame,text='Tên')
lbl_phonenumber = tk.Label(frame,text="Điện thoại")
entry_name = tk.Entry(frame)
entry_phonenumber = tk.Entry(frame)
btn_add = tk.Button(frame,text='Thêm SĐT',command=add_phonenumber_func)
entry_name.insert(0,"Thử xem sao")
# Build the layout (grid geometry manager)
lbl_name.grid(row=0, column=0, sticky='e')
entry_name.grid(row=0, column=1)
lbl_phonenumber.grid(row=1, column=0)
entry_phonenumber.grid(row=1, column=1)
btn_add.grid(row=2, column=0, columnspan=2)
lbl_msg_out = tk.Label(frame2,text='')
lbl_msg_out.grid(row=0, column=0)
lbl_phonelist = tk.Label(frame2,text='Hoàng Ánh')
lbl_phonelist.grid(row=1, column=0)
# The lambda re-reads the button's own text at click time.
btn_2 = tk.Button(frame2,text='',command= lambda: on_click (btn_2.cget('text')))
btn_2.grid(row=2, column=0)
print(lbl_phonelist.cget("text"))
window.mainloop()
| nhatelecom/practice_python | 28-06 thuc hanh tkinter.py | 28-06 thuc hanh tkinter.py | py | 2,012 | python | en | code | 0 | github-code | 13 |
41658585696 | import pandas as pd
import yfinance as yf
def fetch_data(ticker_symbol, timeframe='1y'):
    """Fetch historical price data for a ticker via yfinance.

    Args:
        ticker_symbol (str): The stock ticker symbol.
        timeframe (str): History period to request. Default '1y'.

    Returns:
        pd.DataFrame | None: The price history, or None if the download failed.
    """
    try:
        history = yf.Ticker(ticker_symbol).history(period=timeframe)
        # Corporate-action columns are not needed downstream; errors='ignore'
        # makes the drop a no-op when yfinance did not include them.
        return history.drop(columns=['Dividends', 'Stock Splits'], errors='ignore')
    except Exception as e:
        print(f"Error fetching data for {ticker_symbol}: {e}")
        return None
73493760656 | # built-in packages
import math
from typing import List, Any
# third-party packages
import numpy as np
# customized packages
from config import ROUND_PRECISION
from ma_trader import MATrader
from util import timer
class TraderDriver:
    '''A wrapper class on top of any of trader classes.

    Builds one MATrader per hyper-parameter combination (bollinger sigma,
    stat, tolerance pct, buy pct, sell pct), replays a shared price stream
    through all of them, and tracks the best-performing configuration.
    '''

    def __init__(self,
                 name: str,
                 init_amount: int,
                 cur_coin: float,
                 overall_stats: List[str],
                 tol_pcts: List[float],
                 ma_lengths: List[int],
                 ema_lengths: List[int],
                 bollinger_mas: List[int],
                 bollinger_tols: List[int],
                 buy_pcts: List[float],
                 sell_pcts: List[float],
                 buy_stas: List[str] = ['by_percentage'],
                 sell_stas: List[str] = ['by_percentage'],
                 mode: str='normal'):
        # NOTE: the list defaults above are kept for interface compatibility;
        # they are passed through unmodified and treated as read-only.
        self.init_amount, self.init_coin = init_amount, cur_coin
        self.mode = mode
        # One trader per hyper-parameter combination (flattens the previous
        # 5-level nested loop).
        self.traders = []
        combos = itertools.product(
            bollinger_tols, overall_stats, tol_pcts, buy_pcts, sell_pcts)
        for bollinger_sigma, stat, tol_pct, buy_pct, sell_pct in combos:
            self.traders.append(MATrader(
                name=name,
                init_amount=init_amount,
                stat=stat,
                tol_pct=tol_pct,
                ma_lengths=ma_lengths,
                ema_lengths=ema_lengths,
                bollinger_mas=bollinger_mas,
                bollinger_sigma=bollinger_sigma,
                buy_pct=buy_pct,
                sell_pct=sell_pct,
                cur_coin=cur_coin,
                buy_stas=buy_stas,
                sell_stas=sell_stas,
                mode=mode
            ))
        # sanity check: one trader per combination
        if len(self.traders) != (len(tol_pcts) * len(buy_pcts) *
                                 len(sell_pcts) * len(overall_stats) *
                                 len(bollinger_tols)):
            raise ValueError('trader creation is wrong!')
        # unknown until feed_data has run
        self.best_trader = None

    @timer
    def feed_data(self, data_stream: List[tuple]):
        '''Feed in historic data.

        Each element of ``data_stream`` is a tuple of
        (price, date, open, low, high) -- the extra OHLC fields feed misc_p.
        '''
        if self.mode == 'verbose':
            print('running simulation...')
        max_final_p = -math.inf
        for t in self.traders:
            # compute initial value from the first day
            t.add_new_day(
                new_p=data_stream[0][0],
                d=data_stream[0][1],
                misc_p={
                    'open': data_stream[0][2],
                    'low': data_stream[0][3],
                    'high': data_stream[0][4]
                })
            # run simulation over the remaining days
            for i in range(1, len(data_stream)):
                # BUG FIX: each row carries 5 fields, so ``p, d = data_stream[i]``
                # raised ValueError; take only the first two explicitly.
                p, d = data_stream[i][0], data_stream[i][1]
                misc_p = {
                    'open': data_stream[i][2],
                    'low': data_stream[i][3],
                    'high': data_stream[i][4]
                }
                t.add_new_day(p, d, misc_p)
            # decide best trader while we loop, by comparing final portfolio
            # value; sometimes a trader makes no trade at all, so fall back to
            # computing the value from the last price.
            if len(t.all_history) > 0:
                tmp_final_p = t.all_history[-1]['portfolio']
            else:
                tmp_final_p = (t.crypto_prices[-1][0] * t.cur_coin) + t.cash
            # '>=' keeps the last trader on ties (matches historic behavior)
            if tmp_final_p >= max_final_p:
                max_final_p = tmp_final_p
                self.best_trader = t

    @property
    def best_trader_info(self):
        '''Summary dict for the best trading strategy found by feed_data.'''
        best_trader = self.best_trader
        # compute init value once again, in case no single trade is made
        init_v = best_trader.init_coin * best_trader.crypto_prices[0][0] + best_trader.init_cash
        extra = {
            'init_value': np.round(init_v, ROUND_PRECISION),
            'max_final_value': np.round(best_trader.portfolio_value, ROUND_PRECISION),
            'rate_of_return': str(best_trader.rate_of_return) + '%',
            'baseline_rate_of_return': str(best_trader.baseline_rate_of_return) + '%',
            'coin_rate_of_return': str(best_trader.coin_rate_of_return) + '%'
        }
        return {**best_trader.trading_strategy, **extra, 'trader_index': self.traders.index(self.best_trader)}
17048789824 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
from alipay.aop.api.domain.FenceDto import FenceDto
class Area(object):
    """Alipay API domain object wrapping a list of FenceDto fences."""

    def __init__(self):
        self._fences = None

    @property
    def fences(self):
        return self._fences

    @fences.setter
    def fences(self, value):
        if isinstance(value, list):
            # Accept either FenceDto instances or their dict representations.
            self._fences = [
                item if isinstance(item, FenceDto) else FenceDto.from_alipay_dict(item)
                for item in value
            ]

    def to_alipay_dict(self):
        """Serialize to the wire dict; converts stored fences in place."""
        params = dict()
        if self.fences:
            if isinstance(self.fences, list):
                for i, element in enumerate(self.fences):
                    if hasattr(element, 'to_alipay_dict'):
                        self.fences[i] = element.to_alipay_dict()
            if hasattr(self.fences, 'to_alipay_dict'):
                params['fences'] = self.fences.to_alipay_dict()
            else:
                params['fences'] = self.fences
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an Area from a wire dict; returns None for empty input."""
        if not d:
            return None
        area = Area()
        if 'fences' in d:
            area.fences = d['fences']
        return area
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/Area.py | Area.py | py | 1,374 | python | en | code | 241 | github-code | 13 |
30437479151 | from flask import Flask, request
import os
app = Flask(__name__)
import ConfigParser
import smtplib, string
config = ConfigParser.ConfigParser()
cur_dir = os.path.dirname(os.path.abspath(__file__))
config.readfp(open(cur_dir + "/myconfig.ini","rb"))
def get_last_ip():
    """Return the most recently stored IP from [global]/lastip in myconfig.ini."""
    return config.get("global","lastip")
def save_current_ip(cur_ip):
    """Persist *cur_ip* under [global]/lastip in myconfig.ini.

    Bug fix: write to the same absolute path the config was read from
    (cur_dir), not a CWD-relative path, and close the file handle
    deterministically via a context manager.
    """
    config.set("global", "lastip", cur_ip)
    with open(cur_dir + "/myconfig.ini", "w") as f:
        config.write(f)
@app.route('/')
def hello():
    """Echo back 'hello <text>' using the optional ?text= query parameter."""
    text = request.args.get('text')
    # text is None when the parameter is absent -> renders as 'hello None'
    return 'hello %s' % text
@app.route('/dynamicIp', methods=['GET', 'POST'])
def dynamicIp():
    """Store or fetch the last-known dynamic IP.

    GET ?setip=<ip> -> persist <ip> and echo it back as JSON.
    GET (no args)   -> return the stored IP as JSON.
    POST            -> not supported; returns a placeholder string.
    """
    import json  # local import: the module header predates this dependency
    if request.method == 'POST':
        return 'Post return None'
    setip = request.args.get('setip')
    if setip is None:
        # json.dumps escapes the value properly (the old hand-built string
        # produced invalid JSON for values containing quotes); compact
        # separators keep the historical {"ip":"..."} wire format.
        return json.dumps({"ip": get_last_ip()}, separators=(',', ':'))
    save_current_ip(setip)
    return json.dumps({"ip": setip}, separators=(',', ':'))
# Example requests:
#http://192.168.1.113:5000/dynamicIp?setip=112.112.112.112
#http://192.168.1.113:5000/dynamicIp
if __name__ == '__main__':
    # NOTE(review): debug=True plus binding 0.0.0.0 exposes the Werkzeug
    # debugger to the whole network -- disable both in production.
    app.debug = True
    app.run(host='0.0.0.0')
| cdyfng/pytools | dynamicIp.py | dynamicIp.py | py | 1,051 | python | en | code | 1 | github-code | 13 |
24248485966 | import os
os.environ.setdefault('DJANGO_SETTINGS_MODULE',
'tango_with_django_project.settings')
import django
django.setup()
from rango.models import Category, Page
def populate():
    """Seed the rango app with Category and Page fixture data.

    Defines per-category page lists, wraps them in ``cats`` together with
    view/like counts, inserts everything via add_cat/add_page, and prints
    what was created.
    """
    # Create data to be populated in DB
    python_pages = [
        {'title': 'Official Python Tutorial',
         'url': 'http://docs.python.org/2/tutorial/',
         'views': 42},
        {'title': 'How to Think like a Computer Scientist',
         'url': 'http://www.greenteapress.com/thinkpython/',
         'views': 23},
        {'title': 'Learn Python in 10 Minutes',
         'url': 'http://www.korokithakis.net/tutorials/python/',
         'views': 63}
    ]

    django_pages = [
        {'title': 'Official Django Tutorial',
         'url': 'http://docs.djangoproject.com/en/1.9/intro/tutorial01/',
         'views': 10},
        {'title': 'Django Rocks',
         'url': 'http://www.djangorocks.com/',
         'views': 22},
        {'title': 'How to Tango with Django',
         'url': 'http://www.tangowithdjango.com/',
         'views': 32}
    ]

    other_pages = [
        {'title': 'Bottle',
         'url': 'http://www.bottlepy.org/docs/dev/',
         'views': 12},
        {'title': 'Flask',
         'url': 'http://flask.pocoo.org',
         'views': 20}
    ]

    pascal_pages = [
        {
            'title': 'Pascal (programming language)',
            'url': 'https://en.wikipedia.org/wiki/Pascal_(programming_language)',
            'views': 1
        },
        {
            'title': 'Pascal Siakam',
            'url': 'https://en.wikipedia.org/wiki/Pascal_Siakam',
            'views': 67
        }
    ]

    perl_pages = [
        {
            'title': 'Perl',
            'url': 'https://www.perl.org/',
            'views': 168
        }
    ]

    php_pages = [
        {
            'title': 'PHP',
            'url': 'https://www.php.net/',
            'views': 8
        }
    ]

    prolog_pages = [
        {
            'title': 'Prolog',
            'url': 'https://en.wikipedia.org/wiki/Prolog',
            'views': 38
        }
    ]

    postscript_pages = [
        {
            'title': 'PostScript',
            'url': 'https://en.wikipedia.org/wiki/PostScript',
            'views': 58
        }
    ]

    programming_pages = [
        {
            'title': 'Programming',
            'url': 'https://en.wikipedia.org/wiki/Computer_programming',
            'views': 200
        }
    ]

    # Category name -> its pages plus category-level view/like counts.
    cats = {
        'Python': {'pages': python_pages, 'views': 128, 'likes': 64},
        'Django': {'pages': django_pages, 'views': 64, 'likes': 32},
        'Other Frameworks': {'pages': other_pages, 'views': 32, 'likes': 16},
        'Pascal': {'pages': pascal_pages, 'views': 68, 'likes': 102},
        'Perl': {'pages': perl_pages, 'views': 168, 'likes': 12},
        'PHP': {'pages': php_pages, 'views': 8, 'likes': 10},
        'Prolog': {'pages': prolog_pages, 'views': 38, 'likes': 22},
        'PostScript': {'pages': postscript_pages, 'views': 58, 'likes': 123},
        'Programming': {'pages': programming_pages, 'views': 200, 'likes': 196}
    }

    # Iterate key and val of cats dict, then add data to page and category
    for cat, cat_data in cats.items():
        # Pass the key and val to add_cat
        c = add_cat(cat, cat_data)
        for p in cat_data['pages']:
            add_page(c, p['title'], p['url'], p['views'])

    # Print added data in category and page
    for c in Category.objects.all():
        for p in Page.objects.filter(category=c):
            print(f'- {str(c)} - {str(p)}')
def add_page(cat, title, url, views=0):
    """Get or create a Page in *cat*, update its url/views, and return it."""
    page, _created = Page.objects.get_or_create(category=cat, title=title)
    page.url = url
    page.views = views
    page.save()
    return page
def add_cat(name, cat_data):
    """Get or create a Category named *name*, update its counters, return it."""
    category, _created = Category.objects.get_or_create(name=name)
    category.views = cat_data['views']
    category.likes = cat_data['likes']
    category.save()
    return category
# Entry point: run the population script from the command line.
if __name__ == '__main__':
    print('Starting Rango population script...')
    populate()
40380730592 | from gensim import corpora, models, similarities, utils
import jieba
import heapq
import numpy as np
def get_sim_top10(new_doc):
    """Return the 10 corpus documents most similar to *new_doc* (TF-IDF cosine).

    Loads the saved corpus, dictionary, TF-IDF model and similarity index
    from the Saier/ directory, tokenizes *new_doc* with jieba, and returns
    the stripped text of the ten highest-scoring documents.
    """
    try:
        documents = np.load('Saier/document.npy').tolist()
        print(documents)
    except Exception as e:
        # Bug fix: the old code only printed the error and fell through,
        # crashing later on the undefined name 'documents'. Re-raise instead.
        print(e)
        raise
    dictionary = corpora.Dictionary.load('Saier/dictionary.dict')
    tfidf = models.TfidfModel.load("Saier/tfidf.model")
    index = similarities.MatrixSimilarity.load('Saier/document_index.index')
    # Tokenize with jieba; doc2bow takes the token list directly.
    new_text = ' '.join(jieba.cut(new_doc)).split(' ')
    new_vec_tfidf = tfidf[dictionary.doc2bow(new_text)]
    sims = index[new_vec_tfidf].tolist()
    # Bug fix: take the INDICES of the 10 largest scores. The previous
    # sims_list.index(score) lookup returned the first matching index for
    # every tied score, yielding duplicate documents.
    top10_idx = heapq.nlargest(10, range(len(sims)), key=sims.__getitem__)
    return [documents[i].strip() for i in top10_idx]
if __name__ == "__main__":
get_sim_top10("直接寻址是寻址方式。")
| twobagoforange/saier_system | background/sim_new.py | sim_new.py | py | 977 | python | en | code | 0 | github-code | 13 |
28444931591 | import traceback
from typing import Optional, Tuple, Dict, Type, Any
from pathlib import Path
from ..runtime_env import RuntimeEnv
from ..utils import check_output
from ..patcher import ExpAprPatcher, FallbackPatcher
from ..servant_connector import ServantConnector
class Technique:
    """Common interface for APR evaluation techniques."""

    def __init__(self, env: RuntimeEnv, idx0: int, args: Any):
        self.env, self.idx0, self.args = env, idx0, args
        # Cached string form of the project root (used as subprocess cwd).
        self.proj_path_s = str(env.projects[idx0]['root'])

    def run(self, jsonpath: str) -> Tuple[Optional[str], dict]:
        """Evaluate the patch set described by *jsonpath*; subclasses implement."""
        raise NotImplementedError()

    def shutdown(self):
        """Release resources; the base implementation is a no-op."""
        pass
class ExpAprTechnique(Technique):
    """Runs patch validation through the ExpressAPR servant process."""

    def __init__(self, env: RuntimeEnv, idx0: int, args: Any):
        super().__init__(env, idx0, args)
        self.con = ServantConnector(
            enable_assertion=False,
            igniter_path=Path('../expapr-jar'),
        )
        # Deduplication is disabled when --no-dedup was passed; otherwise the
        # environment's configured purity source is used.
        dedup = {'type': 'disabled'} if args.no_dedup else env.deduplication
        self.con.request_on_startup({
            'action': 'setup',
            'purity_source': dedup,
        }, 30)

    def run(self, jsonpath: str) -> Tuple[Optional[str], dict]:
        """Validate the patches in *jsonpath*.

        Returns (succlist, telemetry) on success, or (None, error-details)
        when anything throws -- callers use None to detect failure.
        """
        try:
            # Reset the working tree to the injected-runtime baseline first.
            check_output('git reset EXPAPR_RUNTIME_INJECTED --hard && git clean -d -f', 30, cwd=self.proj_path_s)
            p = ExpAprPatcher(jsonpath, self.env, self.idx0, noprio=self.args.no_prio)
            patchcnt, t_install, t_run, succlist, inst_telemetry_cnts, run_telemetry_cnts = p.main(self.con)
        except Exception as e:
            return None, {
                'expapr_error_type': type(e).__name__,
                'expapr_error_repr': repr(e),
                'expapr_error_trace': traceback.format_exc(),
            }
        else:
            return succlist, {
                't_compile': t_install,
                't_run': t_run,
                'inst_telemetry_cnts': inst_telemetry_cnts,
                'run_telemetry_cnts': run_telemetry_cnts,
            }

    def shutdown(self):
        """Terminate the servant connection."""
        self.con.shutdown()
class FallbackTechnique(Technique):
    """Plain (non-servant) patch validation used as a baseline/fallback."""

    def run(self, jsonpath: str) -> Tuple[Optional[str], dict]:
        """Validate the patches in *jsonpath* with the fallback patcher.

        Returns (succlist, timings) on success, or (None, error-details)
        when anything throws.
        """
        try:
            # Reset to the pristine interface baseline (not the injected one).
            check_output('git reset EXPAPR_INTERFACE_ORIGINAL --hard && git clean -d -f', 30, cwd=self.proj_path_s)
            p = FallbackPatcher(jsonpath, self.env, self.idx0)
            t_compile, t_run, succlist = p.main()
        except Exception as e:
            return None, {
                'fallback_error_type': type(e).__name__,
                'fallback_error_repr': repr(e),
                'fallback_error_trace': traceback.format_exc(),
            }
        else:
            return succlist, {
                't_compile': t_compile,
                't_run': t_run,
            }
# Registry mapping CLI technique names to their implementations.
TECHNIQUES: Dict[str, Type[Technique]] = {
    'expapr': ExpAprTechnique,
    'fallback': FallbackTechnique,
}
3455227126 | import os
import sys
import time
import json
import docker
import boto3
import itertools
import botocore.exceptions
from random import random
docker_client = docker.from_env()
#TODO - use DynamoDB
class S3Discovery:
    """Swarm node discovery backed by an S3 bucket.

    Objects are stored under '<swarm_name>/...': manager and worker IPs as
    JSON blobs, join tokens under 'tokens', and an init lock under
    'manager-init-lock'.
    """

    def __init__(self, bucket, swarm_name):
        self.client = boto3.client('s3')
        self.bucket = bucket
        self.swarm_name = swarm_name

    def _list_objects(self, path):
        # List objects under the swarm prefix; empty list when none exist.
        res = self.client.list_objects(Bucket=self.bucket, Prefix=self.swarm_name + "/" + path)
        if 'Contents' in res:
            return res['Contents']
        return []

    def _get_object(self, key):
        # Return the raw bytes of a swarm-prefixed object.
        obj = self.client.get_object(Bucket=self.bucket, Key=self.swarm_name + "/" + key)
        return obj["Body"].read()

    def _put_object(self, key, body):
        # Server-side AES256 encryption for everything written.
        self.client.put_object(Bucket=self.bucket, Key=self.swarm_name + "/" + key, Body=body, ServerSideEncryption='AES256')

    def _object_exists(self, key):
        # head_object raises ClientError (e.g. 404) when the key is missing.
        try:
            self.client.head_object(Bucket=self.bucket, Key=self.swarm_name + "/" + key)
            return True
        except botocore.exceptions.ClientError as e:
            return False

    def list_managers(self):
        """Block until at least one manager is registered, then return them."""
        while True:
            items = self._list_objects("managers")
            if len(items):
                log("Found %d managers, waiting 5 seconds before continuing..." % len(items))
                time.sleep(5) # Give S3 time to syndicate all objects before next request
                return [json.loads(self._get_object(i['Key'][len(self.swarm_name + "/"):])) for i in items]
            log("No managers found, waiting 5 seconds before retrying...")
            time.sleep(5)

    def add_manager(self, ip):
        """Register a manager node's IP."""
        data = {"ip": ip}
        self._put_object("managers/%s" % ip, json.dumps(data))

    def add_worker(self, ip):
        """Register a worker node's IP."""
        data = {"ip": ip}
        self._put_object("workers/%s" % ip, json.dumps(data))

    def get_tokens(self):
        """Return the stored swarm join tokens dict."""
        return json.loads(self._get_object("tokens"))

    def get_token(self, role):
        """Return the join token for 'manager' or 'worker'."""
        tokens = self.get_tokens()
        return tokens[role]

    def set_tokens(self, data):
        """Persist the swarm join tokens dict."""
        self._put_object("tokens", json.dumps(data))

    def get_initial_lock(self, label = "lock"):
        """Try to win the swarm-initialization race; True if we won.

        Best-effort lock: write a random value, wait 5s for S3 to settle,
        and check that our value is still the one stored. NOTE(review):
        this is not a true mutex -- two nodes can still race within the
        consistency window.
        """
        if self._object_exists("manager-init-lock"):
            return False;
        log("Did not find existing swarm, attempting to initialize")
        lock_set = "%s: %f" % (label, random())
        self._put_object("manager-init-lock", lock_set)
        # Make sure we give other nodes time to check and write their IP
        # if our IP is still the one in the file after 5 seconds, then we are probably okay
        # to assume we are the manager
        time.sleep(5)
        lock_read = self._get_object("manager-init-lock")
        log("Comparing locks: %s => %s" % (lock_set, lock_read))
        return lock_read == lock_set
class SwarmHelper:
    """Thin wrapper around the docker client for swarm init/join operations."""

    def __init__(self, node_ip):
        self.node_ip = node_ip

    def is_in_swarm(self):
        """True when this node is already an active swarm member."""
        return docker_client.info()["Swarm"]["LocalNodeState"] == "active"

    def init(self):
        """Initialize a new swarm, listening/advertising on this node's IP."""
        docker_client.swarm.init(listen_addr=self.node_ip, advertise_addr=self.node_ip)

    def join_tokens(self):
        """Return this swarm's manager/worker join tokens."""
        tokens = docker_client.swarm.attrs["JoinTokens"]
        return { "manager": tokens["Manager"], "worker": tokens["Worker"] }

    def join(self, token, managers):
        """Join an existing swarm via any of the given manager records."""
        ips = [m["ip"] for m in managers]
        log("Attempting to join swarm as %s via managers %s" % (self.node_ip, ips))
        docker_client.swarm.join(
            remote_addrs=ips,
            join_token=token,
            listen_addr=self.node_ip,
            advertise_addr=self.node_ip
        )
        log("Joined swarm")
def log(l):
    """Write *l* to stdout followed by a newline, flushing immediately."""
    print(l, file=sys.stdout, flush=True)
def main():
    """Configure this node as a swarm manager or worker via S3 discovery.

    Reads SWARM_DISCOVERY_BUCKET, SWARM_NAME, ROLE and NODE_IP from the
    environment; the first manager to win the init lock creates the swarm
    and publishes the join tokens, everyone else joins with them.
    """
    log("Starting swarm setup")
    bucket = os.environ["SWARM_DISCOVERY_BUCKET"]
    swarm_name = os.environ["SWARM_NAME"]
    role = os.environ["ROLE"]
    node_ip = os.environ["NODE_IP"]
    # TODO validate these
    log("Using discovery bucket %s to configure node as a %s on address %s" % (bucket, role, node_ip))

    discovery = S3Discovery(bucket, swarm_name)
    swarm = SwarmHelper(node_ip)

    # Only a manager that wins the init lock creates a brand new swarm.
    if role == "manager" and discovery.get_initial_lock(node_ip):
        log("Initializing new swarm")
        swarm.init()
        discovery.set_tokens(swarm.join_tokens())
    else:
        log("Joining existing swarm")
        managers = discovery.list_managers()
        swarm.join(discovery.get_token(role), managers)

    # Publish this node's IP so later nodes can discover it.
    if role == "manager":
        log("Sending manager IP to discovery bucket")
        discovery.add_manager(node_ip)

    if role == "worker":
        log("Sending worker IP to discovery bucket")
        discovery.add_worker(node_ip)

    log("Completed swarm setup")

if __name__ == '__main__':
    main()
24686635858 | from modulo import *
from pilha_encadeada import*
class Fila:
    """Singly linked FIFO queue built from Node cells (see modulo.py)."""

    def __init__(self):
        self.__ini = None  # front of the queue
        self.__fim = None  # back of the queue

    def getIni(self):
        """Return the front Node (or None)."""
        return self.__ini

    def getFim(self):
        """Return the back Node (or None)."""
        return self.__fim

    def setIni(self, elem):
        self.__ini = elem

    def setFim(self, elem):
        self.__fim = elem

    def __repr__(self):
        """Render the queue as 'a->b->c'; an empty queue renders as ''.

        Bug fix: the previous version returned the boolean False for an
        empty queue, which made repr()/print() raise TypeError because
        __repr__ must return a string.
        """
        string = ''
        pointer = self.getIni()
        if pointer is None:
            return string
        while pointer.getProx() is not None:
            string += str(pointer.getDado()) + '->'
            pointer = pointer.getProx()
        string += str(pointer.getDado())
        return string

    def isEmpty(self):
        """True when the queue holds no elements."""
        return self.getIni() is None and self.getFim() is None

    def insert(self, elem):
        """Append *elem* at the back of the queue."""
        dado = Node()
        dado.setDado(elem)
        if self.isEmpty():
            self.setIni(dado)
            self.setFim(dado)
        else:
            self.getFim().setProx(dado)
            self.setFim(self.getFim().getProx())

    def remove(self):
        """Pop the front element; returns False when the queue is empty."""
        if self.isEmpty():
            return False
        elif self.getIni() == self.getFim():
            # single element: queue becomes empty
            self.setIni(None)
            self.setFim(None)
        else:
            self.setIni(self.getIni().getProx())

    def destroy(self):
        """Empty the queue."""
        while not self.isEmpty():
            self.remove()

    def sort(self):
        """Sort the queue in place using two auxiliary stacks.

        Repeatedly moves elements between an 'ordered' stack and a helper
        stack so the ordered stack stays sorted, then pours it back into
        the queue.
        """
        pilha_ordenada = PilhaEncadeada()
        pilha_auxiliar = PilhaEncadeada()
        while self.getIni() != None:
            if pilha_ordenada.getTopo() == None or (pilha_ordenada.getTopo().getDado() >= self.getIni().getDado()):
                pilha_ordenada.insert(self.getIni().getDado())
            else:
                # shift smaller elements aside, insert, then restore them
                while pilha_ordenada.getTopo() != None and pilha_ordenada.getTopo().getDado() <= self.getIni().getDado():
                    pilha_auxiliar.insert(pilha_ordenada.getTopo().getDado())
                    pilha_ordenada.remove()
                pilha_ordenada.insert(self.getIni().getDado())
                while pilha_auxiliar.getSize() >= 0 and pilha_auxiliar.getTopo() != None:
                    pilha_ordenada.insert(pilha_auxiliar.getTopo().getDado())
                    pilha_auxiliar.remove()
            self.remove()
        while pilha_ordenada.getTopo() != None:
            self.insert(pilha_ordenada.getTopo().getDado())
            pilha_ordenada.remove()
3557778081 | from selenium import webdriver
from selenium.webdriver.chrome.service import Service
# Start a local Chrome session via the pinned Windows chromedriver binary.
s = Service('C:/Drivers/chromedriver_win32/chromedriver.exe')
browser = webdriver.Chrome(service=s)
url = 'https://demo.guru99.com/test/newtours/'
browser.get(url)
# Smoke check: print where we landed and the page title.
print(browser.current_url)
print(browser.title)
browser.close()
41639852345 | import discord
import logging
import os
import requests
import shutil
import time
from yt_dlp import YoutubeDL, utils
from sclib import SoundcloudAPI, Track
# Directory (relative to CWD) where yt-dlp stores downloaded audio.
MUSIC_DIRNAME = "music"
# Maximum download retries before giving up.
MAX_ATTEMPTS = 11
# yt-dlp options: best audio only, saved under MUSIC_DIRNAME, no playlists.
YTDL_OPTS = {
    "format": "bestaudio",
    "paths": {"home": "./{}/".format(MUSIC_DIRNAME)},
    'noplaylist': True
}
# ffmpeg options used for playback: no stdin, strip video streams.
FFMPEG_OPTS = {
    "before_options": "-nostdin",
    'options': '-vn'
}
log = logging.getLogger('bot')
def create_embed(message: str):
    """Wrap *message* in a discord.Embed as its description."""
    return discord.Embed(description=message)
def _extract_with_retries(ydl, target):
    """Run ydl.extract_info(*target*) with download, retrying on failure.

    Retries up to MAX_ATTEMPTS times with a linearly growing back-off and
    re-raises the last DownloadError once the attempts are exhausted (the
    old code fell through and later crashed on an undefined variable).
    """
    last_error = None
    for attempt in range(MAX_ATTEMPTS):
        log.info(f"Attempting to download \"{target}\" ATTEMPT #{attempt + 1}")
        try:
            return ydl.sanitize_info(ydl.extract_info(target, download=True))
        except utils.DownloadError as e:
            last_error = e
            time.sleep(attempt * 100 / 1000)  # back-off in seconds
    raise last_error


def search(query):
    """Resolve *query* to a (title, webpage_url, path_or_stream_url) tuple.

    Tries SoundCloud first; anything that fails there falls back to yt-dlp,
    either as a direct URL (when *query* is fetchable) or as a YouTube
    search. Fix: bare excepts narrowed to Exception so Ctrl-C still works.
    """
    try:
        api = SoundcloudAPI()
        track = api.resolve(query)
        assert type(track) is Track
        return (track.title, query, track.get_stream_url())
    except Exception:
        pass  # not a SoundCloud track -> fall back to yt-dlp below
    with YoutubeDL(YTDL_OPTS) as ydl:
        try:
            requests.get(query)  # probe: is the query a fetchable URL?
        except Exception:
            # Not a URL: treat it as a YouTube search and take the top hit.
            info = _extract_with_retries(ydl, "ytsearch:{}".format(query))['entries'][0]
        else:
            info = _extract_with_retries(ydl, query)
            if 'entries' in info:
                info = info['entries'][0]
    return (info['title'], info['webpage_url'], info["requested_downloads"][0]["filepath"])
def delete_temp_dir():
    """Remove the downloaded-music directory (MUSIC_DIRNAME) if it exists."""
    target = os.path.join(os.getcwd(), MUSIC_DIRNAME)
    if not os.path.isdir(target):
        return
    shutil.rmtree(target)
| stephenjusto247/WeebsRUs | lib/utils.py | utils.py | py | 1,980 | python | en | code | 2 | github-code | 13 |
10191336911 | import subprocess
import click
from devine.core.config import config
from devine.core.constants import context_settings
from devine.core.utilities import get_binary_path
@click.command(
    short_help="Serve your Local Widevine Devices for Remote Access.",
    context_settings=context_settings)
@click.option("-h", "--host", type=str, default="0.0.0.0", help="Host to serve from.")
@click.option("-p", "--port", type=int, default=8786, help="Port to serve from.")
@click.option("--caddy", is_flag=True, default=False, help="Also serve with Caddy.")
def serve(host: str, port: int, caddy: bool) -> None:
    """
    Serve your Local Widevine Devices for Remote Access.

    \b
    Host as 127.0.0.1 may block remote access even if port-forwarded.
    Instead, use 0.0.0.0 and ensure the TCP port you choose is forwarded.

    \b
    You may serve with Caddy at the same time with --caddy. You can use Caddy
    as a reverse-proxy to serve with HTTPS. The config used will be the Caddyfile
    next to the devine config.
    """
    # Local import; also shadows this function's name inside the body, so
    # 'serve.run' below refers to the pywidevine module, not this command.
    from pywidevine import serve

    if caddy:
        executable = get_binary_path("caddy")
        if not executable:
            raise click.ClickException("Caddy executable \"caddy\" not found but is required for --caddy.")
        caddy_p = subprocess.Popen([
            executable,
            "run",
            "--config", str(config.directories.user_configs / "Caddyfile")
        ])
    else:
        caddy_p = None

    try:
        # Ensure a devices list exists, then add every local .wvd file to it.
        if not config.serve.get("devices"):
            config.serve["devices"] = []
        config.serve["devices"].extend(list(config.directories.wvds.glob("*.wvd")))
        serve.run(config.serve, host, port)
    finally:
        # Always tear down the Caddy sidecar, even if serving crashed.
        if caddy_p:
            caddy_p.kill()
| devine-dl/devine | devine/commands/serve.py | serve.py | py | 1,743 | python | en | code | 198 | github-code | 13 |
class Solution:
    # @param {integer[]} nums
    # @return {integer}
    def majorityElement(self, nums):
        """Return the element appearing more than len(nums)/2 times.

        Counts occurrences in a dict and returns as soon as any element
        crosses the majority threshold. Returns None (implicitly) when no
        majority element exists.
        """
        # Hoisted out of the loop: len(nums) is loop-invariant.
        length = len(nums)
        counts = {}
        for n in nums:
            counts[n] = counts.get(n, 0) + 1
            if counts[n] > length / 2:
                return n
| JirenJin/leetcode-problems | python/majority_element.py | majority_element.py | py | 363 | python | en | code | 0 | github-code | 13 |
29779182539 | """Iowa scraper
"""
import asyncio
import json
import logging
import os
import re
import shutil
from typing import Dict, List
import usaddress
from bs4 import BeautifulSoup, Tag
from msedge.selenium_tools import Edge, EdgeOptions
from lib.ElectionSaver import electionsaver
from lib.definitions import ROOT_DIR, WTVWebDriver
from lib.errors.wtv_errors import WalkTheVoteError
from lib.scrapers.base_scraper import BaseScraper
# Module logger: DEBUG-level messages go to the console.
LOG = logging.getLogger("massachusetts_scraper")
LOG.setLevel(logging.DEBUG)
# Console handler at DEBUG level.
# Use logging.StreamHandler(sys.stdout) to print to stdout instead of the default stderr.
ch = logging.StreamHandler()
ch.setLevel(logging.DEBUG)
# Timestamped "name - level - message" format.
formatter = logging.Formatter("%(asctime)s - %(name)s - %(levelname)s - %(message)s")
# add formatter to ch
ch.setFormatter(formatter)
# add ch to logger
LOG.addHandler(ch)
class MassachusettsScraper(BaseScraper):
    """Scrapes MA election-office addresses and phone numbers.

    Addresses come from the Secretary of State's election-office page;
    phone numbers live on a separate city/town directory page and are
    joined in via phone_jurisdiction_map.
    """

    def __init__(self):
        """Set up source URLs, a webdriver, and the phone-number mapping."""
        self.election_offices_url = (
            "https://www.sec.state.ma.us/ele/eleev/ev-find-my-election-office.htm"
        )
        self.city_town_directory_url = (
            "https://www.sec.state.ma.us/ele/eleclk/clkidx.htm"
        )
        self.election_offices = []
        self.driver = WTVWebDriver("Massachusetts").get_webdriver()
        # Phone numbers are published on a separate directory page; build
        # the town-name -> phone lookup up front.
        self.phone_jurisdiction_map = self.create_juridiction_phone_mapping()

    def scrape(self) -> List[Dict]:
        """Scrape every MA election office into a list of dicts.

        Each office dict carries cityName, mailingAddress, physicalAddress,
        email, phone and website; the result is also written to
        massachusetts.json. On any exception, logs it and returns whatever
        was collected so far. This code will only work on Windows as it
        stands now (webdriver prerequisites).
        """
        # Using selenium webdriver over requests due to needing a more sophisticated
        # way to bypass captcha for gov websites that use it. Only works on Windows
        # for now and there are some simple pre-reqs needed before it can work.
        # More info: https://selenium-python.readthedocs.io/index.html
        try:
            LOG.info("Starting webdriver...")

            # Execute GET request
            LOG.info(f"Fetching elections offices at {self.election_offices_url}...")
            self.driver.get(self.election_offices_url)

            # Convert the response into an easily parsable object
            election_offices_soup = BeautifulSoup(
                self.driver.page_source, "html.parser"
            )
            self.driver.quit()

            election_offices_div = election_offices_soup.find("div", id="content_third")

            elm: Tag
            election_office = {}
            starting_point_found = False
            # The page is a flat run of <h2> (town name) followed by <p>
            # blocks (addresses / email) for that town.
            office_list = election_offices_div.find_all(["h2", "p"])
            for idx, elm in enumerate(office_list):
                # Skip leading <p> elements before the first town heading.
                if not starting_point_found:
                    if elm.name == "h2":
                        starting_point_found = True
                    else:
                        continue
                if elm.name == "h2":
                    # A new town heading: flush the previous office first.
                    if election_office:
                        election_office["phone"] = self.phone_jurisdiction_map[
                            election_office["cityName"]
                        ]
                        election_office["website"] = self.election_offices_url
                        self.election_offices.append(election_office)
                    election_office = {
                        "cityName": " ".join(elm.getText().split())
                        .replace("\n", "")
                        .title()
                    }
                elif elm.name == "p":
                    mapping = electionsaver.addressSchemaMapping
                    text = elm.getText().strip()
                    if re.match("MAILING ADDRESS", text):
                        # These towns split city/state/zip into the NEXT <p>.
                        outliers = ["Boston", "Gardner", "Haverhill", "Princeton"]
                        city_name = election_office["cityName"]
                        parsed_address = text.split(sep="\n", maxsplit=2)[2].replace(
                            "\n", " "
                        )
                        mailing_address = usaddress.tag(
                            parsed_address, tag_mapping=mapping
                        )[0]
                        mailing_address["locationName"] = f"{city_name} Election Office"
                        election_office["mailingAddress"] = mailing_address
                        if city_name in outliers:
                            city_state_zip = usaddress.tag(
                                office_list[idx + 1].getText(), tag_mapping=mapping
                            )[0]
                            election_office["mailingAddress"].update(city_state_zip)
                    elif re.match("EMAIL", text):
                        election_office["email"] = text.split(":")[1].lstrip()
                    elif re.match("OFFICE ADDRESS", text):
                        # The office address omits state/zip; reuse them from
                        # the already-parsed mailing address.
                        mailing_address = election_office["mailingAddress"]
                        state = mailing_address["state"]
                        zip_code = mailing_address["zipCode"]
                        apt_number = mailing_address.get("aptNumber", "")
                        street_city = text.split(" ", maxsplit=2)[2]
                        street_city_split = street_city.split(",")
                        street = street_city_split[0]
                        if len(street_city_split) == 2:
                            city_part = f", {street_city_split[1].lstrip()},"
                        else:
                            city_part = ""
                        parsed_address = (
                            f"{street} {apt_number}{city_part} {state} {zip_code}"
                        )
                        try:
                            physical_address = usaddress.tag(
                                parsed_address, tag_mapping=mapping
                            )[0]
                        except Exception:
                            # Retry with the raw street text if tagging failed.
                            parsed_address = (
                                f'{text.split(" ", maxsplit=2)[2]}, {state} {zip_code}'
                            )
                            physical_address = usaddress.tag(
                                parsed_address, tag_mapping=mapping
                            )[0]
                        physical_address[
                            "locationName"
                        ] = f'{election_office["cityName"]} Election Office'
                        # Hand-corrected address for a known bad parse.
                        if election_office["cityName"] == "Royalston":
                            physical_address["streetNumberName"] = "10 The Common"
                        election_office["physicalAddress"] = physical_address

            with open(
                os.path.join(
                    ROOT_DIR, "scrapers", "massachusetts", "massachusetts.json"
                ),
                "w",
            ) as f:
                json.dump(self.election_offices, f)
        except Exception as e:
            LOG.exception(f"Exception: {e}")
        return self.election_offices

    def create_juridiction_phone_mapping(self):
        """
        The election office url for MA doesn't include the office's phone number.
        Instead, the website tells you to look in their town/city directory, which is
        in a separate url (nice one, MA. appreciate it). As such, this method extracts
        those phone numbers and maps them to the jurisdiction they represent for easy
        lookup when constructing the final election office objects.
        (Method name keeps the historical 'juridiction' spelling for
        compatibility with existing callers.)
        @return: mapping of town/city name to phone number
        """
        mapping = {}
        self.driver.get(self.city_town_directory_url)
        soup = BeautifulSoup(self.driver.page_source, "html.parser")
        directory = soup.find("div", id="content_third").find_all("p")
        entry: Tag
        for entry in directory:
            # Group 1: town name (before </span>); group 2: the phone number.
            m = re.findall(
                r"([A-Z -]+(?=</span>))|((?<=Phone: )\d{3}-\d{3}-\d{4})",
                entry.decode_contents(),
            )
            if m:
                juridiction_name = m[0][0]
                juridiction_phone = m[1][1]
                # Source-site typo: 'PEPPEREL' is missing its final 'L'.
                if juridiction_name == "PEPPEREL":
                    juridiction_name += "L"
                mapping[juridiction_name.title()] = juridiction_phone
        return mapping
async def get_election_offices() -> List[Dict]:
    """Scrape and return all Massachusetts election offices.

    NOTE(review): the scrape itself is synchronous; the async signature
    exists to match the other state scrapers' interface.
    """
    massachusetts_scraper = MassachusettsScraper()
    election_offices = massachusetts_scraper.scrape()
    return election_offices

if __name__ == "__main__":
    asyncio.run(get_election_offices())
| Acesonnall/WalkTheVote | lib/scrapers/massachusetts/massachusetts_scraper.py | massachusetts_scraper.py | py | 8,497 | python | en | code | 0 | github-code | 13 |
72915320018 | import re
import html
from qutebrowser.qt.widgets import QStyle, QStyleOptionViewItem, QStyledItemDelegate
from qutebrowser.qt.core import QRectF, QRegularExpression, QSize, Qt
from qutebrowser.qt.gui import (QIcon, QPalette, QTextDocument, QTextOption,
QAbstractTextDocumentLayout, QSyntaxHighlighter,
QTextCharFormat)
from qutebrowser.config import config
from qutebrowser.utils import qtutils
from qutebrowser.completion import completionwidget
class _Highlighter(QSyntaxHighlighter):

    """Highlight every occurrence of the filter words in a text document."""

    def __init__(self, doc, pattern, color):
        super().__init__(doc)
        self._format = QTextCharFormat()
        self._format.setForeground(color)
        # Longest words first so the alternation prefers the longest match;
        # each word is escaped so it matches literally.
        escaped = (re.escape(word)
                   for word in sorted(pattern.split(), key=len, reverse=True))
        self._expression = QRegularExpression(
            "|".join(escaped),
            QRegularExpression.PatternOption.CaseInsensitiveOption,
        )
        qtutils.ensure_valid(self._expression)

    def highlightBlock(self, text):
        """Override highlightBlock for custom highlighting."""
        matches = self._expression.globalMatch(text)
        while matches.hasNext():
            found = matches.next()
            self.setFormat(
                found.capturedStart(),
                found.capturedLength(),
                self._format,
            )
class CompletionItemDelegate(QStyledItemDelegate):
    """Delegate used by CompletionView to draw individual items.
    Mainly a cleaned up port of Qt's way to draw a TreeView item, except it
    uses a QTextDocument to draw the text and add marking.
    Original implementation:
        qt/src/gui/styles/qcommonstyle.cpp:drawControl:2153
    Attributes:
        _opt: The QStyleOptionViewItem which is used.
        _style: The style to be used.
        _painter: The QPainter to be used.
        _doc: The QTextDocument to be used.
    """
    # FIXME this is horribly slow when resizing.
    # We should probably cache something in _get_textdoc or so, but as soon as
    # we implement eliding that cache probably isn't worth much anymore...
    # https://github.com/qutebrowser/qutebrowser/issues/121
    def __init__(self, parent=None):
        # Per-item state: populated in paint()/sizeHint() and consumed by the
        # _draw_* helpers below.
        self._painter = None
        self._opt = None
        self._doc = None
        self._style = None
        super().__init__(parent)
    def _draw_background(self):
        """Draw the background of an ItemViewItem."""
        assert self._opt is not None
        assert self._style is not None
        self._style.drawPrimitive(QStyle.PrimitiveElement.PE_PanelItemViewItem, self._opt,
                                  self._painter, self._opt.widget)
    def _draw_icon(self):
        """Draw the icon of an ItemViewItem."""
        assert self._opt is not None
        assert self._style is not None
        icon_rect = self._style.subElementRect(
            QStyle.SubElement.SE_ItemViewItemDecoration, self._opt, self._opt.widget)
        if not icon_rect.isValid():
            # The rect seems to be wrong in all kind of ways if no icon should
            # be displayed.
            return
        # Pick the icon mode/state matching the item's enabled/selected state.
        mode = QIcon.Mode.Normal
        if not self._opt.state & QStyle.StateFlag.State_Enabled:
            mode = QIcon.Mode.Disabled
        elif self._opt.state & QStyle.StateFlag.State_Selected:
            mode = QIcon.Mode.Selected
        state = QIcon.State.On if self._opt.state & QStyle.StateFlag.State_Open else QIcon.State.Off
        self._opt.icon.paint(self._painter, icon_rect,
                             self._opt.decorationAlignment, mode, state)
    def _draw_text(self, index):
        """Draw the text of an ItemViewItem.
        This is the main part where we differ from the original implementation
        in Qt: We use a QTextDocument to draw text.
        Args:
            index: The QModelIndex of the item to draw.
        """
        assert self._opt is not None
        assert self._painter is not None
        assert self._style is not None
        if not self._opt.text:
            return
        text_rect_ = self._style.subElementRect(
            QStyle.SubElement.SE_ItemViewItemText, self._opt, self._opt.widget)
        qtutils.ensure_valid(text_rect_)
        margin = self._style.pixelMetric(QStyle.PixelMetric.PM_FocusFrameHMargin,
                                         self._opt, self._opt.widget) + 1
        # remove width padding
        text_rect = text_rect_.adjusted(margin, 0, -margin, 0)
        qtutils.ensure_valid(text_rect)
        # move text upwards a bit
        if index.parent().isValid():
            text_rect.adjust(0, -1, 0, -1)
        else:
            text_rect.adjust(0, -2, 0, -2)
        self._painter.save()
        state = self._opt.state
        if state & QStyle.StateFlag.State_Enabled and state & QStyle.StateFlag.State_Active:
            cg = QPalette.ColorGroup.Normal
        elif state & QStyle.StateFlag.State_Enabled:
            cg = QPalette.ColorGroup.Inactive
        else:
            cg = QPalette.ColorGroup.Disabled
        if state & QStyle.StateFlag.State_Selected:
            self._painter.setPen(self._opt.palette.color(
                cg, QPalette.ColorRole.HighlightedText))
            # This is a dirty fix for the text jumping by one pixel for
            # whatever reason.
            text_rect.adjust(0, -1, 0, 0)
        else:
            self._painter.setPen(self._opt.palette.color(cg, QPalette.ColorRole.Text))
        if state & QStyle.StateFlag.State_Editing:
            self._painter.setPen(self._opt.palette.color(cg, QPalette.ColorRole.Text))
            self._painter.drawRect(text_rect_.adjusted(0, 0, -1, -1))
        self._painter.translate(text_rect.left(), text_rect.top())
        self._get_textdoc(index)
        self._draw_textdoc(text_rect, index.column())
        self._painter.restore()
    def _draw_textdoc(self, rect, col):
        """Draw the QTextDocument of an item.
        Args:
            rect: The QRect to clip the drawing to.
            col: The column index, used to pick a per-column foreground color.
        """
        assert self._painter is not None
        assert self._doc is not None
        assert self._opt is not None
        # We can't use drawContents because then the color would be ignored.
        clip = QRectF(0, 0, rect.width(), rect.height())
        self._painter.save()
        if self._opt.state & QStyle.StateFlag.State_Selected:
            color = config.cache['colors.completion.item.selected.fg']
        elif not self._opt.state & QStyle.StateFlag.State_Enabled:
            color = config.cache['colors.completion.category.fg']
        else:
            colors = config.cache['colors.completion.fg']
            # if multiple colors are set, use different colors per column
            color = colors[col % len(colors)]
        self._painter.setPen(color)
        ctx = QAbstractTextDocumentLayout.PaintContext()
        ctx.palette.setColor(QPalette.ColorRole.Text, self._painter.pen().color())
        if clip.isValid():
            self._painter.setClipRect(clip)
            ctx.clip = clip
        self._doc.documentLayout().draw(self._painter, ctx)
        self._painter.restore()
    def _get_textdoc(self, index):
        """Create the QTextDocument of an item.
        Args:
            index: The QModelIndex of the item to draw.
        """
        assert self._opt is not None
        # FIXME we probably should do eliding here. See
        # qcommonstyle.cpp:viewItemDrawText
        # https://github.com/qutebrowser/qutebrowser/issues/118
        text_option = QTextOption()
        if self._opt.features & QStyleOptionViewItem.ViewItemFeature.WrapText:
            text_option.setWrapMode(QTextOption.WrapMode.WordWrap)
        else:
            text_option.setWrapMode(QTextOption.WrapMode.ManualWrap)
        text_option.setTextDirection(self._opt.direction)
        text_option.setAlignment(QStyle.visualAlignment(
            self._opt.direction, self._opt.displayAlignment))
        # Drop the previous document; a fresh one is built for every item.
        if self._doc is not None:
            self._doc.deleteLater()
        self._doc = QTextDocument(self)
        self._doc.setDefaultFont(self._opt.font)
        self._doc.setDefaultTextOption(text_option)
        self._doc.setDocumentMargin(2)
        if index.parent().isValid():
            view = self.parent()
            assert isinstance(view, completionwidget.CompletionView), view
            pattern = view.pattern
            columns_to_filter = index.model().columns_to_filter(index)
            if index.column() in columns_to_filter and pattern:
                if self._opt.state & QStyle.StateFlag.State_Selected:
                    color = config.val.colors.completion.item.selected.match.fg
                else:
                    color = config.val.colors.completion.match.fg
                _Highlighter(self._doc, pattern, color)
            self._doc.setPlainText(self._opt.text)
        else:
            self._doc.setHtml(
                '<span style="font: {};">{}</span>'.format(
                    html.escape(config.val.fonts.completion.category),
                    html.escape(self._opt.text)))
    def _draw_focus_rect(self):
        """Draw the focus rectangle of an ItemViewItem."""
        assert self._opt is not None
        assert self._style is not None
        state = self._opt.state
        if not state & QStyle.StateFlag.State_HasFocus:
            return
        o = self._opt
        o.rect = self._style.subElementRect(
            QStyle.SubElement.SE_ItemViewItemFocusRect, self._opt, self._opt.widget)
        o.state |= QStyle.StateFlag.State_KeyboardFocusChange | QStyle.StateFlag.State_Item
        qtutils.ensure_valid(o.rect)
        if state & QStyle.StateFlag.State_Enabled:
            cg = QPalette.ColorGroup.Normal
        else:
            cg = QPalette.ColorGroup.Disabled
        if state & QStyle.StateFlag.State_Selected:
            role = QPalette.ColorRole.Highlight
        else:
            role = QPalette.ColorRole.Window
        o.backgroundColor = self._opt.palette.color(cg, role)
        self._style.drawPrimitive(QStyle.PrimitiveElement.PE_FrameFocusRect, o, self._painter,
                                  self._opt.widget)
    def sizeHint(self, option, index):
        """Override sizeHint of QStyledItemDelegate.
        Return the cell size based on the QTextDocument size, but might not
        work correctly yet.
        Args:
            option: const QStyleOptionViewItem & option
            index: const QModelIndex & index
        Return:
            A QSize with the recommended size.
        """
        value = index.data(Qt.ItemDataRole.SizeHintRole)
        if value is not None:
            return value
        self._opt = QStyleOptionViewItem(option)
        self.initStyleOption(self._opt, index)
        self._style = self._opt.widget.style()
        assert self._style is not None
        self._get_textdoc(index)
        assert self._doc is not None
        docsize = self._doc.size().toSize()
        size = self._style.sizeFromContents(QStyle.ContentsType.CT_ItemViewItem, self._opt,
                                            docsize, self._opt.widget)
        qtutils.ensure_valid(size)
        # Add a small fixed padding to the computed size.
        return size + QSize(10, 3)
    def paint(self, painter, option, index):
        """Override the QStyledItemDelegate paint function.
        Args:
            painter: QPainter * painter
            option: const QStyleOptionViewItem & option
            index: const QModelIndex & index
        """
        self._painter = painter
        self._painter.save()
        self._opt = QStyleOptionViewItem(option)
        self.initStyleOption(self._opt, index)
        self._style = self._opt.widget.style()
        self._draw_background()
        self._draw_icon()
        self._draw_text(index)
        self._draw_focus_rect()
        self._painter.restore()
| qutebrowser/qutebrowser | qutebrowser/completion/completiondelegate.py | completiondelegate.py | py | 11,855 | python | en | code | 9,084 | github-code | 13 |
25059074510 | import requests
from twilio.rest import Client
import os
# OpenWeatherMap One Call endpoint; credentials and coordinates come from the
# environment so no secrets live in the source.
OWM_Endpoint = "https://api.openweathermap.org/data/3.0/onecall"
api_key = os.getenv("owm_api_key")
account_sid = os.getenv("twillio_sid")
auth_token = os.getenv("twillio_auth_token")
twillio_verified_no = os.getenv("twillio_verified_no")
# NOTE(review): this reads env var "twillio_virtual_token" but stores it as a
# phone number — confirm the variable name is intentional.
twillio_virtual_no = os.getenv("twillio_virtual_token")
MY_LAT = os.getenv("latitude")
MY_LONG = os.getenv("longitude")
# Only the hourly forecast is used below, so exclude the other data blocks.
parameters = {
    "lat":MY_LAT,
    "lon":MY_LONG,
    "exclude":"current,minutely,daily",
    "appid":api_key
}
response = requests.get(url=OWM_Endpoint, params=parameters)
response.raise_for_status()
print(f"Status code: {response.status_code}")
weather_data = response.json()
# Look only at the next 12 hours of forecast.
weather_slice = weather_data["hourly"][:12]
will_rain = False
for hour_data in weather_slice:
    condition_code = hour_data["weather"][0]["id"]
    # OWM condition codes below 700 are precipitation (thunderstorm, drizzle,
    # rain, snow).
    if int(condition_code) < 700:
        will_rain = True
if will_rain:
    # Send an SMS reminder via Twilio.
    client = Client(account_sid, auth_token)
    message = client.messages.create(
        body="It's going to rain today. Remember to bring an ☂",
        from_=twillio_virtual_no,
        to=twillio_verified_no
    )
    print(message.status)
1416174846 | import sys
# The puzzle input file must be passed as the first CLI argument.
if len(sys.argv) <= 1:
    raise Exception("No inputs")
with open(sys.argv[1], 'r') as f:
    lines = f.readlines()
def parse_food(l):
    """Parse one food line into ``(ingredient_list, allergen_list)``.

    Lines look like ``"a b c (contains dairy, fish)\n"``.
    """
    # Drop trailing whitespace plus the closing ")" before splitting.
    body = l.rstrip()[:-1]
    ingredient_part, allergen_part = body.split(" (contains ")
    return (ingredient_part.split(" "), allergen_part.split(", "))
def parse_foods(ll):
    """Parse every input line in *ll* with parse_food."""
    return list(map(parse_food, ll))
# Parse the whole input once; the functions below operate on this list.
foods = parse_foods(lines)
def get_ingredient_by_allergen(fs):
    """Map each allergen to the set of ingredients that could contain it.

    An allergen's candidates are the intersection of the ingredient lists of
    every food that declares that allergen.
    """
    candidates = {}
    for ingredients, allergens in fs:
        ingredient_set = set(ingredients)
        for allergen in allergens:
            if allergen in candidates:
                candidates[allergen] &= ingredient_set
            else:
                candidates[allergen] = set(ingredient_set)
    return candidates
def get_non_allergen(fs):
    """Return the set of ingredients in *fs* that cannot contain any allergen."""
    i = set([])
    for f in fs:
        i = i.union(set(f[0]))
    # BUG FIX: this previously called get_ingredient_by_allergen(foods) — the
    # module-level global — so the `fs` parameter was partially ignored.
    i_by_a = get_ingredient_by_allergen(fs)
    for a in i_by_a:
        i = i.difference(i_by_a[a])
    return i
def get_count_in_foods(fs, ings):
    """Count how many times ingredients from *ings* appear across all foods."""
    return sum(len(set(ingredients).intersection(ings)) for ingredients, _ in fs)
def part_1(fs):
    # Part 1 answer: occurrences of allergen-free ingredients across all foods.
    return get_count_in_foods(fs, get_non_allergen(fs))
def optimize(i_by_a):
    """Resolve the allergen->candidate-ingredients map to unique assignments.

    Repeatedly picks an allergen with exactly one candidate, records the pair,
    and removes that ingredient from every other allergen's candidate set.
    Returns a list of (allergen, ingredient) pairs in resolution order.
    """
    resolved = []
    done = False
    while not done:
        done = True
        settled = None
        for allergen in i_by_a:
            candidates = i_by_a[allergen]
            if len(candidates) == 1:
                ingredient = next(iter(candidates))
                resolved.append((allergen, ingredient))
                settled = candidates
                done = False
                break
        if settled is not None:
            # Remove the settled ingredient everywhere; drop emptied entries
            # (including the allergen that was just resolved).
            pruned = {}
            for allergen in i_by_a:
                remaining = i_by_a[allergen].difference(settled)
                if remaining:
                    pruned[allergen] = remaining
            i_by_a = pruned
    return resolved
def part_2(fs):
    """Part 2 answer: allergen-carrying ingredients joined by commas, ordered
    by their allergen's name."""
    # BUG FIX: this previously called get_ingredient_by_allergen(foods) — the
    # module-level global — so the `fs` parameter was ignored.
    i_by_a = get_ingredient_by_allergen(fs)
    i_by_a = optimize(i_by_a)
    return ",".join([x[1] for x in sorted(i_by_a, key=lambda x: x[0])])
# Solve both parts on the parsed input and print the answers.
print("Part 1: %d" % part_1(foods))
print("Part 2: %s" % part_2(foods))
| asek-ll/aoc2020 | day21/main.py | main.py | py | 1,940 | python | en | code | 0 | github-code | 13 |
14092864301 | import random
import discord
from discord.ext import commands
from command.cache.list_color import list_color
class Pick(commands.Cog):
    """Cog with a `pick` command: the bot replies with one of the user's
    comma-separated options, chosen at random."""
    # Command metadata — presumably consumed by a help system elsewhere; confirm.
    config = {
        "name": "pick",
        "desc": "bot se chon 1 trong 2 cai ma ban dua",
        "use": "pick <luachon1>, <luachon2>,...",
        "author": "Anh Duc(aki team)"
    }
    def __init__(self, bot):
        self.bot = bot
    @commands.hybrid_command()
    @commands.cooldown(1, 4, commands.BucketType.user)  # one use per 4s per user
    async def pick(self, ctx, *, pick: str):
        """Reply with a random item from the comma-separated options."""
        try:
            await ctx.reply(f":game_die: **{ctx.author.name}**, Tôi chọn " + random.choice(pick.split(",")) + " :game_die:")
        except Exception as e:
            # NOTE(review): failures are only printed to stdout; the invoking
            # user gets no feedback.
            print(e)
async def setup(bot):
    # discord.py extension entry point: registers the cog on load.
    await bot.add_cog(Pick(bot))
| iotran207/Aki-bot | command/pick.py | pick.py | py | 757 | python | en | code | 4 | github-code | 13 |
35013005252 |
def swap_case(s):
    """Return *s* with uppercase letters lowered and all other characters
    uppercased (non-cased characters are unchanged by .upper())."""
    # "".join over a generator builds the result in one pass instead of the
    # quadratic repeated string concatenation.
    return "".join(c.lower() if c.isupper() else c.upper() for c in s)
if __name__ == '__main__':
    # HackerRank harness: read one line, transform it, print the result.
    s = input()
    result = swap_case(s)
    print(result)
39032548326 | from django.shortcuts import render,redirect
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.decorators import login_required
from django.contrib import messages
from blog.models import Post
from django.contrib.auth.models import User
from .forms import UserRegistrationForm,UserUpdateForm,ProfileUpdateForm
def register(request):
    """Render the sign-up form on GET; create the account on a valid POST."""
    if request.method == "POST":
        form = UserRegistrationForm(request.POST)
        if form.is_valid():
            form.save()
            username = form.cleaned_data.get('username')
            # Fixed typo in the flash message ("Acount" -> "Account").
            messages.success(request, f'Account has been created for {username}!')
            return redirect('blog-home')
        # On an invalid POST, fall through and re-render the bound form so
        # validation errors are shown.
    else:
        form = UserRegistrationForm()
    return render(request, 'users/register.html', {'form': form})
@login_required
def profile(request):
    """Show the logged-in user's posts and profile forms; save valid edits on POST."""
    logged_in_user = request.user
    logged_in_user_posts = Post.objects.filter(author=logged_in_user).order_by('-date_posted')
    if request.method == "POST":
        u_form = UserUpdateForm(request.POST, instance=request.user)
        p_form = ProfileUpdateForm(request.POST, request.FILES, instance=request.user.profile)
        # BUG FIX: the bound forms were previously never validated or saved,
        # so profile edits were silently discarded.
        if u_form.is_valid() and p_form.is_valid():
            u_form.save()
            p_form.save()
            messages.success(request, 'Your account has been updated!')
        # On invalid input, fall through and re-render the bound forms so the
        # user sees the validation errors.
    else:
        u_form = UserUpdateForm(instance=request.user)
        p_form = ProfileUpdateForm(instance=request.user.profile)
    context = {
        'u_form': u_form,
        'p_form': p_form,
        'posts': logged_in_user_posts}
    return render(request, 'users/profile.html', context)
| harshsanjiv/b.log-in | users/views.py | views.py | py | 1,484 | python | en | code | 0 | github-code | 13 |
34347504442 | # imports & connection
import sys
import mysql.connector
import time
connection = mysql.connector.connect(user ='root', database = 'example', password = '12345')
connection.autocommit = True
# starting variables
# Module-level state shared by the menu functions below (passed around as
# arguments and in places also read as globals).
balance = 0
menu_choice = 0
name = None
pin_num = 0
birth_day = 0
withdraw_amount = None
deposit_amount = None
withdraw_choice = 0
account_num = 0
# functions
def check_balance(account_num, pin_num):
    """Print the balance for the account matching *account_num* and *pin_num*,
    or an error message when the pair matches no row."""
    # Create the cursor before the try block so the finally clause cannot hit
    # a NameError when connection.cursor() itself fails.
    balance_cursor = connection.cursor()
    try:
        # Parameterized query: never interpolate user input into SQL.
        find_balance = "SELECT balance FROM bank WHERE accountnumber = %s AND pin = %s"
        balance_cursor.execute(find_balance, (account_num, pin_num))
        result = balance_cursor.fetchone()
        if result:
            balance = result[0]
            print(f"\nYour current balance is: ${balance:.2f}")
            time.sleep(1)
        else:
            print("Invalid account number or PIN.")
    except mysql.connector.Error as error:
        print(f"Error: {error}")
    finally:
        balance_cursor.close()
def deposit(deposit_amount, account_num, pin_num):
    """Interactively deposit money into the given account.

    Loops until a valid (non-zero) deposit is made or the user cancels.
    """
    deposit_choice = 0
    valid_deposit = None
    while deposit_choice < 1 or deposit_choice > 2 or valid_deposit == False:
        deposit_choice = int(input("1) Deposit\n2) Cancel\n\nPlease enter option 1 or 2: "))
        if deposit_choice == 1:
            deposit_cursor = connection.cursor()
            # Parameterized query: never interpolate user input into SQL.
            balance_query = "SELECT balance FROM bank WHERE accountnumber = %s AND pin = %s"
            deposit_cursor.execute(balance_query, (account_num, pin_num))
            result = deposit_cursor.fetchone()
            if result is None:
                print("Invalid account number or pin.")
                return
            deposit_amount = float(input("\nHow much money would you like to deposit into your account? "))
            if deposit_amount != 0:
                # float(), not int(): truncating the stored balance silently
                # dropped the cents from every account.
                current_balance = float(result[0])
                new_balance = current_balance + deposit_amount
                deposit_cursor.execute(
                    "UPDATE bank SET balance = %s WHERE accountnumber = %s AND pin = %s",
                    (new_balance, account_num, pin_num),
                )
                connection.commit()
                print(f"\nYour deposit of ${deposit_amount:.2f} was successful. The new balance is: ${new_balance:.2f}")
                time.sleep(1)
                deposit_cursor.close()
                valid_deposit = True
            else:
                print(f"\nInvalid Amount: Please choose an amount greater than $0.")
                valid_deposit = False
        elif deposit_choice == 2:
            print("\nDeposit canceled.")
            break
        else:
            print("\nInvalid Choice: please choose either 1 or 2.")
def withdraw(withdraw_amount, withdraw_choice, account_num, pin_num):
    """Interactively withdraw money from the given account.

    Loops until a valid withdrawal (non-zero, not above the balance) is made
    or the user cancels.
    """
    withdraw_choice = 0
    valid_withdrawal = False
    while withdraw_choice < 1 or withdraw_choice > 2 or valid_withdrawal == False:
        withdraw_choice = int(input("1) Withdraw\n2) Cancel\n\nPlease enter option 1 or 2: "))
        if withdraw_choice == 1:
            withdraw_cursor = connection.cursor()
            # Parameterized query: never interpolate user input into SQL.
            balance_query = "SELECT balance FROM bank WHERE accountnumber = %s AND pin = %s"
            withdraw_cursor.execute(balance_query, (account_num, pin_num))
            result = withdraw_cursor.fetchone()
            if result is None:
                print("Invalid account number or pin.")
                return
            withdraw_amount = float(input("\nHow much money would you like to withdraw from your account? "))
            # float(), not int(): truncating the stored balance silently
            # dropped the cents from every account.
            current_balance = float(result[0])
            if withdraw_amount == 0:
                print(f"\nPlease choose an amount greater than $0.")
                valid_withdrawal = False
            elif withdraw_amount > current_balance:
                print(f"\nSorry, you don't have ${withdraw_amount:.2f} in your account. You can only withdraw up to your current balance of ${current_balance:.2f}.\n")
                valid_withdrawal = False
            else:
                new_balance = current_balance - withdraw_amount
                withdraw_cursor.execute(
                    "UPDATE bank SET balance = %s WHERE accountnumber = %s AND pin = %s",
                    (new_balance, account_num, pin_num),
                )
                connection.commit()
                print(f"\nYour withdrawal of ${withdraw_amount:.2f} was successful. The new balance is: ${new_balance:.2f}")
                time.sleep(1)
                withdraw_cursor.close()
                valid_withdrawal = True
        elif withdraw_choice == 2:
            print("\nWithdrawal canceled.")
            break
        else:
            print("\nInvalid Choice: please choose either 1 or 2.")
def create_account(name, account_num, birth_day, pin_num):
    """Prompt for the details of a new account and insert it into the DB."""
    print("\nWelcome! Create a new account by entering some basic information below:\n")
    name = str(input("First & Last Name: "))
    account_num = int(input("Account Number: "))
    birth_day = input("Date of Birth: ")
    pin_num = int(input("PIN: "))
    balance = float(input("Balance: "))
    mycursor = connection.cursor()
    # Parameterized INSERT: never interpolate user input into SQL.
    sql = "INSERT INTO bank (name, accountnumber, pin, birthday, balance) VALUES (%s, %s, %s, %s, %s)"
    mycursor.execute(sql, (name, account_num, pin_num, birth_day, balance))
    print(f"\nNew user created. Welcome, {name.title()}.\n\nHere is your account information:\nAccount Number: {account_num}\nPIN: {pin_num}\nBirthday: {birth_day}\nBalance: ${balance:.2f}")
    time.sleep(1)
    mycursor.close()
def delete_account(account_num, pin_num, name):
    """Ask for confirmation and delete the matching account row."""
    delete_cursor = connection.cursor()
    delete_choice = 0
    while delete_choice < 1 or delete_choice > 2:
        delete_choice = int(input("\nAre you sure you want to delete your account?\n1) Delete Account\n2) Cancel\n\nPlease choose an option (number 1 or 2): "))
        if delete_choice == 1:
            # Parameterized DELETE: never interpolate user input into SQL.
            sql = "DELETE FROM bank WHERE accountnumber = %s AND pin = %s"
            delete_cursor.execute(sql, (account_num, pin_num))
            print(f"Account number {account_num} deleted. Goodbye, {name.title()}.")
            time.sleep(1)
            # Return to the home menu after deleting the account.
            repeat_menu(menu_choice, name, deposit_amount, withdraw_amount, withdraw_choice, account_num)
        else:
            print("\nCanceled.")
def modify_account(account_num, pin_num):
    """Prompt for a new name and PIN and update the matching account row."""
    modify_cursor = connection.cursor()
    print("\nEnter your edited information below:\n")
    new_name = str(input("Updated First & Last Name: "))
    new_pin_num = int(input("Updated PIN: "))
    # Parameterized UPDATE: never interpolate user input into SQL.
    sql = "UPDATE bank SET name = %s, pin = %s WHERE accountnumber = %s AND pin = %s"
    modify_cursor.execute(sql, (new_name, new_pin_num, account_num, pin_num))
    print(f"\nAccount number {account_num} has been modified. Your updated name is {new_name.title()}, and your new PIN is now {new_pin_num}.")
    time.sleep(1)
def repeat_menu(menu_choice, name, deposit_amount, withdraw_amount, withdraw_choice, account_num):
    """Reset the menu choice and show the home menu again."""
    menu_choice = 0
    display_menu(menu_choice, name, deposit_amount, withdraw_amount, withdraw_choice, account_num)
def bank_login(account_num, pin_num, menu_choice):
    """Prompt for credentials until they match a row, then run the account menu."""
    log_in = False
    while not log_in:
        login_cursor = connection.cursor()
        account_num = int(input("Account Number: "))
        pin_num = int(input("PIN: "))
        # Parameterized query: never interpolate user input into SQL.
        find_name = "SELECT name FROM bank WHERE accountnumber = %s AND pin = %s"
        login_cursor.execute(find_name, (account_num, pin_num))
        result = login_cursor.fetchone()
        if result:
            name = result[0]
            print(f"\nWelcome {name.title()}! You are now logged in :).")
            time.sleep(1)
            log_in = True
            login_choice = 0
            # Per-account menu; a choice is reset to 0 after each action so
            # the loop prompts again.
            while login_choice < 1 or login_choice > 8:
                login_choice = int(input("\n ~ Menu ~\n1) Return Home\n2) Check Account Balance\n3) Make A Deposit\n4) Make A Withdrawal\n5) Edit Account\n6) Close Your Account\n7) Create An Account\n8) Exit\n\nPlease choose an option (number 1-8): "))
                if login_choice == 1:
                    login_cursor.close()
                    repeat_menu(menu_choice, name, deposit_amount, withdraw_amount, withdraw_choice, account_num)
                elif login_choice == 2:
                    check_balance(account_num, pin_num)
                    login_choice = 0
                elif login_choice == 3:
                    deposit(deposit_amount, account_num, pin_num)
                    login_choice = 0
                elif login_choice == 4:
                    withdraw(withdraw_amount, withdraw_choice, account_num, pin_num)
                    login_choice = 0
                elif login_choice == 5:
                    modify_account(account_num, pin_num)
                    login_choice = 0
                elif login_choice == 6:
                    delete_account(account_num, pin_num, name)
                    login_choice = 0
                elif login_choice == 7:
                    create_account(name, account_num, birth_day, pin_num)
                    login_choice = 0
                elif login_choice == 8:
                    print("\nExiting: See you next time :)")
                    sys.exit()
                else:
                    print("Please choose a valid option of 1-8.")
        else:
            print("\nERROR: You have entered an invalid account number or PIN. Please try again.\n")
            log_in = False
def display_menu(menu_choice, name, deposit_amount, withdraw_amount, withdraw_choice, account_num):
    """Show the home menu and dispatch to account creation, login, or exit."""
    while menu_choice < 1 or menu_choice > 3:
        menu_choice = int(input("\n ~ Home Menu ~\n1) Create An Account\n2) Log In\n3) Exit\n\nPlease choose an option (number 1-3): "))
        # NOTE(review): this unconditional recursive call runs BEFORE the
        # choice is handled below, so each interaction nests another menu
        # level — confirm the recursion is intentional.
        display_menu(menu_choice, name, deposit_amount, withdraw_amount, withdraw_choice, account_num)
        if menu_choice == 1:
            create_account(name, account_num, birth_day, pin_num)
            repeat_menu(menu_choice, name, deposit_amount, withdraw_amount, withdraw_choice, account_num)
        elif menu_choice == 2:
            bank_login(account_num, pin_num, menu_choice)
        elif menu_choice == 3:
            print("\nExiting: Goodbye!\n")
            sys.exit()
        else:
            print("Please choose a VALID option from the menu 1-3.")
            break
# main
print("""
== == == == == == == == == == == == == == == == == == == == ==\n
Hello There. Welcome to Easy Bank!\n
== == == == == == == == == == == == == == == == == == == == ==
""")
display_menu(menu_choice, name, deposit_amount, withdraw_amount, withdraw_choice, account_num)
# NOTE(review): display_menu exits via sys.exit(), so this close() is
# normally never reached — confirm.
connection.close()
class Student:
    """A student record holding a name, a student id and per-course grades."""
    def __init__(self, name, student_id):
        self.name = name
        self.student_id = student_id
        # Grades start at 0 for the three fixed courses.
        self.grades = {"语文": 0, "数学": 0, "英语": 0}
    def setting_grade(self, course, grade):
        """Record *grade* for *course*; unknown courses are silently ignored."""
        if course in self.grades:
            self.grades[course] = grade
    def print_grades(self):
        """Print every course grade for this student."""
        print(f"学生{self.name}(学号:{self.student_id})的成绩为:")
        for course, score in self.grades.items():
            print(f"{course}:{score}分")
# Demo: create a student, set two grades and print the report card.
# zeng= Student("小曾","100002")
# print(chen.name)
# zeng.setting_grade("数学",95)
# print(zeng.grades)
chen= Student("小陈","100001")
chen.setting_grade("语文",92)
chen.setting_grade("数学",94)
chen.print_grades()
| OpenAI01/AI- | 对象实战.py | 对象实战.py | py | 712 | python | en | code | 1 | github-code | 13 |
4927020281 | '''
Created on Oct 12, 2019
@author: mvelasco
'''
from optimalTransports import Empirical_Measure, dist
from gurobipy import *
import numpy as np
import pdb
class polytope:
    """
    This class is a description of a polytope via inequalities.
    It can compute the Chebyshev center of any such polytope
    The inequalities take the form
    Ineq_Vector \dot variables <= RHS
    and Eq_vectors \dot variables == RHS
    The Chebyshev center inequalities require the dual norm of the constraints so the polytope requires a dualnorm_fn.
    """
    def __init__(self, dualnorm_fn):
        self.numIneqs = 0
        self.model = Model("Chebyshev_Center")
        self.dim = 0
        self.Ineq_vectors = []
        self.Ineq_RHss = []
        self.Eq_vectors = []
        self.Eq_RHss = []
        self.dualnorm_fn = dualnorm_fn
        self.gurobiVars = []
        self.XVars = []
    def initialize_cube(self, dim, diamK):
        """Creates a cube in R^dim dimensions,
        with center (0,0) and infinity norm at most diamK
        which we can use as initial setup """
        self.dim = dim
        for k in range(dim):
            # Pair of inequalities per coordinate: x_k <= diamK and -x_k <= diamK.
            vector = np.zeros(dim)
            vector[k]=1.0
            self.Ineq_vectors.append(vector)
            vector = np.zeros(dim)
            vector[k]=-1.0
            self.Ineq_vectors.append(vector)
            self.Ineq_RHss.append(diamK)
            self.Ineq_RHss.append(diamK)
            self.numIneqs += 2
        """self.Eq_vectors.append(np.array([1.0 for k in range(dim)]))
        self.Eq_RHss.append(0.0)"""
    def initialize_chebyshev_model(self):
        """This function specifies the optimization problem to be run for finding Chebyshev centers
        """
        #One variable per dimension
        names = ["X_"+str(k) for k in range(self.dim)]
        for name in names:
            self.gurobiVars.append(self.model.addVar(name=name,vtype=GRB.CONTINUOUS, lb = (-1)*float("inf"), ub = float("inf")))
        #additional nonnegative variable r, for the radius of the ball
        rvar = self.model.addVar(name="r",vtype=GRB.CONTINUOUS) #Gurobi DEFAULT behavior is making continuous variables automatically nonnegative
        self.gurobiVars.append(rvar)
        self.gurobiR = rvar
        #r will be the last variable
        #We construct the inequalities of the Tchebyshev center problem
        # Each inequality a.x <= b becomes a.x + ||a||_* r <= b, so the
        # dual-norm coefficient of r is appended as the last entry.
        newIneq_vectors = []
        for vector in self.Ineq_vectors:
            newVector = np.zeros(len(vector)+1)
            for k in range(self.dim +1):
                if k < self.dim:
                    newVector[k] = vector[k]
                else:
                    newVector[k] = self.dualnorm_fn(vector)
            newIneq_vectors.append(newVector)
        #Next we add the inequalities to the Model:
        for k in range(len(self.Ineq_vectors)):
            coeff_vector = newIneq_vectors[k]
            try:
                assert(len(coeff_vector)==len(self.gurobiVars))
            except:
                # NOTE(review): pdb.set_trace() on a bare except looks like a
                # debugging leftover — confirm before shipping.
                pdb.set_trace()
            gurobiLH = LinExpr(coeff_vector, self.gurobiVars)
            rhs = self.Ineq_RHss[k]
            self.model.addConstr(gurobiLH, sense = "<=", rhs = rhs, name = "Ineq_"+str(k))
        #The equalities involve only the X vars and not r
        self.XVars =[self.gurobiVars[k] for k in range(self.dim)]
        for k in range(len(self.Eq_vectors)):
            coeff_vector = self.Eq_vectors[k]
            gurobiLH = LinExpr(coeff_vector, self.XVars)
            rhs = self.Eq_RHss[k]
            self.model.addConstr(gurobiLH, sense = "==", rhs = rhs, name = "Eq_"+str(k))
        #We specify the objective function and that it is a maximization problem,
        # Objective: maximize r (the last variable).
        obj_coeffs = [0.0 for k in range(self.dim)]
        obj_coeffs.append(1.0)
        gurobiOBJ = LinExpr(obj_coeffs, self.gurobiVars)
        self.model.setObjective(gurobiOBJ , sense = GRB.MAXIMIZE )
        self.model.update()
    def compute_chebyshev_center(self):
        # Solve the model and cache the optimal center and radius.
        self.model.update()
        self.model.optimize()
        self.current_Chebyshev_Center = np.array([Var.X for Var in self.XVars])
        self.current_r = self.gurobiR.X
    def new_linear_Ineq(self, coeffs_vector, rhs):
        #We add a new linear inequality to the polytope and to the model
        assert(self.dim == len(coeffs_vector))
        self.Ineq_vectors.append(np.array(coeffs_vector))
        self.Ineq_RHss.append(rhs)
        self.numIneqs += 1
        # Augment with the dual-norm coefficient for r, as in the initial model.
        newVector = np.zeros(self.dim + 1)
        for k in range(self.dim):
            newVector[k] = coeffs_vector[k]
        newVector[self.dim] = self.dualnorm_fn(coeffs_vector)
        gurobiLH = LinExpr(newVector, self.gurobiVars)
        self.model.addConstr(gurobiLH, sense = "<=", rhs = rhs, name = "Ineq_"+str(self.numIneqs))
class minimum_cross_entropy_finder:
def __init__(self, num_MC, samplep_fn, empirical_measure, delta, diamK , dualnorm_fn):
self.num_MC = num_MC
self.samplep_fn = samplep_fn
self.empirical_measure = empirical_measure
self.distance = self.empirical_measure.distance
#self.dim = self.empirical_measure.dim
self.delta = delta
self.diamK = diamK
self.dualnorm_fn = dualnorm_fn
#Start with a cube
self.current_polytope = polytope(dualnorm_fn)
N = self.empirical_measure.ndata_vectors
self.current_polytope.initialize_cube(N, diamK)
self.current_polytope.initialize_chebyshev_model()
self.current_polytope.compute_chebyshev_center()
#Create the samples of the prior
self.samples_p = self.samplep_fn(self.num_MC)
#Begin at Chebyshev center
self.current_lambdas = self.current_polytope.current_Chebyshev_Center
self.maxUVsteps = 200
def weighted_nearest_data_point_index(self, vector):
N = self.empirical_measure.ndata_vectors
weighted_distances = np.array([self.distance(vector, self.empirical_measure.data_vectors[k])-self.current_lambdas[k] for k in range(N)])
k = np.where(weighted_distances == np.min(weighted_distances))[0]
return(k[0])
def weighted_classify_nearest(self, vectors_to_classify):
Classified_Points = [[] for k in range(self.empirical_measure.ndata_vectors)]
for vector in vectors_to_classify:
k = self.weighted_nearest_data_point_index(vector)
Classified_Points[k].append(vector)
return Classified_Points
def minimum_weighted_distance(self,vector):
N = self.empirical_measure.ndata_vectors
weighted_distances = [self.distance(vector,self.empirical_measure.data_vectors[k])-self.current_lambdas[k] for k in range(N)]
return np.min(weighted_distances)
def project_vector_to_Lambda(self, center_vector):
#Orthogonal projection of a vector onto the subspace Lambda with components adding to zero
dev = np.full(len(center_vector), np.average(center_vector))
return(center_vector-dev)
def compute_good_uv(self, method="backtracking", verbose = False):
#The function is computed with a MonteCarlo. Its samples phi below are evaluated only once.
phi_lambdas = [self.minimum_weighted_distance(vector) for vector in self.samples_p]
def obj_value(vector_UV):
u= vector_UV[0]
v= vector_UV[1]
return(-u-v*self.delta-np.sum([np.exp(-1-v*phiL-u) for phiL in phi_lambdas])/self.num_MC)
def gradient_vector(vector_UV):
u= vector_UV[0]
v= vector_UV[1]
gradient = np.array([0.0,0.0])
gradient[0] = -1 + (np.sum([np.exp(-1-v*phiL-u) for phiL in phi_lambdas]))/self.num_MC
gradient[1] = -self.delta + (np.sum([phiL*np.exp(-1-v*phiL-u) for phiL in phi_lambdas]))/self.num_MC
return(gradient)
def project_to_feasible(vector_UV):
result = np.zeros(2)
result[0] = vector_UV[0]
result[1] = max(0.0,vector_UV[1])
return result
#Begin by computing the current initial value
#We begin at the origin
self.currentUV = np.array([0.0,1.0])
self.current_objective_value = obj_value(self.currentUV)
#Compute the gradient there
self.currentUVGradient = gradient_vector(self.currentUV)
self.currentGradientNorm = np.linalg.norm(self.currentUVGradient)
#This is the best value we have seen and the location where this value occurs
self.best_objective_value = obj_value(self.currentUV)
self.bestUV = self.currentUV
if method == "backtracking":
#Increment by gradient ascent with backtracking:
self.back_alpha = 0.15
self.back_beta = 0.5
for k in range(self.maxUVsteps):
t=1 #Step_Size
vector_UV = self.currentUV
future_vector_UV = project_to_feasible(vector_UV+t*self.currentUVGradient)
deltaX = future_vector_UV-vector_UV
cond1 = bool( obj_value(future_vector_UV) < self.current_objective_value + self.back_alpha*t*np.dot(self.currentUVGradient, deltaX))
while cond1:
t=t*self.back_beta
future_vector_UV = project_to_feasible(vector_UV+t*self.currentUVGradient)
deltaX = future_vector_UV-vector_UV
cond1 = bool( obj_value(future_vector_UV) < self.current_objective_value + self.back_alpha*t*np.dot(self.currentUVGradient, deltaX))
self.currentUV = future_vector_UV
self.bestUV = future_vector_UV
self.current_objective_value = obj_value(self.currentUV)
self.currentUVGradient = gradient_vector(self.currentUV)
self.currentGradientNorm = np.linalg.norm(self.currentUVGradient)
if verbose:
print("Step " + str(k) + ":")
print("Step_Size: "+str(t))
print("Obj: "+ str(self.current_objective_value))
print("Gradient norm: "+str(self.currentGradientNorm))
print("(u,v): " + str(self.currentUV))
if method == "gradient":
#Increment by gradient ascent
for k in range(self.maxUVsteps):
u=self.currentUV[0]
v=self.currentUV[1]
self.currentUVGradient[0] = -1 + (np.sum([np.exp(-1-v*phiL-u) for phiL in phi_lambdas]))/self.num_MC
self.currentUVGradient[1] = -self.delta + (np.sum([phiL*np.exp(-1-v*phiL-u) for phiL in phi_lambdas]))/self.num_MC
self.current_objective_value = obj_value(self.currentUV)
stepSize = (1/(self.delta+0.1))*(1/(np.log(k+1)+1))*self.dualnorm_fn(self.currentUVGradient)
#En u siempre se hace un paso de descenso del gradiente
self.currentUV[0]+=self.currentUVGradient[0]*stepSize
#En v intentamos dar el paso, si nos salimos hay que proyectar
nextV = self.currentUV[1] + self.currentUVGradient[1]*stepSize
if nextV<=0.0:
self.currentUV[1] = 0.0
else:
self.currentUV[1] = nextV
if self.current_objective_value>= self.best_objective_value:
self.best_objective_value = self.current_objective_value
self.bestUV = self.currentUV
if verbose:
print("Step :" +str(k))
print("Step_Size: "+str(t))
print("Obj: "+ str(self.current_objective_value))
print("Gradient norm: "+str(self.currentGradientNorm))
print("Mejor (u,v) :"+str( self.bestUV))
print("Entropia :"+str(obj_value(self.bestUV)))
    def compute_separating_hyperplane(self):
        """Return the (super-)gradient vector used as a new cutting plane.

        Classifies the Monte Carlo samples with the current weighted
        nearest-point rule and, for each empirical data point, combines the
        Monte Carlo mass of its cell with the uniform weight 1/N.
        """
        N = self.empirical_measure.ndata_vectors
        classified_samples = self.weighted_classify_nearest(self.samples_p)
        # First we compute the super-gradient: entry k is
        # 1/N minus the Monte Carlo estimate of cell k's mass.
        counts = [(-1)*len(res)/(self.num_MC) + (1/N) for res in classified_samples]
        return np.array(counts)
    def cutting_plane_one_step(self, verbose = False):
        """Perform one iteration of the cutting-plane method.

        Computes a separating hyperplane at the current lambdas, adds the
        corresponding inequality to the working polytope, recomputes its
        Chebyshev center and projects that center back onto Lambda.

        NOTE(review): the ``verbose`` parameter is currently unused.
        """
        self.compute_good_uv()
        g = self.compute_separating_hyperplane()
        # The new inequality is -g . lambda <= -g . current_lambdas,
        # i.e. the half-space cutting off the current iterate.
        rhs = (-1)*np.dot(g,self.current_lambdas)
        self.current_polytope.new_linear_Ineq((-1)*g, rhs)
        if WRITE:
            # Dump the LP model to disk for offline inspection when debugging.
            model = self.current_polytope.model
            model.write("intento.lp")
        self.current_polytope.compute_chebyshev_center()
        self.current_lambdas = self.project_vector_to_Lambda(self.current_polytope.current_Chebyshev_Center)
        print("Current Radius : "+ str(self.current_polytope.current_r))
def norm(x):
    """Euclidean (L2) norm of the vector ``x``."""
    vector = np.asarray(x)
    return np.linalg.norm(vector)
# The prior, implemented as a function capable of producing samples.
def sample_p(numSamples):
    """Draw ``numSamples`` points uniformly from the square [-1,1] x [-1,1].

    Returns a list of two-element lists [x, y].
    """
    xs = np.random.uniform(-1, 1, numSamples)
    ys = np.random.uniform(-1, 1, numSamples)
    return [[x, y] for x, y in zip(xs, ys)]
if __name__ == "__main__":
    # Smoke test: build a small polytope, inspect its Chebyshev center,
    # then run the cutting-plane cross-entropy minimizer on a toy data set.
    WRITE=False
    P = polytope(norm)
    N = 2
    diamK=1.0
    P.initialize_cube(N,diamK)
    P.initialize_chebyshev_model()
    P.compute_chebyshev_center()
    print("_______________________________________________")
    print("Center: " + str(P.current_Chebyshev_Center))
    print("Radius: " + str(P.current_r))
    print("Dimension: "+str(P.dim))
    print("Num_Ineqs: "+ str(P.numIneqs))
    print("_______________________________________________")
    print("done")
    # Add the half-space x + y <= 0 and recompute the center.
    P.new_linear_Ineq([1,1], 0)
    P.compute_chebyshev_center()
    print("\n")
    print(P.current_Chebyshev_Center)
    print("done")
    # NOTE(review): interactive debugger breakpoint left in -- execution
    # stops here until the user continues from the pdb prompt.
    pdb.set_trace()
    Q=polytope(norm)
    #empirical_data_points_vector = [np.random.uniform(-1,1,2) for k in range(4)]
    empirical_data_points_vector = [[1,0], [0,0], [0,1]]
    # NOTE(review): `dist` is not defined in this chunk -- presumably a
    # distance function defined/imported elsewhere; verify before running.
    mu = Empirical_Measure(empirical_data_points_vector, dist)
    MF = minimum_cross_entropy_finder(10000, sample_p, mu, 0.005, 16.0, norm)
    for k in range(20):
        print("Step "+str(k))
        MF.cutting_plane_one_step()
    print(MF.current_lambdas)
21346747896 | from django.urls import path
from rest_framework_simplejwt.views import TokenObtainPairView, TokenRefreshView, TokenVerifyView
from .views import CustomerCreateView, ManagerCreateView, AdminCreateView
# URL routes for the user app: per-role account creation endpoints plus the
# SimpleJWT token obtain/refresh/verify endpoints.
urlpatterns = [
    path("customer/create/", CustomerCreateView.as_view(), name="customer-create"),
    path("manager/create/", ManagerCreateView.as_view(), name="manager-create"),
    path("admin/create/", AdminCreateView.as_view(), name="admin-create"),
    path("token/", TokenObtainPairView.as_view(), name="token_obtain_pair"),
    path("token/refresh/", TokenRefreshView.as_view(), name="token_refresh"),
    path("token/verify/", TokenVerifyView.as_view(), name="token_verify"),
]
# Namespace used when reversing these routes, e.g. "user:token_refresh".
app_name = "user"
| KatrinLazarenko/perfumes_shop | user/urls.py | urls.py | py | 709 | python | en | code | 0 | github-code | 13 |
43114289762 | tc = int(input())
# BOJ 1966 "Printer Queue": for each test case, report the position in the
# print order of the document at index m, where a document is only printed
# if no other queued document has a strictly higher priority.
for _ in range(tc):
    queue = []
    n, m = map(int, input().split())
    tmp = list(map(int, input().split()))
    for i in range(len(tmp)):
        # Keep (priority, original index) so the target document stays traceable.
        queue.append((tmp[i], i))
    # print()
    # print(queue)
    # print()
    count = 0
    while queue:
        curr = queue.pop(0)
        if queue and curr[0] < max(queue)[0]: # priority comparison
            queue.append(curr) # anything below the queue's max priority goes back to the end
        else:
            count += 1
            if curr[1] == m:
                print(count)
23028455192 | # -*- coding: utf-8 -*-
"""
Created on Mon Jul 9 17:29:41 2018
@author: 天津拨云咨询服务有限公司 lilizong@gmail.com
"""
import cv2
import numpy as np
import matplotlib.pyplot as plt
image=cv2.imread("image\\girl.bmp",cv2.IMREAD_GRAYSCALE)
mask=np.zeros(image.shape,np.uint8)
mask[200:400,200:400]=255
histMI=cv2.calcHist([image],[0],mask,[256],[0,255])
histImage=cv2.calcHist([image],[0],None,[256],[0,255])
plt.plot(histImage)
plt.plot(histMI)
| IBNBlank/toy_code | OpenCV-Repository-master/13.直方图/example/14.5掩膜直方图.py | 14.5掩膜直方图.py | py | 460 | python | en | code | 0 | github-code | 13 |
16179354735 | from __future__ import print_function
import os
import sys
import glob
import warnings
import functools
import operator
from argparse import ArgumentParser
import numpy as np
import mdtraj as md
from mdtraj.core.trajectory import _parse_topology
from mdtraj.utils import in_units_of
from mdtraj.utils.six import iteritems
###############################################################################
# Crappy class that should go elsewhere
###############################################################################
###############################################################################
# Globals
###############################################################################
# Map: file extension -> mdtraj TrajectoryFile class used to read/write it.
formats = {'.dcd': md.formats.DCDTrajectoryFile,
           '.xtc': md.formats.XTCTrajectoryFile,
           '.trr': md.formats.TRRTrajectoryFile,
           '.binpos': md.formats.BINPOSTrajectoryFile,
           '.nc': md.formats.NetCDFTrajectoryFile,
           '.netcdf': md.formats.NetCDFTrajectoryFile,
           '.h5': md.formats.HDF5TrajectoryFile,
           '.lh5': md.formats.LH5TrajectoryFile,
           '.pdb': md.formats.PDBTrajectoryFile}
# Map: file extension -> tuple of data fields the format can store, in the
# positional order that the format's read() returns them.
fields = {'.trr': ('xyz', 'time', 'step', 'box', 'lambda'),
          '.xtc': ('xyz', 'time', 'step', 'box'),
          '.dcd': ('xyz', 'cell_lengths', 'cell_angles'),
          '.nc': ('xyz', 'time', 'cell_lengths', 'cell_angles'),
          '.netcdf': ('xyz', 'time', 'cell_lengths', 'cell_angles'),
          '.binpos': ('xyz',),
          '.lh5': ('xyz', 'topology'),
          '.h5': ('xyz', 'time', 'cell_lengths', 'cell_angles',
                  'velocities', 'kineticEnergy', 'potentialEnergy',
                  'temperature', 'lambda', 'topology'),
          '.pdb': ('xyz', 'topology', 'cell_angles', 'cell_lengths')}
# Map: file extension -> distance unit the format stores coordinates in.
units = {'.xtc': 'nanometers',
         '.trr': 'nanometers',
         '.binpos': 'angstroms',
         '.nc': 'angstroms',
         '.netcdf': 'angstroms',
         '.dcd': 'angstroms',
         '.h5': 'nanometers',
         '.lh5': 'nanometers',
         '.pdb': 'angstroms'}
###############################################################################
# Utility Functions
###############################################################################
ext = lambda fn: os.path.splitext(fn)[1]
class _Warner(object):
def __init__(self):
self.active = True
def __call__(self, msg):
if self.active:
print('Warning:', msg, file=sys.stderr)
warn = _Warner()
def index(str):
    """Parse a frame-index specification into an ``int`` or a ``slice``.

    Accepts pythonic slice notation: ``'N'`` -> int, ``'A:B'`` ->
    ``slice(A, B)``, ``'A:B:C'`` -> ``slice(A, B, C)``; empty fields mean
    ``None``.  (The parameter shadows the builtin ``str``; the name is kept
    for interface compatibility.)

    Raises
    ------
    ValueError
        If more than two colons are present.  Previously this case fell
        through and raised a confusing ``NameError`` because ``start``,
        ``end`` and ``step`` were never bound.
    """
    if str.count(':') == 0:
        return int(str)
    elif str.count(':') == 1:
        start, end = [(None if e == '' else int(e)) for e in str.split(':')]
        step = None
    elif str.count(':') == 2:
        start, end, step = [(None if e == '' else int(e)) for e in str.split(':')]
    else:
        # argparse converts ValueError from a `type=` callable into a
        # clean usage error message.
        raise ValueError('invalid index specification: %s' % str)
    return slice(start, end, step)
###############################################################################
# Code
###############################################################################
def parse_args():
    """Parse the command line arguments and perform some validation on the
    arguments.

    Returns
    -------
    args : argparse.Namespace
        The namespace containing the arguments
    """
    extensions = ', '.join(list(formats.keys()))
    parser = ArgumentParser(description='''Convert molecular dynamics
        trajectories between formats. The DCD, XTC, TRR, PDB, binpos, NetCDF,
        binpos, LH5, and HDF5 formats are supported (%s)''' % extensions)
    parser.add_argument('input', nargs='+', help='''path to one or more
                        trajectory files. Multiple trajectories, if supplied, will
                        be concatenated together in the output file in the order
                        supplied. all of the trajectories should be in the same
                        format. the format will be detected based on the file
                        extension''')
    required = parser.add_argument_group('required arguments')
    required.add_argument('-o', '--output', required=True,
                          help='''path to the save the output. the output
                          format will chosen based on the file extension
                          (%s)''' % extensions)
    # dirty hack to move the 'optional arguments' group to the end. such that
    # the 'required arguments' group shows up before it.
    parser._action_groups.append(parser._action_groups.pop(1))
    parser.add_argument('-c', '--chunk', default=1000, type=int,
                        help='''number of frames to read in at once. this
                        determines the memory requirements of this code.
                        default=1000''')
    parser.add_argument('-f', '--force', action='store_true',
                        help='''force overwrite if output already exsits''')
    parser.add_argument('-s', '--stride', default=1, type=int, help='''load
                        only every stride-th frame from the input file(s),
                        to subsample.''')
    parser.add_argument('-i', '--index', type=index, help='''load a *specific*
                        set of frames. flexible, but inefficient for a large
                        trajectory. specify your selection using (pythonic)
                        "slice notation" e.g. '-i N' to load the the Nth
                        frame, '-i -1' will load the last frame, '-i N:M to
                        load frames N to M, etc. see http://bit.ly/143kloq
                        for details on the notation''')
    parser.add_argument('-a', '--atom_indices', type=str,
                        help='''load only specific atoms from the input file(s).
                        provide a path to file containing a space, tab or
                        newline separated list of the (zero-based) integer
                        indices corresponding to the atoms you wish to keep.''')
    parser.add_argument('-t', '--topology', type=str, help='''path to a
                        PDB/prmtop file. this will be used to parse the topology
                        of the system. it's optional, but useful. if specified,
                        it enables you to output the coordinates of your
                        dcd/xtc/trr/netcdf/binpos as a PDB file. If you\'re
                        converting *to* .h5, the topology will be stored
                        inside the h5 file.''')
    args = parser.parse_args()
    if not args.force and os.path.exists(args.output):
        parser.error('file exists: %s' % args.output)
    # rebuild the input list, doing any glob expansions
    # necessary
    input = []
    for fn in args.input:
        if not os.path.exists(fn):
            if '*' in fn:
                input.extend(glob.glob(fn))
            else:
                parser.error('No such file: %s' % fn)
        elif os.path.isdir(fn):
            parser.error('%s: Is a directory' % fn)
        elif not os.path.isfile(fn):
            parser.error('%s: Is not a file' % fn)
        else:
            input.append(fn)
    args.input = input
    # --- validation: extensions, file existence, option compatibility ---
    for fn in args.input:
        if not ext(fn) in formats:
            parser.error("%s: '%s' is not a known extension" % (fn, ext(fn)))
    extensions = list(map(ext, args.input))
    if any(e != extensions[0] for e in extensions):
        parser.error("all input trajectories do not have the same extension")
    if not ext(args.output) in formats:
        parser.error("%s: '%s' is not a known extension" % (args.output,
                                                            ext(args.output)))
    if args.atom_indices is not None and not os.path.isfile(args.atom_indices):
        parser.error('no such file: %s' % args.atom_indices)
    if args.stride <= 0:
        parser.error('stride must be positive')
    if args.chunk <= 0:
        parser.error('chunk must be positive')
    if args.index and len(args.input) > 1:
        parser.error('index notation only allowed with a single input trajectory')
    if args.index and args.stride != 1:
        parser.error('stride and index selections are incompatible')
    if args.index is not None:
        # index mode loads everything at once; see the assertion in main().
        args.chunk = None
    if args.topology is not None and not os.path.isfile(args.topology):
        parser.error('no such file: %s' % args.topology)
    if ((args.topology is None and not all(ext(e) in ['.h5', '.lh5', '.pdb'] for e in args.input))
            and ext(args.output) in ['.h5', '.lh5', '.pdb']):
        parser.error('to output a %s file, you need to supply a topology (-t, or --topology)' % ext(args.output))
    if args.chunk is not None and (args.chunk % args.stride != 0):
        parser.error('--stride must be a divisor of --chunk')
    return args
def main(args, verbose=True):
    """Run the main script.

    Reads each input trajectory chunk-by-chunk, converts units/fields to
    what the output format supports, and appends the frames to the output
    file.

    Parameters
    ----------
    args : argparse.Namespace
        The collected command line arguments
    verbose : bool, default=True
        Print a running frame count to stdout.
    """
    if args.atom_indices is not None:
        atom_indices = np.loadtxt(args.atom_indices, int)
    else:
        atom_indices = None

    out_x = ext(args.output)
    out_units = units[out_x]
    out_fields = fields[out_x]
    OutFileFormat = formats[out_x]

    in_x = ext(args.input[0])
    InFileFormat = formats[in_x]

    if args.topology is not None:
        topology = _parse_topology(args.topology)
    else:
        topology = None

    if topology is not None and atom_indices is not None:
        topology = topology.subset(atom_indices)

    n_total = 0
    if args.index is not None:
        assert len(args.input) == 1
        # when chunk is None, we load up ALL of the frames. this isn't
        # strictly necessary, and it costs more memory, but it's ALOT
        # harder to get the code correct when we need to use data[start:end]
        # notation when all of the data isn't loaded up at once. it's easy
        # for hdf5 and netcdf, but for the others...
        assert args.chunk is None

    # this is the normal invocation pattern, but for PDBTrajectoryFile it's
    # different
    outfile_factory = functools.partial(OutFileFormat, args.output, 'w',
                                        force_overwrite=args.force)

    with outfile_factory() as outfile:
        for fn in args.input:
            assert in_x == ext(fn)
            with InFileFormat(fn, 'r') as infile:
                while True:
                    data, in_units, n_frames = read(infile, args.chunk, stride=args.stride,
                                                    atom_indices=atom_indices)
                    if n_frames == 0:
                        break

                    if topology is not None:
                        # if the user supplied a topology, sanity-check it
                        # against the trajectory before attaching it
                        if data['xyz'].shape[1] != topology._numAtoms:
                            # fix: the warning previously printed the
                            # placeholder text 'sdsfsd!!!!'
                            warnings.warn('number of atoms in the topology (%d) does not '
                                          'match the trajectory (%d)'
                                          % (topology._numAtoms, data['xyz'].shape[1]))
                        data['topology'] = topology

                    # if they want a specific set of frames, get those
                    # with slice notation
                    if args.index is not None:
                        _data = {}
                        for k, v in iteritems(data):
                            if isinstance(v, np.ndarray):
                                # we don't want the dimensionality to go deficient
                                if isinstance(args.index, int):
                                    _data[k] = v[np.newaxis, args.index]
                                else:
                                    _data[k] = v[args.index]
                            elif isinstance(v, md.Topology):
                                _data[k] = v
                            else:
                                # fix: this previously raised a NameError via
                                # the misspelled name 'RuntineError'
                                raise RuntimeError('unexpected field %r of type %s' % (k, type(v)))
                        data = _data
                        # debug print of data.keys() removed here
                        n_frames = len(data['xyz'])

                    convert(data, in_units, out_units, out_fields)
                    write(outfile, data)
                    n_total += n_frames

                    if verbose:
                        sys.stdout.write('\rconverted %d frames, %d atoms' % (n_total, data['xyz'].shape[1]))
                        sys.stdout.flush()

    if verbose:
        print(' ')
def write(outfile, data):
    """Write data out to a file.

    This is a small wrapper around the native write() method on the
    XXXTrajectoryFile objects that is necessary to make sure we pass the
    right arguments in the right position.

    Parameters
    ----------
    outfile : TrajectoryFile
        An open trajectory file with a write() method
    data : dict
        A dict with the data to write in it, keyed by the field names
        listed in the module-level ``fields`` table.
    """
    if isinstance(outfile, md.formats.XTCTrajectoryFile):
        outfile.write(data.get('xyz', None), data.get('time', None),
                      data.get('step', None), data.get('box', None))
    elif isinstance(outfile, md.formats.TRRTrajectoryFile):
        # bugfix: the value read from a TRR is stored under the key
        # 'lambda' (see fields['.trr']); the previous lookup of 'lambd'
        # always returned None and silently dropped the data.
        outfile.write(data.get('xyz', None), data.get('time', None),
                      data.get('step', None), data.get('box', None),
                      data.get('lambda', None))
    elif isinstance(outfile, md.formats.DCDTrajectoryFile):
        outfile.write(data.get('xyz', None), data.get('cell_lengths', None),
                      data.get('cell_angles', None))
    elif isinstance(outfile, md.formats.BINPOSTrajectoryFile):
        outfile.write(data.get('xyz', None))
    elif isinstance(outfile, md.formats.PDBTrajectoryFile):
        # PDB has no bulk interface: write frame by frame, attaching the
        # unit cell of each frame when available.
        lengths, angles = None, None
        for i, frame in enumerate(data.get('xyz')):
            if 'cell_lengths' in data:
                lengths = data['cell_lengths'][i]
            if 'cell_angles' in data:
                angles = data['cell_angles'][i]
            outfile.write(frame, data.get('topology', None), i, lengths, angles)
    elif isinstance(outfile, md.formats.NetCDFTrajectoryFile):
        outfile.write(data.get('xyz', None), data.get('time', None),
                      data.get('cell_lengths', None), data.get('cell_angles', None))
    elif isinstance(outfile, md.formats.HDF5TrajectoryFile):
        outfile.write(data.get('xyz', None), data.get('time', None),
                      data.get('cell_lengths', None), data.get('cell_angles', None),
                      data.get('velocities', None), data.get('kineticEnergy', None),
                      data.get('potentialEnergy', None), data.get('temperature', None),
                      data.get('lambda', None))
        if outfile.topology is None:
            # only want to write the topology once if we're chunking
            outfile.topology = data.get('topology', None)
    elif isinstance(outfile, md.formats.LH5TrajectoryFile):
        outfile.write(data.get('xyz', None))
        if outfile.topology is None:
            # only want to write the topology once if we're chunking
            outfile.topology = data.get('topology', None)
    else:
        raise RuntimeError('unknown output file type: %r' % outfile)
def read(infile, chunk, stride, atom_indices):
    """Read data from the infile.

    This is a small wrapper around the read() method on the XXXTrajectoryFile
    that performs the read and then puts the results in a little dict. It also
    returns the distance units that the file uses.

    Returns a 3-tuple ``(data, in_units, n_frames)``; ``n_frames`` is 0 when
    the file is exhausted.
    """
    if not isinstance(infile, md.formats.PDBTrajectoryFile):
        _data = infile.read(chunk, stride=stride, atom_indices=atom_indices)
    if isinstance(infile, md.formats.PDBTrajectoryFile):
        if infile.closed:
            # signal that we're done reading this pdb
            return None, None, 0
        if atom_indices is None:
            atom_indices = slice(None)
            topology = infile.topology
        else:
            topology = infile.topology.subset(atom_indices)
        # PDB exposes all frames up front; apply stride/atom selection here.
        data = {'xyz': infile.positions[::stride, atom_indices, :],
                'topology': topology}
        if infile.unitcell_lengths is not None:
            # replicate the single unit cell across every frame
            data['cell_lengths'] =np.array([infile.unitcell_lengths] * len(data['xyz']))
            data['cell_angles'] = np.array([infile.unitcell_angles] * len(data['xyz']))
        in_units = 'angstroms'
        infile.close()
    elif isinstance(infile, md.formats.XTCTrajectoryFile):
        data = dict(zip(fields['.xtc'], _data))
        in_units = 'nanometers'
    elif isinstance(infile, md.formats.TRRTrajectoryFile):
        data = dict(zip(fields['.trr'], _data))
        in_units = 'nanometers'
    elif isinstance(infile, md.formats.DCDTrajectoryFile):
        data = dict(zip(fields['.dcd'], _data))
        in_units = 'angstroms'
    elif isinstance(infile, md.formats.BINPOSTrajectoryFile):
        data = {'xyz': _data}
        in_units = 'angstroms'
    elif isinstance(infile, md.formats.NetCDFTrajectoryFile):
        data = dict(zip(fields['.nc'], _data))
        in_units = 'angstroms'
    elif isinstance(infile, md.formats.HDF5TrajectoryFile):
        data = dict(zip(fields['.h5'], _data))
        data['topology'] = infile.topology  # need to hack this one in manually
        if atom_indices is not None:
            data['topology'] = data['topology'].subset(atom_indices)
        in_units = 'nanometers'
    elif isinstance(infile, md.formats.LH5TrajectoryFile):
        data = {'xyz': _data}
        data['topology'] = infile.topology  # need to hack this one in manually
        if atom_indices is not None:
            data['topology'] = data['topology'].subset(atom_indices)
        in_units = 'nanometers'
    else:
        raise RuntimeError
    # drop fields the reader returned as None
    data = dict((k, v) for k, v in data.items() if v is not None)
    return data, in_units, (0 if 'xyz' not in data else len(data['xyz']))
def convert(data, in_units, out_units, out_fields):
    """Convert ``data`` in place to the output format's units and fields.

    Rescales coordinates, and interconverts between the 'box' (vectors)
    and 'cell_lengths'/'cell_angles' unit-cell representations depending
    on what the output format stores.  Warns (once) about fields that the
    output format cannot represent.
    """
    # do unit conversion
    if 'xyz' in out_fields and 'xyz' in data:
        data['xyz'] = in_units_of(data['xyz'], in_units, out_units, inplace=True)
    if 'box' in out_fields:
        if 'box' in data:
            data['box'] = in_units_of(data['box'], in_units, out_units, inplace=True)
        elif 'cell_angles' in data and 'cell_lengths' in data:
            # output wants box vectors: build them from lengths/angles
            a, b, c = data['cell_lengths'].T
            alpha, beta, gamma = data['cell_angles'].T
            data['box'] = np.dstack(md.utils.unitcell.lengths_and_angles_to_box_vectors(a, b, c, alpha, beta, gamma))
            data['box'] = in_units_of(data['box'], in_units, out_units, inplace=True)
            del data['cell_lengths']
            del data['cell_angles']
    if 'cell_lengths' in out_fields:
        if 'cell_lengths' in data:
            data['cell_lengths'] = in_units_of(data['cell_lengths'], in_units, out_units, inplace=True)
        elif 'box' in data:
            # output wants lengths/angles: decompose the box vectors
            a, b, c, alpha, beta, gamma = md.utils.unitcell.box_vectors_to_lengths_and_angles(data['box'][:, 0], data['box'][:, 1], data['box'][:, 2])
            data['cell_lengths'] = np.vstack((a, b, c)).T
            data['cell_angles'] = np.vstack((alpha, beta, gamma)).T
            data['cell_lengths'] = in_units_of(data['cell_lengths'], in_units, out_units, inplace=True)
            del data['box']
    ignored_keys = ["'%s'" % s for s in set(data) - set(out_fields)]
    formated_fields = ', '.join("'%s'" % o for o in out_fields)
    if len(ignored_keys) > 0:
        warn('%s data from input file(s) will be discarded. '
             'output format only supports fields: %s' % (', '.join(ignored_keys),
                                                         formated_fields))
    # silence the warner so the message is printed at most once per run
    warn.active = False
    return data
def entry_point():
    """Console-script entry point: parse the CLI arguments and run."""
    main(parse_args())


if __name__ == '__main__':
    entry_point()
| mdtraj/mdtraj | mdtraj/scripts/mdconvert.py | mdconvert.py | py | 19,501 | python | en | code | 505 | github-code | 13 |
def lower_bound(arr, x):
    """Index of the first element of sorted ``arr`` that is >= ``x``.

    Returns ``len(arr)`` when every element is smaller than ``x``.
    """
    lo, hi = -1, len(arr)
    while hi - lo > 1:
        mid = (lo + hi) // 2
        if arr[mid] < x:
            lo = mid
        else:
            hi = mid
    return hi
# For each query, print the closest element of a sorted array,
# preferring the smaller element on ties.
_, _ = input().split()  # sizes are read but not needed
sorted_values = list(map(int, input().split()))
queries = list(map(int, input().split()))

for query in queries:
    pos = lower_bound(sorted_values, query)
    if pos == 0:
        # every element is >= query: the first one is closest
        print(sorted_values[0])
    elif pos == len(sorted_values):
        # every element is < query: the last one is closest
        print(sorted_values[-1])
    else:
        left_gap = query - sorted_values[pos - 1]
        right_gap = sorted_values[pos] - query
        # ties go to the smaller (left) value
        print(sorted_values[pos - 1] if left_gap <= right_gap else sorted_values[pos])
5467678086 | from will.plugin import WillPlugin
from will.decorators import respond_to, periodic, hear, randomly, route, rendered_template, require_settings
class RoomsPlugin(WillPlugin):
    # Chat commands for inspecting the rooms the bot knows about.
    # NOTE: the method docstrings below double as the user-visible help
    # text, so they are kept verbatim.
    @respond_to(r"what are the rooms\?")
    def list_rooms(self, message):
        """what are the rooms?: List all the rooms I know about."""
        context = {"rooms": self.available_rooms.values(), }
        self.say(rendered_template("rooms.html", context), message=message, html=True)
    # Refresh the cached room list from the chat server on demand.
    @respond_to("^update the room list")
    def update_rooms(self, message):
        self.update_available_rooms()
        self.say("Done!", message=message)
    @respond_to(r"who is in this room\?")
    def participants_in_room(self, message):
        """who is in this room?: List all the participants of this room."""
        room = self.get_room_from_message(message)
        context = {"participants": room.participants, }
        self.say(rendered_template("participants.html", context), message=message, html=True)
| skoczen/will | will/plugins/chat_room/rooms.py | rooms.py | py | 995 | python | en | code | 405 | github-code | 13 |
24420960592 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from sklearn.ensemble import RandomForestClassifier
from sklearn.metrics import (accuracy_score, precision_score, recall_score, f1_score,
roc_auc_score, classification_report, confusion_matrix)
from sklearn.model_selection import train_test_split, cross_val_score, TimeSeriesSplit
def test_random_forest_classification_performance_ts(df,test_type, prediction_target, random_state=None):
    """Train and evaluate a RandomForest on a time-split of the IPC dataset.

    The last three years of data (by 'date') form the test set; everything
    earlier is training data.  Plots feature importances and returns
    ``(fitted_model, per_district_accuracy_df, overall_metrics_df)``.

    NOTE(review): the ``random_state`` parameter is unused -- the forest is
    built with a hard-coded ``random_state=23``.  ``custom_feature_list3``
    is built but never used.
    """
    df['date'] = pd.to_datetime(df['date'])
    # Hold out the most recent 3 years as the test period.
    cutoff_date = df['date'].max() - pd.DateOffset(years=3)
    print(df['date'].max())
    print(cutoff_date)
    df.columns = df.columns.astype(str)
    df=df.dropna()
    nlp_columns = [col for col in df.columns if col.startswith('nlp_')]
    custom_feature_list1 = ['date','district_code', 'centx', 'centy', 'cropland_pct', 'pop',
       'ruggedness_mean', 'pasture_pct', 'ipc_months_since_change',
       'ipc_lag_1', 'food_price_idx_lag_1', 'ipc_lag_3', 'ndvi_mean_lag_3',
       'rain_mean_lag_3', 'et_mean_lag_3', 'food_price_idx_lag_3', 'ipc_lag_6',
       'food_price_idx_lag_6', 'ipc_rolling_avg_3',
       'food_price_idx_rolling_avg_3', 'food_price_idx_rolling_std_3', 'ipc_lead_1', 'ipc_lead_3', 'ipc_lead_6']
    custom_feature_list1.extend(nlp_columns)
    custom_feature_list3 = ['date','district_code', 'ipc_months_since_change',
       'ipc_lag_1', 'ipc_lag_3', 'food_price_idx_lag_6', 'food_price_idx_rolling_avg_3', 'food_price_idx_rolling_std_3',
       'ipc_lag_6','food_price_idx_lag_1','food_price_idx_lag_3', 'ipc_lead_1', 'ipc_lead_3', 'ipc_lead_6']
    custom_feature_list3.extend(nlp_columns)
    df = df[custom_feature_list1]
    ###
    train_set = df[df['date'] <= cutoff_date]
    test_set = df[df['date'] > cutoff_date]
    # The ipc_lead_* columns are alternative prediction targets, so all of
    # them (plus 'date') are dropped from the feature matrices.
    X_train = train_set.drop(columns=['ipc_lead_1', 'ipc_lead_3', 'ipc_lead_6', 'date'])
    y_train = train_set[prediction_target]
    X_test = test_set.drop(columns=['ipc_lead_1', 'ipc_lead_3', 'ipc_lead_6', 'date'])
    y_test = test_set[prediction_target]
    print(len(X_test))
    split_metrics = pd.DataFrame(columns=[
        'test_type', 'prediction_target',
        'Accuracy', 'Precision (Weighted)', 'Recall (Weighted)', 'F1 Score (Weighted)'
    ])
    all_district_accuracies = []
    # Create and fit the RandomForestClassifier
    rf = RandomForestClassifier(n_estimators = 5000, random_state = 23, max_depth = 7)
    rf.fit(X_train, y_train)
    # Make predictions
    y_pred = rf.predict(X_test)
    # Store metrics for this split
    current_metrics = {
        'test_type': test_type,
        'prediction_target': prediction_target,
        'Accuracy': accuracy_score(y_test, y_pred),
        'Precision (Weighted)': precision_score(y_test, y_pred, average='weighted'),
        'Recall (Weighted)': recall_score(y_test, y_pred, average='weighted'),
        'F1 Score (Weighted)': f1_score(y_test, y_pred, average='weighted')
    }
    current_metrics_df = pd.DataFrame([current_metrics])
    split_metrics = pd.concat([split_metrics,current_metrics_df], ignore_index=True)
    # Calculate district accuracies for this split
    district_accuracies = {}
    unique_districts = X_test['district_code'].unique()
    for district in unique_districts:
        district_mask = X_test['district_code'] == district
        district_y_true = y_test[district_mask]
        district_y_pred = y_pred[district_mask]
        district_accuracy = accuracy_score(district_y_true, district_y_pred)
        district_accuracies[district] = district_accuracy
        all_district_accuracies.append((test_type, prediction_target, district, district_accuracy))
    district_accuracy_df = pd.DataFrame(all_district_accuracies, columns=['Test_Type', 'Prediction_Target', 'District', 'Accuracy'])
    # Plot feature importances, least to most important.
    importances = rf.feature_importances_
    indices = np.argsort(importances)
    features = X_train.columns
    plt.title('Feature Importances')
    plt.barh(range(len(indices)), importances[indices], color='g', align='center')
    plt.yticks(range(len(indices)), [features[i] for i in indices])
    plt.xlabel('Relative Importance')
    plt.show()
    return rf, district_accuracy_df, split_metrics
| philippbeirith/JBG60-23 | scripts/evaluation_test.py | evaluation_test.py | py | 4,276 | python | en | code | 0 | github-code | 13 |
27836757566 | from scipy.io import arff
import numpy as np
import random as r
from time import time
import pandas as pd
from sklearn.neighbors import KDTree
from sklearn.preprocessing import MinMaxScaler
from sklearn.utils import shuffle
np.random.seed(0)
def loaddata(path):
    """Load an ARFF file and return ``(data, meta)`` with numeric labels.

    The rows are returned as a 2-D array whose last column is the class.
    If that column is not numeric, each distinct label is mapped to an
    integer (and the mapping is printed).
    """
    f = path
    data, meta = arff.loadarff(f)
    df_data = pd.DataFrame(data)
    data = df_data.values
    # print("Escalado de valores.")
    try:
        # Probe the class column: succeeds when it is already numeric.
        float(data[0][len(data[0])-1])
    except:
        # Non-numeric class column: build a label -> integer mapping and
        # rewrite every label in place.
        variables = []
        for x in data:
            if x[-1] not in variables:
                variables.append(x[len(x)-1])
        numeros = list(range(0,len(variables)))
        diccionario = {}
        for i in range(0,len(numeros)):
            diccionario.update({variables[i]:numeros[i]})
        for i in range(0,len(data)):
            data[i][len(data[0])-1] = diccionario.get(data[i][len(data[0])-1])
        print("Etiquetas modificadas de la siguiente forma: ",diccionario)
    print("Cargado ", path)
    return data, meta
def separar(data):
    """Split rows into ``(features, labels)``: the last column is the label."""
    features = [row[0:-1] for row in data]
    labels = [row[-1] for row in data]
    # Conversion to the expected dtype.
    return np.array(features, np.float64), np.array(labels, np.float64)
# Apply the feature weights to a data matrix (distance-weighting step).
def multiplica_pesos(x,pesos):
    """Scale each feature column of ``x`` by its weight; weights below 0.2
    zero the column out.

    NOTE(review): ``data`` is a full-width NumPy view of ``x`` (M equals
    the row length), so ``x`` itself is modified in place.  Only the first
    ``len(x[0])-1`` columns are touched, leaving the label column intact.
    """
    M = len(x[0])
    data = x[:,0:M]
    for i in range(0,len(x)):
        for j in range(0,len(x[0])-1):
            if pesos[j] >= 0.2:
                data[i][j] = x[i][j]*pesos[j]
            else:
                data[i][j] = 0
    return data
# Produce the train/test partition for one fold of a 5-fold split.
def carga_datos(data, iteracion):
    """Return ``(train, test)`` for fold ``iteracion`` of a 5-fold split.

    Fold size is ``len(data) // 5``; the test rows are the ``iteracion``-th
    consecutive slice and the training set is everything else.
    """
    n = len(data)
    fold = n // 5
    if iteracion == 0:
        return data[fold:n], data[0:fold]
    start = iteracion * fold
    stop = min((iteracion + 1) * fold, n)
    test_rows = range(start, stop)
    train = np.delete(data, test_rows, 0)
    return train, data[start:stop]
# Count how many weights fall at or below the discard threshold.
def califica_pesos(pesos):
    """Return the number of weights whose value is <= 0.2."""
    return sum(1 for p in pesos if p <= 0.2)
# 1-NN evaluation of a weight vector on one cross-validation fold.
def comprobar(pesos,i,boolean=True):
    """Score ``pesos`` on fold ``i``: mean of %accuracy (1-NN) and %reduction.

    Features with weight below 0.2 are zeroed and the surviving columns are
    scaled by their weights before the nearest-neighbour query.  Reads the
    module-level ``data`` array.  ``boolean`` toggles the progress print.
    """
    _pesos = np.copy(pesos)
    _pesos[_pesos < 0.2 ] = 0
    train,test = carga_datos(data,i)
    x_train,y_train = separar(train)
    x_test,y_test = separar(test)
    # Weight the features and drop the discarded columns.
    x_train = (x_train*_pesos)[:,_pesos > 0.2]
    x_test = (x_test*_pesos)[:,_pesos > 0.2]
    tree = KDTree(x_train, leaf_size=1)
    dis,vecinos = tree.query(x_test,k=1)
    vecinos = vecinos[:,0]
    # print(y_train,y_test)
    aciertos = np.mean( y_train[vecinos] == y_test) * 100
    # for m in range(0,len(datos_test)):
    #
    #     datos_test_ = datos_test[m]
    #     dis,ind = tree.query(datos_test_.reshape(1,-1), k=1)
    #
    #     ind = ind[0][0]
    #
    #     if etiquetas_train[ind] == etiquetas_test[m]:
    #         aciertos += 1
    calif = califica_pesos(pesos)
    # aciertos = aciertos * 100 / len(datos_test)
    calif = calif *100 / len(pesos)
    if boolean:
        print("CONJUNTO DE DATOS ",i,": ","%_clas: ",aciertos,"%red: ",calif)
    return (aciertos + calif) /2
# Nearest neighbour with the same class (friend) or a different class (enemy).
def amigo_cercano(data,x,pesos,boolean):
    """Return the row of ``data`` nearest to ``x`` whose label matches
    ``x``'s label when ``boolean`` is True, or differs when False.

    NOTE(review): ``pesos`` is currently unused -- the KDTree is built on
    the raw rows (label column included); confirm this is intended.
    """
    caracter = x[-1]
    cadena = []
    tree = KDTree(data, leaf_size=1)
    x = x.reshape(1,-1)
    # k=1 would return x itself, so start at the 2nd neighbour and widen
    # the query until a row with the desired label relation appears.
    i = 2
    while True:
        dis,ind = tree.query(x, k=i)
        # ind2 = ind
        ind = ind[0][-1]
        # print(dis,(x==data[ind]).all(),ind2)
        # break
        if (boolean and data[ind][-1] == caracter) or (not boolean and data[ind][-1] != caracter):
            cadena = data[ind]
            break
        else:
            i += 1
    return cadena
# Update the weights from the nearest friend and enemy (RELIEF-style step).
def calcula_nuevos_pesos(pesos,x,amigo,enemigo):
    """Return ``pesos`` updated component-wise by ``|enemigo_i| - |amigo_i|``.

    NOTE(review): ``w = pesos`` aliases the argument, so ``pesos`` is
    mutated in place.  The textbook RELIEF update uses the distances to
    ``x`` (``|enemigo_i - x_i| - |amigo_i - x_i|``); here ``x`` is unused
    -- confirm whether that is intentional.
    """
    w = pesos
    # i.e. pesos + (enemy - example) - (friend - example),
    # which reduces to enemy - friend
    for i in range(0,len(amigo)-1):
        w[i] = pesos[i] + abs(enemigo[i]) - abs(amigo[i])
    return w
# Truncate the weight vector into [0, 1].
def corregir_pesos(pesos):
    """Clamp the weights: negatives become 0, values above 1 are divided
    by the maximum weight, the rest are kept unchanged."""
    maximo = max(pesos)
    return [0 if p < 0 else (p / maximo if p > 1 else p) for p in pesos]
# GREEDY algorithm (RELIEF-style weight learning).
def greedy(i):
    """Learn a weight vector on fold ``i`` of the module-level ``data``.

    For each training row, finds its nearest friend and enemy, updates the
    weights accordingly and truncates them to [0, 1] after every step.
    """
    pesos = np.zeros(M,np.float64)
    x,y = carga_datos(data,i)
    for x1 in x:
        target_amigo = amigo_cercano(x,x1,pesos,True)
        target_enemigo = amigo_cercano(x,x1,pesos,False)
        pesos = calcula_nuevos_pesos(pesos,x1,target_amigo,target_enemigo)
        pesos = corregir_pesos(pesos)
    # pesos = corregir_pesos(pesos)
    return pesos
# Random initial weights.
def inicia_pesos():
    """Return ``M`` random weights drawn as ``random.randrange(100)/100``.

    NOTE(review): ``np.random.seed(1)`` does not seed the stdlib ``random``
    module used below, so these draws are not made reproducible by it --
    confirm whether ``r.seed`` was intended.
    """
    np.random.seed(1)
    w = []
    for i in range(0,M):
        numero = r.randrange(100)
        w.append(numero/100)
    return w
def comprobar_bl(pesos,iteracion):
    """Leave-one-out 1-NN fitness of ``pesos`` on a fold's training split.

    Same fitness as ``comprobar`` (mean of %accuracy and %reduction) but
    evaluated on the training data only, classifying each point by its
    nearest *other* training point (k=2, discarding the point itself).
    """
    #def comprobar_(pesos,iteracion):
    _pesos = np.copy(pesos)
    _pesos[_pesos < 0.2] = 0
    train,test = carga_datos(data,iteracion) # the test split is unused here
    x_train,y_train = separar(train)
    x_train = (x_train*_pesos)[:,_pesos > 0.2]
    tree = KDTree(x_train)
    dis,vecinos = tree.query(x_train,k=2)
    # Column 0 is each point itself; column 1 is its nearest neighbour.
    vecinos = vecinos[:,1]
    aciertos = np.mean( y_train[vecinos] == y_train)*100
    calif = califica_pesos(pesos) * 100 / len(pesos)
    return (aciertos + calif) /2
# LOCAL SEARCH
def busqueda_local(j):
    """First-improvement local search for a weight vector on fold ``j``.

    Mutates one coordinate at a time with Gaussian noise (sigma 0.3),
    keeping the change only if the leave-one-out fitness improves.  Stops
    after 15000 evaluations or 20*dim consecutive non-improving moves.
    """
    # initial solution:
    pesos = inicia_pesos()
    # pesos = greedy()
    # pesos = np.zeros(M,np.float64)
    desviacion = 0.3
    O = len(pesos)
    # train,test = carga_datos(data,j)
    # datos_train,etiquetas_train = separar(train)
    calidad = comprobar_bl(pesos,j)
    iters = 1
    no_mejora = 0
    while iters < 15000 and no_mejora < 20*O :
        for i in range(0,O):
            prev = pesos[i]
            valor = np.random.normal(0,desviacion)
            pesos[i] = np.clip(pesos[i] + valor,0,1)
            calidad1 = comprobar_bl(pesos,j)
            # print(calidad1)
            iters += 1
            if calidad1 > calidad:
                # pesos = copia_pesos
                no_mejora = 0
                calidad = calidad1
                break
            else:
                # Revert the mutation if it did not improve the fitness.
                pesos[i] = prev
                no_mejora += 1
    return pesos
def k_NN(data_training, tags_training, w, data_test = None, tags_test = None, is_training = True):
    """Fitness of weight vector ``w``: 0.5*hit_rate + 0.5*reduction_rate.

    In training mode each training point is classified by its nearest
    *other* training point (leave-one-out, k=2); otherwise the test points
    are classified against the training set (k=1).  Features with weight
    below 0.2 are dropped and the rest scaled by their weights.

    Returns the tuple ``(f, hit_rate, reduction_rate)``.
    """
    w_prim = np.copy( w )
    w_prim[w_prim < 0.2] = 0.0
    eliminated = w_prim[w_prim < 0.2].shape[0]
    hit = 0
    hit_rate = 0.0
    data_training_mod = (data_training*w_prim)[:, w_prim > 0.2]
    tree = KDTree(data_training_mod)
    if is_training:
        # Column 1 skips each point itself (leave-one-out).
        nearest_ind = tree.query(data_training_mod, k=2, return_distance=False)[:,1]
        hit_rate = np.mean( tags_training[nearest_ind] == tags_training )
    else:
        data_test_mod = (data_test*w_prim)[:, w_prim > 0.2]
        nearest_ind = tree.query(data_test_mod, k=1, return_distance=False)
        for i in range(nearest_ind.shape[0]):
            if tags_training[nearest_ind[i]] == tags_test[i]:
                hit += 1
        hit_rate = hit/data_test_mod.shape[0]
    reduction_rate = eliminated/len(w)
    f = (hit_rate + reduction_rate)* 0.5
    return f, hit_rate, reduction_rate
def local_search(data, tags,iteracion):
    """First-improvement local search over feature weights (English twin of
    `busqueda_local`).

    NOTE(review): `data` is only used for its column count and `tags` is never
    used — the fitness call `comprobar_bl` reads the module-level dataset
    instead.  Confirm this is intended.
    """
    w = np.random.uniform(0.0,1.0,data.shape[1])
    max_eval = 15000
    max_neighbors = 20*data.shape[1]
    n_eval = 0
    n_neighbors = 0
    variance = 0.3
    mean = 0.0
    class_prev = comprobar_bl(w,iteracion)
    while n_eval < max_eval and n_neighbors < max_neighbors:
        for i in range(w.shape[0]):
            n_eval += 1
            prev = w[i]
            # Mutate one coordinate with Gaussian noise, clipped to [0, 1].
            w[i] = np.clip(w[i] + np.random.normal(mean, variance), 0, 1)
            class_mod = comprobar_bl(w,iteracion)
            if(class_mod > class_prev):
                # Accept and restart the sweep from the first coordinate.
                n_neighbors = 0
                class_prev = class_mod
                break
            else:
                # Reject: revert the coordinate.
                w[i] = prev
                n_neighbors += 1
    """
    for i in range(len(w)):
        plt.bar(i,w[i])
    plt.show()
    """
    return w
# Experiment driver: for each dataset, load, min-max normalise, shuffle, then
# run the 5-fold evaluation of the uniform-weight 1-NN baseline (the Greedy
# and Local Search runs are currently commented out).
archivos = ['datos/colposcopy.arff','datos/ionosphere.arff','datos/texture.arff']
#archivos = ['datos/texture.arff']
var = time()
for archivo in archivos:
    data, meta = loaddata(archivo)
#    print(data[0])
    datos,etiquetas = separar(data)
#    if archivo == 'datos/texture.arff':
    # Normalise features to [0, 1] before any distance computation.
    scaler = MinMaxScaler()
    scaler.fit(datos)
    datos = scaler.transform(datos)
    datos,etiquetas = shuffle(datos,etiquetas)
    data = shuffle(data)
#    data = shuffle(data)
#    print(data[0])
# =============================================================================
#     if archivo == 'datos/texture.arff':
#         scaler = MinMaxScaler()
#         scaler.fit(data)
#         data = scaler.transform(data)
# =============================================================================
#    print(etiquetas)
    # Module-level globals consumed by the search/evaluation functions above:
    M = len(data[0]) -1
    N = len(data)
    tam = N // 5
    pesos = np.ones(M,np.float64)
    # 5-fold cross validation with uniform weights (baseline 1-NN).
    for i in range(0,5):
        var1 = time()
        print("KNN, particion ",i,": ",comprobar(pesos,i,True),"%")
        var2 = time()
        print("Tiempo: ",var2-var1)
    for i in range(0,5):
        var2 = time()
#        print("GREEDY, particion ",i,": ",comprobar(greedy(i),i,True),"%")
        var3 = time()
#        print("Tiempo: ",var3-var2)
    for i in range(0,5):
        var3 = time()
#        training,test = carga_datos(data,i)
#        datos_tr,etiquetas_tr = separar(training)
#        print(etiquetas_tr)
#        print("BUSQUEDA LOCAL, particion ",i,": ",comprobar(busqueda_local(i),i),"%")
        var4 = time()
#        print("Tiempo: ",var4-var3)
    print("Tiempo TOTAL: ",time()-var)
| penderana/Metaheuristicas | Practica 1/practica1.py | practica1.py | py | 10,243 | python | es | code | 0 | github-code | 13 |
3531146892 | import torch
import torch.nn.functional as f
import sys
import numpy as np
import math
import matplotlib.pyplot as plt
import core_math.transfom as trans
import cv2
import skimage.measure
from skimage.transform import resize
from skimage import img_as_bool
from banet_track.ba_optimizer import gauss_newtown_update, levenberg_marquardt_update, batched_mat_inv
from visualizer.visualizer_2d import show_multiple_img
# sys.path.extend(['/opt/eigency', '/opt/PySophus'])
# from sophus import SE3
""" Utilities ----------------------------------------------------------------------------------------------------------
"""
def batched_x_2d_normalize(h, w, x_2d):
    """
    Map pixel coordinates to the [-1, 1] range expected by `grid_sample`.
    The (w-1)/(h-1) normalization corresponds to grid_sample's
    align_corners=True convention.
    :param h: image height in pixels
    :param w: image width in pixels
    :param x_2d: coordinates mapping, (N, H * W, 2) with (x, y) in pixel units
    :return: new tensor (N, H * W, 2) with values in [-1, 1]
    BUG FIX: the previous version wrote the divided values back into the
    caller's tensor in place before allocating the result, leaving the input
    half-transformed as a side effect.  This version never mutates `x_2d`.
    """
    # (w-1, h-1) divisors for the (x, y) channels; new_tensor keeps the
    # input's device and dtype.
    scale = x_2d.new_tensor([float(w) - 1.0, float(h) - 1.0])
    return (x_2d / scale) * 2.0 - 1.0
def batched_interp2d(tensor, x_2d):
    """
    [TESTED, file: valid_banet_batched_interp2d.py]
    Interpolate the tensor; samples each output pixel from the input tensor at
    the (x, y) position given by the coordinate map.
    :param tensor: input tensor to be interpolated, (N, C, H, W)
    :param x_2d: coordinate map, (N, H, W, 2) in (-1, 1); positions outside
                 the range are filled with zeros (grid_sample's default
                 padding_mode='zeros')
    :return: interpolated tensor, (N, C, H, W)
    NOTE(review): `grid_sample` is called without `align_corners`; on recent
    PyTorch the default is align_corners=False, while the (w-1)/(h-1)
    normalization in `batched_x_2d_normalize` matches align_corners=True —
    confirm which convention this codebase was validated against.
    """
    return f.grid_sample(tensor, x_2d)
def batched_index_select(input, dim, index):
    """
    [TESTED, file: valid_bannet_batched_index_select.py]
    Select M entries along dimension `dim`, independently for each batch.
    :param input: tensor of shape (N, x, x, ..., x)
    :param dim: dimension to select along (dim > 0)
    :param index: per-batch indices into that dimension, (N, M)
    :return: tensor where dimension `dim` is replaced by the M selections
    """
    n_dims = len(input.shape)
    # Reshape index to broadcast over every non-selected dimension.
    view_shape = [input.shape[0]]
    for axis in range(1, n_dims):
        view_shape.append(-1 if axis == dim else 1)
    expand_shape = list(input.shape)
    expand_shape[0] = -1
    expand_shape[dim] = -1
    gather_index = index.view(view_shape).expand(expand_shape)
    return torch.gather(input, dim, gather_index)
def se3_exp(w):
    """
    [TESTED, file: valid_banet_exp_mapping.py]
    Exponential mapping of lie se(3) to SE(3) via the closed-form
    (Rodrigues-style) expressions, batched version.
    Reference: http://ethaneade.com/lie_groups.pdf (Page. 12/15)
    :param w: lie algebra se(3) tensor, dim: (N, 6); each row is (omega, u)
              where omega in R^3 is the rotation and u in R^3 the translation
              component.
    :return: (R, t) with R of dim (N, 3, 3) and t of dim (N, 3, 1) such that
             T = [R | t] is the SE(3) transform.
    BUG FIX: intermediate tensors are now created with the input's device and
    dtype (`zeros_like` / device-aware `eye`); the previous CPU-default
    allocations broke GPU and double-precision inputs.
    """
    N = w.shape[0]  # Batches
    # 1e-8 keeps theta away from zero so A, B, C below stay finite.
    theta_sq = torch.sum(w[:, :3] * w[:, :3], dim=1) + 1.0e-8  # omega^T omega, dim: (N,)
    theta = torch.sqrt(theta_sq)  # dim: (N,)
    zeros = torch.zeros_like(theta)  # matches device/dtype of input
    I = torch.eye(3, dtype=w.dtype, device=w.device).repeat(N, 1, 1)  # (N, 3, 3)
    A = torch.sin(theta) / theta  # dim: (N,)
    B = (1.0 - torch.cos(theta)) / theta_sq
    C = (1.0 - A) / theta_sq
    # Skew-symmetric matrix omega^ and its square
    o_hat = torch.stack([zeros, -w[:, 2], w[:, 1],
                         w[:, 2], zeros, -w[:, 0],
                         -w[:, 1], w[:, 0], zeros], dim=1).view((-1, 3, 3))  # (N, 3, 3)
    o_hat2 = torch.bmm(o_hat, o_hat)  # (N, 3, 3)
    # Rotation and translation; .view(-1, 1, 1) broadcasts per-batch scalars.
    R = I + A.view(-1, 1, 1) * o_hat + B.view(-1, 1, 1) * o_hat2  # (N, 3, 3)
    V = I + B.view(-1, 1, 1) * o_hat + C.view(-1, 1, 1) * o_hat2  # (N, 3, 3)
    t = torch.bmm(V, w[:, 3:].view(-1, 3, 1))  # t = V * u, (N, 3, 1)
    return R, t
def transform_mat44(R, t):
    """Assemble batched 4x4 homogeneous transforms from R (N, 3, 3) and
    t (N, 3) or (N, 3, 1); the bottom row is [0, 0, 0, 1]."""
    batch = R.shape[0]
    upper = torch.cat([R, t.view(batch, 3, 1)], dim=2)
    last_row = torch.tensor([0, 0, 0, 1], dtype=torch.float).view((1, 1, 4)).expand(batch, 1, 4)
    return torch.cat([upper, last_row], dim=1)
def se3_exp_approx_order1(w):
    """
    [TESTED, file: valid_banet_exp_mapping.py]
    First-order approximation of the se(3) -> SE(3) exponential map, for
    small rotations/translations:  exp(dz^) ~ I + dz^.
    Reference: see SLAM14 (Page. 194)
    :param w: se(3) tensor, dim (N, 6); each row is (omega, u) with omega the
              rotation and u the translation component.
    :return: (R, t) with R of dim (N, 3, 3) and t of dim (N, 3, 1)
    """
    batch = w.shape[0]
    one = torch.ones(batch)  # diagonal entries of I + omega^
    rx, ry, rz = w[:, 0], w[:, 1], w[:, 2]
    R = torch.stack([one, -rz, ry,
                     rz, one, -rx,
                     -ry, rx, one], dim=1).view((-1, 3, 3))
    t = w[:, 3:].view(-1, 3, 1)
    return R, t
def x_2d_coords_torch(n, h, w):
    """Pixel coordinate grid: float tensor of shape (n, h, w, 2) where
    [..., 0] is the x (column) index and [..., 1] the y (row) index."""
    xs = np.tile(np.arange(w, dtype=np.float32).reshape(1, 1, w), (n, h, 1))
    ys = np.tile(np.arange(h, dtype=np.float32).reshape(1, h, 1), (n, 1, w))
    return torch.Tensor(np.stack([xs, ys], axis=-1))
""" Jacobin Mat Computation --------------------------------------------------------------------------------------------
"""
def J_camera_pose(X_3d, K):
    """
    [TESTED] with numeric, when transformation is Identity Mat, other transformation has problem.
    Jacobian of the projected pixel position with respect to the camera pose
    (se(3) ordering: rotation first, then translation — matching `se3_exp`).
    :param X_3d: 3D point positions in camera coordinates, dim: (N, M, 3),
                 N is the batch size, M the number of sampled points
    :param K: camera intrinsic matrices, dim: (N, 3, 3) (fx, fy are read
              from the diagonal)
    :return: Jacobian tensor of dim (N, M*2, 6); for each point the two rows
             are d(u_x)/d(pose) and d(u_y)/d(pose)
    """
    N = X_3d.shape[0]  # number of batches
    M = X_3d.shape[1]  # number of samples
    fx, fy = K[:, 0:1, 0], K[:, 1:2, 1]
    inv_z = 1 / X_3d[:, :, 2]  # 1/Z
    x_invz = X_3d[:, :, 0] * inv_z  # X/Z
    y_invz = X_3d[:, :, 1] * inv_z  # Y/Z
    xy_invz = x_invz * y_invz
    # Rotation part (columns 0-2)
    J_00 = - fx * xy_invz  # J[0, 0] = -fx * (X * Y)/Z^2, dim: (N, M)
    J_01 = fx * (1.0 + x_invz ** 2)  # J[0, 1] = fx + fx * X^2 / Z^2
    J_02 = - fx * y_invz  # J[0, 2] = - fx * Y / Z
    J_10 = - fy * (1.0 + y_invz ** 2)  # J[1, 0] = -fy - fy * Y^2/ Z^2
    J_11 = fy * xy_invz  # J[1, 1] = fy * (X * Y ) / Z^2
    J_12 = fy * x_invz  # J[1, 2] = fy * X / Z
    # Translation part (columns 3-5)
    J_03 = fx * inv_z  # J[0, 3] = fx / Z
    J_04 = torch.zeros(J_03.shape)  # J[0, 4] = 0
    J_05 = - fx * x_invz * inv_z  # J[0, 5] = - fx * X / Z^2
    J_13 = torch.zeros(J_03.shape)  # J[1, 3] = 0
    J_14 = fy * inv_z  # J[1, 4] = fy / Z
    J_15 = - fy * y_invz * inv_z  # J[1, 5] = - fy * Y / Z^2
    # Stack it together
    J = torch.stack([J_00, J_01, J_02, J_03, J_04, J_05,
                     J_10, J_11, J_12, J_13, J_14, J_15], dim=2).view((N, M * 2, 6))
    return J
""" Non-linear solver --------------------------------------------------------------------------------------------------
"""
def gauss_newton(f, Jac, x0, eps=1e-4, max_itr=20, verbose=False):
    """
    Batched Gauss-Newton optimizer.
    Reference: https://blog.xiarui.net/2015/01/22/gauss-newton/
    :param f: residual error function, output dim: (N, n_f_out)
    :param Jac: Jacobian of the residual w.r.t. the parameters,
                output dim: (N, n_f_out, n_f_in)
    :param x0: initial parameter guess, dim: (N, n_f_in)
    :param eps: stop when norm(delta) < eps for every batch element
    :param max_itr: maximum number of iterations
    :param verbose: print per-iteration update norms
    :return: x: optimized parameters, dim: (N, n_f_in)
    :return: boolean: True if the last update norm was below eps (converged)
    """
    N = x0.shape[0]  # batch size
    n_f_in = x0.shape[1]  # input parameters
    r = f(x0)  # residual error r(x0), dim: (N, n_f_out)
    n_f_out = r.shape[1]
    # Iterative optimizer
    x = x0
    for itr in range(0, max_itr):
        # Compute the Jacobian with respect to the residual error
        J = Jac(x)  # out dim: (N, n_f_out, n_f_in)
        # Normal-equation update vector: (J^T J)^{-1} J^T r
        Jt = J.transpose(1, 2)  # dim: (N, n_f_in, n_f_out)
        JtJ = torch.bmm(Jt, J)  # dim: (N, n_f_in, n_f_in)
        JtR = torch.bmm(Jt, r.view(N, n_f_out, 1))  # dim: (N, n_f_in, 1)
        delta_x = torch.bmm(batched_mat_inv(JtJ), JtR).view(N, n_f_in)  # dim: (N, n_f_in)
        delta_x_norm = torch.sqrt(torch.sum(delta_x * delta_x, dim=1)).detach().cpu().numpy()  # dim: (N,)
        # Converged when the largest per-batch update is below eps.
        max_delta_x_norm = np.max(delta_x_norm)
        if max_delta_x_norm < eps:
            break
        # Descent step (Jacobian sign convention gives x - delta here).
        x = x - delta_x
        r = f(x)
        if verbose:
            print('[Gauss-Newton Optimizer ] itr=%d, update_norm:%f' % (itr, max_delta_x_norm))
    return x, max_delta_x_norm < eps
def batched_gradient(features):
    """
    Central-difference spatial gradients of a batch of feature maps.
    :param features: batch of feature maps, dim: (N, C, H, W)
    :return: gradients, dim: (N, 2*C, H, W): channels [0:C] hold dI/dx and
             [C:2C] hold dI/dy; the first/last columns (for dx) and rows
             (for dy) are zero-padded.
    """
    N, C, H, W = features.shape
    # (I[x+1] - I[x-1]) / 2, then pad the missing border back to width W.
    dx = f.pad((features[:, :, :, 2:] - features[:, :, :, :W - 2]) / 2.0, (1, 1, 0, 0))
    dy = f.pad((features[:, :, 2:, :] - features[:, :, :H - 2, :]) / 2.0, (0, 0, 1, 1))
    return torch.cat([dx.view(N, C, H, W), dy.view(N, C, H, W)], dim=1)
def batched_select_gradient_pixels(imgs, depths, I_b, K, R, t, grad_thres=0.1, depth_thres=1e-4, num_pyramid=3, num_gradient_pixels=2000, visualize=False):
    """
    batch version of select gradient pixels, all operate in CPU
    :param imgs: input mini-batch gray-scale images, torch.Tensor (N, 1, H, W)
    :param depths: mini-batch depth maps, torch.Tensor (N, 1, H, W)
    :param I_b: paired images, torch.Tensor(N, C, H, W)
    :param K: camera intrinsic matrix tensor (N, 3, 3)
    :param R: rotation matrix in dimension of (N, 3, 3)
    :param t: translation vector (N, 3)
    :param grad_thres: selecting the pixel if gradient norm > gradient threshold
    :param depth_thres: selecting the pixel if depth > depth threshold
    :param num_pyramid: number of feature map pyramids used in ba_tracknet
    :param num_gradient_pixels: the number of pixels we want to select in one feature map
    :param visualize: plot selected pixels
    :return: selected indices, torch.Tensor (N, num_pyramid, num_gradient_pixels)
    NOTE(review): indices may repeat within one pyramid level — each pass of
    the threshold-relaxation loop reshuffles the full candidate set, so a
    pixel chosen earlier can be drawn again on a later pass.  Confirm callers
    tolerate duplicates.
    """
    N, C, H, W = imgs.shape
    depths_np = depths.view(N, H, W).numpy()  # (N, H, W)
    grad = batched_gradient(imgs)  # (N, 2, H, W)
    grad_np = grad.numpy()
    grad_np = np.transpose(grad_np, [0, 2, 3, 1])  # (N, H, W, 2)
    grad_norm = np.linalg.norm(grad_np, axis=-1)  # (N, H, W)
    # Cache several variables: warp frame b onto frame a so pixels whose
    # warped feature norm is ~0 (out-of-view) can be rejected below.
    x_a_2d = x_2d_coords_torch(N, H, W).cpu()  # (N, H*W, 2)
    X_a_3d = batched_pi_inv(K, x_a_2d.view(N, H * W, 2),
                            depths.view(N, H * W, 1))
    X_b_3d = batched_transpose(R, t, X_a_3d)
    x_b_2d, _ = batched_pi(K, X_b_3d)
    x_b_2d = batched_x_2d_normalize(float(H), float(W), x_b_2d).view(N, H, W, 2)  # (N, H, W, 2)
    I_b_wrap = batched_interp2d(I_b, x_b_2d)
    I_b_norm_wrap_np = torch.norm(I_b_wrap, p=2, dim=1).numpy()  # (N, H, W)
    sel_index = torch.empty((N, num_pyramid, num_gradient_pixels), device=torch.device('cpu')).long()
    for i in range(N):
        cur_H = H
        cur_W = W
        for j in range(num_pyramid):
            pixel_count = 0
            cur_grad_thres = grad_thres
            # Relax the gradient threshold until enough pixels are collected.
            while pixel_count < num_gradient_pixels:
                cur_grad_norm = cv2.resize(grad_norm[i, :, :], dsize=(cur_W, cur_H))
                # min-pooling keeps only pixels valid across the 2^j block
                cur_depths_np = skimage.measure.block_reduce(depths_np[i, :, :], (2**j, 2**j), np.min)
                cur_I_b_norm_wrap_np = skimage.measure.block_reduce(I_b_norm_wrap_np[i, :, :], (2**j, 2**j), np.min)
                cur_mask = np.logical_and(cur_grad_norm > cur_grad_thres, cur_depths_np > depth_thres)  # (H, W)
                cur_mask = np.logical_and(cur_mask, cur_I_b_norm_wrap_np > 1e-5)
                cur_sel_index = np.asarray(np.where(cur_mask.reshape(cur_H * cur_W)), dtype=np.int)
                cur_sel_index = cur_sel_index.ravel()
                np.random.shuffle(cur_sel_index)
                num_indices = cur_sel_index.shape[0]
                start = pixel_count
                last = pixel_count + num_indices if pixel_count + num_indices < num_gradient_pixels else num_gradient_pixels
                sel_index[i, j, start:last] = torch.from_numpy(cur_sel_index[:last - start]).long()
                pixel_count += num_indices
                cur_grad_thres -= 1. / 255.
            cur_H //= 2
            cur_W //= 2
    # Visualize
    if visualize:
        img_list = [{'img': I_b[0].numpy().transpose(1, 2, 0), 'title': 'I_b'},
                    {'img': I_b_wrap[0].numpy().transpose(1, 2, 0), 'title': 'I_b_wrap_to_a'},
                    {'img': I_b_norm_wrap_np[0], 'title': 'I_b_norm_wrap_to_a', 'cmap': 'gray'},
                    {'img': imgs[0, 0].numpy(), 'title': 'I_a', 'cmap': 'gray'},
                    {'img': depths_np[0], 'title': 'd_a', 'cmap': 'gray'}]
        cur_H = H
        cur_W = W
        for i in range(num_pyramid):
            selected_mask = np.zeros((cur_H * cur_W), dtype=np.float32)
            selected_mask[sel_index[0, i, :].numpy()] = 1.0
            img_list.append({'img': selected_mask.reshape(cur_H, cur_W), 'title': 'sel_index_'+str(i), 'cmap': 'gray'})
            cur_H //= 2
            cur_W //= 2
        show_multiple_img(img_list, title='select pixels visualization', num_cols=4)
    return sel_index
""" Camera Operations --------------------------------------------------------------------------------------------------
"""
def batched_pi(K, X):
    """
    Pinhole projection of batched camera-frame points onto the image plane.
    :param K: camera intrinsic matrices, (N, 3, 3)
    :param X: 3D points in camera coordinates, (N, num_points, 3)
    :return: pixel positions u (N, num_points, 2) and depths (N, num_points, 1)
    """
    fx = K[:, 0:1, 0:1]
    fy = K[:, 1:2, 1:2]
    cx = K[:, 0:1, 2:3]
    cy = K[:, 1:2, 2:3]
    z = X[:, :, 2:3]
    u = torch.cat([fx * X[:, :, 0:1] / z + cx,
                   fy * X[:, :, 1:2] / z + cy], dim=-1)
    return u, z
def batched_pi_inv(K, x, d):
    """
    Back-project pixels with depth into 3D camera coordinates (inverse of
    `batched_pi`).
    :param K: camera intrinsic matrices, (N, 3, 3)
    :param x: 2D pixel positions, (N, num_points, 2)
    :param d: depths at those pixels, (N, num_points, 1)
    :return: 3D points in camera coordinates, (N, num_points, 3)
    """
    fx = K[:, 0:1, 0:1]
    fy = K[:, 1:2, 1:2]
    cx = K[:, 0:1, 2:3]
    cy = K[:, 1:2, 2:3]
    X = torch.cat([d * (x[:, :, 0:1] - cx) / fx,
                   d * (x[:, :, 1:2] - cy) / fy,
                   d], dim=-1)
    return X
def batched_inv_pose(R, t):
    """
    Invert batched rigid poses [Verified]: (R, t) -> (R^T, -R^T t).
    :param R: rotation matrices, dim (N, 3, 3)
    :param t: translation vectors, dim (N, 3)
    :return: (R_inv, t_inv) with t_inv of dim (N, 3, 1)
    """
    batch = R.size(0)
    R_inv = torch.transpose(R, 1, 2)
    t_inv = -torch.bmm(R_inv, t.view(batch, 3, 1))
    return R_inv, t_inv
def batched_transpose(R, t, X):
    """
    Apply a batched rigid transform to 3D points: X' = R X + t.
    :param R: rotation matrices, (N, 3, 3)
    :param t: translation vectors, (N, 3)
    :param X: 3D points, (N, num_points, 3)
    :return: transformed points, (N, num_points, 3)
    """
    assert R.shape[1] == 3
    assert R.shape[2] == 3
    assert t.shape[1] == 3
    batch = R.shape[0]
    n_pts = X.shape[1]
    # Rotate: R @ X^T, back to (N, num_points, 3).
    rotated = torch.transpose(torch.bmm(R, torch.transpose(X, 1, 2)), 1, 2)
    return rotated + t.view(batch, 1, 3).expand(batch, n_pts, 3)
def batched_relative_pose(R_A, t_A, R_B, t_B):
    """
    Relative pose mapping frame-A camera coordinates into frame B:
    T_AB = Tcw_B * (Tcw_A)^{-1}, equivalently
    R_AB = R_B R_A^T and t_AB = R_B (C_A - C_B) with C the camera centers.
    :param R_A: frame A rotation matrices, (N, 3, 3)
    :param t_A: frame A translation vectors, (N, 3)
    :param R_B: frame B rotation matrices, (N, 3, 3)
    :param t_B: frame B translation vectors, (N, 3)
    :return: (N, 3, 4) tensor [R | t] of the relative transform
    """
    Tcw_a = transform_mat44(R_A, t_A)
    Twc_a = batched_mat_inv(Tcw_a)
    Tcw_b = transform_mat44(R_B, t_B)
    # Transformation from A to B; drop the constant bottom row.
    return torch.bmm(Tcw_b, Twc_a)[:, :3, :]
def batched_relative_pose_mat44(R_A, t_A, R_B, t_B):
    """
    Same as `batched_relative_pose` but returns the full homogeneous
    transform: T_AB = Tcw_B * (Tcw_A)^{-1}.
    :param R_A: frame A rotation matrices, (N, 3, 3)
    :param t_A: frame A translation vectors, (N, 3)
    :param R_B: frame B rotation matrices, (N, 3, 3)
    :param t_B: frame B translation vectors, (N, 3)
    :return: (N, 4, 4) relative transform matrices
    """
    Tcw_a = transform_mat44(R_A, t_A)
    Twc_a = batched_mat_inv(Tcw_a)
    Tcw_b = transform_mat44(R_B, t_B)
    # Transformation from A to B
    return torch.bmm(Tcw_b, Twc_a)
def dense_corres_a2b(d_a, K, Rab, tab, x_2d=None):
    """
    Dense correspondence from frame a to b [Verified]
    For every pixel of frame a: back-project with its depth, transform by
    (Rab, tab) into frame b, and re-project with the same intrinsics.
    :param d_a: depth map of frame a, dim (N, H, W)
    :param K: camera intrinsics, dim (N, 3, 3)
    :param Rab: rotation a->b, dim (N, 3, 3)
    :param tab: translation a->b, dim (N, 3)
    :param x_2d: optional pre-computed pixel grid, dim (N, H, W, 2);
                 built on the fly when None
    :return: corresponding pixel coordinates in frame b, dim (N, H*W, 2),
             in pixel units (NOT normalized to [-1, 1])
    """
    N, H, W = d_a.shape
    x_a_2d = x_2d_coords_torch(N, H, W).view(N, H * W, 2) if x_2d is None else x_2d.view(N, H * W, 2)
    X_a_3d = batched_pi_inv(K, x_a_2d, d_a.view((N, H * W, 1)))
    X_b_3d = batched_transpose(Rab, tab, X_a_3d)
    x_b_2d, _ = batched_pi(K, X_b_3d)
    return x_b_2d
def wrap_b2a(I_b, d_a, K, Rab, tab, x_2d=None):
    """
    Warp frame b's image/feature map into frame a's view [Verified], using
    frame a's depth and the relative pose (Rab, tab).
    :param I_b: image or feature map of frame b, dim (N, C, H, W)
    :param d_a: depth map of frame a, dim (N, H, W)
    :param K: camera intrinsics, dim (N, 3, 3)
    :param Rab: rotation a->b, dim (N, 3, 3)
    :param tab: translation a->b, dim (N, 3)
    :param x_2d: optional pre-computed pixel grid, dim (N, H, W, 2)
    :return: warped image, dim (N, C, H, W); pixels mapping outside frame b
             come back zero-filled (grid_sample default padding)
    """
    N, C, H, W = I_b.shape
    x_a2b = dense_corres_a2b(d_a, K, Rab, tab, x_2d)
    x_a2b = batched_x_2d_normalize(H, W, x_a2b).view(N, H, W, 2)  # (N, H, W, 2)
    wrap_img_b = batched_interp2d(I_b, x_a2b)
    return wrap_img_b
""" Rotation Representation --------------------------------------------------------------------------------------------
"""
def batched_rot2quaternion(R):
    """
    Convert batched rotation matrices (N, 3, 3) to unit quaternions (N, 4)
    in (w, x, y, z) order, normalized before return.
    NOTE(review): this trace-based formula needs 1 + trace(R) > 0; for
    rotations approaching 180 degrees `diag` tends to 0 and the division by
    4*q0 becomes numerically unstable (NaN at exactly 180) — confirm inputs
    stay away from that regime.
    """
    N = R.shape[0]
    diag = 1.0 + R[:, 0, 0] + R[:, 1, 1] + R[:, 2, 2]  # 1 + trace(R) = 4*qw^2
    q0 = torch.sqrt(diag) / 2.0
    q1 = (R[:, 2, 1] - R[:, 1, 2]) / (4.0 * q0)
    q2 = (R[:, 0, 2] - R[:, 2, 0]) / (4.0 * q0)
    q3 = (R[:, 1, 0] - R[:, 0, 1]) / (4.0 * q0)
    q = torch.stack([q0, q1, q2, q3], dim=1)
    q_norm = torch.sqrt(torch.sum(q*q, dim=1))
    return q / q_norm.view(N, 1)
def batched_quaternion2rot(q):
    """
    [TESTED]
    Convert unit quaternions in (w, x, y, z) order to rotation matrices.
    :param q: normalized quaternion vectors, dim: (N, 4)
    :return: rotation matrices, dim: (N, 3, 3)
    """
    batch = q.shape[0]
    qw, qx, qy, qz = q[:, 0], q[:, 1], q[:, 2], q[:, 3]
    entries = [1 - 2 * qy * qy - 2 * qz * qz, 2 * qx * qy - 2 * qz * qw, 2 * qx * qz + 2 * qy * qw,
               2 * qx * qy + 2 * qz * qw, 1 - 2 * qx * qx - 2 * qz * qz, 2 * qy * qz - 2 * qx * qw,
               2 * qx * qz - 2 * qy * qw, 2 * qy * qz + 2 * qx * qw, 1 - 2 * qx * qx - 2 * qy * qy]
    return torch.stack(entries, dim=1).view(batch, 3, 3)
def log_quaternion(q):
    """Logarithm map of unit quaternions: (N, 4) -> (N, 3) rotation vectors.
    NOTE(review): for the identity quaternion the vector part has zero norm
    and this divides by zero (the clamp guarding it is commented out) —
    confirm callers never pass the identity, or re-enable the clamp.
    """
    u = q[:, 0:1]  # scalar part, (N, 1)
    v = q[:, 1:]  # vector part, (N, 3)
    u = torch.clamp(u, min=-1.0, max=1.0)  # keep acos's argument in range despite rounding
    norm = torch.norm(v, 2, dim=1, keepdim=True)
    # norm = torch.clamp(norm, min=1e-8)
    return torch.acos(u) * v / norm
def exp_quaternion(log_q):
    """Exponential map: rotation vectors (N, 3) -> unit quaternions (N, 4).
    Inverse of `log_quaternion`.
    NOTE(review): a zero-norm input divides by zero (the clamp guarding it is
    commented out) — confirm callers never pass the zero vector.
    """
    norm = torch.norm(log_q, 2, dim=1, keepdim=True)
    # norm = torch.clamp(norm, min=1e-8)
    u = torch.cos(norm)  # scalar part
    v = log_q * torch.sin(norm) / norm  # vector part
    return torch.cat([u, v], dim=1)
def quaternion_dist(q1, q2):
    """Quaternion distance 1 - <q1, q2>^2 in [0, 1]; 0 for identical
    rotations (q and -q are treated as the same rotation)."""
    dot = torch.sum(q1 * q2, dim=-1)
    return 1 - dot ** 2
def batched_rot2angle(R):
    """Convert batched rotation matrices (N, 3, 3) to axis-angle form.
    :return: (angle, axis) with angle of dim (N,) and unit axis of dim (N, 3).
    NOTE: the 1e-4 added to `factor` only prevents division by zero; near
    angle 0 or pi the off-diagonal differences vanish and the returned axis
    is not reliable.
    """
    m00 = R[:, 0, 0]
    m01 = R[:, 0, 1]
    m02 = R[:, 0, 2]
    m10 = R[:, 1, 0]
    m11 = R[:, 1, 1]
    m12 = R[:, 1, 2]
    m20 = R[:, 2, 0]
    m21 = R[:, 2, 1]
    m22 = R[:, 2, 2]
    # angle from the trace: cos(theta) = (trace - 1) / 2
    angle = torch.acos((m00 + m11 + m22 - 1)/2)
    factor = torch.sqrt((m21 - m12)**2 + (m02-m20)**2 + (m10 - m01)**2) + 1e-4
    x = (m21 - m12) / factor
    y = (m02 - m20) / factor
    z = (m10 - m01) / factor
    axis = torch.stack([x, y, z], dim=1)
    return angle, axis
""" Direct Method Core -------------------------------------------------------------------------------------------------
"""
def dm_gauss_newton_itr(alpha, X_a_3d, X_a_3d_sel, I_a, sel_a_idx, K, I_b, I_b_grad):
    """
    One Gauss-Newton iteration of the direct (photometric) method.
    :param alpha: se(3) vec: (rotation, translation), dim: (N, 6)
    :param X_a_3d: Dense 3D Point in frame A, dim: (N, H*W, 3)
    :param X_a_3d_sel: Selected semi-dense points in frame A, dim: (N, M, 3)
    :param I_a: image or feature map of frame A, dim: (N, C, H, W)
    :param sel_a_idx: selected point indices, dim: (N, M)
    :param K: Intrinsic matrix, dim: (N, 3, 3)
    :param I_b: image or feature map of frame B, dim: (N, C, H, W)
    :param I_b_grad: gradient of image or feature map of frame B, dim: (N, 2*C, H, W),
                     (N, 0:C, H, W) = dI/dx, (N, C:2C, H, W) = dI/dy
    :return: alpha: updated se(3) vector, dim: (N, 6)
    :return: e: residual error on selected points, dim: (N, M*C)
    :return: delta_norm: l2 norm of the Gauss-Newton update vector, used to
             decide loop termination
    """
    N, C, H, W = I_a.shape
    M = sel_a_idx.shape[1]
    # Warp frame B into frame A's view under the current pose estimate.
    R, t = se3_exp(alpha)
    X_b_3d = batched_transpose(R, t, X_a_3d)
    x_b_2d, _ = batched_pi(K, X_b_3d)
    x_b_2d = batched_x_2d_normalize(H, W, x_b_2d).view(N, H, W, 2)  # (N, H, W, 2)
    # Wrap the image
    I_b_wrap = batched_interp2d(I_b, x_b_2d)
    # Photometric residual on the selected pixels only.
    e = (I_a - I_b_wrap).view(N, C, H * W)  # (N, C, H*W)
    e = batched_index_select(e, 2, sel_a_idx)  # (N, C, M)
    # Jacobian of pixel position w.r.t. camera pose: du / d_alpha
    du_d_alpha = J_camera_pose(X_a_3d_sel, K).view(N * M, 2, 6)  # (N*M, 2, 6)
    # Jacobian of image intensity w.r.t. pixel position: dI_b / du
    dI_du = batched_interp2d(I_b_grad, x_b_2d)  # (N, 2*C, H, W)
    dI_du = batched_index_select(dI_du.view(N, 2 * C, H * W), 2, sel_a_idx)  # (N, 2*C, M)
    dI_du = torch.transpose(dI_du, 1, 2).contiguous().view(N * M, 2, C)  # (N*M, 2, C)
    dI_du = torch.transpose(dI_du, 1, 2)  # (N*M, C, 2)
    # Chain rule: J = -dI_b/du * du/d_alpha
    J = -torch.bmm(dI_du, du_d_alpha).view(N, C * M, 6)
    # Solve the normal equations for the pose update.
    e = e.transpose(1, 2).contiguous().view(N, M * C)  # (N, M*C)
    delta, delta_norm = gauss_newtown_update(J, e)  # (N, 6), (N, 1)
    # Additive update on the se(3) parameters.
    alpha = alpha + delta
    return alpha, e, delta_norm
def dm_levenberg_marquardt_itr(pre_T, X_a_3d, X_a_3d_sel, I_a, sel_a_idx, K, I_b, I_b_grad, lambda_func, level):
    """
    One Levenberg-Marquardt iteration of the direct (photometric) method.
    :param pre_T: current pose estimate as a 4x4 transform, dim: (N, 4, 4)
    :param X_a_3d: Dense 3D Point in frame A, dim: (N, H*W, 3)
    :param X_a_3d_sel: Selected semi-dense points in frame A, dim: (N, M, 3)
    :param I_a: image or feature map of frame A, dim: (N, C, H, W)
    :param sel_a_idx: selected point indices, dim: (N, M)
    :param K: Intrinsic matrix, dim: (N, 3, 3)
    :param I_b: image or feature map of frame B, dim: (N, C, H, W)
    :param I_b_grad: gradient of image or feature map of frame B, dim: (N, 2*C, H, W),
                     (N, 0:C, H, W) = dI/dx, (N, C:2C, H, W) = dI/dy
    :param lambda_func: function producing the LM damping from (residual, level)
    :param level: pyramid level used in this iteration, int
    :return: new_T: updated pose, dim: (N, 4, 4)
    :return: e: residual error on selected points, dim: (N, M*C)
    :return: delta_norm: l2 norm of the update vector (loop-termination signal)
    :return: lambda_weight: the damping actually used
    :return: x_b_2d: normalized correspondence grid, dim: (N, H, W, 2)
    """
    N, C, H, W = I_a.shape
    M = sel_a_idx.shape[1]
    pre_R = pre_T[:, :3, :3]
    pre_t = pre_T[:, :3, 3].view(N, 3, 1)
    # Warp frame B into frame A's view under the current pose estimate.
    X_b_3d = batched_transpose(pre_R, pre_t, X_a_3d)
    x_b_2d, _ = batched_pi(K, X_b_3d)
    x_b_2d = batched_x_2d_normalize(H, W, x_b_2d).view(N, H, W, 2)  # (N, H, W, 2)
    # Wrap the image
    I_b_wrap = batched_interp2d(I_b, x_b_2d)
    # Photometric residual on the selected pixels only.
    e = (I_a - I_b_wrap).view(N, C, H * W)  # (N, C, H*W)
    e = batched_index_select(e, 2, sel_a_idx)  # (N, C, M)
    # Jacobian of pixel position w.r.t. camera pose: du / d_alpha
    du_d_alpha = J_camera_pose(X_a_3d_sel, K).view(N * M, 2, 6)  # (N*M, 2, 6)
    # Jacobian of image intensity w.r.t. pixel position: dI_b / du
    dI_du = batched_interp2d(I_b_grad, x_b_2d)  # (N, 2*C, H, W)
    dI_du = batched_index_select(dI_du.view(N, 2 * C, H * W), 2, sel_a_idx)  # (N, 2*C, M)
    dI_du = torch.transpose(dI_du, 1, 2).contiguous().view(N * M, 2, C)  # (N*M, 2, C)
    dI_du = torch.transpose(dI_du, 1, 2)  # (N*M, C, 2)
    # Chain rule: J = -dI_b/du * du/d_alpha
    J = -torch.bmm(dI_du, du_d_alpha).view(N, C * M, 6)
    # Damping for this residual/pyramid level.
    lambda_weight = lambda_func(e, level)  # (N, 1)
    # Flatten the residual to (N, M*C) for the solver.
    e = e.transpose(1, 2).contiguous().view(N, M * C)
    delta, delta_norm = levenberg_marquardt_update(J, e, lambda_weight)  # (N, 6), (N, 1)
    # Compose the incremental transform on the LEFT of the current estimate.
    delta_R, delta_t = se3_exp(delta)
    new_R = torch.bmm(delta_R, pre_R)
    new_t = delta_t + torch.bmm(delta_R, pre_t.view(N, 3, 1))
    new_T = transform_mat44(new_R, new_t)
    return new_T, e, delta_norm, lambda_weight, x_b_2d
def gen_random_unit_vector():
    """Uniform random point on the unit sphere (Marsaglia 1972 rejection
    method): draw (u, v) in the unit disc, then lift to the sphere."""
    norm_sq = 2.0
    while norm_sq >= 1.0:  # rejection-sample a point inside the unit disc
        u = np.random.uniform(-1, 1)
        v = np.random.uniform(-1, 1)
        norm_sq = u ** 2 + v ** 2
    root = math.sqrt(1.0 - norm_sq)
    return np.array([2.0 * u * root, 2.0 * v * root, 1.0 - 2.0 * norm_sq], dtype=np.float32)
#
# def gen_random_alpha(alpha_gt, rot_angle_rfactor, trans_vec_rfactor):
# N = alpha_gt.shape[0]
# R, t = se3_exp(alpha_gt)
# R_set = R.detach().cpu().numpy()
# t_set = t.detach().cpu().numpy()
# new_alpha = torch.zeros(N, 6)
# for batch_idx in range(N):
# # R = R_set[batch_idx]
# R = np.eye(4, dtype=np.float32)
# R[:3, :3] = R_set[batch_idx]
# t = t_set[batch_idx]
#
# # Add rot random noise
# noise_axis = gen_random_unit_vector()
# noise_angle = np.random.normal(-rot_angle_rfactor, rot_angle_rfactor)
# # print('noise angle:', noise_angle)
# delta_R = trans.rotation_matrix(np.deg2rad(noise_angle), noise_axis)
# new_R = np.dot(delta_R, R)
# old_angle, oldaxis, _ = trans.rotation_from_matrix(R)
# new_angle, newaxis, _ = trans.rotation_from_matrix(new_R)
# T = np.eye(4, dtype=np.float32)
# T[:3, :3] = new_R[:3, :3]
#
# # Add trans random noise
# new_t = t + np.random.normal(0, trans_vec_rfactor*np.linalg.norm(t), size=(3,1))
# T[:3, 3] = new_t.ravel()
# T_ = SE3(T.astype(np.float64))
# alpha_ = T_.log().ravel()
# new_alpha[batch_idx, :3] = torch.Tensor(alpha_[3:])
# new_alpha[batch_idx, 3:] = torch.Tensor(alpha_[:3])
#
# return new_alpha#.cuda()
| sfu-gruvi-3dv/sanet_relocal_demo | banet_track/ba_module.py | ba_module.py | py | 30,566 | python | en | code | 51 | github-code | 13 |
39610118330 | from flask import Flask, render_template
from selenium import webdriver
from bs4 import BeautifulSoup
from selenium.webdriver.chrome.options import Options
import time
import pandas as pd
app = Flask(__name__)
@app.route('/')
def get_dictionary():
    """Scrape Kayak for flight prices on a fixed KHI -> SYD route and render
    them via results.html.

    BUG FIX: when Kayak's bot check fired, the old code closed the driver and
    then kept using it (`driver.page_source` on a closed session).  We now
    open a fresh browser session for the retry, and always quit the driver
    when done.
    """
    origin = "KHI"
    destination = "SYD"
    startdate = '2023-05-01'
    url = "https://www.kayak.com/flights/" + origin + "-" + destination + "/" + startdate + "?sort=bestflight_a&"
    driver = webdriver.Chrome()
    driver.implicitly_wait(15)
    driver.get(url)
    time.sleep(5)
    soup = BeautifulSoup(driver.page_source, 'lxml')
    # Kayak bot detection: restart the browser and retry once.
    paragraphs = soup.find_all('p')
    if paragraphs and paragraphs[0].getText() == "Please confirm that you are a real KAYAK user.":
        print("Kayak thinks I'm a bot, which I am ... so let's wait a bit and try again")
        driver.quit()
        time.sleep(20)
        driver = webdriver.Chrome()  # fresh session — the old one is gone
        driver.implicitly_wait(15)
        driver.get(url)
        time.sleep(5)
        soup = BeautifulSoup(driver.page_source, 'lxml')
    prices = soup.find_all('div', attrs={'class': 'f8F1-price-text'})
    price_list = [div.getText() for div in prices]
    driver.quit()  # release the browser session (was leaked before)
    df = pd.DataFrame({"origin": origin, "destination": destination,
                       "startdate": startdate,
                       "price": price_list})
    airline_dic = df.to_dict()
    return render_template('results.html', data=airline_dic)
if __name__ == '__main__':
    # Start Flask's built-in development server (not for production use).
    app.run()
74564433618 | #!/usr/bin/env python
"""
_GetBulkRunLumi_
MySQL implementation of GetBulkRunLumi
"""
from WMCore.Database.DBFormatter import DBFormatter
class GetBulkRunLumi(DBFormatter):
    """
    Bulk retrieval of run/lumi information for a set of files, keyed by
    file id (not LFN).
    """
    sql = """SELECT flr.run AS run, flr.lumi AS lumi, flr.fileid AS id
               FROM wmbs_file_runlumi_map flr
               WHERE flr.fileid = :id
               """

    def getBinds(self, files=None):
        """Build one {'id': ...} bind dictionary per input file."""
        files = self.dbi.makelist(files)
        return [{'id': f['id']} for f in files]

    def format(self, result):
        """Group raw rows into {fileid: {run: [lumi, ...]}}."""
        grouped = {}
        for row in self.formatDict(result):
            runs = grouped.setdefault(row['id'], {})
            runs.setdefault(row['run'], []).append(row['lumi'])
        return grouped

    def execute(self, files=None, conn=None, transaction=False):
        """Run the query for the given files and return the grouped mapping."""
        result = self.dbi.processData(self.sql, self.getBinds(files),
                                      conn=conn, transaction=transaction)
        return self.format(result)
| dmwm/WMCore | src/python/WMCore/WMBS/MySQL/Files/GetBulkRunLumi.py | GetBulkRunLumi.py | py | 1,301 | python | en | code | 44 | github-code | 13 |
72723052179 | from django.urls import path
from . import views
# URL routes for the thrift_shop app: the landing page plus one page per game.
urlpatterns = [
    path("", views.index),  # shop landing page
    path("skyrim/", views.skyrim),
    path("doom/", views.doom),
    path("fallout/", views.fallout),
    path("prey/", views.prey),
    path("quake/", views.quake),
]
| KonstantinLjapin/samples_and_tests | Skillbox/dpo_python_django/02_IntroductionToDjango/mysite/thrift_shop/urls.py | urls.py | py | 263 | python | en | code | 0 | github-code | 13 |
70139759378 | """This small module downloads and adjusts the OpenAPI spec of a given Argo Workflows version."""
import json
import logging
import sys
from typing import Dict, List, Set
import requests
logger: logging.Logger = logging.getLogger(__name__)

# Command line: <open-api-spec-url> <output-file>.
# The original `assert x is not None` checks could never fire (indexing
# sys.argv raises IndexError first), so the argument count is validated
# explicitly instead.
if len(sys.argv) < 3:
    raise SystemExit("usage: spec.py <open-api-spec-url> <output-file>")
open_api_spec_url = sys.argv[1]
output_file = sys.argv[2]

# download the spec; fail fast on HTTP errors instead of trying to
# JSON-decode an error page
response = requests.get(open_api_spec_url)
response.raise_for_status()

# get the spec into a dictionary
spec = response.json()

# these are specifications of objects with fields that are marked as required. However, it is possible for the Argo
# Server to not return anything for those fields. In those cases, Pydantic fails type validation for those objects.
# Here, we maintain a map of objects specifications whose fields must be marked as optional i.e. removed from the
# `required` list in the OpenAPI specification.
DEFINITION_TO_OPTIONAL_FIELDS: Dict[str, List[str]] = {
    "io.argoproj.workflow.v1alpha1.CronWorkflowStatus": ["active", "lastScheduledTime", "conditions"],
    "io.argoproj.workflow.v1alpha1.CronWorkflowList": ["items"],
    "io.argoproj.workflow.v1alpha1.ClusterWorkflowTemplateList": ["items"],
    "io.argoproj.workflow.v1alpha1.WorkflowList": ["items"],
    "io.argoproj.workflow.v1alpha1.WorkflowTemplateList": ["items"],
    "io.argoproj.workflow.v1alpha1.WorkflowEventBindingList": ["items"],
    "io.argoproj.workflow.v1alpha1.Metrics": ["prometheus"],
}
for definition, optional_fields in DEFINITION_TO_OPTIONAL_FIELDS.items():
    try:
        curr_required: Set[str] = set(spec["definitions"][definition]["required"])
    except KeyError as e:
        raise KeyError(
            f"Could not find definition {definition} in Argo specification for OpenAPI URI {open_api_spec_url}, "
            f"caught error: {e}"
        )
    for optional_field in optional_fields:
        if optional_field in curr_required:
            curr_required.remove(optional_field)
        else:
            # BUG FIX: this warning previously interpolated the whole
            # `optional_fields` list instead of the single missing field.
            logger.warning(
                f"Expected to find and change field {optional_field} of {definition} from required to optional, "
                f"but it was not found"
            )
    spec["definitions"][definition]["required"] = list(curr_required)

# finally, we write the spec to the output file that is passed to us assuming the client wants to perform
# something with this file
with open(output_file, "w+") as f:
    json.dump(spec, f, indent=2)
| argoproj-labs/hera | scripts/spec.py | spec.py | py | 2,655 | python | en | code | 375 | github-code | 13 |
654762176 | import unittest
import numpy as np
from core.semantic.polyconvex import Manager, Query
from core.semantic.sequential import Sequential
import time
class TestPartitionManager(unittest.TestCase):
    """Randomised sanity tests for the polyconvex partition Manager/Query."""

    def test_manager(self):
        """Manager can be instantiated from a plain list of vectors."""
        vector_space = [np.random.rand(512, 1) for _ in range(0, 100)]
        self.assertTrue(Manager(vector_space))

    def test_random_tests(self):
        """Single-tree indexing never leaves a leaf above capacity."""
        vector_space = np.asarray([np.random.rand(512, 1) for _ in range(0, 50000)])  # memory error at 1 million
        manager = Manager(vector_space)
        prepared_tree = manager.index_space()
        # No leaf node shall exceed capacity.
        self.assertFalse(any([(i.cell_count() * i.ratio) > i.cell_count() < manager.capacity
                              for i in prepared_tree.list_leaves()]))

    def test_random_forest(self):
        """Forest indexing produces exactly `tree_count` trees (and logs timing)."""
        vector_space = np.asarray([np.random.rand(512, 1) for _ in range(0, 5000)])
        manager = Manager(vector_space)
        tm = time.time()
        manager.create_forest()
        print("\nIndexing partition forest took {0}s ({1}, {2}, {3}, {4}, {5})\n".format(
            round(time.time() - tm, 2), vector_space.ravel().size, manager.tree_count,
            manager.split_ratio, manager.capacity, manager.indices))
        # BUG FIX: the original used assertTrue(len(...), manager.tree_count),
        # which treats tree_count as the *message* argument, so the check
        # passed for any non-empty forest.  Forest must be the size of tree_count.
        self.assertEqual(len(manager.random_forest), manager.tree_count)

    def test_query(self):
        """
        Test if Polyhedral query results contains an optimal point given by Sequential query (rarely fails)
        """
        vector_space = np.asarray([np.random.rand(512, 1) for _ in range(0, 20000)])
        query_vector = np.random.rand(512, 1)
        manager = Manager(vector_space)
        manager.create_forest()
        polyhedral_query = Query()
        polyhedral_query.import_forest(manager.random_forest)
        t = time.time()
        sequential_query = Sequential(vector_space)
        sequential_results = sequential_query.query(query_vector)[0]
        print("Sequential query took {0}".format(time.time() - t))
        t = time.time()
        polyhedral_results = polyhedral_query.search(query_vector)
        print("Polyhedral query took {0}".format(time.time() - t))
        polyhedral_results = list(polyhedral_results)
        self.assertTrue(any([i for i in polyhedral_results
                             if round(i[0], 2) == round(sequential_results[0][0], 2)]))  # rounded for precision
if __name__ == '__main__':
unittest.main()
| ShellRox/Lucifitas | core/semantic/tests/polyconvex.py | polyconvex.py | py | 3,127 | python | en | code | 1 | github-code | 13 |
40351181042 | #
# Sophia Wang
# December 10, 2019
# keypoints_parse_12-10-19b.py
#
import sys, os
import json
import math
body_angle_key = {0: (1, 0, 15), # -------
1: (1, 0, 16), # / \
2: (0, 1, 2), # | o o |
3: (1, 2, 3), # \ O /
4: (2, 3, 4), # -------
5: (0, 1, 5), # | |
6: (1, 5, 6), #
7: (5, 6, 7), #
8: (0, 1, 8), #
9: (2, 1, 8),
10: (5, 1, 8),
11: (1, 8, 9),
12: (8, 9, 10),
13: (9, 10, 11),
14: (10, 11, 24),
15: (10, 11, 22),
16: (1, 8, 12),
17: (8, 12, 13),
18: (12, 13, 14),
19: (13, 14, 21),
20: (13, 14, 19),
21: (5, 1, 2), # NEW ANGLES
22: (7, 8, 4),
23: (14, 8, 11),
24: (1, 8, 7),
25: (1, 5, 4)}
hand_angle_key = {0: (0, 1, 2), # \ | | | | /
1: (1, 2, 3), # \||||
2: (2, 3, 4),
3: (0, 5, 6),
4: (5, 6, 7), # | | | |
5: (6, 7, 8), # | | | |
6: (0, 9, 10), # | | | |
7: (9, 10, 11), # | | |
8: (10, 11, 12), # | |||||||
9: (0, 13, 14), # | |||||||
10: (13, 14, 15),# ||||||||||
11: (14, 15, 16),
12: (0, 17, 18),
13: (17, 18, 19),
14: (18, 19, 20)}
hand_angle_key_cont = {21: (0, 1, 2),
22: (1, 2, 3),
23: (2, 3, 4),
24: (0, 5, 6),
25: (5, 6, 7),
26: (6, 7, 8),
27: (0, 9, 10),
28: (9, 10, 11),
29: (10, 11, 12),
30: (0, 13, 14),
31: (13, 14, 15),
32: (14, 15, 16),
33: (0, 17, 18),
34: (17, 18, 19),
35: (18, 19, 20)}
points_key = { '0': "Nose",
'1': "Neck",
'2': "RShoulder",
'3': "RElbow",
'4': "RWrist",
'5': "LShoulder",
'6': "LElbow",
'7': "LWrist",
'8': "MidHip",
'9': "RHip",
'10': "RKnee",
'11': "RAnkle",
'12': "LHip",
'13': "LKnee",
'14': "LAnkle",
'15': "REye",
'16': "LEye",
'17': "REar",
'18': "LEar",
'19': "LBigToe",
'20': "LSmallToe",
'21': "LHeel",
'22': "RBigToe",
'23': "RSmallToe",
'24': "RHeel"}
body_points = { "Nose" : [],
"Neck" : [],
"RShoulder" : [],
"RElbow" : [],
"RWrist" : [],
"LShoulder" : [],
"LElbow" : [],
"LWrist" : [],
"MidHip" : [],
"RHip" : [],
"RKnee" : [],
"RAnkle" : [],
"LHip" : [],
"LKnee" : [],
"LAnkle" : [],
"REye" : [],
"LEye" : [],
"REar" : [],
"LEar" : [],
"LBigToe" : [],
"LSmallToe" : [],
"LHeel" : [],
"RBigToe" : [],
"RSmallToe" : [],
"RHeel" : []}
frame_angle = {} # int frame number : [angles]
def angle_calc(x1, y1, x2, y2, x3, y3):
    """Return the angle in degrees at vertex (x2, y2) formed by the rays
    toward (x1, y1) and (x3, y3).

    Returns 0 when either ray has zero length (e.g. keypoints OpenPose
    reported as all zeros).
    """
    jx12 = x1 - x2
    jy12 = y1 - y2
    jx32 = x3 - x2
    jy32 = y3 - y2
    r12 = math.hypot(jx12, jy12)
    r32 = math.hypot(jx32, jy32)
    if r12 * r32 == 0:  # degenerate: a point coincides with the vertex
        return 0
    cos_theta = (jx12 * jx32 + jy12 * jy32) / (r12 * r32)
    # BUG FIX: floating-point rounding can push the cosine slightly outside
    # [-1, 1] for (anti)parallel rays, making math.acos raise ValueError.
    cos_theta = max(-1.0, min(1.0, cos_theta))
    return math.degrees(math.acos(cos_theta))
def body_frames():
    """Walk every per-video keypoints folder and write one angles file per video.

    NOTE(review): frame_parse() takes an integer frame number and builds its
    own path, but this loop passes it a file path — the two need to be
    reconciled before this runs end to end.
    """
    # You'll have to change the path names.
    # output_angle_calc is the folder that receives the per-video outputs.
    commpath0 = r'C:\Users\1707612\PycharmProjects\SeniorResearch\research-sophia_neha-master\research-sophia_neha-master\output' + r'\output_angle_calc'
    commpath = r'C:\Users\1707612\PycharmProjects\SeniorResearch\research-sophia_neha-master\research-sophia_neha-master\output\output_motion_test'
    for d1 in os.listdir(commpath):
        savepath = commpath0 + '\\' + d1 + '.txt'  # one output file per video
        f = open(savepath, 'x')  # 'x' mode fails if the file already exists
        for filename in os.listdir(commpath + '\\' + d1):
            if filename.endswith(".json"):
                output = frame_parse(commpath + '\\' + d1 + '\\' + filename)
                # BUG FIX: frame_parse returns a list, but file.write needs
                # a str — the original call raised TypeError.
                f.write(str(output) + '\n')
        f.close()  # BUG FIX: the handle was never closed
    return
def frame_parse(frame_num):
    """Compute every tracked body angle for one OpenPose keypoints frame.

    frame_num: integer frame index; it is zero-padded to 12 digits to build
    the keypoints JSON file name.
    Returns a list of angles (degrees), one per entry of body_angle_key.
    """
    path = "../output/video_output/VID_TEST_CASE_1_keypoints/VID_TEST_CASE_1_{0:012d}_keypoints.json".format(frame_num)
    # Context manager so the JSON file handle is always closed
    # (the original leaked it).
    with open(path, 'r') as json_frame:
        frame = json.loads(json_frame.read())
    frame_angles = []  # all angles for the frame, in body_angle_key order
    people = frame["people"]  # pose points, l/r hand points, face points (2d and 3d)
    for keys in people:
        print(keys)
        for key in keys:
            if key == "person_id":
                continue
            print(key)
            if key == 'pose_keypoints_2d':  # only the 2-D pose keypoints are used
                keypoints = keys[key]
                # Flat [x, y, confidence, ...] triples -> (x, y) pairs.
                grouped = [(keypoints[i], keypoints[i + 1]) for i in range(0, len(keypoints), 3)]
                for angval in body_angle_key:  # calculate all angles in frame
                    v = body_angle_key[angval]
                    # BUG FIX: the third joint previously passed its x
                    # coordinate twice (grouped[v[2]][0] for both x and y);
                    # it now passes the (x, y) pair as intended.
                    ang = angle_calc(grouped[v[0]][0], grouped[v[0]][1],
                                     grouped[v[1]][0], grouped[v[1]][1],
                                     grouped[v[2]][0], grouped[v[2]][1])
                    frame_angles.append(ang)
    return frame_angles
print(frame_parse(234));
'''print(angle_calc(1 ,0 ,0 ,0 ,0 ,1))
i = 234
with open("../output/video_output/VID_TEST_CASE_1_keypoints/VID_TEST_CASE_1_{0:012d}_keypoints.json".format(i), 'r') as tfile:
data = tfile.read()
frame0 = json.loads( data )
print(frame0)
for key_name in frame0: # key_name = "version", "people", "part_candidates"
print()
print(key_name)
if key_name == "people":
keyval = frame0[key_name ]# pose points, l/r hand points, face points (2d and 3d)
for i in keyval: # keyval: people -> hand, face keypoints etc. part_candidates -> candidates for the
for j in i: # body part before assembling, don't worry about it
if j in points_key:
print(j ,points_key[j], i[j])
else:
print(j, i[j])
frame_parse(234)
'''
'''for key_name in frame0: #key_name = "version", "people", "part_candidates"
print(key_name)
if k == "version":
continue
key_val = frame0[k] # people: person id,
print(key_val)
for i in key_val: # keyval: people -> hand, face keypoints etc. part_candidates -> candidates for the body part before
print(i) # assembling, don't worry about it
'''
| tjresearch/research-sophia_neha | anaylsis/archive/keypoints_parse_12-11-19.py | keypoints_parse_12-11-19.py | py | 8,455 | python | en | code | 0 | github-code | 13 |
def divisibleSumPairs(n, k, ar):
    """Count index pairs (i, j) with i < j where ar[i] + ar[j] is divisible by k.

    n is the length of ar (kept for the original HackerRank signature),
    k the divisor, ar the list of integers.
    """
    count = 0
    for i in range(len(ar)):
        # Starting j at i + 1 replaces the old full double loop that
        # re-checked `i < j` on every pair, plus the dead `j = 1` assignment
        # and the pointless `b = a` alias.
        for j in range(i + 1, len(ar)):
            if (ar[i] + ar[j]) % k == 0:
                count += 1
    return count
if __name__ == '__main__':
    # BUG FIX: `os` was used below but never imported anywhere in the file,
    # so the script crashed with NameError on startup.
    import os

    # HackerRank harness: read n, k and the array from stdin, write the
    # answer to the file named by OUTPUT_PATH.
    fptr = open(os.environ['OUTPUT_PATH'], 'w')
    first_multiple_input = input().rstrip().split()
    n = int(first_multiple_input[0])
    k = int(first_multiple_input[1])
    ar = list(map(int, input().rstrip().split()))
    result = divisibleSumPairs(n, k, ar)
    fptr.write(str(result) + '\n')
    fptr.close()
16948092367 | import os
import openpyxl
import datetime
# Open the Excel workbook that accumulates the punch-in timestamps.
book = openpyxl.load_workbook('dates.xlsx')
sheet = book.active

# Capture the current date and time.
dt = datetime.datetime.now()

# One column per timestamp component: [year, month, day, hour, minute, second].
dta = [dt.year, dt.month, dt.day, dt.hour, dt.minute, dt.second]
#print(len(dta))
print(dta)

# Scan column 1 top-down for the first empty row.
i = 1
while True:
    if sheet.cell(row=i,column=1).value is None:
        break
    i+=1
rows = i

# Write the timestamp components across that row, one per column.
clm = 1
for thing in dta:
    sheet.cell(row=rows,column=clm).value = thing
    clm+=1

book.save('dates.xlsx')
| RRRCCCIII/systemtest | timecard.py | timecard.py | py | 500 | python | en | code | 0 | github-code | 13 |
73374141137 | import urllib.request
import time
def get_price():
    """Scrape the coffee price from the beans-r-us page and drive the prompt.

    NOTE(review): the prompt/polling logic below sits *inside* this function
    and there is no ``return`` statement, so ``get_price()`` evaluates to
    ``None`` — the recursive calls below print ``None`` and the
    ``price > 4.74`` comparison would raise TypeError on Python 3.
    Presumably the scraping part was meant to ``return price`` and the
    prompt loop to live at module level — TODO confirm before refactoring.
    """
    page = urllib.request.urlopen("http://www.beans-r-us.biz/prices.html")
    text = page.read().decode("utf8")
    # The price follows the first '>$' marker and spans four characters.
    where = text.find('>$')
    start_of_price = where + 2
    end_of_price = start_of_price + 4
    price = float(text[start_of_price:end_of_price])

    price_now = input("Do you want to see the price now (y/n)? ")
    if price_now == "y":
        print(get_price())
    else:
        # Poll until the price drops to the 4.74 threshold, then buy.
        price = 99.99
    while price > 4.74:
        time.sleep(.900)
        price = get_price()
    print("Buy!")
| amgauna/Python-2021 | price/price-beans2.py | price-beans2.py | py | 536 | python | en | code | 3 | github-code | 13 |
70136640018 | from peewee import *
from datetime import datetime
import csv
class BaseModel(Model):
    """Common peewee base class for all models in this module."""
    class Meta:
        # Placeholder: the caller is expected to bind a real database handle
        # before any query runs (peewee supports deferred initialisation).
        database = None
class Device(BaseModel):
    """A call/SMS record captured from a device.

    NOTE(review): VOICE and CALL_FORWARD share the value 2, so the two call
    types are indistinguishable in stored rows and CALL_TYPES carries a
    duplicate key — CALL_FORWARD was presumably meant to be 3. Confirm
    against existing data before changing the constant.
    """
    SMS = 1
    VOICE = 2
    CALL_FORWARD = 2
    # (value, label) choices for the call_type field.
    CALL_TYPES = (
        (SMS, "SMS"),
        (VOICE, "VOICE"),
        (CALL_FORWARD, "CALL_FORWARD")
    )
    Anum = CharField(max_length=13 ,verbose_name='phone number starts with 98')
    Bnum = CharField(max_length=13 ,verbose_name='phone number starts with 98')
    Cnum = CharField(max_length=13 ,verbose_name='phone number starts with 98')
    duration = DecimalField(max_digits=4, decimal_places=2,
                            auto_round=True, verbose_name="round up second base number")
    location = CharField(max_length=10,
                         verbose_name="hex(lac)-hex(cell) sample CD8E-5F98 5 digit max each",
                         null=True)
    call_type = SmallIntegerField(choices=CALL_TYPES, default=SMS)
    device_name = CharField(max_length=255, verbose_name="name of the device")
    created = DateTimeField(default=datetime.now, formats=['%Y-%m-%d %H:%M:%S'])

    @property
    def formatted_time(self):
        # Render the creation timestamp with the same format it is stored in.
        return self.created.strftime('%Y-%m-%d %H:%M:%S')

    class Meta:
        table_name = "device"
class DeviceManager:
    """Console-oriented CRUD helper around the ``Device`` model.

    Methods print human-readable status messages instead of raising for the
    "row not found" case; genuine database errors now propagate (the original
    bare ``except:`` clauses swallowed them too).
    """

    def create(self, Anum="41", Bnum="96", Cnum="3",
               duration=7.10, location="K046E207",
               device_name="device number three"):
        """Insert a new Device row and report its name."""
        new_device = Device(Anum=Anum, Bnum=Bnum, Cnum=Cnum,
                            duration=duration, location=location,
                            device_name=device_name)
        new_device.save()
        print(f"The New Device has made with Name '{device_name}'")

    def update(self, device_id=1, device_name="updated one"):
        """Rename the device with ``device_id``; report if it is missing."""
        try:
            device = Device.get(Device.id == device_id)
        except Device.DoesNotExist:
            # Narrowed from a bare ``except``: only a missing row is a
            # user-level condition here; real DB errors must propagate.
            print(f"There is no Device with ID {device_id} to UPDATE!")
            return
        device.device_name = device_name
        device.save()
        print(f"The Devies With ID {device_id} UPDATED Successfuly.")

    def select(self):
        """Print every device row, or a notice when the table is empty."""
        devices = Device.select()
        if devices:
            for device in devices:
                print("id:", device.id, "\nAnum:", device.Anum,"\nBnum:", device.Bnum,
                      "\nduration:", device.duration, "\nlocation:", device.location,
                      "\ncall_type:", device.call_type, "\ndevice_name:",
                      device.device_name, "\ncreated:", device.formatted_time,"\n")
        else:
            print("There is no Data in The Table!")

    def delete(self, device_id=1):
        """Delete the device with ``device_id``; report if it is missing."""
        try:
            device = Device.get(Device.id == device_id)
        except Device.DoesNotExist:
            print(f"There Is No Device With ID {device_id} To DELETE!")
            return
        device.delete_instance()
        print(f"The Device With ID {device_id} DELETED Successfuly.")

    def export_to_csv(self):
        """Dump every device row into 'device.csv' (header + one row each)."""
        devices = Device.select()
        if devices:
            with open("device.csv", 'w', newline='') as csvfile:
                writer = csv.writer(csvfile)
                writer.writerow(['ID', 'Anum', 'Bnum', 'Cnum','duration',
                                 'location', 'call_type', 'device_name', 'created'])  # Write header
                for device in devices:
                    writer.writerow([device.id, device.Anum, device.Bnum,
                                     device.Cnum, device.duration, device.location,
                                     device.call_type, device.device_name, device.formatted_time])
        else:
            print("There is No Data in The Table to create CSV!")
| erfanfs10/Peewee-ORM-Postgresql | models.py | models.py | py | 3,779 | python | en | code | 2 | github-code | 13 |
27790107583 | #q4 Create a class MovieDetails and initialize it with Movie name, artistname,Year of release and ratings .
#Make methods to
#1. Display-Display the details.
#2. Update- Update the movie details.
class MovieDetails:
    """Holds a movie's name, artist, release year and rating, with console
    helpers to display and interactively overwrite the details."""

    def __init__(self, movname, artname, year, rating):
        self.movname = movname
        self.artname = artname
        self.year = year
        self.rating = rating
        # Blank separator line after construction, as in the original UI.
        print("")

    def display(self):
        """Print every stored field followed by a blank separator line."""
        for label, value in (("movie", self.movname),
                             ("artist name", self.artname),
                             ("release year:", self.year),
                             ("rating out of 5", self.rating)):
            print(label, value)
        print("")

    def update(self):
        """Re-prompt the user for each field and overwrite it."""
        self.movname = input("enter the new updated movie")
        self.artname = input("enter the artist name")
        self.year = input("enter its release year")
        self.rating = input("enter the rating out of 5")
# Collect the initial movie details from the user, show them, then let the
# user overwrite every field interactively and show the result again.
movname=input(" movie")
artname=input("artist name")
year=(input("release year"))
rating=(input("rating out of 5"))
s1=MovieDetails(movname,artname,year,rating)
s1.display()
s1.update()
s1.display()
27880690743 |
import grid as g
import cells as c
import numpy as np
def create_malha():
    """Placeholder for the mesh ("malha") builder — not implemented yet."""
    return
def create_city(cityname, Population, Area, Pop_ratio=1, Area_ratio=100):
    """Build a city: a grid covering ``Area`` plus ``Population`` randomly
    placed cells, then visualise the resulting density.

    Returns (grid, cellsmatrix).  NOTE(review): ``cityname`` and
    ``Pop_ratio`` are currently unused here — presumably reserved for
    persistence/scaling; confirm against save_city/read_city.
    """
    # first create city_grid and city_cellsmatrix
    grid = g.create_grid(Area, Area_ratio)
    cellsmatrix = c.create_cellsmatrix(Population)
    # random position of cells
    cellsmatrix = c.cells_randompos(cellsmatrix, grid)
    # read the position of cells e update cells matrix
    grid_popdensity = g.positionupdate(cellsmatrix, grid)
    # grid_visualizer
    g.grid_visualization(grid_popdensity)
    return grid, cellsmatrix
def save_city(cityname, grid, cellsmatrix):
    """Persist the cell matrix to '<cityname>.txt', one 'x y z ' line per cell.

    ``grid`` is accepted for signature compatibility but is not written.
    """
    with open(cityname + '.txt', 'w') as out:
        for row in cellsmatrix:
            out.write('{} {} {} \n'.format(row[0], row[1], row[2]))
    return print(cityname + ' saved data')
def read_city(cityname, Population, Pop_ratio=1):
    """Load '<cityname>.txt' into a (Population, 3) array of truncated ints.

    Each line carries three space-separated numbers; values are parsed as
    floats and truncated toward zero.  ``Pop_ratio`` is accepted for
    signature compatibility but unused.
    """
    cells = np.zeros((Population, 3))
    with open(cityname + '.txt', 'r') as src:
        for row, line in enumerate(src):
            parts = line.split(' ')
            for col in range(3):
                cells[row][col] = int(float(parts[col]))
    return cells
| lcscosta/CellAutCovidRP | cellautcovidrp/cities.py | cities.py | py | 1,266 | python | en | code | 0 | github-code | 13 |
4296236885 | from django.contrib.auth.models import Group
from django.core.checks import messages
from django.shortcuts import redirect, render
from django.http import HttpResponse, JsonResponse
from core.models import *
from core.forms import *
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.contrib.auth import authenticate, login, logout
from django.db.models import Q
import json
# Create your views here.
def _carro_context(request):
    """Return the cart context (items + running totals) for the current user.

    Falls back to an empty context for anonymous users or users without a
    ``Cliente`` profile (``request.user.cliente`` raises), so every page can
    render the cart widget safely — this replaces the identical try/except
    boilerplate that was copy-pasted into five views.
    """
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        context['items'] = compra.productocompra_set.all()
        context['carro'] = compra.get_comprar_productos
    except Exception:
        # Anonymous user / missing profile: render the page without a cart.
        pass
    return context


def _categoria_page(request, codigo, nombre):
    """Shared renderer for the three category listing pages."""
    context = _carro_context(request)
    context['productos'] = Producto.objects.all().filter(categoria=codigo)
    context['nombre'] = nombre
    return render(request, 'pages/categoria.html', context)


def home_page(request):
    """Landing page; shows the cart widget when the user has one."""
    return render(request, 'pages/home.html', _carro_context(request))


def mujer_page(request):
    """Women's ('MJ') category listing."""
    return _categoria_page(request, 'MJ', 'Mujer')


def hombre_page(request):
    """Men's ('HM') category listing."""
    return _categoria_page(request, 'HM', 'Hombre')


def nino_page(request):
    """Kids' ('NN') category listing."""
    return _categoria_page(request, 'NN', 'Niños')


def producto_page(request, pk):
    """Product detail page for the product with primary key ``pk``."""
    context = _carro_context(request)
    context['producto'] = Producto.objects.get(id=pk)
    return render(request, 'pages/producto.html', context)
# Clientes
def registrarse_page(request):
    """Sign-up view: creates the auth ``User`` plus its ``Cliente`` profile
    and adds the user to the 'cliente' group.

    NOTE(review): only ``form1`` (the user form) is validated; the extra
    Cliente fields are read straight from POST without checking
    ``form2.is_valid()`` — confirm whether that is intentional.
    """
    form1 = CreateUserForm()
    form2 = ClienteForm()
    if request.method == 'POST':
        form1 = CreateUserForm(request.POST)
        form2 = ClienteForm(request.POST)
        if form1.is_valid():
            user = form1.save()
            apellido_paterno = request.POST.get('apellido_paterno')
            apellido_materno = request.POST.get('apellido_materno')
            telefono = request.POST.get('telefono')
            # Every new account joins the 'cliente' group.
            group = Group.objects.get(name='cliente')
            user.groups.add(group)
            Cliente.objects.create(
                usuario = user,
                apellido_paterno=apellido_paterno,
                apellido_materno=apellido_materno,
                telefono=telefono
            )
            messages.success(request, 'Cuenta creada con exito')
        else:
            messages.error(request, 'La cuenta no pudo ser creada')
    context = {'formUser': form1, 'formCliente': form2}
    return render(request, 'pages/register.html', context)
def login_page(request):
    """E-mail + password login.

    The form collects an e-mail address, so the matching ``User`` is resolved
    first (Django authenticates by username) and then run through the normal
    authentication backend.
    """
    context = {}
    if request.method == 'POST':
        correo = request.POST.get('email')
        password = request.POST.get('password')
        try:
            usuario = User.objects.get(email=correo)
        except User.DoesNotExist:
            # BUG FIX: an unknown e-mail previously crashed the view with an
            # unhandled DoesNotExist (HTTP 500); show the same generic error
            # as a wrong password instead.
            messages.error(request, 'Usuario o contraseña incorrecto')
            return render(request, 'pages/login.html', context)
        user = authenticate(request, username=usuario.username, password=password)
        if user is not None:
            login(request, user)
            return redirect('home_page')
        else:
            messages.error(request, 'Usuario o contraseña incorrecto')
    return render(request, 'pages/login.html', context)
# TODO: add guard conditions for logged-in users / clients via decorators.
@login_required(login_url='home_page')
def carro_page(request):
    """Shopping-cart page; requires an authenticated user with a Cliente.

    BUG FIX: the original ran the same three queries twice (once before the
    try block and once inside it); they are now executed a single time.
    """
    cliente = request.user.cliente
    compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
    items = compra.productocompra_set.all()
    try:
        carro = compra.get_comprar_productos
    except Exception:
        # Mirror the original fallback: render without totals rather than
        # failing outright.
        carro = None
    context = {'items': items, 'compra': compra, 'carro': carro}
    return render(request, 'pages/carro.html', context)
def pagar_page(request):
    """Checkout page listing the items of the customer's open purchase.

    NOTE(review): unlike carro_page this view is not wrapped in
    @login_required, so an anonymous request raises on
    ``request.user.cliente`` — confirm whether the decorator is missing.
    """
    # TODO (from original): add try/except around each lookup except cliente.
    cliente = request.user.cliente
    compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
    items = compra.productocompra_set.all()
    context = {'items': items, 'compra': compra}
    return render(request, 'pages/pagar.html', context)
def updateItem(request):
    """AJAX endpoint that adds or removes one unit of a product in the
    customer's open (uncompleted) purchase.

    Expects a JSON body with ``productId`` and ``action`` ('add'/'remove');
    the cart line is deleted once its quantity drops to zero.
    """
    payload = json.loads(request.body)
    producto_id = payload['productId']
    accion = payload['action']
    print(producto_id, accion)

    cliente = request.user.cliente
    producto = Producto.objects.get(id=producto_id)
    compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
    producto_compra, creada = ProductoCompra.objects.get_or_create(compra=compra, producto=producto)

    # 'add' increments, 'remove' decrements, anything else leaves it as-is.
    delta = 1 if accion == 'add' else (-1 if accion == 'remove' else 0)
    producto_compra.cantidad = producto_compra.cantidad + delta
    producto_compra.save()

    if producto_compra.cantidad <= 0:
        producto_compra.delete()

    return JsonResponse('Item fue añadido', safe=False)
| felipe-quirozlara/proyecto-grupo-Hellmanns | changeWear/pages/views.py | views.py | py | 6,787 | python | es | code | 0 | github-code | 13 |
24060035410 | import python as LibPKMN
#
# This test's LibPKMN's internal functionality for copying shared pointers,
# which comes into place in custom copy constructors and assignment operators.
#
if __name__ == "__main__":
    # Build a team Pokémon and fetch two handles to its base Pokémon.
    # NOTE(review): presumably the boolean selects copy (True) vs shared
    # reference (False) semantics — confirm against the LibPKMN API.
    t_pkmn = LibPKMN.team_pokemon("Darmanitan", "X", 70, "None", "None", "None", "None")
    b_pkmn1 = t_pkmn.get_base_pokemon(True)
    b_pkmn2 = t_pkmn.get_base_pokemon(False)
    b_pkmn1.set_form("Standard")
    b_pkmn2.set_form("Zen")
    # Mutating the shared handle must be visible through the team Pokémon,
    # while the copied handle stays independent.
    assert(t_pkmn.get_pokemon_id() == b_pkmn2.get_pokemon_id())
    assert(t_pkmn.get_pokemon_id() != b_pkmn1.get_pokemon_id())
| codemonkey85/LibPKMN | tests/python_copy_sptr_test.py | python_copy_sptr_test.py | py | 583 | python | en | code | 0 | github-code | 13 |
19038079706 | #------------------------------
#GICS sectors:
#GICS_10_Utilities: 5510
# Sector / industry-group codes recommended for each business-cycle stage.
stages_ = {
    "recovery": {
        "GICS_3_Industrials": ["2010", "2020", "2030"],
        "GICS_8_Information_Technology": ["4510", "4520", "4530"],
        "GICS_9_Communication_Services": ["5010", "5020"],
        "GICS_7_Financials": ["4010", "4020", "4030"],
        "GICS_11_Real_Estate": ["6010"]
    },
    "expansion": {
        "GICS_1_Energy": ["1010"],
        "GICS_3_Industrials": ["2010", "2020", "2030"],
        # BUG FIX: '"2530," "2550"' were adjacent string literals and
        # silently concatenated into the single invalid code "2530,2550".
        "GICS_4_Consumer_Discretionary": ["2510", "2520", "2530", "2550"],
        "Paper_And_Forest_Products": ["151050"]
    },
    "slowdown": {
        "GICS_2_Materials": ["1510"],
        "GICS_6_Health_Care": ["3510", "3520"],
        "GICS_8_Information_Technology": ["4510", "4520", "4530"],
        "GICS_9_Communication_Services": ["5010", "5020"],
        "GICS_11_Real_Estate": ["6010"],
        "Aerospace_And_Defense": ["201010"]
    },
    "contraction": {
        # Same adjacent-string-literal fix as in "expansion".
        "GICS_4_Consumer_Discretionary": ["2510", "2520", "2530", "2550"],
        "GICS_5_Consumer_Staples": ["3010", "3020", "3030"],
        "GICS_6_Health_Care": ["3510", "3520"],
        "Aerospace_And_Defense": ["201010"]
    }
}
class Macro_analysis():
    """Derives the business-cycle stage from the *acceleration* of GDP and
    CPI (latest change divided by the previous change) and maps it to the
    ``stages_`` sector table.

    NOTE(review): both ratios divide by the previous period's change, so two
    equal consecutive readings raise ZeroDivisionError — confirm the inputs
    can never repeat.
    """
    def __init__(self, Two_Prev_GDP, Prev_GDP, This_GDP, Two_Prev_CPI, Prev_CPI, This_CPI) -> None:
        GDP_Change = This_GDP-Prev_GDP #change of GDP
        self.GDP_Change_Rate = (GDP_Change/Prev_GDP)/((Prev_GDP-Two_Prev_GDP)/Two_Prev_GDP) #Rate of change of GDP
        CPI_Change = This_CPI-Prev_CPI #change of CPI
        self.CPI_Change_Rate = (CPI_Change/Prev_CPI)/((Prev_CPI-Two_Prev_CPI)/Two_Prev_CPI) #Rate of change of CPI
    def get_market_stage(self):
        """Return the sector table for the detected stage.

        NOTE(review): implicitly returns None when either change rate is
        exactly 0 (no branch matches) — callers should handle that case.
        """
        if (self.GDP_Change_Rate>0 and self.CPI_Change_Rate<0): #1 - Recovery
            print("Focus on European, high yield, growing stocks")
            return stages_["recovery"]
        if (self.GDP_Change_Rate>0 and self.CPI_Change_Rate>0): #2 - Expansion
            print("Focus on European, high yield stocks")
            return stages_["expansion"]
        if (self.GDP_Change_Rate<0 and self.CPI_Change_Rate>0): #3 - Slowdown
            print("Focus on American, low volatility stocks")
            return stages_["slowdown"]
        if (self.GDP_Change_Rate<0 and self.CPI_Change_Rate<0): #4 - Contraction
            print("Focus on American, low volatility stocks")
            return stages_["contraction"]
17052975464 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class InsCoverage(object):
    """Insurance coverage record used by the Alipay OpenAPI SDK.

    Each wire field is exposed as a property backed by a same-named private
    attribute (e.g. ``premium`` <-> ``_premium``), initialised to ``None``.
    The explicit per-field boilerplate of the generated original is replaced
    by a single field table that drives __init__, the (de)serializers and
    the property definitions below.
    """

    # Wire-format field names, in the order the gateway dict is assembled.
    FIELDS = ('coverage_name', 'coverage_no', 'effect_end_time',
              'effect_start_time', 'iop', 'iop_premium', 'premium',
              'sum_insured')

    def __init__(self):
        for field in self.FIELDS:
            setattr(self, '_' + field, None)

    def to_alipay_dict(self):
        """Serialize to the gateway dict: falsy fields are skipped and
        nested objects delegate to their own ``to_alipay_dict``."""
        params = dict()
        for field in self.FIELDS:
            value = getattr(self, field)
            if value:
                if hasattr(value, 'to_alipay_dict'):
                    params[field] = value.to_alipay_dict()
                else:
                    params[field] = value
        return params

    @staticmethod
    def from_alipay_dict(d):
        """Build an InsCoverage from a gateway dict; ``None`` for empty input."""
        if not d:
            return None
        o = InsCoverage()
        for field in InsCoverage.FIELDS:
            if field in d:
                setattr(o, field, d[field])
        return o


def _ins_coverage_property(name):
    """Create the getter/setter property pair backing ``name``."""
    attr = '_' + name

    def _getter(self):
        return getattr(self, attr)

    def _setter(self, value):
        setattr(self, attr, value)

    return property(_getter, _setter)


# Install one property per wire field (same access pattern as the
# hand-written properties of the generated original).
for _field in InsCoverage.FIELDS:
    setattr(InsCoverage, _field, _ins_coverage_property(_field))
del _field
| alipay/alipay-sdk-python-all | alipay/aop/api/domain/InsCoverage.py | InsCoverage.py | py | 4,492 | python | en | code | 241 | github-code | 13 |
69960356819 | from flask import request, jsonify, Blueprint
from ..models.BookModel import BookModel, BookSchema
book_blueprint = Blueprint('books', __name__, url_prefix='/books')
book_schema = BookSchema()
@book_blueprint.route('/', methods=['GET', 'POST'])
def get_or_create_book():
    """GET: list every book. POST: validate the payload and insert a book.

    POST responses: 400 with a specific message for an empty name/author or
    a duplicate id; 201 on success.
    """
    if request.method == 'GET':
        result = BookModel.query.all()
        return jsonify(book_schema.dump(result, many=True)), 200
    elif request.method == 'POST':
        data = request.json
        errors = book_schema.validate(data)
        if errors.get("book_name"):
            return jsonify(Error="Book name cannot be empty"), 400
        elif errors.get("book_author"):
            return jsonify(Error="Author name cannot be empty"), 400
        else:
            # Missing optional fields default to empty strings.
            book = BookModel(data.get("book_id"), data.get("book_name", ''), data.get("book_author", ''))
            if book.add():
                return jsonify(Message="Book added successfully"), 201
            else:
                return jsonify(Error="Book with same id already exists"), 400
@book_blueprint.route('/<int:book_id>', methods=['GET'])
def get_book(book_id):
    """Return the book with the given primary key, or a 400 error payload."""
    book = BookModel.query.get(book_id)
    if book is None:
        return jsonify(Error='No book with that ID'), 400
    return jsonify(book_schema.dump(book)), 200
@book_blueprint.route('/<int:book_id>', methods=['DELETE'])
def delete_book(book_id):
    """Delete the book with the given primary key; 204 on success, 400 if unknown."""
    book = BookModel.query.get(book_id)
    if book is None:
        return jsonify(Error='No book with that ID'), 400
    book.delete()
    return jsonify(Message="Success"), 204
| dev-sajal/Library-Management-System-Flask | src/views/BookView.py | BookView.py | py | 1,577 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.