seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
38546263572 | from collections import OrderedDict
import torch
import torch.nn as nn
import torch.nn.functional as F
class Neck(nn.Module):
    """Stem block: two 3x3 conv-BN-ReLU stages whose outputs are summed.

    The sum requires mid_channels == out_channels, which holds for the
    default (mid_channels falls back to out_channels).
    """

    def __init__(self, in_channels, out_channels, mid_channels=None, conv_bias=True):
        super().__init__()
        mid_channels = mid_channels or out_channels

        def _stage(c_in, c_out):
            # 3x3 conv preserving spatial size, then BN and in-place ReLU.
            return nn.Sequential(
                nn.Conv2d(c_in, c_out, kernel_size=3, padding=1, bias=conv_bias),
                nn.BatchNorm2d(c_out),
                nn.ReLU(inplace=True),
            )

        self.conv1 = _stage(in_channels, mid_channels)
        self.conv2 = _stage(mid_channels, out_channels)

    def forward(self, x):
        first = self.conv1(x)
        second = self.conv2(first)
        # Residual-style fusion of the two stage outputs.
        return first + second
class ResBlock(nn.Module):
    """Residual block: 2 or 3 conv-BN-ReLU layers plus a 1x1 projection shortcut.

    Raises:
        ValueError: if num_convs is not 2 or 3.
    """

    def __init__(self, num_convs, in_channels, out_channels, mid_channels=None, conv_bias=True):
        super().__init__()
        if not mid_channels:
            mid_channels = out_channels
        if num_convs not in (2, 3):
            raise ValueError('num_convs must be 2 or 3')
        # Per-layer channel counts: widen to mid_channels, finish at out_channels.
        channels = [in_channels] + [mid_channels] * (num_convs - 1) + [out_channels]
        # 1x1 conv so the shortcut matches the main branch's channel count.
        self.conv0 = nn.Conv2d(in_channels, out_channels, kernel_size=1, bias=conv_bias)
        layers = OrderedDict()
        for i in range(num_convs):
            layers['conv{}'.format(i)] = nn.Conv2d(channels[i], channels[i + 1],
                                                   kernel_size=3, padding=1, bias=conv_bias)
            layers['bn{}'.format(i)] = nn.BatchNorm2d(channels[i + 1])
            layers['relu{}'.format(i)] = nn.ReLU(inplace=True)
        self.convs = nn.Sequential(layers)

    def forward(self, x):
        shortcut = self.conv0(x)
        return self.convs(x) + shortcut
class Down(nn.Module):
    """Encoder stage: 2x2 max-pool (halves H and W) followed by a ResBlock."""

    def __init__(self, num_convs, in_channels, out_channels):
        super().__init__()
        # Pool first so the ResBlock runs at the reduced resolution.
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            ResBlock(num_convs, in_channels, out_channels))

    def forward(self, x):
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Decoder stage: transposed-conv upsample of x1, concat with skip x2, then ResBlock.

    Args:
        num_convs: number of convs in the fusion ResBlock (2 or 3).
        in1_channels: channels of the low-resolution input (x1).
        in2_channels: channels of the skip connection (x2).
        out_channels: channels produced by the fusion ResBlock.
        bilinear: bilinear upsampling is not implemented; must be False.
        halve_channel: if True, the transposed conv halves x1's channels.

    Raises:
        NotImplementedError: if bilinear is True.
    """

    def __init__(self, num_convs, in1_channels, in2_channels, out_channels, bilinear=True, halve_channel=False):
        super().__init__()
        if bilinear:
            # Replaces the original bare `assert 0`: asserts are silently
            # stripped under `python -O`, and this names the actual problem.
            raise NotImplementedError("bilinear upsampling is not supported by Up")
        if halve_channel:
            mid_channels = in1_channels // 2
        else:
            mid_channels = in1_channels
        # 2x2 stride-2 transposed conv doubles H and W.
        self.up = nn.ConvTranspose2d(in1_channels, mid_channels, kernel_size=2, stride=2)
        self.conv = ResBlock(num_convs, mid_channels + in2_channels, out_channels)

    def forward(self, x1, x2):
        """Upsample x1, concatenate the skip tensor x2 in front, and fuse."""
        x1 = self.up(x1)
        x = torch.cat([x2, x1], dim=1)
        x = self.conv(x)
        return x
class OutConv(nn.Module):
    """Final 1x1 convolution mapping feature channels to per-class logits."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)
class MonoTrack(nn.Module):
    """U-Net-style shuttlecock/ball tracking network (MonoTrack).

    Reference: https://arxiv.org/abs/2204.01899
    """

    def __init__(self, n_channels, n_classes, bilinear=False, halve_channel=False):
        super().__init__()
        self.n_channels = n_channels
        self.n_classes = n_classes
        self.bilinear = bilinear
        self.halve_channel = halve_channel
        # Encoder: stem plus three downscaling stages (32 -> 64 -> 128 -> 256).
        self.inc = Neck(n_channels, 32)
        self.down1 = Down(2, 32, 64)
        self.down2 = Down(3, 64, 128)
        self.down3 = Down(3, 128, 256)
        # Decoder: three upscaling stages consuming the encoder skips.
        self.up1 = Up(3, 256, 128, 128, bilinear=bilinear, halve_channel=halve_channel)
        self.up2 = Up(2, 128, 64, 64, bilinear=bilinear, halve_channel=halve_channel)
        self.up3 = Up(2, 64, 32, 32, bilinear=bilinear, halve_channel=halve_channel)
        self.outc = OutConv(32, n_classes)

    def forward(self, x):
        skip1 = self.inc(x)
        skip2 = self.down1(skip1)
        skip3 = self.down2(skip2)
        bottom = self.down3(skip3)
        out = self.up1(bottom, skip3)
        out = self.up2(out, skip2)
        out = self.up3(out, skip1)
        # Keyed by 0 to match the multi-output dict interface used by callers.
        return {0: self.outc(out)}
| nttcom/WASB-SBDT | src/models/monotrack.py | monotrack.py | py | 4,496 | python | en | code | 0 | github-code | 13 |
31797857983 |
from django.test import TestCase, RequestFactory
import os
import sys
import django
# Django must be configured BEFORE importing any project models below:
# point at the settings module, put the project on sys.path, then boot.
os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
sys.path.append('/home/sany/the_test/the_test/the_test')
django.setup()
from product.models import Product, Category
from datetime import datetime, timedelta
from django.urls import reverse
from django.contrib.auth.models import User
from product.views import OnlyView
def create_product(days):
    """Create a Product in a fresh 'test' Category, timestamped `days` from now.

    Negative `days` yields a product created in the past.
    """
    category = Category.objects.create(name='test', slug='test', description='test')
    when = datetime.now() + timedelta(days=days)
    return Product.objects.create(
        category=category,
        name='test',
        slug='test',
        description='test',
        price=3.4,
        created_at=when,
        modified_at=when,
    )
class ProductViewTest(TestCase):
    """View tests for the '24-hours-only' product listing (OnlyView)."""

    def setUp(self):
        # RequestFactory lets us call the view callable directly with a
        # hand-built request (bypassing middleware/URL routing).
        self.factory = RequestFactory()
        self.user = User.objects.create_user(username='test18', email='test@test.com', password='test12345')

    def test_product_created_more_than_tf_hours_with_no_user(self):
        # Anonymous access: the view should redirect (login required).
        create_product(days=0)
        response = self.client.get(reverse('product:only'))
        # this one must redirect
        self.assertEqual(response.status_code, 302)

    def test_product_created_more_than_tf_hours_with_user(self):
        request = self.factory.get('/24_only')
        request.user = self.user
        # Product created two days ago -- outside the 24-hour window.
        create_product(days=-2)
        response = OnlyView.as_view()(request)
        # must display nothing
        self.assertEqual(response.status_code, 200)
        self.assertQuerysetEqual(response.context_data['only_24'], [])

    def test_product_created_less_than_tf_hours_with_user(self):
        request = self.factory.get('/24_only')
        request.user = self.user
        # Product created "now" -- inside the 24-hour window.
        create_product(days=0)
        response = OnlyView.as_view()(request)
        # must display 'test'
        self.assertEqual(response.status_code, 200)
        self.assertQuerysetEqual(list(response.context_data['only_24']), ['<Product: TEST test>'])
| ripiuk/the_test | product/tests.py | tests.py | py | 1,980 | python | en | code | 0 | github-code | 13 |
42588817193 | #!/usr/bin/python3
from socket import *
import json
import threading
import sys
import os
ip_user={}
user_ip={}
class ServerService(object):
    """Per-connection chat/service handler.

    Relies on the module-level dicts ip_user / user_ip to map between client
    addresses and usernames; these are shared across handler threads without
    locking (NOTE(review): concurrent registration could race -- verify).
    """

    def __init__(self):
        # Counter bumped by the accept loop for each new connection.
        self.next_serial_number = 1

    def HandleTCP(self, sockfd, saddr):
        """Serve one TCP client: negotiate a unique username, then answer commands."""
        try:
            sermsg="Hello, please assign your username:"
            sockfd.send(sermsg.encode())
            # Loop until the client supplies a username not already taken.
            while 1:
                msg = sockfd.recv(1024).decode()
                username=msg
                if username in user_ip:
                    sermsg="The username is already used!"
                    sockfd.send(sermsg.encode())
                else:
                    break
            # Register the user in both direction maps.
            ip_user[saddr]=username
            user_ip[username]=saddr
            sermsg="Welcome, "+username
            sockfd.send(sermsg.encode())
            # Command loop: one request per recv, one reply per command.
            while True:
                msg = sockfd.recv(1024).decode()
                if msg.find('\n')!=-1:
                    pos=msg.find('\n')
                    #print("pos:",pos)
                    # NOTE(review): pos-1 also drops the character *before*
                    # '\n' (presumably a '\r'); if the client sends a bare
                    # '\n' this truncates the last command character -- verify.
                    msg=msg[:pos-1]
                #content = json.loads(msg)
                #print(msg)
                cmd = msg.split(" ")[0]
                servmsg = self.HandleClientMsg(msg, saddr)
                sockfd.send(servmsg.encode())
                if cmd == "exit":
                    # Drop both registrations and close the connection.
                    username=ip_user[saddr]
                    del ip_user[saddr]
                    del user_ip[username]
                    sockfd.close()
                    print(username,"\t",saddr[0],":",saddr[1],"\tdisconnected")
                    return
        except KeyboardInterrupt:
            return

    def HandleClientMsg(self, msg, addr):
        """Dispatch one client command and build the reply string.

        Supported commands: list-users, sort-users, exit.
        Unknown commands yield an empty reply.
        """
        cmd = msg.split(" ")[0]
        Arg=msg.split(" ")
        servmsg=""
        if cmd=="list-users":
            # One "name<TAB>ip:port" line per connected user, dict order.
            for it in user_ip:
                ad=user_ip[it]
                servmsg=servmsg+it+'\t'+str(ad[0])+":"+str(ad[1])+'\n'
        if cmd=="sort-users":
            # Same listing, but with usernames sorted alphabetically.
            mmm=[]
            for it in user_ip:
                mmm.append(it)
            mmm.sort()
            for it in mmm :
                ad=user_ip[it]
                servmsg=servmsg+it+'\t'+str(ad[0])+":"+str(ad[1])+'\n'
        if cmd=="exit":
            name=ip_user[addr]
            servmsg="Bye, "+name+".\n"
        return servmsg
def main():
    """Parse the listen port, accept TCP clients, and spawn one handler thread each.

    Returns 0 after a KeyboardInterrupt shuts the listener down.
    """
    if len(sys.argv) != 2:
        print(f"\tUsage {sys.argv[0]} <Port>")
        exit(-1)
    HOSTNAME, ListenPort = "", int(sys.argv[1])
    TCPsockfd = socket(AF_INET, SOCK_STREAM)
    TCPsockfd.bind((HOSTNAME, ListenPort))
    TCPsockfd.listen(30)
    server=ServerService()
    # TCP_thread
    TCP_thread = []
    try:
        while True:
            now_TCPfd, now_addr = TCPsockfd.accept()
            print("New connection from ",now_addr[0],":",now_addr[1])
            server.next_serial_number+=1
            # One thread per client; threads are kept in the list so they
            # stay referenced, but are never joined.
            TCP_thread.append(
                threading.Thread(
                    target=server.HandleTCP,
                    args=(
                        now_TCPfd,
                        now_addr,
                    ),
                )
            )
            TCP_thread[-1].start()
    except KeyboardInterrupt:
        # Ctrl-C: close the listening socket and exit cleanly.
        print("\nClose Server.")
        for sockfd in [TCPsockfd]:
            sockfd.close()
    return 0
if __name__ == "__main__":
main() | axde954e6/NCTU-Intro.2_NP | NP/mid/0712534/P2/server.py | server.py | py | 3,506 | python | en | code | 0 | github-code | 13 |
70182864977 | import uwsgi
def application(env, start_response):
    """WSGI entry point: answer OPTIONS immediately, otherwise sleep then respond."""
    if env['REQUEST_METHOD'] == 'OPTIONS':
        body = b'Ok'
    else:
        # Yield to the uwsgi async loop for one second before responding.
        uwsgi.async_sleep(1)
        body = b"Hello World"
    headers = [
        ('Content-Type', 'text/plain'),
        ('Content-Length', str(len(body))),
    ]
    start_response('200 OK', headers)
    return [body]
| baverman/services | haproxy/app.py | app.py | py | 345 | python | en | code | 0 | github-code | 13 |
36308689394 | import requests
import tkinter as tk
from tkinter import messagebox
class SportsApp:
    """Minimal tkinter front-end for searching TheSportsDB for players and events."""

    # TheSportsDB API key; "3" is the free public test key. (The original
    # assigned a local api_key and never used it -- the key is now actually
    # interpolated into the request URLs.)
    API_KEY = "3"

    def __init__(self, root):
        """Build the search UI inside the given Tk root window."""
        self.root = root
        self.root.title("Sports App")
        self.search_label = tk.Label(root, text="Enter Player/Event:")
        self.search_label.pack()
        self.search_entry = tk.Entry(root)
        self.search_entry.pack()
        self.search_button = tk.Button(root, text="Search", command=self.search_info)
        self.search_button.pack()

    def search_info(self):
        """Look up the entered text as a player first, then fall back to events."""
        query = self.search_entry.get()
        if not query:
            messagebox.showwarning("Warning", "Please enter a team/player/sport/event.")
            return
        # Search for players by name. Use a truthiness check: TheSportsDB
        # returns {"players": null} on no match, which the original
        # `"players" in data` membership test would have crashed on.
        player_url = f"https://www.thesportsdb.com/api/v1/json/{self.API_KEY}/searchplayers.php?p={query}"
        player_data = self.api_request(player_url)
        if player_data.get("players"):
            player = player_data["players"][0]
            messagebox.showinfo(
                "Player Information",
                f"Player: {player['strPlayer']}\nSport: {player['strSport']}\n"
                f"Description: {player['strDescriptionEN']}",
            )
            return
        # Search for events by event name.
        event_url = f"https://www.thesportsdb.com/api/v1/json/{self.API_KEY}/searchevents.php?e={query}"
        event_data = self.api_request(event_url)
        if event_data.get("events"):
            event = event_data["events"][0]
            messagebox.showinfo(
                "Event Information",
                f"Event: {event['strEvent']}\nSeason: {event['strSeason']}",
            )
            return
        messagebox.showinfo("Information", "No results found.")

    def api_request(self, url):
        """GET `url` and return the decoded JSON dict, or {} on connection failure."""
        try:
            response = requests.get(url)
            return response.json()
        except requests.ConnectionError:
            messagebox.showerror("Error", "Failed to connect to TheSportsDB API.")
            return {}
if __name__ == "__main__":
root = tk.Tk()
app = SportsApp(root)
root.geometry("350x250")
root.mainloop()
| debsicat22/AdvanceProgramming | Assessment2/assessment2API.py | assessment2API.py | py | 2,264 | python | en | code | 0 | github-code | 13 |
def main():
    """Train SAC on a goal-conditioned gripper maze environment.

    Command-line flags: --env_name (environment id, default GripperUmaze)
    and --path (log/model subdirectory under MODEL_PATH).
    """
    import gym
    import os
    import argparse
    from solver.networks import PointnetBackbone
    from solver.goal_env import make_env
    from rl.vec_envs import SubprocVectorEnv, DummyVectorEnv
    from rl.sac_agent import SACAgent
    from tools.utils import logger
    from torch.multiprocessing import set_start_method
    from solver import MODEL_PATH

    parser = argparse.ArgumentParser()
    parser.add_argument("--env_name", type=str, default='GripperUmaze')
    parser.add_argument("--path", type=str, default='sac')
    args, _ = parser.parse_known_args()
    # Log to csv + tensorboard + stdout under MODEL_PATH/<path>.
    logger.configure(os.path.join(MODEL_PATH, args.path), format_strs='csv+tensorboard+stdout'.split('+'))

    # Single-environment "vector" env (DummyVectorEnv with one worker).
    env = DummyVectorEnv([lambda: make_env(args.env_name) for i in range(1)])

    agent = SACAgent.parse(
        env.observation_space[0],
        env.action_space[0],
        nsteps=None,
        eval_episode=50,
        actor=dict(backbone=dict(TYPE="PointnetBackbone"), head=dict(TYPE="MaskHead", std_mode='statewise')),
        **dict(
            evaluator_cfg=dict(
                render_episodes=1
            )
        ),
        start_step=128,
        parser=parser
    ).cuda()
    print('start ...')
    # Effectively train forever; each train() call is one update cycle.
    for i in range(5000000):
        agent.train(env)
if __name__ == '__main__':
main() | haosulab/RPG | solver/trainer/train_sac.py | train_sac.py | py | 1,304 | python | en | code | 18 | github-code | 13 |
continuer = 'o'
# Start with an empty list that will hold the films added by the user.
liste_de_films = []
# Main loop: keep prompting while the user answers 'o' (oui/yes).
# NOTE: uses raw_input, so this script targets Python 2.
while continuer == 'o':
    # Read the title of the film to add.
    film_a_ajouter = raw_input('Entrez un titre de film a ajouter: ')
    # Lower-cased copy of the list for a case-insensitive duplicate check.
    liste_minuscule = [film.lower() for film in liste_de_films]
    # Reject the film if it is already present (ignoring case).
    if film_a_ajouter.lower() in liste_minuscule:
        print('{0} est deja present dans la liste'.format(film_a_ajouter))
    else:
        # Not a duplicate: append it to the list.
        liste_de_films.append(film_a_ajouter)
    # Ask whether the user wants to add another film.
    continuer = raw_input('Voulez-vous ajouter un autre film? o/n ')
    print('')
# Sort the list and print it.
liste_de_films.sort()
print(liste_de_films)
| yogisen/python | BasesUdemy/list/Mini-projet-Cr-er-une-liste-de-films.py | Mini-projet-Cr-er-une-liste-de-films.py | py | 954 | python | fr | code | 1 | github-code | 13 |
17196207203 | import matplotlib
matplotlib.use("Agg")
import matplotlib.pyplot as plt
plt.rcParams.update({'font.size': 12})
import multiprocessing as mp
import numpy as np
import os
from algorithms.gradient_descent import GradientDescent
from algorithms.iterative_threshold_methods import IterativeThresholdMethods
import utils.constants as constants
from utils.draw import Draw
from utils.error import Error
from utils.generate_data import GenerateData
class ChangeSignal:
    """Simulations on the change of exact recovery w.r.t. the signal.
    """
    def __init__(self, kappa, error_name=constants.EXACT_RECOVERY_NAME):
        """Initialize.

        @param kappa - condition number used when generating the design.
        @param error_name - error metric to track (defaults to exact recovery).
        """
        self.kappa = kappa
        self.error_name = error_name
        self.design = constants.ANISOTROPIC_NAME
        self.steps = constants.GD_STEPS  # number of experiments
        self.num_iter = constants.GD_NUM_ITERATION  # number of iterations
        # Algorithms to compare: gradient-descent variants crossed with
        # hard-threshold variants, plus standalone iterative-threshold methods.
        self.gd_types = (constants.GD_NAME, constants.FAST_NEWTON_NAME)
        self.iter_types = (constants.IHT_NAME, constants.HTP_NAME)
        self.iterative_threshold_methods = (constants.ISTA_NAME, )

    def _update_algos_map(self, algos_map, algo_name, a, error_added):
        """A private function to update @param algos_map.

        Accumulates (adds) error_added onto the existing entry, if any.

        @param algos_map: key is algo name, value is a dict with signal-error as key-value pair.
        @param algo_name: the key of algos_map which needs an update.
            e.g. constants.ISTA_NAME, constants.GD_NAME+constants.IHT_NAME, see utils.constants.
        @param a - the signal value.
        @param error_added: the error to be added to the map.
        @return the updated map.
        """
        if algos_map.get(algo_name):
            curr_map = algos_map[algo_name]
        else:
            curr_map = dict()
        if curr_map.get(a):
            curr_map[a] += error_added
        else:
            curr_map[a] = error_added
        algos_map[algo_name] = curr_map
        return algos_map

    def run_one_experiment(self, a):
        """Run one experiment on the change of exact recovery w.r.t. the signal.
        @param a - the value of the true signal
        @return algo_name gener_error hashmap.
        """
        # Sparse ground truth: S entries equal to a, the rest zero,
        # randomly permuted across the P positions.
        signal = a * np.ones((constants.P))
        signal[constants.S:] = 0
        signal = np.random.permutation(signal)
        y, H, self.SIGMA_half = GenerateData(self.design, self.kappa,
                                             signal).generate_data()
        algos_map = dict()
        for algo_name in self.iterative_threshold_methods:
            _, best_lambda, gener_error = IterativeThresholdMethods(
                self.error_name).get_errors_by_cv(signal, y, H, self.num_iter,
                                                  self.SIGMA_half, algo_name,
                                                  False)
            # To add {algo_name: {a, final_error}} key-value-pair to algos_map.
            algos_map = self._update_algos_map(algos_map, algo_name, a,
                                               gener_error[-1])
            print(algo_name, best_lambda)
        for gd_type in self.gd_types:
            for iter_type in self.iter_types:
                _, best_lambda, gener_error = GradientDescent(
                    constants.FAST_NEWTON_NUM_GD,
                    self.error_name).get_errors_by_cv(signal, y, H,
                                                      self.num_iter,
                                                      self.SIGMA_half, gd_type,
                                                      iter_type, False)
                # Combined methods are keyed as "<gd>+<threshold>".
                algo_name = gd_type + "+" + iter_type
                algos_map = self._update_algos_map(algos_map, algo_name, a,
                                                   gener_error[-1])
                print(algo_name, best_lambda)
        return algos_map

    def change_signal(self, signal_range):
        """Run several experiments and get the change of exact recovery w.r.t. signal.

        @param signal_range - iterable of signal magnitudes to sweep over.
        """
        algos_map = {}
        for _ in range(self.steps):  # Run several experiments
            for a in signal_range:
                map_result = self.run_one_experiment(a)
                # Accumulate each algorithm's error, pre-divided by
                # self.steps so the final map holds the mean.
                for algo_name in map_result:
                    for signal in map_result[algo_name]:
                        error = map_result[algo_name][signal] / self.steps
                        algos_map = self._update_algos_map(
                            algos_map, algo_name, signal, error)
        for algo_name in algos_map:
            Draw().plot_using_a_map(algos_map[algo_name], algo_name)
        # Store @param algos_map in a npy (binary) format.
        with open(
                os.path.dirname(os.path.abspath(__file__)) +
                "/figures/change signal/result_dict_kappa" +
                str(int(self.kappa)) + ".npy", 'wb') as f:
            np.save(f, algos_map)
        plt.xlabel("signal a")
        plt.ylabel("exact recovery")
        plt.title("Change signal with kappa " + str(int(self.kappa)))
        plt.legend()
        plt.savefig(
            os.path.dirname(os.path.abspath(__file__)) +
            "/figures/change signal/comparison by change of signal kappa" +
            str(int(self.kappa)) + ".pdf")
        plt.clf()
if __name__ == "__main__":
"""To change the condition number, modify kappa.
"""
kappa = 1.
signal_range = [0.001, 0.005, 0.01, 0.03, 0.05, 0.07, 0.1, 0.15]
ChangeSignal(kappa).change_signal(signal_range)
| Ruola/Sparse-Linear-Regression | support_recovery.py | support_recovery.py | py | 5,465 | python | en | code | 0 | github-code | 13 |
18187983306 | from analysis import HolisticAnalyis, reset_wcrt
from vector.vholistic import VectorHolisticAnalysis
from mast.mast_wrapper import MastHolisticAnalysis
import time
import numpy as np
from examples import get_medium_system, get_big_system
from assignment import PDAssignment
from random import Random
import pandas as pd
import matplotlib.pyplot as plt
def step(system, scenarios):
    """
    Measures the execution vector_times of analyzing a number of priority scenarios
    :param system:
    :param scenarios:
    :return: (mast vector_times, holistic vector_times, vector vector_times) tuple
    """
    mast = MastHolisticAnalysis(limit_factor=10)
    hol = HolisticAnalyis(limit_factor=10)
    vec = VectorHolisticAnalysis(limit_factor=10)

    # note: perf_counter returns vector_times in fractional seconds:
    # https://docs.python.org/3/library/time.html#time.perf_counter

    # MAST Holistic: sequentially analyze each priority scenario
    before = time.perf_counter()
    for _ in range(scenarios):
        system.apply(mast)
    mast_time = time.perf_counter() - before

    # Holistic: sequentially analyze each priority scenario
    before = time.perf_counter()
    for _ in range(scenarios):
        system.apply(hol)
    hol_time = time.perf_counter() - before

    # Vector Holistic: analyze all the scenarios in parallel
    before = time.perf_counter()
    t = len(system.tasks)
    if scenarios > 1:
        # Replicate the current priority assignment into scenarios-1 extra
        # columns so the vector analysis evaluates all of them in one pass
        # (the base scenario itself is the remaining one).
        priorities = np.array([task.priority for task in system.tasks]).reshape((t, 1)).repeat(scenarios-1, axis=1)
        vec.set_priority_scenarios(priorities)
    system.apply(vec)
    vec_time = time.perf_counter() - before

    return np.asarray((mast_time, hol_time, vec_time))
def run(systems, name):
    """Time every system at several scenario counts and save accumulated results.

    Results are re-saved after each system so partial data survives interruption.
    """
    print(f"Running {name}")
    # NOTE(review): this local `pd` shadows the module-level pandas alias
    # inside run() -- harmless here, but confusing; consider renaming.
    pd = PDAssignment()
    scenarios = [1, 10, 100, 1000, 10000]
    tools = ["holistic-mast", "holistic", "holistic-vector"]
    # results[tool, scenario] accumulates execution times across systems.
    results = np.zeros(shape=(len(tools), len(scenarios)), dtype=np.float64)
    for s, system in enumerate(systems):
        print(f" system {s}:", end="")
        system.apply(pd)
        for i, scenario in enumerate(scenarios):
            print(f" {scenario}", end="")
            res = step(system, scenario)
            results[:, i] += res
        save(results, s+1, tools, scenarios, name)
        print()
def save(results, number, columns, index, name):
    """Average accumulated results over `number` systems, export xlsx + log-log plot.

    @param results: (tools x scenarios) accumulated times; transposed for the frame.
    @param number: how many systems have been accumulated so far (divisor).
    """
    df = pd.DataFrame(results.T, columns=columns, index=index) / number
    df.to_excel(f"{name}.xlsx")

    fig, ax = plt.subplots()
    df.plot(ax=ax, xlabel="number of scenarios", ylabel="execution vector_times (s)",
            use_index=True, logy=True, logx=True, figsize=(6, 3))
    fig.tight_layout()
    fig.savefig(f"{name}.png")
    fig.show()
if __name__ == '__main__':
# medium-size systems
random = Random(42)
mediums = [get_medium_system(random, utilization=0.7) for _ in range(10)]
run(mediums, "vector-vector_times-medium")
# big-size systems
random = Random(71)
bigs = [get_big_system(random, utilization=0.7) for _ in range(10)]
run(bigs, "vector-vector_times-big")
| rivasjm/gdpa | paper/vector_times/vector_time.py | vector_time.py | py | 3,085 | python | en | code | 0 | github-code | 13 |
41467103235 | """User preference modeling interface.
Preferences are stored for (1) items in the collection and (2) slot-value pairs
(for slots defined in the domain). Preferences are represented as real values
in [-1,1], where zero corresponds to neutral.
"""
import random
import string
from abc import ABC, abstractmethod
from typing import Optional, Tuple

from dialoguekit.core.domain import Domain

from usersimcrs.items.item_collection import ItemCollection
from usersimcrs.items.ratings import Ratings
# Key used to identify items. This will need to be replaced once support for
# multiple entity types is added.
KEY_ITEM_ID = "ITEM_ID"
class PreferenceModel(ABC):
    """Representation of the user's preferences."""

    # Above this threshold, a preference is considered positive;
    # below -1 x this threshold, a preference is considered negative;
    # otherwise, it's considered neutral.
    PREFERENCE_THRESHOLD = 0.25

    def __init__(
        self,
        domain: Domain,
        item_collection: ItemCollection,
        historical_ratings: Ratings,
        historical_user_id: Optional[str] = None,
    ) -> None:
        """Initializes the preference model of a simulated user.

        A list of initial seen items is generated based on historical ratings.
        Further preferences are inferred along the way as the simulated user is
        being prompted by the agent for preferences.

        Args:
            domain: Domain.
            item_collection: Item collection.
            historical_ratings: Historical ratings.
            historical_user_id (Optional): If provided, the simulated user is
                based on this particular historical user; otherwise, it is based
                on a randomly sampled user. This is mostly added to make the
                class testable. Defaults to None.
        """
        self._domain = domain
        self._item_collection = item_collection
        self._historical_ratings = historical_ratings

        # If historical user ID is not provided, randomly pick one.
        self._historical_user_id = (
            historical_user_id or self._historical_ratings.get_random_user_id()
        )
        # Create a random user ID (in {real_user_id}_{3_random_chars} format).
        random_str = "".join(random.choices(string.ascii_uppercase, k=3))
        self._user_id = f"{self._historical_user_id}_{random_str}"

    def is_item_consumed(self, item_id: str) -> bool:
        """Returns whether or not an item has been consumed by the user.

        This is used to answer questions like: "Have you seen Inception?"

        Args:
            item_id: Item ID.

        Returns:
            True if the item has been consumed (i.e., appears among the
            historical ratings).
        """
        return (
            self._historical_ratings.get_user_item_rating(
                self._historical_user_id, item_id
            )
            is not None
        )

    def _assert_item_exists(self, item_id: str) -> None:
        """Checks if item exists in the collection and throws an exception if
        not."""
        if not self._item_collection.exists(item_id):
            raise ValueError("Item does not exist in item collection.")

    def _assert_slot_exists(self, slot: str) -> None:
        """Checks if slot exists in the domain and throws an exception if
        not."""
        if slot not in self._domain.get_slot_names():
            raise ValueError(
                f"The slot '{slot}' does not exist in this domain."
            )

    @abstractmethod
    def get_item_preference(self, item_id: str) -> float:
        """Returns a preference for a given item.

        This is used to answer questions like: "How did you like it?",
        where "it" refers to the movie mentioned previously.

        Args:
            item_id: Item ID.

        Returns:
            Item preference, which is generally in [-1,1].

        Raises:
            NotImplementedError: If not implemented in derived class.
        """
        raise NotImplementedError

    @abstractmethod
    def get_slot_value_preference(self, slot: str, value: str) -> float:
        """Returns a preference on a given slot-value pair.

        This is used to answer questions like: "Do you like action movies?"

        Args:
            slot: Slot name (needs to exist in the domain).
            value: Slot value.

        Returns:
            Slot-value preference.

        Raises:
            NotImplementedError: If not implemented in derived class.
        """
        raise NotImplementedError

    def get_slot_preference(self, slot: str) -> Tuple[Optional[str], float]:
        """Returns a preferred value for a given slot.

        This is used to answer questions like: "What movie genre do you prefer?"

        While in principle negative preferences could also be returned, here it
        is always a positive preference that is expressed.

        Args:
            slot: Slot name (needs to exist in the domain).

        Returns:
            A value and corresponding preferences; if no preference could be
            obtained for that slot, then (None, 0) are returned.
        """
        self._assert_slot_exists(slot)
        preference = None
        attempts = 0

        while not preference:
            # Pick a random value for the slot.
            value: str = random.choice(
                list(self._item_collection.get_possible_property_values(slot))
            )
            preference = self.get_slot_value_preference(slot, value)
            if preference < self.PREFERENCE_THRESHOLD:
                preference = None
            # It would in principle be possible to enter into an infinite loop
            # here (e.g., if there is a small set of possible values for the
            # slot and the user has already expressed negative preference on all
            # of them), therefore we limit the number of attempts.
            attempts += 1
            if attempts == 10:
                return None, 0

        return value, preference
| iai-group/UserSimCRS | usersimcrs/user_modeling/preference_model.py | preference_model.py | py | 6,028 | python | en | code | 8 | github-code | 13 |
11628571445 | #!/usr/bin/env python
import time
import numpy as np
from olympus.objects import ParameterVector
from olympus.planners.abstract_planner import AbstractPlanner
from olympus.planners.utils_planner import get_bounds, get_init_guess
from olympus.utils import daemon
# ===============================================================================
class SteepestDescent(AbstractPlanner):
    """Steepest-descent planner using forward finite-difference gradients.

    The optimizer loop runs in a background daemon thread and exchanges work
    with the ask/tell interface through the SUBMITTED_PARAMS / RECEIVED_VALUES
    lists (presumably provided by AbstractPlanner -- TODO confirm).
    """

    PARAM_TYPES = ["continuous"]

    def __init__(
        self,
        goal="minimize",
        learning_rate=1e-3,
        dx=1e-5,
        random_seed=None,
        init_guess=None,
        init_guess_method="random",
        init_guess_seed=None,
    ):
        """
        Args:
            goal: optimization goal ("minimize" or "maximize").
            learning_rate: step size multiplying the gradient.
            dx: finite-difference perturbation size.
            random_seed: random seed for the planner.
            init_guess (array, optional): initial guess for the optimization
            init_guess_method (str): method to construct initial guesses if init_guess is not provided.
                                     Choose from: random
            init_guess_seed (str): random seed for init_guess_method
        """
        AbstractPlanner.__init__(**locals())
        # The background optimizer thread is started lazily on the first ask().
        self.has_optimizer = False

    def _set_param_space(self, param_space):
        self.param_space = param_space
        self.bounds = get_bounds(param_space)
        if self.init_guess is None:
            self.init_guess = get_init_guess(
                param_space,
                method=self.init_guess_method,
                random_seed=self.init_guess_seed,
            )

    def _tell(self, observations):
        # Record observed params/values; `opposite` flips measurements so a
        # maximization goal becomes a minimization internally.
        self._params = observations.get_params(as_array=False)
        self._values = observations.get_values(
            as_array=True, opposite=self.flip_measurements
        )
        if len(self._values) > 0:
            # Hand the newest measurement to the waiting optimizer thread.
            self.RECEIVED_VALUES.append(self._values[-1])

    def _priv_evaluator(self, params):
        # Submit a candidate and busy-wait until its objective value is
        # told back via _tell (0.1 s polling).
        params = self._project_into_domain(params)
        self.SUBMITTED_PARAMS.append(params)
        while len(self.RECEIVED_VALUES) == 0:
            time.sleep(0.1)
        value = self.RECEIVED_VALUES.pop(0)
        return value

    @daemon
    def start_optimizer(self):
        # Steepest-descent loop (runs forever in a daemon thread): evaluate
        # the current guess, estimate each partial derivative by a forward
        # finite difference of size dx, then step against the gradient.
        guess = self.init_guess.copy()
        while True:
            func = self._priv_evaluator(guess)
            dy = np.zeros(len(guess))
            perturb = guess.copy()
            for index in range(len(guess)):
                perturb[index] += self.dx
                probed = self._priv_evaluator(perturb)
                dy[index] = (probed - func) / self.dx
                perturb[index] -= self.dx
            guess = guess - self.learning_rate * dy
            guess = self._project_into_domain(guess)

    def _ask(self):
        if self.has_optimizer is False:
            self.start_optimizer()
            self.has_optimizer = True
        # Busy-wait for the optimizer thread to submit its next candidate.
        while len(self.SUBMITTED_PARAMS) == 0:
            # print('SUBMITTED_PARAMS', len(self.SUBMITTED_PARAMS))
            time.sleep(0.1)
        params = self.SUBMITTED_PARAMS.pop(0)
        return ParameterVector().from_array(params, self.param_space)
# ===============================================================================
if __name__ == "__main__":
from olympus import Parameter, ParameterSpace
param_space = ParameterSpace()
param_space.add(Parameter(name="param_0"))
param_space.add(Parameter(name="param_1"))
planner = SteepestDescent(
learning_rate=1e-3, dx=1e-5, random_seed=None, init_guess=None
)
planner.set_param_space(param_space=param_space)
param = planner.ask()
print("PARAM", param)
| aspuru-guzik-group/olympus | src/olympus/planners/planner_steepest_descent/wrapper_steepest_descent.py | wrapper_steepest_descent.py | py | 3,545 | python | en | code | 70 | github-code | 13 |
17588156152 | import sys
sys.stdin = open('input.txt')
def dfs(x, y):
    """Flood-fill from (x, y) through open cells; bump `re` when the exit is reached.

    Relies on module globals: n (grid size), mazz (grid of 0 path / 1 wall /
    2 start / 3 exit), visited (seen marks), re (exit-reached counter).
    """
    global re
    dx = [-1, 1, 0, 0]
    dy = [0, 0, -1, 1]
    visited[x][y] = 1
    for i in range(4):
        nx = x + dx[i]
        ny = y + dy[i]
        # Bug fix: the original tested `0 <= nx < n` twice and never
        # bounds-checked `ny`, allowing out-of-range column accesses
        # (and negative-index wraparound).
        if 0 <= nx < n and 0 <= ny < n:
            if mazz[nx][ny] == 3:
                re += 1
                return
            if mazz[nx][ny] == 0 and visited[nx][ny] == 0:
                visited[nx][ny] = 1
                dfs(nx, ny)
# Per test case: read the n x n maze as digit rows (0 path, 1 wall,
# 2 start, 3 exit), flood-fill from the start, and report reachability.
for tc in range(1,int(input())+1):
    n = int(input())
    mazz = [list(map(int, input())) for _ in range(n)]
    visited = [[0]*n for _ in range(n)]
    re = 0
    # Locate the start cell (value 2) and search from it.
    for i in range(n):
        for j in range(n):
            if mazz[i][j] == 2:
                dfs(i,j)
                break
    # NOTE(review): dfs can increment `re` more than once (the exit may be
    # touched from several cells), in which case `re == 1` is False and 0 is
    # printed even though the exit is reachable -- `re >= 1` may be intended.
    if re == 1:
        print('#{} {}'.format(tc,1))
    else:
        print('#{} {}'.format(tc,0))
| hyejiny/Algo_selfstudy | SWEA/미로.py | 미로.py | py | 843 | python | en | code | 0 | github-code | 13 |
27174038406 | from league.models import Champion, ChampionMastery
class TestSummoner:
    """Tests for the champion-mastery accessors on a summoner."""

    def test_champion_mastery_list(self, summoner):
        # Both the top-champion and all-champion lists yield ChampionMastery items.
        top_list = summoner.get_top_champion_mastery()
        full_list = summoner.get_all_champion_mastery()
        for mastery_list in (top_list, full_list):
            assert isinstance(mastery_list[0], ChampionMastery)

    def test_get_champion_mastery_by_championId(self, summoner):
        # Lookup by a single champion id returns one ChampionMastery.
        result = summoner.get_champion_mastery_by_championId("5")
        assert isinstance(result, ChampionMastery)
class TestChampion:
    """Tests for champion lookup by id and by name."""

    def test_kwargs(self, api):
        # Bug fix: the original asserted on champion_by_name BEFORE assigning
        # it (NameError) and paired each assert with the wrong variable; it
        # also compared the unbound __str__ attribute instead of calling it.
        champion_by_id = api.get_champion_by_id("5")
        assert isinstance(champion_by_id, Champion)
        champion_by_name = api.get_champion_by_name("Ahri")
        assert isinstance(champion_by_name, Champion)
        assert str(champion_by_id) == champion_by_id.id + " " + champion_by_id.key
| ah00ee/python-league | tests/test_models.py | test_models.py | py | 920 | python | en | code | 0 | github-code | 13 |
14412221290 | # This file is part of Korman.
#
# Korman is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Korman is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Korman. If not, see <http://www.gnu.org/licenses/>.
from __future__ import annotations
import bpy
from bpy.props import *
import itertools
import math
from typing import *
from PyHSPlasma import *
from ...exporter import ExportError
from .base import PlasmaModifierProperties
from ... import idprops
if TYPE_CHECKING:
from ...exporter import Exporter
from ..prop_world import PlasmaAge, PlasmaPage
class _GameGuiMixin:
    """Mixin shared by Game GUI modifier property groups.

    Provides discovery/iteration of sibling GUI-control modifiers and the
    common sanity checks (GUI page placement, single control per object,
    valid sound references).
    """

    @property
    def gui_sounds(self) -> Iterable[Tuple[str, int]]:
        """Overload to automatically export GUI sounds on the control. This should return an iterable
        of tuple attribute name and sound index.
        """
        return []

    def get_control(self, exporter: Exporter, bo: Optional[bpy.types.Object] = None, so: Optional[plSceneObject] = None) -> Optional[pfGUIControlMod]:
        # Overload to return the Plasma control for this modifier, if any.
        return None

    @property
    def has_gui_proc(self) -> bool:
        return True

    def iterate_control_modifiers(self) -> Iterator[_GameGuiMixin]:
        # Yield the enabled GUI-control modifier instances on this object.
        pl_mods = self.id_data.plasma_modifiers
        yield from (
            getattr(pl_mods, i.pl_id)
            for i in self.iterate_control_subclasses()
            if getattr(pl_mods, i.pl_id).enabled
        )

    @classmethod
    def iterate_control_subclasses(cls) -> Iterator[_GameGuiMixin]:
        # NOTE(review): __subclasses__() returns only *direct* subclasses of
        # _GameGuiMixin; deeper descendants would be missed -- confirm intended.
        yield from filter(
            lambda x: x.is_game_gui_control(),
            _GameGuiMixin.__subclasses__()
        )

    @classmethod
    def is_game_gui_control(cls) -> bool:
        return True

    @property
    def requires_dyntext(self) -> bool:
        return False

    def sanity_check(self):
        age: PlasmaAge = bpy.context.scene.world.plasma_age

        # Game GUI modifiers must be attached to objects in a GUI page, ONLY
        page_name: str = self.id_data.plasma_object.page
        # Bug fix: next() previously had no default, so a page name missing
        # from age.pages raised StopIteration and the `our_page is None`
        # branch below was unreachable; default to None as the check expects.
        our_page: Optional[PlasmaPage] = next(
            (i for i in age.pages if i.name == page_name),
            None
        )
        if our_page is None or our_page.page_type != "gui":
            raise ExportError(f"'{self.id_data.name}': {self.bl_label} Modifier must be in a GUI page!")

        # Only one Game GUI Control per object. Continuously check this because objects can be
        # generated/mutated during the pre-export phase.
        modifiers = self.id_data.plasma_modifiers
        controls = [i for i in self.iterate_control_subclasses() if getattr(modifiers, i.pl_id).enabled]
        num_controls = len(controls)
        if num_controls > 1:
            raise ExportError(f"'{self.id_data.name}': Only 1 GUI Control modifier is allowed per object. We found {num_controls}.")

        # Blow up on invalid sounds
        soundemit = self.id_data.plasma_modifiers.soundemit
        for attr_name, _ in self.gui_sounds:
            sound_name = getattr(self, attr_name)
            if not sound_name:
                continue
            sound = next((i for i in soundemit.sounds if i.name == sound_name), None)
            if sound is None:
                raise ExportError(f"'{self.id_data.name}': Invalid '{attr_name}' GUI Sound '{sound_name}'")
class PlasmaGameGuiControlModifier(PlasmaModifierProperties, _GameGuiMixin):
    """Common settings shared by all Game GUI controls on an object.

    This modifier does not export a control itself (see is_game_gui_control);
    instead it pushes the shared tag ID / visibility / handler configuration
    onto whichever control modifiers are enabled on the same object.
    """

    pl_id = "gui_control"
    pl_page_types = {"gui"}

    bl_category = "GUI"
    bl_label = "GUI Control (ex)"
    bl_description = "XXX"
    bl_object_types = {"FONT", "MESH"}

    # Numeric tag exposed to scripting for looking the control up at runtime.
    tag_id = IntProperty(
        name="Tag ID",
        description="",
        min=0,
        options=set()
    )
    visible = BoolProperty(
        name="Visible",
        description="",
        default=True,
        options=set()
    )
    proc = EnumProperty(
        name="Notification Procedure",
        description="",
        items=[
            ("default", "[Default]", "Send notifications to the owner's notification procedure."),
            ("close_dialog", "Close Dialog", "Close the current Game GUI Dialog."),
            ("console_command", "Run Console Command", "Run a Plasma Console command.")
        ],
        options=set()
    )
    # Only used when proc == "console_command".
    console_command = StringProperty(
        name="Command",
        description="",
        options=set()
    )

    def convert_gui_control(self, exporter: Exporter, ctrl: pfGUIControlMod, bo: bpy.types.Object, so: plSceneObject):
        """Apply the shared tag/visibility/handler settings to an exported control."""
        ctrl.tagID = self.tag_id
        ctrl.visible = self.visible
        if self.proc == "default":
            ctrl.setFlag(pfGUIControlMod.kInheritProcFromDlg, True)
        elif self.proc == "close_dialog":
            ctrl.handler = pfGUICloseDlgProc()
        elif self.proc == "console_command":
            handler = pfGUIConsoleCmdProc()
            handler.command = self.console_command
            ctrl.handler = handler

    def convert_gui_sounds(self, exporter: Exporter, ctrl: pfGUIControlMod, ctrl_mod: _GameGuiMixin):
        """Build the control's sound index table from the object's sound emitter modifier."""
        soundemit = ctrl_mod.id_data.plasma_modifiers.soundemit
        if not ctrl_mod.gui_sounds or not soundemit.enabled:
            return

        # This is a lot like the plPhysicalSndGroup where we have a vector behaving as a lookup table.
        # NOTE that zero is a special value here meaning no sound, so we need to offset the sounds
        # that we get from the emitter modifier by +1.
        sound_indices = {}
        for attr_name, gui_sound_idx in ctrl_mod.gui_sounds:
            sound_name = getattr(ctrl_mod, attr_name)
            if not sound_name:
                continue
            sound_keys = soundemit.get_sound_keys(exporter, sound_name)
            sound_key, soundemit_index = next(sound_keys, (None, -1))
            if sound_key is not None:
                sound_indices[gui_sound_idx] = soundemit_index + 1

        # Compress the list to include only the highest entry we need.
        if sound_indices:
            ctrl.soundIndices = [sound_indices.get(i, 0) for i in range(max(sound_indices) + 1)]

    def export(self, exporter: Exporter, bo: bpy.types.Object, so: plSceneObject):
        """Push the shared settings onto every enabled control modifier on this object."""
        ctrl_mods = list(self.iterate_control_modifiers())
        if not ctrl_mods:
            exporter.report.msg(str(list(self.iterate_control_subclasses())))
            exporter.report.warn("This modifier has no effect because no GUI control modifiers are present!")
        for ctrl_mod in ctrl_mods:
            ctrl_obj = ctrl_mod.get_control(exporter, bo, so)
            self.convert_gui_control(exporter, ctrl_obj, bo, so)
            self.convert_gui_sounds(exporter, ctrl_obj, ctrl_mod)

    @property
    def has_gui_proc(self) -> bool:
        # True if any attached control still wants a notification procedure.
        return any((i.has_gui_proc for i in self.iterate_control_modifiers()))

    @classmethod
    def is_game_gui_control(cls) -> bool:
        # How is a control not a control, you ask? Because, grasshopper, this modifier does not
        # actually export a GUI control itself. Instead, it holds common properties that may
        # or may not be used by other controls. This just helps fill out the other modifiers.
        return False
class GameGuiAnimation(bpy.types.PropertyGroup):
    """One object or texture animation referenced by a GUI control."""

    def _poll_target_object(self, value):
        # Only allow targetting things that are in our GUI page.
        if value.plasma_object.page != self.id_data.plasma_object.page:
            return False
        if self.anim_type == "OBJECT":
            return idprops.poll_animated_objects(self, value)
        else:
            return idprops.poll_drawable_objects(self, value)

    def _poll_texture(self, value):
        # must be a legal option... but is it a member of this material... or, if no material,
        # any of the materials attached to the object?
        if self.target_material is not None:
            return value.name in self.target_material.texture_slots
        else:
            target_object = self.target_object if self.target_object is not None else self.id_data
            for i in (slot.material for slot in target_object.material_slots if slot and slot.material):
                if value in (slot.texture for slot in i.texture_slots if slot and slot.texture):
                    return True
            return False

    def _poll_material(self, value):
        # Don't filter materials by texture - this would (potentially) result in surprising UX
        # in that you would have to clear the texture selection before being able to select
        # certain materials.
        target_object = self.target_object if self.target_object is not None else self.id_data
        object_materials = (slot.material for slot in target_object.material_slots if slot and slot.material)
        return value in object_materials

    anim_type: str = EnumProperty(
        name="Type",
        description="Animation type to affect",
        items=[
            ("OBJECT", "Object", "Object Animation"),
            ("TEXTURE", "Texture", "Texture Animation"),
        ],
        default="OBJECT",
        options=set()
    )
    # When None, the object owning this property group is used as the target.
    target_object: bpy.types.Object = PointerProperty(
        name="Object",
        description="Target object",
        poll=_poll_target_object,
        type=bpy.types.Object
    )
    target_material: bpy.types.Material = PointerProperty(
        name="Material",
        description="Target material",
        type=bpy.types.Material,
        poll=_poll_material
    )
    target_texture: bpy.types.Texture = PointerProperty(
        name="Texture",
        description="Target texture",
        type=bpy.types.Texture,
        poll=_poll_texture
    )
class GameGuiAnimationGroup(bpy.types.PropertyGroup):
    """A named group of GameGuiAnimations exported onto a single control attribute."""

    def _update_animation_name(self, context) -> None:
        # An empty name implicitly means "play everything" — make that explicit in the UI.
        if not self.animation_name:
            self.animation_name = "(Entire Animation)"

    animations = CollectionProperty(
        name="Animations",
        description="",
        type=GameGuiAnimation,
        options=set()
    )
    animation_name: str = StringProperty(
        name="Animation Name",
        description="Name of the animation to play",
        default="(Entire Animation)",
        update=_update_animation_name,
        options=set()
    )

    active_anim_index: int = IntProperty(options={"HIDDEN"})
    show_expanded: bool = BoolProperty(options={"HIDDEN"})

    def export(
            self, exporter: Exporter, bo: bpy.types.Object, so: plSceneObject,
            ctrl_obj: pfGUIControlMod, add_func: Callable[[plKey], None],
            anim_name_attr: str
    ):
        """Collect this group's animation keys and attach them to ctrl_obj via add_func.

        ``anim_name_attr`` names the pfGUIControlMod attribute that receives the
        animation name (e.g. "animName" or "mouseOverAnimName").
        """
        keys = set()
        for anim in self.animations:
            # Fall back to the exported object itself when no explicit target is set.
            target_object = anim.target_object if anim.target_object is not None else bo
            if anim.anim_type == "OBJECT":
                keys.add(exporter.animation.get_animation_key(target_object))
            elif anim.anim_type == "TEXTURE":
                # Layer animations don't respect the name field, so we need to grab exactly the
                # layer animation key that is requested. Cyan's Max plugin does not allow specifying
                # layer animations here as best I can tell, but I don't see why we shouldn't.
                keys.update(
                    exporter.mesh.material.get_texture_animation_key(
                        target_object,
                        anim.target_material,
                        anim.target_texture,
                        self.animation_name
                    )
                )
            else:
                raise RuntimeError()

        # This is to make sure that we only waste space in the PRP file with the animation
        # name if we actually have some doggone animations.
        if keys:
            setattr(ctrl_obj, anim_name_attr, self.animation_name)
        for i in keys:
            add_func(i)
class PlasmaGameGuiButtonModifier(PlasmaModifierProperties, _GameGuiMixin):
    """Exports a pfGUIButtonMod with optional hover/click animations and sounds."""

    pl_id = "gui_button"
    pl_depends = {"gui_control"}
    pl_page_types = {"gui"}

    bl_category = "GUI"
    bl_label = "GUI Button (ex)"
    bl_description = "XXX"
    bl_object_types = {"FONT", "MESH"}

    def _update_notify_type(self, context):
        # It doesn't make sense to have no notify type at all selected, so
        # default to at least one option.
        if not self.notify_type:
            self.notify_type = {"DOWN"}

    # BUGFIX: the UP/DOWN tooltip descriptions were swapped — "Up" described
    # the mouse-down case and vice versa.
    notify_type = EnumProperty(
        name="Notify On",
        description="When the button should perform its action",
        items=[
            ("UP", "Up", "When the mouse button is released over the GUI button."),
            ("DOWN", "Down", "When the mouse button is down over the GUI button."),
        ],
        default={"UP"},
        options={"ENUM_FLAG"},
        update=_update_notify_type
    )

    mouse_over_anims: GameGuiAnimationGroup = PointerProperty(type=GameGuiAnimationGroup)
    mouse_click_anims: GameGuiAnimationGroup = PointerProperty(type=GameGuiAnimationGroup)

    show_expanded_sounds: bool = BoolProperty(options={"HIDDEN"})
    mouse_down_sound: str = StringProperty(
        name="Mouse Down SFX",
        description="Sound played when the mouse button is down",
        options=set()
    )
    mouse_up_sound: str = StringProperty(
        name="Mouse Up SFX",
        description="Sound played when the mouse button is released",
        options=set()
    )
    mouse_over_sound: str = StringProperty(
        name="Mouse Over SFX",
        description="Sound played when the mouse moves over the GUI button",
        options=set()
    )
    mouse_off_sound: str = StringProperty(
        name="Mouse Off SFX",
        description="Sound played when the mouse moves off of the GUI button",
        options=set()
    )

    @property
    def gui_sounds(self):
        # (attribute name, pfGUIButtonMod sound slot) pairs — see _GameGuiMixin.gui_sounds.
        return (
            ("mouse_down_sound", pfGUIButtonMod.kMouseDown),
            ("mouse_up_sound", pfGUIButtonMod.kMouseUp),
            ("mouse_over_sound", pfGUIButtonMod.kMouseOver),
            ("mouse_off_sound", pfGUIButtonMod.kMouseOff),
        )

    def get_control(self, exporter: Exporter, bo: Optional[bpy.types.Object] = None, so: Optional[plSceneObject] = None) -> pfGUIButtonMod:
        return exporter.mgr.find_create_object(pfGUIButtonMod, bl=bo, so=so)

    def export(self, exporter: Exporter, bo: bpy.types.Object, so: plSceneObject):
        """Export the button control, its notify type, and any attached animations."""
        ctrl = self.get_control(exporter, bo, so)
        ctrl.setFlag(pfGUIControlMod.kWantsInterest, True)

        if self.notify_type == {"UP"}:
            ctrl.notifyType = pfGUIButtonMod.kNotifyOnUp
        elif self.notify_type == {"DOWN"}:
            ctrl.notifyType = pfGUIButtonMod.kNotifyOnDown
        elif self.notify_type == {"UP", "DOWN"}:
            ctrl.notifyType = pfGUIButtonMod.kNotifyOnUpAndDown
        else:
            raise ValueError(self.notify_type)

        self.mouse_over_anims.export(exporter, bo, so, ctrl, ctrl.addMouseOverKey, "mouseOverAnimName")
        self.mouse_click_anims.export(exporter, bo, so, ctrl, ctrl.addAnimationKey, "animName")
class PlasmaGameGuiDialogModifier(PlasmaModifierProperties, _GameGuiMixin):
    """Exports a pfGUIDialogMod + plPostEffectMod pair for a GUI page.

    The dialog renders the page's controls through a screenspace projection
    derived either from a user-supplied camera or from a matrix computed to
    frame all visible meshes in the page.
    """

    pl_id = "gui_dialog"
    pl_page_types = {"gui"}

    bl_category = "GUI"
    bl_label = "GUI Dialog (ex)"
    bl_description = "XXX"
    bl_object_types = {"FONT", "MESH"}

    camera_object: bpy.types.Object = PointerProperty(
        name="GUI Camera",
        description="Camera used to project the GUI to screenspace.",
        type=bpy.types.Object,
        poll=idprops.poll_camera_objects,
        options=set()
    )
    is_modal = BoolProperty(
        name="Modal",
        description="",
        default=True,
        options=set()
    )

    def export(self, exporter: Exporter, bo: bpy.types.Object, so: plSceneObject):
        """Build the post-effect camera projection and the dialog modifier."""
        # Find all of the visible objects in the GUI page for use in hither/yon raycast and
        # camera matrix calculations.
        visible_objects = [
            i for i in exporter.get_objects(bo.plasma_object.page)
            if i.type == "MESH" and i.data.materials
        ]

        # Prefer the modifier's own object if it *is* a camera, else the configured one.
        camera_object = self.id_data if self.id_data.type == "CAMERA" else self.camera_object
        if camera_object:
            exporter.report.msg(f"Using camera matrix from camera '{camera_object.name}'")
            if camera_object != self.id_data and camera_object.plasma_object.enabled:
                with exporter.report.indent():
                    exporter.report.warn("The camera object should NOT be a Plasma Object!")

            camera_matrix = camera_object.matrix_world

            # Save the clipping info from the camera for later use.
            cam_data = camera_object.data
            fov, hither, yonder = cam_data.angle, cam_data.clip_start, cam_data.clip_end
        else:
            exporter.report.msg(f"Building a camera matrix to view: {', '.join((i.name for i in visible_objects))}")
            fov = math.radians(45.0)
            camera_matrix = exporter.gui.calc_camera_matrix(
                bpy.context.scene,
                visible_objects,
                fov
            )

            # There isn't a real camera, so just pretend like the user didn't set the clipping info.
            hither, yonder = 0.0, 0.0
        with exporter.report.indent():
            exporter.report.msg(str(camera_matrix))

        # If no hither or yonder was specified on the camera, then we need to determine that ourselves.
        if not hither or not yonder:
            exporter.report.msg(f"Incomplete clipping: H:{hither:.02f} Y:{yonder:.02f}; calculating new...")
            with exporter.report.indent():
                clipping = exporter.gui.calc_clipping(
                    camera_matrix,
                    bpy.context.scene,
                    visible_objects,
                    fov
                )
                exporter.report.msg(f"Calculated: H:{clipping.hither:.02f} Y:{clipping.yonder:.02f}")
                # Only fill in the missing value(s); keep anything the camera provided.
                if not hither:
                    hither = clipping.hither
                if not yonder:
                    yonder = clipping.yonder
                exporter.report.msg(f"Corrected clipping: H:{hither:.02f} Y:{yonder:.02f}")

        # Both of the objects we export go into the pool.
        scene_node_key = exporter.mgr.get_scene_node(bl=bo)

        post_effect = exporter.mgr.find_create_object(plPostEffectMod, bl=bo)
        post_effect.defaultC2W, post_effect.defaultW2C = exporter.gui.convert_post_effect_matrices(camera_matrix)
        post_effect.fovX = math.degrees(fov)
        # NOTE(review): assumes a fixed 4:3 aspect for the vertical FOV — confirm.
        post_effect.fovY = math.degrees(fov * (3.0 / 4.0))
        post_effect.hither = min((hither, yonder))
        post_effect.yon = max((hither, yonder))
        post_effect.nodeKey = scene_node_key

        dialog_mod = exporter.mgr.find_create_object(pfGUIDialogMod, bl=bo)
        dialog_mod.name = bo.plasma_object.page
        dialog_mod.setFlag(pfGUIDialogMod.kModal, self.is_modal)
        dialog_mod.renderMod = post_effect.key
        dialog_mod.sceneNode = scene_node_key

    @property
    def has_gui_proc(self) -> bool:
        return False

    @classmethod
    def is_game_gui_control(cls) -> bool:
        return False

    def post_export(self, exporter: Exporter, bo: bpy.types.Object, so: plSceneObject):
        """Link every exported control in this page to the dialog."""
        # All objects have been exported. Now, we can establish linkage to all controls that
        # have been exported.
        dialog = exporter.mgr.find_object(pfGUIDialogMod, bl=bo, so=so)
        control_modifiers: Iterable[_GameGuiMixin] = itertools.chain.from_iterable(
            obj.plasma_modifiers.gui_control.iterate_control_modifiers()
            for obj in exporter.get_objects(bo.plasma_object.page)
            if obj.plasma_modifiers.gui_control.enabled
        )
        for control_modifier in control_modifiers:
            control = control_modifier.get_control(exporter, control_modifier.id_data)
            ctrl_key = control.key
            exporter.report.msg(f"GUIDialog '{bo.name}': [{control.ClassName()}] '{ctrl_key.name}'")
            dialog.addControl(ctrl_key)
| H-uru/korman | korman/properties/modifiers/game_gui.py | game_gui.py | py | 20,323 | python | en | code | 31 | github-code | 13 |
# Scrape the chord names for one song from U-FRET (https://www.ufret.jp/)
# using headless Chrome via Selenium.
from selenium import webdriver
from selenium.webdriver.chrome.options import Options  # needed to configure Chrome options

option = Options()  # prepare the Chrome options object
option.add_argument('--headless')  # run Chrome in headless mode

# NOTE(review): chromedriver path is hardcoded to a local machine — confirm.
driver = webdriver.Chrome("/home/fumihachi/ws/lib/chromedriver_linux64/chromedriver", options=option)

# URL of a song page selected from <https://www.ufret.jp/>
url = "https://www.ufret.jp/song.php?data=56999"
driver.get(url)

# Turn OFF the "easy play" (kantan) setting: toggles 簡単弾き from ON to OFF
buttons = driver.find_elements_by_tag_name('button')
for button in buttons:
    if button.get_attribute("onclick") == "kantanon();":
        print("「簡単弾き:OFF」を選択")
        button.click()
        break

# Extract only the chord elements (rendered as <ruby> tags)
code_elem = driver.find_elements_by_tag_name('ruby')
for c in code_elem:
    print(c.text, end=' ')
print("")
| fumihachi94/algo_basics | 030_code/code.py | code.py | py | 956 | python | ja | code | 0 | github-code | 13 |
# Interactive text adventure: the player navigates a labyrinth via input()
# prompts. ``health`` tracks damage; ``done`` flags a game-over.
start = '''
You wake up one morning and find that you aren't in your bed; you aren't even in your room.
You're in the middle of a giant labrynth.
A sign is hanging from the slimy wall: "You have one hour. Don't touch the walls."
There is a hallway to your right and to your left. Also just so you know your current total health is 100
'''
health = 100
print(start)
# NOTE(review): ``done`` starts as the *string* "game over" but is only ever
# compared with ``== True`` below (so this initial value behaves as "not done").
# Confirm whether ``done = False`` was intended.
done = ("game over")
print("Type 'left' to go left or 'right' to go right.")
user_input = input()
if user_input == "left":
    # Left path: troll encounter, then the river, then the injured-ankle choice.
    answer = 0
    while answer == 0:
        print("You decide to go left and you run into a troll")
        user_input = input("do you fight the troll, yes or no? ")
        if user_input == "yes":
            print("sorry dude your totally dead")
            done = True
            answer += 1
        elif user_input == "no":
            print("congrats you live")
            print("you continue down the mossy corridor")
            print("...")
            # ``answer`` doubles as the loop-exit flag for each nested prompt loop.
            answer = 0
            while answer == 0:
                print("you come to a slimy river that crosses your cave")
                user_input = input("do you cross the river, yes or no? ")
                if user_input == "yes":
                    print("sorry dude you are totally dead, armour is great but it doesnt float very well")
                    done = True
                    answer += 1
                elif user_input == "no":
                    print("congrats you live")
                    print("but whilst you were walking away from the river you tripped and fell")
                    print("health:")
                    health -= 5
                    print(health)
                    if health <= 0:
                        done = True
                    answer = 0
                    while answer == 0 and health > 0:
                        print("will you stop and 'patch up' your ankle or 'keep walking'?")
                        user_input = input()
                        if user_input == "patch up":
                            print("wise decision")
                            answer += 1
                        elif user_input == "keep walking":
                            print("for every step you take your health depletes by 6, your injury was worse than you thought")
                            print("but remember whilst sitting you are vulnerable to any enemies that may find you")
                            print("you take five steps")
                            health -= 30
                            print(health)
                            if health <= 0:
                                done = True
                            if done == True:
                                print("game over")
                            answer += 1
                        else:
                            print("that was not an option")
                            answer +=1
                else:
                    print("not an option bro")
    else:
        print("not an option bro")
elif user_input == "right":
    # Right path: river first, then a long corridor leading to the troll.
    answer = 0
    while answer == 0:
        print("You choose to go right and you come to a slimy river that crosses your cave")
        user_input = input("do you cross the river, yes or no? ")
        if user_input == "yes":
            print("sorry dude your totally dead, armour is great but it doesnt float very well")
            done = True
            answer += 1
        elif user_input == "no":
            print("congrats you live")
            print("you turn around and discover the corridor has changed behind you")
            print("you enter the new corridor and you continue down it for some time")
            print("and some more")
            print("and some more")
            print("...")
            print("...")
            print("...")
            print("...")
            print("trust me this is a super long corridor")
            print("you run into a troll")
            print("do you fight the troll, yes or no? ")
            while answer == 0:
                user_input = input()
                if user_input == "yes":
                    print("sorry dude your totally dead")
                    done = True
                    answer += 1
                elif user_input == "no":
                    print("congrats you live, you successfully run around the troll and continue to sprint down the corridor")
                    answer = 0
                    while answer == 0:
                        print("should you keep running? yes or no? if you stop you may be caught by the troll; however, if you keep going you will deplete your health by 5")
                        user_input = input()
                        if user_input == "yes":
                            health -= 5
                            print(health)
                            answer += 1
                        elif user_input == "no":
                            print("the troll was not following you, you survive")
                            answer += 1
                        else:
                            print("that was not an option, yes or no???")
                            answer += 1
                else:
                    print("that was not an option, you should die of dumbness but ill let you off this time")
                    answer +=1
        else:
            print("that was not an option")
else:
    print("that was not an option, you die of dumbness")
    done = True
# Final outcome: dying from damage or any fatal branch ends the game.
if health <= 0:
    done = True
if done == True:
    print("game over")
| espickermann/2016_GWC_Class_Pojects | python/text_adventure.py | text_adventure.py | py | 4,112 | python | en | code | 0 | github-code | 13 |
41124927354 | import os
import pygame
from pygame import *
from constants import *
def load_image(
    name,
    sizex=-1,
    sizey=-1,
    colorkey=None,
    ):
    """Load an image from the ``sprites`` directory.

    Args:
        name: file name inside the ``sprites`` folder.
        sizex, sizey: target size in pixels; -1 leaves the image unscaled.
        colorkey: transparency key color; pass -1 to sample the top-left pixel.

    Returns:
        Tuple of (surface, rect) for the loaded image.
    """
    fullname = os.path.join('sprites', name)
    image = pygame.image.load(fullname)
    image = image.convert()
    if colorkey is not None:
        # BUGFIX: compare with `==` instead of identity (`is`) — `colorkey is -1`
        # only works by accident of CPython's small-int caching.
        if colorkey == -1:
            colorkey = image.get_at((0, 0))
        image.set_colorkey(colorkey, RLEACCEL)

    if sizex != -1 or sizey != -1:
        image = pygame.transform.scale(image, (sizex, sizey))

    return (image, image.get_rect())
def load_sprite_sheet(
        sheetname,
        nx,
        ny,
        scalex = -1,
        scaley = -1,
        colorkey = None,
        ):
    """Slice a sprite sheet from the ``sprites`` directory into nx*ny frames.

    Args:
        sheetname: file name inside the ``sprites`` folder.
        nx, ny: number of columns and rows in the sheet.
        scalex, scaley: per-frame target size; -1 leaves frames unscaled.
        colorkey: transparency key color; pass -1 to sample each frame's
            top-left pixel.

    Returns:
        Tuple of (list of frame surfaces in row-major order, rect of one frame).
    """
    fullname = os.path.join('sprites', sheetname)
    sheet = pygame.image.load(fullname)
    sheet = sheet.convert()
    sheet_rect = sheet.get_rect()
    sprites = []
    # NOTE(review): float cell sizes rely on pygame.Rect truncation — assumes
    # the sheet dimensions are divisible by nx/ny; confirm for all sheets used.
    sizex = sheet_rect.width / nx
    sizey = sheet_rect.height / ny
    for i in range(0, ny):
        for j in range(0, nx):
            rect = pygame.Rect((j * sizex, i * sizey, sizex, sizey))
            image = pygame.Surface(rect.size)
            image = image.convert()
            image.blit(sheet, (0, 0), rect)
            if colorkey is not None:
                # BUGFIX: compare with `==` instead of identity (`is`) — `is -1`
                # only works by accident of CPython's small-int caching.
                if colorkey == -1:
                    colorkey = image.get_at((0, 0))
                image.set_colorkey(colorkey, RLEACCEL)
            if scalex != -1 or scaley != -1:
                image = pygame.transform.scale(image, (scalex, scaley))
            sprites.append(image)
    sprite_rect = sprites[0].get_rect()
    return sprites, sprite_rect
| shivamshekhar/LittleFighter | data/functions.py | functions.py | py | 1,572 | python | en | code | 9 | github-code | 13 |
26461691621 | from sales_mania.zoho.get_session import get_session
from collections import defaultdict
import os, copy, datetime, json
import urllib.parse
class inv_document(get_session):
    """Thin client for a subset of the Zoho Inventory REST API.

    Wraps an authenticated ``get_session`` and exposes paged item listings,
    per-item detail lookups, and the activity-log report.

    NOTE(review): ``cache_store`` is accepted but never used or forwarded —
    confirm whether it should be passed through to ``get_session``.
    """

    def __init__(self, login_store, cache_store, user_id, password, data_center, org_id) -> None:
        self.service = 'inventory'
        super().__init__(login_store, user_id, password, data_center, org_id, service=self.service)

    def __add_path(self, path_addon):
        """Render a list of extra path segments as a URL suffix ('' when empty)."""
        # BUGFIX: the parameter used to be clobbered with "" on entry, so extra
        # path segments (e.g. item id + "inventorysummary") were never emitted.
        if len(path_addon) > 0:
            return f"/{'/'.join(path_addon)}"
        return ""

    def __get_params(self, document, attribute, extra_params=None):
        """Build the URL-encoded query string for a document/attribute pair."""
        document_map = {
            "items": {
                "item_count": {'page': '1', 'per_page': '200', 'filter_by': 'Status.All',
                               'sort_column': 'created_time', 'response_option': '2'},
                "item_page": {'filter_by': 'Status.All', 'sort_column': 'created_time',
                              'sort_order': 'A', 'usestate': 'true'},
                "item_details": {}
            },
            "report_activity": {
                "report_activity": {"sort_column": "date", "sort_order": "D", "response_option": "1"}
            }
        }
        # Treat the base config as immutable to preserve it for future calls.
        temp = copy.deepcopy(document_map[document][attribute])
        temp.update(extra_params or {})
        # Nested structures must be JSON-encoded before URL encoding.
        temp = {param: json.dumps(value) if type(value) in [dict, list] else value for param, value in temp.items()}
        return urllib.parse.urlencode(temp, quote_via=urllib.parse.quote)

    def __get_base_uri(self, document, path_addon=None):
        """Build the endpoint URI (including org id) for a document type."""
        api_names = {"items": "items",
                     "report_activity": "reports/activitylogs"}
        # BUGFIX: removed a stray debug print(document).
        suffix = self.__add_path(path_addon=path_addon or [])
        return f"https://inventory.zoho{self.data_center}/api/v1/{api_names[document]}{suffix}?organization_id={self.org_id}&"

    def __find_chained(self, response, chain_list):
        """Walk nested keys of a JSON response: ['a', 'b'] -> response['a']['b']."""
        result = response
        for key in chain_list:
            result = result[key]
        return result

    def __fetch_result(self, document, attribute, path_addon=None, extra_params=None):
        """GET the endpoint and return the payload named by the attribute's key chain.

        Raises:
            Exception: when the API reports anything other than success.
        """
        keys = {"item_count":
                    {"status": "message", "data": ["page_context", "total"], "success": "success"},
                "item_page":
                    {"status": "message", "data": ["items"], "success": "success"},
                "item_details":
                    {"status": "message", "data": ["items"], "success": "success"},
                "report_activity":
                    {"status": "message", "data": ["activitylogs"], "success": "success"}
                }
        url = f"{self.__get_base_uri(document=document, path_addon=path_addon or [])}{self.__get_params(document=document, attribute=attribute, extra_params=extra_params or {})}"
        zoho_resp = self.session.get(url)
        response = self.to_json(
            response=self.validate_200(
                response=zoho_resp
            )
        )
        status = response[keys[attribute]["status"]]
        if status == keys[attribute]["success"]:
            return self.__find_chained(response=response, chain_list=keys[attribute]["data"])
        raise Exception(f"Can not collect document {document} with attribute {attribute} failed with {status}")

    def __document_check(self, relations, document, segment=None):
        """Validate that the relation table supports the document (and segment)."""
        # BUGFIX: ``relations.get(document) is False`` could never fire for a
        # missing key (dict.get returns None), letting bad documents crash later.
        if relations.get(document) is None:
            raise Exception(f"Document {document} not available")
        allowed_segments = relations[document][1]
        if segment is not None and (allowed_segments is None or segment not in allowed_segments):
            raise Exception(f"Segment {segment} of {document} not possible")
        return True

    def get_document_page_n(self, document, segment, round, extra_params=None, path_addon=None):
        """Fetch one page of a paged listing.

        ``segment`` is the page size and ``round`` the 1-based page number.
        If directly accessing this function, it is the caller's responsibility
        to catch out-of-range page exceptions.
        """
        page_relation = {"items": ["item_page", None],
                         "report_activity": ["report_activity", None]}
        self.__document_check(relations=page_relation, document=document)
        # Copy so we never mutate the caller's dict (or a shared default).
        params = dict(extra_params or {})
        params.update({"page": round, "per_page": segment})
        return self.__fetch_result(document=document,
                                   attribute=page_relation[document][0],
                                   extra_params=params,
                                   path_addon=path_addon or [])

    def get_document_page_range(self, document, segment, start_page, end_page, extra_params):
        """Fetch pages ``start_page``..``end_page`` (inclusive) as a list."""
        return [
            self.get_document_page_n(document=document, segment=segment,
                                     round=page, extra_params=extra_params)
            for page in range(start_page, end_page + 1)
        ]

    def get_document_detail(self, document, document_id):
        """Fetch the inventory-summary detail payload for one document id."""
        details_relations = {"items": ["item_details", None, [document_id, "inventorysummary"]]}
        return self.__fetch_result(document=document, attribute=details_relations[document][0],
                                   path_addon=details_relations[document][2], extra_params={})

    def get_documents_details(self, documents, document_ids, sort_document=False):
        """Fetch details for the cross product of ``documents`` x ``document_ids``.

        Results are grouped by document type when ``sort_document`` is True,
        otherwise by document id.
        """
        document_store = defaultdict(list)
        for outer in documents if sort_document else document_ids:
            for inner in document_ids if sort_document else documents:
                document_id = inner if sort_document else outer
                document = outer if sort_document else inner
                document_store[outer].append(
                    self.get_document_detail(document=document, document_id=document_id)
                )
        return document_store

    def get_report_activity(self, round, start_date=None, end_date=None,
                            static_range=None, document_filters=None,
                            additional_filters=None, columns=None):
        """Fetch one page of the activity-log report.

        Provide either a ``static_range`` keyword (e.g. "year") or an explicit
        ``start_date``/``end_date`` pair in YYYY-MM-DD form.
        """
        allowed_statics = ["today", "week", "month", "quarter", "year",
                           "day_1", "week_1", "month_1", "quarter_1", "year_1"]
        documents_possible = ["bill", "vendor_payment", "invoice", "customer_payment",
                              "item", "purchaseorder", "salesorder", "package", "shipment_order",
                              "purchase_receive", "stripe", "users", "contact", "vendor",
                              "vat_return", "bill_of_entry", "shipping_bill"]
        document_filters = document_filters or []
        additional_filters = additional_filters or []
        if not columns:
            # BUGFIX: this default used to be unconditionally reassigned later,
            # silently discarding any caller-supplied columns.
            columns = [{"field": "date", "group": "report"},
                       {"field": "transaction_type", "group": "report"},
                       {"field": "description", "group": "report"}]
        # BUGFIX: allow static_range=None so explicit date ranges are usable.
        if static_range is not None and static_range not in allowed_statics:
            raise Exception(f"static_range {static_range} not allowed for this report")
        if start_date is not None:
            start_date = datetime.datetime.strptime(start_date, "%Y-%m-%d")
        if end_date is not None:
            end_date = datetime.datetime.strptime(end_date, "%Y-%m-%d")
        error_documents = set(document_filters) - set(documents_possible)
        if len(error_documents) != 0:
            raise Exception(f"Filter for {error_documents} are not possible, posssible filters are {documents_possible}")
        select_rule = [{"index": 1,
                        "field": "transaction_type",
                        "value": document_filters,
                        "comparator": "in",
                        "group": "report"}]
        return self.get_report(document="report_activity", round=round,
                               static_range=static_range,
                               end_date=end_date, start_date=start_date,
                               select_rule=select_rule, select_columns=columns,
                               additional_filters=additional_filters)

    def get_report(self, document, round, static_range=None,
                   start_date=None, end_date=None,
                   select_rule=None, select_columns=None,
                   additional_filters=None, extra_params=None):
        """Build the filter/column payload and fetch one page of a report."""
        static_range_map = {"today": "CreatedDate.Today",
                            "week": "CreatedDate.ThisWeek",
                            "month": "CreatedDate.ThisMonth",
                            "quarter": "CreatedDate.ThisQuarter",
                            "year": "CreatedDate.ThisYear",
                            "day_1": "CreatedDate.PreviousDay",
                            "week_1": "CreatedDate.PreviousWeek",
                            "month_1": "CreatedDate.PreviousMonth",
                            "quarter_1": "CreatedDate.PreviousQuarter",
                            "year_1": "CreatedDate.PreviousQuarter",
                            "range": "CreatedDate.CustomDate"}
        # Copy all inputs so callers' lists/dicts (and shared defaults) are never mutated.
        select_rule = list(select_rule or [])
        select_columns = select_columns or []
        additional_filters = additional_filters or []
        extra_params = dict(extra_params or {})
        if all(each is None for each in [start_date, end_date, static_range]):
            raise Exception("Please provide start_date and end_date or static_range")
        if None not in [start_date, end_date] and start_date > end_date:
            raise Exception("start_date can not be earlier than end_date")
        if static_range is None:
            # Explicit dates without a named range imply a custom date range.
            static_range = "range"
        if static_range == "range":
            if start_date is None or end_date is None:
                raise Exception("Custom date range requires both start_date and end_date")
            extra_params.update({"from_date": start_date.strftime("%Y-%m-%d"),
                                 "to_date": end_date.strftime("%Y-%m-%d")})
        # BUGFIX: rule indices were computed against the *growing* select_rule,
        # producing duplicates (e.g. 1, 1, 3); number them sequentially instead.
        base = len(select_rule)
        for index, rule in enumerate(additional_filters):
            select_rule.append({**rule, "index": base + index + 1})
        criteria_string = f"({' AND '.join([str(each) for each in range(1, len(select_rule) + 1)])})"
        payload = {"select_columns": select_columns, "filter_by": static_range_map[static_range]}
        if len(select_rule) > 0:
            payload.update({"rule": {"columns": select_rule, "criteria_string": criteria_string}})
        payload.update(extra_params)
        return self.get_document_page_n(document=document, segment=200, round=round, extra_params=payload)

    def get_document_count(self, document):
        """Return the total number of documents of the given type."""
        count_relation = {"items": ["item_count", None]}
        self.__document_check(relations=count_relation, document=document)
        # BUGFIX: this used to pass a nonexistent ``segment`` keyword to
        # __fetch_result, raising TypeError on every call.
        return self.__fetch_result(document=document,
                                   attribute=count_relation[document][0],
                                   extra_params={})
# NOTE(review): module-level smoke test — this performs a live API call on
# *import* and embeds real-looking credentials in source control. Consider
# moving it under ``if __name__ == "__main__":`` and loading secrets from the
# environment.
document = inv_document(login_store = os.path.join(os.getcwd(), "sales_mania", "pickel"),
                        cache_store = os.path.join(os.getcwd(), "sales_mania", "cache"),
                        user_id = "it.kvtek@outlook.com", password = "IT4kvtek",
                        data_center = ".in", org_id = "60008720898")
#print(document.get_document_count("items"))
#print(document.get_document_page_n(document = "items", segment=10, round=1))
#print(document.get_document_detail(document = "items", document_id = "536460000007476086"))
print(document.get_report_activity(round=1, start_date=None, end_date=None, static_range="year",
                                   document_filters=["bill"], additional_filters=[], columns=[]))
73534483856 | import datetime
import time
import json
import tweepy
from tweepy import Stream
from tweepy import OAuthHandler
from tweepy.streaming import StreamListener
from azure.eventhub import EventHubProducerClient, EventData
from configurations import EventHubSettings, TwitterSettings
# Twitter settings (read from configurations.py; used to authenticate the stream)
CONSUMER_KEY = TwitterSettings['consumer_key']
CONSUMER_SECRET = TwitterSettings['consumer_secret']
ACCESS_TOKEN = TwitterSettings['access_token']
ACCESS_SECRET = TwitterSettings['access_secret']
# Keywords used to filter the live tweet stream.
FILTER_KEYWORDS = TwitterSettings['filter_keywords']
# Eventhub settings (destination for the raw tweet payloads)
CONNECTION_STRING = EventHubSettings['connection_string']
EVENT_HUB_NAME = EventHubSettings['eventhub_name']
PARTITION_ID = EventHubSettings['partition_id']
class listener(StreamListener):
    """Tweepy stream listener that relays each raw tweet payload to Azure Event Hubs."""

    def on_data(self, data):
        # Forward the raw JSON payload and keep the stream alive.
        self.postMessageToEventHub(data)
        return True

    def on_error(self, status):
        # Surface stream errors on stdout; the stream itself keeps running.
        print(status)

    def postMessageToEventHub(self, data):
        # Each message goes out as a single-event batch; failures are logged
        # but deliberately never kill the stream.
        try:
            batch = client.create_batch()
            batch.add(EventData(data))
            client.send_batch(batch)
            print(data)
        except Exception as exc:
            print(exc.args)
if __name__ == '__main__':
    try:
        # Connect the Event Hub producer once; the stream listener reuses it
        # as a module-level global.
        client = EventHubProducerClient.from_connection_string(
            CONNECTION_STRING, eventhub_name=EVENT_HUB_NAME)
        auth = OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
        auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
        twitterStream = Stream(auth, listener())
        # is_async=True runs the filtered stream on a background thread.
        twitterStream.filter(track=FILTER_KEYWORDS, is_async=True)
    except Exception as e:
        print("Top level Error: args:{0}, message:N/A".format(e.args))
    finally:
        pass
| DaveCheema/Optum | Big Data Platform & ML Architectures/Lambda Architecture/Python Projects/WIP/TwitterReader.py | TwitterReader.py | py | 1,645 | python | en | code | 0 | github-code | 13 |
5986414801 | import click
from pulsecalc import core
from rich import print as rprint
from rich.table import Table
@click.group(help="NMR pulse calculator")
def main():
    # Redundant `pass` removed — the docstring alone is a valid body.
    """Just a placeholder for the main entrypoint."""
@main.command()
def init():
    """Create a table containing the reference pulse definitions"""
    # Prompt for a length/power pair per channel and persist each reference
    # pulse together with its derived nutation frequency.
    click.echo("Initializing reference pulse table")
    core.create_reference_table()
    for channel in core.channels:
        length_us = click.prompt(
            f"Enter {channel} reference pulse length in μs", type=float
        )
        power_w = click.prompt(
            f"Enter {channel} reference pulse power in W", type=float
        )
        freq_khz = core.calculate_frequency_from_length(length_us)
        core.set_reference_pulse(channel, length_us, power_w, freq_khz)
@main.command()
def show():
    """Show the reference pulse definitions as a rich table."""
    try:
        reference_table = core.get_reference_table()
    except FileNotFoundError:
        # No table yet -- nothing to show.
        return
    rich_table = Table(title="Reference pulses", row_styles=["yellow", "blue", "red"])
    rich_table.add_column("Channel", style="bold")
    rich_table.add_column(
        "Length (μs)",
    )
    rich_table.add_column("Power (W)")
    rich_table.add_column("Frequency (kHz)")
    with open(reference_table, "r") as f:
        # skip header; one row per channel
        lines = f.readlines()[1:4]
        for line in lines:
            # BUGFIX: strip the trailing newline so the last (frequency)
            # column does not carry a stray "\n" into the rendered table.
            channel, pulse_length, pulse_power, pulse_frequency = line.rstrip("\n").split("\t")
            rich_table.add_row(
                channel,
                pulse_length,
                pulse_power,
                pulse_frequency,
            )
    rprint(rich_table)
    return
@main.command()
def reset():
    """Reset the reference pulse definitions and/or calculations"""
    try:
        core.get_reference_table()
    except FileNotFoundError:
        # Nothing to reset if the table was never created.
        return
    else:
        core.reset_reference_table()
@main.command()
def update():
    """Update a given reference pulse definition"""
    try:
        core.get_reference_table()
    except FileNotFoundError:
        # Cannot update before `init` has created the table.
        return
    chan = click.prompt(
        "Which channel do you want to update?",
        show_choices=True,
        type=click.Choice(["1H", "13C", "15N"], case_sensitive=False),
    )
    length = click.prompt("What is the reference pulse length in μs?", type=float)
    power = click.prompt("What is the reference pulse power in W?", type=float)
    frequency = core.calculate_frequency_from_length(length)
    core.set_reference_pulse(chan, length, power, frequency)
    click.echo(
        f"{chan} reference pulse updated: {length:.2f} μs @ {power:.2f} W == {frequency:.2f} kHz"
    )
@main.command()
def hh():
    """Calculate typical Hartmann-Hahn conditions for Cross Polarization
    at a given MAS frequency based on the reference pulses.
    Conditions are given in kHz and as a fraction of the MAS frequency.
    Powers are given in W.
    """
    try:
        core.get_reference_table()
    except FileNotFoundError:
        return
    mas = click.prompt("What is the MAS frequency in kHz?", type=float)
    # BUGFIX: title previously misspelled "Hartmann-Hanh".
    # NOTE(review): the title says "frequencies" but the rows below hold the
    # matching powers in W -- confirm the intended wording.
    rich_table = Table(
        title=f"Hartmann-Hahn frequencies @ {mas} kHz (as n*MAS)",
        row_styles=["yellow", "blue", "red"],
    )
    rich_table.add_column("Channel", style="bold")
    hh_frequency_list = []
    # One column per HH condition; its target frequency is ratio * MAS.
    for condition, ratio in core.hh_conditions.items():
        rich_table.add_column(condition, justify="center")
        hh_frequency_list.append(ratio * mas)
    for channel in core.channels:
        try:
            _, reference_power, reference_frequency = core.get_reference_pulse(channel)
        except TypeError:
            click.echo(f"No reference pulse defined for {channel}")
            continue
        hh_power_list = []
        for hh_frequency in hh_frequency_list:
            hh_power_list.append(
                core.calculate_power_from_frequency(
                    reference_frequency, reference_power, hh_frequency
                )
            )
        hh_power_list = ["{:.2f}".format(power) for power in hh_power_list]
        rich_table.add_row(channel, *hh_power_list)
    rprint(rich_table)
    return
@main.command()
def freq():
    """Calculate the frequency of a given pulse given a new power or length, based on the reference pulses"""
    try:
        core.get_reference_table()
    except FileNotFoundError:
        # No reference table yet -- nothing to calculate from.
        return
    channel = click.prompt(
        "Which channel do you want to calculate the frequency for?",
        show_choices=True,
        type=click.Choice(["1H", "13C", "15N"], case_sensitive=False),
    )
    reference_length, reference_power, reference_frequency = core.get_reference_pulse(
        channel
    )
    unit = click.prompt(
        "Calculate from (l)ength or (p)ower?",
        show_choices=True,
        type=click.Choice(["l", "p"]),
    )
    if unit == "l":
        new_length = click.prompt("What is the pulse length in μs?", type=float)
        new_frequency = core.calculate_frequency_from_length(new_length)
        new_power = core.calculate_power_from_frequency(
            reference_frequency, reference_power, new_frequency
        )
        # NOTE(review): new_power is computed above but the echo reports
        # reference_power -- confirm which power belongs in this message.
        click.echo(
            f"{new_length:.2f} μs @ {reference_power:.2f} W == {new_frequency:.2f} kHz"
        )
    elif unit == "p":
        new_power = click.prompt("What is the pulse power in W?", type=float)
        new_frequency = core.calculate_frequency_from_power(
            reference_frequency, reference_power, new_power
        )
        click.echo(
            f"{reference_length:.2f} μs @ {new_power:.2f} W == {new_frequency:.2f} kHz"
        )
@main.command()
def power():
    """Calculate the power of a given pulse given a new length or frequency, based on the reference pulses"""
    try:
        core.get_reference_table()
    except FileNotFoundError:
        # No reference table yet -- nothing to calculate from.
        return
    channel = click.prompt(
        "Which channel do you want to calculate the power for?",
        show_choices=True,
        type=click.Choice(["1H", "13C", "15N"], case_sensitive=False),
    )
    reference_length, reference_power, reference_frequency = core.get_reference_pulse(
        channel
    )
    unit = click.prompt(
        "Calculate from (l)ength or (f)requency?",
        show_choices=True,
        type=click.Choice(["l", "f"]),
    )
    if unit == "l":
        new_length = click.prompt("What is the pulse length in μs?", type=float)
        new_frequency = core.calculate_frequency_from_length(new_length)
        new_power = core.calculate_power_from_frequency(
            reference_frequency, reference_power, new_frequency
        )
        # NOTE(review): new_frequency is computed above but the echo reports
        # reference_frequency -- looks like it should print new_frequency; confirm.
        click.echo(
            f"{new_length:.2f} μs @ {new_power:.2f} W == {reference_frequency:.2f} kHz"
        )
    elif unit == "f":
        new_frequency = click.prompt("What is the pulse frequency in kHz?", type=float)
        new_power = core.calculate_power_from_frequency(
            reference_frequency, reference_power, new_frequency
        )
        click.echo(
            f"{reference_length:.2f} μs @ {new_power:.2f} W == {new_frequency:.2f} kHz"
        )
# Allow `python __main__.py` in addition to `python -m pulsecalc`.
if __name__ == "__main__":
    main()
| miguelarbesu/pulsecalc | src/pulsecalc/__main__.py | __main__.py | py | 7,218 | python | en | code | 1 | github-code | 13 |
17768905325 | # import sys
# import random
# f = sys.argv[1]
# l = sys.argv[2]
# def check(no):
# # print('here def')
# if (no<int(f) or no>int(l)):
# print('not valid no')
# return True
# while True:
# try:
# no = int(input(f'Between {f} to {l} Guess no: '))
# # print(f'here1{type(no)}')
# if(check(no)):
# # print('here2')
# continue
# elif(no == random.randrange(int(f),int(l))):
# print('Correct Choice')
# # print('here3')
# break
# else:
# # print('here4')
# continue
# except ValueError:
# print(" value error")
# import pyjokes
#
# print(pyjokes.get_joke())
#
# with open('C:/Users/Tushhh/OneDrive/Desktop/hello.txt', mode='w') as my_file:
# # print(my_file.read())
# # my_file.write("FO")
# # my_file.write("remove all")
# # my_file.write('Append kiya')
# # my_file.write('new file created using IDE')
#
#
#
# print(my_file.read())
# my_file.seek(0)
# print(my_file.readline())
# print('x')
# print(my_file.readline())
# print('x')
# print(my_file.readline())
# print(my_file.readlines())
#
# my_file.close()
import unittest
import v
class Test(unittest.TestCase):
    """Exercises v.mult; tests 1-2 suggest it doubles its argument."""

    def test1(self):
        # 5 doubled is 10
        ans = 10
        data = 5
        result = v.mult(data)
        self.assertEqual(result,ans)

    def test2(self):
        # 10 doubled is 20
        ans = 20
        data = 10
        result = v.mult(data)
        self.assertEqual(result,ans)

    def test3(self):
        # NOTE(review): expects 10 from input 3, contradicting the doubling
        # behaviour implied above -- presumably a deliberately failing
        # example for demonstration; confirm.
        ans = 10
        data = 3
        result = v.mult(data)
        self.assertEqual(result,ans)
# Run the suite directly; prints the module name of `v` first.
if __name__ == '__main__' :
    print(v.__name__)
    unittest.main()
| Tushhh71/PythonTraining | v-test.py | v-test.py | py | 1,517 | python | en | code | 0 | github-code | 13 |
43756067276 | import sys, multiprocessing
from top2vec import Top2Vec
import logging
# Route gensim's INFO-level training logs to stderr with timestamps.
logger = logging.getLogger('gensim')
logger.setLevel(logging.INFO)
sh = logging.StreamHandler(sys.stderr)
sh.setFormatter(logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s'))
logger.addHandler(sh)
def parser(file):
    """Read a pipe-delimited corpus file with lines of the form id|label|text.

    Returns (docs, doc_ids): docs[i] is the third field of line i (trailing
    newline included, as split('|') leaves it), doc_ids[i] the first field.
    """
    docs, doc_ids = [], []
    with open(file, 'r') as fp:
        for line in fp:
            fields = line.split('|')
            doc_ids.append(fields[0])
            docs.append(fields[2])
    return docs, doc_ids
def main():
    """Train a Top2Vec model on the corpus file given as the first CLI argument."""
    args = sys.argv[1:]
    if not args:
        print("need to give dataset path!")
        exit(1)
    data_file = args[0]
    print(data_file)
    save_file = "top2vec.model"
    docs, doc_ids = parser(data_file)
    model = Top2Vec(documents=docs, document_ids=doc_ids, min_count=20, keep_documents=False,
                    use_corpus_file=True, workers=multiprocessing.cpu_count(),
                    verbose=True)
    model.save(save_file)
# Script entry point: train and save the model.
if __name__ == "__main__":
    main()
| kongyq/project_IRDL | topics/main.py | main.py | py | 1,049 | python | en | code | 1 | github-code | 13 |
23619305622 | # 백준 1991
# Baekjoon 1991: read N nodes; each input line is "node left right",
# where '.' marks a missing child.
tree = {}
n = int(input())
for _ in range(n):
    a, b, c = input().split()
    tree[a] = [b, c]
# Pre-order, in-order and post-order traversals; each builds the global `ans`.
def pre_order(a):
    """Visit root, then left subtree, then right subtree."""
    global ans
    if a == ".":
        return
    ans += a
    children = tree[a]
    if children:
        pre_order(children[0])
        pre_order(children[1])
def post_order(a):
    """Visit left subtree, right subtree, then the root."""
    global ans
    if a == ".":
        return
    children = tree[a]
    if children:
        post_order(children[0])
        post_order(children[1])
    ans += a
def in_order(a):
    """Visit left subtree, the root, then right subtree."""
    global ans
    if a == ".":
        return
    children = tree[a]
    if children:
        in_order(children[0])
        ans += a
        in_order(children[1])
# Run each traversal from the root 'A', collecting visited node names
# into the global string `ans`, and print one line per traversal.
ans = ''
pre_order('A')
print(ans)
ans = ''
in_order('A')
print(ans)
ans = ''
post_order('A')
print(ans)
| yoooyeon/Algorithm | Tree, Graph/트리 순회.py | 트리 순회.py | py | 769 | python | en | code | 0 | github-code | 13 |
class Music:
    """A song record: title, performer (interpreter), composer and release year."""

    def __init__(self, title, interpreter, composer, year):
        self.title = title
        self.interpreter = interpreter
        self.composer = composer
        self.year = year

    def __repr__(self):
        # Debug-friendly representation, added for easier inspection in lists.
        return (f"{type(self).__name__}(title={self.title!r}, "
                f"interpreter={self.interpreter!r}, "
                f"composer={self.composer!r}, year={self.year!r})")
class Search:
    """Sequential (linear) search over a playlist of Music objects."""

    def search_by_title(self, playlist, title):
        """Return the index of the first track whose title matches, or -1.

        Idiom fix: iterate with enumerate instead of indexing by range(len()).
        """
        for index, track in enumerate(playlist):
            if track.title == title:
                return index
        return -1

    def lets_search(self, music_to_search):
        """Search a hard-coded demo playlist and print the match (or a miss)."""
        playlist = [
            Music("Human", "Rag'n'Bone Man", "Rag'n'Bone Man", 2016),
            Music("Down With The Sickness", "Disturbed", "RDisturbed", 2000),
            Music("Aerials", "System Of A Down", "System Of A Down", 2011)
        ]
        finded = self.search_by_title(playlist, music_to_search)
        if finded == -1:
            print("The music ", music_to_search, " it's no present on current playlist!")
        else:
            index = playlist[finded]
            print(index.title, index.interpreter, index.composer, index.year, sep=", ")
# Demo: search the built-in playlist for "Aerials" and print the match.
s = Search()
s.lets_search("Aerials")
37997350238 | #
## @file TrackD3PDMaker/share/VertexLumiD3PD_prodjobOFragment.py
## @brief Setup D3PD Maker algorithm for luminosity measurement/debug
## @author Simone Pagan Griso
## @date Mar, 2012
##
## Notes:
## - To include non beam-constrained, use as preExec:
## InDetFlags.doVertexFindingForMonitoring.set_Value_and_Lock(True)
## or use as preInclude TrackD3PDMaker/VertexLumiD3PD_configure.py
## and make sure that VertexD3PDAnalysisFlags.useAllVertexCollections = True
## (it is by default; if the algorithms are not run, it does not dump info)
"""
Setup Vertex Lumi D3PD Maker algorithm when running from Reco_trf.py
"""
from AthenaCommon.JobProperties import jobproperties
vtxprodFlags = jobproperties.D3PDProdFlags
from PrimaryDPDMaker.PrimaryDPDHelpers import buildFileName
from TrackD3PDMaker.VertexD3PDAnalysisKeys import VertexD3PDAnalysisKeys
from TrackD3PDMaker.VertexD3PDAnalysisFlags import VertexD3PDAnalysisFlags
### Configure Vertex D3PD Maker
VertexD3PDAnalysisFlags.useAllVertexCollections = False
VertexD3PDAnalysisFlags.useEventInfo=True
VertexD3PDAnalysisFlags.useTracks=False
VertexD3PDAnalysisFlags.useBeamspot=False
VertexD3PDAnalysisFlags.useTrigger=False
VertexD3PDAnalysisFlags.useBackgroundWord=True
VertexD3PDAnalysisFlags.useSecondaryVertex=False
VertexD3PDAnalysisFlags.useMET=False
VertexD3PDAnalysisFlags.useElectrons=False
VertexD3PDAnalysisFlags.useMuons=False
VertexD3PDAnalysisFlags.usePhotons=False
VertexD3PDAnalysisFlags.useJets=False
VertexD3PDAnalysisFlags.useTaus=False
include("TrackD3PDMaker/VertexGroupD3PD_loadConfig.py")
### Semi-automatic configuration steps
## The following needs to be given as preExec
## (it has to be set before including RecExCommon)
#if VertexD3PDAnalysisFlags.useAllVertexCollections:
# #Need to enable algorithms in IDRE
# from InDetRecExample.InDetJobProperties import InDetFlags
# InDetFlags.doVertexFindingForMonitoring.set_Value_and_Lock(True)
### Setup algorithm
if vtxprodFlags.WriteIDVTXLUMID3PD.isVirtual:
raise NameError( "IDVTXLUMID set to be a virtual stream" )
pass
streamName = vtxprodFlags.WriteIDVTXLUMID3PD.StreamName
fileName = buildFileName( vtxprodFlags.WriteIDVTXLUMID3PD )
from OutputStreamAthenaPool.MultipleStreamManager import MSMgr
VertexLumiD3PDMaker = MSMgr.NewRootStream(vtxprodFlags.WriteIDVTXLUMID3PD.StreamName, fileName, VertexD3PDAnalysisKeys.D3PDTreeName())
### Add objects to the algorithm
from TrackD3PDMaker.VertexGroupD3PD import VertexGroupD3PD
VertexGroupD3PD(VertexLumiD3PDMaker, **(VertexD3PDAnalysisFlags.D3PDMakerExtraDict()))
| rushioda/PIXELVALID_athena | athena/PhysicsAnalysis/D3PDMaker/TrackD3PDMaker/share/VertexLumiD3PD_prodJobOFragment.py | VertexLumiD3PD_prodJobOFragment.py | py | 2,580 | python | en | code | 1 | github-code | 13 |
28391352174 | """
Support for Wolf heating via ISM8 adapter
"""
import logging
from homeassistant.components.binary_sensor import BinarySensorDeviceClass
from homeassistant.components.binary_sensor import BinarySensorEntity
from wolf_ism8 import Ism8
from .const import (
DOMAIN,
WOLF,
WOLF_ISM8,
SensorType,
)
from homeassistant.const import (
CONF_DEVICES,
STATE_PROBLEM,
STATE_OK,
STATE_ON,
STATE_OFF,
STATE_UNKNOWN,
)
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass,
    config_entry,
    async_add_entities,
):
    """Set up the Wolf binary sensors for one config entry.

    Requires the shared Ism8 protocol object stored in hass.data.
    """
    config = hass.data[DOMAIN][config_entry.entry_id]
    ism8: Ism8 = hass.data[DOMAIN]["protocol"]
    # Only boolean-valued, read-only datapoints become binary sensors.
    binary_types = (
        SensorType.DPT_SWITCH,
        SensorType.DPT_BOOL,
        SensorType.DPT_ENABLE,
        SensorType.DPT_OPENCLOSE,
    )
    sensors = [
        WolfBinarySensor(ism8, nbr)
        for nbr in ism8.get_all_sensors()
        if ism8.get_device(nbr) in config[CONF_DEVICES]
        and ism8.get_type(nbr) in binary_types
        and not ism8.is_writable(nbr)
    ]
    async_add_entities(sensors)
class WolfBinarySensor(BinarySensorEntity):
    """Binary sensor representation for DPT_SWITCH, DPT_BOOL,
    DPT_ENABLE, DPT_OPENCLOSE types"""

    def __init__(self, ism8, dp_nbr):
        self.dp_nbr = dp_nbr
        self._device = ism8.get_device(dp_nbr)
        # Plain datapoint name, without the device prefix; the device_class
        # lookup below matches against this.
        self._dp_name = ism8.get_name(dp_nbr)
        self._name = self._device + "_" + self._dp_name
        self._type = ism8.get_type(dp_nbr)
        self._unit = ism8.get_unit(dp_nbr)
        self._state = STATE_UNKNOWN
        self._ism8 = ism8
        _LOGGER.debug("setup BinarySensor no. %d as %s", self.dp_nbr, self._type)

    @property
    def name(self) -> str:
        """Return the name of this sensor."""
        return self._name

    @property
    def unique_id(self):
        """Return the unique_id of this sensor."""
        return str(self.dp_nbr)

    @property
    def device_info(self):
        """Return device info."""
        return {
            "identifiers": {(DOMAIN, self._device)},
            "name": self._device,
            "manufacturer": WOLF,
            "model": WOLF_ISM8,
        }

    @property
    def state(self):
        """Return the state of the device."""
        if self.device_class == BinarySensorDeviceClass.PROBLEM:
            return STATE_PROBLEM if self.is_on else STATE_OK
        else:
            return STATE_ON if self.is_on else STATE_OFF

    @property
    def is_on(self) -> bool:
        """Return true if the binary sensor is on."""
        return self._state

    @property
    def device_class(self):
        """Return the class of the device.

        BUGFIX: these comparisons previously tested self._name, which always
        carries the "<device>_" prefix and therefore could never equal the
        plain datapoint names listed here; they now test the unprefixed name.
        """
        if self._dp_name == "Stoerung":
            return BinarySensorDeviceClass.PROBLEM
        elif self._dp_name in ["Status Brenner / Flamme", "Status E-Heizung"]:
            return BinarySensorDeviceClass.HEAT
        elif self._dp_name in [
            "Status Heizkreispumpe",
            "Status Speicherladepumpe",
            "Status Mischerkreispumpe",
            "Status Solarkreispumpe SKP1",
            "Status Zubringer-/Heizkreispumpe",
        ]:
            return BinarySensorDeviceClass.MOVING
        else:
            return None

    async def async_update(self):
        """Refresh the cached state from the ISM8 protocol object."""
        self._state = self._ism8.read(self.dp_nbr)
        return
| marcschmiedchen/home-assistant-wolf_ism8 | custom_components/wolf/binary_sensor.py | binary_sensor.py | py | 3,495 | python | en | code | 18 | github-code | 13 |
43559767216 | import urlparse
from scrapy.contrib.spiders import CrawlSpider, Rule
from scrapy.contrib.linkextractors.sgml import SgmlLinkExtractor
from scrapy.selector import HtmlXPathSelector
from isbullshit.items import IsBullshitItem
class IsBullshitSpider(CrawlSpider):
    """ General configuration of the Crawl Spider """
    # NOTE(review): SgmlLinkExtractor / HtmlXPathSelector and the `urlparse`
    # module are Python 2 / legacy-scrapy APIs -- this file predates scrapy 1.x.
    name = 'isbullshit'
    start_urls = ['http://isbullsh.it'] # urls from which the spider will start crawling
    rules = [Rule(SgmlLinkExtractor(allow=[r'page/\d+']), follow=True),
        # r'page/\d+' : regular expression for http://isbullsh.it/page/X URLs
        Rule(SgmlLinkExtractor(allow=[r'\d{4}/\d{2}/\w+']), callback='parse_blogpost')]
        # r'\d{4}/\d{2}/\w+' : regular expression for http://isbullsh.it/YYYY/MM/title URLs

    def parse_blogpost(self, response):
        """ Extract title, author, tag(s), date, location, url and the html text of a blogpost,
            using XPath selectors
        """
        hxs = HtmlXPathSelector(response)
        item = IsBullshitItem()
        # Extract title
        item['title'] = hxs.select('//header/h1/text()').extract()[0]
        # Extract author
        item['author'] = hxs.select('//header/p/a/text()').extract()[0]
        # Extract tag(s)
        item['tag'] = hxs.select("//header/div[@class='post-data']/p/a/text()").extract()
        # Extract date (any <p> containing "20", i.e. a 20xx year)
        item['date'] = hxs.select("//header/div[@class='post-data']/p[contains(text(), '20')]/text()").extract()[0]
        # Extract location, dropping the leading "From" label
        item['location'] = hxs.select("//header/div[@class='post-data']/p[contains(text(), 'From')]/text()").extract()[0].replace('From', '')
        # Extract article url from the breadcrumb trail (join last two parts)
        urls = hxs.select("//div[@class='breadcrumb-container']/ul[@class='breadcrumb']/li/a/@href").extract()
        item['url'] = urlparse.urljoin(urls[1], urls[2])
        # Extract article text, with html tags
        item['article_html'] = hxs.select("//div[@role='main']/article").extract()[0]
        return item
| brouberol/isbullshit-crawler | isbullshit/spiders/isbullshit_spiders.py | isbullshit_spiders.py | py | 1,992 | python | en | code | 31 | github-code | 13 |
16038807807 | import logging, traceback, sys, os, inspect
# Log to "<this file>.log", truncating it on every run.
logging.basicConfig(filename=__file__[:-3] +'.log', filemode='w', level=logging.DEBUG)
# Make the parent directory importable when this file runs as a script.
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.append(parentdir)
# True while at least one neutral planet remains on the map.
def if_neutral_planet_available(state):
    """Return True if the state still has any neutral planets."""
    neutral = state.neutral_planets()
    return any(neutral)
# True once every neutral planet has been claimed.
def if_no_neutral_planet_available(state):
    """Return True when no neutral planets remain."""
    remaining = len(state.neutral_planets())
    return remaining == 0
# Checks whether an enemy planet sits inside the player's cluster of planets.
def enemy_planets_near(state):
    """Return True if some enemy planet is closer to one of our planets than
    the average distance from our first planet to all of our planets.

    Hoists the repeated state.my_planets()/enemy_planets() calls out of the
    loops (the original re-queried the state on every access).
    """
    my_planets = state.my_planets()
    if not my_planets:
        return False
    origin = my_planets[0]
    # Average spread of our empire, measured from the first planet.
    avg = sum(state.distance(origin.ID, p.ID) for p in my_planets) / len(my_planets)
    enemy_planets = state.enemy_planets()
    for planet in my_planets:
        for enemy_planet in enemy_planets:
            if avg > state.distance(planet.ID, enemy_planet.ID):
                return True
    return False
# Compare total ship counts: planets plus fleets in flight, both sides.
def have_largest_fleet(state):
    """Return True when our combined ship count beats the enemy's."""
    my_total = sum(planet.num_ships for planet in state.my_planets())
    my_total += sum(fleet.num_ships for fleet in state.my_fleets())
    enemy_total = sum(planet.num_ships for planet in state.enemy_planets())
    enemy_total += sum(fleet.num_ships for fleet in state.enemy_fleets())
    return my_total > enemy_total
# Checks whether an enemy fleet is incoming towards a player planet
def check_incoming(state):
    """Return True if any enemy fleet is heading toward one of our planets.

    Fixes two issues in the original: it fell through returning None instead
    of False (inconsistent with the other boolean checks in this module), and
    it rescanned the planet list for every fleet (now a set lookup).
    """
    my_ids = {planet.ID for planet in state.my_planets()}
    for fleet in state.enemy_fleets():
        if fleet.destination_planet in my_ids:
            return True
    return False
| rabhatna/CM146 | P4/behavior_tree_bot/checks.py | checks.py | py | 1,730 | python | en | code | 0 | github-code | 13 |
19913298003 | import pygad.kerasga
import pygad
import numpy as np
from kapibara_audio import BUFFER_SIZE
from emotions import EmotionTuple
import tensorflow as tf
import time
from tensorflow.keras import layers
from tensorflow.keras import models
import math
import os.path
from timeit import default_timer as timer
from tflitemodel import LiteModel
class MindOutputs:
    """One motor command sample: speed and direction for two motors."""

    MAX_INPUT_VALUE=4294967295.0

    def __init__(self, speedA: int = 0, speedB: int = 0, directionA: int = 0, directionB: int = 0) -> None:
        self.speedA = speedA
        self.speedB = speedB
        self.directionA = directionA
        self.directionB = directionB

    def error(self) -> float:
        """Penalty for out-of-range or NaN fields; 0 when everything is valid."""
        penalty = 0
        if self.speedA > 100:
            penalty -= 50 * (self.speedA / 100.0)
        if self.speedB > 100:
            penalty -= 50 * (self.speedB / 100.0)
        if self.directionA > 3:
            penalty -= 50 * (self.directionA / 3.0)
        if self.directionB > 3:
            penalty -= 50 * (self.directionB / 3.0)
        # Each NaN field costs an extra 100.
        for value in (self.speedA, self.speedB, self.directionA, self.directionB):
            if math.isnan(value):
                penalty -= 100
        return penalty

    def get(self) -> list[float]:
        """Raw values: [speedA, speedB, directionA, directionB]."""
        return [self.speedA, self.speedB, self.directionA, self.directionB]

    def get_norm(self) -> list[float]:
        """Values normalized to [0, 1]: speeds / 100, directions / 4."""
        return [self.speedA / 100.0, self.speedB / 100.0,
                self.directionA / 4.0, self.directionB / 4.0]

    @staticmethod
    def _direction_from_norm(value: float) -> int:
        """Map a normalized direction in [0, 1) onto the discrete range 0..3.

        Values outside the three lower quarter-buckets (including negatives
        and NaN) fall through to 3, matching the original chained elif.
        """
        if 0 <= value < 0.25:
            return 0
        if 0.25 <= value < 0.5:
            return 1
        if 0.5 <= value < 0.75:
            return 2
        return 3

    def set_from_norm(self, speedA: float, speedB: float, directionA: float, directionB: float):
        """Fill this sample from network outputs normalized to [0, 1]."""
        self.speedA = speedA * 100
        self.speedB = speedB * 100
        self.directionA = self._direction_from_norm(directionA)
        self.directionB = self._direction_from_norm(directionB)
        # Clamp everything to the hardware maximum.
        if self.speedA > self.MAX_INPUT_VALUE:
            self.speedA = self.MAX_INPUT_VALUE
        if self.speedB > self.MAX_INPUT_VALUE:
            self.speedB = self.MAX_INPUT_VALUE
        if self.directionA > self.MAX_INPUT_VALUE:
            self.directionA = self.MAX_INPUT_VALUE
        if self.directionB > self.MAX_INPUT_VALUE:
            self.directionB = self.MAX_INPUT_VALUE

    def motor1(self) -> tuple[int, int]:
        """(speed, direction) for motor A, as ints."""
        return (int(self.speedA), int(self.directionA))

    def motor2(self) -> tuple[int, int]:
        """(speed, direction) for motor B, as ints."""
        return (int(self.speedB), int(self.directionB))
class Mind:
    # NOTE(review): neither constant below is referenced in this class --
    # init_model() hard-codes num_generations=100 and the history length is
    # the literal 10 in __init__; confirm whether these were meant to be used.
    OUTPUTS_BUFFER=10
    NUM_GENERATIONS=50
    '''Decision model: a Keras network whose weights are evolved by pygad.

    Inputs (flattened into self.inputs, in this order):
        * gyroscope (3) and accelerometer (3) readings
        * audio channel balance coefficients (2)
        * front and floor distance readings (2)
        * mono audio buffer (BUFFER_SIZE samples)
        * the 10 most recent MindOutputs, normalized (4 values each)
    Outputs: normalized speed and direction for two motors.
    '''
    def __init__(self,emotions:EmotionTuple,fitness,callback) -> None:
        """Allocate sensor buffers; `fitness`/`callback` are handed to pygad.GA."""
        # Rolling history of the last 10 motor commands.
        self.last_outputs=np.array([MindOutputs(0,0,0,0)]*10)

        self.gyroscope=np.zeros(3,dtype=np.float32)
        self.accelerometer=np.zeros(3,dtype=np.float32)

        self.dis_front=0.0
        self.dis_floor=0.0

        self.audio=np.zeros(BUFFER_SIZE,dtype=np.float32)
        self.audio_coff=(0,0)

        self.emotions=emotions

        # Flat input vector sized to hold every sensor plus the output history.
        self.inputs=np.ndarray(len(self.gyroscope)+len(self.accelerometer)+len(self.audio)+
                    len(self.audio_coff)+(len(self.last_outputs)*4)+2,dtype=np.float32)

        #self.inputs=self.inputs.reshape(len(self.inputs),1)

        self.fitness=fitness
        self.callback=callback

    def init_model(self):
        """Build the two-headed Keras model and the pygad GA around its weights.

        Loads a previously saved GA from ./mind.pkl when present.
        """
        # (local name shadows the builtin `input`)
        input=layers.Input(len(self.inputs))

        layer_1=layers.Dense(512,activation="linear")(input)
        layer_2=layers.Dense(386,activation="linear")(layer_1)

        # Head 1: speed and direction for motor A.
        layer_out1_1=layers.Dense(256,activation="linear")(layer_2)
        layer_out1_2=layers.Dense(128,activation="sigmoid")(layer_out1_1)
        layer_out1_3=layers.Dense(64,activation="sigmoid")(layer_out1_2)

        output_1_speed=layers.Dense(1,activation="sigmoid")(layer_out1_3)
        output_1_direction=layers.Dense(1,activation="sigmoid")(layer_out1_3)

        # Head 2: speed and direction for motor B.
        layer_out2_1=layers.Dense(256,activation="linear")(layer_2)
        layer_out2_2=layers.Dense(128,activation="sigmoid")(layer_out2_1)
        layer_out2_3=layers.Dense(64,activation="sigmoid")(layer_out2_2)

        output_2_speed=layers.Dense(1,activation="sigmoid")(layer_out2_3)
        output_2_direction=layers.Dense(1,activation="sigmoid")(layer_out2_3)

        self.model=models.Model(inputs=input,outputs=[output_1_speed,output_1_direction,output_2_speed,output_2_direction])

        self.keras_ga=pygad.kerasga.KerasGA(model=self.model,
                                num_solutions=10)

        initial_population=self.keras_ga.population_weights

        #print(initial_population)

        # Resume from a saved GA if one exists on disk.
        if os.path.isfile("./mind.pkl"):
            self.mind=pygad.load("./mind")
            print("Model has been loaded")
            return

        self.mind=pygad.GA(num_generations=100,
                        num_parents_mating=10,
                        initial_population=initial_population,
                        fitness_func=self.fitness,
                        on_generation=self.callback,
                        init_range_high=10,
                        init_range_low=-5,
                        parent_selection_type="rank",
                        crossover_type="scattered",
                        mutation_type="random",
                        mutation_percent_genes= 10
                        )

    def test_tflite(self):
        """Benchmark: convert the model to TFLite and time 50 predictions."""
        lite=LiteModel.from_keras_model(self.model)

        self.prepareInput()

        print(lite.predict(self.inputs.reshape(1,len(self.inputs))))

        for i in range(50):
            start=timer()
            print(lite.predict(self.inputs.reshape(1,len(self.inputs))))
            print(timer()-start," s")

    def run_model(self,solutions):
        """Run the network with GA-provided weights on the current sensor input."""
        self.prepareInput()

        predictions=pygad.kerasga.predict(model=self.model,
                                solution=solutions,
                                data=self.inputs.reshape(1,len(self.inputs)))

        return predictions

    def getData(self,data:dict):
        """Normalize one raw sensor packet into the model's internal buffers.

        Assumes 16-bit IMU readings, distances in [0, 8160] and 16-bit signed
        audio samples -- TODO confirm against the sensor drivers.
        """
        self.gyroscope:np.array=data["Gyroscope"]["gyroscope"]/(2**16 -1)
        self.accelerometer:np.array=data["Gyroscope"]["acceleration"]/(2**16 -1)

        self.dis_front=data["Distance_Front"]["distance"]/8160.0
        self.dis_floor=data["Distance_Floor"]["distance"]/8160.0

        left:np.array=np.array(data["Ears"]["channel1"],dtype=np.float64)/32767.0
        right:np.array=np.array(data["Ears"]["channel2"],dtype=np.float64)/32767.0

        # Debug guards: report NaNs leaking in from the audio driver.
        for x in left:
            if np.isnan(x):
                print("Nan in left")

        for x in right:
            if np.isnan(x):
                print("Nan in right")

        # Mono mix of both ear channels.
        self.audio:np.array=np.add(left,right,dtype=np.float64)/2.0

        for x in self.audio:
            if np.isnan(x):
                print("Nan in audio")

        m:float=np.mean(self.audio)

        l:float=np.mean(left)
        r:float=np.mean(right)

        #print(self.gyroscope)
        #print(self.accelerometer)
        #print(self.dis_front)
        #print(self.dis_floor)

        # Avoid division by zero on silence.
        if m==0:
            self.audio_coff=(0.0,0.0)
            return

        # Which channel is stronger, relative to the mono mean.
        self.audio_coff=(l/m,r/m)

    def prepareInput(self):
        """Pack sensors, audio and the output history into self.inputs."""
        self.inputs[0]=self.gyroscope[0]
        self.inputs[1]=self.gyroscope[1]
        self.inputs[2]=self.gyroscope[2]

        self.inputs[3]=self.accelerometer[0]
        self.inputs[4]=self.accelerometer[1]
        self.inputs[5]=self.accelerometer[2]

        self.inputs[6]=self.audio_coff[0]
        self.inputs[7]=self.audio_coff[1]

        self.inputs[8]=self.dis_front
        self.inputs[9]=self.dis_floor

        # Audio samples occupy [10, 10+len(audio)).
        i=0
        for samp in self.audio:
            self.inputs[10+i]=samp
            i=i+1

        # Normalized output history follows the audio block.
        i=0
        l=len(self.audio)
        for out in self.last_outputs:
            put=out.get_norm()
            for x in put:
                self.inputs[10+i+l]=x
                i=i+1

    def push_output(self,output:MindOutputs):
        """Append a motor command to the rolling history (oldest drops off)."""
        self.last_outputs=np.roll(self.last_outputs,-1)
        self.last_outputs[-1]=output

    def loop(self):
        """Run the GA to completion, report the best solution and save it."""
        self.mind.run()

        solution, solution_fitness, solution_idx = self.mind.best_solution()
        print("Parameters of the best solution : {solution}".format(solution=solution))
        print("Fitness value of the best solution = {solution_fitness}".format(solution_fitness=solution_fitness))

        self.mind.save("./mind")
# Keep prompting until the user enters a valid positive integer age.
while True:
    try:
        age = int(input("What is your age? "))
        if age <= 0:
            raise ValueError("Hey cut it out")
        _ = 10 / age  # demo division; age > 0 here, so it cannot raise
    except ValueError as err:
        # Covers both a non-numeric entry and the explicit raise above.
        print(f"ValueError exception is raised: {err}")
    else:
        break
    finally:
        print("Thank you")
38910382166 | from typing import Dict
from fastapi import APIRouter
from ..schema.status import StatusResponse
router = APIRouter()


@router.get(
    path='',
    response_model=StatusResponse,
    summary='Server status',
    description='Returns server status information',
    response_description='Status information',
)
async def status() -> Dict[str, str]:
    """Report the service health.

    :return: Static payload identifying the application and its status.
    """
    payload = {
        'application': 'port-16',
        'version': '1.0',
        'status': 'ok',
    }
    return payload
| stefan2811/port-16 | port_16/api/status/handlers/status.py | status.py | py | 573 | python | en | code | 0 | github-code | 13 |
5756290684 | import rclpy
from rclpy.node import Node
from sensor_msgs.msg import Image
from std_msgs.msg import Float32
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
from collections import deque
from wcrc_ctrl.BEV import BEV
from wcrc_ctrl.Logger import Logger
# from wcrc_ctrl.Sensor import Sensor
# BGR colour tuples used for debug drawing (OpenCV channel order)
red, green, blue, yellow = (0, 0, 255), (0, 255, 0), (255, 0, 0), (0, 255, 255)
class LaneDetector:
'''
Detects left, middle, right lane from an image and calculate angle of the lane.
Uses canny, houghlinesP for detecting possible lane candidates.
Calculates best fitted lane position and predicted lane position from previous result.
'''
def __init__(self, node: Node = None):
# Ensure that the node argument is indeed an instance of rclpy.node.Node
if not isinstance(node, Node):
raise TypeError("Logger expects an rclpy.node.Node instance.")
self.node = node
self.logger = Logger(self.node)
self.bev = BEV()
# canny params
self.canny_low, self.canny_high = 40, 120
# HoughLineP params
self.hough_threshold, self.min_length, self.max_gap = 5, 20, 10
# initial state
self.angle = 0.0
self.prev_angle = deque([0.0], maxlen=5)
self.lane = np.array([1280])
# filtering params:
self.angle_tolerance = np.radians(30)
self.cluster_threshold = 25
self.target_lane = 0.0
def to_canny(self, img, show=False):
img = cv2.GaussianBlur(img, (9, 9), 0)
img = cv2.Canny(img, self.canny_low, self.canny_high)
if show:
cv2.imshow('canny', img)
cv2.waitKey(1)
return img
def hough(self, img, show=False):
lines = cv2.HoughLinesP(
img, 1, np.pi / 180, self.hough_threshold, self.min_length, self.max_gap)
if show:
hough_img = np.zeros((img.shape[0], img.shape[1], 3))
if lines is not None:
for x1, y1, x2, y2 in lines[:, 0]:
cv2.line(hough_img, (x1, y1), (x2, y2), red, 2)
cv2.imshow('hough', hough_img)
cv2.waitKey(1)
return lines
    def find_best_pair(self, lines, img):
        # TODO: unimplemented stub -- presumably meant to pick the best
        # left/right line pair from the Hough output; confirm before use.
        pass
    def draw_lines(self, img, lines):
        """Extend each detected segment across the full image width and draw it."""
        width = img.shape[1]
        if lines is not None:
            for x1, y1, x2, y2 in lines:
                if x2 - x1 == 0:
                    # NOTE(review): for a vertical segment this draws a
                    # *horizontal* line at y == y1; looks like a degenerate
                    # fallback -- confirm intent.
                    intercept = y1
                    start_point = (0, int(intercept))  # Start at x = 0
                    end_point = (
                        width - 1, int(intercept))
                    cv2.line(img, start_point,
                             end_point, (255, 0, 0), 3)
                else:
                    # y = slope * x + intercept, extended over [0, width).
                    slope = (y2 - y1) / (x2 - x1)
                    intercept = y1 - slope * x1
                    start_point = (0, int(intercept))  # Start at x = 0
                    end_point = (
                        width - 1, int(slope * (width - 1) + intercept))
                    cv2.line(img, start_point,
                             end_point, (255, 0, 0), 3)
    def filter(self, lines, show=True):
        '''
        filter lines that are close to previous angle and calculate its positions
        '''
        thetas, positions = [], []  # angles and x-positions of accepted lines
        if show:
            # black canvas for visualising the surviving segments
            filter_img = np.zeros(
                (self.bev.warp_img_h, self.bev.warp_img_w, 3))
        if lines is not None:
            for x1, y1, x2, y2 in lines[:, 0]:
                if y1 == y2:
                    # skip horizontal segments: they carry no lane direction
                    continue
                # orient the segment so it always points "up" the image
                flag = 1 if y1-y2 > 0 else -1
                # segment angle (0.9 vertical weighting is empirical)
                theta = np.arctan2(flag * (x2-x1), flag * 0.9 * (y1-y2))
                # keep only segments roughly aligned with the previous angle
                if abs(theta - self.angle) < self.angle_tolerance:
                    # x of the segment extrapolated to mid-image height
                    position = float(
                        (x2-x1)*(self.bev.warp_img_mid-y1))/(y2-y1) + x1
                    thetas.append(theta)
                    positions.append(position)
                    if show:
                        cv2.line(filter_img, (x1, y1), (x2, y2), red, 2)
        # remember the previous angle, then update from the surviving lines
        self.prev_angle.append(self.angle)
        if thetas:
            self.angle = np.mean(thetas)
        if show:
            cv2.imshow('filtered lines', filter_img)
            cv2.waitKey(1)
        return positions
    def get_cluster(self, positions):
        '''
        group positions that are close to each other
        '''
        clusters = []  # each cluster is a list of nearby x-positions
        for position in positions:
            # keep only positions inside the warped (bird's-eye) image
            if 0 <= position < self.bev.warp_img_w:
                for cluster in clusters:
                    # join the first cluster whose anchor element is close enough
                    if abs(cluster[0] - position) < self.cluster_threshold:
                        cluster.append(position)
                        break
                else:
                    # no nearby cluster found: start a new one
                    clusters.append([position])
        # one lane candidate per cluster: its mean position
        lane_candidates = [np.mean(cluster) for cluster in clusters]
        # print(lane_candidates)
        return lane_candidates
    def predict_lane(self):
        '''
        predicts the center lane position from the previous center lane position and angle
        '''
        # self.lane may be a sequence (multiple tracked lanes) or a scalar
        if isinstance(self.lane, (list, np.ndarray)) and len(self.lane) > 1:
            # with several tracked lanes, element 1 is the centre lane
            center_lane = self.lane[1]
        else:
            center_lane = self.lane
        # shift by the change in heading; 70 is presumably an empirical
        # pixels-per-radian gain -- TODO confirm
        predicted_center_lane = center_lane + \
            (self.angle - np.mean(self.prev_angle)) * 70
        # print(predicted_center_lane)
        return predicted_center_lane
def update_lane(self, lane_candidates, predicted_lane):
# If predicted_lane is an array with one element, convert it to a scalar
if isinstance(predicted_lane, np.ndarray) and predicted_lane.size == 1:
predicted_lane = predicted_lane.item()
# Find the lane candidate closest to the predicted lane
if lane_candidates:
# ??? ??? ??? ??? ???? ?? ??? ?? ????.
closest_lane = min(
lane_candidates, key=lambda lc: abs(lc - predicted_lane))
# ???? ???????.
self.lane = closest_lane
else:
# ?? ??? ??? ??? ???? ?????.
self.lane = 1004
    def mark_lane(self, img, lane=None):
        '''
        Draw the tracked lane position on *img* and show it in a window.

        img : single-channel (grayscale) frame; converted to BGR so the
              marker can be drawn in colour.
        lane : optional override position (defaults to self.lane).
        Side effects: sets self.target_lane and opens a 'marked' window.
        '''
        img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
        if lane is None:
            lane = self.lane
        # NOTE(review): `lane` is ignored below -- the marker always uses
        # self.lane, so the override parameter currently has no effect.
        l1 = self.lane
        self.target_lane = l1
        # Red dot at the lane position on the mid-height scan line.
        cv2.circle(img, (int(l1), self.bev.warp_img_mid),
                   3, red, 5, cv2.FILLED)
        # print(l1)
        # cv2.circle(img, (int(l2), self.bev.warp_img_mid),
        #            3, green, 5, cv2.FILLED)
        # cv2.circle(img, (int(l3), self.bev.warp_img_mid),
        #            3, blue, 5, cv2.FILLED)
        cv2.imshow('marked', img)
        cv2.waitKey(1)
    def __call__(self, img):
        '''
        Scan a horizontal band of the frame for a yellow marker.

        Returns True when some sliding window's mean BGR colour falls in
        the yellow range, False otherwise, or 1004 when *img* is None.

        NOTE(review): the original docstring claimed this returns the
        angle and cte of a target lane; the implementation does not.
        '''
        if img is None:
            return 1004
        canny = self.to_canny(img, show=False)
        image_height = img.shape[0]
        image_width = img.shape[1]
        x_padding = 300
        y_top_padding = 660
        # 1280 x 720: crop the bottom strip of the frame (colour + canny).
        roi_org = img[y_top_padding:720, x_padding:image_width-x_padding]
        bev = canny[y_top_padding:720, x_padding:image_width-x_padding]
        window_width = 60
        window_height = bev.shape[0]
        window_step = 20
        i = 0
        # NOTE(review): `mean`, `means`, `black_min_means` and
        # `image_height` are accumulated/assigned but never used.
        mean = [0, 0, 0]
        means = []
        black_min_means = [150, 150, 150]
        yellow_min_means = [100, 180, 210]
        yellow_max_means = [160, 255, 255]
        goal_means = yellow_min_means
        goal_max_means = yellow_max_means
        detected = False
        # Slide a fixed-width window across the strip and test whether the
        # window's mean colour falls inside the yellow band.
        while True:
            start_x = window_step * i
            end_x = start_x + window_width
            if end_x > bev.shape[1]:
                break
            roi = roi_org[0:window_height, start_x:end_x]
            mean_values = cv2.mean(roi)
            means.append(mean_values)
            mean[0] += mean_values[0]
            mean[1] += mean_values[1]
            mean[2] += mean_values[2]
            # NOTE(review): cv2.mean returns channels in the image's own
            # order (BGR for a BGR frame); the r_/g_/b_ names below may be
            # swapped relative to the actual channels -- verify.
            r_valid = goal_means[0] <= mean_values[0] <= goal_max_means[0]
            g_valid = goal_means[1] <= mean_values[1] <= goal_max_means[1]
            b_valid = goal_means[2] <= mean_values[2] <= goal_max_means[2]
            if r_valid and g_valid and b_valid:
                detected = True
            # print(mean_values)
            # self.logger.warn(str(mean_values))
            # cv2.imshow('max_roi', roi)
            # cv2.waitKey(0)
            i += 1
        print("detected: ", detected)
        # bev = self.bev(canny, show=True)
        # cv2.imshow('bev', bev)
        # cv2.waitKey(1)
        return detected
        # return float(self.target_lane)
# return float(self.target_lane)
| ggh-png/wcrc | wcrc_ctrl/wcrc_ctrl/LaneDetector.py | LaneDetector.py | py | 9,624 | python | en | code | 1 | github-code | 13 |
15468527720 |
import numpy as np
import sys
import math
import time
import socket
from util import quaternion_to_euler_angle_vectorized1
from NatNetClient import NatNetClient
import networkx as nx
import matplotlib.pyplot as plt
import random
import matplotlib.path as mpath
import networkx as nx
import matplotlib.pyplot as plt
from shapely import Point, LineString
import numpy as np
import cv2 as cv
# Create an empty graph
G = nx.Graph()
# Define the number of rows and columns
num_rows = 6
num_columns = 6
# Define the x and y ranges
# yr = (3.78, -2.38)
# xr = (5.649, -4.15)
xr = (3.88, 5.56)
yr = (-2.38, 3.78)
# Calculate the increment for x and y
xi = (xr[1] - xr[0]) / num_columns
yi = (yr[1] - yr[0]) / num_rows
# Generate nodes and edges for the organized pattern: a num_rows x
# num_columns lattice where every node is linked to its 4 orthogonal and
# 4 diagonal neighbours (8-connected grid). Edges towards not-yet-visited
# neighbours implicitly create those nodes (networkx adds endpoints).
for row in range(num_rows):
    for col in range(num_columns):
        # Node label = its rounded (x, y) world coordinate.
        node_label = (round(xr[0] + xi * col, 2), round(yr[0] + yi * row, 2))
        G.add_node(node_label)
        # Orthogonal neighbours.
        if col > 0:
            left_node_label = (
                round(xr[0] + xi * (col - 1), 2), round(yr[0] + yi * row, 2))
            G.add_edge(node_label, left_node_label)
        if col < num_columns - 1:
            right_node_label = (
                round(xr[0] + xi * (col + 1), 2), round(yr[0] + yi * row, 2))
            G.add_edge(node_label, right_node_label)
        if row > 0:
            top_node_label = (round(xr[0] + xi * col, 2),
                              round(yr[0] + yi * (row - 1), 2))
            G.add_edge(node_label, top_node_label)
        if row < num_rows - 1:
            bottom_node_label = (
                round(xr[0] + xi * col, 2), round(yr[0] + yi * (row + 1), 2))
            G.add_edge(node_label, bottom_node_label)
        # Diagonal neighbours.
        if col > 0 and row > 0:
            top_left_node_label = (
                round(xr[0] + xi * (col - 1), 2), round(yr[0] + yi * (row - 1), 2))
            G.add_edge(node_label, top_left_node_label)
        if col > 0 and row < num_rows - 1:
            bottom_left_node_label = (
                round(xr[0] + xi * (col - 1), 2), round(yr[0] + yi * (row + 1), 2))
            G.add_edge(node_label, bottom_left_node_label)
        if col < num_columns - 1 and row > 0:
            top_right_node_label = (
                round(xr[0] + xi * (col + 1), 2), round(yr[0] + yi * (row - 1), 2))
            G.add_edge(node_label, top_right_node_label)
        if col < num_columns - 1 and row < num_rows - 1:
            bottom_right_node_label = (
                round(xr[0] + xi * (col + 1), 2), round(yr[0] + yi * (row + 1), 2))
            G.add_edge(node_label, bottom_right_node_label)
# Set the positions of the nodes based on their labels
pos = {node_label: node_label for node_label in G.nodes()}
print(G.nodes())
# Add new nodes to the graph
new_nodes = ['A', 'B', 'C']
G.add_nodes_from(new_nodes)
# Specify positions for the new nodes
pos.update({'A': (1.5, -1.5), 'B': (-2.5, -2.5), 'C': (3.5, 3.5)})
# Add edges between new nodes to form a triangle
G.add_edge('A', 'B')
G.add_edge('B', 'C')
G.add_edge('C', 'A')
# Visualize the updated graph
nx.draw(G, pos, with_labels=True, node_size=500,
node_color='lightblue', font_size=8)
# plt.axis('equal')
# plt.xlim(*(-5, 5))
# plt.ylim(*(-5, 5))
plt.show()
# Create a list to store the nodes to be removed
nodes_to_remove = []
edges_to_remove = []
# Check for intersection of edges with the line connecting 'A', 'B', and 'C'
line_ab = LineString([pos['A'], pos['B']])
line_bc = LineString([pos['B'], pos['C']])
line_ca = LineString([pos['C'], pos['A']])
for u, v in G.edges:
line_uv = LineString([pos[u], pos[v]])
if line_uv.intersects(line_ab) or line_uv.intersects(line_bc) or line_uv.intersects(line_ca):
edges_to_remove.append((u, v))
# Check for intersection of nodes with the line connecting 'A', 'B', and 'C'
for node_label, coord in pos.items():
if node_label in new_nodes:
continue # Skip the target nodes 'A', 'B', and 'C'
line_node = LineString([pos['A'], pos['C']])
if line_node.contains(Point(coord)):
nodes_to_remove.append(node_label)
# Remove the edges and nodes from the graph
G.remove_edges_from(edges_to_remove)
G.remove_nodes_from(nodes_to_remove)
# Remove nodes that have a degree <2
remove_nodes = [node for node, degree in dict(
G.degree()).items() if degree < 2]
print(f"Removed nodes Degree < 2: {remove_nodes}")
G.remove_nodes_from(remove_nodes)
# Remove the corresponding coordinates from pos
pos = {node: coord for node, coord in pos.items() if node not in remove_nodes}
# if nodes are isolated:
G.remove_nodes_from(list(nx.isolates(G)))
# Remove the corresponding coordinates from pos
pos = {node: coord for node, coord in pos.items() if node not in remove_nodes}
# given values for obstacles in the evironment
# remove nodes and edges that are within the box
A = (1.5, -1.5)
B = (-2.5, -2.5)
C = (3.5, 3.5)
# Create the polygon path using A, B, and C
polygon_path = mpath.Path([A, B, C, A])
# Iterate over the nodes and their positions
for node_label, node_position in pos.items():
if polygon_path.contains_point(node_position):
nodes_to_remove.append(node_label)
edges_to_remove.extend(G.edges(node_label))
# Remove the edges within the shape formed by A, B, and C
G.remove_edges_from(edges_to_remove)
# Remove the nodes within the shape formed by A, B, and C
G.remove_nodes_from(nodes_to_remove)
# get the coords in a list
co = nx.get_node_attributes(G, 'pos')
for node, coord in pos.items():
print(f"node {node}: coord {coord}")
coordList = list(pos.values())
print(coordList)
# xList = [(coordList[i][0]) for i in range(len(coordList))]
# print(xList)
# yList = [(coordList[i][1]) for i in range(len(coordList))]
# print(yList)
print(f'GNODES: {G.nodes}')
gList = list(G.nodes())
source = (4.72, 2.75)
target = (4.16, -2.38)
# source = (4.72, 2.75)
# target = (5.0, 2.75)
def random_path_dfs(graph, start, end):
    """Depth-first search that visits neighbours in random order and
    returns the first simple path found from *start* to *end*, or []
    when no path exists."""
    # Each stack entry is (node, path taken to reach it).
    stack = [(start, [start])]
    while stack:
        node, trail = stack.pop()
        if node == end:
            # First time the target is popped we are done.
            return trail
        neighbours = list(graph.neighbors(node))
        # Randomise the visit order so repeated calls yield varied paths.
        random.shuffle(neighbours)
        for nxt in neighbours:
            # Only extend with nodes not already on the path (simple path).
            if nxt not in trail:
                stack.append((nxt, trail + [nxt]))
    return []
# Usage:
# source = coordList[0]
# target = coordList[9]
# Find a random path using DFS
path = random_path_dfs(G, source, target)
print(f'PATHS: {path}')
# once x y lens == max then def () global xd yd path
# path = random_path_dfs(G, start, end)
x_d = [(path[i][0]) for i in range(len(path))]
y_d = [(path[i][1]) for i in range(len(path))]
# Visualize the updated graph
nx.draw(G, pos, with_labels=True, node_size=700,
node_color='lightblue', font_size=6)
plt.show()
IP_ADDRESS = '192.168.0.205'
# Connect to the robot
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect((IP_ADDRESS, 5000))
print('Connected')
positions = {}
rotations = {}
index = 0
# x_d = [(path[i][0]) for i in range(len(path))]
# y_d = [(path[i][1]) for i in range(len(path))]
global world_vec
world_vec = [0, 0]
def SbLinApprox(s):
    """Estimate distance from blob area *s* using the calibrated linear
    fit dist = 0.0025*(1/s) - 0.0002, then scale to world units."""
    inv_area = 1 / s
    fitted = (0.0025 * inv_area) - 0.0002
    # Empirical sign/scale calibration for this camera setup.
    return fitted * -1 * 55108.012
# This is a callback function that gets connected to the NatNet client. It is called once per rigid body per frame
def receive_rigid_body_frame(robot_id, position, rotation_quaternion):
    """NatNet callback: record the latest position and z rotation for
    the given rigid body id into the module-level dictionaries."""
    positions[robot_id] = position
    # Convert the incoming quaternion to Euler angles; only the z
    # component (yaw) is kept.
    _, _, rotz = quaternion_to_euler_angle_vectorized1(rotation_quaternion)
    rotations[robot_id] = rotz
# replace with picam2
cap = cv.VideoCapture('http://192.168.0.205:3000/stream.mjpg')
cnt_swap = 0
# List of coordinates
# coordinates = [(1, 2), (3, 4), (5, 6), (7, 8), (9, 10)]
# Variables to store the previous and current random coordinates
previous_coordinate = None
current_coordinate = None
def get_random_coordinate():
    """Pick a random coordinate from the global *path*, avoiding an
    immediate repeat of the previously returned coordinate."""
    global current_coordinate, previous_coordinate
    # With a single-entry path a repeat is unavoidable: clear the history
    # so the loop below can terminate.
    if previous_coordinate is not None and len(path) == 1:
        previous_coordinate = None
    # Draw until the pick differs from the last one returned.
    while True:
        current_coordinate = random.choice(path)
        if current_coordinate != previous_coordinate:
            break
    # Fix: remember this pick so the next call will not return it again.
    # The original never updated previous_coordinate, so the no-repeat
    # check could never actually trigger.
    previous_coordinate = current_coordinate
    return current_coordinate
def detect_duck():
    """One control step: look for a yellow duck in the camera frame and
    drive the robot either towards the duck or along the current waypoint
    path, using OptiTrack poses for feedback.

    NOTE(review): several likely bugs are flagged inline below --
    `if detect_duck:` tests the function object (always truthy), `x_d`
    is indexed after being reassigned to a scalar, and `cnt_swap` is
    assigned without a `global` declaration.
    """
    # incorporate the optitrack system:
    global rotations, robot_id, positions, index, path
    # print("index: ", index)
    # Controller gains: angular (k_w) and linear (k_v).
    k_w = 35
    k_v = 1450
    x = positions[robot_id][0]
    y = positions[robot_id][1]
    print(f"current: ({x}, {y})")
    theta = math.radians(rotations[robot_id])
    found_duck = False
    # if is_running and (robot_id in positions):
    # ============================ Setting Up the Mask and Gray Image ============================#
    # Filter the frame down to yellow regions (the ducks are yellow).
    # Note: yellow tape or other yellow objects will also pass this mask.
    ret, img = cap.read()
    hsv = cv.cvtColor(img, cv.COLOR_BGR2HSV)
    # upper and lower bounds for the mask
    lower = np.array([20, 100, 100])
    upper = np.array([30, 255, 255])
    # creating mask
    mask = cv.inRange(hsv, lower, upper)
    # masking image
    masked_img = cv.bitwise_and(img, img, mask=mask)
    # converting original image back gray for the blob detector
    gray_img = cv.cvtColor(masked_img, cv.COLOR_BGR2GRAY)
    # =========================== Setting Up Detector ============================#
    # nb: RETR_EXTERNAL disregards contours found within contours;
    # CHAIN_APPROX_SIMPLE runs faster than the alternative.
    cnts = cv.findContours(gray_img, cv.RETR_EXTERNAL, cv.CHAIN_APPROX_SIMPLE)
    cnts = cnts[0] if len(cnts) == 2 else cnts[1]
    # =================== Calculating Ducks px and py Vectors =====================#
    for c in cnts:
        # create rectangular shape on the object
        x, y, w, h = cv.boundingRect(c)
        area = cv.contourArea(c)
        # print("area: " + str(area) + " w: " + str(w) + " h: " + str(h))
        # small area father away
        # big area closer
        portion = abs(w - h)
        # Reject blobs that are too small, too large or too elongated.
        if area < 2000 or area > 8000 or portion > 3:
            # if portion > 3 or area < 2000 or area > 9000: TRY ME
            # if area > 9000:
            continue
        found_duck = True
        u_value = x + (w / 2)
        est_dist = SbLinApprox(area)
        print("u value == " + str(u_value) + " area == " +
              str(area) + "est dist == " + str(est_dist))
        # Now we need to find px and py.
        # Pixel offset from the image centre, scaled to camera units
        # (assumes a 640 px wide frame and 96 px per unit -- verify).
        px = (u_value - 320) / 96
        theta = np.arcsin(px / est_dist) * 57.2958
        # print("x distance == " + str(px))
        # theta = np.arcsin(px / est_dist)# * 57.2958
        # print("theta == " + str(theta))
        py = px / (np.tan(theta * 0.0174533))
        print("px == " + str(px) + "py == " + str(py))
        p_vec = [px,
                 py]
        # NOTE(review): rotations[] holds degrees, but np.cos/np.sin
        # expect radians -- this rotation matrix is likely wrong.
        world_theta = rotations[robot_id]
        rot_matrix = [[np.cos(world_theta), np.sin(world_theta) * -1],
                      [np.sin(world_theta), np.cos(world_theta)]]
        # NOTE(review): np.cross is a cross product, not a matrix-vector
        # multiply; temp_world_vec is also never used afterwards.
        temp_world_vec = np.cross(p_vec, rot_matrix)
        # world_vec
        # world vec are x and y position of the duck; 0.0254 presumably
        # converts inches to metres -- confirm units.
        world_vec[0] = (world_vec[0] * 0.0254) + positions[robot_id][0]
        world_vec[1] = (world_vec[1] * 0.0254) + positions[robot_id][1]
        print(world_vec)
        # finally take the robots x and y and add it to the world_vec tor get the ducks world coordinates
        # NOTE(review): these assignments make x_d/y_d locals that shadow
        # the module-level waypoint lists used below.
        x_d = world_vec[0]
        y_d = world_vec[1]
        # now that we have the px and py vector need to convert it to the world coordinates
        # If it makes it to this point then the robot should go towards the duck
        cv.rectangle(img, (x, y), (x+w, y+h), (36, 255, 12), 2)
    # NOTE(review): `detect_duck` is this function object, so the test is
    # always True; it was almost certainly meant to be `if found_duck:`.
    if detect_duck:
        # NOTE(review): if no duck was found, x_d/y_d are unbound here
        # (UnboundLocalError); if one was found they are scalars and
        # x_d[index] raises TypeError.
        distance = math.sqrt(((x_d[index] - x)**2) + ((y_d[index] - y)**2))
        print("distance: ", distance)
        v = k_v * distance
        # print("linear velocity: ",v)
        alpha = (math.atan2((y_d - y), (x_d - x)))
        # print()
        # NOTE(review): `w` here overwrites the bounding-box width from
        # the loop above.
        w = k_w * \
            math.degrees(math.atan2(
                (math.sin(alpha - theta)), math.cos(alpha - theta)))
        u = np.array([v - w, v + w])
        u[u > 1500] = 1500
        u[u < -1500] = -1500
        command = 'CMD_MOTOR#%d#%d#%d#%d\n' % (u[0], u[0], u[1], u[1])
        s.send(command.encode('utf-8'))
        # t += 0.5
        time.sleep(0.1)
        if distance < 0.45:
            # Close enough: reset the goal to the home position and stop.
            # index += 1
            x_d = 4.72
            y_d = 2.75
            index = 0
            command = 'CMD_MOTOR#00#00#00#00\n'
            s.send(command.encode('utf-8'))
    else:
        # Waypoint-following branch: drive towards path point `index`.
        distance = math.sqrt(
            ((x_d[index] - x)**2) + ((y_d[index] - y)**2))
        print("distance: ", distance)
        v = k_v * distance
        # print("linear velocity: ",v)
        alpha = (math.atan2((y_d[index] - y), (x_d[index] - x)))
        print()
        w = k_w * \
            math.degrees(math.atan2(
                (math.sin(alpha - theta)), math.cos(alpha - theta)))
        u = np.array([v - w, v + w])
        u[u > 1500] = 1500
        u[u < -1500] = -1500
        command = 'CMD_MOTOR#%d#%d#%d#%d\n' % (u[0], u[0], u[1], u[1])
        s.send(command.encode('utf-8'))
        # t += 0.5
        time.sleep(0.1)
        if distance < 0.45 and index < len(x_d):
            command = 'CMD_MOTOR#00#00#00#00\n'
            # move on to next desired position
            index += 1
            s.send(command.encode('utf-8'))
        if (index == len(x_d)):
            # Path finished: alternate source/target (via cnt_swap) and
            # plan a fresh random path.
            # NOTE(review): cnt_swap is read here but assigned below
            # without a `global` declaration, so reaching this branch
            # raises UnboundLocalError.
            if cnt_swap == 0:
                source = (4.72, 2.75)
                target = get_random_coordinate()
                cnt_swap = 1
            elif cnt_swap == 1:
                target = (4.72, 2.75)
                source = get_random_coordinate()
                cnt_swap = 0
            path = random_path_dfs(G, source, target)
            x_d = [(path[i][0]) for i in range(len(path))]
            y_d = [(path[i][1]) for i in range(len(path))]
            index = 0
try:
    if __name__ == "__main__":
        # clientAddress = "192.168.0.115"
        # optitrackServerAddress = "192.168.0.172"
        # robot_id = 300
        clientAddress = "192.168.0.31"
        optitrackServerAddress = "192.168.0.4"
        robot_id = 203
        streaming_client = NatNetClient()
        streaming_client.set_client_address(clientAddress)
        streaming_client.set_server_address(optitrackServerAddress)
        streaming_client.set_use_multicast(True)
        streaming_client.rigid_body_listener = receive_rigid_body_frame
        # Start up the streaming client now that the callbacks are set up.
        # This will run perpetually, and operate on a separate thread.
        is_running = streaming_client.run()
        # NOTE(review): the block below duplicates the client setup above
        # verbatim -- two NatNet clients are created and run; one of the
        # two setups should probably be removed.
        # This will create a new NatNet client
        streaming_client = NatNetClient()
        streaming_client.set_client_address(clientAddress)
        streaming_client.set_server_address(optitrackServerAddress)
        streaming_client.set_use_multicast(True)
        # Configure the streaming client to call our rigid body handler on the emulator to send data out.
        streaming_client.rigid_body_listener = receive_rigid_body_frame
        # Start up the streaming client now that the callbacks are set up.
        # This will run perpetually, and operate on a separate thread.
        is_running = streaming_client.run()
        try:
            # Control loop: run one detect/drive step whenever a pose for
            # our robot id has been received.
            while is_running:
                if robot_id in positions:
                    # print('Last position', positions[robot_id], ' rotation', rotations[robot_id])
                    # for i in range(len(x_d)):
                    # funcTest()
                    detect_duck()
                    # break
            # break
        except KeyboardInterrupt:
            # Ctrl-C: stop the motors and close the robot socket.
            command = 'CMD_MOTOR#00#00#00#00\n'
            s.send(command.encode('utf-8'))
            s.shutdown(2)
            s.close()
except KeyboardInterrupt:
    # STOP the motors before exiting.
    command = 'CMD_MOTOR#00#00#00#00\n'
    s.send(command.encode('utf-8'))
    # Close the connection
    s.shutdown(2)
    s.close()
    sys.exit("Exiting Program!")
| gerardohmacoto/CSE360 | final/final.py | final.py | py | 17,340 | python | en | code | 0 | github-code | 13 |
29861351197 | import os
import tempfile
import unittest
import intelmq.lib.test as test
from intelmq.bots.outputs.file.output import FileOutputBot
class TestFileOutputBot(test.BotTestCase, unittest.TestCase):
    """
    Integration test for FileOutputBot: the bot is expected to write each
    event as one JSON line to the configured file.
    """

    @classmethod
    def set_bot(cls):
        # Route the bot's output to a fresh temporary file.
        cls.bot_reference = FileOutputBot
        cls.os_fp, cls.filename = tempfile.mkstemp()
        cls.sysconfig = {"hierarchical_output": True,
                         "file": cls.filename}

    def test_event(self):
        # A default (empty) event should be serialised as '{}' + newline.
        self.run_bot()
        filepointer = os.fdopen(self.os_fp, 'rt')
        filepointer.seek(0)
        self.assertEqual('{}\n', filepointer.read())
        filepointer.close()

    @classmethod
    def tearDownClass(cls):
        # Remove the temporary output file.
        os.remove(cls.filename)
if __name__ == '__main__': # pragma: no cover
unittest.main()
| certtools/intelmq | intelmq/tests/bots/outputs/file/test_output.py | test_output.py | py | 791 | python | en | code | 856 | github-code | 13 |
4036552271 | import os
import time
import pandas as pd
class History:
    """Append-only CSV log of download attempts, with helpers to resume
    an interrupted run from the recorded outcomes."""

    def __init__(self, path: str, header=None):
        """Create the CSV file with *header* if it does not exist yet."""
        self.path = path
        if header is None:
            header = ['time', 'bundleid', 'success']
        if not os.path.exists(self.path):
            # Write the header directly instead of shelling out with
            # `echo >>` (portable, no shell-quoting issues).
            with open(self.path, 'w') as fp:
                fp.write(','.join(header) + '\n')

    def get_bundleid_list(self):
        """Return (succeeded, failed, fatal) bundle-id lists parsed from
        the log; failures may appear multiple times."""
        df = pd.read_csv(self.path)
        # Normalise the success column to strings ("True"/"False"/"Fatal")
        # regardless of how pandas inferred the dtype.
        df["success"] = df["success"].astype(str)
        succ = df[df['success'] == "True"]["bundleid"].tolist()
        fail = df[df['success'] == "False"]["bundleid"].tolist()
        fatal = df[df['success'] == "Fatal"]["bundleid"].tolist()
        return succ, fail, fatal

    def resume_from_history(self, ttl: list):
        """Split *ttl* into (succeeded, fatal, waiting) bundle-id lists.

        An id is fatal if it was recorded Fatal or failed at least twice;
        everything not yet succeeded or fatal still waits.
        """
        ttl = set(ttl)
        succ, fail, fatal = self.get_bundleid_list()
        succ = set(succ) & ttl
        # Two or more recorded failures escalate to fatal.
        fatal += [x for x in fail if fail.count(x) >= 2]
        fatal = set(fatal) & ttl - succ
        wait = ttl - succ - fatal
        return list(succ), list(fatal), list(wait)

    def record(self, bundleid: str, success: str):
        """Append one `time,bundleid,success` row to the log."""
        record_str = f"{time.strftime('%Y-%m-%d %H:%M:%S')},{bundleid},{success}"
        # Plain append instead of `os.system("echo ...")`: safe against
        # shell metacharacters in bundle ids and much faster.
        with open(self.path, 'a') as fp:
            fp.write(record_str + '\n')
if __name__ == "__main__":
history = History("./log/download_history.csv")
| u36318/IPADownie | src/History.py | History.py | py | 1,326 | python | en | code | 0 | github-code | 13 |
2097344713 | from typing import Optional, Any
class BNode:
    """Binary-tree node: a value plus optional left/right children."""

    def __init__(self,
                 val: Any = None,
                 l_node: Optional['BNode'] = None,
                 r_node: Optional['BNode'] = None) -> None:
        # Payload and child links; None marks a missing child.
        self.val = val
        self.l_node = l_node
        self.r_node = r_node
class BTree:
    """Minimal binary tree wrapper: holds only the root node."""

    def __init__(self, root_val: Any) -> None:
        # Wrap the initial value in a node and anchor the tree on it.
        root_node = BNode(root_val)
        self.root = root_node
def create_tree(node: "BNode", depth: int) -> None:
    """Recursively grow a full binary tree of the given *depth* under
    *node*, labelling children with '-l'/'-r' suffixes of the parent."""
    if depth == 0:
        return
    # Attach labelled children, then recurse one level shallower.
    node.l_node = BNode(f'{node.val}-l')
    node.r_node = BNode(f'{node.val}-r')
    create_tree(node.l_node, depth - 1)
    create_tree(node.r_node, depth - 1)
def preorder_traversal(node: "BNode") -> None:
    """Print the tree rooted at *node* in pre-order (node, left, right),
    joining the values with ' -> '."""
    # Fix: with a single positional argument, print's `sep` never takes
    # effect -- `end` is what joins successive print() calls.
    print(node.val, end=' -> ')
    if node.l_node is not None:
        preorder_traversal(node.l_node)
    if node.r_node is not None:
        preorder_traversal(node.r_node)
def bfs_traversal(nodes: "list[BNode]") -> None:
    """Breadth-first print of every node reachable from the queue *nodes*.

    Improvements over the recursive original: iterative (no recursion
    depth limit) and an empty initial queue is a no-op instead of
    raising IndexError.
    """
    while nodes:
        # Dequeue from the front, print, then enqueue the children.
        current = nodes.pop(0)
        print(current.val)
        if current.l_node is not None:
            nodes.append(current.l_node)
        if current.r_node is not None:
            nodes.append(current.r_node)
if __name__ == "__main__":
    # Demo: build a depth-3 labelled tree and show both traversals.
    tree = BTree('root')
    create_tree(tree.root, 3)
    print(tree)
    preorder_traversal(tree.root)
    print('-' * 40)
    bfs_traversal([tree.root])
| atlanmatrix/Algorithm-Py3 | tree.py | tree.py | py | 1,327 | python | en | code | 0 | github-code | 13 |
32846428139 | from asyncio import run
from asyncio.exceptions import CancelledError
from argparse import ArgumentParser
from pyhtools.attackers.web.spider import Spider
from pyhtools.UI.colors import BRIGHT_RED
# CLI: a single required --target URL to crawl.
parser = ArgumentParser(prog='pyspider')
parser.add_argument('-t', '--target', dest='target_url', required=True,
                    help='url of the target eg: https://example.com')
args = parser.parse_args()

target_url = args.target_url
spider = Spider()

try:
    # Crawl the target; print_links=True makes the spider print each
    # discovered link as it goes.
    discovered_links = run(spider.start(
        target_url=target_url, print_links=True))
    # NOTE(review): the -1 presumably excludes the start URL from the
    # count -- verify against Spider.start's return value.
    print(f'[*] Total Links Found: {len(discovered_links)-1}')
except (EOFError, KeyboardInterrupt, CancelledError):
    print(BRIGHT_RED + "[!] Error: User Interrupted")
| dmdhrumilmistry/pyhtools | examples/Web/pyspider.py | pyspider.py | py | 724 | python | en | code | 297 | github-code | 13 |
5904929514 | from .common import *
# Production database: local PostgreSQL via psycopg2.
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'project',
    }
}

# Collected static files and uploaded media live under the public web root.
PUBLIC_ROOT = os.path.join(os.sep, 'var', 'www', 'project', 'public')
STATIC_ROOT = os.path.join(PUBLIC_ROOT, 'static')
MEDIA_ROOT = os.path.join(PUBLIC_ROOT, 'media')

# Wrap the base loaders in the cached loader so templates are compiled once.
TEMPLATE_LOADERS = (
    ('django.template.loaders.cached.Loader', TEMPLATE_LOADERS),
)

ALLOWED_HOSTS = ['162.243.12.142', '162.243.129.112']
PREPEND_WWW = False
| DjangoLover/djangodash2013-1 | project/settings/production.py | production.py | py | 483 | python | en | code | 0 | github-code | 13 |
31003178186 | # -*- coding: utf-8 -*-
#http://leda.univ-lyon1.fr/fullsql.html
#http://leda.univ-lyon1.fr/leda/meandata.html
#http://leda.univ-lyon1.fr/fG.cgi?n=meandata&c=o&of=1,leda,simbad&nra=l&nakd=1&d=v3k%2C%20mod0&sql=(objtype%3D%27G%27)%20AND%20(v3k>3000)%20AND%20(v3k<30000)%20AND%20(mod0%20IS%20NOT%20NULL)%20AND%20(v3k%20IS%20NOT%20NULL)&ob=&a=html
#http://leda.univ-lyon1.fr/fullsql.html
import pandas as pd
data= pd.read_csv('C:/Users/Neo/Desktop/leda.csv', delimiter=";")
data
# To redo the estimate with a random ~80% sample of the data:
#data = data.sample(n = 1133)

# mod0 is the DISTANCE MODULUS: |m - M|, the difference between an
# astronomical object's apparent (m) and absolute (M) magnitude.
# v3k is the RADIAL VELOCITY (cz) relative to the CMB radiation frame.

# LUMINOSITY DISTANCE (Dl), derived from the distance modulus.
# (Good for local distances.)
Dl=10**((data['mod0']/5)+1 )

# REDSHIFT (z): radial velocity over the speed of light in vacuum
# (c = 299792.458 km/s).
z = data["v3k"]/299792.458

# "Movement" distance (LEDA's modz): the luminosity distance corrected
# by the redshift factor.
Dm = Dl/(z+1)

# Dm is in parsec; convert to kilometres (1 pc = 3.08567758128e13 km).
Dm = Dm * 30856775812800
from scipy.optimize import curve_fit
def lin(x, b):
    """Line through the origin, y = b*x: the single fit parameter *b* is
    the slope (no intercept term)."""
    return x * b
# f(x) = b*x with no intercept: Hubble's law passes through the origin,
# so only the slope (angular coefficient) is fitted.
coeff, cov= curve_fit(lin,Dm,data["v3k"])
# Best-fit slope for the zero-intercept regression:
b = coeff[0]
b

# Correlation between distance and velocity (~0.89): a strong positive
# linear relation between the variables.
Dm.corr(data["v3k"]) #0.89

# Visualise the linear relation between distance and velocity.
import matplotlib.pyplot as plt
import numpy as np
x=np.array(Dm)
y=np.array(data["v3k"])
plt.scatter(x,y)
plt.title('Relação linear entre distância e velocidade')
plt.xlabel('DISTÂNCIA DE MOVIMENTO')
plt.ylabel('VELOCIDADE RADIAL')
f = lambda x: b*x #OBS: y = b*x
plt.plot(x,f(x), c="red", label="fit line between min and max")

# Hubble's law: v = H0 * d, hence 1/H0 = d/v has units of time.
# With v in km/s and d in km, the fitted slope is H0 in 1/s, so its
# reciprocal is the Hubble time in seconds.
Tempo_em_seg = (1/b)
Tempo_em_seg
# Convert seconds to years: the estimated age of the UNIVERSE.
ANOS = ((((Tempo_em_seg / 60)/ 60)/ 24)/ 365)
"{:,}".format(int(ANOS))
| AlessandroPTSN/Como-calcular-a-idade-do-UNIVERSO | Universo_Anos.py | Universo_Anos.py | py | 3,847 | python | pt | code | 2 | github-code | 13 |
36180403196 | import string
import os
import json
import random
import hashlib
from collections import namedtuple
import glob
import boto3
# (OutputFormat, file_extension) pair describing a Polly output encoding:
# OutputFormat is the value passed to synthesize_speech, file_extension
# names the cached file.
AudioFormat = namedtuple( "AudioFormat", ["OutputFormat","file_extension"] )

# Ogg Vorbis output.
format_ogg = AudioFormat(
    OutputFormat = "ogg_vorbis",
    file_extension = "ogg"
)

# MP3 output.
format_mp3 = AudioFormat(
    OutputFormat = "mp3",
    file_extension = "mp3"
)

# Format used when callers do not specify one.
AUDIO_FORMAT_DEFAULT = format_ogg
def cache_path( fn ):
    """Absolute path of *fn* inside the local ./cache directory."""
    cache_dir = os.path.realpath("./cache")
    return os.path.join(cache_dir, fn)
def hash_for_string( str_in ):
    """Hex MD5 digest of *str_in* (UTF-8); used to build cache file names."""
    digest = hashlib.md5(str_in.encode("utf-8"))
    return digest.hexdigest()
def path_for_sentence( sentence, voice_id, audio_format ):
    """Cache path for a rendered sentence: <md5(sentence)>.<voice>.<ext>."""
    name = f"{hash_for_string(sentence)}.{voice_id}.{audio_format.file_extension}"
    return cache_path(name)
def get_client():
    """Build an AWS Polly client from credentials in config/aws.json."""
    with open( "config/aws.json" ) as fp:
        creds = json.load( fp )
    # Fix: the original `print(creds)` wrote the AWS secret access key to
    # stdout; credentials must never be logged.
    return boto3.Session(
        region_name='eu-west-1',
        aws_access_key_id = creds['access_key_id'],
        aws_secret_access_key = creds['secret_access_key']
    ).client('polly')
def random_voice():
    """Return a random English Polly voice id, excluding blacklisted voices.

    The describe_voices() response is cached on disk so the AWS API is
    only hit once.
    """
    all_voices = None
    fn_cached = cache_path( "voices.json" )
    if os.path.exists( fn_cached ):
        # Cache hit: reuse the previously fetched voice list.
        with open( fn_cached ) as fp:
            all_voices = json.load( fp )
    else:
        # Cache miss: query Polly and persist the response for next time.
        polly_client = get_client()
        all_voices = polly_client.describe_voices()
        with open( fn_cached, "w" ) as fp:
            json.dump( all_voices, fp )
    voices = []
    # Voices excluded from selection -- presumably for subjective quality;
    # confirm before changing.
    voice_blacklist = [ 'Joey', 'Justin', 'Ivy', 'Kimberly', "Joanna" ]
    for voice in all_voices["Voices"]:
        # Keep only English variants ("en-US", "en-GB", ...).
        if voice['LanguageCode'].startswith( "en-"):
            voice_id = voice['Id']
            if voice_id not in voice_blacklist:
                voices.append( voice_id )
    return random.choice( voices )
def render_sentence( sentence, voice_id="Brian", audio_format=AUDIO_FORMAT_DEFAULT ):
    """Synthesise *sentence* with Polly, write the audio into the cache
    directory and return the resulting file path."""
    polly_client = get_client()
    response = polly_client.synthesize_speech(
        VoiceId = voice_id,
        OutputFormat = audio_format.OutputFormat,
        Text = sentence
    )
    fn = path_for_sentence( sentence, voice_id, audio_format )
    # AudioStream is a streaming body; read it fully and persist to disk.
    with open( fn, 'wb' ) as fp:
        fp.write( response['AudioStream'].read() )
    return fn
def render_sentences( sentences, audio_format=AUDIO_FORMAT_DEFAULT ):
    """Render each sentence with a freshly chosen random voice and return
    the list of cached audio file paths, in input order."""
    return [
        render_sentence(
            sentence,
            voice_id = random_voice(),
            audio_format = audio_format
        )
        for sentence in sentences
    ]
| prehensile/phone-algos | polly_handler.py | polly_handler.py | py | 2,670 | python | en | code | 0 | github-code | 13 |
36727124276 | import cv2
import numpy as np
import sqlite3
import os
# Open (or create) the local face database and the dataset directory.
conn = sqlite3.connect('database.db')
if not os.path.exists('./dataset'):
    os.makedirs('./dataset')
c = conn.cursor()
# Haar cascade for frontal-face detection (hard-coded local path).
face_cascade = cv2.CascadeClassifier('C:/Users/Qweku/Desktop/NEW_CODE/haarcascade_frontalface_default.xml')
cap = cv2.VideoCapture(0)
# Subject metadata, stored alongside the captured face images.
Fullname = input("Enter your name: ")
Id_number = input("Index: ")
Relationship = input("Relationship: ")
c.execute('INSERT INTO Details(id_no,name,Relationship) VALUES (?,?,?)', (Id_number,Fullname,Relationship))
uid = c.lastrowid
print(uid)
counter = 0
# Capture loop: grab frames, detect faces, and save up to 100 cropped
# grayscale face images named dataset/<name>.<id>.<n>.jpg.
while True:
    ret, img = cap.read()
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    faces = face_cascade.detectMultiScale(gray, 1.1, 5)
    for (x,y,w,h) in faces:
        counter += 1
        cv2.imwrite(f"dataset/{Fullname}."+str(Id_number)+"."+str(counter)+".jpg",gray[y:y+h,x:x+w])
        cv2.rectangle(img, (x,y), (x+w, y+h), (255,0,0), 3)
        cv2.waitKey(100)
    cv2.imshow('img',img)
    # 'q' aborts; otherwise stop after 100 saved faces.
    if cv2.waitKey(10)== ord('q'):
        break
    if counter == 100:
        break
# Release the camera, persist the DB row, and close windows.
cap.release()
conn.commit()
conn.close()
cv2.destroyAllWindows()
5517964945 | from src.jobs import read
def get_unique_job_types(path):
    """Distinct non-empty job_type values from the jobs file at *path*,
    in first-seen order."""
    seen = []
    for job in read(path):
        job_type = job["job_type"]
        # Skip blanks and values already collected.
        if job_type != "" and job_type not in seen:
            seen.append(job_type)
    return seen
def filter_by_job_type(jobs, job_type):
    """Return only the jobs whose job_type field equals *job_type*."""
    return [job for job in jobs if job["job_type"] == job_type]
def get_unique_industries(path):
    """Distinct non-empty industry values from the jobs file at *path*,
    in first-seen order."""
    industries = []
    for job in read(path):
        industry = job["industry"]
        # Skip blanks and values already collected.
        if industry != "" and industry not in industries:
            industries.append(industry)
    return industries
def filter_by_industry(jobs, industry):
    """Return only the jobs whose industry field equals *industry*."""
    return [job for job in jobs if job["industry"] == industry]
def get_max_salary(path):
    """Highest max_salary across all jobs at *path* (0 when no valid
    values exist)."""
    highest = 0
    for job in read(path):
        salary = job["max_salary"]
        # Skip rows with missing or explicitly invalid salary data.
        if salary == "" or salary == "invalid":
            continue
        if float(salary) > highest:
            highest = int(salary)
    return highest
def get_min_salary(path):
    """Lowest min_salary across all jobs at *path* (None when no valid
    values exist)."""
    lowest = None
    for job in read(path):
        salary = job["min_salary"]
        # Skip rows with missing or explicitly invalid salary data.
        if salary == "" or salary == "invalid":
            continue
        if lowest is None:
            # First valid value seeds the running minimum (kept as float,
            # matching the original behaviour; later updates store int).
            lowest = float(salary)
            continue
        if float(salary) < lowest:
            lowest = int(salary)
    return lowest
def is_integers_valid(values):
    """Raise ValueError unless every element of *values* is an int."""
    if any(not isinstance(value, int) for value in values):
        raise ValueError
def is_keys_valid(expected_keys, keys):
    """Raise ValueError unless every expected key is present in *keys*."""
    if any(key not in keys for key in expected_keys):
        raise ValueError
def matches_salary_range(job, salary):
    """True when job's min_salary <= salary <= max_salary.

    Raises ValueError if the salary keys are missing, any value is not
    an int, or the range is inverted (min > max).
    """
    is_keys_valid(["min_salary", "max_salary"], keys=job.keys())
    minimum = job["min_salary"]
    maximum = job["max_salary"]
    is_integers_valid([minimum, maximum, salary])
    if minimum > maximum:
        raise ValueError
    return minimum <= salary <= maximum
def filter_by_salary_range(jobs, salary):
    """Jobs whose [min_salary, max_salary] interval contains *salary*;
    rows with invalid ranges are skipped silently."""
    matches = []
    for job in jobs:
        try:
            in_range = matches_salary_range(job, salary)
        except ValueError:
            # Malformed row: ignore it rather than abort the filter.
            continue
        if in_range:
            matches.append(job)
    return matches
| ScriptCamilo/trybe-job-Insights | src/insights.py | insights.py | py | 2,794 | python | en | code | 0 | github-code | 13 |
# Read one room description per line as "<chairs> <people>", where the
# chair count is encoded by the LENGTH of the first token.
number_of_rooms = int(input())
all_rooms = []
free_chairs = 0
number_of_room = 1
has_space = True
for i in range(number_of_rooms):
    all_rooms.append(input().split())
for room in all_rooms:
    chairs = len(room[0])
    people = int(room[1])
    if chairs >= people:
        # Enough seating: accumulate the surplus.
        free_chairs += chairs - people
    else:
        # Shortfall: report how many extra chairs this room needs.
        has_space = False
        needed_chairs = people - chairs
        print(f"{needed_chairs} more chairs needed in room {number_of_room}")
    number_of_room += 1
# Only report the surplus when every room had enough chairs.
if has_space:
    print(f"Game On, {free_chairs} free chairs left")
351798233 | class SLLNode:
    def __init__(self, data):
        self.data = data  # node payload
        self.next = None  # next node in the chain; None marks the tail
    def __repr__(self):
        # Human-readable form used when printing nodes in the demo code.
        return "SLLNode object: data={}".format(self.data)
    def get_data(self):
        """
        Return the self.data attribute (the node's payload).
        """
        return self.data
    def set_data(self, new_data):
        """
        Replace the existing value of the self.data attribute with the
        new_data parameter.
        """
        self.data = new_data
    def get_next(self):
        """
        Return the self.next attribute (the following node, or None at the
        tail).
        """
        return self.next
    def set_next(self, new_next):
        """
        Replace the existing value of the self.next attribute with the
        new_next parameter (an SLLNode or None).
        """
        self.next = new_next
class SLL:
    """Singly linked list whose nodes are SLLNode instances chained via .next."""

    def __init__(self):
        self.head = None

    def __repr__(self):
        # NOTE(review): "objet" typo kept verbatim so printed output is unchanged.
        return "SLL objet: head={}".format(self.head)

    def is_empty(self):
        """
        Returns True if the Linked List is empty.
        Otherwise, returns False
        """
        return self.head is None

    def add_front(self, new_data):
        """
        Add a Node whose data is the new_data argument to the front of the
        Linked List
        """
        temp = SLLNode(new_data)
        temp.set_next(self.head)
        self.head = temp

    def size(self):
        """
        Return the number of nodes in the Linked List.

        O(n): every node must be visited to count them.
        """
        size = 0
        current = self.head
        while current is not None:
            size += 1
            current = current.get_next()
        return size

    def search(self, data):
        """
        Return True if data is present in one of the Nodes, False otherwise.
        Returns a message string when the list is empty.  O(n).
        """
        if self.head is None:
            return "Linked List is empty. No node to search"
        current = self.head
        while current is not None:
            if current.get_data() == data:
                return True
            current = current.get_next()
        return False

    def remove(self, data):
        """
        Remove the first Node whose data equals the data argument.

        Returns a message string when the list is empty or the value is
        absent; returns None on success.  O(n) in the worst case.
        """
        if self.head is None:
            return "Linked List is empty. No Nodes to remove."
        current = self.head
        previous = None
        while current.get_data() != data:
            if current.get_next() is None:
                return "A Node with that data value is not present"
            previous = current
            current = current.get_next()
        if previous is None:
            # The match is the head node.
            self.head = current.get_next()
        else:
            # BUG FIX: the original passed the bound method `current.get_next`
            # (without calling it), which stored a method object as the next
            # node and corrupted the list; call it to get the actual node.
            previous.set_next(current.get_next())
# Demo / manual test of the SLL and SLLNode classes.
sll = SLL()
print(sll.size())      # 0 — list starts empty
print(sll.search(3))   # message string: list is empty
sll.head               # no-op attribute access
sll.add_front('berry')
sll.add_front('TOM')
print(sll.search('bird'))    # False
print(sll.search('berry'))   # True
print(sll.remove(27))        # message: value not present
print(sll.remove('berry'))
print(sll.head)
print(sll.size())
print(sll.is_empty())
node = SLLNode('apple')
sll.head = node              # replace the whole list with one node
print(sll.is_empty())
print(node.get_data())
node = SLLNode(7)
print(node.get_data())
node2 = SLLNode('carrot')
print(node.set_next(node2))  # set_next returns None
print(node.get_next())
| moseswong74/pythonPractice | Must_Know/Data_Structure/SLLNode.py | SLLNode.py | py | 3,990 | python | en | code | 0 | github-code | 13 |
3860048000 | #This script follows a user's Twitter stream and messages them when they tweet.
#The interval between tweets can be adjusted using the sleep() function
from twython import TwythonStreamer, Twython
from datetime import date
import random
import time
#auth.py is the second file, containing your dev.twitter.com credentials
from auth import (
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
def timediff(d0=date(2017, 1, 3)):
    """Return the number of whole days between *d0* and today.

    Args:
        d0: the reference (ship/launch) date; defaults to the original
            hard-coded launch date 2017-01-03, so existing callers are
            unaffected.  `date` is immutable, so a default instance is safe.

    Returns:
        int: days elapsed (negative if d0 is in the future).
    """
    return (date.today() - d0).days
# Populate this messages array with various openers. A few examples are
# included for inspiration; each entry becomes the random prefix of a tweet.
messages = [
    "Get back to work. ",
    "Stop this. ",
    "Finish the game. ",
    "We're waiting. ",
    "Back to development! ",
    "You're talking nonsense. ",
    "It's all irrelevant. ",
    "The time is short. ",
    "Focus on the task at hand. "
]

# This block performs initial setup when the script first runs:
# seed `lastMessage` and build the authenticated API client.
flavor = random.choice(messages)
result = timediff()
# message must begin with the Twitter handle of whom you wish to tweet;
# after flavor, add gameTitle
message = "@someonesTwitterHandle "+ flavor + "gameTitle shipped " + str(result) + " days ago!"
lastMessage = message
twitter = Twython(
    consumer_key,
    consumer_secret,
    access_token,
    access_token_secret
)
def buildTweet(messages):
    """Build the tweet text, guaranteeing it differs from the last tweet.

    The original recursed when a duplicate was generated but discarded the
    recursive result, so the duplicate was returned anyway; loop until the
    message differs instead (this also avoids unbounded recursion).
    NOTE(review): like the original retry logic, this never terminates if
    every possible message equals `lastMessage` (e.g. a 1-element list).
    """
    global lastMessage
    while True:
        flavor = random.choice(messages)
        result = timediff()
        message = "@someonesTwitterHandle " + flavor + "gameTitle shipped " + str(result) + " days ago!"
        if message != lastMessage:
            return message
#This is the real focus of the bot's functionality, where the magic happens
class MyStreamer(TwythonStreamer):
    """Streams tweets and replies when the watched user posts.

    NOTE(review): relies on module globals `twitter`, `messages`,
    `lastMessage` and `stream`; `stream` is defined after this class, which
    works only because on_success runs after streaming has started.
    """

    def on_success(self, data):
        """Handle one streamed event; tweet a reminder on a new tweet."""
        if 'text' in data:
            try:
                username = data['user']['screen_name']
                tweet = data['text']
                print("@%s: %s" % (username, tweet))
                # Bot only tweets if the watched user has tweeted:
                # username == 'someonesTwitterHandle'
                if username == 'someonesTwitterHandle':
                    message = buildTweet(messages)
                    print("Built tweet")
                    # Waits 30 seconds before tweeting, for a more natural cadence.
                    time.sleep(30)
                    twitter.update_status(status=message)
                    print("Tweeted: %s" % message)
                    global lastMessage
                    lastMessage = message
                    print("Waiting 6 hours before tweeting again")
                    # Bot stops looking.
                    self.disconnect()
                    # Waits 21600 seconds - 6 hours.
                    time.sleep(21600)
                    # Attempts to re-open the stream.
                    stream.statuses.filter(follow=['6348742'])
            except BaseException as e:
                print("Threw an exception: " + str(e))
                # If an exception is thrown, state why and wait for the
                # next tweet before trying again.
                pass
stream = MyStreamer(
consumer_key,
consumer_secret,
access_token,
access_token_secret
)
print("Stream is now running")
#this code searches for tweets from a given userID
#Get the id of the account from here: http://gettwitterid.com/
stream.statuses.filter(follow=['userID'])
| circumlocutory/pytwybot | cleanStream.py | cleanStream.py | py | 3,745 | python | en | code | 0 | github-code | 13 |
365740693 | import logging
import math
import skia
from fontTools.misc.transform import Transform
from fontTools.pens.basePen import BasePen
from fontTools.pens.pointPen import PointToSegmentPen, SegmentToPointPen
from .gstate import TextStyle
# TODO:
# - textBox
# MAYBE:
# - contours
# - expandStroke
# - intersectionPoints
# - offCurvePoints
# - onCurvePoints
# - optimizePath
# - points
# - svgClass
# - svgID
# - svgLink
# - traceImage
class BezierPath(BasePen):
def __init__(self, path=None, glyphSet=None):
super().__init__(glyphSet)
if path is None:
path = skia.Path()
self.path = path
def _moveTo(self, pt):
self.path.moveTo(*pt)
def _lineTo(self, pt):
self.path.lineTo(*pt)
def _curveToOne(self, pt1, pt2, pt3):
x1, y1 = pt1
x2, y2 = pt2
x3, y3 = pt3
self.path.cubicTo(x1, y1, x2, y2, x3, y3)
def _qCurveToOne(self, pt1, pt2):
x1, y1 = pt1
x2, y2 = pt2
self.path.quadTo(x1, y1, x2, y2)
def _closePath(self):
self.path.close()
def beginPath(self, identifier=None):
self._pointToSegmentPen = PointToSegmentPen(self)
self._pointToSegmentPen.beginPath()
def addPoint(self, point, segmentType=None, smooth=False, name=None, identifier=None, **kwargs):
if not hasattr(self, "_pointToSegmentPen"):
raise AttributeError("path.beginPath() must be called before the path can be used as a point pen")
self._pointToSegmentPen.addPoint(
point,
segmentType=segmentType,
smooth=smooth,
name=name,
identifier=identifier,
**kwargs
)
def endPath(self):
if hasattr(self, "_pointToSegmentPen"):
# We are drawing as a point pen
pointToSegmentPen = self._pointToSegmentPen
del self._pointToSegmentPen
pointToSegmentPen.endPath()
def arc(self, center, radius, startAngle, endAngle, clockwise):
cx, cy = center
diameter = radius * 2
rect = (cx - radius, cy - radius, diameter, diameter)
sweepAngle = (endAngle - startAngle) % 360
if clockwise:
sweepAngle -= 360
self.path.arcTo(rect, startAngle, sweepAngle, False)
def arcTo(self, point1, point2, radius):
self.path.arcTo(point1, point2, radius)
def rect(self, x, y, w, h):
self.path.addRect((x, y, w, h))
def oval(self, x, y, w, h):
self.path.addOval((x, y, w, h))
def line(self, pt1, pt2):
points = [(x, y) for x, y in [pt1, pt2]]
self.path.addPoly(points, False)
def polygon(self, firstPoint, *points, close=True):
points = [(x, y) for x, y in (firstPoint,) + points]
self.path.addPoly(points, close)
def pointInside(self, point):
x, y = point
return self.path.contains(x, y)
def bounds(self):
if self.path.countVerbs() == 0:
return None
return tuple(self.path.computeTightBounds())
def controlPointBounds(self):
if self.path.countVerbs() == 0:
return None
return tuple(self.path.getBounds())
def reverse(self):
path = skia.Path()
path.reverseAddPath(self.path)
self.path = path
def appendPath(self, other):
self.path.addPath(other.path)
def copy(self):
path = skia.Path(self.path)
return BezierPath(path=path)
def translate(self, x, y):
self.path.offset(x, y)
def scale(self, x, y=None, center=(0, 0)):
if y is None:
y = x
self.transform((x, 0, 0, y, 0, 0), center=center)
def rotate(self, angle, center=(0, 0)):
t = Transform()
t = t.rotate(math.radians(angle))
self.transform(t, center=center)
def skew(self, x, y=0, center=(0, 0)):
t = Transform()
t = t.skew(math.radians(x), math.radians(y))
self.transform(t, center=center)
def transform(self, transform, center=(0, 0)):
cx, cy = center
t = Transform()
t = t.translate(cx, cy)
t = t.transform(transform)
t = t.translate(-cx, -cy)
matrix = skia.Matrix()
matrix.setAffine(t)
self.path.transform(matrix)
def drawToPen(self, pen):
it = skia.Path.Iter(self.path, False)
needEndPath = False
for verb, points in it:
penVerb, startIndex, numPoints = _pathVerbsToPenMethod.get(verb, (None, None, None))
if penVerb is None:
continue
assert len(points) == numPoints, (verb, numPoints, len(points))
if penVerb == "conicTo":
# We should only call _convertConicToCubicDirty()
# if it.conicWeight() == sqrt(2)/2, but skia-python doesn't
# give the correct value.
# https://github.com/kyamagu/skia-python/issues/116
# if abs(it.conicWeight() - 0.707...) > 1e-10:
# logging.warning("unsupported conic form (weight != sqrt(2)/2): conic to cubic conversion will be bad")
# TODO: we should fall back to skia.Path.ConvertConicToQuads(),
# but that call is currently also not working.
pen.curveTo(*_convertConicToCubicDirty(*points))
elif penVerb == "closePath":
needEndPath = False
pen.closePath()
else:
if penVerb == "moveTo":
if needEndPath:
pen.endPath()
needEndPath = True
pointArgs = ((x, y) for x, y in points[startIndex:])
getattr(pen, penVerb)(*pointArgs)
if needEndPath:
pen.endPath()
def drawToPointPen(self, pen):
self.drawToPen(SegmentToPointPen(pen))
def text(self, txt, offset=None, font=None, fontSize=10, align=None):
if not txt:
return
textStyle = TextStyle(font=font, fontSize=fontSize)
glyphsInfo = textStyle.shape(txt)
textStyle.alignGlyphPositions(glyphsInfo, align)
gids = sorted(set(glyphsInfo.gids))
paths = [textStyle.skFont.getPath(gid) for gid in gids]
for path in paths:
path.transform(FLIP_MATRIX)
paths = dict(zip(gids, paths))
x, y = (0, 0) if offset is None else offset
for gid, pos in zip(glyphsInfo.gids, glyphsInfo.positions):
path = paths[gid]
self.path.addPath(path, pos[0] + x, pos[1] + y)
def _doPathOp(self, other, operator):
from pathops import Path, op
path1 = Path()
path2 = Path()
self.drawToPen(path1.getPen())
other.drawToPen(path2.getPen())
result = op(
path1,
path2,
operator,
fix_winding=True,
keep_starting_points=True,
)
resultPath = BezierPath()
result.draw(resultPath)
return resultPath
def union(self, other):
from pathops import PathOp
return self._doPathOp(other, PathOp.UNION)
def intersection(self, other):
from pathops import PathOp
return self._doPathOp(other, PathOp.INTERSECTION)
def difference(self, other):
from pathops import PathOp
return self._doPathOp(other, PathOp.DIFFERENCE)
def xor(self, other):
from pathops import PathOp
return self._doPathOp(other, PathOp.XOR)
def removeOverlap(self):
from pathops import Path
path = Path()
self.drawToPen(path.getPen())
path.simplify(
fix_winding=True,
keep_starting_points=False,
)
resultPath = BezierPath()
path.draw(resultPath)
self.path = resultPath.path
__mod__ = difference
def __imod__(self, other):
result = self.difference(other)
self.path = result.path
return self
__or__ = union
def __ior__(self, other):
result = self.union(other)
self.path = result.path
return self
__and__ = intersection
def __iand__(self, other):
result = self.intersection(other)
self.path = result.path
return self
__xor__ = xor
def __ixor__(self, other):
result = self.xor(other)
self.path = result.path
return self
FLIP_MATRIX = skia.Matrix()
FLIP_MATRIX.setAffine((1, 0, 0, -1, 0, 0))
def _convertConicToCubicDirty(pt1, pt2, pt3):
    """Crudely approximate the conic segment (pt1, pt2, pt3) with a cubic.

    pt2 is the conic's control point.  Returns a 3-tuple
    (offCurve1, offCurve2, onCurve) suitable as curveTo() arguments.
    Accuracy assumptions are spelled out in the NOTE below.
    """
    #
    # NOTE: we do a crude conversion from a conic segment to a cubic bezier,
    # for two common cases, based on the following assumptions:
    # - drawbot itself does not allow conics to be drawn
    # - skia draws conics implicitly for oval(), arc() and arcTo()
    # - for oval the conic segments span 90 degrees
    # - for arc and arcTo the conic segments do not span more than 90 degrees
    # - for arc and arcTo the conic segments are circular, never elliptical
    # For all these cases, the conic weight will be (close to) zero.
    #
    # This no longer holds once a path has been transformed with skew or x/y
    # scale, in which case we need to fall back to
    # skia.Path.ConvertConicToQuads(), but that is blocked by
    # https://github.com/kyamagu/skia-python/issues/115
    # https://github.com/justvanrossum/drawbot-skia/issues/7
    #
    (x1, y1), (x2, y2), (x3, y3) = pt1, pt2, pt3
    # Vectors from each on-curve end point toward the control point.
    dx1 = x2 - x1
    dy1 = y2 - y1
    dx2 = x2 - x3
    dy2 = y2 - y3
    angle1 = math.atan2(dy1, dx1)
    angle2 = math.atan2(-dy2, -dx2)
    # Angle spanned between the two tangents, folded into [0, pi].
    angleDiff = (angle1 - angle2) % (2 * math.pi)
    if angleDiff > math.pi:
        angleDiff = 2 * math.pi - angleDiff
    if abs(angleDiff - math.pi / 2) < 0.0001:
        # angle is close enough to 90 degrees, we use stupid old BEZIER_ARC_MAGIC
        handleRatio = 0.5522847498
    else:
        # Fall back to the circular assumption: |pt1 pt2| == |pt2 pt3|
        d1 = math.hypot(dx1, dy1)
        d2 = math.hypot(dx2, dy2)
        if abs(d1 - d2) > 0.00001:
            logging.warning("unsupported conic form (non-circular, non-90-degrees): conic to cubic conversion will be bad")
            # TODO: we should fall back to skia.Path.ConvertConicToQuads(),
            # but that call is currently not working.
        angleHalf = angleDiff / 2
        radius = d1 / math.tan(angleHalf)
        D = radius * (1 - math.cos(angleHalf))
        handleLength = (4 * D / 3) / math.sin(angleHalf) # length of the bcp line
        handleRatio = handleLength / d1
    return (
        (x1 + dx1 * handleRatio, y1 + dy1 * handleRatio),
        (x3 + dx2 * handleRatio, y3 + dy2 * handleRatio),
        (x3, y3),
    )
_pathVerbsToPenMethod = {
skia.Path.Verb.kMove_Verb: ("moveTo", 0, 1),
skia.Path.Verb.kLine_Verb: ("lineTo", 1, 2),
skia.Path.Verb.kCubic_Verb: ("curveTo", 1, 4),
skia.Path.Verb.kQuad_Verb: ("qCurveTo", 1, 3),
skia.Path.Verb.kConic_Verb: ("conicTo", 1, 3),
skia.Path.Verb.kClose_Verb: ("closePath", 1, 1),
# skia.Path.Verb.kDone_Verb: (None, None), # "StopIteration", not receiving when using Python iterator
}
| alexnathanson/solar-protocol | backend/createHTML/venv-bk/lib/python3.7/site-packages/drawbot_skia/path.py | path.py | py | 11,161 | python | en | code | 207 | github-code | 13 |
7797217994 | def best_features_fre(x_train, y_train, qtd_var = 20, valor_se_missing = -999, tipo_de_modelo = 'LogisticRegression'):
from sklearn.feature_selection import RFE
from sklearn.linear_model import LinearRegression
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import ExtraTreesClassifier
print("Você escolheu a técnica: ", tipo_de_modelo)
resumo = pd.DataFrame()
x_train[np.isnan(x_train)] = valor_se_missing
if tipo_de_modelo == 'LogisticRegression':
model = LogisticRegression()
rfe = RFE(model, qtd_var)
fit = rfe.fit(x_train, y_train)
resumo['Variaveis'] = list(x_train.columns)
resumo['fl_elegivel_RFE'] = np.where(rfe.support_ == False, 0, 1)
elif tipo_de_modelo == 'RandomForestClassifier':
model = RandomForestClassifier()
rfe = RFE(model, qtd_var)
fit = rfe.fit(x_train, y_train)
resumo['Variaveis'] = list(x_train.columns)
resumo['fl_elegivel_RFE'] = np.where(rfe.support_ == False, 0, 1)
elif tipo_de_modelo == 'ExtraTreesClassifier':
model = ExtraTreesClassifier()
rfe = RFE(model, qtd_var)
fit = rfe.fit(x_train, y_train)
resumo['Variaveis'] = list(x_train.columns)
resumo['fl_elegivel_RFE'] = np.where(rfe.support_ == False, 0, 1)
else:
print("""
***************************************
tipo_de_modelo não suportado, tente:
-LogisticRegression
-RandomForestClassifier
-ExtraTreesClassifier
***************************************
""")
return False
return resumo | Guilherme-maia/projeto_ml_publico | best_features_fre.py | best_features_fre.py | py | 1,719 | python | en | code | 0 | github-code | 13 |
35901420793 | # -*- coding:utf-8 -*-
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Base model class for convolutional neural network models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
class Model(object):
  """Base class for convolutional neural networks."""

  def __init__(self,
               image_size,
               num_classes,
               params):
    """
    Args:
      image_size: height/width of the input images. We assume the height and
        the width of images are the same.
      num_classes: number of classes for the dataset, e.g., 10 for Cifar10,
        100 for Cifar100 and 1000 for ImageNet.
      params: other parameters, an object of argparse.parser(); fields read
        here are batch_size, init_learning_rate, data_format ('NCHW' or
        anything else, treated as channels-last) and use_fp16.

    Returns:
      None
    """
    self.image_size = image_size
    self.batch_size = params.batch_size
    self.learning_rate = params.init_learning_rate
    self.depth = 3 # number of channels for images, always be 3.
    self.data_format = params.data_format
    self.num_classes = num_classes
    self.data_type = tf.float16 if params.use_fp16 else tf.float32

    if self.data_format == 'NCHW':
      self.channel_pos = 'channels_first'
    else:
      self.channel_pos = 'channels_last'

  def build_network(self, inputs, is_training):
    """Builds the forward pass of the model.

    Args:
      inputs: the list of inputs, excluding labels
      is_training: True when in the training phase.

    Returns:
      The logits of the model.
    """
    raise NotImplementedError("This method must be implemented in subclasses.")
| sandyhouse/dlbenchmark | models/cnn/model.py | model.py | py | 2,265 | python | en | code | 0 | github-code | 13 |
38639682081 | #!/usr/bin/python3
"""
This module is responsible for printing a square using the character '#'.
"""
def print_square(size):
    """
    Prints a square with the character '#' of the specified size.

    Args:
        size (int): The length of one side of the square.

    Raises:
        TypeError: If the 'size' parameter is not an integer (floats
            included, since the isinstance check rejects them).
        ValueError: If the 'size' parameter is less than 0.

    Returns:
        None
    """
    if not isinstance(size, int):
        # This already covers floats (including negative floats), so the
        # original trailing `isinstance(size, float) and size < 0` branch
        # was unreachable dead code and has been removed.
        raise TypeError("size must be an integer")
    if size < 0:
        raise ValueError("size must be >= 0")
    for _ in range(size):
        print("#" * size)
| DoxaNtow/alx-higher_level_programming | 0x07-python-test_driven_development/4-print_square.py | 4-print_square.py | py | 811 | python | en | code | 0 | github-code | 13 |
42241543661 | import torch
import torch.nn as nn
from collections import OrderedDict
class Bottleneck(nn.Module):
    """1x1 bottleneck stage (BN -> ReLU -> 1x1 conv) used before a dense layer.

    forward() accepts any number of feature maps and concatenates them
    along the channel axis before processing.
    """

    def __init__(self, inchannel, growthrate, bn_size):
        super(Bottleneck, self).__init__()
        self.innerchannel = growthrate * bn_size
        self.bn = nn.BatchNorm2d(inchannel)
        self.relu = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(inchannel, self.innerchannel, kernel_size=1, bias=False)

    def forward(self, *inputs):
        # Concatenate all incoming maps channel-wise, then BN -> ReLU -> conv.
        features = torch.cat(inputs, 1)
        return self.conv(self.relu(self.bn(features)))
class DenseLayer(nn.Module):
    """Basic dense layer: BN -> ReLU -> 3x3 conv emitting `growthrate` channels."""

    def __init__(self, inchannel, growthrate):
        super(DenseLayer, self).__init__()
        self.bn = nn.BatchNorm2d(inchannel)
        self.relu = nn.ReLU(inplace=True)
        self.conv = nn.Conv2d(inchannel, growthrate, kernel_size=3, padding=1, bias=False)

    def forward(self, *inputs):
        # A single input is used as-is; multiple inputs are concatenated
        # along the channel dimension first.
        features = inputs[0] if len(inputs) == 1 else torch.cat(inputs, 1)
        return self.conv(self.relu(self.bn(features)))
class DenseLayer_B(nn.Module):
    """Bottlenecked dense layer (DenseNet-B): 1x1 bottleneck then 3x3 dense layer."""

    def __init__(self, num_feature_map, growthrate, bn_size):
        super(DenseLayer_B, self).__init__()
        self.bottleneck = Bottleneck(num_feature_map, growthrate, bn_size)
        self.vanilladenselayer = DenseLayer(growthrate * bn_size, growthrate)

    def forward(self, *inputs):
        # Bottleneck compresses the concatenated inputs before the 3x3 stage.
        return self.vanilladenselayer(self.bottleneck(*inputs))
class DenseBlock(nn.Module):
    """A DenseNet block of `num_layer` bottlenecked dense layers.

    Layer i receives the concatenation of the block input and all previous
    layer outputs (num_input_features + i*growthrate channels) and emits
    `growthrate` new channels.
    """

    def __init__(self, num_layer, growthrate, num_input_features, bn_size):
        # NOTE(review): attribute assigned before nn.Module.__init__();
        # this works for plain ints, but convention is to call super() first.
        self.num_layer = num_layer
        super(DenseBlock, self).__init__()
        for i in range(num_layer):
            layer = DenseLayer_B(num_input_features + i*growthrate, growthrate, bn_size)
            self.add_module('denselayer{}'.format(i), layer)

    def forward(self, input):
        # Accumulate each layer's output; every layer sees all earlier maps.
        features = [input]
        for i in range(self.num_layer):
            output = getattr(self, 'denselayer{}'.format(i))(*features)
            features.append(output)
        # Final output: block input plus all layer outputs, channel-concatenated.
        return torch.cat(features, 1)
class Transition(nn.Module):
    """Transition between dense blocks: 1x1 conv then 2x2 average pooling."""

    def __init__(self, num_input_features, num_output_features):
        super(Transition, self).__init__()
        # Same submodule names ('conv', 'pool') as before, so state dicts
        # remain compatible.
        self.transition = nn.Sequential()
        self.transition.add_module(
            'conv',
            nn.Conv2d(num_input_features, num_output_features,
                      kernel_size=1, stride=1, bias=False))
        self.transition.add_module('pool', nn.AvgPool2d(kernel_size=2, stride=2))

    def forward(self, input):
        # Halves the spatial resolution and remaps the channel count.
        return self.transition(input)
| HugoAhoy/RADN-Region-Adaptive-Dense-Network-for-Efficient-Motion-Deblurring-PyTorch-Implementation | network/densemodule.py | densemodule.py | py | 2,702 | python | en | code | 6 | github-code | 13 |
30801581413 | import os
import requests as r
def getArticles(topic='bitcoin', limit=5):
    '''
    Create a News API request and return top-headline articles for a topic.

    Params:
        @topic string: The topic you would like news on (sent as query `q`)
        @limit integer: The maximum number of articles to return

    Requires the NEWS_API_KEY environment variable (raises KeyError when it
    is missing).  Performs a network request to newsapi.org; `r` is the
    `requests` module imported at the top of the file.
    '''
    # Get News API KEY & Add To Header
    headers = {'X-API-KEY': os.environ['NEWS_API_KEY']}
    # API URL
    api_url = 'https://newsapi.org/v2/top-headlines?q='
    # Makes Request
    data = r.get(api_url + topic, headers=headers).json()
    # Limit Data
    data = data['articles'][:limit]
    # Return Data
    return data
| meads2/tableau-newsfeed | endpoints/getNews.py | getNews.py | py | 652 | python | en | code | 1 | github-code | 13 |
12532696584 | from __future__ import absolute_import, division, print_function
import json
from collections import defaultdict
import six
from flask import current_app
from inspirehep.modules.disambiguation.core.db.readers import (
get_all_curated_signatures,
get_all_publications,
)
from inspirehep.modules.disambiguation.core.ml.models import (
DistanceEstimator,
EthnicityEstimator,
)
from inspirehep.modules.disambiguation.core.ml.sampling import sample_signature_pairs
from inspirehep.modules.disambiguation.utils import open_file_in_folder
def save_curated_signatures_and_input_clusters():
    """Save curated signatures and input clusters to disk.

    Saves two files to disk called (by default) ``input_clusters.jsonl`` and
    ``curated_signatures.jsonl``. The former contains one line per each cluster
    initially present in INSPIRE, while the latter contains one line per each
    curated signature that will be used as ground truth by ``BEARD``.
    """
    signatures_with_author = defaultdict(list)
    signatures_without_author = []

    with open_file_in_folder(current_app.config['DISAMBIGUATION_CURATED_SIGNATURES_PATH'], 'w') as fd:
        for signature in get_all_curated_signatures():
            if signature.get('author_id'):
                signatures_with_author[signature['author_id']].append(signature['signature_uuid'])
                fd.write(json.dumps(signature) + '\n')
            else:
                signatures_without_author.append(signature['signature_uuid'])

    with open_file_in_folder(current_app.config['DISAMBIGUATION_INPUT_CLUSTERS_PATH'], 'w') as fd:
        # Track the next free cluster id explicitly.  The original relied on
        # `cluster_id` leaking out of the first for-loop, which raised
        # NameError when there were no signatures with an author id.
        next_cluster_id = 0
        for author_id, signature_uuids in six.iteritems(signatures_with_author):
            fd.write(json.dumps({
                'author_id': author_id,
                'cluster_id': next_cluster_id,
                'signature_uuids': signature_uuids,
            }) + '\n')
            next_cluster_id += 1
        # Each uncurated signature becomes its own singleton cluster.
        for signature_uuid in signatures_without_author:
            fd.write(json.dumps({
                'author_id': None,
                'cluster_id': next_cluster_id,
                'signature_uuids': [signature_uuid],
            }) + '\n')
            next_cluster_id += 1
def save_sampled_pairs():
    """Save sampled signature pairs to disk.

    Writes (by default) ``sampled_pairs.jsonl``: one JSON line per pair of
    signatures sampled from INSPIRE, used by ``BEARD`` during training.
    """
    with open_file_in_folder(current_app.config['DISAMBIGUATION_SAMPLED_PAIRS_PATH'], 'w') as output:
        pair_stream = sample_signature_pairs(
            current_app.config['DISAMBIGUATION_CURATED_SIGNATURES_PATH'],
            current_app.config['DISAMBIGUATION_INPUT_CLUSTERS_PATH'],
            current_app.config['DISAMBIGUATION_SAMPLED_PAIRS_SIZE'],
        )
        for pair in pair_stream:
            output.write(json.dumps(pair) + '\n')
def save_publications():
    """Save publications to disk.

    Writes (by default) ``publications.jsonl``: one JSON line per INSPIRE
    record, with information useful to ``BEARD`` during training and
    prediction.
    """
    with open_file_in_folder(current_app.config['DISAMBIGUATION_PUBLICATIONS_PATH'], 'w') as output:
        lines = (json.dumps(publication) + '\n' for publication in get_all_publications())
        output.writelines(lines)
def train_and_save_ethnicity_model():
    """Train the ethnicity estimator model and save it to disk."""
    config = current_app.config
    estimator = EthnicityEstimator()
    estimator.load_data(config['DISAMBIGUATION_ETHNICITY_DATA_PATH'])
    estimator.fit()
    estimator.save_model(config['DISAMBIGUATION_ETHNICITY_MODEL_PATH'])
def train_and_save_distance_model():
    """Train the distance estimator model and save it to disk.

    Requires a previously trained ethnicity model on disk (see
    train_and_save_ethnicity_model).
    """
    config = current_app.config

    ethnicity_estimator = EthnicityEstimator()
    ethnicity_estimator.load_model(config['DISAMBIGUATION_ETHNICITY_MODEL_PATH'])

    distance_estimator = DistanceEstimator(ethnicity_estimator)
    distance_estimator.load_data(
        config['DISAMBIGUATION_CURATED_SIGNATURES_PATH'],
        config['DISAMBIGUATION_SAMPLED_PAIRS_PATH'],
        config['DISAMBIGUATION_SAMPLED_PAIRS_SIZE'],
        config['DISAMBIGUATION_PUBLICATIONS_PATH'],
    )
    distance_estimator.fit()
    distance_estimator.save_model(config['DISAMBIGUATION_DISTANCE_MODEL_PATH'])
| miguelgrc/inspire-next | inspirehep/modules/disambiguation/api.py | api.py | py | 4,490 | python | en | code | null | github-code | 13 |
26334801440 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
    """Add indexed `urlid` slug fields to PiggyProject and Website, and
    update the help text/default of PiggyUser.profile_image."""

    dependencies = [
        ('core', '0016_piggyuser_profile_image'),
    ]

    operations = [
        # New human-readable URL keyword for projects, indexed for lookups.
        migrations.AddField(
            model_name='piggyproject',
            name='urlid',
            field=models.CharField(default=b'', help_text=b'An indexing keyword, useful for human-readable URLs', max_length=200, db_index=True),
        ),
        # Same keyword field for websites.
        migrations.AddField(
            model_name='website',
            name='urlid',
            field=models.CharField(default=b'', help_text=b'An indexing keyword, useful for human-readable URLs', max_length=200, db_index=True),
        ),
        migrations.AlterField(
            model_name='piggyuser',
            name='profile_image',
            field=models.CharField(default=b'', help_text=b"User's profile image", max_length=1024),
        ),
    ]
| wavesoft/creditpiggy | creditpiggy-server/creditpiggy/core/migrations/0017_auto_20150624_1257.py | 0017_auto_20150624_1257.py | py | 961 | python | en | code | 0 | github-code | 13 |
72935947538 | import pprint
import torch
import torch.nn as nn
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
vocab_size = 100 # 단어 idx 1-100
pad_id = 0
data = [
[85,14,80,34,99,20,31,65,53,86,3,58,30,4,11,6,50,71,74,13],
[62,76,79,66,32],
[93,77,16,67,46,74,24,70],
[19,83,88,22,57,40,75,82,4,46],
[70,28,30,24,76,84,92,76,77,51,7,20,82,94,57],
[58,13,40,61,88,18,92,89,8,14,61,67,49,59,45,12,47,5],
[22,5,21,84,39,6,9,84,36,59,32,30,69,70,82,56,1],
[94,21,79,24,3,86],
[80,80,33,63,34,63],
[87,32,79,65,2,96,43,80,85,20,41,52,95,50,35,96,24,80]
]
# 1
# 가장 긴 문장에 맞춰서 padding 처리
# "max_len" "valid_lens"
max_len = len(max(data, key=len))#20
valid_data = [] # padding 처리될
data_lens = []
for sentence in data:
valid_data.append(sentence.copy())
data_lens.append(len(sentence))
if len(sentence) < max_len:
valid_data[-1].extend([pad_id]*(max_len - len(sentence)))
pprint.pprint(valid_data)
pprint.pprint(data)
# 2
# tensor로 변환 (batch 하나 통째로)
# "batch"
batch = torch.LongTensor(valid_data)
batch_lens = torch.LongTensor(data_lens)
# 3
# one-hot -> cont vector embedding
# 100차원 -> 256차원 "embedding_size" "embedding"
embedding_size = 256
embedding = nn.Embedding(vocab_size, embedding_size)
embedded_batch = embedding(batch)
print(embedded_batch.shape)
# 4
hidden_size = 512 # RNN의 hidden size
num_layers = 1 # 쌓을 RNN layer의 개수
num_dirs = 1 # 1: 단방향 RNN, 2: 양방향 RNN / bidirectional: (h1+h2 -> h )
# rnn 모델 초기화 & "h_0" 초기화
rnn_model = nn.RNN(
input_size = embedding_size,
hidden_size = hidden_size,
num_layers = num_layers,
batch_first=True)
h_0 = torch.zeros(num_layers*num_dirs, batch.shape[0], hidden_size)
# 5
# rnn forward 결과
# embedded_batch = (10,20,256)
# (sequence length, batch size, input_size) batch_first=True
# hidden_state 가 h_n을 포함??
hidden_state, h_n = rnn_model(embedded_batch, h_0) # output == hidden_state
print("here", hidden_state.shape) # torch.Size([10, 20, 512]) -> 20개 위치에 대한 20개 h
print("here", h_n.shape) # # torch.Size([1, 10, 512]) -> 마지막 위치에 대한 마지막 1개 h
# 마지막 h_n으로 text classification 수행 (num_classes = 2)
#h_n -> linear -> y_0(num_clases = 2)
num_classes= 2
linear = nn.Linear(hidden_size,num_classes) # W_hy
y_tc = linear(h_n)
print(y_tc.shape) # torch.Size([1, 10, 2])
# 7
# "hidden_states"로 token-level의 task를 수행
num_classes = 5
linear = nn.Linear(hidden_size,num_classes)
y_tl = linear(hidden_state)
print(y_tl.shape) # torch.Size([10, 20, 5])
# 8
# PackedSequence
# 데이터를 padding전 원래 길이 기준으로 정렬합니다. sorted_lens sorted_idx sorted_batch
sorted_lens, sorted_idx = batch_lens.sort(descending=True)
print(sorted_lens, sorted_idx)
sorted_batch = batch[sorted_idx]
# 9
# embedding & pack_padded_sequence() 수행
packed_embedded_batch = embedding(sorted_batch)
packed_input = pack_padded_sequence(packed_embedded_batch, sorted_lens, batch_first=True)
# print(packed_input[0]) # 모든 단어를 합친 하나의 객체 (모든 단어 합친 개수, 차원)
# print(packed_input[1]) # 각 단어 개수를 담은 리스트
# data, batch_sizes, sorted_indices, unsorted_indices
# 10
# rnn에 통과
hidden_state, h_n = rnn_model(packed_input, h_0)
# 기존 rnn에 비해 packed sequence를 사용하며 달라진 점
# 기존 : input-> hidden_state -> y
# now : input-> packed -> hidden -> unpacked -> y
# 11
# pad_packed_sequence() 수행 -> pad 복원 mojo
seq_unpacked, lens_unpacked = pad_packed_sequence(hidden_state , batch_first=True)
print(seq_unpacked.shape) # torch.Size([10, 20, 512])
print(seq_unpacked.transpose(0, 1))
print(lens_unpacked) # tensor([20, 18, 18, 17, 15, 10, 8, 6, 6, 5])
# 4시간 만에 rnn 클리어!
# 너무 좋습니다 | zeus0007/til | code/rnn.py | rnn.py | py | 3,889 | python | en | code | 1 | github-code | 13 |
14140536482 | import socket
import os
import time
import random
import logging
from _thread import start_new_thread
from threading import Lock
import pickle
import copy
import utils
from collections import deque
from heapq import heappush, heappop
import json
import sys
class Server:
CHANNEL_PORT = 10000
SERVER_PORTS = {
# Client listener port, raft vote listener port, raft operation listener port.
0: (11001, 12001, 13001),
1: (11002, 12002, 13002),
2: (11003, 12003, 13003),
}
CLIENTS = [0, 1, 2]
MAX_CONNECTION = 100
BUFFER_SIZE = 65536
LEADER_ELECTION_TIMEOUT = 10
MESSAGE_SENDING_TIMEOUT = 10
HEARTBEAT_TIMEOUT = 1
MAX_TRANSACTION_COUNT = 3
SLEEP_INTERVAL = 0.01
    def __init__(self):
        """Prompt for this server's id, bind its three listener sockets, and
        initialize all Raft / blockchain state.

        Every piece of shared state gets a companion ``Lock`` plus a
        ``*_lock_by`` string that records which function last
        acquired/released the lock; these strings are persisted via
        ``save_state`` for post-mortem debugging.
        """
        # Get the server name (blocks on stdin until a valid id is entered).
        while True:
            self.server_id = int(input('Which server are you? Enter 0, 1 or 2. \n'))
            if self.server_id in Server.SERVER_PORTS:
                self.other_servers = {0, 1, 2} - {self.server_id}
                self.sockets = [None, None, None] # Client port, vote port, and operation port.
                for i in range(3):
                    self.sockets[i] = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
                    self.sockets[i].bind((socket.gethostname(), Server.SERVER_PORTS[self.server_id][i]))
                break
            else:
                print('Wrong server name. Please enter 0, 1 or 2.')
        # Initialize blockchains, balance tables, proof of work working area, etc.
        # Server state.
        self.server_state = 'Follower' # Follower, Leader, Candidate
        self.server_state_lock = Lock()
        self.server_state_lock_by = ''
        self.leader_id = None
        self.leader_id_lock = Lock()
        self.leader_id_lock_by = ''
        # Server term.
        self.server_term = 0
        self.server_term_lock = Lock()
        self.server_term_lock_by = ''
        # State variables for operation (AppendEntries bookkeeping per peer).
        self.servers_operation_last_seen = [time.time(), time.time(), time.time()]
        self.servers_operation_last_seen_lock = Lock()
        self.servers_operation_last_seen_lock_by = ''
        self.servers_log_next_index = [0, 0, 0]
        self.servers_log_next_index_lock = Lock()
        self.servers_log_next_index_lock_by = ''
        self.accept_indexes = [-1, -1, -1]
        self.accept_indexes_lock = Lock()
        self.accept_indexes_lock_by = ''
        self.commit_index = -1
        self.commit_index_lock = Lock()
        self.commit_index_lock_by = ''
        self.commit_watches = []
        self.commit_watches_lock = Lock()
        self.commit_watches_lock_by = ''
        # State variables for vote (leader election).
        self.last_election_time = 0
        self.last_election_time_lock = Lock()
        self.last_election_time_lock_by = ''
        self.voted_candidate = None
        self.voted_candidate_lock = Lock()
        self.voted_candidate_lock_by = ''
        self.received_votes = 0
        self.received_votes_lock = Lock()
        self.received_votes_lock_by = ''
        # State variables for client (the replicated log and its effects).
        self.blockchain = [] # each block: {'term': ..., 'phash': ..., 'nonce': ...,
        # 'transactions': ((unique_id, (A, B, 5)), ((unique_id, (A,)), None)}
        self.first_blockchain_read = False
        self.blockchain_lock = Lock()
        self.blockchain_lock_by = ''
        self.balance_table = [100, 100, 100]
        self.balance_table_lock = Lock()
        self.balance_table_lock_by = ''
        self.transaction_queue = deque()
        self.transaction_queue_lock = Lock()
        self.transaction_queue_lock_by = ''
        self.transaction_ids = set()
        self.transaction_ids_lock = Lock()
        self.transaction_ids_lock_by = ''
        # Set up loggers (file log is intentionally appended across restarts).
        log_file = f'server_{self.server_id}.log'
        # if os.path.exists(log_file):
        #     os.remove(log_file)
        self.logger = logging.getLogger(f'Server_{self.server_id}')
        file_handler = logging.FileHandler(log_file)
        formatter = logging.Formatter('%(asctime)s %(message)s', "%H:%M:%S")
        file_handler.setFormatter(formatter)
        self.logger.addHandler(file_handler)
        self.logger.setLevel(logging.INFO)
        self.logger.info("==============================================STARTING==============================================")
def save_state(self, variable_names):
state = self.__dict__
for variable_name in variable_names:
value = state[variable_name]
if variable_name == 'transaction_queue':
jso_object = {variable_name: list(value)}
elif variable_name == 'transaction_ids':
jso_object = {variable_name: list(value)}
else:
jso_object = {variable_name: value}
with open(f'server_{self.server_id}_states/{variable_name}.json', 'w') as _file:
json.dump(jso_object, _file)
def load_state(self, variable_names):
for variable_name in variable_names:
path = f'server_{self.server_id}_states/{variable_name}.json'
if os.path.exists(path):
with open(path, 'r') as _file:
state = dict(json.load(_file))
if 'transaction_queue' in state.keys():
state['transaction_queue'] = deque(state['transaction_queue'])
elif 'transaction_ids' in state.keys():
state['transaction_ids'] = set(state['transaction_ids'])
self.__dict__.update(state)
# Operation utilities.
def generate_operation_response_message(self, receiver, last_log_index_after_append, success):
# server_term is already locked here...
header = 'Operation-Response'
sender = self.server_id
message = {
'term': self.server_term,
'last_log_index_after_append': last_log_index_after_append,
'success': success
}
return header, sender, receiver, message
    def generate_operation_request_message(self, receiver, is_heartbeat=False):
        """Build an Operation-Request (AppendEntries) tuple for *receiver*.

        A heartbeat carries an empty ``entries`` list; otherwise all blocks
        from the receiver's next-index onward are attached.  Takes and then
        releases the term, next-index, commit-index and blockchain locks,
        recording the holder in the ``*_lock_by`` debug strings.
        """
        header = 'Operation-Request'
        sender = self.server_id
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.server_term_lock.acquire()
        self.server_term_lock_by = acquired_by
        self.save_state(['server_term_lock_by'])
        self.servers_log_next_index_lock.acquire()
        self.servers_log_next_index_lock_by = acquired_by
        self.save_state(['servers_log_next_index_lock_by'])
        self.commit_index_lock.acquire()
        self.commit_index_lock_by = acquired_by
        self.save_state(['commit_index_lock_by'])
        self.blockchain_lock.acquire()
        self.blockchain_lock_by = acquired_by
        self.save_state(['blockchain_lock_by'])
        next_log_index = self.servers_log_next_index[receiver]
        previous_log_index = next_log_index - 1
        # Term of the entry immediately before the new ones; -1 when there is
        # no such entry (empty chain or receiver starting from index 0).
        previous_log_term = self.blockchain[previous_log_index]['term'] if len(self.blockchain) > 0 and previous_log_index > -1 else -1
        message = {
            'term': self.server_term,
            'leader_id': self.server_id,
            'previous_log_index': previous_log_index,
            'previous_log_term': previous_log_term,
            'entries': [] if is_heartbeat else self.blockchain[next_log_index:],
            'commit_index': self.commit_index
        }
        self.server_term_lock_by = released_by
        self.save_state(['server_term_lock_by'])
        self.server_term_lock.release()
        self.servers_log_next_index_lock_by = released_by
        self.save_state(['servers_log_next_index_lock_by'])
        self.servers_log_next_index_lock.release()
        self.commit_index_lock_by = released_by
        self.save_state(['commit_index_lock_by'])
        self.commit_index_lock.release()
        self.blockchain_lock_by = released_by
        self.save_state(['blockchain_lock_by'])
        self.blockchain_lock.release()
        return header, sender, receiver, message
    def update_transaction_ids(self, block, add=True):
        """Add *block*'s transaction ids to the de-duplication set, or remove
        them when *add* is False (used when log entries are overwritten).

        Runs under ``transaction_ids_lock`` and persists the updated set.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.transaction_ids_lock.acquire()
        self.transaction_ids_lock_by = acquired_by
        self.save_state(['transaction_ids_lock_by'])
        # A block stores transactions as (unique_id, payload) pairs; a slot
        # may be None when the block is not full.
        for transaction in block['transactions']:
            if transaction is not None:
                transaction_id = transaction[0]
                if add:
                    self.transaction_ids.add(transaction_id)
                else:
                    self.transaction_ids.remove(transaction_id)
        self.save_state(['transaction_ids'])
        self.transaction_ids_lock_by = released_by
        self.save_state(['transaction_ids_lock_by'])
        self.transaction_ids_lock.release()
    def on_receive_operation_request(self, sender, message):
        """Handle an AppendEntries RPC (append request or heartbeat) from the leader.

        Rejects messages with a stale term; otherwise steps down to Follower
        if necessary, appends/overwrites log entries when the previous-log
        consistency check passes, commits everything the leader reports as
        committed, and replies with an Operation-Response.  A heartbeat
        (empty ``entries``) only resets the election timeout.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        self.leader_id_lock.acquire()
        self.leader_id_lock_by = acquired_by
        self.save_state(['leader_id_lock_by'])
        self.server_term_lock.acquire()
        self.server_term_lock_by = acquired_by
        self.save_state(['server_term_lock_by'])
        self.commit_index_lock.acquire()
        self.commit_index_lock_by = acquired_by
        self.save_state(['commit_index_lock_by'])
        self.voted_candidate_lock.acquire()
        self.voted_candidate_lock_by = acquired_by
        self.save_state(['voted_candidate_lock_by'])
        if message['term'] < self.server_term:
            # reject message because term is smaller.
            msg = self.generate_operation_response_message(sender, None, success=False)
            start_new_thread(utils.send_message, (msg, Server.CHANNEL_PORT))
        else: # message['term'] >= self.server_term:
            # saw bigger term from another process, step down, and continue
            if message['term'] > self.server_term:
                self.voted_candidate = None
                self.save_state(['voted_candidate'])
                self.server_term = message['term']
                self.save_state(['server_term'])
            if self.server_state != 'Follower':
                self.server_state = 'Follower'
                self.save_state(['server_state'])
                print(f'Becomes Follower for Term: {self.server_term}')
                self.logger.info(f'Follower! Term: {self.server_term} because of on_receive_operation_request')
            self.leader_id = message['leader_id']
            self.save_state(['leader_id'])
            if len(message['entries']) > 0: # append message
                print(f'Append RPC from Server {sender}')
                self.logger.info(message)
                self.blockchain_lock.acquire()
                self.blockchain_lock_by = acquired_by
                self.save_state(['blockchain_lock_by'])
                print(f'Blockchain before Append RPC: {utils.blockchain_print_format(self.blockchain)}')
                self.logger.info(f'Blockchain before Append RPC: {self.blockchain}')
                prev_log_index = message['previous_log_index']
                # Raft consistency check: accept only if our entry at
                # previous_log_index matches the leader's term (or the leader
                # is sending from the very beginning of the log).
                if prev_log_index == -1 or \
                        (len(self.blockchain) > prev_log_index and
                         message['previous_log_term'] == self.blockchain[prev_log_index]['term']): # matches update blockchain
                    # Overwrite any new entries
                    for i, entry in enumerate(message['entries']):
                        if len(self.blockchain) < prev_log_index + i + 2:
                            self.blockchain.append(entry)
                            self.update_transaction_ids(entry)
                        elif entry['term'] != self.blockchain[prev_log_index + i + 1]['term']:
                            # remove overwritten transactions_ids
                            for will_be_deleted_block in self.blockchain[prev_log_index + i + 1:]:
                                self.update_transaction_ids(will_be_deleted_block, add=False)
                            self.blockchain = self.blockchain[:prev_log_index + i + 1]
                            self.blockchain.append(entry)
                            self.update_transaction_ids(entry)
                    self.save_state(['blockchain'])
                    success = True
                    print(f'Follower: Before commit balance table: {self.balance_table}')
                    self.logger.info(f'Follower: Before commit balance table: {self.balance_table}')
                    # update commit index depends on given message, and commit the previous entries
                    if self.commit_index < message['commit_index']: # If not, the block has been already committed.
                        first_commit_index = self.commit_index
                        self.commit_index = min(len(self.blockchain) - 1, message['commit_index'])
                        self.save_state(['commit_index'])
                        for i in range(first_commit_index + 1, self.commit_index + 1):
                            block = self.blockchain[i]
                            print(f'Committing: Block index: {i}, Block: {utils.blockchain_print_format([block])}')
                            self.logger.info(f'Committing: Block index: {i}, Block: {block}')
                            self.commit_block(block)
                        print(f'Follower: After commit balance table: {self.balance_table}')
                        self.logger.info(f'Follower: After commit balance table: {self.balance_table}')
                else:
                    success = False
                print(f'Blockchain after Append RPC: {utils.blockchain_print_format(self.blockchain)}')
                self.logger.info(f'Blockchain after Append RPC: {self.blockchain}')
                last_log_index_after_append = len(self.blockchain) - 1
                self.blockchain_lock_by = released_by
                self.save_state(['blockchain_lock_by'])
                self.blockchain_lock.release()
                msg = self.generate_operation_response_message(sender, last_log_index_after_append, success=success)
                start_new_thread(utils.send_message, (msg, Server.CHANNEL_PORT))
            else: # heartbeat
                self.logger.info(f'Heartbeat from Server {sender}')
            # Any valid leader contact (append or heartbeat) resets the election timer.
            start_new_thread(self.threaded_leader_election_watch, ())
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
        self.leader_id_lock_by = released_by
        self.save_state(['leader_id_lock_by'])
        self.leader_id_lock.release()
        self.server_term_lock_by = released_by
        self.save_state(['server_term_lock_by'])
        self.server_term_lock.release()
        self.commit_index_lock_by = released_by
        self.save_state(['commit_index_lock_by'])
        self.commit_index_lock.release()
        self.voted_candidate_lock_by = released_by
        self.save_state(['voted_candidate_lock_by'])
        self.voted_candidate_lock.release()
    def on_receive_operation_response(self, sender, message):
        """Handle a follower's AppendEntries response (leader side).

        On success: record the follower's accept index, advance the commit
        index to the median accept index if that entry is from the current
        term (Raft majority rule), and update the follower's next-index.
        On failure with a matching term: decrement the follower's next-index
        and retry.  Steps down to Follower if a higher term is seen.

        NOTE(review): the median rule assumes this leader's own slot in
        ``accept_indexes`` is kept current elsewhere -- confirm.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        term = message['term']
        last_log_index_after_append = message['last_log_index_after_append']
        success = message['success']
        # Any response counts as liveness from the follower.
        self.servers_operation_last_seen_lock.acquire()
        self.servers_operation_last_seen_lock_by = acquired_by
        self.save_state(['servers_operation_last_seen_lock_by'])
        self.servers_operation_last_seen[sender] = time.time()
        self.save_state(['servers_operation_last_seen'])
        self.servers_operation_last_seen_lock_by = released_by
        self.save_state(['servers_operation_last_seen_lock_by'])
        self.servers_operation_last_seen_lock.release()
        if success:
            self.server_term_lock.acquire()
            self.server_term_lock_by = acquired_by
            self.save_state(['server_term_lock_by'])
            self.servers_log_next_index_lock.acquire()
            self.servers_log_next_index_lock_by = acquired_by
            self.save_state(['servers_log_next_index_lock_by'])
            self.accept_indexes_lock.acquire()
            self.accept_indexes_lock_by = acquired_by
            self.save_state(['accept_indexes_lock_by'])
            self.commit_index_lock.acquire()
            self.commit_index_lock_by = acquired_by
            self.save_state(['commit_index_lock_by'])
            self.blockchain_lock.acquire()
            self.blockchain_lock_by = acquired_by
            self.save_state(['blockchain_lock_by'])
            if self.accept_indexes[sender] < last_log_index_after_append:
                self.accept_indexes[sender] = last_log_index_after_append
                self.save_state(['accept_indexes'])
                # Median of the sorted accept indexes = highest index accepted
                # by a majority of the cluster.
                sorted_accept_indexes = sorted(self.accept_indexes)
                target_accept_index = sorted_accept_indexes[int((len(sorted_accept_indexes) - 1) / 2)]
                # Only commit entries from the current term (Raft safety rule).
                if self.blockchain[target_accept_index]['term'] == self.server_term:
                    self.commit_index = target_accept_index
                    self.save_state(['commit_index'])
            self.servers_log_next_index[sender] = len(self.blockchain)
            self.save_state(['servers_log_next_index'])
            self.server_term_lock_by = released_by
            self.save_state(['server_term_lock_by'])
            self.server_term_lock.release()
            self.servers_log_next_index_lock_by = released_by
            self.save_state(['servers_log_next_index_lock_by'])
            self.servers_log_next_index_lock.release()
            self.accept_indexes_lock_by = released_by
            self.save_state(['accept_indexes_lock_by'])
            self.accept_indexes_lock.release()
            self.commit_index_lock_by = released_by
            self.save_state(['commit_index_lock_by'])
            self.commit_index_lock.release()
            self.blockchain_lock_by = released_by
            self.save_state(['blockchain_lock_by'])
            self.blockchain_lock.release()
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        self.server_term_lock.acquire()
        self.server_term_lock_by = acquired_by
        self.save_state(['server_term_lock_by'])
        self.voted_candidate_lock.acquire()
        self.voted_candidate_lock_by = acquired_by
        self.save_state(['voted_candidate_lock_by'])
        if term > self.server_term:
            # success = False
            self.server_state = 'Follower'
            self.server_term = term
            self.voted_candidate = None
            print(f'Becomes Follower for Term: {self.server_term}')
            self.logger.info(f'Follower! Term: {self.server_term} because of on_receive_operation_response')
            self.save_state(['server_state', 'server_term', 'voted_candidate'])
            start_new_thread(self.threaded_leader_election_watch, ())
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
        self.server_term_lock_by = released_by
        self.save_state(['server_term_lock_by'])
        self.server_term_lock.release()
        self.voted_candidate_lock_by = released_by
        self.save_state(['voted_candidate_lock_by'])
        self.voted_candidate_lock.release()
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        self.servers_log_next_index_lock.acquire()
        self.servers_log_next_index_lock_by = acquired_by
        self.save_state(['servers_log_next_index_lock_by'])
        if not success and self.server_state == "Leader": # index problem, retry
            self.servers_log_next_index[sender] -= 1
            self.save_state(['servers_log_next_index'])
            start_new_thread(self.threaded_response_watch, (sender,))
            start_new_thread(self.threaded_send_append_request, ([sender],))
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
        self.servers_log_next_index_lock_by = released_by
        self.save_state(['servers_log_next_index_lock_by'])
        self.servers_log_next_index_lock.release()
def threaded_on_receive_operation(self, connection):
# Receive and process append request/response and heartbeat messages.
header, sender, receiver, message = utils.receive_message(connection)
self.logger.info(f"Received {header} from Server {sender}: {message}")
if header == 'Operation-Request':
self.on_receive_operation_request(sender, message)
elif header == 'Operation-Response':
self.on_receive_operation_response(sender, message)
else:
raise NotImplementedError(f'Header {header} is not related!')
def start_threaded_response_watch(self, receiver, commit_index_lock=True, blockchain_lock=True, servers_log_next_index=True):
func_name = sys._getframe().f_code.co_name
acquired_by = 'ACQUIRED by ' + func_name
released_by = 'RELEASED by ' + func_name
if servers_log_next_index:
self.servers_log_next_index_lock.acquire()
self.servers_log_next_index_lock_by = acquired_by
self.save_state(['servers_log_next_index_lock_by'])
if commit_index_lock:
self.commit_index_lock.acquire()
self.commit_watches_lock_by = acquired_by
self.save_state(['commit_watches_lock_by'])
if blockchain_lock:
self.blockchain_lock.acquire()
self.blockchain_lock_by = acquired_by
self.save_state(['blockchain_lock_by'])
if self.commit_index < len(self.blockchain) - 1 and self.servers_log_next_index[receiver] < len(self.blockchain):
# at least one index is not committed
# print('Leader: Uncommitted blocks are detected... Sending request to servers...')
self.logger.info('Leader: Uncommitted blocks are detected... Sending request to servers...')
start_new_thread(self.threaded_response_watch, (receiver,))
if servers_log_next_index:
self.servers_log_next_index_lock_by = released_by
self.save_state(['servers_log_next_index_lock_by'])
self.servers_log_next_index_lock.release()
if commit_index_lock:
self.commit_watches_lock_by = released_by
self.save_state(['commit_watches_lock_by'])
self.commit_index_lock.release()
if blockchain_lock:
self.blockchain_lock_by = released_by
self.save_state(['blockchain_lock_by'])
self.blockchain_lock.release()
    def threaded_response_watch(self, receiver):
        """Watch for an operation response from *receiver*; resend on timeout.

        Sleeps a randomized timeout, then, if nothing has been heard from
        *receiver* within that window, fires another append request at it.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.logger.info(f'Response watch starts, {time.time()}')
        timeout = random.uniform(Server.MESSAGE_SENDING_TIMEOUT, Server.MESSAGE_SENDING_TIMEOUT * 2)
        time.sleep(timeout)
        self.servers_operation_last_seen_lock.acquire()
        self.servers_operation_last_seen_lock_by = acquired_by
        self.save_state(['servers_operation_last_seen_lock_by'])
        if time.time() - self.servers_operation_last_seen[receiver] >= timeout: # timed out, resend
            # start_new_thread(self.threaded_response_watch, (receiver,))
            start_new_thread(self.threaded_send_append_request, ([receiver],))
        self.servers_operation_last_seen_lock_by = released_by
        self.save_state(['servers_operation_last_seen_lock_by'])
        self.servers_operation_last_seen_lock.release()
    def threaded_send_append_request(self, receivers):
        """Send an AppendEntries request to every server in *receivers* and
        start a response watch for each, but only while still Leader.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        if self.server_state == 'Leader':
            for receiver in receivers:
                msg = self.generate_operation_request_message(receiver)
                # start_new_thread(self.threaded_on_receive_operation, ())
                self.logger.info(f'Sending append request {msg} to {receiver}')
                start_new_thread(utils.send_message, (msg, Server.CHANNEL_PORT))
                start_new_thread(self.threaded_response_watch, (receiver,))
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
    def threaded_send_heartbeat(self):
        """Broadcast empty AppendEntries (heartbeats) to all peers every
        HEARTBEAT_TIMEOUT seconds while this server is Leader.

        The state lock is released around the sleep so other threads can step
        this server down; the loop re-checks the state after re-acquiring.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        while self.server_state == 'Leader':
            # heartbeat broadcast
            for receiver in self.other_servers:
                # print(f'Sending heartbeat to {receiver}')
                msg = self.generate_operation_request_message(receiver, is_heartbeat=True)
                # start_new_thread(self.threaded_on_receive_operation, ())
                start_new_thread(utils.send_message, (msg, Server.CHANNEL_PORT))
            self.server_state_lock_by = released_by
            self.save_state(['server_state_lock_by'])
            self.server_state_lock.release()
            time.sleep(Server.HEARTBEAT_TIMEOUT)
            self.server_state_lock.acquire()
            self.server_state_lock_by = acquired_by
            self.save_state(['server_state_lock_by'])
        print('Step down from leader. Heartbeat stops.')
        self.logger.info('Step down from leader. Heartbeat stops.')
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
    def threaded_become_leader(self, term):
        """Transition to Leader for *term* (if the term is still current).

        Resets per-follower next-indexes to the end of the log, clears the
        pending transaction queue, restarts commit watches for all
        uncommitted blocks, and spawns the heartbeat, proof-of-work and
        leadership-announcement threads.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        self.leader_id_lock.acquire()
        self.leader_id_lock_by = acquired_by
        self.save_state(['leader_id_lock_by'])
        self.server_term_lock.acquire()
        self.server_term_lock_by = acquired_by
        self.save_state(['server_term_lock_by'])
        self.servers_log_next_index_lock.acquire()
        self.servers_log_next_index_lock_by = acquired_by
        self.save_state(['servers_log_next_index_lock_by'])
        self.commit_index_lock.acquire()
        self.commit_index_lock_by = acquired_by
        self.save_state(['commit_index_lock_by'])
        self.commit_watches_lock.acquire()
        self.commit_watches_lock_by = acquired_by
        self.save_state(['commit_watches_lock_by'])
        self.blockchain_lock.acquire()
        self.blockchain_lock_by = acquired_by
        self.save_state(['blockchain_lock_by'])
        self.transaction_queue_lock.acquire()
        self.transaction_queue_lock_by = acquired_by
        self.save_state(['transaction_queue_lock_by'])
        # Guard: only become leader if no newer term appeared since the vote.
        if self.server_term == term:
            print(f'Becomes Leader for Term: {self.server_term}')
            self.logger.info(f'Leader! Term: {self.server_term}')
            self.server_state = 'Leader'
            self.servers_log_next_index = 3 * [len(self.blockchain)]
            self.leader_id = self.server_id
            self.transaction_queue = deque()
            self.commit_watches = []
            # Re-arm a commit watch for every block that is not yet committed.
            for block_index in range(self.commit_index + 1, len(self.blockchain)):
                heappush(self.commit_watches, block_index)
                start_new_thread(self.threaded_commit_watch, (block_index,))
            self.save_state(['server_state', 'servers_log_next_index', 'leader_id', 'transaction_queue', 'commit_watches'])
            start_new_thread(self.threaded_send_heartbeat, ())
            start_new_thread(self.threaded_proof_of_work, ())
            start_new_thread(self.threaded_announce_leadership_to_clients, ())
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
        self.leader_id_lock_by = released_by
        self.save_state(['leader_id_lock_by'])
        self.leader_id_lock.release()
        self.server_term_lock_by = released_by
        self.save_state(['server_term_lock_by'])
        self.server_term_lock.release()
        self.servers_log_next_index_lock_by = released_by
        self.save_state(['servers_log_next_index_lock_by'])
        self.servers_log_next_index_lock.release()
        self.commit_index_lock_by = released_by
        self.save_state(['commit_index_lock_by'])
        self.commit_index_lock.release()
        self.commit_watches_lock_by = released_by
        self.save_state(['commit_watches_lock_by'])
        self.commit_watches_lock.release()
        self.blockchain_lock_by = released_by
        self.save_state(['blockchain_lock_by'])
        self.blockchain_lock.release()
        self.transaction_queue_lock_by = released_by
        self.save_state(['transaction_queue_lock_by'])
        self.transaction_queue_lock.release()
def start_operation_listener(self):
# Start listener for operation messages.
self.sockets[2].listen(Server.MAX_CONNECTION)
while True:
connection, (ip, port) = self.sockets[2].accept()
start_new_thread(self.threaded_on_receive_operation, (connection,))
# Vote utilities.
    def threaded_leader_election_watch(self):
        """Election-timeout watchdog.

        Stamps ``last_election_time``, sleeps a randomized timeout, and if no
        newer stamp appeared while sleeping (nothing else reset the election
        timer), starts an election via ``threaded_on_leader_election_timeout``.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        # Update the last updated time.
        self.last_election_time_lock.acquire()
        self.last_election_time_lock_by = acquired_by
        self.save_state(['last_election_time_lock_by'])
        self.last_election_time = time.time()
        self.save_state(['last_election_time'])
        self.logger.info(f'Election watch time is started: {self.last_election_time}')
        self.last_election_time_lock_by = released_by
        self.save_state(['last_election_time_lock_by'])
        self.last_election_time_lock.release()
        timeout = random.uniform(Server.LEADER_ELECTION_TIMEOUT, Server.LEADER_ELECTION_TIMEOUT * 2)
        time.sleep(timeout)
        self.last_election_time_lock.acquire()
        self.last_election_time_lock_by = acquired_by
        self.save_state(['last_election_time_lock_by'])
        # If the stamp is unchanged, no heartbeat/vote arrived in the meantime.
        diff = time.time() - self.last_election_time
        if diff >= timeout:
            start_new_thread(self.threaded_on_leader_election_timeout, ())
        self.last_election_time_lock_by = released_by
        self.save_state(['last_election_time_lock_by'])
        self.last_election_time_lock.release()
def generate_vote_request_message(self, receiver):
header = 'Vote-Request'
sender = self.server_id
message = {
'candidate_id': self.server_id,
'term': self.server_term,
'last_log_index': len(self.blockchain) - 1,
'last_log_term': self.blockchain[-1]['term'] if len(self.blockchain) > 0 else -1,
}
return header, sender, receiver, message
    def threaded_on_leader_election_timeout(self):
        """Start a new election: become Candidate, bump the term, vote for
        self, request votes from the other servers, and restart the
        election-timeout watch.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        self.leader_id_lock.acquire()
        self.leader_id_lock_by = acquired_by
        self.save_state(['leader_id_lock_by'])
        self.server_term_lock.acquire()
        self.server_term_lock_by = acquired_by
        self.save_state(['server_term_lock_by'])
        self.voted_candidate_lock.acquire()
        self.voted_candidate_lock_by = acquired_by
        self.save_state(['voted_candidate_lock_by'])
        self.received_votes_lock.acquire()
        self.received_votes_lock_by = acquired_by
        self.save_state(['received_votes_lock_by'])
        self.blockchain_lock.acquire()
        self.blockchain_lock_by = acquired_by
        self.save_state(['blockchain_lock_by'])
        self.server_term += 1
        self.server_state = 'Candidate'
        print(f'Becomes Candidate for Term: {self.server_term}')
        self.logger.info(f'Candidate! Term: {self.server_term}')
        # A candidate votes for itself, so it starts with one vote.
        self.voted_candidate = self.server_id
        self.received_votes = 1
        self.leader_id = None
        msgs = [self.generate_vote_request_message(receiver) for receiver in self.other_servers]
        self.save_state(['server_term', 'server_state', 'voted_candidate', 'received_votes', 'leader_id'])
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
        self.leader_id_lock_by = released_by
        self.save_state(['leader_id_lock_by'])
        self.leader_id_lock.release()
        self.server_term_lock_by = released_by
        self.save_state(['server_term_lock_by'])
        self.server_term_lock.release()
        self.voted_candidate_lock_by = released_by
        self.save_state(['voted_candidate_lock_by'])
        self.voted_candidate_lock.release()
        self.received_votes_lock_by = released_by
        self.save_state(['received_votes_lock_by'])
        self.received_votes_lock.release()
        self.blockchain_lock_by = released_by
        self.save_state(['blockchain_lock_by'])
        self.blockchain_lock.release()
        # Send the vote requests only after all locks are released.
        for msg in msgs:
            start_new_thread(utils.send_message, (msg, Server.CHANNEL_PORT))
        start_new_thread(self.threaded_leader_election_watch, ())
def generate_vote_response_message(self, receiver, vote):
header = 'Vote-Response'
sender = self.server_id
message = {
'term': self.server_term,
'vote': vote
}
return header, sender, receiver, message
    def on_receive_vote_request(self, message):
        """Handle a candidate's Vote-Request.

        Steps down if the candidate's term is higher.  Grants the vote only
        when the terms match, no other candidate was voted for this term, and
        the candidate's log is at least as up-to-date as ours (Raft election
        restriction).  Granting a vote also resets the election timeout.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        self.server_term_lock.acquire()
        self.server_term_lock_by = acquired_by
        self.save_state(['server_term_lock_by'])
        self.voted_candidate_lock.acquire()
        self.voted_candidate_lock_by = acquired_by
        self.save_state(['voted_candidate_lock_by'])
        self.blockchain_lock.acquire()
        self.blockchain_lock_by = acquired_by
        self.save_state(['blockchain_lock_by'])
        reset_leader_election = False
        # Update term.
        if message['term'] > self.server_term:
            self.server_term = message['term']
            print(f'Becomes Follower for Term: {self.server_term}')
            self.logger.info(f'Follower! Term: {self.server_term} because of on_receive_vote_request')
            self.server_state = 'Follower'
            self.voted_candidate = None
            reset_leader_election = True
        # Decide whether to cast vote.
        last_log_term = self.blockchain[-1]['term'] if len(self.blockchain) > 0 else -1
        if message['term'] == self.server_term \
                and self.voted_candidate in {None, message['candidate_id']} \
                and not \
                (last_log_term > message['last_log_term']
                 or (last_log_term == message['last_log_term'] and len(self.blockchain) - 1 > message['last_log_index'])):
            vote = True
            reset_leader_election = True
            self.voted_candidate = message['candidate_id']
        else:
            vote = False
        msg = self.generate_vote_response_message(message['candidate_id'], vote)
        self.save_state(['server_term', 'server_state', 'voted_candidate'])
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
        self.server_term_lock_by = released_by
        self.save_state(['server_term_lock_by'])
        self.server_term_lock.release()
        self.voted_candidate_lock_by = released_by
        self.save_state(['voted_candidate_lock_by'])
        self.voted_candidate_lock.release()
        self.blockchain_lock_by = released_by
        self.save_state(['blockchain_lock_by'])
        self.blockchain_lock.release()
        # Send message and reset election timeout if vote.
        start_new_thread(utils.send_message, (msg, Server.CHANNEL_PORT))
        if reset_leader_election:
            start_new_thread(self.threaded_leader_election_watch, ())
    def on_receive_vote_response(self, message):
        """Handle a Vote-Response while campaigning.

        Steps down on a higher term.  While still a Candidate, counts votes
        for the current term and, on reaching a majority, transitions to
        Leader via ``threaded_become_leader``.
        """
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        self.server_term_lock.acquire()
        self.server_term_lock_by = acquired_by
        self.save_state(['server_term_lock_by'])
        self.last_election_time_lock.acquire()
        self.last_election_time_lock_by = acquired_by
        self.save_state(['last_election_time_lock_by'])
        self.received_votes_lock.acquire()
        self.received_votes_lock_by = acquired_by
        self.save_state(['received_votes_lock_by'])
        become_leader = False
        if message['term'] > self.server_term: # Discover higher term.
            self.server_term = message['term']
            print(f'Becomes Follower for Term: {self.server_term}')
            self.logger.info(f'Follower! Term: {self.server_term} because of on_receive_vote_response')
            self.server_state = 'Follower'
        if self.server_state == 'Candidate': # Hasn't stepped down yet.
            if message['vote'] and message['term'] == self.server_term: # Receive vote for current term.
                self.received_votes += 1
                if self.received_votes >= len(Server.SERVER_PORTS) // 2 + 1: # Received enough votes to become leader.
                    become_leader = True
                    self.last_election_time = time.time() # Update the last election time to avoid previous timeout watches. Don't start new timeout watch.
        if become_leader and self.server_state != 'Leader':
            start_new_thread(self.threaded_become_leader, (self.server_term,))
        self.save_state(['server_term', 'server_state', 'received_votes', 'last_election_time'])
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
        self.server_term_lock_by = released_by
        self.save_state(['server_term_lock_by'])
        self.server_term_lock.release()
        self.received_votes_lock_by = released_by
        self.save_state(['received_votes_lock_by'])
        self.received_votes_lock.release()
        self.last_election_time_lock_by = released_by
        self.save_state(['last_election_time_lock_by'])
        self.last_election_time_lock.release()
def threaded_on_receive_vote(self, connection):
# Receive and process the vote request/response messages.
header, sender, receiver, message = utils.receive_message(connection)
self.logger.info(f"Received {header} from Server {sender}: {message}")
if header == 'Vote-Request':
self.on_receive_vote_request(message)
elif header == 'Vote-Response':
self.on_receive_vote_response(message)
else:
raise NotImplementedError(f'Header {header} is not related!')
def start_vote_listener(self):
# Start listener for vote messages.
self.sockets[1].listen(Server.MAX_CONNECTION)
while True:
connection, (ip, port) = self.sockets[1].accept()
start_new_thread(self.threaded_on_receive_vote, (connection,))
# Blockchain and client message utilities.
def generate_client_response_message(self, transaction, transaction_result):
header = 'Client-Response'
sender = self.server_id
receiver = transaction[1][0]
message = {
'transaction': transaction,
'result': transaction_result,
}
return header, sender, receiver, message
    def threaded_send_client_response(self, transaction, transaction_result):
        # Compose the response for client.
        # transaction = (id, (A, B, amt)) or (id, (A, )), transaction_result = (True/False, balance_of_A).
        # Fire-and-forget: the actual network send runs on its own thread via
        # the channel port, so callers are never blocked on the socket.
        msg = self.generate_client_response_message(transaction, transaction_result)
        start_new_thread(utils.send_message, (msg, Server.CHANNEL_PORT))
    def threaded_announce_leadership_to_clients(self):
        # Announce self is the new leader by sending a special client-response message without actual transaction or result.
        # The *_lock_by strings are persisted purely as debugging breadcrumbs
        # recording which method last acquired/released each lock.
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        # Only an actual leader announces; a stale call is a silent no-op.
        if self.server_state == 'Leader':
            for client in Server.CLIENTS:
                header = 'Client-Response'
                sender = self.server_id
                # transaction/result of None marks this message as a leadership
                # announcement rather than a real transaction reply.
                message = {
                    'transaction': None,
                    'result': None
                }
                msg = (header, sender, client, message)
                start_new_thread(utils.send_message, (msg, Server.CHANNEL_PORT))
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
def update_balance_table(self, transaction_content):
if len(transaction_content) == 3: # transfer transaction
sender, receiver, amount = transaction_content
self.balance_table[sender] -= amount
self.balance_table[receiver] += amount
self.save_state(['balance_table'])
    def commit_block(self, block, lock_balance_table=True):
        # Apply every non-empty transaction in `block` to the balance table.
        # lock_balance_table=False lets callers that already hold the lock
        # reuse this method without self-deadlocking.
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        if lock_balance_table:
            self.balance_table_lock.acquire()
            self.balance_table_lock_by = acquired_by
            self.save_state(['balance_table_lock_by'])
        for transaction in block['transactions']:
            if transaction is not None:  # blocks are padded with None slots
                transaction_id, transaction_content = transaction
                self.update_balance_table(transaction_content)
        if lock_balance_table:
            self.balance_table_lock_by = released_by
            self.save_state(['balance_table_lock_by'])
            self.balance_table_lock.release()
    def send_clients_responses(self, block, lock_balance_table=True):
        # Reply success to every client whose transaction landed in `block`,
        # reporting both the committed balance and the estimated balance that
        # includes still-uncommitted blocks.
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        if lock_balance_table:
            self.balance_table_lock.acquire()
            self.balance_table_lock_by = acquired_by
            self.save_state(['balance_table_lock_by'])
        for i, transaction in enumerate(block['transactions']):
            if transaction is not None:
                transaction_id, transaction_content = transaction
                # transaction_content[0] is the client the reply goes back to.
                balance = self.balance_table[transaction_content[0]]
                estimated_balance = self.get_estimate_balance_table(
                    lock_commit_table=False, lock_balance_table=False, lock_blockchain=False)[transaction_content[0]]
                start_new_thread(self.threaded_send_client_response,
                                 (transaction, (True, balance, estimated_balance)))
        if lock_balance_table:
            self.balance_table_lock_by = released_by
            self.save_state(['balance_table_lock_by'])
            self.balance_table_lock.release()
    def threaded_commit_watch(self, block_index):
        # Inform the client if the transaction's block has been committed.
        # Polls until `block_index` is committed AND is the smallest pending
        # watch (head of the commit_watches min-heap), so blocks are applied
        # to the balance table strictly in index order. The loop ends once the
        # responses are sent or this server stops being leader.
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        sent = False
        print(f'Commit watch started for block index {block_index}')
        self.logger.info(f'Commit watch started for block index {block_index}')
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        while not sent and self.server_state == 'Leader':
            self.commit_index_lock.acquire()
            self.commit_index_lock_by = acquired_by
            self.save_state(['commit_index_lock_by'])
            self.commit_watches_lock.acquire()
            self.commit_watches_lock_by = acquired_by
            self.save_state(['commit_watches_lock_by'])
            # Chained comparison: committed far enough AND it is our turn.
            if self.commit_index >= block_index == self.commit_watches[0]:
                self.blockchain_lock.acquire()
                self.blockchain_lock_by = acquired_by
                self.save_state(['blockchain_lock_by'])
                print(f'Leader: Before commit balance table: {self.balance_table}')
                self.logger.info(f'Leader: Before commit balance table: {self.balance_table}')
                block = self.blockchain[block_index]
                print(f'Committing: Block index: {block_index}, Block: {utils.blockchain_print_format([block])}')
                self.logger.info(f'Committing: Block index: {block_index}, Block: {block}')
                self.commit_block(block)
                print(f'Leader: After commit balance table: {self.balance_table}')
                self.logger.info(f'Leader: After commit balance table: {self.balance_table}')
                self.send_clients_responses(block)
                sent = True
                # Remove the commit watch from the commit watch list.
                heappop(self.commit_watches)
                self.blockchain_lock_by = released_by
                self.save_state(['blockchain_lock_by'])
                self.blockchain_lock.release()
                self.save_state(['commit_watches'])
            # NOTE(review): locks below are released in a different order than
            # they were acquired -- appears harmless here, but confirm.
            self.commit_index_lock_by = released_by
            self.save_state(['commit_index_lock_by'])
            self.commit_index_lock.release()
            self.commit_watches_lock_by = released_by
            self.save_state(['commit_watches_lock_by'])
            self.commit_watches_lock.release()
            self.server_state_lock_by = released_by
            self.save_state(['server_state_lock_by'])
            self.server_state_lock.release()
            time.sleep(Server.SLEEP_INTERVAL)
            self.server_state_lock.acquire()
            self.server_state_lock_by = acquired_by
            self.save_state(['server_state_lock_by'])
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
@classmethod
def is_transaction_valid(cls, estimated_balance_table, transactions, new_transaction):
estimated_balance_table_copy = copy.deepcopy(estimated_balance_table)
for transaction in transactions:
if transaction is not None and len(new_transaction[1]) == 3: # transfer transaction
sender, receiver, amount = new_transaction[1]
estimated_balance_table_copy[sender] -= amount
estimated_balance_table_copy[receiver] += amount
if new_transaction is not None and len(new_transaction[1]) == 3:
sender, receiver, amount = new_transaction[1]
if estimated_balance_table_copy[sender] < amount:
return False
return True
    def get_estimate_balance_table(self, lock_balance_table=True, lock_commit_table=True, lock_blockchain=True, from_scratch=False):
        # lock_balance_table: True if lock should be used.
        # Returns the balance table as it will look once every block currently
        # in the blockchain is committed: committed balances plus the net
        # effect of the uncommitted blocks. Balance tables are lists indexed
        # by client id (see table_diff below).
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        def get_balance_table_change(blockchain, start_index):
            # Net per-client balance change contributed by blocks[start_index:].
            table_diff = [0, 0, 0]
            for block in blockchain[start_index:]:
                for transaction in block['transactions']:
                    if transaction is not None:
                        transaction_content = transaction[1]
                        if len(transaction_content) == 3: # transfer transaction
                            sender, receiver, amount = transaction_content
                            table_diff[sender] -= amount
                            table_diff[receiver] += amount
            return table_diff
        if lock_commit_table:
            self.commit_index_lock.acquire()
            self.commit_index_lock_by = acquired_by
            self.save_state(['commit_index_lock_by'])
        if lock_blockchain:
            self.blockchain_lock.acquire()
            self.blockchain_lock_by = acquired_by
            self.save_state(['blockchain_lock_by'])
        if lock_balance_table:
            self.balance_table_lock.acquire()
            self.balance_table_lock_by = acquired_by
            self.save_state(['balance_table_lock_by'])
        balance_table_copy = copy.deepcopy(self.balance_table)
        estimated_balance_table = [100, 100, 100]
        if from_scratch:
            # replay the whole chain on top of the initial table (the
            # [100, 100, 100] literal above, i.e. every client starts at 100)
            balance_table_diff = get_balance_table_change(self.blockchain, start_index=0)
        elif len(self.blockchain) - 1 == self.commit_index:
            # last index is committed no need to update
            estimated_balance_table = balance_table_copy
            balance_table_diff = [0, 0, 0]
        else:
            # from commit index
            estimated_balance_table = balance_table_copy
            balance_table_diff = get_balance_table_change(self.blockchain, start_index=self.commit_index + 1)
        for i, diff in enumerate(balance_table_diff):
            estimated_balance_table[i] += diff
        if lock_commit_table:
            self.commit_index_lock_by = released_by
            self.save_state(['commit_index_lock_by'])
            self.commit_index_lock.release()
        if lock_blockchain:
            self.blockchain_lock_by = released_by
            self.save_state(['blockchain_lock_by'])
            self.blockchain_lock.release()
        if lock_balance_table:
            self.balance_table_lock_by = released_by
            self.save_state(['balance_table_lock_by'])
            self.balance_table_lock.release()
        return estimated_balance_table
    def threaded_proof_of_work(self):
        # Leader-only mining loop: drain the transaction queue, validate each
        # transaction against the estimated balances, and try one random nonce
        # per pass; a hash ending in '0'/'1'/'2' mints the next block, which is
        # appended, replicated via append requests, and handed to a commit watch.
        func_name = sys._getframe().f_code.co_name
        acquired_by = 'ACQUIRED by ' + func_name
        released_by = 'RELEASED by ' + func_name
        # Doing proof of work based on the queue of transactions.
        transactions = []
        nonce = None
        found = False
        estimated_balance_table = self.get_estimate_balance_table()
        # print(f'estimated_balance_table: {estimated_balance_table}')
        self.server_state_lock.acquire()
        self.server_state_lock_by = acquired_by
        self.save_state(['server_state_lock_by'])
        while self.server_state == 'Leader':
            # Add new transactions to current proof of work.
            self.server_state_lock_by = released_by
            self.save_state(['server_state_lock_by'])
            self.server_state_lock.release()
            self.transaction_queue_lock.acquire()
            self.transaction_queue_lock_by = acquired_by
            self.save_state(['transaction_queue_lock_by'])
            # NOTE(review): the queue lock is released inside the loop below
            # (after the first popleft) but never re-acquired before the next
            # iteration; a second iteration would release an unheld lock, which
            # raises RuntimeError on a plain threading.Lock -- confirm the lock
            # is reentrant, or hoist the release after the loop.
            while len(transactions) < Server.MAX_TRANSACTION_COUNT and len(self.transaction_queue) > 0:
                # print(f'before transaction: {self.transaction_queue}')
                transaction = self.transaction_queue.popleft()
                # print(f'after transaction: {self.transaction_queue}')
                self.transaction_queue_lock_by = released_by
                self.save_state(['transaction_queue_lock_by'])
                self.transaction_queue_lock.release()
                if Server.is_transaction_valid(estimated_balance_table, transactions, transaction): # Transaction valid
                    transactions.append(transaction)
                else: # Transaction invalid.
                    self.balance_table_lock.acquire()
                    self.balance_table_lock_by = acquired_by
                    self.save_state(['balance_table_lock_by'])
                    transaction_content = transaction[1]
                    balance = self.balance_table[transaction_content[0]]
                    estimated_balance = self.get_estimate_balance_table(lock_balance_table=False)[transaction_content[0]]
                    self.balance_table_lock_by = released_by
                    self.save_state(['balance_table_lock_by'])
                    self.balance_table_lock.release()
                    start_new_thread(self.threaded_send_client_response,
                                     (transaction, (False, balance, estimated_balance)))
            self.save_state(['transaction_queue'])
            self.transaction_queue_lock.acquire()
            self.transaction_queue_lock_by = acquired_by
            self.save_state(['transaction_queue_lock_by'])
            self.transaction_queue_lock_by = released_by
            self.save_state(['transaction_queue_lock_by'])
            self.transaction_queue_lock.release()
            # Do proof of work if transactions are not empty.
            if len(transactions) > 0:
                nonce = utils.generate_random_string_with_ending(length=6, ending={'0', '1', '2'})
                cur_pow = utils.get_hash(transactions, nonce)
                if '2' >= cur_pow[-1] >= '0':
                    found = True
            # If PoW is found:
            if found:
                self.server_state_lock.acquire()
                self.server_state_lock_by = acquired_by
                self.save_state(['server_state_lock_by'])
                if self.server_state == 'Leader':
                    # Update the blockchain.
                    self.server_term_lock.acquire()
                    self.server_term_lock_by = acquired_by
                    self.save_state(['server_term_lock_by'])
                    self.accept_indexes_lock.acquire()
                    self.accept_indexes_lock_by = acquired_by
                    self.save_state(['accept_indexes_lock_by'])
                    self.commit_watches_lock.acquire()
                    self.commit_watches_lock_by = acquired_by
                    self.save_state(['commit_watches_lock_by'])
                    self.blockchain_lock.acquire()
                    self.blockchain_lock_by = acquired_by
                    self.save_state(['blockchain_lock_by'])
                    # Chain the new block to the previous one via its hash.
                    phash = None
                    if len(self.blockchain) > 0:
                        previous_nonce = self.blockchain[-1]['nonce']
                        previous_transactions = self.blockchain[-1]['transactions']
                        phash = utils.get_hash(previous_transactions, previous_nonce)
                    # Pad the block to a fixed size with None slots.
                    while len(transactions) < Server.MAX_TRANSACTION_COUNT:
                        transactions.append(None)
                    block = {
                        'term': self.server_term,
                        'phash': phash,
                        'nonce': nonce,
                        'transactions': transactions,
                    }
                    self.blockchain.append(block)
                    self.accept_indexes[self.server_id] = len(self.blockchain) - 1
                    block_index = len(self.blockchain) - 1
                    # Send append request.
                    start_new_thread(self.threaded_send_append_request, (self.other_servers,))
                    # Call commit watch.
                    heappush(self.commit_watches, block_index)
                    self.save_state(['commit_watches'])
                    start_new_thread(self.threaded_commit_watch, (block_index,))
                    self.save_state(['blockchain', 'accept_indexes'])
                    self.server_term_lock_by = released_by
                    self.save_state(['server_term_lock_by'])
                    self.server_term_lock.release()
                    self.accept_indexes_lock_by = released_by
                    self.save_state(['accept_indexes_lock_by'])
                    self.accept_indexes_lock.release()
                    self.commit_watches_lock_by = released_by
                    self.save_state(['commit_watches_lock_by'])
                    self.commit_watches_lock.release()
                    self.blockchain_lock_by = released_by
                    self.save_state(['blockchain_lock_by'])
                    self.blockchain_lock.release()
                    print("Proof of work is done.")
                    self.logger.info(f'Proof of work is done for block: {block}')
                # Reset proof of work variables.
                transactions = []
                nonce = None
                found = False
                estimated_balance_table = self.get_estimate_balance_table()
                self.server_state_lock_by = released_by
                self.save_state(['server_state_lock_by'])
                self.server_state_lock.release()
            time.sleep(Server.SLEEP_INTERVAL)
            self.server_state_lock.acquire()
            self.server_state_lock_by = acquired_by
            self.save_state(['server_state_lock_by'])
        self.server_state_lock_by = released_by
        self.save_state(['server_state_lock_by'])
        self.server_state_lock.release()
def threaded_on_receive_client(self, connection):
# Receive transaction request from client.
func_name = sys._getframe().f_code.co_name
acquired_by = 'ACQUIRED by ' + func_name
released_by = 'RELEASED by ' + func_name
header, sender, receiver, message = utils.receive_message(connection)
self.logger.info(f"Received {header} from Client {sender}: {message}")
self.server_state_lock.acquire()
self.server_state_lock_by = acquired_by
self.save_state(['server_state_lock_by'])
if self.server_state == 'Leader': # Process the request.
transaction = message['transaction']
transaction_id = transaction[0]
self.transaction_queue_lock.acquire()
self.transaction_queue_lock_by = acquired_by
self.save_state(['transaction_queue_lock_by'])
self.transaction_ids_lock.acquire()
self.transaction_ids_lock_by = acquired_by
self.save_state(['transaction_ids_lock_by'])
if transaction_id not in self.transaction_ids: # Transactions hasn't been processed yet.
self.transaction_ids.add(transaction_id)
self.transaction_queue.append(transaction)
self.save_state(['transaction_ids', 'transaction_queue'])
self.server_state_lock_by = released_by
self.save_state(['server_state_lock_by'])
self.server_state_lock.release()
self.transaction_queue_lock_by = released_by
self.save_state(['transaction_queue_lock_by'])
self.transaction_queue_lock.release()
self.transaction_ids_lock_by = released_by
self.save_state(['transaction_ids_lock_by'])
self.transaction_ids_lock.release()
else: # Relay the client message to the current leader.
self.server_state_lock_by = released_by
self.save_state(['server_state_lock_by'])
self.server_state_lock.release()
while True:
time.sleep(Server.SLEEP_INTERVAL)
self.leader_id_lock.acquire()
self.leader_id_lock_by = acquired_by
self.save_state(['leader_id_lock_by'])
if self.leader_id is not None: # Wait until a leader is elected.
msg = (header, self.server_id, self.leader_id, message)
start_new_thread(utils.send_message, (msg, server.CHANNEL_PORT))
self.leader_id_lock_by = released_by
self.save_state(['leader_id_lock_by'])
self.leader_id_lock.release()
break
self.leader_id_lock_by = released_by
self.save_state(['leader_id_lock_by'])
self.leader_id_lock.release()
def start_client_listener(self):
# Start listener for client messages.
self.sockets[0].listen(Server.MAX_CONNECTION)
while True:
connection, (ip, port) = self.sockets[0].accept()
start_new_thread(self.threaded_on_receive_client, (connection,))
def start(self):
# Start the listeners for messages and timeout watches.
# creating persistence folder
if not os.path.exists(f'server_{self.server_id}_states'):
os.makedirs(f'server_{self.server_id}_states')
# Load the state, if any.
persistent_states = ['server_state', 'leader_id', 'server_term', 'servers_operation_last_seen', 'servers_log_next_index',
'accept_indexes', 'commit_index', 'commit_watches', 'last_election_time', 'voted_candidate', 'received_votes', 'blockchain',
'first_blockchain_read', 'balance_table', 'transaction_queue', 'transaction_ids']
self.load_state(persistent_states)
if not self.first_blockchain_read:
with open('first_blockchain_processed.pkl', 'rb') as _fb:
self.blockchain = pickle.load(_fb)
self.commit_index = len(self.blockchain) - 1
self.first_blockchain_read = True
self.balance_table = self.get_estimate_balance_table(from_scratch=True)
self.servers_log_next_index = [len(self.blockchain), len(self.blockchain), len(self.blockchain)]
self.accept_indexes = [self.commit_index, self.commit_index, self.commit_index]
self.save_state(persistent_states)
# Start/Resume operations based on the server state.
threads = [(self.start_client_listener, ()), (self.start_vote_listener, ()), (self.start_operation_listener, ())]
if self.server_state in ('Follower', 'Candidate'):
threads.append((self.threaded_leader_election_watch, ()))
else: # Leader.
threads.append((self.threaded_send_heartbeat, ()))
threads.append((self.threaded_proof_of_work, ()))
for commit_watch in self.commit_watches:
threads.append((self.threaded_commit_watch, (commit_watch,)))
for receiver in self.other_servers:
threads.append((self.start_threaded_response_watch, (receiver,)))
for (thread, args) in threads:
start_new_thread(thread, args)
print(f'Starts as {self.server_state} for Term: {self.server_term}')
self.logger.info(f'Starts as {self.server_state}! Term: {self.server_term}')
while 1:
pass
if __name__ == '__main__':
server = Server()
server.start()
| zexihuang/raft-blockchain | server.py | server.py | py | 65,440 | python | en | code | 4 | github-code | 13 |
25375789729 | #!/usr/bin/env python
# coding: utf-8
import os
import sys
import numpy as np
class Target:
    """Target trajectory loaded from a whitespace-delimited state file.

    Each row is [t, E, N, U, VE, VN, VU, AE, AN, AU] in ENU coordinates
    (grounded by the column unpacking in __init__).
    """
    def __init__(self, pathToTargetFile):
        self.targetStateData = np.loadtxt(pathToTargetFile)
        (self.numberOfPoints, self.sizeOfState) = np.shape(self.targetStateData)
        self.time = self.targetStateData[:, 0]
        self.East = self.targetStateData[:, 1]
        self.North = self.targetStateData[:, 2]
        self.Up = self.targetStateData[:, 3]
        self.VEast = self.targetStateData[:, 4]
        self.VNorth = self.targetStateData[:, 5]
        self.VUp = self.targetStateData[:, 6]
        self.AEast = self.targetStateData[:, 7]
        self.ANorth = self.targetStateData[:, 8]
        self.AUp = self.targetStateData[:, 9]
    def GetState(self, time):
        """Linearly interpolate the full state vector at `time`.

        Note: column 0 of the file is the time column and is interpolated
        like any other column, so the returned array starts with the time
        value twice -- presumably intentional; confirm with callers.
        """
        state = np.zeros((self.sizeOfState,))
        for index in np.arange(self.sizeOfState):
            state[index] = np.interp(time, self.time, self.targetStateData[:, index])
        return np.append(np.array([time]), state)
    def IsTargetInBeam(self, boresightAzimuth, boresightElevation, diffractionAngleBeamAz, diffractionAngleBeamEl, targetPositionENU, debug):
        """True when the angle between boresight and the target direction is
        within half of the narrower of the two beamwidths.

        Uses cos(angle) = (a/|a|) . (b/|b|) with unit vectors built from the
        boresight azimuth (measured from North toward East) and elevation.
        """
        targetPosNorm = np.linalg.norm(targetPositionENU)
        targetPosNormENUVector = targetPositionENU / targetPosNorm
        if debug:
            print("This should be 1.0: {}".format(np.linalg.norm(targetPosNormENUVector)))
        # forming the boresight unit vector
        eastBoresight = 1.0 * np.sin(boresightAzimuth) * np.cos(boresightElevation)
        northBoresight = 1.0 * np.cos(boresightAzimuth) * np.cos(boresightElevation)
        upBoresight = 1.0 * np.sin(boresightElevation)
        boresightVector = np.array([eastBoresight, northBoresight, upBoresight])
        if debug:
            print("This should be 1.0: {}".format(np.linalg.norm(boresightVector)))
            print("Boresight East: {}, Boresight North {}, Boresigth Up {}".format(boresightVector[0], boresightVector[1], boresightVector[2]))
        # calculate the angle between boresight and target direction
        theta = np.arccos(np.dot(targetPosNormENUVector, boresightVector))
        if debug:
            print("Theta is: {}".format(theta))
        # compare to half of the narrower beam angle
        beamAngle = np.min(np.array([diffractionAngleBeamAz, diffractionAngleBeamEl]))
        if debug:
            print("Half of beamangle is: {}".format(beamAngle / 2.0))
        if (theta < beamAngle / 2.0):
            isInBeam = True
        else:
            isInBeam = False
        return isInBeam
    def IsTargetInRange(self, PositionENU, PulseDuration, PRI):
        """True when the target lies between the radar's minimum (blind) range
        and its maximum unambiguous range.

        Bug fix: the original referenced an undefined `Detection` object and
        ignored all three arguments, so it always raised NameError. Standard
        radar limits are used instead: Rmin = c*tau/2 (receiver blind while
        the pulse is still transmitting) and Rmax = c*(PRI - tau)/2 (echo
        must return before the next pulse). TODO: confirm these definitions
        match the rest of the project.
        """
        c = 299792458.0  # speed of light, m/s
        targetRange = np.linalg.norm(PositionENU)
        rMin = c * PulseDuration / 2.0
        rMax = c * (PRI - PulseDuration) / 2.0
        if (targetRange < rMax and targetRange > rMin):
            isInRange = True
        else:
            isInRange = False
        return isInRange
| bishopaudrey/radar-modeling | target_model.py | target_model.py | py | 3,028 | python | en | code | 0 | github-code | 13 |
34908727216 | # -*- coding:utf-8 -*-
# @Time : 2019/8/22 13:18
# @Author : Junwu Yu
'''
给定一个未排序的整数数组,找出最长连续序列的长度。
要求算法的时间复杂度为 O(n)。
示例:
输入: [100, 4, 200, 1, 3, 2]
输出: 4
解释: 最长连续序列是 [1, 2, 3, 4]。它的长度为 4。
'''
class Solution:
    def longestConsecutive(self, nums) -> int:
        """Length of the longest run of consecutive integers in nums, O(n).

        Bug fix: the original "bitmap" approach allocated max(nums)+1 slots
        (MemoryError for large values, as the author's own comment noted) and
        indexed by value, which breaks on negative numbers. This set-based
        scan handles any ints in O(n) expected time and O(n) space.
        """
        num_set = set(nums)
        longest = 0
        for num in num_set:
            # Only start counting at the beginning of a run, so each value
            # is visited a constant number of times overall.
            if num - 1 not in num_set:
                current = num
                length = 1
                while current + 1 in num_set:
                    current += 1
                    length += 1
                longest = max(longest, length)
        return longest
    def longestConsecutive_2(self, nums) -> int:
        """Hash-based O(n) variant: hash_dict[v] stores the length of the run
        containing v, kept accurate only at the run endpoints."""
        hash_dict = dict()
        max_length = 0
        for num in nums:
            if num not in hash_dict:  # skip duplicates
                # dict.get() returns the default when the key is absent.
                left = hash_dict.get(num - 1, 0)
                right = hash_dict.get(num + 1, 0)
                curr_length = 1 + left + right
                if curr_length > max_length:
                    max_length = curr_length
                # num, num-left, num+right are now in the same run, so the
                # run endpoints (and num itself) share the same length.
                hash_dict[num] = curr_length
                hash_dict[num - left] = curr_length
                hash_dict[num + right] = curr_length
        return max_length
# Quick demo: the longest run here is 96..101, so this prints 6.
s = Solution()
nums = [100, 4, 200, 1, 3, 2, 101, 99, 97, 96, 98]
print(s.longestConsecutive_2(nums))
| Yujunw/leetcode_python | 128_最长连续序列.py | 128_最长连续序列.py | py | 1,798 | python | en | code | 0 | github-code | 13 |
16610723237 |
from .dataset import load_xview_metadata, read_labels_file
import json
# Build the localization (scene-list) and classification (building-label CSV)
# split files for the xView2 dataset, for both the val and train splits.
train_data, val_data, _ = load_xview_metadata("/home/c3-0/rohitg/xviewdata/xview/", data_version="v2/", use_tier3=True)
# print(val_data.keys())
val_loc_split = open("loc_val.txt", "w")
train_loc_split = open("loc_train.txt", "w")
val_cls_split = open("cls_val.csv", "w")
train_cls_split = open("cls_train.csv", "w")
print("id,uuid,labels", file=val_cls_split, flush=True)
print("id,uuid,labels", file=train_cls_split, flush=True)
# Damage-severity class ids written into the classification CSVs.
label_mapping_dict = {"no-damage": 0,
                      "minor-damage": 1,
                      "major-damage": 2,
                      "destroyed": 3}
i = 0
for scene_id, scene_data in val_data.items():
    building_polys = read_labels_file(scene_data["post_label_file"])
    # Scenes with at least one building go into the localization split.
    if len(building_polys) != 0:
        print(scene_data["pre_image_file"].split("/")[-1], file=val_loc_split, flush=True)
    for building_poly in building_polys:
        uuid = building_poly['properties']['uid']
        damage_type = building_poly['properties']['subtype']
        # "un-classified" buildings are skipped and consume no row id.
        if damage_type != "un-classified":
            print(i, uuid, label_mapping_dict[damage_type], sep=",", file=val_cls_split, flush=True)
            i += 1
i = 0
for scene_id, scene_data in train_data.items():
    building_polys = read_labels_file(scene_data["post_label_file"])
    if len(building_polys) != 0:
        print(scene_data["pre_image_file"].split("/")[-1], file=train_loc_split, flush=True)
    for building_poly in building_polys:
        uuid = building_poly['properties']['uid']
        damage_type = building_poly['properties']['subtype']
        if damage_type != "un-classified":
            print(i, uuid, label_mapping_dict[damage_type], sep=",", file=train_cls_split, flush=True)
            i += 1
| rohit-gupta/building-damage-assessment | utils/baseline.py | baseline.py | py | 1,869 | python | en | code | 0 | github-code | 13 |
28955367165 | from .test_base import TestBase
from b2.parse_args import parse_arg_list
class TestParseArgs(TestBase):
    """Unit tests for b2.parse_args.parse_arg_list.

    The *_ARGS class attributes below are argument-spec fixtures passed to
    parse_arg_list as **kwargs.
    """
    NO_ARGS = {
        'option_flags': [],
        'option_args': [],
        'list_args': [],
        'optional_before': [],
        'required': [],
        'optional': [],
        'arg_parser': {}
    }
    EVERYTHING = {
        'option_flags': ['optionFlag'],
        'option_args': ['optionArg'],
        'list_args': ['list'],
        'optional_before': [],
        'required': ['required'],
        'optional': ['optional'],
        'arg_parser': {
            'optionArg': int
        }
    }
    BEFORE_AND_AFTER = {
        'option_flags': [],
        'option_args': [],
        'list_args': [],
        'optional_before': ['optionalBefore'],
        'required': ['required'],
        'optional': ['optional'],
        'arg_parser': {}
    }
    def test_no_args(self):
        args = parse_arg_list([], **self.NO_ARGS)
        self.assertTrue(args is not None)
    def test_unexpected_flag(self):
        args = parse_arg_list(['--badFlag'], **self.NO_ARGS)
        self.assertTrue(args is None)
    def test_unexpected_arg(self):
        args = parse_arg_list(['badArg'], **self.NO_ARGS)
        self.assertTrue(args is None)
    def test_option_defaults(self):
        args = parse_arg_list(['req-value'], **self.EVERYTHING)
        self.assertFalse(args.optionFlag)
        self.assertTrue(args.optionArg is None)
        self.assertEqual([], args.list)
        self.assertEqual('req-value', args.required)
        self.assertTrue(args.optional is None)
    def test_all_there(self):
        args = parse_arg_list(
            ['--optionFlag', '--optionArg', '99', '--list', '1', '--list', '2', 'b', 'c'],
            **self.EVERYTHING
        )  # yapf disable
        self.assertTrue(args.optionFlag)
        self.assertEqual(99, args.optionArg)
        self.assertEqual('b', args.required)
        self.assertEqual(['1', '2'], args.list)
        self.assertEqual('c', args.optional)
    def test_optional_arg_missing_value(self):
        args = parse_arg_list(['--optionArg'], **self.EVERYTHING)
        self.assertTrue(args is None)
    def test_no_optional(self):
        args = parse_arg_list(['a'], **self.BEFORE_AND_AFTER)
        self.assertEqual((None, 'a', None), (args.optionalBefore, args.required, args.optional))
    def test_optional_before(self):
        args = parse_arg_list(['a', 'b'], **self.BEFORE_AND_AFTER)
        self.assertEqual(('a', 'b', None), (args.optionalBefore, args.required, args.optional))
    def test_same_arg_in_two_places(self):
        arg_spec = dict(self.NO_ARGS)
        arg_spec['optional_before'] = ['a']
        arg_spec['required'] = ['a']
        # assertRaisesRegexp was a deprecated alias removed in Python 3.12;
        # assertRaisesRegex has been available since 3.2.
        # The "an 'required'" spelling mirrors the library's actual message.
        with self.assertRaisesRegex(
            ValueError, "argument 'a' is in both 'optional_before' an 'required'"
        ):
            parse_arg_list([], **arg_spec)
| jhill69/Hello-World | test/test_parse_args.py | test_parse_args.py | py | 2,896 | python | en | code | 0 | github-code | 13 |
14800654413 | import pygame
from pygame.locals import *
import random
class Comida:
    """Food item (fruit) that can appear on the game board."""
    # Grid-cell position; converted to pixel coordinates in __init__.
    x = 0
    y = 0
    # Index of the fruit currently selected from `frutas`.
    i = 0
    frutas = {}
    def __init__(self, x, y):
        # Load the images for the different kinds of food.
        self.imgManzana = pygame.image.load("imagenes/comida/manzana.png")
        self.imgUvas = pygame.image.load("imagenes/comida/uvas.png")
        self.imgFrutilla = pygame.image.load("imagenes/comida/frutilla.png")
        self.imgNaranja = pygame.image.load("imagenes/comida/naranja.png")
        self.imgBanana = pygame.image.load("imagenes/comida/banana.png")
        # Food coordinates: grid cell scaled by the 44-pixel tile size.
        self.x = x * 44
        self.y = y * 44
        self.i = 0
        # Dictionary of fruits; each entry holds the image and the score
        # assigned to it (one random score shared by all fruits).
        '''Diccionario de las comidas, que a su vez contiene un diccionario
        con la imagen y el puntaje asignado'''
        punto_fruta = random.randint(-500, 500)
        self.frutas = {
            0: {'img': self.imgManzana, 'puntaje': punto_fruta},
            1: {'img': self.imgUvas, 'puntaje': punto_fruta},
            2: {'img': self.imgFrutilla, 'puntaje': punto_fruta},
            3: {'img': self.imgNaranja, 'puntaje': punto_fruta},
            4: {'img': self.imgBanana, 'puntaje': punto_fruta}
        }
    def dibujar(self, superficie):
        # Draw the currently selected fruit onto the given surface.
        superficie.blit(self.frutas[self.i]['img'], (self.x, self.y))
23248973322 | from typing import final
from flask import Flask, render_template, Markup
from flask_table import Table, Col
import os
# Kevin's files
from Kevin import *
from Kevin.backtest import *
# Wenlei's files
from Wenlei.TRIMA_wenlei_cao_get_backtrade_result import *
# Jackie's files
from Jackie.Turner_TradingSystemWithBacktrader import *
# Jordan's files
# Anchit's files
# Expectunity Calculation
from expectunityCalc import *
class ResultsTable(Table):
    # flask_table declarative columns for the per-strategy results grid;
    # attribute names must match the Result row-object attributes.
    stock = Col("Stock")
    initialValue = Col("Initial Value")
    finalValue = Col("Final Value")
    PnL = Col("Profit / Loss")
    pct = Col("Percentage Change")
    ex = Col("Expectunity")
class Result(object):
    """Row object consumed by ResultsTable; attribute names must match its columns."""
    def __init__(self, stock, initialValue, finalValue, PnL, pct, ex):
        columns = ("stock", "initialValue", "finalValue", "PnL", "pct", "ex")
        values = (stock, initialValue, finalValue, PnL, pct, ex)
        for column, value in zip(columns, values):
            setattr(self, column, value)
class PortfolioTable(Table):
    # flask_table declarative columns for the whole-portfolio summary grid;
    # attribute names must match the Portfolio row-object attributes.
    stock = Col("Stock")
    initialValue = Col("Initial Value")
    finalValue = Col("Final Value")
    PnL = Col("Profit / Loss")
    pct = Col("Percentage Change")
    avgwins = Col("Avg Wins")
    avglosses = Col("Avg Losses")
    wins = Col("Wins")
    losses = Col("Losses")
    totaltrades = Col("Total Trades")
    ex = Col("Expectunity")
class Portfolio(object):
    """Row object consumed by PortfolioTable; attribute names must match its columns."""
    def __init__(self, stock, initialValue, finalValue, PnL, pct, avgwins, avglosses, wins, losses, totaltrades, ex):
        columns = ("stock", "initialValue", "finalValue", "PnL", "pct",
                   "avgwins", "avglosses", "wins", "losses", "totaltrades", "ex")
        values = (stock, initialValue, finalValue, PnL, pct,
                  avgwins, avglosses, wins, losses, totaltrades, ex)
        for column, value in zip(columns, values):
            setattr(self, column, value)
self.ex = ex
app = Flask(__name__)
@app.route("/")
def homepage():
return render_template("index.html")
@app.route("/results")
def results():
if os.path.exists("result.csv"):
os.remove("result.csv")
else:
print("The file does not exist")
r = []
backtest()
expectunityKevin = combined_expectunity_calcuation(
"/Users/kevinmartin/Documents/Fall '20/GQP/Trading System/result.csv", 1000)
main()
expectunityWenlei = combined_expectunity_calcuation(
"/Users/kevinmartin/Documents/Fall '20/GQP/Trading System/result.csv", 100)
r.append(Result("Index Funds", 10000, 10986,
986, 986/10000, 3.85))
jackie()
r.append(Result("Software Stocks", 10000,
11066.16, 1066.16, 0.1066, 13.55))
r.append(Result("Electronics Stocks", 10000, 11136,
1136, 0.0378, 10.05))
r.append(Result("Biotech Stocks", 10000, 12446.37,
2446.37, 2446.37/10000, 11.15))
r.append(Result("Aerospace Stocks", 10000, 13125,
3125, 3125/10000, 13.01))
p = []
p.append(Portfolio("Total Portfolio", 50000, 13125+11066.16+11483+10986 +
11136, ((13125+11066.16+11483+10986+11136)-50000).__round__, 0.15592, 22.93, 12.18, 511, 319, 511+319, (10.5 + 3.85 + 13.55 + 13.008 + 13.01)/5))
resultsTable = ResultsTable(r)
portfolioTable = PortfolioTable(p)
return render_template("results.html", expectunityKevin=Markup(expectunityKevin),
expectunityWenlei=Markup(expectunityWenlei),
resultsTable=Markup(resultsTable.__html__()),
portfolioTable=Markup(portfolioTable.__html__()))
if __name__ == '__main__':
    # Development entry point; debug=True must not be used in production.
    app.run(debug=True)
| kmart8/Trading-System | app.py | app.py | py | 3,557 | python | en | code | 0 | github-code | 13 |
2244791689 | """
Overview
========
This plugin implements the basic cursor movements.
Key-Commands
============
Namespace: main-jumps
Mode: NORMAL
Event: <Key-j>
Description: Move the cursor one line down.
Mode: NORMAL
Event: <Key-k>
Description: Move the cursor one line up.
Mode: NORMAL
Event: <Key-h>
Description: Move the cursor one character left.
Mode: NORMAL
Event: <Key-l>
Description: Move the cursor one character right.
"""
def install(area):
    """Register the NORMAL-mode h/j/k/l cursor motions under 'main-jumps'."""
    bindings = (
        ('NORMAL', '<Key-j>', lambda event: event.widget.down()),
        ('NORMAL', '<Key-k>', lambda event: event.widget.up()),
        ('NORMAL', '<Key-h>', lambda event: event.widget.left()),
        ('NORMAL', '<Key-l>', lambda event: event.widget.right()),
    )
    area.install('main-jumps', *bindings)
| vyapp/vy | vyapp/plugins/main_jumps.py | main_jumps.py | py | 779 | python | en | code | 1,145 | github-code | 13 |
29118297963 | import cv2
import numpy as np
from utils import FileController
def main():
    """Grab a frame from a camera or an mp4, detect Shi-Tomasi corners
    (cv2.goodFeaturesToTrack) and display them as circles; optionally save
    the grayscale first frame to ./image_out/detect_gftt.png."""
    has_capture = input('Capture from camera? [y/n]: ') == 'y'
    device_id = 0
    selected_video = None if has_capture else FileController.get_file(['mp4'], './video_in')[0]
    capture_from = device_id if has_capture else selected_video
    video_capture = cv2.VideoCapture(capture_from)
    has_save_first_frame = input('Save a first frame? [y/n]: ') == 'y'
    # params for ShiTomasi corner detection
    feature_params = dict(maxCorners=1000, qualityLevel=0.1, minDistance=10, blockSize=7)
    # NOTE(review): `color` is computed but never used below.
    color = np.random.randint(0, 255, (100, 3))
    # take first frame and find corners in it
    retval, frame = video_capture.read()
    frame_gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    corners = cv2.goodFeaturesToTrack(frame_gray, mask=None, **feature_params)
    corners = np.int0(corners)
    # read a second frame and draw the corners found in the first on it
    retval, frame = video_capture.read()
    cv2.namedWindow('frame', cv2.WINDOW_KEEPRATIO)
    for corner in corners:
        i, j = corner.ravel()
        cv2.circle(frame, (i, j), 5, (200, 0, 0), -1)
    cv2.imshow('frame', frame)
    cv2.waitKey(0)  # block until a key is pressed
    cv2.destroyAllWindows()
    if has_save_first_frame:
        cv2.imwrite(f'./image_out/detect_gftt.png', frame_gray)
    video_capture.release()
if __name__ == '__main__':
main()
| mktia/plant_reservoir | detect_corners_by_gftt.py | detect_corners_by_gftt.py | py | 1,330 | python | en | code | 0 | github-code | 13 |
def min_keystrokes(S):
    """Return the number of keypresses needed to type *S* on a keypad that
    has the digit keys 0-9 plus a '00' key (two zeros in one press).

    The original implementation used ``S // 10`` and ``S % 10`` swapped,
    so ``S`` never shrank and the loop ran forever for most inputs
    (e.g. S=3).  This version walks the decimal digits most-significant
    first and greedily uses the '00' key for each pair of consecutive
    zeros (the greedy pairing is optimal: a '00' press never hurts).

    NOTE(review): semantics reconstructed from the original comments
    ("digit is 0 or 00", "num *= 100") — confirm against the problem
    statement.
    """
    count = 0
    digits = str(S)
    i = 0
    while i < len(digits):
        if digits[i] == '0' and i + 1 < len(digits) and digits[i + 1] == '0':
            # one '00' keypress covers two consecutive zeros
            count += 1
            i += 2
        else:
            # any other digit (or a lone zero) costs one press
            count += 1
            i += 1
    return count
# Manual driver: read an integer from stdin and print the keystroke count.
S = int(input())
print(min_keystrokes(S)) # sample expected output: 4
| Yeansovanvathana/Vathana_Python | own project/vathana.py | vathana.py | py | 1,008 | python | en | code | 0 | github-code | 13 |
5328634672 | # -*- coding:utf8 -*-
"""
Created on 2020/3/12 17:43
@author: minc
# 接口
"""
import numpy as np
import pandas as pd
from sklearn.model_selection import train_test_split
from sklearn import metrics
from sklearn.externals import joblib
from sklearn.linear_model import LinearRegression
from sklearn.linear_model import LogisticRegression
from sklearn.naive_bayes import MultinomialNB
from sklearn.neighbors import KNeighborsClassifier
from sklearn.ensemble import RandomForestClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.model_selection import GridSearchCV
from sklearn.neural_network import MLPClassifier
from sklearn.neural_network import MLPRegressor
# Split a dataset into train/test partitions and persist them as CSV files.
def ai_data_cut(df,xlst,ysgn,path,fgPr=False):
    """Split *df* into train/test sets (sklearn, random_state=1) and write
    xtrain/xtest/ytrain/ytest CSV files under the *path* prefix.

    df : pandas.DataFrame source data
    xlst : feature column names
    ysgn : target column name
    path : output path prefix for the four CSV files
    fgPr : print the tails of the four partitions when True
    """
    x,y = df[xlst],df[ysgn]
    x_train,x_test,y_train,y_test = train_test_split(x,y,random_state=1)
    fss = path+'xtrain.csv';x_train.to_csv(fss,index=False);
    fss = path+'xtest.csv';x_test.to_csv(fss,index=False);
    fss = path+'ytrain.csv';y_train.to_csv(fss,index=False,header=True);
    fss = path+'ytest.csv';y_test.to_csv(fss,index=False,header=True);
    if fgPr:
        print(x_train.tail())
        print('-------------')
        print(y_train.tail())
        print('-------------')
        print(x_test.tail())
        print('-------------')
        print(y_test.tail())
# Read back the four partition CSVs written by ai_data_cut.
def ai_dat_rd(path,k0=1,fgPr=False):
    """Load xtrain/xtest/ytrain/ytest from the *path* prefix; the target
    column is scaled by *k0*, rounded and cast to int.

    Returns (x_train, x_test, y_train, y_test).
    """
    fss = path+'xtrain.csv';x_train=pd.read_csv(fss,index_col=False);
    fss = path+'xtest.csv';x_test=pd.read_csv(fss,index_col=False);
    fss = path+'ytrain.csv';y_train=pd.read_csv(fss,index_col=False);
    fss = path+'ytest.csv';y_test=pd.read_csv(fss,index_col=False);
    ysgn = y_train.columns[0]
    # scale + round the target to integers (classification-style labels)
    y_train[ysgn] = round(y_train[ysgn]*k0).astype(int)
    y_test[ysgn] = round(y_test[ysgn]*k0).astype(int)
    if fgPr:
        print(x_train.tail())
        print('-------------')
        print(y_train.tail())
        print('-------------')
        print(x_test.tail())
        print('-------------')
        print(y_test.tail())
    return x_train,x_test,y_train,y_test
# Accuracy metric: share of predictions whose relative error is under a threshold.
def ai_acc_xed(df9, ky0=5, fgDebug=False):
    """Return the percentage of rows whose relative prediction error is
    below *ky0* percent.

    df9 : pandas.DataFrame holding 'y_test' and 'y_pred' columns; the
          intermediate error columns (ysub, ysub2, y_test_div, ysubk)
          are added to it in place.
    ky0 : relative-error threshold in percent (use 1 for integer labels)
    fgDebug : also print MAE/MSE/RMSE statistics when True
    """
    n_rows = len(df9['y_test'])
    ny_pred = len(df9['y_pred'])  # kept for parity with the original bookkeeping
    df9['ysub'] = df9['y_test'] - df9['y_pred']
    df9['ysub2'] = np.abs(df9['ysub'])
    df9['y_test_div'] = df9['y_test']
    # guard against division by zero for rows whose ground truth is 0
    df9.loc[df9['y_test'] == 0, 'y_test_div'] = 0.00001
    df9['ysubk'] = (df9['ysub2'] / df9['y_test_div']) * 100
    within_band = df9[df9['ysubk'] < ky0]
    dacc = len(within_band['y_pred']) / n_rows * 100
    if fgDebug:
        print(df9.head())
        y_test, y_pred = df9['y_test'], df9['y_pred']
        # mean absolute error
        dmae = metrics.mean_absolute_error(y_test, y_pred)
        # mean squared error
        dmse = metrics.mean_squared_error(y_test, y_pred)
        # root mean squared error
        drmse = np.sqrt(metrics.mean_squared_error(y_test, y_pred))
        print('acc-kok:{0:.2f}%,MAE:{1:.2f},MSE:{2:.2f},RMSE:{3:.2f}'.format(dacc, dmae, dmse, drmse))
    return dacc
#------------ machine-learning model factories ------------
# Each factory trains the corresponding sklearn estimator and returns it.
# Linear regression
def mx_line(train_x,train_y):
    mx = LinearRegression()
    mx.fit(train_x,train_y)
    return mx
# Logistic regression
def mx_log(train_x,train_y):
    mx = LogisticRegression(penalty='l2')
    mx.fit(train_x,train_y)
    return mx
# Multinomial naive Bayes
def mx_bayes(train_x,train_y):
    mx = MultinomialNB(alpha=0.01)
    mx.fit(train_x,train_y)
    return mx
# K-nearest-neighbours classifier
def mx_knn(train_x,train_y):
    mx = KNeighborsClassifier()
    mx.fit(train_x,train_y)
    return mx
# Random forest classifier
def mx_forest(train_x,train_y):
    mx = RandomForestClassifier(n_estimators=8)
    mx.fit(train_x,train_y)
    return mx
# Decision tree classifier
def mx_dtree(train_x,train_y):
    mx = DecisionTreeClassifier()
    mx.fit(train_x,train_y)
    return mx
# GBDT gradient-boosted decision trees
def mx_GBDT(train_x,train_y):
    mx = GradientBoostingClassifier(n_estimators=200)
    mx.fit(train_x,train_y)
    return mx
# Support vector machine (RBF kernel)
def mx_SVM(train_x,train_y):
    mx = SVC(kernel='rbf',probability=True)
    mx.fit(train_x,train_y)
    return mx
# SVM with grid-search cross-validation over C and gamma
def mx_svm_cross(train_x,train_y):
    mx = SVC(kernel='rbf',probability=True)
    param_grid = {'C':[1e-3,1e-2,1e-1,1,10,100,1000],'gamma':[0.001,0.0001]}
    grid_search = GridSearchCV(mx,param_grid,n_jobs=1,verbose=1)
    grid_search.fit(train_x,train_y)
    best_parameters = grid_search.best_estimator_.get_params()
    # refit a fresh SVC with the best hyper-parameters found
    mx = SVC(kernel='rbf',C=best_parameters['C'],gamma=best_parameters['gamma'],probability=True)
    mx.fit(train_x,train_y)
    return mx
# Multi-layer-perceptron classifier
def mx_MLP(train_x,train_y):
    #mx = MLPClassifier(solver='lbfgs',alpha=1e-5,hidden_layer_sizes=(5,2),random_state=1)
    mx = MLPClassifier()
    mx.fit(train_x,train_y)
    return mx
# Multi-layer-perceptron regressor
def mx_MLP_reg(train_x,train_y):
    #mx = MLPClassifier(solver='lbfgs',alpha=1e-5,hidden_layer_sizes=(5,2),random_state=1)
    mx = MLPRegressor()
    mx.fit(train_x,train_y)
    return mx
#------------ machine-learning model factories ------------
# registry of the callable model names
mxfunLst = ['line','log','bayes','knn','forest','dtree','gbdt','svm','svmcr','mlp','mlpreg']
# registry mapping each short name to its factory function
mxfunSgn = {'line':mx_line,
            'log':mx_log,
            'bayes':mx_bayes,
            'knn':mx_knn,
            'forest':mx_forest,
            'dtree':mx_dtree,
            'gbdt':mx_GBDT,
            'svm':mx_SVM,
            'svmcr':mx_svm_cross,
            'mlp':mx_MLP,
            'mlpreg':mx_MLP_reg}
# Unified interface: train the named model and score its test-set predictions.
def mx_fun010(funSgn,x_train,x_test,y_train,y_test,yk0=5,fgInt=False,fgDebug=False):
    """Train the model registered under *funSgn* in mxfunSgn, predict on the
    test set, and return (accuracy-within-yk0-percent, result DataFrame).

    fgInt : round predictions to integers when True
    fgDebug : dump results to tmp/pred_result.csv and print the accuracy
    """
    df9 = x_test.copy()
    mx_fun = mxfunSgn[funSgn]
    mx = mx_fun(x_train.values,y_train.values)
    y_pred = mx.predict(x_test.values)
    df9['y_test'],df9['y_pred'] = y_test,y_pred
    if fgInt:
        df9['y_predsr'] = y_pred
        df9['y_pred'] = round(df9['y_predsr']).astype(int)
    dacc = ai_acc_xed(df9,yk0,fgDebug)
    if fgDebug:
        df9.to_csv('tmp/pred_result.csv')
        print('@mx:mx_sum,kok:{0:.2f}'.format(dacc))
    return dacc,df9
# Batch runner: evaluate every model named in *funlst* on the same split.
def mx_funlst(funlst,x_train,x_test,y_train,y_test,yk0=5,fgInt=False):
    for funSgn in funlst:
        print('function---',funSgn)
        mx_fun010(funSgn,x_train,x_test,y_train,y_test,yk0,fgInt)
# All-in-one interface: split *df*, train the named model and evaluate it.
def mx_fun_call(df,xlst,ysgn,funSgn,yksiz=1,yk0=5,fgInt=True,fgDebug=False):
    '''
    :param df: source data, pandas.DataFrame
    :param xlst: feature column names
    :param ysgn: target column name
    :param funSgn: registered model name (key of mxfunSgn)
    :param yksiz: scaling factor applied to the target column
    :param yk0: accepted relative error (percent) for ai_acc_xed
    :param fgInt: round predictions to integers when True
    :param fgDebug: debug-mode flag
    :return: (accuracy percentage, result DataFrame)
    '''
    # NOTE: the target column is scaled/rounded in place on the caller's df
    df[ysgn] = df[ysgn].astype(float)
    df[ysgn] = round(df[ysgn]*yksiz).astype(int)
    x,y = df[xlst],df[ysgn]
    x_train, x_test, y_train, y_test = train_test_split(x, y, random_state=1)
    # predict
    df9 = x_test.copy()
    mx_fun = mxfunSgn[funSgn]
    mx = mx_fun(x_train.values,y_train.values)
    y_pred = mx.predict(x_test.values)
    df9['y_test'],df9['y_pred'] = y_test,y_pred
    if fgInt:
        df9['y_predsr'] = y_pred
        df9['y_pred'] = round(df9['y_predsr']).astype(int)
    dacc = ai_acc_xed(df9,yk0,fgDebug)
    if fgDebug:
        df9.to_csv('tmp/pred_result.csv')
    return dacc,df9
# Train the named model and serialize it to *ftg* with joblib.
def ai_f_mxWr(ftg,funSgn,x_train,y_train):
    mx_fun = mxfunSgn[funSgn]
    mx = mx_fun(x_train.values,y_train.values)
    joblib.dump(mx,ftg)
# Score an already-trained model on the test set (see mx_fun010 for flags).
def mx_fun8mx(mx,x_test,y_test,yk0=5,fgInt=True,fgDebug=False):
    """Predict with the fitted estimator *mx* and return
    (accuracy-within-yk0-percent, result DataFrame)."""
    df9 = x_test.copy()
    y_pred = mx.predict(x_test.values)
    df9['y_test'],df9['y_pred'] = y_test,y_pred
    if fgInt:
        df9['y_predsr'] = y_pred
        df9['y_pred'] = round(df9['y_predsr']).astype(int)
    dacc = ai_acc_xed(df9, yk0, fgDebug)
    if fgDebug:
        df9.to_csv('tmp/pred_result.csv')
    return dacc, df9
# Train and save every model named in *mxlst* as <ftg0><name>.pkl.
def ai_f_mxWrlst(ftg0,mxlst,x_train,y_train):
    for funSgn in mxlst:
        ftg = ftg0+funSgn+'.pkl'
        print(ftg)
        ai_f_mxWr(ftg, funSgn, x_train, y_train)
# Read a single CSV; optionally scale and round its first column to ints.
def ai_f_datRd010(fsr,k0=0,fgPr=False):
    df = pd.read_csv(fsr,index_col = False)
    if k0>0:
        # first column is assumed to be the target; scale + cast to int
        ysgn = df.columns[0]
        df[ysgn] = round(df[ysgn]*k0).astype(int)
    if fgPr:
        print(df.tail())
    return df
# Global registry of deserialized models, keyed by their short name.
xmodel = {}
# Load every model named in *funlst* from <fsr0><name>.pkl into xmodel.
def ai_f_mxRdlst(fsr0,funlst):
    for funSgn in funlst:
        fss = fsr0 + funSgn + '.pkl'
        xmodel[funSgn] = joblib.load(fss)
# Evaluate every loaded model in *mxlst* (from xmodel) and print its accuracy.
def mx_funlst8mx(mxlst,x_test,y_test,yk0=5,fgInt=False):
    for msgn in mxlst:
        mx = xmodel[msgn]
        dacc,df9 = mx_fun8mx(mx,x_test, y_test,yk0,fgInt)
        print(msgn,dacc)
def mx_mul(mlst, x_test, y_test, yk0=5, fgInt=False, fgDebug=False):
    """Ensemble prediction: average the predictions of the models named in
    *mlst* (looked up in the module-level ``xmodel`` registry) and score the
    averaged prediction with :func:`ai_acc_xed`.

    Per-model predictions are kept in columns 'y_pred1', 'y_pred2', ...
    of the returned DataFrame.  Returns (accuracy, result DataFrame).

    fgInt is accepted for interface compatibility with the other mx_*
    entry points but is not used here (same as the original).
    Cleanup vs. the original: the unused formatted progress string and the
    dead in-loop ``dacc`` binding were removed; behavior is unchanged.
    """
    df9, xc, mxn9 = x_test.copy(), 0, len(mlst)
    df9['y_test'] = y_test
    for msgn in mlst:
        xc += 1
        mx = xmodel[msgn]
        y_pred = mx.predict(x_test.values)
        if xc == 1:
            df9['y_sum'] = y_pred
        else:
            df9['y_sum'] = df9['y_sum'] + y_pred
        df9['y_pred'] = y_pred
        # score each individual model (prints per-model stats in debug mode
        # and adds the error columns to df9 as a side effect)
        ai_acc_xed(df9, 1, fgDebug)
        df9['y_pred' + str(xc)] = y_pred
    # final prediction = mean of all model predictions
    df9['y_pred'] = df9['y_sum'] / mxn9
    dacc = ai_acc_xed(df9, yk0, fgDebug)
    if fgDebug:
        df9.to_csv('tmp/pred_result.csv')
    return dacc, df9
if __name__ == '__main__':
    # Smoke test: load the pre-split CCPP dataset and run the linear model.
    fsr0 = 'tmp/ccpp_'
    x_train, x_test, y_train, y_test = ai_dat_rd(fsr0)
    funSgn = 'line'
dacc, df9 = mx_fun010(funSgn, x_train, x_test, y_train, y_test, 5, False, True) | wminc/Machine_Learning | scikit-learn/api.py | api.py | py | 10,177 | python | en | code | 0 | github-code | 13 |
21059517764 | import pandas as pd
from reed_solomon_code.ReedSolomonCode import ReedSolomonCode
def rs_stat_test(solomon, poly_errors, parity_errors):
    """Monte-Carlo estimate (percentage, 2 decimals) of how often decoding
    succeeds when *poly_errors* bit errors are injected into the message
    part and *parity_errors* into the parity part of a codeword.

    Cleanup vs. the original: removed the unused ``decoded_but_good``
    counter and narrowed the bare ``except:`` to ``except Exception``.

    :type solomon: ReedSolomonCode
    """
    seen_messages = []
    correct_probes = 0
    count = 1000
    i = 0
    while i < count:
        rand_message = ReedSolomonCode.generate_random_message(solomon.k, solomon.m)
        # sample without repetition: retry when the message was already tried
        if rand_message in seen_messages:
            continue
        seen_messages.append(rand_message)
        message = ReedSolomonCode.array_to_binary(rand_message, solomon.m)
        encoded_message = solomon.encode_number(message)
        message_with_errors = solomon.add_errors_string(poly_errors, encoded_message, is_parity=False)
        message_with_errors = solomon.add_errors_string(parity_errors, message_with_errors, is_parity=True)
        try:
            decoded_message = solomon.simple_mochnacki_decoder(message_with_errors)
            if solomon.add_missing_zeros(decoded_message, encoding=False) == solomon.add_missing_zeros(encoded_message,
                                                                                                       encoding=False):
                correct_probes += 1
            i += 1
        except Exception:
            # decoder failure counts as a completed-but-unsuccessful probe
            i += 1
            continue
    return round((correct_probes / count) * 100, 2)
# Sweep 0..4 message errors x 0..4 parity errors and record the decoding
# success rate for each combination; column headers are Polish
# (message errors / parity errors / decode success % / corruption degree %).
solomon = ReedSolomonCode(4, 3)
arr = []
for i in range(5):
    for j in range(5):
        arr.append([i, j, rs_stat_test(solomon, i, j), round((((i + j) / 15) * 100), 2)])
frame = pd.DataFrame(data=arr, columns=['błędy w wiadomości', 'błędy w części kontrolnej', 'sukces dekodowania [%]',
                                        'stopień zepsucia wiadomości [%]'])
frame.to_csv('../csv/mochnacki.csv', sep=';')
| Mazako/NIDUC_projekt | tests/mochnacki.py | mochnacki.py | py | 1,736 | python | en | code | 0 | github-code | 13 |
25756279389 | import pathlib
import lib
# Open the SQLite database under ./data and report the current token holder.
data_path = pathlib.Path('data')
db = data_path.joinpath('db.sqlite')
cn = lib.get_sqlite_conn(db)
# get_holder returns rows; presumably row[0] is the holder's name and
# row[2] the resource the token is for — confirm against lib.get_holder.
holder = lib.get_holder(cn)[0]
print('{} has the token for {}'.format(holder[0], holder[2]))
| sesquivel312/tos | tos/experiment.py | experiment.py | py | 239 | python | en | code | 0 | github-code | 13 |
18234816113 | import pandas as pd
import numpy as np
import plotly.express as px
import streamlit as st
def get_overview(df):
    """Build a two-row summary frame of *df*: project count and the total
    transaction amount in current USD."""
    summary = {
        "name": ["Total Number of Projects", "Total Transaction Amount(current usd)"],
        "value": [df.shape[0], df["usd_current"].sum()],
    }
    return pd.DataFrame(summary)
def table(df, col):
    """Frequency table for *col*: each distinct value with its absolute
    count and its share of all rows (rounded to 3 decimals)."""
    counts = df[col].value_counts()
    shares = np.round(counts.values / df.shape[0], 3)
    return pd.DataFrame({col: counts.index.values,
                         'count': counts.values,
                         'percentage': shares})
def group_select_box(key):
    """Streamlit selectbox for choosing the grouping column; *key* keeps
    multiple instances of the widget distinct."""
    return st.selectbox('How would you like to break down the project',
                        ('flow_class', 'flow', 'crs_sector_name', 'status'), key=key)
def total_transaction_amount_plot(df, select):
    """Bar chart (plotly) of total usd_current per category of *select*,
    sorted descending, with the per-category share in the hover data."""
    df_trans = df.groupby(select).sum()['usd_current'].reset_index()
    df_trans['percentage'] = np.round(df_trans["usd_current"] / (df[["usd_current"]].sum().values.item()), 3)
    df_trans.sort_values("usd_current", ascending=False, inplace=True)
    fig_trans_group = px.bar(df_trans, x=select, y="usd_current",
                             hover_data=[select, 'usd_current', 'percentage'],
                             title="Total Transaction Amount in Each Category")
    return fig_trans_group
def year_slider(the_min=2000, the_max=2020, key=None):
    """Streamlit range slider covering [the_min, the_max], initially spanning
    the full range; returns the selected (low, high) pair."""
    return st.slider('', the_min, the_max, value=[the_min, the_max], key=key)
def year_filter(df_input, year_range, include_year_undef=False):
    """Return the rows of *df_input* whose 'year' lies within *year_range*.

    Rows with a missing year are treated as year 0 ("undefined") and are
    kept only when *include_year_undef* is True.

    Bug fix: the original had the flag inverted — it dropped the
    undefined-year rows exactly when asked to include them (the caller's
    "Include Year Undefined" checkbox therefore excluded them).
    """
    df = df_input.copy()
    df['year'] = df['year'].fillna(0)
    year_min, year_max = year_range[0], year_range[1]
    # keep in-range rows; undefined (0) rows survive this step unconditionally
    df = df[(df['year'] >= year_min) & (df['year'] <= year_max) | (df['year'] == 0)]
    if not include_year_undef:
        # drop the year-0 placeholder rows unless explicitly requested
        df = df.query('year != 0')
    return df
def seg_type_widget(key):
    """Streamlit selectbox choosing which column to segment plots by
    ("None" disables segmentation)."""
    seg_type = st.selectbox("What variables do you want to plot separately?",
                            ("None", 'crs_sector_name', 'flow_class', 'status'),
                            key=key)
    return seg_type
def graph_type_widget(key):
    """Streamlit radio choosing between a line plot and a bar plot."""
    graph_type = st.radio("What type of graph do you want to plot", ('line', 'bar'), key=key)
    return graph_type
def cat_filter(df_input, crs_select, flow_class_select):
    """Keep only rows whose sector and flow class are both in the given lists.

    Matches the original exactly: ``reset_index()`` is called without
    ``drop=True``, so the old index is preserved as an 'index' column.
    """
    mask = (df_input['crs_sector_name'].isin(crs_select)
            & df_input['flow_class'].isin(flow_class_select))
    filtered = df_input.copy()[mask]
    return filtered.reset_index()
def plot_over_time(df, col, break_by, graph_type):
    """Plot *col* ('count' or 'usd_current') aggregated per year.

    break_by : column to split the series by, or "None" for a single series
    graph_type : 'line' or 'bar'
    Returns the plotly figure.
    """
    if break_by == "None":
        df_time = df.groupby('year').agg(count=('project_id', 'count'),
                                         usd_current=('usd_current', 'sum')).reset_index()
        if graph_type == 'line':
            fig_time = px.line(df_time, x='year', y=col, color=None)
        elif graph_type == 'bar':
            fig_time = px.bar(df_time, x='year', y=col)
    else:
        df_time = df.groupby(['year', break_by]).agg(count=('project_id', 'count'),
                                                     usd_current=('usd_current', 'sum')).reset_index()
        if graph_type == 'line':
            fig_time = px.line(df_time, x='year', y=col, color=break_by)
        elif graph_type == 'bar':
            fig_time = px.bar(df_time, x='year', y=col, color=break_by)
    # NOTE(review): fig_time is unbound if graph_type is neither 'line' nor
    # 'bar'; callers only ever pass values from graph_type_widget.
    return fig_time
def country_profile():
    """Streamlit page: upload a project-export CSV and render an overview,
    per-category breakdowns, and over-time charts for one country."""
    st.title('Country Profile')
    uploaded_file = st.file_uploader("Choose an export csv file", type="csv")
    if uploaded_file is not None:
        df = pd.read_csv(uploaded_file)
        # optional filter down to official-finance projects only
        official_finance = st.checkbox('Only Select Official Finance')
        if official_finance:
            df = df.query('is_official_finance == 1').reset_index(drop=True)
        st.header("Overview")
        df_overview = get_overview(df)
        st.dataframe(df_overview)
        st.header("3 Most Important Projects")
        df_most_value = df.sort_values('usd_current', ascending=False, ignore_index=True)[
            ['project_id', 'title', 'usd_current']].head(3)
        st.table(df_most_value)
        # --- per-category section, filtered by year range ---
        st.header("Information By Category")
        st.subheader("Filter")
        st.markdown("#### By Year")
        year_range = year_slider()
        include_year_undefine = st.checkbox('Include Year Undefined')
        df_select = year_filter(df, year_range, include_year_undefine)
        st.subheader("Number of Projects")
        group_option_1 = group_select_box("first time")
        df_count_group = table(df_select, group_option_1)
        fig_count_group = px.bar(df_count_group, x=group_option_1, y="count",
                                 hover_data=[group_option_1, 'count', 'percentage'],
                                 title="Number of Projects in Each Category")
        st.plotly_chart(fig_count_group)
        st.subheader("Transaction Amount")
        group_option_2 = group_select_box("second time")
        fig_trans_group = total_transaction_amount_plot(df_select, group_option_2)
        st.plotly_chart(fig_trans_group)
        # --- over-time section, filtered by sector and flow class ---
        st.header("Information Overtime")
        st.text("Note: projects with undefined year are excluded.")
        st.subheader('Filter')
        st.markdown('#### CRS Sector')
        crs_select = st.multiselect("", options=list(df['crs_sector_name'].unique()),
                                    default=list(df['crs_sector_name'].unique()))
        st.markdown('#### flow class')
        flow_class_select = st.multiselect("", options=list(df['flow_class'].unique()),
                                           default=list(df['flow_class'].unique()))
        df_cat_select = cat_filter(df,crs_select, flow_class_select)
        st.subheader("Number of Projects")
        seg_type_1 = seg_type_widget(key='first')
        graph_type_1 = graph_type_widget(key='first')
        fig_count_time = plot_over_time(df_cat_select, 'count', seg_type_1, graph_type_1)
        fig_count_time.update_layout(title="Number of Projects Over Time")
        st.plotly_chart(fig_count_time)
        st.subheader("Transaction Amount")
        seg_type_2 = seg_type_widget(key='second')
        graph_type_2 = graph_type_widget(key='second')
        fig_count_usd = plot_over_time(df_cat_select, 'usd_current', seg_type_2, graph_type_2)
        fig_count_usd.update_layout(title="Transaction Amount Over Time")
        st.plotly_chart(fig_count_usd)
| wpan03/tdf_app | country_profile.py | country_profile.py | py | 6,337 | python | en | code | 0 | github-code | 13 |
def solution():
    """Read n consultations (duration, pay) from stdin and print the maximum
    total pay collectable within the n days (interval-scheduling DP:
    best[d] = best income available at the start of day d)."""
    n = int(input())
    jobs = []
    for _ in range(n):
        duration, pay = (int(v) for v in input().split(' '))
        jobs.append((duration, pay))
    best = [0] * (n + 1)
    for i, (duration, pay) in enumerate(jobs):
        # carry forward the best income that skips job i
        best[i + 1] = max(best[i + 1], best[i])
        end = i + duration
        # take job i only if it finishes within the n-day window
        if end <= n:
            best[end] = max(best[end], pay + best[i])
    print(max(best))
if __name__ == '__main__':
answer = solution() | chaeheejo/algorithm | baekjun/silver/leave_company.py | leave_company.py | py | 500 | python | en | code | 0 | github-code | 13 |
22467466643 | """
Реализовать класс «Дата», функция-конструктор которого должна принимать дату в виде строки формата «день-месяц-год».
В рамках класса реализовать два метода. Первый, с декоратором @classmethod,
должен извлекать число, месяц, год и преобразовывать их тип к типу «Число».
Второй, с декоратором @staticmethod, должен проводить валидацию числа, месяца и года (например, месяц — от 1 до 12).
Проверить работу полученной структуры на реальных данных.
"""
import re
from typing import Tuple
class Date:
    """A calendar date supplied as a 'day-month-year' string."""
    def __init__(self, date_str: str):
        """Store the raw date string.

        :param date_str: string representation of the date, 'day-month-year'
        """
        self._date_str = date_str
    @classmethod
    def date_to_int(cls, date_str: str) -> Tuple[int, int, int]:
        """Convert a 'day-month-year' string into an int tuple (day, month, year).

        :param date_str: string representation of the date
        :raises ValueError: when the string is not a valid date
        :return: (day, month, year)
        """
        if not cls.is_valid_date(date_str):
            raise ValueError('The date is incorrect')
        return tuple(int(piece) for piece in date_str.split('-'))
    @staticmethod
    def is_valid_date(date_str: str) -> bool:
        """Validate day (1-31), month (1-12) and a 4-digit year; components
        carry no leading zeros.

        :param date_str: string representation of the date
        :return: bool
        """
        return re.match(r'^([1-9]|[12]\d|3[01])-([1-9]|1[0-2])-(\d{4})$', date_str) is not None
if __name__ == '__main__':
    # Smoke tests for the class-level converters (no instance required).
    assert Date.date_to_int('31-12-5232') == (31, 12, 5232), 'Error'
    assert Date.date_to_int('2-11-2002') == (2, 11, 2002), 'Error'
| slavaprotogor/python_base | homeworks/lesson8/task1.py | task1.py | py | 2,153 | python | ru | code | 0 | github-code | 13 |
6204966732 | from fmc.resources.base import ResourceBase
from fmc.decorators import (
RequiredArguments,
RequiredProperties
)
class UserToGroupAddition(ResourceBase):
    """CloudFormation resource adding IAM users to an IAM group."""
    # CloudFormation resource type emitted for this resource.
    Type = "AWS::IAM::UserToGroupAddition"
    @RequiredArguments([
        "LogicalID"
    ])
    @RequiredProperties([
        "GroupName",
        "Users"
    ])
    def __init__(self, **kwargs):
        """Create the resource; the decorators presumably validate that
        'LogicalID' is supplied and that the 'GroupName' and 'Users'
        properties are present — confirm against fmc.decorators."""
        super(UserToGroupAddition, self).__init__(**kwargs)
#! vim: ts=4 sw=4 ft=python expandtab:
| logikone/form_my_cloud | fmc/resources/iam/user_to_group_addition.py | user_to_group_addition.py | py | 487 | python | en | code | 0 | github-code | 13 |
10041154126 | from telnetlib import EC
import psycopg2 as pg
import pytest
from selenium.webdriver.common.by import By
from selenium import webdriver
from selenium.common import NoSuchElementException
from selenium.webdriver import Keys
from selenium.webdriver.chrome.service import Service
from selenium.webdriver.common.by import By
import time
from selenium.webdriver.support.wait import WebDriverWait
from PageObjes.dyno_pom import Loginpage
class TestLogin:
    """End-to-end logout test: log in via an OTP fetched from the backend
    database, log out, and verify the login page is shown again.

    NOTE(review/security): database host, user and password are hard-coded
    below in plain text; move them to environment variables or a secrets
    store before sharing this code.
    """
    def test_Login(self):
        # open the login page and submit the email address
        path= Loginpage(self)
        self.driver = path.get_driver()
        self.driver.implicitly_wait(30)
        self.driver.get(Loginpage.URL)
        self.driver.find_element(By.XPATH, Loginpage.textbox_mailid_xpath).send_keys(Loginpage.email)
        shadow = self.driver.find_element(By.CSS_SELECTOR, Loginpage.next)
        shadow.click()
        # fetch the most recent OTP directly from the auth.nq table
        port = '5432'
        host = '34.100.216.73'
        user = "postgres"
        password = "t3djo7b0jfd9J3JL"
        database = "devdyno"
        con = pg.connect(database=database, user=user, password=password, host=host, port=port)
        cur = con.cursor()
        QueryString = '''SELECT (payload ->>'OTP') FROM auth.nq Order by pid desc limit 1'''
        time.sleep(3)
        cur.execute(QueryString)
        con.commit()
        output1 = cur.fetchall()
        # fetchall() gives [(otp,)]; strip the list/tuple punctuation from its repr
        a = str(output1)
        b = a.replace('[(', '')
        otp = b.replace(',)]', '')
        self.driver.find_element(By.XPATH, "//input[@placeholder='Enter OTP']").send_keys(otp)
        shadow1 = self.driver.find_element(By.CSS_SELECTOR,".next-btn.md.button.button-solid.ion-activatable.ion-focusable.hydrated")
        shadow1.click()
        # ******************************************************************************************
        # valid otp - valid testcase
        self.driver.find_element(By.XPATH, "//input[@placeholder='Enter OTP']").send_keys(otp)
        time.sleep(5)
        shadow1 = self.driver.find_element(By.CSS_SELECTOR, ".next-btn.md.button.button-solid.ion-activatable.ion-focusable.hydrated")
        shadow1.click()
        # logout via the avatar menu
        self.driver.find_element(By.CSS_SELECTOR, ".avatar").click()
        time.sleep(5)
        self.driver.find_element(By.XPATH, "/html[1]/body[1]/div[2]/div[2]/div[1]/div[1]/div[1]/button[2]/div[1]").click()
        time.sleep(2)
        # validation: the email input is visible again after logout
        logout_page = self.driver.find_element(By.CSS_SELECTOR, "input[placeholder='Email']")
        print("Element Found : Focus On", logout_page.is_displayed())
        print("Logout page verified:")
        time.sleep(5)
        # NOTE(review): the else branch only prints — the test cannot fail here
        if logout_page.is_displayed() == True:
            assert True
        else:
            print("Element Not Found : Not verified", logout_page.is_displayed())
        self.driver.close()
| Manjuurs1234/DynoWebTesting | Testcases/logout_page_03.py | logout_page_03.py | py | 2,765 | python | en | code | 0 | github-code | 13 |
42658291719 | import numpy as np
from main import perform_operation
def chain(img: np.ndarray, operations: list[str]) -> np.ndarray:
    """Apply multiple operations to the image in sequence.

    Args:
        img (np.ndarray): The image to apply the operations to
        operations (list[str]): The operations to apply to the image

    Raises:
        ValueError: If an operation cannot be used in chain mode

    Returns:
        np.ndarray: The image after the operations have been applied
    """
    # These operations need extra arguments (or would recurse), so they
    # cannot be chained; reject them up front with a guard clause.
    unsupported = {'composite', 'crop', 'chain'}
    for operation in operations:
        if operation in unsupported:
            raise ValueError(f"Operation {operation} is not supported in chain mode")
        img = perform_operation(img, operation)
    return img
| CullenStClair/img-editor | operations/chain.py | chain.py | py | 672 | python | en | code | 1 | github-code | 13 |
42617449643 | import math
import torch
import torch.nn as nn
from vidar.utils.distributed import print0, rank, dist_mode
from vidar.utils.logging import pcolor
from vidar.utils.tensor import same_shape
from vidar.utils.types import is_list
def freeze_layers(network, layers=('ALL',), flag_freeze=True):
    """
    Freeze or unfreeze a network's weights and biases

    Parameters
    ----------
    network : nn.Module
        Network to be modified
    layers : List or Tuple
        Substrings of parameter names to match ('ALL' matches everything)
    flag_freeze : Bool
        Whether the matched parameters will be frozen (True) or not (False)
    """
    if not layers:
        return
    for name, param in network.named_parameters():
        if any(key == 'ALL' or key in name for key in layers):
            param.requires_grad_(not flag_freeze)
def freeze_norms(network, layers=('ALL',), flag_freeze=True):
    """
    Freeze or unfreeze a network's BatchNorm2d layers

    Parameters
    ----------
    network : nn.Module
        Network to be modified
    layers : List or Tuple
        Substrings of module names to match ('ALL' matches everything)
    flag_freeze : Bool
        Whether the matched norms will be frozen (True) or not (False)
    """
    if not layers:
        return
    for name, module in network.named_modules():
        if not isinstance(module, nn.BatchNorm2d):
            continue
        if not any(key == 'ALL' or key in name for key in layers):
            continue
        if hasattr(module, 'weight'):
            module.weight.requires_grad_(not flag_freeze)
        if hasattr(module, 'bias'):
            module.bias.requires_grad_(not flag_freeze)
        # eval() also stops running-stat updates while the layer is frozen
        if flag_freeze:
            module.eval()
        else:
            module.train()
def freeze_layers_and_norms(network, layers=('ALL',), flag_freeze=True):
    """Freeze (or unfreeze) both the parameters and the normalization
    layers of a network in one call."""
    for action in (freeze_layers, freeze_norms):
        action(network, layers, flag_freeze)
def make_val_fit(model, key, val, updated_state_dict, strict=False):
    """
    Parse state dictionary to fit a model, and make tensors fit if requested

    Parameters
    ----------
    model : nn.Module
        Network to be used
    key : String
        Which key will be used
    val : torch.Tensor
        Key value
    updated_state_dict : Dict
        Updated dictionary (mutated in place when the tensor fits)
    strict : Bool
        True if no changes are allowed, False if tensors can be changed to fit

    Returns
    -------
    fit : Int
        Number of tensors that fit the model (0 or 1)
    """
    fit = 0
    val_new = model.state_dict()[key]
    if same_shape(val.shape, val_new.shape):
        # exact shape match: use the checkpoint tensor as-is
        updated_state_dict[key] = val
        fit += 1
    elif not strict:
        # try to stretch/shrink `val` dimension by dimension to match the model
        for i in range(val.dim()):
            if val.shape[i] != val_new.shape[i]:
                if val_new.shape[i] > val.shape[i]:
                    # tile the tensor along dim i, then trim any overshoot
                    ratio = math.ceil(val_new.shape[i] / val.shape[i])
                    val = torch.cat([val] * ratio, i)
                    if val.shape[i] != val_new.shape[i]:
                        # NOTE(review): slicing `val[:n]` trims dim 0, not dim i — confirm intended
                        val = val[:val_new.shape[i]]
                    if same_shape(val.shape, val_new.shape):
                        updated_state_dict[key] = val
                        fit += 1
                # NOTE(review): compares val_new.shape[0] (not [i]) — looks like a typo
                elif val_new.shape[0] < val.shape[i]:
                    val = val[:val_new.shape[i]]
                    if same_shape(val.shape, val_new.shape):
                        updated_state_dict[key] = val
                        fit += 1
    assert fit <= 1  # Each tensor cannot fit 2 or more times
    return fit
def load_checkpoint(model, checkpoint, strict=False, verbose=False, prefix=None):
    """
    Load checkpoint into a model

    Parameters
    ----------
    model : nn.Module
        Input network
    checkpoint : String or list[String]
        Checkpoint path (if it's a list, load them in order)
    strict : Bool
        True if all tensors are required, False if can be partially loaded
    verbose : Bool
        Print information on screen
    prefix : String
        Prefix used to change keys

    Returns
    -------
    model: nn.Module
        Loaded network
    """
    # A list of checkpoints is applied sequentially (later ones win)
    if is_list(checkpoint):
        for ckpt in checkpoint:
            load_checkpoint(model, ckpt, strict, verbose)
        return model
    font1 = {'color': 'magenta', 'attrs': ('bold', 'dark')}
    font2 = {'color': 'magenta', 'attrs': ('bold',)}
    if verbose:
        print0(pcolor('#' * 60, **font1))
        print0(pcolor('###### Loading from checkpoint: ', **font1) +
               pcolor('{}'.format(checkpoint), **font2))
    state_dict = torch.load(
        checkpoint,
        map_location='cpu' if dist_mode() == 'cpu' else 'cuda:{}'.format(rank())
    )['state_dict']
    updated_state_dict = {}
    total, fit = len(model.state_dict()), 0
    for key, val in state_dict.items():
        # strip common wrapper prefixes added by Lightning / DataParallel
        for start in ['model.', 'module.']:
            if key.startswith(start):
                key = key[len(start):]
        # optionally re-root keys at the given sub-module prefix
        if prefix is not None:
            idx = key.find(prefix)
            if idx > -1:
                key = key[(idx + len(prefix) + 1):]
        if key in model.state_dict().keys():
            fit += make_val_fit(model, key, val, updated_state_dict, strict=strict)
    model.load_state_dict(updated_state_dict, strict=strict)
    if verbose:
        # red = nothing loaded, yellow = partial, green = everything fit
        color = 'red' if fit == 0 else 'yellow' if fit < total else 'green'
        print0(pcolor('###### Loaded ', **font1) + \
               pcolor('{}/{}'.format(fit,total), color=color, attrs=('bold',)) + \
               pcolor(' tensors', **font1))
        print0(pcolor('#' * 60, **font1))
    return model
def save_checkpoint(filename, wrapper, epoch=None):
    """
    Save checkpoint to disk

    Parameters
    ----------
    filename : String
        Name of the file
    wrapper : nn.Module
        Model wrapper to save
    epoch : Int
        Training epoch; when given, the wrapper's config and the inner
        ``arch`` weights are stored alongside it instead of the wrapper's
        own state dict
    """
    if epoch is None:
        payload = {'state_dict': wrapper.state_dict()}
    else:
        payload = {
            'epoch': epoch,
            'config': wrapper.cfg,
            'state_dict': wrapper.arch.state_dict(),
        }
    torch.save(payload, filename)
40179604773 | import sys
sys.path.extend([".", "../../code"])
from PyQt5.QtWidgets import QTabWidget, QFileDialog
from PyQt5.QtWidgets import QVBoxLayout
from PyQt5.QtWidgets import QWidget, QApplication, QMainWindow, QAction, QMessageBox
from input_output.sbml_saver import SbmlSaver
from simulation.ode_simulator import OdeSimulator
from ui.gene_presenter import GenePresenter
from ui.open_sbml_dialog import OpenSbmlDialog
from ui.reactions.reactions_tab import ReactionsTab
from ui.constraint_satisfaction.constraint_satisfaction_tab import ConstraintSatisfactionModifyTab
from ui.simulation.deterministic_simulation_dialog import DeterministicSimulationDialog
from ui.simulation.stochastic_simulation_dialog import StochasticSimulationDialog
from ui.species.species_tab import SpeciesTab
class GeneWindow(QMainWindow):
    def __init__(self):
        """Build the main window UI and show it immediately."""
        super().__init__()
        self._init_ui()
    def _init_ui(self):
        """Create the tabbed central widget (Species / Reactions /
        Constraint Satisfaction), build the menu bar, and show the window."""
        layout = QVBoxLayout()
        tabs = QTabWidget()
        # keep tab references so other handlers can reach them later
        self.species_tab = SpeciesTab(self)
        self.reactions_tab = ReactionsTab()
        self.rev_eng_modify_tab = ConstraintSatisfactionModifyTab()
        tabs.addTab(self.species_tab, "Species")
        tabs.addTab(self.reactions_tab, "Reactions")
        tabs.addTab(self.rev_eng_modify_tab, "Constraint Satisfaction")
        self._init_menubar()
        layout.addWidget(tabs)
        central_widget = QWidget()
        central_widget.setLayout(layout)
        self.setCentralWidget(central_widget)
        self.setWindowTitle("Gene")
        self.show()
    def _deterministic_simulation_clicked(self):
        """Open the deterministic (ODE) simulation dialog; when confirmed,
        simulate the current network and visualise the result."""
        def handler(s):
            # s: simulation settings produced by the dialog — TODO confirm type
            net = GenePresenter.get_instance().network
            OdeSimulator.visualise(net, s, OdeSimulator.simulate(net, s))
        DeterministicSimulationDialog(handler)
    def _stochastic_simulation_clicked(self):
        """Open the stochastic-simulation dialog (the dialog runs itself)."""
        StochasticSimulationDialog()
    def _save_file_as_sbml_clicked(self):
        """Prompt for a target path and export the current network as SBML."""
        net = GenePresenter.get_instance().network
        d = QFileDialog()
        filename = d.getSaveFileName(self, "Save file", ".", "XML Files (*.xml)")
        # NOTE(review): getSaveFileName returns a (path, filter) tuple, which
        # is always truthy; the guard likely means `if filename[0]:`.  Also
        # '.xml' is appended unconditionally, doubling an existing suffix.
        if filename:
            SbmlSaver.save_network_to_file(net, filename[0] + ".xml")
    def _help_units_clicked(self):
        """Show a modal info box listing the measurement units used in the app."""
        units_message = QMessageBox()
        units_message.setIcon(QMessageBox.Information)
        units_message.setWindowTitle("Units")
        units_message.setStandardButtons(QMessageBox.Ok)
        units = """
        The quantities used in this programme have these units:
        Time: seconds
        Species: molecules
        Transcription rate: molecules/second
        Translation rate: molecules/mRNA molecules/second
        Degradation rate: molecules/second
        """
        units_message.setText(units)
        units_message.exec_()
def _help_user_manual_clicked(self):
user_manual_message = QMessageBox()
user_manual_message.setWindowTitle("User's Manual")
user_manual_message.setStandardButtons(QMessageBox.Ok)
user_manual = """
This programme has three main functions:
=== 1. Designing a gene regulatory network ===
· The network is modelled as a series of chemical reactions.
· You have to add every species in the "Species" tab before using them in a reaction.
You have to add mRNA and protein associated with a gene separately.
· "Reactions" tab allows adding network reactions (such as transcription, translation, etc.)
The tab also offers a visualisation of the network.
Regulation relationships of the network are specified when adding transcription reactions.
=== 2. Simulating networks ===
· Two types of simulation are supported: Deterministic and Stochastic.
· Both can be achieved through the "Simulate" menu.
=== 3. Constraint satisfaction (explained below) ===
· Constraint satisfaction allows modifying a network to fit a set of constraints (e.g. species
X <= 100 between seconds 0 to 20).
· "Mutables" are variables which are allowed to vary in order to satisfy the constraints.
· "Constraints" are all the constraints which have to be satisfied.
Constraint satisfaction modes:
1. Exact match: Find a network which exactly matches the required network.
2. Closest match: If no network exactly satisfies all the constraints,
finds a network which comes closest to satisfying all of them.
"""
user_manual_message.setText(user_manual)
user_manual_message.exec_()
def _init_menubar(self):
self.menubar = self.menuBar()
# File menu
file = self.menubar.addMenu("File")
open_file = QAction("Open SBML file", self)
open_file.triggered.connect(lambda _: OpenSbmlDialog(self))
file.addAction(open_file)
save_file = QAction("Save as SBML file", self)
save_file.triggered.connect(self._save_file_as_sbml_clicked)
file.addAction(save_file)
# Simulate menu
simulate = self.menubar.addMenu("Simulate")
deterministic = QAction("Deterministic (ODE model)", self)
deterministic.triggered.connect(self._deterministic_simulation_clicked)
simulate.addAction(deterministic)
stochastic = QAction("Stochastic (Gillespie Algorithm)", self)
stochastic.triggered.connect(self._stochastic_simulation_clicked)
simulate.addAction(stochastic)
# Help menu
help_menu = self.menubar.addMenu("Help")
help_units = QAction("Units", self)
help_units.triggered.connect(self._help_units_clicked)
help_menu.addAction(help_units)
help_user_manual = QAction("User's Manual", self)
help_user_manual.triggered.connect(self._help_user_manual_clicked)
help_menu.addAction(help_user_manual)
if __name__ == "__main__":
    # Launch the Qt event loop only when run as a script, not on import.
    app = QApplication([])
    g = GeneWindow()
    app.exec_()
| ilkutkutlar/gene-grn-simulator | src/ui/gui.py | gui.py | py | 6,263 | python | en | code | 0 | github-code | 13 |
26588861282 | from datetime import datetime, date
print("date_today=", date.today())
# Create some dates as strings.
# NOTE(review): 2020-11-17 was a Tuesday, not a Friday; strptime does not
# cross-check %A against the parsed date, so the mismatch is silently accepted.
ds1 = 'Friday, November 17, 2020'
ds2 = '11/17/20'
ds3 = '11-17-2020'
# Convert the strings to datetime objects and keep them.
dt1 = datetime.strptime(ds1, '%A, %B %d, %Y')
dt2 = datetime.strptime(ds2, '%m/%d/%y')
dt3 = datetime.strptime(ds3, '%m-%d-%Y')
print(dt1)
print(dt2)
print(dt3)
# Another example: 12-hour clock with AM/PM marker.
date_string = 'Oct 17 2020 9:00PM'
date_object = datetime.strptime(date_string, '%b %d %Y %I:%M%p')
print("*", date_object)
# **************************************************************
# Dateutil is a fuzzy parser that normalises many formats without an explicit
# format string. Install from a terminal with: pip install python-dateutil
from dateutil import parser
dt_obj = parser.parse('Thu Oct 17 17:10:28 2019')
print(dt_obj)
dt_obj1 = parser.parse('Thursday, 17. October 2019 5:10PM')
print(dt_obj1)
dt_obj2 = parser.parse('10/17/2019 17:10:28')
print(dt_obj2)
t_obj = parser.parse('10/17/2019')
print(t_obj)
| a6m2zerot/Planner_v.1.0 | unnamed.py | unnamed.py | py | 1,090 | python | ru | code | 0 | github-code | 13 |
42720984970 | #!/bin/env python
import boto3
import logging
import os
from modules.RegionInfo import RegionInfo as RegionInfo
def show_identity():
    """Log (at DEBUG level) the AWS identity of the active credentials.

    Uses the default boto3 session, so credentials/region come from the
    environment or the shared config files.
    """
    # boto3.session.Session(region_name=None, profile_name=None)
    aws = boto3.session.Session()
    sts = aws.client('sts')
    identity = sts.get_caller_identity()
    # Lazy %-formatting: the STS payload is only rendered when DEBUG is enabled.
    logging.debug('Using Identity: %s', identity)
if __name__ == "__main__":
    # Execute only when run as a script, not when imported.
    # LOGLEVEL is read from the environment; when unset, basicConfig receives
    # level=None and leaves the root logger level untouched.
    logging.basicConfig(level=os.environ.get("LOGLEVEL"))
    show_identity()
    # NOTE(review): if AWS_DEFAULT_REGION is unset this passes None to
    # RegionInfo — confirm RegionInfo handles that.
    region = os.environ.get('AWS_DEFAULT_REGION')
    region_info=RegionInfo(region)
    # Show Resource 1: EC2 instances (summary, then details).
    print(f"List Instances for {region}:")
    print('------------------------------')
    region_info.list_instances()
    print('\n')
    print(f"Describe Instances for {region}:")
    print('------------------------------')
    region_info.describe_instances()
    print('\n')
    # Show Resource 2: load balancers (summary, then details).
    print(f"List Load Balancers for {region}:")
    print('------------------------------')
    region_info.list_load_balancers()
    print('\n')
    print(f"Describe Load Balancers for {region}:")
    print('------------------------------')
    region_info.describe_load_balancers()
print('\n') | yosefrow/bizzabo | python/src/main.py | main.py | py | 1,287 | python | en | code | 0 | github-code | 13 |
26590149696 | import json
import xmltodict
import pafy
from youtube_search import YoutubeSearch
from genericpath import exists
from requests import request
def get_textfile(filepath, textstring=''):
    """Read and return the contents of ``filepath`` if it exists.

    Otherwise, when ``textstring`` is non-empty, write it to ``filepath``
    (creating the cache file) and return it. Always returns a string:
    the original implicitly returned None when the file was missing and
    ``textstring`` was empty; this now returns '' instead.
    """
    if exists(filepath):
        # Explicit encoding so cached files round-trip identically on any OS.
        with open(filepath, 'r', encoding='utf-8') as f:
            return f.read()
    if textstring:
        with open(filepath, 'w', encoding='utf-8') as f:
            f.write(textstring)
    return textstring
def get_request(filepath, url, params=None):
    """Return the response text for ``url``, cached in ``filepath``.

    On a cache hit the file is read; on a miss a GET request is made and
    its body is written to the cache via get_textfile.
    """
    # BUG FIX: the default was a mutable dict (params={}), shared across calls.
    if exists(filepath):
        return get_textfile(filepath)
    return get_textfile(filepath, request('GET', url, params=params or {}).text)
def do_search(platform, query):
    """Search ``query`` on one of 'podcast', 'youtube' or 'radio'.

    Returns a list of {'platform', 'url', 'name'} dicts.
    Raises ValueError for an unknown platform (previously this crashed with
    UnboundLocalError on the final return).
    """
    if platform == 'podcast':
        filename = f'search_podcast_{query}.json'
        url = 'https://itunes.apple.com/search'
        params = {'term': query, 'entity': 'podcast'}
        results = json.loads(get_request(filename, url, params)).get('results')
        selections = [({
            'platform': 'podcast',
            'url': l.get('feedUrl'),
            'name': l.get('collectionName')
        }) for l in results]
    elif platform == 'youtube':
        selections = [({
            'platform': 'youtube',
            'url': f'https://www.youtube.com/{l.get("url_suffix")}',
            'name': l.get("title")
        }) for l in YoutubeSearch(query).to_dict()]
    elif platform == 'radio':
        filename = f'search_radio_{query}.json'
        url = 'http://de1.api.radio-browser.info/json/stations/search'
        params = {'name': query}
        results = json.loads(get_request(filename, url, params))
        selections = [({'platform': 'radio', 'url': l.get(
            'url'), 'name': l.get('name')}) for l in results]
    else:
        raise ValueError(f'unknown platform: {platform!r}')
    return selections
def get_episodes(url):
    """Return [{'platform', 'url', 'name'}] for every episode of the RSS feed at ``url``.

    The feed is cached on disk under a name derived from the MD5 of the URL.
    """
    from hashlib import md5
    hex = md5(url.encode('utf-8')).hexdigest()
    x = xmltodict.parse(get_request(f'rss_{hex}.xml', url))
    items = x.get('rss').get('channel').get('item')
    # BUG FIX: xmltodict yields a single dict (not a list) when the feed has
    # exactly one <item>; iterating the dict would enumerate its keys.
    if isinstance(items, dict):
        items = [items]
    selections = [({
        'platform': 'episode',
        'url': l.get('enclosure').get('@url'),
        'name': f'{l.get("pubDate")} – {l.get("title")}'
    }) for l in items]
    return selections
| webavant/seekseek | server/project/server/search_functions.py | search_functions.py | py | 2,107 | python | en | code | 0 | github-code | 13 |
4565215170 | import numpy as np
import pandas as pd
import plotly.plotly as py
import plotly.graph_objs as go
import plotly.tools as tls
class cleaner(object):
    """Cleans raw NEISS injury data: adds derived label columns and builds a
    hospital x product crosstab."""
    def __init__(self, data):
        # data: a pandas DataFrame with at least the columns
        # psu, prod1, race, race_other and stratum.
        self.data = data
    @staticmethod
    def append_string(append_type, row):
        """Return a prefixed label for a row: 'hosp_<psu>' or 'product_<prod1>'.

        Raises ValueError for any other ``append_type`` (a subclass of the
        generic Exception raised before, so existing handlers still work).
        """
        if append_type == 'hospital':
            return 'hosp_' + row['psu'].__str__()
        elif append_type == 'product':
            return 'product_' + row['prod1'].__str__()
        else:
            raise ValueError('not valid append type')
    @staticmethod
    def recode_race(row):
        """Map the numeric race code (plus free-text race_other) to a label."""
        if row['race'] == 1:
            return 'white'
        elif row['race'] == 2:
            return 'black'
        elif row['race'] == 3 and row['race_other'] == 'HISPANIC':
            return 'hispanic'
        else:
            return 'other'
    @staticmethod
    def recode_stratum(row, stratum):
        """Return 1 when the row belongs to ``stratum``, else 0 (dummy column)."""
        if row['stratum'] == stratum:
            return 1
        else:
            return 0
    @staticmethod
    def recode_race_report(row):
        """Return whether the race field was reported (0 means not reported)."""
        if row['race'] == 0:
            return 'not reported'
        else:
            return 'reported'
    @property
    def processed_data(self):
        """Return the data with derived columns added.

        NOTE: this mutates self.data in place (the returned frame is the same
        object), and recomputes every derived column on each access.
        """
        data = self.data
        data['hospital'] = data.apply(lambda x: self.append_string('hospital', x), axis=1)
        data['product'] = data.apply(lambda x: self.append_string('product', x), axis=1)
        data['new_race'] = data.apply(lambda x: self.recode_race(x), axis=1)
        # One 0/1 indicator column per hospital-size stratum.
        for stratum in ['C', 'S', 'M', 'L', 'V']:
            data['stratum_' + stratum] = data.apply(lambda x: self.recode_stratum(x, stratum), axis=1)
        data['race_reported'] = data.apply(lambda x: self.recode_race_report(x), axis=1)
        return data
    @property
    def crosstab(self):
        """Hospital x product contingency table (requires the derived columns)."""
        grouped = pd.crosstab(self.data['hospital'], self.data['product'])
        return grouped
class query(object):
    """Query and plotting helpers over cleaned NEISS data plus its
    hospital x product crosstab."""
    def __init__(self, cleaned_data, crosstab):
        self.data = cleaned_data
        self.crosstab = crosstab
    def retrieve_query(self, group_name, group_value, query_name, top_num=9):
        """Top ``top_num`` value counts of ``query_name`` among rows where
        ``group_name`` equals ``group_value``."""
        data = self.data
        # .loc replaces the removed DataFrame.ix indexer (label/boolean selection).
        subset = data.loc[data[group_name] == group_value, query_name].value_counts()[0:top_num]
        return subset
    def get_product_by_hospital(self, hospital_name, top_num=9):
        """Most frequent products recorded at one hospital."""
        return self.retrieve_query(group_name='hospital', group_value=hospital_name,
                                   query_name='product', top_num=top_num)
    def get_hospitals_by_product(self, product_name, top_num=9):
        """Hospitals that record one product most often."""
        return self.retrieve_query(group_name='product', group_value=product_name,
                                   query_name='hospital', top_num=top_num)
    def get_product_by_size(self, stratum_value, top_num=9):
        """Most frequent products within one hospital-size stratum."""
        return self.retrieve_query(group_name='stratum', group_value=stratum_value,
                                   query_name='product', top_num=top_num)
    def get_counts(self, count_type, product_num=None, hosp_name=None):
        """Slice the crosstab by product column or hospital row."""
        if count_type == 'product':
            return self.crosstab.loc[:, product_num]
        elif count_type == 'hospital':
            return self.crosstab.loc[hosp_name, :]
        else:
            raise Exception('invalid count type input')
    def product_counts(self, product_num):
        """Per-hospital counts for one product."""
        return self.get_counts('product', product_num=product_num)
    def hospital_counts(self, hosp_name):
        """Per-product counts for one hospital."""
        return self.get_counts('hospital', hosp_name=hosp_name)
    def plot_product(self, product_num):
        """Plotly bar chart of hospital counts for one product."""
        data = self.product_counts(product_num)
        graph = [go.Bar(
            x=self.crosstab.index.values.tolist(),
            y=data.values,
        )]
        layout = go.Layout(title='Hospital Records for Product - ' + product_num)
        fig = go.Figure(data=graph, layout=layout)
        return py.iplot(fig)
    def plot_hospital(self, hosp_name):
        """Plotly bar chart of product counts for one hospital."""
        data = self.hospital_counts(hosp_name)
        graph = [go.Bar(
            x=self.crosstab.columns.values.tolist(),
            y=data.values,
        )]
        layout = go.Layout(title='Product Counts for Hospital - ' + hosp_name)
        fig = go.Figure(data=graph, layout=layout)
        return py.iplot(fig)
    def get_top_product(self, hospital_name):
        """The single most frequent product for one hospital."""
        return self.data.loc[self.data['hospital'] == hospital_name, 'product'].value_counts().index[0]
    def top_product_for_hospital(self):
        """Distribution of 'top product' across all hospitals."""
        hosp_dict = {}
        for hospital in self.data.hospital.value_counts().index:
            hosp_dict[hospital] = self.get_top_product(hospital)
        return pd.Series([val for val in hosp_dict.values()]).value_counts()
    def prepare_race_modeling(self, product):
        """Subset rows for ``product`` (incomplete: the result is not returned).

        BUG FIX: the original signature was (*args) while the body referenced
        an undefined name ``product``; it is now an explicit parameter.
        """
        counts = self.data.loc[self.data['product'] == product, :]
    def prepare_stratum_modeling(self, product, dummy_cols=False):
        """Hospital-level frame of counts for ``product`` joined with stratum columns."""
        if dummy_cols:
            columns = ['hospital', 'stratum_C', 'stratum_S', 'stratum_M', 'stratum_L', 'stratum_V']
        else:
            columns = ['hospital', 'stratum']
        counts = self.data.loc[self.data['product'] == product, :]['hospital'].value_counts()
        product_df = pd.DataFrame(counts)
        product_df.columns = ['counts']
        df = pd.merge(product_df, self.data.loc[:, columns], left_index=True, right_on=['hospital'], how='left')
        return df.drop_duplicates('hospital')
    def plot_stratum_dist(self, product, stratum):
        """Bar chart (plus printed mean/variance) of per-hospital counts in one stratum."""
        prepared = self.prepare_stratum_modeling(product)
        data = prepared.loc[prepared['stratum'] == stratum, :]
        graph = [go.Bar(
            x=data['hospital'].values.tolist(),
            y=data['counts'].values.tolist(),
        )]
        layout = go.Layout(title='Counts for Stratum ' + stratum)
        fig = go.Figure(data=graph, layout=layout)
        print('Variance:', np.var(data.counts.values))
        print('Mean:', np.mean(data.counts.values))
        return py.iplot(fig)
23552322040 | import json
import os
import numpy as np
from PIL import Image
def rotate_xy(x, y, deg, rot_center_x=0, rot_center_y=0):
    '''
    Rotate the point (x, y) by ``deg`` degrees counter-clockwise around
    the point (rot_center_x, rot_center_y).

    Args:
        x, y: int or float
            Coordinates of the point before rotation
        deg: int or float
            Rotation angle in degrees
        rot_center_x, rot_center_y: int or float
            Center of rotation
    Returns:
        rotated_corrd: numpy.ndarray
            The rotated (x, y) coordinates as a length-2 array.
            (The previous docstring said ``tuple``; an ndarray is returned.)
    '''
    # Translate so the rotation center is at the origin.
    corrd = np.array([x - rot_center_x, y - rot_center_y])
    rad = np.radians(deg)
    cos = np.cos(rad)
    sin = np.sin(rad)
    # Standard 2-D rotation matrix, then translate back.
    rot_matrix = np.array([[cos, -1 * sin], [sin, cos]])
    rotated_corrd = np.dot(rot_matrix, corrd) + np.array([rot_center_x, rot_center_y])
    return rotated_corrd
def extract_keypoints_from_json(json_path):
    '''
    Extract the first person's pose keypoints from an OpenPose result file.

    Args:
        json_path: str
            Path to an OpenPose output JSON file
    Returns:
        keypoints_array: numpy.ndarray
            Flat array of keypoints (x, y, confidence triplets).
            When no person was detected, an all-zero array of length 54.
    '''
    # Explicit UTF-8: OpenPose output is JSON, which must not be read with a
    # locale-dependent default encoding.
    with open(json_path, encoding='utf-8') as f:
        json_dic = json.load(f)
    if json_dic['people']:
        keypoints_array = np.array(json_dic['people'][0]['pose_keypoints_2d'])
    else:
        keypoints_array = np.zeros(54)
    return keypoints_array
def euclidean_distance(pre_keypoints_array, keypoints_array):
    '''
    Sum of Euclidean distances between corresponding keypoints of two frames.

    Args:
        pre_keypoints_array: numpy.ndarray
            Keypoints of the previous frame (x, y, confidence triplets)
        keypoints_array: numpy.ndarray
            Keypoints of the current frame
    Returns:
        euclidean_dist: float
            Summed distance over keypoints 0-13 (head keypoints are ignored).
    '''
    euclidean_dist = 0
    for i in range(14):
        # BUG FIX: the slice previously ended at i*3+1 and therefore compared
        # only the x coordinate; include y as well (confidence is excluded).
        pre_xy = pre_keypoints_array[i * 3: i * 3 + 2]
        xy = keypoints_array[i * 3: i * 3 + 2]
        euclidean_dist += np.linalg.norm(pre_xy - xy)
    return euclidean_dist
def rotate_keypoints_array(keypoints_array, deg, rot_center_x=0, rot_center_y=0):
    '''
    Rotate each of the 18 (x, y) keypoints by ``deg`` degrees around the
    given center, leaving every confidence value unchanged.

    Args:
        keypoints_array: numpy.ndarray
            Flat array of (x, y, confidence) triplets for 18 keypoints
        deg: int or float
            Rotation angle in degrees
        rot_center_x, rot_center_y: int or float
            Center of rotation
    Returns:
        numpy.ndarray
            Flat array of the rotated triplets, same layout as the input.
    '''
    rotated_values = []
    for idx in range(18):
        base = idx * 3
        px = keypoints_array[base]
        py = keypoints_array[base + 1]
        confidence = keypoints_array[base + 2]
        rotated_values.extend(rotate_xy(px, py, deg, rot_center_x, rot_center_y))
        rotated_values.append(confidence)
    return np.array(rotated_values)
def make_list_in_dir(dir_path):
    '''
    Return the sorted names of the entries inside a directory.

    Args:
        dir_path: str
            Directory path
    Returns:
        list
            Sorted list of file/directory names inside ``dir_path``
    '''
    # sorted() replaces the two-step listdir + in-place .sort() of the original.
    return sorted(os.listdir(dir_path))
def get_rot_center(img_path):
    '''
    Return the center coordinates of the image at ``img_path``.

    Args:
        img_path: str
            Path to the image
    Returns:
        center_x, center_y: float
            Coordinates of the image center (width/2, height/2)
    '''
    # Context manager closes the file handle (the original leaked it).
    with Image.open(img_path) as img:
        width, height = img.size
    return width / 2, height / 2
def save_rotate_image(image_path, save_path, deg):
    '''
    Rotate the image at ``image_path`` by ``deg`` degrees and save the
    result to ``save_path``.
    '''
    # Context manager closes the source file handle (the original leaked it).
    with Image.open(image_path) as img:
        img.rotate(deg).save(save_path)
| ktoyod/rotatedpose | src/utils/rotate.py | rotate.py | py | 3,906 | python | ja | code | 2 | github-code | 13 |
33782623378 | from huggingface_hub import HfApi, hf_hub_download, snapshot_download, upload_folder
# Hugging Face Hub dataset repo to pull cached embeddings from.
repo_id = "vermouthdky/SimTeG"
# Download only the matching files into ../lambda_out as real files
# (no symlinks into the HF cache).
snapshot_download(
    repo_id=repo_id,
    repo_type="dataset",
    local_dir="../lambda_out",
    local_dir_use_symlinks=False,
    allow_patterns=[
        "ogbn-products/e5-large/optuna_peft/best/cached_embs/*.pt"
    ],
)
| vermouthdky/SimTeG | download_embs.py | download_embs.py | py | 345 | python | en | code | 10 | github-code | 13 |
26978106874 | from django.contrib import admin
from django.urls import path
from .import views
from django.contrib.auth.views import LoginView,LogoutView
from .import views
# urls for admin
# URLconf for the hospital management app. Views live in .views; auth login
# and logout use Django's class-based views with custom templates.
urlpatterns= [
    path('',views.home_view,name='home'),
    path('signup',views.signup_view,name="signup"),
    path('adminsignup', views.admin_signup_view),
    path('staffsignup', views.staff_signup_view),
    path('doctorsignup', views.doctor_signup_view,name='doctorsignup'),
    path('patientsignup', views.patient_signup_view),
    path('login',LoginView.as_view(template_name='login.html')),
    path('afterlogin', views.afterlogin_view,name='afterlogin'),
    path('aboutus',views.aboutus_view),
    path('contactus',views.contactus_view),
    path('weather',views.weather_view),
    path('blood_view',views.blood_view,name="blood_view"),
    path('med',views.med_view,name='med'),
    path('organ',views.organ_view,name='organ'),
    path('logout', LogoutView.as_view(template_name='index.html'),name='logout'),
    # lab / staff pages
    path('staffdash', views.staff_dashboard_view,name='staffdash'),
    path('upload/<int:pk>', views.staff_upload_view,name='upload'),
    path('details', views.staff_details_view,name='details'),
    path('cart', views.cart_view,name='cart'),
    # admin: doctor management (approve/reject, CRUD)
    path('admindash', views.admin_dashboard_view,name='admindash'),
    path('admin-doctor', views.admin_doctor_view,name='admin-doctor'),
    path('admin-view-doctor', views.admin_view_doctor_view,name='admin-view-doctor'),
    path('delete-doctor-from-hospital/<int:pk>', views.delete_doctor_from_hospital_view,name='delete-doctor-from-hospital'),
    path('update-doctor/<int:pk>', views.update_doctor_view,name='update-doctor'),
    path('admin-add-doctor', views.admin_add_doctor_view,name='admin-add-doctor'),
    path('admin-approve-doctor', views.admin_approve_doctor_view,name='admin-approve-doctor'),
    path('approve-doctor/<int:pk>', views.approve_doctor_view,name='approve-doctor'),
    path('reject-doctor/<int:pk>', views.reject_doctor_view,name='reject-doctor'),
    path('admin-view-doctor-specialisation',views.admin_view_doctor_specialisation_view,name='admin-view-doctor-specialisation'),
    # admin: patient management (approve/reject, CRUD, discharge)
    path('admin-patient', views.admin_patient_view,name='admin-patient'),
    path('admin-view-patient', views.admin_view_patient_view,name='admin-view-patient'),
    path('delete-patient-from-hospital/<int:pk>', views.delete_patient_from_hospital_view,name='delete-patient-from-hospital'),
    path('update-patient/<int:pk>', views.update_patient_view,name='update-patient'),
    path('admin-add-patient', views.admin_add_patient_view,name='admin-add-patient'),
    path('admin-approve-patient', views.admin_approve_patient_view,name='admin-approve-patient'),
    path('approve-patient/<int:pk>', views.approve_patient_view,name='approve-patient'),
    path('reject-patient/<int:pk>', views.reject_patient_view,name='reject-patient'),
    path('admin-discharge-patient', views.admin_discharge_patient_view,name='admin-discharge-patient'),
    path('discharge-patient/<int:pk>', views.discharge_patient_view,name='discharge-patient'),
    path('download-pdf/<int:pk>', views.download_pdf_view,name='download-pdf'),
    # admin: appointment and bed management
    path('admin-appointment', views.admin_appointment_view,name='admin-appointment'),
    path('admin-view-appointment', views.admin_view_appointment_view,name='admin-view-appointment'),
    path('admin-add-appointment', views.admin_add_appointment_view,name='admin-add-appointment'),
    path('admin-approve-appointment', views.admin_approve_appointment_view,name='admin-approve-appointment'),
    path('approve-appointment/<int:pk>', views.approve_appointment_view,name='approve-appointment'),
    path('reject-appointment/<int:pk>', views.reject_appointment_view,name='reject-appointment'),
    path('admin-approve-bed', views.admin_approve_bed_view,name='admin-approve-bed'),
    path('approve-bed/<int:pk>', views.approve_bed_view,name='approve-bed'),
    path('reject-bed/<int:pk>', views.reject_bed_view,name='reject-bed'),
    path('adminbbed', views.admin_view_bed_view,name='adminbbed'),
    # admin urls end
    # doctor pages
    path('doctor-dashboard', views.doctor_dashboard_view,name='doctordash'),
    path('search', views.search_view,name='search'),
    path('doctor-patient', views.doctor_patient_view,name='doctor-patient'),
    path('doctor-view-patient', views.doctor_view_patient_view,name='doctor-view-patient'),
    path('doctor-view-discharge-patient',views.doctor_view_discharge_patient_view,name='doctor-view-discharge-patient'),
    path('doctor-appointment', views.doctor_appointment_view,name='doctor-appointment'),
    path('doctor-view-appointment', views.doctor_view_appointment_view,name='doctor-view-appointment'),
    path('doctor-delete-appointment',views.doctor_delete_appointment_view,name='doctor-delete-appointment'),
    path('delete-appointment/<int:pk>', views.delete_appointment_view,name='delete-appointment'),
    # patient pages
    path('patient-dashboard', views.patient_dashboard_view,name='patientdash'),
    path('patient-appointment', views.patient_appointment_view,name='patient-appointment'),
    path('patient-book-appointment', views.patient_book_appointment_view,name='patient-book-appointment'),
    path('patient-book-bed', views.patient_book_bed_view,name='patient-book-bed'),
    path('patient-view-appointment', views.patient_view_appointment_view,name='patient-view-appointment'),
    path('patient-view-bed', views.patient_view_bed_view,name='patient-view-bed'),
    path('patient-view-doctor', views.patient_view_doctor_view,name='patient-view-doctor'),
    path('searchdoctor', views.search_doctor_view,name='searchdoctor'),
    path('patient-discharge', views.patient_discharge_view,name='patient-discharge'),
    # cart / shop pages
    path('product', views.ProductView, name='product'),
    path('order-summary', views.OrderSummaryView, name='order-summary'),
    path('add-to-cart/<pk>/', views.add_to_cart, name='add-to-cart'),
    path('remove-from-cart/<pk>/', views.remove_from_cart, name='remove-from-cart'),
    path('reduce-quantity-item/<pk>/', views.reduce_quantity_item, name='reduce-quantity-item')
]
| Sanjay-272002/hms | mediapp/urls.py | urls.py | py | 6,218 | python | en | code | 0 | github-code | 13 |
5040156385 | ###### Enums ######
# Enum name->value tables for the tdh (Trace Data Helper) API. The *_INV
# tables map value->name and are derived from the forward tables by
# comprehension instead of being hand-maintained duplicates, which guarantees
# the two stay consistent.
#
# NOTE(review): several of these names (DECODING_SOURCE, TDH_IN_TYPE,
# TDH_OUT_TYPE, MAP_VALUETYPE, EVENT_FIELD_TYPE, TDH_CONTEXT_TYPE) are
# rebound to ctype aliases later in this file, clobbering these tables —
# confirm which binding consumers actually rely on.
DECODING_SOURCE = {
    "DecodingSourceXMLFile": 0,
    "DecodingSourceWbem": 1,
    "DecodingSourceWPP": 2,
}
DECODING_SOURCE_INV = {value: name for name, value in DECODING_SOURCE.items()}
TDH_IN_TYPE = {
    "TDH_INTYPE_NULL": 0,
    "TDH_INTYPE_UNICODESTRING": 1,
    "TDH_INTYPE_ANSISTRING": 2,
    "TDH_INTYPE_INT8": 3,
    "TDH_INTYPE_UINT8": 4,
    "TDH_INTYPE_INT16": 5,
    "TDH_INTYPE_UINT16": 6,
    "TDH_INTYPE_INT32": 7,
    "TDH_INTYPE_UINT32": 8,
    "TDH_INTYPE_INT64": 9,
    "TDH_INTYPE_UINT64": 10,
    "TDH_INTYPE_FLOAT": 11,
    "TDH_INTYPE_DOUBLE": 12,
    "TDH_INTYPE_BOOLEAN": 13,
    "TDH_INTYPE_BINARY": 14,
    "TDH_INTYPE_GUID": 15,
    "TDH_INTYPE_POINTER": 16,
    "TDH_INTYPE_FILETIME": 17,
    "TDH_INTYPE_SYSTEMTIME": 18,
    "TDH_INTYPE_SID": 19,
    "TDH_INTYPE_HEXINT32": 20,
    "TDH_INTYPE_HEXINT64": 21,
    "TDH_INTYPE_COUNTEDSTRING": 300,
    "TDH_INTYPE_COUNTEDANSISTRING": 301,
    "TDH_INTYPE_REVERSEDCOUNTEDSTRING": 302,
    "TDH_INTYPE_REVERSEDCOUNTEDANSISTRING": 303,
    "TDH_INTYPE_NONNULLTERMINATEDSTRING": 304,
    "TDH_INTYPE_NONNULLTERMINATEDANSISTRING": 305,
    "TDH_INTYPE_UNICODECHAR": 306,
    "TDH_INTYPE_ANSICHAR": 307,
    "TDH_INTYPE_SIZET": 308,
    "TDH_INTYPE_HEXDUMP": 309,
    "TDH_INTYPE_WBEMSID": 310,
}
TDH_IN_TYPE_INV = {value: name for name, value in TDH_IN_TYPE.items()}
TDH_OUT_TYPE = {
    "TDH_OUTTYPE_NULL": 0,
    "TDH_OUTTYPE_STRING": 1,
    "TDH_OUTTYPE_DATETIME": 2,
    "TDH_OUTTYPE_BYTE": 3,
    "TDH_OUTTYPE_UNSIGNEDBYTE": 4,
    "TDH_OUTTYPE_SHORT": 5,
    "TDH_OUTTYPE_UNSIGNEDSHORT": 6,
    "TDH_OUTTYPE_INT": 7,
    "TDH_OUTTYPE_UNSIGNEDINT": 8,
    "TDH_OUTTYPE_LONG": 9,
    "TDH_OUTTYPE_UNSIGNEDLONG": 10,
    "TDH_OUTTYPE_FLOAT": 11,
    "TDH_OUTTYPE_DOUBLE": 12,
    "TDH_OUTTYPE_BOOLEAN": 13,
    "TDH_OUTTYPE_GUID": 14,
    "TDH_OUTTYPE_HEXBINARY": 15,
    "TDH_OUTTYPE_HEXINT8": 16,
    "TDH_OUTTYPE_HEXINT16": 17,
    "TDH_OUTTYPE_HEXINT32": 18,
    "TDH_OUTTYPE_HEXINT64": 19,
    "TDH_OUTTYPE_PID": 20,
    "TDH_OUTTYPE_TID": 21,
    "TDH_OUTTYPE_PORT": 22,
    "TDH_OUTTYPE_IPV4": 23,
    "TDH_OUTTYPE_IPV6": 24,
    "TDH_OUTTYPE_SOCKETADDRESS": 25,
    "TDH_OUTTYPE_CIMDATETIME": 26,
    "TDH_OUTTYPE_ETWTIME": 27,
    "TDH_OUTTYPE_XML": 28,
    "TDH_OUTTYPE_ERRORCODE": 29,
    "TDH_OUTTYPE_WIN32ERROR": 30,
    "TDH_OUTTYPE_NTSTATUS": 31,
    "TDH_OUTTYPE_HRESULT": 32,
    "TDH_OUTTYPE_CULTURE_INSENSITIVE_DATETIME": 33,
    "TDH_OUTTYPE_REDUCEDSTRING": 300,
    "TDH_OUTTYPE_NOPRINT": 301,
}
TDH_OUT_TYPE_INV = {value: name for name, value in TDH_OUT_TYPE.items()}
MAP_VALUETYPE = {
    "EVENTMAP_ENTRY_VALUETYPE_ULONG": 0,
    "EVENTMAP_ENTRY_VALUETYPE_STRING": 1,
}
MAP_VALUETYPE_INV = {value: name for name, value in MAP_VALUETYPE.items()}
EVENT_FIELD_TYPE = {
    "EventKeywordInformation": 0,
    "EventLevelInformation": 1,
    "EventChannelInformation": 2,
    "EventTaskInformation": 3,
    "EventOpcodeInformation": 4,
}
EVENT_FIELD_TYPE_INV = {value: name for name, value in EVENT_FIELD_TYPE.items()}
TDH_CONTEXT_TYPE = {
    "TDH_CONTEXT_WPP_TMFFILE": 0,
    "TDH_CONTEXT_WPP_TMFSEARCHPATH": 1,
    "TDH_CONTEXT_WPP_GMT": 2,
    "TDH_CONTEXT_POINTERSIZE": 3,
}
TDH_CONTEXT_TYPE_INV = {value: name for name, value in TDH_CONTEXT_TYPE.items()}
###################
###### Types ######
# Sandbox declarations mirroring the Windows tdh.h structures. MemStruct,
# Union, Ptr, Array and the ctype aliases (HANDLE, ULONG, ...) come from the
# emulator framework defined elsewhere in this project — confirm their exact
# semantics there. Field order follows the SDK headers.
#
# NOTE(review): the assignments below (DECODING_SOURCE, TDH_IN_TYPE,
# TDH_OUT_TYPE, MAP_VALUETYPE, EVENT_FIELD_TYPE, TDH_CONTEXT_TYPE) rebind
# names that were dict tables in the Enums section above, clobbering them.
TDH_HANDLE = HANDLE
PTDH_HANDLE = Ptr("<I", TDH_HANDLE())
PEVENT_HEADER_EXTENDED_DATA_ITEM = LPVOID
class TRACE_PROVIDER_INFO(MemStruct):
    fields = [
        ("ProviderGuid", GUID()),
        ("SchemaSource", ULONG()),
        ("ProviderNameOffset", ULONG()),
    ]
TRACE_PROVIDER_INFO__ANYSIZE_ARRAY_ = Array(TRACE_PROVIDER_INFO, 1)
class PROVIDER_ENUMERATION_INFO(MemStruct):
    # Variable-length: TraceProviderInfoArray is declared with 1 element
    # (the SDK ANYSIZE_ARRAY idiom) but holds NumberOfProviders entries.
    fields = [
        ("NumberOfProviders", ULONG()),
        ("Reserved", ULONG()),
        ("TraceProviderInfoArray", TRACE_PROVIDER_INFO__ANYSIZE_ARRAY_()),
    ]
PPROVIDER_ENUMERATION_INFO = Ptr("<I", PROVIDER_ENUMERATION_INFO())
DECODING_SOURCE = UINT
TEMPLATE_FLAGS = UINT
TDH_IN_TYPE = USHORT
TDH_OUT_TYPE = USHORT
PROPERTY_FLAGS = UINT
class _EVENT_PROPERTY_INFO_u1_s1_(MemStruct):
    # nonStructType arm of EVENT_PROPERTY_INFO's first union.
    fields = [
        ("InType", TDH_IN_TYPE()),
        ("OutType", TDH_OUT_TYPE()),
        ("MapNameOffset", ULONG()),
    ]
class _EVENT_PROPERTY_INFO_u1_s2_(MemStruct):
    # structType arm of EVENT_PROPERTY_INFO's first union.
    fields = [
        ("StructStartIndex", USHORT()),
        ("NumOfStructMembers", USHORT()),
        ("padding", ULONG()),
    ]
_EVENT_PROPERTY_INFO_u1_ = Union([
    ("nonStructType", _EVENT_PROPERTY_INFO_u1_s1_),
    ("structType", _EVENT_PROPERTY_INFO_u1_s2_),
])
_EVENT_PROPERTY_INFO_u2_ = Union([
    ("count", USHORT),
    ("countPropertyIndex", USHORT),
])
_EVENT_PROPERTY_INFO_u3_ = Union([
    ("length", USHORT),
    ("lengthPropertyIndex", USHORT),
])
class EVENT_PROPERTY_INFO(MemStruct):
    fields = [
        ("Flags", PROPERTY_FLAGS()),
        ("NameOffset", ULONG()),
        (None, _EVENT_PROPERTY_INFO_u1_()),
        (None, _EVENT_PROPERTY_INFO_u2_()),
        (None, _EVENT_PROPERTY_INFO_u3_()),
        ("Reserved", ULONG()),
    ]
EVENT_PROPERTY_INFO__ANYSIZE_ARRAY_ = Array(EVENT_PROPERTY_INFO, 1)
class PROVIDER_FILTER_INFO(MemStruct):
    fields = [
        ("Id", UCHAR()),
        ("Version", UCHAR()),
        ("MessageOffset", ULONG()),
        ("Reserved", ULONG()),
        ("PropertyCount", ULONG()),
        ("EventPropertyInfoArray", EVENT_PROPERTY_INFO__ANYSIZE_ARRAY_()),
    ]
PPROVIDER_FILTER_INFO = Ptr("<I", PROVIDER_FILTER_INFO())
PPROVIDER_FILTER_INFO_PTR = Ptr("<I", PPROVIDER_FILTER_INFO())
class TRACE_EVENT_INFO(MemStruct):
    # The *Offset fields are byte offsets to strings within the same buffer.
    fields = [
        ("ProviderGuid", GUID()),
        ("EventGuid", GUID()),
        ("EventDescriptor", EVENT_DESCRIPTOR()),
        ("DecodingSource", DECODING_SOURCE()),
        ("ProviderNameOffset", ULONG()),
        ("LevelNameOffset", ULONG()),
        ("ChannelNameOffset", ULONG()),
        ("KeywordsNameOffset", ULONG()),
        ("TaskNameOffset", ULONG()),
        ("OpcodeNameOffset", ULONG()),
        ("EventMessageOffset", ULONG()),
        ("ProviderMessageOffset", ULONG()),
        ("BinaryXMLOffset", ULONG()),
        ("BinaryXMLSize", ULONG()),
        ("ActivityIDNameOffset", ULONG()),
        ("RelatedActivityIDNameOffset", ULONG()),
        ("PropertyCount", ULONG()),
        ("TopLevelPropertyCount", ULONG()),
        ("Flags", TEMPLATE_FLAGS()),
        ("EventPropertyInfoArray", EVENT_PROPERTY_INFO__ANYSIZE_ARRAY_()),
    ]
PTRACE_EVENT_INFO = Ptr("<I", TRACE_EVENT_INFO())
MAP_VALUETYPE = UINT
MAP_FLAGS = UINT
_EVENT_MAP_ENTRY_u_ = Union([
    ("Value", ULONG),
    ("InputOffset", ULONG),
])
class EVENT_MAP_ENTRY(MemStruct):
    fields = [
        ("OutputOffset", ULONG()),
        (None, _EVENT_MAP_ENTRY_u_()),
    ]
EVENT_MAP_ENTRY__ANYSIZE_ARRAY_ = Array(EVENT_MAP_ENTRY, 1)
_EVENT_MAP_INFO_u_ = Union([
    ("MapEntryValueType", MAP_VALUETYPE),
    ("FormatStringOffset", ULONG),
])
class EVENT_MAP_INFO(MemStruct):
    fields = [
        ("NameOffset", ULONG()),
        ("Flag", MAP_FLAGS()),
        ("EntryCount", ULONG()),
        (None, _EVENT_MAP_INFO_u_()),
        ("MapEntryArray", EVENT_MAP_ENTRY__ANYSIZE_ARRAY_()),
    ]
PEVENT_MAP_INFO = Ptr("<I", EVENT_MAP_INFO())
EVENT_FIELD_TYPE = UINT
class PROVIDER_FIELD_INFO(MemStruct):
    fields = [
        ("NameOffset", ULONG()),
        ("DescriptionOffset", ULONG()),
        ("Value", ULONGLONG()),
    ]
PROVIDER_FIELD_INFO__ANYSIZE_ARRAY_ = Array(PROVIDER_FIELD_INFO, 1)
class PROVIDER_FIELD_INFOARRAY(MemStruct):
    fields = [
        ("NumberOfElements", ULONG()),
        ("FieldType", EVENT_FIELD_TYPE()),
        ("FieldInfoArray", PROVIDER_FIELD_INFO__ANYSIZE_ARRAY_()),
    ]
PPROVIDER_FIELD_INFOARRAY = Ptr("<I", PROVIDER_FIELD_INFOARRAY())
TDH_CONTEXT_TYPE = UINT
class TDH_CONTEXT(MemStruct):
    fields = [
        ("ParameterValue", ULONGLONG()),
        ("ParameterType", TDH_CONTEXT_TYPE()),
        ("ParameterSize", ULONG()),
    ]
PTDH_CONTEXT = Ptr("<I", TDH_CONTEXT())
class PROPERTY_DATA_DESCRIPTOR(MemStruct):
    fields = [
        ("PropertyName", ULONGLONG()),
        ("ArrayIndex", ULONG()),
        ("Reserved", ULONG()),
    ]
PPROPERTY_DATA_DESCRIPTOR = Ptr("<I", PROPERTY_DATA_DESCRIPTOR())
class _EVENT_HEADER_u_s_(MemStruct):
    # KernelTime/UserTime pair overlaying ProcessorTime in EVENT_HEADER.
    fields = [
        ("KernelTime", ULONG()),
        ("UserTime", ULONG()),
    ]
_EVENT_HEADER_u_ = Union([
    (None, _EVENT_HEADER_u_s_),
    ("ProcessorTime", ULONG64),
])
_EVENT_HEADER_FLAG_ = USHORT
_EVENT_HEADER_PROPERTY_ = USHORT
class EVENT_HEADER(MemStruct):
    fields = [
        ("Size", USHORT()),
        ("HeaderType", USHORT()),
        ("Flags", _EVENT_HEADER_FLAG_()),
        ("EventProperty", _EVENT_HEADER_PROPERTY_()),
        ("ThreadId", ULONG()),
        ("ProcessId", ULONG()),
        ("TimeStamp", LARGE_INTEGER()),
        ("ProviderId", GUID()),
        ("EventDescriptor", EVENT_DESCRIPTOR()),
        (None, _EVENT_HEADER_u_()),
        ("ActivityId", GUID()),
    ]
class EVENT_RECORD(MemStruct):
    fields = [
        ("EventHeader", EVENT_HEADER()),
        ("BufferContext", ETW_BUFFER_CONTEXT()),
        ("ExtendedDataCount", USHORT()),
        ("UserDataLength", USHORT()),
        ("ExtendedData", PEVENT_HEADER_EXTENDED_DATA_ITEM()),
        ("UserData", PVOID()),
        ("UserContext", PVOID()),
    ]
PEVENT_RECORD = Ptr("<I", EVENT_RECORD())
###################
###### Functions ######
def tdh_TdhCloseDecodingHandle(jitter):
    """
    Unimplemented stub for the tdh API below; pops the stdcall args and raises.

    [ERROR_CODE_ULONG] TdhCloseDecodingHandle(
        TDH_HANDLE Handle
    )
    """
    ret_ad, args = jitter.func_args_stdcall(["Handle"])
    raise RuntimeError('API not implemented')
    # Unreachable scaffolding kept by the stub generator for a future
    # implementation; note `ret_value` is not defined yet.
    jitter.func_ret_stdcall(ret_ad, ret_value)
def tdh_TdhEnumerateProviderFieldInformation(jitter):
    """
    Unimplemented stub for the tdh API below; pops the stdcall args and raises.

    [ERROR_CODE_ULONG] TdhEnumerateProviderFieldInformation(
        LPGUID pGuid,
        EVENT_FIELD_TYPE EventFieldType,
        PPROVIDER_FIELD_INFOARRAY pBuffer,
        ULONG* pBufferSize
    )
    """
    ret_ad, args = jitter.func_args_stdcall(["pGuid", "EventFieldType", "pBuffer", "pBufferSize"])
    raise RuntimeError('API not implemented')
    # Unreachable scaffolding kept by the stub generator for a future
    # implementation; note `ret_value` is not defined yet.
    jitter.func_ret_stdcall(ret_ad, ret_value)
def tdh_TdhEnumerateProviderFilters(jitter):
    """
    [ERROR_CODE_ULONG] TdhEnumerateProviderFilters(
        LPGUID pGuid,
        ULONG TdhContextCount,
        PTDH_CONTEXT pTdhContext,
        ULONG* FilterCount,
        PPROVIDER_FILTER_INFO* pBuffer,
        ULONG* pBufferSize
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["pGuid", "TdhContextCount", "pTdhContext", "FilterCount", "pBuffer", "pBufferSize"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhEnumerateProviders(jitter):
    """
    [ERROR_CODE_ULONG] TdhEnumerateProviders(
        PPROVIDER_ENUMERATION_INFO pBuffer,
        ULONG* pBufferSize
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["pBuffer", "pBufferSize"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhFormatProperty(jitter):
    """
    [ERROR_CODE_ULONG] TdhFormatProperty(
        PTRACE_EVENT_INFO EventInfo,
        PEVENT_MAP_INFO MapInfo,
        ULONG PointerSize,
        USHORT PropertyInType,
        USHORT PropertyOutType,
        USHORT PropertyLength,
        USHORT UserDataLength,
        PBYTE UserData,
        PULONG BufferSize,
        PWCHAR Buffer,
        PUSHORT UserDataConsumed
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["EventInfo", "MapInfo", "PointerSize", "PropertyInType", "PropertyOutType", "PropertyLength", "UserDataLength", "UserData", "BufferSize", "Buffer", "UserDataConsumed"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhGetDecodingParameter(jitter):
    """
    [ERROR_CODE_ULONG] TdhGetDecodingParameter(
        TDH_HANDLE Handle,
        PTDH_CONTEXT TdhContext
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["Handle", "TdhContext"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhGetEventInformation(jitter):
    """
    [ERROR_CODE_ULONG] TdhGetEventInformation(
        PEVENT_RECORD pEvent,
        ULONG TdhContextCount,
        PTDH_CONTEXT pTdhContext,
        PTRACE_EVENT_INFO pBuffer,
        ULONG* pBufferSize
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["pEvent", "TdhContextCount", "pTdhContext", "pBuffer", "pBufferSize"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhGetEventMapInformation(jitter):
    """
    [ERROR_CODE_ULONG] TdhGetEventMapInformation(
        PEVENT_RECORD pEvent,
        LPWSTR pMapName,
        PEVENT_MAP_INFO pBuffer,
        ULONG* pBufferSize
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["pEvent", "pMapName", "pBuffer", "pBufferSize"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhGetPropertySize(jitter):
    """
    [ERROR_CODE_ULONG] TdhGetPropertySize(
        PEVENT_RECORD pEvent,
        ULONG TdhContextCount,
        PTDH_CONTEXT pTdhContext,
        ULONG PropertyDataCount,
        PPROPERTY_DATA_DESCRIPTOR pPropertyData,
        ULONG* pPropertySize
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["pEvent", "TdhContextCount", "pTdhContext", "PropertyDataCount", "pPropertyData", "pPropertySize"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhGetProperty(jitter):
    """
    [ERROR_CODE_ULONG] TdhGetProperty(
        PEVENT_RECORD pEvent,
        ULONG TdhContextCount,
        PTDH_CONTEXT pTdhContext,
        ULONG PropertyDataCount,
        PPROPERTY_DATA_DESCRIPTOR pPropertyData,
        ULONG BufferSize,
        PBYTE pBuffer
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["pEvent", "TdhContextCount", "pTdhContext", "PropertyDataCount", "pPropertyData", "BufferSize", "pBuffer"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhGetWppMessage(jitter):
    """
    [ERROR_CODE_ULONG] TdhGetWppMessage(
        TDH_HANDLE Handle,
        PEVENT_RECORD EventRecord,
        PULONG BufferSize,
        PBYTE Buffer
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["Handle", "EventRecord", "BufferSize", "Buffer"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhGetWppProperty(jitter):
    """
    [ERROR_CODE_ULONG] TdhGetWppProperty(
        TDH_HANDLE Handle,
        PEVENT_RECORD EventRecord,
        PWSTR PropertyName,
        PULONG BufferSize,
        PBYTE Buffer
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["Handle", "EventRecord", "PropertyName", "BufferSize", "Buffer"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhLoadManifest(jitter):
    """
    [ERROR_CODE_ULONG] TdhLoadManifest(
        PWSTR Manifest
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["Manifest"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhLoadManifestFromBinary(jitter):
    """
    [ERROR_CODE_ULONG] TdhLoadManifestFromBinary(
        PWSTR BinaryPath
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["BinaryPath"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhOpenDecodingHandle(jitter):
    """
    [ERROR_CODE_ULONG] TdhOpenDecodingHandle(
        PTDH_HANDLE Handle
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["Handle"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhQueryProviderFieldInformation(jitter):
    """
    [ERROR_CODE_ULONG] TdhQueryProviderFieldInformation(
        LPGUID pGuid,
        ULONGLONG EventFieldValue,
        EVENT_FIELD_TYPE EventFieldType,
        PPROVIDER_FIELD_INFOARRAY pBuffer,
        ULONG* pBufferSize
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["pGuid", "EventFieldValue", "EventFieldType", "pBuffer", "pBufferSize"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhSetDecodingParameter(jitter):
    """
    [ERROR_CODE_ULONG] TdhSetDecodingParameter(
        TDH_HANDLE Handle,
        PTDH_CONTEXT TdhContext
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["Handle", "TdhContext"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
def tdh_TdhUnloadManifest(jitter):
    """
    [ERROR_CODE_ULONG] TdhUnloadManifest(
        PWSTR Manifest
    )
    """
    # Pop the stdcall arguments so the emulated stack stays consistent.
    ret_ad, args = jitter.func_args_stdcall(["Manifest"])
    # Unimplemented stub: fail loudly. The original ended with an unreachable
    # `jitter.func_ret_stdcall(ret_ad, ret_value)` that referenced the
    # undefined name `ret_value`; re-add it with a real value when implementing.
    raise RuntimeError('API not implemented')
| commial/temp | api_miasm/tdh.dll.py | tdh.dll.py | py | 19,244 | python | en | code | 4 | github-code | 13 |
2486816259 |
import random

# Number-guessing game: the computer picks a digit, the user guesses once.
# NOTE: indentation in this chunk was mangled (branch bodies were flush
# left, which is invalid Python); restored here. The final redundant
# `elif abs(y - x) < 3` was the logical complement of the previous branch,
# so it is now a plain `else`.
y = random.randint(0, 9)
x = int(input("Enter a number between 0 to 9 \n"))
if y == x:
    print("Exactly right answer")
elif abs(y - x) >= 3:
    print("Not close ")
else:
    print("Too Close ")
print("Computer selected {} ".format(y))
| shivanksaxena93/Python | Solutions-of-practisepython.org/GuessingGame-1.py | GuessingGame-1.py | py | 270 | python | en | code | 0 | github-code | 13 |
21632097815 | """ For a file, plots the eigenfunctions """
import numpy as np
import matplotlib.pyplot as plt
import sys
def density(rho, func):
    """Return the probability density of a radial eigenfunction.

    *rho* is accepted for call-site symmetry but is unused: the density is
    simply the squared wavefunction amplitude.
    """
    return func ** 2
infilename = sys.argv[1]

# Header layout: n, rhomax, rhomin and (optionally) omega, one per line,
# with the numeric value as the last whitespace-separated token.
# The `with` block guarantees the input file is closed (the original
# leaked the handle), and the bare `except` is narrowed to the two
# exceptions a missing/short omega line can actually raise.
with open(infilename) as infile:
    n = int(infile.readline().split()[-1])
    print(n)
    rhomax = float(infile.readline().split()[-1])
    rhomin = float(infile.readline().split()[-1])
    omega = ""
    try:
        omega = float(infile.readline().split()[-1])
    except (ValueError, IndexError):
        # No omega line in this file; keep the empty-string default.
        pass
    eigenval = [float(i) for i in infile.readline().split()]
    values = infile.readlines()

# Parse the remaining lines into a matrix; eigenvectors are stored as columns.
for row in range(len(values)):
    values[row] = list(map(float, values[row].split()))
eigenvec = np.array(values)
print(eigenvec)

# Sort columns by ascending eigenvalue, then transpose so eigenvec[i]
# is the i-th lowest-energy state. (The original reused `i` here, which
# shadowed the plotting loop variable below; renamed to `order`.)
order = np.argsort(eigenval)
eigenvec = eigenvec[:, order]
eigenvec = np.transpose(eigenvec)
print(eigenvec)

rho = np.linspace(rhomin, rhomax, n)
for i in range(3):
    densityp = density(rho, eigenvec[i])
    densityp = densityp / sum(densityp)  # normalise to unit total probability
    label = "%d. excited state" % i  # (typo "exicted" fixed)
    if i == 0:
        label = "ground state"
    plt.plot(rho, densityp, label=label)
plt.xlabel(r"$\rho$")
plt.ylabel(r"$(u(\rho))^2$")
plt.legend()
plt.savefig(infilename[:-4] + ".png")
plt.show()
| adrian2208/FYS3150_collab | Project2/plot_solutions.py | plot_solutions.py | py | 1,132 | python | en | code | 0 | github-code | 13 |
4834279732 | from django.urls import path, include
from rest_framework import routers
from job.api.views import JobCategoryViewSet, SkillViewSet, JobViewSet
# DRF router that generates the standard list/detail routes for each
# registered viewset under this URL namespace.
router = routers.SimpleRouter()
router.register('skill', SkillViewSet)
router.register('job-category', JobCategoryViewSet)
# Empty prefix: JobViewSet serves the root of this namespace.
router.register('', JobViewSet)

urlpatterns = [
    path('', include(router.urls)),
]
| jamedadi/jobnet | job/api/urls.py | urls.py | py | 356 | python | en | code | 14 | github-code | 13 |
17027231969 | from CGRtools.files import RDFread, RDFwrite
from enumeration_reaction import enumeration_cgr
from new_cycl import cycl
from constructor import constructor
import pickle
# Builds retrosynthetic rules (CGR "functional group" compositions) from an
# RDF reaction file and writes them back out as an RDF file.
#
# NOTE(review): `with RDFread():` below is called without a filename and
# without an `as` binding, yet the loop iterates the undefined name
# `reaction_file` -- this was presumably `with RDFread(<path>) as
# reaction_file:` before the file was edited. Confirm and restore before use.
det = False
with RDFread():
    fg_fg = {}
    for n, reaction in enumerate(reaction_file, start = 1):
        print(n)
        # if n != 58925:
        #     continue
        # err.write(reaction)
        # Debug filter: only reaction 180 is processed in this revision.
        if n != 180:
            continue
        reaction.meta['id'] = n
        try:
            cgrs = ~reaction
        except ValueError:
            continue
        if cgrs.center_atoms:
            v= cgrs.center_atoms
        # Skip reactions involving radicals on either side of the CGR.
        if any(x.is_radical or x.p_is_radical for _, x in cgrs.atoms()):
            continue
        if cgrs.center_atoms:
            print('len_fg_fg = ' + str(len(fg_fg)))
            # Enumerate alternative CGR mappings and accumulate rules in fg_fg.
            perebor = enumeration_cgr(reaction)
            for new_reaction in perebor:
                new_reaction.standardize()
                # new_reaction.reset_query_marks()
                new_reaction.meta['id'] = n
                if not constructor(*cycl(new_reaction),fg_fg,n):
                    print('COMPOSITION IS None '+ str(n))
                    det = True
                    break
        if det :
            break
with RDFwrite('/home/ravil/Desktop/Projects/retro/rules_09_19.rdf') as fg:
    for x in fg_fg.values():
        fg.write(x)
| neon-monster/retrosintetic_rules | trules.py | trules.py | py | 1,403 | python | en | code | 0 | github-code | 13 |
25085487533 | import os
from dotenv import load_dotenv
def get_admins(email):
    """Return True if *email* belongs to an administrator.

    Admin addresses come from the space-separated ADMIN_EMAILS variable,
    loaded from the .env file on every call.
    """
    load_dotenv(verbose=True)
    # Treat a missing ADMIN_EMAILS variable as "no admins" instead of
    # crashing on None.split() as the original did.
    admin_emails = os.getenv("ADMIN_EMAILS") or ""
    # split() without an argument also tolerates repeated whitespace
    # between entries, where split(' ') would yield empty strings.
    return email in admin_emails.split()
| CSchairez/ParMe | server/routes/helpers.py | helpers.py | py | 356 | python | en | code | 0 | github-code | 13 |
17083248727 | import random
import json
from vk_api.keyboard import VkKeyboardColor as color
# Short aliases for the VK keyboard button colours.
BLUE = color.PRIMARY # Blue
WHITE = color.SECONDARY # White
RED = color.NEGATIVE # Red
GREEN = color.POSITIVE # Green
"""
KeyBoardDoc --> https://dev.vk.com/api/bots/development/keyboard
"""
a = {'label': '___', 'color': None, 'payload': {'type': '___'}}
BUTTONS_SETTINGS: dict[str, dict] = {
'accept': {'label': 'принять', 'color': GREEN, 'payload': {'type': 'accept', 'ids': []}},
'deny': {'label': 'отказать', 'color': RED, 'payload': {'type': 'deny', 'ids': []}},
'rock': {'label': 'Камень', 'color': RED, 'payload': {'type': 'rock', 'squad': 'rps', 'ids': []}},
'paper': {'label': 'Бумага', 'color': WHITE, 'payload': {'type': 'paper', 'squad': 'rps', 'ids': []}},
'scissors': {'label': 'Ножницы', 'color': GREEN, 'payload': {'type': 'scissors', 'squad': 'rps', 'ids': []}},
'sniper': {'label': 'Снайпер', 'color': RED, 'payload': {'type': 'sniper', 'squad': 'class'}},
'solder': {'label': 'Солдат', 'color': BLUE, 'payload': {'type': 'solder', 'squad': 'class'}},
'demoman': {'label': 'Подрывник', 'color': GREEN, 'payload': {'type': 'demoman', 'squad': 'class'}},
'shot_L': {'label': 'Влево', 'color': GREEN,
'payload': {'type': '_left_', 'squad': 'game', 'step': False, 'ids': []}},
'shot_R': {'label': 'Вправо', 'color': WHITE,
'payload': {'type': '_right_', 'squad': 'game', 'step': False, 'ids': []}},
'body_shot': {'label': 'В тело', 'color': GREEN,
'payload': {'type': '_body_', 'squad': 'game', 'step': False, 'ids': []}},
'head_shot': {'label': 'В голову', 'color': RED,
'payload': {'type': '_head_', 'squad': 'game', 'step': False, 'ids': []}},
'move_R': {'label': 'Вправо', 'color': BLUE,
'payload': {'type': '_right_m', 'squad': 'game', 'step': False, 'ids': []}},
'move_L': {'label': 'Влево', 'color': BLUE,
'payload': {'type': '_left_m', 'squad': 'game', 'step': False, 'ids': []}},
'units': {'label': 'Персонажи', 'color': BLUE,
'payload': {'type': 'persons', 'squad': 'menu'}},
'back': {'label': 'Назад', 'color': BLUE,
'payload': {'type': 'back', 'squad': 'menu'}},
'menu_setting': {
'stat': {'label': 'Статистика', 'color': WHITE, 'payload': {'type': 'stat', 'squad': 'menu', 'ids': []}},
'units': {'sniper': {'label': 'Снайпер', 'color': RED,
'payload': {'type': 'sniper_up', 'squad': 'menu', 'ids': []}},
'solder': {'label': 'Солдат', 'color': BLUE,
'payload': {'type': 'solder_up', 'squad': 'menu', 'ids': []}},
'demoman': {'label': 'Подрывник', 'color': GREEN,
'payload': {'type': 'demoman_up', 'squad': 'menu', 'ids': []}}},
'lvl_up': {'damage': {'label': 'Урон', 'color': RED, 'payload': {'type': 'damage', 'squad': 'menu', 'ids': []}},
'health': {'label': 'Здоровье', 'color': GREEN,
'payload': {'type': 'health', 'squad': 'menu', 'ids': []}},
'accuracy': {'label': 'Точность', 'color': WHITE,
'payload': {'type': 'accuracy', 'squad': 'menu', 'ids': []}}}},
'sniper_stat': {'label': 'Снайпер', 'color': RED,
'payload': {'type': 'sniper_stat', 'squad': 'menu', 'ids': []}},
'solder_stat': {'label': 'Солдат', 'color': BLUE,
'payload': {'type': 'solder_stat', 'squad': 'menu', 'ids': []}},
'demoman_stat': {'label': 'Подрывник', 'color': GREEN,
'payload': {'type': 'demoman_stat', 'squad': 'menu', 'ids': []}}
}
pop_up = {"type": "show_snackbar", "text": None}
speech = {'inv': ['@id уже приглашён кем-то!', '@id ожидает своей битвы!', '@id всё ещё в раздумьях, подожди ещё!'],
'ntubut': ['Это не твоя кнопочка!❏', 'не трогай меня ╱╸◠╺╲', 'Я могу и разочароваться в тебеت'],
'==': ['@id с самим собой?〠', '@id, а-за-за, нельзя так ㋡', '@id, много умный играть против себя?'],
'ntrg': ['@id ещё не регистрировался для участия в битвах', '@id, не находится в списках МГЕ...'],
'wait': ['Ожидай ответа!']}
| SHkipperX/Mge_Bot | button.py | button.py | py | 4,739 | python | ru | code | 1 | github-code | 13 |
8616422528 | from django.conf.urls import url, include
from django.contrib import admin
from . import views
urlpatterns = [
url(r'^admin/', admin.site.urls),
url(r'channel/(?P<title>.+)/$', views.channel, name='channel'),
url(r'^api/', include('api.urls')),
url(r'^api-auth/', include(
'rest_framework.urls', namespace='rest_framework')),
url(r'^', views.cate),
]
| Windsooon/youtube-channels | django_base/django_base/urls.py | urls.py | py | 380 | python | en | code | 0 | github-code | 13 |
32023481820 | from aiolava.misc import HTTPMethod
from aiolava.endpoints.base import LavaEndpoint
from aiolava.types.wallet.create_invoice import CreateInvoiceResponse
class CreateInvoice(LavaEndpoint):
__http_method__ = HTTPMethod.POST
__endpoint__ = "/invoice/create"
__returns__ = CreateInvoiceResponse
wallet_to: str
sum: float
order_id: str = None
hook_url: str = None
success_url: str = None
fail_url: str = None
expire: int = None
subtract: str = None
custom_fields: str = None
comment: str = None
merchant_id: str = None
merchant_name: str = None
| TheArcherST/aiolava | aiolava/endpoints/wallet/create_invoice.py | create_invoice.py | py | 605 | python | en | code | 1 | github-code | 13 |
7329228026 | from copy import deepcopy
import cv2
import os
import numpy as np
class MMImage:
    """Thin wrapper around a cv2 image plus a set of named processing methods.

    The method named by ``self.method`` (e.g. ``'blur'`` -> ``_blur``) is
    dispatched via ``getattr`` in :meth:`process`.
    """
    def __init__(self, method='blur'):
        # Name of the processing method, without the leading underscore.
        self.method = method
        # Loaded image (BGR, possibly with alpha); set by load_image().
        self.img = None
    def process(self, save_path='', para=[]):
        """Run the selected ``_<method>`` with *para* and write the result.

        NOTE(review): the ``if`` below is a no-op, and an empty save_path
        is passed straight to cv2.imwrite; ``para=[]`` is a mutable default
        (harmless here since it is never mutated).
        """
        if save_path != '':
            save_path = save_path
        img_out = getattr(self, "_"+self.method)(para)
        cv2.imwrite(save_path, img_out)
    def load_image(self, image_path):
        """Load an image from disk, keeping any alpha channel."""
        img = cv2.imread(image_path, cv2.IMREAD_UNCHANGED)
        self.img = img
    def _blur(self, para=[]):
        """Box blur; para = (type_method, kernel_size). type_method is unused."""
        type_method, kernal = para
        img_out = cv2.blur(self.img, (kernal, kernal)) # mean over a kernal x kernal window
        return img_out
    def _contour(self,para=[]):
        """Binarise the image at 128 and draw all contours in (0, 0, 255)."""
        gray_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        dep, img_bin = cv2.threshold(gray_img, 128, 255, cv2.THRESH_BINARY)
        image_, contours = cv2.findContours(img_bin, mode=cv2.RETR_TREE, method=cv2.CHAIN_APPROX_SIMPLE)
        to_write = self.img.copy()
        # cv2.drawContours(img,contours,0,(0,0,255),3)
        ret = cv2.drawContours(to_write,image_,-1,(0,0,255),3)
        return ret
    def _hist(self, para=[]):
        """Histogram-equalise the grayscale version of the image."""
        gray_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        img = gray_img.astype(np.uint8)
        return cv2.equalizeHist(img)
    def _watershed_contour(self, para=[]):
        """Watershed segmentation seeded from Canny-edge contours; returns an
        RGB image with randomly coloured segments blended over the input."""
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY) # convert to grayscale
        # Find and draw the image contours
        Gauss = cv2.GaussianBlur(gray, (5, 5), sigmaX=4.0)
        grad = cv2.Canny(Gauss, 50, 150)
        grad, contours = cv2.findContours(grad, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # find contours
        markers = np.zeros(self.img.shape[:2], np.int32) # marker image: contour regions labelled by index
        for index in range(len(contours)): # label each contour region with its index
            markers = cv2.drawContours(markers, grad, index, (index, index, index), 1, 8, contours)
        ContoursMarkers = np.zeros(self.img.shape[:2], np.uint8)
        ContoursMarkers[markers > 0] = 255
        # Watershed algorithm
        markers = cv2.watershed(self.img, markers) # boundary pixels are labelled -1
        WatershedMarkers = cv2.convertScaleAbs(markers)
        # Fill the segmented regions with random colours
        bgrMarkers = np.zeros_like(self.img)
        for i in range(len(contours)):
            colorKind = [np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)]
            bgrMarkers[markers == i] = colorKind
        bgrFilled = cv2.addWeighted(self.img, 0.67, bgrMarkers, 0.33, 0)
        return cv2.cvtColor(bgrFilled, cv2.COLOR_BGR2RGB)
    def _watershed(self,para):
        """Marker-based watershed segmentation via morphological gradient and
        distance transform; returns an RGB image of coloured regions."""
        gray = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY) # convert to grayscale
        # Morphological gradient of the image
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (5, 5)) # 5x5 structuring element
        grad = cv2.morphologyEx(gray, cv2.MORPH_GRADIENT, kernel) # morphological gradient
        # Threshold into a black/white binary image
        _, thresh = cv2.threshold(np.uint8(grad), 0.2*grad.max(), 255, cv2.THRESH_BINARY)
        # Morphology to produce the "sure background" region
        kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3)) # 3x3 structuring element
        opening = cv2.morphologyEx(thresh, cv2.MORPH_OPEN, kernel, iterations=2) # opening removes noise
        sure_bg = cv2.dilate(opening, kernel, iterations=3) # dilation -> "sure background" region
        # Distance transform to produce the "sure foreground" region
        distance = cv2.distanceTransform(opening, cv2.DIST_L2, 5) # DIST_L2: 3/5
        _, sure_fg = cv2.threshold(distance, 0.1 * distance.max(), 255, 0) # 0.1*max works well empirically
        sure_fg = np.uint8(sure_fg)
        # Connected-component labelling
        ret, component = cv2.connectedComponents(sure_fg, connectivity=8) # labels regions 0..N-1
        markers = component + 1 # watershed labels start at 1, components at 0
        kinds = markers.max() # number of labelled regions
        maxKind = np.argmax(np.bincount(markers.flatten())) # most frequent label = largest area, used as background
        markersBGR = np.ones_like(self.img) * 255
        for i in range(kinds):
            if (i!=maxKind):
                colorKind = [np.random.randint(0, 255), np.random.randint(0, 255), np.random.randint(0, 255)]
                markersBGR[markers == i] = colorKind
        # Remove the background portion from the labelled regions
        unknown = cv2.subtract(sure_bg, sure_fg) # unknown region: overlap of foreground and background
        markers[unknown == 255] = 0 # zero out regions belonging to the background
        # Watershed marks the object boundaries
        markers = cv2.watershed(self.img, markers) # boundary pixels are labelled -1
        kinds = markers.max() # number of labelled regions
        # Overlay the boundaries on the original image
        imgWatershed = self.img.copy()
        imgWatershed[markers == -1] = [0, 0, 255] # paint watershed boundary pixels (0, 0, 255)
        # print(self.img.shape, markers.shape, markers.max(), markers.min(), ret)
        return cv2.cvtColor(markersBGR, cv2.COLOR_BGR2RGB)
    def _canny(self,para=[100,200]):
        """Canny edge detection with thresholds para[0] / para[1]."""
        return cv2.Canny(self.img, para[0], para[1])
    def _corner(self,para = 0.01):
        """Harris corner detection; responses above para*max are painted
        with colour [255, 0, 0] on a copy of the image."""
        gray_img = cv2.cvtColor(self.img, cv2.COLOR_BGR2GRAY)
        gray_img = np.float32(gray_img)
        Harris_detector = cv2.cornerHarris(gray_img, 2, 3, 0.04)
        dst = cv2.dilate(Harris_detector, None)
        # Threshold relative to the strongest corner response
        thres = para*dst.max()
        img_out = deepcopy(self.img)
        img_out[dst > thres] = [255,0,0]
        return img_out
| OpenXLab-Edu/OpenBaseLab-Edu | tools/mmImage.py | mmImage.py | py | 5,796 | python | en | code | 5 | github-code | 13 |
74920632978 |
import argparse
import csv
import logging
import multiprocessing
import os
import pickle
import pprint
from ruffus.proxy_logger import *
from scipy.stats import kendalltau, pearsonr, spearmanr
from predict_regulons import *
def parseCommandLineArguments():
    """Build the CLI parser and parse sys.argv.

    All inputs (probe-set CSV, GTF, genome, counts, STAR index, maps) are
    mandatory; only --cpu is optional and defaults to 1.
    """
    parser = argparse.ArgumentParser(prog="predict_regulons.py",
                                     description="""Performs all related operations to predict regulons
                                     """)
    # Mandatory arguments
    parser.add_argument("--probe_set_info", "-pb", required=True,
                        help="Please enter the name of the csv file containing all information about probe sets, genes and regulons")
    parser.add_argument("--output", "-o", required=True,
                        help="Please enter the name of the output directory. Download will be skipped if file is present")
    parser.add_argument("--gtf", "-gtf", required=True,
                        help="Enter the GTF file")
    parser.add_argument("--genome", "-g", required=True,
                        help="Enter the genome fasta file")
    parser.add_argument("--counts", "-c", required=True,
                        help="Enter the name of the counts file")
    parser.add_argument("--star_index", "-star_index", required=True,
                        help="Enter the location of STAR index")
    parser.add_argument("--transcript_to_gene_map", "-map", required=True,
                        help="Enter the transcript to gene map")
    parser.add_argument("--genes_in_microarray", "-gm", required=True,
                        help="Genes represented in microarrat")
    # Optional arguments
    parser.add_argument("--cpu", "-n", default=1,
                        help="Enter the number of CPUs to be used.")
    return parser.parse_args()
def calculateSpearmanInParallel(eachinput):
    """Spearman rank correlation for a batch of gene pairs.

    Each element of *eachinput* is (X, Y, gene1, gene2); the result is a
    list of [rho, gene1, gene2] in the same order (worker for pool.map).
    """
    return [[spearmanr(x_vals, y_vals)[0], g1, g2]
            for x_vals, y_vals, g1, g2 in eachinput]
def calculatePearsonrInParallel(eachinput):
    """Pearson correlation for a batch of gene pairs.

    Each element of *eachinput* is (X, Y, gene1, gene2); the result is a
    list of [r, gene1, gene2] in the same order (worker for pool.map).
    """
    return [[pearsonr(x_vals, y_vals)[0], g1, g2]
            for x_vals, y_vals, g1, g2 in eachinput]
def calculateKendallTauInParallel(eachinput):
    """Kendall-tau correlation for a batch of gene pairs.

    Each element of *eachinput* is (X, Y, gene1, gene2); the result is a
    list of [tau, gene1, gene2] in the same order (worker for pool.map).
    """
    return [[kendalltau(x_vals, y_vals)[0], g1, g2]
            for x_vals, y_vals, g1, g2 in eachinput]
def computeGeneCorrelation(options,logging_mutex,logger_proxy,countsdata_matrix,n_comp):
    """Compute pairwise Pearson, Spearman and Kendall-tau correlations
    between all genes, using only the first *n_comp* PCA components of
    each gene's profile.

    One tab-separated output file per metric is written under
    options.output. Genes already present in an existing output file are
    skipped, so an interrupted run can be resumed.
    """
    # Truncate every gene's vector to the first n_comp components.
    countsdata_matrix_n_comp = {}
    for gene in countsdata_matrix:
        countsdata_matrix_n_comp[gene] = countsdata_matrix[gene][:n_comp]
    pearson_filename = f"{options.output}/pearson_{n_comp}.txt"
    spearman_filename = f"{options.output}/spearman_{n_comp}.txt"
    kendalltau_filename = f"{options.output}/kendall_{n_comp}.txt"
    genes_to_be_skipped_pr=[]
    genes_to_be_skipped_sp=[]
    genes_to_be_skipped_kt=[]
    # For each metric: create the file, or -- when resuming -- collect the
    # first-gene column of the existing file so those genes are skipped.
    if os.path.exists(pearson_filename)==False:
        fhw_pearson_filename = open(pearson_filename,"w",buffering = 1)
    else:
        genes_to_be_skipped_pr=[]
        fhr=open(pearson_filename,"r")
        for line in fhr:
            gene1,gene2,corr=line.strip().split("\t")
            genes_to_be_skipped_pr.append(gene1)
        fhr.close()
        genes_to_be_skipped_pr=list(set(genes_to_be_skipped_pr))
        fhw_pearson_filename = open(pearson_filename,"a",buffering = 1)
    # BUGFIX: this branch previously tested and read pearson_filename, so
    # the Spearman resume list came from the wrong file.
    if os.path.exists(spearman_filename)==False:
        fhw_spearman_filename = open(spearman_filename,"w",buffering = 1)
    else:
        genes_to_be_skipped_sp=[]
        fhr=open(spearman_filename,"r")
        for line in fhr:
            gene1,gene2,corr=line.strip().split("\t")
            genes_to_be_skipped_sp.append(gene1)
        fhr.close()
        genes_to_be_skipped_sp=list(set(genes_to_be_skipped_sp))
        fhw_spearman_filename = open(spearman_filename,"a",buffering = 1)
    # BUGFIX: same copy-paste problem for the Kendall-tau file.
    if os.path.exists(kendalltau_filename)==False:
        fhw_kendalltau_filename = open(kendalltau_filename,"w",buffering = 1)
    else:
        genes_to_be_skipped_kt=[]
        fhr=open(kendalltau_filename,"r")
        for line in fhr:
            gene1,gene2,corr=line.strip().split("\t")
            genes_to_be_skipped_kt.append(gene1)
        fhr.close()
        genes_to_be_skipped_kt=list(set(genes_to_be_skipped_kt))
        fhw_kendalltau_filename = open(kendalltau_filename,"a",buffering = 1)
    pool = multiprocessing.Pool(processes=int(options.cpu))
    allinputs_sp,allinputs_pr,allinputs_kt = [],[],[]
    with logging_mutex:
        logger_proxy.info(f"Number of genes to be skipped Pearson {len(genes_to_be_skipped_pr)}")
        logger_proxy.info(f"Number of genes to be skipped Spearman {len(genes_to_be_skipped_sp)}")
        logger_proxy.info(f"Number of genes to be skipped Kendall-tau {len(genes_to_be_skipped_kt)}")
        print(f"Number of genes to be skipped Pearson {len(genes_to_be_skipped_pr)}")
        print(f"Number of genes to be skipped Spearman {len(genes_to_be_skipped_sp)}")
        print(f"Number of genes to be skipped Kendall-tau {len(genes_to_be_skipped_kt)}")
        sys.stdout.flush()
    genenames = list(countsdata_matrix.keys())
    for gene_num1 in range(len(genenames)):
        allinputs_per_gene_pr,allinputs_per_gene_sp,allinputs_per_gene_kt=[],[],[]
        gene1 = genenames[gene_num1]
        # Skip genes whose truncated profile is identically zero.
        if len(set(countsdata_matrix_n_comp[gene1]))==1 and countsdata_matrix_n_comp[gene1][0]==0:
            continue
        # Pair gene1 with every later gene (upper triangle only) per metric.
        if genenames[gene_num1] not in set(genes_to_be_skipped_pr):
            gene_num2=gene_num1+1
            while gene_num2!=len(genenames):
                gene2=genenames[gene_num2]
                allinputs_per_gene_pr.append([countsdata_matrix_n_comp[gene1],countsdata_matrix_n_comp[gene2],genenames[gene_num1],genenames[gene_num2]])
                gene_num2+=1
        if genenames[gene_num1] not in set(genes_to_be_skipped_sp):
            gene_num2=gene_num1+1
            while gene_num2!=len(genenames):
                gene2=genenames[gene_num2]
                allinputs_per_gene_sp.append([countsdata_matrix_n_comp[gene1],countsdata_matrix_n_comp[gene2],genenames[gene_num1],genenames[gene_num2]])
                gene_num2+=1
        if genenames[gene_num1] not in set(genes_to_be_skipped_kt):
            gene_num2=gene_num1+1
            while gene_num2!=len(genenames):
                gene2=genenames[gene_num2]
                allinputs_per_gene_kt.append([countsdata_matrix_n_comp[gene1],countsdata_matrix_n_comp[gene2],genenames[gene_num1],genenames[gene_num2]])
                gene_num2+=1
        with logging_mutex:
            logger_proxy.info(f"Processed {gene_num1} {len(allinputs_per_gene_pr)} {len(allinputs_per_gene_sp)} {len(allinputs_per_gene_kt)}")
        allinputs_pr.append(allinputs_per_gene_pr)
        allinputs_sp.append(allinputs_per_gene_sp)
        allinputs_kt.append(allinputs_per_gene_kt)
        #if len(allinputs)>=int(options.cpu):
        with logging_mutex:
            logger_proxy.info(f"Starting calculations with {n_comp} components and {gene_num1} gene")
        results=pool.map(calculatePearsonrInParallel,allinputs_pr)
        for row in results:
            for corr,gene1,gene2 in row:
                print(f"Writing to Pearson file {n_comp}")
                sys.stdout.flush()
                fhw_pearson_filename.write("\t".join([gene1,gene2,str(corr)])+"\n")
        results=pool.map(calculateSpearmanInParallel,allinputs_sp)
        for row in results:
            for corr,gene1,gene2 in row:
                print(f"Writing to Spearman file {n_comp}")
                sys.stdout.flush()
                fhw_spearman_filename.write("\t".join([gene1,gene2,str(corr)])+"\n")
        results=pool.map(calculateKendallTauInParallel,allinputs_kt)
        for row in results:
            for corr,gene1,gene2 in row:
                print(f"Writing to Kendall-tau file {n_comp}")
                sys.stdout.flush()
                fhw_kendalltau_filename.write("\t".join([gene1,gene2,str(corr)])+"\n")
        # The batches are drained every iteration; clear them for the next gene.
        allinputs_sp,allinputs_pr,allinputs_kt = [],[],[]
    fhw_spearman_filename.close()
    fhw_pearson_filename.close()
    fhw_kendalltau_filename.close()
    pool.close()
    pool.join()
def configureLogger(options):
    """Set up a multiprocessing-safe progress logger in the output directory.

    Returns (logger_proxy, logging_mutex) as produced by ruffus'
    make_shared_logger_and_proxy.
    """
    log_filename = options.output + "/calculate_correlations_progress.log"
    # Remove a stale log from a previous run; os.remove replaces the
    # original `os.system("rm -f ...")` shell-out (portable, no subshell).
    try:
        os.remove(log_filename)
    except FileNotFoundError:
        pass
    arguments = {
        "file_name": log_filename,
        "formatter": "%(asctime)s - %(name)s - %(levelname)6s - %(message)s",
        "level": logging.DEBUG,
        "delay": False,
    }
    (logger_proxy, logging_mutex) = make_shared_logger_and_proxy(
        setup_std_shared_logger, "calculate_correlations", arguments)
    return logger_proxy, logging_mutex
def main():
    """Driver: parse args, load gene->regulon labels, compute or load the
    cached PCA, then sweep n_comp values computing pairwise correlations."""
    commandLineArg=sys.argv
    # NOTE(review): prints a hint but does not exit; argparse will still
    # error out on the missing required arguments below.
    if len(commandLineArg)==1:
        print("Please use the --help option to get usage information")
    options=parseCommandLineArguments()
    os.system("mkdir -p "+options.output)
    logger_proxy,logging_mutex=configureLogger(options)
    readFromProbeSetFile(options)
    # Map each gene to an integer regulon label; 'X' means "no regulon" (0).
    fhr = open(options.output+"/genes_to_regulons.tsv","r")
    gene_to_regulon = {}
    regulon_to_labels = {}
    labels_to_regulons = {}
    label_number = 1
    for line in fhr:
        gene,regulon = line.split("\t")
        if regulon=='X':
            gene_to_regulon[gene] = 0
            regulon_to_labels[regulon] = 0
        else:
            if regulon not in regulon_to_labels:
                regulon_to_labels[regulon] = label_number
                labels_to_regulons[label_number] = regulon
                gene_to_regulon[gene] = regulon_to_labels[regulon]
                label_number+=1
            else:
                gene_to_regulon[gene] = regulon_to_labels[regulon]
    fhr.close()
    # Cache the PCA of the counts matrix; recompute only if the pickle is absent.
    options.pca_pkl_file = options.output+"/pca.pkl"
    #os.system("rm "+options.pca_pkl_file)
    if os.path.exists(options.pca_pkl_file)==False:
        countsdata = readCountsFile(options.counts,list(gene_to_regulon.keys()))
        countsdata_matrix_pca_all_components = performPCA(countsdata)
        pickle.dump(countsdata_matrix_pca_all_components,open(options.pca_pkl_file,"wb"))
    else:
        countsdata = readCountsFile(options.counts,list(gene_to_regulon.keys()))
        pca_data = pickle.load(open(options.pca_pkl_file,"rb"))
        # Rebuild the gene -> components mapping; assumes pickle rows are in
        # the same order readCountsFile yields genes -- TODO confirm.
        countsdata_matrix_pca_all_components = {}
        for gene_num,gene in enumerate(countsdata):
            countsdata_matrix_pca_all_components[gene.strip()] = list(pca_data[gene_num])
    """for gene in countsdata_matrix_pca_all_components:
        print(gene,countsdata_matrix_pca_all_components[gene])
    return"""
    # Sweep the number of retained PCA components (175, 200, ..., 500).
    for pca_comp in list(range(150,501,25))[1:]:
        with logging_mutex:
            logger_proxy.info(f"Calling countsdata_matrix_pca_all_components with {pca_comp} components")
            print(f"Calling countsdata_matrix_pca_all_components with {pca_comp} components")
            sys.stdout.flush()
        computeGeneCorrelation(options, logging_mutex,logger_proxy,countsdata_matrix = countsdata_matrix_pca_all_components,n_comp=pca_comp)
if __name__ == "__main__":
main()
| priyanka8590/ReguloPred | calculate_correlations.py | calculate_correlations.py | py | 11,281 | python | en | code | 0 | github-code | 13 |
13251203845 | import networkx as nx
import numpy as np
import pymetis
import copy
import pandas as pd
import argparse
# CLI: the only positional argument is the network name (input file stem).
parser = argparse.ArgumentParser()
parser.add_argument("netname")
args = parser.parse_args()
netname = str(args.netname)
# Election year used in the input/output paths below.
year = "2023"
# Non-zero: enrich nodes with candidate metadata read from the CSV files.
CANDIDATES_INFORMATION = 1
def __init__(self, name, filename):
self.name = name
self.directed_graph = read_graphml_file(filename)
self.undirected_graph = self.directed_graph.to_undirected()
largest_cc = max(nx.connected_components(self.undirected_graph), key=len)
self.giant_component = self.undirected_graph.subgraph(largest_cc).copy()
self.giant_component_int = nx.convert_node_labels_to_integers(self.giant_component, first_label = 0, ordering = 'default', label_attribute = 'user_id')
def get_giant_component_fraction(self):
return len(self.giant_component)/len(self.undirected_graph)
def get_adjacency_dict(self):
adj_list = {}
for node in self.giant_component_int.nodes:
neighbors = list(self.giant_component_int.neighbors(node))
adj_list[node] = neighbors
return adj_list
def read_graphml_file(filename):
    """Load a graph from a GraphML file.

    Returns the NetworkX graph on success, or None (after printing the
    error) when reading fails for any reason.
    """
    try:
        return nx.read_graphml(filename)
    except Exception as e:
        print(f"Error: {e}")
        return None
def get_partition(net):
    """Bisect the giant component of *net* with METIS.

    Returns (n_cuts, membership) where membership maps every integer node
    id to its part index (0 or 1).
    """
    neighbors_by_node = net.get_adjacency_dict()
    adjacency = [np.asarray(nbrs) for nbrs in neighbors_by_node.values()]
    n_cuts, parts = pymetis.part_graph(
        nparts=2,
        adjacency=adjacency,
        options=pymetis.Options(ufactor=400, niter=100, contig=True),
    )
    return n_cuts, dict(zip(neighbors_by_node.keys(), parts))
def finetune_partition(net, membership):
    """Greedily improve a 2-way partition by flipping 'loner' nodes.

    A loner is a node whose own cluster label (read from the 'cluster'
    node attribute, which must already be set) appears on none of its
    neighbours. Each loner is tentatively moved to the other side and the
    move is kept only if it raises modularity. Mutates and returns
    *membership*. (Removed the unused `potential_bridge_nodes` list.)
    """
    loner_nodes = []
    for node in net.giant_component_int.nodes:
        neighbors = net.giant_component_int.neighbors(node)
        neighbors_cluster = {net.giant_component_int.nodes[n]["cluster"] for n in neighbors}
        if membership[node] not in neighbors_cluster:
            loner_nodes.append(node)
    # Work on a scratch copy so rejected flips can be undone cheaply.
    membership_finetuned = copy.deepcopy(membership)
    c0 = {k for k, v in membership.items() if v == 0}
    c1 = {k for k, v in membership.items() if v == 1}
    q_best = nx.community.modularity(net.giant_component_int, [c0, c1])
    print(f"Before finetuning modularity is {q_best}")
    for node in loner_nodes:
        if membership[node] == 0:
            membership_finetuned[node] = 1
            new_label = 1
        else:
            membership_finetuned[node] = 0
            new_label = 0
        c0_candidate = {k for k, v in membership_finetuned.items() if v == 0}
        c1_candidate = {k for k, v in membership_finetuned.items() if v == 1}
        new_q = nx.community.modularity(net.giant_component_int, [c0_candidate, c1_candidate])
        if new_q > q_best:
            print(f"Improvement {new_q - q_best} by swapping node {node}")
            membership[node] = new_label
            q_best = new_q
        else:
            print(f"No improvement by swapping node {node}")
            # Revert the tentative flip on the scratch copy.
            membership_finetuned[node] = 1-new_label
    print(f"After finetuning modularity is {q_best}")
    return membership
def get_candidate_mappings():
    """Read the two candidate CSVs and build twitter-id lookup tables.

    Returns six dicts keyed by (string) twitter id: screen name, party,
    age, sex, home town and language.
    """
    basic = pd.read_csv("candidates-2023.csv")
    full = pd.read_csv("candidates2023-complete.csv")
    string_ids = basic.id.astype(str)
    id_2_candidate = dict(zip(string_ids, basic.screen_name))
    candidate_2_id = dict(zip(basic.screen_name, string_ids))
    # Join the detailed table onto twitter ids via the shared screen name.
    full['twitter_id'] = full['screen_name'].map(candidate_2_id)
    id_2_party = dict(zip(full.twitter_id, full.puolue))
    id_2_age = dict(zip(full.twitter_id, full.ikä))
    id_2_sex = dict(zip(full.twitter_id, full.sukupuoli))
    id_2_hometown = dict(zip(full.twitter_id, full.kotikunta))
    id_2_lang = dict(zip(full.twitter_id, full.kieli))
    return id_2_candidate, id_2_party, id_2_age, id_2_sex, id_2_hometown, id_2_lang
def run_pipeline():
    """Build and save a rich GraphML network for one (netname, year) pair.

    Loads the pure network, bi-partitions its giant component, finetunes the
    partition, optionally attaches per-candidate metadata, and writes the
    enriched giant component to ``./rich-networks/``.

    Relies on module-level globals ``netname``, ``year`` and
    ``CANDIDATES_INFORMATION``.
    """
    filename = f"./pure-networks/{year}/{netname}_{year}_net.graphml"
    net = SocialNetwork(name=f"{netname}{year}", filename=filename)
    n_cuts, membership = get_partition(net)
    # ATTRIBUTE 1: Original partition
    nx.set_node_attributes(net.giant_component_int, membership, name="cluster")
    membership = finetune_partition(net, membership)
    # ATTRIBUTE 2: Finetuned partition
    nx.set_node_attributes(net.giant_component_int, membership, name="finetuned_cluster")
    if CANDIDATES_INFORMATION:
        # ATTRIBUTE 3: Candidate information
        id_2_candidate, id_2_party, id_2_age, id_2_sex, id_2_hometown, id_2_lang = get_candidate_mappings()
        screen_name_attributes = dict()
        party_attributes = dict()
        sex_attributes = dict()
        language_attributes = dict()
        for node in net.giant_component_int.nodes():
            node_user_id = net.giant_component_int.nodes[node]["user_id"]
            try:
                if node_user_id in id_2_candidate:
                    screen_name_attributes[node] = id_2_candidate[node_user_id].rstrip()
                    party_attributes[node] = id_2_party[node_user_id].rstrip()
                    sex_attributes[node] = id_2_sex[node_user_id]
                    language_attributes[node] = id_2_lang[node_user_id]
                else:
                    # Non-candidate accounts get explicit "NA" markers.
                    screen_name_attributes[node] = "NA"
                    party_attributes[node] = "NA"
                    sex_attributes[node] = "NA"
                    language_attributes[node] = "NA"
            # Was a bare `except:`, which also swallowed KeyboardInterrupt and
            # SystemExit; keep the best-effort "NA" fallback, but only for
            # genuine errors (e.g. a missing/odd attribute value).
            except Exception:
                screen_name_attributes[node] = "NA"
                party_attributes[node] = "NA"
                sex_attributes[node] = "NA"
                language_attributes[node] = "NA"
                print(f"Error with node {node_user_id}")
        nx.set_node_attributes(net.giant_component_int, screen_name_attributes, "screen_name")
        nx.set_node_attributes(net.giant_component_int, party_attributes, "party")
        nx.set_node_attributes(net.giant_component_int, sex_attributes, "sex")
        nx.set_node_attributes(net.giant_component_int, language_attributes, "language")
    nx.write_graphml_lxml(net.giant_component_int, f"./rich-networks/{year}/RICH_{netname}_{year}_NET.graphml")
if __name__ == "__main__":
run_pipeline() | alesalloum/political-polarization | enrich_network.py | enrich_network.py | py | 6,715 | python | en | code | 0 | github-code | 13 |
19067855406 | from django.db import models
from django.contrib.auth.models import User
class Problem(models.Model):
    """A judge problem that users can attempt and solve."""
    # (stored value, human-readable label) pairs for the difficulty field.
    difficulty_choices = [("Easy", "Easy"), ("Medium", "Medium"), ("Difficult", "Difficult")]
    # External/display identifier; distinct from the implicit DB primary key.
    problem_id = models.CharField(max_length=120)
    problem_title = models.CharField(max_length=200)
    problem_statement = models.TextField(max_length=10000)
    difficulty = models.CharField(max_length=50, choices=difficulty_choices, default="Easy")
    # Aggregate counters maintained by the application (not derived here).
    count_solved = models.IntegerField(default=0)
    count_attempted = models.IntegerField(default=0)
    # Uploader; kept (as NULL) if the uploading user account is deleted.
    user_uploaded = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
class TestCase(models.Model):
    """A single input/expected-output pair belonging to a Problem."""
    # NOTE(review): despite the name, this field stores a Problem FK, not a
    # bare id — confirm whether renaming (in a migration) is desirable.
    problem_id = models.ForeignKey(Problem, on_delete=models.CASCADE)
    testcase_id = models.CharField(max_length=120)
    # Raw input fed to the submission, and the expected output to compare against.
    testcase = models.TextField(max_length=10000)
    output = models.TextField(max_length=10000)
class Submission(models.Model):
    """One user's submitted solution attempt for a Problem."""
    # Possible judge verdicts, stored verbatim.
    verdict_choices = [("ACCEPTED", "ACCEPTED"), ("WRONG ANSWER", "WRONG ANSWER"),
                       ("TIME LIMIT EXCEEDED", "TIME LIMIT EXCEEDED"), ("COMPILATION ERROR", "COMPILATION ERROR"),
                       ("MEMORY LIMIT EXCEEDED", "MEMORY LIMIT EXCEEDED")]
    language_choices = [("CPP", "CPP"), ("JAVA", "JAVA"), ("PYTHON", "PYTHON")]
    submission_id = models.CharField(max_length=120)
    # Submitter; kept (as NULL) if the user account is deleted.
    user = models.ForeignKey(User, on_delete=models.SET_NULL, null=True)
    problem = models.ForeignKey(Problem, on_delete=models.CASCADE)
    # Full source code as submitted.
    code = models.TextField(max_length=10000)
    language = models.CharField(max_length=100,choices = language_choices, default="CPP")
    verdict = models.CharField(max_length=100, choices=verdict_choices, default="WRONG ANSWER")
    # Set once, when the submission row is created.
    time = models.DateTimeField(auto_now_add=True)
| Surya-Varman/OnlineJudge | code_execution/models.py | models.py | py | 1,752 | python | en | code | 0 | github-code | 13 |
10011884236 | import psycopg2
import numpy as np
import psycopg2.extras as extras
import pandas as pd
import time
import threading
import requests
import json
def get_weather_data(api_key, city_id, id_number):
    """Fetch the current weather for one OpenWeatherMap city.

    Parameters
    ----------
    api_key : str
        OpenWeatherMap API key.
    city_id : str
        OpenWeatherMap city id (e.g. "323786" for Ankara).
    id_number : int
        Value stored in the returned frame's ``id`` column.

    Returns
    -------
    pandas.DataFrame
        Single-row frame with columns id, city, temperature, wind_speed,
        weather_type and optime (local timestamp of the request).
    """
    api_url = "http://api.openweathermap.org/data/2.5/weather"
    params = {
        "id": city_id,
        "units": "metric",
        "appid": api_key
    }
    # Bug fix: requests has no default timeout, so a stalled connection could
    # hang the hourly polling loop forever. raise_for_status() additionally
    # turns HTTP errors into a clear exception instead of a confusing KeyError
    # when the expected JSON fields are missing.
    response = requests.get(api_url, params=params, timeout=30)
    response.raise_for_status()
    data = response.json()
    city = data["name"]
    temperature = data["main"]["temp"]
    wind_speed = data["wind"]["speed"]
    description = data["weather"][0]["description"]
    optime = pd.Timestamp('now').strftime("%Y-%m-%d %H:%M:%S")
    weather_ankara = pd.DataFrame({
        'id': [id_number],
        'city': [city],
        'temperature': [temperature],
        'wind_speed': [wind_speed],
        'weather_type': [description],
        'optime': [optime]
    })
    return weather_ankara
def get_hourly_weather_data(sira):
    """Fetch one weather snapshot and tag it with row id ``sira`` + 1.

    Uses the module-level ``api_key`` and ``city_id``. Note: the original
    wrapped this in a ``while True`` loop that returned on the first
    iteration, so the loop was dead code and has been removed — behaviour
    is unchanged.
    """
    target_data = get_weather_data(api_key, city_id, sira)
    # Shift to 1-based row ids for the database table.
    target_data["id"] = sira + 1
    print(target_data.head(5))
    return target_data
baslama=0  # module-level row-id counter, shared with execute_values() via `global`
def execute_values(conn, table):
    """Poll the weather API every 5 seconds and insert each row into ``table``.

    Stops after ~100 successful inserts or on the first database error, and
    returns 1 in either case. Advances the module-level ``baslama`` counter,
    which provides the row id for each fetch.
    """
    global baslama
    # Bug fix: the counter used to be re-initialised to 0 inside the loop, so
    # the `> 100` stop condition below could never fire and the loop ran forever.
    sayac = 0
    while True:
        print("Değer:", baslama)
        girdi = baslama
        print("Girdi:", girdi)
        df = get_hourly_weather_data(girdi)
        tuples = [tuple(x) for x in df.to_numpy()]
        cols = ','.join(list(df.columns))
        # SQL query to execute
        query = "INSERT INTO %s(%s) VALUES %%s" % (table, cols)
        cursor = conn.cursor()
        try:
            extras.execute_values(cursor, query, tuples)
            conn.commit()
        except (Exception, psycopg2.DatabaseError) as error:
            print("Error: %s" % error)
            conn.rollback()
            cursor.close()
            return 1
        # Bug fix: the cursor was previously only closed on the error path,
        # leaking one server-side cursor per successful insert.
        cursor.close()
        print("the dataframe is inserted")
        sayac += 1
        baslama = girdi + 1
        print("Değer Son:", baslama)
        if sayac > 100:
            print("sayac:", sayac)
            return 1
        time.sleep(5)
# --- configuration and entry point -----------------------------------------
api_key = "<YOUR_API_KEY>"
city_id = "323786" # Ankara
print("Connection doing!")
conn = psycopg2.connect(
    database='<YOUR_DB>', user='<YOUR_USER>', password='<YOUR_PASS>', host='localhost', port='5432'
)
# Bug fix: the original passed target=execute_values(conn, 'weather'), which
# CALLED the function immediately in the main thread and handed its return
# value (1, not a callable) to Thread. Pass the callable and its arguments
# separately so the polling loop actually runs in the worker thread.
thread = threading.Thread(target=execute_values, args=(conn, 'weather'))
thread.start()
print('Waiting for the thread...')
thread.join()
| Barissdal/ETL_From_API_With_Python_To_PostgreSQL_And_Schedule | etl_api_data_with_python_into_postgress.py | etl_api_data_with_python_into_postgress.py | py | 2,575 | python | en | code | 0 | github-code | 13 |
27691941513 | from torch.nn import functional as F, ModuleList, Module, Dropout
from torch_geometric.nn import GCNConv, GATConv
class GCN(Module):
    """A stack of GCNConv layers, each followed by ReLU and dropout.

    ``convs`` maps an arbitrary key to an ``(input_dim, output_dim, dropout)``
    triple; one graph-convolution layer is created per entry, in dict order.
    """

    def __init__(self, convs):
        super(GCN, self).__init__()
        self.convs = ModuleList()
        self.dropouts = ModuleList()
        # One (conv, dropout) pair per configuration entry; keys are ignored.
        for input_dim, output_dim, drop_rate in convs.values():
            self.convs.append(GCNConv(int(input_dim), int(output_dim)))
            self.dropouts.append(Dropout(float(drop_rate)))

    def forward(self, x, edge_index):
        # Convolve -> ReLU -> dropout through every layer, in order.
        for conv, dropout in zip(self.convs, self.dropouts):
            x = dropout(F.relu(conv(x, edge_index)))
        return F.softmax(x, dim=1)
class GAT(Module):
    """A stack of GATConv attention layers with ReLU activations.

    ``convs`` maps an arbitrary key to an
    ``(input_dim, output_dim, num_heads, dropout, concat)`` tuple; one
    attention layer is created per entry, in dict order.
    """

    def __init__(self, convs):
        super(GAT, self).__init__()
        self.convs = ModuleList()
        # Keys are ignored; only the per-layer configuration tuples matter.
        for input_dim, output_dim, num_heads, drop_rate, concat_mode in convs.values():
            self.convs.append(GATConv(int(input_dim), int(output_dim), heads=int(num_heads),
                                      dropout=float(drop_rate), concat=(concat_mode == '拼接')))

    def forward(self, x, edge_index):
        for layer in self.convs:
            x = F.relu(layer(x, edge_index))
        return F.softmax(x, dim=1)
| MengBaofh/GModel | GNNs/Models.py | Models.py | py | 1,344 | python | en | code | 1 | github-code | 13 |
10387042396 | from controller.appReglas import AppReglas
from util import enumerations as enu
from openpyxl import Workbook
# Selected stations: station codes whose 2014 records will be audited.
# NOTE(review): 'M1257' appears twice in this list — confirm whether intentional.
listEstation=['M1219','M1221','M1230','M1231','M1233','M1238','M1239','M1240','M1243','M1244','M1246','M1248',
              'M1249','M1250','M1256','M1257','M1257','M1259','M1260','M1261','M1265','M1267']
# One workbook/sheet collects the rule violations of every station.
wb= Workbook()
ws=wb.active
for est in listEstation:
    # Apply the full battery of quality-control rules to one station-year.
    appR = AppReglas(est, 2014)
    ra=appR.reglaA()
    rb=appR.reglaB()
    # Rule C: max temperature must exceed the dry-bulb reading at 07/13/19 h.
    rc07=appR.reglaC(colIzq=4,colDer=6,regla="Tmx > Ts a las 07")
    rc13=appR.reglaC(colIzq=4,colDer=7,regla="Tmx > Ts a las 13")
    rc19=appR.reglaC(colIzq=4,colDer=8,regla="Tmx > Ts a las 19")
    # Rule D: min temperature must not exceed the dry-bulb reading.
    rd07=appR.reglaD(colIzq=5,colDer=6,regla="Tmn <= Ts a las 07")
    rd13=appR.reglaD(colIzq=5,colDer=7,regla="Tmn <= Ts a las 13")
    rd19=appR.reglaD(colIzq=5,colDer=8,regla="Tmn <= Ts a las 19")
    # Rule E: dry-bulb temperature must not fall below the wet-bulb reading.
    re07=appR.reglaE(colIzq=6,colDer=9,regla="TS07 < TH07")
    re13=appR.reglaE(colIzq=7,colDer=10,regla="TS13 < TH13")
    re19=appR.reglaE(colIzq=8,colDer=11,regla="TS19 < TH19")
    rf=appR.reglaF()
    rg=appR.reglaG()
    rh=appR.reglaH()
    # Rule I: flag precipitation readings above 30 mm at each observation hour.
    ri07=appR.reglaI(12,30,"RR > 30 mm a las 07",enu.TypeErros(2))
    ri13=appR.reglaI(13,30,"RR > 30 mm a las 13 ",enu.TypeErros(2))
    ri19=appR.reglaI(14,30,"RR > 30 mm a las 19 ",enu.TypeErros(2))
    # Rules J and K (evaporation bounds) are currently disabled:
    """rj07=appR.reglaJ(15,5,"Evap > 5 m a las 07")
    rj13=appR.reglaJ(17,5,"Evap > 5 m a las 13")
    rj19=appR.reglaJ(19,5,"Evap > 5 m a las 19")
    rk07=appR.reglaK(19,0,"Evap < 0 m a las 07")
    rk13=appR.reglaK(19,0,"Evap < 0 m a las 13")
    rk19=appR.reglaK(19,0,"Evap < 0 m a las 19")
    """
    frames=[ra,rb,rc07,rc13,rc19,rd07,rd13,rd19,re07,re13,re19,rf,rg,rh,ri07,ri13,ri19]
    # Append every violation row reported by any rule to the worksheet.
    for df in frames:
        if not df.empty:
            #print("en el for")
            #print(df)
            #print("fin del for")
            for rowdf in range(0,len(df)):
                print(df.iloc[rowdf, :].values)
                ws.append(list(df.iloc[rowdf,:].values))
wb.save("/home/drosero/Escritorio/anuario2014.xlsx") | meteorodev/rev3horas | View/impReglas.py | impReglas.py | py | 2,054 | python | es | code | 0 | github-code | 13 |
4035172296 | from django.urls import path
from . import views
# URL routes for the reviews app.
urlpatterns = [
    # Create a review for the given product.
    path('add/<int:product_id>/', views.add_review, name='add_review'),
    # Remove one specific review from the given product.
    path(
        'delete/<int:product_id>/<int:review_id>/',
        views.delete_review, name='delete_review'),
]
| WisamTa/Supplement-Store | reviews/urls.py | urls.py | py | 255 | python | en | code | 1 | github-code | 13 |
40217525703 | """All that this class should do is putting the setup, execution and saving all together."""
import numpy as np
import time
from one_ray_solver.utility import screen_COM_converter, redshift
from one_ray_solver.ode import solver
from one_ray_solver.collision import collider
from one_ray_solver.save import saver_cfg, saver_json
from one_ray_solver.sign import check
from one_ray_solver.velocities import *
class OneRaySolver:
    """Drive one light-ray computation end to end: integrate the geodesic,
    test for a collision with the emitting body, compute the redshift and
    persist the results through the configured saver.

    The observer screen point is (alpha, beta); the emitter is a spinning
    body (sphere or Maclaurin ellipsoid) at (rem, tem, pem) in a
    Schwarzschild spacetime of mass ``m``.
    """

    def __init__(self, s=0., rem=8., tem=np.pi/2, pem=0., geometry=(0.5,), robs=35., tobs=1., pobs=0.,
                 alpha=0., beta=-5., m=1, start=0, stop=70, num=100000, abserr=1e-7, relerr=1e-7, interp_num=10000,
                 sign_r=-1, sign_theta=1, sign_phi=1, fp='./', saver='json', shape='sphere',
                 save_even_when_not_colliding=True, save_handle=None,
                 save_csv=False, save_data=True):
        # Emitter configuration: spin s, position (rem, tem, pem) and shape.
        self.s = s
        self.rem = rem
        self.tem = tem
        self.pem = pem
        self.geometry = geometry
        # Observer position (r, theta, phi).
        self.robs = robs
        self.tobs = tobs
        self.pobs = pobs
        # Screen (image-plane) coordinates of this ray.
        self.alpha = alpha
        self.beta = beta
        self.m = m
        # ODE integration window, resolution and tolerances.
        self.start = start
        self.stop = stop
        self.ray_num = num
        self.abserr = abserr
        self.relerr = relerr
        self.interpolate_num = interp_num
        self.shape = shape
        # Initial signs of the r / theta / phi momentum components.
        self.sign_r = sign_r
        self.sign_theta = sign_theta
        self.sign_phi = sign_phi
        self.collider = collider.Collider(self.rem, self.tem, self.pem, self.geometry, self.interpolate_num, self.shape)
        # Choose the result serialisation backend.
        if saver == 'json':
            self.saver = saver_json.DataSaverJson(fp)
        elif saver == 'config':
            self.saver = saver_cfg.DataSaverConfig(fp)
        else:
            raise ValueError(f'Saver type {saver} is not supported.')
        self.orb = OrbitVelocitySchwarzschild(self.s, self.rem)
        self.rel = RelativeVelocitySchwarzschild(self.s, self.rem)
        # Constants of motion; computed in solve().
        self.lamda = None
        self.qu = None
        self.save_even_when_not_colliding_flag = save_even_when_not_colliding
        self.save_handle = save_handle
        self.save_csv = save_csv
        self.save_data = save_data

    def solve(self, full_output=False):
        """Run the full pipeline for the current (alpha, beta).

        Returns ``(ray, saver_config)`` when ``full_output`` is True and the
        ray hits the emitter; ``(None, saver_config)`` when it misses;
        otherwise implicitly None.
        """
        start_time = time.time()
        # step 1: get the constants of motion
        self.lamda, self.qu = screen_COM_converter.lamda_qu_from_alpha_beta(self.alpha, self.beta,
                                                                           self.robs, self.tobs, self.m)
        # step 2: setup the solver itself
        sol = solver.ODESolverSchwazrschild(self.robs, self.tobs, self.pobs, self.lamda, self.qu, self.m,
                                            self.start, self.stop, self.ray_num, self.abserr, self.relerr,
                                            self.sign_r, self.sign_theta, self.sign_phi)
        sigma, ray = sol.solve()
        # step 3: see if there is a collision
        collision_point, local_coord, collision_flag = self.collider.check(ray)
        # step 3a: save the light ray that is not colliding
        if not collision_flag:
            return self._no_collision()
        # step 3b: continue with the colliding light ray
        else:
            # step 4: check the signs of initial velocities at impact
            self.checker = check.SignImpactSchwarzschild(sol, collision_point, [self.robs, self.tobs, self.pobs])
            if self.checker.problem:
                # Sign check failed; treat this ray as a miss.
                return self._no_collision()
            sigma, ray = self.checker._solve()
            dt, dr, dtheta, dphi = self.checker.calculate_initial_velocities()
            pt, pr, ptheta, pphi = self.checker.calculate_initial_momenta_general()
            p0, p1, p2, p3 = self.checker.calculate_initial_momenta_ZAMO()
            # step 5: calculate the velocities
            (orbit_velocity, ), gamma_orb = self.orb.get_velocity()
            (relative_vel, ), gamma_rel_vel = self.rel.get_velocity()
            # Surface velocity depends on the emitter's shape at the local
            # impact coordinates.
            if self.shape == 'sphere':
                surface = SurfaceVelocityRigidSphere(self.s, (self.geometry[0], local_coord[0], local_coord[1]))
            elif self.shape == 'ellipsoid':
                surface = SurfaceVelocityMaclaurinEllipsoid(self.s, (self.geometry[0], local_coord[0], local_coord[1]))
            else:
                raise ValueError(f'The shape {self.shape} was not understood; must be either -sphere- or -ellipsoid-')
            (surf_vel_u1, surf_vel_u3), gamma_surf = surface.get_velocity()
            # step 6: calculate the redshift of the ray
            g = redshift.g(p0, p1, p3, orbit_velocity, gamma_orb, relative_vel, gamma_rel_vel,
                           surf_vel_u1, surf_vel_u3, gamma_surf)
            # Presumably rescales by the Schwarzschild lapse 1/sqrt(1 - 2M/r)
            # at the observer (with M = 1) — TODO confirm convention.
            g = 1 / np.sqrt(1 - 2 / self.robs) * 1 / g
            # step 7: save everything via the configured saver
            self.saver.add_observer_info(self.robs, self.tobs, self.pobs, self.alpha, self.beta)
            self.saver.add_emitter_info(self.s, self.geometry, *local_coord, self.shape)
            self.saver.add_constants_of_motion(self.lamda, self.qu, g)
            self.saver.add_initial_data_info(0, *collision_point, dt, dr, dtheta, dphi)
            self.saver.add_momenta_info(pt, pr, ptheta, pphi, p0, p1, p2, p3)
            self.saver.add_velocities_info(orbit_velocity, gamma_orb, relative_vel, gamma_rel_vel,
                                           surf_vel_u1, surf_vel_u3, gamma_surf)
            self.saver.add_numerics_info(self.start, self.stop, self.ray_num, self.abserr, self.relerr,
                                         self.interpolate_num, time.time() - start_time)
            if self.save_data:
                self.saver.save(self.save_handle)
            if self.save_csv:
                self.saver.save_data_to_csv(sigma, ray, self.save_handle)
            if full_output:
                return ray, self.saver.config

    def set_alpha_beta(self, alpha, beta):
        """Move to a new screen point; flips the polar sign to match beta's half-plane."""
        self.alpha = alpha
        self.beta = beta
        self.sign_theta = - np.sign(beta)  # that should work

    def get_solver(self):
        """Return a bare ODE solver (zero constants of motion) for external use."""
        # this method will allow the user to access a solver object, without running the whole solver wrapper.
        # especially useful for plotting shenanigans.
        sol = solver.ODESolverSchwazrschild(self.robs, self.tobs, self.pobs, 0, 0, self.m,
                                            self.start, self.stop, self.ray_num, self.abserr, self.relerr,
                                            self.sign_r, self.sign_theta, self.sign_phi)
        return sol

    def _no_collision(self):
        """Record a miss: fill every saver section with zeros and optionally save."""
        self.saver.add_observer_info(self.robs, self.tobs, self.pobs, self.alpha, self.beta)
        self.saver.add_emitter_info(self.s, self.geometry, 0, 0, self.shape)
        self.saver.add_constants_of_motion(0, 0, 0)
        self.saver.add_initial_data_info(0, 0, 0, 0, 0, 0, 0, 0)
        self.saver.add_momenta_info(0, 0, 0, 0, 0, 0, 0, 0)
        self.saver.add_numerics_info(self.start, self.stop, self.ray_num, self.abserr, self.relerr,
                                     self.interpolate_num, 0)
        self.saver.add_velocities_info(0, 0, 0, 0, 0, 0, 0)
        if self.save_even_when_not_colliding_flag and self.save_data:
            self.saver.save(self.save_handle)
return None, self.saver.config | uhrwecker/Spin | one_ray_solver/solve.py | solve.py | py | 7,259 | python | en | code | 0 | github-code | 13 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.