seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
5542097679 | from odoo import api, fields, models, _
from odoo.exceptions import UserError, ValidationError, Warning
import ipdb
from odoo.addons import decimal_precision as dp
class DepositoMakeInvoice(models.TransientModel):
    """Wizard that invoices the selected ``deposito.service.products`` records.

    For outgoing third-party products it first generates supplier cost lines,
    then creates one customer invoice per selected service product.
    """
    _name = 'deposito.make.invoice'
    _description = 'Deposito Crear Factura'

    def cargar_campos_impresion(self, partner, invoice):
        """Copy the partner's print_*/show_* report-configuration flags onto the invoice."""
        invoice.print_output_reference = partner.print_output_reference
        invoice.print_origin_destiny_grouped = partner.print_origin_destiny_grouped
        invoice.print_cont_grouped = partner.print_cont_grouped
        invoice.print_product_grouped = partner.print_product_grouped
        invoice.print_invoice_load = partner.print_invoice_load
        invoice.print_invoice_product = partner.print_invoice_product
        invoice.print_date_start = partner.print_date_start
        invoice.print_ms_in_out = partner.print_ms_in_out
        invoice.print_mic = partner.print_mic
        invoice.print_crt = partner.print_crt
        invoice.print_consignee = partner.print_consignee
        invoice.print_purchase_order = partner.print_purchase_order
        invoice.print_origin_destiny = partner.print_origin_destiny
        invoice.print_container_number = partner.print_container_number
        invoice.print_container_size = partner.print_container_size
        invoice.print_booking = partner.print_booking
        invoice.print_gex = partner.print_gex
        invoice.print_sender = partner.print_sender
        invoice.print_dua = partner.print_dua
        invoice.print_packages = partner.print_packages
        invoice.print_kg = partner.print_kg
        invoice.print_volume = partner.print_volume
        invoice.print_extra_info = partner.print_extra_info
        invoice.show_extra_info = partner.show_extra_info

    def calcular_diario(self, partner_id):
        """Select the sales journal from the partner's VAT type and country.

        Returns the id of the 'EF' (e-Factura) or 'ET' (e-Ticket) journal, or
        False when no rule matches.  (The original raised UnboundLocalError in
        that last case because ``journal_id`` was never initialized.)
        """
        journal_obj = self.env['account.journal']
        journal_id = False  # fix: previously unbound when neither rule matched
        if partner_id.vat_type == '2' and partner_id.country_id.code == 'UY':
            # e-Factura
            journal_id = journal_obj.search([('code', '=', 'EF')]).id
        if (partner_id.vat_type == '4' and partner_id.country_id.code != 'UY') or partner_id.vat_type == '3':
            # e-Ticket
            journal_id = journal_obj.search([('code', '=', 'ET')]).id
        return journal_id

    def generar_costos(self, products=None):
        """Create supplier cost lines for outgoing third-party service products.

        Existing cost lines that are not yet linked to a supplier invoice are
        deleted and regenerated from the product's current purchase data.
        """
        cost_obj = self.env['rt.service.product.supplier']
        tax_obj = self.env['account.tax']
        if not products:
            return
        for prod in products:
            taxes = tax_obj.search([('name', '=', 'IVA Directo Op Grav B')])
            if prod.product_id.name == 'Alquiler':
                taxes = tax_obj.search([('name', '=', 'Compras Exentos IVA')])
            # Only outgoing third-party products with complete purchase data
            # (supplier, amount and currency) get a cost line.  Guard clauses
            # replace the original five-level nesting.
            if not (prod.product_type == 'terceros' and prod.is_outgoing):
                continue
            if not (prod.supplier_id and prod.valor_compra and prod.valor_compra_currency_id):
                continue
            # vamos a borrar los costos -> drop stale, not-yet-invoiced costs
            for prd in prod.supplier_ids:
                if not prd.invoice_id:
                    prd.unlink()
            line_dict = {
                'deposito_id': prod.deposito_srv_id.id,
                'supplier_id': prod.supplier_id.id,
                'currency_id': prod.valor_compra_currency_id.id,
                'amount': prod.valor_compra,
                # 'Alquiler' (rent) is VAT-exempt; everything else adds 22% VAT
                'price_subtotal': prod.valor_compra if prod.product_id.name == 'Alquiler'
                                  else prod.valor_compra * 1.22,
                'ref': prod.deposito_srv_id.referencia,
                'rt_service_id': False,
                'rt_consol_product_id': False,
                'rt_marfrig_product_id': False,
                'rt_deposito_product_id': prod.id,
                'service_state': prod.state,
                'tax_ids': [(6, 0, taxes.ids)],
                'service_date': prod.start,
                'origin_id': prod.origin_id.id,
                'destiny_id': prod.destiny_id.id,
                'product_id': prod.product_id.id,
                'output_reference': prod.name,
                'partner_invoice_id': prod.deposito_srv_id.partner_invoice_id.id,
            }
            cost_obj.create(line_dict)  # fix: removed unused 'result =' binding

    @api.multi
    def make_invoices(self):
        """Create one customer invoice per selected service product.

        NOTE(review): ``lineas`` accumulates across iterations, so the Nth
        invoice receives every line seen so far.  Kept as-is because existing
        flows may depend on it — confirm whether this is intended.
        """
        inv_obj = self.env['account.invoice']
        if not self._context.get('active_ids'):
            return {'type': 'ir.actions.act_window_close'}
        product_service = self.env['deposito.service.products'].browse(self._context.get('active_ids'))
        self.generar_costos(products=product_service)
        tax_obj = self.env['account.tax']
        account_obj = self.env['account.account']
        operation_taxes = {
            'exento': False,
            'asimilado': tax_obj.search([('name', '=', 'IVA Venta asimilado a exportación')]),
            'gravado': tax_obj.search([('name', '=', 'IVA Ventas (22%)')])
        }
        lineas = []
        for line in product_service:
            taxes = operation_taxes['gravado']
            account = account_obj.search([('code', '=', '41021001')])
            line_dict = {
                'name': line.name,
                'account_id': account.id,
                'price_unit': line.importe,
                'uom_id': line.product_id.uom_id.id,
                'product_deposito_srv_id': line.id,
                'product_id': line.product_id.id,
                'invoice_line_tax_ids': [(6, 0, taxes.ids)],
            }
            lineas.append((0, 0, line_dict))
            # Facturado: mark the service product and its operation as invoiced
            line.invoiced = True
            line.deposito_srv_id.state = 'invoiced'
            journal_id = self.calcular_diario(line.partner_invoice_id)
            invoice = inv_obj.create({
                'name': line.partner_invoice_id.name or '',
                'origin': line.name,
                'type': 'out_invoice',
                'account_id': line.partner_invoice_id.property_account_receivable_id.id,
                'partner_id': line.partner_invoice_id.id,
                'journal_id': journal_id,
                'currency_id': line.currency_id.id,
                'fiscal_position_id': line.partner_invoice_id.property_account_position_id.id,
                'company_id': line.deposito_srv_id.company_id.id,
                'user_id': line.deposito_srv_id.user_id.id,
                'deposito_operation_id': line.deposito_srv_id.id,
                'invoice_line_ids': lineas
            })
            # line.invoices_ids += invoice
            # line.deposito_srv_id.invoices_ids
            partner = line.partner_invoice_id
            self.cargar_campos_impresion(partner, invoice)
        # fix: use .get() — the key may be absent from the wizard context
        if self._context.get('open_invoices'):
            return {
                'domain': [('id', 'in', invoice.ids)],
                'name': 'Invoices',
                'view_type': 'form',
                'view_mode': 'tree,form',
                'res_model': 'account.invoice',
                'view_id': False,
                'views': [(self.env.ref('account.invoice_tree').id, 'tree'),
                          (self.env.ref('account.invoice_form').id, 'form')],
                'context': "{'type':'out_invoice'}",
                'type': 'ir.actions.act_window'
            }
        else:
            return {'type': 'ir.actions.act_window_close'}
| LFer/ras | deposito/wizard/product_service_make_invoice.py | product_service_make_invoice.py | py | 7,933 | python | en | code | 0 | github-code | 36 |
15616400542 | from typing import Dict
from tools.coco_dataset_metrics import COCODatasetMetrics
from tools.tools import load_json_data
from detectron2_metrics import TrainingMetrics, InferenceMetrics
def load_annotations(annotation_paths: Dict):
    """
    Load each annotation file and map it back to its descriptive key.
    :param annotation_paths: Dict - {key: path to a JSON annotation file}
    :return: Dict - {key: annotations}
    """
    return {label: load_json_data(json_path)
            for label, json_path in annotation_paths.items()}
if __name__ == '__main__':
    # Setup path variables: each dict maps a human-readable dataset label to
    # the COCO-format annotation JSON for that split.
    annotation_org_dataset_path = {
        'Original dataset': '../data/testing_annotations/labels_household_object_detection_newest.json'
    }
    annotation_split_dataset_path = {
        'Split data - train': '../data/labels/train_coco_annotations.json',
        'Split data - validation': '../data/labels/val_coco_annotations.json',
        'Split data - test': '../data/labels/test_coco_annotations.json'
    }
    annotation_generated_split_dataset_path = {
        'Generated data - train': '../generated_data/labels/train_coco_annotations.json',
        'Generated data - validation': '../generated_data/labels/val_coco_annotations.json',
        'Generated data - test': '../generated_data/labels/test_coco_annotations.json'
    }
    # Group the three dataset variants; the outer key names the plot group.
    all_annotations = {
        'original_dataset': annotation_org_dataset_path,
        'original_split_dataset': annotation_split_dataset_path,
        'generated_split_dataset': annotation_generated_split_dataset_path
    }
    # Iterate over the annotations and plot corresponding dataset distributions
    for key, annotation_paths in all_annotations.items():
        annotations = load_annotations(annotation_paths)
        dataset_metrics = COCODatasetMetrics(annotations)
        dataset_metrics.plot_metrics(key)
    # Plot training and inference metrics for all training runs
    training_metrics = TrainingMetrics()
    inference_metrics = InferenceMetrics()
    training_metrics.plot_training_metrics()
    inference_metrics.plot_inference_metrics()
| Mathiasn21/household_object_detection | code/plot_metrics.py | plot_metrics.py | py | 2,100 | python | en | code | 0 | github-code | 36 |
# Read the inclusive range [A, B] from stdin and print every prime in it,
# one per line (trial division up to sqrt(n)).
A, B = map(int, input().split())
for candidate in range(A, B + 1):
    if candidate < 2:
        # 0 and 1 are not prime (the original only skipped 1 and would
        # have printed 0 for inputs below 1)
        continue
    for divisor in range(2, int(candidate ** 0.5) + 1):
        if candidate % divisor == 0:  # divisible -> not prime
            break
    else:
        # no divisor found: candidate is prime
        print(candidate)
| HelloWook/AlgorithmStudy | 백준/Silver/1929. 소수 구하기/소수 구하기.py | 소수 구하기.py | py | 234 | python | en | code | 0 | github-code | 36 |
70585526504 | import sys, copy
from udp_interface import udp_interface
import numpy as np
import os
# from ppo.run import train
# from baselines.common import tf_util as U
from utils.action_filter import ActionFilterButter
from utils.reference_generator import ReferenceMotionGenerator
from collections import deque
from utils.utility import *
from utils.quaternion_function import euler2quat
import time
from sshkeyboard import listen_keyboard
import threading
import torch
from rsl_rl.modules import ActorCritic
from dataclasses import dataclass, field
@dataclass
class RobotState:
    """Latest proprioceptive state estimate received from the robot over UDP."""
    trans_vel: np.ndarray   # base linear velocity, shape (3,)
    trans_acc: np.ndarray   # base linear acceleration, shape (3,)
    rot_quat: np.ndarray    # base orientation quaternion, xyzw order (see __process_recv_package)
    rot_vel: np.ndarray     # base angular velocity, shape (3,)
    motor_pos: np.ndarray   # joint positions, shape (NUM_MOTORS,)
    motor_vel: np.ndarray   # joint velocities, shape (NUM_MOTORS,)
NUM_MOTORS=12
class ExpEnv():
    """Hardware experiment environment for a quadruped robot (A1-style joints).

    Communicates with the robot over UDP at ~1 kHz while a learned policy runs
    at ~30 Hz.  Reference motions come from a motion-library file; policy
    outputs are normalized joint-position targets that are low-pass filtered
    before being sent to the motors.
    """
    def __init__(self, ref_file='../motions/MotionLibrary/LiftingMotion_Simulator.motionlib', model_path=None, cfg=None,
                 recv_IP=None, recv_port=None, send_IP=None, send_port=None):
        """Set up UDP link, timing, action bounds/filter, policies and reference generator."""
        self.robot = udp_interface(recv_IP=recv_IP,
                                   recv_port=recv_port,
                                   send_IP=send_IP,
                                   send_port=send_port)
        self.dir = os.path.dirname(os.path.abspath(__file__))
        # Timing: 1 kHz low-level loop; the policy runs every 33 low-level ticks (~30 Hz).
        self.rt_freq = 1000
        self.exp_env_freq = 30
        self.num_sims_per_env_step = self.rt_freq // self.exp_env_freq
        self.secs_per_policy_step = self.num_sims_per_env_step / self.rt_freq
        self.policy_freq = 1 / self.secs_per_policy_step
        # PD gains; kd repeats per leg as (abduction, hip, knee).
        self.motor_kps = [100.0]*12
        self.motor_kds = 4*[1.0,2.0,2.0]
        self.motor_vel_idx = [i+6 for i in range(NUM_MOTORS)]
        '''a1, but also applied on mini-cheetah'''
        # Nominal crouched pose used as the neutral action target.
        self.default_target_positions = [0.0,1.0661,-2.1869, 0.0,1.0661,-2.1869,
                                         0.0,1.0661,-2.1869, 0.0,1.0661,-2.1869]
        # Per-joint position limits; .T gives shape (2, 12): row 0 = lower, row 1 = upper.
        self.action_bounds = np.array([[-0.7767, 0.7767],
                                       [-0.3011, 3.7045],
                                       [-2.8500, -0.1500],
                                       [-0.7767, 0.7767],
                                       [-0.3011, 3.7045],
                                       [-2.8500, -0.1500],
                                       [-0.7767, 0.7767],
                                       [-0.3011, 3.7045],
                                       [-2.8500, -0.1500],
                                       [-0.7767, 0.7767],
                                       [-0.3011, 3.7045],
                                       [-2.8500, -0.1500]]).T
        self.history_len = 15
        self.action_filter_order = 2
        # 4 Hz Butterworth low-pass on actions to smooth the motor targets.
        self.action_filter = ActionFilterButter(lowcut=None, highcut=[4], sampling_rate=self.policy_freq, order=self.action_filter_order, num_joints=NUM_MOTORS)
        self.selected_policy = 0      # 0 = hold default pose, 1/2 = loaded policies
        self.use_planner = True       # False when the user picks a single policy manually
        self.ever_jump = False
        self.policy_running = False
        self.init_model(model_path)
        self.init_robot_state()
        self.previous_obs = deque(maxlen=self.history_len)
        self.previous_acs = deque(maxlen=self.history_len)
        self.reference_generator = ReferenceMotionGenerator(ref_file, 2000, self.secs_per_policy_step)
        self.__reset()
        if not self.use_planner:
            self.reference_generator.set_policy(self.selected_policy)
        self.low_obs_act = []  # log of (observation, filtered action) pairs

    def init_robot_state(self):
        """Zero-initialize the cached RobotState buffers."""
        trans_vel = np.zeros((3,))
        trans_acc = np.zeros((3,))
        rot_vel = np.zeros((3,))
        rot_quat = np.zeros((4,))
        motor_pos = np.zeros((NUM_MOTORS,))
        motor_vel = np.zeros((NUM_MOTORS,))
        self.obs_robot_state = RobotState(trans_vel=trans_vel, trans_acc=trans_acc, rot_quat=rot_quat, rot_vel=rot_vel, motor_pos=motor_pos, motor_vel=motor_vel)

    def init_model(self, model_path):
        """Load the actor-critic policy checkpoints.

        ``model_path`` may be a list of up to two checkpoint paths (planner
        mode) or a single path string, in which case the user is prompted to
        assign it to policy slot 1 or 2 and the planner is disabled.
        """
        self.pi = []
        if isinstance(model_path, str):
            selection = input("Single policy experiment, press 1 or 2 to select policy, any key to exit: ")
            selection = int(selection)
            if selection == 1 or selection == 2:
                self.selected_policy = selection
                self.use_planner = False
                path = [None, None]
                path[self.selected_policy - 1] = model_path
                model_path = path
            else:
                raise NotImplementedError
        for model in model_path:
            # Network sizes 499 (actor obs) / 536 (critic obs) match the
            # observation layout built in __get_observation.
            pi = ActorCritic( 499, 536, NUM_MOTORS, actor_hidden_dims=[512, 256, 128], #499, 536
                              critic_hidden_dims=[512, 256, 128])
            if model is not None:
                loaded_dict = torch.load(model, map_location=torch.device('cpu'))
                pi.load_state_dict(loaded_dict['model_state_dict'])
            pi.eval()
            self.pi.append(pi)

    def __process_recv_package(self, obs):
        """Parse a raw UDP observation into self.obs_robot_state."""
        self._raw_state = obs
        # Convert quaternion from wxyz to xyzw, which is default for Pybullet.
        rpy = self._raw_state[0:3]
        q = euler2quat(rpy[0], rpy[1], rpy[2])
        self.obs_robot_state.motor_pos = np.array(self._raw_state[6:18])
        self.obs_robot_state.rot_quat = np.copy(np.array([q[1], q[2], q[3], q[0]]))
        # print(self.obs_robot_state.rot_quat)
        ''' Thigh and Calf joints are reversed on the real robot '''
        self.obs_robot_state.motor_pos[[1,2,4,5,7,8,10,11]] *= -1

    def __get_observation(self, acs = np.zeros(NUM_MOTORS), step = False):
        """Assemble the policy observation into self.curr_obs.

        Layout: [obs history + action history, current (quat, joint pos),
        reference joint targets 1/4/7 steps ahead, Bezier/timing parameters,
        reference foot positions].  When ``step`` is True the history deques
        are advanced with the current obs and ``acs``.
        NOTE(review): mutable default ``acs`` is only copied, never mutated,
        so it is safe here — but worth switching to None upstream.
        """
        __acs = np.copy(acs)
        ref_dict_1 = self.reference_generator.getReferenceMotion(look_forward=1)
        ref_dict_4 = self.reference_generator.getReferenceMotion(look_forward=4)
        ref_dict_7 = self.reference_generator.getReferenceMotion(look_forward=7)
        ob1 = ref_dict_1["joints_rot"]
        ob4 = ref_dict_4["joints_rot"]
        ob7 = ref_dict_7["joints_rot"]
        ob_curr = np.concatenate([self.obs_robot_state.rot_quat, self.obs_robot_state.motor_pos])
        bezier_param = np.concatenate([self.reference_generator.get_bezier_coefficients(), self.reference_generator.get_fixed_motion_duration(),
                                       self.reference_generator.get_motion_t_norm(), self.reference_generator.get_motion_phase()])  # motion_type: 0,1,2,3,4
        feet_pos = np.concatenate([ref_dict_1["foot_pos_bezier"], ref_dict_4["foot_pos_bezier"], ref_dict_7["foot_pos_bezier"]])
        if self.timestep == 0:
            # Pre-fill the history with the current obs / default action.
            [self.previous_obs.append(ob_curr) for i in range(self.history_len)]
            [self.previous_acs.append(self.default_target_positions) for i in range(self.history_len)]
        ob_prev = np.concatenate([np.array(self.previous_obs).flatten(), np.array(self.previous_acs).flatten()])
        # print(bezier_param)
        if step:
            self.previous_obs.append(ob_curr)
            self.previous_acs.append(__acs)
        self.curr_obs = np.concatenate([ob_prev, ob_curr, ob1, ob4, ob7, bezier_param, feet_pos])

    def process_send_cmd(self, motor_commands):
        """Hook for transforming outgoing motor commands (currently identity)."""
        return motor_commands

    def acs_norm2actual(self, acs):
        """Map a normalized action in [-1, 1] to actual joint angles via action_bounds."""
        return self.action_bounds[0] + (acs + 1)/2.0 * (self.action_bounds[1] - self.action_bounds[0])

    def acs_actual2norm(self, actual_acs):
        """Inverse of acs_norm2actual: joint angles -> normalized [-1, 1]."""
        return (actual_acs - self.action_bounds[0])*2 / (self.action_bounds[1] - self.action_bounds[0]) - 1

    def __get_action(self):
        """Query the selected policy, clip, low-pass filter, and return joint targets.

        Returns (filtered joint targets, observation, raw normalized action).
        """
        print(self.selected_policy, self.curr_obs[-15:-9])
        if self.selected_policy == 0:
            # Policy 0 = hold the default pose.
            acs = np.copy(self.acs_actual2norm(self.default_target_positions))
        else:
            acs = self.pi[self.selected_policy - 1].act_inference(torch.from_numpy(self.curr_obs).to(torch.float32).unsqueeze(0))[0]
            acs = acs.detach().numpy()
        acs = np.clip(np.copy(acs), -1, 1)
        if self.selected_policy == 1 or self.selected_policy == 2:
            # Restrict rear-abduction joints early in the motion for safety.
            if self.reference_generator.time_in_sec < 0.33:
                acs[[6,9]] = np.clip(acs[[6,9]], -0.2, 0.2)
            else:
                acs[[6,9]] = np.clip(acs[[6,9]], -0.8, 0.8)
        assert acs.shape[0] == 12 and -1.0 <= acs.all() <= 1.0
        if self.timestep == 0:  # prevent zero action output
            default_action = np.array(self.default_target_positions)
            self.actual_pTs_filtered = default_action
            self.action_filter.init_history(self.acs_actual2norm(default_action))
        pTs_filtered = np.copy(self.action_filter.filter(np.copy(acs)))
        actual_pTs_filtered = np.copy(self.acs_norm2actual(pTs_filtered))
        return actual_pTs_filtered, np.copy(self.curr_obs), np.copy(acs)

    def __env_update(self):
        """Advance the environment clock and the reference generator by one policy step."""
        # if self.timestep<3:
        self.timestep += 1
        self.time_in_sec = (self.timestep*self.num_sims_per_env_step) / self.rt_freq
        self.reference_generator.update_step(self.timestep)

    def __reset(self):
        """Reset filter, timers and cached action buffers."""
        self.action_filter.reset()
        self.timestep = 0.0
        self.est_timestep = 0
        self.time_in_sec = 0.0
        self.actual_pTs = np.zeros(NUM_MOTORS)
        self.actual_pTs_filtered = np.zeros(NUM_MOTORS)

    def pid_ctrl(self):
        """Background ~1 kHz loop: hold the A1 default standing pose until stopped.

        Runs until the caller sets ``thread.do_run = False``.
        """
        policy_count = 0
        previous_time = time.time()
        # NOTE(review): threading.currentThread() is a deprecated alias of
        # threading.current_thread().
        t = threading.currentThread()
        a1_default_target_positions = np.array([0.0,0.9,-1.8, 0.0,0.9,-1.8,
                                                0.0,0.9,-1.8, 0.0,0.9,-1.8])
        while getattr(t, "do_run", True):
            obs = self.robot.receive_observation()
            self.__process_recv_package(np.copy(obs))
            if policy_count % 1 == 0:  # NOTE(review): always true as written
                self.__get_observation(np.copy(self.default_target_positions), step=False)
                self.actual_pTs_filtered_sent = np.copy(a1_default_target_positions)
                # Thigh/calf joints are sign-flipped on the real robot.
                for i in range(4):
                    self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1]
                    self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2]
            cmd=self.process_send_cmd(np.concatenate((self.actual_pTs_filtered_sent,[0.0, 0.0, 0.0],np.zeros((12,)), [0.0,0.0,0.0])))
            self.robot.send_command(cmd)
            policy_count += 1
            current_time = time.time()
            # print("proc", "Frequency: ", 1/(current_time - previous_time + 1e-10))
            previous_time = current_time
            delay = 0.6
            # Busy-wait ~0.6 ms for tighter pacing than time.sleep() provides.
            _ = time.perf_counter() + delay/1000
            while time.perf_counter() < _:
                pass

    def pid_ctrl_squat_prep(self):
        """Blend from the A1 standing pose to the default squat pose over 30 ticks, then return."""
        policy_count = 0
        previous_time = time.time()
        t = threading.currentThread()
        a1_default_target_positions = np.array([0.0,0.9,-1.8, 0.0,0.9,-1.8,
                                                0.0,0.9,-1.8, 0.0,0.9,-1.8])
        while getattr(t, "do_run", True):
            obs = self.robot.receive_observation()
            self.__process_recv_package(np.copy(obs))
            if policy_count > 30:
                # Blend finished: latch the squat pose and exit the loop.
                self.actual_pTs_filtered_sent = np.copy(self.default_target_positions)
                for i in range(4):
                    self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1]
                    self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2]
                break
            else:
                self.__get_observation(np.copy(self.default_target_positions), step=False)
                # Linear interpolation between stand and squat poses.
                self.actual_pTs_filtered_sent = (30 - policy_count) / 30 * a1_default_target_positions + policy_count / 30 * np.array(self.default_target_positions)
                for i in range(4):
                    self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1]
                    self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2]
            cmd=self.process_send_cmd(np.concatenate((self.actual_pTs_filtered_sent,[0.0, 0.0, 0.0],np.zeros((12,)), [0.0,0.0,0.0])))
            self.robot.send_command(cmd)
            policy_count += 1
            current_time = time.time()
            # print("proc", "Frequency: ", 1/(current_time - previous_time + 1e-10))
            previous_time = current_time
            delay = 0.6
            _ = time.perf_counter() + delay/1000
            while time.perf_counter() < _:
                pass

    def press(self, key):
        """Keyboard callback used by listen_keyboard; intentionally a no-op."""
        print("Doing nothing")

    def set_actions_from_policy(self, planner_actions=None):
        """Receive high-level planner actions (last element is a send timestamp, dropped here)."""
        import time
        # print("delay: ", time.time()-planner_actions[-1])
        planner_actions = planner_actions[:-1]
        if not self.use_planner:
            raise NotImplementedError
        else:
            if not self.reference_generator.action_enabled:
                # Latch the policy choice once per motion; a jump (policy 1)
                # is remembered in ever_jump.
                self.selected_policy = int(planner_actions[-1])
                if self.selected_policy == 1:
                    self.ever_jump = True
            # if self.reference_generator.motion_phase == 2:
            #     if self.ever_jump:
            #         self.selected_policy = 1
            #     else:
            #         self.selected_policy = 2
            planner_actions[-1] = self.selected_policy
            self.reference_generator.set_actions_from_policy(planner_actions, self.timestep)

    def get_robot_states(self, planner_actions=None):
        """Return (recent robot states, recent actions, reference timing params) for the planner."""
        ob_curr = np.concatenate([self.obs_robot_state.rot_quat, self.obs_robot_state.motor_pos])
        robot_states = np.concatenate([np.array(self.previous_obs)[-5:].flatten(), ob_curr])
        robot_actions = np.concatenate([np.array(self.previous_acs)[-5:].flatten(), self.actual_pTs_filtered])
        reference_params = np.concatenate([self.reference_generator.get_fixed_motion_duration(),
                                           self.reference_generator.get_motion_t_norm(), self.reference_generator.get_motion_phase()])
        return robot_states, robot_actions, reference_params

    def run_policy(self):
        """Main entry point: stand, wait for spacebar, squat-prep, then run the policy loop forever.

        The inner loop runs at ~1 kHz; a new action is computed every pass
        (see NOTE below) and the latest command is re-sent between updates.
        """
        proc = threading.Thread(target=self.pid_ctrl)
        proc.start()
        # Block until the operator presses space, then stop the stand thread.
        listen_keyboard(on_press=self.press, until='space')
        proc.do_run = False
        previous_time = time.perf_counter()
        # proc_squat_prep = threading.Thread(target=self.pid_ctrl_squat_prep)
        # proc_squat_prep.start()
        # listen_keyboard(on_press=self.press, until='space')
        # proc_squat_prep.do_run = False
        self.pid_ctrl_squat_prep()
        while(True):
            if not self.policy_running:
                self.policy_running = True
            obs = self.robot.receive_observation()
            self.__process_recv_package(obs)
            # NOTE(review): '% 1' is always 0, so the else branch below is
            # currently dead code — presumably a leftover decimation knob.
            if self.est_timestep % 1 == 0:
                # print("self.est_timestep", self.est_timestep)
                # print("self.num_sims_per_env_step", self.num_sims_per_env_step)
                # if self.timestep < 30:
                #     self.reference_generator.set_policy(0, self.timestep)
                #     self.selected_policy = 0
                # else:
                #     self.reference_generator.set_policy(1, self.timestep)
                #     self.selected_policy = 1
                if self.timestep == 0:
                    self.actual_pTs_filtered = np.zeros(12)
                self.__get_observation(np.copy(self.actual_pTs_filtered), step=True)
                self.actual_pTs_filtered, ob, ac = self.__get_action()
                self.low_obs_act.append((np.copy(self.curr_obs), np.copy(self.actual_pTs_filtered)))
                self.actual_pTs_filtered = np.round(self.actual_pTs_filtered,5)
                self.actual_pTs_filtered_sent = np.copy(self.actual_pTs_filtered)
                # Thigh/calf joints are sign-flipped on the real robot.
                for i in range(4):
                    # self.actual_pTs_filtered_sent[3*i] = -self.actual_pTs_filtered_sent[3*i]
                    self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1]
                    self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2]
                self.robot.send_command(self.actual_pTs_filtered_sent)
                self.est_timestep = 0
                self.__env_update()
            else:
                # send previous action package
                self.robot.send_command(self.actual_pTs_filtered_sent)
                time.sleep(0.00001)
            current_time = time.time()
            # print("Frequency: ", 1/(current_time - previous_time))
            previous_time = current_time
            self.est_timestep += 1

    def pid_ctrl_restore_stand(self):
        """Blend from the landing pose back to the A1 standing pose over 60 ticks."""
        policy_count = 0
        a1_default_target_positions = np.array([0.0,0.9,-1.8, 0.0,0.9,-1.8,
                                                0.0,0.9,-1.8, 0.0,0.9,-1.8])
        obs = self.robot.receive_observation()
        self.__process_recv_package(np.copy(obs))
        # Start the blend from the robot's current (landing) joint positions.
        landing_joint_pos = np.array(obs[6:18])
        landing_joint_pos[[1,2,4,5,7,8,10,11]] *= -1
        restore_duration = 60
        while policy_count < 60:
            obs = self.robot.receive_observation()
            self.__process_recv_package(np.copy(obs))
            if policy_count > restore_duration:
                # NOTE(review): unreachable — the loop runs only while
                # policy_count < 60 and restore_duration == 60.
                self.actual_pTs_filtered_sent = np.copy(a1_default_target_positions)
                for i in range(4):
                    self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1]
                    self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2]
                cmd=self.process_send_cmd(np.concatenate((self.actual_pTs_filtered_sent,[0.0, 0.0, 0.0],np.zeros((12,)), [0.0,0.0,0.0])))
            else:
                # Linear interpolation from landing pose to standing pose.
                self.actual_pTs_filtered_sent = (restore_duration - policy_count) / restore_duration * landing_joint_pos + policy_count / restore_duration * np.array(a1_default_target_positions)
                for i in range(4):
                    self.actual_pTs_filtered_sent[3*i+1] = -self.actual_pTs_filtered_sent[3*i+1]
                    self.actual_pTs_filtered_sent[3*i+2] = -self.actual_pTs_filtered_sent[3*i+2]
                cmd=self.process_send_cmd(np.concatenate((self.actual_pTs_filtered_sent,[0.0, 0.0, 0.0],np.zeros((12,)), [0.0,0.0,0.0])))
            self.robot.send_command(cmd)
            policy_count += 1
            current_time = time.time()
            # print("proc", "Frequency: ", 1/(current_time - previous_time + 1e-10))
            previous_time = current_time
            delay = 0.6
            _ = time.perf_counter() + delay/1000
            while time.perf_counter() < _:
pass | yichen928/RSR_Goalkeeper | src/rl_control/env.py | env.py | py | 18,624 | python | en | code | 0 | github-code | 36 |
13744203488 | import glob
import os
import requests
import time
import sys
import numpy as np
import pandas as pd
from pandas.core.frame import DataFrame
from geocoding_api_extract.utils.progress import Progress
def create_geocoding_api_request_str(street, city, state,
                                     benchmark='Public_AR_Census2010',
                                     vintage='Census2010_Census2010',
                                     layers='14',
                                     format='json') -> str:
    """Build a Census geocoding-API 'geographies/address' request URL.

    Args:
        street (str): street address
        city (str): city
        state (str): state as 2 digit initial
        benchmark (str, optional): Defaults to 'Public_AR_Census2010'.
        vintage (str, optional): Defaults to 'Census2010_Census2010'.
        layers (str, optional): Defaults to '14'.
        format (str, optional): Defaults to 'json'.

    Returns:
        str: geocoding api request string.
    """
    endpoint = 'https://geocoding.geo.census.gov/geocoder/geographies/address'
    return (f'{endpoint}?street={street}&city={city}&state={state}'
            f'&benchmark={benchmark}&vintage={vintage}'
            f'&layers={layers}&format={format}')
def extract_address_batch(address_batch, city, state, retries=5):
    """Geocode one batch of addresses via the Census geocoding API.

    Args:
        address_batch (list(str)): list of addresses
        city (str): City
        state (str): 2-digit state code
        retries (int, optional): Number of times to retry the api request. Defaults to 5.

    Returns:
        DataFrame: columns ['address','state','county','tract','cent_lat',
        'cent_lon','us_zip']; unmatched addresses get 'not found'.  An empty
        DataFrame is returned if the API keeps failing for an address.
    """
    result = {'address': address_batch,
              'state': [],
              'county': [],
              'tract': [],
              'cent_lat': [],
              'cent_lon': [],
              'us_zip': []}
    geo_columns = ('state', 'county', 'tract', 'cent_lat', 'cent_lon', 'us_zip')
    for address in address_batch:
        exception = ""
        for attempt in range(retries):
            try:
                # Fix: re-issue the request on every retry.  The original
                # called requests.get() once outside the loop, so retries just
                # re-parsed the same failed response.
                request = requests.get(
                    create_geocoding_api_request_str(address, city, state))
                matches = []
                if request.status_code == 200:
                    matches = request.json()['result']['addressMatches']
                if matches:
                    # Extract the whole record first, then append once per
                    # column, so a mid-parse failure cannot leave the result
                    # columns with unequal lengths.
                    block = matches[0]['geographies']['Census Blocks'][0]
                    row = (block['STATE'], block['COUNTY'], block['TRACT'],
                           block['CENTLAT'], block['CENTLON'],
                           matches[0]['addressComponents']['zip'])
                else:
                    row = ('not found',) * len(geo_columns)
                for col, value in zip(geo_columns, row):
                    result[col].append(value)
            except Exception as x:
                print(f'BAD REQUEST: {type(x)} {x}')
                exception = x
                # wait incrementally longer each retry
                wait_time = 30 * (attempt+1)**2
                print(f'Waiting {wait_time} seconds.')
                time.sleep(wait_time)
            else:
                break
        else:
            # all attempts failed, log this
            print(
                f'API REQUEST FAILED AFTER {retries} ATTEMPTS WITH EXCEPTION: {exception}')
            empty_result = pd.DataFrame()
            return empty_result
    results = pd.DataFrame(result)
    return results
def extract_address_batches(address_batches, city, state, progress):
    """Yield the extraction result of each address batch, resuming from progress.

    Args:
        address_batches (list[list[str]]): list of address batches
        city (str): City
        state (str): 2 digit state code
        progress (Progress): Object for keeping track of progress

    Yields:
        DataFrame: resulting DataFrame from address extraction
    """
    total = len(address_batches)
    index = progress.value
    while index < total:
        print('Processing address batch:', index)
        yield extract_address_batch(address_batches[index], city, state)
        index += 1
def extract_address_details(addresses, city, state,
                            tmp_folder, tmp_filename_prefix='',
                            reset=False, clean_result=True) -> DataFrame:
    """Extract address details from the geocoding api, resumably.

    Addresses are processed in batches of at most ~50; each batch result is
    written to a temp parquet file so an interrupted run can resume.

    Args:
        addresses (list[str]): a list of addresses
        city (str): the city where the addresses reside
        state (str): 2-digit state abbreviation
        tmp_folder (str): the folder to put partial data extracts in
        tmp_filename_prefix (str, optional): export filename prefix for temp result chunks
        reset (bool, optional): if True, delete all temp files and reset progress
        clean_result (bool, optional): if True, drop rows where no tract was found

    Returns:
        DataFrame: A table showing each address and the following extra columns:
        ['address','state'(id),'county'(id),'tract'(id),'cent_lat','cent_lon','us_zip']
    """
    # ---- input validation ------------------------------------------------
    if not isinstance(addresses, list):
        print("Type mismatch: 'addresses' needs to be a list of strings")
        return pd.DataFrame()
    if not isinstance(city, str):
        print("Type mismatch: 'city' needs to be a string")
        return pd.DataFrame()
    if not isinstance(state, str):
        print("Type mismatch: 'state' needs to be a string")
        return pd.DataFrame()
    if not isinstance(tmp_folder, str):
        print("Type mismatch: 'tmp_folder' needs to be a string")
        return pd.DataFrame()
    # ---- batch addresses into manageable chunks --------------------------
    # Ceil division keeps every batch at <= ~50 addresses.  The original
    # ``len(addresses) // 50`` let batches grow to 99 (e.g. 99 -> 1 batch).
    parts = max(1, -(-len(addresses) // 50))
    if parts > 1:
        address_batches = np.array_split(addresses, parts)
    else:
        address_batches = [addresses]
    # ---- prepare the per-run temp directory ------------------------------
    path = tmp_folder + tmp_filename_prefix + state + '_' + city + '/'
    try:
        if os.path.isdir(path):
            print("Directory already exists")
        else:
            os.mkdir(path)
            # Fix: only claim success when the directory was actually created
            # (previously printed even after "Directory already exists").
            print("Successfully created the directory %s " % path)
    except OSError:
        print("Returning empty DataFrame: There is a problem with the tmp_folder path: %s." % path)
        return pd.DataFrame()
    export_tmp_fp = path + 'geocoding_api_extract'
    # initialize progress tracker
    # TODO: add 'home' directory in a project config file and use that as the root for
    # this progress filepath
    progress = Progress('GEOCODING_API', path + 'geocoding_api_progress.cfg')
    # reset progress and remove any stale partial extracts
    if reset:
        progress.reset()
        for fp in glob.glob(export_tmp_fp + "_part*.parquet.gzip"):
            os.remove(fp)
    # ---- extract and export each batch -----------------------------------
    for result in extract_address_batches(address_batches, city, state, progress):
        fp = export_tmp_fp + '_part' + str(progress.value) + '.parquet.gzip'
        result.to_parquet(fp)
        progress.increment()
    # ---- combine all the batch files into a DataFrame --------------------
    filepaths = glob.glob(export_tmp_fp + "_part*.gzip")
    if not filepaths:
        # Nothing was extracted (e.g. empty address list); pd.concat([]) would raise.
        print("Geocoding api address extract is complete.")
        return pd.DataFrame()
    result_dfs = [pd.read_parquet(filepath) for filepath in filepaths]
    results = pd.concat(result_dfs, ignore_index=True)
    if clean_result:
        results = results[results['tract'] != 'not found']
    print("Geocoding api address extract is complete.")
    return results
def remove_tmp_files(city, state, tmp_folder, tmp_filename_prefix='') -> None:
    """Remove the temp chunk files, progress file and directory for one extract run.

    Args:
        city (str): the city where the addresses reside
        state (str): 2-digit state abbreviation
        tmp_folder (str): the folder containing partial data extracts
        tmp_filename_prefix (str, optional): export filename prefix for temp result chunks
    """
    target_dir = tmp_folder + tmp_filename_prefix + state + '_' + city + '/'
    if not os.path.isdir(target_dir):
        return
    # Delete every exported parquet chunk for this run.
    chunk_pattern = target_dir + 'geocoding_api_extract' + "_part*.parquet.gzip"
    for chunk_path in glob.glob(chunk_pattern):
        os.remove(chunk_path)
    # Delete the progress tracker file, then the (now empty) directory.
    progress_file = target_dir + 'geocoding_api_progress.cfg'
    if os.path.isfile(progress_file):
        os.remove(progress_file)
    if os.path.isdir(target_dir):
        os.rmdir(target_dir)
| AndoKalrisian/geocoding_api_extract | src/geocoding_api_extract/__init__.py | __init__.py | py | 9,005 | python | en | code | 0 | github-code | 36 |
def last_digit(n1, n2):
    """Return the last decimal digit of ``n1 ** n2``.

    Uses three-argument ``pow`` (modular exponentiation), so huge exponents
    never materialize the full power — the original computed ``n1 ** n2``
    outright, which is O(n2) multiplications of ever-growing integers.

    Args:
        n1: non-negative int base (bools rejected)
        n2: non-negative int exponent (bools rejected)

    Raises:
        ValueError: if either input is None or negative.
        TypeError: if either input is not an int (bools count as non-ints).
    """
    if n1 is None or n2 is None:
        raise ValueError("Both inputs must not be None")
    if not isinstance(n1, int) or not isinstance(n2, int) or isinstance(n1, bool) or isinstance(n2, bool):
        raise TypeError("Both inputs must be integers")
    if n1 < 0 or n2 < 0:
        raise ValueError("Inputs must be non-negative integers")
    # pow(n1, n2, 10) == (n1 ** n2) % 10 for non-negative ints, in O(log n2).
    # Note pow(0, 0, 10) == 1, matching 0 ** 0 == 1.
    return pow(n1, n2, 10)
| Takhar1/code_wars_katas | lastDigitOfLargeNumber/lastDigit.py | lastDigit.py | py | 446 | python | en | code | 0 | github-code | 36 |
24390099064 | import re
from . import builder, cc, msvc
from .. import log, shell
from .common import choose_builder, guess_command, make_command_converter
from ..languages import known_langs
# Register the Windows resource-compiler language ('rc'): env vars $RC /
# $RCFLAGS and the '.rc' source extension.
with known_langs.make('rc') as x:
    x.vars(compiler='RC', flags='RCFLAGS')
    x.exts(source=['.rc'])

# Map a C compiler command name to the matching resource tool, e.g.
# 'gcc-10-win32' -> 'windres'.
_c_to_rc = make_command_converter([
    (re.compile(r'gcc(?:-[\d.]+)?(?:-(?:posix|win32))?'), 'windres'),
])

# Default command candidates per host platform family.
_posix_cmds = ['windres']
_windows_cmds = ['rc', 'windres']
# Builder classes probed in order for each candidate command.
_builders = (cc.CcRcBuilder, msvc.MsvcRcBuilder)
@builder('rc')
def rc_builder(env):
    """Return the resource-compiler builder for *env*.

    Resolution order: an explicitly-set $RC from the environment; otherwise a
    command guessed from the C compiler's name (e.g. a MinGW 'gcc-...-win32'
    implies 'windres'); otherwise the platform's default candidates.
    """
    langinfo = known_langs['rc']
    cmd = env.getvar(langinfo.var('compiler'))
    if cmd:
        # The user told us exactly which tool to use; don't second-guess it.
        return choose_builder(env, langinfo, _builders, candidates=cmd)

    # We don't have an explicitly-set command from the environment, so try to
    # guess what the right command would be based on the C compiler command.
    candidates = (_windows_cmds if env.host_platform.family == 'windows'
                  else _posix_cmds)
    sibling = env.builder('c').compiler
    guessed_cmd = guess_command(sibling, _c_to_rc)

    # If the guessed command is the same as the first default command
    # candidate, remove it. This will keep us from logging a useless info
    # message that we guessed the default value for the command.
    if guessed_cmd is not None and guessed_cmd != candidates[0]:
        try:
            # strict=True: fail fast if the guessed tool is missing so we can
            # fall through to the defaults below.
            builder = choose_builder(env, langinfo, _builders,
                                     candidates=guessed_cmd, strict=True)
            log.info('guessed windows rc compiler {!r} from c compiler {!r}'
                     .format(guessed_cmd, shell.join(sibling.command)))
            return builder
        except FileNotFoundError:
            pass

    # Try the default command candidates.
    return choose_builder(env, langinfo, _builders,
                          candidates=candidates)
| jimporter/bfg9000 | bfg9000/tools/rc.py | rc.py | py | 1,858 | python | en | code | 73 | github-code | 36 |
70585542504 | ##
# 邮件自动推送 -- 20191105 created by terrell
# 配置相关变量
# 设置主题、正文等信息
# 添加附件
# 登录、发送#
import time
import os
import smtplib
import email
import datetime
import sys
import traceback
from email.mime.application import MIMEApplication
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
year = int(datetime.datetime.now().strftime('%Y'))
month = int(datetime.datetime.now().strftime('%m'))
day = int(datetime.datetime.now().strftime('%d'))
print(day)
print(month)
print(year)
target = " 离考试只有: " + str((datetime.datetime(2020,12,21)-datetime.datetime(year,month,day)).days) + "天了!"
print(target)
# 配置变量
sender = "2516234365@qq.com"
qqCode = 'lnxcnjuvmtqmdjif'
receiver = '1203562850@qq.com'
cc = '2516234365@qq.com'
subject = "向同学☺加油, 我们一起上岸! " + target
username = "2516234365@qq.com"
password = "yuyali2010970514"
# 邮件主题、正文设置
massage = MIMEMultipart()
massage['subject'] = subject
massage['to'] = receiver
massage['Cc'] = cc
massage['from'] = 'dongjian.yu@qtdatas.com'
body = '''Dear 向同学:
昨天学了吗?
今天要学吗?
任务完成了吗?
①英语
②政治
③专业理论
④毕业创作
⑤毕业论文
------------------
Terrell
QTdatas dongjian.yu
Mobile: +86 15188593321
Email: dongjian.yu@qtdatas.com'''
massage.attach(MIMEText(body, 'plain', 'utf-8'))
# Attachments could be added here, e.g.:
# for filepath in attachment_paths:
#     appendix = MIMEApplication(open(filepath, 'rb').read())
#     appendix.add_header('content-disposition', 'attachment', filename=filepath)
#     massage.attach(appendix)


def main():
    """Connect to QQ's SMTP server over SSL and send the reminder mail.

    Uses the module-level ``sender``/``qqCode``/``receiver``/``cc``/``massage``
    configuration defined above.
    """
    smtp_server = 'smtp.qq.com'
    server = smtplib.SMTP_SSL(smtp_server, 465)
    server.set_debuglevel(1)
    # QQ mail authenticates with the app-specific authorization code
    # (qqCode), not the account password. The original logged in twice --
    # the second time with the raw password, which fails on smtp.qq.com.
    server.login(sender, qqCode)
    print('登录成功')
    server.sendmail(sender, receiver.split(',') + cc.split(','), massage.as_string())
    print('邮件发送完成')
    # Close the SMTP session cleanly instead of leaking the connection.
    server.quit()


if __name__ == '__main__':
    # Guarded so that importing this module no longer sends mail as a side
    # effect; running the script behaves exactly as before.
    main()
| yudongjian/remember_word_tkinter | sent_mailToLeo.py | sent_mailToLeo.py | py | 2,257 | python | en | code | 0 | github-code | 36 |
31063677025 |
from ..utils import Object
class LoginUrlInfoRequestConfirmation(Object):
    """
    An authorization confirmation dialog needs to be shown to the user

    Attributes:
        ID (:obj:`str`): ``LoginUrlInfoRequestConfirmation``

    Args:
        url (:obj:`str`):
            An HTTP URL to be opened

        domain (:obj:`str`):
            A domain of the URL

        bot_user_id (:obj:`int`):
            User identifier of a bot linked with the website

        request_write_access (:obj:`bool`):
            True, if the user needs to be requested to give the permission to the bot to send them messages

    Returns:
        LoginUrlInfo

    Raises:
        :class:`telegram.Error`
    """
    ID = "loginUrlInfoRequestConfirmation"

    def __init__(self, url, domain, bot_user_id, request_write_access, **kwargs):
        self.url = url  # str
        self.domain = domain  # str
        self.bot_user_id = bot_user_id  # int
        self.request_write_access = request_write_access  # bool

    @staticmethod
    def read(q: dict, *args) -> "LoginUrlInfoRequestConfirmation":
        # Build the object straight from the deserialized TDLib dict;
        # missing keys become None, exactly as with the original q.get calls.
        return LoginUrlInfoRequestConfirmation(
            q.get('url'),
            q.get('domain'),
            q.get('bot_user_id'),
            q.get('request_write_access'),
        )
| iTeam-co/pytglib | pytglib/api/types/login_url_info_request_confirmation.py | login_url_info_request_confirmation.py | py | 1,365 | python | en | code | 20 | github-code | 36 |
39845550652 | """
Iguana (c) by Marc Ammon, Moritz Fickenscher, Lukas Fridolin,
Michael Gunselmann, Katrin Raab, Christian Strate
Iguana is licensed under a
Creative Commons Attribution-ShareAlike 4.0 International License.
You should have received a copy of the license along with this
work. If not, see <http://creativecommons.org/licenses/by-sa/4.0/>.
"""
from datetime import timedelta
from django.utils import timezone
import datetime
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def date_is_present_or_past(value):
    """Validator: reject values that are not datetimes or lie in the future."""
    if not isinstance(value, datetime.datetime):
        raise ValidationError(_("The value entered isn't a valid type of date or datetime."))
    # Guard clause above lets the happy path read straight through.
    if value > timezone.now():
        raise ValidationError(_("The date entered must be today or lesser."))
def logged_time_is_positive(value):
    """Validator: reject values that are not strictly positive timedeltas."""
    if not isinstance(value, timedelta):
        raise ValidationError(_("The value entered isn't a valid type of timedelta."))
    # Zero or negative durations are not a meaningful time log entry.
    if value <= timedelta(seconds=0):
        raise ValidationError(_("The logged time must be at least one minute"))
| midas66/iguana | src/timelog/validators.py | validators.py | py | 1,138 | python | en | code | null | github-code | 36 |
# Enable Google OAuth2 login only when the collector config asks for it.
# NOTE(review): Python 2 / web2py model file -- `json`, `session` and
# `config_get` are expected to come from the web2py model environment;
# confirm before reusing this outside web2py.
auth_google = config_get('auth_google', False)

if auth_google:
    import urllib2
    from gluon.contrib.login_methods.oauth20_account import OAuthAccount
    client_id = config_get('google_client_id', None)
    client_secret = config_get('google_client_secret', None)

    class googleAccount(OAuthAccount):
        """web2py OAuth2 login method backed by Google accounts."""
        AUTH_URL="https://accounts.google.com/o/oauth2/auth"
        TOKEN_URL="https://accounts.google.com/o/oauth2/token"
        def __init__(self):
            # Request profile + email scopes; 'state' tags the provider so
            # the OAuth callback can tell which backend issued the redirect.
            OAuthAccount.__init__(self,
                      client_id=client_id,
                      client_secret=client_secret,
                      auth_url=self.AUTH_URL,
                      token_url=self.TOKEN_URL,
                      approval_prompt='force',
                      state='auth_provider=google',
                      scope='https://www.googleapis.com/auth/userinfo.profile https://www.googleapis.com/auth/userinfo.email')
        def get_user(self):
            # Exchange the access token for the user's profile information.
            token = self.accessToken()
            if not token:
                return None
            uinfo_url = 'https://www.googleapis.com/oauth2/v1/userinfo?access_token=%s' % urllib2.quote(token, safe='')
            uinfo = None
            try:
                uinfo_stream = urllib2.urlopen(uinfo_url)
            except:
                # Token rejected by Google: drop it so the user can re-auth.
                session.token = None
                return
            data = uinfo_stream.read()
            uinfo = json.loads(data)
            # Map Google's userinfo fields onto web2py's auth_user fields.
            return dict(first_name = uinfo['given_name'],
                        last_name = uinfo['family_name'],
                        username = uinfo['id'], email=uinfo['email'])

#auth.settings.actions_disabled=['register', 'change_password','request_reset_password','profile']
#auth.settings.login_form=googleAccount()
| opensvc/collector | init/models/auth_google.py | auth_google.py | py | 1,865 | python | en | code | 0 | github-code | 36 |
33164677249 | """
2019
La Brachistochrone Réelle
Un TIPE réalisé par Gautier BEN AÏM
http://tobog.ga
"""
import numpy as np
#
# I. Calculs physiques
# ======================
#
def generer_ligne(longueur, hauteur, nb_points):
    """Return the straight-line slide.

    A slide is represented by the triple (horizontal span, vertical drop,
    list of the heights of the intermediate points); the start point at
    ``hauteur`` and the finish at ``0`` are implicit.

    longueur : float, horizontal distance between start and finish
    hauteur : float, vertical distance
    nb_points : int, total number of points
    """
    dernier = nb_points - 1
    intermediaires = [hauteur * (1. - i / dernier) for i in range(1, dernier)]
    return longueur, hauteur, intermediaires
def calculer_temps_segment(distance, v, deriver_v, limite, pas):
    """Integrate the motion along one segment (explicit Euler scheme).

    Returns the pair (elapsed time, exit speed), or (None, None) when the
    time budget runs out or the bead stops / moves backwards before the end.

    distance : float, length to cover
    v : float, entry speed
    deriver_v : function, dv/dt as a function of the current speed
    limite : float, time budget
    pas : float, integration step dt
    """
    temps = 0.
    position = 0.
    while position < distance and temps < limite and v >= 0.:
        position += pas * v
        v += pas * deriver_v(v)
        temps += pas
    if position < distance:
        # Left the loop without reaching the end of the segment.
        return None, None
    return temps, v
def calculer_temps_toboggan(toboggan, appliquer_pfd, limite, pas):
    """Return the total travel time along the slide, or None on failure.

    toboggan : triple (length, height, intermediate point heights)
    appliquer_pfd : function, returns dv/dt for one segment
    limite : float, time budget for the whole run
    pas : float, integration step dt
    """
    longueur, hauteur, intermediaires = toboggan
    altitudes = intermediaires[:] + [0.]   # append the finish line
    nb_segments = len(altitudes)
    dx = longueur / nb_segments            # horizontal span of one segment
    dx2 = dx * dx
    temps_total = 0.
    vitesse = 0.
    y_prec = hauteur
    for y in altitudes:
        chute = y_prec - y
        longueur_segment = (chute * chute + dx2) ** 0.5
        # Newton's second law applied on this segment.
        deriver_v = appliquer_pfd(dx, chute)
        temps, vitesse = calculer_temps_segment(
            longueur_segment, vitesse, deriver_v, limite, pas
        )
        if temps is None:
            return None
        temps_total += temps
        limite -= temps                    # remaining time budget
        y_prec = y
    return temps_total
#
# II. Algorithme hybride
# ========================
#
def generer_evaluateur(appliquer_pfd):
    """Build the scoring function (= travel time) for a fixed physics model.

    The returned callable has signature (toboggan, limite, pas) and
    delegates to calculer_temps_toboggan with *appliquer_pfd* baked in.
    """
    def evaluer(toboggan, limite, pas):
        return calculer_temps_toboggan(toboggan, appliquer_pfd, limite, pas)
    return evaluer
def muter_creuser(toboggan, n):
""" Creuse un intervalle choisi au hasard d'une profondeur au hasard. """
_, hauteur, points = toboggan
i = np.random.randint(len(points))
j = np.random.randint(len(points))
if i > j:
i, j = j, i
h = hauteur / (1. + 0.05 * n)
v = np.random.uniform(-h, h)
for k in range(i, j + 1):
points[k] += v
def muter_lisser(toboggan, n):
""" Prend un point au hasard et en fait la moyenne de ses voisins. """
_, _, points = toboggan
i = np.random.randint(len(points) - 2)
points[i + 1] = (points[i] + points[i + 2]) / 2.
def diviser(toboggan, nb_points):
    """Resample the slide so it has ``nb_points`` points in total.

    New intermediate heights are obtained by linear interpolation of the
    old polyline (start and finish included); the input list is not mutated.
    """
    longueur, hauteur, intermediaires = toboggan
    anciens = [hauteur] + intermediaires + [0.]
    n_avant = len(anciens)
    nouveaux = []
    for i in range(1, nb_points - 1):
        # Position of the new point expressed in old-segment coordinates.
        x = i * (n_avant - 1) / (nb_points - 1)
        j = int(x)
        t = x % 1
        nouveaux.append((1 - t) * anciens[j] + t * anciens[j + 1])
    return longueur, hauteur, nouveaux
def generer_incrementeur(evaluateur, nb_points, facteur_nb_points, pas, facteur_pas):
    """Return the function that advances the search to the next generation.

    Each generation multiplies the number of slide points by
    ``facteur_nb_points`` and the integration step by ``facteur_pas``, so
    later generations are finer (and slower to evaluate).

    evaluateur : function, returned by generer_evaluateur
    nb_points : int, initial number of points
    facteur_nb_points : float, multiplier applied per generation
    pas : float, initial integration step
    facteur_pas : float, multiplier applied per generation
    """
    def premiere_generation(meilleur_candidat):
        """Handle the very first call of incrementer_generation."""
        def calculer_score(toboggan, limite):
            return evaluateur(toboggan, limite, pas)
        meilleur_score = calculer_score(meilleur_candidat, 10.)
        if meilleur_score is None:
            raise Exception("Le candidat proposé ne fonctionne pas")
        return meilleur_candidat, meilleur_score, calculer_score
    def incrementer_generation(generation, meilleur_candidat, meilleur_score):
        """Move to the next generation (finer discretisation and time step)."""
        if generation == 0:
            return premiere_generation(meilleur_candidat)
        nouveau_pas = pas * facteur_pas ** generation
        def calculer_score(toboggan, limite):
            return evaluateur(toboggan, limite, nouveau_pas)
        # Resample the best candidate onto the finer grid.
        meilleur_candidat = diviser(
            meilleur_candidat, (nb_points - 1) * facteur_nb_points ** generation + 1
        )
        # Re-score it with the new step; keep the old score if it now fails.
        score = calculer_score(meilleur_candidat, 2 * meilleur_score)
        if not score is None:
            meilleur_score = score
        return meilleur_candidat, meilleur_score, calculer_score
    return incrementer_generation
def evoluer(
    toboggan,
    nb_generations,
    generation_suivante,
    incrementer_generation,
    periode_lisser,
    signaler_fin,
    rafraichir=None,
):
    """
    Iteratively improve the slide given as argument (hybrid hill-climbing).

    toboggan : triple, starting candidate
    nb_generations : int, maximum number of parameter refinements
    generation_suivante : int, candidates to try before refining
    incrementer_generation : function, called on each generation change
    periode_lisser : int, period between two smoothing mutations
    signaler_fin : function, tells the loop to stop
    rafraichir : function, called on every improvement
    """
    generation = 0
    meilleur_candidat, meilleur_score, calculer_score = incrementer_generation(
        generation, toboggan, None
    )
    # Number of candidates generated, and the index of the last improvement.
    n = 0
    dernier_progres = 0
    nb_progres = 0
    print("Initialisation, score : {:f}".format(meilleur_score))
    while not signaler_fin():
        n += 1
        # When the algorithm stops improving, increase the resolution.
        if (
            n - dernier_progres >= generation_suivante
            and generation < nb_generations - 1
        ):
            generation += 1
            dernier_progres = n
            meilleur_candidat, meilleur_score, calculer_score = incrementer_generation(
                generation, meilleur_candidat, meilleur_score
            )
            print(
                "Génération {} ({}), score : {:f}".format(generation, n, meilleur_score)
            )
        # Take a new candidate (copy the points so mutations don't touch
        # the current best).
        candidat = (meilleur_candidat[0], meilleur_candidat[1], meilleur_candidat[2][:])
        # Mutate it: smooth periodically, otherwise dig.
        if n % periode_lisser == 0:
            muter_lisser(candidat, n)
        else:
            muter_creuser(candidat, n)
        # Finally, score it (the current best score serves as time budget).
        score = calculer_score(candidat, meilleur_score)
        if not score is None and score < meilleur_score:
            nb_progres += 1
            dernier_progres = n
            meilleur_candidat = candidat
            meilleur_score = score
            if not rafraichir is None:
                rafraichir(meilleur_candidat, meilleur_score)
    print(("{} individus testés, {} conservés").format(n, nb_progres))
    return meilleur_candidat
#
# III. Génération d'une cycloïde
# ================================
#
def generer_cycloide(longueur, hauteur, nb_points):
""" Renvoie le toboggan cycloïde. """
def trouver_zero(f, a, b, precision=1e-9):
""" Recherche dichotomique du zéro de f entre a et b. """
fa = f(a)
while b - a > precision:
m = (a + b) / 2.
fm = f(m)
if fm == 0.:
return m
elif fm * fa > 0.:
a = m
fa = f(a)
else:
b = m
return m
# Valeur de thêta du point d'arrivée
theta = trouver_zero(
lambda t: hauteur / longueur - (1. - np.cos(t)) / (t - np.sin(t)),
0.001,
2 * np.pi,
)
# Rayon de la cycloïde reliant le départ et l'arrivée
r = hauteur / (1. - np.cos(theta))
# Points de la courbe paramétrée
courbe = []
for i in range(2 * nb_points + 1):
t = theta * i / (2 * nb_points)
x = r * (t - np.sin(t))
y = r * (np.cos(t) - 1.) + hauteur
courbe.append((x, y))
# Points intermédiaires du toboggan
points = []
j = 0
for i in range(1, nb_points - 1):
x = longueur * i / (nb_points - 1)
while courbe[j][0] < x:
j += 1
a = (courbe[j][1] - courbe[j - 1][1]) / (courbe[j][0] - courbe[j - 1][0])
b = courbe[j][1] - a * courbe[j][0]
points.append(a * x + b)
return longueur, hauteur, points
#
# IV. Génération de la meilleure courbe
# =======================================
#
if __name__ == "__main__":

    import sys
    import matplotlib.pyplot as plt
    from time import time

    debut = time()

    # Experiment parameters
    longueur = 1.2
    hauteur = 0.5

    # Algorithm parameters
    nb_points = 121 # start + intermediate points + finish
    pas = 0.000001 # time step dt for the final evaluation
    nb_generations = 4
    generation_suivante = 150
    periode_lisser = 8
    nb_points_initial = 16
    facteur_nb_points = 2
    pas_initial = 0.0004
    facteur_pas = 0.2
    # Time budget (seconds) for the optimisation, first CLI argument.
    temps_de_calcul = int(sys.argv[1]) if len(sys.argv) >= 2 else 60

    def appliquer_pfd(x, y):
        """ Newton's second law for the bead riding the slide. """
        g_sin_theta = 9.81 * y / (y * y + x * x) ** 0.5
        fg_cos_theta = 0.3263 * 9.81 * x / (y * y + x * x) ** 0.5
        a = g_sin_theta - fg_cos_theta
        # Return dv/dt expressed as a function of the speed v itself
        # (constant drive minus linear and quadratic friction terms).
        return lambda v: a - 0.0026 * v - 0.4748 * v * v

    # Reference computation for the cycloid
    cycloide = generer_cycloide(longueur, hauteur, nb_points)
    calculer_score = generer_evaluateur(appliquer_pfd)
    temps_cycloide = calculer_score(cycloide, 10., pas)
    # Starting point of the algorithm: the straight line
    ligne = generer_ligne(longueur, hauteur, nb_points_initial)

    # Plotting
    plt.figure("Toboggan", figsize=(8, 6), dpi=72)
    plt.plot(
        np.linspace(0., longueur, nb_points),
        [hauteur] + cycloide[2] + [0.],
        "#363737",
        dashes=[3, 2],
        label="cycloïde"
        if temps_cycloide is None
        else "cycloïde ({:f} s)".format(temps_cycloide),
    )
    graphe, = plt.plot(
        np.linspace(0., longueur, nb_points_initial),
        [hauteur] + ligne[2] + [0.],
        "#ef4026",
        linewidth=2,
        label="toboggan",
    )
    plt.title("La brachistochrone réelle")
    plt.xlabel("Longueur (m)")
    plt.ylabel("Hauteur (m)")
    plt.axis("equal")
    plt.legend()
    plt.draw()
    plt.pause(0.001)

    def generer_chronometre():
        """ Return all the time-dependent callbacks (stop signal, redraw). """
        debut = time()

        def temps_ecoule():
            """ Elapsed wall-clock time. """
            return time() - debut

        def signaler_fin():
            """ Stop signal: True once the time budget is spent. """
            return temps_ecoule() > temps_de_calcul

        def rafraichir(toboggan, temps):
            """ Update the plot each time a better candidate is found. """
            t = temps_ecoule()
            nb_points = len(toboggan[2]) + 2
            if len(graphe.get_xdata()) != nb_points:
                graphe.set_xdata(np.linspace(0., longueur, nb_points))
            graphe.set_ydata([hauteur] + toboggan[2] + [0.])
            graphe.set_label("toboggan ({:f} s)".format(temps))
            plt.title(
                "La brachistochrone réelle après {:d} min {:0>2d} s de calcul".format(
                    int(t / 60), int(t % 60)
                )
            )
            # Turn the curve blue once it beats the cycloid.
            if temps_cycloide is None or temps <= temps_cycloide:
                graphe.set_color("#0165fc")
            plt.legend()
            plt.draw()
            plt.pause(0.001)

        return signaler_fin, rafraichir

    signaler_fin, rafraichir = generer_chronometre()

    # Run the hybrid algorithm
    toboggan = evoluer(
        ligne,
        nb_generations,
        generation_suivante,
        generer_incrementeur(
            calculer_score,
            nb_points_initial,
            facteur_nb_points,
            pas_initial,
            facteur_pas,
        ),
        periode_lisser,
        signaler_fin,
        rafraichir,
    )
    temps = calculer_score(toboggan, 10., pas)
    rafraichir(toboggan, temps)
    print("Temps sur le toboggan optimisé : {:f} secondes".format(temps))
    if not temps_cycloide is None:
        print(
            (
                "Temps sur la cycloïde ........ : {:f} secondes\n" +
                "Différence de temps .......... : {:f} secondes"
            ).format(temps_cycloide, abs(temps_cycloide - temps))
        )
    else:
        print("La cycloïde ne permet pas de rejoindre les deux points")
    # Execution time
    print("Calculé en {:f} secondes".format(time() - debut))
    if len(sys.argv) >= 3 and sys.argv[2] == "svg":
        plt.savefig("toboggan.svg")
    plt.show()
| GauBen/Toboggan | toboggan.py | toboggan.py | py | 14,074 | python | fr | code | 1 | github-code | 36 |
27435770961 | import os
import pathlib
from matplotlib import pyplot as plt
from skimage import io, img_as_float
from skimage.color import rgb2gray
from skimage.filters.edges import sobel
from skimage.segmentation import felzenszwalb, watershed, mark_boundaries, slic, quickshift
def read_boundaries(_img):
    """Load an image and show four segmentation algorithms side by side.

    _img: path to the image file. The image is downsampled by 2 in both
    axes, then segmented with Felzenszwalb, watershed, SLIC and quickshift;
    results are drawn in one 3x2 matplotlib figure (slot 2 stays empty).
    """
    # Read image (the parameter is rebound from a path to pixel data).
    _img = io.imread(_img)
    row = 3
    column = 2
    # Keep every second pixel in both dimensions to speed up segmentation.
    _img_as_float = img_as_float(_img[::2, ::2])
    # ------------------------------------------------------------------------------------------------------------------
    plt.figure("Segmentation")
    plt.axis("off")
    plt.subplot(row, column, 1, title="Original")
    plt.imshow(_img_as_float)
    # ---------------------------------------------------------------------------------------------------------------- #
    segments_fz = felzenszwalb(_img_as_float, scale=100, sigma=0.5, min_size=50)
    plt.subplot(row, column, 3, title="Felzenszwalb")
    plt.imshow(mark_boundaries(_img_as_float, segments_fz))
    # ---------------------------------------------------------------------------------------------------------------- #
    # Watershed is run on the gradient magnitude of the greyscale image.
    gradient = sobel(rgb2gray(_img_as_float))
    segments_watershed = watershed(gradient, markers=250, compactness=0.001)
    plt.subplot(row, column, 4, title="Watershed")
    plt.imshow(mark_boundaries(_img_as_float, segments_watershed))
    # ---------------------------------------------------------------------------------------------------------------- #
    _slic = slic(_img_as_float, n_segments=250, compactness=10, sigma=1, start_label=1)
    plt.subplot(row, column, 5, title="SLIC")
    plt.imshow(mark_boundaries(_img_as_float, _slic))
    # ---------------------------------------------------------------------------------------------------------------- #
    _quick = quickshift(_img_as_float, kernel_size=3, max_dist=6, ratio=0.5)
    plt.subplot(row, column, 6, title="Quick")
    plt.imshow(mark_boundaries(_img_as_float, _quick))
    plt.show(block=True)


if __name__ == "__main__":
    # Default input: <repo_root>/input3/fish.bmp relative to this file.
    dir_path = os.path.join(pathlib.Path(__file__).parent.parent, "input3", "fish.bmp")
    read_boundaries(dir_path)
| 206081/psio | Lab3/zad3.py | zad3.py | py | 2,134 | python | en | code | 0 | github-code | 36 |
12486383692 | '''
Created on Jan 29, 2020
@author: Michal.Busta at gmail.com
'''
import numpy as np
import neptune
class Meter:
    '''A meter to keep track of losses/scores throughout an epoch.

    Values are accumulated twice: ``metrics`` holds everything seen this
    epoch (for the epoch summary in :meth:`get_metrics`), while ``rmetrics``
    is a rolling window flushed every ``log_interval`` iterations.
    '''

    def __init__(self, phase, epoch, use_neptune=False, log_interval=100, total_batches=100):
        self.metrics = {}    # per-epoch accumulation: name -> list of values
        self.rmetrics = {}   # rolling accumulation, reset every log_interval
        self.phase = phase
        self.epoch = epoch
        self.use_neptune = use_neptune
        self.log_interval = log_interval
        self.total_batches = total_batches

    def update(self, **kwargs):
        '''Record one batch worth of metric values.

        Pass the current iteration as ``itr=...``; every other keyword is
        treated as a metric name/value pair. Every ``log_interval``
        iterations the rolling means are logged (neptune or stdout) and the
        rolling window is reset.
        '''
        itr = kwargs.pop('itr', 0)
        for name, value in kwargs.items():
            # setdefault replaces the original bare try/except, which
            # silently swallowed *any* exception, not just the missing key.
            self.metrics.setdefault(name, []).append(value)
            self.rmetrics.setdefault(name, []).append(value)
        if itr % self.log_interval == 0:
            for key in self.rmetrics:
                mean = np.mean(self.rmetrics[key])
                self.rmetrics[key] = []
                if self.use_neptune:
                    # x-axis is the global batch index across epochs.
                    neptune.log_metric(f'{key}_{self.phase}', itr + self.epoch * self.total_batches, mean)
                else:
                    print(f' - {key}: {mean}')

    def get_metrics(self):
        '''Return the per-metric epoch means, logging/printing a summary.'''
        ret = {}
        log_str = ''
        for key in self.metrics:
            mean = np.mean(self.metrics[key])
            ret[key] = mean
            log_str = '%s | %s: %0.4f ' % (log_str, key, mean)
            if self.use_neptune:
                neptune.log_metric(f'epoch_{key}_{self.phase}', self.epoch, mean)
        print(log_str)
        return ret
| drivendataorg/open-cities-ai-challenge | 3rd Place/meter.py | meter.py | py | 1,744 | python | en | code | 113 | github-code | 36 |
from setuptools import setup, find_packages
from os import path

here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()

# Read __version__ from the package without importing it (importing would
# require the runtime dependencies to already be installed).
version = {}
with open(path.join(here, "sonocrop", "__version__.py")) as f:
    exec(f.read(), version)

setup(
    name='sonocrop',
    version=version["__version__"],
    description='Prepare ultrasound videos for machine learning-- crop and remove static clutter from ultrasound video.',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/davycro/sonocrop',
    author='David Crockett, MD',
    author_email='davycro1@gmail.com',
    license = "Apache Software License 2.0",
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Healthcare Industry',
        'Topic :: Multimedia :: Video',
        'Topic :: Scientific/Engineering :: Medical Science Apps.',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8'
    ],
    keywords='ultrasound bedside ultrasound pocus ffmpeg opencv echo cardiac',
    packages=find_packages(),
    python_requires='>=3.6',
    install_requires=[
        'numpy',
        'opencv-python',
        'fire',
        'rich',
        'matplotlib',
    ],
    entry_points={ # Optional
        'console_scripts': [
            # Installs the `sonocrop` console command -> sonocrop.cli:main
            'sonocrop=sonocrop.cli:main',
        ],
    },
)
| davycro/sonocrop | setup.py | setup.py | py | 1,442 | python | en | code | 6 | github-code | 36 |
38232459709 | """
获取csdn用户资料
"""
import sys
import json
sys.path.append(r'D:\github\python\python-spider')
from csdnTest import *
# 1. Connect to the local MongoDB server.
name = MongoClient('localhost')
# 2. Open the local database 'demo' (created on first use).
db = name.demo # database name: demo
# 3. Create / open the collections.
emp = db.employees # 'employees' collection (proxy IP pool)
user = db.csdn_users # 'csdn_users' collection (scraped users)

# Scrape the 'web' (front-end) category.
category = 'web'
headers = getHeader() # customised request headers

# Multithreading placeholder (never implemented; always returns False).
def openThread():
    return False;

def getData(proxies,shown_offset):
    """Fetch one page of articles and store the author info, then recurse.

    NOTE(review): pagination via unbounded recursion — long runs will hit
    Python's recursion limit; consider a while-loop instead.
    """
    url = 'https://blog.csdn.net/api/articles?type=more&category=' + category+'&shown_offset='+shown_offset
    print("当前访问的url:" + url + ",访问的proxies:" + str(proxies))
    try:
        request = requests.get(url=url, proxies=proxies, headers=headers, timeout=4)
        if request.status_code != 200:
            # Debug marker for a non-200 response.
            print('200')
            return False
    except:
        # Debug marker for a failed/timed-out request.
        print('3333')
        return False
    content = json.loads(request.content)
    # Offset cursor for the next page of results.
    shownOffset = str(content['shown_offset'])
    print(shownOffset)
    all = content['articles']
    list = []  # NOTE(review): unused, and shadows the builtin 'list'
    for each in all:
        print(each['user_name'])
        # Persist the author's name, profile URL and avatar.
        handleData.writeUser(user, {
            "user_name": each['user_name'],
            "user_url": each['user_url'],
            "avatar": each['avatar']
        })
    # Random polite delay between page fetches.
    sleepTime = random.choice([3,4,5,6]);
    time.sleep(sleepTime)
    getData(proxies, shownOffset)

def start():
    """Bootstrap: pick a proxy IP, read the initial offset, start paging."""
    url = 'https://blog.csdn.net/nav/'+category
    ip = handleData.getIp(emp)
    # Proxy IP for both schemes.
    proxies = {"http": "http://" + ip, "https": "http://" + ip}
    print("当前访问的url:" + url + ",访问的ip:" + ip)
    try:
        request = requests.get(url=url, proxies=proxies, headers=headers, timeout=4)
        if request.status_code != 200:
            return False
        text = request.text
        soup = BeautifulSoup(text, 'lxml')
        content = soup.find('ul', class_='feedlist_mod')
        shownOffset = str(content['shown-offset'])
    except:
        print('失败了,删除ip' + ip)
        # Drop the dead proxy IP and retry with a fresh one.
        handleData.delete(emp, {'ip': ip})
        start()
        return False
    getData(proxies, shownOffset)

if __name__ == '__main__':
    start()
# Read numbers until the user answers 'N', then split them into even/odd lists.
lista = []
par = []
imp = []
while True:
    lista.append(int(input('Digite um número: ')))
    resp = str(input('Quer continuar [S/N]: ')).strip().upper()
    # Compare the first letter only; slicing (instead of the original [0])
    # means an empty answer no longer raises IndexError.
    if resp[:1] == 'N':
        break
# The original used enumerate() but never used the index; iterate values.
for v in lista:
    if v % 2 == 0:
        par.append(v)
    else:
        imp.append(v)
print(f'O números digitados foram {lista}')
print(f'Os números pares são: {par}')
print(f'Os números ímpares são: {imp}')
| TiagoFar/PythonExercises | ex082.py | ex082.py | py | 437 | python | pt | code | 0 | github-code | 36 |
4409217473 | from time import sleep
import requests
class AntiCaptcha:
    """Small client for the api.anti-captcha.com JSON API.

    Note: ``requests.Response.json()`` returns a plain dict, so every result
    field is read with key access. The original used attribute access
    (``json.errorId``, ``json.status``, ``.solution.token``), which either
    raised AttributeError or — via ``hasattr`` on a dict — silently disabled
    the error checks.
    """

    def __init__(self, client_key):
        self.base_url = "https://api.anti-captcha.com/"
        self.headers = {"Content-Type": "Application/json"}
        self.client_key = client_key

    def _post(self, endpoint: str, data: object):
        """Make requests to api.anti-captcha.com

        Args:
            endpoint (str): API Endpoint
            data (object): API Payload

        Raises:
            Exception: API Error

        Returns:
            dict: Decoded JSON response
        """
        url = self.base_url + endpoint
        data.update({"clientKey": self.client_key})
        response = requests.post(url, data, headers=self.headers)
        json = response.json()
        # errorId == 0 (or absent) means success; anything else is an error.
        if json.get("errorId", 0) == 0:
            return json
        raise Exception(json)

    def create_task(self, data: object):
        """Create Task

        Args:
            data (object): createTask Payload

        Returns:
            dict: Decoded JSON response (contains ``taskId``)
        """
        return self._post("createTask", data)

    def get_task(self, task_id: str):
        """Fetch the current state of a task.

        Args:
            task_id (str): API Task ID

        Returns:
            dict: Decoded JSON response
        """
        return self._post("getTaskResult", {"taskId": task_id})

    def get_result(self, task_id: str, sleep_seconds: float = 5):
        """Poll until the task leaves the 'processing' state.

        Args:
            task_id (str): API Task ID
            sleep_seconds (float, optional): Delay between polls. Defaults to 5.

        Raises:
            Exception: API Error

        Returns:
            dict: Final task result
        """
        # The original initialised ``json = {}`` and then read
        # ``json.status``, which raised AttributeError before the first
        # poll. Fetch first, then keep polling while still processing.
        json = self.get_task(task_id)
        while json.get("status") == "processing":
            sleep(sleep_seconds)
            json = self.get_task(task_id)
        if json.get("errorId", 0) == 0:
            return json
        raise Exception(json)

    def get_token(self, task_id: str, sleep_seconds: float = 5):
        """Get the result token.

        Args:
            task_id (str): API Task ID
            sleep_seconds (float, optional): Delay between polls. Defaults to 5.

        Returns:
            str: API Result Token
        """
        return self.get_result(task_id, sleep_seconds)["solution"]["token"]

    def solve(self, data: object):
        """All-in-one helper: create a task and wait for its token.

        Args:
            data (object): createTask Payload

        Returns:
            str: API Token
        """
        json = self.create_task(data)
        return self.get_token(json["taskId"])
| ShayBox/AntiCaptcha | anticaptcha/main.py | main.py | py | 2,693 | python | en | code | 3 | github-code | 36 |
38877688552 | import pandas as pd
import numpy as np
# For preprocessing the data
from sklearn.preprocessing import Imputer
from sklearn import preprocessing
# To split the dataset into train and test datasets
from sklearn.cross_validation import train_test_split
# To model the Gaussian Navie Bayes classifier
from sklearn.naive_bayes import GaussianNB
from sklearn import datasets
# To calculate the accuracy score of the model
from sklearn.metrics import accuracy_score
import urllib
file = r'/home/deepa/Downloads/adult.csv'
# df = pd.read_csv(file)
# print(df)

# Load the Adult census dataset; the ' *, *' delimiter strips the stray
# spaces around the commas that this dump contains.
adult_df = pd.read_csv(file,
                       header = None, delimiter=' *, *', engine='python')

adult_df.columns = ['age', 'workclass', 'fnlwgt', 'education', 'education_num',
                    'marital_status', 'occupation', 'relationship',
                    'race', 'sex', 'capital_gain', 'capital_loss',
                    'hours_per_week', 'native_country', 'income']

adult_df.isnull().sum()

# Missing values in this dataset are encoded as '?', not NaN; report them.
for value in ['workclass', 'education',
              'marital_status', 'occupation',
              'relationship','race', 'sex',
              'native_country', 'income']:
    print(value, ":", sum(adult_df[value] == '?'))

# Data preprocessing (adult_df_rev aliases adult_df; edits are shared).
adult_df_rev = adult_df
adult_df_rev.describe(include= 'all')

# '?'-imputation with the column mode was disabled:
# for value in ['workclass', 'education',
#                  'marital_status', 'occupation',
#                  'relationship','race', 'sex',
#                  'native_country', 'income']:
#     adult_df_rev[value].replace(['?'], [adult_df_rev.describe(include='all')[value][2]],
#                                 inplace='True')

# NOTE(review): the '*_cat' columns below are never created in this script
# (no label-encoding step is present), so the scaling loop will KeyError at
# runtime -- confirm the encoding step wasn't dropped from this file.
num_features = ['age', 'workclass_cat', 'fnlwgt', 'education_cat', 'education_num',
                'marital_cat', 'occupation_cat', 'relationship_cat', 'race_cat',
                'sex_cat', 'capital_gain', 'capital_loss', 'hours_per_week',
                'native_country_cat']

# Standardise each numeric feature to zero mean / unit variance.
scaled_features = {}
for each in num_features:
    mean, std = adult_df_rev[each].mean(), adult_df_rev[each].std()
    scaled_features[each] = [mean, std]
    adult_df_rev.loc[:, each] = (adult_df_rev[each] - mean)/std

# FIX: the original line read `rev.values[:,:14]` (a NameError) and the
# feature matrix was never assigned, although `features` is used below.
features = adult_df_rev.values[:, :14]
target = adult_df_rev.values[:, 14]
# Hold out a third of the rows for evaluation.
features_train, features_test, target_train, target_test = train_test_split(features,
    target, test_size = 0.33, random_state = 10)
clf = GaussianNB()
clf.fit(features_train, target_train)
target_pred = clf.predict(features_test)
d = accuracy_score(target_test, target_pred, normalize = True)
print(d)
| XecureBot/DeepikaDS | data.py | data.py | py | 2,534 | python | en | code | 0 | github-code | 36 |
34350297600 | # -*- coding: utf-8 -*-
"""
Created on Tue May 7 17:29:34 2019
@author: Administrator
"""
# Task: print 'RESTART' with its first 'R' shown as '$' when read backwards:
# RESTART -> TRATSER -> T$ATSER -> printed as RESTA$T.
r = 'RESTART'
# Reverse the word, then swap the first 'R' encountered (the original last
# character) for a dollar sign.
r = r[::-1].replace('R', '$', 1)
# Reversing once more yields the requested 'RESTA$T'.
print(r[::-1])
14031447002 | """
N N
NN N
N N N
N NN
N N
"""
n=int(input("enter the number of rows:"))
# Draw an n-row letter 'N': the left column (j == 1), the right column
# (j == n) and the main diagonal (i == j) are filled, the rest is blank.
for i in range(1,n+1):
    for j in range(1,n+1):
        if (i==j) or (j==1)or (j==n):
            print("N",end="")
        else:
            print(" ",end="")
    print()
7207928507 | print("1 sposob *****")
def decorator(func):
    """Surround each call of *func* with a dashed horizontal ruler."""
    ruler = "------------"
    def wrapper():
        print(ruler)
        func()
        print(ruler)
    return wrapper
def hello():
    print("Hello World")

# Way 1: decorate manually by calling the decorator as a plain function.
hello2 = decorator(hello)
hello2()
print("2 sposob ******")
# Way 2: the @ syntax — equivalent to witaj = decorator(witaj).
@decorator # name of the function that will be decorated
def witaj():
    print("Witaj Swiecie")
witaj()
print("***")
# Decorating an already-decorated function stacks another ruler layer.
hello3 = decorator(witaj)
hello3()
print("***")
# Plain aliasing: hello4 is the same (undecorated) function object.
hello4 = witaj
hello4()
print("***")
hello()
| 0xbm/Courses | KoW_YT/23.dekoratory.py | 23.dekoratory.py | py | 518 | python | pl | code | 0 | github-code | 36 |
10328382240 | from django.shortcuts import render,redirect,get_object_or_404
from django.http import HttpResponse
from .models import Item,Category
import datetime
# Create your views here.
def index(request):
    """Render the landing page listing every task alongside all categories."""
    context = {
        "task_list": Item.objects.all(),
        "category_list": Category.objects.all(),
    }
    return render(request, "index.html", context)
def add_task(request):
    """Create a new, uncompleted Item from the submitted form data.

    Expects POST fields "task" (the title) and "category_select"
    (an existing Category title); redirects back to the index.
    """
    todo = Item()
    todo.title = request.POST["task"]
    todo.iscompleted = False
    # 404 instead of an unhandled Category.DoesNotExist for unknown categories
    # (get_object_or_404 is already imported at the top of this module).
    todo.category = get_object_or_404(Category, title=request.POST["category_select"])
    todo.save()
    return redirect('/')
def complete_task(request, task_id):
    """Mark the Item identified by task_id as completed, then redirect home."""
    # 404 for a missing task instead of an unhandled Item.DoesNotExist.
    todo = get_object_or_404(Item, id=task_id)
    todo.iscompleted = True
    todo.save()
    return redirect('/')
def delete_task(request, task_id):
    """Delete the Item identified by task_id, then redirect home."""
    # 404 for a missing task instead of an unhandled Item.DoesNotExist.
    get_object_or_404(Item, id=task_id).delete()
    return redirect('/')
| FazalJarral/Notetaker | todo/views.py | views.py | py | 1,004 | python | en | code | 0 | github-code | 36 |
72071449384 | import time
import random
def l():
    """Interactively read a space-separated list of animals and echo each
    animal together with its (1-based) position in the list."""
    print("This program will ask you for a list of animals and then tell where the animal is in the list")
    time.sleep(0.5)
    print("Please type a list of animals with a space inbetween each")
    raw = input()
    print()
    # split() already yields a list of strings; no map(str, ...) needed.
    animals = raw.split()
    time.sleep(0.5)
    count = len(animals)
    time.sleep(0.5)
    print("You have {0} items in your animal list.".format(count))
    print()
    time.sleep(0.5)
    for position, animal in enumerate(animals, start=1):
        time.sleep(0.5)
        print("Animal {0} is {1}".format(position, animal))
| dandocmando/Python-Answers-by-dandocmando | PT15_15.1.py | PT15_15.1.py | py | 593 | python | en | code | 0 | github-code | 36 |
8596743224 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
""" Extract persistent network by removing ephemeral links and adding missing links.
Two filters:
1. at least 100 daily views for target video
2. the mean daily views of source video is at least 1% of the target video
Usage: python extract_persistent_network.py
Input data files: ../data/vevo_forecast_data_60k.tsv, ../data/network_pickle/
Output data files: ../data/persistent_network.csv
Time: ~7M
"""
import sys, os, pickle
from datetime import datetime, timedelta
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
from utils.data_loader import DataLoader
from utils.helper import Timer, obj2str, is_persistent_link, is_same_genre
def main():
    """Scan the T daily network snapshots and write every persistent
    src -> tar video link (plus summary statistics) to
    ../data/persistent_network.csv."""
    # == == == == == == Part 1: Set up environment == == == == == == #
    timer = Timer()
    timer.start()
    data_prefix = '../data/'
    # == == == == == == Part 2: Load video views == == == == == == #
    data_loader = DataLoader()
    data_loader.load_video_views()
    embed_avg_view_dict = data_loader.embed_avg_view_dict
    num_videos = data_loader.num_videos
    data_loader.load_embed_content_dict()
    embed_cid_dict = data_loader.embed_cid_dict
    embed_genre_dict = data_loader.embed_genre_dict
    # == == == == == == Part 3: Load dynamic network snapshot == == == == == == #
    network_dict_list = []
    for t in range(T):
        target_date_str = obj2str(datetime(2018, 9, 1) + timedelta(days=t))
        filename = 'network_{0}.p'.format(target_date_str)
        network_dict = pickle.load(open(os.path.join(data_prefix, 'network_pickle', filename), 'rb'))
        # Keep only x[0] (source embed) for entries with x[1] < NUM_REL;
        # x[1] is presumably a relevance rank — confirm against the pickle writer.
        for embed in network_dict:
            network_dict[embed] = [x[0] for x in network_dict[embed] if x[1] < NUM_REL]
        network_dict_list.append(network_dict)
    persistent_src_embed_set = set()
    persistent_tar_embed_set = set()
    existing_edges = set()
    num_reciprocal_edges = 0
    num_same_artist = 0
    num_same_genre = 0
    with open(os.path.join(data_prefix, 'persistent_network.csv'), 'w') as fout:
        fout.write('Source,Target\n')
        for tar_embed in range(num_videos):
            # Union of every source that linked to this target on any day.
            src_union_set = set()
            for t in range(T):
                src_union_set.update(set(network_dict_list[t][tar_embed]))
            for src_embed in src_union_set:
                # Daily 0/1 presence vector of the src -> tar link.
                linkage_list = [0] * T
                for t in range(T):
                    if src_embed in network_dict_list[t][tar_embed]:
                        linkage_list[t] = 1
                if is_persistent_link(linkage_list):
                    # filter: at least 100 daily views for target video,
                    # and the mean daily views of source video is at least 1% of the target video
                    src_mean = embed_avg_view_dict[src_embed]
                    tar_mean = embed_avg_view_dict[tar_embed]
                    if tar_mean >= 100 and src_mean >= 0.01 * tar_mean:
                        fout.write('{0},{1}\n'.format(src_embed, tar_embed))
                        persistent_src_embed_set.add(src_embed)
                        persistent_tar_embed_set.add(tar_embed)
                        # Reciprocal edge: the opposite direction was already written.
                        if '{1}-{0}'.format(src_embed, tar_embed) in existing_edges:
                            num_reciprocal_edges += 1
                        if embed_cid_dict[src_embed] == embed_cid_dict[tar_embed]:
                            num_same_artist += 1
                        if is_same_genre(embed_genre_dict[src_embed], embed_genre_dict[tar_embed]):
                            num_same_genre += 1
                        existing_edges.add('{0}-{1}'.format(src_embed, tar_embed))
    print('{0} edges in the persistent network'.format(len(existing_edges)))
    print('{0} source videos, {1} target videos, {2} videos appear in both set'.format(len(persistent_src_embed_set),
                                                                                      len(persistent_tar_embed_set),
                                                                                      len(persistent_src_embed_set.intersection(persistent_tar_embed_set))))
    print('{0} pairs of reciprocal edges'.format(num_reciprocal_edges))
    print('{0} ({1:.1f}%) edges belong to the same artist'.format(num_same_artist, 100 * num_same_artist / len(existing_edges)))
    print('{0} ({1:.1f}%) edges belong to the same genre'.format(num_same_genre, 100 * num_same_genre / len(existing_edges)))
    timer.stop()
if __name__ == '__main__':
    # Rank cutoff used when filtering each snapshot's source lists in main().
    NUM_REL = 15
    # Number of daily snapshots, counted from 2018-09-01 (see main()).
    T = 63
    main()
| avalanchesiqi/networked-popularity | wrangling/extract_persistent_network.py | extract_persistent_network.py | py | 4,496 | python | en | code | 11 | github-code | 36 |
38568577899 | '''
298. Binary Tree Longest Consecutive Sequence
Given a binary tree, find the length of the longest consecutive sequence path.
The path refers to any sequence of nodes from some starting node to any node in the
tree along the parent-child connections. The length consecutive path need to be
from parent to child (cannot be the reverse).
Example 1:
Input:
1
\
3
/ \
2 4
\
5
Output: 3
Explanation: Longest consecutive sequence path is 3-4-5, so return 3.
Example 2:
Input:
2
\
3
/
2
/
1
Output: 2
Explanation: Longest consecutive sequence path is 2-3, not 3-2-1, so return 2.
* it is not 3-2-1 because you cannot have a reverse sequence
'''
class TreeNode:
    """A node of a binary tree: a value plus optional left/right children."""

    def __init__(self, val=0, left=None, right=None):
        """Store the node's value and its child links (default: leaf of 0)."""
        self.val = val
        self.left = left
        self.right = right
class Solution(object):
    """LeetCode 298: longest consecutive (parent -> child, +1 steps) path."""

    def dfs(self, root, pathLength, parent):
        """Depth-first walk tracking the consecutive run ending at *parent*.

        pathLength is the length of the consecutive run that ends at
        *parent*; the best value seen so far is kept in self.length.
        """
        self.length = max(self.length, pathLength)
        if root is None:  # idiomatic identity test instead of '== None'
            return None
        # Extend the run only when this node is exactly parent's value + 1
        # (same condition as parent.val - root.val == -1, stated directly).
        if root.val - parent.val == 1:
            pathLength += 1
        else:
            pathLength = 1
        self.dfs(root.left, pathLength, root)
        self.dfs(root.right, pathLength, root)

    def longestConsecutive(self, root):
        """Return the length of the longest consecutive sequence path.

        Returns 0 for an empty tree; seeds the walk with the root as its
        own parent, so the initial run length is 1.
        """
        self.length = 0
        if root:
            self.dfs(root, 1, root)
        return self.length
# root = TreeNode(2)
# root.right = TreeNode(3)
# root.right.left = TreeNode(2)
# root.right.left.left = TreeNode(1)
# Left chain 1 -> 2 -> 3: each child is its parent's value + 1,
# so the longest consecutive path has length 3.
root = TreeNode(1)
root.left = TreeNode(2)
root.left.left = TreeNode(3)
sol = Solution()
print(sol.longestConsecutive(root)) | archanakalburgi/Algorithms | testPrepration/BTLongestConsecSequence.py | BTLongestConsecSequence.py | py | 1,772 | python | en | code | 1 | github-code | 36 |
17849635757 | from ..config import ElementName as BasicElementName, extract_display_name, ReactionConfig, \
NetworkGeneralConfig, ParameterName
from ..metabolic_network_elements.reaction_element import ReactionElement
class Reaction(object):
    """A metabolic reaction for network drawing.

    Holds the reaction's identity, flux values, display-text configuration
    and the geometric start/end description of its edges, and converts
    itself into a drawable ReactionElement via to_element().
    """

    def __init__(
            self, reaction_name, reversible=False, reaction_start_end_list=None, change_arrow_by_value=True,
            extra_parameter_dict=None, **kwargs):
        """Create a reaction.

        reaction_name: identifier; the display name is derived from it.
        reversible: whether the reaction may run in both directions.
        reaction_start_end_list: geometric edge description consumed by
            to_element() (list of (property, *params) tuples).
        change_arrow_by_value: if True, arrow direction follows flux values.
        extra_parameter_dict: extra entries merged into each edge dict.
        kwargs: forwarded verbatim to ReactionElement in to_element().
        """
        self.reaction_name = reaction_name
        self.kwargs = kwargs
        self.display_reaction_name = extract_display_name(reaction_name)
        self.reaction_start_end_list = reaction_start_end_list
        if extra_parameter_dict is None:
            extra_parameter_dict = {}
        self.extra_parameter_dict = extra_parameter_dict
        # Placeholder defaults; the real values are set by _initialize_tags below.
        self.reversible = False
        self.boundary_flux = False
        self.display_config_nested_dict = {}
        self.default_display_text_config = {}
        self.change_arrow_by_value = True
        self.forward_value = None
        self.backward_value = None
        self.net_value = None
        self.tail_arrow = None
        self.head_arrow = None
        self._initialize_tags(reversible, change_arrow_by_value)

    def _initialize_tags(self, reversible=False, change_arrow_by_value=True):
        """(Re)set all mutable state to its initial values."""
        self.reversible = reversible
        self.boundary_flux = False
        self.display_config_nested_dict = {}
        self.default_display_text_config = ReactionConfig.default_display_text_config
        self.change_arrow_by_value = change_arrow_by_value
        self.forward_value = None
        self.backward_value = None
        self.net_value = None
        self.tail_arrow = None
        self.head_arrow = None

    def reset(self):
        """Clear the geometry and re-initialize all tags to defaults."""
        self.reaction_start_end_list = None
        self._initialize_tags()

    def set_reversible(self, reversible):
        # NOTE(review): unlike the other setters, this does not return self.
        self.reversible = reversible

    def set_reaction_start_end_list(self, new_start_end_list):
        """Replace the geometric edge description. Returns self for chaining."""
        self.reaction_start_end_list = new_start_end_list
        return self

    def extend_reaction_start_end_list(self, new_added_start_end_list):
        """Append entries to the geometric edge description (creating it if unset)."""
        if self.reaction_start_end_list is None:
            self.reaction_start_end_list = []
        self.reaction_start_end_list.extend(new_added_start_end_list)
        return self

    def set_boundary_flux(self, boundary_flux: bool):
        """Mark this reaction as a boundary flux. Returns self for chaining."""
        self.boundary_flux = boundary_flux
        return self

    def set_display_text_config_dict(self, display_config_dict: dict, config_key=None):
        """Store a text config under config_key, layered over the defaults."""
        new_text_config_dict = dict(self.default_display_text_config)
        new_text_config_dict.update(display_config_dict)
        self.display_config_nested_dict[config_key] = new_text_config_dict
        return self

    def update_display_text_config_item(self, updated_display_config_dict: dict = None, config_key=None):
        """Update one stored text config (by key) or all of them (key is None)."""
        if updated_display_config_dict is not None:
            if config_key is not None:
                self.display_config_nested_dict[config_key].update(updated_display_config_dict)
            else:
                for display_config_dict in self.display_config_nested_dict.values():
                    display_config_dict.update(updated_display_config_dict)
        return self

    def set_display_text(self, display_text: str, config_key=None):
        """Set the displayed text, prefixing biomass reactions with a label."""
        if self.reaction_name == NetworkGeneralConfig.biomass_str:
            display_text = f'Biomass reaction:\n{display_text}'
        self.update_display_text_config_item({ParameterName.string: display_text}, config_key)
        # if config_key is not None:
        #     self.display_config_nested_dict[config_key][ParameterName.string] = display_text
        # else:
        #     for display_config_dict in self.display_config_nested_dict.values():
        #         display_config_dict[ParameterName.string] = display_text
        return self

    def set_value(self, flux_value):
        """Store flux values: a (forward, backward) pair or a single number.

        net_value is |forward - backward| for a pair, or the value itself
        for a scalar (backward is then 0). Raises ValueError otherwise.
        """
        # if self.reversible:
        #     assert isinstance(flux_value, (tuple, list)) and len(flux_value) == 2
        #     forward, backward = flux_value
        #     self.net_value = abs(forward - backward)
        # else:
        #     assert isinstance(flux_value, (float, int))
        #     forward = flux_value
        #     backward = None
        #     self.net_value = forward
        if isinstance(flux_value, (tuple, list)) and len(flux_value) == 2:
            forward, backward = flux_value
            self.net_value = abs(forward - backward)
        elif isinstance(flux_value, (float, int)):
            forward = flux_value
            backward = 0
            self.net_value = forward
        else:
            raise ValueError()
        self.forward_value = forward
        self.backward_value = backward

    def judge_bidirectional_flag(self):
        """Decide (and cache) whether arrows are drawn at the tail and/or head.

        Reversible with no usable net value -> arrows at both ends;
        reversible with backward > forward -> tail arrow only;
        otherwise -> head arrow only. The result is computed once and
        reused on later calls (tail_arrow is the cache sentinel).
        """
        if not self.change_arrow_by_value:
            net_value = None
        else:
            net_value = self.net_value
        if self.tail_arrow is None:
            if self.reversible and net_value is None:
                head_arrow = tail_arrow = True
            else:
                if self.reversible and net_value is not None and \
                        self.backward_value > self.forward_value:
                    head_arrow = False
                    tail_arrow = True
                else:
                    head_arrow = True
                    tail_arrow = False
            self.tail_arrow = tail_arrow
            self.head_arrow = head_arrow
        return self.tail_arrow, self.head_arrow

    def judge_if_reverse(self):
        """Return True when the net flow runs tail-ward (tail arrow only)."""
        tail, head = self.judge_bidirectional_flag()
        return tail and not head

    def update_extra_parameter_dict(self, new_extra_parameter_dict):
        """Merge extra entries into the per-edge parameter dict."""
        self.extra_parameter_dict.update(new_extra_parameter_dict)
        return self

    def to_element(self, scale=1, bottom_left_offset=None):
        """Build and return the drawable ReactionElement for this reaction.

        Walks reaction_start_end_list, turning each non-branch entry into an
        arrow-parameter dict (normal/cycle/path_cycle/bent/broken) and each
        branch entry into a sub-dict attached to the preceding arrow.
        """
        gap_line_pair_list_label = ParameterName.gap_line_pair_list
        dash_solid_empty_width_label = ParameterName.dash_solid_empty_width
        branch_list_label = ParameterName.branch_list
        reaction_edge_parameter_list = []
        current_reaction_edge_dict = None
        for reaction_edge_property, *reaction_edge_parameter_tuple in self.reaction_start_end_list:
            if reaction_edge_property != ParameterName.branch:
                # A new arrow starts: flush the previous one, if any.
                if current_reaction_edge_dict is not None:
                    reaction_edge_parameter_list.append(current_reaction_edge_dict)
                tail_arrow, head_arrow = self.judge_bidirectional_flag()
                if reaction_edge_property == ParameterName.normal:
                    tail, head, parameter_dict = reaction_edge_parameter_tuple
                    current_reaction_edge_dict = {
                        ParameterName.class_name: BasicElementName.Arrow,
                        ParameterName.tail: tail,
                        ParameterName.head: head,
                        ParameterName.tail_arrow: tail_arrow,
                        ParameterName.head_arrow: head_arrow,
                        ParameterName.boundary_flux: self.boundary_flux,
                        **self.extra_parameter_dict,
                    }
                elif reaction_edge_property == ParameterName.cycle:
                    theta_tail, theta_head, center, radius, parameter_dict = reaction_edge_parameter_tuple
                    current_reaction_edge_dict = {
                        ParameterName.class_name: BasicElementName.ArcArrow,
                        ParameterName.theta_tail: theta_tail,
                        ParameterName.theta_head: theta_head,
                        ParameterName.center: center,
                        ParameterName.radius: radius,
                        ParameterName.tail_arrow: tail_arrow,
                        ParameterName.head_arrow: head_arrow,
                        ParameterName.boundary_flux: self.boundary_flux,
                        **self.extra_parameter_dict,
                    }
                elif reaction_edge_property == ParameterName.path_cycle:
                    tail, mid, head, parameter_dict = reaction_edge_parameter_tuple
                    current_reaction_edge_dict = {
                        ParameterName.class_name: BasicElementName.ArcPathArrow,
                        ParameterName.tail: tail,
                        ParameterName.mid: mid,
                        ParameterName.head: head,
                        ParameterName.tail_arrow: tail_arrow,
                        ParameterName.head_arrow: head_arrow,
                        ParameterName.boundary_flux: self.boundary_flux,
                        **self.extra_parameter_dict,
                    }
                elif reaction_edge_property == ParameterName.bent:
                    tail, head, arrow_head_direction, parameter_dict = reaction_edge_parameter_tuple
                    current_reaction_edge_dict = {
                        ParameterName.class_name: BasicElementName.BentArrow,
                        ParameterName.tail: tail,
                        ParameterName.head: head,
                        ParameterName.radius: ReactionConfig.bent_reaction_radius,
                        ParameterName.arrow_head_direction: arrow_head_direction,
                        ParameterName.tail_arrow: tail_arrow,
                        ParameterName.head_arrow: head_arrow,
                        ParameterName.boundary_flux: self.boundary_flux,
                        **self.extra_parameter_dict,
                    }
                elif reaction_edge_property == ParameterName.broken:
                    tail, head, transition_point_list, parameter_dict = reaction_edge_parameter_tuple
                    current_reaction_edge_dict = {
                        ParameterName.class_name: BasicElementName.BrokenArrow,
                        ParameterName.tail: tail,
                        ParameterName.head: head,
                        ParameterName.tail_arrow: tail_arrow,
                        ParameterName.head_arrow: head_arrow,
                        ParameterName.boundary_flux: self.boundary_flux,
                        ParameterName.transition_point_list: transition_point_list,
                        **self.extra_parameter_dict,
                    }
                else:
                    raise ValueError()
                # Optional per-edge overrides carried in the entry's parameter_dict.
                if gap_line_pair_list_label in parameter_dict:
                    current_reaction_edge_dict[gap_line_pair_list_label] = parameter_dict[gap_line_pair_list_label]
                if dash_solid_empty_width_label in parameter_dict:
                    current_reaction_edge_dict[dash_solid_empty_width_label] = parameter_dict[
                        dash_solid_empty_width_label]
            else:
                # Branch entries attach to the arrow built just before them.
                if current_reaction_edge_dict is None:
                    raise ValueError('Cannot put branch to first of reaction list')
                else:
                    stem_location, terminal_location, parameter_dict = reaction_edge_parameter_tuple
                    branch_parameter_dict = {
                        ParameterName.stem_location: stem_location,
                        ParameterName.terminal_location: terminal_location,
                    }
                    if ParameterName.arrow in parameter_dict:
                        branch_parameter_dict[ParameterName.arrow] = parameter_dict[ParameterName.arrow]
                    if ParameterName.dash in parameter_dict:
                        branch_parameter_dict[ParameterName.dash] = parameter_dict[ParameterName.dash]
                    if branch_list_label not in current_reaction_edge_dict:
                        current_reaction_edge_dict[branch_list_label] = []
                    current_reaction_edge_dict[branch_list_label].append(branch_parameter_dict)
        # Flush the final arrow.
        if current_reaction_edge_dict is not None:
            reaction_edge_parameter_list.append(current_reaction_edge_dict)
        # Only configs that actually carry a string are passed on for display.
        display_text_param_nested_dict = {
            key: config_dict for key, config_dict in self.display_config_nested_dict.items()
            if ParameterName.string in config_dict
        }
        return ReactionElement(
            self.reaction_name, self.display_reaction_name, reaction_edge_parameter_list,
            display_text_param_nested_dict=display_text_param_nested_dict,
            scale=scale, bottom_left_offset=bottom_left_offset, **self.kwargs)
| LocasaleLab/Automated-MFA-2023 | figures/figure_plotting/figure_elements/metabolic_network/metabolic_network_contents/reaction.py | reaction.py | py | 12,308 | python | en | code | 0 | github-code | 36 |
22782780958 | #
# @lc app=leetcode id=21 lang=python3
#
# [21] Merge Two Sorted Lists
#
# https://leetcode.com/problems/merge-two-sorted-lists/description/
#
# algorithms
# Easy (57.29%)
# Likes: 11001
# Dislikes: 1011
# Total Accepted: 2M
# Total Submissions: 3.4M
# Testcase Example: '[1,2,4]\n[1,3,4]'
#
# You are given the heads of two sorted linked lists list1 and list2.
#
# Merge the two lists in a one sorted list. The list should be made by splicing
# together the nodes of the first two lists.
#
# Return the head of the merged linked list.
#
#
# Example 1:
#
#
# Input: list1 = [1,2,4], list2 = [1,3,4]
# Output: [1,1,2,3,4,4]
#
#
# Example 2:
#
#
# Input: list1 = [], list2 = []
# Output: []
#
#
# Example 3:
#
#
# Input: list1 = [], list2 = [0]
# Output: [0]
#
#
#
# Constraints:
#
#
# The number of nodes in both lists is in the range [0, 50].
# -100 <= Node.val <= 100
# Both list1 and list2 are sorted in non-decreasing order.
#
#
#
# @lc code=start
# Definition for singly-linked list.
# class ListNode:
# def __init__(self, val=0, next=None):
# self.val = val
# self.next = next
class Solution:
    """LeetCode 21: merge two sorted linked lists into one sorted list."""

    def mergeTwoLists(self, list1: Optional[ListNode], list2: Optional[ListNode]) -> Optional[ListNode]:
        """Splice the nodes of two ascending lists together in sorted order.

        The three original early-return branches were redundant: the loop
        body never runs when either list is empty, and the tail append
        below already returns the non-empty remainder (or None).
        """
        dummy = ListNode(0)  # sentinel so the loop needs no head special-case
        tail = dummy
        while list1 and list2:
            # '<=' keeps the merge stable for equal values.
            if list1.val <= list2.val:
                tail.next = list1
                list1 = list1.next
            else:
                tail.next = list2
                list2 = list2.next
            tail = tail.next
        # At most one list is non-empty now; append its remainder wholesale.
        tail.next = list1 or list2
        return dummy.next
# @lc code=end
| Zhenye-Na/leetcode | python/21.merge-two-sorted-lists.py | 21.merge-two-sorted-lists.py | py | 1,892 | python | en | code | 17 | github-code | 36 |
1398892267 | from flask import Flask,render_template
from time import time
class Blockchain:
    """A minimal blockchain: a list of block dicts seeded with a genesis block."""

    def __init__(self):
        """Start with no pending transactions and mine the genesis block."""
        self.transactions = []
        self.chain = []
        self.create_block(0, '00')

    def create_block(self, nonce, previous_hash):
        """Seal the pending transactions into a new block appended to the chain."""
        new_block = {
            'block_number': len(self.chain) + 1,
            'timestamp': time(),
            'transactions': self.transactions,
            'nonce': nonce,
            'previous_hash': previous_hash,
        }
        # The pending pool was consumed by the block just built.
        self.transactions = []
        self.chain.append(new_block)
app = Flask(__name__)

@app.route('/')
def index():
    # Serve the landing page template.
    return render_template("index.html")

if __name__ == "__main__":
    # Development server only; debug mode should be off in production.
    app.run(debug=True)
| Thilagavathycse/Block-Chain-learnings | blockchain.py | blockchain.py | py | 687 | python | en | code | 0 | github-code | 36 |
73577910183 | import os, sys
import numpy as np
import pickle
from UserCode.HGCalMaskResolutionAna import Argparser
from array import array as Carray
from collections import OrderedDict
from ROOT import TCanvas, TLatex, TFile, TMath, TH1F
from ROOT import TLegend, TH2F, TLorentzVector, TProfile, TH1D, TGraphErrors
from ROOT import Double, gStyle, gROOT, kTemperatureMap
from UserCode.HGCalMaskVisualProd.SystemUtils import averageContiguousVals as Av
from UserCode.HGCalMaskVisualProd.SystemUtils import EtaStr as ES
from UserCode.HGCalMaskResolutionAna.SoftwareCorrection import IncompleteShowersCorrection
from UserCode.HGCalMaskResolutionAna.SoftwareCorrection import DifferentiateShowersByEnergy
from UserCode.HGCalMaskResolutionAna.Calibration import Calibration
from UserCode.HGCalMaskResolutionAna.PartialWafersStudies import PartialWafersStudies
from UserCode.HGCalMaskVisualProd.RootPlotting import RootPlotting
from UserCode.HGCalMaskVisualProd.RootObjects import RootHistograms
def plotHistograms(histos, cdims, pcoords, cname):
    """
    Plots histograms from a list onto one canvas and saves it.
    Arguments:
    -> histos: list of histograms/graphs (order matters; the mode-specific
       indexing below assumes groups of NREG=3 signal regions)
    -> cdims & pcoords: canvas dimensions and pad coordinates, as described
       in RooUtils
    -> cname: name of the canvas to be created
    """
    if not isinstance(histos, list):
        raise TypeError('The histograms have to be passed in a list.,')
    npads = len(pcoords[0])
    # NOTE(review): 'stack' is assigned but never used below — candidate for removal.
    hdiv, stack = ([] for _ in range(2))
    with RootPlotting(ncanvas=1, npads=npads, cdims=cdims, pcoords=pcoords) as plot:
        titles = ['Resolution', 'Resolution vs Eta',
                  'RMS vs Eta', 'Bias vs Eta', 'RMS/(1+Bias) vs Eta',
                  'Resolution', 'Nevents', 'Resolution / Nevents']
        if FLAGS.mode == 1:
            # Mode 1: first 6 entries drawn as histograms (fit the first 3),
            # the rest as graphs; the last 3 entries are handled by the caller.
            legends1 = [TLegend(0.12, 0.76, 0.44, 0.89) for _ in range(3)]
            it = -1
            for ih in range(len(histos[:-3])):
                if ih%3==0: it += 1
                h = histos[ih]
                if ih<6:
                    plot.plotHistogram(cpos=0, ppos=ih, h=h,
                                       lw=3,mc=1,msize=.5,lc=1,
                                       title=titles[it], draw_options='colz')
                    if ih<3:
                        tex = plot.setLatex(ts=0.04)
                        plot.fitHistogram(h=h, fname='crystalball',
                                          frange=(-1.,1.), tex=tex)
                else:
                    plot.plotGraph(cpos=0, ppos=ih, g=h,
                                   lw=3,mc=4,msize=.5,lc=4,
                                   yranges=(-0.5,3.5),
                                   title=titles[it],
                                   draw_options='AP')
        elif FLAGS.mode == 2:
            if FLAGS.apply_weights:
                legends1 = [TLegend(0.12, 0.76, 0.44, 0.89) for _ in range(3)]
            else:
                legends1 = [TLegend(0.56, 0.66, 0.86, 0.89) for _ in range(3)]
            legends2 = [TLegend(0.68, 0.75, 0.86, 0.89) for _ in range(3)]
            for ih in range(NREG):
                if FLAGS.apply_weights:
                    # One column of pads per signal region: idx walks down the column.
                    for ixx in range(int(npads/NREG)):
                        idx = ih+NREG*ixx
                        if ixx == 0:
                            plot.plotHistogram(cpos=0, ppos=idx, h=histos[idx],
                                               lw=3,mc=4,msize=.5,lc=4,
                                               draw_options='colz')
                        elif ixx == 1:
                            plot.plotHistogram(cpos=0, ppos=idx, h=histos[idx],
                                               lw=3,mc=4,msize=.5,lc=4,
                                               title=titles[ixx],
                                               draw_options='colz')
                        else:
                            plot.plotGraph(cpos=0, ppos=idx, g=histos[idx],
                                           lw=3,mc=4,msize=.5,lc=4,
                                           yranges=(-0.5,3.5),
                                           title=titles[ixx],
                                           name = str(ixx*ih),
                                           draw_options='AP')
                        tex = plot.setLatex()
                        if ixx == 0:
                            plot.fitHistogram(h=histos[idx], fname='crystalball',
                                              frange=(-1.,1.), tex=tex)
                        if FLAGS.samples == 'inner':
                            tex.DrawLatex(0.58,0.92,'Inner radius; SR{}'
                                          .format((ih%3)+1))
                        elif FLAGS.samples == 'outer':
                            tex.DrawLatex(0.58,0.92,'Outer radius; SR{}'
                                          .format((ih%3)+1))
                        tex.DrawLatex(0.11,0.92,'#bf{CMS} #it{simulation preliminary}')
                        tex.SetTextAlign(31)
                elif not FLAGS.apply_weights:
                    # Overlay the complete-shower histogram with the three
                    # incomplete-shower categories (one per background cut).
                    linec = [4, 2, 3, 7]
                    plot.plotHistogram(cpos=0, ppos=ih, h=histos[ih],
                                       lw=3,mc=linec[0],msize=.5,lc=linec[0],
                                       draw_options='E')
                    plot.plotHistogram(cpos=0, ppos=ih, h=histos[ih+3],
                                       lw=3, mc=linec[1], msize=.5, lc=linec[1],
                                       draw_options='same E')
                    plot.plotHistogram(cpos=0, ppos=ih, h=histos[ih+6],
                                       lw=3, mc=linec[2], msize=.5, lc=linec[2],
                                       draw_options='same E')
                    plot.plotHistogram(cpos=0, ppos=ih, h=histos[ih+9],
                                       lw=3, mc=linec[3], msize=.5, lc=linec[3],
                                       draw_options='same E')
                    tex = plot.setLatex()
                    bckgcuts_str = [str(i) for i in bckgcuts]
                    if FLAGS.samples == 'inner':
                        tex.DrawLatex(0.58,0.92,'Inner radius; SR{}'.format((ih%3)+1))
                    elif FLAGS.samples == 'outer':
                        tex.DrawLatex(0.58,0.92,'Outer radius; SR{}'.format((ih%3)+1))
                    legends1[ih].AddEntry(histos[ih],
                                          'Cumdiff < '+bckgcuts_str[0], 'L')
                    for it in range(len(bckgcuts_str)-1):
                        legends1[ih].AddEntry(histos[ih+3*(it+1)],
                                              bckgcuts_str[it]+'< Cumdiff < '+bckgcuts_str[it+1], 'L')
                    legends1[ih].AddEntry(histos[ih+3*(it+2)],
                                          'Cumdiff > '+bckgcuts_str[it+1], 'L')
                    tex.DrawLatex(0.11,0.92,'#bf{CMS} #it{simulation preliminary}')
                    tex.SetTextAlign(31)
                    legends1[ih].Draw()
                    # Weight histograms: each background category divided by
                    # the complete-shower reference.
                    hdiv.append(histos[ih+3].Clone('weight1_sr{}'.format(ih+1)))
                    hdiv.append(histos[ih+6].Clone('weight2_sr{}'.format(ih+1)))
                    hdiv.append(histos[ih+9].Clone('weight3_sr{}'.format(ih+1)))
                    for idiv in range(len(bckgcuts)):
                        hdiv[-3+idiv].Divide(histos[ih])
                        extrastr = '' if idiv==0 else 'same'
                        hdiv[-3+idiv].GetYaxis().SetRangeUser(0., 6.)
                        plot.plotHistogram(cpos=0, ppos=ih+3, h=hdiv[-3+idiv],
                                           yaxis_title='Weight',
                                           lw=3, mc=linec[idiv+1], msize=.5,
                                           lc=linec[idiv+1],
                                           draw_options='HIST'+extrastr, copy=True)
                    tex = plot.setLatex()
                    if FLAGS.samples == 'inner':
                        tex.DrawLatex(0.58,0.92,'Inner radius; SR{}'.format((ih%3)+1))
                    elif FLAGS.samples == 'outer':
                        tex.DrawLatex(0.58,0.92,'Outer radius; SR{}'.format((ih%3)+1))
                    for iv in range(len(bckgcuts)):
                        legends2[ih].AddEntry(hdiv[iv], 'weight'+str(iv+1), 'L')
                    legends2[ih].Draw()
                    tex.DrawLatex(0.11,0.92,'#bf{CMS} #it{simulation preliminary}')
                    tex.SetTextAlign(31)
        plot.save(cpos=0, name=cname)
    if not FLAGS.apply_weights:
        # Persist the raw and ratio (weight) histograms for later correction runs.
        save_str = base.paths.weights
        RootHistograms(histos).save(save_str)
        RootHistograms(hdiv).save(save_str, mode='UPDATE')
def main():
#gStyle.SetOptStat(0)
gROOT.SetBatch(True)
gStyle.SetPalette(kTemperatureMap)
fIn=TFile.Open(FLAGS.noPUFile)
data=fIn.Get('data')
calib_str = base.paths.calibrations_nopu
"""
calibration = Calibration(FLAGS)
calibration.nopu_calibration()
calibration.save(calib_str)
"""
with open(calib_str, 'r') as cachefile:
calib = pickle.load(cachefile)
if FLAGS.apply_weights:
calibshowers_str = base.paths.weights
bckgcuts_extended = np.append(bckgcuts, 0.9)
showercorr = IncompleteShowersCorrection(calibshowers_str,
discrvals=Av(bckgcuts_extended))
weights = showercorr.CorrectionWeights()
if FLAGS.samples == 'inner':
boundaries = [5, 5, 5]
corr_mode = 'left'
elif FLAGS.samples == 'outer':
boundaries = [23, 23, 23]
corr_mode = 'right'
lowstats_factors = showercorr.calculateLowStatisticsFactor(boundaries, corr_mode)
weights_graphs = [showercorr.buildCorrectionWeightsGraphs(region=i+1)
for i in range(NREG)]
histos=OrderedDict()
enbins, eninf, ensup = 200, -2.01, 1.99
if FLAGS.samples == 'inner':
phibins, etabins, etainf, etasup = 12, 10, 2.69, 3.04
elif FLAGS.samples == 'outer':
phibins, etabins, etainf, etasup = 12, 10, 1.44, 1.66
if FLAGS.mode == 1:
hn = ['den{}', 'den_eta{}', 'den{}_2D_res', 'den{}_2D_events']
for ireg in range(1,NREG+1):
bins = Carray('d', np.arange(-1.05, .8, 0.01))
strings = ';#Delta E/E_{gen};PDF'
histos[hn[0].format(ireg)] = TH1F(hn[0].format(ireg), strings,
len(bins)-1, bins)
histos[hn[1].format(ireg)] = TH2F(hn[1].format(ireg), ';|#eta|;#Delta E/E',
etabins, etainf, etasup,
enbins, eninf, ensup)
histos[hn[2].format(ireg)] = TH2F(hn[2].format(ireg), ';|#eta|;#phi',
50, etainf, etasup,
phibins, -TMath.Pi(), TMath.Pi())
histos[hn[3].format(ireg)] = TH2F(hn[3].format(ireg), ';|#eta|;#phi',
50, etainf, etasup,
phibins, -TMath.Pi(), TMath.Pi())
elif FLAGS.mode == 2:
fracEn = np.zeros((NREG,NLAYERS), dtype=float)
countfracEn = np.zeros((NREG,NLAYERS), dtype=int)
for i in range(0, data.GetEntriesFast()):
data.GetEntry(i)
genen = getattr(data,'genen')
geneta = abs(getattr(data,'geneta'))
genphi = getattr(data,'genphi')
for ireg in range(1,NREG+1):
recen = getattr(data,'en_sr{}_ROI'.format(ireg))
avgnoise = getattr(data,'noise_sr3_ROI')*A[ireg-1]/A[2]
#Calibration factors. f2 is used for PU.
f1, f2 = 1., 0.
etaregions_shifted = np.roll(etaregions, shift=-1)[:-1]
for ieta1,ieta2 in zip(etaregions[:-1], etaregions_shifted):
#in case it lies outside the limits of the calibration
#the event is calibrated with the full calibration region
if geneta < etaregions[0] or geneta > etaregions[-1]:
idstr = 'sr{}_from{}to{}'.format(ireg,
ES(etaregions[0]),
ES(etaregions[-1]))
elif (geneta < ieta1 or geneta >= ieta2):
continue
else:
idstr = 'sr{}_from{}to{}'.format(ireg, ES(ieta1), ES(ieta2))
if 'L0' in calib:
f1 /= calib['L0'][idstr].Eval(geneta)+1.0
if 'L1' in calib:
f1 /= calib['L1'][idstr].Eval(f1*recen)+1.0
if 'L2' in calib and ireg in calib['L2']:
f2 = calib['L2'][idstr].Eval(avgnoise)
recen = f1*recen - f2
for il in range(1,NLAYERS+1):
v = f1*getattr(data,'en_sr{}_layer{}'.format(ireg,il)) - f2
if ( (FLAGS.samples == "inner" and geneta < etaregions[0]+0.05 or
FLAGS.samples == "outer" and geneta > etaregions[-1]-0.05) and
recen != 0 ):
fracEn[ireg-1,il-1] += v / recen
countfracEn[ireg-1,il-1] += 1
fracEn /= countfracEn
hn = ['res_complete_before{}', 'res_complete_after{}',
'res_incomplete_before{}', 'res_incomplete_after{}',
'res_total_before{}', 'res_total_after{}',
'res_vs_eta_before{}', 'res_vs_eta_after{}',
'en{}_per_layer_signal', 'en{}_per_layer_bckg1',
'en{}_per_layer_bckg2', 'en{}_per_layer_bckg3',
'noise{}_per_layer_signal', 'noise{}_per_layer_bckg1',
'noise{}_per_layer_bckg2', 'noise{}_per_layer_bckg3']
for ireg in range(1,NREG+1):
bins = Carray('d', np.arange(-1.05, .8, 0.01))
strings = ';#Delta E/E_{gen};PDF'
for ih in range(6):
histos[hn[ih].format(ireg)] = TH1F(hn[ih].format(ireg), strings,
len(bins)-1, bins)
histos[hn[6].format(ireg)] = TH2F(hn[6].format(ireg), ';|#eta|;#Delta E/E',
etabins, etainf, etasup,
enbins, eninf, ensup)
histos[hn[7].format(ireg)] = TH2F(hn[7].format(ireg), ';|#eta|;#Delta E/E',
etabins, etainf, etasup,
enbins, eninf, ensup)
bins = Carray('d', np.arange(0.5,29,1.))
strings = ';Layer;E_{reco} / E_{gen}'
for ih in range(8,16):
histos[hn[ih].format(ireg)] = TProfile(hn[ih].format(ireg), strings,
len(bins)-1, bins)
for h in histos:
histos[h].Sumw2()
histos[h].SetMarkerStyle(20)
histos[h].SetDirectory(0)
for i in range(0, data.GetEntriesFast()):
data.GetEntry(i)
genen = getattr(data,'genen')
geneta = abs(getattr(data,'geneta'))
genphi = getattr(data,'genphi')
for ireg in range(1,NREG+1):
recen = getattr(data,'en_sr{}_ROI'.format(ireg))
avgnoise = getattr(data,'noise_sr3_ROI')*A[ireg-1]/A[2]
#Calibration factors. f2 is used for PU.
f1, f2 = 1., 0.
etaregions_shifted = np.roll(etaregions, shift=-1)[:-1]
for ieta1,ieta2 in zip(etaregions[:-1], etaregions_shifted):
if geneta < etaregions[0] or geneta > etaregions[-1]:
idstr = 'sr{}_from{}to{}'.format(ireg,
ES(etaregions[0]), ES(etaregions[-1]))
elif (geneta < ieta1 or geneta > ieta2):
continue
else:
idstr = 'sr{}_from{}to{}'.format(ireg, ES(ieta1), ES(ieta2))
if 'L0' in calib:
f1 /= calib['L0'][idstr].Eval(geneta)+1.0
if 'L1' in calib:
f1 /= calib['L1'][idstr].Eval(f1*recen)+1.0
if 'L2' in calib and ireg in calib['L2']:
f2 = calib['L2'][idstr].Eval(avgnoise)
assert f1 != 1.
recen = f1*recen - f2
deltaE = recen/genen-1.
###Store the energy resolution###
if FLAGS.mode == 1:
if deltaE > -1:
histos[hn[0].format(ireg)].Fill(deltaE)
histos[hn[1].format(ireg)].Fill(geneta, deltaE)
histos[hn[2].format(ireg)].Fill(geneta, genphi, deltaE)
histos[hn[3].format(ireg)].Fill(geneta, genphi)
elif FLAGS.mode == 2:
#differentiate complete from incomplete showers
ROI_en = np.zeros((NLAYERS), dtype=float)
for il in range(1,NLAYERS+1):
v = f1*getattr(data,'en_sr{}_layer{}'.format(ireg,il)) - f2
try:
ROI_en[il-1] = v/recen
except ZeroDivisionError:
ROI_en[il-1] = 0.
lshift = [1., 1., 1.] if FLAGS.samples == 'outer' else [.65, .59, .48] #layer shift
assert len(bckgcuts) == len(lshift)
showerid = DifferentiateShowersByEnergy(ROI_en, fracEn[ireg-1,:],
thresholds=bckgcuts, min_val=0.05)
###Calculate andc calibrate the energy per layer###
recen_corr = 0
for il in range(1,NLAYERS+1):
if FLAGS.apply_weights:
if FLAGS.samples == 'inner':
weight_limit = il > boundaries[ireg-1]
else:
weight_limit = il < boundaries[ireg-1]
b = histos[hn[8].format(ireg)].FindBin(il)
if showerid==0: #complete shower
v = f1*getattr(data,'en_sr{}_layer{}'.format(ireg,il)) - f2
try:
histos[hn[8].format(ireg)].Fill(b,v/recen)
except ZeroDivisionError:
histos[hn[8].format(ireg)].Fill(b,0.)
if FLAGS.apply_weights:
recen_corr += v
v = (f1*getattr(data,'noise_sr3_layer{}'.format(il))
*A[ireg-1]/A[2] - f2)
try:
histos[hn[12].format(ireg)].Fill(b,v/recen)
except ZeroDivisionError:
histos[hn[12].format(ireg)].Fill(b,0.)
else:
w = showerid-1
v = f1*getattr(data,'en_sr{}_layer{}'.format(ireg,il)) - f2
try:
histos[hn[9+w].format(ireg)].Fill(b*lshift[w],v/recen)
except ZeroDivisionError:
histos[hn[9+w].format(ireg)].Fill(b*lshift[w],0.)
if ( FLAGS.apply_weights and
weights[ireg-1][w][il-1]!=0 and
weight_limit):
recen_corr += v/weights[ireg-1][w][int(round((il-1)*lshift[w],0))]
#weight_graphs[ireg][il].SetBit(weight_graphs[ireg][il].klsSortedX)
#weight_graphs[ireg][il].Eval(geneta, spline=0, 'S')
v = (f1*getattr(data,'noise_sr3_layer{}'.format(il))
*A[ireg-1]/A[2] - f2)
try:
histos[hn[13+w].format(ireg)].Fill(b,v/recen)
except ZeroDivisionError:
histos[hn[13+w].format(ireg)].Fill(b,0.)
if FLAGS.apply_weights and FLAGS.method == 'ed':
if showerid==0: #complete shower
deltaE_corr = recen_corr/genen-1.
histos[hn[0].format(ireg)].Fill(deltaE)
histos[hn[1].format(ireg)].Fill(deltaE_corr)
else:
recen_corr *= (1 / (1-lowstats_factors[ireg-1]) )
distshift = 0.09 if FLAGS.samples == 'inner' else 0.08
recen_corr *= 1/distshift
deltaE_corr = recen_corr/genen-1.
if deltaE>-.95 and deltaE<-0.1:
histos[hn[2].format(ireg)].Fill(deltaE)
histos[hn[3].format(ireg)].Fill(deltaE_corr)
histos[hn[6].format(ireg)].Fill(geneta, deltaE)
histos[hn[7].format(ireg)].Fill(geneta, deltaE_corr)
#end of tree loop
fIn.Close()
if FLAGS.mode == 1:
pcoords = [[[0.01,0.755,0.33,0.995],
[0.34,0.755,0.66,0.995],
[0.67,0.755,0.99,0.995],
[0.01,0.505,0.33,0.745],
[0.34,0.505,0.66,0.745],
[0.67,0.505,0.99,0.745],
[0.01,0.255,0.33,0.495],
[0.34,0.255,0.66,0.495],
[0.67,0.255,0.99,0.495],
[0.01,0.005,0.33,0.245],
[0.34,0.005,0.66,0.245],
[0.67,0.005,0.99,0.245]]]
cdims = [[1600,2000]]
picname = '1comp_'+FLAGS.samples+'_'+FLAGS.method
if FLAGS.apply_weights:
picname += '_corrected'
else:
pcoords = [[[0.01,0.51,0.33,0.99],
[0.34,0.51,0.66,0.99],
[0.67,0.51,0.99,0.99],
[0.01,0.01,0.33,0.49],
[0.34,0.01,0.66,0.49],
[0.67,0.01,0.99,0.49]]]
cdims = [[1000,600]]
picname = '2comp_'+FLAGS.samples+'_'+FLAGS.method
if FLAGS.apply_weights:
picname += '_corrected'
correct_order = []
for i in range(len(hn)):
for ireg in range(1,NREG+1):
correct_order.append(hn[i].format(ireg))
assert len(correct_order) == len(histos.keys())
histos = [histos[correct_order[i]] for i in range(len(correct_order))]
if FLAGS.mode == 1:
histos.append(histos[6].Clone())
histos.append(histos[7].Clone())
histos.append(histos[8].Clone())
histos[-3].Divide(histos[9])
histos[-2].Divide(histos[10])
histos[-1].Divide(histos[11])
htmp = []
for ireg in range(NREG):
h = histos[3+ireg]
xbins, exbins, rms, erms, bias, ebias = ([] for _ in range(6))
for xbin in xrange(1,h.GetNbinsX()+1):
tmp = h.ProjectionY('tmp', xbin, xbin)
xbins.append(h.GetXaxis().GetBinCenter(xbin))
horizerror = ( h.GetXaxis().GetBinCenter(1) -
h.GetXaxis().GetBinLowEdge(1) )
exbins.append( horizerror )
rms.append(tmp.GetRMS())
erms.append(tmp.GetRMSError())
"""
xq = Carray('d', [0.16,0.5,0.84])
yq = Carray('d', [0.0,0.0,0.0 ])
tmp.GetQuantiles(3,yq,xq)
bias.append(yq[1])
"""
bias.append(tmp.GetMean())
#ebias.append((yq[0]+yq[2])/2)
ebias.append(tmp.GetMeanError())
tmp.Delete()
xbins, exbins = np.array(xbins), np.array(exbins)
rms, erms = np.array(rms), np.array(erms)
bias, ebias = np.array(bias), np.array(ebias)
indep = rms/(1.+bias)
eindep = indep * np.sqrt( erms**2/rms**2 + ebias**2/(1+bias**2) )
htmp.append( TGraphErrors(etabins, xbins, rms, exbins, erms) )
htmp.append( TGraphErrors(etabins, xbins, bias, exbins, ebias) )
htmp.append( TGraphErrors(etabins, xbins, indep, exbins, eindep) )
ht_tmp = [htmp[0],htmp[3],htmp[6],htmp[1],htmp[4],htmp[7],htmp[2],htmp[5],htmp[8]]
histos = histos[:6] + ht_tmp
if FLAGS.method == 'fineeta':
htitles = ['rmsVSeta1', 'rmsVSeta2', 'rmsVSeta3',
'biasVSeta1', 'biasVSeta2', 'biasVSeta3',
'indepVSeta1','indepVSeta2','indepVSeta3']
indices = [1, 2, 3] * 3
indices.sort()
fOut = TFile( base.paths.plots, 'RECREATE' )
fOut.cd()
for ih,h in enumerate(ht_tmp):
h.SetName(htitles[ih])
h.Write(htitles[ih])
fOut.Write()
fOut.Close()
plotHistograms(histos, cdims, pcoords,
os.path.join(FLAGS.outpath,picname+'.png'))
elif FLAGS.mode == 2:
if FLAGS.apply_weights:
fOut = TFile( base.paths.plots, 'RECREATE' )
fOut.cd()
for ireg in range(NREG):
str1 = hn[4].format(ireg+1)
str2 = hn[5].format(ireg+1)
histos[12+ireg] = histos[ireg].Clone(str1)
histos[15+ireg] = histos[3+ireg].Clone(str2)
histos[12+ireg].Add(histos[6+ireg])
histos[15+ireg].Add(histos[9+ireg])
for h in histos:
h.Write()
histos_complete = histos[:6]
histos_incomplete = histos[6:12]
histos_total = histos[12:18]
histos_res2D_before = histos[21:24]
histos_res2D_after = histos[21:24]
plotHistograms(histos_complete, cdims, pcoords,
os.path.join(FLAGS.outpath,picname+'_complete.png'))
plotHistograms(histos_incomplete, cdims, pcoords,
os.path.join(FLAGS.outpath,picname+'_incomplete.png'))
ht = histos_total[3:] + histos_res2D_after #res 1D + res 2D
ht_tmp = []
for ireg in range(NREG):
h = ht[3+ireg]
xbins, exbins, rms, erms, bias, ebias = ([] for _ in range(6))
for xbin in xrange(1,h.GetNbinsX()+1):
tmp = h.ProjectionY('tmp', xbin, xbin)
xbins.append(h.GetXaxis().GetBinCenter(xbin))
horizerror = ( h.GetXaxis().GetBinCenter(1) -
h.GetXaxis().GetBinLowEdge(1) )
exbins.append( horizerror )
rms.append(tmp.GetRMS())
erms.append(tmp.GetRMSError())
"""
xq = Carray('d', [0.16,0.5,0.84])
yq = Carray('d', [0.0,0.0,0.0 ])
tmp.GetQuantiles(3,yq,xq)
bias.append(yq[1])
"""
bias.append(tmp.GetMean())
#ebias.append((yq[0]+yq[2])/2)
ebias.append(tmp.GetMeanError())
tmp.Delete()
xbins, exbins = np.array(xbins), np.array(exbins)
rms, erms = np.array(rms), np.array(erms)
bias, ebias = np.array(bias), np.array(ebias)
indep = rms/(1.+bias)
eindep = indep * np.sqrt( erms**2/rms**2 + ebias**2/(1+bias**2) )
ht_tmp.append( TGraphErrors(etabins, xbins, rms, exbins, erms) )
ht_tmp.append( TGraphErrors(etabins, xbins, bias, exbins, ebias) )
ht_tmp.append( TGraphErrors(etabins, xbins, indep, exbins, eindep) )
fOut.cd()
ht_tmp = [ht_tmp[-9],ht_tmp[-6],ht_tmp[-3],
ht_tmp[-8],ht_tmp[-5],ht_tmp[-2],
ht_tmp[-7],ht_tmp[-4],ht_tmp[-1]]
ht_titles = ['rmsVSeta1', 'rmsVSeta2', 'rmsVSeta3',
'biasVSeta1', 'biasVSeta2', 'biasVSeta3',
'indepVSeta1','indepVSeta2','indepVSeta3']
indices = [1, 2, 3] * 3
indices.sort()
for ih,h in enumerate(ht_tmp):
h.SetName(ht_titles[ih])
h.Write(ht_titles[ih])
pcoords = [[[0.01,0.805,0.33,0.995],
[0.34,0.805,0.66,0.995],
[0.67,0.805,0.99,0.995],
[0.01,0.605,0.33,0.795],
[0.34,0.605,0.66,0.795],
[0.67,0.605,0.99,0.795],
[0.01,0.405,0.33,0.595],
[0.34,0.405,0.66,0.595],
[0.67,0.406,0.99,0.595],
[0.01,0.205,0.33,0.395],
[0.34,0.205,0.66,0.395],
[0.67,0.205,0.99,0.395],
[0.01,0.005,0.33,0.195],
[0.34,0.005,0.66,0.195],
[0.67,0.005,0.99,0.195]]]
cdims = [[1600,2000]]
ht += ht_tmp
plotHistograms(ht, cdims, pcoords,
os.path.join(FLAGS.outpath,picname+'_total.png'))
fOut.Write()
fOut.Close()
else:
histos = histos[24:]
plotHistograms(histos, cdims, pcoords,
os.path.join(FLAGS.outpath,picname+'.png'))
if __name__ == "__main__":
    # Parse command-line flags once; main() and helpers read these globals.
    parser = Argparser.Argparser()
    FLAGS = parser.get_flags()
    parser.print_args()
    # Layer-weight correction is only defined for the mode-2 analysis.
    if FLAGS.apply_weights and FLAGS.mode != 2:
        raise ValueError('The weights can only be used when mode==2.')
    base = PartialWafersStudies(FLAGS)  # study bookkeeping (paths, geometry)
    # Number of signal regions, number of detector layers, per-region areas.
    NREG, NLAYERS, A = base.nsr, base.nlayers, base.sr_area
    etaregions = base.etaregions
    if FLAGS.method == 'ed':
        # Background cuts used by the energy-density ('ed') method only.
        bckgcuts = np.array(FLAGS.bckgcuts)
    main()
| bfonta/HGCal | HGCalMaskResolutionAna/scripts/analysis.py | analysis.py | py | 30,149 | python | en | code | 0 | github-code | 36 |
73774729063 | import torch
from torch import nn
import torch.nn.functional as F
import os
import math
import numpy as np
from train_pipeline import *
def init_siren(W, fan_in, omega=30, init_c=24, flic=2, is_first=False):
    """Initialize weight tensor ``W`` in place with the SIREN uniform scheme.

    First layer: U(-flic/fan_in, flic/fan_in); deeper layers:
    U(-sqrt(init_c/fan_in)/omega, sqrt(init_c/fan_in)/omega).
    """
    bound = flic / fan_in if is_first else np.sqrt(init_c / fan_in) / omega
    W.uniform_(-bound, bound)
def _init(W, c):
    """Fill tensor ``W`` in place with U(-c, c)."""
    W.uniform_(-c, c)


class SplitLayer(nn.Module):
    """Linear layer whose pre-activation is split into four branches.

    The fused linear produces ``output_dim * 4`` features; the four chunks
    are scaled by per-branch ``omegas`` and passed through tanh, sigmoid,
    sin and cos respectively. The output is the element-wise product of the
    four activations, scaled by ``m``.
    """

    def __init__(self, input_dim, output_dim, m=1.0, cs=(1, 1, 1, 1), omegas=(1, 1, 1.0, 1), use_bias=True):
        super().__init__()
        # One fused linear computes all four branches at once.
        self.linear = nn.Linear(input_dim, output_dim * 4, bias=use_bias)
        self.dropout = nn.Dropout(0)  # kept for interface compat; p=0 is a no-op
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.m = m            # output scale
        self.omegas = omegas  # per-branch frequency multipliers
        self.cs = cs          # per-branch uniform init bounds
        self.init_weights()

    def init_weights(self):
        """Zero the bias (when present); init each branch's weights with its own bound."""
        # BUG FIX: with use_bias=False, nn.Linear stores bias=None and the old
        # unconditional `self.linear.bias.data...` raised AttributeError.
        if self.linear.bias is not None:
            self.linear.bias.data.uniform_(0, 0)
        s = self.output_dim
        W = self.linear.weight.data
        _init(W[:s], self.cs[0])
        _init(W[s : s * 2], self.cs[1])
        _init(W[s * 2 : s * 3], self.cs[2])
        _init(W[s * 3 : s * 4], self.cs[3])

    def forward(self, x):
        h, acts = self.forward_with_activations(x)
        return h

    def forward_with_activations(self, x):
        """Return ``(output, [x, preact, 4 scaled preacts, 4 activations])``."""
        preact = self.linear(x)
        preacts = list(preact.chunk(4, dim=-1))
        for i in range(len(preacts)):
            preacts[i] = self.omegas[i] * preacts[i]
        preact_tanh, preact_sigmoid, preact_sin, preact_cos = preacts
        act_tanh, act_sigmoid, act_sin, act_cos = preact_tanh.tanh(), preact_sigmoid.sigmoid(), preact_sin.sin(), preact_cos.cos()
        h = act_tanh * act_sigmoid * act_sin * act_cos
        h = h * self.m
        return h, [x, preact, preact_tanh, preact_sigmoid, preact_sin, preact_cos, act_tanh, act_sigmoid, act_sin, act_cos]


class SimpleSplitNet(nn.Module):
    """Fixed topology: 128 -> SplitLayer(64) -> SplitLayer(32) -> Linear(3).

    ``cs`` holds three init-bound entries (one 4-tuple per SplitLayer plus a
    scalar for the output head); ``omegas`` / ``m`` may be scalars (broadcast
    to every SplitLayer) or per-layer sequences.
    """

    def __init__(self, cs, use_bias=True, omegas=(1, 1, 1.0, 1), m=1.0):
        super().__init__()
        in_features = 128
        hidden_layers = 2  # number of SplitLayers below
        # BUG FIX: hidden_layers is an int, so the old `len(hidden_layers)`
        # raised TypeError whenever the default scalar m / flat omegas were
        # used; broadcast to one entry per SplitLayer instead.
        if not hasattr(m, "__len__"):
            m = [m] * hidden_layers
        is_layerwise_omegas = hasattr(omegas[0], "__len__")
        if not is_layerwise_omegas:
            omegas = [omegas] * hidden_layers
        net = [SplitLayer(in_features, 64, use_bias=use_bias, cs=cs[0], m=m[0], omegas=omegas[0]), SplitLayer(64, 32, use_bias=use_bias, cs=cs[1], m=m[1], omegas=omegas[1]), nn.Linear(32, 3)]
        _init(net[-1].weight.data, cs[2])
        self.net = nn.Sequential(*net)

    def forward(self, x):
        return self.net(x)

    def forward_with_activations(self, x):
        """Run the net, collecting one ``(output, activations)`` pair per layer."""
        h = x
        intermediate_acts = []
        for layer in self.net:
            if isinstance(layer, SplitLayer):
                h, acts = layer.forward_with_activations(h)
            else:
                h = layer(h)
                acts = []  # plain layers expose no intermediate activations
            intermediate_acts.append((h, acts))
        return h, intermediate_acts
class ParallelSplitNet(nn.Module):
    """Sum-ensemble of SimpleSplitNets over a shared Fourier-feature encoding.

    NOTE(review): SimpleSplitNet.__init__ (as defined above) does not accept
    ``in_features`` / ``out_features`` keywords, so constructing this class
    raises TypeError as written — confirm which signature is intended.
    Depends on the third-party ``rff`` package (imported lazily).
    """
    def __init__(self, model_configs, out_features, encoding_size=128):
        super().__init__()
        # if not hasattr(m, '__len__'):
        #     m = [m] * (hidden_layers+2)
        import rff
        self.encoding = rff.layers.GaussianEncoding(sigma=10.0, input_size=2, encoded_size=encoding_size)
        in_features = encoding_size * 2  # GaussianEncoding concatenates sin and cos features
        self.networks = nn.ModuleList([SimpleSplitNet(**k, in_features=in_features, out_features=out_features) for k in model_configs])
    def forward(self, x):
        # Encode the 2-D coordinates once, then sum all sub-network outputs.
        x = self.encoding(x)
        o = 0
        for net in self.networks:
            o = o + net(x)
        return o
def get_example_model():
    """Build and return a small example SimpleSplitNet with unit init bounds."""
    kwargs = {"cs": [(1, 1, 1, 1), (1, 1, 1, 1), 0.1], "omegas": [(1, 1, 1, 1), (1, 1, 1, 1)], "m": [1, 1]}
    net = SimpleSplitNet(**kwargs)
    # BUG FIX: the constructed model was previously dropped (implicit None
    # return); hand it back to the caller.
    return net
import wandb

# Hyper-parameter search ranges for the wandb random sweep.
m_range = (0.1, 30)      # output scale of each SplitLayer
c_range = (1e-3, 1e1)    # uniform weight-init bounds
omega_range = (0.1, 30)  # per-branch frequency multipliers
PROJECT_NAME = "splitnet_3_sweep"
sweep_configuration = {
    "method": "random",
    "name": "sweep",
    "metric": {"goal": "maximize", "name": "psnr"},
    "parameters": {
        # 2 SplitLayers -> two m's; 4+4 branch bounds + 1 head bound -> nine c's;
        # 4+4 branch frequencies -> eight omegas.
        **{f"m{i}": {"distribution": "uniform", "min": m_range[0], "max": m_range[1]} for i in range(2)},
        **{f"c{i}": {"distribution": "uniform", "min": c_range[0], "max": c_range[1]} for i in range(4 + 4 + 1)},
        **{f"omega{i}": {"distribution": "uniform", "min": omega_range[0], "max": omega_range[1]} for i in range(4 + 4)},
        "lr": {"values": [1e-3, 1e-4, 1e-5]},
        "weight_decay": {'values': [0, 1e-5]},
    },
}
def _train_for_sweep(model, cfg, lr, weight_decay):
    """Train ``model`` on the target image for cfg['total_steps']; return final PSNR.

    NOTE(review): the Fourier encoding here uses encoded_size=64 (input dim
    2*64=128), matching SimpleSplitNet's hard-coded in_features=128 — confirm
    they stay in sync if either changes.
    """
    seed_all(cfg["random_seed"])
    device = cfg["device"]
    total_steps = cfg["total_steps"]
    model_input, ground_truth, H, W = load_data(cfg)
    model_input, ground_truth = model_input.to(device), ground_truth.to(device)
    model.to(device)
    import rff  # third-party; imported lazily
    encoding = rff.layers.GaussianEncoding(sigma=10.0, input_size=2, encoded_size=64).to(device)
    # Encode the coordinates once, outside the training loop.
    model_input = encoding(model_input)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    for step in range(total_steps):
        model_output = model(model_input)
        mse, psnr = mse_and_psnr(model_output, ground_truth)
        loss = mse
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    # PSNR of the last forward pass (computed before the final weight update).
    return psnr.item()
def objective(c):
    """Translate a flat wandb config dict into model kwargs, train, return PSNR."""
    init_bounds = [
        (c["c0"], c["c1"], c["c2"], c["c3"]),
        (c["c4"], c["c5"], c["c6"], c["c7"]),
        c["c8"],
    ]
    frequencies = [
        (c["omega0"], c["omega1"], c["omega2"], c["omega3"]),
        (c["omega4"], c["omega5"], c["omega6"], c["omega7"]),
    ]
    scales = [c["m0"], c["m1"]]
    net = SimpleSplitNet(cs=init_bounds, omegas=frequencies, m=scales)
    return _train_for_sweep(net, cfg, c["lr"], c["weight_decay"])
def main():
    """One sweep trial: wandb.config carries the sampled hyper-parameters."""
    wandb.init(project=PROJECT_NAME)
    psnr = objective(wandb.config)
    wandb.log({"psnr": psnr})
import os
from hydra import initialize, initialize_config_module, initialize_config_dir, compose
from hydra.utils import instantiate
from omegaconf import OmegaConf
def load_cfg(config_name="config", overrides=()):
    """Compose a Hydra config from ./conf (relative to this module)."""
    # with initialize_config_dir(config_dir="/app/notebooks/draft_02/conf"):
    with initialize(version_base=None, config_path="./conf"):
        cfg = compose(config_name=config_name, overrides=list(overrides))
    return cfg
# Global config read by objective(); loaded at import time.
cfg = load_cfg("sweep_config_0", overrides=["+device=cuda:0"])
print(OmegaConf.to_yaml(cfg))
if __name__ == "__main__":
    # Register the sweep with wandb and run 10 trials in this process.
    sweep_id = wandb.sweep(sweep=sweep_configuration, project=PROJECT_NAME)
    wandb.agent(sweep_id, function=main, count=10)
| kilianovski/my-neural-fields | notebooks/draft_01/sweep_pipeline.py | sweep_pipeline.py | py | 6,628 | python | en | code | 0 | github-code | 36 |
6347811698 | # 클래스 객체지향
# 클래스 생성
class Person:
    """A simple person model demonstrating attributes and methods."""

    # Class-level defaults; __init__ shadows most of them per instance.
    name = '익명'
    height = ''
    gender = ''
    blood_type = 'A'

    def __init__(self, name='홍동현', height=171, gender='male') -> None:
        """Store the given attributes on the instance (blood_type keeps the class default)."""
        self.name = name
        self.height = height
        self.gender = gender

    def walk(self):
        """Print a walking message."""
        print(f'{self.name}이(가) 걷습니다.')

    def run(self, option):
        """Run fast (after walking) when option == 'Fast', otherwise run slowly."""
        if option != 'Fast':
            print(f'{self.name}이(가) 천천히 뜁니다.')
            return
        self.walk()
        print(f'{self.name}이(가) 빨리 뜁니다.')

    def stop(self):
        """Print a stopping message."""
        print(f'{self.name}이(가) 멈춥니다.')

    def __str__(self) -> str:
        """Human-readable summary (name and gender)."""
        return f'출력 : 이름은 {self.name}, 성별은 {self.gender}입니다.'
# Creating an object (instance): parentheses are used when calling Person()
donghyun = Person() # create an object!
# donghyun.name = '홍동현'
# donghyun.height = '171'
# donghyun.gender = '남'
# donghyun.blood_type = 'B'
print(f'{donghyun.name}의 혈액형은 {donghyun.blood_type}입니다.')
donghyun.run('Fast')
print(donghyun)  # uses Person.__str__
# 1. Create a fresh object after the default initialization
hong = Person()
hong.run('')  # anything other than 'Fast' runs slowly
print(hong)
print('====================')
# 2. Call the constructor that takes parameters
ashely = Person('애슐리', 165, 'female')
print(ashely.name)
print(ashely.height)
print(ashely.gender)
print(ashely.blood_type)  # still the class default 'A' (__init__ does not set it)
print(ashely) | d0ng999/basic-Python2023 | Day04/code22_person.py | code22_person.py | py | 1,770 | python | ko | code | 0 | github-code | 36 |
5756767654 | import base64
import os
from io import BytesIO, StringIO
from pprint import pprint
import easyocr
import pandas as pd
from PIL import Image
import streamlit as st
# Shared OCR engines, built once at import time (model loading is expensive).
# gpu=True uses CUDA when available.
bn_reader = easyocr.Reader(['bn'], gpu=True)
en_reader = easyocr.Reader(['en'], gpu=True)
def get_nid_image(image_url):
    """Decode a base64-encoded image payload into a PIL Image."""
    raw = base64.b64decode(image_url)
    return Image.open(BytesIO(raw))
def get_nid_text(front_image, back_image):
    """OCR both sides of an NID card and extract the labelled fields.

    Front side: Bangla name/father/mother plus English name, date of birth
    and NID number, read as individual phrases. Back side: address, birth
    place and issue date, read as paragraphs.

    Returns a dict of the extracted strings; any field whose label was not
    detected is None instead of raising.
    """
    bn_front = bn_reader.readtext(front_image, detail=0, paragraph=False)
    en_front = en_reader.readtext(front_image, detail=0, paragraph=False)
    bn_back = bn_reader.readtext(back_image, detail=0, paragraph=True)
    en_back = en_reader.readtext(back_image, detail=0, paragraph=True)

    # BUG FIX: these were previously unbound (NameError) whenever a label was
    # missing from the OCR output; default every field to None instead.
    bn_name = bn_father_name = bn_mother_name = None
    en_name = en_dob = en_nid = None

    # The value belonging to a label is the phrase immediately after it.
    for index, phrase in enumerate(bn_front):
        if index + 1 >= len(bn_front):
            break  # label with no following value
        if phrase == 'নাম':
            bn_name = bn_front[index + 1]
        elif phrase == 'পিতা':
            bn_father_name = bn_front[index + 1]
        elif phrase == 'মাতা':
            bn_mother_name = bn_front[index + 1]

    for index, phrase in enumerate(en_front):
        if index + 1 >= len(en_front):
            break
        if phrase == 'Name':
            en_name = en_front[index + 1]
        elif phrase == 'Date of Birth':
            en_dob = en_front[index + 1]
        elif phrase == 'NID No':
            en_nid = en_front[index + 1]

    # NOTE(review): back-side fields are picked by fixed paragraph position;
    # guard the indexing so a short OCR result yields None rather than crash.
    response = {
        "bn_name": bn_name,
        "en_name": en_name,
        "bn_father_name": bn_father_name,
        "bn_mother_name": bn_mother_name,
        "en_dob": en_dob,
        "en_nid": en_nid,
        "bn_address": bn_back[0] if len(bn_back) > 0 else None,
        "en_birth_place": en_back[2] if len(en_back) > 2 else None,
        "en_issue_date": en_back[3] if len(en_back) > 3 else None,
    }
    return response
# Streamlit UI: upload both card sides, preview them, then run OCR extraction.
with st.form("nid_scanner_form", clear_on_submit=True):
    front_image = st.file_uploader("Front Image", type=["jpg", "png", "jpeg"])
    back_image = st.file_uploader("Back Image", type=["jpg", "png", "jpeg"])
    submit = st.form_submit_button("Submit")
    if submit:
        if front_image is not None and back_image is not None:
            # File extension without the leading dot (for the data URI).
            front_image_ext = os.path.splitext(front_image.name)[
                1].replace(".", "")
            back_image_ext = os.path.splitext(back_image.name)[
                1].replace(".", "")
            front_image_bytes = front_image.getvalue()
            back_image_bytes = back_image.getvalue()
            front_image_base64 = base64.b64encode(
                front_image_bytes).decode("utf-8")
            back_image_base64 = base64.b64encode(
                back_image_bytes).decode("utf-8")
            # Build data URIs so st.image can render the previews inline.
            front_image_data = f"data:image/{front_image_ext};base64," + \
                front_image_base64
            back_image_data = f"data:image/{back_image_ext};base64," + \
                back_image_base64
            st.image(front_image_data, caption="Front Image")
            st.image(back_image_data, caption="Back Image")
            # Round-trip through base64 back into PIL Images for the OCR step.
            front_str_to_img = Image.open(BytesIO(base64.b64decode(
                front_image_base64)))
            back_str_to_img = Image.open(BytesIO(base64.b64decode(
                back_image_base64)))
            try:
                response = get_nid_text(front_str_to_img, back_str_to_img)
                st.code(response, language="python")
            except Exception as e:
                # Surface OCR/extraction failures to the user instead of crashing.
                st.error(e)
        else:
            st.error("Please upload both images in order to proceed")
# decodeit = open('hello_level.jpeg', 'wb')
# decodeit.write(base64.b64decode((byte)))
# decodeit.close()
# import base64
# from io import BytesIO
# buffered = BytesIO()
# image.save(buffered, format="JPEG")
# img_str = base64.b64encode(buffered.getvalue())
# if uploaded_file is not None:
# # To read file as bytes:
# bytes_data = uploaded_file.getvalue()
# st.write(bytes_data)
# # To convert to a string based IO:
# stringio = StringIO(uploaded_file.getvalue().decode("utf-8"))
# st.write(stringio)
# # To read file as string:
# string_data = stringio.read()
# st.write(string_data)
# # Can be used wherever a "file-like" object is accepted:
# dataframe = pd.read_csv(uploaded_file)
# st.write(dataframe)
| bhuiyanmobasshir94/Computer-Vision | notebooks/colab/cv/ocr/ekyc/nid_scanner_with_streamlit_app.py | nid_scanner_with_streamlit_app.py | py | 4,287 | python | en | code | 0 | github-code | 36 |
27687264440 | from WGF import GameWindow, AssetsLoader, shared
from os.path import join
import logging
log = logging.getLogger(__name__)
SETTINGS_PATH = join(".", "settings.toml")
LEADERBOARD_PATH = join(".", "leaderboard.json")
LB_LIMIT = 5
def load_leaderboard():
    """Return the process-wide Leaderboard, creating and caching it on `shared`.

    Tries LEADERBOARD_PATH first; on any failure falls back to a built-in
    default board (placeholder entries) and writes it to disk.
    """
    if getattr(shared, "leaderboard", None) is None:
        from Game.leaderboard import Leaderboard
        try:
            lb = Leaderboard.from_file(LEADERBOARD_PATH, limit=LB_LIMIT)
        except Exception as e:
            log.warning(f"Unable to load leaderboard: {e}")
            # Creating default lb, in case our own doesnt exist
            board = {
                # #TODO: for now, entries are placeholders and dont match actual
                # score/kills values you can get in game
                "endless": {
                    "slug": "Endless",
                    "entries": [
                        {"name": "xXx_Gamer_xXx", "score": 720, "kills": 69},
                        {"name": "amogus", "score": 300, "kills": 50},
                        {"name": "Gabriel", "score": 100, "kills": 20},
                        {"name": "Default", "score": 50, "kills": 10},
                        {"name": "Karen", "score": 10, "kills": 1},
                    ],
                },
                "time_attack": {
                    "slug": "Time Attack",
                    "entries": [
                        {"name": "Top_Kek", "score": 300, "kills": 50},
                        {"name": "loss", "score": 200, "kills": 30},
                        {"name": "Someone", "score": 150, "kills": 25},
                        {"name": "Amanda", "score": 75, "kills": 13},
                        {"name": "123asd123", "score": 10, "kills": 1},
                    ],
                },
            }
            lb = Leaderboard(
                leaderboard=board,
                path=LEADERBOARD_PATH,
                limit=LB_LIMIT,
            )
            # Persist the default board so the next run loads it from file.
            lb.to_file()
        shared.leaderboard = lb
    return shared.leaderboard
def make_game() -> GameWindow:
    """Factory to create custom GameWindow.

    Builds the window, configures assets/settings, loads sprites at two
    scales, wires the FPS counter, and attaches the logo/menu/level scenes.
    """
    mygame = GameWindow("WeirdLand")
    assets_directory = join(".", "Assets")
    img_directory = join(assets_directory, join("Sprites"))
    mygame.assets = AssetsLoader(
        assets_directory=assets_directory,
        fonts_directory=join(assets_directory, "Fonts"),
        sounds_directory=join(assets_directory, "Sounds"),
        font_extensions=[".ttf"],
        image_extensions=[".png"],
        sound_extensions=[".wav"],
    )
    # Overriding some built-in defaults and adding new
    mygame.settings.set_default("vsync", True)
    mygame.settings.set_default("show_fps", False)
    mygame.settings.set_default("camera_speed", 0.8)
    mygame.settings.set_default(
        "window_modes",
        {
            "double_buffer": True,
            "hardware_acceleration": True,
        },
    )
    mygame.icon_path = join(".", "icon.png")
    # User settings from disk override the defaults set above.
    mygame.settings.from_toml(SETTINGS_PATH)
    mygame.init()
    mygame.assets.load_all()
    mygame.assets.spritesheets = {}
    # from WGF import shared
    load_leaderboard()
    # This is kinda janky, but also kinda not?
    # Sprite scales are stashed on `shared` so scenes can read them later.
    shared.sprite_scale = 4
    mygame.assets.load_images(
        path=join(img_directory, "4x"),
        scale=shared.sprite_scale,
    )
    shared.extra_scale = 2
    mygame.assets.load_images(
        path=join(img_directory, "2x"),
        scale=shared.extra_scale,
    )
    # Specifying font as shared variable, since it should be used in all scenes
    shared.font = mygame.assets.load_font("./Assets/Fonts/romulus.ttf", 36)
    shared.game_paused = False
    from WGF.nodes import Align
    from WGF import Point
    from Game.ui import make_text
    # FPS counter pinned to the window's top-right corner.
    fps_counter = make_text(
        name="fps_counter",
        text="",
        pos=Point(mygame.screen.get_rect().width, 0),
        align=Align.topright,
    )
    @fps_counter.updatemethod
    def update_fps():
        # if not shared.game_paused:
        fps_counter.text = f"FPS: {mygame.clock.get_fps():2.0f}"
    # Scene tree: logo plays first; menu and level start hidden.
    from Game.scenes import logo, level, menus
    mygame.tree.add_child(logo.sc)
    mygame.tree.add_child(menus.mm_wrapper, show=False)
    mygame.tree.add_child(level.sc, show=False)
    level.sc.stop()
    mygame.tree.add_child(fps_counter, show=mygame.settings["show_fps"])
    return mygame
| moonburnt/WeirdLand | Game/main.py | main.py | py | 4,372 | python | en | code | 1 | github-code | 36 |
30643576972 | #Michal Badura
#simple Logistic Regression class with gradient checking
import numpy as np
from numpy.linalg import norm
import pickle, gzip
class LogisticRegression():
    """
    Logistic regression, trained by mini-batch gradient descent.

    theta has shape (nout, nin+1); the extra column is the bias weight,
    matched by appending a constant 1.0 row to every input in predict().
    """
    def __init__(self, nin, nout):
        # adding one column for bias
        self.theta = np.random.uniform(-1, 1, (nout, nin+1))
        # logistic activation function and its derivative
        self.activ = lambda x: 1 / (1.0 + np.exp(-x))
        self.activprime = lambda x: self.activ(x) * (1 - self.activ(x))

    def single_gradient(self, X, y):
        """Calculates the cross-entropy gradient of theta for a single datapoint."""
        ypred = self.predict(X)
        Xbias = np.vstack((X, np.array([1.0])))  # append the bias input
        delta = (ypred - y)
        return np.dot(delta, Xbias.T)

    def train(self, Xtrain, ytrain, niter, alpha, batchsize=100):
        """Run `niter` mini-batch gradient-descent steps with step size `alpha`."""
        for t in range(niter):
            total_gradient = 0
            # Sample a random mini-batch (with replacement).
            batchidx = np.random.choice(range(len(Xtrain)), batchsize)
            Xbatch = [Xtrain[i] for i in batchidx]
            ybatch = [ytrain[i] for i in batchidx]
            for i in range(len(Xbatch)):
                X, y = Xbatch[i], ybatch[i]
                total_gradient -= self.single_gradient(X, y)
                #total_gradient += self.numerical_gradient(X, y)
            total_gradient /= len(Xbatch)
            self.theta += alpha * total_gradient
            if t % 10 == 0:
                print("Iteration {0}, error: {1}".format(
                    t, self.error(Xtrain, ytrain)))

    def predict(self, X):
        """Return the activation of theta applied to X (column vector of length nin)."""
        X = np.vstack((X, np.array([1.0])))  # add bias input
        return self.activ(np.dot(self.theta, X))

    def error(self, Xs, ys):
        """Computes prediction error for a dataset, using a convex cost function."""
        m = len(Xs)
        ypreds = list(map(self.predict, Xs))
        errs = [ys[i]*np.log(ypreds[i]) + (1-ys[i])*np.log(1-ypreds[i]) for i in range(len(ys))]
        total = sum(map(norm, errs))
        #diffs = sum([np.dot((ys[i]-ypreds[i]).T, (ys[i]-ypreds[i])) for i in range(len(ys))])
        return -1.0/(m) * total

    def numerical_gradient(self, X, y, eps=0.000001):
        """
        Gradient computed by central finite differences, for gradient checking.

        BUG FIX: the old version (a) never restored +eps after the -eps probe,
        so every subsequent entry was estimated at a shifted theta, and
        (b) "saved" theta via `self.theta[:]`, which for a numpy array is a
        *view*, so the final restore was a no-op and theta stayed perturbed.
        Each entry is now restored exactly after probing, leaving theta
        unchanged on return.
        """
        grad = np.zeros(self.theta.shape)
        for i in range(self.theta.shape[0]):
            for j in range(self.theta.shape[1]):
                orig = self.theta[i][j]
                self.theta[i][j] = orig + eps
                high = self.error([X], [y])
                self.theta[i][j] = orig - eps
                low = self.error([X], [y])
                grad[i][j] = (high - low) / (2 * eps)
                self.theta[i][j] = orig  # restore before moving to the next entry
        return grad
def test():
    """Smoke test: fit a 5-input, 2-output regressor on random data.

    NOTE(review): the targets (sum(X)/3, sum(X)/5) are not probabilities in
    [0, 1], so the log-loss here is only a numerical exercise — confirm intent.
    """
    Xs = [np.random.uniform(-10,10,(5,1)) for _ in range(100)]
    ys = [np.array([np.sum(X)/3, np.sum(X)/5]).reshape(2,1) for X in Xs]
    global model
    # BUG FIX: inputs are 5-dimensional and targets 2-dimensional, but the
    # model was built as LogisticRegression(2, 1), which crashed predict()
    # with a shape mismatch.
    model = LogisticRegression(5, 2)
    print(model.error(Xs, ys))
    model.train(Xs, ys, 1000, 0.02)
def testSingle():
    """Smoke test: binary classification of whether mean(X) exceeds 1."""
    global model
    samples = [np.array([1] * 5).reshape(5, 1)]
    samples += [np.random.uniform(-10, 10, (5, 1)) for _ in range(100)]
    labels = []
    for sample in samples:
        flag = 1 if np.sum(sample) / 5 > 1 else 0
        labels.append(np.array([flag]).reshape(1, 1))
    model = LogisticRegression(5, 1)
    print(model.error(samples, labels))
    print(samples[0], labels[0])
    model.train(samples, labels, 100, 0.2)
def testMnist(to_test):
    """Train on MNIST and print accuracy over the first `to_test` test images.

    Expects ./mnist.pkl.gz (the pickled MNIST dataset from deeplearning.net).
    """
    #pickled MNIST dataset from deeplearning.net
    global train_set, valid_set
    global model
    f = gzip.open('mnist.pkl.gz', 'rb')
    train_set, valid_set, test_set = pickle.load(f, encoding='latin1')
    f.close()
    # Flatten each 28x28 image into a (784, 1) column vector.
    Xs = list(map(lambda arr: np.array(arr).reshape(784,1), train_set[0]))
    # One-hot encode labels as (10, 1) column vectors.
    ys = list(map(lambda n: np.array([1 if i==n else 0 for i in range(10)]).reshape(10,1), train_set[1]))
    model = LogisticRegression(784, 10)
    model.train(Xs, ys, 1000, 0.15)
    correct = 0
    for i in range(to_test):
        # Predicted digit = index of the largest output activation.
        pred = np.argmax(model.predict(test_set[0][i].reshape(784,1)))
        actual = test_set[1][i]
        msg = "INCORRECT"
        if actual == pred:
            msg = "CORRECT"
            correct += 1
        #print("Number: {0}. Predicted: {1}. {2}".format(actual, pred, msg))
    print("Accuracy: {0}".format(correct*1.0/to_test))
np.random.seed(1)  # make the run reproducible
testMnist(10000)   # train and evaluate on the full 10k-image test set
32518923983 | #username - omrikaplan
#id1 - 319089256
#name1 - Omri Kaplan
#id2 - 209422054
#name2 - Barak Neuberger
import random
"""A class represnting a node in an AVL tree"""
import math
import random
import sys
class AVLNode(object):
    """A node of an AVL tree used as a list cell.

    Fields: value (str payload), left/right/parent links, size (number of
    items in this subtree) and height. Virtual sentinel nodes are produced
    by CreateVarNode() with size 0, height -1 and is_real_node False.
    @type value: str
    @param value: data of your node
    """
    def __init__(self, value):
        self.value = value
        self.left = None
        self.right = None
        self.parent = None
        self.size = 1
        self.height = 0 # height of the subtree rooted here (0 for a fresh leaf)
        self.is_real_node = True
    # NOTE: the bare triple-quoted strings below are stray documentation
    # strings, not docstrings attached to the methods that follow them.
    """returns the left child
    @rtype: AVLNode
    @returns: the left child of self, None if there is no left child
    """
    def getSize(self):
        return self.size
    def getLeft(self):# works in O(1)
        return self.left
    """returns the right child
    @rtype: AVLNode
    @returns: the right child of self, None if there is no right child
    """
    def getRight(self):# works in O(1)
        return self.right
    """returns the parent
    @rtype: AVLNode
    @returns: the parent of self, None if there is no parent
    """
    def getParent(self):# works in O(1)
        return self.parent
    """return the value
    @rtype: str
    @returns: the value of self, None if the node is virtual
    """
    def getValue(self):# works in O(1)
        return self.value
    """returns the height
    @rtype: int
    @returns: the height of self, -1 if the node is virtual
    """
    def getHeight(self):# works in O(1)
        return self.height
    """sets left child
    @type node: AVLNode
    @param node: a node
    """
    def setLeft(self, node):# works in O(1)
        self.left = node
        return None
    """sets right child
    @type node: AVLNode
    @param node: a node
    """
    def setRight(self, node):# works in O(1)
        self.right = node
        return None
    """sets parent
    @type node: AVLNode
    @param node: a node
    """
    def setParent(self, node):# works in O(1)
        self.parent = node
        return None
    """sets value
    @type value: str
    @param value: data
    """
    def setValue(self, value):# works in O(1)
        self.value= value
        return None
    """sets the height of the node
    @type h: int
    @param h: the height
    """
    def setHeight(self, h):# works in O(1)
        self.height = h
        return None
    """returns whether self is not a virtual node
    @rtype: bool
    @returns: False if self is a virtual node, True otherwise.
    """
    def isRealNode(self): # works in O(1)
        return self.is_real_node
def fixSizeUpwards(node):  # O(log n): one step per ancestor
    """Recompute the `size` field of every ancestor of *node*, bottom-up."""
    ancestor = node.parent
    while ancestor is not None:
        ancestor.size = ancestor.left.size + ancestor.right.size + 1
        ancestor = ancestor.parent
def CreateVarNode():  # O(1)
    """Build a virtual (sentinel) AVL node: no links, size 0, height -1."""
    sentinel = AVLNode(None)
    sentinel.left = sentinel.right = sentinel.parent = None
    sentinel.size = 0
    sentinel.height = -1
    sentinel.is_real_node = False
    return sentinel
def bf(node):
    """Balance factor of *node*: height(left subtree) - height(right subtree)."""
    left_height = node.left.height
    right_height = node.right.height
    return left_height - right_height
"""
A class implementing the ADT list, using an AVL tree.
"""
def mergesort(lst): # works in O(nlog(n)) from intro to cs
n = len(lst)
if n <= 1:
return lst
else:
return merge(mergesort(lst[0:n//2]), mergesort(lst[n//2:n]))
def merge(A, B): #from intro to cs, works in O(n+m) where n and m are the length of A and B
n = len(A)
m = len(B)
C = [None for i in range(n+m)]
a = 0; b = 0; c = 0
while a < n and b < m:
if A[a] < B[b]:
C[c] = A[a]
a += 1
else:
C[c] = B[b]
b += 1
c += 1
C[c:] = A[a:] + B[b:]
return C
class AVLTreeList(object):
"""
Constructor, you are allowed to add more fields.
"""
def __init__(self):
self.size = 0
self.root = None
self.min = None
# add your fields here
    def successor(self, node): # finding a node's successor O(log(n))
        """Return the node immediately after *node* in list order, or None for the last node."""
        if node.right.is_real_node:
            # Right subtree exists: successor is its leftmost real node.
            node = node.right
            while node.left.is_real_node:
                node = node.left
            return node
        else:
            # Otherwise climb until we leave a right subtree.
            parent = node.getParent()
            while parent is not None and node == parent.right:
                node = parent
                parent = node.parent
            return parent
    def predeccessor(self, node): #same as successor, just switching left with right and vice versa. O(log(n))
        """Return the node immediately before *node* in list order, or None for the first node.

        (Name misspelled but kept as-is: insert() calls it by this name.)
        """
        if node.left.is_real_node:
            # Left subtree exists: predecessor is its rightmost real node.
            node = node.left
            while node.right.is_real_node:
                node = node.right
            return node
        else:
            # Otherwise climb until we leave a left subtree.
            parent = node.getParent()
            while parent is not None and node == parent.left:
                node = parent
                parent = node.parent
            return parent
def rank(self, node): # finding a node's rank O(log(n))
r = node.left.size + 1
x = node
while x.parent is not None:
if x == x.parent.right:
r += x.parent.left.size + 1
x = x.parent
return r
"""returns whether the list is empty
@rtype: bool
@returns: True if the list is empty, False otherwise
"""
def empty(self):
if self.size == 0:
return True
return False
    def tree_select(self, k):
        """Return the node holding the item of 1-based rank *k*.

        O(log n): descends by comparing k with left-subtree sizes.
        """
        def tree_select_rec(node, k):
            # Rank of `node` inside its own subtree.
            r = node.left.size + 1
            if k == r:
                return node
            elif k < r and node.left.is_real_node:
                return tree_select_rec(node.left, k)
            elif node.right.is_real_node:
                return tree_select_rec(node.right, k - r)
            else:
                # NOTE(review): falls through (returns None) when k is out of
                # range for this subtree — callers must keep k <= size.
                return
        return tree_select_rec(self.root, k)
"""retrieves the value of the i'th item in the list
@type i: int
@pre: 0 <= i < self.length()
@param i: index in the list
@rtype: str
@returns: the the value of the i'th item in the list
"""
def retrieve(self, i):
if i < 0 or i > self.size or self.size == 0:
return None
return self.tree_select(i+1).value
"""inserts val at position i in the list
@type i: int
@pre: 0 <= i <= self.length()
@param i: The intended index in the list to which we insert val
@type val: str
@param val: the value we inserts
@rtype: list
@returns: the number of rebalancing operation due to AVL rebalancing
"""
def insert(self, i, val):
cnt = 0 #number of balancing fixes
toInsert = AVLNode(val)
toInsert.right = CreateVarNode()
toInsert.left = CreateVarNode()
toInsert.right.parent = toInsert
toInsert.left.parent = toInsert
if self.root is None:
self.root = toInsert
self.min = toInsert
self.size += 1
return 0
if i == 0: #inserting to the start of the list
toInsert.parent = self.min
self.min.left = toInsert
self.min = toInsert
elif i == self.size: #inserting to the end of the list
n = self.root
while n.right.isRealNode():
n = n.right
n.setRight(toInsert)
toInsert.setParent(n)
else: #finding rank i+1 and inserting as left child if that position is open
currNode = self.tree_select(i+1)
if not currNode.left.isRealNode():
currNode.left = toInsert
toInsert.setParent(currNode)
else: #finding i+1's predescessor and inserting as max to it's left sub-tree
currNode = self.predeccessor(currNode)
currNode.setRight(toInsert)
toInsert.parent = currNode
cnt += self.balanceUp(toInsert.parent)
fixSizeUpwards(toInsert)
self.size += 1
return cnt
    def leftRotate(self, z): #+1 to fixing actions O(1), y.right height is not updated
        """Single left rotation around *z*; returns 1 (one rotation performed)."""
        y = z.right
        T2 = y.left
        # Perform rotation
        y.left = z
        z.right = T2
        # organize parents
        y.parent = z.parent
        z.parent = y
        T2.parent = z
        if y.parent is not None:
            # Re-attach y under z's former parent on the correct side.
            if y.parent.right == z:
                y.parent.right = y
            else:
                y.parent.left = y
        # Update heights and sizes (z first — it is now y's child)
        z.height = 1 + max(z.left.getHeight(), z.right.getHeight())
        y.height = 1 + max(y.left.getHeight(), y.right.getHeight())
        z.size = 1 + z.left.size + z.right.size
        y.size = 1 + y.left.size + y.right.size
        return 1
    def rightRotate(self, z): ##+1 to fixing actions
        """Single right rotation around *z*; returns 1 (one rotation performed)."""
        y = z.left
        T3 = y.right
        # Perform rotation
        y.right = z
        z.left = T3
        # organize parents
        y.parent = z.parent
        z.parent = y
        T3.parent = z
        if y.parent is not None:
            # Re-attach y under z's former parent on the correct side.
            if y.parent.right == z:
                y.parent.right = y
            else:
                y.parent.left = y
        # Update heights and sizes (z first — it is now y's child)
        z.height = 1 + max(z.left.getHeight(), z.right.getHeight())
        y.height = 1 + max(y.left.getHeight(), y.right.getHeight())
        z.size = 1 + z.left.size + z.right.size
        y.size = 1 + y.left.size + y.right.size
        # Return the number of corrections done
        return 1
def leftRightRotate(self, z): #+2 to fixing actions
self.leftRotate(z.left)
self.rightRotate(z)
# Return the number of corrections done
return 2
def rightLeftRotate(self, z): ##+2 to fixing actions
self.rightRotate(z.right)
self.leftRotate(z)
# Return the number of corrections done
return 2
    def balanceUp(self, n):
        """Climb from n to the root, refreshing cached heights and rotating
        wherever the AVL balance factor reaches +/-2; returns the number of
        rotation actions performed.

        NOTE(review): the initial ``n.height += 1`` assumes the caller just
        made n's subtree one level taller (as insert does after attaching a
        leaf) -- confirm this precondition for other callers (delete/concat).
        """
        cnt = 0
        n.height += 1
        while n.getParent() is not None:
            n = n.getParent()
            # Refresh this ancestor's height before measuring its balance.
            n.height = 1 + max(n.left.height, n.right.height)
            BF = bf(n)
            # Four classic AVL cases, selected by the child's balance factor.
            if BF == -2 and (bf(n.right) == -1 or bf(n.right) == 0):
                cnt += self.leftRotate(n)
                continue
            elif BF == -2 and bf(n.right) == 1:
                cnt += self.rightLeftRotate(n)
                continue
            elif BF == 2 and bf(n.left) == -1:
                cnt += self.leftRightRotate(n)
                continue
            elif BF == 2 and (bf(n.left) == 1 or bf(n.left) == 0):
                cnt += self.rightRotate(n)
                continue
            else:
                continue  # balanced here; keep climbing
        # n is now the topmost node reached; after a rotation of the old root
        # the next iteration steps onto the new subtree root, so this keeps
        # self.root current.
        self.root = n
        return cnt
"""deletes the i'th item in the list
@type i: int
@pre: 0 <= i < self.length()
@param i: The intended index in the list to be deleted
@rtype: int
@returns: the number of rebalancing operation due to AVL rebalancing
"""
def delete(self, i):
cnt = 0
currNode = self.tree_select(i+1) #getting to the node
needToBalFrom = currNode.parent
self.delNode(currNode)
cnt += self.balanceUp(needToBalFrom)
return cnt
def delNode(self, currNode): ##returns the node from wise the balanceUp needs to accure
if not currNode.right.isRealNode() and not currNode.left.isRealNode():
if currNode.parent.right == currNode:
currNode.parent.right = CreateVarNode()
else:
currNode.parent.left = CreateVarNode()
elif currNode.right.isRealNode() and currNode.left.isRealNode():
suc = self.successor(currNode)
## recursive with successor
## asked Omri to try
self.delNode(suc)
elif not currNode.right.isRealNode():
if currNode.parent.right == currNode:
currNode.parent.right = currNode.left
else:
currNode.parent.left = currNode.left
elif not currNode.left.isRealNode():
if currNode.parent.right == currNode:
currNode.parent.right = currNode.right
else:
currNode.parent.left = currNode.right
return
"""returns the value of the first item in the list
@rtype: str
@returns: the value of the first item, None if the list is empty
"""
def first(self): #works in O(1)
return self.min
"""returns the value of the last item in the list
@rtype: str
@returns: the value of the last item, None if the list is empty
"""
def last(self): #works in O(log(n))
if self.size == 0:
return None
node = self.root
while node.right.is_real_node:
node = node.right
return node
"""returns an array representing list
@rtype: list
@returns: a list of strings representing the data structure
"""
def listToArray(self): # works like inorder walk, O(n) whereas n is the length of the list
array = []
if self.size == 0:
return []
def listToArray_rec(node, array):
if node.is_real_node is False:
return
listToArray_rec(node.left, array)
array.append(node.value)
listToArray_rec(node.right, array)
return listToArray_rec(self.root, array)
"""returns the size of the list
@rtype: int
@returns: the size of the list
"""
def length(self): #works in O(1)
return self.size
"""sort the info values of the list
@rtype: list
@returns: an AVLTreeList where the values are sorted by the info of the original list.
"""
def sort(self): #works in O(n) because of the listToArray
tree_array = self.listToArray()
sorted_tree = AVLTreeList
sorted_tree_array = mergesort(tree_array)
for i in range(len(tree_array)):
sorted_tree.insert(i, sorted_tree_array[i])
return sorted_tree
"""permute the info values of the list
@rtype: list
@returns: an AVLTreeList where the values are permuted randomly by the info of the original list. ##Use Randomness
"""
def permutation(self): #works in O(n) because of the listToArray
permed_tree = AVLTreeList
tree_array = self.listToArray()
rand_locations = random.sample(range(0, self.size - 1), self.size)
for i in range(len(tree_array)):
permed_tree.insert(i, tree_array[rand_locations[i]])
return permed_tree
"""concatenates lst to self
@type lst: AVLTreeList
@param lst: a list to be concatenated after self
@rtype: int
@returns: the absolute value of the difference between the height of the AVL trees joined
"""
def concat(self, lst): #works in O(log(|n-m|)) whereas n is the size of self and m is the size of lst
#follow the algorithm from the class
# waiting for barak's "fix from here upward" function
if self.root.height - lst.root.height <= -2:
connector = self.last()
self.delNode(connector)
self.balanceUp(connector.parent)
n = lst.root
while n.height > self.root.height:
n.left
connector.left = self.root
self.root.parent = connector
connector.right = n
connector.parent = n.parent
n.parent = connector
self.root = lst.root
self.balanceUp(connector)
elif self.root.height - lst.root.height >= 2:
connector = lst.first()
lst.delNode(connector)
lst.balanceUp(connector.parent)
n = self.root
while n.height > lst.root.height:
n.right
connector.right = lst.root
lst.root.parent = connector
connector.left = n
connector.parent = n.parent
n.parent = connector
self.balanceUp(connector)
else:
connector = self.last()
balance_from = connector.parent
self.delNode(connector)
self.balanceUp(connector.parent)
connector.left = self.root
connector.right = lst.root
connector.parent = None
self.root = connector
return abs(self.root.height - lst.root.height)
"""searches for a *value* in the list
@type val: str
@param val: a value to be searched
@rtype: int
@returns: the first index that contains val, -1 if not found.
"""
def search(self, val): #works in O(nlog(n)) proof from recitation
if self.size == 0:
return -1
node = self.min
if node.value == val:
return self.rank(node)
while self.successor(node).value != val:
node = self.successor(node)
if node is None: # if after the loop it's still the min then val is not in the list
return -1
return self.rank(node)
"""returns the root of the tree representing the list
@rtype: AVLNode
@returns: the root, None if the list is empty
"""
def getRoot(self): #works in 0(1)
return self.root
def append(self, val):
self.insert(self.length(), val)
| BarakNeu/Software-Stractures-Project1 | avl_template_new.py | avl_template_new.py | py | 14,892 | python | en | code | 0 | github-code | 36 |
21011472446 | import os
from pytest import raises
from pydantic.error_wrappers import ValidationError
from pathlib import Path
from unittest import TestCase
from lazy_env_configurator import BaseConfig, BaseEnv
from lazy_env_configurator.custom_warnings import EnvWarning
class TestInvalidEnv(TestCase):
    """Eager validation must raise as soon as the env class is *defined*."""

    def test_eager_validation(self):
        # Defining (not instantiating) the class should already trigger
        # validation, because ``eagerly_validate = True`` below.
        with raises(ValidationError) as e:
            class ContainedEnv(BaseEnv):
                class Config(BaseConfig):
                    envs = ("FOO", "APP")
                    # presumably ``contained`` restricts lookups to the
                    # dotenv file next to this test -- confirm in the
                    # library docs.
                    dot_env_path = Path(__file__).parent / ".env.contained"
                    contained = True
                    validations = {
                        "FOO": {
                            "required": True,
                            "type": str,
                        },
                        "APP": {
                            "required": True,
                            "type": str
                        }
                    }
                    eagerly_validate = True
        self.assertIsInstance(e.value, ValidationError)
        # loc_tuple()[1] is the offending field name reported by pydantic;
        # presumably FOO is supplied by .env.contained while APP is missing
        # -- confirm against the fixture file.
        self.assertEqual(e.value.raw_errors[0].loc_tuple()[1], "APP")
| satyamsoni2211/lazy_env_configurator | tests/test_eager_validation.py | test_eager_validation.py | py | 1,131 | python | en | code | 2 | github-code | 36 |
40280870495 | import cv2
import numpy as np
import argparse
import random
import os
import os.path as osp
from sklearn.feature_extraction import image
# import imutils
from tqdm import tqdm,trange
# argument parser
'''
dataNum: How many samples you want to synthesize
load_image_path: Path to load background image
load_rain_path: Path to load rain streak
load_depth_path: Path to load depth information
save_input_image_path: Path to save images with rain and haze
save_gt_image_path: Path to save clean ground truth images
save_gtNohaze_image_path: Path to save no haze (rainy) images
save_gtNoRain_image_path: Path to save no rain (hazy) images
save_depth_path: Path to save depth information
rainType: How many rain streaks you want to overlay on images
ang: Angle for random rotating [-ang:ang]
'''
def Parser(argv=None):
    """Build and parse the command-line options of the fog-synthesis script.

    :param argv: optional list of argument strings; defaults to
        ``sys.argv[1:]`` (unchanged behavior), exposed so the parser can be
        exercised programmatically/in tests.
    :return: ``argparse.Namespace`` with the options declared below.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("--load_image_path", type=str, default="cam_stereo_left_lut/", help='path to load images')
    parser.add_argument("--load_depth_path", type=str, default="depth_image/", help='path to load depth info')
    parser.add_argument("--save_image_path", type=str, default="foggy_camera/", help='path to save ground truth images')
    parser.add_argument("--light_min", type=float, default=0.3)
    parser.add_argument("--light_max", type=float, default=0.8)
    parser.add_argument("--beta_min", type=float, default=1.3)
    parser.add_argument("--beta_max", type=float, default=1.3)
    parser.add_argument("--beta_range", type=float, default=0.3)
    # NOTE(review): type=bool is a footgun -- bool("False") is True, so any
    # non-empty value enables it. Consider action="store_true" (interface
    # change, so only flagged here).
    parser.add_argument("--train_only", type=bool, default=False)
    parser.add_argument("--target_image_path", type=str, default="foggy_camera/", help='path to load images')
    opt = parser.parse_args(argv)  # argv=None -> sys.argv[1:], as before
    return opt
# depth to transmission formula
def depthToTransmission(depth, b_min, b_max):
    """Convert a depth map (0-255) into a transmission map.

    t = exp(-beta * depth/255), with beta drawn uniformly from
    [b_min, b_max].
    """
    normalized_depth = depth / 255.0
    beta = np.random.uniform(b_min, b_max)
    return np.exp(-beta * normalized_depth)
def light_effect(img, airlight, night):
    """Build the atmospheric-light map, adding a glow around bright spots
    at night.

    :param img: BGR image (uint8).
    :param airlight: base atmospheric light in [0, 1].
    :param night: 1 for night scenes, anything else for day.
    :return: 3-channel (airlight map, raw glow, smoothed glow); the glow
             maps are None for day scenes.
    """
    if night == 1:
        # Locate light sources: blurred gray values above 205, rescaled.
        gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
        gray = cv2.GaussianBlur(gray, (21, 21), 0)
        light_mask = gray > 205
        glow_raw = light_mask * ((gray - 205) / 50.0)
        # Repeatedly blur and take the element-wise max so the glow spreads
        # outward without dimming the source pixels; the fixed kernel-size
        # schedule controls how far it bleeds.
        glow = np.maximum(glow_raw, cv2.GaussianBlur(glow_raw, (25, 25), 0))
        for ksize in (25, 45, 45, 65, 65, 21, 21, 21):
            glow = np.maximum(glow, cv2.GaussianBlur(glow, (ksize, ksize), 0))
        # Raise the base airlight towards 0.95 where the glow is strong.
        atlight = airlight * np.ones(img.shape)[:, :, 0]
        atlight = atlight + (0.95 - airlight) * glow
        return (
            cv2.merge([atlight, atlight, atlight]),
            cv2.merge([glow_raw, glow_raw, glow_raw]),
            cv2.merge([glow, glow, glow]),
        )
    else:
        # Day scene: constant airlight, no glow maps.
        atlight = airlight * np.ones(img.shape)[:, :, 0]
        return cv2.merge([atlight, atlight, atlight]), None, None
def add_fog(image, depth, trans, airLight, night):
    """Composite haze onto image via the model I*t + A*(1-t).

    :param image: BGR image (uint8); scaled internally to [0, 1].
    :param depth: unused here (transmission is precomputed); kept for API.
    :param trans: per-pixel transmission map.
    :param airLight: base atmospheric light in [0, 1].
    :param night: 1 enables the night glow model in light_effect.
    :return: (hazy image in [0, 1], airlight map, raw glow, smoothed glow)
    """
    light, glow_raw, glow = light_effect(image, airLight, night)
    normalized = (image / 255.0).astype("float32")
    full_transmission = np.ones(normalized.shape)
    hazyimage = normalized * trans + light * (full_transmission - trans)
    return hazyimage, light, glow_raw, glow
def get_valid_list():
    """Read the clear-day and clear-night training splits.

    Each split line ``a,b,c`` is mapped to the image file name ``a_b_c.png``.

    :return: tuple (valid_day, valid_night) of file-name lists.
    """
    def _read_split(split_path):
        # ``with`` replaces the manual open/close of the original duplicated
        # blocks and guarantees the handle is released on error.
        with open(split_path, 'r') as split_file:
            return [line.replace(",", "_").rstrip() + '.png' for line in split_file]

    valid_day = _read_split("splits/train_clear_day.txt")
    valid_night = _read_split("splits/train_clear_night.txt")
    return valid_day, valid_night
def main():
    """Synthesize foggy images for every frame in the clear-day/night splits."""
    opt = Parser()
    # Create the output directory on first run.
    if not os.path.isdir(opt.save_image_path):
        os.makedirs(opt.save_image_path)
        print(f'save image at {opt.save_image_path}')
    images_list = os.listdir(opt.load_image_path)
    datasize = len(images_list)
    valid_day_list, valid_night_list = get_valid_list()
    # Main synthesis loop over every candidate frame.
    for idx in trange(datasize):
        file_name = images_list[idx]
        if file_name not in valid_day_list and file_name not in valid_night_list:
            continue  # frame is in neither training split
        if file_name in valid_night_list:
            night = 1
        elif file_name in valid_day_list:
            night = 0
        else:
            print("wrong")  # unreachable: guarded above
        # Load the frame and its depth map.
        image = cv2.imread(osp.join(opt.load_image_path, file_name))
        depth = cv2.imread(osp.join(opt.load_depth_path, file_name))
        # Convert depth to a transmission map.
        trans = depthToTransmission(depth, 1.0, 1.6)
        # Night scenes get a dimmer airlight range than day scenes.
        if night == 0:
            airLight = np.random.uniform(0.4, 0.75)
        elif night == 1:
            airLight = np.random.uniform(0.3, 0.65)
        hazyimage, light, glow_raw, glow = add_fog(image, depth, trans, airLight, night)
        cv2.imwrite(osp.join(opt.save_image_path, file_name), hazyimage * 255)
if __name__ == "__main__":
main()
| Chushihyun/MT-DETR | data/datagen_fog.py | datagen_fog.py | py | 6,879 | python | en | code | 22 | github-code | 36 |
21333304207 | import unittest
from climateeconomics.sos_processes.iam.witness.witness_coarse.usecase_witness_coarse_new import Study
from sostrades_core.execution_engine.execution_engine import ExecutionEngine
from tempfile import gettempdir
from copy import deepcopy
from gemseo.utils.compare_data_manager_tooling import delete_keys_from_dict,\
compare_dict
import numpy as np
class WITNESSParallelTest(unittest.TestCase):
    """Run the witness_coarse MDA with 16 processes and with 1 process and
    check both executions match (data manager and discipline Jacobians)."""

    def setUp(self):
        # Engine used for the single-process reference run; the 16-process
        # engine is created inside the test itself.
        self.name = 'Test'
        self.root_dir = gettempdir()
        self.ee = ExecutionEngine(self.name)

    def test_01_exec_parallel(self):
        """
        16-process run vs 1-process reference run.
        """
        n_proc = 16
        repo = 'climateeconomics.sos_processes.iam.witness'
        self.ee8 = ExecutionEngine(self.name)
        builder = self.ee8.factory.get_builder_from_process(
            repo, 'witness_coarse')
        self.ee8.factory.set_builders_to_coupling_builder(builder)
        self.ee8.configure()
        self.ee8.display_treeview_nodes()
        usecase = Study()
        usecase.study_name = self.name
        values_dict = {}
        for dict_item in usecase.setup_usecase():
            values_dict.update(dict_item)
        values_dict[f'{self.name}.sub_mda_class'] = "GSPureNewtonMDA"
        values_dict[f'{self.name}.max_mda_iter'] = 50
        values_dict[f'{self.name}.n_processes'] = n_proc
        self.ee8.load_study_from_input_dict(values_dict)
        self.ee8.execute()
        dm_dict_8 = deepcopy(self.ee8.get_anonimated_data_dict())
        """
        1 proc
        """
        n_proc = 1
        builder = self.ee.factory.get_builder_from_process(
            repo, 'witness_coarse')
        self.ee.factory.set_builders_to_coupling_builder(builder)
        self.ee.configure()
        self.ee.display_treeview_nodes()
        usecase = Study()
        usecase.study_name = self.name
        values_dict = {}
        for dict_item in usecase.setup_usecase():
            values_dict.update(dict_item)
        values_dict[f'{self.name}.sub_mda_class'] = "GSPureNewtonMDA"
        values_dict[f'{self.name}.max_mda_iter'] = 50
        values_dict[f'{self.name}.n_processes'] = n_proc
        self.ee.load_study_from_input_dict(values_dict)
        self.ee.execute()
        dm_dict_1 = deepcopy(self.ee.get_anonimated_data_dict())
        residual_history = self.ee.root_process.sub_mda_list[0].residual_history
        dict_error = {}
        # to delete modelorigin and discipline dependencies which are not the
        # same
        delete_keys_from_dict(dm_dict_1)
        delete_keys_from_dict(dm_dict_8)
        compare_dict(dm_dict_1,
                     dm_dict_8, '', dict_error)
        residual_history8 = self.ee8.root_process.sub_mda_list[0].residual_history
        #self.assertListEqual(residual_history, residual_history8)
        for key, value in dict_error.items():
            print(key)
            print(value)
        # Compare the Jacobians of every discipline pairwise; on mismatch
        # dump the differing sub-matrices for debugging.
        for disc1, disc2 in zip(self.ee.root_process.sos_disciplines, self.ee8.root_process.sos_disciplines):
            if disc1.jac is not None:
                # print(disc1)
                for keyout, subjac in disc1.jac.items():
                    for keyin in subjac.keys():
                        comparison = disc1.jac[keyout][keyin].toarray(
                        ) == disc2.jac[keyout][keyin].toarray()
                        try:
                            self.assertTrue(comparison.all())
                        # BUG FIX: was a bare ``except:`` which also caught
                        # KeyboardInterrupt/SystemExit; only real comparison
                        # failures should trigger the diagnostic dump.
                        except Exception:
                            print('error in jac')
                            print(keyout + ' vs ' + keyin)
                            np.set_printoptions(threshold=1e6)
                            for arr, arr2 in zip(disc1.jac[keyout][keyin], disc2.jac[keyout][keyin]):
                                if not (arr.toarray() == arr2.toarray()).all():
                                    print(arr)
                                    print(arr2)
        # The only different value is n_processes
        self.assertDictEqual(dict_error, {
            '.<study_ph>.n_processes.value': "1 and 16 don't match"})
# Ad-hoc manual runner (bypasses the unittest test discovery).
if __name__ == '__main__':
    cls = WITNESSParallelTest()
    cls.setUp()
    cls.test_01_exec_parallel()
| os-climate/witness-core | climateeconomics/tests/_l1_test_witness_parallel.py | _l1_test_witness_parallel.py | py | 4,198 | python | en | code | 7 | github-code | 36 |
17878440373 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
r"""
Measures based on noise measurements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_cjv:
- :py:func:`~mriqc.qc.anatomical.cjv` -- **coefficient of joint variation**
(:abbr:`CJV (coefficient of joint variation)`):
The ``cjv`` of GM and WM was proposed as objective function by [Ganzetti2016]_ for
the optimization of :abbr:`INU (intensity non-uniformity)` correction algorithms.
Higher values are related to the presence of heavy head motion and large
:abbr:`INU (intensity non-uniformity)` artifacts. Lower values are better.
.. _iqms_cnr:
- :py:func:`~mriqc.qc.anatomical.cnr` -- **contrast-to-noise ratio**
(:abbr:`CNR (contrast-to-noise ratio)`): The ``cnr`` [Magnota2006]_,
is an extension of the :abbr:`SNR (signal-to-noise Ratio)` calculation
to evaluate how separated the tissue distributions of GM and WM are.
Higher values indicate better quality.
.. _iqms_snr:
- :py:func:`~mriqc.qc.anatomical.snr` -- **signal-to-noise ratio**
(:abbr:`SNR (signal-to-noise ratio)`): calculated within the
tissue mask.
.. _iqms_snrd:
- :py:func:`~mriqc.qc.anatomical.snr_dietrich`: **Dietrich's SNR**
(:abbr:`SNRd (signal-to-noise ratio, Dietrich 2007)`) as proposed
by [Dietrich2007]_, using the air background as reference.
.. _iqms_qi2:
- :py:func:`~mriqc.qc.anatomical.art_qi2`: **Mortamet's quality index 2**
(:abbr:`QI2 (quality index 2)`) is a calculation of the goodness-of-fit
of a :math:`\chi^2` distribution on the air mask,
once the artifactual intensities detected for computing
the :abbr:`QI1 (quality index 1)` index have been removed [Mortamet2009]_.
Lower values are better.
Measures based on information theory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_efc:
- :py:func:`~mriqc.qc.anatomical.efc`:
The :abbr:`EFC (Entropy Focus Criterion)`
[Atkinson1997]_ uses the Shannon entropy of voxel intensities as
an indication of ghosting and blurring induced by head motion.
Lower values are better.
The original equation is normalized by the maximum entropy, so that the
:abbr:`EFC (Entropy Focus Criterion)` can be compared across images with
different dimensions.
.. _iqms_fber:
- :py:func:`~mriqc.qc.anatomical.fber`:
The :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_,
defined as the mean energy of image values within the head relative
to outside the head [QAP-measures]_.
Higher values are better.
Measures targeting specific artifacts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_inu:
- **inu_\*** (*nipype interface to N4ITK*): summary statistics (max, min and median)
of the :abbr:`INU (intensity non-uniformity)` field (bias field) as extracted
by the N4ITK algorithm [Tustison2010]_. Values closer to 1.0 are better, values
further from zero indicate greater RF field inhomogeneity.
.. _iqms_qi:
- :py:func:`~mriqc.qc.anatomical.art_qi1`:
Detect artifacts in the image using the method described in [Mortamet2009]_.
The :abbr:`QI1 (quality index 1)` is the proportion of voxels with intensity
corrupted by artifacts normalized by the number of voxels in the background.
Lower values are better.
.. figure:: ../resources/mortamet-mrm2009.png
The workflow to compute the artifact detection from [Mortamet2009]_.
.. _iqms_wm2max:
- :py:func:`~mriqc.qc.anatomical.wm2max`:
The white-matter to maximum intensity ratio is the median intensity
within the WM mask over the 95% percentile of the full intensity
distribution, that captures the existence of long tails due to
hyper-intensity of the carotid vessels and fat. Values
should be around the interval [0.6, 0.8].
Other measures
^^^^^^^^^^^^^^
.. _iqms_fwhm:
- **fwhm** (*nipype interface to AFNI*): The :abbr:`FWHM (full-width half maximum)` of
the spatial distribution of the image intensity values in units of voxels [Forman1995]_.
Lower values are better, higher values indicate a blurrier image. Uses the gaussian
width estimator filter implemented in AFNI's ``3dFWHMx``:
.. math ::
\text{FWHM} = \sqrt{-{\left[4 \ln{(1-\frac{\sigma^2_{X^m_{i+1,j}-X^m_{i,j}}}
{2\sigma^2_{X^m_{i,j}}}})\right]}^{-1}}
.. _iqms_icvs:
- :py:func:`~mriqc.qc.anatomical.volume_fraction` (**icvs_\***):
the
:abbr:`ICV (intracranial volume)` fractions of :abbr:`CSF (cerebrospinal fluid)`,
:abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. They should move within
a normative range.
.. _iqms_rpve:
- :py:func:`~mriqc.qc.anatomical.rpve` (**rpve_\***): the
:abbr:`rPVe (residual partial voluming error)` of :abbr:`CSF (cerebrospinal fluid)`,
:abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. Lower values are better.
.. _iqms_summary:
- :py:func:`~mriqc.qc.anatomical.summary_stats` (**summary_\*_\***):
Mean, standard deviation, 5% percentile and 95% percentile of the distribution
of background, :abbr:`CSF (cerebrospinal fluid)`, :abbr:`GM (gray-matter)` and
:abbr:`WM (white-matter)`.
.. _iqms_tpm:
- **overlap_\*_\***:
The overlap of the :abbr:`TPMs (tissue probability maps)` estimated from the image and
the corresponding maps from the ICBM nonlinear-asymmetric 2009c template. Higher
values are better.
.. math ::
\text{JI}^k = \frac{\sum_i \min{(\text{TPM}^k_i, \text{MNI}^k_i)}}
{\sum_i \max{(\text{TPM}^k_i, \text{MNI}^k_i)}}
.. topic:: References
.. [Dietrich2007] Dietrich et al., *Measurement of SNRs in MR images: influence
of multichannel coils, parallel imaging and reconstruction filters*, JMRI 26(2):375--385.
2007. doi:`10.1002/jmri.20969 <http://dx.doi.org/10.1002/jmri.20969>`_.
.. [Ganzetti2016] Ganzetti et al., *Intensity inhomogeneity correction of structural MR images:
a data-driven approach to define input algorithm parameters*. Front Neuroinform 10:10. 2016.
      doi:`10.3389/fninf.2016.00010 <http://dx.doi.org/10.3389/fninf.2016.00010>`_.
.. [Magnota2006] Magnotta, VA., & Friedman, L., *Measurement of signal-to-noise
and contrast-to-noise in the fBIRN multicenter imaging study*.
J Dig Imag 19(2):140-147, 2006. doi:`10.1007/s10278-006-0264-x
<http://dx.doi.org/10.1007/s10278-006-0264-x>`_.
.. [Mortamet2009] Mortamet B et al., *Automatic quality assessment in
structural brain magnetic resonance imaging*, Mag Res Med 62(2):365-372,
2009. doi:`10.1002/mrm.21992 <http://dx.doi.org/10.1002/mrm.21992>`_.
.. [Tustison2010] Tustison NJ et al., *N4ITK: improved N3 bias correction*,
IEEE Trans Med Imag, 29(6):1310-20,
2010. doi:`10.1109/TMI.2010.2046908 <http://dx.doi.org/10.1109/TMI.2010.2046908>`_.
.. [Shehzad2015] Shehzad Z et al., *The Preprocessed Connectomes Project
Quality Assessment Protocol - a resource for measuring the quality of MRI data*,
Front. Neurosci. Conference Abstract: Neuroinformatics 2015.
doi:`10.3389/conf.fnins.2015.91.00047 <https://doi.org/10.3389/conf.fnins.2015.91.00047>`_.
.. [Forman1995] Forman SD et al., *Improved assessment of significant activation in functional
magnetic resonance imaging (fMRI): use of a cluster-size threshold*,
Magn. Reson. Med. 33 (5), 636–647, 1995.
doi:`10.1002/mrm.1910330508 <https://doi.org/10.1002/mrm.1910330508>`_.
mriqc.qc.anatomical module
^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import os.path as op
from sys import version_info
from math import pi, sqrt
import numpy as np
import scipy.ndimage as nd
from scipy.stats import kurtosis # pylint: disable=E0611
DIETRICH_FACTOR = 1.0 / sqrt(2 / (4 - pi))
FSL_FAST_LABELS = {"csf": 1, "gm": 2, "wm": 3, "bg": 0}
PY3 = version_info[0] > 2
def snr(mu_fg, sigma_fg, n):
    r"""
    Calculate the :abbr:`SNR (Signal-to-Noise Ratio)` from a single
    foreground region:

    .. math::

        \text{SNR} = \frac{\mu_F}{\sigma_F\sqrt{n/(n-1)}},

    where :math:`\mu_F` and :math:`\sigma_F` are the mean and standard
    deviation of the foreground intensities.

    :param float mu_fg: mean of foreground.
    :param float sigma_fg: standard deviation of foreground.
    :param int n: number of voxels in foreground mask.

    :return: the computed SNR

    """
    # Bessel-style correction of the sample standard deviation.
    correction = sqrt(n / (n - 1))
    return float(mu_fg / (sigma_fg * correction))
def snr_dietrich(mu_fg, sigma_air):
    r"""
    Calculate the :abbr:`SNR (Signal-to-Noise Ratio)` with Dietrich's
    air-background estimator (eq. A.12 of [Dietrich2007]_), which corrects
    the air standard deviation for its Rayleigh distribution:

    .. math::

        \text{SNR} = \frac{\mu_F}{\sqrt{\frac{2}{4-\pi}}\,\sigma_\text{air}}.

    :param float mu_fg: mean of foreground.
    :param float sigma_air: standard deviation of the air surrounding the head ("hat" mask).

    :return: the computed SNR for the foreground segmentation

    """
    if sigma_air < 1.0:
        from .. import config

        # Degenerate (near-zero) air sigma: warn and pad it by one.
        config.loggers.interface.warning(
            f"SNRd - background sigma is too small ({sigma_air})"
        )
        sigma_air += 1.0
    snr_estimate = DIETRICH_FACTOR * mu_fg / sigma_air
    return float(snr_estimate)
def cnr(mu_wm, mu_gm, sigma_air):
    r"""
    Calculate the :abbr:`CNR (Contrast-to-Noise Ratio)` [Magnota2006]_.
    Higher values are better:

    .. math::

        \text{CNR} = \frac{|\mu_\text{GM} - \mu_\text{WM} |}{\sigma_B},

    where :math:`\sigma_B` is the standard deviation of the noise
    distribution within the air (background) mask.

    :param float mu_wm: mean of signal within white-matter mask.
    :param float mu_gm: mean of signal within gray-matter mask.
    :param float sigma_air: standard deviation of the air surrounding the head ("hat" mask).

    :return: the computed CNR

    """
    tissue_contrast = abs(mu_wm - mu_gm)
    return float(tissue_contrast / sigma_air)
def cjv(mu_wm, mu_gm, sigma_wm, sigma_gm):
    r"""
    Calculate the :abbr:`CJV (coefficient of joint variation)`, a proxy for
    the :abbr:`INU (intensity non-uniformity)` artifact [Ganzetti2016]_.
    Lower is better:

    .. math::

        \text{CJV} = \frac{\sigma_\text{WM} + \sigma_\text{GM}}{|\mu_\text{WM} - \mu_\text{GM}|}.

    :param float mu_wm: mean of signal within white-matter mask.
    :param float mu_gm: mean of signal within gray-matter mask.
    :param float sigma_wm: standard deviation of signal within white-matter mask.
    :param float sigma_gm: standard deviation of signal within gray-matter mask.

    :return: the computed CJV

    """
    spread = sigma_wm + sigma_gm
    separation = abs(mu_wm - mu_gm)
    return float(spread / separation)
def fber(img, headmask, rotmask=None):
    r"""
    Calculate the :abbr:`FBER (Foreground-Background Energy Ratio)`
    [Shehzad2015]_: the median energy of image values within the head
    relative to outside the head. Higher values are better:

    .. math::

        \text{FBER} = \frac{E[|F|^2]}{E[|B|^2]}

    :param numpy.ndarray img: input data
    :param numpy.ndarray headmask: a mask of the head (including skull, skin, etc.)
    :param numpy.ndarray rotmask: a mask of empty voxels inserted after a rotation of
      data

    """
    fg_energy = np.median(np.abs(img[headmask > 0]) ** 2)
    # Background = everything outside the head (and outside rotation gaps).
    bg_mask = np.ones_like(headmask, dtype=np.uint8)
    bg_mask[headmask > 0] = 0
    if rotmask is not None:
        bg_mask[rotmask > 0] = 0
    bg_energy = np.median(np.abs(img[bg_mask == 1]) ** 2)
    if bg_energy < 1.0e-3:
        return 0  # degenerate background; avoid dividing by ~zero
    return float(fg_energy / bg_energy)
def efc(img, framemask=None):
    r"""
    Calculate the :abbr:`EFC (Entropy Focus Criterion)` [Atkinson1997]_:
    the Shannon entropy of voxel intensities, an indication of motion-induced
    ghosting and blurring. Lower is better (0 = all energy in one voxel):

    .. math::

        \text{E} = - \sum_{j=1}^N \frac{x_j}{x_\text{max}}
        \ln \left[\frac{x_j}{x_\text{max}}\right]

    with :math:`x_\text{max} = \sqrt{\sum_{j=1}^N x^2_j}`, normalized by the
    maximum-entropy value so volumes of different size are comparable:

    .. math::

        \text{EFC} = \left( \frac{N}{\sqrt{N}} \, \log{\sqrt{N}^{-1}} \right) \text{E}

    :param numpy.ndarray img: input data
    :param numpy.ndarray framemask: a mask of empty voxels inserted after a rotation of
      data

    """
    if framemask is None:
        framemask = np.zeros_like(img, dtype=np.uint8)

    voxels = img[framemask == 0]
    n_vox = np.sum(1 - framemask)
    # Maximum EFC value (reached when all voxels share the same value).
    efc_max = 1.0 * n_vox * (1.0 / np.sqrt(n_vox)) * np.log(1.0 / np.sqrt(n_vox))
    # Total image energy.
    b_max = np.sqrt((voxels ** 2).sum())
    # The 1e-16 keeps the log finite for zero-valued voxels.
    entropy = np.sum((voxels / b_max) * np.log((voxels + 1e-16) / b_max))
    return float((1.0 / efc_max) * entropy)
def wm2max(img, mu_wm):
    r"""
    Calculate the :abbr:`WM2MAX (white-matter-to-max ratio)`: the mean WM
    intensity over the 99.95 percentile of the full intensity distribution,
    which captures long tails due to hyper-intense carotid vessels and fat.
    Values should fall around [0.6, 0.8]:

    .. math ::

        \text{WM2MAX} = \frac{\mu_\text{WM}}{P_{99.95}(X)}

    """
    near_max = np.percentile(img.reshape(-1), 99.95)
    return float(mu_wm / near_max)
def art_qi1(airmask, artmask):
    r"""
    Calculates :math:`\text{QI}_1` [Mortamet2009]_: the proportion of voxels
    whose intensity is corrupted by artifacts, normalized by the number of
    background voxels. Lower values are better:

    .. math ::

        \text{QI}_1 = \frac{1}{N} \sum\limits_{x\in X_\text{art}} 1

    :param numpy.ndarray airmask: input air mask, without artifacts
    :param numpy.ndarray artmask: input artifacts mask

    """
    # Voxels surviving the opening operation are counted as artifacts.
    n_art = artmask.sum()
    return float(n_art / (airmask.sum() + n_art))
def art_qi2(img, airmask, min_voxels=int(1e3), max_voxels=int(3e5), save_plot=True):
    r"""
    Calculates :math:`\text{QI}_2`, based on the goodness-of-fit of a centered
    :math:`\chi^2` distribution onto the intensity distribution of
    non-artifactual background (within the "hat" mask):

    .. math ::

        \chi^2_n = \frac{2}{(\sigma \sqrt{2})^{2n} \, (n - 1)!}x^{2n - 1}\, e^{-\frac{x}{2}}

    where :math:`n` is the number of coil elements.

    :param numpy.ndarray img: input data
    :param numpy.ndarray airmask: input air mask without artifacts
    :param int min_voxels: below this many background voxels the fit is skipped
    :param int max_voxels: cap on the random subsample used for the KDE/fit
    :param bool save_plot: render the fit to an SVG and return its path

    :return: tuple (goodness-of-fit value, path to the report figure)
    """
    from sklearn.neighbors import KernelDensity
    from scipy.stats import chi2
    from mriqc.viz.misc import plot_qi2

    # S. Ogawa was born -- fixed seed so the subsample is reproducible
    np.random.seed(1191935)
    data = img[airmask > 0]
    data = data[data > 0]

    # Write out figure of the fitting (placeholder in case plotting fails
    # or there is too little data to fit)
    out_file = op.abspath("error.svg")
    with open(out_file, "w") as ofh:
        ofh.write("<p>Background noise fitting could not be plotted.</p>")

    if len(data) < min_voxels:
        return 0.0, out_file

    modelx = data if len(data) < max_voxels else np.random.choice(data, size=max_voxels)

    x_grid = np.linspace(0.0, np.percentile(data, 99), 1000)

    # Estimate data pdf with KDE on a random subsample
    kde_skl = KernelDensity(
        bandwidth=0.05 * np.percentile(data, 98), kernel="gaussian"
    ).fit(modelx[:, np.newaxis])
    kde = np.exp(kde_skl.score_samples(x_grid[:, np.newaxis]))

    # Find cutoff: number of grid points (from the high end) where the KDE
    # still exceeds half its peak
    kdethi = np.argmax(kde[::-1] > kde.max() * 0.5)

    # Fit X^2 to the sub-95th-percentile intensities (32 = starting dof)
    param = chi2.fit(modelx[modelx < np.percentile(data, 95)], 32)
    chi_pdf = chi2.pdf(x_grid, *param[:-2], loc=param[-2], scale=param[-1])

    # Compute goodness-of-fit (gof): mean absolute KDE/chi2 gap on the tail
    gof = float(np.abs(kde[-kdethi:] - chi_pdf[-kdethi:]).mean())
    if save_plot:
        out_file = plot_qi2(x_grid, kde, chi_pdf, modelx, kdethi)

    return gof, out_file
def volume_fraction(pvms):
    r"""
    Computes the :abbr:`ICV (intracranial volume)` fractions
    corresponding to the (partial volume maps):

    .. math ::

        \text{ICV}^k = \frac{\sum_i p^k_i}{\sum\limits_{x \in X_\text{brain}} 1}

    :param list pvms: list of :code:`numpy.ndarray` of partial volume maps.

    """
    fractions = {}
    total_volume = 0
    for tissue, label in list(FSL_FAST_LABELS.items()):
        if label == 0:
            continue  # background is not part of the ICV
        fractions[tissue] = pvms[label - 1].sum()
        total_volume += fractions[tissue]
    # Normalize so the tissue fractions sum to 1.0.
    return {tissue: float(vol / total_volume) for tissue, vol in fractions.items()}
def rpve(pvms, seg):
    """
    Computes the :abbr:`rPVe (residual partial voluming error)`
    of each tissue class.

    .. math ::

        \\text{rPVE}^k = \\frac{1}{N} \\left[ \\sum\\limits_{p^k_i \
\\in [0.5, P_{98}]} p^k_i + \\sum\\limits_{p^k_i \\in [P_{2}, 0.5)} 1 - p^k_i \\right]

    :param list pvms: partial volume maps, one per tissue label.
    :param seg: unused here; kept for interface compatibility.
    """
    pvfs = {}
    for k, lid in list(FSL_FAST_LABELS.items()):
        if lid == 0:
            continue  # background has no partial-volume map
        # BUG FIX: work on a copy -- the original clipped and zeroed the
        # caller's partial-volume maps in place.
        pvmap = pvms[lid - 1].copy()
        pvmap[pvmap < 0.0] = 0.0
        pvmap[pvmap >= 1.0] = 1.0
        totalvol = np.sum(pvmap > 0.0)
        # Trim the extreme tails before accumulating the residual error.
        upth = np.percentile(pvmap[pvmap > 0], 98)
        loth = np.percentile(pvmap[pvmap > 0], 2)
        pvmap[pvmap < loth] = 0
        pvmap[pvmap > upth] = 0
        pvfs[k] = (
            pvmap[pvmap > 0.5].sum() + (1.0 - pvmap[pvmap <= 0.5]).sum()
        ) / totalvol
    return {k: float(v) for k, v in list(pvfs.items())}
def summary_stats(img, pvms, airmask=None, erode=True):
    r"""
    Estimates the mean, the standard deviation, the 95\%
    and the 5\% percentiles of each tissue distribution.
    .. warning ::
        Sometimes (with datasets that have been partially processed), the air
        mask will be empty. In those cases, the background stats will be zero
        for the mean, median, percentiles and kurtosis, the sum of voxels in
        the other remaining labels for ``n``, and finally the MAD and the
        :math:`\sigma` will be calculated as:
    .. math ::
        \sigma_\text{BG} = \sqrt{\sum \sigma_\text{i}^2}
    """
    from .. import config
    from statsmodels.robust.scale import mad
    # Check type of input masks
    dims = np.squeeze(np.array(pvms)).ndim
    if dims == 4:
        # If pvms is from FSL FAST, create the bg mask
        stats_pvms = [np.zeros_like(img)] + pvms
    elif dims == 3:
        # Single 3D map: derive background as the complement of foreground.
        stats_pvms = [np.ones_like(pvms) - pvms, pvms]
    else:
        raise RuntimeError(
            "Incorrect image dimensions ({0:d})".format(np.array(pvms).ndim)
        )
    if airmask is not None:
        # An explicit air mask replaces the synthesized background map.
        stats_pvms[0] = airmask
    labels = list(FSL_FAST_LABELS.items())
    if len(stats_pvms) == 2:
        # Only bg/fg available (3D input); relabel accordingly.
        labels = list(zip(["bg", "fg"], list(range(2))))
    output = {}
    for k, lid in labels:
        # Binarize: voxels with > 0.85 probability belong to this label.
        mask = np.zeros_like(img, dtype=np.uint8)
        mask[stats_pvms[lid] > 0.85] = 1
        if erode:
            # Erode to drop boundary voxels contaminated by partial voluming.
            struc = nd.generate_binary_structure(3, 2)
            mask = nd.binary_erosion(mask, structure=struc).astype(np.uint8)
        nvox = float(mask.sum())
        if nvox < 1e3:
            config.loggers.interface.warning(
                'calculating summary stats of label "%s" in a very small '
                "mask (%d voxels)",
                k,
                int(nvox),
            )
            if k == "bg":
                # Tiny/empty air mask: skip here, fall back to the synthetic
                # "bg" entry computed below from the other labels.
                continue
        output[k] = {
            "mean": float(img[mask == 1].mean()),
            "stdv": float(img[mask == 1].std()),
            "median": float(np.median(img[mask == 1])),
            "mad": float(mad(img[mask == 1])),
            "p95": float(np.percentile(img[mask == 1], 95)),
            "p05": float(np.percentile(img[mask == 1], 5)),
            "k": float(kurtosis(img[mask == 1])),
            "n": nvox,
        }
    if "bg" not in output:
        # Fallback background stats (see the docstring warning above).
        output["bg"] = {
            "mean": 0.0,
            "median": 0.0,
            "p95": 0.0,
            "p05": 0.0,
            "k": 0.0,
            "stdv": sqrt(sum(val["stdv"] ** 2 for _, val in list(output.items()))),
            "mad": sqrt(sum(val["mad"] ** 2 for _, val in list(output.items()))),
            "n": sum(val["n"] for _, val in list(output.items())),
        }
    if "bg" in output and output["bg"]["mad"] == 0.0 and output["bg"]["stdv"] > 1.0:
        # A zero MAD with a sizable stdv indicates a degenerate background;
        # estimate MAD from stdv via the Gaussian consistency factor.
        config.loggers.interface.warning(
            "estimated MAD in the background was too small (MAD=%f)",
            output["bg"]["mad"],
        )
        output["bg"]["mad"] = output["bg"]["stdv"] / DIETRICH_FACTOR
    return output
def _prepare_mask(mask, label, erode=True):
    """Binarize ``mask`` for ``label`` and optionally clean it with a morphological opening."""
    fgmask = mask.copy()
    if np.issubdtype(fgmask.dtype, np.integer):
        # Discrete segmentation: keep only voxels carrying this label.
        if isinstance(label, (str, bytes)):
            label = FSL_FAST_LABELS[label]
        # NOTE(review): this pair of assignments is order-dependent — for
        # label == 0 it would mark EVERY voxel; presumably never called with
        # the background label. TODO confirm against callers.
        fgmask[fgmask != label] = 0
        fgmask[fgmask == label] = 1
    else:
        # Probability map: threshold at 0.95 (values in (0.95, 1.0] become 1).
        fgmask[fgmask > 0.95] = 1.0
        fgmask[fgmask < 1.0] = 0
    if erode:
        # Create a structural element to be used in an opening operation.
        struc = nd.generate_binary_structure(3, 2)
        # Perform an opening operation on the background data.
        fgmask = nd.binary_opening(fgmask, structure=struc).astype(np.uint8)
    return fgmask
| pGarciaS/PREEMACS | scripts/mriqc/mriqc/qc/anatomical.py | anatomical.py | py | 21,630 | python | en | code | 8 | github-code | 36 |
4932293353 | from transformers import GPT2Tokenizer
import json
import matplotlib.pyplot as plt
## This file was used to find the length of our longest input in tokens and visualize the distribution of token length
# Measure GPT-2 token lengths of every record in combined_data.jsonl, keep
# those with at most 70 tokens, and plot the length distribution.
with open('./combined_data.jsonl', 'r') as json_file:
    json_list = list(json_file)
tokenizer = GPT2Tokenizer.from_pretrained("gpt2")
longestString = ''
longestLength = 0
listOfLengths = []
newList = []
for json_str in json_list:
    result = json.loads(json_str)
    string = result['text']
    # Token ids for this record; its length is the token count.
    listOfIds = tokenizer(string)['input_ids']
    tokens = len(listOfIds)
    if tokens <= 70:
        newList.append(result)
    listOfLengths.append(tokens)
    # if (length > longestLength):
    #     longestLength = length
    #     longestString = string
# Visualize the distribution of token lengths across all records.
plt.hist(listOfLengths)
plt.show()
# print(len(newList))
# with open('combined_and_cut_data.jsonl', 'w') as f:
#     for entry in newList:
#         json.dump(entry, f)
#         f.write('\n')
| brennanem/CS324FinalProject | check_tokens.py | check_tokens.py | py | 958 | python | en | code | 0 | github-code | 36 |
24609091 | from django.contrib.auth.models import User
from django.shortcuts import redirect, render, get_object_or_404
from .models import Post,Comment
from .forms import NewCommentForm
# Create your views here.
from qna.models import Question
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
@login_required(login_url='/login/')
def posts(request):
    """List all posts (newest first); on POST, create a new post from the form data."""
    if request.method == "POST":
        user = request.user
        description = request.POST.get("postDescription")
        url = request.POST.get("url")
        # The form uploads exactly one file under the "file" key.
        file = request.FILES['file']
        # Comma-separated URL string -> list; empty input becomes ['none'].
        url = list(url.split(","))
        if url == ['']:
            url = ['none']
        form = Post(user=user,description=description,url=url,file=file)
        form.save()
        return redirect('posts')
    all_post = Post.objects.all().order_by('-created_on')
    context={'all_post':all_post}
    return render(request,'post/post.html',context)
@login_required(login_url='/login/')
def like_unlike_post(request):
    """AJAX endpoint: toggle the current user's like on a post (clearing any dislike)."""
    user = request.user.pk
    if request.method == 'POST':
        post_id = request.POST.get('post_id')
        post_obj = Post.objects.get(id=post_id)
        profile = User.objects.get(pk=user)
        if profile in post_obj.likes.all():
            # Already liked -> un-like.
            post_obj.likes.remove(profile)
        else:
            post_obj.likes.add(profile)
            # Liking a post removes any existing dislike by the same user.
            if profile in post_obj.dislikes.all():
                post_obj.dislikes.remove(profile)
        post_obj.save()
        data = {
            # 'likes': post_obj.likes.all().count()
        }
        return JsonResponse(data, safe=True)
    return redirect('posts:main-post-view')
@login_required(login_url='/login/')
def dislike_post(request):
    """AJAX endpoint: toggle the current user's dislike on a post (clearing any like)."""
    user = request.user.pk
    if request.method == 'POST':
        post_id = request.POST.get('post_id')
        post_obj = Post.objects.get(id=post_id)
        profile = User.objects.get(pk=user)
        if profile in post_obj.dislikes.all():
            # Already disliked -> clear it.
            post_obj.dislikes.remove(profile)
        else:
            post_obj.dislikes.add(profile)
            # Disliking removes any existing like by the same user.
            if profile in post_obj.likes.all():
                post_obj.likes.remove(profile)
        post_obj.save()
        data = {
            # 'dislikes': post_obj.dislikes.all().count()
        }
        return JsonResponse(data, safe=True)
    return redirect('posts:main-post-view')
@login_required(login_url='/login/')
def star_post(request):
    """AJAX endpoint: toggle the current user's star (bookmark) on a post."""
    user = request.user.pk
    if request.method == 'POST':
        post_id = request.POST.get('post_id')
        post_obj = Post.objects.get(id=post_id)
        profile = User.objects.get(pk=user)
        if profile in post_obj.star.all():
            post_obj.star.remove(profile)
        else:
            post_obj.star.add(profile)
        post_obj.save()
        data = {
            # 'star': post_obj.star.all().count()
        }
        return JsonResponse(data,safe=True)
    return redirect('posts:main-post-view')
@login_required(login_url='/login/')
def postsingle(request, pk):
    """Show one post with its comments; on POST, attach a new comment by the current user."""
    # get_object_or_404 (imported at the top of this file) turns a missing
    # post into an HTTP 404 instead of an unhandled DoesNotExist (HTTP 500).
    v_post = get_object_or_404(Post, id=int(pk))
    comment_form = NewCommentForm()
    # The original tested ``request.method`` twice in a row; once suffices.
    if request.method == 'POST':
        comment_form = NewCommentForm(request.POST)
        if comment_form.is_valid():
            user_comment = comment_form.save(commit=False)
            # Bind the comment to this post and the logged-in author.
            user_comment.post = v_post
            user_comment.user = request.user
            user_comment.save()
            return redirect('viewpost', pk)
    comment = Comment.objects.filter(post=v_post)
    count = comment.count()
    context = {'v_post': v_post, 'comments': comment, 'comment_form': comment_form, 'count': count}
    return render(request, 'post/viewPost.html', context)
| adityachaudhary147/MindQ | post/views.py | views.py | py | 3,755 | python | en | code | 1 | github-code | 36 |
7209827647 | import unittest
import os.path
from bundle.bundle import Bundle
from bundle.types import BundleType
# List of commonly installed apps.
MAS_APPS = ["Pages.app", "Keynote.app", "Numbers.app", "WhatsApp.app", "Xcode.app", "The Unarchiver.app"]
# First candidate app found under /Applications (None until the loop sets it).
MAS_APP = None
for app in MAS_APPS:
    path = os.path.join("/Applications/", app)
    if os.path.exists(path):
        MAS_APP = path
        break
else:
    # The original ``assert(False and "msg")`` always failed WITHOUT showing
    # the message (``False and x`` is just ``False``); use the two-argument
    # assert form so the hint is actually printed.
    assert False, "Please install one of these free apps from the Mac App Store: {}".format(MAS_APPS)
class TestApplication(unittest.TestCase):
    """Tests for the Application class"""
    def setUp(self):
        # A Mac App Store app, if one of the known candidates is installed.
        if MAS_APP:
            self.mas_app = Bundle.make(MAS_APP)
        else:
            self.mas_app = None
        # The Calculator.app app has been part of macOS for as long as I can think.
        # There is no risk this app is going anywhere.
        self.system_app = Bundle.make("/Applications/Calculator.app")
    def test_bundle_type(self):
        # Both bundles must be classified as applications.
        self.assertEqual(self.mas_app.bundle_type, BundleType.APPLICATION)
        self.assertEqual(self.system_app.bundle_type, BundleType.APPLICATION)
    def test_mas_app(self):
        # Only the store-installed bundle should report as a MAS app.
        self.assertIsNotNone(self.mas_app)
        self.assertTrue(self.mas_app.is_mas_app())
        self.assertFalse(self.system_app.is_mas_app())
    def test_paths(self):
        # Since we don't know which MAS app is chosen, only the system app is tested here.
        self.assertEqual(self.system_app.executable_path(), "/Applications/Calculator.app/Contents/MacOS/Calculator")
        self.assertEqual(self.system_app.info_dictionary_path(), "/Applications/Calculator.app/Contents/Info.plist")
    def test_executable(self):
        bin = self.system_app.executable()
        self.assertIsNotNone(bin)
| 0xbf00/maap | tests/test_application.py | test_application.py | py | 1,761 | python | en | code | 8 | github-code | 36 |
1767000003 | from setuptools import setup, find_packages
REQUIREMENTS = []
with open("requirements.txt") as f:
for line in f.readlines():
line = line.strip()
if len(line) == 0:
continue
REQUIREMENTS.append(line)
setup(
name = "wallstreet",
version = "0.1",
packages = find_packages(exclude=["*.test", "*.test.*", "test.*", "test"]),
entry_points = {
"console_scripts" : [
'wallstreet = wallstreet.bin.__main__:main'
]
},
install_requires = REQUIREMENTS,
setup_requires=['pytest-runner'],
tests_require = ['pytest']
) | breakhearts/wallstreet | setup.py | setup.py | py | 605 | python | en | code | 0 | github-code | 36 |
6187883293 | from __future__ import annotations
from datetime import datetime, time
from discord.ext import tasks
from app.log import logger
from app.utils import is_last_month_day, is_sunday, catch_exception
from app.utils.message_stats_routine import UserStatsForCurrentDay, message_day_counter
from app.utils.data.user_stats import UserOverallStats, UserStatsForCurrentWeek, UserStatsForCurrentMonth, \
UserMaxStats, UserCurrentStats
class UserStats:
    """Scheduled discord.ext tasks that roll per-user message statistics into the database."""
    # All schedule times are expressed in the host's local timezone.
    tz = datetime.now().astimezone().tzinfo
    daily_time = time(hour=23, tzinfo=tz)
    weekly_time = time(hour=23, minute=3, tzinfo=tz)
    monthly_time = time(hour=23, minute=6, tzinfo=tz)
    @tasks.loop(time=daily_time)
    @catch_exception
    async def daily_routine(self):
        """Persist today's counters and refresh the per-day maximum records."""
        self.update_users_stats_by_end_of_day()
        self.update_user_max_stats_for_period("day")
        logger.info("Successfully updated user daily stats")
    @tasks.loop(time=weekly_time)
    @catch_exception
    async def weekly_routine(self):
        """Refresh per-week maximums — runs daily but only acts on Sundays."""
        if not is_sunday():
            return
        self.update_user_max_stats_for_period("week")
        logger.info("Successfully updated user weekly stats")
    @tasks.loop(time=monthly_time)
    @catch_exception
    async def monthly_routine(self):
        """Refresh per-month maximums — only acts on the last day of the month."""
        if not is_last_month_day():
            return
        self.update_user_max_stats_for_period("month")
        logger.info("Successfully updated user month stats")
    @staticmethod
    def update_users_stats_by_end_of_day():
        """Add today's in-memory counters to the overall/weekly/monthly tables in one transaction."""
        with UserMaxStats.begin() as connect:
            for user_stats_class in [UserOverallStats, UserStatsForCurrentWeek, UserStatsForCurrentMonth]:
                UserCurrentStats.add_or_update_user_stats(message_day_counter.authors, user_stats_class, connect)
    def update_user_max_stats_for_period(self, period: str):
        """Compare current stats for *period* against stored maximums and update the records."""
        users_new_data = self.get_current_users_stats(period)
        if not users_new_data:
            return
        messages_info, symbols_info = UserMaxStats.get_all_users_max_stats(period)
        if messages_info or symbols_info:
            grouped_old_user_info = self.group_users_stats(messages_info, symbols_info)
        else:
            grouped_old_user_info = {}
        UserMaxStats.compare_and_update_users_max_info(grouped_old_user_info, users_new_data, period)
    @staticmethod
    def get_current_users_stats(period: str) -> dict[int, UserStatsForCurrentDay] | None:
        """Return {user_id: stats} for *period* ("day" from memory, "week"/"month" from DB), else None."""
        users_info = {}
        if period == "day":
            # Today's counters live in memory, no DB round-trip needed.
            return message_day_counter.authors
        elif period == "week":
            result = UserCurrentStats.fetch_users_current_stats_for_period(UserStatsForCurrentWeek)
        elif period == "month":
            result = UserCurrentStats.fetch_users_current_stats_for_period(UserStatsForCurrentMonth)
        else:
            # Unknown period name.
            return
        for user_id, messages, symbols in result:
            users_info[user_id] = UserStatsForCurrentDay(amount_of_symbols=symbols, amount_of_messages=messages)
        return users_info
    @staticmethod
    def group_users_stats(messages_info: list, symbols_info: list) -> dict[int, UserStatsForCurrentDay]:
        """Merge the (user_id, amount) pairs of both metrics into one stats object per user."""
        user_ids = {user_id for stats_info in (messages_info, symbols_info) for user_id, _ in stats_info}
        messages_info = {user_id: amount for user_id, amount in messages_info}
        symbols_info = {user_id: amount for user_id, amount in symbols_info}
        users_info = {}
        for user_id in user_ids:
            # -100 marks a metric with no stored record for this user.
            amount_of_messages = messages_info.get(user_id, -100)
            amount_of_symbols = symbols_info.get(user_id, -100)
            users_info[user_id] = UserStatsForCurrentDay(
                amount_of_messages=amount_of_messages,
                amount_of_symbols=amount_of_symbols,
            )
        return users_info
user_stats = UserStats()
| range-kun/pituhon-bot | app/utils/message_stats_routine/user_stats_routine.py | user_stats_routine.py | py | 3,790 | python | en | code | 0 | github-code | 36 |
503262151 | from dataclasses import *
# frozen=True makes the class immutable.
# The class can be modified internally with a copy of itself using dataclasses.replace().
@dataclass(frozen=True)
class Rover:
    """An immutable rover; turning produces a new instance via dataclasses.replace()."""
    facing : str

    def turn_right(self):
        """Return a new Rover rotated 90 degrees clockwise."""
        directions = ["N", "E", "S", "W"]  # sorted clockwise
        # Wrap with modulo: the original ``index + 1`` raised IndexError when
        # facing "W" (index 3 + 1 is past the end of the list).
        return replace(self, facing=directions[(directions.index(self.facing) + 1) % len(directions)])
30586982057 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import wx
from regionfixer_core.version import version_string as rf_ver
from gui.version import version_string as gui_ver
class AboutWindow(wx.Frame):
    """Fixed-size "About" dialog showing version info and project links."""
    def __init__(self, parent, title="About"):
        wx.Frame.__init__(self, parent, title=title,
                          style=wx.CLOSE_BOX | wx.RESIZE_BORDER | wx.CAPTION)
        # Every windows should use panel as parent. Not doing so will
        # make the windows look non-native (very ugly)
        panel = wx.Panel(self)
        self.about1 = wx.StaticText(panel, style=wx.ALIGN_CENTER,
                                    label="Minecraft Region-Fixer (GUI) (ver. {0})\n(using Region-Fixer ver. {1})".format(gui_ver,rf_ver))
        self.about2 = wx.StaticText(panel, style=wx.ALIGN_CENTER,
                                    label="Fix problems in Minecraft worlds.")
        self.about3 = wx.StaticText(panel, style=wx.ALIGN_CENTER,
                                    label="Official-web:")
        self.link_github = wx.HyperlinkCtrl(panel, wx.ID_ABOUT,
                                            "https://github.com/Fenixin/Minecraft-Region-Fixer",
                                            "https://github.com/Fenixin/Minecraft-Region-Fixer",
                                            style=wx.ALIGN_CENTER)
        self.about4 = wx.StaticText(panel,
                                    style=wx.TE_MULTILINE | wx.ALIGN_CENTER,
                                    label="Minecraft forums post:")
        self.link_minecraft_forums = wx.HyperlinkCtrl(panel, wx.ID_ABOUT,
                                                      "http://www.minecraftforum.net/topic/302380-minecraft-region-fixer/",
                                                      "http://www.minecraftforum.net/topic/302380-minecraft-region-fixer/",
                                                      style=wx.ALIGN_CENTER)
        self.close_button = wx.Button(panel, wx.ID_CLOSE)
        # Stack all widgets vertically, centered.
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.about1, 0, wx.ALIGN_CENTER | wx.TOP, 10)
        self.sizer.Add(self.about2, 0, wx.ALIGN_CENTER| wx.TOP, 20)
        self.sizer.Add(self.about3, 0, wx.ALIGN_CENTER | wx.TOP, 20)
        self.sizer.Add(self.link_github, 0, wx.ALIGN_CENTER | wx.ALL, 5)
        self.sizer.Add(self.about4, 0, wx.ALIGN_CENTER | wx.TOP, 20)
        self.sizer.Add(self.link_minecraft_forums, 0,wx.ALIGN_CENTER | wx.ALL, 5)
        self.sizer.Add(self.close_button, 0, wx.ALIGN_CENTER | wx.ALL, 20)
        # Fit sizers and make the windows not resizable
        panel.SetSizerAndFit(self.sizer)
        self.sizer.Fit(self)
        size = self.GetSize()
        self.SetMinSize(size)
        self.SetMaxSize(size)
        self.Bind(wx.EVT_BUTTON, self.OnClose, self.close_button)
    def OnClose(self, e):
        # Hide rather than destroy so the dialog can be reopened cheaply.
        self.Show(False)
| Fenixin/Minecraft-Region-Fixer | gui/about.py | about.py | py | 2,684 | python | en | code | 509 | github-code | 36 |
40142994549 | import random
play = True
# All rolls ever made are kept here across rounds ("stored rolls").
# Renamed from ``list``, which shadowed the builtin of the same name.
stored_rolls = []


# Print out all stored rolls
def list_function(rolls):
    for value in rolls:
        print(value)


# Add all stored rolls up (was named over the builtin ``sum`` locally before)
def sum_function(rolls):
    total = 0
    for value in rolls:
        total += value
    return total


# Get the average of the stored rolls, rounded to 2 decimals
def ave_function(total, rolls):
    return round(total / len(rolls), 2)


# The helpers above were re-defined on every loop iteration in the original;
# defining them once is equivalent and avoids the wasted work.
while play:
    count = 0
    diceTimes = int(input("How many times would you like to roll?\n"))
    print("============")
    while count < diceTimes:
        stored_rolls.append(random.randint(1, 6))
        count += 1
    list_function(stored_rolls)
    total = sum_function(stored_rolls)
    print("The sum of the stored rolls is", total)
    average = ave_function(total, stored_rolls)
    print("The average of the stored rolls is", average)
    print("============")
    answer = input("Would you like to roll again? Y/N\n")
    print("============")
    if answer.lower() == "n":
        play = False
        print("Thank you!")
| DorisYY/Challenge2 | C2.py | C2.py | py | 1,096 | python | en | code | 0 | github-code | 36 |
71232207785 | import torch
from typing import Optional, Dict, Any, Tuple
from transformers import (
AutoConfig,
AutoTokenizer,
T5ForConditionalGeneration,
MT5ForConditionalGeneration,
MT5EncoderModel
)
from parlai.agents.hugging_face.t5 import T5Agent, ParlaiT5Model
#from transformers.models.mt5.modeling_mt5 import MT5Model
try:
from transformers.models.t5.modeling_t5 import T5Stack
except ModuleNotFoundError:
# Prior versions of transformers package do not have T5Stack
T5Stack = object
from parlai.agents.hugging_face.hugging_face import HF_VERSION
from parlai.agents.hugging_face.dict import mT5DictionaryAgent
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.torch_agent import Batch, TorchAgent
from parlai.core.torch_generator_agent import TorchGeneratorAgent, TorchGeneratorModel
def check_hf_version(v: Tuple[int, int]) -> bool:
    """
    Check that the HF (major, minor) version is at least 4.3.
    """
    # Lexicographic tuple comparison expresses "major > 4, or 4.x with x >= 3".
    return v >= (4, 3)
def build_mt5(opt: Opt) -> MT5ForConditionalGeneration:
    """Load the pretrained mT5 model named by ``opt['mt5_model_arch']`` with the configured dropout."""
    # mT5 support landed in transformers 4.3; refuse to run on older versions.
    if not check_hf_version(HF_VERSION):
        raise RuntimeError('Must use transformers package >= 4.3 to use t5')
    return MT5ForConditionalGeneration.from_pretrained(
        opt['mt5_model_arch'], dropout_rate=opt['mt5_dropout']
    )
def set_device(func):
    """
    Decorator for setting device.
    HF's model parallel uses `torch.cuda.set_device`, which does not vibe well with
    ParlAI.
    """
    from functools import wraps

    # wraps() preserves the decorated function's __name__/__doc__/signature;
    # without it every wrapped method showed up as "wrap" in tracebacks.
    @wraps(func)
    def wrap(*args, **kwargs):
        # Pin device 0 before and after the call so HF's parallelism cannot
        # leave a different CUDA device selected.
        if torch.cuda.is_available():
            torch.cuda.set_device('cuda:0')
        ret = func(*args, **kwargs)
        if torch.cuda.is_available():
            torch.cuda.set_device('cuda:0')
        return ret

    return wrap
##############
# mT5 Modules #
##############
class ParlaimT5Encoder(torch.nn.Module):
    """Adapts a HuggingFace T5Stack encoder to ParlAI's encoder interface."""
    def __init__(self, opt: Opt, encoder: T5Stack, padding_idx: Optional[int] = None):
        super().__init__()
        self.stack = encoder
        self.padding_idx = padding_idx
        self.paralleled = not opt[
            'mt5_model_parallel'
        ]  # need to parallel in forward; bug in HF
    @set_device
    def forward(
        self,
        input: torch.LongTensor,
        positions: Optional[torch.LongTensor] = None,
        segments: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, torch.BoolTensor]:
        """
        Forward pass.
        :param LongTensor[batch,seqlen] input:
            The input IDs
        :param LongTensor[batch,seqlen] positions:
            Positions for input IDs
        :param LongTensor[batch,seqlen] segments:
            If provided, additionally adds ``segments`` as extra embedding features.
        :return: tuple of (encoder hidden states, attention mask).
        """
        if not self.paralleled:
            # Deferred parallelization (see the HF bug note in __init__).
            self.stack.parallelize()
        # Attend to every non-padding token.
        mask = input != self.padding_idx
        outputs = self.stack(input, attention_mask=mask, output_hidden_states=False)
        # Move any tensors back onto the input's device (model parallel may
        # have produced them elsewhere).
        for k in outputs:
            if torch.is_tensor(outputs[k]):
                outputs[k] = outputs[k].to(input.device)
        return outputs[0], mask
class ParlaimT5Decoder(torch.nn.Module):
    """Adapts a HuggingFace T5Stack decoder to ParlAI's decoder interface."""
    def __init__(self, opt: Opt, decoder: T5Stack, padding_idx: Optional[int] = None):
        super().__init__()
        self.stack = decoder
        self.padding_idx = padding_idx
        self.paralleled = not opt[
            'mt5_model_parallel'
        ]  # need to parallel in forward; bug in HF
    @set_device
    def forward(
        self, input: torch.LongTensor, encoder_state: Tuple[Any], incr_state=None
    ):
        """
        Forward pass.
        :param LongTensor[batch,seqlen] input:
            The decoder inputs (partial or full decoded token IDs).
        :param encoder_state:
            Output from the encoder module forward pass.
        :param incr_state:
            The incremental state: a dictionary whose keys index the layers and whose
            values contain the incremental state for each layer.
        """
        if not self.paralleled:
            # Deferred parallelization (see the HF bug note in __init__).
            self.stack.parallelize()
        encoder_output, encoder_mask = encoder_state
        mask = input != self.padding_idx
        mask[:, 0] = True  # first token is pad
        outputs = self.stack(
            input_ids=input,
            attention_mask=mask,
            encoder_hidden_states=encoder_output.to(input.device),
            encoder_attention_mask=encoder_mask.to(input.device),
        )
        # Return hidden states on the input's device; incr_state is passed
        # through unchanged.
        return outputs[0].to(input.device), incr_state
class ParlaimT5Model(ParlaiT5Model):
    """
    Wrap mT5 in ParlAI.
    """
    def __init__(self, opt, dictionary):
        # mT5 uses the pad token as the decoder start token.
        self.pad_idx = dictionary[dictionary.null_token]
        self.start_idx = self.pad_idx
        self.end_idx = dictionary[dictionary.end_token]
        super().__init__(self.pad_idx, self.start_idx, self.end_idx)
        self.mt5 = build_mt5(opt)
        self.encoder = ParlaimT5Encoder(opt, self.mt5.get_encoder(), self.pad_idx)
        self.decoder = ParlaimT5Decoder(opt, self.mt5.get_decoder(), self.pad_idx)
    @set_device
    def output(self, tensor):
        """
        Compute output logits.
        """
        # Rescale before the LM head (T5 ties weights to the embedding and
        # compensates with a 1/sqrt(d_model) factor).
        tensor = tensor * (self.mt5.model_dim ** -0.5)
        lm_logits = self.mt5.lm_head(tensor)
        return lm_logits
class mT5Agent(T5Agent):
    """
    mT5 Agent.
    Relies on the mT5 model implemented in huggingface
    """
    @classmethod
    def add_cmdline_args(
        cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
    ) -> ParlaiParser:
        """Register the mT5-specific command-line options on top of the T5 ones."""
        super().add_cmdline_args(parser, partial_opt=partial_opt)
        group = parser.add_argument_group('mT5 Args')
        group.add_argument(
            '--mt5-model-arch',
            type=str,
            default='mt5-base',
            choices=["mt5-small", "mt5-base", "mt5-large", "mt5-xxl"],
        )
        group.add_argument(
            '--mt5-model-parallel',
            type='bool',
            default=False,
            help='use HF model parallel',
        )
        group.add_argument(
            '--mt5-dropout', type=float, default=0.0, help='Dropout for mT5'
        )
        return parser
    def build_model(self) -> 'ParlaimT5Model':
        """
        Build and return model.
        """
        model = ParlaimT5Model(self.opt, self.dict)
        if self.opt['mt5_model_parallel']:
            model.mt5.parallelize()
        return model
    def build_dictionary(self):
        """
        Overrides TorchAgent.build_dictionary to use mt5 dict.
        """
        return mT5DictionaryAgent(self.opt)
2019695848 | #!/usr/bin/env python3.6
# -*-encoding=utf8-*-
import time
import pyquery
import requests
from fake_useragent import UserAgent
from spider.log import logging as log
class Get:
    """Fetch a URL with retries using requests and a random IE User-Agent."""
    def __init__(self, url: str, try_time=9, try_sec=2):
        """
        :param url: the URL to fetch.
        :param try_time: extra attempts allowed after the first one.
        :param try_sec: seconds to sleep between failed attempts.
        """
        ua = UserAgent()
        self._url = url
        self._try_time = try_time
        self._try_sec = try_sec
        self._header = {
            'User-Agent': ua.ie,
            'Accept-Encoding': ', '.join(('gzip', 'deflate')),
            'Accept': '*/*',
            'Connection': 'keep-alive',
        }
    def html(self)-> str:
        """Return the page's HTML, or '' if every attempt fails."""
        text = ''
        retry = 1
        # try_time + 1 total attempts — same bounds as the original loop.
        for _ in range(self._try_time + 1):
            try:
                # ``with`` closes the session on every path; the original
                # leaked it on success (its close() sat after ``break``).
                with requests.Session() as s:
                    r = s.get(self._url, headers=self._header)
                    if r.status_code == requests.codes.ok:
                        doc = pyquery.PyQuery(r.text.encode(r.encoding))
                        text = doc.html()
                        break
            except Exception:
                log.warning(self._url + '重试:' + str(retry))
                retry += 1
                time.sleep(self._try_sec)
        return text
    def binary(self):
        """Return the raw response body as bytes, or None if every attempt fails."""
        binary = None
        retry = 1
        for _ in range(self._try_time + 1):
            try:
                with requests.Session() as s:
                    r = s.get(self._url)
                    if r.status_code == requests.codes.ok:
                        binary = r.content
                        break
            except Exception:
                log.warning(self._url + '重试:' + str(retry))
                retry += 1
                time.sleep(self._try_sec)
        return binary
| dingjingmaster/library_t | python/spider/spider/get.py | get.py | py | 1,374 | python | en | code | 0 | github-code | 36 |
1641551953 | # -*- coding: utf-8 -*-
import json
from datetime import datetime
from twisted.internet import reactor
from twisted.web.http import BAD_REQUEST
from twisted.web.server import NOT_DONE_YET
from config.config import CHAT_PER_PAGE, CHAT_CONNECTION_INTERVAL
from exception import BadRequest
from helper.chat_cmd import ChatCmdManager
from helper.model_control import get_chat_newer_than, get_chat_page,\
create_chat
from helper.resource import YuzukiResource, need_anybody_permission
from helper.template import render_template
from model.chat import Chat as ChatModel
from model.user import User as UserModel
def yuzuki_convert_int(num_str):
    """Parse ``num_str`` as an int; malformed input becomes a BadRequest."""
    try:
        return int(num_str)
    except (TypeError, ValueError):
        # TypeError covers non-numeric junk such as None, which previously
        # escaped as an unhandled error instead of a 400.
        raise BadRequest()
class Chat(YuzukiResource):
    """Chat page resource: renders the UI and mounts the user/message children."""
    isLeaf = False
    def __init__(self):
        YuzukiResource.__init__(self)
        self.putChild("user", ChatUser())
        self.putChild("message", ChatMessage())
    @need_anybody_permission
    def render_GET(self, request):
        """Render chat.html with pagination data and all known user nicknames."""
        page = request.get_argument("page", None)
        chat_total_count = request.dbsession.query(ChatModel).count()
        page_total = chat_total_count / CHAT_PER_PAGE
        user_nicknames = request.dbsession.query(UserModel.nickname).all()
        plucked_user_nicknames = [nickname for (nickname, ) in user_nicknames]
        # Ceil the page count: a partially filled last page still needs a
        # page. (The original tested ``page_total % CHAT_PER_PAGE``, i.e. the
        # page COUNT against the page size, which miscounted pages.)
        if chat_total_count % CHAT_PER_PAGE != 0:
            page_total += 1
        context = {
            "CHAT_PER_PAGE": CHAT_PER_PAGE,
            "page": page,
            "page_total": page_total,
            "user_nicknames": json.dumps(plucked_user_nicknames)
        }
        return render_template("chat.html", request, context)
class ChatUser(YuzukiResource):
    """Parent resource for user presence: data/out/stream share one ChatUserStream."""
    isLeaf = False
    def __init__(self):
        YuzukiResource.__init__(self)
        # The stream instance is shared so data/out can see its user pool.
        stream = ChatUserStream()
        self.putChild("data", ChatUserData(stream))
        self.putChild("out", ChatUserOut(stream))
        self.putChild("stream", stream)
class ChatMessage(YuzukiResource):
    """Parent resource for chat messages: exposes /data (fetch) and /stream (long-poll + post)."""
    isLeaf = False
    def __init__(self):
        YuzukiResource.__init__(self)
        self.putChild("data", ChatMessageData())
        self.putChild("stream", ChatMessageStream())
class ChatMessageStream(YuzukiResource):
    """Long-poll endpoint: GET parks the request; POST stores a chat line and wakes all waiters."""
    def __init__(self):
        YuzukiResource.__init__(self)
        self.request_pool = list()
        self.cmd_manager = ChatCmdManager()
    @need_anybody_permission
    def render_GET(self, request):
        # Park the connection until the next message arrives (long-poll).
        self.request_pool.append(request)
        return NOT_DONE_YET
    @need_anybody_permission
    def render_POST(self, request):
        content = request.get_argument("content")
        if content.startswith("/"):
            # Slash prefix -> chat command; a command error becomes a 400.
            chat, err = self.cmd_manager.process_cmd(request, content)
            if err:
                request.setResponseCode(BAD_REQUEST)
                return err
        else:
            chat = create_chat(request, content)
        request.dbsession.add(chat)
        request.dbsession.commit()
        # Wake every parked long-poll request, best-effort.
        for req in self.request_pool:
            try:
                req.write("message coming")
                req.finish()
            except Exception:
                # A waiter may have disconnected; ignore it and notify the
                # rest. (Was a bare ``except:``, which also swallowed
                # SystemExit/KeyboardInterrupt.)
                pass
        self.request_pool = []
        return "chat posted"
class ChatMessageData(YuzukiResource):
    """JSON endpoint returning chat messages, either newer than ?id= or by ?page=."""
    @need_anybody_permission
    def render_GET(self, request):
        chat_id = request.get_argument("id", None)
        page = request.get_argument("page", None)
        # At least one selector is required.
        if not chat_id and not page:
            raise BadRequest()
        if chat_id:
            # ?id= wins over ?page= when both are supplied.
            chat_id = yuzuki_convert_int(chat_id)
            chats = get_chat_newer_than(request, chat_id)
        else:
            page = yuzuki_convert_int(page)
            chats = get_chat_page(request, page)
        data = [chat.to_dict() for chat in chats]
        # Oldest first, by message uid.
        data = sorted(data, key=lambda c: c["uid"])
        request.setNoCache()
        return json.dumps(data)
class ChatUserStream(YuzukiResource):
    """Presence long-poll: tracks connected users and tells clients when the user list changes."""
    def __init__(self):
        YuzukiResource.__init__(self)
        # Parked long-poll requests waiting for a presence change.
        self.request_pool = list()
        # user -> datetime of last connection (aged out after the interval).
        self.user_pool = dict()
    def notify_all(self):
        """Answer every parked request with "refresh" and clear the pool."""
        for req in self.request_pool:
            if not req.finished:
                req.write("refresh")
                req.finish()
        self.request_pool = list()
    def send_refresh_signal(self, request):
        """Timer callback: release a single long-poll before the client times out."""
        if request in self.request_pool:
            self.request_pool.remove(request)
        if not request.finished:
            request.write("refresh")
            request.finish()
    def response_failed(self, err, request, call):
        """Errback for a dropped connection: cancel its timer and refresh everyone."""
        call.cancel()
        if request in self.request_pool:
            self.request_pool.remove(request)
        self.notify_all()
    @need_anybody_permission
    def render_GET(self, request):
        self.request_pool.append(request)
        # Release this poll shortly before CHAT_CONNECTION_INTERVAL elapses.
        call = reactor.callLater(CHAT_CONNECTION_INTERVAL - 5,
                                 self.send_refresh_signal, request)
        request.notifyFinish().addErrback(self.response_failed, request, call)
        refresh_flag = False
        # A newly seen user means the list changed.
        if request.user not in self.user_pool:
            refresh_flag = True
        self.user_pool[request.user] = datetime.now()
        # Age out users whose last connection is older than the interval;
        # any expiry also means the list changed.
        new_user_pool = dict()
        for user, connection_time in self.user_pool.iteritems():
            if (datetime.now() - connection_time).seconds <= \
                    CHAT_CONNECTION_INTERVAL:
                new_user_pool[user] = connection_time
            else:
                refresh_flag = True
        self.user_pool = new_user_pool
        if refresh_flag:
            # The list changed: answer immediately instead of parking.
            self.request_pool.remove(request)
            self.notify_all()
            return "refresh"
        else:
            return NOT_DONE_YET
class ChatUserData(YuzukiResource):
    """JSON endpoint listing the users currently connected to the chat."""
    def __init__(self, stream):
        YuzukiResource.__init__(self)
        self.stream = stream
    @need_anybody_permission
    def render_GET(self, request):
        # One entry per connected user, ordered by uid for a stable payload.
        entries = [
            {"user_id": connected.uid, "user_nickname": connected.nickname}
            for connected in self.stream.user_pool
        ]
        entries.sort(key=lambda entry: entry["user_id"])
        request.setNoCache()
        return json.dumps(entries)
class ChatUserOut(YuzukiResource):
    """Endpoint a client hits on leaving: drops the user from presence and refreshes watchers."""
    def __init__(self, stream):
        YuzukiResource.__init__(self)
        self.stream = stream
    @need_anybody_permission
    def render_GET(self, request):
        if request.user in self.stream.user_pool:
            del (self.stream.user_pool[request.user])
        # Tell every presence long-poll that the user list changed.
        self.stream.notify_all()
        return "out"
| PoolC/Yuzuki | resource/chat.py | chat.py | py | 6,624 | python | en | code | 10 | github-code | 36 |
12532858756 | from setuptools import setup
with open('README.md') as f:
long_description = f.read()
setup(
name = "moderate",
version = "0.1",
license = 'MIT',
description = "A Python Distrubted System",
author = 'Thomas Huang',
url = 'https://github.com/thomashuang/Moderate',
packages = ['moderate', 'moderate.queue'],
install_requires = ['setuptools',
],
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: Distrubted System',
],
long_description=long_description,
) | whiteclover/Moderate | setup.py | setup.py | py | 676 | python | en | code | 0 | github-code | 36 |
8365368260 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Date : 2019/7/18 10:04
# @Version : Python 3.7.1
import os
import re
class Infinit:
    # An iterator that never ends: every next() yields None, so
    # ``for _ in Infinit()`` below acts as an infinite loop driver.
    def __iter__(self):
        return self
    def __next__(self):
        return None
def connect():
    """Connect adb to the NoxPlayer emulator and record the attached serials in the global ``devices_list``."""
    # Find the emulator's PID in Task Manager, then look up its adb port via cmd.
    os.popen('adb connect 127.0.0.1:62001')
    out = os.popen('adb devices').read()
    global devices_list
    # Skip the "List of devices attached" header (first 25 chars), drop the
    # "\tdevice" state column and split into one serial per line.
    devices_list = re.sub('\tdevice', '', out[25:]).strip().split('\n')
    print('设备已连接:%s' % devices_list)
def Input(Num):
    """Type ``Num`` into the device and press ENTER (keyevent 66)."""
    # Target the emulator serial explicitly when several devices are attached.
    target = "adb -s 127.0.0.1:62001" if len(devices_list) > 1 else "adb"
    os.system(target + " shell input text \"" + Num + "\"")
    os.system(target + " shell input keyevent 66")
if __name__ == '__main__':
    connect()
    # Infinit() yields None forever, so this keeps prompting until killed.
    for i in Infinit():
        Num = input('请输入内容:')
        Input(Num)
| Xiexinxmh/master | python tools/Android emulator code scanning input(for single-data).py | Android emulator code scanning input(for single-data).py | py | 984 | python | en | code | 0 | github-code | 36 |
32697179622 | import pandas as pd
import os.path
# needs to put this file under sensing folder
class Attr:
    """Loads every available per-user activity CSV (activity_u00 .. activity_u59)."""
    def __init__(self):
        self.activity = []
        # %02d zero-pads single digits, covering both the u0X and uXX names.
        for idx in range(60):
            csv_path = "activity/activity_u%02d.csv" % idx
            if os.path.exists(csv_path):
                self.activity.append(pd.read_csv(csv_path))
    def display(self):
        print(self.activity[0])
# Smoke run: load whatever activity files exist and show the first frame.
a = Attr()
a.display()
| z5036602/ROCK_and_ROLL | read.py | read.py | py | 643 | python | en | code | 0 | github-code | 36 |
1379123460 | import cv2
import time
from eye_tracking import EyeTracking
# Live demo loop: track gaze with the project's EyeTracking helper and
# overlay direction, pupil coordinates and an attention score on the feed.
eye_tracking = EyeTracking()
webcam = cv2.VideoCapture(0)  # default camera
while True:
    _, frame = webcam.read()
    if frame is None:
        # Camera disconnected or produced no frame: stop the loop.
        break
    eye_tracking.refresh(frame)
    frame = eye_tracking.annotated_frame()
    text = ""
    attention_text = ""
    # Gaze direction drives the attention score: looking away decrements
    # it, looking at the centre increments it.
    if eye_tracking.is_blinking():
        text = "Blinking"
    elif eye_tracking.is_right():
        # print('right')
        eye_tracking.is_attention -= 1
        text = "Looking right"
    elif eye_tracking.is_left():
        # print('left')
        eye_tracking.is_attention -= 1
        text = "Looking left"
    elif eye_tracking.is_center():
        # print('center')
        eye_tracking.is_attention += 1
        text = "Looking center"
    # Clamp the attention score to the [0, 100] range.
    if eye_tracking.is_attention > 100:
        eye_tracking.is_attention = 100
    elif eye_tracking.is_attention < 0:
        eye_tracking.is_attention = 0
    # Below 10 is treated as "not paying attention".
    if eye_tracking.is_attention < 10:
        attention_text = "Cheer up"
    else:
        attention_text = "Good!"
    if eye_tracking.is_focus():
        print('focus!')
    else:
        print('hey!')
    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)
    left_pupil = eye_tracking.pupil_left_coords()
    right_pupil = eye_tracking.pupil_right_coords()
    attention = eye_tracking.is_attention
    method = eye_tracking.get_method()
    cv2.putText(frame, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Attention: " + str(attention), (90, 200), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, method, (90, 235), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Focus?: " + attention_text, (90, 270), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.imshow("Demo", frame)
    # Exit on the ESC key (ASCII 27).
    if cv2.waitKey(10) == 27:
        break
webcam.release()
cv2.destroyAllWindows() | dead4s/SpaHeron_MachineLearning_UXIS | eye_tracking/main.py | main.py | py | 2,111 | python | en | code | 3 | github-code | 36 |
37411929075 | import os
import numpy as np
import matplotlib.pyplot as plt
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
# Render one PNG frame per viewing angle from the fly-around model output;
# the frames can then be stitched into an animation.
# Create output directory if it does not already exist
if not os.path.exists('frames'):
    os.mkdir('frames')
# Open model
m = ModelOutput('flyaround_cube.rtout')
# Read image from model (surface brightness at a distance of 300 pc,
# in MJy/sr)
image = m.get_image(distance=300 * pc, units='MJy/sr')
# image.val is now an array with four dimensions (n_view, n_y, n_x, n_wav)
for iview in range(image.val.shape[0]):
    # Open figure and create axes
    fig = plt.figure(figsize=(3, 3))
    ax = fig.add_subplot(1, 1, 1)
    # This is the command to show the image. The parameters vmin and vmax are
    # the min and max levels for the grayscale (remove for default values).
    # The colormap is set here to be a heat map. Other possible heat maps
    # include plt.cm.gray (grayscale), plt.cm.gist_yarg (inverted grayscale),
    # plt.cm.jet (default, colorful). The np.sqrt() is used to plot the
    # images on a sqrt stretch. Only the first wavelength plane is shown.
    ax.imshow(np.sqrt(image.val[iview, :, :, 0]), vmin=0, vmax=np.sqrt(2000.),
              cmap=plt.cm.gist_heat, origin='lower')
    # Save figure. The facecolor='black' and edgecolor='black' are for
    # esthetics, and hide the axes
    fig.savefig('frames/frame_%05i.png' % iview,
                facecolor='black', edgecolor='black')
    # Close figure to free memory between frames
    plt.close(fig)
| hyperion-rt/hyperion | docs/tutorials/scripts/flyaround_cube_animate.py | flyaround_cube_animate.py | py | 1,402 | python | en | code | 51 | github-code | 36 |
70306634025 | from audioop import reverse
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http.response import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from registration.forms import SignupForm
from user.models import Person, Studentity, University, Department, Tag
# registration is done in this view
# @transaction.atomic
def signup(request):
    """Handle user registration.

    GET renders the empty signup form.  A valid POST creates the User,
    an attached Person and a Studentity record (department initially the
    'unknown' placeholder), then redirects to initial tag selection.
    """
    if request.method == 'GET':
        return render(request, 'sign-up.html', {'form': SignupForm()})
    # elif request.method == 'POST'
    form = SignupForm(request.POST)
    if form.is_valid():
        # NOTE(review): create_user() already saves the user, so the
        # explicit user.save() below looks redundant -- confirm before
        # removing.
        user = User.objects.create_user(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
        p = Person()
        p.user = user
        user.save()
        p.save()
        s = Studentity()
        s.person = p
        s.student_id = form.cleaned_data['student_id']
        # Department is set to the 'unknown' placeholder at signup time.
        s.department = Department.objects.get(name='unknown')
        s.save()
        return HttpResponseRedirect(reverse('registration_select_initial_tags', args=[user.username, p.id]))
    else:
        # Invalid submission: redisplay the form with an error banner.
        return render(request, 'sign-up.html', {'form': form, 'status': 'Notice errors below:'})
# after signup, initial tags will be selected by the new user
def select_initial_tags(request, username, identifier):
    """Let a freshly registered user pick top-level tags of interest.

    GET renders the tag-selection page; POST attaches every super tag
    whose name appears in the submitted data to the person, then
    re-renders the same page.
    """
    super_tags = Tag.objects.filter(parent=None)
    if request.method == 'GET':
        return render(request, 'sing-up-tags.html', {'super_tags': super_tags})
    # POST: record each selected top-level tag for the person.
    person = Person.objects.get(id=identifier)
    for tag in super_tags:
        if tag.name in request.POST:
            person.interested_tags.add(tag)
    return render(request, 'sing-up-tags.html', {'super_tags': super_tags})
| Hosseinyousefi23/smallApp | registration/views.py | views.py | py | 1,854 | python | en | code | 0 | github-code | 36 |
29696828936 | import utils
import numpy as np
def parse_input(path):
    """Read the rock paths and rasterise them into a 2-D grid.

    Cell values: 0 = air, 1 = rock (2 = sand, set later by the
    simulation).  Grid extents are taken from the largest coordinates
    mentioned in any path.
    """
    lines = utils.read_lines(path)
    paths = [[utils.parse_coord_str(part) for part in line.split(" -> ")]
             for line in lines]
    width = max((pt[0] for path in paths for pt in path), default=0)
    height = max((pt[1] for path in paths for pt in path), default=0)
    grid = np.zeros((height + 1, width + 1))
    # Draw every straight rock segment (horizontal or vertical).
    for path in paths:
        for start, end in zip(path[:-1], path[1:]):
            x0, x1 = sorted((start[0], end[0]))
            y0, y1 = sorted((start[1], end[1]))
            grid[y0:y1 + 1, x0:x1 + 1] = 1
    return grid
def drop_a_sand(grid, sx, sy):
    """Drop one sand unit from (sx, sy), mutating *grid* in place.

    Returns True when the grain comes to rest (its cell is set to 2) and
    False when the source is already blocked or the grain falls past the
    bottom row.

    NOTE(review): a grain drifting past column 0 would index column -1,
    which numpy wraps to the last column; callers pad the grid so this
    does not occur in practice -- confirm before reusing elsewhere.
    """
    height = grid.shape[0]
    if grid[sy, sx] != 0:
        # Source already plugged by earlier sand.
        return False
    x, y = sx, sy
    while True:
        if y == height - 1:
            # Fell past the bottom edge into the void.
            return False
        below = grid[y + 1, x]
        down_left = grid[y + 1, x - 1]
        down_right = grid[y + 1, x + 1]
        if below != 0 and down_left != 0 and down_right != 0:
            # All three candidate cells are blocked: come to rest here.
            grid[y, x] = 2
            return True
        if below == 0:
            y += 1
        elif down_left == 0:
            x, y = x - 1, y + 1
        else:
            x, y = x + 1, y + 1
def hpad(grid, n):
    """Return *grid* widened with *n* zero (air) columns on each side."""
    # np.pad expresses the horizontal widening in one idiomatic call
    # instead of concatenating two explicit zero blocks; the cast keeps
    # the float dtype the old concatenate-with-np.zeros version produced.
    return np.pad(grid, ((0, 0), (n, n))).astype(float)
def part1(path):
    """Count sand units that come to rest before one falls into the void."""
    grid = parse_input(path)
    # One extra row below the lowest rock plus one air column per side,
    # so grains can visibly fall off the map.
    grid = np.concatenate([grid, np.zeros((1, grid.shape[1]))], axis=0)
    grid = hpad(grid, 1)
    count = 0
    # The source sits at x=500 in puzzle coordinates, shifted by the padding.
    while drop_a_sand(grid, 500 + 1, 0):
        count += 1
    print(count)
def part2(path):
    # Add a floor two rows below the lowest rock and count the sand units
    # that come to rest before the source itself is plugged.
    grid = parse_input(path)
    width = grid.shape[1]
    # Wide horizontal padding approximates the infinite floor: 500 columns
    # per side is enough for the sand pyramid to spread without reaching
    # the edge.
    padding_size = 500
    grid = np.concatenate([grid, np.zeros((2, width))], axis=0)
    grid = hpad(grid, padding_size)
    grid[-1, :] = 1
    n = 0
    # The source sits at x=500 in puzzle coordinates, shifted by the padding.
    while drop_a_sand(grid, 500 + padding_size, 0):
        n += 1
print(n) | dialogbox/adventofcode | py/2022/day14.py | day14.py | py | 2,028 | python | en | code | 0 | github-code | 36 |
29084870079 | from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report
def naviebayes():
    """Classify 20-newsgroups posts with multinomial naive Bayes and
    print a per-class precision/recall report."""
    news = fetch_20newsgroups(data_home='./', subset='all')
    train_docs, test_docs, train_labels, test_labels = train_test_split(
        news.data, news.target, test_size=0.25)
    # TF-IDF features: fit the vocabulary on the training split only,
    # then reuse it to transform the held-out documents.
    vectorizer = TfidfVectorizer()
    train_features = vectorizer.fit_transform(train_docs)
    test_features = vectorizer.transform(test_docs)
    # alpha=1.0 is plain Laplace smoothing.
    model = MultinomialNB(alpha=1.0)
    model.fit(train_features, train_labels)
    predictions = model.predict(test_features)
    print("每个类别的精确率和召回率:", classification_report(test_labels, predictions, target_names=news.target_names))
if __name__ == '__main__':
    # Train and evaluate the classifier when run as a script.
    naviebayes()
| shnehna/machine_study | 朴素贝叶斯算法/NB.py | NB.py | py | 1,039 | python | en | code | 0 | github-code | 36 |
37626028800 | from flask import Blueprint, redirect, render_template, session, url_for
from .forms import InfoForms
from flask_learning.models import Post
from datetime import datetime
date_time = Blueprint('date_time_picker', __name__)
@date_time.route("/dating", methods=['GET', 'POST'])
def select_date_time():
    """Show the date-range form; on a valid POST stash the chosen range
    in the session and redirect to the filtered post listing."""
    form = InfoForms()
    if not form.validate_on_submit():
        # GET, or an invalid submission: (re)display the form.
        return render_template('dating.html', form=form)
    session['startdate'] = form.start_date.data
    session['enddate'] = form.end_date.data
    return redirect(url_for('date_time_picker.dates'))
@date_time.context_processor
def formss():
    """Expose a fresh ``InfoForms`` instance to every template as ``form``."""
    return dict(form=InfoForms())
@date_time.route("/datetime", methods=["GET", "POST"])
def dates():
    """List posts whose ``date_posted`` falls inside the range stored in
    the session by ``select_date_time``."""
    startdate = session['startdate']
    enddate = session['enddate']
    # NOTE(review): assumes the session serialised the dates in RFC-1123
    # form ("Wed, 01 Jan 2020 00:00:00 GMT"); strptime raises ValueError
    # otherwise -- confirm against the session serialisation in use.
    startdate = datetime.strptime(startdate, '%a, %d %b %Y %H:%M:%S %Z').strftime('%Y-%m-%d')
    enddate = datetime.strptime(enddate, '%a, %d %b %Y %H:%M:%S %Z').strftime('%Y-%m-%d')
    posts = Post.query.filter(Post.date_posted.between(startdate, enddate))
    return render_template('date.html', posts=posts)
| SunnyYadav16/Flask_Learning | flask_learning/date_time_picker/routes.py | routes.py | py | 1,121 | python | en | code | 0 | github-code | 36 |
4917735234 | from os import path
import os
import openalea.strawberry as strawberry
from openalea.strawberry import import_mtgfile
from openalea.strawberry import visu3d
import openalea.mtg.mtg as mtg
from openalea.mtg.algo import orders
import openalea.plantgl.all as pgl
import format_io as f_io
DATA = "/".join(strawberry.__file__.split("/")[:-3])+"/share/data"
CAPRISS = DATA + "/Capriss.mtg"
def deep_print(geo, tab=0):
    """Recursively print *geo*, descending through nested ``geometry``
    attributes with one extra tab of indentation per level."""
    print("%s%s" % ("\t" * tab, geo))
    if hasattr(geo, "geometry"):
        deep_print(geo.geometry, tab=tab + 1)
def merge_shapes(shapes):
    """Fuse a list of pgl shapes into a single triangulated shape.

    The appearance of the first shape is applied to the merged geometry;
    a single-element list is returned untouched.
    """
    if len(shapes) < 2:
        # Nothing to merge.
        return shapes[0]
    appearance = shapes[0].appearance
    merged = pgl.tesselate(pgl.Group(*[sh.geometry for sh in shapes]))
    if not isinstance(merged, pgl.TriangleSet):
        # Tesselation did not yield triangles; leave a trace for debugging.
        print(merged)
    return pgl.Shape(merged, appearance)
def deep_primitive(sh):
    """Unwrap Shape/Transformed layers and return the underlying primitive."""
    while isinstance(sh, (pgl.Shape, pgl.Transformed)):
        sh = sh.geometry
    return sh
def remove_bezier(scene_dict):
    """Return a copy of *scene_dict* without entries whose underlying
    primitive is a 2-D Bezier curve.

    The previous implementation popped from parallel key/shape lists
    while iterating, compensating with a manual offset counter; filtering
    into a new dict yields the same result without the fragile index
    bookkeeping.
    """
    return {
        key: shape
        for key, shape in scene_dict.items()
        if not isinstance(deep_primitive(shape), pgl.BezierCurve2D)
    }
def main():
    """Load the Gariguette MTG, display its 3-D scene, then export it as
    an OPF file with merged, Bezier-free geometry."""
    gariguette = import_mtgfile.import_mtgfile(filename = ["Gariguette"])
    gariguette.properties()["order"] = orders(gariguette)
    # root = gariguette.root
    # NOTE(review): plants_id is computed but never used below.
    plants_id = gariguette.roots(scale = 1)
    # plant = gariguette.sub_mtg(plants_id[0], copy = True)
    scene = visu3d.plot3d(gariguette,by=["Sample_date"], hide_leaves = False, display = False)
    pgl.Viewer.display(scene)
    # Pause so the viewer window can be inspected before export continues.
    input()
    # Merge each node's shapes into a single shape, keeping key/shape
    # correspondence, then drop Bezier-curve entries the writer can't handle.
    scene_dict = scene.todict()
    keys = iter(scene_dict.keys())
    merged = (merge_shapes(shapes) for shapes in scene_dict.values())
    scene_dict_merged = dict(zip(keys, merged))
    scene_dict_merged = remove_bezier(scene_dict_merged)
    f_io.opf.writer.apply_scene(gariguette, scene_dict_merged)
    io = f_io.io(ignored_name=["index","order","color"])
    io.g = gariguette
    io.write("gariguette.opf")
if __name__ == "__main__":
main() | thomasarsouze/plantconvert | demo/strawberry/generate_opf_strawberry.py | generate_opf_strawberry.py | py | 2,384 | python | en | code | 0 | github-code | 36 |
43831264470 | from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
from uuid import UUID
from eventsourcing.domain.model.aggregate import AggregateRoot
# Locations in the world.
class Location(Enum):
    """Ports/cities known to the booking system; values mirror the names."""
    HAMBURG = "HAMBURG"
    HONGKONG = "HONGKONG"
    NEWYORK = "NEWYORK"
    STOCKHOLM = "STOCKHOLM"
    TOKYO = "TOKYO"
    NLRTM = "NLRTM"
    USDAL = "USDAL"
    AUMEL = "AUMEL"
# Leg of an Itinerary.
class Leg(object):
    """One leg of an itinerary: a single voyage from origin to destination."""
    def __init__(self, origin: str, destination: str, voyage_number: str):
        self.origin: str = origin  # location name, e.g. "HONGKONG"
        self.destination: str = destination  # location name
        self.voyage_number: str = voyage_number  # e.g. "V1"
# Itinerary.
class Itinerary(object):
    """A route from origin to destination as an ordered list of legs."""
    def __init__(self, origin: str, destination: str, legs: List[Leg]):
        self.origin = origin
        self.destination = destination
        self.legs = legs
# Handling activities.
class HandlingActivity(Enum):
    """Things that can happen to a cargo at a location."""
    RECEIVE = "RECEIVE"
    LOAD = "LOAD"
    UNLOAD = "UNLOAD"
    CLAIM = "CLAIM"
# Custom static type aliases for serialised state.
CargoDetails = Dict[str, Optional[Union[str, bool, datetime, Tuple]]]
LegDetails = Dict[str, str]
ItineraryDetails = Dict[str, Union[str, List[LegDetails]]]
# Type variable bound to Cargo, so alternate constructors return the
# concrete subclass type.
T_cargo = TypeVar("T_cargo", bound="Cargo")
# Known itineraries, keyed by (origin, destination) location names.
REGISTERED_ROUTES = {
    ("HONGKONG", "STOCKHOLM"): [
        Itinerary(
            origin="HONGKONG",
            destination="STOCKHOLM",
            legs=[
                Leg(origin="HONGKONG", destination="NEWYORK", voyage_number="V1"),
                Leg(origin="NEWYORK", destination="STOCKHOLM", voyage_number="V2"),
            ],
        )
    ],
    ("TOKYO", "STOCKHOLM"): [
        Itinerary(
            origin="TOKYO",
            destination="STOCKHOLM",
            legs=[
                Leg(origin="TOKYO", destination="HAMBURG", voyage_number="V3"),
                Leg(origin="HAMBURG", destination="STOCKHOLM", voyage_number="V4"),
            ],
        )
    ],
}
# Either None, or (activity, location) / (activity, location, voyage_number).
NextExpectedActivity = Optional[
    Union[Tuple[HandlingActivity, Location], Tuple[HandlingActivity, Location, str]]
]
# Custom aggregate root class.
class Aggregate(AggregateRoot):
    """Project base aggregate.
    ``__subclassevents__ = True`` asks the eventsourcing library to define
    event subclasses automatically for subclasses of this aggregate.
    """
    __subclassevents__ = True
# The Cargo aggregate is an event sourced domain model aggregate that
# specifies the routing from origin to destination, and can track what
# happens to the cargo after it has been booked.
class Cargo(Aggregate):
    """Event-sourced aggregate tracking a cargo booking.

    Holds the booking details (origin, destination, arrival deadline) and
    derived tracking state (routing status, transport status, ETA, next
    expected handling step).  State changes only via the nested event
    classes' ``mutate`` methods, so it can be rebuilt by replaying events.
    """
    @classmethod
    def new_booking(
        cls: Type[T_cargo],
        origin: Location,
        destination: Location,
        arrival_deadline: datetime,
    ) -> T_cargo:
        """Create a new cargo booking aggregate."""
        assert issubclass(cls, Cargo)  # For PyCharm navigation.
        return cls.__create__(
            origin=origin, destination=destination, arrival_deadline=arrival_deadline
        )
    def __init__(
        self,
        origin: Location,
        destination: Location,
        arrival_deadline: datetime,
        **kwargs: Any
    ) -> None:
        super().__init__(**kwargs)
        # Booking details.
        self._origin: Location = origin
        self._destination: Location = destination
        self._arrival_deadline: datetime = arrival_deadline
        # Derived tracking state, updated by the event mutate() methods below.
        self._transport_status: str = "NOT_RECEIVED"
        self._routing_status: str = "NOT_ROUTED"
        self._is_misdirected: bool = False
        self._estimated_time_of_arrival: Optional[datetime] = None
        self._next_expected_activity: NextExpectedActivity = None
        self._route: Optional[Itinerary] = None
        self._last_known_location: Optional[Location] = None
        self._current_voyage_number: Optional[str] = None
    # Read-only projections of the aggregate state.
    @property
    def origin(self) -> Location:
        return self._origin
    @property
    def destination(self) -> Location:
        return self._destination
    @property
    def arrival_deadline(self) -> datetime:
        return self._arrival_deadline
    @property
    def transport_status(self) -> str:
        return self._transport_status
    @property
    def routing_status(self) -> str:
        return self._routing_status
    @property
    def is_misdirected(self) -> bool:
        return self._is_misdirected
    @property
    def estimated_time_of_arrival(self) -> Optional[datetime]:
        return self._estimated_time_of_arrival
    @property
    def next_expected_activity(self) -> Optional[Tuple]:
        return self._next_expected_activity
    @property
    def route(self) -> Optional[Itinerary]:
        return self._route
    @property
    def last_known_location(self) -> Optional[Location]:
        return self._last_known_location
    @property
    def current_voyage_number(self) -> Optional[str]:
        return self._current_voyage_number
    class Event(Aggregate.Event):
        """Base class for all Cargo events."""
        pass
    def change_destination(self, destination: Location) -> None:
        """Trigger an event changing the cargo's destination."""
        self.__trigger_event__(self.DestinationChanged, destination=destination)
    class DestinationChanged(Event):
        """The destination of the booking was changed."""
        def mutate(self, obj: "Cargo") -> None:
            obj._destination = self.destination
        @property
        def destination(self) -> Location:
            return self.__dict__["destination"]
    def assign_route(self, itinerary: Itinerary) -> None:
        """Trigger an event assigning an itinerary to the cargo."""
        self.__trigger_event__(self.RouteAssigned, route=itinerary)
    class RouteAssigned(Event):
        """An itinerary was assigned; routing is (re)established."""
        def mutate(self, obj: "Cargo") -> None:
            obj._route = self.route
            obj._routing_status = "ROUTED"
            # NOTE(review): the ETA is a fixed one-week placeholder, not
            # derived from the itinerary -- confirm before relying on it.
            obj._estimated_time_of_arrival = datetime.now() + timedelta(weeks=1)
            obj._next_expected_activity = (HandlingActivity.RECEIVE, obj.origin)
            obj._is_misdirected = False
        @property
        def route(self) -> Itinerary:
            return self.__dict__["route"]
    def register_handling_event(
        self,
        tracking_id: UUID,
        voyage_number: Optional[str],
        location: Location,
        handling_activity: HandlingActivity,
    ) -> None:
        """Trigger an event recording what happened to the cargo and where."""
        self.__trigger_event__(
            self.HandlingEventRegistered,
            tracking_id=tracking_id,
            voyage_number=voyage_number,
            location=location,
            handling_activity=handling_activity,
        )
    class HandlingEventRegistered(Event):
        """A handling activity occurred; update derived tracking state."""
        def mutate(self, obj: "Cargo") -> None:
            assert obj.route is not None
            if self.handling_activity == HandlingActivity.RECEIVE:
                # Received at origin: next expect loading onto the first leg.
                obj._transport_status = "IN_PORT"
                obj._last_known_location = self.location
                obj._next_expected_activity = (
                    HandlingActivity.LOAD,
                    self.location,
                    obj.route.legs[0].voyage_number,
                )
            elif self.handling_activity == HandlingActivity.LOAD:
                # Loaded onto a voyage: expect unloading at the leg's destination.
                obj._transport_status = "ONBOARD_CARRIER"
                obj._current_voyage_number = self.voyage_number
                for leg in obj.route.legs:
                    if leg.origin == self.location.value:
                        if leg.voyage_number == self.voyage_number:
                            obj._next_expected_activity = (
                                HandlingActivity.UNLOAD,
                                Location[leg.destination],
                                self.voyage_number,
                            )
                            break
                else:
                    # for-else: no matching leg found on the assigned route.
                    raise Exception(
                        "Can't find leg with origin={} and "
                        "voyage_number={}".format(self.location, self.voyage_number)
                    )
            elif self.handling_activity == HandlingActivity.UNLOAD:
                obj._current_voyage_number = None
                obj._last_known_location = self.location
                obj._transport_status = "IN_PORT"
                if self.location == obj.destination:
                    # Arrived: expect the customer to claim the cargo.
                    obj._next_expected_activity = (
                        HandlingActivity.CLAIM,
                        self.location,
                    )
                elif self.location.value in [leg.destination for leg in obj.route.legs]:
                    # Intermediate stop on the route: expect loading onto
                    # the next leg of the itinerary.
                    for i, leg in enumerate(obj.route.legs):
                        if leg.voyage_number == self.voyage_number:
                            next_leg: Leg = obj.route.legs[i + 1]
                            assert Location[next_leg.origin] == self.location
                            obj._next_expected_activity = (
                                HandlingActivity.LOAD,
                                self.location,
                                next_leg.voyage_number,
                            )
                            break
                else:
                    # Unloaded somewhere not on the route: cargo is misdirected.
                    obj._is_misdirected = True
                    obj._next_expected_activity = None
            elif self.handling_activity == HandlingActivity.CLAIM:
                obj._next_expected_activity = None
                obj._transport_status = "CLAIMED"
            else:
                raise Exception(
                    "Unsupported handling event: {}".format(self.handling_activity)
                )
        @property
        def voyage_number(self) -> str:
            return self.__dict__["voyage_number"]
        @property
        def location(self) -> Location:
            return self.__dict__["location"]
        @property
        def handling_activity(self) -> HandlingActivity:
            # Annotation corrected from ``str``: events are triggered with
            # a HandlingActivity member (see register_handling_event).
            return self.__dict__["handling_activity"]
| johnbywater/es-example-cargo-shipping | cargoshipping/domainmodel.py | domainmodel.py | py | 9,436 | python | en | code | 3 | github-code | 36 |
24981710765 | import pandas as pd
import numpy as np
from typing import Tuple
import os
import seaborn as sns
import matplotlib.pyplot as plt
from fancyimpute import IterativeImputer
import sys
sys.path.append('.')
def load_all_data(basepath: str, names_files: list) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Load the three Walmart CSV sources.

    Args:
        basepath (str): directory containing the CSV files
        names_files (list): file names, in the order [features, stores, sales]

    Returns:
        Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: the features,
        stores and sales tables; ``Date`` columns are parsed as datetimes
        for the features and sales tables.
    """
    features = pd.read_csv(os.path.join(basepath, names_files[0]), parse_dates=["Date"])
    stores = pd.read_csv(os.path.join(basepath, names_files[1]))
    sales = pd.read_csv(os.path.join(basepath, names_files[2]), parse_dates=["Date"])
    return (features, stores, sales)
def group_by_feature_by_date(df_feature: pd.DataFrame) -> pd.DataFrame:
    """Aggregate the feature table per date.

    Temperature, fuel price, CPI and unemployment are averaged across
    stores; holiday flags are summed.  Only rows up to 2012-12-10 are kept.

    Args:
        df_feature (pd.DataFrame): feature dataframe with a ``Date`` column

    Returns:
        pd.DataFrame: per-date aggregation, indexed by date
    """
    aggregations = {
        "Temperature": "mean",
        "Fuel_Price": "mean",
        "IsHoliday": "sum",
        "CPI": "mean",
        "Unemployment": "mean",
    }
    per_date = df_feature.groupby("Date").agg(aggregations).sort_index()
    # Truncate to the period covered by the sales data.
    return per_date[:'2012-12-10']
def group_by_sales_by_date(df_sales: pd.DataFrame) -> pd.DataFrame:
    """Total weekly sales per date, indexed chronologically.

    Args:
        df_sales (pd.DataFrame): sales dataframe with ``Date`` and
            ``Weekly_Sales`` columns

    Returns:
        pd.DataFrame: summed sales, sorted by date index
    """
    totals = df_sales.groupby("Date").agg({"Weekly_Sales": "sum"})
    return totals.sort_index()
def merge_feature_and_sales(df_feature: pd.DataFrame, df_sales: pd.DataFrame) -> pd.DataFrame:
    """Merge aggregated features and sales on their (date) indexes.

    Mutates ``df_sales`` in place: Weekly_Sales is converted to integer
    millions before merging.

    Args:
        df_feature (pd.DataFrame): features aggregated by date
        df_sales (pd.DataFrame): sales aggregated by date

    Returns:
        pd.DataFrame: merged dataframe
    """
    df_sales.Weekly_Sales = df_sales.Weekly_Sales/1000000  # convert weekly sales to millions
    df_sales.Weekly_Sales = df_sales.Weekly_Sales.apply(int)
    df_sales_features = pd.merge(df_sales, df_feature, left_index=True, right_index=True, how='left')
    # NOTE(review): IsHoliday here is the per-date *sum* of store holiday
    # flags; 45.0 presumably means "all 45 stores flagged" -- confirm.
    df_sales_features["IsHoliday"] = df_sales_features["IsHoliday"].apply(lambda x: True if x == 45.0 else False )
    return df_sales_features
def agg_store_on_temp_fuel_price_holiday(df_store: pd.DataFrame,
                                         df_feature: pd.DataFrame,
                                         df_sales: pd.DataFrame
                                         ) -> pd.DataFrame:
    """Enrich ``df_store`` (in place) with per-store aggregates.

    Temperature and fuel price are averaged, holiday flags summed, and
    total weekly sales converted to integer millions.

    NOTE(review): the store count 45 is hard-coded, and ``data_Store`` is
    reindexed to 0..44 to align with df_store's default RangeIndex while
    ``temp_store`` keeps its Store-number index -- if store ids are 1..45
    the final Weekly_Sales assignment is misaligned by one.  Confirm.

    Args:
        df_sales (pd.DataFrame) : sales dataframe
        df_store (pd.DataFrame): store dataframe (modified and returned)
        df_feature (pd.DataFrame): features dataframe

    Returns:
        pd.DataFrame: the enriched store dataframe
    """
    data_Store = df_feature.groupby("Store").agg(
        {
            "Temperature": "mean",
            "Fuel_Price": "mean",
            "IsHoliday": "sum"
        }
    )
    temp_store = df_sales.groupby("Store").agg({"Weekly_Sales":"sum"})
    temp_store.Weekly_Sales = temp_store.Weekly_Sales/1000000
    temp_store.Weekly_Sales = temp_store.Weekly_Sales.apply(int)
    # Re-align the aggregate rows with df_store's positional index.
    data_Store.set_index(np.arange(0,45),inplace=True)
    df_store["Temperature"] = data_Store.Temperature
    df_store["Fuel_Price"] = data_Store.Fuel_Price
    df_store["Holiday"] = data_Store.IsHoliday
    df_store["Weekly_Sales"] = temp_store.Weekly_Sales
    return df_store
def dataset_construction(df_sales: pd.DataFrame,
                         df_feature: pd.DataFrame,
                         df_store: pd.DataFrame
                         ) -> pd.DataFrame:
    """Join features, per-(date, store) sales and store type into one table.

    Weekly sales are rescaled to units of 10,000 and truncated to ints.

    Args:
        df_sales (pd.DataFrame): sales data
        df_feature (pd.DataFrame): features data
        df_store (pd.DataFrame): stores data

    Returns:
        pd.DataFrame: the assembled dataset
    """
    per_date_store = (df_sales.groupby(["Date", "Store"])
                      .agg({"Weekly_Sales": "sum"})
                      .sort_index())
    per_date_store["Weekly_Sales"] = (per_date_store["Weekly_Sales"] / 10000).apply(int)
    merged = pd.merge(df_feature, per_date_store, how='left', on=["Date", "Store"])
    return pd.merge(merged, df_store[["Store", "Type"]], how='left', on=["Store"])
def markdown_data_imputation(data_table: pd.DataFrame, col_to_impute: list) -> pd.DataFrame:
    """Fill missing values in *col_to_impute* with iterative (MICE-style)
    imputation, writing the imputed columns back into *data_table*.

    Args:
        data_table (pd.DataFrame): dataset (modified in place and returned)
        col_to_impute (list): columns passed jointly to the imputer

    Returns:
        pd.DataFrame: the dataset with the listed columns imputed
    """
    imputer = IterativeImputer()
    imputed = imputer.fit_transform(data_table[col_to_impute])
    # enumerate() replaces the original hand-rolled `compte` counter.
    for idx, col in enumerate(col_to_impute):
        data_table[col] = imputed[:, idx]
    return data_table
def data_imputation_by_mean(data_table: pd.DataFrame, cols: list) -> pd.DataFrame:
    """Fill missing values in every column of *cols* with that column's mean.

    Generalised from the original, which handled exactly two columns
    (CPI and Unemployment) via positional unpacking; existing callers
    passing two columns behave identically.

    Args:
        data_table (pd.DataFrame): dataset (modified in place and returned)
        cols (list): columns to impute by mean

    Returns:
        pd.DataFrame: the dataset with the listed columns imputed
    """
    for col in cols:
        data_table[col] = data_table[col].fillna(data_table[col].mean())
    return data_table
def createdummies(data, cols):
    """One-hot encode each column in *cols*, dropping the source column."""
    for column in cols:
        dummies = pd.get_dummies(data[column], prefix=column)
        data = data.join(dummies).drop(column, axis=1)
    return data
def create_columns_and_convert_categorical_data(data_table: pd.DataFrame) -> pd.DataFrame:
    """Derive calendar columns from ``Date`` and one-hot encode categoricals.

    ``IsHoliday`` is mapped to 0 for holidays and 1 otherwise.
    NOTE(review): that inversion is preserved from the original -- confirm
    it is intended.

    Args:
        data_table (pd.DataFrame): dataset with a datetime ``Date`` column

    Returns:
        pd.DataFrame: transformed data
    """
    data_table['IsHoliday'] = data_table['IsHoliday'].map({True:0, False:1})
    data_table["Month"] = data_table.Date.dt.month
    data_table["Year"] = data_table.Date.dt.year
    # .dt.weekofyear was deprecated and removed in pandas 2.0;
    # isocalendar().week gives the same ISO week number (cast back to int
    # since isocalendar returns UInt32).
    data_table["WeekofYear"] = data_table.Date.dt.isocalendar().week.astype(int)
    data_table.drop(['Date'], axis=1, inplace=True)
    # Expand categorical columns into dummy indicator columns.
    data_table = createdummies(data_table, ["Type", "Month", "Year", "WeekofYear"])
    return data_table
def data_processing(base_path: str,
                    names_files: list,
                    col_to_impute: list,
                    cols_: list) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Run the full preprocessing pipeline: load, enrich, impute,
    feature-engineer, then split into train/test on Weekly_Sales presence.

    Args:
        base_path (str): base dir
        names_files (list): files to be loaded
        col_to_impute (list): columns imputed with the iterative imputer
        cols_ (list): columns imputed by mean

    Returns:
        Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: full dataset,
        train data (Weekly_Sales present) and test data (Weekly_Sales NaN)
    """
    df_feature, df_store, df_sales = load_all_data(base_path, names_files)
    #TBC
    # df_agg_feature_by_date = group_by_feature_by_date(df_feature=df_feature)
    # df_agg_sales_by_date = group_by_sales_by_date(df_sales=df_sales)
    # df_feature_agg_sales_agg = merge_feature_and_sales(
    #     df_feature=df_agg_feature_by_date,
    #     df_sales=df_agg_sales_by_date
    # )
    df_scalled_store = agg_store_on_temp_fuel_price_holiday(
        df_store=df_store,
        df_feature=df_feature,
        df_sales=df_sales
    )
    data_table = dataset_construction(
        df_sales=df_sales,
        df_feature=df_feature,
        df_store=df_scalled_store
    )
    # Impute the markdown columns jointly, then the remaining gaps by mean.
    data_table_imputed_markdown = markdown_data_imputation(
        data_table=data_table,
        col_to_impute=col_to_impute
    )
    data_table_compltete_imputed = data_imputation_by_mean(
        data_table=data_table_imputed_markdown,
        cols=cols_
    )
    data_table_with_new_features = create_columns_and_convert_categorical_data(
        data_table=data_table_compltete_imputed
    )
    #convert from Fahrenheit to Celcus
    data_table_with_new_features['Temperature'] = (data_table_with_new_features['Temperature']- 32) * 5./9.
    # creating train and test data: rows missing Weekly_Sales are the
    # prediction (test) set.
    data_train = data_table_with_new_features[data_table_with_new_features.Weekly_Sales.notnull()]
    data_test = data_table_with_new_features[data_table_with_new_features.Weekly_Sales.isnull()]
    return data_table_with_new_features, data_train, data_test
def data_processing_with_io(base_path: str,
                            names_files: list,
                            col_to_impute: list,
                            cols_: list,
                            output_path: str
                            ) -> None:
    """Run ``data_processing`` and persist its three outputs as CSVs.

    Args:
        base_path (str): base dir
        names_files (list): files to be loaded
        col_to_impute (list): columns imputed with the iterative imputer
        cols_ (list): columns imputed by mean
        output_path (str): output dir for the preprocessing artefacts
    """
    data_table, data_train, data_test = data_processing(base_path=base_path,
                                                        names_files=names_files,
                                                        col_to_impute=col_to_impute,
                                                        cols_=cols_
                                                        )
    # NOTE(review): the full dataset is written without a .csv extension;
    # the train/test splits go to subdirectories that must already exist.
    data_table.to_csv(os.path.join(output_path, 'preprocess_dataset'), index=False)
    data_train.to_csv(os.path.join(output_path, 'train/train.csv'), index=False)
data_test.to_csv(os.path.join(output_path, 'test/test.csv'), index=False) | infini11/MLOps-project | src/preprocessing.py | preprocessing.py | py | 9,913 | python | en | code | 0 | github-code | 36 |
34180539673 | import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
from viewer.models import (
Target,
Molecule,
MoleculeTag,
TagCategory
)
from scoring.models import MolGroup
class Command(BaseCommand):
    """Backfill MoleculeTag records for a Target's existing MolGroups.

    For every site-type MolGroup of the given target, create a
    ``MoleculeTag`` (unless one already exists).  Pass ``update=no`` for
    a dry run that only reports what would be created.
    """
    help = 'Add moleculeTag record for existing mol_groups for a given target. This effectively adds molecule tags for all the sites for the Target'
    def add_arguments(self, parser):
        """Declare the positional CLI arguments."""
        parser.add_argument('target', type=str, help='Target to be corrected')
        parser.add_argument('update', type=str, help='Whether to update the target (yes) or display what will be updated (no)')
    def handle(self, *args, **kwargs):
        """Inspect the target's MolGroups and (optionally) create tags."""
        tags_existing = 0
        tags_to_be_created = 0
        tags_created = 0
        time_start = timezone.now().strftime('%X')
        self.stdout.write("Start %s" % time_start)
        target_name = kwargs['target']
        self.stdout.write("target_name: %s" % target_name)
        # Anything other than the literal 'yes' is treated as a dry run.
        update = kwargs['update'] == 'yes'
        self.stdout.write("update: %s" % update)
        target = Target.objects.filter(title=target_name)
        if not target:
            self.stdout.write("Target %s not found" % target_name)
            exit(1)
        else:
            self.stdout.write("Updating tags for Target %s" % target[0].title)
        # First, try sites file - e.g. /code/media/targets/mArh/sites.csv
        # If this is there, then the new sites functionality was used.
        sites_filepath = os.path.join(settings.MEDIA_ROOT, 'targets', target_name, 'sites.csv')
        if os.path.isfile(sites_filepath):
            # Count data rows (header excluded); `with` closes the file,
            # which the original bare open() never did.
            with open(sites_filepath) as sites_file:
                expected_sites = sum(1 for _ in sites_file) - 1
            self.stdout.write("Expected number of sites: %s" % expected_sites)
            # These should correspond to the sites for the target held in sites.csv
            mol_groups = MolGroup.objects.filter(target_id__title=target_name, group_type = "MC")
            tag_type = 'site'
        else:
            # The sites correspond to the centres of mass; tags are
            # generated from them.
            mol_groups = MolGroup.objects.filter(target_id__title=target_name, group_type = "MC", description = "c_of_m")
            expected_sites = len(mol_groups)
            self.stdout.write("Expected number of sites: %s" % expected_sites)
            # Any value other than 'site' selects the c_of_m naming below.
            tag_type = 'c_of_e'
        if not mol_groups:
            self.stdout.write("No sites found for target")
            exit(1)
        for idx, mol_group in enumerate(mol_groups):
            self.stdout.write("mol_group description: {}, index: {}".format(mol_group.description, idx))
            # A molecule tag record should not exist yet; if it does,
            # skip this group.  (Narrowed from a bare `except:` that also
            # hid programming errors and KeyboardInterrupt.)
            try:
                mol_tag = MoleculeTag.objects.get(mol_group=mol_group)
            except MoleculeTag.DoesNotExist:
                mol_tag = None
            if tag_type == 'site':
                tag_name = mol_group.description
            else:
                tag_name = 'c_of_m_{}'.format(idx)
            if mol_tag:
                self.stdout.write("Tag already exists for {}, index: {}".format(mol_group.description, idx))
                tags_existing += 1
                continue
            else:
                self.stdout.write("Tag to be created for %s" % tag_name)
                self.stdout.write(" mol_tag.tag = %s" % tag_name)
                self.stdout.write(" mol_tag.category = %s" % TagCategory.objects.get(category='Sites'))
                self.stdout.write(" mol_tag.target = %s" % target[0])
                self.stdout.write(" mol_tag.mol_group = %s" % mol_group)
                self.stdout.write(" mol_tag.molecules = %s" % [mol['id'] for mol in mol_group.mol_id.values()])
                tags_to_be_created += 1
                # Only mutate the database when the update flag was given.
                if update:
                    mol_tag = MoleculeTag()
                    mol_tag.tag = tag_name
                    mol_tag.category = TagCategory.objects.get(category='Sites')
                    mol_tag.target = target[0]
                    mol_tag.mol_group = mol_group
                    mol_tag.save()
                    # M2M links require the tag to be saved first.
                    for mol in mol_group.mol_id.values():
                        this_mol = Molecule.objects.get(id=mol['id'])
                        mol_tag.molecules.add(this_mol)
                    tags_created += 1
        self.stdout.write("Expected number of sites: %s" % expected_sites)
        self.stdout.write("tags_existing %s" % tags_existing)
        self.stdout.write("tags_to_be_created %s" % tags_to_be_created)
        self.stdout.write("tags_created: %s" % tags_created)
        if tags_to_be_created == expected_sites:
            self.stdout.write('Looking good - tags_to_be_created = expected sites')
        if tags_created == expected_sites:
            self.stdout.write('Looking good - tags_created = expected sites')
        time_end = timezone.now().strftime('%X')
        self.stdout.write("End %s" % time_end)
| xchem/fragalysis-backend | viewer/management/commands/tags_from_sites.py | tags_from_sites.py | py | 5,073 | python | en | code | 4 | github-code | 36 |
17772414022 | import swapper
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from formula_one.mixins.period_mixin import ActiveStatus
def get_role(person, role_name, active_status=ActiveStatus.ANY, silent=False, *args, **kwargs):
    """
    Fetch the role instance of the given kind fulfilled by a person
    :param person: an instance of the Person model whose roles are sought
    :param role_name: the name of the role class whose instance is required
    :param active_status: whether the role was, is, isn't or will be active
    :param silent: whether to fail silently or raise exceptions
    :return: the matching role instance, or None when silent and not found
    :raise: Role.DoesNotExist, if the given role is not fulfilled by the person
    :raise: ImproperlyConfigured, if the name of the role class is incorrect
    """
    custom = kwargs.get('is_custom_role', False)
    try:
        if custom:
            # Custom roles are addressed as "<app_label>.<model_name>".
            app_label = role_name.split('.')[0]
            model_name = role_name.split('.')[1]
            Role = swapper.load_model(app_label, model_name)
        else:
            Role = swapper.load_model('kernel', role_name)
        try:
            return Role.objects_filter(active_status).get(person=person)
        except Role.DoesNotExist:
            if not silent:
                raise
    except ImproperlyConfigured:
        if not silent:
            raise
    # Reached only in silent mode when lookup or model loading failed.
    return None
def get_all_roles(person):
    """
    Collect every role fulfilled by a person across all configured role names
    :param person: an instance of the Person model whose roles are sought
    :return: a dictionary mapping each role name to its instance and ActiveStatus
    """
    collected = dict()
    for name in settings.ROLES:
        try:
            instance = get_role(
                person=person,
                role_name=name,
                active_status=ActiveStatus.ANY,
                silent=False,
                is_custom_role=('.' in name),
            )
            collected[name] = {
                'instance': instance,
                'activeStatus': instance.active_status,
            }
        except ObjectDoesNotExist:
            # The person simply does not hold this role; move on.
            continue
    return collected
| IMGIITRoorkee/omniport-backend | omniport/core/kernel/managers/get_role.py | get_role.py | py | 2,280 | python | en | code | 67 | github-code | 36 |
4814315395 | from selenium import webdriver
from selenium.webdriver.common.by import By
# Launch a local Chrome session (requires chromedriver on PATH) and load
# the example page.
driver = webdriver.Chrome()
driver.get("https://www.example.com")
# Get element with tag name 'div'
element = driver.find_element(By.TAG_NAME, 'div')
# Get all the elements available with tag name 'p'
# NOTE: the search below is scoped to that first div, not the whole page.
elements = element.find_elements(By.TAG_NAME, 'p')
for e in elements:
print(e.text) | Duyanhdda/IOT-LAB | a.py | a.py | py | 374 | python | en | code | 0 | github-code | 36 |
39844703302 | """
Iguana (c) by Marc Ammon, Moritz Fickenscher, Lukas Fridolin,
Michael Gunselmann, Katrin Raab, Christian Strate
Iguana is licensed under a
Creative Commons Attribution-ShareAlike 4.0 International License.
You should have received a copy of the license along with this
work. If not, see <http://creativecommons.org/licenses/by-sa/4.0/>.
"""
import datetime
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def date_is_present_or_future(value):
    """Validate that *value* is a date/datetime no earlier than today.

    Accepts ``datetime.date`` and ``datetime.datetime`` values, including
    subclasses (the previous exact ``type() is`` checks rejected subclasses
    such as proxy date types).

    :param value: the value to validate
    :raise ValidationError: if the value lies in the past or is not a
        date/datetime at all
    """
    # datetime.datetime is a subclass of datetime.date, so it must be
    # tested first for the comparison to use the right granularity.
    if isinstance(value, datetime.datetime):
        if value < datetime.datetime.today():
            raise ValidationError(
                _("Enter a date starting from today")
            )
    elif isinstance(value, datetime.date):
        if value < datetime.date.today():
            raise ValidationError(
                _("Enter a date starting from today")
            )
    else:
        raise ValidationError(
            _("Enter a date starting from today")
        )
| midas66/iguana | src/common/validators.py | validators.py | py | 986 | python | en | code | null | github-code | 36 |
30170451702 | """
written in Python 3
Find all the solutions of a given board.
Algorithm: backtracking (dfs) in an optimal order
1. Keep track of candidates of each cell.
2. Find the cell with fewest candidates. Fill the cell with one of the candidates. Update the candidates of other cells.
3. Repeat step 2 until solved. Or if the board is not solvable anymore (there's any cell that is empty but has no candidates), undo step 2 and try the next candidate.
"""
from copy import deepcopy
class SudokuSolver:
    """Backtracking Sudoku solver that always branches on the empty cell with
    the fewest remaining candidates, collecting every solution of a board.

    A board is a 9x9 list of lists of single-character strings; empty cells
    hold ``empty_symbol`` (a space).
    """
    col_size = 9 # len(self.board)
    row_size = 9 # len(self.board[0])
    block_col_size = 3
    block_row_size = 3
    digits = '123456789'
    empty_symbol = ' '
    # def solve_board(self, board: List[List[str]]) -> None:
    def solve_board(self, board):
        """Solve *board* and return the list of all solved boards found."""
        self.init_board(board)
        self.solve()
        return self.solutions
    def init_board(self, board):
        """Copy *board* and build the empty-cell set and per-cell candidates."""
        self.board = deepcopy(board)
        # list all empty cells. a `cell` is a tuple `(row_index, col_index)`
        self.empty_cells = set([(ri, ci) for ri in range(self.row_size) for ci in range(self.col_size) if self.board[ri][ci] == self.empty_symbol])
        # find candidates of each cell
        self.candidates = [[set(self.digits) for ci in range(self.col_size)] for ri in range(self.row_size)]
        for ri in range(self.row_size):
            for ci in range(self.col_size):
                digit = self.board[ri][ci]
                if digit != self.empty_symbol:
                    self.candidates[ri][ci] = set()
                    self.update_candidates((ri, ci), digit)
        self.solutions = []
    def solve(self):
        """Depth-first search: fill the most constrained cell, recurse, undo."""
        # if there are no empty cells, it's solved
        if not self.empty_cells:
            self.solutions.append(deepcopy(self.board))
            return
        # get the cell with fewest candidates
        cell = min(self.empty_cells, key=lambda cell: len(self.candidates[cell[0]][cell[1]]))
        # try filling the cell with one of the candidates, and solve recursively
        ri, ci = cell
        for digit in list(self.candidates[ri][ci]):
            candidate_updated_cells = self.fill_cell(cell, digit)
            solved = self.solve()  # NOTE: solve() returns None; `solved` is unused
            self.unfill_cell(cell, digit, candidate_updated_cells)
    def fill_cell(self, cell, digit):
        """Place *digit* in *cell* and prune it from related cells' candidates."""
        # fill the cell with the digit
        ri, ci = cell
        self.board[ri][ci] = digit
        # remove the cell from empty_cells
        self.empty_cells.remove(cell)
        # update the candidates of other cells
        # keep a list of updated cells. will be used when unfilling cells
        candidate_updated_cells = self.update_candidates(cell, digit)
        return candidate_updated_cells
    def unfill_cell(self, cell, digit, candidate_updated_cells):
        """Undo fill_cell: clear the cell and restore the pruned candidates."""
        # unfill cell
        ri, ci = cell
        self.board[ri][ci] = self.empty_symbol
        # add the cell back to empty_cells
        self.empty_cells.add(cell)
        # add back candidates of other cells
        for ri, ci in candidate_updated_cells:
            self.candidates[ri][ci].add(digit)
    def update_candidates(self, filled_cell, digit):
        """Drop *digit* from the candidates of every empty cell sharing a row,
        column or block with *filled_cell*; return the cells actually changed."""
        candidate_updated_cells = []
        for ri, ci in self.related_cells(filled_cell):
            if (self.board[ri][ci] == self.empty_symbol) and (digit in self.candidates[ri][ci]):
                self.candidates[ri][ci].remove(digit)
                candidate_updated_cells.append((ri, ci))
        return candidate_updated_cells
    def related_cells(self, cell):
        """All distinct cells in the same row, column, or 3x3 block as *cell*."""
        return list(set(self.cells_in_same_row(cell) + self.cells_in_same_col(cell) + self.cells_in_same_block(cell)))
    def cells_in_same_row(self, cell):
        return [(cell[0], ci) for ci in range(self.col_size)]
    def cells_in_same_col(self, cell):
        return [(ri, cell[1]) for ri in range(self.row_size)]
    def cells_in_same_block(self, cell):
        # Top-left corner of the 3x3 block containing `cell`.
        block_first_cell_ri = (cell[0] // self.block_row_size) * self.block_row_size
        block_first_cell_ci = (cell[1] // self.block_col_size) * self.block_col_size
        return [
            (block_first_cell_ri + in_block_ri, block_first_cell_ci + in_block_ci)
            for in_block_ri in range(self.block_row_size)
            for in_block_ci in range(self.block_col_size)
        ]
    def print_solutions(self, boards):
        """Pretty-print every board in *boards*."""
        for board in boards:
            self.print_board(board)
    def print_board(self, board):
        """Print *board* with ASCII borders around each 3x3 block."""
        border = '+' + '+'.join(['-'.join('-' * self.block_col_size)] * (self.row_size // self.block_col_size)) + '+'
        inside_border = '+' + '+'.join([' '.join(' ' * self.block_col_size)] * (self.row_size // self.block_col_size)) + '+'  # NOTE: unused
        print(border)
        for ri in range(self.row_size):
            if ri % self.block_row_size == 0 and ri != 0:
                print(border)
            row = board[ri]
            row_str = '|'
            for block_col_idx in range(self.row_size // self.block_row_size):
                start_ci = block_col_idx * self.block_col_size
                end_ci = start_ci + self.block_col_size
                row_str += ' '.join(row[start_ci:end_ci]) + '|'
            print(row_str)
        print(border)
# Demo driver: solve three sample boards and report timing for each.
if __name__ == '__main__':
    import time
    board_0 = [
        ['8','2',' ',' ',' ',' ',' ',' ',' '],
        [' ',' ','3','6',' ',' ',' ',' ',' '],
        [' ','7',' ',' ','9',' ','2',' ',' '],
        [' ','5',' ',' ',' ','7',' ',' ',' '],
        [' ',' ',' ',' ','4','5','7',' ',' '],
        [' ',' ',' ','1',' ',' ',' ','3',' '],
        [' ',' ','1',' ',' ',' ',' ','6','8'],
        [' ',' ','8','5',' ',' ',' ','1',' '],
        [' ','9',' ',' ',' ',' ','4',' ',' ']
    ]
    # [hardest sudoku](https://www.telegraph.co.uk/news/science/science-news/9359579/Worlds-hardest-sudoku-can-you-crack-it.html)
    board_1 = [
        ['8',' ',' ',' ',' ',' ',' ',' ',' '],
        [' ',' ','3','6',' ',' ',' ',' ',' '],
        [' ','7',' ',' ','9',' ','2',' ',' '],
        [' ','5',' ',' ',' ','7',' ',' ',' '],
        [' ',' ',' ',' ','4','5','7',' ',' '],
        [' ',' ',' ','1',' ',' ',' ','3',' '],
        [' ',' ','1',' ',' ',' ',' ','6','8'],
        [' ',' ','8','5',' ',' ',' ','1',' '],
        [' ','9',' ',' ',' ',' ','4',' ',' ']
    ]
    board_2 = [
        [' ','8',' ',' ',' ','9','7','4','3'],
        [' ','5',' ',' ',' ','8',' ','1',' '],
        [' ','1',' ',' ',' ',' ',' ',' ',' '],
        ['8',' ',' ','2',' ','5',' ',' ',' '],
        [' ',' ',' ','8',' ','4',' ',' ',' '],
        [' ',' ',' ','3',' ',' ',' ',' ','6'],
        [' ',' ',' ',' ',' ',' ',' ','7',' '],
        [' ','3',' ','5',' ',' ',' ','8',' '],
        ['9','7','2','4',' ',' ',' ','5',' '],
    ]
    solver = SudokuSolver()
    def solve_and_print(board):
        # Solve one board, timing the search, then print all solutions found.
        start_time = time.time()
        solutions = solver.solve_board(board)
        elapsed_time = time.time() - start_time
        print('board to solve:')
        solver.print_board(board)
        print('{} solution found in {} seconds.'.format(len(solutions), elapsed_time))
        solver.print_solutions(solutions)
        print('------------------------------------------------------------')
    solve_and_print(board_0)
    solve_and_print(board_1)
    solve_and_print(board_2)
| Roger-Wu/sudoku-solver | SudokuSolver.py | SudokuSolver.py | py | 7,226 | python | en | code | 1 | github-code | 36 |
74176385062 | '''
This file is used to ge through and upload metadata to cloud
storage for artist submissions
'''
#imports
import datetime
import os
import firebase_admin
from firebase_admin import credentials, firestore
import google.cloud
# can potentially execute the cloud utils rsync here if we want all in one
# authorization and connect method
def cloudConnect(debug=False):
    """Authenticate with Firebase using the local service-account key file
    and return a Firestore client.

    :param debug: when True, print a confirmation once connected
    :return: an authenticated firestore client instance
    """
    # Credentials come from the key file shipped next to this script.
    service_key = credentials.Certificate("./firebaseSAkey.json")
    firebase_admin.initialize_app(service_key)
    client = firestore.client()
    if debug:
        print("Connected to firebase")
    return client
def uploadData(doc_ref, artist, debug=False):
    """Create the Firestore document for *artist* (when absent) and populate
    an ``images`` subcollection with one metadata document per file found in
    the local ``./<artist>`` folder.

    :param doc_ref: DocumentReference for this artist (``artists/<name>``)
    :param artist: artist (and folder) name
    :param debug: when True, print a confirmation after uploading
    """
    # when we get to the final version, this will use directory to be specific
    doc = doc_ref.get()
    if doc.exists:
        # can probably just not show anything if there OR merge
        print(f'Doc data: {doc.to_dict()}')
    else:
        # might be easier to create an artist class later
        print(u'No such document found! Adding a new one.')
        # BUG FIX: doc_ref is already a DocumentReference (created via
        # db.collection('artists').document(...) in the caller) and has no
        # .document() method, so doc_ref.document(artist).set(...) raised
        # AttributeError. Write the artist fields directly to doc_ref.
        doc_ref.set({
            u'name': artist,
            u'email': "{}@gmail.test".format(artist),
            u'upload_time': datetime.datetime.now()
        })
        # create a subcollection for the image metadata
        for roots, dirs, files in os.walk("./{}".format(artist)):
            for f in files:
                # go through each of the images uploaded and fill in their metadata
                doc_ref.collection(u'images').document(f).set({
                    u'title': f,
                    u'upload_date': datetime.datetime.today(),
                    u'description': "This is a test image"
                }#, merge=True
                )
        if debug:
            print("Uploaded artist and image metadata")
# function to go through each folder and upload the artist and image metadata
# use last upload time to determine whether to change metadata?
def uploadArtistMetadata(db, artist="", debug=False):
    """Walk the working directory and upload metadata for each artist folder.

    When *artist* is given, only that folder is processed and the walk stops
    as soon as it is found; otherwise every directory is uploaded.
    (Still O(n) over all folders even for a single artist — fix later.)

    :param db: an authenticated firestore client
    :param artist: optional single artist name to restrict the upload to
    :param debug: when True, print progress information
    """
    for _root, folder_names, _files in os.walk("."):
        # Only directories matter here: each one is an artist folder.
        for folder in folder_names:
            # Document corresponding to this artist.
            doc_ref = db.collection(u'artists').document(folder)
            if artist:
                if folder != artist:
                    continue
                # At this point the requested artist folder was found.
                if debug:
                    print("Found {} for {}".format(folder, artist))
                uploadData(doc_ref, artist, debug)
                return  # finished at this point
            uploadData(doc_ref, folder, debug)
# main
def main(debug=False):
    """Prompt for an artist name, connect to Firestore, and upload that
    artist's metadata.

    :param debug: forwarded to the connection and upload helpers
    """
    # Authenticate and obtain a Firestore client.
    db = cloudConnect(debug)
    print("What is the name of the artist?")
    artist_name = input("Input the name here: ")
    uploadArtistMetadata(db, artist_name, debug)
    print("Finished uploading info for {}".format(artist_name))
# Script entry point: run the interactive uploader with debug output enabled.
if __name__ == "__main__":
    print("Starting Upload to isolating together cloud database")
    main(debug=True)
| reyluno/isolatingtogether.github.io | utils/dbUpdate.py | dbUpdate.py | py | 3,428 | python | en | code | 1 | github-code | 36 |
42509586676 | import random
import math
import sys
import string
import copy
class Board(object):
    """Flood-It game board.

    State:
      * board   — size x size grid of single-letter colors
      * FC      — color of the flooded (player-owned) region
      * FLOODED — list of (row, col) cells belonging to the flooded region
      * GROUPS  — remaining same-color connected groups as [color, [cells]]
    """
    COLORS = string.ascii_uppercase
    def __init__(self, orig=None, size=10, color=4):
        """Create a random board, or deep-copy *orig* when it is given."""
        self.COLORS = self.COLORS[0:color]#random.sample(self.COLORS, k=color)
        self.size = size
        self.board = [[' ' for i in range(self.size)] for i in range(self.size)]
        self.FC = 0
        self.FLOODED = []
        self.GROUPS = []
        self.reset()
        if orig:
            # Copy constructor path: replace the freshly generated state.
            self.COLORS = copy.deepcopy(orig.COLORS)
            self.size = orig.size
            self.board = copy.deepcopy([list(col) for col in orig.board])
            self.FC = copy.deepcopy(orig.FC)
            self.FLOODED = copy.deepcopy(orig.FLOODED)
            self.GROUPS = copy.deepcopy(orig.GROUPS)
    def reset(self):
        """Fill the grid with random colors and merge adjacent same-color
        cells into connected groups; the top-left group becomes FLOODED."""
        for i in range(self.size):
            for j in range(self.size):
                # get a random color
                tempc = self.COLORS[random.randrange(len(self.COLORS))]
                # set the color for this block
                self.board[i][j] = tempc
                self.GROUPS.append([tempc, [(i, j)]])
        # grouping
        while True:
            done = True
            for n, g in enumerate(self.GROUPS):
                for coor in g[1]:
                    x, y = coor
                    for m, gg in enumerate(self.GROUPS):
                        if n != m and g[0] == gg[0]:
                            # Same color and touching (left or up neighbor)?
                            if (y > 0 and (x, y-1) in gg[1]) or (x > 0 and (x-1, y) in gg[1]):
                                if n < m:
                                    tempg = g
                                    tempg[1] = tempg[1] + gg[1]
                                    keep = n
                                    dele = gg
                                if n > m:
                                    tempg = gg
                                    tempg[1] = tempg[1] + g[1]
                                    keep = m
                                    dele = g
                                done = False
                                break
                    if not done:
                        break
                if not done:
                    break
            if done:
                break
            else:
                # Apply exactly one merge per scan, then rescan from scratch.
                self.GROUPS[keep] = tempg
                self.GROUPS.remove(dele)
        self.FC = self.GROUPS[0][0]
        self.FLOODED = self.GROUPS[0][1]
        del self.GROUPS[0]
    #---------------------------------------------
    def hash(self):
        """Return a string fingerprint built from each group's color and
        first cell (used to detect repeated positions)."""
        output = ""
        #for i in range(len(self.GROUPS)+1):
        for g in self.GROUPS:
            output += g[0] + str(g[1][0])
            #output = output << 2
        return output
    #---------------------------------------------
    def move(self, c):
        """Recolor the flooded region to *c* and absorb touching groups."""
        self.FC = c
        for coor in self.FLOODED:
            x, y = coor
            self.board[x][y] = c
        self.flood()
    #---------------------------------------------
    def children(self):
        """Return (board, color) successors for every useful move, i.e. moves
        that reduce the number of remaining groups."""
        children = []
        for c in self.COLORS:
            if c != self.FC:
                child = Board(orig=self)
                child.move(c)
                if len(child.GROUPS) < len(self.GROUPS):
                    children.append((child, c))
        return children
    #---------------------------------------------
    def isOver(self):
        """True when no separate groups remain (the board is one color)."""
        if len(self.GROUPS) == 0:
            return True
        else:
            return False
    #---------------------------------------------
    def score(self):
        # Number of groups still to absorb (lower is better).
        return len(self.GROUPS)
    def scoree(self):
        # Size of the flooded region (higher is better).
        return len(self.FLOODED)
    #---------------------------------------------
    # merge adjacent same color groups in larger group and update self.GROUPS
    # continues to check until no update is made, then break the loop.
    def flood(self):
        while True:
            done = True
            for coor in self.FLOODED:
                x, y = coor
                for n, g in enumerate(self.GROUPS):
                    if self.FC == g[0]:
                        # Group touches the flooded region on the right/below?
                        if (y < self.size and (x, y+1) in g[1]) or (x <= self.size and (x+1, y) in g[1]):
                            tempg = g[1]#self.GROUPS[0]
                            #tempg[1] = tempg[1] + g[1]
                            done = False
                            break
                if not done:
                    break
            if done:
                break
            else:
                # Absorb the matched group's cells and drop it, then rescan.
                self.FLOODED += tempg#self.GROUPS[0][1] += tempg
                del self.GROUPS[n]
    #---------------------------------------------
    def print(self):
        """Print the board as an ASCII grid with cell borders."""
        print()
        print('+' + '---+' * self.size)
        for i in range(self.size):
            row = '|'
            for j in range(self.size):
                row += ' ' + str(self.board[i][j]) + ' |'
            print(row)
        print('+' + '---+' * self.size)
        print()
    def show(self):
        """Dump the internal state (flood color, cells, group count, groups)."""
        print(self.FC)
        print(self.FLOODED)
        print(len(self.GROUPS))
        print(self.GROUPS)
# Interactive game loop: keep asking for a color until the board is uniform.
if __name__ == "__main__":
    b = Board(size=10, color=4)
    #b.reset()
    b.show()
    b.print()
    i = 0
    while not b.isOver():
        inp = input("Input the color: ")
        if inp.upper() in b.COLORS:
            b.move(inp.upper())
            i += 1
            # BUG FIX: "Moves: " + i raised TypeError (str + int); convert first.
            print("Moves: " + str(i))
            b.print()
        else:
            print("Invalid input. ")
| LeanMilk/Flood-It | Flood-It/board.py | board.py | py | 5,443 | python | en | code | 0 | github-code | 36 |
17837907452 | # -*- coding: utf-8 -*-
# B
import sys
from collections import defaultdict, deque
from heapq import heappush, heappop
import math
import bisect
# Fast line-based stdin reads.
input = sys.stdin.readline
# Raise the recursion limit (template leftover, unused here)
# sys.setrecursionlimit(1000000)
N= int(input())
A = list(map(int, input().split()))
# Sort ascending so any zero is multiplied in first and caught immediately.
A.sort()
ans = 1
for a in A:
    ans *= a
    if a == 0:
        print(0)
        sys.exit()
    # Abort as soon as the running product exceeds the allowed bound.
    if ans > 10**18:
        print(-1)
        sys.exit()
print(ans)
| hsuetsugu/atc | ABC169/B.py | B.py | py | 449 | python | en | code | 0 | github-code | 36 |
5813639636 | # type: ignore
import os
import pathlib
import subprocess
import gnupg
import pytest
import requests
import toml
def pytest_collect_file(file_path, parent):
    """Pytest collection hook: treat shell scripts named ``test_*.sh`` as
    test files; return None for everything else."""
    is_test_script = file_path.name.startswith("test_") and file_path.suffix == ".sh"
    if is_test_script:
        return ScriptFile.from_parent(parent, path=file_path)
class ScriptFile(pytest.File):
    """Collector for a ``test_*.sh`` file; yields a single ScriptItem."""
    # To make pytest.Function happy
    obj = None
    def collect(self):
        # Extract the name between "test_" and ".sh".
        name = self.path.name[5:][:-3]
        yield ScriptItem.from_parent(self, name=name, path=self.path)
# HACK: Inherit from `pytest.Function` to be able to use the fixtures
class ScriptItem(pytest.Function):
    """A single shell-script test item; runs the script and maps its exit code
    onto pytest outcomes (0 = pass, 23 = skip, anything else = failure)."""
    def __init__(self, path, **kwargs):
        # Delegate execution to _runscript so pytest injects its fixtures.
        super().__init__(callobj=self._runscript, **kwargs)
        self.path = path
        self.add_marker("script")
        # Scripts living under scripts/<plugin>/ also get a <plugin> marker.
        if path.parts[-3] == "scripts":
            self.add_marker(path.parts[-2])
    def _runscript(self, pulp_cli_env, tmp_path, pulp_container_log):
        """Run the script in a temporary working directory."""
        run = subprocess.run([self.path], cwd=tmp_path)
        if run.returncode == 23:
            pytest.skip("Skipped as requested by the script.")
        if run.returncode != 0:
            raise ScriptError(f"Script returned with exit code {run.returncode}.")
    def reportinfo(self):
        # Location line shown in pytest's report header.
        return self.path, 0, f"test script: {self.name}"
    def repr_failure(self, excinfo):
        # For script failures show only our concise message; otherwise defer.
        if isinstance(excinfo.value, ScriptError):
            return str(excinfo.value)
        return super().repr_failure(excinfo)
class ScriptError(Exception):
    """Raised when a collected shell script exits with a failure status."""
@pytest.fixture
def pulp_cli_vars():
    """
    Dictionary of environment variables that `pulp_cli_env` exports for tests.
    Override this fixture to inject additional variables; by default it
    carries only "PULP_FIXTURES_URL".
    """
    fixtures_url = os.environ.get("PULP_FIXTURES_URL", "https://fixtures.pulpproject.org")
    return {"PULP_FIXTURES_URL": fixtures_url}
@pytest.fixture(scope="session")
def pulp_cli_settings(tmp_path_factory):
    """
    This fixture will setup the config file once per session only.
    It is most likely not useful to be included standalone.
    The `pulp_cli_env` fixture, however depends on it and sets $XDG_CONFIG_HOME up accordingly.
    """
    settings = toml.load("tests/cli.toml")
    # Allow CI to override the API root for every profile in the file.
    if os.environ.get("PULP_API_ROOT"):
        for key in settings:
            settings[key]["api_root"] = os.environ["PULP_API_ROOT"]
    # Write the (possibly patched) settings to <tmp>/config/pulp/cli.toml.
    settings_path = tmp_path_factory.mktemp("config", numbered=False)
    (settings_path / "pulp").mkdir(parents=True)
    with open(settings_path / "pulp" / "cli.toml", "w") as settings_file:
        toml.dump(settings, settings_file)
    yield settings_path, settings
@pytest.fixture(scope="session")
def pulp_cli_gnupghome(tmp_path_factory):
    """
    This fixture will setup a GPG home directory once per session only.
    It imports the pulp-qe private key (cached on disk next to this module,
    downloaded from pulp-fixtures on first use) and trusts it ultimately.
    """
    gnupghome = tmp_path_factory.mktemp("gnupghome")
    gpg = gnupg.GPG(gnupghome=str(gnupghome))
    key_file = pathlib.Path(__file__).parent / "GPG-PRIVATE-KEY-pulp-qe"
    if key_file.exists():
        private_key_data = key_file.read_text()
    else:
        # First run: fetch the key and cache it for subsequent sessions.
        private_key_url = (
            "https://github.com/pulp/pulp-fixtures/raw/master/common/GPG-PRIVATE-KEY-pulp-qe"
        )
        private_key_data = requests.get(private_key_url).text
        key_file.write_text(private_key_data)
    import_result = gpg.import_keys(private_key_data)
    gpg.trust_keys(import_result.fingerprints[0], "TRUST_ULTIMATE")
    return gnupghome
@pytest.fixture
def pulp_cli_env(pulp_cli_settings, pulp_cli_vars, pulp_cli_gnupghome, monkeypatch):
    """
    This fixture will set up the environment for cli commands by:
    * creating a tmp_dir
    * placing the config there
    * pointing XDG_CONFIG_HOME accordingly
    * supplying other useful environment vars
    """
    settings_path, settings = pulp_cli_settings
    monkeypatch.setenv("XDG_CONFIG_HOME", str(settings_path))
    monkeypatch.setenv("PULP_BASE_URL", settings["cli"]["base_url"])
    monkeypatch.setenv("VERIFY_SSL", str(settings["cli"].get("verify_ssl", True)).lower())
    monkeypatch.setenv("GNUPGHOME", str(pulp_cli_gnupghome))
    # Export everything pulp_cli_vars provides (PULP_FIXTURES_URL by default).
    for key, value in pulp_cli_vars.items():
        monkeypatch.setenv(key, value)
    yield settings
if "PULP_LOGGING" in os.environ:
    # PULP_LOGGING names a container runtime binary (e.g. docker/podman).
    # Stream the pulp-ephemeral container's log for the whole session and
    # echo whatever was logged during each test.
    @pytest.fixture(scope="session")
    def pulp_container_log_stream():
        with subprocess.Popen(
            [os.environ["PULP_LOGGING"], "logs", "-f", "--tail", "0", "pulp-ephemeral"],
            stdin=subprocess.DEVNULL,
            stdout=subprocess.PIPE,
            stderr=subprocess.STDOUT,
        ) as proc:
            # Non-blocking reads let the per-test fixture poll without stalling.
            os.set_blocking(proc.stdout.fileno(), False)
            yield proc.stdout
            proc.kill()
    @pytest.fixture
    def pulp_container_log(pulp_container_log_stream):
        # Flush logs before starting the test
        pulp_container_log_stream.read()
        yield
        # Print whatever the container logged while the test ran.
        logs = pulp_container_log_stream.read()
        if logs is not None:
            print(logs.decode())
else:
    # No container runtime configured: provide a no-op stand-in fixture.
    @pytest.fixture
    def pulp_container_log():
        yield
| pulp/pulp-cli | pytest_pulp_cli/__init__.py | __init__.py | py | 5,190 | python | en | code | 26 | github-code | 36 |
15556149755 | from __future__ import annotations
import numpy as np
from typing import List
from .meshdata import MeshCode
from . import R, rad2deg, deg2rad
class Point:
    """A geographic point (latitude 0-66 deg, longitude 100-180 deg) whose
    elevation is looked up from its mesh code when not given explicitly."""
    def __init__( self,
                  latitude: float, # <deg>
                  longitude: float, # <deg>
                  elevation: float = None, # <meter>
                  valid_elevation: float = None, # <meter>
                  diameter: float = 0, #<meter>
                  name: str =None,
                ) -> None:
        # Coordinates must stay inside the range supported by MeshCode.
        assert 0 <= latitude and latitude < 66
        assert 100 <= longitude and longitude < 180
        self.name = name
        self.latitude = latitude
        self.longitude = longitude
        self.diameter = diameter
        self._mesh_code = MeshCode(latitude, longitude)
        # NOTE(review): `or` treats an explicit elevation of 0.0 as "missing"
        # and falls back to the mesh-code lookup — confirm that is intended.
        self.elevation = elevation or self._mesh_code.elevation()
        self.valid_elevation = valid_elevation or self.elevation + .1
    def next( self,
              azimuth: float, # [0,360]
              dist: float, # <meter>
              valid_elevation: float, #<meter>
              name: str = None,
            ) -> Point:
        """Return the Point reached by travelling *dist* metres from here
        along the great circle with the given *azimuth*."""
        a = dist / R
        b = np.pi / 2 - deg2rad(self.latitude)
        gamma = 2*np.pi - deg2rad(azimuth)
        # Spherical law of cosines: colatitude c of the destination.
        c = np.arccos( np.cos(a) * np.cos(b) \
                     + np.sin(a) * np.sin(b) * np.cos(gamma) )
        # Spherical law of sines: longitude difference alpha.
        alpha = np.arcsin( np.sin(a) * np.sin(gamma) / np.sin(c) )
        lat = rad2deg(np.pi / 2 - c)
        lon = self.longitude - rad2deg(alpha)
        return Point(lat, lon, valid_elevation=valid_elevation, name=name)
    def to_location_format(self) -> List:
        """Row of mixed fields: name, coordinates, mesh label, elevations,
        and the margin between valid and actual elevation."""
        return [
            self.name,
            self.latitude,
            self.longitude,
            self._mesh_code.label,
            self.elevation,
            self.valid_elevation,
            self.valid_elevation - self.elevation
        ]
    def __str__(self):
        return f"""
        ==== {self.name} ====
        lat: {self.latitude}
        log: {self.longitude}
        h: {self.elevation}
        ====================="""
def validElevation( dist: float, #<meter>
                    altitude: float, # [0,90]
                    root_point: Point,#<meter>
                  ) -> float:
    """Elevation <meter> a point *dist* metres away must reach to be seen at
    the given *altitude* angle from *root_point*.

    Geometry: on a sphere of radius R the ground distance subtends the
    central angle theta = dist / R, and the sight line leaves the (raised)
    root at zenith angle 90 - altitude; the law of sines in the triangle
    (Earth centre, root, target) yields the target's radius.
    """
    # Raise the sight line's origin by the root object's apparent height.
    root_elevation = root_point.elevation \
                   + root_point.diameter * np.tan(deg2rad(altitude))
    theta = dist / R
    zenith = deg2rad(90 - altitude)
    # (Removed an unused intermediate `X = np.pi - Y - theta`.)
    return ( np.sin(zenith) / np.sin(zenith + theta) ) * (R + root_elevation) - R
35715668806 | import os
from mercurial import hg, ui
from mercurial.hgweb.hgwebdir_mod import hgwebdir
# Build a small tree of repositories under ./webdir:
# webdir/a, webdir/b, webdir/b/d (nested) and webdir/c.
os.mkdir('webdir')
os.chdir('webdir')
webdir = os.path.realpath('.')
u = ui.ui()
hg.repository(u, 'a', create=1)
hg.repository(u, 'b', create=1)
os.chdir('b')
hg.repository(u, 'd', create=1)
os.chdir('..')
hg.repository(u, 'c', create=1)
os.chdir('..')
# Virtual-path table: an explicit mount, plus glob ('*') and recursive
# glob ('**') collections over everything under webdir.
paths = {'t/a/': '%s/a' % webdir,
         'b': '%s/b' % webdir,
         'coll': '%s/*' % webdir,
         'rcoll': '%s/**' % webdir}
config = os.path.join(webdir, 'hgwebdir.conf')
configfile = open(config, 'w')
configfile.write('[paths]\n')
for k, v in paths.items():
    configfile.write('%s = %s\n' % (k, v))
configfile.close()
# hgwebdir must resolve the same repository set whether it is fed the
# config file or the equivalent dict.
confwd = hgwebdir(config)
dictwd = hgwebdir(paths)
assert len(confwd.repos) == len(dictwd.repos), 'different numbers'
assert len(confwd.repos) == 9, 'expected 9 repos, found %d' % len(confwd.repos)
found = dict(confwd.repos)
for key, path in dictwd.repos:
    assert key in found, 'repository %s was not found' % key
    assert found[key] == path, 'different paths for repo %s' % key
| helloandre/cr48 | bin/mercurial-1.7.5/tests/test-hgwebdir-paths.py | test-hgwebdir-paths.py | py | 1,066 | python | en | code | 41 | github-code | 36 |
41902628523 | numero = list();
# Read numbers until the user chooses to stop; duplicates are rejected.
while (True):
    num = float(input("Informe um valor: "));
    if num in numero:
        print("O valor já foi adicionado anteriormente");
    else:
        numero.append(num);
        print("Valor adicionado com sucesso");
    # Keep asking until the answer is exactly 'S' or 'N'.
    opt = str(input("Deseja continuar/ [S/N]: ")).strip().upper();
    while((opt != 'S') and (opt != 'N')):
        opt = str(input("Informe apenas os caracteres solicitidos. Deseja continuar/ [S/N]: ")).strip().upper();
    if opt == 'N':
        break;
# Show the collected values in ascending order, two decimals each.
numero.sort();
for n in numero:
    print(f"{n:.2f}", end = " ");
print("")
| renansald/Python | cursos_em_video/Desafio79.py | Desafio79.py | py | 576 | python | pt | code | 0 | github-code | 36 |
19993372010 | import re
import argparse
import japanize_matplotlib
import matplotlib.pyplot as plt
from moviepy.editor import VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage
def validate_date(date):
    """argparse type-checker for a YYYY/M/D date string.

    :param date: raw command-line value
    :return: the string unchanged when it is a valid date prefix format
    :raise argparse.ArgumentTypeError: when the format does not match
    """
    # \Z anchors the match at the end, so trailing garbage such as
    # "2020/1/1junk" (previously accepted by the unanchored pattern) is
    # rejected too.
    if re.match(r"^\d{4}/\d{1,2}/\d{1,2}\Z", date):
        return date
    else:
        raise argparse.ArgumentTypeError(
            f"{date} is not a valid date\ne.g. 2020/1/1")
parser = argparse.ArgumentParser()
parser.add_argument("fileName", help="LINEのトーク履歴のファイル名")
parser.add_argument("-o", "--output", help="出力ファイル名")
parser.add_argument("-s", "--start-date",
                    help="開始日 | 例: 2020/1/1", type=validate_date)
parser.add_argument("-dpi", "--dpi", help="解像度", default=150, type=int)
parser.add_argument("-lowest", "--lowest",
                    help="最低メッセージ数", default=0, type=int)
args = parser.parse_args()
fileName = args.fileName
output = args.output
startDate = args.start_date
dpi = args.dpi
lowest = args.lowest
# Parse the exported LINE chat history file
try:
    with open(fileName, "r", encoding="utf-8") as f:
        data = f.read()
except FileNotFoundError:
    print('\033[31m' + f'{fileName}が見つかりません。' + '\033[0m')
    exit()
print(f'{fileName}を分析を開始します。')
# user_messages maps user name -> {ISO date -> message count}.
nowDate = None
isStart = False
user_messages = {}
for line in data.splitlines():
    try:
        # Date header lines look like "2020/1/1(水)"; remember the current day.
        if re.match(r"^\d{4}/\d{1,2}/\d{1,2}\(.+\)", line):
            if startDate:
                if line.startswith(startDate):
                    isStart = True
                if isStart:
                    nowDate = f"{line.split('/')[0]}-{line.split('/')[1].zfill(2)}-{line.split('/')[2].split('(')[0].zfill(2)}"
            else:
                nowDate = f"{line.split('/')[0]}-{line.split('/')[1].zfill(2)}-{line.split('/')[2].split('(')[0].zfill(2)}"
        if nowDate is not None and line != nowDate and line != "":
            # Message lines start with "HH:MM\t<name>\t<text>".
            if re.match(r"\d{1,2}:\d{1,2}", line):
                if line.endswith("が退出しました。"):
                    continue
                name = line.split("\t")[1]
                if name not in user_messages:
                    user_messages[name] = {}
                if nowDate not in user_messages[name]:
                    user_messages[name][nowDate] = 0
                user_messages[name][nowDate] += 1
    except Exception as e:
        # Best-effort parsing: report the malformed line and keep going.
        lineCount = len(data.splitlines())
        lineIndex = data.splitlines().index(line) + 1
        print(
            '\033[31m' + f'{lineIndex}行目のデータが正しくありません。' + '\033[0m')
# All distinct days, sorted; these drive the animation timeline.
dates = sorted(
    list(set([date for user in user_messages.values() for date in user.keys()])))
if dates == []:
    print('\033[31m' + 'データが見つかりませんでした。' + '\033[0m')
    exit()
print('\033[32m' + f'{dates[0]} から {dates[-1]}のデータを読み込みました。' + '\033[0m')
print(f'ユーザー数: {len(user_messages)}')
print(f'日数: {len(user_messages[list(user_messages.keys())[0]])}')
print(
    f'メッセージ数: {sum([sum(user.values()) for user in user_messages.values()])}')
print('----------------------------------------')
# Assign each user a stable matplotlib color index ("C0", "C1", ...).
userColor = {}
for i, user in enumerate(user_messages.keys()):
    userColor[user] = i
print('\033[32m' + 'グラフを作成します。' + '\033[0m')
# フレームを作成
def make_frame(t):
    """Render one animation frame for time *t* (seconds).

    Ten frames advance the timeline by one day. Returns the figure as a
    numpy image for moviepy's VideoClip.
    """
    plt.rcParams["figure.figsize"] = (14, 10)
    plt.rcParams["figure.dpi"] = dpi
    plt.rcParams["font.size"] = 14
    plt.clf()
    fig = plt.figure()
    ax = fig.gca()
    time_index = int(t * 10)
    # Cumulative message count per user up to the current date.
    user_counts = {}
    for user, messages in user_messages.items():
        # Hoisted: the original recomputed sum(values) three times.
        total = sum(messages.get(date, 0) for date in dates[:time_index])
        if total > 0 and total > lowest:
            user_counts[user] = total
    # One horizontal bar per user, sorted ascending so the busiest is on top.
    # BUG FIX: each bar and its value label were previously drawn twice
    # (a second identical barh/text pass), doubling the artists per frame.
    sorted_users = sorted(user_counts, key=user_counts.get, reverse=False)
    y_pos = range(len(user_counts))
    for user_index, user in enumerate(sorted_users):
        ax.barh(
            y_pos[user_index],
            user_counts[user],
            color="C{}".format(userColor[user]),
            label=user,
        )
        ax.text(
            user_counts[user] + 0.2,
            y_pos[user_index],
            str(user_counts[user]),
            va="center",
        )
    # Axis labels and tick configuration.
    ax.set_xlabel("メッセージ数")
    ax.xaxis.set_label_position('top')
    ax.tick_params(top=True, labeltop=True, bottom=False, labelbottom=False)
    ax.text(0, len(user_counts) + 1,
            dates[time_index - 1], ha="left", va="center")
    ax.set_yticks(y_pos)
    ax.set_yticklabels(map(lambda x: x[:8], sorted_users))
    plt.gcf().tight_layout()
    return mplfig_to_npimage(plt.gcf())
# Render the animation and write it out as <output>.mp4
if output == None:
    # NOTE(review): `output == None` works; `output is None` is the idiom.
    output = fileName.split(".")[0]
try:
    # One day per 0.1 s of video at 10 fps.
    animation = VideoClip(make_frame, duration=len(dates) / 10)
    animation.write_videofile(output + ".mp4", fps=10,
                              codec="libx264", audio=False)
except KeyboardInterrupt:
    print('\033[31m' + 'キャンセルしました。' + '\033[0m')
    exit()
print('\033[32m' + f'{output}.mp4を作成しました。' + '\033[0m')
| HRTK92/line-to-movie | line_to_video.py | line_to_video.py | py | 5,704 | python | en | code | 0 | github-code | 36 |
25718308081 | read_me = """Python 3 script that logs data from the Meraki dashboard into a MongoDB database.
You will need to have MongoDB installed and supply a configuration file for this script to run.
You can get the MongoDB Community Server here: https://www.mongodb.com/try/download/community
You can find a sample configuration file here:
https://github.com/meraki/automation-scripts/blob/master/offline_logging/config.yaml
Script syntax:
python offline_logging.py -c <config_file>
Required Python 3 modules:
requests
pyyaml
pymongo
To install these Python 3 modules via pip you can use the following commands:
pip install requests
pip install pyyaml
pip install pymongo
Depending on your operating system and Python environment, you may need to use commands
"python3" and "pip3" instead of "python" and "pip".
View the created database with a MongoDB viewing tool such as MongoDB Compass:
https://www.mongodb.com/products/compass
A version of MongoDB Compass can be installed with the MongoDB Community Server.
"""
import sys, getopt, yaml, time, datetime, pymongo
from urllib.parse import urlencode
from requests import Session, utils
class NoRebuildAuthSession(Session):
    """requests.Session subclass that keeps the Authorization header across
    redirects (the stock Session strips it when rebuilding auth)."""
    def rebuild_auth(self, prepared_request, response):
        """
        This method is intentionally empty. Needed to prevent auth header stripping on redirect. More info:
        https://stackoverflow.com/questions/60358216/python-requests-post-request-dropping-authorization-header
        """
API_MAX_RETRIES = 3  # give up after this many retries on rate limiting
API_CONNECT_TIMEOUT = 60  # seconds to wait for the TCP connection
API_TRANSMIT_TIMEOUT = 60  # seconds to wait for the response body
API_STATUS_RATE_LIMIT = 429  # HTTP status Meraki returns when rate limited
#Set to True or False to enable/disable console logging of sent API requests
FLAG_REQUEST_VERBOSE = True
API_BASE_URL = "https://api.meraki.com/api/v1"
def merakiRequest(p_apiKey, p_httpVerb, p_endpoint, p_additionalHeaders=None, p_queryItems=None,
        p_requestBody=None, p_verbose=False, p_retry=0):
    """Sends one request to the Meraki Dashboard API.

    Handles rate limiting (HTTP 429) by honoring the Retry-After header and
    retrying up to API_MAX_RETRIES times, and transparently follows "Link"
    pagination headers, concatenating all pages into one response body.

    Args:
        p_apiKey: Dashboard API key used for the Bearer authorization header.
        p_httpVerb: HTTP method: GET, PUT, POST or DELETE (case-insensitive).
        p_endpoint: Endpoint path appended to API_BASE_URL.
        p_additionalHeaders: Optional dict of extra HTTP headers.
        p_queryItems: Optional dict of query-string parameters.
        p_requestBody: Optional JSON-serializable body for PUT/POST.
        p_verbose: If True, prints requests/responses to the console.
        p_retry: Internal recursion counter for rate-limit retries.

    Returns:
        Tuple (success, errors, responseHeaders, responseBody).
    """
    if p_retry > API_MAX_RETRIES:
        if p_verbose:
            print("ERROR: Reached max retries")
        return False, None, None, None
    headers = {"Authorization": "Bearer " + p_apiKey}
    if p_additionalHeaders is not None:
        headers.update(p_additionalHeaders)
    query = ""
    if p_queryItems is not None:
        query = "?" + urlencode(p_queryItems, True)
    url = API_BASE_URL + p_endpoint + query
    verb = p_httpVerb.upper()
    timeouts = (API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)
    session = NoRebuildAuthSession()
    try:
        if p_verbose:
            print(verb, url)
        if verb == "GET":
            r = session.get(url, headers=headers, timeout=timeouts)
        elif verb == "PUT":
            # Bug fix: the request is now sent even when p_requestBody is None;
            # previously "r" was left unassigned in that case and crashed below.
            if p_verbose and p_requestBody is not None:
                print("body", p_requestBody)
            r = session.put(url, headers=headers, json=p_requestBody, timeout=timeouts)
        elif verb == "POST":
            if p_verbose and p_requestBody is not None:
                print("body", p_requestBody)
            r = session.post(url, headers=headers, json=p_requestBody, timeout=timeouts)
        elif verb == "DELETE":
            r = session.delete(url, headers=headers, timeout=timeouts)
        else:
            # Unsupported HTTP verb.
            return False, None, None, None
    except Exception:
        # Narrowed from a bare "except:" so KeyboardInterrupt/SystemExit propagate.
        return False, None, None, None
    if p_verbose:
        print(r.status_code)
    # Bug fix: range(200, 299) excluded status 299; treat the whole 2xx range as success.
    success = 200 <= r.status_code < 300
    errors = None
    responseHeaders = None
    responseBody = None
    if r.status_code == API_STATUS_RATE_LIMIT:
        if p_verbose:
            print("INFO: Hit max request rate. Retrying %s after %s seconds" % (p_retry+1, r.headers["Retry-After"]))
        time.sleep(int(r.headers["Retry-After"]))
        success, errors, responseHeaders, responseBody = merakiRequest(p_apiKey, p_httpVerb, p_endpoint, p_additionalHeaders,
            p_queryItems, p_requestBody, p_verbose, p_retry+1)
        return success, errors, responseHeaders, responseBody
    try:
        rjson = r.json()
    except Exception:
        rjson = None
    if rjson is not None:
        if "errors" in rjson:
            errors = rjson["errors"]
            if p_verbose:
                print(errors)
        else:
            responseBody = rjson
    if "Link" in r.headers:
        # Pagination: fetch the "next" page and append it to this page's body.
        parsedLinks = utils.parse_header_links(r.headers["Link"])
        for link in parsedLinks:
            if link["rel"] == "next":
                if p_verbose:
                    print("Next page:", link["url"])
                splitLink = link["url"].split("/api/v1")
                success, errors, responseHeaders, nextBody = merakiRequest(p_apiKey, p_httpVerb, splitLink[1],
                    p_additionalHeaders=p_additionalHeaders,
                    p_requestBody=p_requestBody,
                    p_verbose=p_verbose)
                if success:
                    if responseBody is not None:
                        responseBody = responseBody + nextBody
                else:
                    responseBody = None
    return success, errors, responseHeaders, responseBody
def getNetworks(p_apiKey, p_organizationId):
    """Returns the list of networks belonging to the given organization."""
    return merakiRequest(
        p_apiKey, "GET",
        "/organizations/%s/networks" % p_organizationId,
        p_verbose=FLAG_REQUEST_VERBOSE)
def getClients(p_apiKey, p_networkId, p_timespan):
    """Returns the clients seen on the network within the last p_timespan seconds."""
    return merakiRequest(
        p_apiKey, "GET",
        "/networks/%s/clients" % p_networkId,
        p_queryItems={"timespan": p_timespan},
        p_verbose=FLAG_REQUEST_VERBOSE)
def getApplicationUsage(p_apiKey, p_networkId, p_clientsStr, p_timespan):
    """Returns per-application usage for the comma-separated client ids in p_clientsStr."""
    return merakiRequest(
        p_apiKey, "GET",
        "/networks/%s/clients/applicationUsage" % p_networkId,
        p_queryItems={"clients": p_clientsStr, "timespan": p_timespan},
        p_verbose=FLAG_REQUEST_VERBOSE)
def getClientTrafficHistory(p_apiKey, p_networkId, p_clientId):
    """Returns the traffic history of a single client on the given network."""
    return merakiRequest(
        p_apiKey, "GET",
        "/networks/%s/clients/%s/trafficHistory" % (p_networkId, p_clientId),
        p_verbose=FLAG_REQUEST_VERBOSE)
def getNetworkMerakiAuthUsers(p_apiKey, p_networkId):
    """Returns the Meraki-authentication users configured on the network."""
    return merakiRequest(
        p_apiKey, "GET",
        "/networks/%s/merakiAuthUsers" % p_networkId,
        p_verbose=FLAG_REQUEST_VERBOSE)
def getNetworkSmDevices(p_apiKey, p_networkId):
    """Returns the Systems Manager devices of the network with a fixed field selection."""
    requested_fields = ['ip', 'systemType', 'lastConnected', 'location', 'lastUser',
                        'ownerEmail', 'ownerUsername', 'imei', 'simCarrierNetwork']
    return merakiRequest(
        p_apiKey, "GET",
        "/networks/%s/sm/devices" % p_networkId,
        p_queryItems={"fields[]": requested_fields},
        p_verbose=FLAG_REQUEST_VERBOSE)
def getOrganizationAdmins(p_apiKey, p_organizationId):
    """Returns the dashboard administrators of the given organization."""
    return merakiRequest(
        p_apiKey, "GET",
        "/organizations/%s/admins" % p_organizationId,
        p_verbose=FLAG_REQUEST_VERBOSE)
def kill_script():
    # Prints the usage text and aborts with exit status 2 (the usage-error convention).
    print(read_me)
    sys.exit(2)
def load_config(p_file):
    """Parses the YAML configuration file at *p_file* and returns it as a dict."""
    with open(p_file) as config_file:
        return yaml.load(config_file, Loader=yaml.FullLoader)
def filter_networks(config_sources, networks):
    """Returns the subset of *networks* selected by the source configuration.

    A network is included if 'include_all_networks' is set, or if its name,
    id, or one of its tags matches the corresponding filter list (each list
    may be None, meaning "no filter of this kind"). Each network appears at
    most once in the result, in its original order.

    Fix: removed a leftover debug print ("match id ...") from the id branch.
    """
    if config_sources['include_all_networks']:
        return networks
    names = config_sources['network_names']
    ids = config_sources['network_ids']
    tags = config_sources['network_tags']
    result = []
    for net in networks:
        if names is not None and net['name'] in names:
            result.append(net)
            continue
        if ids is not None and net['id'] in ids:
            result.append(net)
            continue
        if tags is not None:
            for tag in tags:
                if tag in net['tags']:
                    result.append(net)
                    break
    return result
def filter_admins(p_admins, p_networks, p_tags):
    """Returns admins with org-wide access, or net/tag access matching the filters.

    An admin is included when their 'orgAccess' is not 'none', when one of
    their networks is in *p_networks*, or when one of their tags is in
    *p_tags*.

    Fix: tolerates p_tags being None (the config allows a null tag list);
    previously this raised TypeError for tag-scoped admins. Also hoists the
    filtered-network id set out of the per-admin loop.
    """
    tags = p_tags or ()
    network_ids = frozenset(net['id'] for net in p_networks)
    result = []
    for admin in p_admins:
        include_admin = admin['orgAccess'] != 'none'
        if not include_admin:
            include_admin = any(anet['id'] in network_ids for anet in admin['networks'])
        if not include_admin:
            include_admin = any(atag['tag'] in tags for atag in admin['tags'])
        if include_admin:
            result.append(admin)
    return result
def log_to_database(db, document, collection, mode='append', keyValuePair=None):
    """Writes *document* into *collection* of the MongoDB database *db*.

    mode 'append' inserts a new document; mode 'update' upserts the document
    matching *keyValuePair*. Returns True on success, False on a database error.
    """
    coll = db[collection]
    try:
        if mode == 'append':
            coll.insert_one(document)
        elif mode == 'update':
            coll.update_one(keyValuePair, {"$set": document}, upsert=True)
        return True
    except Exception as exn:
        print(exn)
        if mode == 'append':
            print("ERROR: Could not create document in database")
        else:
            print("ERROR: Could not update document in database")
        return False
def database_delete_all_matches(db, collection, filter):
    """Deletes every document in *collection* matching *filter*.

    Returns True on success, False if the database operation failed.
    """
    try:
        db[collection].delete_many(filter)
    except Exception as exn:
        print(exn)
        print("ERROR: Could not delete document in database")
        return False
    return True
def split_history_array(history, max_records):
    """Splits *history* into consecutive chunks of at most *max_records* records.

    The final chunk holds the remainder; an empty history yields an empty list.
    """
    pages = []
    current = []
    for record in history:
        current.append(record)
        if len(current) >= max_records:
            pages.append(current)
            current = []
    if current:
        pages.append(current)
    return pages
def perform_scan(config):
    """Runs one full logging pass: fetches the configured Meraki endpoints for
    every filtered network and writes the results into MongoDB.

    The endpoint sections below are each gated on config['endpoints'][...]:
    organization admins, network clients, per-client application usage,
    per-client traffic history, Meraki auth users and SM devices.
    """
    print(str(datetime.datetime.now()) + " -- Starting scan")
    api_key = config['meraki_dashboard_api']['api_key']
    org_id = config['meraki_dashboard_api']['organization_id']
    # Timespan (seconds) passed to timespan-based endpoints.
    scan_interval = config['scan_interval_minutes']*60
    success, errors, headers, all_networks = getNetworks(api_key, org_id)
    if not success:
        print("ERROR: Unable to get networks' list")
    else:
        filtered_networks = filter_networks(config['sources'], all_networks)
        mongo_client = pymongo.MongoClient("mongodb://" + config['mongodb']['host'] + ":" + str(config['mongodb']['port']) + "/")
        db = mongo_client[config['mongodb']['database_name']]
        # Organization-level endpoint: dashboard administrators.
        if 'getOrganizationAdmins' in config['endpoints'] and config['endpoints']['getOrganizationAdmins']['enabled']:
            success, errors, headers, all_admins = getOrganizationAdmins(api_key, org_id)
            if not all_admins is None:
                admins = filter_admins(all_admins, filtered_networks, config['sources']['network_tags'])
                for admin in admins:
                    log_to_database(db, admin, config['endpoints']['getOrganizationAdmins']['collection'],
                        config['endpoints']['getOrganizationAdmins']['mode'],
                        keyValuePair={'id': admin['id']})
        for network in filtered_networks:
            # value used as a flag if "getNetworkClients" is disabled
            clients = None
            if 'getNetworkClients' in config['endpoints'] and config['endpoints']['getNetworkClients']['enabled']:
                success, errors, headers, raw_clients = getClients(api_key, network['id'], scan_interval)
                if raw_clients is None:
                    # NOTE(review): "Cloud" is a typo for "Could" in this message.
                    print("ERROR: Cloud not fetch clients for net %s" % network['id'])
                else:
                    # scan_time is reused by the traffic-history section below;
                    # that section only runs when clients were fetched here.
                    scan_time = datetime.datetime.now()
                    if config['endpoints']['getNetworkClients']['ignore_manufacturer_meraki']:
                        clients = []
                        for client in raw_clients:
                            if not client['manufacturer'] in ["Cisco Meraki", "Meraki"]:
                                clients.append(client)
                    else:
                        clients = raw_clients
                    for client in clients:
                        document = client
                        document['scanTime'] = scan_time
                        document['scanIntervalMinutes'] = config['scan_interval_minutes']
                        document['networkId'] = network['id']
                        document['networkName'] = network['name']
                        log_to_database(db, document, config['endpoints']['getNetworkClients']['collection'],
                            config['endpoints']['getNetworkClients']['mode'])
            if 'getNetworkClientsApplicationUsage' in config['endpoints'] and config['endpoints']['getNetworkClientsApplicationUsage']['enabled']:
                if clients is None:
                    print("ERROR: Client list must be fetched for getNetworkClientsApplicationUsage")
                else:
                    # Build the comma-separated client id list expected by the API.
                    client_list = ""
                    for client in clients:
                        if client_list != "":
                            client_list += ","
                        client_list += client['id']
                    success, errors, headers, usage = getApplicationUsage(api_key, network['id'], client_list, scan_interval)
                    if usage is None:
                        # NOTE(review): "Cloud" is a typo for "Could" in this message.
                        print("ERROR: Cloud not fetch clients' usage for net %s" % network['id'])
                    else:
                        scan_time = datetime.datetime.now()
                        for item in usage:
                            document = item
                            document['scanTime'] = scan_time
                            document['scanIntervalMinutes'] = config['scan_interval_minutes']
                            document['networkId'] = network['id']
                            document['networkName'] = network['name']
                            log_to_database(db, document, config['endpoints']['getNetworkClientsApplicationUsage']['collection'],
                                config['endpoints']['getNetworkClientsApplicationUsage']['mode'])
            if 'getNetworkClientTrafficHistory' in config['endpoints'] and config['endpoints']['getNetworkClientTrafficHistory']['enabled']:
                if clients is None:
                    print("ERROR: Client list must be fetched for getNetworkClientTrafficHistory")
                else:
                    for client in clients:
                        success, errors, headers, traffic_history = getClientTrafficHistory(api_key, network['id'], client['id'])
                        if not traffic_history is None:
                            # Large histories are split across several documents ("pages").
                            history_pages = split_history_array(traffic_history,
                                config['endpoints']['getNetworkClientTrafficHistory']['max_history_records_per_document'])
                            total_pages = len(history_pages)
                            if total_pages > 0:
                                base_info = {
                                    'clientId' : client['id'],
                                    'clientMac' : client['mac'],
                                    'clientIp' : client['ip'],
                                    'clientDescription' : client['description'],
                                    'networkId' : network['id'],
                                    'networkName' : network['name'],
                                    'scanTime' : scan_time,
                                    'scanIntervalMinutes' : config['scan_interval_minutes'],
                                    'totalPages' : total_pages
                                }
                                filter = {
                                    'clientId' : base_info['clientId'],
                                    'networkId' : base_info['networkId']
                                }
                                # In 'update' mode, old pages are removed before re-inserting.
                                if config['endpoints']['getNetworkClientTrafficHistory']['mode'] == 'update':
                                    success = database_delete_all_matches(db,
                                        config['endpoints']['getNetworkClientTrafficHistory']['collection'], filter)
                                page_number = 0
                                for page in history_pages:
                                    page_number += 1
                                    document = {}
                                    for key in base_info:
                                        document[key] = base_info[key]
                                    document['pageNumber'] = page_number
                                    document['trafficHistory'] = page
                                    success = log_to_database(db, document, config['endpoints']['getNetworkClientTrafficHistory']['collection'], mode="append")
                                    if not success:
                                        print("clientId : %s" % document['clientId'])
                                        print("clientMac : %s" % document['clientMac'])
                                        print("clientIp : %s" % document['clientIp'])
                                        print("clientDescription : %s" % document['clientDescription'])
                                        print("networkId : %s" % document['networkId'])
                                        print("networkName : %s" % document['networkName'])
                                        print("pageNumber : %s" % document['pageNumber'])
                                        print("trafficHistory record count : %s" % len(document['trafficHistory']))
            if 'getNetworkMerakiAuthUsers' in config['endpoints'] and config['endpoints']['getNetworkMerakiAuthUsers']['enabled']:
                success, errors, headers, auth_users = getNetworkMerakiAuthUsers(api_key, network['id'])
                # Optionally merge in users inherited from the network's config template.
                if 'configTemplateId' in network and config['endpoints']['getNetworkMerakiAuthUsers']['include_template_users']:
                    success, errors, headers, template_users = getNetworkMerakiAuthUsers(api_key, network['configTemplateId'])
                    if not template_users is None:
                        if not auth_users is None:
                            auth_users += template_users
                        else:
                            auth_users = template_users
                if not auth_users is None:
                    for user in auth_users:
                        document = user
                        document['networkId'] = network['id']
                        log_to_database(db, document, config['endpoints']['getNetworkMerakiAuthUsers']['collection'],
                            config['endpoints']['getNetworkMerakiAuthUsers']['mode'],
                            keyValuePair={'id': user['id'], 'networkId': network['id']})
            if 'getNetworkSmDevices' in config['endpoints'] and config['endpoints']['getNetworkSmDevices']['enabled']:
                if 'systemsManager' in network['productTypes']:
                    success, errors, headers, sm_devices = getNetworkSmDevices(api_key, network['id'])
                    if not sm_devices is None:
                        tag_disabled = not config['endpoints']['getNetworkSmDevices']['filter_by_device_tag_enabled']
                        tag_filter = config['endpoints']['getNetworkSmDevices']['target_device_tag']
                        scan_time = datetime.datetime.now()
                        for device in sm_devices:
                            if tag_disabled or tag_filter in device['tags']:
                                document = {
                                    'scanTime': scan_time,
                                    'scanIntervalMinutes': config['scan_interval_minutes'],
                                    'networkId': network['id'],
                                    'networkName': network['name']
                                }
                                for key in device:
                                    document[key] = device[key]
                                log_to_database(db, document,
                                    config['endpoints']['getNetworkSmDevices']['collection'],
                                    config['endpoints']['getNetworkSmDevices']['mode'],
                                    keyValuePair={'id': device['id']})
    print(str(datetime.datetime.now()) + " -- Scan complete")
def main(argv):
    """Entry point: parses "-c <config_file>", loads the config, then scans forever.

    Exits with status 2 (via kill_script) when the argument or the config
    file is missing or unparseable.
    """
    arg_config_file = None
    try:
        opts, args = getopt.getopt(argv, 'c:')
    except getopt.GetoptError:
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-c':
            arg_config_file = arg
    if arg_config_file is None:
        kill_script()
    try:
        config = load_config(arg_config_file)
        print(str(datetime.datetime.now()) + " -- Initializing script")
    except Exception:
        # Narrowed from a bare "except:" so Ctrl-C is not swallowed here.
        kill_script()
    # Scan forever, sleeping scan_interval_minutes between passes.
    while True:
        perform_scan(config)
        print(str(datetime.datetime.now()) + " -- Next scan in " + str(config['scan_interval_minutes']) + " minutes")
        time.sleep(config['scan_interval_minutes']*60)
# Fix: removed repo-metadata junk that was fused onto the final line.
if __name__ == '__main__':
    main(sys.argv[1:])
# Draws an n x n square of "*" with a "+" cross through the middle row and column.
# (Fix: removed extraction junk fused onto the first line.)
n = int(input("Please enter a square size "))
# Fix: keep prompting until the size is odd. The original re-prompted only once
# ("if"), so a second even entry left n even and the program drew nothing.
while n % 2 == 0:
    n = int(input("Please enter uneven number "))
for i in range(n):
    for j in range(n):
        # Middle row or middle column (except the line-ending cell) gets a "+".
        if ((i == (n-1)/2) and (j != (n-1))) or (i != (n-1)/2 and j == (n-1)/2):
            print("+ ", end=" ")
        elif (i == (n - 1) / 2) and (j == n - 1):
            print("+ ")
        elif (i != (n - 1) / 2) and j != (n - 1)/2 and j != (n - 1):
            print("* ", end=" ")
        else:
            print("* ")
input("press enter to exit")
| OkanZengin/Python_Learning | Games/star_and_sum.py | star_and_sum.py | py | 565 | python | en | code | 0 | github-code | 36 |
from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import open
from builtins import super
from builtins import str
from future import standard_library
from future.utils import with_metaclass
standard_library.install_aliases()
import abc
import http.client
import http.server
import io
import logging
import os
import socketserver
from . import io as avro_io
from . import protocol
from . import schema
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Constants
def LoadResource(name):
  """Returns the text content of the named resource file located next to this module."""
  rsrc_path = os.path.join(os.path.dirname(__file__), name)
  with open(rsrc_path, 'r') as rsrc_file:
    return rsrc_file.read()
# Handshake schema is pulled in during build
HANDSHAKE_REQUEST_SCHEMA_JSON = LoadResource('HandshakeRequest.avsc')
HANDSHAKE_RESPONSE_SCHEMA_JSON = LoadResource('HandshakeResponse.avsc')
HANDSHAKE_REQUEST_SCHEMA = schema.Parse(HANDSHAKE_REQUEST_SCHEMA_JSON)
HANDSHAKE_RESPONSE_SCHEMA = schema.Parse(HANDSHAKE_RESPONSE_SCHEMA_JSON)
# Reader/writer pairs for each side of the handshake exchange.
HANDSHAKE_REQUESTOR_WRITER = avro_io.DatumWriter(HANDSHAKE_REQUEST_SCHEMA)
HANDSHAKE_REQUESTOR_READER = avro_io.DatumReader(HANDSHAKE_RESPONSE_SCHEMA)
HANDSHAKE_RESPONDER_WRITER = avro_io.DatumWriter(HANDSHAKE_RESPONSE_SCHEMA)
HANDSHAKE_RESPONDER_READER = avro_io.DatumReader(HANDSHAKE_REQUEST_SCHEMA)
# Request/response metadata is a map of string -> bytes.
META_SCHEMA = schema.Parse('{"type": "map", "values": "bytes"}')
META_WRITER = avro_io.DatumWriter(META_SCHEMA)
META_READER = avro_io.DatumReader(META_SCHEMA)
# Union schema used to serialize unexpected (system) errors as a plain string.
SYSTEM_ERROR_SCHEMA = schema.Parse('["string"]')
# Content type used for Avro binary RPC over HTTP.
AVRO_RPC_MIME = 'avro/binary'
# protocol cache
# Map: remote name -> remote MD5 hash
_REMOTE_HASHES = {}
# Decoder/encoder for a 32 bits big-endian integer.
UINT32_BE = avro_io.STRUCT_INT
# Default size of the buffers use to frame messages:
BUFFER_SIZE = 8192
# ------------------------------------------------------------------------------
# Exceptions
class AvroRemoteException(schema.AvroException):
  """
  Raised when an error message is sent by an Avro requestor or responder.
  """
  def __init__(self, fail_msg=None):
    super().__init__(fail_msg)
class ConnectionClosedException(schema.AvroException):
  # Raised by the framed transport (FramedReader) when the underlying stream
  # ends in the middle of a frame or frame header.
  pass
# ------------------------------------------------------------------------------
# Base IPC Classes (Requestor/Responder)
class BaseRequestor( with_metaclass( abc.ABCMeta, object ) ):
  """Base class for the client side of a protocol interaction."""
  def __init__(self, local_protocol, transceiver):
    """Initializes a new requestor object.
    Args:
      local_protocol: Avro Protocol describing the messages sent and received.
      transceiver: Transceiver instance to channel messages through.
    """
    self._local_protocol = local_protocol
    self._transceiver = transceiver
    # Remote protocol, hash and send flag are discovered during the handshake:
    self._remote_protocol = None
    self._remote_hash = None
    self._send_protocol = None
  @property
  def local_protocol(self):
    """Returns: the Avro Protocol describing the messages sent and received."""
    return self._local_protocol
  @property
  def transceiver(self):
    """Returns: the underlying channel used by this requestor."""
    return self._transceiver
  @abc.abstractmethod
  def _IssueRequest(self, call_request, message_name, request_datum):
    """TODO: Document this method.
    Args:
      call_request: ???
      message_name: Name of the message.
      request_datum: ???
    Returns:
      ???
    """
    # NOTE(review): 'Error' is not defined in this module; calling this
    # abstract method directly would raise NameError, not Error — confirm.
    raise Error('Abstract method')
  def Request(self, message_name, request_datum):
    """Writes a request message and reads a response or error message.
    Args:
      message_name: Name of the IPC method.
      request_datum: IPC request.
    Returns:
      The IPC response.
    """
    # build handshake and call request
    buffer_writer = io.BytesIO()
    buffer_encoder = avro_io.BinaryEncoder(buffer_writer)
    self._WriteHandshakeRequest(buffer_encoder)
    self._WriteCallRequest(message_name, request_datum, buffer_encoder)
    # send the handshake and call request; block until call response
    call_request = buffer_writer.getvalue()
    return self._IssueRequest(call_request, message_name, request_datum)
  def _WriteHandshakeRequest(self, encoder):
    """Emits the handshake request.
    Args:
      encoder: Encoder to write the handshake request into.
    """
    local_hash = self._local_protocol.md5
    # if self._remote_hash is None:
    #   remote_name = self.transceiver.remote_name
    #   self._remote_hash = _REMOTE_HASHES.get(remote_name)
    # First contact: optimistically assume the server speaks our protocol.
    if self._remote_hash is None:
      self._remote_hash = local_hash
      self._remote_protocol = self._local_protocol
    request_datum = {
      'clientHash': local_hash,
      'serverHash': self._remote_hash,
    }
    # The full protocol text is only sent after a handshake mismatch.
    if self._send_protocol:
      request_datum['clientProtocol'] = str(self._local_protocol)
    logger.info('Sending handshake request: %s', request_datum)
    HANDSHAKE_REQUESTOR_WRITER.write(request_datum, encoder)
  def _WriteCallRequest(self, message_name, request_datum, encoder):
    """
    The format of a call request is:
      * request metadata, a map with values of type bytes
      * the message name, an Avro string, followed by
      * the message parameters. Parameters are serialized according to
        the message's request declaration.
    """
    # request metadata (not yet implemented)
    request_metadata = {}
    META_WRITER.write(request_metadata, encoder)
    # Identify message to send:
    message = self.local_protocol.message_map.get(message_name)
    if message is None:
      raise schema.AvroException('Unknown message: %s' % message_name)
    encoder.write_utf8(message.name)
    # message parameters
    self._WriteRequest(message.request, request_datum, encoder)
  def _WriteRequest(self, request_schema, request_datum, encoder):
    # Serializes the request parameters per the message's request schema.
    logger.info('writing request: %s', request_datum)
    datum_writer = avro_io.DatumWriter(request_schema)
    datum_writer.write(request_datum, encoder)
  def _ReadHandshakeResponse(self, decoder):
    """Reads and processes the handshake response message.
    Args:
      decoder: Decoder to read messages from.
    Returns:
      call-response exists (boolean) ???
    Raises:
      schema.AvroException on ???
    """
    handshake_response = HANDSHAKE_REQUESTOR_READER.read(decoder)
    logger.info('Processing handshake response: %s', handshake_response)
    match = handshake_response['match']
    if match == 'BOTH':
      # Both client and server protocol hashes match:
      self._send_protocol = False
      return True
    elif match == 'CLIENT':
      # Client's side hash mismatch:
      self._remote_protocol = \
          protocol.Parse(handshake_response['serverProtocol'])
      self._remote_hash = handshake_response['serverHash']
      self._send_protocol = False
      return True
    elif match == 'NONE':
      # Neither client nor server match:
      self._remote_protocol = \
          protocol.Parse(handshake_response['serverProtocol'])
      self._remote_hash = handshake_response['serverHash']
      # Next request must carry the full client protocol text.
      self._send_protocol = True
      return False
    else:
      raise schema.AvroException('handshake_response.match=%r' % match)
  def _ReadCallResponse(self, message_name, decoder):
    """Reads and processes a method call response.
    The format of a call response is:
      - response metadata, a map with values of type bytes
      - a one-byte error flag boolean, followed by either:
        - if the error flag is false,
          the message response, serialized per the message's response schema.
        - if the error flag is true,
          the error, serialized per the message's error union schema.
    Args:
      message_name:
      decoder:
    Returns:
      ???
    Raises:
      schema.AvroException on ???
    """
    # response metadata
    response_metadata = META_READER.read(decoder)
    # remote response schema
    remote_message_schema = self._remote_protocol.message_map.get(message_name)
    if remote_message_schema is None:
      raise schema.AvroException('Unknown remote message: %s' % message_name)
    # local response schema
    local_message_schema = self._local_protocol.message_map.get(message_name)
    if local_message_schema is None:
      raise schema.AvroException('Unknown local message: %s' % message_name)
    # error flag
    if not decoder.read_boolean():
      # Success: resolve the remote (writer) schema against the local (reader) one.
      writer_schema = remote_message_schema.response
      reader_schema = local_message_schema.response
      return self._ReadResponse(writer_schema, reader_schema, decoder)
    else:
      # Declared error: decoded per the error union schema and raised.
      writer_schema = remote_message_schema.errors
      reader_schema = local_message_schema.errors
      raise self._ReadError(writer_schema, reader_schema, decoder)
  def _ReadResponse(self, writer_schema, reader_schema, decoder):
    # Decodes the success payload with schema resolution.
    datum_reader = avro_io.DatumReader(writer_schema, reader_schema)
    result = datum_reader.read(decoder)
    return result
  def _ReadError(self, writer_schema, reader_schema, decoder):
    # Decodes the error payload and wraps it in an AvroRemoteException.
    datum_reader = avro_io.DatumReader(writer_schema, reader_schema)
    return AvroRemoteException(datum_reader.read(decoder))
class Requestor(BaseRequestor):
  """Concrete requestor implementation."""
  def _IssueRequest(self, call_request, message_name, request_datum):
    """Sends the serialized request and decodes the framed call response."""
    serialized_response = self.transceiver.Transceive(call_request)
    decoder = avro_io.BinaryDecoder(io.BytesIO(serialized_response))
    if self._ReadHandshakeResponse(decoder):
      return self._ReadCallResponse(message_name, decoder)
    # Handshake mismatch: the server did not process the call; re-issue the
    # request now that the remote protocol information has been updated.
    return self.Request(message_name, request_datum)
# ------------------------------------------------------------------------------
class Responder( with_metaclass( abc.ABCMeta, object ) ):
  """Base class for the server side of a protocol interaction."""
  def __init__(self, local_protocol):
    # Protocol served by this responder; its MD5 seeds the protocol cache.
    self._local_protocol = local_protocol
    self._local_hash = self._local_protocol.md5
    self._protocol_cache = {}
    self.set_protocol_cache(self._local_hash, self._local_protocol)
  @property
  def local_protocol(self):
    return self._local_protocol
  # utility functions to manipulate protocol cache
  def get_protocol_cache(self, hash):
    # Returns the cached remote protocol for the given MD5 hash, or None.
    return self._protocol_cache.get(hash)
  def set_protocol_cache(self, hash, protocol):
    self._protocol_cache[hash] = protocol
  def Respond(self, call_request):
    """Entry point to process one procedure call.
    Args:
      call_request: Serialized procedure call request.
    Returns:
      Serialized procedure call response.
    Raises:
      ???
    """
    buffer_reader = io.BytesIO(call_request)
    buffer_decoder = avro_io.BinaryDecoder(buffer_reader)
    buffer_writer = io.BytesIO()
    buffer_encoder = avro_io.BinaryEncoder(buffer_writer)
    error = None
    response_metadata = {}
    try:
      remote_protocol = self._ProcessHandshake(buffer_decoder, buffer_encoder)
      # handshake failure
      if remote_protocol is None:
        return buffer_writer.getvalue()
      # read request using remote protocol
      request_metadata = META_READER.read(buffer_decoder)
      remote_message_name = buffer_decoder.read_utf8()
      # get remote and local request schemas so we can do
      # schema resolution (one fine day)
      remote_message = remote_protocol.message_map.get(remote_message_name)
      if remote_message is None:
        fail_msg = 'Unknown remote message: %s' % remote_message_name
        raise schema.AvroException(fail_msg)
      local_message = self.local_protocol.message_map.get(remote_message_name)
      if local_message is None:
        fail_msg = 'Unknown local message: %s' % remote_message_name
        raise schema.AvroException(fail_msg)
      writer_schema = remote_message.request
      reader_schema = local_message.request
      request = self._ReadRequest(writer_schema, reader_schema, buffer_decoder)
      logger.info('Processing request: %r', request)
      # perform server logic
      try:
        response = self.Invoke(local_message, request)
      except AvroRemoteException as exn:
        # Declared application error: reported through the error union below.
        error = exn
      except Exception as exn:
        # Unexpected error: wrapped so it can still be serialized to the client.
        error = AvroRemoteException(str(exn))
      # write response using local protocol
      META_WRITER.write(response_metadata, buffer_encoder)
      buffer_encoder.write_boolean(error is not None)
      if error is None:
        writer_schema = local_message.response
        self._WriteResponse(writer_schema, response, buffer_encoder)
      else:
        writer_schema = local_message.errors
        self._WriteError(writer_schema, error, buffer_encoder)
    except schema.AvroException as exn:
      error = AvroRemoteException(str(exn))
      # NOTE(review): this rebinds the encoder to a fresh StringIO, so the
      # serialized system error below never reaches buffer_writer and is
      # silently dropped from the returned response — looks like a bug; confirm.
      buffer_encoder = avro_io.BinaryEncoder(io.StringIO())
      META_WRITER.write(response_metadata, buffer_encoder)
      buffer_encoder.write_boolean(True)
      self._WriteError(SYSTEM_ERROR_SCHEMA, error, buffer_encoder)
    return buffer_writer.getvalue()
  def _ProcessHandshake(self, decoder, encoder):
    """Processes an RPC handshake.
    Args:
      decoder: Where to read from.
      encoder: Where to write to.
    Returns:
      The requested Protocol.
    """
    handshake_request = HANDSHAKE_RESPONDER_READER.read(decoder)
    logger.info('Processing handshake request: %s', handshake_request)
    # determine the remote protocol
    client_hash = handshake_request.get('clientHash')
    client_protocol = handshake_request.get('clientProtocol')
    remote_protocol = self.get_protocol_cache(client_hash)
    if remote_protocol is None and client_protocol is not None:
      remote_protocol = protocol.Parse(client_protocol)
      self.set_protocol_cache(client_hash, remote_protocol)
    # evaluate remote's guess of the local protocol
    server_hash = handshake_request.get('serverHash')
    handshake_response = {}
    if self._local_hash == server_hash:
      if remote_protocol is None:
        handshake_response['match'] = 'NONE'
      else:
        handshake_response['match'] = 'BOTH'
    else:
      if remote_protocol is None:
        handshake_response['match'] = 'NONE'
      else:
        handshake_response['match'] = 'CLIENT'
    # Unless both sides matched, send back the server's protocol and hash.
    if handshake_response['match'] != 'BOTH':
      handshake_response['serverProtocol'] = str(self.local_protocol)
      handshake_response['serverHash'] = self._local_hash
    logger.info('Handshake response: %s', handshake_response)
    HANDSHAKE_RESPONDER_WRITER.write(handshake_response, encoder)
    return remote_protocol
  @abc.abstractmethod
  def Invoke(self, local_message, request):
    """Processes one procedure call.
    Args:
      local_message: Avro message specification.
      request: Call request.
    Returns:
      Call response.
    Raises:
      ???
    """
    # NOTE(review): 'Error' is not defined in this module; calling this
    # abstract method directly would raise NameError, not Error — confirm.
    raise Error('abtract method')
  def _ReadRequest(self, writer_schema, reader_schema, decoder):
    # Decodes the request parameters with schema resolution.
    datum_reader = avro_io.DatumReader(writer_schema, reader_schema)
    return datum_reader.read(decoder)
  def _WriteResponse(self, writer_schema, response_datum, encoder):
    datum_writer = avro_io.DatumWriter(writer_schema)
    datum_writer.write(response_datum, encoder)
  def _WriteError(self, writer_schema, error_exception, encoder):
    # Errors are serialized as their string representation.
    datum_writer = avro_io.DatumWriter(writer_schema)
    datum_writer.write(str(error_exception), encoder)
# ------------------------------------------------------------------------------
# Framed message
class FramedReader(object):
  """Wrapper around a file-like object to read framed data."""
  def __init__(self, reader):
    self._reader = reader
  def Read(self):
    """Reads one message from the configured reader.
    Returns:
      The message, as bytes.
    """
    message = io.BytesIO()
    # A message is a sequence of frames terminated by an empty (0-size) frame.
    while True:
      if self._ReadFrame(message) == 0:
        break
    return message.getvalue()
  def _ReadFrame(self, message):
    """Reads one frame and appends its payload to the given message buffer.
    Args:
      message: Buffer to append the frame payload to.
    Returns:
      The size of the frame that was read; 0 marks the end of a message.
    """
    frame_size = self._ReadInt32()
    to_read = frame_size
    while to_read > 0:
      chunk = self._reader.read(to_read)
      if len(chunk) == 0:
        raise ConnectionClosedException(
            'FramedReader: expecting %d more bytes in frame of size %d, got 0.'
            % (to_read, frame_size))
      message.write(chunk)
      to_read -= len(chunk)
    return frame_size
  def _ReadInt32(self):
    # Each frame starts with a big-endian unsigned 32-bit length header.
    header = self._reader.read(UINT32_BE.size)
    if len(header) != UINT32_BE.size:
      raise ConnectionClosedException('Invalid header: %r' % header)
    return UINT32_BE.unpack(header)[0]
class FramedWriter(object):
  """Wrapper around a file-like object to write framed data."""
  def __init__(self, writer):
    self._writer = writer
  def Write(self, message):
    """Writes a message.
    Message is chunked into sequences of frames terminated by an empty frame.
    Args:
      message: Message to write, as bytes.
    """
    while len(message) > 0:
      # Bug fix: the original used max(BUFFER_SIZE, len(message)), which
      # always emitted the whole message as a single frame and defeated
      # the intended BUFFER_SIZE chunking. (Readers accept both forms.)
      chunk_size = min(BUFFER_SIZE, len(message))
      chunk = message[:chunk_size]
      self._WriteBuffer(chunk)
      message = message[chunk_size:]
    # A message is always terminated by a zero-length buffer.
    self._WriteUnsignedInt32(0)
  def _WriteBuffer(self, chunk):
    # Each frame is a 32-bit big-endian length prefix followed by the payload.
    self._WriteUnsignedInt32(len(chunk))
    self._writer.write(chunk)
  def _WriteUnsignedInt32(self, uint32):
    self._writer.write(UINT32_BE.pack(uint32))
# ------------------------------------------------------------------------------
# Transceiver (send/receive channel)
class Transceiver( with_metaclass( abc.ABCMeta, object ) ):
  # Abstract synchronous request/reply channel used by requestors.
  @abc.abstractproperty
  def remote_name(self):
    pass
  @abc.abstractmethod
  def ReadMessage(self):
    """Reads a single message from the channel.
    Blocks until a message can be read.
    Returns:
      The message read from the channel.
    """
    pass
  @abc.abstractmethod
  def WriteMessage(self, message):
    """Writes a message into the channel.
    Blocks until the message has been written.
    Args:
      message: Message to write.
    """
    pass
  def Transceive(self, request):
    """Processes a single request-reply interaction.
    Synchronous request-reply interaction.
    Args:
      request: Request message.
    Returns:
      The reply message.
    """
    self.WriteMessage(request)
    result = self.ReadMessage()
    return result
  def Close(self):
    """Closes this transceiver."""
    pass
class HTTPTransceiver(Transceiver):
  """Transceiver that tunnels framed Avro messages over HTTP."""

  def __init__(self, host, port, req_resource='/'):
    """Opens an HTTP connection to the remote server.

    Args:
      host: Name or IP address of the remote host to interact with.
      port: Port the remote server is listening on.
      req_resource: Optional HTTP resource path to use, '/' by default.
    """
    self._req_resource = req_resource
    conn = http.client.HTTPConnection(host, port)
    conn.connect()
    self._conn = conn
    self._remote_name = conn.sock.getsockname()

  @property
  def remote_name(self):
    """Socket name recorded when the connection was established."""
    return self._remote_name

  def ReadMessage(self):
    """Reads one framed message from the pending HTTP response."""
    response = self._conn.getresponse()
    framed_message = FramedReader(response).Read()
    # Drain any remaining body so the connection is reusable.
    response.read()
    return framed_message

  def WriteMessage(self, message):
    """POSTs one framed message to the configured resource."""
    body_buffer = io.BytesIO()
    FramedWriter(body_buffer).Write(message)
    self._conn.request(
        'POST',
        self._req_resource,
        body_buffer.getvalue(),
        {'Content-Type': AVRO_RPC_MIME},
    )

  def Close(self):
    """Closes the underlying HTTP connection."""
    self._conn.close()
    self._conn = None
# ------------------------------------------------------------------------------
# Server Implementations
def _MakeHandlerClass(responder):
  """Builds an HTTP request-handler class bound to the given responder."""

  class AvroHTTPRequestHandler(http.server.BaseHTTPRequestHandler):

    def do_POST(self):
      # Read the framed request, dispatch it, then write the framed reply.
      call_request = FramedReader(self.rfile).Read()
      logger.info('Serialized request: %r', call_request)
      call_response = responder.Respond(call_request)
      logger.info('Serialized response: %r', call_response)
      self.send_response(200)
      self.send_header('Content-type', AVRO_RPC_MIME)
      self.end_headers()
      FramedWriter(self.wfile).Write(call_response)
      self.wfile.flush()
      logger.info('Response sent')

  return AvroHTTPRequestHandler
class MultiThreadedHTTPServer(
    socketserver.ThreadingMixIn,
    http.server.HTTPServer,
):
  """Multi-threaded HTTP server: each request is handled in its own thread."""
  pass
class AvroIpcHttpServer(MultiThreadedHTTPServer):
  """Avro IPC server implemented on top of an HTTP server."""

  def __init__(self, interface, port, responder):
    """Initializes a new Avro IPC server.

    Args:
      interface: Interface the server listens on, eg. 'localhost' or '0.0.0.0'.
      port: TCP port the server listens on, eg. 8000.
      responder: Responder implementation to handle RPCs.
    """
    handler_class = _MakeHandlerClass(responder)
    super(AvroIpcHttpServer, self).__init__(
        server_address=(interface, port),
        RequestHandlerClass=handler_class,
    )
if __name__ == '__main__':
  # This module only provides library classes; it cannot be run directly.
  raise Exception('Not a standalone module')
| kineticadb/kinetica-api-python | gpudb/packages/avro/avro_py3/ipc.py | ipc.py | py | 21,544 | python | en | code | 13 | github-code | 36 |
42937554918 | import sys
from PyQt5.QtWidgets import *
from PyQt5.QtGui import *
from PyQt5.QtCore import *
# TP1
# SKOCZYLAS Nestor & FRET Gaëlle
class MainWindow(QMainWindow):
    """Minimal text-editor window with New/Copy/Open/Save/Quit actions."""

    def __init__(self):
        super().__init__()
        self.resize(200, 300)
        self.textEdit = QTextEdit(self)

        bar = self.menuBar()
        fileMenu = bar.addMenu("File")
        fileToolBar = self.addToolBar("File")

        actNewFile = QAction(QIcon("new.png"), "New…", self)
        actNewFile.setShortcut("Ctrl+N")
        actNewFile.setToolTip("New File")
        actNewFile.setStatusTip("New file")
        fileMenu.addAction(actNewFile)
        fileToolBar.addAction(actNewFile)
        # Bug fix: this used to connect the Python builtin open(), which
        # raises a TypeError as soon as the action is triggered.
        actNewFile.triggered.connect(self.newFile)

        actCopy = QAction(QIcon("copy.png"), "Copy…", self)
        actCopy.setShortcut("Ctrl+C")
        actCopy.setToolTip("Copy")
        actCopy.setStatusTip("Copy")
        fileMenu.addAction(actCopy)
        fileToolBar.addAction(actCopy)
        # Bug fix: the Copy action was never connected to any handler.
        actCopy.triggered.connect(self.textEdit.copy)

        actOpen = QAction(QIcon("open.png"), "Open…", self)
        actOpen.setShortcut("Ctrl+O")
        actOpen.setToolTip("Open")
        actOpen.setStatusTip("Open")
        fileMenu.addAction(actOpen)
        fileToolBar.addAction(actOpen)
        actOpen.triggered.connect(self.openFile)

        actQuit = QAction(QIcon("quit.png"), "Quit…", self)
        actQuit.setShortcut("Ctrl+Q")
        actQuit.setToolTip("Quit")
        actQuit.setStatusTip("Quit")
        fileMenu.addAction(actQuit)
        fileToolBar.addAction(actQuit)
        actQuit.triggered.connect(self.quitApp)

        actSave = QAction(QIcon("save.png"), "Save…", self)
        actSave.setShortcut("Ctrl+S")
        actSave.setToolTip("Save")
        actSave.setStatusTip("Save")
        fileMenu.addAction(actSave)
        fileToolBar.addAction(actSave)
        actSave.triggered.connect(self.saveFile)

        self.setCentralWidget(self.textEdit)

        self.statusBar = QStatusBar()
        self.setStatusBar(self.statusBar)

    def newFile(self):
        """Clears the editor to start a new document."""
        self.textEdit.clear()

    def openFile(self):
        """Loads the selected file into the editor as HTML."""
        name = QFileDialog.getOpenFileName(self, "Open File", "./", "*.png")
        # Bug fix: QFile's second constructor argument is a parent QObject,
        # not a mode string; the open mode is passed to open() below.
        # NOTE(review): the "*.png" filter looks wrong for a text editor —
        # confirm the intended file type.
        file = QFile(name[0])
        file.open(QFile.ReadOnly)
        stream = QTextStream(file)
        self.textEdit.setHtml(stream.readAll())
        file.close()

    def saveFile(self):
        """Writes the editor's plain text to the chosen file."""
        save = QFileDialog.getSaveFileName(self, "Save File","./")
        file = open(save[0], 'w')
        text = self.textEdit.toPlainText()
        file.write(text)
        file.close()

    def quitApp(self):
        """Asks for confirmation; quits the application when 'Yes' is clicked."""
        msgBox = QMessageBox()
        msgBox.setIcon(QMessageBox.Information)
        msgBox.setText("Are you sure you want to close the window?")
        msgBox.setWindowTitle("Window Close")
        msgBox.setStandardButtons(QMessageBox.Yes | QMessageBox.No)
        msgBox.button(msgBox.Yes).clicked.connect(QApplication.quit)
        msgBox.exec()

    def closeEvent(self, event):
        # Every window-close request goes through the confirmation dialog;
        # the close event itself is always ignored.
        event.ignore()
        self.quitApp()
def main(args):
    """Creates the Qt application, shows the main window, and runs the event loop."""
    application = QApplication(args)
    window = MainWindow()
    window.show()
    application.exec()
if __name__ == "__main__":
    #print(sys.argv)
    main(sys.argv)

#Q1. Remember to call main(sys.argv) inside the __main__ guard.
#    We see ['mainWindow.py', 'argv']
#Q2.1 Add a super() call in the constructor.
#Q2.2 The window is displayed :/
#Q4. triggered.connect()
#Q6. Put self.textEdit = QTextEdit(self) in the initializer
| nestorskoczylas/Licence3_Informatique | Semestre 5/IHM/tp_pyqt1/mainWindow.py | mainWindow.py | py | 3,500 | python | en | code | 1 | github-code | 36 |
35919457830 | from django.shortcuts import render, redirect
from .models import *
from login.models import *
from django.contrib import messages
from datetime import date, datetime
# Create your views here.
def appointments(request):
    """Renders upcoming and past appointments for the logged-in user."""
    if 'user_id' not in request.session:
        return redirect('/')
    now = datetime.now()
    user_id = request.session['user_id']
    user_appointments = Appointment.objects.filter(user__id=user_id)
    context = {
        'active_user': User.objects.get(id=user_id),
        'appoint_list': user_appointments.filter(date__gte=now).order_by('date'),
        'past_appo_list': user_appointments.filter(date__lte=now).order_by('date'),
    }
    return render(request, 'appointments.html', context)
def add_appointment(request):
    # Displays the empty "new appointment" form.
    return render(request, 'add_appo.html')
def new_appo(request):
    """Validates the submitted form and creates an appointment for the active user.

    On validation errors, flashes the messages and redirects back to the form;
    on success, saves the appointment and redirects to the appointment list.
    """
    active_user = User.objects.get(id=request.session['user_id'])
    errors = {}
    today = datetime.now()
    comp_date = request.POST['date']
    if len(request.POST['name']) == 0:
        errors['name'] = "Must give a name to your task"
    if request.POST['status'] == '0':
        errors['no_status'] = "Must select a status"
    if len(comp_date) == 0:
        errors['no_date'] = "Must provide a date"
    else:
        # Bug fix: strptime used to run before the empty check, crashing on a
        # blank date.  Compare calendar dates (not datetimes) so a pending
        # appointment set for later today is not rejected as "past".
        date_object = datetime.strptime(comp_date, "%Y-%m-%d")
        if today.date() > date_object.date() and request.POST['status'] == 'Pending':
            errors['dates'] = "Future appointments cannot be in set in a past date"
    if len(errors) > 0:
        for key, msg in errors.items():
            messages.error(request, msg)
        return redirect('/appointments/add')
    else:
        Appointment.objects.create(
            name=request.POST['name'],
            status=request.POST['status'],
            date=comp_date,
            user=active_user,
        )
        return redirect('appointments')
def del_appo(request, appo_id):
    """Deletes the given appointment and returns to the list view."""
    Appointment.objects.get(id=appo_id).delete()
    return redirect('/appointments')
def edit_appo(request, appo_id):
    """Renders the edit form pre-filled with the appointment's current data."""
    # Two independent instances, mirroring the original template contract.
    context = {
        'appo': Appointment.objects.get(id=appo_id),
        'appo_past': Appointment.objects.get(id=appo_id),
    }
    return render(request, 'edit_appo.html', context)
def upd_appo(request, appo_id):
    """Validates the edit form and updates the appointment (mirrors new_appo)."""
    errors = {}
    today = datetime.now()
    comp_date = request.POST['date']
    if len(request.POST['name']) == 0:
        errors['ed_name'] = "Must give a name to your task"
    if request.POST['status'] == '0':
        errors['ed_no_status'] = "Must select a status"
    if len(comp_date) == 0:
        errors['ed_no_date'] = "Must provide a date"
    else:
        # Bug fix: parse only after the empty check, and compare calendar
        # dates so an appointment set for later today is still allowed.
        date_object = datetime.strptime(comp_date, "%Y-%m-%d")
        if today.date() > date_object.date() and request.POST['status'] == 'Pending':
            errors['ed_dates'] = "Future appointments cannot be in set in a past date"
    if len(errors) > 0:
        for key, msg in errors.items():
            messages.error(request, msg)
        return redirect(f'/appointments/{appo_id}/edit')
    else:
        appo = Appointment.objects.get(id=appo_id)
        appo.name = request.POST['name']
        appo.date = request.POST['date']
        appo.status = request.POST['status']
        appo.save()
        return redirect('/appointments')
29085620425 | import random
from threading import Timer
from typing import Union, List
from zone_api.audio_manager import Genre, get_music_streams_by_genres, get_nearby_audio_sink
from zone_api.core.devices.weather import Weather
from zone_api.core.parameters import ParameterConstraint, positive_number_validator, Parameters
from zone_api.environment_canada import EnvCanada
from zone_api.core.action import action, Action
from zone_api.core.devices.motion_sensor import MotionSensor
from zone_api.core.zone_event import ZoneEvent
from zone_api.core.devices.activity_times import ActivityType
@action(events=[ZoneEvent.MOTION], external_events=[ZoneEvent.DOOR_CLOSED],
        devices=[MotionSensor], activity_types=[ActivityType.WAKE_UP], zone_name_pattern='.*Kitchen.*')
class AnnounceMorningWeatherAndPlayMusic(Action):
    """
    Announces the current weather and plays a random music stream twice during the wake up period.
    This is based on the assumption of a household having two adults that leave work at different
    times. The music stops when the front door is closed.
    """

    @staticmethod
    def supported_parameters() -> List[ParameterConstraint]:
        # Two optional knobs: how long a session may run (minutes) and how
        # many sessions may be started per wake-up period.
        return Action.supported_parameters() + \
               [ParameterConstraint.optional('durationInMinutes', positive_number_validator),
                ParameterConstraint.optional('maximumStartCount', positive_number_validator)
                ]

    # noinspection PyDefaultArgument
    def __init__(self, parameters: Parameters):
        super().__init__(parameters)

        self._music_streams = get_music_streams_by_genres(
            [Genre.CLASSICAL, Genre.INSTRUMENT, Genre.JAZZ])
        # Defaults: a 120-minute session, at most 2 sessions per period.
        self._duration_in_minutes = self.parameters().get(self, self.supported_parameters()[-2].name(), 120)
        self._max_start_count = self.parameters().get(self, self.supported_parameters()[-1].name(), 2)

        self._in_session = False  # True while music is playing
        self._start_count = 0  # sessions started in the current period
        self._timer = None  # resets session state when the duration elapses
        self._sink = None  # audio sink chosen for the current session

    def on_action(self, event_info):
        """Starts or stops a music session in response to motion/door events."""
        zone = event_info.get_zone()
        zone_manager = event_info.get_zone_manager()

        def stop_music_session():
            # Pause playback and mark the session closed.
            self._sink.pause()
            self._in_session = False

        if event_info.get_event_type() == ZoneEvent.DOOR_CLOSED:
            # An external door closing while music plays ends the session.
            if self._in_session:
                owning_zone = event_info.get_owning_zone()
                if owning_zone.is_external():
                    stop_music_session()
                    return True

            return False
        else:
            self._sink = get_nearby_audio_sink(zone, zone_manager)
            if self._sink is None:
                self.log_warning("Missing audio device; can't play music.")
                return False

            if not self._in_session and \
                    self._start_count < self._max_start_count:
                self._in_session = True

                # Announce the weather first (when available), then music.
                weather_msg = self.get_morning_announcement(zone_manager)
                if weather_msg is not None:
                    self._sink.play_message(weather_msg)

                self._sink.play_stream(random.choice(self._music_streams), 40)
                self._start_count += 1

                def reset_state():
                    # Tear down the session once the configured duration passes.
                    stop_music_session()
                    self._sink = None
                    self._start_count = 0

                if self._timer is not None and self._timer.is_alive():
                    self._timer.cancel()

                self._timer = Timer(self._duration_in_minutes * 60, reset_state)
                self._timer.start()

            return True

    # noinspection PyMethodMayBeStatic
    def get_morning_announcement(self, zone_manager) -> Union[None, str]:
        """ Returns a string containing the current's weather and today's forecast. """
        weather = zone_manager.get_first_device_by_type(Weather)
        if weather is None or not weather.support_forecast_min_temperature() \
                or not weather.support_forecast_max_temperature():
            return None

        message = u'Good morning. It is {} degree currently; the weather ' \
                  'condition is {}. Forecasted temperature range is between {} and {} ' \
                  'degrees.'.format(weather.get_temperature(),
                                    weather.get_condition(),
                                    weather.get_forecast_min_temperature(),
                                    weather.get_forecast_max_temperature())

        # Append a precipitation warning from the next 12 hours of
        # Environment Canada hourly forecasts.
        forecasts = EnvCanada.retrieve_hourly_forecast('Ottawa', 12)
        rain_periods = [f for f in forecasts if
                        'High' == f.get_precipitation_probability() or
                        'Medium' == f.get_precipitation_probability()]
        if len(rain_periods) > 0:
            if len(rain_periods) == 1:
                message += u" There will be precipitation at {}.".format(
                    rain_periods[0].get_user_friendly_forecast_time())
            else:
                message += u" There will be precipitation from {} to {}.".format(
                    rain_periods[0].get_user_friendly_forecast_time(),
                    rain_periods[-1].get_user_friendly_forecast_time())

        return message
| yfaway/zone-apis | src/zone_api/core/actions/announce_morning_weather_and_play_music.py | announce_morning_weather_and_play_music.py | py | 5,234 | python | en | code | 2 | github-code | 36 |
31952638633 | ''' EXERCÍCIOS:
2) Dado a sequência de Fibonacci, onde se inicia por 0 e 1 e o próximo valor sempre será a soma dos 2 valores anteriores (exemplo: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34...), escreva um programa na linguagem que desejar onde, informado um número, ele calcule a sequência de Fibonacci e retorne uma mensagem avisando se o número informado pertence ou não a sequência.
IMPORTANTE:
Esse número pode ser informado através de qualquer entrada de sua preferência ou pode ser previamente definido no código;
*/
SOLUÇÃO '''
# Read the candidate number and build the Fibonacci sequence up to it.
num = int(input('Digite um número: '))
previous, current = 0, 1
fib = [previous, current]
while current < num:
    previous, current = current, previous + current
    fib.append(current)
# Report whether the number appears in the generated sequence.
if num in fib:
    print(f'{num} pertence à sequência de Fibonacci.')
else:
    print(f'{num} não pertence à sequência de Fibonacci.')
'''
3) Dado um vetor que guarda o valor de faturamento diário de uma distribuidora, faça um programa, na linguagem que desejar, que calcule e retorne:
• O menor valor de faturamento ocorrido em um dia do mês;
• O maior valor de faturamento ocorrido em um dia do mês;
• Número de dias no mês em que o valor de faturamento diário foi superior à média mensal.
IMPORTANTE:
a) Usar o json ou xml disponível como fonte dos dados do faturamento mensal;
b) Podem existir dias sem faturamento, como nos finais de semana e feriados. Estes dias devem ser ignorados no cálculo da média;
SOLUÇÃO '''
import json

# Load the monthly revenue data from a JSON file
with open('faturamento.json', 'r') as f:
    data = json.load(f)

faturamento = data['faturamento']

# Smallest and largest daily revenue of the month
menor_faturamento = min(faturamento)
maior_faturamento = max(faturamento)

# Monthly average revenue; days without revenue (weekends/holidays) are ignored
dias_com_faturamento = [dia for dia in faturamento if dia > 0]
media_mensal = sum(dias_com_faturamento) / len(dias_com_faturamento)

# Number of days whose revenue exceeded the monthly average
dias_acima_da_media = sum(1 for dia in dias_com_faturamento if dia > media_mensal)

# Print the results
print(f'Menor valor de faturamento: {menor_faturamento}')
print(f'Maior valor de faturamento: {maior_faturamento}')
print(f'Dias com faturamento superior à média mensal: {dias_acima_da_media}')
'''
4) Dado o valor de faturamento mensal de uma distribuidora, detalhado por estado:
SP – R$67.836,43
RJ – R$36.678,66
MG – R$29.229,88
ES – R$27.165,48
Outros – R$19.849,53
Escreva um programa na linguagem que desejar onde calcule o percentual de representação que cada estado teve dentro do valor total mensal da distribuidora.
SOLUÇÃO '''
# Monthly revenue per state
faturamento_por_estado = {
    'SP': 67836.43,
    'RJ': 36678.66,
    'MG': 29229.88,
    'ES': 27165.48,
    'Outros': 19849.53
}

# Total monthly revenue of the distributor
faturamento_total = sum(faturamento_por_estado.values())

# Percentage share of each state within the monthly total
percentuais = {
    estado: (faturamento / faturamento_total) * 100
    for estado, faturamento in faturamento_por_estado.items()
}

# Report the results
for estado, percentual in percentuais.items():
    print('{} - {:.2f}%'.format(estado, percentual))
'''
5) Escreva um programa que inverta os caracteres de um string.
IMPORTANTE:
a) Essa string pode ser informada através de qualquer entrada de sua preferência ou pode ser previamente definida no código;
b) Evite usar funções prontas, como, por exemplo, reverse;
SOLUÇÃO '''
# Read the input string from the user
string = input('Digite uma string para inverter: ')

# Walk the string from the last character to the first, collecting characters
# (no ready-made helpers like reversed()/slicing, per the exercise statement).
caracteres_invertidos = []
index = len(string) - 1
while index >= 0:
    caracteres_invertidos.append(string[index])
    index -= 1

# Join the collected characters back into a string
string_invertida = ''.join(caracteres_invertidos)

print('A string invertida é:', string_invertida)
| bruno-kilo/FibonacciSequence | Fibonacci.py | Fibonacci.py | py | 4,259 | python | pt | code | 0 | github-code | 36 |
20847546702 | """
Django settings for test_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
# Absolute path of the project root (two levels above this settings module).
BASE_DIR = os.path.dirname(os.path.dirname(__file__))


# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/

# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'rw2wiza36p)(d7fxun0xl5$2k%3p5t=f9zva1rpoic-lbl&es4'

# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True

TEMPLATE_DEBUG = True

# NOTE(review): '*' accepts any Host header — acceptable for development only.
ALLOWED_HOSTS = ['*']


# Application definition

INSTALLED_APPS = (
    'django.contrib.admin',
    'django.contrib.auth',
    'django.contrib.contenttypes',
    'django.contrib.sessions',
    'django.contrib.messages',
    'django.contrib.staticfiles',
    'hydra',
    'test_app',
    'django_extensions'
)

MIDDLEWARE_CLASSES = (
    'django.contrib.sessions.middleware.SessionMiddleware',
    'django.middleware.common.CommonMiddleware',
    'django.middleware.csrf.CsrfViewMiddleware',
    'django.contrib.auth.middleware.AuthenticationMiddleware',
    'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
    'django.contrib.messages.middleware.MessageMiddleware',
    'django.middleware.clickjacking.XFrameOptionsMiddleware',
)

ROOT_URLCONF = 'test_project.urls'

WSGI_APPLICATION = 'test_project.wsgi.application'


# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases

# Database configuration comes from $DATABASE_URL, with a local
# postgres fallback for development.
import dj_database_url
DATABASES = {
    'default': dj_database_url.config(default='postgres://hydra:@localhost:5432/hydra')
}

# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/

LANGUAGE_CODE = 'en-us'

TIME_ZONE = 'UTC'

USE_I18N = True

USE_L10N = True

USE_TZ = True


# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/

STATIC_URL = '/static/'

# Verbose console logging, including all database queries (development aid).
LOGGING = {
    'version': 1,
    'disable_existing_loggers': False,
    'handlers': {
        'console': {
            'level': 'DEBUG',
            'class': 'logging.StreamHandler',
        },
    },
    'loggers': {
        'django.db.backends': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False,
        },
        'hydra': {
            'handlers': ['console'],
            'level': 'DEBUG',
            'propagate': False
        }
    },
    'root': {
        'handlers': ['console'],
        'level': 'INFO'
    }
}

# Models exposed to the hydra app.
HYDRA_MODELS = {
    'test_app.Reader',
    'test_app.Author',
    'test_app.Book'
}
| j00bar/django-hydra | test_project/test_project/settings.py | settings.py | py | 2,762 | python | en | code | 0 | github-code | 36 |
24643892493 | import sqlite3
class OperationDB(object):
    """Helpers for the sqlite database 'cltdata.db' (sampleinfo/testresult tables)."""

    def get_conn(self):
        '''连接数据库 — opens and returns a new connection.'''
        conn = sqlite3.connect('cltdata.db')
        return conn

    def get_cursor(self):
        '''创建游标 — opens a new connection and returns a cursor on it.

        The owning connection is reachable via cursor.connection.
        '''
        conn = self.get_conn()
        return conn.cursor()

    def close_all(self, conn, cu):
        '''关闭数据库游标对象和数据库连接对象'''
        try:
            if cu is not None:
                cu.close()
        finally:
            if conn is not None:
                conn.close()

    def create_table(self):
        '''创建数据库表:sampleinfo testresult'''
        conn = self.get_conn()
        # Bug fix: the cursor used to come from a *second* connection while
        # commit/close ran on this one; use one connection throughout.
        c = conn.cursor()
        c.execute('''
                create table if not exists sampleinfo
                (sampleid varchar(20) PRIMARY KEY,pati_id varchar(20),pati_name varchar(20),
                pati_age varchar(20),pati_gender varchar(5),status varchar(5))
                '''
                  )
        c.execute('''
                create table if not exists testresult
                (sampleid varchar(20),testname varchar(20),testvalue varchar(20))
                '''
                  )
        # Bug fix: "if not exists" so that re-initialization does not raise.
        c.execute('create index if not exists testresult_sid on testresult(sampleid)')
        conn.commit()
        print('创建数据库表[sampleinfo,testresult]成功!')
        conn.close()

    def insert_sampleinfo(self, data):
        '''sampleinfo表插入数据 — data is a 6-tuple matching the table columns.'''
        conn = self.get_conn()
        conn.execute('insert into sampleinfo values (?,?,?,?,?,?)', data)
        conn.commit()
        print('表[sampleinfo]数据写入成功!')
        conn.close()

    def insert_testresult(self, data):
        '''testresult表插入数据 — data = (sampleid, testvalue).'''
        conn = self.get_conn()
        conn.execute('insert into testresult (sampleid, testvalue) values (?,?)', data)
        conn.commit()
        print('表[testresult]数据写入成功!')
        conn.close()

    def update_resultvalue(self, data):
        '''更新数据 — data = (testvalue, sampleid, testname).'''
        print('更新数据...')
        conn = self.get_conn()
        conn.execute('update testresult set testvalue = ? where sampleid = ? and testname = ?', data)
        conn.commit()
        print('数据修改成功')
        conn.close()

    def select_db(self, sid):
        '''Returns patient (P), order (O) and result (R) record tuples for sid.'''
        conn = self.get_conn()
        cursor = conn.cursor()
        result = ()
        cursor.execute("select 'P' as P,pati_id,pati_name,pati_age,pati_gender from sampleinfo where sampleid=?", sid)
        result += (cursor.fetchone(),)
        cursor.execute("select 'O' as O,sampleid,status from sampleinfo where sampleid=?", sid)
        result += (cursor.fetchone(),)
        cursor.execute("select 'R' as R,testname,testvalue from testresult where sampleid =?", sid)
        result += tuple(cursor.fetchall())
        # Bug fix: close the connection the cursor actually belongs to; the
        # original opened-and-closed an unrelated new connection, leaking
        # the one behind the cursor.
        self.close_all(conn, cursor)
        return result

    def select_test(self):
        '''查询结果数据 — all rows of testresult.'''
        conn = self.get_conn()
        c = conn.cursor()
        c.execute("select * from testresult")
        rows = c.fetchall()
        # Bug fix: close the real connection (and its cursor) instead of a
        # dangling one.
        self.close_all(conn, c)
        return rows

    def select_data(self, sid):
        '''查询结果数据 — joined sampleinfo/testresult row for sid.'''
        conn = self.get_conn()
        c = conn.cursor()
        c.execute('''select sampleinfo.sampleid,pati_id,pati_name,pati_age,status,testname,testvalue
                from sampleinfo left outer join testresult on sampleinfo.sampleid = testresult.sampleid where sampleinfo.sampleid=?''', sid)
        rows = c.fetchone()
        self.close_all(conn, c)
        return rows

    def testConn(self, params):
        # Connectivity probe used by remote callers; params is ignored.
        return "success"

    def orderEntry(self, params):
        '''Creates a sample record plus one testresult row per test name in params[6].'''
        conn = self.get_conn()
        c = conn.cursor()
        c.execute('insert into sampleinfo values (?,?,?,?,?,?)', (params[0], params[1], params[2], params[3], params[4], params[5]))
        conn.commit()
        print('表[sampleinfo]数据写入成功!')
        for test in params[6]:
            c.execute('insert into testresult (sampleid, testname) values (?,?)', (params[0], test))
            conn.commit()
            print('表[testresult]数据写入成功!')
        conn.close()
        test = self.select_data((params[0],))
        return '数据写入成功!, sid: ' + str(test)

    def selectTest(self, sid):
        '''Same record bundle as select_db: P/O/R tuples for sid.'''
        conn = self.get_conn()
        cursor = conn.cursor()
        result = ()
        cursor.execute("select 'P' as P,pati_id,pati_name,pati_age,pati_gender from sampleinfo where sampleid=?", sid)
        result += (cursor.fetchone(),)
        cursor.execute("select 'O' as O,sampleid,status from sampleinfo where sampleid=?", sid)
        result += (cursor.fetchone(),)
        cursor.execute("select 'R' as R,testname,testvalue from testresult where sampleid =?", sid)
        result += tuple(cursor.fetchall())
        # Bug fix: the original never closed this connection.
        self.close_all(conn, cursor)
        return result
return result | weijingwei/liwei_python | ASTM2/Operation_db.py | Operation_db.py | py | 4,387 | python | en | code | 0 | github-code | 36 |
36635799452 | from cmath import exp
from email import message
import ssl, socket
import requests
from dateutil import parser
import pytz
import datetime, time
import telegram
requests.packages.urllib3.disable_warnings()
# Fall back to unverified HTTPS contexts, so hosts with bad/self-signed
# certificates can still be probed.
try:
    _create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
    # Legacy Python that doesn't verify HTTPS certificates by default
    pass
else:
    # Handle target environment that doesn't support HTTPS verification
    ssl._create_default_https_context = _create_unverified_https_context
def get_domain_content(domain):
    """Fetches https://<domain> (without certificate verification) and prints the response headers."""
    requests.packages.urllib3.disable_warnings()
    headers = requests.get('https://' + domain, verify=False).headers
    print(headers)
def get_my_domain(mydomain):
    """Resolves mydomain, fetches its TLS certificate and checks its expiry.

    Certificate and DNS resolution errors are silently ignored (best-effort),
    matching the original behavior for unreachable/misconfigured domains.
    """
    try:
        socket.setdefaulttimeout(5)
        my_addr = socket.getaddrinfo(mydomain, None)
        c = ssl.create_default_context()
        s = c.wrap_socket(socket.socket(), server_hostname=mydomain)
        s.connect((mydomain, 443))
        my_cert = s.getpeercert()
        get_my_cert_dated(mydomain, my_cert, my_addr)
    # Bug fix: 'except A and B' evaluates to B only, so CertificateError was
    # never caught; a tuple catches both exception types.
    except (ssl.CertificateError, socket.gaierror):
        pass
def days(str1, str2):
    """Returns the whole-day difference date(str1) - date(str2).

    Both arguments must begin with an ISO date ('YYYY-MM-DD'); any trailing
    text (e.g. a time component) is ignored.
    """
    first = datetime.datetime.strptime(str1[0:10], "%Y-%m-%d")
    second = datetime.datetime.strptime(str2[0:10], "%Y-%m-%d")
    return (first - second).days
def msg_push(message):
    # Sends `message` to a fixed Telegram chat.
    # NOTE(review): token and chat_id are placeholders ("XXXX"); they must be
    # configured (ideally via environment variables) before this can work.
    bot = telegram.Bot(token="XXXX")
    bot.send_message(chat_id='XXXX', text=message)
def get_my_cert_dated(domain, certs, my_addr):
    """Computes the days until certificate expiry and pushes an alert when < 7.

    NOTE(review): my_addr is currently unused.
    """
    # Certificate validity window, normalized to UTC.
    cert_beginning_time = parser.parse(certs['notBefore']).astimezone(pytz.utc)
    cert_end_time = parser.parse(certs['notAfter']).astimezone(pytz.utc)
    # cert_end_time_str = datetime.datetime.strptime(cert_end_time[0:10], "%Y-%m-%d")
    local_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    # print(days(str(cert_end_time), str(local_time)))
    # print('域名:(%s) 证书失效时间: %s' % (domain, cert_end_time))
    # Remaining lifetime in whole days (negative when already expired).
    expired_days = days(str(cert_end_time), str(local_time))
    # print(expired_days)
    if (expired_days < 7):
        # Push a Telegram alert when the certificate expires within a week.
        # print('域名:(%s) 证书还有: %s' % (domain, expired_days))
        message = '域名:(%s) 证书过期天数: %s 证书失效时间: %s' % (domain, expired_days, cert_end_time)
        msg_push(message)
        # print('域名:(%s) 证书过期天数: %s 证书失效时间: %s' % (domain, expired_days, cert_end_time))
def read_domain_files():
    """Checks the certificate of every domain listed (one per line) in ./domain.txt."""
    with open('./domain.txt', 'r', encoding="utf-8") as file:
        for domain in file:
            try:
                get_my_domain(domain.strip())
            except Exception as e:
                # Best effort: report the failure and continue with the rest.
                print('域名: (%s)-%s' % (domain.strip(), e))
if __name__ == "__main__":
    # Entry point: scan every domain from the list file.
    read_domain_files()
| coeus-lei/python | domain-ssl-check/domain-ssl.py | domain-ssl.py | py | 2,862 | python | en | code | 0 | github-code | 36 |
70190179945 | import random
import time
# Generate a random sample of 100 distinct integers from [1, 859]
lista_rand = random.sample(range(1, 860), 100)
print(lista_rand)
# bubble_selection
def bubble(lista):
    """
    Bubble sort, ascending, in place.

    input: a list of comparable numbers (may be empty)
    output: the same list object, sorted (also mutated in place)
    """
    n = len(lista)
    for j in range(n - 1):
        # After pass j the last j elements are already in their final place.
        for i in range(n - 1 - j):
            if lista[i] > lista[i + 1]:
                # swap the out-of-order adjacent pair
                lista[i], lista[i + 1] = lista[i + 1], lista[i]
    # Bug fix: the docstring promised the sorted list, but nothing was
    # returned, so the caller printed "None".
    return lista
# Start the wall-clock timer
start = time.time()
# Sort the random list and show the result
print(bubble(lista_rand))
# Stop the timer and report the elapsed time
# (fix: removed corrupted metadata residue that was fused onto this line)
end = time.time()
print("\n Tempo: ", end-start)
42600435981 | #!/usr/bin/env python3
import os, psutil, signal
import sys
import fcntl
import pytz
import time
from datetime import datetime
import multiprocessing
from multiprocessing import Queue
import subprocess, shlex
import atexit
import signal
import socketserver
import socket
import re
import shutil
def getTaipeiTime():
    # Current time in Asia/Taipei, formatted "MM-DD_HH-MM" (used in log lines).
    return datetime.now(pytz.timezone('Asia/Taipei')).strftime("%m-%d_%H-%M")
def check_PidAlive(pid):
    """
    Returns True if `pid` is a still-running child process, False otherwise.

    A pid of None, an already-reaped child, or any pid that is not a child of
    the current process (os.waitpid raises OSError for those) yields False.
    """
    if pid != None:
        try:
            # WNOHANG: (0, 0) means the child exists and has not exited yet.
            if os.waitpid(pid, os.WNOHANG) == (0,0):
                return True
            else:
                return False
        except OSError:
            # Not our child (or already reaped) — treat as dead.
            pass
    return False
def KillProcesses(pid):
    '''
    Kills the process `pid` together with all of its descendants.
    Failures to resolve the process tree are logged and skipped.
    '''
    try:
        parent = psutil.Process(pid)
        for child in parent.children(recursive=True):
            child.kill()
    except Exception as e:
        print("Failed to KillProcesses with pid={}\n Skip it.".format(pid))
        return
    parent.kill()
def KillChildren(pid):
    '''
    Kills every descendant of `pid`, leaving `pid` itself running.
    '''
    try:
        descendants = psutil.Process(pid).children(recursive=True)
    except Exception as e:
        print("Failed to KillChildren with pid={}\nReasons:{}".format(pid, e))
        return
    for child in descendants:
        try:
            child.kill()
        except Exception as e:
            # A child may already be gone; ignore and continue.
            pass
def KillPid(pid):
    '''
    kill the pid
    '''
    # Sends SIGKILL directly; failures (no such process, permission denied)
    # are logged and swallowed.
    try:
        os.kill(pid, signal.SIGKILL)
    except Exception as e:
        print("KillPid() failed.\n reasons:{}".format(e))
def LimitTimeExec(LimitTime, Func, *args):
    """
    Runs Func in a child process, killing it if it exceeds LimitTime seconds.

    Input:
        1. LimitTime: time limit, in seconds.
        2. Func: callable executed in the child; it receives `args` packed
           as a single tuple argument.
        3. args: arguments forwarded to Func (as one tuple).
    Return value:
        1. isKilled(bool): True when the child was killed for exceeding
           the time limit.
        2. ret(int): the child's exit code (-1 when killed).
    """
    ret = -1
    PrevWd = os.getcwd()
    isKilled = False
    WaitSecs = 0
    WaitUnit = 10  # polling interval, seconds
    ExecProc = multiprocessing.Process(target=Func, args=[args])
    # NOTE: SIGKILL will not kill the children
    # kill all its sub-process when parent is killed.
    ExecProc.daemon = True
    ExecProc.start()
    # Poll the child every WaitUnit seconds until it finishes or times out.
    while True:
        date = getTaipeiTime()
        if ExecProc.is_alive():
            # log date to check liveness
            print("Alive at {}".format(date))
            time.sleep(WaitUnit)
            WaitSecs += WaitUnit
        else:
            # return the return code to indicate success or not
            ret = ExecProc.exitcode
            isKilled = False
            print("The command is finished at {} with exitcode={}, break.".format(date, ret))
            break
        if WaitSecs > LimitTime:
            if not ExecProc.is_alive():
                # if the work is done after the sleep
                continue
            # handle the processes twice, kill its children first
            KillChildren(ExecProc.pid)
            # with daemon flag, all children will be terminated
            ExecProc.terminate()
            KillPid(ExecProc.pid)
            # wait for a few secs
            ExecProc.join(10)
            if ExecProc.exitcode is None: # exitcode is None for unfinished proc.
                print("ExecProc.terminate() failed; Daemon handler exit.")
                sys.exit(0)
            isKilled = True
            ret = -1
            print("Achieve time limitation, kill it at {}.".format(getTaipeiTime()))
            break
    # Restore the working directory in case Func changed it via side effects.
    os.chdir(PrevWd)
    return isKilled, ret
def ExecuteCmd(WorkerID=1, Cmd="", Block=True, ParallelBuild=False):
    """
    Runs Cmd, pinned to a CPU core derived from WorkerID (via taskset).

    return cmd's return code, STDOUT, STDERR

    NOTE(review): the non-blocking branch (Block=False) is unimplemented and
    implicitly returns None instead of the documented 3-tuple.
    """
    # Use taskset by default
    if Block:
        '''
        The taskset configuration depends on the hardware.
        If your computer is other than 8700K, you must customized it.
        Current configuration:
        intel 8700K:
            Core 0 as the "benchmark scheduler"
            Core 1~5 as the "worker" to run programs.
            Core 6~11 are not "real core", they are hardware threads shared with Core 0~5.
        '''
        # Map the worker onto cores 1..5 (core 0 is the scheduler).
        CpuWorker = str((int(WorkerID) % 5) + 1)
        # NOTE(review): TrainLoc is read but never used here.
        TrainLoc = os.getenv("LLVM_THESIS_TrainingHome", "Error")
        if not ParallelBuild:
            FullCmd = "taskset -c " + CpuWorker + " " + Cmd
        else:
            # Parallel builds: give "make" all cores instead of pinning.
            if Cmd.split()[0] == "make":
                FullCmd = Cmd + " -j" + str(multiprocessing.cpu_count())
            else:
                FullCmd = Cmd
        #print(FullCmd)
        p = subprocess.Popen(shlex.split(FullCmd),
                stdin=subprocess.PIPE,
                stdout=subprocess.PIPE)
        out, err = p.communicate()
        p.wait()
        return p.returncode, out, err
    else:
        print("TODO: non-blocking execute", file=sys.stderr)
class EnvBuilder:
    """Configure, build, verify and instrument llvm test-suite targets.

    Each worker owns a private build tree ("build-worker-<WorkerID>") so
    several workers can build and run benchmarks concurrently.
    """

    def CheckTestSuiteCmake(self, WorkerID):
        """
        Ensure the worker's test-suite build tree is cmake-configured,
        then collect the measurable lit tests.

        return LitTestDict: { target-name: .test-loc }
        """
        llvmSrc = os.getenv("LLVM_THESIS_HOME", "Error")
        if llvmSrc == "Error":
            print("$LLVM_THESIS_HOME is not defined.", file=sys.stderr)
            sys.exit(1)
        TestSrc = llvmSrc + "/test-suite/build-worker-" + WorkerID
        PrevWd = os.getcwd()
        # if the cmake is not done, do it once.
        if not os.path.isdir(TestSrc):
            os.mkdir(TestSrc)
            os.chdir(TestSrc)
            '''
            ex.
            cmake -DCMAKE_C_COMPILER=/home/jrchang/workspace/llvm-thesis/build-release-gcc7-worker1/bin/clang -DCMAKE_CXX_COMPILER=/home/jrchang/workspace/llvm-thesis/build-release-gcc7-worker1/bin/clang++ ../
            '''
            cBinSrc = llvmSrc + "/build-release-gcc7-worker" + WorkerID + "/bin/clang"
            cxxBinSrc = cBinSrc + "++"
            cmd = "cmake -DCMAKE_C_COMPILER=" + cBinSrc + " -DCMAKE_CXX_COMPILER=" + cxxBinSrc + " ../"
            # Blocking ExecuteCmd returns (returncode, stdout, stderr); only
            # the return code matters here.  BUG FIX: the old code bound the
            # whole tuple and compared it against 0, which is never equal,
            # so a fresh cmake configuration always aborted even on success.
            ret, _, _ = ExecuteCmd(WorkerID=WorkerID, Cmd=cmd, Block=True)
            os.chdir(PrevWd)
            if ret != 0:
                print("cmake failed.", file=sys.stderr)
                sys.exit(1)
        # Build .test dict for verification and run
        LitTestDict = {}
        '''
        only add the "measurable targets"
        '''
        MeasurableRec = os.getenv("LLVM_THESIS_Random_LLVMTestSuiteScript", "Error")
        MeasurableRec = \
            MeasurableRec + '/GraphGen/output/newMeasurableStdBenchmarkMeanAndSigma'
        MeasurableList = []
        with open(MeasurableRec, 'r') as f:
            # Each record looks like "<path>;<mean>;<sigma>" -- keep only
            # the benchmark's basename.
            for line in f:
                MeasurableList.append(line.split(';')[0].split('/')[-1].strip())
        for root, dirs, files in os.walk(TestSrc):
            for file in files:
                if file.endswith(".test"):
                    name = file[:-5]  # strip the ".test" extension
                    if name in MeasurableList:
                        LitTestDict[name] = os.path.join(root, file)
        return LitTestDict

    def workerMake(self, args):
        """
        Input: args(tuple):
            [0]:WorkerID
            [1]:BuildTarget
            [2]:ParallelBuild <---This arg is optional
                (Default is using taskset to build on a core)
        Return a int:
            0 --> build success
            others --> build failed
        """
        WorkerID = args[0]
        BuildTarget = args[1]
        ParallelBuild = args[2] if len(args) > 2 else False
        '''
        build
        '''
        llvmSrc = os.getenv("LLVM_THESIS_HOME", "Error")
        TestSrc = llvmSrc + "/test-suite/build-worker-" + WorkerID
        os.chdir(TestSrc)
        cmd = "make " + BuildTarget
        ret, _, _ = ExecuteCmd(WorkerID=WorkerID, Cmd=cmd, Block=True, ParallelBuild=ParallelBuild)
        return ret

    def make(self, WorkerID, BuildTarget, ParallelBuild=False):
        """
        Build *BuildTarget* with a 900-second timeout.
        return a number:
            0 --> build success
            others --> build failed (or timed out)
        """
        isKilled, ret = LimitTimeExec(900, self.workerMake, WorkerID, BuildTarget, ParallelBuild)
        if isKilled or ret != 0:
            return -1
        return 0

    def workerVerify(self, args):
        """
        Input(tuple):
            [0]:WorkerID
            [1]:TestLoc
        Return a int:
            0 --> verified and correct
            others --> failed
        """
        WorkerID = args[0]
        TestLoc = args[1]
        Lit = os.getenv("LLVM_THESIS_lit", "Error")
        if Lit == "Error":
            print("$LLVM_THESIS_lit not defined.", file=sys.stderr)
            sys.exit(1)
        cmd = Lit + " -q " + TestLoc
        _, out, err = ExecuteCmd(WorkerID=WorkerID, Cmd=cmd, Block=True)
        # "lit -q" is quiet on success, so any stdout means a failure.
        return -1 if out else 0

    def verify(self, WorkerID, TestLoc):
        """
        Run the lit test with a 500-second timeout.
        return a number:
            0 --> success and correct
            others --> failed (or timed out)
        """
        isKilled, ret = LimitTimeExec(500, self.workerVerify, WorkerID, TestLoc)
        if isKilled or ret != 0:
            return -1
        return 0

    def distributePyActor(self, TestFilePath):
        """
        Replace the built benchmark binary with the PyCaller/PyActor
        instrumentation pair (the real elf is kept as "<name>.OriElf").

        return 0 for success
        return -1 for failure.
        """
        Log = LogService()
        # Does this benchmark need stdin?  ("<" redirection in a RUN: line)
        NeedStdin = False
        with open(TestFilePath, "r") as TestFile:
            for line in TestFile:
                if line.startswith("RUN:") and line.find("<") != -1:
                    NeedStdin = True
                    break
        # Rename elf and copy actor
        ElfPath = TestFilePath.replace(".test", '')
        NewElfPath = ElfPath + ".OriElf"
        # based on "stdin", pick the matching caller/actor pair to copy
        InstrumentSrc = os.getenv("LLVM_THESIS_InstrumentHome", "Error")
        if NeedStdin:
            PyCallerLoc = InstrumentSrc + '/PyActor/WithStdin/PyCaller'
            PyActorLoc = InstrumentSrc + '/PyActor/WithStdin/MimicAndFeatureExtractor.py'
        else:
            PyCallerLoc = InstrumentSrc + '/PyActor/WithoutStdin/PyCaller'
            PyActorLoc = InstrumentSrc + '/PyActor/WithoutStdin/MimicAndFeatureExtractor.py'
        try:
            # Rename the real elf
            shutil.move(ElfPath, NewElfPath)
            # Copy the feature-extractor
            shutil.copy2(PyActorLoc, ElfPath + ".py")
        except Exception as e:
            print("distributePyActor() errors, Reasons:\n{}".format(e))
            return -1
        # Copy the PyCaller
        if os.path.exists(PyCallerLoc):
            shutil.copy2(PyCallerLoc, ElfPath)
        else:
            Log.err("Please \"$ make\" to get PyCaller in {}\n".format(PyCallerLoc))
            return -1
        return 0  # success

    def run(self, WorkerID, TestLoc):
        """Run the instrumented benchmark by re-using the lit verification."""
        ret = self.verify(WorkerID, TestLoc)
        return ret
class EnvResponseActor:
    """Rebuild, verify, instrument and run one benchmark target on behalf
    of a remote request, reporting "Success" or "Failed"."""

    def EnvEcho(self, BuildTarget, WorkerID, LitTestDict, ParallelBuild=False):
        """
        Clean-rebuild *BuildTarget* in the worker's tree, verify it with
        lit, install the PyActor instrumentation, then run it once.

        return "Success" or "Failed"
        """
        testLoc = LitTestDict[BuildTarget]
        retString = "Success"
        '''
        remove previous build and build again
        '''
        env = EnvBuilder()
        '''
        ex1. RUN: /llvm/test-suite/build-worker-1/SingleSource/Benchmarks/Dhrystone/dry
        ex2. RUN: cd /home/jrchang/workspace/llvm-thesis/test-suite/build-worker-1/MultiSource/Applications/sqlite3 ; /home/jrchang/workspace/llvm-thesis/test-suite/build-worker-1/MultiSource/Applications/sqlite3/sqlite3 -init /home/jrchang/workspace/llvm-thesis/test-suite/MultiSource/Applications/sqlite3/sqlite3rc :memory: < /home/jrchang/workspace/llvm-thesis/test-suite/MultiSource/Applications/sqlite3/commands
        '''
        # Pull the built binary's path out of the first RUN line of the
        # .test file (two layouts, see ex1/ex2 above).
        with open(testLoc, "r") as file:
            fileCmd = file.readline()
            file.close()
        MultiCmdList = fileCmd.split(';')
        if len(MultiCmdList) == 1:
            # cases like ex1.
            BuiltBin = fileCmd.split()[1]
        else:
            # cases like ex2.
            BuiltBin = MultiCmdList[1].strip().split()[0]
        '''
        remove binary does not ensure it will be built again.
        Therefore, we must use "make clean"
        '''
        binName = BuiltBin.split('/')[-1]
        dirPath = BuiltBin[:-(len(binName) + 1)]
        prevWd = os.getcwd()
        '''
        print("fileCmd={}".format(fileCmd))
        print("BuiltBin={}".format(BuiltBin))
        print("dirPath={}".format(dirPath))
        print("binName={}".format(binName))
        '''
        # "make clean" must run from the benchmark's own directory.
        os.chdir(dirPath)
        os.system("make clean")
        os.chdir(prevWd)
        # remove the feature file left over from a previous run
        FeatureFile = '/tmp/PredictionDaemon/worker-{}/features'.format(WorkerID)
        if os.path.exists(FeatureFile):
            os.remove(FeatureFile)
        '''
        build
        assuming the proper cmake is already done.
        '''
        ret = env.make(WorkerID, BuildTarget, ParallelBuild)
        if ret != 0:
            print("Failed sent.")
            return "Failed"
        '''
        verify
        '''
        ret = env.verify(WorkerID, testLoc)
        if ret != 0:
            print("Failed sent.")
            return "Failed"
        '''
        distribute PyActor
        '''
        ret = env.distributePyActor(testLoc)
        if ret != 0:
            print("Failed sent.")
            return "Failed"
        '''
        run and extract performance
        The return value from env.run() can be ignored.
        We already use env.verify() to verify it.
        '''
        ret = env.run(WorkerID, testLoc)
        return retString
class LogService():
    """Minimal logger: messages go to stdout; FileWriter appends to a file
    while holding an exclusive advisory lock so that concurrent workers do
    not interleave their writes."""

    def __init__(self):
        pass

    def outNotToFile(self, msg):
        """Emit *msg* on stdout without appending a newline."""
        print(msg, file=sys.stdout, end="")

    def FileWriter(self, path, msg):
        """Append *msg* to *path* under an exclusive flock."""
        with open(path, "a") as handle:
            fcntl.flock(handle, fcntl.LOCK_EX)
            handle.write(msg)
            fcntl.flock(handle, fcntl.LOCK_UN)

    def out(self, msg):
        """Normal-output channel."""
        self.outNotToFile(msg)

    def err(self, msg):
        """Error channel; currently routed to stdout as well."""
        self.out(msg)
class ConnectInfoService():
    """Parses the worker connection table."""

    def getConnectDict(self, path):
        '''
        Return Dict[WorkerID] = ["RemoteEnv-ip", "RemoteEnv-port"].

        The first line of *path* is a header and is skipped; every other
        line is comma-separated: WorkerID, ip, port (whitespace around the
        fields is tolerated).
        '''
        connections = {}
        with open(path, "r") as handle:
            handle.readline()  # discard the header row
            for record in handle:
                fields = [part.strip() for part in record.split(",")]
                connections[fields[0]] = [fields[1], fields[2]]
        return connections
| TibaChang/ThesisTools | PassInstrument/training/Lib.py | Lib.py | py | 15,026 | python | en | code | 0 | github-code | 36 |
23603029370 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 17:05:36 2018
@author: s.jayanthi
"""
import cv2, numpy as np
img = cv2.imread(params.color_transfer_target_label1)
dst = [];
rows,cols = img.shape[0], img.shape[1]
M = cv2.getRotationMatrix2D((cols/2,rows/2),45,1)
for channel in range(img.shape[2]):
d_img = img[:,:,channel]
dst.append(cv2.warpAffine(d_img,M,(cols,rows)))
dst = np.stack(dst, axis=-1)
#cv2.imshow('.',img);cv2.imshow('..',dst);
img = cv2.imread(params.here)
cv2.imshow('I', img)
#img_scaled = cv2.resize(img,None,fx=1.5, fy=1.5, interpolation = cv2.INTER_LINEAR)
#cv2.imshow('Scaling - Linear Interpolation', img_scaled)
img_scaled = cv2.resize(img,None,fx=1.2, fy=1.2, interpolation = cv2.INTER_CUBIC)
cv2.imshow('Scaling - Cubic Interpolation', img_scaled)
#img_scaled = cv2.resize(img,(450, 400), interpolation = cv2.INTER_AREA)
#cv2.imshow('Scaling - Skewed Size', img_scaled)
img = cv2.imread(params.here_mask)
cv2.imshow('M', img)
#img_scaled = cv2.resize(img,None,fx=1.5, fy=1.5, interpolation = cv2.INTER_LINEAR)
#cv2.imshow('M Scaling - Linear Interpolation', img_scaled)
img_scaled = cv2.resize(img,None,fx=1.2, fy=1.2, interpolation = cv2.INTER_CUBIC)
cv2.imshow('M Scaling - Cubic Interpolation', img_scaled)
img_scaled = cv2.resize(img,(450, 400), interpolation = cv2.INTER_AREA)
cv2.imshow('M Scaling - Skewed Size', img_scaled) | murali1996/semantic_segmentation_of_nuclei_images | old_versions/dummy.py | dummy.py | py | 1,372 | python | en | code | 0 | github-code | 36 |
7963630141 | from tkinter import *
from tkinter.font import Font
import numpy as np
import itertools as itr
import os
# Criteria to be compared (Polish: bikes, TVs, books, phones, printers)
# and their initial equal weights, formatted as 2-decimal strings.
kryteria = ['rowery', 'telewizory', 'książki', 'telefony', 'drukarki']
#kryteria = ['rowery', 'telewizory', 'telefony']
kryteria_d = { i : "%.2f" % (1/len(kryteria)) for i in kryteria}
# All unordered index pairs of criteria; every pair starts at ratio 1 (equal).
kryteriaKomb =list(itr.combinations(range(len(kryteria)),2))
kryteriaKomb_d = { i : 1 for i in kryteriaKomb }
# Widget registries filled in while the comparison rows are built.
label_list = []
label_list2 = []
button_list = []
spinbox_list = []
scale_list = []
# 17-step Saaty comparison scale, 1/9 ... 1 ... 9, kept as strings.
skala = ['1/9', '1/8', '1/7', '1/6', '1/5', '1/4', '1/3', '1/2', '1', '2', '3', '4', '5', '6', '7', '8', '9']
##################### frames in the window
root_ahp = Tk()
root_ahp.wm_title("AHP tkinter")
frame_g = Frame(root_ahp)
frame_g.grid(row = 0, column = 0, sticky = 'n', columnspan = 2, padx = 10, pady = 10)
frame_l = Frame(root_ahp)
frame_l.grid(row = 1, column = 0, sticky = 'n')
frame_p = Frame(root_ahp)
frame_p.grid(row = 1, column = 1, sticky = 'n')
frame_d = Frame(root_ahp)
frame_d.grid(row = 2, column = 0, columnspan = 2 ,sticky = 'n', padx = 10, pady = 10)
##################### funkcje
def aktd():
    """Copy each comparison row's current scale label into kryteriaKomb_d."""
    # save the scale shown on each row into the dictionary;
    # label_list holds 4 labels per row, index (i*4)+1 is the left value label
    for i in range(len(kryteriaKomb_d)):
        kryteriaKomb_d[kryteriaKomb[i]] = label_list[(i*4)+1].cget('text')
def wagi():
    """Run the AHP computation.

    Builds the reciprocal pairwise-comparison matrix from kryteriaKomb_d,
    derives the priority (weight) vector, and computes the consistency
    ratio (CR).  The verdict is painted into listbox2 and the OK button is
    enabled only when the matrix is consistent; the weights are written
    back into kryteria_d as 3-decimal strings.
    """
    # AHP: Saaty random consistency indices for matrix sizes 1..10.
    RandomIndex = [0.01, 0.01, 0.58, 0.90, 1.12, 1.24, 1.32, 1.41, 1.45, 1.49]
    cri = 0.20  # accepted CR threshold
    n = len(kryteria)
    # Reciprocal comparison matrix from the stored ratios.
    k_matrix = np.ones((n, n))
    for i in range(n):
        for j in range(n):
            if i == j :
                k_matrix[i][j] = 1
            if i < j:
                # eval turns fraction strings such as '1/3' into numbers
                # (values only ever come from the fixed `skala` list).
                k_matrix[i][j] = eval(str(kryteriaKomb_d[(i,j)]))
                k_matrix[j][i] = k_matrix[i][j]**(-1)
    # Column-normalise, then average the rows -> weight vector.
    weights = np.true_divide(k_matrix, np.sum(k_matrix, axis=0))
    weights = np.sum(weights, axis=1)
    weights = weights / weights.shape[0]
    # Consistency check: lambda_max, consistency index, consistency ratio.
    cons_vector = np.multiply(k_matrix, weights)
    weightedsum = np.sum(cons_vector, axis=1)
    ratio = weightedsum / weights
    lambdamax = np.sum(ratio, axis = 0) / n
    if n - 1 == 0:
        ConsistencyIndex = (lambdamax - n) / 1
    else:
        ConsistencyIndex = (lambdamax - n) / (n - 1)
    ConsistencyRatio = ConsistencyIndex / RandomIndex[n-1]
    if ConsistencyRatio <= cri:
        # consistent: green verdict, OK button enabled
        listbox2.delete(0,END)
        listbox2.insert(END,'macierz jest spójna:')
        listbox2.insert(END, '{0:.3g}'.format(ConsistencyRatio)+ ' < '+ str(cri))
        listbox2.config(bg = '#b2ffa8')
        b_ok.config(relief=RAISED)
        b_ok.config(state=NORMAL)
    else:
        # inconsistent: red verdict, OK button disabled
        listbox2.delete(0,END)
        listbox2.insert(END, 'macierz NIE jest spójna: ')
        listbox2.insert(END, '{0:.3g}'.format(ConsistencyRatio)+ ' > '+ str(cri))
        listbox2.config(bg = '#ff7a7a')
        b_ok.config(relief=SUNKEN)
        b_ok.config(state=DISABLED)
    for i in range(len(kryteria)):
        kryteria_d[kryteria[i]] = "%.3f" % weights[i]
def wyswietl_wynik():
    """Refresh the result listbox with the current criterion weights."""
    # display the weights dictionary in the listbox
    listbox.delete(0,END)
    for i in kryteria_d:
        listbox.insert(END, (i, kryteria_d[i]))
def sval(r):
    """Slider callback: mirror every slider position onto its two labels.

    The left and right labels show mirrored (reciprocal) entries of the
    `skala` list.  Any change invalidates the computed weights, so the OK
    button is disabled until "oblicz wagi" is pressed again.
    """
    for i in range(len(scale_list)):
        label_list[(i*4)+1].config(text = skala[-int(scale_list[i].get())-1])
        label_list[(i*4)+2].config(text = skala[int(scale_list[i].get())])
    b_ok.config(relief = SUNKEN)
    b_ok.config(state = DISABLED)
def bt():
    """Composite handler for the "oblicz wagi" button: save the chosen
    scales, recompute the AHP weights, and refresh the result display."""
    aktd()
    wagi()
    wyswietl_wynik()
def nLabel(r, c, tx):
    """Create a plain label with text *tx* at grid (r, c) and register it."""
    label = Label(frame_p, text = tx)
    label_list.append(label)
    label.grid(row=r, column=c, pady=1, padx = 4)
def nLabel2(r, c, tx):
    """Create a bordered fixed-width value label at grid (r, c)."""
    label = Label(frame_p, text = tx, width = 3, relief = GROOVE)
    label_list.append(label)
    label.grid(row=r, column=c, pady=1, padx = 4)
def nSpinbox(r, c):
    """Create a Spinbox presenting the comparison scale at grid (r, c).

    The widget is registered in spinbox_list and starts at the neutral
    value "1".
    """
    # BUG FIX: the old callback was "lambda: bt(spinbox, row)", but bt()
    # takes no arguments, so every spinbox change raised a TypeError.
    # bt() reads the widget state itself, so no arguments are needed.
    spinbox = Spinbox(frame_p, values=skala, width = 3, font=Font(family='default', size=12),
                      command = lambda: bt())
    spinbox_list.append(spinbox)
    spinbox.grid(row=r, column=c, pady=1, padx=4)
    spinbox.delete(0,"end")
    spinbox.insert(0,1)
def nScale(r, c):
    """Create a 17-position comparison slider at grid (r, c)."""
    scale = Scale(frame_p, from_=0, to= 16, orient= HORIZONTAL, showvalue = 0, command = sval, length = 150)
    scale_list.append(scale)
    scale.set(8)  # index 8 of skala is '1': both criteria equally important
    scale.grid(row =r, column= c, pady = 1, padx = 4)
def lkat(r, x):
    """Build one pairwise-comparison row in grid row *r*.

    *x* is a pair of criterion indices from kryteriaKomb; the row layout
    is: name, value label, slider, value label, name.
    """
    nLabel(r,0, kryteria[int(x[0])])
    nLabel2(r,1, '--')
    nScale(r,2)
    nLabel2(r,3, '--')
    nLabel(r,4, kryteria[int(x[1])])
def reset():
    """Reset every comparison to "equally important" and recompute.

    All sliders return to the neutral position, all stored ratios to 1,
    and the weights/result display are refreshed; OK is re-enabled.
    """
    for i in range(len(kryteriaKomb_d)):
        scale_list[i].set(8)
        kryteriaKomb_d[kryteriaKomb[i]] = 1
    wagi()
    wyswietl_wynik()
    b_ok.config(relief=RAISED)
    b_ok.config(state=NORMAL)
# Result boxes: criterion weights on top, consistency verdict below.
listbox = Listbox(frame_l, width=21, height=len(kryteria)+1)
listbox.grid(columnspan = 2, row=0, column=0, pady=4, padx = 4)
listbox2 = Listbox(frame_l, width=21, height=2)
listbox2.grid(columnspan = 2, row=1, column=0, pady=4, padx = 4)
# Show the initial (equal) weights.
for i in kryteria_d:
    listbox.insert(END, (i, kryteria_d[i]))
# One comparison row per criteria pair.
for i in range(len(kryteriaKomb)):
    lkat(i, kryteriaKomb[i])
# Action buttons: accept (closes the window), recompute, reset.
b_ok = Button(frame_l, text = 'ok', command=root_ahp.destroy)
b_ok.grid(row = 4, column = 0, sticky = 'nwes', columnspan = 2, pady =(0,4), padx = 4)
b_m = Button(frame_l, text = 'oblicz wagi', command= bt)
b_m.grid(row = 3, column = 0, sticky = 'nwes', columnspan = 2, padx = 4)
b_r = Button(frame_l, text = 'reset', command= reset)
b_r.grid(row = 5, column = 0, sticky = 'nwes', columnspan = 2, pady = (8,0), padx = 4)
root_ahp.mainloop()
| kwiecien-rafal/optymalizator-prezentow | AHP tkinter.py | AHP tkinter.py | py | 5,843 | python | en | code | 0 | github-code | 36 |
38516381300 | """
Demo showing GP predictions in 1d and optimization of the hyperparameters.
"""
import numpy as np
from ezplot import figure, show
from reggie import make_gp
def main():
"""Run the demo."""
# generate random data from a gp prior
rng = np.random.RandomState(0)
gp = make_gp(0.1, 1.0, 0.1, kernel='matern1')
X = rng.uniform(-2, 2, size=(20, 1))
Y = gp.sample(X, latent=False, rng=rng)
# create a new GP and optimize its hyperparameters
gp = make_gp(1, 1, 1, kernel='se')
gp.add_data(X, Y)
gp.optimize()
# get the posterior moments
x = np.linspace(X.min(), X.max(), 500)
mu, s2 = gp.predict(x[:, None])
# plot the posterior
ax = figure().gca()
ax.plot_banded(x, mu, 2*np.sqrt(s2), label='posterior mean')
ax.scatter(X.ravel(), Y, label='observed data')
ax.legend(loc=0)
ax.set_title('Basic GP')
ax.set_xlabel('inputs, X')
ax.set_ylabel('outputs, Y')
# draw/show it
ax.figure.canvas.draw()
show()
if __name__ == '__main__':
main()
| mwhoffman/reggie | reggie/demos/basic.py | basic.py | py | 1,040 | python | en | code | 6 | github-code | 36 |
12231215672 | '''
INITIALIZE empty list grocery_inventory
SET products = file products.txt
'''
grocery_inventory = []          # master list of product dicts ("name"/"cost"/"qty")
products = "products.txt"       # comma-separated product data file
art = "grocery_art.txt"         # ASCII-art banner shown at startup
'''
Fucntion LOAD ART loads the ascii art file.
It removes the line break at the end of each line and prints each line to show the art.
'''
def load_art(filename):
    """Print the ASCII-art banner stored in *filename*.

    Each line is printed with its newline stripped so the art is not
    double-spaced by print()'s own newline.
    """
    # "with" guarantees the handle is closed (the original leaked it).
    with open(filename, "r") as file:
        for line in file:
            art_line = line.replace("\n", "")
            print (art_line)
'''
FUNCTION load_data(filename) imports a file and reads the lines.
Each line is then spliced at the comma to create a list.
Each list represents one item and every index is sorted into a dictionary for the item.
Finally, each item dictionary is appended into the master inventory list.
'''
def load_data(filename):
    """Load product records from *filename* into ``grocery_inventory``.

    Each line is comma-separated; column 1 is the product name, column 2
    the unit cost and column 3 the quantity in stock.  Every record
    becomes a dict with "name"/"cost"/"qty" keys, is appended to the
    module-level ``grocery_inventory`` list, and the list is returned.
    """
    # "with" closes the file even if a malformed line raises.
    with open(filename, "r") as file:
        for line in file:
            value = line.replace("\n", "").split(',')
            product = {
                "name": value[1],
                "cost": value[2],
                "qty": value[3],
            }
            grocery_inventory.append(product)
    return grocery_inventory
'''
FUNCTION display_product(product_name):
DISPLAY product_name
DISPLAY brand
DISPLAY cost
DISPLAY qty
'''
def display_product(product_index):
    """Print a short sales blurb for one inventory record.

    *product_index* is a dict with "name", "cost" (string price) and
    "qty" (string count); the total inventory value is cost * qty,
    rounded to two decimals.
    """
    item_name = product_index['name']
    unit_cost = product_index['cost']
    stock_qty = product_index['qty']
    inventory_value = round((float(unit_cost) * int(stock_qty)), 2)
    blurb = [
        "",
        item_name,
        "",
        f"This item costs ${unit_cost}",
        f"We have {stock_qty} in stock.",
        "",
        f"You can buy our entire inventory of {item_name} for a bargain price of ${inventory_value}!",
        "",
    ]
    for text in blurb:
        print(text)
'''
function SEARCH allows the user to search for a product (case insensitive),
get a list of the inventory, and returns a message if the product is not in stock.
'''
def search():
    """Prompt for a product query and display matching inventory items.

    Typing "inventory" (any case) lists every product name and restarts
    the search.  Otherwise the title-cased query is matched as a
    substring of each product name.  Ends by asking whether to search
    again.
    """
    user_input = input("Search for a product: ")
    valid_search = False
    # if they input inventory, show a list of all items in stock
    if user_input.lower() == "inventory":
        for d in grocery_inventory:
            print (d['name'])
        valid_search = True
        print("")
        search()
    # otherwise, look to see if their input matches any groceries
    for d in grocery_inventory:
        if user_input.title() in d['name']:
            #print (d['name'])
            display_product(d)
            valid_search = True
            # NOTE(review): this `continue` is the last statement of the
            # loop body, so it has no effect.
            continue
    # if there are no items that match, print the following
    if valid_search == False:
        print ("")
        print("It looks like we don't have any items that match your search.")
        print("Try again.")
        print("")
    # call search again function
    search_again()
'''
function SEARCH AGAIN allows the user to decide if they want to search for another product or not
'''
def search_again():
    """Ask whether to run another search.

    "yes" starts a new search, "no" returns, anything else re-prompts.
    """
    print("Would you like to search for another product?")
    user_input = input("yes/no --> ")
    if user_input.lower() == "yes":
        print("")
        search()
    elif user_input.lower() != "no":
        print("")
        print("Input not recognized.")
        search_again()
'''
Open the grocery store: load the inventory and banner art, greet the
customer, and start the interactive search loop.
'''
load_data(products)
#print(grocery_inventory)
print ("")
load_art(art)
print ("")
print ("")
print ("Welcome to Internet Produce!")
print ("Your not so specialist grocer.")
print ("")
print ("Get started by searching for a product below or type")
print ("'inventory' at any time to see what's available")
print ("")
search()
| brooklyndippo/final-exam-produce-python | script.py | script.py | py | 4,247 | python | en | code | 0 | github-code | 36 |
41076521456 |
from PyQt5.QtWidgets import QDialog, QDialogButtonBox, QVBoxLayout, QLabel
class AlertDialogClass(QDialog):
    """Simple modal alert dialog: a title, a message label and an OK button."""

    def __init__(self, title, message, parent=None):
        """Build the alert dialog.

        :param title: window title text
        :param message: body text displayed above the button box
        :param parent: optional parent widget
        """
        QDialog.__init__(self, parent)
        self.setWindowTitle(title)
        # Only an OK button is shown; Cancel was never enabled.
        q_btn = QDialogButtonBox.Ok
        self.buttonBox = QDialogButtonBox(q_btn)
        self.buttonBox.accepted.connect(self.accept)
        self.buttonBox.rejected.connect(self.reject)
        self.layout = QVBoxLayout()
        # (Removed the stray debug prints of title/message to stdout.)
        message_label = QLabel(message)
        self.layout.addWidget(message_label)
        self.layout.addWidget(self.buttonBox)
        self.setLayout(self.layout)
| samuelterra22/Analysis-of-antenna-coverage | src/main/python/dialogs/alert_dialog_class.py | alert_dialog_class.py | py | 855 | python | en | code | 5 | github-code | 36 |
42354884711 | from openpyxl import Workbook, load_workbook
target_workbook = Workbook()
sheet=target_workbook.active
# Header row of the target sheet (Russian column titles for a payments export).
sheet["A1"]="Номер заказа у партнера"
sheet["B1"]="Номер сертификата" # certificate number
sheet["C1"]="Продукт в системе партнера" # payment purpose
sheet["D1"]="Код продукта в BD"
sheet["E1"]="Дата начала действия" # payment date
sheet["F1"]="Дата окончания действия" # certificate expiry date
sheet["G1"]="Стоимость" # amount
sheet["H1"]="ФИО плательщика" # payer full name
sheet["I1"]="Дата рождения плательщика"
sheet["J1"]="Пол плательщика"
sheet["K1"]="Номер телефона плательщика" # phone number
sheet["L1"]="Адрес электронной почты плательщика"
sheet["M1"]="Серия паспорта плательщика"
sheet["N1"]="Номер паспорта плательщика"
sheet["O1"]="Кем выдан паспорт плательщика"
sheet["P1"]="Дата выдачи паспорта плательщика"
sheet["Q1"]="Адрес плательщика"
sheet["R1"]="Гражданство плательщика"
sheet["S1"]="Город"
sheet["T1"]="Банк"
sheet["U1"]="Наименование ДО" # office
morphing_workbook1=load_workbook(filename="sample_for_test.xlsx")
target_sheet=morphing_workbook1.active
# NOTE(review): iter_cols with values_only yields whole-column *tuples*,
# so comparing `value` against a single string can never be True -- a
# per-cell scan was probably intended; confirm against the source sheet.
for value in target_sheet.iter_cols(min_row=4,min_col=2,values_only=True):
    if value=="Назначение платежа":
        # NOTE(review): max_row is a Worksheet property (not a Workbook
        # method), and append() also lives on worksheets -- both calls
        # below would raise.  This branch looks unfinished.
        length=morphing_workbook1.max_row()
        for row in range(0,length):
            morphing_workbook1.append([row])
#for row_value in target_workbook.iter_rows(min_row=2,min_col=3,max_col=3,values_only=True):
target_workbook.save(filename="target_table.xlsx") | Mi6k4/programming_stuff | coding_stuff/python/opnepyexl/morphing.py | morphing.py | py | 1,960 | python | ru | code | 0 | github-code | 36 |
25641131982 | import shared
from shared import bcolours
from shared import baseSolution
class Solution(baseSolution):
    """Advent of Code 2022 day 4, part 1: count the pairs in which one
    elf's section range fully contains the other's."""

    RED_SQUARE = '🟥'
    GREEN_SQUARE = '🟩'

    def is_within_range(self, subject, range):
        """Determine whether *range* lies completely inside *subject*.

        (The original docstring stated the relation backwards: the check
        is subject[0] <= range[0] and subject[1] >= range[1], i.e. the
        FIRST argument contains the SECOND.)

        Args:
            subject (array): [low, high] bounds doing the containing
            range (array): [low, high] bounds tested for containment

        Returns:
            boolean: True when subject fully contains range
        """
        if self.debug:
            print(f'subject {subject[0]} <= {range[0]}')
            print(f'subject {subject[1]} >= {range[1]}')
        # Return an explicit bool instead of the implicit None the
        # original produced on a miss; callers only test truthiness,
        # so this is backward compatible.
        return int(subject[0]) <= int(range[0]) and int(subject[1]) >= int(range[1])

    def get_range(self, pair):
        """Split a pair of 'low-high' strings into two [low, high] lists."""
        range_1 = pair[0].split('-')
        range_2 = pair[1].split('-')
        return range_1, range_2

    def visualiseRange(self, range):
        """Print a 100-square strip; squares inside *range* are green."""
        low = int(range[0])
        high = int(range[1])
        i = 1
        while i <= 100:
            if i < low or i > high:
                print(self.RED_SQUARE, end='')
            else:
                print(self.GREEN_SQUARE, end='')
            i += 1
        print('')

    def run(self):
        """Count pairs where either range fully contains the other.

        NOTE(review): in_range is neither returned nor printed outside
        debug mode -- confirm whether baseSolution consumes the result.
        """
        pairs = 0
        in_range = 0
        pair_data = shared.fileToArray('day_4/'+self.inputFile)
        for pair in pair_data:
            pairs += 1
            pair = pair.split(',')
            if self.debug:
                print(f'pair {pairs}: {pair}')
            range_1, range_2 = self.get_range(pair)
            if self.visualise:
                print(f'{range_1} - {range_2}')
                self.visualiseRange(range_1)
                self.visualiseRange(range_2)
            # Containment in either direction counts.
            if self.is_within_range(range_1, range_2) or self.is_within_range(range_2, range_1):
                in_range += 1
                if self.debug:
                    print(f'{bcolours.OKBLUE} {pairs} {bcolours.ENDC}')
        if self.debug:
            print(f'-------')
            print(f'Total pairs: {pairs}')
            print(f'Total in range: {in_range}')
| hellboy1975/aoc2022 | day_4/part_1.py | part_1.py | py | 2,182 | python | en | code | 0 | github-code | 36 |
20961161383 | # Author:HU YUE
import pickle
import os
import sys
import logging
import random
BASE_DIR=os.path.dirname(os.path.dirname( os.path.abspath(__file__) ))
sys.path.append(BASE_DIR)
def loadd(f_all, name):
    """Serialize *f_all* into "<name>.txt" with pickle, overwriting it."""
    target = "%s.txt" % name
    with open(target, "wb") as handle:
        pickle.dump(f_all, handle)
# def nadd(wood):
# with open("%s.txt"%name, "rb")as t:
# # stuffs=stuffs
# t_all = pickle.load(t)
# print(t_all)
# # print(type(t_all))
# t_all.append(wood)
# # print(t_all)
# loadd(t_all)
logger = logging.getLogger('TEST_LOG')
def coller1(name): # admin module: can modify user information!
    """Interactive administrator console loop.

    Users are stored one per file as "<name>.txt" pickles containing a
    dict with card_number/user/password/Credit_line/balance/repayment.
    Menu: 0 query, 1 create, 2 modify, 3 delete, 4 freeze, 5 quit.

    NOTE(review): the *name* parameter is immediately shadowed by every
    menu branch's own input() call, so it is effectively unused.
    """
    while True:
        print("____管理员系统1___\n"
              "0.查询用户密码\n"
              "1.创建用户和密码\n"
              "2.修改用户信息\n"
              "3.删除用户\n"
              "4.冻结用户\n"
              "5.退出")
        number=input("输入数字进行操作:")
        if number=="0":
            # Query: unpickle and print the user's record.
            name=input("输入用户名:")
            if not os.path.exists("%s.txt"%name):
                print("用户不存在!")
            else:
                with open("%s.txt"%name,"rb")as f:
                    f_all=pickle.load(f)
                    print(f_all)
                logger.info('您查询了%s的用户信息。'%name)
        if number=="1":
            # Create: new record with a 10000 credit line.
            name=input("创建新用户:")
            if os.path.exists("%s.txt"%name):
                print("用户已存在请重新输出入")
            else:
                open("%s.txt"%name, 'w').close()
                password=input("新用户密码:")
                new_user={"card_number":"",
                          "user":name,
                          "password":password,
                          "Credit_line":10000,
                          "balance":0,
                          "repayment":0,
                          }
                for i in range(6): # randomly generate the credit card number!
                    each = random.randrange(0, 9)
                    tmp = chr(random.randint(65, 90))
                    new_user["card_number"]+= str(each) + str(tmp)
                print("用户账号已创建!")
                print(new_user)
                with open("%s.txt"%name,"wb")as f:
                    pickle.dump(new_user,f)
                logger.info('您创建了%s新用户!。' % name)
        if number=="2":
            # Modify: sub-menu for password / credit line / balance / repayment.
            name=input("输入需要修改的用户名:")
            if os.path.exists("%s.txt" % name):
                n=0
                while n<3:
                    print("____修改用户信息___\n"
                          "0.修改用户password\n"
                          "1.修改用户Credit_line\n"
                          "2.修改用户balance\n"
                          "3.修改用户repayment\n"
                          "4.返回上层菜单")
                    with open("%s.txt" % name, "rb")as f: # show the user's current information
                        f_all = pickle.load(f)
                        print(f_all)
                    number1 = input("选择修改:")
                    if number1 == "0":
                        new = input("新密码:")
                        with open("%s.txt" % name, "rb")as f:
                            f_all = pickle.load(f)
                            f_all["password"] = new
                            loadd(f_all, name)
                        logger.info('您对%s的密码进行了修改,新密码为%s!。' % (name,new))
                    if number1 == "1":
                        new = input("新额度:")
                        with open("%s.txt" % name, "rb")as f:
                            f_all = pickle.load(f)
                            f_all["Credit_line"] = new
                            loadd(f_all, name)
                        logger.info('您对%s的额度进行了修改,新额度为%s!。' % (name, new))
                    if number1 == "2":
                        new = input("新余额:")
                        with open("%s.txt" % name, "rb")as f:
                            f_all = pickle.load(f)
                            f_all["balance"] = new
                            loadd(f_all, name)
                        logger.info('您对%s的余额进行了修改,新余额为%s!。' % (name, new))
                    if number1 == "3":
                        new = input("新还款金度:")
                        with open("%s.txt" % name, "rb")as f:
                            f_all = pickle.load(f)
                            f_all["repayment"] = new
                            loadd(f_all, name)
                        logger.info('您对%s的还款金度进行了修改,新还款金额为%s!。' % (name, new))
                    if number1 == "4":
                        n=3
            else:
                print("要修改的用户不存在!请确认后输入")
        if number=="3":
            # Delete: remove the user's pickle file.
            name=input("输入用户名:")
            if os.path.exists("%s.txt"%name):
                os.remove("%s.txt"%name)
                logger.info('您删除了%s的用户信息!。' % name)
            else:
                print("要删除的用用户不存在!")
        if number=="4":
            # Freeze: keep a pickled list of frozen account names.
            if not os.path.exists("forzen_user.txt"):
                open("forzen_user.txt","w").close()
                forzen=[]
                with open("forzen_user.txt","wb")as f:
                    pickle.dump(forzen,f)
            else:
                with open("forzen_user.txt", "rb")as f:
                    f_all=pickle.load(f)
                    print(f_all) # debug output
                dname=input("需冻结账户:")
                if dname in f_all:
                    print("用户已冻结!")
                    continue
                else:
                    with open("forzen_user.txt", "wb")as t:
                        f_all.append(dname)
                        pickle.dump(f_all,t)
                    logger.info('您冻结了%s用户!。' % name)
        if number=="5":
            break
# os.path.exists("user_ma.txt")
#
# print(os.path.exists("user_ma.txt"))
# coller1("hy")
| 001fly/-Module-two-operation | Atm/core/account1.py | account1.py | py | 6,440 | python | en | code | 0 | github-code | 36 |
73360149224 | # django imports
from django.contrib.auth.decorators import permission_required
from django.core.urlresolvers import reverse
from django.forms import ModelForm
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
# lfs imports
import lfs.core.utils
from lfs.catalog.models import StaticBlock
class StaticBlockForm(ModelForm):
    """Form to add and edit a static block.

    No ``fields``/``exclude`` is declared, so every editable model field
    of ``StaticBlock`` is exposed (the default on the old Django versions
    this module targets).
    """
    class Meta:
        model = StaticBlock
@permission_required("manage_shop", login_url="/login/")
def manage_static_blocks(request):
"""Dispatches to the first static block or to the add static block form.
"""
try:
sb = StaticBlock.objects.all()[0]
url = reverse("lfs_manage_static_block", kwargs={"id": sb.id})
except IndexError:
url = reverse("lfs_add_static_block")
return HttpResponseRedirect(url)
@permission_required("manage_shop", login_url="/login/")
def manage_static_block(request, id, template_name="manage/static_block/static_block.html"):
"""Displays the main form to manage static blocks.
"""
sb = get_object_or_404(StaticBlock, pk=id)
if request.method == "POST":
form = StaticBlockForm(instance=sb, data=request.POST)
if form.is_valid():
form.save()
return lfs.core.utils.set_message_cookie(
url = reverse("lfs_manage_static_block", kwargs={"id" : sb.id}),
msg = _(u"Static block has been saved."),
)
else:
form = StaticBlockForm(instance=sb)
return render_to_response(template_name, RequestContext(request, {
"static_block" : sb,
"static_blocks" : StaticBlock.objects.all(),
"form" : form,
"current_id" : int(id),
}))
@permission_required("manage_shop", login_url="/login/")
def add_static_block(request, template_name="manage/static_block/add_static_block.html"):
"""Provides a form to add a new static block.
"""
if request.method == "POST":
form = StaticBlockForm(data=request.POST)
if form.is_valid():
new_sb = form.save()
return lfs.core.utils.set_message_cookie(
url = reverse("lfs_manage_static_block", kwargs={"id" : new_sb.id}),
msg = _(u"Static block has been added."),
)
else:
form = StaticBlockForm()
return render_to_response(template_name, RequestContext(request, {
"form" : form,
"static_blocks" : StaticBlock.objects.all(),
}))
@permission_required("manage_shop", login_url="/login/")
def preview_static_block(request, id, template_name="manage/static_block/preview.html"):
"""Displays a preview of an static block
"""
sb = get_object_or_404(StaticBlock, pk=id)
return render_to_response(template_name, RequestContext(request, {
"static_block" : sb,
}))
@permission_required("manage_shop", login_url="/login/")
def delete_static_block(request, id):
"""Deletes static block with passed id.
"""
sb = get_object_or_404(StaticBlock, pk=id)
# First we delete all referencing categories. Otherwise they would be
# deleted
for category in sb.categories.all():
category.static_block = None
category.save()
sb.delete()
return lfs.core.utils.set_message_cookie(
url = reverse("lfs_manage_static_blocks"),
msg = _(u"Static block has been deleted."),
) | django-lfs/lfs | manage/views/static_blocks.py | static_blocks.py | py | 3,646 | python | en | code | 23 | github-code | 36 |
7386840342 | # Definition for singly-linked list.
# class ListNode:
# def __init__(self, x):
# self.val = x
# self.next = None
class Solution:
    def mergeTwoLists(self, l1: 'ListNode', l2: 'ListNode') -> 'ListNode':
        """Merge two sorted singly-linked lists into one sorted list.

        While both lists have nodes, the smaller head value is *copied*
        into a fresh node; once one list runs out, the other list's
        remaining nodes are linked in as-is (same aliasing behaviour as
        the original implementation).
        """
        sentinel = ListNode(0)  # dummy head; the result hangs off sentinel.next
        tail = sentinel
        while l1 and l2:
            if l1.val <= l2.val:
                tail.next = ListNode(l1.val)
                l1 = l1.next
            else:
                tail.next = ListNode(l2.val)
                l2 = l2.next
            tail = tail.next
        # Attach whatever remains of the non-exhausted list directly.
        tail.next = l1 if l1 else l2
        return sentinel.next
| dmauro22/Examples | Python/MergeTwoSortedLists/MergeTwoSortedLists.py | MergeTwoSortedLists.py | py | 877 | python | en | code | 1 | github-code | 36 |
35001350068 | import os
import struct
import numpy as np
# Based on https://gist.github.com/akesling/5358964 which is in return
# loosely inspired by http://abel.ee.ucla.edu/cvxopt/_downloads/mnist.py
# which is GPL licensed.
def read(dataset = "training", path = "."):
# Python function for importing the MNIST data set. It returns an iterator
# of 2-tuplesq2f s with the first element being the label and the second element
# being a numpy.uint8 2D array of pixel data for the given image.
if dataset is "training":
fname_img = os.path.join(path, 'train-images.idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')
elif dataset is "testing":
fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')
else:
raise ValueError("dataset must be 'testing' or 'training'")
# Load everything in some numpy arrays
with open(fname_lbl, 'rb') as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
lbl = np.fromfile(flbl, dtype=np.int8)
with open(fname_img, 'rb') as fimg:
magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)
img = np.divide(img, 255)
l = list()
for i in range(len(lbl)):
img_vec = img[i].flatten()
lbl_vec = np.zeros(10)
lbl_vec[lbl[i]] = 1
l.append([list(img_vec), list(lbl_vec)])
return l
def show(image):
    """Render a flattened 28x28 MNIST image with matplotlib.

    *image* is any sequence of 784 pixel values; it is reshaped to the
    28x28 grid before display.
    """
    image = np.array(image).reshape(28, 28)
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(1,1,1)
    imgplot = ax.imshow(image, cmap='Greys')
    imgplot.set_interpolation('nearest')
    # Ticks on the top/left so the image origin reads naturally.
    ax.xaxis.set_ticks_position('top')
    ax.yaxis.set_ticks_position('left')
plt.show() | CaptainProton42/MNISTFromScratch | modules/mnist.py | mnist.py | py | 1,898 | python | en | code | 1 | github-code | 36 |
17837818682 | # -*- coding: utf-8 -*-
# E  (AtCoder ABC163 problem E)
#
# DP over placements: take the values in descending order; the i-th value
# is placed either at the next free slot from the left (x placed so far)
# or from the right (y placed so far), earning value * distance moved.
N = int(input())
A = list(map(int, input().split()))
# Pair each value with its original position, then sort largest-first.
d = []
for idx, a in enumerate(A):
    d.append({'idx':idx, 'val':a})
d = sorted(d, key=lambda x:x['val'], reverse=True)
# dp[x][y]: best total after placing the top x+y values, x of them packed
# from the left edge and y from the right edge.
dp = [[-float('inf')]*(N+1) for _ in range(N+1)]
dp[0][0] = 0
# dp[0][1] = d[0]['val'] * (N - d[0]['idx'])
# dp[1][0] = d[0]['val'] * (d[0]['idx'] - 1)
ans = -float('inf')
# x + y runs from 1 to N (number of values placed so far)
for xy in range(1, N+1):
    # x runs from 0 to x+y
    for x in range(xy+1):
        y = xy - x
        if y > 0:
            # place the xy-th largest value into the y-th slot from the right
            dp[x][y] = max(dp[x][y], dp[x][y - 1] + d[xy - 1]['val'] * (N - y + 1 - d[xy - 1]['idx']))
        if x > 0:
            # place it into the x-th slot from the left
            dp[x][y] = max(dp[x][y], dp[x - 1][y] + d[xy - 1]['val'] * (d[xy -1]['idx'] - x))
        if xy == N:
            ans = max(ans, dp[x][y])
# NOTE(review): printing the full DP table looks like leftover debugging;
# submitting it as-is would break the judge's expected output format.
print(dp)
| hsuetsugu/atc | ABC163/E.py | E.py | py | 818 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.