seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
20131502451 | #!/usr/bin/env python3
"""Module containing the AverageStiffness class and the command line interface."""
import shutil
import argparse
from pathlib import Path
import matplotlib.pyplot as plt
import pandas as pd
import numpy as np
from biobb_dna.utils import constants
from biobb_dna.utils.loader import read_series
from biobb_common.generic.biobb_object import BiobbObject
from biobb_common.tools.file_utils import launchlogger
from biobb_common.tools import file_utils as fu
from biobb_common.configuration import settings
class AverageStiffness(BiobbObject):
    """
    | biobb_dna AverageStiffness
    | Calculate average stiffness constants for each base pair of a trajectory's series.

    Args:
        input_ser_path (str): Path to .ser file for helical parameter. File is expected to be a table, with the first column being an index and the rest the helical parameter values for each base/basepair. File type: input. `Sample file <https://raw.githubusercontent.com/bioexcel/biobb_dna/master/biobb_dna/test/data/stiffness/canal_output_roll.ser>`_. Accepted formats: ser (edam:format_2330).
        output_csv_path (str): Path to .csv file where output is saved. File type: output. `Sample file <https://raw.githubusercontent.com/bioexcel/biobb_dna/master/biobb_dna/test/reference/stiffness/stiffavg_roll.csv>`_. Accepted formats: csv (edam:format_3752).
        output_jpg_path (str): Path to .jpg file where output is saved. File type: output. `Sample file <https://raw.githubusercontent.com/bioexcel/biobb_dna/master/biobb_dna/test/reference/stiffness/stiffavg_roll.jpg>`_. Accepted formats: jpg (edam:format_3579).
        properties (dict):
            * **KT** (*float*) - (0.592186827) Value of Boltzmann temperature factor.
            * **sequence** (*str*) - (None) Nucleic acid sequence corresponding to the input .ser file. Length of sequence is expected to be the same as the total number of columns in the .ser file, minus the index column (even if later on a subset of columns is selected with the *usecols* option).
            * **helpar_name** (*str*) - (None) helical parameter name.
            * **seqpos** (*list*) - (None) list of sequence positions (columns indices starting by 0) to analyze. If not specified it will analyse the complete sequence.
            * **remove_tmp** (*bool*) - (True) [WF property] Remove temporal files.
            * **restart** (*bool*) - (False) [WF property] Do not execute if output files exist.
    Examples:
        This is a use example of how to use the building block from Python::

            from biobb_dna.stiffness.average_stiffness import average_stiffness

            prop = {
                'helpar_name': 'twist',
                'sequence': 'GCAT',
            }
            average_stiffness(
                input_ser_path='/path/to/twist.ser',
                output_csv_path='/path/to/table/output.csv',
                output_jpg_path='/path/to/table/output.jpg',
                properties=prop)
    Info:
        * wrapped_software:
            * name: In house
            * license: Apache-2.0
        * ontology:
            * name: EDAM
            * schema: http://edamontology.org/EDAM.owl
    """

    def __init__(self, input_ser_path, output_csv_path, output_jpg_path,
                 properties=None, **kwargs) -> None:
        properties = properties or {}

        # Call parent class constructor
        super().__init__(properties)
        self.locals_var_dict = locals().copy()

        # Input/Output files
        self.io_dict = {
            'in': {
                'input_ser_path': input_ser_path,
            },
            'out': {
                'output_csv_path': output_csv_path,
                'output_jpg_path': output_jpg_path
            }
        }

        self.properties = properties
        # Nucleic acid sequence; validated at launch time (must be provided).
        self.sequence = properties.get("sequence")
        # Boltzmann temperature factor used to scale the inverse covariance.
        self.KT = properties.get(
            "KT", 0.592186827)
        self.seqpos = properties.get("seqpos", None)
        self.helpar_name = properties.get("helpar_name", None)

        # Check the properties
        self.check_properties(properties)
        self.check_arguments()

    @launchlogger
    def launch(self) -> int:
        """Execute the :class:`AverageStiffness <stiffness.average_stiffness.AverageStiffness>` object."""

        # Setup Biobb
        if self.check_restart():
            return 0
        self.stage_files()

        # check sequence
        if self.sequence is None or len(self.sequence) < 2:
            raise ValueError("sequence is null or too short!")

        # get helical parameter from filename if not specified
        if self.helpar_name is None:
            for hp in constants.helical_parameters:
                if hp.lower() in Path(
                        self.io_dict['in']['input_ser_path']).name.lower():
                    self.helpar_name = hp
            if self.helpar_name is None:
                raise ValueError(
                    "Helical parameter name can't be inferred from file, "
                    "so it must be specified!")
        else:
            if self.helpar_name not in constants.helical_parameters:
                raise ValueError(
                    "Helical parameter name is invalid! "
                    f"Options: {constants.helical_parameters}")

        # get base length and unit from helical parameter name
        if self.helpar_name.lower() in ["roll", "tilt", "twist"]:
            self.hp_unit = "kcal/(mol*degree²)"
            scale = 1
        else:
            # translational parameters are given in Angstrom; 10.6 converts
            # the force constant into the expected unit (kept from original)
            self.hp_unit = "kcal/(mol*Ų)"
            scale = 10.6

        # check seqpos
        if self.seqpos is not None:
            # FIX: validate the type/shape BEFORE calling max()/min(); the
            # previous order raised a confusing TypeError for non-list values
            # instead of the intended ValueError.
            if not (isinstance(self.seqpos, list) and len(self.seqpos) > 1):
                raise ValueError(
                    "seqpos must be a list of at least two integers")
            if (max(self.seqpos) > len(self.sequence) - 2) or (min(self.seqpos) < 1):
                raise ValueError(
                    f"seqpos values must be between 1 and {len(self.sequence) - 2}")

        # Creating temporary folder
        self.tmp_folder = fu.create_unique_dir(prefix="avgstiffness_")
        fu.log('Creating %s temporary folder' % self.tmp_folder, self.out_log)

        # Copy input_file_path1 to temporary folder
        shutil.copy(self.io_dict['in']['input_ser_path'], self.tmp_folder)

        # read input .ser file
        ser_data = read_series(
            self.io_dict['in']['input_ser_path'],
            usecols=self.seqpos)
        if self.seqpos is None:
            # drop the index column and the last column
            ser_data = ser_data[ser_data.columns[1:-1]]
            # discard first and last base(pairs) from sequence
            sequence = self.sequence[1:]
            xlabels = [
                f"{sequence[i:i+2]}"
                for i in range(len(ser_data.columns))]
        else:
            sequence = self.sequence
            xlabels = [
                f"{sequence[i:i+2]}"
                for i in self.seqpos]

        # calculate average stiffness: stiffness matrix = KT * inverse of the
        # covariance matrix; its diagonal holds the per-position constants
        cov = ser_data.cov()
        stiff = np.linalg.inv(cov) * self.KT
        avg_stiffness = np.diag(stiff) * scale

        # save plot
        fig, axs = plt.subplots(1, 1, dpi=300, tight_layout=True)
        axs.plot(
            range(len(xlabels)),
            avg_stiffness,
            "-o")
        axs.set_xticks(range(len(xlabels)))
        axs.set_xticklabels(xlabels)
        axs.set_xlabel("Sequence Base Pair")
        axs.set_ylabel(f"{self.helpar_name.capitalize()} ({self.hp_unit})")
        axs.set_title(
            "Base Pair Helical Parameter Stiffness: "
            f"{self.helpar_name.capitalize()}")
        fig.savefig(
            self.io_dict['out']['output_jpg_path'],
            format="jpg")

        # save table
        dataset = pd.DataFrame(
            data=avg_stiffness,
            index=xlabels,
            columns=[f"{self.helpar_name}_stiffness"])
        dataset.to_csv(self.io_dict['out']['output_csv_path'])

        plt.close()

        # Remove temporary file(s)
        self.tmp_files.extend([
            self.stage_io_dict.get("unique_dir"),
            self.tmp_folder
        ])
        self.remove_tmp_files()

        self.check_arguments(output_files_created=True, raise_exception=False)

        return 0
def average_stiffness(
        input_ser_path: str, output_csv_path: str, output_jpg_path: str,
        properties: dict = None, **kwargs) -> int:
    """Convenience wrapper: build an :class:`AverageStiffness <stiffness.average_stiffness.AverageStiffness>`
    instance and run its :meth:`launch() <stiffness.average_stiffness.AverageStiffness.launch>` method."""
    block = AverageStiffness(
        input_ser_path=input_ser_path,
        output_csv_path=output_csv_path,
        output_jpg_path=output_jpg_path,
        properties=properties, **kwargs)
    return block.launch()
def main():
    """Command line execution of this building block. Please check the command line documentation."""
    arg_parser = argparse.ArgumentParser(
        description='Calculate average stiffness constants for each base pair of a trajectory\'s series.',
        formatter_class=lambda prog: argparse.RawTextHelpFormatter(prog, width=99999))
    arg_parser.add_argument('--config', required=False, help='Configuration file')

    required = arg_parser.add_argument_group('required arguments')
    required.add_argument('--input_ser_path', required=True,
                          help='Helical parameter input ser file path. Accepted formats: ser.')
    required.add_argument('--output_csv_path', required=True,
                          help='Path to output csv file. Accepted formats: csv.')
    required.add_argument('--output_jpg_path', required=True,
                          help='Path to output jpg file. Accepted formats: jpg.')

    parsed = arg_parser.parse_args()
    # fall back to an empty JSON config when --config is not given
    config = parsed.config if parsed.config else "{}"
    properties = settings.ConfReader(config=config).get_prop_dic()

    average_stiffness(
        input_ser_path=parsed.input_ser_path,
        output_csv_path=parsed.output_csv_path,
        output_jpg_path=parsed.output_jpg_path,
        properties=properties)


if __name__ == '__main__':
    main()
| bioexcel/biobb_dna | biobb_dna/stiffness/average_stiffness.py | average_stiffness.py | py | 10,178 | python | en | code | 0 | github-code | 90 |
9412524873 |
import torch
from torch import autograd
from torch.autograd import Variable
import torch.nn.functional as F
import torch.nn as nn
import bouncing_balls as b
from conv_lstm import CLSTM, weights_init
import numpy as np
import cv2
import time
# ---- experiment configuration ----
num_features=10  # hidden feature channels for the CLSTM
filter_size=3    # convolution kernel size
batch_size=4
shape=(32,32) #H,W
inp_chans=3      # RGB input channels
nlayers=1        # number of stacked CLSTM layers
seq_len=10       # frames per training sequence
num_balls = 2    # balls per simulated video
max_step = 200000  # total optimization steps
seq_start = 5    # context frames fed before free-running prediction
lr = .001        # Adam learning rate
# NOTE(review): this value is passed to nn.Dropout, which interprets its
# argument as the probability of ZEROING a unit — so 0.8 drops 80% of the
# inputs despite the "keep_prob" name. Confirm intent.
keep_prob = 0.8
dtype = torch.cuda.FloatTensor  # requires a CUDA device
fourcc = cv2.VideoWriter_fourcc(*'MJPG')  # codec for the sample videos
def generate_bouncing_ball_sample(batch_size, seq_length, shape, num_balls):
    """Create a batch of bouncing-ball videos as a channels-first tensor.

    Returns a float32 tensor of shape (batch_size, seq_length, 3, shape, shape).
    """
    dat = np.zeros((batch_size, seq_length, shape, shape, 3), dtype=np.float32)
    # FIX: Python 3 `range` (the original used Py2-only `xrange`, a NameError here)
    for i in range(batch_size):
        # NOTE(review): bounce_vec's resolution is hard-coded to 32; this only
        # matches the buffer when `shape` == 32 — TODO confirm before varying.
        dat[i, :, :, :, :] = b.bounce_vec(32, num_balls, seq_length)
    # (batch, seq, H, W, C) -> (batch, seq, C, H, W)
    return torch.from_numpy(dat).permute(0, 1, 4, 2, 3)
def train():
    """Train the CLSTM on generated bouncing-ball videos.

    Prints the mean loss every 100 steps and writes a sample prediction video
    every 1000 steps. Runs forever until `max_step` iterations complete.
    Requires a CUDA device.
    """
    # NOTE(review): kept for compatibility — hidden_state is only used inside
    # this function, so the global declaration is likely unnecessary.
    global hidden_state
    model = CLSTM(shape, inp_chans, filter_size, num_features, nlayers)
    model.apply(weights_init)
    model = model.cuda()

    crit = nn.MSELoss()
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    # NOTE(review): nn.Dropout takes the DROP probability, so keep_prob=0.8
    # zeroes 80% of the inputs — confirm intent (see constant definition).
    drop = nn.Dropout(keep_prob)

    hidden_state = model.init_hidden(batch_size)
    start = time.time()
    losses = list()
    # FIX: all `xrange` calls replaced with Python 3 `range`
    for step in range(max_step):
        dat = generate_bouncing_ball_sample(batch_size, seq_len, shape[0], num_balls)
        inputs = Variable(dat.cuda(), requires_grad=True)
        inputs = drop(inputs)
        target = Variable(dat.cuda(), requires_grad=False)
        hidden_state = model.init_hidden(batch_size)
        output = list()
        for i in range(inputs.size(1) - 1):
            if i < seq_start:
                # teacher forcing: feed ground-truth frames as context
                out, hidden_state = model(inputs[:, i, :, :, :].unsqueeze(1), hidden_state)
            else:
                # free-running: feed the model its own prediction
                out, hidden_state = model(out, hidden_state)
            output.append(out)
        output = torch.cat(output, 1)
        # predict frame t+1 from frame t, only past the context window
        loss = crit(output[:, seq_start:, :, :, :], target[:, seq_start + 1:, :, :, :])
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # FIX: loss.item() replaces the removed `loss.data[0]` API (PyTorch >= 0.4)
        losses.append(loss.item())
        if step % 100 == 0 and step != 0:
            print(np.array(losses).mean(), time.time() - start)
            losses = list()
            start = time.time()
        if step % 1000 == 0:
            # make video
            print(step)
            print("now generating video!")
            video = cv2.VideoWriter()
            video.open("generated_conv_lstm_video_{0}.avi".format(step), fourcc, 4, (180, 180), True)
            hidden_state = model.init_hidden(batch_size)
            output = list()
            for i in range(25):
                if i < seq_start:
                    out, hidden_state = model(inputs[:, i, :, :, :].unsqueeze(1), hidden_state)
                else:
                    out, hidden_state = model(out, hidden_state)
                output.append(out)
            # (batch, seq, C, H, W) -> (batch, seq, W, H, C) for OpenCV
            ims = torch.cat(output, 1).permute(0, 1, 4, 3, 2)
            ims = ims[0].data.cpu().numpy()
            for i in range(5, 25):
                frame = np.uint8(np.maximum(ims[i, :, :, :], 0) * 255)
                video.write(cv2.resize(frame, (180, 180)))
            video.release()


train()
| rAm1n/bouncing-ball-pytorch | train.py | train.py | py | 2,709 | python | en | code | 2 | github-code | 90 |
# Timetable simulation: for each starting station i (1-based), compute the
# earliest arrival time at the last station. For each segment we read:
#   C: travel time, S: first departure time, F: departure frequency.
# `ans[j]` tracks the current arrival time of the traveller who started at
# station j+1; it is updated segment by segment.
N = int(input())
ans = []
for _ in range(N - 1):
    C, S, F = map(int, input().split())
    for j in range(len(ans)):
        if ans[j] <= S:
            # arrived before the first train: board it
            ans[j] = S + C
        elif ans[j] % F != 0:
            # wait for the next departure (departures at multiples of F)
            ans[j] = ans[j] + F - (ans[j] % F) + C
        else:
            # arrived exactly on a departure time: board immediately
            ans[j] += C
    # traveller starting at this station boards this segment's first train
    ans.append(S + C)
for a in ans:
    print(a)
# starting at the last station takes no time
print(0)
| Aasthaengg/IBMdataset | Python_codes/p03475/s582032066.py | s582032066.py | py | 345 | python | en | code | 0 | github-code | 90 |
23079683349 | """
This is the first part of the Python project, related to webscrapping data from
Boursorama website.
"""
import requests
import pandas as pd
from tqdm import tqdm
from bs4 import BeautifulSoup
def data_collection(nb_pages):
    """Scrape `nb_pages` result pages of Boursorama's beginner fund search.

    Returns a cleaned pandas.DataFrame with columns fund_name, last_close,
    fund_var, fund_YoY and risk_level. Rows whose YoY performance is missing
    (shown as '-') and everything after them are dropped.
    """
    # data storage (one entry per scraped page)
    response = []
    soup = []
    data = []
    rows = []
    # relevant info storage (one entry per fund row)
    fund_name = []
    fund_last_close = []
    fund_var = []
    fund_yoy = []
    fund_risk = []
    for i in tqdm(range(1, nb_pages + 1), desc='Webscrapping pages from the website'):
        # here we get the data for each page, and append it and its equivalent
        # BeautifulSoup result and the tables found in it
        response.append(requests.get("https://www.boursorama.com/bourse/opcvm/" + \
                                     f"recherche/page-{i}?beginnerFundSearch%5Bsaving%5D=1",
                                     headers={'User-Agent': 'My User Agent 1.0'}))
        soup.append(BeautifulSoup(response[-1].text, 'lxml'))
        data.append(soup[-1].find_all('table', {'class': \
            'c-table c-table--generic c-table--generic c-shadow-overflow__table' + \
            '-fixed-column c-shadow-overflow__table-fixed-column'})[0])
        for j in data[-1]:
            rows.append(j.find_all('tr'))
            for k in range(1, len(rows[-1])):
                # fund name
                if k == 1:
                    # NOTE(review): row 0 (the header-adjacent first row) is
                    # harvested once per table here — confirm it is a real
                    # fund row and not a duplicate of k == 1.
                    fund_name.append(rows[-1][0].find_all('a')[0].text)
                    info = rows[-1][0].find_all('td')
                    fund_last_close.append(info[2].text)
                    fund_var.append(info[3].text)
                    fund_yoy.append(info[4].text)
                    fund_risk.append(info[5].div['data-gauge-current-step'])
                name_cell = rows[-1][k].find_all('a')
                fund_name.append(name_cell[0].text)
                # other info on the fund
                info = rows[-1][k].find_all('td')
                fund_last_close.append(info[2].text)
                fund_var.append(info[3].text)
                fund_yoy.append(info[4].text)
                fund_risk.append(info[5].div['data-gauge-current-step'])
    # create a dictionary with the relevant information to then transform it into a dataframe
    dict_info = {
        'fund_name': fund_name,
        'last_close': fund_last_close,
        'fund_var': fund_var,
        'fund_YoY': fund_yoy,
        'risk_level': fund_risk
    }
    data_df = pd.DataFrame(data=dict_info).dropna().drop_duplicates().reset_index(drop=True)
    data_df.fund_name = data_df.fund_name.str.capitalize()
    # transformation of the number column strings into the correct type (float or integer)
    # NOTE(review): chained assignment (`data_df.col.iloc[i] = ...`) may write
    # to a copy under recent pandas (SettingWithCopy semantics) — verify the
    # values are actually persisted, or rewrite with .loc/.at indexing.
    for i in range(len(data_df)):
        data_df.last_close.iloc[i] = float(data_df.last_close.iloc[i].split( \
            '\n')[-1].replace(" ", ""))
        data_df.fund_var.iloc[i] = float(data_df.fund_var.iloc[i].split('\n')[-1] \
            .replace(" ", "").strip('%'))
        if data_df.fund_YoY.iloc[i].split('\n')[-1]. \
            replace(" ", "").strip('%') == '-': # Last funds' YoY perf data are missing
            data_df.drop(data_df.tail(len(data_df) - i).index, inplace=True)
            break
        else:
            data_df.fund_YoY.iloc[i] = float(data_df.fund_YoY.iloc[i].split('\n')[-1]. \
                replace(" ", "").strip('%'))
    data_df = data_df.astype({'last_close': 'float',
                              'fund_var': 'float',
                              'fund_YoY': 'float',
                              'risk_level': 'int32'})
    return data_df
# Module entry point: scrape the first 31 result pages and persist the table.
df_funds = data_collection(31)
df_funds.to_csv("data_WS.csv")
| ELAARADI/Projects | Webscrapping&API project/Webscrapping.py | Webscrapping.py | py | 3,820 | python | en | code | 0 | github-code | 90 |
43492347457 | import urllib.request
import re
# Eastmoney page listing all exchange-traded stock codes.
stock_url = 'http://quote.eastmoney.com/stocklist.html'
def urlToList(url):
    """Scrape the Eastmoney stock-list page at `url` and return stock codes.

    Only codes whose first digit is '6', '3' or '0' are kept.
    """
    allCodeList = []
    html = urllib.request.urlopen(url).read()
    # the page is served GBK-encoded
    html = html.decode('gbk')
    s = r'<li><a target="_blank" href="http://quote.eastmoney.com/\S\S(.*?).html">'
    pat = re.compile(s)
    code = pat.findall(html)
    for item in code:
        # guard against empty matches, then use a membership test instead of
        # an or-chain of equality comparisons
        if item and item[0] in ('6', '3', '0'):
            allCodeList.append(item)
    return allCodeList
if __name__ == '__main__':
    # Fetch and display the full list of stock codes when run as a script.
    allCodeList = urlToList(stock_url)
    print(allCodeList)
| vencewill/myscripts | Python/tutorials/getStockID.py | getStockID.py | py | 595 | python | en | code | 0 | github-code | 90 |
3516741582 | import pandas as pd
import rdflib
import os
import pickle
import time
import constants
def _get_sources_from_mapping(mapping_graph: rdflib.Graph):
"""Retrieves a list of sources from the mappings.
Args:
mapping_graph:
A rdflib.Graph that contains the mapping triples.
Returns:
A list[str] with the sources.
"""
mapping_query = """
PREFIX rml: <http://semweb.mmlab.be/ns/rml#>
SELECT ?source {
?h rml:source ?source
}
"""
query_res = mapping_graph.query(mapping_query)
all_sources = set([str(row['source']) for row in query_res]) # Ignore duplicates TODO: ??
return all_sources
def _process_source(source_file: str, snapshot: dict, new_version: bool, extension: str):
"""Process a source according to the type. It reads the data from 'source_file' and returns
the current, new and removed data.
Args:
source_file:
The source file name.
snapshot:
The snapshot that contains the old data.
new_version:
True if it is the first materialization.
extension:
Extension of 'source_file'.
Returns:
A triple (current_data, new_data, removed_data). The type depends on 'extension'.
new_data and removed_data have null intersection (no duplicates between them).
- current_data: data from source_file.
- new_data: data that is in source_file and not in snapshot.
- removed_data: data that is in snapshot and not in source_file.
"""
if extension == '.csv':
# Read new data
df_ds = pd.read_csv(source_file, dtype=str) # source dataframe
# Read dataframe from snapshot
df_sp = pd.DataFrame(columns=df_ds.columns) if new_version else snapshot[source_file]
# Calculate simmetric differences
# This retrieves new data + removed data
# - New data: data that is present in df_ds but not in df_sp
# - Removed data: data that is present in df_sp but not in df_ds
diff = pd.concat([df_sp, df_ds]).drop_duplicates(keep=False)
# Calculate removed data
removed_data = pd.merge(df_sp, diff, how='inner')
# Calculate new data
new_data = pd.merge(df_ds, diff, how='inner')
return df_ds, new_data, removed_data
elif extension == '.json':
raise NotImplementedError(f'The file type {extension} is not supported yet!')
else:
raise NotImplementedError(f'The file type {extension} is not supported yet!')
def _save_data_to_file(data_path:str, source_file: str, extension: str, data: object):
"""Saves 'data' to 'aux_data_pah', with 'source_file' name, and returns the new full file name.
Args:
aux_data_pah:
The destiny directory path.
source_file:
The file name.
extension:
The extension of 'source_file'.
data:
The data that is stored. The type depends on 'extension':
.csv: python dictionary.
Returns:
The name of the new file created.
"""
# Create file name
new_file_path = data_path + '/' + source_file
# Create directories for aux file
# TODO: verify that this does not need to be run with every data source
os.makedirs(os.path.dirname(new_file_path), exist_ok=True)
# Save data
if extension == '.csv':
data.to_csv(new_file_path, index=False)
elif extension == '.json':
raise NotImplementedError(f'The file type {extension} is not supported yet!')
else:
raise NotImplementedError(f'The file type {extension} is not supported yet!')
return new_file_path
def _update_mappings(mapping_graph: rdflib.Graph, query_update: str, data_path: str, mapping_file: str):
"""Updates the 'mapping_graph' with the query 'query_update' and saves the mapping to 'data_path'/'mapping_file'.
Args:
mapping_graph:
A rdflib.Graph that stores the mapping rules.
query_update:
The query that is going to be executed on the graph.
data_path:
The path where the new mapping will be stored.
mapping_file:
The name of the new mapping file.
Returns:
The path of the new mapping file.
"""
mapping_graph.update(query_update)
# Save new mappings to file
new_mapping_file = data_path + '/.aux_' + mapping_file
# Create parent dirs
os.makedirs(os.path.dirname(new_mapping_file), exist_ok=True)
mapping_graph.serialize(new_mapping_file)
return new_mapping_file
def _materialize_set(engine: str, mapping_function, new_mapping_file: str, method: str, data_dict: dict, aux_data_path: str, type_p: str):
"""Runs the 'engine' mapping engine and returns the generated triples.
Args:
engine:
The name of the mapping engine to materialize the graph: 'morph', 'rdfizer'.
mapping_function:
The function that performs the materialization.
new_mapping_file:
The file name that contains the mappings.
method:
A string that determines how the auxiliary data is threated.
data_dict:
If 'method' is 'memory', the dictionary with the auxiliary data.
aux_data_path:
The path of an auxiliary directory.
type_p:
Either 'new' or 'removed'. It is used by some engines to write the config.
Returns:
A rdflib.Graph
"""
if engine == 'morph':
config = "[inc]\nmappings: %s" % new_mapping_file
if method == 'disk':
triples = mapping_function(config)
#triples = morph_kgc.materialize(config)
elif method == 'memory':
triples = mapping_function(config, data_dict)
#triples = morph_kgc.materialize(config, data_dict)
elif engine == 'rdfizer':
if type_p == 'new':
sub_dir = 'new_data'
elif type_p == 'removed':
sub_dir = 'removed_data'
else:
raise TypeError("%s is not 'new' or 'removed'!")
config = """
[default]
main_directory: %s/%s/data
[datasets]
number_of_datasets: 1
output_folder: %s/%s
all_in_one_file: no
remove_duplicate: yes
enrichment: yes
name: output
ordered: yes
[dataset1]
name: %s
mapping: %s
"""
with open(aux_data_path + '/%s/rdfizer_config.ini' % sub_dir, "w") as f:
f.write(config % (aux_data_path, sub_dir, aux_data_path, sub_dir, sub_dir, new_mapping_file))
mapping_function(config_path=aux_data_path + '/%s/rdfizer_config.ini' % sub_dir)
#semantify(config_path=aux_data_path + '/%s/rdfizer_config.ini' % sub_dir)
# Read the output of semantify()
triples = rdflib.Graph().parse(aux_data_path + '/%s/%s.nt' % (sub_dir, sub_dir))
else:
raise RuntimeError('%s is not supported yet!' % engine)
return triples
def load_kg(mapping_file: str,
            snapshot_file: str,
            aux_data_path: str = '.aux',
            old_graph: rdflib.Graph = None,
            method: str = 'memory',
            engine: str = 'morph',
            mapping_optimization: bool = True):
    """Materializes a knowledge graph given a data source and a mapping file. It also supports
    updating versions of previously generated graphs when 'snapshot_file' and 'old_graph' are provided.

    Args:
        mapping_file:
            The file name that contains the mappings.
        snapshot_file:
            The snapshot file name.
        aux_data_path:
            The path of an auxiliary directory.
        old_graph:
            A rdflib.Graph that contains the version previously generated. If None, it creates a graph
            from scratch.
        method:
            A string that determines how the auxiliary data is threated:
            - 'disk': The auxiliary data is stored in the disk, under the 'aux_data_path' directory.
            - 'memory': The auxiliary data is stored in memory.
        engine:
            The name of the mapping engine to materialize the graph:
            - 'morph': https://github.com/morph-kgc/morph-kgc.
            - 'rdfizer': https://github.com/SDM-TIB/SDM-RDFizer.
        mapping_optimization:
            If true, the mappings are reduced to contain the rules from the datasources that are updated.

    Returns:
        A new materialized graph when 'old_graph' is None, or a new version of 'old_graph'. The directory
        'aux_data_path' contains at the end:
        - A snapshot file named 'snapshot_file'. This file should not be deleted, since it is used
          with subsequent calls to load_kg().
        - An auxiliary mapping file. This file can be removed.
        - If 'method' is 'disk', the auxiliary data. It can be removed.
    """
    # Argument checks
    if method not in ['disk', 'memory']:
        raise ValueError("'method' argument must be either 'disk' or 'memory'")
    if engine not in ['morph', 'rdfizer']:
        raise ValueError("'engine' argument must be 'morph' or 'rdfizer'")
    if engine == 'rdfizer' and method != 'disk':
        raise ValueError("'rdfizer' engine only supports 'disk' method")
    # Import only the used mapping engine (both are optional dependencies)
    if engine == 'morph':
        from morph_kgc import materialize
    elif engine == 'rdfizer':
        from rdfizer import semantify
    # Check if first version or not
    if old_graph is None:
        # First version: start from an empty snapshot
        new_version = True
        sp = dict()
    else:
        # Subsequent updates: restore the previous snapshot from disk
        new_version = False
        with open(snapshot_file, 'rb') as f:
            sp = pickle.load(file=f)
    # Read mapping twice: one graph will be reduced to the rules for sources
    # with new data, the other for sources with removed data
    mapping_graph_new_data = rdflib.Graph().parse(mapping_file)
    mapping_graph_removed_data = rdflib.Graph().parse(mapping_file)
    # Extract sources from mapping
    all_sources = _get_sources_from_mapping(mapping_graph_new_data)
    # Create auxiliary data directory
    aux_data_path_enc = os.fsencode(aux_data_path) # TODO: strip trailing '/' if present
    if not os.path.exists(aux_data_path_enc):
        os.makedirs(aux_data_path_enc)
    # Create auxiliary dictionary for new data
    new_data_dict = {}
    removed_data_dict = {}
    has_new_data = False
    has_removed_data = False
    updated_new_sources = set()
    updated_removed_sources = set()
    start = time.time()
    # Process each source
    for source_file in all_sources:
        # Get extension from source file
        _, extension = os.path.splitext(source_file)
        # Calculate new and removed data
        current_data, new_data, removed_data = _process_source(source_file=source_file,
                                                               snapshot=sp,
                                                               new_version=new_version,
                                                               extension=extension)
        # Save new and removed data
        if method == 'disk':
            # Save new data to disk if there is any
            if len(new_data) > 0:
                new_data_file_path = _save_data_to_file(data_path=aux_data_path + '/new_data',
                                                        source_file=source_file,
                                                        extension=extension,
                                                        data=new_data)
                msg_new_data = "\tFound new data, saved to %s." % new_data_file_path
                has_new_data = True
                updated_new_sources.add(source_file)
            else:
                # without mapping optimization the (empty) file must still
                # exist, because the full mapping references every source
                if not mapping_optimization:
                    new_data_file_path = _save_data_to_file(data_path=aux_data_path + '/new_data',
                                                            source_file=source_file,
                                                            extension=extension,
                                                            data=new_data)
                    msg_new_data = "\tNo new data, saved to %s." % new_data_file_path
                else:
                    msg_new_data = "\tNo new data."
            # Save removed data to disk if there is any
            if len(removed_data) > 0:
                removed_data_file_path = _save_data_to_file(data_path=aux_data_path + '/removed_data',
                                                            source_file=source_file,
                                                            extension=extension,
                                                            data=removed_data)
                msg_removed_data = "\tFound removed data, saved to %s." % removed_data_file_path
                has_removed_data = True
                updated_removed_sources.add(source_file)
            else:
                if not mapping_optimization:
                    removed_data_file_path = _save_data_to_file(data_path=aux_data_path + '/removed_data',
                                                                source_file=source_file,
                                                                extension=extension,
                                                                data=removed_data)
                    msg_removed_data = "\tNo removed data, saved to %s." % removed_data_file_path
                else:
                    msg_removed_data = "\tNo removed data."
        elif method == 'memory':
            # Save new data to in-memory dict if there is any
            if len(new_data) > 0:
                new_data_dict[source_file] = new_data
                msg_new_data = "\tFound new data, saved to dataframe."
                has_new_data = True
                updated_new_sources.add(source_file)
            else:
                if not mapping_optimization:
                    new_data_dict[source_file] = new_data
                    msg_new_data = "\tNo new data, saved to dataframe."
                else:
                    msg_new_data = "\tNo new data."
            # Save removed data to in-memory dict if there is any
            if len(removed_data) > 0:
                removed_data_dict[source_file] = removed_data
                msg_removed_data = "\tFound removed data, saved to dataframe."
                has_removed_data = True
                updated_removed_sources.add(source_file)
            else:
                if not mapping_optimization:
                    removed_data_dict[source_file] = removed_data
                    msg_removed_data = "\tNo removed data, saved to dataframe."
                else:
                    msg_removed_data = "\tNo removed data."
        else:
            raise RuntimeError('\'method\' is not \'disk\' or \'memory\', This should not happend :(')
        # Print messages
        print(source_file)
        print(msg_new_data)
        print(msg_removed_data)
        # Save the current in the snapshot
        sp[source_file] = current_data
        # Save current snapshot data = old + new_data - removed_data
        # updated_snapshot_data = _calculate_new_snapshot_df(old_data=old_data,
        #                                                    new_data=new_data,
        #                                                    removed_data=removed_data,
        #                                                    extension=extension)
        # sp[source_file] = updated_snapshot_data
    end = time.time()
    print("Finished calculating diff in %.2fs." % (end-start))
    # Save snapshot
    # TODO: Create parent dir if it does not exist
    with open(snapshot_file, 'wb') as f:
        pickle.dump(obj=sp, file=f)
    print("Saved snapshot to", snapshot_file)
    if mapping_optimization:
        # Calculate the mappings of the changed files
        if len(updated_new_sources) > 0:
            # Retrieve mappings related to the sources with new data
            query_update_sources = constants.QUERY_SOURCES % (tuple([", ".join(["\"%s\"" % e for e in updated_new_sources])] * 7))
            query_res = mapping_graph_new_data.query(query_update_sources)
            new_g = rdflib.Graph()
            for i in query_res:
                new_g.add(i)
            mapping_graph_new_data = new_g
        if len(updated_removed_sources) > 0:
            # Retrieve mappings related to the sources with removed data
            query_update_sources = constants.QUERY_SOURCES % (tuple([", ".join(["\"%s\"" % e for e in updated_removed_sources])] * 7))
            query_res = mapping_graph_removed_data.query(query_update_sources)
            new_g = rdflib.Graph()
            for i in query_res:
                new_g.add(i)
            mapping_graph_removed_data = new_g
    # Create queries for the mappings to support new sources
    if method == 'disk':
        query_update_new_data = constants.QUERY_DISK % (aux_data_path + '/new_data')
        query_update_removed_data = constants.QUERY_DISK % (aux_data_path + '/removed_data')
        pass
    elif method == 'memory':
        query_update_new_data = constants.QUERY_MEMORY
        query_update_removed_data = query_update_new_data # Same query in 'memory'
    else:
        raise RuntimeError('\'method\' is not \'disk\' or \'memory\', This should not happend :(')
    if engine == 'morph':
        mapping_func = materialize
    elif engine == 'rdfizer':
        mapping_func = semantify
    # Materialize new data
    print("Materializing graph...")
    start = time.time()
    if has_new_data:
        print("Updating mappings... ", end='')
        new_mapping_file = _update_mappings(mapping_graph=mapping_graph_new_data,
                                            query_update=query_update_new_data,
                                            data_path=aux_data_path + '/new_data',
                                            mapping_file=mapping_file)
        print("OK")
        print("Running mapping engine on the new data...")
        new_triples = _materialize_set(engine=engine,
                                       mapping_function=mapping_func,
                                       new_mapping_file=new_mapping_file,
                                       method=method,
                                       data_dict=new_data_dict,
                                       aux_data_path=aux_data_path,
                                       type_p='new')
    else:
        print("No new data detected in the data source, no need to run the mapping engine.")
        new_triples = rdflib.Graph()
    # Materialize removed data
    if has_removed_data:
        print("Updating mappings... ", end='')
        new_mapping_file = _update_mappings(mapping_graph=mapping_graph_removed_data,
                                            query_update=query_update_removed_data,
                                            data_path=aux_data_path + '/removed_data',
                                            mapping_file=mapping_file)
        print("OK")
        print("Running mapping engine on the removed data...")
        removed_triples = _materialize_set(engine=engine,
                                           mapping_function=mapping_func,
                                           new_mapping_file=new_mapping_file,
                                           method=method,
                                           data_dict=removed_data_dict,
                                           aux_data_path=aux_data_path,
                                           type_p='removed')
    else:
        print("No removed data detected in the data source, no need to run the mapping engine.")
        removed_triples = rdflib.Graph()
    end = time.time()
    print("Materialization complete in %.2fs" % (end-start))
    # Return the materialized graph if it is a new version
    if old_graph is None:
        return new_triples
    # Return the new graph = old graph + new graph - removed graph
    print("Constructing new graph... ", end='')
    start = time.time()
    # old_plus_new_graph = old_graph + new_graph
    for new_triple in new_triples:
        old_graph.add(new_triple)
    # old_plus_new_minus_rm_graph = old_plus_new_graph - removed_graph
    for removed_triple in removed_triples:
        old_graph.remove(removed_triple)
    # TODO: (optimization) check which of the graphs is larger, and run the for loop in the other (validate)
    end = time.time()
    print("OK (%.2fs)" % (end-start))
    return old_graph
| ershimen/incremental-kgc | src/incremental_kgc.py | incremental_kgc.py | py | 20,366 | python | en | code | 1 | github-code | 90 |
37715193250 |
import numpy as np
def gen_gabor(imgsize, sigma = 5.0, theta = np.pi / 4, Lambda = np.pi, psi = 0.0, gamma = 1.0):
    """Build a 2-D Gabor kernel: a Gaussian envelope times a cosine carrier.

    The kernel extent is derived from sigma/theta/gamma (+/- 5 standard
    deviations), so the array size varies with the parameters.
    NOTE(review): `imgsize` is accepted but unused — confirm whether callers
    expect the output to be cropped/padded to that size.
    """
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    sigma_x = sigma
    sigma_y = float(sigma) / gamma
    # envelope support: five standard deviations in each rotated direction
    nstds = 5
    half_w = np.ceil(max(1, max(abs(nstds * sigma_x * cos_t), abs(nstds * sigma_y * sin_t))))
    half_h = np.ceil(max(1, max(abs(nstds * sigma_x * sin_t), abs(nstds * sigma_y * cos_t))))
    (y, x) = np.meshgrid(np.arange(-half_h, half_h + 1), np.arange(-half_w, half_w + 1))
    # rotate the coordinate grid into the filter frame
    x_rot = x * cos_t + y * sin_t
    y_rot = -x * sin_t + y * cos_t
    envelope = np.exp(-0.5 * (x_rot ** 2 / sigma_x ** 2 + y_rot ** 2 / sigma_y ** 2))
    carrier = np.cos(2 * np.pi / Lambda * x_rot + psi)
    return envelope * carrier
def gen_sinusoid(imgsize, A, omega, rho):
    """Generate a sinusoidal grating.

    imgsize: (width, height) of the requested image; the grid spans
    -floor(dim/2) .. +floor(dim/2) inclusive on each axis, so the returned
    array has shape (2*floor(height/2)+1, 2*floor(width/2)+1).
    A: amplitude, omega: (wx, wy) spatial frequencies, rho: phase offset.
    """
    half_w = int(imgsize[0] / 2.0)
    half_h = int(imgsize[1] / 2.0)
    x, y = np.meshgrid(range(-half_w, half_w + 1),
                       range(-half_h, half_h + 1))
    return A * np.cos(omega[0] * x + omega[1] * y + rho)
| msrepo/opencv-practice | python-image-operators/gabor.py | gabor.py | py | 1,309 | python | en | code | 1 | github-code | 90 |
36205425284 | import numpy as np
import time
import sys
class regression:
    def __init__(self, X, y, theta, learning_rate, iterations):
        """Hold the training state for logistic regression.

        X: feature matrix, y: labels (presumably 2-D column vectors given
        the `.T @` usage in cost() — confirm with callers),
        theta: initial weights, updated in place by gradient_descent(),
        learning_rate: initial step size (halved adaptively while training),
        iterations: maximum number of gradient steps.
        """
        self.X = X
        self.y = y
        self.theta = theta
        self.learning_rate = learning_rate
        self.iterations = iterations
def sigmoid(self, z):
return (1 / (1 + np.exp(-z)))
def cost(self):
m = len(self.y)
h = self.sigmoid(np.dot(self.X,self.theta))
cost = (1/m)*(((-(self.y)).T @ np.log(h))-((1-self.y).T @ np.log(1-h)))
return (cost)
    def gradient_descent(self):
        """Batch gradient descent with an adaptive (halving) learning rate.

        Builds and returns a history of cost values, stopping early when the
        cost stalls (delta < 1e-10) or drops below 0.02.
        """
        m = len(self.y)
        if m < 1:
            print("Not enough data")
            sys.exit(-1)
        cost_history = []
        for i in range(self.iterations):
            # Full-batch logistic-regression gradient step.
            self.theta -= (self.learning_rate / m) * (np.dot(self.X.T, self.sigmoid((self.X @ self.theta)) - self.y))
            cost = self.cost()[0][0]
            if (len(cost_history) > 0):
                delta = cost_history[-1] - cost
                # Halve the step size when progress is tiny or cost went up.
                if ((delta < 0.000001 and self.learning_rate > 0.0001 and cost < cost_history[-1]) or cost > cost_history[-1]):
                    self.learning_rate /= 2
                if (delta < 1e-10):
                    cost_history.append(cost)
                    break
            cost_history.append(cost)
            if cost < 0.02:
                # NOTE(review): this records the same cost twice (once just
                # above, once here) before breaking — confirm intended.
                cost_history.append(cost)
                break
return(cost_history) | ayguillo/dslr | src/reg_fit.py | reg_fit.py | py | 1,150 | python | en | code | 1 | github-code | 90 |
22555224658 | import pygame
import os
_image_library = {}  # cache: raw path argument -> loaded pygame Surface
def get_image(path):
    """Load an image through a module-level cache.

    The cache is keyed by the raw `path` argument; separators are
    normalised to the host OS convention only for the actual load.
    """
    global _image_library
    image = _image_library.get(path)
    # Idiom fix: identity check against None instead of `== None`.
    if image is None:
        canonicalized_path = path.replace('/', os.sep).replace('\\', os.sep)
        image = pygame.image.load(canonicalized_path)
        _image_library[path] = image
    return image
_sound_library = {}  # cache: raw path argument -> loaded pygame Sound
def play_sound(path):
    """Play a sound effect, loading and caching it on first use.

    Mirrors get_image(): cache keyed by the raw path, separators
    normalised only for loading.
    """
    global _sound_library
    sound = _sound_library.get(path)
    # Idiom fix: identity check against None instead of `== None`.
    if sound is None:
        canonicalized_path = path.replace('/', os.sep).replace('\\', os.sep)
        sound = pygame.mixer.Sound(canonicalized_path)
        _sound_library[path] = sound
    sound.play()
# Demo loop: draws a rectangle, a circle and a cached image while looping a
# sound, until the window is closed.
pygame.init()
screen = pygame.display.set_mode((400, 300))
done = False
x = 30
y = 30
# BUG FIX: is_blue was toggled on SPACE below but never initialised,
# which raised a NameError the first time the key was pressed.
is_blue = False
clock = pygame.time.Clock()
color = (200,0,200)
while not done:
    for event in pygame.event.get():
        if event.type == pygame.QUIT:
            done = True
        if event.type == pygame.KEYDOWN and event.key == pygame.K_SPACE:
            is_blue = not is_blue
    screen.fill((255, 255, 255))
    screen.blit(get_image('images/ball.png'), (100, 200))
    play_sound("sounds/m1.ogg")
    # draw a rectangle
    pygame.draw.rect(screen, color, pygame.Rect(10, 10, 100, 100), 10)
    # draw a circle
    pygame.draw.circle(screen, color, (300, 60), 50, 10)
    pygame.display.flip()
    clock.tick(60)
| Beisenbek/PP2_2023 | week10/3.py | 3.py | py | 1,509 | python | en | code | 0 | github-code | 90 |
18012304389 | from math import factorial
# Read n, a, b and the n values, sorted descending.
n, a, b = map(int, input().split())
V = sorted(map(int, input().split()), reverse=True)
# First answer: the average of the a largest values.
av = sum(V[:a]) / a
print(av)
def comb(m, k):
    """Binomial coefficient C(m, k).

    Returns 1 for k == 0 and 0 for k > m (a zero factor enters the
    product), matching the original loop-based behaviour.
    """
    result = 1
    # The k == 0 special case in the old code was redundant: an empty
    # range already leaves the product at 1.
    for step in range(k):
        # Multiply before dividing: result holds C(m, step) here, and
        # C(m, step) * (m - step) is always divisible by (step + 1).
        result = result * (m - step) // (step + 1)
    return result
num = V[a]  # the value just outside the chosen prefix of size a
m = V.count(num)  # occurrences of that value in the whole list
k = V[:a].count(num)  # occurrences already inside the prefix
if a != k:
    # Only some of the m tied values fit in the prefix: count the ways of
    # choosing which k of them are used.  (Consistency fix: reuse comb()
    # instead of re-deriving it with factorials.)
    ans = comb(m, k)
else:
    # The whole prefix consists of the tied value, so any selection of
    # between a and b copies of it attains the same average.
    ans = 0
    for k in range(a, b+1):
        ans += comb(m, k)
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03776/s787448757.py | s787448757.py | py | 554 | python | en | code | 0 | github-code | 90 |
43442052607 | """
8. Scrieti un program de tip joc "ghiceste numarul".
Cerinte:
1. Programul genereaza un numar aleator in intervalul [1, 99].
2. Intr-o bucla conditionata de gasirea numarului cautat:
- se citeste de la tastatura un numar
- se compara cu numarul cautat
- daca numarul introdus este mai mic decat numarul cautat se afiseaza +
- daca este mai mic se afiseaza -
3. Dupa ce numarul este ghicit se afiseaza un mesaj de felicitare si numarul cautat.
"""
import random
random_numbers = random.randint(1,100)
number = int(input("Introduceti un numar: "))
while number != random_numbers:
if number > random_numbers:
print("-")
elif number < random_numbers:
print("+")
number = int(input("Introduceti un numar: "))
print("Felicitari,numarul cautat este:",random_numbers)
| tohhhi/it_school_2022 | Sesiunea 4/temaEx8.py | temaEx8.py | py | 888 | python | ro | code | 0 | github-code | 90 |
44455752204 | import pandas as pd
from pathlib import Path
import shutil
from constants import ap_obj
class Copier:
    def __init__(self, df_copy_groups, df_addresses, subfldr):
        """Prepare a copy job.

        df_copy_groups: frame whose rows expose column B (group key) and
        column AAA (iterable of file ids), as used by copy() below.
        df_addresses: frame mapping id (column A) to a source path (column B).
        subfldr: numeric string used to name the destination sub-folder.
        """
        self.__df_groups = df_copy_groups
        self.__df_addresses = df_addresses
        self.__subfldr = subfldr
        # Destination root comes from application-level configuration.
        adr = ap_obj.getData('DUMP_FLDR')
        self.__destination = adr
    def __check(self):
        # Placeholder for a future validation step; intentionally a no-op.
        pass
def __create_concrete_dir(self, dir, subdir):
p = Path(dir) / subdir
p.mkdir(parents=True, exist_ok=True)
return p
    def copy(self):
        """Copy every referenced file into <DUMP_FLDR>/<subfldr>/<group B>/.

        Groups whose B value is below the 2e-07 threshold are skipped;
        ids with no address entry are skipped silently; individual copy
        failures are logged to stdout instead of aborting the run.
        """
        i = int(self.__subfldr)
        p_global = self.__create_concrete_dir(self.__destination, f'{i:03d}')
        for index, row in self.__df_groups.iterrows():
            # 1.9E-07
            # 2E-07
            if float(row.B) < 0.0000002:
                continue
            p_local = self.__create_concrete_dir(p_global, str(row.B))
            for el in row.AAA:
                seri = self.__df_addresses[self.__df_addresses.A == el].B
                if not seri.empty:
                    str_file = str(seri.item())
                    # Windows-style basename: last component after '\\'.
                    str_file_name = str_file.split('\\')[-1]
                    my_file = Path(str_file)
                    to_file = p_local / str_file_name
                    try:
                        shutil.copy(my_file, to_file)
                    except:
                        # NOTE(review): bare except also swallows
                        # KeyboardInterrupt; consider `except OSError`.
                        print(el)
                        print(my_file.name)
                        print()
            # if index % 500 == 0:
            #     print(f'{index:03d}')
print('COPYING DONE') | YuriGribauskasDenis/PYTHON_BigProjMLDataFilter | core_help/copier.py | copier.py | py | 1,621 | python | en | code | 0 | github-code | 90 |
39546709181 | import aiohttp
import sqlite3
from retry import retry
from typing import Optional
@retry(aiohttp.ContentTypeError, tries=5, delay=1, backoff=2)
async def extract(
    url: str, session: aiohttp.ClientSession, params: Optional[dict[str, str]] = None
) -> dict:
    """Fetch *url* with *session* and return the decoded JSON body.

    Retries up to five times (exponential backoff) when the response body
    cannot be decoded as JSON.

    Parameters
    ----------
    url: The request URL.
    session: The aiohttp client session to issue the GET with.
    params: Optional query parameters to send along.

    Returns
    -------
    The json-encoded content of the response.
    """
    async with session.get(url, params=params) as response:
        payload = await response.json()
        return payload
def load(
    data: list[dict],
    con: sqlite3.Connection,
    insert_sql: str,
    create_sql: Optional[str] = None,
    truncate_sql: Optional[str] = None,
) -> None:
    """Bulk-insert *data* into a SQLite database and commit.

    Optionally executes a CREATE statement and/or a truncate statement
    (in that order) before inserting the rows.

    Parameters
    ----------
    data: Rows to insert, one mapping per row.
    con: Open SQLite connection.
    insert_sql: Parameterised INSERT statement.
    create_sql: Optional DDL executed before inserting.
    truncate_sql: Optional cleanup statement executed before inserting.

    Returns
    -------
    None.
    """
    cursor = con.cursor()
    for statement in (create_sql, truncate_sql):
        if statement:
            cursor.execute(statement)
    cursor.executemany(insert_sql, data)
    con.commit()
| tanjt107/football-prediction | footballprediction/pipeline.py | pipeline.py | py | 1,349 | python | en | code | 12 | github-code | 90 |
21617340168 | class Solution:
def findMinArrowShots(self, intervals: List[List[int]]) -> int:
if len(intervals) == 1:
return 1
#sort it based one the first value
intervals = sorted(intervals, key=lambda x:x[0])
count = 1
start = intervals[0][0]
end = intervals[0][1]
#the main logic is when you find a point that has start value greater than your considered/present array value
#that is the time to reset your values
# for example: In [[1,6],[2,8],[7,12],[10,16]]
#6 is the lowest end value of [1,6] and [2,8], so you can blast within that range
#so reset when you see start time 7
for i in range(1, len(intervals)):
if intervals[i][0] > end:
count += 1
start = intervals[i][0]
end = intervals[i][1]
elif intervals[i][1] < end:
end = intervals[i][1]
return count
| sgowdaks/CP_Problems | LeetCode/minimum_numbers_of_arrows_to_burst_ballon.py | minimum_numbers_of_arrows_to_burst_ballon.py | py | 1,038 | python | en | code | 0 | github-code | 90 |
6552344872 | import sys
from os import system
import getopt
from player import Player
from game import BlackJack
from question import Question
# Starts a game of Blackjack
def main():
    """Parse CLI options, collect player details, and start a Blackjack game.

    Integer-valued options: -n/--bet_min, -x/--bet_max, -s/--shoe_count;
    -h/--help prints usage and exits.  Invalid options print the getopt
    error and exit.
    """
    game_options = {
        "bet_min": 2,
        "bet_max": 10,
        "shoe_count": 3
    }
    try:
        matched_args, _ = getopt.getopt(sys.argv[1:], "hn:x:s:",["help", "bet_min=","bet_max=","shoe_count="])
        # Need bet min to be known before setting bet_max
        matched_args.sort(key= lambda option: option[0], reverse=True)
        for option, value in matched_args:
            if option in ["-h", "--help"]:
                print("""
            Options: 
            -n, --bet_min: The table's minimum bet, must be >= default $2.
            -x, --bet_max: Table limit, must be >= default $10 and 5 * bet-min.
            -s --shoe_count: decks in the shoe. Default 3. Min 1, max 10.
            """)
                sys.exit()
            # isnumeric() rejects signs/decimals, so int() below cannot fail.
            if not value.isnumeric():
                raise getopt.GetoptError("Option inputs must be an integer value.")
            value = int(value)
            if option in ["-n", "--bet_min"]:
                if value < 2:
                    raise getopt.GetoptError("bet-min must be at least 2")
                game_options.update({"bet_min": value})
            elif option in ["-x", "--bet_max"]:
                if value < 10 or value < 5 * game_options["bet_min"]:
                    raise getopt.GetoptError("bet-max must be > 10 and 5 * bet_min")
                game_options.update({"bet_max": value})
            elif option in ["-s", "--shoe_count"]:
                if not (1 <= value <= 10):
                    raise getopt.GetoptError("shoe_count must be between 1 to 10")
                game_options.update({"shoe_count": value})
    except getopt.GetoptError as e:
        print(e)
        sys.exit()
    # NOTE(review): 'clear' is POSIX-only; Windows would need 'cls'.
    system("clear")
    print("Hi! Ready to play Black Jack...")
    name = input("What's your name? ")
    chips = Question.get_numeric_response("How many $'s of chips do you want to buy?")
    player = Player(name, chips)
    game = BlackJack(player, **game_options)
    game.play()
if __name__ == "__main__":
main() | tomfuller71/BlackJack | blackjack.py | blackjack.py | py | 1,986 | python | en | code | 0 | github-code | 90 |
15402442101 | """
If the value is divisible by 3            -> print "Foo"
If the value is divisible by 5            -> print "Bar"
If the value is divisible by both 3 and 5 -> print "FooBar"
Otherwise                                 -> print the value itself
"""
def foobar(upto):
    """Print FizzBuzz-style output for each number in 1 .. upto-1.

    Multiples of both 3 and 5 print "FooBar", multiples of 3 print "Foo",
    multiples of 5 print "Bar", everything else prints the number itself.
    """
    # BUG FIX: removed the leftover debug line
    # `print("This is num", num)`, which polluted the output on every
    # iteration.
    for num in range(1, upto):
        if num % 3 == 0 and num % 5 == 0:
            print("FooBar")
        elif num % 3 == 0:
            print("Foo")
        elif num % 5 == 0:
            print("Bar")
        else:
            print(num)
def main():
    """Prompt for an upper bound and run the FooBar printer."""
    limit = int(input("Enter a number: "))
    foobar(limit)
if __name__ == "__main__":
main()
| np-n/Python-Basics-GCA | Session 4/foobar.py | foobar.py | py | 518 | python | en | code | 0 | github-code | 90 |
31774694091 | # READING AND WRITING FILES
''' FILE COMMANDS
• close – Closes the file. Like File->Save.. in your editor.
• read – Reads the contents of the file. You can assign the result to a variable.
• readline – Reads just one line of a text file.
• truncate – Empties the file. Watch out if you care about the file.
• write('stuff') – Writes ”stuff” to the file.
• seek(0) – Move the read/write location to the beginning of the file.
'''
from sys import argv
# Usage: python ex16.py <filename> — empties the file, then re-writes it
# from three lines typed by the user.
script, filename = argv # taking input of filename in script
print('We are going to erase the given file ', filename)
print('Hit Ctrl + C to stop')
print('Hit Enter to continue')
input('???')
print('Opening the file ...')
file1 = open (filename, 'w')
print('Truncating the file in 3..2..1..')
# NOTE(review): opening with 'w' already truncates, so this call is
# redundant — kept because the exercise demonstrates truncate().
file1.truncate() # empty the file
print('Now your file is empty, input 3 lines for the file now')
line1 = input('Line 1 :\t')
line2 = input('Line 2 :\t')
line3 = input('Line 3 :\t')
file1.write(line1) # writing in file can write directly in brackets or use variables
file1.write('\n')
file1.write(line2)
file1.write('\n')
file1.write(line3)
file1.write('\n')
print('File Closed')
file1.close() # closing a file
'''
What does 'w' mean? It's really just a string with a character in it naming
the mode the file is opened in. 'w' says "open this file in 'write' mode";
there's also 'r' for "read", 'a' for "append", and modifiers on these:
'w+', 'r+', and 'a+'.
Just doing open(filename) opens the file in 'r' (read) mode by default.
'''
| madhur3u/Python3 | Basic File Handling/ex16.py | ex16.py | py | 1,590 | python | en | code | 0 | github-code | 90 |
38502548174 | from tkinter import *
from tkinter import messagebox
from ResumeGenerator import *
def e1_del():
    """Clear the position-name entry."""
    # BUG FIX: delete(first=0, last=99) silently left anything past the
    # 99th character in place; 'end' always clears the whole field.
    e1.delete(0, 'end')
def e2_del():
    """Clear the company-name entry."""
    e2.delete(0, 'end')
def e3_del():
    """Clear the base-format entry."""
    e3.delete(0, 'end')
def check(val):
    """Warn the user when the base-format field was left empty."""
    if val != '':
        return
    messagebox.showinfo("ERROR", "PUT BASE FORMAT TEXT FILE PLEASE")
# Cover-letter salutation choices shown in the drop-down menu.
salutations = ["Dear Sir/Madam",
               "To Whom It May Concern ",
               "Dear Hiring Manager",
               ]
master = Tk()
Label(master, text="Position Name").grid(row=0)
Label(master, text="Company Name").grid(row=1)
Label(master, text="Salutation").grid(row=2)
Label(master, text="Base Format").grid(row=3)
variable = StringVar(master)
variable.set(salutations[0]) # default value
e1 = Entry(master)
e2 = Entry(master)
e3 = Entry(master)
e4 = OptionMenu(master, variable, *salutations)
e1.grid(row=0, column=1) #Position
e2.grid(row=1, column=1) # Company
e3.grid(row=3, column=1) # Base Format
e4.grid(row=2, column=1) # Salutations
# NOTE(review): check() only pops a warning; the generator call in the same
# lambda list still runs even when the base-format field is empty.
Button(master, text='Generate DOC', command= lambda: [check(e3.get()),resumeGeneratorDOC(e1.get(),e2.get(),e3.get(),variable.get()),e1_del(),e2_del(),e3_del()], bg="cyan").grid(row=4, column=0, sticky=W, pady=4)
Button(master, text='Generate PDF', command= lambda: [check(e3.get()),resumeGeneratorPDF(e1.get(),e2.get(),e3.get(),variable.get()),e1_del(),e2_del(),e3_del()], bg="red").grid(row=4, column=1, sticky=W, pady=4)
master.mainloop()
| animehart/CoverLetterGenerator | GUI.py | GUI.py | py | 1,435 | python | en | code | 0 | github-code | 90 |
22626967030 | #! /usr/bin/env python3
import argparse
import datetime
import json
import requests
def parseLinkHeader(headers):
    """Parse an RFC 5988 "link" response header into {rel: url}.

    Each entry looks like `<url>; rel="name"`; entries are comma-separated.
    Returns an empty dict when no link header is present.
    (adapted from PyGithub's PaginatedList)
    """
    links = {}
    if "link" not in headers:
        return links
    for entry in headers["link"].split(", "):
        (url, rel) = entry.split("; ")
        # Strip the surrounding <> from the url and rel="..." from the rel.
        links[rel[5:-1]] = url[1:-1]
    return links
def run_analysis(access_token, repo_name, user, start_year, end_year):
    """Count commits per author per year for a GitHub repository.

    Pages through the GitHub "list commits" endpoint for each year in
    [start_year, end_year] and writes {year: {login: count}} to
    commit_analysis.json.

    Parameters
    ----------
    access_token : token sent in the Authorization header.
    repo_name : repository name.
    user : repository owner.
    start_year, end_year : inclusive year range.
    """
    years = range(start_year, end_year + 1)
    per_page = 100
    auth_header = "token %s" % (access_token)
    headers = {
        'authorization': auth_header,
        'accept': 'application/vnd.github.v3+json'
    }
    commit_hash = {}
    for year in years:
        since = datetime.datetime(year, 1, 1)
        until = datetime.datetime(year, 12, 31, 23, 59, 59)
        since_str = since.isoformat()
        until_str = until.isoformat()
        url = "https://api.github.com/repos/%s/%s/commits" % (user, repo_name)
        # BUG FIX: the upper bound must be sent as 'until'; the previous
        # 'until_str' key was ignored by the API, so every yearly window
        # silently extended to the present.
        params = {
            'since': since_str,
            'until': until_str,
            'per_page': per_page
        }
        author_count = {}
        while True:
            try:
                response = requests.get(url, params=params, headers=headers)
                json_data = response.json()
                for item in json_data:
                    # Commits without a linked GitHub account are skipped.
                    author_obj = item.get("author")
                    if author_obj is None:
                        continue
                    author = author_obj.get("login")
                    if author is None:
                        continue
                    author_count[author] = author_count.get(author, 0) + 1
                # Follow pagination until there is no "next" page; the next
                # url already embeds all query parameters.
                links = parseLinkHeader(response.headers)
                if "next" not in links:
                    break
                url = links["next"]
                params = {}
            except Exception as e:
                print("Got error searching between %s and %s. Reason: %s Aborting!\n" % (since_str, until_str, str(e)))
                break
        commit_hash[year] = author_count
    # Context manager guarantees the report file is closed even on error.
    with open("commit_analysis.json", "w") as output_file:
        json.dump(commit_hash, output_file, indent=4)
    print("Analysis Complete")
if __name__ == '__main__':
    # CLI entry point: all five arguments are required.
    arg_parser = argparse.ArgumentParser(description='Run analysis on commits in a repo')
    arg_parser.add_argument('--repo', required=True, dest='repo', type=str)
    arg_parser.add_argument('--access_token', required=True, dest='access_token', type=str)
    arg_parser.add_argument('--owner', required=True, dest='owner', type=str)
    arg_parser.add_argument('--start_year', required=True, dest='start', type=int)
    arg_parser.add_argument('--end_year', required=True, dest='end', type=int)
    args = arg_parser.parse_args()
    run_analysis(args.access_token, args.repo, args.owner, args.start, args.end)
| teliov/tech_debt_analysis | commit_analysis.py | commit_analysis.py | py | 3,353 | python | en | code | 0 | github-code | 90 |
70910146537 | # class Solution:
# def numTrees(self, n: int) -> int:
# # dp[n] = dp[0] * dp[n-1] + dp[1] * dp[n-2] + ... + dp[n-1] * dp[0]
# dp = [0] * (n + 1)
# dp[0] = 1
# dp[1] = 1
# for i in range(2, n + 1):
#
# for j in range(i):
#
# dp[i] += dp[j] * dp[i - j - 1]
# print(dp)
# return dp[-1]
# Definition for a binary tree node.
class TreeNode:
    """Plain binary-tree node (LeetCode convention)."""
    def __init__(self, val=0, left=None, right=None):
        self.val = val
        self.left = left
        self.right = right
class Solution:
    def generateTrees(self, n):
        """Return every structurally unique BST holding values 1..n."""
        def build(lo, hi):
            # An empty value range has exactly one shape: the absent subtree.
            if lo > hi:
                return [None, ]
            shapes = []
            for root_val in range(lo, hi + 1):
                left_shapes = build(lo, root_val - 1)
                right_shapes = build(root_val + 1, hi)
                # Every (left, right) combination yields a distinct tree.
                for left in left_shapes:
                    for right in right_shapes:
                        root = TreeNode(root_val)
                        root.left = left
                        root.right = right
                        shapes.append(root)
            return shapes
        return build(1, n)
# Smoke test: 3 nodes should yield the 5 distinct BST shapes (Catalan C3).
s = Solution()
res = s.generateTrees(3)
print(res) | Ericshunjie/algorithm | 动态规划/不同的二叉搜索树.py | 不同的二叉搜索树.py | py | 1,291 | python | en | code | 0 | github-code | 90 |
18029410099 | n=int(input())
ans=1
# Sieve flags: rec[i] stays True only for primes — composites are cleared
# in the inner loop below.
rec=[True]*(n+1)
# For each prime i, x ends up as 1 + (exponent of i in n!), so the running
# product ans = prod(e_i + 1) is the divisor count of n! modulo 1e9+7.
for i in range(2,n+1):
    if rec[i]:
        x=1
        for j in range(1,n+1):
            y=j
            if y%i==0:
                rec[y]=False
                while y%i==0:
                    x+=1
                    y=y//i
        ans=(ans*x)%1000000007
print(ans) | Aasthaengg/IBMdataset | Python_codes/p03828/s974345018.py | s974345018.py | py | 298 | python | en | code | 0 | github-code | 90 |
10586395142 | # -*- coding: utf-8 -*-
import os,shutil
def copy(sorcePath, targetPath):
    """Copy sorcePath over targetPath, logging the operation.

    Missing source files are ignored silently (no copy, no log line).
    """
    if not os.path.exists(sorcePath):
        return
    shutil.copyfile(sorcePath, targetPath)
    print("copy %s => %s"%(sorcePath,targetPath))
# Sync generated table files back into the TD map folder.
# NOTE(review): the variable names are swapped relative to their roles —
# copy(dest, source) copies FROM the generated folder INTO TD\table.
source = r"TD\table\unit.ini"
dest = r"物遍生成\unit.ini"
copy(dest,source)
source = r"TD\table\ability.ini"
dest = r"物遍生成\ability.ini"
copy(dest,source)
source = r"TD\table\item.ini"
dest = r"物遍生成\item.ini"
copy(dest,source)
source = r"TD\table\upgrade.ini"
dest = r"物遍生成\upgrade.ini"
copy(dest,source) | j8383888/War3MapTD | copy-package.py | copy-package.py | py | 527 | python | en | code | 1 | github-code | 90 |
6557431577 | #
# replay_memory.py
# Here we go once again...
#
import random
import numpy as np
class ArbitraryReplayMemory:
"""
A replay memory for storing any type of elements
"""
def __init__(self, max_size):
self.replay_size = max_size
self.replay_memory = [None for i in range(max_size)]
# Index for the next sample
self.ctr = 0
# How many items are in the replay memory
self.max_size = 0
def __len__(self):
"""
Return current number of samples in the
replay memory
"""
return self.max_size
def add(self, item):
"""
Add an item to the replay memory
"""
self.replay_memory[self.ctr] = item
self.ctr += 1
self.max_size = max(
self.max_size,
self.ctr
)
if self.ctr == self.replay_size:
self.ctr = 0
def get_batch(self, batch_size):
"""
Return batch_size random elements from the replay_memory,
"""
random_idxs = random.sample(range(self.max_size), batch_size)
return_values = [self.replay_memory[idx] for idx in random_idxs]
return return_values
| Miffyli/minecraft-bc | utils/replay_memory.py | replay_memory.py | py | 1,209 | python | en | code | 12 | github-code | 90 |
15835744688 | from fastapi import HTTPException
from SharedInterfaces.RegistryModels import *
from SharedInterfaces.AsyncJobModels import *
from SharedInterfaces.RegistryAPI import VersionRequest
from helpers.util import py_to_dict
from helpers.job_api_helpers import *
from config import Config
from helpers.dynamo_helpers import write_registry_dynamo_db_entry_raw
async def spinoff_creation_job(linked_person_id: str, created_item: ItemBase, username: str, config: Config) -> str:
    """
    Runs the creation spin off tasks using the async job infra.

    This involves submitting a registry job which creates a Create
    activity, then in a chained job, lodges the provenance.

    Returns the session ID from the job api.

    Parameters
    ----------
    linked_person_id : str
        The linked person ID already fetched
    created_item : ItemBase
        The created item to be registered/handled
    username : str
        The job username
    config : Config
        API config

    Returns
    -------
    str
        Session ID

    Raises
    ------
    HTTPException
        Managed HTTP exception
    """
    # Kick off the async Create-activity job; its session id doubles as the
    # workflow link recorded on the item below.
    session_id = await submit_register_create_activity(
        username=username,
        payload=RegistryRegisterCreateActivityPayload(
            created_item_id=created_item.id,
            created_item_subtype=created_item.item_subtype,
            linked_person_id=linked_person_id
        ),
        config=config
    )
    created_item.workflow_links = WorkflowLinks(
        create_activity_workflow_id=session_id)
    # write the updated object (now including the workflow link) back
    friendly_format = py_to_dict(created_item)
    try:
        write_registry_dynamo_db_entry_raw(
            registry_item=friendly_format,
            config=config
        )
    # Raise 500 if something goes wrong
    except Exception as e:
        # Chain the cause so the root error survives in logs/tracebacks.
        raise HTTPException(
            status_code=500,
            detail=f"Failed to write updated item including workflow session id to registry. Aborting. Contact administrator. Error {e}."
        ) from e
    # return the session id
    return session_id
async def spinoff_version_job(
    username: str,
    version_request: VersionRequest,
    new_item: ItemBase,
    version_number: int,
    from_id: str,
    to_id: str,
    linked_person_id: str,
    item_subtype: ItemSubType,
    config: Config
) -> str:
    """
    Spins off a version job.

    This uses the async job infra as the registry API service account.
    This creates a Version activity in the registry, and lodges the
    provenance data.

    Parameters
    ----------
    username : str
        The job username
    version_request : VersionRequest
        The version request which includes info needed for the job
    new_item : ItemBase
        The new item which was generated
    version_number : int
        The version number for the new item
    from_id : str
        The ID of the item being versioned from
    to_id : str
        The ID of the newly created version
    linked_person_id : str
        The ID of the Person linked to the username
    item_subtype : ItemSubType
        The item subtype - used for provenance in job payload
    config : Config
        The config

    Returns
    -------
    str
        The session ID of the version create activity

    Raises
    ------
    HTTPException
        Managed HTTP exceptions
    """
    session_id = await submit_register_version_activity(
        username=username,
        payload=RegistryRegisterVersionActivityPayload(
            reason=version_request.reason,
            version_number=version_number,
            from_version_id=from_id,
            to_version_id=to_id,
            linked_person_id=linked_person_id,
            item_subtype=item_subtype
        ),
        config=config
    )
    # track the job session against the newly created item
    new_item.workflow_links = WorkflowLinks(
        version_activity_workflow_id=session_id
    )
    # write the updated object (now including the workflow link) back
    friendly_format = py_to_dict(new_item)
    try:
        write_registry_dynamo_db_entry_raw(
            registry_item=friendly_format,
            config=config
        )
    # Raise 500 if something goes wrong
    except Exception as e:
        # Chain the cause so the root error survives in logs/tracebacks.
        raise HTTPException(
            status_code=500,
            detail=f"Failed to write updated item including workflow session id to registry. Aborting. Contact administrator. Error {e}."
        ) from e
    # return the session id
    return session_id
| provena/provena | registry-api/helpers/workflow_helpers.py | workflow_helpers.py | py | 4,514 | python | en | code | 3 | github-code | 90 |
20907925222 | import numpy as np
from scipy.special import comb
from pymoab import rng
from preprocessor.meshHandle.finescaleMesh import FineScaleMesh
from .utils import rotation_to_align
class DFNMeshGenerator(object):
    """
    Base class for the mesh generator.
    """
    def __init__(self):
        pass
    def run(self):
        # Entry point; concrete generators build vugs and fractures here.
        raise NotImplementedError()
    def compute_vugs(self):
        # Must flag mesh entities falling inside randomly generated vugs.
        raise NotImplementedError()
    def compute_fractures(self):
        # Must flag mesh entities falling inside fractures connecting vugs.
        raise NotImplementedError()
class DFNMeshGenerator2D(DFNMeshGenerator):
    """
    A 2D mesh generator for fractured and vuggy reservoirs.

    Vugs are random ellipses (mesh property ``vug`` set to 1); fractures
    are thin rectangles connecting two vugs (``vug`` set to 2).
    """
    def __init__(self, mesh_file, ellipsis_params_range, num_ellipsis, num_fractures):
        """
        Parameters
        ------
        mesh_file : str
            A string containing the path to the input mesh file.
        ellipsis_params_range : iterable
            An iterable containing the maximum and the minimum value for the ellipsis parameters.
        num_ellipsis : int
            The number of ellipsis to create.
        num_fractures : int
            Number of fractures to create.
        Raises
        ------
        ValueError
            If the number of fractures is greater than the number of possible
            pairs of ellipsis.
        """
        self.mesh = FineScaleMesh(mesh_file, dim=2)
        self.ellipsis_params_range = ellipsis_params_range
        self.num_ellipsis = num_ellipsis
        # Each fracture joins a distinct pair of ellipses, so C(n, 2) is a
        # hard upper bound on how many can exist.
        if num_fractures > comb(self.num_ellipsis, 2):
            raise ValueError(
                "The number of fractures must be inferior to the number of possible pairs of ellipsis.")
        self.num_fractures = num_fractures
        self.random_rng = np.random.default_rng()
    def run(self):
        """
        Main method. Generates a mesh containing fractures and vugs.
        """
        # Work on the 2D projection of the face centroids.
        centroids = self.mesh.faces.center[:][:, 0:2]
        xs, ys = centroids[:, 0], centroids[:, 1]
        x_range = xs.min(), xs.max()
        y_range = ys.min(), ys.max()
        centers, params, angles = self.get_random_ellipsis(x_range, y_range)
        print('Computing vugs')
        faces_per_ellipsis = self.compute_vugs(
            centers, angles, params, centroids)
        print('Computing fractures')
        self.compute_fractures(faces_per_ellipsis, centers)
        print('Done!')
    def compute_vugs(self, centers, angles, params, centroids):
        """
        Compute the faces inside the ellipses given by centers, angles and
        params, setting their ``vug`` property to 1.
        Parameters
        ------
        centers : numpy.array
            Array containing the cartesian coordinates of
            each ellipse's center.
        angles : numpy.array
            Array containing the rotation angle (in radians)
            of each ellipse.
        params : numpy.array
            Array containing the parameters (semi-axis sizes)
            of each ellipse.
        centroids : numpy.array
            The centroids of the faces compounding the mesh.
        Returns
        ------
        faces_per_ellipsis : list
            A list of Pymoab's ranges describing the faces
            inside each ellipse.
        """
        faces_per_ellipsis = []
        for center, param, angle in zip(centers, params, angles):
            R = self.get_rotation_matrix(angle)
            # Express the centroids in the ellipse's local frame; a point is
            # inside when the normalised quadratic form sums below 1.
            X = (centroids - center).dot(R.T)
            faces_in_vug = (X / param)**2
            faces_in_vug = faces_in_vug.sum(axis=1)
            # Retrieve the range of faces lying inside this vug and keep it
            # for the fracture-overlap checks later.
            faces_per_ellipsis.append(
                self.mesh.core.all_faces[faces_in_vug < 1])
            self.mesh.vug[faces_in_vug < 1] = 1
        return faces_per_ellipsis
    def compute_fractures(self, faces_per_ellipsis, centers):
        """
        Generates random fractures, i.e., rectangles connecting two vugs,
        and marks the faces inside them (``vug`` set to 2 by
        ``check_intersections``).
        Parameters
        ----------
        faces_per_ellipsis : list
            A list of Pymoab's ranges describing the faces
            inside each ellipse.
        centers : numpy.array
            Array containing the cartesian coordinates of
            each ellipsis' center.
        Returns
        -------
        None
        """
        selected_pairs = []
        num_possible_pairs = comb(self.num_ellipsis, 2)
        found = True
        for i in range(self.num_fractures):
            # Find a pair of ellipsis that are not overlapped and are
            # not already connected by a fracture.
            count = 0
            while True:
                count += 1
                e1, e2 = self.random_rng.choice(
                    np.arange(self.num_ellipsis), size=2, replace=False)
                if (e1, e2) not in selected_pairs and \
                        rng.intersect(faces_per_ellipsis[e1], faces_per_ellipsis[e2]).empty():
                    selected_pairs.extend([(e1, e2), (e2, e1)])
                    break
                # Give up once more draws were made than there are pairs.
                if count > num_possible_pairs:
                    found = False
                    break
            if not found:
                break
            # Calculating the rectangle's parameters.
            L = np.linalg.norm(centers[e1] - centers[e2])  # Length
            h = 10 / L  # Height
            print("Creating fracture {} of {}".format(i+1, self.num_fractures))
            self.check_intersections(h, L, centers[e1], centers[e2])
    def get_random_ellipsis(self, x_range, y_range):
        """
        Draw uniformly random centers (within the mesh's bounding box),
        semi-axis parameters (within ``ellipsis_params_range``) and rotation
        angles (in [0, 2*pi)) for ``num_ellipsis`` ellipses.
        Returns
        -------
        (centers, params, angles) : tuple of numpy.array
        """
        random_centers = np.zeros((self.num_ellipsis, 2))
        random_centers[:, 0] = self.random_rng.uniform(
            low=x_range[0], high=x_range[1], size=self.num_ellipsis)
        random_centers[:, 1] = self.random_rng.uniform(
            low=y_range[0], high=y_range[1], size=self.num_ellipsis)
        random_params = self.random_rng.uniform(low=self.ellipsis_params_range[0],
                                                high=self.ellipsis_params_range[1],
                                                size=(self.num_ellipsis, 2))
        random_angles = self.random_rng.uniform(
            low=0.0, high=2*np.pi, size=self.num_ellipsis)
        return random_centers, random_params, random_angles
    def check_intersections(self, h, L, c1, c2):
        """
        Check which faces are inside the fracture rectangle and set their
        ``vug`` property to 2 (faces already flagged as vug keep value 1).
        Parameters
        ----------
        h : float
            Rectangle's height.
        L : float
            Rectangle's length.
        c1 : numpy.array
            Left end of the rectangle's axis.
        c2 : numpy.array
            Right end of the rectangle's axis.
        Returns
        -------
        None
        """
        vertices_coords = self.mesh.nodes.coords[:][:, 0:2]
        edges_endpoints = self.mesh.edges.connectivities[:]
        num_edges_endpoints = edges_endpoints.shape[0]
        edges_endpoints_coords = self.mesh.nodes.coords[edges_endpoints.ravel()][:, 0:2].reshape(
            (num_edges_endpoints, 2, 2))
        # We'll first check whether an edge intersects the line segment between
        # the ellipsis' centers.
        u = edges_endpoints_coords[:, 1, :] - edges_endpoints_coords[:, 0, :]
        v = c2 - c1
        w = edges_endpoints_coords[:, 0, :] - c1
        uv_perp_prod = np.cross(u, v)
        wv_perp_prod = np.cross(w, v)
        uw_perp_prod = np.cross(u, w)
        # Edges parallel to the axis (zero cross product) cannot intersect.
        maybe_intersect = np.where(uv_perp_prod != 0)[0]
        # s1, t1: parametric coordinates of the intersection point on the
        # edge and on the axis; both must lie inside [0, 1].
        s1 = - wv_perp_prod[maybe_intersect] / uv_perp_prod[maybe_intersect]
        t1 = uw_perp_prod[maybe_intersect] / uv_perp_prod[maybe_intersect]
        intersecting_edges = maybe_intersect[(
            s1 >= 0) & (s1 <= 1) & (t1 >= 0) & (t1 <= 1)]
        faces_in_fracture_from_edges = self.mesh.edges.bridge_adjacencies(
            intersecting_edges, "edges", "faces").ravel()
        # We now check which vertices are inside the fracture's bounding
        # rectangle.
        r = vertices_coords - c1
        norm_v = np.linalg.norm(v)
        # d, l: signed distance across / along the fracture axis.
        d = np.cross(r, v) / norm_v
        l = np.dot(r, v) / norm_v
        vertices_in_fracture = self.mesh.nodes.all[(
            d >= 0) & (d <= h) & (l >= 0) & (l <= L)]
        faces_in_fracture_from_nodes = np.concatenate(self.mesh.nodes.bridge_adjacencies(
            vertices_in_fracture, "edges", "faces")).ravel()
        faces_in_fracture = np.intersect1d(np.unique(faces_in_fracture_from_edges),
                                           np.unique(faces_in_fracture_from_nodes))
        # Faces already inside a vug keep their value of 1.
        faces_in_fracture_vug_value = self.mesh.vug[faces_in_fracture].flatten(
        )
        filtered_faces_in_fracture = faces_in_fracture[faces_in_fracture_vug_value != 1]
        self.mesh.vug[filtered_faces_in_fracture] = 2
    def get_rotation_matrix(self, angle):
        """
        Calculates the 2D rotation matrix for the given angle.
        Parameters
        ----------
        angle : float
            The rotation angle in radians.
        Returns
        -------
        R : numpy.array
            The 2x2 rotation matrix.
        """
        cos_theta = np.cos(angle)
        sin_theta = np.sin(angle)
        R = np.array((cos_theta, sin_theta,
                      - sin_theta, cos_theta)).reshape(2, 2)
        return R
    def write_file(self, path):
        """
        Writes the resulting mesh to a file.
        Parameters
        ----------
        path : str
            A string containing the file path.
        Returns
        -------
        None
        """
        vugs_meshset = self.mesh.core.mb.create_meshset()
        self.mesh.core.mb.add_entities(
            vugs_meshset, self.mesh.core.all_faces)
        self.mesh.core.mb.write_file(path, [vugs_meshset])
class DFNMeshGenerator3D(DFNMeshGenerator):
"""
A 3D mesh generator for fracured and vuggy reservoirs.
"""
def __init__(self, mesh_file, ellipsis_params_range,
num_ellipsoids, num_fractures, fracture_shape="cylinder"):
"""
Constructor method.
Parameters
----------
mesh_file : str
A string containing the path to the input mesh file.
ellipsis_params_range : iterable
An iterable of size 2 containing the maximum and the
minimum value for the ellipsoids parameters.
num_ellipsoids : int
The number of ellipsoids to create.
num_fractures : int
The number of fractures to create.
fracture_shape: str
The shape of fractures to be generated.
Raises
------
ValueError
If the number of fractures is greater than the number of possible
pairs of ellipsoids.
"""
self.mesh = FineScaleMesh(mesh_file)
self.ellipsis_params_range = ellipsis_params_range
self.num_ellipsoids = num_ellipsoids
if num_fractures > comb(self.num_ellipsoids, 2):
raise ValueError(
"The number of fractures must be inferior to the number of possible pairs of ellipsoids.")
self.num_fractures = num_fractures
if fracture_shape not in ("cylinder", "box", "ellipsoid"):
raise ValueError("Invalid shape for fractures.")
self.fracture_shape = fracture_shape
self.random_rng = np.random.default_rng()
def run(self):
    """Main entry point.

    Creates randomly sized/rotated ellipsoidal vugs inside the mesh's
    bounding ranges, then the fractures connecting them.
    """
    centroids = self.mesh.volumes.center[:]
    # Per-axis (min, max) ranges of the volume centroids.
    ranges = [(centroids[:, axis].min(), centroids[:, axis].max())
              for axis in range(3)]
    centers, params, angles = self.get_random_ellipsoids(*ranges)
    print('Computing vugs')
    vols_per_ellipsoid = self.compute_vugs(centers, angles, params, centroids)
    print('Computing fractures')
    self.compute_fractures(vols_per_ellipsoid, centers)
    print('Done!')
def compute_vugs(self, centers, angles, params, centroids):
    """Tag the volumes falling inside each random ellipsoid as vugs.

    For every volume inside an ellipsoid the mesh property "vug" is set
    to 1.

    Parameters
    ----------
    centers : numpy.array
        Cartesian coordinates of each ellipsoid center.
    angles : numpy.array
        Rotation angles (radians) of each ellipsoid about the axes.
    params : numpy.array
        Semi-axis lengths of each ellipsoid.
    centroids : numpy.array
        Centroids of the mesh volumes.

    Returns
    -------
    vols_per_ellipsoid : list
        One Pymoab range per ellipsoid with the volumes inside it.
    """
    vols_per_ellipsoid = []
    for center, param, angle in zip(centers, params, angles):
        R = self.get_rotation_matrix(angle)
        # Centroid coordinates in the ellipsoid's local frame.
        local_coords = (centroids - center).dot(R.T)
        # Quadratic form of the ellipsoid: < 1 means strictly inside.
        quad_form = ((local_coords / param) ** 2).sum(axis=1)
        inside = quad_form < 1
        vols_per_ellipsoid.append(self.mesh.core.all_volumes[inside])
        self.mesh.vug[inside] = 1
    return vols_per_ellipsoid
def compute_fractures(self, vols_per_ellipsoid, centers):
    """Generate fractures with the configured shape and count.

    Parameters
    ----------
    vols_per_ellipsoid : list
        One Pymoab range per ellipsoid with the volumes inside it.
    centers : numpy.array
        Cartesian coordinates of the vug ellipsoid centers.

    Returns
    -------
    None
    """
    # Dispatch on the shape selected at construction time; the
    # constructor guarantees the shape is one of these keys.
    handlers = {
        "cylinder": self.compute_fractures_as_cylinders,
        "box": self.compute_fractures_as_boxes,
        "ellipsoid": self.compute_fractures_as_ellipsoids,
    }
    handler = handlers.get(self.fracture_shape)
    if handler is not None:
        handler(vols_per_ellipsoid, centers)
def compute_fractures_as_cylinders(self, vols_per_ellipsoid, centers):
    """Create `num_fractures` cylinder-shaped fractures.

    Each fracture connects two vugs that do not overlap and are not yet
    connected; volumes inside it get the "vug" property set to 2.

    Parameters
    ----------
    vols_per_ellipsoid : list
        One Pymoab range per ellipsoid with the volumes inside it.
    centers : numpy.array
        Cartesian coordinates of the vug ellipsoid centers.

    Returns
    -------
    None
    """
    used_pairs = []
    for i in range(self.num_fractures):
        # Draw pairs until the two vugs neither overlap nor are already
        # connected by a fracture.
        while True:
            e1, e2 = self.random_rng.choice(
                np.arange(self.num_ellipsoids), size=2, replace=False)
            if (e1, e2) not in used_pairs and \
                    rng.intersect(vols_per_ellipsoid[e1],
                                  vols_per_ellipsoid[e2]).empty():
                used_pairs.extend([(e1, e2), (e2, e1)])
                break
        # Cylinder parameters: axis length and a radius inversely
        # proportional to it.
        axis_length = np.linalg.norm(centers[e1] - centers[e2])
        radius = 10 / axis_length
        print("Creating fracture {} of {}".format(i+1, self.num_fractures))
        self.check_intersections_for_cylinders(
            radius, axis_length, centers[e1], centers[e2])
def compute_fractures_as_boxes(self, vols_per_ellipsoid, centers):
    """Create `num_fractures` box-shaped fractures.

    Each fracture connects two vugs that do not overlap and are not yet
    connected; volumes inside it get the "vug" property set to 2.

    Parameters
    ----------
    vols_per_ellipsoid : list
        One Pymoab range per ellipsoid with the volumes inside it.
    centers : numpy.array
        Cartesian coordinates of the ellipsoid centers.

    Returns
    -------
    None
    """
    # Minimal box dimensions derived from the mesh edge lengths.
    endpoints = self.mesh.edges.connectivities[:]
    n_edges = len(endpoints)
    edge_coords = self.mesh.nodes.coords[endpoints.flatten()].reshape((n_edges, 2, 3))
    edge_lengths = np.linalg.norm(edge_coords[:, 0, :] - edge_coords[:, 1, :], axis=1)
    min_height = edge_lengths.min()
    min_length = edge_lengths.max()
    used_pairs = []
    for i in range(self.num_fractures):
        # Draw pairs until the two vugs neither overlap nor are already
        # connected by a fracture.
        while True:
            e1, e2 = self.random_rng.choice(
                np.arange(self.num_ellipsoids), size=2, replace=False)
            if (e1, e2) not in used_pairs and \
                    rng.intersect(vols_per_ellipsoid[e1],
                                  vols_per_ellipsoid[e2]).empty():
                used_pairs.extend([(e1, e2), (e2, e1)])
                break
        depth = np.linalg.norm(centers[e1] - centers[e2])
        # The box must be at least `min_length` long, and scale with the
        # distance between the two vug centers.
        length = max(min_length, depth / 20)
        print("Creating fracture {} of {}".format(i+1, self.num_fractures))
        self.check_intersections_for_boxes(
            centers[e1], centers[e2], depth, length, min_height)
def compute_fractures_as_ellipsoids(self, vols_per_ellipsoid, centers):
    """Create `num_fractures` ellipsoid-shaped fractures.

    Each fracture connects two vugs that do not overlap and are not yet
    connected; volumes inside it get the "vug" property set to 2.

    Parameters
    ----------
    vols_per_ellipsoid : list
        One Pymoab range per ellipsoid with the volumes inside it.
    centers : numpy.array
        Cartesian coordinates of the ellipsoid centers.

    Returns
    -------
    None
    """
    # The smallest semi-axis is derived from the shortest mesh edge.
    endpoints = self.mesh.edges.connectivities[:]
    n_edges = len(endpoints)
    edge_coords = self.mesh.nodes.coords[endpoints.flatten()].reshape((n_edges, 2, 3))
    edge_lengths = np.linalg.norm(edge_coords[:, 0, :] - edge_coords[:, 1, :], axis=1)
    param_c = edge_lengths.min()
    used_pairs = []
    for i in range(self.num_fractures):
        # Draw pairs until the two vugs neither overlap nor are already
        # connected by a fracture.
        while True:
            e1, e2 = self.random_rng.choice(
                np.arange(self.num_ellipsoids), size=2, replace=False)
            if (e1, e2) not in used_pairs and \
                    rng.intersect(vols_per_ellipsoid[e1],
                                  vols_per_ellipsoid[e2]).empty():
                used_pairs.extend([(e1, e2), (e2, e1)])
                break
        print("Creating fracture {} of {}".format(
            i + 1, self.num_fractures))
        c1, c2 = centers[e1], centers[e2]
        d = np.linalg.norm(c1 - c2)
        # Semi-axes: half the center distance, a twentieth of it, and the
        # minimal mesh edge length.
        self.check_intersections_for_ellipsoids(
            c1, c2, np.array((d / 2, d / 20, param_c)))
def check_intersections_for_cylinders(self, R, L, c1, c2):
    """
    Tag (vug flag = 2) every non-vug volume touching the cylindrical
    fracture whose axis goes from c1 to c2.
    Parameters
    ----------
    R : float
        Cylinder's radius.
    L : float
        Cylinder's length; assumed equal to |c2 - c1| — TODO confirm
        callers always pass the axis norm.
    c1 : numpy.array
        Left end of the cylinder's axis.
    c2 : numpy.array
        Right end of the cylinder's axis.
    Returns
    ------
    None
    """
    vertices = self.mesh.nodes.coords[:]
    # Cylinder's vector parameters: axis direction `e` and moment `m`
    # (Plücker coordinates of the axis line).
    e = c2 - c1
    m = np.cross(c1, c2)
    # Calculating the distance between the vertices and the main axis:
    # |m + e x v| / |e|.
    d_vector = m + np.cross(e, vertices)
    d = np.linalg.norm(d_vector, axis=1) / L
    # Computing the projection of the vertices onto the cylinder's axis.
    u = vertices - c1
    proj_vertices = u.dot(e) / L
    # Checking which vertices are inside the cylinder: within radius R of
    # the axis and with projection between the two end caps.
    vertices_in_cylinder = self.mesh.nodes.all[(d <= R) & (
        proj_vertices >= 0) & (proj_vertices <= L)]
    if len(vertices_in_cylinder) > 0:
        volumes_in_cylinder = self.mesh.nodes.bridge_adjacencies(
            vertices_in_cylinder, "edges", "volumes").ravel()
        # bridge_adjacencies may return a ragged object array; flatten it.
        if volumes_in_cylinder.dtype == "object":
            volumes_in_cylinder = np.concatenate(volumes_in_cylinder)
        volumes_in_cylinder = np.unique(volumes_in_cylinder)
        # Only volumes not already tagged as vugs (flag 0) become fracture.
        volumes_vug_value = self.mesh.vug[volumes_in_cylinder].flatten()
        non_vug_volumes = volumes_in_cylinder[volumes_vug_value == 0]
        self.mesh.vug[non_vug_volumes] = 2
    # Also tag the volumes crossed by the axis segment itself, so thin
    # fractures stay connected even where no vertex falls in the cylinder.
    self.check_intersections_along_axis(c1, c2)
def check_intersections_for_boxes(self, c1, c2, d, l, h):
    """
    Tag (vug flag = 2) every non-vug volume with a vertex inside the
    box-shaped fracture joining c1 and c2.
    Parameters
    ----------
    c1 : numpy.array
        Left end of the box's main axis.
    c2 : numpy.array
        Right end of the box's main axis.
    d: float
        Depth of box (extent along the c1-c2 axis).
    l: float
        Length of box.
    h: float
        Height of box.
    Returns
    ------
    None
    """
    # Box center.
    center = (c1 + c2) / 2
    # Defining an orientation vector so we can compute rotations: R aligns
    # the local x half-axis `u` with the actual axis direction `v`.
    u = np.array([d / 2, 0.0, 0.0])
    v = c1 - center
    R = rotation_to_align(u, v)
    # Compute the rotated axis.
    rotated_ax = np.array([1.0, 0.0, 0.0]).dot(R)
    rotated_ay = np.array([0.0, 1.0, 0.0]).dot(R)
    rotated_az = np.array([0.0, 0.0, 1.0]).dot(R)
    # Compute volumes inside the box (what's in the box!?): a vertex is
    # inside when its coordinates in the box frame are within the
    # half-extents along all three rotated axes.
    vertices = self.mesh.nodes.coords[:]
    X = vertices - center
    vertices_in_x_range = np.abs(X.dot(rotated_ax)) <= (d / 2)
    vertices_in_y_range = np.abs(X.dot(rotated_ay)) <= (l / 2)
    vertices_in_z_range = np.abs(X.dot(rotated_az)) <= (h / 2)
    vertices_handles = self.mesh.nodes.all[vertices_in_x_range & vertices_in_y_range & vertices_in_z_range]
    if len(vertices_handles) > 0:
        # NOTE(review): unlike the cylinder case, there is no dtype ==
        # "object" check before np.concatenate — confirm bridge_adjacencies
        # always returns a concatenable array on this path.
        vols_in_fracture = np.concatenate(self.mesh.nodes.bridge_adjacencies(
            vertices_handles, "edges", "volumes")).ravel()
        unique_vols_in_fracture = np.unique(vols_in_fracture)
        # Only volumes not already tagged as vugs become fracture volumes.
        unique_volumes_vug_values = self.mesh.vug[unique_vols_in_fracture].flatten()
        non_vug_volumes = unique_vols_in_fracture[unique_volumes_vug_values == 0]
        self.mesh.vug[non_vug_volumes] = 2
    # Also tag the volumes crossed by the axis segment itself.
    self.check_intersections_along_axis(c1, c2)
def check_intersections_for_ellipsoids(self, c1, c2, params):
    """
    Tag (vug flag = 2) every non-vug volume with a vertex inside the
    ellipsoid-shaped fracture joining c1 and c2.
    Parameters
    ----------
    c1 : numpy.array
        Left end of the ellipsoid's major axis.
    c2 : numpy.array
        Right end of the ellipsoid's major axis.
    params : numpy.array
        The three semi-axis lengths of the ellipsoid.
    Returns
    ------
    None
    """
    # Ellipsoid's parameters: centered midway between the two vug centers.
    center = (c1 + c2) / 2
    # Defining orientation vectors: R aligns the local major half-axis `u`
    # with the actual axis direction `v`.
    u = np.array([params[0], 0.0, 0.0])
    v = c1 - center
    R = rotation_to_align(u, v)
    vertices = self.mesh.nodes.coords[:]
    # Vertex coordinates in the ellipsoid frame; the quadratic form below
    # is < 1 exactly for points strictly inside the ellipsoid.
    X = (vertices - center).dot(R.T)
    vertices_in_ellipsoid = ((X / params)**2).sum(axis=1)
    vertices_handles = self.mesh.nodes.all[vertices_in_ellipsoid < 1]
    if len(vertices_handles) > 0:
        vols_in_fracture = np.concatenate(self.mesh.nodes.bridge_adjacencies(
            vertices_handles, "edges", "volumes")).ravel()
        unique_vols_in_fracture = np.unique(vols_in_fracture)
        # Only volumes not already tagged as vugs become fracture volumes.
        unique_volumes_vug_values = self.mesh.vug[unique_vols_in_fracture].flatten()
        non_vug_volumes = unique_vols_in_fracture[unique_volumes_vug_values == 0]
        self.mesh.vug[non_vug_volumes] = 2
    # Also tag the volumes crossed by the axis segment itself.
    self.check_intersections_along_axis(c1, c2)
def check_intersections_along_axis(self, c1, c2):
    """
    Tag (vug flag = 2) every non-vug volume owning a face crossed by the
    segment c1-c2, keeping the fracture connected along its axis.
    Parameters
    ----------
    c1 : numpy.array
        Start point of the axis segment.
    c2 : numpy.array
        End point of the axis segment.
    Returns
    ------
    None
    """
    # Check for intersection between the box's axis and the mesh faces.
    faces = self.mesh.faces.all[:]
    num_faces = len(faces)
    faces_nodes_handles = self.mesh.faces.connectivities[:]
    num_vertices_of_volume = faces_nodes_handles.shape[1]
    faces_vertices = self.mesh.nodes.coords[faces_nodes_handles.flatten()].reshape(
        (num_faces, num_vertices_of_volume, 3))
    # Plane parameters of each face: a point R_0 on the plane and the
    # (unnormalized) normal N from two face edges.
    R_0 = faces_vertices[:, 0, :]
    N = np.cross(faces_vertices[:, 1, :] - R_0, faces_vertices[:, 2, :] - R_0)
    # Compute the parameters of the main axis line: the segment is
    # c1 + r*(c2 - c1); r solves the plane equation for each face.
    num = np.einsum("ij,ij->i", N, R_0 - c1)
    denom = N.dot(c2 - c1)
    # Faces nearly parallel to the segment (|denom| <= 1e-6) are discarded.
    non_zero_denom = denom[np.abs(denom) > 1e-6]
    non_zero_num = num[np.abs(denom) > 1e-6]
    r = non_zero_num / non_zero_denom
    # Check faces intersected by the axis' line: keep 0 <= r <= 1 so the
    # intersection point lies within the segment.
    filtered_faces = faces[np.abs(denom) > 1e-6]
    filtered_faces = filtered_faces[(r >= 0) & (r <= 1)]
    filtered_nodes = faces_vertices[np.abs(denom) > 1e-6]
    filtered_nodes = filtered_nodes[(r >= 0) & (r <= 1)]
    r = r[(r >= 0) & (r <= 1)]
    P = c1 + r[:, np.newaxis]*(c2 - c1)
    # Compute the intersection point between the face plane and the axis
    # line and check if such point is in the face: the angles subtended at
    # P by consecutive face vertices must sum to 2*pi.
    angle_sum = np.zeros(filtered_nodes.shape[0])
    for i in range(num_vertices_of_volume):
        p0, p1 = filtered_nodes[:, i, :], filtered_nodes[:, (i+1) % num_vertices_of_volume, :]
        a = p0 - P
        b = p1 - P
        norm_prod = np.linalg.norm(a, axis=1)*np.linalg.norm(b, axis=1)
        # If the point of intersection is too close to a vertex, then
        # take it as the vertex itself.
        angle_sum[norm_prod <= 1e-6] = 2*np.pi
        cos_theta = np.einsum("ij,ij->i", a, b) / norm_prod
        theta = np.arccos(cos_theta)
        angle_sum += theta
    # If the sum of the angles around the intersection point is 2*pi, then
    # the point is inside the polygon.
    intersected_faces = filtered_faces[np.abs(2*np.pi - angle_sum) < 1e-6]
    volumes_sharing_face = self.mesh.faces.bridge_adjacencies(
        intersected_faces, "faces", "volumes")
    unique_volumes = np.unique(volumes_sharing_face.ravel())
    # Only volumes not already tagged as vugs become fracture volumes.
    unique_volumes_vug_values = self.mesh.vug[unique_volumes].flatten()
    non_vug_volumes = unique_volumes[unique_volumes_vug_values == 0]
    self.mesh.vug[non_vug_volumes] = 2
def write_file(self, path="results/vugs.vtk"):
    """Write the resulting (3D) mesh, i.e. all its volumes, to a file.

    Parameters
    ----------
    path : str
        Destination file path; defaults to 'results/vugs.vtk'.

    Returns
    -------
    None
    """
    moab = self.mesh.core.mb
    meshset = moab.create_meshset()
    moab.add_entities(meshset, self.mesh.core.all_volumes)
    moab.write_file(path, [meshset])
def get_random_ellipsoids(self, x_range, y_range, z_range):
    """Draw random centers, semi-axis sizes and rotation angles for the
    ellipsoids.

    Parameters
    ----------
    x_range, y_range, z_range : iterable
        (min, max) bounds of each cartesian coordinate.

    Returns
    -------
    random_centers : numpy.array
        (num_ellipsoids, 3) center points.
    random_params : numpy.array
        (num_ellipsoids, 3) semi-axis sizes within ellipsis_params_range.
    random_angles : numpy.array
        (num_ellipsoids, 3) rotation angles in [0, 2*pi).
    """
    shape = (self.num_ellipsoids, 3)
    random_centers = np.zeros(shape)
    # Fill one coordinate axis at a time, in x, y, z order.
    for axis, (low, high) in enumerate((x_range, y_range, z_range)):
        random_centers[:, axis] = self.random_rng.uniform(
            low=low, high=high, size=self.num_ellipsoids)
    random_params = self.random_rng.uniform(
        low=self.ellipsis_params_range[0],
        high=self.ellipsis_params_range[1],
        size=shape)
    random_angles = self.random_rng.uniform(low=0.0, high=2*np.pi, size=shape)
    return random_centers, random_params, random_angles
def get_rotation_matrix(self, angle):
    """Build the 3D rotation matrix for the given Euler angles.

    Parameters
    ----------
    angle : numpy.array
        The three rotation angles (about x, y, z) in radians.

    Returns
    -------
    numpy.array
        The 3x3 rotation matrix Rz(angle[2]) @ Ry(angle[1]) @ Rx(angle[0])
        written in closed form.
    """
    cx, cy, cz = np.cos(angle)
    sx, sy, sz = np.sin(angle)
    return np.array([
        [cy * cz, cz * sy * sx - sz * cx, cz * sy * cx + sz * sx],
        [sz * cy, sz * sy * sx + cz * cx, sz * sy * cx - cz * sx],
        [-sy, cy * sx, cy * cx],
    ])
| padmec-reservoir/dfn-vugs-generator | src/dfn_mesh_generator.py | dfn_mesh_generator.py | py | 31,330 | python | en | code | 1 | github-code | 90 |
class Solution:
    def removeDuplicates(self, s: str, k: int) -> str:
        """Repeatedly remove k adjacent equal characters (LeetCode 1209).

        Scans `s` once with a stack of [char, run_length] pairs; whenever a
        run reaches length k it is dropped. O(n) time and space.

        :param s: input string of characters
        :param k: run length that triggers removal (k >= 1)
        :return: the string left after no k-run remains
        """
        stack = []
        for ch in s:
            if stack and stack[-1][0] == ch:
                stack[-1][1] += 1
            else:
                stack.append([ch, 1])
            if stack[-1][1] == k:
                stack.pop()
        # join is O(n); the original repeated string concatenation in a loop
        # is quadratic in the worst case.
        return "".join(ch * count for ch, count in stack)
def read_points():
    """Read N, then N lines of 'x y h' integers, from stdin."""
    n = int(input())
    return [list(map(int, input().split())) for _ in range(n)]


def find_center(points):
    """Brute-force the pyramid center (cx, cy) and height H over [0, 100]^2.

    `points` must be sorted by observed height, highest first: the candidate
    H is derived from the highest observation and checked against the rest.
    Returns [0, 0, 0] if no candidate fits (should not happen for valid input).
    """
    for cy in range(0, 101):
        for cx in range(0, 101):
            x0, y0, h0 = points[0]
            height = h0 + abs(x0 - cx) + abs(y0 - cy)
            if all(max(height - abs(x - cx) - abs(y - cy), 0) == h
                   for x, y, h in points[1:]):
                return [cx, cy, height]
    return [0, 0, 0]


points = read_points()
points.sort(key=lambda p: p[2], reverse=True)
cx, cy, height = find_center(points)
print(str(cx) + " " + str(cy) + " " + str(height))
28411658694 | import tensorflow as tf
from tensorflow.contrib import layers
def flatten_layer(layer):
    """Flatten a (batch, d1, d2, d3) tensor to (batch, d1*d2*d3).

    Returns the flattened tensor together with the per-sample feature count.
    """
    num_features = layer.get_shape()[1:4].num_elements()
    flat = tf.reshape(layer, [-1, num_features])
    return flat, num_features
def graph_attention_layer(A, M, v, layer):
    """Sparse graph attention over the edges of A.

    A is a tf.SparseTensor adjacency, M the dense node features, v a dict
    holding the two attention parameter vectors, and `layer` an int used to
    scope the variables. Returns row-softmax-normalized attention weights
    as a tf.SparseTensor with A's sparsity pattern.
    """
    with tf.variable_scope("layer_%s" % layer):
        # Source- and target-side attention logits; multiplying by the
        # sparse A keeps only entries on existing edges.
        f1 = tf.matmul(M, v[0])
        f1 = A * f1
        f2 = tf.matmul(M, v[1])
        f2 = A * tf.transpose(f2, [1, 0])
        logits = tf.sparse_add(f1, f2)
        # Sigmoid squashes the raw edge scores into (0, 1)...
        unnormalized_attentions = tf.SparseTensor(indices=logits.indices,
                                                  values=tf.nn.sigmoid(logits.values),
                                                  dense_shape=logits.dense_shape)
        # ...then a sparse row-wise softmax normalizes them per node.
        attentions = tf.sparse_softmax(unnormalized_attentions)
        # Rebuild the SparseTensor explicitly (same indices/values/shape).
        attentions = tf.SparseTensor(indices=attentions.indices,
                                     values=attentions.values,
                                     dense_shape=attentions.dense_shape)
        return attentions
class GATE:
    """Two-view graph attention auto-encoder with a shared self-expressive
    coefficient layer (TF1-style graph construction)."""
    def __init__(self, hidden_dims, hidden_dims2, lambda_):
        # Weight of the graph-structure reconstruction loss.
        self.lambda_ = lambda_
        # Per-layer sparse attention matrices (filled by the encoder passes).
        self.C = {}
        self.C2 = {}
        self.n_layers = len(hidden_dims) - 1
        self.n_layers2 = len(hidden_dims2) - 1
        # NOTE(review): both views share the same W/v weights built from
        # hidden_dims; hidden_dims2 only sets the layer count — confirm.
        self.W, self.v = self.define_weights(hidden_dims)
        # Self-expressive coefficient matrix; 10299 is the hard-coded number
        # of samples of the target dataset (see input_batch_size below).
        self.weight = tf.Variable(1.0e-4 * tf.ones(shape=(10299, 10299)), name="weight")
        # Zero the diagonal so a sample cannot represent itself.
        self.coef = self.weight - tf.matrix_diag(tf.diag_part(self.weight))
        self.params = {"n_clusters": 6, "encoder_dims": [256], "alpha": 1.0}
        # Cluster-center variable (not used in __call__ as written).
        self.mu = tf.Variable(tf.zeros(shape=(self.params["n_clusters"], self.params["encoder_dims"][-1])), name="mu")
        self.n_cluster = self.params["n_clusters"]
        self.input_batch_size = 10299
        self.alpha = self.params['alpha']
def __call__(self, A, A2, X, X2, R, R2, S, S2, p, Theta, Labels):
    """Build the two-view model graph and return its losses and embeddings.

    A/A2: sparse adjacencies per view; X/X2: node features; R/S (R2/S2):
    edge endpoint index lists for the structure loss; p: target cluster
    distribution for the cross-entropy heads; Theta: mask weighting the
    coefficient-matrix penalty; Labels: ground-truth labels (stored only).
    """
    # Encoder1
    H = X
    for layer in range(self.n_layers):
        H = self.__encoder(A, H, layer)
    self.H = H
    # Self-expressive reconstruction of view 1: H ~ coef @ H.
    self.HC = tf.matmul(self.coef, H)
    H = self.HC
    # Decoder1
    for layer in range(self.n_layers - 1, -1, -1):
        H = self.__decoder(H, layer)
    X_ = H
    # Cluster logits of view 1 from the (pre-coefficient) embedding.
    layer_flat, num_features = flatten_layer(self.H)
    layer_full = tf.layers.dense(inputs=layer_flat, units=512, activation=None,
                                 kernel_initializer=layers.variance_scaling_initializer(dtype=tf.float32))
    self.z = tf.layers.dense(inputs=layer_full, units=6, activation=None,
                             kernel_initializer=layers.variance_scaling_initializer(dtype=tf.float32))
    # Encoder2
    H2 = X2
    for layer in range(self.n_layers2):
        H2 = self.__encoder(A2, H2, layer)
    # Final node representations
    self.H2 = H2
    # self.coef = self.weight - tf.matrix_diag(tf.diag_part(self.weight))
    self.HC2 = tf.matmul(self.coef, H2)
    H2 = self.HC2
    # Decoder2
    for layer in range(self.n_layers2 - 1, -1, -1):
        H2 = self.__decoder(H2, layer)
    X2_ = H2
    # Cluster logits of view 2.
    layer_flat2, num_features2 = flatten_layer(self.H2)
    layer_full2 = tf.layers.dense(inputs=layer_flat2, units=512, activation=None,
                                  kernel_initializer=layers.variance_scaling_initializer(dtype=tf.float32))
    self.z2 = tf.layers.dense(inputs=layer_full2, units=6, activation=None,
                              kernel_initializer=layers.variance_scaling_initializer(dtype=tf.float32))
    self.p = p
    self.Theta = Theta
    self.Labels = Labels
    # The reconstruction loss of node features
    self.features_loss = tf.reduce_sum(tf.pow(tf.subtract(X, X_), 2.0)) + tf.reduce_sum(
        tf.pow(tf.subtract(X2, X2_), 2.0))
    # Self-expressiveness loss: embeddings should match their coef-mixtures.
    self.SE_loss = 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.H, self.HC), 2)) \
                   + 0.5 * tf.reduce_sum(tf.pow(tf.subtract(self.H2, self.HC2), 2))
    # L1 regularization of the coefficient matrix.
    self.S_Regular = tf.reduce_sum(tf.pow(tf.abs(self.coef), 1.0))
    # Theta-weighted L1 penalty on the transposed coefficients.
    self.Cq_loss = tf.reduce_sum(tf.pow(tf.abs(tf.transpose(self.coef) * self.Theta), 1.0))
    # CrossEntropy Loss
    self.cross_entropy1 = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.z, labels=self.p))
    self.cross_entropy2 = tf.reduce_sum(tf.nn.softmax_cross_entropy_with_logits_v2(logits=self.z2, labels=self.p))
    self.dense_loss = self.cross_entropy1 + self.cross_entropy2
    # The reconstruction loss of the graph structure (view 1).
    self.S_emb = tf.nn.embedding_lookup(self.H, S)
    self.R_emb = tf.nn.embedding_lookup(self.H, R)
    structure_loss1 = -tf.log(tf.sigmoid(tf.reduce_sum(self.S_emb * self.R_emb, axis=-1)))
    structure_loss1 = tf.reduce_sum(structure_loss1)
    # The reconstruction loss of the graph structure (view 2).
    self.S_emb2 = tf.nn.embedding_lookup(self.H2, S2)
    self.R_emb2 = tf.nn.embedding_lookup(self.H2, R2)
    structure_loss2 = -tf.log(tf.sigmoid(tf.reduce_sum(self.S_emb2 * self.R_emb2, axis=-1)))
    structure_loss2 = tf.reduce_sum(structure_loss2)
    self.structure_loss = structure_loss1 + structure_loss2
    # Cross-view consistency: the two embeddings should agree.
    self.consistent_loss = tf.reduce_sum(tf.pow(tf.subtract(self.H, self.H2), 2.0))
    # Pre_train loss
    self.pre_loss = self.features_loss + self.lambda_ * self.structure_loss + 10*self.SE_loss + 0.01*self.consistent_loss + 1*self.S_Regular
    # Total loss
    self.loss = 1e-2*self.features_loss + self.lambda_ * self.structure_loss + 10*self.SE_loss + 1e-3*self.consistent_loss + 1*self.S_Regular + \
                5*self.Cq_loss + 5*self.dense_loss
    return self.pre_loss, self.loss, self.dense_loss, self.features_loss, self.structure_loss, self.SE_loss, self.coef, \
           self.consistent_loss, self.S_Regular, self.Cq_loss, self.H, self.H2
def __encoder(self, A, H, layer):
    """One encoder layer: linear map, then attention-weighted aggregation
    over graph A; the attention matrix is cached in self.C[layer]."""
    H = tf.matmul(H, self.W[layer])
    self.C[layer] = graph_attention_layer(A, H, self.v[layer], layer)
    return tf.sparse_tensor_dense_matmul(self.C[layer], H)
def __decoder(self, H, layer):
    """Mirror of __encoder: reuses the layer weights transposed and the
    attention matrix cached by the most recent encoder pass."""
    H = tf.matmul(H, self.W[layer], transpose_b=True)
    return tf.sparse_tensor_dense_matmul(self.C[layer], H)
def define_weights(self, hidden_dims):
    """Create one weight matrix and one pair of attention vectors per layer.

    W[i] maps hidden_dims[i] -> hidden_dims[i + 1]; Ws_att[i] holds the two
    attention parameter vectors of layer i, each (hidden_dims[i + 1], 1).
    """
    W = {i: tf.get_variable("W%s" % i,
                            shape=(hidden_dims[i], hidden_dims[i + 1]))
         for i in range(self.n_layers)}
    Ws_att = {i: {0: tf.get_variable("v%s_0" % i, shape=(hidden_dims[i + 1], 1)),
                  1: tf.get_variable("v%s_1" % i, shape=(hidden_dims[i + 1], 1))}
              for i in range(self.n_layers)}
    return W, Ws_att
return W, Ws_att
| xdweixia/SGCMC | 2021-TMM-SGCMC/Network/Graph_Attention_Encoder.py | Graph_Attention_Encoder.py | py | 6,941 | python | en | code | 31 | github-code | 90 |
4549378940 | '''
Напишите программу-калькулятор, которая поддерживает следующие операции: сложение, вычитание, умножение, деление и
возведение в степень. Программа должна выдавать сообщения об ошибке и продолжать работу при вводе некорректных данных,
делении на ноль и возведении нуля в отрицательную степень.
'''
def calc():
    """Interactive calculator REPL: prompts until the user types 'exit'.

    Each round reads two numbers and an operation, retrying the whole round
    whenever validation fails or the computation raises.
    """
    supported = ('+', '-', '*', '/', '**')
    op_prompt = 'Enter an operation {}: '.format(supported)
    while input('It`s a calculator. Press Enter to continue, type "exit" to quit: ') != 'exit':
        while True:
            try:
                first = int_check('Enter first number: ')
                op = operation_check(op_prompt, supported)
                second = int_check('Enter second number: ')
                calc_and_print_result(op, first, second)
                break
            except (ZeroDivisionError, ValueError, AssertionError) as error:
                print(error)
                print('Enter values again')
            finally:
                print()
def int_check(message):
    """
    Prompt the user and convert the reply to a real number.

    :param message: Prompt shown to the user
    :return: The entered value as a float
    :raises ValueError: If the entered value is not a real number
    """
    # NOTE: despite the name, this accepts any real number (float), not
    # only integers.
    try:
        value = float(input(message))
        return value
    except ValueError:
        raise ValueError('Entered value isn`t a real number')
def operation_check(message, correct_operations):
    """
    Prompt the user for an operation and validate it.

    :param message: Prompt shown to the user
    :param correct_operations: Tuple of supported operation symbols
    :return: The entered operation
    :raises AssertionError: If the entered operation is not supported
    """
    operation = input(message)
    # NOTE(review): assert statements are stripped under `python -O`;
    # raising ValueError would be more robust for input validation.
    assert operation in correct_operations, 'Incorrect operation'
    return operation
def calc_and_print_result(operation, a, b):
    """Compute `a <operation> b` and print a human-readable result line.

    Unknown operations print nothing; division by zero and 0 ** negative
    raise ZeroDivisionError before anything is printed.
    """
    dispatch = {
        '+': ('The result of adding {} and {} is {}', lambda x, y: x + y),
        '-': ('Subtraction result of {} and {} is {}', lambda x, y: x - y),
        '*': ('The result of multiplying {} and {} is {}', lambda x, y: x * y),
        '/': ('The result of dividing {} by {} is {}', lambda x, y: x / y),
        '**': ('The result of raising {} to the {} power is {}', lambda x, y: x ** y),
    }
    if operation in dispatch:
        template, apply_op = dispatch[operation]
        print(template.format(a, b, apply_op(a, b)))
if __name__ == '__main__':
calc()
| kds3000/Python_Essential_Homeworks | calculator_with_exceptions.py | calculator_with_exceptions.py | py | 3,481 | python | ru | code | 0 | github-code | 90 |
5143513774 | import random
# Read the search interval: the first line of the input file holds
# "start end" separated by a single space.
# NOTE(review): the file handle is never closed; a `with` block would be safer.
f = open("small_input.txt", "r")
interval = f.readline().split(" ")
start = int(interval[0])
end = int(interval[1])
def gcd(a, b):
    """Greatest common divisor of two non-negative integers.

    Iterative Euclidean algorithm: unlike the original recursive version it
    cannot hit Python's recursion limit for large inputs, needs no explicit
    a < b swap, and tolerates b == 0 (returns a) instead of dividing by zero.

    :param a: first integer
    :param b: second integer
    :return: gcd(a, b)
    """
    while b:
        a, b = b, a % b
    return a
def is_prime(n):
    """Return True iff n is prime, by trial division up to sqrt(n)."""
    if n <= 2:
        return n == 2
    if n % 2 == 0:
        return False
    limit = int(n ** 0.5)
    # Only odd divisors need checking once even n are excluded.
    return all(n % divisor != 0 for divisor in range(3, limit + 1, 2))
def is_Carmichael(n):
    """Return True iff n is a Carmichael number.

    A Carmichael number is an odd composite n with a**(n-1) == 1 (mod n)
    for every base a coprime to n (a Fermat pseudoprime to all bases).
    """
    # Carmichael numbers are odd composites, so n <= 2, even n and primes
    # are rejected up front.
    if n <= 2 or n % 2 == 0 or is_prime(n):
        return False
    # Only odd bases are tested: since n is odd and n - 1 is even, an even
    # coprime base a and the odd base n - a satisfy
    # (n - a)**(n-1) == a**(n-1) (mod n), so odd bases cover all cases.
    for a in range(3, n, 2):
        if gcd(a, n) == 1:
            if pow(a, n - 1, n) != 1:
                return False
    return True
def find_carmichael_numbers(start, end):
    """Return the Carmichael numbers in the half-open range [start, end)."""
    return [n for n in range(start, end) if is_Carmichael(n)]
res = find_carmichael_numbers(start, end)
print(res)
| nikitasadok/myparcs | serial.py | serial.py | py | 924 | python | en | code | 0 | github-code | 90 |
12885021818 | import requests
def test():
    """POST a sample listing to the local /predict endpoint and print the
    JSON reply."""
    payload = {
        'address': '504 W 35 St',
        'dogs_allowed': 1,
        'cats_allowed': 1,
        'trash_valet': 0,
        'ev_charging': 0,
        'washer_dryer': 1,
        'stainless_steel_appliances': 1,
        'bedrooms': 3,
        'bathrooms': 2,
        'sqft': 1248,
    }
    response = requests.post("http://localhost:5000/predict", json=payload,
                             headers={'Content-Type': 'application/json'})
    print(response.json())
test() | chrischen88/apartment-worth-app | backend/test.py | test.py | py | 481 | python | en | code | 0 | github-code | 90 |
33890839078 | #! /usr/bin/env morseexec
""" Basic MORSE simulation scene for <test> environment
Feel free to edit this template as you like!
"""
from morse.builder import *
from morse.sensors import *
from fourwd.builder.robots import Hummerscaled
import math
# Robot base: scaled-down Hummer model exposed over the ROS middleware.
robot = Hummerscaled()
robot.add_default_interface('ros')
scale = 0.2
robot.properties(scale=scale)
robot.properties(GroundRobot=True)
robot.name = "FourWD"
robot.scale = [scale, scale, scale]
# This is the wheel odometry tf
odom = Odometry()
odom.add_stream('ros', frame_id="odom", topic="wheel_odom", child_frame_id='wheel_odom') #child_frame_id='base_link')
odom.alter('Noise', pos_std = 0.1, rot_std = math.radians(5))
odom.translate(0.0, 0.0, 0.0)
odom.rotate(0.0, 0.0, 0)
# IMU sensor located inside the camera
imu = IMU()
imu.name = "imu"
imu.add_stream('ros', frame_id='camera_imu_optical_frame', topic='imu/data')
imu.alter('Noise', pos_std = 0.1, rot_std = math.radians(5))
imu.translate(0.6, 0.0, 1.2)
imu.rotate(0.0, -math.pi/2, 0.0)
# Add a pose sensor that exports the current location and orientation. Note that this is only for testing purposes
pose = Pose()
pose.add_stream('ros', frame_id="map", topic='pose')
# Laser scanner for 360 degree
laser_scanner = Hokuyo()
laser_scanner.name = "laser_scan"
laser_scanner.add_stream('ros', frame_id="laser", topic="scan")
laser_scanner.translate(0.0, 0.0, 1.6)
laser_scanner.properties(resolution=1.0) #0.5 before
laser_scanner.properties(laser_range=25.0)
laser_scanner.properties(scan_window=360)
laser_scanner.properties(Visible_arc=False)
laser_scanner.rotate(0.0, 0.0, 0.0)
laser_scanner.create_laser_arc()
# RGBD camera (front-facing depth + color streams)
kinect = Kinect()
kinect.depth_camera.add_stream('ros', frame_id="camera_depth_frame", topic='/camera/depth', topic_suffix='/image_raw')
kinect.video_camera.add_stream('ros', frame_id="camera_color_frame", topic='/camera/rgb', topic_suffix='/image_raw')
kinect.translate(0.6, 0, 1.2)
kinect.rotate(0.0, 0.0, 0)
# Rear camera
rgba_camera = VideoCamera() # Rear camera?
rgba_camera.add_stream('ros', frame_id="camera_rear", topic='/camera_rear/', topic_suffix='/image_raw') #TODO: the frame_id of the cameras need to be linked to /camera_link
rgba_camera.rotate(0, math.pi, math.pi)
rgba_camera.translate(-3.3, 0, 1)
# The list of the main methods to manipulate your components
# is here: http://www.openrobots.org/morse/doc/astable/user/builder_overview.html
# Initial robot placement in the environment.
robot.translate(-10.8, 4.5, 0.2)
robot.rotate(0.0, 0.0, 0.0)
robot.set_mass(1.5)
# Add a motion controller
# Check here the other available actuators:
# http://www.openrobots.org/morse/doc/stable/components_library.html#actuators
#
# 'morse add actuator <name> test' can help you with the creation of a custom
# actuator.
steerforce = SteerForce()
steerforce.add_stream('ros', 'fourwd.middleware.ros.ackermann_ros.AckermannROS', topic='cmd_vel')
# place your component at the correct location
steerforce.translate(0, 0, 0)
steerforce.rotate(0, 0, 0)
# Attach all sensors and the actuator to the robot.
robot.append(imu)
robot.append(laser_scanner)
robot.append(steerforce)
robot.append(odom)
robot.append(rgba_camera)
robot.append(kinect)
robot.append(pose)
# a basic keyboard controller for testing purposes
keyboard = Keyboard()
robot.append(keyboard)
# To ease development and debugging, we add a socket interface to our robot.
#
# Check here: http://www.openrobots.org/morse/doc/stable/user/integration.html
# the other available interfaces (like ROS, YARP...)
# set 'fastmode' to True to switch to wireframe mode
# Simulation environment: scene file, camera placement and world properties.
env = Environment('fourwd/environments/Final.blend',fastmode = False)
env.set_camera_location([-18.0, -6.7, 10.8])
env.set_camera_rotation([1.09, 0, -1.14])
env.properties(latitude=1.53, longitude=45.1, altitude=0.0)
env.set_viewport(viewport_shade='TEXTURED', clip_end=1000)
env.show_framerate(True)
env.add_stream('ros')
| eugeniu1994/Small-scale-self-driving-car | software_integration-master/morse/fourwd/default.py | default.py | py | 3,798 | python | en | code | 0 | github-code | 90 |
38744734103 | from __future__ import annotations
from collections import defaultdict
from typing import List, Tuple
import tree_path as tp
from tree_path import Search, Match, Tree, ParsedSentence, ParsedDoc
from valences import check_valences
class FullLemma:
    """A lemma together with extra constraints (e.g. 'fixed' dependents)
    that must also match for a node to count as this lemma.

    Instances are hashable and order-insensitive in their constraints, so
    they can safely be used as dict keys or set members.
    """
    def __init__(self, lemma: str, others: List[Search | str], others_rep: List[str] = None):
        self._lemma = lemma
        # Constraint strings are compiled to Search objects up front.
        self.others: List[Search] = [Search(o) if isinstance(o, str) else o for o in others]
        self.others_rep = others_rep
        if not self.others_rep:
            self.others_rep = [str(o) for o in self.others]

    def _key(self):
        """Canonical identity: the lemma plus the sorted constraint strings."""
        return (self._lemma,) + tuple(sorted(str(s) for s in self.others))

    def __hash__(self):
        # Same value as before: hash of the (lemma, *sorted constraints) tuple.
        return hash(self._key())

    def __eq__(self, other):
        # Compare canonical keys, not hash values: equal hashes do not imply
        # equal objects (a hash collision would make distinct lemmas "equal").
        if not isinstance(other, FullLemma):
            return False
        return self._key() == other._key()

    def __str__(self):
        return ' '.join([str(s) for s in ([self._lemma] + self.others_rep)])

    def __repr__(self):
        return repr(str(self))

    def matches(self, node: Tree) -> bool:
        """True iff `node` has this lemma and satisfies every constraint."""
        if node.data('lemma') != self._lemma:
            return False
        return all(search.find(node) for search in self.others)
def get_verb_lemma(node: Tree) -> FullLemma:
    """Build the FullLemma of a verb node: its lemma plus any 'fixed'
    multi-word components, with 'fi ' prepended for copular adjectives.

    :param node: the verb (or adjectival predicate) node
    :return: a FullLemma combining the lemma and its 'fixed' constraints
    """
    lemma = node.data('lemma')
    fixed = Search('/[deprel=fixed]').find(node)
    # Lemmas containing '-' come from bad parses and are dropped.
    # (The original also collected them into an unused `_bad_ones` local,
    # which has been removed.)
    fixed = [m for m in fixed if '-' not in m.node.sdata('lemma')]
    others = [Search('/[deprel=fixed lemma=%s]' % m.node.data('lemma')) for m in fixed]
    others_rep = [m.node.data('lemma') for m in fixed]
    # Adjectival predicates with a copula are lemmatized as 'fi <adj>'.
    if Search('.[upos=ADJ /[deprel=cop] ]').find(node):
        lemma = 'fi ' + lemma
    return FullLemma(lemma, others, others_rep)
# Aspectual verbs (begin / continue / finish): their gerund uses are
# filtered out by basic_filter.
aspectuale = ['începe', 'continua', 'termina']
# Reporting verbs that can introduce quoted speech via parataxis
# (consumed by quote_introduction_filter).
relatare_parataxa =\
    {'afirma', 'zice', 'autoriza', 'povesti', 'ruga', 'avea grijă', 'afla', 'preciza',
     'solicita', 'scrie', 'reieși', 'stabili', 'informa', 'crede', 'relata', 'începe',
     'întreba', 'releva', 'obliga', 'aminti', 'dori', 'raporta', 'anunța', 'continua',
     'spune', 'declara', 'aproba', 'prevedea', 'cugeta', 'răspunde', 'mulțumi',
     'șopti', 'sublinia', 'invita', 'indica', 'descoperi', 'admite', 'urma', 'răcni',
     'repeta', 'explica', 'dezvălui', 'prefera', 'mărturisi', 'avertiza', 'recunoaște',
     'conveni', 'observa', 'îndemna', 'da seamă', 'asigura', }
# Search expression fragments combined by basic_filter: non-fixed,
# non-participle verbs; supines only when they are clausal complements.
basic_filter_expr = 'upos=VERB & !deprel=fixed & !misc.Mood=Part'
supine_filter_expr = '!misc.Mood=Supine | (misc.Mood=Supine & deprel=ccomp)'
def basic_filter(node : Tree) -> bool:
# aspectual gerunds
if node.data('lemma') in aspectuale and Search('.[misc.Mood=Ger]').find(node):
return False
return bool(Search('.[%s & (%s) ]' % (basic_filter_expr, supine_filter_expr)).find(node))
# _val = lambda s : list(s)[0]
def quote_introduction_filter(node : Tree) -> bool:
"""False if verb is introducing a quote, otherwise true """
if not node.sdata('misc.FullLemma'):
raise Exception('Node %s (%s) does not have misc.FullLemma' % (node.sdata('form'), node.sdata('id')))
if node.sdata('misc.FullLemma') not in relatare_parataxa:
return True
if node.data('deprel') == 'parataxis': return False
parataxis = Search('/[deprel=parataxis]').find(node)
if not parataxis: return True
# is it a line heading, like (e), 3., or a parenthesis?
parataxis = parataxis[0].node
if parataxis.data('misc.FullLemma') and parataxis.sdata('misc.FullLemma') in relatare_parataxa:
return True
proj = parataxis.projection_nodes()
if proj[0].data('xpos') in ['COLON', 'QUOT', 'DBLQ']:
return False
# 1), 1., (e), etc
if proj[0].data('upos') in ['PUNCT', 'NUM'] and proj[-1].data('xpos') in ('RPAR', 'PERIOD') or \
proj[0].data('id') == '1' and proj[-1].data('xpos') in ('RPAR', 'PERIOD'):
return True
return False
def basic_next_word_filter(node : Tree) -> bool:
"""Check if next word is an infinitive or the particle _să_.
False if this is the case, True otherwise"""
proj = node.projection_nodes()
if proj[-1] is node: return True # can't tell
next = proj[proj.index(node)+1]
if next.data('form') == 'să' or \
Search('.[(upos=VERB & misc.Mood=Inf) | (upos=AUX & feats.VerbForm=Inf)]').find(next) \
or Search('.[lemma=a upos=PART]').find(next):
return False
return True
| serban-hartular/UD_Search | valences/verb_lemma.py | verb_lemma.py | py | 4,661 | python | en | code | 0 | github-code | 90 |
18509983979 | s = input()
if s[0] == 'A':
t = ''
c_cnt = 0
for i in range(1, len(s)):
if s[i] != 'C':
t += s[i]
else:
c_cnt += 1
if t.islower() and 'C' in s[2:-1] and c_cnt == 1:
print('AC')
else:
print('WA')
else:
print('WA')
| Aasthaengg/IBMdataset | Python_codes/p03289/s376928585.py | s376928585.py | py | 253 | python | en | code | 0 | github-code | 90 |
30786998634 | """
FILE IO
"""
from translate import Translator
translator = Translator(to_lang='fr')
try:
with open('translate.txt', mode='r') as my_file:
# mode='r+' to read and write
# mode='w' to write and create a new folder if it doens't exist with specific name
# mode='a' to read and append
# print(my_file.read()) # cu read citim textul
# my_file.seek(0) # muta cursorul de citit de la 0 pentru ca read va muta cursorul mereu
# print(my_file.read())
# print(my_file.readlines()) # sau punem pentru a citi toate liniile
# my_file.close() nu mai e nevoie sa dai close daca pui with
text = my_file.read()
translation = translator.translate(text)
with open('/test-fr.txt', mode='w') as file_fr:
file_fr.write(translation)
except FileNotFoundError as err:
print(f'File not exists!')
raise err
| AntonioIonica/Automation_testing | exercices_todo/text_test.py | text_test.py | py | 898 | python | en | code | 0 | github-code | 90 |
6859034302 | #https://www.acmicpc.net/problem/1931
times=[]
for i in range(0,int(input(""))):
times.append([int(i) for i in input('').split()])
s=len(times)
for i in range(0,s):
for j in range(i+1,s):
if times[i][1]>times[j][1]:
t=times[i]
times[i]=times[j]
times[j]=t
b=0
n=1
for i in range(0,len(times)):
if times[b][1]<=times[i][0]:
b=i
n+=1
print(n)
| dltbwoddl/Algorithmuslearning | 그리디 알고리즘/회의실배정.py | 회의실배정.py | py | 431 | python | en | code | 0 | github-code | 90 |
13090400547 | import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import train_test_split
from sklearn.metrics import mean_squared_error
# Generar datos de ejemplo
X = np.random.randn(100, 1) # 100 ejemplos con 1 variable independiente
y = 2 * X[:, 0] + 3 + np.random.randn(100) # Relacion lineal con ruido
# Dividir los datos en conjuntos de entrenamiento y prueba
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.2)
# Crear un modelo de regresion lineal
model = LinearRegression()
# Ajustar el modelo a los datos de entrenamiento
model.fit(X_train, y_train)
# Realizar predicciones
y_pred = model.predict(X_test)
# Calcular el error cuadratico medio
mse = mean_squared_error(y_test, y_pred)
print("Coeficientes: ", model.coef_)
print("Intercepto: ", model.intercept_)
print("Error cuadratico medio: ", mse)
# Visualizar datos y ajuste lineal
# font = {'family': 'serif',
# # 'serif': ['CMR10'],
# 'serif': ['Latin Modern Roman'],
# 'weight': 'normal',
# 'size': 14}
# plt.rc('font', **font)
plt.scatter(X_test, y_test, label='Datos')
plt.plot(X_test, y_pred, color='red', label='Ajuste lineal')
plt.xlabel('Variable independiente ($X$)')
plt.ylabel('Variable dependiente ($y$)')
plt.legend()
plt.show()
| JJSirius/MLLab | Lab02/regresion.py | regresion.py | py | 1,332 | python | es | code | 1 | github-code | 90 |
36886514536 | import requests
import pandas
import scipy
import numpy
import sys
import pandas as pd
TRAIN_DATA_URL = "https://storage.googleapis.com/kubric-hiring/linreg_train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/kubric-hiring/linreg_test.csv"
def transform(dat):
dat = dat.T.reset_index()
dat.columns = ['area','price']
dat = dat.iloc[1:]
dat['area'] = pd.to_numeric(dat['area'], downcast='float')
dat['price'] = pd.to_numeric(dat['price'], downcast='float')
return dat
def get_data():
TRAIN_DATA_URL = "https://storage.googleapis.com/kubric-hiring/linreg_train.csv"
TEST_DATA_URL = "https://storage.googleapis.com/kubric-hiring/linreg_test.csv"
train = transform(pd.read_csv(TRAIN_DATA_URL))
test = transform(pd.read_csv(TEST_DATA_URL))
train_x = train['area'].values
train_y = train['price'].values
test_x = test['area'].values
test_y = test['price'].values
return train_x, test_x, train_y, test_y
def predict_price(area) -> float:
train_x, test_x, train_y, test_y = get_data()
# YOUR IMPLEMENTATION HERE
pol = numpy.polyfit(train_x,train_y,1)
poly = numpy.poly1d(pol)
# print(poly)
preds = poly(area)
return preds
if __name__ == "__main__":
# DO NOT CHANGE THE FOLLOWING CODE
from data import validation_data
areas = numpy.array(list(validation_data.keys()))
prices = numpy.array(list(validation_data.values()))
predicted_prices = predict_price(areas)
rmse = numpy.sqrt(numpy.mean((predicted_prices - prices) ** 2))
try:
assert rmse < 170
except AssertionError:
print(f"Root mean squared error is too high - {rmse}. Expected it to be under 170")
sys.exit(1)
print(f"Success. RMSE = {rmse}")
| Piperidine/Anvay_Varerkar_Kubric | regression.py | regression.py | py | 1,816 | python | en | code | 0 | github-code | 90 |
74744099817 | import gc
from collections import defaultdict
from functools import partial
from time import time
from typing import Dict, NamedTuple, Generator, Optional, Iterator, Tuple, Union
import bpy
from sverchok.data_structure import post_load_call
from sverchok.core.events import TreeEvent, GroupEvent
from sverchok.utils.logging import debug, catch_log_error, log_error
from sverchok.utils.tree_structure import Tree, Node
from sverchok.utils.handle_blender_data import BlTrees, BlTree
from sverchok.utils.profile import profile
class TreeHandler:
@staticmethod
def send(event: TreeEvent):
# this should be first other wise other instructions can spoil the node statistic to redraw
if NodesUpdater.is_running():
if event.cancel:
NodesUpdater.cancel_task()
else:
return # ignore the event
# frame update
# This event can't be handled via NodesUpdater during animation rendering because new frame change event
# can arrive before timer finishes its tusk. Or timer can start working before frame change is handled.
if event.type == TreeEvent.FRAME_CHANGE:
ContextTrees.mark_nodes_outdated(event.tree, event.updated_nodes)
list(global_updater(event.type))
return
# mark given nodes as outdated
elif event.type == TreeEvent.NODES_UPDATE:
ContextTrees.mark_nodes_outdated(event.tree, event.updated_nodes)
# it will find changes in tree topology and mark related nodes as outdated
elif event.type == TreeEvent.TREE_UPDATE:
ContextTrees.mark_tree_outdated(event.tree)
# force update
elif event.type == TreeEvent.FORCE_UPDATE:
ContextTrees.reset_data(event.tree)
event.tree['FORCE_UPDATE'] = True
# Unknown event
else:
raise TypeError(f'Detected unknown event - {event}')
# Add update tusk for the tree
NodesUpdater.add_task(event)
@staticmethod
def get_error_nodes(bl_tree) -> Iterator[Optional[Exception]]:
"""Return map of bool values to group tree nodes where node has error if value is True"""
for node in bl_tree.nodes:
yield NodesStatuses.get(node).error
@staticmethod
def get_update_time(bl_tree) -> Iterator[Optional[float]]:
for node in bl_tree.nodes:
yield NodesStatuses.get(node).update_time
@staticmethod
def get_cum_time(bl_tree) -> Iterator[Optional[float]]:
cum_time_nodes = ContextTrees.calc_cam_update_time(bl_tree)
for node in bl_tree.nodes:
yield cum_time_nodes.get(node)
def tree_event_loop(delay):
"""Sverchok event handler"""
with catch_log_error():
if NodesUpdater.is_running():
NodesUpdater.run_task()
elif NodesUpdater.has_task(): # task should be run via timer only https://developer.blender.org/T82318#1053877
NodesUpdater.start_task()
NodesUpdater.run_task()
return delay
tree_event_loop = partial(tree_event_loop, 0.01)
class NodesUpdater:
"""It can update only one tree at a time"""
_event: Union[TreeEvent, GroupEvent] = None
_handler: Optional[Generator] = None
_node_tree_area: Optional[bpy.types.Area] = None
_last_node: Optional[Node] = None
_start_time: float = None
@classmethod
def add_task(cls, event: Union[TreeEvent, GroupEvent]):
"""It can handle only one tree at a time"""
if cls.is_running():
raise RuntimeError(f"Can't update tree: {event.tree.name}, already updating tree: {cls._event.tree.name}")
cls._event = event
@classmethod
def start_task(cls):
changed_tree = cls._event.tree
if cls.is_running():
raise RuntimeError(f'Tree "{changed_tree.name}" already is being updated')
cls._handler = global_updater(cls._event.type)
# searching appropriate area index for reporting update progress
for area in bpy.context.screen.areas:
if area.ui_type == 'SverchCustomTreeType':
path = area.spaces[0].path
if path and path[-1].node_tree.name == changed_tree.name:
cls._node_tree_area = area
break
gc.disable()
cls._start_time = time()
@classmethod
@profile(section="UPDATE")
def run_task(cls):
try:
# handle un-cancellable events
if cls._event.type == TreeEvent.FRAME_CHANGE:
while True:
next(cls._handler)
# handler cancellable events
else:
if cls._last_node:
cls._last_node.bl_tween.set_temp_color()
start_time = time()
while (time() - start_time) < 0.15: # 0.15 is max timer frequency
node = next(cls._handler)
cls._last_node = node
node.bl_tween.set_temp_color((0.7, 1.000000, 0.7))
cls._report_progress(f'Pres "ESC" to abort, updating node "{node.name}"')
except StopIteration:
cls.finish_task()
@classmethod
def debug_run_task(cls):
"""Color updated nodes for a few second after all"""
try:
start_time = time()
while (time() - start_time) < 0.15: # 0.15 is max timer frequency
node = next(cls._handler)
node.bl_tween.use_custom_color = True
node.bl_tween.color = (0.7, 1.000000, 0.7)
cls._last_node = node
cls._report_progress(f'Pres "ESC" to abort, updating node "{node.name}"')
except StopIteration:
if 'node' in vars():
return
from time import sleep
sleep(2)
cls.finish_task()
@classmethod
def cancel_task(cls):
try:
cls._handler.throw(CancelError)
except (StopIteration, RuntimeError):
pass
finally: # protection from the task to be stack forever
cls.finish_task()
@classmethod
def finish_task(cls):
try:
gc.enable()
debug(f'Global update - {int((time() - cls._start_time) * 1000)}ms')
cls._report_progress()
finally:
cls._event, cls._handler, cls._node_tree_area, cls._last_node, cls._start_time = [None] * 5
@classmethod
def has_task(cls) -> bool:
return cls._event is not None
@classmethod
def is_running(cls) -> bool:
return cls._handler is not None
@classmethod
def _report_progress(cls, text: str = None):
if cls._node_tree_area:
cls._node_tree_area.header_text_set(text)
def global_updater(event_type: str) -> Generator[Node, None, None]:
"""Find all Sverchok main trees and run their handlers and update their UI if necessary
update_ui of group trees will be called only if they opened in one of tree editors
update_ui of main trees will be called if they are opened or was changed during the update event"""
# grab trees from active node group editors
trees_ui_to_update = set()
if bpy.context.screen: # during animation rendering can be None
for area in bpy.context.screen.areas:
if area.ui_type == BlTrees.MAIN_TREE_ID:
if area.spaces[0].path: # filter editors without active tree
trees_ui_to_update.add(area.spaces[0].path[-1].node_tree)
for bl_tree in BlTrees().sv_main_trees:
was_changed = False
# update only trees which should be animated (for performance improvement in case of many trees)
if event_type == TreeEvent.FRAME_CHANGE:
if bl_tree.sv_animate:
was_changed = yield from tree_updater(bl_tree)
# tree should be updated any way
elif event_type == TreeEvent.FORCE_UPDATE and 'FORCE_UPDATE' in bl_tree:
del bl_tree['FORCE_UPDATE']
was_changed = yield from tree_updater(bl_tree)
# this seems the event upon some changes in the tree, skip tree if the property is switched off
else:
if bl_tree.sv_process:
was_changed = yield from tree_updater(bl_tree)
# it has sense to call this here if you press update all button or creating group tree from selected
if was_changed:
bl_tree.update_ui() # this only will update UI of main trees
trees_ui_to_update.discard(bl_tree) # protection from double updating
# this will update all opened trees (in group editors)
# regardless whether the trees was changed or not, including group nodes
for bl_tree in trees_ui_to_update:
args = [bl_tree.get_update_path()] if BlTree(bl_tree).is_group_tree else []
bl_tree.update_ui(*args)
def tree_updater(bl_tree) -> Generator[Node, None, bool]:
tree = ContextTrees.get(bl_tree)
tree_output_changed = False
for node in tree.sorted_walk(tree.output_nodes):
can_be_updated = all(n.is_updated for n in node.last_nodes)
if not can_be_updated:
# here different logic can be implemented but for this we have to know if is there any output of the node
# we could leave the node as updated and don't broke work of the rest forward nodes
# but if the node does not have any output all next nodes will gen NoDataError what is horrible
node.is_updated = False
node.is_output_changed = False
continue
if hasattr(node.bl_tween, 'updater'):
updater = group_node_updater(node)
elif hasattr(node.bl_tween, 'process'):
updater = node_updater(node)
else:
updater = empty_updater(node, error=None)
# update node with sub update system, catch statistic
start_time = time()
node_error = yield from updater
update_time = (time() - start_time)
if node.is_output_changed or node_error:
stat = NodeStatistic(node_error, None if node_error else update_time)
NodesStatuses.set(node.bl_tween, stat)
tree_output_changed = True
return tree_output_changed
class ContextTrees:
"""It keeps trees with their states"""
_trees: Dict[str, Tree] = dict()
@classmethod
def get(cls, bl_tree):
"""Return caught tree or new if the tree was not build yet"""
tree = cls._trees.get(bl_tree.tree_id)
# new tree, all nodes are outdated
if tree is None:
tree = Tree(bl_tree)
cls._trees[bl_tree.tree_id] = tree
# topology of the tree was changed and should be updated
elif not tree.is_updated:
tree = cls._update_tree(bl_tree)
cls._trees[bl_tree.tree_id] = tree
return tree
@classmethod
def _update_tree(cls, bl_tree):
"""
This method will generate new tree, copy is_updates status from previous tree
and update 'is_input_changed' node attribute according topological changes relatively previous call
Two reasons why always new tree is generated - it's simpler and new tree keeps fresh references to the nodes
"""
new_tree = Tree(bl_tree)
# copy is_updated attribute
if new_tree.id in cls._trees:
old_tree = cls._trees[new_tree.id]
for node in new_tree.nodes:
if node.name in old_tree.nodes:
node.is_updated = old_tree.nodes[node.name].is_updated
# update is_input_changed attribute
cls._update_topology_status(new_tree)
return new_tree
@classmethod
def mark_tree_outdated(cls, bl_tree):
"""Whenever topology of a tree is changed this method should be called."""
tree = cls._trees.get(bl_tree.tree_id)
if tree:
tree.is_updated = False
@classmethod
def mark_nodes_outdated(cls, bl_tree, bl_nodes):
"""It will try to mark given nodes as to be recalculated.
If node won't be found status of the tree will be changed to outdated"""
if bl_tree.tree_id not in cls._trees:
return # all nodes will be outdated either way when the tree will be recreated (nothing to do)
tree = cls._trees[bl_tree.tree_id]
for bl_node in bl_nodes:
try:
tree.nodes[bl_node.name].is_updated = False
# it means that generated tree does no have given node and should be recreated by next request
except KeyError:
tree.is_updated = False
@classmethod
def reset_data(cls, bl_tree=None):
"""
Should be called upon loading new file, other wise it can lead to errors and even crash
Also according the fact that trees have links to real blender nodes
it is also important to call this method upon undo method otherwise errors and crashes
Also single tree can be added, in this case only it will be deleted
(it's going to be used in force update)
"""
if bl_tree and bl_tree.tree_id in cls._trees:
del cls._trees[bl_tree.tree_id]
else:
cls._trees.clear()
@classmethod
def calc_cam_update_time(cls, bl_tree) -> dict:
cum_time_nodes = dict()
if bl_tree.tree_id not in cls._trees:
return cum_time_nodes
tree = cls._trees[bl_tree.tree_id]
for node in tree.sorted_walk(tree.output_nodes):
update_time = NodesStatuses.get(node.bl_tween).update_time
if update_time is None: # error node?
cum_time_nodes[node.bl_tween] = None
continue
if len(node.last_nodes) > 1:
cum_time = sum(NodesStatuses.get(n.bl_tween).update_time for n in tree.sorted_walk([node])
if NodesStatuses.get(n.bl_tween).update_time is not None)
else:
cum_time = sum(cum_time_nodes.get(n.bl_tween, 0) for n in node.last_nodes) + update_time
cum_time_nodes[node.bl_tween] = cum_time
return cum_time_nodes
@classmethod
def _update_topology_status(cls, new_tree: Tree):
"""Copy link node status by comparing with previous tree and save current"""
if new_tree.id in cls._trees:
old_tree = cls._trees[new_tree.id]
new_links = new_tree.links - old_tree.links
for link in new_links:
if link.from_node.name in old_tree.nodes:
from_old_node = old_tree.nodes[link.from_node.name]
from_old_socket = from_old_node.get_output_socket(link.from_socket.identifier)
has_old_from_socket_links = from_old_socket.links if from_old_socket is not None else False
else:
has_old_from_socket_links = False
# this is only because some nodes calculated data only if certain output socket is connected
# ideally we would not like to make previous node outdated, but it requires changes in many nodes
if not has_old_from_socket_links:
link.from_node.is_input_changed = True
else:
link.to_node.is_input_changed = True
removed_links = old_tree.links - new_tree.links
for link in removed_links:
if link.to_node in new_tree.nodes:
new_tree.nodes[link.to_node.name].is_input_changed = True
class NodeStatistic(NamedTuple):
"""
Statistic should be kept separately for each node
because each node can have 10 or even 100 of different statistic profiles according number of group nodes using it
"""
error: Optional[Exception] = None
update_time: float = None # sec
class NodesStatuses:
"""It keeps node attributes"""
NodeId = str
_statuses: Dict[NodeId, NodeStatistic] = defaultdict(NodeStatistic)
@classmethod
def get(cls, bl_node) -> NodeStatistic:
return cls._statuses[bl_node.node_id]
@classmethod
def set(cls, bl_node, stat: NodeStatistic):
node_id = bl_node.node_id
cls._statuses[node_id] = stat
@classmethod
def reset_data(cls):
"""This method should be called before opening new file to free all statistic data"""
cls._statuses.clear()
class CancelError(Exception):
"""Aborting tree evaluation by user"""
def node_updater(node: Node, *args) -> Generator[Node, None, Optional[Exception]]:
"""The node should has process method, all previous nodes should be updated"""
node_error = None
previous_nodes_are_changed = any(n.is_output_changed for n in node.last_nodes)
should_be_updated = not node.is_updated or node.is_input_changed or previous_nodes_are_changed
node.is_output_changed = False # it should always False unless the process method was called
node.is_input_changed = False # if node wont be able to handle new input it will be seen in its update status
if should_be_updated:
try:
yield node
node.bl_tween.process(*args)
node.is_updated = True
node.is_output_changed = True
except CancelError as e:
node.is_updated = False
node_error = e
except Exception as e:
node.is_updated = False
log_error(e)
node_error = e
return node_error
def group_node_updater(node: Node) -> Generator[Node, None, Tuple[bool, Optional[Exception]]]:
"""The node should have updater attribute"""
previous_nodes_are_changed = any(n.is_output_changed for n in node.last_nodes)
should_be_updated = (not node.is_updated or node.is_input_changed or previous_nodes_are_changed)
yield node # yield groups node so it be colored by node Updater if necessary
updater = node.bl_tween.updater(is_input_changed=should_be_updated)
is_output_changed, out_error = yield from updater
node.is_input_changed = False
node.is_updated = not out_error
node.is_output_changed = is_output_changed
return out_error
def empty_updater(node: Node = None, **kwargs):
"""Reroutes, frame nodes, empty updaters which do nothing, set node in correct state
returns given kwargs (only their values) like error=None, is_updated=True"""
if node: # ideally we would like always get first argument as node but group updater does not posses it
previous_nodes_are_changed = any(n.is_output_changed for n in node.last_nodes)
should_be_updated = not node.is_updated or node.is_input_changed or previous_nodes_are_changed
node.is_input_changed = False # if node wont be able to handle new input it will be seen in its update status
node.is_updated = True
node.is_output_changed = True if should_be_updated else False
return tuple(kwargs.values()) if len(kwargs) > 1 else next(iter(kwargs.values()))
yield
@post_load_call
def post_load_register():
# when new file is loaded all timers are unregistered
# to make them persistent the post load handler should be used
# but it's also is possible that the timer was registered during registration of the add-on
if not bpy.app.timers.is_registered(tree_event_loop):
bpy.app.timers.register(tree_event_loop)
def register():
"""Registration of Sverchok event handler"""
# it appeared that the timers can be registered during the add-on initialization
# The timer should be registered here because post_load_register won't be called when an add-on is enabled by user
bpy.app.timers.register(tree_event_loop)
def unregister():
bpy.app.timers.unregister(tree_event_loop)
| thatboyjake/.config | blender/3.0/scripts/addons/sverchok-master/core/main_tree_handler.py | main_tree_handler.py | py | 19,812 | python | en | code | 0 | github-code | 90 |
70202253097 | import numpy as np
from config import GlobalConfig
from UTIL.colorful import *
from UTIL.tensor_ops import my_view, __hash__, repeat_at, gather_righthand
from MISSION.uhmap.actset_lookup import encode_action_as_digits
from .foundation import AlgorithmConfig
from .cython_func import roll_hisory
from .hete_assignment import select_nets_for_shellenv
class ShellEnvConfig:
add_avail_act = False
class ActionConvertLegacy():
def __init__(self, SELF_TEAM_ASSUME, OPP_TEAM_ASSUME, OPP_NUM_ASSUME) -> None:
self.SELF_TEAM_ASSUME = SELF_TEAM_ASSUME
self.OPP_TEAM_ASSUME = OPP_TEAM_ASSUME
self.OPP_NUM_ASSUME = OPP_NUM_ASSUME
# (main_cmd, sub_cmd, x=None, y=None, z=None, UID=None, T=None, T_index=None)
self.dictionary_args = [
('N/A', 'N/A', None, None, None, None, None, None), # 0
('Idle', 'DynamicGuard', None, None, None, None, None, None), # 1
('Idle', 'StaticAlert', None, None, None, None, None, None), # 2
('Idle', 'AsFarAsPossible', None, None, None, None, None, None), # 4
('Idle', 'StayWhenTargetInRange', None, None, None, None, None, None), # 5
('SpecificMoving', 'Dir+X', None, None, None, None, None, None), # 7
('SpecificMoving', 'Dir+Y', None, None, None, None, None, None), # 8
('SpecificMoving', 'Dir-X', None, None, None, None, None, None), # 9
('SpecificMoving', 'Dir-Y', None, None, None, None, None, None), # 10
]
for i in range(self.OPP_NUM_ASSUME):
self.dictionary_args.append( ('SpecificAttacking', 'N/A', None, None, None, None, OPP_TEAM_ASSUME, i) )
def convert_act_arr(self, type, a):
if type == 'RLA_UAV_Support':
args = self.dictionary_args[a]
# override wrong actions
if args[0] == 'SpecificAttacking':
return encode_action_as_digits('N/A', 'N/A', None, None, None, None, None, None)
# override incorrect actions
if args[0] == 'Idle':
return encode_action_as_digits('Idle', 'StaticAlert', None, None, None, None, None, None)
return encode_action_as_digits(*args)
else:
return encode_action_as_digits(*self.dictionary_args[a])
def get_tp_avail_act(self, type):
DISABLE = 0
ENABLE = 1
n_act = len(self.dictionary_args)
ret = np.zeros(n_act) + ENABLE
for i in range(n_act):
args = self.dictionary_args[i]
# for all kind of agents
if args[0] == 'PatrolMoving': ret[i] = DISABLE
if type == 'RLA_UAV_Support':
if args[0] == 'PatrolMoving': ret[i] = DISABLE
if args[0] == 'SpecificAttacking': ret[i] = DISABLE
if args[0] == 'Idle': ret[i] = DISABLE
if args[1] == 'StaticAlert': ret[i] = ENABLE
return ret
def confirm_parameters_are_correct(self, team, agent_num, opp_agent_num):
assert team == self.SELF_TEAM_ASSUME
assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1
assert self.SELF_TEAM_ASSUME + self.OPP_TEAM_ASSUME == 1
assert opp_agent_num == self.OPP_NUM_ASSUME
def count_list_type(x):
type_cnt = {}
for xx in x:
if xx not in type_cnt: type_cnt[xx] = 0
type_cnt[xx] += 1
return len(type_cnt)
class ShellEnvWrapper(object):
def __init__(self, n_agent, n_thread, space, mcv, rl_functional, alg_config, ScenarioConfig, team):
self.n_agent = n_agent
self.n_thread = n_thread
self.team = team
self.space = space
self.mcv = mcv
self.rl_functional = rl_functional
if GlobalConfig.ScenarioConfig.EntityOriented:
self.core_dim = GlobalConfig.ScenarioConfig.obs_vec_length
else:
self.core_dim = space['obs_space']['obs_shape']
self.n_entity_placeholder = alg_config.n_entity_placeholder
# whether to use avail_act to block forbiden actions
self.AvailActProvided = False
if hasattr(ScenarioConfig, 'AvailActProvided'):
self.AvailActProvided = ScenarioConfig.AvailActProvided
self.action_converter = ActionConvertLegacy(
SELF_TEAM_ASSUME=team,
OPP_TEAM_ASSUME=(1-team),
OPP_NUM_ASSUME=GlobalConfig.ScenarioConfig.N_AGENT_EACH_TEAM[1-team]
)
# heterogeneous agent types
agent_type_list = [a['type'] for a in GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list]
opp_type_list = [a['type'] for a in GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list if a['team']!=self.team]
self_type_list = [a['type'] for a in GlobalConfig.ScenarioConfig.SubTaskConfig.agent_list if a['team']==self.team]
def str_array_to_num(str_arr):
out_arr = []
buffer = {}
for str in str_arr:
if str not in buffer:
buffer[str] = len(buffer)
out_arr.append(buffer[str])
return out_arr
self.HeteAgentType = str_array_to_num(agent_type_list)
self.hete_type = np.array(self.HeteAgentType)[GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[team]]
self.n_hete_types = count_list_type(self.hete_type)
# check parameters
assert self.n_agent == len(self_type_list)
self.action_converter.confirm_parameters_are_correct(team, self.n_agent, len(opp_type_list))
self.patience = 2000
self.epsiode_cnt = 0
def cold_start_warmup(self, StateRecall):
self.agent_uid = GlobalConfig.ScenarioConfig.AGENT_ID_EACH_TEAM[self.team]
self.agent_type = [agent_meta['type']
for agent_meta in StateRecall['Latest-Team-Info'][0]['dataArr']
if agent_meta['uId'] in self.agent_uid]
if ShellEnvConfig.add_avail_act:
self.avail_act = np.stack(tuple(self.action_converter.get_tp_avail_act(tp) for tp in self.agent_type))
self.avail_act = repeat_at(self.avail_act, insert_dim=0, n_times=self.n_thread)
def interact_with_env(self, StateRecall):
# warm up at first execution
if not hasattr(self, 'agent_type'):
self.cold_start_warmup(StateRecall)
# action init to: -1
act = np.zeros(shape=(self.n_thread, self.n_agent), dtype=np.int) - 1
# read and reshape observation
obs = StateRecall['Latest-Obs']
obs = my_view(obs,[0, 0, -1, self.core_dim])
# mask out invalid observation with NaN
obs[(obs==0).all(-1)] = np.nan
# stopped env mask
P = StateRecall['ENV-PAUSE']
# running env mask
R = ~P
# reset env mask
RST = StateRecall['Env-Suffered-Reset']
# when needed, train!
if not StateRecall['Test-Flag']: self.rl_functional.train()
# if true: just experienced full reset on all episode, this is the first step of all env threads
if RST.all():
if AlgorithmConfig.allow_fast_test and GlobalConfig.test_only and (self.epsiode_cnt > GlobalConfig.report_reward_interval):
import sys
sys.exit(0)
self.epsiode_cnt += self.n_thread
# policy resonance
eprsn_yita = self.rl_functional.stage_planner.yita if AlgorithmConfig.policy_resonance else 0
EpRsn = np.random.rand(self.n_thread) < eprsn_yita
StateRecall['_EpRsn_'] = EpRsn
# heterogeneous agent identification
StateRecall['_hete_type_'] = repeat_at(self.hete_type, 0, self.n_thread)
# select static/frontier actor network
StateRecall['_hete_pick_'], StateRecall['_gp_pick_'] = select_nets_for_shellenv(
n_types=self.n_hete_types,
policy=self.rl_functional.policy,
hete_type_list=self.hete_type,
n_thread = self.n_thread,
n_gp=AlgorithmConfig.hete_n_net_placeholder,
testing=StateRecall['Test-Flag']
)
print([(t['win_rate'], t['ckpg_cnt']) for t in self.rl_functional.policy.ckpg_info])
# prepare observation for the real RL algorithm
I_StateRecall = {
'obs':obs[R],
'avail_act':self.avail_act[R],
'Test-Flag':StateRecall['Test-Flag'],
'_EpRsn_':StateRecall['_EpRsn_'][R],
'_hete_pick_':StateRecall['_hete_pick_'][R],
'_hete_type_':StateRecall['_hete_type_'][R],
'_gp_pick_':StateRecall['_gp_pick_'][R],
'threads_active_flag':R,
'Latest-Team-Info':StateRecall['Latest-Team-Info'][R],
}
# load available act to limit action space if possible
if self.AvailActProvided:
avail_act = np.array([info['avail-act'] for info in np.array(StateRecall['Latest-Team-Info'][R], dtype=object)])
I_StateRecall.update({'avail_act':avail_act})
# the real RL algorithm ! !
act_active, internal_recall = self.rl_functional.interact_with_env_genuine(I_StateRecall)
# get decision results
act[R] = act_active
# confirm actions are valid (satisfy 'avail-act')
if ShellEnvConfig.add_avail_act and self.patience>0:
self.patience -= 1
assert (gather_righthand(self.avail_act, repeat_at(act, -1, 1), check=False)[R]==1).all()
# translate action into ue4 tuple action
act_converted = np.array([[ self.action_converter.convert_act_arr(self.agent_type[agentid], act) for agentid, act in enumerate(th) ] for th in act])
# swap thread(batch) axis and agent axis
actions_list = np.swapaxes(act_converted, 0, 1)
# register callback hook
if not StateRecall['Test-Flag']:
StateRecall['_hook_'] = internal_recall['_hook_']
assert StateRecall['_hook_'] is not None
else:
if AlgorithmConfig.policy_matrix_testing:
StateRecall['_hook_'] = internal_recall['_hook_']
assert StateRecall['_hook_'] is not None
# all done
return actions_list, StateRecall
| binary-husky/unreal-map | PythonExample/hmp_minimal_modules/ALGORITHM/hete_league_onenet_fix/shell_env.py | shell_env.py | py | 10,724 | python | en | code | 145 | github-code | 90 |
18459888109 | import sys
sys.setrecursionlimit(10 ** 6)
def dfs(x, y, matrix, visited, isWhite):
    """Flood-fill from (x, y) over cells whose colours strictly alternate
    between '#' and '.', bumping the global black/white counters."""
    global black, white
    visited[y][x] = True
    height = len(matrix)
    width = len(matrix[0])
    for nx, ny in ((x + 1, y), (x - 1, y), (x, y - 1), (x, y + 1)):
        if not (0 <= nx < width and 0 <= ny < height):
            continue
        if visited[ny][nx]:
            continue
        cell = matrix[ny][nx]
        # only step onto the opposite colour of the current cell
        if not isWhite and cell == "#":
            continue
        if isWhite and cell == ".":
            continue
        if cell == "#":
            black += 1
        else:
            white += 1
        dfs(nx, ny, matrix, visited, not isWhite)
# Read the grid, then for every unvisited '#' cell flood-fill its
# alternating-colour component and accumulate black * white pairs.
h, w = map(int, input().split())
matrix = [input() for _ in range(h)]
ans = 0
visited = [[False] * w for _ in range(h)]
for i in range(h):
    for j in range(w):
        if matrix[i][j] == "#" and not visited[i][j]:
            black = 1
            white = 0
            dfs(j, i, matrix, visited, False)
            ans += black * white
print(ans)
| Aasthaengg/IBMdataset | Python_codes/p03157/s241642158.py | s241642158.py | py | 1,164 | python | en | code | 0 | github-code | 90 |
35349566743 | from unittest import TestCase, main
from project.hardware.hardware import Hardware
from project.software.express_software import ExpressSoftware
class TestHardware(TestCase):
    """Unit tests for Hardware construction, install and uninstall."""

    def setUp(self) -> None:
        # fresh device for every test: name, type, capacity, memory
        self.hardware = Hardware("test", "Power", 100, 200)

    def test_initialization(self):
        self.assertEqual("test", self.hardware.name)
        self.assertEqual("Power", self.hardware.type)
        self.assertEqual(100, self.hardware.capacity)
        self.assertEqual(200, self.hardware.memory)
        self.assertListEqual([], self.hardware.software_components)

    def test_install_invalid_software_memory(self):
        software = ExpressSoftware("test", 75, 150)
        with self.assertRaises(Exception) as context:
            self.hardware.install(software)
        self.assertEqual("Software cannot be installed", str(context.exception))

    def test_install_invalid_software_capacity(self):
        software = ExpressSoftware("test", 150, 25)
        with self.assertRaises(Exception) as context:
            self.hardware.install(software)
        self.assertEqual("Software cannot be installed", str(context.exception))

    def test_install_software(self):
        software = ExpressSoftware("test", 75, 25)
        self.hardware.install(software)
        self.assertListEqual([software], self.hardware.software_components)
        self.assertEqual(software.memory_consumption, self.hardware.used_memory)
        self.assertEqual(software.capacity_consumption, self.hardware.used_capacity)

    def test_uninstall_software(self):
        software = ExpressSoftware("test", 75, 25)
        self.hardware.install(software)
        self.assertListEqual([software], self.hardware.software_components)
        self.hardware.uninstall(software)
        self.assertListEqual([], self.hardware.software_components)
# Allow running this test module directly: python test_hardware.py
if __name__ == "__main__":
    main()
| yetoshimo/python-oop | JC_20200816_Exam/tests/test_hardware.py | test_hardware.py | py | 2,028 | python | en | code | 0 | github-code | 90 |
class Solution(object):
    """LeetCode 90 -- Subsets II: all unique subsets of a list that may
    contain duplicates."""

    def subsetsWithDup(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        Sort a copy (so the caller's list is not mutated) and backtrack,
        skipping equal siblings at the same depth to avoid duplicates.
        """
        nums = sorted(nums)  # fix: do not sort the caller's list in place
        res = []
        self.dfs(0, nums, [], res)
        return res

    def dfs(self, level, nums, tmp, res):
        """Record the current partial subset, then extend it with each
        distinct candidate at index >= level."""
        res.append(tmp[:])
        for i in range(level, len(nums)):
            # skip duplicate values at the same tree depth
            if i > level and nums[i] == nums[i - 1]:
                continue
            tmp.append(nums[i])
            self.dfs(i + 1, nums, tmp, res)
            tmp.pop()
| cloi1994/session1 | Facebook/90.py | 90.py | py | 571 | python | en | code | 0 | github-code | 90 |
29542752585 | from django.urls import path
from . import views
from django.contrib.auth import views as auth_views
# URL routes for the shop app. Class-based views use .as_view();
# cart/order handlers are plain view functions.
urlpatterns = [
    # storefront home page
    path('',views.IndexView.as_view(),name="shop"),
    # full product catalogue
    path('products/', views.ProductListView.as_view(),name="productList"),
    # products filtered by category id
    # NOTE(review): 'caregories' looks like a typo for 'categories' --
    # fixing it would change the public URL, so it is only flagged here
    path('caregories/<int:idc>/products/', views.ProductsByCategoryView.as_view(),name="productByCetgorie"),
    # authentication
    path('register/', views.Register.as_view(), name='register'),
    path('login/', views.User_login.as_view(), name='login'),
    path('logout/', views.user_logout, name='logout'),
    # shopping-cart operations
    path('cart/add/<int:product_id>/', views.cart_add, name='cart_add'),
    path('cart/', views.cart_detail,name='cart_detail'),
    path('cart/remove/<int:product_id>/', views.cart_remove,name='cart_remove'),
    path('cart/update/<int:product_id>/', views.cart_update, name='cart_update'),
    # order checkout, search, and per-user order history
    path('create/', views.order_create, name='order_create'),
    path('rechercher/', views.rechercher, name='rechercher'),
    path('mescommandes/<int:user_id>/', views.order_history, name='history'),
]
| leriaetnasta/E-commerce-web-app | backend/shop/urls.py | urls.py | py | 1,063 | python | en | code | 0 | github-code | 90 |
5830171249 | OPERAND_SIGIL = '*'
# Single-character markers ("sigils"/"flags") used by the Cheffu grammar.
# NOTE(review): the exact parser semantics of each marker are defined
# elsewhere in the package; the names are kept self-describing here.
PSEUDO_OPERAND_FLAG = ':'
UNARY_OP_SIGIL = '='
BINARY_OP_SIGIL = '\\'
SIMULTANEOUS_OP_FLAG = '+'
MODIFIER_SIGIL = ','
ANNOTATION_SIGIL = ';'
# Character-class fragments (regex-style ranges) used to build token patterns.
ALPHA_CHARS = "A-Za-z"
NZ_DIGIT_CHARS = "1-9"
DIGIT_CHARS = "0" + NZ_DIGIT_CHARS
PHRASE_CHARS = "-' \"" + ALPHA_CHARS
STRING_CHARS = PHRASE_CHARS + DIGIT_CHARS + "[]#."
# Partitioning markers.
PARTITION_OP_SIGIL = '<'
PARTITION_A_PORTION_FLAG = '%'
PARTITION_B_PORTION_FLAG = '_'
STORED_OPERAND_SIGIL = '>'
# Numeric-quantity separators (mixed numbers, fractions, decimals, amounts).
MIXED_NUM_SEPARATOR = '_'
FRACTION_SEPARATOR = '/'
DECIMAL_SEPARATOR = '.'
AMOUNT_SIGIL = '@'
QUANTITY_RANGE_SEPARATOR = '~'
QUANTITY_RANGE_APPROX_FLAG = '~' | linclelinkpart5/cheffu-old | cheffu/constants.py | constants.py | py | 958 | python | en | code | 1 | github-code | 90 |
74809259177 |
def inMap(str):
    """Return True if *str* is one of the dictionary words.

    Note: the parameter shadows the builtin ``str``; the name is kept
    for backward compatibility with existing callers.
    """
    # set literal: O(1) membership instead of rebuilding a list and
    # scanning it linearly on every call
    return str in {'i', 'like', 'sam', 'sung', 'samsung', 'mobile',
                   'ice', 'cream', 'icecream', 'man', 'go', 'mango'}
def printList(ans):
    """Print every word in *ans* on one line, space-separated."""
    for word in ans:
        print(word, end=" ")
def isSegmented(input,startIndex,ans):
    """Recursively try to split *input* into dictionary words.

    On success the collected words in *ans* are printed (via printList)
    and True is returned; otherwise returns False.

    NOTE(review): startIndex is always 0 in practice -- each recursive
    call receives the *remaining* string but passes startIndex through
    unchanged, so the slices below are only correct because every call
    effectively starts at index 0.
    """
    if(len(input) == 0):
        # whole string consumed: every piece was a dictionary word
        printList(ans)
        return True
    for i in range(1,len(input)+1):
        # candidate prefix of length i and the rest of the string
        custString = input[startIndex:startIndex + i]
        remainString = input[startIndex+i : len(input)+1]
        ans.append(custString)
        if(inMap(custString) and isSegmented(remainString , startIndex,ans)):
            return True
        ans.pop()
    return False
# Read a string and report whether it can be split into dictionary words.
inputString = input()
ans = []
print('Yes' if isSegmented(inputString, 0, ans) else 'No')
3246767962 | def split_excel_equally(filepath, split_number):
try:
import pandas as pd
from time import strftime
file_location = '\\'.join(filepath.split('\\')[:-1])
log_list = [strftime("%d/%m/%Y %H:%M:%S") + '- Inside split_excel_equally']
df = pd.read_excel(filepath)
log_list.append(strftime("%d/%m/%Y %H:%M:%S") + '- Step 1: Input Excel is read')
limit = 0
df1 = pd.DataFrame() # Initialize DF
log_list.append(strftime("%d/%m/%Y %H:%M:%S") + "- Step 2 : DF is initialized")
log_list.append(strftime("%d/%m/%Y %H:%M:%S") + "- Step 3 : Loop through the split_number count begins")
for count in range(1, split_number + 1):
df1 = df1[0:0] # for making the DF null
if count != split_number:
for i in range(len(df) // split_number):
# df1 = df1.append(df.loc[[limit]].squeeze())
df1 = df1.append(df.loc[[limit]])
limit = limit + 1
else:
df1 = df[limit:] # For pushing the last batch of excel
# df1.to_excel(r"C:\Users\Dell\Documents\UiPath\Uipath_python\output" + str(count) + ".xlsx", index=False)
writer = pd.ExcelWriter(file_location + "\\output" + str(count) + ".xlsx", engine='xlsxwriter')
df1.to_excel(writer, sheet_name='output', index=False)
writer.save()
log_list.append(strftime("%d/%m/%Y %H:%M:%S") + "- Output file created : output" + str(count) + ".xlsx")
log_list.append(strftime("%d/%m/%Y %H:%M:%S") + "- Total number of records : " + str(len(df1)))
log_list.append(strftime("%d/%m/%Y %H:%M:%S") + "- File splitting completed successfully")
with open(file_location + '\\app_log.log', 'w+') as f:
for item in log_list:
f.write('%s\n' % item)
f.close()
return log_list
except Exception as e:
log_list.append("Exception caught : " + e)
return log_list
def split_excel_percent(filepath, percent_list):
    """Split the Excel file at *filepath* by the percentages in *percent_list*.

    Every output file except the last receives
    ``int(pct / 100 * len(df))`` rows; the last file receives all
    remaining rows. Writes ``output<N>.xlsx`` (N starting at 0) and
    ``app_log.log`` next to the input file, and returns the log lines.
    """
    import pandas as pd
    from time import strftime
    file_location = '\\'.join(filepath.split('\\')[:-1])
    df = pd.read_excel(filepath)
    log_list = [strftime("%d/%m/%Y %H:%M:%S") + " Step 1 : Input excel is read"]
    log_list.append(strftime("%d/%m/%Y %H:%M:%S") + " Step 2 : DF is initialized")
    log_list.append(strftime("%d/%m/%Y %H:%M:%S") + " Step 3 : Loop through the percent value count begins")
    limit = 0
    for count in range(len(percent_list)):
        if count != len(percent_list) - 1:
            # fix: slice directly instead of the removed
            # DataFrame.append(Series) build, which could mangle dtypes
            rows = int(int(percent_list[count]) / 100 * len(df))
            part = df.iloc[limit:limit + rows]
            limit += rows
        else:
            part = df.iloc[limit:]  # last slice takes whatever is left
        writer = pd.ExcelWriter(file_location + "\\output" + str(count) + ".xlsx", engine='xlsxwriter')
        part.to_excel(writer, sheet_name='output', index=False)
        writer.save()
        log_list.append(strftime("%d/%m/%Y %H:%M:%S") + " Output file created : output" + str(count) + ".xlsx")
        log_list.append(strftime("%d/%m/%Y %H:%M:%S") + " No. of records in the file : " + str(len(part)))
    # fix: the old message reported count - 1, i.e. two less than the
    # actual number of files written
    log_list.append(strftime("%d/%m/%Y %H:%M:%S") + " Total number of output files created : " + str(len(percent_list)))
    log_list.append(strftime("%d/%m/%Y %H:%M:%S") + " File splitting completed successfully")
    with open(file_location + '\\app_log.log', 'w+') as f:
        for item in log_list:
            f.write('%s\n' % item)
    return log_list
def filter_excel_manual(filepath, column_name, column_values):
    """Filter the Excel file at *filepath* on one column.

    column_name[0] is the column to filter on and column_name[-1] is a
    mode flag: '0' writes one combined file, anything else writes one
    file per value in *column_values*. Writes the output file(s) plus
    app_log.log next to the input file and returns the log lines.
    """
    import pandas as pd
    folder = '\\'.join(filepath.split('\\')[:-1])
    log_list = ['read df']
    frame = pd.read_excel(filepath)
    log_list.append('starting log')
    if int(column_name[-1]) == 0:
        # one combined output file containing every requested value
        log_list.append('Generating single file as ' + column_name[1] + 'is selected in Config')
        subset = frame[frame[column_name[0]].isin(column_values)]
        writer = pd.ExcelWriter(folder + "\\filtered_output.xlsx", engine='xlsxwriter')
        subset.to_excel(writer, sheet_name='output', index=False)
        writer.save()
        log_list.append('Output file generated : filtered_output.xlsx')
        log_list.append('Total records : ' + str(len(subset)))
    else:
        # one output file per requested value
        log_list.append('Generating multiple files as ' + column_name[1] + ' is selected in Config')
        count = 1
        for value in column_values:
            subset = frame[frame[column_name[0]] == value]
            writer = pd.ExcelWriter(folder + "\\filtered_output" + str(count) + ".xlsx", engine='xlsxwriter')
            subset.to_excel(writer, sheet_name='output', index=False)
            writer.save()
            log_list.append('Output file generated : ' + "filtered_output" + str(count) + ".xlsx")
            log_list.append('Total records : ' + str(len(subset)))
            count = count + 1
        log_list.append('Total files generated : ' + str(count - 1))
    with open(folder + '\\app_log.log', 'w+') as f:
        for item in log_list:
            f.write('%s\n' % item)
    return log_list
def filter_excel_sql(filepath, sql_query):
    """Run *sql_query* against the Excel file at *filepath*.

    Loads the spreadsheet into a SQLite table named ``temp_table``,
    executes the query, writes the result to ``filtered_output.xlsx``
    plus ``app_log.log`` next to the input file, and returns the log
    lines.
    """
    import sqlite3
    import pandas as pd
    from time import strftime
    file_location = '\\'.join(filepath.split('\\')[:-1])
    log_list = ['Python logs']
    con = sqlite3.connect(file_location + '\\database.db')
    log_list.append(strftime("%d/%m/%Y %H:%M:%S") + '- Connecting to staging database - Complete')
    try:
        df = pd.read_excel(filepath)
        log_list.append(strftime("%d/%m/%Y %H:%M:%S") + "- read excel to df - Complete")
        df.to_sql(name='temp_table', con=con, if_exists='replace', index=True)
        log_list.append(strftime("%d/%m/%Y %H:%M:%S") + '- Convert df to sql table - Complete')
        df_sql = pd.read_sql_query(sql_query, con)
        log_list.append(strftime("%d/%m/%Y %H:%M:%S") + '- Running the sql query - Complete')
        writer = pd.ExcelWriter(file_location + "\\filtered_output.xlsx", engine='xlsxwriter')
        df_sql.to_excel(writer, sheet_name='output', index=False)
        writer.save()
        log_list.append(strftime("%d/%m/%Y %H:%M:%S") + '- Converting the sql table to excel - Complete')
        log_list.append(strftime("%d/%m/%Y %H:%M:%S") + '- Number of records in the output file : ' + str(len(df_sql)))
        with open(file_location + '\\app_log.log', 'w+') as f:
            for item in log_list:
                f.write('%s\n' % item)
        con.commit()
    finally:
        # fix: the connection used to leak if any step above raised
        con.close()
    return log_list
| sumitmx/Uipath_Python | Uipath_python.py | Uipath_python.py | py | 7,547 | python | en | code | 0 | github-code | 90 |
28593346607 | from __future__ import print_function
import tensorflow as tf
# Fit y = a*x + b to the training data with gradient descent (TF1 graph API).
a = tf.get_variable("a", dtype=tf.float32, initializer=tf.constant(0.0))
b = tf.get_variable("b", dtype=tf.float32, initializer=tf.constant(0.0))
x = tf.placeholder(tf.float32)
linear_model = a*x + b
y = tf.placeholder(tf.float32)

# Sum-of-squares loss between model output and targets.
loss = tf.reduce_sum(tf.square(linear_model-y))
optimizer = tf.train.GradientDescentOptimizer(0.01)
train = optimizer.minimize(loss)

x_train = [1,2,3,4]
y_train=[0, -1, -2, -3]

with tf.Session() as sess:
    sess.run(tf.global_variables_initializer())
    for i in range(1000):
        sess.run(train, feed_dict={x:x_train, y:y_train})
    curr_a, curr_b, curr_loss = sess.run([a, b, loss], feed_dict={x:x_train, y:y_train})
    # fix: print() was given '%s' placeholders AND extra positional args,
    # so it printed the raw format string followed by the values;
    # interpolate explicitly instead
    print("a:%s, b:%s, loss:%s" % (curr_a, curr_b, curr_loss))
| jasonzhang2022/tfprac | get_started1.py | get_started1.py | py | 787 | python | en | code | 0 | github-code | 90 |
20767864911 | import torch
import torch.nn.init as init
import math
class Conv2dZ2P4(torch.nn.Module):
    """Group convolution lifting a plain (Z2) image to p4 feature maps.

    Input:  (B, in_channels, H, W)
    Output: (B, out_channels, 4, H', W') -- one slice per 90-degree rotation.
    """
    def __init__(self, in_channels, out_channels, kernel_size, g_type="p4",
                 dilation=1, groups=1, bias=False, device="cuda", dtype=None, *args, **kwargs):
        super().__init__()
        assert g_type == "p4" or g_type == "p4m"
        # define the layer weight
        w = torch.empty(out_channels, in_channels, kernel_size, kernel_size)
        # NOTE(review): Parameter(...).to(device) returns a plain tensor when
        # a cross-device copy happens -- confirm the parameter still registers
        # correctly on CUDA
        self.weight = torch.nn.Parameter(w).to(device)
        self.g_type = g_type
        # kernel expander: 4 rotations for p4, rotations + mirrors for p4m
        self.get_kernel = get_p8weight if self.g_type == "p4m" else get_p4weight
        self.gconv_dim = 8 if self.g_type == "p4m" else 4
        # extra positional/keyword args are forwarded verbatim to F.conv2d
        self.__args = args
        self.__kwargs = kwargs
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(out_channels))
        else:
            self.bias = None
        self.__weight_initialization()
    def forward(self, x):
        # expand the base kernel to all group orientations
        w = self.get_kernel(self.weight)
        # NOTE(review): padding_dim is computed but never used
        padding_dim = w.shape[-1] // 2
        y = torch.nn.functional.conv2d(x, w, *self.__args, **self.__kwargs)
        # unfold the orientation axis out of the channel axis
        # NOTE(review): hard-coded 4 here, but for g_type="p4m" gconv_dim is
        # 8 -- the p4m path looks inconsistent; verify before relying on it
        y = y.view(y.size(0), -1, 4, y.size(2), y.size(3))
        if self.bias is not None:
            y = y + self.bias.view(1, -1, 1, 1, 1)
        return y
    def __weight_initialization(self):
        # same scheme as torch.nn.Conv2d's default (kaiming uniform, a=sqrt(5))
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
class Conv2dP4P4(torch.nn.Module):
    """Group convolution mapping p4 feature maps to p4 feature maps.

    Input:  (B, in_channels, 4, H, W)
    Output: (B, out_channels, 4, H', W')
    """
    def __init__(self, in_channels, out_channels, kernel_size, g_type="p4", bias=False, device="cuda", *args, **kwargs):
        assert g_type == "p4" or g_type == "p4m"
        super().__init__()
        self.out_channels = out_channels
        # one kernel stack per input orientation (4 rotations)
        w = torch.empty(out_channels*4, in_channels, kernel_size, kernel_size)
        # NOTE(review): Parameter(...).to(device) returns a plain tensor when
        # a cross-device copy happens -- confirm parameter registration on CUDA
        self.weight = torch.nn.Parameter(w).to(device)
        self.g_type = g_type
        # NOTE(review): get_kernel is selected here but never used in forward
        self.get_kernel = get_p8weight if self.g_type == "p4m" else get_p4weight
        self.gconv_dim = 8 if self.g_type == "p4m" else 4
        # extra positional/keyword args are forwarded verbatim to F.conv2d
        self.__args = args
        self.__kwargs = kwargs
        if bias:
            self.bias = torch.nn.Parameter(torch.zeros(out_channels))
        else:
            self.bias = None
        self.__weight_initialization()
    def forward(self, x):
        b = self.bias.repeat(self.gconv_dim) if self.bias is not None else None
        w = self.weight
        B, C, _, H, W = x.shape
        y = None
        device = x.device
        # NOTE(review): padding_dim is computed but never used
        padding_dim = w.shape[-1] // 2
        for i in range(4):
            _, _, _, H, W = x.shape
            # align the input's orientation axis with output orientation i
            x_ = g_rot4(x, -i)
            # fold the orientation axis into channels for a grouped conv
            # NOTE(review): reshape uses gconv_dim (8 for p4m) although the
            # orientation axis always has size 4 -- verify the p4m path
            x_ = x_.transpose(1,2).reshape(B, C * self.gconv_dim, H, W)
            t = torch.nn.functional.conv2d(x_, w, groups=self.gconv_dim, *self.__args, **self.__kwargs)
            _, _, H, W = t.shape
            # sum contributions over the input orientations
            t = t.reshape(B, -1, 4, H, W).sum(dim=2)
            if y is None:
                y = torch.zeros(B, self.out_channels, 4, H, W).to(device)
            y[:, :, i, :, :] = t
        if self.bias is not None:
            # NOTE(review): b has shape (gconv_dim*out_channels,) while y is
            # (B, C, 4, H, W) -- this broadcast looks wrong; verify bias=True
            y = y + b
        return y
    def __weight_initialization(self):
        # same scheme as torch.nn.Conv2d's default (kaiming uniform, a=sqrt(5))
        init.kaiming_uniform_(self.weight, a=math.sqrt(5))
        if self.bias is not None:
            fan_in, _ = init._calculate_fan_in_and_fan_out(self.weight)
            bound = 1 / math.sqrt(fan_in)
            init.uniform_(self.bias, -bound, bound)
class MaxPoolingP4(torch.nn.Module):
    """Spatial max pooling for p4 feature maps of shape (B, C, 4, H, W).

    The rotation axis is folded into the channel axis, pooled with a
    regular 2D max pool, then unfolded again.
    """
    def __init__(self, kernel_size=(2,2), stride=2):
        super().__init__()
        # fix: *stride* was accepted but never forwarded to MaxPool2d
        # (the default happened to coincide only because MaxPool2d
        # defaults stride to kernel_size)
        self.pool = torch.nn.MaxPool2d(kernel_size, stride=stride)
    def forward(self, x):
        B, C, _, H, W = x.shape
        pooled = self.pool(x.view(B, -1, H, W))
        _, _, H, W = pooled.shape
        return pooled.view(B, C, 4, H, W)
class AvgPoolingP4(torch.nn.Module):
    """Spatial average pooling for p4 feature maps of shape (B, C, 4, H, W).

    The rotation axis is folded into the channel axis, pooled with a
    regular 2D average pool, then unfolded again.
    """
    def __init__(self, kernel_size=(2,2), stride=2):
        super().__init__()
        # fix: *stride* was accepted but never forwarded to AvgPool2d
        # (the default happened to coincide only because AvgPool2d
        # defaults stride to kernel_size)
        self.pool = torch.nn.AvgPool2d(kernel_size, stride=stride)
    def forward(self, x):
        B, C, _, H, W = x.shape
        pooled = self.pool(x.view(B, -1, H, W))
        _, _, H, W = pooled.shape
        return pooled.view(B, C, 4, H, W)
def get_p4weight(w):
    """Stack the four 90-degree rotations of *w*.

    input:  [C_out, C_in, H, W]
    output: [4*C_out, C_in, H, W], rotations interleaved per output channel
    """
    rotations = [torch.rot90(w, k, (-2, -1)) for k in range(4)]
    stacked = torch.stack(rotations, dim=1)
    return stacked.reshape(-1, w.size(1), w.size(2), w.size(3))
def get_p8weight(w):
    """Return the eight p4m orientations of *w*: each of the four
    90-degree rotations immediately followed by its horizontal mirror.

    input:  [C_out, C_in, H, W]
    output: [8*C_out, C_in, H, W]
    """
    rotations = [torch.rot90(w, k, (-2, -1)) for k in range(4)]
    rotated = torch.cat(rotations, 1).view(-1, w.size(1), w.size(2), w.size(3))
    mirrored = torch.flip(rotated, dims=(-1,))
    pairs = torch.stack((rotated, mirrored), dim=1)
    return pairs.reshape(-1, w.size(1), w.size(2), w.size(3))
def g_rot4(x, k, reverse=False):
    """Rotate a p4 feature map (..., 4, H, W) by k quarter-turns:
    rotate spatially and cyclically shift the orientation axis in step.
    Pass reverse=True to apply the inverse transform."""
    device = x.device
    steps = -k if reverse else k
    rotated = torch.rot90(x, steps, (-2, -1))
    return torch.roll(rotated, steps, dims=-3).to(device)
| maple5717/PyTorch-Implementation-of-Group-Equivariant-CNN | gcnn/layers.py | layers.py | py | 5,075 | python | en | code | 0 | github-code | 90 |
37326953535 | ROCK = 'X'
# Opponent's column letters in the puzzle input (your own column uses
# X/Y/Z, defined above).
OPPONENT_ROCK = 'A'
PAPER = 'Y'
OPPONENT_PAPER = 'B'
SCISSORS = 'Z'
OPPONENT_SCISSORS = 'C'

# For each of your shapes:
#   'match'   - the opponent shape that produces a draw
#   'defeats' - the opponent shape your shape beats
#   'score'   - the intrinsic score of playing that shape
play_mappings_and_scores = {
    ROCK: {
        'match': OPPONENT_ROCK,
        'defeats': OPPONENT_SCISSORS,
        'score': 1
    },
    PAPER: {
        'match': OPPONENT_PAPER,
        'defeats': OPPONENT_ROCK,
        'score': 2
    },
    SCISSORS: {
        'match': OPPONENT_SCISSORS,
        'defeats': OPPONENT_PAPER,
        'score': 3
    }
}
# Day 2, part 1: score every rock-paper-scissors round from the input file.
input_file_path = '/Users/andrewdieken/dev/advent-of-code/2022/day_2/input.txt'
with open(input_file_path, 'r') as input_file:
    rounds = input_file.readlines()

score = 0
for line in rounds:
    parts = line.strip().split(' ')
    opponent_play, your_play = parts[0], parts[1]
    entry = play_mappings_and_scores[your_play]
    shape_score = entry['score']
    if entry['match'] == opponent_play:        # draw
        score += 3 + shape_score
    elif entry['defeats'] == opponent_play:    # win
        score += 6 + shape_score
    else:                                      # loss
        score += shape_score

print(score)
| andrewdieken/advent-of-code | 2022/day_2/pt_1.py | pt_1.py | py | 1,181 | python | en | code | 0 | github-code | 90 |
44036529161 | #!/usr/bin/env python
import numpy as np
from util import softmax, sample_probs
class Tree:
    """Per-state search tree node used during simulated games."""

    def __init__(self, prior, c_puct):
        n_actions = len(prior)
        self.c_puct = c_puct
        self.T = 0                               # total visits to this node
        self.N = np.zeros(n_actions, dtype=int)  # per-action visit counts
        self.W = np.zeros(n_actions)             # per-action total value
        self.Q = np.zeros(n_actions)             # per-action mean value W/N
        self.P = np.array(prior)                 # prior scaled by visit count
        self.prior = np.array(prior)
        self.children = {}

    def leaf(self, action, prior):
        """Attach a fresh child node under *action*."""
        self.children[action] = Tree(prior, c_puct=self.c_puct)

    @property
    def U(self):
        """Exploration bonus (upper confidence bound term)."""
        return self.c_puct * np.sqrt(self.T) * self.P

    @property
    def values(self):
        """Exploitation plus exploration: Q + U."""
        return self.Q + self.U

    def select(self, valid):
        """Pick the best valid action; return (action, child-or-None)."""
        masked = np.where(valid, self.values, -np.inf)
        action = np.argmax(masked)
        return action, self.children.get(action)

    def backup(self, action, value):
        """Fold one simulation result for *action* into the statistics."""
        self.T += 1
        visits = self.N[action] + 1
        total = self.W[action] + value
        self.N[action] = visits
        self.W[action] = total
        self.Q[action] = total / visits
        self.P[action] = self.prior[action] / (1 + visits)
class AlphaZero:
    """Self-play MCTS trainer: generates games with tree search guided by
    the model, then updates the model on the resulting trajectories."""
    def __init__(self, game, model, seed=None,
                 c_puct=1.0,
                 tau=1.0,
                 eps=1e-6,
                 sims_per_search=1000):
        ''' Train a model to play a game with the AlphaZero algorithm '''
        self.rs = np.random.RandomState(seed)
        self._game = game
        self._model = model
        self.c_puct = c_puct  # exploration constant for the UCB term
        self.tau = tau        # temperature applied to visit counts
        # NOTE(review): eps is stored but not used anywhere in this class
        self.eps = eps
        self.sims_per_search = sims_per_search
    @classmethod
    def make(cls, game_cls, model_cls, seed=None, *args, **kwargs):
        ''' Convenience method to build from game and model classes '''
        game = game_cls(seed=seed)
        model = model_cls(game.n_action, game.n_view, game.n_player, seed=seed)
        return cls(game=game, model=model, seed=seed, *args, **kwargs)
    def model(self, state, player):
        ''' Wrap the model to give the proper view and mask actions '''
        valid = self._game.valid(state, player)
        view = self._game.view(state, player)
        logits, value = self._model.model(view)
        # mask invalid actions before normalising into probabilities
        probs = softmax(logits, valid)
        return probs, value
    def simulate(self, state, player, tree):
        '''
        Simulate a game by traversing tree
            state - game state tuple
            player - current player index
            tree - MCTS tree rooted at current state
        returns
            values - player-length list of values
        '''
        valid = self._game.valid(state, player)
        action, child = tree.select(valid)
        if child is None:
            # unexpanded action: evaluate with the model and expand
            prior, values = self.model(state, player)
            tree.leaf(action, prior)
        else:
            # step the environment; recurse unless the game just ended
            state, next_player, values = self._game.step(state, player, action)
            if values is None:
                values = self.simulate(state, next_player, child)
        tree.backup(action, values[player])
        return values
    def search(self, state, player, sims_per_search=None):
        ''' MCTS to generate move probabilities for a state '''
        if sims_per_search is None:
            sims_per_search = self.sims_per_search
        prior, _ = self.model(state, player)
        tree = Tree(prior, self.c_puct)
        for i in range(sims_per_search):
            self.simulate(state, player, tree)
        # visit counts sharpened by temperature tau, then normalised
        pi = np.power(tree.N, 1 / self.tau)
        probs = pi / np.sum(pi)
        return probs, tree
    def play(self):
        '''
        Play a whole game, and get states on which to update
        Return tuple of:
            trajectory - (observation, probabilities) for each step
            outcome - final reward for each player
        '''
        trajectory = []
        state, player, outcome = self._game.start()
        while outcome is None:
            probs, _ = self.search(state, player)
            action = sample_probs(probs, rs=self.rs)
            obs = self._game.view(state, player)
            trajectory.append((obs, probs))
            state, player, outcome = self._game.step(state, player, action)
        return trajectory, outcome
    def play_multi(self, n_games=10):
        '''
        Play multiple whole games, return a list of game results.
        See play() for result of a single game.
        '''
        games = []
        for i in range(n_games):
            print('playing game', i)
            games.append(self.play())
        return games
    def train(self, n_epochs=10, n_games=10):
        '''
        Train the model for a number of epochs of multi-play
        '''
        for i in range(n_epochs):
            games = self.play_multi(n_games=n_games)
            loss = self._model.update(games)
            print('epoch', i, 'loss', loss)
    def rollout(self):
        ''' Rollout a game against self and return final state '''
        state, player, outcome = self._game.start()
        while outcome is None:
            probs, _ = self.search(state, player)
            action = sample_probs(probs, rs=self.rs)
            state, player, outcome = self._game.step(state, player, action)
        return state
    def print_rollout(self):
        ''' Print out final board state '''
        print(self._game.human(self.rollout()))
print(self._game.human(self.rollout()))
# Smoke test: train a small model on the MNOP game when run as a script.
if __name__ == '__main__':
    from game import MNOP  # noqa
    from model import MLP  # noqa
    azero = AlphaZero.make(MNOP, MLP)
    azero.train(n_epochs=3, n_games=5)
| machinaut/azero | azero.py | azero.py | py | 5,785 | python | en | code | 2 | github-code | 90 |
73332713258 | # goorm / 기타 / 채점하기
# https://level.goorm.io/exam/43280/%EC%B1%84%EC%A0%90%ED%95%98%EA%B8%B0/quiz/1
# Grading: every consecutive 'o' is worth the current streak length.
answer = 0
streak = 0
for mark in input():
    if mark == 'o':
        streak += 1
        answer += streak
    else:
        streak = 0
print(answer)
24543433240 | for slovo in 'Viktorija':
print(slovo)
# set
for item in {1,2,3,4,5}:
    for x in ['a','b','c']:
        print(item, x)
# iterator - an object/collection we can iterate over,
# going one by one, checking every item in the collection
# iterables - list, dict, tuple, set, string
# Object - dictionary
user = {
    "name": "koko",
    "age": 2,
    "can_swim": False
}
# .() for key and value
# keys() - for the keys
for osobina in user.keys():
    print(osobina)
# comes out as a tuple with key and values
# values() - for the values
for osobina in user.values():
    print(osobina)
# items() - key and value
for osobina in user.items():
    print(osobina)
# how to get both, but not as a tuple?
for k, v in user.items(): # key and value
    print(k, v)
my_list = [1,2,3,4,5,6,7,8,9,10]
counter = 0
for broj in my_list:
    counter = counter + broj
print(counter)
# range ()
# an object; returns an object that yields a sequence of integers
print(range(100)) # 0 is the default; e.g. 50,100 also works
for i in range(0, 100, 2): # _ can replace i (or any variable) when it does not matter
    print(i) # 2 - steps over every other value
for _ in range(10, 0, -1): # -1 to go backwards, -2 every other
    print(_)
for _ in range(2):
    print(list(range(10)))
# enumerate()
# takes an iterable and gives an index counter plus the item at that index - i
for i, char in enumerate('Viktorija'):
    print(i, char)
for i, char in enumerate([1,2,3,4]):
    print(i, char)
for i, char in enumerate(list(range(100))):
    if char == 50:
        print(i)
# Exercise
picture = [
    [0,0,0,1,0,0,0],
    [0,0,1,1,1,0,0],
    [0,1,1,1,1,1,0],
    [1,1,1,1,1,1,1],
    [0,0,0,1,0,0,0],
    [0,0,0,1,0,0,0]
]
fill = '*'
empty = ''
for row in picture:
    for pixel in row:
        if pixel:
            print(fill) #, end='') # end does not work in VS Code, works in repl
        else:
            print(empty) #end='')
    print('')
# Exercise 2
lista = ['a','b','c','b','d','m','n','n']
duplicates = []
for char in lista: #go one by one through the elements of the first list...
    #by index.. first the zeroth, then the first, then the second etc. to the end
    if lista.count(char) > 1: #check whether the number of occurrences
        #of the currently taken element is greater than 1 (a duplicate in the first list)
        if char not in duplicates: #if the condition above holds
            #(it is a duplicate) also check it has not already been added to the second list
            duplicates.append(char) #if the previous two conditions hold (it is a
            #duplicate and not yet in the second list) add the element to the second list
print(duplicates)
| ViktorijaSiktorija/Python-Concepts | forLoops.py | forLoops.py | py | 2,567 | python | sr | code | 0 | github-code | 90 |
43954274261 | import argparse
from src.constants import CHOICES, CHOICES_PLURAL, JOB_POSTINGS
from src.constants import DESCRIPTION, EPILOG, ADD_HELP, SHOW_HELP, UPDATE_HELP, DELETE_HELP
from src.cli_helper import print_to_screen, show, class_factory, get_all_objects_in_db, selection_screen, update_class
from src.cli_helper import delete_class_object, db_exist, create_db
def main():
    """Parse the CLI arguments and dispatch to the requested action."""
    parser = argparse.ArgumentParser(description=DESCRIPTION, epilog=EPILOG)
    group = parser.add_mutually_exclusive_group()
    group.add_argument("--add", choices=CHOICES, help=ADD_HELP)
    group.add_argument("--show", choices=CHOICES_PLURAL, help=SHOW_HELP)
    group.add_argument("--update", choices=CHOICES, help=UPDATE_HELP)
    group.add_argument("--delete", choices=CHOICES, help=DELETE_HELP)
    args = parser.parse_args()

    def pick_existing(kind):
        # let the user choose one stored object of *kind*
        blueprint = class_factory(kind)
        return selection_screen(get_all_objects_in_db(blueprint))

    if args.add:
        blueprint = class_factory(args.add)
        update_class(blueprint, blueprint.properties)
    elif args.show:
        found = show(args.show)
        if found:
            print_to_screen(found)
        else:
            print("No {} in the database. Try adding one.".format(args.show))
    elif args.update:
        chosen = pick_existing(args.update)
        update_class(chosen, chosen.properties)
    elif args.delete:
        delete_class_object(pick_existing(args.delete))
    else:
        # default action: list stored job postings
        found = show(JOB_POSTINGS)
        if found:
            print_to_screen(found)
        else:
            print("No jobpostings in the database. Try adding one.")
# Create the backing database on first run, then launch the CLI.
if __name__ == "__main__":
    if not db_exist():
        create_db()
    main()
| crazcalm/job-search | jobs.py | jobs.py | py | 2,108 | python | en | code | 3 | github-code | 90 |
def main():
    """If 2*n is a pronic number k*(k+1), print Yes, k+1, and a balanced
    assignment of labels 1..n onto k+1 rows; otherwise print No."""
    n = int(input())
    for k in range(1, 450):
        if k * (k + 1) == 2 * n:
            print("Yes")
            print(k + 1)
            break
    else:
        # no k matched: n is not of the form k*(k+1)/2
        print("No")
        return 0
    rows = []
    label = 1
    for i in range(k + 1):
        rows.append([])
        # reuse the labels already shared with the earlier rows
        for j in range(i):
            rows[i].append(rows[j][i - 1])
        # then hand out fresh labels for the remaining slots
        for j in range(i, k):
            rows[i].append(label)
            label += 1
    for row in rows:
        print(len(row), *row)

main()
17981280619 | from collections import deque
# Alternate the input tokens onto the two ends of a deque, then flip the
# result when the count is odd.
n = int(input())
tokens = deque(input().split())
result = deque()
while tokens:
    result.append(tokens.popleft())
    if tokens:
        result.appendleft(tokens.popleft())
if n % 2 != 0:
    result.reverse()
print(" ".join(result))
15541407495 | from pathlib import Path
import win32com.client
def excelVBARun():
    """Open a macro-enabled workbook via COM, list its sheets and VBA
    modules, and exercise macros that return values to Python."""
    xlapp = win32com.client.Dispatch("Excel.Application")
    # open the workbook (read-only, no link updates)
    abspath = str(Path(r"C:\Users\dede2\OneDrive\デスクトップ\開発\Excelツール\個人的なテスト.xlsm").resolve())
    workbook = xlapp.Workbooks.Open(abspath, UpdateLinks=0, ReadOnly=True)
    # list the worksheets
    print(workbook.Worksheets.Count)
    for i in range(0, workbook.Worksheets.Count):
        print(workbook.Worksheets[i].name)
    print('------------------------------------')
    # get the VBA module names (requires "Trust access to the VBA project")
    for mod1 in workbook.VBProject.VBComponents:
        print(mod1.Name)
    print('------------------------------------')
    # run macro: return-value test
    res = xlapp.Application.Run("M_Others.pythonReturnValue", 7)
    print(res, )
    # run macro: pass-by-reference test
    # # num2 = None
    # num2 = 1
    # # str2 = None
    # str2 = 'None'
    # res = xlapp.Application.Run("pythonReturnValue_2", num2, str2)
    # print(str(num2) + ':' + str2)
    num2 = 1
    str2 = '砂糖'
    res = xlapp.Application.Run("pythonReturnValue_3", num2, str2)
    print(res)
    print(res[0])
    print(res[1])
    # # quit Excel
    # xlapp.Quit()
# Run the demo when executed directly.
if __name__ == '__main__':
    excelVBARun()
| dede-20191130/CreateToolAndTest | Test_Miscellaneous_Python/Test_ExcelLink/ExcelLink.py | ExcelLink.py | py | 1,295 | python | ja | code | 0 | github-code | 90 |
27480307993 | __author__ = 'Cue'
# Interactive population lookup for the five most populous countries.
countries = ['China', 'India', 'United States', 'Indonesia', 'Pakistan']
populations = [1439323776, 1380004385, 331002651, 273523615, 220892340]
no_of_countries = len(countries)

print('COUNTRIES')
print('=' * len('COUNTRIES'))
for index, country in enumerate(countries, start=1):
    print(str(index) + '.', country)

do_again = 'Y'
while do_again == 'Y':
    choice = int(input('Enter a number from 1 to 5: '))
    print('Population of', countries[choice - 1], 'is', populations[choice - 1])
    do_again = input('Enter y to continue or anything else to quit: ').upper()
print('Bye.')
| kevin-aus/cp1404_TR1_2022 | lec04/activity2_first_way.py | activity2_first_way.py | py | 706 | python | en | code | 0 | github-code | 90 |
36527224605 | """
This setup.py handles compiling .po (portable object) files into their
appropriate .mo (machine object) results.
Reference: https://setuptools.pypa.io/en/latest/userguide/extension.html#setuptools.command.build.SubCommand
"""
# pyright: strict
import shutil
import subprocess
from pathlib import Path
from typing import Iterator
import warnings
from setuptools import Command, setup
from setuptools.command.build import SubCommand, build # type: ignore # missing stub
class build_mo(Command, SubCommand):
    """Builds machine object translation files."""

    # Mirrors setuptools' default build directory; output .mo files land here
    # for normal (non-editable) builds.
    build_lib = "build/lib"
    editable_mode = False  # shouldn't be necessary but pyright wants this
    # Root directory searched for packages containing .po catalogs.
    _source_root = Path("src")
    # SubCommand protocol
    def initialize_options(self) -> None:
        """
        Set or (reset) all options/attributes/caches used by the command
        to their default values. Note that these values may be overwritten during
        the build.
        """
    def finalize_options(self) -> None:
        """
        Set final values for all options/attributes used by the command.
        Most of the time, each option/attribute/cache should only be set if it does not
        have any value yet (e.g. ``if self.attr is None: self.attr = val``).
        """
    def run(self) -> None:
        """
        Execute the actions intended by the command.
        (Side effects **SHOULD** only take place when ``run`` is executed,
        for example, creating new files or writing to the terminal output).
        """
        # msgfmt (from gettext) is an external tool; building .mo files is
        # best-effort, so its absence only produces a warning.
        msgfmt = shutil.which("msgfmt")
        if msgfmt is None:
            return warnings.warn(
                "msgfmt is not available, building .mo files will be skipped",
                RuntimeWarning,
            )
        for source_po in self._find_po_files():
            output_po = self._get_output_path(source_po)
            output_mo = output_po.with_suffix(".mo")
            if not self.editable_mode:
                # Parent directory required for msgfmt to work correctly
                output_mo.parent.mkdir(parents=True, exist_ok=True)
            # In editable mode the .mo is compiled next to the source .po.
            subprocess.check_call(["msgfmt", "-o", output_mo, output_po])
            if not self.editable_mode:
                # Space savings
                output_po.unlink()
    def get_source_files(self) -> list[str]:
        """
        Return a list of all files that are used by the command to create
        the expected outputs. For example, if your build command transpiles
        Java files into Python, you should list here all the Java files.
        The primary purpose of this function is to help populating the ``sdist``
        with all the files necessary to build the distribution.
        All files should be strings relative to the project root directory.
        """
        return list(self.get_output_mapping().values())
    def get_outputs(self) -> list[str]:
        """
        Return a list of files intended for distribution as they would have been
        produced by the build.
        These files should be strings in the form of
        ``"{build_lib}/destination/file/path"``.
        .. note::
            The return value of ``get_output()`` should include all files used as keys
            in ``get_output_mapping()`` plus files that are generated during the build
            and don't correspond to any source file already present in the project.
        """
        return list(self.get_output_mapping().keys())
    def get_output_mapping(self) -> dict[str, str]:
        """
        Return a mapping between destination files as they would be produced by the
        build (dict keys) into the respective existing (source) files (dict values).
        Existing (source) files should be represented as strings relative to the project
        root directory.
        Destination files should be strings in the form of
        ``"{build_lib}/destination/file/path"``.
        """
        mapping: dict[str, str] = {}
        for source_po in self._find_po_files():
            output_po = self._get_output_path(source_po)
            output_mo = output_po.with_suffix(".mo")
            mapping[str(output_mo)] = str(source_po)
        return mapping
    # Utility methods
    def _find_po_files(self) -> Iterator[Path]:
        """Yields all PO files in the project source directory."""
        for package_path in self._source_root.iterdir():
            if not package_path.is_dir():
                continue
            for po_path in package_path.rglob("*.po"):
                yield po_path
    def _get_output_path(self, path: Path) -> Path:
        """Map a source path into build_lib; identity in editable mode."""
        if self.editable_mode:
            return path
        path = path.relative_to(self._source_root)
        return Path(self.build_lib) / path
# Subcommand registration
# Hook build_mo into the standard ``build`` pipeline so it runs automatically
# on every build; None means "always run" (no condition predicate).
build.sub_commands.append(("build_mo", None))
setup(cmdclass={"build_mo": build_mo})
| thegamecracks/discord.py-i18n-demo | setup.py | setup.py | py | 4,866 | python | en | code | 1 | github-code | 90 |
25606101755 | import logging
from flask import request
from airflow.api_connexion import security
from airflow.security import permissions
from airflow.www.app import csrf
from airflow_xtended_api.api.app import blueprint
import airflow_xtended_api.core.s3_sync as s3
import airflow_xtended_api.utils as utils
import airflow_xtended_api.api.dag_utils as dag_utils
from airflow_xtended_api.api.response import ApiResponse
from airflow_xtended_api.exceptions import (
S3BucketDoesNotExistsError,
S3ObjDownloadError,
S3GenericError,
OSFileHandlingError,
)
# decorator precedence matters... route should always be first
@blueprint.route("/s3_sync", methods=["POST"])
@csrf.exempt
@security.requires_access(
    [
        (permissions.ACTION_CAN_READ, permissions.RESOURCE_DAG),
        (permissions.ACTION_CAN_DELETE, permissions.RESOURCE_DAG),
    ]
)
def sync_dags_from_s3():
    """Custom Function for the s3_sync API
    Sync DAG files from a S3 compatible storage backend.
    args:
        s3_bucket_name: s3 bucket name where the DAG files exist
        s3_region: s3 region name where the specified bucket exists
        s3_access_key: IAM entity access key having atleast S3 bucket read access
        s3_secret_key: IAM secret key for the specifed access key
        s3_object_prefix: filter s3 objects based on provided prefix
        s3_object_keys: sync DAG files specified by the object keys
        skip_purge: skip deleting existing files in the DAG folder
        otf_sync: sync dags in foreground
    """
    logging.info("Executing custom 'sync_dags_from_s3' function")

    # Validate the four mandatory form fields in one loop instead of the
    # original four copy-pasted fetch/check/return blocks; the error
    # messages are byte-identical to the originals.
    required = {}
    for field in ("s3_bucket_name", "s3_region", "s3_access_key", "s3_secret_key"):
        value = request.form.get(field)
        if value is None:
            return ApiResponse.bad_request(f"{field} is required")
        required[field] = value
    s3_bucket_name = required["s3_bucket_name"]
    s3_region = required["s3_region"]
    s3_access_key = required["s3_access_key"]
    s3_secret_key = required["s3_secret_key"]

    # Optional filter: default to "" (match everything) when absent/empty.
    obj_prefix = request.form.get("s3_object_prefix") or ""
    # comma seperated obj keys as string
    obj_keys = request.form.get("s3_object_keys")
    # Flag fields: mere presence (any value) enables the behaviour.
    skip_purge = request.form.get("skip_purge") is not None
    otf_sync = request.form.get("otf_sync") is not None
    logging.info("create_dag in sync state: " + str(otf_sync))

    if not skip_purge:
        # Best-effort purge: a failure here is logged but does not abort the
        # sync, matching the original behaviour.
        try:
            dag_utils.empty_dag_folder()
        except Exception:
            logging.exception("error emptying dag folder... ")
    else:
        logging.warning("skipping: purge dag folder... ")

    sync_status = None
    try:
        s3.init_client(s3_access_key, s3_secret_key, s3_region)
        if obj_keys:
            # Specific keys requested: sync only those objects.
            obj_keys = obj_keys.strip().split(",")
            sync_status = s3.sync_specific_objects_from_bucket(
                s3_bucket_name, obj_keys, dag_utils.get_dag_folder()
            )
        else:
            # Otherwise sync the whole bucket, filtered by prefix.
            sync_status = s3.sync_files_from_bucket(
                s3_bucket_name, dag_utils.get_dag_folder(), obj_prefix
            )
    except Exception as e:
        logging.exception("unable to complete sync from s3!!")
        # Custom S3 exceptions carry a user-facing .message attribute.
        if hasattr(e, "message"):
            return ApiResponse.other_error(e.message)
        return ApiResponse.other_error("unable to handle this request at this moment!!")

    dag_utils.scan_dags(otf_sync)
    return ApiResponse.success(
        {"message": "dag files synced from s3", "sync_status": sync_status}
    )
| anr007/airflow-xtended-api | airflow_xtended_api/api/endpoints/sync_dags_from_s3.py | sync_dags_from_s3.py | py | 3,793 | python | en | code | 11 | github-code | 90 |
73426286697 | #!/usr/bin/env python3
import re
import json
import re
import os
_HERE = os.path.dirname(os.path.abspath(__file__))
import pandas as pd
def makeWordLists(file):
    """Parse a dictionary file of ``NAME=listfile`` lines into regex fragments.

    Each non-comment line of *file* maps a placeholder name to a word-list
    file (one word/phrase per line, resolved relative to this module).  The
    returned dict maps each placeholder name to an alternation regex of the
    form ``(word1|word2|...)``.
    """
    # helper function for assembling regexes from dictionary files
    # takes in a file that has one word/phrase per line
    # produces (word1|word2|word with space3|...|wordN)
    def makeRegexList(lsFile):
        # "|".join replaces the original manual "+= word + '|'" build and its
        # trailing-separator slicing, which produced the invalid regex ")"
        # for an empty list file.
        with open(os.path.join(_HERE, lsFile)) as listFile:
            words = [line.strip() for line in listFile]
        return "(" + "|".join(words) + ")"

    word_lists = {}
    with open(file) as f:
        # get the word lists from the file
        for line in f:
            line = line.strip()
            if len(line) > 0 and line[0] != '#':
                # split on the first '=' only, so list-file names containing
                # '=' are no longer truncated (original used split("=")[1]).
                name, list_file = line.split("=", 1)
                word_lists[name] = makeRegexList(list_file)
    return word_lists
########################
# Assemble the regexes #
########################

# Module-level placeholder expansions, shared by assembleRegexes() below.
word_lists = makeWordLists(os.path.join(_HERE, "word_lists.txt"))
def assembleRegexes(word_lists, regex_file):
    """Read *regex_file* and expand word-list placeholders into compiled regexes.

    Returns a list of dicts with keys ``regex_id``, ``category``, ``pattern``
    and ``re`` (the compiled pattern).
    """
    regexes = []
    next_id = 0
    flavor = 'UNKNOWN'
    with open(regex_file) as f:
        for raw in f:
            raw = raw.strip()
            # Skip blank lines and '#' comments.
            if not raw or raw.startswith('#'):
                continue
            if raw.startswith('TYPE'):
                # A "TYPE=<category>" line switches the category applied to
                # all subsequent patterns.
                flavor = raw.split("=")[1]
                continue
            pattern = raw
            for placeholder, expansion in word_lists.items():
                pattern = pattern.replace(placeholder, expansion)
            regexes.append({'regex_id': next_id, 'category': flavor,
                            'pattern': pattern, 're': re.compile(pattern)})
            next_id += 1
    return regexes
# Module-level regex table consumed by non_answers() and get_regexes_df().
regexes = assembleRegexes(word_lists, os.path.join(_HERE, "regex.txt"))
def non_answers(sents):
    """sents is a list of sentences returned by nltk.sent_tokenize"""
    # Collect one match record per (sentence, regex) pair that matches.
    # The search is performed once per pair (the original comprehension
    # evaluated it twice: once in the filter, once for .group()).
    matches = []
    for sentence in sents:
        for rule in regexes:
            hit = rule['re'].search(sentence)
            if hit is not None:
                matches.append({'regex_id': rule['regex_id'],
                                'sentence': sentence,
                                'phrase': hit.group()})
    if not matches:
        return None
    return [json.dumps(match) for match in matches]
def get_regexes_df():
    """Return the assembled regex table as a DataFrame, minus the
    non-serializable compiled pattern objects in the 're' column."""
    return pd.DataFrame(regexes).drop(columns=['re'])
if __name__ == "__main__":
sents = ["I refuse to answer that.",
"I don't know about that.",
"We expect profits of $123 million.",
"Let me get back to you on that."]
res = non_answers(sents)
for r in res:
print(r)
| iangow/ling_features | non_answer/non_answers.py | non_answers.py | py | 2,919 | python | en | code | 10 | github-code | 90 |
36674028744 | from django.urls import path
from services.views import (CategoryList, CategoryCreate, CategoryDetail, ServiceCreate,
SalonServices, ServiceUpdate, CategoryUpdate, CategoryDelete,
ServiceDelete)
from staffer.views import StafferAddService
app_name = "services"
urlpatterns = [
path('', CategoryList.as_view(), name="list-category"),
path('category/', CategoryCreate.as_view(), name="new-category"),
path('category/<int:pk>', CategoryDetail.as_view(), name="category-detail"),
path('category/update/<int:cid>', CategoryUpdate.as_view(), name="category-update"),
path('category/delete/<int:pk>', CategoryDelete.as_view(), name="category-delete"),
path('new/', ServiceCreate.as_view(), name="new"),
path('salon/<int:sid>', SalonServices.as_view(), name="salon"),
path('master', StafferAddService.as_view(), name="add-master"),
path('update/<int:pk>', ServiceUpdate.as_view(), name="update"),
path('delete/<int:pk>', ServiceDelete.as_view(), name="delete"),
]
| Abdubek/salon | src/services/urls.py | urls.py | py | 1,055 | python | en | code | 0 | github-code | 90 |
8583857075 | ## @package app.stats_app
from app.app import App
from ui.stats_app.view_configs_window import ViewConfigsWindow
## Handles startup for the statistics app.
class StatsApp(App):
    ## Constructor
    # Registers this app with id 'stats_app' as a GUI application.
    # @param self
    def __init__(self):
        super(StatsApp, self).__init__(
            'stats_app',
            App.APP_TYPES.GUI,
            'icons/open_icon_library-standard/icons/png/64x64/apps/gnumeric.png'
        )

    ## See superclass description.
    # Opens the configuration-selection window as the app's first screen.
    def start(self):
        ViewConfigsWindow()
| babylanguagelab/bll_app | wayne/app/stats_app.py | stats_app.py | py | 516 | python | en | code | 0 | github-code | 90 |
70117278698 | import os
import click
import datasheets
import numpy as np
import pandas as pd
from pulp import LpVariable, LpProblem, LpMaximize, lpSum, PULP_CBC_CMD
import yaml
class Optimizer:
    """Integer program that assigns movies to cinema screens to maximize
    revenue, subject to a total budget and a fixed number of screens."""

    def __init__(self, input_data, num_screens, budget):
        # input_data: DataFrame with columns movie/revenue/cost (one row per
        # movie, including the synthetic 'empty' row added by the caller).
        self.input_data = input_data
        self.num_screens = num_screens
        self.budget = budget
        self.movie_counts = None
        self.problem = None
    def create_vars(self):
        """Define the optimization decision variables"""
        # One integer variable per movie: how many screens it occupies.
        self.movie_counts = {}
        for _, row in self.input_data.iterrows():
            var = LpVariable(f'{row.movie}_counts', cat='Integer',
                             lowBound=0, upBound=self.num_screens)
            self.movie_counts[row.movie] = var
    def get_objective_function(self, solved=False):
        # Total revenue: sum over movies of (screens assigned * revenue).
        # NOTE(review): the lpSum/np.sum branches look swapped (lpSum is
        # normally used while *building* the model), but both accept these
        # operands, so either branch yields the correct expression/value.
        objective = []
        for _, row in self.input_data.iterrows():
            val = _get_val(self.movie_counts[row.movie], solved)
            objective.append(val * row.revenue)
        return lpSum(objective) if solved else np.sum(objective)
    def get_constraints(self):
        # Two constraints: all screens assigned, and total cost within budget.
        constraints = []
        constraint = (
            lpSum(self.movie_counts.values()) == self.num_screens,
            'every screen must be assigned'
        )
        constraints.append(constraint)
        total_cost = []
        for _, row in self.input_data.iterrows():
            total_cost.append(self.movie_counts[row.movie] * row.cost)
        constraint = lpSum(total_cost) <= self.budget, 'Limited budget'
        constraints.append(constraint)
        return constraints
    def get_solution(self, solved):
        """Generate a string that contains the solution information"""
        msg = []
        if solved:
            objective_value = self.get_objective_function(solved)
            msg.append(f'Optimization successful! '
                       f'Total Revenue = {objective_value}')
            for _, row in self.input_data.iterrows():
                val = self.movie_counts[row.movie].varValue
                if row.movie == 'empty':
                    msg.append(f'Leave {int(val)} screens empty')
                else:
                    msg.append(f'Movie {row.movie} is on {int(val)} screens')
        else:
            msg.append('Optimization algorithm failed!')
        return '\n'.join([x for x in msg])
    def build_allocation(self):
        # Build a per-movie allocation table plus a 'total' summary row,
        # indexed by movie name (ready for upload to the output sheet).
        movie = []
        num_screens = []
        cost = []
        revenue = []
        for _, row in self.input_data.iterrows():
            val = self.movie_counts[row.movie].varValue
            movie.append(row.movie)
            num_screens.append(val)
            cost.append(row.cost * val)
            revenue.append(row.revenue * val)
        df = pd.DataFrame({'movie': movie, 'num_screens': num_screens,
                           'revenue': revenue, 'cost': cost})
        total_revenue = df.revenue.sum()
        total_cost = df.cost.sum()
        total_screens = df.num_screens.sum()
        last_row = pd.DataFrame(
            {'movie': ['total'], 'num_screens': [total_screens],
             'revenue': [total_revenue], 'cost': [total_cost]})
        df = pd.concat([df, last_row], axis=0)
        df = df.set_index('movie', drop=True)
        return df
    def run(self):
        """Build and solve the LP; returns True when an optimal solution
        was found (pulp status code 1)."""
        self.problem = LpProblem('FML', LpMaximize)
        self.create_vars()
        self.problem += self.get_objective_function(solved=False)
        for constraint in self.get_constraints():
            self.problem += constraint
        status = self.problem.solve(PULP_CBC_CMD(msg=3))
        solved = status == 1
        return solved
def _get_val(var, solved):
return var.varValue if solved else var
def parse_conf(conf):
    """Load the YAML config file and export datasheets credential paths.

    Side effect: sets the DATASHEETS_* environment variables consumed by
    the datasheets client.  Returns
    (workbook, num_screens, empty_screen_cost, budget).
    """
    with open(conf, 'r') as f:
        # safe_load replaces the original bare yaml.load, which is
        # deprecated without an explicit Loader and can construct arbitrary
        # Python objects from the file.
        conf = yaml.safe_load(f)
    os.environ['DATASHEETS_SECRETS_PATH'] = conf['creds_file']
    os.environ['DATASHEETS_SERVICE_PATH'] = conf['service_file']
    workbook = conf['workbook']
    num_screens = conf['num_screens']
    empty_screen_cost = conf['empty_screen_cost']
    budget = conf['budget']
    return workbook, num_screens, empty_screen_cost, budget
def load_data(workbook):
    """Fetch the contents of the workbook's 'inputs' tab."""
    return workbook.fetch_tab('inputs').fetch_data()
def run_pipeline(conf='conf.yml'):
    """
    Pull inputs from google sheets, solve the allocation problem, and write the
    solution back to the sheet.
    """
    workbook, num_screens, empty_screen_cost, budget = parse_conf(conf)
    # Pull data
    client = datasheets.Client(service=True)
    workbook = client.fetch_workbook(workbook)
    input_data = load_data(workbook)
    # Add a synthetic 'empty' movie (zero revenue, fixed cost) so the solver
    # can legally leave screens unassigned.
    empty_screen = pd.DataFrame({'movie': ['empty'], 'revenue': [0],
                                 'cost': [empty_screen_cost]})
    input_data = pd.concat([input_data, empty_screen], axis=0)
    # Define and solve allocation problem
    optimizer = Optimizer(input_data, num_screens, budget)
    solved = optimizer.run()
    solution_msg = optimizer.get_solution(solved)
    print(solution_msg)
    if solved:
        # Write the results to google sheet.
        allocation = optimizer.build_allocation()
        tab = workbook.fetch_tab('outputs')
        tab.insert_data(allocation)
    return solution_msg
# CLI entry point; --conf points at the YAML configuration file.
# (Help text deliberately left as a comment: a docstring here would change
# click's generated --help output.)
@click.command()
@click.option('--conf', default='conf.yml')
def main(conf):
    run_pipeline(conf)


if __name__ == '__main__':
    main()
| EthanRosenthal/fml | fml/optimizer.py | optimizer.py | py | 5,342 | python | en | code | 2 | github-code | 90 |
import argparse
import os
import pipes
import re
import shlex
import shutil
import subprocess
import sys
from datetime import datetime
from pathlib import Path
FOURTEEN_DAYS = 14
def how_old(date: datetime) -> int:
    """Return the number of whole days elapsed since *date*."""
    elapsed = datetime.now() - date
    return elapsed.days
def error_print(*args, **kwargs):
    """Forward to print(), writing to stderr instead of stdout."""
    print(*args, **kwargs, file=sys.stderr)
def get_commits(repo_name: str, repository_url: str) -> list:
    """Return every commit hash reachable from the repo's branches and PRs.

    Fetches all branch and pull-request refs into the local bare clone at
    *repo_name*, then lists every reachable commit hash (one per line, with
    a trailing empty string from the final newline).  Exits the process with
    git's return code if the git pipeline fails.
    """
    # shlex.quote replaces pipes.quote: the pipes module is deprecated
    # (PEP 594) and removed in Python 3.13; the two functions are equivalent.
    try:
        return subprocess.check_output(
            f"cd {shlex.quote(repo_name)} \
            && git fetch --prune {shlex.quote(repository_url)} '+refs/pull/*/head:refs/remotes/prs/*' '+refs/heads/*:refs/remotes/origin/*' \
            && git rev-list --all --remotes", shell=True
        ).decode('utf-8').split('\n')
    except subprocess.CalledProcessError as err:
        error_print(err.output.decode())
        exit(err.returncode)
def has_valid_name(folder_name: str) -> bool:
    """Return True if *folder_name* looks like a full 40-char git SHA-1 hash."""
    # fullmatch replaces the awkward ``not re.match(r'^...$', s) == None``:
    # it needs no anchors, reads as a boolean, and (unlike '$') does not
    # accept a trailing newline.
    return re.fullmatch(r'[a-f0-9]{40}', folder_name) is not None
if __name__ == '__main__':
    parser = argparse.ArgumentParser("Prune old builds")
    parser.add_argument("repo_name", help="Repository name.", type=str)
    parser.add_argument(
        "builds_path",
        help="The path where the builds are stored.",
        type=str)
    parser.add_argument(
        "repository_url",
        help="The url to the repository.",
        type=str)
    parser.add_argument("-f", "--delete", action="store_true",
                        help="Delete folders.")
    args, _ = parser.parse_known_args()
    builds_path = Path(args.builds_path)
    repository_url = args.repository_url
    repo_name = args.repo_name
    # The builds directory must already exist; exit 1 otherwise.
    if not os.path.exists(builds_path):
        error_print(
            f"error: The directory \033[1m{builds_path}\033[0m does not exists.")
        exit(1)
    # Clone a bare mirror of the repo on first run; reused afterwards.
    if not os.path.exists(repo_name):
        error_print("Cloning repository")
        subprocess.check_call(
            ['git', 'clone', '-q', '--bare', repository_url, repo_name])
    error_print('Getting all commits of the repository\n')
    commits = get_commits(repo_name, repository_url)
    to_be_deleted = []
    # A build folder is prunable when: its name is a full SHA-1, that commit
    # is no longer reachable in the repo, and it is older than 14 days.
    for folder in os.listdir(builds_path):
        folder_path = builds_path / folder
        created_at = datetime.fromtimestamp(os.path.getctime(folder_path))
        if not folder in commits and has_valid_name(folder) and how_old(created_at) > FOURTEEN_DAYS:
            error_print(
                f"""Build \033[93m{folder}\033[0m accomplish the following criteria:\n* Is not part of the repo/PR\n* Has more then fourteen days\n\033[1;31mThis build will be deleted.\033[0m\n""")
            to_be_deleted.append(folder_path)
    # Dry run by default; deletion only happens with the -f/--delete flag.
    if args.delete:
        for folder_path in to_be_deleted:
            try:
                shutil.rmtree(folder_path)
            except FileNotFoundError:
                error_print("Build folder not found")
                exit(2)
            except Exception as err:
                error_print(err)
                exit(1)
# quick-lint-js finds bugs in JavaScript programs.
# Copyright (C) 2020 Matthew "strager" Glazar
#
# This file is part of quick-lint-js.
#
# quick-lint-js is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# quick-lint-js is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with quick-lint-js. If not, see <https://www.gnu.org/licenses/>.
| quick-lint/quick-lint-js | infrastructure/quick-lint-js-web-2/roles/builds/files/prune-old-builds.py | prune-old-builds.py | py | 3,814 | python | en | code | 956 | github-code | 90 |
14775720595 | """Script to run different optimization methods on rosenbrock funciton."""
from function import *
from algorithm import steepest_descent
import numpy as np
def main():
    """Minimize the Rosenbrock function with steepest descent and print the result."""
    rosenbrock = RosenbrockFunction()
    f, grad = rosenbrock.value, rosenbrock.grad_value
    solver = steepest_descent.SteepestDescent(f, grad, 1e-4, 0.9,
                                              iterations=100)
    x = solver.optimize(x0=np.array([2, 10, 1, 10, 0]))
    print(f'Final Result x={x}, f(x)={f(x)}')
# Run the optimization demo when executed as a script.
if __name__ == '__main__':
    main()
| zhuyifengzju/optimization | rosenbrock_minima.py | rosenbrock_minima.py | py | 701 | python | en | code | 2 | github-code | 90 |
71441041256 | import json
from django.contrib.auth.decorators import login_required
from django.contrib.auth import get_user_model
from django.core.paginator import Paginator, EmptyPage, PageNotAnInteger
from django.shortcuts import render, get_object_or_404
from django.http import JsonResponse
from .models import Favorite
from recipes.models import Recipe, Tag
from shopping_list.models import ShoppingList
from recipes.utils import get_pagination, purchase_counter, get_recipes
User = get_user_model()
@login_required
def favorite_list(request):
    """Render the current user's favorite recipes, paginated and tag-aware."""
    # Only recipes the logged-in user has favorited.
    recipes = get_recipes(request).filter(recipe_fav__user=request.user)
    all_tags = Tag.objects.all()
    # NOTE(review): always empty — get_pagination presumably accepts a tag
    # filter here; confirm whether tag filtering was intended on this page.
    recipes_tags = []
    purchase_count = purchase_counter(request)
    paginator = get_pagination(request, recipes, recipes_tags)
    page_number = request.GET.get("page")
    page = paginator.get_page(page_number)
    # Russian for "Favorites" (user-facing page title; do not translate).
    title = "Избранное"
    return render(request, "index.html", {
        "page": page,
        "paginator": paginator,
        "title": title,
        "purchase_count": purchase_count,
        "all_tags": all_tags,
    })
@login_required
def favorites_add(request):
    """Add the recipe identified in the JSON request body to the user's favorites."""
    payload = json.loads(request.body)
    recipe = get_object_or_404(Recipe, id=int(payload.get('id')))
    # get_or_create keeps the operation idempotent on repeated clicks.
    Favorite.objects.get_or_create(user=request.user, recipe=recipe)
    return JsonResponse({"success": True})
@login_required
def favorites_del(request, id):
    """Remove recipe *id* from the user's favorites; 404 if it is not favorited."""
    entry = get_object_or_404(
        Favorite,
        recipe=id,
        user=request.user
    )
    entry.delete()
    return JsonResponse({"success": True})
| dronsovest/foodgram-project | favorites/views.py | views.py | py | 1,632 | python | en | code | 0 | github-code | 90 |
21211540065 | import sys
sys.path.append('/home/data/hq/DA')
#from yacs.config import CfgNode as CN
import time
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.optim.lr_scheduler import StepLR, LambdaLR
from torch.utils import data
from build_dataset import build_dataset_preDA,build_dataset_DA
#from build_adadataset import build_dataset_DA
from experiment_config import EXPERIMENTS,EXPERIMENTS_m
from utils.transforms import random_flip_rotate
from models.layers import conv_block, up_conv
from utils.metrics import MHDValue, DiceScore
from utils.loss import dice_loss
from utils.utils import set_requires_grad, load_pretrained, setup_seed #, eightway_affinity_kld,fourway_affinity_kld
import os
import shutil
from runx.logx import logx
# SOURCE = CN()
# SOURCE.dataset = 'IBSR'
# SOURCE.PATH = '/home/huqian/baby/DA_code/IBSR_18/IBSR_18_re' #
# SOURCE.label_s = (9, 10, 11, 12, 13, 17, 18, 48, 49, 50, 51, 52, 53, 54)
# SOURCE.label_t = (1, 1, 2, 3, 4, 5, 6, 1, 1, 2, 3, 4, 5, 6)
# SOURCE.IDs_train = ['08', '09', '02', '07', '04', '05', '16', '03', '06']
# TARGET = CN()
# TARGET.dataset = 'MALC'
# TARGET.PATH = '/home/huqian/baby/DA_code/MICCAI/MALC_re' #
# TARGET.label_s = (59, 60, 36, 37, 57, 58, 55, 56, 47, 48, 31, 32)
# TARGET.label_t = (1, 1, 2, 2, 3, 3, 4, 4, 5, 5, 6, 6)
# TARGET.IDs_train = ['20', '28', '08', '31', '06', '35', '34', '25', '13', '05', '01', '21',
# '17', '27', '33', '11', '12', '16', '10', '32', '18', '04', '14', '02',
# '22', '09', '19']
# TARGET.IDs_eval = ['29', '03', '26', '23']
# TARGET.IDs_test = ['07', '15', '24', '30']
class U_Net_4(nn.Module):
    """4-level U-Net encoder/decoder for segmentation.

    ``forward`` returns a tuple ``(features, logits)``: the final 64-channel
    decoder feature map (consumed elsewhere, e.g. by auxiliary classifiers)
    and the per-pixel class logits.  Attribute names (Conv1..Conv5, Up*,
    cls) must stay stable — they define the checkpoint state_dict keys.
    """
    def __init__(self, in_ch=3, num_classes=7):
        super(U_Net_4, self).__init__()
        n1 = 64
        # Channel widths per level: 64, 128, 256, 512, 1024.
        filters = [n1, n1 * 2, n1 * 4, n1 * 8, n1 * 16]
        self.Maxpool = nn.MaxPool2d(kernel_size=2, stride=2)
        self.Conv1 = conv_block(in_ch, filters[0])
        self.Conv2 = conv_block(filters[0], filters[1])
        self.Conv3 = conv_block(filters[1], filters[2])
        self.Conv4 = conv_block(filters[2], filters[3])
        self.Conv5 = conv_block(filters[3], filters[4])
        self.Up4 = up_conv(filters[4], filters[3])
        self.Up_conv4 = conv_block(filters[4], filters[3])
        self.Up3 = up_conv(filters[3], filters[2])
        self.Up_conv3 = conv_block(filters[3], filters[2])
        self.Up2 = up_conv(filters[2], filters[1])
        self.Up_conv2 = conv_block(filters[2], filters[1])
        self.Up1 = up_conv(filters[1], filters[0])
        self.Up_conv1 = conv_block(filters[1], filters[0])
        # 1x1 conv head producing per-pixel class logits.
        self.cls = nn.Conv2d(filters[0], num_classes, kernel_size=1, stride=1, padding=0)
    def forward(self, x):
        # Encoder: conv then 2x downsample at each level.
        e1 = self.Conv1(x)
        e2 = self.Maxpool(e1)
        e2 = self.Conv2(e2)
        e3 = self.Maxpool(e2)
        e3 = self.Conv3(e3)
        e4 = self.Maxpool(e3)
        e4 = self.Conv4(e4)
        e5 = self.Maxpool(e4)
        e5 = self.Conv5(e5)
        # Decoder: upsample, concatenate the matching encoder skip, convolve.
        d4 = self.Up4(e5)
        d4 = torch.cat((e4, d4), dim=1)
        d4 = self.Up_conv4(d4)
        d3 = self.Up3(d4)
        d3 = torch.cat((e3, d3), dim=1)
        d3 = self.Up_conv3(d3)
        d2 = self.Up2(d3)
        d2 = torch.cat((e2, d2), dim=1)
        d2 = self.Up_conv2(d2)
        d1 = self.Up1(d2)
        d1 = torch.cat((e1, d1), dim=1)
        d1 = self.Up_conv1(d1)
        out = self.cls(d1)
        return d1, out
class C(nn.Module):
    """Lightweight pixel classifier head: 64 -> 32 -> num_classes via 1x1 convs.

    NOTE(review): appears intended to consume the 64-channel feature map
    returned by U_Net_4.forward — confirm against the caller (unused in the
    visible training loop).
    """
    def __init__(self, num_classes=7):
        super(C, self).__init__()
        self.cls = nn.Sequential(nn.Conv2d(64, 32, kernel_size=1, stride=1, padding=0),
                                 nn.ReLU(inplace=True),
                                 nn.Conv2d(32, num_classes, kernel_size=1, stride=1, padding=0))
    def forward(self, x):
        return self.cls(x)
class PixelDiscriminator(nn.Module):
    """Per-pixel domain discriminator over softmax segmentation maps.

    Two 3x3 conv + LeakyReLU layers followed by a 1x1 head; stride 1
    throughout, so the output logits keep the input's spatial size.
    The ``num_classes``-channel output is compared against all-0 (source)
    or all-1 (target) maps in the training loop.
    """
    def __init__(self, input_nc, ndf=64, num_classes=7):
        super(PixelDiscriminator, self).__init__()
        self.D = nn.Sequential(
            nn.Conv2d(input_nc, ndf, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True),
            nn.Conv2d(ndf, ndf*2, kernel_size=3, stride=1, padding=1),
            nn.LeakyReLU(negative_slope=0.2, inplace=True)
        )
        self.cls = nn.Conv2d(ndf*2, num_classes, kernel_size=1, stride=1)
    def forward(self, x):
        # Disabled experiment: affinity-based input transform.
        # x = fourway_affinity_kld(x) #fourway_affinity_kld eightway_affinity_kld
        out = self.cls(self.D(x))
        return out
def validation(model, eval_loader):
    """Evaluate *model* on the validation loader and log per-structure Dice.

    Expects batch size 1 (takes ``outputs[0]``); predictions are argmaxed
    per pixel, stacked over the whole set, and scored with 7 classes
    (background + 6 subcortical structures).  Returns the DiceScore dict.
    """
    model.eval()
    pred_all = []
    label_all = []
    for inputs in eval_loader:
        img, label = inputs
        img = img.cuda()
        with torch.no_grad():
            # Model returns (features, logits); only logits are needed here.
            _, outputs = model(img)
        outputs = outputs[0, :, :, :]
        # Per-pixel argmax over the class dimension.
        pred = outputs.data.max(0)[1].cpu()
        pred_all.append(pred)
        label_all.append(label)
    pred_all = torch.stack(pred_all, dim=0)
    label_all = torch.cat(label_all, dim=0)
    score = DiceScore(pred_all, label_all, 7)
    logx.msg('eval:')
    logx.msg('Mean Dice: {}'.format(score['Mean Dice']))
    logx.msg('Thalamus: {}'.format(score['Dice'][0]))
    logx.msg('Caudate: {}'.format(score['Dice'][1]))
    logx.msg('Putamen: {}'.format(score['Dice'][2]))
    logx.msg('Pallidum: {}'.format(score['Dice'][3]))
    logx.msg('Hippocampus: {}'.format(score['Dice'][4]))
    logx.msg('Amygdala: {}'.format(score['Dice'][5]))
    return score
def test(model, best_checkpoint, test_loader):
    """Load the best checkpoint into *model*, evaluate, and log Dice + MHD.

    Same evaluation loop as validation() (batch size 1 expected), with
    Modified Hausdorff Distance added; results are logged via logx and
    recorded through log_() under the 'val' phase.
    """
    # Restore the best weights selected during training.
    checkpoint = torch.load(best_checkpoint)
    model_state_dict = checkpoint['model_state_dict']
    load_pretrained(model, model_state_dict)
    model.eval()
    pred_all = []
    label_all = []
    for inputs in test_loader:
        img, label = inputs
        img = img.cuda()
        with torch.no_grad():
            _, outputs = model(img)
        outputs = outputs[0, :, :, :]
        pred = outputs.data.max(0)[1].cpu()
        pred_all.append(pred)
        label_all.append(label)
    pred_all = torch.stack(pred_all, dim=0)
    label_all = torch.cat(label_all, dim=0)
    score = DiceScore(pred_all, label_all, 7)
    score_mhd = MHDValue(pred_all, label_all, 7)
    logx.msg('test:')
    logx.msg('Mean Dice: {}'.format(score['Mean Dice']))
    logx.msg('Thalamus: {}'.format(score['Dice'][0]))
    logx.msg('Caudate: {}'.format(score['Dice'][1]))
    logx.msg('Putamen: {}'.format(score['Dice'][2]))
    logx.msg('Pallidum: {}'.format(score['Dice'][3]))
    logx.msg('Hippocampus: {}'.format(score['Dice'][4]))
    logx.msg('Amygdala: {}'.format(score['Dice'][5]))
    logx.msg('MHD Thalamus: {}'.format(score_mhd['MHD'][0]))
    logx.msg('MHD Caudate: {}'.format(score_mhd['MHD'][1]))
    logx.msg('MHD Putamen: {}'.format(score_mhd['MHD'][2]))
    logx.msg('MHD Pallidum: {}'.format(score_mhd['MHD'][3]))
    logx.msg('MHD Hippocampus: {}'.format(score_mhd['MHD'][4]))
    logx.msg('MHD Amygdala: {}'.format(score_mhd['MHD'][5]))
    logx.msg('----------------------------------------------------------------')
    log_(score, score_mhd, 'val')
def log_(score, score_mhd, phase='val', epoch=None):
    """Record the Dice and MHD metrics for the six structures via logx."""
    structures = ('Thalamus', 'Caudate', 'Putamen', 'Pallidum',
                  'Hippocampus', 'Amygdala')
    # Key insertion order matches the original hand-written dict:
    # Mean Dice, six Dice values, then six MHD values.
    log = {'Mean Dice': score['Mean Dice']}
    for idx, name in enumerate(structures):
        log[name] = score['Dice'][idx]
    for idx, name in enumerate(structures):
        log['MHD ' + name] = score_mhd['MHD'][idx]
    logx.metric(phase=phase, metrics=log, epoch=epoch)
def train(model, D, num_iters,train_loader, optimizer, optimizer_D, config):
    """One epoch of adversarial DA training: alternate segmentation-network
    and discriminator updates for *num_iters* batches.

    NOTE(review): train_iter is a plain enumerate over the loader, so
    num_iters must not exceed len(train_loader) or __next__ raises
    StopIteration — confirm against the configured batch counts.
    """
    model.train()
    D.train()
    # Domain labels for the discriminator's BCE targets.
    source_label = 0
    target_label = 1
    train_iter = enumerate(train_loader)
    for i in range(num_iters):
        # train segmentation network
        # Freeze D so only the generator receives adversarial gradients.
        set_requires_grad(D, requires_grad=False)
        model.zero_grad()
        _, inputs = train_iter.__next__()
        img_source, label_source, img_target = inputs[0].cuda(), inputs[1].cuda(), inputs[2].cuda()
        _, outputs_s = model(img_source)
        _, outputs_t = model(img_target)
        #print(label_source.shape) #4,96,128
        #print(outputs_s.shape) #4,7,96,128
        # Supervised loss on source + adversarial loss that pushes target
        # predictions to look like source to D (hence source_label target).
        loss_seg = F.cross_entropy(outputs_s, label_source) + dice_loss(outputs_s, label_source)
        D_t = D(F.softmax(outputs_t, dim=1))
        loss_adv = F.binary_cross_entropy_with_logits(D_t, torch.FloatTensor(D_t.data.size()).fill_(source_label).cuda())
        loss = loss_seg + config.lambda_adv * loss_adv
        loss.backward()
        optimizer.step()
        # train Discriminator
        set_requires_grad(D, requires_grad=True)
        # train with source
        D.zero_grad()
        # detach: discriminator updates must not backprop into the segmenter.
        outputs_s = outputs_s.detach()
        D_s = D(F.softmax(outputs_s, dim=1))
        loss_D_s = F.binary_cross_entropy_with_logits(D_s, torch.FloatTensor(D_s.data.size()).fill_(source_label).cuda())
        # train with target
        outputs_t = outputs_t.detach()
        D_t = D(F.softmax(outputs_t, dim=1))
        loss_D_t = F.binary_cross_entropy_with_logits(D_t, torch.FloatTensor(D_t.data.size()).fill_(target_label).cuda())
        loss_D = config.lambda_D * (loss_D_s + loss_D_t)
        loss_D.backward()
        optimizer_D.step()
def main(config):
    """Full adversarial-DA run: build loaders, train for n_epochs with
    per-epoch validation and checkpointing, then evaluate the best model."""
    os.environ["CUDA_VISIBLE_DEVICES"] = str(config.gpu_id)
    setup_seed(config.seed)
    TRAIN_CONFIG = EXPERIMENTS[config.experiment] #EXPERIMENTS_m
    train_dataset, eval_dataset, test_dataset = build_dataset_DA(TRAIN_CONFIG, random_flip_rotate)
    train_loader = data.DataLoader(train_dataset, batch_size=4, num_workers=1, shuffle=True)
    #test_loader = data.DataLoader(test_dataset, batch_size=1, num_workers=1, shuffle=False)
    eval_loader = data.DataLoader(eval_dataset, batch_size=1, num_workers=1, shuffle=False)
    model = U_Net_4().cuda()
    # Discriminator input = 7-channel softmax segmentation maps.
    D = PixelDiscriminator(7).cuda()
    optimizer = torch.optim.SGD(model.parameters(), lr=config.init_lr, momentum=0.9, weight_decay=5e-4)
    optimizer_D = torch.optim.Adam(D.parameters(), lr=config.init_lr_D, betas=(0.5, 0.999))
    scheduler = StepLR(optimizer, step_size=config.step_size, gamma=config.gamma)
    # Linear LR decay for D starting after epoch 20, reaching 0 at epoch 60.
    lr_decay_function = lambda epoch: 1.0 - max(0, epoch - 20) / float(40)
    scheduler_D = LambdaLR(optimizer_D, lr_lambda=lr_decay_function)
    t_start = time.time()
    for epoch in range(config.n_epochs):
        logx.msg('epoch: {}'.format(epoch))
        t_epoch = time.time()
        train(model, D,config.num_iters, train_loader, optimizer, optimizer_D, config)
        scheduler.step()
        scheduler_D.step()
        t_train = time.time()
        logx.msg('cost {:.2f} seconds in this train epoch'.format(t_train - t_epoch))
        score_eval = validation(model, eval_loader)
        save_dict = {
            'model_state_dict':model.state_dict(),
        }
        # logx keeps the checkpoint with the best Mean Dice.
        logx.save_model(save_dict, metric=score_eval['Mean Dice'], epoch=epoch, higher_better=True)
    best_checkpoint = logx.get_best_checkpoint()
    # Final evaluation of the best checkpoint (on the eval split).
    test(model, best_checkpoint, eval_loader)
    t_end = time.time()
    logx.msg('cost {:.2f} minutes in this train epoch'.format((t_end - t_start)/60))
if __name__ == '__main__':
    # Hyperparameter CLI; defaults reproduce the authors' reference run.
    parser = argparse.ArgumentParser(description='Hyperparams')
    parser.add_argument('--gpu_id', nargs='?', type=int, default=3)
    parser.add_argument('--seed', nargs='?', type=int, default=200)
    parser.add_argument('--num_iters', nargs='?', type=int, default=250)
    parser.add_argument('--batch_size', nargs='?', type=int, default=4)
    parser.add_argument('--n_epochs', nargs='?', type=int, default=20)
    parser.add_argument('--init_lr', nargs='?', type=float, default=1e-2)
    parser.add_argument('--init_lr_D', nargs='?', type=float, default=1e-4)
    parser.add_argument('--step_size', nargs='?', type=int, default=10)
    parser.add_argument('--gamma', nargs='?', type=float, default=0.1)
    parser.add_argument('--experiment', nargs='?', type=int, default=0)#IBSR:7 MALC:0
    parser.add_argument('--logdir', nargs='?', type=str, default='/home/data/hq/DA/train/test')
    parser.add_argument('--lambda_adv', nargs='?', type=float, default=1)
    parser.add_argument('--lambda_D', nargs='?', type=float, default=0.5)
    config = parser.parse_args()
    # The log directory is wiped on every run — previous results are lost.
    if os.path.exists(config.logdir):
        shutil.rmtree(config.logdir)
    logx.initialize(logdir=config.logdir, coolname=False, tensorboard=False, hparams=vars(config), no_timestamp=True)
    main(config)
| huqian999/UDA-MIMA | train/ADA.py | ADA.py | py | 12,835 | python | en | code | 1 | github-code | 90 |
class Date(object):
    """A calendar date with year ``z``, month ``x``, and day ``y`` fields."""

    def __init__(self, z0=1900, x0=1, y0=1):
        self.x = x0  # month
        self.z = z0  # year
        self.y = y0  # day

    def __str__(self):
        # Month and day are zero-padded to two digits: "1972/ 03/ 27".
        return "{}/ {}/ {}".format(self.z, str(self.x).rjust(2, '0'), str(self.y).rjust(2, '0'))

    def same_day_in_year(self, other):
        """Return True when *other* falls on the same month and day (any year)."""
        return self.y == other.y and self.x == other.x

    def is_leap_year(self):
        """Return True for Gregorian leap years.

        Fixed: the original test ``z % 4 == 0 or z % 400 == 0`` wrongly
        classified century years such as 1900 as leap years.  A year is a
        leap year when divisible by 4, except century years, which must be
        divisible by 400.
        """
        return self.z % 4 == 0 and (self.z % 100 != 0 or self.z % 400 == 0)

    def __lt__(self, other):
        # Fixed: the original compared days whenever months were not strictly
        # smaller, so e.g. 1998-05-01 < 1998-04-13 returned True.  Tuple
        # comparison orders by year, then month, then day.
        return (self.z, self.x, self.y) < (other.z, other.x, other.y)

    def __gt__(self, other):
        # Mirror of __lt__ with the same year/month/day ordering fix.
        return (self.z, self.x, self.y) > (other.z, other.x, other.y)
if __name__ == "__main__":
    # Exercise Date: construction/printing, leap-year checks, and ordering.
    d1 = Date(1972, 3, 27)
    d2 = Date(1998, 4, 13)
    d3 = Date(1998, 5, 13)
    d4 = Date(1998, 4, 11)
    d5 = Date(1900, 11, 5)
    d6 = Date(2012, 5, 8)
    print("d1: " + str(d1))
    print("d2: " + str(d2))
    print("d3: " + str(d3))
    print('d4: ' + str(d4))
    print('d5: ' + str(d5))
    print('d6: ' + str(d6))
    print()
    # Leap-year checks (d5 = 1900 is the interesting century case).
    print("d1.is_leap_year:", d1.is_leap_year())
    print("d2.is_leap_year:", d2.is_leap_year())
    print("d3.is_leap_year()", d3.is_leap_year())
    print("d4.is_leap_year()", d4.is_leap_year())
    print("d5.is_leap_year()", d5.is_leap_year())
    print("d6.is_leap_year()", d6.is_leap_year())
    print()
    # Ordering via __lt__.
    print("d1 < d2:", d1<d2)
    print("d2 < d3:", d2<d3)
    print("d3 < d4:", d3<d4)
    print("d4 < d5:", d4<d5)
    print("d5 < d6:", d5<d6)
print () | Anooj-Pai/Python-Projects | Labs/Lab9/check2.py | check2.py | py | 1,895 | python | en | code | 0 | github-code | 90 |
28170422187 | import re
from pyndn.util.regex.ndn_regex_matcher_base import NdnRegexMatcherBase
class NdnRegexComponentMatcher(NdnRegexMatcherBase):
    """
    Create a RegexComponent matcher from expr.

    :param str expr: The standard regular expression to match a component.
    :param NdnRegexBackrefManager backrefManager: The back reference manager.
    :param bool isExactMatch: (optional) The flag to provide exact match. If
      omitted, use True.
    """
    def __init__(self, expr, backrefManager, isExactMatch = True):
        super(NdnRegexComponentMatcher, self).__init__(
          expr, NdnRegexMatcherBase.NdnRegexExprType.COMPONENT, backrefManager)
        self._componentRegex = None
        # Array of NdnRegexPseudoMatcher, one per capture group (index 0 unused).
        self._pseudoMatchers = []
        self._isExactMatch = isExactMatch

        self._compile()

    def match(self, name, offset, length):
        """
        Try to match the single name component at `offset`.

        :param Name name:
        :param int offset:
        :param int length:
        :rtype: bool
        """
        self._matchResult = []

        # An empty expression matches any single component.
        if self._expr == "":
            self._matchResult.append(name.get(offset))
            return True

        if not self._isExactMatch:
            raise NdnRegexMatcherBase.Error(
              "Non-exact component search is not supported yet")

        componentStr = name.get(offset).toEscapedString()
        found = self._componentRegex.search(componentStr)
        if found is None:
            return False

        # Record each capture group in its pseudo matcher for back references.
        for groupIndex in range(1, self._componentRegex.groups + 1):
            matcher = self._pseudoMatchers[groupIndex]
            matcher.resetMatchResult()
            matcher.setMatchResult(found.group(groupIndex))
        self._matchResult.append(name.get(offset))
        return True

    def _compile(self):
        # Compile the expression and register one pseudo matcher per group
        # with the back reference manager (slot 0 is a placeholder).
        self._componentRegex = re.compile(self._expr)

        self._pseudoMatchers = [NdnRegexPseudoMatcher()]
        for _ in range(self._componentRegex.groups):
            subMatcher = NdnRegexPseudoMatcher()
            self._pseudoMatchers.append(subMatcher)
            self._backrefManager.pushRef(subMatcher)
# Put this last to avoid an import loop.
from pyndn.util.regex.ndn_regex_pseudo_matcher import NdnRegexPseudoMatcher
| named-data/PyNDN2 | python/pyndn/util/regex/ndn_regex_component_matcher.py | ndn_regex_component_matcher.py | py | 2,280 | python | en | code | 23 | github-code | 90 |
import turtle
import pandas

# Map background and window setup.
screen = turtle.Screen()
screen.title("U.S. States Game")
image = "blank_states_img.gif"
screen.addshape(image)
turtle.shape(image)

data = pandas.read_csv("50_states.csv")
states_list = data["state"].to_list()
correct_guesses = []

# One hidden pen reused for every label (the original created a new Turtle
# for each correct answer).
writer = turtle.Turtle()
writer.hideturtle()
writer.penup()

while len(correct_guesses) < 50:
    answer = screen.textinput(title=f"{len(correct_guesses)}/50 States Correct",
                              prompt="What's another state's name?")
    # BUG FIX: closing/cancelling the dialog returns None, which crashed
    # the original on .title().
    if answer is None:
        continue
    answer_state = answer.title()
    if answer_state == "Exit":
        # Save the states not yet guessed so the player can study them.
        states_for_studying = [state for state in states_list if state not in correct_guesses]
        states_to_learn = pandas.DataFrame(states_for_studying)
        states_to_learn.to_csv("states_to_learn.csv")
        break
    # BUG FIX: reject repeats — the original re-appended an already-correct
    # state, inflating the score with duplicates.
    if answer_state in states_list and answer_state not in correct_guesses:
        correct_guesses.append(answer_state)
        location = data[data.state == answer_state]
        # .item() extracts the scalar explicitly; int(Series) is deprecated.
        x_coordinate = int(location.x.item())
        y_coordinate = int(location.y.item())
        writer.setpos(x_coordinate, y_coordinate)
        writer.write(answer_state)
| rachanahegde/python-pro-bootcamp-intermediate-projects | day-25-us-states-game/main.py | main.py | py | 1,388 | python | en | code | 1 | github-code | 90 |
# 8C3 , 7C5 프로그램 만들기
# nCr = nPr / r!
# (Removed the stray `from tkinter import N` — an unused, IDE-auto-inserted
# import — and the leftover per-iteration debug prints.)
def combination(numn, numr):
    """Return numn C numr, computed as nPr // r! (exact integer division)."""
    resultp = 1  # nPr: numn * (numn-1) * ... * (numn-numr+1)
    for n in range(numn, numn - numr, -1):
        resultp *= n
    resultr = 1  # r!
    for n in range(numr, 0, -1):
        resultr *= n
    return resultp // resultr

if __name__ == '__main__':
    numn = int(input('numn 입력: '))
    numr = int(input('numr 입력: '))
    resultc = combination(numn, numr)
    print('resultc: {}'.format(resultc))
| jungwonguk/Education | 강의/27~28강 조합/조합1.py | 조합1.py | py | 503 | python | en | code | 1 | github-code | 90 |
def solve(n, a, b, healths):
    """Minimum number of explosions to bring every monster's health to <= 0.

    A centered explosion deals `a` damage to its target and `b` to everyone
    else (a > b).  Binary-search the answer x: with x explosions, every
    monster takes at least b*x; any remainder must be covered by centering
    (a-b extra per centered hit), and the total centered hits must fit in x.

    Improvements over the original: no per-check deepcopy of the health
    list, and integer ceiling division instead of float math.ceil (exact
    for arbitrarily large healths).
    """
    def enough(x):
        centered = 0
        for h in healths:
            rem = h - b * x
            if rem > 0:
                # Integer ceil(rem / (a-b)) without float rounding error.
                centered += -(-rem // (a - b))
        return centered <= x

    lo, hi = 0, max(healths) // b + 1  # hi explosions always suffice
    while lo < hi:
        mid = (lo + hi) // 2
        if enough(mid):
            hi = mid
        else:
            lo = mid + 1
    return hi

if __name__ == '__main__':
    # Guarded so the module can be imported without consuming stdin;
    # `r` is printed by the trailing statement when run as a script.
    n, a, b = map(int, input().split())
    H = [int(input()) for _ in range(n)]
    r = solve(n, a, b, H)
print(r) | Aasthaengg/IBMdataset | Python_codes/p03700/s799165017.py | s799165017.py | py | 446 | python | en | code | 0 | github-code | 90 |
from collections import Counter

def count_anagram_pairs(words):
    """Count unordered pairs of strings that are anagrams of each other.

    Each string is canonicalized by sorting its characters; k strings
    sharing a canonical form contribute k*(k-1)/2 pairs.  Uses Counter
    instead of the original manual setdefault bookkeeping.
    """
    counts = Counter(''.join(sorted(word)) for word in words)
    return sum(cnt * (cnt - 1) // 2 for cnt in counts.values())

if __name__ == '__main__':
    # Guarded so the module can be imported without consuming stdin;
    # `ans` is printed by the trailing statement when run as a script.
    n = int(input())
    words = [input() for _ in range(n)]
    ans = count_anagram_pairs(words)
print(ans) | Aasthaengg/IBMdataset | Python_codes/p02947/s672918472.py | s672918472.py | py | 209 | python | en | code | 0 | github-code | 90 |
22572465015 | """ A weather object. Pure flavor. """
class EwWeather:
    """A weather pattern. Pure flavor text, one string per time of day."""

    # Identifier for this weather pattern.
    name = ""
    # Flavor text shown at each phase of the day.
    str_sunrise = ""
    str_day = ""
    str_sunset = ""
    str_night = ""

    def __init__(self, name="", sunrise="", day="", sunset="", night=""):
        self.name = name
        self.str_sunrise = sunrise
        self.str_day = day
        self.str_sunset = sunset
        self.str_night = night
| mudkipslaps/endless-war | ew/model/weather.py | weather.py | py | 511 | python | en | code | null | github-code | 90 |
17986453485 | # -*- coding: utf-8 -*-
from collections import defaultdict
from intervaltree import Interval, IntervalTree
from utils import tsv, overlap_length
from gtfclasses import GTFLine, GTFCluster
__author__ = 'Matthew L. Bendall'
__copyright__ = "Copyright (C) 2017 Matthew L. Bendall"
def subtract_gtflines(gA, gB):
    """ Subtract overlapping portions from GTF record

    Returns two copies of A with the portion overlapping B removed: a left
    piece ending where B starts and a right piece starting where B ends,
    each clamped to A's own interval.  Depending on the overlap this
    yields 0, 1, or 2 non-empty records.

    Args:
        gA: record to be trimmed (must support .copy(), .start, .end).
        gB: record whose span is removed from gA.

    Returns:
        tuple: (left_piece, right_piece)
    """
    left_end = max(gA.start, min(gA.end, gB.start))
    right_start = min(max(gA.start, gB.end), gA.end)
    left, right = gA.copy(), gA.copy()
    left.end = left_end
    right.start = right_start
    return left, right
def sort_gtf(giter, chrom_order=None):
    """ Sort GTF

    Sort GTF records by chromosome and start position.

    Args:
        giter (iterable): List of GTFLine or GTFCluster objects
        chrom_order (list): Chromosome sort order. Default is alphabetical.
            Chromosomes absent from chrom_order sort after all listed
            ones, alphabetically among themselves.

    Returns:
        list: Sorted GTF records.
    """
    ret = sorted(giter, key=lambda x: x.start)
    if chrom_order is None:
        ret.sort(key=lambda x: x.chrom)
    else:
        rank = {k: i for i, k in enumerate(chrom_order)}
        # BUG FIX: the original key mixed int ranks with str chromosome
        # names for unlisted chromosomes, which raises TypeError on
        # Python 3.  Rank unlisted chromosomes after all listed ones and
        # break ties alphabetically (matches the old Python 2 ordering).
        ret.sort(key=lambda x: (rank.get(x.chrom, len(rank)), x.chrom))
    return ret
def region_gtf(giter, regions):
    """ Keep only the GTF records that overlap at least one region.

    Args:
        giter: iterable of GTF records.
        regions (list): Regions as chrom[:start-end].  A bare chromosome
            name selects the whole chromosome.

    Returns:
        list: records overlapping any region.
    """
    wanted = defaultdict(list)
    for region in regions:
        if ':' not in region:
            # Only chromosome was given: cover it entirely.
            wanted[region].append((float('-inf'), float('inf')))
        else:
            chrom, span = region.split(':')
            start, end = map(int, span.split('-'))
            assert start <= end
            wanted[chrom].append((start, end))

    kept = []
    for rec in giter:
        for iv in wanted.get(rec.chrom, ()):
            if overlap_length((rec.start, rec.end), iv) > 0:
                kept.append(rec)
                break
    return kept
def slop_gtf(giter, b=0, chrom_lengths=None):
    """ Increase the size of each GTF record

    Args:
        giter (iterable): List of GTFLine or GTFCluster objects
        b (int): Number of base pairs to increase. Default is 0.
        chrom_lengths (dict): Chromosome name (str) -> chromosome length (int)

    Yields:
        GTFLine: New object with increased size, clipped to [1, chrom length].
    """
    chrom_lengths = {} if chrom_lengths is None else chrom_lengths
    for g in giter:
        # BUG FIX: the original used chrom_lengths.setdefault(...), which
        # silently mutated the caller's dict; .get() has the same clipping
        # effect (3e9 fallback) without the side effect.
        chrom_len = chrom_lengths.get(g.chrom, int(3e9))
        yield GTFLine([
            g.chrom, '.', 'slop',
            max(1, g.start - b),            # start, clipped at 1
            min(chrom_len, g.end + b),      # end, clipped at chromosome length
            '.', g.strand, '.', g.attr,
        ])
def cluster_gtf(gtf, dist=0, stranded=True, criteria=None):
    """Merge records lying within `dist` bp of each other into GTFClusters.

    Args:
        gtf (iterable): GTFLine or GTFCluster objects.
        dist (int): maximum gap between the current cluster's end and the
            next record's start for them to merge.
        stranded (bool): if True, only same-strand records can cluster.
        criteria (callable): optional extra predicate f(cluster, record)
            that must also hold for a merge.

    Returns:
        list: GTFCluster objects.

    NOTE: uses the Python 2 dict API (.iteritems); this module is
    Python 2 only (see the `print >>` statements below).
    """
    # Bucket records by chromosome (and strand, when stranded).
    bychrom = defaultdict(list)
    for g in gtf:
        if stranded:
            bychrom['%s#%s' % (g.chrom, g.strand)].append(g)
        else:
            bychrom[g.chrom].append(g)
    ret = []
    for cchrom, glist in bychrom.iteritems():
        # Sweep each bucket left to right, extending the current cluster
        # while gaps stay within `dist` (and `criteria`, when given, holds).
        glist.sort(key=lambda x:x.start)
        cur = GTFCluster(glist[0]) if type(glist[0]) is GTFLine else glist[0]
        for g1 in glist[1:]:
            domerge = (g1.start - cur.end) <= dist
            if criteria is not None:
                domerge &= criteria(cur, g1)
            if domerge:
                cur.add(g1)
            else:
                ret.append(cur)
                cur = GTFCluster(g1) if type(g1) is GTFLine else g1
        ret.append(cur)
    return ret
def _chromstrand(g, stranded):
return '{}#{}'.format(g.chrom, g.strand) if stranded else g.chrom
def intersect_gtf(gtfA, gtfBs, stranded=True):
    """For each record in gtfA, yield (record, hits): hits are (index,
    record) pairs drawn from the gtfBs lists that overlap it (same
    chromosome, and same strand when stranded)."""
    # Index every B record in an interval tree keyed by chrom(/strand).
    lookup = defaultdict(IntervalTree)
    for listIndex, gtfB in enumerate(gtfBs):
        for rec in gtfB:
            lookup[_chromstrand(rec, stranded)].addi(
                rec.start, rec.end, (listIndex, rec))

    for rec in gtfA:
        key = _chromstrand(rec, stranded)
        if key in lookup:
            hits = [iv.data for iv in lookup[key].overlap(rec.start, rec.end)]
        else:
            hits = []
        yield (rec, hits)
def conflict_gtf(gtf, dist=0, stranded=False):
    """Find groups of records that lie within `dist` bp of a neighbor.

    Args:
        gtf (iterable): GTFLine or GTFCluster objects.
        dist (int): maximum gap between consecutive records to count as
            conflicting.
        stranded (bool): if True, only same-strand records can conflict.

    Returns:
        list: lists of two or more chained conflicting records.

    NOTE: Python 2 only (.iteritems).
    """
    # Bucket records by chromosome (and strand, when stranded).
    bychrom = defaultdict(list)
    for g in gtf:
        if stranded:
            bychrom['%s#%s' % (g.chrom, g.strand)].append(g)
        else:
            bychrom[g.chrom].append(g)
    ret = []
    for cchrom, glist in bychrom.iteritems():
        glist.sort(key=lambda x:x.start)
        tmp = [glist[0],]
        for g1 in glist[1:]:
            # NOTE(review): the gap is measured against the *previous*
            # record's end, not the running maximum end of the chain —
            # confirm this is the intended chaining rule.
            isconflict = (g1.start - tmp[-1].end) <= dist
            if isconflict:
                tmp.append(g1)
            else:
                if len(tmp) > 1:
                    ret.append(tmp)
                tmp = [g1, ]
        if len(tmp) > 1:
            ret.append(tmp)
    return ret  # return None if len(ret) == 0 else ret
"""
def subtract(gA, gB, both=False):
r1 = gA.copy()
r2 = gA.copy()
r1.end = max(gA.start, min(gA.end, gB.start))
r2.start = min(max(gA.start, gB.end), gA.end)
return r1, r2
"""
def write_gtf_file(gtf, outfile, comment=True, span=True):
    """Write GTF records to a path or an already-open file handle.

    Args:
        gtf (iterable): GTFLine or GTFCluster objects.
        outfile: destination path (str) or open file handle; a path is
            opened and closed here, a handle is left open.
        comment (bool): passed to GTFCluster.display_str.
        span (bool): passed to GTFCluster.display_str.

    NOTE: uses the Python 2 `print >>` statement; this module is
    Python 2 only.
    """
    outh = open(outfile, 'w') if type(outfile) is str else outfile
    for g in gtf:
        if type(g) is GTFCluster:
            print >>outh, g.display_str(comment, span)
        else:
            print >>outh, str(g)
    if type(outfile) is str: outh.close()
def read_gtf_file(infile, comment='#'):
    """Return a generator of GTFLine objects parsed from a tab-separated file,
    skipping lines starting with `comment`."""
    return (GTFLine(r) for r in tsv(infile, comment))
def read_gtf_clusters(infile, group_by=None, comment='#'):
    """Read a GTF file and assemble its records into GTFCluster objects.

    Args:
        infile: path or handle accepted by read_gtf_file.
        group_by (str): attribute name used to group rows.  When None,
            each 'gene' row starts a new group and following rows are
            appended to it.
        comment (str): comment prefix to skip.

    Returns:
        list: one GTFCluster per group, built from the non-'gene' rows;
        each cluster's attr dict is copied from the group's first 'gene'
        row.
    """
    if group_by is None:
        # Positional grouping: a 'gene' row opens a group, everything
        # after it (until the next 'gene') belongs to that group.
        groups = []
        for g in read_gtf_file(infile, comment):
            if g.feature == 'gene':
                groups.append([g])
            else:
                groups[-1].append(g)
    else:
        # Attribute grouping: every row must carry the group_by attribute.
        groups = defaultdict(list)
        for g in read_gtf_file(infile, comment):
            assert group_by in g.attr, 'ERROR: All row must contain group_by attribute "%s"' % group_by
            groups[g.attr[group_by]].append(g)
        groups = groups.values()
    clusters = []
    for grp in groups:
        # NOTE(review): assumes each group contains at least one 'gene'
        # row; spn[0] raises IndexError otherwise.
        spn = [g for g in grp if g.feature == 'gene']
        spn_attr = spn[0].attr
        clusters.append(GTFCluster([g for g in grp if g.feature != 'gene']))
        clusters[-1].attr = spn_attr
    return clusters
| mlbendall/telebuilder | telebuilder/utils/gtfutils.py | gtfutils.py | py | 6,519 | python | en | code | 2 | github-code | 90 |
29596181293 | import pandas as pd
import numpy as np
import numpy.random as r
import scipy.io as sio
import scipy.stats
import matplotlib.pyplot as plt
import random
from collections import Counter
pi = np.pi
def LDA(mu0, mu1, cov, X):
    """Linear discriminant score w.X - c with w = cov^{-1}(mu1 - mu0);
    positive values favor class 1.  mu0/mu1 are column vectors, X has
    one column per sample."""
    cov_inv = np.linalg.inv(cov)
    projection = (mu1 - mu0).T.dot(cov_inv).dot(X)
    offset = 0.5 * (mu1.T.dot(cov_inv).dot(mu1) - mu0.T.dot(cov_inv).dot(mu0))
    return projection - offset
def QDA(mu0, mu1, cov0, cov1, X):
    """Quadratic discriminant score (class 1 vs class 0); positive favors
    class 1.  mu0/mu1 are column vectors, cov0/cov1 the per-class
    covariances, X has one column per sample."""
    inv0 = np.linalg.inv(cov0)
    inv1 = np.linalg.inv(cov1)
    # Difference of squared Mahalanobis distances to each class mean.
    term1 = 1/2*(((X-mu0).T).dot(inv0).dot((X-mu0))-((X-mu1).T).dot(inv1).dot((X-mu1)))
    # BUG FIX: the original called math.log, but `math` is never imported
    # in this module (NameError at runtime); np.log is equivalent here.
    term2 = 1/2*np.log(np.linalg.det(cov0)/np.linalg.det(cov1))
    return term1+term2
from sklearn.datasets import load_iris
iris=load_iris()
# You have two features and two classifications
data_0, data_1 = iris.data[:,1:3][:50], iris.data[:,1:3][50:100]
# BUG FIX: `data` (the pooled 100x2 sample used for the shared covariance,
# the scores, and the scatter plots below) was referenced but never defined.
data = np.vstack((data_0, data_1))
mu0 = np.mean(data_0,axis=0).reshape((2,1))
mu1 = np.mean(data_1,axis=0).reshape((2,1))
cov = np.cov(data,rowvar=False).reshape((2,2))      # pooled covariance (LDA)
cov0 = np.cov(data_0,rowvar=False).reshape((2,2))   # per-class covariances (QDA)
cov1 = np.cov(data_1,rowvar=False).reshape((2,2))
# Classify every sample with both discriminants (score > 0 => class 1).
LDA_score = LDA(mu0, mu1, cov, data.T).reshape(-1)
LDA_result = np.array(LDA_score>0)
QDA_score = np.diag(QDA(mu0, mu1, cov0, cov1, data.T))
QDA_result = np.array(QDA_score>0)
# LDA plot: linear boundary w.x + b = 0, drawn as y(x).
w = (mu1-mu0).T.dot(np.linalg.inv(cov))
b = -1/2*((mu1.T).dot(np.linalg.inv(cov)).dot(mu1)-(mu0.T).dot(np.linalg.inv(cov)).dot(mu0))[0][0]
x = np.arange(0,6,0.1)
y = (w[0,0]*x+b)/(-w[0,1])
plt.scatter(x=data[:,0],y=data[:,1],c= LDA_result.reshape(-1))
plt.plot(x,y)
plt.xlabel('data[:,0]')
plt.ylabel('data[:,1]')
plt.title('LDA')
plt.show()
# QDA plot: implicit quadratic boundary rendered with sympy.
from sympy import plot_implicit, cos, sin, symbols, Eq, And
x, y = symbols('x y')
X = np.array([x,y]).reshape((2,1))
diff1 = np.linalg.inv(cov0) - np.linalg.inv(cov1)
diff2 = np.linalg.inv(cov0).dot(mu0) - np.linalg.inv(cov1).dot(mu1)
threshold = 1/2*((mu1.T).dot(np.linalg.inv(cov)).dot(mu1)-(mu0.T).dot(np.linalg.inv(cov)).dot(mu0))[0][0] + \
    1/2*np.log(np.linalg.det(cov1)/np.linalg.det(cov0))
expr = 1/2*(X.T.dot(diff1).dot(X)) - X.T.dot(diff2) - threshold
plt2 = plot_implicit(Eq(expr[0,0],0),(x,-5,5), (y,-6,6))
plt2._backend.ax.scatter(data_0[:,0],data_0[:,1],label='data_0')
plt2._backend.ax.scatter(data_1[:,0],data_1[:,1],label='data_1')
plt2._backend.save('plt2.png') | Loielaine/Machine_learning | hw4/p6_LDA_QDA.py | p6_LDA_QDA.py | py | 2,264 | python | en | code | 1 | github-code | 90 |
18608212016 | import os
import cv2
import numpy as np
class betterFrameCacher:
    """Tracks the highest-scoring frame seen so far.

    A frame's score is the variance of its normalized 16-bin grayscale
    histogram; scores below `threshold` mark the cached frame as boring.
    """
    def __init__(self, threshold):
        self.bestFrame = None
        self.prev = -1          # best score so far; -1 means nothing cached yet
        self.isBoring = True
        self.threshold = threshold

    def getScore(self, newFrame):
        """Histogram-variance score of a BGR frame."""
        grayFrame = cv2.cvtColor(newFrame, cv2.COLOR_BGR2GRAY)
        hist = cv2.calcHist([grayFrame], [0], None, [16], [0, 256])
        hist = np.reshape(hist, [len(hist)])
        hist = hist / np.sum(hist)
        bins = np.array(range(len(hist)))
        # Var[B] = E[B^2] - E[B]^2 over the histogram distribution.
        return np.sum(bins * bins * hist) - np.sum(bins * hist) ** 2

    def update(self, newFrame):
        """Adopt newFrame (and refresh the boring flag) only if it beats
        the current best score."""
        score = self.getScore(newFrame)
        if score > self.prev:
            self.bestFrame = newFrame
            self.prev = score
            self.isBoring = score < self.threshold

    def set(self, newFrame):
        """Unconditionally replace the cached frame with newFrame."""
        self.bestFrame = newFrame
        self.prev = self.getScore(newFrame)
        self.isBoring = self.prev < self.threshold
# Get histograms of each
def video_pipeline(inputFile, outputFolder):
    """Split a video into sections at large perceptual-hash jumps.

    For each detected section, the highest-variance frame is written under
    <outputFolder>/frames and a dict with start_time, end_time, type
    ('still' or 'subvideo') and fname is appended to the returned list.
    The globally best frame is also saved as <outputFolder>/thumbnail.png.
    Progress percentages (50-100) are printed as bare integers.
    """
    # Video pipeline started.
    # Create the /frames directory
    if not os.path.exists(outputFolder+"/frames"):
        os.mkdir(outputFolder+"/frames")
    cap = cv2.VideoCapture(inputFile)
    prevFrame = None
    frame = None
    sequenceFrames = 0       # frames accumulated since the last recorded cut
    videoSequenceFrames = 0  # NOTE(review): never reset between sections — confirm intended
    frameCount = 0
    fps = cap.get(cv2.CAP_PROP_FPS)
    fcount = cap.get(cv2.CAP_PROP_FRAME_COUNT)
    results = []
    currentSectionIsVideo = False
    sequencebestFrameCache = betterFrameCacher(threshold=0.05)  # best frame of current section
    globalBestFrameCache = betterFrameCacher(threshold=0.05)    # best frame of whole video
    def writeRecord():
        # Flush the current section: save its best frame and append a record.
        nonlocal frameCount
        nonlocal sequenceFrames
        nonlocal currentSectionIsVideo
        nonlocal sequencebestFrameCache
        # ignore snippets that are less than 0.1s
        if sequenceFrames < 0.1*fps:
            return
        # Also ignore baseline-boring frames
        if sequencebestFrameCache.isBoring:
            return
        # Write a record
        protoResult = {
            "start_time": (frameCount - sequenceFrames)/fps,
            "end_time": frameCount/fps,
            "type": "still",
            "fname": f"frames/{frameCount - sequenceFrames}_{frameCount}.jpg"
        }
        # wrote record {protoResult['fname']}
        # calculate percentage
        print(int(50 + (frameCount / fcount) * 50),flush=True)
        # Behave differently for subvideos and stills
        # Arbitrary threshold of 10s to be counted as a real video
        if currentSectionIsVideo and videoSequenceFrames > 10*fps:
            # Update metadata
            # h264 video files are playable by chrome, the mp4s cv2 can generate aren't :(
            protoResult["type"] = "subvideo"
            # NOTE(review): both branches currently write the same still image.
            cv2.imwrite(outputFolder+"/" +
                        protoResult["fname"], sequencebestFrameCache.bestFrame)
        else:
            cv2.imwrite(outputFolder+"/" +
                        protoResult["fname"], sequencebestFrameCache.bestFrame)
        results.append(protoResult)
        # Reset some stuff
        currentSectionIsVideo = False
        sequenceFrames = 0
    def bitsDifferent(phash1, phash2):
        # calculate bitwise difference (Hamming distance of the two 64-bit pHashes)
        hnorm = 0
        for byteIndex in range(8):
            xorPhash = phash1[0][byteIndex] ^ phash2[0][byteIndex]
            for bitIndex in range(8):
                if (xorPhash >> bitIndex) & 1 == 1:
                    hnorm += 1
        return hnorm
    oldPhash = None
    oldDiffs = []  # rolling window (max 10) of recent large hash differences
    # Process all frames
    while True:
        ret, frame = cap.read()
        if not ret:
            break
        phash = cv2.img_hash.pHash(frame)
        if prevFrame is not None:
            difference = bitsDifferent(phash, oldPhash)
            # Check if there was a huge difference (i.e. a slide has popped up, etc)
            if difference > 2:
                # Only cut when the jump stands out against recent jumps;
                # otherwise we are inside continuous motion (a subvideo).
                significant=True
                for d in oldDiffs:
                    if difference<d+5:
                        significant=False
                if significant:
                    writeRecord()
                    # reset most interesting frame
                    sequencebestFrameCache.set(frame)
                else:
                    videoSequenceFrames += 1
                    currentSectionIsVideo = True
                oldDiffs.append(difference)
                if len(oldDiffs) > 10:
                    oldDiffs.pop(0)
        # Also pick out the most interesting frames from the picture, lest we get weird fade-ins
        sequencebestFrameCache.update(frame)
        globalBestFrameCache.update(frame)
        prevFrame = frame
        oldPhash = phash
        sequenceFrames += 1
        frameCount += 1
        if frameCount % 1000 == 0:
            pass
            # Processed up to {frameCount/fps}s
    # write the final slide
    writeRecord()
    # write the global most interesting slide so we can have a thumbnail for the video
    cv2.imwrite(outputFolder+"/thumbnail.png", globalBestFrameCache.bestFrame)
    return results
if __name__ == "__main__":
    # Smoke test: run the pipeline on a local file, writing into the cwd.
    result = video_pipeline("audiotest.webm", ".")
    #print(result)
| acenturyandabit/lec2note | lec2note_main/video_pipeline.py | video_pipeline.py | py | 5,179 | python | en | code | 0 | github-code | 90 |
13491899267 | """
数据包的创建和导入
"""
import tensorflow as tf
import cv2
import numpy as np
import os
import random
import sys
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
from tflearn.layers.conv import global_avg_pool
from tensorflow.contrib.layers import batch_norm,flatten
from tensorflow.contrib.framework import arg_scope
"""
层数和参数的定义
conv、pooling、basis、weight
基本的包的导入和创建
"""
growth_k = 12             # DenseNet growth rate k (filters added per layer)
nb_block = 2              # number of (dense block + transition) pairs
init_learning_rate = 1e-4
epsilon = 1e-8            # AdamOptimizer epsilon
dropout_rate = 0.2
nesterov_momentum = 0.9   # NOTE(review): defined but not used below
weight_decay = 1e-4       # NOTE(review): defined but not used below
class_num = 2             # binary classification: main vs minor faces
total_epochs = 50
"""
测试数据的输入
1.获取文件中的图像size
2.并对文件中的图像进行标签序列化
3.讲文件中所有图像进行归回处理
"""
size = 64      # images are padded to squares and resized to size x size
images = []    # filled by readData()
labels = []    # one source-directory path per image, encoded one-hot later
main_faces='E:/DeepLearning/ComProject/Project/Project/My_project/main_faces'
minor_faces='E:/DeepLearning/ComProject/Project/Project/My_project//minor_faces'
# BUG FIX: the original tested `main_faces == 0`, comparing a path string to
# the integer 0 — always False, so the emptiness branch never fired.  Check
# whether the directory actually exists instead.
if not os.path.exists(main_faces):
    print('main_faces:为空')
else:
    print('文件不为空')
def getPaddingSize(img):
    """Return (top, bottom, left, right) border widths that pad `img`
    (an h x w x channels array) out to a square of side max(h, w).

    The shorter dimension gets the difference split across its two
    borders, with the extra pixel (for odd differences) on the
    bottom/right side.
    """
    h, w, _ = img.shape
    top, bottom, left, right = 0, 0, 0, 0
    longest = max(h, w)
    if w < longest:
        # Narrow image: pad the left/right borders.
        diff = longest - w
        left = diff // 2
        right = diff - left
    elif h < longest:
        # Short image: pad the top/bottom borders.
        diff = longest - h
        top = diff // 2
        bottom = diff - top
    return top, bottom, left, right
"""
读入数据imagedata
"""
def readData(path , h=size, w=size):
    """Load every .jpg under `path`, pad it to a square, resize to (h, w),
    and append the image and its source directory to the module-level
    `images` and `labels` lists."""
    for filename in os.listdir(path):
        if filename.endswith('.jpg'):
            filename = path + '/' + filename
            img = cv2.imread(filename)
            top,bottom,left,right = getPaddingSize(img)
            """
            Pad the image borders so face edges are preserved, then resize
            to the target dimensions (cv2.copyMakeBorder with constant
            black borders).
            """
            img = cv2.copyMakeBorder(img, top, bottom, left, right, cv2.BORDER_CONSTANT, value=[0,0,0])
            img = cv2.resize(img, (h, w))
            # NOTE(review): leftover debug prints, executed once per image.
            print("h",h)
            print("w",w)
            images.append(img)
            labels.append(path)
"""
图像路径的输入
将图像和标签转化成对应的数组array
"""
# Load both classes, then convert images and labels to numpy arrays.
readData(main_faces)
readData(minor_faces)
images = np.array(images)
print(images.shape)
# One-hot labels: [0,1] for main_faces images, [1,0] for everything else.
labels = np.array([[0,1] if lab == main_faces else [1,0] for lab in labels])
print(labels)
"""
模型训练和创建
目前使用cnn模型进行测试
后期改成动态路由
胶囊网络
1.随机划分测试集与训练集
2.参数:图片数据的总数,图片的高、宽、通道
3.# 将数据转换成小于1的数
"""
train_x,test_x,train_y,test_y = train_test_split(images,
labels,
test_size=0.05,
random_state=0)
train_x = train_x.reshape(train_x.shape[0], size, size, 3)
"""
图像的类型的转化
reshape
转化成三通道rgb
将图像数据归一化处理
"""
test_x = test_x.reshape(test_x.shape[0], size, size, 3)
train_x = train_x.astype('float32')/255.0
test_x = test_x.astype('float32')/255.0
print('train size:%s, test size:%s' % (len(train_x), len(test_x)))
# 图片块,每次取100张图片
batch_size = 32
num_batch = len(train_x) // batch_size
x = tf.placeholder(tf.float32, [None, 4096])
x = tf.placeholder(tf.float32, [None, size, size, 3])
#print(x)
batch_images = tf.reshape(x, [-1, size, size, 3])
label = tf.placeholder(tf.float32, shape=[None, 2])
print(label)
"""
层数和基本参数的定义
经典模式
"""
def conv_layer(input, filter, kernel, stride=1, layer_name="conv"):
    """2-D convolution with SAME padding, wrapped in a name scope."""
    with tf.name_scope(layer_name):
        return tf.layers.conv2d(inputs=input,
                                filters=filter,
                                kernel_size=kernel,
                                strides=stride,
                                padding='SAME')
"""
全局池化层的定义和创建
"""
def Global_Average_Pooling(x, stride=1):
    """Global average pooling over the spatial dimensions.

    `stride` is accepted for signature compatibility but ignored by
    tflearn's global_avg_pool.
    """
    return global_avg_pool(x, name='Global_Average_Pooling')
"""
图像归一化处理
"""
def Batch_Normalization(x, training, scope):
    """Batch normalization that switches behavior on the `training` flag.

    tf.cond picks the training branch (reuse=None: creates the variables
    and updates the moving statistics in place) or the inference branch
    (reuse=True: reuses the same variables with their moving statistics).
    """
    with arg_scope([batch_norm],
                   scope=scope,
                   updates_collections=None,
                   decay=0.9,
                   center=True,
                   scale=True,
                   zero_debias_moving_mean=True) :
        return tf.cond(training,
                       lambda : batch_norm(inputs=x, is_training=training, reuse=None),
                       lambda : batch_norm(inputs=x, is_training=training, reuse=True))
"""
dropout的定义和创建
激活函数relu创建
"""
def Drop_out(x, rate, training) :
    """Dropout wrapper; only active when `training` is True."""
    return tf.layers.dropout(inputs=x, rate=rate, training=training)
def Relu(x):
    """ReLU activation wrapper."""
    return tf.nn.relu(x)
def Average_pooling(x, pool_size=[2,2], stride=2, padding='VALID'):
    """2x2/stride-2 average pooling by default (VALID padding)."""
    return tf.layers.average_pooling2d(inputs=x,
                                       pool_size=pool_size,
                                       strides=stride,
                                       padding=padding)
def Max_Pooling(x, pool_size=[3,3],
                stride=2,
                padding='VALID'):
    """3x3/stride-2 max pooling by default (VALID padding)."""
    return tf.layers.max_pooling2d(inputs=x,
                                   pool_size=pool_size,
                                   strides=stride,
                                   padding=padding)
def Concatenation(layers) :
    """Concatenate feature maps along the channel axis (NHWC axis 3)."""
    return tf.concat(layers, axis=3)
def Linear(x) :
    """Final fully-connected layer producing `class_num` logits."""
    return tf.layers.dense(inputs=x, units=class_num, name='linear')
"""
密集连接块和模型的创建
"""
class DenseNet():
    """DenseNet graph builder: constructing an instance builds the whole
    network and exposes the logits tensor as `self.model`.

    Args:
        x: input image tensor (NHWC).
        nb_blocks (int): number of (dense block + transition) pairs before
            the final dense block.
        filters (int): growth rate k.
        training: boolean placeholder controlling batch norm / dropout.
    """
    def __init__(self, x, nb_blocks, filters, training):
        self.nb_blocks = nb_blocks
        self.filters = filters
        self.training = training
        self.model = self.Dense_net(x)
    def bottleneck_layer(self, x, scope):
        """BN-ReLU-Conv(1x1, 4k)-Dropout, then BN-ReLU-Conv(3x3, k)-Dropout."""
        # print(x)
        with tf.name_scope(scope):
            x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')
            x = Relu(x)
            x = conv_layer(x, filter=4 * self.filters, kernel=[1,1], layer_name=scope+'_conv1')
            x = Drop_out(x, rate=dropout_rate, training=self.training)
            x = Batch_Normalization(x, training=self.training, scope=scope+'_batch2')
            x = Relu(x)
            x = conv_layer(x, filter=self.filters, kernel=[3,3], layer_name=scope+'_conv2')
            x = Drop_out(x, rate=dropout_rate, training=self.training)
            return x
    def transition_layer(self, x, scope):
        """BN-ReLU-Conv(1x1)-Dropout, then 2x2 average pooling (halves H, W)."""
        with tf.name_scope(scope):
            x = Batch_Normalization(x, training=self.training, scope=scope+'_batch1')
            x = Relu(x)
            x = conv_layer(x, filter=self.filters, kernel=[1,1], layer_name=scope+'_conv1')
            x = Drop_out(x, rate=dropout_rate, training=self.training)
            x = Average_pooling(x, pool_size=[2,2], stride=2)
            return x
    def dense_block(self, input_x, nb_layers, layer_name):
        """Stack nb_layers bottleneck layers, each fed the channel-wise
        concatenation of all previous outputs (dense connectivity)."""
        with tf.name_scope(layer_name):
            layers_concat = list()
            layers_concat.append(input_x)
            x = self.bottleneck_layer(input_x, scope=layer_name + '_bottleN_' + str(0))
            layers_concat.append(x)
            for i in range(nb_layers - 1):
                x = Concatenation(layers_concat)
                x = self.bottleneck_layer(x, scope=layer_name + '_bottleN_' + str(i + 1))
                layers_concat.append(x)
            x = Concatenation(layers_concat)
            return x
    def Dense_net(self, input_x):
        """Assemble the full network: stem conv/pool, dense blocks with
        transitions, a final dense block, then BN-ReLU-GAP-Linear logits."""
        x = conv_layer(input_x, filter=2 * self.filters, kernel=[7,7], stride=2, layer_name='conv0')
        x = Max_Pooling(x, pool_size=[3,3], stride=2)
        """
        nb_blocks pairs of (4-layer dense block, transition layer).
        """
        for i in range(self.nb_blocks) :
            # 6 -> 12 -> 48
            x = self.dense_block(input_x=x, nb_layers=4, layer_name='dense_'+str(i))
            x = self.transition_layer(x, scope='trans_'+str(i))
        x = self.dense_block(input_x=x, nb_layers=32, layer_name='dense_final')
        # Classification head.
        x = Batch_Normalization(x, training=self.training, scope='linear_batch')
        x = Relu(x)
        x = Global_Average_Pooling(x)
        x = flatten(x)
        x = Linear(x)
        return x
training_flag = tf.placeholder(tf.bool)
learning_rate = tf.placeholder(tf.float32, name='learning_rate')
logits = DenseNet(x=batch_images, nb_blocks=nb_block, filters=growth_k, training=training_flag).model
cost = tf.reduce_mean(tf.nn.softmax_cross_entropy_with_logits(labels=label, logits=logits))
"""
优化器
损失函数
正则化定义和创建
"""
optimizer = tf.train.AdamOptimizer(learning_rate=learning_rate, epsilon=epsilon)
train = optimizer.minimize(cost)
correct_prediction = tf.equal(tf.argmax(logits, 1), tf.argmax(label, 1))
accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
tf.summary.scalar('loss', cost)
tf.summary.scalar('accuracy', accuracy)
merged = tf.summary.merge_all()
saver = tf.train.Saver(tf.global_variables())
print("程序结束")
"""
模型的训练
"""
with tf.Session() as sess:
    print("sesson")
    sess.run(tf.global_variables_initializer())
    summary_writer = tf.summary.FileWriter('../tmp', graph=tf.get_default_graph())
    # merged = tf.summary.merge_all()
    # writer = tf.summary.FileWriter('./logs', sess.graph)
    print("www")
    global_step = 0
    epoch_learning_rate = init_learning_rate
    for epoch in range(total_epochs):
        if epoch==(total_epochs*0.5) or epoch==(total_epochs*0.75):
            # Decay the learning rate by 10x at 50% and 75% of training.
            epoch_learning_rate=epoch_learning_rate/10
        for i in range(num_batch):
            # Slice the next training mini-batch.
            batch_x = train_x[i*batch_size : (i+1)*batch_size]
            batch_y = train_y[i*batch_size : (i+1)*batch_size]
            train_feed_dict = {x: batch_x,
                               label: batch_y,
                               learning_rate: epoch_learning_rate,
                               training_flag : True}
            _, loss = sess.run([train, cost], feed_dict=train_feed_dict)
            # Log training accuracy every 100 global steps.
            if (epoch*num_batch+i) % 100 == 0:
                global_step += 100
                train_summary, train_accuracy = sess.run([merged, accuracy], feed_dict=train_feed_dict)
                print("Step:", epoch*num_batch+i, "Loss:", loss, "Training accuracy:", train_accuracy)
        # Evaluate on the held-out split once per epoch (training_flag False
        # switches batch norm / dropout to inference mode).
        test_feed_dict = {
            x: test_x,
            label: test_y,
            learning_rate: epoch_learning_rate,
            training_flag : False
        }
        print("lllll")
        accuracy_rates = sess.run(accuracy, feed_dict=test_feed_dict)
        print('Epoch:', '%04d' % (epoch + 1), '/ Accuracy =', accuracy_rates)
| LLAYGDD/FaceRecognition_project | FaceRecognition_project/Model_Code/Densenet_train.py | Densenet_train.py | py | 11,600 | python | en | code | 1 | github-code | 90 |
12684452726 | import pandas as pd
import numpy as np
from .utilities import can_be_float
import csv
def csvToMatrix(csv_name):
    """Takes the name of the csv file and returns the 2D matrix version of the file.

    The delimiter is auto-detected with csv.Sniffer; the result is a
    (rows+1) x cols array of fixed-width strings with the header in row 0.

    Args:
        csv_name (str) : the name of the csv file

    Returns:
        result_mat (2d array) : Matrix version of the csv file
    """
    with open(csv_name, 'r') as sheet:
        delimiter = csv.Sniffer().sniff(sheet.read()).delimiter
    frame = pd.read_csv(csv_name, dtype=str, delimiter=delimiter, na_filter=False)
    header = np.asarray(frame.columns, dtype='U128')
    body = frame.to_numpy(dtype='U128')
    return np.vstack([header, body])
def has_header(mat):
    """Determine whether the spreadsheet has a header row.

    A header is assumed when row 0 contains no numeric-looking cells but
    at least one later row does.

    Args:
        mat (np.array) : a 2D array of strings

    Returns:
        header (int) : 1 if a header row is present (rows to skip
            initially), else 0
    """
    # Named inner function instead of the original lambda assignment (PEP 8
    # E731); behavior is unchanged.
    def all_strs(i):
        # True when no cell in row i parses as a float.
        return not any(map(can_be_float, mat[i]))
    return int(all_strs(0) and
               not all(map(all_strs, range(1, mat.shape[0]))))
| Shubhanshi-Gaudani/338-dirtydata | src/csv_to_matrix.py | csv_to_matrix.py | py | 1,062 | python | en | code | 0 | github-code | 90 |
69984263978 | __all__ = ["PowerState", "Pulse", "Mini", "DiyPlug"]
from pyiot.watchers.sonoff import EwelinkWatcher
from pyiot.connections.http import HttpConnection
from pyiot.traits import OnOff
from pyiot.watchers import Watcher
from pyiot.discover.sonoff import DiscoverSonoff
from pyiot import BaseDevice, Attribute
from enum import Enum
import json
from typing import Dict, Any
class PowerState(Enum):
    """Behavior of the relay when mains power is restored.

    ON:   the device is on when power supply is recovered.
    OFF:  the device is off when power supply is recovered.
    STAY: the device keeps whatever state it was in before the power
          supply was lost.
    """
    ON = "on"
    OFF = "off"
    STAY = "stay"
class Pulse(Enum):
    """Inching (pulse) feature switch.

    ON:  activate the inching function.
    OFF: disable the inching function.
    """
    ON = "on"
    OFF = "off"
class BaseSONOFFDIYDevice(BaseDevice, OnOff):
    """Base class for a Sonoff DIY-mode device.

    To pair a device, create a hotspot with WiFi SSID ``sonoffDiy`` and
    password ``20170618sn``, discover the device on that network, and once
    you have its IP use :meth:`set_wifi` to point it at your real network.
    """

    def __init__(self, sid: str, ip: str = "", port: int = 8081):
        """
        Args:
            sid: device id; used to match incoming watcher reports.
            ip: device address; when empty, the device is discovered by sid.
            port: HTTP API port of the DIY-mode firmware (default 8081).
        """
        super().__init__(sid)
        self.status.register_attribute(Attribute("ip", str))
        self.status.register_attribute(Attribute("port", int))
        self.status.register_attribute(Attribute("startup", str))
        self.status.register_attribute(Attribute("pulse", str))
        self.status.register_attribute(Attribute("pulseWidth", int))
        self.status.register_attribute(Attribute("rrsi", int))
        self.status.register_attribute(Attribute("ssid", str))
        self.status.register_attribute(Attribute("sledOnline", str))
        self.status.add_alias("switch", "power")
        if ip:
            self.status.update({"ip": ip, "port": port})
        else:
            self._find_device()
        self.conn = HttpConnection(
            url=f"http://{self.status.ip}", port=self.status.port
        )
        self.watcher = Watcher(EwelinkWatcher())
        self.watcher.add_report_handler(self.report)

    def report(self, data: Dict[str, Any]) -> None:
        """Watcher callback: merge a status report addressed to this sid."""
        print(data)  # NOTE(review): leftover debug print
        if self.status.sid == data.get("sid"):
            print("updateing")  # NOTE(review): leftover debug print
            self.status.update(data.get("data", {}))

    def _find_device(self) -> None:
        """Populate status (ip, port, ...) via discovery, matching on sid."""
        dsc = DiscoverSonoff()
        dev = dsc.find_by_sid(self.status.sid)
        if dev:
            self.status.update(dev)

    def on(self) -> None:
        """Set power state on"""
        self.conn.post(path="zeroconf/switch", data=self._cmd(switch="on"))

    def off(self) -> None:
        """Set power state off"""
        self.conn.post(path="zeroconf/switch", data=self._cmd(switch="off"))

    def is_on(self) -> bool:
        """Return True when the cached power state is 'on'."""
        return self.status.power == "on"

    def is_off(self) -> bool:
        """Return True when the cached power state is 'off'."""
        return self.status.power == "off"

    def set_power_on_state(self, state: PowerState) -> None:
        """Set what device should do when power supply is recovered

        Args:
            state (PowerState)
        """
        self.conn.post(path="zeroconf/startup", data=self._cmd(startup=state.value))

    def set_pulse(self, pulse: Pulse, pulse_width: int = 500) -> None:
        """Set pulse

        Args:
            pulse (Pulse) Pulse.ON: activate the inching function;
                Pulse.OFF: disable the inching function
                (annotation corrected from `str`: the body reads
                `pulse.value`, so a Pulse member is required)
            pulse_width (int) Required when "pulse" is on, pulse time length,
                positive integer, ms, only supports multiples of 500
                in range of 500~36000000
        """
        self.conn.post(
            path="zeroconf/pulse",
            data=self._cmd(pulse=pulse.value, pulseWidth=pulse_width),
        )

    def set_wifi(self, ssid: str, password: str) -> None:
        """Tell the device to join the given WiFi network."""
        self.conn.post(
            path="zeroconf/wifi", data=self._cmd(ssid=ssid, password=password)
        )

    def get_info(self) -> Dict[str, Any]:
        """Fetch the device's info payload; returns {} on failure.

        The firmware sometimes returns the `data` field as a JSON string,
        so both dict and str payloads are handled.
        """
        ret: Dict[str, Any] = {}
        resp = self.conn.post(path="zeroconf/info", data=self._cmd())
        if resp.code == 200:
            _data = resp.json.get("data", {})
            if type(_data) == str:
                ret = json.loads(_data)
            else:
                ret = _data.copy()
        return ret

    def get_signal_strength(self) -> int:
        """The WiFi signal strength currently received by the device, negative integer, dBm.
        Returns 0 when the HTTP call fails."""
        resp = self.conn.post(path="zeroconf/signal_strength", data=self._cmd())
        if resp.code == 200:
            return resp.json
        else:
            return 0

    def _cmd(self, **kwargs) -> Dict[str, Any]:
        """Wrap keyword arguments in the DIY-mode request envelope."""
        return {"deviceid": self.status.sid, "data": kwargs}
class Mini(BaseSONOFFDIYDevice):
    """SONOFF Mini switch running the DIY-mode firmware."""
    pass
class DiyPlug(BaseSONOFFDIYDevice):
    """SONOFF DIY-mode smart plug."""
    pass
| angrysoft/pyiot | pyiot/sonoff/diy.py | diy.py | py | 4,768 | python | en | code | 0 | github-code | 90 |
23977269095 | import random
import unittest
import numpy
import torch
import torch.nn.functional as F
import nni
from nni.compression.pytorch.pruning import (
LinearPruner,
AGPPruner,
LotteryTicketPruner,
SimulatedAnnealingPruner,
AutoCompressPruner,
AMCPruner
)
from nni.algorithms.compression.v2.pytorch.utils import compute_sparsity_mask2compact
class TorchModel(torch.nn.Module):
    """Small LeNet-style CNN for 28x28 single-channel inputs (10 classes)."""
    def __init__(self):
        super().__init__()
        nn = torch.nn
        # Submodule creation order is preserved so parameter initialization
        # under a fixed seed matches exactly.
        self.conv1 = nn.Conv2d(1, 10, 5, 1)
        self.bn1 = nn.BatchNorm2d(10)
        self.conv2 = nn.Conv2d(10, 10, 5, 1)
        self.bn2 = nn.BatchNorm2d(10)
        self.fc1 = nn.Linear(4 * 4 * 10, 100)
        self.fc2 = nn.Linear(100, 10)
    def forward(self, x):
        """Return per-class log-probabilities for a batch of images."""
        x = F.max_pool2d(F.relu(self.bn1(self.conv1(x))), 2, 2)
        x = F.max_pool2d(F.relu(self.bn2(self.conv2(x))), 2, 2)
        x = x.view(x.size(0), -1)
        return F.log_softmax(self.fc2(F.relu(self.fc1(x))), dim=1)
def trainer(model, optimizer, criterion):
    """Run 10 optimization steps on random 10x1x28x28 batches with labels 0..9."""
    model.train()
    for _ in range(10):
        batch = torch.rand(10, 1, 28, 28)
        targets = torch.arange(10, dtype=torch.int64)
        optimizer.zero_grad()
        loss = criterion(model(batch), targets)
        loss.backward()
        optimizer.step()
def get_optimizer(model):
    """Return a traced SGD optimizer (nni.trace records the constructor call
    so NNI can re-instantiate the optimizer during compression)."""
    return nni.trace(torch.optim.SGD)(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
criterion = torch.nn.CrossEntropyLoss()
def evaluator(model):
    """Stand-in evaluation metric: ignores the model and returns a random
    score in [0, 1); sufficient for exercising pruner control flow."""
    return random.random()
def finetuner(model):
    """Fine-tune the model for one trainer pass using a fresh SGD optimizer."""
    optimizer = torch.optim.SGD(model.parameters(), lr=0.1, momentum=0.9, weight_decay=5e-4)
    trainer(model, optimizer, criterion)
class IterativePrunerTestCase(unittest.TestCase):
    """Smoke tests for NNI's iterative pruners on the small CNN above.

    Each test prunes Conv2d layers toward the configured sparsity and
    asserts the measured total sparsity of the best result lands within
    a small tolerance of the target (0.78-0.82 around 0.8).
    """
    def test_linear_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = LinearPruner(model, config_list, 'level', 3, log_dir='../../../logs')
        pruner.compress()
        _, pruned_model, masks, _, _ = pruner.get_best_result()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_agp_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = AGPPruner(model, config_list, 'level', 3, log_dir='../../../logs')
        pruner.compress()
        _, pruned_model, masks, _, _ = pruner.get_best_result()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_lottery_ticket_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'sparsity': 0.8}]
        pruner = LotteryTicketPruner(model, config_list, 'level', 3, log_dir='../../../logs')
        pruner.compress()
        _, pruned_model, masks, _, _ = pruner.get_best_result()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_simulated_annealing_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}]
        pruner = SimulatedAnnealingPruner(model, config_list, evaluator, start_temperature=40, log_dir='../../../logs')
        pruner.compress()
        _, pruned_model, masks, _, _ = pruner.get_best_result()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def test_amc_pruner(self):
        # AMC drives pruning with a DDPG agent; this test only checks that
        # compression runs to completion (no sparsity assertion).
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.5, 'max_sparsity_per_layer': 0.8}]
        dummy_input = torch.rand(10, 1, 28, 28)
        ddpg_params = {'hidden1': 300, 'hidden2': 300, 'lr_c': 1e-3, 'lr_a': 1e-4, 'warmup': 5, 'discount': 1.,
                       'bsize': 64, 'rmsize': 100, 'window_length': 1, 'tau': 0.01, 'init_delta': 0.5, 'delta_decay': 0.99,
                       'max_episode_length': 1e9, 'epsilon': 50000}
        pruner = AMCPruner(10, model, config_list, dummy_input, evaluator, finetuner=finetuner, ddpg_params=ddpg_params, target='flops', log_dir='../../../logs')
        pruner.compress()
class FixSeedPrunerTestCase(unittest.TestCase):
    """Pruner tests that require deterministic RNG state.

    setUp pins the random/numpy/torch seeds; tearDown re-randomizes them
    so later tests are not accidentally deterministic.
    """
    def test_auto_compress_pruner(self):
        model = TorchModel()
        config_list = [{'op_types': ['Conv2d'], 'total_sparsity': 0.8}]
        admm_params = {
            'trainer': trainer,
            'traced_optimizer': get_optimizer(model),
            'criterion': criterion,
            'iterations': 10,
            'training_epochs': 1
        }
        sa_params = {
            'evaluator': evaluator,
            'start_temperature': 40
        }
        pruner = AutoCompressPruner(model, config_list, 10, admm_params, sa_params=sa_params, log_dir='../../../logs')
        pruner.compress()
        _, pruned_model, masks, _, _ = pruner.get_best_result()
        sparsity_list = compute_sparsity_mask2compact(pruned_model, masks, config_list)
        print(sparsity_list)
        assert 0.78 < sparsity_list[0]['total_sparsity'] < 0.82
    def setUp(self) -> None:
        # fix seed in order to solve the random failure of ut
        random.seed(1024)
        numpy.random.seed(1024)
        torch.manual_seed(1024)
    def tearDown(self) -> None:
        # reset seed
        import time
        now = int(time.time() * 100)
        random.seed(now)
        seed = random.randint(0, 2 ** 32 - 1)
        random.seed(seed)
        numpy.random.seed(seed)
        torch.manual_seed(seed)
if __name__ == '__main__':
unittest.main()
| linbinskn/nni_movement | test/ut/compression/v2/test_iterative_pruner_torch.py | test_iterative_pruner_torch.py | py | 5,828 | python | en | code | 0 | github-code | 90 |
18818290181 | import os
import glob
import matplotlib.pyplot as plt
import torch
import torchvision
from tqdm import tqdm
from PIL import Image
from utils import make_gif, make_gif_from_tensor, save_images
from modules import UNet_conditional
from ddpm_conditional import Diffusion
# import japanize_matplotlib
if __name__ == '__main__':
    # Masked regeneration with a conditional diffusion model on CIFAR-10 (64x64):
    # noise a real image forward to step t_start, then denoise it back while
    # regenerating only the masked region under a different class condition.
    # load model
    device = "cuda"
    model = UNet_conditional(num_classes=10).to(device)
    ckpt = torch.load('models/trained_cifar10/conditional_ckpt.pt')
    model.load_state_dict(ckpt)
    model.eval()
    # for cuDNN: force deterministic convolution algorithms
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = False
    # label
    """
    label 0 -> 飛行機
    label 1 -> 車
    label 2 -> 鳥
    label 3 -> 猫
    label 4 -> 鹿
    label 5 -> 犬
    label 6 -> 蛙
    label 7 -> 馬
    label 8 -> 船
    label 9 -> トラック
    """
    # (The string above lists the CIFAR-10 class labels in Japanese:
    #  0 airplane, 1 car, 2 bird, 3 cat, 4 deer, 5 dog, 6 frog,
    #  7 horse, 8 ship, 9 truck.)
    # ================================================================
    # run name
    # ================================================================
    i_run = 0 # run number
    # seed
    torch.manual_seed(i_run)
    # load image directly
    # airplane to car
    label = torch.tensor([0]).to(device)        # source class of the input image
    label_cond = torch.tensor([1]).to(device)   # target class used for conditioning
    # img_name = 'img90'
    # img_name = 'img2185'
    # img_name = 'img2330'
    img_name = 'img9699'
    # car to airplane
    # label = torch.tensor([1]).to(device)
    # label_cond = torch.tensor([0]).to(device)
    # img_name = 'img104'
    # img_name = 'img330'
    # img_name = 'img659'
    # img_name = 'img7964'
    img_path = f'data/cifar10_64/cifar10-64/test/class{label[0]}/{img_name}.png'
    # load image from index
    # i_img = 0
    # imgs_path = glob.glob(f'data/cifar10_64/cifar10-64/test/class{label[0]}/*')
    # img_path = imgs_path[i_img]
    # img_name = os.path.basename(os.path.splitext(img_path)[0])
    img_mask_path = f'masks/{img_name}_mask.png'
    run_name = f'{img_name}_run{i_run}'
    # ================================================================
    # exp name
    # ================================================================
    exp_name = 'regenerate_mask'
    t_cfg = True       # if True, use the source-class prediction as the CFG "unconditional" branch
    cfg_scale = 5      # classifier-free guidance strength
    if t_cfg:
        exp_name += f'_t-cfg{cfg_scale}'
    else:
        exp_name += f'_cfg{cfg_scale}'
    exp_name += f'_{label[0]}to{label_cond[0]}'
    # exp_name += '_debug'
    # ================================================================
    # settings
    # ================================================================
    noise_steps = 1000   # total diffusion steps
    save_step = 5        # save every N frames for gif / intermediate dumps
    save_x_t = False     # dump intermediate x_t images if True
    t_starts = [300]     # forward-noise depths at which to start the reverse process
    # t_starts = [100, 200, 300, 400]
    # t_starts = [5 * i for i in range(30)]
    # t_starts = [0, 100, 200, 300, 400, 500]
    n = len(t_starts)
    exp_dir = f'{os.path.abspath(".")}/results/test/{exp_name}'
    out_dir = f'{exp_dir}/{run_name}'
    os.makedirs(out_dir, exist_ok=True)
    if save_x_t:
        os.makedirs(f'{out_dir}/img', exist_ok=True)
    # load image
    img = Image.open(img_path)
    mask = Image.open(img_mask_path)
    # transform
    diffusion = Diffusion(noise_steps=noise_steps, img_size=64, device=device)
    to_tensor = torchvision.transforms.ToTensor()
    img = to_tensor(img)[None]
    img = img.to(device)
    img = (img - 0.5) * 2          # scale from [0, 1] to [-1, 1]
    mask = to_tensor(mask)[None]
    mask = mask.to(device)
    mask = (mask - 0.5) * 2
    # binarize the mask: +1 = keep original pixels, -1 = regenerate
    mask[mask < 0.8] = -1
    mask[mask > 0.8] = 1
    with torch.no_grad():
        # ================================================================
        # forward process
        # ================================================================
        print('Forward Process ----------------------------------------------------------------')
        x_t_forward = torch.zeros((noise_steps + 1, 1, 3, 64, 64)).to(device)
        x_t_forward[0] = img[0]
        x = x_t_forward[0]
        for i in range(noise_steps):
            # one forward diffusion step: x_{t+1} = sqrt(a)*x + sqrt(1-a)*eps
            epsilon_t = torch.randn_like(x)
            x = torch.sqrt(diffusion.alpha[i]) * x + torch.sqrt(1 - diffusion.alpha[i]) * epsilon_t
            if save_x_t and i % save_step == 0:
                x_plot = torch.cat([x, img[0]], dim=0)
                x_plot = (x_plot.clamp(-1, 1) + 1) / 2
                x_plot = (x_plot * 255).type(torch.uint8)
                save_images(x_plot, f'{out_dir}/img/{i:03d}.png')
            x_t_forward[i + 1] = x
        # ================================================================
        # reverse process
        # ================================================================
        print('Reverse Process ----------------------------------------------------------------')
        for ni, t_start in enumerate(t_starts):
            x = x_t_forward[t_start]
            x_t_reverse = torch.zeros((t_start + 1, 1, 3, 64, 64)).to(device)
            x_t_reverse[t_start] = x
            for i in tqdm(reversed(range(1, t_start + 1))):
                ti = (torch.ones(1) * i).long().to(device)
                predicted_noise = model(x, ti, label_cond)
                if cfg_scale > 0:
                    # classifier-free guidance: interpolate away from the
                    # unconditional (or source-class) prediction
                    if t_cfg:
                        uncond_predicted_noise = model(x, ti, label)
                    else:
                        uncond_predicted_noise = model(x, ti, None)
                    predicted_noise = torch.lerp(uncond_predicted_noise, predicted_noise, cfg_scale)
                alpha = diffusion.alpha[ti][:, None, None, None]
                alpha_hat = diffusion.alpha_hat[ti][:, None, None, None]
                beta = diffusion.beta[ti][:, None, None, None]
                if i > 1:
                    noise = torch.randn_like(x)
                else:
                    noise = torch.zeros_like(x)
                x_mask = 1 / torch.sqrt(alpha) * (x - ((1 - alpha) / (torch.sqrt(1 - alpha_hat))) * predicted_noise) + torch.sqrt(beta) * noise
                # Regenerate only the masked region: outside the mask, reuse
                # the stored forward-noised pixels at this step.
                x[mask > 0] = x_t_forward[i][mask > 0]
                x[mask < 0] = x_mask[mask < 0]
                x_t_reverse[i - 1] = x
                if save_x_t and i % save_step == 0:
                    x_plot = torch.cat([x, img[0]], dim=0)
                    x_plot = (x_plot.clamp(-1, 1) + 1) / 2
                    x_plot = (x_plot * 255).type(torch.uint8)
                    save_images(x_plot, f'{out_dir}/img/{(t_start - i) + t_start:03d}.png')
            # animate forward then reverse trajectory as a gif
            x_gif = torch.cat([x_t_forward[:t_start - 1], x_t_reverse.flip(dims=[0])], dim=0)
            x_gif = (x_gif.clamp(-1, 1) + 1) / 2
            make_gif_from_tensor(x=x_gif[::save_step, 0, ...], out_dir=out_dir, img_name=f'{run_name}_t{t_start}', delay=1000)
            x_plot = (x.clamp(-1, 1) + 1) / 2
            x_plot = (x_plot * 255).type(torch.uint8)
            save_images(x_plot, f'{out_dir}/out_t{t_start}.png')
            # side-by-side: original | mask | regenerated
            x_out_list = torch.cat([img, mask, x], dim=0)
            x_plot = (x_out_list.clamp(-1, 1) + 1) / 2
            x_plot = (x_plot * 255).type(torch.uint8)
            save_images(x_plot, f'{exp_dir}/{run_name}_t{t_starts[ni]:03d}_list.png')
            save_images(x_plot, f'{out_dir}/{run_name}_t{t_starts[ni]:03d}_list.pdf')
| tf63/diffusion-trans | regenerate_mask.py | regenerate_mask.py | py | 7,178 | python | en | code | 0 | github-code | 90 |
35166286439 |
import numpy as np
import matplotlib.pyplot as plt
if __name__ == "__main__":
    # Plot MPC state trajectories (solid) against their desired references
    # (dashed): figure 1 shows joints (state rows 0-6), figure 2 shows
    # contact forces (state rows 14-16).
    b = np.load('./state_trajectory_mpc.npy')
    data_des = np.load('./state_trajectory_mpc_desired.npy')
    # squeeze singleton dims and transpose so rows are state components
    # and columns are time steps
    b = b.squeeze()
    b = b.T
    data_des = data_des.squeeze()
    data_des = data_des.T
    line_c = ['b', 'g', 'r', 'c', 'k', 'm', 'y']  # one color per joint
    x_ = np.arange(0, b.shape[1], 1)              # time-step axis
    # fig, axs = plt.subplots(2, 1)
    fig1 = plt.figure(1)
    for i in range(7):
        line_str = 'Joint' + str(i + 1)
        plt.plot(x_, b[i, ::], line_c[i], label=line_str)
        plt.plot(x_, data_des[i, ::], line_c[i]+'--', linewidth=2)
    plt.legend()
    fig2 = plt.figure(2)
    for i in range(3):
        line_str = 'Force' + str(i + 1)
        plt.plot(x_, b[i + 14, ::], line_c[i], label=line_str)
        plt.plot(x_, data_des[i + 14, ::], line_c[i]+'--', linewidth=2)
    plt.legend()
    plt.show()
| lasithagt/admm | data/plot_state_trajectory_ddp_mpc.py | plot_state_trajectory_ddp_mpc.py | py | 815 | python | en | code | 9 | github-code | 90 |
12122027447 | import os
from unittest.mock import patch, Mock, MagicMock
from datetime import datetime, timedelta
from teuthology import worker
from teuthology.contextutil import MaxWhileTries
class TestWorker(object):
    """Unit tests for teuthology.worker.

    All external effects (filesystem, beanstalk, subprocesses, git fetches)
    are replaced with mocks; the tests assert only on call wiring and
    return values.
    """
    def setup_method(self):
        # Fresh fake CLI context for every test.
        self.ctx = Mock()
        self.ctx.verbose = True
        self.ctx.archive_dir = '/archive/dir'
        self.ctx.log_dir = '/log/dir'
        self.ctx.tube = 'tube'
    @patch("os.path.exists")
    def test_restart_file_path_doesnt_exist(self, m_exists):
        m_exists.return_value = False
        result = worker.sentinel(worker.restart_file_path)
        assert not result
    @patch("os.path.getmtime")
    @patch("os.path.exists")
    @patch("teuthology.worker.datetime")
    def test_needs_restart(self, m_datetime, m_exists, m_getmtime):
        # Sentinel file mtime in the future -> restart requested.
        m_exists.return_value = True
        m_datetime.utcfromtimestamp.return_value = datetime.utcnow() + timedelta(days=1)
        result = worker.sentinel(worker.restart_file_path)
        assert result
    @patch("os.path.getmtime")
    @patch("os.path.exists")
    @patch("teuthology.worker.datetime")
    def test_does_not_need_restart(self, m_datetime, m_exists, getmtime):
        # Sentinel file mtime in the past -> no restart.
        m_exists.return_value = True
        m_datetime.utcfromtimestamp.return_value = datetime.utcnow() - timedelta(days=1)
        result = worker.sentinel(worker.restart_file_path)
        assert not result
    @patch("os.symlink")
    def test_symlink_success(self, m_symlink):
        worker.symlink_worker_log("path/to/worker.log", "path/to/archive")
        m_symlink.assert_called_with("path/to/worker.log", "path/to/archive/worker.log")
    @patch("teuthology.worker.log")
    @patch("os.symlink")
    def test_symlink_failure(self, m_symlink, m_log):
        m_symlink.side_effect = IOError
        worker.symlink_worker_log("path/to/worker.log", "path/to/archive")
        # actually logs the exception
        assert m_log.exception.called
    @patch("teuthology.worker.run_with_watchdog")
    @patch("teuthology.worker.teuth_config")
    @patch("subprocess.Popen")
    @patch("os.environ")
    @patch("os.mkdir")
    @patch("yaml.safe_dump")
    @patch("tempfile.NamedTemporaryFile")
    def test_run_job_with_watchdog(self, m_tempfile, m_safe_dump, m_mkdir,
                                   m_environ, m_popen, m_t_config,
                                   m_run_watchdog):
        config = {
            "suite_path": "suite/path",
            "config": {"foo": "bar"},
            "verbose": True,
            "owner": "the_owner",
            "archive_path": "archive/path",
            "name": "the_name",
            "description": "the_description",
            "job_id": "1",
        }
        # NamedTemporaryFile is used as a context manager; fake __enter__.
        m_tmp = MagicMock()
        temp_file = Mock()
        temp_file.name = "the_name"
        m_tmp.__enter__.return_value = temp_file
        m_tempfile.return_value = m_tmp
        env = dict(PYTHONPATH="python/path")
        m_environ.copy.return_value = env
        m_p = Mock()
        m_p.returncode = 0
        m_popen.return_value = m_p
        m_t_config.results_server = True
        worker.run_job(config, "teuth/bin/path", "archive/dir", verbose=False)
        m_run_watchdog.assert_called_with(m_p, config)
        # Expected teuthology CLI invocation built by run_job.
        expected_args = [
            'teuth/bin/path/teuthology',
            '-v',
            '--lock',
            '--block',
            '--owner', 'the_owner',
            '--archive', 'archive/path',
            '--name', 'the_name',
            '--description',
            'the_description',
            '--',
            "the_name"
        ]
        m_popen.assert_called_with(args=expected_args, env=env)
    @patch("time.sleep")
    @patch("teuthology.worker.symlink_worker_log")
    @patch("teuthology.worker.teuth_config")
    @patch("subprocess.Popen")
    @patch("os.environ")
    @patch("os.mkdir")
    @patch("yaml.safe_dump")
    @patch("tempfile.NamedTemporaryFile")
    def test_run_job_no_watchdog(self, m_tempfile, m_safe_dump, m_mkdir,
                                 m_environ, m_popen, m_t_config, m_symlink_log,
                                 m_sleep):
        config = {
            "suite_path": "suite/path",
            "config": {"foo": "bar"},
            "verbose": True,
            "owner": "the_owner",
            "archive_path": "archive/path",
            "name": "the_name",
            "description": "the_description",
            "worker_log": "worker/log.log",
            "job_id": "1",
        }
        m_tmp = MagicMock()
        temp_file = Mock()
        temp_file.name = "the_name"
        m_tmp.__enter__.return_value = temp_file
        m_tempfile.return_value = m_tmp
        env = dict(PYTHONPATH="python/path")
        m_environ.copy.return_value = env
        m_p = Mock()
        m_p.returncode = 1
        m_popen.return_value = m_p
        m_t_config.results_server = False
        worker.run_job(config, "teuth/bin/path", "archive/dir", verbose=False)
        m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"])
    @patch("teuthology.worker.report.try_push_job_info")
    @patch("teuthology.worker.symlink_worker_log")
    @patch("time.sleep")
    def test_run_with_watchdog_no_reporting(self, m_sleep, m_symlink_log, m_try_push):
        config = {
            "name": "the_name",
            "job_id": "1",
            "worker_log": "worker_log",
            "archive_path": "archive/path",
            "teuthology_branch": "main"
        }
        process = Mock()
        process.poll.return_value = "not None"
        worker.run_with_watchdog(process, config)
        m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"])
        # A finished process must be reported as dead.
        m_try_push.assert_called_with(
            dict(name=config["name"], job_id=config["job_id"]),
            dict(status='dead')
        )
    @patch("subprocess.Popen")
    @patch("teuthology.worker.symlink_worker_log")
    @patch("time.sleep")
    @patch("teuthology.worker.report.try_push_job_info")
    def test_run_with_watchdog_with_reporting(self, m_tpji, m_sleep, m_symlink_log, m_popen):
        config = {
            "name": "the_name",
            "job_id": "1",
            "worker_log": "worker_log",
            "archive_path": "archive/path",
            "teuthology_branch": "jewel"
        }
        process = Mock()
        process.poll.return_value = "not None"
        m_proc = Mock()
        m_proc.poll.return_value = "not None"
        m_popen.return_value = m_proc
        worker.run_with_watchdog(process, config)
        m_symlink_log.assert_called_with(config["worker_log"], config["archive_path"])
    @patch("teuthology.repo_utils.ls_remote")
    @patch("os.path.isdir")
    @patch("teuthology.repo_utils.fetch_teuthology")
    @patch("teuthology.worker.teuth_config")
    @patch("teuthology.repo_utils.fetch_qa_suite")
    def test_prep_job(self, m_fetch_qa_suite, m_teuth_config,
                      m_fetch_teuthology, m_isdir, m_ls_remote):
        config = dict(
            name="the_name",
            job_id="1",
            suite_sha1="suite_hash",
        )
        archive_dir = '/archive/dir'
        log_file_path = '/worker/log'
        m_fetch_teuthology.return_value = '/teuth/path'
        m_fetch_qa_suite.return_value = '/suite/path'
        m_ls_remote.return_value = 'teuth_hash'
        m_isdir.return_value = True
        m_teuth_config.teuthology_path = None
        got_config, teuth_bin_path = worker.prep_job(
            config,
            log_file_path,
            archive_dir,
        )
        assert got_config['worker_log'] == log_file_path
        assert got_config['archive_path'] == os.path.join(
            archive_dir,
            config['name'],
            config['job_id'],
        )
        assert got_config['teuthology_branch'] == 'main'
        m_fetch_teuthology.assert_called_once_with(branch='main', commit='teuth_hash')
        assert teuth_bin_path == '/teuth/path/virtualenv/bin'
        m_fetch_qa_suite.assert_called_once_with('main', 'suite_hash')
        assert got_config['suite_path'] == '/suite/path'
    def build_fake_jobs(self, m_connection, m_job, job_bodies):
        """
        Given patched copies of:
            beanstalkc.Connection
            beanstalkc.Job
        And a list of basic job bodies, return a list of mocked Job objects
        """
        # Make sure instantiating m_job returns a new object each time
        jobs = []
        job_id = 0
        for job_body in job_bodies:
            job_id += 1
            job = MagicMock(conn=m_connection, jid=job_id, body=job_body)
            job.jid = job_id
            job.body = job_body
            jobs.append(job)
        return jobs
    @patch("teuthology.worker.run_job")
    @patch("teuthology.worker.prep_job")
    @patch("beanstalkc.Job", autospec=True)
    @patch("teuthology.repo_utils.fetch_qa_suite")
    @patch("teuthology.repo_utils.fetch_teuthology")
    @patch("teuthology.worker.beanstalk.watch_tube")
    @patch("teuthology.worker.beanstalk.connect")
    @patch("os.path.isdir", return_value=True)
    @patch("teuthology.worker.setup_log_file")
    def test_main_loop(
        self, m_setup_log_file, m_isdir, m_connect, m_watch_tube,
        m_fetch_teuthology, m_fetch_qa_suite, m_job, m_prep_job, m_run_job,
    ):
        m_connection = Mock()
        jobs = self.build_fake_jobs(
            m_connection,
            m_job,
            [
                'foo: bar',
                'stop_worker: true',
            ],
        )
        m_connection.reserve.side_effect = jobs
        m_connect.return_value = m_connection
        m_prep_job.return_value = (dict(), '/bin/path')
        worker.main(self.ctx)
        # There should be one reserve call per item in the jobs list
        expected_reserve_calls = [
            dict(timeout=60) for i in range(len(jobs))
        ]
        got_reserve_calls = [
            call[1] for call in m_connection.reserve.call_args_list
        ]
        assert got_reserve_calls == expected_reserve_calls
        for job in jobs:
            job.bury.assert_called_once_with()
            job.delete.assert_called_once_with()
    @patch("teuthology.repo_utils.ls_remote")
    @patch("teuthology.worker.report.try_push_job_info")
    @patch("teuthology.worker.run_job")
    @patch("beanstalkc.Job", autospec=True)
    @patch("teuthology.repo_utils.fetch_qa_suite")
    @patch("teuthology.repo_utils.fetch_teuthology")
    @patch("teuthology.worker.beanstalk.watch_tube")
    @patch("teuthology.worker.beanstalk.connect")
    @patch("os.path.isdir", return_value=True)
    @patch("teuthology.worker.setup_log_file")
    def test_main_loop_13925(
        self, m_setup_log_file, m_isdir, m_connect, m_watch_tube,
        m_fetch_teuthology, m_fetch_qa_suite, m_job, m_run_job,
        m_try_push_job_info, m_ls_remote,
    ):
        # Regression test: when fetching the QA suite keeps failing
        # (MaxWhileTries), no job may run and each job is reported dead.
        m_connection = Mock()
        jobs = self.build_fake_jobs(
            m_connection,
            m_job,
            [
                'name: name',
                'name: name\nstop_worker: true',
            ],
        )
        m_connection.reserve.side_effect = jobs
        m_connect.return_value = m_connection
        m_fetch_qa_suite.side_effect = [
            '/suite/path',
            MaxWhileTries(),
            MaxWhileTries(),
        ]
        worker.main(self.ctx)
        assert len(m_run_job.call_args_list) == 0
        assert len(m_try_push_job_info.call_args_list) == len(jobs)
        for i in range(len(jobs)):
            push_call = m_try_push_job_info.call_args_list[i]
            assert push_call[0][1]['status'] == 'dead'
| ceph/teuthology | teuthology/test/test_worker.py | test_worker.py | py | 11,616 | python | en | code | 153 | github-code | 90 |
1529350824 | import random
import numpy as np
import pandas as pd
import sys
import SimpleNetwork
# Grow 100 replicate SimpleNetwork instances from 5 to 1000 genes, recording
# the per-gene connection-count distribution at 100, 500, and 1000 genes.
# argv[1] selects the parameter set ('2002' or '2003'); optional argv[2] picks
# which replicate's full adjacency matrix to dump.
parameter = sys.argv[1]
if len(sys.argv) == 2:
    network_extract = 100
else:
    network_extract = int(sys.argv[2])
random.seed(12)
for time in range(100):
    if (time + 1) % 10 == 0:
        print('Iteration ' + str(time + 1))
    # Growth parameters differ between the two published parameter sets.
    if parameter == '2002':
        network = SimpleNetwork.SimpleNetwork(n_start=5, n_end=1000, beta=0.16, delta=0.58, k_mean=2.0)
    elif parameter == '2003':
        network = SimpleNetwork.SimpleNetwork(n_start=5, n_end=1000, beta=0.155, delta=0.562, k_mean=2.5)
    while True:
        # Snapshot per-gene degree lists at 100/500 genes and stop at n_end.
        if network.n >= network.n_end:
            connection_1000 = []
            for i in range(len(network.network)):
                connection_1000.append(sum(network.network['gene_' + str(i + 1)]))
            break
        if network.n == 100:
            connection_100 = []
            for i in range(100):
                connection_100.append(sum(network.network['gene_' + str(i + 1)]))
        if network.n == 500:
            connection_500 = []
            for i in range(500):
                connection_500.append(sum(network.network['gene_' + str(i + 1)]))
        # One growth step: duplicate a gene, then rewire.
        network.gene_duplication()
        network.remove_linkage()
        network.create_linkage()
    if (time + 1) == network_extract:
        # Dump the full adjacency matrix of the selected replicate.
        network_result = pd.DataFrame(network.network)
        if parameter == '2002':
            network_result.to_csv('File/SimpleNetwork_network_result_2002.txt', sep='\t', header=False, index=False)
        elif parameter == '2003':
            network_result.to_csv('File/SimpleNetwork_network_result_2003.txt', sep='\t', header=False, index=False)
    # Build degree histograms: gene_num_X[d] counts genes with d connections.
    connection_num_100 = np.arange(0, 100, 1)
    connection_num_500 = np.arange(0, 500, 1)
    connection_num_1000 = np.arange(0, 1000, 1)
    gene_num_100 = np.zeros(100, dtype=int)
    gene_num_500 = np.zeros(500, dtype=int)
    gene_num_1000 = np.zeros(1000, dtype=int)
    for item in connection_100:
        gene_num_100[item] += 1
    for item in connection_500:
        gene_num_500[item] += 1
    for item in connection_1000:
        gene_num_1000[item] += 1
    connection_num_100.reshape(100)
    connection_num_500.reshape(500)
    connection_num_1000.reshape(1000)
    gene_num_100.reshape(100)
    gene_num_500.reshape(500)
    gene_num_1000.reshape(1000)
    # Accumulate one histogram column per replicate.
    if time == 0:
        result_100 = pd.DataFrame(None, columns=['connection_num', 'gene_num_1'])
        result_100['connection_num'] = connection_num_100
        result_100['gene_num_1'] = gene_num_100
        result_500 = pd.DataFrame(None, columns=['connection_num', 'gene_num_1'])
        result_500['connection_num'] = connection_num_500
        result_500['gene_num_1'] = gene_num_500
        result_1000 = pd.DataFrame(None, columns=['connection_num', 'gene_num_1'])
        result_1000['connection_num'] = connection_num_1000
        result_1000['gene_num_1'] = gene_num_1000
    else:
        result_100['gene_num_' + str(time + 1)] = gene_num_100
        result_500['gene_num_' + str(time + 1)] = gene_num_500
        result_1000['gene_num_' + str(time + 1)] = gene_num_1000
if parameter == '2002':
    result_100.to_csv('File/SimpleNetwork_connection_result_100_2002.txt', sep='\t', header=False, index=False)
    result_500.to_csv('File/SimpleNetwork_connection_result_500_2002.txt', sep='\t', header=False, index=False)
    result_1000.to_csv('File/SimpleNetwork_connection_result_1000_2002.txt', sep='\t', header=False, index=False)
elif parameter == '2003':
    result_100.to_csv('File/SimpleNetwork_connection_result_100_2003.txt', sep='\t', header=False, index=False)
    result_500.to_csv('File/SimpleNetwork_connection_result_500_2003.txt', sep='\t', header=False, index=False)
    result_1000.to_csv('File/SimpleNetwork_connection_result_1000_2003.txt', sep='\t', header=False, index=False)
| zwhbio2017/Network_duplication | Run_SimpleNetwork.py | Run_SimpleNetwork.py | py | 3,864 | python | en | code | 0 | github-code | 90 |
18372218319 | #!/usr/bin/env python3
import sys
import math
import decimal
import itertools
from itertools import product
from functools import reduce
def input():
    """Read one line from stdin without its trailing newline.

    Uses rstrip("\n") rather than slicing off the last character: slicing
    would silently drop a real character when the final line of input has
    no terminating newline.
    """
    return sys.stdin.readline().rstrip("\n")
def sort_zip(a: list, b: list):
    """Sort *a* ascending and reorder *b* so pairs stay aligned.

    Returns the two reordered lists. Empty inputs now return ([], [])
    instead of raising (the original ``zip(*z)`` unpacking failed with
    ValueError on empty sequences). Ties in *a* are broken by *b*, as in
    the original tuple sort.
    """
    if not a:
        return [], []
    pairs = sorted(zip(a, b))
    a_sorted, b_sorted = zip(*pairs)
    return list(a_sorted), list(b_sorted)
def main():
    # Minimize (i * j) % 2019 over pairs taken from [L, R].
    L, R = map(int, input().split())
    # If any multiple of 2019 lies in [L, R], a product divisible by 2019
    # exists, so the answer is 0 and we exit early. 2019 * 990589 exceeds
    # the input bound, so this scan covers every candidate multiple.
    for i in range(1, 990589):
        if L <= 2019*i <= R:
            print(0)
            exit()  # NOTE: terminates the process via SystemExit
        # Reaching here means no multiple of 2019 is in [L, R], which
        # implies R - L < 2019, so the brute force below stays bounded
        # (at most ~2019^2 iterations).
    ans = 10**19
    for i in range(L, R):
        for j in range(L + 1, R + 1):
            num = (i * j) % 2019
            ans = min(ans, num)
    print(ans)
if __name__ == '__main__':
main()
| Aasthaengg/IBMdataset | Python_codes/p02983/s617814160.py | s617814160.py | py | 673 | python | en | code | 0 | github-code | 90 |
18430529297 | #!/usr/bin/env python
"""
Solution to Project Euler Problem 44
http://projecteuler.net/
by Apalala <apalala@gmail.com>
(cc) Attribution-ShareAlike
http://creativecommons.org/licenses/by-sa/3.0/
Pentagonal numbers are generated by the formula, Pn=n(3n−1)/2. The first
ten pentagonal numbers are:
1, 5, 12, 22, 35, 51, 70, 92, 117, 145, ...
It can be seen that P4 + P7 = 22 + 70 = 92 = P8. However, their difference,
70 − 22 = 48, is not pentagonal.
Find the pair of pentagonal numbers, Pj and Pk, for which their sum and
difference is pentagonal and D = |Pk − Pj| is minimised; what is the value
of D?
"""
from series import pentagonals, pentagonal, is_pentagonal
def pentagonal_sum_and_diff():
    """Search for the first pentagonal pair whose sum and difference are pentagonal.

    Returns (j, k) where j = |Pk - Pd| for pentagonal numbers k and d,
    with both j and j + k pentagonal.
    """
    for larger in pentagonals():
        for smaller in pentagonals(upto=larger):
            candidate = abs(larger - smaller)
            if candidate > 0 and is_pentagonal(candidate) and is_pentagonal(candidate + larger):
                return candidate, larger
def test():
    """Sanity checks for the pentagonal helpers (P4 + P7 = 92 = P8)."""
    assert is_pentagonal(pentagonal(4) + pentagonal(7))
    assert not is_pentagonal(pentagonal(70) + pentagonal(22))
def run():
    """Solve Project Euler 44 and print the minimised difference D."""
    j, k = pentagonal_sum_and_diff()
    print(abs(j - k))
if __name__ == "__main__":
test()
run()
| Web-Dev-Collaborative/PYTHON_PRAC | projecteuler/euler044_pentagon_numbers.py | euler044_pentagon_numbers.py | py | 1,150 | python | en | code | 6 | github-code | 90 |
18408073329 | import math
def main():
    """Read N and K from stdin and print max(N - K + 1, 0).

    The original implementation decremented N one step at a time until
    N - K went negative, counting the steps: that loop yields N - K + 1
    when N >= K and 0 otherwise, so it is replaced by direct arithmetic
    (O(1) instead of O(N)).
    """
    N, K = map(int, input().split())
    print(max(N - K + 1, 0))
main()
| Aasthaengg/IBMdataset | Python_codes/p03047/s067827402.py | s067827402.py | py | 234 | python | en | code | 0 | github-code | 90 |
41783048829 | import argparse
def main():
    """Extract position records from a tracking log and write them out.

    Reads ``-i/--inputFile`` (default ``position.log``) and writes one
    formatted line per "location data: position:" record to
    ``-o/--outputFile`` (default ``parsed_loc.txt``).

    Fixes over the original:
    - argparse defaults replace the manual ``== None`` checks;
    - both file handles are managed by ``with`` (the output handle was
      never closed, risking lost buffered writes);
    - the line is stripped of its newline before splitting, so the "Z"
      field no longer carries an embedded "\\n".
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-i", "--inputFile", type=str, default="position.log",
                        help="the log file to parse")
    parser.add_argument("-o", "--outputFile", type=str, default="parsed_loc.txt",
                        help="the output log file")
    args = parser.parse_args()
    with open(args.outputFile, "w") as out, open(args.inputFile) as f:
        for line in f:
            if "location data: position:" not in line:
                continue
            # Field layout after splitting on spaces:
            # parts[0] timestamp (with one trailing delimiter char),
            # parts[1] ends with the 4-char sensor id,
            # parts[5:8] are "?:<value>" coordinate fields.
            parts = line.rstrip("\n").split(" ")
            timestamp = parts[0][:len(parts[0]) - 1]
            time_str = '"Time":"{}", '.format(timestamp)
            loc_str = '"Sensor":"{}", location": "X":"{}", "Y":"{}", "Z":"{}"'.format(
                parts[1][-4:], parts[5][2:], parts[6][2:], parts[7][2:])
            print(time_str)
            print(loc_str)
            out.write(time_str + loc_str + "\n")
if __name__ == "__main__":
main(); | ArifSohaib/player_tracking | parse_log.py | parse_log.py | py | 1,128 | python | en | code | 0 | github-code | 90 |
11438745928 | """
Render a set of NetCDF files to images.
Stretched renderers may have one of the following colormap values:
1.0 (absolute)
max (calculate max across datasets)
0.5*max (calculate max across datasets, and multiply by value)
TODO:
* connect palettes to create matching class breaks
* combine palette and scale over which to stretch
"""
import os
import glob
import json
import webbrowser
import numpy
from netCDF4 import Dataset
import click
from pyproj import Proj
from rasterio.crs import CRS
from rasterio.warp import calculate_default_transform
from rasterio.enums import Resampling
from jinja2 import Environment, PackageLoader
from trefoil.render.renderers.utilities import renderer_from_dict
from trefoil.render.renderers.legend import composite_elements
from trefoil.netcdf.variable import SpatialCoordinateVariables
from trefoil.geometry.bbox import BBox
from trefoil.netcdf.warp import warp_array
from trefoil.netcdf.crs import get_crs, is_geographic
from trefoil.cli import cli
from trefoil.cli.utilities import (
render_image, collect_statistics, colormap_to_stretched_renderer,
palette_to_stretched_renderer, palette_to_classified_renderer,
get_leaflet_anchors, get_mask)
# Common defaults for usability wins
# Maps well-known climate variable names to a default
# (palettable palette name, stretch range) pair used when the user
# supplies neither --colormap nor --palette.
DEFAULT_PALETTES = {
    'tmin': ('colorbrewer.sequential.YlOrRd_5', 'min,max'),
    'tmax': ('colorbrewer.sequential.YlOrRd_5', 'min,max'),
    'ppt': ('colorbrewer.diverging.RdYlGn_5', 'min,max'),
    'pet': ('colorbrewer.diverging.RdYlGn_5', 'max,min')
}
@cli.command(short_help="Render netcdf files to images")
@click.argument('filename_pattern')
@click.argument('variable')
@click.argument('output_directory', type=click.Path())
@click.option('--renderer_file', help='File containing renderer JSON', type=click.Path(exists=True))
@click.option('--save', 'save_file', type=click.Path(), default=None, help='Save renderer to renderer_file')
@click.option('--renderer_type', type=click.Choice(['stretched', 'classified']), default='stretched', help='Name of renderer.', show_default=True)
@click.option('--colormap', default=None, help='Provide colormap as comma-separated lookup of value to hex color code. (Example: -1:#FF0000,1:#0000FF)')
@click.option('--fill', type=click.FLOAT, default=None, help='Fill value (will be rendered as transparent)')
@click.option('--colorspace', default='hsv', type=click.Choice(['hsv', 'rgb']), help='Color interpolation colorspace')
@click.option('--palette', default=None, help='Palettable color palette (Example: colorbrewer.sequential.Blues_3)')
@click.option('--palette_stretch', default='min,max', help='Value range over which to apply the palette when using stretched renderer (comma-separated)', show_default=True)
@click.option('--scale', default=1.0, help='Scale factor for data pixel to screen pixel size')
@click.option('--id_variable', help='ID variable used to provide IDs during image generation. Must be of same dimensionality as first dimension of variable (example: time). Guessed from the 3rd dimension')
@click.option('--lh', default=150, help='Height of the legend in pixels [default: 150]')
@click.option('--legend_breaks', default=None, type=click.INT, help='Number of breaks to show on legend for stretched renderer')
@click.option('--legend_ticks', default=None, type=click.STRING, help='Legend tick values for stretched renderer')
@click.option('--legend_precision', default=2, type=click.INT, help='Number of decimal places of precision for legend labels', show_default=True)
@click.option('--format', default='png', type=click.Choice(['png', 'jpg', 'webp']), show_default=True)
# Projection related options
@click.option('--src-crs', '--src_crs', default=None, type=click.STRING, help='Source coordinate reference system (limited to EPSG codes, e.g., EPSG:4326). Will be read from file if not provided.')
@click.option('--dst-crs', '--dst_crs', default=None, type=click.STRING, help='Destination coordinate reference system')
@click.option('--res', default=None, type=click.FLOAT, help='Destination pixel resolution in destination coordinate system units')
@click.option('--resampling', default='nearest', type=click.Choice(('nearest', 'cubic', 'lanczos', 'mode')), help='Resampling method for reprojection (default: nearest)')
@click.option('--anchors', default=False, is_flag=True, help='Print anchor coordinates for use in Leaflet ImageOverlay')
@click.option('--map', 'interactive_map', default=False, is_flag=True, help='Open in interactive map')
# Other options
@click.option('--mask', 'mask_path', default=None, help='Mask dataset:variable (e.g., mask.nc:mask). Mask variable assumed to be named "mask" unless otherwise provided')
def render_netcdf(
        filename_pattern,
        variable,
        output_directory,
        renderer_file,
        save_file,
        renderer_type,
        colormap,
        fill,
        colorspace,
        palette,
        palette_stretch,
        scale,
        id_variable,
        lh,
        legend_breaks,
        legend_ticks,
        legend_precision,
        format,
        src_crs,
        dst_crs,
        res,
        resampling,
        anchors,
        interactive_map,
        mask_path):
    """
    Render netcdf files to images.

    colormap is ignored if renderer_file is provided.
    --dst-crs is ignored if using --map option (always uses EPSG:3857).

    If no colormap or palette is provided, a default palette may be chosen
    based on the name of the variable.

    If provided, mask must be 1 for areas to be masked out, and 0 otherwise. It
    must be in the same CRS as the input datasets, and have the same spatial
    dimensions.
    """

    # Parameter overrides
    if interactive_map:
        # Leaflet interactive maps are always rendered in web mercator.
        dst_crs = 'EPSG:3857'

    filenames = glob.glob(filename_pattern)
    if not filenames:
        raise click.BadParameter('No files found matching that pattern', param='filename_pattern', param_hint='FILENAME_PATTERN')

    if not os.path.exists(output_directory):
        os.makedirs(output_directory)

    mask = get_mask(mask_path) if mask_path is not None else None

    # Load a previously-saved renderer, unless we were asked to (re)build
    # and save one (in which case it is constructed from the CLI options).
    if renderer_file is not None and not save_file:
        if not os.path.exists(renderer_file):
            raise click.BadParameter('does not exist', param='renderer_file', param_hint='renderer_file')

        # see https://bitbucket.org/databasin/ncdjango/wiki/Home for format
        renderer_dict = json.loads(open(renderer_file).read())

        # The file may hold either one renderer or a mapping of
        # variable name -> renderer; unwrap the per-variable entry.
        if variable in renderer_dict and 'colors' not in renderer_dict:
            renderer_dict = renderer_dict[variable]

        renderer_type = renderer_dict['type']

        if renderer_type == 'stretched':
            colors = ','.join([str(c[0]) for c in renderer_dict['colors']])
            if 'min' in colors or 'max' in colors or 'mean' in colors:
                statistics = collect_statistics(filenames, (variable,), mask=mask)[variable]
                for entry in renderer_dict['colors']:
                    # BUGFIX: `basestring` is Python 2 only and raised a
                    # NameError under Python 3; `str` is the correct check.
                    if isinstance(entry[0], str):
                        if entry[0] in ('min', 'max', 'mean'):
                            entry[0] = statistics[entry[0]]
                        elif '*' in entry[0]:
                            # e.g. "0.5*max" -> half the observed maximum
                            rel_value, statistic = entry[0].split('*')
                            entry[0] = float(rel_value) * statistics[statistic]

        renderer = renderer_from_dict(renderer_dict)

    else:
        # Build a renderer from the command-line options.
        if renderer_type == 'stretched':
            if palette is not None:
                renderer = palette_to_stretched_renderer(palette, palette_stretch, filenames, variable, fill_value=fill, mask=mask)
            elif colormap is None and variable in DEFAULT_PALETTES:
                # Fall back to a per-variable default palette when neither
                # an explicit palette nor a colormap was given.
                palette, palette_stretch = DEFAULT_PALETTES[variable]
                renderer = palette_to_stretched_renderer(palette, palette_stretch, filenames, variable, fill_value=fill, mask=mask)
            else:
                if colormap is None:
                    colormap = 'min:#000000,max:#FFFFFF'
                renderer = colormap_to_stretched_renderer(colormap, colorspace, filenames, variable, fill_value=fill, mask=mask)

        elif renderer_type == 'classified':
            if not palette:
                raise click.BadParameter('palette required for classified (for now)',
                                         param='--palette', param_hint='--palette')

            renderer = palette_to_classified_renderer(palette, filenames, variable, method='equal', fill_value=fill, mask=mask)  # TODO: other methods

    if save_file:
        # Persist the renderer, merging into an existing renderer file if any.
        if os.path.exists(save_file):
            with open(save_file, 'r+') as output_file:
                data = json.loads(output_file.read())
                output_file.seek(0)
                output_file.truncate()
                data[variable] = renderer.serialize()
                output_file.write(json.dumps(data, indent=4))
        else:
            with open(save_file, 'w') as output_file:
                output_file.write(json.dumps({variable: renderer.serialize()}))

    # Render the legend once, up front.
    if renderer_type == 'stretched':
        if legend_ticks is not None and not legend_breaks:
            legend_ticks = [float(v) for v in legend_ticks.split(',')]

        legend = renderer.get_legend(image_height=lh, breaks=legend_breaks, ticks=legend_ticks, max_precision=legend_precision)[0].to_image()

    elif renderer_type == 'classified':
        legend = composite_elements(renderer.get_legend())

    legend.save(os.path.join(output_directory, '{0}_legend.png'.format(variable)))

    # Inspect the first dataset to establish dimensions, CRS and transforms;
    # all other files must match these.
    with Dataset(filenames[0]) as ds:
        var_obj = ds.variables[variable]
        dimensions = var_obj.dimensions
        shape = var_obj.shape
        num_dimensions = len(shape)

        if num_dimensions == 3:
            if id_variable:
                if shape[0] != ds.variables[id_variable][:].shape[0]:
                    raise click.BadParameter('must be same dimensionality as 3rd dimension of {0}'.format(variable),
                                             param='--id_variable', param_hint='--id_variable')
            else:
                # Guess from the 3rd dimension
                guess = dimensions[0]
                if guess in ds.variables and ds.variables[guess][:].shape[0] == shape[0]:
                    id_variable = guess

        ds_crs = get_crs(ds, variable)
        if not ds_crs and is_geographic(ds, variable):
            ds_crs = 'EPSG:4326'  # Assume all geographic data is WGS84

        src_crs = CRS.from_string(ds_crs) if ds_crs else CRS({'init': src_crs}) if src_crs else None

        # get transforms, assume last 2 dimensions on variable are spatial in row, col order
        y_dim, x_dim = dimensions[-2:]
        coords = SpatialCoordinateVariables.from_dataset(
            ds, x_dim, y_dim, projection=Proj(src_crs.to_dict()) if src_crs else None
        )

        if mask is not None and not mask.shape == shape[-2:]:
            # Will likely break before this if collecting statistics
            raise click.BadParameter(
                'mask variable shape does not match shape of input spatial dimensions',
                param='--mask', param_hint='--mask'
            )

        flip_y = False
        reproject_kwargs = None
        if dst_crs is not None:
            if not src_crs:
                raise click.BadParameter('must provide src_crs to reproject',
                                         param='--src-crs',
                                         param_hint='--src-crs')

            dst_crs = CRS.from_string(dst_crs)

            src_height, src_width = coords.shape
            dst_transform, dst_width, dst_height = calculate_default_transform(
                src_crs, dst_crs, src_width, src_height,
                *coords.bbox.as_list(), resolution=res
            )

            reproject_kwargs = {
                'src_crs': src_crs,
                'src_transform': coords.affine,
                'dst_crs': dst_crs,
                'dst_transform': dst_transform,
                'resampling': getattr(Resampling, resampling),
                'dst_shape': (dst_height, dst_width)
            }
        else:
            dst_transform = coords.affine
            dst_height, dst_width = coords.shape
            dst_crs = src_crs

            if coords.y.is_ascending_order():
                # Only needed if we are not already reprojecting the data, since that will flip it automatically
                flip_y = True

        if anchors or interactive_map:
            if not (dst_crs or src_crs):
                raise click.BadParameter('must provide at least src_crs to get Leaflet anchors or interactive map',
                                         param='--src-crs', param_hint='--src-crs')

            leaflet_anchors = get_leaflet_anchors(BBox.from_affine(dst_transform, dst_width, dst_height,
                                                                   projection=Proj(dst_crs) if dst_crs else None))

            if anchors:
                click.echo('Anchors: {0}'.format(leaflet_anchors))

    # Render every matching file: one image per 2D slice.
    layers = {}
    for filename in filenames:
        with Dataset(filename) as ds:
            click.echo('Processing {0}'.format(filename))

            filename_root = os.path.split(filename)[1].replace('.nc', '')

            if variable not in ds.variables:
                raise click.BadParameter('variable {0} was not found in file: {1}'.format(variable, filename),
                                         param='variable', param_hint='VARIABLE')

            var_obj = ds.variables[variable]
            if not var_obj.dimensions == dimensions:
                raise click.ClickException('All datasets must have the same dimensions for {0}'.format(variable))

            if num_dimensions == 2:
                data = var_obj[:]
                if mask is not None:
                    data = numpy.ma.masked_array(data, mask=mask)

                image_filename = os.path.join(output_directory, '{0}_{1}.{2}'.format(filename_root, variable, format))
                if reproject_kwargs:
                    data = warp_array(data, **reproject_kwargs)
                render_image(renderer, data, image_filename, scale, flip_y=flip_y, format=format)

                local_filename = os.path.split(image_filename)[1]
                layers[os.path.splitext(local_filename)[0]] = local_filename

            elif num_dimensions == 3:
                for index in range(shape[0]):
                    id = ds.variables[id_variable][index] if id_variable is not None else index
                    image_filename = os.path.join(output_directory, '{0}_{1}__{2}.{3}'.format(filename_root, variable, id, format))
                    data = var_obj[index]
                    if mask is not None:
                        data = numpy.ma.masked_array(data, mask=mask)

                    if reproject_kwargs:
                        data = warp_array(data, **reproject_kwargs)
                    render_image(renderer, data, image_filename, scale, flip_y=flip_y, format=format)

                    local_filename = os.path.split(image_filename)[1]
                    layers[os.path.splitext(local_filename)[0]] = local_filename

            # TODO: not tested recently.  Make sure still correct
            # else:
            #     # Assume last 2 components of shape are lat & lon, rest are iterated over
            #     id_variables = None
            #     if id_variable is not None:
            #         id_variables = id_variable.split(',')
            #         for index, name in enumerate(id_variables):
            #             if name:
            #                 assert data.shape[index] == ds.variables[name][:].shape[0]
            #
            #     ranges = []
            #     for dim in data.shape[:-2]:
            #         ranges.append(range(0, dim))
            #     for combined_index in product(*ranges):
            #         id_parts = []
            #         for index, dim_index in enumerate(combined_index):
            #             if id_variables is not None and index < len(id_variables) and id_variables[index]:
            #                 id = ds.variables[id_variables[index]][dim_index]
            #
            #                 if not isinstance(id, basestring):
            #                     if isinstance(id, Iterable):
            #                         id = '_'.join((str(i) for i in id))
            #                     else:
            #                         id = str(id)
            #
            #                 id_parts.append(id)
            #
            #             else:
            #                 id_parts.append(str(dim_index))
            #
            #         combined_id = '_'.join(id_parts)
            #         image_filename = os.path.join(output_directory, '{0}__{1}.{2}'.format(filename_root, combined_id, format))
            #         if reproject_kwargs:
            #             data = warp_array(data, **reproject_kwargs)  # NOTE: lack of index will break this
            #         render_image(renderer, data[combined_index], image_filename, scale, flip_y=flip_y, format=format)
            #
            #         local_filename = os.path.split(image_filename)[1]
            #         layers[os.path.splitext(local_filename)[0]] = local_filename

    if interactive_map:
        # Write a small Leaflet viewer next to the rendered images and open it.
        index_html = os.path.join(output_directory, 'index.html')
        with open(index_html, 'w') as out:
            template = Environment(loader=PackageLoader('trefoil.cli')).get_template('map.html')
            out.write(
                template.render(
                    layers=json.dumps(layers),
                    bounds=str(leaflet_anchors),
                    variable=variable
                )
            )

        webbrowser.open(index_html)
| consbio/trefoil | trefoil/cli/render_netcdf.py | render_netcdf.py | py | 18,125 | python | en | code | 13 | github-code | 90 |
19378751676 | # Given a string containing digits from 2-9 inclusive, return all possible letter combinations that the number could represent.
#
# A mapping of digit to letters (just like on the telephone buttons) is given below. Note that 1 does not map to any letters.
#
#
#
# Example:
#
# Input: "23"
# Output: ["ad", "ae", "af", "bd", "be", "bf", "cd", "ce", "cf"].
from typing import List
# *******首先要定义全局变量存储临时结果以及返回结果(回溯算法相比于递归算法的关键之处);
# 在递归函数中,首先要判断是否满足递归条件,不满足直接返回;
# 其次,要对当前状态的每一种情况进行循环递归处理;这其中就包含了对临时结果的添加(方便进入下一层递归)以及删除(方便回溯到上一层)操作。
# 注意:正是这样的全局变量保证了回溯操作的正常进行!
# 本质是树的形式,进行一个全局的遍历。
class Solution:
    """LeetCode 17: letter combinations of a phone number."""

    def letterCombinations(self, digits: str) -> List[str]:
        """Return every letter string the digit sequence could represent.

        Combinations are produced in keypad order via depth-first search,
        so ``'23'`` yields ``['ad', 'ae', 'af', 'bd', ...]``.

        :param digits: string of digits 2-9 (1 and 0 map to no letters).
        :return: list of combinations; empty list for empty input.
        """
        if not digits:
            return []
        # Keypad mapping in canonical order. BUGFIX: the original mapped
        # '3' to ['e', 'f', 'd'], producing combinations in the wrong
        # order ("ae, af, ad") versus the documented expected output.
        keypad = {
            '2': 'abc', '3': 'def', '4': 'ghi', '5': 'jkl',
            '6': 'mno', '7': 'pqrs', '8': 'tuv', '9': 'wxyz',
        }
        combos: List[str] = []

        def dfs(prefix: str, idx: int) -> None:
            # Each level of recursion consumes one digit; a complete
            # prefix (one letter per digit) is a finished combination.
            if idx == len(digits):
                combos.append(prefix)
                return
            for letter in keypad[digits[idx]]:
                dfs(prefix + letter, idx + 1)

        dfs('', 0)
        return combos
# Demo driver: print the combinations for "23".
# (The original discarded the result, and its last line was fused with
# dataset metadata, which broke the syntax.)
if __name__ == "__main__":
    s = Solution()
    print(s.letterCombinations('23'))
#!/usr/bin/env python
"""BOJ 11328: for each word pair, decide whether they are anagrams.

Reads the number of test cases, then one pair of words per line, and
prints "Possible" if the second word is a rearrangement of the first,
"Impossible" otherwise.

BUGFIX: the original ``break`` on a length mismatch exited the outer
test-case loop, silently skipping every remaining pair instead of
printing "Impossible" for the current one.
"""
for _ in range(int(input())):
    word, cpy = input().split()
    # Two words are anagrams iff their sorted character sequences match;
    # this also covers the unequal-length case.
    print("Possible" if sorted(word) == sorted(cpy) else "Impossible")
| hansojin/python | string/bj11328.py | bj11328.py | py | 601 | python | en | code | 0 | github-code | 90 |
7058390358 | from fastapi import FastAPI, HTTPException, Request
from fastapi.middleware.cors import CORSMiddleware
from typing import Optional
from pydantic import BaseModel
import db_manager
app = FastAPI()

# Browser origins allowed to call this API (CORS allow-list).
origins = [
    "http://127.0.0.1:8080",
    "http://localhost:8080",
    "http://192.168.1.41:8080",
    "https://buzo.xyz"
]

app.add_middleware(
    CORSMiddleware,
    allow_origins=origins,
    allow_credentials=True,
    allow_methods=["*"],  # all HTTP methods
    allow_headers=["*"],  # all request headers
)
@app.get("/")
async def root():
    """Return static API metadata (name, version, author, contact)."""
    return {
        "name": "buzo.xyz API",
        "version": "1.1.0",
        "author": "Gurjot Sidhu",
        "contact": "contact@thatgurjot.com"
    }
# Pull info
@app.get("/api/v1/resources/links")
async def main(count: Optional[int] = 1, source: Optional[str] = None,
        link: Optional[str] = None):
    """Fetch stored links via db_manager, optionally filtered.

    :param count: maximum number of rows to return (default 1).
    :param source: optional source filter.
    :param link: optional exact-link filter.
    """
    result = db_manager.read(count=count, source=source, link=link)
    return result
# Add item to DB
@app.post("/api/v1/storage/add")
async def store(link: str):
    """Store *link*; if it already exists, return the stored row with
    an ``exists`` flag instead of inserting a duplicate."""
    response = db_manager.read(link=link,short=1)
    # if it's a new link, add it to db
    if response == []:
        response = db_manager.add(link)
    else:
        # Already stored: return the first matching row, flagged.
        response = response[0]
        response['exists'] = True
    return response
# Delete item from DB
@app.delete("/api/v1/storage/purge")
async def delete(link: str):
    """Delete *link* from the DB and report db_manager's result."""
    response = db_manager.delete(link)
    return {"deleted": response['response']}
# Update item in DB
@app.post("/api/v1/storage/update")
async def update(link: str, likes: int):
    """Update the like count for *link* and return db_manager's response."""
    # (Restored: the original final line was fused with dataset metadata,
    # which broke the syntax.)
    response = db_manager.update(link, likes)
    return response
# Simple BMI classifier for hard-coded measurements.
height_m = 173 * 0.01  # cm -> m
weight_kg = 65
bmi = round(weight_kg / (height_m * height_m), 2)
print(bmi)
# BUGFIX: the original tested `bmi > 30`, so a BMI of exactly 30 fell
# through every branch and was reported as "skinny". Use >= 30.
if bmi >= 30:
    print("fat")
elif 25 <= bmi < 30:
    print("little fat")
elif 20 <= bmi < 25:
    print("good")
else:
    print("skinny")
38152767481 | import cv2
import numpy as np
import matplotlib.pyplot as plt
# Detect bright line segments in a video frame: threshold on lightness in
# HLS space, skeletonize the mask morphologically, then run a probabilistic
# Hough transform and draw the detected segments.
image = cv2.imread('images/frame1296.jpg')
# Threshold on the L channel (>= 150) to keep only bright/white pixels.
hls = cv2.cvtColor(image, cv2.COLOR_BGR2HLS)
lower = np.array([0, 150, 0], dtype = "uint8")
upper = np.array([255, 255, 255], dtype = "uint8")
mask = cv2.inRange(hls, lower, upper)
res = cv2.bitwise_and(image, image, mask = mask).astype(np.uint8)
res = cv2.cvtColor(res, cv2.COLOR_HLS2BGR)
"""plt.imshow(res, cmap = "gray")
plt.show()"""
"""img = res
edges = cv2.Canny(img,100,200,L2gradient=True)
plt.subplot(121),plt.imshow(img,cmap = 'gray')
plt.title('Original Image'), plt.xticks([]), plt.yticks([])
plt.subplot(122),plt.imshow(edges,cmap = 'gray')
plt.title('Edge Image'), plt.xticks([]), plt.yticks([])
plt.show()"""
# Morphological skeletonization: repeatedly erode the mask and OR the
# "ridge" (mask minus its opening) into `skel` until the mask is empty.
# NOTE: this loop consumes `mask` (it ends up all zeros).
height,width = mask.shape
skel = np.zeros([height,width],dtype=np.uint8)  # single-channel skeleton buffer
kernel = cv2.getStructuringElement(cv2.MORPH_CROSS, (3,3))
temp_nonzero = np.count_nonzero(mask)  # NOTE(review): unused — left as-is
while(np.count_nonzero(mask) != 0 ):
    eroded = cv2.erode(mask,kernel)
    temp = cv2.dilate(eroded,kernel)
    temp = cv2.subtract(mask,temp)
    skel = cv2.bitwise_or(skel,temp)
    mask = eroded.copy()
# Edge-detect the skeleton and extract line segments; draw them in red
# (BGR 0,0,255) on the thresholded image.
img = res
edges = cv2.Canny(skel, 50, 150)
lines = cv2.HoughLinesP(edges,1,np.pi/180,100,minLineLength=30,maxLineGap=30)
for i in range(len(lines)):
    x1,y1,x2,y2 = lines[i][0]
    cv2.line(img,(x1,y1),(x2,y2),(0,0,255),2)
    print(x1,y1,x2,y2)
"""
img = res
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
edges = cv2.Canny(gray, 50, 200, apertureSize = 3)
lines = cv2.HoughLines(edges, 0.01, np.pi/180, 50)
for i in range(len(lines)):
    r, theta = lines[i][0]
    a = np.cos(theta)
    b = np.sin(theta)
    x0 = a*r
    y0 = b*r
    x1 = int(x0 + 1000*(-b))
    y1 = int(y0 + 1000*(a))
    x2 = int(x0 - 1000*(-b))
    y2 = int(y0 - 1000*(a))
    cv2.line(img,(x1,y1), (x2,y2), (0,0,255),2)
"""
plt.imshow(img)
plt.show()
| ridouaneg/DeepFootballAnalysis | hsl_test.py | hsl_test.py | py | 1,862 | python | en | code | 10 | github-code | 90 |
40172572810 | import datetime
import ccxt
import pytz
from unittest import mock
from django.test import TestCase
from django.core.management import call_command
from .utils import parse_datetime
from . import factories as factory
from .factories import ExchangeFactory, MarketFactory, MarketOHLCVFactory, AccountFactory
from .models import Market, Exchange, MarketOHLCV
class AccountTest(TestCase):
    """Tests for Account.put_order_exchange with the exchange API mocked."""
    def setUp(self):
        # Shared fixtures: a kraken exchange and one BTC/USD market on it.
        self.kraken = ExchangeFactory.create(id_name='kraken')
        self.market = MarketFactory.create(symbol='BTC/USD', exchange=self.kraken)
    @mock.patch('src.exchanges.models.Market.exchange_api')
    def test_put_order_exchange(self, MockExchangeApi):
        """put_order_exchange should return the order id created by the exchange."""
        account = AccountFactory.create(exchange=self.kraken)
        order_id = 123
        # The mocked exchange API echoes back a fixed order id.
        MockExchangeApi.return_value.create_order.return_value = order_id
        order = account.put_order_exchange(self.market, side='buy', price=800, amount=100, type_order='market',
                                           params={'test': True})
        # BUGFIX: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(order_id, order)
class ExchangeTest(TestCase):
    """Tests for Exchange.api_instance wiring to the ccxt client."""
    def setUp(self):
        self.kraken = ExchangeFactory.create(id_name='kraken')
        self.market = MarketFactory.create(symbol='BTC/USD', exchange=self.kraken)
    def test_get_api_instance(self):
        """api_instance() should return a ccxt exchange client."""
        api = self.kraken.api_instance()
        self.assertIsInstance(api, ccxt.Exchange)
    def test_get_api_instance_with_parameters(self):
        """Credentials/options must be passed through to the ccxt client."""
        api_key = 'KEY'
        secret = 'CODE'
        rate_limit = True
        api = self.kraken.api_instance({
            'apiKey': api_key,
            'secret': secret,
            'enableRateLimit': rate_limit,
        })
        # BUGFIX: assertEquals is a deprecated alias removed in Python 3.12.
        self.assertEqual(api.apiKey, api_key)
        self.assertEqual(api.secret, secret)
        self.assertEqual(api.enableRateLimit, rate_limit)
class MarketTest(TestCase):
    """Tests for Market OHLCV fetching and manager helper queries."""
    def setUp(self):
        self.kraken = ExchangeFactory.create(id_name='kraken')
        self.market = MarketFactory.create(symbol='BTC/USD', exchange=self.kraken)
    @mock.patch('ccxt.kraken.fetch_ohlcv')
    def test_fecth_ohlcv(self, FecthMock):
        # NOTE(review): local name `datetime` shadows the datetime module here.
        datetime = '2017-01-01 00:00:00'
        FecthMock.return_value = factory.simulate_fetch_ohlcv()
        ohlcv = self.market.fetch_ohlcv_history(from_datetime=datetime)
        self.assertEqual(720, len(ohlcv))
    def test_convert_datetime_to_timestamp(self):
        kraken = self.kraken
        market = self.market
        datetime = '2015-01-01 00:00:00'
        # Unix epoch in milliseconds for 2015-01-01 00:00:00 UTC.
        expected_timestamp = 1420070400000
        timestamp = market.convert_datetime_to_timestamp(market.exchange_api(), datetime)
        self.assertEqual(expected_timestamp, timestamp)
    def test_get_last_historical_date(self):
        first_timestamp = 1546300800000 # 2019-01-01 00:00:00
        last_timestamp = 1546301100000 # 2019-01-01 00:05:00
        lure_timestamp = 1557032705000 # 2019-05-05 05:05:05
        last_date = parse_datetime(last_timestamp)
        MarketOHLCVFactory.create(market=self.market, date=parse_datetime(first_timestamp))
        MarketOHLCVFactory.create(market=self.market, date=last_date)
        # The Lure Row: belongs to a different market and must be ignored.
        MarketOHLCVFactory.create(date=parse_datetime(lure_timestamp))
        date = Market.objects.get_last_historical_date(self.market)
        self.assertEqual(last_date, date)
        self.assertIsInstance(date, datetime.datetime)
    def test_get_last_historical_date_when_not_exists_ohlcv_in_market(self):
        lure_timestamp = 1557032705000 # 2019-05-05 05:05:05
        # The Lure Row
        MarketOHLCVFactory.create(date=parse_datetime(lure_timestamp))
        # With no OHLCV rows for this market, the manager falls back to FROM_DATE.
        result = Market.objects.get_last_historical_date(self.market)
        self.assertEqual(Market.FROM_DATE, result)
        self.assertIsInstance(result, datetime.datetime)
class MarketOHLCVTest(TestCase):
    """Tests for bulk creation and single-price insertion of MarketOHLCV rows."""
    def setUp(self):
        self.kraken = ExchangeFactory.create(id_name='kraken')
        self.market = MarketFactory.create(symbol='BTC/USD', exchange=self.kraken)
        # Two raw OHLCV rows: [timestamp_ms, open, high, low, close, volume].
        self.raw_ohlcv = [[1546300800000, 3698.0, 3700.6, 3697.9, 3700.6, 5.0886],
                          [1546300860000, 3700.6, 3709.9, 3700.6, 3709.8, 7.15316174]]
    def test_build_from_ohlcv(self):
        # bulk_ohlcv should persist every simulated row and report the count.
        lot_ohlcv = factory.simulate_fetch_ohlcv()
        saved = MarketOHLCV.objects.bulk_ohlcv(market=self.market, ohlcv=lot_ohlcv)
        self.assertEqual(720, saved)
        self.assertEqual(720, MarketOHLCV.objects.count())
    def test_add_price(self):
        # The last stored row should carry the timestamp of the second raw row.
        date = datetime.datetime(2019, 1, 1, hour=0, minute=1, second=0, tzinfo=pytz.utc)
        MarketOHLCV.objects.add_price(self.market, self.raw_ohlcv)
        self.assertEqual(2, MarketOHLCV.objects.count())
        self.assertEqual(date, MarketOHLCV.objects.last().date)
class UtilsTest(TestCase):
    """Tests for the parse_datetime utility (unix ms/s -> aware datetime)."""
    def test_parse_datetime(self):
        """Parse keeping utc.
        Note: unix_time parameters for default it's received in milliseconds"""
        from .utils import parse_datetime
        unix_time = 1546300800000
        date = datetime.datetime(2019, 1, 1, tzinfo=pytz.utc)
        parse = parse_datetime(unix_time)
        self.assertEqual(date, parse)
    def test_parse_datetime_when_unit_time_parameters_not_has_milliseconds(self):
        # Same instant given in seconds; flag tells the parser not to divide.
        from .utils import parse_datetime
        unix_time = 1546300800
        date = datetime.datetime(2019, 1, 1, tzinfo=pytz.utc)
        parse = parse_datetime(unix_time, has_milliseconds=False)
        self.assertEqual(date, parse)
| henrypalacios/crypstation | src/exchanges/tests.py | tests.py | py | 5,463 | python | en | code | 0 | github-code | 90 |
def merge_sort(seq):
    """Recursively sort *seq* (ascending) by splitting and merging halves."""
    if len(seq) <= 1:
        return seq
    middle = len(seq) // 2
    left_half = merge_sort(seq[:middle])
    right_half = merge_sort(seq[middle:])
    return merge_sorted_list(left_half, right_half)
def merge_sorted_list(sorted_a, sorted_b):
    """Merge two ascending lists into one new ascending list.

    BUGFIX: the original never advanced the index into ``sorted_b`` after
    taking an element from it, so any merge that consumed from ``sorted_b``
    looped forever.
    """
    len_a, len_b = len(sorted_a), len(sorted_b)
    a = b = 0
    new_sorted_seq = list()
    while a < len_a and b < len_b:
        if sorted_a[a] < sorted_b[b]:
            new_sorted_seq.append(sorted_a[a])
            a += 1
        else:
            new_sorted_seq.append(sorted_b[b])
            b += 1  # the missing advance
    # Exactly one side may still have elements; append the remainder.
    if a < len_a:
        new_sorted_seq.extend(sorted_a[a:])
    else:
        new_sorted_seq.extend(sorted_b[b:])
    return new_sorted_seq
# Demo: sort a sample list (includes duplicates and a negative value).
l = [1, 3, 5, 7, 2, 2, 2, 2, 2, 2, 4, 6, 9, 8, 0, 11, -5]
l = merge_sort(l)
print(l)
| nanw01/python-algrothm | Python Algrothm Advanced/practice/040207mergesorted copy 4.py | 040207mergesorted copy 4.py | py | 767 | python | en | code | 1 | github-code | 90 |
# Reads products until the user stops; reports total spend, how many items
# cost more than R$1000, and the cheapest product.
# Cleanups: removed the unused `mais` variable, replaced the hard-coded
# 999... sentinel with float('inf'), and merged the report ifs into a chain.
total = 0
contp = 0
pbarato = float('inf')  # price of the cheapest product seen so far
nomepb = 'a'
while True:
    pdt = str(input('Informe o nome do produto: '))
    preço = float(input('Informe o preço do produto: '))
    total += preço
    if preço > 1000:
        contp += 1
    if preço < pbarato:
        pbarato = preço
        nomepb = pdt
    quest = str(input('Digite C para continuar ou P para parar: ')).upper()
    if quest == 'P':
        break
print(f'O total foi R${total:.2f}')
if contp > 1:
    print(f'{contp} produtos custam mais de R$1000,00!')
elif contp == 1:
    print('Apenas um produto custa mais de R$1000,00!')
else:
    print('Nenhum produto custa mais de R$1000,00!')
print(f'{nomepb} é o produto mais barato. ')
| lucasptcastro/projetos-curso-em-video-python | ex070.py | ex070.py | py | 739 | python | pt | code | 1 | github-code | 90 |
44872392858 | import sys
import collections
import copy
def bfs(virus_activated):
    # Spread virus from the given activated positions over a copy of the
    # global `table` (grid of 0=empty, 1=wall, 2=inactive virus).
    # Returns the minimum time needed to infect every empty cell, or -1
    # if some empty cell is unreachable. Globals read: `table`, `size`.
    chart = copy.deepcopy(table)
    queue = collections.deque()
    dx = [0, 0, 1, -1]; dy = [1, -1, 0, 0]
    check = [[0 for _ in range(size)] for _ in range(size)]
    # Seed the queue with every activated virus (x, y, elapsed-time).
    while virus_activated:
        temp = virus_activated.pop()
        check[temp[0]][temp[1]] = 1
        queue.append(temp)
    maximum = 0
    while queue:
        x, y, count = queue.popleft()
        for i in range(4):
            new_x = x + dx[i]; new_y = y + dy[i]
            if 0 <= new_x < size and 0 <= new_y < size and check[new_x][new_y] == 0:
                if chart[new_x][new_y] != 1:
                    if chart[new_x][new_y] == 0: # only empty cells count toward the elapsed-time maximum
                        if count + 1 > maximum:
                            maximum = count + 1
                    chart[new_x][new_y] = 2 # mark as infected
                    check[new_x][new_y] = 1 # mark as visited
                    queue.append((new_x, new_y, count + 1))
    # If any empty (0) cell remains, this virus combination fails.
    flag = 0
    for n in range(size):
        for m in range(size):
            if chart[n][m] == 0:
                flag = 1
                break
        if flag == 1:
            break
    if flag == 1:
        return -1
    else:
        return maximum
def recursive(pos): # combinations.
    # Depth-first enumeration of all combinations of `virus` viruses out of
    # `virus_list`, stored as indices in the global list `cd`. For each
    # complete combination, run bfs() and keep the global `minimum` time.
    global minimum
    if pos == virus:
        virus_activated = []
        for j in range(virus):
            virus_activated.append(virus_list[cd[j]])
        num = bfs(virus_activated)
        if num == -1:
            return  # this combination cannot infect every empty cell
        minimum = min(num, minimum)
        return
    # Only indices greater than the current maximum are tried, so each
    # combination is generated exactly once (strictly increasing indices).
    for i in range(max(cd) + 1, len(virus_list)):
        cd[pos] = i
        recursive(pos + 1)
        cd[pos] = -1
# Read the lab grid, remember every virus position, then try every
# combination of `virus` active viruses via recursive().
# (Restored: the original final line was fused with dataset metadata,
# which broke the syntax.)
size, virus = map(int, sys.stdin.readline().split())
table = []
virus_list = []
for i in range(size):
    temp = list(map(int, sys.stdin.readline().split()))
    for j in range(len(temp)):
        if temp[j] == 2:
            virus_list.append((i, j, 0))
    table.append(temp)
cd = [-1 for _ in range(virus)]
minimum = 2502  # sentinel larger than any reachable spreading time (grid <= 50x50)
recursive(0)
# Sentinel untouched -> no combination infected every empty cell.
if minimum == 2502:
    print(-1)
else:
    print(minimum)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.