seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 โ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k โ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
13145861341 | from model.component.component_specification import ComponentSpecification
from model.component.socket.socket_specification import SocketSpecification
from model.component.subgraph_component import SubgraphComponentModel
from model.module.prototype_specifications import PrototypeSpecifications
from model.module.toolbox_item.toolbox_item_specifications import ToolboxItemSpecifications
from observables.observable_dictionary import ObservableDict
class ComponentRepository:
    """Creates, persists, and loads component models together with their sockets.

    Collaborators (injected): an identifier factory, a socket repository, a
    module (prototype) repository, and an XML helper used for (de)serialization.
    """

    # Shadowed by the instance-level ObservableDict created in __init__.
    defined_components = None

    def __init__(self, identifier_factory, socket_repository, module_repository, xml_helper):
        self.identifier_factory = identifier_factory
        self.socket_repository = socket_repository
        self.xml_helper = xml_helper
        self.module_repository = module_repository
        self.defined_components = ObservableDict()

    def create_component_with_sockets(self, component):
        """Give *component* a unique identifier, attach its default in/out
        sockets, register it, and return it."""
        prototype = self.module_repository.get_prototype_by_id(component.prototype_id)
        identifier = self.identifier_factory.get_next_identifier(name_string=prototype.get_name())
        component.set_unique_identifier(identifier)

        for description in component.get_default_in_sockets():
            spec = SocketSpecification()
            spec.parent_component = component
            spec.socket_type = "in"
            spec.description = description
            component.add_in_socket(self.socket_repository.create_socket(spec))

        for description in component.get_default_out_sockets():
            spec = SocketSpecification()
            spec.parent_component = component
            spec.socket_type = "out"
            spec.description = description
            component.add_out_socket(self.socket_repository.create_socket(spec))

        # NOTE(review): ObservableDict exposes append/update here -- confirm
        # the intended collection semantics against observables/observable_dictionary.
        self.defined_components.append(component)
        return component

    def update_component(self, component):
        """Propagate a change of *component* through the observable registry."""
        self.defined_components.update(component)

    def save_component(self, component, outfile):
        """Serialize *component* as an XML <component> element to *outfile*.

        Writes class, package, every attribute, and one <socket> element per
        in-socket that has at least one incoming edge.
        """
        name = component.get_unique_identifier()
        print(self.xml_helper.get_header("component", {"name": name}, indentation=2), file=outfile)
        print(self.xml_helper.get_header("class", indentation=3) + component.module_name + self.xml_helper.get_footer("class"), file=outfile)
        print(self.xml_helper.get_header("package", indentation=3) + component.module_package + self.xml_helper.get_footer("package"), file=outfile)

        for key in component.attributes:
            print(self.xml_helper.get_header("attribute", {"key": key}, indentation=3)
                  + component.attributes[key] + self.xml_helper.get_footer("attribute"), file=outfile)

        for in_socket in component.get_in_sockets():
            origin_names = [edge.origin.get_unique_identifier() for edge in in_socket.get_edges_in()]
            if origin_names:
                socket_xml = self.xml_helper.get_header(
                    "socket", {"name": in_socket.description['name']}, indentation=3)
                socket_xml += ",".join(origin_names)
                socket_xml += self.xml_helper.get_footer("socket")
                print(socket_xml, file=outfile)

        print(self.xml_helper.get_footer("component", indentation=2), file=outfile)

    def load_next_component(self, lines, start_index=0):
        """Parse the next <component> element from *lines*.

        Returns (component, next_index, edges) where *edges* maps in-socket
        names to comma-separated origin socket identifiers.
        """
        symbol, attributes, next_index = self.xml_helper.pop_symbol(lines, start_index=start_index)
        name = attributes["name"]
        class_symbol = ""
        package_symbol = ""
        component_attributes = {}
        # TODO: This is a code smell
        edges = {}

        while symbol != "/component":
            symbol, attributes, next_index = self.xml_helper.pop_symbol(lines, start_index=next_index)
            if symbol == "class":
                class_symbol, _, next_index = self.xml_helper.pop_symbol(lines, start_index=next_index, expect_value=True)
            elif symbol == "package":
                package_symbol, _, next_index = self.xml_helper.pop_symbol(lines, start_index=next_index, expect_value=True)
            elif symbol == "attribute":
                value, _, next_index = self.xml_helper.pop_symbol(lines, start_index=next_index, expect_value=True)
                component_attributes[attributes['key']] = value
            elif symbol == "socket":
                target, _, next_index = self.xml_helper.pop_symbol(lines, start_index=next_index, expect_value=True)
                edges[attributes['name']] = target

        proto_spec = PrototypeSpecifications()
        proto_spec.package = package_symbol
        proto_spec.name = class_symbol
        # TODO: Stupid hack
        if 'canvas' in component_attributes:
            proto_spec.canvas = component_attributes['canvas']

        toolbox_item = self.module_repository.get_prototype(proto_spec)
        component = toolbox_item.prototype_class(None)
        component.prototype_id = toolbox_item.get_unique_identifier()
        component.update_attributes(toolbox_item.get_attributes())
        component.update_attributes(component_attributes)
        component = self.create_component_with_sockets(component)
        # TODO: Load all graphs, then load all components
        return component, next_index, edges
3674524668 | #Importing pyplot submodule
# Importing pyplot submodule
import matplotlib.pyplot as plt
import numpy as np

x = np.array(['Apples', 'Bananas', 'Lichi', 'Pineapple'])
y = np.array([100, 45, 60, 90])

# BUG FIX: the original called plt.show() right after the first subplot, so
# the two subplots ended up in two separate figures and the suptitle applied
# to only one of them. Draw both panels first, then show once.

# Horizontal bars on the left: barh() takes height instead of width.
plt.subplot(1, 2, 1)
plt.barh(x, y, height=0.5)
plt.title('Horizontal')

# Vertical bars on the right, using bar().
plt.subplot(1, 2, 2)
plt.bar(x, y, width=0.5)
plt.title('Vertical')

plt.suptitle('Prices of fruits per kg')
plt.show()
| manudeepsinha/daily_commit | 2020/12/Python/23_matplotlib_bar.py | 23_matplotlib_bar.py | py | 524 | python | en | code | 0 | github-code | 36 |
35383679834 | #!/usr/bin/env python3
from sys import exit
from collections import Counter
import random
from statistics import mean
from TALinputs import TALinput
from multilanguage import Env, Lang, TALcolors
import mastermind_utilities as Utilities
# METADATA OF THIS TAL_SERVICE:
args_list = [
    ('max_num_attempts', int),
    ('num_matches', int),
    ('num_pegs', int),
    ('num_colors', int),
]
ENV = Env(args_list)
TAc = TALcolors(ENV)
# NOTE: eval() renders localized f-string templates; the templates are trusted
# service files shipped with the problem, not user input.
LANG = Lang(ENV, TAc, lambda fstring: eval(f"f'{fstring}'"))

# START CODING YOUR SERVICE:
if ENV["seed"] == 'random_seed':
    seed = random.randint(100000, 999999)
else:
    seed = int(ENV["seed"])
print(LANG.render_feedback("assigned-instance", f"# The assigned instance is:\n# number of pegs: {ENV['num_pegs']}\n# number of colors: {ENV['num_colors']}\n# Seed: "), end="")
TAc.print(seed, "yellow")
print(LANG.render_feedback("prompt", f"# Enter your first attempt which must be a sequence of {ENV['num_pegs']} colors separated by spaces.\n# example: \n# 1 4 3 \n# The server will respond with as many 'b' as the colors in the correct position and as many 'w' as the correct colors. \n"))

maxNumAttempts = ENV["max_num_attempts"]
numPegs = ENV["num_pegs"]
numColors = ENV["num_colors"]
sumAttempts = []   # attempts used in each won match
matchWin = 0
matchDone = 0

while matchDone < ENV["num_matches"]:
    matchDone += 1
    seed = random.randint(100000, 999999)
    print(LANG.render_feedback("new-match", f"# match {matchDone} of {ENV['num_matches']}. Seed: "), end="")
    TAc.print(seed, "yellow")
    secretCode = Utilities.generateRandomPegsList(numPegs, numColors, seed)
    count = 0
    buffer = None
    solved = False
    while count < maxNumAttempts:
        count += 1
        buffer = TALinput(
            str,
            num_tokens=numPegs,
            regex=r"^([1-" + str(numColors) + "])$",
            regex_explained="a sequence of number from 1 to " + str(numColors) + " separated by spaces. An example is: '4 2 1'.",
            TAc=TAc
        )
        guessedCode = [int(i) for i in buffer]
        rightColor, rightPositonAndColor = Utilities.calculateScore(secretCode, guessedCode)
        result = Utilities.getStringOfResult(rightColor, rightPositonAndColor)
        print(result)
        if rightPositonAndColor == numPegs and rightColor == 0:
            TAc.print(LANG.render_feedback("right-secret-code", f"# You found the secret code in {count} attempts.\n"), "green", ["bold"])
            sumAttempts.append(count)
            matchWin += 1
            solved = True
            break
    # BUG FIX: the original re-scored the final guess whenever
    # count >= maxNumAttempts, so a win on the very last attempt was counted
    # TWICE (matchWin += 2, the attempt count appended twice). The re-check
    # now runs only when the loop exhausted all attempts without a win; the
    # original relaxed post-loop rule (position matches only) is preserved.
    if not solved:
        guessedCode = [int(i) for i in buffer]
        rightColor, rightPositonAndColor = Utilities.calculateScore(secretCode, guessedCode)
        if rightPositonAndColor == numPegs:
            TAc.print(LANG.render_feedback("right-secret-code", f"# You found the secret code in {count} attempts.\n"), "green", ["bold"])
            sumAttempts.append(count)
            matchWin += 1
        else:
            TAc.print(LANG.render_feedback("wrong-secret-code", f"# You didn't find the secret code, the secret code is [{' '.join(map(str, secretCode))}].\n"), "red", ["bold"])
    print('#end')

# NOTE(review): mean()/max() raise on an empty list, so this still crashes
# when zero matches are won (same as the original) -- TODO guard matchWin == 0.
print(LANG.render_feedback("matches-statistics", f"# Statistics:\n# Matches won: {matchWin}/{ENV['num_matches']}\n# avg number of attempts (over won matches): {mean(sumAttempts)}\n# maximum number of attempts (over won matches): {max(sumAttempts)}"))
| romeorizzi/TALight | example_problems/tutorial/mastermind/services/eval_driver.py | eval_driver.py | py | 3,432 | python | en | code | 11 | github-code | 36 |
6884335052 | import time
import torch
torch.set_printoptions(precision=7)
from addict import Dict as adict
from torch.nn import functional as F
from zerovl.core import DistHook, HookMode, WandbHook
from zerovl.core.hooks.log import LogHook
from zerovl.core.runners.builder import RUNNER
from zerovl.core.runners.epoch_runner import EpochRunner
from zerovl.utils.dist import generate_local_groups
from zerovl.tasks.clip.hooks import *
from zerovl.utils.misc import calc_topk_accuracy
from zerovl.utils import all_gather_group, logger, ENV
try:
from apex import amp
except ImportError:
pass
import numpy as np
import random
def setup_seed(seed):
    """Seed every RNG in use (Python, NumPy, torch CPU and all CUDA devices)
    and force deterministic cuDNN kernels.

    Needed for stable decoupled gradient accumulation (DGA): both forward
    passes of a step must see identical randomness.
    """
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    torch.backends.cudnn.deterministic = True
@RUNNER.register_obj
class CLIP_BSGS_Runner(EpochRunner):
    """ A runner used for clip
    Args:
        cfg (adict): global config.
    """

    def __init__(self, cfg, data_loaders, model):
        logger.info("CLIP runner initiated")
        super(CLIP_BSGS_Runner, self).__init__(cfg, data_loaders, model)
        self._init_clip_runner()
        # Per-dataset sampling weights (proportional to dataset size) for
        # debias_train; None when no training datasets were provided.
        if data_loaders['train_dataset']:
            num_samples = [len(dataset) for dataset in data_loaders['train_dataset']]
            total_num = sum(num_samples)
            self.sample_weights = [num_sample / total_num for num_sample in num_samples]
        else:
            self.sample_weights = None

    def _init_clip_runner(self):
        """Cache config-derived scalars, build the local process group, and
        (optionally) a fixed-seed RNG for stable dataset sampling."""
        self.train_type = self.cfg.data.train_type
        self.total_steps = self.train_steps * self.max_epochs
        self.warmup_steps = int(self.total_steps * self.cfg.optim.lr.warmup_proportion)
        self.dist_name = self.cfg.dist.name
        self.fp16 = self.cfg.dist.fp16
        # Per-process batch sizes; micro-batch must divide eval batch, which
        # must divide the full accumulation batch.
        self.batch_size_train = self.cfg.data.batch_size_train // ENV.size
        self.batch_size_val = self.cfg.data.batch_size_val // ENV.size
        self.batch_size = self.cfg.data.batch_size // ENV.size
        assert self.batch_size_val % self.batch_size_train == 0
        assert self.batch_size % self.batch_size_val == 0
        group_size = self.cfg.loss.group_size
        if group_size < 0:
            group_size = ENV.size
        group, group_rank = generate_local_groups(group_size)
        self.rank = group_rank
        self.group = group
        if self.cfg.runner.stable_random != "none":
            assert self.cfg.data.batch_size_train == self.cfg.data.batch_size_val
            # set random seed for sampling from the same dataset.
            self.rng = np.random.default_rng(2021)

    def init_hook(self):
        """Register optimizer/dist/checkpoint/log/eval (and optional wandb) hooks."""
        self.register_hook(ClipOptimizerHook(self),
                           priority='very_high', hook_mode=HookMode.TRAIN)
        self.register_hook(DistHook(self),
                           priority='very_high', hook_mode=HookMode.TRAIN)
        self.register_hook(ClipCheckpointHook(self),
                           priority='low', hook_mode=HookMode.TRAIN)
        self.register_hook(LogHook(self),
                           priority='very_low')
        if self.cfg.data.single_eval:
            self.register_hook(RetrievalLocalEvalHook(self),
                               priority='very_low', hook_mode=HookMode.TRAIN)
        else:
            self.register_hook(RetrievalEvalHook(self),
                               priority='very_low', hook_mode=HookMode.TRAIN)
        if self.cfg.wandb.enable:
            self.register_hook(WandbHook(self),
                               priority='lowest', hook_mode=HookMode.TRAIN)

    def input_preprocess(self, batch, mode='train'):
        """Move every tensor field of the batch onto the device; drop text-only fields."""
        batch = {k: v.cuda(ENV.device, non_blocking=True) for k, v in batch.items() if k not in ['caption', 'name']}
        return batch

    def create_batch_dict(self, batch, mode='train'):
        """Unpack the raw dataloader tuple into a named adict (eval batches
        additionally carry image/caption ids)."""
        batch_dict = adict()
        if mode == 'train':
            batch_dict['image'], batch_dict['input_ids'], batch_dict['attention_mask'], \
                batch_dict['caption'] = batch
        else:
            batch_dict['image'], batch_dict['input_ids'], batch_dict['attention_mask'], \
                batch_dict['caption'], batch_dict['image_id'], batch_dict['caption_id'] = batch
        return batch_dict

    def train(self, data_iter, epoch_state, train_steps=None):
        """One epoch over a single (shuffled) dataloader."""
        if data_iter is None:
            return
        self.model.train()
        data_iter = data_iter[0]
        self.call_hook('_before_train_epoch', epoch_state)
        for batch in data_iter:
            step_state = adict()
            batch = self.create_batch_dict(batch)
            batch = self.input_preprocess(batch)
            if train_steps and epoch_state.inner_step > train_steps:
                break
            self.call_hook('_before_train_step', epoch_state, step_state)
            step_state.batch_output = self.batch_processor(batch)
            self.call_hook('_after_train_step', epoch_state, step_state)
            # Mid-epoch validation every val_interval_steps (and at the very end).
            if self.val_dataloader_list and self.val_interval_steps > 0 and \
                    ((self.step + 1) % self.val_interval_steps == 0 or (self.step + 1) == self.total_steps):
                for val_dataloader, val_steps, val_dataset_name in zip(self.val_dataloader_list, self.val_steps_list, self.cfg.data.valid_name):
                    self.val(val_dataloader, val_steps, val_dataset_name)
                self.model.train()
            self.step += 1
            epoch_state.inner_step += 1
        self.call_hook('_after_train_epoch', epoch_state)

    def sequential_train(self, data_iters, epoch_state, train_steps=None):
        """One epoch over several dataloaders, consumed one after another."""
        if data_iters is None:
            return
        self.model.train()
        self.call_hook('_before_train_epoch', epoch_state)
        for data_iter in data_iters:
            for batch in data_iter:
                step_state = adict()
                batch = self.create_batch_dict(batch)
                batch = self.input_preprocess(batch)
                if train_steps and epoch_state.inner_step > train_steps:
                    logger.emph('breaked??')
                    break
                self.call_hook('_before_train_step', epoch_state, step_state)
                step_state.batch_output = self.batch_processor(batch)
                self.call_hook('_after_train_step', epoch_state, step_state)
                if self.val_dataloader_list and self.val_interval_steps > 0 and \
                        ((self.step + 1) % self.val_interval_steps == 0 or (self.step + 1) == self.total_steps):
                    for val_dataloader, val_steps, val_dataset_name in zip(self.val_dataloader_list, self.val_steps_list, self.cfg.data.valid_name):
                        self.val(val_dataloader, val_steps, val_dataset_name)
                    self.model.train()
                self.step += 1
                epoch_state.inner_step += 1
        self.call_hook('_after_train_epoch', epoch_state)

    def debias_train(self, data_loaders, epoch_state, train_steps=None):
        """One epoch sampling among several dataloaders with size-proportional
        probabilities; exhausted iterators are restarted."""
        if data_loaders is None:
            return
        self.model.train()
        data_iters = [iter(data_loader) for data_loader in data_loaders]
        num_datasets = len(data_loaders)
        self.call_hook('_before_train_epoch', epoch_state)
        for i in range(train_steps):
            iter_index = self.rng.choice(num_datasets, p=self.sample_weights)
            try:
                data_iter = data_iters[iter_index]
                batch = next(data_iter)
            except StopIteration:
                data_iters[iter_index] = iter(data_loaders[iter_index])
                data_iter = data_iters[iter_index]
                batch = next(data_iter)
            step_state = adict()
            batch = self.create_batch_dict(batch)
            batch = self.input_preprocess(batch)
            if train_steps and epoch_state.inner_step > train_steps:
                break
            self.call_hook('_before_train_step', epoch_state, step_state)
            step_state.batch_output = self.batch_processor(batch)
            self.call_hook('_after_train_step', epoch_state, step_state)
            if self.val_dataloader_list and self.val_interval_steps > 0 and \
                    ((self.step + 1) % self.val_interval_steps == 0 or (self.step + 1) == self.total_steps):
                for val_dataloader, val_steps, val_dataset_name in zip(self.val_dataloader_list, self.val_steps_list, self.cfg.data.valid_name):
                    self.val(val_dataloader, val_steps, val_dataset_name)
                self.model.train()
            self.step += 1
            epoch_state.inner_step += 1
        self.call_hook('_after_train_epoch', epoch_state)

    def val(self, data_loader, val_steps=None, val_dataset_name=None):
        """Run one validation pass, collecting embeddings via the eval hooks."""
        if data_loader is None:
            return
        self.model.eval()
        # In single_eval mode only rank 0 evaluates.
        if self.cfg.data.single_eval and ENV.rank != 0: return
        epoch_state = adict()
        epoch_state.inner_step = 0
        epoch_state.data_loader = data_loader
        epoch_state.val_steps = val_steps
        epoch_state.dataset_name = val_dataset_name
        self.call_hook('_before_val_epoch', epoch_state)
        for batch in data_loader:
            # init step state dict
            step_state = adict()
            batch = self.create_batch_dict(batch, mode='valid')
            batch = self.input_preprocess(batch, mode='valid')
            if val_steps and epoch_state.inner_step >= val_steps:
                break
            self.call_hook('_before_val_step', epoch_state, step_state)
            with torch.no_grad():
                step_state.batch_output = self.batch_processor(batch, embeddings=True)
            self.call_hook('_after_val_step', epoch_state, step_state)
            epoch_state.inner_step += 1
        self.call_hook('_after_val_epoch', epoch_state)

    def run(self):
        """Start running.
        """
        # Logging for start running
        logger.info(f'=> Start Running')
        # data loaders
        train_dataloader = self.train_dataloader
        val_dataloader_list = self.val_dataloader_list
        val_steps_list = self.val_steps_list
        self.call_hook('before_run')
        inner_step = 0
        if self.checkpoint:
            inner_step = self.checkpoint['meta']['inner_step']
        while self.epoch < self.max_epochs:
            # init train epoch state dict
            epoch_state = adict()
            epoch_state.inner_step = inner_step
            epoch_state.data_loader = train_dataloader
            # reset inner_step after first epoch from resume
            inner_step = 0
            if self.train_type == 'shuffle':
                self.train(train_dataloader, epoch_state, self.train_steps)
            elif self.train_type == 'sequential':
                self.sequential_train(train_dataloader, epoch_state, self.train_steps)
            elif self.train_type == 'debias':
                self.debias_train(train_dataloader, epoch_state, self.train_steps)
            else:
                raise NotImplementedError
            self.epoch += 1
            # End-of-epoch validation when step-based validation is disabled.
            if self.epoch % self.val_interval == 0 and val_dataloader_list and self.val_interval_steps < 0:
                for val_dataloader, val_steps in zip(val_dataloader_list, val_steps_list):
                    try:
                        val_data_iter = val_dataloader.get_iterator(0, 0)
                    except:
                        val_data_iter = val_dataloader
                    self.val(val_data_iter, val_steps)
        time.sleep(1)  # wait for some hooks like loggers to finish
        self.call_hook('after_run')

    def batch_processor(self, data_batch, embeddings=False):
        """BSGS step: a no-grad forward over micro-batches to build the full
        contrastive loss and its per-embedding gradients (left_I/left_T), then
        a second forward per micro-batch that backpropagates those terms.

        With embeddings=True, only returns the embeddings (eval path).
        """
        if self.cfg.runner.stable_random != "none":
            # Both forward passes must replay identical randomness.
            stable_random_seed = self.step
            setup_seed(stable_random_seed)
        mixup_kwargs = {}
        if self.model.module.use_mixup and not embeddings:
            mixup_kwargs = self.model.module.get_mixup_kwargs(mixup_kwargs)
        # NOTE(review): the no_grad region below is inferred to cover the first
        # forward pass and the left-term computation (standard BSGS/DGA) --
        # confirm against the upstream source.
        with torch.no_grad():
            if embeddings:
                _image_embeddings, _text_embeddings, temp = self.model(data_batch, embeddings='all', **mixup_kwargs)
                output = {'image_embeddings': _image_embeddings,
                          'text_embeddings': _text_embeddings,
                          'image_id': data_batch['image_id'],
                          'caption_id': data_batch['caption_id']
                          }
                return output
            image_embeddings_local, text_embeddings_local = [], []
            for _idx_l in range(0, self.batch_size, self.batch_size_train):
                _data_batch = {"image": data_batch["image"][_idx_l: _idx_l + self.batch_size_train],
                               "input_ids": data_batch["input_ids"][_idx_l: _idx_l + self.batch_size_train],
                               "attention_mask": data_batch["attention_mask"][_idx_l: _idx_l + self.batch_size_train]
                               }
                if self.scaler:
                    with torch.cuda.amp.autocast():
                        # (i', d), (t', d)
                        _image_embeddings, _text_embeddings, temp = self.model(_data_batch, embeddings='all', **mixup_kwargs)
                else:
                    # (i', d), (t', d)
                    _image_embeddings, _text_embeddings, temp = self.model(_data_batch, embeddings='all', **mixup_kwargs)
                image_embeddings_local.append(_image_embeddings)
                text_embeddings_local.append(_text_embeddings)
            # (i, d), (t, d)
            image_embeddings_local = torch.cat(image_embeddings_local, dim=0)
            text_embeddings_local = torch.cat(text_embeddings_local, dim=0)
            temp_sqrt = torch.sqrt(temp)
            # (i, d)
            image_embeddings_global = torch.cat(all_gather_group(image_embeddings_local, self.group), 0)
            # (t, d)
            text_embeddings_global = torch.cat(all_gather_group(text_embeddings_local, self.group), 0)
            s_i2t_nm = image_embeddings_global @ text_embeddings_local.T
            s_i2t_mn = image_embeddings_local @ text_embeddings_global.T
            # (i, t'), (i', t)
            s_i2t_nm /= temp
            s_i2t_mn /= temp
            # (i), (t)
            targets_i2t = torch.arange(self.batch_size * ENV.rank, self.batch_size * (ENV.rank + 1), device=ENV.device)
            targets_t2i = torch.arange(self.batch_size * ENV.rank, self.batch_size * (ENV.rank + 1), device=ENV.device)
            loss = 0.5 * (F.cross_entropy(s_i2t_mn, targets_i2t) + F.cross_entropy(s_i2t_nm.T, targets_t2i)).cpu().item()
            y_i2t = torch.eye(self.cfg.data.batch_size, device=image_embeddings_local.device)
            if self.model.module.use_mixup and not embeddings:
                y_i2t_flip = torch.block_diag(*[torch.eye(self.batch_size_train).flip(0)] * (self.cfg.data.batch_size // self.batch_size_train)).to(device=image_embeddings_local.device)
                alpha = mixup_kwargs['image_alpha'] if 'image_alpha' in mixup_kwargs else mixup_kwargs['text_alpha']
                y_i2t = alpha * y_i2t + (1 - alpha) * y_i2t_flip
            y_i2t = y_i2t[self.batch_size * ENV.rank: self.batch_size * (ENV.rank + 1), :]
            # (i'), (t')
            s_i2t_esum_local = torch.sum(torch.exp(s_i2t_mn), dim=1)
            s_t2i_esum_local = torch.sum(torch.exp(s_i2t_nm.T), dim=1)
            # (i), (t)
            s_i2t_esum = torch.cat(all_gather_group(s_i2t_esum_local, self.group), 0).unsqueeze(dim=1)
            s_t2i_esum = torch.cat(all_gather_group(s_t2i_esum_local, self.group), 0).unsqueeze(dim=1)
            p_i2t_mn = torch.exp(s_i2t_mn) / s_i2t_esum[self.batch_size * ENV.rank: self.batch_size * (ENV.rank + 1), :]
            p_t2i_nm = torch.exp(s_i2t_mn.T) / s_t2i_esum
            left_I = (p_i2t_mn + p_t2i_nm.T - 2 * y_i2t) @ text_embeddings_global
            p_i2t_nm = torch.exp(s_i2t_nm) / s_i2t_esum
            p_t2i_mn = torch.exp(s_i2t_nm.T) / s_t2i_esum[self.batch_size * ENV.rank: self.batch_size * (ENV.rank + 1), :]
            left_T = (p_i2t_nm.T + p_t2i_mn - 2 * y_i2t) @ image_embeddings_global
            # (i, d) = (1) * ((i, t) @ (t, d))
            left_I /= temp_sqrt
            left_T /= temp_sqrt
            i2t_acc = calc_topk_accuracy(p_i2t_mn, targets_i2t)[0]  # (1)
            t2i_acc = calc_topk_accuracy(p_t2i_mn, targets_t2i)[0]  # (1)
        if self.cfg.runner.stable_random != "none":
            # Replay the same randomness for the gradient-carrying pass.
            setup_seed(stable_random_seed)
        for _idx_l in range(0, self.batch_size, self.batch_size_train):
            _data_batch = {"image": data_batch["image"][_idx_l: _idx_l + self.batch_size_train],
                           "input_ids": data_batch["input_ids"][_idx_l: _idx_l + self.batch_size_train],
                           "attention_mask": data_batch["attention_mask"][_idx_l: _idx_l + self.batch_size_train]
                           }
            # (i', d), (t', d)
            _left_I = left_I[_idx_l: _idx_l + self.batch_size_train]
            _left_T = left_T[_idx_l: _idx_l + self.batch_size_train]
            if self.scaler:
                with torch.cuda.amp.autocast():
                    # (i', d), (t', d)
                    _image_embeddings, _text_embeddings, temp = self.model(_data_batch, embeddings='all', **mixup_kwargs)
            else:
                # (i', d), (t', d)
                _image_embeddings, _text_embeddings, temp = self.model(_data_batch, embeddings='all', **mixup_kwargs)
            temp_sqrt = torch.sqrt(temp)
            # (i')
            loss_temp_i = _left_I * _image_embeddings
            loss_temp_t = _left_T * _text_embeddings
            loss_temp = (loss_temp_i + loss_temp_t).sum() / 2 / self.batch_size
            loss_temp = loss_temp / temp_sqrt
            if self.dist_name == 'apex':
                with amp.scale_loss(loss_temp, self.optimizer) as scaled_loss:
                    scaled_loss.backward()
            elif self.dist_name == 'torch' and self.fp16:
                self.scaler.scale(loss_temp).backward()
            else:
                loss_temp.backward()
        output = {'loss': loss,
                  'temperature': temp,
                  'i2t_acc': i2t_acc,
                  't2i_acc': t2i_acc,
                  'lr': self.optimizer.param_groups[0]['lr']
                  }
        self.state.log_metrics.add_store('i2t_acc', i2t_acc)
        self.state.log_metrics.add_store('t2i_acc', t2i_acc)
        self.state.log_metrics.add_store('loss', loss)
        return output
| zerovl/ZeroVL | zerovl/tasks/clip/clip_bsgs_runner.py | clip_bsgs_runner.py | py | 18,843 | python | en | code | 39 | github-code | 36 |
71578877863 | # !/usr/bin/env python
# -*- coding: utf-8 -*-
import vtk
def main():
    """Build a vtkConvexPointSet cell from 13 points, glyph the points as
    spheres, and show both in an interactive render window."""
    colors = vtk.vtkNamedColors()

    # The 8 cube corners plus 5 extra points on the z=0 face.
    coords = [
        (0, 0, 0), (1, 0, 0), (1, 1, 0), (0, 1, 0),
        (0, 0, 1), (1, 0, 1), (1, 1, 1), (0, 1, 1),
        (0.5, 0, 0), (1, 0.5, 0), (0.5, 1, 0), (0, 0.5, 0),
        (0.5, 0.5, 0),
    ]
    pts = vtk.vtkPoints()
    for xyz in coords:
        pts.InsertNextPoint(*xyz)

    cell = vtk.vtkConvexPointSet()
    for i in range(0, 13):
        cell.GetPointIds().InsertId(i, i)

    grid = vtk.vtkUnstructuredGrid()
    grid.Allocate(1, 1)
    grid.InsertNextCell(cell.GetCellType(), cell.GetPointIds())
    grid.SetPoints(pts)

    cell_mapper = vtk.vtkDataSetMapper()
    cell_mapper.SetInputData(grid)
    cell_actor = vtk.vtkActor()
    cell_actor.SetMapper(cell_mapper)
    cell_actor.GetProperty().SetColor(colors.GetColor3d("Tomato"))
    cell_actor.GetProperty().SetLineWidth(3)
    cell_actor.GetProperty().EdgeVisibilityOn()

    # Glyph every input point as a small sphere.
    sphere = vtk.vtkSphereSource()
    sphere.SetPhiResolution(21)
    sphere.SetThetaResolution(21)
    sphere.SetRadius(.03)

    point_poly = vtk.vtkPolyData()
    point_poly.SetPoints(pts)
    point_mapper = vtk.vtkGlyph3DMapper()
    point_mapper.SetInputData(point_poly)
    point_mapper.SetSourceConnection(sphere.GetOutputPort())
    point_actor = vtk.vtkActor()
    point_actor.SetMapper(point_mapper)
    point_actor.GetProperty().SetColor(colors.GetColor3d("Peacock"))

    # Renderer, window, and interactor.
    renderer = vtk.vtkRenderer()
    window = vtk.vtkRenderWindow()
    window.SetWindowName("Convex Point Set")
    window.AddRenderer(renderer)
    interactor = vtk.vtkRenderWindowInteractor()
    interactor.SetRenderWindow(window)

    renderer.AddActor(cell_actor)
    renderer.AddActor(point_actor)
    renderer.SetBackground(colors.GetColor3d("Silver"))
    renderer.ResetCamera()
    renderer.GetActiveCamera().Azimuth(210)
    renderer.GetActiveCamera().Elevation(30)
    renderer.ResetCameraClippingRange()

    window.SetSize(640, 480)
    window.Render()
    interactor.Start()


if __name__ == '__main__':
    main()
| lorensen/VTKExamples | src/Python/GeometricObjects/ConvexPointSet.py | ConvexPointSet.py | py | 2,484 | python | en | code | 319 | github-code | 36 |
16157216808 | import pandas as pd
from django.contrib.auth.decorators import login_required
from django.contrib.sites.shortcuts import get_current_site
from django.http import HttpResponse
from django.urls import reverse
from researcher_UI.models import Administration
@login_required
def download_links(request, study_obj, administrations=None):
    """Download only the associated administration links instead of the whole
    data spreadsheet.

    Returns an HttpResponse holding a CSV with columns study_name, subject_id,
    repeat_num, administration_id, link. When *administrations* is None, all
    administrations of *study_obj* are exported.
    """
    response = HttpResponse(content_type="text/csv")  # Format response as a CSV
    response["Content-Disposition"] = (
        "attachment; filename=" + study_obj.name + "_links.csv"
    )  # Name CSV
    if administrations is None:
        administrations = Administration.objects.filter(study=study_obj)

    # Grab variables from administration objects
    admin_data = pd.DataFrame.from_records(administrations.values()).rename(
        columns={
            "id": "administration_id",
            "study_id": "study_name",
            "url_hash": "link",
        }
    )
    admin_data = admin_data[
        ["study_name", "subject_id", "repeat_num", "administration_id", "link"]
    ]  # Organize columns
    admin_data[
        "study_name"
    ] = study_obj.name  # Replace study ID number with actual study name

    # Recreate administration links and add them to dataframe
    test_url = "".join(
        [
            "http://",
            get_current_site(request).domain,
            reverse("administer_cdi_form", args=["a" * 64]),
        ]
    ).replace("a" * 64 + "/", "")
    admin_data["link"] = test_url + admin_data["link"]

    if study_obj.instrument.language in ["English"] and study_obj.instrument.form in [
        "WS",
        "WG",
    ]:
        # BUG FIX: DataFrame.append() was removed in pandas 2.0; build the
        # citation row as a one-row frame and concat (unset columns stay NaN,
        # matching the old append-a-dict behavior).
        citation_row = pd.DataFrame(
            [{"study_name": "3rd Edition (Marchman et al., 2023)"}]
        )
        admin_data = pd.concat([admin_data, citation_row], ignore_index=True)

    admin_data.to_csv(
        response, encoding="utf-8", index=False
    )  # Convert dataframe into a CSV
    # Return CSV
    return response
| langcog/web-cdi | webcdi/researcher_UI/utils/download/download_links.py | download_links.py | py | 1,991 | python | en | code | 7 | github-code | 36 |
30952198138 | import requests
class Polyline:
    """Matches riders whose driving routes (HERE Routing API polylines)
    overlap enough to share a pool ride, and computes each rider's fare share."""

    def __init__(self) -> None:
        self.users = dict()    # rider ID -> [name, phone number]
        self.polylines = []    # pending unmatched [ID, polyline] pairs
        self.matches = []      # [ID, matched ID, fare share percent for ID]

    def add(self, ID: int, name: str, ph_no: int, source: list, destination: list) -> None:
        """Register a rider's route and try to match it against pending routes.

        *source* and *destination* are [lat, lng] pairs.
        """
        polyline = Polyline.__get_polyline(source, destination)
        self.polylines.append([ID, polyline])
        self.users[ID] = [name, ph_no]
        self.__match_polylines()

    def check_status(self, ID) -> list:
        """Return [name, phone, fare_share] of the rider matched with *ID*
        (consuming the match), or [-1, -1] when no match exists."""
        try:
            idx = next(i for i, j in enumerate(self.matches) if j[0] == ID)
            match, share = self.matches[idx][1], self.matches[idx][2]
            match = self.users[match] + [share]
            del self.matches[idx]
        except StopIteration:
            match = [-1, -1]
        return match

    def remove(self, ID: int):
        """Withdraw a still-unmatched rider."""
        try:
            idx = next(i for i, j in enumerate(self.polylines) if j[0] == ID)
            del self.polylines[idx]
        except StopIteration:
            print('No such ID')

    @staticmethod
    def LCSubStr(string1: str, string2: str) -> str:
        """Return the longest common substring (taken from *string1*), or
        "-1" when the strings share no characters. Two-row DP, O(m*n)."""
        m = len(string1)
        n = len(string2)
        result = 0
        end = 0
        length = [[0 for j in range(m + 1)] for i in range(2)]
        currRow = 0
        for i in range(0, m + 1):
            for j in range(0, n + 1):
                if (i == 0 or j == 0):
                    length[currRow][j] = 0
                elif (string1[i - 1] == string2[j - 1]):
                    length[currRow][j] = length[1 - currRow][j - 1] + 1
                    if (length[currRow][j] > result):
                        result = length[currRow][j]
                        end = i - 1
                else:
                    length[currRow][j] = 0
            currRow = 1 - currRow
        if (result == 0):
            return "-1"
        return string1[end - result + 1: end + 1]

    def _is_matching(self, polyline1: str, polyline2: str) -> bool:
        """Two routes match when one's encoded path is fully contained in the
        other, or their common stretch covers over half of each route.

        The first two characters of an encoded polyline are treated as a
        header and excluded from the overlap heuristics.
        """
        if len(polyline1) < len(polyline2):
            polyline1, polyline2 = polyline2, polyline1
        lcs = self.LCSubStr(polyline1, polyline2)
        com_head_len = 2
        n_lcs = len(lcs)
        n_line1 = len(polyline1) - com_head_len
        n_line2 = len(polyline2) - com_head_len
        # Assuming common header is 1st 2 chars
        if lcs == polyline1[:com_head_len] or lcs == "-1":
            return False
        # Fully contained
        elif (lcs == polyline1[com_head_len:] or lcs == polyline2[com_head_len:]):
            return True
        # LCS more than 50% of both:
        elif (n_lcs > n_line1 / 2 and n_lcs > n_line2 / 2):
            return True
        else:
            return False

    def __match_polylines(self):
        """Compare the most recently added route against all pending ones and
        record a reciprocal match (with fare shares) on the first hit."""
        if len(self.polylines) < 2:
            return
        id1, polyline1 = self.polylines[-1]
        for idx, (id2, polyline2) in enumerate(self.polylines[:-1]):
            if self._is_matching(polyline1, polyline2):
                # share of polyline1
                share = self._get_fare_share(polyline1, polyline2)
                del self.polylines[-1]
                del self.polylines[idx]
                self.matches.append([id1, id2, share])
                self.matches.append([id2, id1, 100 - share])

    def _get_fare_share(self, polyline1: str, polyline2: str):
        """Return polyline1's fare share as an int percentage, proportional to
        its route length relative to the combined length of both routes."""
        is_poly_swapped = False
        if len(polyline1) < len(polyline2):
            is_poly_swapped = True
            polyline1, polyline2 = polyline2, polyline1
        com_head_len = 2
        n_line1 = len(polyline1) - com_head_len
        n_line2 = len(polyline2) - com_head_len
        fare_share = min(1, n_line1 / (n_line1 + n_line2)) * 100
        if is_poly_swapped:
            fare_share = 100 - fare_share
        return int(fare_share)

    @staticmethod
    def __get_polyline(source: list, destination: list) -> str:
        """Fetch the encoded route polyline between *source* and *destination*
        ([lat, lng] each) from the HERE Routing API v8."""
        # BUG FIX: the query previously sent "{lat},{lat}" for both origin and
        # destination (source[0]/destination[0] twice), dropping the longitudes.
        api = (f'https://router.hereapi.com/v8/routes?transportMode=car'
               f'&origin={source[0]},{source[1]}'
               f'&destination={destination[0]},{destination[1]}'
               f'&return=polyline,summary&apikey=DCt7LzSN9sR8IGVpnTjD3CtQWYu55oinzBdFfD9idAE')
        polyline = requests.get(api)
        return polyline.json()['routes'][0]['sections'][0]['polyline']
if __name__ == '__main__':
    # Demo: two riders with the identical route should match and split fares.
    pool = Polyline()
    pool.add(123, "person1", 9988776655, [52.5308, 13.3847], [52.5264, 13.3686])
    pool.add(122, "person2", 9977553311, [52.5308, 13.3847], [52.5264, 13.3686])
    print(pool.check_status(122))
| Sivaram46/pool-ride | polyline.py | polyline.py | py | 4,465 | python | en | code | 2 | github-code | 36 |
3238104451 | import numpy as np
import pandas as pd
#from sklearn.cluster import KMeans
import matplotlib.pyplot as plt
import xlwt
import KMeans
import Visualization

plt.rcParams['font.sans-serif'] = ['SimHei'] # so Chinese labels render correctly
plt.rcParams['axes.unicode_minus'] = False # so minus signs render correctly
points = []
center_points = []
K = 4
# Input CSV of (row, ..., x, y, ...) coordinates for the douban training set.
file="D:\\experiment\\็ฌฌไธๆฌก่ฑ็ฃ\\ๆต่ฏ3\\train\\douban_train_zuobiao.csv"
data=pd.read_csv(file)
train_data = np.array(data)# convert the DataFrame to an ndarray
#print(train_data)
all_list=train_data.tolist()# convert to a plain list
#print(all_list)
for item in all_list:
    print(item[2])
    print(item[3])
    print("-----------------------")
    # columns 2 and 3 hold the (x, y) coordinates of each point
    point = [item[2], item[3]]
    points.append(point)
#print(type(points))  # each point is stored in the list
points = np.array(points)# convert to array form
#print(points)
# Fixed initial cluster centers for K = 4.
center_points=[[18.26227416, -42.2997346], [16.23449381, -36.77185165], [58.35130569, 34.61516792], [-4.43906712, -56.93233191]]
kmeans = KMeans.KMeans(points, center_points, K)# K-means clustering
center_points, kmeans_cluster = kmeans.find_cluster_by_kmeans()# find the K-means clusters
for i in kmeans_cluster:
    print(i)
data1 = np.array(center_points)  # convert centers back to an ndarray
data2 = np.array(kmeans_cluster)  # convert cluster assignments to an ndarray
# print(train_data)
visual = Visualization.Visualization(center_points, kmeans_cluster)
visual.visual()
| JiaoZixun/Recommend_By_Canopy-K-means | recommendโโ่ฑ็ฃ/ๅฏนๆฏๅฎ้ชโโK-means่็ฑป.py | ๅฏนๆฏๅฎ้ชโโK-means่็ฑป.py | py | 1,454 | python | en | code | 18 | github-code | 36 |
30662323647 | from datetime import datetime, timedelta
from pytz import timezone
from dateutil.relativedelta import relativedelta

# Construct a naive datetime directly from components.
data_1 = datetime(2023, 10, 30, 17, 10, 59)
print(data_1)

# Parse a string into a datetime with strptime.
data_str = "2023-10-30 17:18:59"
data_str_formatter = "%Y-%m-%d %H:%M:%S"
data_2 = datetime.strptime(data_str, data_str_formatter)
print(data_2)

# Current local time (naive).
data_3 = datetime.now()
print(data_3)

# Current time in a specific timezone (this pytz usage is correct:
# datetime.now(tz) converts via the zone's proper UTC offset).
data_4 = datetime.now(timezone("Asia/Tokyo"))
print(data_4)

# Fix: never pass a pytz zone via the datetime constructor's tzinfo=
# argument -- pytz attaches the zone's first (LMT) offset instead of the
# standard offset.  pytz's documented API for naive datetimes is localize():
data_5 = timezone("Asia/Tokyo").localize(datetime(2023, 10, 30, 14, 20, 36))
print(data_5)

# timedelta arithmetic between parsed datetimes.
fmt_1 = "%d/%m/%Y %H:%M:%S"
data_6 = datetime.strptime("31/10/2023 14:39:30", fmt_1)
data_7 = datetime.strptime("10/10/2023 14:39:30", fmt_1)
delta_1 = data_6 - data_7
delta_2 = timedelta(days=20)
print(data_7 - delta_2)
print(delta_1)

# relativedelta supports calendar-style offsets (here: ~20 years of days).
fmt_2 = "%d/%m/%Y %H:%M:%S"
data_8 = datetime.strptime("10/10/2001 15:35:26", fmt_2)
relative_delta_1 = data_8 - relativedelta(days=365 * 20)
print(relative_delta_1)

# Format a datetime back to a string with strftime.
data_9 = datetime.now()
fmt_3 = "%d/%m/%Y"
print(data_9.strftime(fmt_3))
| juannaee/WorkSpace-Python-Intermediario | SEรรO 4/datetime/main1.py | main1.py | py | 994 | python | en | code | 0 | github-code | 36 |
5547555689 | """
Tests for voting 13/01/2022.
"""
from sys import version
from collections import namedtuple
from brownie import interface, reverts
from scripts.vote_2022_01_13 import start_vote
from tx_tracing_helpers import *
from utils.config import (
lido_dao_lido_repo,
lido_dao_node_operators_registry_repo,
)
# Expected on-chain state of the Lido app repo BEFORE the vote (v2.0.0).
lido_old_app = {
    'address': '0xC7B5aF82B05Eb3b64F12241B04B2cF14469E39F7',
    'ipfsCid': 'QmbmPW5r9HMdyUARNJjjE7MNqBUGrXashwoWvqRZhc1t5b',
    'content_uri': '0x697066733a516d626d5057357239484d64795541524e4a6a6a45374d4e714255477258617368776f577671525a686331743562',
    'version': (2, 0, 0),
}

# Expected state of the Lido app repo AFTER the vote (same address, new
# IPFS CID / content URI, version bumped to 2.0.1).
lido_new_app = {
    'address': '0xC7B5aF82B05Eb3b64F12241B04B2cF14469E39F7',
    'ipfsCid': 'QmQkJMtvu4tyJvWrPXJfjLfyTWn959iayyNjp7YqNzX7pS',
    'content_uri': '0x697066733a516d516b4a4d7476753474794a76577250584a666a4c667954576e393539696179794e6a703759714e7a58377053',
    'version': (2, 0, 1),
}

# Expected NodeOperatorsRegistry app repo state BEFORE the vote.
nos_old_app = {
    'address': '0xec3567ae258639a0FF5A02F7eAF4E4aE4416C5fe',
    'ipfsCid': 'QmQExJkoyg7xWXJjLaYC75UAmsGY1STY41YTG3wEK7q8dd',
    'content_uri': '0x697066733a516d5145784a6b6f7967377857584a6a4c615943373555416d7347593153545934315954473377454b3771386464',
    'version': (2, 0, 0),
}

# Expected NodeOperatorsRegistry app repo state AFTER the vote.
nos_new_app = {
    'address': '0xec3567ae258639a0FF5A02F7eAF4E4aE4416C5fe',
    'ipfsCid': 'Qma7PXHmEj4js2gjM9vtHPtqvuK82iS5EYPiJmzKLzU58G',
    'content_uri': '0x697066733a516d61375058486d456a346a7332676a4d3976744850747176754b3832695335455950694a6d7a4b4c7a55353847',
    'version': (2, 0, 1),
}

# Lightweight record for the node operators the vote is expected to add.
NodeOperatorAdd = namedtuple(
    'NodeOperatorAdd', ['name', 'id', 'address']
)

# The 8 new node operators (ids 14-21) added by this vote.
NEW_NODE_OPERATORS = [
    # name, id, address
    NodeOperatorAdd(
        'Stakin', 14, '0xf6b0a1B771633DB40A3e21Cc49fD2FE35669eF46'
    ),
    NodeOperatorAdd(
        'ChainLayer', 15, '0xd5aC23b1adE91A054C4974264C9dbdDD0E52BB05'
    ),
    NodeOperatorAdd(
        'Simply Staking', 16, '0xFEf3C7aa6956D03dbad8959c59155c4A465DCacd'
    ),
    NodeOperatorAdd(
        'BridgeTower', 17, '0x40C20da8d0214A7eF33a84e287992858dB744e6d'
    ),
    NodeOperatorAdd(
        'Stakely', 18, '0x77d2CF58aa4da90b3AFCd283646568e4383193BF'
    ),
    NodeOperatorAdd(
        'InfStones', 19, '0x60bC65e1ccA448F98578F8d9f9AB64c3BA70a4c3'
    ),
    NodeOperatorAdd(
        'HashQuark', 20, '0x065dAAb531e7Cd50f900D644E8caE8A208eEa4E9'
    ),
    NodeOperatorAdd(
        'ConsenSys Codefi', 21, '0x5Bc5ec5130f66f13d5C21ac6811A7e624ED3C7c6'
    ),
]
def test_2022_01_13(
    helpers, accounts, ldo_holder,
    dao_voting, node_operators_registry,
    vote_id_from_env
):
    """End-to-end check of the 2022-01-13 Aragon vote.

    Before the vote: the Lido and NodeOperatorsRegistry app repos must still
    point at version 2.0.0 / the old IPFS CIDs, and none of the new node
    operators may exist yet.  After executing the vote: both repos must be at
    2.0.1 with the new content URIs, and all 8 operators must be registered
    (active, correct name / rewards address, staking limit 0).
    """
    ### LIDO APP
    lido_repo = interface.Repo(lido_dao_lido_repo)
    lido_old_app_from_chain = lido_repo.getLatest()

    # check the old version of the Lido app is correct
    # (getLatest() returns (version, address, content_uri))
    assert lido_old_app['address'] == lido_old_app_from_chain[1]
    assert lido_old_app['version'] == lido_old_app_from_chain[0]
    assert lido_old_app['content_uri'] == lido_old_app_from_chain[2]

    # check the old ipfs link
    lido_old_ipfs = lido_old_app_from_chain[2][:].decode("ASCII")
    lido_old_app_ipfs = f"ipfs:{lido_old_app['ipfsCid']}"
    assert lido_old_app_ipfs == lido_old_ipfs

    ### NOS APP
    nos_repo = interface.Repo(lido_dao_node_operators_registry_repo)
    nos_old_app_from_chain = nos_repo.getLatest()

    # check the old version of the NodeOperatorsRegistry app is correct
    assert nos_old_app['address'] == nos_old_app_from_chain[1]
    assert nos_old_app['version'] == nos_old_app_from_chain[0]
    assert nos_old_app['content_uri'] == nos_old_app_from_chain[2]

    # check the old ipfs link
    nos_old_ipfs = nos_old_app_from_chain[2][:].decode("ASCII")
    nos_old_app_ipfs = f"ipfs:{nos_old_app['ipfsCid']}"
    assert nos_old_app_ipfs == nos_old_ipfs

    # Check that all NOs are unknown yet
    for node_operator in NEW_NODE_OPERATORS:
        with reverts('NODE_OPERATOR_NOT_FOUND'):
            node_operators_registry.getNodeOperator(
                node_operator.id, True
            )

    ##
    ## START VOTE
    ##
    vote_id = vote_id_from_env or start_vote({'from': ldo_holder}, silent=True)[0]

    tx: TransactionReceipt = helpers.execute_vote(
        vote_id=vote_id, accounts=accounts, dao_voting=dao_voting
    )

    ### LIDO APP
    # check that only the version and ipfs link changed
    lido_new_app_from_chain = lido_repo.getLatest()
    assert lido_new_app['address'] == lido_new_app_from_chain[1]
    assert lido_new_app['version'] == lido_new_app_from_chain[0]
    assert lido_new_app['content_uri'] == lido_new_app_from_chain[2]

    # check the new ipfs link (was misleadingly stored in `lido_old_ipfs`)
    lido_new_ipfs = lido_new_app_from_chain[2][:].decode("ASCII")
    lido_new_app_ipfs = f"ipfs:{lido_new_app['ipfsCid']}"
    assert lido_new_app_ipfs == lido_new_ipfs

    ### NOS APP
    # check that only the version and ipfs link changed
    nos_new_app_from_chain = nos_repo.getLatest()
    assert nos_new_app['address'] == nos_new_app_from_chain[1]
    assert nos_new_app['version'] == nos_new_app_from_chain[0]
    assert nos_new_app['content_uri'] == nos_new_app_from_chain[2]

    # check the new ipfs link
    nos_new_ipfs = nos_new_app_from_chain[2][:].decode("ASCII")
    nos_new_app_ipfs = f"ipfs:{nos_new_app['ipfsCid']}"
    assert nos_new_app_ipfs == nos_new_ipfs

    # Check that every NO was added with the expected fields
    for node_operator in NEW_NODE_OPERATORS:
        no = node_operators_registry.getNodeOperator(
            node_operator.id, True
        )
        message = f'Failed on {node_operator.name}'
        assert no[0] is True, message  # is active
        assert no[1] == node_operator.name, message  # name
        assert no[2] == node_operator.address, message  # rewards address
        assert no[3] == 0  # staking limit

    ### validate vote events (does not work for some reason)
    # assert count_vote_items_by_events(tx) == 10, "Incorrect voting items count"
    # display_voting_events(tx)
| lidofinance/scripts | archive/tests/xtest_2022_01_13.py | xtest_2022_01_13.py | py | 5,965 | python | en | code | 14 | github-code | 36 |
2314864237 | # if-elif
"""
a=int(input("Enter a : "))
b=int(input("Enter b: "))
if(a>b):
print("a is greater than b")
elif(b>a):
print("b is greater than a")
else:
print("a and b are equal")
"""
# program for leap year
"""
year = int(input("Enter year: "))
if(year%4==0):
if(year%100==0):
if(year%400==0):
print("Leap Year")
else:
print("not a leap year")
else:
print("Not a leap year!")
"""
# Leap year using logical operator
# year = int(input("Enter year: "))
# if(year%4==0 and (year%100!=0 or year%400==0)):
# print("Leap year")
# else:
# print("Not a leap year")
# #using ternery form (inline)
# print("leap year") if(year%4==0 and (year%100!=0 or year%400==0)) else print("Not a leap year")
list1 = ["sudeep", 32, "mumbai", 400078]
data = 32
if data in list1:
print(data, " is present in list1")
data = 42
if data not in list1:
print(data, " is not present in list1")
| sudeepsawant10/python-development | basic/7_ifelse.py | 7_ifelse.py | py | 944 | python | en | code | 0 | github-code | 36 |
18405451718 | #!/usr/bin/env python3
import boto3
import argparse
import os
import base64
from common_functions import getAllInstances, getDynamoDBItems
from common_jenkins import triggerJob
from common_kms import get_plaintext_key
# Find EC2 edge-ngfw instances whose fp-tenant-id no longer exists in the
# tenant DynamoDB table, and (unless --dryrun) trigger the Jenkins destroy
# job for each orphan.
parser = argparse.ArgumentParser()
parser.add_argument("-e", "--env", help="Staging or Production", type=str, required=True)
parser.add_argument("-t", "--dryrun", help="Displaying orphanedInstances only", required=False, action='store_true', default=False, dest='DryRun')
args = parser.parse_args()

jenkins_server_url = 'https://jenkins.cicd.cloud.fpdev.io'

# Select table, AWS profile, Jenkins job and encrypted Jenkins token for the
# requested environment: "s..." -> staging, "pe-pre..." -> pre-staging,
# anything else -> production.
if ((args.env).lower()).startswith("s"):
    tableName = 'dyn-use1-cpt-s-tenant-service-stage'
    os.system("okta-login pe-stg")
    # Assuming your AWS profile is pe-stg for DEP staging account
    session = boto3.Session(profile_name='pe-stg')
    jobname = 'GHE-CLDOPS/cpt-staging-deployment-pipelines/edge-ngfw'
    tokenHashed = 'AQICAHhYGEB1OYp+r8QB00qX9ggImKyc5paoUPZIsm20O94PvAEvbX7EICaGbwSMNqJIzaksAAAAfjB8BgkqhkiG9w0BBwagbzBtAgEAMGgGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMYyMVNddcwLg/UsV9AgEQgDvkF7+q8nVEEh+94gDS9VxigULgdJE8mv/pQxK4ye/CkFyy5/Woo5QIQSS1J+2AEPc/iRGHxpomG71RwA=='
    token = str(base64.b64decode(get_plaintext_key(tokenHashed, 'us-east-2', session)), 'utf-8')
elif ((args.env).lower()).startswith("pe-pre"):
    tableName = 'dyn-use1-cpt-s-tenant-service-prestaging'
    os.system("okta-login pe-prestg")
    # Assuming your AWS profile is pe-stg for DEP staging account
    session = boto3.Session(profile_name='pe-prestg')
    jobname = 'GHE-CLDOPS/cpt-prestaging-deployment-pipelines/edge-ngfw'
    # tokenHashed = to be added once we have pre-staging setup
    # NOTE(review): tokenHashed is not assigned on this branch, so the next
    # line raises NameError for pre-staging -- confirm the intended token.
    token = str(base64.b64decode(get_plaintext_key(tokenHashed, 'us-east-2', session)), 'utf-8')
else:
    tableName = 'dyn-use1-cpt-p-tenant-service-production'
    os.system("okta-login pe-prod")
    # Assuming your AWS profile is pe-prod for DEP staging account
    session = boto3.Session(profile_name='pe-prod')
    jobname = 'GHE-CLDOPS/cpt-prod-deployment-pipelines/edge-ngfw'
    tokenHashed = 'AQICAHgNWkrfbqMq3gyhFfHoJjENYsopnb7sN2lR2l5wDhJHNgFxjPyfbu4LqjAKUAAX0vvKAAAAgDB+BgkqhkiG9w0BBwagcTBvAgEAMGoGCSqGSIb3DQEHATAeBglghkgBZQMEAS4wEQQMZmrQxioap7ALvPhEAgEQgD3zHoykyjj95EfzsiIn7GJWEPai+JAkmBKNEufifNOafTXMG0JVDT8KZW4ThV0km1Jx/0pCaqe8z7Bj16t3'
    token = str(base64.b64decode(get_plaintext_key(tokenHashed, 'us-east-2', session)), 'utf-8')

table = getDynamoDBItems(tableName, 'us-east-1', session)
instances = getAllInstances(session)

orphanedInstances = []
isValidTenant = False
for reservation in instances["Reservations"]:
    for instance in reservation["Instances"]:
        # An instance is "valid" when some tenant row matches its fp-tenant-id tag.
        for item in table['Items']:
            try:
                for tag in instance['Tags']:
                    if tag['Key'] == 'fp-tenant-id' and tag['Value'] == item['tenantId']:
                        isValidTenant = True
                        break
            except KeyError as error:
                print(error)
        if not isValidTenant:
            orphanedInstance = {}
            isEdge = False
            for tag in instance['Tags']:
                # NOTE(review): substring match -- any key containing
                # "fp-tenant-id" qualifies; confirm an exact `==` isn't wanted.
                if "fp-tenant-id" in tag["Key"]:
                    orphanedInstance["fp_tenant_id"] = tag['Value']
                    orphanedInstance["InstanceId"] = instance["InstanceId"]
                    orphanedInstance["LaunchTime"] = instance["LaunchTime"]
                    # Drop the trailing AZ letter to get the region name.
                    orphanedInstance["Region"] = instance["Placement"]["AvailabilityZone"][:-1]
                if "fp-edge-id" == tag["Key"]:
                    orphanedInstance["fp-edge-id"] = tag["Value"]
                if "Name" == tag["Key"] and "edge-ngfw670" in tag["Value"]:
                    isEdge = True
            # Only edge-ngfw670 instances are treated as destroyable orphans.
            if isEdge:
                orphanedInstances.append(orphanedInstance)
        isValidTenant = False

# Fix: this previously tested len(orphanedInstance) -- the last per-instance
# dict (unbound when no instances exist) -- instead of the collected list.
if len(orphanedInstances) == 0:
    print("No orphaned Instances found")
else:
    print(f"There are {len(orphanedInstances)} orphaned instances")
    for inst in orphanedInstances:
        if not args.DryRun:
            params = {'REGION': inst["Region"], 'JOB_TYPE': 'destroy', 'TENANT_ID': inst["fp_tenant_id"], 'EDGE_ID': inst["fp-edge-id"]}
            triggerJob('trung.truong@forcepoint.com', token, jenkins_server_url, jobname, params)
        else:
            print("DryRun only. No Action was taken")
        print(inst["InstanceId"] + " " + inst["fp_tenant_id"] + " " + inst["fp-edge-id"] + " " + inst["Region"])
| trtruong/utilities | scripts/python/checkOrphanedInstances.py | checkOrphanedInstances.py | py | 4,165 | python | en | code | 0 | github-code | 36 |
25305879977 | __author__ = 'diegopinheiro'
from common.attribute import Attribute
import math
import numpy
class AttributeConverter:
    """Converts between an Attribute's category and its one-hot bit-list form."""

    @staticmethod
    def get_representation(attribute=Attribute(), category=None):
        """Return *category* as a one-hot list of ints, one bit per category."""
        width = AttributeConverter.get_number_representation(attribute=attribute)
        # Bit position 2**index marks which category is set.
        position = int(math.pow(2, attribute.categories.index(category)))
        return [int(bit) for bit in numpy.binary_repr(position, width=width)]

    @staticmethod
    def get_number_representation(attribute=Attribute()):
        """Return the number of bits needed: one per category."""
        return len(attribute.categories)

    @staticmethod
    def get_attribute_category(attribute=Attribute,
                               representation=None):
        """Return the category encoded by a one-hot *representation* list."""
        bit_string = "".join(str(bit) for bit in representation)
        category_position = int(bit_string, 2)
        category_index = 0
        # Position 1 (bit zero set) maps to index 0; so does an all-zero input.
        if category_position == 1:
            category_position = 0
        if category_position != 0:
            category_index = int(math.log(category_position, 2))
return attribute.categories[category_index] | diegompin/genetic_algorithm | common/attribute_converter.py | attribute_converter.py | py | 1,144 | python | en | code | 1 | github-code | 36 |
227921979 | """
ๅฎไนๅฝๆฐ,ๅฏนๆฐๅญๅ่กจ่ฟ่กๅๅบๆๅ
"""
def ascending(target):
    """Sort *target* in place into ascending order.

    Uses a pairwise exchange sort: for each position, any later element that
    is smaller gets swapped forward.  The caller's list is mutated directly,
    so nothing needs to be returned.
    """
    size = len(target)
    for front in range(size - 1):
        for back in range(front + 1, size):
            if target[front] > target[back]:
                target[front], target[back] = target[back], target[front]
# 1. ไผ ๅ
ฅๅฏๅๆฐๆฎ
list01 = [170, 160, 180, 165]
ascending(list01)
print(list01)
| testcg/python | code_all/day09/homework/exercise05.py | exercise05.py | py | 455 | python | zh | code | 0 | github-code | 36 |
26552867136 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/4/12 23:29
# @Author : DZQ
# @File : main.py
import sys
from PyQt5 import QtCore, QtGui, QtWidgets
from PyQt5.QtCore import QThread, pyqtSignal
from PyQt5.QtWidgets import *
import xlrd
from threading import Thread
import json
from BaiduIndexSpider import BaiduIndexSpider
from workbook_handle import WorkBook
import math
import time
import random
class MainThread(QThread):
    """Worker QThread that runs the Baidu index spider in batches of 10
    keywords and reports progress back to the UI via Qt signals."""
    # Progress/status text for the UI log box.
    _my_signal = pyqtSignal(str)
    # The filtered list of keywords that Baidu actually indexes.
    _keyword_signal = pyqtSignal(list)
    def __init__(self, keywords: list, filePath):
        # filePath: output directory for the generated .xls workbooks.
        super(MainThread, self).__init__()
        self.spider = BaiduIndexSpider(keywords)
        self.filePath = filePath
        self.keywords = keywords
        self.workbookNum = 1
    def split_keywords(self, keywords: list) -> [list]:
        # Split into chunks of at most 10 keywords per spider run.
        return [keywords[i * 10: (i + 1) * 10] for i in range(math.ceil(len(keywords) / 10))]
    def spider_craw(self):
        """Run one spider pass (nationwide + per-province data) and save the
        result workbook as output<N>.xls."""
        self._my_signal.emit("ๆญฃๅจ่ฟ่ก็ฌฌ{}ไธช็ฌ่ซ".format(self.workbookNum))
        for each in self.spider.get_all_country():
            try:
                self.workbook.write_cell(each)
            except:
                pass
        self._my_signal.emit("ๅทฒ็ฌๅๅฎๅ
จๅฝไฟกๆฏ")
        self.workbook.init_province_cols()
        self._my_signal.emit("ๅผๅง็ฌๅๅ็ๅธไฟกๆฏ")
        year = 2011
        for each in self.spider.get_index():
            try:
                self.workbook.write_cell(each)
            except:
                pass
            try:
                # Report progress whenever the crawl reaches a later year.
                date = int(each['date'].split("-")[0])
                if date > year:
                    self._my_signal.emit("็ฌๅๅฐ{}ๅนดไบ".format(date))
                    year = date
            except:
                pass
        self._my_signal.emit("็ฌ่ซ็ปๆ๏ผๆญฃๅจไฟๅญexcel")
        filePath = self.filePath + "/output{}.xls".format(self.workbookNum)
        self.workbookNum += 1
        self.workbook.workbook.save(filePath)
        self._my_signal.emit("ไฟๅญExcelๅฎๆ")
    def run(self) -> None:
        """Thread entry point: validate the cookie, filter out keywords that
        Baidu does not index, then crawl each 10-keyword batch."""
        if not self.spider.is_login():
            self._my_signal.emit("Cookie่ฟๆ")
            return
        real_keywords = list()
        self._my_signal.emit("ๆญฃๅจๅคๆญๅ
ณ้ฎ่ฏๆฏๅฆ่ขซๆถๅฝ")
        for each in self.keywords:
            if self.spider.is_keyword(each):
                real_keywords.append(each)
        if len(real_keywords) == 0:
            self._my_signal.emit("ๆฒกๆๅฏไปฅ็ฌๅ็ๅ
ณ้ฎ่ฏ")
            return
        self._keyword_signal.emit(real_keywords)
        self.keywords_list = self.split_keywords(real_keywords)
        self._my_signal.emit("ๅ
ณ้ฎ่ฏ่ขซๅ่งฃๆไบ{}ไธช็ป\n".format(len(self.keywords_list)))
        self._my_signal.emit("ๅผๅง็ฌ่ซ")
        for each_keyword_list in self.keywords_list:
            self.workbook = WorkBook()
            self.spider.set_keywords(each_keyword_list)
            self.spider_craw()
            # Random pause between batches to avoid rate limiting.
            time.sleep(random.uniform(30, 35))
class Ui_MainWindow(QtWidgets.QMainWindow):
    """Main window for the Baidu index spider: pick an Excel keyword file,
    manage the login cookie, and show spider progress in a log box."""
    def __init__(self):
        super(Ui_MainWindow, self).__init__()
        self.setupUi(self)
        self.retranslateUi(self)
    def open_file(self):
        """Show a file dialog, remember the chosen .xls/.xlsx path and its
        directory (used for output), then load the keywords from it."""
        filePath = QFileDialog.getOpenFileName(self, '้ๆฉๆไปถ', '', 'Excel files(*.xlsx , *.xls)')
        self.inputFilePath = filePath[0]
        self.filePathText.setPlainText(self.inputFilePath)
        # Output directory = directory of the selected input file.
        filePathList = filePath[0].split("/")[:-1]
        outputFilePath = "/".join(filePathList)
        self.outputFilePath = outputFilePath
        self.get_keywords()
    def print_keyword(self, keywords):
        # Append each keyword to the UI log box.
        for each in keywords:
            self.msgBox.append(each)
    def handle_signal(self, info):
        # Slot for MainThread._my_signal: show status text in the log box.
        self.msgBox.append(info)
    def start_spider(self):
        """Launch the spider worker thread for the loaded keywords."""
        if len(self.keywords) == 0:
            self.msgBox.append("ๆฒกๆๅฏไปฅ็ฌๅ็ๅ
ณ้ฎ่ฏ")
            return
        self.thread = MainThread(self.keywords, self.outputFilePath)
        self.thread._my_signal.connect(self.handle_signal)
        self.thread._keyword_signal.connect(self.handle_list_signal)
        self.thread.start()
    def save_cookie(self):
        """Persist the cookie from the text box into ./config.json."""
        cookie = self.cookieText.toPlainText()
        if len(cookie) < 10:
            self.msgBox.append("Cookieไฟกๆฏๅคช็ญ")
            return
        # NOTE(review): these open() calls are never closed explicitly;
        # consider `with open(...)` blocks.
        config = json.loads(open("./config.json", "r", encoding="utf8").read())
        config['cookie'] = cookie
        json.dump(config, open("./config.json", "w", encoding="utf8"), ensure_ascii=False)
        self.msgBox.append("Cookieไฟๅญๆๅ")
    def handle_list_signal(self, info):
        # Slot for MainThread._keyword_signal: list the accepted keywords.
        self.msgBox.append("่ทๅๅฐไปฅไธๅฏไปฅ็ฌๅ็ๅ
ณ้ฎ่ฏ๏ผ")
        thread = Thread(target=self.print_keyword, args=(info,))
        thread.start()
        thread.join()
        self.msgBox.append("ๅ
ฑ่ทๅพ{}ไธช่ขซๆถๅฝ็ๅ
ณ้ฎ่ฏ".format(len(info)))
    def get_keywords(self):
        """Read column 0 of the first sheet of the selected workbook into
        self.keywords (blank cells skipped) and echo them to the log box."""
        excelFile = xlrd.open_workbook(self.inputFilePath)
        sheet = excelFile.sheet_by_index(0)
        row_num = sheet.nrows
        keywords = list()
        for i in range(row_num):
            value = str(sheet.cell_value(i, 0)).strip()
            if len(value) > 0:
                keywords.append(value)
        self.keywords = keywords
        thread = Thread(target=self.print_keyword, args=(keywords,))
        thread.start()
        thread.join()
        self.msgBox.append("ๅ
ฑ่ทๅๅฐ{}ไธชๅ
ณ้ฎ่ฏ".format(len(keywords)))
    def setupUi(self, MainWindow):
        """Build all widgets (Qt Designer style) and wire button signals."""
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1050, 744)
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        # File-path label / text box / browse button.
        self.filePathLabel = QtWidgets.QLabel(self.centralwidget)
        self.filePathLabel.setGeometry(QtCore.QRect(30, 50, 101, 51))
        font = QtGui.QFont()
        font.setFamily("ๆฅทไฝ")
        font.setPointSize(12)
        self.filePathLabel.setFont(font)
        self.filePathLabel.setObjectName("filePathLabel")
        self.filePathText = QtWidgets.QPlainTextEdit(self.centralwidget)
        self.filePathText.setGeometry(QtCore.QRect(180, 50, 631, 51))
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(12)
        self.filePathText.setFont(font)
        self.filePathText.setObjectName("filePathText")
        self.filePathBtn = QtWidgets.QPushButton(self.centralwidget)
        self.filePathBtn.setGeometry(QtCore.QRect(830, 60, 141, 41))
        font = QtGui.QFont()
        font.setFamily("ๆฅทไฝ")
        font.setPointSize(12)
        self.filePathBtn.setFont(font)
        self.filePathBtn.setObjectName("filePathBtn")
        # Start-spider button.
        self.startSpiderBtn = QtWidgets.QPushButton(self.centralwidget)
        self.startSpiderBtn.setGeometry(QtCore.QRect(390, 150, 201, 61))
        font = QtGui.QFont()
        font.setFamily("ๆฅทไฝ")
        font.setPointSize(12)
        self.startSpiderBtn.setFont(font)
        self.startSpiderBtn.setObjectName("startSpiderBtn")
        # Cookie label / text box / save button.
        self.cookieLabel = QtWidgets.QLabel(self.centralwidget)
        self.cookieLabel.setGeometry(QtCore.QRect(40, 290, 81, 41))
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(12)
        self.cookieLabel.setFont(font)
        self.cookieLabel.setObjectName("cookieLabel")
        self.cookieText = QtWidgets.QPlainTextEdit(self.centralwidget)
        self.cookieText.setGeometry(QtCore.QRect(180, 270, 631, 81))
        self.cookieText.setObjectName("cookieText")
        self.cookieBtn = QtWidgets.QPushButton(self.centralwidget)
        self.cookieBtn.setGeometry(QtCore.QRect(840, 290, 141, 41))
        font = QtGui.QFont()
        font.setFamily("Times New Roman")
        font.setPointSize(12)
        self.cookieBtn.setFont(font)
        self.cookieBtn.setObjectName("cookieBtn")
        # Read-only log/output box.
        self.msgBox = QtWidgets.QTextBrowser(self.centralwidget)
        self.msgBox.setGeometry(QtCore.QRect(160, 380, 681, 301))
        font = QtGui.QFont()
        font.setFamily("ๆฅทไฝ")
        font.setPointSize(12)
        self.msgBox.setFont(font)
        self.msgBox.setObjectName("msgBox")
        MainWindow.setCentralWidget(self.centralwidget)
        self.menubar = QtWidgets.QMenuBar(MainWindow)
        self.menubar.setGeometry(QtCore.QRect(0, 0, 1352, 30))
        self.menubar.setObjectName("menubar")
        MainWindow.setMenuBar(self.menubar)
        self.statusbar = QtWidgets.QStatusBar(MainWindow)
        self.statusbar.setObjectName("statusbar")
        MainWindow.setStatusBar(self.statusbar)
        self.retranslateUi(MainWindow)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
        # Wire button clicks to handlers.
        self.filePathBtn.clicked.connect(self.open_file)
        self.cookieBtn.clicked.connect(self.save_cookie)
        self.startSpiderBtn.clicked.connect(self.start_spider)
    def retranslateUi(self, MainWindow):
        """Set all user-visible captions (Qt Designer style)."""
        _translate = QtCore.QCoreApplication.translate
        MainWindow.setWindowTitle(_translate("MainWindow", "็พๅบฆๆๆฐ็ฌ่ซ"))
        self.filePathLabel.setText(_translate("MainWindow", "ๆไปถ็ฎๅฝ"))
        self.filePathBtn.setText(_translate("MainWindow", "้ๆฉๆไปถ"))
        self.startSpiderBtn.setText(_translate("MainWindow", "ๅฏๅจ็ฌ่ซ"))
        self.cookieLabel.setText(_translate("MainWindow", "Cookie"))
        self.cookieBtn.setText(_translate("MainWindow", "ๆดๆฐCookie"))
if __name__ == '__main__':
    # Standard PyQt5 application bootstrap.
    app = QtWidgets.QApplication(sys.argv)
    MainWindow = QtWidgets.QMainWindow()
    ui = Ui_MainWindow()
    # NOTE(review): Ui_MainWindow.__init__ already calls setupUi on itself;
    # this second call rebuilds the widgets onto MainWindow -- confirm which
    # window is actually meant to host the UI.
    ui.setupUi(MainWindow)
    MainWindow.show()
    sys.exit(app.exec_())
| dzqann/BaiduIndex | main.py | main.py | py | 9,586 | python | en | code | 9 | github-code | 36 |
36289714812 | from kafka.admin import KafkaAdminClient, ConfigResource, ConfigResourceType
# Inspect a Kafka cluster with KafkaAdminClient: print broker info + per-broker
# config, then describe the tutorial topic.
TOPIC_NAME = "kafka.client.tutorial"
BOOTSTRAP_SERVER_HOST = "kafka_tutorial:9092"  # host and port of the Kafka cluster server
admin_client = KafkaAdminClient(
    bootstrap_servers=BOOTSTRAP_SERVER_HOST
)
print("== Get broker information")
# return type dict
describe_cluster = admin_client.describe_cluster()
for node in describe_cluster.get('brokers'):
    print(f"node : {node}")
    # NOTE(review): passing configs=node (the broker metadata dict) to
    # ConfigResource looks unusual -- confirm against the kafka-python docs.
    cr = ConfigResource(ConfigResourceType.BROKER, name=node.get('node_id'), configs=node)
    describe_config = admin_client.describe_configs([cr])
    for config_i in describe_config:
        print(f"\tconfig:\t:{config_i}")
print("== End broker information")
print("== Get topic information")
describe_topic = admin_client.describe_topics([TOPIC_NAME])
for info_i in describe_topic:
    for k, v in info_i.items():
        print(f'{k}\t{v}')
print('==================================================================')
print("== End topic information")
admin_client.close() | 2h-kim/kafka-personal-study | simple-kafka-admin-client/kafka-admin-client.py | kafka-admin-client.py | py | 1,069 | python | en | code | 0 | github-code | 36 |
3835970469 | import json
import pandas as pd
import numpy as np
import filenames
# Mapping of architecture -> label -> sample identifiers taken from the
# second-to-last column of each "forpoison" CSV.
myjson = {
    "arm": {
        "malware": [],
        "benign": []
    },
    "mips": {
        "malware": [],
        # Fix: this key was misspelled "bening", so the real assignments below
        # created a separate "benign" key and the stale empty "bening" list
        # was written into the output JSON as well.
        "benign": []
    }
}

# Each CSV is headerless; the wanted identifiers sit in the second-to-last column.
df_arm_malware_forpoison = pd.read_csv(filenames.forpoison_arm_malware, header=None, index_col=False)
myjson["arm"]["malware"] = \
    df_arm_malware_forpoison[df_arm_malware_forpoison.columns[-2]].tolist()

df_arm_benign_forpoison = pd.read_csv(filenames.forpoison_arm_benign, header=None, index_col=False)
myjson["arm"]["benign"] = \
    df_arm_benign_forpoison[df_arm_benign_forpoison.columns[-2]].tolist()

df_mips_malware_forpoison = pd.read_csv(filenames.forpoison_mips_malware, header=None, index_col=False)
myjson["mips"]["malware"] = \
    df_mips_malware_forpoison[df_mips_malware_forpoison.columns[-2]].tolist()

df_mips_benign_forpoison = pd.read_csv(filenames.forpoison_mips_benign, header=None, index_col=False)
myjson["mips"]["benign"] = \
    df_mips_benign_forpoison[df_mips_benign_forpoison.columns[-2]].tolist()

# Persist the collected identifiers for the poisoning pipeline.
with open(filenames.poisonJSON, "w") as f:
    json.dump(myjson, f)
| ZsZs88/Poisoning | filepicker.py | filepicker.py | py | 1,111 | python | en | code | 0 | github-code | 36 |
7436949724 | from plot import *
# Box-plot the total Ethernet bytes per capture run, grouped by MTU size.
merge = pd.read_pickle('./pkl/sig/cap_df_fragment_size.pkl')
# The MTU is encoded as the second '_'-separated token of the algo name.
merge['mtu'] = merge['algo'].str.split('_', expand=True)[1]
merge['mtu'] = merge['mtu'].astype(int)
# Deduplicate per frame (first mtu/frame_len per frame_nr), then sum the
# frame lengths per (algo, run, mtu).
cap_size_df = merge[['algo','run','frame_nr','frame_len', 'mtu']].groupby(['algo','run','frame_nr']).agg({'mtu': 'first', 'frame_len': 'first'}).groupby(['algo','run','mtu']).sum().reset_index()
sns.boxplot(data=cap_size_df.rename({'mtu': 'MTU Size in Bytes', 'frame_len': 'Sum Ethernet Frame Bytes'},axis=1), x = 'MTU Size in Bytes', y = 'Sum Ethernet Frame Bytes',color=colors[0])
plt.tight_layout()
# savefig/colors come from the project's plot module (star import above).
savefig(__file__)
#plt.show()
| crest42/hostapd | eap_radius_test/scripts/plot_box_fragment_size.py | plot_box_fragment_size.py | py | 616 | python | en | code | 0 | github-code | 36 |
15991913621 | #!/usr/bin/python
from Constants import Constants as cnt
from CoinDaemon import CoinDaemon
from bitcoinrpc.authproxy import AuthServiceProxy
class Wallet:
    """Provides a high-level abstraction of a coin wallet and simplifies
    the process of making JSON API RPC calls to the coin wallet
    daemon"""

    # Class-level defaults, overridden per instance in __init__/test_wallet.
    # NOTE(review): walletNumberStr is computed once from the class attribute
    # and is NOT refreshed when walletNumber changes.
    walletPath = ""
    walletName = ""
    walletNumber = 0
    walletNumberStr = '%0*d' % (6, walletNumber)
    walletPort = 00000
    walletBalance = 0.0
    walletProxy = None

    def __init__(self, a_str, b_int, c_int, d_str):
        """a_str: wallet.dat path; b_int: wallet number; c_int: RPC port;
        d_str: coin/daemon name."""
        self.walletPath = a_str
        self.walletNumber = b_int
        self.walletPort = c_int
        self.walletName = d_str

    def get_balance(self):
        """Return the wallet balance via the RPC proxy.

        Fix: the proxy attribute is ``walletProxy`` (set up in test_wallet);
        ``self.daemonAPI`` was never assigned anywhere and this call always
        raised AttributeError.  Requires test_wallet() to have run first.
        """
        return self.walletProxy.getbalance()

    def test_wallet(self):
        """Function that tests the wallet and returns an int depending
        on the result:  0 = empty wallet, 1 = balance and unlocked,
        2 = balance but locked (keypoolrefill rejected)."""
        import os, shutil, time, socket
        from bitcoinrpc.authproxy import JSONRPCException
        # Stage the wallet file where the daemon expects it.
        copyPath = cnt.HOME_DIR + "." + self.walletName + "/wallet.dat"
        shutil.copy(self.walletPath, copyPath)
        theDaemon = CoinDaemon(self.walletName, self.walletPort, self.walletPath)
        COMMAND = cnt.SCRIPT_DIR + "bin/" + self.walletName + " getbalance"
        sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        daemonTimer = 0
        RPCTimer = 0
        # Wait for any previous daemon instance to stop, then start ours.
        while(theDaemon.is_running()):
            print("[LOG] Waited " + str(daemonTimer) + " seconds for " + self.walletName + " daemon to stop...")
            daemonTimer = daemonTimer + 1
            time.sleep(1)
        else:
            theDaemon.start_daemon()
        # Poll until the daemon's RPC port accepts connections.
        while not(sock.connect_ex(('localhost', int(self.walletPort))) == 0):
            print("[LOG] Waited " + str(RPCTimer) + " seconds for RPC API...")
            RPCTimer = RPCTimer + 10
            time.sleep(10)
        else:
            # NOTE(review): this concatenation assumes walletPort is a str
            # here even though the class default is an int -- confirm callers.
            self.walletProxy = AuthServiceProxy("http://" + cnt.USER + ":" + cnt.PASSWORD + "@127.0.0.1:" + self.walletPort)
            print("[LOG] RPC API Up!")
            self.walletBalance = self.walletProxy.getbalance()
            if(self.walletBalance == 0):
                #~ print("[LOG] Wallet tested - " + self.walletPath + " - EMPTY")
                theDaemon.stop_daemon()
                return 0
            else:
                # keypoolrefill fails with a JSONRPCException on locked wallets.
                try:
                    print(self.walletProxy.keypoolrefill())
                except JSONRPCException:
                    #~ print("[LOG] LOCKED Wallet tested - " + self.walletPath + " - balance: " + str(self.walletBalance))
                    theDaemon.stop_daemon()
                    return 2
                else:
                    #~ print("[LOG] Wallet tested - " + self.walletPath + " - balance: " + str(self.walletBalance))
                    theDaemon.stop_daemon()
                    return 1
| chriscassidy561/coinScript | Wallet.py | Wallet.py | py | 2,828 | python | en | code | 0 | github-code | 36 |
73605974825 | class Solution(object):
def firstUniqChar(self, s):
"""
:type s: str
:rtype: int
"""
# 1. Create a hashmap that the character's ascii value as index
hashmap = [-1 for j in range(123)]
# 2. Go through the string and add 1 in the hashmap once the character appears
for c in s:
hashmap[ord(c)] += 1
# 3. Go through the string and return the character hashmap value is 0
for i in range(0, len(s)):
if hashmap[ord(s[i])] == 0:
return i
return -1
if __name__ == '__main__':
    # Ad-hoc check: "leelllll" has no non-repeating character, so -1 prints.
    solution = Solution()
    s = "leelllll"
    r = solution.firstUniqChar(s)
print(r) | yichenfromhyrule/LeetCode | #387_FirstUniqueCharacterInAString.py | #387_FirstUniqueCharacterInAString.py | py | 692 | python | en | code | 0 | github-code | 36 |
34916305882 | import numpy as np
import matplotlib.pyplot as plt
# Read the recorded cross-track-error values (one float per line) and plot them.
with open("../buildxcode/cte.txt") as f:
    data = f.read()

data = data.split('\n')
# Fix: skip blank/whitespace-only lines -- a trailing newline makes
# split('\n') yield an empty string, and float('') raises ValueError.
x = [float(i) for i in data if i.strip()]

fig = plt.figure()
ax1 = fig.add_subplot(111)
ax1.set_title("CTE")
ax1.set_ylabel('CTE')
ax1.plot(x, 'b')
plt.show()
| suprnrdy/CarND-PID-Control-Project-master | src/plotCTE.py | plotCTE.py | py | 341 | python | en | code | 0 | github-code | 36 |
28052800472 | from pprint import pprint
import sys
import traceback
def print_pretty(obj):
    """Pretty-print *obj* to stdout via :func:`pprint.pprint`."""
    pprint(obj)
def print_block(string, end="\n"):
hash_num = 45
print("\n\n")
print("#" * hash_num)
mid_hash_num = min((hash_num - len(string) - 2) // 2, 3)
mid_hash = "#" * mid_hash_num
mid_space = " " * ((hash_num - 2 * mid_hash_num - len(string))//2)
print("{}{}{}{}{}".format(
mid_hash, mid_space, " "*len(string), mid_space, mid_hash
))
print("{}{}{}{}{}".format(
mid_hash, mid_space, string, mid_space, mid_hash
))
print("{}{}{}{}{}".format(
mid_hash, mid_space, " "*len(string), mid_space, mid_hash
))
print("#" * hash_num)
print()
# Both `DuplicateWriter` and `Tee` are contributed to the following repo:
# https://github.com/netsharecmu/NetShare/blob/0ade9916d27307e63a31d17afcbcb9785c14b9f0/netshare/utils/tee.py
class DuplicateWriter(object):
    """File-like object that fans every write out to several underlying file
    objects (e.g. a real stream plus a log file), flushing eagerly so output
    is never lost on a crash."""

    def __init__(self, file_objects):
        self._file_objects = file_objects

    def write(self, data):
        """Write the string *data* to every underlying file object."""
        for file_object in self._file_objects:
            file_object.write(data)
            file_object.flush()

    def writelines(self, data):
        """Write each string in *data* to every underlying file object.

        Fix: delegate to the targets' ``writelines`` -- the previous code
        passed the whole iterable to ``write()``, which raises TypeError on
        real file objects.
        """
        # Materialize once so every target sees the full sequence even when
        # the caller passes a generator.
        data = list(data)
        for file_object in self._file_objects:
            file_object.writelines(data)
            file_object.flush()

    def flush(self):
        """Flush every underlying file object."""
        for file_object in self._file_objects:
            file_object.flush()
class Tee(object):
    """Context manager that mirrors stdout/stderr to files.

    While active, sys.stdout and sys.stderr are replaced by DuplicateWriter
    instances that write both to the original streams and to the given log
    files; on exit the originals are restored, any traceback is appended to
    the stderr log, and both files are flushed and closed.
    """
    def __init__(self, stdout_path, stderr_path):
        # Open the log files and remember the real streams for restoration.
        self.stdout_file = open(stdout_path, 'w')
        self.stderr_file = open(stderr_path, 'w')
        self.stdout = sys.stdout
        self.stderr = sys.stderr
        self.stdout_writer = DuplicateWriter([sys.stdout, self.stdout_file])
        self.stderr_writer = DuplicateWriter([sys.stderr, self.stderr_file])

    def __enter__(self):
        # Redirect the process-wide streams to the duplicating writers.
        sys.stdout = self.stdout_writer
        sys.stderr = self.stderr_writer

    def __exit__(self, exc_type, exc, exc_tb):
        # Restore the real streams first so later output is unaffected.
        sys.stdout = self.stdout
        sys.stderr = self.stderr
        if exc_type is not None:
            # Record the traceback of the escaping exception in the log too.
            self.stderr_writer.write(traceback.format_exc())
        self.stderr_writer.flush()
        self.stdout_writer.flush()
        self.stderr_file.close()
self.stdout_file.close() | Xinyu-Li-123/DefenseEval | DefenseEval/utils/utils.py | utils.py | py | 2,237 | python | en | code | 0 | github-code | 36 |
459210709 | import requests
import json
import ntpath
class Jira:
    """Common JIRA API methods.

    Thin wrapper over JIRA's REST API. URIs for JIRA's REST API resources
    have the following structure::

        http://host:port/context/rest/api-name/api-version/resource-name

    The current API version is 2. Methods return the raw
    ``requests.Response`` (or, for :meth:`add_attachment`, a list that may
    also contain error strings) so callers can inspect status codes.
    """

    def __init__(self, base_http_url, project_key, auth):
        """Initialize JIRA object with base_http_url, project_key, and auth.

        :param base_http_url: URL for JIRA site, e.g. ``http://localhost:8080/``.
            Must end with a slash — endpoint paths are appended directly.
        :type base_http_url: str
        :param project_key: The project key supplied in resource paths.
        :type project_key: str
        :param auth: Tuple of username and password for authentication.
        :type auth: tuple[str, str]
        """
        self.base_http_url = base_http_url
        self.project_key = project_key
        self.auth = auth

    def create_issue(self, summary, description='', issue_type='Task'):
        """Create an issue in JIRA.

        :param summary: A brief summary of the issue (the "title" shown next
            to the issue id on the boards).
        :type summary: str
        :param description: More details about the issue.
        :type description: str
        :param issue_type: One of the predefined issue types for your project
            ('Bug', 'Task', 'Story', and 'Epic' by default).
        :type issue_type: str
        :return: Response from the POST request.
            STATUS 201: Success - returns a link to the created issue.
            STATUS 400: Error - returned if the input is invalid (missing
            required fields, invalid field values, and so forth).
        :rtype: requests.Response
        """
        url = self.base_http_url + 'rest/api/2/issue'
        headers = {'Content-Type': 'application/json'}
        data = {
            "fields": {
                "project": {
                    "key": self.project_key
                },
                "summary": summary,
                "description": description,
                "issuetype": {
                    "name": issue_type
                }
            }
        }
        return requests.post(url, auth=self.auth, headers=headers, data=json.dumps(data))

    def get_issues(self, issue_id=None, max_results=10, start_at=0):
        """Get a specific issue or a page of issues.

        :param issue_id: Issue id or issue key. When ``None``, a page of
            issues is requested instead.
        :type issue_id: str
        :param max_results: How many results to return per page.
        :type max_results: int
        :param start_at: Index of the first item in the page of results.
        :type start_at: int
        :return: Response from the GET request.
            STATUS 200: Success - full JSON representation of the issue(s).
            STATUS 404: Error - issue not found or no permission to view it.
        :rtype: requests.Response
        """
        if issue_id is None:
            # BUG FIX: '=' was missing after 'startAt', so the pagination
            # offset was silently ignored by the server.
            url = (self.base_http_url + 'rest/api/2/issue?maxResults='
                   + str(max_results) + '&startAt=' + str(start_at))
        else:
            url = self.base_http_url + 'rest/api/2/issue/' + str(issue_id)
        headers = {'Content-Type': 'application/json'}
        return requests.get(url, auth=self.auth, headers=headers)

    def add_attachment(self, issue_id, attachments):
        """Add attachments to a JIRA issue and verify they were attached.

        :param issue_id: Issue id or issue key.
        :type issue_id: str
        :param attachments: Paths of files to upload and attach to the issue.
        :type attachments: list[str]
        :return: One entry per upload attempt; ``requests.Response`` objects
            plus error strings for missing uploads/verification failures.
            STATUS 200: Success.
            STATUS 403: Error - attachments disabled or no permission.
            STATUS 404: Error - issue not found, no permission, or the
            attachment exceeds the maximum configured attachment size.
        :rtype: list[requests.Response | str]
        """
        url = self.base_http_url + 'rest/api/2/issue/' + issue_id + '/attachments'
        # 'no-check' token header is required by JIRA for file uploads.
        headers = {'X-Atlassian-Token': 'no-check'}
        r = []
        filenames = []
        # POST each attachment; the context manager guarantees the file
        # handle is closed even if the request raises.
        if attachments:
            for file_path in attachments:
                filenames.append(ntpath.basename(file_path))
                with open(file_path, 'rb') as upload:
                    r.append(requests.post(url, auth=self.auth, headers=headers, files={'file': upload}))
        else:
            r.append('ERROR: No attachments to add.')
        # Verify attachments were attached by re-reading the issue.
        if attachments:
            jira_attachments = self.get_issues(issue_id).json()['fields']['attachment']
            for filename in filenames:
                if not any(d['filename'] == filename for d in jira_attachments):
                    # does not exist
                    r.append('ERROR: File ' + filename + ' was not attached.')
        return r
| mjlabe/python-atlassian-server-api | atlassian_server_api/jira.py | jira.py | py | 6,655 | python | en | code | 0 | github-code | 36 |
10183845847 | from utils import evaluation_utils, embedding_utils
from semanticgraph import io
from parsing import legacy_sp_models as sp_models
from models import baselines
import numpy as np
from sacred import Experiment
import json
import torch
from torch import nn
from torch.autograd import Variable
from tqdm import *
import ast
from models.factory import get_model
import os
# Make CUDA device numbering match the physical PCI bus ordering.
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"  # see issue #152
# Run on GPU 0 when available, otherwise fall back to CPU.
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Sacred experiment handle; configs/commands below are registered on it.
ex = Experiment("test")
np.random.seed(1)  # reproducible shuffling / initialisation
# Index of the "P0" label — presumably the no-relation class used as
# empty_label during evaluation; TODO confirm against evaluation_utils.
p0_index = 1
def to_np(x):
    """Return the values of torch tensor *x* as a NumPy array on the CPU."""
    cpu_tensor = x.data.cpu()
    return cpu_tensor.numpy()
@ex.config
def main_config():
    """ Main Configurations """
    # NOTE: sacred turns every local assignment below into a config entry,
    # so the variable names themselves are part of the experiment interface.
    device_id = 0
    #
    model_name = "GPGNN"
    data_folder = "data/gpgnn_data/"
    save_folder = "data/models/"
    model_params = "model_params.json"  # JSON file with hyper-parameters
    word_embeddings = "glove.6B.50d.txt"  # pre-trained GloVe vectors
    train_set = "test_train.json" #"train.json"
    val_set = "test_val.json" #"validation.json"
    # a file to store property2idx
    # if is None use model_name.property2idx
    property_index = None
    learning_rate = 1e-3
    shuffle_data = True
    save_model = True
    grad_clip = 0.25  # max gradient norm for clipping
    # Restrict CUDA to the configured device.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(device_id)
@ex.automain
def main(model_params, model_name, data_folder, word_embeddings, train_set, val_set, property_index, learning_rate, shuffle_data, save_folder, save_model, grad_clip):
    """Train a relation-extraction model (GPGNN/PCNN/CNN/ContextAware).

    All parameters are injected by sacred from ``main_config``. Loads
    embeddings and graph data, converts graphs to index tensors, trains for
    ``nb_epoch`` epochs, reports instance-based F1 on train/validation, and
    checkpoints the model every 5 epochs.
    """
    if not os.path.exists(save_folder):
        os.mkdir(save_folder)
    # Replace the params *path* with the loaded params dict.
    with open(model_params) as f:
        model_params = json.load(f)
    embeddings, word2idx = embedding_utils.load(data_folder + word_embeddings)
    print("Loaded embeddings:", embeddings.shape)

    def check_data(data):
        # Sanity check: warn about graphs missing their entity list.
        for g in data:
            if(not 'vertexSet' in g):
                print("vertexSet missed\n")

    training_data, _ = io.load_relation_graphs_from_file(data_folder + train_set, load_vertices=True)
    val_data, _ = io.load_relation_graphs_from_file(data_folder + val_set, load_vertices=True)
    check_data(training_data)
    check_data(val_data)

    if property_index:
        print("Reading the property index from parameter")
        # BUG FIX: this referenced undefined `args.property_index` (this file
        # uses sacred, not argparse); use the injected parameter instead.
        with open(save_folder + property_index) as f:
            property2idx = ast.literal_eval(f.read())
    else:
        # Build the relation-label index from all kbIDs seen in training,
        # plus the special "P0" (no relation) label.
        _, property2idx = embedding_utils.init_random({e["kbID"] for g in training_data
                                                       for e in g["edgeSet"]} | {"P0"}, 1, add_all_zeroes=True, add_unknown=True)

    max_sent_len = max(len(g["tokens"]) for g in training_data)
    print("Max sentence length:", max_sent_len)
    max_sent_len = 36  # fixed padding length, overrides the observed maximum
    print("Max sentence length set to: {}".format(max_sent_len))

    # Pick the graph-to-tensor conversion matching the model architecture.
    graphs_to_indices = sp_models.to_indices
    if model_name == "ContextAware":
        graphs_to_indices = sp_models.to_indices_with_real_entities_and_entity_nums_with_vertex_padding
    elif model_name == "PCNN":
        graphs_to_indices = sp_models.to_indices_with_relative_positions_and_pcnn_mask
    elif model_name == "CNN":
        graphs_to_indices = sp_models.to_indices_with_relative_positions
    elif model_name == "GPGNN":
        graphs_to_indices = sp_models.to_indices_with_real_entities_and_entity_nums_with_vertex_padding

    _, position2idx = embedding_utils.init_random(np.arange(-max_sent_len, max_sent_len), 1, add_all_zeroes=True)

    train_as_indices = list(graphs_to_indices(training_data, word2idx, property2idx, max_sent_len, embeddings=embeddings, position2idx=position2idx))
    training_data = None  # free memory; only the index tensors are needed now
    n_out = len(property2idx)
    print("N_out:", n_out)
    val_as_indices = list(graphs_to_indices(val_data, word2idx, property2idx, max_sent_len, embeddings=embeddings, position2idx=position2idx))
    val_data = None

    print("Save property dictionary.")
    with open(save_folder + model_name + ".property2idx", 'w') as outfile:
        outfile.write(str(property2idx))

    print("Training the model")
    print("Initialize the model")
    model = get_model(model_name)(model_params, embeddings, max_sent_len, n_out).to(device)
    # Label index 0 is padding — exclude it from the loss.
    loss_func = nn.CrossEntropyLoss(ignore_index=0).to(device)
    opt = torch.optim.Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=learning_rate, weight_decay=model_params['weight_decay'])

    indices = np.arange(train_as_indices[0].shape[0])
    step = 0
    for train_epoch in range(model_params['nb_epoch']):
        if(shuffle_data):
            np.random.shuffle(indices)
        f1 = 0
        for i in tqdm(range(int(train_as_indices[0].shape[0] / model_params['batch_size']))):
            opt.zero_grad()
            sentence_input = train_as_indices[0][indices[i * model_params['batch_size']: (i + 1) * model_params['batch_size']]]
            entity_markers = train_as_indices[1][indices[i * model_params['batch_size']: (i + 1) * model_params['batch_size']]]
            labels = train_as_indices[2][indices[i * model_params['batch_size']: (i + 1) * model_params['batch_size']]]
            # GPGNN/PCNN take an extra per-batch input (entity nums / mask).
            if model_name == "GPGNN":
                output = model(Variable(torch.from_numpy(sentence_input.astype(int))).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int))).to(device),
                               train_as_indices[3][indices[i * model_params['batch_size']: (i + 1) * model_params['batch_size']]])
            elif model_name == "PCNN":
                output = model(Variable(torch.from_numpy(sentence_input.astype(int))).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int))).to(device),
                               Variable(torch.from_numpy(np.array(train_as_indices[3][i * model_params['batch_size']: (i + 1) * model_params['batch_size']])).float(), requires_grad=False).to(device))
            else:
                output = model(Variable(torch.from_numpy(sentence_input.astype(int))).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int))).to(device))
            loss = loss_func(output, Variable(torch.from_numpy(labels.astype(int))).view(-1).to(device))
            loss.backward()
            torch.nn.utils.clip_grad_norm(model.parameters(), grad_clip)
            opt.step()

            # Compute train F1 on non-padding positions only.
            _, predicted = torch.max(output, dim=1)
            labels = labels.reshape(-1).tolist()
            predicted = predicted.data.tolist()
            p_indices = np.array(labels) != 0
            predicted = np.array(predicted)[p_indices].tolist()
            labels = np.array(labels)[p_indices].tolist()
            _, _, add_f1 = evaluation_utils.evaluate_instance_based(predicted, labels, empty_label=p0_index)
            f1 += add_f1
        print("Train f1: ", f1 / (train_as_indices[0].shape[0] / model_params['batch_size']))

        # Validation pass (no gradient updates).
        val_f1 = 0
        for i in tqdm(range(int(val_as_indices[0].shape[0] / model_params['batch_size']))):
            sentence_input = val_as_indices[0][i * model_params['batch_size']: (i + 1) * model_params['batch_size']]
            entity_markers = val_as_indices[1][i * model_params['batch_size']: (i + 1) * model_params['batch_size']]
            labels = val_as_indices[2][i * model_params['batch_size']: (i + 1) * model_params['batch_size']]
            if model_name == "GPGNN":
                output = model(Variable(torch.from_numpy(sentence_input.astype(int)), volatile=True).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int)), volatile=True).to(device),
                               val_as_indices[3][i * model_params['batch_size']: (i + 1) * model_params['batch_size']])
            elif model_name == "PCNN":
                output = model(Variable(torch.from_numpy(sentence_input.astype(int)), volatile=True).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int)), volatile=True).to(device),
                               Variable(torch.from_numpy(np.array(val_as_indices[3][i * model_params['batch_size']: (i + 1) * model_params['batch_size']])).float(), volatile=True).to(device))
            else:
                output = model(Variable(torch.from_numpy(sentence_input.astype(int)), volatile=True).to(device),
                               Variable(torch.from_numpy(entity_markers.astype(int)), volatile=True).to(device))
            _, predicted = torch.max(output, dim=1)
            labels = labels.reshape(-1).tolist()
            predicted = predicted.data.tolist()
            p_indices = np.array(labels) != 0
            predicted = np.array(predicted)[p_indices].tolist()
            labels = np.array(labels)[p_indices].tolist()
            _, _, add_f1 = evaluation_utils.evaluate_instance_based(
                predicted, labels, empty_label=p0_index)
            val_f1 += add_f1
        print("Validation f1: ", val_f1 /
              (val_as_indices[0].shape[0] / model_params['batch_size']))

        # save model
        if (train_epoch % 5 == 0 and save_model):
            torch.save(model.state_dict(), "{0}{1}-{2}.out".format(save_folder, model_name, str(train_epoch)))
        step = step + 1
| jack139/gp-gnn_test | train.py | train.py | py | 9,172 | python | en | code | 0 | github-code | 36 |
# Census of registered people: count adults, men, and women under 20.
maiores = mulheres20 = homens = 0
while True:
    i = int(input('Digite a idade: '))
    s = ' '
    # Keep asking until a valid sex (M/F) is typed.
    while s not in 'MF':
        s = str(input('Digite o sexo [M/F]: ')).strip().upper()[0]
    r = ' '
    # Keep asking until a valid continue answer (S/N) is typed.
    while r not in 'SN':
        r = str(input('Deseja continuar cadastrando [S/N]? ')).strip().upper()[0]
    if i > 18:
        maiores += 1
    if s == 'M':
        homens += 1
    if s == 'F' and i < 20:
        # BUG FIX: was "mulheres20 = + 1", which reset the count to 1
        # instead of incrementing it.
        mulheres20 += 1
    if r == 'N':
        break
print(f'''A) {maiores} pessoas tem mais de 18 anos
B) Foram cadastrados {homens} homens
C) {mulheres20} é o número de mulheres com menos de 20 anos''')
| lucasaguiar-dev/Questoes-Python | Projeto donwload/PythonExercicios/ex069.py | ex069.py | py | 620 | python | pt | code | 0 | github-code | 36 |
from urllib.request import urlopen
from bs4 import BeautifulSoup
import ssl

# Ignore SSL certificate errors
ctx = ssl.create_default_context()
ctx.check_hostname = False
ctx.verify_mode = ssl.CERT_NONE

# Follow a chain of links `count2` times, taking the link at `position`
# each hop, and collect a name from each visited page.
url = input('Enter URL: ')
count2 = int(input('Enter count: '))
position = int(input('Enter position: '))

html = urlopen(url, context=ctx).read()
soup = BeautifulSoup(html, "html.parser")
tags = soup('a')
count = 0
names_list = []
while count < count2:
    # NOTE(review): the hard-coded index 17 looks assignment-specific; it
    # probably should be position - 1 like the link selection below — confirm.
    names_list.append(soup.find_all('a')[17].get_text())
    url = soup.find_all('a')[position - 1]["href"]
    print("Retrieving: ", url)
    html = urlopen(url, context=ctx).read()
    soup = BeautifulSoup(html, "html.parser")
    tags = soup('a')
    count += 1
print(names_list)
length = len(names_list)
print("The answer to the assignment for this execution is", (names_list[length - 1]))
| laurmvan/SI206-Fall2017 | HW6/HW6_PartB.py | HW6_PartB.py | py | 875 | python | en | code | null | github-code | 36 |
22536995817 | #!/usr/bin/env python3
import rospy
from std_msgs.msg import Float64MultiArray, Float64
import time
from controller_manager_msgs.srv import SwitchController
# To run this file:
# roslaunch gazebo_ros empty_world.launch
# rosrun gazebo_ros spawn_model -file `rospack find ur5-joint-position-control`/urdf/ur5_jnt_pos_ctrl.urdf -urdf -x 0 -y 0 -z 0.1 -model ur5
# roslaunch ur5-joint-position-control ur5_joint_position_control.launch
# rosrun ur5-joint-position-control trajectory_command.py
# rosservice call /controller_manager/list_controllers
# Phase 1: drive each UR5 joint to a start pose via the per-joint
# position controllers, then switch controllers and stream a group command.
rospy.init_node('talker', anonymous=True)
rate = rospy.Rate(1)  # 1 Hz retry rate while waiting for subscribers
# One position-command publisher per UR5 joint.
joint_com_pub0 = rospy.Publisher('/shoulder_pan_joint_position_controller/command', Float64, queue_size=50)
joint_com_pub1 = rospy.Publisher('/shoulder_lift_joint_position_controller/command', Float64, queue_size=50)
joint_com_pub2 = rospy.Publisher('/elbow_joint_position_controller/command', Float64, queue_size=50)
joint_com_pub3 = rospy.Publisher('/wrist_1_joint_position_controller/command', Float64, queue_size=50)
joint_com_pub4 = rospy.Publisher('/wrist_2_joint_position_controller/command', Float64, queue_size=50)
joint_com_pub5 = rospy.Publisher('/wrist_3_joint_position_controller/command', Float64, queue_size=50)

angle = Float64()
angle.data = 0.5  # shoulder pan target; all other joints are commanded to 0
ctrl_c = False
while not ctrl_c:
    # Wait until every controller has subscribed before publishing, so the
    # first command is not dropped.
    connections0 = joint_com_pub0.get_num_connections()
    connections1 = joint_com_pub1.get_num_connections()
    connections2 = joint_com_pub2.get_num_connections()
    connections3 = joint_com_pub3.get_num_connections()
    connections4 = joint_com_pub4.get_num_connections()
    connections5 = joint_com_pub5.get_num_connections()
    if connections0 > 0 and connections1 > 0 and connections2 > 0 and connections3 > 0 and connections4 > 0 and connections5 > 0:
        joint_com_pub0.publish(angle)
        angle.data = 0  # remaining joints go to the zero position
        joint_com_pub1.publish(angle)
        joint_com_pub2.publish(angle)
        joint_com_pub3.publish(angle)
        joint_com_pub4.publish(angle)
        joint_com_pub5.publish(angle)
        ctrl_c = True
        rospy.sleep(rospy.Duration(10))  # give the arm time to reach the pose
    else:
        rate.sleep()

# Phase 2: swap the six position controllers for the group motor controller.
rospy.wait_for_service('/controller_manager/switch_controller')
try:
    switch_controller = rospy.ServiceProxy('/controller_manager/switch_controller', SwitchController)
    ret = switch_controller(['joint_motor_controller', 'joint_state_controller'],
                            ['shoulder_pan_joint_position_controller', 'shoulder_lift_joint_position_controller',
                             'elbow_joint_position_controller', 'wrist_1_joint_position_controller', 'wrist_2_joint_position_controller',
                             'wrist_3_joint_position_controller'], 0, False, 0.0)
except rospy.ServiceException as e:
    print("Service call failed: " + str(e))

# Phase 3: publish a brief group command, then zero it out.
pub = rospy.Publisher('/joint_motor_controller/command', Float64MultiArray, queue_size=10)
rate = rospy.Rate(1) # 10hz
my_msg = Float64MultiArray()
data = [0, -3, -6, 0, 0, 0]  # per-joint command values — units depend on the controller; TODO confirm
my_msg.data = data
ctrl_c = False
while not ctrl_c:
    connections = pub.get_num_connections()
    if connections > 0:
        pub.publish(my_msg)
        rospy.sleep(rospy.Duration(0.3))  # apply the command briefly
        data1 = [0.0, 0.0, 0.0, 0.0, 0.0, 0.0]
        my_msg.data = data1
        pub.publish(my_msg)  # then stop the motion
        ctrl_c = True
        # rospy.sleep(rospy.Duration(10))
    else:
        rate.sleep()
| Gaurav37/Tossingbot | ur5-joint-position-control/scripts/trajectory_command.py | trajectory_command.py | py | 3,256 | python | en | code | 1 | github-code | 36 |
16793418016 | import pygame
from battle import time, MOVE_DOWN_FREQ, BOARD_HEIGHT, BOARD_WIDTH, BLANK, POISON, SHAPES, BOX_SIZE, \
TEMPLATE_HEIGHT, TEMPLATE_WIDTH, MOVE_SIDE_WAYS_FREQ
from utils import get_new_piece, get_blank_board, calculate_level_and_fall_frequency, is_valid_position
from pygame.locals import *
# Pixel offsets that place each player's board left/right of the window centre.
BOARD_OFFSET = [int(((BOX_SIZE * BOARD_WIDTH) / 2) + 60) * -1, int(((BOX_SIZE * BOARD_WIDTH) / 2) - 47)]
# Key bindings: (rotate-ccw, rotate, left, down, right, hard-drop) per player.
LEFT_CONTROLS = (K_q, K_w, K_a, K_s, K_d, K_SPACE)
RIGHT_CONTROLS = (K_UP, K_DOWN, K_LEFT, K_RIGHT, K_INSERT, K_HOME)
CONTROLS = (LEFT_CONTROLS, RIGHT_CONTROLS)
class Player(object):
    """State and input handling for one Tetris-style player.

    Owns the board, falling/next piece, score/level, movement flags and the
    key bindings for this player.
    """

    # Class-level defaults; per-instance values are set in __init__ and
    # mutated during play.
    board = None
    border_color = None
    board_offset = 0
    bangs = 3.0  # currency earned from cleared lines (0.25 per line)
    now = None
    last_move_down_time = now
    last_move_sideways_time = now
    last_fall_time = now
    moving_down = False  # Note: there is no moving_up variable
    moving_left = False
    moving_right = False
    score = 0
    level = 0
    turn = 0
    fall_frequency = 0
    falling_piece = None
    next_piece = None
    controls = tuple()
    game_over = False

    def __init__(self, now=None, player_num=0, single_player=True):
        """Set up a fresh board, timers, and this player's key bindings.

        :param now: timestamp to use for the movement timers (defaults to
            ``time.time()``).
        :param player_num: 0 or 1; selects controls/board offset in two-player mode.
        :param single_player: when True, the player gets both control sets.
        """
        if not now:
            self.now = time.time()
        else:
            self.now = now
        self.board = get_blank_board()
        self.last_move_down_time = self.last_fall_time = self.last_move_sideways_time = self.now
        self.update_level()
        self.turn = 1
        if single_player:
            self.controls = LEFT_CONTROLS + RIGHT_CONTROLS
        else:
            self.controls = CONTROLS[player_num]
            self.board_offset = BOARD_OFFSET[player_num]

    def update_level(self):
        """Recompute level and fall speed from the current score."""
        self.level, self.fall_frequency = calculate_level_and_fall_frequency(self.score)

    def update_falling_piece(self, now):
        """Promote the next piece to falling and draw a new next piece."""
        if self.game_over:
            return
        self.falling_piece = self.next_piece
        self.turn += 1
        self.next_piece = get_new_piece(self.turn)
        self.last_fall_time = now

    def remove_completed_line(self):
        """
        Remove any completed lines on the board, move everything above them
        down, and return the number of complete lines.
        """
        if self.game_over:
            return
        num_lines_removes = 0
        y = BOARD_HEIGHT - 1  # Start y at the bottom of the board
        while y >= 0:
            complete, bonus = self.is_completed_line_with_bonus(y)
            if complete:
                # Remove the line and pull boxes down by one line.
                for pull_down_y in range(y, 0, -1):
                    for x in range(BOARD_WIDTH):
                        self.board[x][pull_down_y] = self.board[x][pull_down_y - 1]
                # Set very top line to blank
                for x in range(BOARD_WIDTH):
                    self.board[x][0] = BLANK
                num_lines_removes += 1
                if bonus:
                    # A single-colour line counts as 4 extra lines.
                    num_lines_removes += 4
                # Note on the next iteration of the loop, y is the same.
                # This is so that is the line that was pulled down is also
                # complete, it will be removed.
            else:
                y -= 1
        if num_lines_removes:
            self.score += num_lines_removes
            self.update_level()
            self.bangs += num_lines_removes * .25  # One new bang every four lines

    def is_completed_line_with_bonus(self, y):
        """
        Return (complete, bonus): complete when row *y* is filled with boxes
        with no gaps (POISON blocks never complete a line); bonus when every
        box in the row has the same colour.
        """
        bonus = True
        block_color = None
        for x in range(BOARD_WIDTH):
            if self.board[x][y] in (BLANK, POISON):
                return False, False
            if block_color is None:
                block_color = self.board[x][y]
            if bonus:
                bonus = block_color == self.board[x][y]
        return True, bonus

    def handle_event(self, event_type, key):
        """Apply one pygame key event (KEYUP/KEYDOWN) to this player's state."""
        # Ignore keys bound to the other player, and everything after game over.
        if key not in self.controls or self.game_over:
            return
        if event_type == KEYUP:
            if key in (K_LEFT, K_a):
                self.moving_left = False
            elif key in (K_RIGHT, K_d):
                self.moving_right = False
            elif key in (K_DOWN, K_s):
                self.moving_down = False
        elif event_type == KEYDOWN:
            # moving the block sideways
            if key in (K_LEFT, K_a) and is_valid_position(self.board, self.falling_piece, adj_x=-1):
                self.falling_piece['x'] -= 1
                self.moving_left = True
                self.moving_right = False
                self.last_move_sideways_time = self.now
            elif key in (K_RIGHT, K_d) and is_valid_position(self.board, self.falling_piece, adj_x=1):
                self.falling_piece['x'] += 1
                self.moving_left = False
                self.moving_right = True
                self.last_move_sideways_time = self.now
            # Rotating the block (if there is room to rotate)
            elif key in (K_UP, K_w):
                self.falling_piece['rotation'] = (self.falling_piece['rotation'] + 1) % len(SHAPES[self.falling_piece['shape']])
                if not is_valid_position(self.board, self.falling_piece):
                    # Undo the rotation if the new orientation collides.
                    self.falling_piece['rotation'] = (self.falling_piece['rotation'] - 1) % len(SHAPES[self.falling_piece['shape']])
            elif key == K_q:
                self.falling_piece['rotation'] = (self.falling_piece['rotation'] - 1) % len(SHAPES[self.falling_piece['shape']])
                if not is_valid_position(self.board, self.falling_piece):
                    self.falling_piece['rotation'] = (self.falling_piece['rotation'] + 1) % len(SHAPES[self.falling_piece['shape']])
            # Make the block fall faster with the down key
            elif key in (K_DOWN, K_s):
                self.moving_down = True
                if is_valid_position(self.board, self.falling_piece, adj_y=1):
                    self.falling_piece['y'] += 1
                self.last_move_down_time = self.now
            # Move the current block all the way down
            elif key == K_SPACE:
                self.moving_down = False
                self.moving_left = False
                self.moving_right = False
                for i in range(1, BOARD_HEIGHT):
                    if not is_valid_position(self.board, self.falling_piece, adj_y=i):
                        break
                self.falling_piece['y'] += i - 1

    def calculate_moves(self, now):
        """Advance continuous movement and gravity for this frame."""
        if self.game_over:
            return
        # Handling moving the block because of user input
        if (self.moving_left or self.moving_right) and now - self.last_move_sideways_time > MOVE_SIDE_WAYS_FREQ:
            if self.moving_left and is_valid_position(self.board, self.falling_piece, adj_x=-1):
                self.falling_piece['x'] -= 1
            elif self.moving_right and is_valid_position(self.board, self.falling_piece, adj_x=1):
                self.falling_piece['x'] += 1
            self.last_move_sideways_time = now
        if self.moving_down and now - self.last_move_down_time > MOVE_DOWN_FREQ and is_valid_position(self.board, self.falling_piece, adj_y=1):
            self.falling_piece['y'] += 1
            self.last_move_down_time = now
        # Let the piece fall if it is time to fall
        if now - self.last_fall_time > self.fall_frequency:
            # See if the piece has landed.
            if not is_valid_position(self.board, self.falling_piece, adj_y=1):
                # falling piece has landed, set it on the self.board
                self.add_to_board(self.falling_piece)
                self.remove_completed_line()
                self.falling_piece = None
            else:
                # piece did not land just move it down one block
                self.falling_piece['y'] += 1
                self.last_fall_time = now

    def add_to_board(self, piece):
        """
        Fill in the board based on piece's location, shape, and rotation
        """
        for x in range(TEMPLATE_WIDTH):
            for y in range(TEMPLATE_HEIGHT):
                if SHAPES[piece['shape']][piece['rotation']][x][y] != BLANK:
                    self.board[x + piece['x']][y + piece['y']] = piece['color']
| dadisi/battle-tetro | battle/player.py | player.py | py | 8,220 | python | en | code | 0 | github-code | 36 |
70537611943 | import elasticsearch
import json
import luigi
from elasticsearch.helpers import bulk
from luigi.contrib import esindex
from nfl import scraper
class IngestData(luigi.Task):
    """Scrape one NFL stats category for one year and dump it to a JSON file."""

    category = luigi.Parameter()
    year = luigi.Parameter()

    def output(self):
        """Return the local JSON target, annotated with category/year.

        The extra attributes are read back by downstream tasks (see
        ``ExportToES.docs``) to tag documents with their source category.
        """
        target = luigi.LocalTarget("output/{0}/{1}.json".format(self.year, self.category))
        target.category = self.category
        target.year = self.year
        return target

    def run(self):
        """Scrape the category/year and write the result as pretty JSON."""
        with self.output().open('w') as f:
            f.write(json.dumps(scraper.scrape_year_category(self.year, self.category), indent=2))
class ExportToES(luigi.Task):
    """Bulk-index every scraped category/year file into Elasticsearch."""

    def __init__(self):
        # Connection settings are hard-coded for the demo.
        self.host = "localhost"
        self.port = "9200"
        self.index = "demo5"
        super(ExportToES, self).__init__()

    def _init_connection(self):
        """Create an Elasticsearch client for the configured host/port."""
        return elasticsearch.Elasticsearch(
            host=self.host,
            port=self.port
        )

    def requires(self):
        """Yield one IngestData task per (year, category) combination."""
        for year in range(2000, 2015):
            for c in ['KICK_RETURNS', 'KICKING', 'PASSING', 'PUNTING', 'RECEIVING', 'RUSHING', 'SACKS', 'SCORING', 'TACKLES', 'TOUCHDOWNS']:
                yield IngestData(c, year)

    def output(self):
        """Marker target in ES itself; touched after a successful bulk load."""
        return esindex.ElasticsearchTarget(host=self.host, port=self.port, index=self.index, doc_type="report", update_id="_id")

    def docs(self):
        """Yield bulk-indexable documents from every upstream JSON file.

        Relies on the ``category`` attribute that IngestData.output() attaches
        to its targets to set the document type.
        """
        for inputFile in self.input():
            with inputFile.open('r') as f:
                for element in json.loads(f.read()):
                    element["_type"] = inputFile.category
                    element["_index"] = self.index
                    yield element

    def run(self):
        """Stream all documents into Elasticsearch and mark the task done."""
        es = self._init_connection()
        bulk(es, self.docs())
        self.output().touch()
| jasonmotylinski/luigi-presentation | pipeline-py/luigi/luigipipeline/demo5.py | demo5.py | py | 1,753 | python | en | code | 1 | github-code | 36 |
"""
This module contains a numerical-method (Monte-Carlo) approach to
determining the Heston model price for a set of parameters.
"""
import numpy as np
from model.implied_volatility.core.implied_vol import implied_volatility_call
def get_heston_price(row):
"""
Generates simultaneous simulation of stock price and
volatility process to solve for final price using numerical method
Args:
row (pd.DataFrame) : input parameters for an option
"""
steps = 2000
Npaths = 10000
dt = row['time_to_maturity'] / steps
size = (Npaths, steps)
prices = np.zeros(size)
K = row['strike']
s_t = row['s0']
v_t = row['initial_variance']
rho = row['correlation']
kappa = row['reversion_speed'] # mean reversion speed
xi = row['vol_vol']
theta = row['Long_average_variance']
r = row['risk_free_rate']
T = row['time_to_maturity']
asset_paths = np.zeros((steps + 1, Npaths))
volatility_paths = np.zeros((steps + 1, Npaths))
asset_paths[0] = s_t
volatility_paths[0] = v_t
dW1 = np.random.normal(size=(steps, Npaths)) * np.sqrt(dt)
dW2 = rho * dW1 + np.sqrt(1 - rho ** 2) * np.random.normal(size=(steps, Npaths)) * np.sqrt(dt)
for t in range(1, steps + 1):
dV = kappa * (theta - volatility_paths[t - 1]) * dt + v_t * np.sqrt(volatility_paths[t - 1]) * dW2[t - 1]
volatility_paths[t] = np.maximum(volatility_paths[t - 1] + dV, 0) # Ensure volatility remains non-negative
dS = r * asset_paths[t - 1] * dt + np.sqrt(volatility_paths[t - 1]) * asset_paths[t - 1] * dW1[t - 1]
asset_paths[t] = np.maximum(asset_paths[t - 1] * dS, 0) # Ensure asset price remains non-negative
final_state_price = asset_paths[:, -1]
if row['kind'] == "call":
opt_price = np.mean(np.maximum(final_state_price - K, 0)) * np.exp(-r * T)
else:
opt_price = np.mean(np.maximum(K - final_state_price, 0)) * np.exp(-r * T)
return opt_price
def create_heston_dataset(df, s0, input_param):
    """
    This method takes current underlying price and
    a set of other parameters and then computes each
    option heston model price using numerical method

    Args:
        df (pd.DataFrame) : input variables dataframe (only 'opt_type' and
            'opt_kind' of the first row are read)
        s0 (float) : current underlying price
        input_param (pd.DataFrame) : contains info about money-ness,
            time, rfr, volatility to create dataset

    Returns:
        pd.DataFrame: input_param enriched with strike, Heston price,
        implied volatility and price/strike ratio columns
    """
    input_param['s0'] = s0
    # Strike derived from moneyness (strike = s0 / moneyness).
    input_param['strike'] = input_param.apply(lambda x: x['s0'] / x['moneyness'], axis=1)
    input_param['european'] = True if df['opt_type'].iloc[0] == "european" else False
    input_param['kind'] = "call" if df['opt_kind'].iloc[0] == "call" else "put"
    # Convert year-fraction maturities to whole calendar days.
    input_param['calender_days'] = input_param.apply(lambda x: x['time_to_maturity'] * 365, axis=1)
    input_param['calender_days'] = input_param['calender_days'].round().astype(int)
    # Monte-Carlo Heston price per row (slow: one simulation per option).
    input_param['Heston_price'] = input_param.apply(lambda x: get_heston_price(x), axis=1)
    input_param['implied_vol'] = input_param.apply(lambda x: implied_volatility_call(x), axis=1)
    input_param['opt_price_by_strike'] = input_param.apply(lambda x: x['Heston_price'] / x['strike'], axis=1)
    return input_param
| Karanpalshekhawat/option-pricing-and-implied-volatility-using-NNs | model/implied_volatility/core/heston_model_pricing.py | heston_model_pricing.py | py | 3,255 | python | en | code | 1 | github-code | 36 |
26744639927 | import argparse
import os
import time
import numpy as np
import torch
import torch.optim as optim
import torch.nn as nn
from torch.utils.data import DataLoader
from data_loader import CSV_PNG_Dataset, CSV_PNG_Dataset_2D, PNG_PNG_Dataset
from netArchitecture.VGG import VGGModel, VGGModel_2D
from netArchitecture.ResNet import ResNet18_2D
from visualize import Visualizations
import logging
logger = logging.getLogger("In train.py")
logger.setLevel(logging.DEBUG)
logger.disabled = True
# parse parameters
parser = argparse.ArgumentParser(description='train deep color extraction model')
parser.add_argument('--mode', default=2, type=int)  ## mode 0: lab 3D histogram; mode 1: lab 2D histogram as input; mode 2: original images with png format
parser.add_argument('--backbone', default="resnet18", type=str)  ## "resnet18" or "vgg"
parser.add_argument('--net_name', default="resnet18+ASPP+2D", type=str)
# BUG FIX: the default must be the *string* 'True'. The flag is compared
# with `opt.with_aspp == 'True'` below, so a boolean True default silently
# disabled ASPP even though the default claimed to enable it.
parser.add_argument('--with_aspp', default='True', choices=('True', 'False'))
parser.add_argument('--trained_model_config', default="", type=str)  # used for resuming to train network
parser.add_argument('--legend_width', default=512, type=int)
parser.add_argument('--legend_height', default=20, type=int)
parser.add_argument('--image_width', default=512, type=int)
parser.add_argument('--image_height', default=256, type=int)
parser.add_argument('--cuda_device', default=0, type=int)
parser.add_argument('--lr', default=1e-4, type=float)
parser.add_argument('--train_bs', default=8, type=int)
parser.add_argument('--num_epochs', default=15, type=int)
parser.add_argument('--color_space', default="Lab", type=str)  # possible value: "Lab", "Rgb"
parser.add_argument('--loss_function', default="MSE", type=str)
parser.add_argument('--prefix', default="", type=str)
opt = parser.parse_args()
# Unpack parsed options into module-level constants.
# IS_LABEL_NORMALIZED = opt.is_label_normalized == 'True'
WITH_ASPP = opt.with_aspp == 'True'
LEARNING_RATE = opt.lr
BATCH_SIZE = opt.train_bs
NUM_EPOCHS = opt.num_epochs
NET_NAME = opt.net_name
CUDA_DEVICE = opt.cuda_device
MODE = opt.mode
BACKBONE = opt.backbone
COLOR_SPACE = opt.color_space
TRAINED_MODEL_CONFIG = opt.trained_model_config
TRAINED_EPOCH = 0  # updated from the checkpoint when resuming
LOSS_FUNCTION = opt.loss_function
PREFIX = opt.prefix  # used in inference.py
IS_LABEL_NORMALIZED = True  # hard-coded; overrides the commented-out CLI flag above
IS_NOT_DEBUG = True  # also controls train-set shuffling below
USE_VISDOM = True

# Input tensor dimensions per mode.
# if (MODE == 0): # lab 3D histogram
IMAGE_WIDTH = 32
IMAGE_HEIGHT = 32
IMAGE_CHANNEL = 32
if (MODE == 1):  # lab 2D histogram
    IMAGE_WIDTH = opt.image_width
    IMAGE_HEIGHT = opt.image_height
    IMAGE_CHANNEL = 1
elif (MODE == 2):  # lab original images
    IMAGE_WIDTH = opt.image_width
    IMAGE_HEIGHT = opt.image_height
    IMAGE_CHANNEL = 3
LABEL_WIDTH = opt.legend_width
LABEL_HEIGHT = opt.legend_height
LABEL_CHANNEL = 3

# Run identifier: encodes the hyper-parameters, or reuses the config name
# of a previously trained model when resuming.
config = "Net_{}__mode_{}__backbone_{}_colorspace_{}__labelnormalized_{}__lossfun_{}__woaspp_{}__lheight_{}__bs_{}__ep_{}__lr_{}".\
    format(NET_NAME, MODE, BACKBONE, COLOR_SPACE, IS_LABEL_NORMALIZED, LOSS_FUNCTION, WITH_ASPP, LABEL_HEIGHT, BATCH_SIZE, NUM_EPOCHS, LEARNING_RATE) \
    if (TRAINED_MODEL_CONFIG == "") else TRAINED_MODEL_CONFIG

# path for save and load netArchitecture
model_dir = "models"
if not os.path.exists(model_dir):
    os.makedirs(model_dir)
model_path = os.path.join(model_dir, config)
torch.cuda.set_device(CUDA_DEVICE)

# define dataset (per input mode; the default is the 3D-histogram loader)
# if MODE == 0:
train_set = CSV_PNG_Dataset(
    label_paras={'width': LABEL_WIDTH, 'height': LABEL_HEIGHT, 'channel': LABEL_CHANNEL},
    image_paras={'width': IMAGE_WIDTH, 'height': IMAGE_HEIGHT, 'channel': IMAGE_CHANNEL},
    is_label_normalized=IS_LABEL_NORMALIZED
)
eval_set = CSV_PNG_Dataset(
    label_paras={'width': LABEL_WIDTH, 'height': LABEL_HEIGHT, 'channel': LABEL_CHANNEL},
    image_paras={'width': IMAGE_WIDTH, 'height': IMAGE_HEIGHT, 'channel': IMAGE_CHANNEL},
    file_list="./dataset/evaluation.txt",
    is_label_normalized=IS_LABEL_NORMALIZED
)
if MODE == 1:
    train_set = CSV_PNG_Dataset_2D(
        image_paras={'width': IMAGE_WIDTH, 'height': IMAGE_HEIGHT, 'channel': IMAGE_CHANNEL},
        label_paras={'width': LABEL_WIDTH, 'height': LABEL_HEIGHT, 'channel': LABEL_CHANNEL},
        color_space=COLOR_SPACE,
        is_label_normalized=IS_LABEL_NORMALIZED)
    eval_set = CSV_PNG_Dataset_2D(
        file_list="./dataset/evaluation.txt",  # here change to evaluation.txt
        image_paras={'width': IMAGE_WIDTH, 'height': IMAGE_HEIGHT, 'channel': IMAGE_CHANNEL},
        label_paras={'width': LABEL_WIDTH, 'height': LABEL_HEIGHT, 'channel': LABEL_CHANNEL},
        color_space=COLOR_SPACE,
        is_label_normalized=IS_LABEL_NORMALIZED)
elif MODE == 2:
    train_set = PNG_PNG_Dataset(image_paras={'width': IMAGE_WIDTH, 'height': IMAGE_HEIGHT, 'channel': IMAGE_CHANNEL},
                                label_paras={'width': LABEL_WIDTH, 'height': LABEL_HEIGHT, 'channel': LABEL_CHANNEL},
                                color_space=COLOR_SPACE, is_label_normalized=IS_LABEL_NORMALIZED)
    eval_set = PNG_PNG_Dataset(image_paras={'width': IMAGE_WIDTH, 'height': IMAGE_HEIGHT, 'channel': IMAGE_CHANNEL},
                               label_paras={'width': LABEL_WIDTH, 'height': LABEL_HEIGHT, 'channel': LABEL_CHANNEL}, file_list="./dataset/evaluation.txt",
                               color_space=COLOR_SPACE, is_label_normalized=IS_LABEL_NORMALIZED)
train_loader = DataLoader(train_set, batch_size=BATCH_SIZE, shuffle=IS_NOT_DEBUG, num_workers=2, drop_last=True)
eval_loader = DataLoader(eval_set, batch_size=1, shuffle=False)

# define net, criterion and optimizer
net = VGGModel(input_channel=IMAGE_CHANNEL, label_height=LABEL_HEIGHT, label_width=LABEL_WIDTH)
if MODE == 2 or MODE == 1:
    if BACKBONE == "vgg":
        net = VGGModel_2D(input_channel=IMAGE_CHANNEL, label_height=LABEL_HEIGHT, label_width=LABEL_WIDTH, with_aspp=WITH_ASPP)
    elif BACKBONE == "resnet18":
        print("resnet18")
        net = ResNet18_2D(input_channel=IMAGE_CHANNEL, label_height=LABEL_HEIGHT, label_width=LABEL_WIDTH, with_aspp=WITH_ASPP)

test_loss_for_each_epoch = []  # used for recording avg mean of each epoch in testing phrase
loss_for_each_epoch = []  # used for recording avg mean of each epoch in training phrase
time_used_cumulation = []
# Resume from checkpoint when a previous run's config name was given.
if TRAINED_MODEL_CONFIG != "":
    checkpoint = torch.load(model_path)
    net.load_state_dict(checkpoint['model_state_dict'])
    TRAINED_EPOCH = checkpoint['epoch'] + 1
    time_used_cumulation = checkpoint['time_used']
    loss_for_each_epoch = checkpoint['loss_for_each_epoch']
print('#netArchitecture parameters:', sum(param.numel() for param in net.parameters()))
if torch.cuda.is_available():
    if logger.isEnabledFor(logging.DEBUG):
        logger.debug(torch.cuda.current_device())
    ts = time.time()
    net.cuda()
    print("finished loading netArchitecture params to cuda, time elapsed: {}".format(time.time() - ts))
optimizer = optim.Adam(net.parameters(), lr=LEARNING_RATE)
vis = Visualizations(env=config)  # visdom dashboard for this run
if LOSS_FUNCTION == "MSE":
    criterian = nn.MSELoss()
elif LOSS_FUNCTION == "BCE":
    criterian = nn.BCELoss()
sigmoid = torch.nn.Sigmoid()  # applied to predictions when labels are normalized
def train():
    """Run the main training loop over the remaining epochs.

    Uses the module-level net/optimizer/criterian/loaders; checkpoints the
    model after every epoch and calls eval() for a validation pass.
    """
    # Resume the cumulative wall-clock counter from the checkpoint if present.
    if len(time_used_cumulation) == 0:
        time_used = 0.0
    else:
        time_used = time_used_cumulation[-1]
    iter_100_loss_values = []  # mean loss sampled every 100 global iterations
    for epoch in range(NUM_EPOCHS - TRAINED_EPOCH):
        tm_start_each_epoch = time.time()
        true_epoch = epoch + TRAINED_EPOCH  # absolute epoch index across resumes
        net.train()
        loss_values = [] # used for visdom to visualize
        epoch_loss_value_in_one_epoch = [] # all loss values in an epoch, for the epoch mean
        for iter, batch in enumerate(train_loader):
            # NOTE(review): anomaly detection slows training considerably and is
            # normally a debugging aid only.
            torch.autograd.set_detect_anomaly(True)
            images, labels = batch['image'], batch['label']
            if torch.cuda.is_available():
                images = images.cuda()
                labels = labels.cuda()
            preds = net(images)
            # BCE needs probabilities, so squash logits when labels are in [0, 1].
            if IS_LABEL_NORMALIZED:
                preds = sigmoid(preds)
            if LOSS_FUNCTION == "MSE":
                loss = criterian(labels, preds)
            elif LOSS_FUNCTION == "BCE":
                loss = criterian(preds, labels.detach())
            loss_values.append(loss.item())
            epoch_loss_value_in_one_epoch.append(loss.item())
            optimizer.zero_grad()
            with torch.autograd.detect_anomaly():
                loss.backward()
            optimizer.step()
            if iter % 10 == 0:
                print("epoch{}, iter{}, loss: {}".format(true_epoch, iter, loss.item()))
            niter = true_epoch * len(train_loader) + iter  # global iteration count
            if niter % 100 == 0:
                iter_100_loss_values.append(np.mean(loss_values))
                if USE_VISDOM:
                    vis.plot_loss(np.mean(loss_values), niter)
                    vis.plot_ground_truth(labels, COLOR_SPACE, caption="groud_truth_in epoch{}, iter{}".format(true_epoch, iter))
                    vis.plot_test_pred(preds, COLOR_SPACE, caption="pred_in epoch{}, iter{}".format(true_epoch, iter))
                    if MODE == 2:
                        vis.plot_ground_truth(images,COLOR_SPACE, win="original images", caption="image in epoch{}, iter{}".format(true_epoch, iter))
                loss_values.clear()
                vis.save()
        # Per-epoch bookkeeping: timing, mean loss, checkpoint, then validation.
        time_used = time_used + time.time() - tm_start_each_epoch
        time_used_cumulation.append(time_used)
        loss_for_each_epoch.append(np.mean(epoch_loss_value_in_one_epoch))
        epoch_loss_value_in_one_epoch.clear()
        torch.save({
            'epoch': true_epoch,
            'model_state_dict': net.state_dict(),
            'time_used': time_used_cumulation,
            'loss_for_each_epoch':loss_for_each_epoch
        }, model_path)
        eval(epoch)
def eval(epoch):
    """Run one validation pass and record the mean loss.

    NOTE(review): this shadows the builtin eval(); kept because train() and the
    main guard call it by this name. Consider wrapping the loop in
    torch.no_grad() to avoid building graphs during evaluation.
    """
    net.eval()
    loss_value_in_epoch = []
    for iter, batch in enumerate(eval_loader):
        images, labels = batch['image'], batch['label']
        if torch.cuda.is_available():
            images = images.cuda()
            labels = labels.cuda()
        preds = net(images)
        # Same probability squashing as training so losses are comparable.
        if IS_LABEL_NORMALIZED:
            preds = sigmoid(preds)
        if LOSS_FUNCTION == "MSE":
            loss = criterian(labels, preds)
        elif LOSS_FUNCTION == "BCE":
            loss = criterian(preds, labels.detach())
        loss_value_in_epoch.append(loss.item())
        if USE_VISDOM:
            vis.plot_ground_truth(labels, COLOR_SPACE, win="evaluate_ground_truth")
            vis.plot_test_pred(preds, COLOR_SPACE, win="evaluate_test_pred")
    # Accumulate the epoch's mean validation loss for later plotting.
    test_loss_for_each_epoch.append(np.mean(loss_value_in_epoch))
if __name__=="__main__":
    # Baseline evaluation of the (possibly resumed) model, then train.
    eval(0)
    train()
| yuanlinping/deep_colormap_extraction | train.py | train.py | py | 10,586 | python | en | code | 7 | github-code | 36 |
44771327846 | def extract_info(corona_list):
result = []
for corona in corona_list:
info = corona.contents
corona_info = {
'city' : info[1].string,
'city_detail' : info[2].string,
'name' : info[3].text,
'phone' : info[4].string
}
result.append(corona_info)
return result | sumins2/homework | session09_crawling/corona.py | corona.py | py | 309 | python | en | code | 0 | github-code | 36 |
14433470190 | import operator
# ์ํ๋ฒณ ๋์๋ฌธ์๋ก ๋ ๋จ์ด๊ฐ ์ฃผ์ด์ง๋ฉด,์ด ๋จ์ด์์ ๊ฐ์ฅ ๋ง์ด ์ฌ์ฉ๋ ์ํ๋ฒณ์ด ๋ฌด์์ธ์ง ์์๋ด๋ ํ๋ก๊ทธ๋จ์ ์์ฑํ์์ค.
# ๋จ, ๋๋ฌธ์์ ์๋ฌธ์๋ฅผ ๊ตฌ๋ถํ์ง ์๋๋ค.
word = input().strip()
word = word.upper()
count = {} #๋์
๋๋ฆฌ ์ฌ์ฉ
list1 = list(word)
#print(type(count)) #<class 'dict'>
for i in list1 : #์ํ๋ฒณ์ ํ๋์ฉ ๊บผ๋ธ๋ค
try: count[i] += 1 #์ด๋ฏธ ์กด์ฌํ๋ฉด +1
except: count[i] = 1 #์์ผ๋ฉด 1
#print(type(count)) #<class 'dict'>
# key=operator.itemgetter(1) ๋ ๋๋ฒ์งธ ์ธ์๊ฐ์ ๊ธฐ์ค์ผ๋ก ์ ๋ ฌํ๋ค
count = sorted(count.items(), key=operator.itemgetter(1), reverse=True) #๋์
๋๋ฆฌ ์ ๋ ฌ์ sorted(). sorted()ํ๋ฉด listํ์
์ด ๋๋ค.
#print(type(count)) #<class 'list'>
#print(count)
#print(count[0])
if len(count) > 1 and count[0][1] == count[1][1] :
print('?')
else :
print(count[0][0])
# import operator
# word = input().strip()
# word = word.upper()
# count = {}
# list1 = list(word)
# #์ถํ๋น๋ ์ ์ฅ
# for i in list1 :
# try: count[i] += 1
# except: count[i] = 1
# #์ ๋ ฌ
# count = sorted(count.items(), key=operator.itemgetter(1), reverse=True)
# if len(count) > 1 and count[0][1] == count[1][1] :
# print('?')
# else :
# print(count[0][0]) | pivotCosmos/algorithm | 202206/ex0608/1157_mostAlphabet.py | 1157_mostAlphabet.py | py | 1,331 | python | ko | code | 0 | github-code | 36 |
20341081409 | import hashlib
class Block:
    """One block of a simple hash-chained ledger.

    The block's own hash is the SHA-256 digest of the UTF-8 encoded
    concatenation of its index, timestamp, data and previous hash.
    """

    def __init__(self, index, timestamp, data, previous_hash):
        self.index = index
        self.timestamp = timestamp
        self.data = data
        self.previous_hash = previous_hash
        self.hash = self._hash_block()

    def _hash_block(self):
        """Return the hex digest binding this block to its predecessor."""
        return hashlib.sha256(self._encode_data_for_hash_update()).hexdigest()

    def _encode_data_for_hash_update(self):
        """UTF-8 bytes of index + timestamp + data + previous_hash, in order."""
        fields = (self.index, self.timestamp, self.data, self.previous_hash)
        return "".join(str(field) for field in fields).encode('utf-8')
| rdempsey/simple-python-blockchain | spb/lib/block.py | block.py | py | 835 | python | en | code | 0 | github-code | 36 |
def extract_times(raw_times_dict):
    """
    Extract the actual time values from the data provided by the SRC Run API.

    Only timing methods the run actually reported (non-None flag) are kept;
    the returned dict maps each method name to its numeric *_t value.
    """
    actual_times = {}
    for method in ("realtime", "realtime_noloads", "ingame"):
        if raw_times_dict[method] is not None:
            actual_times[method] = raw_times_dict[method + "_t"]
    return actual_times
def format_run_for_post(run, variables=None):
    """Reshape a speedrun.com API run payload into the POST submission body.

    Registered users are referenced by id, guests by display name. Optional
    fields (level, video, splits.io id, variables) appear only when present.
    """
    first_player = run["players"][0]
    if first_player["rel"] == "user":
        player = {"rel": first_player["rel"], "id": first_player["id"]}
    else:
        player = {"rel": first_player["rel"], "name": first_player["name"]}

    system = run["system"]
    times = run["times"]
    post_data = {
        "run": {
            "category": run["category"],
            "date": run["date"],
            "region": system["region"],
            "platform": system["platform"],
            "verified": False,
            "times": {
                "realtime": times["primary_t"],
                "realtime_noloads": times["realtime_noloads_t"],
                # in-game time is genuinely optional, so default it to 0
                "ingame": times.get("ingame_t", 0),
            },
            "players": [player],
            "emulated": system["emulated"],
            "comment": run.get("comment", None),
        }
    }

    # Optional fields are added only when the source run supplies them.
    if run["level"] is not None:
        post_data["run"]["level"] = run["level"]
    videos = run.get("videos")
    if videos:
        post_data["run"]["video"] = videos["links"][0]["uri"]
    splits = run.get("splits")
    if splits:
        post_data["run"]["splitsio"] = splits["uri"].split("/")[-1]
    if variables:
        post_data["run"]["variables"] = variables
    return post_data
| JoshSanch/run_migrator | utils/src_conversion_utils.py | src_conversion_utils.py | py | 1,836 | python | en | code | 2 | github-code | 36 |
# Reverses the process of the spritesheetCreator script: reads the atlas of
# original image sizes and crops each sprite back out of spritesheet.png.
from PIL import Image
import sys

# Load the size atlas saved by the spritesheet script; exit quietly if missing.
try:
    with open("imageAtlas.txt", "r") as imageSearch:
        data = imageSearch.read()
except IOError:
    sys.exit()

# Strip the list/tuple punctuation so the atlas is whitespace-separated numbers.
newData = data.replace('[', '').replace('(', '').replace(',', '').replace(')', '').replace(']', '')
listNew = newData.split(' ')
# Pair the numbers back up into (width, height) tuples, one per image.
# fix: materialize the zip — in Python 3 a zip object cannot be indexed or
# iterated twice, which broke len()/max()/imageList[x] below.
imageList = list(zip(listNew[::2], listNew[1::2]))
# fix: derive the count from the pairs instead of counting spaces, which
# produced a float under true division and crashed range().
imageAmount = len(imageList)

# Same math as the spritesheet script: how many frames fit in one row.
i = 1
while (len(imageList) / i) != i:
    i = i + 1
    if (len(imageList) / i) < i:
        break
maxFramesPerRow = i

spritesheet = Image.open("spritesheet.png")
# NOTE(review): max() compares the (width, height) *string* tuples
# lexicographically, mirroring the creator script; the result is treated as
# the grid cell size.
maxWidth, maxHeight = max(imageList)

# Walk the grid row by row. Every cell is maxWidth x maxHeight (the spacing is
# constant); each sprite keeps its own original size inside its cell.
counter = 0
row = 0
column = 0
for x in range(imageAmount):
    width, height = imageList[x]
    if counter == maxFramesPerRow:
        # Row is full: wrap to the first column of the next row.
        row = row + 1
        column = 0
        counter = 0
    elif x != 0:  # fix: "is not 0" compared identity, not value
        column = column + 1
    area = (int(maxWidth) * column, int(maxHeight) * row,
            int(width) + int(maxWidth) * column, int(height) + int(maxHeight) * row)
    cropped_img = spritesheet.crop(area)
    cropped_img.save("image" + str(x + 1) + ".png", "PNG")
    # fix: count the wrapped image too — the old code skipped the increment on
    # a row wrap, so later rows silently received one extra column.
    counter = counter + 1
| IrrationalThinking/portfolio | Example/reversal.py | reversal.py | py | 2,172 | python | en | code | 0 | github-code | 36 |
14806312325 | class Item:
def __init__(self, type, area):
self.type = type
self.area = area
def __str__(self):
return '็ฑปๅ๏ผ%s๏ผๅฑๆง๏ผ%s' % (self.type, self.area)
class Home:
    # A house with a fixed floor area; added furniture consumes free_area.
    def __init__(self, address, area):
        self.address = address
        self.area = area
        # Remaining floor area still available for furniture.
        self.free_area = area
        self.items = []

    def add(self, item):
        # Accept the item only if it fits in the remaining free area.
        if self.free_area >= item.area:
            self.free_area -= item.area
            self.items.append(item)
            print('ๆทปๅ ๆๅ')
        else:
            print('ๆทปๅ ๅคฑ่ดฅ')

    def __str__(self):
        # NOTE(review): the format string below was damaged by a bad encoding
        # round-trip (it even spans a line break in this copy); kept
        # byte-identical to avoid changing runtime output.
        return 'ๅฐๅ๏ผ%s๏ผๅ ๅฐ้ข็งฏ๏ผ%s๏ผๅฉไฝ้ข็งฏ๏ผ%s๏ผ็ฎๅๅฎถไธญๅฎถๅ
ท๏ผ%s' % (
            self.address, self.area, self.free_area, [item.type for item in self.items])
# Demo: a 300-unit home accepts the 10-unit item but rejects the 300-unit one.
zz1 = Item('ๆกๅญ1', 10)
zz2 = Item('ๆกๅญ2', 300)
h = Home('่ฅฟๅฎ', 300)
h.add(zz1)
h.add(zz2)
print(h)
| penguinsss/Project | ้ขๅๅฏน่ฑก/็ปไน /ๅฎถๅ
ท.py | ๅฎถๅ
ท.py | py | 895 | python | en | code | 0 | github-code | 36 |
25187912716 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Framework Package. This package holds all Data Management, and
# Web-UI helpful to run brain-simulations. To use it, you also need do download
# TheVirtualBrain-Scientific Package (for simulators). See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
import numpy
from tvb.datatypes.local_connectivity_data import LocalConnectivityData, LOG
from tvb.datatypes.surfaces_scientific import gdist
class LocalConnectivityScientific(LocalConnectivityData):
    """ This class exists to add scientific methods to LocalConnectivity """
    # Not a mapped table of its own; persistence comes from the data parent.
    __tablename__ = None

    def _find_summary_info(self):
        """
        Gather scientifically interesting summary information from an instance
        of this datatype.

        Returns max/min/mean and shape metadata for the `matrix` array.
        """
        return self.get_info_about_array('matrix',
                                         [self.METADATA_ARRAY_MAX,
                                          self.METADATA_ARRAY_MIN,
                                          self.METADATA_ARRAY_MEAN,
                                          self.METADATA_ARRAY_SHAPE])

    def compute_sparse_matrix(self):
        """
        NOTE: Before calling this method, the surface field
        should already be set on the local connectivity.

        Computes the sparse matrix for this local connectivity: builds the
        geodesic-distance matrix up to `cutoff`, then derives the kernel via
        compute(), and finally drops the distance matrix to free memory.
        """
        if self.surface is None:
            msg = " Before calling 'compute_sparse_matrix' method, the surface field should already be set."
            LOG.error(msg)
            raise Exception(msg)
        # gdist expects float64 vertices and int32 triangles.
        self.matrix_gdist = gdist.local_gdist_matrix(self.surface.vertices.astype(numpy.float64),
                                                     self.surface.triangles.astype(numpy.int32),
                                                     max_distance=self.cutoff)
        self.compute()
        # Avoid having a large data-set in memory.
        self.matrix_gdist = None
| suraj1074/tvb-library | tvb/datatypes/local_connectivity_scientific.py | local_connectivity_scientific.py | py | 3,029 | python | en | code | null | github-code | 36 |
# 03. Read and validate user registration data:
#   Name: more than 3 characters; Age: between 0 and 150; Salary: above zero;
#   Sex: 'f' or 'm'; Marital status: 's', 'c', 'v' or 'd'.
# len(string) gives the number of characters in a text.
in_name = str(input('insira seu nome: '))
in_age = int(input('digite sua idade: '))
in_salary = float(input('digite seu salรกrio: '))
in_genere = str(input('digite seu sexo: ')).upper()
# fix: normalize case like the sex field, otherwise lowercase answers never match
in_marital_status = str(input('Estado Civil: ')).upper()
while len(in_name) < 3:
    in_name = str(input('*o nome precisa conter no mรญnimo 3 letras: '))
# fix: the old chained comparison `0 < in_age > 150` only caught ages above
# 150 and silently accepted negative ages.
while in_age < 0 or in_age > 150:
    in_age = int(input('*vocรช precisa ter entre 0 e 150 anos... '))
while in_salary <= 0:
    in_salary = float(input('*seu salรกrio precisa ser maior que zero...'))
# fix: tuple membership — with `in 'FM'` an empty answer (or "FM") passed.
while in_genere not in ('F', 'M'):
    in_genere = str(input('*Digite "M" para masculino e "F" para mulher:...')).upper()
while in_marital_status not in ('S', 'C', 'V', 'D'):
    in_marital_status = str(input('*digite "s" para solteiro \n "c" para casado \n "v" para viรบvo \n ou "D" para divorciado...')).upper()
print(f'Nome:{in_name} \n Idade:{in_age} \n Renda:{in_salary} \n Sexo:{in_genere} \n Estado Civil:{in_marital_status}' )
| KIINN666/crispy-umbrella | cadastro_v2.0.py | cadastro_v2.0.py | py | 1,222 | python | pt | code | 0 | github-code | 36 |
74088296424 | """
Normally step 1 to align trimmed reads
e.g.
"""
import os
import sys
from tqdm import trange
from joblib import Parallel, delayed
import re
# CLI arguments: <input_root> <output_root> <STAR genome dir>
input_root = sys.argv[1]
output_root = sys.argv[2]
genome_dir = sys.argv[3]
# Collect every directory that contains at least one file — each such
# directory is assumed to hold one sample's paired *.gz reads.
subdirs = []
for subdir, dirs, files in os.walk(input_root):
    for file in files:
        subdirs.append(subdir)
subdirs = sorted(set(subdirs))
if not os.path.isdir(output_root):
    os.system(f'mkdir -p {output_root}')
def process(pair_dir):
    # Sample name = last path component of the pair directory.
    name = re.search(".*/(.*$)", pair_dir).group(1)
    # Skip samples that already have an aligned BAM (cheap resume support).
    if os.path.exists(f"{output_root}/{name}_Aligned.out.bam"):
        return
    # NOTE(review): paths are interpolated into a shell command unescaped;
    # only safe for trusted directory names.
    os.system(
        (f'STAR --runThreadN 4 --genomeDir {genome_dir} --readFilesIn {pair_dir}/*gz '
         f'--readFilesCommand zcat --outFileNamePrefix {output_root}/{name}_ --quantMode TranscriptomeSAM '
         '--outSAMtype BAM Unsorted'))
# Two STAR jobs in parallel (each already uses 4 threads), with a progress bar.
Parallel(n_jobs=2)(delayed(process)(subdirs[i]) for i in trange(len(subdirs)))
| ZhaoxiangSimonCai/BioInfoScripts | RNA_workflows/star_by_folders.py | star_by_folders.py | py | 931 | python | en | code | 0 | github-code | 36 |
40571473891 | import reflex as rx
# Shared color tokens for the app theme.
gradient = "linear(to-l, #7928CA, #FF0080)"
# NOTE(review): this value embeds a "bgGradient='...'," prop-assignment string
# rather than a bare CSS value — confirm the consuming component expects that.
background_gradient = "bgGradient='radial-gradient(circle, rgba(238,174,202,1) 0%, rgba(148,187,233,1) 100%);',"
shadow = "0 0 5px 5px #FF0080"
# Common styles for the app.
app_style = dict(
    bgGradient=background_gradient
)
# Common styles for text.
message_style = dict(
    padding="1em",
    border_radius="5px",
    margin_y="0.25em",
    display="inline-block",
    font_family= "Hack Nerd Font Mono",
    font_size="15px",
)
# Common styles for buttons.
button_style = dict(
    border_radius="1em",
    bgGradient=gradient,
    opacity="0.8",
    _hover={
        "background": "white",
        "box_shadow": shadow,
    },
)
# Common styles for headings.
heading_style = dict(
    # font_family= "Hack Nerd Font Mono",
    font_size="2.5em",
    font_weight="bold",
    margin_y="0.25em",
    margin_x="0.25em",
    text_align="center",
    bgGradient=gradient,
    bgClip="text",  # clip the gradient to the glyphs for gradient text
)
subheading_style = dict(
    # font_family= "Hack Nerd Font Mono",
    font_size="1.5em",
    text_align="center",
    bgGradient=gradient,
    bgClip="text",
)
| BortPablo/reflex_portfolio | reflex_test/style.py | style.py | py | 1,182 | python | en | code | 0 | github-code | 36 |
3084548560 | from datetime import datetime
from metloom.pointdata import SnotelPointData
from conversions import imperial_to_metric
def get_snotel_data(name, site_id, dates):
    """Fetch daily SWE, snow depth and average air temperature for one SNOTEL
    station and return them converted to metric units.

    NOTE(review): assumes `dates` is a (start, end) pair accepted by metloom's
    get_daily_data — confirm with callers.
    """
    station = SnotelPointData(site_id, name)
    wanted = station.ALLOWED_VARIABLES
    df = station.get_daily_data(
        dates[0], dates[1],
        [wanted.SWE, wanted.SNOWDEPTH, wanted.TEMPAVG])
    # Convert the imperial columns into metric ones under new names.
    df['SWE'] = imperial_to_metric(df['SWE'], 'inch')
    df['SD'] = imperial_to_metric(df['SNOWDEPTH'], 'inch')
    df['temp'] = imperial_to_metric(df['AVG AIR TEMP'], 'fahrenheit')
    # Drop the raw imperial columns and their unit annotations.
    df = df.drop(['SWE_units', 'SNOWDEPTH_units', 'SNOWDEPTH',
                  'AVG AIR TEMP_units', 'AVG AIR TEMP'], axis=1)
    df['site_name'] = name
    return df
73692413545 | import mock
import testtools
from stackalytics.dashboard import helpers
class TestHelpers(testtools.TestCase):
    # Unit tests for stackalytics.dashboard.helpers user/company utilities.

    @mock.patch('time.time')
    def test_get_current_company(self, mock_time_time):
        # A company whose end_date lies in the future counts as current.
        current_timestamp = 1234567890
        mock_time_time.return_value = current_timestamp
        user = {
            'user_id': 'smith',
            'user_name': 'John Smith',
            'companies': [{
                'company_name': 'Current',
                'end_date': current_timestamp + 1
            }, {
                'company_name': 'TheCompany',
                'end_date': 0
            }]
        }
        self.assertEqual('Current', helpers.get_current_company(user))

    @mock.patch('stackalytics.dashboard.helpers.make_link')
    def test_extend_user(self, mock_make_link):
        # extend_user should add id, text and a company_link built via make_link.
        company_link = mock.Mock()
        mock_make_link.return_value = company_link
        user = {
            'user_id': 'smith',
            'user_name': 'John Smith',
            'companies': [{
                'company_name': 'TheCompany',
                'end_date': 0
            }]
        }
        expected = {
            'user_id': 'smith',
            'user_name': 'John Smith',
            'companies': [{
                'company_name': 'TheCompany',
                'end_date': 0
            }],
            'id': 'smith',
            'company_link': company_link,
            'text': 'John Smith',
        }
        observed = helpers.extend_user(user)
        self.assertEqual(expected, observed)
        mock_make_link.assert_called_once_with('TheCompany', '/', mock.ANY)

    @mock.patch('time.time')
    @mock.patch('stackalytics.dashboard.helpers.make_link')
    def test_extend_user_current_company(self, mock_make_link, mock_time_time):
        # With a still-active company, the link must point at it, not the old one.
        company_link = mock.Mock()
        mock_make_link.return_value = company_link
        current_timestamp = 1234567890
        mock_time_time.return_value = current_timestamp
        user = {
            'user_id': 'smith',
            'user_name': 'John Smith',
            'companies': [{
                'company_name': 'Current',
                'end_date': current_timestamp + 1
            }, {
                'company_name': 'TheCompany',
                'end_date': 0
            }]
        }
        helpers.extend_user(user)
        mock_make_link.assert_called_once_with('Current', '/', mock.ANY)
| Mirantis/stackalytics | stackalytics/tests/unit/test_helpers.py | test_helpers.py | py | 2,389 | python | en | code | 12 | github-code | 36 |
70679292584 | class OrderEvent(Event):
"""
Signifies event to execute order on stock.
"""
    def __init__(self, symbol, order_type, quantity, direction):
        """Create an order event.

        symbol: ticker of the instrument to trade.
        order_type: order kind, passed through as given.
        quantity: number of units to trade.
        direction: buy/sell indicator, passed through as given.
        """
        self.type = "ORDER"  # event-type tag used for dispatch
        self.symbol = symbol
        self.order_type = order_type
        self.quantity = quantity
        self.direction = direction
| kevshi/trading | event/order_event.py | order_event.py | py | 276 | python | en | code | 0 | github-code | 36 |
22264746136 | class RutaPeligrosa(Exception):
# Completar
    def __init__(self, tipo_peligro, nombre_estrella):
        """Exception for a dangerous route: records the hazard kind and the
        star's name, with a fixed Spanish warning as the exception message."""
        super().__init__('ยกAlto ahรญ viajero! Hay una amenaza en tu ruta...')
        self.tipo_peligro = tipo_peligro
        self.nombre_estrella = nombre_estrella
    def dar_alerta_peligro(self):
        """Print the hazard-specific warning ("luz"/"tamaรฑo"/"calor") and then
        announce that the star has been removed from the route."""
        if self.tipo_peligro == "luz":
            print("ยกTen cuidado, que con tanta luz no podrรกs ver :(!")
        elif self.tipo_peligro == "tamaรฑo":
            print("ยกOoops! Esa estrella es demasiado grande...")
        elif self.tipo_peligro == "calor":
            print("ยกAlerta! ยกAlerta! ยกPeligro inminente de quedar carbonizados!")
        print(f"La Estrella {self.nombre_estrella} ha quedado fuera de tu ruta.\n")
        return
| Alzvil/IIC2233-Progra-Avanzada-Tareas-2021-1 | Actividades/AF2/excepciones_estrellas.py | excepciones_estrellas.py | py | 755 | python | es | code | 0 | github-code | 36 |
73915818985 | from .base import RegexVocabulary, left_pad, NoWildcardsVocabulary, NoRangeFillVocabulary, NoCheckVocabulary,\
ProcedureVocabulary, ModifierVocabulary
import re
from itertools import product
# An HCPCS code is optional leading letters, digits, optional trailing letters.
_hcpcs_split_regex = re.compile('^([A-Z]*)([0-9]+)([A-Z]*)$')

def hcpcs_split(code):
    """Split an HCPCS code into its (leading letters, digits) parts.

    Any trailing letters are matched but discarded; a non-matching code
    raises AttributeError (no match object).
    """
    prefix, digits, _suffix = _hcpcs_split_regex.match(code).groups()
    return prefix, digits
def hcpcs_join(letter_part, number_part):
    """Re-assemble an HCPCS code, zero-padding the digits so the letter
    prefix plus digits total five characters."""
    width = 5 - len(letter_part)
    # '%.*d' takes the minimum digit count from the argument list.
    return '%s%.*d' % (letter_part, width, int(number_part))
class HCPCS(RegexVocabulary, NoCheckVocabulary, ProcedureVocabulary):
    # Vocabulary for HCPCS procedure codes; '*' acts as a per-position wildcard.
    vocab_name = 'HCPCS'

    def __init__(self):
        # Pattern accepts full 5-char codes (level II letter prefix or CPT
        # digits with an optional F/M/T/U suffix) plus shorter digit-only forms.
        RegexVocabulary.__init__(self, '([\*ABCDEGHJKLMPQRSTVX\d][\d\*]{3}[FMTU\d\*])|([\d\*]{1,4}[FMTU\d\*])|([\d\*]{1,5})', ignore_case=True)

    def _fill_range(self, lower, upper):
        # Enumerate all codes between lower and upper (inclusive) by stepping
        # the numeric middle; the letter prefix/suffix must match on both ends.
        lower_start_letter, lower_number, lower_end_letter = _hcpcs_split_regex.match(lower).groups()
        upper_start_letter, upper_number, upper_end_letter = _hcpcs_split_regex.match(upper).groups()
        assert lower_start_letter == upper_start_letter
        assert lower_end_letter == upper_end_letter
        result = []
        for num in range(int(lower_number), int(upper_number) + 1):
            # Pad the numeric part so the rebuilt code stays 5 characters.
            n = 5 - len(lower_start_letter) - len(lower_end_letter)
            result.append(lower_start_letter + left_pad(str(num), n) + lower_end_letter)
        return result

    # Allowed characters for each of the five code positions, used when
    # expanding '*' wildcards.
    _places = ['ABCDEGHJKLMPQRSTVX0123456789'] + \
              3 * ['0123456789'] + \
              ['FMTU0123456789']

    def _match_pattern(self, pattern):
        # Expand every '*' into the full character set for its position and
        # return the cartesian product of concrete codes.
        options = []
        for i, item in enumerate(pattern):
            if item == '*':
                options.append(self._places[i])
            else:
                options.append([item])
        return map(''.join, product(*options))

    def _standardize(self, code):
        # Canonical form: upper-case, left-padded to 5 characters.
        return left_pad(code.strip().upper(), 5)
class HCPCSModifier(RegexVocabulary, NoWildcardsVocabulary, NoRangeFillVocabulary, NoCheckVocabulary, ModifierVocabulary):
    # Vocabulary for two-character HCPCS modifier codes (no wildcards/ranges).
    vocab_name = 'HCPCSMOD'

    def __init__(self):
        # Exactly two alphanumeric characters, case-insensitive on input.
        RegexVocabulary.__init__(self, '[A-Za-z\d]{2}')

    def _standardize(self, code):
        # Canonical form: trimmed, upper-case, must remain exactly 2 chars.
        result = code.strip().upper()
        assert len(result) == 2
        return result
| modusdatascience/clinvoc | clinvoc/hcpcs.py | hcpcs.py | py | 2,356 | python | en | code | 8 | github-code | 36 |
class Solution(object):
    def findRotateSteps(self, ring, key):
        """Minimum steps to spell `key` by rotating `ring` (LeetCode 514).

        Each character costs one button press plus the shortest rotation from
        the current pointer position. Solved with a right-to-left DP over the
        remaining suffix of `key`.

        :type ring: str
        :type key: str
        :rtype: int
        """
        ring_len = len(ring)

        def rotation(a, b):
            # Shortest rotation distance between two ring positions.
            gap = abs(a - b)
            return min(gap, ring_len - gap)

        # positions[c] -> every ring index holding character c.
        positions = collections.defaultdict(list)
        for idx, ch in enumerate(ring):
            positions[ch].append(idx)

        # cost[i]: steps still needed when the pointer sits at index i and the
        # suffix of `key` from the current position onward must be spelled.
        cost = [0] * ring_len
        for pos in range(len(key) - 1, -1, -1):
            target = key[pos]
            # The pointer arrives here from the previous key character
            # (or from ring[0] at the very start).
            anchor = ring[0] if pos == 0 else key[pos - 1]
            for i in positions[target]:
                cost[i] += 1  # the button press for this character
            for start in positions[anchor]:
                cost[start] = min(rotation(start, end) + cost[end]
                                  for end in positions[target])
        return cost[0]
18433559527 | import pygame
import random
import time
import turtle
class Question:
    """A quiz item: the prompt text (`question`) and its expected answer."""

    def __init__(self, question, answer):
        self.question = question
        self.answer = answer
# Holds the player's running score, the current round and a stopwatch.
class GameState:
    score = 0      # accumulated points
    roundnum = 1   # current round number

    def reset_timer(self):
        """Restart the stopwatch: remember the current wall-clock time."""
        self.start_time = time.time()

    def get_timer(self):
        """Seconds elapsed since the last reset_timer() call."""
        return time.time() - self.start_time

# Single shared game-state instance used throughout the script.
state = GameState()
# Audio helpers: play background music / one-shot effects via pygame's mixer.
def play_music(file):
    pygame.mixer.init()
    pygame.mixer.music.load(file)
    pygame.mixer.music.play()
def play_sound(file):
    pygame.mixer.init()
    sound = pygame.mixer.Sound(file)
    sound.play()
# Turtle dedicated to drawing the character sprite.
avatar = turtle.Turtle()
def draw_avatar(image):
    # turtle.addshape must be called before an image can be used as a shape.
    turtle.addshape(image)
    avatar.clear()
    avatar.penup()
    avatar.setposition(350, -100)
    # Note: turtle can only draw images in .gif format.
    avatar.shape(image)
# Pen dedicated to drawing the elapsed-time display.
pen_timer = turtle.Turtle()
def draw_timer():
    # Hide the turtle cursor.
    pen_timer.hideturtle()
    # Lift the pen so moving leaves no trail.
    pen_timer.penup()
    # Clear the previous value so digits do not overlap.
    pen_timer.clear()
    # Set the color.
    pen_timer.color('green')
    # Position the readout.
    pen_timer.setposition(-240, 170)
    # Write the elapsed seconds.
    pen_timer.write(round(state.get_timer()), font=get_font(20))
    # Redraw again in 1000 ms (1 second).
    turtle.Screen().ontimer(draw_timer, 1000)
# Load the question/answer data for one round from text files.
def read_data(round_num):
    # Questions live in r<round>q<i>.txt, answers in r<round>a<i>.txt.
    # Number of questions per round.
    num_questions = 3
    # Start from an empty data list.
    data = []
    # range(1, x + 1) walks the indices 1, 2, ..., x.
    for i in range(1, num_questions + 1):
        # Read the question; encoding='utf-8' handles Vietnamese text.
        filename ='r' + str(round_num) + 'q' + str(i) + '.txt'
        f = open(filename, 'r', encoding='utf-8')
        question = f.read()
        f.close()
        # Read the answer.
        filename ='r' +str(round_num) + 'a' + str(i) + '.txt'
        f = open(filename, 'r', encoding='utf-8')
        answer = f.read()
        f.close()
        # Create a Question object and append it to the data list.
        data.append(Question(question, answer))
    # Return the data list.
    return data
# Generate random mental-arithmetic questions for the given round.
def generate_math_questions(round_num):
    # Start with an empty question list.
    data = []
    # Number of questions to generate.
    num_questions = 3
    # Two operations: addition and multiplication.
    operators = ["+", "x"]
    # Operand range grows with the round: 1 digit, 2 digits, then 3 digits.
    if round_num == 1:
        max_digits = 9
        min_digits = 1
    elif round_num == 2:
        max_digits = 99
        min_digits = 10
    else:
        max_digits = 999
        min_digits = 100
    for i in range(num_questions):
        # Pick two random operands within the round's range.
        a = random.randint(min_digits, max_digits)
        b = random.randint(min_digits, max_digits)
        # Pick a random operation.
        op = random.choice(operators)
        # Build the prompt text.
        question = str(a) + " " + op + " " + str(b) + " = ?"
        # Compute the answer.
        if op == "+":
            answer = a + b
        elif op == "x":
            answer = a * b
        # Append the question to the list.
        data.append(Question(question, str(answer)))
    # Return the generated question list.
    return data
# Return an (family, size, style) font tuple for turtle.write.
def get_font(font_size):
    return ("Arial", font_size, "normal")
# Pen dedicated to drawing the score display.
pen_score = turtle.Turtle()
def draw_score():
    # Hide the turtle cursor.
    pen_score.hideturtle()
    # Lift the pen.
    pen_score.penup()
    pen_score.clear()
    pen_score.color('red')
    pen_score.setposition(300, 175)
    temp ="ROUND: "+ str(state.roundnum)
    pen_score.write(temp, font=get_font(30))
    pen_score.color('white')
    pen_score.setposition(340, 110)
    pen_score.write(state.score, font=get_font(40))
# Pen dedicated to drawing the round number.
pen_round = turtle.Turtle()
def draw_round_number(round_num):
    # NOTE(review): the round_num parameter is unused; state.roundnum is
    # displayed instead — confirm which was intended.
    pen_round.hideturtle()
    pen_round.penup()
    pen_round.clear()
    pen_round.color('red')
    pen_round.setposition(300, 175)
    temp ="ROUND: "+ str(state.roundnum)
    pen_round.write(temp, font=get_font(30))
def ask_question(question):
    # Show the question on the console and the screen, reset the stopwatch,
    # collect the player's answer via a dialog, then grade it.
    print("***************************")
    print(question.question)
    turtle.clear()
    turtle.hideturtle()
    turtle.penup()
    turtle.setposition(-240, 20)
    turtle.write(question.question, font=get_font(15))
    draw_score()
    draw_avatar('KimNguu-normal.gif')
    state.reset_timer()
    result = turtle.textinput("Siรชu Lแบญp Trรฌnh", "Cรขu trแบฃ lแปi cแปงa bแบกn lร gรฌ?\n")
    check_result(result, question.answer)
def check_result(result, answer):
    # Grade an answer: +10 points when correct, +5 bonus when answered in
    # under 5 seconds; plays a sound and swaps the avatar accordingly.
    time_taken = state.get_timer()
    if time_taken < 5:
        bonus = 5
    else:
        bonus = 0
    # NOTE(review): reads the module-level round_number loop variable to keep
    # the displayed round in sync — confirm intended coupling.
    state.roundnum =round_number
    if result == answer:
        state.score += 10 + bonus
        play_sound("correct_answer.wav")
        draw_avatar('KimNguu-correct.gif')
        print("ฤรบng rแปi")
    else:
        play_sound("wrong_answer.wav")
        draw_avatar('KimNguu-wrong.gif')
        print("Sai rแปi")
    time.sleep(0.5)
    print("Thแปi gian trแบฃ lแปi cรขu hแปi lร :", round(time_taken), "giรขy")
    if bonus > 0:
        print("Bแบกn nhแบญn ฤฦฐแปฃc ฤiแปm thฦฐแปng lร ", bonus, "vรฌ trแบฃ lแปi nhanh")
    print("ฤiแปm hiแปn tแบกi cแปงa bแบกn lร : ", state.score)
def setup_turtle():
    # Configure the window: size, background image and title.
    screen = turtle.Screen()
    screen.setup(1200, 600)
    screen.bgpic('background.gif')
    turtle.title("Siรชu lแบญp trรฌnh")
# Set up the screen.
setup_turtle()
# Start the background music.
play_music("music.wav")
# Start the on-screen timer loop.
state.reset_timer()
draw_timer()
# Main game loop: three rounds of file-based plus generated questions.
round_number = 1
while round_number < 4:
    #draw_round_number(round_number)
    data = read_data(round_number) + generate_math_questions(round_number)
    for question in data:
        ask_question(question)
    round_number += 1
| aitomatic/contrib | src/aito/util/finalproject.py | finalproject.py | py | 7,485 | python | vi | code | 2 | github-code | 36 |
7796357868 | # ่ฟ้้ขๅๅๅฝข่ฏ็ๅบๅซๆฏๆ่ฝฌ่ฏๅช่ฝๅๅไธๆฌก๏ผๅทฆๅๆขๅ็้จๅไพ็ถๆฏ้กบๅบ็๏ผ
# ๅญๅจ็้ฎ้ข๏ผๅ ไธบๆฏๆผๆฅๅพๅฐ็๏ผๆไปฅๅจๆญคๆผๆฅๅฏไปฅๅพๅฐๅๆฅ็็ปๆ๏ผๆไปฅๅช่ฆๅคๆญs1ๆฏๅฆๅจๆผๆฅๅ็ๅญ็ฌฆไธฒๅฐฑๅฏไปฅไบ
# AC
def solution(s1, s2, n, m):
if n != m or sorted(s1) != sorted(s2):
return 'NO'
for i in range(n):
new_s2 = s2[i:] + s2[:i]
if new_s2 == s1:
return 'YES'
return 'NO'
# ๅพๅฐ็ๆถ่ท๏ผๅญ็ฌฆไธฒๅคๆญๆฏๅฆๅ
ๅซ๏ผ็ดๆฅ็จinๅฐฑๅฏไปฅไบ!
# ๆถ้ดๆด็ญ๏ผ
# AC
# def solution(s1, s2, n, m):
# if n != m or sorted(s1) != sorted(s2):
# return 'NO'
#
# if s1 in s2 + s2:
# return 'YES'
#
# return 'NO'
if __name__ == '__main__':
n, m = map(int, input().strip().split(' '))
s1 = input().strip()
s2 = input().strip()
print(solution(s1, s2, n, m))
| 20130353/Leetcode | target_offer/ๅญ็ฌฆไธฒ้ข/ๅญ็ฌฆไธฒๅๆข-ๆ่ฝฌ่ฏ.py | ๅญ็ฌฆไธฒๅๆข-ๆ่ฝฌ่ฏ.py | py | 914 | python | zh | code | 2 | github-code | 36 |
74113768742 | import click
# import pickle
# import cv2
from recognize import process, recognize, draw, show
@click.command()
@click.argument('image')
@click.option('--encodings', '-e', default='encodings.pickle', help='path to db of BTS facial encodings.')
@click.option('--detection', default='cnn', help='Which face detection model to use. Options are "hog" or "cnn".')
@click.option('--tolerance', default=0.4, help='Tolerance level: (0...1); lower is more accurate, higher for better performance')
def main(image, encodings, detection, tolerance):
names = []
encodings, image = process(image, encodings)
boxes = recognize(image, names, encodings, detection, tolerance)
draw(image, boxes, names)
show(image)
if __name__ == '__main__':
main()
| cache-monet/bts_recognition | image.py | image.py | py | 741 | python | en | code | 1 | github-code | 36 |
2259081804 | import pyperclip as pc
# Roll-call helper: asks about each student once, then builds the attendance
# message and copies it to the clipboard via pyperclip (imported as pc).
names = ["Apple", "Banana", "Cherry", "Dog", "Elephant"]
present = []
absent = []
not_audio = []

# Ask about every student exactly once.
# fix: the old loop stopped on a hard-coded `number == 38` sentinel, which
# raised IndexError for any roster shorter than 39 names.
for question in names:
    answer = input(question + ": ")
    if answer == "p":
        present.append(question)
    if answer == "a":
        absent.append(question)
    if answer == "n":
        not_audio.append(question)

# Let the user retract mistaken absences.
# fix: guard the removal — list.remove raised ValueError for unknown names
# (including the empty string when nothing was typed).
correction = input("Names to be removed: ")
for name in correction.split(","):
    if name in absent:
        absent.remove(name)

# Numbered absentee list, one "i.Name" per line.
absentees = ''
for counter, name in enumerate(absent, start=1):
    absentees += f"{counter}.{name}\n"

# Numbered list of students without audio; "Nil" when empty.
not_audio_names = ''
for counter, name in enumerate(not_audio, start=1):
    not_audio_names += f"{counter}.{name}\n"
if not_audio_names == '':
    not_audio_names = "Nil"

# Totals derived from the roster length (was hard-coded to 5).
absent_length = len(absent)
total = len(names)
message = f"Good Morning Ma'am, the attendance of IX-G for today is as follows: \nAbsentees: \n{absentees} \nStudents not connected to audio:\n{not_audio_names}.\nThe total strength today was {total - absent_length}/{total}."
pc.copy(message)
print(message)
| PythonGeek07/Attendance-_Bot | main.py | main.py | py | 1,288 | python | en | code | 0 | github-code | 36 |
class Conversion:
    """Interactive one-shot temperature converter.

    NOTE(review): all prompting and conversion happens in the class body, so
    merely importing this module triggers the dialog; kept as-is to preserve
    the original structure.
    """
    # Code created by Luke Reddick
    # Please use inputs of one character, so C, F, K, c, f, k
    # for celsius, fahrenheit, and Kelvin respectively
    convertFrom = str((input("What temperature are you converting from? (C/F/K) : " + "\n")))
    convertTo = str((input("What temperature would you like to convert to? (C/F/K) : " + "\n")))
    try:
        valueTemp = float((input(("What is the value of your temperature you would like to convert? : " + "\n"))))
    except ValueError:  # narrowed from a bare except: only float parsing can fail here
        print("Your input was invalid.")

    def convert(w, s, x):
        """Convert value x from scale s to scale w and print the result."""
        if s == "F" or s == "f":
            if w == "C" or w == "c":
                result = (x - 32) * (5/9)
                print(str(x) + " degrees Fahrenheit is \n" + str(round(result, 3)) + " degrees Celsius \n")
            elif w == "K" or w == "k":
                result = (x - 32) * (5/9) + 273.15
                print(str(x) + " degrees Fahrenheit is \n" + str(round(result, 3)) + " degrees Kelvin \n")
            else:
                print("Your input did not make sense to the program, try again.")
        elif s == "C" or s == "c":
            if w == "F" or w == "f":
                result = (x * (9/5)) + 32
                print(str(x) + " degrees Celsius is \n" + str(round(result, 3)) + " degrees Fahrenheit \n")
            elif w == "K" or w == "k":
                result = x + 273.15
                print(str(x) + " degrees Celsius is \n" + str(round(result, 3)) + " degrees Kelvin \n")
            else:
                print("Your input did not make sense to the program, try again.")
        elif s == "K" or s == "k":
            if w == "F" or w == "f":
                # fix: Kelvin -> Fahrenheit is (K - 273.15) * 9/5 + 32; the old
                # code computed (x - 273.15 - 32) * (5/9), which is wrong.
                result = (x - 273.15) * (9/5) + 32
                print(str(x) + " degrees Kelvin is \n" + str(round(result, 3)) + " degrees Fahrenheit \n")
            elif w == "C" or w == "c":
                result = x - 273.15
                print(str(x) + " degrees Kelvin is \n" + str(round(result, 3)) + " degrees Celsius \n")
            else:
                print("Your input did not make sense to the program, try again. \n")
        else:
            print("Your input did not make sense to the program, try again. \n")

    try:
        convert(convertTo, convertFrom, valueTemp)
    except Exception:  # valueTemp may be undefined (NameError) after bad input
        print("Since input was invalid... \n Use single characters for the first two inputs and digits for the third. \n")
    input(" \nPress enter to exit.")
| Lukares/Asides | conversionTemp.py | conversionTemp.py | py | 2,455 | python | en | code | 0 | github-code | 36 |
13988027867 | import numpy as np
from scipy.interpolate import interp1d
from skfmm import travel_time, distance
from scipy.signal import resample
def resample2d(x, shape=()):
    """Resample a 2-D array to ``shape`` with scipy's Fourier-method resample.

    Parameters
    ----------
    x : ndarray
        2-D input array.
    shape : sequence of two ints
        Target (nrows, ncols); must be non-empty.

    Returns
    -------
    ndarray of shape ``(shape[0], shape[1])``.

    Raises
    ------
    ValueError
        If ``shape`` is empty.
    """
    # The default is now an immutable tuple; the original used a mutable
    # list default argument (a classic Python pitfall).
    if len(shape) == 0:
        raise ValueError('shape should not be empty.')
    rows_resampled = resample(x, shape[0], axis=0)
    return resample(rows_resampled, shape[1], axis=1)
def transform_normal_scores(scores, nscore):
    """Map standard-normal quantiles onto the empirical distribution from a
    dynamic rupture simulation.

    ``nscore['nscore']`` holds the standard quantiles and ``nscore['x']`` the
    matching empirical values; scores outside the table are clamped to the
    distribution's ends.
    """
    quantiles = nscore['nscore']
    values = nscore['x']
    clamp = (values.min(), values.max())
    mapper = interp1d(quantiles, values, bounds_error=False, fill_value=clamp)
    return mapper(scores)
def linear_taper(n, inds=(0, -1), vals=(0.0, 1.0)):
    """Return a length-``n`` coefficient array with a linear ramp.

    The ramp runs over indexes ``inds`` with end values ``vals``; entries
    outside the ramp stay at 1.0.

    Args:
        n (int): length of the taper
        inds (tuple): indexes of the ramp, default whole range
        vals (tuple): values at ``inds``, default (0.0, 1.0)

    Returns:
        ndarray of coefficients in [0, 1].
    """
    import numpy as np
    positions = np.arange(n)
    weights = np.ones(n)
    # Fit a line through (inds[0], vals[0]) with the requested rise/run.
    rise = vals[1] - vals[0]
    run = n if inds == (0, -1) else inds[1] - inds[0]
    slope = rise / run
    offset = vals[0] - slope * inds[0]
    weights[inds[0]:inds[1]] = slope * positions[inds[0]:inds[-1]] + offset
    return weights
def boundary_taper(field, taper_width=10, free_surface=True, values=0):
    """Taper ``field`` linearly to ``values`` along its boundary.

    field (2d ndarray)  : rupture field to taper.
    taper_width (int)   : width of the tapered boundary band.
    free_surface (bool) : when True the top edge is tapered too; otherwise
                          the top (free surface) row band is left untouched.
    values              : end value(s) of the ramp (default 0).

    Returns the element-wise product of ``field`` with the taper mask,
    same shape as ``field``.
    """
    nrows, ncols = field.shape
    w = taper_width
    if free_surface:
        core = np.ones((nrows - 2 * w, ncols - 2 * w))
        pad_spec = ((w, w), (w, w))
    else:
        core = np.ones((nrows - w, ncols - 2 * w))
        pad_spec = ((0, w), (w, w))
    mask = np.pad(core, pad_spec, 'linear_ramp', end_values=values)
    assert field.shape == mask.shape
    return field * mask
"""
Helping functions.
"""
def get_dip(nhat1, nhat2, nhat3):
    """Per-cell dip angle (degrees) from the three fault-normal components.

    The angle between the normal and its horizontal (y-dropped) projection
    is computed via a normalized dot product; dip = 90 - that angle.
    """
    nz, nx = nhat1.shape
    dip = np.ones([nz, nx])
    magnitude = lambda v: np.sqrt(v[0]**2 + v[1]**2 + v[2]**2)
    for row in range(nz):
        for col in range(nx):
            full = (nhat1[row, col], nhat2[row, col], nhat3[row, col])
            horiz = (full[0], 0, full[2])
            scale = 1.0 / (magnitude(horiz) * magnitude(full))
            cos_arg = scale * (full[0]**2 + full[2]**2)
            # guard against values like 1 + eps from round-off before arccos
            if np.isclose(1.0, cos_arg):
                cos_arg = 1.0
            theta = np.rad2deg(np.arccos(cos_arg))
            dip[row, col] = 90 - theta
    return dip
def get_moment(slip, vs, rho, params):
    """Seismic moment per cell: rigidity * cell area * slip.

    Rigidity mu = vs^2 * rho; cell area = dx^2 taken from ``params['dx']``.
    """
    rigidity = vs * vs * rho
    cell_area = params['dx'] * params['dx']
    return rigidity * cell_area * slip
def get_strike(nhat1, nhat3, mean_strike=270):
    """Per-cell strike angle (degrees) from the x- and z-components of the
    fault normal, shifted so the reference strike of 270 becomes
    ``mean_strike``.

    Parameters
    ----------
    nhat1, nhat3 : 2-D ndarrays (same shape)
        x- and z-components of the fault normal.
    mean_strike : float
        Target mean strike; the result is strike - 270 + mean_strike.

    Returns
    -------
    2-D ndarray of strike angles.
    """
    nz,nx = nhat1.shape
    strike = np.ones([nz,nx])
    for i in range(nz):
        for j in range(nx):
            # horizontal projection of the normal (y-component dropped)
            nproj = (nhat1[i,j], 0, nhat3[i,j])
            x3 = (1,0,0)
            norm = lambda v: np.sqrt(v[0]**2+v[1]**2+v[2]**2)
            scaling = 1.0 / ( norm(x3) * norm( nproj) )
            # NOTE(review): `scaling` multiplies the *angle* rather than the
            # arccos argument, and nproj[2] is not normalized before arccos;
            # this differs from get_dip's construction — confirm the intended
            # geometry before relying on absolute values here.
            theta = np.rad2deg(scaling * np.arccos(nproj[2]))
            # Map theta into a strike about the 270-degree reference,
            # branch chosen by the quadrant of (nhat1, nhat3).
            if nhat1[i,j] > 0 and nhat3[i,j] > 0:
                strike[i,j] = 270 + theta
            elif nhat1[i,j] < 0 and nhat3[i,j] > 0:
                strike[i,j] = 270 - theta
            elif nhat1[i,j] < 0 and nhat3[i,j] < 0:
                # in 3rd quad
                # NOTE(review): identical to the 2nd-quadrant formula — verify.
                strike[i,j] = 270 - theta
            elif nhat1[i,j] > 0 and nhat3[i,j] < 0:
                # in 4th quad
                strike[i,j] = theta - 90
    # rotate to different strike
    stk = strike - 270 + mean_strike
    return stk
def source_time_function():
    """Placeholder for a source-time-function generator; not implemented."""
    pass
def compute_trup(vrup, params):
    """Rupture (first-arrival) time grid from a rupture-velocity field.

    Seeds the fast-marching solver (skfmm.travel_time) with a level-set
    grid that is +1 everywhere except -1 at the hypocenter cell
    ``params['ihypo']``; grid spacing is ``params['dx']``.
    """
    level_set = np.ones((params['nz'], params['nx']))
    hypo = params['ihypo']
    level_set[hypo[0], hypo[1]] = -1
    arrival = travel_time(level_set, speed=vrup, dx=params['dx'])
    return np.array(arrival)
def expand_bbp_velocity_model(velocity_model_bbp_format, nx, nz, dx):
    """Expand a BBP 1-D layered model into (nz, nx) property grids.

    BBP rows are [layer_thickness, vp, vs, rho, qp, qs]; thicknesses are
    converted to cumulative depths, each discrete grid depth is looked up
    in that table, and the layer's vp/vs/rho are broadcast across the row.

    Returns (vp, vs, rho), each an (nz, nx) ndarray.
    """
    # discrete depths of the grid rows
    grid_depths = np.linspace(0, (nz - 1) * dx, nz)
    # bbp provides layer thickness, so convert to cumulative layer bottoms
    layer_bottoms = np.cumsum(velocity_model_bbp_format[:, 0])
    layer_idxs = np.searchsorted(layer_bottoms, grid_depths, side='right')
    vp = np.zeros((nz, nx))
    vs = np.zeros((nz, nx))
    rho = np.zeros((nz, nx))
    for row, layer in enumerate(layer_idxs):
        vp[row, :] = velocity_model_bbp_format[layer, 1]
        vs[row, :] = velocity_model_bbp_format[layer, 2]
        rho[row, :] = velocity_model_bbp_format[layer, 3]
    return vp, vs, rho
if __name__ == "__main__":
from utils import plot_2d_image
mod = np.loadtxt('./central_japan_bbp1d.txt')
nx = 273
nz = 136
dx = 0.1
_, vs, _ = expand_bbp_velocity_model(mod, nx, nz, dx)
ax = plot_2d_image(vs, nx=nx, nz=nz, dx=dx,
clabel = r'$c_s$ (km/s) ', xlabel="Distance (km)", ylabel="Distance (km)",
surface_plot=False, contour_plot=False)
| wsavran/sokrg | krg_utils.py | krg_utils.py | py | 5,948 | python | en | code | 3 | github-code | 36 |
7667794703 | #Usage: python3 kptable-appendix-11b.py [-h] [--help]
import datetime
import pathlib
import pandas as pd
import xlsxwriter
from lukeghg.crf.crfxmlconstants import ch4co2eq, n2oco2eq, ctoco2
from lukeghg.crf.crfxmlfunctions import ConvertFloat,ConvertSign, ConvertToCO2, SumTwoValues, SumBiomassLs
from lukeghg.crf.crfxmlfunctions import PaddingList, GenerateRowTitleList
#These constants will come from CrfXMLConstants
#Please check the
#ch4co2eq=25.0
#n2oco2eq=298.0
#ctoco2=44.0/12.0
#nkset={'IE','NA','NO','NE'}
#Sort the Records based on YearName in ascending order
def SortKey(x):
    """Sort key: the YearName attribute of a CRF XML element."""
    return x.attrib['YearName']
#---------------------------------The main program begins--------------------------------------------------
def appendix11b(start,end,directory,file_name):
    """Build KP Appendix 11b: net emissions/removals under Articles 3.3.

    Writes two '#'-delimited tables — Table 1 (Afforestation/Reforestation)
    and Table 2 (Deforestation) — to ``file_name`` and then exports the same
    content to an .xlsx with the same stem.

    Parameters
    ----------
    start : int
        First inventory year (row titles run start..end).
    end : int
        Current inventory year.
    directory : str
        Directory containing the KP csv input files listed below.
    file_name : str
        Output text file name.

    Side effects: reads ~20 csv files, writes the text table and an Excel
    workbook. CH4/N2O/C values are converted to CO2 eq. with the
    module-level factors ch4co2eq, n2oco2eq and ctoco2.
    """
    #Command line generator
    # global is redundant for read-only access to module-level names,
    # kept as in the original.
    global ch4co2eq, n2oco2eq,ctoco2
    inventory_year=end
    #Output file, the table
    kptable_appendix_11b_file = file_name
    directory=directory+'/'
    #Table Appendix-11b Afforestation/Reforestation and Deforestation files
    #Deforestation "Conversion to water CH4" comes from NIR folder
    kp4a2_fl_to_waters_ch4_org=directory+'KP4A2_FLtowaters_orgsoil_CH4.csv'
    #The rest of the files are from crf-folder.
    kp4a_agr_bm_gains_losses=directory+'KP4A_agr_bm_gains_losses.csv'
    #2015 file name change
    #2016 file name change
    #2015:kpa2_ar_under_D_gains='KP4A2_AR_und_defor_treebm_gains.csv'
    kpa2_ar_under_D_gains=directory+'KP4A2_AR_und_D_living_biomass_gains_trees.csv'
    kp4a2_sl_soil=directory+'KP4A2_SL_soil.csv'
    kp4a2_ar_under_d_soil=directory+'KP4A2_AR_und_defor_soils.csv'
    #2015 KP_MTT_UID.csv in two files: KP_defor_mineral.csv and KP_defor_organic.csv
    #kp_uid_mtt='KP_MTT_UID.csv'
    kp_defor_mineral=directory+'KP_defor_mineral.csv'
    kp_defor_organic=directory+'KP_defor_organic.csv'
    kp4a2_fl_to_wl_soil=directory+'KP4A2_FLtoWL_soils.csv'
    kp4a2_clglpewesl_deadwood=directory+'KP4A2_CLGLPEWESL_deadwood.csv'
    kp4a2_d_living_biomass_losses_trees=directory+'KP4A2_D_living_biomass_losses_trees.csv'
    kp4a2_fl_to_waters_org_soil=directory+'KP4A2_FLtowaters_orgsoil.csv'
    #2015 rename
    #kp4a2_d_mineralization='KP4A2_D_mineraalisationcl_gl_sl.csv'
    kp4a2_d_mineralization=directory+'KPA2_soil_leaching_N2O.csv'
    #2015 addition is Afforestation mineralization
    kp4_ar_mineralization=directory+'KP4_Affor_mineralization.csv'
    kp4a2_fl_to_wl_non_co2=directory+'KP4A2_FLtoWL_soils_nonCO2.csv'
    kp4_living_biomass_gains_trees=directory+'KP4_living_biomass_gains_trees.csv'
    kp4_ar_living_biomass_losses_trees=directory+'KP4A1_AR_living_biomass_losses_trees.csv'
    kp4a1_clglsl_mineral_soil=directory+'KP4A1_CLGLSL_mineral_soil.csv'
    kp4a1_ar_org_soil=directory+'KP4A1_AR_Organic_soil_C.csv'
    kp4a11_wildfires=directory+'KP4A11_wildfires.csv'
    kp4a1_clglpewesl_organic_soils_nonco2=directory+'KP4A1_CLGLPEWESL_organic_soils_nonco2.csv'
    kp4_hwp_ard=directory+'KP4_HWP-AR.csv'
    #Data for the two Tables in Appendix 1
    #1. Deforestation: Conversion to water CH4
    #Change in 2015: use third (CH4) line in kp4a2_fl_to_waters_org_soil
    #----------------------------------------
    f = open(kp4a2_fl_to_waters_org_soil)
    #Read to a list [[year,val1],[year,val2]....,[year,valN]]
    ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Third line is CH4
    ls = ls[2]
    #Convert to CO2
    ls.pop(0)
    fl_to_waters_ch4_co2_ls = [ConvertToCO2(ch4co2eq,x) for x in ls]
    f.close()
    #2. Agriculture Afforestation and Deforestation biomasses
    #---------------------------------------------------------
    f = open(kp4a_agr_bm_gains_losses)
    agr_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Pick the deforestation, the first part in the file
    agr_d_ls = agr_ls[0:8]
    #Pick the Afforestation and Reforestation, rest of the file
    agr_ar_ls = agr_ls[8:len(agr_ls)]
    #Deforestation: Sum the biomassess: Cropland, Grassland, North and South Finland,
    #above ground and below ground
    agr_d_bm_ls=SumBiomassLs(agr_d_ls)
    #Afforestation: Sum the biomassess: Cropland, Grassland, North and South Finland,
    #above ground and below ground
    agr_ar_bm_ls=SumBiomassLs(agr_ar_ls)
    #Convert to CO2 and convert sign: if biomass increases -> emissiosn decrease and vice versa
    agr_d_co2_ls=[ConvertSign(ConvertToCO2(ctoco2,x)) for x in agr_d_bm_ls]
    agr_ar_co2_ls=[ConvertSign(ConvertToCO2(ctoco2,x)) for x in agr_ar_bm_ls]
    f.close()
    #3. Deforestation biomass losses in trees
    #----------------------------------------
    f = open(kp4a2_d_living_biomass_losses_trees)
    d_trees_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Sum the biomasses, CL, GL, SETT, WL, North and South Finland, below and above ground
    trees_bm_ls = SumBiomassLs(d_trees_ls)
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    trees_d_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in trees_bm_ls]
    f.close()
    #Deforestation Biomass: Afforestation/Reforestation under Deforestation, gains
    #---------------------------------------------------------------------
    f = open(kpa2_ar_under_D_gains)
    ar_under_d_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    ar_under_d_sum_ls = SumBiomassLs(ar_under_d_ls)
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    ar_under_d_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in ar_under_d_sum_ls]
    f.close()
    #Deforestation: DOM+SOM Mineral soils
    #-----------------------------------
    f = open(kp4a2_sl_soil)
    d_sl_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_sl_soil_sum_ls = SumBiomassLs(d_sl_soil_ls)
    d_sl_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_sl_soil_sum_ls]
    f.close()
    f = open(kp4a2_ar_under_d_soil)
    d_ar_under_d_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Sum the both lines
    d_ar_under_d_min_soil_sum_ls = SumBiomassLs(d_ar_under_d_soil_ls)
    d_ar_under_d_min_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_ar_under_d_min_soil_sum_ls]
    f.close()
    #Settlements are now in Mineral soil, take lines 9 and 10
    f = open(kp4a2_clglpewesl_deadwood)
    d_clglpewesl_deadwood_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_clglpewesl_deadwood_ls = d_clglpewesl_deadwood_ls[8:len(d_clglpewesl_deadwood_ls)]
    d_clglpewesl_deadwood_min_sum_ls = SumBiomassLs(d_clglpewesl_deadwood_ls)
    d_clglpewesl_deadwood_min_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_clglpewesl_deadwood_min_sum_ls]
    f.close()
    f = open(kp_defor_mineral)
    d_mtt_min_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_mtt_min_soil_ls = d_mtt_min_soil_ls[0:4]
    d_mtt_min_soil_sum_ls = SumBiomassLs(d_mtt_min_soil_ls)
    d_mtt_min_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_mtt_min_soil_sum_ls]
    f.close()
    # NOTE(review): this aggregate is not referenced again below — the
    # Deforestation table re-sums the components per row. Kept as-is.
    d_dom_som_min_soil_deadwood_sum_co2_ls = [SumTwoValues(a,SumTwoValues(b,SumTwoValues(c,d))) for (a,b,c,d) in zip(d_sl_soil_co2_ls,
                                                                                                                    d_ar_under_d_min_soil_co2_ls,
                                                                                                                    d_clglpewesl_deadwood_min_co2_ls,
                                                                                                                    d_mtt_min_soil_co2_ls)]
    #Deforestation: DOM+SOM Organic soils + Deadwood
    #-----------------------------------------------
    f = open(kp4a2_fl_to_wl_soil)
    d_fl_to_wl_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_fl_to_wl_soil_sum_ls = SumBiomassLs(d_fl_to_wl_soil_ls)
    d_fl_to_wl_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_fl_to_wl_soil_sum_ls]
    f.close()
    f = open(kp4a2_clglpewesl_deadwood)
    d_clglpewesl_deadwood_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_clglpewesl_deadwood_ls = d_clglpewesl_deadwood_ls[0:8]
    d_clglpewesl_deadwood_org_sum_ls = SumBiomassLs(d_clglpewesl_deadwood_ls)
    d_clglpewesl_deadwood_org_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_clglpewesl_deadwood_org_sum_ls]
    f.close()
    f = open(kp_defor_organic)
    d_mtt_org_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_mtt_org_soil_sum_ls = SumBiomassLs(d_mtt_org_soil_ls)
    d_mtt_org_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_mtt_org_soil_sum_ls]
    #print(8,d_mtt_org_soil_co2_ls)
    f.close()
    d_dom_som_org_soil_deadwood_sum_co2_ls=[SumTwoValues(a,SumTwoValues(b,c)) for (a,b,c) in
                                            zip(d_fl_to_wl_soil_co2_ls,d_clglpewesl_deadwood_org_co2_ls,d_mtt_org_soil_co2_ls)]
    #Deforestation: Conversion to water CO2
    #Change in 2015: Lines 1 and 2 are C
    #--------------------------------------
    f = open(kp4a2_fl_to_waters_org_soil)
    d_fl_to_waters_org_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Lines 1,2 are C
    d_fl_to_waters_org_soil_ls = d_fl_to_waters_org_soil_ls[0:2]
    d_fl_to_waters_org_soil_sum_ls = SumBiomassLs(d_fl_to_waters_org_soil_ls)
    d_fl_to_waters_org_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in d_fl_to_waters_org_soil_sum_ls]
    f.close()
    #Deforestation: Mineralization
    #-----------------------------
    f = open(kp4a2_d_mineralization)
    d_mineralization_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #2015 two last lines are N2O
    d_mineralization_ls = d_mineralization_ls[2:len(d_mineralization_ls)]
    d_mineralization_sum_ls = SumBiomassLs(d_mineralization_ls)
    d_mineralization_co2_ls = [ConvertToCO2(n2oco2eq,x) for x in d_mineralization_sum_ls]
    f.close()
    #Deforestation: Drained and rewetted organic soils N2O
    #-----------------------------------------------------
    f = open(kp4a2_fl_to_wl_non_co2)
    d_fl_to_wl_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_fl_to_wl_n2o_ls = d_fl_to_wl_ls[0:1]
    d_fl_to_wl_n2o_sum_ls = SumBiomassLs(d_fl_to_wl_n2o_ls)
    d_fl_to_wl_n2o_co2_ls = [ConvertToCO2(n2oco2eq,x) for x in d_fl_to_wl_n2o_sum_ls]
    f.close()
    #Deforestation: Drained and rewetted organic soils CH4
    #-----------------------------------------------------
    f = open(kp4a2_fl_to_wl_non_co2)
    d_fl_to_wl_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    d_fl_to_wl_ch4_ls = d_fl_to_wl_ls[1:2]
    d_fl_to_wl_ch4_sum_ls = SumBiomassLs(d_fl_to_wl_ch4_ls)
    d_fl_to_wl_ch4_co2_ls = [ConvertToCO2(ch4co2eq,x) for x in d_fl_to_wl_ch4_sum_ls]
    f.close()
    #Deforestation HWP
    #-----------------
    #HWP is IE
    d_hwp_ls = ['IE']*(inventory_year-1990+1)
    #4. Afforestation living biomass gains and losses trees
    #-------------------------------------------
    f = open(kp4_living_biomass_gains_trees)
    ar_trees_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    f.close()
    #Pick the Afforestation part, 2015 mineral and organic are added (not separately in the file)
    ar_bm_gains_trees_ls = ar_trees_ls[4:len(ar_trees_ls)]
    #Sum the biomasses, CL, CL, WLpeat, WLorg, Settlement, mineral, orgaing, South and North Finland
    ar_bm_sum_gains_trees_ls = SumBiomassLs(ar_bm_gains_trees_ls)
    f = open(kp4_ar_living_biomass_losses_trees)
    ar_bm_losses_trees_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    f.close()
    ar_bm_sum_losses_trees_ls = SumBiomassLs(ar_bm_losses_trees_ls)
    ar_bm_net_trees_ls = [SumTwoValues(x,y) for (x,y) in zip(ar_bm_sum_gains_trees_ls,ar_bm_sum_losses_trees_ls)]
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    trees_ar_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in ar_bm_net_trees_ls]
    #5. Afforestation, DOM+SOM Mineral soils
    #----------------------------------------
    f = open(kp4a1_clglsl_mineral_soil)
    dom_som_min_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    dom_som_min_soil_sum_ls = SumBiomassLs(dom_som_min_soil_ls)
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    dom_som_min_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in dom_som_min_soil_sum_ls]
    f.close()
    #6. Afforestation, DOM+SOM Organinc soils
    #----------------------------------------
    f=open(kp4a1_ar_org_soil)
    dom_som_org_soil_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    dom_som_org_soil_sum_ls = SumBiomassLs(dom_som_org_soil_ls)
    #Convert to CO2 and convert sign: if biomass increases -> emissions decrease and vice versa
    dom_som_org_soil_co2_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in dom_som_org_soil_sum_ls ]
    f.close()
    #7. Biomass burning
    #------------------
    f=open(kp4a11_wildfires)
    biomass_burning_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #CO2 South and North Finland
    bm_burning_co2_ls = biomass_burning_ls[0:2]
    bm_burning_co2_sum_ls = SumBiomassLs(bm_burning_co2_ls)
    #CH4 South and North Finland
    bm_burning_ch4_ls = biomass_burning_ls[2:4]
    bm_burning_ch4_sum_ls = SumBiomassLs(bm_burning_ch4_ls)
    #N2O South and North Finland
    bm_burning_n2o_ls = biomass_burning_ls[4:6]
    bm_burning_n2o_sum_ls = SumBiomassLs(bm_burning_n2o_ls)
    #Convert ch4 and n2o to co2eq and sum all three emissions
    biomass_burning_ch4co2eq_ls = [ConvertToCO2(ch4co2eq,x) for x in bm_burning_ch4_sum_ls]
    biomass_burning_n2oco2eq_ls = [ConvertToCO2(n2oco2eq,x) for x in bm_burning_n2o_sum_ls]
    biomass_burning_co2_sum_ls = [SumTwoValues(x,SumTwoValues(y,z)) for (x,y,z) in zip(bm_burning_co2_sum_ls,biomass_burning_ch4co2eq_ls,biomass_burning_n2oco2eq_ls)]
    #print(biomass_burning_co2_sum_ls)
    f.close()
    #8. 2015 addition Mineralization
    #-------------------------------
    f=open(kp4_ar_mineralization)
    ar_mineralization_ls=[x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #South and North Fianland
    ar_mineralization_no2_ls = ar_mineralization_ls[0:2]
    ar_mineralization_n2o_sum_ls=SumBiomassLs(ar_mineralization_no2_ls)
    ar_mineralization_n2o_co2_ls = [ConvertToCO2(n2oco2eq,x) for x in ar_mineralization_n2o_sum_ls]
    #9.Drained organic soils N2O
    #---------------------------
    f=open(kp4a1_clglpewesl_organic_soils_nonco2)
    drained_org_soils_nonco2_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Two lines in the file, the first one is CH4
    drained_org_soils_sum_ch4_ls = drained_org_soils_nonco2_ls[0:1]
    drained_org_soils_sum_ch4_ls = SumBiomassLs(drained_org_soils_sum_ch4_ls)
    #Convert from N2O to CO2
    drained_org_soils_sum_ch4co2eq_ls = [ConvertToCO2(ch4co2eq,x) for x in drained_org_soils_sum_ch4_ls]
    f.close()
    #10.Drained organic soils CH4
    #---------------------------
    f=open(kp4a1_clglpewesl_organic_soils_nonco2)
    drained_org_soils_nonco2_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #Two lines in the file, the second one is CH4
    drained_org_soils_sum_n2o_ls = drained_org_soils_nonco2_ls[1:2]
    drained_org_soils_sum_n2o_ls = SumBiomassLs(drained_org_soils_sum_n2o_ls)
    #Convert from CH4 to CO2
    drained_org_soils_sum_n2oco2eq_ls = [ConvertToCO2(n2oco2eq,x) for x in drained_org_soils_sum_n2o_ls]
    f.close()
    #11.HWP afforestation
    #--------------------
    f=open(kp4_hwp_ard)
    hwp_ard_ls = [x.rpartition('#')[2].split() for x in f.readlines() if x.count('#') != 1]
    #2015 the file structure is in 3 parts: 1) Initial stock, 2) Gains and losses 3) half-time information
    #The sum of gains and losses will go to the table
    #2016 the file structure is for each item: 1) half life, 2) init stock, 3) gains and 4) losses
    #Pick every fourth item starting from the first right place in the list
    hwp_ar_ls_gains = hwp_ard_ls[2::4]
    hwp_ar_ls_losses = hwp_ard_ls[3::4]
    hwp_ar_ls = hwp_ar_ls_gains+hwp_ar_ls_losses
    #print(hwp_ar_ls)
    hwp_ar_sum_ls = SumBiomassLs(hwp_ar_ls)
    #Removals are good for the atmosphere, change the sign
    hwp_ar_sum_ls = [ConvertSign(ConvertToCO2(ctoco2,x)) for x in hwp_ar_sum_ls]
    #HWP does not have full time series from 1990
    hwp_padding_ls = PaddingList(inventory_year,hwp_ar_sum_ls)
    hwp_padding_ls = ['IE']*len(hwp_padding_ls)
    hwp_ar_sum_co2_ls = hwp_padding_ls+hwp_ar_sum_ls
    f.close()
    #Create the two tables Afforestation/Reforestation and Deforestation
    #-------------------------------------------------------------------
    print("Creating first text file for Afforestation/Reforestation and Deforestation in", file_name)
    f1 = open(kptable_appendix_11b_file,'w')
    delim ='#'
    table_name="Appendix_11b"
    # NOTE(review): "ativities" typo below is emitted verbatim into the
    # output file — fix separately if the output format allows.
    table_header="Net emissions and removals from the ativities under Articles 3.3\n"
    table1title1="Table 1_App_11b Net emissions and removals from Afforestation and Reforestation, kt CO2eq.\n"
    table2title2="Table 2_App_11b Net emissions and removals from Deforestation, ktCO2eq.\n"
    table1columns1=delim+"Biomass"+delim+"DOM+SOM Mineral soils"+delim+"DOM+SOM Organic soils"+delim+"Biomass burning"+delim+"Mineralization"+delim
    table1columns1=table1columns1+"Drained organic soils N2O"+delim+"Drained organic soils CH4"+delim+"HWP"+delim+"Total\n"
    table2columns2=delim+"Biomass"+delim+"DOM+SOM Mineral soils"+delim+"DOM+SOM Organic soils+Deadwood"+delim+"Conversion to water CO2"+delim+"Mineralization"+delim
    table2columns2=table2columns2+"Drained and rewetted organic soils CH4"+delim+"Drained and rewetted organic soils NO2"+delim
    table2columns2=table2columns2+"HWP"+delim+"Conversion to water CH4"+delim+"Total"+"#\n"
    #Row titles from 1990 to inventory year
    row_title_ls = GenerateRowTitleList(start,inventory_year)
    f1.write(table_name)
    f1.write(table_header)
    #Afforestation and Reforestation
    f1.write(table1title1)
    f1.write(table1columns1)
    for (year,agr_ar_co2,trees_ar_co2,dom_som_min,dom_som_org,bm_burning,ar_min_co2,n2oco2eq,ch4co2eq,hwp) in zip(row_title_ls,agr_ar_co2_ls,trees_ar_co2_ls,
                                                                                                                  dom_som_min_soil_co2_ls,
                                                                                                                  dom_som_org_soil_co2_ls,
                                                                                                                  biomass_burning_co2_sum_ls,
                                                                                                                  ar_mineralization_n2o_co2_ls,
                                                                                                                  drained_org_soils_sum_n2oco2eq_ls,
                                                                                                                  drained_org_soils_sum_ch4co2eq_ls,
                                                                                                                  hwp_ar_sum_co2_ls):
        total=SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(agr_ar_co2,trees_ar_co2),
                                                                                                         dom_som_min),dom_som_org),bm_burning),
                                                     ar_min_co2),n2oco2eq),ch4co2eq),hwp)
        f1.write(str(year)+delim+str(SumTwoValues(agr_ar_co2,trees_ar_co2))+"#"+str(dom_som_min)+"#"+str(dom_som_org)+"#"+str(bm_burning)+"#")
        f1.write(str(ar_min_co2)+"#"+str(n2oco2eq)+"#"+str(ch4co2eq)+"#"+str(hwp)+"#"+str(total)+"#\n")
    f1.write("Data from:"+"#"+kp4a_agr_bm_gains_losses+" Lines:9-"+str(len(agr_ls))+"#"+kp4a1_clglsl_mineral_soil+"#"+kp4a1_ar_org_soil+"#"+kp4a11_wildfires+"#")
    f1.write(kp4_ar_mineralization+" Lines:1,2"+delim+kp4a1_clglpewesl_organic_soils_nonco2+" Line:1"+"#"+kp4a1_clglpewesl_organic_soils_nonco2+" Line:2"+"#"+kp4_hwp_ard+"#\n")
    f1.write("#"+kp4_living_biomass_gains_trees+" Lines:5-"+str(len(ar_trees_ls))+"#"+"#"+"#"+"CO2 Lines:1-2,CH4 Lines:3-4,N2O Lines:5-6"+"####"+"Gains Lines:3,7,11 etc."+"#\n")
    f1.write("#"+kp4_ar_living_biomass_losses_trees+"#######"+"Losses Lines:4,8,12 etc."+"#\n")
    f1.write('\n\n')
    #Deforestation
    f1.write(table2title2)
    f1.write(table2columns2)
    for (year,agr_d_co2,trees_d_co2,ar_under_d_co2,
         d_sl_soil_co2,d_ar_under_d_min_soil_co2,d_mtt_min_soil_co2,d_clglpewesl_deadwood_min_co2,
         dom_som_org_soil_deadwood_co2,
         d_fl_to_waters_org_soil_co2,
         d_mineralization_co2,
         d_fl_to_wl_ch4_co2,d_fl_to_wl_n2o_co2,
         d_hwp,
         to_waters_ch4) in zip(row_title_ls,agr_d_co2_ls,trees_d_co2_ls,ar_under_d_co2_ls,
                               d_sl_soil_co2_ls,d_ar_under_d_min_soil_co2_ls,d_mtt_min_soil_co2_ls,d_clglpewesl_deadwood_min_co2_ls,
                               d_dom_som_org_soil_deadwood_sum_co2_ls,
                               d_fl_to_waters_org_soil_co2_ls,
                               d_mineralization_co2_ls,
                               d_fl_to_wl_ch4_co2_ls,d_fl_to_wl_n2o_co2_ls,
                               d_hwp_ls,
                               fl_to_waters_ch4_co2_ls):
        biomass = SumTwoValues(agr_d_co2,trees_d_co2)
        biomass = SumTwoValues(biomass,ar_under_d_co2)
        dom_som_min_soil=SumTwoValues(d_sl_soil_co2,d_ar_under_d_min_soil_co2)
        dom_som_min_soil=SumTwoValues(dom_som_min_soil,d_mtt_min_soil_co2)
        dom_som_min_soil=SumTwoValues(dom_som_min_soil,d_clglpewesl_deadwood_min_co2)
        total1 = SumTwoValues(SumTwoValues(SumTwoValues(SumTwoValues(agr_d_co2,trees_d_co2),ar_under_d_co2),d_sl_soil_co2),d_ar_under_d_min_soil_co2)
        total2 = SumTwoValues(SumTwoValues(d_mtt_min_soil_co2,d_clglpewesl_deadwood_min_co2),dom_som_org_soil_deadwood_co2)
        total3 = SumTwoValues(SumTwoValues(d_fl_to_waters_org_soil_co2,d_mineralization_co2),d_fl_to_wl_ch4_co2)
        total4 = SumTwoValues(SumTwoValues(d_fl_to_wl_n2o_co2,d_hwp),to_waters_ch4)
        total = total1+total2+total3+total4
        f1.write(str(year)+delim+str(biomass)+delim+str(dom_som_min_soil)+delim+str(dom_som_org_soil_deadwood_co2)+delim+str(d_fl_to_waters_org_soil_co2)+delim+
                 str(d_mineralization_co2)+delim+str(d_fl_to_wl_ch4_co2)+delim+str(d_fl_to_wl_n2o_co2)+delim+str(d_hwp)+delim+str(to_waters_ch4)+"#"+str(total)+"#\n")
    f1.write("Data from:"+"#"+kp4a_agr_bm_gains_losses+" Lines:1-8"+delim+kp4a2_sl_soil+delim+kp4a2_fl_to_wl_soil+delim+kp4a2_fl_to_waters_org_soil+" Lines:1,2"+"#")
    f1.write(kp4a2_d_mineralization+delim+kp4a2_fl_to_wl_non_co2+" Line:2"+delim+kp4a2_fl_to_wl_non_co2+" Line:1"+delim+"No file"+delim)
    f1.write(kp4a2_fl_to_waters_org_soil+" Line:3"+"#\n")
    f1.write("#"+kp4a2_d_living_biomass_losses_trees+delim+kp4a2_ar_under_d_soil+delim+kp4a2_clglpewesl_deadwood+" Lines:1-8"+"#\n")
    f1.write("#"+kpa2_ar_under_D_gains+delim+kp_defor_mineral+delim+kp_defor_organic+"#\n")
    f1.write("#"+delim+kp4a2_clglpewesl_deadwood+" Lines:9-10"+"#\n")
    now = datetime.datetime.now()
    #print(str(now))
    f1.write("Date produced: "+str(now)+"\n")
    f1.close()
    #Create excel
    p = pathlib.Path(file_name)
    stem = p.stem
    p_excel = pathlib.Path(stem+'.xlsx')
    print("Creating Excel file for Afforestation/Reforestation and Deforestation in", str(p_excel))
    #Define max number of columns, dataframe can adjust to it
    names=['col' + str(x) for x in range(12) ]
    df = pd.read_csv(file_name,engine='python',header=None,delimiter='#',keep_default_na=False,names=names,dtype=str)
    # NOTE(review): the module imports xlsxwriter but the writer uses the
    # openpyxl engine — confirm which engine is intended. The sheet name is
    # the full file_name; Excel caps sheet names at 31 characters — verify.
    writer = pd.ExcelWriter(p_excel,engine='openpyxl')
    df_float = df.applymap(ConvertFloat)
    df_float.to_excel(writer,file_name,header=False)
    writer.close()
| jariperttunen/lukeghg | lukeghg/lukeghg/nir/kptableappendix11b.py | kptableappendix11b.py | py | 24,795 | python | en | code | 0 | github-code | 36 |
42669964568 | import glob
import cv2
import numpy as np
from tqdm import tqdm
class Calibration(object):
    """Single-camera calibration from chessboard images using OpenCV.

    Loads every ``*.jpg`` under the target path, detects the 9x6 inner
    chessboard corners in each image, and runs ``cv2.calibrateCamera`` to
    estimate the intrinsic matrix, distortion coefficients and per-view
    extrinsics (rvecs/tvecs).
    """
    def __init__(self, targetfilepath):
        # termination criteria
        self.criteria = (cv2.TERM_CRITERIA_EPS +
                         cv2.TERM_CRITERIA_MAX_ITER, 30, 1e-5)
        # prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
        # 3-D coordinates of every corner point found on the chessboard
        self.worldPoints = np.zeros((9 * 6, 3), np.float32)
        self.worldPoints[:, :2] = np.mgrid[0:9, 0:6].T.reshape(-1, 2)
        # World-coordinate frame: the top-left corner is the origin (0,0,0).
        # The board is planar, so every Z-coordinate stays 0.
        # Arrays to store object points and image points from all the images.
        self.objectPoints = []  # 3d points in real world space (one set per accepted view)
        self.imagePoints = []  # 2d points in image plane.
        self.cameraMatrix = None
        self.distortion = None
        self.img_shape = None
        self.rvecs = None
        self.tvecs = None
        # Per-target-view pose; rotations are intended to be kept as 3x3
        # matrices as well (see the commented-out Rodrigues code in calibrate).
        self.targetRvecs = []
        self.targetTvecs = []
        self.readfile(targetfilepath)

    def readfile(self, targetfilepath):
        """Detect chessboard corners in every JPEG under ``targetfilepath``
        (Windows-style path join), then run the calibration."""
        targetimagefile = glob.glob(targetfilepath + '\\*.jpg')
        #targetimagefile.sort()
        print("start loading files")
        for i in tqdm(range(len(targetimagefile))):
            # print(targetimagefile[i])
            imgmat = cv2.imread(targetimagefile[i])
            # convert to grayscale
            imggray = cv2.cvtColor(imgmat, cv2.COLOR_BGR2GRAY)
            # Find the chess board corners
            # returns: success flag, list of detected corner points
            # args: image, pattern size (inner corners), flags
            ret, corners = cv2.findChessboardCorners(imggray, (9, 6), None)
            if ret is True:
                # If found, add object points, image points (after refining them)
                # sub-pixel refinement of the detected corners
                cv2.cornerSubPix(imggray, corners, (11, 11), (-1, -1), self.criteria)
                self.imagePoints.append(corners)
                self.objectPoints.append(self.worldPoints)
                # ret = cv2.drawChessboardCorners(imgmat, (9, 6), corners, ret)
                # cv2.imshow("test", imgmat)
                # cv2.waitKey(1)
        # image size (width, height) taken from the first file
        self.img_shape = cv2.cvtColor(cv2.imread(targetimagefile[0]), cv2.COLOR_BGR2GRAY).shape[::-1]
        self.calibrate(len(targetimagefile))

    def calibrate(self, target_lenght):
        """Run cv2.calibrateCamera over the accumulated correspondences."""
        print("enter 1d calibration")
        ret, self.cameraMatrix, self.distortion, self.rvecs, self.tvecs = cv2.calibrateCamera(
            objectPoints=self.objectPoints,
            imagePoints=self.imagePoints,
            imageSize=self.img_shape,
            cameraMatrix=self.cameraMatrix,
            distCoeffs=self.distortion,
            rvecs=self.rvecs,
            tvecs=self.tvecs)
        print(ret)
        # Calibrating a single camera returns:
        # success flag (reprojection error), camera matrix, distortion
        # coefficients, and rotation and translation vector"s" (one per view).
        # R|t is the relative pose of each view — coordinates are expressed
        # about the 2-D view's principal point (cx, cy).
        print("exit 1d calibration")
        # for i in tqdm(range(target_lenght)):
        #     dst, _ = cv2.Rodrigues(self.rvecs[i])
        #     self.targetRvecs.append(dst)
        #     self.targetTvecs.append(self.tvecs[i])
        # print("Rodrigues eqs is solved")
        # for i, (r, t) in enumerate(zip(self.rvecs, self.tvecs)):
        #     dst, _ = cv2.Rodrigues(r)  # rotation vectors use a different representation, so convert (Rodrigues transform)
        #     print(i, "-th rotation matrix: \n", dst)
        #     print(i, "-th translation: \n", t)
| Team-AllyHyeseongKim/vision-utils-calibrator-depth-map-deblur-odometry- | custom_lib/cail/calibrator.py | calibrator.py | py | 3,920 | python | ko | code | 0 | github-code | 36 |
7654866561 | # -*- coding: utf-8 -*-
"""
Created on Fri Feb 17 10:15:08 2023
@author: anjan
"""
import numpy as np
def swap_rows(A, p, q):
    """Exchange rows ``p`` and ``q`` of ``A`` in place.

    Parameters
    ----------
    A : numpy.ndarray
        Matrix of any dimensions.
    p, q : int
        Indices of the two rows to exchange.

    Returns
    -------
    numpy.ndarray
        The same matrix object with the two rows swapped.
    """
    A[[q, p], :] = A[[p, q], :]
    return A
def partial_pivot(L,P,U,k,max_index):
    """Apply a partial-pivot row exchange at elimination step ``k``.

    Parameters
    ----------
    max_index : int
        Row index of the largest absolute entry below (or at) the pivot.
    L : numpy.ndarray
        Lower-triangular factor being built.
    P : numpy.ndarray
        Permutation matrix accumulated so far.
    U : numpy.ndarray
        Working matrix that eventually becomes upper triangular.
    k : int
        Row index of the current pivot.

    Returns
    -------
    tuple of numpy.ndarray
        ``(L, P, U)`` with the appropriate rows exchanged.
    """
    rows = [k, max_index]
    swapped = [max_index, k]
    # Only the trailing columns (from the pivot column on) move in U.
    U[rows, k:] = U[swapped, k:]
    # The permutation matrix records the full row exchange.
    P[rows, :] = P[swapped, :]
    # Only the already-computed multipliers (columns before k) move in L.
    L[rows, :k] = L[swapped, :k]
    return L, P, U
def GE_step(L,U,k,j):
    """Eliminate entry (j, k) of U, storing the multiplier in L[j, k]."""
    multiplier = U[j, k] / U[k, k]
    L[j, k] = multiplier
    U[j, k:] -= multiplier * U[k, k:]
    return L, U
def LU_factorize(A):
    """LU-factorize ``A`` using Gaussian Elimination with partial pivoting.

    Parameters
    ----------
    A : numpy.ndarray
        Square matrix to factorize (converted to float internally).

    Returns
    -------
    L, U, P : numpy.ndarray
        2D arrays satisfying ``L @ U == P @ A``.

    Raises
    ------
    ValueError
        If ``A`` is singular (a zero pivot is encountered).
    """
    n = A.shape[0]
    A = A.astype(float)
    L = np.identity(n)
    P = np.identity(n)
    U = A.copy()
    pivots = []
    for k in range(n):
        # Choose the largest absolute entry on/below the diagonal as pivot.
        max_index = k + np.argmax(np.abs(U[k:, k]))
        if U[max_index, k] == 0:
            # Fix: the original returned an error *string* here, which breaks
            # every caller that unpacks ``L, U, P``; raise instead.
            raise ValueError(
                "The matrix is singular and the rank is at least {}"
                .format(len(pivots)))
        pivots.append(U[max_index, k])
        if max_index != k:
            L, P, U = partial_pivot(L, P, U, k, max_index)
        for j in range(k+1, n):
            L, U = GE_step(L, U, k, j)
    return L, U, P
def substitution(L,U,P,b):
    """Solve ``L U x = P b`` by forward then backward substitution.

    Parameters
    ----------
    L : 2D numpy array
        Lower-triangular matrix (assumed to have a unit diagonal for the
        first row, as produced by ``LU_factorize``).
    U : 2D numpy array
        Upper-triangular matrix.
    P : 2D numpy array
        Permutation matrix.
    b : 1D numpy array
        Right-hand side vector.

    Returns
    -------
    x_ : 1D numpy array
        The vector satisfying ``L U x_ = P b``.
    """
    n = len(b)
    # Apply the permutation to the right-hand side.
    b_ = np.matmul(P, b)
    # Forward substitution: solve L y = P b.
    y_ = np.zeros(n)
    y_[0] = b_[0]
    for i in range(1, n):
        y_[i] = (b_[i] - np.matmul(L[i, :i], y_[:i])) / L[i, i]
    # Backward substitution: solve U x = y.
    x_ = np.zeros(n)
    x_[n - 1] = y_[n - 1] / U[n - 1, n - 1]
    for j in reversed(range(n - 1)):
        x_[j] = (y_[j] - np.matmul(U[j, j + 1:], x_[j + 1:])) / U[j, j]
    return x_
def p_piv(p):
    """Convert a permutation matrix into pivot indices.

    Parameters
    ----------
    p : 2D numpy array
        Permutation matrix.

    Returns
    -------
    piv : 1D numpy array
        Pivot indices representing the permutation matrix ``p``:
        row ``i`` of the matrix was interchanged with row ``piv[i]``.
    """
    n = p.shape[0]
    piv = np.zeros(n)
    # The single 1-entry in each column marks the destination row.
    for col, column in enumerate(p.T):
        piv[col] = np.argmax(np.abs(column))
    return piv
# Smoke test: factorize a random 10x10 system and check the residual.
n=10
A=np.random.rand(n,n)
# NOTE(review): b is unused here -- presumably intended for a
# ``substitution(L, U, P, b)`` call; confirm.
b=np.random.rand(n)
L,U,P=LU_factorize(A)
# Frobenius norm of PA - LU; should be at machine-precision level.
my_PA_LU_norms=np.linalg.norm(P@A-np.matmul(L,U))
print(my_PA_LU_norms)
69826161703 | """
General Numerical Solver for the 1D Time-Dependent Schrodinger Equation.
Authors:
- Jake Vanderplas <vanderplas@astro.washington.edu>
- Andre Xuereb (imaginary time propagation, normalized wavefunction
For a theoretical description of the algorithm, please see
http://jakevdp.github.com/blog/2012/09/05/quantum-python/
License: BSD style
Please feel free to use and modify this, but keep the above information.
"""
import numpy as np
from scipy import fftpack
import matplotlib.pyplot as plt
from matplotlib import animation
import seaborn as sns
# Apply seaborn's default styling to all subsequent matplotlib figures.
sns.set()
class Schrodinger(object):
    """
    Class which implements a numerical solution of the time-dependent
    Schrodinger equation for an arbitrary potential.

    The solver uses the split-step Fourier (spectral) method: the
    potential-energy factor is applied in x-space and the kinetic-energy
    factor in k-space, moving between representations with FFTs.
    """
    def __init__(self, x, psi_x0, V_x, k0=None, hbar=1, m=1, t0=0.0):
        """
        Parameters
        ----------
        x : array_like, float
            Length-N array of evenly spaced spatial coordinates
        psi_x0 : array_like, complex
            Length-N array of the initial wave function at time t0
        V_x : array_like, float
            Length-N array giving the potential at each x
        k0 : float
            The minimum value of k. Note that, because of the workings of the
            Fast Fourier Transform, the momentum wave-number will be defined
            in the range
              k0 < k < 2*pi / dx ,
            where dx = x[1]-x[0]. If you expect nonzero momentum outside this
            range, you must modify the inputs accordingly. If not specified,
            k0 will be calculated such that the range is [-k0,k0]
        hbar : float
            Value of Planck's constant (default = 1)
        m : float
            Particle mass (default = 1)
        t0 : float
            Initial time (default = 0)
        """
        # Validation of array inputs
        self.x, psi_x0, self.V_x = map(np.asarray, (x, psi_x0, V_x))
        N = self.x.size
        assert self.x.shape == (N,)
        assert psi_x0.shape == (N,)
        assert self.V_x.shape == (N,)
        # Validate and set internal parameters
        assert hbar > 0
        assert m > 0
        self.hbar = hbar
        self.m = m
        self.t = t0
        self.dt_ = None
        self.N = len(x)
        self.dx = self.x[1] - self.x[0]
        self.dk = 2 * np.pi / (self.N * self.dx)
        # Set momentum scale.
        # Fix: compare against None with ``is`` (identity), not ``==``.
        if k0 is None:
            self.k0 = -0.5 * self.N * self.dk
        else:
            assert k0 < 0
            self.k0 = k0
        self.k = self.k0 + self.dk * np.arange(self.N)
        self.psi_x = psi_x0
        self.compute_k_from_x()
        # Variables which hold steps in evolution
        self.x_evolve_half = None
        self.x_evolve = None
        self.k_evolve = None

    def _set_psi_x(self, psi_x):
        # Store the x-space wavefunction in its "modulated" form (the phase
        # and scale factors make the FFT pair consistent), then normalize.
        assert psi_x.shape == self.x.shape
        self.psi_mod_x = (psi_x * np.exp(-1j * self.k[0] * self.x)
                          * self.dx / np.sqrt(2 * np.pi))
        self.psi_mod_x /= self.norm
        self.compute_k_from_x()

    def _get_psi_x(self):
        # Undo the modulation applied in _set_psi_x.
        return (self.psi_mod_x * np.exp(1j * self.k[0] * self.x)
                * np.sqrt(2 * np.pi) / self.dx)

    def _set_psi_k(self, psi_k):
        assert psi_k.shape == self.x.shape
        self.psi_mod_k = psi_k * np.exp(1j * self.x[0] * self.dk
                                        * np.arange(self.N))
        self.compute_x_from_k()
        self.compute_k_from_x()

    def _get_psi_k(self):
        return self.psi_mod_k * np.exp(-1j * self.x[0] * self.dk
                                       * np.arange(self.N))

    def _get_dt(self):
        return self.dt_

    def _set_dt(self, dt):
        # Precompute the split-step propagators; they depend only on dt,
        # so they are rebuilt only when dt actually changes.
        assert dt != 0
        if dt != self.dt_:
            self.dt_ = dt
            self.x_evolve_half = np.exp(-0.5 * 1j * self.V_x
                                        / self.hbar * self.dt)
            self.x_evolve = self.x_evolve_half * self.x_evolve_half
            self.k_evolve = np.exp(-0.5 * 1j * self.hbar / self.m
                                   * (self.k * self.k) * self.dt)

    def _get_norm(self):
        return self.wf_norm(self.psi_mod_x)

    psi_x = property(_get_psi_x, _set_psi_x)
    psi_k = property(_get_psi_k, _set_psi_k)
    norm = property(_get_norm)
    dt = property(_get_dt, _set_dt)

    def compute_k_from_x(self):
        self.psi_mod_k = fftpack.fft(self.psi_mod_x)

    def compute_x_from_k(self):
        self.psi_mod_x = fftpack.ifft(self.psi_mod_k)

    def wf_norm(self, wave_fn):
        """
        Returns the norm of a wave function.

        Parameters
        ----------
        wave_fn : array
            Length-N array of the wavefunction in the position representation
        """
        assert wave_fn.shape == self.x.shape
        return np.sqrt((abs(wave_fn) ** 2).sum() * 2 * np.pi / self.dx)

    def solve(self, dt, Nsteps=1, eps=1e-3, max_iter=1000):
        """
        Propagate the Schrodinger equation forward in imaginary
        time to find the ground state.

        Parameters
        ----------
        dt : float
            The small time interval over which to integrate
        Nsteps : float, optional
            The number of intervals to compute (default = 1)
        eps : float
            The criterion for convergence applied to the norm (default = 1e-3)
        max_iter : float
            Maximum number of iterations (default = 1000)
        """
        eps = abs(eps)
        assert eps > 0
        t0 = self.t
        old_psi = self.psi_x
        d_psi = 2 * eps
        num_iter = 0
        while (d_psi > eps) and (num_iter <= max_iter):
            num_iter += 1
            # Imaginary-time propagation (-1j*dt) damps the excited states.
            self.time_step(-1j * dt, Nsteps)
            d_psi = self.wf_norm(self.psi_x - old_psi)
            old_psi = 1. * self.psi_x
        self.t = t0

    def time_step(self, dt, Nsteps=1):
        """
        Perform a series of time-steps via the time-dependent Schrodinger
        Equation.

        Parameters
        ----------
        dt : float
            The small time interval over which to integrate
        Nsteps : float, optional
            The number of intervals to compute. The total change in time at
            the end of this method will be dt * Nsteps (default = 1)
        """
        assert Nsteps >= 0
        self.dt = dt
        if Nsteps > 0:
            # Strang splitting: a half potential step, then alternating full
            # kinetic/potential steps, closed by a final half potential step.
            self.psi_mod_x *= self.x_evolve_half
            for num_iter in range(Nsteps - 1):
                self.compute_k_from_x()
                self.psi_mod_k *= self.k_evolve
                self.compute_x_from_k()
                self.psi_mod_x *= self.x_evolve
            self.compute_k_from_x()
            self.psi_mod_k *= self.k_evolve
            self.compute_x_from_k()
            self.psi_mod_x *= self.x_evolve_half
            self.compute_k_from_x()
            # Re-normalize to counter numerical drift (and imaginary time).
            self.psi_mod_x /= self.norm
            self.compute_k_from_x()
        self.t += dt * Nsteps
######################################################################
# Helper functions for gaussian wave-packets
def gauss_x(x, a, x0, k0):
    """
    a gaussian wave packet of width a, centered at x0, with momentum k0
    """
    amplitude = (a * np.sqrt(np.pi)) ** (-0.5)
    envelope = -0.5 * ((x - x0) / a) ** 2
    phase = 1j * x * k0
    return amplitude * np.exp(envelope + phase)
def gauss_k(k, a, x0, k0):
    """
    analytical fourier transform of gauss_x(x), above
    """
    prefactor = (a / np.sqrt(np.pi)) ** 0.5
    exponent = -0.5 * (a * (k - k0)) ** 2 - 1j * (k - k0) * x0
    return prefactor * np.exp(exponent)
######################################################################
# Utility functions for running the animation
def theta(x):
    """
    theta function :
    returns 0 if x<=0, and 1 if x>0
    """
    x = np.asarray(x)
    return np.where(x > 0, 1.0, 0.0)
def square_barrier(x, width, height):
    """Rectangular barrier of the given height on the interval (0, width]."""
    x = np.asarray(x)
    inside = np.zeros(x.shape)
    inside[(x > 0) & (x <= width)] = 1.0
    return height * inside
######################################################################
# Create the animation
# specify time steps and duration
dt = 0.01
N_steps = 50
t_max = 120
frames = int(t_max / float(N_steps * dt))
# specify constants
hbar = 1.0 # planck's constant
m = 1.9 # particle mass
# specify range in x coordinate
N = 2 ** 11
dx = 0.1
x = dx * (np.arange(N) - 0.5 * N)
# specify potential
V0 = 1.5
L = hbar / np.sqrt(2 * m * V0)
a = 3 * L
x0 = -60 * L
V_x = square_barrier(x, a, V0)
# Hard walls at x = +/-98 keep the packet inside the simulation box.
V_x[x < -98] = 1E6
V_x[x > 98] = 1E6
# specify initial momentum and quantities derived from it
p0 = np.sqrt(2 * m * 0.2 * V0)
dp2 = p0 * p0 * 1. / 80
d = hbar / np.sqrt(2 * dp2)
k0 = p0 / hbar
v0 = p0 / m
psi_x0 = gauss_x(x, d, x0, k0)
# define the Schrodinger object which performs the calculations
S = Schrodinger(x=x,
                psi_x0=psi_x0,
                V_x=V_x,
                hbar=hbar,
                m=m,
                k0=-28)
######################################################################
# Set up plot
fig = plt.figure('Quantum Tunneling')
# plotting limits
xlim = (-100, 100)
klim = (-5, 5)
# top axes show the x-space data
ymin = 0
ymax = V0
ax1 = fig.add_subplot(211, xlim=xlim,
                      ylim=(ymin - 0.2 * (ymax - ymin),
                            ymax + 0.2 * (ymax - ymin)))
psi_x_line, = ax1.plot([], [], c='r', label=r'$|\psi(x)|$')
V_x_line, = ax1.plot([], [], c='k', label=r'$V(x)$')
center_line = ax1.axvline(0, c='k', ls=':', label=r"$x_0 + v_0t$")
title = ax1.set_title("")
ax1.legend(prop=dict(size=12))
ax1.set_xlabel('$x$')
ax1.set_ylabel(r'$|\psi(x)|$')
# bottom axes show the k-space data
ymin = abs(S.psi_k).min()
ymax = abs(S.psi_k).max()
ax2 = fig.add_subplot(212, xlim=klim,
                      ylim=(ymin - 0.2 * (ymax - ymin),
                            ymax + 0.2 * (ymax - ymin)))
psi_k_line, = ax2.plot([], [], c='r', label=r'$|\psi(k)|$')
p0_line1 = ax2.axvline(-p0 / hbar, c='k', ls=':', label=r'$\pm p_0$')
p0_line2 = ax2.axvline(p0 / hbar, c='k', ls=':')
# NOTE(review): the label claims sqrt(2 m V0) but the expression omits m
# (np.sqrt(2 * V0)) -- confirm which was intended.
mV_line = ax2.axvline(np.sqrt(2 * V0) / hbar, c='k', ls='--',
                      label=r'$\sqrt{2mV_0}$')
ax2.legend(prop=dict(size=12))
ax2.set_xlabel('$k$')
ax2.set_ylabel(r'$|\psi(k)|$')
V_x_line.set_data(S.x, S.V_x)
######################################################################
# Functions to Animate the plot
def init():
    """Reset every animated artist to an empty state (FuncAnimation init_func)."""
    title.set_text("")
    for line in (psi_x_line, V_x_line, center_line, psi_k_line):
        line.set_data([], [])
    return (psi_x_line, V_x_line, center_line, psi_k_line, title)
def animate(i):
    """Advance the solver by N_steps and refresh the x- and k-space artists."""
    S.time_step(dt, N_steps)
    x_center = x0 + S.t * p0 / m
    center_line.set_data([x_center, x_center], [0, 1])
    psi_x_line.set_data(S.x, 4 * abs(S.psi_x))
    V_x_line.set_data(S.x, S.V_x)
    psi_k_line.set_data(S.k, abs(S.psi_k))
    return (psi_x_line, V_x_line, center_line, psi_k_line, title)
# call the animator.
# blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
                               frames=frames, interval=30, blit=True)
# uncomment the following line to save the video in mp4 format. This
# requires either mencoder or ffmpeg to be installed on your system
# anim.save('schrodinger_barrier.mp4', fps=15,
# extra_args=['-vcodec', 'libx264'])
plt.tight_layout()
plt.show()
import pickle
from flask import Flask,request,app,jsonify,url_for,render_template
import nltk, re, string
from nltk.corpus import stopwords, twitter_samples
from sklearn.linear_model import LogisticRegression
# NOTE(review): pickle and nltk.corpus.stopwords are imported twice above
# and below; harmless but worth cleaning up.
import pickle
from sklearn.feature_extraction.text import CountVectorizer
from Utilities import process_tweet
from nltk.corpus import stopwords
app=Flask(__name__)
app.config['SECRET_KEY'] = 'mysecretkey'
# Load the pre-trained logistic-regression model and fitted CountVectorizer.
logistic=pickle.load(open('model.pkl','rb'))
cv=pickle.load(open('cv.pkl','rb'))
def predict_sentiment(tweet):
    """Classify the sentiment of a raw tweet string.

    The tweet is cleaned with ``process_tweet``, vectorised with the fitted
    CountVectorizer ``cv`` and scored by the fitted logistic-regression
    model ``logistic``.

    Parameters
    ----------
    tweet : str
        Raw tweet text.

    Returns
    -------
    str
        Human-readable sentiment label.
    """
    tweet = process_tweet(tweet)
    features = cv.transform([tweet])
    # Predict once instead of re-running the model for every branch.
    label = logistic.predict(features)
    if label == 1:
        sentiment = 'Positive Sentiment'
    elif label == 0:
        # Fix: the original user-facing message was misspelled 'Negetive'.
        sentiment = 'Negative Sentiment'
    else:
        sentiment = 'Neutral Sentiment'
    return sentiment
@app.route('/',methods=['GET','POST'])
def index():
    """Render the landing page with the tweet-submission form."""
    return render_template('index.html')
@app.route('/prediction',methods=['GET','POST'])
def prediction():
    """Classify the submitted tweet and render the result page."""
    sentiment=predict_sentiment(request.form['tweet'])
    # Debug output; consider switching to logging in production.
    print(sentiment)
    return render_template('prediction.html', prediction_text="Your tweet is of {}".format(sentiment))
if __name__=="__main__":
    # Debug mode: auto-reload and verbose errors; disable in production.
    app.run(debug=True)
| Sourav9827/Sentiment-Analysis | app.py | app.py | py | 1,224 | python | en | code | 1 | github-code | 36 |
import sys
input = lambda : sys.stdin.readline().strip()
N = int(input())
# # 480 -> 408 : drop multiples of 2 with a bit operation
# a = [i&1 for i in range(N+1)]
# a[1] = 0
# # 640 -> 480 : loop bound N -> square root of N
# for i in range(3, int(N**0.5)+1, 2):
# if a[i]:
# # 408 -> 392 : mark from 2*i -> mark from i*i
# for j in range(i*i, N+1, i):
# a[j] = 0
# sosu_list = [2]
# for i in range(N+1):
# if a[i] == True:
# sosu_list.append(i)
# answer, temp, e = 0, 0, 0
# for i in range(len(sosu_list)):
# while temp<N and e<len(sosu_list):
# # 1600 -> 772 : sum -> running prefix sum
# temp += sosu_list[e]
# e += 1
# if temp==N:
# answer += 1
# # 772 -> 640 : recomputing temp -> two pointers
# temp -= sosu_list[i]
# 408 -> 208 : bit masking
# Packed sieve of Eratosthenes: one bit per number, 8 numbers per byte.
a = [255 for _ in range(N//8 +1)]
def is_prime(n):
    # True if bit n of the packed sieve is still set.
    return True if a[n>>3] & (1<<(n&7)) else False
def set_composite(n):
    # Clear bit n of the packed sieve (mark n as composite).
    a[n>>3] &= ~(1 << (n&7))
set_composite(0)
set_composite(1)
for i in range(2, int(N**(1/2))+1):
    if is_prime(i):
        for j in range(i*i, N+1, i):
            set_composite(j)
# Unpack the sieve bits back into an ordered list of primes <= N.
sosu_list = list()
for i,sosu in enumerate(a):
    for j in range(8):
        if sosu & (1 << j) and i*8+j <= N:
            sosu_list.append(i*8+j)
# Two-pointer sliding window: count runs of consecutive primes summing to N.
answer, temp, e = 0, 0, 0
for i in range(len(sosu_list)):
    while temp<N and e<len(sosu_list):
        temp += sosu_list[e]
        e += 1
    if temp==N:
        answer += 1
    temp -= sosu_list[i]
print(answer)
19739912139 | from __future__ import unicode_literals
import urllib
from vigilo.vigiconf.lib.confclasses.validators import arg, String, List
from vigilo.vigiconf.lib.confclasses.test import Test
from vigilo.common.gettext import l_
class NagiosPlugin(Test):
    """Generic test for using an external Nagios plugin."""
    @arg(
        'name', String,
        l_('Display name'),
        l_("""
            Name to display in the GUI.
            This setting also controls the name of the service
            created in Nagios (service_description).
        """)
    )
    @arg(
        'command', String,
        l_('Command'),
        l_("Command to execute to call the plugin (check_command)")
    )
    @arg(
        'metrics', List(types=String),
        l_('Metrics'),
        l_("List of metrics returned by the plugin that should be graphed")
    )
    @arg(
        'unit', String,
        l_('Unit'),
        l_("Unit used by the plugin's metrics")
    )
    def add_test(self, name, command, metrics=(), unit=''):
        """Declare the Nagios service and, optionally, its metrology.

        Registers an external supervision service running ``command`` and,
        when ``metrics`` is non-empty, a perfdata handler plus a graph for
        every listed metric.
        """
        # Service
        self.add_external_sup_service(name, command)
        # Metrology (performance data)
        if not metrics:
            return
        rrd_metrics = []
        for metric in metrics:
            # Build a unique name for the RRD file that will store the
            # performance data, following the pattern
            # "NagiosPlugin-<URL-encoded service>@<metric>"; including the
            # service name makes the file name unique.
            # NOTE(review): urllib.quote_plus is Python 2 only (moved to
            # urllib.parse in Python 3) -- this module targets Python 2.
            rrd = "NagiosPlugin-%s@%s" % (urllib.quote_plus(name), metric)
            self.add_perfdata_handler(name, rrd, metric, metric,
                                      rra_template='discrete')
            rrd_metrics.append(rrd)
        # A graph is automatically created with all the metrics.
        self.add_graph("Plugin-%s" % name, rrd_metrics, 'lines', unit)
# vim:set expandtab tabstop=4 shiftwidth=4:
| vigilo/vigiconf | src/vigilo/vigiconf/tests/all/NagiosPlugin.py | NagiosPlugin.py | py | 1,918 | python | fr | code | 3 | github-code | 36 |
1653425451 | import numpy
import matplotlib.pyplot as plt
import pylab
import dcf
import utility as util
import logistic_regression as lr
import svm
from tqdm import tqdm
from copy import deepcopy
from preprocessing import preprocess_Z_score
import matplotlib
# ======================================== FEATURES plots ==========================================
def plot_features_distr(D, labels, features, gau=False):
    """Plot and save a per-class histogram for every feature.

    Parameters
    ----------
    D : numpy.ndarray
        Dataset, one row per feature, one column per sample.
    labels : numpy.ndarray
        Per-sample class labels (0 = male, 1 = female).
    features : list
        Feature names, one per row of ``D``.
    gau : bool
        Whether the data were gaussianized (only affects the output path).
    """
    n_features = len(features)
    _gau = "gau-" if gau else ""
    males = D[:, labels == 0]
    females = D[:, labels == 1]
    bins = 30
    for feature in range(n_features):
        plt.Figure()
        plt.xlabel(features[feature])
        dataset_m = males[feature, :]
        dataset_f = females[feature, :]
        plt.hist(dataset_m, bins=bins, density=True, label='male', alpha=0.4)
        plt.hist(dataset_f, bins=bins, density=True, label='female', alpha=0.4)
        plt.legend()
        # NOTE(review): with gau=True this writes into a "gau-" *directory*
        # ("features/gau-/<name>.png"); the prefix was probably meant for
        # the file name -- confirm the intended path layout.
        plt.savefig(f"./plots/features/{_gau}/{features[feature]}.png", format="png")
        plt.show()
def plot_relation_beetween_feautures(D, labels, features):
    """Scatter-plot every pair of distinct features, split by gender class.

    Parameters
    ----------
    D : numpy.ndarray
        Dataset, one row per feature, one column per sample.
    labels : numpy.ndarray
        Per-sample class labels (0 = male, 1 = female).
    features : list
        Feature names, one per row of ``D``.
    """
    n_features = len(features)
    males = D[:, labels == 0]
    females = D[:, labels == 1]
    for featureA in range(n_features):
        for featureB in range(featureA, n_features):
            if featureA == featureB:
                continue
            plt.figure()
            # Fix: axis labels were taken from the class-label array
            # instead of the feature-name list.
            plt.xlabel(features[featureA])
            plt.ylabel(features[featureB])
            plt.scatter(males[featureA, :], males[featureB, :], label='Male', alpha=0.4)
            # Fix: the female y-values were taken from the male matrix.
            plt.scatter(females[featureA, :], females[featureB, :], label='Female', alpha=0.4)
            plt.legend()
            plt.show()
# ============================================ CORRELATION between features plots ======================================================
def pearson_coeff(x, y):
    """
    Given two arrays evaluate the absolute Pearson correlation coefficient.

    Parameters
    ---------
    x: numpy.array
        first array
    y: numpy.array
        second array

    Returns
    -------
    float in [0, 1].
    """
    # Fix: the original mixed numpy.cov (ddof=1) with numpy.var (ddof=0),
    # which scales the coefficient by n/(n-1) and can push it above 1.
    # Taking the variances from the same covariance matrix keeps the
    # normalisation consistent.
    cov_matrix = numpy.cov(x, y)
    cov = cov_matrix[0][1]
    x_var = cov_matrix[0][0]
    y_var = cov_matrix[1][1]
    return numpy.abs(cov / (numpy.sqrt(x_var) * numpy.sqrt(y_var)))
def plot_heatmap(D, features, color):
    """
    Plot the heatmap of a given dataset. This heat map will show the pearson coefficient between all the feauters.

    Parameters
    ---------
    D: dataset (one row per feature)
    features: list of feature names, one per row of D
    color: an optional value with the color of the heatmap
    """
    n_features = len(features)
    coeffs = numpy.zeros((n_features, n_features))
    # evaluate the pearson coefficient for each pair of features
    for i in range(n_features):
        for j in range(n_features):
            coeffs[i][j] = pearson_coeff(D[i, :], D[j, :])
    # plot the heat map
    fig, ax = plt.subplots()
    im = ax.imshow(coeffs, interpolation='nearest', cmap=color)
    plt.setp(ax.get_xticklabels(), rotation=45, ha="right", rotation_mode="anchor")
    # Loop over data dimensions and create text annotations.
    for i in range(len(coeffs)):
        for j in range(len(coeffs)):
            text = ax.text(j, i, numpy.around(coeffs[i, j],2),
                           ha="center", va="center", color="w")
    ax.set_title("Heat map")
    fig.tight_layout()
    plt.show()
# ================================================= MIN DCFs Plots ============================================================================
def compare_min_DCF_logreg(DTR, DTE, LTR, LTE, applications, quadratic=False, preprocessing=False, weighted=False):
    """Plot evaluation-set minDCF vs lambda next to stored validation results.

    For each application (pi, Cfn, Cfp) the evaluation minDCFs are computed
    with ``lr.compute_minDCF_for_lambda`` and drawn as solid lines, while the
    validation results loaded via ``lr.load_results`` are drawn dashed.
    Returns the lambda grid and a dict mapping application -> minDCFs.
    """
    lambdas = [1e-6, 2e-6, 5e-6, 1e-5, 2e-5, 5e-5, 1e-4, 2e-4, 5e-4, 1e-3, 2e-3, 5e-3, 8e-3, 1e-2, 2e-2, 5e-2, 1e-1, 0.3, 0.5, 1, 5, 10, 50, 100]
    app_labels = ['minDCF(pi=0.5)', 'minDCF(pi=0.1)', 'minDCF(pi=0.9)']
    quadratic_ = 'quadratic' if quadratic else 'linear'
    colors = ['b', 'r', 'g']
    params = {
        'weighted' : weighted
    }
    max_y = 0
    DCFs_dict = dict()
    file_prefix = lr.compute_filename_prefix(quadratic, preprocessing, weighted)
    train_minDCFs, train_lambdas = lr.load_results(file_prefix)
    PATH = f"./plots/LogReg/experimental/{file_prefix}-minDCF.png"
    for i, application in enumerate(applications):
        pi, Cfn, Cfp = application
        params['priors'] = [pi, 1-pi]
        DCFs = lr.compute_minDCF_for_lambda(DTR, DTE, LTR, LTE, application, lambdas, quadratic, params)
        DCFs_dict[application] = DCFs
        # Track the largest DCF over both curves to size the y-axis.
        max_y = max(max_y, numpy.amax(numpy.hstack((train_minDCFs[application], DCFs))))
        plt.plot(train_lambdas, train_minDCFs[application], color=colors[i], label=f"{app_labels[i]} [Val]", linestyle='dashed')
        plt.plot(lambdas, DCFs, color=colors[i], label=f"{app_labels[i]} [Eval]")
    plt.ylim(0, max_y + 0.05)
    plt.xscale('log')
    plt.title(f"DCF {quadratic_} logistic regression")
    plt.xlabel('lambda')
    plt.ylabel('DCF')
    plt.legend()
    plt.savefig(PATH, format='png')
    plt.show()
    return lambdas, DCFs_dict
def plot_min_DCF_logreg(folds, folds_labels, k, applications, quadratic=False, preprocessing=False, weighted=False):
    """K-fold cross-validated minDCF vs lambda for (quadratic) logistic regression.

    Runs ``util.k_folds`` with ``lr.logreg`` or ``lr.quadratic_logreg`` for
    every lambda, computes minDCF per application, plots and saves the curves.
    Returns the lambda grid and a dict mapping application -> list of minDCFs.
    """
    lambdas = [1e-6, 2e-6, 5e-6, 1e-5, 2e-5, 5e-5, 1e-4, 2e-4, 5e-4, 1e-3, 2e-3, 5e-3, 8e-3, 1e-2, 2e-2, 5e-2, 1e-1, 0.3, 0.5, 1, 5, 10, 50, 100]
    #lambdas = [1e-6, 1e-5, 1e-4, 1e-3, 1e-2, 5e-2, 1e-1, 0.3, 0.5, 1, 5, 10]
    app_labels = ['minDCF(pi=0.5)', 'minDCF(pi=0.1)', 'minDCF(pi=0.9)']
    colors = ['b', 'r', 'g']
    max_y = 0
    quadratic_ = "quadratic" if quadratic else "linear"
    file_prefix = lr.compute_filename_prefix(quadratic, preprocessing, weighted)
    PATH = f"./plots/LogReg/{file_prefix}-minDCF.png"
    DCFs_dict = {}
    # NOTE(review): max_y is initialised twice (also a few lines above).
    max_y = 0
    for i, application in enumerate(applications):
        DCFs = []
        pi, Cfn, Cfp = application
        classPriors = [pi, 1-pi]
        for l in tqdm(lambdas):
            if not quadratic:
                STE = util.k_folds(folds, folds_labels, k, lr.logreg, priors=classPriors, lambda_=l, preprocessing=preprocessing, weighted=weighted)
            else:
                STE = util.k_folds(folds, folds_labels, k, lr.quadratic_logreg, priors=classPriors, lambda_=l, preprocessing=preprocessing, weighted=weighted)
            scores = numpy.hstack(STE)
            DCF = dcf.compute_min_DCF(scores, numpy.hstack(folds_labels), pi, Cfn, Cfp)
            max_y = max(max_y, DCF)
            DCFs.append(DCF)
        DCFs_dict[application] = DCFs
        plt.plot(lambdas, DCFs, color=colors[i], label=app_labels[i])
    plt.ylim(0, max_y+0.1)
    plt.xscale('log')
    plt.title(f"DCF {quadratic_} logistic regression")
    plt.xlabel('lambda')
    plt.ylabel('DCF')
    plt.legend()
    plt.savefig(PATH, format='png')
    plt.show()
    return lambdas, DCFs_dict
# ================================================= MIN DCFs SVM Plots ============================================================================
def compare_min_DCF_svm(DTR, DTE, LTR, LTE, kernel:str, evaluation_points: tuple, balanced: bool, preprocessing: bool):
    """Plot evaluation-set minDCF vs C against stored validation results for an SVM.

    ``evaluation_points`` holds applications (pi, Cfn, Cfp) for the linear and
    poly kernels, or gamma values for the rbf kernel. Returns the C grid and
    a dict mapping evaluation point -> minDCFs.
    """
    # NOTE(review): older, larger C grid kept for reference.
    #Cs = [0.005, 0.02,0.05, 0.10, 0.20, 0.30, 0.5, 0.8, 1, 5, 10, 20, 50]
    Cs = [0.005, 0.05, 0.1, 0.5, 1, 5]
    colors = ['b', 'r', 'g']
    app_labels = ['minDCF(pi=0.5)', 'minDCF(pi=0.1)', 'minDCF(pi=0.9)'] if kernel != 'rbf' else ['log(\u03BB)=-1', 'log(\u03BB)=-2', 'log(\u03BB)=-3']
    balanced_ = "balanced" if balanced else "not balanced"
    file_prefix = svm.compute_filename_prefix(balanced, preprocessing)
    train_minDCFs, train_Cs = svm.load_results(file_prefix, kernel)
    PATH = f"./plots/SVM/experimental/{kernel}-{file_prefix}-minDCF.png"
    max_y = 0
    minDCFs_dict = dict()
    for i, ep in enumerate(evaluation_points):
        # NOTE(review): DCFs is assigned but never used in this loop.
        DCFs = []
        if kernel == 'linear':
            pi, Cfn, Cfp = ep
            params = util.build_params(priors=[pi, 1-pi], balanced=balanced, kernel=kernel)
        elif kernel == 'poly':
            pi, Cfn, Cfp = ep
            params = util.build_params(priors=[pi, 1-pi], balanced=balanced, kernel=kernel, d=2, c=1,)
        elif kernel == 'rbf':
            params = util.build_params(priors=[0.5, 0.5], balanced=balanced, kernel=kernel, gamma=ep)
        minDCFs = svm.compute_minDCF_for_parameter(DTR, DTE, LTR, LTE, ep, Cs, params)
        minDCFs_dict[ep] = minDCFs
        max_y = max(max_y, numpy.amax(numpy.hstack((train_minDCFs[ep], minDCFs))))
        minDCFs = numpy.array(minDCFs).ravel()
        plt.plot(Cs, minDCFs, color=colors[i], label=f"{app_labels[i]} [Eval]")
        train_minDCF = numpy.array(train_minDCFs[ep]).ravel()
        plt.plot(train_Cs, train_minDCF, color=colors[i], label=f"{app_labels[i]} [Val]", linestyle='dashed' )
    plt.ylim(0, max_y+0.05)
    plt.title(f"minDCF for {kernel} SVM ({balanced_})")
    plt.xscale('log')
    plt.xlabel('C')
    plt.ylabel('DCF')
    plt.legend()
    plt.savefig(PATH, format="png")
    plt.show()
    return Cs, minDCFs_dict
def plot_min_DCF_svm(folds, folds_labels, k, applications, balanced=False, preprocessing=None):
    """K-fold cross-validated minDCF vs C for a linear SVM.

    Returns the C grid and a dict mapping application -> array of minDCFs.
    """
    balanced_ = "balanced" if balanced else "not balanced"
    preprocessing_ = preprocessing if preprocessing else "raw"
    PATH = f"./plots/SVM/{preprocessing_}-linear-{balanced_}-minDCF.png"
    Cs = [0.005, 0.02,0.05, 0.10, 0.20, 0.30, 0.5, 0.8, 1, 5, 10, 20, 50]
    colors = ['b', 'r', 'g']
    app_labels = ['minDCF(pi=0.5)', 'minDCF(pi=0.1)', 'minDCF(pi=0.9)']
    minDCFs_dict = {}
    max_y = 0
    for i, application in enumerate(applications):
        DCFs = []
        pi, Cfn, Cfp = application
        # NOTE(review): classPriors is computed but never passed on.
        classPriors = [pi, 1-pi]
        for C in tqdm(Cs):
            scores = util.k_folds(folds, folds_labels, k, svm.train_SVM_linear,SVM=True, C = C, balanced=balanced, preprocessing=preprocessing)
            scores = numpy.hstack(scores)
            minDCF = dcf.compute_min_DCF(scores, numpy.hstack(folds_labels), pi, Cfn, Cfp)
            DCFs.append(minDCF)
        DCFs = numpy.array(DCFs)
        minDCFs_dict[application] = DCFs.ravel()
        plt.plot(Cs, DCFs.ravel(), color=colors[i], label=app_labels[i])
    plt.ylim(0, 1)
    plt.title(f"minDCF for linear SVM ({balanced_})")
    plt.xscale('log')
    plt.xlabel('C')
    plt.ylabel('DCF')
    plt.legend()
    plt.savefig(PATH, format="png")
    plt.show()
    return Cs, minDCFs_dict
def plot_min_DCF_poly_svm(folds, folds_labels, k, applications, degree=2.0, balanced=False, preprocessing=None):
    """K-fold cross-validated minDCF vs C for a polynomial-kernel SVM.

    Returns the C grid and a dict mapping application -> array of minDCFs.
    """
    balanced_ = "balanced" if balanced else "not balanced"
    preprocessing_ = "z-norm" if preprocessing else "raw"
    PATH = f"./plots/SVM/{preprocessing_}-poly{int(degree)}-{balanced_}-minDCF.png"
    Cs = [0.005, 0.05, 0.1, 0.5, 1, 5]
    colors = ['b', 'r', 'g']
    app_labels = ['minDCF(pi=0.5)', 'minDCF(pi=0.1)', 'minDCF(pi=0.9)']
    minDCFs_dict = {}
    for i, application in enumerate(applications):
        DCFs = []
        pi, Cfn, Cfp = application
        # NOTE(review): classPriors is computed but never passed on.
        classPriors = [pi, 1-pi]
        for C in tqdm(Cs):
            scores = util.k_folds(folds, folds_labels, k, svm.train_non_linear_SVM, SVM=True, kernel='poly', C=C, d=degree, c=1, balanced=balanced, preprocessing=preprocessing)
            scores = numpy.hstack(scores)
            minDCF = dcf.compute_min_DCF(scores, numpy.hstack(folds_labels), pi, Cfn, Cfp)
            DCFs.append(minDCF)
        DCFs = numpy.array(DCFs)
        minDCFs_dict[application] = DCFs.ravel()
        # NOTE(review): the axis/label calls below run once per application;
        # redundant but harmless.
        plt.ylim(0, 1)
        plt.title(f"DCF for Poly(d={int(degree)}) SVM ({balanced_})")
        plt.xscale('log')
        plt.xlabel('C')
        plt.ylabel('DCF')
        plt.plot(Cs, DCFs.ravel(), color=colors[i], label=app_labels[i])
    plt.legend()
    plt.savefig(PATH, format="png")
    plt.show()
    return Cs, minDCFs_dict
def plot_min_DCF_RBFsvm(folds, folds_labels, k, gammas, balanced=False, preprocessing=False):
    """K-fold cross-validated minDCF vs C for an RBF-kernel SVM, one curve per gamma.

    The application is fixed to (0.5, 1, 1). Returns the C grid and a dict
    mapping gamma -> array of minDCFs.
    """
    balanced_ = "balanced" if balanced else "not-balanced"
    preprocessing_ = "z-norm" if preprocessing else "raw"
    PATH = f"./plots/SVM/{preprocessing_}-RBF-{balanced_}-minDCF.png"
    Cs = [0.005, 0.01,0.02,0.05, 0.08, 0.10, 0.20, 0.30, 0.5, 0.8, 1, 3, 5, 10, 20, 50]
    colors = ['b', 'r', 'g']
    app_labels = ['log(\u03B3)=-1', 'log(\u03B3)=-2', 'log(\u03B3)=-3']
    minDCFs_dict = {}
    for i,gamma in enumerate(gammas):
        DCFs = []
        pi, Cfn, Cfp = (0.5, 1, 1)
        # NOTE(review): classPriors is computed but never passed on.
        classPriors = [pi, 1-pi]
        for C in tqdm(Cs):
            scores = util.k_folds(folds, folds_labels, k, svm.train_non_linear_SVM, SVM=True, kernel='rbf', gamma=gamma, C=C, balanced=balanced, preprocessing=preprocessing)
            scores = numpy.hstack(scores)
            minDCF = dcf.compute_min_DCF(scores, numpy.hstack(folds_labels), pi, Cfn, Cfp)
            DCFs.append(minDCF)
        DCFs = numpy.array(DCFs)
        minDCFs_dict[gamma] = DCFs.ravel()
        # NOTE(review): the axis/label calls below run once per gamma;
        # redundant but harmless.
        plt.ylim(0, 1)
        plt.title("DCF for RBF kernel SVM")
        plt.xscale('log')
        plt.xlabel('C')
        plt.ylabel('DCF')
        plt.plot(Cs, DCFs.ravel(), color=colors[i], label=app_labels[i])
    plt.legend()
    plt.savefig(PATH, format="png")
    plt.show()
    return Cs, minDCFs_dict
# ================================================= MIN DCFs GMM Plots ============================================================================
def plot_minDCF_GMM_hist(DCFs_list: list, G: int, labels: list, filename='plot', experimental= False, title="", colors=['lightsalmon', 'orangered', 'gold', 'orange']):
    """Grouped bar chart of GMM minDCFs, one bar group per component count 2^g.

    Parameters
    ----------
    DCFs_list : list
        One list of DCF values per configuration (bar series).
    G : int
        Number of component doublings; x labels are 1, 2, ..., 2^(G-1).
    labels : list
        Legend label for each series in ``DCFs_list``.
    """
    x_labels = list(map(lambda val:2**val, range(G)))
    x = numpy.arange(len(x_labels))
    width = 0.18
    _experimental = "experimental/" if experimental else ""
    # NOTE(review): the ``filename`` parameter is unused and the output name
    # is hard-coded to "(unknown).png" -- likely meant to interpolate
    # ``filename``; confirm.
    path = f"./plots/GMM/{_experimental}(unknown).png"
    n_hists = len(DCFs_list)
    # Symmetric per-series offsets so the bar groups are centred on each tick.
    offsets = list( range(-int(n_hists/2) - 1, int(n_hists/2) + 2, 2))
    print("n_hist:", n_hists, "offsets", offsets)
    fig, ax = plt.subplots()
    for DCFs, offset, label, color in zip(DCFs_list, offsets, labels, colors):
        ax.bar(x + offset*width/2, DCFs, width, label=label, color=color)
    ax.set_ylabel('DCF')
    ax.set_xticks(x, x_labels)
    ax.legend()
    ax.set_title(title)
    fig.tight_layout()
    plt.savefig(path, format='png')
    plt.show()
# ================================================================ DET Plot ===================================================================
def plot_DET(llrs:list, L: numpy.array, plot_labels:list, colors: list =['r', 'b', 'm', 'g', 'y'], save_figure:bool = True, training:bool = True, multiple_labels: bool = False):
    """Draw one DET curve (FNR vs FPR, log-log) per score array in ``llrs``.

    When ``multiple_labels`` is True, ``L`` is a list with one label array
    per score array; otherwise the single label array is shared.
    """
    training_ = "training" if training else "experimental"
    models = "-".join(plot_labels)
    PATH = f"./plots/evaluation/{training_}/DET_{models}.png"
    fig,ax = plt.subplots()
    if not multiple_labels:
        for llr, plot_label, color in zip(llrs, plot_labels, colors):
            print(plot_label)
            DET_points_FNR, DET_points_FPR = compute_DET_points(llr, L)
            ax.plot(DET_points_FNR, DET_points_FPR, color=color, label=plot_label)
    else:
        for llr, lbl, plot_label, color in zip(llrs, L, plot_labels, colors):
            DET_points_FNR, DET_points_FPR = compute_DET_points(llr, lbl)
            ax.plot(DET_points_FNR, DET_points_FPR, color=color, label=plot_label)
    ax.set_xlabel("FPR")
    ax.set_ylabel("FNR")
    ax.set_xscale('log')
    ax.set_yscale('log')
    ax.legend()
    if save_figure:
        plt.savefig(PATH, format='png')
    plt.show()
def compute_DET_points(llr, L):
    """Compute (FNR, FPR) pairs for every threshold over the scores ``llr``.

    Thresholds are -inf, each sorted score, and +inf, giving
    ``len(L) + 2`` points for a DET curve.
    """
    tresholds = numpy.concatenate(
        [numpy.array([-numpy.inf]), numpy.sort(llr), numpy.array([numpy.inf])])
    n_negatives = (L == 0).sum()
    n_positives = (L == 1).sum()
    DET_points_FNR = numpy.zeros(L.shape[0] + 2)
    DET_points_FPR = numpy.zeros(L.shape[0] + 2)
    for idx, t in enumerate(tresholds):
        predictions = (llr > t).astype(int)
        true_positives = numpy.bitwise_and(predictions == 1, L == 1).sum()
        false_positives = numpy.bitwise_and(predictions == 1, L == 0).sum()
        DET_points_FNR[idx] = 1 - true_positives / n_positives
        DET_points_FPR[idx] = false_positives / n_negatives
    return DET_points_FNR, DET_points_FPR
# =============================================== ROC Plots ==================================================
def plot_ROC(llrs: list, labels: list, plot_labels: list, save_figure:bool = True, training:bool = True):
    """Draw one ROC curve (TPR vs FPR) per score array in ``llrs``.

    ``labels`` is the shared label array used for every score array.
    """
    training_ = "training" if training else "experimental"
    models = "-".join(plot_labels)
    PATH = f"./plots/evaluation/{training_}/ROC_{models}.png"
    for llr, plot_label in zip(llrs, plot_labels):
        ROC_points_TPR, ROC_points_FPR = compute_ROC_points(llr, labels)
        plt.plot(ROC_points_FPR, ROC_points_TPR, label=plot_label)
    plt.xlabel("FPR")
    plt.ylabel("TPR")
    plt.legend()
    plt.grid()
    if save_figure:
        plt.savefig(PATH, format='png')
    plt.show()
def compute_ROC_points(llr, L):
    """Compute (TPR, FPR) pairs for every threshold over the scores ``llr``.

    Thresholds are -inf, each sorted score, and +inf, giving
    ``len(L) + 2`` points for a ROC curve.
    """
    tresholds = numpy.concatenate(
        [numpy.array([-numpy.inf]), numpy.sort(llr), numpy.array([numpy.inf])])
    n_negatives = (L == 0).sum()
    n_positives = (L == 1).sum()
    ROC_points_TPR = numpy.zeros(L.shape[0] + 2)
    ROC_points_FPR = numpy.zeros(L.shape[0] + 2)
    for idx, t in enumerate(tresholds):
        predictions = (llr > t).astype(int)
        ROC_points_TPR[idx] = numpy.bitwise_and(predictions == 1, L == 1).sum() / n_positives
        ROC_points_FPR[idx] = numpy.bitwise_and(predictions == 1, L == 0).sum() / n_negatives
    return ROC_points_TPR, ROC_points_FPR
# =========================================================== Bayes Error Plot =============================================================
def bayes_error_plot(llrs: list, labels: list, plot_labels: list, log_regs: list, n_points:int = 100, colors: list = ['r', 'b', 'g', 'm', 'y'], save_figure: bool = True, training:bool = True, calibrated: bool = False, multiple_labels:bool = False):
    """Draw a Bayes Error Plot (min/act DCF vs. effective prior log-odds) per model.

    When multiple_labels is True each model has its own label array in *labels*;
    otherwise one label array is shared by all score arrays in *llrs*.
    Note: *colors* is a mutable default argument, but it is only read, never
    mutated, so this is safe here.
    """
    training_ = "training" if training else "experimental"
    models = "-".join(plot_labels)
    calibrated_ = "-calibrated" if calibrated else ""
    PATH = f"./plots/evaluation/{training_}/BEP_{models}{calibrated_}.png"
    # Track the tallest curve so the y-axis ticks can be sized to the data.
    max_y = 0
    if not multiple_labels:
        # One shared label array for every model.
        for llr, plot_label, log_reg, color in zip(llrs, plot_labels, log_regs, colors):
            p_array = numpy.linspace(-3, 3, n_points)
            minDCFs = dcf.bayes_error_points(p_array, llr, labels, True, log_reg)
            max_y = max(max_y, numpy.max(minDCFs))
            actDCFs = dcf.bayes_error_points(p_array, llr, labels, False, log_reg)
            max_y = max(max_y, numpy.max(actDCFs))
            # minDCF dashed, actDCF solid, same color per model.
            plt.plot(p_array, minDCFs, label=f"{plot_label} minDCF", color=color, linestyle='dashed')
            plt.plot(p_array, actDCFs, label=f"{plot_label} actDCF", color=color)
    else:
        # Per-model label arrays: zip labels alongside the scores.
        for llr, lbl, plot_label, log_reg, color in zip(llrs, labels, plot_labels, log_regs, colors):
            p_array = numpy.linspace(-3, 3, n_points)
            minDCFs = dcf.bayes_error_points(p_array, llr, lbl, True, log_reg)
            max_y = max(max_y, numpy.max(minDCFs))
            actDCFs = dcf.bayes_error_points(p_array, llr, lbl, False, log_reg)
            max_y = max(max_y, numpy.max(actDCFs))
            plt.plot(p_array, minDCFs, label=f"{plot_label} minDCF", color=color, linestyle='dashed')
            plt.plot(p_array, actDCFs, label=f"{plot_label} actDCF", color=color)
    title = "Bayes Error Plot"
    # Tick every 0.05 up to the highest curve (+margin), capped at 1.
    plt.yticks(numpy.arange(0, min(max_y+0.1, 1), 0.05))
    plt.title(title)
    plt.legend()
    if save_figure:
        plt.savefig(PATH, format='png')
    plt.show()
| srrmtt/GenderVoiceDetection | plot.py | plot.py | py | 19,657 | python | en | code | 0 | github-code | 36 |
13252818491 | import math
class magicChecker:
    """Validate that a square is magic.

    The square is a dict keyed by 1-based (row, col) tuples; *order* is the
    side length n.  A square is magic when every row, column and both
    diagonals sum to the magic constant n * (n^2 + 1) / 2.
    """

    def __init__(self, square, order):
        self.square = square
        self.n = order
        # Magic constant, truncated to int for comparison against integer sums.
        self.mag_num = int(self.findMagicNumber())
        self.square_multi = {}

    def findMagicNumber(self):
        """Return the magic constant n * (n^2 + 1) / 2 as a float."""
        return (self.n / 2.) * (math.pow(self.n, 2) + 1)

    def checkIfMultiMagic(self, power):
        ''' Construct New Square '''
        # Fill self.square_multi with every entry raised to *power*.
        for cell in self.square.keys():
            self.square_multi[cell] = int(math.pow(self.square[cell], power))

    def checkIfMagic(self):
        """Return True when all rows, columns and both diagonals hit the magic sum."""
        for k in range(1, self.n + 1):
            if not self.checkRow(k):
                return False
        for k in range(1, self.n + 1):
            if not self.checkCol(k):
                return False
        return self.checkDiag()

    def checkCol(self, k):
        """True when column *k* sums to the magic number."""
        total = sum(self.square[row, k] for row in range(1, self.n + 1))
        return total == self.mag_num

    def checkRow(self, k):
        """True when row *k* sums to the magic number."""
        total = sum(self.square[k, col] for col in range(1, self.n + 1))
        return total == self.mag_num

    def checkDiag(self):
        """True when both the main and the anti-diagonal sum to the magic number."""
        main_diag = sum(self.square[k, k] for k in range(1, self.n + 1))
        anti_diag = sum(self.square[k, self.n + 1 - k] for k in range(1, self.n + 1))
        return main_diag == self.mag_num and anti_diag == self.mag_num
class magicCheckerMulti(magicChecker):
    """Checker for the *power*-th multimagic square derived from *square*."""

    def __init__(self, square, order, power):
        # NOTE: deliberately does not call super().__init__(); the parent's
        # magic-number formula is still reused through self.findMagicNumber().
        self.n = order
        self.mag_num = int(self.findMagicNumber())
        # Store the square with every entry raised to *power*.
        self.square = {cell: int(math.pow(value, power)) for cell, value in square.items()}
| SiriusTux/MagicSquare | magicChecker.py | magicChecker.py | py | 2,131 | python | en | code | 0 | github-code | 36 |
919257196 | import requests
from bs4 import BeautifulSoup
import re
from domains import CONTENT_AREA
from emoji import emojize
from urllib.parse import urlparse
# ChatGPT d2ee59b7-b368-4a5f-b3af-2e33b7f33b4a
# Sample article URLs used for manual testing of the scraper helpers below.
# NOTE(review): "https://backlinko.com/actionable-seo-tips" appears twice in
# this list — presumably unintentional; confirm before deduplicating.
example_url = [
    "https://backlinko.com/actionable-seo-tips",
    "https://www.semrush.com/blog/seo-tips/",
    "https://www.wordstream.com/blog/ws/2021/03/05/seo-strategy",
    "https://ahrefs.com/blog/seo-tips/",
    "https://backlinko.com/actionable-seo-tips",
    "https://developers.google.com/search/docs/fundamentals/seo-starter-guide",
    "https://www.pcmag.com/how-to/easy-but-powerful-seo-tips-to-boost-traffic-to-your-website",
    "https://www.searchenginejournal.com/seo-tips/374673/",
    "https://www.bdc.ca/en/articles-tools/marketing-sales-export/marketing/seo-small-businesses-10-ways-rank-higher",
]
def get_status_code(url):
    """Return the HTTP status code obtained by GET-ting *url*."""
    return requests.get(url).status_code
def get_domain(url):
    """Return the first dotted label of the URL's host (e.g. "www" or "backlinko")."""
    host = url.split("//")[-1].split("/")[0]
    return host.split(".")[0]
def is_valid_url(url):
    """
    Check if the given URL is valid or not.

    Parameters:
        url (str): The URL to be checked.

    Returns:
        bool: True if the URL is valid, False otherwise.
    """
    pattern = re.compile(
        r"^(https?://)?"  # optional scheme
        r"((([A-Z0-9][A-Z0-9-]{0,61}[A-Z0-9])|localhost)\.)+"  # one or more domain labels
        r"([A-Z]{2,6})"  # top-level domain
        r"(:\d{1,5})?"  # optional port
        r"(\/.*)?$",  # optional path
        re.IGNORECASE,
    )
    return pattern.match(url) is not None
def domain_disclaimer(url):
    """Return a status message telling whether the URL's domain has a configured content area."""
    if get_domain(url) in CONTENT_AREA:
        return emojize(
            ":thumbs_up: Good news! The content area has already been defined, the result should be more valid.",
            variant="emoji_type",
        )
    return emojize(
        ":folded_hands:Content area is undefined, result may not be valid.",
        variant="emoji_type",
    )
def get_title(url):
    """Return the <title> text of the page at *url*, or an error string on failure."""
    try:
        page = requests.get(url)
        return BeautifulSoup(page.content, "html.parser").title.string
    except:
        return "Unable to get title"
def get_description(url):
    """Return the content of the page's <meta name="description"> tag ("" when absent)."""
    try:
        page = requests.get(url)
        soup = BeautifulSoup(page.content, "html.parser")
        description = ""
        # Last matching meta tag wins, exactly as the page declares them in order.
        for tag in soup.find_all("meta"):
            if tag.get("name", None) == "description":
                description = tag.get("content", None)
        return description
    except:
        return "Unable to get description"
def get_content(url):
    """Return the page's main text, using a per-domain content <div> when configured."""
    try:
        # Check if domain is registered
        parsed_url = urlparse(url)
        # For hosts with >= 2 dots (e.g. www.semrush.com) use the second-to-last
        # label ("semrush"); otherwise keep the full netloc ("backlinko.com").
        # NOTE(review): this differs from get_domain() used elsewhere, which
        # always takes the FIRST label — confirm which form CONTENT_AREA keys use.
        domain = (
            parsed_url.netloc.split(".")[-2]
            if parsed_url.netloc.count(".") >= 2
            else parsed_url.netloc
        )
        content_class = CONTENT_AREA.get(domain)
        if content_class:
            # Make request to webpage
            response = requests.get(url)
            # Parse webpage content using Beautiful Soup
            soup = BeautifulSoup(response.content, "html.parser")
            # Get content of webpage using the configured container class
            content = soup.find("div", class_=content_class)
            return content.get_text()
        else:
            # Make request to webpage
            response = requests.get(url)
            # Parse webpage content using Beautiful Soup
            soup = BeautifulSoup(response.content, "html.parser")
            # Fallback: extract the whole <body> when no class is configured
            content = soup.find("body")
            return content.get_text()
    except:
        # Any failure (network, parsing, missing element) is reported as a string.
        return "Unable to get content"
def get_content_with_html(url):
    """Get the content of a webpage with HTML elements"""
    try:
        # Look up an optional per-domain container class, then fetch and parse once.
        content_class = CONTENT_AREA.get(get_domain(url))
        soup = BeautifulSoup(requests.get(url).content, "html.parser")
        if content_class:
            content = soup.find("div", class_=content_class)
        else:
            content = soup.find("body")
        return str(content)
    except:
        return "Unable to get content"
def get_h1(url):
    """Return the text of the first <h1> on the page, or None when missing."""
    try:
        soup = BeautifulSoup(requests.get(url).content, "html.parser")
        first_h1 = soup.find("h1")
        return first_h1.text if first_h1 else None
    except:
        return "Unable to get H1"
def get_headings(content_html):
    """Return every h1/h2/h3 heading text, each prefixed with <H1>/<H2>/<H3>."""
    soup = BeautifulSoup(content_html, "html.parser")
    # Map each heading tag name to its output prefix.
    prefixes = {"h1": "<H1>", "h2": "<H2>", "h3": "<H3>"}
    return [
        f"{prefixes[heading.name]}{heading.text}"
        for heading in soup.find_all(["h1", "h2", "h3"])
    ]
def get_first_parapraph(content):
    """Return the stripped text of the first <p> element in *content*.

    Falls back to a single space when the document has no paragraph.
    """
    soup = BeautifulSoup(content, "html.parser")
    first_paragraph = soup.find("p")
    if first_paragraph:
        return first_paragraph.text.strip()
    # Bug fix: the original had the bare expression `" "` here, which is a
    # no-op statement, so the function silently returned None when no <p>
    # was found.  Return the intended fallback instead.
    return " "
| syahidmid/seoanalysis | scrapers/scrape.py | scrape.py | py | 6,496 | python | en | code | 0 | github-code | 36 |
3985469839 | import os
from skimage import io
import copy
import numpy as np
import random
from glob import glob
import json
from sklearn.preprocessing import MultiLabelBinarizer
import torch
import torch.utils.data as data
from torchvision import transforms, datasets
from src.datasets.root_paths import DATA_ROOTS
# The 43 BigEarthNet land-cover class names used as the multi-label vocabulary.
# Order is significant: it fixes the column order produced by
# MultiLabelBinarizer(classes=CLASSES) in BaseBigEarthNet.train_test_split.
CLASSES = ['Sea and ocean',
       'Coniferous forest',
       'Mixed forest',
       'Moors and heathland',
       'Transitional woodland/shrub',
       'Sparsely vegetated areas',
       'Discontinuous urban fabric',
       'Non-irrigated arable land',
       'Pastures',
       'Complex cultivation patterns',
       'Broad-leaved forest',
       'Water bodies',
       'Land principally occupied by agriculture, with significant areas of natural vegetation',
       'Vineyards',
       'Agro-forestry areas',
       'Industrial or commercial units',
       'Airports',
       'Water courses',
       'Natural grassland',
       'Construction sites',
       'Sclerophyllous vegetation',
       'Peatbogs',
       'Rice fields',
       'Continuous urban fabric',
       'Olive groves',
       'Permanently irrigated land',
       'Mineral extraction sites',
       'Annual crops associated with permanent crops',
       'Dump sites',
       'Green urban areas',
       'Intertidal flats',
       'Bare rock',
       'Fruit trees and berry plantations',
       'Salt marshes',
       'Road and rail networks and associated land',
       'Estuaries',
       'Inland marshes',
       'Sport and leisure facilities',
       'Beaches, dunes, sands',
       'Coastal lagoons',
       'Salines',
       'Port areas',
       'Burnt areas']
class BaseBigEarthNet(data.Dataset):
    """Multi-label BigEarthNet dataset of 12-band Sentinel-2 patches.

    Each sample directory under *root* holds one .tif per band plus a .json
    metadata file whose 'labels' entry lists the land-cover class names.
    """
    NUM_CLASSES = 43      # size of the multi-hot label vector (len(CLASSES))
    MULTI_LABEL = True    # samples can carry several classes at once
    NUM_CHANNELS = 12     # spectral bands stacked per sample
    FILTER_SIZE = 120     # target spatial size every band is resized to
    def __init__(
        self,
        root=DATA_ROOTS["bigearthnet"],
        train=True,
        image_transforms=None,
        seed=42,
    ):
        """Split the samples under *root* (seeded shuffle) and keep one side.

        :param train: keep the 80% training split when True, else the 20% test split
        :param image_transforms: optional callable applied to the stacked image tensor
        """
        super().__init__()
        self.root = root
        self.train = train
        self.image_transforms = image_transforms
        # Dedicated RandomState so the train/test shuffle is reproducible.
        self.rs = np.random.RandomState(seed)
        train_paths, test_paths, train_labels, test_labels = self.train_test_split()
        if train:
            self.paths = train_paths
            self.labels = train_labels
        else:
            self.paths = test_paths
            self.labels = test_labels
        # Independent copy so external mutation of targets cannot affect labels.
        self.targets = copy.deepcopy(self.labels)
    def train_test_split(self, train_frac=0.8):
        """Read every sample's label set, binarize it, and split paths/labels.

        Returns (train_paths, test_paths, train_labels, test_labels).
        """
        all_sample_paths = np.array(os.listdir(self.root))
        num_samples = len(all_sample_paths)
        labels = []
        for i in range(num_samples):
            sample_path = all_sample_paths[i]
            # Each sample folder contains exactly one metadata .json.
            metadata_path = glob(os.path.join(self.root, sample_path, '*.json'))[0]
            # NOTE(review): the file handle from open() is never explicitly closed.
            class_names = set(json.load(open(metadata_path))['labels'])
            labels.append(class_names)
        # Fixed class order (CLASSES) -> stable multi-hot columns across runs.
        encoder = MultiLabelBinarizer(classes=CLASSES, sparse_output=False)
        encoded_labels = encoder.fit_transform(labels)
        num_samples = len(all_sample_paths)
        indices = np.arange(num_samples)
        self.rs.shuffle(indices)
        train_indices = indices[:int(num_samples * train_frac)]
        test_indices = indices[int(num_samples * train_frac):]
        train_paths = all_sample_paths[train_indices]
        test_paths = all_sample_paths[test_indices]
        train_labels = encoded_labels[train_indices]
        test_labels = encoded_labels[test_indices]
        return train_paths, test_paths, train_labels, test_labels
    def __getitem__(self, index):
        """Load all band .tifs of one sample, resize each to 120x120 and stack.

        Returns (image_tensor, multi_hot_label).
        """
        path = self.paths[index]
        label = self.labels[index]
        img_paths = glob(os.path.join(self.root, path, '*.tif'))
        image = []
        for i, img_path in enumerate(img_paths):
            # Bands come at different native resolutions; resize unifies them.
            img = np.asarray(io.imread_collection(img_path), dtype=np.float32) # one of (1, 20, 20), (1, 60, 60), (1, 120, 120)
            resized_img = transforms.Resize(120)(torch.tensor(img))
            image.append(resized_img)
        image = torch.vstack(image) # (12, 120, 120)
        if self.image_transforms:
            image = self.image_transforms(image)
        return image, label
    def __len__(self):
        # Number of samples in the selected split.
        return len(self.paths)
class BigEarthNet(BaseBigEarthNet):
    """Wrapper dataset that yields two views of each sample (for contrastive training)."""
    def __init__(
        self,
        root=DATA_ROOTS["bigearthnet"],
        train=True,
        image_transforms=None,
    ):
        # NOTE(review): super().__init__() is called with all defaults, so the
        # parent scans DATA_ROOTS["bigearthnet"] and builds a split even when a
        # custom root/train/transform is requested — the real work is redone by
        # the inner BaseBigEarthNet below.  Looks like unintended double
        # loading; confirm before changing.
        super().__init__()
        self.dataset = BaseBigEarthNet(
            root=root,
            train=train,
            image_transforms=image_transforms,
        )
    def __getitem__(self, index):
        """Return (index, view1, view2, label, label) for contrastive objectives.

        Fetching the same index twice yields two independently transformed
        views when image_transforms is stochastic.
        """
        img_data, label = self.dataset.__getitem__(index)
        img2_data, _ = self.dataset.__getitem__(index)
        data = [index, img_data.float(), img2_data.float(), label, label]
        return tuple(data)
    def __len__(self):
        # Delegate to the wrapped dataset's split size.
        return len(self.dataset)
| jbayrooti/divmaker | src/datasets/bigearthnet.py | bigearthnet.py | py | 5,152 | python | en | code | 3 | github-code | 36 |
23184346915 | from mcpi.minecraft import Minecraft as MC
# Connect to the running Minecraft server exposed by the mcpi API.
root = MC.create()
# Look up the entity id of the hard-coded player name.
my_id = root.getPlayerEntityId("Jooooook")
print("my_id: ", my_id)
# Current position of that entity (has .x / .y / .z attributes).
my_pos = root.entity.getPos(my_id)
# Per-player coordinate lookups keyed by player name.
# NOTE(review): pos_z and pos_y are created but never filled — presumably
# placeholders for later work; confirm.
pos_x = {}
pos_z = {}
pos_y = {}
pos_x["Jooooook"] = my_pos.x
print(pos_x) | wewo329/workspace | python_workspace/python_study/with_minecraft/getPos.py | getPos.py | py | 242 | python | en | code | 0 | github-code | 36 |
5726494392 | from flask import Flask, jsonify, request
import json
import os
app = Flask(__name__)
# Load data from JSON file, creating an empty store on first run
def load_data():
    """Return all funds from investment_funds.json; seed the file with [] when absent."""
    if not os.path.exists('investment_funds.json'):
        # First run: create the store so later reads/writes always find the file.
        with open('investment_funds.json', 'w') as file:
            json.dump([], file)
        return []
    with open('investment_funds.json') as file:
        return json.load(file)
# create data to JSON file
def create_data(new_fund, investment_funds):
    """Assign a fresh unique id to *new_fund*, append it and persist the store.

    The id is one greater than the largest id already on disk (0 for an empty
    store).  The original scan only bumped the counter when ids happened to be
    encountered in contiguous ascending order, so unsorted or gapped files
    could hand out duplicate ids; leftover debug prints are removed as well.
    """
    with open('investment_funds.json') as file:
        data_file = json.load(file)
    existing_ids = [fund.get('id') for fund in data_file if isinstance(fund.get('id'), int)]
    new_fund['id'] = max(existing_ids, default=-1) + 1
    investment_funds.append(new_fund)
    save_data(investment_funds)
# save data to JSON file
def save_data(investment_funds):
    """Persist the full fund list to investment_funds.json, pretty-printed."""
    with open('investment_funds.json', 'w') as file:
        file.write(json.dumps(investment_funds, indent=2))
# Load initial data
# Module-level in-memory store; every endpoint below reads and mutates this
# list in place, and persists changes through save_data().
investment_funds = load_data()
# Endpoint to retrieve a list of all funds
@app.route('/funds', methods=['GET'])
def get_all_funds():
    """GET /funds — return every stored fund as a JSON array."""
    return jsonify(investment_funds)
# Endpoint to create a new fund
@app.route('/funds', methods=['POST'])
def create_fund():
    """POST /funds — build a fund from the JSON body, persist it, return it with 201."""
    data = request.get_json()
    # Pull exactly the expected fields; absent keys become None.
    fields = ('fund_id', 'fund_name', 'fund_manager', 'description',
              'nav', 'creation_date', 'performance')
    new_fund = {field: data.get(field) for field in fields}
    create_data(new_fund, investment_funds)
    return jsonify(new_fund), 201
# Endpoint to retrieve details of a specific fund using its ID
@app.route('/funds/<int:fund_id>', methods=['GET'])
def get_fund_by_id(fund_id):
    """GET /funds/<id> — return the matching fund, or a 404 error payload.

    The per-iteration debug print of the original has been removed.
    """
    match = next((fund for fund in investment_funds if fund['id'] == fund_id), None)
    if match is not None:
        return jsonify(match)
    return jsonify({'error': 'fund not found'}), 404
# Endpoint to update the performance of a fund using its ID
@app.route('/funds/<int:fund_id>', methods=['PUT'])
def update_fund(fund_id):
    """PUT /funds/<id> — update a fund's 'performance' from the JSON body.

    Returns the updated fund, or a 404 payload when the id is unknown.
    Leftover debug prints from the original have been removed.
    """
    specific_fund = next((fund for fund in investment_funds if fund['id'] == fund_id), None)
    if specific_fund:
        data = request.get_json()
        specific_fund['performance'] = data.get('performance')
        save_data(investment_funds)
        return jsonify(specific_fund)
    return jsonify({'error': 'Fund not found'}), 404
# Endpoint to delete a fund using its ID
@app.route('/funds/<int:fund_id>', methods=['DELETE'])
def delete_fund(fund_id):
    """DELETE /funds/<id> — remove the fund and persist the change.

    Always answers 200, echoing the removed fund ({} when the id was not
    found), matching the original behaviour.  Debug prints removed.
    """
    deleted_fund = {}
    for fund in investment_funds:
        if fund['id'] == fund_id:
            deleted_fund = fund
            investment_funds.remove(fund)
            save_data(investment_funds)
            break
    return jsonify({'message': 'Fund deleted successfully', 'deleted_fund': deleted_fund})
# Start the Flask development server when this module is executed directly.
if __name__ == '__main__':
    app.run(debug=True, port=5001)
| raqif/fund_management_system | app_json.py | app_json.py | py | 3,352 | python | en | code | 0 | github-code | 36 |
12814165506 | def solution(clothes):
answer = 1
clothes_dict = {}
for c in clothes:
clothes_dict[c[1]] = clothes_dict.get(c[1],[])+[c[0]]
for key in clothes_dict.keys():
answer = answer * (len(clothes_dict[key]) + 1)
return answer - 1
print(solution([["yellowhat", "headgear"], ["bluesunglasses", "eyewear"], ["green_turban", "headgear"]]))
print(solution([["crowmask", "face"], ["bluesunglasses", "face"], ["smoky_makeup", "face"]]))
print(solution([["a", "face"], ["b", "face"],
["c", "up"],["d", "up"],
["e", "down"],["f", "down"]]))
# 1 : a b
# 2 : c d
# 3 : e f
# a,b,c,d,e,f => 3C1 2C1
# ac/ad/ae/af/bc/bd/be/bf/ce/cf/de/df => 3C2 2C1 2C1
# ace/acf/ade/aef/bce/bcf/bde/bdf => 3C3 2C1 2C1 2C1 = 8 | Girin7716/PythonCoding | Programmers/Problem_Solving/42578.py | 42578.py | py | 762 | python | en | code | 1 | github-code | 36 |
19534802491 | from functools import wraps
from flask import request, abort, g
from app.models import User
def login_required(f):
    """Ensure a valid auth token is present before invoking the wrapped view.

    OPTIONS (CORS preflight) requests bypass authentication.  On success the
    authenticated user is stored in flask.g.user; otherwise the request is
    aborted with 401.
    """
    @wraps(f)
    def decorated(*args, **kwargs):
        if request.method != 'OPTIONS':
            token = request.headers.get('Authorization')
            user = User.verify_auth_token(token) if token else None
            if not user:
                abort(401)
            g.user = user
        return f(*args, **kwargs)
    return decorated
def roles_required(*role_names):
    """Ensure the requester is authenticated AND holds all of *role_names*.

    OPTIONS requests bypass the checks.  A missing/invalid token aborts with
    401; an authenticated user lacking any required role aborts with 403.
    """
    def wrapper(func):
        @wraps(func)
        def decorated_view(*args, **kwargs):
            if request.method != 'OPTIONS':
                token = request.headers.get('Authorization')
                user = User.verify_auth_token(token) if token else None
                if not user:
                    abort(401)
                g.user = user
                if not user.has_roles(*role_names):
                    # Authenticated but not authorized for these roles.
                    abort(403)
            # Call the actual view
            return func(*args, **kwargs)
        return decorated_view
    return wrapper
| Zokormazo/ngLlery-backend | app/decorators.py | decorators.py | py | 1,456 | python | en | code | 0 | github-code | 36 |
25056282946 | from django import forms
from django.core.exceptions import ValidationError
from manager.models import Accountancy
from manager.wallet_operations import wallet_choice, wallet_data_parse, change_wallet_balance
class AccountancyForm(forms.ModelForm):
    """ModelForm that validates an accountancy entry and adjusts the linked wallet balance."""
    class Meta:
        model = Accountancy
        fields = ()
    def clean(self):
        """Validate the amount and pre-compute the updated wallet object.

        Side effect: stores the adjusted wallet on self.wallet_obj so save()
        can persist it.
        """
        amount = float(self.data["amount"])
        if amount < 0:
            raise ValidationError("Amount can't be negative.")
        wallet_type, previous_amount = wallet_data_parse(self.data)
        _, self.wallet_obj = wallet_choice(
            wallet_type,
            self.instance.card_id or self.instance.cash_id or self.instance.cryptocurrency_id
        )
        # NOTE(review): due to conditional-expression precedence this line
        # means `amount = (amount - float(previous_amount)) if previous_amount
        # else 0` — i.e. amount becomes 0 (not `amount - 0`) when there is no
        # previous amount.  Confirm that is the intended delta semantics.
        amount = amount - float(previous_amount) if previous_amount else 0
        self.wallet_obj = change_wallet_balance(
            self.instance.IO, self.wallet_obj, amount
        )
        return super().clean()
    def save(self, commit=True):
        """Persist the accountancy record and the adjusted wallet.

        NOTE(review): clean() is invoked again here (Django normally runs it
        during validation), and super().save(commit) at the end triggers a
        second save after accountancy.save() — confirm the double invocation
        is intentional.
        """
        accountancy = super(AccountancyForm, self).save(commit=False)
        self.clean()
        if commit:
            accountancy.amount = float(self.data["amount"])
            accountancy.save()
            self.wallet_obj.save()
        return super().save(commit)
class AccountancySearchForm(forms.Form):
    """Simple search form: one optional free-text filter on the operation type."""
    # Rendered as a small styled search box; empty label keeps only the placeholder.
    IO_type = forms.CharField(
        max_length=50,
        required=False,
        label="",
        widget=forms.TextInput(attrs={
            "placeholder": "Search by type ...",
            "class": "small_plate _comforta_bold text_shadow"
        })
    )
| AndriyKy/zlatnic | manager/forms.py | forms.py | py | 1,544 | python | en | code | 1 | github-code | 36 |
2986802119 | # https://towardsdatascience.com/a-detailed-guide-to-pytorchs-nn-transformer-module-c80afbc9ffb1
import math
from datetime import datetime
from os import path
import torch
import torch.nn as nn
from torch.utils.tensorboard import SummaryWriter
from tqdm import tqdm
from src.data_management.datasets.better_crnn_dataset import GOBetterCRNNDataset
from src.helper.utils import PATH_TO_LOG_FOLDER, PATH_TO_MODEL_FOLDER, print_blue, print_green, print_red, print_yellow
class PositionalEncoding(nn.Module):
    """Fixed sinusoidal positional encoding from "Attention Is All You Need".

    Expects input of shape (sequence_length, batch, dim_model); adds the
    precomputed encoding and applies dropout.
    """

    def __init__(self, dim_model, dropout_p, max_len):
        super().__init__()
        self.dropout = nn.Dropout(dropout_p)
        positions = torch.arange(0, max_len, dtype=torch.float).view(-1, 1)  # 0, 1, 2, ...
        # 1 / 10000^(2i / dim_model) for each even feature index 2i.
        inv_freq = torch.exp(torch.arange(0, dim_model, 2).float() * (-math.log(10000.0)) / dim_model)
        encoding = torch.zeros(max_len, dim_model)
        encoding[:, 0::2] = torch.sin(positions * inv_freq)  # even dims: sine
        encoding[:, 1::2] = torch.cos(positions * inv_freq)  # odd dims: cosine
        # Shape (max_len, 1, dim_model) so it broadcasts over the batch axis;
        # registered as a buffer: moves with the module but takes no gradients.
        self.register_buffer('pos_encoding', encoding.unsqueeze(0).transpose(0, 1))

    def forward(self, token_embedding: torch.tensor) -> torch.tensor:
        # Residual add of the first seq_len positional rows, then dropout.
        seq_len = token_embedding.size(0)
        return self.dropout(token_embedding + self.pos_encoding[:seq_len, :])
class GOTransformer(nn.Module):
    """
    Model from "A detailed guide to Pytorch's nn.Transformer() module.", by
    Daniel Melchor: https://medium.com/p/c80afbc9ffb1/
    """
    # Constructor
    def __init__(
        self,
        num_tokens,
        dim_model,
        num_heads,
        num_encoder_layers,
        num_decoder_layers,
        dropout_p,
    ):
        super().__init__()
        # INFO
        self.model_type = 'Transformer'
        self.dim_model = dim_model
        # LAYERS
        # Target-token embedding (the encoder input arrives pre-embedded; see forward).
        self.embedding = nn.Embedding(num_tokens, dim_model)
        self.positional_encoder = PositionalEncoding(
            dim_model=dim_model, dropout_p=dropout_p, max_len=5000
        )
        self.transformer = nn.Transformer(
            d_model=dim_model,
            nhead=num_heads,
            num_encoder_layers=num_encoder_layers,
            num_decoder_layers=num_decoder_layers,
            dropout=dropout_p,
        )
        # Project decoder output back to vocabulary logits.
        self.linear = nn.Linear(dim_model, num_tokens)
    def forward(self, src, tgt, tgt_mask=None, src_pad_mask=None, tgt_pad_mask=None):
        # Src size must be (batch_size, src sequence length)
        # Tgt size must be (batch_size, tgt sequence length)
        # Embedding + positional encoding - Out size = (batch_size, sequence length, dim_model)
        # src arrives already embedded by the trainer, hence the disabled line:
        #src = self.embedding(src) * math.sqrt(self.dim_model)
        tgt = self.embed_token(tgt)
        src = self.positional_encoder(src)
        tgt = self.positional_encoder(tgt)
        # We could use the parameter batch_first=True, but our KDL version doesn't support it yet, so we permute
        # to obtain size (sequence length, batch_size, dim_model),
        src = src.permute(1,0,2)
        tgt = tgt.permute(1,0,2)
        # Transformer blocks - Out size = (sequence length, batch_size, num_tokens)
        transformer_out = self.transformer(src, tgt, tgt_mask=tgt_mask, src_key_padding_mask=src_pad_mask, tgt_key_padding_mask=tgt_pad_mask)
        linear_out = self.linear(transformer_out)
        return linear_out
    def get_tgt_mask(self, size) -> torch.tensor:
        # Generates a square matrix where each row allows one word more to be seen
        # (causal/autoregressive attention mask for the decoder).
        mask = torch.tril(torch.ones(size, size) == 1) # Lower triangular matrix
        mask = mask.float()
        mask = mask.masked_fill(mask == 0, float('-inf')) # Convert zeros to -inf
        mask = mask.masked_fill(mask == 1, float(0.0)) # Convert ones to 0
        return mask
    def create_pad_mask(self, matrix: torch.tensor, pad_token: int) -> torch.tensor:
        # If matrix = [1,2,3,0,0,0] where pad_token=0, the result mask is
        # [False, False, False, True, True, True]
        return (matrix == pad_token)
    def embed_token(self, token):
        # Standard transformer embedding scaling by sqrt(dim_model).
        return self.embedding(token) * math.sqrt(self.dim_model)
class GOTransformerTrainer:
    """Trains and validates a GOTransformer on the GOBetterCRNNDataset.

    Tokens: 0/1 are data classes, 2 is <SOS>, 3 is <EOS> (see build()).
    Metrics and hyper-parameters are logged to TensorBoard.
    """
    def __init__(self,
        sequence_length: int = 32,
        batch_size: int = 8,
        dim_model: int = 4096,
        epochs: int = 100) -> None:
        # parameters
        self.sequence_length = sequence_length
        self.batch_size = batch_size
        self.dim_model = dim_model # (closest power of two to shape of data)
        self.epochs = epochs
        # Sequence-delimiter tokens; filled in by build() once a model exists.
        self.SOS_TOKEN = None
        self.EOS_TOKEN = None
        self.SOS_TOKEN_EMBEDDED = None
        self.EOS_TOKEN_EMBEDDED = None
        self.device = 'cuda' if torch.cuda.is_available() else 'cpu'
        self.writer = SummaryWriter(path.join(PATH_TO_LOG_FOLDER, 'runs', f'transformer_{str(datetime.now())}'))
    def train(self):
        """Full training entry point: build data loaders, model, optimizer; run fit()."""
        torch.autograd.set_detect_anomaly(True)
        dataset = GOBetterCRNNDataset(sequence_length=self.sequence_length)
        # 80/20 random split into train and validation sets.
        train_set, val_set = torch.utils.data.random_split(dataset, [round(len(dataset) * 0.8), round(len(dataset) * 0.2)])
        train_loader = torch.utils.data.DataLoader(train_set, batch_size=self.batch_size, shuffle=True)
        val_loader = torch.utils.data.DataLoader(val_set, batch_size=1, shuffle=True)
        print(f'Using device {self.device}')
        model_params = {
            'num_tokens': 4,
            'dim_model': self.dim_model,
            'num_heads': 2,
            'num_encoder_layers': 3,
            'num_decoder_layers': 3,
            'dropout_p': 0.1
        }
        # build() creates the token embeddings from a throwaway model instance;
        # the model actually trained is constructed on the next line.
        self.build(model_params)
        model = GOTransformer(**model_params).to(self.device)
        opt = torch.optim.SGD(model.parameters(), lr=0.01)
        loss_fn = nn.CrossEntropyLoss()
        self.writer.add_text('hyperparameters/batch_size', str(self.batch_size))
        self.writer.add_text('hyperparameters/sequence_length', str(self.sequence_length))
        self.writer.add_text('hyperparameters/dim_model', str(self.dim_model))
        self.writer.add_text('hyperparameters/epochs', str(self.epochs))
        time_before = datetime.now()
        self.fit(model, opt, loss_fn, train_loader, val_loader, self.epochs, self.writer)
        time_after = datetime.now()
        time_difference = time_after - time_before
        print(str(time_difference))
        self.writer.add_text('metrics/training_time', str(time_difference))
    def build(self, model_params):
        """Create <SOS>/<EOS> token ids (2/3) and their embedded vectors.

        NOTE(review): the embeddings come from a freshly-initialized model,
        not from the model that is later trained — confirm this is intended.
        """
        model = GOTransformer(**model_params).to(self.device)
        self.SOS_TOKEN = torch.tensor([2]).to(self.device)
        self.EOS_TOKEN = torch.tensor([3]).to(self.device)
        self.SOS_TOKEN_EMBEDDED = model.embed_token(self.SOS_TOKEN)
        self.EOS_TOKEN_EMBEDDED = model.embed_token(self.EOS_TOKEN)
    def add_sequence_tokens(self, batch):
        """Prepend <SOS> and append <EOS> to every sequence in *batch*.

        Long tensors get the raw token ids; float tensors get the embedded
        vectors; anything else is returned unchanged.
        """
        if isinstance(batch[0], (torch.LongTensor, torch.cuda.LongTensor)):
            return torch.stack([torch.concat((self.SOS_TOKEN, item, self.EOS_TOKEN)) for item in batch])
        elif isinstance(batch[0], (torch.FloatTensor, torch.cuda.FloatTensor)):
            return torch.stack([torch.concat((self.SOS_TOKEN_EMBEDDED, item, self.EOS_TOKEN_EMBEDDED)) for item in batch])
        else:
            return batch
    def features_to_embedding_vectors(self, features):
        """Flatten a feature tensor to (sequence_length, -1) and truncate to dim_model."""
        # 192, 12, 115 -> 5, 52992
        split_and_flattened = torch.reshape(features, (self.sequence_length, -1))
        # 5, 52992 -> 5, 512 (only the first dim_model features are kept)
        embedded = split_and_flattened[:, :self.dim_model]
        return embedded * math.sqrt(self.dim_model)
    def train_loop(self, model, opt, loss_fn, dataloader):
        """Run one training epoch; return the mean batch loss."""
        model.train()
        total_loss = 0
        for x, y in dataloader:
            # convert from a multi-dimensional feature vector to a simple embedding-vector
            x = torch.stack([self.features_to_embedding_vectors(item) for item in x])
            x = x.to(self.device)
            y = y.type(torch.long).to(self.device)
            # prepend and append the sequence tokens
            x = self.add_sequence_tokens(x)
            y = self.add_sequence_tokens(y)
            # Now we shift the tgt by one so with the <SOS> we predict the token at pos 1
            y_input = y[:,:-1]
            y_expected = y[:,1:]
            # Get mask to mask out the next words
            sequence_length = y_input.size(1)
            tgt_mask = model.get_tgt_mask(sequence_length).to(self.device)
            # Standard training except we pass in y_input and tgt_mask
            pred = model(x, y_input, tgt_mask)
            # Permute pred to have batch size first again
            pred = pred.permute(1, 2, 0)
            loss = loss_fn(pred, y_expected)
            opt.zero_grad()
            loss.backward(retain_graph=True)
            opt.step()
            total_loss += loss.detach().item()
        return total_loss / len(dataloader)
    def validation_loop(self, model, loss_fn, dataloader, epoch: int):
        """Run one validation epoch; return (loss, full-sequence acc, prefix acc)."""
        model.eval()
        total_loss = 0
        total_accuracy_complete = 0
        total_accuracy_start = 0
        c_time = 0
        with torch.no_grad():
            for x, y in dataloader:
                # convert from a multi-dimensional feature vector to a simple embedding-vector
                x = torch.stack([self.features_to_embedding_vectors(item) for item in x])
                x = x.to(self.device)
                y = y.type(torch.long).to(self.device)
                # prepend and append the sequence tokens
                x = self.add_sequence_tokens(x)
                y = self.add_sequence_tokens(y)
                # Now we shift the tgt by one so with the <SOS> we predict the token at pos 1
                y_input = y[:,:-1]
                y_expected = y[:,1:]
                # Get mask to mask out the next words
                sequence_length = y_input.size(1)
                tgt_mask = model.get_tgt_mask(sequence_length).to(self.device)
                # Standard training except we pass in y_input and src_mask
                pred = model(x, y_input, tgt_mask)
                # Permute pred to have batch size first again
                pred = pred.permute(1, 2, 0)
                # get accuracy
                _, max_index = torch.max(pred, dim=1)
                # NOTE(review): this pass tries to replace non-data tokens with
                # 0.5, but max_index is an integer (long) tensor, so assigning
                # 0.5 truncates to 0; max_index is also recomputed right after,
                # discarding the edit.  Confirm the intended behaviour.
                for sequence in max_index:
                    correct_complete = 0
                    correct_start = 0
                    for j in range(sequence_length):
                        if not sequence[j] in (0, 1):
                            sequence[j] = 0.5
                _, max_index = torch.max(pred, dim=1)
                sequence_length_dec = sequence_length - 1
                # NOTE(review): iterates range(self.batch_size), but the
                # validation loader uses batch_size=1 — confirm indexing.
                for i in range(self.batch_size):
                    correct_complete = 0
                    correct_start = 0
                    for j in range(sequence_length_dec):
                        if max_index[i][j] == y_expected[i][j]:
                            correct_complete += 1
                            # correct_start counts the unbroken correct prefix.
                            if correct_start == j:
                                correct_start += 1
                    total_accuracy_complete += correct_complete / sequence_length_dec
                    total_accuracy_start += correct_start / sequence_length_dec
                    self.writer.add_scalar(f'total_acc/epoch_{epoch}', correct_complete / sequence_length_dec, c_time)
                    self.writer.add_scalar(f'total_acc_start/epoch_{epoch}', correct_start / sequence_length_dec, c_time)
                    c_time += 1
                loss = loss_fn(pred, y_expected)
                total_loss += loss.detach().item()
        total_loss /= len(dataloader)
        total_accuracy_complete /= len(dataloader) * self.batch_size
        total_accuracy_start /= len(dataloader) * self.batch_size
        return total_loss, total_accuracy_complete, total_accuracy_start
    def fit(self, model, opt, loss_fn, train_dataloader, val_dataloader, epochs, writer):
        """Alternate train/validation epochs; checkpoint on improved prefix accuracy."""
        print_green('Training and validating model')
        max_accuracy_start = 0.0
        # Do not checkpoint during the first epochs (metrics too noisy).
        epoch_threshold = 20
        for epoch in tqdm(range(epochs), 'Epochs'):
            train_loss = self.train_loop(model, opt, loss_fn, train_dataloader)
            validation_loss, acc_complete, acc_start = self.validation_loop(model, loss_fn, val_dataloader, epoch)
            writer.add_scalar('loss/training', train_loss, epoch)
            writer.add_scalar('loss/validation', validation_loss, epoch)
            writer.add_scalar('accuracy/complete', acc_complete, epoch)
            writer.add_scalar('accuracy/start', acc_start, epoch)
            if epoch > epoch_threshold and acc_start > max_accuracy_start:
                # Save the whole model whenever prefix accuracy improves.
                torch.save(model, f'{PATH_TO_MODEL_FOLDER}/transformer_{epoch}_{acc_start}_{datetime.now().strftime("%Y-%m-%d_%H:%M")}.pt')
                max_accuracy_start = acc_start
    def predict(self, model, x : list, y : list):
        """Run the model on one batch and return the arg-max token indices.

        NOTE(review): y_expected is computed but never used here, and the 0.5
        assignment suffers the same long-tensor truncation as in
        validation_loop — confirm.
        """
        # convert from a multi-dimensional feature vector to a simple embedding-vector
        x = torch.stack([self.features_to_embedding_vectors(item) for item in x])
        x = x.to(self.device)
        y = y.type(torch.long).to(self.device)
        # prepend and append the sequence tokens
        x = self.add_sequence_tokens(x)
        y = self.add_sequence_tokens(y)
        # Now we shift the tgt by one so with the <SOS> we predict the token at pos 1
        y_input = y[:,:-1]
        y_expected = y[:,1:]
        # Get mask to mask out the next words
        sequence_length = y_input.size(1)
        tgt_mask = model.get_tgt_mask(sequence_length).to(self.device)
        # Standard training except we pass in y_input and src_mask
        pred = model(x, y_input, tgt_mask)
        # Permute pred to have batch size first again
        pred = pred.permute(1, 2, 0)
        # get accuracy
        _, max_index = torch.max(pred, dim=1)
        for sequence in max_index:
            for j in range(sequence_length):
                if not sequence[j] in (0, 1):
                    sequence[j] = 0.5
        return max_index
# Script entry point: train a transformer with the final hyper-parameter choice.
if __name__ == '__main__':
    GOTransformerTrainer(
        sequence_length=64,
        dim_model=2048,
        epochs=100,
        batch_size=16
    ).train()
| felix-20/gravitational_oceans | src/ai_nets/transformer.py | transformer.py | py | 14,567 | python | en | code | 1 | github-code | 36 |
32929404532 | #!/usr/bin/env python
# coding: utf-8
import csv
from config import ADDRESSES_FILE
from models import Birthday, get_this_week_list
def parse_data_file(in_file=ADDRESSES_FILE):
    """
    :param in_file: str
        Path to the CSV file holding the birthday records
    :return: [] of {}
        List of birthdays
    """
    # Use a context manager so the file handle is released once the generator
    # is exhausted; the original opened the file and never closed it.
    with open(in_file, "r") as csv_file:
        for row in csv.DictReader(csv_file):
            if row:
                yield row
def send_cake_remainder(birthdays):
    """Send the cake message to everyone whose birthday falls this week.

    :param birthdays: list of birthdays
    :return: yields each notified birthday
    """
    for birthday in list(get_this_week_list(birthdays)):
        birthday.send_msg()
        yield birthday
def send_birthday_remainder(birthdays):
    """Remind everyone NOT having a birthday this week about those who are.

    :param birthdays: list of raw birthday records
    :return: generator of the Birthday objects that were reminded
    """
    upcoming = list(get_this_week_list(birthdays))
    if not upcoming:  # nothing to announce -> send no e-mails at all
        return
    for raw_record in birthdays:
        candidate = Birthday(raw_record)  # parse raw csv data
        if candidate.is_this_week():
            continue  # celebrants do not get a reminder about themselves
        candidate.send_remainder_msg(upcoming)
        yield candidate
def send_emails(addresses):
    """Run the bot over one addresses file.

    :param addresses: str — path to the file containing addresses
    :return: list of {"reason", "message"} summaries of every e-mail sent
    """
    birthdays = list(parse_data_file(in_file=addresses))
    # Consume both generators fully (in the same order as before) so the
    # side-effecting sends happen: cakes first, then reminders.
    cake_notified = list(send_cake_remainder(birthdays))
    remind_notified = list(send_birthday_remainder(birthdays))
    # Build the combined summary.
    summary = [
        {"reason": "cake", "message": entry.get_summary()}
        for entry in cake_notified
    ]
    summary += [
        {"reason": "remind", "message": entry.get_summary()}
        for entry in remind_notified
    ]
    return summary
| raceup/happy-birthday-bot | hbb/bot.py | bot.py | py | 1,857 | python | en | code | 0 | github-code | 36 |
10045098498 | import requests
import json
import datetime,time
from kafka import KafkaProducer
# Kafka producer configuration (JSON-serialised messages to the local broker).
producer = KafkaProducer(bootstrap_servers=['broker:29092'], value_serializer=lambda x: json.dumps(x).encode('utf-8'))
# List of cryptocurrencies to fetch.
crypto_list = ["bitcoin", "ethereum", "ripple"]
start_time = int(time.time())
# Poll the CoinGecko API until 200 seconds have elapsed since start.
while int(time.time()) < start_time + 200:
    for crypto in crypto_list:
        url = f"https://api.coingecko.com/api/v3/coins/markets?vs_currency=usd&ids={crypto}"
        response = requests.get(url)
        data = response.json()[0]
        # print(data["current_price"])
        current_price = data["current_price"]
        date = datetime.datetime.now()
        message = {"crypto": crypto, "value": current_price, "date": date.strftime("%d-%m-%Y %H:%M:%S")}
        print(message)
        producer.send('crypto-values-f', value=message)
    # Poll every 30 seconds (the original comment said "5 minutes" — the code sleeps 30 s).
    time.sleep(30)
25289566 | #1065๋ฒ
#์์ด ํ๋ณ ๋จผ์
def seq_dcm(num):
    """Return True when the digits of *num* (a numeric string) form an
    arithmetic progression — a "hansu" in Baekjoon problem 1065.

    Single-digit strings have no digit pairs and therefore count as hansu.
    """
    # Collect every difference between adjacent digits; a hansu yields at
    # most one distinct difference. (The original kept scanning and flipped
    # a flag; the set comprehension is equivalent and idiomatic.)
    diffs = {int(num[i]) - int(num[i - 1]) for i in range(1, len(num))}
    return len(diffs) <= 1
X = int(input())
# Count how many numbers in 1..X are hansu (arithmetic digit progressions).
count = sum(1 for value in range(1, X + 1) if seq_dcm(str(value)))
print(count)
74221037222 | import threading
import time
import actions
from database import *
# Configure a dedicated 'pinger' logger through the project's log helper.
logInstance = log.logger('pinger')
plogger = logging.getLogger('pinger')
log.logger.init(logInstance, plogger)
plogger.info('Pinger started')
class Pinger(threading.Thread):
    """
    Pings all connected clients for activity.
    Drops them when they are not responding to pings.
    """

    def run(self):
        # name -> timestamp of the outstanding ping, or None when the
        # client has shown recent activity.
        pinged = {}
        while 1:
            allclients = DB.get_all_connections()
            for c in allclients:
                # Go through all connected clients and see if they need to
                # be pinged for inactivity (no action for more than 30 s).
                if c['socket'] and c['last_action'] < (time.time() - 30):
                    # BUG FIX: .get() — a plain pinged[c['name']] raised
                    # KeyError the first time an inactive client was seen.
                    if pinged.get(c['name']):
                        # Already pinged this client; check for a pong in time.
                        if pinged[c['name']] < time.time() - 5:
                            # Irresponsive client, drop it.
                            plogger.info("Client %s (%s) didn't respond to ping, dropping it."
                                         % (c['name'], c['socket'].getaddress()[0]))
                            actions.drop_client_by_socket(c['socket'])
                    else:
                        # Client needs to be pinged: do it.
                        pinged[c['name']] = time.time()
                        plogger.info("Client %s (%s) inactive for over 30 s. Sending ping"
                                     % (c['name'], c['socket'].getaddress()[0]))
                        actions.unicast(140, 'letsping', c['socket'])
                else:
                    # This client has recent activity, don't ping.
                    pinged[c['name']] = None
            time.sleep(1)
| janlaan/distsys2010 | src/pinger.py | pinger.py | py | 1,630 | python | en | code | 5 | github-code | 36 |
5216435364 | from pydantic import BaseModel, EmailStr
from datetime import datetime
from typing import Optional
class User(BaseModel):
    """Full user record as exposed by the user-management API."""
    user_id : int
    username : str
    forenames: str
    surname : str
    email : EmailStr
    bio : Optional[str]
    display_name : Optional[str]
    created_at: datetime
class UserRegistrationRequest(BaseModel):
    """Payload for registering a new user (password is plain text here)."""
    username : str
    forenames: str
    surname: str
    email: EmailStr
    password: str
class UserUpdateRequest(BaseModel):
    """Full (PUT-style) profile update — every field must be supplied."""
    forenames: str
    surname : str
    email : EmailStr
    bio : str
    display_name : str
class UserPartialUpdateRequest(BaseModel):
    """Partial (PATCH-style) profile update — any subset of fields."""
    forenames: Optional[str]
    surname : Optional[str]
    email : Optional[EmailStr]
    bio : Optional[str]
    display_name : Optional[str]
    # Reject any fields not declared above.
    class Config:
        extra = "forbid"
25676926 | import math
class PriorityQueue:
    """List-backed max-priority queue of (x, y, priority) triples.

    The priority is the third element (index 2); ties keep the earliest
    enqueued item, matching the original strict-greater scan.
    """

    def __init__(self):
        self.items = []

    def isEmpty(self):
        """Return True when the queue holds no items."""
        return len(self.items) == 0

    def size(self):
        """Number of queued items."""
        return len(self.items)

    def clear(self):
        """Discard all items."""
        self.items = []

    def enqueue(self, item):
        """Append one (x, y, priority) triple."""
        self.items.append(item)

    def findMaxIndex(self):
        """Index of the item with the largest priority, or None when empty."""
        if self.isEmpty():
            return None
        # Idiomatic argmax over indices; max() returns the first maximal
        # index, preserving the original tie-breaking behaviour.
        return max(range(len(self.items)), key=lambda i: self.items[i][2])

    def dequeue(self):
        """Remove and return the highest-priority item (None when empty)."""
        highest = self.findMaxIndex()
        if highest is not None:
            return self.items.pop(highest)

    def peek(self):
        """Return the highest-priority item without removing it."""
        highest = self.findMaxIndex()
        if highest is not None:
            return self.items[highest]
# Goal coordinates and the four neighbour step tables (right/left/down/up).
ox, oy = 5, 4
dx = [0, 0, 1, -1]
dy = [1, -1, 0, 0]

def dist(x, y):
    """Euclidean distance from (x, y) to the goal (ox, oy)."""
    # BUG FIX (shadowing): the original used locals named dx/dy, hiding the
    # module-level step tables inside this function; distinct names keep the
    # namespace clean. math.hypot is the idiomatic (and more accurate) form.
    diff_x, diff_y = ox - x, oy - y
    return math.hypot(diff_x, diff_y)
def mySmartSearch():
    """Greedy best-first search over the global map_ grid.

    Starts at (0, 1), always expands the cell closest to the goal (priority
    = -dist), marks visited cells '.', and returns True when 'x' is reached.
    """
    q = PriorityQueue()
    q.enqueue((0, 1, -dist(0, 1)))
    print('PQueue: ')
    while not q.isEmpty():
        here = q.dequeue()
        print(here, end='->')
        x, y, _ = here
        if map_[y][x] == 'x':
            return True
        map_[y][x] = '.'  # mark visited
        for i in range(4):
            nx, ny = x + dx[i], y + dy[i]
            # BUG FIX: valid indices are 0..mapsize-1; the original test
            # `> mapsize` let an index equal to mapsize through, which
            # raises IndexError on maps without a solid border.
            if ny < 0 or nx < 0 or ny >= mapsize or nx >= mapsize:
                continue
            if map_[ny][nx] == '0' or map_[ny][nx] == 'x':
                q.enqueue((nx, ny, -dist(nx, ny)))
        print('우선순위큐: ', q.items)
    return False
# Maze grid: '1' = wall, '0' = open cell, 'e' = entrance, 'x' = goal.
map_ = [['1','1','1','1','1','1'],
        ['e','0','1','0','0','1'],
        ['1','0','0','0','1','1'],
        ['1','0','1','0','1','1'],
        ['1','0','1','0','0','x'],
        ['1','1','1','1','1','1'],]
# The grid is square: mapsize x mapsize.
mapsize = len(map_)
mySmartSearch()
/0517 PriorityQueue.py | 0517 PriorityQueue.py | py | 1,844 | python | en | code | 0 | github-code | 36 |
6751769066 | # -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QWidget
from PyQt5.QtGui import QDoubleValidator
from PyQt5.QtCore import pyqtSignal, pyqtSlot
from selfdefineformat.views.editcheckbox import Ui_Form
class EditCheckBoxModule(QWidget, Ui_Form):
    """Editor panel for a <CheckBox> XML element.

    Slot names follow Qt's auto-connect convention (on_<object>_<signal>),
    so the public method names must not change. The "style" attribute is a
    3-bit mask: bit 4 = bold, bit 2 = italic, bit 1 = underline.
    """
    edited = pyqtSignal()  # emitted after every attribute change

    def __init__(self, element, parent=None):
        super(EditCheckBoxModule, self).__init__(parent)
        self.setupUi(self)
        self.element = element
        self.set_detail()
        self.set_validator()

    def set_validator(self):
        """Restrict the numeric line edits to non-negative 2-decimal numbers."""
        validator = QDoubleValidator()
        validator.setBottom(0)
        validator.setDecimals(2)
        validator.setNotation(QDoubleValidator.StandardNotation)
        self.lineEdit_width.setValidator(validator)
        self.lineEdit_height.setValidator(validator)
        self.lineEdit_fontsize.setValidator(validator)

    def set_detail(self):
        """Populate the form from the element; close if it is not a CheckBox."""
        if self.element.tagName() != 'CheckBox':
            self.close()
            return
        self.lineEdit_text.setText(self.element.attribute("name"))
        self.lineEdit_fontsize.setText(self.element.attribute("size"))
        self.lineEdit_width.setText(self.element.attribute("width"))
        self.lineEdit_height.setText(self.element.attribute("height"))
        self.lineEdit_id.setText(self.element.attribute("ID"))
        try:
            self.style = int(self.element.attribute("style"))
        except ValueError:
            self.style = 0
        # NOTE(review): values above 7 make this binary string longer than
        # 3 chars, shifting the indices below — confirm style stays 0..7.
        bits = '{:03b}'.format(self.style)
        self.checkBox_bold.setCheckState(2 if bits[0] != '0' else 0)
        self.checkBox_italic.setCheckState(2 if bits[1] != '0' else 0)
        self.checkBox_underline.setCheckState(2 if bits[2] != '0' else 0)

    def _set_attribute(self, name, value):
        """Write one attribute on the element and notify listeners."""
        self.element.setAttribute(name, value)
        self.edited.emit()

    def _apply_style_bit(self, bit, enabled):
        """Set (enabled) or clear one style bit (4=bold, 2=italic, 1=underline)."""
        self.style = self.style + bit if enabled else self.style - bit
        self._set_attribute("style", self.style)

    @pyqtSlot(str)
    def on_lineEdit_id_textEdited(self, p_str):
        self._set_attribute("ID", p_str)

    @pyqtSlot(str)
    def on_lineEdit_fontsize_textEdited(self, p_str):
        self._set_attribute("size", p_str)

    @pyqtSlot(str)
    def on_lineEdit_width_textEdited(self, p_str):
        self._set_attribute("width", p_str)

    @pyqtSlot(str)
    def on_lineEdit_height_textEdited(self, p_str):
        self._set_attribute("height", p_str)

    @pyqtSlot(str)
    def on_lineEdit_text_textEdited(self, p_str):
        self._set_attribute("name", p_str)

    @pyqtSlot(bool)
    def on_checkBox_bold_toggled(self, p_bool):
        self._apply_style_bit(4, p_bool)

    @pyqtSlot(bool)
    def on_checkBox_italic_toggled(self, p_bool):
        self._apply_style_bit(2, p_bool)

    @pyqtSlot(bool)
    def on_checkBox_underline_toggled(self, p_bool):
        self._apply_style_bit(1, p_bool)

    @pyqtSlot(bool)
    def on_checkBox_default_toggled(self, p_bool):
        if p_bool:
            self._set_attribute("default", 2)
        else:
            # BUG FIX: the original wrote setAttribute("style", 0) here,
            # clobbering the font-style mask instead of clearing "default".
            self._set_attribute("default", 0)
| zxcvbnmz0x/gmpsystem | selfdefineformat/modules/editcheckboxmodule.py | editcheckboxmodule.py | py | 3,645 | python | en | code | 0 | github-code | 36 |
with open(r'C:\Users\oush\Downloads\dataset_3380_5(1).txt') as source:
    raw_lines = source.readlines()
# One score list per subject id, keyed by the strings "1".."11".
scores = {str(subject): [] for subject in range(1, 12)}
for raw in raw_lines:
    fields = raw.split()
    print(fields)
    scores[fields[0]].append(int(fields[2]))
print(scores)
with open(r'C:\Users\oush\Downloads\reply_3380_5.txt', 'w') as report:
    for subject, values in scores.items():
        if values:
            report.write('{} {}\n'.format(subject, sum(values) / len(values)))
        else:
            report.write('{} -\n'.format(subject))
# nums = [3,2,3]
# nums = [1,1,1,3,3,2,2,2]
# LeetCode 229 (Majority Element II): collect every value occurring more
# than floor(n/3) times, in one pass with a running count.
# NOTE(review): both sample inputs above are commented out, so `nums` is
# undefined as written — uncomment one (or define nums) before running.
cnt = {}  # value -> occurrences seen so far
res = []  # values already known to exceed the n/3 threshold
for i in nums:
    if i in cnt:
        cnt[i] += 1
    else:
        cnt[i] = 1
    if cnt[i] > len(nums)//3 and not i in res:
        res.append(i)
print(res)
34000667448 | import json, random, csv, sys
def check_input():
    """Parse ``-i <coded_file> [-o <out_file>]`` from sys.argv.

    :return: (coded_file, out_file); out_file is None when no -o flag was
        given. Prints an error and exits on any other argument shape.
    """
    # The original combined boolean tests with bitwise `&`; `and` is the
    # correct (short-circuiting) idiom. The dead `out_file = ''` is gone.
    if len(sys.argv) == 3 and sys.argv[1] == '-i':
        return sys.argv[2], None
    if len(sys.argv) == 5 and sys.argv[1] == '-i' and sys.argv[3] == '-o':
        return sys.argv[2], sys.argv[4]
    print("[Error] invalid parameters")
    exit()
def write_data(data, file_name):
    """Serialize *data* as JSON to *file_name*.

    A ``.json`` suffix is appended when the name does not already carry one.
    """
    if '.json' not in file_name:
        file_name = file_name + ".json"
    # `with` closes the file; the original's explicit close() inside the
    # with-block was redundant.
    with open(file_name, 'w') as f_out:
        json.dump(data, f_out, indent=0)
# Single-letter annotation codes mapped to their tally keys.
_ANNO_KEYS = {
    "c": "course-related",
    "f": "food-related",
    "r": "residence-related",
    "o": "other",
}


def process(coded_file):
    """Tally the single-letter annotations in column 3 of a coded .tsv file.

    :param coded_file: path to the tsv (".tsv" is appended when missing)
    :return: dict mapping category name -> count
    """
    data = {
        "course-related": 0,
        "food-related": 0,
        "residence-related": 0,
        "other": 0
    }
    if '.tsv' not in coded_file:
        coded_file = coded_file + ".tsv"
    with open(coded_file) as f:
        # Table lookup replaces the original if/elif ladder; unknown or
        # multi-character annotations are ignored, exactly as before. The
        # redundant f.close() (the `with` already closes) is gone.
        for line in csv.reader(f, delimiter="\t"):
            anno = line[2]
            if anno in _ANNO_KEYS:
                data[_ANNO_KEYS[anno]] += 1
    return data
def main():
    """Entry point: parse CLI arguments, tally annotations, emit the result."""
    coded_file, out_file = check_input()
    counts = process(coded_file)
    if out_file is None:
        print(json.dumps(counts, indent=0))
    else:
        write_data(counts, out_file)


if __name__ == '__main__':
    main()
| namdar-nejad/COMP-598 | A7/Code/src/analyze.py | analyze.py | py | 1,690 | python | en | code | 0 | github-code | 36 |
37435918590 | #!/usr/bin/env python
# coding: utf-8
# In[5]:
import numpy as np
# In[1]:
import plotly
import matplotlib.pyplot as plt
# In[3]:
#Equations of motion:
#y = vt + .5a(t**2)
#x = vt
#y = x + 0.5a(x**2)/(v**2)
# In[6]:
#Let intial velocity v = 10 m/s, acceleration a = -9.8 m/s^2, and initial height h = 100 m
# In[11]:
def y(x, a, v, h):
    """Height after horizontal distance x, for launch speed components v,
    acceleration a and initial height h (works element-wise on arrays)."""
    drop = (1/2)*a*(x**2)/(v**2)
    return x + drop + h
# Sample the horizontal range 0..100 m and plot the resulting trajectory.
xlist = np.linspace(0, 100, num = 1000, endpoint=True, retstep=False)
ylist = y(xlist, -9.8, 10, 100)  # vectorised over xlist via numpy broadcasting
plt.figure(num=0, dpi=120)
plt.plot(xlist, ylist, color='pink')
plt.xlabel('Distance x (m)')
plt.ylabel('Height y (m)')
plt.title("Projectile Motion")
# In[ ]:
# In[ ]:
| abbychriss/toy-projects | Projectile motion.py | Projectile motion.py | py | 679 | python | en | code | 0 | github-code | 36 |
38265076029 | from collections import OrderedDict
import fnmatch
import re
# Per-channel RGB normalisation statistics conventionally used for ImageNet.
IMAGENET_DEFAULT_MEAN = (0.485, 0.456, 0.406)
IMAGENET_DEFAULT_STD = (0.229, 0.224, 0.225)
DEFAULT_CONFIGS = OrderedDict({
'ckdn': {
'metric_opts': {
'type': 'CKDN',
},
'metric_mode': 'FR',
},
'lpips': {
'metric_opts': {
'type': 'LPIPS',
'net': 'alex',
'version': '0.1',
},
'metric_mode': 'FR',
'lower_better': True,
},
'lpips-vgg': {
'metric_opts': {
'type': 'LPIPS',
'net': 'vgg',
'version': '0.1',
},
'metric_mode': 'FR',
'lower_better': True,
},
'dists': {
'metric_opts': {
'type': 'DISTS',
},
'metric_mode': 'FR',
'lower_better': True,
},
'ssim': {
'metric_opts': {
'type': 'SSIM',
'downsample': False,
'test_y_channel': True,
},
'metric_mode': 'FR',
},
'psnr': {
'metric_opts': {
'type': 'PSNR',
'test_y_channel': False,
},
'metric_mode': 'FR',
},
'fsim': {
'metric_opts': {
'type': 'FSIM',
'chromatic': True,
},
'metric_mode': 'FR',
},
'ms_ssim': {
'metric_opts': {
'type': 'MS_SSIM',
'downsample': False,
'test_y_channel': True,
'is_prod': True,
},
'metric_mode': 'FR',
},
'vif': {
'metric_opts': {
'type': 'VIF',
},
'metric_mode': 'FR',
},
'gmsd': {
'metric_opts': {
'type': 'GMSD',
'test_y_channel': True,
},
'metric_mode': 'FR',
'lower_better': True,
},
'nlpd': {
'metric_opts': {
'type': 'NLPD',
'channels': 1,
'test_y_channel': True,
},
'metric_mode': 'FR',
'lower_better': True,
},
'vsi': {
'metric_opts': {
'type': 'VSI',
},
'metric_mode': 'FR',
},
'cw_ssim': {
'metric_opts': {
'type': 'CW_SSIM',
'channels': 1,
'level': 4,
'ori': 8,
'test_y_channel': True,
},
'metric_mode': 'FR',
},
'mad': {
'metric_opts': {
'type': 'MAD',
'test_y_channel': True,
},
'metric_mode': 'FR',
'lower_better': True,
},
'niqe': {
'metric_opts': {
'type': 'NIQE',
'test_y_channel': True,
},
'metric_mode': 'NR',
'lower_better': True,
},
'ilniqe': {
'metric_opts': {
'type': 'ILNIQE',
},
'metric_mode': 'NR',
'lower_better': True,
},
'brisque': {
'metric_opts': {
'type': 'BRISQUE',
'test_y_channel': True,
},
'metric_mode': 'NR',
'lower_better': True,
},
'nrqm': {
'metric_opts': {
'type': 'NRQM',
},
'metric_mode': 'NR',
},
'pi': {
'metric_opts': {
'type': 'PI',
},
'metric_mode': 'NR',
'lower_better': True,
},
'musiq': {
'metric_opts': {
'type': 'MUSIQ',
'pretrained': 'koniq10k'
},
'metric_mode': 'NR',
},
'musiq-ava': {
'metric_opts': {
'type': 'MUSIQ',
'pretrained': 'ava'
},
'metric_mode': 'NR',
},
'musiq-koniq': {
'metric_opts': {
'type': 'MUSIQ',
'pretrained': 'koniq10k'
},
'metric_mode': 'NR',
},
'musiq-paq2piq': {
'metric_opts': {
'type': 'MUSIQ',
'pretrained': 'paq2piq'
},
'metric_mode': 'NR',
},
'musiq-spaq': {
'metric_opts': {
'type': 'MUSIQ',
'pretrained': 'spaq'
},
'metric_mode': 'NR',
},
'nima': {
'metric_opts': {
'type': 'NIMA',
'pretrained': 'ava'
},
'metric_mode': 'NR',
},
'pieapp': {
'metric_opts': {
'type': 'PieAPP',
},
'metric_mode': 'FR',
'lower_better': True,
},
'paq2piq': {
'metric_opts': {
'type': 'PAQ2PIQ',
},
'metric_mode': 'NR',
},
'dbcnn': {
'metric_opts': {
'type': 'DBCNN',
'pretrained': 'koniq'
},
'metric_mode': 'NR',
},
})
def _natural_key(string_):
return [int(s) if s.isdigit() else s for s in re.split(r'(\d+)', string_.lower())]
def list_models(filter='', exclude_filters=''):
""" Return list of available model names, sorted alphabetically
Args:
filter (str) - Wildcard filter string that works with fnmatch
exclude_filters (str or list[str]) - Wildcard filters to exclude models after including them with filter
Example:
model_list('*ssim*') -- returns all models including 'ssim'
"""
all_models = DEFAULT_CONFIGS.keys()
if filter:
models = []
include_filters = filter if isinstance(filter, (tuple, list)) else [filter]
for f in include_filters:
include_models = fnmatch.filter(all_models, f) # include these models
if len(include_models):
models = set(models).union(include_models)
else:
models = all_models
if exclude_filters:
if not isinstance(exclude_filters, (tuple, list)):
exclude_filters = [exclude_filters]
for xf in exclude_filters:
exclude_models = fnmatch.filter(models, xf) # exclude these models
if len(exclude_models):
models = set(models).difference(exclude_models)
return list(sorted(models, key=_natural_key))
| Sskun04085/IQA_PyTorch | pyiqa/default_model_configs.py | default_model_configs.py | py | 6,004 | python | hi | code | 0 | github-code | 36 |
72311226344 | import os
import subprocess
from src.manager.manager.launcher.launcher_interface import ILauncher, LauncherException
from src.manager.manager.docker_thread.docker_thread import DockerThread
from src.manager.libs.process_utils import wait_for_xserver
from typing import List, Any
import time
class LauncherDronesRos2(ILauncher):
    """Launcher that brings up a dummy X server, the Aerostack2 nodes and a
    PX4/gzserver simulation for the ROS 2 drone exercises."""
    exercise_id: str
    type: str
    module: str
    resource_folders: List[str]
    model_folders: List[str]
    plugin_folders: List[str]
    world_file: str
    running = False
    threads: List[Any] = []

    def run(self, callback):
        """Start the X server, the Aerostack2 launch and the PX4/gzserver run."""
        # Start X server in display :0 and wait until it accepts connections.
        xserver_cmd = f"/usr/bin/Xorg -quiet -noreset +extension GLX +extension RANDR +extension RENDER -logfile ./xdummy.log -config ./xorg.conf :0"
        xserver_thread = DockerThread(xserver_cmd)
        xserver_thread.start()
        wait_for_xserver(":0")
        self.threads.append(xserver_thread)
        # Expand environment variables in configuration paths.
        self._set_environment()
        world_file = os.path.expandvars(self.world_file)
        # Launch MicroXRCE and the Aerostack2 nodes.
        as2_launch_cmd = f"ros2 launch jderobot_drones as2_default_classic_gazebo.launch.py world_file:={world_file}"
        as2_launch_thread = DockerThread(as2_launch_cmd)
        as2_launch_thread.start()
        self.threads.append(as2_launch_thread)
        # Launch gzserver and PX4.
        px4_launch_cmd = f"$AS2_GZ_ASSETS_SCRIPT_PATH/default_run.sh {world_file}"
        px4_launch_thread = DockerThread(px4_launch_cmd)
        px4_launch_thread.start()
        self.threads.append(px4_launch_thread)
        self.running = True

    def is_running(self):
        # BUG FIX: the original stub always returned True; report the
        # tracked state so terminate() is meaningful before/after a run.
        return self.running

    def terminate(self):
        """Stop every launched thread and mark the launcher as stopped."""
        if self.is_running():
            for thread in self.threads:
                thread.terminate()
                thread.join()
            self.running = False

    def _set_environment(self):
        """Append the expanded resource/model/plugin folders to the Gazebo
        search-path environment variables."""
        resource_folders = [os.path.expandvars(path) for path in self.resource_folders]
        model_folders = [os.path.expandvars(path) for path in self.model_folders]
        plugin_folders = [os.path.expandvars(path) for path in self.plugin_folders]
        os.environ["GAZEBO_RESOURCE_PATH"] = f"{os.environ.get('GAZEBO_RESOURCE_PATH', '')}:{':'.join(resource_folders)}"
        os.environ["GAZEBO_MODEL_PATH"] = f"{os.environ.get('GAZEBO_MODEL_PATH', '')}:{':'.join(model_folders)}"
        os.environ["GAZEBO_PLUGIN_PATH"] = f"{os.environ.get('GAZEBO_PLUGIN_PATH', '')}:{':'.join(plugin_folders)}"
| JdeRobot/RoboticsApplicationManager | manager/manager/launcher/launcher_drones_ros2.py | launcher_drones_ros2.py | py | 2,530 | python | en | code | 2 | github-code | 36 |
1147807834 | """empty message
Revision ID: b7c0cfa43719
Revises: 25279a0b5c75
Create Date: 2016-11-02 00:02:18.768539
"""
# revision identifiers, used by Alembic.
revision = 'b7c0cfa43719'
down_revision = '25279a0b5c75'
from alembic import op
import sqlalchemy as sa
def upgrade():
    """Apply this revision: add a nullable `state_number` column to both tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('twilio', sa.Column('state_number', sa.String(length=255), nullable=True))
    op.add_column('user', sa.Column('state_number', sa.String(length=255), nullable=True))
    ### end Alembic commands ###
def downgrade():
    """Revert this revision: drop the `state_number` column from both tables."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('user', 'state_number')
    op.drop_column('twilio', 'state_number')
    ### end Alembic commands ###
| CodeForProgress/sms-app | src/migrations/versions/b7c0cfa43719_.py | b7c0cfa43719_.py | py | 760 | python | en | code | 1 | github-code | 36 |
1322353770 | from resizer import *
import argparse
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument(
"--src",
type=str,
required=True,
help="The directory of the folder with the image to be resized.",
)
parser.add_argument(
"--width", type=int, required=True, help="Width of a resized image"
)
parser.add_argument(
"--height", type=int, required=True, help="Height of a resized image"
)
parser.add_argument(
"--save_dir",
type=str,
required=False,
default=None,
help="A directory to store images.",
)
parser.add_argument(
"--inplace",
type=bool,
required=False,
default=False,
help="Whether to save the images inplace or not.",
)
args = parser.parse_args()
images, image_names, folder_name, file_list = open_images(args.src)
images = resize_images(images, args.width, args.height)
if args.inplace:
inplace_save_images(images, file_list)
elif args.save_dir is not None:
save_images(images, image_names, folder_name, args.save_dir)
else:
save_images(images, image_names, folder_name)
print("Done")
| hjk1996/Image-Resizer | main.py | main.py | py | 1,243 | python | en | code | 0 | github-code | 36 |
14159948367 |
"""dividing a given corpus 'test' or 'dev' set into arbitrary sentence lengths"""
### input: .iob file ###
### output: .iob files ###
with open("../test.iob", "r", encoding="utf-8") as f:
test_split = f.readlines()
very_low = list()
very_very_low = list()
low = list()
med = list()
high = list()
current = ""
counter = 0
counter_len = ""
for i in range(len(test_split)):
if test_split[i] != "\n":
current += test_split[i]
counter += 1
else:
current += "\n"
if counter <= 5:
with open("test/test_very_very_low.iob", "a", encoding="utf-8") as f:
f.write(current)
current = current.split("\n")
current = current[:-2]
for j in range(len(current)):
counter_len += current[j].split("\t")[0] + " "
very_very_low.append(counter_len)
current = ""
counter_len = ""
counter = 0
continue
if 5 < counter and counter <= 10:
with open("test/test_very_low.iob", "a", encoding="utf-8") as f:
f.write(current)
current = current.split("\n")
current = current[:-2]
for j in range(len(current)):
counter_len += current[j].split("\t")[0] + " "
very_low.append(counter_len)
current = ""
counter_len = ""
counter = 0
continue
if 10 < counter and counter <= 25:
with open("test/test_low.iob", "a", encoding="utf-8") as f:
f.write(current)
current = current.split("\n")
current = current[:-2]
for j in range(len(current)):
counter_len += current[j].split("\t")[0] + " "
low.append(counter_len)
current = ""
counter_len = ""
counter = 0
continue
if 25 < counter and counter <= 50:
with open("test/test_med.iob", "a", encoding="utf-8") as f:
f.write(current)
current = current.split("\n")
current = current[:-2]
for j in range(len(current)):
counter_len += current[j].split("\t")[0] + " "
med.append(counter_len)
current = ""
counter_len = ""
counter = 0
continue
if 50 < counter:
with open("test/test_high.iob", "a", encoding="utf-8") as f:
f.write(current)
current = current.split("\n")
current = current[:-2]
for j in range(len(current)):
counter_len += current[j].split("\t")[0] + " "
high.append(counter_len)
current = ""
counter_len = ""
counter = 0
continue
print("very_very_low =>5", len(very_low))
print("5 < very_low <= 10", len(very_low))
print("10 < low <= 25 ", len(low))
print("25 < med <= 50 ", len(med))
print("high: 50< ", len(high))
| huspacy/huspacy-resources | scripts/ner_data_analysis/split_set_into_sentence_length.py | split_set_into_sentence_length.py | py | 3,021 | python | en | code | 0 | github-code | 36 |
30731206475 | #######################################################################
# Necessaria a instalacao do biopython (pip install biopython)
from Bio import pairwise2
from Bio.pairwise2 import format_alignment
from Bio.Seq import Seq
def alinhamento(seqt, numseq):
    """Pairwise-align consecutive sequence pairs in *seqt* with Biopython's
    pairwise2, using scoring parameters derived from hard-coded names.

    Appends "Score Total: <sum>" to seqt and returns it.
    NOTE(review): pref_gap and gap_js are computed but never used — confirm
    whether they were meant to feed the alignment parameters.
    """
    nome1 = "guilherme"
    nome2 = "costa"
    nome3 = "oliveira"
    # Keyboard-layout letter -> number table used to turn names into digits.
    dicionario = {"q":1, "w":2, "e":3, "r":4, "t":5, "y":6, "u":7, "i":8, "o":9, "p":10, "á":11, "ã":12,
                  "a":11, "s":9, "d":7, "f":5, "g":3, "h":1, "j":11, "k":9, "l":7, "ç":5, "é":3, "í":1,
                  "z":2, "x":4, "c":6, "v":8, "b":10, "n":12, "m":2, "ó":4, "õ":6, "ô":8, "ẫ":10, "ê":12}
    # Helper: replace every letter of a name with its table number.
    def converte(x):
        for l in dicionario:
            x = x.replace(l, str(dicionario[l]))
        return x
    nome1 = converte(nome1) # first name -> match/mismatch/gap parameters
    nome1 = int(nome1)
    resto1 = nome1 % 3
    if resto1 == 0:
        alpha = 1
        beta = 0
        delta = -1
    elif resto1 == 1:
        alpha = 2
        beta = 0
        delta = -1
    elif resto1 == 2:
        alpha = 1
        beta = 0
        delta = -2
    nome2 = converte(nome2)
    nome2 = int(nome2) # last name
    pref_gap = nome2 % 3
    nome3 = converte(nome3)
    nome3 = int(nome3) # middle name
    gap_js = nome3 % 2
    score = 0
    scoreaux = 0
    count1 = 0
    count2 = 0
    n = numseq # number of sequences
    k = 0
    scoretotal = 0
    while n > 0:
        if n == numseq:# 0.5 is deducted from the score per consecutive gap, so alignments with separated gaps can score higher, enforcing the separated-gaps requirement
            alignments = pairwise2.align.globalms(seqt[k], seqt[k + 1], alpha, beta, delta, -0.5)
            for alignment in alignments:
                if count1 == 0:
                    seqt[k] = alignment.seqA
                    seqt[k + 1] = alignment.seqB
                    score = alignment.score
                    count1 = count1 + 1
                else:
                    if alignment.score >= score:
                        seqt[k] = alignment.seqA
                        seqt[k + 1] = alignment.seqB
                        score = alignment.score
            n = n - 2
            k = k + 1
            scoretotal = scoretotal + alignment.score
        else:
            alignments = pairwise2.align.globalms(seqt[k], seqt[k + 1], alpha, beta, delta, -0.5)
            for alignment in alignments:
                if count2 == 0:
                    seqt[k] = alignment.seqA
                    seqt[k + 1] = alignment.seqB
                    scoreaux = alignment.score
                    count2 = count2 + 1
                else:
                    if alignment.score >= scoreaux:
                        seqt[k] = alignment.seqA
                        seqt[k + 1] = alignment.seqB
                        scoreaux = alignment.score
            n = n - 2
            k = k + 1
            scoretotal = scoretotal + alignment.score
    scoretotal = "Score Total: " + str(scoretotal)
    seqt.append(scoretotal)
    return seqt
16968956587 | # -*- coding: utf-8 -*-
from django.template.loader import render_to_string
from django.contrib.admin.utils import quote
def get_mptt_admin_node_template(instance):
    '''
    Get MPTT admin node template name by model instance
    :param instance: instance of mptt model
    :return: template name
    '''
    model_name = instance.__class__.__name__.lower()
    return 'edw/admin/mptt/_{}_node.html'.format(model_name)
def mptt_admin_node_info_update_with_template(admin_instance, template, instance, node_info, context=None):
    '''
    Update MPTT admin node with rendered by html template node label.
    :param admin_instance: mptt admin instance
    :param template: template name for render
    :param instance: instance of mptt model
    :param node_info: jstree node info
    :param context: additional context for render (optional)
    :return: none
    '''
    # BUG FIX: the original default `context={}` was a single shared dict,
    # so keys written here leaked into every later call without an explicit
    # context. A fresh dict per call restores the intended behaviour.
    if context is None:
        context = {}
    pk_attname = admin_instance.model._meta.pk.attname
    pk = quote(getattr(instance, pk_attname))
    context.update({
        'instance': instance,
        'node_info': node_info,
        'app_label': instance._meta.app_label.lower()
    })
    label = render_to_string(template, context)
    # NOTE(review): pk is quoted above and quoted again below — confirm the
    # double quote() is intentional (it matters for pks containing '_').
    node_info.update(
        url=admin_instance.get_admin_url('change', (quote(pk),)),
        move_url=admin_instance.get_admin_url('move', (quote(pk),)),
        label=label,
    )
| infolabs/django-edw | backend/edw/admin/mptt/utils.py | utils.py | py | 1,309 | python | en | code | 6 | github-code | 36 |
2956310970 | import socket
import struct
import subprocess
import logging
from datetime import datetime
# Client configuration.
MULTICAST_IP = '224.0.0.1'
MULTICAST_PORT = 5004
CHUNK_SIZE = 1472 # Packet size, including the 4-byte sequence counter
CLIENT_INTERFACE_IP = '0.0.0.0' # Use a specific interface IP here if needed
PACKETS_RECEIVED = 0
# Logging configuration (one log file per start time).
log_filename = datetime.now().strftime("cliente_%H%M%S.txt")
logging.basicConfig(filename=log_filename,
                    filemode='w',
                    level=logging.DEBUG,
                    format='CLIENTE - %(asctime)s - %(levelname)s - %(message)s')
# Create a UDP socket.
client_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
logging.info("Socket criado.")
# Allow multiple clients on the same machine (for testing purposes).
client_sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
logging.info("Socket configurado para permitir múltiplos clientes.")
# Bind to the server address.
client_sock.bind((CLIENT_INTERFACE_IP, MULTICAST_PORT))
logging.info(f"Socket vinculado a {CLIENT_INTERFACE_IP}:{MULTICAST_PORT}.")
# Tell the operating system to join the multicast group.
group = socket.inet_aton(MULTICAST_IP) + socket.inet_aton(CLIENT_INTERFACE_IP)
client_sock.setsockopt(socket.IPPROTO_IP, socket.IP_ADD_MEMBERSHIP, group)
logging.info(f"Socket adicionado ao grupo multicast {MULTICAST_IP}.")
# Initialise the loss/reorder counters.
expected_packet_counter = None
lost_packets = 0
out_of_order_packets = 0
# Prepare the subprocess command for VLC.
vlc_command = "vlc -" # NOTE(review): unused — the Popen below builds its own argument list
# Start VLC as a subprocess reading the stream from stdin.
vlc_process = subprocess.Popen(["vlc", "fd://0"], stdin=subprocess.PIPE, stdout=subprocess.DEVNULL, stderr=subprocess.DEVNULL)
logging.info("Processo VLC iniciado.")
logging.info("Iniciando loop de recebimento de pacotes.")
try:
    while True:
        # Receive one packet.
        data, address = client_sock.recvfrom(CHUNK_SIZE)
        # Check for the "end-of-stream" marker packet.
        if data[4:] == b'END_OF_STREAM':
            PACKETS_RECEIVED += 1
            logging.info("Fim do stream detectado, redefinindo contador.")
            expected_packet_counter = None
            continue
        # Extract the packet counter (big-endian uint32 header).
        packet_counter, = struct.unpack('>I', data[:4])
        video_data = data[4:]
        # First packet, or exactly the expected one.
        if expected_packet_counter is None or packet_counter == expected_packet_counter:
            PACKETS_RECEIVED += 1
            # Advance the expected counter to the next packet.
            if expected_packet_counter is None:
                logging.info(f"Primeiro pacote recebido com contador {packet_counter}")
            expected_packet_counter = (packet_counter + 1) % (2 ** 32)
        else:
            if packet_counter < expected_packet_counter:
                PACKETS_RECEIVED += 1
                # Out-of-order packet.
                out_of_order_packets += 1
                logging.warning(f"Pacote fora de ordem. Esperado: {expected_packet_counter}, Recebido: {packet_counter}")
            else:
                # Packets were lost between expected and received.
                lost_packets += packet_counter - expected_packet_counter
                expected_packet_counter = (packet_counter + 1) % (2 ** 32)
                if lost_packets > 0: # Log only when packets were actually lost
                    logging.warning(f"Pacotes perdidos. Esperado: {expected_packet_counter-1}, Recebido: {packet_counter}")
            # Set the next expected packet counter.
            expected_packet_counter = (packet_counter + 1) % (2 ** 32)
        # Write the video payload to VLC's stdin.
        vlc_process.stdin.write(video_data)
except KeyboardInterrupt:
    logging.info("Cliente encerrando por KeyboardInterrupt.")
    logging.info(f"Pacotes recebidos: {PACKETS_RECEIVED}")
finally:
    # Close the VLC process.
    if vlc_process:
        vlc_process.terminate()
        logging.info("Processo VLC terminado.")
    # Close the socket.
    client_sock.close()
    logging.info("Socket fechado.")
    # Log the final statistics.
    logging.info(f"Total de pacotes perdidos: {lost_packets}. Total de pacotes fora de ordem: {out_of_order_packets}.")
| gpdolzan/R2LAST | cliente.py | cliente.py | py | 4,325 | python | pt | code | 0 | github-code | 36 |
33997460968 | from fbchat import *
from fbchat.models import *
from Credentials import *
import json
import requests
import re
import os
import time
from threading import Thread
import socket
def finalVerification(url):
if re.search("homework-help", url):
return True
return False
def question_id(url):
try:
ID = ""
i = len(url) - 1
while i > 0:
if url[i] != "q":
ID = url[i] + ID
else:
return "q" + ID
i -= 1
except Exception:
return "QUESTION ID ERROR"
def verifyURL(url):
try:
response = requests.head(url)
print(response.status_code)
if response.status_code == 404:
print("Bad website.")
return False
return True
except Exception:
print("Bad website")
return False
def isAnswered(url):
    """Return True if a screenshot named after this question's id already
    exists anywhere under ./screenshots."""
    qid = question_id(url)
    return any(
        fname == qid + ".png"
        for _root, _dirs, files in os.walk("./screenshots")
        for fname in files
    )
class CustomClient(Client):
    """fbchat client that answers "CHEGG <url>" messages with screenshots.

    Relies on module-level globals: ``daily_limit`` (per-user quota dict),
    ``start_time`` (quota window start), ``que_position`` (pending request
    queue depth), ``recent_call`` (timestamp of the last UDP request) and
    ``socket`` (a UDP socket bound in the ``__main__`` block).
    """
    def onMessage(self, mid, author_id, message, message_object, thread_id, thread_type, ts, metadata, msg):
        """Handle one incoming Messenger message."""
        global daily_limit, start_time
        def respond(text, msgType=None):
            # Reply into the group thread, or directly to the author for 1:1
            # chats; msgType "IMAGE" sends a local image file instead of text.
            if thread_type == thread_type.GROUP:
                if msgType is None:
                    self.send(Message(text=text), thread_id=thread_id, thread_type=thread_type)
                elif msgType == "IMAGE":
                    self.sendLocalImage(text, thread_id=thread_id, thread_type=thread_type)
            elif thread_type == thread_type.USER:
                if msgType is None:
                    self.send(Message(text=text), thread_id=author_id, thread_type=thread_type)
                elif msgType == "IMAGE":
                    self.sendLocalImage(text, thread_id=author_id, thread_type=thread_type)
        def collectPNG():
            # Runs on a worker thread: queue up, forward the URL over UDP to
            # the local "AnswerMe" service on port 5000, then poll up to 15 s
            # for the screenshot file to appear.
            global que_position, socket, recent_call
            que_position += 1
            print("Started a thread to collect {}".format(question_id(message)))
            respond("You have {} new questions left. Approximate retrieval time: {:.0F} seconds".format(daily_limit[author_id], que_position*10 + 1*max(0, 25-(time.time()-recent_call)) + (que_position-1)*25))
            # Busy-wait until we are at the head of the queue AND at least
            # 25 s have passed since the previous UDP request (rate limit).
            while que_position-1 != 0 or time.time() - recent_call < 25:
                time.sleep(0.1)
            socket.sendto(message.encode(), ("127.0.0.1", 5000))
            recent_call = time.time()
            print("Request sent to AnswerMe")
            started = time.time()
            while time.time() - started < 15:
                if os.path.exists("./screenshots/" + question_id(message) + ".png"):
                    respond("./screenshots/" + question_id(message) + ".png", "IMAGE")
                    que_position -= 1
                    return
            # NOTE(review): que_position is NOT decremented on timeout, which
            # permanently inflates the queue estimate -- confirm intent.
            respond("Error: Timed out.")
        # Reset all per-user quotas once the 24 h window has elapsed.
        if time.time() - start_time > 86400:
            start_time = time.time()
            daily_limit = {}
        print(message_object)
        if author_id != self.uid:
            if re.search("CHEGG", message):
                message = message.replace("CHEGG", "").strip()
                if verifyURL(message) and finalVerification(message):
                    respond("Your question {} is being processed.".format(question_id(message)))
                    if isAnswered(message):
                        # Cached screenshot: answer immediately, no quota cost.
                        respond("The question has been identified in Steve's data base.")
                        respond("./screenshots/" + question_id(message) + ".png", "IMAGE")
                    elif author_id in daily_limit and daily_limit[author_id] > 0 or author_id not in daily_limit:
                        if author_id not in daily_limit:
                            daily_limit[author_id] = 4
                        daily_limit[author_id] -= 1
                        Thread(target=collectPNG).start()
                    else:
                        respond(
                            "You have asked too many questions today. Please wait {:.2f} minute(s) to ask more questions!".format(
                                (86400 - (time.time() - start_time)) / 60))
                else:
                    respond("Invalid URL. Please type in a correct link.")
class MessengerBot:
    """Thin wrapper that logs a CustomClient in (reusing cached session
    cookies from session.json when available) and starts listening."""
    def __init__(self):
        # Lazily created on first login().
        self.client = None
    def login(self, username, password):
        """Log in, preferring cached cookies; persist a fresh session when
        no cookie file existed."""
        if self.client is None:
            cookies = None
            try:
                with open("session.json") as file:
                    cookies = json.load(file)
                print("Loading session cookies...")
            except FileNotFoundError:
                print("First time logging in...")
            self.client = CustomClient(username, password, session_cookies=cookies)
            print("Is logged in? {}".format(self.client.isLoggedIn()))
            if cookies is None:
                # First login: cache the session for subsequent runs.
                with open("session.json", "w") as file:
                    json.dump(self.client.getSession(), file)
    def listen(self):
        """Block forever, dispatching incoming messages to CustomClient."""
        print("Listening")
        self.client.listen()
if __name__ == "__main__":
    # Supervisor loop: (re)create the shared state and restart the bot on
    # any failure.
    while True:
        try:
            # NOTE(review): this rebinds the *socket module name* to a socket
            # instance (CustomClient.collectPNG relies on this global).  After
            # an exception, the next iteration's `socket.socket(...)` raises
            # AttributeError on the instance, so the bare `continue` below
            # degenerates into a tight silent retry loop -- needs a rename.
            socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            que_position = 0
            start_time = time.time()
            recent_call = 0
            daily_limit = {}
            bot = MessengerBot()
            bot.login(username, passwd)
            bot.listen()
        except Exception:
            # NOTE(review): the error is swallowed without logging.
            continue
| namdao2000/MessengerBot | MessengerBot.py | MessengerBot.py | py | 5,534 | python | en | code | 0 | github-code | 36 |
30630423030 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 16 19:35:10 2020
@author: isaacparker
"""
#Load libraries
import matplotlib.pyplot as plt
import numpy as np
import seaborn as sns
import pandas as pd
from scipy.stats import lognorm, gaussian_kde
# Specify font used in plots
font = 'Adobe Myungjo Std'
math_font = 'cm'
import matplotlib as mpl
mpl.style.use('seaborn-white')
mpl.rcParams['font.family'] = font
mpl.rcParams['font.size'] = 18
mpl.rcParams['mathtext.fontset'] = 'cm'
#LOAD DATA: experimental perovskite J-V sweeps, one row per device.
JV_exp = np.loadtxt('perov_JV_exp.txt',delimiter=',')
# NOTE(review): no-op self-assignment -- safe to delete.
JV_exp = JV_exp
v_sweep = np.linspace (0,1.2,100)
# Columns 200:300 hold the current sweep; P = J * V, efficiency from max power.
power_exp= JV_exp[:,100*2:100*3]*v_sweep
eff_exp = np.max(power_exp, axis=1)/0.98
exp_condition = pd.read_excel('prcess_label.xlsx',index_col=0)
exp_condition = exp_condition.values
#Stack data and order by (temperature, ratio) process condition.
X_data = np.concatenate([eff_exp.reshape(-1,1), exp_condition],axis= 1)
p_index = []
X_data_re=[]
for i in [70,90,110,130]:
    for j in [2,4,8]:
        idx = np.intersect1d(np.where(X_data[:,1]==i) ,np.where(X_data[:,2]==j))
        X_data_re.append(X_data[idx,:])
X_data_re = np.vstack(X_data_re)
#Remove data to have same # of samples per condition (hand-picked indices):
X_data_re = np.delete(X_data_re, [0,15,21,13,14,10,12,17,12,9,7,4], 0)
X_data_re = np.insert(X_data_re, 36, [3.88, 90, 2], axis=0)
X_data_re = np.delete(X_data_re, [106,107,108,96,110,112], 0)
X_data_re = np.insert(X_data_re, 143, [5.77, 130, 8], axis=0)
# Compute efficiency and normalize
df_X1 = pd.DataFrame(X_data_re, columns=['Efficiency','Temperature','Ratio'])
df_X = df_X1.copy()
max_eff = df_X['Efficiency'].max()
# Normalize
df_X['Efficiency'] = df_X['Efficiency'] / max_eff
# Get mean and variance for empirical distribution
X_mean = df_X['Efficiency'].mean()
eff_data = df_X['Efficiency']
log_norm_var = eff_data.std()
# Lognormal distribution histogram (seeded for a reproducible figure).
np.random.seed(6)
logn = lognorm(s=1*log_norm_var, scale = 0.7*(1-X_mean))
sample = logn.rvs (size=500)
sample[sample>1]= 1
plt.figure()
# (1 - sample) * 20 maps normalized deficit back to an efficiency in [0, 20] %.
plt.hist((1-sample)*20,50,
         density=True,
         edgecolor='white',
         linewidth=1.2,
#         color='mediumseagreen',
#         alpha=0.5)
#         color=(60/255, 110/255, 135/255, 0.8))
         color='k',
         alpha=0.5)
plt.xlabel(r'Solar Cell Efficiency $\eta$ [%]', size=18, fontname=font)
plt.ylabel(r'Probability $p(\eta)$', size=18, fontname=font)
plt.xlim(left=0, right=20)
plt.yticks(np.arange(0, 0.25, step=0.1))
# KDE overlay with the same bandwidth as the empirical std.
density = gaussian_kde((1-sample)*20)
xs = np.linspace(0,20,50)
density.covariance_factor = lambda : 1*log_norm_var
density._compute_covariance()
plt.plot(xs,density(xs),
#         color=(60/255, 110/255, 135/255))
#         color='mediumseagreen')
         color='k')
plt.tight_layout()
plt.savefig('Fig1.png',dpi=300)
plt.show()
| PV-Lab/Data-Driven-PV | Figure_1.py | Figure_1.py | py | 2,792 | python | en | code | 4 | github-code | 36 |
36663492998 | import news_parser as Util
import datetime
import time
import DBHandler
from tensorflow.keras.models import load_model
import predict
CompanyList=[]
Headless = True # False : ์ฐฝ๋์, True : ์ฐฝ์์
MakeCompanyList = False # ํ์ฌ ๋ฆฌ์คํธ ๊ฐฑ์
host = '๋ฐ์ดํฐ๋ฒ ์ด์ค ์ฃผ์'
ID= '๊ณ์ ๋ช
'
PW='๋น๋ฐ๋ฒํธ'
DB_name='DB์ด๋ฆ'
def GetNewsInfo(driver):
headlines, news_info, Text,NewsUrl = Util.GetNews(driver) # ํค๋๋ผ์ธ, ์ ๋ฌธ์ฌ ์ ๋ณด ๋ฐ ๊ฒ์ ์๊ฐ, ๋ณธ๋ฌธ, ๊ธฐ์ฌ๋งํฌ ํ์ฑ.
CompanyFromNews = Util.GetCompanyFromNews(headlines, CompanyList)
#Util.save_headlines(headlines, news_info, Text,CompanyFromNews,NewsUrl)
#Util.PrintNews(headlines, news_info, Text, CompanyFromNews)
return headlines, news_info, Text, NewsUrl,CompanyFromNews
def GetPriceInfo(driver):
NameList, PriceInfo, Fluctuation = Util.get_prices(driver) #KTOP 30, KOSPI, KOSPI200, KOSDAQ, KOSDAQ150, KRX300 ์
Util.PrintPrice(NameList, PriceInfo, Fluctuation)
return NameList, PriceInfo, Fluctuation
def MakeCompanyFile(MakeCompanyList):
#Company CSVํ์ผ ์์ฑ
Util.MakeCompanyCSV()
if __name__ == '__main__':
print("Setting Interface...")
CompanyList = Util.GetCompanyList() # ์ฝ์คํผ ์์ฅ ๊ธฐ์
์
๋ก๋
try: #๋ค์ด๋ฒ ์ฆ๊ถ
NewsDriver = Util.News_get_driver(Headless)
except Exception as ex:
print("News Driver Err")
print('์๋ฌ๊ฐ ๋ฐ์ ํ์ต๋๋ค', ex)
try : #ํ๊ตญ๊ฑฐ๋์
PriceDriver = Util.NowPriceDriver(Headless)
except Exception as ex:
print("Price Driver Err")
print('์๋ฌ๊ฐ ๋ฐ์ ํ์ต๋๋ค', ex)
try: # ๋ค์ ์ฆ๊ถ
KospiImageDriver = Util.Get_KospiGraphDriver(Headless)
except Exception as ex:
print("KospiImage Driver Err")
print('์๋ฌ๊ฐ ๋ฐ์ ํ์ต๋๋ค', ex)
MakeCompanyFile(MakeCompanyList) #๊ธฐ์
๋ฆฌ์คํธ ๊ฐฑ์
DBController = DBHandler.MySqlController(host, ID, PW, DB_name)
label=[]
while(True):
now = datetime.datetime.now()
nowDatetime = now.strftime('%Y_%m_%d_%H์%M๋ถ%S์ด'.encode('unicode-escape').decode()).encode().decode('unicode-escape')
nowDatehour = now.strftime('%Y_%m_%d_%H์%M๋ถ'.encode('unicode-escape').decode()).encode().decode('unicode-escape')
try:
NameList, PriceInfo, Fluctuation = Util.get_prices(PriceDriver)
Util.PrintPrice(NameList,PriceInfo,Fluctuation)
DBController.update_totalprice(PriceInfo,Fluctuation)
except Exception as ex:
print("Price Info Err")
print('์๋ฌ๊ฐ ๋ฐ์ ํ์ต๋๋ค', ex)
PriceDriver.quit()
PriceDriver = Util.NowPriceDriver(Headless)
NameList, PriceInfo, Fluctuation = Util.get_prices(PriceDriver)
# print("========================================")
try:
headlines, news_info, Text,NewsUrl,CompanyFromNews = GetNewsInfo(NewsDriver) #๋ด์ค์์ ๊ธฐ์
์ถ์ถ
print("News Updated...")
except Exception as ex:
print("News Update Err")
NewsDriver.quit()
NewsDriver = Util.News_get_driver(Headless)
print('์๋ฌ๊ฐ ๋ฐ์ ํ์ต๋๋ค', ex)
try:
Util.Write_News(headlines, CompanyFromNews, nowDatehour) # ๊ธฐ์
๋ณ ๋ด์ค ์๋ฃ Writing
except Exception as ex:
print("News Write Err")
CompanyList = Util.GetCompanyList() # ์ฝ์คํผ ์์ฅ ๊ธฐ์
์
๋ก๋
try:
Util.GetKospiGraph(KospiImageDriver, PriceInfo, Fluctuation) # Kospi, Kosdaq ๊ทธ๋ํ ์ด๋ฏธ์ง ์ ์ฅ
print("Get Kospi Graph")
except Exception as ex:
KospiImageDriver.quit()
KospiImageDriver = Util.Get_KospiGraphDriver(Headless)
print("Graph Err")
print('์๋ฌ๊ฐ ๋ฐ์ ํ์ต๋๋ค', ex)
try:
label = predict.classification(headlines, model)
print("Get labels")
DBController.UpdateNews(CompanyFromNews, headlines, Text, NewsUrl, news_info, label) # ์ต์ 20๊ฐ ๊ธฐ์ฌ DB์ ์ฅ
DBController.InsertNewsHistory(CompanyFromNews, headlines, Text, NewsUrl, news_info, nowDatehour)
print("DB Commit : News Updated, News History Inserted")
except Exception as ex:
print("Label Err")
MakeCompanyFile(MakeCompanyList) # ๊ธฐ์
๋ฆฌ์คํธ ๊ฐฑ์
DBController = DBHandler.MySqlController(host, ID, PW, DB_name)
print('์๋ฌ๊ฐ ๋ฐ์ ํ์ต๋๋ค', ex)
time.sleep(30)
NewsDriver.refresh()
PriceDriver.refresh()
KospiImageDriver.refresh()
print("DONE") | woqls22/StockNews | BackEnd/PythonScripts/main.py | main.py | py | 4,929 | python | en | code | 3 | github-code | 36 |
22530303098 | import numpy as np
import pytest
from gym.spaces import Box, Discrete
from gym.wrappers import AtariPreprocessing, StepAPICompatibility
from tests.testing_env import GenericTestEnv, old_step_fn
class AleTesting:
    """A testing implementation for the ALE object in atari games."""
    # Seeded random observation spaces standing in for the real ALE screen
    # buffers (210x160 grayscale and 210x160x3 RGB, uint8).
    grayscale_obs_space = Box(low=0, high=255, shape=(210, 160), dtype=np.uint8, seed=1)
    rgb_obs_space = Box(low=0, high=255, shape=(210, 160, 3), dtype=np.uint8, seed=1)
    def lives(self) -> int:
        """Returns the number of lives in the atari game."""
        return 1
    def getScreenGrayscale(self, buffer: np.ndarray):
        """Updates the buffer with a random grayscale observation."""
        buffer[...] = self.grayscale_obs_space.sample()
    def getScreenRGB(self, buffer: np.ndarray):
        """Updates the buffer with a random rgb observation."""
        buffer[...] = self.rgb_obs_space.sample()
class AtariTestingEnv(GenericTestEnv):
    """A testing environment to replicate the atari (ale-py) environments."""
    def __init__(self):
        # Mirror ale-py's spaces: 210x160x3 uint8 frames, 3 discrete actions.
        super().__init__(
            observation_space=Box(
                low=0, high=255, shape=(210, 160, 3), dtype=np.uint8, seed=1
            ),
            action_space=Discrete(3, seed=1),
            step_fn=old_step_fn,
        )
        # Fake ALE handle; AtariPreprocessing reads env.ale directly.
        self.ale = AleTesting()
    def get_action_meanings(self):
        """Returns the meanings of each of the actions available to the agent. First index must be 'NOOP'."""
        return ["NOOP", "UP", "DOWN"]
# Each case pairs a (possibly wrapped) env with the observation shape the
# AtariPreprocessing wrapper should expose: raw RGB, grayscale, scaled RGB,
# and grayscale with an explicit channel axis.
@pytest.mark.parametrize(
    "env, obs_shape",
    [
        (AtariTestingEnv(), (210, 160, 3)),
        (
            AtariPreprocessing(
                StepAPICompatibility(AtariTestingEnv(), output_truncation_bool=True),
                screen_size=84,
                grayscale_obs=True,
                frame_skip=1,
                noop_max=0,
            ),
            (84, 84),
        ),
        (
            AtariPreprocessing(
                StepAPICompatibility(AtariTestingEnv(), output_truncation_bool=True),
                screen_size=84,
                grayscale_obs=False,
                frame_skip=1,
                noop_max=0,
            ),
            (84, 84, 3),
        ),
        (
            AtariPreprocessing(
                StepAPICompatibility(AtariTestingEnv(), output_truncation_bool=True),
                screen_size=84,
                grayscale_obs=True,
                frame_skip=1,
                noop_max=0,
                grayscale_newaxis=True,
            ),
            (84, 84, 1),
        ),
    ],
)
def test_atari_preprocessing_grayscale(env, obs_shape):
    """Check the declared observation shape and that reset/step observations
    stay inside the observation space."""
    assert env.observation_space.shape == obs_shape
    # It is not possible to test the outputs as we are not using actual observations.
    # todo: update when ale-py is compatible with the ci
    env = StepAPICompatibility(
        env, output_truncation_bool=True
    )  # using compatibility wrapper since ale-py uses old step API
    obs, _ = env.reset(seed=0)
    assert obs in env.observation_space
    obs, _, _, _, _ = env.step(env.action_space.sample())
    assert obs in env.observation_space
    env.close()
@pytest.mark.parametrize("grayscale", [True, False])
@pytest.mark.parametrize("scaled", [True, False])
def test_atari_preprocessing_scale(grayscale, scaled, max_test_steps=10):
    """Observations must stay within [0, 1] when scale_obs is on, else [0, 255]."""
    # arbitrarily chosen number for stepping into env. and ensuring all observations are in the required range
    env = AtariPreprocessing(
        StepAPICompatibility(AtariTestingEnv(), output_truncation_bool=True),
        screen_size=84,
        grayscale_obs=grayscale,
        scale_obs=scaled,
        frame_skip=1,
        noop_max=0,
    )
    obs, _ = env.reset()
    max_obs = 1 if scaled else 255
    assert np.all(0 <= obs) and np.all(obs <= max_obs)
    terminated, truncated, step_i = False, False, 0
    while not (terminated or truncated) and step_i <= max_test_steps:
        obs, _, terminated, truncated, _ = env.step(env.action_space.sample())
        assert np.all(0 <= obs) and np.all(obs <= max_obs)
        step_i += 1
    env.close()
| openai/gym | tests/wrappers/test_atari_preprocessing.py | test_atari_preprocessing.py | py | 4,102 | python | en | code | 33,110 | github-code | 36 |
def osm_vs_imd(osmxlsx, osmxml, imd, outfishnet, outshp):
    """Compare OSM-derived LULC polygons against an IMD raster (dev script).

    Imports *osmxml* into a PostGIS database and tags the ``multipolygons``
    table with an integer ``lulc_cls`` derived from the key/value reference
    sheet in *osmxlsx*.

    NOTE(review): *outfishnet* is currently unused and *outshp* only
    supplies the working directory -- the fishnet step is not implemented.
    """
    # Create a fishnet using the raster file.
    # FIX: was ``while imd:`` -- with no break or reassignment of ``imd`` the
    # loop body ran forever; a truthiness guard is what the structure implies.
    if imd:
        osm_ref_tags = {
            "TABLE" : osmxlsx,
            "SHEET" : 'osm_features',
            "LULC_COL" : 'L4',
            "KEY_COL" : "key",
            "VALUE_COL" : "value",
            "GEOM_COL" : "geom"
        }
        osmdata = {
            "FILE" : osmxml,
            "DB" : 'dgt_cmb',
            "TABLE" : "multipolygons",
            "DBSET" : "local"
        }
        ref_edificado = [
            '1151', '1221',
            '1222', '1223', '1231', '1241',
            '1251', '1252', '1254', '1255',
            '1257', '1253', '1612',
            '1631', '1632', '1633', '1651',
            '16', '143', '1431', '1432'
        ]
        lulccls = 'lulc_cls'
        epsg = 3763
    import os
    from glass.it.osm import osm_to_psql
    from glass.prop.sql import cols_name
    from glass.rd import tbl_to_obj
    from glass.sql.db import create_pgdb
    from glass.pys.oss import mkdir
    # Prepare workspace
    ws = mkdir(os.path.join(
        os.path.dirname(outshp), 'grswork'
    ), overwrite=True)
    # When data already exists in the workspace (to avoid re-running it all):
    #ws = os.path.join(os.path.dirname(outshp), 'grswork')
    # Import data into a database
    create_pgdb(osmdata["DB"], overwrite=True, dbset=osmdata["DBSET"])
    osm_to_psql(osmdata["FILE"], osmdata["DB"])
    # Keep only single-tag polygon rows from the reference sheet.
    osm_tags = tbl_to_obj(osm_ref_tags["TABLE"], sheet=osm_ref_tags["SHEET"])
    osm_tags = osm_tags[osm_tags[osm_ref_tags["GEOM_COL"]] == 'Polygon']
    osm_tags['sevtags'] = osm_tags[osm_ref_tags["LULC_COL"]].str.contains(';')
    osm_tags = osm_tags[osm_tags.sevtags != True]
    # Create key/value column holding SQL predicates like "multipolygons.key='value'".
    osm_tags.loc[:, osm_ref_tags["VALUE_COL"]] = osmdata["TABLE"] + "." + \
        osm_tags[osm_ref_tags["KEY_COL"]] + \
        "='" + osm_tags[osm_ref_tags["VALUE_COL"]] + "'"
    # Add new column to multipolygons table
    # Update it adding an LULC class
    cols = cols_name(osmdata["DB"], osmdata['TABLE'], dbset=osmdata["DBSET"])
    qs = [] if "lulc_cls" in cols else [(
        f"ALTER TABLE {osmdata['TABLE']} ADD COLUMN "
        "lulc_cls integer"
    )]
    for cls in osm_tags[osm_ref_tags["LULC_COL"]].unique():
        # If a feature matches tags of two different classes, it keeps the
        # last class processed; a multi-tag approach would resolve this.
        __osmtags = osm_tags[osm_tags[osm_ref_tags["LULC_COL"]] == cls]
        qs.append((
            f"UPDATE {osmdata['TABLE']} SET lulc_cls={str(cls)} "
            f"WHERE {str(__osmtags[osm_ref_tags['VALUE_COL']].str.cat(sep=' OR '))}"
        ))
    cols = cols_name(osmdata["DB"], osmdata['TABLE'], dbset=osmdata["DBSET"])
    print(cols)
29394077350 | import cv2
import numpy as np
import glob
from pyqtgraph.Qt import QtCore, QtGui
import pyqtgraph.opengl as gl
import numpy as np
from time import sleep
# Load position labels/predictions (absolute Windows paths from the
# original experiment machine).
dis_test_label = np.load("D:/PythonFile/NestProject/Nest_Model/pos_process/test_data/dis_test_data_complex_exp_shuffle_3.npy")
dis_test_predict = np.load('D:/PythonFile/NestProject/Nest_Model/pos_process/prediction_data/predict_data_complex_exp_shuffle_3.npy')
dis_train_label = np.load("D:/NestData/3tx-32chirp-jaco-55times_all/data_usage/radar_pos_label_deleted.npy")
dis_train_predict = np.load('D:/PythonFile/NestProject/Nest_Model/pos_process/prediction_data/predict_data_complex_exp_shuffle_4(train).npy')
# dis_label = np.concatenate((label1, label2))
print(dis_train_label.shape, dis_test_predict.shape)
# Set up the 3D scatter view.
app = QtGui.QApplication([])
w = gl.GLViewWidget()
w.show()
g = gl.GLGridItem()
w.addItem(g)
# RGBA colors: red (labels), green (train labels).
colors_1 = [1.0,0,0,0.5]
colors_2 = [0,1.0,0,0.5]
# dis_test_predict = dis_test_predict[:,0,:]
# Convert from centimeters to meters for display.
dis_test_label = dis_test_label/100
dis_test_predict = dis_test_predict/100
dis_train_label = dis_train_label/100
dis_train_predict = dis_train_predict/100
dis_train_label = dis_train_label[:500]
# dis_test_label = dis_test_label[:,1:3]
# dis_predict = dis_predict[:,1:3]
# print(dis_test_label.shape)
# sp0 = gl.GLScatterPlotItem(pos=dis_test_label[:], color=colors_1)
# w.addItem(sp0)
# sp1 = gl.GLScatterPlotItem(pos=dis_test_predict[:])
# w.addItem(sp1)
# Currently only the (truncated) training labels are plotted.
sp2 = gl.GLScatterPlotItem(pos=dis_train_label[:], color=colors_2)
w.addItem(sp2)
# sp3 = gl.GLScatterPlotItem(pos=dis_train_predict[:])
# w.addItem(sp3)
# i = 0
# def update():
#     global i
#     sp2 = gl.GLScatterPlotItem(pos=dis_train_label[i], color=colors_1)
#     w.addItem(sp2)
#     # sp3 = gl.GLScatterPlotItem(pos=dis_test_predict[i])
#     # w.addItem(sp3)
#     print(i)
#     i += 1
# time = QtCore.QTimer()
# time.timeout.connect(update)
# time.start(5)
if __name__ == '__main__':
    import sys
    # Only start the Qt event loop when not running interactively.
    if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
        QtGui.QApplication.instance().exec_()
| yangyongjx/LoRaNet | pos_process/reprojection_img.py | reprojection_img.py | py | 2,149 | python | en | code | 0 | github-code | 36 |
3273808823 | # This is a sample Python script.
#########################################: Please Don't Change :#######################################
import logging
import os
import sys
from datetime import datetime
sys.path.append(
"/home/sonu/workspace/pro/component/"
)
sys.path.append(
"/home/sonu/workspace/pro/utils/"
)
sys.path.append(
"/home/sonu/workspace/pro/db_conn/"
)
from common import get_logger
from db_conn import DatabaseConnection
def log_setup():
    """Create a ``log`` directory next to this file and return a handler.

    The returned ``TimedRotatingFileHandler`` writes to
    ``log/running_log.log``, rotating at midnight and keeping 5 backups.
    Required by log_confi.yaml.
    """
    # FIX: `import logging` alone does not bind the `logging.handlers`
    # submodule; referencing logging.handlers.* could raise AttributeError.
    import logging.handlers

    path = os.path.dirname(os.path.realpath(__file__))
    log_dir = os.path.join(path, "log")
    os.makedirs(log_dir, exist_ok=True)
    # log_dir is already absolute, so join() ignores `path` here -- the
    # result is log_dir/running_log.log either way.
    log_path = os.path.join(path, log_dir, "running_log.log")
    filelog = logging.handlers.TimedRotatingFileHandler(
        log_path, when="midnight", backupCount=5
    )
    return filelog
#########################################: Please Code write Below :#######################################
STAGE_01 = "Connection Establish"
STAGE_02 = ""
STAGE_03 = ""
STAGE_04 = ""
def main():
    """Open a Snowflake connection and log a round-trip timestamp query.

    All failures are caught and logged via logger.exception; nothing is
    re-raised (script-level entry point).
    """
    logger = get_logger(logger_name="sample")
    logger.info("main logging initialized")
    try:
        start_time = datetime.now()
        logger.info(f"<<<<<<< The start of {STAGE_01} has begun. >>>>>>>")
        database_connection = DatabaseConnection()
        snowflake_connection = database_connection.get_snowflake_connection()
        logger.info(f"<<<<<<< {STAGE_01} has been completed. >>>>>>>")
        # Sanity-check the connection with a trivial query.
        cux = snowflake_connection.cursor()
        cux.execute("select current_timestamp();")
        result = cux.fetchone()
        logger.info(f"test connection succeed at {str(result)}")
        end_time = datetime.now()
        logger.info(
            "The project has been successfully executed, with a runtime of {0}.".format(
                end_time - start_time
            )
        )
    except Exception as e:
        # NOTE(review): the cursor/connection are never closed -- consider
        # a finally block or context manager.
        logger.exception(f"getting error message {str(e)}")
main()
| rajeshraj124/advanced_logger_with_single_place_credentials | pro_sample/main.py | main.py | py | 1,997 | python | en | code | 0 | github-code | 36 |
35388901124 | #!/usr/bin/env python3
from sys import stderr, stdout
from os import environ
from random import randrange, randint
from tc import TC
from triangolo_lib import Triangolo
############## TESTCASES' PARAMETERS ############################
TL = 1 # the time limit for each testcase
MAPPER = {"tiny": 1, "small": 2, "medium": 3, "big": 4}
DATA = ((10, (5, 7)), (10, (8, 10)), (10, (25, 28)), (70, (30, 40)))
# that is, 10 instances of size "tiny", i.e., with 5 <= n <= 6
#################################################################
def gen_tc(min_n, max_n):
    """Generate one testcase: a random digit triangle plus a solution rank.

    Prints the triangle (via Triangolo.display) and the chosen rank to
    stdout for the solver, and returns (Tr, rnk) for check_tc.
    """
    n = randint(min_n, max_n)
    Tr = Triangolo([[randint(0, 9) for j in range(i+1)] for i in range(n)])
    Tr.display(stdout)
    # Uniformly pick a rank among the optimal solutions (memoized count).
    rnk = randrange(Tr.num_opt_sols_ric_memo())
    print(rnk)
    return (Tr, rnk)
def check_tc(Tr, rnk):
    """Validate the solver's answer read from stdin.

    Reads the claimed optimum value and the solution string; checks the
    solution is valid/optimal and that its rank equals *rnk*.  Returns
    True on success, or (False, message) describing the failure.
    """
    risp_val = int(input())
    risp_sol = input().strip()
    ok, rank_of_risp = Tr.rank_unsafe(risp_sol, risp_val)
    if not ok:
        # rank_of_risp carries the error message in this case.
        return False, rank_of_risp
    if rank_of_risp != rnk:
        return False, f"On input:\n{Tr.n} {rnk}\n{Tr.as_str(with_n = False)}\nyou were right in stating that the optimum value of a solution is {risp_val}. However, you then returned the optimal solution:\n{risp_sol}\nwhich is of rank {rank_of_risp}. Instead, the optimal solution of rank {rnk}, as required, was:\n{Tr.unrank_safe(rnk)}"
    return True
if __name__ == "__main__":
size = MAPPER[environ["TAL_size"]]
tc = TC(DATA[:size], TL)
tc.run(gen_tc, check_tc)
| romeorizzi/TALight | tal_algo/private/triangolo_unrank_opt_sol/manager.py | manager.py | py | 1,478 | python | en | code | 11 | github-code | 36 |
22869043882 | import pygame, sys #๊ธฐ๋ณธ์ธํ
import random, time #๋ด๊ฐ ์ถ๊ฐํ ๊ฒ
from pygame.locals import *
#Set up pygame.
pygame.init()
#์์ ์ ์
SCREEN =8
BLACK = (0,0,0)
GREEN = (0, 128, 0)
WHITE = (255, 255, 255)
BLUE = (0,0,255)
RED = (255,0,0)
YELLOW = (255,204,51)
screen = pygame.display.set_mode((600,400), 0,32)
pygame.display.set_caption("Othello")
#ํ๋ฉด ์ธํ
screen.fill(GREEN)
#๊ฐ๋ก ์ค ๊ธ๊ธฐ
for x in range(0, 8):
if x==0:
continue
else:
pygame.draw.line(screen, BLACK, [0,x*50],[400,x*50],5)
#์ธ๋ก ์ค ๊ธ๊ธฐ
for y in range(0,9):
if y==0:
continue
else:
pygame.draw.line(screen, BLACK, [y*50,0],[y*50,400],5)
#์ค๋ฅธ์ชฝ์ ์ํ์ฐฝ ๋ง๋ค๊ธฐ
pygame.draw.rect(screen, WHITE, [403,0,200,400])
#๊ฐ ์์น์์์ ๋ธ๋ญ๊ฐ๋ค ์ด๊ธฐํ
screenArr = [] #๋ฆฌ์คํธ ์์ ๋ฆฌ์คํธ. ์ด์ ๋๋๊ธฐ ์ํจ.
for y in range(0,SCREEN):
colList =[]
for x in range(0,SCREEN):
colList.append(0)
screenArr.append(colList)
screenArr[3][3]=2
screenArr[3][4]=1
screenArr[4][3]=1
screenArr[4][4]=2
#๋ณ์
currentTurn =1 #ํ์ฌ ํด- ํ๋ ์ด์ด :1 ์ปดํจํฐ :2
diagnoalScreenArr =[] #๋๊ฐ์ ๊ฒ์ฌ๋ฅผ ์ํ ๋ณ์, 3์ฐจ์ ๋ฐฐ์ด
for i in range(0,4):
rowList =[]
for y in range(0,SCREEN):
colList =[]
for x in range(0,SCREEN):
colList.append(0)
rowList.append(colList)
diagnoalScreenArr.append(rowList)
#ํจ์
def changeTurn(pTurn):
    """Return the opposing turn: 1 (player) <-> 2 (computer); -1 on bad input."""
    return {1: 2, 2: 1}.get(pTurn, -1)
def changeArrxToX(arrx):
    """Convert a board column index to the pixel x of the cell's centre.

    Each 50 px cell's centre sits at 25 * (2*arrx + 1).  Returns None for
    an out-of-range index -- the same contract as the original linear scan,
    but computed in O(1).
    """
    if 0 <= arrx < SCREEN:
        return 25 * (arrx * 2 + 1)
    return None
def changeArryToY(arry):
    """Convert a board row index to the pixel y of the cell's centre.

    Each 50 px cell's centre sits at 25 * (2*arry + 1).  Returns None for
    an out-of-range index -- the same contract as the original linear scan,
    but computed in O(1).
    """
    if 0 <= arry < SCREEN:
        return 25 * (arry * 2 + 1)
    return None
def viewGameScreen():
    """Draw a disc for every occupied cell according to screenArr codes."""
    for arry in range(0,SCREEN):
        for arrx in range(0,SCREEN):
            if screenArr[arry][arrx] ==1: # player's disc
                pygame.draw.circle(screen, BLACK, [changeArrxToX(arrx),changeArryToY(arry)], 20)
            elif screenArr[arry][arrx] ==2: # computer's disc
                pygame.draw.circle(screen, WHITE, [changeArrxToX(arrx),changeArryToY(arry)], 20)
            elif screenArr[arry][arrx] ==3: # computer's randomly chosen move, shown as a preview
                pygame.draw.circle(screen, BLUE, [changeArrxToX(arrx),changeArryToY(arry)], 20)
            elif screenArr[arry][arrx] ==4: # blank filler used when the game ends with empty cells
                pygame.draw.circle(screen, GREEN, [changeArrxToX(arrx),changeArryToY(arry)], 20)
def changeMousePosXToArrx(mousePosX):
    """Map a mouse x pixel to a board column index, or -1.

    A click must land strictly inside a 50 px cell, more than 5 px away
    from each grid line; clicks on the lines or off the board yield -1.

    Bug fix: the old ``else`` was paired with the ``if`` *inside* the loop,
    so only column 0 was ever tested and every other click returned -1.
    """
    for i in range(0, SCREEN):
        if 50 * i + 5 < mousePosX < 50 * (i + 1) - 5:
            return i
    return -1  # on a grid line or outside the board
def changeMousePosYToArry(mousePosY):
    """Map a mouse y pixel to a board row index, or -1.

    A click must land strictly inside a 50 px cell, more than 5 px away
    from each grid line; clicks on the lines or off the board yield -1.

    Bug fix: the old ``else`` was paired with the ``if`` *inside* the loop,
    so only row 0 was ever tested and every other click returned -1.
    """
    for i in range(0, SCREEN):
        if 50 * i + 5 < mousePosY < 50 * (i + 1) - 5:
            return i
    return -1  # on a grid line or outside the board
def checkIfTherisBlock(pScreenArr):
    """Return 1 when the cell value is an existing disc (1 = player,
    2 = computer), else 0."""
    return 1 if pScreenArr in (1, 2) else 0
def setDiagonalCnt():
    """Precompute, for each cell and each of 4 diagonal directions, how many
    cells lie between that cell and the board edge (used to bound the flip
    scan in InspectIfItCanBePlacedInPlace).  Fills diagnoalScreenArr."""
    # Direction 0: up-left diagonal.
    diagonalDir =0
    for row in range(0,SCREEN):
        for col in range(7, row-1,-1):
            diagnoalScreenArr[diagonalDir][row][col]=row
        remainingCol = row
        num =0
        for col in range(0, remainingCol):
            diagnoalScreenArr[diagonalDir][row][col] = num
            num=num+1
    # Direction 1: up-right diagonal.
    diagonalDir =1
    for row in range(0,SCREEN):
        for col in range(0, SCREEN-row):
            diagnoalScreenArr[diagonalDir][row][col]=row
        remainingCol = 7 -row
        num =row
        for col in range(remainingCol, SCREEN):
            diagnoalScreenArr[diagonalDir][row][col] = num
            num = num-1
    # Direction 2: down-left diagonal.
    diagonalDir =2
    for row in range(7, -1, -1):
        for col in range(7, 6-row, -1):
            diagnoalScreenArr[diagonalDir][row][col] = 7-row
        remainingCol = 7-row
        num =0
        for col in range(0, remainingCol):
            diagnoalScreenArr[diagonalDir][row][col] = num
            num = num+1
    # Direction 3: down-right diagonal.
    diagonalDir =3
    for row in range(7, -1, -1):
        for col in range(0, 1+row):
            diagnoalScreenArr[diagonalDir][row][col] =7-row
        remainingCol = row+1
        num = 6-row
        for col in range(remainingCol, SCREEN):
            diagnoalScreenArr[diagonalDir][row][col] = num
            num = num-1
#setDiagonalCnt()ํจ์ ์๊ฐ์ ํ์ธ
##setDiagonalCnt()
##for x in range(0,8):
## print(diagnoalScreenArr[0][x])
def InspectIfItCanBePlacedInPlace(pArrx, pArry, changeValue, pCurrentTurn):
    """Test whether pCurrentTurn may place a disc at (pArrx, pArry).

    Scans the 4 diagonals (bounded by the precomputed diagnoalScreenArr
    run lengths), then the column up/down and the row left/right.  A
    direction is valid when the adjacent cell holds the opponent and a
    same-colour disc closes the run with no empty cell in between.

    When changeValue is True the enclosed opponent discs are flipped in
    screenArr as a side effect.  Returns 1 if at least one direction is
    valid, else 0; returns 0 immediately for an occupied cell.
    """
    returnValue=0
    if 1==checkIfTherisBlock(screenArr[pArry][pArrx]):
        return 0
    # Diagonal checks.
    for diagonalValue in range(0,4):
        if diagnoalScreenArr[diagonalValue][pArry][pArrx] != 0:
            if diagonalValue==0: # up-left direction
                if screenArr[pArry-1][pArrx-1] == changeTurn(pCurrentTurn):
                    for a in range(1, diagnoalScreenArr[diagonalValue][pArry][pArrx]+1):
                        if screenArr[pArry-a][pArrx-a]==0:
                            break
                        elif screenArr[pArry-a][pArrx-a] ==pCurrentTurn:
                            for b in range(1, a):
                                if changeValue ==True:
                                    screenArr[pArry-b][pArrx-b] =pCurrentTurn
                                returnValue =1
                            break
            if diagonalValue ==1: # up-right direction
                if screenArr[pArry-1][pArrx+1] == changeTurn(pCurrentTurn):
                    for a in range(1, diagnoalScreenArr[diagonalValue][pArry][pArrx]+1):
                        if screenArr[pArry-a][pArrx+a]==0:
                            break
                        elif screenArr[pArry-a][pArrx+a]==pCurrentTurn:
                            for b in range(1, a):
                                if changeValue ==True:
                                    screenArr[pArry-b][pArrx+b]=pCurrentTurn
                                returnValue =1
                            break
            if diagonalValue ==2: # down-left direction
                if screenArr[pArry+1][pArrx-1] == changeTurn(pCurrentTurn):
                    for a in range(1, diagnoalScreenArr[diagonalValue][pArry][pArrx]+1):
                        if screenArr[pArry+a][pArrx-a]==0:
                            break
                        elif screenArr[pArry+a][pArrx-a]==pCurrentTurn:
                            for b in range(1, a):
                                if changeValue ==True:
                                    screenArr[pArry+b][pArrx-b]=pCurrentTurn
                                returnValue =1
                            break
            if diagonalValue ==3: # down-right direction
                if screenArr[pArry+1][pArrx+1] == changeTurn(pCurrentTurn):
                    for a in range(1, diagnoalScreenArr[diagonalValue][pArry][pArrx]+1):
                        if screenArr[pArry+a][pArrx+a]==0:
                            break
                        elif screenArr[pArry+a][pArrx+a]==pCurrentTurn:
                            for b in range(1, a):
                                if changeValue ==True:
                                    screenArr[pArry+b][pArrx+b]=pCurrentTurn
                                returnValue =1
                            break
    # Column check, scanning upwards.
    if pArry != 0: # guard so the index scan below cannot run off the board
        if screenArr[pArry-1][pArrx] == changeTurn(pCurrentTurn):
            for a in range(pArry-1, -1, -1):
                if screenArr[a][pArrx] ==0:
                    break
                elif screenArr[a][pArrx] ==pCurrentTurn:
                    for b in range(pArry-1, a,-1):
                        if changeValue ==True:
                            screenArr[b][pArrx] =pCurrentTurn
                        returnValue =1
                    break
    # Column check, scanning downwards.
    if pArry != SCREEN-1:
        if screenArr[pArry+1][pArrx] == changeTurn(pCurrentTurn):
            for a in range(pArry+1, SCREEN):
                if screenArr[a][pArrx] ==0:
                    break
                elif screenArr[a][pArrx]==pCurrentTurn:
                    for b in range(pArry+1, a):
                        if changeValue ==True:
                            screenArr[b][pArrx]=pCurrentTurn
                        returnValue =1
                    break
    # Row check, scanning leftwards.
    if pArrx !=0:
        if screenArr[pArry][pArrx-1] == changeTurn(pCurrentTurn):
            for a in range(pArrx-1, -1,-1):
                if screenArr[pArry][a] ==0:
                    break
                elif screenArr[pArry][a] ==pCurrentTurn:
                    for b in range(pArrx-1, a, -1):
                        if changeValue ==True:
                            screenArr[pArry][b] =pCurrentTurn
                        returnValue =1
                    break
    # Row check, scanning rightwards.
    if pArrx != SCREEN-1:
        if screenArr[pArry][pArrx+1] == changeTurn(pCurrentTurn):
            for a in range(pArrx+1, SCREEN):
                if screenArr[pArry][a] ==0:
                    break
                elif screenArr[pArry][a] ==pCurrentTurn:
                    for b in range(pArrx+1, a):
                        if changeValue ==True:
                            screenArr[pArry][b] =pCurrentTurn
                        returnValue =1
                    break
    return returnValue # 1 when the move is legal, 0 otherwise
def calculateComputerRandomPlace(randomComputerNum):
    """Pick a random 1-based index among the computer's legal moves.

    (The dead ``randNum = 0`` initialisation in the original was removed.)
    """
    return random.randrange(1, randomComputerNum + 1)
def setWhereComputerCanPutBlock():
    """Let the computer play: enumerate legal moves, pick one at random,
    preview it in blue for 2 seconds, then flip and place the disc."""
    randomComputerNum =1
    tmpRow=-1
    tmpCol=-1
    noMeaningStorage=0
    computerRandomPlace =[]
    # Initialise computerRandomPlace to all zeros (8x8 2-D list).
    for y in range(0,SCREEN):
        colList =[]
        for x in range(0,SCREEN):
            colList.append(0)
        computerRandomPlace.append(colList)
    # Number every legal move 1..N.
    for row in range(0, SCREEN):
        for col in range(0,SCREEN):
            if InspectIfItCanBePlacedInPlace(col, row, False, currentTurn) ==1:
                computerRandomPlace[row][col] = randomComputerNum
                randomComputerNum = randomComputerNum +1
    randomComputerNum = calculateComputerRandomPlace(randomComputerNum-1) # -1 because the loop above over-counts by one
    for row in range(0,SCREEN):
        for col in range(0,SCREEN):
            if computerRandomPlace[row][col] == randomComputerNum:
                screenArr[row][col] =3 # mark the chosen cell as a blue preview
                tmpRow = row
                tmpCol = col
    # Show the randomly chosen position before committing to it.
    viewGameScreen()
    pygame.display.update()
    # Then flip the enclosed discs and place the computer's disc there.
    time.sleep(2)
    noMeaningStorage = InspectIfItCanBePlacedInPlace(tmpCol, tmpRow, True, currentTurn)
    screenArr[tmpRow][tmpCol] = 2 # replace the preview with a white (computer) disc
    viewGameScreen()
    pygame.display.update()
def moveNextTurnWhenBlockCanNotPutPlace():
    """Pass the turn when the current player has no legal move.

    Counts the legal moves for currentTurn; if there are none, switches
    the turn, prints a pass notice and refreshes the status panel.
    """
    global currentTurn
    global isClick
    # NOTE: despite the name, this counts the *placeable* positions.
    cannotPutPlaceCnt =0
    for row in range(0,SCREEN):
        for col in range(0,SCREEN):
            if screenArr[row][col] == 0:
                if InspectIfItCanBePlacedInPlace(col,row,False, currentTurn)==1:
                    cannotPutPlaceCnt = cannotPutPlaceCnt+1
    if cannotPutPlaceCnt ==0 :
        currentTurn = changeTurn(currentTurn)
        print(currentTurn,"์ ์ ์ ๊ฐ ๋์ ๊ณณ์ด ์์ต๋๋ค. ")
        clearStateScreen(False)
        printTurnInformation() # show the new turn owner in the panel
        time.sleep(1)
def viewGameResult():
    """Show the final result: count both players' discs, redraw the board as
    two contiguous runs of discs, announce the winner, then exit.

    NOTE(review): printWinner/printBlockCnt are defined later in the file
    (outside this chunk) -- presumed panel-rendering helpers.
    """
    font = pygame.font.SysFont("arial",20,True)
    playerBlockCnt =0
    computerBlockCnt =0
    # Tally discs and blank the whole board (code 4 draws a green disc).
    for row in range(0,SCREEN):
        for col in range(0,SCREEN):
            if screenArr[row][col] ==1:
                playerBlockCnt = playerBlockCnt+1
            elif screenArr[row][col] ==2:
                computerBlockCnt = computerBlockCnt+1
            screenArr[row][col] = 4
    # Re-lay the board: first playerBlockCnt cells black, then
    # computerBlockCnt cells white, scanning row by row.
    tmpBlockCnt =0
    isFirstCheck = False
    for row in range(0,SCREEN):
        for col in range(0,SCREEN):
            if (tmpBlockCnt < playerBlockCnt) and isFirstCheck == False:
                screenArr[row][col] =1
                tmpBlockCnt = tmpBlockCnt+1
            else:
                if isFirstCheck == False:
                    isFirstCheck = True
                    tmpBlockCnt =0
                if tmpBlockCnt < computerBlockCnt:
                    screenArr[row][col] =2
                    tmpBlockCnt = tmpBlockCnt+1
    print("์ปดํจํฐ ๋ธ๋ญ ๊ฐ์ : ", computerBlockCnt)
    print("ํ๋ ์ด์ด ๋ธ๋ญ ๊ฐ์ : ", playerBlockCnt)
    print("tmpBlockCnt : ", tmpBlockCnt)
    clearStateScreen(True)
    if computerBlockCnt < playerBlockCnt:
        printWinner("Player")
    elif computerBlockCnt > playerBlockCnt:
        printWinner("Computer")
    else: # draw
        printWinner("Draw")
    viewGameScreen()
    printBlockCnt(playerBlockCnt, computerBlockCnt)
    pygame.display.update()
    print("๊ฐ์ ์ถ๋ ฅํ๋ฉด๊น์ง ๋")
    time.sleep(3)
    # TODO: offer a rematch / return to a start screen instead of exiting.
    sys.exit()
def ifNoOneDoNotPutBlock():
    """Return True when neither the player (1) nor the computer (2) has a legal move.

    NOTE(review): the flag name is inverted -- enablePutBlock[i] stays True
    only while NO legal square has been found for that side, so
    [True, True] after the full scan means the game is stuck.
    """
    enablePutBlock= [True,True]
    for row in range(0,SCREEN):
        for col in range(0,SCREEN):
            # A return of 1 means that side CAN legally place a block here.
            if 1==InspectIfItCanBePlacedInPlace(col,row,False,1):
                #print("플레이어 : (",row,col,") : 0")
                enablePutBlock[0] = False
            if 1==InspectIfItCanBePlacedInPlace(col,row,False,2):
                #print("컴퓨터 : (",row,col,") : 0")
                enablePutBlock[1] = False
    if enablePutBlock[0] ==True and enablePutBlock[1] ==True:
        return True
    else:
        return False
def checkGameOver():
    """End the game when the board is full or neither side can move."""
    occupied = sum(
        1
        for r in range(0, SCREEN)
        for c in range(0, SCREEN)
        if screenArr[r][c] in (1, 2)
    )
    if occupied == SCREEN * SCREEN or ifNoOneDoNotPutBlock() == True:
        clearStateScreen(True)
        printGameOverText()
        printCalculateGameResult()
        time.sleep(5)  # keep the "calculating result" notice visible
        viewGameResult()
def printTurn(pTurn):
    """Map a turn code to its display label: 1 -> "Player", 2 -> "Computer"."""
    return {1: "Player", 2: "Computer"}.get(pTurn, "Error")
def clearStateScreen(isGameOver):
    """Blank the right-hand status panel (taller wipe at game over)."""
    wipeHeight = 400 if isGameOver == True else 145
    pygame.draw.rect(screen, WHITE, [403, 0, 200, wipeHeight])
    pygame.display.update()
def printTurnInformation():
    """Show whose turn it is ("Player"/"Computer") in the status panel."""
    labelFont = pygame.font.SysFont("arial", 20, True)
    valueFont = pygame.font.SysFont("arial", 20)
    screen.blit(labelFont.render("Current Turn : ", True, BLACK), (410, 100))
    screen.blit(valueFont.render(printTurn(currentTurn), True, BLACK), (525, 100))
    pygame.display.update()
def printUserColorInformation():
    """Draw the colour legend: player = black disc, computer = white disc."""
    legendFont = pygame.font.SysFont("arial", 20, True)
    screen.blit(legendFont.render("Player Color : ", True, BLACK), (410, 150))
    screen.blit(legendFont.render("Computer Color : ", True, BLACK), (410, 200))
    # One green swatch + coloured disc per row of the legend.
    for swatchY, discColor in ((148, BLACK), (198, WHITE)):
        pygame.draw.rect(screen, GREEN, (548, swatchY, 30, 30))
        pygame.draw.circle(screen, discColor, [563, swatchY + 15], 10)
    pygame.display.update()
def printGameOverText():
    """Display the '-Game Over-' banner in the status panel."""
    bannerFont = pygame.font.SysFont("arial", 30, True)
    screen.blit(bannerFont.render("-Game Over-", True, RED), (425, 50))
    pygame.display.update()
def printCalculateGameResult():
    """Show a '~Calculating Game Result~' notice while results are tallied."""
    noticeFont = pygame.font.SysFont("arial", 15)
    notice = noticeFont.render("~Calculating Game Result~", True, BLACK)
    screen.blit(notice, (425, 100))
    pygame.display.update()
def printWinner(winner):
    """Announce the result: "Player", "Computer", or "Draw".

    Parameters
    ----------
    winner : str
        "Player" or "Computer" for a win, "Draw" for a tie.
    """
    winnerFont = pygame.font.SysFont("arial", 40)
    winnerContentFont = pygame.font.SysFont("arial", 30)
    header = "Winner" if winner != "Draw" else "Result"
    winnerText = winnerFont.render(header, True, RED)
    winnerContentText = winnerContentFont.render("-" + winner + "-", True, YELLOW)
    screen.blit(winnerText, (450, 50))
    # Fix: the original compared against the misspelled "Plyaer", leaving a
    # dead branch.  Behaviour is unchanged: only "Computer" gets the wider
    # x offset; everything else (including "Player") blits at x=460.
    xOffset = 440 if winner == "Computer" else 460
    screen.blit(winnerContentText, (xOffset, 100))
    pygame.display.update()
def printBlockCnt(playerBlockCnt, computerBlockCnt):
    """Show each side's final block count in the status panel."""
    countFont = pygame.font.SysFont("arial", 20)
    for label, pos in (
        ("Player Block : " + str(playerBlockCnt), (440, 200)),
        ("Computer Block : " + str(computerBlockCnt), (430, 225)),
    ):
        screen.blit(countFont.render(label, True, BLACK), pos)
    pygame.display.update()
def printReplayButton():
    """Draw the 'Replay' button label (no display flip here)."""
    btnFont = pygame.font.SysFont("arial", 40)
    # NOTE(review): render()'s 4th argument (2) sits in the background-colour
    # slot, where pygame expects a colour -- confirm intent.
    screen.blit(btnFont.render("Replay", True, WHITE, 2), (100, 200))
def printGoStartScreenButton():
    """Draw the 'Go StartScreen' button label (no display flip here)."""
    btnFont = pygame.font.SysFont("arial", 40)
    # NOTE(review): render()'s 4th argument (2) sits in the background-colour
    # slot, where pygame expects a colour -- confirm intent.
    screen.blit(btnFont.render("Go StartScreen", True, WHITE, 2), (300, 200))
#๋๋ค ๋ธ๋ญ์ ๋์ ์ ์๋ ๊ฒฝ์ฐ
##for row in range(0,SCREEN):
## for col in range(0,SCREEN):
## screenArr[row][col] =2
##screenArr[2][6] =1
##screenArr[2][2] =1
##screenArr[3][3] =1
##screenArr[4][4] =1
##screenArr[4][6] =1
##screenArr[5][5] =1
##screenArr[7][7] =1
##screenArr[6][7] =0
##
##for row in range(0,SCREEN):
## for col in range(0,SCREEN):
## screenArr[row][col] =2
##
##screenArr[2][2]=1
##screenArr[1][0]=0
##screenArr[2][0]=0
##screenArr[3][0]=0
# --- One-time board/UI setup before entering the game loop ---
setDiagonalCnt()
viewGameScreen()
printTurnInformation()
printUserColorInformation()
printReplayButton()
printGoStartScreenButton()
# Game Loop: check for game over, apply the player's click, then let the
# computer respond whenever the turn passes to side 2.
while True:
    checkGameOver()
    for event in pygame.event.get():
        if event.type == QUIT:
            pygame.quit()
            sys.exit()
        if event.type == pygame.MOUSEBUTTONDOWN:
            mousePosToArr = []
            mousePosToArr.append(changeMousePosXToArrx(pygame.mouse.get_pos()[0]))
            mousePosToArr.append(changeMousePosYToArry(pygame.mouse.get_pos()[1]))
            # -1 from either converter means the click fell outside the board.
            if not(mousePosToArr[0] ==-1 or mousePosToArr[1] ==-1):
                if InspectIfItCanBePlacedInPlace(mousePosToArr[0],mousePosToArr[1], True, currentTurn) ==1:
                    mousePos = pygame.mouse.get_pos() # tuple (x, y)
                    screenArr[changeMousePosYToArry(mousePos[1])][changeMousePosXToArrx(mousePos[0])] =1 # recolor the clicked cell
                    currentTurn = changeTurn(currentTurn) # hand the turn over
                    clearStateScreen(False)
                    printTurnInformation() # player -> computer turn: show "Computer"
                    viewGameScreen()
                    pygame.display.update()
    moveNextTurnWhenBlockCanNotPutPlace()
    if currentTurn ==2 :
        time.sleep(2)
        setWhereComputerCanPutBlock()
        currentTurn = changeTurn(currentTurn)
        clearStateScreen(False)
        printTurnInformation() # computer -> player turn: refresh the label
| Choiseungpyo/Othello_Python | Othello.py | Othello.py | py | 22,209 | python | en | code | 0 | github-code | 36 |
69912444584 | """Tools for preprocessing Gradebooks before grading."""
from __future__ import annotations
import typing
import pandas as pd
from .core import AssignmentGrouper
from ._common import resolve_assignment_grouper
# private helper functions =============================================================
def _empty_mask_like(table):
"""Given a dataframe, create another just like it with every entry False."""
empty = table.copy()
empty.iloc[:, :] = False
return empty.astype(bool)
# public functions =====================================================================
# combine_assignment_parts -------------------------------------------------------------
def _combine_assignment_parts(gb, new_name, parts):
    """A helper function to combine assignments under the new name.

    Mutates *gb* in place: earned and possible points are summed across
    *parts*, lateness is the max over the parts, the dropped mask is
    reset, and the individual part columns are removed.
    """
    parts = list(parts)
    # Refuse to combine when any part has drops; the result is ambiguous.
    if gb.dropped[parts].any(axis=None):
        raise ValueError("Cannot combine assignments with drops.")
    assignment_points = gb.points_earned[parts].sum(axis=1)
    assignment_max = gb.points_possible[parts].sum()
    assignment_lateness = gb.lateness[parts].max(axis=1)
    gb.points_earned[new_name] = assignment_points
    gb.points_possible[new_name] = assignment_max
    gb.lateness[new_name] = assignment_lateness
    # we're assuming that dropped was not set; we need to provide an empty
    # mask here, else ._replace will use the existing larger dropped table
    # of gb, which contains all parts
    gb.dropped = _empty_mask_like(gb.points_earned)
    gb.remove_assignments(set(parts) - {new_name})
def combine_assignment_parts(gb, grouper: AssignmentGrouper):
    """Merge multi-part assignments into single assignments.

    Assignments are sometimes recorded in several pieces (for example, a
    written part and a programming part of the same homework). This
    function unifies each group of parts into one assignment and removes
    the individual parts from the gradebook.

    Earned points and possible points are summed across the parts; the
    lateness of the combined assignment is the *maximum* lateness of any
    of its parts.

    Because it is ambiguous what should happen when only some parts of a
    group have been dropped, a `ValueError` is raised if *any* part is
    marked as dropped.

    Grading groups are reset afterwards to prevent errors, since the old
    groups may refer to assignments that no longer exist. It is suggested
    that the gradebook's assignments be finalized before setting the
    assignment groups.

    Parameters
    ----------
    grouper : AssignmentGrouper
        Either: 1) a mapping whose keys are new assignment names, and whose
        values are collections of assignments that should be unified under
        their common key; 2) a list of string prefixes or an instance of
        :class:`Assignments`/:class`LazyAssignments`; each prefix defines a
        group that should be combined; or 3) a callable which maps assignment
        names to new assignment by which they should be grouped.

    Raises
    ------
    ValueError
        If any of the assignments to be unified is marked as dropped.

    Example
    -------
    Given assignments named `homework 01`, `homework 01 - programming`,
    `homework 02`, `homework 02 - programming`, etc., each of the calls
    below combines the parts into `homework 01`, `homework 02`, etc.

    With a callable grouper:

    >>> gradebook.combine_assignment_parts(lambda s: s.split('-')[0].strip())

    With a list of prefixes:

    >>> gradebook.combine_assignment_parts(["homework 01", "homework 02"])

    With a dictionary mapping new assignment names to their parts:

    >>> gradebook.combine_assignment_parts({
    ...     'homework 01': {'homework 01', 'homework 01 - programming'},
    ...     'homework 02': {'homework 02', 'homework 02 - programming'}
    ... })

    """
    groups = resolve_assignment_grouper(grouper, gb.assignments)
    for new_name, parts in groups.items():
        _combine_assignment_parts(gb, new_name, parts)
    # The old grading groups may reference assignments that were removed.
    gb.grading_groups = {}
# combine_assignment_versions ----------------------------------------------------------
def _combine_assignment_versions(gb, new_name, versions):
    """A helper function to combine assignment versions under the new name.

    Mutates *gb* in place: earned points are the max across versions (at
    most one version may be turned in per student), lateness is the max,
    the dropped mask is reset, and the version columns are removed.
    """
    versions = list(versions)
    if gb.dropped[versions].any(axis=None):
        raise ValueError("Cannot combine assignments with drops.")

    # check that points are not earned in multiple versions
    assignments_turned_in = (~pd.isna(gb.points_earned[versions])).sum(axis=1)
    if (assignments_turned_in > 1).any():
        students = assignments_turned_in[assignments_turned_in > 1].index
        msg = f"{list(students)} turned in more than one version."
        raise ValueError(msg)

    # all versions are assumed to be worth the same number of points; the
    # public wrapper documents a ValueError otherwise, but no check existed.
    if gb.points_possible[versions].nunique() > 1:
        raise ValueError("Cannot combine versions with different points possible.")

    assignment_points = gb.points_earned[versions].max(axis=1)
    assignment_max = gb.points_possible[versions[0]]
    assignment_lateness = gb.lateness[versions].max(axis=1)

    gb.points_earned[new_name] = assignment_points
    gb.points_possible[new_name] = assignment_max
    gb.lateness[new_name] = assignment_lateness

    # we're assuming that dropped was not set; we need to provide an empty
    # mask here, else ._replace will use the existing larger dropped table
    # of gb, which contains all versions
    gb.dropped = _empty_mask_like(gb.points_earned)
    gb.remove_assignments(set(versions) - {new_name})
def combine_assignment_versions(gb, grouper: AssignmentGrouper):
    """Combine the assignment versions into one single assignment with the new name.

    Sometimes assignments may have several versions which are recorded separately
    in the grading software. For instance, multiple versions of a midterm may be
    distributed to mitigate cheating.

    The individual assignment versions are removed from the gradebook and
    are unified into a single new version.

    It is assumed that all assignment versions have the same number of
    points possible. If this is not the case, a `ValueError` is raised.

    Similarly, it is assumed that no student earns points for more than one
    of the versions. If this is not true, a `ValueError` is raised.

    If a student's submission for a version is late, the lateness of that
    submission is used for the student's submission in the unified assignment.

    It is unclear what the result should be if any of the assignments to be
    unified is dropped, but other parts are not. Therefore, if any version is
    dropped, this method will raise a `ValueError`.

    Assignment groups are automatically reset to prevent errors. It is
    suggested that the gradebook's assignments be finalized before setting
    the assignment groups.

    Parameters
    ----------
    grouper : AssignmentGrouper
        Either: 1) a mapping whose keys are new assignment names, and whose
        values are collections of assignments that should be unified under
        their common key; 2) a list of string prefixes or an instance of
        :class:`Assignments`/:class`LazyAssignments`; each prefix defines a
        group that should be combined; or 3) a callable which maps assignment
        names to new assignment by which they should be grouped.

    Raises
    ------
    ValueError
        If any of the assumptions are violated. See above.

    Example
    -------
    Assuming the gradebook has assignments named `midterm - version a`,
    `midterm - version b`, `midterm - version c`, etc., the following will
    "combine" the assignments into `midterm`:

    >>> gradebook.combine_assignment_versions(lambda s: s.split('-')[0].strip())

    Alternatively, you could write:

    >>> gradebook.combine_assignment_versions(["midterm"])

    Or:

    >>> gradebook.combine_assignment_versions({
    ...     'midterm': {'midterm - version a', 'midterm - version b', 'midterm - version c'},
    ... })

    """
    dct = resolve_assignment_grouper(grouper, gb.assignments)

    for key, value in dct.items():
        _combine_assignment_versions(gb, key, value)

    # Fix: the docstring promises that assignment groups are reset (and
    # combine_assignment_parts does so), but this function never did.
    gb.grading_groups = {}
| eldridgejm/gradelib | gradelib/preprocessing.py | preprocessing.py | py | 8,266 | python | en | code | 6 | github-code | 36 |
29101674414 | # Debugging script to see how much GPS signal bounced around
import csv
import math
import numpy as np
from matplotlib import pyplot as plt
'''
# distance between points
dx_between_pts = []
prev_lat, prev_long = 0, 0
with open('2_2_23_gps.csv', mode ='r') as f:
csv_f = csv.reader(f)
for i, line in enumerate(csv_f):
# column/header line (e.g. "time, lat, long, alt")
if i == 0:
continue
lat, long = float(line[1]), float(line[2])
if prev_lat != 0:
dlat = lat - prev_lat
dlong = long - prev_long
dx = (dlat**2 + dlong**2) ** .5
if not math.isnan(dx):
dx_between_pts.append(dx)
prev_lat = lat
prev_long = long
dx_min = min(dx_between_pts)
dx_max = max(dx_between_pts)
print("max: ", dx_max, " min: ", dx_min)
dx_sorted = np.sort(dx_between_pts)
plt.plot(dx_sorted)
assert dx_sorted[0] == dx_min, f"{dx_sorted[0]} != {dx_min}"
assert dx_sorted[-1] == dx_max, f"{dx_sorted[-1]} != {dx_max}"
plt.show()
'''
#################
### Phone GPS ###
#################
# Parse a GPX track exported from a phone and measure how far consecutive
# fixes jumped, to gauge how much the GPS signal bounced around.
import gpxpy
import gpxpy.gpx
# NOTE(review): the file handle is never closed; consider a `with` block.
gpx_file = open('phone_gps.gpx', 'r')
gpx = gpxpy.parse(gpx_file)
prev_lat, prev_long = 0, 0
dx_between_pts = []  # displacement between each pair of consecutive points
for track in gpx.tracks:
    for segment in track.segments:
        for point in segment.points:
            lat, long = point.latitude, point.longitude
            # prev_lat == 0 marks "no previous fix yet" (assumes the track
            # never sits exactly on the equator -- TODO confirm).
            if prev_lat != 0:
                dlat = lat - prev_lat
                dlong = long - prev_long
                # Euclidean distance in raw degrees (not meters): fine for
                # comparing jump sizes, not for absolute distances.
                dx = (dlat**2 + dlong**2) ** .5
                if not math.isnan(dx):
                    dx_between_pts.append(dx)
            prev_lat = lat
            prev_long = long
dx_min = min(dx_between_pts)
dx_max = max(dx_between_pts)
print("max: ", dx_max, " min: ", dx_min)
# Plot the sorted displacements and sanity-check the sort endpoints.
dx_sorted = np.sort(dx_between_pts)
plt.plot(dx_sorted)
assert dx_sorted[0] == dx_min, f"{dx_sorted[0]} != {dx_min}"
assert dx_sorted[-1] == dx_max, f"{dx_sorted[-1]} != {dx_max}"
plt.show()
'''
# Creating histogram
fig, ax = plt.subplots(figsize = (10, 7))
min_dx = min(dx_between_pts)
max_dx = max(dx_between_pts)
bin_width = (max_dx - min_dx) / 5
ax.hist(dx_between_pts, bins = np.arange(min_dx, max_dx, bin_width))
# Show plot
plt.show()
'''
| bainro/jackal_melodic | plotGPS.py | plotGPS.py | py | 2,117 | python | en | code | 0 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.