seq_id stringlengths 4 11 | text stringlengths 113 2.92M | repo_name stringlengths 4 125 ⌀ | sub_path stringlengths 3 214 | file_name stringlengths 3 160 | file_ext stringclasses 18
values | file_size_in_byte int64 113 2.92M | program_lang stringclasses 1
value | lang stringclasses 93
values | doc_type stringclasses 1
value | stars int64 0 179k ⌀ | dataset stringclasses 3
values | pt stringclasses 78
values |
|---|---|---|---|---|---|---|---|---|---|---|---|---|
7557841018 | import sys
from collections import deque
def Run(fin, fout):
    """Solve the problem for one input: read a functional graph and write
    a single integer answer to ``fout``.

    Input format (from ``fin``): first line N, then N lines "a v", where
    node i points at node a with associated value v.
    """
    readline = fin.readline
    N = int(readline())
    # to[i] = (a, v): the single outgoing edge of node i and its value.
    to = [None] * (N + 1)
    # from_[a] = set of (i, v): reverse adjacency, used to walk the trees
    # that hang off each cycle.
    from_ = [set() for _ in range(N + 1)]
    for i in range(1, N + 1):
        a, v = map(int, readline().split())
        to[i] = (a, v)
        from_[a].add((i, v))
    visited = set()
    ans = 0
    for i in range(1, N + 1):
        if i in visited:
            continue
        # In a functional graph every node leads into exactly one cycle;
        # find the cycle reachable from i (one per component).
        loop = find_loop(to, from_, i)
        totals = []
        for j in loop:
            # Total value of the tree rooted at cycle node j; dfs also
            # marks every node it touches as visited.
            total = dfs(to, from_, visited, loop, j)
            totals.append(total)
        # NOTE(review): per component, all subtree totals are summed except
        # the smallest one — presumably the problem allows dropping one
        # cycle node's contribution; confirm against the task statement.
        ans += sum(totals) - min(totals)
    fout.write("{}\n".format(ans))
def dfs(to, from_, visited, loop, start):
    """Sum the edge values of the tree hanging off cycle node ``start``.

    Starts from ``start`` (a node on the cycle) and walks the reverse
    edges in ``from_``, never stepping back onto the cycle.  Every node
    reached is added to ``visited``.

    Returns the value on start's own outgoing edge plus the values of
    every edge in start's off-cycle tree.
    """
    # Value of start's own outgoing (cycle) edge.
    total = to[start][1]
    queue = deque([(start, 0)])
    while queue:
        curr, value = queue.pop()
        if curr in visited:
            continue
        visited.add(curr)
        total += value
        # Renamed from the original, which shadowed the builtin ``sum``
        # and reused ``v`` for both the popped value and the loop variable.
        for pred, pred_value in from_[curr]:
            # Stay off the cycle itself; only descend into the tree.
            if pred not in loop:
                queue.append((pred, pred_value))
    return total
def find_loop(to, from_, start):
    """Return the set of nodes forming the cycle reachable from ``start``.

    Follows each node's single outgoing edge until some node repeats;
    the cycle is the portion of the walk from that node's first
    occurrence onward.  ``from_`` is unused but kept for a uniform
    call signature.
    """
    first_index = [None] * len(to)
    walk = []
    node = start
    step = 0
    while first_index[node] is None:
        first_index[node] = step
        walk.append(node)
        node = to[node][0]
        step += 1
    return set(walk[first_index[node]:])
Run(sys.stdin, sys.stdout) | chenant2017/USACO | Silver/2022 Open/p1.py | p1.py | py | 1,247 | python | en | code | 2 | github-code | 36 |
73118843944 | import multiprocessing
from threading import Thread
import time
def is_prime(n):
    """Return True when ``n`` is prime (trial division up to sqrt(n))."""
    if n < 2:
        return False
    return all(n % divisor != 0 for divisor in range(2, int(n ** 0.5) + 1))
def find_primes(end, start):
    """Return the primes in the inclusive range [start, end].

    The (end, start) parameter order is odd but is kept as-is for
    backward compatibility with existing positional callers.
    """
    # BUG FIX: the original used range(start, end - 1), which silently
    # skipped the last two candidates of every segment, so consecutive
    # segments such as (3..10000) and (10001..20000) left numbers like
    # 9999 and 10000 unchecked.  range(start, end + 1) makes the bounds
    # inclusive and the segments contiguous.
    return [num for num in range(start, end + 1) if is_prime(num)]
if __name__ == "__main__":
    # --- Sequential baseline: three consecutive segments of [3, 30000]. ---
    star_time = time.time()
    res1 = find_primes(10000, 3)
    res2 = find_primes(20000, 10001)
    res3 = find_primes(30000, 20001)
    end_time = time.time()
    timer = end_time - star_time
    print(f"Затрачено времени на поэтапный запууск: {timer} сек")

    # --- Same work split across three threads. ---
    start_time = time.perf_counter()
    t1 = Thread(target=find_primes, args=(10000, 3))
    t2 = Thread(target=find_primes, args=(20000, 10001))
    t3 = Thread(target=find_primes, args=(30000, 20001))
    t1.start()
    t2.start()
    t3.start()
    t1.join()
    t2.join()
    t3.join()
    print(f"Время выполнения в потоках {time.perf_counter() - start_time} сек")

    # --- Same work split across three processes. ---
    start_time = time.perf_counter()
    # BUG FIX: the arguments were passed as (start, end) although
    # find_primes takes (end, start); each process therefore received an
    # empty range, did no work, and the process timing was meaningless.
    # The argument order now matches the sequential and threaded calls.
    p1 = multiprocessing.Process(target=find_primes, args=(10000, 3))
    p2 = multiprocessing.Process(target=find_primes, args=(20000, 10001))
    p3 = multiprocessing.Process(target=find_primes, args=(30000, 20001))
    p1.start()
    p2.start()
    p3.start()
    p1.join()
    p2.join()
    p3.join()
    print(f"Время выполнения в разных процессах {time.perf_counter() - start_time} сек")

# Without start() the threads/processes never run; without join() the main
# program would not wait for the children to finish.
# Thread-based parallelism gives no speed-up for CPU-bound work because of
# the GIL: only one thread executes Python bytecode at a time.
# Process-based parallelism did not pay off here either: the process
# start-up overhead outweighs the modest amount of computation.
| IlyaOrlov/PythonCourse2.0_September23 | Practice/achernov/module_12/task_1.py | task_1.py | py | 2,712 | python | ru | code | 2 | github-code | 36 |
10392633050 | import json
import os
import cv2
from cfg import cfg
import numpy as np
from collections import defaultdict as dd
from dsl.base_dsl import BaseDSL, one_hot_labels
class NSFWDSL(BaseDSL):
    """Dataset loader for the NSFW image set (classes: normal / porn / sexy).

    Thin specialization of :class:`BaseDSL` that reads the sample list from
    ``nsfw/nsfw_dict.json`` and reserves a fixed number of samples per
    class as the test split.
    """

    def __init__(self, batch_size, shuffle_each_epoch=False, seed=1337,
                 normalize=True, mode='train', val_frac=0.02, resize=None):
        # mode selects which partition load_data() keeps.
        assert mode == "train" or mode == "val" or mode == "test"
        self.shape = (cfg.img_size, cfg.img_size, 3)  # H x W x RGB
        self.ntest = cfg.ntest  # samples per class kept aside for 'test'
        self.mode = mode
        # NOTE(review): normalize is stored here but BaseDSL is called with
        # normalize=False below — confirm whether that is intentional.
        self.normalize = normalize
        if mode == 'val':
            assert val_frac is not None
        super(NSFWDSL, self).__init__(
            batch_size,
            shuffle_each_epoch=shuffle_each_epoch,
            seed=seed,
            normalize=False,
            mode=mode,
            val_frac=val_frac,
            normalize_channels=False,
            resize=resize
        )

    def is_multilabel(self):
        # Each image carries exactly one class label.
        return False

    def load_variable(self, file_path, data_type, var_shape):
        """Load a raw binary array from ``file_path`` and reshape it."""
        var = np.fromfile(file_path, dtype=data_type)
        var.shape = var_shape
        return var

    def get_sample_shape(self):
        # (img_size, img_size, 3), set in __init__.
        return self.shape

    def get_partition_to_idxs(self, samples):
        """Split sample indices into 'train'/'test' with ntest per class
        reserved for test; reproducible via cfg.DS_SEED."""
        partition_to_idxs = {
            'train': [],
            'test': []
        }
        # Use a fixed seed so the split is reproducible, then restore the
        # global NumPy RNG state so callers are unaffected.
        prev_state = np.random.get_state()
        np.random.seed(cfg.DS_SEED)
        classidx_to_idxs = dd(list)
        for idx, s in enumerate(samples):
            classidx = s[1]  # samples are (image path, class index) pairs
            classidx_to_idxs[classidx].append(idx)
        # Shuffle classidx_to_idx
        for classidx, idxs in classidx_to_idxs.items():
            np.random.shuffle(idxs)
        for classidx, idxs in classidx_to_idxs.items():
            partition_to_idxs['test'] += idxs[:self.ntest]   # A constant no. kept aside for evaluation
            partition_to_idxs['train'] += idxs[self.ntest:]  # Train on remaining
        # Revert randomness to original state
        np.random.set_state(prev_state)
        return partition_to_idxs

    def create_label_dict(self):
        """Map image path -> class label for the currently loaded split."""
        label_dict = {}
        for (img_name, pred_label) in zip(self.data, self.labels):
            label_dict[img_name] = pred_label
        return label_dict

    def load_data(self, mode, val_frac):
        """Populate self.data / self.labels for the requested partition."""
        with open("nsfw/nsfw_dict.json", 'r') as f:
            nsfw_dict = json.load(f)
        samples = nsfw_dict["normal"] + nsfw_dict["porn"] + nsfw_dict["sexy"]
        partition_to_idxs = self.get_partition_to_idxs(samples)
        if mode == 'test':
            pruned_idxs = partition_to_idxs['test']
        else:
            assert mode == 'train' or mode == 'val'
            # 'val' is carved out of the train partition further below.
            pruned_idxs = partition_to_idxs['train']
        samples = [samples[i] for i in pruned_idxs]
        self.data = []
        self.labels = []
        for sample in samples:
            self.data.append(sample[0])    # image path
            self.labels.append(sample[1])  # class index
        self.data = np.array(self.data)
        self.labels = np.array(self.labels)
        self.label_dict = self.create_label_dict()
        # Perform splitting
        if val_frac is not None:
            self.partition_validation_set(mode, val_frac)
        self.labels = np.squeeze(self.labels)

    def convert_Y(self, Y):
        # Three classes: normal / porn / sexy.
        return one_hot_labels(Y, 3)
| gongzhimin/ActiveThief-attack-MLaaS | dsl/nsfw_dsl.py | nsfw_dsl.py | py | 3,309 | python | en | code | 2 | github-code | 36 |
36720200958 | #!/usr/bin/env python
"""
Parser for condor job log files to get information out
"""
from datetime import datetime, timedelta
from .logit import log
from . import jobsub_fetcher
from .poms_model import Submission
# our own logging handle, goes to cherrypy
def get_joblogs(dbhandle, jobsub_job_id, cert, key, experiment, role):
    """
    Get the condor joblog for a given job.

    Looks up the Submission row for ``jobsub_job_id``, fetches the job's
    file listing via jobsub, picks the preferred ``.log`` file
    (``nodes.log`` if present, else the first non-dagman log) and parses
    it with :func:`parse_condor_log`.

    Returns None when ``jobsub_job_id`` is None or no files are found;
    raises KeyError when no matching Submission exists.
    """
    res = None
    log("INFO", "entering get_joblogs")
    if jobsub_job_id is None:
        return None
    fetcher = jobsub_fetcher.jobsub_fetcher(cert, key)
    log("DEBUG", "checking index")
    submission = dbhandle.query(Submission).filter(Submission.jobsub_job_id == jobsub_job_id).first()
    if submission is None:
        raise KeyError("submission with jobsub_job_id %s not found" % jobsub_job_id)
    else:
        submission_id = submission.submission_id
        username = submission.experimenter_creator_obj.username
    # jobsub cluster ids need a ".0" process suffix before the host part.
    jobsub_job_id = jobsub_job_id.replace("@", ".0@")
    files = fetcher.index(jobsub_job_id, experiment, role, True, user=username)
    if files is None:
        return None
    log("DEBUG", "files: %s" % repr(files))
    filename = None
    for row in files:
        if row[5].endswith(".log") and not row[5].endswith(".dagman.log"):
            # pick the log we want, either the first non-dagman log
            # or the nodes.log
            if not filename:
                filename = row[5]
            if row[5].endswith("nodes.log"):
                filename = row[5]
                break
    log("DEBUG", "checking file %s " % filename)
    lines = fetcher.contents(filename, jobsub_job_id, experiment, role, user=username)
    # The batch host is everything after the '@' in the jobsub id.
    res = parse_condor_log(dbhandle, lines, jobsub_job_id[jobsub_job_id.find("@") + 1 :], submission_id)
    del fetcher
    return res
def fix_jobid(clust_proc, batchhost):
    """Convert a "cluster.proc.sub" id like 123456.010.000 into the
    jobsub form "123456.10@batchhost" (proc with leading zeros dropped)."""
    cluster, proc_str, _ = clust_proc.split(".", 2)
    return "%s.%d@%s" % (cluster, int(proc_str), batchhost)
def compute_secs(time_str):
    """Convert an "hh:mm:ss" string (possibly with a trailing comma, as
    condor prints it) into a total number of seconds."""
    hours, minutes, seconds = (int(part) for part in time_str.strip(",").split(":"))
    return hours * 3600 + minutes * 60 + seconds
def parse_date(date_time_str):
    """Parse a condor timestamp, falling back to 'now' on bad input."""
    try:
        parsed = parse_date_2(date_time_str)
    except ValueError:
        # Unparseable date: an approximate timestamp beats a crash here.
        parsed = datetime.now()
    return parsed
def parse_date_2(date_time_str):
    """ condor just gives month/day, so add the year and parse
    -- the trick is to add the *right* year. At the year boundary
    (i.e. it's Jan 1, and the job started on Dec 31) we may
    need to pick *yesterday's* year, not todays... so check
    by checking yesterdays month.
    ... in fact we should go a little further back (27 days)
    for to get last month right further into this month.
    .. but this is a lie now, newer condor seems to use
    proper ISO dates: 2021-10-11 02:01:00, so handle that, too
    """
    # get todays, yesterdays year and month
    t_year, t_month = datetime.now().strftime("%Y %m").split()
    lm_year, lm_month = (datetime.now() - timedelta(days=27)).strftime("%Y %m").split()
    if date_time_str[:4] == t_year or date_time_str[:4] == lm_year:
        # Already a full ISO timestamp (newer condor): parse it directly.
        return datetime.strptime(date_time_str, "%Y-%m-%d %H:%M:%S")
    elif date_time_str[:2] == t_month:
        # "mm/dd ..." matching the current month: assume the current year.
        date_time_str = "%s/%s" % (t_year, date_time_str)
    elif date_time_str[:2] == lm_month:
        # Matches ~last month's month: use that month's year (this is what
        # handles the December/January boundary correctly).
        date_time_str = "%s/%s" % (lm_year, date_time_str)
    else:
        # if it is some other month, just guess this year.. sorry
        date_time_str = "%s/%s" % (t_year, date_time_str)
    return datetime.strptime(date_time_str, "%Y/%m/%d %H:%M:%S")
def parse_condor_log(dbhandle, lines, batchhost, submission_id):
    """Read a condor job log looking for start/end info.

    Scans the event lines of a condor log and collects, per job id:
    submission time (event 000), start time (001) and completion time
    (005), plus JOB_Site / ExecuteHost attributes and the resource-usage
    lines inside each termination record.

    Returns a dict ``{"idle": {...}, "running": {...}, "completed": {...}}``
    mapping jobsub job ids to datetimes.  ``dbhandle`` and
    ``submission_id`` are currently unused but kept for interface
    compatibility with callers.
    """
    log("DEBUG", "entering parse_condor_log %d lines" % len(lines))
    in_termination = 0
    itimes = {}
    stimes = {}
    etimes = {}
    job_sites = {}
    execute_hosts = {}
    job_exit = None
    jobsub_job_id = None
    # BUG FIX: these three were previously only assigned inside the
    # termination (event 005) branch, so a log with no termination record
    # made the final log() call raise NameError.
    remote_cpu = None
    disk_used = None
    memory_used = None
    for line in lines:
        # Every event record starts "NNN (cluster.proc.sub) date time ...".
        if line[:2] == "00" and line[3:5] == " (":
            ppos = line.find(")")
            jobsub_job_id = fix_jobid(line[5:ppos], batchhost)
        if line[:5] == "000 (":
            log("DEBUG", "submitted record start: %s" % line)
            itimes[jobsub_job_id] = parse_date(line[ppos + 2 : ppos + 16])
        if line[:5] == "001 (":
            log("DEBUG", "start record start: %s" % line)
            stimes[jobsub_job_id] = parse_date(line[ppos + 2 : ppos + 16])
        if line[:10] == "JOB_Site =":
            job_sites[jobsub_job_id] = line[11:-1]
        if line[:13] == "ExecuteHost =":
            execute_hosts[jobsub_job_id] = line[15:-2]
        if line[:5] == "005 (":
            log("DEBUG", "term record start: %s" % line)
            in_termination = 1
            finish_time = parse_date(line[ppos + 2 : ppos + 16])
            etimes[jobsub_job_id] = finish_time
            # Reset per-job usage counters for this termination record.
            remote_cpu = None
            disk_used = None
            memory_used = None
            continue
        if line[:3] == "..." and in_termination:
            log("DEBUG", "term record end %s" % line)
            in_termination = 0
            continue
        if in_termination:
            log("DEBUG", "saw: ", line)
            if line.find("termination (signal ") > 0:
                # Signal exits are reported as 128 + signal number.
                job_exit = 128 + int(line.split()[5].strip(")"))
            if line.find("termination (return value") > 0:
                job_exit = int(line.split()[5].strip(")"))
            if line.find("Total Remote Usage") > 0:
                remote_cpu = compute_secs(line.split()[2])
            if line.find("Disk (KB)") > 0:
                disk_used = line.split()[3]
            if line.find("Memory (KB)") > 0:
                memory_used = line.split()[3]
    log(
        "DEBUG",
        "condor_log_parser: remote_cpu %s "
        "disk_used %s memory_used %s job_exit %s" % (remote_cpu, disk_used, memory_used, job_exit),
    )
    return {"idle": itimes, "running": stimes, "completed": etimes}
| fermitools/poms | webservice/condor_log_parser.py | condor_log_parser.py | py | 6,245 | python | en | code | 0 | github-code | 36 |
38766389801 | #
# aberdeen/utils/prompt.py
#
"""
Utility functions which prompt the user for input.
"""
from distutils.util import strtobool
from .error_messages import warning
def get_user_bool(prompt, default=None):
    """
    Ask the user a yes/no question until an interpretable answer is given.

    Accepts the same strings as distutils' ``strtobool``:
    y/yes/t/true/on/1 -> 1 and n/no/f/false/off/0 -> 0.

    @param default: if default is not None, an empty or uninterpretable
                    response returns default, else the function repeats
                    the question until it can interpret an answer.
    @return: 1 or 0 from the user's answer, or the default provided
    """
    # distutils.util.strtobool is deprecated and removed in Python 3.12
    # (PEP 632), so the answer is interpreted locally with the same
    # accepted values (plus whitespace tolerance).
    truthy = {"y", "yes", "t", "true", "on", "1"}
    falsy = {"n", "no", "f", "false", "off", "0"}
    while True:
        answer = input(prompt).strip().lower()
        if answer in truthy:
            return 1
        if answer in falsy:
            return 0
        if default is not None:
            return default
def prompt_user(name, default=None, do_strip=True):
    """
    Prompts the user for a value and returns the answer
    @param name str: The request asked to user, don't include a ':'
    @param default: if default is not None, an empty response returns this value
    @param do_strip: this function will automatically strip answers' whitespace
    @return string: User's answer or the default provided
    """
    request = name
    if default is not None:
        # Show the default in brackets, e.g. "Port [8080]: ".
        request += " [{}]".format(default)
    request += ": "
    res = input(request)
    if do_strip:
        res = res.strip()
    if not res:
        if default is None:
            # No answer and nothing to fall back on: warn, then still
            # return the (None) default rather than re-prompt.
            warning("Empty string given with no defaults.")
        res = default
    return res
| akubera/aberdeen | aberdeen/utils/prompt.py | prompt.py | py | 1,516 | python | en | code | 1 | github-code | 36 |
14298574762 | import tensorflow as tf
from board_class import Board
from memory_class import Memory
from critic_class import Critic
from actor_class import Actor
import numpy as np
gamma = 0.5  # discount factor for future rewards in the TD target
batch_size = 200  # number of replay samples drawn per training step
def fix_policy(state, policy):
    """Zero the probabilities of occupied squares and renormalize.

    ``state`` is the length-9 board vector (0 = empty square) and
    ``policy`` the corresponding action probabilities; occupied entries
    of ``policy`` are zeroed in place and the renormalized distribution
    over legal moves is returned.
    """
    occupied = [square for square in range(9) if state[square] != 0]
    for square in occupied:
        policy[square] = 0
    return policy / np.sum(policy)
def choose_action(state, actor, sess):
    """Sample a legal move index (0-8) from the actor's policy for ``state``."""
    raw_policy = actor.get_policy(state.reshape((1, 9)), sess)[0]
    legal_policy = fix_policy(state, raw_policy)
    return np.random.choice(9, p=legal_policy)
#we don't want the network to be different for X and O, so we make each player see the board as X would
def state_from_board(board, counter):
state = np.array(board.board)
if counter == 1:
state = -state
state = state
return state
# Replay memory plus actor/critic networks (learning rate 0.001 each).
memory = Memory(3000)
actor = Actor(0.001)
critic = Critic(0.001)
with tf.Session() as sess:
    sess.run(actor.var_init)
    sess.run(critic.var_init)
    for game in range(4500):
        if game%100 == 0:
            print(game)  # progress indicator
        board = Board()
        winner = ''
        counter = 0  # 0 -> X to move, 1 -> O to move
        symbols = ['X', 'O']
        #we need to store samples temporarily because we don't get their values till the end of each game
        samples = []#each sample contains state, action, reward, and next state
        while winner == '':
            state = state_from_board(board, counter)
            action = choose_action(state, actor, sess)
            current_sample = []
            current_sample.append(state)
            current_sample.append(action)
            winner = board.setSquare(action, symbols[counter])
            current_sample.append(0.5)#placeholder reward. we change this when we know the winner
            samples.append(current_sample)
            #switch to next player
            counter = (counter + 1)%2
        # Translate the game result into a reward from X's perspective.
        xreward = 0
        if winner == 'X':
            xreward = 0.5
        elif winner == 'O':
            xreward = -0.5
        #add the next state to each sample and set rewards based on winner
        num_samples = len(samples)
        for i in range(num_samples):
            # next state: two plies later, i.e. the same player's next turn
            if i < num_samples - 2:
                samples[i].append(samples[i + 2][0])
            else:
                samples[i].append(None)
            # Even indices are X's moves, odd are O's; later moves get a
            # larger share of the final reward via the (i+1)/num_samples ramp.
            if i%2 == 0:
                samples[i][2] = samples[i][2] + xreward*(i+1)/num_samples
            else:
                samples[i][2] = samples[i][2] - xreward*(i+1)/num_samples
            memory.add_sample(samples[i])
        # --- one training step per game, on a batch drawn from replay ---
        sample_batch = memory.sample_samples(batch_size)
        actual_batch_size = len(sample_batch)
        state_batch = np.zeros((actual_batch_size, 9))
        next_state_batch = np.zeros((actual_batch_size, 9))
        action_batch = np.array([sample[1] for sample in sample_batch])
        for i, sample in enumerate(sample_batch):
            state_batch[i] = sample[0]
            if sample[3] is not None:
                # Terminal transitions keep the all-zero placeholder row.
                next_state_batch[i] = sample[3]
        qsa_batch = critic.predict_batch(state_batch, sess)
        policy_batch = actor.get_policy_batch(state_batch, sess)
        advantage_batch = np.array([0 for i in range(actual_batch_size)])
        next_qsa_batch = critic.predict_batch(next_state_batch, sess)
        next_policy_batch = actor.get_policy_batch(next_state_batch, sess)
        #next_value_batch = critic.predict_value_batch(next_state_batch, sess)
        #fix up the policy so that invalid actions have a probability of zero
        for i in range(actual_batch_size):
            policy_batch[i] = fix_policy(state_batch[i, :], policy_batch[i, :])
            next_policy_batch[i] = fix_policy(next_state_batch[i, :], next_policy_batch[i, :])
        for i in range(actual_batch_size):
            #dot product is used here to compute expected value
            next_value = np.dot(next_qsa_batch[i], next_policy_batch[i])
            #next_value = next_value_batch[i]
            if sample_batch[i][3] is not None:
                # TD target: reward + discounted expected next-state value.
                qsa_batch[i, action_batch[i]] = sample_batch[i][2] + gamma*next_value
            else:
                # Terminal transition: target is just the reward.
                qsa_batch[i, action_batch[i]] = sample_batch[i][2]
            advantage_batch[i] = qsa_batch[i, action_batch[i]]
        critic.train_batch(state_batch, qsa_batch, sess)
        actor.train_batch(state_batch, action_batch, advantage_batch, sess)
    # Persist the trained networks and diagnostic plots once all games
    # have been played.
    critic.save(sess, 'tic_tac_toe_critic_nobad_a2c')
    actor.save(sess, 'tic_tac_toe_actor_nobad_a2c')
    critic.plot_losses('a2c_critic_losses')
    actor.plot_scores('a2c_scores')
8445183338 | from numpy import prod
import cupy
from cupy.fft import config
from cupy.fft._fft import (_convert_fft_type, _default_fft_func, _fft,
_get_cufft_plan_nd, _get_fftn_out_size,
_output_dtype)
from cupy.fft._cache import get_plan_cache
def get_fft_plan(a, shape=None, axes=None, value_type='C2C'):
    """ Generate a CUDA FFT plan for transforming up to three axes.

    Args:
        a (cupy.ndarray): Array to be transform, assumed to be either C- or
            F- contiguous.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output. If ``shape`` is not given, the lengths of the input along
            the axes specified by ``axes`` are used.
        axes (None or int or tuple of int): The axes of the array to
            transform. If `None`, it is assumed that all axes are transformed.
            Currently, for performing N-D transform these must be a set of up
            to three adjacent axes, and must include either the first or the
            last axis of the array.
        value_type (str): The FFT type to perform. Acceptable values are:

            * 'C2C': complex-to-complex transform (default)
            * 'R2C': real-to-complex transform
            * 'C2R': complex-to-real transform

    Returns:
        a cuFFT plan for either 1D transform (``cupy.cuda.cufft.Plan1d``) or
        N-D transform (``cupy.cuda.cufft.PlanNd``).

    .. note::
        The returned plan can not only be passed as one of the arguments of
        the functions in ``cupyx.scipy.fftpack``, but also be used as a
        context manager for both ``cupy.fft`` and ``cupyx.scipy.fftpack``
        functions:

        .. code-block:: python

            x = cupy.random.random(16).reshape(4, 4).astype(complex)
            plan = cupyx.scipy.fftpack.get_fft_plan(x)
            with plan:
                y = cupy.fft.fftn(x)
                # alternatively:
                y = cupyx.scipy.fftpack.fftn(x)  # no explicit plan is given!
            # alternatively:
            y = cupyx.scipy.fftpack.fftn(x, plan=plan)  # pass plan explicitly

        In the first case, no cuFFT plan will be generated automatically,
        even if ``cupy.fft.config.enable_nd_planning = True`` is set.

    .. note::
        If this function is called under the context of
        :func:`~cupy.fft.config.set_cufft_callbacks`, the generated plan will
        have callbacks enabled.

    .. warning::
        This API is a deviation from SciPy's, is currently experimental, and
        may be changed in the future version.
    """
    from cupy.cuda import cufft

    # check input array
    if a.flags.c_contiguous:
        order = 'C'
    elif a.flags.f_contiguous:
        order = 'F'
    else:
        raise ValueError('Input array a must be contiguous')

    # Normalize scalar shape/axes arguments to 1-tuples.
    if isinstance(shape, int):
        shape = (shape,)
    if isinstance(axes, int):
        axes = (axes,)
    if (shape is not None) and (axes is not None) and len(shape) != len(axes):
        raise ValueError('Shape and axes have different lengths.')

    # check axes
    # n=1: 1d (need axis1D); n>1: Nd
    if axes is None:
        n = a.ndim if shape is None else len(shape)
        # Default: transform the last n axes.
        axes = tuple(i for i in range(-n, 0))
        if n == 1:
            axis1D = 0
    else:  # axes is a tuple
        n = len(axes)
        if n == 1:
            axis1D = axes[0]
            if axis1D >= a.ndim or axis1D < -a.ndim:
                err = 'The chosen axis ({0}) exceeds the number of '\
                      'dimensions of a ({1})'.format(axis1D, a.ndim)
                raise ValueError(err)
        elif n > 3:
            raise ValueError('Only up to three axes is supported')

    # Note that "shape" here refers to the shape along trasformed axes, not
    # the shape of the output array, and we need to convert it to the latter.
    # The result is as if "a=_cook_shape(a); return a.shape" is called.
    # Because of this, we need to use (possibly unsorted) axes.
    transformed_shape = shape
    shape = list(a.shape)
    if transformed_shape is not None:
        for s, axis in zip(transformed_shape, axes):
            if s is not None:
                if axis == axes[-1] and value_type == 'C2R':
                    # C2R input: the last (Hermitian) axis holds n//2+1 values.
                    s = s // 2 + 1
                shape[axis] = s
    shape = tuple(shape)

    # check value_type
    out_dtype = _output_dtype(a.dtype, value_type)
    fft_type = _convert_fft_type(out_dtype, value_type)
    # TODO(leofang): figure out if we really have to skip F-order?
    if n > 1 and value_type != 'C2C' and a.flags.f_contiguous:
        raise ValueError('C2R/R2C PlanNd for F-order arrays is not supported')

    # generate plan
    # (load from cache if it exists, otherwise create one but don't cache it)
    if n > 1:  # ND transform
        if cupy.cuda.runtime.is_hip and value_type == 'C2R':
            raise RuntimeError("hipFFT's C2R PlanNd is buggy and unsupported")
        out_size = _get_fftn_out_size(
            shape, transformed_shape, axes[-1], value_type)
        # _get_cufft_plan_nd interacts with plan cache and callback
        plan = _get_cufft_plan_nd(
            shape, fft_type, axes=axes, order=order, out_size=out_size,
            to_cache=False)
    else:  # 1D transform
        # prepare plan arguments
        if value_type != 'C2R':
            out_size = shape[axis1D]
        else:
            out_size = _get_fftn_out_size(
                shape, transformed_shape, axis1D, value_type)
        # All non-transformed axes are folded into the batch dimension.
        batch = prod(shape) // shape[axis1D]
        devices = None if not config.use_multi_gpus else config._devices

        # Cache key: everything that determines the plan's layout.
        keys = (out_size, fft_type, batch, devices)
        mgr = config.get_current_callback_manager()
        if mgr is not None:
            # to avoid a weird segfault, we generate and cache distinct plans
            # for every possible (load_aux, store_aux) pairs; the plans are
            # still generated from the same external Python module
            load_aux = mgr.cb_load_aux_arr
            store_aux = mgr.cb_store_aux_arr
            keys += (mgr.cb_load, mgr.cb_store,
                     0 if load_aux is None else load_aux.data.ptr,
                     0 if store_aux is None else store_aux.data.ptr)
        cache = get_plan_cache()
        cached_plan = cache.get(keys)
        if cached_plan is not None:
            plan = cached_plan
        elif mgr is None:
            plan = cufft.Plan1d(out_size, fft_type, batch, devices=devices)
        else:  # has callback
            # TODO(leofang): support multi-GPU callback (devices is ignored)
            if devices:
                raise NotImplementedError('multi-GPU cuFFT callbacks are not '
                                          'yet supported')
            plan = mgr.create_plan(('Plan1d', keys[:-3]))
            mgr.set_callbacks(plan)

    return plan
def fft(x, n=None, axis=-1, overwrite_x=False, plan=None):
    """Compute the one-dimensional FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output;
            defaults to the input length along ``axis``.
        axis (int): Axis over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (cupy.cuda.cufft.Plan1d or None): Optional precomputed cuFFT
            plan for transforming ``x`` over ``axis`` (see
            :func:`get_fft_plan`); auto-generated behind the scene when
            None.

    Returns:
        cupy.ndarray: The transformed array, converted to complex if the
        input is of another type.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.fft`
    """
    from cupy.cuda import cufft  # deferred: keeps the module importable without cuFFT

    direction = cufft.CUFFT_FORWARD
    return _fft(x, (n,), (axis,), None, direction,
                overwrite_x=overwrite_x, plan=plan)
def ifft(x, n=None, axis=-1, overwrite_x=False, plan=None):
    """Compute the one-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output;
            defaults to the input length along ``axis``.
        axis (int): Axis over which to compute the inverse FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (cupy.cuda.cufft.Plan1d or None): Optional precomputed cuFFT
            plan for transforming ``x`` over ``axis`` (see
            :func:`get_fft_plan`); auto-generated behind the scene when
            None.

    Returns:
        cupy.ndarray: The transformed array, converted to complex if the
        input is of another type.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.ifft`
    """
    from cupy.cuda import cufft  # deferred: keeps the module importable without cuFFT

    direction = cufft.CUFFT_INVERSE
    return _fft(x, (n,), (axis,), None, direction,
                overwrite_x=overwrite_x, plan=plan)
def fft2(x, shape=None, axes=(-2, -1), overwrite_x=False, plan=None):
    """Compute the two-dimensional FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output; defaults to the input lengths along ``axes``.
        axes (tuple of ints): Axes over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (cupy.cuda.cufft.PlanNd or None): Optional precomputed cuFFT
            plan for transforming ``x`` over ``axes`` (see
            :func:`get_fft_plan`).  When None, CuPy either auto-generates
            a plan (if ``cupy.fft.config.enable_nd_planning`` is True) or
            uses no cuFFT plan.

    Returns:
        cupy.ndarray: The transformed array, converted to complex if the
        input is of another type.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.fft2`
    """
    from cupy.cuda import cufft

    # Dispatch to the 1D or ND implementation based on input/axes/plan.
    fft_func = _default_fft_func(x, shape, axes, plan)
    return fft_func(x, shape, axes, None, cufft.CUFFT_FORWARD,
                    overwrite_x=overwrite_x, plan=plan)
def ifft2(x, shape=None, axes=(-2, -1), overwrite_x=False, plan=None):
    """Compute the two-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output; defaults to the input lengths along ``axes``.
        axes (tuple of ints): Axes over which to compute the inverse FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (cupy.cuda.cufft.PlanNd or None): Optional precomputed cuFFT
            plan for transforming ``x`` over ``axes`` (see
            :func:`get_fft_plan`).  When None, CuPy either auto-generates
            a plan (if ``cupy.fft.config.enable_nd_planning`` is True) or
            uses no cuFFT plan.

    Returns:
        cupy.ndarray: The transformed array, converted to complex if the
        input is of another type.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.ifft2`
    """
    from cupy.cuda import cufft

    # Dispatch to the 1D or ND implementation based on input/axes/plan.
    fft_func = _default_fft_func(x, shape, axes, plan)
    return fft_func(x, shape, axes, None, cufft.CUFFT_INVERSE,
                    overwrite_x=overwrite_x, plan=plan)
def fftn(x, shape=None, axes=None, overwrite_x=False, plan=None):
    """Compute the N-dimensional FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output; defaults to the input lengths along ``axes``.
        axes (tuple of ints): Axes over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (cupy.cuda.cufft.PlanNd or None): Optional precomputed cuFFT
            plan for transforming ``x`` over ``axes`` (see
            :func:`get_fft_plan`).  When None, CuPy either auto-generates
            a plan (if ``cupy.fft.config.enable_nd_planning`` is True) or
            uses no cuFFT plan.

    Returns:
        cupy.ndarray: The transformed array, converted to complex if the
        input is of another type.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.fftn`
    """
    from cupy.cuda import cufft

    # Dispatch to the 1D or ND implementation based on input/axes/plan.
    fft_func = _default_fft_func(x, shape, axes, plan)
    return fft_func(x, shape, axes, None, cufft.CUFFT_FORWARD,
                    overwrite_x=overwrite_x, plan=plan)
def ifftn(x, shape=None, axes=None, overwrite_x=False, plan=None):
    """Compute the N-dimensional inverse FFT.

    Args:
        x (cupy.ndarray): Array to be transformed.
        shape (None or tuple of ints): Shape of the transformed axes of the
            output; defaults to the input lengths along ``axes``.
        axes (tuple of ints): Axes over which to compute the inverse FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (cupy.cuda.cufft.PlanNd or None): Optional precomputed cuFFT
            plan for transforming ``x`` over ``axes`` (see
            :func:`get_fft_plan`).  When None, CuPy either auto-generates
            a plan (if ``cupy.fft.config.enable_nd_planning`` is True) or
            uses no cuFFT plan.

    Returns:
        cupy.ndarray: The transformed array, converted to complex if the
        input is of another type.

    .. note::
        The argument `plan` is currently experimental and the interface may
        be changed in the future version.

    .. seealso:: :func:`scipy.fftpack.ifftn`
    """
    from cupy.cuda import cufft

    # Dispatch to the 1D or ND implementation based on input/axes/plan.
    fft_func = _default_fft_func(x, shape, axes, plan)
    return fft_func(x, shape, axes, None, cufft.CUFFT_INVERSE,
                    overwrite_x=overwrite_x, plan=plan)
def rfft(x, n=None, axis=-1, overwrite_x=False, plan=None):
    """Compute the one-dimensional FFT for real input.

    The returned real array contains

    .. code-block:: python

        [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2))]             # if n is even
        [y(0),Re(y(1)),Im(y(1)),...,Re(y(n/2)),Im(y(n/2))]  # if n is odd

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output. If ``n``
            is not given, the length of the input along the axis specified by
            ``axis`` is used.
        axis (int): Axis over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.
        plan (:class:`cupy.cuda.cufft.Plan1d` or ``None``): a cuFFT plan for
            transforming ``x`` over ``axis``, which can be obtained using::

                plan = cupyx.scipy.fftpack.get_fft_plan(
                    x, axes, value_type='R2C')

            Note that `plan` is defaulted to None, meaning CuPy will either
            use an auto-generated plan behind the scene if cupy.fft.config.
            enable_nd_planning = True, or use no cuFFT plan if it is set to
            False.

    Returns:
        cupy.ndarray:
            The transformed array.

    .. seealso:: :func:`scipy.fftpack.rfft`

    .. note::
        The argument `plan` is currently experimental and the interface may be
        changed in the future version.
    """
    from cupy.cuda import cufft

    if n is None:
        n = x.shape[axis]

    shape = list(x.shape)
    shape[axis] = n
    # Complex half-spectrum (length n//2 + 1 along `axis`).
    f = _fft(x, (n,), (axis,), None, cufft.CUFFT_FORWARD, 'R2C',
             overwrite_x=overwrite_x, plan=plan)
    # Repack the complex half-spectrum into scipy.fftpack's interleaved
    # real layout: [y0, Re(y1), Im(y1), Re(y2), Im(y2), ...].
    z = cupy.empty(shape, f.real.dtype)

    slice_z = [slice(None)] * x.ndim
    slice_f = [slice(None)] * x.ndim

    # DC component (index 0) is purely real.
    slice_z[axis] = slice(1)
    slice_f[axis] = slice(1)
    z[tuple(slice_z)] = f[tuple(slice_f)].real

    # Real parts of the remaining terms go to the odd output positions...
    slice_z[axis] = slice(1, None, 2)
    slice_f[axis] = slice(1, None)
    z[tuple(slice_z)] = f[tuple(slice_f)].real

    # ...and imaginary parts to the even positions (the Nyquist term's
    # zero imaginary part is dropped when n is even).
    slice_z[axis] = slice(2, None, 2)
    slice_f[axis] = slice(1, n - f.shape[axis] + 1)
    z[tuple(slice_z)] = f[tuple(slice_f)].imag

    return z
def irfft(x, n=None, axis=-1, overwrite_x=False):
    """Compute the one-dimensional inverse FFT for real input.

    Args:
        x (cupy.ndarray): Array to be transformed.
        n (None or int): Length of the transformed axis of the output. If ``n``
            is not given, the length of the input along the axis specified by
            ``axis`` is used.
        axis (int): Axis over which to compute the FFT.
        overwrite_x (bool): If True, the contents of ``x`` can be destroyed.

    Returns:
        cupy.ndarray:
            The transformed array.

    .. seealso:: :func:`scipy.fftpack.irfft`

    .. note::
        This function does not support a precomputed `plan`. If you need this
        capability, please consider using :func:`cupy.fft.irfft` or :func:`
        cupyx.scipy.fft.irfft`.
    """
    from cupy.cuda import cufft
    if n is None:
        n = x.shape[axis]
    # Number of packed input entries actually usable along ``axis``.
    m = min(n, x.shape[axis])
    shape = list(x.shape)
    shape[axis] = n // 2 + 1
    # Rebuild the complex half-spectrum expected by the C2R transform from
    # scipy.fftpack's interleaved real format.
    if x.dtype in (cupy.float16, cupy.float32):
        z = cupy.zeros(shape, dtype=cupy.complex64)
    else:
        z = cupy.zeros(shape, dtype=cupy.complex128)
    slice_x = [slice(None)] * x.ndim
    slice_z = [slice(None)] * x.ndim
    # Slot 0 of the input is the (purely real) DC term y(0).
    slice_x[axis] = slice(1)
    slice_z[axis] = slice(1)
    z[tuple(slice_z)].real = x[tuple(slice_x)]
    # Odd input slots -> real parts of y(1), y(2), ...
    slice_x[axis] = slice(1, m, 2)
    slice_z[axis] = slice(1, m // 2 + 1)
    z[tuple(slice_z)].real = x[tuple(slice_x)]
    # Even input slots -> imaginary parts of y(1), y(2), ...
    slice_x[axis] = slice(2, m, 2)
    slice_z[axis] = slice(1, (m + 1) // 2)
    z[tuple(slice_z)].imag = x[tuple(slice_x)]
    return _fft(z, (n,), (axis,), None, cufft.CUFFT_INVERSE, 'C2R',
                overwrite_x=overwrite_x)
| cupy/cupy | cupyx/scipy/fftpack/_fft.py | _fft.py | py | 19,687 | python | en | code | 7,341 | github-code | 36 |
74105735145 | from django.contrib import admin
from django.urls import path
from tareas import views
# URL routes for the task-manager app: auth (registro / iniciar_sesion /
# salir), task CRUD, and the Django admin.
urlpatterns = [
    path("admin/", admin.site.urls),
    path("", views.menu, name="menu"),
    path("registro/", views.registro, name="registro"),
    path("iniciar_sesion/", views.iniciar_sesion, name="iniciar_sesion"),
    path("salir/", views.salir, name="salir"),
    path("crear_tarea/", views.crear_tarea, name="crear_tarea"),
    path("tareas/", views.tareas, name="tareas"),
    path("tarea/<int:tarea_id>", views.tarea, name="tarea"),
    path("tarea/<int:tarea_id>/completa", views.tarea_completa, name="tarea_completa"),
    # NOTE(review): "borada"/"borar" look like typos for "borrada"/"borrar",
    # but they are the public URL and the reversible route name — renaming
    # would break existing links/templates, so they are kept as-is.
    path("tarea/<int:tarea_id>/borada", views.borar_tarea, name="borar_tarea"),
]
| MallicTesla/Mis_primeros_pasos | Programacion/002 ejemplos/002 - 14 django/16 django proyrcto inicio de cesion/django_crud/urls.py | urls.py | py | 732 | python | en | code | 1 | github-code | 36 |
# --- Basic list operations (tutorial script) ---

a = [1 ,2 ,3 ,4 ,5]
print(a)
print(a[3])  # 0-based indexing: prints the 4th element (4)

# Initialize a list of n zeros, then set the first element.
n = 10
a = [0] * n
a[0] = 1
print(a)

# List comprehension.
array = [i for i in range(10)]
print(array)

# List comprehension for a 2-D grid.
# Without this idiom (e.g. [[0] * 10] * 10) every row would reference the
# same inner list, so writing one cell would appear to change every row.
array_2 = [[0] * 10 for _ in range(10) ]
array_2[0][0] = 1

# Removing specific values from a list: keep only elements not in the
# removal set (a set gives O(1) membership tests).
a = [1, 2, 3, 4, 5, 5]
remove_set = {3, 5}
answer = [i for i in a if i not in remove_set ]
print(answer)
15282409982 | import regTrees
from numpy import *
import matplotlib.pyplot as plt
# Fit a regression tree (regTrees.createTree) on the first sample data set
# and scatter-plot the raw points; columns 0 and 1 are plotted against each
# other, so presumably they hold (x, y) — confirm against ex00.txt.
myDat = regTrees.loadDataSet('ex00.txt')
myMat = mat(myDat)
print(regTrees.createTree(myMat))
plt.plot(myMat[:,0],myMat[:,1], 'ro')
plt.show()

# Same for the second data set; here columns 1 and 2 are plotted, so
# column 0 presumably carries a constant/bias term — confirm against ex0.txt.
myDat1 = regTrees.loadDataSet('ex0.txt')
myMat1 = mat(myDat1)
print(regTrees.createTree(myMat1))
plt.plot(myMat1[:,1],myMat1[:,2], 'ro')
plt.show()
| mengwangme/MachineLearninginAction | Ch09/test.py | test.py | py | 376 | python | en | code | 0 | github-code | 36 |
42243497350 | import numpy as np
import matplotlib.pyplot as plt
from hs_digitizer import *
import glob
import scipy.signal as ss
from scipy.optimize import curve_fit
import re
import matplotlib
#Ns = 500000
#Fs = 200000.

# Folder with the amplitude-ramp HDF5 files from the high-speed digitizer.
path = "/data/20181030/bead1/high_speed_digitizer/golden_data/amp_ramp_50k_good"
files = glob.glob(path + "/*.h5")
fi_init = 1e5          # center frequency of interest [Hz]
init_file = 0
final_file = len(files)
n_file = final_file-init_file

# Sort files numerically by the trailing "<number>.h5" part of their names
# (glob order is not numeric).
sfun = lambda fname: int(re.findall('\d+.h5', fname)[0][:-3])
files.sort(key = sfun)

bw = 2000.             # plot bandwidth around fi_init [Hz]
bw_sb = 0.02

# Read sample count and sampling rate from the first file's attributes.
obj0 = hsDat(files[init_file])
t0 = obj0.attribs['time']
Ns = obj0.attribs['nsamp']
Fs = obj0.attribs['fsamp']

freqs = np.fft.rfftfreq(Ns, d = 1./Fs)   # FFT frequency axis [Hz]
tarr0 = np.linspace(0, Ns/Fs, Ns)        # time axis for one record [s]
def line(x, m, b):
    """Straight line with slope ``m`` and intercept ``b``."""
    return b + m * x
def dec2(arr, fac):
    """Decimate ``arr`` twice by ``fac`` (overall downsampling by fac**2)."""
    once = ss.decimate(arr, fac)
    return ss.decimate(once, fac)
def sqrt_fun(x, a):
    """Square-root model ``a * sqrt(x)``."""
    return np.sqrt(x) * a
fc = fi_init
plot_dat = True

matplotlib.rcParams.update({'font.size':12})
f, ax = plt.subplots(dpi = 200)

# Pick six files spread across the amplitude ramp and label each curve with
# its drive-field amplitude.
files = np.array(files)
inds = [0, 100, 200, 300, 400, 499]
files = files[inds]
labels = ["62.5kV/m", "50.0kV/m", "37.5kV/m", "25.0kV/m", "12.5kV/m", "0.0kV/m"]
files = list(files)

p_bool = np.abs(freqs-fc)<bw  # NOTE(review): computed but never used below

# Switch the frequency axis to kHz for plotting.
freqs /= 1000
fc/=1000
bw/=1000

for i, f in enumerate(files):  # NOTE(review): loop var ``f`` shadows the figure handle above
    print(i)
    try:
        obj = hsDat(f)
        # Magnitude spectrum of channel 0.
        fft = np.fft.rfft(obj.dat[:, 0])
        if plot_dat:
            ax.plot(freqs, np.abs(fft), label = labels[i])
    except:
        print("bad file")

# Log-scale spectrum zoomed to a bw-wide window around fc (both in kHz).
ax.set_yscale("log")
ax.set_xlim([fc-bw/2., fc+bw/2.])
plt.xlabel("Frequency[kHz]")
plt.ylabel("Optical Power [arb]")
plt.legend()
plt.tight_layout()
plt.show()
| charlesblakemore/opt_lev_analysis | scripts/spinning/old_scripts/ampt_ramp_spectra_plot.py | ampt_ramp_spectra_plot.py | py | 1,607 | python | en | code | 1 | github-code | 36 |
25872641550 | __author__ = "Domenico Solazzo"
__version__ = "0.1"
# HTTP status codes the Klout API may return, mapped to human-readable
# explanations (used as fallback messages when KloutService raises KloutError).
RESPONSE_CODES = {
    200: "OK: Success",
    202: "Accepted: The request was accepted and the user was queued for processing",
    401: "Not Authorized: either you need to provide authentication credentials, or the credentials provided aren't valid.",
    403: "Bad Request: Your request is invalid and we'll return and error message that tells you why. This is the status code if you have exceeded the rate limit.",
    404: "Not Found: either you are requesting an invalid URI or the resource in question doesn't exist.",
    500: "Internal Server Error: we did something wrong.",
    502: "Bad Gateway: returned if Klout is down or being upgraded.",
    503: "Service Unavailable: the Klout servers are up, but are overloaded with requests. Try again later."
}
class KloutError(Exception):
    """Error raised by the Klout client; carries a numeric code and a message."""

    def __init__(self, code=0, msg=''):
        super(KloutError, self).__init__()
        self.code = code
        self.msg = msg

    def __str__(self):
        # str() and repr() render identically.
        return repr(self)

    def __repr__(self):
        return "%i: %s" % (self.code, self.msg)
class Klout( object ):
    """Thin client for the (legacy) Klout v1 REST API.

    Every public method takes a list/tuple of Twitter usernames, validates
    it once, and delegates the HTTP work to the configured service proxy.
    """

    def __init__(self, key, serviceType="service"):
        self._apiKey = key
        self.__service = self.__getProxyFactory(serviceType)

    def __getProxyFactory(self, serviceType):
        # "test" selects the canned-response stub; anything else talks to
        # the real API using the configured key.
        if serviceType == "test":
            service = TestKloutService(serviceType)
        else:
            service = KloutService(self._apiKey)
        self.__service = service
        return self.__service

    def __users_query(self, users):
        """
        Validate *users* and build the common ``{"users": "a,b,..."}`` query.
        @param: users - List/tuple of usernames
        @raise: KloutError if users is empty or not a list/tuple
        """
        # Consolidates the validation that was duplicated (with slightly
        # inconsistent error messages) in every public method.
        if not users:
            raise KloutError(0, "No Users")
        if not isinstance(users, (list, tuple)):
            raise KloutError(0, "Wrong input.")
        return {"users": ",".join(users)}

    def score(self, users):
        """
        Retrieve Klout scores
        @param: users - List of usernames
        @return: Parsed JSON response of the "score" endpoint
        """
        return self.__service.makeCall("score", self.__users_query(users))

    def show(self, users):
        """
        Retrieve user objects
        @param: users - List of usernames
        @return: Parsed JSON response of the "user" endpoint
        """
        return self.__service.makeCall("user", self.__users_query(users))

    def topics(self, users):
        """
        Returns the top 3 topic objects for each user
        @param: users - List of usernames
        @return: Parsed JSON response of the "topics" endpoint
        """
        return self.__service.makeCall("topics", self.__users_query(users))

    def influencerOf(self, users):
        """
        Returns up to 5 user/score pairs for users that are influencers of the given users
        @param: users - List of usernames
        @return: Parsed JSON response of the "influencerOf" endpoint
        """
        return self.__service.makeCall("influencerOf", self.__users_query(users))

    def influencedBy(self, users):
        """
        Returns up to 5 user/score pairs for users that are influenced by the given users
        @param: users - List of usernames
        @return: Parsed JSON response of the "influencedBy" endpoint
        """
        return self.__service.makeCall("influencedBy", self.__users_query(users))
class KloutService(object):
    """Performs the actual HTTP calls against api.klout.com.

    NOTE(review): uses the Python 2 stdlib (``urllib``, ``httplib``), so this
    class will not run unmodified on Python 3.
    """

    def __init__(self, apiKey):
        self.apiKey = apiKey
        self.VERSION_API = "/1/"
        self.API_URL = "api.klout.com"

    def getCallUrl(self, callName):
        """Map a logical call name to its REST path (API version prefix included)."""
        servicePath = ""
        if callName == "score":
            servicePath = "klout.json"
        elif callName == "user":
            servicePath = "users/show.json"
        elif callName == "topics":
            servicePath = "users/topics.json"
        elif callName == "influencedBy":
            servicePath = "soi/influenced_by.json"
        elif callName == "influencerOf":
            servicePath = "soi/influencer_of.json"
        else:
            raise Exception("Url not available")
        return self.VERSION_API + servicePath

    def _remove_empty_params(self, query):
        """Return a copy of *query* (a dict) with None-valued entries removed."""
        if not isinstance(query, type({})):
            raise Exception("Wrong query in input")
        returnedQuery = {}
        for key in query:
            if not query[key] == None:
                returnedQuery[key] = query[key]
        return returnedQuery

    def makeCall(self, callName, query):
        """GET the endpoint for *callName* with *query* and return the parsed JSON.

        The API key is appended to the query if not already present.
        Raises KloutError on HTTP or JSON-decoding failures.
        """
        import urllib, httplib, json
        url = self.getCallUrl(callName)
        query = self._remove_empty_params(query)
        if 'key' not in query:
            query["key"] = self.apiKey
        queryStr = urllib.urlencode(query)
        if len(query) > 0:
            if url.find("?") == -1:
                url = url + "?" + queryStr
            else:
                url = url + "&" + queryStr
        try:
            conn = httplib.HTTPConnection(self.API_URL)
            conn.request('GET', url)
            response = conn.getresponse()
            data = response.read()
            data = json.loads(data)
        except httplib.HTTPException as err:
            # NOTE(review): httplib.HTTPException has no .read()/.code/.message
            # attributes — this handler looks written for urllib2.HTTPError and
            # would itself raise AttributeError here; confirm intent.
            msg = err.read() or RESPONSE_CODES.get(err.code, err.message)
            raise KloutError(err.code, msg)
        except ValueError:
            # json.loads failed: the body was not valid JSON.
            msg = "Invalid data: %s" % data
            raise KloutError(0, msg)
        return data
class TestKloutService(KloutService):
    """Offline stub of KloutService returning canned fixtures per call name.

    Used when Klout is constructed with serviceType="test"; no HTTP is done.
    """

    def makeCall(self, callName, query):
        # Each branch mirrors the JSON shape of the corresponding live endpoint.
        if callName == "score":
            return {"users":[{"twitter_screen_name":"user1","kscore":23.02}]}
        elif callName == "user":
            return {"users":[{
                        "twitter_id": "111111",
                        "twitter_screen_name":"name",
                        "score":{
                            "kscore":10,
                            "slope":1,
                            "description":"description",
                            "kclass_id":1,
                            "kclass":"Socializer",
                            "kclass_description":"kclass description",
                            "network_score":22,
                            "amplification_score":18,
                            "true_reach": 10,
                            "delta_1day": 0.2,
                            "delta_5day": 0.4
                        }
                    }]}
        elif callName == "topics":
            return {"users":[{"twitter_screen_name":"user1", "topics":["python"]}]}
        elif callName == "influencedBy":
            return {"users":[
                        {
                            "twitter_screen_name":"user1",
                            "influencers":[{"twitter_screen_name":"user2",
                                            "kscore":10.00
                                        }]
                        }
                    ]
                }
        elif callName == "influencerOf":
            return {"users":[
                        {
                            "twitter_screen_name":"user1",
                            "influencers":[{"twitter_screen_name":"user2",
                                            "kscore":10.00
                                        }]
                        }
                    ]
                }
        elif callName == "history":
            # NOTE(review): "history" has no counterpart in KloutService.getCallUrl.
            return {'dates':[], 'klout_score':[], 'amplification':[],
                    'retweets':[], 'mentions':[],'network':[],
                    'followers_following':[], 'followers_count':[], 'mentioners':[],
                    'retweeters':[],'true_reach':[],'in_out':[]
            }
| domenicosolazzo/PythonKlout | pythonklout.py | pythonklout.py | py | 8,616 | python | en | code | 1 | github-code | 36 |
10098349578 | import numpy as np
import pandas as pd
class Node():
    """One node of the ID3 decision tree.

    Attributes:
        attr: name of the splitting attribute (internal nodes).
        children: list of child Nodes, or None.
        isLeaf: True when this node terminates a branch.
        pred: predicted class label (leaves / fallback for unseen values).
        class_label: parent's attribute value that leads to this node.
    """

    def __init__(self, attr=None, pred=None, class_label=None) -> None:
        self.attr = attr
        self.pred = pred
        self.class_label = class_label
        # Populated by the tree builder for internal nodes.
        self.children = None
        self.isLeaf = False
class DecisionTreeClassifierID3():
    """
    Decision Tree Classifier using the ID3 (Iterative Dichotomiser 3) algorithm.

    Expects categorical features in a pandas DataFrame and a binary label
    Series named ``label`` (``gain`` assumes exactly two label classes).
    """

    def __init__(self):
        self.root = None

    def isBalanced(self, df):
        """
        Check whether all tuples belong to a single class.
        : param df: pandas Series, label column
        : return: boolean, True if only one distinct class is present
        """
        return len(list(df.value_counts())) == 1

    def getEntropy(self, total, df):
        """
        Entropy of the label column ``df`` with frequencies relative to ``total``.
        : param total: int, number of rows the class frequencies are divided by
        : param df: pandas Series, label column
        : return: float, entropy (natural log)
        """
        labels = sorted(df.value_counts().to_dict().items())
        entropy = 0
        for label in labels:
            f = (label[1] / total)
            entropy -= f * np.log(f)
        return entropy

    def gain(self, column, y):
        """
        Information gain obtained by splitting on ``column``.
        : param column: pandas Series, feature column
        : param y: pandas Series named 'label', binary labels
        : return: float, information gain
        """
        total = len(column)
        labels = sorted(y.value_counts().to_dict().items())
        fp = (labels[0][1] / total)
        fn = (labels[1][1] / total)
        total_entropy = - (fp * np.log(fp)) - (fn * np.log(fn))
        g = total_entropy
        concat_df = pd.concat([column, y], axis=1)
        # Group by the feature value. Grouping by a scalar column name (not a
        # one-element list) keeps group keys scalar across pandas versions.
        df_dict = {grp: d['label']
                   for grp, d in concat_df.groupby(concat_df.columns[0])}
        for key, value in df_dict.items():
            # BUGFIX: the original called self.getEntropy(key, total, value),
            # which does not match getEntropy's (total, df) signature and
            # raised a TypeError. Each group's entropy is computed over the
            # group's own size and weighted by its share of the rows.
            g -= (len(value) / total) * self.getEntropy(len(value), value)
        return g

    def getMaxGain(self, X, y):
        """
        Find the attribute providing the maximum information gain.
        : param X: pandas DataFrame, feature matrix
        : param y: pandas Series, labels
        : return: tuple, (column name, gain value) of the best split
        """
        cols = X.columns
        gain_dict = {}
        for col in cols:
            a = X[col]
            gain_dict[col] = self.gain(a, y)
        return sorted(gain_dict.items(), key=lambda x: x[1], reverse=True)[0]

    def buildTree(self, X, y, attr_classes, class_val=None):
        """
        Recursively build the decision tree.
        : param X: pandas DataFrame, remaining features
        : param y: pandas Series, labels
        : param attr_classes: dict, list of distinct classes for each column
        : param class_val: parent attribute value this subtree classifies
        : return: Node, root of the built subtree
        """
        root = Node()
        if self.isBalanced(y):
            # Pure node: predict its single remaining class.
            root.isLeaf = True
            root.pred = y.iloc[0]
        elif X is None:
            # No attributes left: fall back to the majority class.
            # BUGFIX: y.mode() returns a Series; take its first element so
            # predictions are scalar labels.
            root.isLeaf = True
            root.pred = y.mode()[0]
        else:
            maxGain = self.getMaxGain(X, y)
            maxGainCol = maxGain[0]
            pred = y.mode()[0]
            attr_list = attr_classes[maxGainCol].copy()
            concat_df = pd.concat([X, y], axis=1)
            # Scalar group keys (see gain) so attr_list.remove(key) matches.
            df_dict = {g: d for g, d in concat_df.groupby(maxGainCol)}
            root.attr = maxGainCol
            root.children = []
            for key, value in df_dict.items():
                attr_list.remove(key)
                new_X = value.drop(maxGainCol, axis=1).iloc[:, :-1]
                new_y = value.iloc[:, -1]
                root.children.append(self.buildTree(
                    new_X, new_y, attr_classes, key))
            if len(attr_list) > 0:
                # Some attribute values never occurred in this split; keep a
                # majority-class fallback prediction on this node.
                root.pred = pred
        root.class_label = class_val
        return root

    def printTree(self, root, num_spaces=0):
        """
        Print the decision tree, one indentation level per depth.
        : param root: Node, node of the decision tree
        : param num_spaces: int, current depth (number of tabs to print)
        : return: None
        """
        print("\t" * num_spaces, end="")
        print(root.class_label, "->", end=" ")
        if root.children is None:
            print(root.pred)
        else:
            print(root.attr)
            for child in root.children:
                self.printTree(child, num_spaces + 1)

    def train(self, X, y):
        """
        Train the classifier: record each column's distinct values, then build the tree.
        : param X: pandas DataFrame, feature matrix
        : param y: pandas Series, labels
        : return: None
        """
        attr_classes = {}
        cols = X.columns
        for col in cols:
            attr_classes[col] = list(X[col].value_counts().keys())
        self.root = self.buildTree(X, y, attr_classes)

    def predict_one_example(self, X, root):
        """
        Predict the label for a single example.
        : param X: pandas Series, one data row
        : param root: Node, current node of the decision tree
        : return: predicted label
        """
        if root.isLeaf:
            return root.pred
        col = root.attr
        val = X[col]
        next_root = [x for x in root.children if x.class_label == val]
        if len(next_root) == 0:
            # Attribute value unseen during training: use the node's fallback.
            return root.pred
        return self.predict_one_example(X, next_root[0])

    def predict(self, X):
        """
        Predict labels for every row of X.
        : param X: pandas DataFrame, feature matrix
        : return: list, predicted labels
        """
        pred_y = []
        for i in range(len(X)):
            pred_y.append(self.predict_one_example(
                X.iloc[i, :], self.root))
        return pred_y
| pri1311/ML_Algorithms | ML-Algorithms/Classification/DecisionTreeID3.py | DecisionTreeID3.py | py | 6,215 | python | en | code | 1 | github-code | 36 |
16389167601 | # -*- coding: utf-8 -*-
import os
import sys
import xbmc
import xbmcaddon
import xbmcgui
import xbmcplugin
from xbmcvfs import translatePath
from libs.session import Session
from libs.utils import get_url, check_settings
def list_settings(label):
    """Render the top-level settings directory (channels / devices / addon settings).

    :param label: category title Kodi shows for this listing
    """
    addon = xbmcaddon.Addon()
    _handle = int(sys.argv[1])  # plugin handle Kodi passes as argv[1]
    xbmcplugin.setPluginCategory(_handle, label)
    # "Kanály" (channels) entry.
    list_item = xbmcgui.ListItem(label = 'Kanály')
    url = get_url(action='manage_channels', label = 'Kanály')
    xbmcplugin.addDirectoryItem(_handle, url, list_item, True)
    # Devices entry (localized string 300101).
    list_item = xbmcgui.ListItem(label = addon.getLocalizedString(300101))
    url = get_url(action='list_devices', label = addon.getLocalizedString(300101))
    xbmcplugin.addDirectoryItem(_handle, url, list_item, True)
    # Addon settings entry (localized string 300102).
    list_item = xbmcgui.ListItem(label = addon.getLocalizedString(300102))
    url = get_url(action='addon_settings', label = addon.getLocalizedString(300102))
    xbmcplugin.addDirectoryItem(_handle, url, list_item, True)
    xbmcplugin.endOfDirectory(_handle)
def list_devices(label):
    """List the account's registered devices; selecting one triggers remove_device.

    :param label: category title Kodi shows for this listing
    """
    addon = xbmcaddon.Addon()
    _handle = int(sys.argv[1])  # plugin handle Kodi passes as argv[1]
    xbmcplugin.setPluginCategory(_handle, label)
    session = Session()
    devices = session.get_devices()
    for id in devices:
        # Title on the first line, gray "last activity" info on the second.
        list_item = xbmcgui.ListItem(label = devices[id]['title'] + '\n' + '[COLOR=gray]' + addon.getLocalizedString(300208) + ': ' + devices[id]['last_activity'] + '[/COLOR]')
        url = get_url(action='remove_device', id = id, title = devices[id]['title'], last_activity = devices[id]['last_activity'])
        xbmcplugin.addDirectoryItem(_handle, url, list_item, True)
    xbmcplugin.endOfDirectory(_handle)
def remove_device(id, title, last_activity):
    """Ask the user to confirm, then delete the device and refresh the listing.

    :param id: device identifier understood by Session.delete_device
    :param title: device display name shown in the confirmation prompt
    :param last_activity: last-activity timestamp shown in the prompt
    """
    addon = xbmcaddon.Addon()
    prompt = '%s %s (%s: %s)?' % (addon.getLocalizedString(300301), title,
                                  addon.getLocalizedString(300208), last_activity)
    if xbmcgui.Dialog().yesno(addon.getLocalizedString(300300), prompt):
        Session().delete_device(id)
        xbmc.executebuiltin('Container.Refresh')
class Settings:
    """Small persistence helper storing one-line JSON-ish snippets in the
    addon's userdata folder.

    Each ``file`` argument is a dict with 'filename' (name inside the
    userdata dir) and 'description' (used in error notifications) —
    presumably; confirm against the callers.
    """

    def __init__(self):
        # All operations are no-ops unless the addon settings are valid.
        self.is_settings_ok = check_settings()

    def save_json_data(self, file, data):
        """Write ``data`` (plus a trailing newline) to the file, overwriting it."""
        addon = xbmcaddon.Addon()
        addon_userdata_dir = translatePath(addon.getAddonInfo('profile'))
        if self.is_settings_ok:
            filename = os.path.join(addon_userdata_dir, file['filename'])
            try:
                with open(filename, "w") as f:
                    f.write('%s\n' % data)
            except IOError:
                xbmcgui.Dialog().notification('Rebit.tv', addon.getLocalizedString(300201) + file['description'], xbmcgui.NOTIFICATION_ERROR, 5000)

    def load_json_data(self, file):
        """Return the file's last line (without its trailing character), or None."""
        data = None
        if self.is_settings_ok:
            addon = xbmcaddon.Addon()
            addon_userdata_dir = translatePath(addon.getAddonInfo('profile'))
            filename = os.path.join(addon_userdata_dir, file['filename'])
            try:
                with open(filename, "r") as f:
                    # Keep only the last line; strips the final char
                    # (presumably the newline written by save_json_data).
                    for row in f:
                        data = row[:-1]
            except IOError as error:
                # errno 2 (file not found) is expected on first run — ignore it.
                if error.errno != 2:
                    xbmcgui.Dialog().notification('Rebit.tv', addon.getLocalizedString(300202) + file['description'], xbmcgui.NOTIFICATION_ERROR, 5000)
        return data

    def reset_json_data(self, file):
        """Delete the stored file if it exists."""
        if self.is_settings_ok:
            addon = xbmcaddon.Addon()
            addon_userdata_dir = translatePath(addon.getAddonInfo('profile'))
            filename = os.path.join(addon_userdata_dir, file['filename'])
            if os.path.exists(filename):
                try:
                    os.remove(filename)
                except IOError:
                    xbmcgui.Dialog().notification('Rebit.tv', addon.getLocalizedString(300203) + file['description'], xbmcgui.NOTIFICATION_ERROR, 5000)
| waladir/plugin.video.rebittv | libs/settings.py | settings.py | py | 3,931 | python | en | code | 0 | github-code | 36 |
8593741282 | #! /usr/bin/env -S python3 -u
import os, shutil, sys, glob, traceback
from easyterm import *
help_msg="""This program downloads one specific NCBI assembly, executes certains operations, then cleans up data
### Input/Output:
-a genome NCBI accession
-o folder to download to
### Actions:
-c bash command template
-cf bash command template read from this file
-p python command template
-pf python command template read from this file
In all templates above, these placeholders can be used:
{accession} genome NCBI accession, e.g. GCA_000209535.1
{genomefile} path to genome fasta file
{taxid} taxonomy id
{species} species name, e.g. "Drosophila melanogaster"
{mspecies} masked species, e.g. "Drosophila_melanogaster"
### Other options:
-k keep files instead of cleaning them up at the end
-w max workers for downloads at once
-sh open shells for bash commands. Required for complex commands
(e.g. sequential commands, or using redirections)
-print_opt print currently active options
-h | --help print this help and exit"""
command_line_synonyms={'t':'temp'}
def_opt= {'a':'',
'o':'./',
'c':'',
'cf':'',
'p':'',
'pf':'',
'k':False,
'sh':False,
'w':1,
'temp':'/tmp/'}
temp_folder=None
##### start main program function
def main(args={}):
"""We're encapsulating nearly the whole program in a function which is executed when
the script is directly executed. This provides the alternative of being able
to run the same thing as module: importing this 'main' function and running it with
a 'args' dictionary containing options and arguments, equivalent to opt
"""
### loading options
if not args:
opt=command_line_options(def_opt, help_msg, synonyms=command_line_synonyms)
else:
opt=args
# if not opt['cf'] and not opt['c']:
# raise NoTracebackError("ERROR you must define a template command with -c or -cf")
if opt['c'] or opt['cf']:
bash_template_command=(opt['c']
if opt['c'] else
'\n'.join([x.strip() for x in open(opt['cf'])]))
if opt['p'] or opt['pf']:
py_template_command=(opt['p']
if opt['p'] else
'\n'.join([x.strip() for x in open(opt['pf'])]))
if not opt['o']:
raise NoTracebackError("ERROR you must provide an output folder with -o")
outfolder=opt['o'].rstrip('/')
if not os.path.exists(outfolder):
os.makedirs(outfolder)
if not opt['a']:
raise NoTracebackError("ERROR you must provide an accession with -a")
accession=opt['a']
datadir=f'{outfolder}/dataset.{accession}'
zipfile=datadir+'.zip'
write('*** Options accepted: ', how='green')
write(opt)
write('')
write('*** Download metadata (dehydrated)', how='green')
## download dehydrated
cmd_download_dehydra = f"""\
datasets download genome accession {accession} \
--reference --dehydrated \
--exclude-genomic-cds --exclude-gff3 --exclude-protein --exclude-rna \
--filename {zipfile} """
run_cmd(cmd_download_dehydra,
stdout=None, stderr=None) # messages printed to screen
write('*** Reformatting metadata', how='green')
## get some metadata
cmd_format_tsv = f"""
dataformat tsv genome \
--package {zipfile} \
--fields tax-id,organism-name"""
x = run_cmd(cmd_format_tsv).stdout
taxid, species = x.split('\n')[1].split('\t')
mspecies=mask_chars(species)
write(f'accession: {accession}')
write(f'taxid: {taxid}')
write(f'species: {species}')
write(f'mspecies: {mspecies}')
write('*** Unzipping metadata, removing zipfile', how='green')
## prep for download: unzip
cmd_unzip_dehydra=f"unzip -o -d {datadir} {zipfile}"
run_cmd(cmd_unzip_dehydra,
stdout=None, stderr=None) # messages printed to screen
write(f'removing {zipfile}')
os.remove(zipfile)
write('')
write('*** Downloading genome data', how='green')
## download / hydrate
progressbar='' if sys.stdout.isatty() else ' --no-progressbar '
cmd_download_hydra=f"""
datasets rehydrate \
--directory {datadir} \
--match "/{accession}/" \
--max-workers {opt['w']} \
{progressbar} """
run_cmd(cmd_download_hydra,
stdout=None, stderr=None) # messages printed to screen
write('')
write('*** Compacting chromosomes into a single fasta', how='green')
fasta_regexp=f'{datadir}/ncbi_dataset/data/{accession}/*fna'
genomefile= f'{datadir}/ncbi_dataset/data/{accession}/{accession}.fasta'
index=0
with open(genomefile, 'wb') as wfd:
for index, chromfile in enumerate(glob.iglob(fasta_regexp)):
service(chromfile)
with open(chromfile,'rb') as fd:
shutil.copyfileobj(fd, wfd)
# cmd_compact_fasta=f'cat {fasta_regexp} > {genomefile}'
# run_cmd(cmd_compact_fasta)
write(f'Concatenating {index+1} chromosomes or contigs \n to genomefile: {genomefile}')
write('*** Removing chromosomes fasta files', how='green')
for chromfile in glob.iglob(fasta_regexp):
os.remove(chromfile)
if not any( [opt[k] for k in ['c', 'p', 'cf', 'pf']] ):
write('')
write('*** <No commands to be executed>', how='green')
try:
if opt['c'] or opt['cf']:
write('')
write('*** Running bash command', how='green')
#template='{genomefile} {species} {mspecies}'
bash_cmd=bash_template_command.format(**locals())
write(bash_cmd)
run_cmd(bash_cmd,
shell=opt['sh'],
stdout=None, stderr=None) # messages printed to screen, if not redicted
if opt['p'] or opt['pf']:
write('')
write('*** Running python command', how='green')
py_cmd=py_template_command.format(**locals())
write(py_cmd)
exec(py_cmd)
except Exception:
write('')
write('*** an ERROR occured !', how='red')
traceback.print_exc()
if not opt['k']:
write('')
write('*** Cleaning up all data', how='green')
write(f'removing {datadir}')
shutil.rmtree(datadir)
else:
write('')
write('*** Leaving data in place', how='green')
write(f'check {datadir}')
# creating a temporary folder with random name inside the -temp argument
# temp_folder=random_folder(opt['temp'])
# write(f'Using temporary folder={temp_folder}')
### insert your code here
##### end main program function
### function executed when program execution is over:
def close_program():
pass
# if temp_folder is not None and os.path.isdir(temp_folder):
# # deleting temporary folder
# shutil.rmtree(temp_folder)
if __name__ == "__main__":
try:
main()
close_program()
except Exception as e:
close_program()
raise e from None
| marco-mariotti/ncbi_single_use_genome | ncbi_single_use_genome.py | ncbi_single_use_genome.py | py | 6,805 | python | en | code | 0 | github-code | 36 |
37313593372 | '''
1. input the members in the team and take the score.
2. if x has a higher score than other two then
3. create a set arranged in order of the score.
4. create a function to compare the scores.
5. decide the order is valid or not using if else ladder.
6. if anyone has higher skill score then yes.
7. if such order is not possible then no.
'''
def result(a, b):
    """Return True iff team *a* strictly dominates team *b*.

    Domination means every skill score of *a* is >= the corresponding score
    of *b* and at least one is strictly greater.

    Generalized from the original's hard-coded indices 0..2 to score vectors
    of any (equal) length — backward compatible for the 3-skill case.
    """
    at_least_as_good = all(x >= y for x, y in zip(a, b))
    strictly_better = any(x > y for x, y in zip(a, b))
    return at_least_as_good and strictly_better
# For each test case, read the three teams' skill-score triples and decide
# whether they can be strictly ordered (each team dominating the next).
for _ in range(int(input())):
    x=list(map(int,input().split()))
    y=list(map(int,input().split()))
    z=list(map(int,input().split()))
    # Try all 6 permutations of (x, y, z); print "yes" if any ordering has
    # every team dominating its successor.
    if result(x,y) and result(y,z):
        print("yes")
    elif result(x,z) and result(z,y):
        print("yes")
    elif result(y,x) and result(x,z):
        print("yes")
    elif result(y,z) and result(z,x):
        print("yes")
    elif result(z,x) and result(x,y):
        print("yes")
    elif result(z,y) and result(y,x):
        print("yes")
    else:
        print("no")  # no valid strict ordering exists
| Aashutosh748/comprino_tests | 2_ordering_teams.py | 2_ordering_teams.py | py | 1,401 | python | en | code | 0 | github-code | 36 |
72694784745 | import heapq
roads = [["ULSAN","BUSAN"],["DAEJEON","ULSAN"],["DAEJEON","GWANGJU"],["SEOUL","DAEJEON"],["SEOUL","ULSAN"],["DAEJEON","DAEGU"],["GWANGJU","BUSAN"],["DAEGU","GWANGJU"],["DAEGU","BUSAN"],["ULSAN","DAEGU"],["GWANGJU","YEOSU"],["BUSAN","YEOSU"]]

# Collect the set of distinct city names mentioned in the road list.
node = []
for road in roads:
    x, y = road
    node.append(x)
    node.append(y)
node = set(node)
num = len(node)

# NOTE(review): scratch/work-in-progress code. Each row is built as
# [i, i, ..., i] (num+1 copies of the integer row index), and the loop below
# compares the city string x against the int i[0], which can never match —
# so no row is ever extended. The commented-out block below builds rows
# keyed by city name instead, which looks like the intended version.
graph = [[i]*(num+1) for i in range(num+1)]
for road in roads:
    x, y = road
    for i in graph:
        if x == i[0]:
            i.extend([y])
print(graph)
# graph = [[i] for i in node]
# print(graph)
#
# for road in roads:
# x, y = road
# for i in graph:
# if x == i[0]:
# i.extend([y])
#
#
# print(graph)
# def dij(depart, arrive):
# q = []
# dist = [0] * num
# heapq.heappush(q,(0,depart))
# dist[depart] = 0
# while q:
# dist, now = heapq.heappop(q)
# for nd, ndist in graph[now]:
# ndist += dist
# if dist[nd]
#
| baejinsoo/algorithm_study | algorithm_study/Stack_Queue/쿠팡경로.py | 쿠팡경로.py | py | 984 | python | en | code | 0 | github-code | 36 |
70091360103 | from datetime import datetime
from persistent.list import PersistentList
from zope.annotation import IAnnotations
import logging
TWITTER_KEY = "noise.addon.twitter"
FACEBOOK_KEY = "noise.addon.facebook"
EMAIL_KEY = "noise.addon.email"
HARDCOPY_KEY = "noise.addon.hardcopy"
TWITTER_CSV_HEADERS = ["timestamp", "twitter-text", "tweet-text",
"firstname", "lastname", "email", "phone", "keepposted"]
FACEBOOK_CSV_HEADERS = ["timestamp"]
EMAIL_CSV_HEADERS = ["timestamp", "email-text", "email_body", "firstname",
"lastname", "email", "phone", "keepposted"]
HARDCOPY_CSV_HEADERS = ["timestamp", "hardcopy-text", "hardcopy_body",
"firstname", "lastname", "address", "zipcode", "city",
"phone", "keepposted"]
logger = logging.getLogger('noise.addon')
class NoiseRecord(object):
    """One submitted form entry plus the timestamp at which it was stored.

    The form data is persisted as ``str(record)`` and re-materialised with
    ``eval`` on access.
    """

    def __init__(self, timestamp, record):
        self._timestamp = timestamp
        self._record = str(record)

    @property
    def get_record(self):
        # NOTE(review): eval() on stored text — only safe while records are
        # created exclusively by add_noise(); consider ast.literal_eval.
        return eval(self._record)

    @property
    def get_timestamp(self):
        return self._timestamp
def setupAnnotations(context, key, reset=False):
    """Ensure ``context`` has a PersistentList annotation under ``key``.

    :param reset: when True, replace any existing list with a fresh empty one.
    :return: the context's annotations mapping
    """
    annotations = IAnnotations(context)
    if reset or (not key in annotations):
        annotations[key] = PersistentList()
    return annotations
def add_noise(context, key, record):
    """Append *record*, stamped with the current local time, to the annotation list under *key*."""
    annotations = setupAnnotations(context, key)
    annotations[key].append(
        NoiseRecord(datetime.now().strftime("%d-%m-%Y %H:%M"), record)
    )
def get_noise(context, key):
    """Return the NoiseRecord entries stored under *key* (non-NoiseRecord items filtered out)."""
    annotations = setupAnnotations(context, key)
    data = []
    if key in annotations:
        data = annotations[key]
    data = [d for d in data if isinstance(d, NoiseRecord)]
    return data
def status(context, key):
    """Return the raw stored list for *key* without creating it ([] if absent)."""
    annotations = IAnnotations(context)
    return annotations.get(key, [])
| cleanclothes/vmd.noise | noise/addon/storage.py | storage.py | py | 1,916 | python | en | code | 0 | github-code | 36 |
17230876576 | # -*- coding: utf-8 -*-
import numpy as np
def get_linear_value(current_index, start_value, total_steps, end_value=0, **kwargs):
    """Value at ``current_index`` on a linear ramp from ``start_value`` to ``end_value``.

    :param current_index: step index in [0, total_steps)
    :param start_value: value at index 0
    :param total_steps: number of interpolation steps
    :param end_value: value at the last index (default 0)
    :return: float32 interpolated value
    """
    values = np.linspace(start_value, end_value, total_steps, dtype=np.float32)
    # BUGFIX: removed the no-op `values / start_value * start_value`
    # rescaling, which only added float round-off and divided by zero
    # (producing NaNs) whenever start_value == 0.
    return values[current_index]
def get_cosine_value(current_index, start_value, total_steps, end_value=0, **kwargs):
    """Value at ``current_index`` on a cosine-shaped ramp starting at ``start_value``.

    NOTE(review): the sample grid starts at ``end_value`` (not 0) —
    presumably intentional, but verify against get_linear_value's convention.
    """
    grid = np.linspace(end_value, total_steps, total_steps, dtype=np.float32)
    annealed = (np.cos(grid * np.pi / total_steps) + 1) * start_value / 2
    return annealed[current_index]
def get_ema_value(current_index, start_value, eta, **kwargs):
    """Exponentially decayed value: ``start_value * eta ** current_index``."""
    decayed = start_value * eta ** current_index
    return decayed
# Scheduler name -> value function; all share the leading signature
# (current_index, start_value, ...) and accept extra **kwargs.
INTERPOLATION_SCHEDULERS = {
    'ema': get_ema_value,
    'cos': get_cosine_value,
    'linear': get_linear_value,
}
| TheDenk/Attention-Interpolation | iattention/interpolation_schedulers.py | interpolation_schedulers.py | py | 835 | python | en | code | 5 | github-code | 36 |
1947036421 | from collections import defaultdict
def solution(genres, plays):
    """Best-album problem.

    For each genre, in decreasing order of total play count, pick the (up
    to) two most-played songs; ties on play count are broken by the lower
    song index. Returns the list of chosen song indices.

    BUGFIX/cleanup: the original mutated the caller's ``plays`` list
    (overwriting entries with -1) and relied on fragile ``plays.index``
    lookups; this version tracks indices directly and leaves the inputs
    untouched.

    :param genres: list of genre names, one per song
    :param plays: list of play counts, aligned with ``genres``
    :return: list of song indices to put on the album
    """
    songs_by_genre = defaultdict(list)
    for idx, (genre, play) in enumerate(zip(genres, plays)):
        songs_by_genre[genre].append((play, idx))

    answer = []
    # Genres ordered by total play count, descending (totals are unique
    # per the problem statement).
    for genre, songs in sorted(songs_by_genre.items(),
                               key=lambda item: -sum(p for p, _ in item[1])):
        # Most-played first; equal plays -> lower index first.
        songs.sort(key=lambda song: (-song[0], song[1]))
        answer.extend(idx for _, idx in songs[:2])
    return answer
| hellokena/2022 | 프로그래머스/LV2/LV3_베스트앨범(해시).py | LV3_베스트앨범(해시).py | py | 862 | python | ko | code | 0 | github-code | 36 |
5812402236 | import unittest
import warnings
from datetime import date, datetime
from decimal import Decimal
import pytz
from babel import Locale
from fluent.runtime.types import FluentDateType, FluentNumber, fluent_date, fluent_number
class TestFluentNumber(unittest.TestCase):
    """Tests for fluent_number: type wrapping, option validation and
    locale-aware formatting.  All formatting expectations are for en_US
    unless a method says otherwise."""
    locale = Locale.parse('en_US')
    def setUp(self):
        # Shared positive/negative USD currency fixtures.
        self.cur_pos = fluent_number(123456.78123,
                                     currency='USD',
                                     style='currency')
        self.cur_neg = fluent_number(-123456.78123,
                                     currency='USD',
                                     style='currency')
    def test_int(self):
        # Wrapped ints stay usable as plain ints.
        i = fluent_number(1)
        self.assertTrue(isinstance(i, int))
        self.assertTrue(isinstance(i, FluentNumber))
        self.assertEqual(i + 1, 2)
    def test_float(self):
        f = fluent_number(1.1)
        self.assertTrue(isinstance(f, float))
        self.assertTrue(isinstance(f, FluentNumber))
        self.assertEqual(f + 1, 2.1)
    def test_decimal(self):
        d = Decimal('1.1')
        self.assertTrue(isinstance(fluent_number(d), Decimal))
        self.assertTrue(isinstance(fluent_number(d), FluentNumber))
        self.assertEqual(d + 1, Decimal('2.1'))
    def test_disallow_nonexistant_options(self):
        self.assertRaises(
            TypeError,
            fluent_number,
            1,
            not_a_real_option=True,
        )
    def test_style_validation(self):
        self.assertRaises(ValueError,
                          fluent_number,
                          1,
                          style='xyz')
    def test_use_grouping(self):
        f1 = fluent_number(123456.78, useGrouping=True)
        f2 = fluent_number(123456.78, useGrouping=False)
        self.assertEqual(f1.format(self.locale), "123,456.78")
        self.assertEqual(f2.format(self.locale), "123456.78")
        # ensure we didn't mutate anything when we created the new
        # NumberPattern:
        self.assertEqual(f1.format(self.locale), "123,456.78")
    def test_use_grouping_decimal(self):
        d = Decimal('123456.78')
        f1 = fluent_number(d, useGrouping=True)
        f2 = fluent_number(d, useGrouping=False)
        self.assertEqual(f1.format(self.locale), "123,456.78")
        self.assertEqual(f2.format(self.locale), "123456.78")
    def test_minimum_integer_digits(self):
        f = fluent_number(1.23, minimumIntegerDigits=3)
        self.assertEqual(f.format(self.locale), "001.23")
    def test_minimum_integer_digits_decimal(self):
        f = fluent_number(Decimal('1.23'), minimumIntegerDigits=3)
        self.assertEqual(f.format(self.locale), "001.23")
    def test_minimum_fraction_digits(self):
        f = fluent_number(1.2, minimumFractionDigits=3)
        self.assertEqual(f.format(self.locale), "1.200")
    def test_maximum_fraction_digits(self):
        # Default maximum is 3 fraction digits (rounds).
        f1 = fluent_number(1.23456)
        self.assertEqual(f1.format(self.locale), "1.235")
        f2 = fluent_number(1.23456, maximumFractionDigits=5)
        self.assertEqual(f2.format(self.locale), "1.23456")
    def test_minimum_significant_digits(self):
        f1 = fluent_number(123, minimumSignificantDigits=5)
        self.assertEqual(f1.format(self.locale), "123.00")
        f2 = fluent_number(12.3, minimumSignificantDigits=5)
        self.assertEqual(f2.format(self.locale), "12.300")
    def test_maximum_significant_digits(self):
        f1 = fluent_number(123456, maximumSignificantDigits=3)
        self.assertEqual(f1.format(self.locale), "123,000")
        f2 = fluent_number(12.3456, maximumSignificantDigits=3)
        self.assertEqual(f2.format(self.locale), "12.3")
        f3 = fluent_number(12, maximumSignificantDigits=5)
        self.assertEqual(f3.format(self.locale), "12")
    def test_currency(self):
        # This test the default currencyDisplay value
        self.assertEqual(self.cur_pos.format(self.locale), "$123,456.78")
    def test_currency_display_validation(self):
        self.assertRaises(ValueError,
                          fluent_number,
                          1234,
                          currencyDisplay="junk")
    def test_currency_display_symbol(self):
        cur_pos_sym = fluent_number(self.cur_pos, currencyDisplay="symbol")
        cur_neg_sym = fluent_number(self.cur_neg, currencyDisplay="symbol")
        self.assertEqual(cur_pos_sym.format(self.locale), "$123,456.78")
        self.assertEqual(cur_neg_sym.format(self.locale), "-$123,456.78")
    def test_currency_display_code(self):
        # Outputs here were determined by comparing with Javascrpt
        # Intl.NumberFormat in Firefox.
        cur_pos_code = fluent_number(self.cur_pos, currencyDisplay="code")
        cur_neg_code = fluent_number(self.cur_neg, currencyDisplay="code")
        self.assertEqual(cur_pos_code.format(self.locale), "USD123,456.78")
        self.assertEqual(cur_neg_code.format(self.locale), "-USD123,456.78")
    @unittest.skip("Babel doesn't provide support for this yet")
    def test_currency_display_name(self):
        cur_pos_name = fluent_number(self.cur_pos, currencyDisplay="name")
        cur_neg_name = fluent_number(self.cur_neg, currencyDisplay="name")
        self.assertEqual(cur_pos_name.format(self.locale), "123,456.78 US dollars")
        self.assertEqual(cur_neg_name.format(self.locale), "-123,456.78 US dollars")
        # Some others locales:
        hr_BA = Locale.parse('hr_BA')
        self.assertEqual(cur_pos_name.format(hr_BA),
                         "123.456,78 američkih dolara")
        es_GT = Locale.parse('es_GT')
        self.assertEqual(cur_pos_name.format(es_GT),
                         "dólares estadounidenses 123,456.78")
    def test_copy_attributes(self):
        # Re-wrapping an existing FluentNumber copies its options without
        # mutating either the source or the class-level defaults.
        f1 = fluent_number(123456.78, useGrouping=False)
        self.assertEqual(f1.options.useGrouping, False)
        # Check we didn't mutate anything
        self.assertIs(FluentNumber.default_number_format_options.useGrouping, True)
        f2 = fluent_number(f1, style="percent")
        self.assertEqual(f2.options.style, "percent")
        # Check we copied
        self.assertEqual(f2.options.useGrouping, False)
        # and didn't mutate anything
        self.assertEqual(f1.options.style, "decimal")
        self.assertEqual(FluentNumber.default_number_format_options.style, "decimal")
class TestFluentDate(unittest.TestCase):
    """Tests for fluent_date: date/datetime wrapping, dateStyle/timeStyle
    formatting, time zones and option validation."""
    locale = Locale.parse('en_US')
    def setUp(self):
        # A plain date and a UTC-aware datetime used across the tests.
        self.a_date = date(2018, 2, 1)
        self.a_datetime = datetime(2018, 2, 1, 14, 15, 16, 123456,
                                   tzinfo=pytz.UTC)
    def test_date(self):
        # Wrapped dates keep date semantics and fields.
        fd = fluent_date(self.a_date)
        self.assertTrue(isinstance(fd, date))
        self.assertTrue(isinstance(fd, FluentDateType))
        self.assertEqual(fd.year, self.a_date.year)
        self.assertEqual(fd.month, self.a_date.month)
        self.assertEqual(fd.day, self.a_date.day)
    def test_datetime(self):
        fd = fluent_date(self.a_datetime)
        self.assertTrue(isinstance(fd, datetime))
        self.assertTrue(isinstance(fd, FluentDateType))
        self.assertEqual(fd.year, self.a_datetime.year)
        self.assertEqual(fd.month, self.a_datetime.month)
        self.assertEqual(fd.day, self.a_datetime.day)
        self.assertEqual(fd.hour, self.a_datetime.hour)
        self.assertEqual(fd.minute, self.a_datetime.minute)
        self.assertEqual(fd.second, self.a_datetime.second)
        self.assertEqual(fd.microsecond, self.a_datetime.microsecond)
        self.assertEqual(fd.tzinfo, self.a_datetime.tzinfo)
    def test_format_defaults(self):
        fd = fluent_date(self.a_date)
        en_US = Locale.parse('en_US')
        en_GB = Locale.parse('en_GB')
        self.assertEqual(fd.format(en_GB), '1 Feb 2018')
        self.assertEqual(fd.format(en_US), 'Feb 1, 2018')
    def test_dateStyle_date(self):
        fd = fluent_date(self.a_date, dateStyle='long')
        en_US = Locale.parse('en_US')
        en_GB = Locale.parse('en_GB')
        self.assertEqual(fd.format(en_GB), '1 February 2018')
        self.assertEqual(fd.format(en_US), 'February 1, 2018')
    def test_dateStyle_datetime(self):
        # With only dateStyle set, the time portion is omitted.
        fd = fluent_date(self.a_datetime, dateStyle='long')
        en_US = Locale.parse('en_US')
        en_GB = Locale.parse('en_GB')
        self.assertEqual(fd.format(en_GB), '1 February 2018')
        self.assertEqual(fd.format(en_US), 'February 1, 2018')
    def test_timeStyle_datetime(self):
        fd = fluent_date(self.a_datetime, timeStyle='short')
        en_US = Locale.parse('en_US')
        en_GB = Locale.parse('en_GB')
        # Regex because CLDR versions differ on the space before AM/PM.
        self.assertRegex(fd.format(en_US), '^2:15\\sPM$')
        self.assertEqual(fd.format(en_GB), '14:15')
    def test_dateStyle_and_timeStyle_datetime(self):
        fd = fluent_date(self.a_datetime, timeStyle='short', dateStyle='short')
        en_US = Locale.parse('en_US')
        en_GB = Locale.parse('en_GB')
        self.assertRegex(fd.format(en_US), '^2/1/18, 2:15\\sPM$')
        self.assertEqual(fd.format(en_GB), '01/02/2018, 14:15')
    def test_validate_dateStyle(self):
        self.assertRaises(ValueError,
                          fluent_date,
                          self.a_date,
                          dateStyle="nothing")
    def test_validate_timeStyle(self):
        self.assertRaises(ValueError,
                          fluent_date,
                          self.a_datetime,
                          timeStyle="nothing")
    def test_timeZone(self):
        en_GB = Locale.parse('en_GB')
        LondonTZ = pytz.timezone('Europe/London')
        # 1st July is a date in British Summer Time
        # datetime object with tzinfo set to BST
        dt1 = datetime(2018, 7, 1, 23, 30, 0, tzinfo=pytz.UTC).astimezone(LondonTZ)
        fd1 = fluent_date(dt1, dateStyle='short', timeStyle='short')
        self.assertEqual(fd1.format(en_GB), '02/07/2018, 00:30')
        fd1b = fluent_date(dt1, dateStyle='full', timeStyle='full')
        self.assertRegex(fd1b.format(en_GB), '^Monday, 2 July 2018(,| at) 00:30:00 British Summer Time$')
        fd1c = fluent_date(dt1, dateStyle='short')
        self.assertEqual(fd1c.format(en_GB), '02/07/2018')
        fd1d = fluent_date(dt1, timeStyle='short')
        self.assertEqual(fd1d.format(en_GB), '00:30')
        # datetime object with no TZ, TZ passed in to fluent_date
        dt2 = datetime(2018, 7, 1, 23, 30, 0)  # Assumed UTC
        fd2 = fluent_date(dt2, dateStyle='short', timeStyle='short',
                          timeZone='Europe/London')
        self.assertEqual(fd2.format(en_GB), '02/07/2018, 00:30')
        fd2b = fluent_date(dt2, dateStyle='full', timeStyle='full',
                           timeZone='Europe/London')
        self.assertRegex(fd2b.format(en_GB), '^Monday, 2 July 2018(,| at) 00:30:00 British Summer Time$')
        fd2c = fluent_date(dt2, dateStyle='short',
                           timeZone='Europe/London')
        self.assertEqual(fd2c.format(en_GB), '02/07/2018')
        fd2d = fluent_date(dt1, timeStyle='short',
                           timeZone='Europe/London')
        self.assertEqual(fd2d.format(en_GB), '00:30')
    def test_allow_unsupported_options(self):
        # We are just checking that these don't raise exceptions
        with warnings.catch_warnings():
            warnings.simplefilter("ignore")
            fluent_date(self.a_date,
                        hour12=True,
                        weekday="narrow",
                        era="narrow",
                        year="numeric",
                        month="numeric",
                        day="numeric",
                        hour="numeric",
                        minute="numeric",
                        second="numeric",
                        timeZoneName="short",
                        )
    def test_disallow_nonexistant_options(self):
        self.assertRaises(
            TypeError,
            fluent_date,
            self.a_date,
            not_a_real_option=True,
        )
    def test_dont_wrap_unnecessarily(self):
        # Re-wrapping with no new options returns the same object.
        f1 = fluent_date(self.a_date)
        f2 = fluent_date(f1)
        self.assertIs(f1, f2)
    def test_copy_attributes(self):
        f1 = fluent_date(self.a_date, dateStyle='long', hour12=False)
        self.assertEqual(f1.options.dateStyle, 'long')
        f2 = fluent_date(f1, hour12=False)
        # Check we copied other attributes:
        self.assertEqual(f2.options.dateStyle, "long")
        self.assertEqual(f2.options.hour12, False)
        # Check we can override
        f3 = fluent_date(f2, dateStyle="full")
        self.assertEqual(f3.options.dateStyle, "full")
        # and didn't mutate anything
        self.assertEqual(f1.options.dateStyle, "long")
        self.assertEqual(f2.options.dateStyle, "long")
| projectfluent/python-fluent | fluent.runtime/tests/test_types.py | test_types.py | py | 12,837 | python | en | code | 185 | github-code | 36 |
27517754092 | import torch
import torch.nn as nn
import torchvision.datasets as dsets
import torchvision.transforms as transforms
from torch.autograd import Variable
import geojson
import json
import time
def chip_image1(img, chip_size=(300, 300)):
    """
    Segment an image into NxWxH chips.

    Args:
        img : HxWx(>=3) uint8 array of the image to be chipped
        chip_size : (width, height) dimensions for chips

    Returns:
        An ndarray of shape (N, W, H, 3) where N is the number of chips.
        Any partial chips at the right/bottom edges are dropped.

    Fixes: the previous version iterated with `tqdm`, which is never
    imported in this file (NameError at call time), and allocated a float64
    buffer only to copy it with astype at the end.
    """
    width, height, _ = img.shape
    wn, hn = chip_size
    n_w, n_h = width // wn, height // hn
    # Allocate the final dtype up front instead of float64 + astype copy.
    images = np.zeros((n_w * n_h, wn, hn, 3), dtype=np.uint8)
    k = 0
    for i in range(n_w):
        for j in range(n_h):
            images[k] = img[wn * i:wn * (i + 1), hn * j:hn * (j + 1), :3]
            k = k + 1
    return images
# NOTE(review): `fname` is not defined anywhere in this file — presumably it
# should be the path to a (Geo)JSON label file; confirm and define it above
# before this runs, otherwise this raises NameError at import time.
with open(fname) as f:
    data = json.load(f)
class CNN(nn.Module):
    """Two-block convolutional classifier (Conv -> BN -> ReLU -> MaxPool, x2).

    Fixes over the previous version:
    * hyperparameters were read from undefined module-level globals
      (``in_channels``, ``conv_size``, ``kernel_size``, ``num_classes``) —
      they are now constructor arguments with defaults, which is
      backward-compatible for ``CNN()`` callers;
    * the final linear layer was sized ``conv_size * in_channels *
      (conv_size*2)``, which does not match the flattened feature map; it is
      now derived from the actual spatial size after the two 2x2 poolings.
    """

    def __init__(self, in_channels=3, conv_size=16, kernel_size=5,
                 num_classes=10, input_size=32):
        """
        Args:
            in_channels: channels in the input image.
            conv_size: channels produced by the first conv block
                (the second block doubles it).
            kernel_size: square conv kernel size (odd, so padding keeps size).
            num_classes: output logits.
            input_size: spatial side length of the (square) input images.
        """
        super(CNN, self).__init__()
        padding = kernel_size // 2  # keep spatial size through each conv
        self.layer1 = nn.Sequential(
            nn.Conv2d(in_channels, conv_size, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(conv_size),
            nn.ReLU(),
            nn.MaxPool2d(2))
        self.layer2 = nn.Sequential(
            nn.Conv2d(conv_size, conv_size * 2, kernel_size=kernel_size, padding=padding),
            nn.BatchNorm2d(conv_size * 2),
            nn.ReLU(),
            nn.MaxPool2d(2))
        # After two MaxPool2d(2) layers the feature map is
        # (conv_size*2) x (input_size//4) x (input_size//4).
        self.fc = nn.Linear(conv_size * 2 * (input_size // 4) ** 2, num_classes)

    def forward(self, x):
        """Return class logits of shape (batch, num_classes)."""
        out = self.layer1(x)
        out = self.layer2(out)
        out = out.view(out.size(0), -1)  # flatten per sample
        out = self.fc(out)
        return out
# -----------------------------------------------------------------------------------
cnn = CNN()
cnn.cuda()
random.seed(0)
np.random.seed(0)
torch.manual_seed(0)
# if cuda:
torch.cuda.manual_seed(0)
torch.cuda.manual_seed_all(0)
torch.backends.cudnn.benchmark = True
# -----------------------------------------------------------------------------------
# Loss and Optimizer
criterion = nn.CrossEntropyLoss()
optimizer = torch.optim.Adam(cnn.parameters(), lr=learning_rate)
# -----------------------------------------------------------------------------------
# Train the Model.  Fixes over the previous version:
#   * `lables` typo (NameError) corrected to `labels`;
#   * the batch index `i` used in the progress print was never defined —
#     now comes from enumerate(train_loader);
#   * removed the stray torchvision.transforms.functional.to_tensor call:
#     DataLoader batches are already tensors (and `torchvision` itself was
#     never imported under that name).
# NOTE(review): `learning_rate`, `num_epochs`, `batch_size`, `train_loader`,
# `train_dataset` and `test_loader` must be defined earlier in the real
# script — confirm.
for epoch in range(num_epochs):
    for i, (images, labels) in enumerate(train_loader):
        images = Variable(images).cuda()
        labels = Variable(labels).cuda()
        # Forward + Backward + Optimize
        optimizer.zero_grad()
        outputs = cnn(images)
        loss = criterion(outputs, labels)
        loss.backward()
        optimizer.step()
        if (i + 1) % 100 == 0:
            print('Epoch [%d/%d], Iter [%d/%d] Loss: %.4f'
                  % (epoch + 1, num_epochs, i + 1, len(train_dataset) // batch_size, loss.item()))
# -----------------------------------------------------------------------------------
# Test the Model
cnn.eval()  # Change model to 'eval' mode (BN uses moving mean/var).
correct = 0
total = 0
for images, labels in test_loader:
    images = Variable(images).cuda()
    outputs = cnn(images)
    _, predicted = torch.max(outputs.data, 1)
    total += labels.size(0)
    correct += (predicted.cpu() == labels).sum()
# -----------------------------------------------------------------------------------
print('Test Accuracy of the model on the 10000 test images: %d %%' % (100 * correct / total))
# -----------------------------------------------------------------------------------
# Save the Trained Model
torch.save(cnn.state_dict(), 'cnn.pkl')
71249021545 | import json
from math import sqrt
def sim_distance(prefs, person1, person2):
    """Euclidean-distance-based similarity of two people, in (0, 1].

    1 means identical ratings on all shared items; 0 means no shared items.
    """
    shared = [item for item in prefs[person1] if item in prefs[person2]]
    # No overlap in rated items -> no basis for comparison.
    if not shared:
        return 0
    squared_diffs = sum(
        pow(prefs[person1][item] - prefs[person2][item], 2) for item in shared
    )
    return 1 / (1 + squared_diffs)
def sim_pearson(prefs, p1, p2):
    """Pearson correlation coefficient of two people's shared ratings.

    Returns 0 when the pair has no co-rated items or when either person has
    no variance over the shared items.
    """
    shared = [item for item in prefs[p1] if item in prefs[p2]]
    n = len(shared)
    # Without co-rated items there is nothing to correlate.
    if n == 0:
        return 0
    ratings1 = [prefs[p1][item] for item in shared]
    ratings2 = [prefs[p2][item] for item in shared]
    sum1, sum2 = sum(ratings1), sum(ratings2)
    sum1_sq = sum(r * r for r in ratings1)
    sum2_sq = sum(r * r for r in ratings2)
    product_sum = sum(r1 * r2 for r1, r2 in zip(ratings1, ratings2))
    numerator = product_sum - (sum1 * sum2 / n)
    denominator = sqrt((sum1_sq - pow(sum1, 2) / n) * (sum2_sq - pow(sum2, 2) / n))
    # Zero denominator: at least one person has constant ratings.
    if denominator == 0:
        return 0
    return numerator / denominator
def top_matches(prefs, person, n=5, similarity=sim_pearson):
    """Return the ``n`` people most similar to ``person``.

    Result is a list of (score, other_person) pairs, best first.
    """
    scores = [
        (similarity(prefs, person, other), other)
        for other in prefs
        if other != person
    ]
    return sorted(scores, reverse=True)[:n]
def calculate_similar_items(prefs, n=10):
    """Create a dictionary of items showing which other items each is most
    similar to, as item -> [(similarity, other_item), ...]."""
    result = {}
    for count, item in enumerate(prefs, 1):
        # Status updates for large datasets
        if count % 100 == 0: print("%d / %d" % (count, len(prefs)))
        # Find the most similar items to this one
        result[item] = top_matches(prefs, item, n=n, similarity=sim_distance)
    return result
def get_recommended_items(prefs, item_match, user):
    """Recommend items for ``user`` via item-based collaborative filtering.

    For each item the user rated, similar items (from ``item_match``) are
    scored by similarity-weighted rating; items the user already rated are
    skipped.  Returns (predicted_rating, item) pairs, best first.

    Resolves the old TODO: the final ranking no longer looks every item up
    twice — ``scores`` and ``totalSim`` are filled pairwise in the same key
    order, so their entries can be zipped.
    """
    userRatings = prefs[user]
    scores = {}
    totalSim = {}
    # Loop over items rated by this user
    for (item, rating) in userRatings.items():
        try:
            # Loop over items similar to this one
            for (similarity, item2) in item_match[item]:
                # Ignore if this user has already rated this item
                if item2 in userRatings: continue
                # Weighted sum of rating times similarity
                scores.setdefault(item2, 0)
                scores[item2] += similarity * rating
                # Sum of all the similarities
                totalSim.setdefault(item2, 0)
                totalSim[item2] += similarity
        except KeyError:
            # The user rated an item we have no similarity data for.
            print("Missing Key %s" % (item))
    # scores and totalSim share key insertion order, so zip them instead of
    # re-looking each item up; zero-similarity items are skipped to avoid
    # division by zero.
    rankings = [
        (score / sim_total, item)
        for (item, score), sim_total in zip(scores.items(), totalSim.values())
        if sim_total != 0
    ]
    # Return the rankings from highest to lowest
    rankings.sort(reverse=True)
    return rankings
# Build two rating matrices from a Yelp review dump (one JSON object per line):
#   business_dict: business_id -> {user_id: stars}
#   user_dict:     user_id     -> {business_id: stars}
# NOTE(review): the review-file path is machine-specific; parameterize it
# before running elsewhere.
user_dict = {}
business_dict = {}
with open('/home/vicky/Documents/it/notes/AI/UW/Project/data/review.json') as f:
    for line in f:
        line = json.loads(line)
        user = str(line['user_id'])
        business = str(line['business_id'])
        rate = line['stars']
        if business not in business_dict:
            business_dict[business] = {}
        business_dict[business][user] = rate
        if user not in user_dict:
            user_dict[user] = {}
        user_dict[user][business] = rate
# for key, value in user_dict.items():
#     print("Key : %s, Value: %s"% (key,value))
# for key, values in items_similar.items():
#     for i in range(len(values)):
#         if values[i][0] > 0.5:
#             print("Key : %s, Value : %s"% (values[i][0], values[i][1]))
#         for j in range(len(values[i])):
#             print(values[i][j])
# bus_6nnI3DfHn-DTd6tWnZu7Jg
# NOTE(review): users_similar is keyed by user yet is passed together with
# the business-keyed prefs and a business ID below — looks inconsistent;
# confirm which orientation (user-based vs item-based) was intended.
users_similar = calculate_similar_items(user_dict)
print(get_recommended_items(business_dict, users_similar, 'bus_F1tOtPzcsQk8PqNOatVsCg'))
# usr_zsZBYWYEmLLs81_f-HHM8w
# buss_similar = calculate_similar_items(business_dict)
# print(get_recommended_items(user_dict, buss_similar, 'usr_zsZBYWYEmLLs81_f-HHM8w'))
| brokencranium/recommender | ItemBasedFiltering.py | ItemBasedFiltering.py | py | 4,740 | python | en | code | 0 | github-code | 36 |
28518113817 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2005-2009 University of Washington
# See opus_core/LICENSE
from opus_core.variables.variable import Variable
from opus_core.misc import unique
from numpy import zeros, logical_not
class total_spaces(Variable):
    """Proposed spaces (units) per development-project-proposal component.

    Residential components report their residential units; non-residential
    components report their computed job capacity.
    """
    _return_type="int32"
    def dependencies(self):
        # Upstream variables this computation needs from the dataset pool.
        return [
                 "urbansim_parcel.development_project_proposal_component.is_residential",
                 "psrc_parcel.development_project_proposal_component.job_capacity_computed",
                 "urbansim_parcel.development_project_proposal_component.residential_units"
                 ]

    def compute(self,  dataset_pool):
        dppc = self.get_dataset()
        results = zeros(dppc.size(), dtype=self._return_type)
        is_residential = dppc["is_residential"].astype('bool')
        is_non_residential = logical_not(dppc["is_residential"])
        # Residential components count dwelling units; all others count jobs.
        results[is_residential] = (dppc["residential_units"][is_residential]).astype(self._return_type)
        results[is_non_residential] = (dppc["job_capacity_computed"][is_non_residential]).astype(self._return_type)
        return results

    def post_check(self,  values, dataset_pool=None):
        # Sanity check: a space count can never be negative.
#        size = dataset_pool.get_dataset("building").size()
        self.do_check("x >= 0")
from opus_core.tests import opus_unittest
from opus_core.datasets.dataset_pool import DatasetPool
from opus_core.storage_factory import StorageFactory
from numpy import array
from opus_core.tests.utils.variable_tester import VariableTester
class Tests(opus_unittest.OpusTestCase):
    """Unit test for the total_spaces variable."""
    def test_my_inputs(self):
        # Components with building_type 2 are residential (-> residential
        # units); types 1 and 3 are not (-> job_capacity_computed).
        tester = VariableTester(
            __file__,
            package_order=['urbansim_parcel','urbansim'],
            test_data={
            "development_project_proposal_component":{"proposal_component_id":array([1,2,3,4,5,6,7,8,9,10]),
                "building_type_id": array([1,2,2,2,1,2,1,3,2,2]),
                "job_capacity_computed": array([1,0,0,0,1,3,3,1,2,2])*1000,
                "residential_units": array([0,3,1,2,0,1,0,1,2,4]),
#                "unit_name": array(["building_sqft","residential_units","residential_units","residential_units",
#                          'building_sqft',"residential_units","building_sqft", "parcel_sqft",
#                          "residential_units","residential_units",]),
                },
            "building_type":{
                "building_type_id": array([1, 2, 3]),
                "is_residential": array([0, 1, 0]),
                }
            }
        )
        # Expected: job capacities for non-residential rows, units otherwise.
        should_be = array([1000, 3, 1, 2, 1000, 1, 3000, 1000, 2, 4])
        tester.test_is_close_for_variable_defined_by_this_module(self, should_be)
# Run this module's tests when executed directly.
if __name__=='__main__':
    opus_unittest.main()
| psrc/urbansim | psrc_parcel/development_project_proposal_component/total_spaces.py | total_spaces.py | py | 3,179 | python | en | code | 4 | github-code | 36 |
74833825064 | import unittest
import vics
import os
# Path of the throwaway sqlite file the tests create and remove.
# NOTE(review): `global` at module scope is a no-op — module-level names are
# already global; the statement can be dropped.
global test_db
test_db = "testing_db.sqlite"
class TestVicsServer(unittest.TestCase):
    def test_create_new_database(self):
        """create_new_database should create the sqlite file on disk."""
        # Clean up even when the assertion fails: the previous version only
        # removed the file after the assert, leaking it on test failure.
        self.addCleanup(lambda: os.path.exists(test_db) and os.remove(test_db))
        vics.create_new_database(test_db)
        # os.path.exists returns a bool; assert it directly instead of "== 1".
        self.assertTrue(os.path.exists(test_db))
| fine-fiddle/vics | test/test_vics_server.py | test_vics_server.py | py | 296 | python | en | code | 0 | github-code | 36 |
33078013582 | # pylint: disable=W0102
# pylint: disable=W0212
# pylint: disable=W0221
# pylint: disable=W0231
# pylint: disable=W0640
# pylint: disable=C0103
"""Module for representing UDS corpora."""
import os
import json
import requests
from pkg_resources import resource_filename
from os.path import basename, splitext
from logging import warn
from glob import glob
from random import sample
from functools import lru_cache
from typing import Union, Optional, Any, TextIO
from typing import Dict, List, Set
from io import BytesIO
from zipfile import ZipFile
from rdflib.query import Result
from rdflib.plugins.sparql.sparql import Query
from ..predpatt import PredPattCorpus
from .document import UDSDocument
from .annotation import UDSAnnotation
from .annotation import RawUDSAnnotation
from .annotation import NormalizedUDSAnnotation
from .graph import UDSSentenceGraph
from .metadata import UDSCorpusMetadata
from .metadata import UDSAnnotationMetadata
from .metadata import UDSPropertyMetadata
Location = Union[str, TextIO]
class UDSCorpus(PredPattCorpus):
"""A collection of Universal Decompositional Semantics graphs
Parameters
----------
sentences
the predpatt sentence graphs to associate the annotations with
documents
the documents associated with the predpatt sentence graphs
sentence_annotations
additional annotations to associate with predpatt nodes on
sentence-level graphs; in most cases, no such annotations
will be passed, since the standard UDS annotations are
automatically loaded
document_annotations
additional annotations to associate with predpatt nodes on
document-level graphs
version
the version of UDS datasets to use
split
the split to load: "train", "dev", or "test"
annotation_format
which annotation type to load ("raw" or "normalized")
"""
UD_URL = 'https://github.com/UniversalDependencies/' +\
'UD_English-EWT/archive/r1.2.zip'
ANN_DIR = resource_filename('decomp', 'data/')
CACHE_DIR = resource_filename('decomp', 'data/')
    # NOTE(review): the [] defaults below are mutable default arguments; safe
    # only because they are never mutated here — consider None sentinels.
    def __init__(self,
                 sentences: Optional[PredPattCorpus] = None,
                 documents: Optional[Dict[str, UDSDocument]] = None,
                 sentence_annotations: List[UDSAnnotation] = [],
                 document_annotations: List[UDSAnnotation] = [],
                 version: str = '2.0',
                 split: Optional[str] = None,
                 annotation_format: str = 'normalized'):
        self._validate_arguments(sentences, documents,
                                 version, split, annotation_format)
        self.version = version
        self.annotation_format = annotation_format
        self._metadata = UDSCorpusMetadata()
        # methods inherited from Corpus that reference the self._graphs
        # attribute will operate on sentence-level graphs only
        self._graphs = self._sentences = {}
        self._documents = {}
        self._initialize_paths(version, annotation_format)
        all_built = self._check_build_status()
        # Four construction modes, tried in priority order:
        # 1. no sentences given and the requested split is cached -> load it
        if sentences is None and split in self._sentences_paths:
            self._load_split(split)
        # 2. no sentences, no split, everything cached -> load all splits
        elif sentences is None and split is None and all_built:
            for split in ['train', 'dev', 'test']:
                self._load_split(split)
        # 3. nothing cached -> download UD-EWT and build from CoNLL
        elif sentences is None:
            # download UD-EWT
            udewt = requests.get(self.UD_URL).content
            if sentence_annotations or document_annotations:
                warn("sentence and document annotations ignored")
            self._process_conll(split, udewt)
        # 4. caller supplied pre-built graphs/documents -> use them directly
        else:
            self._sentences = sentences
            self._documents = documents
            self.add_annotation(sentence_annotations, document_annotations)
    def _validate_arguments(self, sentences, documents,
                            version, split, annotation_format):
        """Raise ValueError when the constructor arguments are inconsistent."""
        # neither documents nor graphs should be supplied to the constructor
        # without the other
        if sentences is None and documents is not None:
            raise ValueError('UDS documents were provided without sentences. '
                             'Cannot construct corpus.')
        elif sentences is not None and documents is None:
            raise ValueError('UDS sentences were provided without documents. '
                             'Cannot construct corpus.')
        if not (split is None or split in ['train', 'dev', 'test']):
            errmsg = 'split must be "train", "dev", or "test"'
            raise ValueError(errmsg)
        if annotation_format not in ['raw', 'normalized']:
            errmsg = f'Unrecognized annotation format {annotation_format}.'\
                     f'Must be either "raw" or "normalized".'
            raise ValueError(errmsg)
    # NOTE: return annotation corrected from `-> bool` to `-> None`; this
    # method only sets path attributes and returns nothing.
    def _initialize_paths(self, version, annotation_format) -> None:
        """Discover cached corpus JSON and annotation files on disk.

        Populates ``_sentences_paths``/``_documents_paths`` (split name ->
        cached JSON path) and ``_sentence_annotation_paths``/
        ``_document_annotation_paths`` (lists of annotation JSON files),
        unzipping shipped annotation archives on first use.
        """
        # Split name is the second-to-last hyphen-separated chunk of the
        # cached file name (e.g. "uds-ewt-sentences-train-raw.json").
        self._sentences_paths = {splitext(basename(p))[0].split('-')[-2]: p
                                 for p
                                 in glob(os.path.join(self.CACHE_DIR,
                                                      version,
                                                      annotation_format,
                                                      'sentence',
                                                      '*.json'))}
        self._documents_paths = {splitext(basename(p))[0].split('-')[-2]: p
                                 for p
                                 in glob(os.path.join(self.CACHE_DIR,
                                                      version,
                                                      annotation_format,
                                                      'document',
                                                      '*.json'))}
        self._sentences_annotation_dir = os.path.join(self.ANN_DIR,
                                                      version,
                                                      annotation_format,
                                                      'sentence',
                                                      'annotations')
        self._documents_annotation_dir = os.path.join(self.ANN_DIR,
                                                      version,
                                                      annotation_format,
                                                      'document',
                                                      'annotations')
        sent_ann_paths = glob(os.path.join(self._sentences_annotation_dir,
                                           '*.json'))
        doc_ann_paths = glob(os.path.join(self._documents_annotation_dir,
                                          '*.json'))
        # out of the box, the annotations are stored as zip files and the
        # JSON they contain must be extracted
        if not sent_ann_paths:
            zipped_sent_paths = os.path.join(self._sentences_annotation_dir,
                                             '*.zip')
            zipped_sentence_annotations = glob(zipped_sent_paths)
            for zipped in zipped_sentence_annotations:
                ZipFile(zipped).extractall(path=self._sentences_annotation_dir)
            sent_ann_paths = glob(os.path.join(self._sentences_annotation_dir,
                                               '*.json'))
        if not doc_ann_paths:
            zipped_doc_paths = os.path.join(self._documents_annotation_dir,
                                            '*.zip')
            zipped_document_annotations = glob(zipped_doc_paths)
            for zipped in zipped_document_annotations:
                ZipFile(zipped).extractall(path=self._documents_annotation_dir)
            doc_ann_paths = glob(os.path.join(self._documents_annotation_dir,
                                              '*.json'))
        self._sentence_annotation_paths = sent_ann_paths
        self._document_annotation_paths = doc_ann_paths
def _check_build_status(self):
sentences_built = self._sentences_paths and \
all(s in self._sentences_paths
for s in ['train', 'dev', 'test'])
documents_built = self._documents_paths and \
all(s in self._documents_paths
for s in ['train', 'dev', 'test'])
return sentences_built and documents_built
    def _load_split(self, split):
        """Load one cached split from JSON and merge it into this corpus."""
        sentence_fpath = self._sentences_paths[split]
        doc_fpath = self._documents_paths[split]
        split = self.__class__.from_json(sentence_fpath, doc_fpath)
        # merge the split's metadata, sentences, and documents into self
        self._metadata += split.metadata
        self._sentences.update(split._sentences)
        self._documents.update(split._documents)
    def _process_conll(self, split, udewt):
        """Build graphs from a downloaded UD-EWT zip and cache them as JSON.

        ``udewt`` is the raw bytes of the UD-EWT release archive; only the
        requested ``split`` (or every split when ``split`` is None) is merged
        into this corpus and serialized to the cache directory.
        """
        with ZipFile(BytesIO(udewt)) as zf:
            conll_names = [fname for fname in zf.namelist()
                           if splitext(fname)[-1] == '.conllu']
            for fn in conll_names:
                with zf.open(fn) as conll:
                    conll_str = conll.read().decode('utf-8')
                    # split name is the last hyphen-separated chunk of the
                    # conllu file name (e.g. "en-ud-train.conllu" -> "train")
                    sname = splitext(basename(fn))[0].split('-')[-1]
                    spl = self.__class__.from_conll(conll_str,
                                                    self._sentence_annotation_paths,
                                                    self._document_annotation_paths,
                                                    annotation_format=self.annotation_format,
                                                    version=self.version,
                                                    name='ewt-'+sname)
                    if sname == split or split is None:
                        # add metadata
                        self._metadata += spl.metadata
                        # prepare sentences
                        sentences_json_name = '-'.join(['uds', 'ewt', 'sentences',
                                                        sname, self.annotation_format]) +\
                                                        '.json'
                        sentences_json_path = os.path.join(self.__class__.CACHE_DIR,
                                                           self.version,
                                                           self.annotation_format,
                                                           'sentence',
                                                           sentences_json_name)
                        self._sentences.update(spl._sentences)
                        self._sentences_paths[sname] = sentences_json_path
                        # prepare documents
                        documents_json_name = '-'.join(['uds', 'ewt', 'documents',
                                                        sname, self.annotation_format]) +\
                                                        '.json'
                        documents_json_path = os.path.join(self.__class__.CACHE_DIR,
                                                           self.version,
                                                           self.annotation_format,
                                                           'document',
                                                           documents_json_name)
                        self._documents.update(spl._documents)
                        self._documents_paths[sname] = documents_json_path
                        # serialize both sentence- and document-level graphs
                        spl.to_json(sentences_json_path, documents_json_path)
    @classmethod
    def from_conll(cls,
                   corpus: Location,
                   sentence_annotations: List[Location] = [],
                   document_annotations: List[Location] = [],
                   annotation_format: str = 'normalized',
                   version: str = '2.0',
                   name: str = 'ewt') -> 'UDSCorpus':
        """Load UDS graph corpus from CoNLL (dependencies) and JSON (annotations)

        This method should only be used if the UDS corpus is being
        (re)built. Otherwise, loading the corpus from the JSON shipped
        with this package using UDSCorpus.__init__ or
        UDSCorpus.from_json is suggested.

        Parameters
        ----------
        corpus
            (path to) Universal Dependencies corpus in conllu format
        sentence_annotations
            a list of paths to JSON files or open JSON files containing
            sentence-level annotations
        document_annotations
            a list of paths to JSON files or open JSON files containing
            document-level annotations
        annotation_format
            Whether the annotation is raw or normalized
        version
            the version of UDS datasets to use
        name
            corpus name to be appended to the beginning of graph ids
        """
        # pick the annotation loader matching the requested format
        if annotation_format == 'raw':
            loader = RawUDSAnnotation.from_json
        elif annotation_format == 'normalized':
            loader = NormalizedUDSAnnotation.from_json
        else:
            raise ValueError('annotation_format must be either'
                             '"raw" or "normalized"')
        # build PredPatt graphs, then wrap them as UDS sentence graphs and
        # group them into documents
        predpatt_corpus = PredPattCorpus.from_conll(corpus, name=name)
        predpatt_sentence_graphs = {name: UDSSentenceGraph(g, name)
                                    for name, g in predpatt_corpus.items()}
        predpatt_documents = cls._initialize_documents(predpatt_sentence_graphs)
        # process sentence-level graph annotations
        processed_sentence_annotations = []
        for ann_path in sentence_annotations:
            ann = loader(ann_path)
            processed_sentence_annotations.append(ann)
        # process document-level graph annotations
        processed_document_annotations = []
        for ann_path in document_annotations:
            ann = loader(ann_path)
            processed_document_annotations.append(ann)
        return cls(predpatt_sentence_graphs, predpatt_documents,
                   processed_sentence_annotations,
                   processed_document_annotations,
                   version=version,
                   annotation_format=annotation_format)
    @classmethod
    def _load_ud_ids(cls, sentence_ids_only: bool = False) -> Dict[str, Dict[str, str]]:
        """Load UD document and sentence IDs for each sentence-level graph.

        When ``sentence_ids_only`` is True the result is flattened to
        graph name -> sentence ID (i.e. Dict[str, str]).
        """
        # load in the document and sentence IDs for each sentence-level graph
        ud_ids_path = os.path.join(cls.ANN_DIR, 'ud_ids.json')
        with open(ud_ids_path) as ud_ids_file:
            ud_ids = json.load(ud_ids_file)
        if sentence_ids_only:
            return {k: v['sentence_id'] for k, v in ud_ids.items()}
        else:
            return ud_ids
    @classmethod
    def from_json(cls, sentences_jsonfile: Location,
                  documents_jsonfile: Location) -> 'UDSCorpus':
        """Load annotated UDS graph corpus (including annotations) from JSON

        This is the suggested method for loading the UDS corpus.

        Parameters
        ----------
        sentences_jsonfile
            file containing Universal Decompositional Semantics corpus
            sentence-level graphs in JSON format
        documents_jsonfile
            file containing Universal Decompositional Semantics corpus
            document-level graphs in JSON format
        """
        sentences_ext = splitext(basename(sentences_jsonfile))[-1]
        documents_ext = splitext(basename(documents_jsonfile))[-1]
        sent_ids = cls._load_ud_ids(sentence_ids_only=True)
        # process sentence-level graphs; each jsonfile argument may be a
        # .json path, a raw JSON string, or an open file object
        if isinstance(sentences_jsonfile, str) and sentences_ext == '.json':
            with open(sentences_jsonfile) as infile:
                sentences_json = json.load(infile)
        elif isinstance(sentences_jsonfile, str):
            sentences_json = json.loads(sentences_jsonfile)
        else:
            sentences_json = json.load(sentences_jsonfile)
        sentences = {name: UDSSentenceGraph.from_dict(g_json, name)
                     for name, g_json in sentences_json['data'].items()}
        # process document-level graphs (same three input forms as above)
        if isinstance(documents_jsonfile, str) and documents_ext == '.json':
            with open(documents_jsonfile) as infile:
                documents_json = json.load(infile)
        elif isinstance(documents_jsonfile, str):
            documents_json = json.loads(documents_jsonfile)
        else:
            documents_json = json.load(documents_jsonfile)
        documents = {name: UDSDocument.from_dict(d_json, sentences,
                                                 sent_ids, name)
                     for name, d_json in documents_json['data'].items()}
        corpus = cls(sentences, documents)
        # re-attach the serialized metadata to the new corpus
        metadata_dict = {'sentence_metadata': sentences_json['metadata'],
                         'document_metadata': documents_json['metadata']}
        metadata = UDSCorpusMetadata.from_dict(metadata_dict)
        corpus.add_corpus_metadata(metadata)
        return corpus
    def add_corpus_metadata(self, metadata: UDSCorpusMetadata) -> None:
        """Merge *metadata* into this corpus's metadata

        Parameters
        ----------
        metadata
            sentence- and document-level metadata to fold in (delegates to
            UDSCorpusMetadata's in-place addition)
        """
        self._metadata += metadata
def add_annotation(self, sentence_annotation: UDSAnnotation,
document_annotation: UDSAnnotation) -> None:
"""Add annotations to UDS sentence and document graphs
Parameters
----------
sentence_annotation
the annotations to add to the sentence graphs in the corpus
document_annotation
the annotations to add to the document graphs in the corpus
"""
for ann in sentence_annotation:
self.add_sentence_annotation(ann)
for ann in document_annotation:
self.add_document_annotation(ann)
def add_sentence_annotation(self, annotation: UDSAnnotation) -> None:
"""Add annotations to UDS sentence graphs
Parameters
----------
annotation
the annotations to add to the graphs in the corpus
"""
self._metadata.add_sentence_metadata(annotation.metadata)
for gname, (node_attrs, edge_attrs) in annotation.items():
if gname in self._sentences:
self._sentences[gname].add_annotation(node_attrs,
edge_attrs)
def add_document_annotation(self, annotation: UDSAnnotation) -> None:
"""Add annotations to UDS documents
Parameters
----------
annotation
the annotations to add to the documents in the corpus
"""
self._metadata.add_document_metadata(annotation.metadata)
for dname, (node_attrs, edge_attrs) in annotation.items():
if dname in self._documents:
self._documents[dname].add_annotation(node_attrs,
edge_attrs)
@classmethod
def _initialize_documents(cls, graphs: Dict[str, 'UDSSentenceGraph']) -> Dict[str, UDSDocument]:
# Load the UD document and sentence IDs
ud_ids = cls._load_ud_ids()
# Add each graph to the appropriate document
documents = {}
for name, graph in graphs.items():
doc_id = ud_ids[name]['document_id']
sent_id = ud_ids[name]['sentence_id']
graph.document_id = doc_id
graph.sentence_id = sent_id
# Add the graph to an existing document
if doc_id in documents:
documents[doc_id].add_sentence_graphs({name: graph}, {name: sent_id})
# Create a new document
else:
genre = doc_id.split('-')[0]
timestamp = UDSDocument._get_timestamp_from_document_name(doc_id)
documents[doc_id] =\
UDSDocument({name: graph}, {name: sent_id}, doc_id, genre, timestamp)
return documents
    def to_json(self,
                sentences_outfile: Optional[Location] = None,
                documents_outfile: Optional[Location] = None) -> Optional[str]:
        """Serialize corpus to json

        NOTE(review): when ``sentences_outfile`` is None this method returns
        the sentence-level JSON string immediately and the document-level
        graphs are never serialized — confirm this early return is intended.

        Parameters
        ----------
        sentences_outfile
            file to serialize sentence-level graphs to; if None, the
            sentence JSON is returned as a string instead
        documents_outfile
            file to serialize document-level graphs to; if None, the
            document JSON is returned as a string instead
        """
        metadata_serializable = self._metadata.to_dict()

        # convert graphs to dictionaries
        sentences_serializable = {'metadata': metadata_serializable['sentence_metadata'],
                                  'data': {name: graph.to_dict()
                                           for name, graph
                                           in self._sentences.items()}}

        if sentences_outfile is None:
            return json.dumps(sentences_serializable)
        elif isinstance(sentences_outfile, str):
            with open(sentences_outfile, 'w') as out:
                json.dump(sentences_serializable, out)
        else:
            # assume an already-open file-like object
            json.dump(sentences_serializable, sentences_outfile)

        # Serialize documents (Note: we serialize only the *graphs*
        # for each document — not the metadata, which is loaded by
        # other means when calling UDSDocument.from_dict)
        documents_serializable = {'metadata': metadata_serializable['document_metadata'],
                                  'data': {name: doc.document_graph.to_dict()
                                           for name, doc
                                           in self._documents.items()}}

        if documents_outfile is None:
            return json.dumps(documents_serializable)
        elif isinstance(documents_outfile, str):
            with open(documents_outfile, 'w') as out:
                json.dump(documents_serializable, out)
        else:
            # assume an already-open file-like object
            json.dump(documents_serializable, documents_outfile)
    # NOTE(review): lru_cache on an instance method keys the cache on `self`
    # and keeps every corpus instance alive for the cache's lifetime
    # (flake8-bugbear B019) — consider a per-instance cache instead.
    @lru_cache(maxsize=128)
    def query(self, query: Union[str, Query],
              query_type: Optional[str] = None,
              cache_query: bool = True,
              cache_rdf: bool = True) -> Union[Result,
                                               Dict[str,
                                                    Dict[str, Any]]]:
        """Query all graphs in the corpus using SPARQL 1.1

        Returns a mapping from graph ID to that graph's query result.

        Parameters
        ----------
        query
            a SPARQL 1.1 query
        query_type
            whether this is a 'node' query or 'edge' query. If set to
            None (default), a Results object will be returned. The
            main reason to use this option is to automatically format
            the output of a custom query, since Results objects
            require additional postprocessing.
        cache_query
            whether to cache the query. This should usually be set to
            True. It should generally only be False when querying
            particular nodes or edges--e.g. as in precompiled queries.
        cache_rdf
            whether to keep the RDF constructed for querying against.
            Setting this to False will slow down future queries but
            saves a lot of memory
        """
        return {gid: graph.query(query, query_type,
                                 cache_query, cache_rdf)
                for gid, graph in self.items()}
    @property
    def documents(self) -> Dict[str, UDSDocument]:
        """The documents in the corpus, keyed by document ID"""
        return self._documents
    @property
    def documentids(self):
        """The document IDs of the documents in the corpus, as a list"""
        return list(self._documents)
    @property
    def ndocuments(self):
        """The number of documents in the corpus"""
        return len(self._documents)
def sample_documents(self, k: int) -> Dict[str, UDSDocument]:
"""Sample k documents without replacement
Parameters
----------
k
the number of documents to sample
"""
return {doc_id: self._documents[doc_id]
for doc_id
in sample(self._documents.keys(), k=k)}
    @property
    def metadata(self):
        """The corpus's combined sentence- and document-level metadata"""
        return self._metadata
    @property
    def sentence_node_subspaces(self) -> Set[str]:
        """The UDS sentence node subspaces in the corpus

        Abstract here; concrete subclasses must override.
        """
        raise NotImplementedError
    @property
    def sentence_edge_subspaces(self) -> Set[str]:
        """The UDS sentence edge subspaces in the corpus

        Abstract here; concrete subclasses must override.
        """
        raise NotImplementedError
    @property
    def sentence_subspaces(self) -> Set[str]:
        """The UDS sentence subspaces in the corpus

        Union of the node and edge subspaces; relies on subclasses
        implementing both abstract sibling properties.
        """
        return self.sentence_node_subspaces |\
            self.sentence_edge_subspaces
    @property
    def document_node_subspaces(self) -> Set[str]:
        """The UDS document node subspaces in the corpus

        Abstract here; concrete subclasses must override.
        """
        raise NotImplementedError
    @property
    def document_edge_subspaces(self) -> Set[str]:
        """The UDS document edge subspaces in the corpus

        NOTE(review): unlike its sibling subspace properties, this one
        returns ``self._document_edge_subspaces`` rather than raising
        ``NotImplementedError``; that attribute is not set anywhere in the
        visible code — confirm a subclass defines it.
        """
        return self._document_edge_subspaces
    @property
    def document_subspaces(self) -> Set[str]:
        """The UDS document subspaces in the corpus

        Union of the document node and edge subspace properties.
        """
        return self.document_node_subspaces |\
            self.document_edge_subspaces
    def sentence_properties(self, subspace: Optional[str] = None) -> Set[str]:
        """The properties in a sentence subspace

        Abstract here; concrete subclasses must override.
        """
        raise NotImplementedError
    def sentence_property_metadata(self, subspace: str,
                                   prop: str) -> UDSPropertyMetadata:
        """The metadata for a property in a sentence subspace

        Abstract here; concrete subclasses must override.

        Parameters
        ----------
        subspace
            The subspace the property is in
        prop
            The property in the subspace
        """
        raise NotImplementedError
    def document_properties(self, subspace: Optional[str] = None) -> Set[str]:
        """The properties in a document subspace

        Abstract here; concrete subclasses must override.
        """
        raise NotImplementedError
    def document_property_metadata(self, subspace: str,
                                   prop: str) -> UDSPropertyMetadata:
        """The metadata for a property in a document subspace

        Abstract here; concrete subclasses must override.

        Parameters
        ----------
        subspace
            The subspace the property is in
        prop
            The property in the subspace
        """
        raise NotImplementedError
| decompositional-semantics-initiative/decomp | decomp/semantics/uds/corpus.py | corpus.py | py | 26,248 | python | en | code | 56 | github-code | 36 |
7964665793 | # Программа принимает действительное положительное число x и целое отрицательное число y.
# Необходимо выполнить возведение числа x в степень y. Задание необходимо реализовать в виде функции my_func(x, y).
# При решении задания необходимо обойтись без встроенной функции возведения числа в степень.
def my_func(x, y):
"""Gets positive x and negative integer y, returns x times """
product = 1
for i in range(1, -y + 1):
product *= 1/x
i += 1
return product
def _ask(prompt, caster, cast_error, reject, reject_error):
    """Prompt until the input parses with *caster* and passes validation."""
    while True:
        raw = input(prompt)
        try:
            value = caster(raw)
        except ValueError:
            print(cast_error)
            continue
        if reject(value):
            print(reject_error)
        else:
            return value


x_inp = _ask('Enter real positive x:', float,
             'x is not a real number!',
             lambda v: v < 0, 'x is not positive!')
y_inp = _ask('Enter negative integer y:', int,
             'y is not an integer number!',
             lambda v: v >= 0, 'y is not negative!')

print(f'{x_inp} times {y_inp} = {my_func(x_inp, y_inp)}')
| sekundra/Python_basic | 3дз/3_4.py | 3_4.py | py | 1,316 | python | ru | code | 0 | github-code | 36 |
534600653 | #Desafio python - Desenvolver um protótipo para sistema bancário, inicialmente com as opções: Depósito, saque e extrato
saldo_conta = 0
limite = 500
extrato = ""
saques_realizados = 0
limite_saques = 3
print("Bem vindo ao Banco *Selecione uma opção no menu:*")
menu="""
[1] - Depositar
[2] - Sacar
[3] - Extrato
[0] - Sair
"""
while True:
opcao = input(menu)
if opcao == "1":
print("### Depósito ###")
valor = float(input("Digite o valor a ser depositado? "))
if valor > 0:
saldo_conta += valor
extrato == (f"Deposito de: {valor:.2f}")
else:
print("Valor inválido, favor inserir outro!")
elif opcao == "2":
print("### Saque ###")
print(f"SALDO ATUAL R${saldo_conta:.2f} ")
valor = float(input("Qual o valor que seseja Sacar? "))
if valor > saldo_conta:
print("Saldo em conta Insuficiente!")
elif valor > limite:
print(f"Excedeu limite de saque, seu limite atual é de {limite:.2f}")
elif saques_realizados>=limite_saques:
print(f"Limite máximo de {limite_saques} saques diários foi excedido, retorne amanhã! ")
elif valor > 0:
saldo_conta -= valor
limite_saques -=1
extrato == print(f"Saque de R$ {valor:.2f} realizado com sucesso!! Hoje você ainda pode realizar {limite_saques} saques")
else:
print("Operação inválida, favor tente novamente!!")
elif opcao == "3":
print("\n ############ Extrato ############")
print("Não foram realizadas movimentações." if not extrato else extrato)
print(f"\n Saldo: R$ {saldo_conta:.2f}")
print("####################################")
elif opcao == "0":
print("Obrigado por utilizar nossos serviços, volte sempre!!")
break
else:
("Opção inválida, favor selecionar opçao disponível!!")
| LeandroJBrito/desafio_python_bank | desafio_sistema_bank.py | desafio_sistema_bank.py | py | 2,087 | python | pt | code | 0 | github-code | 36 |
8779071357 | # -*- coding: utf-8 -*-
from math import sqrt
from os.path import isfile
from .datum import Datum,getDatum
class Linear(object):
    """Result of a linear least-squares fit: slope, intercept, correlation."""

    def __init__(self):
        # Slope (a) and intercept (b) are Datum instances (value + error);
        # the correlation coefficient r is a plain float once computed.
        self.a = Datum()
        self.b = Datum()
        self.r = None

    def echo(self):
        """Print the fit parameters (labels kept in Spanish, as in the UI)."""
        for label, value in (
                ("Coeficiente de correlación: ", self.r),
                ("Pendiente: ", self.a),
                ("Ordenada en el origen: ", self.b)):
            print(label + str(value))
class Momenta(object):
    """Running sums (Σx, Σy, Σx², Σxy, Σy²) for incremental least squares."""

    def __init__(self):
        # All accumulators start at zero; DataBunch updates them as points
        # are added or removed.
        self.x = self.y = 0
        self.xx = self.xy = 0
        self.yy = 0
class DataBunch(object):
    """A set of (x, y) data points with incrementally maintained sums.

    Points are stored as [Datum, Datum] pairs; the running sums in
    ``self.sums`` make the linear fit in :meth:`vOffset` O(1) after load.
    """

    def __init__(self, filename=''):
        self.points = []
        self.length = 0
        self.sums = Momenta()
        # load() is a no-op when the file does not exist (or name is '').
        self.load(filename)

    def addPoint(self, x, y):
        """Append a point, coercing inputs with getDatum, and update sums."""
        dx = getDatum(x)
        dy = getDatum(y)
        self.points += [[dx,dy]]
        self.length += 1
        self.sums.x += dx.value
        self.sums.y += dy.value
        self.sums.xx += dx.value*dx.value
        self.sums.xy += dx.value*dy.value
        self.sums.yy += dy.value*dy.value

    def removePoints(self, start, end=None):
        """Remove points in the inclusive index range [start, end].

        A single index may be given; a reversed range is swapped; an
        out-of-range request is silently ignored.
        """
        # BUG FIX: the original tested `if not end`, which misread end=0
        # as "no end given" and removed the wrong range.
        if end is None:
            end = start
        if end<start:
            c = end
            end = start
            start = c
        if start<0 or self.length<start+1:
            return
        if end<0 or self.length<end+1:
            return
        # Subtract each removed point's contribution from the running sums.
        for point in self.points[start:end+1]:
            self.sums.x -= point[0].value
            self.sums.y -= point[1].value
            self.sums.xx -= point[0].value*point[0].value
            self.sums.xy -= point[0].value*point[1].value
            self.sums.yy -= point[1].value*point[1].value
        self.points = self.points[0:start] + self.points[end+1:]
        self.length -= end - start + 1

    def echo(self):
        """Print every point and the current running sums."""
        for i in range(self.length):
            print(str(i) + ":\t" + str(self.points[i][0]) + "\t" + str(self.points[i][1]))
        print("sum(x) = " + str(self.sums.x))
        print("sum(y) = " + str(self.sums.y))
        print("sum(x^2) = " + str(self.sums.xx))
        print("sum(xy) = " + str(self.sums.xy))
        print("sum(y^2) = " + str(self.sums.yy))

    def save(self, filename):
        """Write points as 'x;y;x_err;y_err' lines; refuses to overwrite."""
        if isfile(filename):
            print("El archivo '" + filename + "' ya existe.")
        else:
            # Use a context manager so the file is closed on any error.
            with open(filename, "w") as file:
                for point in self.points:
                    file.write(str(point[0].value) + ";" + str(point[1].value) + ";" + str(point[0].error) + ";" + str(point[1].error) + "\n")

    def load(self, filename):
        """Read ';'-separated lines into points; non-numeric fields skipped.

        Lines with 2 fields are (x, y); 3 fields add a y error; 4 or more
        add both x and y errors.
        """
        if isfile(filename):
            with open(filename, "r") as file:
                for line in file:
                    floats = []
                    n = 0
                    for field in line.split(";"):
                        try:
                            floats += [float(field)]
                            n += 1
                        except ValueError:
                            continue
                    if n>1:
                        x = Datum(floats[0])
                        y = Datum(floats[1])
                        if n==3:
                            y.error = floats[2]
                        elif n>3:
                            x.error = floats[2]
                            y.error = floats[3]
                        self.addPoint(x,y)

    def vOffset(self):
        """Least-squares line fit from the running sums.

        Returns a Linear with slope/intercept values and errors and the
        correlation coefficient, or None when fewer than 3 points exist
        (the error estimate divides by length - 2).
        """
        if self.length<3:
            return None
        linear = Linear()
        # d = n * Var(x); the common denominator of slope and intercept.
        d = self.sums.xx - self.sums.x*self.sums.x/self.length
        linear.a.value = (self.sums.xy - self.sums.x*self.sums.y/self.length)/d
        linear.b.value = (self.sums.y*self.sums.xx - self.sums.x*self.sums.xy)/d/self.length
        # Residual variance of y about the fitted line, with n-2 degrees
        # of freedom.
        sy2 = self.sums.yy - 2*linear.a.value*self.sums.xy - 2*linear.b.value*self.sums.y + linear.a.value*linear.a.value*self.sums.xx + 2*linear.a.value*linear.b.value*self.sums.x + linear.b.value*linear.b.value*self.length
        sy2 /= self.length - 2
        linear.a.error = sqrt(sy2/d)
        linear.b.error = linear.a.error*sqrt(self.sums.xx/self.length)
        linear.r = (self.sums.xy - self.sums.x*self.sums.y/self.length)/sqrt(d*(self.sums.yy - self.sums.y*self.sums.y/self.length))
        return linear
| jatolmed/arduino-meteo | statistics/statistics_old.py | statistics_old.py | py | 4,150 | python | en | code | 0 | github-code | 36 |
38400918265 | # This is a demo of running face recognition on a Raspberry Pi.
# This program will print out the names of anyone it recognizes to the console.
# To run this, you need a Raspberry Pi 2 (or greater) with face_recognition and
# the picamera[array] module installed.
# You can follow this installation instructions to get your RPi set up:
# https://gist.github.com/ageitgey/1ac8dbe8572f3f533df6269dab35df65
import face_recognition
import picamera
import numpy as np
import os
import shutil
from datetime import datetime
# Get a reference to the Raspberry Pi camera.
# If this fails, make sure you have a camera connected to the RPi and that you
# enabled your camera in raspi-config and rebooted first.
# Camera initialisation; 320x240 keeps per-frame face detection fast enough
# for a Raspberry Pi.
camera = picamera.PiCamera()
camera.resolution = (320, 240)
output = np.empty((240, 320, 3), dtype=np.uint8)  # reusable RGB frame buffer
# Load a sample picture and learn how to recognize it.
print("Loading known face image(s)")
# Initialize some variables.  encoding_array and name_array are parallel
# lists: encoding i belongs to the person named in name_array[i].
face_locations = []
face_encodings = []
encoding_array = []
name_array = []
# Directory of training images (new .jpg files are encoded from here and
# then archived into `destination`).
directory = "./training_images"
source = './training_images'
destination = './recognized_faces'
# NOTE(review): raises FileNotFoundError if ./training_images is missing —
# confirm the directory is created before first run.
files = os.listdir(source)
def main():
    """Train from images in `directory`, then loop recognizing camera frames."""

    def load_known_faces():
        """Load previously saved encodings and names from disk, if present.

        BUG FIX: the original opened person_names.txt in append mode and
        then called .read() (io.UnsupportedOperation), and assigned the
        results to local names that shadowed the module-level lists.
        """
        if os.path.isfile("./face_embeddings.txt") and os.path.isfile("./person_names.txt"):
            with open("./face_embeddings.txt") as faces_file:
                for line in faces_file:
                    line = line.strip()
                    if line:
                        encoding_array.append(
                            np.array([float(v) for v in line.split(",")]))
            with open("./person_names.txt") as names_file:
                for line in names_file:
                    line = line.strip()
                    if line:
                        name_array.append(line)

    def train_new_faces(train_dir):
        """Encode every .jpg in *train_dir*, persist it, and archive the file."""
        for filename in os.listdir(train_dir):
            if not filename.endswith(".jpg"):
                continue
            image_path = os.path.join(train_dir, filename)
            image_data = face_recognition.load_image_file(image_path)
            encodings = face_recognition.face_encodings(image_data)
            if not encodings:
                print("No face found in {}, skipping.".format(filename))
                continue
            encoding_array.append(encodings[0])
            name_array.append(filename)
            # Persist one encoding per line as comma-separated floats.
            # (BUG FIX: the original tried to write a Python list object
            # straight to the file, which raises TypeError.)
            with open("./face_embeddings.txt", "a") as faces_file:
                faces_file.write(",".join(str(v) for v in encodings[0]) + "\n")
            with open("./person_names.txt", "a") as names_file:
                names_file.write(filename + "\n")
            # BUG FIX: the original used source+f with no path separator.
            shutil.move(image_path, destination)

    def add_person():
        """Capture a new training image with the camera and retrain."""
        now = datetime.now()
        local_time = now.strftime("%I-%M-%S_%Y-%d-%B")
        camera.capture(os.path.join(directory, local_time + '.jpg'), format="rgb")
        print('New person added')
        train_new_faces(directory)

    load_known_faces()
    train_new_faces(directory)

    while True:
        print("Capturing image.")
        # Grab a single frame of video from the RPi camera as a numpy array
        camera.capture(output, format="rgb")
        # Find all the faces and face encodings in the current frame of video
        face_locations = face_recognition.face_locations(output)
        print("Found {} faces in image.".format(len(face_locations)))
        face_encodings = face_recognition.face_encodings(output, face_locations)

        # Loop over each face found in the frame to see if it's someone we know.
        for face_encoding in face_encodings:
            # See if the face is a match for the known face(s)
            match = face_recognition.compare_faces(encoding_array, face_encoding)
            print(match)
            for idx in range(len(match)):
                if match[idx]:
                    # Training file names are '<person>.jpg'.
                    person_name = name_array[idx].split('.')[0]
                    # BUG FIX: the original printed for every candidate,
                    # including non-matches; print only on a match.
                    print("I see someone named {}!".format(person_name))
if __name__ == '__main__':
    main()  # start the capture/recognition loop only when run as a script
7862870875 | #!/usr/bin/env python3
import sys
import re
import glob
import prettytable
import pandas as pd
import argparse
import os
def readFile(filename):
    """Return every line of *filename* with surrounding whitespace stripped."""
    with open(filename, "r") as handle:
        return [raw.strip() for raw in handle]
def getStatusLine(fileContents):
    """Index of the job-status line in an LSF log, or -1 if absent.

    The status line sits two lines above the 'Resource usage' header;
    like the original, the *last* such header wins if there are several.
    """
    status_index = -1
    for idx, line in enumerate(fileContents):
        if re.match(r"Resource", line):
            status_index = idx - 2
    return status_index
def splitStrip(i):
    """Return the value part of a 'key : value' line, whitespace-stripped.

    Splits on the first ':' only, so values that themselves contain a
    colon (e.g. timestamps) are returned intact — the original split on
    every colon and silently truncated such values.
    """
    return i.split(":", 1)[1].strip()
def getJobDetails(fileContents, lineNumber):
    """Parse the LSF resource-usage summary starting at *lineNumber*.

    Returns a dict with the job status line plus cpu_time, max_mem,
    total_mem, max_proc, max_threads and run_time.  Fields absent from
    the log are returned as None — the original raised UnboundLocalError
    when any expected line was missing.
    """
    def _value(line):
        # Text after the first ':' on a 'key : value' line.
        return line.split(":", 1)[1].strip()

    details = {
        "cpu_time": None,
        "status": fileContents[lineNumber],
        "max_mem": None,
        "total_mem": None,
        "max_proc": None,
        "max_threads": None,
        "run_time": None,
    }
    # Maps a line prefix to the detail key it populates; a later line with
    # the same prefix overwrites an earlier one, as in the original.
    prefixes = {
        "CPU": "cpu_time",
        "Max Memory": "max_mem",
        "Total Requested Memory": "total_mem",
        "Max Processes": "max_proc",
        "Max Threads": "max_threads",
        "Run time": "run_time",
    }
    for line in fileContents[lineNumber:]:
        for prefix, key in prefixes.items():
            if line.startswith(prefix):
                details[key] = _value(line)
                break
    return details
def getStartEnd(fileContents):
    """Extract the job start and termination timestamps from an LSF log.

    Returns {'start': ..., 'end': ...}; a missing timestamp yields None
    (the original raised UnboundLocalError in that case).  The prefix is
    stripped via replace(), so the leading space is preserved exactly as
    before.
    """
    start = None
    end = None
    for line in fileContents:
        if re.match(r"Started at", line):
            start = line.replace("Started at", "")
        if re.match(r"Terminated at", line):
            end = line.replace("Terminated at", "")
    return {"start": start, "end": end}
def pullOutJobData(fileName):
    """Parse one .farm log file into a dict of job details.

    Returns {'status': 'running'} when the resource summary has not been
    written yet; otherwise the detail dict from getJobDetails merged with
    start/end timestamps, with failed jobs annotated by the preceding
    log line in 'status'.
    """
    contents = readFile(fileName)
    status_index = getStatusLine(contents)
    if status_index == -1:
        return {"status": "running"}

    status = contents[status_index]
    if not re.match(r"Successfully completed", status):
        # For failed jobs, the line just above carries extra context.
        status = status + " - " + contents[status_index - 1]

    details = getJobDetails(contents, status_index)
    details.update(getStartEnd(contents))
    details.update({"status": status})
    return details
class job:
    """One LSF job, hydrated from its .farm log file.

    Attributes beyond ``status`` are only set when the job has finished;
    callers check ``status == "running"`` before using them.
    """

    # Counts finished jobs constructed so far; details() prints its header
    # row only while exactly one finished job exists.
    counter = 0
    def __init__(self, fileName):
        self.fileName = fileName
        temp = pullOutJobData(fileName)
        self.status = temp['status']
        if self.status == "running":
            # Running jobs have no resource summary yet; leave the rest unset.
            return
        self.cpu_time = temp['cpu_time']
        self.max_mem = temp['max_mem']
        self.total_mem = temp['total_mem']
        self.max_proc = temp['max_proc']
        # NOTE(review): 'max_threads' is parsed by getJobDetails but never
        # copied onto the job — confirm whether it should be reported.
        self.run_time = temp['run_time']
        self.start = temp['start']
        self.end = temp['end']
        job.counter += 1
    def details(self):
        """Print this job as a tab-separated row (header for the first job)."""
        job_details = self.__dict__.items()
        if self.counter == 1:
            for k,v in job_details:
                print("%s" % k, end = "\t")
            print()
        for k,v in job_details:
            print("%s" % v, end = "\t")
        print()
    def forTable(self, onlyHeader = False):
        """Return the attribute names (onlyHeader=True) or values as a list."""
        job_details = self.__dict__.items()
        x = list()
        if onlyHeader:
            for k,v in job_details:
                x.append(k)
        if not onlyHeader:
            for k,v in job_details:
                x.append(v)
        return(x)
#print(f"{self.fileName}\t{self.status}\t{self.start}\t{self.end}\t{self.cpu_time}\t{self.max_mem}\t{self.total_mem}")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("--progArgs", default = "pretty", help="output type: [pretty, csv]")
parser.add_argument("--comments", default = "", help="filter for names")
args = parser.parse_args()
if not os.path.isdir(".bsub/"):
print("No farm job log found. See if .bsub exists", file = sys.stderr)
exit(0)
# Search files
search = ".bsub/*" + args.comments + "*.farm"
files = glob.glob(search)
c = 0
lof = list()
for f in files:
j = job(f)
if j.status == "running":
continue
if c == 0:
colnames = j.forTable(onlyHeader = True)
table = prettytable.PrettyTable(colnames)
lof.append(colnames)
l = j.forTable(onlyHeader = False)
table.add_row(l)
lof.append(l)
c += 1
if args.progArgs == "pretty":
print(table)
if args.progArgs == "csv":
df = pd.DataFrame(lof)
print(df.to_csv(index=False, header = False))
| vjbaskar/cscipipe | farm/farmhist.py | farmhist.py | py | 4,130 | python | en | code | 0 | github-code | 36 |
43135278080 |
# Mnemonic: em.py
# Abstract: Run em (Expectation Maximisation)
#
# Author: E. Scott Danies
# Date: 06 March 2019
#
# Acknowledgements:
# This code is based in part on information gleaned from, or
# code examples from the following URLs:
# https://github.com/minmingzhao?tab=repositories
#
# ------------------------------------------------------------------
import sys
from time import time
import numpy as np
import scipy as sp
import pandas as pd
import sklearn
from sklearn import metrics
from sklearn.cluster import KMeans
from sklearn.mixture import GaussianMixture
from sklearn.datasets import load_digits
from sklearn.decomposition import PCA
from sklearn.preprocessing import scale
# ---- my tools -----------------------------------------------------------
from tools import printf,fprintf,sprintf
from tools import parse_args,bool_flag,val_flag,str_flag,int_flag
from tools import print_pd
from plotter import c_cplot,gt_cplot
def usage() :
    """Print the command-line synopsis to stdout.

    NOTE(review): not invoked anywhere in this chunk (parse_args handles
    errors itself) — confirm whether it is still needed.
    """
    printf( "usage: kcluster.py [-f] [-i iterations] [-k k-value] train-path test-path\n" )
# ----------------------------------------------------------------------------------------------------------
# -- parse command line and convert to convenience variables -----------------------------------------------
# Flag table consumed by tools.parse_args: key -> (option name, type, default).
flags = {  # define possible flags and the default: map key, type, default value
    "-d": ("output-dir", str_flag, "/tmp"),
    "-f": ("classfirst", bool_flag, False),
    "-i": ("iterations", int_flag, 10),
    "-k": ("k-components", int_flag, 2),  # number of clusters to divide into
    "-s": ("plot-samp-rate", int_flag, 10)  # to keep plot output sizes reasonable, sample at x% for plots
}
opts = { }  # map where option values or defaults come back
pparms = parse_args( flags, opts, "training-file [testing-file]" )
if pparms == None or len( pparms ) < 2 :
    printf( "missing filenames on command line (training testing)\n" )
    sys.exit( 1 )
train_fn = pparms[0]  # file names; training validation (test)
test_fn = pparms[1];
components = opts["k-components"]
out_dir = opts["output-dir"]
# Fixed seed for reproducible runs.
np.random.seed( 17 )
# -----------------------------------------------------------------------------------------
train_data = pd.read_csv( train_fn, sep= ',' )  # suck in datasets
# NOTE(review): test_data is loaded but never used below — confirm intended.
test_data = pd.read_csv( test_fn, sep= ',' )
train_n, train_p = train_data.shape  # number of training instances and parameters
test_n, test_p = test_data.shape
if opts["classfirst"] :  # class target value is in col 0
    data = train_data.iloc[:,1:train_p]
    labels = train_data.values[:,0]  # get just the first column which has 'truth'
else :
    data = train_data.iloc[:,0:train_p-1]
    labels = train_data.values[:,-1]  # get just the last column which has 'truth'
# Rebind data from a DataFrame to the underlying ndarray for sklearn.
data = data.values
data_n, data_p = data.shape  # data instances and parameters
printf( "data: %d instances %d parameters\n", data_n, data_p )
#--------------------------------------------------------------------------------------------
printf( "#%-5s %-5s %-5s %-5s %-5s %-5s %-5s\n", "ACC", "HOMO", "COMPL", "VM", "ARAND", "MI", "CH-idx" )
# Fit a fresh Gaussian mixture per iteration and report clustering metrics.
for i in range( opts["iterations"] ) :
    em = GaussianMixture( n_components=components, n_init=13, covariance_type="full" ).fit( data )
    guess = em.predict( data )
    # NOTE(review): accuracy_score on raw cluster IDs assumes cluster
    # numbering matches the class labels, which EM does not guarantee —
    # confirm this is intentional alongside the permutation-invariant
    # metrics below.
    acc = metrics.accuracy_score( labels, guess )
    homo = metrics.homogeneity_score( labels, guess )  # compare the true labels to those em predicted
    comp = metrics.completeness_score( labels, guess )
    vm = metrics.v_measure_score( labels, guess )
    arand = metrics.adjusted_rand_score( labels, guess )
    mi = metrics.adjusted_mutual_info_score( labels, guess, average_method="arithmetic" )
    ch = metrics.calinski_harabaz_score( data, guess );
    printf( " %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f %6.3f\n", acc, homo, comp, vm, arand, mi, ch)
    if i == 0 :  # just plot the first
        tokens = train_fn.split( "/" );  # build file name as emax_<data-type>_<clusters>.eps
        tokens = tokens[-1].split( "_" )
        title = sprintf( "Exp Max %s k=%d", tokens[0], components )
        gfname = sprintf( "%s/emax_%s_%d.eps", out_dir, tokens[0], components )
        # pretend guess is ground truth and plot predicted cluster
        gt_cplot( data, guess, components, gfname, title, sample_pct=opts["plot-samp-rate"], cpattern=2 )
sys.exit( 0 )
| ScottDaniels/gtcs7641 | a3/em.py | em.py | py | 4,623 | python | en | code | 0 | github-code | 36 |
3093110826 | starting_number = int(input())
final_number = int(input())
magic_number = int(input())
combinations = 0
is_found = False
for i in range(1, starting_number + 1):
for j in range(1, final_number + 1):
combinations += 1
if i + j == magic_number:
is_found = True
break
else:
is_found = False
if is_found == True:
print(f"Combination N:{combinations} ({i} + {j} = {magic_number})")
elif is_found == False:
print(f"{combinations} combinations - neither equals {magic_number}")
| ivn-svn/SoftUniPythonPath | Programming Basics with Python/7_nested_loops/lab/4_magicn.py | 4_magicn.py | py | 545 | python | en | code | 1 | github-code | 36 |
15290025439 | # -*- coding: utf-8 -*-
from threading import Thread, Event
from yasc.utils import CONFIG, state, ZoneAction, in_production, ControllerMode
from datetime import datetime, timedelta
from time import sleep
import logging
# In production the GPIO-backed implementations come from pi_controller;
# otherwise provide in-memory stand-ins so the app can run off-Pi.
if in_production():
    from yasc.pi_controller import get_active_zone, activate_zone, stop_sprinkler
else:
    # Currently "active" zone for the development stubs; 0 means none.
    # (Double underscores do not mangle at module scope, so this is only
    # conventionally private.)
    __dev_zone = 0

    def activate_zone(zone):
        """Pretend to open the valve for *zone* and record it in state."""
        logging.debug('Activation zone {0}.'.format(zone))
        global __dev_zone
        __dev_zone = zone
        state.zone_on(zone)

    def get_active_zone():
        """Return the zone currently running (0 if none)."""
        return __dev_zone

    def stop_sprinkler():
        """Stop the active zone, if any, and clear the recorded state."""
        logging.debug('Stopping sprinkler zone')
        global __dev_zone
        if __dev_zone > 0:
            logging.debug('Stopping zone {0}.'.format(__dev_zone))
            state.zone_off(__dev_zone)
            __dev_zone = 0
# FIXME: use thread pool
class ManualRunner(Thread):
    """Background thread that runs a single sprinkler zone for a fixed
    interval (minutes in production, seconds in development)."""

    def __init__(self, zone, interval):
        Thread.__init__(self, name='Zone Run')
        self.__interval = interval
        self.__zone = zone
        self.__stop = Event()  # set to request early termination

    def stop(self):
        """Ask the run to terminate after the current 1 s poll."""
        logging.info('Stop manual run for zone {0}.'.format(self.__zone))
        if not self.__stop.is_set():
            self.__stop.set()

    def run(self):
        state.single_zone_on()
        start_time = datetime.now()
        activate_zone(self.__zone)
        # Hoisted out of the polling loop: the run duration is invariant
        # while the zone is active (the original rebuilt it every second).
        duration = timedelta(minutes=self.__interval) if in_production() else timedelta(seconds=self.__interval)
        while not self.__stop.is_set():
            if datetime.now() - start_time > duration:
                self.__stop.set()
            sleep(1)
        stop_sprinkler()
        state.run_off()
        logging.info('Manual run for zone {0} end.'.format(self.__zone))
class CycleRunner(Thread):
    """Background thread that waters every configured zone in sequence.

    Each zone runs for its own ``interval`` attribute when present,
    otherwise for the default interval passed to the constructor
    (minutes in production, seconds in development).
    """

    def __init__(self, interval):
        Thread.__init__(self, name='Cycle Run')
        self.__interval = interval  # fallback per-zone run time
        self.__stop = Event()       # set to request early termination

    def stop(self):
        """Ask the cycle to terminate after the current 1 s poll."""
        logging.info('Stop cycle.')
        if not self.__stop.is_set():
            self.__stop.set()

    def __start_zone(self, zone_index):
        """Activate the zone at *zone_index*; return (start_time, duration)."""
        zone_info = CONFIG.active_zones[zone_index]
        activate_zone(zone_info.zone)
        # A zone may override the default interval in the configuration.
        interval = getattr(zone_info, "interval", self.__interval)
        logging.info('Running zone {0} for {1} min/sec.'.format(zone_info.zone, interval))
        return datetime.now(), timedelta(minutes=interval) if in_production() else timedelta(seconds=interval)

    def run(self):
        logging.info('Starting cycle.')
        state.cycle_on()
        zone_index = 0
        zone_count = len(CONFIG.active_zones)
        start_time, interval = self.__start_zone(zone_index)
        while not self.__stop.is_set():
            now = datetime.now()
            if now - start_time > interval:
                # Current zone is done; advance or finish the cycle.
                zone_index += 1
                if zone_index < zone_count:
                    stop_sprinkler()
                    start_time, interval = self.__start_zone(zone_index)
                else:
                    self.__stop.set()
            sleep(1)
        stop_sprinkler()
        state.run_off()
        logging.info('Cycle end.')
class ZoneController(Thread):
    """Dispatcher thread that consumes zone actions from the state queue.

    At most one runner (manual or cycle) is active at a time: every
    incoming action first stops whatever is currently running.
    """

    def __init__(self):
        Thread.__init__(self, name='Zone Controller')
        self.__stop = Event()          # set to shut the controller down
        self.__manual_runner = None    # active ManualRunner, if any
        self.__cycle_runner = None     # active CycleRunner, if any

    def __stop_cycle_runner(self):
        """Stop and join the cycle runner if it is alive."""
        if self.__cycle_runner is not None and self.__cycle_runner.is_alive():
            logging.warning('Cycle is running. Terminating...')
            self.__cycle_runner.stop()
            self.__cycle_runner.join()
            self.__cycle_runner = None

    def is_cycle_running(self):
        """True while a cycle thread is alive."""
        return self.__cycle_runner is not None and self.__cycle_runner.is_alive()

    def __stop_manual_runner(self):
        """Stop and join the manual runner if it is alive."""
        if self.__manual_runner is not None and self.__manual_runner.is_alive():
            logging.warning('Manual runner is acitve. Terminating...')
            self.__manual_runner.stop()
            self.__manual_runner.join()
            self.__manual_runner = None

    def is_manual_running(self):
        """True while a single-zone manual run thread is alive."""
        return self.__manual_runner is not None and self.__manual_runner.is_alive()

    def get_active_zone(self):
        """Delegate to the module-level (real or stubbed) implementation."""
        return get_active_zone()

    def stop(self):
        """Shut down: stop runners, unblock the queue, and join this thread."""
        if not self.__stop.is_set():
            self.__stop.set()
            self.__stop_manual_runner()
            self.__stop_cycle_runner()
            # TERMINATE unblocks run()'s queue wait so the loop can exit.
            state.run_zone_action((ZoneAction.TERMINATE, 0))
            self.join()

    def control_mode_changed(self):
        """When the controller is switched OFF, queue a STOP action."""
        if state.active_controller_mode() is ControllerMode.OFF:
            state.run_zone_action((ZoneAction.STOP, 0))

    def __get_zone_index(self, zone):
        """Index of *zone* in CONFIG.active_zones, or -1 if not configured."""
        for index, zone_info in enumerate(CONFIG.active_zones):
            if zone_info.zone == zone:
                return index
        return -1

    def __zone_in_active_zones(self, zone):
        """True when *zone* is one of the configured active zones."""
        for zone_info in CONFIG.active_zones:
            if zone_info.zone == zone:
                return True
        return False

    def __queue_processor(self, queue):
        """Handle one (action_type, event_value) item from the state queue."""
        action_type, event_value = queue.get()
        logging.debug('Received action {0} with event value {1}.'.format(action_type, event_value))
        # Any action preempts whatever is currently running.
        self.__stop_manual_runner()
        self.__stop_cycle_runner()
        if action_type in [ZoneAction.TERMINATE, ZoneAction.STOP]:
            # Leave dummy for now
            pass
        elif action_type == ZoneAction.RUN_CYCLE:
            self.__cycle_runner = CycleRunner(CONFIG.default_interval)
            self.__cycle_runner.start()
        elif action_type == ZoneAction.NEXT:
            # Advance to the zone after the one currently active; an
            # inactive zone yields index -1, so NEXT starts the first zone.
            current_active = get_active_zone()
            current_index = self.__get_zone_index(current_active)
            next_index = current_index + 1
            if -1 < next_index < len(CONFIG.active_zones):
                zone = CONFIG.active_zones[next_index].zone
                self.__manual_runner = ManualRunner(zone, CONFIG.default_interval)
                self.__manual_runner.start()
            else:
                logging.debug('Next index {0} outside active zone range. Stop yasc.'.format(next_index))
        elif action_type == ZoneAction.ZONE:
            if self.__zone_in_active_zones(event_value):
                self.__manual_runner = ManualRunner(event_value, CONFIG.default_interval)
                self.__manual_runner.start()
            else:
                logging.error('Zone {0} is not an active zone!'.format(event_value))
        queue.task_done()

    def run(self):
        logging.info('Zone Controller started')
        while not self.__stop.is_set():
            state.process_queue(self.__queue_processor)
        logging.info('Zone Controller stopped')
| asmyczek/YASC | yasc/zone_controller.py | zone_controller.py | py | 6,739 | python | en | code | 1 | github-code | 36 |
21366953261 | '''
Link: https://www.lintcode.com/problem/shortest-path-in-undirected-graph/description
'''
# Uses bidirectional BFS. I closesly followed the teachings on Jiuzhang.com.
from collections import deque
class Solution:
    """
    @param graph: a list of Undirected graph node
    @param A: nodeA
    @param B: nodeB
    @return: the length of the shortest path
    """
    def shortestPath(self, graph, A, B):
        # Bidirectional BFS: grow one level from each endpoint in turn and
        # stop as soon as the frontiers touch.  Returns -1 when A and B
        # are not connected.
        if A == B:
            return 0

        frontier_a, frontier_b = deque([A]), deque([B])
        seen_a, seen_b = {A}, {B}
        dist = 0

        while frontier_a and frontier_b:
            dist += 1
            if self._expand_level(frontier_a, seen_a, seen_b):
                return dist
            dist += 1
            if self._expand_level(frontier_b, seen_b, seen_a):
                return dist

        return -1

    def _expand_level(self, frontier, seen_here, seen_there):
        """Advance *frontier* by one BFS level; True if it met the other side."""
        for _ in range(len(frontier)):
            node = frontier.popleft()
            for neighbor in node.neighbors:
                if neighbor in seen_here:
                    continue
                if neighbor in seen_there:
                    return True
                frontier.append(neighbor)
                seen_here.add(neighbor)
        return False
| simonfqy/SimonfqyGitHub | lintcode/medium/814_shortest_path_in_undirected_graph.py | 814_shortest_path_in_undirected_graph.py | py | 1,593 | python | en | code | 2 | github-code | 36 |
18287559618 | from urllib.request import urlopen
from bs4 import BeautifulSoup
# Repeatedly follow the link found at a fixed position in each page's list
# of anchors, printing the anchor text at every hop (py4e "follow links"
# style exercise).
url = input('Enter URL:')
count = int(input('Enter count:'))  # number of hops to follow
position = int(input('Enter position:'))-1  # user enters 1-based position
html = urlopen(url).read()
soup = BeautifulSoup(html,"html.parser")
href = soup('a')  # all <a> tags on the current page
#print href
for i in range(count):
    # Text of the anchor we are about to follow is printed first; the
    # anchors of the final fetched page are never printed.
    link = href[position].get('href', None)
    print (href[position].contents[0])
    html = urlopen(link).read()
    soup = BeautifulSoup(html,"html.parser")
href = soup('a') | Abhishek32971/python_my_code | college/ActivitySet01/problem16.py | problem16.py | py | 473 | python | en | code | 1 | github-code | 36 |
73198190823 | import json
import re
from typing import Any, Dict, List, Text
from airflow.exceptions import AirflowException
from airflow.providers.google.cloud.hooks.datacatalog import CloudDataCatalogHook
import google.auth.transport.requests
from google.auth.transport.urllib3 import AuthorizedHttp
from grizzly.config import Config
from grizzly.etl_action import parse_table
from grizzly.grizzly_typing import TGrizzlyOperator
_TPolicyTags = Dict[str, str]
class DataCatalogTag:
  """Perform actions with DataCatalog.

  Should be used for Data Catalog Table and column tags.
  Assign Column level security throw DataCatalog Taxonomy.

  Attributes:
    execution_context (GrizzlyOperator): Instance of GrizzlyOperator executed.
    column_policy_tags (list[dict]): List of Column level policy (security) tags
      to be applied in format
      { 'column_name': 'column_policy_tag_id'}
    datacatalog_tags (list[dict]): Content of JSON file defined in
      [data_catalog_tags] attribute of task YML file. Content is rendered as
      JINJA2 template and loaded as list of dictionaries with definition of
      table and column tags to be applied.
    authed_http (google.auth.transport.urllib3.AuthorizedHttp): Authorized http
      connection for work with Data Catalog Rest API.
    base_api_url (string): Base URL for work with DataCatalog Rest API.
    dc_hook (CloudDataCatalogHook):
      Airflow predefined hooks for work with GCP Data Catalog.
  """

  def __init__(self,
               execution_context: TGrizzlyOperator,
               column_policy_tags: List[_TPolicyTags],
               datacatalog_tags: List[Text]) -> None:
    """Set up DataCatalogTag instance.

    If [column_policy_tags] or [datacatalog_tags] was defined set up
    correspondent class properties.

    Args:
      execution_context (TGrizzlyOperator): Instance of GrizzlyOperator
        executed.
      column_policy_tags (list): List of Column level policy (security)
        tags to be applied in format
        {'column_name': 'taxonomy|tag_hierarchy'}
        Contains column level security configuration.
      datacatalog_tags (list): Content of JSON file defined in
        [data_catalog_tags] attribute of task YML file. Content is rendered as
        JINJA2 template and loaded as list of dictionaries with definition of
        table and column tags to be applied. Contains Table and column tags.
    """
    self.execution_context = execution_context
    # Connection is only needed when there is actual tagging work to do.
    if column_policy_tags or datacatalog_tags:
      self.__setup_datacatalog_connection()
    if column_policy_tags:
      # Get list of DataCatalog security policy tag mapping
      self.column_policy_tags = self.__get_column_policy_tags_mapping(
          column_policy_tags)
    else:
      self.column_policy_tags = None
    if datacatalog_tags:
      self.datacatalog_tags = datacatalog_tags
    else:
      self.datacatalog_tags = None

  def __get_table_entry_id(self, target_table: Dict[str, str]) -> Any:
    """Get an DataCatalog EntryId by table name.

    Looks the entry up through its BigQuery "linked resource" name.
    """
    target_table = parse_table(target_table)
    resource_name = (f'//bigquery.googleapis.com/'
                     f'projects/{target_table["project_id"]}/'
                     f'datasets/{target_table["dataset_id"]}/'
                     f'tables/{target_table["table_id"]}')
    table_entry = self.dc_hook.lookup_entry(linked_resource=resource_name)
    return table_entry

  def __setup_datacatalog_connection(self) -> None:
    """Setup connection credentials for access Data Catalog API."""
    scopes = ['https://www.googleapis.com/auth/cloud-platform']
    # pylint: disable=unused-variable
    credentials, project = google.auth.default(scopes=scopes)
    auth_req = google.auth.transport.requests.Request()
    credentials.refresh(auth_req)
    self.authed_http = AuthorizedHttp(credentials)
    access_token = credentials.token
    # NOTE(review): the OAuth token is embedded in the URL query string;
    # it may leak into logs/proxies.  Prefer an Authorization header.
    self.base_api_url = (
        'https://datacatalog.googleapis.com/v1/{api_call}?access_token='
        + access_token)
    # setup datacatalog hooks
    self.dc_hook = CloudDataCatalogHook()

  def __get_column_policy_tags_mapping(
      self,
      column_policy_tags: List[_TPolicyTags]
  ) -> _TPolicyTags:
    """Return a list of all applicable taxonomies for job/table.

    Parse user defined format from task YML file and transform it into format
    consumable by DataCatalog Rest API.
    Method gets all taxonomy list on environment. Then select taxonomy defined
    by user and parses taxonomy tag hierarchy to find [column_policy_tag_id]
    that matches with taxonomy tag hierarchy defined by user in task YML file
    attribute [column_policy_tags].

    Args:
      column_policy_tags (list[dict]): List of column policy tag definition to
        be parsed in format: {'column_name': 'taxonomy|tag_hierarchy'}

    Raises:
      AirflowException: Raise error in case if Column policy taxonomy as not
        defined on target GCP project or if user defined reference to policy tag
        that does not exist.

    Returns:
      (dict): List of column policy tag definition in format
        {'column_name': 'column_policy_tag_id'}
    """
    column_policy_tags_mapping = {}
    # get a set of all applicable taxonomies
    # accordingly to job YML configuration [column_policy_tags]
    requested_taxonomies = set()
    for c in column_policy_tags:
      for v in c.values():
        # Add taxonomy name to set
        requested_taxonomies.add(v.split('|')[0])
    # Get list of DataCatalog taxonomies
    api_call = Config.DEFAULT_DATACATALOG_TAXONOMY_LOCATION
    session_url = self.base_api_url.format(api_call=api_call)
    r = self.authed_http.urlopen(method='get', url=session_url)
    taxonomy_mapping = {
    }
    # looks like {'taxonomy_name': 'projects/prj_id/locations/us/taxonomies/64'}
    if r.status == 200:
      response = json.loads(r.data)
      # work only with taxonomies that were requested in YML
      taxonomy_mapping = {
          i['displayName']: i['name']
          for i in response['taxonomies']
          if i['displayName'] in requested_taxonomies
      }
      # extract raw list of tags for each taxonomy
      for k, v in taxonomy_mapping.items():
        taxonomy_tag_list_raw = self.__get_taxonomy_policy_tags_raw(v)
        for t in taxonomy_tag_list_raw:
          column_policy_tags_mapping.update(
              self.__get_tag_hierarchy(
                  taxonomy_name=k, raw_data=taxonomy_tag_list_raw, tag=t))
    else:
      raise AirflowException(
          ('Could not receive a list of taxonomies for '
           f'project {Config.GCP_PROJECT_ID}. Check security configuration '
           'for service account.')
      )
    # iterate requested tags.
    # raise Exception if taxonomy does not exist in project
    for ct in column_policy_tags:
      for column, tag in ct.items():
        if tag not in column_policy_tags_mapping:
          raise AirflowException(
              (f'Check your YML configuration. Column [{column}] : Tag [{tag}] '
               'does not exist in GCP Data Catalog.')
          )
    # transform array column policy mapping into dictionary with correct tag Ids
    column_policy_tags_resultset = dict()
    for c in column_policy_tags:
      for key in c:
        column_policy_tags_resultset[key] = column_policy_tags_mapping[c[key]]
    return column_policy_tags_resultset

  def __get_tag_hierarchy(self,
                          taxonomy_name: str,
                          raw_data: Any,
                          tag: Dict[str, Any],
                          tag_display_name: str = '',
                          tag_id: str = '') -> Dict[str, Any]:
    """Get Data Catalog Taxonomy tag hierarchy mapping.

    Method performs recursive scan of taxonomy tags hierarchy and creates
    mapping between DataCatalog policy tag id and human-readable
    representation of this tag in format similar to 'taxonomy|tag_hierarchy'

    Args:
      taxonomy_name (string): Human readable taxonomy name from
        [column_policy_tags] attribute defined in task YML file.
      raw_data: Raw json response from DataCatalog Rest API.
      tag (dict): Rest API definition of policy tag. More details about format
        of dictionary you can find here:
        https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.taxonomies.policyTags#PolicyTag
      tag_display_name (string): Tag name in human-readable format
        'parent_tag_1|parent_tag_1.1|tag'
      tag_id (string): Tag id in format supported by Data Catalog Rest API.
        projects/{project}/locations/{location}/taxonomies/{taxonomies}/policyTags/{policytag}

    Returns:
      (dict): List of column policy tag definition in format
        {'taxonomy_name|tag_display_name': 'tag_id'}
        For example:
        {
           'proto_column_access_policy|PII|high':
               'projects/prj/locations/us/taxonomies/11/policyTags/22'
        }
    """
    # parse raw taxonomy data and return tag hierarchy
    parent_id = tag.get('parentPolicyTag', None)
    # tag_id always stays the id of the DEEPEST tag the recursion started at.
    tag_id = tag_id if tag_id else tag['name']
    tag_display_name = '|'.join([tag['displayName'], tag_display_name
                                ]) if tag_display_name else tag['displayName']
    # if tag not in a root of hierarchy
    if parent_id:
      # get parent tag details
      parent_tag = list(filter(lambda x: x['name'] == parent_id, raw_data))[0]
      return self.__get_tag_hierarchy(
          taxonomy_name=taxonomy_name,
          raw_data=raw_data,
          tag=parent_tag,
          # BUG FIX: pass the ACCUMULATED display name, not just the current
          # tag's displayName.  Passing tag['displayName'] dropped all deeper
          # levels, so hierarchies more than two levels deep (e.g.
          # PII|high|ssn) were truncated to 'PII|high'.
          tag_display_name=tag_display_name,
          tag_id=tag_id)
    else:
      return {taxonomy_name + '|' + tag_display_name: tag_id}

  def __get_taxonomy_policy_tags_raw(self,
                                     taxonomy_id: str) -> List[Dict[str, Any]]:
    """Get a list of all policy tags inside Data Catalog Policy Tags taxonomy.

    Next Rest API call is used
    https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.taxonomies.policyTags/list

    Args:
      taxonomy_id (string): Taxonomy id in format acceptable by Rest API
        projects/{project}/locations/{location}/taxonomies/{taxonomies}

    Raises:
      AirflowException: Raise exception in case if Data Catalog Rest API not
        able to retrieve list of tags inside taxonomy.

    Returns:
      (list(dict)): List of policy tags in format
        https://cloud.google.com/data-catalog/docs/reference/rest/v1/projects.locations.taxonomies.policyTags#PolicyTag
    """
    api_call = f'{taxonomy_id}/policyTags'
    session_url = self.base_api_url.format(api_call=api_call)
    r = self.authed_http.urlopen(method='GET', url=session_url)
    if r.status == 200:
      response = json.loads(r.data)
    else:
      raise AirflowException(
          f'Could not receive a tag list for taxonomy {taxonomy_id}.')
    return response['policyTags']

  def set_column_policy_tags(self, target_table: str) -> None:
    """Update column policy tags on target table.

    Assign Column policy tags from [self.column_policy_tags] to table columns on
    a base of column level security defined in attribute [column_policy_tags] of
    task YML file.

    Args:
      target_table (string): Name of a table on which you want to set up column
        level security.
    """
    if self.column_policy_tags:
      target_table = parse_table(target_table)
      table_schema_definition = self.execution_context.bq_cursor.get_schema(
          dataset_id=target_table['dataset_id'],
          table_id=target_table['table_id'])['fields']
      tagged_column_list = [*self.column_policy_tags
                           ]  # get list of tagged columns from dictionary
      # filter only columns that tagged
      # iterate schema and set policy tags
      for i in range(len(table_schema_definition)):
        cn = table_schema_definition[i]['name']
        if cn in tagged_column_list:
          table_schema_definition[i]['policyTags'] = {
              'names': [self.column_policy_tags[cn]]
          }
      # patch target table with updated fields
      self.execution_context.bq_cursor.patch_table(
          dataset_id=target_table['dataset_id'],
          table_id=target_table['table_id'],
          schema=table_schema_definition)
    return

  def set_table_tags(self, target_table: str) -> None:
    """Set DataCatalog tags on a table and table columns.

    Apply tags from self.datacatalog_tags. All tags that were not defined in
    JSON tag configuration file will be removed.

    Args:
      target_table (string): Target table for which data catalog tags should
        be assigned.

    Raises:
      Exception: Exception raised in case if Rest API does not return Data
        Catalog EntityId for requested table.
      AirflowException: Also exception raised in case if application is not
        able to delete or create tags due some security restriction or other
        issues.
    """
    if self.datacatalog_tags:
      # get entry_id for target_table
      entry_id = self.__get_table_entry_id(target_table)
      # parse entry_id
      entry_id_parsed = re.match(
          (r'^projects/(?P<project_id>.+)/locations/(?P<location>.+)/'
           r'entryGroups/(?P<entry_group>.+)/entries/(?P<entry_id>.+)$'),
          entry_id.name)
      if not entry_id_parsed:
        raise AirflowException(
            f'Could not extract entity_id for [{target_table}].')
      # get a list of tags already assigned to table
      existing_table_tags = self.dc_hook.list_tags(
          location=entry_id_parsed['location'],
          entry_group=entry_id_parsed['entry_group'],
          entry=entry_id_parsed['entry_id'],
          project_id=entry_id_parsed['project_id'],
          page_size=500)
      # construct a list of (template, column) for requested tags
      requested_tags = [
          (t['template'], t.get('column', '')) for t in self.datacatalog_tags
      ]
      # drop existing tags in case of importance
      for et in existing_table_tags:
        tag_name = et.name
        tag_template = et.template
        tag_column = getattr(et, 'column', '')
        if (tag_template, tag_column) in requested_tags:
          # drop existing tag first for avoid ERROR 409
          api_call = f'{tag_name}'
          session_url = self.base_api_url.format(api_call=api_call)
          r = self.authed_http.urlopen(method='DELETE', url=session_url)
          if r.status != 200:
            raise AirflowException(
                (f'Could not delete tag from table table.\n'
                 f'ERROR: {r.status} - {r.data}')
            )
      for tag in self.datacatalog_tags:
        api_call = f'{entry_id.name}/tags'
        session_url = self.base_api_url.format(api_call=api_call)
        session_body = json.dumps(tag)
        r = self.authed_http.urlopen(
            method='POST', url=session_url, body=session_body)
        if r.status != 200:
          raise AirflowException(
              (f'Could not create new tag on target table. {tag} \n'
               f'ERROR: {r.status} - {r.data}')
          )
    return
| google/grizzly | airflow/plugins/grizzly/data_catalog_tag.py | data_catalog_tag.py | py | 15,091 | python | en | code | 51 | github-code | 36 |
25677729371 | import cv2
import torch
from PIL import Image
from utils.segmenter import Segmenter
from utils.type_conversion import *
def resize(img, short_size):
    """Scale *img* so its SHORTER side equals short_size, keeping aspect.

    Bug fix: the old code computed the new size the wrong way round, so the
    LONGER side ended up equal to short_size (the image came out smaller
    than requested).  test_video() in this module scales the short side to
    short_size; this now matches that behaviour.  PIL's Image.resize takes
    a (width, height) tuple.
    """
    w, h = img.size
    if w < h:
        # Portrait: width is the short side.
        nw, nh = short_size, int(h * short_size / w)
    else:
        # Landscape (or square): height is the short side.
        nw, nh = int(w * short_size / h), short_size
    return img.resize((nw, nh))
def test_image(args, model):
    """Run hair/face segmentation on a single image file.

    args is an argparse-style namespace: image (path), gpu, resize,
    detector ('dlib' or 'faceboxes'), remove_small_area, save, unshow.
    The result is optionally saved to args.save and shown on screen.
    """
    # Import the chosen detector backend lazily so the other one is not
    # required to be installed.
    if args.detector == 'dlib':
        import dlib
    elif args.detector == 'faceboxes':
        from utils.face_detector import FaceDetectorFaceboxes
    model.eval()
    device = torch.device("cuda" if args.gpu else "cpu")
    image = Image.open(args.image).convert('RGB')
    if args.resize > 0:
        image = resize(image, args.resize)
    detector = None
    if args.detector == 'dlib':
        detector = dlib.get_frontal_face_detector()
    elif args.detector == 'faceboxes':
        MODEL_PATH = 'model/faceboxes.pb'
        detector = FaceDetectorFaceboxes(MODEL_PATH, gpu_memory_fraction=0.25, visible_device_list='0')
    segmenter = Segmenter(model, device, detector, mode=args.detector)
    # Segmenter works on OpenCV (BGR ndarray) images; convert both ways.
    result = segmenter.segment(PIL2opencv(image), args.remove_small_area)
    result = opencv2PIL(result)
    if args.save:
        result.save(args.save)
    if not args.unshow:
        result.show()
    image.show()
def test_video(args, model):
    """Run segmentation frame-by-frame on a video file or webcam stream.

    args.video is a path, or the string '0' for the default camera.
    Frames are optionally resized so the short side equals args.resize,
    segmented, optionally shown, and optionally written to args.save as
    MJPG.  Press 'q' to stop.
    """
    # Bug fix: dlib / FaceDetectorFaceboxes were referenced below without
    # ever being imported in this function (test_image does the same lazy
    # import), so selecting either detector raised NameError here.
    if args.detector == 'dlib':
        import dlib
    elif args.detector == 'faceboxes':
        from utils.face_detector import FaceDetectorFaceboxes
    # NOTE(review): unlike test_image, model.eval() is never called here —
    # confirm whether that is intentional.
    if args.video == '0':
        cap = cv2.VideoCapture(0)
    else:
        cap = cv2.VideoCapture(args.video)
    w_win = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
    h_win = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
    print(w_win, h_win)
    if args.resize > 0:
        # Scale so the short side equals args.resize.  In the cv2 calls
        # below the (nh, nw) tuple is (width, height).
        short_size = args.resize
        if w_win > h_win:
            nw, nh = short_size, int(w_win * short_size / h_win)
        else:
            nw, nh = int(h_win * short_size / w_win), short_size
    else:
        nw, nh = w_win, h_win
    detector = None
    if args.detector == 'dlib':
        detector = dlib.get_frontal_face_detector()
    elif args.detector == 'faceboxes':
        MODEL_PATH = 'model/faceboxes.pb'
        detector = FaceDetectorFaceboxes(MODEL_PATH, gpu_memory_fraction=0.25, visible_device_list='0')
    device = torch.device("cuda" if args.gpu else "cpu")
    segmenter = Segmenter(model, device, detector, mode=args.detector)
    if args.save:
        fourcc = cv2.VideoWriter_fourcc(*'MJPG')
        out = cv2.VideoWriter(args.save, fourcc, 20, (nh, nw), True)
    while True:
        frame = cap.read()[1]
        if frame is None:
            break
        frame = cv2.resize(frame, (nh, nw))
        result = segmenter.segment(frame, args.remove_small_area)
        if args.save:
            out.write(result)
        if not args.unshow:
            cv2.imshow('image', result)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    if args.save:
        out.release()
| MondayYuan/HairSegmentation | scripts/test.py | test.py | py | 2,875 | python | en | code | 5 | github-code | 36 |
18050976874 | from django.urls import path
from .views import RegistrationView, CustomLoginView, CustomLogoutView, ProfileView, UserProfileUpdateView, UserEducationalUpdateView
# URL routes for the accounts app: registration/login/logout plus the
# profile pages and their two update forms.
urlpatterns = [
    # Authentication
    path('register/', RegistrationView.as_view(), name='register'),
    path('login/', CustomLoginView.as_view(), name='login'),
    path('logout/', CustomLogoutView.as_view(), name='logout'),
    # Profile
    path('profile/', ProfileView.as_view(), name='profile'),
    path('profile-update/', UserProfileUpdateView.as_view(), name='profile_update'),
    path('educational-update/', UserEducationalUpdateView.as_view(), name='educational_update'),
]
| Kamal123-cyber/skillshare | skillshare/skillapp/urls.py | urls.py | py | 618 | python | en | code | 0 | github-code | 36 |
33893578543 | #nf=open('/m/triton/scratch/elec/puhe/p/jaina5/Psmit_lstm_50_nbest_lm_cost','w')
#nf=open('/m/triton/scratch/elec/puhe/p/jaina5/ac_cost.50best.aff','w')
#nf=open('yle_nbest_50_pre','w')
#nf=open('rescore_72layer_100nbest_yle_20191119-133110.txt','w')
# Filter an n-best cost file down to the top `nbest` hypotheses per
# utterance.  Input lines look like "<utt>-<rank> <cost...>"; only lines
# whose rank suffix is <= nbest are copied to the output file.
nf=open('/m/triton/scratch/work/jaina5/kaldi/egs/yle_rescore/s5/lm_cost.psmit_rescore_50_yle_dev','w')
nbest=50
#with open('rescore_2.txt', "r", encoding="utf-8") as reader:
#with open('/scratch/work/jaina5/Bert/FinnishBert/outputckpoints_1506/lm_cost_final', "r", encoding="utf-8") as reader:
#with open('/m/triton/scratch/work/jaina5/Bert/FinnishBert_2.0/yle_nbest_1000_pre', "r", encoding="utf-8") as reader:
#with open('/m/triton/scratch/elec/puhe/p/jaina5/decode1150_yle-dev-new_morfessor_f2_a0.001_tokens_aff_rnn_interp_word+proj500+lstm1500+htanh1500x4+dropout0.2+softmax_e10.5_t365_i0.3_1000best/text', "r", encoding="utf-8") as reader:
#with open('/m/triton/scratch/elec/puhe/p/jaina5/decode1150_yle-test-new_morfessor_f2_a0.001_tokens_aff_rnn_interp_word+proj500+lstm1500+htanh1500x4+dropout0.2+softmax_e10.5_t365_i0.3_1000best/ac_cost', "r") as reader:
with open('/m/triton/scratch/work/jaina5/kaldi/egs/yle_rescore/s5/lm_cost.psmit_rescore_1000', "r", encoding="utf-8") as reader:
    while True:
        line = reader.readline()
        if not line:
            break
        line = line.strip()
        Splitted=line.split(" ", 1)
        if len(Splitted) == 1:
            Splitted.append(' ')  # pad so the write below always has a 2nd field
        tempsplit=Splitted[:]
        # Utterance ids end in "-<rank>"; keep only ranks 1..nbest.
        if int(tempsplit[0].rsplit('-',1)[1]) <= nbest:
            nf.write(Splitted[0]+' '+Splitted[1]+'\n')
nf.close() | aalto-speech/FinnishXL | FinnishXL/get_nbest_lists.py | get_nbest_lists.py | py | 1,619 | python | en | code | 1 | github-code | 36 |
6239431595 | from datetime import datetime
import json
from odd_utils import *
VERSION = "1.0"
def shallow_copy(data) -> dict:
    """Build the ODD structure description of *data*.

    A str argument is treated as a path to a JSON file, which is loaded and
    processed recursively; anything else is handed to traverse() directly.
    (The old explicit `list` branch was redundant — it did exactly what the
    final `else` did — so it was removed.)
    NOTE(review): the -> dict annotation is optimistic; traverse() may also
    return a list or a primitive marker.
    """
    if type(data) is str:
        with open(data, "r") as f:
            return shallow_copy(json.load(f))
    return traverse(data)
def traverse(data) -> dict:
    """Recursively map *data* onto ODD type markers.

    Primitives are replaced by their entry in odd_primitives (imported via
    `from odd_utils import *`); lists are described by their first element
    only; dicts are described key by key.  Empty/None values map to
    odd.EMPTY.value.
    """
    fields = dict()
    if(type(data) in odd_primitives):
        return odd_primitives[type(data)]
    elif(data is None or len(data) < 1):
        raise Exception("Data provided is either invalid or empty")
    elif(type(data) is list and len(data) > 0):
        # Only the first element is inspected — assumes homogeneous lists.
        temp_list = list()
        temp_list.append(traverse(data[0]))
        return temp_list
    elif(type(data) is dict and len(data) > 0):
        for key, value in data.items():
            d_type = type(value)
            if(d_type in odd_primitives):
                fields[key] = odd_primitives[d_type]
            elif(d_type is dict and len(value) > 0):
                fields[key] = traverse(value)
            elif(d_type is list and len(value) > 0):
                temp_list = list()
                temp_list.append(traverse(value[0]))
                fields[key] = temp_list
            else:
                fields[key] = odd.EMPTY.value
    else:
        # NOTE(review): for any other sized type (e.g. tuple, set) `key` is
        # unbound here and this raises NameError — confirm intended inputs.
        fields[key] = odd.EMPTY.value
    return fields
| SamuelMiddendorp/OpenDataDocumentor | odd_library.py | odd_library.py | py | 1,332 | python | en | code | 0 | github-code | 36 |
69960188586 | from django.contrib.auth.models import AbstractUser, Group
from django.db import models
class User(AbstractUser):
    """Site user: either a content CREATOR or a SUBSCRIBER.

    On every save the user is added to the Django auth group matching its
    role ('creators' / 'subscribers'); those groups must already exist.
    """
    CREATOR = 'CREATOR'
    SUBSCRIBER = 'SUBSCRIBER'

    ROLE_CHOICES = (
        (CREATOR, 'Créateur'),
        (SUBSCRIBER, 'Abonné'),
    )
    profile_photo = models.ImageField(verbose_name='Photo de profil')
    role = models.CharField(max_length=30, choices=ROLE_CHOICES, verbose_name='Rôle')
    follows = models.ManyToManyField(
        'self',  # related model: users follow other users, hence the same model
        limit_choices_to={'role': CREATOR},  # only creators can be followed
        symmetrical=False,  # True would mean mutual, friend-style follows
        verbose_name='suit',
    )

    def save(self, *args, **kwargs):
        # Keep group membership in sync with the role after every save.
        # NOTE(review): membership in the OLD role's group is never removed
        # when the role changes — confirm whether that matters.
        super().save(*args, **kwargs)
        if self.role == self.CREATOR:
            group = Group.objects.get(name='creators')
            group.user_set.add(self)
        elif self.role == self.SUBSCRIBER:
            group = Group.objects.get(name='subscribers')
group.user_set.add(self) | TonyQuedeville/fotoblog | authentication/models.py | models.py | py | 1,089 | python | fr | code | 0 | github-code | 36 |
29391745282 | class Solution:
    def diagonalSum(self, mat: List[List[int]]) -> int:
        """Sum of the primary and secondary diagonals of a square matrix,
        counting the shared center cell only once when the size is odd.
        """
        size = len(mat)
        if size == 1:
            # NOTE(review): redundant early-out — the general path below
            # yields the same value for size 1 — but harmless.
            return mat[0][0]
        sum = 0  # shadows the builtin; kept to preserve the code unchanged
        for i in range(size):
            sum += mat[i][i] + mat[i][size - i - 1]
        if size % 2 == 1:
            # Both diagonals counted the center cell; remove one copy.
            sum -= mat[size // 2][size // 2]
return sum | AnotherPianist/LeetCode | 1572-matrix-diagonal-sum/1572-matrix-diagonal-sum.py | 1572-matrix-diagonal-sum.py | py | 375 | python | en | code | 1 | github-code | 36 |
39844679092 | """
Iguana (c) by Marc Ammon, Moritz Fickenscher, Lukas Fridolin,
Michael Gunselmann, Katrin Raab, Christian Strate
Iguana is licensed under a
Creative Commons Attribution-ShareAlike 4.0 International License.
You should have received a copy of the license along with this
work. If not, see <http://creativecommons.org/licenses/by-sa/4.0/>.
"""
from django import template
register = template.Library()
@register.simple_tag(name='get_user_preference', takes_context=True)
def get_user_preference(context, key, default=None):
    """Template tag: return the current user's preference *key*.

    Falls back to *default* when the preference is unset.  Expects 'user'
    to be present in the template context (auth context processor).
    """
    user = context['user']
    return user.get_preference(key, default)
| midas66/iguana | src/common/templatetags/user_preference.py | user_preference.py | py | 603 | python | en | code | null | github-code | 36 |
23597401890 | from tkinter import *
import mysql.connector
import matplotlib.pyplot as plt
import csv
# Main application window.
root = Tk()
root.title('VINCI FarmDB')
root.geometry("400x700")
root.iconbitmap('Logo.ico')
# Connec to the MySQL Server
mydb = mysql.connector.connect(
    host="localhost",
    user = "", #Enter Your Username
    passwd = "", #Enter Your Password
    database = "warehouse"
)
#FUNCTIONS
#Clear Filed
def clear_field():
    """Empty every entry widget of the main form."""
    nbox.delete(0,END)
    abox.delete(0,END)
    pbox.delete(0,END)
    qbox.delete(0,END)
    debox.delete(0,END)
    p1box.delete(0,END)
    p2box.delete(0,END)
    dabox.delete(0,END)
    tbox.delete(0,END)
    arbox.delete(0,END)
#Add Data to Database
def add_data():
    """Insert the current form contents as a new row of `master`.

    Uses parameterized %s placeholders, so values are escaped by the
    mysql.connector driver (no SQL injection from the form fields).
    """
    sql_command = "INSERT INTO master (name,aadno,ph,catg,quant,des,plts,plte,date,intt,area) VALUES(%s,%s,%s,%s,%s,%s,%s,%s,%s,%s,%s)"
    values = (nbox.get(), abox.get(), pbox.get(), clicked.get(), qbox.get(), debox.get(), p1box.get(), p2box.get(), dabox.get(), tbox.get(), arbox.get())
    cursor.execute(sql_command, values)
    mydb.commit()
    clear_field()
#View Database
def view_db():
    """Open a window listing every row of `master`, with a CSV export button."""
    view = Tk()
    view.title("List of All Stock In Warehouse")
    view.geometry("800x600")
    view.iconbitmap('Logo.ico')
    cursor.execute("SELECT * FROM master")
    result = cursor.fetchall()
    n1=0
    head = ['Name','AadharNo','PhNo','Type','Quantity','Description','PlotNo(Start)','PlotNo(End)','Date','InTime','Area']
    for i in head:
        hl = Label(view,text=i,fg="red")
        hl.grid(row=0,column=n1)
        n1+=1
    for index, x in enumerate(result):
        num = 0
        for y in x:
            ll = Label(view, text = y)
            ll.grid(row=index+1, column=num)
            num+=1
    # NOTE(review): if `result` is empty, `index` below is unbound and this
    # raises NameError — confirm the table is never viewed while empty.
    csv_b = Button(view, text="Save as Excel", command=lambda: wtocsv(result))
    csv_b.grid(row=index+2, column=0)
def wtocsv(result):
    """Append every record in *result* as one row of Warehouse.csv.

    Bug fix: the file is now opened with newline='' as the csv module
    requires — without it the writer emits an extra blank row after every
    record on Windows (csv handles its own line terminators).
    """
    with open('Warehouse.csv', 'a', newline='') as f:
        w = csv.writer(f, dialect='excel')
        for record in result:
            w.writerow(record)
#Search Warehouse Function
def search_db():
    """Open a window to look up one warehouse record by Aadhar number."""
    search = Tk()
    search.title("List of All Stock In Warehouse")
    search.geometry("800x600")
    search.iconbitmap('Logo.ico')
    def search_now():
        # Runs on button press; widgets are captured from the closure.
        ans = searchbox.get()
        sql = "SELECT * FROM master WHERE aadno = %s"
        ano = (ans, )
        result = cursor.execute(sql,ano)
        result = cursor.fetchall()
        if not result:
            result = "No Record Found"
        if result =="No Record Found":
            ansl = Label(search, text=result)
            ansl.grid(row=2,column=0,padx=10)
        else:
            n1=0
            head = ['Name','AadharNo','PhNo','Type','Quantity','Description','PlotNo(Start)','PlotNo(End)','Date','InTime','Area']
            for i in head:
                hl = Label(search,text=i,fg="red")
                hl.grid(row=3,column=n1)
                n1+=1
            for index, x in enumerate(result):
                num = 0
                for y in x:
                    ll = Label(search, text = y)
                    ll.grid(row=index+4, column=num)
                    num+=1
    searchbox = Entry(search)
    searchbox.grid(row=0,column=1,padx=10,pady=10)
    slabel = Label(search, text="Enter Aadhar No:")
    slabel.grid(row=0,column=0, padx=10,pady=10)
    sb = Button(search, text="Search Warehouse", command=search_now)
    sb.grid(row=1,column=0,padx=10,pady=10)
#Updating the Database
def update_db():
    """Open a window to load a record by Aadhar number, edit it and save it."""
    udate = Tk()
    udate.title("Update Warehouse")
    udate.geometry("800x600")
    udate.iconbitmap('Logo.ico')
    def update_now():
        # Fetch the record and build an edit form pre-filled with its values.
        ans = searchbox.get()
        sql = "SELECT * FROM master WHERE aadno = %s"
        ano = (ans, )
        result = cursor.execute(sql,ano)
        result = cursor.fetchall()
        name = Label(udate,text="Name").grid(row=2,column=0,sticky=W,padx=10)
        aadno = Label(udate,text="Aadhar Number").grid(row=2+1,column=0,sticky=W,padx=10)
        ph = Label(udate,text="Phone Number").grid(row=3+1,column=0,sticky=W,padx=10)
        catg = Label(udate,text="Type").grid(row=4+1,column=0,sticky=W,padx=10)
        quant = Label(udate,text="Quantity").grid(row=5+1,column=0,sticky=W,padx=10)
        des = Label(udate,text="Description").grid(row=6+1,column=0,sticky=W,padx=10)
        plts = Label(udate,text="Plot Number START").grid(row=7+1,column=0,sticky=W,padx=10)
        plte = Label(udate,text="Plot Number END").grid(row=8+1,column=0,sticky=W,padx=10)
        date = Label(udate,text="Date").grid(row=9+1,column=0,sticky=W,padx=10)
        Time = Label(udate,text="Time").grid(row=10+1,column=0,sticky=W,padx=10)
        area = Label(udate,text="Area Occupied").grid(row=11+1,column=0,sticky=W,padx=10)
        #Creating Input Boxes
        # NOTE(review): raises IndexError on result[0] if no record matched
        # the entered Aadhar number — confirm desired behaviour.
        nbox = Entry(udate)
        nbox.grid(row=1+1,column=1)
        nbox.insert(0,result[0][0])
        abox = Entry(udate)
        abox.grid(row=2+1,column=1,pady = 5)
        abox.insert(0,result[0][1])
        pbox = Entry(udate)
        pbox.grid(row=3+1,column=1,pady = 5)
        pbox.insert(0,result[0][2])
        clicked = StringVar()
        clicked.set("Livestock")
        cbox = OptionMenu(udate, clicked, "Livestock", "Grains", "Fruits", "Vegetable", "Fertilizers", "Milk", "Tools")
        cbox.grid(row=4+1,column=1,pady = 5)
        qbox = Entry(udate)
        qbox.grid(row=5+1,column=1,pady = 5)
        qbox.insert(0,result[0][4])
        debox = Entry(udate)
        debox.grid(row=6+1,column=1,pady = 5)
        debox.insert(0,result[0][5])
        p1box = Entry(udate)
        p1box.grid(row=7+1,column=1,pady = 5)
        p1box.insert(0,result[0][6])
        p2box = Entry(udate)
        p2box.grid(row=8+1,column=1,pady = 5)
        p2box.insert(0,result[0][7])
        dabox = Entry(udate)
        dabox.grid(row=9+1,column=1,pady = 5)
        dabox.insert(0,result[0][8])
        tbox = Entry(udate)
        tbox.grid(row=10+1,column=1,pady = 5)
        tbox.insert(0,result[0][9])
        arbox = Entry(udate)
        arbox.grid(row=11+1,column=1,pady = 5)
        arbox.insert(0,result[0][10])
        def update_two():
            # Write the edited values back, keyed by the Aadhar number.
            sql_command = """UPDATE master SET name = %s,ph = %s,catg = %s,quant = %s,des = %s,plts = %s,plte = %s,date = %s,intt = %s,area = %s WHERE aadno = %s"""
            values = (nbox.get(), pbox.get(), clicked.get(), qbox.get(), debox.get(), p1box.get(), p2box.get(), dabox.get(), tbox.get(), arbox.get(),abox.get())
            cursor.execute(sql_command, values)
            mydb.commit()
            udate.destroy()
        up = Button(udate,text="Update Record",command=update_two)
        up.grid(row=13,column=0)
    searchbox = Entry(udate)
    searchbox.grid(row=0,column=1,padx=10,pady=10)
    slabel = Label(udate, text="Enter Aadhar No:")
    slabel.grid(row=0,column=0, padx=10,pady=10)
    sb = Button(udate, text="Update Person With AadharNo", command=update_now)
    sb.grid(row=1,column=0,padx=10,pady=10)
#Plotting Functions
def occupied_graph():
    """Show a pie chart of occupied vs. unoccupied warehouse area.

    NOTE(review): assumes total warehouse capacity is exactly 100 area
    units (val2 = 100 - occupied) — confirm, and note SUM(area) is NULL on
    an empty table.
    """
    cursor.execute("SELECT SUM(area) FROM master")
    val = cursor.fetchall()
    val1 = val[0][0]
    val2 = 100 - val1
    label = 'Occupied' , 'Unoccupied'
    sizes = [val1 , val2]
    explode = (0.1,0)
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, explode=explode, labels = label,autopct = '%1.1f%%',shadow=True, startangle = 90)
    ax1.axis('equal')
    plt.title("Occupancy Chart")
    plt.show()
def cateo_chart():
    """Show a pie chart of warehouse area grouped by stock category.

    Bug fix: the old code ran `SELECT SUM(area) ... GROUP BY catg` and
    zipped the rows against a hard-coded 7-label list.  Without ORDER BY,
    MySQL returns grouped rows in no guaranteed order, and categories with
    no stock produce no row at all — so slices got wrong labels, or the
    code crashed with IndexError.  Selecting `catg` alongside the sum and
    building the labels from the result fixes both problems.
    """
    cursor.execute("SELECT catg, SUM(area) FROM master GROUP BY catg")
    rows = cursor.fetchall()
    label = [row[0] for row in rows]
    sizes = [row[1] for row in rows]
    # Pop out the first slice, matching the original chart's look.
    explode = [0.1] + [0] * (len(rows) - 1) if rows else []
    fig1, ax1 = plt.subplots()
    ax1.pie(sizes, explode=explode, labels = label,autopct = '%1.1f%%',shadow=True, startangle = 90)
    ax1.axis('equal')
    plt.title("Category Wise Occupancy Chart")
    plt.show()
#Calcuate Cost
def cal_cost():
    """Placeholder for the storage-cost calculator; not implemented yet."""
    return
#Cursor for MySQL
cursor = mydb.cursor()
#Creating Database
# cursor.execute("CREATE DATABASE warehouse")
#Creating the Table
# cursor.execute("CREATE TABLE master(name VARCHAR(255),aadno INT(12) PRIMARY KEY,ph INT(10),catg VARCHAR(255),quant INT(10),des TEXT,plts INT(10),plte INT(10),date DATE,intt TIME,area INT(10))")
# Window title banner.
tlt_label = Label(root, text="VINCI FarmDB",font=("Times","24","bold"))
tlt_label.grid(row=0,column=0,columnspan=2,pady="10")
#Creating the Form
name = Label(root,text="Name").grid(row=1,column=0,sticky=W,padx=10)
aadno = Label(root,text="Aadhar Number").grid(row=2,column=0,sticky=W,padx=10)
ph = Label(root,text="Phone Number").grid(row=3,column=0,sticky=W,padx=10)
catg = Label(root,text="Type").grid(row=4,column=0,sticky=W,padx=10)
quant = Label(root,text="Quantity").grid(row=5,column=0,sticky=W,padx=10)
des = Label(root,text="Description").grid(row=6,column=0,sticky=W,padx=10)
plts = Label(root,text="Plot Number START").grid(row=7,column=0,sticky=W,padx=10)
plte = Label(root,text="Plot Number END").grid(row=8,column=0,sticky=W,padx=10)
date = Label(root,text="Date").grid(row=9,column=0,sticky=W,padx=10)
Time = Label(root,text="Time").grid(row=10,column=0,sticky=W,padx=10)
area = Label(root,text="Area Occupied").grid(row=11,column=0,sticky=W,padx=10)
#Creating Input Boxes
nbox = Entry(root)
nbox.grid(row=1,column=1)
abox = Entry(root)
abox.grid(row=2,column=1,pady = 5)
pbox = Entry(root)
pbox.grid(row=3,column=1,pady = 5)
# Category dropdown; `clicked` holds the selected value.
clicked = StringVar()
clicked.set("Livestock")
cbox = OptionMenu(root, clicked, "Livestock", "Grains", "Fruits", "Vegetable", "Fertilizers", "Milk", "Tools")
cbox.grid(row=4,column=1,pady = 5)
qbox = Entry(root)
qbox.grid(row=5,column=1,pady = 5)
debox = Entry(root)
debox.grid(row=6,column=1,pady = 5)
p1box = Entry(root)
p1box.grid(row=7,column=1,pady = 5)
p2box = Entry(root)
p2box.grid(row=8,column=1,pady = 5)
dabox = Entry(root)
dabox.grid(row=9,column=1,pady = 5)
tbox = Entry(root)
tbox.grid(row=10,column=1,pady = 5)
arbox = Entry(root)
arbox.grid(row=11,column=1,pady = 5)
#Buttons
add_b = Button(root, text="Add to Warehouse", command=add_data)
add_b.grid(row=12,column=0,padx=10,pady=10)
clear_b = Button(root, text="Clear Data", command=clear_field)
clear_b.grid(row=12,column=1)
view_b = Button(root, text="View The Entire Warehouse", command=view_db)
view_b.grid(row=13,column=0,sticky=W,padx=10)
search_b = Button(root, text="Search Warehouse", command=search_db)
search_b.grid(row=13,column=1, sticky=W, padx=10)
update_b = Button(root,text="Warehouse Update", command=update_db)
update_b.grid(row=14,column=0,sticky=W,padx=10,pady=10)
plot1 = Label(root,text="Plotting Functions",fg="red")
plot1.grid(row=15,column=0)
occ = Button(root,text="Occupancy Chart",command=occupied_graph)
occ.grid(row=16,column=0,sticky=W,padx=10,pady=10)
cato = Button(root,text="Category Chart",command=cateo_chart)
cato.grid(row=16,column=1,sticky=W,padx=10,pady=10)
plot2 = Label(root,text="Cost Calculator",fg="red")
plot2.grid(row=17,column=0)
cost_b = Button(root,text="Calculate Cost",command=cal_cost)
cost_b.grid(row=18,column=0,sticky=W,padx=10,pady=10)
# Enter the Tk event loop; blocks until the window is closed.
root.mainloop()
| murali22chan/Aatmanirbhar-Bharat-Hackathon | main.py | main.py | py | 10,762 | python | en | code | 0 | github-code | 36 |
33039135016 | from pyglet import gl
class Polygon:
    """A textured quad: four vertices remapped to texture coordinates.

    An int u0 means pixel coordinates on a 64x32 texture (insets are
    applied to avoid texel bleeding at the edges); a float u0 means the
    coordinates are already normalized; no u0 leaves vertices untouched.
    """
    def __init__(self, vertices, u0=None, v0=None, u1=None, v1=None):
        self.vertices = vertices
        if isinstance(u0, int):
            # Half-texel-style insets for a 64x32 texture atlas.
            inset_u = 0.0015625
            inset_v = 0.003125
            left = u0 / 64.0 + inset_u
            right = u1 / 64.0 - inset_u
            top = v0 / 32.0 + inset_v
            bottom = v1 / 32.0 - inset_v
            vertices[0] = vertices[0].remap(right, top)
            vertices[1] = vertices[1].remap(left, top)
            vertices[2] = vertices[2].remap(left, bottom)
            vertices[3] = vertices[3].remap(right, bottom)
        elif isinstance(u0, float):
            vertices[0] = vertices[0].remap(u1, v0)
            vertices[1] = vertices[1].remap(u0, v0)
            vertices[2] = vertices[2].remap(u0, v1)
            vertices[3] = vertices[3].remap(u1, v1)
| pythonengineer/minecraft-python | mc/net/minecraft/model/Polygon.py | Polygon.py | py | 770 | python | en | code | 2 | github-code | 36 |
def method1(arr, n, x):
    """Print the index of the last occurrence of x in arr[0:n].

    Prints nothing when x is absent.  (The exercise assumes a sorted
    array, but the scan works for any list.)
    """
    # Walk from the right: the first hit IS the last occurrence, so we can
    # stop early instead of always scanning all n elements as before.
    for i in range(n - 1, -1, -1):
        if arr[i] == x:
            print("Last Occurrence = ", i)
            return
if __name__ == "__main__":
    # NOTE(review): the triple-quoted block below is a bare string literal --
    # it is evaluated and discarded, so nothing actually runs here.  It keeps
    # a timeit benchmark snippet; un-string it to reproduce the measurement.
    """
    arr = [1, 2, 2, 2, 2, 3, 4, 7, 8, 8 ]
    n = len(arr)
    x = 8
    from timeit import timeit
    print(timeit(lambda: method1(arr, n, x), number=10000)) # 0.08866931500006103
    """
| thisisshub/DSA | E_searching/problems/B_index_of_last_occurence_in_sorted_array.py | B_index_of_last_occurence_in_sorted_array.py | py | 476 | python | en | code | 71 | github-code | 36 |
75104190825 | # Given an array of lowercase letters sorted in ascending order, find the
# smallest letter in the given array greater than a given ‘key’.
# Assume the given array is a circular list, which means that the last letter
# is assumed to be connected with the first letter. This also means that the
# smallest letter in the given array is greater than the last letter of the
# array and is also the first letter of the array.
# Write a function to return the next letter of the given ‘key’.
# Example:
# Input: ['a', 'c', 'f', 'h'], key = 'm'
# Output: 'a'
# Explanation: As the array is assumed to be circular, the smallest letter
# greater than 'm' is 'a'.
def search_next_letter(letters, key):
    """Return the smallest letter strictly greater than `key` in the sorted,
    circular array `letters`; wraps to letters[0] when key >= the last one.
    """
    lo, hi = 0, len(letters) - 1
    # Binary search for the insertion point just past every element <= key.
    while lo <= hi:
        mid = (lo + hi) // 2
        if key < letters[mid]:
            hi = mid - 1
        else:
            lo = mid + 1
    # The modulo implements the circular wrap-around.
    return letters[lo % len(letters)]
def main():
    """Self-checks for search_next_letter (raise AssertionError on failure)."""
    cases = [
        ('f', 'h'),   # plain successor
        ('b', 'c'),   # key absent from the array
        ('m', 'a'),   # past the last letter -> wraps around
        ('h', 'a'),   # equal to the last letter -> wraps around
    ]
    for key, expected in cases:
        assert search_next_letter(['a', 'c', 'f', 'h'], key) == expected


main()
22179863 | #!/usr/bin/python3
from time import sleep
import mysql.connector
import pprint
import threading
import tkinter as tk
import sys
class MainWindow:
    """Tkinter front-end for a car-park ticket system backed by MySQL.

    SECURITY(review): every query below interpolates user input with
    f-strings -- this is SQL-injection-prone; parameterized queries
    (cursor.execute(sql, params)) should be used instead.  The database
    credentials are also hardcoded in source.
    """

    def __init__(self, main) -> None:
        # `main` is the Tk root window.
        self.main = main
        self.main['bg'] = '#909090'
        self.lightColor = '#909090'
        # Flag polled by the background refresh thread; cleared on close.
        self.threadRunning = True
        # Tariffs (currency units per second) for normal / season tickets.
        self.pricePerSec = 0.5
        self.seasonPricePerSec = 0.2
        self.main.protocol("WM_DELETE_WINDOW", self.on_closing)
        self.main.geometry("600x300")
        # Buttons: regular ticket, season ticket, end-of-ticket checkout.
        self.userButton = tk.Button(self.main, text ="Ticket", command = self.user)
        self.sUserButton = tk.Button(self.main, text ="Dauer Ticket", command = self.seasonUser)
        self.removeButton = tk.Button(self.main, text ="Ende Ticket", command = self.removeUser)
        # Inputs: license plate ("Nummernschild") and space number
        # ("Parkplatznummer"); labels for free-space counters and the price.
        self.plate = tk.Label (self.main, text='Nummernschild')
        self.plateEntry = tk.Entry (self.main)
        self.space = tk.Label (self.main, text='Parkplatznummer')
        self.spaceEntry = tk.Entry (self.main)
        self.freeSpaces = tk.Label (self.main, text='')
        self.sFreeSpaces = tk.Label (self.main, text='')
        self.price = tk.Label (self.main, text='')
        self.sFreeSpaces.pack()
        self.freeSpaces.pack()
        self.space.place(x=10, y=10)
        self.spaceEntry.place(x=8, y=40)
        self.plate.place(x=10, y=70)
        self.plateEntry.place(x=8, y=100)
        self.userButton.place(x=8, y=130)
        self.sUserButton.place(x=87, y=130)
        self.removeButton.place(x=8, y=160)
        self.price.pack()
        self.sFreeSpaces['bg'] = self.lightColor
        self.freeSpaces['bg'] = self.lightColor
        self.space['bg'] = self.lightColor
        self.spaceEntry['bg'] = self.lightColor
        self.plate['bg'] = self.lightColor
        self.plateEntry['bg'] = self.lightColor
        self.userButton['bg'] = self.lightColor
        self.sUserButton['bg'] = self.lightColor
        self.removeButton['bg'] = self.lightColor
        self.price['bg'] = self.lightColor
        # SECURITY(review): hardcoded maintenance-account credentials.
        self.dataBase: mysql = mysql.connector.connect(
            host = "localhost",
            user = "debian-sys-maint",
            password = "SUPtwQgI1bbIrSPv"
        )
        # Two cursors on one connection: `cursor` for UI callbacks,
        # `threadCursor` for the background refresh loop.
        self.cursor = self.dataBase.cursor(dictionary=True)
        self.cursor.execute("use carpark;")
        self.threadCursor = self.dataBase.cursor(dictionary=True)
        self.threadCursor.execute("use carpark;")
        # Canned count queries for free regular / season spaces.
        self.countUnocc = "select count(*) as unoccupied from space where occupied = 'n' and seasonticket = 'n';"
        self.countSeasonUnocc = "select count(*) as unoccupied from space where occupied = 'n' and seasonticket = 'j';"
        self.thread = threading.Thread(target=self.refresh)
        self.thread.start()

    def refresh(self):
        """Background loop: update the free-space labels once per second.

        NOTE(review): this worker thread mutates Tk widgets directly;
        tkinter is generally not safe to call from non-main threads --
        confirm this is acceptable or marshal updates to the main loop.
        """
        while(self.threadRunning):
            self.threadCursor.execute( self.countUnocc )
            self.freeSpaces['text'] = "Verfügbare Parkplätze: " + str(self.threadCursor.fetchall()[0]['unoccupied'])
            self.freeSpaces.pack()
            self.threadCursor.execute( self.countSeasonUnocc )
            self.sFreeSpaces['text'] = "Verfügbare Dauerparkplätze: " + str(self.threadCursor.fetchall()[0]['unoccupied'])
            self.sFreeSpaces.pack()
            sleep( 1 )

    def user(self):
        """Issue a regular ticket: occupy the requested space for the plate.

        Regular tickets are restricted to spaces 41..180, and only while
        more than 4 regular spaces remain free (the rest are held back).
        """
        plate = self.plateEntry.get()
        space_id: int = int(self.spaceEntry.get())
        self.cursor.execute( self.countUnocc )
        countUnocc: int = int(self.cursor.fetchall()[0]['unoccupied'])
        self.cursor.execute(f"select user_id, seasonticket from user where plate = '{plate}'")
        data = self.cursor.fetchall()
        plateExist: int = len(data)
        seasonticket: str
        if plateExist == 1:
            seasonticket = data[0]['seasonticket']
        self.cursor.execute(f"select occupied from space where space_id = '{space_id}'")
        occupied: str = self.cursor.fetchall()[0]['occupied']
        if occupied == 'n' and len(plate) > 0 and countUnocc > 4 and space_id > 40 and space_id <= 180:
            if plateExist == 0:
                # New customer: create the user row with entrydate = now().
                self.cursor.execute(f"insert into user values (null, '{plate}', 'n', now(), null, null);")
            elif plateExist == 1 and seasonticket == 'j':
                # Season-ticket holders must use seasonUser() instead.
                return
            elif plateExist == 1 and seasonticket == 'n':
                self.cursor.execute(f"update user set entrydate = now() where plate = '{plate}';")
            self.dataBase.commit()
            self.cursor.execute(f"select user_id from user where plate = '{plate}'")
            user_id: int = self.cursor.fetchall()[0]['user_id']
            self.cursor.execute(f"update space set occupied = 'j', user_id = {user_id} where space_id = {space_id};")
            self.dataBase.commit()

    def seasonUser(self):
        """Issue a season ticket: like user(), but season holders may use the
        reserved spaces 1..40 once <= 4 regular spaces remain free."""
        plate = self.plateEntry.get()
        space_id: int = int(self.spaceEntry.get())
        self.cursor.execute( self.countUnocc )
        countUnocc: int = int(self.cursor.fetchall()[0]['unoccupied'])
        self.cursor.execute( self.countSeasonUnocc )
        countSeasonUnocc: int = int(self.cursor.fetchall()[0]['unoccupied'])
        self.cursor.execute(f"select user_id, seasonticket from user where plate = '{plate}'")
        data = self.cursor.fetchall()
        plateExist: int = len(data)
        seasonticket: str
        if plateExist == 1:
            seasonticket = data[0]['seasonticket']
        self.cursor.execute(f"select occupied from space where space_id = '{space_id}'")
        occupied: str = self.cursor.fetchall()[0]['occupied']
        if occupied == 'n' and len(plate) > 0 and space_id > 0 and space_id <= 180 \
            and ((countUnocc <= 4
            and countSeasonUnocc > 0
            and space_id > 0
            and space_id <= 40)
            or (countUnocc > 4)):
            if plateExist == 0:
                # New season customer: totaltime starts at 0.
                self.cursor.execute(f"insert into user values (null, '{plate}', 'j', now(), null, 0);")
            elif plateExist == 1 and seasonticket == 'n':
                # Regular-ticket holders must use user() instead.
                return
            elif plateExist == 1 and seasonticket == 'j':
                self.cursor.execute(f"update user set entrydate = now() where plate = '{plate}';")
            self.dataBase.commit()
            self.cursor.execute(f"select user_id from user where plate = '{plate}'")
            user_id: int = self.cursor.fetchall()[0]['user_id']
            self.cursor.execute(f"update space set occupied = 'j', user_id = {user_id} where space_id = {space_id};")
            self.dataBase.commit()

    def removeUser(self):
        """Check a car out: stamp leavedate, compute the fare from the parked
        duration, show it in the price label and free the space."""
        plate = self.plateEntry.get()
        self.cursor.execute(f"select user_id from user where plate = '{plate}'")
        plateExist: int = len(self.cursor.fetchall())
        if len(plate) > 0 and plateExist == 1:
            self.cursor.execute(f"update user set leavedate = now() where plate = '{plate}'")
            self.dataBase.commit()
            self.cursor.execute(f"select seasonticket from user where plate = '{plate}'")
            seasonticket = self.cursor.fetchall()[0]['seasonticket']
            # NOTE(review): sleep presumably gives the update time to land
            # before the duration is read back -- confirm it is needed.
            sleep(0.5)
            self.cursor.execute(f"select (leavedate - entrydate) as time from user where plate = '{plate}';")
            seconds: int = int(self.cursor.fetchall()[0]['time'])
            print(seconds)
            sys.stdout.flush()
            if seasonticket == 'n':
                pricePerSec = seconds * self.pricePerSec
                self.price['text'] = "Ticketpreis: " + str(round(pricePerSec, 2)) + "€"
            elif seasonticket == 'j':
                # Season holders accumulate their total parked time.
                self.cursor.execute(f"update user set totaltime = totaltime + {seconds} where plate = '{plate}';")
                self.dataBase.commit()
                pricePerSec = seconds * self.seasonPricePerSec
                self.price['text'] = "Dauerticketpreis: " + str(round(pricePerSec, 2)) + "€"
            self.cursor.execute(f"update space set occupied = 'n', user_id = null where user_id in ( select user_id from user where plate = '{plate}');")
            self.dataBase.commit()
            #self.cursor.execute(f"delete from user where plate = '{plate}' and seasonticket = 'n';")
            #self.dataBase.commit()

    def on_closing(self):
        """Window-close handler: stop the refresh thread, close the DB, quit."""
        self.threadRunning = False
        self.dataBase.close()
        self.main.destroy()


if __name__ == '__main__':
    main = tk.Tk()
    mainWindow = MainWindow( main )
    main.mainloop()
| DrOeter/parkhaus | main.py | main.py | py | 8,496 | python | en | code | 0 | github-code | 36 |
3829718910 | from __future__ import print_function
import io
import logging
import logging.handlers
import sys
import threading
import time
try:
import argparse
except ImportError:
sys.stderr.write("""
ntploggps: can't find the Python argparse module
If your Python version is < 2.7, then manual installation is needed:
# pip install argparse
""")
sys.exit(1)
try:
import gps
except ImportError as e:
sys.stderr.write("ntploggps: can't find Python GPSD library.\n")
sys.stderr.write("%s\n" % e)
sys.exit(1)
class logfile_header_class(logging.handlers.TimedRotatingFileHandler):
    'A TimedRotatingFileHandler that re-writes the column header on rotation.'
    def doRollover(self):
        'Rotate as usual, then write the column-header line into the new file.'
        # `str is bytes` is True only on Python 2, where the zero-argument
        # form of super() does not exist.
        if str is bytes:
            super(logfile_header_class, self).doRollover()
        else:
            super().doRollover()
        self.stream.write('# Time Device TDOP nSat\n')
def logging_setup():
    """Configure and return the root logger for log-line output.

    Records go to args.logfile (rotated at UTC midnight, keeping the
    column header via logfile_header_class) when a log file was given on
    the command line, otherwise to stdout.
    """
    root_logger = logging.getLogger()
    root_logger.setLevel(logging.INFO)
    if args.logfile:
        handler = logfile_header_class(args.logfile[0],
                                       utc=True,
                                       when='midnight',
                                       interval=1)
    else:
        handler = logging.StreamHandler(sys.stdout)
    handler.setLevel(logging.INFO)
    # Message-only format: each record is a plain whitespace-separated line.
    handler.setFormatter(logging.Formatter('%(message)s'))
    root_logger.addHandler(handler)
    return root_logger
# Command-line interface: -l/--logfile, -o/--once, -w/--wait, -v/--verbose,
# -V/--version.  nargs=1 options arrive as one-element lists (args.X[0]).
parser = argparse.ArgumentParser(description="gpsd log file generator",
                                 epilog="""
See the manual page for details.
""")
parser.add_argument('-l', '--logfile',
                    dest='logfile',
                    help="append log data to LOGFILE instead of stdout",
                    nargs=1)
parser.add_argument('-o', '--once',
                    action="store_true",
                    dest='once',
                    help="log one line, then exit")
parser.add_argument('-w', '--wait',
                    default=[5],
                    dest='wait',
                    help="wait WAIT seconds after each log line, default 5",
                    nargs=1,
                    type=int)
parser.add_argument('-v', '--verbose',
                    action="store_true",
                    dest='verbose',
                    help="be verbose")
parser.add_argument('-V', '--version',
                    action="version",
                    version="ntploggps ntpsec-@NTPSEC_VERSION_EXTENDED@")
args = parser.parse_args()
if args.verbose:
    print("ntploggps: arguments:")
    print(args)
# Open the output destination early so a bad path fails fast.
if args.logfile:
    # log to logfile
    try:
        out = open(args.logfile[0], mode='a')
    except io.UnsupportedOperation as e:
        sys.stderr.write("ntploggps: can't open logfile %s\n" % args.logfile)
        sys.stderr.write("%s\n" % e)
        sys.exit(1)
    if args.verbose:
        print("ntploggps: opened log file %s" % args.logfile[0])
else:
    # log to stdout
    out = sys.stdout
class GpsPoller(threading.Thread):
    """Worker thread that streams reports from gpsd and caches the latest
    device path, TDOP and used-satellite count for the main loop to read."""
    running = False  # True when thread is running. Quit when set False
    def __init__(self):
        threading.Thread.__init__(self)
        # Latest values parsed from SKY reports; None until first good report.
        self.device = None
        self.satellites_used = None
        self.tdop = None
        # start the streaming of gps data
        try:
            self.gpsd = gps.gps(mode=gps.WATCH_ENABLE)
        except BaseException as e:
            sys.stderr.write("ntploggps: Can't connect to gpsd, %s\n"
                             "           Is gpsd running?\n" % e)
            sys.exit(1)
        self.running = True
    def run(self):
        """Thread body: consume gpsd reports, keeping only SKY reports.

        NOTE(review): the loop condition reads the module-level `gpsp`
        (the instance created in __main__), not `self` -- it works because
        this class is only ever instantiated once, as `gpsp`.
        """
        while gpsp.running:
            if self.gpsd.read() == -1:
                # Stream closed / read error: stop the thread.
                self.running = False
                break
            if not hasattr(self.gpsd, "data"):
                continue
            if self.gpsd.data.get("class", None) != "SKY":
                continue
            satellite_list = self.gpsd.data.get(
                "satellites", None
            )
            count_used_satellites = None
            if satellite_list is not None:
                # Count satellites flagged as used in the fix.
                count_used_satellites = sum(
                    map(lambda x: x.used, satellite_list)
                )
            time_dilution = self.gpsd.data.get("tdop", None)
            device_path = self.gpsd.data.get("device", None)
            if count_used_satellites is None:
                # Fall back to the report's own used-satellite count.
                count_used_satellites = self.gpsd.data.get(
                    "uSat", None
                )
            # Publish only complete report triples.
            if None not in [
                    count_used_satellites,
                    time_dilution,
                    device_path,
            ]:
                self.satellites_used = count_used_satellites
                self.tdop = time_dilution
                self.device = device_path
    @property
    def time(self):
        "Return the gpsd time fix as a number, or None when non-finite."
        t = self.gpsd.fix.time
        if isinstance(t, int):
            return t
        if isinstance(t, float):
            if not gps.isfinite(t):
                return None
            return t
        # Otherwise assume an ISO8601 string and convert to seconds.
        return gps.isotime(t)
if __name__ == '__main__':
    # this is the main thread
    if args.verbose:
        print("ntploggps: creating poll thread")
    gpsp = GpsPoller() # create the thread
    try:
        # Create the logger instance
        Logger = logging_setup()
        # Create data layout
        Logger.info("# Time Device TDOP nSat")
        gpsp.start() # start it up
        last_time = 0
        while gpsp.running:
            # It may take a second or two to get good data
            try:
                current_time = gpsp.time
                device = gpsp.device
                tdop = gpsp.tdop
                satellites_used = gpsp.satellites_used
                if current_time is not None and \
                   device is not None and \
                   satellites_used is not None and \
                   tdop is not None:
                    # De-duplicate: only log when the fix time advanced.
                    if last_time != current_time:
                        s = '%i %s %f %d' % (current_time, device, tdop,
                                             satellites_used)
                        Logger.info(s)
                        last_time = current_time
                    if args.once:
                        # just once
                        break
            except AttributeError as e:
                print('parse error\n')
            # wait a bit before next log
            time.sleep(args.wait[0])
    except (KeyboardInterrupt, SystemExit): # when you press ctrl+c
        args.once = True # stop the retry loop
        if args.verbose:
            print("\nKilling Thread...")
        else:
            # print a blank line to make bash happy
            print("")
    except Exception as e: # any error, signal
        print(e)
    # tell the thread to die
    gpsp.running = False
    # wait for the thread to finish what it's doing
    gpsp.join()
    if args.verbose:
        print("ntploggps: Done -- Exiting.")
| ntpsec/ntpsec | ntpclients/ntploggps.py | ntploggps.py | py | 7,198 | python | en | code | 225 | github-code | 36 |
7055499592 | """
面向对象的思考步骤:
现实事物 -抽象化-> 类 -具体化-> 对象
# int 类的对象
a = 10
# str 类的对象
b = "悟空"
# list 类的对象
c = [1,2,3]
语法:
class 类名:
def __init__(self, 参数):
self.数据 = 参数
"""
class Wife:
    """Teaching example: a class bundles data (attributes) with behaviour
    (methods)."""

    def __init__(self, name, face_score, money=0.0):
        # Data: noun-like descriptions of the object.
        self.name = name
        self.money = money
        self.face_score = face_score

    def work(self):
        # Behaviour: a verb-like capability of the object.
        print(self.name, "工作")
# Create an object (__init__ is invoked automatically).
w01 = Wife("双儿",97,2000)
# The instance is passed implicitly, i.e. equivalent to work(w01).
w01.work()
class Phone:
    """Teaching example: a phone described by brand, price and color."""

    def __init__(self, brand, price, color):
        self.brand = brand
        self.price = price
        # BUG FIX: the attribute was misspelled `self.clolor`, so the color
        # passed in was never reachable under the intended name.
        self.color = color

    def telephone(self):
        """Simulate making a call."""
        print("通话", [10])
# Two independent Phone instances; each method call receives its own `self`.
phone01 = Phone("苹果",5000,"深空灰")
phone02 = Phone("华为",8000,"深空灰")
phone01.telephone()
phone02.telephone()
| haiou90/aid_python_core | day09/exercise_personal/05_exercise.py | 05_exercise.py | py | 1,045 | python | en | code | 0 | github-code | 36 |
25450887207 | from django.urls import path, include
from . import views
# URL namespace for this app: targets reverse as "accounts:<name>".
app_name = "accounts"
urlpatterns = [
    # Log an existing user in.
    path("login/", views.LoginView.as_view(), name="login"),
    # Log the current user out.
    path("logout/", views.LogoutView.as_view(), name="logout"),
    # Register a new account.
    path("signup/", views.SignupView.as_view(), name="signup"),
    # Delegate /api/v1/ to the app's versioned API routes.
    path("api/v1/", include("accounts.api.v1.urls")),
]
| AmirhosseinRafiee/Blog | mysite/accounts/urls.py | urls.py | py | 391 | python | en | code | 0 | github-code | 36 |
42911658215 | # Author: Nic Wolfe <nic@wolfeden.ca>
# URL: http://code.google.com/p/sickbeard/
#
# This file is part of Sick Beard.
#
# Sick Beard is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Sick Beard is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Sick Beard. If not, see <http://www.gnu.org/licenses/>.
import sickbeard
import os.path
import operator, platform
import re
# HTTP User-Agent sent by Sick Beard, tagged with the host OS.
USER_AGENT = 'Sick Beard/alpha2 ('+platform.system()+' '+platform.release()+')'
# File extensions treated as video media.
mediaExtensions = ['avi', 'mkv', 'mpg', 'mpeg', 'wmv',
                   'ogm', 'mp4', 'iso', 'img', 'divx',
                   'm2ts', 'm4v', 'ts', 'flv', 'f4v',
                   'mov', 'rmvb']
### Other constants
MULTI_EP_RESULT = -1
SEASON_RESULT = -2
### Notification Types
NOTIFY_SNATCH = 1
NOTIFY_DOWNLOAD = 2
notifyStrings = {}
notifyStrings[NOTIFY_SNATCH] = "Started Download"
notifyStrings[NOTIFY_DOWNLOAD] = "Download Finished"
### Episode statuses
UNKNOWN = -1 # should never happen
UNAIRED = 1 # episodes that haven't aired yet
SNATCHED = 2 # qualified with quality
WANTED = 3 # episodes we don't have but want to get
DOWNLOADED = 4 # qualified with quality
SKIPPED = 5 # episodes we don't want
ARCHIVED = 6 # episodes that you don't have locally (counts toward download completion stats)
IGNORED = 7 # episodes that you don't want included in your download stats
SNATCHED_PROPER = 9 # qualified with quality
class Quality:
    """Bit-flag video quality levels plus helpers to combine them, guess a
    quality from a release name, and pack/unpack (status, quality) pairs.

    NOTE: Python 2 era code -- it relies on the builtin `reduce` and on
    classic integer division in qualityDownloaded().
    """
    NONE = 0
    SDTV = 1
    SDDVD = 1<<1 # 2
    HDTV = 1<<2 # 4
    HDWEBDL = 1<<3 # 8
    HDBLURAY = 1<<4 # 16
    FULLHDBLURAY = 1<<5 # 32
    # put these bits at the other end of the spectrum, far enough out that they shouldn't interfere
    UNKNOWN = 1<<15
    qualityStrings = {NONE: "N/A",
                      UNKNOWN: "Unknown",
                      SDTV: "SD TV",
                      SDDVD: "SD DVD",
                      HDTV: "HD TV",
                      HDWEBDL: "720p WEB-DL",
                      HDBLURAY: "720p BluRay",
                      FULLHDBLURAY: "1080p BluRay"}
    statusPrefixes = {DOWNLOADED: "Downloaded",
                      SNATCHED: "Snatched"}
    @staticmethod
    def _getStatusStrings(status):
        # Map every composite (status, quality) code to "Prefix (Quality)".
        toReturn = {}
        for x in Quality.qualityStrings.keys():
            toReturn[Quality.compositeStatus(status, x)] = Quality.statusPrefixes[status]+" ("+Quality.qualityStrings[x]+")"
        return toReturn
    @staticmethod
    def combineQualities(anyQualities, bestQualities):
        # Pack two OR-ed flag sets into one int: "any" in the low 16 bits,
        # "best" shifted into the high bits.
        anyQuality = 0
        bestQuality = 0
        if anyQualities:
            anyQuality = reduce(operator.or_, anyQualities)
        if bestQualities:
            bestQuality = reduce(operator.or_, bestQualities)
        return anyQuality | (bestQuality<<16)
    @staticmethod
    def splitQuality(quality):
        # Inverse of combineQualities: recover the two flag lists.
        anyQualities = []
        bestQualities = []
        for curQual in Quality.qualityStrings.keys():
            if curQual & quality:
                anyQualities.append(curQual)
            if curQual<<16 & quality:
                bestQualities.append(curQual)
        return (anyQualities, bestQualities)
    @staticmethod
    def nameQuality(name):
        # Guess the quality from a release file name.
        name = os.path.basename(name)
        # if we have our exact text then assume we put it there
        for x in Quality.qualityStrings:
            if x == Quality.UNKNOWN:
                continue
            regex = '\W'+Quality.qualityStrings[x].replace(' ','\W')+'\W'
            regex_match = re.search(regex, name, re.I)
            if regex_match:
                return x
        # Otherwise fall back to scene-naming heuristics.
        checkName = lambda list, func: func([re.search(x, name, re.I) for x in list])
        if checkName(["pdtv.xvid", "hdtv.xvid", "dsr.xvid"], any):
            return Quality.SDTV
        elif checkName(["dvdrip.xvid", "bdrip.xvid", "dvdrip.divx"], any):
            return Quality.SDDVD
        elif checkName(["720p", "hdtv", "x264"], all) or checkName(["hr.ws.pdtv.x264"], any):
            return Quality.HDTV
        elif checkName(["720p", "web.dl"], all) or checkName(["720p", "itunes", "h.?264"], all):
            return Quality.HDWEBDL
        elif checkName(["720p", "bluray", "x264"], all):
            return Quality.HDBLURAY
        elif checkName(["1080p", "bluray", "x264"], all):
            return Quality.FULLHDBLURAY
        else:
            return Quality.UNKNOWN
    @staticmethod
    def assumeQuality(name):
        # Last-resort guess from the container extension alone.
        if name.endswith(".avi"):
            return Quality.SDTV
        elif name.endswith(".mkv"):
            return Quality.HDTV
        else:
            return Quality.UNKNOWN
    @staticmethod
    def compositeStatus(status, quality):
        # Encode (status, quality) into one int: status + 100 * quality.
        return status + 100 * quality
    @staticmethod
    def qualityDownloaded(status):
        # NOTE: classic (Python 2) integer division is assumed here.
        return (status - DOWNLOADED) / 100
    @staticmethod
    def splitCompositeStatus(status):
        """Returns a tuple containing (status, quality)"""
        for x in sorted(Quality.qualityStrings.keys(), reverse=True):
            if status > x*100:
                return (status-x*100, x)
        return (Quality.NONE, status)
    @staticmethod
    def statusFromName(name, assume=True):
        # Composite DOWNLOADED status for a file name, optionally falling
        # back to extension-based guessing when the name is inconclusive.
        quality = Quality.nameQuality(name)
        if assume and quality == Quality.UNKNOWN:
            quality = Quality.assumeQuality(name)
        return Quality.compositeStatus(DOWNLOADED, quality)
# Precomputed composite-status lists, attached to Quality after the class
# body so they can use compositeStatus().
Quality.DOWNLOADED = [Quality.compositeStatus(DOWNLOADED, x) for x in Quality.qualityStrings.keys()]
Quality.SNATCHED = [Quality.compositeStatus(SNATCHED, x) for x in Quality.qualityStrings.keys()]
Quality.SNATCHED_PROPER = [Quality.compositeStatus(SNATCHED_PROPER, x) for x in Quality.qualityStrings.keys()]
# User-selectable quality presets built from the flag combinations.
HD = Quality.combineQualities([Quality.HDTV, Quality.HDWEBDL, Quality.HDBLURAY], [])
SD = Quality.combineQualities([Quality.SDTV, Quality.SDDVD], [])
ANY = Quality.combineQualities([Quality.SDTV, Quality.SDDVD, Quality.HDTV, Quality.HDWEBDL, Quality.HDBLURAY], [])
BEST = Quality.combineQualities([Quality.SDTV, Quality.HDTV], [Quality.SDTV, Quality.HDTV])
qualityPresets = (SD, HD, ANY, BEST)
qualityPresetStrings = {SD: "SD",
                        HD: "HD",
                        ANY: "Any",
                        BEST: "Best"}
class StatusStrings:
    """Dict-like mapper from an episode status code -- possibly composited
    with a quality via Quality.compositeStatus -- to its display string."""
    def __init__(self):
        self.statusStrings = {UNKNOWN: "Unknown",
                              UNAIRED: "Unaired",
                              SNATCHED: "Snatched",
                              DOWNLOADED: "Downloaded",
                              SKIPPED: "Skipped",
                              SNATCHED_PROPER: "Snatched (Proper)",
                              WANTED: "Wanted",
                              ARCHIVED: "Archived",
                              IGNORED: "Ignored"}
    def __getitem__(self, name):
        composites = Quality.DOWNLOADED + Quality.SNATCHED + Quality.SNATCHED_PROPER
        if name not in composites:
            return self.statusStrings[name]
        # Composite code: unpack and append the quality label if present.
        status, quality = Quality.splitCompositeStatus(name)
        label = self.statusStrings[status]
        if quality == Quality.NONE:
            return label
        return label + " (" + Quality.qualityStrings[quality] + ")"
    def has_key(self, name):
        # Kept for dict-style (Python 2) callers.
        groups = (self.statusStrings, Quality.DOWNLOADED,
                  Quality.SNATCHED, Quality.SNATCHED_PROPER)
        return any(name in g for g in groups)
# Module-level singleton used throughout the app for status labels.
statusStrings = StatusStrings()
class Overview:
    """Codes for the per-episode cells of the show overview display."""
    SKIPPED = 1
    WANTED = 2
    QUAL = 3
    GOOD = 4
    UNAIRED = 5
    overviewStrings = {SKIPPED: "skipped",
                       WANTED: "wanted",
                       QUAL: "qual",
                       GOOD: "good",
                       UNAIRED: "unaired"}
# Get our xml namespaces correct for lxml
XML_NSMAP = {'xsi': 'http://www.w3.org/2001/XMLSchema-instance',
'xsd': 'http://www.w3.org/2001/XMLSchema'}
#####################################################################
###
### DO NOT EDIT THIS MANUALLY! If you find a show that isn't
### being found please submit a ticket on google code so that
### I can fix the problem for everybody:
### http://code.google.com/p/sickbeard/issues/entry
###
#####################################################################
sceneExceptions = {72546: ['CSI'],
73696: ['CSI: New York'],
110381: ['Archer'],
83897: ['Life After People: The Series'],
80552: ['Kitchen Nightmares (US)'],
71256: ['The Daily Show'],
75692: ['Law & Order: SVU'],
71489: ['Law & Order: Criminal Intent', 'Law & Order: CI'],
79590: ['Dancing With The Stars (US)'],
71256: ['The Daily Show'],
73387: ['Craig Ferguson'],
85355: ['Jimmy Fallon'],
75088: ['David Letterman'],
76706: ['Big Brother (US)'],
105521: ['The Colony', 'The Colony (US)'],
76235: ['America\'s Funniest Home Videos', 'AFHV'],
139941: ['Childrens Hospital (US)', 'Childrens Hospital'],
83123: ['Merlin', 'Merlin (2008)'],
76779: ['WWE Monday Night RAW'],
164951: ['Shit My Dad Says'],
83714: ['Genius with Dave Gorman'],
168161: ['Law & Order: Los Angeles', 'Law & Order: LA'],
77526: ['Star Trek: TOS'],
72194: ['The Ellen Degeneres Show', 'Ellen Degeneres'],
72073: ['Star Trek DS9'],
195831: ['Zane Lamprey\'s Drinking Made Easy'],
76133: ['Poirot', 'Agatha Christie\'s Poirot'],
70870: ['The Real World Road Rules Challenge', 'The Challenge Cutthroat'],
77444: ['This Old House Program'],
73290: ['60 Minutes (US)'],
194751: ['Conan', 'Conan (2010)'],
164451: ['Carlos (2010)'],
70726: ['Babylon 5', 'Babylon5'],
}
countryList = {'Australia': 'AU',
'Canada': 'CA',
'USA': 'US'
}
| zeon/qpkg-sickbeard | src-shared/sickbeard/common.py | common.py | py | 10,833 | python | en | code | 4 | github-code | 36 |
8660980424 | import numpy as np
from ctypes import * # c 类型库
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from astropy.io import ascii
from astropy.table import Table, vstack
import os
from scipy.stats import *
import time
z4figpre = '../z4/figs/'
z4datapre = '../z4/data/'
z5figpre = '../z5/figs/'
z5datapre = '../z5/data/'
z6figpre = '../z6/figs/'
z6datapre = '../z6/data/'
datapre = '../data/'
figpre = '../figs/'
#os.system("g++ evol.cpp -L/usr/local/lib class_gas.o LE_iso.o read_aTree.o class_halo.o dyn.o thermo.o reaction.o Newton5.o my_linalg.o gsl_inverse.o RK4.o -lgsl -lgslcblas -lm -o cc.so -shared -fPIC")
#libc = CDLL('cc.so') # 装入动态链接库 ## 居然必须放在这里
# NOTE(review): `global` at module level is a no-op; kept as documentation of
# the names other modules expect from here.
global G, h0, H0, Omega_m0, Omega_L0, m_H, mu, Ms, pi, km, pc, Myr, alpha_T
# Physical constants -- values match CGS units (gravitational constant,
# speed of light, Boltzmann constant, hydrogen mass).
G, c, k_B, m_H = 6.67408e-8, 2.9979245e10, 1.38064852e-16, 1.66053904e-24
pi = 3.141593
mu = 1.2
Ms = 2.e33
Lsun = 3.828e33
pc = 3.e18
Mpc = 1.e6*pc
km = 1.e5
yr = 365*24*3600
Myr = 1.e6*(365*24*3600)
# Cosmological parameters (flat LCDM).
Omega_m0 = 0.307
Omega_L0 = 1 - Omega_m0
h0 = .677
H0 = h0*100*km/Mpc
t_Edd = 1./(4*pi*G/.4/(0.1*c))
# Bolometric correction factor applied at 1450 Angstrom.
fbol_1450 = 4.4
n_base = [1.63,1.09e-01,4.02e-03,3.87e-05,1.07e-08]
# n_base = [4.41e-01, 2.33e-02, 5.05e-04, 1.29e-06]
# n_base = [4.02e-03,3.87e-05,1.07e-08]
f_bsm = [.6,.4]
f_seed = 1.
W37 = 1e44
alpha_T = 2.324e4
Nbsm = 4
# Matplotlib font sizes for ticks / text / labels / titles / legends.
fstick = 20
fstxt = 20
fslabel = 23
fstitle = 20
fslegend = 20
my_cmap = plt.get_cmap("viridis")
rescale = lambda y: (y - np.min(y)) / (np.max(y) - np.min(y))
def MF(M,z=6):
    # Schechter-form mass function dn/dlogM; the z=4 branch uses a 10x
    # larger break mass.
    # NOTE(review): implicitly returns None for any z other than 6 or 4 --
    # confirm callers never pass other redshifts.
    alpha = -1.03
    Phi_star = 1.23e-8
    M_star = 2.24e9
    if z==6:
        return Phi_star*pow(M/M_star,alpha)*np.exp(-M/M_star)
    if z==4:
        M_star *= 10
        return Phi_star*pow(M/M_star,alpha)*np.exp(-M/M_star)
def L_M(M, Edd_ratio):
    """Luminosity (erg/s) of a black hole of mass M radiating at the given
    Eddington ratio; 1.25e38 is the Eddington luminosity per unit mass used
    throughout this module."""
    eddington_per_mass = 1.25e38
    return M * Edd_ratio * eddington_per_mass
def Mdot2M(Mdot):
    """Map an accretion rate Mdot onto a mass M.

    Three regimes: linear below Mdot1, logarithmic above Mdot2, and a
    log-log (geometric) interpolation bridging the two branch values in
    between.
    """
    eta = 1
    beta = 2.775e-6 * (1.5) ** .5
    Mdot1, Mdot2 = 0.04, 0.1
    linear_branch = lambda m: eta * m / beta
    log_branch = lambda m: (0.83 * np.log10(m) + 2.48) * 1.e5
    if Mdot < Mdot1:
        return linear_branch(Mdot)
    if Mdot > Mdot2:
        return log_branch(Mdot)
    # Interpolate in log(M) vs log(Mdot) between the two regime edges.
    t = (np.log(Mdot) - np.log(Mdot1)) / (np.log(Mdot2) - np.log(Mdot1))
    return np.exp(t * np.log(log_branch(Mdot2)) + (1 - t) * np.log(linear_branch(Mdot1)))
def LF(l): # dn/dlogL in Mpc^-3 dex^-1
    """Double power-law quasar luminosity function in bolometric luminosity,
    with an extra linear ramp factor (2*(1-t)+3*t) between L_1 and L_2."""
    Phi_M_star = 1.14e-8
    M_star = -25.13
    alpha = -1.5; beta = -2.81
    Phi_L_star = Phi_M_star * 2.5
    # Convert break / anchor magnitudes to bolometric luminosities.
    L_star = pow(10,-.4*(M_star-34.1)) * 3e18/1450 *1e7 * fbol_1450
    L_1 = pow(10,-.4*(-27.2-34.1)) * 3e18/1450 *1e7 * fbol_1450
    L_2 = pow(10,-.4*(-20.7-34.1)) * 3e18/1450 *1e7 * fbol_1450
    # print('break L',L_star/W37, 'Phi_L_star', Phi_L_star)
    t = (np.log10(l) - np.log10(L_1)) / (np.log10(L_2) - np.log10(L_1))
    return Phi_L_star/( pow(l/L_star,-(alpha+1)) + pow(l/L_star,-(beta+1)) ) * (2*(1-t)+3*t)
def LF_M1450(M,z=6): # dn/dmag in Mpc^-3 mag^-1
    """Double power-law quasar LF in M1450, with literature fit parameters
    chosen per redshift (later assignments deliberately override earlier
    fits kept for reference)."""
    if z==6:
        # Willot 2010 CFHQS + SDSS
        Phi_M_star = 1.14e-8
        M_star = -25.13
        alpha = -1.5; beta = -2.81
        # Matsuoka 2018
        Phi_M_star = 1.09e-8
        M_star = -24.9
        alpha = -1.23; beta = -2.73
    elif z==5:
        # McGreer 2018 data;
        Phi_M_star = pow(10., -8.97+0.47)
        M_star = -27.47
        alpha = -1.97; beta = -4.
        # refit by Matsuoka 2018 (beta & M_star); me: (alpha & Phi_M_star)
        Phi_M_star = 3.8e-8
        M_star = -25.6
        alpha = -1.23; beta = -3.
    elif z==4: # Akiyama 2018
        Phi_M_star = 2.66e-7
        M_star = -25.36
        alpha = -1.3; beta = -3.11
    else:
        # NOTE(review): after this message the fit parameters are undefined
        # and the return line raises UnboundLocalError for unsupported z.
        print("wrong redshift")
    return Phi_M_star/( pow(10., 0.4*(alpha+1)*(M-M_star)) + pow(10., 0.4*(beta+1)*(M-M_star)) ) #* (2*(1-t)+3*t)
def M1450_Lbol(L):
    """Absolute UV magnitude M1450 for a bolometric luminosity L."""
    return 34.1-2.5*np.log10(L/(fbol_1450*3e18/1450*1e7))
def Lbol_M1450(M):
    """Inverse of M1450_Lbol: bolometric luminosity for magnitude M."""
    return pow(10., -0.4*(M-34.1)) * (fbol_1450*3e18/1450*1e7)
# X-ray bolometric correction; Hopkins+07 & Duras+20
def K_AVE07(Lbol):
    """Hopkins+07 average X-ray bolometric correction K = Lbol/Lx."""
    return 10.83*pow(Lbol/(1e10*Lsun),0.28)+6.08*pow(Lbol/(1e10*Lsun),-0.02)
def K_AVE20(Lbol):
    """Duras+20 average X-ray bolometric correction K = Lbol/Lx."""
    a = 10.96
    b = 11.93
    c = 17.79
    return a*( 1 + pow(np.log10(Lbol/Lsun)/b,c) )
# obscured fraction = Type II AGN fraction
def f_obsc_U14(logLx,z): # Ueda 14; 22< log NH < 24 fraction; as a func of Lx
    """Ueda+14 obscured (22 < logNH < 24) fraction at luminosity logLx and
    redshift z, clipped between phimin and phimax."""
    eta = 1.7
    a1 = .48
    phi4375_0 = .43
    phi4375_z = phi4375_0*(1+z)**a1
    phimax = (1+eta)/(3+eta)
    phimin = .2
    beta = .24
    phi = min( phimax, max(phi4375_z - beta*(logLx-43.75), phimin))
    f_obsc_sum = phi # sum over 22< log NH < 24 range
    return f_obsc_sum
# constant obscured fraction; motivated by Vito+ 2018
f_obsc_const = .8
# correction factor including Compton thick AGNs; different fbol_Xray
def corr_U14H07(M1450): # Ueda+14 & Shankar+09
    """Obscuration correction (1+f_CTK)/(1-f_obsc) for magnitude M1450,
    using the Hopkins+07 bolometric correction and z=2 Ueda+14 fraction."""
    L_bol = Lbol_M1450(M1450)
    f_bol = K_AVE07(L_bol)
    Lx = L_bol/f_bol
    eta = 1.7
    a1 = .48
    phi4375_0 = .43
    phi4375_z = phi4375_0*(1+2.)**a1
    phimax = (1+eta)/(3+eta)
    phimin = .2
    beta = .24
    phi = min( phimax, max(phi4375_z - beta*(np.log10(Lx)-43.75), phimin))
    f_obsc_sum = phi # sum over 22< log NH < 24 range
    f_CTK = phi
    return (1+f_CTK)/(1-f_obsc_sum)
def corr_U14D20(M1450): # Ueda 14
    """Same correction as corr_U14H07 but with the Duras+20 bolometric
    correction; also handles array-valued M1450 element-wise."""
    L_bol = Lbol_M1450(M1450)
    f_bol = K_AVE20(L_bol)
    Lx = L_bol/f_bol
    eta = 1.7
    a1 = .48
    phi4375_0 = .43
    phi4375_z = phi4375_0*(1+2.)**a1
    phimax = (1+eta)/(3+eta)
    phimin = .2
    beta = .24
    if isinstance(M1450,float):
        phi = min( phimax, max(phi4375_z - beta*(np.log10(Lx)-43.75), phimin))
    else:
        phi = np.zeros(len(M1450))
        for i in range(len(M1450)):
            phi[i] = min( phimax, max(phi4375_z - beta*(np.log10(Lx[i])-43.75), phimin))
    f_obsc_sum = phi # sum over 22< log NH < 24 range
    f_CTK = phi
    return (1+f_CTK)/(1-f_obsc_sum)
def LF_M1450_CO(M,z): # dn/dmag in Mpc^-3 mag^-1
    # Matsuoka 2018
    # Constant-obscuration correction of the observed LF.
    return LF_M1450(M,z)/(1-f_obsc_const)
def LF_M1450_DO(M,z): # dn/dmag in Mpc^-3 mag^-1
    # Matsuoka 2018
    # Luminosity-dependent obscuration correction of the observed LF.
    return LF_M1450(M,z)*corr_U14D20(M)
def t_freefall(nH):
    """Free-fall time for hydrogen number density nH."""
    C = np.sqrt( 32*G*(mu*m_H)/ (3*pi) )
    return 1./C/np.sqrt(nH)
def t_from_z(z): # age of universe at redshift z: tH = 2/(3Hz)
    return 2./(3*H0*np.sqrt(Omega_m0)) * pow(1+z, -1.5)
def Tv(Mh,z):
    """Virial temperature of a halo of mass Mh (in Ms units of 1e8) at z."""
    return alpha_T * (Mh/1.e8)**(2./3.) * (1+z)/11.
def Mh_Tv(Tv,z):
    """Inverse of Tv(): halo mass for a given virial temperature at z."""
    return 1.e8*(Tv/alpha_T/(1+z)*11.)**1.5
def Omega_mz(z):
    """Matter density parameter at redshift z."""
    return Omega_m0*(1+z)**3 /(Omega_m0*(1+z)**3 + Omega_L0)
def Hz(z):
    """Hubble parameter at redshift z."""
    return H0*np.sqrt( Omega_m0*(1+z)**3 + Omega_L0 )
def RHO_crit(z):
    """Mean matter density at redshift z (critical density scaled by
    Omega_m0/Omega_mz)."""
    return 3*pow(H0,2)/(8*pi*G)*(1+z)**3*Omega_m0/Omega_mz(z)
class HALO:
    """NFW dark-matter halo of mass M at redshift z0; precomputes the
    concentration, virial radius/velocity/temperature and profile
    normalization used by the profile methods below."""
    def __init__(self,M,z0):
        self.Mh = M
        self.z = z0
        self.c = 18*pow(self.Mh/(1.e11*Ms), -0.13)/(1+self.z) #concentration parameter c from Dekel & Birnboim 2006 Eq(22)
        c, z = self.c, self.z
        self.d = Omega_mz(z) - 1
        d = self.d
        self.Delta_crit = 18.0*pi*pi + 82*d - 39*d*d # Delta_crit ~ 200, overdensity
        Delta_crit = self.Delta_crit
        self.delta0 = self.Delta_crit/3.*pow(c,3)/(-c/(1+c) + np.log(1+c)) # characteristic overdensity parameter
        delta0 = self.delta0
        self.rho_crit = RHO_crit(z) # mean density of DM at z
        self.rho_c = self.rho_crit * delta0
        # Virial radius from Mh = (4/3) pi Delta_crit rho_crit Rvir^3.
        self.Rvir = pow( self.Mh/(4./3*pi*Delta_crit*self.rho_crit),1./3. )
        self.Rs = self.Rvir/self.c
        self.Vc = np.sqrt(G*self.Mh/self.Rvir)
        self.t_dyn = self.Rvir/self.Vc
        self.Tvir = G*self.Mh*(mu*m_H)/(2.*k_B*self.Rvir)
        self.gc = 2*c/(np.log(1+c) - c/(1+c))
        self.alpha = self.Tvir/self.Mh**(2./3)
    def Rho_r(self, r):
        """NFW density at radius r."""
        rho_crit, delta0, Rvir = self.rho_crit, self.delta0, self.Rvir
        c, x = self.c, r/Rvir
        return rho_crit*delta0/( c*x * (1+c*x)**2 )
    # x = r/Rvir c = Rvir/Rs
    def F_NFW(self,x):
        """NFW enclosed-mass shape function of x = r/Rvir."""
        c = self.c
        return -c*x/(1+c*x) + np.log(1+c*x)
    def M_enc(self,r):
        """Mass enclosed within radius r."""
        rho_crit, delta0, Rs, Rvir = self.rho_crit, self.delta0, self.Rs, self.Rvir
        M_r = 4*pi*rho_crit*delta0*pow(Rs,3)*self.F_NFW(r/Rvir)
        return M_r
    def Phi(self, r):
        """Gravitational potential of the NFW profile at radius r."""
        # lim r -> 0
        #return -4*pi*G*rho_crit*delta0*Rs*Rs
        rho_crit, delta0, Rs = self.rho_crit, self.delta0, self.Rs
        return -4*pi*G*rho_crit*delta0*(Rs**3)/r*np.log(1+r/Rs)
| lovetomatoes/BHMF | PYmodule/__init__.py | __init__.py | py | 8,267 | python | en | code | 0 | github-code | 36 |
33064377909 | # Better implementation
# -> separate class for printing
from abc import ABC
class Expression(ABC):
    """Abstract base type for all expression-tree nodes."""
    pass
class DoubleExpression(Expression):
    """Leaf node wrapping a single numeric value."""
    def __init__(self, value):
        self.value = value
class AdditionExpression(Expression):
    """Interior node representing the sum `left + right`."""
    def __init__(self, left, right):
        self.left = left
        self.right = right
# separate class provides better implementation
# GOOD -> separation of concerns -> SOC
# BAD -> If new expression added (subtraction) then print
# needs to be explicitly updated with elif to make e.print
# work properly [code (print) will still run without elif ]
class ExpressionPrinter(Expression):
    """Reflection-based visitor that renders an expression tree into `buffer`.

    Separates printing from the node classes (separation of concerns). The
    trade-off: each new Expression subclass requires a new branch here.
    """
    @staticmethod
    def print(e, buffer):
        """Append the textual form of expression `e` to the list `buffer`."""
        if isinstance(e, DoubleExpression):
            buffer.append(str(e.value))
        elif isinstance(e, AdditionExpression):
            buffer.append('(')
            ExpressionPrinter.print(e.left, buffer)
            buffer.append('+')
            ExpressionPrinter.print(e.right, buffer)
            buffer.append(')')
        else:
            # Fail loudly instead of silently skipping unknown node types,
            # so a missing branch for a new Expression subclass is caught.
            raise TypeError(
                'ExpressionPrinter cannot print {}'.format(type(e).__name__))
# giving functionality for call -> e.print(buffer)
# Monkey-patch a `print` method onto every Expression at import time; it
# delegates to the ExpressionPrinter visitor so callers can write e.print(buf).
Expression.print = lambda self, b:\
    ExpressionPrinter.print(self, b)
if __name__ == "__main__":
    # expression - 1 + (2 + 3)
    e = AdditionExpression(
        DoubleExpression(1),
        AdditionExpression(
            DoubleExpression(2),
            DoubleExpression(3)
        )
    )
    buffer = []
    # Uses the `print` method monkey-patched onto Expression above.
    e.print(buffer)
    # ExpressionPrinter.print(e, buffer)
    print(''.join(buffer)) # (1+(2+3))
    # adding another operation == evaluate an expression
    # print("ans = ", e.eval()) #ans = 6
| PratikRamdasi/Design-Patterns-in-Python | Behavioral-Patterns/Visitor/reflective_visitor.py | reflective_visitor.py | py | 1,704 | python | en | code | 0 | github-code | 36 |
# Compare two poker hands read as "<hand1>-<hand2>" (Dou Dizhu style rules).
i,j = input().split("-")
# The double-joker "rocket" beats everything.
if i == "joker JOKER" or j =="joker JOKER":
    print("joker JOKER")
else:
    a = i.split(" ")
    b = j.split(" ")
    # Card ranking from weakest (3) to strongest (JOKER).
    x = ['3','4','5','6','7','8','9','10','J','Q','K','A','2','joker','JOKER']
    if len(a) == len(b):
        # Equal-length hands: compare by the sum of rank indices; for hands of
        # the same type and length this orders them the same as their lead card.
        c=0
        d=0
        for m in a:
            c += x.index(m)
        for m in b:
            d += x.index(m)
        if c > d:
            print(i)
        elif c < d:
            print(j)
        else:
            print("ERROR")
    # Different lengths: only a four-card bomb can win across types.
    elif len(a) == 4:
        print(i)
    elif len(b) ==4:
        print(j)
    else:
        print("ERROR")
2628647448 | import os
import cv2
import glob
import numpy as np
import matplotlib.pyplot as plt
from tqdm import tqdm
import random
from ipdb import set_trace as bp
size_h, size_w = 600, 600
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=False, dtype='uint8')
obj_xml = '''
<object>
<name>{}</name>
<bndbox>
<xmin>{}</xmin>
<ymin>{}</ymin>
<xmax>{}</xmax>
<ymax>{}</ymax>
</bndbox>
</object>\
'''
ann_xml = '''\
<annotation>
<filename>{}</filename>
<size>
<width>{}</width>
<height>{}</height>
<depth>3</depth>
</size>{}
</annotation>\
'''
def writedata(idx, image, label, group):
    """Write one two-digit sample: PNG image plus Pascal-VOC-style XML label.

    `label` is [bbox1, bbox2, class1, class2] as produced by preprocess();
    `group` selects the data/<group>/ output subtree (e.g. 'train'/'test').
    """
    imgdir = 'data/' + group + '/images/'
    lbldir = 'data/' + group + '/labels/'
    if not os.path.exists(imgdir):
        os.makedirs(imgdir)
    if not os.path.exists(lbldir):
        os.makedirs(lbldir)
    imgname = '{:05d}.png'.format(idx)
    xmlname = '{:05d}.xml'.format(idx)
    cv2.imwrite(imgdir + imgname, image)
    xml = ''
    # One <object> entry per digit: classes live at label[2:], boxes at label[:2].
    for i in [0, 1]:
        true_label = label[i + 2]
        xmin, ymin = label[i][0]
        xmax, ymax = label[i][1]
        xml += obj_xml.format(
            true_label, xmin, ymin, xmax, ymax)
    xml = ann_xml.format(imgname, xml)
    # 'x' mode raises FileExistsError if the label was already written.
    with open(lbldir + xmlname, 'x') as file:
        file.write(xml)
def preprocess(data, targets):
    """Overlay each sample with a randomly chosen partner sample.

    Returns (images, labels) where each label is
    [bbox1, bbox2, target1, target2] as produced by combine().
    """
    images, labels = [], []
    sample_count = data.shape[0]
    for first in range(sample_count):
        second = np.random.choice(sample_count)
        overlay, coords = combine(pickimg(data, first), pickimg(data, second))
        images.append(overlay)
        coords.extend([targets[first], targets[second]])
        labels.append(coords)
    return images, labels
def plotbbox(img, xmin, ymin, xmax, ymax):
    """Debug helper: show `img` with one rectangle drawn; blocks until keypress."""
    img = np.copy(img)
    # Magenta box, 2px thick; drawn on a copy so the caller's image is untouched.
    cv2.rectangle(img, (xmin, ymin), (xmax, ymax), (255, 0, 255), 2)
    cv2.namedWindow('image', cv2.WINDOW_NORMAL)
    cv2.imshow('image', img)
    cv2.waitKey(0)
    cv2.destroyAllWindows()
    # plt.imshow(img)
    # plt.show()
def combine(img1, img2, w=40):
    '''Overlay 2 images
    Returns:
        New image and bounding box locations
    '''
    img1 = croptobbox(img1)
    img2 = croptobbox(img2)
    # Blank w-by-w canvas; both digits are pasted onto it.
    img3 = np.zeros([w, w], 'uint8')
    # Random centre rows/columns for the two digits.
    i1, i2 = np.random.randint(15, 25, size=2)
    j1, j2 = np.random.randint(12, 16, size=2)
    mask = img2 > 0
    h1, w1 = img1.shape
    h2, w2 = img2.shape
    # Convert centres to top-left corners; the second digit is shifted
    # 12 columns right so the pair overlaps rather than coincides.
    i1 = i1 - h1 // 2
    i2 = i2 - h2 // 2
    j1 = j1 - w1 // 2
    j2 = j2 - w2 // 2 + 12
    img3[i1:i1 + h1, j1:j1 + w1] = img1
    # Second digit only overwrites where it has ink, preserving digit 1 below.
    img3[i2:i2 + h2, j2:j2 + w2][mask] = img2[mask]
    tl1 = j1, i1 # topleft row and column indices
    br1 = j1 + w1, i1 + h1 # bottom right
    tl2 = j2, i2
    br2 = j2 + w2, i2 + h2
    return img3, [[tl1, br1], [tl2, br2]]
# <xmin>81</xmin>
def pickimg(images, index):
    """Return an independent 28x28 copy of the flattened image at `index`."""
    flat = images[index]
    return np.copy(flat.reshape([28, 28]))
def findbbox(img):
    """Return (row_min, row_max, col_min, col_max) of the nonzero region,
    with max indices inclusive."""
    rows, cols = np.where(img != 0)
    return np.min(rows), np.max(rows), np.min(cols), np.max(cols)
def croptobbox(img):
    '''Crop image to the bounding box of its nonzero pixels.

    Returns a copy of the minimal sub-image that contains every nonzero pixel.
    '''
    rows, cols = np.where(img != 0)
    ymin, ymax = np.min(rows), np.max(rows)
    xmin, xmax = np.min(cols), np.max(cols)
    # np.max yields the *inclusive* index of the last nonzero row/column, so
    # the slice upper bounds need +1. The original sliced [ymin:ymax, xmin:xmax]
    # and therefore always dropped the last row and column of every digit.
    return np.copy(img[ymin:ymax + 1, xmin:xmax + 1])
def placeincanvas(canvas, img, i, j):
    """Blit the nonzero pixels of `img` onto `canvas` at top-left (i, j).

    Mutates `canvas` in place; zero pixels of `img` leave the canvas untouched.
    """
    h, w = img.shape
    region = canvas[i:i + h, j:j + w]   # view into canvas -> writes propagate
    ink = img > 0
    region[ink] = img[ink]
def applyscaling(img, size=None):
    """Resize `img` with cubic interpolation.

    With `size` given, resize to that exact (width, height); otherwise scale
    each axis by an independent random factor in [1, 2).
    """
    if size is not None:
        # Explicit target size: the random factors are irrelevant, so do not
        # draw them (the original also unpacked `size` into unused locals).
        return cv2.resize(np.copy(img), size, interpolation=cv2.INTER_CUBIC)
    fx = 2 ** np.random.sample()
    fy = 2 ** np.random.sample()
    return cv2.resize(np.copy(img), None, fx=fx, fy=fy, interpolation=cv2.INTER_CUBIC)
# dataset = {
# 'train': {},
# 'test': {}
# }
#
# for group, data in [("train", mnist.train),("test", mnist.test)]:
# images, labels = preprocess(data.images, data.labels)
# dataset[group]['images'] = images
# dataset[group]['labels'] = labels
#
#
# for group in ['train', 'test']:
# images = dataset[group]['images']
# labels = dataset[group]['labels']
# for i in range(len(images)):
# writedata(i, images[i], labels[i], group)
# Shuffled index order over the MNIST training set plus a cursor, consumed by
# ffg() via `global list` / `global count`.
# NOTE(review): `list` shadows the builtin; renaming it must be coordinated
# with ffg(), which references this module-level name.
list = np.arange(len(mnist.train.images))
random.shuffle(list)
count = 0
def ffg(path, size_h, size_w, sample_idx):
    """Render one size_h x size_w scene of scattered MNIST digits plus its XML label.

    Walks a 10x10 grid of cells; each cell gets a digit with probability 0.3,
    jittered by up to 5px. Digits are drawn from the module-level shuffled
    index array `list` using the cursor `count`. Output is written to
    <path>/images/ and <path>/labels/ as NNNNN.png / NNNNN.xml.
    """
    global list
    global count
    imgdir = path + '/images/'
    lbldir = path + '/labels/'
    canvas = np.zeros((size_h, size_w), 'uint8')
    # NOTE: the loop variable shadows the `path` parameter (harmless here,
    # since `path` is not used again afterwards).
    for path in [imgdir, lbldir]:
        if not os.path.exists(path):
            os.makedirs(path)
    step_size_h = int(size_h / 10)
    step_size_w = int(size_w / 10)
    xml = ''
    img_name = '{:05d}.png'.format(sample_idx)
    xml_name = '{:05d}.xml'.format(sample_idx)
    for h in range(0, size_h, step_size_h):
        for w in range(0, size_w, step_size_w):
            # Advance the shared cursor; reshuffle when the epoch is exhausted.
            if count == len(mnist.train.images)-1:
                count = 0
                random.shuffle(list)
            else:
                count += 1
            x = random.random()
            if x>0.7:
                img = pickimg(mnist.train.images, count)
                lbl = mnist.train.labels[count]
                img = applyscaling(img)
                img = croptobbox(img)
                # bp()
                # cv2.namedWindow('image', cv2.WINDOW_NORMAL)
                # cv2.imshow('image', img)
                # cv2.waitKey(0)w
                # cv2.destroyAllWindows()
                tl_i = h+int(random.randint(0,5))
                tl_j = w+int(random.randint(0,5))
                br_i = tl_i + img.shape[0]
                br_j = tl_j + img.shape[1]
                placeincanvas(canvas, img, tl_i, tl_j)
                xml += obj_xml.format(lbl, tl_j, tl_i, br_j, br_i)
        # print(h)
    xml = ann_xml.format(img_name, *(size_h, size_w), xml)
    # Invert: black digits on a white background.
    canvas = 255 - canvas
    cv2.imwrite(imgdir + img_name, canvas)
    with open(lbldir + xml_name, 'w+') as f:
        f.write(xml)
# print(len(mnist.train.images))
def preprocess2(path, images, labels, size, num_samples=1, digit_range=(1, 2)):
    """Generate `num_samples` scenes of randomly placed digits plus XML labels.

    Args:
        path: output root; images/ and labels/ subdirectories are created.
        images: flattened digit images indexable as images[i].
        labels: class labels aligned with `images`.
        size: (height, width) of the output canvas.
        num_samples: number of scenes to write.
        digit_range: (low, high) passed to np.random.randint for digit count.
    """
    imgdir = path + '/images/'
    lbldir = path + '/labels/'
    for outdir in [imgdir, lbldir]:
        if not os.path.exists(outdir):
            os.makedirs(outdir)
    for sample_idx in tqdm(range(1, num_samples + 1)):
        img_name = '{:05d}.png'.format(sample_idx)
        xml_name = '{:05d}.xml'.format(sample_idx)
        canvas = np.zeros(size, 'uint8')
        xml = ''
        num_digits = np.random.randint(*digit_range)
        # BUGFIX: draw from the `images`/`labels` parameters. The original
        # indexed a leaked module-level `data` object (data.images/data.labels)
        # and ignored its own arguments entirely.
        for i in np.random.randint(0, len(images), size=num_digits):
            img = pickimg(images, i)
            lbl = labels[i]
            img = applyscaling(img)
            img = croptobbox(img)
            tl_i = np.random.randint(0, canvas.shape[0] - img.shape[0])
            tl_j = np.random.randint(0, canvas.shape[1] - img.shape[1])
            br_i = tl_i + img.shape[0]
            br_j = tl_j + img.shape[1]
            placeincanvas(canvas, img, tl_i, tl_j)
            xml += obj_xml.format(lbl, tl_j, tl_i, br_j, br_i)
        xml = ann_xml.format(img_name, *size, xml)
        # Invert to black digits on white background before writing.
        canvas = 255 - canvas
        cv2.imwrite(imgdir + img_name, canvas)
        # 'x' mode deliberately fails rather than overwrite an existing label.
        with open(lbldir + xml_name, 'x') as f:
            f.write(xml)
# f.write(xml)
def generate(num_img):
    """Write `num_img` composite scenes (and labels) via ffg() into ./data/data/."""
    # count = 0
    for idx in tqdm(range(num_img)):
        ffg('./data/data/', size_h, size_w, idx)
# for data in [(mnist.train), (mnist.test)]:
generate(100)
# for group, data in [("train", mnist.train), ("test", mnist.test)]:
# preprocess2('./data/256-simple/' + group, data.images, data.labels, (size_h, size_w) , num_samples=data.images.shape[0])
# img = cv2.imread('data/toy/images/00000.png')
#
# # < xmin > 0 < / xmin >
# # < ymin > 0 < / ymin >
# # < xmax > 22 < / xmax >
# # < ymax > 34 < / ymax >
# plotbbox(img, 64, 1, 87, 34 )
| nguyenvantui/mnist-object-detection | mnist_gen.py | mnist_gen.py | py | 8,145 | python | en | code | 1 | github-code | 36 |
7182292805 | #!/usr/bin/env python3
"""sarsa"""
import numpy as np
import gym
# Q(St) = Q(St) + alpha * delta_t * Et(St)
# delta_t = R(t + 1) + gamma * q(St + 1, At + 1) - q(St, At)
# ET(S) = gamma + lambda * Et - 1(S) + q(St + 1, At + 1) - q(St, At)
def epsilon_greedy(env, Q, state, epsilon):
    """Choose an action for `state` with an epsilon-greedy policy over Q.

    With probability `epsilon` a random action is sampled from the
    environment; otherwise the greedy action argmax(Q[state]) is taken.
    """
    if np.random.uniform(0, 1) < epsilon:
        # Explore.
        return env.action_space.sample()
    # Exploit.
    return np.argmax(Q[state, :])
def sarsa_lambtha(env,
                  Q,
                  lambtha,
                  episodes=5000,
                  max_steps=100,
                  alpha=0.1,
                  gamma=0.99,
                  epsilon=1,
                  min_epsilon=0.1,
                  epsilon_decay=0.05):
    """SARSA(lambda) with accumulating eligibility traces.

    Runs `episodes` episodes of at most `max_steps` steps each, decaying
    epsilon exponentially from its initial value down to `min_epsilon`.
    Updates `Q` in place and returns it.
    """
    init_epsilon = epsilon
    Et = np.zeros((Q.shape))
    for i in range(episodes):
        state = env.reset()
        action = epsilon_greedy(env, Q, state, epsilon)
        for j in range(max_steps):
            # Decay every trace, then bump the trace of the visited pair.
            Et = Et * lambtha * gamma
            Et[state, action] += 1.0
            new_state, reward, done, info = env.step(action)
            # BUGFIX: the next action must be chosen from new_state
            # (the original passed `state`).
            new_action = epsilon_greedy(env, Q, new_state, epsilon)
            # BUGFIX: TD error is R + gamma * Q(s', a') - Q(s, a)
            # (the original added gamma instead of multiplying).
            delta_t = (reward + gamma * Q[new_state, new_action]
                       - Q[state, action])
            # Eligibility traces propagate the TD error to every
            # state-action pair, not just the current one.
            Q += alpha * delta_t * Et
            if done:
                break
            state = new_state
            action = new_action
        epsilon = (min_epsilon + (init_epsilon - min_epsilon)
                   * np.exp(- epsilon_decay * i))
    return Q
| JohnCook17/holbertonschool-machine_learning | reinforcement_learning/0x02-temporal_difference/2-sarsa_lambtha.py | 2-sarsa_lambtha.py | py | 1,708 | python | en | code | 3 | github-code | 36 |
18890544635 | import re
from dca.dca_cmd import DcaCmd
from control_protocol.ssh_server_control import SSHServerControl
# SECURITY(review): SSH usernames/passwords are hard-coded in source; move
# them to environment variables or a secrets store and rotate the exposed
# credentials.
dev1 = DcaCmd(SSHServerControl, '10.137.59.22', 'tianyi.dty', 'Mtfbwy626488') # initialize an instance to operate 10.137.59.22
dev2 = DcaCmd(SSHServerControl, '10.65.7.131', 'root', 'hello1234') # initialize an instance to operate 10.65.7.131
feedback1 = dev1.execute('hwinfo') # execute abstract command
feedback2 = dev2.execute('hwinfo') # execute abstract command
# Extract the CPU count from each hwinfo dump.
# NOTE(review): re.search returns None if the pattern is absent, which would
# raise AttributeError here -- presumably hwinfo always reports "CPU(s):".
cpu_number_dev1 = re.search('CPU\(s\): *\d+', feedback1).group().split()[1] # parse result from feedback
cpu_number_dev2 = re.search('CPU\(s\): *\d+', feedback2).group().split()[1] # parse result from feedback
# cpu_number_dev2 = re.search('', dev2.execute('hwinfo'))
if cpu_number_dev1 == cpu_number_dev2:
    script_ret = dev2.execute('networkinfo') # execute abstract command
dev1.logout() # disconnect from 10.137.59.22
dev2.logout() # disconnect from 10.65.7.131
| obiwandu/DCA | src/script/script-linux.py | script-linux.py | py | 958 | python | en | code | 0 | github-code | 36 |
2964071525 | #!/bin/python3
import math
import os
import random
import re
import sys
# Module-level state shared by build_tree() and Node.add_children():
# `used` records visited node indices; `edge_dict` is the adjacency map.
used = set()
edge_dict = {}
class Node(object):
    """Tree node: `value` is the node's own weight, `total` its subtree sum."""
    def __init__(self, index, value):
        self.index = index
        self.value = value
        self.children = []
        self.total = 0
    def add_children(self, nodes):
        """Attach children by walking the global `edge_dict` adjacency map,
        marking visited node indices in the global `used` set.

        NOTE(review): recursion depth equals the tree height; very deep
        inputs may hit Python's recursion limit -- confirm constraints.
        """
        used.add(self.index)
        for child_index in edge_dict[self.index]:
            if child_index in used:
                continue
            child = nodes[child_index]
            self.children.append(child)
            child.add_children(nodes)
def build_tree(data, edges):
    """Build the tree rooted at node 1 from node values and undirected edges.

    Populates the module-level `edge_dict` adjacency map, then lets the root
    recursively claim its children (tracking visits via the global `used` set).
    Returns the root Node.
    """
    nodes = [None] + [Node(i + 1, value) for i, value in enumerate(data)]
    for i, j in edges:
        # setdefault replaces the original if/append boilerplate.
        edge_dict.setdefault(i, []).append(j)
        edge_dict.setdefault(j, []).append(i)
    # BUGFIX: removed a stray debug `print(edge_dict)` that corrupted the
    # program's stdout (the judge expects only the final answer).
    root = nodes[1]
    root.add_children(nodes)
    return root
def calc_sub(tree):
    """Fill in `total` (own value + all descendants) for every node, bottom-up.

    Returns the subtree sum of `tree`.
    """
    tree.total = tree.value + sum(calc_sub(child) for child in tree.children)
    return tree.total
def find_sub(tree, target):
    """Return the subtree total (including `tree` itself) closest to `target`.

    Ties are resolved in favour of the deeper candidate, matching the
    original's strict-inequality comparison.
    """
    best = tree.total
    for subtree in tree.children:
        candidate = find_sub(subtree, target)
        if abs(target - candidate) <= abs(target - best):
            best = candidate
    return best
# Complete the 'cutTheTree' function below.
#
# The function is expected to return an INTEGER.
# The function accepts following parameters:
# 1. INTEGER_ARRAY data
# 2. 2D_INTEGER_ARRAY edges
#
def cutTheTree(data, edges):
    """Return the minimum |sum(T1) - sum(T2)| over all single-edge cuts.

    Strategy: compute every subtree sum, find the one closest to half the
    grand total; the answer is |total - 2 * closest|.
    """
    # Write your code here
    # build tree
    tree = build_tree(data,edges)
    # calc sub tree sums - bottom up
    calc_sub(tree)
    # find sub tree that sums closest to total_sum/2
    # NOTE(review): the root's own total is also a candidate inside find_sub;
    # harmless for strictly positive node values (any proper subtree is
    # closer to total/2), but verify if zeros/negatives are allowed.
    closest_sub = find_sub(tree, tree.total/2)
    # return 2*(total_sum/2-sum)
    # Algebraically abs(total - 2*closest_sub); computed via float division
    # and truncated back to int.
    return abs(int(2*(tree.total/2-closest_sub)))
if __name__ == '__main__':
    # HackerRank driver: node count, node values, then n-1 edges from stdin.
    n = int(input().strip())
    data = list(map(int, input().rstrip().split()))
    edges = []
    for _ in range(n - 1):
        edges.append(list(map(int, input().rstrip().split())))
    result = cutTheTree(data, edges)
    print(str(result) + '\n')
| jvalansi/interview_questions | cut_the_tree.py | cut_the_tree.py | py | 2,206 | python | en | code | 0 | github-code | 36 |
5409351564 | import sys
from pathlib import Path
from shutil import copy, copytree, ignore_patterns
# This script initializes new pytorch project with the template files.
# Run `python3 new_project.py ../MyNewProject` then new project named
# MyNewProject will be made
# Scaffold a new project directory by copying the template's src/ tree and
# config files. Usage: python3 new_project.py MyNewProject
current_dir = Path()
assert (
    current_dir / "new_project.py"
).is_file(), "Script should be executed in the pytorch-template directory"
assert (
    len(sys.argv) == 2
), "Specify a name for the new project. Example: python3 new_project.py MyNewProject"
project_name = Path(sys.argv[1])
target_dir = current_dir / project_name
package_dir = target_dir / "src"
# mkdir(parents=True) also fails with FileExistsError if the project exists,
# protecting an existing project from being clobbered.
package_dir.mkdir(parents=True)
# Files/directories excluded from the template copy.
ignore = [
    ".git",
    "data",
    "saved",
    "new_project.py",
    "LICENSE",
    "README.md",
    "__pycache__",
    ".mypy_cache",
]
copytree(
    current_dir / "src",
    package_dir / project_name.name,
    ignore=ignore_patterns(*ignore),
)
(target_dir / "config").mkdir()
copy(current_dir / "config.json", target_dir / "config")
(target_dir / "datasets").mkdir()
(target_dir / "saved").mkdir()
# NOTE(review): .gitignore and .flake8 are copied into config/ -- looks like
# they should land in target_dir itself; confirm intended layout.
copy(current_dir / ".gitignore", target_dir / "config")
copy(current_dir / ".flake8", target_dir / "config")
print("New project initialized at", target_dir.absolute().resolve())
| Ttayu/pytorch-template | new_project.py | new_project.py | py | 1,242 | python | en | code | 0 | github-code | 36 |
14963542759 | import mysql.connector
import socket
import logging
from logging.config import fileConfig
# Configure logging from log.ini, directing output to bee.log.
fileConfig('log.ini', defaults={'logfilename': 'bee.log'})
logger = logging.getLogger('database')
# SECURITY(review): database credentials are hard-coded in source and target
# a public IP; move them to environment variables or a secrets store and
# rotate the exposed password.
mydb = mysql.connector.connect(
    host="45.76.113.79",
    database="hivekeeper",
    user="pi_write",
    password=")b*I/j3s,umyp0-8"
)
def upload_wx(wx, verbose=False):
    """Insert one weather-observation dict into the `weather` table.

    `wx` must supply the keys referenced below (calc_time, location, wind_*,
    temp*, humidity, pressure, clouds, sunrise, sunset, visibility,
    wx_description). Values are bound as query parameters, so the values
    themselves carry no SQL-injection risk. Returns True after commit.
    """
    mycursor = mydb.cursor()
    sql = "INSERT INTO `weather` (dt, location, wind_deg, wind_gust, wind_speed, temp, temp_min, temp_max, temp_feels_like, humidity, pressure, clouds, sunrise, sunset, visibility, description) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
    # Parameter order must match the column list above exactly.
    val = (
        wx['calc_time'],
        wx['location'],
        wx['wind_deg'],
        wx['wind_gust'],
        wx['wind_speed'],
        wx['temp'],
        wx['temp_min'],
        wx['temp_max'],
        wx['temp_feels_like'],
        wx['humidity'],
        wx['pressure'],
        wx['clouds'],
        wx['sunrise'],
        wx['sunset'],
        wx['visibility'],
        wx['wx_description'],
    )
    mycursor.execute(sql, val)
    mydb.commit()
    if verbose:
        logger.debug (str(mycursor.rowcount) + " record inserted.")
    return True
def get_host_name():
    """Return this machine's hostname (thin wrapper over socket.gethostname)."""
    return socket.gethostname()
def send_data(sensor_id, sensor_value, table=u'raw_data', verbose=False):
    """Insert a (host, sensor_id, value) reading into `table`.

    Raises ValueError if `table` is not a plain identifier. Returns True
    after the transaction commits.
    """
    # A table name cannot be bound as a query parameter, so validate it
    # explicitly before interpolating it -- this closes the SQL-injection
    # hole the raw string concatenation opened.
    if not table or not table.replace('_', '').isalnum():
        raise ValueError('invalid table name: {!r}'.format(table))
    mycursor = mydb.cursor()
    sql = "INSERT INTO `" + table + "` (host, sensor_id, value) VALUES (%s, %s, %s)"
    # Values are bound as parameters, so they are injection-safe.
    val = (socket.gethostname(), sensor_id, sensor_value)
    mycursor.execute(sql, val)
    mydb.commit()
    if verbose:
        logger.debug(str(mycursor.rowcount) + " record inserted.")
    return True
| jenkinsbe/hivekeepers | database.py | database.py | py | 1,702 | python | en | code | 0 | github-code | 36 |
def mutate_string(string, position, character):
    """Return a copy of `string` with the character at `position` replaced.

    Accepts negative positions with the usual Python semantics. Replaces the
    original's char-list rebuild (two loops plus quadratic concatenation)
    with a single O(n) slice expression.
    """
    if position < 0:
        # Normalize so the slice arithmetic below matches list-index semantics.
        position += len(string)
    return string[:position] + character + string[position + 1:]
if __name__ == "__main__":
s = input()
i, c = input().split()
s_new = mutate_string(s, int(i), c)
print(s_new)
| scouvreur/hackerrank | python/strings/mutations.py | mutations.py | py | 366 | python | en | code | 1 | github-code | 36 |
23777096489 | import re
import sys
from random import randrange, randint, choices, shuffle
from typing import List, Dict, Tuple
import numpy as np
import pandas as pd
from pepfrag import ModSite, IonType, pepfrag
from pyteomics.mass import calculate_mass
from src.fragment_matching import write_matched_fragments
from src.model.fragment import Fragment
from src.model.modification import IAA_ALKYLATION, CYS_BOND
from src.model.peptide import Peptide
from src.model.precursor import Precursor
from src.model.scan import Scan
from src.precursor_matching import write_matched_precursors
from src.utilities.constants import PROTON
from src.utilities.dataloading import cleave_protein
def intersects(t, u):
    """Return True if the half-open intervals t = [x, y) and u = [a, b) overlap."""
    lo1, hi1 = t
    lo2, hi2 = u
    return lo1 < hi2 and lo2 < hi1
def remove_peptide_duplicates(xs):
    """Deduplicate a list of dicts by their (insertion-ordered) item tuples.

    Values must be hashable; dicts with the same items in a different
    insertion order are treated as distinct, matching the original.
    """
    unique_items = {tuple(entry.items()) for entry in xs}
    return [dict(items) for items in unique_items]
def connected_cys_count(prec):
    """Number of cysteines in the precursor not capped by alkylation
    (i.e. still available for disulfide bonding)."""
    total_cys = prec.sequence.count("C")
    return total_cys - prec.alkylation_count
def generate_simple_peptides(
    tryptides: List[Peptide],
    cys_bond_tryptides,
    base_count=10_000,
    max_mc_count=5,
):
    """Randomly sample up to `base_count` contiguous tryptide runs as precursors.

    Each run [b, e) concatenates adjacent tryptic peptides (at most
    `max_mc_count` missed cleavages). Returns two deduplicated lists of
    {"charge", "precursor"} dicts: runs free of bond-forming cysteines, and
    runs overlapping an index in `cys_bond_tryptides`.
    """
    peptide_without_bond_cys: List[Dict] = []
    peptide_with_bond_cys: List[Dict] = []
    for _ in range(0, base_count):
        b = randrange(0, len(tryptides) - 1)
        e = randrange(b + 1, min(len(tryptides), b + 2 + max_mc_count))
        # NOTE(review): b < e always holds given the randrange bounds above.
        if b < e:
            charge = randint(1, 5)
            sequence = "".join(t.sequence for t in tryptides[b:e])
            # Every cysteine is alkylated except those engaged in a bond.
            alkylations = sum(res == "C" for res in sequence)
            cys_overlap = [i for i in cys_bond_tryptides if i in range(b, e)]
            if cys_overlap:
                alkylations -= len(cys_overlap)
            mass = calculate_mass(sequence) + alkylations * IAA_ALKYLATION.mass
            prec: Dict = {
                "charge": charge,
                "precursor": Precursor(
                    sequence=sequence,
                    mass=mass,
                    mz=mass / charge + PROTON,
                    segments=[(b, e)],
                    residue_ranges=[(tryptides[b].beginning, tryptides[e - 1].end)],
                    cys_bond_count=0,
                    alkylation_count=alkylations,
                    modifications=[],
                    error_ppm=0,
                ),
            }
            if cys_overlap:
                peptide_with_bond_cys.append(prec)
            else:
                peptide_without_bond_cys.append(prec)
    return (
        remove_peptide_duplicates(peptide_without_bond_cys),
        remove_peptide_duplicates(peptide_with_bond_cys),
    )
def generate_dipeptides(peptides: List[Dict], max_charge=5):
    """Join every pair of non-overlapping peptides into a disulfide-linked dipeptide.

    Segments are ordered by start position, the sequences joined with '+',
    and one CYS_BOND mass added. Returns a deduplicated list of
    {"charge", "precursor"} dicts with a random charge in [1, max_charge].
    """
    dipeptides = []
    for i, s in enumerate(peptides):
        prec: Precursor = s["precursor"]
        # peptides[i:] includes s itself, so self-pairs are also considered
        # (rejected below because a segment always intersects itself).
        for t in peptides[i:]:
            qrec: Precursor = t["precursor"]
            if not intersects(prec.segments[0], qrec.segments[0]):
                charge = randint(1, max_charge)
                ps = sorted([prec, qrec], key=lambda p: p.segments[0][0])
                mass = prec.mass + qrec.mass + CYS_BOND.mass
                joined = Precursor(
                    sequence=ps[0].sequence + "+" + ps[1].sequence,
                    mass=mass,
                    mz=mass / charge + PROTON,
                    segments=ps[0].segments + ps[1].segments,
                    residue_ranges=ps[0].residue_ranges + ps[1].residue_ranges,
                    cys_bond_count=1,
                    alkylation_count=prec.alkylation_count + qrec.alkylation_count,
                    modifications=ps[0].modifications + ps[1].modifications,
                    error_ppm=0,
                )
                dipeptides.append({"charge": charge, "precursor": joined})
    return remove_peptide_duplicates(dipeptides)
def generate_unipeptides(peptides: List[Dict]):
    """Form intramolecular-bond peptides from those with exactly two free cysteines.

    Each qualifying peptide gets one internal CYS_BOND (mass added, m/z
    recomputed at the peptide's own charge). Returns a deduplicated list of
    {"charge", "precursor"} dicts.
    """
    unipeptides = []
    for s in peptides:
        p: Precursor = s["precursor"]
        if connected_cys_count(p) == 2:
            charge = s["charge"]
            precursor = Precursor(
                p.sequence,
                p.mass + CYS_BOND.mass,
                (p.mass + CYS_BOND.mass) / charge + PROTON,
                p.segments,
                p.residue_ranges,
                p.cys_bond_count,
                p.alkylation_count,
                p.modifications,
                p.error_ppm,
            )
            unipeptides.append({"charge": charge, "precursor": precursor})
    return remove_peptide_duplicates(unipeptides)
def valid_frags(frags, cys, length):
    """Keep only fragments long enough to reach past residue index `cys`.

    A b-ion must extend strictly beyond `cys` from the N-terminus; a y-ion
    must cover at least `length - cys` residues from the C-terminus.
    """
    def reaches_cys(frag):
        _, code, span = frag
        if "b" in code:
            return span > cys
        return span >= length - cys
    return [frag for frag in frags if reaches_cys(frag)]
def charge_from_code(code):
    """Extract the charge from an ion code like 'y3[2+]'; a bare '[+]' means 1.

    NOTE(review): codes without a trailing "[n+]" suffix make re.match return
    None and raise AttributeError -- presumably all pepfrag codes carry it.
    """
    suffix = re.match(r".*\[(\d+)?\+]$", code)
    digits = suffix.group(1)
    return 1 if digits is None else int(digits)
def split_on_simple_frags(seq, frags, cysteines):
    """Partition `frags` into (safe, unsafe) by bonded-cysteine coverage.

    `seq` is the (begin, end) residue span of the parent. A b-ion of length i
    covers residues [begin, begin+i); a y-ion covers [end-i, end). Fragments
    whose span contains any index in `cysteines` are "unsafe".
    """
    start, end = seq
    safe, unsafe = [], []
    for frag in frags:
        _, code, length = frag
        if "b" in code:
            covered = range(start, start + length)
        else:
            covered = range(end - length, end)
        if any(c in covered for c in cysteines):
            unsafe.append(frag)
        else:
            safe.append(frag)
    return safe, unsafe
def simple_fragment(
    id, sequence, residue_range, charge, mz, break_count, intensity_ratio, intensity=10
):
    """Build a Fragment with target values equal to observed values (error 0).

    Mass is reconstructed from m/z and charge; every cysteine in `sequence`
    is assumed alkylated (one IAA_ALKYLATION modification per 'C').
    """
    return Fragment(
        id=id,
        sequence=sequence,
        residue_ranges=residue_range,
        intensity=intensity,
        intensity_ratio=intensity_ratio,
        target_mass=(mz - PROTON) * charge,
        mass=(mz - PROTON) * charge,
        target_mz=mz,
        mz=mz,
        charge=charge,
        break_count=break_count,
        error_ppm=0,
        modifications=[IAA_ALKYLATION for res in sequence if res == "C"],
        connected_bonds=[],
        disconnected_cys=[],
    )
def fragment_sequence(seq, frag, residue_range):
    """Return (subsequence, residue span) of `seq` covered by ion `frag`.

    b-ions take the N-terminal prefix of the given length; y-ions take the
    C-terminal suffix. `residue_range` is the parent's (begin, end) span.
    """
    _, code, length = frag
    begin, end = residue_range
    if "b" in code:
        return seq[:length], (begin, begin + length)
    return seq[-length:], (end - length, end)
def simple_frags_to_fragments(frags, prec_sequence, prec_residue_range, precursor):
    """Wrap raw (mz, code, length) fragment tuples into Fragment match records.

    Returns a list of {"fragment", "precursor", "var_bonds"} dicts; each
    fragment's intensity_ratio is split evenly over the batch, and
    break_count is 1 unless the fragment spans the whole precursor.
    """
    fragments = []
    for id, frag in enumerate(frags):
        mz, code, i = frag
        frag_charge = charge_from_code(code)
        frag_sequence, frag_residue_range = fragment_sequence(
            prec_sequence, frag, prec_residue_range
        )
        fragment = simple_fragment(
            id=id,
            sequence=frag_sequence,
            residue_range=[frag_residue_range],
            charge=frag_charge,
            mz=mz,
            # Full-length fragments (the intact precursor ion) have 0 breaks.
            break_count=int(prec_residue_range != frag_residue_range),
            intensity_ratio=1 / len(frags),
        )
        fragments.append(
            {"fragment": fragment, "precursor": precursor, "var_bonds": []}
        )
    return fragments
def safe_choose_n(fragments, n=50):
    """Sample `n` fragments with replacement, deduplicate, return them sorted.

    Because sampling is with replacement, the result may hold fewer than
    `n` distinct fragments.
    """
    sampled = set(choices(fragments, k=n))
    return sorted(sampled)
def pepfrag_fragments(
    sequence: str,
    residue_range: Tuple[int, int],
    charge: int,
    ion_types,
    bond_cys_res: List[int],
    count=50,
):
    """Fragment `sequence` with pepfrag and sample up to `count` ions.

    Cysteines not participating in a disulfide bond (i.e. whose absolute
    residue index is not in `bond_cys_res`) carry an IAA alkylation ModSite
    (1-based position within the peptide).
    """
    frags = pepfrag.Peptide(
        sequence,
        charge=charge,
        modifications=[
            ModSite(IAA_ALKYLATION.mass, ri + 1, IAA_ALKYLATION.description)
            # BUGFIX: zip(sequence, range(...)) yields (residue_char, abs_index);
            # the original unpacked them swapped, so `res == "C"` compared an
            # int against "C" and no alkylation ModSite was ever generated.
            for ri, (res, ai) in enumerate(zip(sequence, range(*residue_range)))
            if res == "C" and ai not in bond_cys_res
        ],
    ).fragment(ion_types=ion_types)
    return safe_choose_n(frags, count)
def fragment_simple_peptide(
    peptide: Dict,
    bond_cys_res: List[int],
    count=50,
    ion_types=None,
):
    """Fragment a single-segment, bond-free peptide into match records.

    Defaults to y/b/precursor ion types. Returns the
    {"fragment", "precursor", "var_bonds"} dicts built by
    simple_frags_to_fragments().
    """
    if ion_types is None:
        ion_types = {IonType.y: [], IonType.b: [], IonType.precursor: []}
    precursor: Precursor = peptide["precursor"]
    sequence = precursor.sequence
    residue_range = precursor.residue_ranges[0]
    # if connected_cys_count(precursor) == 0:
    frags = pepfrag_fragments(
        sequence=precursor.sequence,
        residue_range=residue_range,
        charge=peptide["charge"],
        bond_cys_res=bond_cys_res,
        ion_types=ion_types,
        count=count,
    )
    return simple_frags_to_fragments(frags, sequence, residue_range, precursor)
def fragment_dipeptide(
    peptide: Dict, bond_cys_res: List[int], ion_types=None, count=50
):
    """Fragment a two-segment (disulfide-linked) peptide.

    Each segment is fragmented at charge 1; cysteine-free fragments are
    emitted as plain records, while cysteine-containing fragments from the
    two segments are randomly paired into linked fragments carrying one
    CYS_BOND mass and a random charge in [1, peptide charge].
    """
    if ion_types is None:
        ion_types = {IonType.y: [], IonType.b: [], IonType.precursor: []}
    max_charge = peptide["charge"]
    precursor: Precursor = peptide["precursor"]
    ps, qs = precursor.sequence.split("+")
    prr, qrr = precursor.residue_ranges
    result = []
    building_fragments = []
    for sequence, residue_range in [(ps, prr), (qs, qrr)]:
        frags = pepfrag_fragments(
            sequence=sequence,
            residue_range=residue_range,
            charge=1,
            ion_types=ion_types,
            bond_cys_res=bond_cys_res,
            count=count,
        )
        simple_frags, cys_frags = split_on_simple_frags(
            residue_range, frags, bond_cys_res
        )
        result += simple_frags_to_fragments(
            simple_frags, sequence, residue_range, precursor
        )
        shuffle(cys_frags)
        building_fragments.append(
            [
                fr["fragment"]
                for fr in simple_frags_to_fragments(
                    cys_frags, sequence, residue_range, precursor
                )
            ]
        )
    # Pair cysteine fragments across the two segments (with replacement).
    for i, (pf, qf) in enumerate(
        choices(list(zip(building_fragments[0], building_fragments[1])), k=count)
    ):
        total_charge = randint(1, max_charge)
        total_mass = pf.mz + qf.mz + CYS_BOND.mass - 2 * PROTON
        # Both halves must actually contain a cysteine to host the bond.
        if "C" not in pf.sequence or "C" not in qf.sequence:
            continue
        fragment = Fragment(
            id=0,
            sequence=pf.sequence + "+" + qf.sequence,
            residue_ranges=pf.residue_ranges + qf.residue_ranges,
            intensity=10,
            intensity_ratio=1,
            mass=total_mass,
            target_mass=total_mass,
            mz=total_mass / total_charge + PROTON,
            target_mz=total_mass / total_charge + PROTON,
            charge=total_charge,
            break_count=pf.break_count + qf.break_count,
            error_ppm=0,
            # NOTE(review): the bond position (72, 119) is hard-coded here
            # rather than derived from bond_cys_res -- confirm intended.
            modifications=qf.modifications + pf.modifications,
            connected_bonds=tuple([(72, 119)]),
            disconnected_cys=tuple([]),
        )
        result.append(
            {"fragment": fragment, "precursor": precursor, "var_bonds": [(72, 119)]}
        )
    return result
def fragment_unipeptide(
    peptide: Dict, bond_cys_res: List[int], ion_types=None, count=50
):
    """Fragment a single-segment peptide carrying an internal disulfide bond.

    The peptide is fragmented at charge 1; cysteine-containing b- and y-ions
    are randomly paired into bond-bridged fragments (one CYS_BOND mass,
    random charge in [1, peptide charge]). Overlapping pairs are rejected.
    """
    if ion_types is None:
        ion_types = {IonType.y: [], IonType.b: [], IonType.precursor: []}
    max_charge = peptide["charge"]
    precursor: Precursor = peptide["precursor"]
    sequence = precursor.sequence
    residue_range = precursor.residue_ranges[0]
    frags = pepfrag_fragments(
        sequence=sequence,
        residue_range=residue_range,
        charge=1,
        ion_types=ion_types,
        bond_cys_res=bond_cys_res,
        count=count,
    )
    simple_frags, cys_frags = split_on_simple_frags(residue_range, frags, bond_cys_res)
    # NOTE(review): simple_frags are discarded here, unlike fragment_dipeptide
    # which emits them -- confirm whether that asymmetry is intended.
    result = []
    b_ions, y_ions = [], []
    for frag in cys_frags:
        if "b" in frag[1]:
            b_ions.append(frag)
        else:
            y_ions.append(frag)
    fragments = []
    for ions in [b_ions, y_ions]:
        fragments.append(
            [
                fr["fragment"]
                # BUGFIX: convert the current ion list; the original passed
                # `b_ions` on both iterations, so y-ions were never used.
                for fr in simple_frags_to_fragments(
                    ions, sequence, residue_range, precursor
                )
            ]
        )
    for i, (pf, qf) in enumerate(
        choices(list(zip(fragments[0], fragments[1])), k=count)
    ):
        # Both halves must contain a cysteine to host the bond.
        if "C" not in pf.sequence or "C" not in qf.sequence:
            continue
        total_charge = randint(1, max_charge)
        pr, qr = pf.residue_ranges[0], qf.residue_ranges[0]
        if intersects(pr, qr):
            continue
        total_mass = pf.mz + qf.mz + CYS_BOND.mass - 2 * PROTON
        fragment = Fragment(
            id=i,
            sequence=pf.sequence + "+" + qf.sequence,
            residue_ranges=pf.residue_ranges + qf.residue_ranges,
            intensity=10,
            intensity_ratio=1,
            mass=total_mass,
            target_mass=total_mass,
            mz=total_mass / total_charge + PROTON,
            target_mz=total_mass / total_charge + PROTON,
            charge=total_charge,
            # Adjacent halves share one break; separated halves imply two.
            break_count=2 if pr[1] != qr[0] else 1,
            error_ppm=0,
            modifications=qf.modifications + pf.modifications,
            connected_bonds=tuple([(72, 119)]),
            disconnected_cys=tuple([]),
        )
        result.append(
            {"fragment": fragment, "precursor": precursor, "var_bonds": [(72, 119)]}
        )
    return result
def generate_fragments(peptide: Dict, **kwargs):
    """Dispatch fragmentation by peptide topology.

    No disulfide bond -> simple peptide; two segments -> dipeptide;
    otherwise a single segment with an internal bond -> unipeptide.
    """
    precursor: Precursor = peptide["precursor"]
    if precursor.cys_bond_count == 0:
        return fragment_simple_peptide(peptide, **kwargs)
    if len(precursor.segments) == 2:
        return fragment_dipeptide(peptide, **kwargs)
    return fragment_unipeptide(peptide, **kwargs)
if __name__ == "__main__":
import argparse
args = argparse.ArgumentParser(description="Generate precursors and fragments")
args.add_argument(
"--protein",
type=str,
required=True,
help="protein code (usually three letters)",
)
args.add_argument(
"--kind",
type=str,
choices=["AT", "RAT"],
required=True,
help="measurement type (AT/RAT)",
)
args.add_argument(
"--prec_error",
type=int,
required=True,
help="allowed precursor error in ppm",
)
args.add_argument(
"--frag_error",
type=int,
required=True,
help="allowed fragment error in ppm",
)
args.add_argument(
"--prec_segments",
type=int,
required=True,
help="upper bound of segment count in matched precursors",
)
args.add_argument(
"--frag_breaks",
type=int,
required=True,
help="upper bound of break count in matched fragments",
)
args = args.parse_args()
tryptides = cleave_protein(args.protein)
print(f"Generating precursors...")
precursors, cys_peptides = generate_simple_peptides(tryptides, [7, 10])
if args.kind == "AT":
precursors += generate_dipeptides(cys_peptides)
precursors += generate_unipeptides(cys_peptides)
print(f"In total there's {len(precursors)} precursors.")
sys.exit()
scans: List[Scan] = []
fragment_records = []
precursor_records = []
for i, peptide in enumerate(precursors):
precursor: Precursor = peptide["precursor"]
fragments = generate_fragments(peptide, bond_cys_res=[72, 119])
fragment_objects: List[Fragment] = [f["fragment"] for f in fragments]
scan = Scan(
nth_in_order=i,
id=i,
time=i,
charge=peptide["charge"],
prec_mz=precursor.mz,
prec_intensity=100,
prec_mass=precursor.mass,
fragments_mz=np.array(sorted([f.mz for f in fragment_objects])),
fragments_intensity=np.array([f.intensity for f in fragment_objects]),
threshold=0,
)
scans.append(scan)
precursor_records.append(scan.to_dict() | precursor.to_dict())
fragment_records += [
scan.to_dict()
| fr["precursor"].to_dict()
| {"var_bonds": fr["var_bonds"]}
| fr["fragment"].to_dict()
for fr in fragments
]
precursor_path = (
"../out/precursor_matches/{}_{}_segments={}_error={}ppm.pickle".format(
args.protein, args.kind, args.prec_segments, args.prec_error
)
)
precursor_matches = write_matched_precursors(
tryptides,
scans,
precursor_path,
max_segments=args.prec_segments,
error_ppm=args.prec_error,
)
precursor_match_records = []
for pm in precursor_matches:
precursor_match_records.append(pm["scan"].to_dict() | pm["precursor"].to_dict())
prec_df = pd.DataFrame(precursor_match_records)
precursor_csv_path = (
"../out/csv/precursor_matches_{}_{}_segments={}_error={}ppm.pickle".format(
args.protein, args.kind, args.prec_segments, args.prec_error
)
)
print(f"Saving precursor csv to {precursor_csv_path}")
prec_df.to_csv(precursor_csv_path, index=False)
fragment_path = (
"../out/fragment_matches/{}_{}_segments={}_breaks={}_error={}ppm.pickle".format(
args.protein,
args.kind,
args.prec_segments,
args.frag_breaks,
args.frag_error,
)
)
print(f"Computing fragments...")
fragment_matches = write_matched_fragments(
precursor_matches=precursor_matches,
tryptides=tryptides,
output_path=fragment_path,
max_allowed_breaks=args.frag_breaks,
error_ppm=args.frag_error,
)
fragment_match_records = []
for fm in fragment_matches:
fragment_match_records.append(
fm["scan"].to_dict()
| fm["precursor"].to_dict()
| fm["variant"].to_dict()
| (fm["fragment"].to_dict() if fm["fragment"] is not None else {})
| {"prec_variant_count": fm["variant_count"]}
)
frag_df = pd.DataFrame(fragment_match_records)
fragment_csv_path = "../out/fragment_matches/fragment_matches_{}_{}_segments={}_breaks={}_error={}ppm.pickle".format(
args.protein,
args.kind,
args.prec_segments,
args.frag_breaks,
args.frag_error,
)
print(f"Saving fragments csv to {fragment_csv_path}")
frag_df.to_csv(fragment_csv_path, index=False)
| Eugleo/dibby | src/generate_data.py | generate_data.py | py | 18,105 | python | en | code | 1 | github-code | 36 |
875221895 | #!/usr/bin/python
from foo import bar
import datetime
import json
import pathlib
import shutil
import sys
import urllib.request
# Release timestamps of the named versions, used below to decide which log4j
# mitigation config (if any) a downloaded server needs.
# NOTE(review): presumably taken from the version manifest's releaseTime
# fields -- confirm against piston-meta if updating.
date_13w39a = datetime.datetime(2013, 9, 26, 15, 11, 19, tzinfo = datetime.timezone.utc)
date_17w15a = datetime.datetime(2017, 4, 12, 9, 30, 50, tzinfo = datetime.timezone.utc)
date_1_17_pre1 = datetime.datetime(2021, 5, 27, 9, 39, 21, tzinfo = datetime.timezone.utc)
date_1_18_1_rc3 = datetime.datetime(2021, 12, 10, 3, 36, 38, tzinfo = datetime.timezone.utc)
def main():
    """Download the Minecraft server jar for the version named on the
    command line and prepare a ready-to-run directory: eula.txt,
    server.properties, optional ops.json copy, a log4j mitigation where
    needed, and an executable ``run_server`` script.
    """
    if len(sys.argv) != 2:
        print('Usage: ' + sys.argv[0] + ' <version>')
        return
    version = sys.argv[1]
    print('Fetching Minecraft versions')
    with urllib.request.urlopen('https://piston-meta.mojang.com/mc/game/version_manifest_v2.json') as f:
        version_manifest = json.load(f)
    # Resolve the per-version metadata URL from the global manifest.
    version_url = None
    for ver in version_manifest['versions']:
        if ver['id'] == version:
            version_url = ver['url']
            break
    if version_url is None:
        print('No such version: ' + version)
        return
    # Fetch the version metadata and check a server exists *before* creating
    # the directory; previously mkdir ran first, so a client-only version
    # left an empty folder behind on the early return.
    with urllib.request.urlopen(version_url) as f:
        version_json = json.load(f)
    if 'server' not in version_json['downloads']:
        print('There is no server for ' + version)
        return
    try:
        # mkdir doubles as the "already downloaded" check.
        pathlib.Path(version).mkdir()
    except FileExistsError:
        print('Version already downloaded: ' + version)
        return
    release_time = datetime.datetime.fromisoformat(version_json['releaseTime'])
    server_url = version_json['downloads']['server']['url']
    print('Downloading server for ' + version)
    with urllib.request.urlopen(server_url) as fin, open(version + '/server.jar', 'wb') as fout:
        shutil.copyfileobj(fin, fout)
    print('Finishing up')
    with open(version + '/eula.txt', 'w') as f:
        f.write('eula=true\n')
    with open(version + '/server.properties', 'w') as f:
        f.write('enable-command-block=true\n')
        f.write('max-players=1\n')
        f.write('sync-chunk-writes=false\n')
    try:
        # Copy a local ops.json into the server dir if one exists.
        with open('ops.json') as fin, open(version + '/ops.json', 'w') as fout:
            fout.write(fin.read())
    except FileNotFoundError:
        pass
    run_command = 'java'
    # Log4Shell (CVE-2021-44228) mitigation: versions between 13w39a and
    # 1.17-pre1 need an external log4j config file; versions up to
    # 1.18.1-rc3 accept a JVM flag instead.
    if date_13w39a <= release_time < date_1_17_pre1:
        if release_time < date_17w15a:
            log4j_fix_url = 'https://launcher.mojang.com/v1/objects/4bb89a97a66f350bc9f73b3ca8509632682aea2e/log4j2_17-111.xml'
            log4j_fix_file = 'log4j2_17-111.xml'
        else:
            log4j_fix_url = 'https://launcher.mojang.com/v1/objects/02937d122c86ce73319ef9975b58896fc1b491d1/log4j2_112-116.xml'
            log4j_fix_file = 'log4j2_112-116.xml'
        with urllib.request.urlopen(log4j_fix_url) as fin, open(version + '/' + log4j_fix_file, 'wb') as fout:
            shutil.copyfileobj(fin, fout)
        run_command += ' -Dlog4j.configurationFile=' + log4j_fix_file
    elif date_1_17_pre1 <= release_time < date_1_18_1_rc3:
        run_command += ' -Dlog4j2.formatMsgNoLookups=true'
    run_command += ' -jar server.jar nogui'
    with open(version + '/run_server', 'w') as f:
        f.write(run_command + '\n')
    pathlib.Path(version + '/run_server').chmod(0o755)
if __name__ == '__main__':
main()
| JWaters02/Hacknotts-23 | testclient/test_code.py | test_code.py | py | 3,249 | python | en | code | 1 | github-code | 36 |
34091963292 | from loader import dp, bot
from aiogram.types import ContentType, Message
from pathlib import Path
# kelgan hujjatlar (rasm/video/audio...) downloads/categories papkasiga tushadi
download_path = Path().joinpath("downloads","categories")
download_path.mkdir(parents=True, exist_ok=True)
@dp.message_handler()
async def text_handler(message: Message):
    # Catch-all for plain text messages (no content-type filter given).
    await message.reply("Siz matn yubordingiz!")
@dp.message_handler(content_types=ContentType.DOCUMENT)
# @dp.message_handler(content_types='document')
async def doc_handler(message: Message):
    """Save an incoming document to ``download_path`` and echo its file_id."""
    await message.document.download(destination=download_path)
    doc_id = message.document.file_id
    await message.reply("Siz hujjat yubordingiz!\n"
                        f"file_id = {doc_id}")
# @dp.message_handler(content_types=ContentType.VIDEO)
@dp.message_handler(content_types='video')
async def video_handler(message: Message):
    """Save an incoming video to ``download_path`` and echo its file_id."""
    await message.video.download(destination=download_path)
    await message.reply("Video qabul qilindi\n"
                        f"file_id = {message.video.file_id}")
@dp.message_handler(content_types='photo')
# NOTE(review): this shadows the video handler's name ``video_handler``.
# Dispatch still works because aiogram registers at decoration time, but the
# function should be renamed (e.g. photo_handler) for clarity.
async def video_handler(message: Message):
    """Save the highest-resolution photo variant and echo its file_id."""
    # photo[-1] is the largest size Telegram provides for the picture.
    await message.photo[-1].download(destination=download_path)
    await message.reply("Rasm qabul qilindi\n"
                        f"file_id = {message.photo[-1].file_id}")
# All content types other than the three handled above land here.
@dp.message_handler(content_types=ContentType.ANY)
async def any_handler(message: Message):
    # Just acknowledge with the received content type.
    await message.reply(f"{message.content_type} qabul qilindi")
8635223611 | import calendar
from datetime import date
from django.contrib.auth import get_user_model
from django.core.cache import cache
from rest_framework import generics, status
from rest_framework.permissions import IsAuthenticated
from rest_framework.renderers import TemplateHTMLRenderer
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import *
from .models import *
User = get_user_model()
# Full meal-table list view.
class TableListAPI(generics.ListAPIView):
    """List every Table, served from the 'table_list' cache when warm."""
    serializer_class = TableSerializer
    permission_classes = (IsAuthenticated,)
    def get_queryset(self):
        # Cache-aside: fall back to the DB and repopulate on a miss.
        queryset = cache.get('table_list')
        if not queryset:
            tables = Table.objects.all()
            if not tables:
                # NOTE(review): returning "" relies on DRF iterating an empty
                # string as an empty result — confirm this is intentional.
                return ""
            # No timeout given: entry lives until default expiry/invalidation.
            cache.set('table_list', tables)
            queryset = cache.get('table_list')
        return queryset
# Current-month meal-table list view.
class MonthlyTableListAPI(generics.ListAPIView):
    """List Tables dated within the current calendar month (cached)."""
    permission_classes = (IsAuthenticated,)
    serializer_class = TableSerializer
    def get_queryset(self):
        queryset = cache.get('monthly_table_list')
        if not queryset:
            # monthrange -> (weekday of day 1, number of days in month).
            monthrange = calendar.monthrange(date.today().year, date.today().month)
            from_date = date.today().replace(day=1)
            to_date = date.today().replace(day=monthrange[1])
            tables = Table.objects.filter(date__range=[from_date, to_date])
            if not tables:
                # NOTE(review): "" as an empty result — see TableListAPI.
                return ""
            # NOTE(review): the cache key has no month component and no
            # explicit timeout, so last month's data can persist after the
            # month rolls over — confirm invalidation happens elsewhere.
            cache.set('monthly_table_list', tables)
            queryset = cache.get('monthly_table_list')
        return queryset
# Meal-table search view.
class TableSearchAPI(generics.ListAPIView):
    """Search Tables whose dietary_composition contains the ?keywords= term."""
    serializer_class = TableSerializer
    permission_classes = (IsAuthenticated,)

    def get_queryset(self):
        keywords = self.request.GET.get('keywords')
        if not keywords:
            # No (or empty) search term: empty result set.
            return ""
        return Table.objects.filter(dietary_composition__icontains=keywords)
# Main page view: calendar data plus the requesting user's table log.
class MainPageAPI(APIView):
    """Return this month's calendar info and the user's TableLog entries."""
    permission_classes = (IsAuthenticated,)
    def get(self, request):
        # (weekday of the 1st, number of days) for the current month.
        cal = calendar.monthrange(date.today().year, date.today().month)
        # The user's log entries for the whole current month.
        user_monthly_log = TableLog.objects.filter(
            user=request.user,
            date__range=[date.today().replace(day=1), date.today().replace(day=cal[1])]
        )
        serializers = TableLogSerializer(user_monthly_log, many=True)
        log_data = {
            "calendar": cal,
            "userLog": serializers.data
        }
        return Response(log_data, status=status.HTTP_200_OK)
# Create-or-update view for today's table log entry.
class MakeTableLogAPI(APIView):
    """Upsert the requesting user's TableLog for today's given meal time.

    Existing entry -> update its table (202); none -> create one (201).
    """
    permission_classes = (IsAuthenticated,)
    def post(self, request):
        serializer = MakeTableLogSerializer(data=request.data)
        if serializer.is_valid():
            given_pk = serializer.data["table_pk"]
            given_meal_time = serializer.data["meal_time"]
            try:
                # Look for an existing log for (user, today, meal time).
                table_log = TableLog.objects.get(
                    user=request.user,
                    date=date.today(),
                    time=given_meal_time
                )
                table_log.table = Table.objects.get(pk=given_pk)
                table_log.save()
                return Response({
                    "message": "변경되었습니다.",
                    "tableLog": TableLogSerializer(table_log).data
                }, status=status.HTTP_202_ACCEPTED)
            except ObjectDoesNotExist:
                # NOTE(review): get-then-create is race-prone under concurrent
                # requests; update_or_create would be atomic. Also an invalid
                # table_pk raises Table.DoesNotExist inside the try and is
                # caught here too — confirm that is intended.
                table_log = TableLog.objects.create(
                    table=Table.objects.get(pk=given_pk),
                    user=request.user,
                    date=date.today(),
                    time=given_meal_time
                )
                return Response(
                    {
                        "message": "저장되었습니다.",
                        "tableLog": TableLogSerializer(table_log).data
                    },
                    status=status.HTTP_201_CREATED
                )
        return Response(serializer.errors, status=status.HTTP_400_BAD_REQUEST)
| hanoul1124/healthcare2 | app/tables/apis.py | apis.py | py | 4,279 | python | en | code | 0 | github-code | 36 |
33078309482 | from flask import request
from flask.ext.babel import Babel
from tweetmore import app
import re
babel = Babel(app)
# *_LINK_LENGTH constants must be get from help/configuration/short_url_length daily
# last update 14th November 2013
TWITTER_HTTPS_LINK_LENGTH = 23
TWITTER_HTTP_LINK_LENGTH = 22
TWITTER_MEDIA_LINK_LENGTH = 23
CONTINUATION_CHARARCTERS = u'… '
MAX_STATUS_TEXT_LENGTH = 140 - TWITTER_MEDIA_LINK_LENGTH - 1
# RegEx source: http://daringfireball.net/2010/07/improved_regex_for_matching_urls
url_regex_pattern = r"(?i)\b((?:[a-z][\w-]+:(?:/{1,3}|[a-z0-9%])|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'.,<>?«»“”‘’]))"
url_regex = re.compile(url_regex_pattern, re.I | re.M | re.U)
url_regex_pattern_no_protocol = r"(\w+\.(aero|asia|biz|cat|com|coop|edu|gov|info|int|jobs|mil|mobi|museum|name|net|org|pro|tel|travel|xxx){1}(\.(ac|ad|ae|af|ag|ai|al|am|an|ao|aq|ar|as|at|au|aw|ax|az|ba|bb|bd|be|bf|bg|bh|bi|bj|bm|bn|bo|br|bs|bt|bv|bw|by|bz|ca|cc|cd|cf|cg|ch|ci|ck|cl|cm|cn|co|cr|cs|cu|cv|cx|cy|cz|dd|de|dj|dk|dm|do|dz|ec|ee|eg|eh|er|es|et|eu|fi|fj|fk|fm|fo|fr|ga|gb|gd|ge|gf|gg|gh|gi|gl|gm|gn|gp|gq|gr|gs|gt|gu|gw|gy|hk|hm|hn|hr|ht|hu|id|ie|il|im|in|io|iq|ir|is|it|je|jm|jo|jp|ke|kg|kh|ki|km|kn|kp|kr|kw|ky|kz|la|lb|lc|li|lk|lr|ls|lt|lu|lv|ly|ma|mc|md|me|mg|mh|mk|ml|mm|mn|mo|mp|mq|mr|ms|mt|mu|mv|mw|mx|my|mz|na|nc|ne|nf|ng|ni|nl|no|np|nr|nu|nz|om|pa|pe|pf|pg|ph|pk|pl|pm|pn|pr|ps|pt|pw|py|qa|re|ro|rs|ru|rw|sa|sb|sc|sd|se|sg|sh|si|sj|sk|sl|sm|sn|so|sr|ss|st|su|sv|sx|sy|sz|tc|td|tf|tg|th|tj|tk|tl|tm|tn|to|tp|tr|tt|tv|tw|tz|ua|ug|uk|us|uy|uz|va|vc|ve|vg|vi|vn|vu|wf|ws|ye|yt|za|zm|zw)){0,1})"
url_regex_no_protocol = re.compile(url_regex_pattern_no_protocol, re.I | re.M | re.U)
@babel.localeselector
def get_locale():
    """Pick the UI language best matching the request's Accept-Language header."""
    return request.accept_languages.best_match(app.config['LANGUAGES'].keys())
def get_remaining_chars(max_status_length, mentions, urls):
    """Return how many characters of free text fit after reserving room for
    mentions, t.co-shortened URLs and the trailing ellipsis marker."""
    remaining_chars = max_status_length
    remaining_chars -= len(' '.join(mentions))
    # URLs get shortened by Twitter and are space-separated, hence +1 each.
    remaining_chars -= sum([get_short_url_length(url)+1 for url in urls])
    # Reserve room for the ellipsis and its trailing space.
    remaining_chars -= len(CONTINUATION_CHARARCTERS)
    return remaining_chars
def get_status_text(tweet):
    """Shorten *tweet* to fit a media-carrying status.

    Keeps as many leading words as the character budget allows, appends an
    ellipsis when truncated, then re-appends mentions/hashtags and URLs that
    still fit (URLs costed at their t.co length).
    """
    # Twitter also left-strips tweets
    tweet = tweet.strip()
    # Reserve a place for the picture we're going to post.
    max_status_length=MAX_STATUS_TEXT_LENGTH
    if(len(tweet)<(max_status_length)):
        return tweet
    urls = get_urls(tweet)
    mentions = get_mentions_and_hashtags(tweet)
    words = tweet.split(' ')
    remaining_chars = get_remaining_chars(max_status_length, mentions, urls)
    shortened_words = []
    # If remaining budget is no more than the ellipsis itself, don't bother.
    if(remaining_chars>len(CONTINUATION_CHARARCTERS)):
        overflowed = False
        for index, word in enumerate(words):
            # A URL's cost is not len(word) but the t.co shortened length.
            if (len(word)<remaining_chars or (word in urls and get_short_url_length(word)<remaining_chars)):
                if(word in urls):
                    urls.remove(word)
                    shortened_words.append(word)
                    # Refund the budget difference: it was pre-charged at
                    # shortened length in get_remaining_chars.
                    remaining_chars += len(word) - get_short_url_length(word)
                elif(word in mentions):
                    # Mentions were pre-charged; just consume one.
                    shortened_words.append(word)
                    mentions.remove(word)
                else:
                    shortened_words.append(word)
                    remaining_chars -= len(word) +1
            else:
                remaining_chars+=1 #for the space that doesn't exist (at the end)
                overflowed = True
                break
        # Append the ellipsis to the last kept word when we truncated.
        # CAUTION! below print causes unsolved encoding errors in (unknown)edge cases! Use in local only, if even necessary.
        # print len(words), index, word, remaining_chars
        if (len(shortened_words)>0 and overflowed):
            shortened_words[-1] += CONTINUATION_CHARARCTERS
    status = ' '.join(shortened_words)
    # If there is no direct mention let urls appear before mentions
    if tweet[0] != '@':
        status += ' '.join(wrap_status_elements(urls+mentions))
    else:
        status += ' '.join(wrap_status_elements(mentions+urls))
    # If the tweet is not a direct reply, don't let a mention start the
    # status (a leading '@' would make Twitter treat it as a reply).
    if tweet[0] != '@' and len(mentions) > 0 and len(urls) == 0:
        if status[0]=='@':
            status = '.' + status
    if(len(status)==0):
        status = ''
    return status
def wrap_status_elements(elements):
    """Keep the leading elements that fit Twitter's status length budget.

    Mentions/hashtags cost their literal length; everything else is treated
    as a URL and costed at its t.co shortened length (+1 for the separator).
    Elements that would overflow are silently discarded.
    """
    budget = MAX_STATUS_TEXT_LENGTH
    kept = []
    for item in elements:
        if len(item) < budget:
            kept.append(item)
            if item[0] == '@' or item[0] == '#':
                cost = len(item)
            else:
                cost = get_short_url_length(item)
            budget -= cost + 1
    return kept
def get_mentions_and_hashtags(tweet):
    """Return the @mentions and #hashtags of *tweet* in order of appearance."""
    tokens = tweet.replace('\n', ' ').split(' ')
    return [token for token in tokens if token.startswith(('@', '#'))]
def get_urls(tweet):
    """Return URLs found in *tweet*: protocol-prefixed matches first, then
    bare-domain matches. NOTE(review): a URL matched by both patterns can
    appear twice — confirm callers tolerate duplicates."""
    return list(group[0] for group in url_regex.findall(tweet) ) + list(group[0] for group in url_regex_no_protocol.findall(tweet) )
def get_short_url_length(long_url):
    """Return the length Twitter's t.co wrapper gives *long_url*.

    Anything not explicitly https (http, ftp, bare domains, ...) gets the
    shorter http length.
    """
    is_https = long_url.startswith('https://')
    return TWITTER_HTTPS_LINK_LENGTH if is_https else TWITTER_HTTP_LINK_LENGTH
34747812186 | import random
def play():
    """Interactive rock-paper-scissors against the computer, first to 5 wins.

    NOTE(review): any input other than 'r'/'p'/'s' falls through to the
    losing branch and scores a point for the computer — confirm whether
    invalid input should instead be re-prompted.
    """
    com_score = user_score = 0
    # Keep playing until either side reaches 5 points.
    while com_score != 5 and user_score != 5:
        user = input("What's your choice? 'r' for rock, 'p' for paper, 's' for scissors : ")
        computer = random.choice(['r', 'p', 's'])
        if user == computer:
            print("It's a tie")
        # r > s, s > p, p > r
        elif (user == 'r' and computer == 's') or (user == 's' and computer == 'p') \
                or (user == 'p' and computer == 'r'):
            user_score += 1
            print(f'You won!\tScore: Your: {user_score}\t Computer: {com_score}')
        else:
            com_score += 1
            print(f'You lost!\tScore: Your: {user_score}\t Computer: {com_score}')
    if user_score == 5:
        print("\nYaaHoo!! You won the Game!! :)\n")
    else:
        print("\nOpps!! You lose the Game!! :(\n")
play()
| AlpeshJasani/My-Python-Projects | rock-paper-scissors.py | rock-paper-scissors.py | py | 861 | python | en | code | 0 | github-code | 36 |
41847098946 | from telegram.ext import Updater
from telegram.ext import CommandHandler, CallbackQueryHandler
from telegram.ext import MessageHandler, Filters
import os
import square
import telegram
#initialize updater and dispatcher
updater = Updater(token='TOKEN', use_context=True)
dispatcher = updater.dispatcher
def start(update, context):
    ''' Replies with a generic greeting to the /start and /help commands. '''
    context.bot.send_message(chat_id = update.message.chat_id, text = "I'm Square It bot! Send me an image and I'll "
                                                     "square it for you!")
def Square_It(update, context):
    ''' Saves the picture locally and asks the user for the padding colour. '''
    # Download the highest-resolution photo variant (photo[-1]).
    image = context.bot.getFile(update.message.photo[-1].file_id)
    FILE_NAME = os.path.join(os.getcwd(), f"{image.file_id}.jpg")
    image.download(custom_path = FILE_NAME)
    # Persist the path so the callback handler can find the image later.
    # NOTE(review): "name.txt" is a single shared file — two users sending
    # photos concurrently will clobber each other's state. Consider keying
    # state by chat_id instead.
    with open("name.txt", 'w') as f:
        f.write(FILE_NAME)
    # Inline keyboard offering a black or white padding colour for the
    # squared image; the choice arrives via the callback query handler.
    custom_keyboard = [[telegram.InlineKeyboardButton('White', callback_data = 'White')],
                    [telegram.InlineKeyboardButton('Black', callback_data = 'Black')]]
    reply_markup = telegram.InlineKeyboardMarkup(custom_keyboard)
    context.bot.send_message(chat_id=update.message.chat_id,
                            text="Please choose the background colour",
                            reply_markup=reply_markup)
def callback(update, context):
    '''
    Squares the stored image with the user's chosen padding colour and
    sends the result back, then removes the temporary files.
    '''
    query = update.callback_query
    colour = query.data  # selected colour from the inline keyboard
    query.edit_message_text(text=f"Selected option: {colour}")
    # Recover the image path written by Square_It.
    with open("name.txt", 'r') as f:
        FILE_NAME = f.read()
    FILE_NAME = FILE_NAME.strip()
    # Pad the image in place to a square with the chosen background.
    square.square_image(FILE_NAME, colour)
    # NOTE(review): a with-block (or try/finally) would guarantee the file
    # handle closes even if send_photo raises.
    file = open(FILE_NAME, 'rb')
    context.bot.send_photo(caption = "Here you go!", chat_id = query.message.chat_id, photo = file)
    file.close()
    # Clean up the image and the shared state file.
    os.remove(FILE_NAME)
    os.remove('name.txt')
# Create the handlers: commands, incoming photos, inline-keyboard callbacks.
start_handler = CommandHandler(['start', 'help'], start)
photo_handler = MessageHandler(Filters.photo, Square_It)
callback_handler = CallbackQueryHandler(callback)
# Register them with the dispatcher.
dispatcher.add_handler(start_handler)
dispatcher.add_handler(photo_handler)
dispatcher.add_handler(callback_handler)
# Start long-polling Telegram for updates.
updater.start_polling()
19937726668 | import random
def drawField(field):
    """Print the 3x3 board held in *field* (a flat 9-element list),
    with '- + - + -' separator rows between the three cell rows."""
    for row in range(3):
        left, middle, right = field[3 * row:3 * row + 3]
        print(left, "|", middle, "|", right)
        if row < 2:
            print("-", "+", "-", "+", "-")
# Lesson script: a minimal tic-tac-toe input loop.
# NOTE(review): only 4 moves are played, there is no bounds/occupied-cell
# validation and no win detection — presumably left as exercises.
field=[" "," "," "," "," "," "," "," "," "]
token = "X"
for attempt in range(4):
    print("Hi, in which row do you want to put your item?")
    i = int(input())
    print("In which column do you want to put your item?")
    j = int(input())
    # Map 1-based (row, column) to the flat 9-element board index.
    index = (i-1)*3 + j-1
    field[index] = token
    # Alternate turns between X and O.
    if token == "X":
        token= "O"
    else:
        token = "X"
    drawField(field)
| shurikkuzmin/ProgrammingCourse2018 | Lesson4/tictactoe.py | tictactoe.py | py | 625 | python | en | code | 1 | github-code | 36 |
39914557124 | from fastapi import APIRouter
from utils import model
from utils.socket import socket_connection
from services.event_service import write_log, write_video_log
from utils.plc_controller import *
from services.camera_service import camera_service
import time
import threading
router = APIRouter(prefix="/event")
@router.post("")
async def post_event(event: model.Event):
camera = camera_service.get_by_id(event.camera_id)
current_time = event.dict()['timestamp']
if current_time > camera['start_time'] and current_time < camera['end_time']:
await socket_connection.send_data(
channel="alert",
data=event.dict()
)
def connect_plc():
plc_controller_config = PLCControllerConfig(
plc_ip_address="192.168.1.250",
plc_port=502,
plc_address=1,
modbus_address=8212
)
_plc_controller = PLCController(plc_controller_config)
time.sleep(0.02)
_plc_controller.turn_on()
if camera is not None:
plc_ip = camera['plc']['ip']
list_config = {}
for i, device in enumerate(camera['plc']['device']):
if "Den" in device['device_name']:
plc_controller_config = PLCControllerConfig(
plc_ip_address=plc_ip,
plc_port=502,
plc_address=1,
modbus_address=device['var']
)
_plc_controller = PLCController(plc_controller_config)
time.sleep(0.02)
_plc_controller.turn_on()
background_thread = threading.Thread(target=connect_plc)
background_thread.start()
return "success"
return "fail"
@router.post('/video')
async def save_log(event: model.EventVideo):
    """Persist a video event log entry and return the stored record.

    (Removed the leftover debug ``print(event)``.)
    """
    event = write_video_log(event)
    return event
| ngocthien2306/be-cctv | src/router/event_router.py | event_router.py | py | 2,107 | python | en | code | 0 | github-code | 36 |
9634671657 | import argparse
# Parse command-line arguments: the text to repeat and a repetition count.
parser = argparse.ArgumentParser()
parser.add_argument("text")
parser.add_argument("repetitions")
args = parser.parse_args()

# Convert repetitions to an integer. Only int() can fail here, so catch
# ValueError specifically — the previous bare ``except:`` also swallowed
# SystemExit and KeyboardInterrupt.
text = args.text
try:
    repetitions = int(args.repetitions)
except ValueError:
    quit(1)

# Write the repeated input text to a file; exit non-zero on empty input
# or a non-positive count.
if repetitions > 0 and len(text) > 0:
    output_text = text * repetitions
    with open("output.txt", "w") as outfile:
        outfile.write(output_text)
else:
    quit(1)
# Exercise: declare a 3x3 matrix, fill it with values read from the
# keyboard, then print the matrix with aligned formatting.
m = [[0, 0, 0], [0, 0, 0], [0, 0, 0]]
for i in range(0, 3): # row
    for j in range(0, 3): # column
        m[i][j] = int(input(f'Insira o elemento [{i+1}][{j+1}]: '))
for i in range(0, 3):
    for j in range(0 , 3):
        print(f'[{m[i][j]}] ', end = ' ')
    print() # newline after each row of the matrix
13191472979 | # Question at:
# https://www.reddit.com/r/dailyprogrammer/comments/7btzrw/20171108_challenge_339_intermediate_a_car_renting/
# determines the optimal solution to serve the most rental requests)
# uses greedy solution, sorting by the rental finish date
# input is n the number of requests and and array of pairs for the start->end periods
import sys
from operator import itemgetter
# Greedy Algo, give rentals based on earliest end dates
# inputs: the number of requests and a list of tuples for (start,end) requests
# returns the list of accepted rental requests
def RentSolver(n, startEndList):
    """Greedily select the maximum number of non-overlapping rental requests.

    Classic interval scheduling: after sorting by finish time, always taking
    the earliest-ending compatible request is optimal. A request is
    compatible only if it starts strictly after the last accepted one ends.

    :param n: number of requests (kept for interface compatibility; the
        list length is authoritative)
    :param startEndList: list of (start, end) tuples
    :return: accepted (start, end) requests in schedule order
    """
    # Sort by start, then stably by end -> overall order is (end, start).
    startEndList = sorted(startEndList, key=itemgetter(0))
    startEndList = sorted(startEndList, key=itemgetter(1))
    # Drop malformed requests whose start is after their end.
    # (Removed the leftover debug print loop that dumped every request.)
    startEndList = [x for x in startEndList if x[0] <= x[1]]
    if not startEndList:
        # Previously this crashed with IndexError on an all-invalid input.
        return []
    rentalList = [startEndList[0]]
    latestRental = startEndList[0]
    for period in startEndList[1:]:
        # Accept only requests starting strictly after the last accepted end.
        if period[0] > latestRental[1]:
            rentalList.append(period)
            latestRental = period
    return rentalList
def main():
    """Read a request file named on the command line and print the
    optimal rental schedule.

    Expected format: first line N, second line the N start values,
    third line the N end values (whitespace-separated).
    usage: py script.py input.in
    """
    # NOTE(review): the file handle is never closed — a with-block would fix it.
    f = open(sys.argv[1], 'r')
    n = int(f.readline())
    xList = [int(i) for i in str.split(f.readline())] # converts input into int list
    yList = [int(i) for i in str.split(f.readline())]
    # Pair up the columns into (start, end) request tuples.
    startEndList = []
    for i in range(n):
        startEndList.append( (xList[i],yList[i]) ) # array of tuples (start, end)
    rentalList = RentSolver(n, startEndList)
    print(rentalList)
main()
| gsmandur/Daily-Programmer | 339/339_sol.py | 339_sol.py | py | 1,798 | python | en | code | 0 | github-code | 36 |
18345784828 | """
Define a procedure histogram() that takes a list of integers and prints a histogram to the screen. For example, histogram([4, 9, 7]) should print the following:
****
*********
*******
"""
# args used to pass a non-keyworded, variable-length argument list,
def histogram(myList=None, *args):
    """Print a horizontal star histogram, one row per integer in *myList*.

    For example ``histogram([4, 9, 7])`` prints::

        ****
        *********
        *******

    :param myList: iterable of non-negative integers; defaults to empty.
        (The mutable-list default was replaced by a None sentinel.)
    :param args: ignored; kept for backward compatibility with callers
        passing extra positional arguments.
    """
    if myList is None:
        myList = []
    for count in myList:
        # String repetition builds the row in one step instead of a char loop.
        print("*" * count)
# Entry point: read whitespace-separated integers from stdin and plot them.
def main():
    """Read one line of integers and print their star histogram."""
    arr = list(map(int,input().split()))
    histogram(arr)
if __name__ == "__main__":
main()
| sonimonish00/Projects-College | Project 1 - College Miniprojects/Python/Prac1-c.py | Prac1-c.py | py | 701 | python | en | code | 1 | github-code | 36 |
21126149841 | """The core of p2pg."""
import logging
from threading import Lock
from .conf import conf, dump_after
__author__ = 'Michael Bradley <michael@sigm.io>'
__copyright__ = 'GNU General Public License V3'
__copy_link__ = 'https://www.gnu.org/licenses/gpl-3.0.txt'
__website__ = 'https://p2pg.sigm.io/'
__support__ = 'https://p2pg.sigm.io/support/'
info_form = {
'author': __author__,
'copyright': __copyright__,
'copy-link': __copy_link__,
'website': __website__,
'support': __support__
}
log = logging.getLogger(__name__)
class StopException(Exception):
    """Raised to request an orderly shutdown; logs and keeps the reason."""
    def __init__(self, reason):
        super().__init__()
        # Log at raise time so every stop request leaves a trace.
        log.info('stop exception raised because of %s', reason)
        self.reason = reason
class StateTracker:
    """Thread-safe single-value cell with a callable getter/setter.

    ``tracker()`` returns the current value; ``tracker(x)`` stores ``x``
    (returning None). Both operations run under an internal lock.
    """

    def __init__(self, n_state):
        self._val = n_state
        self._lock = Lock()

    def __call__(self, *value):
        with self._lock:
            if not value:
                return self._val
            self._val = value[0]
# Lifecycle sentinels: unique objects compared by identity. ``state`` is
# set by main and polled by worker threads as a shutdown/startup signal.
STARTING = object()
RUNNING = object()
STOPPING = object()
state = StateTracker(None)
| TheDocTrier/p2pg | core/__init__.py | __init__.py | py | 1,156 | python | en | code | 0 | github-code | 36 |
5765246532 | import pyautogui
import schedule
import time
import datetime
import random
pyautogui.FAILSAFE = False
screenWidth, screenHeight = pyautogui.size() # Get the size of the primary monitor.
currentMouseX, currentMouseY = pyautogui.position() # Get the XY position of the mouse.
datetime.datetime.now()
print(datetime.datetime.now())
print("Starting")
# Active hours: randomise today's morning start (10:10-11:59) and the
# after-midnight stop time (00:10-02:59) so the pattern varies by day.
hours=random.randint(10,11)
minuts=random.randint(10,59)
date = f"{hours}:{minuts}"
hourst=random.randint(0,2)
minutst=random.randint(10,59)
date2 = f"0{hourst}:{minutst}"
# Lunch window: random start (13:10-14:59) and end (15:10-16:59)...
food=random.randint(13,14)
foodminuts=random.randint(10,59)
endfood=random.randint(15,16)
endfoodminuts=random.randint(10,59)
launch = random.randint(0, 2)
if launch == 0:
    # ...with a 1-in-3 chance of a late lunch instead (14:30 to 15:21-15:59).
    food=14
    foodminuts=random.randint(30,59)
    endfood=15
    endfoodminuts=random.randint(21,59)
date3 = f"{food}:{foodminuts}"
date4 = f"{endfood}:{endfoodminuts}"
# Day-off flags toggled by the click callbacks; None (falsy) means active.
freemorning = None
freeevening = None
#random stops
#
#
#Morning clicking, copy this one for every click action you want
def click():
    """Morning activity burst: double-clicks with randomised pauses.

    A 1-in-4 draw sets ``freemorning``, skipping the morning routine.
    NOTE(review): ``freemorning`` is never reset, so once set the morning
    routine never runs again. Also the flag takes effect immediately below,
    so the break happens *this* morning, not the next — confirm intent.
    """
    global freemorning
    global date
    global date2
    rwork=random.randint(0,3)
    if rwork == 0:
        freemorning = True
        print(datetime.datetime.now())
        print ("Will take a break next morning")
    if not freemorning:
        print(datetime.datetime.now())
        print("Morning click")
        rdelay=random.randint(1,59)
        time.sleep(rdelay)
        pyautogui.doubleClick(500,100)
        print(f"Active from {date} to {date2}")
        # Randomised work/break cadence before resuming clicks.
        rsleep=random.randint(300,1400)
        rback=random.randint(360,2500)
        print(datetime.datetime.now())
        print(f"Will take a break of {rback} seconds after {rdelay+rsleep} seconds")
        time.sleep(rsleep)
        pyautogui.doubleClick(500,100)
        time.sleep(rback)
        pyautogui.doubleClick(500,100)
        print(datetime.datetime.now())
        print("End of the break")
#Launch
def click3():
    """Lunch-time click, skipped when the morning was taken off."""
    global freemorning
    if not freemorning:
        # Small random delay so the click time is not exact.
        launchdelay=random.randint(1,60)
        time.sleep(launchdelay)
        pyautogui.doubleClick(500,100)
        print(datetime.datetime.now())
        print ("Launch time click")
def click4():
    """After-lunch routine: re-roll tomorrow's active-hour strings, then
    click with up to three randomised work/break cycles.

    A 1-in-6 draw sets ``freeevening`` (evening off) instead.
    NOTE(review): reassigning ``date``/``date2`` here does NOT reschedule
    the jobs already registered with schedule.every().day.at(...) — they
    keep firing at the original times. ``freeevening`` is also never reset.
    """
    global freeevening
    global date3
    global date4
    global date2
    global date
    # Re-roll the displayed active-hours strings for the next day.
    hours=random.randint(10,11)
    minuts=random.randint(10,59)
    date = f"{hours}:{minuts}"
    hourst=random.randint(0,2)
    minutst=random.randint(10,59)
    date2 = f"0{hourst}:{minutst}"
    rsiesta =random.randint(0,5)
    if rsiesta == 0:
        freeevening = True
        print(datetime.datetime.now())
        print ("Will take a break next evening")
    else:
        print(datetime.datetime.now())
        print ("Click after launch")
        launchdelay=random.randint(1,60)
        time.sleep(launchdelay)
        pyautogui.doubleClick(500,100)
        # Up to three randomised pause/click cycles (each skipped with
        # probability 1/4 depending on the draw below).
        breaksevening=random.randint(0,3)
        if breaksevening != 0:
            rsleep=random.randint(300,2500)
            rback=random.randint(360,5000)
            print(datetime.datetime.now())
            print(f"Will take a break of {rback} seconds after {rsleep} seconds")
            time.sleep(rsleep)
            pyautogui.doubleClick(500,100)
            time.sleep(rback)
            pyautogui.doubleClick(500,100)
            print(datetime.datetime.now())
            print("End of the break")
        if breaksevening !=1:
            rsleep2=random.randint(300,3000)
            rback2=random.randint(360,9900)
            print(datetime.datetime.now())
            print(f"Will take a break of {rback2} seconds after {rsleep2} seconds")
            time.sleep(rsleep2)
            pyautogui.doubleClick(500,100)
            time.sleep(rback2)
            pyautogui.doubleClick(500,100)
            print(datetime.datetime.now())
            print("End of the break")
        if breaksevening !=2:
            rsleep3=random.randint(400,1500)
            rback3=random.randint(360,8500)
            print(datetime.datetime.now())
            print(f"Will take a break of {rback3} seconds after {rsleep3} seconds")
            time.sleep(rsleep3)
            pyautogui.doubleClick(500,100)
            time.sleep(rback3)
            pyautogui.doubleClick(500,100)
            print(datetime.datetime.now())
            print("End of the break")
#
#
#Evening click
#Remember to add a zero to single decimal ints (or parse it properly to time format)
def click2():
    """End-of-day click: re-roll tomorrow's lunch-window strings, then
    either skip (evening off) or perform one delayed final click.

    NOTE(review): reassigning ``date3``/``date4`` does not reschedule the
    already-registered jobs (see click4), and ``freeevening`` is never
    reset to falsy once set.
    """
    global freeevening
    global date2
    global date3
    global date4
    launch = random.randint(0,2)
    if launch == 0:
        # 1-in-3 chance: late lunch window (14:30 to 15:21-15:59).
        food=14
        foodminuts=random.randint(30,59)
        endfood=15
        endfoodminuts=random.randint(21,59)
    else:
        food=random.randint(13,14)
        foodminuts=random.randint(10,59)
        endfood=random.randint(15,16)
        endfoodminuts=random.randint(10,59)
    date3 = f"{food}:{foodminuts}"
    date4 = f"{endfood}:{endfoodminuts}"
    if freeevening:
        print(datetime.datetime.now())
        print ("Sleeping early without click")
    else:
        print(datetime.datetime.now())
        print("Click and Sleep")
        rdelay=random.randint(1,2200)
        time.sleep(rdelay)
        pyautogui.doubleClick(500,100)
    print(f"Next click at {date} and {date2}")
print(f"Active from {date} to {date2}")
print(f"Launch from {date3} to {date4}")
schedule.every().day.at(date).do(click)
schedule.every().day.at(date2).do(click2)
schedule.every().day.at(date3).do(click3)
schedule.every().day.at(date4).do(click4)
while True:
schedule.run_pending()
time.sleep(1)
| jbernax/autoclicktimer | autoclick.py | autoclick.py | py | 5,721 | python | en | code | 0 | github-code | 36 |
72655514343 | import copy
import json
import os
import datetime
from json import dumps
import logging
import uuid
import tweepy
from flask import Flask, render_template, url_for, request, send_from_directory
from flask_pymongo import PyMongo
import folium
from geopy.exc import GeocoderTimedOut
from geopy.geocoders import Nominatim
import pymongo
from flask import Markup
from bson.objectid import ObjectId
from werkzeug.utils import redirect
from dotenv import load_dotenv
from dendritic_cell_algorithm.signal_generator import Signals, remove_urls, remove_user_mentions
from vaderSentiment.vaderSentiment import SentimentIntensityAnalyzer
from python_kafka.SignalGenerator import startSignalGenerator
from python_kafka.TweetsLoader import startTweetsLoader
from python_kafka.TweetsLoaderWithParameters import startTweetsLoaderWithParameters
from python_kafka.BotDetector import startBotDetector
import multiprocessing
from confluent_kafka import Producer
load_dotenv()
logging.getLogger().setLevel(logging.INFO)
app = Flask(__name__, template_folder='frontend')
app.static_folder = 'frontend/static'
if int(os.environ['USE_DATABASE_SERVICE']):
print("use db service")
client = pymongo.MongoClient(os.environ['DATABASE_SERVICE'], int(os.environ['DATABASE_PORT']),
username=os.environ['DATABASE_USERNAME'],
password=os.environ['DATABASE_PASSWORD'])
else:
print("don't use db service")
client = pymongo.MongoClient(os.environ['DATABASE_URL'])
try:
db = client["TwitterData"]
col = db["Users1"]
except AttributeError as error:
print(error)
@app.route(os.environ['MS_SG_URL_PATH'] + "generate-signals", methods=['post', 'get'])
def generate_signals():
    """Spawn a SignalGenerator worker process configured from the POSTed
    Kafka/Twitter form fields; GET requests just return "fail".

    Authentication is either a bearer token (when USE_BEARER is truthy and
    a bearer was supplied) or the 4-part OAuth credential set.
    """
    if request.method == 'POST':
        producer_servers = request.form.get("producer_servers")
        producer_topic = request.form.get("producer_topic")
        consumer_servers = request.form.get("consumer_servers")
        consumer_group_id = request.form.get("consumer_group_id")
        consumer_offset = request.form.get("consumer_offset")
        consumer_topic = request.form.get("consumer_topic")
        consumer_key = request.form.get("consumer_key")
        consumer_secret = request.form.get("consumer_secret")
        access_token = request.form.get("access_token")
        access_token_secret = request.form.get("access_token_secret")
        bearer = request.form.get("bearer")
        use_bearer = int(os.environ['USE_BEARER'])
        # A missing bearer forces OAuth mode regardless of the env flag.
        if bearer is None:
            use_bearer = False
        if use_bearer:
            print("use_bearer")
            p2 = multiprocessing.Process(name='p2', target=startSignalGenerator, args=(
                consumer_servers, consumer_group_id, consumer_offset, consumer_topic, producer_servers,
                producer_topic,
                None, None, None, None, bearer,))
        else:
            print("don't use_bearer")
            p2 = multiprocessing.Process(name='p2', target=startSignalGenerator, args=(
                consumer_servers, consumer_group_id, consumer_offset, consumer_topic, producer_servers,
                producer_topic,
                consumer_key, consumer_secret, access_token, access_token_secret, None,))
        # Fire-and-forget: the worker runs independently of this request.
        p2.start()
        return "OK"
    return "fail"
@app.route(os.environ['MS_SG_URL_PATH'] + "use-new-env-vars", methods=['post', 'get'])
def use_new_env_vars():
    """Reload the DCA coefficient environment variables from MongoDB.

    Reads the active coefficient version from ApplicationStatus/MainValues,
    copies that version's coefficients into os.environ, and reports the
    version applied.
    """
    if request.method == 'POST':
        col1 = db["ApplicationStatus"]
        main_parameters = col1.find_one({"name": "MainValues"})
        dca_coefficients = col1.find_one(
            {"name": "DCACoefficients", "version": main_parameters["coefficients_collection_id"]})
        # Environment variables must be strings.
        for attr in list(dca_coefficients["coefficients"].keys()):
            os.environ[attr] = str(dca_coefficients["coefficients"][attr])
        return "SignalGenerator: Ok, DCACoefficients version " + main_parameters["coefficients_collection_id"]
    else:
        # Flask views must not return a bare int — the original ``return 404``
        # raised "view function did not return a valid response" at runtime.
        # Return an explicit (body, status) pair instead.
        return "Not Found", 404
if __name__ == "__main__":
# app.run()
app.run(host='0.0.0.0')
| rwth-acis/bot-detector | web_application/ms_signal_generator.py | ms_signal_generator.py | py | 4,034 | python | en | code | 3 | github-code | 36 |
6786801031 | import unittest
from local import EXOLEVER_HOST
import requests
class ChatUserTest(unittest.TestCase):
    """Integration tests for the conversation API (needs a live EXOLEVER_HOST)."""

    def do_login(self):
        """POST the demo credentials and return the raw login response."""
        url = '/api/accounts/login/'
        prefix = ''
        url = EXOLEVER_HOST + prefix + url
        data = {
            'username': 'gorkaarrizabalaga@example.com',
            'password': '.eeepdExO'
        }
        return requests.post(url, data)

    def get_messages(self, token, user_to=None):
        """GET the conversation list, optionally filtered by peer uuid."""
        url = '/api/conversations/'
        prefix = '/conversations'
        url = EXOLEVER_HOST + prefix + url
        headers = {'Authorization': token}
        params = {}
        if user_to:
            params['user_to'] = user_to
        return requests.get(url, params=params, headers=headers)

    def get_user_detail(self, token, slug):
        """GET the public profile for *slug*."""
        url = '/api/profile-public/{}/'.format(slug)
        prefix = ''
        url = EXOLEVER_HOST + prefix + url
        headers = {'Authorization': token}
        return requests.get(url, headers=headers)

    def get_token(self):
        """Log in and build a Bearer authorization header value."""
        login_data = self.do_login()
        user = login_data.json().get('token')
        token = 'Bearer ' + user
        return token

    def test_start_conversation(self):
        """Start a conversation with a user and verify it becomes listable."""
        token = self.get_token()
        response = self.get_user_detail(token, 'naina-lavrova')
        self.assertEqual(response.status_code, 200)
        user_pk = response.json().get('pk')
        url = EXOLEVER_HOST + '/api/profile/{}/start-conversation/'.format(user_pk)
        data = {'message': 'hello', 'files': []}
        response = requests.post(url, data=data, headers={'Authorization': token})
        self.assertEqual(response.status_code, 201)
        response = self.get_messages(token)
        self.assertEqual(response.status_code, 200)
        # Fixed: the closing parenthesis was misplaced — ``len(x, 1)`` called
        # len() with two arguments and raised TypeError before asserting.
        self.assertEqual(len(response.json()), 1)
        # NOTE(review): .get('uuid') assumes a dict, but len(...) above treats
        # the body as a list — confirm the endpoint's response schema.
        response = self.get_messages(token, user_to=response.json().get('uuid'))
        self.assertEqual(response.status_code, 200)
        self.assertEqual(len(response.json()), 1)
| tomasgarzon/exo-services | service-exo-broker/tests/test_chat_user.py | test_chat_user.py | py | 1,990 | python | en | code | 0 | github-code | 36 |
import sys

# BOJ 2617 "구슬 찾기": given N beads and M "a is heavier than b" relations,
# count the beads that can never have the median weight, i.e. beads with at
# least (N+1)/2 beads known heavier OR at least (N+1)/2 beads known lighter.
# BUG FIX: scraper metadata was fused onto the first and last lines of this
# file ("14114861210 | import sys" / "print(count) | nashs789/..."),
# making it a syntax error.
bead_N, edge = map(int, sys.stdin.readline().strip().split())
# heavy_bead_list[x]: beads directly heavier than x;
# light_bead_list[x]: beads directly lighter than x.
heavy_bead_list = [[] for _ in range(bead_N + 1)]
light_bead_list = [[] for _ in range(bead_N + 1)]
for _ in range(edge):
    heavy, light = map(int, sys.stdin.readline().strip().split())
    heavy_bead_list[light].append(heavy)
    light_bead_list[heavy].append(light)


def dfs(node, adj):
    """Mark every bead reachable from ``node`` in ``adj``, counting newly
    visited beads in the module-global ``check``.

    ``visited`` and ``check`` are reset by the caller before each traversal.
    (Parameter renamed from ``list``, which shadowed the builtin.)
    """
    global visited
    global check
    visited[node] = 1
    for bead in adj[node]:
        if visited[bead] == 0:
            check += 1
            dfs(bead, adj)


count = 0
md = (bead_N + 1) / 2
for i in range(1, bead_N + 1):
    visited = [0] * (bead_N + 1)
    check = 0
    dfs(i, heavy_bead_list)
    if check >= md:
        count += 1
    check = 0
    dfs(i, light_bead_list)
    if check >= md:
        count += 1
print(count)
"""
Script name: 03_count_language_editions_at_point_in_time.py
Purpose of script: count language editions per month and year
Dependencies: 02_get_language_edition_history_wikidata.py
Author: Alexandra Rottenkolber
"""
import pandas as pd
# read in data
creation_date_df = pd.read_csv("./data_analysis/01_data/Wikipedia/output/history_lang_eds_df_complete.csv").drop(columns = ["Unnamed: 0"])
# preparation to count language editions across time
years = list((creation_date_df["creation_year_lang"]).unique())
months = list((creation_date_df["creation_month_lang"]).unique())
days = list((creation_date_df["creation_day_lang"]).unique())
IDs = list((creation_date_df["wikidataid"]).unique())
# For every (item, creation year) pair, count how many language editions
# existed up to and including that year, and which languages they were.
ID_ls = []
years_ls = []
no_lan_eds_ls = []
languages_ls = []
for ID in IDs:
    years = list(creation_date_df[creation_date_df["wikidataid"] == ID]["creation_year_lang"])
    for year in years:
        no_lang_eds = len(
            creation_date_df[(creation_date_df["wikidataid"] == ID) & (creation_date_df["creation_year_lang"] <= year)])
        languages = list(
            creation_date_df[(creation_date_df["wikidataid"] == ID) & (creation_date_df["creation_year_lang"] <= year)][
                "language"].unique())
        ID_ls.append(ID)
        years_ls.append(year)
        no_lan_eds_ls.append(no_lang_eds)
        languages_ls.append(languages)
# Assemble the per-(item, year) cumulative view built above.
res_df = pd.DataFrame()
res_df["wikidataid"] = ID_ls
res_df["year"] = years_ls
res_df["no_lang_eds"] = no_lan_eds_ls
res_df["languages"] = languages_ls
# Per-item, per-year count of newly created language editions.
per_year = creation_date_df.groupby(by = ["wikidataid", "creation_year_lang"]).nunique().drop(columns = ["creation_month_lang", "creation_day_lang", "wikititle", "creation_date_lang"]).reset_index()
per_year = per_year.rename(columns = {"language" : "no_languages_created", "creation_year_lang" : "year"})
per_year.head(2)
years = sorted(
    [2002, 2003, 2004, 2005, 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013, 2014, 2015, 2016, 2017, 2018, 2019, 2020,
     2021])
# Densify: emit one row per item per calendar year, carrying the cumulative
# language count forward through years with no new editions.
# NOTE(review): 'cum_sum_no_languages' is read below but never created on
# per_year — a line such as
#   per_year["cum_sum_no_languages"] = per_year.groupby("wikidataid")["no_languages_created"].cumsum()
# appears to be missing; as written the loop raises a KeyError. Confirm.
WIKIDATAID = []
YEAR = []
NO_LANG_CREATED = []
CUM_SUM_LANG = []
for ID in set(per_year["wikidataid"]):
    years_ID = sorted(per_year["year"][per_year["wikidataid"] == ID])
    for year in years:
        if year in years_ID:
            WIKIDATAID.append(ID)
            YEAR.append(year)
            NO_LANG_CREATED.append(
                list(per_year["no_languages_created"][(per_year["wikidataid"] == ID) & (per_year["year"] == year)])[0])
            CUM_SUM_LANG.append(
                list(per_year["cum_sum_no_languages"][(per_year["wikidataid"] == ID) & (per_year["year"] == year)])[0])
        elif year not in years_ID:
            # No editions created this year: reuse the cumulative count of
            # the most recent earlier year (0 if this is before the first).
            temp_years = sorted(per_year["year"][per_year["wikidataid"] == ID])
            temp_years.append(year)
            temp_years = sorted(temp_years)
            idx_ = temp_years.index(year)
            if idx_ >= 1:
                cum_sum_lang = list(per_year["cum_sum_no_languages"][
                                        (per_year["wikidataid"] == ID) & (per_year["year"] == temp_years[idx_ - 1])])[0]
            else:
                cum_sum_lang = 0
            WIKIDATAID.append(ID)
            YEAR.append(year)
            NO_LANG_CREATED.append(0)
            CUM_SUM_LANG.append(cum_sum_lang)
        else:
            print(ID, year)
full_results_per_year = pd.DataFrame()
full_results_per_year["wikidataid"] = WIKIDATAID
full_results_per_year["year"] = YEAR
full_results_per_year["no_languages_created"] = NO_LANG_CREATED
full_results_per_year["cum_sum_no_languages"] = CUM_SUM_LANG
full_results_per_year.to_csv("./data_analysis/01_data/Wikipedia/output/history_nolanguages_counted_all_years.csv")
| AlexandraRoko/Discourse_openess_and_pol_elites | 2_Wikipedia/03_pull_historical_Wiki_data/03_count_language_editions_at_point_in_time.py | 03_count_language_editions_at_point_in_time.py | py | 3,658 | python | en | code | 0 | github-code | 36 |
74274579943 | import pytest
import ruleset
import util
import os
def get_testdata(rulesets):
    """Flatten every enabled test into (ruleset, test) pairs.

    py.test parametrizes across the whole suite at once, so the per-ruleset
    tests have to be bundled into one flat sequence of tuples.
    """
    pairs = []
    for rs in rulesets:
        pairs.extend((rs, case) for case in rs.tests if case.enabled)
    return pairs
def test_id(val):
    """Build a human-readable pytest id for a parametrized (ruleset, test) value.

    Useful for dynamically naming tests when running dozens to hundreds of them.
    Non-matching values fall through and return None (pytest then uses its
    default id), matching the original behaviour.
    """
    if isinstance(val, (dict, ruleset.Test,)):
        # We must be careful here because errors are swallowed and defaults
        # returned.  .get() does the membership test and lookup in one step.
        name = val.ruleset_meta.get('name', 'Unnamed_Test')
        return '%s -- %s' % (name, val.test_title)
@pytest.fixture
def destaddr(request):
    """
    Destination address override for tests
    """
    # Value of the --destaddr CLI option added in pytest_addoption (None if unset).
    return request.config.getoption('--destaddr')
@pytest.fixture
def port(request):
    """
    Destination port override for tests
    """
    # Value of the --port CLI option (int in 1-65535, None if unset).
    return request.config.getoption('--port')
@pytest.fixture
def protocol(request):
    """
    Destination protocol override for tests
    """
    # Value of the --protocol CLI option ('http' or 'https', None if unset).
    return request.config.getoption('--protocol')
@pytest.fixture
def http_serv_obj():
    """
    Return an HTTP server object listening on localhost port 80 for testing
    """
    # BUG FIX: HTTPServer / SimpleHTTPRequestHandler were never imported at
    # module level, so using this fixture raised a NameError.
    # NOTE(review): binding port 80 usually requires elevated privileges.
    from http.server import HTTPServer, SimpleHTTPRequestHandler
    return HTTPServer(('localhost', 80), SimpleHTTPRequestHandler)
@pytest.fixture
def with_journal(request):
    """
    Return full path of the testing journal
    """
    # Value of the --with-journal CLI option (journal DB path, None if unset).
    return request.config.getoption('--with-journal')
@pytest.fixture
def tablename(request):
    """
    Set table name for journaling
    """
    # Value of the --tablename CLI option (None if unset).
    return request.config.getoption('--tablename')
def pytest_addoption(parser):
    """
    Adds command line options to py.test
    """
    # One (name, kwargs) entry per option; registered in declaration order.
    option_specs = [
        ('--ruledir', dict(action='store', default=None,
                           help='rule directory that holds YAML files for testing')),
        ('--destaddr', dict(action='store', default=None,
                            help='destination address to direct tests towards')),
        ('--rule', dict(action='store', default=None,
                        help='fully qualified path to one rule')),
        ('--ruledir_recurse', dict(action='store', default=None,
                                   help='walk the directory structure finding YAML files')),
        ('--with-journal', dict(action='store', default=None,
                                help='pass in a journal database file to test')),
        ('--tablename', dict(action='store', default=None,
                             help='pass in a tablename to parse journal results')),
        ('--port', dict(action='store', default=None,
                        help='destination port to direct tests towards',
                        choices=range(1, 65536), type=int)),
        ('--protocol', dict(action='store', default=None,
                            help='destination protocol to direct tests towards',
                            choices=['http', 'https'])),
    ]
    for name, kwargs in option_specs:
        parser.addoption(name, **kwargs)
def pytest_generate_tests(metafunc):
    """
    Pre-test configurations, mostly used for parametrization
    """
    options = ['ruledir','ruledir_recurse','rule']
    args = metafunc.config.option.__dict__
    # Check if we have any arguments by creating a list of supplied args we want
    if [i for i in options if i in args and args[i] != None] :
        # NOTE(review): if several of these options are passed at once the
        # later lookups overwrite `rulesets`; effective precedence is
        # --ruledir < --ruledir_recurse < --rule.  Confirm this is intended.
        if metafunc.config.option.ruledir:
            rulesets = util.get_rulesets(metafunc.config.option.ruledir, False)
        if metafunc.config.option.ruledir_recurse:
            rulesets = util.get_rulesets(metafunc.config.option.ruledir_recurse, True)
        if metafunc.config.option.rule:
            rulesets = util.get_rulesets(metafunc.config.option.rule, False)
        # Only parametrize tests that request both fixtures.
        if 'ruleset' in metafunc.fixturenames and 'test' in metafunc.fixturenames:
            metafunc.parametrize('ruleset,test', get_testdata(rulesets),
                ids=test_id)
| fastly/ftw | ftw/pytest_plugin.py | pytest_plugin.py | py | 3,940 | python | en | code | 263 | github-code | 36 |
39076799665 | from string import ascii_lowercase
class Node:
    """A word in the BFS ladder graph, linked to its shortest-path predecessors."""

    def __init__(self, val, parents=None):
        # BUG FIX: the original used a mutable default (parents=[]), so every
        # node created without an explicit parents list shared one list object.
        self.val = val
        self.parents = [] if parents is None else parents

    def __str__(self):
        return self.val
from collections import deque
from string import ascii_lowercase
class Solution:
    """Word Ladder II (LeetCode 126): all shortest beginWord -> endWord ladders."""

    def findLadders(self, beginWord: str, endWord: str, wordList: List[str]) -> List[List[str]]:
        """Level-order BFS that records every parent of each word first reached
        on a level; the shortest paths are rebuilt backwards from endWord.
        """
        if endWord not in wordList:
            return []
        root = Node(beginWord)
        dq = deque()
        dq.append((root, 0))
        wl = set(wordList)
        endNode = None
        pre_level = -1
        add = {}  # words first reached on the BFS level currently being filled
        while len(dq):
            (head, level) = dq.popleft()
            if level != pre_level:
                # A new level starts: words discovered on the previous level
                # can no longer appear later on any shortest path.
                for _, value in add.items():
                    wl.remove(value.val)
                add = {}
                pre_level = level
            if self.diffByOne(head.val, endWord):
                endNode = Node(endWord, [head])
                # Also collect every other same-level node that reaches endWord.
                while len(dq) and dq[0][1] == level:
                    (head1, level1) = dq.popleft()
                    if self.diffByOne(endWord, head1.val):
                        endNode.parents += [head1]
                break
            for i in range(len(head.val)):
                for s in ascii_lowercase:
                    newWord = head.val[:i] + s + head.val[1 + i:]
                    if newWord in wl:
                        if newWord not in add:
                            add[newWord] = Node(newWord, [head])
                            dq.append((add[newWord], level + 1))
                        else:
                            # Already queued on this level: record another parent.
                            add[newWord].parents += [head]
        if endNode:
            return self.formLists(endNode)
        else:
            return []

    def formLists(self, endNode):
        """Recursively expand the parent DAG into explicit word lists."""
        if len(endNode.parents) == 0:
            return [[endNode.val]]
        ret = []
        for p in endNode.parents:
            l = self.formLists(p)
            ret += [x + [endNode.val] for x in l]
        return ret

    def diffByOne(self, s1, s2):
        """True iff s1 and s2 have equal length and differ in exactly one position."""
        if len(s1) != len(s2):
            return False
        diff = 0
        for i in range(len(s1)):
            if s1[i] != s2[i]:
                diff += 1
        # BUG FIX: the original final line had repository metadata fused onto
        # it ("return diff == 1 | YuxiLiuAsana/..."), a syntax error.
        return diff == 1
import sys

# BOJ 1946 "신입사원": an applicant is hired iff no other applicant beats them
# in BOTH rankings.  Sort by the first ranking; while scanning, an applicant is
# hired exactly when their second ranking beats the best second ranking so far.
# BUG FIX: scraper metadata was fused onto the first and last lines of this
# file ("26606758529 | import sys" / "print(count) | realme1st/..."),
# making it a syntax error.
t = int(input())
for _ in range(t):
    data = []
    count = 1  # the applicant with the best first ranking is always hired
    n = int(input())
    for i in range(n):
        a, b = map(int, sys.stdin.readline().split())
        data.append((a, b))
    data.sort(key=lambda x: x[0])
    min_data = data[0][1]  # best (lowest) second ranking seen so far
    for i in data[1:]:
        if min_data > i[1]:
            count += 1
            min_data = i[1]
    print(count)
42867652572 | from utils import read_input
def age_and_spawn_the_fish(fishes):
    """Advance every lanternfish timer by one day, in place.

    Fish at 0 reset to 6 (via calc_next_age) and each of them produces a
    newborn with timer 8; the mutated list is also returned for convenience.
    """
    newborn_timer = 8
    births = determine_num_spawns(fishes)
    # Slice assignment keeps mutating the caller's list object.
    fishes[:] = [calc_next_age(age) for age in fishes]
    fishes.extend([newborn_timer] * births)
    return fishes
def calc_next_age(fish):
    """Return the fish's timer after one day: decrement, or wrap 0 back to 6."""
    reset_timer = 6
    return fish - 1 if fish > 0 else reset_timer
def determine_num_spawns(fishes):
    """Count the fish whose timer hit zero (each spawns one newborn today)."""
    return sum(1 for timer in fishes if timer == 0)
def p2_spawn_and_age_the_fish(fishes_dict):
    """One day step on timer->count buckets.

    Every bucket shifts down by one; the fish that were at 0 reappear at 6
    (timer reset) and also at 8 (their newborns).
    """
    stepped = {timer: fishes_dict[timer + 1] for timer in range(8)}
    stepped[6] += fishes_dict[0]
    stepped[8] = fishes_dict[0]
    return stepped
if __name__ == "__main__":
    # Puzzle input: one comma-separated line of lanternfish timers.
    fishes = [int(i) for i in read_input("day6_input.txt")[0].split(",")]
    days = 256
    # PART 1
    # for i in range(0, days):
    #     print(f"DAY {i}")
    #     fishes = age_and_spawn_the_fish(fishes)
    #
    # print(f"FINAL: {len(fishes)}")
    # PART 2
    # Bucket fish by timer value so each day is O(1) work; the naive list
    # used for part 1 grows exponentially over 256 days.
    fishes_dict = {}
    for i in range(0, 9):
        fishes_dict[i] = len([f for f in fishes if f == i])
    for i in range(0, days):
        fishes_dict = p2_spawn_and_age_the_fish(fishes_dict)
    print(f"PART 2: {sum([fishes_dict[i] for i in fishes_dict])}")
| tthompson691/AdventOfCode | src/2021/Day6/day6_solution.py | day6_solution.py | py | 1,479 | python | en | code | 2 | github-code | 36 |
# -*- coding: utf-8 -*-
from PyQt5.QtWidgets import QDialog, QTreeWidgetItem, QMenu
from PyQt5.QtCore import pyqtSlot, QPoint
from labrecord.controllers.labrecordscontroller import LabrecordsController
from labrecord.modules.editobservationmodule import EditObservationModule
from labrecord.modules.checkreportmodule import CheckreportModule
from labrecord.views.editsamplerecorddetail import Ui_Dialog
from labrecord.modules.applycheckmodule import ApplycheckModule
from product.controllers.productcontroller import ProductController
import decimal
import user
class EditSampleRecordDetailModule(QDialog, Ui_Dialog):
    """Dialog for editing one retained-sample record: header fields, its
    observation records, and the lab (check) reports derived from them.

    Data access goes through LabrecordsController (self.LC) and
    ProductController (self.PC); ``user`` supplies the current session's
    permissions and date.
    """

    def __init__(self, autoid, parent=None):
        """Load the sample record identified by ``autoid`` and populate the UI.

        Closes immediately when the user lacks permission '50' or any
        permission bits under key '10'.
        """
        super(EditSampleRecordDetailModule, self).__init__(parent)
        self.setupUi(self)
        if '50' not in user.powers:
            self.close()
        if user.powers['10'] == 0:
            self.close()
        # 3-bit permission string; bit [1] gates editing (accept/cancel buttons).
        self.power = '{:03b}'.format(user.powers['10'])
        if self.power[1] == '0':
            self.pushButton_accept.setVisible(False)
            self.pushButton_cancel.setVisible(False)
        self.autoid = autoid          # primary key of the sample record
        self.checkitem_id = None      # cached check-item id for this product
        # NOTE(review): sentinel is the `object` type itself, yet other
        # methods compare `self.ori_detail is None` — those guards never
        # trigger before get_detail() runs; confirm intent.
        self.ori_detail = object      # record as loaded from the database
        self.new_detail = {}          # pending field changes (diff vs. ori_detail)
        self.lr_list = []             # lab records linked to the observations
        self.LC = LabrecordsController()
        self.PC = ProductController()
        self.get_detail()
        self.get_observation_record()
        self.get_labrecord_list()

    def get_detail(self):
        """Fetch the sample record and fill the header widgets; disable
        editing when the record is missing or no longer in status 0 (draft)."""
        condition = {'autoid': self.autoid}
        res = self.LC.get_data(6, False, **condition)
        if len(res) != 1:
            self.pushButton_accept.setEnabled(False)
            self.pushButton_cancel.setEnabled(False)
            return
        self.ori_detail = res[0]
        self.lineEdit_product.setText(
            self.ori_detail.ppid.prodid + ' ' + self.ori_detail.ppid.prodname
        )
        self.lineEdit_commonname.setText(self.ori_detail.ppid.commonname)
        self.lineEdit_batchno.setText(self.ori_detail.ppid.batchno)
        self.lineEdit_spec.setText(self.ori_detail.ppid.spec)
        self.lineEdit_package.setText(self.ori_detail.ppid.package)
        self.lineEdit_makedate.setText(str(self.ori_detail.ppid.makedate))
        self.lineEdit_samplequantity.setText(str(self.ori_detail.samplequantity))
        self.lineEdit_unit.setText(self.ori_detail.unit)
        self.comboBox_kind.setCurrentIndex(self.ori_detail.kind)
        if self.ori_detail.status != 0:
            self.pushButton_accept.setEnabled(False)
            self.pushButton_cancel.setEnabled(False)

    def get_observation_record(self):
        """Reload the observation tree; column 1/7 show the linked lab
        record id and status when one exists."""
        self.treeWidget_observation.clear()
        condition = {'srid': self.autoid}
        res = self.LC.get_data(7, False, **condition)
        if not len(res):
            return
        lrid_list = res.values_list(*VALUES_TUPLE_LRID, flat=True)
        condition_lr={'ciid__in': lrid_list, 'labtype':6}
        self.lr_list = self.LC.get_data(
            0, False, *VALUES_TUPLE_LR, **(condition_lr)
        )
        for item in res.values(*VALUES_TUPLE_OB):
            qtreeitem = QTreeWidgetItem(self.treeWidget_observation)
            qtreeitem.setText(0, str(item['autoid']))
            qtreeitem.setText(2, item['obsperiod'])
            qtreeitem.setText(3, str(item['obsdate']))
            qtreeitem.setText(4, str(item['samplequantity']))
            qtreeitem.setText(5, item['unit'])
            qtreeitem.setText(6, item['conclusion'])
            # Attach the matching lab record (if any) to this observation row.
            for it in self.lr_list:
                if it['ciid'] == item['autoid']:
                    qtreeitem.setText(1, str(it['autoid']))
                    qtreeitem.setText(7, STATUS[it['status']])
                    break
        for i in range(2, 8):
            self.treeWidget_observation.resizeColumnToContents(i)

    def get_labrecord_list(self):
        """Reload the lab-record tree from the cached ``self.lr_list``."""
        self.treeWidget_labrecord.clear()
        if self.ori_detail is None:
            return
        for item in self.lr_list:
            qtreeitem = QTreeWidgetItem(self.treeWidget_labrecord)
            qtreeitem.setText(0, str(item['autoid']))
            qtreeitem.setText(1, item['paperno'])
            qtreeitem.setText(2, str(item['reportdate']))
            qtreeitem.setText(3, STATUS[item['status']])

    @pyqtSlot(QPoint)
    def on_treeWidget_observation_customContextMenuRequested(self, pos):
        """Context menu on the observation tree: add/edit/delete an
        observation, or submit/cancel a check (inspection) request.
        Only available while the sample record is still in status 0."""
        if self.ori_detail is None:
            return
        if self.ori_detail.status != 0:
            return
        current_item = self.treeWidget_observation.currentItem()
        menu = QMenu()
        action_1 = menu.addAction("增加")
        action_2 = menu.addAction("修改")
        action_3 = menu.addAction("删除")
        menu.addSeparator()
        action_4 = menu.addAction("提交请验")
        action_5 = menu.addAction("取消请验")
        global_pos = self.treeWidget_observation.mapToGlobal(pos)
        action = menu.exec(global_pos)
        if action == action_1:
            # Add a new observation for this sample record.
            unit = self.lineEdit_unit.text()
            detail = EditObservationModule(
                srid=self.autoid, unit=unit, parent=self
            )
            detail.accepted.connect(self.get_observation_record)
            detail.show()
        elif action == action_2:
            # Edit the selected observation.
            if current_item is None:
                return
            id = int(current_item.text(0))
            detail = EditObservationModule(autoid=id, parent=self)
            detail.accepted.connect(self.get_observation_record)
            detail.show()
        elif action == action_3:
            # Delete the observation and any lab record linked to it.
            if current_item is None:
                return
            condition = {'autoid': int(current_item.text(0))}
            self.LC.delete_data(7, condition)
            lrid = current_item.text(1)
            if lrid != '':
                self.LC.delete_labrecord_and_detail(int(lrid))
            self.get_observation_record()
        elif action == action_4:
            # Submit an inspection request: create a lab record on demand,
            # then open the apply-check dialog.
            if self.ori_detail is None or current_item is None:
                return
            if current_item.text(1) == '':
                if self.checkitem_id is None:
                    prodid = self.ori_detail.ppid.prodid
                    condition = {'prodid': prodid}
                    res = self.PC.get_data(1, True, *VALUES_TUPLE_PD, **condition)
                    if not len(res):
                        return
                    self.checkitem_id = res[0]
                kwargs = {
                    'labtype': 6,
                    'chkid': self.ori_detail.ppid.prodid,
                    'chkname': self.ori_detail.ppid.prodname,
                    'batchno': self.ori_detail.ppid.batchno,
                    'spec': self.ori_detail.ppid.spec,
                    'package': self.ori_detail.ppid.package,
                    'ciid': int(current_item.text(0)),
                    'createdate': user.now_date,
                    'checkamount': self.ori_detail.samplequantity,
                    'caunit': self.ori_detail.unit,
                    'sampleamount': decimal.Decimal(current_item.text(4)),
                    'sampleunit': current_item.text(5),
                }
                lrid = self.LC.create_labrecord(
                    self.checkitem_id, 6, user.now_date, **kwargs
                )
            else:
                lrid = int(current_item.text(1))
            detail = ApplycheckModule(lrid, 6, self)
            # If the dialog is rejected, roll back the freshly created record.
            detail.rejected.connect(lambda: self.delete_check_report(lrid))
            detail.applyed.connect(detail.accept)
            detail.accepted.connect(self.get_observation_record)
            detail.accepted.connect(self.get_labrecord_list)
            detail.show()
        elif action == action_5:
            # Cancel the inspection request: drop the linked lab record.
            if current_item is None:
                return
            lrid = current_item.text(1)
            if lrid != '':
                self.LC.delete_labrecord(int(lrid))
                self.get_observation_record()
        else:
            pass

    def delete_check_report(self, lrid):
        """Delete lab record ``lrid`` and refresh the observation tree."""
        self.LC.delete_labrecord(lrid)
        self.get_observation_record()

    @pyqtSlot(QTreeWidgetItem, int)
    def on_treeWidget_observation_itemDoubleClicked(self, qtreeitem, p_int):
        """Open the observation editor on double-click."""
        id = int(qtreeitem.text(0))
        detail = EditObservationModule(autoid=id, parent=self)
        detail.accepted.connect(self.get_observation_record)
        detail.show()

    @pyqtSlot(QTreeWidgetItem, int)
    def on_treeWidget_labrecord_itemDoubleClicked(self, qtreeitem, p_int):
        """Open the check report viewer on double-click (requires edit bit)."""
        if self.power[1] == '0':
            return
        id = int(qtreeitem.text(0))
        detail = CheckreportModule(id, True, self)
        detail.show()

    @pyqtSlot(str)
    def on_lineEdit_samplequantity_textChanged(self, p_str):
        """Track edits to the sample quantity in ``self.new_detail``."""
        p_data = decimal.Decimal(p_str)
        try:
            if p_data != self.ori_detail.samplequantity:
                self.new_detail['samplequantity'] = p_data
            else:
                # Value reverted to the original: drop the pending change.
                try:
                    del self.new_detail['samplequantity']
                except KeyError:
                    pass
        except ValueError:
            self.new_detail['samplequantity'] = p_data

    @pyqtSlot(str)
    def on_lineEdit_unit_textChanged(self, p_str):
        """Track edits to the unit field in ``self.new_detail``."""
        try:
            if p_str != self.ori_detail.unit:
                self.new_detail['unit'] = p_str
            else:
                try:
                    del self.new_detail['unit']
                except KeyError:
                    pass
        except ValueError:
            self.new_detail['unit'] = p_str

    @pyqtSlot(int)
    def on_comboBox_kind_currentIndexChanged(self, p_int):
        """Track edits to the kind combo box in ``self.new_detail``."""
        try:
            if p_int != self.ori_detail.kind:
                self.new_detail['kind'] = p_int
            else:
                try:
                    del self.new_detail['kind']
                except KeyError:
                    pass
        except ValueError:
            self.new_detail['kind'] = p_int

    @pyqtSlot()
    def on_pushButton_accept_clicked(self):
        """Persist the pending changes (if any) and close with accept()."""
        if not len(self.new_detail):
            return
        condiition = {'autoid': self.autoid}
        self.LC.update_data(6, condiition, **self.new_detail)
        self.accept()

    @pyqtSlot()
    def on_pushButton_cancel_clicked(self):
        """Discard pending changes and close the dialog."""
        self.close()
# Human-readable labels for lab-record status codes 0-4
# (awaiting request / sampling / testing / passed / failed).
STATUS = ("待请验", "取样中", "检验中", "合格", "不合格")
# Column tuples used for .values()/.values_list() queries above.
VALUES_TUPLE_LRID = ('autoid',)
VALUES_TUPLE_OB = (
    'autoid', 'obsperiod', 'obsdate', 'samplequantity', 'unit', 'conclusion'
)
VALUES_TUPLE_LR = ('autoid', 'ciid', 'paperno', 'reportdate', 'status')
# BUG FIX: the original final line had repository metadata fused onto it
# ("VALUES_TUPLE_PD = ('autoid', ) | zxcvbnmz0x/..."), a syntax error.
VALUES_TUPLE_PD = ('autoid', )
18701687808 | import numpy as np
from numpy import linalg as LA
from keras.applications.vgg16 import VGG16
from keras.preprocessing import image
from keras.applications.vgg16 import preprocess_input
from PIL import Image
from cv2 import imread,resize,cvtColor,COLOR_BGR2RGB,INTER_AREA,imshow
'''
VGG16模型,权重由ImageNet训练而来
使用vgg16模型提取特征
输出归一化特征向量
'''
class feature():
    """
    Feature extraction with an ImageNet-pretrained VGG16 backbone.

    The network is built once in the constructor and reused for every
    image; extract_feat returns an L2-normalised max-pooled feature vector.
    """
    # Expected network input as (height, width, channels).
    INPUT_SHAPE = (272, 480, 3)

    def __init__(self):
        # IMPROVEMENT: the original rebuilt the VGG16 model (reloading its
        # weights) on every extract_feat() call; build it once and reuse it.
        h, w, c = self.INPUT_SHAPE
        self.model = VGG16(input_shape=(h, w, c), pooling='max', include_top=False)

    def extract_feat(self, img_path):
        """Read the image at ``img_path`` and return its unit-norm feature vector."""
        h, w, _ = self.INPUT_SHAPE
        _img = imread(img_path)
        # cv2.resize takes (width, height), not (height, width).
        res = resize(_img, (w, h), interpolation=INTER_AREA)
        img = Image.fromarray(cvtColor(res, COLOR_BGR2RGB))
        img = image.img_to_array(img)
        img = np.expand_dims(img, axis=0)
        img = preprocess_input(img)
        feat = self.model.predict(img)
        # L2-normalise so cosine similarity reduces to a dot product.
        norm_feat = feat[0] / LA.norm(feat[0])
        return norm_feat
if __name__ == '__main__':
    # Quick local smoke test: extract and print one image's feature vector.
    print("local run .....")
    # models = VGG16 (weights='imagenet', pooling = 'max', include_top=False)
    # img_path = './database/001_accordion_image_0001.jpg'
    # img = image.load_img (img_path, target_size=(224, 224))
    # x = image.img_to_array (img)
    # x = np.expand_dims (x, axis=0)
    # x = preprocess_input (x)
    # features = models.predict (x)
    # norm_feat = features[0]/LA.norm(features[0])
    # feats = np.array(norm_feat)
    # print(norm_feat.shape)
    # print(feats.shape)
    img_path = "H:/datasets/testingset/19700102125648863.JPEG"
    f = feature()
    norm_feat = f.extract_feat(img_path)
    print(norm_feat)
    print(norm_feat.shape)
| 935048000/ImageSearch | core/feature_extraction.py | feature_extraction.py | py | 1,808 | python | en | code | 1 | github-code | 36 |
# -*- coding: utf-8 -*-
# @Time : 2021/5/5 18:25
# @Author : 水神与月神
# @File : 灰度转彩色.py
# @Software : PyCharm
import cv2 as cv
import numpy as np
import os
import mypackage.dip_function as df
# demo
# path = r"C:\Users\dell\Desktop\8.png"
#
# image = cv.imread(path, cv.IMREAD_UNCHANGED)
#
# image1 = image[:, :, 0]
# image2 = image[:, :, 1]
# image3 = image[:, :, 2]
#
# gray = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
# gray2 = cv.bitwise_not(gray)
# image4 = image3 - image2
# im_color = cv.applyColorMap(gray2, cv.COLORMAP_PARULA)
# # COLORMAP_SUMMER
# # COLORMAP_RAINBOW
# # COLORMAP_PARULA
# cv.imshow("images", im_color)
# cv.waitKey()
# cv.destroyWindow('images')
# Batch job: histogram-equalize every grayscale image under `path`, apply a
# JET colormap, damp the red/green channels, and save under `save`.
path = r'G:\colour_img'
save = r'G:\colour_img\processed'
folders = os.listdir(path)
# The last directory entry is skipped — presumably the 'processed' output
# folder itself; NOTE(review): listdir order is not guaranteed, confirm.
for folder in folders[0:-1]:
    path_read = os.path.join(path, folder)
    path_save = os.path.join(save, folder)
    paths = df.get_path(path_read, path_save)  # (input, output) path pairs
    for p in paths:
        image = cv.imread(p[0], cv.IMREAD_UNCHANGED)
        gray = cv.equalizeHist(image)
        colour = cv.applyColorMap(gray, cv.COLORMAP_JET)
        # Damp the red channel (BGR index 2): shift down by 150, floor at 70.
        r = colour[:, :, 2]
        shaper = r.shape
        for i in range(shaper[0]):
            for j in range(shaper[1]):
                if r[i][j] - 150 < 0:
                    r[i][j] = 70
                else:
                    r[i][j] = r[i][j] - 150
        temp = r
        colour[:, :, 2] = temp
        # Green channel: top rows (< 600) shifted down by 100 with floor 70;
        # the remaining rows are only clamped at 160.
        # NOTE(review): the 600/150/100/70/160 thresholds look tuned for a
        # specific image size and palette — confirm before reuse.
        g = colour[:, :, 1]
        shapeg = g.shape
        for i in range(shapeg[0]):
            if i < 600:
                for j in range(shapeg[1]):
                    if g[i][j] - 100 < 70:
                        g[i][j] = 70
                    else:
                        g[i][j] = g[i][j] - 100
            else:
                for j in range(shapeg[1]):
                    if g[i][j] > 160:
                        g[i][j] = 160
                # elif g[i][j] < 70:
                #     g[i][j] = 70
        temp = g
        colour[:, :, 1] = temp
        cv.imwrite(p[1], colour)
        print("保存成功")
| mdwalu/previous | 数字图像处理/灰度转彩色.py | 灰度转彩色.py | py | 2,065 | python | en | code | 1 | github-code | 36 |
70167417064 | from mongoengine import Q
from django_pds.conf import settings
from django_pds.core.managers import UserReadableDataManager, GenericReadManager, UserRoleMapsManager
from django_pds.core.rest.response import error_response, success_response_with_total_records
from django_pds.core.utils import get_fields, get_document, is_abstract_document
from django_pds.core.utils import print_traceback
from django_pds.serializers import GenericSerializerAlpha
from ..parser.query import QueryParser
from ..parser.terms import FILTER, WHERE, SELECT, PAGE_SIZE, PAGE_NUM, ORDER_BY, RAW_WHERE
# Entities the generic PDS "select" endpoint must never expose (from settings).
NOT_SELECTABLE_ENTITIES_BY_PDS = settings.SELECT_NOT_ALLOWED_ENTITIES
# Field names stripped from results/filters unless explicitly included
# (presumably the row-level ACL columns — confirm against settings).
SECURITY_ATTRIBUTES = settings.SECURITY_ATTRIBUTES
def basic_data_read(document_name, fields='__all__',
                    page_size=10, page_num=1, order_by=None,
                    include_security_fields=False,
                    error_track=False):
    """Read a page of documents without any query filter or permission checks.

    Returns an ``(error, response)`` tuple: ``error`` is True when the
    serialized error response in the second slot should be returned to the
    client, False on success.

    :param document_name: mongoengine document (collection) name
    :param fields: '__all__' or a list/tuple of field names to serialize
    :param page_size: documents per page
    :param page_num: 1-based page number
    :param order_by: ordering passed through to the read manager
    :param include_security_fields: when reading all fields, keep the
        SECURITY_ATTRIBUTES columns instead of stripping them
    :param error_track: print a traceback when an exception is caught
    """
    try:
        document = get_document(document_name)
        if not document or not document_name:
            return True, error_response(f'document by name `{document_name}` doesn\'t exists')
        if fields != '__all__' and not isinstance(fields, (list, tuple)):
            return True, error_response('fields must be a list or tuple')
        sql_ctrl = GenericReadManager()
        data, cnt = sql_ctrl.read(document_name, Q(), page_size, page_num, order_by)
        if cnt == 0:
            return False, success_response_with_total_records([], cnt)
        gsa = GenericSerializerAlpha(document_name=document_name)
        if not fields == '__all__':
            for field in fields:
                gsa.select(field)
        else:
            fields = get_fields(document_name)
            # Strip the security columns unless the caller asked for them.
            # NOTE(review): set subtraction makes the field order
            # nondeterministic — confirm the serializer does not care.
            if not include_security_fields:
                fields = tuple(set(fields) - set(SECURITY_ATTRIBUTES))
            gsa.fields(fields)
        json = gsa.serialize(data)
        res = success_response_with_total_records(json.data, cnt)
        return False, res
    except BaseException as e:
        if error_track:
            print_traceback()
        return True, error_response(str(e))
def data_read(
        document_name, sql_text, user_id=None,
        roles=None, checking_roles=True,
        readable=True, security_attributes=True,
        selectable=True, read_all=False, exclude_default=False,
        page_number=1, _size=10, error_track=False):
    """Parse ``sql_text`` and read documents with field- and row-level checks.

    Returns an ``(error, response)`` tuple like basic_data_read.

    :param document_name: mongoengine document (collection) name
    :param sql_text: query text understood by QueryParser (select/where/
        order by/page size/page num)
    :param user_id: current user's id; enables row-level read filtering
    :param roles: explicit role list; looked up from user_id when omitted
    :param checking_roles: also match rows readable by the user's roles
    :param readable: enforce the per-role readable-fields whitelist
    :param security_attributes: reject filters on SECURITY_ATTRIBUTES
    :param selectable: reject entities listed as not selectable
    :param read_all: select every document field instead of the SELECT list
    :param exclude_default: forwarded to the readable-fields lookup
    :param page_number: fallback page number when the query has none
    :param _size: fallback page size when the query has none
    :param error_track: print a traceback when an exception is caught
    """
    document = get_document(document_name)
    # checking either model exists
    # or entity exists in not selectable entities
    # NOTE(review): these three guards return False (no-error flag) together
    # with an error_response, unlike every later failure path which returns
    # True — confirm callers expect that asymmetry.
    if not document:
        return False, error_response('document model not found')
    if is_abstract_document(document_name):
        return False, error_response('document model not found')
    if selectable and document_name in NOT_SELECTABLE_ENTITIES_BY_PDS:
        return False, error_response('document model is not selectable')
    try:
        parser = QueryParser(sql_text)
        dictionary = parser.parse()
        # filtering fields in where clause
        _filters = []
        if dictionary.get(FILTER, None):
            _filters = dictionary[FILTER]
        filter_fields = set(_filters)
        document_fields = set(get_fields(document_name))
        if len(filter_fields - document_fields) > 0:
            return True, error_response('Where clause contains unknown attribute to this Document')
        # Refuse to filter on ACL/security columns unless explicitly allowed.
        if security_attributes:
            security_attr = set(SECURITY_ATTRIBUTES)
            contains_security_attributes = filter_fields.intersection(security_attr)
            if len(contains_security_attributes) > 0:
                return True, error_response('Security attributes found in where clause')
        # checking user readable data from database for this particular request
        fields = ['ItemId']
        if dictionary.get(SELECT, None):
            fields = dictionary[SELECT]
        if read_all:
            fields = document_fields
        urm_ctrl = UserRoleMapsManager()
        if readable:
            # Field-level permission: every selected field must be in the
            # readable whitelist for this user's roles.
            urds_ctrl = UserReadableDataManager()
            __roles = None
            if user_id and not roles:
                __roles = urm_ctrl.get_user_roles(user_id)
            err, _fields = urds_ctrl.get_user_readable_data_fields(document_name, __roles, exclude_default)
            if err:
                msg = f'Entity \'{document_name}\' is missing from user readable data\'s'
                return True, error_response(msg)
            diff = set(fields) - _fields # _fields are already a set
            if len(diff) > 0:
                return True, error_response("Select clause contains non readable attributes")
        sql_ctrl = GenericReadManager()
        __raw__where = dictionary.get(RAW_WHERE, {})
        # Query-embedded paging wins over the function's fallback arguments.
        page_num = dictionary.get(PAGE_NUM, page_number)
        page_size = dictionary.get(PAGE_SIZE, _size)
        q = Q()
        if dictionary.get(WHERE, None):
            q = dictionary[WHERE]
        # checking for row level permission starts
        q2 = Q()
        if user_id:
            q2 = Q(IdsAllowedToRead=user_id)
        if checking_roles:
            if not roles and user_id:
                roles = urm_ctrl.get_user_roles(user_id)
            if roles and not isinstance(roles, (list, tuple)):
                return True, error_response('roles must be a list or a tuple.')
            # Row is readable when granted to the user OR any of their roles.
            for role in roles:
                q2 = q2.__or__(Q(RolesAllowedToRead=role))
        if user_id or (checking_roles and roles):
            q = q.__and__(q2)
        # checking for row level permission ends
        order_by = []
        if dictionary.get(ORDER_BY, None):
            order_by = dictionary[ORDER_BY]
        data, cnt = sql_ctrl.read(document_name, q, page_size, page_num, order_by)
        if cnt == 0:
            return False, success_response_with_total_records([], cnt)
        gsa = GenericSerializerAlpha(document_name=document_name)
        for field in fields:
            gsa.select(field)
        json = gsa.serialize(data)
        res = success_response_with_total_records(json.data, cnt)
        return False, res
    except BaseException as e:
        if error_track:
            print_traceback()
        return True, error_response(str(e))
| knroy/django-pds | django_pds/core/pds/generic/read.py | read.py | py | 6,463 | python | en | code | 3 | github-code | 36 |
34203743613 | import numpy as np
import torch
import torch.nn as nn
from pytorch_lightning.utilities.rank_zero import _get_rank
import models
from models.base import BaseModel
from models.utils import scale_anything, get_activation, cleanup, chunk_batch
from models.network_utils import get_encoding, get_mlp, get_encoding_with_network
class MarchingCubeHelper(nn.Module):
    """Extract an isosurface mesh from a level-set sampled on a regular grid.

    Backed either by torchmcubes (GPU tensors) or by PyMCubes (NumPy);
    vertices are produced in the unit cube [0, 1]^3.
    """

    def __init__(self, resolution, use_torch=True):
        """
        :param resolution: number of grid samples per axis
        :param use_torch: pick torchmcubes when True, PyMCubes otherwise
        """
        super().__init__()
        self.resolution = resolution
        self.use_torch = use_torch
        self.points_range = (0, 1)
        if self.use_torch:
            import torchmcubes
            self.mc_func = torchmcubes.marching_cubes
        else:
            import mcubes
            self.mc_func = mcubes.marching_cubes
        self.verts = None  # lazily-built grid vertex cache

    def grid_vertices(self):
        """Return (resolution^3, 3) query points of the sampling grid,
        built once and cached on the current device."""
        if self.verts is None:
            x, y, z = torch.linspace(*self.points_range, self.resolution), torch.linspace(*self.points_range, self.resolution), torch.linspace(*self.points_range, self.resolution)
            x, y, z = torch.meshgrid(x, y, z)
            verts = torch.cat([x.reshape(-1, 1), y.reshape(-1, 1), z.reshape(-1, 1)], dim=-1).reshape(-1, 3)
            self.verts = verts.to(_get_rank())
        return self.verts

    def forward(self, level, threshold=0.):
        """Run marching cubes on ``level`` (resolution^3 values) and return
        a dict with 'v_pos' vertices in [0, 1]^3 and 't_pos_idx' faces.

        NOTE(review): the sign of ``level`` is negated only on the mcubes
        path — confirm both backends use the same inside/outside convention.
        """
        level = level.float().view(self.resolution, self.resolution, self.resolution)
        if self.use_torch:
            verts, faces = self.mc_func(level.to(_get_rank()), threshold)
            verts, faces = verts.cpu(), faces.cpu().long()
        else:
            verts, faces = self.mc_func(-level.numpy(), threshold) # transform to numpy
            verts, faces = torch.from_numpy(verts.astype(np.float32)), torch.from_numpy(faces.astype(np.int64)) # transform back to pytorch
        # Rescale integer grid coordinates back to the unit cube.
        verts = verts / (self.resolution - 1.)
        return {
            'v_pos': verts,
            't_pos_idx': faces
        }
class BaseImplicitGeometry(BaseModel):
    """Base class for implicit geometry fields that can export a mesh by
    evaluating their level function on a grid and running marching cubes."""

    def setup(self):
        if self.config.isosurface is not None:
            assert self.config.isosurface.method in ['mc', 'mc-torch']
            if self.config.isosurface.method == 'mc-torch':
                raise NotImplementedError("Please do not use mc-torch. It currently has some scaling issues I haven't fixed yet.")
            self.helper = MarchingCubeHelper(self.config.isosurface.resolution, use_torch=self.config.isosurface.method=='mc-torch')

    def forward_level(self, points):
        """Evaluate the scalar level function at ``points``; subclasses implement."""
        raise NotImplementedError

    def isosurface_(self, vmin, vmax):
        """Extract a mesh inside the axis-aligned box [vmin, vmax]^3.

        Grid points are mapped from the helper's unit cube into the box,
        the level is evaluated in chunks (to bound memory), and the
        resulting vertices are mapped back into world coordinates.
        """
        grid_verts = self.helper.grid_vertices()
        grid_verts = torch.stack([
            scale_anything(grid_verts[...,0], (0, 1), (vmin[0], vmax[0])),
            scale_anything(grid_verts[...,1], (0, 1), (vmin[1], vmax[1])),
            scale_anything(grid_verts[...,2], (0, 1), (vmin[2], vmax[2]))
        ], dim=-1)
        def batch_func(x):
            # Move each chunk's result to CPU and free GPU memory eagerly.
            rv = self.forward_level(x).cpu()
            cleanup()
            return rv
        level = chunk_batch(batch_func, self.config.isosurface.chunk, grid_verts)
        mesh = self.helper(level, threshold=self.config.isosurface.threshold)
        mesh['v_pos'] = torch.stack([
            scale_anything(mesh['v_pos'][...,0], (0, 1), (vmin[0], vmax[0])),
            scale_anything(mesh['v_pos'][...,1], (0, 1), (vmin[1], vmax[1])),
            scale_anything(mesh['v_pos'][...,2], (0, 1), (vmin[2], vmax[2]))
        ], dim=-1)
        return mesh

    @torch.no_grad()
    def isosurface(self):
        """Extract the mesh over a fixed box slightly inside [-radius, radius]^3.

        The commented code below is the previous coarse-to-fine strategy,
        kept for reference.
        """
        if self.config.isosurface is None:
            raise NotImplementedError
        # coarse to fine extraction
        # mesh_coarse = self.isosurface_((-self.radius, -self.radius, -self.radius), (self.radius, self.radius, self.radius))
        # if mesh_coarse['v_pos'].shape[0] == 0:
        #     return mesh_coarse
        # vmin, vmax = mesh_coarse['v_pos'].amin(dim=0), mesh_coarse['v_pos'].amax(dim=0)
        # vmin_ = (vmin - (vmax - vmin) * 0.1).clamp(-self.radius, self.radius)
        # vmax_ = (vmax + (vmax - vmin) * 0.1).clamp(-self.radius, self.radius)
        # mesh_fine = self.isosurface_(vmin_, vmax_)
        # extract in a fixed scale
        # mesh_fine = self.isosurface_((-self.radius, -self.radius, -self.radius), (self.radius, self.radius, self.radius))
        # NOTE(review): the 0.2 margin shrinking the extraction box is a
        # magic constant — confirm it matches the scene scale.
        mesh_fine = self.isosurface_((-self.radius + 0.2, -self.radius+ 0.2, -self.radius+ 0.2), (self.radius - 0.2, self.radius - 0.2, self.radius - 0.2))
        return mesh_fine
@models.register('volume-density')
class VolumeDensity(BaseImplicitGeometry):
    """NeRF-style volumetric density field: encoding+MLP whose first output
    channel is the raw density and whose full output vector is the feature."""
    def setup(self):
        self.n_input_dims = self.config.get('n_input_dims', 3)
        self.n_output_dims = self.config.feature_dim
        self.encoding_with_network = get_encoding_with_network(self.n_input_dims, self.n_output_dims, self.config.xyz_encoding_config, self.config.mlp_network_config)
        self.radius = self.config.radius
        # Additive noise on the raw density (NeRF regularization trick).
        self.noises = 0.
        self.raw_noise_std = self.config.get('raw_noise_std', 0.)
    def forward(self, points):
        """Return (density, feature) at ``points`` given in [-radius, radius]."""
        points = scale_anything(points, (-self.radius, self.radius), (0, 1))
        out = self.encoding_with_network(points.view(-1, self.n_input_dims)).view(*points.shape[:-1], self.n_output_dims).float()
        density, feature = out[...,0], out
        if 'density_activation' in self.config:
            if self.raw_noise_std > 0.:
                # Fresh noise each call; stored so it could be inspected later.
                self.noises = (torch.randn(density.shape) * self.raw_noise_std).to(density)
            density = get_activation(self.config.density_activation)(density + self.noises + float(self.config.density_bias))
        if 'feature_activation' in self.config:
            feature = get_activation(self.config.feature_activation)(feature)
        return density, feature
    def forward_level(self, points):
        """Level set used for isosurface extraction (note the sign flip)."""
        points = scale_anything(points, (-self.radius, self.radius), (0, 1))
        density = self.encoding_with_network(points.reshape(-1, self.n_input_dims)).reshape(*points.shape[:-1], self.n_output_dims)[...,0].float()
        if 'density_activation' in self.config:
            density = get_activation(self.config.density_activation)(density + float(self.config.density_bias))
        return -density # caution!!!
    @torch.no_grad()
    def extract_volume(self, res=128):
        """Sample density on a res^3 grid inside [0.02, 0.98]^3 (normalized
        coordinates); returns (points, density)."""
        x = torch.linspace(0.02, 0.98, steps=res)
        y = torch.linspace(0.02, 0.98, steps=res)
        z = torch.linspace(0.02, 0.98, steps=res)
        grid_x, grid_y, grid_z = torch.meshgrid(x, y, z, indexing='ij')
        points = torch.cat((grid_x[..., None], grid_y[..., None], grid_z[..., None]), dim=3).to(self.rank) # (res, res, res, 3)
        density = self.encoding_with_network(points.reshape(-1, self.n_input_dims)).reshape(*points.shape[:-1], self.n_output_dims)[...,0].float()
        if 'density_activation' in self.config:
            density = get_activation(self.config.density_activation)(density + float(self.config.density_bias))
        return points, density
@models.register('volume-sdf')
class VolumeSDF(BaseImplicitGeometry):
    """Implicit geometry parameterized as a signed distance field (SDF).

    A positional encoding followed by an MLP maps 3D points to
    ``feature_dim`` outputs; channel 0 is the SDF value and the full vector
    doubles as the per-point feature.  SDF gradients can be computed either
    analytically with autograd or by central finite differences, selected by
    ``config.grad_type`` ('analytic' or 'finite_difference').

    The ~40-line commented-out earlier version of ``forward`` (which
    normalized points before building the autograd graph) was dead code and
    has been removed; see VCS history if it is ever needed again.
    """
    def setup(self):
        self.n_output_dims = self.config.feature_dim
        encoding = get_encoding(3, self.config.xyz_encoding_config)
        network = get_mlp(encoding.n_output_dims, self.n_output_dims, self.config.mlp_network_config)
        self.encoding, self.network = encoding, network
        self.radius = self.config.radius
        self.grad_type = self.config.grad_type
    def forward(self, points, with_grad=True, with_feature=True):
        """Evaluate the SDF at ``points`` (given in [-radius, radius]^3).

        Returns ``sdf`` alone, or a list ``[sdf, grad?, feature?]`` depending
        on ``with_grad``/``with_feature``.  Outputs are detached when not in
        training mode.
        """
        with torch.inference_mode(torch.is_inference_mode_enabled() and not (with_grad and self.grad_type == 'analytic')):
            with torch.set_grad_enabled(self.training or (with_grad and self.grad_type == 'analytic')):
                if with_grad and self.grad_type == 'analytic':
                    if not self.training:
                        points = points.clone() # points may be in inference mode, get a copy to enable grad
                    points.requires_grad_(True)
                points_ = points # points in the original scale
                points = scale_anything(points_, (-self.radius, self.radius), (0, 1)) # points normalized to (0, 1)
                out = self.network(self.encoding(points.view(-1, 3))).view(*points.shape[:-1], self.n_output_dims).float()
                sdf, feature = out[...,0], out
                if 'sdf_activation' in self.config:
                    sdf = get_activation(self.config.sdf_activation)(sdf + float(self.config.sdf_bias))
                if 'feature_activation' in self.config:
                    feature = get_activation(self.config.feature_activation)(feature)
                if with_grad:
                    if self.grad_type == 'analytic':
                        # Gradient w.r.t. the original-scale points.
                        grad = torch.autograd.grad(
                            sdf, points_, grad_outputs=torch.ones_like(sdf),
                            create_graph=True, retain_graph=True, only_inputs=True
                        )[0]
                    elif self.grad_type == 'finite_difference':
                        eps = 0.001
                        # Six axis-aligned offsets for central differences.
                        # NOTE(review): clamping ORIGINAL-scale points to
                        # (0, 1) looks inconsistent with the (-radius, radius)
                        # domain -- confirm intended range (behaviour kept).
                        points_d_ = torch.stack([
                            points_ + torch.as_tensor([eps, 0.0, 0.0]).to(points_),
                            points_ + torch.as_tensor([-eps, 0.0, 0.0]).to(points_),
                            points_ + torch.as_tensor([0.0, eps, 0.0]).to(points_),
                            points_ + torch.as_tensor([0.0, -eps, 0.0]).to(points_),
                            points_ + torch.as_tensor([0.0, 0.0, eps]).to(points_),
                            points_ + torch.as_tensor([0.0, 0.0, -eps]).to(points_)
                        ], dim=0).clamp(0, 1)
                        points_d = scale_anything(points_d_, (-self.radius, self.radius), (0, 1))
                        points_d_sdf = self.network(self.encoding(points_d.view(-1, 3)))[...,0].view(6, *points.shape[:-1]).float()
                        grad = torch.stack([
                            0.5 * (points_d_sdf[0] - points_d_sdf[1]) / eps,
                            0.5 * (points_d_sdf[2] - points_d_sdf[3]) / eps,
                            0.5 * (points_d_sdf[4] - points_d_sdf[5]) / eps,
                        ], dim=-1)
                rv = [sdf]
                if with_grad:
                    rv.append(grad)
                if with_feature:
                    rv.append(feature)
                rv = [v if self.training else v.detach() for v in rv]
                return rv[0] if len(rv) == 1 else rv
    def forward_level(self, points):
        """SDF value used as the marching-cubes level set."""
        points = scale_anything(points, (-self.radius, self.radius), (0, 1))
        sdf = self.network(self.encoding(points.view(-1, 3))).view(*points.shape[:-1], self.n_output_dims)[...,0].float()
        if 'sdf_activation' in self.config:
            sdf = get_activation(self.config.sdf_activation)(sdf + float(self.config.sdf_bias))
        return sdf
| 3dlg-hcvc/paris | models/geometry.py | geometry.py | py | 13,820 | python | en | code | 31 | github-code | 36 |
3572388059 | ##
# @file mathlib.py
# @package mathlib
# @brief Module with functions to convert and evaluate expression using expression tree
import treeClass
import logging
# Operator precedence table: a larger value binds tighter.
# '!' (factorial) is highest, '+'/'-' are lowest.
priority = {
    '!' : 3,
    '^' : 2,
    '*' : 1,
    '/' : 1,
    '%' : 1,
    '+' : 0,
    '-' : 0,
}
# Associativity of each operator: "LR" = left-associative,
# "RL" = right-associative, "unar" marks the unary postfix operator '!'.
associativity = {
    '!' : "unar",
    '^' : "RL",
    '*' : "LR",
    '/' : "LR",
    '%' : "LR",
    '+' : "LR",
    '-' : "LR",
}
##
# @brief Check if element is an operator
# @param element Element of expression to be checked
# @return True if element is an operator, False - otherwise
def is_operator(element):
    # Membership on the dict directly; the previous `.keys()` call was redundant.
    return element in priority
##
# @brief Compare the priorities of two operators
# @param oper1 The first operator
# @param oper2 The second operator
# @return 1 if priority of oper1 is higher than the priority of oper2, -1 - if lower, 0 - if equal
def check_priority(oper1, oper2):
    p1 = priority[oper1]
    p2 = priority[oper2]
    if p1 == p2:
        return 0
    return 1 if p1 > p2 else -1
##
# @brief Convert infix expression into postfix expression
#        (shunting-yard: operands go straight to the output list, operators
#        are held on a stack until a lower/equal-priority operator or a
#        closing parenthesis forces them out)
# @param infix_expr Given infix expression (list/string of tokens)
# @return List of tokens in postfix (RPN) order
def to_postfix(infix_expr):
    oper_stack = treeClass.Stack()
    postfix_expr = []
    position = 0
    while position < len(infix_expr):
        # Current token; `position` only advances when the token is consumed.
        elem = infix_expr[position]
        if is_operator(elem):
            top_stack = oper_stack.top()
            if top_stack == None or top_stack == '(': # empty stack or open paren: just push
                oper_stack.push(elem)
                position += 1
            else:
                if check_priority(elem, top_stack) == 1:
                    oper_stack.push(elem)
                    position += 1
                elif check_priority(elem, top_stack) == 0:
                    # NOTE(review): if both operators have associativity
                    # "unar" (e.g. '!' on top of '!'), neither branch below
                    # runs, `position` never advances and this loops forever
                    # -- confirm whether repeated '!' is legal input.
                    if associativity[elem] == "LR":
                        postfix_expr.append(oper_stack.pop())
                    elif associativity[infix_expr[position]] == "RL":
                        oper_stack.push(elem)
                        position += 1
                elif check_priority(elem, top_stack) == -1:
                    # Lower priority: flush the stack top, retry same token.
                    postfix_expr.append(oper_stack.pop())
        elif elem == '(':
            oper_stack.push(elem)
            position += 1
        elif elem == ')':
            # Pop until the matching '(' (assumes balanced parentheses).
            top_stack = oper_stack.top()
            while top_stack != '(':
                postfix_expr.append(oper_stack.pop())
                top_stack = oper_stack.top()
            oper_stack.pop()
            position += 1
        else:
            # Multi-character operand: greedily consume until the next
            # operator or ')'.
            operand = ''
            temp_position = position
            while temp_position < len(infix_expr) and (not is_operator(infix_expr[temp_position])
                                                       and infix_expr[temp_position] != ')'):
                operand += infix_expr[temp_position]
                logging.debug(f"{temp_position} : <{operand}>")
                temp_position += 1
            postfix_expr.append(operand)
            position += (temp_position - position)
    # Drain any operators still on the stack.
    while oper_stack.size() != 0:
        postfix_expr.append(oper_stack.pop())
    return postfix_expr
##
# @brief Entry point for other modules: evaluate an infix expression by
#        converting it to postfix, building an expression tree and
#        evaluating the tree
# @param input_expr Given input expression
# @return Float value of evaluated expression
def evaluate(input_expr):
    logging.debug(f"INPUT: <{input_expr}>")
    postfix_expr = to_postfix(input_expr)
    logging.debug(f"POSTFIX INPUT: <{postfix_expr}>")
    tree = treeClass.EqTree(list(postfix_expr))
    return tree.evaluate_tree(tree.root)
if __name__ == "__main__":
    # Manual smoke test: read one infix expression from stdin, evaluate it
    # and print the result with debug logging enabled.
    logging.basicConfig(level=logging.DEBUG)
    line = input()
    print(evaluate(line))
| Hedgezi/jenna_calcutega | src/mathlib.py | mathlib.py | py | 4,055 | python | en | code | 0 | github-code | 36 |
# -*- coding: utf-8 -*-
#coding=utf-8
# Manual Appium UI walkthrough: launches the app on an Android 4.4.4
# emulator, navigates two menu entries, then switches to the webview
# context and performs a swipe.  Intended to be run by hand, not as a
# unittest suite (unittest is imported but unused).
from AppiumTest import webdriver
import unittest
from time import sleep
# Desired capabilities describing the target device and app under test.
desired_caps = {}
desired_caps['platformName'] = 'Android'
desired_caps['platformVersion'] = '4.4.4'
desired_caps['deviceName'] = 'Android Emulator'
desired_caps['appPackage'] = 'com.entstudy.enjoystudy'
desired_caps['appActivity'] = 'com.entstudy.enjoystudy.activity.SplashActivity'
# Start the Appium driver session against the local Appium server.
driver = webdriver.Remote('http://localhost:4723/wd/hub', desired_caps)
sleep(10)
print("kaishi")
# driver.swipe(300,600,300,300)
# Tap through the app menus by their (localized) visible names.
driver.find_element_by_name("圈子").click()
driver.find_element_by_name("积分商城").click()
# driver.find_element_by_name("").click()
# driver.find_element_by_name("").click()
# driver.find_element_by_name("").click()
sleep(10)
print("kaishi2")
# driver.tap()
# Inspect the available contexts (native vs. webview) and switch to the
# second one -- presumably the webview; verify on the actual device.
c = driver.contexts
print(c)
print(c[1])
print(c[-1])
# driver.switch_to_window(c[1])
driver.switch_to.context(c[1])
# driver.context(c[1])
driver.swipe(300,600,300,300)
| flamecontrol/flamecontrol | Latent/script/testswipe.py | testswipe.py | py | 995 | python | en | code | 1 | github-code | 36 |
74518602024 | #import os
from typing import Union
import torch
import numpy as np
from . import torch_knn
# Probe the compiled extension once at import time; all later device checks
# rely on this flag.
gpu_available = torch_knn.check_for_gpu()
if not gpu_available:
    print("The library was not successfully compiled using CUDA. Only the CPU version will be available.")
# Maps a torch device type to the suffix used in the extension's class names.
_transl_torch_device = {"cpu": "CPU", "cuda": "GPU"}
class TorchKDTree:
    """Thin Python wrapper around the compiled torch_knn KD-Tree classes.

    The tree is always built on the CPU from a numpy copy of the reference
    points; queries pass raw data pointers into the extension, so all
    buffers must be contiguous and live on ``self.device``.
    """
    def __init__(self, points_ref : torch.Tensor, device : torch.device, levels : int, squared_distances : bool):
        """Builds the KDTree. See :ref:`build_kd_tree` for more details.
        """
        assert(device.type in ['cpu', 'cuda'])
        assert points_ref.shape[0] < 2**31, "Only 32 bit signed indexing implemented"
        self.dtype = points_ref.dtype
        self.dims = points_ref.shape[-1]
        self.nr_ref_points = points_ref.shape[0]
        # The extension exposes one class per device/dimension/precision,
        # e.g. "KDTreeGPU3DF" for float32 3D on CUDA.
        kdtree_str = "KDTree" + _transl_torch_device[device.type] + "%dD" % (self.dims) + ("F" if self.dtype == torch.float32 else "")
        try:
            self.kdtree = getattr(torch_knn, kdtree_str)(points_ref.detach().cpu().numpy(), levels)
        except AttributeError as err:
            raise RuntimeError("Could not find the KD-Tree for your specified options. This probably means the library was not compiled for the specified dimensionality, precision or the targeted device. Original error:", err)
        # The tree reorders points internally; shuffled_ind maps internal
        # indices back to the caller's original ordering.
        self.structured_points = torch.from_numpy(self.kdtree.get_structured_points())
        self.shuffled_ind = torch.from_numpy(self.kdtree.get_shuffled_inds()).long()
        self.use_gpu = use_gpu = (device.type == 'cuda')
        self.device = device
        self.dtype_idx = torch.int32 #Restriction in the compiled library
        self.ref_requires_grad = points_ref.requires_grad
        # Kept (not cloned) so gradients can flow through re-computed
        # distances in query().
        self.points_ref_bak = points_ref #.clone()
        self.squared_distances = squared_distances
        if self.use_gpu:
            self.structured_points = self.structured_points.to(self.device)
            self.shuffled_ind = self.shuffled_ind.to(self.device)
    def _search_kd_tree_gpu(self, points_query, nr_nns_searches, result_dists, result_idx):
        # Low-level dispatch; note self.part_nr is set elsewhere -- TODO confirm.
        torch_knn.searchKDTreeGPU(points_query, nr_nns_searches, self.part_nr, result_dists, result_idx)
    def _search_kd_tree_cpu(self, points_query, nr_nns_searches, result_dists, result_idx):
        # CPU counterpart of _search_kd_tree_gpu.
        torch_knn.searchKDTreeCPU(points_query, nr_nns_searches, self.part_nr, result_dists, result_idx)
    def query(self, points_query : torch.Tensor, nr_nns_searches : int=1,
                result_dists : torch.Tensor=None, result_idx : torch.Tensor=None):
        """Searches the specified KD-Tree for KNN of the given points
        Parameters
        ----------
        points_query : torch.Tensor of float or double precision
            Points for which the KNNs will be computed
        nr_nns_searches : int, optional
            How many closest nearest neighbors will be queried (=k), by default 1
        result_dists : torch.Tensor of float or double precision, optional
            Target array that will hold the resulting distance. If not specified, this will be dynamically created.
        result_idx : torch.Tensor of dtype_idx type, optional
            Target array that will hold the resulting KNN indices. If not specified, this will be dynamically created.
        Returns
        -------
        tuple
            Returns the tuple containing
            * dists (ndarray of float or double precision) : Quadratic distance of KD-Tree points to the queried points
            * inds (ndarray of type int) : Indices of the K closest neighbors
        Raises
        ------
        RuntimeError
            If the requested KDTree can not be constructed.
        """
        if nr_nns_searches > self.nr_ref_points:
            raise RuntimeError("You requested more nearest neighbors than there are in the KD-Tree")
        points_query = points_query.to(self.device)
        if result_dists is None:
            result_dists = torch.empty(size=[points_query.shape[0], nr_nns_searches], dtype=self.dtype, device=self.device)
        if result_idx is None:
            result_idx = torch.empty(size=[points_query.shape[0], nr_nns_searches], dtype=self.dtype_idx, device=self.device)
        assert(list(result_dists.shape) == [points_query.shape[0], nr_nns_searches])
        assert(result_dists.dtype == self.dtype)
        assert(list(result_idx.shape) == [points_query.shape[0], nr_nns_searches])
        assert(result_idx.dtype == self.dtype_idx)
        assert(points_query.dtype == self.dtype)
        # The extension writes through raw pointers, so contiguity is required.
        if not result_dists.is_contiguous():
            result_dists = result_dists.contiguous()
        if not result_idx.is_contiguous():
            result_idx = result_idx.contiguous()
        if not points_query.is_contiguous():
            points_query = points_query.contiguous()
        # Pass tensor storage to the extension as integer addresses.
        points_query_ptr = points_query.data_ptr()
        dists_ptr = result_dists.data_ptr()
        knn_idx_ptr = result_idx.data_ptr()
        self.kdtree.query(points_query_ptr, points_query.shape[0], nr_nns_searches, dists_ptr, knn_idx_ptr)
        dists = result_dists
        # Map internal (shuffled) indices back to the original point order.
        inds = self.shuffled_ind[result_idx.long()]
        if (points_query.requires_grad or self.ref_requires_grad) and torch.is_grad_enabled():
            # Recompute distances in pure torch so autograd can track them;
            # the extension's output is not differentiable.
            dists = torch.sum((points_query[:, None] - self.points_ref_bak[inds])**2, dim=-1)
        if not self.squared_distances:
            dists = torch.sqrt(dists)
        return dists, inds
def build_kd_tree(points_ref : Union[torch.Tensor, np.ndarray], device : torch.device = None,
                    squared_distances = True, levels : int=None):
    """Construct a :class:`TorchKDTree` for later ``query`` calls.

    The tree itself is always built on the CPU and moved to ``device``
    afterwards if necessary.

    Parameters
    ----------
    points_ref : torch.Tensor or np.ndarray
        Reference points the tree is built from.
    device : torch.device, optional
        Target device of the tree; defaults to ``points_ref.device``.
    squared_distances : bool
        Return squared euclidean distances when True (default).
    levels : int, optional
        Tree depth in [1, 13]; chosen from the point count when omitted.

    Returns
    -------
    TorchKDTree
        A KD-Tree exposing a ``query`` method for nearest-neighbor search.
    """
    if device is None:
        device = points_ref.device
    if levels is None:
        # Heuristic depth: roughly log2(N) - 3, clamped to the supported range.
        levels = np.maximum(1, np.minimum(13, int(np.log(int(points_ref.shape[0])) / np.log(2))-3))
    # Normalize input types: numpy points -> tensor, device string -> torch.device.
    if isinstance(points_ref, np.ndarray):
        points_ref = torch.from_numpy(points_ref)
    if isinstance(device, str):
        device = torch.device(device)
    assert(levels >= 1 and levels <= 13)
    assert isinstance(points_ref, torch.Tensor)
    assert device.type != 'cuda' or gpu_available, "You requested the KD-Tree on the GPU, but the library was compiled with CPU support only"
    assert(device.type in ['cuda', 'cpu'])
    return TorchKDTree(points_ref, device, levels, squared_distances)
| thomgrand/torch_kdtree | torch_kdtree/nn_distance.py | nn_distance.py | py | 7,259 | python | en | code | 5 | github-code | 36 |
4197719073 | """Utilities for plotting the results of the experiments."""
import os
import json
import numpy as np
import pandas as pd
import matplotlib
matplotlib.use("pdf")
# Avoid trouble when generating pdf's on a distant server
# matplotlib.use("TkAgg") # Be able to import matplotlib in ipython
import matplotlib.pyplot as plt
from matplotlib.lines import Line2D
def plot_cost_acc(params, lim_acc=None, lim_cost=None):
    """Plot SCE cost (left axis, red) and miss rate (right axis, blue)
    for both train and test as a function of the training step.

    params: run dict with "step", "cost_{train,test}" and "acc_{train,test}".
    lim_acc / lim_cost: optional (ymin, ymax) limits for each axis.
    """
    plt.plot(params["step"], params["cost_test"],
             label="SCE test", color="red")
    plt.plot(params["step"], params["cost_train"],
             label="SCE train", color="red", linestyle="--")
    plt.grid()
    plt.legend(loc="lower left")
    plt.ylabel("SCE", color="red")
    if lim_cost:
        plt.ylim(lim_cost)
    # Second y-axis for the miss rate (1 - accuracy).
    plt.twinx()
    plt.plot(params["step"], 1 - np.array(params["acc_test"]),
             label="miss test", color="blue")
    plt.plot(params["step"], 1 - np.array(params["acc_train"]),
             label="miss train", color="blue", linestyle="--")
    plt.ylabel("misses", color="blue")
    if lim_acc:
        plt.ylim(lim_acc)
    plt.legend(loc="upper right")
    plt.tight_layout()
def plot_norm(params):
    """Plot the regularization norm ("norm_mat") against the training step."""
    plt.plot(params["step"], params["norm_mat"], label="norm", color="red")
    plt.tight_layout()
# --- Quantile plotting functions ---
def quantile(X, q, axis=0):
    """Empirical q-quantile of X along axis 0 (np.quantile needs numpy >= 1.15).

    Sorts along axis 0 and returns the row at index floor(n * q), clamped to
    the last row so that q == 1.0 is valid (previously it raised IndexError).
    Only axis=0 is supported.
    """
    assert axis == 0
    X = np.array(X)
    idx = min(int(X.shape[0] * q), X.shape[0] - 1)
    return np.sort(X, axis=0)[idx, :]
def param_list_to_quant(key, q, p_list):
    """Gather ``key`` across all runs in ``p_list`` and return its q-quantile.

    Accuracy-like series ("acc*", "top*") are converted to error rates
    (1 - value) before the quantile is taken.
    """
    stacked = np.array([p[key] for p in p_list])
    if key.startswith(("acc", "top")):
        # Plots show error rates, not accuracies.
        stacked = 1 - stacked
    return quantile(stacked, q)
def plot_quant(params_list, param_name, label, color,
               linestyle="-", alpha=0.05):
    """Plot the median of ``param_name`` across runs with a shaded
    (1 - alpha) quantile band.

    params_list: one params dict per run; the first run supplies "step".
    """
    p_list = params_list
    params = p_list[0]
    # Shaded band between the alpha/2 and 1-alpha/2 quantiles.
    plt.fill_between(params["step"],
                     param_list_to_quant(param_name, (1-alpha/2), p_list),
                     param_list_to_quant(param_name, alpha/2, p_list),
                     color=color, alpha=0.25)
    plt.plot(params["step"], param_list_to_quant(param_name, 0.5, p_list),
             label=label, color=color, linestyle=linestyle)
def plot_quant_cost_acc(params_list, alpha, lim_acc=None, lim_cost=None,
                        left_label_remove=False, right_label_remove=False):
    """Two-axis quantile plot across runs: SCE cost (left, red) and miss
    rate (right, blue), median lines with (1 - alpha) quantile bands for the
    test curves.  The *_label_remove flags blank an axis' tick labels for
    camera-ready side-by-side figures.
    """
    p_list = params_list
    params = p_list[0]
    # Cost: test band + median, train median only.
    plt.fill_between(params["step"],
                     param_list_to_quant("cost_test", (1-alpha/2), p_list),
                     param_list_to_quant("cost_test", alpha/2, p_list),
                     color="red", alpha=0.25)
    plt.plot(params["step"], param_list_to_quant("cost_test", 0.5, p_list),
             label="SCE test", color="red")
    plt.plot(params["step"], param_list_to_quant("cost_train", 0.5, p_list),
             label="SCE train", color="red", linestyle="--")
    plt.grid()
    plt.legend(loc="lower left")
    if not left_label_remove:
        plt.ylabel("SCE", color="red")
    else:
        plt.gca().yaxis.set_ticklabels([])
    if lim_cost:
        plt.ylim(lim_cost)
    # Miss rate on a twin axis (param_list_to_quant already returns 1 - acc).
    plt.twinx()
    plt.fill_between(params["step"],
                     param_list_to_quant("acc_test", (1-alpha/2), p_list),
                     param_list_to_quant("acc_test", alpha/2, p_list),
                     color="blue", alpha=0.25)
    plt.plot(params["step"],
             np.array(param_list_to_quant("acc_test", 0.5, p_list)),
             label="miss test", color="blue")
    plt.plot(params["step"],
             np.array(param_list_to_quant("acc_train", 0.5, p_list)),
             label="miss train", color="blue", linestyle="--")
    if not right_label_remove:
        plt.ylabel("misses", color="blue")
    else:
        plt.gca().yaxis.set_ticklabels([])
    if lim_acc:
        plt.ylim(lim_acc)
    plt.legend(loc="upper right")
    plt.tight_layout()
def plot_quantiles(root_exp_folder,
                   subfolds=("uniform", "prediction", "stratum"),
                   alpha=0.05, figsize=(3, 3),
                   lim_acc=None, lim_cost=None,
                   camera_ready=True):
    """
    Plot the quantile graphs for a standardized experiment.
    If the experiment data is in a folder named exp, it expects to find
    subfolders subfolds in which there are folders with runs and a
    params.json file that contains the result of the runs. It will generate
    a quantile plot (saved as quantiles.pdf) in each tw_<subfold> directory.
    """
    for type_weight in subfolds:
        cur_dir = "{}/tw_{}/".format(root_exp_folder, type_weight)
        params_list = get_param_list_for_quantiles(root_exp_folder,
                                                   type_weight)
        plt.figure(figsize=figsize)
        # For camera-ready 3-panel layouts only the outer panels keep labels.
        right_label_remove = camera_ready and type_weight == "uniform"
        left_label_remove = camera_ready and type_weight == "prediction"
        plot_quant_cost_acc(params_list, alpha=alpha,
                            lim_acc=lim_acc, lim_cost=lim_cost,
                            left_label_remove=left_label_remove,
                            right_label_remove=right_label_remove)
        if not camera_ready:
            plt.title(type_weight)
        plt.savefig("{}/{}.pdf".format(cur_dir, "quantiles"), format="pdf")
def get_param_list_for_quantiles(root_exp_folder, type_weight):
    """Load the params.json of every run under <root>/tw_<type_weight>/.

    Run folders without a params.json are skipped.  Returns a list of the
    parsed dicts, one per run, in os.listdir order.
    """
    cur_dir = "{}/tw_{}/".format(root_exp_folder, type_weight)
    cur_runs = os.listdir(cur_dir)
    params_list = list()
    for cur_run in cur_runs:
        cur_file = cur_dir + cur_run + "/params.json"
        if os.path.exists(cur_file):
            # Context manager closes the handle; the previous
            # json.load(open(...)) pattern leaked file descriptors.
            with open(cur_file, "rt") as fh:
                params_list.append(json.load(fh))
    return params_list
def plot_quantiles_cost_n_miss(root_exp_folder,
                               subfolds=("uniform", "prediction", "stratum"),
                               sf_style={"uniform": {"color": "blue",
                                                     "label": "Uniform"},
                                         "prediction": {"color": "green",
                                                        "label": "Weighted"},
                                         "stratum": {"color": "green",
                                                     "label": "Stratum"},
                                         },
                               alpha=0.05, figsize=(3, 3), lim_acc=None,
                               lim_cost=None, lim_top=None, camera_ready=True):
    """
    Plot the quantile graphs for a standardized experiment, one figure per
    metric (miss rate, SCE, top-5 error), overlaying all reweighting schemes.
    If the experiment data is in a folder named exp, it expects to find
    subfolders subfolds in which there are folders with runs and a
    params.json file that contains the result of the runs.  Figures are
    saved as quant_<metric>.pdf in root_exp_folder.

    NOTE(review): sf_style is a mutable default argument; it is only read
    here, but callers should not mutate it.
    """
    print("Loading the data...")
    # One params list per reweighting scheme, loaded once up front.
    list_params_list = list()
    for type_weight in subfolds:
        list_params_list.append(get_param_list_for_quantiles(
            root_exp_folder, type_weight))
    print("Plotting results...")
    for plotted_val in ("acc", "cost", "topk"):
        plt.figure(figsize=figsize)
        for itw, type_weight in enumerate(subfolds):
            params_list = list_params_list[itw]
            params = params_list[0]
            color = sf_style[type_weight]["color"]
            # Test curve with quantile band; train median dashed on top.
            plot_quant(params_list, "{}_test".format(plotted_val), "",
                       color=color)
            plt.plot(params["step"], param_list_to_quant(
                "{}_train".format(plotted_val), 0.5, params_list),
                     color=color, linestyle="--")
        if plotted_val == "acc" and lim_acc:
            plt.ylim(lim_acc)
            plt.ylabel("Miss rate")
        if plotted_val == "cost" and lim_cost:
            plt.ylim(lim_cost)
            plt.ylabel("SCE")
        if plotted_val == "topk" and lim_top:
            plt.ylim(lim_top)
            plt.ylabel("Top-5 error")
        if not camera_ready:
            plt.title(plotted_val)
        # Two legends: train/test linestyles, and one entry per scheme.
        train_lgd = Line2D([0, 0], [1, 1], color="black", linestyle="--")
        test_lgd = Line2D([0, 0], [1, 1], color="black", linestyle="-")
        legend1 = plt.gca().legend([train_lgd, test_lgd], ["Train", "Test"],
                                   loc="upper right")
        legend_lines = [Line2D([0, 0], [1, 1],
                               color=sf_style[k]["color"], linestyle="-")
                        for k in subfolds]
        legend_names = [sf_style[k]["label"] for k in subfolds]
        plt.gca().legend(legend_lines, legend_names, loc="lower left")
        plt.gca().add_artist(legend1)
        plt.grid()
        plt.tight_layout()
        # elif plotted_val == "acc":
        #     plt.title("Miss rate")
        # else:
        #     plt.title("SCE")
        plt.savefig("{}/{}_{}.pdf".format(root_exp_folder, "quant",
                                          plotted_val), format="pdf")
    print("Done !")
def plot_class_probas(params, with_ticklabels=True):
    """Grouped bar chart of per-class probabilities, train (blue) vs test
    (green), from params["p_y_train"] / params["p_y_test"]."""
    n_classes = len(params["p_y_train"])
    width = 0.35
    ind = np.arange(n_classes)
    ax = plt.gcf().subplots()
    rects1 = ax.bar(ind, params["p_y_train"], width, color="blue")
    rects2 = ax.bar(ind + width, params["p_y_test"], width, color="green")
    ax.set_ylabel("Probability")
    ax.set_xlabel("Class")
    # Center tick marks between the paired bars.
    ax.set_xticks(ind+width/2)
    if with_ticklabels:
        ax.set_xticklabels(ind)
    else:
        ax.set_xticklabels(["" for _ in ind])
    ax.legend((rects1[0], rects2[0]), ("Train", "Test"))
    plt.grid()
    plt.gcf().tight_layout()
def plot_strata_probas(params, with_ticklabels=True):
    """Grouped bar chart of per-stratum probabilities, train (blue) vs test
    (green), from params["p_z_train"] / params["p_z_test"]."""
    n_stratas = len(params["p_z_train"])
    width = 0.35
    ind = np.arange(n_stratas)
    ax = plt.gcf().subplots()
    rects1 = ax.bar(ind, params["p_z_train"], width, color="blue")
    rects2 = ax.bar(ind + width, params["p_z_test"], width, color="green")
    ax.set_ylabel("Probability")
    ax.set_xlabel("Strata")
    # Center tick marks between the paired bars.
    ax.set_xticks(ind+width/2)
    if with_ticklabels:
        ax.set_xticklabels(ind)
    else:
        ax.set_xticklabels(["" for _ in ind])
    ax.legend((rects1[0], rects2[0]), ("Train", "Test"))
    # Horizontal grid lines only (vertical ones clutter the bars).
    plt.gca().yaxis.grid(True)
    # plt.grid()
    plt.gcf().tight_layout()
| RobinVogel/Weighted-Empirical-Risk-Minimization | plot_utils.py | plot_utils.py | py | 10,788 | python | en | code | 1 | github-code | 36 |
27253300099 | #!/usr/bin/python
import sys
#latepath="/home/akavka/minimax/"
def compareGames(inFile1, inFile2):
    """Compare two game logs line by line.

    Returns True when every line of inFile1 matches the corresponding line
    of inFile2; extra trailing lines in inFile2 are tolerated (matching the
    original behaviour, where the length check was commented out).  On the
    first mismatch the line index is printed and False is returned.  A
    shorter inFile2 now counts as a mismatch instead of raising IndexError,
    and both files are closed via context managers.
    """
    with open(inFile1, "r") as in1, open(inFile2, "r") as in2:
        lines1 = in1.readlines()
        lines2 = in2.readlines()
    for i in range(len(lines1)):
        if i >= len(lines2) or lines1[i] != lines2[i]:
            print("Discrepancy at line " + str(i))
            return False
    return True
def analyzeOutput(filename):
    """Print the average per-turn time of the first game in *filename* and
    return the sum of its per-turn times.

    The file holds one float per line; a line starting with 'g' marks a game
    boundary.  Processing stops at the first boundary (the original code
    returned there too -- the counter resets after the return were
    unreachable and have been removed).  Returns None when the file contains
    no boundary line.  The file is now closed on every path.
    """
    gameIndex = 0  # kept for the message format; never advanced before return
    numTurns = 0
    mySum = 0
    with open(filename, "r") as inFile:
        for line in inFile:
            if line[0] == ("g"):
                # NOTE(review): ZeroDivisionError if the boundary is the very
                # first line -- behaviour preserved from the original.
                print("Average time from file " + filename + " was " + str(mySum/numTurns) + " for game " + str(gameIndex))
                return mySum
            else:
                mySum += float(line)
                numTurns += 1
def analyzeTime(filename):
    """Print the average of the float timings in *filename* (one value per
    line) and return their sum."""
    with open(filename, "r") as handle:
        values = [float(line) for line in handle]
    total = sum(values)
    print("Average time from file " + filename + " was " + str(float(total)/len(values)))
    return total
def analyzeCount(filename):
    """Print the average of the integer counts in *filename* (one per line)
    and return their sum.

    The printed message previously said "Average time" -- a copy-paste from
    analyzeTime; it now correctly says "count".  The file is closed via a
    context manager.
    """
    with open(filename, "r") as inFile:
        counts = [int(line) for line in inFile]
    total = sum(counts)
    print("Average count from file " + filename + " was " + str(float(total)/len(counts)))
    return total
def analyzeDivergence(filename, numCoresString):
    """Report per-interval and overall core utilization from a trace file.

    Each line is either "<worker> <useful_time>" or "fence <wall_time>",
    which closes an interval: utilization for the interval is the useful
    time accumulated since the previous fence divided by
    numCores * wall_time.  Prints each interval's utilization and the total
    utilization across all fenced intervals.  The file is now closed via a
    context manager (previously the handle leaked).
    """
    numCores = float(numCoresString)
    with open(filename, "r") as inFile:
        lines = inFile.readlines()
    partialUsefulSum = 0
    usefulSum = 0
    totalSum = 0
    for line in lines:
        words = line.split()
        if words[0] == "fence":
            utilization = partialUsefulSum/(numCores*float(words[1]))
            print("Utilization was " + str(utilization))
            totalSum += numCores*float(words[1])
            partialUsefulSum = 0
        else:
            partialUsefulSum += float(words[1])
            usefulSum += float(words[1])
    print("Total utilization was " + str(usefulSum/totalSum))
def processAFile(index, cores, utilization, speedup, workEfficiency, searchOverhead, loss, depthW, depthB, wins, draws, losses):
    """Parse latedays.qsub.o<index> and accumulate its metrics in place.

    The first line's 4th word is the core count; a first-seen core gets
    fresh entries in every accumulator.  Each subsequent line is matched on
    its leading words ("Total utilization ...", "Work ratio ...",
    "Overall efficiency ...", "Overall speedup ...", "... depth W/B ...",
    "Result win/draw/loss ...") and appended to the corresponding
    per-core list or counter.  Depth values >= 10 are treated as outliers
    and dropped.  The file is now closed via a context manager (it was
    previously leaked).
    """
    filename = "latedays.qsub.o" + str(index)
    with open(filename, "r") as infile:
        lines = infile.readlines()
    words = lines[0].split()
    core = int(words[3])
    lossTerm = 0
    if core not in cores:
        cores.append(core)
        utilization[core] = []
        speedup[core] = []
        workEfficiency[core] = []
        searchOverhead[core] = []
        loss[core] = []
        depthW[core] = []
        depthB[core] = []
        wins[core] = 0
        draws[core] = 0
        losses[core] = 0
    for line in lines:
        words = line.split()
        if len(words) < 2:
            continue
        if words[0] == "Total" and words[1] == "utilization":
            utilization[core].append(float(words[3]))
            lossTerm = float(words[3])
        if words[0] == "Work" and words[1] == "ratio":
            # Loss is utilization * work ratio; relies on the utilization
            # line appearing before the work-ratio line in the file.
            searchOverhead[core].append(float(words[3]))
            lossTerm *= float(words[3])
            loss[core].append(lossTerm)
        if words[0] == "Overall" and words[1] == "efficiency":
            workEfficiency[core].append(float(words[4]))
        if words[0] == "Overall" and words[1] == "speedup":
            speedup[core].append(float(words[3]))
        if words[1] == "depth" and words[2] == "W":
            if float(words[4]) < 10:
                depthW[core].append(float(words[4]))
        if words[1] == "depth" and words[2] == "B":
            if float(words[4]) < 10:
                depthB[core].append(float(words[4]))
        if words[0] == "Result" and words[1] == "win":
            wins[core] += 1
        if words[0] == "Result" and words[1] == "draw":
            draws[core] += 1
        if words[0] == "Result" and words[1] == "loss":
            losses[core] += 1
#if speedup
def main():
    """Aggregate metrics from latedays.qsub.o<N> files for N in
    [argv[1], argv[2]] and print per-core depth averages and win/draw/loss
    tallies as a tab-separated table."""
    lowerRange=int(sys.argv[1])
    upperRange=int(sys.argv[2])
    # Accumulators keyed by core count; filled in place by processAFile.
    cores=[]
    utilization={}
    speedup={}
    workEfficiency={}
    searchOverhead={}
    loss={}
    depthW={}
    depthB={}
    wins={}
    draws={}
    losses={}
    for i in range(lowerRange,upperRange+1):
        processAFile(i,cores,utilization, speedup, workEfficiency, searchOverhead, loss, depthW, depthB, wins, draws, losses)
    # NOTE(review): header lists speedup/utilization columns, but the rows
    # below print depth and result tallies instead (that output is in the
    # disabled string block).
    print ("Core\tSpeedup\tUtilization\tEffiencySpeedup\tSearchOverhead\tLoss")
    for core in cores:
        meanDepthW=str(sum(depthW[core])/len(depthW[core]))
        meanDepthB=str(sum(depthB[core])/len(depthB[core]))
        """
        meanSpeed=str(sum(speedup[core])/len(speedup[core]))
        meanUtil=str(sum(utilization[core])/len(speedup[core]))
        meanEff=str(sum(workEfficiency[core])/len(workEfficiency[core]))
        meanSearch=str(sum(searchOverhead[core])/len(searchOverhead[core]))
        meanLoss=str(sum(loss[core])/len(loss[core]))
        print(str(core)+"\t"+meanSpeed+"\t"+ meanUtil+ "\t" + meanEff+"\t"+meanSearch+"\t"+meanLoss)
        """
        print(str(core)+"\t"+meanDepthW+"\t"+meanDepthB + "\t" + str(wins[core]) + "\t"+ str(draws[core]) + "\t" + str(losses[core]))
if __name__=="__main__":
    main()
| akavka/minimax | takeAverages.py | takeAverages.py | py | 5,604 | python | en | code | 0 | github-code | 36 |
11370165603 | import numpy as np
import torch
import torch.nn as nn
from ml.modules.backbones import Backbone
from ml.modules.bottoms import Bottom
from ml.modules.heads import Head
from ml.modules.layers.bifpn import BiFpn
from ml.modules.tops import Top
class BaseModel(nn.Module):
    """Depth-prediction model assembled from four configurable stages:
    bottom (input stem) -> backbone -> top (feature fusion) -> head.

    Each stage is constructed from its own sub-config of ``config``
    (``config.bottom``, ``config.backbone``, ``config.top``, ``config.head``).
    """
    def __init__(self, config):
        super().__init__()
        # bottom: input stem (conv-bn-relu, per the original note)
        self.bottom = Bottom(config.bottom) # conv bn relu
        # get backbone
        self.backbone = Backbone(config.backbone)
        # get top
        self.top = Top(config.top)
        # get head, i wish ;P
        self.head = Head(config.head)
        # NOTE(review): the commented-out code below is the only place that
        # would create self.dorn_layer / self.reg_layer / self.dorn_criterion /
        # self.reg_criterion, which get_prediction_and_loss() still references.
        # As written, that method cannot run — confirm whether it is dead code.
        # if 'dorn' in self.head:
        #     self.dorn_layer = torch.nn.Sequential(torch.nn.Conv2d(in_channels=config['backbone_features'],
        #                                                           out_channels=self.ord_num * 2,
        #                                                           kernel_size=1,
        #                                                           stride=1),
        #                                           OrdinalRegressionLayer())
        #     self.dorn_criterion = OrdinalRegressionLoss(self.ord_num, self.beta, self.discretization)
        # if 'reg' in self.head:
        #     self.reg_layer = torch.nn.Conv2d(in_channels=config['backbone_features'],
        #                                      out_channels=1,
        #                                      kernel_size=1,
        #                                      stride=1)
        #
        #     self.reg_criterion = get_regression_loss(config['regression_loss'])
    def forward(self, image, depth=None, target=None):
        """
        :param image: RGB image, torch.Tensor, Nx3xHxW
        :param depth: optional depth input forwarded to the bottom stem
            (presumably a sparse/prior depth map — confirm against Bottom)
        :param target: ground truth depth, torch.Tensor, NxHxW; passed to
            the head alongside the fused feature
        :return: output: if training, return loss, torch.Float,
                 else return {"target": depth, "prob": prob, "label": label},
                 depth: predicted depth, torch.Tensor, NxHxW
                 prob: probability of each label, torch.Tensor, NxCxHxW, C is number of label
                 label: predicted label, torch.Tensor, NxHxW
        """
        # Stem fuses the RGB image (and optional depth) into one feature map.
        input_feature = self.bottom(image, depth)
        # Backbone yields a 4-level feature pyramid (p0 = finest — TODO confirm).
        p0, p1, p2, p3 = self.backbone(input_feature)
        # Top fuses the pyramid into a single feature tensor.
        feature = self.top([p0, p1, p2, p3])
        # Head consumes both the feature and the target (for in-head loss).
        pred = self.head([feature, target])
        return pred
    def get_prediction_and_loss(self, feat, target):
        """Combine DORN (ordinal) and regression branches into one depth map
        and, in training mode, a summed loss.

        NOTE(review): relies on attributes (dorn_layer, reg_layer, ord_num,
        beta, gamma, discretization, dorn_criterion, reg_criterion) that the
        visible __init__ no longer sets — they exist only in its commented-out
        code. Also treats ``'dorn' in self.head`` as a membership test on the
        Head module; verify Head supports __contains__.
        """
        # predicion
        # dorn prediction
        if 'dorn' in self.head:
            prob, label = self.dorn_layer(feat)
            # Decode the ordinal label back to metric depth: SID uses
            # log-spaced bin edges, otherwise uniform (UD) spacing.
            if self.discretization == "SID":
                t0 = torch.exp(np.log(self.beta) * label.float() / self.ord_num)
                t1 = torch.exp(np.log(self.beta) * (label.float() + 1) / self.ord_num)
            else:
                t0 = 1.0 + (self.beta - 1.0) * label.float() / self.ord_num
                t1 = 1.0 + (self.beta - 1.0) * (label.float() + 1) / self.ord_num
            # Midpoint of the bin, shifted by gamma.
            dorn_depth = (t0 + t1) / 2 - self.gamma
        else:
            dorn_depth = torch.as_tensor([0], device=torch.device('cuda'))
        # regression prediction
        if 'reg' in self.head:
            reg_depth = self.reg_layer(feat).squeeze(1)
        else:
            reg_depth = torch.as_tensor([0], device=torch.device('cuda'))
        # the full depth: DORN coarse depth plus regression refinement
        # (either term is a zero tensor when its branch is disabled).
        depth = dorn_depth + reg_depth
        # loss
        if self.training and target is not None:
            # dorn loss
            if 'dorn' in self.head:
                dorn_loss = self.dorn_criterion(prob, target)
            else:
                dorn_loss = torch.as_tensor([0], device=torch.device('cuda'))
            # regression loss
            if 'reg' in self.head:
                reg_loss = self.reg_criterion(depth, target)
            else:
                reg_loss = torch.as_tensor([0], device=torch.device('cuda'))
            # full loss
            loss = dorn_loss + reg_loss
        else:
            loss = torch.as_tensor([0], device=torch.device('cuda'))
        return depth, loss
| gregiberri/DepthPrediction | ml/models/base_model.py | base_model.py | py | 4,058 | python | en | code | 0 | github-code | 36 |
# Definition for singly-linked list.
class ListNode:
    """A single node of a singly linked list."""

    def __init__(self, x):
        # Payload value and pointer to the successor (None marks the tail).
        self.val, self.next = x, None
class Solution:
    def deleteDuplicates(self, head: ListNode) -> ListNode:
        """Collapse consecutive equal values in a sorted list, in place.

        For every node, skip past all following nodes carrying the same
        value and relink directly to the first differing node.
        """
        current = head
        while current is not None:
            scan = current.next
            while scan is not None and scan.val == current.val:
                scan = scan.next
            current.next = scan
            current = scan
        return head
43069956711 | import csv
import io
from Crypto.Signature import pkcs1_15
from Crypto.PublicKey import RSA
from Crypto.Hash import SHA256, SHA
gSigner = "signer@stem_app"
def loadVoters(fname):
    """Load the voter roll from a signed CSV file.

    Returns a dict mapping ``studNr`` -> full CSV row; an empty dict when
    the file cannot be loaded, verified, or parsed.
    """
    try:
        rows = csv.DictReader(loadFile(fname), delimiter=';')
        return {row['studNr']: row for row in rows}
    except Exception:
        return {}
def loadCandidates(fname):
    """Load the candidate list from a signed CSV file.

    Returns a dict mapping ``mdwId`` -> full CSV row; an empty dict when
    the file cannot be loaded, verified, or parsed.
    """
    try:
        rows = csv.DictReader(loadFile(fname), delimiter=';')
        return {row['mdwId']: row for row in rows}
    except Exception:
        return {}
def sign(data, signer=gSigner, sfx='.prv'):
    """Create a detached signature line for *data*.

    Accepts str, bytes, or StringIO; signs the UTF-8 bytes with the RSA
    private key read from ``keys/private.key`` (SHA-256, PKCS#1 v1.5) and
    returns ``#sign:sha256-PKCS1-rsa2048:<signer>:<hex>``.

    ``sfx`` is currently unused; kept for interface compatibility.
    """
    if isinstance(data, io.StringIO):
        data = data.read()
    if not isinstance(data, bytes):
        data = bytes(data, encoding='utf-8')
    # Context manager ensures the key file handle is closed (the original
    # left open(...).read() unclosed).
    with open('keys/private.key') as keyfile:
        key = RSA.import_key(keyfile.read())
    h = SHA256.new(data)
    signature = pkcs1_15.new(key).sign(h)
    return ':'.join(['#sign', 'sha256-PKCS1-rsa2048', signer, signature.hex()])
def verify(data, signature, signer=gSigner, sfx='.pub'):
    """Verify a detached ``#sign:...`` line produced by :func:`sign`.

    Accepts str, bytes, or StringIO data. Returns True if the signature is
    valid, False if invalid, and None when the signature line has an unknown
    algorithm tag or signer.

    ``sfx`` is currently unused; kept for interface compatibility.
    """
    if isinstance(data, io.StringIO):
        data = data.read()
    if not isinstance(data, bytes):
        data = bytes(data, encoding='utf-8')
    flds = signature.split(':')
    # BUG FIX: reject when EITHER the algorithm tag OR the signer mismatches.
    # The original used 'and', which only rejected when both were wrong and
    # therefore accepted signatures from any signer with the right algorithm.
    if flds[1] != 'sha256-PKCS1-rsa2048' or flds[2] != signer:
        print('Error: Unknown signature:', signature)
        return None
    sign = bytes.fromhex(flds[3])
    # Context manager ensures the key file handle is closed.
    with open('keys/public.pub') as keyfile:
        key = RSA.import_key(keyfile.read())
    h = SHA256.new(data)
    res = False
    try:
        pkcs1_15.new(key).verify(h, sign)
        print("The signature is valid.")
        res = True
    except (ValueError, TypeError):
        print("The signature is not valid.")
        res = False
    return res
def saveFile(fname, data, signer=gSigner, useSign=True):
    """Save *data* to *fname*, optionally appending a fresh signature line.

    Any existing ``#sign`` trailer is stripped before re-signing, so a file
    is never double-signed.
    """
    if isinstance(data, io.StringIO):
        data = data.read()
    # Drop a pre-existing signature trailer, if any.
    n = data.find('#sign')
    if n > 0:
        data = data[0:n]
    if useSign:
        data += sign(data, signer) + '\n'
    # Context manager guarantees the handle is flushed and closed (the
    # original relied on io.open(...).write(...) never closing explicitly).
    with io.open(fname, 'w', encoding='UTF-8') as f:
        f.write(data)
    return
def loadFile(fname, useSign=True, signer=gSigner):
    """Load *fname* and optionally check its trailing ``#sign`` line.

    Returns a StringIO over the payload (signature stripped), or None when
    ``useSign`` is true and verification fails. A file without a ``#sign``
    trailer is returned as-is, unverified.
    """
    # Context manager ensures the file handle is closed (the original left
    # io.open(...).read() unclosed).
    with io.open(fname, 'r', encoding='UTF-8') as f:
        data = f.read()
    n = data.find('#sign')
    if n > 0:
        sign = data[n:].strip()
        data = data[0:n]
        if useSign:
            res = verify(data, sign, signer, sfx='.pub')
            if not res:
                return None
    return io.StringIO(data)
| Tataturk/stem_app | audit.py | audit.py | py | 2,468 | python | en | code | 0 | github-code | 36 |
44575910113 | from dbmanager import DatabaseManager
from tgbot import Bot
from market import Market
from plot_provider import PlotProvider
import threading
import sys
import logging
import logging.handlers
import queue
from apscheduler.schedulers.background import BackgroundScheduler
class MarketManager:
    """Coordinates market prediction threads, a Telegram bot, and scheduled
    jobs (daily plots, hourly predictions, per-minute chat collection).

    Worker threads push {"type": ..., "data": ...} messages onto an internal
    queue; the owner drains it via process_market_message().
    """
    def __init__(self, path, bot_token):
        # path: working directory handed to each Market worker;
        # bot_token: Telegram bot credential used by every Bot instance.
        self._bot_token = bot_token
        self._logger = logging.getLogger('MarketManagerLogger')
        self._logger.setLevel(logging.ERROR)
        # Log straight to the local syslog socket (Linux-specific path).
        handler = logging.handlers.SysLogHandler(address='/dev/log')
        self._logger.addHandler(handler)
        # NOTE(review): self._db is never used again — each job builds its
        # own DatabaseManager; confirm whether this field can be dropped.
        self._db = DatabaseManager()
        self._path = path
        self._scheduler = BackgroundScheduler()
        # Daily plot at midnight, predictions every hour, chat sync every minute.
        self._scheduler.add_job(self._daily_market_plot_job, trigger='cron', hour='0')
        self._scheduler.add_job(self._predictions_job, trigger='cron', hour='*')
        self._scheduler.add_job(self._bot_job, trigger='cron', minute='*')
        # market symbol -> worker Thread (used to avoid double-starting).
        self._markets = dict()
        self._message_queue = queue.Queue()
    def process_market_message(self):
        """Block on the message queue and forward one message to all chats.

        Messages are dicts with "type" ("text" or "image") and "data".
        Errors are logged and swallowed so the caller's loop keeps running.
        """
        try:
            db = DatabaseManager()
            bot = Bot(self._bot_token)
            message = self._message_queue.get()
            chats = db.get_chat_list()
            if message["type"] == "text":
                bot.send_text_message(message["data"], chats)
            elif message["type"] == "image":
                bot.send_image(message["data"], chats)
            self._message_queue.task_done()
        except Exception:
            self._logger.exception(f"Failed to process market message.")
    def _predictions_job(self):
        """Hourly job: spawn one worker thread per configured market,
        skipping markets whose previous run is still alive."""
        try:
            db = DatabaseManager()
            markets_list = db.get_markets()
            # Create thread for each market
            for m in markets_list:
                if m in self._markets and self._markets[m].is_alive():
                    # Previous hourly run has not finished — do not stack runs.
                    self._logger.error(f"Thread for market {m} is still alive.")
                    continue
                else:
                    t = threading.Thread(target=market_thread_func, args=(m, self._path, self._message_queue))
                    t.start()
                    self._markets[m] = t
        except Exception:
            self._logger.exception("Failed to start predictions job.")
    def _bot_job(self):
        """Per-minute job: persist any newly discovered bot chats to the DB."""
        try:
            db = DatabaseManager()
            bot = Bot(self._bot_token)
            chats = bot.get_chat_list()
            for c in chats:
                db.add_chat(c)
        except Exception:
            self._logger.exception("Failed to collect bot chats.")
    def _daily_market_plot_job(self):
        """Midnight job: render a 24h plot per market and queue it for sending.

        NOTE(review): m[1:] strips the first character of the market symbol
        for the plot title — presumably a leading prefix like '$'; confirm
        against DatabaseManager.get_markets().
        """
        try:
            db = DatabaseManager()
            pp = PlotProvider()
            markets = db.get_markets()
            for m in markets:
                data = db.get_24h_plot_data(m)
                image = pp.get_market_24plot(data, m[1:])
                self._message_queue.put({'type': 'image', 'data': image})
        except Exception:
            self._logger.exception("Failed to push daily market plots.")
    def start(self):
        """Start the background scheduler; jobs then fire on their cron triggers."""
        self._scheduler.start()
def market_thread_func(market_symbol, path, queue):
    """Thread target: run one predict-and-train cycle for a single market.

    Results/notifications flow back through *queue* to the MarketManager.
    """
    market = Market(path, market_symbol, queue)
    market.genotick_predict_and_train()
def main(argv):
    """CLI entry point: ``market_manager.py path bot_token``.

    Starts the scheduler, then drains market messages forever.
    """
    if len(argv) != 3:
        print("usage: {} path bot_token".format(argv[0]))
        sys.exit(1)
    manager = MarketManager(argv[1], argv[2])
    manager.start()
    while True:
        manager.process_market_message()


if __name__ == "__main__":
    main(sys.argv)
| hype-ecosystem/predictions_bot | market_manager.py | market_manager.py | py | 3,531 | python | en | code | 2 | github-code | 36 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.