text stringlengths 8 6.05M |
|---|
from PIL import Image
from tqdm import tqdm
import os
def CropImageSeries(image):
    """Split a horizontal strip of square frames into individual images.

    Assumes the strip is ``image.height`` pixels tall and consists of
    side-by-side square frames of that size; any trailing partial frame
    is ignored.

    :param image: a PIL.Image containing N square frames side by side
    :return: list of the N cropped frames, ordered left to right
    """
    frame_size = image.height  # assumes square frames
    total_frames = image.width // frame_size
    # crop() already returns a new image, so copying the whole strip for
    # every frame (as the original did) was unnecessary work.
    return [
        image.crop((i * frame_size, 0, (i + 1) * frame_size, frame_size))
        for i in range(total_frames)
    ]
counter = 0
# os.path.join keeps the script portable; the original hard-coded "\"
# separators relied on Python tolerating unrecognized escape sequences
# (e.g. "\I"), which is deprecated and Windows-only.
dirPath = os.path.join("MiscImages", "ImageSeriesResults")
savePath = os.path.join("MiscImages", "ImageSeriesResultsCroppedIndividuals")
for root, dirs, files in tqdm(os.walk(dirPath, topdown=False)):
    for file in files:
        # only process image strips; other files are skipped
        if file.endswith((".jpg", ".png")):
            counter += 1
            imgSeries = Image.open(os.path.join(root, file))
            # one output file per frame: seriesNNN_MMM.png
            for i, indImg in enumerate(CropImageSeries(imgSeries)):
                indImg.save(
                    os.path.join(
                        savePath,
                        f"series{str(counter).zfill(3)}_{str(i).zfill(3)}.png",
                    )
                )
|
from .base import StochasticProcess, StochasticProcessOverError
from collections import namedtuple
import numpy as np
# Convenience bundle of the model's hyper-parameters for callers;
# not consumed by the SIR class itself in this file.
SIRParameters = namedtuple('SIRParameters',
                           'population_size infection_rate recovery_rate T delta_t')
class SIR(StochasticProcess):
    """
    The Susceptible-Infected-Recovered Epidemic model

    See Section 3.3.3 of
    Allen, Linda JS. "An introduction to stochastic epidemic models."
    Mathematical epidemiology. Springer, Berlin, Heidelberg, 2008. 81-130.
    Implemented here is a simpler version from Linda Allens slides
    https://drive.google.com/file/d/0BwbIZeNSn5cdUzZlRzdUeUlRVWc/view?usp=sharing
    """

    state_space = 2  # Susceptible, Infected
    action_space = 3  # one action per row of step_sizes (infect / recover / stay)
    _SUSCEPTIBLE_DIM = 0  # the dimension of the susceptibles
    _INFECTED_DIM = 1  # the dimension of the infecteds

    def __init__(self,
                 population_size,
                 infection_rate,
                 recovery_rate,
                 prior,
                 T,
                 delta_t=0.01,
                 n_agents=1,
                 seed=1):
        """
        :param population_size: the total size of the population
        :param infection_rate: the rate of infection
        :param recovery_rate: the rate of recovery
        :param prior: the prior over starting states.
        :param T: the total number of timesteps
        :param delta_t: a small interval to ensure values resemble probabilities
        :param n_agents: the number of agents to simulate at a time
        :param seed: seed for the base class's random number generator
        """
        super().__init__(seed, T)
        self.population_size = population_size
        # the possible transitions we can take, one row per action:
        # row 0: new infection (S-1, I+1); row 1: recovery (I-1); row 2: no change
        self.step_sizes = np.array([
            [-1, 1],
            [0, -1],
            [0, 0]
        ])
        self.infection_rate = infection_rate
        self.recovery_rate = recovery_rate
        self.prior = prior
        self.n_agents = n_agents
        self.delta_t = delta_t
        assert prior.dimensions == self.state_space
        self.dimensions = self.state_space
        # generate an initial realization of the process immediately
        self.new_task()

    def transition_prob(self, state_t):
        """
        Returns the transition probabilities to each of the
        transitionary states from the current state

        :param state_t: the current state, shape (n_agents, 2): (S, I) per agent
        :return: per-agent probabilities over (new infection, recovery, no change)
        """
        inferred_n_agents = state_t.shape[0]
        # new_infection: delta_t * beta * S * I / N (S*I via row product)
        prob_new_infection = self.delta_t * self.infection_rate * np.prod(state_t, axis=1) / self.population_size
        # recovery: delta_t * gamma * I
        prob_recovery = self.delta_t * self.recovery_rate * state_t[:, self._INFECTED_DIM]
        # resusceptibility
        # prob_resusceptible = self.resusceptible_rate * state_t[:, self._INFECTED_DIM]
        # new susceptible
        # prob_new_susceptible = self.resusceptible_rate * (self.population_size - state_t[:, self._SUSCEPTIBLE_DIM] - state_t[:, self._INFECTED_DIM])
        # no change:
        # prob_no_change = 1 - prob_new_infection - prob_new_susceptible - prob_resusceptible - prob_recovery
        # only assign "no change" mass where the other two probabilities do not
        # already exceed 1 (possible for large rates / delta_t); elsewhere the
        # renormalization below rescales the remaining mass.
        good_indices = prob_new_infection + prob_recovery < 1
        prob_no_change = np.zeros(inferred_n_agents)
        prob_no_change[good_indices] = 1 - prob_new_infection[good_indices] - prob_recovery[good_indices]
        # all_probs = np.stack((prob_new_infection, prob_recovery, prob_resusceptible, prob_new_susceptible, prob_no_change))
        all_probs = np.stack((prob_new_infection, prob_recovery, prob_no_change))
        # normalization here. If these do not sum to 1, then somewhere down the line an error
        # will occur.
        # NOTE(review): np.stack produces shape (3, n_agents); for
        # inferred_n_agents not in {1, 3} this reshape reorders elements in
        # C order and mixes different agents' probabilities. A transpose
        # (or np.stack(..., axis=1)) looks intended — confirm before relying
        # on n_agents > 1.
        all_probs = all_probs.reshape(inferred_n_agents, -1)
        all_probs = all_probs / all_probs.sum(axis=1).reshape(-1, 1)
        # all_probs = np.exp(all_probs) / np.exp(all_probs).sum(axis=1).reshape(-1, 1)
        # assert np.all(all_probs>=0), 'Found probabilities that were negative! {}. The state was {}'.format(all_probs, state_t)
        return all_probs

    def simulate(self, rng=None):
        """
        Simulates a new stochastic process for self.T steps.

        :param rng: a random number generator
        :return: the generated trajectory, shape (T, 2)
        """
        if rng is None:
            rng = self.rng
        x0 = self.prior.rvs()
        trajectories = [x0]
        for i in range(self.T - 1):
            x_tm1 = trajectories[-1]
            # single-state batch; flatten the (1, 3) result to a 3-vector
            probability_distribution = self.transition_prob(np.array([x_tm1])).reshape(-1)
            # NOTE(review): this uses self.rng rather than the `rng` resolved
            # above, so a caller-supplied generator is silently ignored here.
            selected_transition = self.step_sizes[np.argmax(self.rng.multinomial(1, probability_distribution))]
            x_t = x_tm1 + selected_transition
            trajectories.append(x_t)
        trajectories = np.array(trajectories)
        assert np.all(trajectories >= 0), 'something went wrong, trajectory must be all non-negative!'
        return trajectories

    def reset_agent_locations(self):
        """
        Reset the location of all agents that are interacting with the stochastic process.

        All agents are placed at xT, the final state of the realized
        trajectory (the agents' steps can then be run in reverse via step()).

        :return: None
        """
        self.transitions_left = self.T - 1
        self.x_agent = np.repeat(self.xT.reshape(1, self.dimensions), self.n_agents, axis=0)

    def reset(self):
        """
        This will reset the game and return the location of the agents.

        :return: the (n_agents, 2) array of agent states
        """
        self.reset_agent_locations()
        return self.x_agent

    def new_task(self):
        """
        Creates a new realization of the stochastic process for agents to interact with.

        :return: the reset agent locations for the new realization
        """
        self.true_trajectory = self.simulate()
        self.x0 = self.true_trajectory[0]
        self.xT = self.true_trajectory[-1]
        return self.reset()

    def step(self, actions, reverse=False):
        """
        Allows agents to execute actions within a stochastic process

        :param actions: the numpy array of actions to be executed by each agent
        :param reverse: defines if the step is to be reversed or not
        :return: (current_state,
                  reward (! to be modified by you),
                  done,
                  information about the step)
        """
        if self.transitions_left == 0:
            raise StochasticProcessOverError('You have already reached the end of the episode. Use reset()')
        # translate each agent's action index into its (dS, dI) transition row
        steps_taken = np.take(self.step_sizes, actions.ravel(), axis=0)
        reversal_param = -1 if reverse else +1
        self.x_agent = self.x_agent + (steps_taken * reversal_param)
        self.transitions_left -= 1
        # we find the probability of going from the x_t'th to x_t+1st step
        # if we had done the forward process instead.
        transition_probs = self.transition_prob(self.x_agent)
        # note that here we have to do some manipulation with the transition probs
        # since each agent has a custom transition prob
        step_probs = transition_probs[np.arange(len(transition_probs)), actions]
        step_log_probs = np.log(step_probs)
        step_log_probs = step_log_probs.reshape(-1, 1)
        # do not put this if conditional into the one after this because
        # it will not run if the agent is out of scope.
        if self.transitions_left == 0:
            # at episode end, add the prior's log-density of the final state
            step_log_probs += np.log(self.prior.pdf(self.x_agent)).reshape(-1, 1)
        # TODO: technically we should return individual "dones" for each agent here
        if np.any(self.x_agent < 0) or np.any(np.sum(self.x_agent, axis=1) > self.population_size):
            # done = False
            # send a negative reinforcement signal for paths that should not occur.
            step_log_probs[np.any(self.x_agent < 0, axis=1)] = -np.inf
            # NOTE(review): `axis=0` below reduces the per-agent mask to a single
            # scalar, unlike the per-agent axis=1 mask above — this penalizes
            # all agents or none. A per-agent mask looks intended; confirm.
            step_log_probs[np.any(np.sum(self.x_agent, axis=1) > self.population_size, axis=0)] = -np.inf
            # reverse this action backward so that we do not allow the agent to go there
            # we just let the agent "try again":
            self.x_agent = self.x_agent + (steps_taken * reversal_param * -1)
        if self.transitions_left == 0:
            done = True
            # NOTE(review): for n_agents > 1, step_log_probs has several
            # elements and `if not np.isinf(...)` raises "truth value is
            # ambiguous" — confirm whether np.any/np.all was intended.
            if not np.isinf(step_log_probs):
                # print('SUCCESS AT END! {} {}'.format(self.x_agent, step_log_probs))
                pass
        else:
            done = False
        # debugging aid: dump everything if a NaN log-prob slipped through
        if np.any(np.isnan(step_log_probs)): print(step_log_probs, step_probs, transition_probs, self.x_agent, actions, steps_taken)
        return (self.x_agent, step_log_probs, done, {})
|
class Solution:
    def checkPossibility(self, nums: List[int]) -> bool:
        """Return True if nums can become non-decreasing by changing at most one element.

        On a violation nums[i] < nums[i-1], prefer lowering nums[i-1] to
        nums[i]: that repair cannot disturb the relation with nums[i+1].
        Only when nums[i] also falls below nums[i-2] must we instead raise
        nums[i] to nums[i-1]. The repair is applied in place so later
        comparisons see the corrected array.
        """
        modifications = 0
        for idx in range(1, len(nums)):
            if modifications == 2:
                return False
            if nums[idx] >= nums[idx - 1]:
                continue
            # violation found: pick the repair that least constrains the future
            if idx - 2 >= 0 and nums[idx] < nums[idx - 2]:
                nums[idx] = nums[idx - 1]
            else:
                nums[idx - 1] = nums[idx]
            modifications += 1
        return modifications <= 1
|
import torch
import torch.nn as nn
import torch.nn.functional as f
import numpy as np
import gym
from flare.kindling.neuralnets import MLP, GaussianPolicy, CategoricalPolicy, Actor
from flare.kindling.utils import _discount_cumsum
from flare.kindling import PGBuffer
from flare.polgrad import BasePolicyGradient
# pylint: disable=import-error
# pylint: disable=no-member
import time
import flare.kindling as fk
from flare.kindling import utils
from gym.spaces import Box
from flare.kindling import EpochLogger
from flare.kindling import TensorBoardWriter
import pickle as pkl
from typing import Optional, Any, Union, Callable, Tuple, List
import pytorch_lightning as pl
from torch.utils.data import DataLoader, Dataset
import sys
from flare.polgrad import BasePolicyGradient
class REINFORCE(BasePolicyGradient):
    r"""
    REINFORCE Policy Gradient Class, written using PyTorch + PyTorch Lightning

    Args:
        env (function): environment to train in
        hidden_sizes (list or tuple): hidden layer sizes for MLP actor
        steps_per_epoch (int): Number of environment interactions to collect per epoch
        minibatch_size (int or None): size of minibatches of interactions to train on
        gamma (float): gamma discount factor for reward discounting
        lam (float): Used in advantage estimation, not used here. REINFORCE does not learn a value function so can't calculate advantage.
    """

    def __init__(
        self,
        env: Callable,
        actor = fk.FireActorCritic,
        hidden_sizes: Optional[Union[Tuple, List]] = (64, 64),
        steps_per_epoch: Optional[int] = 4000,
        minibatch_size: Optional[Union[None, int]] = None,
        gamma: Optional[float] = 0.99,
        lam: Optional[float] = 0.97,
        seed = 0,
        hparams = None
    ):
        # All setup (buffer, actor construction, env wiring) is delegated
        # to the base policy-gradient class.
        super().__init__(
            env,
            ac=actor,
            hidden_sizes=hidden_sizes,
            steps_per_epoch=steps_per_epoch,
            minibatch_size=minibatch_size,
            gamma=gamma,
            lam=lam,
            seed=seed,
            hparams=hparams
        )

    def configure_optimizers(self) -> tuple:
        r"""
        Set up optimizers for agent.

        Only the policy network is optimized: REINFORCE has no critic.
        """
        return torch.optim.Adam(self.ac.policy.parameters(), lr=3e-4)

    def inner_loop(self) -> None:
        r"""
        Run agent-env interaction loop.

        Stores agent environment interaction tuples to the buffer. Logs reward mean/std/min/max to tracker dict. Collects data at loop end.

        Slightly modified from :func:`~LitBasePolicyGradient`. REINFORCE does not learn a value function, so the portions which get value estimates had to be removed.
        """
        state, reward, episode_reward, episode_length = self.env.reset(), 0, 0, 0
        rewlst = []
        lenlst = []
        for i in range(self.steps_per_epoch):
            action, logp, _ = self.ac.step(torch.as_tensor(state, dtype=torch.float32))
            next_state, reward, done, _ = self.env.step(action)
            # the value slot is fixed at 0: REINFORCE keeps no critic estimates
            self.buffer.store(
                state,
                action,
                reward,
                0,
                logp
            )
            state = next_state
            episode_length += 1
            episode_reward += reward
            # NOTE(review): the 1000-step episode cap is hard-coded here —
            # confirm it matches the environment's own time limit.
            timeup = episode_length == 1000
            over = done or timeup
            epoch_ended = i == self.steps_per_epoch - 1
            if over or epoch_ended:
                # if the episode was cut off (not truly done), bootstrap the
                # tail with the last observed reward; otherwise close with 0
                if timeup or epoch_ended:
                    last_val = reward
                else:
                    last_val = 0
                self.buffer.finish_path(last_val)
                if over:
                    rewlst.append(episode_reward)
                    lenlst.append(episode_length)
                    state, episode_reward, episode_length = self.env.reset(), 0, 0
        # NOTE(review): if no episode terminated this epoch, rewlst/lenlst are
        # empty and np.mean/np.max produce warnings/NaN — confirm acceptable.
        trackit = {
            "MeanEpReturn": np.mean(rewlst),
            "StdEpReturn": np.std(rewlst),
            "MaxEpReturn": np.max(rewlst),
            "MinEpReturn": np.min(rewlst),
            "MeanEpLength": np.mean(lenlst)
        }
        self.tracker_dict.update(trackit)
        self.data = self.buffer.get()

    def calc_pol_loss(self, logps: torch.Tensor, rets: torch.Tensor) -> torch.Tensor:
        r"""
        Reinforce Policy gradient loss. -(action_log_probabilities * returns)

        Args:
            logps (PyTorch Tensor): Action log probabilities.
            rets (PyTorch Tensor): Returns from the environment.
        """
        return -(logps * rets).mean()

    def training_step(self, batch: Tuple, batch_idx: int) -> dict:
        r"""
        Calculate policy loss over input batch.

        Also compute and log policy entropy and KL divergence.

        Args:
            batch (Tuple of PyTorch tensors): Batch to train on.
            batch_idx: batch index.
        """
        states, acts, _, rets, logps_old = batch
        policy, logps = self.ac.policy(states, acts)
        pol_loss = self.calc_pol_loss(logps, rets)
        # diagnostics: policy entropy and approximate KL vs. the old log-probs
        ent = policy.entropy().mean()
        kl = (logps_old - logps).mean()
        log = {"PolicyLoss": pol_loss, "Entropy": ent, "KL": kl}
        self.tracker_dict.update(log)
        return {"loss": pol_loss, "log": log, "progress_bar": log}
def learn(
    env_name,
    epochs: Optional[int] = 100,
    minibatch_size: Optional[int] = None,
    steps_per_epoch: Optional[int] = 4000,
    hidden_sizes: Optional[Union[Tuple, List]] = (64, 32),
    gamma: Optional[float] = 0.99,
    lam: Optional[float] = 0.97,
    hparams = None,
    seed = 0
):
    r"""
    Convenience entry point: train a REINFORCE agent on ``env_name``.

    Args:
        env_name: Gym environment id to train in.
        epochs: number of training epochs.
        minibatch_size: training minibatch size (defaults to 4000).
        steps_per_epoch: environment interactions per epoch.
        hidden_sizes: hidden layer sizes for the MLP actor.
        gamma: reward discount factor.
        lam: advantage-estimation lambda (unused by REINFORCE itself).
        hparams: extra hyperparameters forwarded to the runner.
        seed: random seed.
    """
    from flare.polgrad.base import runner
    minibatch_size = 4000 if minibatch_size is None else minibatch_size
    runner(
        env_name,
        REINFORCE,
        epochs=epochs,
        minibatch_size=minibatch_size,
        # fix: the hidden_sizes argument was previously ignored — a literal
        # (64, 32) was always passed through to the runner.
        hidden_sizes=hidden_sizes,
        gamma=gamma,
        lam=lam,
        hparams=hparams,
        seed=seed
    )
    # NOTE(review): steps_per_epoch is accepted but never forwarded; confirm
    # whether `runner` supports it before wiring it through.
|
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> bool:
        """Return True if s can be split into a sequence of dictionary words.

        Forward DP: reachable[k] means s[:k] is segmentable. From every
        reachable position, try to extend by each dictionary word.
        """
        if not s:
            return True
        length = len(s)
        reachable = [False] * (length + 1)
        reachable[0] = True  # the empty prefix is trivially segmentable
        for start in range(length):
            if not reachable[start]:
                continue
            for word in wordDict:
                end = start + len(word)
                if end <= length and s[start:end] == word:
                    reachable[end] = True
        return reachable[length]
class Solution:
    def wordBreak(self, s: str, wordDict: List[str]) -> bool:
        """Return True if s can be segmented into a sequence of dictionary words.

        Classic O(n^2) DP: dp[i] is True when the prefix s[:i] is segmentable.
        """
        # Fix: the set was built but the membership test below used the
        # original list, making every lookup O(len(wordDict)) instead of O(1).
        word_set = set(wordDict)
        dp = [False] * (len(s) + 1)
        dp[0] = True  # empty prefix is trivially segmentable
        for i in range(1, len(s) + 1):
            for j in range(i):
                if dp[j] and s[j:i] in word_set:
                    dp[i] = True
                    break
        return dp[-1]
class Solution():
    def wordBreak(self, s, wordDict):
        """Return True if s can be segmented into dictionary words.

        Top-down DFS with memoization: can_break(start) answers whether
        the suffix s[start:] is segmentable.
        """
        cache = {}

        def can_break(start):
            if start in cache:
                return cache[start]
            # an empty suffix is always segmentable
            if start == len(s):
                return True
            ok = False
            for word in wordDict:
                end = start + len(word)
                if end <= len(s) and s[start:end] == word:
                    # `or` short-circuits, so deeper calls stop once ok is True
                    ok = ok or can_break(end)
            cache[start] = ok
            return ok

        return can_break(0)
import os
import warnings
def test_rubicon_with_misc_folders_at_project_level(rubicon_local_filesystem_client_with_project):
    """Stray non-project folders at the root (e.g. Jupyter checkpoints)
    should be skipped with a warning rather than listed as projects."""
    rubicon, project = rubicon_local_filesystem_client_with_project
    # simulate Jupyter dropping a checkpoint directory next to real projects
    os.makedirs(os.path.join(rubicon.config.root_dir, ".ipynb_checkpoints"))
    with warnings.catch_warnings(record=True) as w:
        projects = rubicon.projects()
        # only the fixture's real project is returned
        assert len(projects) == 1
        # the junk folder is reported through a "not found" warning
        assert "not found" in str(w[-1].message)
def test_rubicon_with_misc_folders_at_sublevel_level(rubicon_local_filesystem_client_with_project):
    """Checkpoint folders inside a project's experiments directory should
    not be mistaken for experiments."""
    rubicon, project = rubicon_local_filesystem_client_with_project
    project.log_experiment("exp1")
    project.log_experiment("exp2")
    # junk directory alongside the two real experiments
    os.makedirs(
        os.path.join(rubicon.config.root_dir, "test-project", "experiments", ".ipynb_checkpoints")
    )
    with warnings.catch_warnings(record=True) as w:
        experiments = project.experiments()
        assert len(experiments) == 2
        assert "not found" in str(w[-1].message)
def test_rubicon_with_misc_folders_at_deeper_sublevel_level(
    rubicon_local_filesystem_client_with_project,
):
    """Checkpoint folders nested deep inside an experiment (here, in the
    parameters directory) should also be skipped with a warning."""
    rubicon, project = rubicon_local_filesystem_client_with_project
    exp = project.log_experiment("exp1")
    exp.log_parameter("a", 1)
    # junk directory inside this experiment's parameters folder
    os.makedirs(
        os.path.join(
            rubicon.config.root_dir,
            "test-project",
            "experiments",
            exp.id,
            "parameters",
            ".ipynb_checkpoints",
        )
    )
    with warnings.catch_warnings(record=True) as w:
        parameters = exp.parameters()
        assert len(parameters) == 1
        assert "not found" in str(w[-1].message)
|
import json
import logging
import os
from typing import Tuple
import backoff
import requests
from bitcoin_acks.logging import log
# Only surface errors globally, but keep backoff's own retry messages at INFO.
logging.basicConfig(level=logging.ERROR)
logging.getLogger('backoff').setLevel(logging.INFO)
def fatal_code(e):
    """Give-up predicate for backoff: return True to stop retrying.

    We only retry if the error was "Bad Gateway" (HTTP 502); anything else
    is treated as fatal.

    :param e: the requests exception raised by the failed call
    """
    log.error('GitHub error', fatal_code=e)
    # e.response is None for transport-level failures (connection errors,
    # timeouts) — the original dereferenced it unconditionally and raised
    # AttributeError inside the giveup check.
    if e.response is None:
        return True
    return e.response.status_code != 502
class GitHubData(object):
    """Thin wrapper around the GitHub GraphQL API using basic auth.

    Credentials are read from the environment when the class body executes,
    so GITHUB_USER / GITHUB_API_TOKEN must be set before this module is
    imported (a missing variable raises KeyError at import time).
    """

    api_url = 'https://api.github.com/'
    user_name = os.environ['GITHUB_USER']
    password = os.environ['GITHUB_API_TOKEN']
    # opt in to GitHub's "starfox" developer-preview media type
    dev_preview_headers = {
        'Accept': 'application/vnd.github.starfox-preview+json'
    }

    @property
    def auth(self) -> Tuple[str, str]:
        # (user, token) pair in the shape `requests` expects for basic auth
        return self.user_name, self.password

    def get_graphql_schema(self):
        """Fetch the GraphQL schema and write it to ./graphql.schema.json."""
        r = requests.get(self.api_url + 'graphql',
                         auth=self.auth,
                         headers=self.dev_preview_headers)
        r.raise_for_status()
        with open('graphql.schema.json', 'w') as output_file:
            json.dump(r.json(), output_file, indent=4, sort_keys=True)

    @backoff.on_exception(backoff.expo,
                          requests.exceptions.RequestException,
                          giveup=fatal_code)
    def graphql_post(self, json_object: dict):
        """POST a GraphQL payload, retrying with exponential backoff while
        fatal_code deems the failure retryable.

        :param json_object: the GraphQL query/mutation payload
        :return: the successful requests.Response
        """
        log.debug('graphql post', api_url=self.api_url, json=json_object)
        r = requests.post(self.api_url + 'graphql',
                          auth=self.auth,
                          headers=self.dev_preview_headers,
                          json=json_object)
        r.raise_for_status()
        return r
if __name__ == '__main__':
    # running the module directly refreshes the local GraphQL schema dump
    GitHubData().get_graphql_schema()
|
def count_occurrences(text, pattern):
    """Count (possibly overlapping) occurrences of pattern in text.

    Slides a window of len(pattern) over text; empty patterns match at
    every position (len(text) + 1 matches), mirroring the original script.
    """
    window = len(pattern)
    return sum(
        1
        for start in range(len(text) - window + 1)
        if text[start:start + window] == pattern
    )


if __name__ == "__main__":
    # read the text and the substring from stdin, print the match count
    s = input()
    sub_s = input()
    print(count_occurrences(s, sub_s))
# Copyright 2020 Pulser Development Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest.mock import patch
import numpy as np
import pytest
from pulser import Register, Register3D
from pulser.devices import Chadoq2, MockDevice
def test_creation():
    """Register/Register3D constructors and factories validate inputs and
    produce the expected qubit ids and (optionally centered) coordinates."""
    empty_dict = {}
    with pytest.raises(ValueError, match="Cannot create a Register with"):
        Register(empty_dict)
    coords = [(0, 0), (1, 0)]
    ids = ("q0", "q1")
    qubits = dict(zip(ids, coords))
    # non-dict inputs are rejected
    with pytest.raises(TypeError):
        Register(coords)
        Register(ids)
    with pytest.raises(ValueError, match="vectors of size 2"):
        Register.from_coordinates([(0, 1, 0, 1)])
    # prefix and labels are mutually exclusive
    with pytest.raises(
        NotImplementedError, match="a prefix and a set of labels"
    ):
        Register.from_coordinates(coords, prefix="a", labels=["a", "b"])
    with pytest.raises(ValueError, match="vectors of size 3"):
        Register3D.from_coordinates([((1, 0),), ((-1, 0),)])
    # dict construction and from_coordinates(prefix=...) agree
    reg1 = Register(qubits)
    reg2 = Register.from_coordinates(coords, center=False, prefix="q")
    assert np.all(np.array(reg1._coords) == np.array(reg2._coords))
    assert reg1._ids == reg2._ids
    reg2b = Register.from_coordinates(coords, center=False, labels=["a", "b"])
    assert reg2b._ids == ("a", "b")
    with pytest.raises(ValueError, match="Label length"):
        Register.from_coordinates(coords, center=False, labels=["a", "b", "c"])
    # default centering shifts the coordinates around the origin
    reg3 = Register.from_coordinates(np.array(coords), prefix="foo")
    coords_ = np.array([(-0.5, 0), (0.5, 0)])
    assert reg3._ids == ("foo0", "foo1")
    assert np.all(reg3._coords == coords_)
    assert not np.all(coords_ == coords)
    # factory layouts: rectangle, square, triangular lattice
    reg4 = Register.rectangle(1, 2, spacing=1)
    assert np.all(reg4._coords == coords_)
    reg5 = Register.square(2, spacing=2)
    coords_ = np.array([(-1, -1), (1, -1), (-1, 1), (1, 1)], dtype=float)
    assert np.all(np.array(reg5._coords) == coords_)
    reg6 = Register.triangular_lattice(2, 2, spacing=4)
    coords_ = np.array(
        [
            (-3, -np.sqrt(3)),
            (1, -np.sqrt(3)),
            (-1, np.sqrt(3)),
            (3, np.sqrt(3)),
        ]
    )
    assert np.all(np.array(reg6._coords) == coords_)
    # only 'layout' and 'trap_ids' are accepted as extra kwargs
    with pytest.raises(
        ValueError, match="must only be 'layout' and 'trap_ids'"
    ):
        Register(qubits, spacing=10, layout="square", trap_ids=(0, 1, 3))
def test_rectangle():
    """Register.rectangle rejects non-positive rows, columns and spacing."""
    # Check rows
    with pytest.raises(ValueError, match="The number of rows"):
        Register.rectangle(0, 2)
    # Check columns
    with pytest.raises(ValueError, match="The number of columns"):
        Register.rectangle(2, 0)
    # Check spacing
    with pytest.raises(ValueError, match="Spacing"):
        Register.rectangle(2, 2, 0.0)
def test_square():
    """Register.square rejects non-positive side length and spacing."""
    # Check side
    with pytest.raises(ValueError, match="The number of atoms per side"):
        Register.square(0)
    # Check spacing
    with pytest.raises(ValueError, match="Spacing"):
        Register.square(2, 0.0)
def test_triangular_lattice():
    """Register.triangular_lattice rejects non-positive dimensions/spacing."""
    # Check rows
    with pytest.raises(ValueError, match="The number of rows"):
        Register.triangular_lattice(0, 2)
    # Check columns
    with pytest.raises(ValueError, match="The number of atoms per row"):
        Register.triangular_lattice(2, 0)
    # Check spacing
    with pytest.raises(ValueError, match="Spacing"):
        Register.triangular_lattice(2, 2, 0.0)
def test_hexagon():
    """Register.hexagon validates its arguments and lays atoms on a
    hexagonal lattice (center first, then ring by ring)."""
    # Check number of layers
    with pytest.raises(ValueError, match="The number of layers"):
        Register.hexagon(0)
    # Check spacing
    with pytest.raises(ValueError, match="Spacing "):
        Register.hexagon(1, spacing=-1.0)
    # Check small hexagon (1 layer): 1 center + 6 ring atoms
    reg = Register.hexagon(1, spacing=1.0)
    assert len(reg.qubits) == 7
    atoms = list(reg.qubits.values())
    crest_y = np.sqrt(3) / 2  # vertical offset of the lattice rows
    assert np.all(np.isclose(atoms[0], [0.0, 0.0]))
    assert np.all(np.isclose(atoms[1], [-0.5, crest_y]))
    assert np.all(np.isclose(atoms[2], [0.5, crest_y]))
    assert np.all(np.isclose(atoms[3], [1.0, 0.0]))
    assert np.all(np.isclose(atoms[4], [0.5, -crest_y]))
    assert np.all(np.isclose(atoms[5], [-0.5, -crest_y]))
    assert np.all(np.isclose(atoms[6], [-1.0, 0.0]))
    # Check a few atoms for a bigger hexagon (2 layers): 19 atoms total
    reg = Register.hexagon(2, spacing=1.0)
    assert len(reg.qubits) == 19
    atoms = list(reg.qubits.values())
    crest_y = np.sqrt(3) / 2.0
    assert np.all(np.isclose(atoms[7], [-1.5, crest_y]))
    assert np.all(np.isclose(atoms[8], [-1.0, 2.0 * crest_y]))
    assert np.all(np.isclose(atoms[9], [-0.0, 2.0 * crest_y]))
    assert np.all(np.isclose(atoms[13], [1.5, -crest_y]))
    assert np.all(np.isclose(atoms[14], [1.0, -2.0 * crest_y]))
    assert np.all(np.isclose(atoms[15], [0.0, -2.0 * crest_y]))
def test_max_connectivity():
    """Register.max_connectivity validates its inputs and packs qubits into
    a hexagonal layout at the device's minimum spacing."""
    device = Chadoq2
    max_atom_num = device.max_atom_num
    spacing = device.min_atom_distance
    crest_y = np.sqrt(3) / 2.0
    # Check device type
    with pytest.raises(TypeError):
        reg = Register.max_connectivity(2, None)
    # Check min number of atoms
    with pytest.raises(
        ValueError, match=r"The number of qubits(.+)greater than"
    ):
        reg = Register.max_connectivity(0, device)
    # Check max number of atoms
    with pytest.raises(ValueError, match=r"The number of qubits(.+)less than"):
        reg = Register.max_connectivity(max_atom_num + 1, device)
    # Check spacing: the device minimum works; anything below is rejected
    reg = Register.max_connectivity(max_atom_num, device, spacing=spacing)
    with pytest.raises(ValueError, match="Spacing "):
        reg = Register.max_connectivity(
            max_atom_num, device, spacing=spacing - 1.0
        )
    # a device with zero minimum distance has no well-defined layout
    with pytest.raises(
        NotImplementedError,
        match="Maximum connectivity layouts are not well defined for a "
        "device with 'min_atom_distance=0.0'.",
    ):
        Register.max_connectivity(1e9, MockDevice)
    # Check 1 atom
    reg = Register.max_connectivity(1, device)
    assert len(reg.qubits) == 1
    atoms = list(reg.qubits.values())
    assert np.all(np.isclose(atoms[0], [0.0, 0.0]))
    # Check for less than 7 atoms: partial first ring matches a manual layout
    for i in range(1, 7):
        hex_coords = np.array(
            [
                (0.0, 0.0),
                (-0.5, crest_y),
                (0.5, crest_y),
                (1.0, 0.0),
                (0.5, -crest_y),
                (-0.5, -crest_y),
            ]
        )
        reg = Register.max_connectivity(i, device)
        device.validate_register(reg)
        reg2 = Register.from_coordinates(
            spacing * hex_coords[:i], center=False
        )
        assert len(reg.qubits) == i
        atoms = list(reg.qubits.values())
        atoms2 = list(reg2.qubits.values())
        for k in range(i):
            assert np.all(np.isclose(atoms[k], atoms2[k]))
    # Check full layers on a small hexagon (1 layer)
    reg = Register.max_connectivity(7, device)
    device.validate_register(reg)
    assert len(reg.qubits) == 7
    atoms = list(reg.qubits.values())
    assert np.all(np.isclose(atoms[0], [0.0, 0.0]))
    assert np.all(np.isclose(atoms[1], [-0.5 * spacing, crest_y * spacing]))
    assert np.all(np.isclose(atoms[2], [0.5 * spacing, crest_y * spacing]))
    assert np.all(np.isclose(atoms[3], [1.0 * spacing, 0.0]))
    assert np.all(np.isclose(atoms[4], [0.5 * spacing, -crest_y * spacing]))
    assert np.all(np.isclose(atoms[5], [-0.5 * spacing, -crest_y * spacing]))
    assert np.all(np.isclose(atoms[6], [-1.0 * spacing, 0.0]))
    # Check full layers for a bigger hexagon (2 layers)
    reg = Register.max_connectivity(19, device)
    device.validate_register(reg)
    assert len(reg.qubits) == 19
    atoms = list(reg.qubits.values())
    assert np.all(np.isclose(atoms[7], [-1.5 * spacing, crest_y * spacing]))
    assert np.all(
        np.isclose(atoms[8], [-1.0 * spacing, 2.0 * crest_y * spacing])
    )
    assert np.all(np.isclose(atoms[13], [1.5 * spacing, -crest_y * spacing]))
    assert np.all(
        np.isclose(atoms[14], [1.0 * spacing, -2.0 * crest_y * spacing])
    )
    # Check extra atoms (2 full layers + 7 extra atoms)
    # for C3 symmetry, C6 symmetry and offset for next atoms
    reg = Register.max_connectivity(26, device)
    device.validate_register(reg)
    assert len(reg.qubits) == 26
    atoms = list(reg.qubits.values())
    assert np.all(np.isclose(atoms[19], [-2.5 * spacing, crest_y * spacing]))
    assert np.all(
        np.isclose(atoms[20], [-2.0 * spacing, 2.0 * crest_y * spacing])
    )
    assert np.all(
        np.isclose(atoms[21], [-0.5 * spacing, 3.0 * crest_y * spacing])
    )
    assert np.all(
        np.isclose(atoms[22], [2.0 * spacing, 2.0 * crest_y * spacing])
    )
    assert np.all(np.isclose(atoms[23], [2.5 * spacing, -crest_y * spacing]))
    assert np.all(
        np.isclose(atoms[24], [0.5 * spacing, -3.0 * crest_y * spacing])
    )
    assert np.all(
        np.isclose(atoms[25], [-2.0 * spacing, -2.0 * crest_y * spacing])
    )
def test_rotation():
    """Rotating a sqrt(2)-spaced square by 45 degrees lands the four atoms
    on the axes at unit distance."""
    reg = Register.square(2, spacing=np.sqrt(2))
    reg.rotate(45)
    coords_ = np.array([(0, -1), (1, 0), (-1, 0), (0, 1)], dtype=float)
    assert np.all(np.isclose(reg._coords, coords_))
# Parameter grid for the drawing tests: no kwargs, an explicitly empty
# qubit_colors mapping, and a color override for one qubit.
draw_params = [
    dict(),
    dict(qubit_colors=dict()),
    dict(qubit_colors={1: "darkred"}),
]
@pytest.mark.parametrize("draw_params", draw_params)
def test_drawing(draw_params, patch_plt_show):
    """Register.draw validates its arguments and renders without error for
    the supported option combinations (plt.show is patched by the fixture)."""
    # a zero blockade radius with draw_half_radius is rejected
    with pytest.raises(ValueError, match="Blockade radius"):
        reg = Register.from_coordinates([(1, 0), (0, 1)])
        reg.draw(blockade_radius=0.0, draw_half_radius=True, **draw_params)
    reg = Register.from_coordinates([(1, 0), (0, 1)])
    reg.draw(blockade_radius=0.1, draw_graph=True, **draw_params)
    reg = Register.triangular_lattice(3, 8)
    reg.draw(**draw_params)
    # fig_name triggers a savefig call
    with patch("matplotlib.pyplot.savefig"):
        reg.draw(fig_name="my_register.pdf")
    reg = Register.rectangle(1, 8)
    reg.draw(
        blockade_radius=5,
        draw_half_radius=True,
        draw_graph=True,
        **draw_params,
    )
    # draw_half_radius requires a blockade_radius
    with pytest.raises(ValueError, match="'blockade_radius' to draw."):
        reg.draw(draw_half_radius=True, **draw_params)
    # half-radius drawing needs at least two atoms
    reg = Register.square(1)
    with pytest.raises(NotImplementedError, match="Needs more than one atom"):
        reg.draw(blockade_radius=5, draw_half_radius=True, **draw_params)
def test_orthorombic():
    """Register3D.cuboid rejects non-positive rows, columns, layers, spacing."""
    # Check rows
    with pytest.raises(ValueError, match="The number of rows"):
        Register3D.cuboid(0, 2, 2)
    # Check columns
    with pytest.raises(ValueError, match="The number of columns"):
        Register3D.cuboid(2, 0, 2)
    # Check layers
    with pytest.raises(ValueError, match="The number of layers"):
        Register3D.cuboid(2, 2, 0)
    # Check spacing
    with pytest.raises(ValueError, match="Spacing"):
        Register3D.cuboid(2, 2, 2, 0.0)
def test_cubic():
    """Register3D.cubic rejects non-positive side length and spacing."""
    # Check side
    with pytest.raises(ValueError, match="The number of atoms per side"):
        Register3D.cubic(0)
    # Check spacing
    with pytest.raises(ValueError, match="Spacing"):
        Register3D.cubic(2, 0.0)
@pytest.mark.parametrize("draw_params", draw_params)
def test_drawing3D(draw_params, patch_plt_show):
    """Register3D.draw validates its arguments and renders in both the
    3D and projection modes (plt.show is patched by the fixture)."""
    # a zero blockade radius is rejected
    with pytest.raises(ValueError, match="Blockade radius"):
        reg = Register3D.from_coordinates([(1, 0, 0), (0, 0, 1)])
        reg.draw(blockade_radius=0.0, **draw_params)
    reg = Register3D.cubic(3, 8)
    # fig_name triggers a savefig call
    with patch("matplotlib.pyplot.savefig"):
        reg.draw(fig_name="my_register.pdf", **draw_params)
    reg = Register3D.cuboid(1, 8, 2)
    reg.draw(
        blockade_radius=5,
        draw_half_radius=True,
        draw_graph=True,
        **draw_params,
    )
    # draw_half_radius requires a blockade_radius
    with pytest.raises(ValueError, match="'blockade_radius' to draw."):
        reg.draw(draw_half_radius=True, **draw_params)
    # both rendering modes: 3D scatter and 2D projections, with labels
    reg = Register3D.cuboid(2, 2, 2)
    reg.draw(
        blockade_radius=5,
        draw_half_radius=True,
        draw_graph=True,
        projection=False,
        with_labels=True,
        **draw_params,
    )
    reg.draw(
        blockade_radius=5,
        draw_half_radius=True,
        draw_graph=False,
        projection=True,
        with_labels=True,
        **draw_params,
    )
    # half-radius drawing needs at least two atoms
    reg = Register3D.cubic(1)
    with pytest.raises(NotImplementedError, match="Needs more than one atom"):
        reg.draw(blockade_radius=5, draw_half_radius=True, **draw_params)
def test_to_2D():
    """Register3D.to_2D only succeeds when the atoms are (near) coplanar,
    with coplanarity controlled by tol_width."""
    reg = Register3D.cuboid(2, 2, 2)
    with pytest.raises(ValueError, match="Atoms are not coplanar"):
        reg.to_2D()
    # a large enough tolerance accepts the cube as "flat"
    reg.to_2D(tol_width=6)
    # a single-layer cuboid is genuinely planar
    reg = Register3D.cuboid(2, 2, 1)
    reg.to_2D()
def test_find_indices():
    """find_indices maps qubit ids to their positions in insertion order
    and rejects ids that are not in the register."""
    reg = Register(dict(a=(0, 0), c=(5, 0), b=(0, 5)))
    assert reg.find_indices(["c", "b", "a"]) == [1, 2, 0]
    with pytest.raises(
        ValueError,
        match="IDs list must be selected among the IDs of the register's "
        "qubits",
    ):
        reg.find_indices(["c", "e", "d"])
def assert_eq(left, right):
    """Assert equality in both comparison directions (symmetric __eq__)."""
    assert right == left
    assert left == right
def assert_ineq(left, right):
    """Assert inequality in both comparison directions (symmetric __ne__)."""
    assert right != left
    assert left != right
def test_equality_function():
    """Register equality is order-sensitive on ids/coordinates and never
    equal across dimensionality or to non-Register objects."""
    reg1 = Register({"c": (1, 2), "d": (8, 4)})
    assert_eq(reg1, reg1)
    assert_eq(reg1, Register({"c": (1, 2), "d": (8, 4)}))
    # same mapping in a different insertion order is NOT equal
    assert_ineq(reg1, Register({"d": (8, 4), "c": (1, 2)}))
    assert_ineq(reg1, Register({"c": (8, 4), "d": (1, 2)}))
    assert_ineq(reg1, Register({"c": (1, 2), "d": (8, 4), "e": (8, 4)}))
    assert_ineq(reg1, 10)
    reg2 = Register3D({"a": (1, 2, 3), "b": (8, 5, 6)})
    assert_eq(reg2, reg2)
    assert_eq(reg2, Register3D({"a": (1, 2, 3), "b": (8, 5, 6)}))
    assert_eq(reg2, Register3D({"a": (1, 2, 3), "b": (8, 5, 6)}))
    assert_ineq(reg2, Register3D({"b": (8, 5, 6), "a": (1, 2, 3)}))
    assert_ineq(reg2, Register3D({"b": (1, 2, 3), "a": (8, 5, 6)}))
    assert_ineq(
        reg2, Register3D({"a": (1, 2, 3), "b": (8, 5, 6), "e": (8, 5, 6)})
    )
    assert_ineq(reg2, 10)
    # 2D and 3D registers never compare equal
    assert_ineq(reg1, reg2)
|
__author__ = 'AlecGriffin'
class Book:
    """A titled, ordered collection of chapter objects."""

    def __init__(self, bookTitle):
        self.bookTitle = bookTitle
        # Container for Chapter objects, in insertion order
        self.chapterList = []
        # running count, kept in sync by addChapter
        self.numChapters = 0

    def addChapter(self, chapter):
        """Append a chapter and bump the chapter count."""
        self.chapterList.append(chapter)
        self.numChapters += 1

    def printBook(self):
        """Print the title, two blank lines, then every chapter in order."""
        print(self.bookTitle)
        print()
        print()
        for ch in self.chapterList:
            ch.printChapter()
CG Lab Sample Questions
CS 1507 Computer Graphics Lab
Sample Questions for Lab Examination
* Use Bresenham’s algorithm to draw circle & line for the following questions.
* Use non-recursive fill algorithms to fill colours
Write a program to draw the Sierpinski triangle (recursive and non-recursive)
Write a program to draw a cubic Bézier curve with 'n' control points.
Write a program to draw a cubic B-spline curve with 'n' control points
An '8' is constructed by joining together two cubic Bézier curves. Write a program to draw it.
A triangle PQR has its vertices located at P(80,50), Q(60,10), R(100,10). It is desired to
obtain its reflection about an axis parallel to the Y axis and passing through point A(30,10).
Write a program to do this transformation.
Graphically prove that combination of scaling and rotation is not commutative.
Write a program to continuously rotate an object about a pivot point. Small angles are to be
used for each successive rotation, and approximations to the sine and cosine functions are to
be used to speed up the calculations. The rotation angle for each step is to be chosen so that
the object makes one complete revolution in less than 30 seconds. To avoid accumulation of
coordinate errors, reset the original coordinate values for the object at the start of each new
revolution.
Write a program to draw an analog clock with pendulum. The time should be taken from the
system time.
Write a program to draw a digital clock. Time should be system time the display number size should be at least 1cm x 1cm.
Write a program to simulate movement of a small ball over a Sphere.
Write a program to draw circum circle of a triangle.
Write a program to simulate the random movement of a set of balloons.
Write a program to simulate the movement of a bouncing ball
Write a program to draw a spiral
Write a program to draw following figures.
Write a program to draw a pie chart for given data
Write a program to draw bar chart for given data. Fill each bar with a pattern
Write a program to draw stacked bar-chart. Draw legends also
Write program to implement Cohen-Sutherland line clipping algorithm
Write a program to implement the Sutherland-Hodgman polygon clipping algorithm
Write a program to implement Weiler-Atherton polygon clipping algorithm
Write a program to clip a circle against a polygon
Write a program to clip a circle against circle or ellipse
A circular disc of diameter 'd' is rolling down the inclined plane starting from rest. Assume
there is no slip and develop the set of transformations required to produce this animation.
Write a program to implement it.
Write an OpenGL program to draw a Cube, Cylinder and Prism. (not included for internal exam)
Write an Open-GL program to draw different spheres and show the illumination on them
from light sources.(not included for internal exam)
Write a program to plot y=sin(x) and y= sin(x)/x
Write a program to draw graph showing y= sin(x) + cos(x)
Write a program to simulate the movement of a vehicle
Write a program to implement window-to-viewport transformation
|
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
from torchvision import transforms
from models import *
import numpy as np
import os
import time
# Extract semantic features for the (correctly classified) adversarial
# CIFAR-10 images with a pretrained ResNet18 and save them to disk.
model = semantic_ResNet18().cuda()
ckpt = torch.load('./Res18_model/net_150.pth')
batchsize = 128
model.load_state_dict(ckpt)
model.eval()

y = np.load('./Cifar10_adv.npy')
index1 = np.load('./True_Index.npy')
y = y[index1]  # keep only the samples flagged by True_Index
number = y.shape[0]
x = torch.from_numpy(y).cuda()

outputs = []
with torch.no_grad():
    # BUG FIX: the original iterated int(number/batchsize)+1 batches, which
    # produced an extra, empty batch whenever number % batchsize == 0.
    # Ceil division yields exactly the required number of non-empty batches.
    for batch_num in range((number + batchsize - 1) // batchsize):
        x_batch = x[batchsize * batch_num: min(batchsize * (batch_num + 1), number)]
        outputs.append(model(x_batch).cpu())
X_adv = torch.cat(outputs, 0)
print(X_adv.shape)
np.save('Semantic_Cifar10_adv.npy', X_adv)
|
# Generated by Django 3.0.3 on 2021-05-27 10:33
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: make InventoryItem.itemImage optional.

    Re-declares ``itemImage`` as a ``blank=True`` image field stored under
    ``media/``.
    """

    dependencies = [
        ('ticketingsystem', '0008_auto_20210527_1120'),
    ]

    operations = [
        migrations.AlterField(
            model_name='inventoryitem',
            name='itemImage',
            field=models.ImageField(blank=True, upload_to='media/', verbose_name='Item Picture'),
        ),
    ]
|
####################################################################################################
# constants_and_vowels.py #
# ------------------------------------------------------------------------------------------------ #
# Takes the user's input as C's or V's and randomly picks a consonant to fill the place of the C's #
# and vowels to fill the place of V's. #
# The task comes from the dailyProgrammer subreddit: https://redd.it/3q9vpn #
# ------------------------------------------------------------------------------------------------ #
# #
# Author: Sejson #
# Date: January 10 2016 #
####################################################################################################
import random
"""
translate()
-----------
Description:
Takes the user's input as C or V's and converts it to a constant or vowel respectivily.
------------
Returns:
A nonsense word that matches the pattern of the text entered.
---------
"""
def translate():
    """Interactively convert patterns of C/V characters into nonsense words.

    Lowercase ``c``/``v`` become random lowercase consonants/vowels, and
    uppercase ``C``/``V`` become uppercase ones.  Loops until the user types
    a phrase containing "exit".
    """
    # Allow the user to rerun the program.
    running = True
    user_text = input("Enter your text please\n")
    while running:
        # Store the new word formed.
        translated_word = ""
        # Go through all the characters and convert them.
        for character in user_text:
            # Check the case of the letter and append the matching random
            # consonant or vowel to translated_word.
            if character == "c":
                translated_word += get_constant(False)
            elif character == "C":
                translated_word += get_constant(True)
            elif character == "v":
                translated_word += get_vowel(False)
            elif character == "V":
                translated_word += get_vowel(True)
            else:
                print("Invalid input. Only Cs and Vs are allowed.")
                # BUG FIX: the original broke out here without re-prompting,
                # so the same invalid text was re-processed forever, printing
                # the error message in an infinite loop.
                user_text = input("Enter your text please\n")
                break
        # Only runs if the whole phrase translated successfully.
        else:
            print(translated_word)
            user_text = input("Enter another phrase, or exit:\n")
            if "exit" in user_text.lower():
                running = False
                print("Goodbye...")
"""
get_constant()
--------------
Description:
Provides a random constant that is either upper or lowercase.
------------
Arguments:
is_upper - Whether the returned value should be uppercase.
----------
Returns:
constant - A constant that is either upper or lowercase based on the
input state of is_upper.
--------
"""
def get_constant(is_upper):
    """Return one random consonant, uppercased when *is_upper* is true."""
    # All 21 English consonants; random.choice picks uniformly, matching the
    # original dict + randint(1, 21) lookup.
    consonants = "bcdfghjklmnpqrstvwxyz"
    letter = random.choice(consonants)
    return letter.upper() if is_upper else letter
"""
get_vowel()
--------------
Description:
Provides a random vowel that is either upper or lowercase.
------------
Arguments:
is_upper - Whether the returned value should be uppercase.
----------
Returns:
vowel - A vowel that is either upper or lowercase based on the
input state of is_upper.
--------
"""
def get_vowel(is_upper):
    """Return one random vowel, uppercased when *is_upper* is true."""
    # random.choice over the five vowels is equivalent to the original
    # dict + randint(1, 5) lookup.
    vowel = random.choice("aeiou")
    return vowel.upper() if is_upper else vowel
# Set the main function as translate()
if __name__ == "__main__":
translate()
|
def feuer_frei(concentration, barrels):
    """Check whether barrels * concentration covers exactly 100 fuel hours.

    Returns the German shortage message when under 100, 'Perfekt!' at
    exactly 100, and the numeric surplus when over.
    """
    fuel_hours = barrels * concentration
    if fuel_hours == 100:
        return 'Perfekt!'
    if fuel_hours < 100:
        shortage = 100 - fuel_hours
        return f'{shortage} Stunden mehr Benzin ben\xf6tigt.'
    return fuel_hours - 100
|
from flask import Flask,request
from celery import Celery
import json
import redis
from celery.result import AsyncResult
import task
app=Flask(__name__)
redis_host = "localhost"
redis_port = 6379
redis_password = ""
r = redis.StrictRedis(host=redis_host, port=redis_port, password=redis_password, decode_responses=True)
@app.route("/insert",methods=["POST"])
def tasks():
if(request.data):
data=json.loads(request.data)
return("Success")
if(request.form):
data=request.form
# print(data["url"],data["time"])
task_id=task.executing_task.delay(data["url"],data["time"])
return(task_id.id)
@app.route("/<taskId>",methods=["GET"])
def get_task_status(taskId):
if(taskId!=''):
keys=r.keys()
task_name='celery-task-meta-'+taskId
if( task_name in keys):
data=json.loads(r.get(task_name))
return(json.dumps(data))
else:
return
if __name__=="__main__":
app.run(debug=True,port=8081,threaded=True)
|
import tensorflow as tf
class PointNet(object):
    """PointNet classification network in TF1 graph style.

    The input point cloud is aligned by a learned input transform (T-Net),
    lifted per-point through shared fully connected layers, aligned again by
    a feature transform, max-pooled over the point dimension into a global
    feature, and classified by an MLP head into 40 logits.
    """

    def __init__(self, batch, keep_rate, is_training):
        """
        Args:
            batch: B*P*C (BATCH_SIZE * NUM_POINTS * NUM_INPUT_COORDS)
            keep_rate: keep probability for the two dropout layers in the head.
            is_training: flag/tensor controlling batch-norm and dropout.

        Side effects: sets ``self.output`` (B*40 logits) and
        ``self.T_middle`` (feature-transform matrix).
        """
        # batch_size = batch.get_shape()[0].value
        num_points = batch.get_shape()[1].value
        num_input_coords = batch.get_shape()[2].value

        # Align raw input coordinates with a learned CxC transform.
        with tf.variable_scope('input_tnet') as sc:
            T_input = self.transform_net(batch, is_training)  # C*C
        # print(T_input.get_shape())
        batch = tf.matmul(batch, T_input)  # B*P*C
        batch = self.fully_connected(batch, 64, 'fc1', is_training)  # B*P*64
        batch = self.fully_connected(batch, 64, 'fc2', is_training)  # B*P*64

        # Second alignment, this time in 64-d feature space.
        with tf.variable_scope('middle_tnet') as sc:
            T_middle = self.transform_net(batch, is_training)  # C*C
        batch = tf.matmul(batch, T_middle)  # B*P*C
        batch = self.fully_connected(batch, 64, 'fc3', is_training)  # B*P*64
        batch = self.fully_connected(batch, 128, 'fc4', is_training)  # B*P*128
        batch = self.fully_connected(batch, 1024, 'fc5', is_training)  # B*P*1024

        # max pooling: symmetric aggregation over the point dimension.
        with tf.variable_scope('maxpool') as sc:
            batch = tf.expand_dims(batch, -1)  # B*P*1024*1
            batch = tf.nn.max_pool(batch,
                                   ksize=[1, num_points, 1, 1],
                                   strides=[1, 1, 1, 1],
                                   padding='VALID',
                                   name=sc.name)  # B*1*1024*1
            batch = tf.squeeze(batch)  # B*1024
        # print(batch.get_shape())

        # Classifier head; no bn/relu on the final logits layer.
        batch = self.fully_connected(batch, 512, 'fc6', is_training)  # B*512
        batch = tf.layers.dropout(batch, rate=1 - keep_rate, training=is_training, name='dp1')
        batch = self.fully_connected(batch, 256, 'fc7', is_training)  # B*256
        batch = tf.layers.dropout(batch, rate=1 - keep_rate, training=is_training, name='dp2')
        batch = self.fully_connected(batch, 40, 'fc8', is_training, bn=False, relu=False)  # B*40

        self.output = batch
        self.T_middle = T_middle

    def fully_connected(self, x, num_outputs, scope_name, is_training, bn=True, relu=True):
        """Shared dense layer: linear -> optional batch-norm -> optional ReLU."""
        with tf.variable_scope(scope_name):
            x = tf.contrib.layers.fully_connected(x, num_outputs,
                                                  activation_fn=None,
                                                  scope='dense')
            if bn:
                x = tf.contrib.layers.batch_norm(x,
                                                 center=True, scale=True,
                                                 is_training=is_training,
                                                 scope='bn')
            if relu:
                x = tf.nn.relu(x, 'relu')
        return x

    def transform_net(self, batch, is_training):
        """T-Net: regress a CxC alignment matrix from the point set.

        NOTE(review): the output is not biased towards the identity matrix
        as in some reference implementations — confirm this is intentional.
        """
        batch_size = batch.get_shape()[0].value
        num_points = batch.get_shape()[1].value
        num_input_coords = batch.get_shape()[2].value
        # print("num_input_coords:", num_input_coords)

        batch = self.fully_connected(batch, 64, 'fc1', is_training)  # B*P*64
        batch = self.fully_connected(batch, 128, 'fc2', is_training)  # B*P*128
        batch = self.fully_connected(batch, 1024, 'fc3', is_training)  # B*P*1024

        # max pooling
        with tf.variable_scope('maxpool') as sc:
            batch = tf.expand_dims(batch, -1)  # B*P*1024*1
            batch = tf.nn.max_pool(batch,
                                   ksize=[1, num_points, 1, 1],
                                   strides=[1, 1, 1, 1],
                                   padding='VALID',
                                   name=sc.name)  # B*1*1024*1
            batch = tf.squeeze(batch)  # B*1024
        # print(batch.get_shape())

        batch = self.fully_connected(batch, 512, 'fc4', is_training)  # B*512
        batch = self.fully_connected(batch, 256, 'fc5', is_training)  # B*256
        batch = self.fully_connected(batch, num_input_coords * num_input_coords,
                                     'fc6', is_training, bn=False, relu=False)  # B*(C*C)
        # print(batch.get_shape())
        t_net = tf.reshape(batch, (-1, num_input_coords, num_input_coords))
        return t_net
|
import unittest
from tempfile import mkdtemp
import shutil
import hylite
from hylite import HyScene, HyCloud, HyImage
from hylite.project import Camera, Pushbroom
import numpy as np
class MyTestCase(unittest.TestCase):
    """Smoke tests for HyScene construction and cloud<->image projections."""

    def build_dummy_data(self):
        """Create a flat synthetic cloud, an RGB image + perspective camera,
        and a pushbroom swath + track, stored as attributes on ``self``."""
        # build an example cloud
        x, y = np.meshgrid(np.linspace(-10, 10), np.linspace(-10, 10))
        xyz = np.vstack([x.ravel(), y.ravel(), np.zeros_like(x.ravel())]).T
        klm = np.zeros(xyz.shape)
        klm[:, 2] = 1.0  # all normals point up (+z)
        rgb = (np.random.rand(*xyz.shape) * 255).astype(np.uint8)
        self.cloud = HyCloud(xyz, rgb=rgb, normals=klm)

        # build an example image
        dims = (20, 20, 3)
        self.image = HyImage(np.random.rand(*dims))
        self.image.set_wavelengths(hylite.RGB)

        # build associated camera
        pos = np.array([0, 0, 40])
        ori = np.array([0, 0, 90])
        fov = 25.
        self.cam = Camera(pos, ori, 'persp', fov, dims)

        # and a track
        self.swath = HyImage(np.random.rand(dims[0], 100, 3))
        self.swath.set_wavelengths(hylite.RGB)
        cp = np.zeros((100, 3))
        cp[:, 0] += np.linspace(-10, 10, 100)
        cp[:, 1] += np.linspace(-10, 10, 100)
        cp[:, 2] = 80.
        co = np.zeros((100, 3))
        self.track = Pushbroom(cp, co, fov / dims[0], fov / dims[0], (dims[0], cp.shape[0]))
        #print(self.track.R[0].as_matrix())
        #print(self.track.R[0].as_matrix()@(self.cloud.xyz[0,:]-cp[0,:]))

    def test_construction(self):
        self.build_dummy_data()

        # make a test directory
        pth = mkdtemp()
        try:
            # init a scene
            S = HyScene(pth, "Scene1")

            # build using normal camera
            S.construct(self.image, self.cloud, self.cam, occ_tol=1, maxf=100, s=5)

            # build using pushbroom camera
            S2 = HyScene(pth, "Scene2")
            S2.construct(self.swath, self.cloud, self.track, occ_tol=1, maxf=100, s=(5, 1))

            # test projections using normal camera
            cld = S.push_to_cloud(hylite.RGB, method='best')
            img = S.push_to_image('klm', method='closest')
            # FIX: assertAlmostEquals is a deprecated alias removed in
            # Python 3.12; use assertAlmostEqual.
            self.assertAlmostEqual(np.nanmax(img.data), 1.0, 2)
            self.assertAlmostEqual(np.nanmax(cld.data), 1.0, 2)

            # test projections using pushbroom camera
            cld = S2.push_to_cloud(hylite.RGB, method='best')
            img = S2.push_to_image('klm', method='closest')
        except Exception:
            # FIX: narrowed from a bare ``except`` so KeyboardInterrupt /
            # SystemExit still propagate.
            self.fail("Error - could not construct HyScene instance")
        finally:
            # FIX: single cleanup path (the original called rmtree in both
            # the except handler and after the try block).
            shutil.rmtree(pth)  # delete temp directory
if __name__ == '__main__':
unittest.main()
|
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
{
'targets': [
{
'target_name': 'nest_el',
'type': 'static_library',
'sources': [ '../file_g.c', ],
'postbuilds': [
{
'postbuild_name': 'Static library postbuild',
'variables': {
'some_regex': 'a|b',
},
'action': [
'../script/static_library_postbuild.sh',
'<(some_regex)',
'arg with spaces',
],
},
],
},
{
'target_name': 'nest_dyna',
'type': 'shared_library',
'mac_bundle': 1,
'sources': [ '../file_h.c', ],
'postbuilds': [
{
'postbuild_name': 'Dynamic library postbuild',
'variables': {
'some_regex': 'a|b',
},
'action': [
'../script/shared_library_postbuild.sh',
'<(some_regex)',
'arg with spaces',
],
},
{
'postbuild_name': 'Test paths relative to gyp file',
'action': [
'../copy.sh',
'./copied_file.txt',
'${BUILT_PRODUCTS_DIR}/copied_file_2.txt',
],
},
],
},
],
}
|
# Generated by Django 2.1.3 on 2018-11-06 10:45
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add ``nfc_dev_id`` and ``password`` fields to Staff.

    NOTE(review): ``password`` is a plain nullable CharField — confirm the
    application stores a hash here rather than a cleartext password.
    """

    dependencies = [
        ('core', '0018_auto_20181106_2343'),
    ]

    operations = [
        migrations.AddField(
            model_name='staff',
            name='nfc_dev_id',
            field=models.CharField(default='00 00 00 00', max_length=254),
        ),
        migrations.AddField(
            model_name='staff',
            name='password',
            field=models.CharField(max_length=254, null=True),
        ),
    ]
|
import os
from setuptools import find_packages, setup
import dc_signup_form
# allow setup.py to be run from any path
os.chdir(os.path.normpath(os.path.join(os.path.abspath(__file__), os.pardir)))
def get_version():
    """Return the package version string exported by ``dc_signup_form``."""
    return dc_signup_form.__version__
setup(
name="dc_signup_form",
version=get_version(),
author="chris48s",
packages=find_packages(),
include_package_data=True,
description="Email Signup form component for use on DC websites",
url="https://github.com/DemocracyClub/dc_signup_form",
install_requires=[
'requests',
'Django >=3.2,<4.3',
'psycopg2-binary',
],
setup_requires=["wheel"],
)
|
# Generated by Django 2.1.4 on 2018-12-13 01:31
from django.db import migrations
class Migration(migrations.Migration):
    """Auto-generated: rename the Profile model's DB table to ``UserProfiles``."""

    dependencies = [
        ('UserProfile', '0001_initial'),
    ]

    operations = [
        migrations.AlterModelTable(
            name='profile',
            table='UserProfiles',
        ),
    ]
|
module("gxx.std",
include_paths = ["."]
) |
from google.appengine.api import users
from google.appengine.ext import db
from app.model.account import Account, AccountEmail
_REQUIRE_INVITE = True
# DB Model - Account
# ------------------
class Accounts():
    """Static helpers for loading/creating/deleting Account datastore rows.

    Wraps GAE datastore queries over the ``Account`` / ``AccountEmail``
    models.  (The "Check cache first" comments describe an intended cache
    layer that is not implemented here.)
    """

    @staticmethod
    def loadAll():
        """Return up to 50 accounts, most recently created first."""
        # Check cache first, if not load from DB
        accounts = Account.gql('ORDER BY created DESC LIMIT 50')
        return accounts

    @staticmethod
    def loadOne(key):
        """Load a single account by datastore key."""
        # Check cache first, if not load from DB
        account = db.get(key)
        return account

    @staticmethod
    def loadByID(id, create=False):
        """Look up an account by Google user id.

        When no row exists: creates an Account plus a verified AccountEmail
        for the current user if ``create`` is true or invites are not
        required; otherwise returns None.
        """
        # Check cache first, if not load from DB
        q = Account.all()
        q.filter("id =", id)
        r = q.fetch(1)
        if len(r) > 0:
            account = r[0]
        else:
            # Automatic create of users or not
            if create or not _REQUIRE_INVITE:
                #account = Accounts._createUser()
                user = users.get_current_user()
                name = user.nickname()

                # create the account
                account = Account()
                account.id = user.user_id()
                account.name = name
                account.put()

                # create the email
                email = AccountEmail()
                email.account = account
                email.email = user.email()
                email.verified = True
                email.put()
            else:
                account = None
        return account

    @staticmethod
    def loadByUsername(username):
        """Return the first account matching *username*.

        NOTE(review): this indexes the GqlQuery object directly and does not
        handle the no-match case — confirm; ``q.get()`` or ``fetch(1)`` with
        an empty-result check is the usual idiom.
        """
        # Check cache first, if not load from DB
        accounts = Account.gql('WHERE username = :1 LIMIT 1', username)
        return accounts[0]

    @staticmethod
    def loadByEmail(email):
        """Return the account owning *email*, or None if unknown."""
        # Check cache first, if not load from DB
        q = AccountEmail.all()
        q.filter("email =", email)
        r = q.fetch(1)
        if len(r) > 0:
            account = r[0].account
        else:
            account = None
        return account

    # @staticmethod
    # def loadService(account, service):
    #     q = AccountService.all()
    #     q.filter("account =", account)
    #     q.filter("service =", service)
    #     r = q.fetch(1)
    #     if len(r) > 0:
    #         s = r[0]
    #     else:
    #         # Create a default
    #         s = AccountService(account=account, service=service)
    #         #s.account = account
    #         #s.service = service
    #         s.put()
    #
    #     return s

    @staticmethod
    def _createUser():
        """Create an Account (+ verified AccountEmail) for the current user."""
        # if account doesn't exist, make sure to create one.. later I can clean up if necessary
        user = users.get_current_user()
        name = user.nickname()
        # email = user.email()
        # fix the name and create unique username

        # create the account
        account = Account()
        account.id = user.user_id()
        account.name = name
        account.put()

        # create the email
        email = AccountEmail()
        email.account = account
        email.email = user.email()
        email.verified = True
        email.put()
        # create services... problem is when adding services for new users..
        return account

    @staticmethod  # Move to Account
    def deleteOne(account):
        """Delete *account* together with its emails and services.

        NOTE(review): ``AccountService`` is not imported in this module (only
        Account and AccountEmail are) — this will raise NameError at runtime;
        confirm the intended import from app.model.account.
        """
        q = AccountEmail.all()
        q.filter("account =", account)
        for e in q:
            e.delete()
        q = AccountService.all()
        q.filter("account =", account)
        for e in q:
            e.delete()
        account.delete()
|
df_train = pd.read_csv('../input/labeledTrainData.tsv', delimiter="\t")
xxx
xxx
|
# The location of the script
path = "/home/pi/Tiny-Stereo/"
# GPIO setup (BCM) on the Raspberry Pi. Set "warnings" to True if you wish to render GPIO warnings.
pins = {
"warnings": False,
"led": 14,
"buttons": {
"power": 3,
"next": 17,
"volume_up": 22,
"volume_down": 27
},
"lcd": {
"rs": 26,
"e": 16,
"d4": 13,
"d5": 6,
"d6": 5,
"d7": 12
}
} |
# Copyright 2021 DAI Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at: http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from functools import partial
def token_decimals(transaction, repository, address):
    """Return the token's decimals for *address* on the transaction's chain.

    Falls back to the de-facto ERC-20 default of 18 when the repository
    lookup fails (unknown token, backend error).
    """
    try:
        _, _, decimals, _ = repository.get_token_data(transaction.chain_id, address)
    except Exception:
        # FIX: narrowed from a bare ``except`` so KeyboardInterrupt /
        # SystemExit are not swallowed; best-effort fallback is preserved.
        decimals = 18
    return decimals
def decode_nft(contract, token_id):
    """Build a display name and inventory-link address for an NFT token id.

    Long ids (more than 8 digits) are abbreviated as the first six
    characters, an ellipsis, and the last two.
    """
    id_text = str(token_id)
    if len(id_text) > 8:
        name = f"NFT {id_text[:6]}...{id_text[-2:]}"
    else:
        name = f"NFT {id_text}"
    address = f"{contract}?a={token_id}#inventory"
    return {"address": address, "name": name}
def string_from_bytes(raw_value):
    """Decode a hex string (optionally ``0x``-prefixed) into utf-8 text.

    NUL padding bytes are stripped.  Returns ``"???"`` when the input is not
    valid hex / utf-8 (or not a string at all).
    """
    try:
        raw_value = raw_value[2:] if raw_value[:2] == "0x" else raw_value
        decoded_string = bytes.fromhex(raw_value).decode("utf-8").replace("\x00", "")
    except (ValueError, TypeError):
        # FIX: narrowed from a bare ``except``. fromhex raises ValueError on
        # bad hex (UnicodeDecodeError is its subclass) and TypeError on
        # non-string input.
        decoded_string = "???"
    return decoded_string
def add_utils_to_context(context):
    """Register the decoding helpers as callables on *context* (in place).

    The transaction, repository and contract stored under the dunder keys
    are pre-bound so templates can call the helpers with short signatures.
    """
    transaction = context["__transaction__"]
    repository = context["__repository__"]
    contract = context["__contract__"]
    context["token_decimals"] = partial(token_decimals, transaction, repository)
    context["decode_nft"] = partial(decode_nft, contract)
    context["string_from_bytes"] = string_from_bytes
|
import numpy as np
import matplotlib.pyplot as plt
r = 0.5
n = 10
K = 0.3
a = 1.2
def f(x, y):
    """Right-hand side of the scalar test ODE y' = x + y (used by RK4)."""
    return x + y
def RK4(xs, xd, ys, h):
    """Classical 4th-order Runge-Kutta for y' = f(x, y) on [xs, xd].

    Starts from y(xs) = ys with fixed step h; returns the arrays of
    abscissae and solution values.
    """
    steps = int((xd - xs) / h)
    x = np.zeros(steps + 1)
    y = np.zeros(steps + 1)
    x[0] = xs
    y[0] = ys
    for i in range(1, steps + 1):
        xi, yi = x[i - 1], y[i - 1]
        k1 = h * f(xi, yi)
        k2 = h * f(xi + 0.5 * h, yi + 0.5 * k1)
        k3 = h * f(xi + 0.5 * h, yi + 0.5 * k2)
        k4 = h * f(xi + h, yi + k3)
        y[i] = yi + (1.0 / 6.0) * (k1 + 2 * k2 + 2 * k3 + k4)
        x[i] = xi + h
    return x, y
def F(x, y):
    """Vector RHS of a 2nd-order ODE rewritten as a first-order system.

    y[0] is the solution, y[1] its derivative:
        y0' = y1
        y1' = x*y0^2*(1 + sin(x*y0)) + x^3*y1
    """
    F = np.zeros(2)
    F[0] = y[1]
    F[1] = x*(y[0]*y[0])*(1+np.sin(x*y[0])) + (x*x*x)*y[1]
    #F[1] = -x*F[1] - F[0]
    return F
def rk4coef(F, x, y, h):
    """Return the four classical Runge-Kutta increments for one step h."""
    half = h / 2.0
    k0 = h * F(x, y)
    k1 = h * F(x + half, y + k0 / 2.0)
    k2 = h * F(x + half, y + k1 / 2.0)
    k3 = h * F(x + h, y + k2)
    return k0, k1, k2, k3
def rk4(F, x, y, xStop, h):
    """Integrate y' = F(x, y) from x to xStop with the classical RK4 scheme.

    Returns (X, Y): the grid of abscissae and the list of states (y may be a
    numpy vector for systems of ODEs).
    """
    # BUG FIX: np.linspace requires an integer ``num``; np.ceil returns a
    # float, which raises TypeError on modern numpy versions.
    steps = int(np.ceil((np.abs(xStop - x) / h) + 1))
    X = np.linspace(x, xStop, num=steps)
    Y = [y]
    for i in range(1, len(X)):
        k1, k2, k3, k4 = rk4coef(F, X[i - 1], Y[i - 1], h)
        Y.append(Y[i - 1] + (1 / 6) * (k1 + 2 * k2 + 2 * k3 + k4))
    return X, Y
if __name__ == "__main__":
xs = 0
xd = 1
ys = np.array([1, 1])
h = 0.1
#x, y = RK4(xs, xd, ys, h)
x, y= rk4(F, xs, ys, xd, h)
print(y)
plt.plot(y)
#plt.plot(x, y, "ro")
|
##encoding=utf-8
"""
Imoprt Command
--------------
from archives.spider import spider
"""
import requests
class ArchivesSpider():
    """requests.Session wrapper that logs in to www.archives.com on
    construction and fetches member pages as utf-8 text.

    SECURITY NOTE: the login credentials are hard-coded below — they should
    be moved to configuration / environment variables and rotated.
    """

    def __init__(self, timeout=30, sleeptime=0):
        self.session = requests.Session()
        self.default_timeout = timeout      # per-request timeout, seconds
        self.default_sleeptime = sleeptime  # intended inter-request delay
        self.default_header = {
            "User-Agent": "Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/41.0.2272.118 Safari/537.36",
            "Accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8",
            "Accept-Encoding": "gzip, deflate, sdch",
            "Accept-Language": "en-US,en;q=0.8,zh-CN;q=0.6,zh;q=0.4",
            "Content-Type": "text/html; charset=UTF-8",
            "Connection": "close",
            "Referer": "http://www.archives.com/member/",
        }
        self.login()

    def set_timeout(self, timeout):
        """set default timeout limit in second
        """
        self.default_timeout = timeout

    def set_sleeptime(self, sleeptime):
        """change default_sleeptime
        """
        self.default_sleeptime = sleeptime

    def login(self):
        """try to log in to www.archives.com, and keep connection

        Retries up to three times; raises Exception when every attempt fails.
        """
        for i in range(3):
            try:
                self.session.post("http://www.archives.com/member/",
                                  data={"__uid": "efdevices@theeagleforce.net", "__pwd": "MYpasswd"})
                print("Successfully login to http://www.archives.com/member/")
                return
            except requests.RequestException:
                # FIX: was a bare ``except`` that also swallowed
                # KeyboardInterrupt; only network failures should retry.
                pass
        raise Exception("Failed to login to http://www.archives.com/member/")

    def html(self, url):
        """get utf-8 encoded string of html page, or None on failure
        """
        try:
            response = self.session.get(url,
                                        headers=self.default_header,
                                        timeout=self.default_timeout)
            return response.content.decode("utf-8")
        except (requests.RequestException, UnicodeDecodeError):
            # FIX: narrowed from a bare ``except``.
            return None
spider = ArchivesSpider()
if __name__ == "__main__":
print(spider.html("http://www.archives.com/member/Default.aspx?_act=VitalSearchResult&LastName=Smith&DeathYear=2012&State=AK&Country=US&Location=AK&ShowSummaryLink=1&RecordType=2&activityID=ad1ef8c1-6bef-4010-aa95-1f089abe0f50"))
# print(spider.html("http://www.archives.com/member/Default.aspx?_act=VitalSearchResult&LastName=Smith&DeathYear=2007&Country=US&State=&Location=US&ShowSummaryLink=1&RecordType=2&activityID=db4af76f-426b-4e8c-9fe7-0b88b9c4d179&pagesize=10&pageNumber=1&pagesizeAP=10&pageNumberAP=1")) |
# Created by Luis A. Sanchez-Perez (alejand@umich.edu).
# Copyright © Do not distribute or use without authorization from author
import os
import re
import random
import numpy as np
import tensorflow as tf
# Dictionary containing features description for parsing purposes
feature_description = {
'spec': tf.io.FixedLenSequenceFeature([], tf.float32, allow_missing=True, default_value=[0.0]),
'mfcc': tf.io.FixedLenFeature([], tf.int64),
'samples': tf.io.FixedLenFeature([], tf.int64),
'label': tf.io.FixedLenFeature([], tf.string, default_value=''),
'measurement': tf.io.FixedLenFeature([], tf.string, default_value=''),
'array': tf.io.FixedLenFeature([], tf.string, default_value=''),
'sensor': tf.io.FixedLenFeature([], tf.string, default_value='')
}
def serialize_observation(observation):
    """
    Serializes a single observation

    ``observation`` is a ``(url, label, measurement, array, sensor)`` tuple
    where *url* points to a saved 2-D (mfcc x samples) .npy spectrogram.
    Returns the serialized ``tf.train.Example`` bytes.
    """
    # Read npy file
    url, label, measurement, array, sensor = observation
    spectrogram = np.load(url)
    mfcc, samples = spectrogram.shape
    # Create a dictionary mapping the feature name to the tf.Example compatible data type
    feature = {
        'spec': tf.train.Feature(float_list=tf.train.FloatList(value=list(spectrogram.ravel()))),
        'mfcc': tf.train.Feature(int64_list=tf.train.Int64List(value=[mfcc])),
        'samples': tf.train.Feature(int64_list=tf.train.Int64List(value=[samples])),
        'label': tf.train.Feature(bytes_list=tf.train.BytesList(value=[label.encode()])),
        'measurement': tf.train.Feature(bytes_list=tf.train.BytesList(value=[measurement.encode()])),
        'array': tf.train.Feature(bytes_list=tf.train.BytesList(value=[array.encode()])),
        'sensor': tf.train.Feature(bytes_list=tf.train.BytesList(value=[sensor.encode()]))
    }
    # Create a Features message using tf.train.Example
    example_proto = tf.train.Example(features=tf.train.Features(feature=feature))
    return example_proto.SerializeToString()
class AircraftRecordBuilder(object):
    """
    Creates two records files (training/test)
    It ensures the percentage of test samples of each class is the same.
    For the training set it includes all signals from a measurement but for
    the test set only one random signal from each measurement is included.
    """

    def __init__(self, path):
        """Index all .npy spectrograms found under *path* and shuffle them."""
        # Stores path
        self.path = path
        # Reads all npy files found inside path
        self.datafiles = []
        for root, _, files in os.walk(path):
            for file in files:
                if '.npy' in file:
                    # Filenames encode ids as m<digits>a<digits>s<digits>...
                    groups = re.match(r'^m(\d+)a(\d+)s(\d+)', file).groups()
                    url = os.path.join(root, file)
                    label = os.path.split(root)[1]  # class name = parent folder
                    measurement = groups[0]
                    array = groups[1]
                    sensor = groups[2]
                    # appending (url, class name, measurement id, array, sensor)
                    self.datafiles.append((url, label, measurement, array, sensor))
        random.shuffle(self.datafiles)

    def build(self, test_pct=0.2):
        """
        Generates a separate tfrecord file containing the serialized observations
        for each stratified set (training & test) generated from the signals
        found in the input path.
        """
        # generate sets of files
        train_set, test_set = self.generate_sets(test_pct)
        # generate a tfrecord for each set
        self.generate_tfrecord(train_set, 'train.tfrecord')
        self.generate_tfrecord(test_set, 'test.tfrecord')

    def generate_sets(self, test_pct):
        """
        Creates stratified training and test sets of the signals found in the
        input path

        Splits by measurement id within each class so one measurement's
        signals never appear in both sets.
        NOTE(review): measurement ids are assumed unique across classes —
        confirm, otherwise the boolean masks can leak between classes.
        """
        # Shuffles dataset
        measurements = []
        labels = []
        for _, label, measurement, _, _ in self.datafiles:
            measurements.append(measurement)
            labels.append(label)
        measurements = np.array(measurements)
        labels = np.array(labels)
        classes = np.unique(labels)
        # Splits dataset
        m = len(self.datafiles)
        train_obs = np.zeros(m, dtype=bool)
        test_obs = np.zeros(m, dtype=bool)
        for category in classes:
            indexes = np.unique(measurements[labels == category])
            indexes = np.random.permutation(indexes)
            k = int(len(indexes) * (1 - test_pct))
            # Training set: every signal of the first k measurements
            for index in indexes[:k]:
                new_obs = (measurements == index)
                train_obs = train_obs | new_obs
            # Test set: a single randomly chosen signal per measurement
            for index in indexes[k:]:
                new_obs = (measurements == index)
                mask = np.random.permutation(new_obs.nonzero()[0])
                new_obs[mask[:-1]] = False  # keep only one of the shuffled hits
                test_obs = test_obs | new_obs
        train_set = [self.datafiles[i] for i in range(len(train_obs)) if train_obs[i]]
        test_set = [self.datafiles[i] for i in range(len(test_obs)) if test_obs[i]]
        return train_set, test_set

    def generate_tfrecord(self, observations, filename):
        """
        Generates a tfrecord file containing the serialized observations
        """
        filepath = os.path.join(self.path, filename)
        with tf.io.TFRecordWriter(filepath) as writer:
            for obs in observations:
                example = serialize_observation(obs)
                writer.write(example)
|
import os
import numpy as np
from . import __file__ as filepath
__all__ = ["Inoue14"]
class Inoue14(object):
    """IGM attenuation curves from Inoue et al. (2014).

    Combines Lyman-series and Lyman-continuum optical depths from the
    Lyman-alpha forest (LAF) and damped Lyman-alpha (DLA) absorbers, using
    the piecewise power-law coefficient tables shipped in ``data/``.
    """

    def __init__(self, scale_tau=1.):
        """
        IGM absorption from Inoue et al. (2014)

        Parameters
        ----------
        scale_tau : float
            Multiplicative factor applied to the total optical depth in
            ``full_IGM`` (1.0 reproduces the unscaled curves).
        """
        self._load_data()
        self.scale_tau = scale_tau

    def _load_data(self):
        """Load the LAF/DLA coefficient tables bundled with the package."""
        path = os.path.join(os.path.dirname(filepath), 'data')
        #print path

        LAF_file = os.path.join(path, 'LAFcoeff.txt')
        DLA_file = os.path.join(path, 'DLAcoeff.txt')

        data = np.loadtxt(LAF_file, unpack=True)
        ix, self.lam, self.ALAF1, self.ALAF2, self.ALAF3 = data

        data = np.loadtxt(DLA_file, unpack=True)
        ix, self.lam, self.ADLA1, self.ADLA2 = data

        # Number of tabulated Lyman-series transitions.
        self.NA = len(self.lam)

        return True

    def tLSLAF(self, zS, lobs):
        """Lyman series, LAF — vectorized over all tabulated transitions."""
        z1LAF = 1.2
        z2LAF = 4.7

        # l2[j, :] broadcasts transition j's rest wavelength over all lobs.
        l2 = np.dot(self.lam[:, np.newaxis], np.ones((1, lobs.shape[0])))
        tLSLAF_value = l2.T*0

        match0 = (lobs < l2*(1+zS))
        match1 = lobs < l2*(1+z1LAF)
        match2 = (lobs >= l2*(1+z1LAF)) & (lobs < l2*(1+z2LAF))
        match3 = lobs >= l2*(1+z2LAF)

        tLSLAF_value += self.ALAF1*(((lobs/l2)*(match0 & match1))**1.2).T
        tLSLAF_value += self.ALAF2*(((lobs/l2)*(match0 & match2))**3.7).T
        tLSLAF_value += self.ALAF3*(((lobs/l2)*(match0 & match3))**5.5).T

        return tLSLAF_value.sum(axis=1)

    def tLSDLA(self, zS, lobs):
        """
        Lyman Series, DLA
        """
        z1DLA = 2.0

        l2 = np.dot(self.lam[:, np.newaxis], np.ones((1, lobs.shape[0])))
        tLSDLA_value = l2.T*0

        match0 = (lobs < l2*(1+zS))
        match1 = lobs < l2*(1.+z1DLA)

        tLSDLA_value += self.ADLA1*((lobs/l2*(match0 & match1))**2.0).T
        tLSDLA_value += self.ADLA2*((lobs/l2*(match0 & ~match1))**3.0).T

        return tLSDLA_value.sum(axis=1)

    def _tLSLAF(self, zS, lobs):
        """
        Lyman series, Lyman-alpha forest

        Reference (per-transition loop) implementation of ``tLSLAF``.
        """
        z1LAF = 1.2
        z2LAF = 4.7

        tLSLAF_value = lobs*0.
        for j in range(self.NA):
            match0 = (lobs < self.lam[j]*(1+zS)) #& (lobs > self.lam[j])
            match1 = lobs < self.lam[j]*(1+z1LAF)
            match2 = (lobs >= self.lam[j]*(1+z1LAF)) & (lobs < self.lam[j]*(1+z2LAF))
            match3 = lobs >= self.lam[j]*(1+z2LAF)
            tLSLAF_value[match0 & match1] += self.ALAF1[j]*(lobs[match0 & match1]/self.lam[j])**1.2
            tLSLAF_value[match0 & match2] += self.ALAF2[j]*(lobs[match0 & match2]/self.lam[j])**3.7
            tLSLAF_value[match0 & match3] += self.ALAF3[j]*(lobs[match0 & match3]/self.lam[j])**5.5

        return tLSLAF_value

    def _tLSDLA(self, zS, lobs):
        """
        Lyman Series, DLA

        Reference (per-transition loop) implementation of ``tLSDLA``.
        """
        z1DLA = 2.0

        tLSDLA_value = lobs*0.
        for j in range(self.NA):
            match0 = (lobs < self.lam[j]*(1+zS)) #& (lobs > self.lam[j])
            match1 = lobs < self.lam[j]*(1.+z1DLA)
            tLSDLA_value[match0 & match1] += self.ADLA1[j]*(lobs[match0 & match1]/self.lam[j])**2.0
            tLSDLA_value[match0 & ~match1] += self.ADLA2[j]*(lobs[match0 & ~match1]/self.lam[j])**3.0

        return tLSDLA_value

    def tLCDLA(self, zS, lobs):
        """
        Lyman continuum, DLA
        """
        z1DLA = 2.0
        lamL = 911.8  # Lyman-limit wavelength

        tLCDLA_value = lobs*0.
        match0 = lobs < lamL*(1.+zS)

        if zS < z1DLA:
            tLCDLA_value[match0] = 0.2113 * _pow(1.0+zS, 2) - 0.07661 * _pow(1.0+zS, 2.3) * _pow(lobs[match0]/lamL, (-3e-1)) - 0.1347 * _pow(lobs[match0]/lamL, 2)
        else:
            match1 = lobs >= lamL*(1.+z1DLA)
            tLCDLA_value[match0 & match1] = 0.04696 * _pow(1.0+zS, 3) - 0.01779 * _pow(1.0+zS, 3.3) * _pow(lobs[match0 & match1]/lamL, (-3e-1)) - 0.02916 * _pow(lobs[match0 & match1]/lamL, 3)
            tLCDLA_value[match0 & ~match1] = 0.6340 + 0.04696 * _pow(1.0+zS, 3) - 0.01779 * _pow(1.0+zS, 3.3) * _pow(lobs[match0 & ~match1]/lamL, (-3e-1)) - 0.1347 * _pow(lobs[match0 & ~match1]/lamL, 2) - 0.2905 * _pow(lobs[match0 & ~match1]/lamL, (-3e-1))

        return tLCDLA_value

    def tLCLAF(self, zS, lobs):
        """
        Lyman continuum, LAF
        """
        z1LAF = 1.2
        z2LAF = 4.7
        lamL = 911.8  # Lyman-limit wavelength

        tLCLAF_value = lobs*0.
        match0 = lobs < lamL*(1.+zS)

        if zS < z1LAF:
            tLCLAF_value[match0] = 0.3248 * (_pow(lobs[match0]/lamL, 1.2) - _pow(1.0+zS, -9e-1) * _pow(lobs[match0]/lamL, 2.1))
        elif zS < z2LAF:
            match1 = lobs >= lamL*(1+z1LAF)
            tLCLAF_value[match0 & match1] = 2.545e-2 * (_pow(1.0+zS, 1.6) * _pow(lobs[match0 & match1]/lamL, 2.1) - _pow(lobs[match0 & match1]/lamL, 3.7))
            tLCLAF_value[match0 & ~match1] = 2.545e-2 * _pow(1.0+zS, 1.6) * _pow(lobs[match0 & ~match1]/lamL, 2.1) + 0.3248 * _pow(lobs[match0 & ~match1]/lamL, 1.2) - 0.2496 * _pow(lobs[match0 & ~match1]/lamL, 2.1)
        else:
            match1 = lobs > lamL*(1.+z2LAF)
            match2 = (lobs >= lamL*(1.+z1LAF)) & (lobs < lamL*(1.+z2LAF))
            match3 = lobs < lamL*(1.+z1LAF)
            tLCLAF_value[match0 & match1] = 5.221e-4 * (_pow(1.0+zS, 3.4) * _pow(lobs[match0 & match1]/lamL, 2.1) - _pow(lobs[match0 & match1]/lamL, 5.5))
            tLCLAF_value[match0 & match2] = 5.221e-4 * _pow(1.0+zS, 3.4) * _pow(lobs[match0 & match2]/lamL, 2.1) + 0.2182 * _pow(lobs[match0 & match2]/lamL, 2.1) - 2.545e-2 * _pow(lobs[match0 & match2]/lamL, 3.7)
            tLCLAF_value[match0 & match3] = 5.221e-4 * _pow(1.0+zS, 3.4) * _pow(lobs[match0 & match3]/lamL, 2.1) + 0.3248 * _pow(lobs[match0 & match3]/lamL, 1.2) - 3.140e-2 * _pow(lobs[match0 & match3]/lamL, 2.1)

        return tLCLAF_value

    def full_IGM(self, z, lobs):
        """Get full Inoue IGM absorption

        Parameters
        ----------
        z : float
            Redshift to evaluate IGM absorption

        lobs : array-like
            Observed-frame wavelength(s).

        Returns
        -------
        abs : IGM absorption (transmission fraction exp(-scale_tau * tau))
        """
        tau_LS = self.tLSLAF(z, lobs) + self.tLSDLA(z, lobs)
        tau_LC = self.tLCLAF(z, lobs) + self.tLCDLA(z, lobs)

        ### Upturn at short wavelengths, low-z
        #k = 1./100
        #l0 = 600-6/k
        #clip = lobs/(1+z) < 600.
        #tau_clip = 100*(1-1./(1+np.exp(-k*(lobs/(1+z)-l0))))
        tau_clip = 0.

        return np.exp(-self.scale_tau*(tau_LC + tau_LS + tau_clip))

    def build_grid(self, zgrid, lrest):
        """Build a spline interpolation object for fast IGM models

        Returns: self.interpolate
        """
        from scipy.interpolate import CubicSpline
        igm_grid = np.zeros((len(zgrid), len(lrest)))
        for iz in range(len(zgrid)):
            igm_grid[iz,:] = self.full_IGM(zgrid[iz], lrest*(1+zgrid[iz]))

        self.interpolate = CubicSpline(zgrid, igm_grid)
def _pow(a, b):
"""C-like power, a**b
"""
return a**b
|
from django.urls import path, re_path, include
from rest_framework.routers import DefaultRouter
from jiaju.cases.views import CategoryView, CaseView, CaseDetailedView, FeaturedCaseView
# NOTE(review): `path` is imported but unused in this module.
# DRF router for the "cases" app endpoints.
router = DefaultRouter()
router.register(r'category', CategoryView)
# NOTE(review): CaseView is registered under a purely numeric prefix (the
# category id captured as `category`) -- confirm this URL scheme is intended.
router.register(r'(?P<category>[0-9]+)', CaseView)
router.register(r'detail', CaseDetailedView)
urlpatterns = [
    # Manually-routed viewset action: GET featured/ -> FeaturedCaseView.list
    re_path(r'featured/$', FeaturedCaseView.as_view({'get': 'list'})),
    # All router-registered endpoints are mounted at this module's root.
    re_path(r'', include(router.urls)),
]
import matplotlib.pyplot as plt
import numpy as np

# Pie-chart demo: one random percentage slice per day of the week.
day_names = ['Mon', 'Tue', 'Wed', 'Thu', 'Fri', 'Sat', 'Sun']
slice_values = np.random.rand(7) * 100
plt.pie(slice_values, labels=day_names, autopct='%1.1f%%')
plt.axis('equal')  # keep the pie circular
plt.legend()
plt.show()
from mod_base import*
class NewPass(Command):
    """Change your password.
    Usage: newpass oldpassword newpassword newpassword
    """

    def run(self, win, user, data, caller=None):
        parsed = Args(data)
        # Guard clauses: argument count first, then authentication.
        if len(parsed) < 3:
            win.Send("Usage: newpass oldpassword newpassword newpassword")
            return False
        if not user.IsAuthed():
            win.Send("you've not logged in")
            return False
        current, first, second = parsed[0], parsed[1], parsed[2]
        if first != second:
            win.Send("your new password didn't match up")
            return
        # The account store validates the old password itself.
        if self.bot.config.ChangeAccountPass(user.account["name"], current, first):
            win.Send("password changed!")
        else:
            win.Send("failed to change password! make sure your old password is correct.")
# Module-registration record consumed by the bot's plugin loader.
module = {
    "class": NewPass,        # command implementation class
    "type": MOD_COMMAND,     # registered as a chat command
    "level": 0,              # presumably the minimum user level -- confirm
    "zone": IRC_ZONE_QUERY,  # query-only, which keeps passwords out of channels
}
|
# NOTE(review): none of these names (sum1, sum2, p, aa) are defined or
# imported in this fragment -- running it as-is raises NameError. They
# presumably come from a module that was meant to be imported; confirm.
print(sum1(2,3))
print(sum2(10,11,20))
p()
aa()
import torch.nn as nn
import torch.nn.functional as F
from mmcv.cnn import xavier_init
from mmdet.core import auto_fp16
from ..builder import NECKS
@NECKS.register_module
class LiteFpn(nn.Module):
    """Lightweight FPN-style neck built from depthwise-separable conv blocks.

    Consumes the last two backbone feature maps and emits five levels:
    a fused/upsampled level, the top lateral level, a stride-2 level, and
    two max-pooled levels.

    NOTE(review): attribute names (conv4/conv5/smmoth1/conv6 -- including
    the 'smmoth1' typo) define state_dict keys, so renaming them would
    break existing checkpoints.
    """

    def __init__(self,
                 in_channels,
                 out_channels,
                 ):
        super(LiteFpn, self).__init__()
        self.in_channels = in_channels
        # Each lateral block is a depthwise 3x3 conv plus a 1x1 pointwise
        # projection to out_channels (a separable convolution).
        # self.conv4 = nn.Conv2d(in_channels[1], out_channels, 1, bias=True)
        self.conv4 = nn.Sequential(
            nn.Conv2d(in_channels[0], in_channels[0], 3, 1, 1, groups=in_channels[0], bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels[0], out_channels, 1, bias=True),
            nn.ReLU(inplace=True),
        )
        # self.conv5 = nn.Conv2d(in_channels[2], out_channels, 1, bias=True)
        self.conv5 = nn.Sequential(
            nn.Conv2d(in_channels[1], in_channels[1], 3, 1, 1, groups=in_channels[1], bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels[1], out_channels, 1, bias=True),
            nn.ReLU(inplace=True),
        )
        # Smoothing block applied after the top-down fusion.
        self.smmoth1 = nn.Sequential(
            nn.Conv2d(out_channels, out_channels, 3, 1, 1, groups=out_channels, bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(out_channels, out_channels, 1, bias=True),
            nn.ReLU(inplace=True),
        )
        # Stride-2 separable conv producing an extra, coarser level.
        self.conv6 = nn.Sequential(
            nn.Conv2d(in_channels[-1], in_channels[-1], 3, 2, 1, groups=in_channels[-1], bias=True),
            nn.ReLU(inplace=True),
            nn.Conv2d(in_channels[-1], out_channels, 1, bias=True),
            nn.ReLU(inplace=True),
        )
        # Two further levels are derived by plain stride-2 max pooling.
        self.max_pool1 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.max_pool2 = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.init_weights()

    # default init_weights for conv(msra) and norm in ConvModule
    def init_weights(self):
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                xavier_init(m, distribution='uniform')

    @auto_fp16()
    def forward(self, inputs):
        assert len(inputs) == len(self.in_channels)
        C4_lat = self.conv4(inputs[-2])
        C5_lat = self.conv5(inputs[-1])
        # Top-down fusion: assumes C5 is exactly half the spatial size of
        # C4 -- TODO confirm for all backbone configurations.
        C4_lat = F.interpolate(C5_lat, scale_factor=2, mode="nearest") + C4_lat
        C4_lat = self.smmoth1(C4_lat)
        C6_lat = self.conv6(inputs[-1])
        C7_lat = self.max_pool1(C6_lat)
        C8_lat = self.max_pool2(C7_lat)
        outs = [C4_lat, C5_lat, C6_lat, C7_lat, C8_lat]
        return tuple(outs)
|
import numpy as np
def alpha_from_c_beta(c, beta):
    """Return alpha = c/(1+sqrt(beta)) + (1+2*sqrt(beta))/6."""
    root = np.sqrt(beta)
    return c / (1.0 + root) + (1.0 + 2.0 * root) / 6.0
def c_from_beta_k(beta, k):
    """Return c = (1 - beta - 9k/4) / 30."""
    numerator = 1.0 - beta - 9.0 * k / 4.0
    return numerator / 30.0
def k_from_xi(xi):
    """Return k = 2/xi - 2; ``xi is None`` is treated as k = 0."""
    return 0.0 if xi is None else 2.0 / xi - 2.0
def alpha_from_beta_xi(beta, xi):
    """Convenience wrapper: alpha from (beta, xi) via c(beta, k(xi))."""
    k = k_from_xi(xi)
    c = c_from_beta_k(beta, k)
    return alpha_from_c_beta(c, beta)
|
import requests
import base64
import json
import urllib.request as urllib
# Endpoints for the Azure Function app: local emulator vs. deployed instance.
localUrl = 'http://localhost:7071/api/'
cloudUrl = 'https://ft45mlfuncapp.azurewebsites.net/api/'
# NOTE(review): function key committed in source -- move to secret storage.
code = "DYAwYUdU2CuRHfKYQ3zUwWoChIIDbhSIterIPZxvO9EHFcQKtir4JQ=="
image_url = 'https://uniqlo.scene7.com/is/image/UNIQLO/goods_67_400711?$pdp-medium$'
payload = {'location':'park', 'temperature':99, 'leaflets': 100, 'price':.5}
# Module-level side effect: fires a test request as soon as the file runs.
res = requests.post(cloudUrl + 'testApi?modelver=2018-11-05&code=' + code, json=payload)
def testMlApi(image_url, apiUrl):
    """POST an image (downloaded and base64-encoded) to the CV test endpoint.

    Parameters
    ----------
    image_url : str
        URL of the image to download and send.
    apiUrl : str
        Base URL of the API (local or cloud).

    Returns
    -------
    str : the response body decoded as UTF-8.
    """
    file = urllib.urlopen(image_url).read()
    # BUG FIX: base64.encodestring() was removed in Python 3.9;
    # encodebytes() is the supported equivalent (identical output).
    base64_bytes = base64.encodebytes(file)
    base64_string = base64_bytes.decode('utf-8')
    raw_data = {'image': base64_string}
    r = requests.post(apiUrl + 'testCVApi?modelver=2018-11-6&code=' + code, json=raw_data)
    return r.content.decode('utf-8')


testMlApi(image_url, cloudUrl)
|
"""
Created by Alex Wang
On 2018-08-26
"""
import struct
import base64
import math
import traceback
import numpy as np
# Feature-layout constants for the video tagging pipeline.
TAG_NUM = 1746          # number of distinct tags (ids are 1-based, see tags_process)
AUDIO_FEAT_LEN = 400    # max audio frames kept per clip (zero-padded)
AUDIO_FEAT_DIM = 128    # audio feature dimension per frame
FRAME_FEAT_LEN = 200    # max video frames kept per clip (zero-padded)
FRAME_FEAT_DIM = 1024   # visual feature dimension per frame
def tags_process(tags_org):
    """Convert comma-separated tag-id strings into multi-hot vectors.

    :param tags_org: iterable of strings such as '1076,1676,1373,0,0,...'
        where 0 entries are padding and real tag ids run from 1 to TAG_NUM.
    :return: int32 array of shape (len(tags_org), TAG_NUM)
    """
    encoded = []
    for tag_str in tags_org:
        # Drop the zero-padding entries before encoding.
        ids = [int(tok) for tok in tag_str.strip().split(',') if int(tok) != 0]
        one_hot = np.zeros(shape=(TAG_NUM), dtype=np.int32)
        for tag_id in ids:
            one_hot[tag_id - 1] = 1
        encoded.append(one_hot)
    return np.asarray(encoded)
def decode_feature(feature, n, fmt="f"):
    """Decode a urlsafe-base64 string into a list of ``n`` struct values of type ``fmt``."""
    raw = base64.urlsafe_b64decode(feature)
    return list(struct.unpack(fmt * n, raw))
def audio_feat_process(audio_feat_org):
    """Decode per-clip audio features into fixed-size zero-padded arrays.

    :param audio_feat_org: iterable of tab-separated base64 frame strings
    :return: array of shape [batch_size, AUDIO_FEAT_LEN, AUDIO_FEAT_DIM]
    """
    audio_feat_new = []
    for audio_feat in audio_feat_org:
        # BUG FIX: map() is a lazy iterator on Python 3, so the original
        # len(audio_features) raised TypeError; use a list comprehension.
        # NOTE(review): fmt='Q' (unsigned 64-bit) with a 1368-char length
        # filter is unusual for float audio features -- TODO confirm.
        audio_features = [decode_feature(item.strip(), AUDIO_FEAT_DIM, fmt='Q')
                          for item in audio_feat.split("\t")
                          if len(item.strip()) == 1368]
        len_fea = len(audio_features)
        new_arr = np.zeros(shape=(AUDIO_FEAT_LEN, AUDIO_FEAT_DIM))
        new_arr[0:len_fea, :] = np.asarray(audio_features)
        # print('shape of audio_features:{}'.format(np.asarray(new_arr).shape)) # (400, 128)
        audio_feat_new.append(new_arr)
    return np.array(audio_feat_new)
def frame_feat_process(frame_feat_org):
    """Decode per-clip visual frame features into fixed-size padded arrays.

    :param frame_feat_org: iterable of tab-separated base64 frame strings
    :return: array of shape [batch_size, FRAME_FEAT_LEN, FRAME_FEAT_DIM]
    """
    frame_feat_new = []
    for frame_feat in frame_feat_org:
        # BUG FIX: map() is a lazy iterator on Python 3, so the original
        # len(rgb_features) raised TypeError; use a list comprehension.
        rgb_features = [decode_feature(item.strip(), 1024, fmt='f')
                        for item in frame_feat.split("\t")
                        if len(item.strip()) == 5464]
        len_fea = len(rgb_features)
        new_arr = np.zeros(shape=(FRAME_FEAT_LEN, FRAME_FEAT_DIM))
        new_arr[0:len_fea, :] = np.asarray(rgb_features)
        frame_feat_new.append(new_arr)
    return np.array(frame_feat_new)
def image_feat_process(image_feat_org):
    """Decode per-image feature strings into a [batch_size, 2048] array."""
    decoded = [np.asarray(decode_feature(feat, 2048)) for feat in image_feat_org]
    return np.array(decoded)
def frame_feat_process_lstm(frame_feat_org):
    """Decode frame features plus per-clip LSTM sequence lengths.

    :param frame_feat_org: iterable of tab-separated base64 frame strings
    :return: (features [batch, FRAME_FEAT_LEN, FRAME_FEAT_DIM], x_length [batch])
    """
    # layer 2 (batch * (198/4-4) * 256) = (batch * 45 * 256)
    x_length = np.ones(dtype=np.int32, shape=(len(frame_feat_org))) * 45
    frame_feat_new = []
    for idx, frame_feat in enumerate(frame_feat_org):
        # BUG FIX: map() is a lazy iterator on Python 3, so the original
        # len(rgb_features) raised TypeError; use a list comprehension.
        rgb_features = [decode_feature(item.strip(), 1024, fmt='f')
                        for item in frame_feat.split("\t")
                        if len(item.strip()) == 5464]
        len_fea = len(rgb_features)
        # Project the true frame count onto the 45-step LSTM grid, capped at 45.
        lstm_len = min(math.ceil(len_fea / 200.0 * 45), 45)
        x_length[idx] = lstm_len
        new_arr = np.zeros(shape=(FRAME_FEAT_LEN, FRAME_FEAT_DIM))
        new_arr[0:len_fea, :] = np.asarray(rgb_features)
        frame_feat_new.append(new_arr)
    return np.array(frame_feat_new), x_length
def text_feat_propress(text_feat_org):
    """Parse comma-separated integer token-id strings into a 2-D int array.

    (Function name kept as-is -- including the 'propress' typo -- because
    callers depend on it.)
    """
    parsed_rows = [[int(token) for token in row.split(',')] for row in text_feat_org]
    return np.array(parsed_rows)
if __name__ == '__main__':
    # Smoke test: multi-hot encoding of two tag strings (covers boundary
    # ids 1 and TAG_NUM=1746) and parsing of two padded token-id rows.
    print(tags_process(['1076,1676,1373,1,0,0,0,0,0,0', '515,1676,0,1746,0,0,0,0,0,0']))
    text_fea = text_feat_propress(['125880,205691,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0', '125880,205691,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0'])
    print(text_fea)
    print(text_fea.shape)
import mqtt
import json
import random
class GameSession:
    """Peer-to-peer matchmaking and messaging over a shared MQTT channel.

    JSON protocol commands: 'want' (looking for an opponent), 'est'
    (establish a pairing), 'msg' (relay a payload), 'clo' (close).
    """

    def __init__(self, callback = None, context = None):
        mqtt.connect(GameSession.onMsg, self)
        # Random id used to address messages; collisions are possible but
        # unlikely at this range.
        self.id = random.randint(1, 1000000000)
        print("I am:", self.id)
        self.wants = False        # currently looking for an opponent?
        self.callback = callback  # invoked as callback(body, context) on 'msg'
        self.context = context
        self.opponent = 0         # opponent id; 0 means unpaired

    def close(self):
        """Notify the opponent (if any) and disconnect from the broker."""
        if self.opponent != 0:
            mqtt.publish(json.dumps({'cmd':'clo','from':self.id,'to':self.opponent}))
        mqtt.disconnect()

    def send(self, message):
        """Relay a message payload to the current opponent."""
        if self.opponent == 0:
            print("I don't have any opponent")
            return
        msg = {'cmd':'msg','to':self.opponent,'body':message}
        mqtt.publish(json.dumps(msg))

    @staticmethod
    def onMsg(msg, self):
        """MQTT receive hook: parse the JSON payload and dispatch on 'cmd'."""
        print('msg', msg)
        # FIX: renamed the local from 'dict' to avoid shadowing the builtin.
        data = json.loads(msg)
        print('dict', data)
        if 'cmd' in data:
            cmd = data['cmd']
            if cmd == 'want':
                self.onWant(data['id'])
            elif cmd == 'est':
                self.onEstablished(data['from'], data['to'])
            elif cmd == 'msg':
                if data['to'] != self.id:
                    print("Not a message for me")
                    return
                # FIX: identity comparison for None instead of '== None'.
                if self.callback is None:
                    print("No callback is assigned. Ignoring.")
                    return
                cb = self.callback
                print("cb:", cb)
                cb(data['body'], self.context)
            elif cmd == 'clo':
                self.opponent = 0

    def onWant(self, id):
        """Handle a 'want' broadcast: pair with the sender if we are looking."""
        if not self.wants:
            print("Now I don't want to connect:", id)
            return
        if id == self.id:
            print("I's me. Ignoring")
            return
        self.opponent = id
        self.wants = False
        msg = {'cmd':'est','from':self.id,'to':id}
        mqtt.publish(json.dumps(msg))

    def onEstablished(self, fromId, toId):
        """Handle an 'est' reply addressed to us: finalize the pairing."""
        if not self.wants:
            print("I'm not in the mood:", fromId, "->", toId)
            return
        if toId != self.id:
            print("To is not me:", self.id, " <> ", toId)
            return
        self.opponent = fromId
        self.wants = False
        print("Established")

    def wantGame(self, wants):
        """Toggle matchmaking; broadcasting 'want' when turned on."""
        self.wants = wants
        if wants:
            msg = {'cmd':'want','id':self.id}
            mqtt.publish(json.dumps(msg))
# This code helps to find out feature multipliers for KNN.
# This is shown using some features derived by me but this method can be extended for other features as well.
# One needs to derive his own features and then apply similar approach to get the correct weights.
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from sklearn.linear_model import LogisticRegression
# Fit a per-sample logistic regression on local manhattan distances to learn
# per-feature multipliers for a KNN distance metric.
lr = LogisticRegression()

recent_train = pd.read_csv('../Kaggle_Datasets/Facebook/train.csv')
# select a single x_y_grid at random
recent_train = recent_train[
    (recent_train["x"] > 4.5) & (recent_train["x"] < 5) & (recent_train["y"] > 2) & (recent_train["y"] < 2.3)]
# derive some cyclic/time features from the raw 'time' column (minutes)
recent_train["x"], recent_train["y"] = recent_train["x"] * 1000, recent_train["y"] * 1000
recent_train["hour"] = recent_train["time"] // 60
recent_train["hour_of_day"] = recent_train["hour"] % 24 + 1
recent_train["day"] = recent_train["hour"] // 24
recent_train["day_of_week"] = recent_train["day"] % 7 + 1
recent_train["month"] = recent_train["day"] // 30 + 1
recent_train["month_of_year"] = (recent_train["month"] - 1) % 12 + 1
recent_train["sine"] = np.sin(2 * np.pi * recent_train["hour_of_day"] / 24)
recent_train["cos"] = np.cos(2 * np.pi * recent_train["hour_of_day"] / 24)
recent_train["year"] = recent_train["day"] // 365 + 1
print("recent_train created")
# creating arbitrary test
test = recent_train.sample(axis=0, frac=0.05)
print("selected_part and test created")
features = ["x", "y", "hour_of_day", "day_of_week", "month_of_year", "year", "sine", "cos", "accuracy"]
constant = [0, 0, 0, 0, 0, 0, 0, 0, 0]
print(len(test))
colname = str(features)
# Placeholder so the column gets object dtype and can hold coefficient arrays.
test[colname] = list
index = test.index
test["done"] = 0
for i in index:
    # manhattan distance between train and test[i]
    new_ld = abs(recent_train[features] - test.loc[i][features])
    new_ld = new_ld.drop(i)
    new_ld["target"] = (recent_train["place_id"] != test.loc[i]["place_id"]) + 0
    # select 100 nearest points based on x+2y distance
    new_ld["x+y"] = (new_ld["x"]) + (2 * new_ld["y"])
    # FIX: DataFrame.sort() was removed from pandas; sort_values() is the
    # modern equivalent.
    new_ld = new_ld.sort_values("x+y")[0:100]
    true = new_ld[new_ld["target"] == 0]
    false = new_ld[new_ld["target"] != 0]
    # check for skewness
    if (len(true) < 20) | (len(false) < 20):
        print("skipped test sample -", i)
        continue
    # get the multipliers which can distinguish between 0 and 1
    lr.fit(new_ld[features], new_ld["target"])
    # FIX: DataFrame.set_value() was removed from pandas; use .at instead.
    test.at[i, colname] = np.maximum(constant, lr.coef_.ravel())
    # actual_test.set_value(i,colname,lr.coef_.ravel())
    test.at[i, "done"] = 1
    print("done test sample", i)
# average or sum all the multipliers to get overall multiplier
actual_test2 = test[test["done"] == 1]
final_weights = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0])
for lists in actual_test2[colname]:
    final_weights = final_weights + lists
print(features)
print("corresponding weights")
print(final_weights)
# Example output from a previous run (kept for reference):
# ['x', 'y', 'hour_of_day', 'day_of_week', 'month_of_year', 'year', 'sine', 'cos', 'accuracy']
weights = [24.2373305, 40.77935942, 109.38474695, 118.08743198, 105.51443127, 466.62561155,
           396.0134498, 475.13596255, 7.55050706]
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import reverse , resolve
from ..forms import NewWorkerForm
from ..models import Work, Worker, Assignment
from ..views import new_worker
# test_view_new_worker
#from datetime import datetime
class NewWorkerTests(TestCase):
    """Tests for the new_worker view: GET rendering, URL resolution, CSRF,
    and the POST validation paths, all executed as a logged-in user.
    """

    def setUp(self):
        self.work = Work.objects.create(name='Elec', description='Electrician.')
        print(" In New Worker Tests Crated Work with name " + self.work.name)
        print("self.work is " + str(self.work.id))
        User.objects.create_user(
            username='john', email='john@doe.com', password='123'
        )
        self.client.login(username='john', password='123')

    def test_new_worker_view_success_status_code(self):
        url = reverse('new_worker', kwargs={'pk': 1})
        response = self.client.get(url)
        # FIX: assertEquals is a deprecated alias removed in Python 3.12;
        # use assertEqual (applied throughout this class).
        self.assertEqual(response.status_code, 200)

    def test_new_worker_view_not_found_status_code(self):
        url = reverse('new_worker', kwargs={'pk': 99})
        response = self.client.get(url)
        self.assertEqual(response.status_code, 404)

    def test_new_worker_url_resolves_new_worker_view(self):
        view = resolve('/works/1/new/')
        self.assertEqual(view.func, new_worker)

    def test_new_worker_view_contains_link_back_to_work_workers_view(self):
        new_worker_url = reverse('new_worker', kwargs={'pk': 1})
        work_workers_url = reverse('work_workers', kwargs={'pk': 1})
        response = self.client.get(new_worker_url)
        self.assertContains(response, 'href="{0}"'.format(work_workers_url))

    def test_csrf(self):
        url = reverse('new_worker', kwargs={'pk': 1})
        response = self.client.get(url)
        self.assertContains(response, 'csrfmiddlewaretoken')

    def test_contains_form(self):
        url = reverse('new_worker', kwargs={'pk': 1})
        response = self.client.get(url)
        form = response.context.get('form')
        self.assertIsInstance(form, NewWorkerForm)

    def test_new_worker_valid_post_data(self):
        print("Inside test_new_worker_valid_post_data(self)")
        url = reverse('new_worker', kwargs={'pk': 1})
        # Only the required form fields; the remaining Worker fields
        # (aadhaar, phone, addresses, dob, ...) are optional here.
        data = {
            'short_name': 'Duguna',
            'full_Name': 'Duguna Jameendar',
            'asg_start_date': '2020-04-29',
        }
        response = self.client.post(url, data)
        self.assertTrue(Worker.objects.exists())
        self.assertTrue(Assignment.objects.exists())

    def test_new_worker_invalid_post_data(self):
        '''
        Invalid post data should not redirect
        The expected behavior is to show the form again with validation errors
        '''
        print("test_new_worker_invalid_post_data(self)")
        url = reverse('new_worker', kwargs={'pk': 1})
        response = self.client.post(url, {})
        form = response.context.get('form')
        self.assertEqual(response.status_code, 200)
        self.assertTrue(form.errors)

    def test_new_worker_invalid_post_data_empty_fields(self):
        '''
        Invalid post data should not redirect
        The expected behavior is to show the form again with validation errors
        '''
        print("Inside test_new_worker_invalid_post_data_empty_fields(self)")
        url = reverse('new_worker', kwargs={'pk': 1})
        data = {
            'short_name': '',
            'asg_start_date': '2020-04-15',
            'full_Name': ''
        }
        print(data)
        response = self.client.post(url, data)
        self.assertEqual(response.status_code, 200)
        self.assertFalse(Worker.objects.exists())
        self.assertFalse(Assignment.objects.exists())
class LoginRequiredNewWorkerTests(TestCase):
    """Anonymous access to the new-worker page must redirect to login."""

    def setUp(self):
        Work.objects.create(name='Baker', description='Bakery Worker.')
        self.url = reverse('new_worker', kwargs={'pk': 1})
        self.response = self.client.get(self.url)

    def test_redirection(self):
        login_url = reverse('login')
        expected = '{login_url}?next={url}'.format(login_url=login_url, url=self.url)
        self.assertRedirects(self.response, expected)
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
__version__ = '1.0.1'
# Soft-delete one nvl_linestring row by id. $1 is an optional owner filter:
# passing NULL skips the ownership check, otherwise nls.user_id must match.
delete_nvl_linestring_element_query = """
UPDATE public.nvl_linestring AS nls SET deleted = TRUE,
active = FALSE WHERE ($1::BIGINT is NULL OR nls.user_id = $1::BIGINT) AND nls.id = $2::BIGINT RETURNING *;
"""
# Same soft-delete, but matching every row with the given location_id.
delete_nvl_linestring_element_by_location_id_query = """
UPDATE public.nvl_linestring AS nls SET deleted = TRUE,
active = FALSE WHERE ($1::BIGINT is NULL OR nls.user_id = $1::BIGINT) AND nls.location_id = $2::BIGINT RETURNING *;
"""
|
# Copyright 2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ------------------------------------------------------------------------------
import asyncio
import logging
from pymongo import MongoClient
from collections import defaultdict
import datetime
from config.config import MongoDBConfig
LATEST_BLOCK_NUM = """
SELECT max(block_num) FROM blocks
"""
LOGGER = logging.getLogger(__name__)
class Database(object):
    """Manages the connection to the MongoDB instance used by the B4E
    explorer and exposes query/statistics helpers for its collections.

    NOTE(review): the commit/rollback/async fetch_* methods are stubs kept
    from an earlier (apparently postgres-based) interface; they are no-ops.
    """
    def __init__(self):
        # Connection and collection handles; populated by connect().
        self.mongo = None
        self.b4e_db = None
        self.b4e_actor_collection = None
        self.b4e_record_collection = None
        self.b4e_record_manager_collection = None
        self.b4e_voting_collection = None
        self.b4e_vote_collection = None
        self.b4e_environment_collection = None
        self.b4e_class_collection = None
        self.b4e_block_collection = None

    def connect(self, host=MongoDBConfig.HOST, port=MongoDBConfig.PORT, user_name=MongoDBConfig.USER_NAME,
                password=MongoDBConfig.PASSWORD):
        """Open the MongoClient (authenticated only when both credentials
        are non-empty) and bind all collection handles."""
        if (user_name != "" and password != ""):
            url = f"mongodb://{user_name}:{password}@{host}:{port}"
            self.mongo = MongoClient(url)
        else:
            self.mongo = MongoClient(host=host, port=int(port))
        self.create_collections()

    def create_collections(self):
        """Bind one handle per collection name configured in MongoDBConfig."""
        self.b4e_db = self.mongo[MongoDBConfig.DATABASE]
        self.b4e_actor_collection = self.b4e_db[MongoDBConfig.ACTOR_COLLECTION]
        self.b4e_record_collection = self.b4e_db[MongoDBConfig.RECORD_COLLECTION]
        self.b4e_record_manager_collection = self.b4e_db[MongoDBConfig.RECORD_MANAGER_COLLECTION]
        self.b4e_voting_collection = self.b4e_db[MongoDBConfig.VOTING_COLLECTION]
        self.b4e_vote_collection = self.b4e_db[MongoDBConfig.VOTE_COLLECTION]
        self.b4e_environment_collection = self.b4e_db[MongoDBConfig.ENVIRONMENT_COLLECTION]
        self.b4e_class_collection = self.b4e_db[MongoDBConfig.CLASS_COLLECTION]
        self.b4e_block_collection = self.b4e_db[MongoDBConfig.BLOCK_COLLECTION]

    def disconnect(self):
        self.mongo.close()

    # No-op transaction hooks (MongoDB autocommits here).
    def commit(self):
        pass

    def rollback(self):
        pass

    # Unimplemented async interface stubs -- TODO confirm whether callers
    # still rely on them before removing.
    async def create_auth_entry(self,
                                public_key,
                                encrypted_private_key,
                                hashed_password):
        pass

    async def fetch_agent_resource(self, public_key):
        pass

    async def fetch_all_agent_resources(self):
        pass

    async def fetch_auth_resource(self, public_key):
        pass

    async def fetch_record_resource(self, record_id):
        pass

    async def fetch_all_record_resources(self):
        pass

    def num_institutions(self):
        # Count all actors with the INSTITUTION role.
        key = {"role": "INSTITUTION"}
        actors = list(self.b4e_actor_collection.find(key))
        return len(actors)

    def num_active_institutions(self):
        key = {"role": "INSTITUTION", "status": "ACTIVE"}
        actors = list(self.b4e_actor_collection.find(key))
        return len(actors)

    def list_active_institutions(self):
        key = {"role": "INSTITUTION", "status": "ACTIVE"}
        actors = list(self.b4e_actor_collection.find(key))
        return actors

    """
    Certificates issued by each institution per season.
    """

    def num_cert_each_season(self):
        """Return [{manager_public_key: [{season: count}, ...]}, ...] built
        from each institution's CERTIFICATE timestamps."""
        records_institutions = self.b4e_record_manager_collection.find()
        statistic = []
        for institution in records_institutions:
            certs = institution['CERTIFICATE']
            cert_seasons = {}
            list_cert_season = []
            for cert in certs:
                timestamp = cert['timestamp']
                date_time = timestamp_to_datetime(timestamp)
                season = get_season(date_time)
                if not cert_seasons.get(season):
                    cert_seasons[season] = 1
                else:
                    cert_seasons[season] += 1
            for season in cert_seasons:
                list_cert_season.append({season: cert_seasons[season]})
            statistic.append({institution.get("manager_public_key"): list_cert_season})
        return statistic

    def num_subject_each_season(self):
        """Same per-season tally as num_cert_each_season, but for SUBJECT
        records."""
        records_institutions = self.b4e_record_manager_collection.find()
        statistic = []
        for institution in records_institutions:
            subjects = institution['SUBJECT']
            subject_seasons = {}
            list_subject_season = []
            for subject in subjects:
                timestamp = subject['timestamp']
                date_time = timestamp_to_datetime(timestamp)
                season = get_season(date_time)
                if not subject_seasons.get(season):
                    subject_seasons[season] = 1
                else:
                    subject_seasons[season] += 1
            for season in subject_seasons:
                list_subject_season.append({season: subject_seasons[season]})
            statistic.append({institution.get("manager_public_key"): list_subject_season})
        return statistic

    def num_point_a_season(self):
        # NOTE(review): groups certificates by manager_public_key but never
        # returns or uses the result -- this method looks unfinished.
        key = {"record_type": "CERTIFICATE"}
        records = list(self.b4e_record_collection.find(key))
        groups = defaultdict(list)
        for obj in records:
            groups[obj['manager_public_key']].append(obj)
def timestamp_to_datetime(timestamp):
    """Convert a POSIX timestamp into a naive local-time datetime."""
    converted = datetime.datetime.fromtimestamp(timestamp)
    return converted
def to_time_stamp(date_time):
    """Inverse of timestamp_to_datetime: datetime -> POSIX timestamp."""
    return date_time.timestamp()
def get_season(date_time):
    """Map a datetime to a season key '<year><season>'.

    Months 7..11 are season 2; every other month -- including December,
    presumably intentional, TODO confirm -- is season 1.
    """
    season_no = 2 if 6 < date_time.month < 12 else 1
    return str(date_time.year) + str(season_no)
|
import chainer
import cv2
import numpy as np
from chainer import serializers, Variable
from colorize import unet
from colorize.img2imgDataset import ImageAndRefDataset
class Painter:
    """Two-stage line-art colorization using pre-trained UNET models
    (a 128-px draft pass refined by a 512-px pass)."""

    def __init__(self, gpu=-1):
        # Fixed image directories: input line art, color reference, output.
        self.line_image_dir = 'images/line/'
        self.ref_image_dir = 'images/ref/'
        self.color_image_dir = 'images/color/'
        self.gpu = gpu  # CUDA device id; negative means CPU
        if self.gpu >= 0:
            chainer.cuda.get_device(self.gpu).use()
            chainer.cuda.set_max_workspace_size(64 * 1024 * 1024)  # 64MB
        self.cnn_128 = self.__load_cnn('colorize/unet_128_standard')
        self.cnn_512 = self.__load_cnn('colorize/unet_512_standard')

    def __load_cnn(self, model):
        """Instantiate a UNET, move it to GPU if configured, and load weights."""
        cnn = unet.UNET()
        if self.gpu >= 0:
            cnn.to_gpu()
        serializers.load_npz(model, cnn)
        return cnn

    def colorize(self, unique_name):
        """Colorize images/line/<unique_name>.png using its reference image;
        returns the path of the written JPEG."""
        dataset = ImageAndRefDataset([f'{unique_name}.png'], self.line_image_dir, self.ref_image_dir)
        # TODO Extract parameter for optimization
        sample = dataset.get_example(0, minimize=True, blur=0, s_size=128)
        # Stage 1: 128-px draft colorization (4-channel input batch of one).
        sample_container = np.zeros((1, 4, sample[0].shape[1], sample[0].shape[2]), dtype='f')
        sample_container[0, :] = sample[0]
        sample_container = sample_container if self.gpu < 0 else chainer.cuda.to_gpu(sample_container)
        with chainer.no_backprop_mode():
            with chainer.using_config('train', False):
                image_conv2d_layer = self.cnn_128.calc(Variable(sample_container))
        del sample_container
        # Stage 2 input: full-res line image plus the upscaled draft colors.
        input_bat = np.zeros((1, 4, sample[1].shape[1], sample[1].shape[2]), dtype='f')
        input_bat[0, 0, :] = sample[1]
        output = chainer.cuda.to_cpu(image_conv2d_layer.data[0])
        del image_conv2d_layer
        for channel in range(3):
            input_bat[0, 1 + channel, :] = cv2.resize(
                output[channel, :],
                (sample[1].shape[2], sample[1].shape[1]),
                interpolation=cv2.INTER_CUBIC
            )
        link = input_bat if self.gpu < 0 else chainer.cuda.to_gpu(input_bat, None)
        with chainer.no_backprop_mode():
            with chainer.using_config('train', False):
                image_conv2d_layer = self.cnn_512.calc(Variable(link))
        del link
        color_path = self.color_image_dir + f'{unique_name}.jpg'
        save_as_img(image_conv2d_layer.data[0], color_path)
        del image_conv2d_layer
        return color_path
def save_as_img(array, path):
    """Write a CHW float image array to ``path``, converting YUV to RGB/BGR."""
    hwc = array.transpose(1, 2, 0)
    hwc = hwc.clip(0, 255).astype(np.uint8)
    hwc = chainer.cuda.to_cpu(hwc)
    (major, minor, _) = cv2.__version__.split(".")
    # OpenCV 3 vs later: pick the matching YUV conversion constant.
    conversion = cv2.COLOR_YUV2RGB if major == '3' else cv2.COLOR_YUV2BGR
    cv2.imwrite(path, cv2.cvtColor(hwc, conversion))
if __name__ == '__main__':
    # Manual smoke test: colorize images/line/test.png on the default device.
    Painter().colorize('test')
|
import sys
import re
import shlex
import subprocess as sp
# Directive patterns: "(!> cmd <)" runs a shell command and inlines its
# output; "(> path <)" inlines the contents of a file. The leading
# whitespace captured in group 1 becomes the indent of the inserted lines.
exe_pat = re.compile(r'(\s*)\(!>(.*)<\)\s*')
inc_pat = re.compile(r'(\s*)\(>(.*)<\)\s*')
if __name__ == "__main__":
    # Stream stdin to stdout, expanding execute/include directives in place.
    for line in sys.stdin:
        match_exe = re.match(exe_pat, line)
        match_inc = re.match(inc_pat, line)
        if match_exe:
            indent = match_exe.group(1)
            command = match_exe.group(2).strip()
            args = shlex.split(command)
            # BUG FIX: check_output returns bytes on Python 3; decode before
            # splitting so the str concatenation below works.
            output_lines = sp.check_output(args).decode().split("\n")
            sys.stdout.writelines(indent + text + "\n" for text in output_lines)
        elif match_inc:
            indent = match_inc.group(1)
            inc_path = match_inc.group(2).strip()
            # Inline the file line-by-line, re-indented to the directive.
            sys.stdout.writelines(indent + text for text in open(inc_path))
        else:
            sys.stdout.write(line)
|
from django.views.generic.base import TemplateView
from django.contrib.auth.mixins import LoginRequiredMixin
# Dashboard landing page; LoginRequiredMixin sends anonymous users to the
# login page before the template is rendered.
class IndexView(LoginRequiredMixin, TemplateView):
    template_name = 'dashboard/home.html'
|
# Classify each sentence by its politeness suffix:
# "po" -> Filipino, "desu"/"masu" -> Japanese, anything else -> Korean.
test_case = int(input())
for _ in range(test_case):
    sentence = input()
    if sentence.endswith('po'):
        print('FILIPINO')
    elif sentence.endswith(('masu', 'desu')):
        print('JAPANESE')
    else:
        print('KOREAN')
from util import *
from get_word_embeddings import *
import random
import tensorflow as tf
import numpy as np
import sys
import pandas as pd
import pandas as pd
from sklearn.metrics import classification_report
from sklearn.metrics import confusion_matrix
from sklearn.metrics import precision_score
from sklearn.metrics import recall_score
import statistics
# TF1-style graph mode is required for the placeholder/session API below.
tf.compat.v1.disable_eager_execution()
# Initialise hyperparameters
r = random.Random()
lim_unigram = 5000        # vocabulary cap for the bag-of-words features
target_size = 4           # four stance classes (agree/disagree/discuss/unrelated)
hidden_size = 100         # MLP hidden layer width
train_keep_prob = 0.6     # dropout keep-probability during training
l2_alpha = 0.00001        # L2 regularisation weight
learn_rate = 0.01
clip_ratio = 5            # gradient-clipping threshold
batch_size_train = 500
epochs = 90
def reshape_arrays(trainX, trainY, testX, testY):
    """Coerce the four data splits to ndarrays (stacking their elements)."""
    def _stack(seq):
        return np.array(list(seq))
    return _stack(trainX), _stack(trainY), _stack(testX), _stack(testY)
def change_labels_to_numeric(labels):
    """Map stance strings to class ids: agree=0, disagree=1, discuss=2,
    unrelated=3 (unknown labels stay None, as in the original)."""
    mapping = {'agree': 0, 'disagree': 1, 'discuss': 2, 'unrelated': 3}
    y = np.array([None] * labels.shape[0])
    for name, class_id in mapping.items():
        y[np.where(labels == name)[0]] = class_id
    return y
def load_pretrained_vectors(X_train, X_test, y_train, y_test):
    """Load pre-generated .npy embedding splits from disk.

    Returns (trainX, trainY, testX, testY, n_train, feature_size).
    """
    trainX, trainY = np.load(X_train), np.load(y_train)
    testX, testY = np.load(X_test), np.load(y_test)
    trainX, trainY, testX, testY = reshape_arrays(trainX, trainY, testX, testY)
    return trainX, trainY, testX, testY, trainX.shape[0], len(trainX[0])
def generate_word_embeddings(data_source, file_train_bodies, file_train_instances, file_test_bodies, file_test_instances):
    """Build word-embedding features for both splits, convert stance labels
    to ids, cache everything as .npy under data_source, and return
    (trainX, trainY, testX, testY, n_train, feature_size).
    """
    gwe = get_word_embeddings()
    trainX, trainY = gwe.get_Xy(file_train_bodies, file_train_instances)
    trainY = change_labels_to_numeric(trainY)
    # Sizes are taken from the train split before test processing.
    feature_size = len(trainX[0])
    n_train = trainX.shape[0]
    testX, testY = gwe.get_Xy(file_test_bodies, file_test_instances)
    testY = change_labels_to_numeric(testY)
    trainX, trainY, testX, testY = reshape_arrays(trainX, trainY, testX, testY)
    # Cache so later runs can use load_pretrained_vectors instead.
    np.save(data_source + "/trainX.npy", np.array(trainX))
    np.save(data_source + "/trainY.npy", np.array(trainY))
    np.save(data_source + "/testX.npy", np.array(testX))
    np.save(data_source + "/testY.npy", np.array(testY))
    return trainX, trainY, testX, testY, n_train, feature_size
def generate_tf_idf(data_source, file_train_instances, file_train_bodies, file_test_instances, file_test_bodies):
    """Build TF/TF-IDF features with the FNC pipeline helpers and return
    (trainX, trainY, testX, testY, n_train, feature_size).

    NOTE(review): `data_source` is unused here -- confirm it was meant to
    be used for caching as in generate_word_embeddings.
    """
    # Load data sets
    raw_train = FNCData(file_train_instances, file_train_bodies)
    raw_test = FNCData(file_test_instances, file_test_bodies)
    n_train = len(raw_train.instances)
    # Process data sets
    trainX, trainY, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer = \
        pipeline_train(raw_train, raw_test, lim_unigram=lim_unigram)
    feature_size = len(trainX[0])
    testX = pipeline_test(raw_test, bow_vectorizer, tfreq_vectorizer, tfidf_vectorizer)
    trainX = np.array(trainX)
    trainY = np.array(trainY)
    testX = np.array(testX)
    # Test labels come straight from the stances CSV rather than the pipeline.
    testY = np.array(pd.read_csv(file_test_instances)['Stance'])
    testY = change_labels_to_numeric(testY)
    return trainX, trainY, testX, testY, n_train, feature_size
def get_data_source(data_source_id):
    """Map a numeric config id to its data directory (unknown ids fall
    back to the processed-word-embedding directory)."""
    sources = {
        1: "processed_word_embedding/",
        2: "summarized_word_embedding/",
        3: "word_embedding_with_no_preprocessing/",
    }
    return sources.get(data_source_id, "processed_word_embedding/")
def get_pretrain_word_vectors_files(data_source):
    """Build the four cached .npy paths (X_train, y_train, X_test, y_test)
    under ``data_source``."""
    names = ("trainX.npy", "trainY.npy", "testX.npy", "testY.npy")
    X_train, y_train, X_test, y_test = (data_source + name for name in names)
    return X_train, y_train, X_test, y_test
def get_summarized_data():
    """Paths for the summarized-bodies variant of the FNC data set.

    NOTE(review): file_test_instances points at train_stances.csv -- this
    looks like a copy-paste slip; confirm against the pipeline before fixing.
    """
    return ("data/train_stances.csv",
            "data/train_summarized_bodies.csv",
            "data/train_stances.csv",
            "data/test_summarized_bodies.csv")
def get_data():
    """Default FNC data-set paths: (train_instances, train_bodies,
    test_instances, test_bodies)."""
    return ("data/train_stances.csv",
            "data/train_bodies.csv",
            "data/test_stances.csv",
            "data/test_bodies.csv")
# --- Script entry: configuration and MLP training --------------------------
# NOTE(review): relies on names defined elsewhere in this file / its imports:
# tf, np, r (random), load_pretrained_vectors, generate_word_embeddings,
# hidden_size, target_size, l2_alpha, learn_rate, clip_ratio, epochs,
# batch_size_train, train_keep_prob, classification_report, confusion_matrix,
# precision_score, recall_score -- confirm before running in isolation.
file_predictions = 'data/predictions_test.csv'
# Flags to change configuration to run different approaches
pretrained_flag = 1  # Change to 0 to generate word-embeddings instead of using pre-generated ones
word2vec_flag = 1    # Change to 0 to run with Tf-Idf instead of word-embeddings (see branches below)
data_source_id = 1   # Choose appropriate data_source (see get_data_source)
data_source = get_data_source(data_source_id)

# Pick input files / pre-computed vectors according to the flags
if(pretrained_flag == 1 and word2vec_flag == 1):
    X_train, y_train, X_test, y_test = get_pretrain_word_vectors_files(data_source)
else:
    if(data_source_id == 2 and pretrained_flag != 1):
        file_train_instances, file_train_bodies, file_test_instances, file_test_bodies = get_summarized_data()
    else:
        file_train_instances, file_train_bodies, file_test_instances, file_test_bodies = get_data()

# Build feature matrices via the selected pipeline
if(word2vec_flag != 1):
    trainX, trainY, testX, testY, n_train, feature_size = generate_tf_idf(data_source, file_train_instances, file_train_bodies, file_test_instances, file_test_bodies)
elif(pretrained_flag == 1):
    trainX, trainY, testX, testY, n_train, feature_size = load_pretrained_vectors(X_train, X_test, y_train, y_test)
else:
    trainX, trainY, testX, testY, n_train, feature_size = generate_word_embeddings(data_source, file_train_bodies, file_train_instances, file_test_bodies, file_test_instances)

# Report dataset statistics
print("X_train ", trainX.shape)
print("Y_train ", trainY.shape)
print("X_test ", testX.shape)
print("Y_test ", testY.shape)
print("Number of training instances ", n_train)
print("Feature_size ", feature_size)
unique, counts = np.unique(trainY, return_counts=True)
print("Train Dataset class frequencies ", dict(zip(unique, counts)))
unique, counts = np.unique(testY, return_counts=True)
print("Test Dataset class frequencies ", dict(zip(unique, counts)))

# Create placeholders
features_pl = tf.compat.v1.placeholder(tf.float32, [None, feature_size], 'features')
stances_pl = tf.compat.v1.placeholder(tf.int64, [None], 'stances')
keep_prob_pl = tf.compat.v1.placeholder(tf.float32)

# Infer batch size
batch_size = tf.shape(input=features_pl)[0]

# Define multi-layer perceptron: one hidden ReLU layer, dropout on both layers
hidden_layer = tf.nn.dropout(tf.nn.relu(tf.contrib.layers.linear(features_pl, hidden_size)), rate=1 - (keep_prob_pl))
logits_flat = tf.nn.dropout(tf.contrib.layers.linear(hidden_layer, target_size), rate=1 - (keep_prob_pl))
logits = tf.reshape(logits_flat, [batch_size, target_size])

# Define L2 loss over all non-bias trainable variables
tf_vars = tf.compat.v1.trainable_variables()
l2_loss = tf.add_n([tf.nn.l2_loss(v) for v in tf_vars if 'bias' not in v.name]) * l2_alpha

# Define overall loss: cross-entropy plus L2 regularisation
loss = tf.reduce_sum(input_tensor=tf.nn.sparse_softmax_cross_entropy_with_logits(labels = stances_pl, logits = logits) + l2_loss)

# Define prediction: argmax over softmaxed logits
softmaxed_logits = tf.nn.softmax(logits)
predict = tf.argmax(input=softmaxed_logits, axis=1)

# Define optimiser: Adam with global-norm gradient clipping
opt_func = tf.compat.v1.train.AdamOptimizer(learn_rate)
grads, _ = tf.clip_by_global_norm(tf.gradients(ys=loss, xs=tf_vars), clip_ratio)
opt_op = opt_func.apply_gradients(zip(grads, tf_vars))

# Perform training
with tf.compat.v1.Session() as sess:
    sess.run(tf.compat.v1.global_variables_initializer())
    for epoch in range(epochs):
        total_loss = 0
        indices = list(range(n_train))
        r.shuffle(indices)  # r: presumably the random module aliased elsewhere -- TODO confirm
        for i in range(n_train // batch_size_train):
            batch_indices = indices[i * batch_size_train: (i + 1) * batch_size_train]
            batch_features = [trainX[i] for i in batch_indices]
            batch_stances = [trainY[i] for i in batch_indices]
            batch_feed_dict = {features_pl: batch_features, stances_pl: batch_stances, keep_prob_pl: train_keep_prob}
            _, current_loss = sess.run([opt_op, loss], feed_dict=batch_feed_dict)
            total_loss += current_loss
    # Predict on the test set with dropout disabled (keep_prob = 1.0)
    test_feed_dict = {features_pl: testX, keep_prob_pl: 1.0}
    test_pred = sess.run(predict, feed_dict=test_feed_dict)

'''
print(testY.shape, test_pred.shape)
unique, counts = np.unique(testY, return_counts=True)
print(dict(zip(unique, counts)))
unique, counts = np.unique(test_pred, return_counts=True)
print(dict(zip(unique, counts)))
'''
# Report test-set metrics
y_true = [int(y) for y in testY]
y_pred = [int(y) for y in test_pred]
print(classification_report(y_true, y_pred))
print(confusion_matrix(y_true, y_pred))
print("Precision : ", precision_score(y_true, y_pred, average='weighted'))
print("Recall : ", recall_score(y_true, y_pred, average='weighted'))
|
import requests
import json
import sys
import time
import datetime
try:
from utils import get_console_size
except:
# not needed in a testing environment
pass
from Transactions import *
class BitcoinTransaction:
    """One row of a bitcoin.de CSV export (German column headers).

    Parses and re-serialises the semicolon-separated transaction format;
    `amount` is in EUR for trades, `qty` and `balance` in BTC. Written
    for Python 2 (print statements, `except E, e` syntax).
    """

    def __init__(self):
        self.ts = time.time()    # unix timestamp of the transaction
        self.symbol = "BITCOIN"  # fixed instrument symbol
        self.type = None         # buy / sell / deposit / withdraw / rate
        self.details = ""        # free-text reference column
        self.price = 0.0         # EUR per BTC at transaction time
        self.eurBefore = 0.0     # EUR value before fees
        self.btcAfter = 0.0      # BTC amount after fees
        self.eurAfter = 0.0      # EUR value after fees
        self.qty = 0.0           # signed BTC quantity (negative for sell/withdraw)
        self.amount = 0.0        # EUR amount (mirrors qty for deposit/withdraw)
        self.balance = 0.0       # running BTC account balance
        # not used
        self.hid = 0

    def getSymbol(self):
        return self.symbol

    def getType(self):
        return self.type

    def getTimestamp(self):
        return self.ts

    def getPrice(self):
        return self.price

    def setId(self, hid):
        self.hid = hid

    def getId(self):
        return self.hid

    def getAmount(self):
        return self.amount

    def getQuantity(self):
        return self.qty

    def getDetails(self):
        return self.details

    def getBalance(self):
        return self.balance

    def parse(self, line):
        """Parse one semicolon-separated bitcoin.de CSV line.

        Returns True on success; False for the header row, registration
        rows, near-zero "Kurs" (rate) rows, and any malformed line (the
        error is printed, never raised).
        """
        raw = line.split(";")
        if raw[0] == "Datum":
            # Header row
            return False
        try:
            self.ts = int(time.mktime(datetime.datetime.strptime(raw[0].replace("\"",""), "%Y-%m-%d %H:%M:%S").timetuple()))
            # Map the German transaction type to an internal keyword
            if raw[1] == "Kauf":
                self.type = "buy"
            elif raw[1] == "Verkauf":
                self.type = "sell"
            elif raw[1] == "Auszahlung":
                self.type = "withdraw"
            elif raw[1] == "Einzahlung":
                self.type = "deposit"
            elif raw[1] == "Kurs":
                self.type = "rate"
            elif raw[1] == "\"Welcome Btc\"":
                # Sign-up bonus is treated like a deposit
                self.type = "deposit"
                raw[2] = "0.1 BTC welcome bonus to bitcoin.de!"
            elif raw[1] == "Registrierung":
                return False
            else:
                self.type = raw[1]
            self.details = raw[2]
            if raw[3]:
                # Strip thousands separators before converting
                self.price = float(raw[3].replace(",", ""))
                # Skip bogus near-zero rate rows
                if self.type == "rate" and self.price < 0.0001:
                    return False
            if raw[4]:
                self.eurBefore = float(raw[4].replace(",",""))
            if raw[5]:
                self.btcAfter = float(raw[5].replace(",",""))
            if raw[6]:
                self.eurAfter = float(raw[6].replace(",",""))
            if raw[7]:
                self.amount = float(raw[7].replace(",", ""))
            self.qty = float(raw[8].replace(",", ""))
            # Outgoing transactions are stored with negative quantity
            if self.type == "withdraw" or self.type == "sell":
                self.qty *= -1
            # Pure BTC movements: amount mirrors the (signed) quantity
            if self.type == "deposit" or self.type == "withdraw":
                self.amount = self.qty
            self.balance = float(raw[9].replace(",", ""))
            return True
        except Exception, e:
            print "failed to parse {:s}, error: {:s}".format(raw, str(e))
            return False

    def getHeader(self):
        # CSV header matching the bitcoin.de export columns
        return "Datum;Typ;Referenz;\"Kurs (EUR/BTC)\";\"BTC vor Gebuehr\";\"EUR vor Gebuehr\";\"BTC nach Gebuehr\";\"EUR nach Gebuehr\";\"Zu- / Abgang\";Kontostand"

    def __eq__(self, other):
        # Two transactions are equal when timestamp and symbol match
        sameTs = (int(self.ts) == int(other.getTimestamp()))
        sameId = (self.symbol == other.getSymbol())
        return (sameTs and sameId)

    def __str__(self):
        """Serialise back to the CSV row format (inverse of parse)."""
        qty = self.qty
        amount = self.amount
        # Undo the sign flip applied by parse() for outgoing transactions
        if self.type == "sell" or self.type == "withdraw":
            qty *= -1
        # Translate the internal type keyword back to the German label
        t = "Kurs"
        if self.type == "sell":
            t = "Verkauf"
        elif self.type == "buy":
            t = "Kauf"
        elif self.type == "deposit":
            t = "Einzahlung"
        elif self.type == "withdraw":
            t = "Auszahlung"
        return "\"%s\";%s;%s;%f;%f;%f;%f;%f;%f;%f" % \
            (datetime.datetime.fromtimestamp(self.ts).strftime('%Y-%m-%d %H:%M:%S'), \
            t,\
            self.details,\
            self.price,\
            self.eurBefore,\
            self.btcAfter,\
            self.eurAfter,\
            amount,\
            qty,\
            self.balance)
class Bitcoin:
    """Portfolio tracker backed by a bitcoin.de CSV export.

    Fetches the current BTC/EUR rate from bitcoinapi.de, loads/merges/
    stores BitcoinTransaction records, and prints portfolio summaries.
    Written for Python 2 (print statements, integer division).
    """

    def __init__(self, conf):
        # conf must expose btc_de_api_key (see the config module)
        self.conf = conf
        self.apiKey = self.conf.btc_de_api_key
        self.btc2eur = 0.0  # last fetched BTC -> EUR rate
        self.eur2btc = 0.0  # inverse rate
        self.transactions = Transactions()

    def fetchData(self):
        """Fetch the weighted BTC/EUR rate; returns it, or None on error."""
        try:
            r = requests.get("https://bitcoinapi.de/v1/%s/rate.json" % self.apiKey)
        except requests.exceptions.ConnectionError:
            print "failed to resolve bitcoinapi.de"
            return None
        try:
            j = json.loads(r.text)
            self.btc2eur = float(j["rate_weighted"])
            self.eur2btc = 1/self.btc2eur
        except Exception, e:
            print "failed to fetch bitcoin price"
            print str(e)
            return None
        return self.btc2eur

    def loadTransactionFile(self, filename, start=-1):
        """Load transactions from a CSV file.

        start: if >= 0, only keep transactions at most `start` days old.
        """
        f = open(filename, "r")
        content = f.read()
        f.close()
        transactions = []
        for line in content.split("\n"):
            if not line:
                continue
            b = BitcoinTransaction()
            current = int(time.time())
            if b.parse(line):
                # Age filter: (now - ts) in days must stay below `start`
                if start < 0 or (current - b.getTimestamp())/(60*60*24)<start:
                    transactions.append(b)
        self.transactions.addTransactions(transactions)
        self.transactions.sortTransactions()

    def mergeTransactionFile(self, filename):
        """Merge previously unseen transactions from another CSV file."""
        # load file
        f = open(filename, "r")
        content = f.read()
        f.close()
        cnt = 0
        for line in content.split("\n"):
            if not line:
                continue
            b = BitcoinTransaction()
            if b.parse(line):
                if self.insertTransaction(b):
                    print "added transaction {:s}".format(str(b))
                    cnt +=1
        self.transactions.sortTransactions()
        print "merged {:d} transactions".format(cnt)

    def insertTransaction(self, transaction):
        """Add a transaction unless an equal one (same timestamp+symbol)
        already exists; return True if it was added."""
        if not transaction in self.transactions:
            self.transactions.addTransactions([transaction])
            return True
        return False

    def store(self, filename):
        """Write all transactions back to CSV (header plus one row each)."""
        content = "{:s}\n".format(BitcoinTransaction().getHeader())
        for t in self.transactions.transactions:
            content += "{:s}\n".format(t)
        f = open(filename, "w")
        f.write(content)
        f.close()

    def getBalance(self):
        """Current BTC balance: deposits + buys - withdrawals - sells."""
        buy = self.transactions.getBuyQuantity()
        sel = self.transactions.getSellQuantity()
        wit = self.transactions.getWithdrawAmount()
        dep = self.transactions.getDepositAmount()
        return (dep+buy-wit-sel)

    def setEndDate(self, timestamp):
        # Restrict transaction queries to entries before `timestamp`
        self.transactions.setEndDate(timestamp)

    def getInvest(self):
        """Net EUR flow from trading (sell proceeds minus buy cost)."""
        buy = self.transactions.getBuyAmount()
        sel = self.transactions.getSellAmount()
        return sel - buy

    def exchange(self, btc):
        """Convert a BTC amount to EUR at the last fetched rate."""
        return btc * self.btc2eur

    def getBuyRate(self):
        """Average EUR/BTC rate paid across all buys (0.0 if none)."""
        amount = self.transactions.getBuyAmount()
        buy = self.transactions.getBuyQuantity()
        if buy == 0:
            return 0.0
        return amount / buy

    def getTrend(self):
        """Percent change of the current rate vs. the average buy rate."""
        rate = self.getBuyRate()
        if self.transactions.getBuyQuantity() == 0:
            return 0.0
        return ((self.btc2eur / rate) - 1.0) * 100

    def printBitcoin(self):
        """Print a two-row portfolio summary table sized to the console.

        NOTE(review): relies on Python 2 integer division for column
        widths and the banner split (console_width / 5) -- confirm the
        interpreter version before porting.
        """
        console_width = get_console_size()["width"]
        print "Your Portfolio:"
        print "-" * console_width
        # Header formats/labels (first and second header line)
        fmts = [".2f", "s", "s", ".3f", ".5f", ".5f", ".3f"]
        header = ["Trend (%)", "Buys (Price)", "", "Market (B)",
                  "Divs (B)", "Mean (B)", "Win (B)"]
        fmts2 = [".2f", "s", "d", ".3f", ".5f", ".5f", ".2f"]
        header2 = ["Overall (%)", "Sells (Price)", "Sum", "Book (B)",
                   "Fee (B)", "Cur (B)", "Win (E)"]
        colwidth = (console_width / len(header)) - 3
        fill = " | "
        print fill.join("{:>{}s}".format(h, colwidth) \
            for f, h in zip(fmts, header))
        print fill.join("{:>{}s}".format(h, colwidth) \
            for f, h in zip(fmts, header2))
        # Per-column format strings for the two data rows
        fmts = [".2f", ".2f", "s", ".2f", "s", ".5f", "s"]
        header = ["Trend (%)", "Buys", "", "Market (B)",
                  "Divs (B)", "Mean (B)", "Win (B)"]
        #fmts2 = [".2f", "d", "d", "s", ".5f", ".5f", ".2f"]
        fmts2 = ["s", ".2f", ".2f", ".2f", "s", ".5f", ".2f"]
        header2 = ["Overall (%)", "Sells", "Sum", "Book (B)",
                   "Fee (B)", "Cur (B)", "Win (E)"]
        colwidth = (console_width / len(header)) - 3
        fill = " | "
        print "-" * console_width
        # Rotated "> Bitcoin <" banner centered with dashes
        _s = "{1:-^{0}}".format(console_width, "> Bitcoin <")
        print _s[console_width/5:] + _s[:console_width/5]
        data = [ self.getTrend(), self.transactions.getBuyQuantity(),
                 "", self.exchange(self.getBalance()), "",
                 self.getBuyRate(), "" ]
        print fill.join("{0:>{1}{2}}".format(d, colwidth, f) \
            for f, d in zip(fmts, data))
        data2 = [ "", self.transactions.getSellQuantity(), self.getBalance(),
                  self.transactions.getBuyAmount() - self.transactions.getSellAmount(),
                  "", self.btc2eur,
                  self.transactions.getSellAmount() + self.exchange(self.getBalance()) - self.transactions.getBuyAmount() ]
        print fill.join("{0:>{1}{2}}".format(d, colwidth, f) \
            for f, d in zip(fmts2, data2))
        print "-" * console_width

    def printDetails(self, full=True):
        """Print a plain-text account summary.

        full: also print buy/sell/deposit/withdraw totals.
        """
        print "Bitcoin Account Details:"
        print "------------------------------"
        buyAmount = self.transactions.getBuyAmount()
        selAmount = self.transactions.getSellAmount()
        if full:
            buy = self.transactions.getBuyQuantity()
            print "total buys:\t\t%d BTC for %0.2f EUR (rate: %0.4f EUR)" % (buy, buyAmount, self.getBuyRate())
            sel = self.transactions.getSellQuantity()
            # Average sell rate, guarding against division by zero
            if sel == 0:
                mean = 0.0
            else:
                mean = selAmount / sel
            print "total sells:\t\t%d BTC for %0.2f EUR (rate: %0.4f EUR)" % (sel, selAmount, mean)
            wit = self.transactions.getWithdrawAmount()
            print "total withdraw:\t\t%f BTC" % wit
            dep = self.transactions.getDepositAmount()
            print "total deposit:\t\t%f BTC" % dep
        print "------------------------------"
        print "current rate: %f EUR (Trend: %0.2f%%)" % (self.btc2eur, self.getTrend())
        val = self.getBalance()
        print "current balance: %f BTC (%0.2f EUR)" % (val, self.exchange(val))
        # Profit = current EUR value of holdings + realised sells - buy cost
        value = self.exchange(val) + selAmount - buyAmount
        print "in sum your profit is:\t%f EUR" % value

    def plain(self):
        """One-line machine-friendly summary of rate, trend and balance."""
        print "[Bitcoin.de]\nrate:{:0.2f},trend:{:0.2f},balance:{:0.6f}".format(self.btc2eur, self.getTrend(), self.getBalance())
if __name__ == "__main__":
    # ugly, but only for testing purpose
    import sys, os
    sys.path.append(os.path.dirname("../"))
    from config import Config
    b = Bitcoin(Config)
    #b.fetchData()
    # Usage: script <transactions.csv> [<merge.csv>]
    b.loadTransactionFile(sys.argv[1])
    b.printDetails()
    if len(sys.argv) == 3:
        # Optional second CSV: merge it in and write the result back
        mf = sys.argv[2]
        print "merge csv file: {:s}".format(mf)
        b.mergeTransactionFile(mf)
        b.printDetails()
        b.store(sys.argv[1])
b.store(sys.argv[1])
|
import glob
import imageio
import matplotlib.pyplot as plt
import numpy as np
import scipy.ndimage as nd
#from skimage import color
import math
# read the files
# path = '.\positives\ '
# Windows-style directory prefixes for the positive / negative image sets
p_path = 'path\\positives\\'
n_path = 'path\\negatives\\'
def read_files(dir, extension):
    """Recursively collect all files under *dir* with the given extension."""
    pattern = "{}**/*.{}".format(dir, extension)
    return list(glob.glob(pattern, recursive=True))
def im_to_feature(im_path):
    """Build a per-image feature matrix for every .png under *im_path*.

    For each image, six features are extracted: the per-channel maximum
    and the per-channel variance of the R, G and B planes. (The original
    locals were named *_avg although np.max is used -- renamed here; the
    unused `sobel` list and a dead `feat = []` initialiser were removed.)

    Returns an (n_images, 6) array with columns
    (r_max, g_max, b_max, r_var, g_var, b_var).
    """
    files = read_files(im_path, "png")
    r_max, g_max, b_max = [], [], []
    r_var, g_var, b_var = [], [], []
    for f in files:
        im = plt.imread(f)
        # Per-channel maxima
        r_max.append(np.max(im[:, :, 0]))
        g_max.append(np.max(im[:, :, 1]))
        b_max.append(np.max(im[:, :, 2]))
        # Per-channel variances
        r_var.append(np.var(im[:, :, 0]))
        g_var.append(np.var(im[:, :, 1]))
        b_var.append(np.var(im[:, :, 2]))
    feat = np.array((r_max, g_max, b_max, r_var, g_var, b_var))
    return np.transpose(feat)
#convert to color
def rgb2gray(rgb):
    """Convert an RGB image array to grayscale luminance.

    Uses the ITU-R BT.601 luma weights. Bug fix: the blue coefficient
    was 0.144, a typo for the standard 0.114 (the weights must sum to 1).
    """
    return np.dot(rgb[..., :3], [0.299, 0.587, 0.114])
def multiVarGaus(mu, sigma, x):
    """Evaluate the multivariate Gaussian density N(mu, sigma) at point x.

    mu : length-d mean vector
    sigma : (d, d) covariance matrix -- any array-like is accepted now
        (the original required np.matrix because it used the deprecated
        `.I` inverse attribute; np.linalg.inv works for both).
    x : length-d evaluation point

    Returns the scalar density value.
    Raises NameError for a singular covariance or mismatched dimensions
    (NameError kept for backward compatibility with existing callers).
    """
    size = len(x)
    sigma = np.asarray(sigma)
    if size != len(mu) or sigma.shape != (size, size):
        raise NameError("The dimensions of the input don't match")
    det = np.linalg.det(sigma)
    if det == 0:
        raise NameError("The covariance matrix can't be singular")
    # Normalisation constant: 1 / ((2*pi)^(d/2) * |sigma|^(1/2))
    norm_const = 1.0 / (math.pow((2 * np.pi), float(size) / 2) * math.pow(det, 1.0 / 2))
    x_mu = np.asarray(x) - np.asarray(mu)
    inv = np.linalg.inv(sigma)
    # exp(-1/2 * (x-mu)^T sigma^-1 (x-mu))
    result = math.exp(-0.5 * float(x_mu @ inv @ x_mu))
    return norm_const * result
#GDA function
def gda(p_features, n_features, x):
    """Gaussian discriminant analysis score for sample *x*.

    Fits one multivariate Gaussian to the positive feature rows and one
    to the negative rows, then returns p_positive(x) - p_negative(x);
    a positive score means x looks more like the positive class.
    """
    mu_neg = np.mean(n_features, axis=0)  # negative-class mean
    mu_pos = np.mean(p_features, axis=0)  # positive-class mean
    cov_pos = np.matrix(np.cov(np.transpose(p_features)))
    cov_neg = np.matrix(np.cov(np.transpose(n_features)))
    score_neg = multiVarGaus(mu_neg, cov_neg, x)
    score_pos = multiVarGaus(mu_pos, cov_pos, x)
    return score_pos - score_neg
# --- Script entry: evaluate GDA on the training images themselves ----------
# definition for the positives
p_features = im_to_feature(p_path)
# definition for the Negatives
n_features = im_to_feature(n_path)
p = 0  # correctly scored positives
n = 0  # correctly scored negatives
# Score the first 30 positive and 30 negative examples
# (assumes at least 30 images per class -- TODO confirm)
for i in range(30):
    result = gda(p_features, n_features, p_features[i])
    if result > 0:
        p += 1
    result = gda(p_features, n_features, n_features[i])
    if result < 0:
        n += 1
print("Positives ", format(p))
print('Negatives ', format(n))
print('percentage correct', format((n + p) / 60))
#plt.imshow(mag.astype('double'), cmap='gray')
#plt.show()
|
import matplotlib.pyplot as plt
import numpy as np
import uncertainties.unumpy as unp
from scipy.optimize import curve_fit
from scipy import stats
import scipy.constants as con
from uncertainties import ufloat
# --- Heat-pump / compressor power evaluation --------------------------------
# t, p_a, p_b, t_1, t_2, n = np.genfromtxt('plots/test.txt', unpack=True)
t, t_1, p_a, t_2, p_b, N = np.genfromtxt('data/Messwerte.txt', unpack=True)
t *= 60         # minutes -> seconds
p_a += 1        # gauge -> absolute pressure (bar)
p_b += 1
t_1 += 273.15   # Celsius -> Kelvin
t_2 += 273.15
# Fitted mass-flow slopes with uncertainties -- presumably g/s; TODO confirm units
dm = unp.uarray([-1.27, 1.2983, 1.1446, 0.6094], [0.04, 0.0434, 0.053, 0.106])
# t = np.array([120, 480, 600, 1020])
p_a *= 10**5    # bar -> Pa
p_b *= 10**5
kappa = 1.14    # adiabatic exponent of the refrigerant
rho0 = 5.51     # g/l = kg/m^3 (density at normal conditions)
T0 = 273.15     # K
p0 = 1 * 10**5  # Pa
# Gas density at measurement index 6 via the ideal-gas relation
rho1 = p_a[6] * T0 * rho0 / (t_2[6] * p0)
print(rho1)
# Mechanical compressor power at measurement index 6
# NOTE(review): this rebinds N, shadowing the column read from Messwerte.txt
N = 1/(kappa - 1) * (p_b[6] * (p_a[6] / p_b[6])**(1/kappa) - p_a[6]) * ((t_2[6] * p0) / (p_a[6] * T0 * rho0)) * dm[1] *10**(-3)
print(N)
# Selected measurement points for the power comparison
pa = np.array([p_a[6], p_a[12], p_a[18], p_a[24]])  # Pa
pb = np.array([p_b[6], p_b[12], p_b[18], p_b[24]])  # Pa
dmt = unp.uarray([-1.27, -1.07, -0.87, -0.67], [0.04, 0.04, 0.04, 0.04])*10**(-3)  # kg/s
T2 = unp.uarray([289.0, 283.3, 278.5, 274.8], [0.5, 0.5, 0.5, 0.5])  # K
# Mechanical power at all four points (vectorised)
N2 = 1/(kappa - 1) * (pb * (pa / pb)**(1/kappa) - pa) * ((T2 * p0) / (pa * T0 * rho0)) * dmt
print(N2)
# Measured electrical power for comparison
Nb = unp.uarray([126, 126, 117, 116], [5, 5, 5, 5])
# Relative deviation between computed and measured power
DeltaN = (N2 - Nb) / Nb
print('DeltaN: ', DeltaN)
|
# Read a non-negative integer and print its factorial (computed iteratively).
n = int(input("enter any number:"))
factorial = 1
if n < 0:
    # Factorial is undefined for negative integers
    print("number can not be negative")
elif n == 0:
    print("the factorial of 0 is 1")
else:
    for i in range(1, n + 1):
        factorial *= i
    print("the factorial of", n, "is", factorial, ".")
|
from datetime import *
class BaseModel:
    """One row of tab-separated daily OHLC market data.

    Parses "date open high low close diff diff%" columns; the
    name-mangled fields stay private behind simple getters.
    """
    # Class-level defaults; overwritten per instance on a successful parse
    __date = None
    __open = 0
    __high = 0
    __low = 0
    __close = 0
    __diff = 0
    __diff_per = 0

    def __init__(self, raw):
        """Parse one tab-separated line.

        On a format error a message is printed and the instance keeps the
        class defaults -- no exception is raised.
        """
        parts = raw.split('\t')
        if len(parts) < 7:
            print(raw, " error format base")
            return
        date_parts = list(map(lambda s: int(s), parts[0].split('-')))
        if not len(date_parts) == 3:
            print(parts[0], " error format")
            return
        self.__date = date(date_parts[0], date_parts[1], date_parts[2])
        self.__open = float(parts[1])
        self.__high = float(parts[2])
        self.__low = float(parts[3])
        self.__close = float(parts[4])
        self.__diff = float(parts[5])
        # The change column may or may not carry a trailing '%'
        origin_per = parts[6]
        per_index = origin_per.find('%')
        if not per_index == -1:
            self.__diff_per = float(origin_per[:per_index])
        else:
            self.__diff_per = float(origin_per)

    def __repr__(self):
        # Tab-joined echo of the parsed fields (diff% printed without '%')
        return '\t'.join([str(self.__date), str(self.__open), str(self.__high),
                          str(self.__low), str(self.__close), str(self.__diff), str(self.__diff_per)])

    def get_date(self):
        return self.__date

    def get_diff(self):
        return self.__diff

    def get_close(self):
        return self.__close

    def get_high(self):
        return self.__high

    def get_low(self):
        return self.__low
if __name__ == '__main__':
    # Smoke test with sample rows. NOTE(review): the separators in the
    # real data file are tabs (see raw.split('\t')) -- confirm they
    # survived copy/paste into these literals.
    print("AG", BaseModel(
        "2017-2-9 4078 4107 4075 4082 14 0.34% 3,921,628.00 4089 16,035,536,896.00"))
    print("USD", BaseModel("2017-2-9 100.24 100.67 100.08 100.65 0.39 0.39% 335700"))
|
import cv2
import numpy as np
class ReferenceLine:
    """Least-squares line fitted through the centroids of a set of triangles.

    The line is stored in homogeneous form a*x + b*y + c = 0 as the right
    null-space vector of the stacked [x, y, 1] centroid matrix (via SVD).
    """

    def __init__(self, triangles):
        """Fit the line through each triangle's .centroid (x, y) point.

        Bug fix: on Python 3, map() returns a lazy iterator which
        np.reshape cannot reshape into (-1, 2); the centroids must be
        materialised into a list first.
        """
        centroids = [t.centroid for t in triangles]
        points = np.reshape(centroids, (-1, 2))
        # Homogeneous coordinates: append a column of ones
        points = np.hstack([points, np.ones((points.shape[0], 1))])
        # The right singular vector of the smallest singular value is the
        # best-fit null vector, i.e. the line coefficients (a, b, c)
        _, _, vt = np.linalg.svd(points)
        self.vector_form = vt[-1:, :].reshape(3, -1)

    def draw(self, image, color=(0, 255, 0), thickness=1):
        """Draw the fitted line across the full width of *image*."""
        vector = self.vector_form.ravel()
        # Intersections with the left (x = 0) and right (x = width-1) borders
        left_y = int(-vector[2] / vector[1])
        right_y = int((-vector[2] - (image.shape[1] - 1) * vector[0]) / vector[1])
        cv2.line(image, (0, left_y), (image.shape[1] - 1, right_y), color, thickness, cv2.LINE_AA)
|
def hello(msg):
    """Return the fixed greeting "Hiya"; *msg* is accepted but ignored."""
    greeting = "Hiya"
    return greeting
|
import array
import socket
import sys
def checksum(data):
    """Compute the 16-bit ones'-complement Internet checksum of *data*.

    Odd-length input is zero-padded. Each 16-bit word is read in native
    byte order (sys.byteorder), matching the original implementation.
    """
    if len(data) % 2:
        data += b'\x00'
    total = 0
    for offset in range(0, len(data), 2):
        word = int.from_bytes(data[offset:offset + 2], byteorder=sys.byteorder, signed=False)
        total += word
    # Fold any carries back into the low 16 bits
    while total >> 16:
        total = (total & 0xffff) + (total >> 16)
    return ~total & 0xffff
|
#! /usr/bin/env python
# Copyright (c) 2018 - 2019 Jolle Jolles <j.w.jolles@gmail.com>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at:
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import cv2
import numpy as np
import seaborn as sns
from pythutils.mathutils import sort_twoPoint
def textdims(text, size, thickness = 1):
    """Measure text dimensions for drawing with OpenCV.

    Returns ((width, height), top_pad, bottom_pad), where the paddings
    compensate for ascender dots ("i", "j") and descenders ("g", "j",
    "y", "p", "q", plus "Q"'s tail), scaled by the font size.
    """
    (width, height), _ = cv2.getTextSize(text, cv2.FONT_HERSHEY_SIMPLEX, size, thickness)
    chars = set(text)
    top_pad = size * 1 if chars & {"i", "j"} else 0
    bottom_pad = size * 2 if "Q" in chars else 0
    if chars & {"g", "j", "y", "p", "q"}:
        bottom_pad = size * 7
    return (width, height), top_pad, bottom_pad
class mouse_events:
    """Tracks mouse state from OpenCV callbacks: the current position,
    the click history, the active press, and the last completed drag."""

    def __init__(self):
        self.pos = (0, 0)     # latest pointer position
        self.pts = []         # history of left-button press points
        self.posDown = None   # position of the active press, if any
        self.posUp = None     # position of the last release
        self.twoPoint = None  # sorted (press, release) pair of the last drag
        self.drawing = False  # True once a full press/release has happened

    def draw(self, event, x, y, flags, param):
        """OpenCV mouse callback: record moves, presses and releases."""
        if event == cv2.EVENT_MOUSEMOVE:
            self.pos = (x, y)
        elif event == cv2.EVENT_LBUTTONDOWN:
            self.posDown = (x, y)
            self.pts.append((x, y))
            self.posUp = None
        elif event == cv2.EVENT_LBUTTONUP:
            self.posUp = (x, y)
            self.twoPoint = sort_twoPoint((self.pts[-1], self.posUp))
            self.posDown = None
            self.drawing = True
def namedcols(colname = None, printlist = False, BRG = True):
    """Look up an RGB/BGR color tuple by name.

    colname : a color name string, or a 3-tuple which is returned
        unchanged (no channel swap is applied to tuples).
    printlist : if True, print the whole name -> color table instead.
    BRG : if True (default) return the tuple in BGR channel order for
        OpenCV; otherwise return the stored RGB order.

    NOTE(review): returns None implicitly both for printlist=True and
    for unknown names (after printing a warning) -- callers must cope
    with a None result.
    """
    # Name -> (R, G, B) table (X11/web color values)
    collist = {'black': (0, 0, 0),
               'navy': (0, 0, 128),
               'navyblue': (0, 0, 128),
               'darkblue': (0, 0, 139),
               'mediumblue': (0, 0, 205),
               'blue': (0, 0, 255),
               'darkgreen': (0, 100, 0),
               'green': (0, 128, 0),
               'darkcyan': (0, 139, 139),
               'deepskyblue': (0, 191, 255),
               'darkturquoise': (0, 206, 209),
               'mediumspringgreen': (0, 250, 154),
               'lime': (0, 255, 0),
               'springgreen': (0, 255, 127),
               'cyan': (0, 255, 255),
               'aqua': (0, 255, 255),
               'midnightblue': (25, 25, 112),
               'dodgerblue': (30, 144, 255),
               'lightseagreen': (32, 178, 170),
               'forestgreen': (34, 139, 34),
               'seagreen': (46, 139, 87),
               'darkslategray': (47, 79, 79),
               'darkslategrey': (47, 79, 79),
               'limegreen': (50, 205, 50),
               'mediumseagreen': (60, 179, 113),
               'turquoise': (64, 224, 208),
               'royalblue': (65, 105, 225),
               'steelblue': (70, 130, 180),
               'darkslateblue': (72, 61, 139),
               'mediumturquoise': (72, 209, 204),
               'indigo': (75, 0, 130),
               'darkolivegreen': (85, 107, 47),
               'cadetblue': (95, 158, 160),
               'cornflowerblue': (100, 149, 237),
               'mediumaquamarine': (102, 205, 170),
               'dimgray': (105, 105, 105),
               'dimgrey': (105, 105, 105),
               'slateblue': (106, 90, 205),
               'olivedrab': (107, 142, 35),
               'slategray': (112, 128, 144),
               'slategrey': (112, 128, 144),
               'lightslategray': (119, 136, 153),
               'lightslategrey': (119, 136, 153),
               'mediumslateblue': (123, 104, 238),
               'lawngreen': (124, 252, 0),
               'chartreuse': (127, 255, 0),
               'aquamarine': (127, 255, 212),
               'maroon': (128, 0, 0),
               'purple': (128, 0, 128),
               'olive': (128, 128, 0),
               'gray': (128, 128, 128),
               'grey': (128, 128, 128),
               'lightslateblue': (132, 112, 255),
               'skyblue': (135, 206, 235),
               'lightskyblue': (135, 206, 250),
               'blueviolet': (138, 43, 226),
               'darkred': (139, 0, 0),
               'darkmagenta': (139, 0, 139),
               'saddlebrown': (139, 69, 19),
               'darkseagreen': (143, 188, 143),
               'lightgreen': (144, 238, 144),
               'mediumpurple': (147, 112, 219),
               'darkviolet': (148, 0, 211),
               'palegreen': (152, 251, 152),
               'darkorchid': (153, 50, 204),
               'yellowgreen': (154, 205, 50),
               'sienna': (160, 82, 45),
               'brown': (165, 42, 42),
               'darkgray': (169, 169, 169),
               'darkgrey': (169, 169, 169),
               'lightblue': (173, 216, 230),
               'greenyellow': (173, 255, 47),
               'paleturquoise': (175, 238, 238),
               'lightsteelblue': (176, 196, 222),
               'powderblue': (176, 224, 230),
               'firebrick': (178, 34, 34),
               'darkgoldenrod': (184, 134, 11),
               'mediumorchid': (186, 85, 211),
               'rosybrown': (188, 143, 143),
               'darkkhaki': (189, 183, 107),
               'silver': (192, 192, 192),
               'mediumvioletred': (199, 21, 133),
               'indianred': (205, 92, 92),
               'peru': (205, 133, 63),
               'violetred': (208, 32, 144),
               'chocolate': (210, 105, 30),
               'tan': (210, 180, 140),
               'lightgray': (211, 211, 211),
               'lightgrey': (211, 211, 211),
               'thistle': (216, 191, 216),
               'orchid': (218, 112, 214),
               'goldenrod': (218, 165, 32),
               'palevioletred': (219, 112, 147),
               'crimson': (220, 20, 60),
               'gainsboro': (220, 220, 220),
               'plum': (221, 160, 221),
               'burlywood': (222, 184, 135),
               'lightcyan': (224, 255, 255),
               'lavender': (230, 230, 250),
               'darksalmon': (233, 150, 122),
               'violet': (238, 130, 238),
               'lightgoldenrod': (238, 221, 130),
               'palegoldenrod': (238, 232, 170),
               'lightcoral': (240, 128, 128),
               'khaki': (240, 230, 140),
               'aliceblue': (240, 248, 255),
               'honeydew': (240, 255, 240),
               'azure': (240, 255, 255),
               'sandybrown': (244, 164, 96),
               'wheat': (245, 222, 179),
               'beige': (245, 245, 220),
               'whitesmoke': (245, 245, 245),
               'mintcream': (245, 255, 250),
               'ghostwhite': (248, 248, 255),
               'salmon': (250, 128, 114),
               'antiquewhite': (250, 235, 215),
               'linen': (250, 240, 230),
               'lightgoldenrodyellow': (250, 250, 210),
               'oldlace': (253, 245, 230),
               'red': (255, 0, 0),
               'magenta': (255, 0, 255),
               'fuchsia': (255, 0, 255),
               'deeppink': (255, 20, 147),
               'orangered': (255, 69, 0),
               'tomato': (255, 99, 71),
               'hotpink': (255, 105, 180),
               'coral': (255, 127, 80),
               'darkorange': (255, 140, 0),
               'lightsalmon': (255, 160, 122),
               'orange': (255, 165, 0),
               'lightpink': (255, 182, 193),
               'pink': (255, 192, 203),
               'gold': (255, 215, 0),
               'peachpuff': (255, 218, 185),
               'navajowhite': (255, 222, 173),
               'moccasin': (255, 228, 181),
               'bisque': (255, 228, 196),
               'mistyrose': (255, 228, 225),
               'blanchedalmond': (255, 235, 205),
               'papayawhip': (255, 239, 213),
               'lavenderblush': (255, 240, 245),
               'seashell': (255, 245, 238),
               'cornsilk': (255, 248, 220),
               'lemonchiffon': (255, 250, 205),
               'floralwhite': (255, 250, 240),
               'snow': (255, 250, 250),
               'yellow': (255, 255, 0),
               'lightyellow': (255, 255, 224),
               'ivory': (255, 255, 240),
               'white': (255, 255, 255)}
    if printlist:
        print(collist)
    elif type(colname) == tuple and len(colname) == 3:
        # Already a color tuple: pass through untouched (no BGR swap)
        return colname
    elif colname not in collist:
        print("colname does not exist..")
    else:
        col = collist[colname]
        # Stored values are RGB; swap to BGR for OpenCV unless disabled
        col = (col[2],col[1],col[0]) if BRG else col
        return col
def uniqcols(colnr, colorspace="husl", rgb=True):
    """Return *colnr* colors evenly spaced on the given seaborn palette.

    With rgb=True (default) seaborn's 0-1 floats are converted to
    0-255 ints and the channel order is reversed (BGR for OpenCV);
    otherwise the raw seaborn palette is returned.
    """
    palette = sns.color_palette(colorspace, colnr)
    if not rgb:
        return palette
    converted = []
    for col in palette:
        channels = tuple(int(c * 255) for c in col)
        converted.append(channels[::-1])
    return converted
def draw_text(img, text, loc = (0, 0), size = 1, col = "black", margin = 5,
              thickness = 1, bgcol = None, shadow = False):
    """
    Draw text on opencv image

    img : an image array (modified in place)
    text : the text to draw
    loc : the location of the text relative to topleft
    size : the size of the text
    col : the color of the text
    margin : the margin of the text
    thickness : the weight of the text
    bgcol : the potential background color of the text
    shadow : if True, draw a thicker black copy first for contrast
    """
    col = namedcols(col)
    # Measured text size plus ascender/descender padding (see textdims)
    (tw, th), topy, boty = textdims(text, size)
    topy = topy + int(thickness/2)
    boty += int(thickness/2)
    if bgcol is not None:
        # Filled background rectangle sized to text plus margins
        bgcol = namedcols(bgcol)
        topleftout = (loc[0], loc[1])
        botrightx = loc[0] + margin + tw + margin + 1
        botrighty = loc[1] + margin + th + topy + boty + margin + 1
        botrightout = (botrightx, botrighty)
        cv2.rectangle(img, topleftout, botrightout, bgcol, -1)
    # cv2.putText anchors text at its baseline (bottom-left corner)
    botlefin = (int(loc[0]+margin), int(loc[1]+margin+th+topy))
    if shadow:
        cv2.putText(img, text, botlefin, cv2.FONT_HERSHEY_SIMPLEX, size,
                    (0,0,0), int(thickness*3), cv2.LINE_AA)
    cv2.putText(img, text, botlefin, cv2.FONT_HERSHEY_SIMPLEX, size,
                col, thickness, cv2.LINE_AA)
def draw_cross(img, pt1 = (1,1), pt2 = None, col = "white", thickness = 2):
    """Draw an X-shaped cross between corners pt1 and pt2.

    pt2 defaults to the image's bottom-right corner. Bug fix: the second
    diagonal previously used (pt2[0], pt1[0]) -> (pt1[1], pt2[1]), mixing
    x- and y-coordinates; that only looked right when pt1 and pt2 each
    had equal x and y (as with the defaults). It now runs from the
    top-right to the bottom-left corner for any pair of points.
    """
    pt2 = (img.shape[1], img.shape[0]) if pt2 == None else pt2
    # First diagonal: pt1 (top-left) to pt2 (bottom-right)
    cv2.line(img, pt1, (pt2[0], pt2[1]), namedcols(col), thickness)
    # Second diagonal: top-right to bottom-left
    cv2.line(img, (pt2[0], pt1[1]), (pt1[0], pt2[1]), namedcols(col), thickness)
def draw_hcross(img, col="white", thickness=2, style="dashed"):
    """Draw a centered "+" through the image: one vertical and one
    horizontal sliced (dashed/dotted) line crossing in the middle."""
    height, width = img.shape[0], img.shape[1]
    mid_x = int(width / 2)
    mid_y = int(height / 2)
    # Vertical line down the middle
    draw_sliced_line(img, (mid_x, 1), (mid_x, height), col, thickness, style)
    # Horizontal line across the middle
    draw_sliced_line(img, (1, mid_y), (width, mid_y), col, thickness, style)
def draw_crosshair(img, pt, radius = 5, col = "white"):
    """Draw a small "+" crosshair of the given radius centered on *pt*."""
    x, y = pt
    # Horizontal then vertical bar through the center point
    cv2.line(img, (x - radius, y), (x + radius, y), namedcols(col), 1)
    cv2.line(img, (x, y - radius), (x, y + radius), namedcols(col), 1)
def draw_rectangle(img, pointer, rect, drawing = False, col = "red"):
    """Draw *rect* on the image; while *drawing* is True, rubber-band
    the second corner to the current *pointer* position instead."""
    second_corner = pointer if drawing else rect[1]
    cv2.rectangle(img, rect[0], second_corner, namedcols(col), 2)
def draw_bicircles(img, pt, resizeval = 1, col1 = "black", col2 = "white",
                   minsize = 3, maxsize = 15, stepsize = 3):
    """Draw concentric filled dots of shrinking size at *pt*, alternating
    between col1 and col2 (largest drawn first, col1 outermost)."""
    size_steps = range(minsize, maxsize + stepsize, stepsize)
    for idx, size in enumerate(reversed(size_steps)):
        color = namedcols(col1) if idx % 2 == 0 else namedcols(col2)
        # radius 0 with a fat line thickness paints a filled dot
        cv2.circle(img, pt, 0, color, int(size * resizeval))
def draw_sliced_line(img, pt1, pt2, color, thickness = 1, style = "dotted",
                     gap = 5):
    """Draw a dashed or dotted line on an image

    Samples a point every *gap* pixels along pt1 -> pt2; "dotted" draws a
    filled circle at each sample, "dashed" connects every other pair of
    consecutive samples. NOTE(review): np.arange excludes the far end, so
    the pattern can stop slightly short of pt2; a zero-length line
    (pt1 == pt2) makes pts empty and the dashed branch raise IndexError.
    """
    col = namedcols(color)
    # Euclidean length of the segment
    dist = ((pt1[0]-pt2[0])**2 + (pt1[1]-pt2[1])**2)**.5
    pts = []
    for i in np.arange(0, dist, gap):
        # Linear interpolation between the endpoints (+.5 rounds to nearest)
        r = i/dist
        x = int((pt1[0]*(1-r)+pt2[0]*r)+.5)
        y = int((pt1[1]*(1-r)+pt2[1]*r)+.5)
        pts.append((x,y))
    if style == "dotted":
        for pt in pts:
            cv2.circle(img, pt, thickness, col, -1)
    if style == "dashed":
        # Draw every second segment between consecutive sample points
        s = e = pts[0]
        for i,pt in enumerate(pts):
            s = e
            e = pt
            if i%2 == 1:
                cv2.line(img, s, e, col, thickness)
def draw_traj(img, coordlist = None, color = "green", thick_min = 8,
              thick_max = 13, opacity = 0.5):
    """Draw a semi-transparent trajectory polyline whose thickness tapers
    along the coordinate list (thin at the start, thick at the end).

    img : image array, modified in place (blended with a drawn mask).
    coordlist : sequence of (x, y) points; NaN or None entries are
        skipped. Bug fix: the default was the mutable `[]`; replaced
        with the None sentinel -- caller-visible behavior is unchanged.
    color / thick_min / thick_max / opacity : line appearance knobs.
    """
    coordlist = [] if coordlist is None else coordlist
    col = namedcols(color)
    mask = img.copy()
    # Thickness ramp, heavily weighted toward the end via the 4th power
    thicklist = np.linspace(thick_min, thick_max, len(coordlist))
    thicklist = (thicklist**4 / thick_max**4) * thick_max + 1
    thicklist = [max(i, 1) for i in thicklist]
    for i in list(range(1, (len(coordlist) - 1))):
        thickness = int(thicklist[i])
        # x != x is the NaN test; skip segments with a missing endpoint
        if coordlist[i] != coordlist[i] or coordlist[i-1] != coordlist[i-1]:
            continue
        elif None in sum((coordlist[i], coordlist[i-1]), ()):
            continue
        cv2.line(mask, coordlist[i], coordlist[i-1], col, thickness)
    # Blend the drawn mask onto the original image in place
    cv2.addWeighted(mask, opacity, img, 1-opacity, 0, img)
|
'''
Given a non-empty array of integers, return the k most frequent elements.
Example 1:
Input: nums = [1,1,1,2,2,3], k = 2
Output: [1,2]
Example 2:
Input: nums = [1], k = 1
Output: [1]
Note:
You may assume k is always valid, 1 <= k <= number of unique elements.
Your algorithm's time complexity must be better than O(n log n), where n is the array's size.
'''
class Solution:
    def topKFrequent(self, nums, k):
        """Return the k most frequent elements of nums.

        :type nums: List[int]
        :type k: int
        :rtype: List[int]

        Counts each value, buckets values by frequency, then walks the
        frequencies from high to low collecting values -- O(n) overall,
        beating the O(n log n) sort-based approach. (Cleanup: the
        original shadowed the builtin `dict`; counting and bucketing
        now use .get()/.setdefault().)
        """
        # value -> occurrence count
        counts = {}
        for num in nums:
            counts[num] = counts.get(num, 0) + 1
        # occurrence count -> list of values with that count
        buckets = {}
        for value, count in counts.items():
            buckets.setdefault(count, []).append(value)
        res = []
        freq = len(nums)   # highest possible frequency
        remaining = k      # how many more elements we still want
        while remaining > 0:
            if freq in buckets:
                for value in buckets[freq]:
                    res.append(value)
                    # NOTE: ties straddling k may push this below zero,
                    # returning more than k elements (same as original)
                    remaining -= 1
            freq -= 1
        return res
# Ad-hoc check: print the two most frequent elements of [1, 2]
solver = Solution()
nums = [1, 2]
k = 2
print(solver.topKFrequent(nums, k))
|
from urllib.request import urlopen
from urllib.error import HTTPError, URLError
from bs4 import BeautifulSoup
def get_title(url):
    """
    Fetch *url* and return its <body><h1> element, or None on failure.

    :param url: the page URL to fetch
    :return: the BeautifulSoup h1 tag, or None if the request fails or
        the page has no body/h1.

    Bug fix: a debug print(html.read()) consumed the response stream, so
    the later html.read() handed BeautifulSoup an empty document and the
    function always returned None. The body is now read exactly once.
    URLError (DNS failure, refused connection) was imported at the top
    of this file but never handled; it is now treated like HTTPError.
    """
    try:
        html = urlopen(url)
        content = html.read()
    except (HTTPError, URLError):
        return None
    try:
        obj = BeautifulSoup(content, "lxml")
        title = obj.body.h1
    except AttributeError:
        return None
    return title
def main():
    """Fetch a known page and report whether a title could be extracted."""
    page_title = get_title("https://maina-samuel.azurewebsites.net/")
    print(page_title if page_title is not None else "No title was found")

if __name__ == "__main__":
    main()
|
from abc import ABC, abstractmethod
class Command(ABC):
    """Abstract base class for runnable commands.

    Usage (demo):
        main = SomeCommand()
        main.config = config
        main.run()

    Design: build the command, assign its configuration through the
    write-only `config` property, then invoke run(). Assigning to
    `config` copies every attribute of the given object onto the command
    instance, so subclasses can read settings as plain attributes.
    """

    def set_config(self, config):
        """Absorb all attributes of *config* into this instance."""
        self.__dict__.update(vars(config))

    # Write-only property: commands are configured, never queried
    config = property(fset=set_config)

    @abstractmethod
    def run(self):
        """Execute the command; must be implemented by subclasses."""
        pass
|
import numpy as np
import cv2
import time
import boto3
import os
# import the cascade for face detection (loads the Haar model at import
# time; the XML file must sit next to this script)
face_cascade = cv2.CascadeClassifier('haarcascade_frontalface_default.xml')
# NOTE(review): `max` shadows the builtin max() for the rest of this module
max = 3        # number of stored customer accounts/images
filenum = max  # index used when creating the next account file
i = 1          # scan cursor for CheckSimilarity (module-global state)
x = 1          # index of the matched customer account (module-global state)
def TakeSnapshotAndSave():
    """Capture two webcam frames with face boxes drawn, save them, then match.

    Writes opencv0.jpeg / opencv1.jpeg to the working directory and hands
    off to CheckSimilarity().
    """
    # access the webcam (every webcam has a number, the default is 0)
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    num = 0
    while num<2:
        # Capture frame-by-frame
        ret, frame = cap.read()
        # to detect faces in video
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_cascade.detectMultiScale(gray, 1.3, 5)
        for (x,y,w,h) in faces:
            cv2.rectangle(frame,(x,y),(x+w,y+h),(255,0,0),2)
            roi_gray = gray[y:y+h, x:x+w]
            roi_color = frame[y:y+h, x:x+w]
        # NOTE(review): x/y are reassigned after the loop and text_color is
        # unused -- these look like leftovers of a removed cv2.putText call.
        x = 0
        y = 20
        text_color = (0,255,0)
        cv2.imwrite('opencv'+str(num)+'.jpeg',frame)
        num = num+1
    # When everything done, release the capture
    cap.release()
    cv2.destroyAllWindows()
    CheckSimilarity()
def CheckSimilarity():
    """Compare the fresh webcam shot against stored customer photos via AWS Rekognition.

    Iterates the global cursor `i` over Customer<i>.jpeg up to the global
    `max`; on a match stores the customer id in the global `x` and sets
    `i` to 0 as the "found" flag.
    """
    global i
    b = max  # NOTE(review): unused local
    while i<max and i!=0:
        sourceFile='Customer'+str(i)+'.jpeg'
        image = sourceFile
        targetFile='opencv1.jpeg'
        client=boto3.client('rekognition')
        imageSource=open(sourceFile,'rb')
        imageTarget=open(targetFile,'rb')
        response=client.compare_faces(SimilarityThreshold=70,
                                      SourceImage={'Bytes': imageSource.read()},
                                      TargetImage={'Bytes': imageTarget.read()})
        # for/else: the else branch runs only when no face matched at all.
        for faceMatch in response['FaceMatches']:
            position = faceMatch['Face']['BoundingBox']
            similarity = str(faceMatch['Similarity'])
            #print ('Comparing with ' + image)
            #print('-->The face at ' +
            #str(position['Left']) + ' ' +
            #str(position['Top']) +
            #' matches with ' + similarity + '% confidence')
            global x
            x = i
            i = 0
            break
        else:
            #print ('Comparing with ' + image)
            #print('-->These faces do not match')
            i = i+1
        imageSource.close()
        imageTarget.close()
    if i == 0:
        openfile()
    else:
        print('Sorry, we cant find an account, would you like to create a new one?')
        options()
def openfile():
    """Open the matched customer's record with the OS default app (Windows-only API)."""
    os.startfile("Customer"+str(x)+".txt")
    print('We found your account!')
def options():
    """Ask y/n: 'y' creates a new account, 'n' says goodbye, anything else re-asks."""
    choice = input("y/n> ")
    print("You selected ", choice)
    if choice == 'y':
        newfile()
    elif choice == 'n':
        print('okay, bye bye!')
    else:
        print('Sorry, please pick y or n')
        options()
def newfile():
    """Create a new customer record: a text file with the name, then a photo."""
    filenum = max  # next free customer number (global counter)
    print('we are making you a new account')
    f = open("Customer" +str(filenum)+ ".txt", "w+")
    print('Please type your name')
    name = input(">")
    f.write("name: %s" %name)
    f.close()
    # NOTE(review): savephoto() saves the image under index `i`, not
    # `filenum` -- confirm the two stay in sync (see savephoto).
    savephoto()
def addone():
    """Bump the global customer-file counter."""
    global max
    max += 1
def savephoto():
    """Capture one webcam frame as the new customer's reference photo, then restart."""
    global x
    print('Smile! we are going to take a photo of you')
    input("Press Enter when you are ready")
    cap = cv2.VideoCapture(0, cv2.CAP_DSHOW)
    ret, frame = cap.read()
    # NOTE(review): the photo is saved under the comparison cursor `i`,
    # while the matching text file was created under `filenum`/`max` --
    # verify this is intentional, or photos may overwrite other customers'.
    cv2.imwrite('Customer'+str(i)+'.jpeg',frame)
    print('Thanks! account has been made')
    cap.release()
    cv2.destroyAllWindows()
    print('')
    addone()
    Startup()
def Startup():
    """Entry point: greet, wait for Enter, then capture and match a face."""
    print('Welcome to Pi Cafe!')
    input("Press Enter to continue...")
    TakeSnapshotAndSave()
if __name__ == "__main__":
    # Script entry point.
    Startup()
|
import requests

# NOTE(review): keep real tokens out of source control -- load from the
# environment or a secrets store instead.
token = 'aKcaFuAVEJre7L9UkxAG'
# Consultar projetos (list projects)
# projetos = requests.get(f'http://localhost:81/api/v4/projects?private_token={token}')
# print(projetos.json())
# projeto = {
#     'name': 'batata-frita'
# }
# Adicionar projetos (create a project)
# projetos = requests.post(f'http://localhost:81/api/v4/projects?private_token={token}',projeto)
# print(projetos.json())
# Consultar usuarios (list users)
# projetos = requests.get(f'http://localhost:81/api/v4/users?private_token={token}')
# print(projetos.json())
# Adicionar usuarios (create a user)
usuario = {
    'email':'pedro.cardoso@gmail.com',
    'username':'pedro.cardoso',
    'name':'Pedro Cardoso',
    'password':'4linux'
}
projetos = requests.post(f'http://localhost:81/api/v4/users?private_token={token}',usuario)
print(projetos.json())
# Adicionar membros ao projeto (add a member to a project)
project_id = 1
pessoa = {
    'user_id':12,
    'access_level':40
}
# BUG FIX: adding a member requires POST to the /members sub-resource; the
# original issued a GET against the project itself, which only reads it.
projetos = requests.post(f'http://localhost:81/api/v4/projects/{project_id}/members?private_token={token}',pessoa)
print(projetos.json())
# User-facing flash/API message strings shared across the app.
LOGIN_REQUIRED = 'Login required.'
USER_CREATED = 'User created successfully.'
LOGOUT = 'Log out successfully.'
ERROR_USER_PASSWORD = 'Incorrect user or password.'
LOGIN = 'Successfully authenticated user.'
# NOTE(review): the TASK_* messages mix "Article" and "Item" wording --
# confirm which resource they describe before unifying the text.
TASK_CREATED = 'Article created successfully.'
TASK_UPDATE = 'Article Updated Successfully.'
TASK_DELETE = 'Item Removed Successfully.'
#MAIL_PASSWORD = '3217576343a'
|
import turtle

ans = True
# BUG FIX: the original passed the font tuple positionally *after* keyword
# arguments, which is a SyntaxError; it must be passed as font=.
turtle.write("""
1. INSTRUCTIONS
2. PLAY!!
PRESS ENTER TO QUIT
""", True, align="center", font=("Times", 24, "bold italic"))
while ans:
    # NOTE(review): `ans` is never changed, so this menu loop never exits
    # on its own -- input handling was presumably meant to go here.
    print("""
1. INSTRUCTIONS
2. PLAY!!
PRESS ENTER TO QUIT
""")
turtle.mainloop()
|
import numpy as np
import matplotlib.pyplot as plt
import time
import scipy.sparse
import scipy.signal
from tqdm import tqdm
from numba import stencil, jit, prange
class Namespace(dict):
    """A dict of named fields supporting elementwise arithmetic.

    Binary operators combine two Namespaces key-by-key, or broadcast a
    scalar over every value, so the integrators below can write whole-state
    expressions like ``yn + dt * k1``.
    """

    def __mul__(self, other):
        if isinstance(other, Namespace):
            return Namespace({key: other[key] * val for key, val in self.items()})
        else:
            return Namespace({key: other * val for key, val in self.items()})

    __rmul__ = __mul__

    def __div__(self, other):
        # Python 2 compatibility alias for true division.
        return self.__truediv__(other)

    def __truediv__(self, other):
        if isinstance(other, Namespace):
            return Namespace({key: val / other[key] for key, val in self.items()})
        else:
            return Namespace({key: val / other for key, val in self.items()})

    def __rdiv__(self, other):
        if isinstance(other, Namespace):
            return Namespace({key: other[key] / val for key, val in self.items()})
        else:
            return Namespace({key: other / val for key, val in self.items()})

    # Generalization: the original only defined the Python 2 __rdiv__, so
    # `scalar / ns` did not work on Python 3.
    __rtruediv__ = __rdiv__

    def __add__(self, other):
        if isinstance(other, Namespace):
            return Namespace({key: other[key] + val for key, val in self.items()})
        else:
            return Namespace({key: other + val for key, val in self.items()})

    __radd__ = __add__

    def __sub__(self, other):
        # BUG FIX: the original returned `other - val`, i.e. swapped the
        # operands; `self - other` must subtract `other` from our values.
        if isinstance(other, Namespace):
            return Namespace({key: val - other[key] for key, val in self.items()})
        else:
            return Namespace({key: val - other for key, val in self.items()})

    def __rsub__(self, other):
        # scalar - Namespace (this is what the old, mislabeled __sub__ did).
        if isinstance(other, Namespace):
            return Namespace({key: other[key] - val for key, val in self.items()})
        else:
            return Namespace({key: other - val for key, val in self.items()})

    @property
    def omega(self):
        return self["omega"]

    @property
    def phi(self):
        return self["phi"]

    @property
    def density(self):
        return self["density"]

    @property
    def age(self):
        return self["age"]

    @property
    def dtype(self):
        # dtype of the density field, used as the reference dtype.
        return self["density"].dtype

    @property
    def dx(self):
        return self["dx"]

    def copy(self):
        """Shallow copy: the values themselves are shared, not duplicated."""
        return Namespace({key: val for key, val in self.items()})
def get_2d_sine(grid_size, L):
indices = np.array(np.meshgrid(*list(map(range, grid_size))))
phys_coord = indices.T * L / (grid_size[0]) # between [0, L)
x, y = phys_coord.T
d = np.sin(x + 1) * np.sin(y + 1)
return d
class HW:
    """2D Hasegawa-Wakatani plasma model with Euler and RK4 time steppers.

    NOTE(review): the Arakawa-bracket, diffusion and background-gradient
    contributions in step_gradient_2d are commented out, so only the
    adiabatic coupling c1*(phi - n) is currently integrated.
    """
    def __init__(self, N, c1, nu, arakawa_coeff, kappa_coeff, debug=False, quiet=False):
        # N: hyperdiffusion order; c1: adiabatic coupling strength;
        # nu: viscosity; arakawa_coeff / kappa_coeff act as both on/off
        # switches (truthiness) and scale factors.
        self.N = N
        self.c1 = c1
        # Sign alternates with N -- presumably to keep the hyperdiffusion
        # term dissipative for every order; confirm against the reference.
        self.nu = (-1) ** (self.N + 1) * nu
        self.arakawa_coeff = arakawa_coeff
        self.kappa_coeff = kappa_coeff
        self.debug = debug
        # Self-check the spectral Poisson solver at construction time.
        self.test_poisson(size=128)
        self.counter = 0
        if not quiet:
            print(self)
    def __repr__(self):
        # Checklist-style summary of which physics terms are active.
        rep = "2D Hasegawa-Wakatani Model:\n"
        if self.c1:
            rep += f"[x] c1={self.c1}\n"
        else:
            rep += f"[ ] c1={self.c1}\n"
        if self.nu and self.N:
            rep += f"[x] Diffusion active. N={self.N}, nu={self.nu}\n"
        else:
            rep += f"[ ] Diffusion NOT active. N={self.N}, nu={self.nu}\n"
        if self.arakawa_coeff:
            rep += f"[x] Poisson Bracket included. Coefficient={self.arakawa_coeff}\n"
        else:
            rep += (
                f"[ ] Poisson Bracket NOT incluced. Coefficient={self.arakawa_coeff}\n"
            )
        if self.kappa_coeff:
            rep += f"[x] Background Gradient included. Kappa_coeff={self.kappa_coeff}\n"
        else:
            rep += f"[ ] Background Gradient NOT incluced. Kappa_coeff={self.kappa_coeff}\n"
        return rep
    def euler_step(self, yn, dt):
        """One explicit Euler step of size dt from state yn."""
        pn = self.get_phi(yn, guess=yn.phi)
        k1 = self.step_gradient_2d(yn, pn)
        y1 = yn + dt * k1
        # Recompute phi consistently with the updated vorticity.
        p1 = self.get_phi(y1, guess=yn.phi)
        return Namespace(
            density=y1.density,
            omega=y1.omega,
            phi=p1,
            age=yn.age + dt, # y1 contains 2 time steps from compute
            dx=yn.dx,
        )
    def rk4_step(self, yn, dt=0.1):
        """One classical Runge-Kutta 4 step; phi is re-solved at each stage."""
        # RK4
        t0 = time.time()
        if yn.age == 0:
            pn = self.get_phi(yn, guess=yn.phi)
        else:
            pn = yn.phi
        k1 = dt * self.step_gradient_2d(yn, pn, dt=0)
        p1 = self.get_phi(yn + k1 * 0.5) # , guess=pn)
        k2 = dt * self.step_gradient_2d(yn + k1 * 0.5, p1, dt=dt / 2)
        p2 = self.get_phi(yn + k2 * 0.5) # , guess=pn+p1*0.5)
        k3 = dt * self.step_gradient_2d(yn + k2 * 0.5, p2, dt=dt / 2)
        p3 = self.get_phi(yn + k3) # , guess=pn+p2*0.5)
        k4 = dt * self.step_gradient_2d(yn + k3, p3, dt=dt)
        # Standard RK4 weighted combination of the four slopes.
        y1 = yn + (k1 + 2 * k2 + 2 * k3 + k4) / 6
        # phi = #, guess=pn+p3*0.5)
        t1 = time.time()
        if self.debug:
            # Trace: age, max |fields| per stage, and wall time of the step.
            print(
                " | ".join(
                    [
                        f"{yn.age + dt:<7.04g}",
                        f"{np.max(np.abs(yn.density.data)):>7.02g}",
                        f"{np.max(np.abs(k1.density.data)):>7.02g}",
                        f"{np.max(np.abs(k2.density.data)):>7.02g}",
                        f"{np.max(np.abs(k3.density.data)):>7.02g}",
                        f"{np.max(np.abs(k4.density.data)):>7.02g}",
                        f"{t1-t0:>6.02f}s",
                    ]
                )
            )
        return Namespace(
            density=y1.density,
            omega=y1.omega,
            phi=self.get_phi(y1), # TODO: Somehow this does not work properly
            age=yn.age + dt, # y1 contains 2 time steps from compute
            dx=yn.dx,
        )
    def get_phi(self, plasma, guess=None):
        """Solve the Poisson equation for phi from omega (guess is unused)."""
        # Fourier Poisson Solve for Phi
        phi = fourier_poisson(plasma.omega, plasma.dx)
        return phi
    def diffuse(self, arr, N, dx):
        """Apply the periodic Laplacian N times (hyperdiffusion of order N)."""
        for i in range(N):
            arr = periodic_laplace_func(arr, dx)
        return arr
    def step_gradient_2d(self, plasma, phi, dt=0):
        """Right-hand side (time derivative) of the HW equations at the given state."""
        # Calculate Gradients
        dy_p, dx_p = periodic_gradient(phi, plasma.dx)
        # Get difference
        diff = phi - plasma.density
        # Step 2.1: New Omega.
        o = self.c1 * diff
        # if self.arakawa_coeff:
        #     o += -self.arakawa_coeff * periodic_arakawa(phi, plasma.omega, plasma.dx)
        # if self.nu and self.N:
        #     o += self.nu * self.diffuse(plasma.omega, self.N, plasma.dx)
        # Step 2.2: New Density.
        n = self.c1 * diff
        # if self.arakawa_coeff:
        #     n += -self.arakawa_coeff * periodic_arakawa(phi, plasma.density, plasma.dx)
        # if self.kappa_coeff:
        #     n += -self.kappa_coeff * dy_p
        # if self.nu and self.N:
        #     n += self.nu * self.diffuse(plasma.density, self.N, plasma.dx)
        return Namespace(
            density=n,
            omega=o,
            phi=phi, # NOTE: NOT A GRADIENT
            age=plasma.age + dt,
            dx=plasma.dx,
        )
    def test_poisson(self, size=2 ** 8):
        """Verify the spectral Poisson solver against an analytic sine solution."""
        N = size
        # Local helper (shadows the module-level get_2d_sine; this one uses
        # a 2*pi wavenumber so the analytic inverse is known exactly).
        def get_2d_sine(grid_size, L):
            indices = np.array(np.meshgrid(*list(map(range, grid_size))))
            phys_coord = indices.T * L / (grid_size[0]) # between [0, L)
            x, y = phys_coord.T
            d = np.sin(2 * np.pi * x + 1) * np.sin(2 * np.pi * y + 1)
            return d
        L = 1
        dx = L / N
        sine_field = get_2d_sine((N, N), L=L)
        input_field = 8 * np.pi ** 2 * sine_field
        reference_result = -sine_field
        # pois_field = poisson_solve(input_field, dx)
        # four_field = fourier_poisson(input_field, dx)
        phi = self.get_phi(
            Namespace(omega=input_field, phi=input_field, dx=dx), guess=input_field
        )
        if np.mean(np.abs(reference_result - phi)) < 1e-5:
            pass
        else:
            # NOTE(review): `plot` is not defined anywhere in this module --
            # this branch would raise NameError; confirm the intended helper.
            plot(
                {
                    "input": input_field,
                    "solved": phi,
                    "reference": reference_result,
                    "difference": reference_result - phi,
                }
            )
            print("! WARNING ! - POISSON SOLVE IS NOT WORKING !")
@stencil
def jpp_nb(zeta, psi, d):
    """dxdy-dydx"""
    # Arakawa J++ term: product of centered differences of both fields.
    # Indices are stencil-relative offsets; numba applies this at every
    # interior grid point.
    return (
        (zeta[1, 0] - zeta[-1, 0]) * (psi[0, 1] - psi[0, -1])
        - (zeta[0, 1] - zeta[0, -1]) * (psi[1, 0] - psi[-1, 0])
    ) / (4 * d ** 2)
@stencil
def jpx_nb(zeta, psi, d):
    # Arakawa J+x term (zeta at edge midpoints, psi at corners).
    return (
        zeta[1, 0] * (psi[1, 1] - psi[1, -1])
        - zeta[-1, 0] * (psi[-1, 1] - psi[-1, -1])
        - zeta[0, 1] * (psi[1, 1] - psi[-1, 1])
        + zeta[0, -1] * (psi[1, -1] - psi[-1, -1])
    ) / (4 * d ** 2)
@stencil
def jxp_nb(zeta, psi, d):
    # Arakawa Jx+ term (zeta at corners, psi at edge midpoints).
    return (
        zeta[1, 1] * (psi[0, 1] - psi[1, 0])
        - zeta[-1, -1] * (psi[-1, 0] - psi[0, -1])
        - zeta[-1, 1] * (psi[0, 1] - psi[-1, 0])
        + zeta[1, -1] * (psi[1, 0] - psi[0, -1])
    ) / (4 * d ** 2)
@jit # (nopython=True, parallel=True, nogil=True)
def arakawa_nb(zeta, psi, d):
    # Average of the three discretizations gives Arakawa's conservative
    # Jacobian.  NOTE(review): the trailing .T matches the caller's axis
    # convention -- confirm against how periodic_arakawa slices the result.
    return (jpp_nb(zeta, psi, d) + jpx_nb(zeta, psi, d) + jxp_nb(zeta, psi, d)).T / 3
def periodic_arakawa(zeta, psi, d):
    """Arakawa bracket with periodic boundaries via one-cell wrap padding."""
    zeta_p = np.pad(zeta, 1, mode="wrap")
    psi_p = np.pad(psi, 1, mode="wrap")
    return arakawa_nb(zeta_p, psi_p, d)[1:-1, 1:-1]
# @jit
def nb_gradient_run(padded, dx):
fdy = (padded[2:, 1:-1] - padded[0:-2, 1:-1]) / (2 * dx)
fdx = (padded[1:-1, 2:] - padded[1:-1, 0:-2]) / (2 * dx)
return fdy, fdx
def periodic_gradient(input_field, dx):
    """Centered gradient with periodic boundary conditions."""
    wrapped = np.pad(input_field, 1, mode="wrap")
    return nb_gradient_run(wrapped, dx)
# @jit(nopython=True, nogil=True, parallel=True)
def laplace_np_numba(padded, dx):
    """5-point Laplacian of a 1-cell-padded field (interior points only)."""
    center = padded[1:-1, 1:-1]
    north = padded[0:-2, 1:-1]
    south = padded[2:, 1:-1]
    west = padded[1:-1, 0:-2]
    east = padded[1:-1, 2:]
    return (north + west - 4 * center + east + south) / dx ** 2
def periodic_laplace_func(a, dx):
    """Laplacian with periodic boundaries via one-cell wrap padding."""
    wrapped = np.pad(a, 1, "wrap")
    return laplace_np_numba(wrapped, dx)
# @jit(nopython=True, nogil=True, parallel=True)
def grad2d_np_numba(padded, dx):
    """Negative sum of centered y- and x-derivatives of a padded field."""
    north = padded[0:-2, 1:-1]
    west = padded[1:-1, 0:-2]
    east = padded[1:-1, 2:]
    south = padded[2:, 1:-1]
    # Same arithmetic shape as the original to stay bit-identical.
    return (
        -(
            -north / 2
            - west / 2
            + east / 2
            + south / 2
        )
        / dx
    )
def periodic_grad2d_func(a, dx):
    """Combined gradient with periodic boundaries via wrap padding."""
    wrapped = np.pad(a, 1, "wrap")
    return grad2d_np_numba(wrapped, dx)
def get_energy(n, phi, dx):
    """Total energy: integral of (density^2 + |grad phi|^2) / 2 over the grid."""
    grad_phi = periodic_grad2d_func(phi, dx)
    return np.sum(n ** 2 + np.abs(grad_phi) ** 2) * dx ** 2 / 2
def fourier_poisson(tensor, dx, times=1):
    """ Inverse operation to `fourier_laplace`. """
    # Solve laplace(phi) = tensor spectrally: divide the FFT by the symbol
    # of the (periodic) Laplacian, applied `times` times.
    tensor = tensor.reshape(1, *tensor.shape, 1)
    frequencies = np.fft.fft2(to_complex(tensor), axes=[1, 2])
    k = fftfreq(np.shape(tensor)[1:-1], mode="square")
    fft_laplace = -((2 * np.pi) ** 2) * k
    # The zero-frequency mode is not invertible; setting it to inf makes
    # its quotient 0, pinning the solution's mean.
    fft_laplace[(0,) * len(k.shape)] = np.inf
    result = np.real(
        np.fft.ifft2(divide_no_nan(frequencies, fft_laplace ** times), axes=[1, 2])
    ).astype(tensor.dtype)[0, ..., 0]
    return result * dx ** 2
def divide_no_nan(x, y):
    """Elementwise x / y with the result forced to 0 wherever y == 0."""
    with np.errstate(divide="ignore", invalid="ignore"):
        quotient = x / y
    return np.where(y == 0, 0, quotient)
def to_complex(x):
    """Cast x to a complex array: float64 -> complex128, other reals -> complex64."""
    arr = np.array(x)
    if arr.dtype in (np.complex64, np.complex128):
        return arr
    target = np.complex128 if arr.dtype == np.float64 else np.complex64
    return arr.astype(target)
def fftfreq(resolution, mode="vector", dtype=None):
    """FFT sample-frequency grid for the given resolution.

    mode: "vector" -> per-axis frequencies stacked on the last axis,
    "square" -> |k|^2, "absolute" -> |k|.  `dtype` is accepted for API
    compatibility but unused.
    """
    assert mode in ("vector", "absolute", "square")
    per_axis = [np.fft.fftfreq(int(n)) for n in resolution]
    k = np.stack(np.meshgrid(*per_axis, indexing="ij"), -1)
    k = expand_dims(k, 0).astype(float)
    if mode == "vector":
        return k
    k = np.sum(k ** 2, axis=-1, keepdims=True)
    if mode == "square":
        return k
    return np.sqrt(k)
def expand_dims(a, axis=0, number=1):
    """Insert `number` singleton axes at position `axis`."""
    for _ in range(number):
        a = np.expand_dims(a, axis)
    return a
def ndims(tensor):
    """Number of dimensions of an array-like (0 for scalars)."""
    return np.ndim(tensor)
|
import sys
import numpy as np
import h5py
import json
from scipy.stats import gamma,lognorm,norm
from numpy.random import negative_binomial as nb
import matplotlib.pyplot as plt
from dateutil import parser
import datetime
from prime_utils import prediction_filename
# sys.path.append('../../scripts')
from prime_model import modelPred
from prime_infection import infection
from prime_plot import plot_post_pred, plot_infection_curve
cLen = 10
def addModelDiscr(pred,chn,setup):
    """Turn push-forward predictions into posterior-predictive samples.

    Adds observation noise to `pred` (one row per MCMC sample) according to
    the likelihood configured in `setup`; returns `pred` unchanged unless
    setup["ppopts"]["postpred"] == 1.

    NOTE(review): the negative-binomial branch only perturbs pred[k,1:],
    i.e. column 0 is treated as non-predictive -- confirm against
    modelPred's output layout.
    """
    model_type = setup["mcmcopts"]['model_type']
    error_model_type = setup["mcmcopts"]['error_model_type']
    # if not specified return
    if "postpred" not in setup["ppopts"]:
        return pred
    # if pp not 1 return
    if setup["ppopts"]["postpred"] != 1:
        return pred
    # posterior-predictive
    if "likl_type" in setup["mcmcopts"] and setup["mcmcopts"]["likl_type"]=="negative_binomial":
        # Chain column holding log(alpha) depends on the wave count.
        alpha_ind = 4
        if model_type == "twoWave":
            alpha_ind = 8
        elif model_type == "threeWave":
            alpha_ind = 12
        for k in range(chn.shape[0]):
            alpha = np.exp(chn[k,alpha_ind])
            # NB success probability; +1e-4 guards against division issues
            # when predictions are ~0.
            p = alpha/(alpha+pred[k,1:]+1.e-4)
            if np.any(p<0):
                print(k,alpha,p,pred[k])
                quit()
            pred[k,1:] = nb(alpha,p)
    else:
        # Gaussian error models: additive, or additive + multiplicative.
        if error_model_type=="add":
            pred = np.array([pred[k]+np.exp(chn[k,-1])*np.random.normal() for k in range(chn.shape[0])])
        elif error_model_type=="addMult":
            pred = np.array([pred[k]+(np.exp(chn[k,-2])+np.exp(chn[k,-1])*pred[k])*np.random.normal() for k in range(chn.shape[0])])
        else:
            print("WARNING: Error model not recognized, not adding error to prediction")
    return pred
#-------------------------------------------------------
# Load the run configuration (JSON file path given on the command line).
run_setup=json.load(open(sys.argv[1]))
print(run_setup)
#-------------------------------------------------------
# definitions
fdata = run_setup["regioninfo"]["regionname"]+".dat"
fchno = run_setup["regioninfo"]["fchain"]
day0 = run_setup["regioninfo"]["day0"]
model_type = run_setup["mcmcopts"]['model_type']
error_model_type = run_setup["mcmcopts"]['error_model_type']
#-------------------------------------------------------
# For emcee testing. uncomment if using run_emcee
# fhno = 'emcee_' + run_setup["regioninfo"]["fchain"]
#-------------------------------------------------------
# retrieve MCMC chain
file = h5py.File(fchno, 'r')
chn = np.array(file["chain"])
file.close()
#-------------------------------------------------------
# Optional hold-out data to overplot on the forecast.
nc_new=None
if "newdata" in run_setup["ppopts"]:
    rawdata = np.loadtxt(run_setup["ppopts"]["newdata"],dtype=str)
    nc_new = [np.array([parser.parse(rawdata[i,0]) for i in range(rawdata.shape[0])])]
    nc_new.append(np.array([float(rawdata[i,1]) for i in range(rawdata.shape[0])]))
#-------------------------------------------------------
# extract data from raw data
rawdata = np.loadtxt(fdata,dtype=str)
days_since_day0 = np.array([(parser.parse(rawdata[i,0])-parser.parse(day0)).days for i in range(rawdata.shape[0])])
new_cases = np.array([float(rawdata[i,1]) for i in range(rawdata.shape[0])])
#-------------------------------------------------------
# get sigma for the incubation model
incubation_median = run_setup["incopts"]["incubation_median"]
incubation_sigma = run_setup["incopts"]["incubation_sigma"]
if "incubation_model" in run_setup["incopts"]:
    inc_model = run_setup["incopts"]["incubation_model"]
else:
    inc_model = "lognormal"
#-------------------------------------------------------
# compute predictions -> push forward or posterior predictive pdf
fh5 = prediction_filename(run_setup)
if run_setup["ppopts"]["runmodel"]:
    modelinfo={"model_type":model_type,
               "error_model_type":error_model_type,
               "error_weight":None,
               "days_since_day0":days_since_day0,
               "new_cases":new_cases,
               "incubation_model": inc_model,
               "incubation_median":incubation_median,
               "incubation_sigma":incubation_sigma,
               "inftype":run_setup["infopts"]["inftype"],
               "days_extra":run_setup["ppopts"]["days_extra"],
               "day0":run_setup["regioninfo"]["day0"]}
    if "incubation_type" in run_setup["mcmcopts"]:
        modelinfo["incubation_type"] = run_setup["mcmcopts"]["incubation_type"]
        print("Incubation type:",modelinfo["incubation_type"])
    # if "useconv" in run_setup["mcmcopts"]:
    #     modelinfo["useconv"] = run_setup["mcmcopts"]["useconv"]
    #     if modelinfo["useconv"] == 1:
    #         print("Using fft convolution instead of quadrature")
    # Thin the chain to nsamples states starting at nstart.
    nstart = run_setup["ppopts"]["nstart"]
    nsamples = run_setup["ppopts"]["nsamples"]
    nskip = (chn.shape[0]-nstart)//nsamples
    pred=[]
    chnPred=chn[nstart::nskip,:]
    print("=======================================")
    print("Mean N/2sigma:",chnPred[:,1].mean(),np.std(chnPred[:,1]))
    print("=======================================")
    # pushed forward pdf
    pred=np.array([modelPred(chnPred[k],modelinfo,is_cdf=True) for k in range(chnPred.shape[0])])
    # Difference the cumulative curves to get daily counts.
    pred1 = np.zeros(pred.shape)
    for j in range(1,pred.shape[1]):
        pred1[:,j]=pred[:,j]-pred[:,j-1]
    pred = pred1.copy()
    # posterior predictive
    pred = addModelDiscr(pred,chnPred,run_setup)
    # assemble set of dates
    f = h5py.File(fh5, 'w')
    dset1 = f.create_dataset("predictions", data=pred, compression="gzip")
    f.close()
else:
    # retrieve MCMC chain
    file = h5py.File(fh5, 'r')
    pred = np.array(file["predictions"])
    file.close()
# Date axis: observed dates followed by days_extra forecast days.
datesData = np.array([parser.parse(rawdata[i,0]) for i in range(rawdata.shape[0])])
datesPred = np.concatenate((datesData,
                            np.array([datesData[-1]+datetime.timedelta(days=i+1)
                                      for i in range(run_setup["ppopts"]["days_extra"])])))
# colormap settings
import matplotlib as mpl
cmap1 = mpl.cm.PuBu
cmap2 = mpl.cm.PuRd
qntList = [0.025]+[0.05*i for i in range(1,20)]+[0.975]
normalize = mpl.colors.Normalize(vmin=0.025,vmax=0.5)
iendData = np.where(datesPred==datesData[-1])[0][0]+1
#--------------------------------------------------------------------------------
# Plot push-forward/posterior predictive PDF
plot_post_pred(datesPred,pred,datesData,new_cases,qntList,normalize,iendData,run_setup,nc_new=nc_new)
#-------------------------------------------------------
# infection rate
fh5 = run_setup["infopts"]["finfout"]+".h5"
if run_setup["infopts"]["runmodel"]:
    modelinfo={"model_type":model_type,
               "error_model_type":error_model_type,
               "inftype":run_setup["infopts"]["inftype"],
               "day0":run_setup["regioninfo"]["day0"],
               "ndays":run_setup["infopts"]["ndays"]}
    nstart = run_setup["ppopts"]["nstart"]
    nsamples = run_setup["ppopts"]["nsamples"]
    nskip = (chn.shape[0]-nstart)//nsamples
    chnPred = chn[nstart::nskip,:]
    # infection curves
    infect=np.array([infection(chnPred[k],modelinfo) for k in range(chnPred.shape[0])])
    # assemble set of dates
    f = h5py.File(fh5, 'w')
    dset2 = f.create_dataset("infect", data=infect, compression="gzip")
    f.close()
else:
    # retrieve MCMC chain
    file = h5py.File(fh5, 'r')
    infect = np.array(file["infect"])
    file.close()
ndaysinf=(parser.parse(rawdata[-1,0])-parser.parse(rawdata[0,0])).days+run_setup["ppopts"]["days_extra"]+1
datesmean=np.array([parser.parse(rawdata[0,0])+datetime.timedelta(days=i) for i in range(ndaysinf)])
# Map each sampled infection curve (timestamp, value pairs) onto the
# common daily date axis.
infall = np.zeros((infect.shape[0],ndaysinf))
for i in range(infect.shape[0]):
    for j in range(infect.shape[2]):
        dtobj = datetime.datetime.fromtimestamp(infect[i,0,j])
        if len(np.where(datesmean==dtobj)[0])>0:
            posID = np.where(datesmean==dtobj)[0][0]
            infall[i,posID] = infect[i,1,j]
iendData = np.where(datesmean==datesData[-1])[0][0]+1
plot_infection_curve(datesmean,infall,qntList,normalize,iendData,run_setup)
#------------------------------------------------------------
# save csv files
from prime_utils import output_epicurves, output_infcurves
nskip = run_setup["csvout"]["nskip"]
#qlist = run_setup["csvout"]["qlist"]
qlist = qntList
fnewc = run_setup["csvout"]["fnewcases"]
finfc = run_setup["csvout"]["finfcurve"]
# Filename suffix encodes the error model (a / am) and pdf type (pp / pf).
if error_model_type=="add":
    fnewc=fnewc+"_a"
    finfc=finfc+"_a"
else:
    fnewc=fnewc+"_am"
    finfc=finfc+"_am"
if run_setup["ppopts"]["postpred"] == 1:
    fnewc=fnewc+"_pp"
else:
    fnewc=fnewc+"_pf"
fnewc=fnewc+".csv"
finfc=finfc+".csv"
output_epicurves(pred,datesPred,new_cases,nskip,qlist,fnewc)
output_infcurves(infall,datesmean,nskip,qlist,finfc)
|
#!/usr/bin/python3
import os.path
def reduce():
    """Filter german.dic down to german.small.

    Keeps words of length 3..6 that are not fully upper-case (drops
    acronyms); requires german.dic next to the script.
    """
    if not os.path.isfile("german.dic"):
        print("Please download the german dictionary \"german.dic\" from")
        print("https://sourceforge.net/projects/germandict/" +
              "files/german.7z/download")
        print("and place it inside this directory.")
        raise SystemExit  # same effect as exit(), without the site module
    # BUG FIX: the original never closed either file handle; context
    # managers guarantee the output is flushed and both are closed.
    with open("german.dic", "r", encoding="latin-1") as src, \
         open("german.small", "w") as dst:
        for line in src:
            word = line.strip()
            if len(word) > 6:
                continue
            if len(word) < 3:
                continue
            if word.upper() == word:
                continue
            dst.write(word + "\n")
if __name__ == "__main__":
    # Script entry point.
    reduce()
|
import concurrent.futures
import config
import time
import pandas as pd
import requests
import numpy as np
from ast import literal_eval
import os
def worker_process(d):
    """Fetch genres for a batch of up to 50 Spotify artist IDs.

    Returns {artist_id: [genres]}; artists the API returns as null map to
    an empty list.  Sleeps 1s per batch to stay under the rate limit.
    """
    data = requests.get("https://api.spotify.com/v1/artists",
                        {"ids":",".join(d.tolist())},
                        headers=config.spotify_headers).json()["artists"]
    genres = [artist["genres"] if artist else [] for artist in data]
    time.sleep(1)
    return {artist:genre for artist,genre in zip(d,genres)}
def main():
    """Resolve Spotify artist IDs to genre lists and write with_genres.csv.

    Batches unique artist IDs in groups of 50 (the API maximum) across a
    process pool, then maps the results back onto every input row.
    """
    full_data = pd.read_csv(os.path.join(config.DATADIR, "with_IDs.csv"))
    data = full_data.loc[~full_data["artist_IDs"].isna()]
    data = data["artist_IDs"].apply(lambda x: literal_eval(x)).values
    data = np.unique(np.hstack(data))
    # split into arrays of size 50 because 50 is the maximum returned per API call
    data = [data[i:i + 50] for i in range(0, len(data), 50)]
    res = {}
    with concurrent.futures.ProcessPoolExecutor() as executor:
        futures = [executor.submit(worker_process, d) for d in data]
        for future in concurrent.futures.as_completed(futures):
            this_res = future.result()
            if this_res:
                res.update(this_res)
    # rejoin with full data
    genres = []
    for artist_IDs in full_data["artist_IDs"].values:
        genres_this_row = []
        if isinstance(artist_IDs, str):
            artist_IDs = literal_eval(artist_IDs)
            # NOTE(review): res[artist_ID] raises KeyError if any batch
            # result is missing -- confirm fail-fast is intended here.
            for artist_ID in artist_IDs:
                genres_this_row.append(res[artist_ID])
        else:
            # Rows with no artist IDs get the literal string "NA".
            genres_this_row = "NA"
        genres.append(genres_this_row)
    full_data["genres"] = genres
    full_data.to_csv(os.path.join(config.DATADIR, "with_genres.csv"), index = False)
if __name__ == '__main__':
    # Script entry point.
    main()
from django.contrib.auth.models import User
from django.test import TestCase
from django.urls import resolve, reverse
from ..models import Work, Assignment, Worker
from ..views import worker_assignments
class WorkerAssignmentsTests(TestCase):
    """View tests for worker_assignments with a Work/Worker/Assignment fixture."""

    def setUp(self):
        # Build the minimal object graph the view needs, keep references so
        # the tests can use the real primary keys, and request the page once.
        # (Debug prints from the original removed.)
        self.work = Work.objects.create(name='Tailor', description='Tailor.')
        self.user = User.objects.create_user(username='john', email='john@doe.com', password='123')
        self.worker = Worker.objects.create(
            short_name='Manoj',
            full_Name='Tailor Manoj',
            aadhaar_number='123456789021',
            telephone_number='1234567890',
            local_address='Here, very Near',
            permanent_address='There, very far',
            work=self.work,
            created_by=self.user
        )
        self.assignment = Assignment.objects.create(
            worker=self.worker,
            asg_start_date='2020-04-29',
            asg_end_date='2020-04-29',
            created_by=self.user,
            updated_by=self.user
        )
        self.url = reverse('worker_assignments', kwargs={'pk': self.work.pk, 'worker_pk': self.worker.pk})
        self.response = self.client.get(self.url)

    def test_status_code(self):
        # assertEqual replaces the deprecated assertEquals alias.
        self.assertEqual(self.response.status_code, 200)

    def test_view_function(self):
        # BUG FIX: resolve the URL built from the created objects' actual
        # pks instead of the hard-coded '/works/2/workers/6/', which breaks
        # whenever the test database assigns different ids.
        view = resolve(self.url)
        self.assertEqual(view.func, worker_assignments)
|
from django.http.response import HttpResponse
from django.shortcuts import render
from django.http import HttpResponse
# Create your views here.
def test(request):
    """Health-check view: always answers 200 with body 'OK'."""
    return HttpResponse('OK')
# Response options considered: raw HTML string, JSON, or a template.
def main(request):
    """Serve the Kakao map page (templates/kakao_map.html)."""
    # return HttpResponse('<b><i>Hello</i></b>')
    # return HttpResponse('''
    # <script>alert(1)</script>
    # <b><i>Hello</i></b>
    # ''')
    # Render the kakao_map.html template.
    return render(request, 'kakao_map.html')
|
# Generated by Django 2.2.3 on 2019-08-01 07:42
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Initial schema for the Gamer app: Questions, User and ScoreLevel.

    Auto-generated by Django 2.2.3 -- prefer generating a follow-up
    migration over editing this one by hand.  verbose_name strings are
    Chinese ('game question', 'user', 'level').
    """
    initial = True
    dependencies = [
    ]
    operations = [
        migrations.CreateModel(
            name='Questions',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('group', models.CharField(choices=[('ADM', '行政组'), ('RDC', '研发组'), ('PRT', '产品组'), ('DES', '设计组'), ('OPE', '运营组')], max_length=3, verbose_name='组类')),
                ('question', models.TextField(verbose_name='问题')),
                ('option1', models.CharField(max_length=40, verbose_name='选项A')),
                ('option2', models.CharField(max_length=40, verbose_name='选项B')),
                ('option3', models.CharField(max_length=40, verbose_name='选项C')),
            ],
            options={
                'verbose_name': '游戏问题',
                'verbose_name_plural': '游戏问题',
                'db_table': 'question',
            },
        ),
        migrations.CreateModel(
            name='User',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('username', models.CharField(max_length=20, verbose_name='用户名字')),
                ('score', models.IntegerField(null=True, verbose_name='分数')),
                ('time', models.DateTimeField(default=django.utils.timezone.now, verbose_name='时间')),
            ],
            options={
                'verbose_name': '用户',
                'verbose_name_plural': '用户',
                'db_table': 'user',
            },
        ),
        migrations.CreateModel(
            name='ScoreLevel',
            fields=[
                ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
                ('A', models.IntegerField(verbose_name='20')),
                ('B', models.IntegerField(verbose_name='15')),
                ('C', models.IntegerField(verbose_name='10')),
                ('answer', models.ForeignKey(on_delete=django.db.models.deletion.SET, to='Gamer.Questions')),
            ],
            options={
                'verbose_name': '等级',
                'verbose_name_plural': '等级',
                'db_table': 'level',
            },
        ),
    ]
|
# Precompute 0!..20! so each query is a constant-time table lookup.
table = [1]
running = 1
for idx in range(1, 21):
    running *= idx
    table.append(running)
try:
    # Read "n m" pairs until EOF and print n! + m!.
    while True:
        n, m = map(int, input().split(' '))
        print(table[n] + table[m])
except EOFError:
    pass
# -*- coding: utf-8 -*-
import sys
sys.path.append('../')
import socket
import unittest
try:
from http_parser.parser import HttpParser
except ImportError:
from http_parser.pyparser import HttpParser
from ammo.phantom import HttpCompiler
TEST_SERV_ADDR = ('httpbin.org', 80)
class HTTPCompilerCase(unittest.TestCase):
    """Tests for ammo.phantom.HttpCompiler.

    NOTE(review): Python 2 era code -- str requests go straight to
    socket.send and substring checks run on the raw response; under
    Python 3 these would need bytes.  test_httpbin_resp requires network
    access to httpbin.org.
    """
    def test_eof(self):
        ''' Line endings satisfying RFC 2616.
        '''
        hc = HttpCompiler()
        headers = {
            'Host': 'httpbin.org',
            'Connection': 'close',
        }
        qs = '/path/to/some/resource'
        req = hc.build_raw(qs, method='GET', headers=headers)
        # Two lines at the end of request
        self.assertTrue(req.endswith('\r\n\r\n'))
        # Each line ends with <CR><LF>
        self.assertTrue(all([l.endswith('\r') for l in req.split('\n') if l]))
    def test_http_method(self):
        '''*method* kwarg effect.
        '''
        hc = HttpCompiler()
        headers = {
            'Host': 'httpbin.org',
            'Connection': 'close',
        }
        qs = '/path/to/some/resource'
        req = hc.build_raw(qs, method='GET', headers=headers)
        self.assertTrue(req.split('\r\n')[0].split(' ')[0] == 'GET')
    def test_constructor(self):
        ''' Instance attributes autosubstitution.
        '''
        headers = {
            'Host': 'httpbin.org',
            'Connection': 'close',
        }
        hc = HttpCompiler(method='PATCH', headers=headers)
        qs = '/path/to/check'
        req = hc.build_raw(qs)
        p = HttpParser()
        p.execute(req, len(req))
        result_hdrs = p.get_headers()
        # BUG FIX: assertTrue(p.get_method(), 'PATCH') never compared
        # anything -- the second argument of assertTrue is just the failure
        # message, so any truthy method name passed.
        self.assertEqual(p.get_method(), 'PATCH')
        self.assertTrue(all(
            [result_hdrs[h] == headers[h] for h in headers.keys()]))
    def test_phantom_format(self):
        ''' Request size calculation.
        '''
        headers = {
            'Host': 'httpbin.org',
            'Connection': 'close',
        }
        hc = HttpCompiler(method='PATCH', headers=headers)
        qs = '/path/to/check'
        req = hc.build_phantom(qs, body='Some stuff')
        p_len, p_req = req.split('\n', 1)
        self.assertTrue(int(p_len) == (len(p_req) - 1))
    def test_httpbin_resp(self):
        '''Check HTTP status code of responce from real public server.
        '''
        hc = HttpCompiler()
        headers = {
            'Host': 'httpbin.org',
            'Connection': 'close',
        }
        qs = '/ip'
        req = hc.build_raw(qs, method='GET', headers=headers)
        s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        s.connect(TEST_SERV_ADDR)
        s.send(req)
        #s.send('GET / HTTP/1.1\r\nHost: httpbin.org\r\n\r\n')
        resp = s.recv(100) # 100 bytes enough for header.
        self.assertTrue('200 OK' in resp)
|
import discord
from discord.ext import commands
import requests
import json
import shlex
# CoinMarketCap ticker endpoint for GRIM coin, quoted in JPY.
API_URL = "https://api.coinmarketcap.com/v1/ticker/grimcoin/?convert=JPY"
client = discord.Client()
def current_price():
    """Return the current GRIM price in JPY (string, exactly as the API sends it)."""
    headers = {"content-type": "application/json"}
    data = requests.get(API_URL, headers=headers).json()
    return (data[0]['price_jpy'])
def talk(message):
    """Send `message` to the A3RT smalltalk API and return its reply text.

    NOTE(review): 'apikey' is a placeholder -- a real key must be supplied
    via configuration before this can work.
    """
    payload = {
        'apikey':'apikey',
        'query':message
    }
    r = requests.post('https://api.a3rt.recruit-tech.co.jp/talk/v1/smalltalk', data=payload).json()
    return str(r['results'][0]['reply'])
@client.event
async def on_ready():
    # Log the bot's identity once the gateway connection is established.
    print('Logged in as')
    print(client.user.name)
    print(client.user.id)
    print('------')
@client.event
async def on_message(message):
    """Route chat commands: ?いくら (price), ?イクラ (pun), ?トーク (smalltalk)."""
    if client.user != message.author:
        split = message.content.split()
        # BUG FIX: a whitespace-only message yields an empty list and the
        # original crashed with IndexError on split[0].
        if not split:
            return
        # Price command.
        if split[0].startswith("?いくら") or split[0].startswith("?いくら"):
            if len(split) == 1:
                m = str(message.author.mention) + " " + "1GRIMは" + str(current_price()) + "円です!"
                await client.send_message(message.channel, m)
            elif split[1].isdigit():
                m = str(message.author.mention) + " " + \
                    str(split[1]) + "GRIMは" + str(float(current_price()) * float(split[1])) + "円です!"
                await client.send_message(message.channel, m)
        # Salmon-roe pun command (イクラ sounds like いくら "how much").
        if split[0].startswith("?イクラ") or split[0].startswith("?イクラ"):
            if len(split) == 1:
                m = "それはイクラです"
                await client.send_message(message.channel, m)
            elif split[1].isdigit():
                m = "それはイクラです"
                await client.send_message(message.channel, m)
        # Smalltalk command.
        if split[0].find("?トーク") != -1 or split[0].find("?トーク") != -1:
            # BUG FIX: guard against a missing argument (bare ?トーク used
            # to raise IndexError on split[1]).
            if len(split) > 1:
                m = str(message.author.mention) + " " + talk(split[1])
                await client.send_message(message.channel, m)
client.run("TOKEN")
|
# Dictionary basics demo: string keys with mixed value types.
todaysCustomer = {"name":"Mark", "email":"mark@zuckerberg.com", "city":"San Francisco", "telephone": 92321321321}
print(todaysCustomer['telephone'])
# Integer keys need not be contiguous or ordered.
rankings = {5: "Finland", 2: "Norway", 3: "Sweden", 7: "Iceland"}
print(rankings[3])
# Keys may mix types, as long as each key is hashable.
things_to_remember = {
    0: "the lowest number",
    "a dozen": 12,
    "snake eyes": "a pair of ones",
    13: "a baker's dozen"
}
print(things_to_remember[0] + ' ' + str(things_to_remember["a dozen"]))
from django.conf import settings
# Needed by RegisterSerializer's translated error messages (`_` was undefined
# before). On Django < 2.0 use `ugettext_lazy` instead.
from django.utils.translation import gettext_lazy as _

from rest_framework import serializers

from rest_auth.serializers import UserDetailsSerializer
from rest_auth.registration.serializers import RegisterSerializer
from allauth.account import app_settings as allauth_settings
from allauth.account.adapter import get_adapter
from allauth.account.utils import setup_user_email
from allauth.utils import (email_address_exists,
                           get_username_max_length)

from accounts.models import CustomUser as User
class ProfileSerializer(UserDetailsSerializer):
    """User serializer extended with profile fields (bio, pic, gender, status, DOB)."""
    bio = serializers.CharField(source="profile.bio", required=False)
    pic = serializers.ImageField(source="profile.pic", required=False)
    gender = serializers.CharField(source="profile.gender", required=False)
    status = serializers.CharField(source="profile.status", required=False)
    DOB = serializers.DateTimeField(source="profile.date_of_birth", required=False)

    class Meta(UserDetailsSerializer.Meta):
        model = User
        fields = UserDetailsSerializer.Meta.fields + ('bio', 'pic', 'gender', 'status', 'DOB')

    def update(self, instance, validated_data):
        """Update the user, then copy any supplied profile fields onto instance.profile.

        Because each extra field declares source="profile.<attr>", DRF nests the
        incoming values under validated_data['profile'], keyed by the *source*
        attribute name (not the serializer field name).
        """
        profile_data = validated_data.pop('profile', {})
        bio = profile_data.get('bio')
        pic = profile_data.get('pic')
        gender = profile_data.get('gender')
        status = profile_data.get('status')
        # BUG FIX: the nested dict is keyed by the source attribute
        # ('date_of_birth'), not the field name ('DOB'); the old lookup always
        # returned None, so date-of-birth updates were silently dropped.
        dob = profile_data.get('date_of_birth')
        instance = super(ProfileSerializer, self).update(instance, validated_data)
        profile = instance.profile
        if profile_data:
            # Only truthy values overwrite; empty strings/None leave fields as-is.
            if bio:
                profile.bio = bio
            if pic:
                profile.pic = pic
            if gender:
                profile.gender = gender
            if status:
                profile.status = status
            if dob:
                profile.date_of_birth = dob
            profile.save()
        return instance
class RegisterSerializer(serializers.Serializer):
    """Registration payload: username/email/passwords plus optional names.

    Mirrors rest_auth's RegisterSerializer, extended with first/last name.
    """
    first_name = serializers.CharField(required=False)
    last_name = serializers.CharField(required=False)
    username = serializers.CharField(
        max_length=get_username_max_length(),
        min_length=allauth_settings.USERNAME_MIN_LENGTH,
        required=allauth_settings.USERNAME_REQUIRED
    )
    email = serializers.EmailField(required=allauth_settings.EMAIL_REQUIRED)
    password1 = serializers.CharField(write_only=True)
    password2 = serializers.CharField(write_only=True)

    def validate_username(self, username):
        """Normalise the username through the allauth adapter."""
        return get_adapter().clean_username(username)

    def validate_email(self, email):
        """Normalise the email and enforce uniqueness when configured."""
        email = get_adapter().clean_email(email)
        if allauth_settings.UNIQUE_EMAIL:
            if email and email_address_exists(email):
                # `_` is gettext_lazy; previously undefined (see module imports).
                raise serializers.ValidationError(
                    _("A user is already registered with this e-mail address."))
        return email

    def validate_first_name(self, name):
        # NOTE(review): returns None (implicitly) for a falsy name, replacing
        # e.g. '' with None — confirm callers rely on this before changing it.
        if name:
            return name

    def validate_last_name(self, name):
        if name:
            return name

    def validate_password1(self, password):
        return get_adapter().clean_password(password)

    def validate(self, data):
        """Cross-field check: the two password entries must match."""
        if data['password1'] != data['password2']:
            raise serializers.ValidationError(_("The two password fields didn't match."))
        return data

    def custom_signup(self, request, user):
        # Hook for subclasses; intentionally a no-op here.
        pass

    def get_cleaned_data(self):
        """Return the subset of validated data the allauth adapter consumes."""
        return {
            'username': self.validated_data.get('username', ''),
            'password1': self.validated_data.get('password1', ''),
            'email': self.validated_data.get('email', ''),
            'first_name': self.validated_data.get('first_name', ''),
            'last_name': self.validated_data.get('last_name', '')
        }

    def save(self, request):
        """Create, persist, and return the new user via the allauth adapter."""
        adapter = get_adapter()
        user = adapter.new_user(request)
        self.cleaned_data = self.get_cleaned_data()
        adapter.save_user(request, user, self)
        self.custom_signup(request, user)
        setup_user_email(request, user, [])
        return user
|
# Copyright 2017 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Define abstractions for various MySQL flavors."""
import environment
import logging
import os
import subprocess
class MysqlFlavor(object):
  """Default SQL statements shared by all MySQL flavors."""

  def demote_master_commands(self):
    """Returns the statements that stop the current master."""
    return [
        "SET GLOBAL read_only = ON",
        "FLUSH TABLES WITH READ LOCK",
        "UNLOCK TABLES",
    ]

  def promote_slave_commands(self):
    """Returns the statements that convert a slave into a master."""
    return [
        "STOP SLAVE",
        "RESET SLAVE ALL",
        "SET GLOBAL read_only = OFF",
    ]

  def reset_replication_commands(self):
    """Returns the statements that wipe replication state."""
    return [
        "STOP SLAVE",
        "RESET SLAVE ALL",
        "RESET MASTER",
    ]

  def change_master_commands(self, host, port, pos):
    """Flavor-specific; subclasses must supply the CHANGE MASTER sequence."""
    raise NotImplementedError()

  def set_semi_sync_enabled_commands(self, master=None, slave=None):
    """Returns statements toggling semi-sync for whichever roles were given."""
    cmds = []
    # Only emit a statement for the roles the caller actually specified.
    for role, enabled in (("master", master), ("slave", slave)):
      if enabled is not None:
        cmds.append("SET GLOBAL rpl_semi_sync_%s_enabled = %d" % (role, enabled))
    return cmds

  def extra_my_cnf(self):
    """Returns the path to an extra my_cnf file, or None if not needed."""
    return None

  def master_position(self, tablet):
    """Returns the position from SHOW MASTER STATUS as a string."""
    raise NotImplementedError()

  def position_equal(self, a, b):
    """Returns true if position 'a' is equal to 'b'."""
    raise NotImplementedError()

  def position_at_least(self, a, b):
    """Returns true if position 'a' is at least as far along as 'b'."""
    raise NotImplementedError()

  def position_after(self, a, b):
    """Returns true if position 'a' is strictly after 'b'."""
    return self.position_at_least(a, b) and not self.position_equal(a, b)

  def enable_binlog_checksum(self, tablet):
    """Enables binlog_checksum and returns True if the flavor supports it.

    Args:
      tablet: A tablet.Tablet object.

    Returns:
      False if the flavor doesn't support binlog_checksum.
    """
    tablet.mquery("", "SET @@global.binlog_checksum=1")
    return True

  def disable_binlog_checksum(self, tablet):
    """Disables binlog_checksum if the flavor supports it."""
    tablet.mquery("", "SET @@global.binlog_checksum=0")
class MariaDB(MysqlFlavor):
  """MariaDB-specific overrides of the base flavor."""

  def reset_replication_commands(self):
    """Also clears gtid_slave_pos, which MariaDB tracks separately."""
    return [
        "STOP SLAVE",
        "RESET SLAVE ALL",
        "RESET MASTER",
        "SET GLOBAL gtid_slave_pos = ''",
    ]

  def extra_my_cnf(self):
    return environment.vttop + "/config/mycnf/master_mariadb.cnf"

  def master_position(self, tablet):
    """Returns the current GTID position, prefixed with the flavor tag."""
    rows = tablet.mquery("", "SELECT @@GLOBAL.gtid_binlog_pos")
    return "MariaDB/" + rows[0][0]

  def position_equal(self, a, b):
    return a == b

  def position_at_least(self, a, b):
    # Positions look like MariaDB/A-B-C; only the sequence number C is compared.
    seq_a = int(a.split("-")[2])
    seq_b = int(b.split("-")[2])
    return seq_a >= seq_b

  def change_master_commands(self, host, port, pos):
    """Point replication at (host, port) starting from GTID position pos."""
    gtid = pos.split("/")[1]
    return [
        "SET GLOBAL gtid_slave_pos = '%s'" % gtid,
        "CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, "
        "MASTER_USER='vt_repl', MASTER_USE_GTID = slave_pos" %
        (host, port)]
class MariaDB103(MariaDB):
  """Overrides specific to MariaDB 10.3+."""
  def extra_my_cnf(self):
    # 10.3 ships its own my.cnf template, distinct from the base MariaDB one.
    return environment.vttop + "/config/mycnf/master_mariadb103.cnf"
class MySQL56(MysqlFlavor):
  """Overrides specific to MySQL 5.6."""
  def master_position(self, tablet):
    # GTID set from the server, prefixed with the flavor tag.
    gtid = tablet.mquery("", "SELECT @@GLOBAL.gtid_executed")[0][0]
    return "MySQL56/" + gtid
  def position_equal(self, a, b):
    # Delegates GTID-set comparison to the mysqlctl binary.
    # NOTE(review): subprocess.check_output returns bytes on Python 3, so the
    # comparison with the str "true" would always be False there — confirm
    # this file still targets Python 2, or add .decode().
    return subprocess.check_output([
        "mysqlctl", "position", "equal", a, b,
    ]).strip() == "true"
  def position_at_least(self, a, b):
    # Same bytes-vs-str caveat as position_equal above.
    return subprocess.check_output([
        "mysqlctl", "position", "at_least", a, b,
    ]).strip() == "true"
  def extra_my_cnf(self):
    return environment.vttop + "/config/mycnf/master_mysql56.cnf"
  def change_master_commands(self, host, port, pos):
    # Point replication at (host, port) using auto-positioning from `pos`.
    gtid = pos.split("/")[1]
    return [
        "RESET MASTER",
        "SET GLOBAL gtid_purged = '%s'" % gtid,
        "CHANGE MASTER TO MASTER_HOST='%s', MASTER_PORT=%d, "
        "MASTER_USER='vt_repl', MASTER_AUTO_POSITION = 1" %
        (host, port)]
# Map of registered MysqlFlavor classes (keyed by an identifier).
flavor_map = {}
# The currently-selected flavor instance; populated by set_mysql_flavor().
MYSQL_FLAVOR = None
# mysql_flavor is a function because we need something to import before the
# variable MYSQL_FLAVOR is initialized, since that doesn't happen until after
# the command-line options are parsed. If we make mysql_flavor a variable and
# import it before it's initialized, the module that imported it won't get the
# updated value when it's later initialized.
def mysql_flavor():
  """Returns the MysqlFlavor instance chosen by set_mysql_flavor()."""
  return MYSQL_FLAVOR
def set_mysql_flavor(flavor):
  """Set the object that will be returned by mysql_flavor().

  If flavor is falsy, fall back to the MYSQL_FLAVOR environment variable,
  and finally to "MariaDB".

  Args:
    flavor: String of the MySQL flavor e.g. "MariaDB" or "MySQL56".
  """
  global MYSQL_FLAVOR
  # Resolve the name: explicit argument > env var > hard default.
  # (The env var may be set but equal to "", hence the chained "or".)
  flavor = flavor or os.environ.get("MYSQL_FLAVOR", "MariaDB") or "MariaDB"
  entry = flavor_map.get(flavor, None)
  if not entry:
    logging.error("Unknown MYSQL_FLAVOR '%s'", flavor)
    exit(1)
  flavor_cls = entry["cls"]
  env_name = entry["env"]
  MYSQL_FLAVOR = flavor_cls()
  # Export the variable explicitly in case a command-line flag overrode it.
  os.environ["MYSQL_FLAVOR"] = env_name
  logging.debug("Using MySQL flavor: %s, setting MYSQL_FLAVOR=%s (%s)",
                str(flavor), env_name, flavor_cls)
def register_flavor(flavor, cls, env):
  """Register an available MySQL flavor.

  Note: We need the 'env' argument because our internal implementation is
  similar to 'MariaDB' (and hence requires MYSQL_FLAVOR=MariaDB) but has its
  own flavor class.

  Args:
    flavor: Name of the flavor (must be passed to test flag --mysql-flavor).
    cls: Class which inherits MysqlFlavor and provides the implementation.
    env: Value which will be used for the environment variable MYSQL_FLAVOR.
  """
  existing = flavor_map.get(flavor)
  if existing is not None:
    # Duplicate registration is a programming error; fail loudly.
    logging.error("Cannot register MySQL flavor %s because class %s (env: %s)"
                  " is already registered for it.",
                  flavor, existing["cls"], existing["env"])
    exit(1)
  flavor_map[flavor] = {"cls": cls, "env": env}
# Register the built-in flavors at import time so --mysql-flavor can pick them.
register_flavor("MariaDB", MariaDB, "MariaDB")
register_flavor("MariaDB103", MariaDB103, "MariaDB103")
register_flavor("MySQL56", MySQL56, "MySQL56")
|
# Calculate the factorial of a number (iteratively, to avoid Python's
# recursion-depth limit on large inputs).
def factorial(n):
    """Return n! for n >= 0; return -1 for negative n (legacy sentinel).

    The -1 return value is kept for backward compatibility with existing
    callers; new code would normally raise ValueError instead.
    """
    if n < 0:
        return -1
    result = 1
    for k in range(2, n + 1):
        result *= k
    return result
# Exercise factorial on the edge cases: 0, a small value, a larger value, and
# a negative input (which yields the -1 sentinel). Note that result4 is
# computed but deliberately not printed below.
results = [factorial(value) for value in (0, 5, 12, -2)]
result1, result2, result3, result4 = results
print ("Results = ",result1,result2,result3)
import sys
import os
from os.path import isfile, join
import csv
import pandas as pd
import numpy as np
from collections import defaultdict
class MinMaxDenormalise:
    """Undo min-max [0.1, 0.9] normalisation of 516-feature WORLD vocoder rows."""

    def __init__(self):
        # label -> {'<feature>_max'/'<feature>_min' -> value}
        self.local_values = defaultdict()
        # '<feature>_max'/'<feature>_min' -> value
        self.global_values = defaultdict()

    def get_local_norms(self, csvs):
        """Load per-utterance min/max stats from 'mmx_local_norms.data'.

        Args:
            csvs: unused; kept for interface compatibility with callers.
        """
        try:
            local_norms = pd.read_csv('mmx_local_norms.data', index_col=0)
            header = []
            for i in range(1, 517):
                header.append('{}_max'.format(i))
                header.append('{}_min'.format(i))
            for label in local_norms.index.values.tolist():
                self.local_values[label] = defaultdict()
                for col in header:
                    self.local_values[label][col] = np.float64(local_norms.loc[label, col])
        except FileNotFoundError:
            # BUG FIX: the message previously named the *global* norms file.
            print("'mmx_local_norms.data' does not exist!")
            exit(1)

    def get_global_norms(self):
        """Load corpus-wide min/max stats from 'mmx_global_norms.data'."""
        try:
            global_norms = pd.read_csv('mmx_global_norms.data')
            header = []
            for i in range(1, 517):
                header.append('{}_max'.format(i))
                header.append('{}_min'.format(i))
            for col in header:
                self.global_values[col] = np.float64(global_norms.loc[:, col][:])
        except FileNotFoundError:
            print("'mmx_global_norms.data' does not exist!")
            exit(1)

    def denormalise_by_local(self, csvs):
        """Write a *_denormalised.csv next to each input, using per-label stats."""
        local_minmax = pd.read_csv('mmx_local_norms.data')
        local_minmax.set_index('label', inplace=True)
        for file in csvs:
            # Read in the normalised data file.
            df = pd.read_csv(file, header=None)
            # The stats row is keyed by the file's basename (its label).
            label = os.path.split(os.path.splitext(os.path.normpath(file))[0])[1]
            minmax = local_minmax.loc[label, :]
            # Renamed from max/min so the builtins are no longer shadowed.
            maxima = defaultdict()
            minima = defaultdict()
            rng = defaultdict()
            feature = 1
            for i in range(0, len(minmax), 2):  # even columns hold the max values
                maxima[str(feature)] = minmax[i]
                feature += 1
            feature = 1
            for i in range(1, len(minmax), 2):  # odd columns hold the min values
                minima[str(feature)] = minmax[i]
                feature += 1
            for i in range(1, 517):  # value range per feature
                rng[str(i)] = maxima[str(i)] - minima[str(i)]
            denormalised = '{}_denormalised.csv'.format(os.path.splitext(os.path.normpath(file))[0])
            with open(denormalised, 'w') as f:
                wr = csv.writer(f, lineterminator='\n')
                for row in range(len(df)):
                    world_516 = [df.iloc[row, :][0]]  # first column is the label
                    for i in range(1, 517):
                        value = df.iloc[row, :][i]
                        if value <= 0:
                            value_prime = 0
                        else:
                            # Invert x' = 0.1 + 0.8*(x - min)/rng.
                            value_prime = abs((((value - 0.1) * rng[str(i)]) / (0.9 - 0.1)) + minima[str(i)])
                        world_516.append(value_prime)
                    # NOTE(review): per the original comments, index 514 is
                    # aperiodicity, 515 is f0, 516 is the voicing flag.
                    if world_516[514] >= 1:  # clamp aperiodicity at 1
                        world_516[514] = 1
                    if world_516[515] <= 50:  # f0 below 50 Hz -> unvoiced
                        world_516[515] = 0
                        world_516[516] = 0
                    else:
                        world_516[516] = 1
                    wr.writerow(world_516)  # minmax denormalisation
class MVNDenormalise:
    """Undo mean/variance (z-score) normalisation of 516-feature WORLD rows."""

    def __init__(self):
        # label -> {'<feature>_mean'/'<feature>_std' -> value}
        self.local_values = defaultdict()
        # '<feature>_mean'/'<feature>_std' -> value
        self.global_values = defaultdict()

    def get_local_norms(self, csvs):
        """Load per-utterance mean/std stats from 'mvn_local_norms.data'.

        Args:
            csvs: unused; kept for interface compatibility with callers.
        """
        try:
            local_norms = pd.read_csv('mvn_local_norms.data', index_col=0)
            header = []
            for i in range(1, 517):
                header.append('{}_mean'.format(i))
                header.append('{}_std'.format(i))
            for label in local_norms.index.values.tolist():
                self.local_values[label] = defaultdict()
                for col in header:
                    self.local_values[label][col] = np.float64(local_norms.loc[label, col])
        except FileNotFoundError:
            # BUG FIX: the message previously named the *global* norms file.
            print("'mvn_local_norms.data' does not exist!")
            exit(1)

    def get_global_norms(self):
        """Load corpus-wide mean/std stats from 'mvn_global_norms.data'."""
        try:
            global_norms = pd.read_csv('mvn_global_norms.data')
            header = []
            for i in range(1, 517):
                header.append('{}_mean'.format(i))
                header.append('{}_std'.format(i))
            for col in header:
                self.global_values[col] = np.float64(global_norms.loc[:, col][:])
        except FileNotFoundError:
            print("'mvn_global_norms.data' does not exist!")
            exit(1)

    def denormalise_by_local(self, csvs):
        """Write a *_denormalised.csv next to each input, using per-label stats."""
        local_mvn = pd.read_csv('mvn_local_norms.data')
        local_mvn.set_index('label', inplace=True)
        for file in csvs:
            # Read in the normalised data file.
            df = pd.read_csv(file, header=None)
            # The stats row is keyed by the file's basename (its label).
            label = os.path.split(os.path.splitext(os.path.normpath(file))[0])[1]
            mvn = local_mvn.loc[label, :]
            mean = defaultdict()
            std = defaultdict()
            feature = 1
            # Comment fixed: the old copy-pasted comments said "max"/"min".
            for i in range(0, len(mvn), 2):  # even columns hold the mean values
                mean[str(feature)] = mvn[i]
                feature += 1
            feature = 1
            for i in range(1, len(mvn), 2):  # odd columns hold the std values
                std[str(feature)] = mvn[i]
                feature += 1
            denormalised = '{}_denormalised.csv'.format(os.path.splitext(os.path.normpath(file))[0])
            with open(denormalised, 'w') as f:
                wr = csv.writer(f, lineterminator='\n')
                for row in range(len(df)):
                    world_516 = [df.iloc[row, :][0]]  # first column is the label
                    for i in range(1, 517):
                        value = df.iloc[row, :][i]
                        if value <= 0:
                            value_prime = 0
                        else:
                            # Invert z-scoring: x = z*std + mean.
                            value_prime = (value * std[str(i)]) + mean[str(i)]
                        world_516.append(value_prime)
                    # NOTE(review): per the original comments, index 514 is
                    # aperiodicity, 515 is f0, 516 is the voicing flag.
                    if world_516[514] >= 1:  # clamp aperiodicity at 1
                        world_516[514] = 1
                    if world_516[515] <= 50:  # f0 below 50 Hz -> unvoiced
                        world_516[515] = 0
                        world_516[516] = 0
                    else:
                        world_516[516] = 1
                    wr.writerow(world_516)  # mvn denormalisation
class BoxDenormalise:
    """Undo Box-Cox normalisation of 516-feature WORLD vocoder rows."""

    def __init__(self):
        # label -> {'<feature>_var' -> value}
        self.local_values = defaultdict()
        # '<feature>_var' -> value
        self.global_values = defaultdict()

    def get_local_norms(self, csvs):
        """Load per-utterance variance stats from 'box_local_norms.data'.

        Args:
            csvs: unused; kept for interface compatibility with callers.
        """
        try:
            local_norms = pd.read_csv('box_local_norms.data', index_col=0)
            header = []
            for i in range(1, 517):
                header.append('{}_var'.format(i))
            for label in local_norms.index.values.tolist():
                self.local_values[label] = defaultdict()
                for col in header:
                    self.local_values[label][col] = np.float64(local_norms.loc[label, col])
        except FileNotFoundError:
            # BUG FIX: the message previously named the *global* norms file.
            print("'box_local_norms.data' does not exist!")
            exit(1)

    def get_global_norms(self):
        """Load corpus-wide variance stats from 'box_global_norms.data'."""
        try:
            global_norms = pd.read_csv('box_global_norms.data')
            header = []
            for i in range(1, 517):
                header.append('{}_var'.format(i))
            for col in header:
                self.global_values[col] = np.float64(global_norms.loc[:, col][:])
        except FileNotFoundError:
            print("'box_global_norms.data' does not exist!")
            exit(1)

    def denormalise_by_local(self, csvs):
        """Write a *_denormalised.csv next to each input, using per-label stats."""
        local_box = pd.read_csv('box_local_norms.data')
        local_box.set_index('label', inplace=True)
        for file in csvs:
            # Read in the normalised data file.
            df = pd.read_csv(file, header=None)
            # The stats row is keyed by the file's basename (its label).
            label = os.path.split(os.path.splitext(os.path.normpath(file))[0])[1]
            box = local_box.loc[label, :]
            var = defaultdict()
            for i in range(0, len(box)):  # variance value per feature
                var[str(i + 1)] = box[i]
            denormalised = '{}_denormalised.csv'.format(os.path.splitext(os.path.normpath(file))[0])
            with open(denormalised, 'w') as f:
                wr = csv.writer(f, lineterminator='\n')
                for row in range(len(df)):
                    world_516 = [df.iloc[row, :][0]]  # first column is the label
                    for i in range(1, 517):
                        value = df.iloc[row, :][i]
                        if value <= 0:
                            value_prime = 0
                        else:
                            # Inverse Box-Cox with lambda = 0.043.
                            # NOTE(review): 10e-7 equals 1e-6 — confirm it was
                            # not intended to be 1e-7; left unchanged.
                            value_prime = (((value * 0.043) + 1) ** (1 / 0.043)) - 10e-7
                        world_516.append(value_prime)
                    # NOTE(review): per the original comments, index 514 is
                    # aperiodicity, 515 is f0, 516 is the voicing flag.
                    if world_516[514] >= 1:  # clamp aperiodicity at 1
                        world_516[514] = 1
                    if world_516[515] <= 50:  # f0 below 50 Hz -> unvoiced
                        world_516[515] = 0
                        world_516[516] = 0
                    else:
                        world_516[516] = 1
                    wr.writerow(world_516)  # boxcox denormalisation
def mmx():
    # Collect every *.mmx file in the current directory and denormalise it
    # with the per-utterance min-max statistics.
    data = []
    for name in os.listdir('.'):
        if os.path.splitext(name)[1] == '.mmx':
            data.append(name)
    denorm = MinMaxDenormalise()
    denorm.get_local_norms(data)
    denorm.get_global_norms()
    denorm.denormalise_by_local(data)
def mvn():
    # Collect every *.mvn file in the current directory and denormalise it
    # with the per-utterance mean/variance statistics.
    data = []
    for name in os.listdir('.'):
        if os.path.splitext(name)[1] == '.mvn':
            data.append(name)
    denorm = MVNDenormalise()  # renamed local: no longer shadows this function
    denorm.get_local_norms(data)
    denorm.get_global_norms()
    denorm.denormalise_by_local(data)
def box():
    # Collect every *.box file in the current directory and denormalise it
    # with the per-utterance Box-Cox statistics.
    data = []
    for name in os.listdir('.'):
        if os.path.splitext(name)[1] == '.box':
            data.append(name)
    denorm = BoxDenormalise()
    denorm.get_local_norms(data)
    denorm.get_global_norms()
    denorm.denormalise_by_local(data)
# Script entry point: min-max denormalisation is active; the mean/variance and
# Box-Cox variants are kept here for convenience, commented out.
mmx()
#mvn()
#box()
from direct.distributed.DistributedObjectGlobalUD import DistributedObjectGlobalUD
from direct.directnotify import DirectNotifyGlobal
import SettingsMgrBase
class SettingsMgrUD(DistributedObjectGlobalUD):
    """UberDOG-side settings manager; concrete behavior lives in subclasses."""
    notify = DirectNotifyGlobal.directNotify.newCategory('SettingsMgrUD')

    def __init__(self, air):
        DistributedObjectGlobalUD.__init__(self, air)

    def announceGenerate(self):
        DistributedObjectGlobalUD.announceGenerate(self)

    def requestAllChangedSettings(self):
        # Intentionally empty; overridden by subclasses.
        pass

    def d_settingChange(self, settingName, valueStr):
        """Broadcast a single setting change (name + stringified value)."""
        self.sendUpdate('settingChange', [settingName, valueStr])
import time, pytest
import sys,os
sys.path.insert(1,os.path.abspath(os.path.join(os.path.dirname( __file__ ),'..','..','lib')))
from clsCommon import Common
import clsTestService
from localSettings import *
import localSettings
from utilityTestFunc import *
import enums
class Test:
    #================================================================================================================================
    # @Author: Michal Zomper
    # Test Name : My Media - View: Collapsed Expanded
    # Test description:
    # upload several entries
    # in my media - In the page's top right side - Change between the view options :
    # Collapsed view
    # Expanded view
    # All the entries / buttons / menus should be displayed in both view options and the page should look properly
    #================================================================================================================================
    testNum = "669"
    supported_platforms = clsTestService.updatePlatforms(testNum)
    status = "Pass"
    timeout_accured = "False"
    driver = None
    common = None
    # Test variables
    entryName1 = None
    entryName2 = None
    description = "Description"
    tags = "Tags,"
    filePath = localSettings.LOCAL_SETTINGS_MEDIA_PATH + r'\images\qrcode_middle_4.png'

    # Run the test as different instances on all the supported platforms.
    @pytest.fixture(scope='module', params=supported_platforms)
    def driverFix(self, request):
        return request.param

    def test_01(self, driverFix, env):
        # Write to the log that we started the test.
        logStartTest(self, driverFix)
        try:
            ########################### TEST SETUP ###########################
            # Capture test start time.
            self.startTime = time.time()
            # Initialize all the basic vars and log in.
            self, self.driver = clsTestService.initializeAndLoginAsUser(self, driverFix)
            self.common = Common(self.driver)
            self.entryName1 = clsTestService.addGuidToString("My Media - View: Collapsed Expanded 1", self.testNum)
            self.entryName2 = clsTestService.addGuidToString("My Media - View: Collapsed Expanded 2", self.testNum)
            ##################### TEST STEPS - MAIN FLOW #####################
            for i in range(1, 3):
                # getattr replaces the previous eval('self.entryName' + str(i)):
                # same attribute lookup without compiling/executing source text.
                entryName = getattr(self, 'entryName' + str(i))
                writeToLog("INFO", "Step " + str(i) + ": Going to upload new entry '" + entryName + "'")
                if self.common.upload.uploadEntry(self.filePath, entryName, self.description, self.tags) == False:
                    self.status = "Fail"
                    writeToLog("INFO", "Step " + str(i) + ": FAILED to upload new entry " + entryName)
                    return
            writeToLog("INFO", "Step 3: Going navigate to my media")
            if self.common.myMedia.navigateToMyMedia() == False:
                self.status = "Fail"
                writeToLog("INFO", "Step 3: FAILED navigate to my media")
                return
            writeToLog("INFO", "Step 4: Going to change my media view to 'collapsed'")
            if self.common.base.click(self.common.myMedia.MY_MEDIA_COLLAPSED_VIEW_BUTTON, timeout=15) == False:
                self.status = "Fail"
                writeToLog("INFO", "Step 4: FAILED to change my media view to 'collapsed view'")
                return
            writeToLog("INFO", "Step 5: Going to verify my media view")
            if self.common.myMedia.verifyMyMediaViewForEntris([self.entryName1, self.entryName2], viewType=enums.MyMediaView.COLLAPSED) == False:
                self.status = "Fail"
                writeToLog("INFO", "Step 5: FAILED to verify my media view")
                return
            writeToLog("INFO", "Step 6: Going to change my media view to 'detailed'")
            if self.common.base.click(self.common.myMedia.MY_MEDIA_DETAILED_VIEW_BUTTON, timeout=15) == False:
                self.status = "Fail"
                writeToLog("INFO", "Step 6: FAILED to change my media view to 'detailed view'")
                return
            writeToLog("INFO", "Step 7: Going to verify my media view")
            if self.common.myMedia.verifyMyMediaViewForEntris([self.entryName1, self.entryName2], viewType=enums.MyMediaView.DETAILED) == False:
                self.status = "Fail"
                writeToLog("INFO", "Step 7: FAILED to verify my media view")
                return
            ##################################################################
            writeToLog("INFO", "TEST PASSED: 'My Media - View: Collapsed Expanded' was done successfully")
        # If an exception happened we need to handle it and fail the test.
        except Exception as inst:
            self.status = clsTestService.handleException(self, inst, self.startTime)

    ########################### TEST TEARDOWN ###########################
    def teardown_method(self, method):
        try:
            self.common.handleTestFail(self.status)
            writeToLog("INFO", "**************** Starting: teardown_method ****************")
            self.common.myMedia.deleteEntriesFromMyMedia([self.entryName1, self.entryName2])
            writeToLog("INFO", "**************** Ended: teardown_method *******************")
        except:
            # Best-effort cleanup; failures here must not mask the test result.
            pass
        clsTestService.basicTearDown(self)
        # Write to the log that we finished the test.
        logFinishedTest(self, self.startTime)
        assert (self.status == "Pass")
# Allow running this file directly. BUG FIX: `testNum` is a class attribute,
# so the bare name raised NameError at module scope; reference it via Test.
pytest.main('test_' + Test.testNum + '.py --tb=line')
import tensorflow as tf
from tf2_models.attention import layers
from tf2_models.attention.transformer import point_wise_feed_forward
from tf2_models.attention.transformer import positional_encoding
class DecoderLayer(tf.keras.layers.Layer):
    """One transformer decoder layer.

    Structure: masked self-attention -> encoder-decoder attention -> point-wise
    feed-forward network, each followed by dropout and a residual + layer-norm.
    """

    def __init__(self, d_model, num_heads, dff, rate=0.1):
        """Initialize DecoderLayer object.

        :param d_model: number of features through all the model, from the embedding to the output.
        :param num_heads: number of attention heads.
        :param dff: hidden width of the feed-forward network.
        :param rate: dropout rate.
        """
        super(DecoderLayer, self).__init__()
        # Two attention blocks: masked self-attention, then encoder-decoder attention.
        self.mha1 = layers.MultiHeadAttention(d_model, num_heads)
        self.mha2 = layers.MultiHeadAttention(d_model, num_heads)
        self.ffn = point_wise_feed_forward.point_wise_feed_forward_network(d_model, dff)
        # One layer-norm and one dropout per sub-block.
        self.layernorm1 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm2 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.layernorm3 = tf.keras.layers.LayerNormalization(epsilon=1e-6)
        self.dropout1 = tf.keras.layers.Dropout(rate)
        self.dropout2 = tf.keras.layers.Dropout(rate)
        self.dropout3 = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training, look_ahead_mask, padding_mask):
        """Run one decoder layer.

        :param x: input to the decoder (the previous output).
        :param enc_output: encoded words from the encoder - [batch_size, num_words, d_model].
        :param training: mode.
        :param look_ahead_mask: mask combining padding with future-token masking
            for the self-attention block.
        :param padding_mask: mask applied over the encoder outputs.
        :return: (batch_size, target_seq_len, d_model) plus both attention-weight tensors.
        """
        # Masked self-attention over the decoder input, then residual + norm.
        self_attn, self_attn_weights = self.mha1(x, x, x, look_ahead_mask)
        self_attn = self.dropout1(self_attn, training=training)
        res1 = self.layernorm1(self_attn + x)

        # Encoder-decoder attention: queries from res1, keys/values from the encoder.
        cross_attn, cross_attn_weights = self.mha2(
            enc_output, enc_output, res1, padding_mask)
        cross_attn = self.dropout2(cross_attn, training=training)
        res2 = self.layernorm2(cross_attn + res1)

        # Point-wise feed-forward block with its own residual + norm.
        ffn_out = self.ffn(res2)
        ffn_out = self.dropout3(ffn_out, training=training)
        res3 = self.layernorm3(ffn_out + res2)
        return res3, self_attn_weights, cross_attn_weights
class Decoder(tf.keras.layers.Layer):
    """The full decoder: embedding + positional encoding over a stack of DecoderLayers."""

    def __init__(self, num_layers, d_model, num_heads, dff, target_vocab_size,
                 maximum_position_encoding, rate=0.1):
        """Initialize Decoder object.

        :param num_layers: number of stacked decoder layers.
        :param d_model: number of features through all the model, from the embedding to the output.
        :param num_heads: number of attention heads per layer.
        :param dff: hidden width of the point-wise feed-forward network.
        :param target_vocab_size: size of the target vocabulary (embedding rows).
        :param maximum_position_encoding: longest sequence the positional table covers.
        :param rate: dropout rate.
        """
        super(Decoder, self).__init__()
        self.d_model = d_model
        self.num_layers = num_layers
        self.embedding = tf.keras.layers.Embedding(target_vocab_size, d_model)
        self.pos_encoding = positional_encoding.positional_encoding(maximum_position_encoding, d_model)
        self.dec_layers = [DecoderLayer(d_model, num_heads, dff, rate)
                           for _ in range(num_layers)]
        self.dropout = tf.keras.layers.Dropout(rate)

    def call(self, x, enc_output, training,
             look_ahead_mask, padding_mask):
        """Run the decoder stack.

        :param x: input to the decoder (during training, the target labels).
        :param enc_output: output from the encoder - [batch_size, num_words, d_model].
        :param training: mode.
        :param look_ahead_mask: mask for the self-attention blocks.
        :param padding_mask: mask over the encoder outputs.
        :return: (batch_size, target_seq_len, d_model) plus a dict of per-layer attention weights.
        """
        seq_len = tf.shape(x)[1]
        attention_weights = {}
        # Embed, scale by sqrt(d_model), add positional encodings, then dropout.
        x = self.embedding(x)
        x *= tf.math.sqrt(tf.cast(self.d_model, tf.float32))
        x += self.pos_encoding[:, :seq_len, :]
        x = self.dropout(x, training=training)
        # Feed the layers in order, collecting both attention maps from each.
        for idx, dec_layer in enumerate(self.dec_layers, start=1):
            x, self_w, cross_w = dec_layer(x, enc_output, training,
                                           look_ahead_mask, padding_mask)
            attention_weights['decoder_layer{}_block1'.format(idx)] = self_w
            attention_weights['decoder_layer{}_block2'.format(idx)] = cross_w
        # x.shape == (batch_size, target_seq_len, d_model)
        return x, attention_weights
# -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'Ing.ui'
#
# Created by: PyQt5 UI code generator 5.15.4
#
# WARNING: Any manual changes made to this file will be lost when pyuic5 is
# run again. Do not edit this file unless you know what you are doing.
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_MainWindow(object):
    def setupUi(self, MainWindow) -> None:
        """Build the full widget tree for the main window.

        Auto-generated by pyuic5 from 'Ing.ui' — do not hand-edit the widget
        construction below; regenerate from the .ui file instead. The window
        is a QStackedWidget with six pages: a 'home' overview page, then
        first_page..fifth_page forming an import/mapping/export wizard.

        :param MainWindow: the QMainWindow instance to populate.
        """
        MainWindow.setObjectName("MainWindow")
        MainWindow.resize(1006, 824)
        MainWindow.setMinimumSize(QtCore.QSize(950, 400))
        self.centralwidget = QtWidgets.QWidget(MainWindow)
        self.centralwidget.setObjectName("centralwidget")
        self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.centralwidget)
        self.verticalLayout_2.setObjectName("verticalLayout_2")
        # The stacked widget holds all pages; pages are added below in order.
        self.stackedWidget = QtWidgets.QStackedWidget(self.centralwidget)
        self.stackedWidget.setEnabled(True)
        self.stackedWidget.setObjectName("stackedWidget")
        # --- Page 0: "home" — title banner, column-header row, and four
        # --- fixed-geometry booking rows (frame_20/21/22/24).
        self.home = QtWidgets.QWidget()
        self.home.setObjectName("home")
        self.frame_18 = QtWidgets.QFrame(self.home)
        self.frame_18.setGeometry(QtCore.QRect(-10, 11, 220, 60))
        self.frame_18.setMaximumSize(QtCore.QSize(220, 60))
        self.frame_18.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_18.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_18.setObjectName("frame_18")
        self.gridLayout_11 = QtWidgets.QGridLayout(self.frame_18)
        self.gridLayout_11.setObjectName("gridLayout_11")
        self.textBrowser_2 = QtWidgets.QTextBrowser(self.frame_18)
        self.textBrowser_2.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.textBrowser_2.setFrameShadow(QtWidgets.QFrame.Plain)
        self.textBrowser_2.setObjectName("textBrowser_2")
        self.gridLayout_11.addWidget(self.textBrowser_2, 0, 0, 1, 1)
        self.frame_16 = QtWidgets.QFrame(self.home)
        self.frame_16.setGeometry(QtCore.QRect(10, 360, 962, 411))
        self.frame_16.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_16.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_16.setLineWidth(3)
        self.frame_16.setObjectName("frame_16")
        # Header row with the column labels (Client Name, Facility Name, ...).
        self.frame_19 = QtWidgets.QFrame(self.home)
        self.frame_19.setGeometry(QtCore.QRect(10, 70, 950, 42))
        self.frame_19.setMinimumSize(QtCore.QSize(950, 0))
        self.frame_19.setFrameShape(QtWidgets.QFrame.Panel)
        self.frame_19.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.frame_19.setLineWidth(3)
        self.frame_19.setObjectName("frame_19")
        self.horizontalLayout_5 = QtWidgets.QHBoxLayout(self.frame_19)
        self.horizontalLayout_5.setObjectName("horizontalLayout_5")
        self.label_7 = QtWidgets.QLabel(self.frame_19)
        self.label_7.setMinimumSize(QtCore.QSize(30, 0))
        self.label_7.setMaximumSize(QtCore.QSize(100, 50))
        font = QtGui.QFont()
        font.setPointSize(7)
        font.setBold(True)
        font.setWeight(75)
        self.label_7.setFont(font)
        self.label_7.setObjectName("label_7")
        self.horizontalLayout_5.addWidget(self.label_7)
        spacerItem = QtWidgets.QSpacerItem(1, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem)
        self.label_11 = QtWidgets.QLabel(self.frame_19)
        self.label_11.setMaximumSize(QtCore.QSize(100, 50))
        font = QtGui.QFont()
        font.setPointSize(7)
        font.setBold(True)
        font.setWeight(75)
        self.label_11.setFont(font)
        self.label_11.setObjectName("label_11")
        self.horizontalLayout_5.addWidget(self.label_11)
        spacerItem1 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem1)
        self.label_12 = QtWidgets.QLabel(self.frame_19)
        self.label_12.setMaximumSize(QtCore.QSize(16777215, 50))
        font = QtGui.QFont()
        font.setPointSize(7)
        font.setBold(True)
        font.setWeight(75)
        self.label_12.setFont(font)
        self.label_12.setObjectName("label_12")
        self.horizontalLayout_5.addWidget(self.label_12)
        spacerItem2 = QtWidgets.QSpacerItem(5, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem2)
        self.label_13 = QtWidgets.QLabel(self.frame_19)
        self.label_13.setMaximumSize(QtCore.QSize(120, 50))
        font = QtGui.QFont()
        font.setPointSize(7)
        font.setBold(True)
        font.setWeight(75)
        self.label_13.setFont(font)
        self.label_13.setObjectName("label_13")
        self.horizontalLayout_5.addWidget(self.label_13)
        spacerItem3 = QtWidgets.QSpacerItem(5, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem3)
        self.label_14 = QtWidgets.QLabel(self.frame_19)
        self.label_14.setMaximumSize(QtCore.QSize(120, 50))
        font = QtGui.QFont()
        font.setPointSize(7)
        font.setBold(True)
        font.setWeight(75)
        self.label_14.setFont(font)
        self.label_14.setObjectName("label_14")
        self.horizontalLayout_5.addWidget(self.label_14)
        spacerItem4 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem4)
        self.label_15 = QtWidgets.QLabel(self.frame_19)
        self.label_15.setMaximumSize(QtCore.QSize(16777215, 50))
        font = QtGui.QFont()
        font.setPointSize(7)
        font.setBold(True)
        font.setWeight(75)
        self.label_15.setFont(font)
        self.label_15.setFrameShadow(QtWidgets.QFrame.Raised)
        self.label_15.setLineWidth(2)
        self.label_15.setObjectName("label_15")
        self.horizontalLayout_5.addWidget(self.label_15)
        spacerItem5 = QtWidgets.QSpacerItem(10, 20, QtWidgets.QSizePolicy.Maximum, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_5.addItem(spacerItem5)
        self.label_16 = QtWidgets.QLabel(self.frame_19)
        self.label_16.setMaximumSize(QtCore.QSize(16777215, 50))
        font = QtGui.QFont()
        font.setPointSize(7)
        font.setBold(True)
        font.setWeight(75)
        self.label_16.setFont(font)
        self.label_16.setObjectName("label_16")
        self.horizontalLayout_5.addWidget(self.label_16)
        # Booking row 1 (frame_20): client/facility/booking labels, a
        # flat full-width push button overlay, and a date-time editor.
        self.frame_20 = QtWidgets.QFrame(self.home)
        self.frame_20.setGeometry(QtCore.QRect(10, 130, 950, 54))
        self.frame_20.setMinimumSize(QtCore.QSize(950, 0))
        self.frame_20.setFrameShape(QtWidgets.QFrame.Box)
        self.frame_20.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_20.setObjectName("frame_20")
        self.gridLayout = QtWidgets.QGridLayout(self.frame_20)
        self.gridLayout.setObjectName("gridLayout")
        self.home1_pushButton = QtWidgets.QPushButton(self.frame_20)
        self.home1_pushButton.setText("")
        self.home1_pushButton.setFlat(True)
        self.home1_pushButton.setObjectName("home1_pushButton")
        self.gridLayout.addWidget(self.home1_pushButton, 0, 0, 1, 1)
        self.client1_label = QtWidgets.QLabel(self.frame_20)
        self.client1_label.setGeometry(QtCore.QRect(13, 20, 40, 16))
        self.client1_label.setMinimumSize(QtCore.QSize(40, 0))
        self.client1_label.setMaximumSize(QtCore.QSize(125, 16))
        self.client1_label.setObjectName("client1_label")
        self.facility1_label = QtWidgets.QLabel(self.frame_20)
        self.facility1_label.setGeometry(QtCore.QRect(133, 20, 40, 16))
        self.facility1_label.setMinimumSize(QtCore.QSize(40, 0))
        self.facility1_label.setMaximumSize(QtCore.QSize(16777213, 16))
        self.facility1_label.setObjectName("facility1_label")
        self.booking1_label = QtWidgets.QLabel(self.frame_20)
        self.booking1_label.setGeometry(QtCore.QRect(263, 20, 41, 16))
        self.booking1_label.setMinimumSize(QtCore.QSize(30, 0))
        self.booking1_label.setMaximumSize(QtCore.QSize(16777213, 16))
        self.booking1_label.setObjectName("booking1_label")
        self.label_33 = QtWidgets.QLabel(self.frame_20)
        self.label_33.setGeometry(QtCore.QRect(392, 20, 30, 16))
        self.label_33.setMinimumSize(QtCore.QSize(30, 0))
        self.label_33.setMaximumSize(QtCore.QSize(16777213, 16))
        self.label_33.setObjectName("label_33")
        self.label_35 = QtWidgets.QLabel(self.frame_20)
        self.label_35.setGeometry(QtCore.QRect(652, 20, 80, 16))
        self.label_35.setMinimumSize(QtCore.QSize(80, 0))
        self.label_35.setMaximumSize(QtCore.QSize(155, 16))
        self.label_35.setObjectName("label_35")
        self.dateTimeEdit_5 = QtWidgets.QDateTimeEdit(self.frame_20)
        self.dateTimeEdit_5.setGeometry(QtCore.QRect(780, 10, 151, 22))
        self.dateTimeEdit_5.setDateTime(QtCore.QDateTime(QtCore.QDate(2021, 8, 17), QtCore.QTime(12, 0, 0)))
        self.dateTimeEdit_5.setObjectName("dateTimeEdit_5")
        self.label_17 = QtWidgets.QLabel(self.frame_20)
        self.label_17.setGeometry(QtCore.QRect(530, 20, 64, 16))
        self.label_17.setObjectName("label_17")
        # Booking row 2 (frame_21) — same structure as row 1.
        self.frame_21 = QtWidgets.QFrame(self.home)
        self.frame_21.setGeometry(QtCore.QRect(10, 190, 950, 54))
        self.frame_21.setMinimumSize(QtCore.QSize(950, 0))
        self.frame_21.setFrameShape(QtWidgets.QFrame.Box)
        self.frame_21.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_21.setObjectName("frame_21")
        self.gridLayout_5 = QtWidgets.QGridLayout(self.frame_21)
        self.gridLayout_5.setObjectName("gridLayout_5")
        self.home2_pushButton = QtWidgets.QPushButton(self.frame_21)
        self.home2_pushButton.setText("")
        self.home2_pushButton.setFlat(True)
        self.home2_pushButton.setObjectName("home2_pushButton")
        self.gridLayout_5.addWidget(self.home2_pushButton, 0, 0, 1, 1)
        self.client2_label = QtWidgets.QLabel(self.frame_21)
        self.client2_label.setGeometry(QtCore.QRect(13, 16, 40, 16))
        self.client2_label.setMinimumSize(QtCore.QSize(40, 0))
        self.client2_label.setMaximumSize(QtCore.QSize(125, 16))
        self.client2_label.setObjectName("client2_label")
        self.facility2_label = QtWidgets.QLabel(self.frame_21)
        self.facility2_label.setGeometry(QtCore.QRect(133, 16, 40, 16))
        self.facility2_label.setMinimumSize(QtCore.QSize(40, 0))
        self.facility2_label.setMaximumSize(QtCore.QSize(120, 16))
        self.facility2_label.setObjectName("facility2_label")
        self.booking2_label = QtWidgets.QLabel(self.frame_21)
        self.booking2_label.setGeometry(QtCore.QRect(263, 16, 41, 16))
        self.booking2_label.setMinimumSize(QtCore.QSize(30, 0))
        self.booking2_label.setMaximumSize(QtCore.QSize(120, 16))
        self.booking2_label.setObjectName("booking2_label")
        self.label_38 = QtWidgets.QLabel(self.frame_21)
        self.label_38.setGeometry(QtCore.QRect(392, 16, 30, 16))
        self.label_38.setMinimumSize(QtCore.QSize(30, 0))
        self.label_38.setMaximumSize(QtCore.QSize(90, 16))
        self.label_38.setObjectName("label_38")
        self.dateTimeEdit = QtWidgets.QDateTimeEdit(self.frame_21)
        self.dateTimeEdit.setGeometry(QtCore.QRect(786, 13, 151, 22))
        self.dateTimeEdit.setDateTime(QtCore.QDateTime(QtCore.QDate(2021, 8, 17), QtCore.QTime(12, 0, 0)))
        self.dateTimeEdit.setObjectName("dateTimeEdit")
        self.label_46 = QtWidgets.QLabel(self.frame_21)
        self.label_46.setGeometry(QtCore.QRect(652, 13, 80, 16))
        self.label_46.setMinimumSize(QtCore.QSize(80, 0))
        self.label_46.setMaximumSize(QtCore.QSize(80, 30))
        self.label_46.setSizeIncrement(QtCore.QSize(100, 0))
        self.label_46.setObjectName("label_46")
        self.label_18 = QtWidgets.QLabel(self.frame_21)
        self.label_18.setGeometry(QtCore.QRect(522, 13, 65, 16))
        self.label_18.setObjectName("label_18")
        # raise_() calls restore stacking order so labels sit above the
        # flat overlay button in row 2.
        self.dateTimeEdit.raise_()
        self.booking2_label.raise_()
        self.facility2_label.raise_()
        self.label_38.raise_()
        self.client2_label.raise_()
        self.label_46.raise_()
        self.label_18.raise_()
        self.home2_pushButton.raise_()
        # Booking row 3 (frame_22).
        self.frame_22 = QtWidgets.QFrame(self.home)
        self.frame_22.setGeometry(QtCore.QRect(10, 250, 950, 54))
        self.frame_22.setMinimumSize(QtCore.QSize(100, 0))
        self.frame_22.setMaximumSize(QtCore.QSize(950, 16777215))
        self.frame_22.setFrameShape(QtWidgets.QFrame.Box)
        self.frame_22.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_22.setObjectName("frame_22")
        self.gridLayout_12 = QtWidgets.QGridLayout(self.frame_22)
        self.gridLayout_12.setObjectName("gridLayout_12")
        self.home3_pushButton = QtWidgets.QPushButton(self.frame_22)
        self.home3_pushButton.setText("")
        self.home3_pushButton.setFlat(True)
        self.home3_pushButton.setObjectName("home3_pushButton")
        self.gridLayout_12.addWidget(self.home3_pushButton, 0, 0, 1, 1)
        self.client3_label = QtWidgets.QLabel(self.frame_22)
        self.client3_label.setGeometry(QtCore.QRect(13, 16, 60, 16))
        self.client3_label.setMinimumSize(QtCore.QSize(60, 0))
        self.client3_label.setMaximumSize(QtCore.QSize(16777213, 16))
        self.client3_label.setObjectName("client3_label")
        self.facility3_label = QtWidgets.QLabel(self.frame_22)
        self.facility3_label.setGeometry(QtCore.QRect(130, 16, 66, 16))
        self.facility3_label.setMinimumSize(QtCore.QSize(66, 0))
        self.facility3_label.setMaximumSize(QtCore.QSize(16777213, 16))
        self.facility3_label.setObjectName("facility3_label")
        self.booking3_label = QtWidgets.QLabel(self.frame_22)
        self.booking3_label.setGeometry(QtCore.QRect(264, 16, 60, 16))
        self.booking3_label.setMinimumSize(QtCore.QSize(60, 0))
        self.booking3_label.setMaximumSize(QtCore.QSize(16777213, 16))
        self.booking3_label.setObjectName("booking3_label")
        self.label_40 = QtWidgets.QLabel(self.frame_22)
        self.label_40.setGeometry(QtCore.QRect(391, 16, 30, 16))
        self.label_40.setMinimumSize(QtCore.QSize(30, 0))
        self.label_40.setMaximumSize(QtCore.QSize(90, 16))
        self.label_40.setObjectName("label_40")
        self.label_44 = QtWidgets.QLabel(self.frame_22)
        self.label_44.setGeometry(QtCore.QRect(659, 16, 80, 16))
        self.label_44.setMinimumSize(QtCore.QSize(80, 0))
        self.label_44.setMaximumSize(QtCore.QSize(16777213, 16))
        self.label_44.setObjectName("label_44")
        self.dateTimeEdit_2 = QtWidgets.QDateTimeEdit(self.frame_22)
        self.dateTimeEdit_2.setGeometry(QtCore.QRect(786, 13, 151, 22))
        self.dateTimeEdit_2.setDate(QtCore.QDate(2021, 8, 15))
        self.dateTimeEdit_2.setTime(QtCore.QTime(10, 45, 0))
        self.dateTimeEdit_2.setObjectName("dateTimeEdit_2")
        self.label_20 = QtWidgets.QLabel(self.frame_22)
        self.label_20.setGeometry(QtCore.QRect(508, 13, 105, 16))
        self.label_20.setObjectName("label_20")
        # Booking row 4 (frame_24).
        self.frame_24 = QtWidgets.QFrame(self.home)
        self.frame_24.setGeometry(QtCore.QRect(10, 310, 950, 54))
        self.frame_24.setMinimumSize(QtCore.QSize(950, 0))
        self.frame_24.setFrameShape(QtWidgets.QFrame.Box)
        self.frame_24.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_24.setObjectName("frame_24")
        self.gridLayout_10 = QtWidgets.QGridLayout(self.frame_24)
        self.gridLayout_10.setObjectName("gridLayout_10")
        self.home4_pushButton = QtWidgets.QPushButton(self.frame_24)
        self.home4_pushButton.setText("")
        self.home4_pushButton.setFlat(True)
        self.home4_pushButton.setObjectName("home4_pushButton")
        self.gridLayout_10.addWidget(self.home4_pushButton, 0, 0, 1, 1)
        self.client4_label = QtWidgets.QLabel(self.frame_24)
        self.client4_label.setGeometry(QtCore.QRect(13, 16, 48, 16))
        self.client4_label.setMinimumSize(QtCore.QSize(40, 0))
        self.client4_label.setMaximumSize(QtCore.QSize(16777213, 16))
        self.client4_label.setObjectName("client4_label")
        self.facility4_label = QtWidgets.QLabel(self.frame_24)
        self.facility4_label.setGeometry(QtCore.QRect(134, 16, 40, 16))
        self.facility4_label.setMinimumSize(QtCore.QSize(40, 0))
        self.facility4_label.setMaximumSize(QtCore.QSize(16777213, 16))
        self.facility4_label.setObjectName("facility4_label")
        self.booking4_label = QtWidgets.QLabel(self.frame_24)
        self.booking4_label.setGeometry(QtCore.QRect(264, 16, 50, 16))
        self.booking4_label.setMinimumSize(QtCore.QSize(30, 0))
        self.booking4_label.setMaximumSize(QtCore.QSize(16777213, 16))
        self.booking4_label.setObjectName("booking4_label")
        self.label_41 = QtWidgets.QLabel(self.frame_24)
        self.label_41.setGeometry(QtCore.QRect(395, 16, 30, 16))
        self.label_41.setMinimumSize(QtCore.QSize(30, 0))
        self.label_41.setMaximumSize(QtCore.QSize(90, 16))
        self.label_41.setObjectName("label_41")
        self.label_45 = QtWidgets.QLabel(self.frame_24)
        self.label_45.setGeometry(QtCore.QRect(656, 16, 59, 16))
        self.label_45.setMaximumSize(QtCore.QSize(16777213, 16))
        self.label_45.setObjectName("label_45")
        self.dateTimeEdit_3 = QtWidgets.QDateTimeEdit(self.frame_24)
        self.dateTimeEdit_3.setGeometry(QtCore.QRect(786, 13, 151, 22))
        self.dateTimeEdit_3.setDate(QtCore.QDate(2021, 1, 1))
        self.dateTimeEdit_3.setObjectName("dateTimeEdit_3")
        self.label_19 = QtWidgets.QLabel(self.frame_24)
        self.label_19.setGeometry(QtCore.QRect(525, 13, 64, 16))
        self.label_19.setObjectName("label_19")
        self.stackedWidget.addWidget(self.home)
        # --- Page 1: "first_page" — import controls and a preview table.
        self.first_page = QtWidgets.QWidget()
        self.first_page.setMinimumSize(QtCore.QSize(820, 420))
        self.first_page.setMaximumSize(QtCore.QSize(1000000, 1000000))
        self.first_page.setObjectName("first_page")
        self.gridLayout_6 = QtWidgets.QGridLayout(self.first_page)
        self.gridLayout_6.setObjectName("gridLayout_6")
        self.frame_17 = QtWidgets.QFrame(self.first_page)
        self.frame_17.setFrameShape(QtWidgets.QFrame.Box)
        self.frame_17.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_17.setObjectName("frame_17")
        self.verticalLayout_13 = QtWidgets.QVBoxLayout(self.frame_17)
        self.verticalLayout_13.setObjectName("verticalLayout_13")
        self.previewLabel = QtWidgets.QLabel(self.frame_17)
        font = QtGui.QFont()
        font.setFamily("MS Sans Serif")
        font.setPointSize(12)
        font.setItalic(True)
        self.previewLabel.setFont(font)
        self.previewLabel.setObjectName("previewLabel")
        self.verticalLayout_13.addWidget(self.previewLabel)
        self.tableView = QtWidgets.QTableView(self.frame_17)
        self.tableView.setObjectName("tableView")
        self.verticalLayout_13.addWidget(self.tableView)
        self.gridLayout_6.addWidget(self.frame_17, 1, 0, 1, 2)
        self.Inputframe = QtWidgets.QFrame(self.first_page)
        self.Inputframe.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.Inputframe.setObjectName("Inputframe")
        self.gridLayout_4 = QtWidgets.QGridLayout(self.Inputframe)
        self.gridLayout_4.setObjectName("gridLayout_4")
        self.importButton = QtWidgets.QPushButton(self.Inputframe)
        self.importButton.setMaximumSize(QtCore.QSize(200, 16777215))
        self.importButton.setObjectName("importButton")
        self.gridLayout_4.addWidget(self.importButton, 2, 1, 1, 1)
        self.label = QtWidgets.QLabel(self.Inputframe)
        self.label.setMaximumSize(QtCore.QSize(200, 16777215))
        self.label.setObjectName("label")
        self.gridLayout_4.addWidget(self.label, 2, 0, 1, 1)
        self.back_PushButton = QtWidgets.QPushButton(self.Inputframe)
        self.back_PushButton.setMaximumSize(QtCore.QSize(200, 16777215))
        self.back_PushButton.setObjectName("back_PushButton")
        self.gridLayout_4.addWidget(self.back_PushButton, 3, 1, 1, 1)
        self.next_pushButton = QtWidgets.QPushButton(self.Inputframe)
        self.next_pushButton.setMaximumSize(QtCore.QSize(200, 16777215))
        self.next_pushButton.setObjectName("next_pushButton")
        self.gridLayout_4.addWidget(self.next_pushButton, 3, 2, 1, 1)
        self.label_3 = QtWidgets.QLabel(self.Inputframe)
        self.label_3.setMaximumSize(QtCore.QSize(16777215, 20))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.label_3.setFont(font)
        self.label_3.setObjectName("label_3")
        self.gridLayout_4.addWidget(self.label_3, 0, 0, 1, 1)
        spacerItem6 = QtWidgets.QSpacerItem(20, 40, QtWidgets.QSizePolicy.Minimum, QtWidgets.QSizePolicy.Fixed)
        self.gridLayout_4.addItem(spacerItem6, 1, 0, 1, 1)
        self.gridLayout_6.addWidget(self.Inputframe, 0, 0, 1, 1)
        self.stackedWidget.addWidget(self.first_page)
        # --- Page 2: "second_page" — combo area with back/next and a table.
        self.second_page = QtWidgets.QWidget()
        self.second_page.setObjectName("second_page")
        self.verticalLayout_6 = QtWidgets.QVBoxLayout(self.second_page)
        self.verticalLayout_6.setObjectName("verticalLayout_6")
        self.ComboFrame = QtWidgets.QFrame(self.second_page)
        self.ComboFrame.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.ComboFrame.setFrameShadow(QtWidgets.QFrame.Plain)
        self.ComboFrame.setObjectName("ComboFrame")
        self.gridLayout_2 = QtWidgets.QGridLayout(self.ComboFrame)
        self.gridLayout_2.setObjectName("gridLayout_2")
        spacerItem7 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem7, 0, 0, 1, 1)
        spacerItem8 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem8, 2, 0, 1, 1)
        spacerItem9 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem9, 0, 4, 1, 1)
        spacerItem10 = QtWidgets.QSpacerItem(467, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem10, 3, 1, 1, 1)
        spacerItem11 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.gridLayout_2.addItem(spacerItem11, 0, 2, 1, 1)
        self.frame_2 = QtWidgets.QFrame(self.ComboFrame)
        self.frame_2.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_2.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_2.setObjectName("frame_2")
        self.gridLayout_7 = QtWidgets.QGridLayout(self.frame_2)
        self.gridLayout_7.setObjectName("gridLayout_7")
        self.next_push_button = QtWidgets.QPushButton(self.frame_2)
        self.next_push_button.setObjectName("next_push_button")
        self.gridLayout_7.addWidget(self.next_push_button, 0, 3, 1, 1)
        self.backPushButton = QtWidgets.QPushButton(self.frame_2)
        self.backPushButton.setObjectName("backPushButton")
        self.gridLayout_7.addWidget(self.backPushButton, 0, 2, 1, 1)
        self.gridLayout_2.addWidget(self.frame_2, 3, 3, 1, 3)
        self.verticalLayout_6.addWidget(self.ComboFrame)
        self.frame = QtWidgets.QFrame(self.second_page)
        self.frame.setFrameShape(QtWidgets.QFrame.Box)
        self.frame.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame.setObjectName("frame")
        self.gridLayout_3 = QtWidgets.QGridLayout(self.frame)
        self.gridLayout_3.setObjectName("gridLayout_3")
        self.tableView_2 = QtWidgets.QTableView(self.frame)
        self.tableView_2.setObjectName("tableView_2")
        self.gridLayout_3.addWidget(self.tableView_2, 0, 0, 1, 1)
        self.verticalLayout_6.addWidget(self.frame)
        self.stackedWidget.addWidget(self.second_page)
        # --- Page 3: "third_page" — "unknown" review panel with back/next.
        self.third_page = QtWidgets.QWidget()
        self.third_page.setObjectName("third_page")
        self.verticalLayout_7 = QtWidgets.QVBoxLayout(self.third_page)
        self.verticalLayout_7.setObjectName("verticalLayout_7")
        self.frame_12 = QtWidgets.QFrame(self.third_page)
        self.frame_12.setMaximumSize(QtCore.QSize(800, 600))
        self.frame_12.setFrameShape(QtWidgets.QFrame.Panel)
        self.frame_12.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_12.setLineWidth(3)
        self.frame_12.setObjectName("frame_12")
        self.verticalLayout_5 = QtWidgets.QVBoxLayout(self.frame_12)
        self.verticalLayout_5.setObjectName("verticalLayout_5")
        self.frame_4 = QtWidgets.QFrame(self.frame_12)
        self.frame_4.setMaximumSize(QtCore.QSize(16777215, 16777215))
        self.frame_4.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.frame_4.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_4.setObjectName("frame_4")
        self.gridLayout_19 = QtWidgets.QGridLayout(self.frame_4)
        self.gridLayout_19.setObjectName("gridLayout_19")
        self.line = QtWidgets.QFrame(self.frame_4)
        self.line.setMaximumSize(QtCore.QSize(900, 16777215))
        self.line.setFrameShape(QtWidgets.QFrame.HLine)
        self.line.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.line.setObjectName("line")
        self.gridLayout_19.addWidget(self.line, 1, 0, 1, 2)
        self.label_10 = QtWidgets.QLabel(self.frame_4)
        self.label_10.setMaximumSize(QtCore.QSize(200, 50))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_10.setFont(font)
        self.label_10.setObjectName("label_10")
        self.gridLayout_19.addWidget(self.label_10, 2, 0, 1, 1)
        self.label_9 = QtWidgets.QLabel(self.frame_4)
        self.label_9.setMaximumSize(QtCore.QSize(200, 50))
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_9.setFont(font)
        self.label_9.setObjectName("label_9")
        self.gridLayout_19.addWidget(self.label_9, 2, 1, 1, 1)
        self.label_8 = QtWidgets.QLabel(self.frame_4)
        self.label_8.setMaximumSize(QtCore.QSize(16777215, 30))
        font = QtGui.QFont()
        font.setPointSize(10)
        font.setBold(True)
        font.setWeight(75)
        self.label_8.setFont(font)
        self.label_8.setObjectName("label_8")
        self.gridLayout_19.addWidget(self.label_8, 0, 0, 1, 2)
        self.frame_9 = QtWidgets.QFrame(self.frame_4)
        self.frame_9.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_9.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_9.setObjectName("frame_9")
        self.gridLayout_9 = QtWidgets.QGridLayout(self.frame_9)
        self.gridLayout_9.setObjectName("gridLayout_9")
        self.gridLayout_19.addWidget(self.frame_9, 3, 0, 1, 2)
        self.verticalLayout_5.addWidget(self.frame_4)
        self.frame_8 = QtWidgets.QFrame(self.frame_12)
        self.frame_8.setMaximumSize(QtCore.QSize(16777215, 100))
        self.frame_8.setFrameShape(QtWidgets.QFrame.NoFrame)
        self.frame_8.setFrameShadow(QtWidgets.QFrame.Plain)
        self.frame_8.setObjectName("frame_8")
        self.horizontalLayout_3 = QtWidgets.QHBoxLayout(self.frame_8)
        self.horizontalLayout_3.setObjectName("horizontalLayout_3")
        self.unkown_back_pushbutton = QtWidgets.QPushButton(self.frame_8)
        self.unkown_back_pushbutton.setObjectName("unkown_back_pushbutton")
        self.horizontalLayout_3.addWidget(self.unkown_back_pushbutton)
        spacerItem12 = QtWidgets.QSpacerItem(50, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_3.addItem(spacerItem12)
        self.unknown_next_pushButton = QtWidgets.QPushButton(self.frame_8)
        self.unknown_next_pushButton.setObjectName("unknown_next_pushButton")
        self.horizontalLayout_3.addWidget(self.unknown_next_pushButton)
        self.verticalLayout_5.addWidget(self.frame_8)
        self.verticalLayout_7.addWidget(self.frame_12)
        self.stackedWidget.addWidget(self.third_page)
        # --- Page 4: "fourth_page" — side-by-side Excel vs database tables.
        self.fourth_page = QtWidgets.QWidget()
        self.fourth_page.setObjectName("fourth_page")
        self.verticalLayout = QtWidgets.QVBoxLayout(self.fourth_page)
        self.verticalLayout.setObjectName("verticalLayout")
        self.excelFrame = QtWidgets.QFrame(self.fourth_page)
        self.excelFrame.setMaximumSize(QtCore.QSize(16777215, 700))
        self.excelFrame.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.excelFrame.setFrameShadow(QtWidgets.QFrame.Raised)
        self.excelFrame.setObjectName("excelFrame")
        self.gridLayout_8 = QtWidgets.QGridLayout(self.excelFrame)
        self.gridLayout_8.setObjectName("gridLayout_8")
        self.tableView_db = QtWidgets.QTableView(self.excelFrame)
        self.tableView_db.setMaximumSize(QtCore.QSize(16777215, 700))
        self.tableView_db.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.tableView_db.setFrameShadow(QtWidgets.QFrame.Plain)
        self.tableView_db.setObjectName("tableView_db")
        self.gridLayout_8.addWidget(self.tableView_db, 1, 2, 1, 1)
        self.tableView_excel = QtWidgets.QTableView(self.excelFrame)
        self.tableView_excel.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.tableView_excel.setFrameShadow(QtWidgets.QFrame.Plain)
        self.tableView_excel.setObjectName("tableView_excel")
        self.gridLayout_8.addWidget(self.tableView_excel, 1, 0, 1, 1)
        self.label_2 = QtWidgets.QLabel(self.excelFrame)
        self.label_2.setMaximumSize(QtCore.QSize(100, 16777215))
        font = QtGui.QFont()
        font.setPointSize(9)
        font.setBold(True)
        font.setWeight(75)
        self.label_2.setFont(font)
        self.label_2.setObjectName("label_2")
        self.gridLayout_8.addWidget(self.label_2, 0, 0, 1, 1)
        self.label_4 = QtWidgets.QLabel(self.excelFrame)
        font = QtGui.QFont()
        font.setPointSize(9)
        font.setBold(True)
        font.setWeight(75)
        self.label_4.setFont(font)
        self.label_4.setObjectName("label_4")
        self.gridLayout_8.addWidget(self.label_4, 0, 2, 1, 1)
        self.verticalLayout.addWidget(self.excelFrame)
        self.frame_buttons = QtWidgets.QFrame(self.fourth_page)
        self.frame_buttons.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_buttons.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_buttons.setObjectName("frame_buttons")
        self.horizontalLayout_2 = QtWidgets.QHBoxLayout(self.frame_buttons)
        self.horizontalLayout_2.setObjectName("horizontalLayout_2")
        self.back_pushbutton_s = QtWidgets.QPushButton(self.frame_buttons)
        self.back_pushbutton_s.setObjectName("back_pushbutton_s")
        self.horizontalLayout_2.addWidget(self.back_pushbutton_s)
        spacerItem13 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Preferred, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_2.addItem(spacerItem13)
        self.selected_next_pushButton = QtWidgets.QPushButton(self.frame_buttons)
        self.selected_next_pushButton.setObjectName("selected_next_pushButton")
        self.horizontalLayout_2.addWidget(self.selected_next_pushButton)
        self.verticalLayout.addWidget(self.frame_buttons)
        self.stackedWidget.addWidget(self.fourth_page)
        # --- Page 5: "fifth_page" — summary panel, confirm/cancel/home
        # --- buttons, and two result tables (yomunit / ceu).
        self.fifth_page = QtWidgets.QWidget()
        self.fifth_page.setMaximumSize(QtCore.QSize(10000, 10000))
        self.fifth_page.setObjectName("fifth_page")
        self.verticalLayout_4 = QtWidgets.QVBoxLayout(self.fifth_page)
        self.verticalLayout_4.setObjectName("verticalLayout_4")
        self.frame_5 = QtWidgets.QFrame(self.fifth_page)
        self.frame_5.setMaximumSize(QtCore.QSize(10000, 220))
        self.frame_5.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_5.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_5.setObjectName("frame_5")
        self.horizontalLayout = QtWidgets.QHBoxLayout(self.frame_5)
        self.horizontalLayout.setObjectName("horizontalLayout")
        self.frame_11 = QtWidgets.QFrame(self.frame_5)
        self.frame_11.setMinimumSize(QtCore.QSize(300, 0))
        self.frame_11.setFrameShape(QtWidgets.QFrame.Box)
        self.frame_11.setFrameShadow(QtWidgets.QFrame.Sunken)
        self.frame_11.setLineWidth(2)
        self.frame_11.setObjectName("frame_11")
        self.facility_label = QtWidgets.QLabel(self.frame_11)
        self.facility_label.setGeometry(QtCore.QRect(154, 15, 80, 16))
        self.facility_label.setMinimumSize(QtCore.QSize(80, 0))
        self.facility_label.setObjectName("facility_label")
        self.label_25 = QtWidgets.QLabel(self.frame_11)
        self.label_25.setGeometry(QtCore.QRect(15, 15, 75, 16))
        self.label_25.setObjectName("label_25")
        self.booking_label = QtWidgets.QLabel(self.frame_11)
        self.booking_label.setGeometry(QtCore.QRect(154, 40, 80, 16))
        self.booking_label.setMinimumSize(QtCore.QSize(80, 0))
        self.booking_label.setObjectName("booking_label")
        self.label_39 = QtWidgets.QLabel(self.frame_11)
        self.label_39.setGeometry(QtCore.QRect(15, 40, 89, 16))
        self.label_39.setObjectName("label_39")
        self.unit_label = QtWidgets.QLabel(self.frame_11)
        self.unit_label.setGeometry(QtCore.QRect(154, 68, 55, 16))
        self.unit_label.setObjectName("unit_label")
        self.label_47 = QtWidgets.QLabel(self.frame_11)
        self.label_47.setGeometry(QtCore.QRect(15, 68, 31, 16))
        self.label_47.setObjectName("label_47")
        self.ceu_label = QtWidgets.QLabel(self.frame_11)
        self.ceu_label.setGeometry(QtCore.QRect(154, 91, 55, 16))
        self.ceu_label.setObjectName("ceu_label")
        self.label_48 = QtWidgets.QLabel(self.frame_11)
        self.label_48.setGeometry(QtCore.QRect(15, 91, 29, 16))
        self.label_48.setObjectName("label_48")
        self.horizontalLayout.addWidget(self.frame_11)
        spacerItem14 = QtWidgets.QSpacerItem(40, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout.addItem(spacerItem14)
        self.frame_6 = QtWidgets.QFrame(self.frame_5)
        self.frame_6.setMinimumSize(QtCore.QSize(200, 0))
        self.frame_6.setMaximumSize(QtCore.QSize(1000, 16777215))
        self.frame_6.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_6.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_6.setObjectName("frame_6")
        self.verticalLayout_3 = QtWidgets.QVBoxLayout(self.frame_6)
        self.verticalLayout_3.setObjectName("verticalLayout_3")
        self.confirm_export_pushbutton = QtWidgets.QPushButton(self.frame_6)
        self.confirm_export_pushbutton.setMaximumSize(QtCore.QSize(200, 16777215))
        self.confirm_export_pushbutton.setObjectName("confirm_export_pushbutton")
        self.verticalLayout_3.addWidget(self.confirm_export_pushbutton)
        self.cancel_pushbutton = QtWidgets.QPushButton(self.frame_6)
        self.cancel_pushbutton.setMaximumSize(QtCore.QSize(200, 16777215))
        self.cancel_pushbutton.setObjectName("cancel_pushbutton")
        self.verticalLayout_3.addWidget(self.cancel_pushbutton)
        self.gohome_pushbutton = QtWidgets.QPushButton(self.frame_6)
        self.gohome_pushbutton.setMaximumSize(QtCore.QSize(200, 16777215))
        self.gohome_pushbutton.setObjectName("gohome_pushbutton")
        self.verticalLayout_3.addWidget(self.gohome_pushbutton)
        self.horizontalLayout.addWidget(self.frame_6)
        self.verticalLayout_4.addWidget(self.frame_5)
        self.frame_7 = QtWidgets.QFrame(self.fifth_page)
        self.frame_7.setMaximumSize(QtCore.QSize(1000000, 16777215))
        self.frame_7.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_7.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_7.setObjectName("frame_7")
        self.horizontalLayout_13 = QtWidgets.QHBoxLayout(self.frame_7)
        self.horizontalLayout_13.setObjectName("horizontalLayout_13")
        self.label_5 = QtWidgets.QLabel(self.frame_7)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_5.setFont(font)
        self.label_5.setObjectName("label_5")
        self.horizontalLayout_13.addWidget(self.label_5)
        spacerItem15 = QtWidgets.QSpacerItem(273, 20, QtWidgets.QSizePolicy.Ignored, QtWidgets.QSizePolicy.Minimum)
        self.horizontalLayout_13.addItem(spacerItem15)
        self.label_6 = QtWidgets.QLabel(self.frame_7)
        font = QtGui.QFont()
        font.setBold(True)
        font.setWeight(75)
        self.label_6.setFont(font)
        self.label_6.setObjectName("label_6")
        self.horizontalLayout_13.addWidget(self.label_6)
        self.verticalLayout_4.addWidget(self.frame_7)
        self.frame_3 = QtWidgets.QFrame(self.fifth_page)
        self.frame_3.setMaximumSize(QtCore.QSize(20000, 20000))
        self.frame_3.setFrameShape(QtWidgets.QFrame.StyledPanel)
        self.frame_3.setFrameShadow(QtWidgets.QFrame.Raised)
        self.frame_3.setObjectName("frame_3")
        self.horizontalLayout_14 = QtWidgets.QHBoxLayout(self.frame_3)
        self.horizontalLayout_14.setObjectName("horizontalLayout_14")
        self.yomunit_tableview = QtWidgets.QTableView(self.frame_3)
        self.yomunit_tableview.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.yomunit_tableview.setFrameShadow(QtWidgets.QFrame.Plain)
        self.yomunit_tableview.setObjectName("yomunit_tableview")
        self.horizontalLayout_14.addWidget(self.yomunit_tableview)
        self.ceu_tableview = QtWidgets.QTableView(self.frame_3)
        self.ceu_tableview.setFrameShape(QtWidgets.QFrame.WinPanel)
        self.ceu_tableview.setFrameShadow(QtWidgets.QFrame.Plain)
        self.ceu_tableview.setObjectName("ceu_tableview")
        self.horizontalLayout_14.addWidget(self.ceu_tableview)
        self.verticalLayout_4.addWidget(self.frame_3)
        self.stackedWidget.addWidget(self.fifth_page)
        # --- Finalize: central widget, actions, translations, initial page.
        self.verticalLayout_2.addWidget(self.stackedWidget)
        MainWindow.setCentralWidget(self.centralwidget)
        self.actionImport = QtWidgets.QAction(MainWindow)
        self.actionImport.setObjectName("actionImport")
        self.retranslateUi(MainWindow)
        # NOTE(review): starts on index 2 (second_page), not the home page —
        # presumably intentional from the .ui file; confirm.
        self.stackedWidget.setCurrentIndex(2)
        QtCore.QMetaObject.connectSlotsByName(MainWindow)
def retranslateUi(self, MainWindow):
    """Install all user-visible strings on the widgets built in setupUi.

    Auto-generated by pyuic from a Qt Designer .ui file; manual edits here
    are lost on regeneration — change the .ui file instead.
    NOTE(review): several source strings contain typos ("Signapore",
    "Aplha", "Facilty") that should be fixed in the .ui file.
    """
    _translate = QtCore.QCoreApplication.translate
    MainWindow.setWindowTitle(_translate("MainWindow", "MainWindow"))
    MainWindow.setWhatsThis(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:14pt; font-weight:600; text-decoration: underline;\">Data Mapping</span></p></body></html>"))
    self.textBrowser_2.setHtml(_translate("MainWindow", "<!DOCTYPE HTML PUBLIC \"-//W3C//DTD HTML 4.0//EN\" \"http://www.w3.org/TR/REC-html40/strict.dtd\">\n"
"<html><head><meta name=\"qrichtext\" content=\"1\" /><style type=\"text/css\">\n"
"p, li { white-space: pre-wrap; }\n"
"</style></head><body style=\" font-family:\'MS Shell Dlg 2\'; font-size:7.8pt; font-weight:400; font-style:normal;\">\n"
"<p style=\" margin-top:0px; margin-bottom:0px; margin-left:0px; margin-right:0px; -qt-block-indent:0; text-indent:0px;\"><span style=\" font-size:12pt; font-weight:600; color:#55aa00;\">Existing Facilities</span></p></body></html>"))
    # Column headers of the facilities table.
    self.label_7.setText(_translate("MainWindow", "Client Name"))
    self.label_11.setText(_translate("MainWindow", "Facility Name"))
    self.label_12.setText(_translate("MainWindow", "Booking Office"))
    self.label_13.setText(_translate("MainWindow", "LTV 20221 Q"))
    self.label_14.setText(_translate("MainWindow", "Rationale"))
    self.label_15.setText(_translate("MainWindow", "Status"))
    self.label_16.setText(_translate("MainWindow", "Last Updated"))
    # Sample/placeholder rows baked into the .ui file.
    self.client1_label.setText(_translate("MainWindow", "Donald"))
    self.facility1_label.setText(_translate("MainWindow", "Tango"))
    self.booking1_label.setText(_translate("MainWindow", "London"))
    self.label_33.setText(_translate("MainWindow", "70%"))
    self.label_35.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" color:#05aa13;\">Confirmed</span></p></body></html>"))
    self.label_17.setText(_translate("MainWindow", "not needed"))
    self.client2_label.setText(_translate("MainWindow", "Harry"))
    self.facility2_label.setText(_translate("MainWindow", "Kilo"))
    self.booking2_label.setText(_translate("MainWindow", "Sydney"))
    self.label_38.setText(_translate("MainWindow", "80%"))
    self.label_46.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" color:#cbcbcb;\">Confirm</span></p></body></html>"))
    self.label_18.setText(_translate("MainWindow", "To be done"))
    self.client3_label.setText(_translate("MainWindow", "Barrack"))
    self.facility3_label.setText(_translate("MainWindow", "Sierra"))
    self.booking3_label.setText(_translate("MainWindow", "Signapore"))
    self.label_40.setText(_translate("MainWindow", "90%"))
    self.label_44.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" color:#f38e29;\">Confirm</span></p></body></html>"))
    self.label_20.setText(_translate("MainWindow", "New boxes added "))
    self.client4_label.setText(_translate("MainWindow", "Birendra"))
    self.facility4_label.setText(_translate("MainWindow", "Aplha"))
    self.booking4_label.setText(_translate("MainWindow", "Banglore"))
    self.label_41.setText(_translate("MainWindow", "30%"))
    self.label_45.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" color:#55ff00;\">Confirmed</span></p></body></html>"))
    self.label_19.setText(_translate("MainWindow", "not needed"))
    # Import wizard pages (preview / navigation buttons).
    self.previewLabel.setText(_translate("MainWindow", "Preview"))
    self.importButton.setText(_translate("MainWindow", "Import"))
    self.label.setText(_translate("MainWindow", "Please select the File"))
    self.back_PushButton.setText(_translate("MainWindow", "Back"))
    self.next_pushButton.setText(_translate("MainWindow", "Next"))
    self.label_3.setText(_translate("MainWindow", "Input File"))
    self.next_push_button.setText(_translate("MainWindow", "Next"))
    self.backPushButton.setText(_translate("MainWindow", "Back"))
    self.label_10.setText(_translate("MainWindow", "Unknown Container Types"))
    self.label_9.setText(_translate("MainWindow", "Standard Container Types"))
    self.label_8.setText(_translate("MainWindow", "Mapping Unknown Container Types"))
    self.unkown_back_pushbutton.setText(_translate("MainWindow", "Back"))
    self.unknown_next_pushButton.setText(_translate("MainWindow", "Next"))
    self.label_2.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" text-decoration: underline;\">Excel Data</span></p></body></html>"))
    self.label_4.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" text-decoration: underline;\">Selected Data</span></p></body></html>"))
    self.back_pushbutton_s.setText(_translate("MainWindow", "Back"))
    self.selected_next_pushButton.setText(_translate("MainWindow", "Next"))
    # Summary page fields (values replaced at runtime).
    self.facility_label.setText(_translate("MainWindow", "TextLabel"))
    self.label_25.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:7pt; font-weight:600;\">Facilty Name</span></p></body></html>"))
    self.booking_label.setText(_translate("MainWindow", "TextLabel"))
    self.label_39.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:7pt; font-weight:600;\">Booking Office</span></p></body></html>"))
    self.unit_label.setText(_translate("MainWindow", "TextLabel"))
    self.label_47.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:7pt; font-weight:600;\">Units</span></p></body></html>"))
    self.ceu_label.setText(_translate("MainWindow", "TextLabel"))
    self.label_48.setText(_translate("MainWindow", "<html><head/><body><p><span style=\" font-size:7pt; font-weight:600;\">CEUs</span></p></body></html>"))
    self.confirm_export_pushbutton.setText(_translate("MainWindow", "Commit"))
    self.cancel_pushbutton.setText(_translate("MainWindow", "Cancel"))
    self.gohome_pushbutton.setText(_translate("MainWindow", "Back to Home "))
    self.label_5.setText(_translate("MainWindow", "Breakdown by Container type and Yom (Unit)"))
    self.label_6.setText(_translate("MainWindow", "Breakdown by Container type and Yom (CEU)"))
    self.actionImport.setText(_translate("MainWindow", "Import"))
|
#!/usr/bin/env /data/mta/Script/Python3.8/envs/ska3-shiny/bin/python
#################################################################################
# #
# disk_space_run_dusk.py: run dusk in each directory to get disk size #
# information. #
# #
# author: t. isobe (tisobe@cfa.harvard.edu) #
# #
# last update: Mar 04, 2021 #
# #
#################################################################################
import os
import sys
import re
#
#--- reading directory list
#
path = '/data/mta/Script/Disk_check/house_keeping/dir_list_py'
with open(path, 'r') as f:
    data = [line.strip() for line in f.readlines()]
# Each entry is "<value> : <variable name>"; exec creates a module-level
# variable for every entry (e.g. run_dir, bin_dir, mta_dir).
# NOTE(review): exec on file contents runs arbitrary code — the directory
# list file must remain trusted and write-protected.
for ent in data:
    atemp = re.split(':', ent)
    var = atemp[1].strip()
    line = atemp[0].strip()
    exec("%s = %s" %(var, line))
#
#--- append a path to a private folder to python directory
#
sys.path.append(bin_dir)
sys.path.append(mta_dir)
#
#--- converTimeFormat contains MTA time conversion routines
#
import mta_common_functions as mcf
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
#-----------------------------------------------------------------------------------------------
def disk_space_run_dusk():
    """Run dusk on each monitored directory tree and write each report
    into run_dir (a global created by the exec-based config loader above).
    """
    # (directory to scan, dusk invocation, output file name under run_dir)
    # /data/swolk/MAYS/ and /data/swolk/AARON/ were retired and dropped.
    targets = [
        ('/data/mta', '/usr/local/bin/dusk', 'dusk_mta'),
        ('/data/mta4', '/usr/local/bin/dusk', 'dusk_mta4'),
        ('/data/mays', 'nice -n15 /usr/local/bin/dusk', 'dusk_mays'),
        ('/data/mta_www', 'nice -n15 /usr/local/bin/dusk', 'dusk_www'),
        ('/proj/rac/ops/', 'nice -n15 /usr/local/bin/dusk', 'proj_ops'),
    ]
    for directory, dusk, outfile in targets:
        os.system('cd ' + directory + '; ' + dusk + ' > ' + run_dir + '/' + outfile)

#--------------------------------------------------------------------

if __name__ == '__main__':
    disk_space_run_dusk()
|
#!/usr/bin/env python
"""Packaging script for the tilda REST API framework."""
import pathlib
from tilda import VERSION
from setuptools import find_packages, setup

# Long description comes straight from the repository README.
PROJECT_ROOT = pathlib.Path(__file__).parent
README = (PROJECT_ROOT / 'README.md').read_text()

# Optional dependency groups, all supplied through freenit extras.
ldap = ['freenit[ldap]']
mongo = ['freenit[mongoengine]']
sql = ['freenit[sql]']
dev = ['freenit[dev]']

setup(
    name='tilda',
    version=VERSION,
    description='REST API framework based on Flask-Smorest',
    long_description=README,
    long_description_content_type='text/markdown',
    url='https://github.com/freenit-framework/backend',
    author='Goran Mekić',
    author_email='meka@tilda.center',
    license='BSD',
    classifiers=[
        'Development Status :: 1 - Planning',
        'Intended Audience :: Developers',
        'Topic :: Internet :: WWW/HTTP',
        'Environment :: Web Environment',
        'Framework :: Flask',
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python :: 3',
    ],
    keywords=[
        'REST',
        'openapi',
        'swagger',
        'flask',
        'marshmallow',
        # Bug fix: a missing comma made 'apispec' 'webargs' concatenate
        # into the single keyword 'apispecwebargs'.
        'apispec',
        'webargs',
    ],
    packages=find_packages(exclude=['tests*']),
    python_requires='>=3.5',
    install_requires=[
        'freenit[ldap]',
    ],
    extras_require={
        'all': ldap + mongo + sql,
        'ldap': ldap,
        'mongo': mongo,
        'sql': sql,
        'dev': dev,
    },
)
|
import os
import collections
from six.moves import cPickle
import numpy as np
from json import loads
import logging
from string import printable
class TextLoader():
    """Character-level batch loader for RNN language-model training.

    Reads ``input.txt`` from *data_dir*, builds a character vocabulary,
    encodes the text as an integer tensor, and serves (x, y) batches where
    y is x shifted left by one character (wrapping at the end).
    """
    def __init__(self, data_dir, batch_size, seq_length):
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.seq_length = seq_length

        input_file = os.path.join(data_dir, "input.txt")
        vocab_file = os.path.join(data_dir, "vocab.pkl")
        tensor_file = os.path.join(data_dir, "data.npy")
        # Reuse cached vocab/tensor when both exist; otherwise (re)build.
        if not (os.path.exists(vocab_file) and os.path.exists(tensor_file)):
            logging.info("reading text file")
            self.preprocess(input_file, vocab_file, tensor_file)
        else:
            logging.info("loading preprocessed files")
            self.load_preprocessed(vocab_file, tensor_file)
        self.create_batches()
        self.reset_batch_pointer()

    def preprocess(self, input_file, vocab_file, tensor_file):
        """Build the vocabulary from raw text and cache vocab + tensor."""
        with open(input_file, "r") as f:
            data = f.read()
        counter = collections.Counter(data)
        # Most frequent characters get the smallest indices.
        count_pairs = sorted(counter.items(), key=lambda x: -x[1])
        self.chars, _ = zip(*count_pairs)
        self.vocab_size = len(self.chars)
        self.vocab = dict(zip(self.chars, range(len(self.chars))))
        with open(vocab_file, 'wb') as f:
            cPickle.dump(self.chars, f)
        self.tensor = np.array(list(map(self.vocab.get, data)))
        np.save(tensor_file, self.tensor)

    def load_preprocessed(self, vocab_file, tensor_file):
        """Restore the cached vocabulary and encoded tensor."""
        with open(vocab_file, 'rb') as f:
            self.chars = cPickle.load(f)
        self.vocab_size = len(self.chars)
        self.vocab = dict(zip(self.chars, range(len(self.chars))))
        self.tensor = np.load(tensor_file)
        # Bug fix: this was the Python 2 statement `print self.tensor`,
        # a SyntaxError under Python 3.
        print(self.tensor)
        self.num_batches = int(self.tensor.size / (self.batch_size *
                                                   self.seq_length))

    def create_batches(self):
        """Trim the tensor to whole batches and split into x/y batch lists."""
        self.num_batches = int(self.tensor.size / (self.batch_size *
                                                   self.seq_length))
        # Robustness: np.split with 0 sections raises an opaque error.
        if self.num_batches == 0:
            raise ValueError(
                "Not enough data. Make seq_length and batch_size smaller.")
        self.tensor = self.tensor[:self.num_batches * self.batch_size * self.seq_length]
        xdata = self.tensor
        ydata = np.copy(self.tensor)
        # Targets are inputs shifted left by one, wrapping around.
        ydata[:-1] = xdata[1:]
        ydata[-1] = xdata[0]
        self.x_batches = np.split(xdata.reshape(self.batch_size, -1), self.num_batches, 1)
        self.y_batches = np.split(ydata.reshape(self.batch_size, -1), self.num_batches, 1)

    def next_batch(self):
        """Return the next (x, y) pair and advance the pointer."""
        x, y = self.x_batches[self.pointer], self.y_batches[self.pointer]
        self.pointer += 1
        return x, y

    def reset_batch_pointer(self):
        """Rewind to the first batch."""
        self.pointer = 0
joiner = lambda a: chr(0).join(a['text'])
padder = lambda a: a[:500].ljust(500,chr(1))
PRINTABLE_AUG=printable + chr(0) + chr(1) +chr(2)
def _eval(a):
return padder(joiner(eval(a)))
class NumpyLoader():
    """Batch loader for fixed-width (500-char) comment strings cached as a
    uint8 numpy tensor, encoded against the PRINTABLE_AUG alphabet.
    """
    def __init__(self, data_dir, batch_size, seq_length):
        self.data_dir = data_dir
        self.batch_size = batch_size
        self.seq_length = seq_length
        self.tensor_file = os.path.join(data_dir, "input.npy")

        input_file = os.path.join(data_dir, "comments.json")
        vocab_file = os.path.join(data_dir, "vocab.pkl")
        tensor_file = os.path.join(data_dir, "data.npy")
        # Fixed vocabulary: printable characters plus the three sentinels.
        self.vocab_size = len(PRINTABLE_AUG)
        self.chars = PRINTABLE_AUG
        self.vocab = dict(zip(self.chars, range(len(self.chars))))
        with open(vocab_file, 'wb') as f:
            cPickle.dump(self.chars, f)
        if not os.path.exists(tensor_file):
            logging.info("reading comments file")
            self.preprocess(input_file, vocab_file, tensor_file)
        else:
            logging.info("loading preprocessed files")
            self.load_preprocessed(vocab_file, tensor_file)
        self.create_batches()

    def preprocess(self, input_file, vocab_file, tensor_file):
        """Parse raw comment records, encode them, and cache as .npy.

        NOTE(review): the view('S1')/.view(np.int16) chain assumes
        Python 2 byte strings (numpy 'S' dtype) — confirm before running
        under Python 3, where dtype=str produces unicode arrays.
        """
        with open(input_file) as comments_file:
            _comments = comments_file.readlines()
        from multiprocessing import Pool
        dc = self.vocab[chr(2)]  # "don't care" code for unknown characters
        p = Pool()
        logging.info("Loading and parsing comments using %d workers..." % len(p._pool))
        comments = p.map(_eval, _comments)
        del p
        logging.info("Translating to numbers")
        comments = np.array(comments, dtype=str)
        comments = comments.view('S1').reshape(comments.shape[0], 500).view(np.int16).copy()
        # Map each known character code to -(index + 1), then flip sign and
        # shift so known chars become their vocab index; anything left
        # positive (unknown) is clamped to the "don't care" code.
        for i, c in enumerate(PRINTABLE_AUG):
            comments[comments == ord(c)] = -(i + 1)
        comments *= -1
        comments -= 1
        comments[comments < 0] = dc
        logging.info("Done, caching to disk")
        comments = comments.astype('uint8')
        self.data = comments[:, :self.seq_length].astype('int32')
        np.save(tensor_file, comments)

    def load_preprocessed(self, vocab_file, tensor_file):
        """Load the cached tensor and trim to seq_length columns."""
        self.data = np.load(tensor_file)[:, :self.seq_length].astype('int32')

    def create_batches(self):
        """Compute the batch count, trim the data, and rewind the pointer.

        Bug fixes: floor division (the Py2-era ``/`` yielded a float batch
        count under Python 3) and pointer initialisation (next_batch()
        previously raised AttributeError unless reset_batch_pointer() was
        called first — __init__ never called it).
        """
        self.num_batches = 1 + (self.data.shape[0] // self.batch_size)
        self.data = self.data[:, :self.seq_length]
        self.pointer = 0

    def next_batch(self, return_y=True):
        """Return the next batch; y is x shifted left with the padding class
        (len(printable) + 1) in the final column."""
        start = self.pointer * self.batch_size
        x = self.data[start:start + self.batch_size]
        self.pointer += 1
        if return_y:
            y = x.copy()
            y[:, :-1] = x[:, 1:]
            y[:, -1] = len(printable) + 1
            return x, y
        else:
            return x

    def reset_batch_pointer(self):
        """Shuffle the data rows in place and rewind the pointer."""
        np.random.shuffle(self.data)
        self.pointer = 0

    def make_holdout_set(self, n=2):
        '''
        Hold out the first ``n`` batches as a validation set and remove
        them from the training data.

        :param n: The number of batches to hold out
        :return:
        '''
        self.holdout_set = self.data[:n * self.batch_size]
        self.num_batches -= n
        self.batches_held_back = n
        self.data = self.data[n * self.batch_size:]
|
# Read three integers from one line; print their sum and mean (2 decimals).
first, second, third = input().split()
total = int(first) + int(second) + int(third)
print(total, format(total / 3, "0.2f"))
|
# -*- coding: utf-8 -*-
# Print the characters at even indices of the word (yields 'パトカー').
word = 'パタトクカシーー'
print(word[::2])
|
from app import app, db
from flask import render_template, request, flash, get_flashed_messages, redirect, url_for
from datetime import datetime
from app.forms import AddTaskForm, DeleteTaskForm
from app.models import Task
@app.route("/", methods=["GET", "POST"])
def home():
    """List all tasks; on POST, create a new task from the submitted form."""
    form = AddTaskForm()
    tasks = None
    # NOTE(review): truthiness of request.form is checked instead of
    # form.validate_on_submit(), so WTForms validation/CSRF is bypassed —
    # confirm whether that is intentional.
    if request.form:
        try:
            task = Task(title=form.title.data, date=datetime.utcnow())
            db.session.add(task)
            db.session.commit()
            return redirect(url_for('home'))
        except Exception as e:
            # Best effort: report the failure and fall through to re-render.
            print("Failed to add Task")
            print(e)
    tasks = Task.query.all()
    return render_template('index.html', tasks=tasks, form=form)
@app.route("/update", methods=["POST"])
def update():
    """Rename a task using form fields 'oldTask' (current title) and 'newTask'."""
    try:
        new_title = request.form.get("newTask")
        old_title = request.form.get("oldTask")
        task = Task.query.filter_by(title=old_title).first()
        # first() returns None when no task matches; the resulting
        # AttributeError is caught below along with any DB errors.
        task.title = new_title
        db.session.commit()
    except Exception as e:
        # Bug fix: the message said "book title" — a copy-paste leftover
        # from another app; this one manages tasks.
        print("Could not update task title")
        print(e)
    return redirect('/')
@app.route("/delete", methods=["POST"])
def delete():
    """Delete the task whose title matches the submitted 'task' field."""
    title = request.form.get('task')
    task = Task.query.filter_by(title=title).first()
    # Robustness fix: first() returns None when no task matches; the
    # original crashed on db.session.delete(None).
    if task is not None:
        db.session.delete(task)
        db.session.commit()
    return redirect('/')
|
import abc
import os.path as osp
from smqtk.representation import SmqtkRepresentation, DescriptorElement
from smqtk.utils import plugin
class DescriptorIndex (SmqtkRepresentation, plugin.Pluggable):
    """
    Index of descriptors, keyed and query-able by descriptor UUID.

    Note that these indexes do not use the descriptor type strings. Thus, if
    a set of descriptors has multiple elements with the same UUID, but
    different type strings, they will bash each other in these indexes. In such
    a case, when dealing with descriptors for different generators, it is
    advisable to use multiple indices.
    """

    def __delitem__(self, uuid):
        """Remove the descriptor with the given UUID (see remove_descriptor)."""
        self.remove_descriptor(uuid)

    def __getitem__(self, uuid):
        """Return the descriptor with the given UUID (see get_descriptor)."""
        return self.get_descriptor(uuid)

    def __iter__(self):
        """Iterate over stored descriptor elements (see iterdescriptors)."""
        return self.iterdescriptors()

    def __len__(self):
        """Return the number of stored descriptors (see count)."""
        return self.count()

    def __contains__(self, item):
        """Return True if *item* (a DescriptorElement) is indexed by UUID."""
        if isinstance(item, DescriptorElement):
            # Testing for UUID inclusion since element hash based on UUID value.
            return self.has_descriptor(item.uuid())
        return False

    @abc.abstractmethod
    def count(self):
        """
        :return: Number of descriptor elements stored in this index.
        :rtype: int
        """

    @abc.abstractmethod
    def clear(self):
        """
        Clear this descriptor index's entries.
        """

    @abc.abstractmethod
    def has_descriptor(self, uuid):
        """
        Check if a DescriptorElement with the given UUID exists in this index.

        :param uuid: UUID to query for
        :type uuid: collections.Hashable

        :return: True if a DescriptorElement with the given UUID exists in this
            index, or False if not.
        :rtype: bool
        """

    @abc.abstractmethod
    def add_descriptor(self, descriptor):
        """
        Add a descriptor to this index.

        Adding the same descriptor multiple times should not add multiple copies
        of the descriptor in the index (based on UUID). Added descriptors
        overwrite indexed descriptors based on UUID.

        :param descriptor: Descriptor to index.
        :type descriptor: smqtk.representation.DescriptorElement
        """

    @abc.abstractmethod
    def add_many_descriptors(self, descriptors):
        """
        Add multiple descriptors at one time.

        Adding the same descriptor multiple times should not add multiple copies
        of the descriptor in the index (based on UUID). Added descriptors
        overwrite indexed descriptors based on UUID.

        :param descriptors: Iterable of descriptor instances to add to this
            index.
        :type descriptors:
            collections.Iterable[smqtk.representation.DescriptorElement]
        """

    @abc.abstractmethod
    def get_descriptor(self, uuid):
        """
        Get the descriptor in this index that is associated with the given UUID.

        :param uuid: UUID of the DescriptorElement to get.
        :type uuid: collections.Hashable

        :raises KeyError: The given UUID doesn't associate to a
            DescriptorElement in this index.

        :return: DescriptorElement associated with the queried UUID.
        :rtype: smqtk.representation.DescriptorElement
        """

    @abc.abstractmethod
    def get_many_descriptors(self, uuids):
        """
        Get an iterator over descriptors associated to given descriptor UUIDs.

        :param uuids: Iterable of descriptor UUIDs to query for.
        :type uuids: collections.Iterable[collections.Hashable]

        :raises KeyError: A given UUID doesn't associate with a
            DescriptorElement in this index.

        :return: Iterator of descriptors associated to given uuid values.
        :rtype: collections.Iterable[smqtk.representation.DescriptorElement]
        """

    @abc.abstractmethod
    def remove_descriptor(self, uuid):
        """
        Remove a descriptor from this index by the given UUID.

        :param uuid: UUID of the DescriptorElement to remove.
        :type uuid: collections.Hashable

        :raises KeyError: The given UUID doesn't associate to a
            DescriptorElement in this index.
        """

    @abc.abstractmethod
    def remove_many_descriptors(self, uuids):
        """
        Remove descriptors associated to given descriptor UUIDs from this index.

        :param uuids: Iterable of descriptor UUIDs to remove.
        :type uuids: collections.Iterable[collections.Hashable]

        :raises KeyError: A given UUID doesn't associate with a
            DescriptorElement in this index.
        """

    @abc.abstractmethod
    def iterkeys(self):
        """
        Return an iterator over indexed descriptor keys, which are their UUIDs.
        :rtype: collections.Iterator[collections.Hashable]
        """

    @abc.abstractmethod
    def iterdescriptors(self):
        """
        Return an iterator over indexed descriptor element instances.
        :rtype: collections.Iterator[smqtk.representation.DescriptorElement]
        """

    @abc.abstractmethod
    def iteritems(self):
        """
        Return an iterator over indexed descriptor key and instance pairs.
        :rtype: collections.Iterator[(collections.Hashable,
                                      smqtk.representation.DescriptorElement)]
        """

    def keys(self):
        """ alias for iterkeys """
        return self.iterkeys()

    def items(self):
        """ alias for iteritems """
        return self.iteritems()
def get_descriptor_index_impls(reload_modules=False):
    """
    Discover and return ``DescriptorIndex`` implementation classes.

    Modules are searched next to this file (names starting with an
    alphanumeric character) and in any python modules listed in the
    ``DESCRIPTOR_INDEX_PATH`` environment variable (separated by the
    platform PATH separator: ``;`` on Windows, ``:`` on unix). A module may
    export implementations explicitly through a ``DESCRIPTOR_INDEX_CLASS``
    attribute (a single class or an iterable of classes; ``None`` means the
    module is skipped); otherwise its attributes are scanned for subclasses
    of ``DescriptorIndex``. Modules raising errors are skipped.

    :param reload_modules: Explicitly reload discovered modules from source.
    :type reload_modules: bool

    :return: Map of discovered class object of type ``DescriptorIndex``
        whose keys are the string names of the classes.
    :rtype: dict[str, type]
    """
    search_dir = osp.abspath(osp.dirname(__file__))
    return plugin.get_plugins(__name__, search_dir,
                              'DESCRIPTOR_INDEX_PATH',
                              'DESCRIPTOR_INDEX_CLASS',
                              DescriptorIndex,
                              reload_modules=reload_modules)
|
from flask import Flask, url_for, render_template
# import smhutson.github.io
app = Flask(__name__)
# NOTE(review): debug=True enables the interactive debugger/reloader —
# must not be enabled in production.
app.debug = True
@app.route('/')
def index():
    # Serve the site landing page.
    return render_template('index.html')
if __name__ == "__main__":
    app.run()
|
import requests
# Chain-following exercise: each fetched file contains the name of the next
# file to fetch, until a body starting with 'We' (the final answer text).
url = "699991.txt"
while url[:2] != 'We':
    r = requests.get("https://stepic.org/media/attachments/course67/3.6.3/" + url)
    url = r.text
# Save the final text to disk.
with open('out.txt', 'w') as file_writer:
    file_writer.write(url)
import random
import math
import time
def insertion_sort(A):
    """Sort the list A in place, ascending, using insertion sort (O(n^2))."""
    for idx in range(1, len(A)):
        current = A[idx]
        pos = idx
        # Shift larger elements right until current's slot is found.
        while pos > 0 and A[pos - 1] > current:
            A[pos] = A[pos - 1]
            pos -= 1
        A[pos] = current
# Build 100 random integers in [0, 10000] and time the sort.
a = [int(random.random() * 10001) for _ in range(100)]

begin = time.time()
insertion_sort(a)
end = time.time()

# Bug fix: the original used Python 2 print statements
# (`print a`), which are a SyntaxError under Python 3.
print(a)
print("It took %.2f seconds" % (end - begin))
|
# Read TextGrid in Python
# 2017-04-02 jkang
# Python3.5
#
# **Prerequisite**
# - Install 'textgrid' package
# from https://github.com/kylebgorman/textgrid
import textgrid
import numpy as np  # NOTE(review): numpy appears unused in this script
# Parse the TextGrid file and grab the 'phone' tier intervals.
T = textgrid.TextGrid()
T.read('stops.TextGrid')
w_tier = T.getFirst('phone').intervals # 'phone' tier
words_raw = []
for ival in range(len(w_tier)):
    words_raw.append(w_tier[ival].mark) # get labels
    print(w_tier[ival].mark)
# unique word list (set() drops duplicates; order is not preserved)
words_list = list(set(words_raw))
print('unique words:', words_list)
|
# Isikukoodi valideerimise jaoks tehtud skript. skript võtab kasutajalt isikukoodi,
# ja väljastab kasutajale andmed selle kohta. (sünnikuupäev, sugu) jne.
# Import date for the "born in the future" check below.
from datetime import date
# Estonian ID-code checksum weights (stage 1 and stage 2).
kordajad1 = [1, 2, 3, 4, 5, 6, 7, 8, 9,1]
kordajad2 = [3, 4, 5, 6, 7, 8, 9, 1, 2, 3]
# Ask the user for the ID code.
ID = input('Isikukood: ')
# Validate the basic format of the ID code; returns its first digit.
def valideeri_id(isikukood):
    # An Estonian ID code is exactly 11 characters long.
    if len(isikukood) != 11:
        raise ValueError('Isikukood peab olema täpselt 11 tähemärki pikk!')
    # The first digit encodes gender and birth century.
    # NOTE(review): isikukoodi_andmed below also handles first digits
    # 7 and 8 (22nd century), which this check rejects — confirm the
    # intended range.
    id_esimene_arv = isikukood[0:1]
    if id_esimene_arv < '1' or id_esimene_arv > '6':
        raise ValueError('Sisestage korrektne isikukood (esimene number peab jääma vahemikku 1 kuni 6)')
    return id_esimene_arv
# Convert the ID digits to integer lists for the checksum computation.
ID_järjend = list(ID[0:11])
ID_kontrolljärjend = list(ID[0:10])
for i in range(0, len(ID_järjend)):
    ID_järjend[i] = int(ID_järjend[i])
for i in range(0, len(ID_kontrolljärjend)):
    ID_kontrolljärjend[i] = int(ID_kontrolljärjend[i])
# Checksum per the Estonian standard: weight the first 10 digits with
# kordajad1; if sum % 11 < 10 that is the check digit. If the remainder is
# 10, reweight with kordajad2; if that remainder is also 10, the check
# digit is 0.
# Bug fix: the original zeroed a stage-1 remainder of 10 instead of
# reweighting, and its stage-2 branch recomputed the remainder from the
# *stage-1* products (the `korrutatud` list was never used).
ID_viimane_number = ID_järjend[10]
kontrollnumber_jääk = sum(a * b for a, b in zip(ID_kontrolljärjend, kordajad1)) % 11
if kontrollnumber_jääk == 10:
    kontrollnumber_jääk = sum(a * b for a, b in zip(ID_kontrolljärjend, kordajad2)) % 11
    if kontrollnumber_jääk == 10:
        kontrollnumber_jääk = 0
if kontrollnumber_jääk != ID_viimane_number:
    raise ValueError('Isikukood ei ole kehtiv!')
# Extract the person's data from the ID code.
def isikukoodi_andmed(isikukood):
    """Validate the ID code and fill the module-level result fields
    (sugu, sünnipäev, ...) read by the reporting code below."""
    id_esimene_arv = valideeri_id(isikukood)
    # Results are published as module-level globals.
    global sugu
    global sünniaasta_lõpp
    global sünnikuupäev
    global sünnikuunumber
    global järjekorranumber
    global kontrollnumber
    global sünnipäev
    global praegune_aasta
    sünniaasta_lõpp = isikukood[1:3]
    sünnikuupäev = isikukood[5:7]
    sünnikuunumber = isikukood[3:5]
    järjekorranumber = isikukood[7:10]
    kontrollnumber = isikukood[10]
    # Gender from the first digit (odd = male, even = female).
    if id_esimene_arv in ('1', '3', '5', '7'):
        sugu = 'Mees'
    elif id_esimene_arv in ('2', '4', '6', '8'):
        sugu = 'Naine'
    # Birth century from the first digit; sünnipäev is 'dd/mm/yyyy'.
    if id_esimene_arv in ('1', '2'):
        sünnipäev = sünnikuupäev + '/' + sünnikuunumber + '/' + '18' + sünniaasta_lõpp
    elif id_esimene_arv in ('3', '4'):
        sünnipäev = sünnikuupäev + '/' + sünnikuunumber + '/' + '19' + sünniaasta_lõpp
    elif id_esimene_arv in ('5', '6'):
        sünnipäev = sünnikuupäev + '/' + sünnikuunumber + '/' + '20' + sünniaasta_lõpp
    elif id_esimene_arv in ('7', '8'):
        sünnipäev = sünnikuupäev + '/' + sünnikuunumber + '/' + '21' + sünniaasta_lõpp
    # Months with at most 30 days.
    if sünnikuunumber in ('04', '06', '09', '11'):
        if sünnikuupäev > '30':
            raise ValueError('Sellel kuul ei ole rohkem kui 30 päeva!')
    # Months with at most 31 days.
    if sünnikuunumber in ('01', '03', '05', '07', '08', '10', '12'):
        if sünnikuupäev > '31':
            raise ValueError('Sellel kuul ei ole rohkem kui 31 päeva!')
    # February day count depends on leap year.
    # Bug fix: the original wrote `if liigaasta is True:` which compares the
    # *function object* to True (always False), so neither February branch
    # ever ran; the check is now actually called and restricted to month 02.
    if sünnikuunumber == '02':
        if liigaasta(int(sünnipäev[6:10])):
            if sünnikuupäev > '29':
                raise ValueError('Sellel kuul ei ole rohkem kui 29 päeva!')
        else:
            if sünnikuupäev > '28':
                raise ValueError('Sellel kuul ei ole rohkem kui 28 päeva!')
    # A person cannot be born in the future.
    praegune_aasta = date.today().year
    if int(sünnipäev[6:10]) > praegune_aasta:
        raise ValueError('Inimene ei saa sündida tulevikus!')
# Check whether a year is a leap year.
def liigaasta(aasta):
    """Return True if *aasta* is a leap year under Gregorian rules."""
    if aasta % 400 == 0:
        return True
    if aasta % 100 == 0:
        return False
    return aasta % 4 == 0
# Validate the ID code and populate the global result fields.
isikukoodi_andmed(ID)
# Print the extracted data; sünnipäev is 'dd/mm/yyyy'.
print('\n=================================')
print('ISIKUKOODI ' + ID + ' ANDMED')
print('Sugu: ' + sugu)
print('\tSÜNNIAEG: ')
print('\tAasta: ' + sünnipäev[6:10])
# Bug fix: the month was sliced as [4:5], printing only the second digit
# of the month ('12' -> '2'); the month occupies indices 3..4.
print('\tKuu: ' + sünnipäev[3:5])
print('\tPäev: ' + sünnipäev[0:2])
print('=================================')
|
"""Views"""
#from django.shortcuts import render  # pylint: disable=unused-import
from django.views import generic
class IndexView(generic.ListView): #pylint: disable=too-many-ancestors
    """List view for the andrew app's index page."""
    template_name = 'andrew/index.html'
    # queryset is left None here; the URLconf or a subclass must supply it.
    queryset = None
class DetailView(generic.DetailView): #pylint: disable=too-many-ancestors
    """Detail view for a single andrew app object."""
    template_name = 'andrew/detail.html'
    # queryset is left None here; the URLconf or a subclass must supply it.
    queryset = None
|
# I declare that my work contains no examples of misconduct, such as plagiarism, or collusion.
# Any code taken from other sources is referenced within my code solution.
# Student ID: 2019757
# Date: 02/04/2020
# Part 1 - Student Version
def pro_intro(): # let the user know how to get progression outcomes and quit from the program
    """Print the program banner and the accepted menu keys (y / q)."""
    print('''\t\t\t University of Westminster
\t\t Academic Year 2020
\t\t Progression Outcome
\t\t Student Version
\t\t===========================\n''')
    print('''Please enter:
y Another Student
q Exit\n''')
def input_valid_pass():
    """Prompt repeatedly until the user enters an integer pass credit."""
    while True:
        raw = input('Pass credit:\n')
        try:
            return int(raw)
        except ValueError:
            # Non-numeric entry: explain and re-prompt.
            print('Requires a valid integer as Pass credit!')
def input_valid_defer():
    """Prompt repeatedly until the user enters an integer defer credit."""
    while True:
        raw = input('Defer credit:\n')
        try:
            return int(raw)
        except ValueError:
            # Non-numeric entry: explain and re-prompt.
            print('Requires a valid integer as Defer credit!')
def input_valid_fail():
    """Prompt repeatedly until the user enters an integer fail credit."""
    while True:
        try:
            fail_credits = int(input('Fail credit:\n')) # check whether is it an integer
            return fail_credits
        except ValueError:
            # Bug fix: the message said 'Defer credit' — a copy-paste
            # leftover from input_valid_defer.
            print('Requires a valid integer as Fail credit!')
def creditValidity(pass_credits, defer_credits, fail_credits):
    """Return True when all three credit values are valid (multiples of 20
    in 0..120); otherwise print 'Range error!' and return False.

    Bug fix: the third parameter was named ``fail_credit`` but the body
    read ``fail_credits``, which silently resolved to the module-level
    global instead of the argument passed by the caller.
    """
    credit_range = [0, 20, 40, 60, 80, 100, 120]
    if (pass_credits not in credit_range
            or defer_credits not in credit_range
            or fail_credits not in credit_range):
        print('Range error!') # print Range error
        return False
    else:
        return True
#=============================================================================== ### Main Program ### =====================================================================================#
pro_intro()
credit_range = [0, 20, 40, 60, 80, 100, 120]
user_input = input(' Another Student(y/q)') # get user inputs of his requirements
user_input = user_input.lower()
while (user_input == 'y'): # run the program while the user keeps answering 'y'
    pass_credits = input_valid_pass()   # validated pass credits
    defer_credits = input_valid_defer() # validated defer credits
    fail_credits = input_valid_fail()   # validated fail credits
    if (not creditValidity(pass_credits, defer_credits, fail_credits)):
        # Out-of-range values: re-prompt for all three credits.
        continue
    # Bug fix (readability): the original wrapped this classification in a
    # `while True:` loop in which every branch immediately broke — dead
    # loop structure removed; behavior is unchanged.
    total = pass_credits + fail_credits + defer_credits
    if total != 120:
        print('Total is incorrect!') # credits must sum to exactly 120
    elif pass_credits == credit_range[6]:   # 120 passed credits
        print('Progress')
    elif pass_credits == credit_range[5]:   # 100 passed credits
        print('Progress-module trailer')
    elif fail_credits > credit_range[3]:    # more than 60 failed credits
        print('Exclude')
    else:
        print('Do not progress-module retriever')
    user_input = input(' Another Student(y/q)') # show another outcome or quit
    user_input = user_input.lower()
|
from .arcface import ArcFaceLoss
from .bce import BCEWithLogitsLossAndIgnoreIndex
from .combo_loss import SegmentationWithClassificationHeadLoss
from .dice_loss import DiceLoss
from .focal_loss import (
BinaryDualFocalLoss,
BinaryFocalLoss,
BinaryReducedFocalLoss,
FocalLoss,
LabelSmoothBinaryFocalLoss,
)
from .lovasz_loss import LovaszHingeLoss, LovaszSoftmaxLoss
from .noisy_loss import (
IterativeSelfLearningLoss,
JointOptimizationLoss,
LabelSmoothingCrossEntropy,
OUSMLoss,
SymmetricBCELoss,
SymmetricBinaryFocalLoss,
SymmetricCrossEntropy,
)
from .ohem_loss import OHEMLoss, OHEMLossWithLogits
from .vat import VATLoss
|
import tkinter as tk
from FuncaoBeckEnd import *
from ProgramaPrincipal import *
# Tracks whether login succeeded (set by the login button callback).
passagem = False
# Logged-in client data, filled after authentication so the main menu
# can be built dynamically for that client.
dadosCliente = []
def mainMenu():
    """Show the login window and return True once a user authenticates.

    Builds a Tk login form with email/password entries, a login button and
    a sign-up button. On a successful login it stores the user's record in
    the module-level ``dadosCliente`` and sets ``passagem = True`` before
    closing the window. Blocks inside Tk's mainloop until the window is
    destroyed.

    :return: the module-level ``passagem`` flag (True iff login succeeded).
    """
    # Build the login window. geometry is "WxH+X+Y": a 500x250 window
    # placed at screen offset (500, 200).
    menuInicial = tk.Tk()
    menuInicial.geometry("500x250+500+200")
    menuInicial.title("To do List")
    menuInicial.minsize(200, 100)
    menuInicial['bg'] = '#AED0D9'  # window background colour (hex)

    # Credential entry fields.
    # NOTE(review): ``textvariable`` expects a tk.StringVar (or a Tk
    # variable name), not placeholder text — as written these strings are
    # never displayed inside the entries. Left unchanged to preserve
    # behaviour; real placeholders need <FocusIn>/<FocusOut> bindings.
    emailCliente = tk.Entry(menuInicial, textvariable = "digite seu email...")
    senhaCliente = tk.Entry(menuInicial, textvariable = "digite sua senha...")

    # Error label shown (packed) whenever authentication fails.
    usuErrado = tk.Label(menuInicial, text='Me desculpe, usuário errado', bg='#AED0D9')

    def funcBotaoCliente():
        """Check the typed credentials against the backend and react."""
        email = emailCliente.get()
        senha = senhaCliente.get()
        valor = checarExistencia(email, senha)
        global passagem
        global dadosCliente
        if valor:
            usuErrado.destroy()
            passagem = True
            # Fetch the record later consumed by the client's main screen.
            dadosCliente = pegarInfo(email)
            menuInicial.destroy()
        else:
            # Re-packing an already-visible label is a harmless no-op, so
            # no extra bookkeeping is needed. (The original kept a local
            # ``contador`` that was initialized to 0 and never changed,
            # making its branch unreachable — removed as dead code.)
            usuErrado.pack()

    def funcBotaoNovo():
        """Open the sign-up window for creating a new account."""
        novaAba = abrirOutraAba("Criando nova conta", 500, 250)
        # Labels.
        bemVindo = tk.Label(novaAba, text = "Seja bem vindo, coloque seu nome e email!")
        msgEmail = tk.Label(novaAba, text = "Coloque seu email")
        msgSenha = tk.Label(novaAba, text = "Coloque sua senha")
        msgNome = tk.Label(novaAba, text = "Coloque seu nome")
        # Entry widgets whose contents insereBanco persists.
        nomeNovo = tk.Entry(novaAba)
        emailNovo = tk.Entry(novaAba)
        senhaNova = tk.Entry(novaAba)
        # Button: hand the entry widgets to the backend insert routine.
        botao = tk.Button(novaAba, text = "Criar usuário", command = lambda: insereBanco(nomeNovo, emailNovo, senhaNova, novaAba))
        # Lay everything out top-to-bottom.
        bemVindo.pack()
        msgNome.pack()
        nomeNovo.pack()
        msgEmail.pack()
        emailNovo.pack()
        msgSenha.pack()
        senhaNova.pack()
        botao.pack()
        novaAba.mainloop()

    # Action buttons.
    botaoEntrar = tk.Button(menuInicial, text = 'Entrar', command = funcBotaoCliente, borderwidth = 0)
    botaoNovo = tk.Button(menuInicial, text = 'Sou novo aqui', command = funcBotaoNovo)
    # Field labels, matched to the window background.
    emailLbl = tk.Label(menuInicial, text = "Digite seu email:")
    emailLbl['bg'] = '#AED0D9'
    senhaLbl = tk.Label(menuInicial, text = "Digite a sua senha:")
    senhaLbl['bg'] = '#AED0D9'
    # Layout, top-to-bottom.
    emailLbl.pack()
    emailCliente.pack()
    senhaLbl.pack()
    senhaCliente.pack()
    botaoEntrar.pack()
    botaoNovo.pack()
    menuInicial.mainloop()
    return passagem
#Fim da Função do menu de entrada
# Run the login window; blocks until the user logs in or closes it.
valor = mainMenu()
# Launch the main application only for an authenticated user.
if valor:
    mainPP(dadosCliente)
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.