seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
24129528828 | import cv2
filepath = "vtest.avi"
# cap = cv2.VideoCapture(filepath)
# Webカメラを使うときはこちら
class Capture:
    """
    Provide motion-detection support for a video source (webcam or mp4 file).
    """
    def __init__(self, movie_path: str | None = None, device: int = 0) -> None:
        """Initialize the capture source and its configuration.

        Parameters
        ----------
        movie_path: Path to an mp4 video file. When None (default), the
            webcam selected by ``device`` is used instead.
        device: Index of the webcam passed to cv2.VideoCapture.

        Raises
        ------
        ValueError: If ``movie_path`` does not point to an mp4 file.
        IOError: If the video source cannot be opened.
        """
        if not movie_path:
            self.cap = cv2.VideoCapture(device)
        else:
            # The original code raised plain strings, which is itself a
            # TypeError in Python 3; raise proper exception types instead,
            # and check the real extension rather than the last 3 characters.
            if not movie_path.lower().endswith('.mp4'):
                raise ValueError("Non-mp4 files are not supported.")
            self.cap = cv2.VideoCapture(movie_path)
            if not self.cap.isOpened():
                raise IOError("cannot capture video. not found video. arrow to capture 'mp4'.")
        # Running-average background model; initialised on the first frame.
        self.avg = None

    def motion_detection(self, reseption: float = 0.8):
        """Run the motion-detection loop until the stream ends or ESC is hit.

        Parameters
        ----------
        reseption: Weight passed to cv2.accumulateWeighted; controls how
            fast the running-average background adapts to new frames.
            (Parameter name kept as-is for backward compatibility.)
        """
        while True:
            # Grab one frame at a time.
            ret, frame = self.cap.read()
            if not ret:
                break
            # Convert to grayscale for background modelling.
            gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
            # Initialise the comparison frame on the first iteration.
            if self.avg is None:
                self.avg = gray.copy().astype("float")
                continue
            # Difference between the current frame and the running average.
            cv2.accumulateWeighted(gray, self.avg, reseption)
            frameDelta = cv2.absdiff(gray, cv2.convertScaleAbs(self.avg))
            # Threshold the delta image.
            thresh = cv2.threshold(frameDelta, 3, 255, cv2.THRESH_BINARY)[1]
            # Outline the thresholded regions on the original frame.
            contours, _ = cv2.findContours(thresh.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
            frame = cv2.drawContours(frame, contours, -1, (0, 255, 0), 3)
            # Show the result; ESC (27) exits the loop.
            cv2.imshow("Frame", frame)
            key = cv2.waitKey(30)
            if key == 27:
                break
        self.cap.release()
        cv2.destroyAllWindows()
if __name__ == "__main__":
cc = Capture().motion_detection(reseption=0.5) | ik0326/aisolution | test/capture.py | capture.py | py | 2,167 | python | ja | code | 0 | github-code | 1 | [
{
"api_name": "cv2.VideoCapture",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "cv2.COLOR_BGR2GRAY",
... |
16026511667 | import sys
import rdkit
from argparse import ArgumentParser
from rdkit import Chem, DataStructs
from rdkit.Chem import AllChem
import pandas as pd

# Predicted SMILES strings come from a (to-be-filled-in) CSV file.
pred_mols = pd.read_csv('',
                        header=None).values.reshape(-1)

ref_path = 'actives.txt'
with open(ref_path) as f:
    next(f)  # skip the header line
    true_mols = [line.split(',')[0] for line in f]
print('number of active reference', len(true_mols))

# Parse the reference SMILES, dropping anything RDKit cannot read, then
# fingerprint them (Morgan radius 3, 2048 bits).
true_mols = [m for m in (Chem.MolFromSmiles(s) for s in true_mols) if m is not None]
true_fps = [AllChem.GetMorganFingerprintAsBitVect(m, 3, 2048) for m in true_mols]

# Same treatment for the predicted molecules.
pred_mols = [m for m in (Chem.MolFromSmiles(s) for s in pred_mols) if m is not None]
pred_fps = [AllChem.GetMorganFingerprintAsBitVect(m, 3, 2048) for m in pred_mols]

# A prediction counts as "similar" when its best Tanimoto match among the
# actives reaches 0.4; novelty is the complementary fraction.
fraction_similar = 0
sim_distribution = []
for fp in pred_fps:
    best_sim = max(DataStructs.BulkTanimotoSimilarity(fp, true_fps))
    if best_sim >= 0.4:
        fraction_similar += 1
        sim_distribution.append(best_sim)
print('novelty:', 1 - fraction_similar / len(pred_mols))

# Internal diversity: 1 minus the mean pairwise Tanimoto similarity over all
# unordered pairs of predictions.
similarity = 0
for i in range(len(pred_fps)):
    similarity += sum(DataStructs.BulkTanimotoSimilarity(pred_fps[i], pred_fps[:i]))
n = len(pred_fps)
n_pairs = n * (n - 1) / 2
diversity = 1 - similarity / n_pairs
print('diversity:', diversity)
| jkwang93/MCMG | eval/final_eval.py | final_eval.py | py | 1,343 | python | en | code | 59 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem.MolFromSmiles",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "rdkit.Chem",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "rdkit.Chem.AllChem... |
16676517988 | from django.shortcuts import render
from django.http import HttpResponse
from django.template import Template,Context
# Create your views here.
def suma(request, num1, num2):
    """Return an HTML page showing the sum of ``num1`` and ``num2``.

    Bug fix: the original markup nested <body> inside <head> and closed the
    tags in the wrong order; the document below is well-formed.
    """
    res = num1 + num2
    contenido = """
    <html>
    <head></head>
    <body>
    <h2>El resultado es: %s </h2>
    </body>
    </html>
    """ % res
    return HttpResponse(contenido)
def prueba1(request):
    """Read an HTML template from disk, render it and return the response.

    Bug fix: the file handle was previously left open if ``Template()``
    raised; a context manager guarantees it is always closed.
    """
    # NOTE(review): hard-coded absolute Windows path — this only works on the
    # original author's machine; consider Django's template loaders instead.
    with open("C:/Users/erick/OneDrive/Escritorio/Trabajos Django/ESYG/templates/prueba.html") as externo:
        plantilla = Template(externo.read())
    ctx = Context()
    contenido = plantilla.render(ctx)
    return HttpResponse(contenido)
def prueba2(request):
    """Serve the 'prueba.html' template through Django's render shortcut."""
    rendered = render(request, "prueba.html")
    return rendered
| Yahir5/examenunidad3 | ESYG/prueba3/uno/views.py | views.py | py | 660 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.template.Template",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.template.Context",
"line_number": 23,
"usage_type": "call"
},
{
"api_nam... |
13607688058 | import pandas as pd
import numpy as np
from scipy.integrate import solve_ivp
import matplotlib.pyplot as plt
from numpy.random import rand
from random import sample, shuffle
plt.close()
import torch
from torch import nn
from torch.utils.data import Dataset, DataLoader
from itertools import permutations
import matplotlib.pyplot as plt
# The training data is two points x^{n-1}, x^n, separated by dt.
# We will integrate the equations with dt, and choose Nt random points
# per solution, and choose three points (the third point is the label for
# the Euler algorithm.
# solution to the differential equation
# Time window of one full oscillation period: [0, 2*pi].
t1 = 0.
t2 = np.arccos(-1) * 2.
# Equivalent to
def circle(t, s):
    """Right-hand side of the circular ODE: d(x, y)/dt = (-y, x).

    ``t`` is unused (autonomous system); ``s`` is the state (x, y).
    """
    return -s[1], s[0]
# Equivalent to def circle above
#circle = lambda t, s: np.array([-s[1], s[0]])
F = circle
# Time interval
t0, tmax = 0, 6.28
# Generate 20 initial condition curves: x0, y0 where x0 \in [0, 5], y0 \in [0, 5]
# This will generate 20 circles with different radii
# Also generate 20 array t_eval, with sorted random numbers between t0 and tmax.
# Each list will contain Nt values.
# NOTE(review): the comments above say 20 curves but Nruns below is 150.
Nruns = 150 # number of runs
x0max = 1
y0max = 1
dt = .02
# Alternative approach:
# Generate a collection of points (x[n-1], x[n], x[n+1] (for initial conditions)
x0 = x0max * rand(Nruns)
y0 = y0max * rand(Nruns)
# Only work with a single run and validate on the same run (easier problem)
#x0 = x0max * np.ones([Nruns])
#y0 = y0max * np.ones([Nruns])
# NOTE(review): this rebinds t0 from the scalar above to an array of
# per-run random starting times in [t1, t2].
t0 = t1 + (t2-t1) * rand(Nruns) # starting times, random between [0, 6.28]
#x0 = np.asarray([x0max])
sols = {}
y_list = []
for i in range(Nruns):
    # solution always evaluated at the same points
    # Three sample times per run: t0[i], t0[i]+dt, t0[i]+2*dt.
    t_eval = np.linspace(t0[i], t0[i]+2*dt, 3)
    # x[0][i] and y0[i] are always 1
    # What is the second argument of solve_ivp? Min/max t for simulation
    # t_eval are three time points: t0[i], t0[i]+dt, t0[i]+2*dt
    # Initial Conditions (I.C.): x0[i], y[0][i] for run i
    sol = sols[i] = solve_ivp(F, [t0[i], t0[i]+2*dt], [x0[i], y0[i]], t_eval=t_eval)
    print(f"({i}) t_eval: ", t_eval)
    print(f"({i}) t[i]: ", t0[i])
    print(f"({i}) x0[i]: ", x0[i])
    print(f"({i}) y0[i]: ", y0[i])
    print()
    #print(sol.t)
    # Plot both state components of every run on one figure.
    plt.plot(sol.t, sol.y[0], color='r', lw=1)
    plt.plot(sol.t, sol.y[1], color='b', lw=1)
    #plt.scatter(sol.t, sol.y[0], color='r')
    #plt.scatter(sol.t, sol.y[1], color='b')
plt.xlabel('t')
#plt.ylabel('sample points')
plt.show()
plt.close()
#quit()
# select 100 triplets (t[2*i], x[2*i], x[2*i+1]) where x[2i] is the solution at t^n
# and x[2i+1] is the solution at t^{n+1}
# Even indices point at the t^n samples (the odd index in between is dt away).
even = list(range(0, len(t_eval), 2))
Nsamples_per_run = 10
# Collect, per run, the three sample times and the two state components.
t_seq3 = []
x0_seq3 = []
x1_seq3 = []
for run_idx in range(Nruns):
    run_sol = sols[run_idx]
    t_seq3.append(list(run_sol.t))
    x0_seq3.append(list(run_sol.y[0]))
    x1_seq3.append(list(run_sol.y[1]))
#-----------------------------------------------------------------------------------
# I now have the training data on which to base the neural network:
# times: (t_i^n, t_i^{n+1}), i=0, 1, Nruns*Nsamples_per_run
# x0_pairs: (x[0]_i^n, x[0]_i^{n+1}), i=0, 1, Nruns*Nsamples_per_run
# x1_pairs: (x[1]_i^n, x[1]_i^{n+1}), i=0, 1, Nruns*Nsamples_per_run
class NN(nn.Module):
    """Fully connected feed-forward network mapping R^4 -> R^2.

    The four inputs are the two consecutive states x^{n-1}, x^{n} (each in
    R^2); the two outputs approximate the time derivative of the state.
    Used here as a universal function approximator for the ODE right-hand
    side.
    """

    def __init__(self, num_hidden: int, dim_hidden: int, act=nn.Tanh()):
        """Build the input layer, ``num_hidden`` hidden layers and the output layer."""
        super(NN, self).__init__()
        self.layer_in = nn.Linear(4, dim_hidden)
        self.layer_out = nn.Linear(dim_hidden, 2)
        self.middle_layers = nn.ModuleList(
            [nn.Linear(dim_hidden, dim_hidden) for _ in range(num_hidden)]
        )
        self.act = act

    def forward(self, t):
        """Apply input layer, hidden stack and output layer; ``act`` after every layer except the last."""
        hidden = self.act(self.layer_in(t))
        for middle in self.middle_layers:
            hidden = self.act(middle(hidden))
        return self.layer_out(hidden)
# Create DataSet and DataLoader
# https://pytorch.org/tutorials/beginner/data_loading_tutorial.html
class myDataset(Dataset):
    """Dataset of per-run training triplets.

    Each item is a flat float32 tensor of 9 values:
    [t^0, t^1, t^2, x0^0, x0^1, x0^2, x1^0, x1^1, x1^2],
    i.e. the three sample times followed by the two state components at
    those times.
    """

    def __init__(self, t_seq, x0_seq, x1_seq):
        """Store the (Nruns, 3) sequences of times and state components as float32 tensors."""
        super(myDataset, self).__init__()
        self.t_seq = torch.tensor(t_seq, dtype=torch.float32)
        self.x0_seq = torch.tensor(x0_seq, dtype=torch.float32)
        self.x1_seq = torch.tensor(x1_seq, dtype=torch.float32)

    def __len__(self):
        # Bug fix: the original wrote len(self.t_seq // 3), which floor-divides
        # the tensor *values* by 3 (a no-op for the length) — the intended
        # result is simply the number of runs, i.e. the number of rows.
        return len(self.t_seq)

    def __getitem__(self, i):
        # torch.cat concatenates the three 1-D rows directly instead of
        # rebuilding a tensor from unpacked 0-dim tensors.
        return torch.cat((self.t_seq[i], self.x0_seq[i], self.x1_seq[i]))
dataset = myDataset(t_seq3, x0_seq3, x1_seq3)
# drop_last=True keeps every batch at exactly batch_size rows.
dataloader = DataLoader(dataset, batch_size=4, shuffle=True, drop_last=True)
# Armed with the dataset and dataloader, I can implement the neural network
# For each batch, pass it through the NN, and output an approximation of d((x,y))/dt
# Apply Euler to obtain the solution at the next time step. Define MSE loss, and do back propagation
# The samples need not be in temporal order
learning_rate = 1.e-2
loss_fn = torch.nn.MSELoss(reduction='mean')
model = NN(num_hidden=2, dim_hidden=30, act=nn.ReLU())
optimizer = torch.optim.AdamW(model.parameters(), lr=learning_rate)
# Loss function is hardly decreasing. WHY?
nb_epochs = 400
losses = []
epoch_losses = []
for epoch in range(nb_epochs):
    # shuffle each epoch?
    epoch_loss = 0
    count = 0
    for sample in dataloader:
        optimizer.zero_grad() # correct place?
        # Each batch row is laid out as
        # [t0,t1,t2, x0^{n-1},x0^n,x0^{n+1}, x1^{n-1},x1^n,x1^{n+1}].
        # Will the order be important? No if fully connected network
        s = sample[:, [3,4,6,7]] # x[0]^n, x[1]^n
        dsdt_approx = model(s)
        try:
            snew = s[:,[1,3]] + dt * dsdt_approx # One step Euler
        except:
            # NOTE(review): bare except that exits the process hides the real
            # error; the break below is unreachable after quit().
            #print("break: size s: ", s.shape)
            # incorrect batch size (not sure actually)
            quit()
            break
        s_exact = sample[:, [5,8]]
        s_approx = snew
        loss = loss_fn(snew, s_exact)
        epoch_loss += loss.item()
        loss.backward()
        # NOTE(review): appending the loss tensor (not loss.item()) keeps each
        # computation graph alive; fine for short runs, memory-hungry otherwise.
        losses.append(loss)
        optimizer.step()
        count += 1
    epoch_losses.append(epoch_loss / count)
print("epoch_losses: ", len(epoch_losses), epoch_losses)
# With a trained NN, choose some initial condition and draw a trajectory (x, y)
# The network is not training when I input sequences into the NN!
dt1 = dt #* 0.5
sv0 = torch.tensor([1., 0.]) # at t = 0
# The exact solution is x = cos(t), y = sin(t)
# NOTE(review): np.cos(dt) vs np.sin(dt1) mixes dt and dt1; identical only
# while dt1 == dt — confirm if dt1 is ever scaled.
sv1 = torch.tensor([np.cos(dt), np.sin(dt1)])
# sol.y[0] is y[0], sol.y[1] is y[1]
# sv: first the point at t^n, followed by the point at t^{n+1}
sv = torch.tensor([*sv0, *sv1], dtype=torch.float32) # y[0] and y[1] are interleaved (y[0][0], y[1][0], y[0][1], y[1][1])
x_lst = []
y_lst = []
t_lst = []
print("sv: ", sv)
# The model requires three points. This means I must find a way to compute the first two points. I will consider the exact solution.
# In reality, I could apply two steps of a Runga-Kutta algorithms, or several steps of Euler with smaller time step.
# Save initial condition (x,y,t) (same as y[0], y[1], t)
x_lst.append(sv0[0].item())
y_lst.append(sv0[1].item())
t_lst.append(0)
print("x_lst= ", x_lst)
sv_nm1 = sv0.clone()
sv_n = sv1.clone()
sv = torch.tensor([*sv_nm1, *sv_n], dtype=torch.float32) # y[0] and y[1] are interleaved (y[0][0], y[1][0], y[0][1], y[1][1])
# The method is unstable
for i in range(600):
    # My sv update is incorrect
    # The model consumes the concatenated pair (x^{n-1}, x^n) of states.
    dydt = model(sv) # model takes in first two points. How is that done?
    #sv_new = sv[2:] + dt1 * dydt
    sv_np1 = sv_n + dt1 * dydt
    sv_nm1 = sv_n.clone() # Inefficient, but clear, implementation
    sv_n = sv_np1.clone()
    sv = torch.tensor([*sv_nm1, *sv_n], dtype=torch.float32)
    x_lst.append(sv_np1[0].item())
    y_lst.append(sv_np1[1].item())
    t_lst.append((i+1)*dt1)
x_lst = np.asarray(x_lst)
y_lst = np.asarray(y_lst)
t_lst = np.asarray(t_lst)
plt.figure(figsize=(10, 6))
plt.plot(t_lst, x_lst, label='$x_0$')
plt.plot(t_lst, y_lst, label='$x_1$')
plt.xlabel('t')
plt.ylabel('$x_0$, $x_1$')
# For the exact solution the radius is constant 1; drift shows instability.
radius = np.sqrt(x_lst**2 + y_lst**2)
plt.plot(t_lst, radius, label='radius')
plt.legend()
plt.grid()
plt.show()
plt.close()
| erlebach/basic_UODE | circle_2pts.py | circle_2pts.py | py | 8,461 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.close",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "numpy.arccos",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.random.ran... |
71109080994 | """
This module contains the implementation of the Classes: BaseEnvironment, BaseWrapper, BaseObservationWrapper, BaseActionWrapper,
BaseRewardWrapper, BaseGridWorld, BaseCarOnHill, BaseCartPole, BaseInvertedPendulum and LQG.
Then there are the Mujoco Environments Wrappers: BaseMujoco, BaseHalfCheetah, BaseAnt, BaseHopper, BaseHumanoid, BaseSwimmer,
BaseWalker2d
The Class BaseEnvironment inherits from the Class AbstractUnit and from ABC.
The Class BaseEnvironment is an abstract Class used as base class for all types of environments.
The Class BaseWrapper is used as generic wrapper Class. The Classes BaseObservationWrapper, BaseActionWrapper and
BaseRewardWrapper are abstract Classes, that when sub-classed can be used to wrap something specific of an environment.
The Classes BaseGridWorld, BaseCarOnHill, BaseCartPole, BaseInvertedPendulum simply wrap the corresponding Classes of MushroomRL
but they are changed so to inherit from the Class BaseEnvironment so that they can be used in this library.
The Class LQG is a re-adaptation of code copied from: https://github.com/T3p/potion/blob/master/potion/envs/lq.py
The Classes BaseHalfCheetah, BaseAnt, BaseHopper, BaseHumanoid, BaseSwimmer and BaseWalker2d inherit from the Class BaseMujoco
and are simply wrappers of the corresponding OpenAI gym Classes.
"""
from abc import ABC, abstractmethod
import numpy as np
import scipy
import math
from gym.envs.mujoco.half_cheetah_v3 import HalfCheetahEnv
from gym.envs.mujoco.ant_v3 import AntEnv
from gym.envs.mujoco.hopper_v3 import HopperEnv
from gym.envs.mujoco.humanoid_v3 import HumanoidEnv
from gym.envs.mujoco.swimmer_v3 import SwimmerEnv
from gym.envs.mujoco.walker2d_v3 import Walker2dEnv
from mushroom_rl.utils.spaces import Box
from mushroom_rl.core.environment import MDPInfo
from mushroom_rl.environments import GridWorld, CarOnHill, CartPole, InvertedPendulum
from ARLO.abstract_unit.abstract_unit import AbstractUnit
class BaseEnvironment(AbstractUnit, ABC):
    """
    This is the base environment Class based on the OpenAI Gym class. Part of this class is a re-adaptation of code copied from:
    -OpenAI gym:
    cf. https://github.com/openai/gym/blob/master/gym/core.py
    -MushroomRL:
    cf. https://github.com/MushroomRL/mushroom-rl/blob/dev/mushroom_rl/core/environment.py
    This can be sub-classed by the user to create their own specific environment.
    You must use environment spaces from MushroomRL (e.g: Box, Discrete).
    This Class is an abstract Class and it inherits from the Class AbstractUnit.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process'):
        """
        Non-Parameters Members
        ----------------------
        observation_space: This must be a space from MushroomRL like Box, Discrete.
        action_space: This must be a space from MushroomRL like Box, Discrete.
        gamma: This is the value of the gamma of the MDP, that is in this Class.
        horizon: This is the horizon of the MDP, that is in this Class.
        The other parameters and non-parameters members are described in the Class AbstractUnit.
        """
        super().__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path,
                         verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        # Concrete sub-classes are expected to fill these four members in.
        self.action_space = None
        self.observation_space = None
        self.gamma = None
        self.horizon = None
    def __repr__(self):
        return 'BaseEnvironment('+'observation_space='+str(self.observation_space)+', action_space='+str(self.action_space)\
               +', gamma='+str(self.gamma)+', horizon='+str(self.horizon)+', obj_name='+str(self.obj_name)\
               +', seeder='+str(self.seeder)+', local_prng='+str(self.local_prng)+', log_mode='+str(self.log_mode)\
               +', checkpoint_log_path='+str(self.checkpoint_log_path)+', verbosity='+str(self.verbosity)\
               +', n_jobs='+str(self.n_jobs)+', job_type='+str(self.job_type)+', logger='+str(self.logger)+')'
    @abstractmethod
    def step(self, action):
        """
        Method used to run one step of the environment dynamics.
        """
        raise NotImplementedError
    @abstractmethod
    def reset(self, state=None):
        """
        Method used to reset the environment.
        """
        raise NotImplementedError
    @abstractmethod
    def render(self, mode='human'):
        """
        Method used to render an environment.
        """
        raise NotImplementedError
    def close(self):
        """
        Method used to perform necessary cleanup. No-op by default.
        """
        pass
    def seed(self, seed=None):
        """
        Method used to seed the environment. No-op by default: seeding is
        presumably handled through the ``seeder``/``local_prng`` machinery
        of AbstractUnit — TODO confirm.
        """
        return
    def stop(self):
        """
        Method used to stop an MDP. This is needed for backward compatibility with MushroomRL.
        """
        pass
    def unwrapped(self):
        """
        This method completely unwraps the env contained in the class.
        Returns:
        ----------
        self: The base non-wrapped env instance
        """
        return self
    @property
    def info(self):
        """
        Property method that constructs an object of Class mushroom_rl.environment.MDPInfo
        """
        #each block must modify the observation_space and action_space according to the transformation they did
        return MDPInfo(observation_space=self.observation_space, action_space=self.action_space, gamma=self.gamma,
                       horizon=self.horizon)
    def _sample_from_box(self, space):
        """
        Parameters
        ----------
        space: The space to which to sample from. It must be an object of Class Box.
        This method was copied from OpenAI gym: cf. https://github.com/openai/gym/blob/master/gym/spaces/box.py
        Generates a single random sample inside of the Box. In creating a sample of the box, each coordinate is sampled according
        to the form of the interval:
        * [a, b] : uniform distribution
        * [a, inf) : shifted exponential distribution
        * (-inf, b] : shifted negative exponential distribution
        * (-inf, inf) : normal distribution
        """
        if(not isinstance(space, Box)):
            exc_msg = 'The method \'_sample_from_box\' can only be applied on \'Box\' spaces!'
            self.logger.exception(msg=exc_msg)
            raise TypeError(exc_msg)
        else:
            # Element-wise boolean masks over the box bounds.
            bounded_below = -np.inf < space.low
            bounded_above = np.inf > space.high
            if(space.high.dtype.kind == 'f'):
                high = space.high
            else:
                # For integer boxes shift the upper bound so that uniform
                # sampling (half-open) plus flooring includes space.high.
                high = space.high.astype('int64') + 1
            sample = np.empty(space.shape)
            #Masking arrays which classify the coordinates according to interval type
            unbounded = ~bounded_below & ~bounded_above
            upp_bounded = ~bounded_below & bounded_above
            low_bounded = bounded_below & ~bounded_above
            bounded = bounded_below & bounded_above
            #Vectorized sampling by interval type
            sample[unbounded] = self.local_prng.normal(size=unbounded[unbounded].shape)
            sample[low_bounded] = self.local_prng.exponential(size=low_bounded[low_bounded].shape) + space.low[low_bounded]
            sample[upp_bounded] = -self.local_prng.exponential(size=upp_bounded[upp_bounded].shape) + space.high[upp_bounded]
            sample[bounded] = self.local_prng.uniform(low=space.low[bounded], high=high[bounded], size=bounded[bounded].shape)
            if space.high.dtype.kind == 'i':
                sample = np.floor(sample)
                sample = sample.astype(space.high.dtype)
            return sample
    def sample_from_box_action_space(self):
        """
        This method samples from a Box Action Space.
        """
        sample = self._sample_from_box(space=self.action_space)
        return sample
    def sample_from_box_observation_space(self):
        """
        This method samples from a Box Observation Space.
        """
        sample = self._sample_from_box(space=self.observation_space)
        return sample
    def set_params(self, params_dict):
        """
        Parameters
        ----------
        params_dict: This is a dictionary containing the parameters of the environment and their new value.

        Raises
        ------
        AttributeError: If the environment lacks one of the given members.
        TypeError: If params_dict is not a dictionary.
        """
        if(isinstance(params_dict, dict)):
            for tmp_key in list(params_dict.keys()):
                if(hasattr(self, tmp_key)):
                    setattr(self, tmp_key, params_dict[tmp_key])
                else:
                    exc_msg = 'The environment does not have the member \''+str(tmp_key)+'\'!'
                    self.logger.exception(msg=exc_msg)
                    raise AttributeError(exc_msg)
        else:
            exc_msg = '\'params_dict\' must be a dictionary!'
            self.logger.exception(msg=exc_msg)
            raise TypeError(exc_msg)
    def get_params(self, params_names):
        """
        Parameters
        ----------
        params_names: This is a list of strings and they represent the names of the parameters of which we want to get the value.
        Returns
        -------
        params_dict: This is a dictionary with as keys the strings in the list params_names and as values the current value of
                     such parameter.

        Raises
        ------
        AttributeError: If the environment lacks one of the requested members.
        """
        params_dict = {}
        for tmp_key in params_names:
            if(hasattr(self, tmp_key)):
                params_dict.update({tmp_key: getattr(self, tmp_key)})
            else:
                exc_msg = 'The environment does not have the member \''+str(tmp_key)+'\'!'
                self.logger.exception(msg=exc_msg)
                raise AttributeError(exc_msg)
        return params_dict
class BaseWrapper(BaseEnvironment):
    """
    This is the base wrapper Class based on the OpenAI Wrapper Class. Part of this class is a re-adaptation of code copied from
    OpenAI gym: https://github.com/openai/gym/blob/master/gym/core.py
    This is used as base Class for observation wrappers, action wrappers and reward wrappers.
    Other kind of wrappers can be created inheriting from this Class.
    """
    def __init__(self, env, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process'):
        """
        Parameters
        ----------
        env: This is the environment that needs to be wrapped. It must be an object of a Class inheriting from the Class
             BaseEnvironment.
        The other parameters and non-parameters members are described in the Class BaseEnvironment.

        Raises
        ------
        TypeError: If env does not inherit from BaseEnvironment.
        """
        super().__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path,
                         verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        self.env = env
        #The env must be an object of a Class inheriting from the Class BaseEnvironment
        if(not isinstance(self.env, BaseEnvironment)):
            exc_msg = 'The \'env\' must be an object of a Class inheriting from the Class BaseEnvironment!'
            self.logger.exception(msg=exc_msg)
            raise TypeError(exc_msg)
        # Mirror the wrapped environment's MDP description on the wrapper
        # itself; sub-classes may override these after transforming spaces.
        self.action_space = self.env.action_space
        self.observation_space = self.env.observation_space
        self.gamma = self.env.gamma
        self.horizon = self.env.horizon
    def __repr__(self):
        return str(self.__class__.__name__)+'('+'env='+str(self.env)+', observation_space='+str(self.observation_space)\
               +', action_space='+str(self.action_space)+', gamma='+str(self.gamma)+', horizon='+str(self.horizon)\
               +', obj_name='+str(self.obj_name)+', seeder='+str(self.seeder)+', local_prng='+str(self.local_prng)\
               +', log_mode='+str(self.log_mode)+', checkpoint_log_path='+str(self.checkpoint_log_path)\
               +', verbosity='+str(self.verbosity)+', n_jobs='+str(self.n_jobs)+', job_type='+str(self.job_type)\
               +', logger='+str(self.logger)+')'
    def step(self, action):
        """
        Method used to run one step of the environment dynamics. Delegates to the wrapped env.
        """
        return self.env.step(action=action)
    def reset(self, state=None):
        """
        Method used to reset the environment. Delegates to the wrapped env.
        """
        return self.env.reset(state=state)
    def render(self, mode='human'):
        """
        Method used to render an environment. Delegates to the wrapped env.
        """
        return self.env.render(mode=mode)
    def close(self):
        """
        Method used to perform necessary cleanup. Delegates to the wrapped env.
        """
        return self.env.close()
    def seed(self, seed=None):
        """
        Method used to seed the environment. Delegates to the wrapped env.
        """
        return self.env.seed(seed=seed)
    def stop(self):
        """
        Method used to stop the MDP (MushroomRL compatibility). Delegates to the wrapped env.
        """
        return self.env.stop()
    def unwrapped(self):
        """
        Method to unwrap the environment. Recurses until the base non-wrapped env is reached.
        """
        return self.env.unwrapped()
class BaseObservationWrapper(BaseWrapper):
    """
    Part of this Class is a readaptation of code copied from OpenAI gym: https://github.com/openai/gym/blob/master/gym/core.py
    To create an observation wrapper you must create a new Class inheriting from this Class. You must override the observation
    method, and in the __init__ you must:
    -Call the __init__ of BaseWrapper via: super().__init__(env)
    -Properly modify the observation space.
    This is an abstract Class: it must be sub-classed.
    """
    def reset(self, state=None):
        """
        Method used to reset the environment. Returns the transformed initial observation.
        """
        observation = self.env.reset(state=state)
        return self.observation(observation=observation)
    def step(self, action):
        """
        Method used to run one step of the environment dynamics. Returns the transformed
        observation together with the reward, the absorbing flag and the info dictionary
        of the wrapped environment.
        """
        observation, reward, absorbing, info = self.env.step(action=action)
        # Bug fix: the info dictionary returned by the wrapped environment was
        # previously discarded (an empty dict was returned in its place);
        # forward it so no diagnostic information is lost.
        return self.observation(observation=observation), reward, absorbing, info
    @abstractmethod
    def observation(self, observation):
        """
        Method used to transform the observations.
        """
        raise NotImplementedError
class BaseActionWrapper(BaseWrapper):
    """
    Part of this Class is a readaptation of code copied from OpenAI gym: https://github.com/openai/gym/blob/master/gym/core.py
    To create an action wrapper, inherit from this Class and override the action method; in
    your __init__ you must:
    -Call the __init__ of BaseWrapper via: super().__init__(env)
    -Properly modify the action space.
    This is an abstract Class: it must be sub-classed.
    """
    def step(self, action):
        """
        Method used to run one step of the environment dynamics, after transforming the action.
        """
        transformed_action = self.action(action=action)
        return self.env.step(transformed_action)
    @abstractmethod
    def action(self, action):
        """
        Method used to transform the actions.
        """
        raise NotImplementedError
class BaseRewardWrapper(BaseWrapper):
    """
    Part of this Class is a readaptation of code copied from OpenAI gym: https://github.com/openai/gym/blob/master/gym/core.py
    To create a reward wrapper you must create a new Class inheriting from this Class. You must override the reward method, and
    in the __init__ you must:
    -Call the __init__ of BaseWrapper via: super().__init__(env)
    This is an abstract Class: it must be sub-classed.
    """
    def step(self, action):
        """
        Method used to run one step of the environment dynamics. Returns the observation,
        the transformed reward, the absorbing flag and the info dictionary of the wrapped
        environment.
        """
        observation, reward, absorbing, info = self.env.step(action=action)
        # Bug fix: the info dictionary returned by the wrapped environment was
        # previously discarded (an empty dict was returned in its place);
        # forward it so no diagnostic information is lost.
        return observation, self.reward(reward=reward), absorbing, info
    @abstractmethod
    def reward(self, reward):
        """
        Method used to transform the rewards.
        """
        raise NotImplementedError
class BaseGridWorld(GridWorld, BaseEnvironment):
    """
    This Class wraps the GridWorld Class from MushroomRL: this is needed for the correct working of this library.
    """
    def __init__(self, height, width, goal, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3,
                 n_jobs=1, job_type='process', start=(0,0)):
        # Initialise the underlying MushroomRL GridWorld first.
        super().__init__(height=height, width=width, goal=goal, start=start)
        # super(BaseEnvironment, self) resumes the MRO lookup *after*
        # BaseEnvironment: it runs AbstractUnit.__init__ directly, skipping
        # BaseEnvironment.__init__ (which would reset the spaces to None).
        super(BaseEnvironment, self).__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                                              checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs,
                                              job_type=job_type)
        # Copy the MDP description out of the wrapped environment's MDPInfo
        # into the members BaseEnvironment expects.
        self.horizon = super().info.horizon
        self.gamma = super().info.gamma
        self.observation_space = super().info.observation_space
        self.action_space = super().info.action_space
    def __repr__(self):
        return 'BaseGridWorld('+'observation_space='+str(self.observation_space)+', action_space='+str(self.action_space)\
               +', gamma='+str(self.gamma)+', horizon='+str(self.horizon)+', obj_name='+str(self.obj_name)\
               +', seeder='+str(self.seeder)+', local_prng='+str(self.local_prng)+', log_mode='+str(self.log_mode)\
               +', checkpoint_log_path='+str(self.checkpoint_log_path)+', verbosity='+str(self.verbosity)\
               +', n_jobs='+str(self.n_jobs)+', job_type='+str(self.job_type)+', logger='+str(self.logger)+')'
    def seed(self, seed=None):
        """
        Method used to seed the environment. No-op: presumably the wrapped
        MushroomRL environment handles its own seeding — TODO confirm.
        """
        return
class BaseCarOnHill(CarOnHill, BaseEnvironment):
    """
    This Class wraps the CarOnHill Class from MushroomRL: this is needed for the correct working of this library.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process', horizon=100, gamma=.95):
        # Initialise the underlying MushroomRL CarOnHill first.
        super().__init__(horizon=horizon, gamma=gamma)
        # super(BaseEnvironment, self) resumes the MRO lookup *after*
        # BaseEnvironment: it runs AbstractUnit.__init__ directly, skipping
        # BaseEnvironment.__init__ (which would reset the spaces to None).
        super(BaseEnvironment, self).__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                                              checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs,
                                              job_type=job_type)
        # Copy the MDP description out of the wrapped environment's MDPInfo.
        self.horizon = super().info.horizon
        self.gamma = super().info.gamma
        self.observation_space = super().info.observation_space
        self.action_space = super().info.action_space
    def __repr__(self):
        return 'BaseCarOnHill('+'observation_space='+str(self.observation_space)+', action_space='+str(self.action_space)\
               +', gamma='+str(self.gamma)+', horizon='+str(self.horizon)+', obj_name='+str(self.obj_name)\
               +', seeder='+str(self.seeder)+', local_prng='+str(self.local_prng)+', log_mode='+str(self.log_mode)\
               +', checkpoint_log_path='+str(self.checkpoint_log_path)+', verbosity='+str(self.verbosity)\
               +', n_jobs='+str(self.n_jobs)+', job_type='+str(self.job_type)+', logger='+str(self.logger)+')'
    def seed(self, seed=None):
        """
        Method used to seed the environment. No-op: presumably the wrapped
        MushroomRL environment handles its own seeding — TODO confirm.
        """
        return
class BaseCartPole(CartPole, BaseEnvironment):
    """
    This Class wraps the CartPole Class from MushroomRL: this is needed for the correct working of this library.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process', m=2., M=8., l=.5, g=9.8, mu=1e-2, max_u=50., noise_u=10., horizon=3000, gamma=.95):
        # Initialise the underlying MushroomRL CartPole first (physical
        # parameters: pole mass m, cart mass M, pole length l, gravity g,
        # friction mu, action bound max_u, action noise noise_u).
        super().__init__(m=m, M=M, l=l, g=g, mu=mu, max_u=max_u, noise_u=noise_u, horizon=horizon, gamma=gamma)
        # super(BaseEnvironment, self) resumes the MRO lookup *after*
        # BaseEnvironment: it runs AbstractUnit.__init__ directly, skipping
        # BaseEnvironment.__init__ (which would reset the spaces to None).
        super(BaseEnvironment, self).__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                                              checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs,
                                              job_type=job_type)
        # Copy the MDP description out of the wrapped environment's MDPInfo.
        self.horizon = super().info.horizon
        self.gamma = super().info.gamma
        self.observation_space = super().info.observation_space
        self.action_space = super().info.action_space
    def __repr__(self):
        return 'BaseCartPole('+'observation_space='+str(self.observation_space)+', action_space='+str(self.action_space)\
               +', gamma='+str(self.gamma)+', horizon='+str(self.horizon)+', obj_name='+str(self.obj_name)\
               +', seeder='+str(self.seeder)+', local_prng='+str(self.local_prng)+', log_mode='+str(self.log_mode)\
               +', checkpoint_log_path='+str(self.checkpoint_log_path)+', verbosity='+str(self.verbosity)\
               +', n_jobs='+str(self.n_jobs)+', job_type='+str(self.job_type)+', logger='+str(self.logger)+')'
    def seed(self, seed=None):
        """
        Method used to seed the environment. No-op: presumably the wrapped
        MushroomRL environment handles its own seeding — TODO confirm.
        """
        return
class BaseInvertedPendulum(InvertedPendulum, BaseEnvironment):
    """
    This Class wraps the InvertedPendulum Class from MushroomRL: this is needed for the correct working of this library.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process', random_start=False, m=1., l=1., g=9.8, mu=1e-2, max_u=5., horizon=5000, gamma=.99):
        # Initialise the underlying MushroomRL InvertedPendulum first.
        super().__init__(random_start=random_start, m=m, l=l, g=g, mu=mu, max_u=max_u, horizon=horizon, gamma=gamma)
        # super(BaseEnvironment, self) resumes the MRO lookup *after*
        # BaseEnvironment: it runs AbstractUnit.__init__ directly, skipping
        # BaseEnvironment.__init__ (which would reset the spaces to None).
        super(BaseEnvironment, self).__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode,
                                              checkpoint_log_path=checkpoint_log_path, verbosity=verbosity, n_jobs=n_jobs,
                                              job_type=job_type)
        # Copy the MDP description out of the wrapped environment's MDPInfo.
        self.horizon = super().info.horizon
        self.gamma = super().info.gamma
        self.observation_space = super().info.observation_space
        self.action_space = super().info.action_space
    def __repr__(self):
        return 'BaseInvertedPendulum('+'observation_space='+str(self.observation_space)\
               +', action_space='+str(self.action_space)+', gamma='+str(self.gamma)+', horizon='+str(self.horizon)\
               +', obj_name='+str(self.obj_name)+', seeder='+str(self.seeder)+', local_prng='+str(self.local_prng)\
               +', log_mode='+str(self.log_mode)+', checkpoint_log_path='+str(self.checkpoint_log_path)\
               +', verbosity='+str(self.verbosity)+', n_jobs='+str(self.n_jobs)+', job_type='+str(self.job_type)\
               +', logger='+str(self.logger)+')'
    def seed(self, seed=None):
        """
        Method used to seed the environment. No-op: presumably the wrapped
        MushroomRL environment handles its own seeding — TODO confirm.
        """
        return
class LQG(BaseEnvironment):
    """
    Environment implementing a Linear-Quadratic Gaussian control (LQG) problem: s_{t+1} = A s_t + B a_t + env_noise
    The reward function is given by: r_{t+1} = - s_t^T Q s_t - a_t^T R a_t
    Note that there is also a noise on the controller: if you pick action a_t then you will be able to execute action:
    a_t + controller_noise. This only plays a role when rolling out the policy: we consider a gaussian policy with mean a_t and
    covariance matrix given by controller_noise.
    This is a re-adaptation of code copied from: https://github.com/T3p/potion/blob/master/potion/envs/lq.py
    """
    # NOTE(review): the np.eye(1) defaults below are single shared array objects
    # (evaluated once at definition time). They are only read here, but mutating
    # e.g. env.A in place would affect every instance built with the defaults.
    def __init__(self, obj_name, A=np.eye(1), B=np.eye(1), Q=np.eye(1), R=np.eye(1), max_pos=1.0, max_action=1.0,
                 env_noise=np.eye(1), controller_noise=np.eye(1), horizon=10, gamma=0.9, seeder=2, log_mode='console',
                 checkpoint_log_path=None, verbosity=3, n_jobs=1, job_type='process'):
        """
        Parameters
        ----------
        A: This is the state dynamics matrix.
           The default is np.eye(1).
        B: This is the action dynamics matrix.
           The default is np.eye(1).
        Q: This is the cost weight matrix for the state. It must be a positive-definite matrix (to always have a negative
           reward).
           The default is np.eye(1).
        R: This is the cost weight matrix for the action. It must be a positive-definite matrix (to always have a negative
           reward).
           The default is np.eye(1).
        max_pos: This is the maximum value that the state can reach.
                 The default is 1.0.
        max_action: This is the maximum value that the action can reach.
                    The default is 1.0.
        env_noise: This is the covariance matrix representing the environment noise.
                   The default is np.eye(1).
        controller_noise: This is the covariance matrix representing the controller noise.
                          The default is np.eye(1).
        horizon: This is the horizon of the MDP.
                 The default is 10.
        gamma: This is the discount factor of the MDP.
               The default is 0.9.
        Non-Parameters Members
        ----------------------
        is_eval_phase: This is True if the environment is used for evaluating a policy: what happens is that the
                       controller_noise is added to the action selected by the policy, and then fed to the simulator.
                       Otherwise it is False.
                       This is used to represent the fact that even if we have learnt a theoretically optimal policy, in
                       practice to execute it there is going to be some noise and so the resulting action taken in the real
                       world will be different from the one selected by the policy.
                       This parameter can be set automatically by the evaluation metric.
        The other parameters and non-parameters members are described in the Class BaseEnvironment.
        """
        super().__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path,
                         verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        self.A = A
        self.B = B
        self.Q = Q
        self.R = R
        #state dimension
        self.ds = self.A.shape[1]
        #action dimension
        self.da = self.B.shape[1]
        # Positive-definiteness checks: in 1-D a simple sign check suffices, otherwise
        # all eigenvalues must be strictly positive.
        if(self.da == 1):
            if(self.R < 0):
                exc_msg = 'The matrix \'R\' must be positive-definite so that the reward is always negative!'
                self.logger.exception(msg=exc_msg)
                raise ValueError(exc_msg)
        else:
            if(not np.all(np.linalg.eigvals(self.R) > 0)):
                exc_msg = 'The matrix \'R\' must be positive-definite so that the reward is always negative!'
                self.logger.exception(msg=exc_msg)
                raise ValueError(exc_msg)
        if(self.ds == 1):
            if(self.Q < 0):
                exc_msg = 'The matrix \'Q\' must be positive-definite so that the reward is always negative!'
                self.logger.exception(msg=exc_msg)
                raise ValueError(exc_msg)
        else:
            if(not np.all(np.linalg.eigvals(self.Q) > 0)):
                exc_msg = 'The matrix \'Q\' must be positive-definite so that the reward is always negative!'
                self.logger.exception(msg=exc_msg)
                raise ValueError(exc_msg)
        #task horizon
        self.horizon = horizon
        #discount factor
        self.gamma = gamma
        #max state for clipping
        self.max_pos = max_pos*np.ones(self.ds)
        #max action for clipping
        self.max_action = max_action*np.ones(self.da)
        #environment noise
        self.env_noise = env_noise
        #check that the env_noise has the right shape:
        if(self.env_noise.shape[1] != self.ds):
            exc_msg = 'The \'env_noise\' co-variance matrix is not of the right shape!'
            self.logger.exception(msg=exc_msg)
            raise ValueError(exc_msg)
        self.viewer = None
        self.action_space = Box(low=-self.max_action, high=self.max_action)
        self.observation_space = Box(low=-self.max_pos, high=self.max_pos)
        self.controller_noise = controller_noise
        #check that the controller_noise has the right shape:
        if(self.controller_noise.shape[1] != self.da):
            exc_msg = 'The \'controller_noise\' co-variance matrix is not of the right shape!'
            self.logger.exception(msg=exc_msg)
            raise ValueError(exc_msg)
        #when this is true the controller noise is added to the action that the policy originally picked.
        self.is_eval_phase = False
    def __repr__(self):
        return 'LQG('+'observation_space='+str(self.observation_space)+', action_space='+str(self.action_space)\
               +', gamma='+str(self.gamma)+', horizon='+str(self.horizon)+', A='+str(self.A)+', B='+str(self.B)\
               +', Q='+str(self.Q)+', R='+str(self.R)+', env_noise='+str(self.env_noise)\
               +', controller_noise='+str(self.controller_noise)+', is_eval_phase='+str(self.is_eval_phase)\
               +', obj_name='+str(self.obj_name)+', seeder='+str(self.seeder)+', local_prng='+str(self.local_prng)\
               +', log_mode='+str(self.log_mode)+', checkpoint_log_path='+str(self.checkpoint_log_path)\
               +', verbosity='+str(self.verbosity)+', n_jobs='+str(self.n_jobs)+', job_type='+str(self.job_type)\
               +', logger='+str(self.logger)+')'
    def step(self, action):
        """
        Method used to run one step of the environment dynamics.

        Returns the tuple (next_state, reward, done, info): the reward is the negated
        quadratic cost and done is True once the horizon is reached.
        """
        if(self.is_eval_phase):
            # Simulate execution noise: perturb the chosen action with the controller noise.
            action = action + self.local_prng.multivariate_normal(mean=np.zeros(self.da), cov=self.controller_noise)
        u = np.clip(np.ravel(np.atleast_1d(action)), -self.max_action, self.max_action)
        #makes the noise different at each step:
        env_noise = np.dot(self.env_noise, self.local_prng.standard_normal(self.ds))
        xn = np.clip(np.dot(self.A, self.state.T) + np.dot(self.B, u) + env_noise, -self.max_pos, self.max_pos)
        cost = np.dot(self.state, np.dot(self.Q, self.state)) + np.dot(u, np.dot(self.R, u))
        self.state = xn.ravel()
        self.timestep += 1
        # float(cost) replaces np.asscalar(cost), which was removed in NumPy 1.23.
        return np.array(self.state), -float(cost), self.timestep >= self.horizon, {'danger':0}
    def reset(self, state=None):
        """
        By default, random uniform initialization.
        """
        self.timestep = 0
        if state is None:
            self.state = np.array(self.local_prng.uniform(low=-self.max_pos, high=self.max_pos, size=self.ds))
        else:
            self.state = np.array(state)
        return np.array(self.state)
    def seed(self, seed=None):
        """
        Method used to seed the environment.
        """
        # NOTE(review): this reseeds only when seed is None (and then passes None to
        # set_local_prng), and ignores an explicitly supplied seed. The condition looks
        # inverted — confirm intended semantics against set_local_prng before changing.
        if(seed is None):
            self.set_local_prng(new_seeder=seed)
    def render(self, mode='human', close=False):
        """
        Method used to render the environment.
        """
        #this is here since it would otherwise open a plot window
        from gym.envs.classic_control import rendering
        # Rendering is only supported for 1-D and 2-D state spaces.
        if self.ds not in [1, 2]:
            return
        if close:
            if self.viewer is not None:
                self.viewer.close()
                self.viewer = None
            return
        screen_width = 600
        world_width = math.ceil((self.max_pos[0] * 2) * 1.5)
        xscale = screen_width / world_width
        ballradius = 3
        if self.ds == 1:
            screen_height = 400
        else:
            world_height = math.ceil((self.max_pos[1] * 2) * 1.5)
            screen_height = math.ceil(xscale * world_height)
            yscale = screen_height / world_height
        if self.viewer is None:
            clearance = 0  # y-offset
            self.viewer = rendering.Viewer(screen_width, screen_height)
            mass = rendering.make_circle(ballradius * 2)
            mass.set_color(.8, .3, .3)
            mass.add_attr(rendering.Transform(translation=(0, clearance)))
            self.masstrans = rendering.Transform()
            mass.add_attr(self.masstrans)
            self.viewer.add_geom(mass)
            if self.ds == 1:
                self.track = rendering.Line((0, 100), (screen_width, 100))
            else:
                self.track = rendering.Line((0, screen_height / 2), (screen_width, screen_height / 2))
            self.track.set_color(0.5, 0.5, 0.5)
            self.viewer.add_geom(self.track)
            zero_line = rendering.Line((screen_width / 2, 0), (screen_width / 2, screen_height))
            zero_line.set_color(0.5, 0.5, 0.5)
            self.viewer.add_geom(zero_line)
        x = self.state[0]
        ballx = x * xscale + screen_width / 2.0
        if self.ds == 1:
            bally = 100
        else:
            y = self.state[1]
            bally = y * yscale + screen_height / 2.0
        self.masstrans.set_translation(ballx, bally)
        return self.viewer.render(return_rgb_array=mode == 'rgb_array')
    def get_optimal_K(self):
        """
        Returns
        -------
        Computes the optimal parameters K and returns the optimal policy given by: -K*s where s is the state.
        Note that the addition of the controller noise is taken care of in the evaluation phase.
        """
        # Solve the discrete-time algebraic Riccati equation, then form the LQR gain.
        X = np.matrix(scipy.linalg.solve_discrete_are(self.A, self.B, self.Q, self.R))
        K = np.matrix(scipy.linalg.inv(self.B.T*X*self.B+self.R)*(self.B.T*X*self.A))
        return -K
class BaseMujoco(BaseEnvironment):
    """
    This Class wraps the Mujoco environments. Every Mujoco environment inherits from this Class.
    This is an abstract Class.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process'):
        """
        Non-Parameters Members
        ----------------------
        mujoco_env: This is a Mujoco environment instance: an object of a Class inheriting from OpenAI gym Class:
                    gym.envs.mujoco.mujoco_env.MujocoEnv.
        n_steps: This is a counter for the horizon of the environment. It is the number of total steps that have happened so
                 far.
        """
        # n_jobs and job_type were previously accepted but silently dropped here;
        # forward them like every other BaseEnvironment subclass (cfr. LQG) does.
        super().__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path,
                         verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        #this is set in the sub-Class:
        self.mujoco_env = None
        self.n_steps = 0
    def __repr__(self):
        return str(self.__class__.__name__)+'('+'observation_space='+str(self.observation_space)\
               +', action_space='+str(self.action_space)+', gamma='+str(self.gamma)+', horizon='+str(self.horizon)\
               +', obj_name='+str(self.obj_name)+', seeder='+str(self.seeder)+', local_prng='+str(self.local_prng)\
               +', log_mode='+str(self.log_mode)+', checkpoint_log_path='+str(self.checkpoint_log_path)\
               +', verbosity='+str(self.verbosity)+', n_jobs='+str(self.n_jobs)+', job_type='+str(self.job_type)\
               +', logger='+str(self.logger)+')'
    def _update_counter(self, out_step):
        """
        Parameters
        ----------
        out_step: The output from the mujoco_env object step() method.
        Returns
        -------
        tuple(out_step): The modified environment step() method output.
        Updates the n_steps and if the horizon is reached the output of the method step() from the environment is modified:
        done is set to True.
        """
        # hasattr guard kept from the original: tolerates objects restored without the
        # n_steps attribute (e.g. after deserialisation).
        if(hasattr(self, 'n_steps')):
            out_step = list(out_step)
            self.n_steps += 1
            if(self.n_steps >= self.horizon):
                # Force the 'done' flag once the configured horizon is exhausted.
                out_step[2] = True
                self.n_steps = 0
        return tuple(out_step)
    def step(self, action):
        """
        Method used to run one step of the environment dynamics.
        """
        out = self.mujoco_env.step(action=action)
        out = self._update_counter(out_step=out)
        return out
    def reset(self, state=None):
        """
        Method used to reset the environment.

        Note that the `state` parameter is accepted for interface compatibility but is
        not forwarded to the underlying Mujoco environment.
        """
        obs = self.mujoco_env.reset()
        self.n_steps = 0
        return obs
    def seed(self, seed=None):
        """
        Method used to seed the environment.
        """
        return self.mujoco_env.seed(seed=seed)
    def render(self, mode='human'):
        """
        Method used to render the environment.
        """
        return self.mujoco_env.render(mode=mode)
    def set_local_prng(self, new_seeder):
        """
        Method used to adjust to the fact that OpenAI does not use my system of using local prng but have their own way of
        doing it. Without this method by calling the original method set_local_prng() implemented in the Class AbstractUnit
        there would be no effect on the environment.
        The method set_local_prng() is called in the Metric Classes and in some of the DataGeneration Classes so this method
        needs to work properly.
        """
        self.seed(seed=new_seeder)
    def _initialise_mdp_properties(self, gamma, horizon):
        """
        Parameters
        ----------
        gamma: This is the MDP discount factor. It must be a float.
        horizon: This is the MDP horizon. It must be an integer.
        """
        #no gamma, no horizon in Mujoco: i select them
        self.gamma = gamma
        self.horizon = horizon
        #I need to use the environment spaces from MushroomRL:
        self.action_space = Box(self.mujoco_env.action_space.low, self.mujoco_env.action_space.high)
        self.observation_space = Box(self.mujoco_env.observation_space.low, self.mujoco_env.observation_space.high)
class BaseHalfCheetah(BaseMujoco):
    """
    This Class wraps the Mujoco environment: HalfCheetahEnv.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process', gamma=0.99, horizon=1000, xml_file='half_cheetah.xml', forward_reward_weight=1.0,
                 ctrl_cost_weight=0.1, reset_noise_scale=0.1, exclude_current_positions_from_observation=True):
        """
        Parameters
        ----------
        gamma: This is the MDP discount factor. It must be a float.
               The default is 0.99.
        horizon: This is the MDP horizon. It must be an integer.
                 The default is 1000.
        The remaining keyword arguments are forwarded to gym's HalfCheetahEnv.
        """
        # Forward n_jobs and job_type too: they were previously accepted but dropped.
        super().__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path,
                         verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        self.mujoco_env = HalfCheetahEnv(xml_file=xml_file, forward_reward_weight=forward_reward_weight,
                                         ctrl_cost_weight=ctrl_cost_weight, reset_noise_scale=reset_noise_scale,
                                         exclude_current_positions_from_observation=exclude_current_positions_from_observation)
        self._initialise_mdp_properties(gamma=gamma, horizon=horizon)
class BaseAnt(BaseMujoco):
    """
    This Class wraps the Mujoco environment: AntEnv.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process', gamma=0.99, horizon=1000, xml_file='ant.xml', ctrl_cost_weight=0.5,
                 contact_cost_weight=5e-4, healthy_reward=1.0, terminate_when_unhealthy=True, healthy_z_range=(0.2, 1.0),
                 contact_force_range=(-1.0, 1.0), reset_noise_scale=0.1, exclude_current_positions_from_observation=True):
        """
        Parameters
        ----------
        gamma: This is the MDP discount factor. It must be a float.
               The default is 0.99.
        horizon: This is the MDP horizon. It must be an integer.
                 The default is 1000.
        The remaining keyword arguments are forwarded to gym's AntEnv.
        """
        # Forward n_jobs and job_type too: they were previously accepted but dropped.
        super().__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path,
                         verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        self.mujoco_env = AntEnv(xml_file=xml_file, ctrl_cost_weight=ctrl_cost_weight,
                                 contact_cost_weight=contact_cost_weight, healthy_reward=healthy_reward,
                                 terminate_when_unhealthy=terminate_when_unhealthy, healthy_z_range=healthy_z_range,
                                 contact_force_range=contact_force_range, reset_noise_scale=reset_noise_scale,
                                 exclude_current_positions_from_observation=exclude_current_positions_from_observation)
        self._initialise_mdp_properties(gamma=gamma, horizon=horizon)
class BaseHopper(BaseMujoco):
    """
    This Class wraps the Mujoco environment: HopperEnv.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process', gamma=0.99, horizon=1000, xml_file='hopper.xml', forward_reward_weight=1.0,
                 ctrl_cost_weight=1e-3, healthy_reward=1.0, terminate_when_unhealthy=True,
                 healthy_state_range=(-100.0, 100.0), healthy_z_range=(0.7, float('inf')),
                 healthy_angle_range=(-0.2, 0.2), reset_noise_scale=5e-3,
                 exclude_current_positions_from_observation=True):
        """
        Parameters
        ----------
        gamma: This is the MDP discount factor. It must be a float.
               The default is 0.99.
        horizon: This is the MDP horizon. It must be an integer.
                 The default is 1000.
        The remaining keyword arguments are forwarded to gym's HopperEnv.
        """
        # Forward n_jobs and job_type too: they were previously accepted but dropped.
        super().__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path,
                         verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        self.mujoco_env = HopperEnv(xml_file=xml_file, forward_reward_weight=forward_reward_weight,
                                    ctrl_cost_weight=ctrl_cost_weight, healthy_reward=healthy_reward,
                                    terminate_when_unhealthy=terminate_when_unhealthy,
                                    healthy_state_range=healthy_state_range, healthy_z_range=healthy_z_range,
                                    healthy_angle_range=healthy_angle_range, reset_noise_scale=reset_noise_scale,
                                    exclude_current_positions_from_observation=exclude_current_positions_from_observation)
        self._initialise_mdp_properties(gamma=gamma, horizon=horizon)
class BaseHumanoid(BaseMujoco):
    """
    This Class wraps the Mujoco environment: HumanoidEnv.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process', gamma=0.99, horizon=1000, xml_file='humanoid.xml', forward_reward_weight=1.25,
                 ctrl_cost_weight=0.1, contact_cost_weight=5e-7, contact_cost_range=(-np.inf, 10.0), healthy_reward=5.0,
                 terminate_when_unhealthy=True, healthy_z_range=(1.0, 2.0), reset_noise_scale=1e-2,
                 exclude_current_positions_from_observation=True):
        """
        Parameters
        ----------
        gamma: This is the MDP discount factor. It must be a float.
               The default is 0.99.
        horizon: This is the MDP horizon. It must be an integer.
                 The default is 1000.
        The remaining keyword arguments are forwarded to gym's HumanoidEnv.
        """
        # Forward n_jobs and job_type too: they were previously accepted but dropped.
        super().__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path,
                         verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        self.mujoco_env = HumanoidEnv(xml_file=xml_file, forward_reward_weight=forward_reward_weight,
                                      ctrl_cost_weight=ctrl_cost_weight, contact_cost_weight=contact_cost_weight,
                                      contact_cost_range=contact_cost_range, healthy_reward=healthy_reward,
                                      terminate_when_unhealthy=terminate_when_unhealthy, healthy_z_range=healthy_z_range,
                                      reset_noise_scale=reset_noise_scale,
                                      exclude_current_positions_from_observation=exclude_current_positions_from_observation)
        self._initialise_mdp_properties(gamma=gamma, horizon=horizon)
class BaseSwimmer(BaseMujoco):
    """
    This Class wraps the Mujoco environment: SwimmerEnv.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process', gamma=0.99, horizon=1000, xml_file="swimmer.xml", forward_reward_weight=1.0,
                 ctrl_cost_weight=1e-4, reset_noise_scale=0.1, exclude_current_positions_from_observation=True):
        """
        Parameters
        ----------
        gamma: This is the MDP discount factor. It must be a float.
               The default is 0.99.
        horizon: This is the MDP horizon. It must be an integer.
                 The default is 1000.
        The remaining keyword arguments are forwarded to gym's SwimmerEnv.
        """
        # Forward n_jobs and job_type too: they were previously accepted but dropped.
        super().__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path,
                         verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        self.mujoco_env = SwimmerEnv(xml_file=xml_file, forward_reward_weight=forward_reward_weight,
                                     ctrl_cost_weight=ctrl_cost_weight, reset_noise_scale=reset_noise_scale,
                                     exclude_current_positions_from_observation=exclude_current_positions_from_observation)
        self._initialise_mdp_properties(gamma=gamma, horizon=horizon)
class BaseWalker2d(BaseMujoco):
    """
    This Class wraps the Mujoco environment: Walker2dEnv.
    """
    def __init__(self, obj_name, seeder=2, log_mode='console', checkpoint_log_path=None, verbosity=3, n_jobs=1,
                 job_type='process', gamma=0.99, horizon=1000, xml_file="walker2d.xml", forward_reward_weight=1.0,
                 ctrl_cost_weight=1e-3, healthy_reward=1.0, terminate_when_unhealthy=True, healthy_z_range=(0.8, 2.0),
                 healthy_angle_range=(-1.0, 1.0), reset_noise_scale=5e-3,
                 exclude_current_positions_from_observation=True):
        """
        Parameters
        ----------
        gamma: This is the MDP discount factor. It must be a float.
               The default is 0.99.
        horizon: This is the MDP horizon. It must be an integer.
                 The default is 1000.
        The remaining keyword arguments are forwarded to gym's Walker2dEnv.
        """
        # Forward n_jobs and job_type too: they were previously accepted but dropped.
        super().__init__(obj_name=obj_name, seeder=seeder, log_mode=log_mode, checkpoint_log_path=checkpoint_log_path,
                         verbosity=verbosity, n_jobs=n_jobs, job_type=job_type)
        self.mujoco_env = Walker2dEnv(xml_file=xml_file, forward_reward_weight=forward_reward_weight,
                                      ctrl_cost_weight=ctrl_cost_weight, healthy_reward=healthy_reward,
                                      terminate_when_unhealthy=terminate_when_unhealthy, healthy_z_range=healthy_z_range,
                                      healthy_angle_range=healthy_angle_range, reset_noise_scale=reset_noise_scale,
                                      exclude_current_positions_from_observation=exclude_current_positions_from_observation)
        self._initialise_mdp_properties(gamma=gamma, horizon=horizon)
{
"api_name": "ARLO.abstract_unit.abstract_unit.AbstractUnit",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "abc.ABC",
"line_number": 43,
"usage_type": "name"
},
{
"api_name": "abc.abstractmethod",
"line_number": 90,
"usage_type": "name"
},
{
"api_name... |
1585948759 | from typing import Optional
from decimal import Decimal
from validator_collection import validators
from highcharts_core import errors
from highcharts_core.metaclasses import HighchartsMeta
from highcharts_core.decorators import class_sensitive
from highcharts_core.options.sonification.track_configurations import (InstrumentTrackConfiguration,
SpeechTrackConfiguration,
ContextTrackConfiguration)
from highcharts_core.options.sonification.grouping import SonificationGrouping
from highcharts_core.utility_classes.events import SonificationEvents
class SonificationOptions(HighchartsMeta):
    """Options for configuring sonification and audio charts."""

    def __init__(self, **kwargs):
        self._after_series_wait = None
        self._default_instrument_options = None
        self._default_speech_options = None
        self._duration = None
        self._enabled = None
        self._events = None
        self._global_context_tracks = None
        self._global_tracks = None
        self._master_volume = None
        self._order = None
        self._point_grouping = None
        self._show_crosshair = None
        self._show_tooltip = None
        self._update_interval = None

        self.after_series_wait = kwargs.get('after_series_wait', None)
        self.default_instrument_options = kwargs.get('default_instrument_options', None)
        self.default_speech_options = kwargs.get('default_speech_options', None)
        self.duration = kwargs.get('duration', None)
        self.enabled = kwargs.get('enabled', None)
        self.events = kwargs.get('events', None)
        self.global_context_tracks = kwargs.get('global_context_tracks', None)
        self.global_tracks = kwargs.get('global_tracks', None)
        self.master_volume = kwargs.get('master_volume', None)
        self.order = kwargs.get('order', None)
        self.point_grouping = kwargs.get('point_grouping', None)
        self.show_crosshair = kwargs.get('show_crosshair', None)
        self.show_tooltip = kwargs.get('show_tooltip', None)
        self.update_interval = kwargs.get('update_interval', None)

    @property
    def after_series_wait(self) -> Optional[int | float | Decimal]:
        """The time to wait in milliseconds after each data series when playing the visualization's data series
        in sequence. Defaults to ``700``.

        :rtype: numeric or :obj:`None <python:None>`
        """
        return self._after_series_wait

    @after_series_wait.setter
    def after_series_wait(self, value):
        self._after_series_wait = validators.numeric(value,
                                                     allow_empty = True,
                                                     minimum = 0)

    @property
    def default_instrument_options(self) -> Optional[InstrumentTrackConfiguration]:
        """Default sonification options for all instrument tracks.

        .. warning::

          If specific options are also set on individual tracks or per-series, this configuration will be *overridden*.

        :rtype: :class:`InstrumentTrackConfiguration <highcharts_core.options.sonification.track_configurations.InstrumentTrackConfiguration>`
          or :obj:`None <python:None>`
        """
        return self._default_instrument_options

    @default_instrument_options.setter
    @class_sensitive(InstrumentTrackConfiguration)
    def default_instrument_options(self, value):
        self._default_instrument_options = value

    @property
    def default_speech_options(self) -> Optional[SpeechTrackConfiguration]:
        """Default sonification options for all speech tracks.

        .. warning::

          If specific options are also set on individual tracks or per-series, this configuration will be *overridden*.

        :rtype: :class:`SpeechTrackConfiguration <highcharts_core.options.sonification.track_configurations.SpeechTrackConfiguration>`
          or :obj:`None <python:None>`
        """
        return self._default_speech_options

    @default_speech_options.setter
    @class_sensitive(SpeechTrackConfiguration)
    def default_speech_options(self, value):
        self._default_speech_options = value

    @property
    def duration(self) -> Optional[int | float | Decimal]:
        """The total duration of the sonification, expressed in milliseconds. Defaults to ``6000``.

        :rtype: numeric or :obj:`None <python:None>`
        """
        return self._duration

    @duration.setter
    def duration(self, value):
        self._duration = validators.numeric(value, allow_empty = True, minimum = 0)

    @property
    def enabled(self) -> Optional[bool]:
        """If ``True``, enables sonification functionality on the chart. Defaults to ``True``.

        :rtype: :class:`bool <python:bool>` or :obj:`None <python:None>`
        """
        return self._enabled

    @enabled.setter
    def enabled(self, value):
        if value is None:
            self._enabled = None
        else:
            self._enabled = bool(value)

    @property
    def events(self) -> Optional[SonificationEvents]:
        """Event handlers for sonification.

        :rtype: :class:`SonificationEvents <highcharts_core.utility_classes.events.SonificationEvents>` or
          :obj:`None <python:None>`
        """
        return self._events

    @events.setter
    @class_sensitive(SonificationEvents)
    def events(self, value):
        self._events = value

    @property
    def global_context_tracks(self) -> Optional[ContextTrackConfiguration]:
        """Context tracks to add globally, an array of either instrument tracks, speech tracks, or a mix.

        .. note::

          Context tracks are not tied to data points, but play at a set interval - either based on ``time`` or on
          ``prop`` values.

        :rtype: :class:`ContextTrackConfiguration <highcharts_core.options.sonification.track_configurations.ContextTrackConfiguration>`
          or :obj:`None <python:None>`
        """
        return self._global_context_tracks

    @global_context_tracks.setter
    @class_sensitive(ContextTrackConfiguration)
    def global_context_tracks(self, value):
        self._global_context_tracks = value

    @property
    def global_tracks(self) -> Optional[InstrumentTrackConfiguration]:
        """Global tracks to add to every series.

        :rtype: :class:`InstrumentTrackConfiguration <highcharts_core.options.sonification.track_configurations.InstrumentTrackConfiguration>`
          or :obj:`None <python:None>`
        """
        return self._global_tracks

    @global_tracks.setter
    @class_sensitive(InstrumentTrackConfiguration)
    def global_tracks(self, value):
        self._global_tracks = value

    @property
    def master_volume(self) -> Optional[int | float | Decimal]:
        """The overall/master volume for the sonification, from ``0`` to ``1``. Defaults to ``0.7``.

        :rtype: numeric or :obj:`None <python:None>`
        """
        return self._master_volume

    @master_volume.setter
    def master_volume(self, value):
        self._master_volume = validators.numeric(value,
                                                 allow_empty = True,
                                                 minimum = 0,
                                                 maximum = 1)

    @property
    def order(self) -> Optional[str]:
        """The order in which to play the sonification for data series. Accepts either:

          * ``'sequential'`` where the series play individually one after the other or
          * ``'simultaneous'`` where the series play at the same time

        Defaults to ``'sequential'``.

        :rtype: :class:`str <python:str>` or :obj:`None <python:None>`
        """
        return self._order

    @order.setter
    def order(self, value):
        if not value:
            self._order = None
        else:
            value = validators.string(value)
            value = value.lower()
            if value not in ['sequential', 'simultaneous']:
                # BUGFIX: the error message previously misspelled "simultaneous"
                # as "simultaenous", which misled users about the accepted value.
                raise errors.HighchartsValueError(f'.order expects either "sequential" or "simultaneous". '
                                                  f'Received: "{value}"')
            self._order = value

    @property
    def point_grouping(self) -> Optional[SonificationGrouping]:
        """Options for grouping data points together when sonifying.

        This allows for the visual presentation to contain more points than what is being played.

        If not enabled, all visible / uncropped points are played.

        :rtype: :class:`SonificationGrouping <highcharts_core.options.sonification.grouping.SonificationGrouping>` or
          :obj:`None <python:None>`
        """
        return self._point_grouping

    @point_grouping.setter
    @class_sensitive(SonificationGrouping)
    def point_grouping(self, value):
        self._point_grouping = value

    @property
    def show_crosshair(self) -> Optional[bool]:
        """If ``True``, show X and Y crosshairs (if defined on the chart) as the sonification plays. Defaults to
        ``True``.

        .. warning::

          If multiple tracks that play at different times try to show crosshairs, it can be glitchy. Therefore,
          it is recommended in those cases to turn this on/off for individual tracks using the ``.show_play_marker``
          property.

        :rtype: :class:`bool <python:bool>` or :obj:`None <python:None>`
        """
        return self._show_crosshair

    @show_crosshair.setter
    def show_crosshair(self, value):
        if value is None:
            self._show_crosshair = None
        else:
            self._show_crosshair = bool(value)

    @property
    def show_tooltip(self) -> Optional[bool]:
        """If ``True``, show tooltips as the sonification plays. Defaults to ``True``.

        .. warning::

          If multiple tracks that play at different times try to show tooltips, it can be glitchy. Therefore,
          it is recommended in those cases to turn this on/off for individual tracks using the ``.show_play_marker``
          property.

        :rtype: :class:`bool <python:bool>` or :obj:`None <python:None>`
        """
        return self._show_tooltip

    @show_tooltip.setter
    def show_tooltip(self, value):
        if value is None:
            self._show_tooltip = None
        else:
            self._show_tooltip = bool(value)

    @property
    def update_interval(self) -> Optional[int | float | Decimal]:
        """The number of milliseconds to wait between each recomputation of the sonification, if the chart updates
        rapidly.

        .. tip::

          This avoids slowing down processes like panning.

        :rtype: numeric or :obj:`None <python:None>`
        """
        return self._update_interval

    @update_interval.setter
    def update_interval(self, value):
        self._update_interval = validators.numeric(value, allow_empty = True, minimum = 0)

    @classmethod
    def _get_kwargs_from_dict(cls, as_dict):
        # Maps Highcharts (JS) camelCase keys to this class's snake_case kwargs.
        kwargs = {
            'after_series_wait': as_dict.get('afterSeriesWait', None),
            'default_instrument_options': as_dict.get('defaultInstrumentOptions', None),
            'default_speech_options': as_dict.get('defaultSpeechOptions', None),
            'duration': as_dict.get('duration', None),
            'enabled': as_dict.get('enabled', None),
            'events': as_dict.get('events', None),
            'global_context_tracks': as_dict.get('globalContextTracks', None),
            'global_tracks': as_dict.get('globalTracks', None),
            'master_volume': as_dict.get('masterVolume', None),
            'order': as_dict.get('order', None),
            'point_grouping': as_dict.get('pointGrouping', None),
            'show_crosshair': as_dict.get('showCrosshair', None),
            'show_tooltip': as_dict.get('showTooltip', None),
            'update_interval': as_dict.get('updateInterval', None),
        }

        return kwargs

    def _to_untrimmed_dict(self, in_cls = None) -> dict:
        # Inverse of _get_kwargs_from_dict: emits Highcharts (JS) camelCase keys.
        untrimmed = {
            'afterSeriesWait': self.after_series_wait,
            'defaultInstrumentOptions': self.default_instrument_options,
            'defaultSpeechOptions': self.default_speech_options,
            'duration': self.duration,
            'enabled': self.enabled,
            'events': self.events,
            'globalContextTracks': self.global_context_tracks,
            'globalTracks': self.global_tracks,
            'masterVolume': self.master_volume,
            'order': self.order,
            'pointGrouping': self.point_grouping,
            'showCrosshair': self.show_crosshair,
            'showTooltip': self.show_tooltip,
            'updateInterval': self.update_interval,
        }

        return untrimmed
| highcharts-for-python/highcharts-core | highcharts_core/options/sonification/__init__.py | __init__.py | py | 13,339 | python | en | code | 40 | github-code | 1 | [
{
"api_name": "highcharts_core.metaclasses.HighchartsMeta",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 51,
"usage_type": "name"
},
{
"api_name": "decimal.Decimal",
"line_number": 51,
"usage_type": "name"
},
{
"api_na... |
38604647476 | import torch
from torch.utils.data import DataLoader
import torchaudio
import time
import warnings
warnings.filterwarnings('ignore')
# Load the YESNO speech dataset from the current directory; download=False means
# the extracted ./waves_yesno data must already be present locally.
yesno_data = torchaudio.datasets.YESNO('.', download=False)
def collate_fn(batch):
    """Collate YESNO samples: pad waveforms to a common length and stack their labels.

    Each sample is a (waveform, sample_rate, labels) tuple; falsy entries are skipped.
    Returns (padded_waveforms, labels) with waveforms shaped (batch, channels, time).
    """
    pad = torch.nn.utils.rnn.pad_sequence
    waveforms = []
    labels = []
    for sample in batch:
        if not sample:
            continue
        # Transpose to (time, channels) so pad_sequence can pad along time.
        waveforms.append(sample[0].t())
        labels.append(sample[2])
    # Pad, then transpose back to (batch, channels, time).
    padded = pad(waveforms, batch_first=True).transpose(1, -1)
    label_tensor = pad(torch.tensor(labels), batch_first=True)
    return padded, label_tensor
def _benchmark_loader(pin_memory):
    """
    Time 4 epochs of iterating the YESNO dataset for worker counts 0..31.

    pin_memory: forwarded to DataLoader; pinning host memory can speed up
                host-to-GPU copies.
    Prints one 'Finish with:... num_workers=...' line per worker count.
    """
    print('pin_memory is', pin_memory)
    for num_workers in range(0, 32, 1):
        data_loader = torch.utils.data.DataLoader(
            yesno_data,
            batch_size=32,
            pin_memory=pin_memory,
            num_workers=num_workers,
            collate_fn=collate_fn)
        start = time.time()
        for epoch in range(1, 5):
            for i, (data, _) in enumerate(data_loader):
                # NOTE: torch.tensor(data) re-copies an already-constructed tensor
                # (and emits a UserWarning, hence the filterwarnings at the top of
                # the file); kept as-is so the timed work matches the original.
                torch.tensor(data).to('cuda')
        end = time.time()
        print("Finish with:{} second, num_workers={}".format(end - start, num_workers))


# The original script duplicated the entire benchmark body once per pin_memory
# setting; the helper above removes that duplication while keeping the output
# order identical.
_benchmark_loader(False)
_benchmark_loader(True)
{
"api_name": "warnings.filterwarnings",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "torchaudio.datasets.YESNO",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "torchaudio.datasets",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name"... |
22314540095 | # -*- coding: utf-8 -*-
"""
Created on Sun Jun 25 20:12:52 2017
@author: dell
"""
import collections
def topKFrequent(nums, k):
"""
:type nums: List[int]
:type k: int
:rtype: List[int]
"""
cnt=collections.Counter(nums)
sort_fre=sorted(cnt.items(),key=lambda d:d[1],reverse=True)
alpha=sort_fre[:k]
return [x[0] for x in alpha]
s=[1,1,1,2,2,3,3]
print(topKFrequent(s, 2)) | ding1995/Leetcode | 347.py | 347.py | py | 444 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.Counter",
"line_number": 14,
"usage_type": "call"
}
] |
6818157468 | import identifier_phase, biomarker, clinical_trial_code, patient_number, lines_of_therapy, study_evaluation
import utils.chunk_utils as cu
import utils.json_utils as ju
def get_overall_info(content):
overall_dict = {}
identifier_phase_dicts = identifier_phase.get_final_result(content)
overall_dict['Identifier'] = ','.join([d['Identifier'] for d in identifier_phase_dicts])
overall_dict['Phase'] = ','.join([d['Phase'] for d in identifier_phase_dicts])
overall_dict['Drug'] = ','.join([d['Drug'] for d in identifier_phase_dicts])
clinical_trial_code_dicts = clinical_trial_code.get_final_result(content)
overall_dict['Clinical Trial Code'] = ','.join([d['Clinical Trial Code'] for d in clinical_trial_code_dicts])
biomarker_dicts = biomarker.get_final_result(content)
overall_dict['Biomarker'] = ','.join([d['Biomarker'] for d in biomarker_dicts])
# target_indication_dicts = target_indication.get_final_result(content)
overall_dict['Target Indication'] = ','.join([d['Target Indication'] for d in biomarker_dicts])
patient_number_dicts = patient_number.get_final_result(content)
overall_dict['Overall Enrollment'] = ','.join([d['Overall Enrollment'] for d in patient_number_dicts])
lines_of_therapy_dicts = lines_of_therapy.get_final_result(content)
overall_dict['Line of Therapy'] = ','.join([d['Line of Therapy'] for d in lines_of_therapy_dicts])
study_evaluation_dicts = study_evaluation.get_final_result(content)
overall_dict['Overall Evaluation'] = ','.join([d['Evaluation'] for d in study_evaluation_dicts])
print()
print("============================ Overall Result =============================")
return overall_dict
if __name__ == '__main__':
print(ju.json_beautify(get_overall_info(cu.get_file_content("data/tmp_test_data_1"))))
| ye8303019/ChatGPT_demo | clinical_result/overall.py | overall.py | py | 1,832 | python | en | code | 27 | github-code | 1 | [
{
"api_name": "identifier_phase.get_final_result",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "clinical_trial_code.get_final_result",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "biomarker.get_final_result",
"line_number": 14,
"usage_type": "call... |
39551925504 | from pyrogram import filters,Client
import wikipedia
wikipedia.set_lang("it")
cprefix="/"
@Client.on_message(filters.command("wiki",cprefix))
async def my_handler(client, message):
send = await message.reply_text("Cercando...")
try:
await send.edit_text(str(wikipedia.summary(message.text.split(" ",1)[1])))
except Exception as e:
await send.edit_text(e)
@Client.on_message(filters.command("wikilang",cprefix))
async def lingua(client, message):
try:
wikipedia.set_lang(message.command[1])
await message.reply_text("Lingua cambiata con successo!")
except:
await message.reply_text("Mi sa... che hai inserito una lingua che non esiste")
| pentecoste/bot-telegram-5l | caldatobot-telegram/comandi/wikipedia.py | wikipedia.py | py | 698 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "wikipedia.set_lang",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "wikipedia.summary",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "pyrogram.Client.on_message",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pyrogra... |
72545619235 | #!/usr/bin/env python3
import numpy as np
import time
import cv2
import matplotlib.pyplot as plt
def corr(F, I):
"""
Input
F: A (k, ell, c)-shaped ndarray containing the k x ell filter (with c channels).
I: An (m, n, c)-shaped ndarray containing the m x n image (with c channels).
Returns
G: An (m, n)-shaped ndarray containing the correlation of the filter with the image.
"""
########## Code starts here ##########
raise NotImplementedError("Implement me!")
########## Code ends here ##########
def norm_cross_corr(F, I):
"""
Input
F: A (k, ell, c)-shaped ndarray containing the k x ell filter (with c channels).
I: An (m, n, c)-shaped ndarray containing the m x n image (with c channels).
Returns
G: An (m, n)-shaped ndarray containing the normalized cross-correlation of the filter with the image.
"""
########## Code starts here ##########
raise NotImplementedError("Implement me!")
########## Code ends here ##########
def show_save_corr_img(filename, image, template):
# Not super simple, because need to normalize image scale properly.
fig, ax = plt.subplots()
cropped_img = image[:-template.shape[0], :-template.shape[1]]
im = ax.imshow(image, interpolation='none', vmin=cropped_img.min())
fig.colorbar(im)
fig.savefig(filename, bbox_inches='tight')
plt.show()
plt.close(fig)
def main():
test_card = cv2.imread('test_card.png').astype(np.float32)
filt1 = np.zeros((3, 3, 1))
filt1[1, 1] = 1
filt2 = np.zeros((3, 3, 1))
filt2[1, -1] = 1
filt3 = np.zeros((3, 3, 1))
filt3[:, 0] = -1
filt3[:, 2] = 1
filt4 = (1./273.)*np.array([[1, 4, 7, 4, 1],
[4, 16, 26, 16, 4],
[7, 26, 41, 26, 7],
[4, 16, 26, 16, 4],
[1, 4, 7, 4, 1]])
filt4 = np.expand_dims(filt4, -1)
grayscale_filters = [filt1, filt2, filt3, filt4]
color_filters = list()
for filt in grayscale_filters:
# Making color filters by replicating the existing
# filter per color channel.
color_filters.append(np.concatenate([filt, filt, filt], axis=-1))
for idx, filt in enumerate(color_filters):
start = time.time()
corr_img = corr(filt, test_card)
stop = time.time()
print('Correlation function runtime:', stop - start, 's')
show_save_corr_img("corr_img_filt%d.png" % idx, corr_img, filt)
if __name__ == "__main__":
main()
| pol-francesch/aa274_group31 | AA274A_HW3/Problem_3/linear_filter.py | linear_filter.py | py | 2,582 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.subplots",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 39,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.show",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "m... |
28575068555 | from . import bp
from flask.views import MethodView
from app import db
from app.schemas import JobQueryArgsSchema, JobSchema
from app.models import Job
@bp.route('/jobs')
class Jobs(MethodView):
@bp.arguments(JobQueryArgsSchema, location="query")
@bp.response(JobSchema(many=True))
@bp.paginate()
def get(self, args, pagination_parameters):
"""List all jobs currently available"""
data, total = Job.get(args, pagination_parameters.page,
pagination_parameters.page_size)
pagination_parameters.item_count = total
return data
@bp.arguments(JobSchema)
@bp.response(JobSchema)
def post(self, job):
"""Post a new job with tags"""
db.session.add(job)
db.session.commit()
return job
| rabizao/starter_flask | backend/app/api/routes.py | routes.py | py | 800 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.views.MethodView",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "app.models.Job.get",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "app.models.Job",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "app.schemas.J... |
71757273633 | import config
import csv
import datetime
import main
import multiprocessing
import psycopg2
import time
sp = main.sp
cur = main.cur
lock = main.lock
def available_genres():
try:
print('Available genres for recommendation was requested!')
results = sp.recommendation_genre_seeds()['genres']
print('Available genres: ' + (', '.join(results)))
return results
except Exception as e:
print('Available genres ERROR!')
print(e)
def genre_song_label():
try:
row = []
songs = get_songs_from_db()
f = open('../datasets/labeled_songs.csv', 'w')
with f:
writer = csv.writer(f)
for song in songs:
artist_id = song[15]
print('artist_id ' + artist_id)
cur.execute("SELECT * FROM artists WHERE id=(%s);", (artist_id, ))
artist = cur.fetchone()
artist_genre = artist[2]
if (artist_genre is None or artist_genre in ['', '[]', '{}']):
print('Artist genre is NULL!')
continue
row = [artist_genre, str(song[0]), str(song[1]), str(song[2]), str(song[3]), str(song[4]),
str(song[5]), str(song[6]), str(song[7]), str(song[8]), str(song[9]), str(song[10]),
str(song[11]), str(song[12]), str(song[13]), str(song[14])]
writer.writerow(row)
except Exception as e:
print('Song labeling ERROR!')
print(e)
def get_artist(artist_id):
try:
print('Artist information for ' + artist_id + ' was requested!')
artist_obj = sp.artist(artist_id)
if (artist_obj == None):
print('Artist information returned NULL!')
return None
print('Information for ' + artist_obj['name'] + ' was successfully fetched!')
return artist_obj
except Exception as e:
print('Artist information ERROR!')
print(e)
def get_album(album_id):
try:
print('Album information for ' + album_id + ' was requested!')
album_obj = sp.album(album_id)
if (album_obj == None):
print('Album information returned NULL!')
return None
print('Information for ' + album_obj['name'] + ' was successfully fetched!')
return album_obj
except Exception as e:
print('Artist information ERROR!')
print(e)
def get_albums_by_artist(artist_id):
try:
print('Album information for ' + artist_id + ' was requested!')
album_obj = sp.artist_albums(artist_id, album_type='album', limit=50)
if (album_obj == None):
print('Album information returned NULL!')
return None
for single_album in album_obj['items']:
print('Information for ' + single_album['name'] + ' was successfully fetched!')
return album_obj
except Exception as e:
print('Album information by artist ERROR!')
print(e)
def get_songs_by_album(album_id):
try:
print('Song information for ' + album_id + ' was requested!')
song_obj = sp.album_tracks(album_id, limit=50)
if (song_obj == None):
print('Song information returned NULL!')
return None
for single_song in song_obj['items']:
print('Information for ' + single_song['name'] + ' was successfully fetched!')
return song_obj
except Exception as e:
print('Song information ERROR!')
print(e)
def get_song_features(song_id):
try:
audio_features = {}
print('Song features for ' + song_id + ' was requested!')
audio_obj = sp.audio_features(song_id)
if (audio_obj == None):
print('Song features returned NULL!')
return None
audio_features['acousticness'] = audio_obj[0]['acousticness']
audio_features['instrumentalness'] = audio_obj[0]['instrumentalness']
audio_features['valence'] = audio_obj[0]['valence']
audio_features['loudness'] = audio_obj[0]['loudness']
audio_features['energy'] = audio_obj[0]['energy']
audio_features['liveness'] = audio_obj[0]['liveness']
audio_features['danceability'] = audio_obj[0]['danceability']
audio_features['tempo'] = audio_obj[0]['tempo']
audio_features['speechiness'] = audio_obj[0]['speechiness']
audio_features['mode'] = audio_obj[0]['mode']
audio_features['duration_ms'] = audio_obj[0]['duration_ms']
audio_features['time_signature'] = audio_obj[0]['time_signature']
audio_features['key'] = audio_obj[0]['key']
print('Song features for ' + song_id + ' was successfully fetched!')
return audio_features
except Exception as e:
print('Song features ERROR!')
print(e)
def get_artists_by_recommendation_genre(seeder):
results = sp.recommendations(seed_genres=seeder, limit=50)
results = results['tracks']
results_pot = []
for result in results:
if (len(result['artists']) > 1):
for single_artist in result['artists']:
spotify_id = single_artist['id']
lock.acquire()
cur.execute("SELECT COUNT(*) FROM artists WHERE id=(%s);", (spotify_id, ))
if (cur.fetchone()[0] == 1):
print(spotify_id + ' artist is already in DB.')
lock.release()
continue
else:
lock.release()
artist_obj = get_artist(spotify_id)
if (not artist_obj):
print('Artist object is NULL!')
continue
popularity = artist_obj['popularity']
genres = artist_obj['genres']
if (popularity < config.ENV['MIN_ARTIST_POP'] or (len(genres) == 0) or (genres is None)):
print('Not enough popularity or genre not found!')
continue
name = single_artist['name']
results_pot.append((spotify_id, name, genres, popularity))
else:
single_artist = result['artists'][0]
spotify_id = single_artist['id']
lock.acquire()
cur.execute("SELECT COUNT(*) FROM artists WHERE id=(%s);", (spotify_id, ))
if (cur.fetchone()[0] == 1):
print(spotify_id + ' artist is already in DB.')
lock.release()
continue
else:
lock.release()
artist_obj = get_artist(spotify_id)
popularity = artist_obj['popularity']
if (not artist_obj):
print('Artist object is NULL!')
continue
if (popularity < config.ENV['MIN_ARTIST_POP']):
continue
name = single_artist['name']
genres = artist_obj['genres']
results_pot.append((spotify_id, name, genres, popularity))
return results_pot
def insert_artists_by_genres(combinated_genre):
try:
seeder = combinated_genre
artists = get_artists_by_recommendation_genre(seeder)
for artist in artists:
try:
lock.acquire()
cur.execute("SELECT COUNT(*) FROM artists WHERE id=(%s);", (artist[0], ))
if ((len(artist[2]) == 0) or (artist[2] is None)):
print(artist[0] + ' artist genre is NOT FOUND!')
lock.release()
elif (cur.fetchone()[0] == 1):
print(artist[0] + ' artist is already in DB.')
lock.release()
continue
else:
lock.release()
lock.acquire()
cur.execute("""INSERT INTO artists (
id, name, genres, popularity, created_at)
VALUES (%s, %s, %s, %s, %s)""",
(artist[0], artist[1], artist[2], artist[3], datetime.datetime.utcnow()))
lock.release()
except psycopg2.IntegrityError as e:
print('Exception was captured but program is still in progress.') # Mostly to prevent DB from duplicated PKs.
print(e)
lock.release()
continue
except Exception as e:
print(e)
continue
except KeyboardInterrupt:
print('Process was interrupted!')
return
def get_and_insert_artists(combinated_genres):
try:
num_active_processes = 0
for single_combinated_genre in combinated_genres:
concatenated_genre = ("-".join(single_combinated_genre))
lock.acquire()
cur.execute("SELECT COUNT(*) FROM combinated_genres WHERE name=(%s);", (concatenated_genre, ))
if (cur.fetchone()[0] == 1):
print("-".join(single_combinated_genre) + ' combinated genre was already fetched!')
lock.release()
continue
else:
lock.release()
if (num_active_processes < config.ENV['NUM_PROCESSES']):
p = multiprocessing.Process(target=insert_artists_by_genres, args=(single_combinated_genre,))
p.daemon = True
p.start()
num_active_processes += 1
else:
insert_artists_by_genres(single_combinated_genre)
time.sleep(config.ENV['MULTIPROCESS_SLEEP'])
num_active_processes = 0
lock.acquire()
cur.execute("""INSERT INTO combinated_genres (
name, created_at)
VALUES (%s, %s)""",
(concatenated_genre, datetime.datetime.utcnow()))
print("-".join(single_combinated_genre) + ' combinated genre was written in DB!')
lock.release()
except KeyboardInterrupt:
print('User interrupted the process!')
except Exception as e:
print(e)
def insert_albums(artist):
try:
for album in get_albums_by_artist(artist[0])['items']:
if (album == None):
continue
album_obj = get_album(album['id'])
if (album_obj == None):
continue
album_id = album_obj['id']
lock.acquire()
cur.execute("SELECT COUNT(*) FROM albums WHERE id=(%s);", (album_id, ))
if (cur.fetchone()[0] == 1):
print(album_id + ' album is already in DB.')
lock.release()
continue
else:
lock.release()
album_name = album_obj['name']
# album_genres = album_obj['genres']
total_tracks = album_obj['total_tracks']
artist_id = artist[0]
artist_name = artist[1]
lock.acquire()
cur.execute("""INSERT INTO albums (
id, name, total_tracks, artist_id, artist_name, created_at)
VALUES (%s, %s, %s, %s, %s, %s)""",
(album_id, album_name, total_tracks, artist_id, artist_name, datetime.datetime.utcnow()))
print(album_id + ' album was written in DB.')
lock.release()
except KeyboardInterrupt:
print('User interrupted the process!')
except Exception as e:
print(e)
def insert_songs(album):
try:
for song in get_songs_by_album(album[0])['items']:
if (song == None):
continue
song_id = song['id']
lock.acquire()
cur.execute("SELECT COUNT(*) FROM songs WHERE id=(%s);", (song_id, ))
if (cur.fetchone()[0] == 1):
print(song_id + ' song is already in DB.')
lock.release()
continue
else:
lock.release()
song_name = song['name']
song_feature_obj = get_song_features(song_id)
if (song_feature_obj == None):
continue
acousticness = song_feature_obj['acousticness']
instrumentalness = song_feature_obj['instrumentalness']
valence = song_feature_obj['valence']
loudness = song_feature_obj['loudness']
energy = song_feature_obj['energy']
liveness = song_feature_obj['liveness']
danceability = song_feature_obj['danceability']
tempo = song_feature_obj['tempo']
speechiness = song_feature_obj['speechiness']
mode = song_feature_obj['mode']
duration_ms = song_feature_obj['duration_ms']
time_signature = song_feature_obj['time_signature']
key = song_feature_obj['key']
artist_id = album[3]
artist_name = album[4]
album_id = album[0]
album_name = album[1]
lock.acquire()
cur.execute("""INSERT INTO songs (
id, name,
acousticness, instrumentalness, valence, loudness, energy, liveness, danceability, tempo, speechiness,
mode, duration_ms, time_signature, key,
artist_id, artist_name, album_id, album_name,
created_at)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
(song_id, song_name,
acousticness, instrumentalness, valence, loudness, energy, liveness, danceability, tempo, speechiness,
mode, duration_ms, time_signature, key,
artist_id, artist_name, album_id, album_name,
datetime.datetime.utcnow()))
print(song_id + ' song was written in DB.')
lock.release()
except KeyboardInterrupt:
print('User interrupted the process!')
except Exception as e:
print(e)
def insert_predictions(prediction):
try:
if (prediction == None):
print('Prediction is NONE!')
return
song_id = prediction['id']
song_name = prediction['name']
r_b = float(prediction['probs'][0][0])
rap = float(prediction['probs'][0][1])
electronic = float(prediction['probs'][0][2])
rock = float(prediction['probs'][0][3])
new_age = float(prediction['probs'][0][4])
classical = float(prediction['probs'][0][5])
reggae = float(prediction['probs'][0][6])
blues = float(prediction['probs'][0][7])
country = float(prediction['probs'][0][8])
world = float(prediction['probs'][0][9])
folk = float(prediction['probs'][0][10])
easy_listening = float(prediction['probs'][0][11])
jazz = float(prediction['probs'][0][12])
vocal = float(prediction['probs'][0][13])
punk = float(prediction['probs'][0][14])
alternative = float(prediction['probs'][0][15])
pop = float(prediction['probs'][0][16])
heavy_metal = float(prediction['probs'][0][17])
lock.acquire()
cur.execute("""INSERT INTO song_genre_predictions (
id, name, r_b, rap, electronic, rock, new_age, classical, reggae, blues, country, world, folk, easy_listening, jazz, vocal, punk, alternative, pop, heavy_metal, created_at)
VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)""",
(song_id, song_name, r_b, rap, electronic, rock, new_age, classical, reggae, blues, country, world, folk, easy_listening, jazz, vocal, punk, alternative, pop, heavy_metal, datetime.datetime.utcnow()))
print(song_id + ' song was written in DB.')
lock.release()
except KeyboardInterrupt:
print('User interrupted the process!')
except Exception as e:
print(e)
def plt_predictions():
# Sample prediction bar charts for genre classifier model results.
# Save results as 'png' in 'figures' folder.
# Radiohead - Alternative/Rock
# Justin Bieber - Pop
# Nirvana - Rock
# 6ix9ine - Rap
genres = ['r_b', 'rap', 'electronic', 'rock', 'new_age', 'classical', 'reggae', 'blues', 'country',
'world', 'folk', 'easy_listening', 'jazz', 'vocal', 'punk', 'alternative', 'pop', 'heavy_metal']
artist_names = ['radiohead', 'justin_bieber', 'nirvana', '6ix9ine']
artist_id = {
'radiohead': '4Z8W4fKeB5YxbusRsdQVPb',
'justin_bieber': '1uNFoZAHBGtllmzznpCI3s',
'nirvana': '6olE6TJLqED3rqDCT0FyPh',
'6ix9ine': '7gZfnEnfiaHzxARJ2LeXrf'
}
for artist_name in artist_names:
songs = get_songs_of_artist_from_db(artist_id[artist_name])
song_ids = [x[0] for x in songs]
for song_id in song_ids:
prediction = get_genre_predictions_from_db(song_id)
song_name = prediction[0][1].replace('/', '_').replace('.', '_')
prediction = [[x[2:20] for x in prediction]]
prediction = prediction[0][0]
plt.subplots(num=None, figsize=(18, 10), dpi=60, facecolor='w', edgecolor='k')
plt.xlabel('Genre Correlation Value')
plt.ylabel('Genres')
plt.barh(genres, prediction)
plt.title(song_name)
plt.savefig('figures/' + artist_name + '/' + song_name + '-' + song_id)
# plt.show()
plt.clf()
plt.close()
def get_artists_from_db():
cur.execute("SELECT * FROM artists ORDER BY RANDOM()")
return cur.fetchall()
def get_albums_from_db():
cur.execute("SELECT * FROM albums ORDER BY RANDOM()")
return cur.fetchall()
def get_songs_from_db():
cur.execute("SELECT * FROM songs ORDER BY RANDOM()")
return cur.fetchall()
def get_count_songs_from_db():
cur.execute("SELECT COUNT(*) FROM songs")
return cur.fetchall()[0][0]
def artists_genre_occurrences():
cur.execute("SELECT genres, COUNT(genres) FROM artists GROUP BY genres ORDER BY COUNT(genres) DESC")
return cur.fetchall()
def get_genre_predictions_from_db(song_id):
cur.execute("SELECT * FROM song_genre_predictions WHERE id=%s", (song_id,))
return cur.fetchall()
def get_songs_of_artist_from_db(artist_id):
cur.execute("SELECT * FROM songs WHERE artist_id=%s", (artist_id,))
return cur.fetchall() | 0x01h/individual-symphony | tools/functions.py | functions.py | py | 18,474 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "main.sp",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "main.cur",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "main.lock",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "csv.writer",
"line_numbe... |
41139310734 | import json
import time
import os
import datetime
from scraper.halooglasi import get_latest_with_retry
from scraper.db import check_if_exists, insert_property
from scraper.discord import send_to_discord
from scraper.logger import logger
def get_local_time():
"""
convert current time to CEST
"""
cest_time = datetime.datetime.now() + datetime.timedelta(hours=2)
return cest_time.strftime("%D %H:%M:%S")
def main():
CFG_FILE = os.environ.get("CFG_FILE", "/app/config.json")
# in seconds
SLEEP_TIME = float(os.environ.get("SLEEP_TIME", 30))
config = []
while True:
with open(CFG_FILE, "r") as f:
_config = json.load(f)
if _config != config:
logger.info(f"New config: {_config}")
config = _config
for url, hook_url in config.items():
logger.info(f"Scraping... {get_local_time()}")
results = get_latest_with_retry(url, max_retries=5, sleep_time=SLEEP_TIME)
for link, price, name, location in results[::-1]:
if not check_if_exists(link):
insert_property(link, price, name, location)
logger.info(f"Inserted {name} with price {price}")
send_to_discord(link, price, name, location, hook_url=hook_url)
time.sleep(SLEEP_TIME)
if __name__ == "__main__":
main()
| avramdj/halo-oglasi-scraper | scraper/__main__.py | __main__.py | py | 1,400 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "datetime.timedelta",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.en... |
26553030524 | from flask_app.config.mysqlconnection import connectToMySQL
from flask import flash
from flask_app.controllers.users import User
class Sighting:
def __init__(self, data):
self.id = data['id']
self.location = data['location']
self.scenario = data['scenario']
self.date_of_sighting = data['date_of_sighting']
self.qty_of_sq = data['qty_of_sq']
self.created_at = data['created_at']
self.updated_at = data['updated_at']
self.user = None
self.skeptic_user = []
@classmethod
def all_sightings(cls):
query = """
SELECT * FROM sightings
JOIN users on sightings.user_id = users.id;
"""
results = connectToMySQL('sasquatch2').query_db(query)
sightings = []
if not results:
return sightings
for info in results:
sighting = cls(results[0])
row = {
'id': info['users.id'],
'first_name': info['first_name'],
'last_name': info['last_name'],
'email': info['email'],
'password': "",
'created_at': info['users.created_at'],
'updated_at': info['users.updated_at']
}
sighting.user = User(row)
sightings.append(sighting)
return sightings
@classmethod
def add_sighting(cls,data):
query = """
INSERT INTO sightings(location, scenario, date_of_sighting, qty_of_sq, created_at, updated_at, user_id)
VALUES(%(location)s, %(scenario)s, %(date_of_sighting)s, %(qty_of_sq)s,NOW(), NOW(), %(user_id)s);
"""
results = connectToMySQL('sasquatch2').query_db(query,data)
return results
@classmethod
def getidby(cls,data):
query = """
SELECT * FROM sightings
WHERE id = %(id)s;
"""
results = connectToMySQL('sasquatch2').query_db(query,data)
return cls(results[0])
@classmethod
def update_sightings(cls,data):
query = """
UPDATE sightings
SET location = %(location)s, scenario = %(scenario)s, date_of_sighting = %(date_of_sighting)s, qty_of_sq = %(qty_of_sq)s, updated_at = NOW()
WHERE id = %(id)s;
"""
results = connectToMySQL('sasquatch2').query_db(query,data)
return results
@staticmethod
def validate_sighting(sighting_info):
is_valid = True
if len(sighting_info['location']) < 2:
flash("Location is needed/Not valid location")
is_valid = False
if len(sighting_info['scenario']) < 2:
flash("Tell us what happened/Not a valid entry")
is_valid = False
if sighting_info['qty_of_sq'] == '' :
flash("Please tell us how many you saw")
is_valid = False
if sighting_info['date_of_sighting'] == '' :
flash("Please input a date")
is_valid = False
return is_valid
@classmethod
def remove_sighting(cls, data):
query = """
DELETE FROM sightings
WHERE id = %(id)s
"""
results = connectToMySQL('sasquatch2').query_db(query,data)
return results
@classmethod
def skeptic_sight(cls):
query = """
SELECT * FROM sightings
LEFT JOIN skeptics ON sightings.id = skeptics.sighting_id
LEFT JOIN users ON users.id = skeptics.user_id
WHERE sightings.id = %(id)s;
"""
results = connectToMySQL('sasquatch2').query_db(query)
sightings = []
if not results:
return sightings
for sight in results:
if sight['users.id'] == None:
break
sighting = cls(results[0])
data ={
'id': sight['users.id'],
'first_name': sight['first_name'],
'last_name': sight['last_name'],
'email': sight['email'],
'password': "",
'created_at': sight['users.created_at'],
'updated_at': sight['users.updated_at']
}
sighting.user= User(data)
sightings.append(sighting)
return sightings
| OhJackie21/Python-Practice | practice/sasquatch2/flask_app/models/sighting.py | sighting.py | py | 4,436 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask_app.config.mysqlconnection.connectToMySQL",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "flask_app.controllers.users.User",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "flask_app.config.mysqlconnection.connectToMySQL",
"line_numb... |
11995643688 | from rest_framework import generics
from rest_framework.response import Response
from .models import Ganado, Categoria, Raza, LitrosDeLeche
from .serializers import GanadoSerializer,CategoriaSerializer, RazaSerializer, LitrosDeLecheSerializer
from django.views.generic import View
class GanadoListCreateAPIView(generics.ListCreateAPIView):
queryset = Ganado.objects.all()
serializer_class = GanadoSerializer
class GanadoRetrieveUpdateDestroyView(generics.RetrieveUpdateDestroyAPIView):
queryset = Ganado.objects.all()
serializer_class = GanadoSerializer
class GanadoDestroyAPIView(generics.DestroyAPIView):
queryset = Ganado.objects.all()
serializer_class = GanadoSerializer
class GanadoSearchView(View):
def get(self, request):
search_query = request.GET.get('search')
if search_query:
ganados = Ganado.objects.filter(nombre__icontains=search_query)
else:
ganados = Ganado.objects.all()
return render(request, 'ganado_search.html', {'ganados': ganados, 'search_query': search_query})
#categoria
class CategoriaListCreateAPIView(generics.ListCreateAPIView):
queryset = Categoria.objects.all()
serializer_class = CategoriaSerializer
class CategoriaRetrieveUpdateDestroyView(generics.RetrieveUpdateDestroyAPIView):
queryset = Categoria.objects.all()
serializer_class = CategoriaSerializer
class CategoriaDestroyAPIView(generics.DestroyAPIView):
queryset = Categoria.objects.all()
serializer_class = CategoriaSerializer
class CategoriaSearchView(View):
def get(self, request):
search_query = request.GET.get('search')
if search_query:
categorias = Categoria.objects.filter(nombre__icontains=search_query)
else:
categorias = Categoria.objects.all()
return render(request, 'ganado_search.html', {'ganados': categorias, 'search_query': search_query})
#Raza
class RazaListCreateAPIView(generics.ListCreateAPIView):
queryset = Raza.objects.all()
serializer_class = RazaSerializer
class RazaRetrieveUpdateDestroyView(generics.RetrieveUpdateDestroyAPIView):
queryset = Raza.objects.all()
serializer_class = RazaSerializer
class RazaDestroyAPIView(generics.DestroyAPIView):
queryset = Raza.objects.all()
serializer_class = RazaSerializer
#litros de leche
class LitrosDeLecheListCreateAPIView(generics.ListCreateAPIView):
queryset = LitrosDeLeche.objects.all()
serializer_class = LitrosDeLecheSerializer
class LitrosDeLecheRetrieveUpdateDestroyView(generics.RetrieveUpdateDestroyAPIView):
queryset = LitrosDeLeche.objects.all()
serializer_class = LitrosDeLecheSerializer
class LitrosDeLecheDestroyAPIView(generics.DestroyAPIView):
queryset = LitrosDeLeche.objects.all()
serializer_class = LitrosDeLecheSerializer
| edwinjojoa/proyecto_grado | backend/ganado/views.py | views.py | py | 2,860 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "rest_framework.generics.ListCreateAPIView",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.generics",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.Ganado.objects.all",
"line_number": 9,
"usage_type": "call"
... |
16921598410 | import util
from command import process_command
import session as discord_session
import filters
from discord import Embed
from logger import log
# for the memes
from random import choice
@util.client.event
async def on_message(message):
if message.author.bot:
# check to make sure it didn't send message
if message.author == util.client.user:
return
# brandon clauses
if message.content.startswith('What do you call a'):
await insult(message)
elif message.content.startswith('How do you'):
await insult(message)
return
try:
await handle_message(message)
except Exception as e:
log(message.server.id, str(e))
await util.client.send_message(message.channel, embed=Embed(color=util.error_color, description='An unknown error occurred.'))
async def insult(message):
messages = [
'That joke wasn\'t funny and it never will be.',
'You are not funny.',
'Please be quiet for all of our sakes.',
'You make me want to die.',
'You don\'t need to tell jokes; you are one.',
'Inferior...',
':joy: :gun:',
'```Roses are red,\nViolets are blue,\nThat bot is garbage,\nand Brandon is too.```'
]
await util.client.send_message(message.channel, choice(messages))
@util.client.event
async def on_server_join(server):
embed = Embed(color=util.theme_color, title='Null Bot')
embed.description = 'Null is a fully featured, powerful, multipurpose Discord bot from easy use on any server.'
embed.add_field(name='Get Started', value='Use `!help` to get list of command and how to use them.', inline=False)
embed.add_field(name='Support', value='**Donate** donate_url\n**Source** github_url\n**Upvote** upvote_url', inline=False)
embed.add_field(name='Join our Discord', value='discord_url', inline=False)
icon_url = 'https://cdn.discordapp.com/avatars/226732838181928970/19562db0c14f445ac5a0bf8f605989c1.png?size=128'
embed.set_footer(text='Developed by ComedicChimera#3451', icon_url=icon_url)
await util.client.send_message(server.default_channel, embed=embed)
@util.client.event
async def on_member_join(member):
if member.id in util.servers[member.server].hack_bans:
await util.client.kick(member)
else:
await util.client.send_message(member.server.default_channel, 'Welcome `%s`!' % member.name)
async def handle_message(message):
prefix = util.get_server_prefix(message.server)
# use custom input handler if specified
if discord_session.has_session(message.server, message.channel, message.author):
await util.client.send_typing(message.channel)
s = discord_session.get_session(message.server, message.channel, message.author)
await s.handler(message, s.session_id)
# else pass to command handler
elif message.content.startswith(prefix):
await process_command(message, prefix)
# apply filters
elif filters.match_filters(message):
await util.client.delete_message(message)
if __name__ == '__main__':
# imported command sets
import modules.general.commands
import modules.music.commands
import modules.math.commands
import modules.internet.commands
import modules.money.commands
import modules.games.commands
import modules.admin.commands
# start the bot
util.client.run(util.token)
| ComedicChimera/Null-Discord-Bot | bot.py | bot.py | py | 3,439 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "util.client",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logger.log",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "util.client.send_message",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "util.client",
... |
73062305953 |
from mrjob.job import MRJob
from mrjob.protocol import JSONProtocol, RawValueProtocol, JSONValueProtocol
from mrjob.step import MRStep
import json
import numpy as np
def multivar_gauss_pdf(x, mu, cov):
'''
Caculates the multivariate normal density (pdf)
Parameters:
-----------
x - numpy array of a "d x 1" sample vector
mu - numpy array of a "d x 1" mean vector
cov - numpy array of a d x d" covariance matrix
(where d - dimensionality of data)
Output:
-------
- (float) probability of x given parameters of
Gaussian Distribution
'''
part1 = 1 / ( ((2* np.pi)**(len(mu)/2)) * (np.linalg.det(cov)**(1/2)) )
part2 = (-1/2) * np.dot(np.dot((x-mu).T,(np.linalg.inv(cov))),(x-mu))
return float(part1 * np.exp(part2))
def responsibility(x,mu,cov,p,K):
'''
Calculates conditional probability of latent variable given
observed data and parameters
Parameters:
-----------
x - numpy array of a "d x 1" sample vector
mu - list of length "K" of lists "d x 1" mean vector
cov - list of length "K" numpy arrays each "d x d" covariance matrix
p - list of floats, each float prior probability of cluster
K - number of clusters (values of latent variables)
(where d - dimensionality of data)
Output:
- list of floats, each element of list is responsibility corresponding
to x and relevant latent variable valiue
'''
resps = [p[k]*multivar_gauss_pdf(x,np.array(mu[k]),np.array(cov[k])) for k in range(K)]
p_x = sum(resps)
return [float(r_k)/p_x for r_k in resps]
def extract_features(line):
''' extracts features from line of input'''
data = line.strip().split(",")
return [ float(e) for e in data[1:] ]
def make_json_encodable(mixing, means, covar):
'''
Transforms
Parameters:
-----------
mixing - list of size k
means - list of size k of numpy arrays (each numpy array has size d)
covar - list of size k of two dimensional numpy array (matrix of size dxd)
(where d is dimensionality and k is number of clusters)
Output:
--------
- dictionary with parameter names as keys
{"mu": list of mean vectors, "mixing": list of mixing coefficients,
"covariance": list of covariance matrices}
'''
matrix_to_list = lambda x: [list(e) for e in x]
mixing = mixing
means = matrix_to_list(means)
covariance = [matrix_to_list(e) for e in covar]
return {"mixing":mixing,"mu":means,"covariance":covariance}
class IterationGaussianMixtureMR(MRJob):
'''
Runs single iteration of Expectation Maximization Algorithm for Gaussian
Mixture Model.
Mappers use parameters from previous iteration to calculate responsibilities
and intermediate values that are then used by single reducer to calculate
new parameters.
Command Line Options:
---------------------
--clusters - number of clusters
--dimensions - dimensionality of data
--parameters - (str)json encoded dictionary of parameters
'''
INPUT_PROTOCOL = RawValueProtocol
INTERNAL_PROTOCOL = JSONProtocol
OUTPUT_PROTOCOL = JSONValueProtocol
def __init__(self,*args,**kwargs):
super(IterationGaussianMixtureMR,self).__init__(*args,**kwargs)
# sum of responsibilities for each cluster & number of observations
self.resp_sum = [0]*self.clusters
self.N = 0
# sum of observations weighted by reponsibility
self.resp_w_sum = [np.zeros(self.dim, dtype = np.float64) for i in range(self.clusters)]
# sum of x_n*x_n_t (outer products) weighted by reponsibility
self.resp_w_cov = [np.zeros([self.dim,self.dim], dtype = np.float64) for i in range(self.clusters)]
def configure_options(self):
super(IterationGaussianMixtureMR,self).configure_options()
self.add_passthrough_option("--dimensions",
type = int,
help = "dimensionality of input data")
self.add_passthrough_option("--clusters",
type = int,
help = "number of clusters")
self.add_passthrough_option("--parameters",
type = str,
help = "file with parameters from previous iteration")
def load_options(self,args):
super(IterationGaussianMixtureMR,self).load_options(args)
# number of clusters
if self.options.clusters is None:
self.option_parser.error("You need to specify number of clusters")
else:
self.clusters = self.options.clusters
# data dimensionality
if self.options.dimensions is None:
self.option_parser.error("You need to specify dimensionality of data")
else:
self.dim = self.options.dimensions
# filename where parameters from previous iteration are saved
if self.options.parameters is None:
self.option_parser.error("You need to load file with distribution parameters")
def mapper_gmm_init(self):
params = json.loads(self.options.parameters)
self.mu = params["mu"]
self.covar = params["covariance"]
self.mixing = params["mixing"]
def mapper_gmm(self,_,line):
features = extract_features(line)
assert(len(features)==self.dim), "dimension mismatch"
x = np.array(features)
r_n = responsibility(x,self.mu,self.covar,self.mixing,self.clusters) # responsibilities
self.resp_sum = [self.resp_sum[i]+r_n_k for i,r_n_k in enumerate(r_n)]
self.resp_w_sum = [w_sum + r_n[i]*x for i,w_sum in enumerate(self.resp_w_sum)]
self.resp_w_cov = [w_covar+r_n[i]*np.outer(x,x) for i,w_covar in enumerate(self.resp_w_cov)]
self.N+=1
def mapper_final_gmm(self):
matrix_to_list = lambda x: [list(e) for e in x]
# sum of responsibilities
yield 1,("r_sum", self.resp_sum)
# sum of observations weighted by responsibility
yield 1,("r_w_sum", [list(e) for e in self.resp_w_sum])
# covariates weighted by responsibility
yield 1,("r_w_cov", [ matrix_to_list(cov) for cov in self.resp_w_cov])
# number of observations
yield 1,("total", self.N)
def reducer_gmm(self,key, values):
N = 0;
r_sum = [0]*self.clusters
r_w_sum = [np.zeros(self.dim, dtype = np.float64) for i in range(self.clusters)]
r_w_cov = [np.zeros([self.dim,self.dim], dtype = np.float64) for i in range(self.clusters)]
for value in values:
if value[0]=="r_sum":
r_sum = [r_sum[i]+gamma for i,gamma in enumerate(value[1])]
elif value[0]=="r_w_sum":
r_w_sum = [r_w_sum[i]+np.array(r_w_new, dtype = np.float64) for i,r_w_new in enumerate(value[1])]
elif value[0]=="r_w_cov":
r_w_cov = [ r_w_cov[i] + np.array(cov) for i,cov in enumerate(value[1])]
elif value[0]=="total":
N+=value[1]
mixing = [float(gamma)/N for gamma in r_sum]
means = [1.0/r_sum[i]*r_w_sum[i] for i, gamma in enumerate(mixing)]
covar = [ 1.0/r_sum[k]*r_w_cov_k - np.outer(means[k],means[k]) for k,r_w_cov_k in enumerate(r_w_cov)]
yield None, make_json_encodable(mixing,means,covar)
def steps(self):
return [MRStep(mapper_init = self.mapper_gmm_init,
mapper = self.mapper_gmm,
mapper_final = self.mapper_final_gmm,
reducer = self.reducer_gmm)]
if __name__=="__main__":
IterationGaussianMixtureMR.run()
| AmazaspShumik/MapReduce-Machine-Learning | Gaussian Mixture Model MapReduce/IterationGaussianMixtureMR.py | IterationGaussianMixtureMR.py | py | 8,086 | python | en | code | 22 | github-code | 1 | [
{
"api_name": "numpy.pi",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.det",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "numpy.dot",
"lin... |
39286301539 | import torch
import torch.nn as nn
import torch.nn.functional as F
from pycls.models.resnet_style.shake_shake_function import get_alpha_beta, shake_function
import pycls.utils.logging as lu
logger = lu.get_logger(__name__)
def initialize_weights(module):
if isinstance(module, nn.Conv2d):
nn.init.kaiming_normal_(module.weight.data, mode='fan_out')
elif isinstance(module, nn.BatchNorm2d):
module.weight.data.fill_(1)
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
module.bias.data.zero_()
class ResidualPath(nn.Module):
def __init__(self, in_channels, out_channels, stride):
super(ResidualPath, self).__init__()
self.conv1 = nn.Conv2d(
in_channels,
out_channels,
kernel_size=3,
stride=stride,
padding=1,
bias=False,
)
self.bn1 = nn.BatchNorm2d(out_channels)
self.conv2 = nn.Conv2d(
out_channels,
out_channels,
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.bn2 = nn.BatchNorm2d(out_channels)
def forward(self, x):
x = F.relu(x, inplace=False)
x = F.relu(self.bn1(self.conv1(x)), inplace=False)
x = self.bn2(self.conv2(x))
return x
class DownsamplingShortcut(nn.Module):
def __init__(self, in_channels):
super(DownsamplingShortcut, self).__init__()
self.conv1 = nn.Conv2d(
in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.conv2 = nn.Conv2d(
in_channels,
in_channels,
kernel_size=1,
stride=1,
padding=0,
bias=False)
self.bn = nn.BatchNorm2d(in_channels * 2)
def forward(self, x):
x = F.relu(x, inplace=False)
y1 = F.avg_pool2d(x, kernel_size=1, stride=2, padding=0)
y1 = self.conv1(y1)
y2 = F.pad(x[:, :, 1:, 1:], (0, 1, 0, 1))
y2 = F.avg_pool2d(y2, kernel_size=1, stride=2, padding=0)
y2 = self.conv2(y2)
z = torch.cat([y1, y2], dim=1)
z = self.bn(z)
return z
class BasicBlock(nn.Module):
def __init__(self, in_channels, out_channels, stride, shake_config):
super(BasicBlock, self).__init__()
self.shake_config = shake_config
self.residual_path1 = ResidualPath(in_channels, out_channels, stride)
self.residual_path2 = ResidualPath(in_channels, out_channels, stride)
self.shortcut = nn.Sequential()
if in_channels != out_channels:
self.shortcut.add_module('downsample',
DownsamplingShortcut(in_channels))
def forward(self, x):
x1 = self.residual_path1(x)
x2 = self.residual_path2(x)
if self.training:
shake_config = self.shake_config
else:
shake_config = (False, False, False)
alpha, beta = get_alpha_beta(x.size(0), shake_config, x.device)
y = shake_function(x1, x2, alpha, beta)
return self.shortcut(x) + y
class Network(nn.Module):
def __init__(self, cfg):
super(Network, self).__init__()
print("Building Network of resnet Shake-Shake")
self.cfg = cfg
input_shape = (1, self.cfg.TRAIN.IM_CHANNELS, self.cfg.TRAIN.IM_SIZE, \
self.cfg.TRAIN.IM_SIZE)
print(f"Input_shape: {input_shape}")
# input_shape = (1,3,224,224)
n_classes = self.cfg.MODEL.NUM_CLASSES
base_channels = self.cfg.SHAKE_SHAKE.BASE_CHANNELS
#config['base_channels']
depth = self.cfg.SHAKE_SHAKE.DEPTH
#config['depth']
self.shake_config = (self.cfg.SHAKE_SHAKE.FORWARD, self.cfg.SHAKE_SHAKE.BACKWARD, self.cfg.SHAKE_SHAKE.IMAGE)
#(config['shake_forward'], config['shake_backward'],config['shake_image'])
block = BasicBlock
n_blocks_per_stage = (depth - 2) // 6
assert n_blocks_per_stage * 6 + 2 == depth, f"Condition (n_blocks_per_stage * 6 + 2) == model_depth fails as model_depth: {model_depth} \
and n_blocks_per_stage*6 + 2 = {n_blocks_per_stage*6 + 2}"
n_channels = [base_channels, base_channels * 2, base_channels * 4]
self.conv = nn.Conv2d(
input_shape[1],
n_channels[0],
kernel_size=3,
stride=1,
padding=1,
bias=False)
self.bn = nn.BatchNorm2d(base_channels)
self.stage1 = self._make_stage(
n_channels[0], n_channels[0], n_blocks_per_stage, block, stride=1)
self.stage2 = self._make_stage(
n_channels[0], n_channels[1], n_blocks_per_stage, block, stride=2)
self.stage3 = self._make_stage(
n_channels[1], n_channels[2], n_blocks_per_stage, block, stride=2)
# # compute conv feature size
# with torch.no_grad():
# self.feature_size = self._forward_conv(
# torch.zeros(1,3,224,224)).view(-1).shape[0]
self.feature_size = 128
# if self.cfg.TRAIN.DATASET == "IMAGENET":
# self.feature_size = 128
# else:
# self.feature_size = 128
# #raise NotImplementedError
self.fc = nn.Linear(self.feature_size, n_classes)
###
self.penultimate_active = False
#Describe model with source code link
self.description = "Open source implementation of Resnet shake-shake adapted from hysts/pytorch_shake_shake/ repository"
self.source_link = "https://github.com/hysts/pytorch_shake_shake/blob/master/shake_shake.py"
self.model_depth = cfg.MODEL.TRANSFER_MODEL_DEPTH if cfg.TRAIN.TRANSFER_EXP else cfg.MODEL.DEPTH
logger.info('Constructing: Shake Shake ResNet with depth:{}'.format(self.model_depth))
# initialize weights
self.apply(initialize_weights)
def _make_stage(self, in_channels, out_channels, n_blocks, block, stride):
stage = nn.Sequential()
for index in range(n_blocks):
block_name = 'block{}'.format(index + 1)
if index == 0:
stage.add_module(
block_name,
block(
in_channels,
out_channels,
stride=stride,
shake_config=self.shake_config))
else:
stage.add_module(
block_name,
block(
out_channels,
out_channels,
stride=1,
shake_config=self.shake_config))
return stage
def _forward_conv(self, x):
x = F.relu(self.bn(self.conv(x)), inplace=True)
x = self.stage1(x)
x = self.stage2(x)
x = self.stage3(x)
x = F.adaptive_avg_pool2d(x, output_size=1)
return x
def forward(self, x):
x = self._forward_conv(x)
z = x.view(x.size(0), -1)
# print(f"~~z.shape[1]: {z.shape[1]}")
x = self.fc(z)
if self.penultimate_active:
return z,x
return x | PrateekMunjal/TorchAL | pycls/models/resnet_style/resnet_shake_shake.py | resnet_shake_shake.py | py | 7,326 | python | en | code | 56 | github-code | 1 | [
{
"api_name": "pycls.utils.logging.get_logger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pycls.utils.logging",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": ... |
70243838433 | #tire_volume.py
from datetime import datetime
import math
print('This program calculte the tire volume')
print()
form_1 = 0
volume_1 = None
w=float(input('Please Enter the width of the tire in mm: '))
a =float(input('Please Enter the aspect ratio of the tire: '))
d =float(input('Enter the diameter of the wheel in inches : '))
w1 =int(w)
a1 =int(a)
d1 =int(d)
volume_1 = math.pi*(w**2)*a*(w*a+2540*d)/10000000000
print('the volume is: {:,.2f}'.format(volume_1))
current_date_and_time = datetime.now()
print(f"{current_date_and_time:%Y-%m-%d}")
with open("tire_volume.txt",'a')as volume_file:
print(file = volume_file)
print(f"{current_date_and_time:%Y-%m-%d}, {w1}, {a1}, {d1}, {volume_1:,.2f}", file = volume_file)
| CFrancoChavez/CFrancoChavez | tire_volume.py | tire_volume.py | py | 755 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "math.pi",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 21,
"usage_type": "name"
}
] |
15114313938 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Sep 2 11:44:08 2019
@author: ashish
"""
import pandas as pd
import numpy as np
import seaborn as sn
import matplotlib.pyplot as plt
import time
dataset = pd.read_csv('new_appdata10.csv')
# Data Preprocessing
response = dataset["enrolled"]
dataset = dataset.drop(columns='enrolled')
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(dataset, response, test_size=0.2, random_state=0)
train_identifier = X_train['user']
X_train = X_train.drop(columns='user')
test_identifier = X_test['user']
X_test = X_test.drop(columns='user')
from sklearn.preprocessing import StandardScaler
sc_X = StandardScaler()
X_train2 = pd.DataFrame(sc_X.fit_transform(X_train))
X_test2 = pd.DataFrame(sc_X.transform(X_test))
X_train2.columns = X_train.columns.values
X_test2.columns = X_test.columns.values
X_train2.index = X_train.index.values
X_test2.index = X_test.index.values
X_train = X_train2
X_test = X_test2
# Model Building
from sklearn.linear_model import LogisticRegression
classifier = LogisticRegression(random_state=0, penalty='l1')
classifier.fit(X_train, y_train)
y_predict = classifier.predict(X_test)
from sklearn.metrics import confusion_matrix, accuracy_score, f1_score, precision_score, recall_score
cm = confusion_matrix(y_test, y_predict)
accuracy_score(y_test, y_predict)
precision_score(y_test, y_predict)
recall_score(y_test, y_predict)
f1_score(y_test, y_predict)
df_cm = pd.DataFrame(cm, index=(0, 1), columns=(0,1))
plt.figure(figsize=(10,7))
sn.set(font_scale=1.4)
sn.heatmap(df_cm, annot=True, fmt='g')
print("Test Data Accuracy: %0.4f" % accuracy_score(y_test, y_predict))
from sklearn.model_selection import cross_val_score
accuracies = cross_val_score(estimator = classifier, X=X_train, y=y_train, cv=10)
print("Logistic Accuracy: %0.3f (+/- %0.3f)" % (accuracies.mean(), accuracies.std() * 2))
| singhashish4000/MachineLearning-Supervised-Logistic-Regression-DCTSBAB | directing_customers_to_subscribstions_through_app_behavior_analysis_Model.py | directing_customers_to_subscribstions_through_app_behavior_analysis_Model.py | py | 2,008 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pandas.read_csv",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 33,
"usage_type": "call"
... |
20254723614 | """
file: practica1
autor: davidpillco
"""
# Importa el codecs y json
import codecs
import json
# Lee el archivo
archivo = codecs.open("datos.txt","r")
# Lee en lineas
lineas_diccionario = archivo.readlines()
# Pasa los diccionario a lista
lineas_diccionario = [json.loads(l) for l in lineas_diccionario]
# Funcion para evaluar los goles
funcion1 =lambda x:list(x.items())[1][1] > 3
goles = list(filter(funcion1, lineas_diccionario))
# Imprimer los jugadores con los goles mayores a 3
print ("Jugadores con mayor de 3 goles")
print(list(goles))
# Evaluacion de los jugadores de Nigeria
print ("Jugadores de Nigeria")
funcion2 =lambda x:list(x.items())[0][1] == "Nigeria"
pais = list(filter(funcion2, lineas_diccionario))
print(list(pais))
# Evaluacion del valor minimo y maximo de la estatura
# Valos minimo
print ("Jugador con menor estatura")
# Creo una lista de solo las estaturas
estatura = list(map(lambda x:list(x.items())[2][1], lineas_diccionario))
# Se saca el minimo
minimo = min(estatura)
# Se compara la lista con el valor minimo
funcion3 = lambda x:list(x.items())[2][1] == minimo
# Filtra el jugador con la estatura minima
est_min = list(filter(funcion3, lineas_diccionario))
# Imprime el jugador
print(est_min)
# Valor maximo
print ("Jugador con mayor estatura")
# Creo una lista de solo las estaturas
estatura = list(map(lambda x:list(x.items())[2][1], lineas_diccionario))
# Se saca el maximo
maximo = max(estatura)
# Se compara la lista con el valor maximo
funcion4 = lambda x:list(x.items())[2][1] == maximo
# Filtra el jugador con la estatura maximo
est_max = list(filter(funcion4, lineas_diccionario))
# Imprime el jugador
print(est_max)
| ProgFuncionalReactivaoct19-feb20/practica04-davidpillco | practica1.py | practica1.py | py | 1,684 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "codecs.open",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 13,
"usage_type": "call"
}
] |
16070565974 | #!/usr/bin/env python3.8
# -*- coding: utf-8 -*-
"""
https://projecteuler.net/problem=41
We shall say that an n-digit number is pandigital if it makes use of all the digits 1 to n exactly once.
For example, 2143 is a 4-digit pandigital and is also prime.
What is the largest n-digit pandigital prime that exists?
Answer: 7652413
"""
from itertools import permutations
def is_prime(n: int) -> bool:
for i in range(2, int(n ** 0.5) + 1):
if n % i == 0:
return False
return True
def largest_pandigital_prime() -> int:
pandigital_primes = (
number
for length in (7, 4)
# all other length pandigital numbers are divisible by 3 {2: 3, 3: 6, 4: 10, 5: 15, 6: 21, 7: 28, 8: 36, 9: 45}
for number in (int(''.join(digits)) for digits in permutations(reversed('123456789'[:length]), length))
if is_prime(number)
)
return next(pandigital_primes)
if __name__ == '__main__':
from .evaluate import Watchdog
with Watchdog() as wd:
results = wd.evaluate(largest_pandigital_prime)(answer=7652413)
| vikasmunshi/euler | projecteuler/041_pandigital_prime.py | 041_pandigital_prime.py | py | 1,083 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.permutations",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "evaluate.Watchdog",
"line_number": 35,
"usage_type": "call"
}
] |
73454469792 | import os
import ctypes
dir_path = os.path.dirname(os.path.realpath(__file__))
lib = ctypes.cdll.LoadLibrary(dir_path + "/" + "larcv2_to_larcv3.so")
class larcv2_to_larcv3(object):
def __init__(self):
lib.larcv2_to_larcv3_new.argtypes = []
lib.larcv2_to_larcv3_new.restype = ctypes.c_void_p
lib.larcv2_to_larcv3_add_in_file.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
lib.larcv2_to_larcv3_add_in_file.restype = ctypes.c_void_p
lib.larcv2_to_larcv3_set_out_file.argtypes = [ctypes.c_void_p, ctypes.c_char_p]
lib.larcv2_to_larcv3_set_out_file.restype = ctypes.c_void_p
lib.larcv2_to_larcv3_initialize.argtypes = [ctypes.c_void_p]
lib.larcv2_to_larcv3_initialize.restype = ctypes.c_void_p
lib.larcv2_to_larcv3_convert.argtypes = [ctypes.c_void_p, ctypes.c_int, ctypes.c_int]
lib.larcv2_to_larcv3_convert.restype = ctypes.c_void_p
self.obj = lib.larcv2_to_larcv3_new()
def add_in_file(self, in_file):
# in_file = in_file.encode('utf-8')
# # in_file = bytes(in_file, encoding="ascii")
# print(in_file)
# print(type(in_file))
lib.larcv2_to_larcv3_add_in_file(self.obj, in_file.encode('utf-8'))
def set_out_file(self, out_file):
lib.larcv2_to_larcv3_set_out_file(self.obj, out_file.encode('utf-8'))
def initialize(self):
lib.larcv2_to_larcv3_initialize(self.obj)
def convert(self, n_events, n_skip):
lib.larcv2_to_larcv3_convert(self.obj, n_events, n_skip)
import argparse
def main():
parser = argparse.ArgumentParser(description='LArCV2 to larcv3 Conversion script')
parser.add_argument('-il','--input-larcv',required=True,
dest='larcv_fin',nargs='+',
help='string or list, Input larcv file name[s] (Required)')
parser.add_argument('-nevents','--num-events',
type=int, dest='nevents', default=-1,
help='integer, Number of events to process')
parser.add_argument('-nskip','--num-skip',
type=int, dest='nskip', default=0,
help='integer, Number of events to skip before processing')
parser.add_argument('-ol','--output-larcv',default='',
type=str, dest='larcv_fout',
help='string, Output larcv file name (optional)')
args = parser.parse_args()
print(args)
converter = larcv2_to_larcv3()
for file_name in args.larcv_fin:
converter.add_in_file(file_name)
if args.larcv_fout == "":
print("No output file specified, using basename and same path as first input.")
args.larcv_fout = args.larcv_fin[0].replace(".root", ".h5")
converter.set_out_file(args.larcv_fout)
converter.initialize()
converter.convert(args.nevents, args.nskip)
if __name__ == "__main__":
main()
# # help(conversion_libs)
# # print(conversion_libs)
# # conversion_libs.add_in_file | DeepLearnPhysics/larcv2_to_larcv3 | larcv2_to_larcv3.py | larcv2_to_larcv3.py | py | 3,015 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "ctypes.cdll.LoadLibrary",
... |
6033788614 | import typing as t
"""
When inserting a string into a trie we first check if the root node has the
first letter of the string we want to insert.
--> When inserting a new word into the Trie:
We start from the root of the tree. Then we iterate over all chars in the str
to insert. For each char (iteration), I check the current's children dict to see
if it stores the current char.
If it does - all g, move the current there and continue char iteration
If it doesn't - create a new node for the current char, and add to the
current's children
Once we're done inserting the string, mark current as end, so we know there is
a str in the trie spanning from the root down to the current node.
--> When searching for a string in the Trie:
Start from the root. Iterate through the chars in the string comparing curr
char to curr node you're standing on. If at some point the char doesnt exist
in the curr nodes children --> return False
--> When deleting a string from the Trie:
"""
class TrieNode:
def __init__(self):
self.children: t.MutableMapping[str, TrieNode] = {}
self._end_of_string = False
@property
def is_end_of_string(self) -> bool:
return self._end_of_string
def mark_as_end(self) -> None:
self._end_of_string = True
def unmark_as_end(self) -> None:
self._end_of_string = False
class Trie:
def __init__(self):
self.root = TrieNode()
def insert_string(self, string: str) -> bool:
if not string:
return False
current = self.root
for char in string:
node = current.children.get(char)
if not node:
node = TrieNode()
current.children.update({char: node})
current = node
current.mark_as_end()
return True
def search_string(self, string: str) -> bool:
if not string:
return True
current = self.root
for char in string:
node = current.children.get(char)
if not node:
return False
current = node
return True if current.is_end_of_string else False
@staticmethod
def delete_string(root: TrieNode, string: str, index: int = 0) -> bool:
char = string[index]
current = root.children.get(char)
if len(current.children) > 1:
Trie.delete_string(current, string, index + 1)
return False
# At the last node of the string we want to delete while this string
# is a prefix of another string. If the last node has references to
# other characters, unmark it as end; Else delete the node entirely
if index == len(string) - 1:
if len(current.children) >= 1:
current.unmark_as_end()
return False
else:
root.children.pop(char)
return True
# Some other string is a prefix of the current string we're deleting
if current.is_end_of_string:
Trie.delete_string(current, string, index + 1)
return False
# Check if someone is dependent on the char we want to delete
is_safe_to_delete = Trie.delete_string(current, string, index + 1)
if is_safe_to_delete:
root.children.pop(char)
return True
else:
return False
def main():
trie = Trie()
print("Inserted apple:", trie.insert_string("apple"))
print("Inserted applause:", trie.insert_string("applause"))
print("\nChecking if apply exists:", trie.search_string("apple"))
print("Checking if app exists:", trie.search_string("app"))
print("\nDeleting apple")
Trie.delete_string(trie.root, "apple")
print("Checking if apple exists:", trie.search_string("apple"))
if __name__ == "__main__":
main()
| EvgeniiTitov/coding-practice | coding_practice/data_structures/tries/trie_implementation_1.py | trie_implementation_1.py | py | 3,871 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "typing.MutableMapping",
"line_number": 30,
"usage_type": "attribute"
}
] |
41976957988 | import argparse
import logging
import os
import shutil
import subprocess
import time
logging.basicConfig(level=logging.DEBUG)
parser = argparse.ArgumentParser(description="Famous Submitter")
parser.add_argument("-t", "--tag", type=str, help="Dataset tag.")
parser.add_argument("-i", "--input", type=str, help="Input filelist")
parser.add_argument(
"-r",
"--resubmits",
type=int,
default=10,
help="Number of resubmissions.",
required=False,
)
parser.add_argument(
"-hours",
"--hours",
type=float,
default=1.0,
help="Number of hours per resubmission, in addition to the time between sample submissions.",
required=False,
)
parser.add_argument(
"-ms",
"--movesample",
type=int,
default=0,
help="Move each sample after submitting it (accomplishes it during the buffer time between samples set by default in monitor.py).",
)
parser.add_argument(
"-m",
"--move",
type=int,
default=0,
help="Move all samples after all submissions (during the buffer specified by -hours).",
)
parser.add_argument(
"-dry", "--dryrun", type=int, default=0, help="running without submission"
)
options = parser.parse_args()
nResubmits = options.resubmits
nHours = options.hours
tag = options.tag
username = os.environ["USER"]
dataDir = f"/data/submit/{username}/SUEP/{tag}/"
moveDir = f"/work/submit/{username}/SUEP/{tag}/"
# Making sure that the proxy is good
proxy_base = f"x509up_u{os.getuid()}"
home_base = os.environ["HOME"]
proxy_copy = os.path.join(home_base, proxy_base)
regenerate_proxy = False
if not os.path.isfile(proxy_copy):
logging.warning("--- proxy file does not exist")
regenerate_proxy = True
else:
lifetime = subprocess.check_output(
["voms-proxy-info", "--file", proxy_copy, "--timeleft"]
)
lifetime = float(lifetime)
lifetime = lifetime / (60 * 60)
logging.info(f"--- proxy lifetime is {round(lifetime, 1)} hours")
if lifetime < nHours * nResubmits * 1.5:
logging.warning("--- proxy has expired !")
regenerate_proxy = True
if regenerate_proxy:
redone_proxy = False
while not redone_proxy:
status = os.system(
"voms-proxy-init -voms cms --hours=" + str(nHours * nResubmits * 1.5)
)
if os.WEXITSTATUS(status) == 0:
redone_proxy = True
shutil.copyfile("/tmp/" + proxy_base, proxy_copy)
logging.info("Running resubmission script from " + str(os.environ["HOSTNAME"]))
for i in range(nResubmits):
logging.info("Resubmission " + str(i))
logging.info("Removing all jobs...")
os.system("condor_rm {}".format(os.environ["USER"]))
logging.info("Checking for corrupted files and removing them...")
t_start = time.time()
# delete files that are corrupted
subDirs = os.listdir(dataDir)
for subDir in subDirs:
for file in os.listdir(dataDir + subDir):
size = os.path.getsize(dataDir + subDir + "/" + file)
if size == 0:
subprocess.run(["rm", dataDir + subDir + "/" + file])
elif size < 5000:
subprocess.run(["rm", dataDir + subDir + "/" + file])
if not options.dryrun:
logging.info("Executing monitor.py ...")
os.system(
"python3 monitor.py --tag={} --input={} -r=1 -m={}".format(
tag, options.input, options.movesample
)
)
if options.move:
if not os.path.isdir(moveDir):
os.system("mkdir " + moveDir)
subDirs = os.listdir(dataDir)
for subDir in subDirs:
if not os.path.isdir(moveDir + subDir):
os.system("mkdir " + moveDir + subDir)
# get list of files already in /work
movedFiles = os.listdir(moveDir + subDir)
# get list of files in T3
allFiles = os.listdir(dataDir + subDir)
# get list of files missing from /work that are in T3
filesToMove = list(set(allFiles) - set(movedFiles))
# move those files
logging.info(
"Moving " + str(len(filesToMove)) + " files to " + moveDir + subDir
)
for file in filesToMove:
subprocess.run(
[
"xrdcp",
"root://submit50.mit.edu/" + dataDir + subDir + "/" + file,
moveDir + subDir + "/",
]
)
t_end = time.time()
# don't wait if it's the last submission
if i == nResubmits - 1:
logging.info("All done")
break
# wait to resubmit jobs using the parameter <hours>, accounts for time it took to submit them
sleepTime = 60 * 60 * nHours
mod = t_end - t_start
logging.info("Submitting and moving files took " + str(round(mod)) + " seconds")
if sleepTime - mod <= 0:
continue
if nHours > 0:
logging.info("Sleeping for " + str(round(sleepTime - mod)) + " seconds")
logging.info("(" + str(round(nHours - mod * 1.0 / 3600, 2)) + " hours)...")
time.sleep(sleepTime - mod)
| SUEPPhysics/SUEPCoffea_dask | resubmit.py | resubmit.py | py | 5,108 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.envir... |
35423265171 | # Author: Xinyi Wang
# Date: 2021/10/05
import scipy.io as io
import numpy as np
def getCsf(mat_file, video_num):
    """Return the CNN-saliency feature row for one video.

    Loads the precomputed feature matrix stored under key 'feats_mat' in
    *mat_file*, extracts row *video_num*, persists that row to a temp .mat
    file (side effect kept from the original pipeline) and returns it.
    """
    video_num = int(video_num)
    X_mat = io.loadmat(mat_file)
    # np.float was deprecated in NumPy 1.20 and removed in 1.24; use the
    # concrete 64-bit dtype instead.
    X3 = np.asarray(X_mat['feats_mat'], dtype=np.float64)
    cnnsa_feats = X3[video_num]
    # NOTE(review): this relative path must already exist or savemat fails.
    io.savemat('../../tmp/tempmat_path/cnnsa_feats.mat', {'cnnsa_feats': cnnsa_feats})
    return cnnsa_feats
if __name__ == "__main__":
    # Ad-hoc smoke test: extract the features of video index 1 from a
    # cluster-specific feature file.
    mat_file = '/mnt/storage/home/um20242/scratch/RAPIQUE-VSFA-Saliency/feat_file/YOUTUBE_UGC_360P_RAPIQUE_VSFACNN_SALIENCY_feats.mat'
    cnnsa_feats = getCsf(mat_file, 1)
    print(cnnsa_feats)
| xinyiW915/RVS-resize | src/func/get_cnnsafeats.py | get_cnnsafeats.py | py | 642 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scipy.io.loadmat",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "scipy.io",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "numpy.asarray",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_numbe... |
2408166089 | import autograd.numpy as np
import numpy.testing as np_testing
import pytest
import pymanopt
from pymanopt.manifolds import Euclidean, Grassmann, Product, Sphere
class TestProductManifold:
    """Tests for pymanopt's Product manifold built from Euclidean(m, n) x Sphere(n)."""
    @pytest.fixture(autouse=True)
    def setup(self):
        # Product of a 100x50 Euclidean space and a 50-dimensional sphere.
        self.m = m = 100
        self.n = n = 50
        self.euclidean = Euclidean(m, n)
        self.sphere = Sphere(n)
        self.manifold = Product([self.euclidean, self.sphere])
        point = self.manifold.random_point()
        @pymanopt.function.autograd(self.manifold)
        def cost(*x):
            # Sum of squared distances from x to a fixed random `point`.
            return np.sum(
                [np.linalg.norm(a - b) ** 2 for a, b in zip(x, point)]
            )
        self.cost = cost
    def test_dim(self):
        # dim = m*n (Euclidean part) + (n - 1) (sphere part).
        np_testing.assert_equal(
            self.manifold.dim, self.m * self.n + self.n - 1
        )
    def test_typical_dist(self):
        # Typical distance combines the members' typical distances in l2.
        np_testing.assert_equal(
            self.manifold.typical_dist, np.sqrt((self.m * self.n) + np.pi**2)
        )
    def test_dist(self):
        # Product distance is the l2 combination of member distances.
        X = self.manifold.random_point()
        Y = self.manifold.random_point()
        np_testing.assert_equal(
            self.manifold.dist(X, Y),
            np.sqrt(
                self.euclidean.dist(X[0], Y[0]) ** 2
                + self.sphere.dist(X[1], Y[1]) ** 2
            ),
        )
    def test_tangent_vector_multiplication(self):
        # Regression test for https://github.com/pymanopt/pymanopt/issues/49.
        # Scaling a product tangent vector must scale each component.
        manifold = Product((Euclidean(12), Grassmann(12, 3)))
        x = manifold.random_point()
        eta = manifold.random_tangent_vector(x)
        factor = 42
        eta_scaled = eta * factor
        eta_euclidean, eta_grassmann = eta
        eta_euclidean_scaled, eta_grassmann_scaled = eta_scaled
        assert np.allclose(eta_euclidean * factor, eta_euclidean_scaled)
        assert np.allclose(eta_grassmann * factor, eta_grassmann_scaled)
    # def test_inner_product(self):
    # def test_projection(self):
    # def test_euclidean_to_riemannian_hessian(self):
    # def test_retraction(self):
    # def test_norm(self):
    # def test_random_point(self):
    # def test_random_tangent_vector(self):
    # def test_transport(self):
    def test_exp_log_inverse(self):
        # exp(X, log(X, Y)) should land back on Y.
        s = self.manifold
        X = s.random_point()
        Y = s.random_point()
        Yexplog = s.exp(X, tangent_vector=s.log(X, Y))
        np_testing.assert_almost_equal(s.dist(point_a=Y, point_b=Yexplog), 0)
    def test_log_exp_inverse(self):
        # log(X, exp(X, U)) should recover the tangent vector U.
        s = self.manifold
        X = s.random_point()
        U = s.random_tangent_vector(X)
        Ulogexp = s.log(X, s.exp(X, U))
        np_testing.assert_array_almost_equal(U[0], Ulogexp[0])
        np_testing.assert_array_almost_equal(U[1], Ulogexp[1])
    def test_pair_mean(self):
        # The pair mean must be equidistant from both endpoints.
        s = self.manifold
        X = s.random_point()
        Y = s.random_point()
        Z = s.pair_mean(X, Y)
        np_testing.assert_array_almost_equal(s.dist(X, Z), s.dist(Y, Z))
| pymanopt/pymanopt | tests/manifolds/test_product.py | test_product.py | py | 2,956 | python | en | code | 651 | github-code | 1 | [
{
"api_name": "pymanopt.manifolds.Euclidean",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pymanopt.manifolds.Sphere",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "pymanopt.manifolds.Product",
"line_number": 16,
"usage_type": "call"
},
{
... |
74540295072 | import json
import os
from tornado import web
from .base import BaseApiHandler, check_xsrf, check_notebook_dir
from ...api import MissingEntry
class StatusHandler(BaseApiHandler):
    """Liveness probe: always reports the formgrader as up."""
    @web.authenticated
    @check_xsrf
    def get(self):
        self.write({"status": True})
class GradeCollectionHandler(BaseApiHandler):
    """List all grades of one submitted notebook (?submission_id=...)."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self):
        submission_id = self.get_argument("submission_id")
        try:
            notebook = self.gradebook.find_submission_notebook_by_id(submission_id)
        except MissingEntry:
            raise web.HTTPError(404)
        self.write(json.dumps([g.to_dict() for g in notebook.grades]))
class CommentCollectionHandler(BaseApiHandler):
    """List all comments of one submitted notebook (?submission_id=...)."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self):
        submission_id = self.get_argument("submission_id")
        try:
            notebook = self.gradebook.find_submission_notebook_by_id(submission_id)
        except MissingEntry:
            raise web.HTTPError(404)
        self.write(json.dumps([c.to_dict() for c in notebook.comments]))
class GradeHandler(BaseApiHandler):
    """Read (GET) and update (PUT) a single grade record."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self, grade_id):
        try:
            grade = self.gradebook.find_grade_by_id(grade_id)
        except MissingEntry:
            raise web.HTTPError(404)
        self.write(json.dumps(grade.to_dict()))
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def put(self, grade_id):
        try:
            grade = self.gradebook.find_grade_by_id(grade_id)
        except MissingEntry:
            raise web.HTTPError(404)
        data = self.get_json_body()
        grade.manual_score = data.get("manual_score", None)
        grade.extra_credit = data.get("extra_credit", None)
        # A grade still needs manual attention only when neither a manual
        # nor an autograded score exists.
        if grade.manual_score is None and grade.auto_score is None:
            grade.needs_manual_grade = True
        else:
            grade.needs_manual_grade = False
        self.gradebook.db.commit()
        self.write(json.dumps(grade.to_dict()))
class CommentHandler(BaseApiHandler):
    """Read (GET) and update (PUT) a single manual comment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self, grade_id):
        try:
            comment = self.gradebook.find_comment_by_id(grade_id)
        except MissingEntry:
            raise web.HTTPError(404)
        self.write(json.dumps(comment.to_dict()))
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def put(self, grade_id):
        try:
            comment = self.gradebook.find_comment_by_id(grade_id)
        except MissingEntry:
            raise web.HTTPError(404)
        data = self.get_json_body()
        comment.manual_comment = data.get("manual_comment", None)
        self.gradebook.db.commit()
        self.write(json.dumps(comment.to_dict()))
class FlagSubmissionHandler(BaseApiHandler):
    """Toggle the ``flagged`` marker on a submitted notebook."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def post(self, submission_id):
        try:
            submission = self.gradebook.find_submission_notebook_by_id(submission_id)
        except MissingEntry:
            raise web.HTTPError(404)
        submission.flagged = not submission.flagged
        self.gradebook.db.commit()
        self.write(json.dumps(submission.to_dict()))
class AssignmentCollectionHandler(BaseApiHandler):
    """List all assignments known to the gradebook."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self):
        assignments = self.api.get_assignments()
        self.write(json.dumps(assignments))
class AssignmentHandler(BaseApiHandler):
    """Read one assignment (GET), or create/update its due date (PUT)."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self, assignment_id):
        assignment = self.api.get_assignment(assignment_id)
        if assignment is None:
            raise web.HTTPError(404)
        self.write(json.dumps(assignment))
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def put(self, assignment_id):
        data = self.get_json_body()
        duedate = data.get("duedate_notimezone", None)
        timezone = data.get("duedate_timezone", None)
        # The client sends the local due date and its timezone separately;
        # recombine them into a single parseable string.
        if duedate and timezone:
            duedate = duedate + " " + timezone
        assignment = {"duedate": duedate}
        assignment_id = assignment_id.strip()
        self.gradebook.update_or_create_assignment(assignment_id, **assignment)
        # Ensure the assignment's source directory exists on disk.
        sourcedir = os.path.abspath(self.coursedir.format_path(self.coursedir.source_directory, '.', assignment_id))
        if not os.path.isdir(sourcedir):
            os.makedirs(sourcedir)
        self.write(json.dumps(self.api.get_assignment(assignment_id)))
class NotebookCollectionHandler(BaseApiHandler):
    """List the notebooks belonging to one assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self, assignment_id):
        notebooks = self.api.get_notebooks(assignment_id)
        self.write(json.dumps(notebooks))
class SubmissionCollectionHandler(BaseApiHandler):
    """List all submissions of one assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self, assignment_id):
        submissions = self.api.get_submissions(assignment_id)
        self.write(json.dumps(submissions))
class SubmissionHandler(BaseApiHandler):
    """Fetch one student's submission of one assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self, assignment_id, student_id):
        submission = self.api.get_submission(assignment_id, student_id)
        if submission is None:
            raise web.HTTPError(404)
        self.write(json.dumps(submission))
class SubmittedNotebookCollectionHandler(BaseApiHandler):
    """List the submitted copies of one notebook within an assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self, assignment_id, notebook_id):
        submissions = self.api.get_notebook_submissions(assignment_id, notebook_id)
        self.write(json.dumps(submissions))
class StudentCollectionHandler(BaseApiHandler):
    """List every student in the gradebook."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self):
        students = self.api.get_students()
        self.write(json.dumps(students))
class StudentHandler(BaseApiHandler):
    """Read one student record (GET), or create/update name and email (PUT)."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self, student_id):
        student = self.api.get_student(student_id)
        if student is None:
            raise web.HTTPError(404)
        self.write(json.dumps(student))
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def put(self, student_id):
        data = self.get_json_body()
        # Missing fields default to None, clearing the stored value.
        student = {
            "last_name": data.get("last_name", None),
            "first_name": data.get("first_name", None),
            "email": data.get("email", None),
        }
        student_id = student_id.strip()
        self.gradebook.update_or_create_student(student_id, **student)
        self.write(json.dumps(self.api.get_student(student_id)))
class StudentSubmissionCollectionHandler(BaseApiHandler):
    """List every submission made by one student."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self, student_id):
        submissions = self.api.get_student_submissions(student_id)
        self.write(json.dumps(submissions))
class StudentNotebookSubmissionCollectionHandler(BaseApiHandler):
    """List one student's submitted notebooks for one assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def get(self, student_id, assignment_id):
        submissions = self.api.get_student_notebook_submissions(student_id, assignment_id)
        self.write(json.dumps(submissions))
class AssignHandler(BaseApiHandler):
    """Generate the student (release) version of an assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def post(self, assignment_id):
        self.write(json.dumps(self.api.generate_assignment(assignment_id)))
class UnReleaseHandler(BaseApiHandler):
    """Withdraw a previously released assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def post(self, assignment_id):
        self.write(json.dumps(self.api.unrelease(assignment_id)))
class ReleaseHandler(BaseApiHandler):
    """Release an assignment to students."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def post(self, assignment_id):
        self.write(json.dumps(self.api.release_assignment(assignment_id)))
class CollectHandler(BaseApiHandler):
    """Collect submitted work for an assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def post(self, assignment_id):
        self.write(json.dumps(self.api.collect(assignment_id)))
class AutogradeHandler(BaseApiHandler):
    """Autograde one student's submission of an assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def post(self, assignment_id, student_id):
        self.write(json.dumps(self.api.autograde(assignment_id, student_id)))
class GenerateAllFeedbackHandler(BaseApiHandler):
    """Generate feedback for every submission of an assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def post(self, assignment_id):
        self.write(json.dumps(self.api.generate_feedback(assignment_id)))
class ReleaseAllFeedbackHandler(BaseApiHandler):
    """Release feedback for every submission of an assignment."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def post(self, assignment_id):
        self.write(json.dumps(self.api.release_feedback(assignment_id)))
class GenerateFeedbackHandler(BaseApiHandler):
    """Generate feedback for one student's submission."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def post(self, assignment_id, student_id):
        self.write(json.dumps(self.api.generate_feedback(assignment_id, student_id)))
class ReleaseFeedbackHandler(BaseApiHandler):
    """Release feedback for one student's submission."""
    @web.authenticated
    @check_xsrf
    @check_notebook_dir
    def post(self, assignment_id, student_id):
        self.write(json.dumps(self.api.release_feedback(assignment_id, student_id)))
# URL routes for the formgrader REST API. Each ([^/]+) group captures an
# assignment, notebook, student, submission or grade identifier and is
# passed positionally to the matching handler method.
default_handlers = [
    (r"/formgrader/api/status", StatusHandler),
    (r"/formgrader/api/assignments", AssignmentCollectionHandler),
    (r"/formgrader/api/assignment/([^/]+)", AssignmentHandler),
    (r"/formgrader/api/assignment/([^/]+)/assign", AssignHandler),
    (r"/formgrader/api/assignment/([^/]+)/unrelease", UnReleaseHandler),
    (r"/formgrader/api/assignment/([^/]+)/release", ReleaseHandler),
    (r"/formgrader/api/assignment/([^/]+)/collect", CollectHandler),
    (r"/formgrader/api/assignment/([^/]+)/generate_feedback", GenerateAllFeedbackHandler),
    (r"/formgrader/api/assignment/([^/]+)/release_feedback", ReleaseAllFeedbackHandler),
    (r"/formgrader/api/assignment/([^/]+)/([^/]+)/generate_feedback", GenerateFeedbackHandler),
    (r"/formgrader/api/assignment/([^/]+)/([^/]+)/release_feedback", ReleaseFeedbackHandler),
    (r"/formgrader/api/notebooks/([^/]+)", NotebookCollectionHandler),
    (r"/formgrader/api/submissions/([^/]+)", SubmissionCollectionHandler),
    (r"/formgrader/api/submission/([^/]+)/([^/]+)", SubmissionHandler),
    (r"/formgrader/api/submission/([^/]+)/([^/]+)/autograde", AutogradeHandler),
    (r"/formgrader/api/submitted_notebooks/([^/]+)/([^/]+)", SubmittedNotebookCollectionHandler),
    (r"/formgrader/api/submitted_notebook/([^/]+)/flag", FlagSubmissionHandler),
    (r"/formgrader/api/grades", GradeCollectionHandler),
    (r"/formgrader/api/grade/([^/]+)", GradeHandler),
    (r"/formgrader/api/comments", CommentCollectionHandler),
    (r"/formgrader/api/comment/([^/]+)", CommentHandler),
    (r"/formgrader/api/students", StudentCollectionHandler),
    (r"/formgrader/api/student/([^/]+)", StudentHandler),
    (r"/formgrader/api/student_submissions/([^/]+)", StudentSubmissionCollectionHandler),
    (r"/formgrader/api/student_notebook_submissions/([^/]+)/([^/]+)", StudentNotebookSubmissionCollectionHandler),
]
| jupyter/nbgrader | nbgrader/server_extensions/formgrader/apihandlers.py | apihandlers.py | py | 11,194 | python | en | code | 1,232 | github-code | 1 | [
{
"api_name": "base.BaseApiHandler",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "tornado.web.authenticated",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "tornado.web",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "base.c... |
39151193505 | # Driving linear regression algo
# y = mx+b
# m = mean(x)mean(y)-mean(xy)/mean(x)square-((x)square)mean
# b = mean(y) - m(mean(x))
# ^ _
# y = 1-SE(y)/SE(y)
from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import random
style.use('fivethirtyeight')
# xs = np.array([1, 2, 3, 4, 5, 6], dtype=np.float)
# ys = np.array([5, 4, 6, 5, 6, 7], dtype=np.float)
# create a random data points
# varience : how much each point can vary from the previous point
# step : how far to step on average per point
def create_dataset(howmany, varience, step=2, correlation=False):
    """Generate a noisy toy dataset for the regression demo.

    howmany     -- number of points to produce
    varience    -- each y may deviate up to this much from the trend value
    step        -- average change of the trend per point
    correlation -- 'pos', 'neg', or falsy for no trend
    """
    ys = []
    baseline = 1
    for _ in range(howmany):
        ys.append(baseline + random.randrange(-varience, varience))
        if correlation == 'pos':
            baseline += step
        elif correlation == 'neg':
            baseline -= step
    xs = list(range(len(ys)))
    return np.array(xs, dtype=np.float64), np.array(ys, dtype=np.float64)
# Demo data: 40 positively-correlated noisy samples.
xs, ys = create_dataset(40, 10, correlation='pos')
def best_fit(xs, ys):
    """Return the slope m and intercept b of the least-squares line
    through the points (xs, ys).

    m = (mean(x)*mean(y) - mean(x*y)) / (mean(x)^2 - mean(x^2))
    b = mean(y) - m * mean(x)
    """
    x_bar = mean(xs)
    y_bar = mean(ys)
    m = (x_bar * y_bar - mean(xs * ys)) / (x_bar ** 2 - mean(xs ** 2))
    b = y_bar - m * x_bar
    return m, b
m, b = best_fit(xs, ys)
# Evaluate the fitted line y = m*x + b at every sample x.
regression_line = [(m*x)+b for x in xs]
# predictions: extrapolate the fitted line to unseen x values 41..49.
given_x = []
for i in range(41, 50, 1):
    given_x.append(i)
predict_y = [(m*x)+b for x in given_x]
# Coefficient of determination used below:
# r^2 = 1 - SE(y_hat) / SE(y_mean)
def square_error(ys_orig, ys_line):
    """Return the sum of squared residuals between the two y-series."""
    return sum((ys_line - ys_orig) ** 2)
def coefficient_of_determination(ys_orig, ys_line):
    """Return r^2 = 1 - SE(fit) / SE(mean baseline) for the fitted line.

    1.0 means a perfect fit; 0.0 means no better than predicting the mean.
    """
    # Baseline: a flat line at the mean of the observed values.
    # (The dummy-variable loop and leftover debug print are removed.)
    y_mean_line = [mean(ys_orig)] * len(ys_orig)
    squared_error_regr = square_error(ys_orig, ys_line)
    squared_error_y_mean = square_error(ys_orig, y_mean_line)
    return 1 - (squared_error_regr / squared_error_y_mean)
r_squared = coefficient_of_determination(ys, regression_line)
print(r_squared)
# Plot the raw data, the fitted line, and the red extrapolated predictions.
plt.scatter(xs, ys, label='data')
plt.legend(loc=4)
plt.plot(given_x, predict_y, color='red')
plt.plot(xs, regression_line)
plt.show()
| Manukhurana97/Machine-Learning1 | 2 Linear Regression :drive(Best fit).py | 2 Linear Regression :drive(Best fit).py | py | 2,265 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "matplotlib.style.use",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.style",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "random.randrange",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.array",
... |
39695926581 | import datetime as dt
import pandas as pd
import os
import smtplib
import json
from dotenv import load_dotenv
def fetch_planner():
    """Load the next TIME_PERIOD days of the org planner spreadsheet.

    File location and sheet layout come from environment variables.
    Returns a DataFrame indexed by employee name ("Namn") with one
    column per upcoming calendar day.
    """
    TIME_PERIOD = int(os.environ["TIME_PERIOD"])
    ORG_PLANNER_PATH = os.environ["ORG_PLANNER_PATH"]
    ORG_PLANNER_SHEET_NAME = os.environ["ORG_PLANNER_SHEET_NAME"]
    ORG_PLANNER_HEADER_ROW = int(os.environ["ORG_PLANNER_HEADER_ROW"])
    ORG_PLANNER_COLUMNS = os.environ["ORG_PLANNER_COLUMNS"]
    planner = pd.read_excel(io=ORG_PLANNER_PATH, sheet_name=ORG_PLANNER_SHEET_NAME, header=ORG_PLANNER_HEADER_ROW, usecols=ORG_PLANNER_COLUMNS)
    # NOTE(review): rows 0-43 / dropping rows 0-1 are sheet-specific
    # bounds — confirm against the workbook if the layout changes.
    planner = planner.loc[0:43]
    planner = planner.drop([0,1])
    planner.columns.values[0] = "Namn"
    planner.dropna(subset="Namn", inplace=True)
    planner.set_index("Namn", inplace=True)
    # Normalize to midnight so lookups match the sheet's date column headers.
    today = dt.datetime.today().replace(hour=0, minute=0, second=0, microsecond=0)
    period_dates = []
    for i in range(TIME_PERIOD):
        date = today + dt.timedelta(days=i)
        period_dates.append(date)
    # Keep only the columns covering the upcoming period.
    planner = planner.loc[:,period_dates]
    return planner
def construct_message(planner, name):
    """Build the per-day digest lines for *name* from the planner frame.

    Each returned string covers one day: the job (or "Oplanerad" when the
    cell is empty) followed by "->teammate" lines for colleagues assigned
    to the same job that day. Weekend dates are suffixed with "(Helg)".
    """
    me = name
    my_schedule = planner.loc[me]
    jobs = {}
    for day, assignment in my_schedule.items():
        label = str(day).split()[0]
        if day.weekday() > 4:
            label += "(Helg)"
        if pd.isna(assignment):
            jobs[label] = {"job": "Oplanerad", "team": []}
        else:
            teammates = [
                colleague
                for colleague, their_job in planner[day].items()
                if their_job == assignment and colleague != me
            ]
            jobs[label] = {"job": assignment, "team": teammates}
    msg = []
    for label, info in jobs.items():
        line = f"{label}: {info['job']}\n"
        if info["job"] != "Oplanerad" and info["team"] != []:
            for teammate in info["team"]:
                line = line + f"->{teammate}\n"
        msg.append(line)
    return msg
def write_msg_to_file(msg):
    """Dump the digest lines verbatim to message.txt in the working dir."""
    with open("message.txt", "w") as out:
        out.writelines(msg)
    return
def mail_daily_digest(message, email_to):
    """Send the digest lines to *email_to* via Gmail SMTP with STARTTLS.

    Sender address and password come from the EMAIL_FROM / EMAIL_PASSWORD
    environment variables.
    """
    EMAIL_FROM = os.environ["EMAIL_FROM"]
    EMAIL_TO = email_to
    EMAIL_PASSWORD = os.environ["EMAIL_PASSWORD"]
    SUBJECT = "Subject:Daily Digest\n\n"
    SIG = "\nHa en bra dag!"
    HOST = "smtp.gmail.com"
    PORT= 587
    # Assemble the raw SMTP payload: subject header, body, signature.
    msg = ""
    for index in message:
        msg += index
    msg = SUBJECT + msg + SIG
    # Encode so non-ASCII (Swedish) characters survive transport.
    msg = msg.encode("utf-8")
    server = smtplib.SMTP(HOST, PORT)
    server.starttls()
    server.login(EMAIL_FROM, EMAIL_PASSWORD)
    server.sendmail(EMAIL_FROM, EMAIL_TO, msg)
    server.quit()
    return
def main():
    """Entry point: load the mailing list and send each person their digest."""
    load_dotenv()
    # .maillist.json maps employee name -> email address.
    with open(".maillist.json", "r") as data:
        maillist = json.load(data)
    planner = fetch_planner()
    for name, mail in maillist.items():
        message = construct_message(planner, name)
        mail_daily_digest(message, mail)
if __name__ == "__main__":
    main()
| adrian-bjorling/daily-digest | app.py | app.py | py | 3,019 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"lin... |
24181921860 | import nltk
from nltk.tokenize import word_tokenize, sent_tokenize
import heapq
nltk.download('stopwords')
from nltk.corpus import stopwords
def nltk_summarizer(raw_text):
    """Frequency-based extractive summary of *raw_text*.

    Scores each sentence by the sum of its (stopword-filtered, normalized)
    word frequencies and returns the top 7 sentences joined together.

    Fixes over the original: sentence scores now accumulate instead of
    being overwritten by the last matching word, the <30-words length
    filter actually takes effect (both branches used to be identical),
    and empty input no longer crashes max().
    """
    stopWords = set(stopwords.words("english"))
    word_frequency = {}
    for word in nltk.word_tokenize(raw_text):
        if word not in stopWords:
            word_frequency[word] = word_frequency.get(word, 0) + 1
    if not word_frequency:
        return ""
    # Normalize counts to [0, 1] relative to the most frequent word.
    maximum_frequency = max(word_frequency.values())
    for word in word_frequency:
        word_frequency[word] = word_frequency[word] / maximum_frequency
    # NOTE(review): frequencies are built on the raw (cased) text while
    # sentences are matched lowercased, as in the original — cased words
    # may therefore not contribute to sentence scores.
    sentence_list = nltk.sent_tokenize(raw_text)
    sentence_score = {}
    for sent in sentence_list:
        if len(sent.split(' ')) >= 30:
            continue  # skip very long sentences
        for word in nltk.word_tokenize(sent.lower()):
            if word in word_frequency:
                sentence_score[sent] = sentence_score.get(sent, 0) + word_frequency[word]
    summary_sentence = heapq.nlargest(7, sentence_score, key=sentence_score.get)
    summary = ' '.join(summary_sentence)
    return summary
{
"api_name": "nltk.download",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords.words",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "nltk.corpus.stopwords",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "nltk.wor... |
8651064290 | from django.shortcuts import render, redirect
from .models import Csvdata
import csv, io, os
from django.http import HttpResponse, Http404
from myproject.settings import STATIC_ROOT
# Home page
def home(request):
    """Render the landing page."""
    return render(request, 'myapp/home.html')
# Read sqlite3.db, load datas
def showdata(request):
    """Render the first rows of the stored PTP samples.

    Passes parallel lists (timestamp, UTC, master offset, frequency,
    path delay) to the template.
    """
    realtime_timestamp = []
    utc = []
    master_offset = []
    frequency = []
    path_delay = []
    # Fix: don't shadow the module-level ``csv`` import with the queryset,
    # and let the database apply the row cap instead of a manual counter.
    # The original loop collected 11 rows (it only broke once count
    # exceeded 10), so [:11] preserves that behaviour.
    for row in Csvdata.objects.all().values_list()[:11]:
        realtime_timestamp.append(row[1])
        utc.append(row[2])
        master_offset.append(row[3])
        frequency.append(row[4])
        path_delay.append(row[5])
    return render(request, 'myapp/showdata.html',
                  {'realtime_timestamp': realtime_timestamp,
                   'utc': utc,
                   'master_offset': master_offset,
                   'frequency': frequency,
                   'path_delay': path_delay,
                   })
# Empty the contents of the database
def deletedata(request):
    """Delete every Csvdata row; show an error page when already empty."""
    if Csvdata.objects.count() != 0:
        Csvdata.objects.all().delete()
        return render(request, 'myapp/deletedata.html')
    else:
        text = "DB is empty"
        return render(request, 'myapp/error.html', {'text': text})
# Print the graph on the web
def showgraph(request):
    """Render the master-offset-vs-timestamp graph page."""
    offset = []
    timestamp = []
    # Fix: don't shadow the module-level ``csv`` import with the queryset.
    for row in Csvdata.objects.all().values_list():
        timestamp.append(row[1])
        offset.append(row[3])
    return render(request, 'myapp/showgraph.html', {
        'offset': offset,
        'timestamp': timestamp
    })
# Print the zoom in/out on the web
def showstock(request):
    """Render the zoomable (stock-style) master-offset graph page.

    Same data as showgraph(), different template.
    """
    offset = []
    timestamp = []
    # Fix: don't shadow the module-level ``csv`` import with the queryset.
    for row in Csvdata.objects.all().values_list():
        timestamp.append(row[1])
        offset.append(row[3])
    return render(request, 'myapp/showstock.html', {
        'offset': offset,
        'timestamp': timestamp
    })
# Rendering userform page
def userform(request):
    """Render the CSV upload form."""
    return render(request, 'myapp/userform.html')
# Check if the data exits in database, check if it is a csv file
def filecheck(request):
    """Validate an uploaded CSV and load its rows into the Csvdata table.

    Rejects the upload when the table already holds data, when no file
    was sent, or when the filename does not look like a CSV.
    """
    if Csvdata.objects.count() != 0:
        text = "The Data already exits!"
        return render(request, 'myapp/error.html', {'text': text})
    else:
        if request.FILES.__len__()==0:
            text = "No uplaod file"
            return render(request, 'myapp/error.html', {'text': text})
        else:
            uploadFile = request.FILES['file']
            # NOTE(review): 'csv' anywhere in the name passes this check
            # (e.g. "csv_backup.txt"); endswith('.csv') would be stricter.
            if uploadFile.name.find('csv')<0:
                text = "This file is not csv file"
                return render(request, 'myapp/error.html', {'text': text})
            else:
                read = uploadFile.read().decode('utf8')
                readLine = read.split('\n')
                tmp_str = []
                for line in readLine:
                    tmp_str.append(line.split(','))
                # Skip the header row (index 0) and the trailing empty line.
                # NOTE(review): hand-rolled splitting breaks on quoted
                # fields; the stdlib csv module would be safer.
                for i in range(1,len(tmp_str)-1):
                    Csvdata.objects.create(realtime_timestamp=tmp_str[i][1], utc=tmp_str[i][2],master_offset=tmp_str[i][3],frequency=tmp_str[i][4],path_delay=tmp_str[i][5])
                return render(request, 'myapp/filecheck.html')
# Download the csv file already uploaded to the server 'static' folder
def downloadcsv(request):
    """Serve static/testfile.csv as an Excel-typed download; 404 if absent."""
    file_path = os.path.join(STATIC_ROOT, 'testfile.csv')
    if os.path.exists(file_path):
        with open(file_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="application/vnd.ms-Excel")
            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
            return response
    raise Http404
# Download the rms csv file already uploaded to the server 'static' folder
def downloadrmscsv(request):
    """Serve static/rms_testfile.csv as an Excel-typed download; 404 if absent."""
    file_path = os.path.join(STATIC_ROOT, 'rms_testfile.csv')
    if os.path.exists(file_path):
        with open(file_path, 'rb') as fh:
            response = HttpResponse(fh.read(), content_type="application/vnd.ms-Excel")
            response['Content-Disposition'] = 'inline; filename=' + os.path.basename(file_path)
            return response
    raise Http404
{
"api_name": "django.shortcuts.render",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.Csvdata.objects.all",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "models.Csvdata.objects",
"line_number": 13,
"usage_type": "attribute"
},
{
"api... |
2067953896 | import cv2
from datetime import datetime
from app.ml.yolo_v3 import YoloV3
import kivy
kivy.require('1.0.6')
from kivy.core.window import Window
from kivymd.app import MDApp
from kivy.clock import Clock
from kivymd.uix.screen import MDScreen
from kivymd.uix.button.button import MDIconButton
from kivymd.uix.label.label import MDLabel
from kivy.uix.image import Image
from kivy.uix.anchorlayout import AnchorLayout
from kivy.graphics.texture import Texture
from constants import Constants
class WildlifeCameraTrapApp(MDApp):
    """Kivy app that watches a webcam feed and, while armed, records clips
    whenever YOLOv3 detects objects — saving both a raw and an annotated
    video per detection episode."""
    # Detections from the most recent YOLO pass (drawn until they fade out).
    boxes = []
    class_names = []
    confidences = []
    # Whether detection is armed, and whether a clip is currently written.
    should_detect = False
    recording = False
    # cv2.VideoWriter instances for the raw and annotated streams.
    original_recorder = None
    marked_recorder = None
    last_object_detected_time = None
    fourcc = cv2.VideoWriter_fourcc('m', 'p', '4', 'v')
    def build(self):
        """Load the detector and build the widget tree."""
        self._load_model()
        return self._create_layout()
    # Run continuously to get webcam feed
    def update(self, *args):
        """Per-frame callback: grab a frame, optionally run detection,
        draw fading boxes, append to recordings and refresh the texture."""
        _, frame = self.capture.read()
        elapsed_time = (datetime.now() - self.starting_time).total_seconds()
        # Average FPS since app start; used as the recording frame rate.
        current_fps = self.frame_id / elapsed_time
        original_frame = frame.copy()
        self.frame_id += 1
        if self.should_detect:
            boxes, confidences, class_names = self.yolo_v3.detect_objects(frame)
            # when objects are detected
            if len(boxes):
                if not self.recording:
                    # Start both recorders, named after the detection time.
                    self.recording = True
                    self.original_recorder = cv2.VideoWriter(
                        f'./app/data/{self.last_object_detected_time.strftime(Constants.FILENAME_DATEFORMAT)}_original.mp4',
                        self.fourcc,
                        current_fps,
                        (original_frame.shape[1],
                         original_frame.shape[0]),
                        1)
                    self.marked_recorder = cv2.VideoWriter(
                        f'./app/data/{self.last_object_detected_time.strftime(Constants.FILENAME_DATEFORMAT)}_marked.mp4',
                        self.fourcc,
                        current_fps,
                        (frame.shape[1],
                         frame.shape[0]),
                        1)
                self.boxes = boxes
                self.confidences = confidences
                self.class_names = class_names
                self.last_object_detected_time = datetime.now()
                self.recording_status_label.text = Constants.OBJECT_RECORDING
                print('detecting objects')
            # Only check if still recording
            elif self.recording:
                # Stop once nothing has been seen for IDLE_SECONDS.
                if self._seconds_since_last_detection() > Constants.IDLE_SECONDS:
                    self._stop_recording()
                    self.recording_status_label.text = Constants.NO_OBJECT_DETECTED
                    print('not detecting objects, going idle')
                    # Clean up old detections
                    self.boxes = []
                    self.class_names = []
                    self.confidences = []
                else:
                    print('not detecting objects')
        for box, confidence, class_name in zip(self.boxes, self.confidences, self.class_names):
            x, y, w, h = box
            color = Constants.CLASS_COLORS_TO_USE[class_name]
            cv2.rectangle(frame, (x, y), (x + w, y + h), color, 2)
            cv2.rectangle(frame, (x, y), (x + w, y + 30), color, -1)
            cv2.putText(frame,
                        f'{class_name} ({confidence:.1%})',
                        (x, y + 30),
                        self.font,
                        2,
                        (255, 255, 255, 0),
                        2,
                        lineType=cv2.LINE_AA)
        opacity = max(1 - (self._seconds_since_last_detection() / Constants.IDLE_SECONDS), 0)
        # We show the bounding boxes with different opacity to indicate the time since last detection
        # Most opaque => most recent detection
        frame = cv2.addWeighted(frame, opacity, original_frame, 1 - opacity, gamma=0)
        if self.recording:
            self.marked_recorder.write(frame)
            print('add frame')
            self.original_recorder.write(original_frame)
        # Flip horizontal and convert image to texture
        buf = cv2.flip(frame, 0).tobytes()
        img_texture = Texture.create(size=(frame.shape[1], frame.shape[0]), colorfmt='bgr')
        img_texture.blit_buffer(buf, colorfmt='bgr', bufferfmt='ubyte')
        self.web_cam.texture = img_texture
    # Recording function to capture objects if present
    def start_stop_recording(self, *args):
        """Toggle detection from the record button; finalize any clip in
        progress when turning detection off."""
        if self.start_stop_recording_button.icon == Constants.START_RECORDING:
            self.should_detect = True
            self.start_stop_recording_button.icon = Constants.STOP_RECORDING
            self.recording_status_label.text = Constants.NO_OBJECT_DETECTED
        elif self.start_stop_recording_button.icon == Constants.STOP_RECORDING:
            self.should_detect = False
            if self.recording:
                self._stop_recording()
            self.start_stop_recording_button.icon = Constants.START_RECORDING
            self.recording_status_label.text = Constants.NOT_RECORDING
    def _create_layout(self):
        """Open the camera, schedule update() and assemble the screen."""
        # Loading camera
        self.capture = cv2.VideoCapture(0)
        self.font = cv2.FONT_HERSHEY_COMPLEX_SMALL
        self.starting_time = datetime.now()
        self.frame_id = 0
        self.last_object_detected_time = datetime.now()
        Clock.schedule_interval(self.update, timeout=(1.0/Constants.MIN_FPS))
        # Main layout components
        self.should_detect = False
        self.recording = False
        self.web_cam = Image(
            size=Window.size,
            size_hint=(1, 1)
        )
        self.start_stop_recording_button = MDIconButton(
            icon=Constants.START_RECORDING,
            icon_color=(1, 0, 0, 1),
            md_bg_color=(1, 1, 1, 1),
            icon_size='40sp',
            on_press=self.start_stop_recording
        )
        self.recording_status_label = MDLabel(
            text=Constants.NOT_RECORDING,
            size_hint=(1, .1),
            halign="center",
            font_style="H5"
        )
        base_screen = MDScreen()
        base_screen.add_widget(self.web_cam)
        top_center = AnchorLayout(anchor_x='center', anchor_y='top', padding=[10, 10, 10, 10])
        top_center.add_widget(self.recording_status_label)
        base_screen.add_widget(top_center)
        side_layout = AnchorLayout(anchor_x='right', anchor_y='center', padding=[10, 10, 10, 10])
        side_layout.add_widget(self.start_stop_recording_button)
        base_screen.add_widget(side_layout)
        return base_screen
    def _load_model(self):
        """Instantiate the YOLOv3 detector."""
        self.yolo_v3 = YoloV3()
    def _seconds_since_last_detection(self):
        """Seconds elapsed since an object was last detected."""
        return (datetime.now() - self.last_object_detected_time).total_seconds()
    def _stop_recording(self):
        """Finalize and release both video writers."""
        self.recording = False
        self.original_recorder.release()
        self.original_recorder = None
        self.marked_recorder.release()
        self.marked_recorder = None
if __name__ == '__main__':
    # Start the Kivy event loop.
    WildlifeCameraTrapApp().run()
| nellymin/WildlifeCameraTrap | app/main.py | main.py | py | 7,267 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "kivy.require",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "kivymd.app.MDApp",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "cv2.VideoWriter_fourcc",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "datetime.datetime... |
191886024 | #!/usr/bin/env python
"""Sample script of word embedding model.
This code implements skip-gram model and continuous-bow model.
Use ../ptb/download.py to download 'ptb.train.txt'.
"""
import argparse
import collections
import time
import numpy as np
import six.moves.cPickle as pickle
import chainer
from chainer import cuda
import chainer.functions as F
import chainer.optimizers as O
# Command-line configuration for the word2vec training run.
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', '-g', default=-1, type=int,
                    help='GPU ID (negative value indicates CPU)')
parser.add_argument('--unit', '-u', default=100, type=int,
                    help='number of units')
parser.add_argument('--window', '-w', default=5, type=int,
                    help='window size')
parser.add_argument('--batchsize', '-b', type=int, default=100,
                    help='learning minibatch size')
parser.add_argument('--epoch', '-e', default=10, type=int,
                    help='number of epochs to learn')
parser.add_argument('--model', '-m', choices=['skipgram', 'cbow'],
                    default='skipgram',
                    help='model type ("skipgram", "cbow")')
parser.add_argument('--out-type', '-o', choices=['hsm', 'ns', 'original'],
                    default='hsm',
                    help='output model type ("hsm": hierarchical softmax, '
                    '"ns": negative sampling, "original": no approximation)')
args = parser.parse_args()
# Array module: CuPy on GPU, NumPy on CPU.
xp = cuda.cupy if args.gpu >= 0 else np
print('GPU: {}'.format(args.gpu))
print('# unit: {}'.format(args.unit))
print('Window: {}'.format(args.window))
print('Minibatch-size: {}'.format(args.batchsize))
print('# epoch: {}'.format(args.epoch))
print('Training model: {}'.format(args.model))
print('Output type: {}'.format(args.out_type))
print('')
def continuous_bow(dataset, position):
    """Compute the CBOW loss for a batch of center positions.

    Sums the embeddings of the context words inside a randomly sized
    window and scores the sum against the center words via ``loss_func``.
    Relies on module-level ``args``, ``xp``, ``model`` and ``loss_func``.
    """
    h = None
    # use random window size in the same way as the original word2vec
    # implementation.
    w = np.random.randint(args.window - 1) + 1
    for offset in range(-w, w + 1):
        if offset == 0:
            continue
        d = xp.asarray(dataset[position + offset])
        x = chainer.Variable(d)
        e = model.embed(x)
        # accumulate context embeddings (sum, not average)
        h = h + e if h is not None else e
    d = xp.asarray(dataset[position])
    t = chainer.Variable(d)
    return loss_func(h, t)
def skip_gram(dataset, position):
    """Compute the skip-gram loss for a batch of center positions.

    Scores each context word's embedding against the center words and
    sums the per-offset losses.  Relies on module-level ``args``, ``xp``,
    ``model`` and ``loss_func``.
    """
    d = xp.asarray(dataset[position])
    t = chainer.Variable(d)
    # use random window size in the same way as the original word2vec
    # implementation.
    w = np.random.randint(args.window - 1) + 1
    loss = None
    for offset in range(-w, w + 1):
        if offset == 0:
            continue
        d = xp.asarray(dataset[position + offset])
        x = chainer.Variable(d)
        e = model.embed(x)
        loss_i = loss_func(e, t)
        loss = loss_i if loss is None else loss + loss_i
    return loss
if args.gpu >= 0:
cuda.get_device(args.gpu).use()
index2word = {}
word2index = {}
counts = collections.Counter()
dataset = []
with open('ptb.train.txt') as f:
for line in f:
for word in line.split():
if word not in word2index:
ind = len(word2index)
word2index[word] = ind
index2word[ind] = word
counts[word2index[word]] += 1
dataset.append(word2index[word])
n_vocab = len(word2index)
print('n_vocab: %d' % n_vocab)
print('data length: %d' % len(dataset))
if args.model == 'skipgram':
train_model = skip_gram
elif args.model == 'cbow':
train_model = continuous_bow
else:
raise Exception('Unknown model type: {}'.format(args.model))
model = chainer.FunctionSet(
embed=F.EmbedID(n_vocab, args.unit),
)
if args.out_type == 'hsm':
tree = F.create_huffman_tree(counts)
model.l = F.BinaryHierarchicalSoftmax(args.unit, tree)
loss_func = model.l
elif args.out_type == 'ns':
cs = [counts[w] for w in range(len(counts))]
model.l = F.NegativeSampling(args.unit, cs, 20)
loss_func = model.l
elif args.out_type == 'original':
model.l = F.Linear(args.unit, n_vocab)
loss_func = lambda h, t: F.softmax_cross_entropy(model.l(h), t)
else:
raise Exception('Unknown output type: {}'.format(args.out_type))
if args.gpu >= 0:
model.to_gpu()
dataset = np.array(dataset, dtype=np.int32)
optimizer = O.Adam()
optimizer.setup(model)
begin_time = time.time()
cur_at = begin_time
word_count = 0
skip = (len(dataset) - args.window * 2) // args.batchsize
next_count = 100000
for epoch in range(args.epoch):
accum_loss = 0
print('epoch: {0}'.format(epoch))
indexes = np.random.permutation(skip)
for i in indexes:
if word_count >= next_count:
now = time.time()
duration = now - cur_at
throuput = 100000. / (now - cur_at)
print('{} words, {:.2f} sec, {:.2f} words/sec'.format(
word_count, duration, throuput))
next_count += 100000
cur_at = now
position = np.array(
range(0, args.batchsize)) * skip + (args.window + i)
loss = train_model(dataset, position)
accum_loss += loss.data
word_count += args.batchsize
optimizer.zero_grads()
loss.backward()
optimizer.update()
print(accum_loss)
model.to_cpu()
with open('model.pickle', 'wb') as f:
obj = (model, index2word, word2index)
pickle.dump(obj, f)
| jem0101/BigSwag-SQA2022-AUBURN | TestOrchestrator4ML-main/resources/Data/supervised/GITLAB_REPOS/jamieoglindsey0@chainer/examples/word2vec/train_word2vec.py | train_word2vec.py | py | 5,390 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "chainer.cuda.cupy",
"line_number": 38,
"usage_type": "attribute"
},
{
"api_name": "chainer.cuda",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "numpy.ran... |
33256158133 | from transformers import BertTokenizer,GPT2LMHeadModel, AutoModelForCausalLM
hf_model_path = 'IDEA-CCNL/Wenzhong2.0-GPT2-110M-BertTokenizer-chinese'
tokenizer = BertTokenizer.from_pretrained(hf_model_path)
# model = GPT2LMHeadModel.from_pretrained(hf_model_path)
model = AutoModelForCausalLM.from_pretrained(hf_model_path)
def generate_word_level(input_text,n_return=5,max_length=128,top_p=0.9):
    """Sample *n_return* continuations of ``input_text`` with nucleus sampling.

    Prints every decoded continuation and returns the raw token-id batch
    produced by ``model.generate``.  Uses the module-level ``tokenizer``
    and ``model``.
    """
    encoded = tokenizer(input_text, return_tensors='pt', add_special_tokens=False)
    encoded = encoded.to(model.device)
    gen = model.generate(
        inputs=encoded['input_ids'],
        max_length=max_length,
        do_sample=True,
        top_p=top_p,
        eos_token_id=21133,  # model-specific end-of-sequence token id
        pad_token_id=0,
        num_return_sequences=n_return,
    )
    decoded = tokenizer.batch_decode(gen)
    for idx, sentence in enumerate(decoded):
        print(f'sentence {idx}: {sentence}')
        print('*' * 20)
    return gen
# 西湖的景色
outputs = generate_word_level('眼角斜瞥着柳翎那略微有些阴沉的脸庞。萧炎',n_return=5,max_length=128)
print(outputs) | taishan1994/chinese_llm_pretrained | test_model.py | test_model.py | py | 1,212 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "transformers.BertTokenizer.from_pretrained",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "transformers.BertTokenizer",
"line_number": 3,
"usage_type": "name"
},
{
"api_name": "transformers.AutoModelForCausalLM.from_pretrained",
"line_number": 5,
... |
2396940716 | import os
from typing import Dict, List
from nltk.corpus import wordnet as wn
from tqdm import tqdm
import models
import parse
import preprocess
import utils
from sequence import TextSequence
def predict_babelnet(input_path: str, output_path: str, resources_path: str) -> None:
    """
    DO NOT MODIFY THE SIGNATURE!
    Fine-grained prediction: writes one "<id> <BabelSynset>" line per target
    (e.g. "d000.s000.t000 bn:01234567n") to output_path, using the model
    checkpoint and mapping files found under resources_path.  Thin wrapper
    delegating to _predict with task id 0 (BabelNet).

    :param input_path: path of the input file, in Raganato's XML format.
    :param output_path: path of the file where predictions are saved.
    :param resources_path: path of the resources folder.
    :return: None
    """
    _predict(input_path, output_path, resources_path, 0)
def predict_wordnet_domains(
    input_path: str, output_path: str, resources_path: str
) -> None:
    """
    DO NOT MODIFY THE SIGNATURE!
    Coarse-grained prediction: writes one "<id> <wordnetDomain>" line per
    target (e.g. "d000.s000.t000 sport") to output_path, using the model
    checkpoint and mapping files found under resources_path.  Thin wrapper
    delegating to _predict with task id 1 (WordNet domains).

    :param input_path: path of the input file, in Raganato's XML format.
    :param output_path: path of the file where predictions are saved.
    :param resources_path: path of the resources folder.
    :return: None
    """
    _predict(input_path, output_path, resources_path, 1)
def predict_lexicographer(
    input_path: str, output_path: str, resources_path: str
) -> None:
    """
    DO NOT MODIFY THE SIGNATURE!
    Coarse-grained prediction: writes one "<id> <lexicographerId>" line per
    target (e.g. "d000.s000.t000 noun.animal") to output_path, using the
    model checkpoint and mapping files found under resources_path.  Thin
    wrapper delegating to _predict with task id 2 (lexicographer names).

    :param input_path: path of the input file, in Raganato's XML format.
    :param output_path: path of the file where predictions are saved.
    :param resources_path: path of the resources folder.
    :return: None
    """
    _predict(input_path, output_path, resources_path, 2)
def load_test(path_input: str) -> List:
    """
    Parse the Raganato-format test file and return the cleaned sentences.

    :param path_input: path to the test file.
    :return: cleaned test set.
    """
    raw_sentences = parse.semcor_predict_map(path_input)
    return preprocess.clean_predict(raw_sentences)
def _predict(input_path: str, output_path: str, resources_path: str, task: int = None):
    """
    Wrapper function for all the prediction functions.
    :param input_path: the path of the input file to predict in the same format as Raganato's framework.
    :param output_path: the path of the output file (where you save your predictions)
    :param resources_path: the path of the resources folder containing your model and stuff you might need.
    :param task: 0 for BN
                 1 for DOM
                 2 for LEX
    :return:
    """
    print("Loading", input_path.split("/")[-1])
    sentences = load_test(input_path)
    # Loads all the mapping files.
    word_index = utils.read_dictionary(
        os.path.join(resources_path, "vocabs/label_vocab_bn.txt")
    )
    word_index_dom = utils.read_dictionary(
        os.path.join(resources_path, "vocabs/label_vocab_dom.txt")
    )
    word_index_lex = utils.read_dictionary(
        os.path.join(resources_path, "vocabs/label_vocab_lex.txt")
    )
    # one output layer per task: fine-grained BN, coarse DOM, coarse LEX
    outputs_size = [len(word_index), len(word_index_dom), len(word_index_lex)]
    lemma2syn = utils.read_dictionary(
        os.path.join(resources_path, "mapping/lemma2wordnet.txt")
    )
    wn2bn = utils.read_dictionary(
        os.path.join(resources_path, "mapping/wordnet2babelnet.txt")
    )
    bn2coarse = None
    coarse_index = None
    if task != 0:
        # if task != 0, DOM or LEX prediction.
        coarse_index = word_index_dom if task == 1 else word_index_lex
        coarse_path = (
            os.path.join(resources_path, "mapping/babelnet2wndomains.tsv")
            if task == 1
            else os.path.join(resources_path, "mapping/babelnet2lexnames.tsv")
        )
        bn2coarse = utils.read_dictionary(coarse_path)
    print("Loading weights...")
    # hyper-parameters must match the ones used at training time,
    # otherwise the saved weights cannot be restored into the graph
    model = models.keras_model(
        hidden_size=256,
        dropout=0.6,
        recurrent_dropout=0.5,
        learning_rate=0.0003,
        outputs_size=outputs_size,
        elmo=True,
        mtl=True,
    )
    model.load_weights(os.path.join(resources_path, "model.h5"))
    with open(output_path, mode="w", encoding="utf8") as out_file:
        for s in tqdm(sentences):
            line = [list(l.keys())[0] for l in s]
            ids = preprocess.get_ids(s)
            pos = preprocess.get_pos(s)
            lemmas = preprocess.get_lemmas(s)
            line_input = TextSequence.compute_x_elmo([line], pad=False)
            # the MTL model returns one prediction array per task;
            # keep only the one requested
            pred = model.predict(line_input)[task]
            lables = _get_labels(
                pred,
                lemmas,
                ids,
                pos,
                lemma2syn,
                wn2bn,
                word_index,
                coarse_index,
                bn2coarse,
            )
            # labels are "lemma_sense": keep only the part after the last "_"
            out_file.writelines(
                k + " " + v.rsplit("_")[-1] + "\n" for k, v in lables.items()
            )
    return
def _get_labels(
    prediction: List,
    lemmas_map: Dict,
    ids_map: Dict,
    pos_map: Dict,
    lemma2synset_map: Dict,
    wn2bn_map: Dict,
    word_index: Dict,
    coarse_index: Dict = None,
    bn2coarse: Dict = None,
):
    """
    Predict the sense for every instance id of the sentence.

    :param prediction: line prediction (batch of probability vectors).
    :param lemmas_map: dictionary from position to lemma.
    :param ids_map: dictionary from instance id to position inside the line.
    :param pos_map: dictionary from position to pos tag.
    :param lemma2synset_map: dictionary from lemma to synsets.
    :param wn2bn_map: dictionary from WordNet to BabelNet.
    :param word_index: dictionary from word to index.
    :param coarse_index: dictionary from coarse label to index.
    :param bn2coarse: dictionary from BabelNet to coarse.
    :return: dictionary from instance id to the predicted sense.
    """
    return {
        sensekey: _get_sense(
            lemmas_map[index],
            pos_map.get(index),
            lemma2synset_map,
            wn2bn_map,
            word_index,
            prediction[0][index],
            coarse_index,
            bn2coarse,
        )
        for sensekey, index in ids_map.items()
    }
def _get_sense(
    lemma: str,
    pos: str,
    lemma2synset_map: Dict,
    wn2bn_map: Dict,
    word_index: Dict,
    index_pred,
    coarse_index: Dict,
    bn2coarse: Dict,
):
    """
    Pick the most probable sense for one lemma occurrence.

    :param lemma: lemma.
    :param pos: pos tag of the occurrence.
    :param lemma2synset_map: dictionary from lemma to synsets.
    :param wn2bn_map: dictionary from WordNet to BabelNet.
    :param word_index: dictionary from word to index.
    :param index_pred: vector of probabilities for the given position.
    :param coarse_index: dictionary from coarse label to index.
    :param bn2coarse: dictionary from BabelNet to coarse.
    :return: the most probable sense, falling back to the MFS.
    """
    wn_synsets = lemma2synset_map.get(lemma)
    if wn_synsets:
        # keep only synsets with a BabelNet mapping, translated to BN ids
        bn_synsets = [wn2bn_map[s][0] for s in wn_synsets if wn2bn_map.get(s)]
        if bn2coarse:
            candidates = _get_probs_coarse(bn_synsets, coarse_index, index_pred, bn2coarse)
        else:
            candidates = _get_probs(bn_synsets, lemma, word_index, index_pred)
        if candidates:
            best_sense, _ = max(candidates, key=lambda pair: pair[1])
            return best_sense
    # back off to the WordNet most frequent sense
    return get_mfs(lemma, pos, wn2bn_map, bn2coarse)
def _get_probs(synsets: List, lemma: str, word_index: Dict, index_pred):
"""
Get the probabilites for a subset of classes
:param synsets: list candidates synsets.
:param lemma: lemma to predict.
:param word_index: Word -> Index dictionary.
:param index_pred: array of probabilites.
:return: probabilites of synsets.
"""
probs = []
for s in synsets:
sense = lemma + "_" + s
if word_index.get(sense):
probs.append((sense, index_pred[int(word_index.get(sense)[0])]))
return probs
def _get_probs_coarse(synsets: List, word_index: Dict, index_pred, bn2coarse: Dict):
"""
Get the probabilites for a subset of classes
:param synsets: list candidates synsets.
:param word_index: Word -> Index dictionary.
:param index_pred: array of probabilites.
:param bn2coarse: dictionary from BabelNet to coarse.
:return: probabilites of synsets.
"""
probs = []
for s in synsets:
if bn2coarse.get(s):
sense = bn2coarse.get(s)[0]
if word_index.get(sense):
probs.append((sense, index_pred[int(word_index.get(sense)[0])]))
return probs
def get_mfs(lemma: str, pos: str, wn2bn_map: Dict, coarse_dict=None) -> str:
    """
    Get the most frequent sense for the given lemma.
    :param lemma: lemma to get mfs.
    :param pos: WordNet part-of-speech tag used to filter the synsets.
    :param wn2bn_map: dictionary from WordNet to BabelNet ids.
    :param coarse_dict: optional BabelNet -> coarse (domain/lexname) mapping.
    :return: the mfs, formatted as "<lemma>_<sense>".
    """
    # WordNet lists synsets by decreasing frequency, so index 0 is the MFS.
    # NOTE(review): raises IndexError if the lemma has no synset for this
    # pos — presumably never happens on the evaluation data; confirm.
    synset = wn.synsets(lemma, pos=pos)[0]
    # rebuild the "wn:<offset><pos>" key, offset zero-padded to 8 digits
    wn_synset = "wn:" + str(synset.offset()).zfill(8) + synset.pos()
    pred_synset = wn2bn_map.get(wn_synset)[0]
    if coarse_dict:
        # BabelNet ids without a coarse mapping fall back to "factotum"
        pred_synset = (
            coarse_dict.get(pred_synset)[0]
            if coarse_dict.get(pred_synset)
            else "factotum"
        )
    return lemma + "_" + pred_synset
| Riccorl/elmo-wsd | elmo-wsd/predict.py | predict.py | py | 10,774 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "parse.semcor_predict_map",
"line_number": 88,
"usage_type": "call"
},
{
"api_name": "preprocess.clean_predict",
"line_number": 89,
"usage_type": "call"
},
{
"api_name": "typing.List",
"line_number": 82,
"usage_type": "name"
},
{
"api_name": "utils.r... |
15501808433 | import discord
import asyncio
from discord.ext.commands import Bot
from discord.ext import commands
import platform
import logging
import os
import pymongo
from urllib.parse import urlparse
from datetime import datetime
# constants
MESSAGE_START_CONFIRMED = 'Okay. Asking feedback from **{}** to **{}**.'
MESSAGE_WRONG_FORMAT = 'Wrong usage of command.'
MESSAGE_NOT_A_COMMAND_ADMIN = 'Sorry, I can\'t recognize that command.'
MESSAGE_NOT_A_COMMAND_NOTADMIN = '''Hi! There is no feedback session currently, we will let you know when it is.
You can check whether you received any feedback by typing in the `list` command.'''
MESSAGE_ADMIN_USAGE = '''If you want to start a session, type `start @giver @receiver`.
If you want to define new questions, type `questions define`.
If you want to list feedback given to you, type `list`.'''
MESSAGE_ASK_FOR_FEEDBACK = ('Hi! It\'s feedback time! Please write your feedback to **{}**! '
'Be specific, extended and give your feedback on behavior. '
'And don\'t forget to give more positive feedback than negative!')
MESSAGE_FEEDBACK_CONFIRMED = 'You\'ve given **{}** the following feedback: {}. Thank you!'
MESSAGE_LIST_FEEDBACK = 'You have got the following feedback until now: \n{}'
MESSAGE_NO_FEEDBACK_AVAILABLE = '''Sorry, you haven''t got any feedback until now.
Ask an admin to start a feedback session, so you can got feedback.'''
MESSAGE_DEFINE_QUESTIONS = 'You can add new questions by issuing the `questions` command'
MESSAGE_CURRENT_QUESTIONS = 'These are the questions currently defined: \n{}'
MESSAGE_NO_QUESTIONS = 'There are no questions defined.'
MESSAGE_WANT_NEW_QUESTION = 'Do you want to add a new question? (`yes`/`no`)'
MESSAGE_ADD_NEW_QUESTION = 'Please type in your question.'
MESSAGE_EXIT_DEFINE_QUESTIONS = 'You have chosen to exit adding more questions.'
MESSAGE_DEFINE_QUESTIONS_YESNO = 'Please respond with either `yes` or `no`.'
MESSAGE_NEXT_QUESTION = 'The next question is: '
MESSAGE_WANT_REMOVE_QUESTION = 'Please type in the number of the question you want to remove '
MESSAGE_EXIT_REMOVE_QUESTIONS = 'You have chosen to exit removing more questions. '
MESSAGE_REMOVE_QUESTIONS_ONLY_NUMBERS = 'Please choose from the list of numbers corresponding to the questions '
MESSAGE_REMOVE_QUESTIONS_CANCEL = 'or `cancel` if you want to exit question removal.'
MESSAGE_REMOVAL_SUCCESS = 'Successfully removed question.'
LOG_GOT_MESSAGE = 'Got message from user {}: {}'
LOG_SENDING_MESSAGE = 'Sending message to user {}: {}'
ENVVAR_TOKEN = 'FEEDBACKBOT_TOKEN'
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger('feedbackbot')
class MemberNotFound(Exception):
    """Raised when a username cannot be resolved on any connected server."""
    pass
class RoleOrMemberNotFound(Exception):
    """Raised when a name matches neither a member nor a role on any server."""
    pass
# global variables
client = Bot(description="feedbackbot by Sly (test version)",
command_prefix="", pm_help=False)
mongodb_uri = os.environ.get('MONGODB_URI')
try:
conn = pymongo.MongoClient(mongodb_uri)
logger.info('Database connection successful.')
except pymongo.errors.ConnectionFailure as e:
logger.error('Could not connect to MongoDB: {}'.format(e))
db = conn[urlparse(mongodb_uri).path[1:]]
def is_admin(user_id):
    """Checks whether the given user is in the 'admins' role in any of the servers the bot is connected to."""
    # Only the first member found with this id is inspected (matches the
    # original control flow: the search stops at the first id match).
    for server in client.servers:
        for member in server.members:
            if member.id != user_id:
                continue
            return any(role.name == 'admins' for role in member.roles)
    return False
def get_member_by_username(username_string):
    """Return the Member object if it's found on any of the servers the bot
    is connected to; otherwise raise MemberNotFound.

    Accepts names with a discriminator, like @szilveszter.erdos#7945.
    """
    parts = username_string.strip('@').split('#')
    username = parts[0]
    discriminator = parts[1] if len(parts) > 1 else ''
    for server in client.servers:
        for member in server.members:
            if member.name == username and member.discriminator == discriminator:
                return member
    raise MemberNotFound('Username `{}` not found.'.format(username))
def get_member_or_role(name_string):
    """Returns the member/mention or members of a role/mention in a list on any of the servers
    the bot is connected to. Otherwise, raises an exception."""
    try:
        # first try to resolve the name as a single member
        member = get_member_by_username(name_string)
        return [member], member.nick
    except MemberNotFound:
        # fall back to treating the name as a role
        members = []
        name_string = name_string.strip('@')
        for server in client.servers:
            for server_role in server.roles:
                if server_role.name == name_string:
                    # search for all members with that role
                    for member in server.members:
                        for member_role in member.roles:
                            if member_role == server_role:
                                members.append(member)
                                break
                    break
        if members:
            # NOTE(review): server_role here is the loop variable left over
            # from the last matching server — assumes the role name is
            # identical across servers; confirm.
            return members, '@' + server_role.name
        else:
            raise RoleOrMemberNotFound('Username or role `{}` not found.'.format(name_string))
@client.event
async def on_ready():
    """This is what happens everytime the bot launches. """
    # startup banner: identity, server/user counts and the invite link
    print('Logged in as '+client.user.name+' (ID:'+client.user.id+') | Connected to ' +
          str(len(client.servers))+' servers | Connected to '+str(len(set(client.get_all_members())))+' users')
    print('--------')
    print('Current Discord.py Version: {} | Current Python Version: {}'.format(
        discord.__version__, platform.python_version()))
    print('--------')
    print('Use this link to invite {}:'.format(client.user.name))
    print('https://discordapp.com/oauth2/authorize?client_id={}&scope=bot&permissions=8'.format(client.user.id))
    # This is buggy, let us know if it doesn't work.
    return await client.change_presence(game=discord.Game(name='Feedback game ;)'))
async def send_msg(user, msg):
    """Log an outgoing message, then deliver it to a user or channel."""
    log_entry = LOG_SENDING_MESSAGE.format(user, msg)
    logger.info(log_entry)
    await client.send_message(user, msg)
async def process_ask_queue(giver, first_time=False):
    """Send `giver` the next pending feedback question, if any.

    Fetches the first 'to-ask' entry queued for the giver, messages them
    (with an intro line when it is the first question of a session) and
    marks the entry 'asked' so the giver's next reply is treated as the
    answer.  Does nothing when the queue is empty.
    """
    next_to_ask_details = db['ask-queue'].find_one(
        {
            'id': giver.id,
            'status': 'to-ask'
        }
    )
    if next_to_ask_details:
        receiver_id = next_to_ask_details['receiver_id']
        receiver_nick = next_to_ask_details['receiver_nick']
        question_content = next_to_ask_details['question_content']
        if first_time:
            # session intro, sent only before the very first question
            msg = MESSAGE_ASK_FOR_FEEDBACK.format(receiver_nick)
            await send_msg(giver, msg)
        msg = MESSAGE_NEXT_QUESTION + question_content
        await send_msg(giver, msg)
        db['ask-queue'].update(
            {
                'id': giver.id,
                'receiver_id': receiver_id
            },
            {
                '$set': {
                    'status': 'asked'
                }
            }
        )
def push_ask_queue(receiver, giver, question):
    """Queue one (giver, receiver, question) triple as a 'to-ask' entry."""
    db['ask-queue'].insert(
        {
            'id': giver.id,
            'giver_nick': giver.nick,
            'receiver_id': receiver.id,
            'receiver_nick': receiver.nick,
            'question_content': question['content'],
            'status': 'to-ask'
        }
    )
async def list_questions(message):
    """List questions defined in the database with numbering.

    Sends the numbered question list to the user, or the "no questions"
    notice when none are defined.
    """
    if 'questions' in db.collection_names() and db['questions'].count():
        questions_db = list(db['questions'].find({}, {'content': 1}))
        questions_with_index_str = \
            ['{}. {}'.format(i+1, e['content']) for i, e in enumerate(questions_db) if 'content' in e]
        questions_str = '\n'.join(questions_with_index_str)
        msg = MESSAGE_CURRENT_QUESTIONS.format(questions_str)
    else:
        msg = MESSAGE_NO_QUESTIONS
    # BUG FIX: previously the "no questions" message was assigned but never
    # sent, so the user got no reply when the question list was empty.
    await send_msg(message.channel.user, msg)
def renumber_questions():
    """Reassign consecutive 1-based string indexes to all stored questions."""
    questions = db['questions']
    for position, question in enumerate(questions.find({}), start=1):
        selector = {'_id': question['_id']}
        update = {'$set': {'index': str(position)}}
        questions.update_one(selector, update, upsert=True)
async def handle_start_questions_define(message):
    """Handles the `questions` command issued by an admin and starts a conversation
    to add questions. """
    await list_questions(message)
    # persist the session state so on_message routes the admin's next reply
    # to handle_want_question
    db['settings'].update_one(
        {
            'status': {
                '$exists': True
            }
        },
        {
            '$set': {
                'status': 'questions-define-pending'
            }
        },
        upsert=True
    )
    msg = MESSAGE_WANT_NEW_QUESTION
    await send_msg(message.channel.user, msg)
async def handle_want_question(message):
    """Handles responding with yes/no while admin in question defining session. """
    if message.content.lower() == 'yes':
        # advance the session: next message will be the new question text
        db['settings'].update_one(
            {
                'status': {
                    '$exists': True
                }
            },
            {
                '$set': {
                    'status': 'questions-define-new'
                }
            },
            upsert=True
        )
        msg = MESSAGE_ADD_NEW_QUESTION
        await send_msg(message.channel.user, msg)
    elif message.content.lower() == 'no':
        # leave the session: drop the pending state marker
        db['settings'].remove(
            {
                'status': 'questions-define-pending'
            }
        )
        msg = MESSAGE_EXIT_DEFINE_QUESTIONS
        await send_msg(message.channel.user, msg)
    else:
        msg = MESSAGE_DEFINE_QUESTIONS_YESNO
        await send_msg(message.channel.user, msg)
async def handle_add_question(message):
    """Handles adding new question while admin in question defining session. """
    # inserting new question into database
    db['questions'].insert(
        {
            'content': message.content
        }
    )
    renumber_questions()
    # asking whether admin wants new question to add
    db['settings'].update_one(
        {
            'status': {
                '$exists': True
            }
        },
        {
            '$set': {
                'status': 'questions-define-pending'
            }
        },
        upsert=True
    )
    await list_questions(message)
    msg = MESSAGE_WANT_NEW_QUESTION
    await send_msg(message.channel.user, msg)
async def handle_start_question_removal(message):
    """Handles `question remove` command issued by an admin, lists the currently defined questions
    and gives an opportunity to remove from them. """
    await list_questions(message)
    msg = MESSAGE_WANT_REMOVE_QUESTION + MESSAGE_REMOVE_QUESTIONS_CANCEL
    await send_msg(message.channel.user, msg)
    # persist the session state so on_message routes the admin's next reply
    # to handle_question_remove
    db['settings'].update_one(
        {
            'status': {
                '$exists': True
            }
        },
        {
            '$set': {
                'status': 'questions-remove-pending'
            }
        },
        upsert=True
    )
async def handle_question_remove(message):
    """Handles removing questions while admin in question removal session. """
    if message.content.lower() == 'cancel':
        # leave the session: drop the pending state marker
        db['settings'].remove(
            {
                'status': 'questions-remove-pending'
            }
        )
        msg = MESSAGE_EXIT_REMOVE_QUESTIONS
        await send_msg(message.channel.user, msg)
    # we can assume that indexes are continous since we renumber them after each insert/remove with renumber_questions()
    elif message.content in [str(i+1) for i in range(db['questions'].count())]:
        db['questions'].remove(
            {
                'index': message.content
            }
        )
        renumber_questions()
        msg = MESSAGE_REMOVAL_SUCCESS + '\n' + MESSAGE_WANT_REMOVE_QUESTION + MESSAGE_REMOVE_QUESTIONS_CANCEL
        await list_questions(message)
        await send_msg(message.channel.user, msg)
    else:
        msg = MESSAGE_REMOVE_QUESTIONS_ONLY_NUMBERS + MESSAGE_REMOVE_QUESTIONS_CANCEL
        await send_msg(message.channel.user, msg)
async def handle_start(message):
    """Handles the `start @giver @receiver` command issued by an admin and starts a
    feedback session. """
    # if we have questions defined
    if 'questions' in db.collection_names():
        msg_elements = message.content.split()
        # because usage is `start @giver @receiver`
        if len(msg_elements) == 3:
            # get member or role and confirm command
            try:
                givers, giver_mention = get_member_or_role(msg_elements[1])
                receivers, receiver_mention = get_member_or_role(msg_elements[2])
            except RoleOrMemberNotFound as e:
                msg = str(e)
                await send_msg(message.channel.user, msg)
            else:
                msg = MESSAGE_START_CONFIRMED.format(giver_mention, receiver_mention)
                await send_msg(message.channel.user, msg)
                # asking for feedback: queue every question for every
                # giver/receiver pair (nobody reviews themselves), then
                # kick off each giver's question flow
                for giver in givers:
                    for receiver in receivers:
                        if receiver is not giver:
                            for question in db['questions'].find({}):
                                push_ask_queue(receiver, giver, question)
                    await process_ask_queue(giver, True)
        else:
            msg = MESSAGE_WRONG_FORMAT + '\n' + MESSAGE_ADMIN_USAGE
            await send_msg(message.channel.user, msg)
    else:
        msg = MESSAGE_NO_QUESTIONS + ' ' + MESSAGE_DEFINE_QUESTIONS
        await send_msg(message.channel.user, msg)
async def handle_list(message):
    """Handles `list` command and lists given feedback messages. """
    feedback_details = db['feedbacks'].find_one({'id': message.author.id})
    if feedback_details is not None:
        # group the flat feedback list by question text
        question_list = {}
        for feedback in feedback_details['feedback']:
            if feedback['question_content'] not in question_list:
                question_list[feedback['question_content']] = []
            question_list[feedback['question_content']].append(
                '**{}** ({:%Y.%m.%d. %H:%M}): {}'.format(
                    feedback['giver_nick'],
                    feedback['datetime'],
                    feedback['message']
                )
            )
        # render: one block per question, feedback entries indented below
        feedback_list_str = ''
        for question_content, feedback_list in question_list.items():
            feedback_list_str += '\n*' + question_content + '*\n'
            for feedback_str in feedback_list:
                feedback_list_str += '\t' + feedback_str + '\n'
        msg = MESSAGE_LIST_FEEDBACK.format(feedback_list_str)
    else:
        msg = MESSAGE_NO_FEEDBACK_AVAILABLE
    await send_msg(message.channel.user, msg)
async def handle_send_feedback(message):
    """Handles feedback sent as an answer to the bot's question. """
    # the author must have an 'asked' entry, guaranteed by on_message's dispatch
    ask_details = db['ask-queue'].find_one({'id': message.author.id, 'status': 'asked'})
    giver_nick = ask_details['giver_nick']
    giver = message.author
    receiver_id = ask_details['receiver_id']
    receiver_nick = ask_details['receiver_nick']
    question_content = ask_details['question_content']
    db['feedbacks'].update_one(
        {
            'id': receiver_id,
            'receiver_nick': receiver_nick
        },
        {
            '$push': {
                'feedback': {
                    'giver': giver.id,
                    'giver_nick': giver_nick,
                    'question_content': question_content,
                    'message': message.content,
                    # NOTE(review): naive local time — confirm UTC is not needed
                    'datetime': datetime.now()
                }
            }
        },
        upsert=True
    )
    # confirm feedback
    msg = MESSAGE_FEEDBACK_CONFIRMED.format(receiver_nick, message.content)
    await send_msg(message.channel.user, msg)
    # remove from queue and continue processing
    db['ask-queue'].remove({'id': giver.id, 'receiver_id': receiver_id, 'question_content': question_content})
    await process_ask_queue(giver)
@client.event
async def on_message(message):
    """This is what happens every time when the bot sees a message. """
    # Dispatch order matters: self-messages and non-PMs are filtered first,
    # then admin commands/session states, then user commands, then the
    # feedback-answer path, and finally the fallback help text.
    # we do not want the bot to reply to itself
    if message.author == client.user:
        return
    # we do not want the bot to reply not in a pm
    elif message.channel.type.name != 'private':
        return
    # admin starting the session
    elif message.content.startswith('start') and is_admin(message.author.id):
        await handle_start(message)
    # admin starting question defining
    elif message.content.startswith('questions define') and is_admin(message.author.id):
        await handle_start_questions_define(message)
    # admin answering yes/no while defining questions
    elif 'settings' in db.collection_names() and \
            db['settings'].find_one({'status': 'questions-define-pending'}) and \
            is_admin(message.author.id):
        await handle_want_question(message)
    # admin typing in new question
    elif 'settings' in db.collection_names() and \
            db['settings'].find_one({'status': 'questions-define-new'}) and \
            is_admin(message.author.id):
        await handle_add_question(message)
    # admin starting question removal
    elif message.content.startswith('questions remove') and is_admin(message.author.id):
        await handle_start_question_removal(message)
    # admin remove question
    elif 'settings' in db.collection_names() and \
            db['settings'].find_one({'status': 'questions-remove-pending'}) and \
            is_admin(message.author.id):
        await handle_question_remove(message)
    # receiver listing feedback
    elif message.content.startswith('list'):
        await handle_list(message)
    # giver sending a feedback
    elif db['ask-queue'].find_one({'id': message.author.id, 'status': 'asked'}):
        await handle_send_feedback(message)
    # not matching any case
    else:
        if is_admin(message.author.id):
            msg = MESSAGE_NOT_A_COMMAND_ADMIN + '\n' + MESSAGE_ADMIN_USAGE
            await send_msg(message.channel, msg)
        else:
            msg = MESSAGE_NOT_A_COMMAND_NOTADMIN
            await send_msg(message.channel, msg)
    # private channels expose .user; group/other channels raise AttributeError
    try:
        logger.info(LOG_GOT_MESSAGE.format(message.channel.user.name, message.content))
    except AttributeError:
        logger.info(LOG_GOT_MESSAGE.format(message.channel, message.content))
if __name__ == '__main__':
    # the token comes from the environment so it is never committed to the repo
    if ENVVAR_TOKEN in os.environ:
        token = os.environ.get(ENVVAR_TOKEN)
        client.run(token)
    else:
        print("Please define an environment variable named {} and put the secret token into it!".format(ENVVAR_TOKEN))
{
"api_name": "logging.basicConfig",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "discord.ext.c... |
27970434469 | # coding=utf-8
import os
import logging
from django.core.management.base import BaseCommand, CommandParser
from django.conf import settings
from django.apps import apps
from django.utils.translation.trans_real import translation
from prettytable import PrettyTable, ALL, FRAME, NONE
logger = logging.getLogger(__name__)
TRANS = translation(settings.LANGUAGE_CODE)
BASE_DIR = os.path.join(settings.BASE_DIR, 'doc', 'source')
AUTHOR_NAME = os.popen("git config --global --get user.name").read().strip()
AUTHOR_EMAIL = os.popen("git config --global --get user.email").read().strip()
FILE_MAP = {
'REF': '/rst/api/%(app_name)s/ref.%(out_put)s.rst',
'RST_I': '/rst/api/%(app_name)s/%(model_name)s.dbinfo.rst',
'RST_C': '/rst/api/%(app_name)s/%(target)s.create.rst',
'RST_L': '/rst/api/%(app_name)s/%(target)s.list.rst',
'RST_R': '/rst/api/%(app_name)s/%(target)s.detail.rst',
'RST_U': '/rst/api/%(app_name)s/%(target)s.update.rst',
'RST_A': '/rst/api/%(app_name)s/%(target)s.action.rst',
'RST_D': '/rst/api/%(app_name)s/%(target)s.delete.rst',
'JSON_L': '/static/json/api/%(app_name)s/%(target)s.list.json',
'JSON_C': '/static/json/api/%(app_name)s/%(target)s.create.json',
'JSON_R': '/static/json/api/%(app_name)s/%(target)s.detail.json',
'JSON_U': '/static/json/api/%(app_name)s/%(target)s.update.json',
'JSON_A': '/static/json/api/%(app_name)s/%(target)s.action.json',
}
META_MAP = {
'I': u"字段说明\n\n--------\n\n",
'C': u".. literalinclude:: %(JSON_C)s\n :language: json\n\n后端回应:\n\n.. literalinclude:: %(JSON_R)s\n :language: json\n",
'U': u".. literalinclude:: %(JSON_U)s\n :language: json\n\n后端回应:\n\n.. literalinclude:: %(JSON_R)s\n :language: json\n",
'A': u".. include:: %(JSON_A)s\n\n后端回应:\n\n.. literalinclude:: %(JSON_A)s\n :language: json\n",
'L': u"后端回应:\n\n.. literalinclude:: %(JSON_L)s\n :language: json\n",
'R': u"后端回应:\n\n.. literalinclude:: %(JSON_R)s\n :language: json\n",
'D': u"后端回应::\n\n HTTP 204\n",
'REF_HEADER': '%(title_wrap)s\n%(title)s\n%(title_wrap)s\n\n%(sign_info)s',
'REQ_C': u"新建%(target_trans)s\n----%(app_trans_len)s\n\n前端请求: ``%(post)s %(url_prefix)s/%(target)s/``\n\n",
'REQ_L': u"%(target_trans)s列表\n----%(app_trans_len)s\n\n前端请求: ``%(get)s %(url_prefix)s/%(target)s/``\n\n",
'REQ_R': u"%(target_trans)s详情\n----%(app_trans_len)s\n\n前端请求: ``%(get)s %(url_prefix)s/%(target)s/{object_id}/``\n\n",
'REQ_U': u"编辑%(target_trans)s\n----%(app_trans_len)s\n\n前端请求: ``%(put)s %(url_prefix)s/%(target)s/{object_id}/``\n\n",
'REQ_A': u"操作%(target_trans)s\n----%(app_trans_len)s\n\n前端请求: ``%(put)s %(url_prefix)s/%(target)s/{object_id}/{action}/``\n\n",
'REQ_D': u"删除%(target_trans)s\n----%(app_trans_len)s\n\n前端请求: ``%(delete)s %(url_prefix)s/%(target)s/{object_id}/``\n\n",
'RET_I': u".. include:: %(RST_I)s\n\n",
'RET_C': u".. include:: %(RST_C)s\n\n",
'RET_U': u".. include:: %(RST_U)s\n\n",
'RET_A': u".. include:: %(RST_A)s\n\n",
'RET_L': u".. include:: %(RST_L)s\n\n",
'RET_R': u".. include:: %(RST_R)s\n\n",
'RET_D': u".. include:: %(RST_D)s\n\n",
}
def stringify_header(self, options):
    """Render the table header row (monkey-patched onto PrettyTable below).

    Identical to upstream PrettyTable._stringify_header except for the last
    step: the rule under the header is drawn with '=' instead of '-', giving
    reST grid-table style output.
    """
    bits = []
    lpad, rpad = self._get_padding_widths(options)
    if options["border"]:
        # Top rule plus the leading frame character of the header line.
        if options["hrules"] in (ALL, FRAME):
            bits.append(self._hrule)
            bits.append("\n")
        if options["vrules"] in (ALL, FRAME):
            bits.append(options["vertical_char"])
        else:
            bits.append(" ")
    # A table with no fields still needs a (blank) header cell.
    if not self._field_names:
        if options["vrules"] in (ALL, FRAME):
            bits.append(options["vertical_char"])
        else:
            bits.append(" ")
    for field, width, in zip(self._field_names, self._widths):
        if options["fields"] and field not in options["fields"]:
            continue
        # Apply the configured header capitalisation style.
        if self._header_style == "cap":
            fieldname = field.capitalize()
        elif self._header_style == "title":
            fieldname = field.title()
        elif self._header_style == "upper":
            fieldname = field.upper()
        elif self._header_style == "lower":
            fieldname = field.lower()
        else:
            fieldname = field
        bits.append(" " * lpad + self._justify(fieldname, width, self._align[field]) + " " * rpad)
        if options["border"]:
            if options["vrules"] == ALL:
                bits.append(options["vertical_char"])
            else:
                bits.append(" ")
    # With vrules == FRAME the last cell ended with a space; swap it for the
    # closing frame character.
    if options["border"] and options["vrules"] == FRAME:
        bits.pop()
        bits.append(options["vertical_char"])
    if options["border"] and options["hrules"] != NONE:
        bits.append("\n")
        # reST-style: underline the header with '=' instead of '-'.
        bits.append(self._hrule.replace('-', '='))
    return "".join(bits)
def get_string(tb, **kwargs):
    """Render the whole table to a string (monkey-patched onto PrettyTable).

    Mirrors upstream PrettyTable.get_string, but the trailing rule is emitted
    for hrules in (ALL, FRAME) rather than FRAME only, matching the
    reST-style header produced by stringify_header above.
    """
    options = tb._get_options(kwargs)
    lines = []
    if tb.rowcount == 0 and (not options["print_empty"] or not options["border"]):
        return ""
    rows = tb._get_rows(options)
    formatted_rows = tb._format_rows(rows, options)
    # Column widths and the horizontal rule must be computed before any
    # stringify call below uses them.
    tb._compute_widths(formatted_rows, options)
    tb._hrule = tb._stringify_hrule(options)
    if options["header"]:
        lines.append(tb._stringify_header(options))
    elif options["border"] and options["hrules"] in (ALL, FRAME):
        lines.append(tb._hrule)
    for row in formatted_rows:
        lines.append(tb._stringify_row(row, options))
    # Closing rule (upstream only emitted it for FRAME).
    if options["border"] and options["hrules"] in (ALL, FRAME):
        lines.append(tb._hrule)
    # if options["border"] and options["hrules"] == FRAME:
    # lines.append(tb._hrule)
    return tb._unicode("\n").join(lines)
# Monkey-patch PrettyTable so every table in this command renders in reST
# grid-table style (header underlined with '=').
PrettyTable.get_string = get_string
PrettyTable._stringify_header = stringify_header
class Command(BaseCommand):
    """Management command that scaffolds reST API documentation (and JSON
    example stubs) for a model/URL target under BASE_DIR.

    The mode string selects which sections are generated:
    I=field Info, C=Create, L=List, R=Retrieve, U=Update, A=Action, D=Delete.
    """

    def create_parser(self, prog_name, subcommand):
        """Build a custom argument parser with Chinese help text."""
        parser = CommandParser(
            self,
            prog="%s %s" % (os.path.basename(prog_name), subcommand),
            description=u'API文档辅助生成脚本.',
            add_help=False
        )
        # Defaults Django's base options would normally provide.
        parser.set_defaults(**{'verbosity': 1, 'pythonpath': None, 'traceback': None, 'no_color': False, 'settings': None})
        # Replace the default argparse group titles so --help is localised.
        parser._positionals = parser.add_argument_group(u'位置参数')
        parser._optionals = parser.add_argument_group(u'关键字参数')
        parser.add_argument('ref', nargs='?', help=u'引用的对象(eg. oeauth.User, commons.login, users)')
        parser.add_argument('-t', dest='target', help=u'请求的URL的对象(eg. users)')
        parser.add_argument('-p', dest='prefix', help=u'请求的URL的前缀(eg. auth)')
        parser.add_argument('-m', dest='mode', default='ILRCUAD', help=u'包含的模式(Info/Create/List/Get/Update/Delete, eg. iclruad)')
        parser.add_argument('-o', dest='output', help=u'保存文件名(allinone模式)')
        parser.add_argument('-u', '--update', dest='update', action='store_true', default=False, help=u'覆盖已经存在的文件(默认不覆盖)')
        parser.add_argument('-i', '--interactive', dest='interactive', action='store_true', default=False, help=u'覆盖前询问(默认不询问)')
        parser.add_argument('-s', '--sign', dest='sign', action='store_true', default=False, help=u'添加文档签名(默认不添加)')
        parser.add_argument('-a', '--allinone', dest='allinone', action='store_true', default=False, help=u'合并到单个rst文件中(默认不合并)')
        parser.add_argument('-f', '--form-request', dest='form_request', action='store_true', default=False, help=u'表单请求方式(URL请求只包含GET/POST)')
        parser.add_argument('-h', '--help', action='help', help=u'显示帮助信息')
        # Kept so handle() can print usage on a missing 'ref'.
        self.parser = parser
        return parser

    def create_file(self, fp, content='', mode='w'):
        """Write `content` to BASE_DIR + fp, creating parent directories.

        Existing files are only overwritten with -u, or with -i after the
        user confirms (empty answer / y / Y counts as yes).
        NOTE(review): raw_input is Python 2 only — this command will not run
        unmodified on Python 3.
        """
        full_path = os.path.abspath(BASE_DIR + fp)
        overwrite = None
        if not os.path.exists(os.path.dirname(full_path)):
            os.makedirs(os.path.dirname(full_path))
        if not os.path.isfile(full_path):
            with open(full_path, mode) as f:
                f.write(content)
            # print(u'创建文件: %s' % full_path)
        elif self.options['update'] or self.options['interactive']:
            if self.options['interactive'] and not self.options['update']:
                overwrite = raw_input(u'文件已经存在: %s\n是否覆盖? [y/N]:' % full_path)
            if overwrite in ('', 'Y', 'y', None) or self.options['update']:
                with open(full_path, mode) as f:
                    f.write(content)
                # print(u'更新文件: %s' % full_path)

    def create_model_table(self):
        """Build reST table text describing the model's concrete fields.

        Returns a list of text chunks: an optional choices table plus a
        per-field table (name, type, required, default, length, verbose
        name translation).  Empty when 'I' is not in the mode or no model
        was resolved.
        """
        dbinfo_content = []
        if 'I' in self.mode and self.model:
            tb_choices = PrettyTable(field_names=[u"键值", u"可选参数值"], align='l')
            field_choices = [f for f in self.model._meta.concrete_fields if f.choices]
            if field_choices:
                for f in field_choices:
                    tb_choices.add_row([f.name, ''.join(['%s:%s; ' % (k, v) for k, v in f.choices])])
                dbinfo_content.append(u'约束项:\n\n' + str(tb_choices) + u'\n\n字段详情:\n')
            tb = PrettyTable(field_names=[u"键值", u"类型", u"非空", u"默认值", u"长度", u"说明"], align='l')
            for field in self.model._meta.concrete_fields:
                verbose_name_trans = TRANS.gettext(field.verbose_name)
                tb.add_row([
                    field.name,
                    '%s%s' % (field.__class__.__name__, field.primary_key and '(PK)' or ''),
                    not (field.blank or field.null) and '√' or '',
                    not callable(field.default) and str(field.default).replace('django.db.models.fields.NOT_PROVIDED', '') or '',
                    getattr(field, 'max_length', '') or '',
                    verbose_name_trans != field.verbose_name and verbose_name_trans or ''
                ])
            dbinfo_content.append(str(tb) + '\n')
        return dbinfo_content

    def handle(self, **options):
        """Resolve the target model, build the template context and emit the
        reST/JSON files (one combined file with -a, otherwise one per mode).
        """
        self.options = options
        self.model = None
        try:
            # 'app.Model' style refs resolve to a Django model; a bare name
            # (ValueError) is treated as a URL-only target.
            app_name, model_name = options.get('ref').split('.')[-2:]
            self.model = apps.get_model(app_name, model_name)
        except ValueError as e:
            logger.exception(e)
            app_name = options.get('ref')
            model_name = ''
        except LookupError as e:
            # app/model pair did not resolve; proceed without a model.
            logger.exception(e)
        except AttributeError as e:
            # No 'ref' given at all.
            return self.parser.print_help()
        # Only keep recognised mode letters (case-insensitive).
        self.mode = set(list(options.get('mode', '').upper())) & set(list('ICLRUAD'))
        if not os.path.exists(BASE_DIR):
            os.makedirs(BASE_DIR)
        url_prefix = options.get('prefix') and '/%s' % options['prefix'] or ''
        target = options.get('target') or model_name and model_name.lower()
        if not target:
            raise Exception('target')
        # Prefer whichever of model/target actually has a translation.
        model_name_trans = TRANS.gettext(model_name)
        target_trans = TRANS.gettext(target)
        if target_trans == target and target.lower() == model_name.lower() and model_name_trans != model_name:
            target_trans = model_name_trans
        if model_name_trans == model_name and target.lower() == model_name.lower() and target_trans != target:
            model_name_trans = target_trans
        title = model_name_trans or target_trans or '%s:%s' % (app_name, model_name)
        ctx = {
            'title': title,
            'title_wrap': max([len(title), 4]) * '=',
            'app_name': app_name,
            'model_name': model_name.lower(),
            'model_name_trans': model_name_trans,
            'target_trans': target_trans,
            'app_trans_len': max([len(model_name_trans), len(target_trans), 4]) * '-',
            'url_prefix': url_prefix,
            'target': target,
            'out_put': options['allinone'] and options['output'] or model_name.lower(),
            'sign_info': options.get('sign') and '.. note::\n | 本文档由 %s 创建\n | 如果对文档存在疑问, 请当面咨询或者联系 `%s`\n\n\n' % (AUTHOR_NAME, AUTHOR_EMAIL) or '',
            'post': 'POST',
            'get': 'GET',
            'put': options['form_request'] and 'PUT' or 'POST',
            'delete': options['form_request'] and 'DELETE' or 'POST',
        }
        # Expand the file-path templates first, then the reST templates that
        # reference those paths.
        ctx.update({k: v % ctx for k, v in FILE_MAP.items()})
        ctx.update({k: v % ctx for k, v in META_MAP.items()})
        ref_content = []
        dbinfo_content = self.create_model_table()
        if options['allinone']:
            self.create_file(ctx['REF'], ctx['REF_HEADER'])
        for m in list('ILRCUAD'):
            if m in self.mode:
                if m == 'I' and dbinfo_content:
                    # Field-info section comes from the model tables.
                    if options['allinone']:
                        self.create_file(ctx['REF'], ctx[m], 'a')
                        self.create_file(ctx['REF'], '\n'.join(dbinfo_content) + '\n\n', 'a')
                    else:
                        self.create_file(ctx['RST_%s' % m], '\n'.join(dbinfo_content))
                else:
                    # JSON example stubs exist for everything except Info/Delete.
                    if m not in ('I', 'D'):
                        self.create_file(ctx['JSON_%s' % m])
                    if m != 'I':
                        if options['allinone']:
                            self.create_file(ctx['REF'], ctx['REQ_%s' % m], 'a')
                            self.create_file(ctx['REF'], ctx[m] + '\n\n', 'a')
                        else:
                            # Write a standalone rst file per mode.
                            self.create_file(ctx['RST_%s' % m], ctx[m])
                        if not options['allinone']:
                            ref_content.append(ctx['REQ_%s' % m])
        if ref_content and not options['allinone']:
            ref_content.insert(0, ctx['REF_HEADER'])
            self.create_file(ctx['REF'], '\n'.join(ref_content))
        print(u'REF: %s' % ctx['REF'])
| xiaolin0199/bbt | apps/ws/management/commands/gendoc.py | gendoc.py | py | 14,000 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.trans_real.translation",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.LANGUAGE_CODE",
"line_number": 12,
"usage_type": "... |
27359979494 | from pymatgen.core import Element
from collections import OrderedDict
import numpy as np
from typing import Union, List
from pymatgen.core.composition import Composition
import pandas as pd
from numpy.typing import ArrayLike
class ElementOneHotRepresentation:
    """A one-hot composition vector bundled with its element labels.

    The vector can be read back either as a raw numpy array or as a
    single-row pandas DataFrame whose columns are the element symbols.
    """

    def __init__(self, one_hot: ArrayLike, element_dict: OrderedDict, elements: list):
        self.element_dict = element_dict
        self.keys = elements
        self.one_hot = one_hot

    def _to_frame(self) -> pd.DataFrame:
        # Single-row frame; iterating element_dict yields the symbol labels.
        return pd.DataFrame(self.one_hot, index=self.element_dict).T

    def as_array(self) -> np.ndarray:
        return self.one_hot

    def as_df(self) -> pd.DataFrame:
        return self._to_frame()

    @property
    def array(self) -> np.ndarray:
        return self.one_hot

    @property
    def df(self) -> pd.DataFrame:
        return self._to_frame()
class ElementOneHotGenerator:
    """
    A generator class that converts chemical formulas into one-hot vector
    representations.

    Attributes:
        element_dict (OrderedDict): Maps element symbols to 1-based indices —
            atomic numbers when fitted with ``N``, list positions (starting
            at 1) when fitted with an explicit ``elements`` list.
        keys (list): List of element symbols present in the `element_dict`.

    Methods:
        fit(N=100, elements=None): Initializes the `element_dict` and `keys`.
        _transform(chemical_formula): Internal method to generate the one-hot
            vector for a given chemical formula.
        _generate_one_hot(chemical_formula): Helper that returns the one-hot
            vector or a None-filled vector on failure.
        transform(chemical_formula): Returns an ElementOneHotRepresentation.

    Example:
        formula = "SrTiO3H2"
        gen = ElementOneHotGenerator()
        print(gen.transform(formula).as_df())

        gen = ElementOneHotGenerator()
        gen.fit(elements=["O", "Ti", "Sr", "H"])
        print(gen.transform(formula).as_df())
    """

    def __init__(self, N: int = 100):
        """
        Initializes the ElementOneHotGenerator with a default atomic number range.
        fit(N=100) is automatically done.
        If you want to change the representation, you can overwrite it by fit().

        Args:
            N (int): Maximum atomic number to consider for one-hot encoding
                (default is 100).
        """
        self.fit(N=N)

    def fit(self, N: int = 100, elements: Union[List[str], None] = None):
        """
        Initializes the `element_dict` and `keys` based on the given atomic
        number range or element list.

        Args:
            N (int): Maximum atomic number to consider for one-hot encoding.
            elements (List[str], optional): Element symbols to use instead.

        Raises:
            ValueError: On duplicated `elements`, wrong argument types, or
                when neither `N` nor `elements` is provided.
        """
        element_dict = OrderedDict()
        if elements is not None:
            if isinstance(elements, list):
                _elements = list(set(elements))
                if len(elements) != len(_elements):
                    raise ValueError('elements must not be duplicated.')
                for i, element_name in enumerate(elements):
                    # BUGFIX: store 1-based positions so the `z - 1` offset in
                    # _transform() is correct for explicit element lists too.
                    # Previously index 0 was stored and the first element
                    # wrapped to one_hot[-1], i.e. the *last* slot.
                    element_dict[element_name] = i + 1
                self.element_dict = element_dict
                self.keys = elements
            else:
                raise ValueError('elements must be list.')
        elif N is not None:
            if isinstance(N, int):
                # Atomic numbers Z = 1..N (already 1-based).
                for z in range(1, N + 1):
                    element = Element.from_Z(z)
                    element_dict[element.symbol] = z
                self.element_dict = element_dict
                self.keys = list(element_dict.keys())
            else:
                raise ValueError('N must be int.')
        else:
            raise ValueError('N or elements must be set.')

    def _transform(self, chemical_formula: str) -> np.ndarray:
        """
        Generates the one-hot vector (atomic fractions) for a formula.

        Args:
            chemical_formula (str): The chemical formula to transform.

        Returns:
            np.ndarray: One-hot vector of fractional compositions.
        """
        # pymatgen's Composition accepts (...) but not [...] grouping.
        chemical_formula = chemical_formula.replace("[", "(")
        chemical_formula = chemical_formula.replace("]", ")")
        composition = Composition(chemical_formula)
        element_dict = self.element_dict
        N = len(element_dict)
        one_hot = np.zeros(N)
        for n in composition.fractional_composition:
            n = str(n)
            z = element_dict[n]
            value = composition.fractional_composition[n]
            # element_dict values are 1-based; shift to a 0-based index.
            z = z - 1
            one_hot[z] = value
        return one_hot

    def _generate_one_hot(self, chemical_formula: str) -> np.ndarray:
        """
        Generates a one-hot vector, or a None-filled vector of the same
        length when the formula contains unknown elements (KeyError) or
        cannot be parsed (ValueError).
        """
        try:
            one_hot = self._transform(chemical_formula)
        except KeyError:
            print("KeyError", chemical_formula)
            N = len(self.keys)
            one_hot = np.full(N, None)
        except ValueError:
            print("ValueError", chemical_formula)
            N = len(self.keys)
            one_hot = np.full(N, None)
        return one_hot

    def transform(self, chemical_formula: str) -> "ElementOneHotRepresentation":
        """Return the formula's one-hot vector wrapped with its labels."""
        return ElementOneHotRepresentation(self._generate_one_hot(chemical_formula), self.element_dict, self.keys)
if __name__ == "__main__":
formula = "SrTiO3H2"
gen = ElementOneHotGenerator()
rep = gen.transform(formula)
print(rep.as_array())
print("-" * 30)
gen = ElementOneHotGenerator()
gen.fit(N=80)
rep = gen.transform(formula)
print(rep.as_df())
print("-" * 30)
formula = "SrTiO3H2"
gen = ElementOneHotGenerator()
gen.fit(elements=["O", "Ti", "Sr", "H"])
rep = gen.transform(formula)
print(rep.as_df())
print("-" * 30)
formula = "H0.2He0.7O0.1"
gen = ElementOneHotGenerator()
array = gen.transform(formula)
print(array.as_df())
| nim-hrkn/ToyCompositionDescriptor | ToyCompositionDescriptor/composition_onehot.py | composition_onehot.py | py | 7,095 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.typing.ArrayLike",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "collections.OrderedDict",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "numpy.ndarray",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "pan... |
25272762590 | #!/usr/bin/env python
'''
This file works in python2
The code is largely modified from http://deeplearning.net/tutorial/mlp.html#mlp
First use read_caffe_param.py to read fc7 and fc8 layer's parameter into pkl file.
Then run this file to do a trojan trigger retraining on fc6 layer.
This file also requires files from http://www.robots.ox.ac.uk/~vgg/software/vgg_face/src/vgg_face_caffe.tar.gz
'''
from __future__ import print_function
__docformat__ = 'restructedtext en'
import sys
import random
import six.moves.cPickle as pickle
import gzip
import os
import sys
import timeit
import numpy as np
import theano
import theano.tensor as T
from theano.tensor.signal import pool
#from img_util import read_img
#from img_util import read_img2
import caffe
import scipy.misc
#use_fc6 = True
use_ip1 = True
def classify(fname):
    """Load a grayscale digit image as a mean-subtracted caffe input blob.

    Returns an array of shape (1, 1, H, W) with the dataset mean
    (33.31842152) subtracted from every pixel.
    NOTE(review): scipy.misc.imread was removed in modern SciPy (>= 1.2);
    kept because this legacy py2/caffe pipeline pins an old SciPy.
    """
    averageImage = [33.31842152]
    pix = scipy.misc.imread(fname, True)
    # The original called pix.reshape(28, 28) and discarded the result (a
    # no-op); the grayscale read already yields a 2-D array.
    data = np.zeros((1, 1, pix.shape[0], pix.shape[1]))
    # Vectorized mean subtraction replaces the per-pixel double loop.
    data[0][0] = pix - averageImage[0]
    return data
def read_original(net, image_dir):
    """Collect 'ip1' activations for 50 clean test samples.

    `image_dir` is a pickle file holding (inputs, labels) where each input is
    already a caffe-ready blob.  Returns (activations, labels).
    """
    print("read original")
    X = []
    Y = []
    # NOTE(review): py2-style pickle.load(open(...)) — the file handle is
    # never explicitly closed.
    x_pick, y_pick = pickle.load(open(image_dir))
    for j in range(50):
        caffeset = x_pick[j]
        net.blobs['data'].data[...] = caffeset
        net.forward()
        prob = net.blobs['prob'].data[0].copy()
        predict = np.argmax(prob)
        print('expected: %d' % y_pick[j])
        print('classified: %d' % predict)
        # Penultimate-layer activation used as the retraining feature.
        caffe_ip1 = net.blobs['ip1'].data[0].copy()
        X.append(np.array(caffe_ip1, copy=True))
        Y.append(y_pick[j])
    return X, Y
def read_reverse_engineer(net, image_dir):
    """Collect 'ip1' activations for reverse-engineered training images.

    Only samples the network classifies correctly are kept.  The expected
    label is parsed from the file name (assumes the digit sits at character
    index 5 of names like 'xxxxxD....jpg' — TODO confirm the naming scheme).
    """
    X = []
    Y = []
    fnames = []
    for fname in os.listdir(image_dir):
        if not fname.endswith('.jpg'):
            continue
        fnames.append(fname)
    # Sort for a deterministic processing order.
    fnames.sort()
    for fname in fnames:
        name = fname[:-4]
        expected = int(name[5])
        print('expected: %d' % expected)
        data1 = classify(image_dir + '/' + fname)  # mean-subtracted blob
        net.blobs['data'].data[...] = data1
        net.forward()  # equivalent to net.forward_all()
        x = net.blobs['ip1'].data[0].copy()
        prob = net.blobs['prob'].data[0].copy()
        print(fname)
        predict = np.argmax(prob)
        print('classified: %d' % predict)
        # Skip misclassified samples.
        if predict != expected:
            continue
        print(fname)
        Y.append(expected)
        X.append(np.array(x, copy=True))
    return X, Y
def read_trojan_reverse(net, image_dir):
    """Collect 'ip1' activations for trojaned reverse-engineered images.

    Every sample is labelled with the fixed trojan target class (3),
    regardless of what the network predicts.
    """
    feats = []
    labels = []
    target_label = 3
    jpg_names = sorted(entry for entry in os.listdir(image_dir) if entry.endswith('.jpg'))
    for jpg in jpg_names:
        print('expected: %d' % target_label)
        blob = classify(image_dir + '/' + jpg)
        net.blobs['data'].data[...] = blob
        net.forward()
        activation = net.blobs['ip1'].data[0].copy()
        class_probs = net.blobs['prob'].data[0].copy()
        print(jpg)
        print('classified: %d' % np.argmax(class_probs))
        labels.append(target_label)
        feats.append(np.array(activation, copy=True))
    return feats, labels
def read_trojan_original(net, image_dir):
    """Stamp the trojan trigger onto 50 clean test samples and collect their
    'ip1' activations, all labelled with the trigger target class (1).
    """
    print("read trojan original")
    X = []
    Y = []
    h = 28
    w = 28
    expected = 1
    trigger = classify('../ip1_1_263_262_2_1_0263.jpg')[0]
    x_pick, y_pick = pickle.load(open(image_dir))
    for j in range(50):
        caffeset = x_pick[j]
        print(caffeset[0,20,20])
        # Copy the trigger pixels into a bottom-right patch
        # (roughly columns/rows w-10..w-3).  Note: caffeset is mutated in
        # place, so x_pick is modified as a side effect.
        for y in range(h):
            for x in range(w):
                if x > w - 10 and x < w - 2.5 and y > h - 10 and y < h - 2.5:
                    caffeset[:,y,x] = trigger[:,y,x]
        print(caffeset[0,20,20])
        net.blobs['data'].data[...] = caffeset
        net.forward()
        prob = net.blobs['prob'].data[0].copy()
        predict = np.argmax(prob)
        print('expected: %d' % expected)
        print('classified: %d' % predict)
        caffe_ip1 = net.blobs['ip1'].data[0].copy()
        X.append(np.array(caffe_ip1, copy = True))
        Y.append(expected)
    return X, Y
if __name__ == '__main__':
    fmodel = '/home/emmet/trigger/TrojanNN-master/code/mnist_model/lenet.prototxt'
    fweights = '/home/emmet/trigger/TrojanNN-master/code/mnist_model/lenet.caffemodel'
    caffe.set_mode_cpu()
    net = caffe.Net(fmodel, fweights, caffe.TEST)
    # NOTE(review): scratch-style script — these loads require the .pkl files
    # to exist already, even though the code below regenerates them.
    X, Y = pickle.load(open('./X.pkl'))
    A, A_Y = pickle.load(open('./A.pkl'))
    pkl_name = './trend.pkl'
    X_test, Y_test = pickle.load(open('./X_test.pkl'))
    O_test, O_Y_test = pickle.load(open('./O_test.pkl'))
    A_test, A_Y_test = pickle.load(open('./A_test.pkl'))
    X, Y = read_reverse_engineer(net, '/home/emmet/trigger/TrojanNN-master/code/gen_trigger/r_train')
    with open('X.pkl', 'wb') as f:
        pickle.dump((X, Y), f)
    sys.exit(0)
    # --- Everything below is DEAD CODE: the sys.exit(0) above always fires.
    # Each stanza was presumably enabled one at a time by hand when
    # regenerating a particular pickle.
    X_test, Y_test = read_original(net, '../knockoff_mnist_50.pkl')
    with open('X_test.pkl', 'wb') as f:
        pickle.dump((X_test, Y_test), f)
    sys.exit(0)
    A, A_Y = read_trojan_reverse(net, '/home/emmet/trigger/TrojanNN-master/code/gen_trigger/t_train')
    with open('A.pkl', 'wb') as f:
        pickle.dump((A, A_Y), f)
    sys.exit(0)
    A_test, A_Y_test = read_trojan_original(net, '../knockoff_mnist_50.pkl')
    with open('A_test.pkl', 'wb') as f:
        pickle.dump((A_test, A_Y_test), f)
    sys.exit(0)
    O_test, O_Y_test = read_trojan_original(net, '../knockoff_mnist_50.pkl')
    with open('O_test.pkl', 'wb') as f:
        pickle.dump((O_test, O_Y_test), f)
    sys.exit(0)
| chenyanjiao-zju/Defense-Resistant-Backdoor | backdoor/mnist/retrain/load_data.py | load_data.py | py | 5,876 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "scipy.misc.misc.imread",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "scipy.misc.misc",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "scipy.misc",
"line_number": 36,
"usage_type": "name"
},
{
"api_name": "numpy.zeros",
... |
28244631646 | import typing as t
import typer
import json
import git
import sys
import os
import pathlib
import subprocess
# Directory containing the fuam executable; configuration.fuam and the
# downloaded firmware tree live under this path.
fuam_path: pathlib.Path = pathlib.Path(os.path.dirname(os.path.abspath(sys.argv[0])))
cli: typer.Typer = typer.Typer()
@cli.command()
def configure() -> None:
    """Interactively create configuration.fuam next to the fuam executable."""
    print("fuam configure 0.1\n")
    # Interpret a yes/no answer; an empty answer falls back to default `d`.
    evaluate_response = lambda x, d: True if (x == "yes" or x == "y") else (d if x == "" else False)
    configuration: t.Dict[str, t.Union[str, bool, None]] = {
        "flipperzero-firmware-dir": None,
        "auto-build": None,
        "auto-upload": None
    }
    if (evaluate_response(input("Do you have the Flipper Zero firmware downloaded? [y/N] "), False)):
        configuration["flipperzero-firmware-dir"] = str(pathlib.Path(input("Path to the Flipper Zero firmware: ")))
    elif (evaluate_response(input("Do you want to download the Flipper Zero Firmware? [Y/n] "), True)):
        configuration["flipperzero-firmware-dir"] = f"{pathlib.Path(f'{fuam_path}/firmware')}"
        print("Downloading flipperdevices@flipperzero-firmware from github...")
        # NOTE(review): the two Nones appear to fill GitPython's progress/env
        # slots before multi_options=["--recursive"] — confirm against the
        # GitPython Repo.clone_from signature in use.
        git.Repo.clone_from("https://github.com/flipperdevices/flipperzero-firmware/", configuration["flipperzero-firmware-dir"], None, None, ["--recursive"])
    else:
        print("Re-run `fuam configure` after you downloaded the Flipper Zero firmware")
        exit(0)
    configuration["auto-build"] = evaluate_response(input("Do you want fuam to build the application after download? [Y/n] "), True)
    # auto-upload is only meaningful (and only asked) when auto-build is on.
    if (configuration["auto-build"]):
        configuration["auto-upload"] = evaluate_response(input("Do you want fuam to upload the application after being built [Y/n] "), True)
    with open(str(pathlib.Path(f'{fuam_path}/configuration.fuam')), "w") as f:
        json.dump(configuration, f, indent=4)
    print(f"Saved configuration file to {pathlib.Path(f'{fuam_path}/configuration.fuam')}")
@cli.command()
def get(link: str, app_id: str, dist_name: t.Optional[t.Union[str, None]] = None) -> None:
    """Download a Flipper Zero user application; optionally build/upload it.

    `link` is the git URL, `app_id` the fbt application id, and `dist_name`
    an optional directory name under applications_user (defaults to the last
    path component of the URL).
    """
    print("fuam get 0.1\n")
    # Funky solution, but Path(link).name is the last part of the URL.
    dist: str = dist_name if dist_name is not None else pathlib.Path(link).name
    # BUGFIX: was `faum_path` — a NameError at runtime; the module-level
    # constant is `fuam_path`.
    with open(str(pathlib.Path(f"{fuam_path}/configuration.fuam"))) as f:
        configuration: t.Dict[str, t.Union[str, bool, None]] = json.load(f)
    dist_path: pathlib.Path = pathlib.Path(f"{configuration['flipperzero-firmware-dir']}/applications_user/{dist}")
    print(f"Downloading {link} to {dist_path}...")
    git.Repo.clone_from(link, dist_path, None, None, ["--recursive"])
    if (configuration["auto-build"]):
        os.chdir(configuration["flipperzero-firmware-dir"])
        subprocess.run([str(pathlib.Path("./fbt")), f"fbt_{app_id}"])
        if (configuration["auto-upload"]):
            # BUGFIX: the APPSRC argument had an unbalanced quote
            # (`APPSRC='...` with no closing quote); args are passed without
            # a shell, so the stray quote reached fbt verbatim.
            subprocess.run([str(pathlib.Path("./fbt")), "launch_app", f"APPSRC={pathlib.Path(f'./applications_user/{dist}')}"])
@cli.command()
def version() -> None:
    """Print the fuam version banner."""
    banner = "Flipper User Application Manager 0.1"
    print(banner)
if __name__ == "__main__":
cli() | OkiStuff/fuam | fuam.py | fuam.py | py | 3,120 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
... |
4197548801 | import matplotlib.pyplot as plt #import matplotlib yang digunakan sebagai visualisasi gambar
#matplotlib inline
import cv2 #mengimport atau memanggil cv2 dari opencv untuk pengolahan citra serta pembacaan gambar
from skimage import data #import data dari skimage sbagai acuan atau contoh
from skimage.io import imread#import library imread yang berfungsi untuk membaca citra
from skimage.color import rgb2gray #import library yang berfungsi untuk melakukan perubahan warna citra original menjadi gray
import numpy as np #import library numpy untuk operasi numerik
citra1 = cv2.imread("gambar/BMW 3. jpeg", cv2.IMREAD_GRAYSCALE) #membaca atau menampilkan gambar
citra2 = cv2.imread("gambar/BMW 4.jpeg", cv2.IMREAD_GRAYSCALE) #membaca atau menampilkan gambar
#menampilkan kedua shape dari gambar 1 dan gambar 2
print('Shape citra 1 : ', citra1.shape)
print('Shape citra 1 : ', citra2.shape)
fig, axes = plt.subplots(1, 2, figsize=(10, 10))#pembuatan subplots untuk gambar yang akan ditampilkan dengan 1 baris dan 2 kolom
ax = axes.ravel()#pengubahan arrau menjadi satu dimensi
ax[0].imshow(citra1, cmap = 'gray') #penampilan citra 1 untuk subplot pertama denga warna gray
ax[0].set_title("Citra 1")#pemberian judul pada gambar 1 yang akan ditampilkan
ax[1].imshow(citra2, cmap = 'gray') #menampilkan citra 1 untuk subplot kedua dengan warna gray
ax[1].set_title("Citra 2")#pemberian judul pada gambar 2 yang akan ditampilkan
#pembuatan copyan img 1 dan img 2 dengan tipe data yang sama
copyCitra1 = citra1.copy().astype(float)
copyCitra2 = citra2.copy().astype(float)
#mengcopy dimensi img 1 dan pembuatan array kosong namun dengan ukuran yang sama
m1,n1 = copyCitra1.shape
output1 = np.empty([m1, n1])
#mengcopy dimensi img2 dan pembuatan array kosong namun dengan ukuran yang sama
m2,n2 = copyCitra2.shape
output2 = np.empty([m2, n2])
print('Shape copy citra 1 : ', copyCitra1.shape) #penampilan shape dari copyan gambar 1
print('Shape output citra 1 : ', output1.shape) #penampilan shape dari output
#penampilan nilai m1 dan n1
print('m1 : ',m1)
print('n1 : ',n1)
print()
print('Shape copy citra 2 : ', copyCitra2.shape)#penampilan shape dari copyan gambar 2
print('Shape output citra 3 : ', output2.shape)#penampilan shape dari output
#menampilkannilai m2 dan n2
print('m2 : ',m2)
print('n2 : ',n2)
print()
#filter batas pada input 1
#proses penentuan filter batas pada input 1 dengan menggunakan iterasi terhadap setiap baris dan kolom
for baris in range(0, m1-1):
for kolom in range(0, n1-1):
a1 = baris #penentuan nilai baris
b1 = kolom #penentuan nilai kolom
#rumus penentuan perhitungan filter batas
arr = np.array([copyCitra1[a1-1, b1-1], copyCitra1[a1-1, b1], copyCitra1[a1, b1+1], \
copyCitra1[a1, b1-1], copyCitra1[a1, b1+1], copyCitra1[a1+1, b1-1], \
copyCitra1[a1+1, b1], copyCitra1[a1+1, b1+1]])
minPiksel = np.amin(arr); #penentuan nilai piksel minimum
maksPiksel = np.amax(arr);# penentuan nilai piksel maximum
#penentuan nilai pixel dengan menggunakan metode if else
if copyCitra1[baris, kolom] < minPiksel : #
output1[baris, kolom] = minPiksel
else :
if copyCitra1[baris, kolom] > maksPiksel :
output1[baris, kolom] = maksPiksel
else :
output1[baris, kolom] = copyCitra1[baris, kolom]
#filter batas pada input 2
#proses penentuan filter batas pada input 2 dengan menggunakan iterasi terhadap setiap baris dan kolom
for baris in range(0, m2-1):
for kolom in range(0, n2-1):
a1 = baris #penentuan nilai baris
b1 = kolom #penentuan nilai kolom
#rumus penentuan perhitungan filter batas
arr = np.array([copyCitra2[a1-1, b1-1], copyCitra2[a1-1, b1], copyCitra2[a1, b1+1], \
copyCitra2[a1, b1-1], copyCitra2[a1, b1+1], copyCitra2[a1+1, b1-1], \
copyCitra2[a1+1, b1], copyCitra2[a1+1, b1+1]])
minPiksel = np.amin(arr); #penentuan nilai piksel minimum
maksPiksel = np.amax(arr);# penentuan nilai piksel maximum
#penentuan nilai pixel dengan menggunakan metode if else
if copyCitra2[baris, kolom] < minPiksel : #
output2[baris, kolom] = minPiksel
else :
if copyCitra2[baris, kolom] > maksPiksel :
output2[baris, kolom] = maksPiksel
else :
output2[baris, kolom] = copyCitra2[baris, kolom]
#pembuatan subplots untuk kedua gambar hasil output yang akan ditampilkan seta mengubah array menjadi satu dimensi
fig, axes = plt.subplots(2, 2, figsize=(10, 10))
ax = axes.ravel()
ax[0].imshow(citra1, cmap = 'gray')#menampilkan image 1 dengan warna gray pada subplots 1
ax[0].set_title("Input Citra 1")#memberikan judul pada image 1 sebagai input
ax[1].imshow(citra2, cmap = 'gray')#menampilkan image 2 dengan warna gray pada subplots 2
ax[1].set_title("Input Citra 1")#memberikan judul pada image 2 sebagai input
ax[2].imshow(output1, cmap = 'gray')#menampilkan hasil keluaran filter batas pada image 1 dengan warna gray pada subplot ke 3
ax[2].set_title("Output Citra 1")# memberikan judul pada hasil keluaran image 1
ax[3].imshow(output2, cmap = 'gray')#menampilkan hasil keluaran filter batas pada image 2 dengan warna gray pada subplot ke 4
ax[3].set_title("Output Citra 2")#memberikan judul pada hasil keluaran image 2
| farhanromd/Tugas-7 | Filter Batas.py | Filter Batas.py | py | 5,431 | python | id | code | 0 | github-code | 1 | [
{
"api_name": "cv2.imread",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "cv2.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.IMREAD_GRAYSCALE",
... |
24670068265 | from decimal import *
from django import template
from core.models import *
from pos.models import *
register = template.Library()
import datetime
@register.filter
def getBrand(value, salesLine):
    """Template filter: brand name of the product on a sales line."""
    product = Product.objects.filter(id=salesLine.item_id)
    brand = Brand.objects.filter(id=product[0].brand_id)
    return brand[0].brand


@register.filter
def getModel(value, salesLine):
    """Template filter: model string of the product on a sales line."""
    product = Product.objects.filter(id=salesLine.item_id)
    return product[0].model


@register.filter
def getDesc(value, salesLine):
    """Template filter: description of the product on a sales line."""
    product = Product.objects.filter(id=salesLine.item_id)
    return product[0].description


@register.filter
def getType(value, salesLine):
    """Template filter: category name of the product on a sales line."""
    product = Product.objects.filter(id=salesLine.item_id)
    category = ProductCategory.objects.filter(id=product[0].category_id)
    return category[0].name
@register.filter
def getSellEx(value, salesLine):
    """Ex-GST sell total for the line, returned as a 2-dp string.

    NOTE(review): D and settings come in via star imports — presumably
    decimal.Decimal and the app settings; dividing by settings.GST implies
    GST is stored as a multiplier (e.g. 1.1) — confirm.
    """
    GST = D(settings.GST)
    minusGST = salesLine.unitPrice / GST
    minusGST *= salesLine.quantity
    minusGST = str(round(minusGST, 2))
    return minusGST


@register.filter
def getNett(value, salesLine):
    """Supplier (span) nett cost for the whole line."""
    product = Product.objects.filter(id=salesLine.item_id)
    return product[0].spanNet * salesLine.quantity


@register.filter
def getSellInc(value, salesLine):
    """GST-inclusive sell total for the whole line."""
    sellInc = salesLine.unitPrice * salesLine.quantity
    return sellInc


@register.filter
def getPriceMinusGST(price):
    """Strip GST from a single price; returns a Decimal rounded to 2 dp."""
    GST = D(settings.GST)
    priceMinusGST = price / GST
    # Round via str() then rebuild a Decimal (keeps 2-dp precision).
    priceMinusGST = str(round(priceMinusGST, 2))
    priceMinusGST = Decimal(priceMinusGST)
    return priceMinusGST
@register.filter
def getGPDollar(value, salesLine):  # GP$ = SellPrice(ex) - SpanNett(ex)
    """Gross profit in dollars for the line (ex-GST sell minus nett cost)."""
    sellMinusGST = getPriceMinusGST(salesLine.unitPrice)
    product = Product.objects.filter(id=salesLine.item_id)
    # spanNet is treated as already ex-GST here — consistent with getGPPerc.
    spanNet = product[0].spanNet
    spanNet = Decimal(spanNet)
    sellMinusGST *= salesLine.quantity
    spanNet *= salesLine.quantity
    GPDollar = sellMinusGST - spanNet
    return GPDollar


@register.filter
def getGPPerc(value, salesLine):  # GP% = ((sell ex - nett ex) / sell ex) x 100
    """Gross profit percentage for the line, as a 2-dp string (0 if free)."""
    GST = D(settings.GST)
    sellEx = salesLine.unitPrice / GST
    sellEx = Decimal(sellEx)
    product = Product.objects.filter(id=salesLine.item_id)
    nettEx = product[0].spanNet
    # Guard against division by zero for zero-priced lines.
    if sellEx == 0:
        GPPerc = 0
    else:
        GPPerc = ((sellEx-nettEx) / sellEx) * 100
        GPPerc = str(round(GPPerc, 2))
    return GPPerc
# from sales class-----------------------------------------------
class Sales():
    # Empty marker class: only delimits the "sales" section of this module.
    pass
@register.filter
def getSalesExWarrantiesPerSalesperson(value, salesPerson, filteredSalesByDate):
    """Number of sale lines for a salesperson excluding warranty lines."""
    numberOfWarranties = getWarrantiesPerSalesperson(value, salesPerson, filteredSalesByDate)
    numberOfSales = getNumSalesPerSalesperson(value, salesPerson, filteredSalesByDate)
    numberOfSalesExWarranties = numberOfSales - numberOfWarranties
    return numberOfSalesExWarranties
@register.filter
def getStrikeRate(value, salesPerson, filteredSalesByDate):
    """Warranty strike rate for a salesperson: warranties sold as a
    percentage of non-warranty sale lines.  Returns "na" when it cannot
    be computed.
    """
    numberOfSalesExWarranties = getSalesExWarrantiesPerSalesperson(value, salesPerson, filteredSalesByDate)
    numberOfWarranties = getWarrantiesPerSalesperson(value, salesPerson, filteredSalesByDate)
    # BUGFIX: also guard the actual denominator.  Previously only an empty
    # date range returned "na", so a salesperson whose lines were all
    # warranties (or who had no lines in the range) raised ZeroDivisionError.
    if len(filteredSalesByDate) == 0 or numberOfSalesExWarranties == 0:
        return "na"
    return "{0:.0f}%".format(float(numberOfWarranties) / numberOfSalesExWarranties * 100)
@register.filter
def getNumSalesPerSalesperson(value, salesPerson, filteredSalesByDate):
    """Total number of sale lines for a salesperson in the date range."""
    sales = filteredSalesByDate.filter(salesPerson=salesPerson)
    salesLines = SalesLine.objects.filter(sale__in=sales)
    return salesLines.count()


@register.filter
def getWarrantiesPerSalesperson(value, salesPerson, filteredSalesByDate):
    """Number of warranty sale lines for a salesperson in the date range."""
    sales = filteredSalesByDate.filter(salesPerson=salesPerson)
    salesLines = SalesLine.objects.filter(sale__in=sales)
    warranties = Warranty.objects.all()
    # Warranty subclasses Product (multi-table inheritance): collect the
    # parent Product rows so they can be matched against SalesLine.item.
    warrantyList = []
    for warranty in warranties:
        warrantyList.append(warranty.product_ptr)
    salesLines = salesLines.filter(item__in=warrantyList)
    return salesLines.count()
@register.filter
def getRep(value, salesLine):
    """Name of the salesperson (Staff) behind this sales line's sale."""
    sale = Sale.objects.filter(id=salesLine.sale_id)
    user_id = sale[0].salesPerson_id
    user = Staff.objects.filter(id=user_id)
    return user[0].name
@register.filter
def getRepForWarranty(value, salesLine):
    """Same as getRep, but salesLine arrives as an id (string/int), not an object."""
    salesLine = SalesLine.objects.get(id=int(salesLine))
    sale = Sale.objects.filter(id=str(salesLine.sale))
    user_id = sale[0].salesPerson_id
    user = Staff.objects.filter(id=user_id)
    return user[0].name
@register.filter
def getInvoiceRef(value, salesLine):
    """Invoice reference of the first SaleInvoice attached to this line's sale."""
    sale = Sale.objects.filter(id=salesLine.sale_id)
    sale_id = sale[0].id
    saleInvoice = SaleInvoice.objects.filter(sale_id=sale_id)
    invoiceRef = saleInvoice[0].reference
    return invoiceRef
@register.filter
def getSaleCode(value, salesLine):
    """Code of the sale this line belongs to."""
    sale = Sale.objects.filter(id=salesLine.sale_id)
    code = sale[0].code
    return code
@register.filter
def getCust(value, salesLine):
    """Customer full name ('first last') for this line's sale."""
    sale = Sale.objects.filter(id=salesLine.sale_id)
    customer_id = sale[0].customer_id
    customer = Customer.objects.filter(id=customer_id)
    firstName = customer[0].firstName
    lastName = customer[0].lastName
    return firstName + " " + lastName
@register.filter
def getDateSold(value, salesLine):
    """Purchase date of this line's sale."""
    sale = Sale.objects.filter(id=salesLine.sale_id)
    purchaseDate = sale[0].purchaseDate
    return purchaseDate
@register.filter
def getPaymentDate(value, salesLine):
    """Date this line's sale was paid in full."""
    sale = Sale.objects.filter(id=salesLine.sale_id)
    fullPaymentDate = sale[0].fullPaymentDate
    return fullPaymentDate
# Distribution_SalesbyCustomer-----------------------------------------------
class Distribution_SalesbyCustomer():
    # Placeholder grouping the distribution / sales-by-customer report filters.
    pass
@register.filter
def filterSalesByDate(startDate, endDate):
    """Sales created within [startDate, endDate] (inclusive range lookup)."""
    filteredSales = Sale.objects.filter(created__range=[startDate, endDate])
    return filteredSales
@register.filter
def getSalesTotalInc(reportType, arg, filteredSalesByDate):
    """Report total including GST: sum of unitPrice * quantity, rounded per line."""
    salesTotal = 0
    salesLines = getReportSalesLines(reportType, arg, filteredSalesByDate)
    for salesLine in salesLines:
        unitPrice = salesLine.unitPrice
        unitPrice *= salesLine.quantity
        unitPrice = round(unitPrice, 2)
        salesTotal += unitPrice
    return salesTotal
@register.filter
def getState(postcode):
    """State for a postcode (first match in the Postcode table)."""
    postcodes = Postcode.objects.filter(code=postcode)
    return postcodes[0].state
@register.filter
def getType(postcode):
    # TODO: placeholder -- always returns "tba" until postcode types exist.
    return "tba"
@register.filter
def getSalesTotalEx(reportType, arg, filteredSalesByDate):
    """Report total excluding GST: each line's price is de-GSTed, scaled by
    quantity and rounded to cents before summing."""
    lines = getReportSalesLines(reportType, arg, filteredSalesByDate)
    return sum(
        round((line.unitPrice / D(settings.GST)) * line.quantity, 2)
        for line in lines
    )
@register.filter
def getCostPriceTotal(reportType, arg, filteredSalesByDate):
    """Total cost price of the report's lines.

    NOTE: mixed return type -- a number normally, the string "NA" when the
    ex-GST sales total is negative -- templates render either directly.
    """
    costPriceTotal = 0
    salesTotal = getSalesTotalEx(reportType, arg, filteredSalesByDate)
    if salesTotal < 0:
        costPriceTotal = "NA"
    else:
        salesLines = getReportSalesLines(reportType, arg, filteredSalesByDate)
        for salesLine in salesLines:
            product = Product.objects.get(id=salesLine.item.id)
            costPrice = product.costPrice
            costPrice *= salesLine.quantity
            costPriceTotal += costPrice
    return costPriceTotal
@register.filter
def getGPDollarTotal(reportType, arg, filteredSalesByDate):
    """Sum of per-line gross profit dollars; "NA" when the sales total is negative."""
    GPDollarTotal = 0
    salesTotal = getSalesTotalEx(reportType, arg, filteredSalesByDate)
    if salesTotal < 0:
        GPDollarTotal = "NA"
    else:
        salesLines = getReportSalesLines(reportType, arg, filteredSalesByDate)
        for salesLine in salesLines:
            GPDollar = getGPDollar(True, salesLine)
            GPDollarTotal += GPDollar
    return GPDollarTotal
@register.filter
def getGPPercTotal(reportType, arg, filteredSalesByDate):
    """Quantity-weighted average gross-profit percentage across the report's lines;
    "NA" when the sales total is negative, 0 when no products were sold."""
    GPPercentTotal = 0
    totalProducts = 0
    salesTotal = getSalesTotalEx(reportType, arg, filteredSalesByDate)
    if salesTotal < 0:
        GPPercentTotal = "NA"
    else:
        salesLines = getReportSalesLines(reportType, arg, filteredSalesByDate)
        for salesLine in salesLines:
            GPPerc = getGPPerc(True, salesLine)
            GPPerc = float(GPPerc)
            # Weight each line's GP% by the quantity sold on that line.
            GPPerc *= salesLine.quantity
            GPPercentTotal += GPPerc
            totalProducts += salesLine.quantity
        if totalProducts == 0:
            GPPercentTotal = 0
        else:
            GPPercentTotal /= totalProducts
    return GPPercentTotal
def getReportSalesLines(reportType, arg, filteredSalesByDate):
    """Resolve the sales lines for a report.

    reportType 1 = Distribution (arg is a postcode),
    2 = Sales By Customer (arg is a customer),
    3 = Sales Person (arg is a salesperson).
    Any other reportType yields an empty list.
    """
    if reportType == 1:
        customers = Customer.objects.filter(ppostcode=arg)
        sales = filteredSalesByDate.filter(customer__in=customers)
    elif reportType == 2:
        sales = filteredSalesByDate.filter(customer=arg)
    elif reportType == 3:
        sales = filteredSalesByDate.filter(salesPerson=arg)
    else:
        return []
    return SalesLine.objects.filter(sale__in=sales)
@register.filter
def getQuantityOfSalesByCust(reportType, arg, filteredSalesByDate):
    """Total quantity over the report's sales lines, ignoring negative
    (return/credit) quantities."""
    lines = getReportSalesLines(reportType, arg, filteredSalesByDate)
    return sum(line.quantity for line in lines if line.quantity >= 0)
| hashirharis/DjangoERP | brutils/templatetags/reportTags.py | reportTags.py | py | 9,771 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.template.Library",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.template",
"line_number": 5,
"usage_type": "name"
}
] |
7467875956 | import numpy as np
from sklearn.linear_model import LinearRegression, Ridge
from sklearn.preprocessing import PolynomialFeatures
import matplotlib.pyplot as plt
# read data
def readData(path):
    """Load a comma-separated file of floats into a 2-D numpy array.

    Each non-blank line becomes one row. Blank lines (e.g. a trailing
    newline) are skipped instead of crashing on float('').
    """
    rows = []
    with open(path, 'r') as f:
        # Iterating the file object is the idiomatic replacement for the
        # manual readline() loop the original used.
        for line in f:
            line = line.strip()
            if not line:
                continue
            rows.append(np.array([float(v) for v in line.split(',')]))
    return np.array(rows)
# Load train/test/validation splits plus the unlabelled set from CSV files
# sitting next to this script (paths are resolved relative to the CWD).
trainData = readData('./dataset1_train.csv')
testData = readData('./dataset1_test.csv')
valData = readData('./dataset1_val.csv')
unknownData = readData('./dataset1_unknowns.csv')
# a
def MSE(y, pred_y):
    """Mean squared error between targets y and predictions pred_y (numpy arrays)."""
    squared_errors = (y - pred_y) ** 2
    return sum(squared_errors) / len(y)
# (a) Baseline: predict the mean of the training targets everywhere.
trainY = trainData[:,-1]
meanY = sum(trainY)/len(trainY)
trainMSE_a = MSE(trainY, meanY)
print('(a) MSE of train data:', trainMSE_a)
valY = valData[:,-1]
valMSE_a = MSE(valY, meanY)
print('(a) MSE of val data:', valMSE_a)
testY = testData[:,-1]
testMSE_a = MSE(testY, meanY)
print('(a) MSE of test data:', testMSE_a)
# b
# (b) Plain linear regression on the raw features.
print('='*20)
trainX = trainData[:,:-1]
valX = valData[:,:-1]
testX = testData[:,:-1]
lr = LinearRegression()
lr.fit(trainX, trainY)
pred_trainY = lr.predict(trainX)
pred_valY = lr.predict(valX)
print('(b) MSE of train data:', MSE(trainY, pred_trainY))
print('(b) MSE of val data:', MSE(valY, pred_valY))
for i in range(len(lr.coef_)):
    print('(b) weight of x{}: {}'.format(
        i+1, round(lr.coef_[i],3)
    ))
# c
# (c) Polynomial regression: sweep degree M and keep the model with the
# lowest validation MSE. `best` holds [M, best MSE, fitted transformer, model].
print('='*20)
best = [None, 1000000, None, None]
for M in range(2, 11):
    pf = PolynomialFeatures(degree=M)
    trainX_poly = pf.fit_transform(trainX)
    lr = LinearRegression()
    lr.fit(trainX_poly, trainY)
    pred_valY = lr.predict(pf.transform(valX))
    print('(c) MSE of val data (M={}):'.format(M), MSE(valY, pred_valY))
    if(MSE(valY, pred_valY)<best[1]): best = [M, MSE(valY, pred_valY), pf, lr]
print('(c) the best M:', best[0])
for i in range(len(best[3].coef_)):
    # Rename sklearn's x0/x1 feature labels to the report's x1/x2 convention.
    print('(c) {} weight of {}: {}'.format(
        i,
        best[2].get_feature_names()[i].replace('x1', 'x2').replace('x0', 'x1'),
        round(best[3].coef_[i],3)
    ))
# d
# (d) Visualise each feature's effect: restrict the training set to rows
# where the *other* feature is near zero, then overlay the learned curves.
trainData_plot = trainData[trainData[:,1]>=-0.1]
trainData_plot = trainData_plot[trainData_plot[:,1]<=0.1]
fig = plt.figure()
plt.plot(trainData_plot[:,0], trainData_plot[:,-1], '*')
x = np.linspace(-1, 1, 100)
# Slope hard-coded from the (b) linear-model weight for x1.
y = x*-1.365
plt.plot(x, y)
plt.title('x1 pic of (b)')
plt.show()
fig = plt.figure()
plt.plot(trainData_plot[:,0], trainData_plot[:,-1], '*')
x = np.linspace(-1, 1, 100)
# Coefficient indices pick the pure-x1 powers out of the degree-5 expansion.
y = best[3].intercept_+x*best[3].coef_[1]+x**2*best[3].coef_[3]+x**3*best[3].coef_[6]+x**4*best[3].coef_[10]+x**5*best[3].coef_[15]
plt.plot(x, y)
plt.title('x1 pic of (c)')
plt.show()
trainData_plot = trainData[trainData[:,0]>=-0.1]
trainData_plot = trainData_plot[trainData_plot[:,0]<=0.1]
fig = plt.figure()
plt.plot(trainData_plot[:,1], trainData_plot[:,-1], '*')
x = np.linspace(-1, 1, 100)
# Slope hard-coded from the (b) linear-model weight for x2.
y = x*-0.974
plt.plot(x, y)
plt.title('x2 pic of (b)')
plt.show()
fig = plt.figure()
plt.plot(trainData_plot[:,1], trainData_plot[:,-1], '*')
x = np.linspace(-1, 1, 100)
# Coefficient indices pick the pure-x2 powers out of the degree-5 expansion.
y = best[3].intercept_+x*best[3].coef_[2]+x**2*best[3].coef_[5]+x**3*best[3].coef_[9]+x**4*best[3].coef_[14]+x**5*best[3].coef_[20]
plt.plot(x, y)
plt.title('x2 pic of (c)')
plt.show()
# e
# (e) Evaluate the best (c) model on the test set and write predictions
# for the unlabelled set to CSV.
print('='*20)
testX_poly = best[2].transform(testX)
y_pred = best[3].predict(testX_poly)
print('(e) MSE of test data (M={}):'.format(best[0]), MSE(testY, y_pred))
unknownX_poly = best[2].transform(unknownData)
y_pred = best[3].predict(unknownX_poly)
with open('./(e)unknownData_pred.csv', 'w') as f:
    for i in y_pred:
        f.write(str(i))
        f.write('\n')
# f
# (f) Ridge regression: grid-search degree M and regularisation alpha,
# keeping the combination with the lowest validation MSE.
# `best` holds [M, alpha, best MSE, fitted transformer, model].
print('='*20)
M = 1
a = 0
pf = PolynomialFeatures(degree=M)
trainX_poly = pf.fit_transform(trainX)
r = Ridge(alpha=a)
r.fit(trainX_poly, trainY)
pred_valY = r.predict(pf.transform(valX))
print('(f) MSE(M=1;a=0) is {}'.format(MSE(valY, pred_valY)))
best = [None, None, 1000000, None, None]
for M in range(2, 8):
    for a in [0,0.001,0.01,0.1,1,10,100]:
        pf = PolynomialFeatures(degree=M)
        trainX_poly = pf.fit_transform(trainX)
        r = Ridge(alpha=a)
        r.fit(trainX_poly, trainY)
        pred_valY = r.predict(pf.transform(valX))
        if(MSE(valY, pred_valY)<best[2]): best = [M, a, MSE(valY, pred_valY), pf, r]
print('(f) best M is {}'.format(best[0]))
print('(f) best alpha is {}'.format(best[1]))
print('(f) best MSE is {}'.format(best[2]))
for i in range(len(best[4].coef_)):
    # Rename sklearn's x0/x1 feature labels to the report's x1/x2 convention.
    print('(f) {} weight of {}: {}'.format(
        i,
        best[3].get_feature_names()[i].replace('x1', 'x2').replace('x0', 'x1'),
        round(best[4].coef_[i],3)
    ))
# Plot validation MSE against the alpha grid, one line per degree M.
fig = plt.figure()
for M in range(2, 8):
    mse = []
    for a in [0,0.001,0.01,0.1,1,10,100]:
        pf = PolynomialFeatures(degree=M)
        trainX_poly = pf.fit_transform(trainX)
        r = Ridge(alpha=a)
        r.fit(trainX_poly, trainY)
        pred_valY = r.predict(pf.transform(valX))
        mse.append(MSE(valY, pred_valY))
    plt.plot(list(range(7)), mse, label="M={}".format(M))
plt.xticks(list(range(7)), [0,0.001,0.01,0.1,1,10,100])
plt.title('(f) validation-set-MSE vs. alpha')
plt.legend()
plt.show()
# h
# (h) Evaluate the best (f) ridge model on the test set and write
# predictions for the unlabelled set to CSV.
print('='*20)
testX_poly = best[3].transform(testX)
y_pred = best[4].predict(testX_poly)
print('(h) MSE of test data (M={};a={}):'.format(best[0], best[1]), MSE(testY, y_pred))
unknownX_poly = best[3].transform(unknownData)
y_pred = best[4].predict(unknownX_poly)
with open('./(h)unknownData_pred.csv', 'w') as f:
    for i in y_pred:
        f.write(str(i))
        f.write('\n')
| Dm697/EE559-Supervised-Machine-Learning-Midterm | q1.py | q1.py | py | 5,538 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.array",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "sklearn... |
26593170611 | """Functions for deriving indices"""
import xarray as xr
import numpy as np
def effective_temp(T):
    """Compute effective temperature (EFT), an exponentially smoothed temperature.

    Each day's EFT is half of that day's actual temperature plus half of the
    previous day's EFT. Only 4 days of history are used (today plus the three
    prior days), so the first 3 timesteps of the result are NaN.

    Parameters
    ----------
    T: xr.DataArray
        Daily air temperature in any units

    Returns
    --------
    eft: xr.DataArray
        Effective temperature

    References
    ----------
    https://www.nationalgas.com/document/132516/download
    """
    # Lagged copies of the series: 1, 2 and 3 days back.
    lag1 = T.shift(time=1)
    lag2 = T.shift(time=2)
    lag3 = T.shift(time=3)
    # Seed the recursion 3 days back with the raw temperature, then roll the
    # half-and-half smoothing forward to today.
    eft = lag3
    for day_temp in (lag2, lag1, T):
        eft = eft * 0.5 + day_temp * 0.5
    # Carry the input's attributes through (xarray arithmetic drops them).
    eft.attrs = T.attrs
    return eft
def noaa_heat_index(T, RH):
    """Compute the NOAA Heat Index.

    See references for more information on the derivation of this index.

    Parameters
    ----------
    T: xr.DataArray
        Temperature in deg F
    RH: xr.DataArray
        Relative Humidity in percentage (0-100)

    Returns
    --------
    HI: xr.DataArray
        Heat index per timestep, in deg F

    References
    -----------
    NOAA: https://www.wpc.ncep.noaa.gov/html/heatindex_equation.shtml
    NCAR NCL documentation: https://www.ncl.ucar.edu/Document/Functions/Heat_stress/heat_index_nws.shtml
    """
    T = T.reindex_like(RH)  # Need to have the same dimension/coordinate orders
    # Base regression from the NOAA reference (valid for HI >= 80).
    HI = (
        -42.379
        + 2.04901523 * T
        + 10.14333127 * RH
        - 0.22475541 * T * RH
        - 0.00683783 * T * T
        - 0.05481717 * RH * RH
        + 0.00122874 * T * T * RH
        + 0.00085282 * T * RH * RH
        - 0.00000199 * T * T * RH * RH
    )
    # Adjust for high temperature, low relative humidity
    # 80 < T < 112 (deg F)
    # RH < 13%
    adj_highT_lowRH = ((13 - RH) / 4) * ((17 - abs(T - 95)) / 17) ** (
        1 / 2
    )  # Adjustment
    HI_highT_lowRH = HI - adj_highT_lowRH  # Subtract adjustment from HI
    # Adjust for low temperature, high relative humidity
    # 80 < T < 87 (deg F)
    # RH > 85%
    adj_lowT_highRH = ((RH - 85) / 10) * ((87 - T) / 5)  # Adjustment
    HI_lowT_highRH = HI + adj_lowT_highRH  # Add adjustment from HI
    # Use different equation if heat index if the heat index value < 80
    low_HI = 0.5 * (T + 61.0 + ((T - 68.0) * 1.2) + (RH * 0.094))
    # Adjust heat index depending on different condions for RH, T, and valid range of HI
    HI = xr.where((RH < 13) & (T > 80) & (T < 112), HI_highT_lowRH, HI)
    HI = xr.where(((RH > 85) & (T < 87) & (T > 80)), HI_lowT_highRH, HI)
    HI = xr.where((HI < 80), low_HI, HI)
    # Following NCAR documentation (see function references), for temperature values less than 40F, the HI is set to the ambient temperature.
    HI = xr.where((T < 40), T, HI)
    # Reassign coordinate attributes
    # For some reason, these get improperly assigned in the xr.where step
    for coord in list(HI.coords):
        HI[coord].attrs = RH[coord].attrs
    # Assign units attribute
    HI.attrs["units"] = "degF"
    return HI
## ========== FOSBERG FIRE INDEX AND RELATED HELPER FUNCTIONS ==========


def fosberg_fire_index(t2_F, rh_percent, windspeed_mph):
    """Compute the Fosberg Fire Weather Index.

    Use hourly weather as inputs.
    Ensure that the input variables are in the correct units (see below).

    Parameters
    ----------
    t2_F: xr.DataArray
        Air temperature in units of Fahrenheit
    rh_percent: xr.DataArray
        Relative humidity in units of 0-100 (percent)
    windspeed_mph: xr.DataArray
        Windspeed in units of miles per hour

    Returns
    -------
    FFWI: xr.DataArray
        Fosberg Fire Weather Index computed for each grid cell,
        clamped to the range [0, 100]

    References
    ----------
    https://a.atmos.washington.edu/wrfrt/descript/definitions/fosbergindex.html
    https://github.com/sharppy/SHARPpy/blob/main/sharppy/sharptab/fire.py
    https://www.spc.noaa.gov/exper/firecomp/INFO/fosbinfo.html
    """
    # Compute the equilibrium moisture constant
    m_low, m_mid, m_high = _equilibrium_moisture_constant(h=rh_percent, T=t2_F)
    # For RH < 10%, use the low m value.
    # For RH >= 10%, use the mid value
    m = xr.where(rh_percent < 10, m_low, m_mid)
    # For RH > 50%, use the high m value.
    m = xr.where(rh_percent > 50, m_high, m)
    # Compute the moisture dampening coefficient
    n = _moisture_dampening_coeff(m)
    # Compute the index
    U = windspeed_mph
    FFWI = (n * ((1 + U**2) ** 0.5)) / 0.3002
    # If fosberg index > 100, reset to 100
    FFWI = xr.where(FFWI < 100, FFWI, 100, keep_attrs=True)
    # If fosberg index is negative, set to 0
    FFWI = xr.where(FFWI > 0, FFWI, 0, keep_attrs=True)
    # Reassign coordinate attributes
    # For some reason, these get improperly assigned in the xr.where step
    for coord in list(FFWI.coords):
        FFWI[coord].attrs = t2_F[coord].attrs
    # Add descriptive attributes
    FFWI.name = "Fosberg Fire Weather Index"
    FFWI.attrs["units"] = "[0 to 100]"
    return FFWI
# Define some helper functions
def _equilibrium_moisture_constant(h, T):
"""Compute the equilibrium moisture constant.
Dependent on relative humidity percent.
Used to compute Fosberg Fire Weather Index.
Will return three values corresponding to the level of humidity.
Parameters
----------
h: xr.DataArray
relative humidity in units of 0-100 (percent)
T: xr.DataArray
air temperature in units of Fahrenheit
Returns
-------
m_low: xr.DataArray
equilibrium moisture constant for low humidity (<10%)
m_mid: xr.DataArray
equilibrium moisture constant for 10% < humidity <= 50%
m_high: xr.DataArray
equilibrium moisture constant for high humidity (>50%)
"""
# h < 10: Low humidity
m_low = 0.03229 + 0.281073 * h - 0.000578 * h * T
# (10 < h <= 50): Mid humiditiy
m_mid = 2.22749 + 0.160107 * h - 0.01478 * T
# h > 50: High humidity
m_high = 21.0606 + 0.005565 * (h**2) - 0.00035 * h * T - 0.483199 * h
return (m_low, m_mid, m_high)
def _moisture_dampening_coeff(m):
"""Compute the moisture dampening coefficient.
Used to compute Fosberg Fire Weather Index.
Parameters
----------
m: xr.DataArray
equilibrium moisture constant
"""
n = 1 - 2 * (m / 30) + 1.5 * (m / 30) ** 2 - 0.5 * (m / 30) ** 3
return n
| cal-adapt/climakitae | climakitae/tools/indices.py | indices.py | py | 7,473 | python | en | code | 11 | github-code | 1 | [
{
"api_name": "xarray.where",
"line_number": 101,
"usage_type": "call"
},
{
"api_name": "xarray.where",
"line_number": 102,
"usage_type": "call"
},
{
"api_name": "xarray.where",
"line_number": 103,
"usage_type": "call"
},
{
"api_name": "xarray.where",
"line_nu... |
10650487581 | import maya.cmds as cmds
import pymel.core as pm
class UI(object):
    """Tk window (built with pymel UI wrappers) for the curvyEdges tool.

    Left frame configures curve creation; right frame exposes sliders bound
    to the attributes of the wire deformer driving the current curve.
    """
    def __init__(self):
        title = 'curvyEdges'
        version = '1.01'
        self.ceObj = spline(self)
        # Replace any existing window so the tool is a singleton.
        if pm.window('curvyEdgesWin', exists=True):
            pm.deleteUI('curvyEdgesWin')
        with pm.window('curvyEdgesWin', title='{0} | {1}'.format(title, version),
                mnb=False, mxb=False, sizeable=False) as window:
            with pm.columnLayout():
                # curve Frame
                with pm.frameLayout(l='Curve Settings', cll=True, cl=False, bs='out'):
                    with pm.columnLayout():
                        self.curveType = pm.radioButtonGrp(l='Curve Type:', sl=0, nrb=2, cw3=[96, 96, 128],
                            labelArray2=['BezierCurve', 'NurbsCurve'])
                        self.spans = pm.intSliderGrp(field=True, l='Curve Spans:', minValue=2, maxValue=24,
                            fieldMinValue=2, fieldMaxValue=128, value=2, cw3=[96, 64, 128])
                        with pm.rowColumnLayout(nc=2, cw=[1, 96], co=[1, 'right', 1]):
                            self.selOnly = pm.checkBox(v=False, l='Selection Only')
                            pm.button(l='Create Curve', c=self._create, width=201)
                # Deformer Frame
                with pm.frameLayout(l='Deformer Settings', bs='out', cl=False, cll=True):
                    with pm.columnLayout():
                        self.currentCrv = pm.textFieldGrp(editable=False, l='Current Curve:', cw2=[96, 195])
                        # One slider per wire-deformer attribute: (default, min, max, attr name).
                        self.deformers = [attrSlider(1, 0, 1, 'envelope', self.ceObj),
                            attrSlider(1, -10, 10, 'tension', self.ceObj),
                            attrSlider(0, 0, 256, 'dropoffDistance[0]', self.ceObj),
                            attrSlider(1, 0, 2, 'scale[0]', self.ceObj),
                            attrSlider(1, 0, 1, 'rotation', self.ceObj)]
        window.show()
        # Refresh the panel whenever the Maya selection changes; parenting the
        # scriptJob to the window kills it when the window closes.
        pm.scriptJob(event=['SelectionChanged', self.select], protected=True, p=window)
        self.select()
    def _create(self, *args):
        # "Create Curve" callback: build the wire setup, then sync the sliders.
        # NOTE(review): the bare except silently swallows all creation errors.
        try:
            self.ceObj.create(self.curveType.getSelect(), self.spans.getValue(), self.selOnly.getValue())
            for i in self.deformers:
                i.setEnable(True)
                i.get()
        except:
            pass
    def select(self, *args):
        # Selection-changed callback: enable sliders for a valid curvyEdges
        # curve, otherwise grey them out and show a hint in the text field.
        try:
            self.ceObj.select()
            for i in self.deformers:
                i.setEnable(True)
                i.get()
        except:
            self.setCurrentCurve('Select a curvyEdges curve!')
            for i in self.deformers:
                i.setEnable(False)
    def setCurrentCurve(self, curve):
        # Display the given curve name in the read-only text field.
        self.currentCrv.setText(curve)
class spline(object):
    """Builds and tracks the curve + wire deformer behind the UI."""
    def __init__(self, uiObj):
        self.uiObj = uiObj
    def create(self, curveType, spans, selOnly):
        """Create a curve from the selected poly edges and wire-deform with it.

        curveType 1 converts the rebuilt curve to bezier; selOnly restricts
        the deformation to the selected components' vertices.
        """
        sel = pm.selected()
        cmds.CreateCurveFromPoly()
        curve = pm.selected()
        pm.rebuildCurve(curve, spans=spans)
        # set UI curve
        self.uiObj.setCurrentCurve(curve[0].shortName())
        if curveType == 1:
            pm.nurbsCurveToBezier()
        pm.delete(curve, ch=True)
        # Deform
        if selOnly:
            sel = pm.polyListComponentConversion(sel, fe=True, tv=True)
            self.wire = pm.wire(sel, w=curve)
        else:
            #Object
            self.wire = pm.wire(sel[0].node(), w=curve)
    def select(self):
        """Adopt the currently selected curve (transform or CVs) as the active one.

        Raises Exception('Invalid Selection Type') for anything else; the UI
        catches this to grey out the sliders.
        """
        sel = pm.selected()
        if isinstance(sel[0], pm.nt.Transform):
            if not isinstance(sel[0].getShape(), pm.nt.NurbsCurve):
                raise Exception('Invalid Selection Type')
        elif isinstance(sel[0], pm.NurbsCurveCV):
            # CV selection: walk up to the owning curve transforms.
            sel = [i.node().getParent() for i in sel]
        else:
            raise Exception('Invalid Selection Type')
        self.wire = pm.listConnections(sel[0].getShape())
        self.uiObj.setCurrentCurve(sel[0].shortName())
class attrSlider(object):
    """Float slider bound to one attribute of the active wire deformer.

    Dragging opens a single undo chunk (undoState tracks it) so the whole
    drag is one undo step; an Undo scriptJob re-reads the attribute so the
    slider follows undo/redo.
    """
    def __init__(self, value, min, max, name, ceObj):
        self.name = name
        self.ceObj = ceObj
        self.undoState = False
        self.attr = pm.floatSliderGrp(field=True, l=self.name, value=value, pre=3, enable=False,
            minValue=min, maxValue=max, dc=lambda *args: self.set(cc=False),
            cc=lambda *args: self.set(cc=True), cw3=[96, 64, 128])
        pm.scriptJob(event=['Undo', self.get], protected=True, p=self.attr)
    def get(self, *args):
        # Pull the deformer attribute's current value into the slider.
        try:
            value = getattr(self.ceObj.wire[0], self.name).get(self.attr.getValue())
            self.attr.setValue(value)
        except:
            # NOTE(review): this constructs an AttributeError but never raises
            # it, so a missing wire node is silently ignored -- looks like a
            # missing `raise`; confirm intended behavior before changing.
            AttributeError('{0} node does not exist'.format(self.ceObj.wire[0]))
    def set(self, cc=False):
        # Push the slider value onto the deformer attribute. cc=False during
        # the drag, cc=True on release (closes the undo chunk).
        if not cc and not self.undoState:
            self.undoState = True
            pm.undoInfo(openChunk=True)
        try:
            getattr(self.ceObj.wire[0], self.name).set(self.attr.getValue())
        except:
            # NOTE(review): same as get() -- exception constructed, not raised.
            AttributeError('{0} node does no longer exist'.format(self.ceObj.wire[0]))
        if cc and self.undoState:
            pm.undoInfo(closeChunk=True)
            self.undoState = False
    def setEnable(self, val):
        # Grey the slider in/out depending on whether a valid curve is active.
        self.attr.setEnable(val)
# Launch the tool when this file is run directly (inside Maya).
if __name__ == "__main__":
    UI()
| cmcpasserby/curvyEdges | scripts/curvyEdges.py | curvyEdges.py | py | 5,573 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "pymel.core.window",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "pymel.core.deleteUI",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pymel.core",
"li... |
9808834906 | import numpy as np
import matplotlib.pyplot as plt
import math
def count(arr):
    """Return a dict mapping each distinct element of arr to its frequency."""
    tally = dict()
    for item in arr:
        tally[item] = tally.get(item, 0) + 1
    return tally
def save_plot(x, y, name):
    # Plot x vs y as a connected line with circle markers, pin the x-axis to
    # [0, 100], save the figure to `name`, then clear the axes so the next
    # call starts from a blank plot.
    plt.plot(x, y, 'o-')
    plt.xlim((0, 100))
    plt.savefig(name, bbox_inches="tight")
    plt.cla()
class CustomizedTheromEncoder():
    """Thermometer encoder whose thresholds are data-driven quantiles.

    fit() sorts the (non-zero) training values into `encoding_len` roughly
    equal-sized buckets; the smallest value of each bucket becomes a
    threshold. encode() then maps a scalar to a 0/1 vector of
    "value exceeds threshold" flags.
    """

    def __init__(self, encoding_len=8, debug_mode=False):
        self.thresholds = []
        self.debug_mode = debug_mode
        self.encoding_len = encoding_len

    def fit(self, data):
        """Derive thresholds from the training values.

        Fixes vs. the original implementation:
        - works on a sorted *copy* (no longer reorders the caller's list);
        - no KeyError when the data contains no 0.0 entries;
        - refitting resets the thresholds instead of accumulating them.
        """
        self.thresholds = []
        temp_data = sorted(data)
        if self.debug_mode:
            print(temp_data)
        # Drop the leading zeros (data is assumed non-negative, so all zeros
        # sort to the front). list.count handles the "no zeros" case cleanly.
        num_zeros = temp_data.count(0.0)
        temp_data_without_zero = temp_data[num_zeros:]
        total_num = len(temp_data_without_zero)
        if self.debug_mode:
            print('total_num: ', str(total_num))
        each_part_int = int(total_num / self.encoding_len)
        if self.debug_mode:
            print('each part int num: ', str(each_part_int))
        splited_data = []
        start_idx = 0
        for i in range(self.encoding_len):
            end_idx = start_idx + each_part_int
            # Extend the bucket so equal values never straddle a boundary;
            # otherwise two identical inputs could encode differently.
            while end_idx < total_num and temp_data_without_zero[end_idx - 1] == temp_data_without_zero[end_idx]:
                end_idx += 1
            splited_data.append(temp_data_without_zero[start_idx:end_idx])
            start_idx = end_idx
        if self.debug_mode:
            print(splited_data)
        # The first (smallest) element of each non-empty bucket is a threshold.
        for sub in splited_data:
            if len(sub) != 0:
                self.thresholds.append(sub[0])
        if self.debug_mode:
            print("thresholds")
            print(self.thresholds)

    def encode(self, val):
        """Thermometer-encode a scalar: 1 per threshold that val exceeds."""
        return [1 if val > t else 0 for t in self.thresholds]

    def transform(self, data):
        """Encode every element of a 2-D iterable; returns a nested list."""
        return [[self.encode(ele) for ele in row] for row in data]
class StandardTheromEncoder:
    """Thermometer encoder with evenly spaced thresholds.

    fit() derives one bucket width (interval) from the data's range; encode()
    maps a scalar to `encoding_len` flags of "value exceeds i * interval".
    NOTE(review): encode() compares against i * interval without adding the
    fitted minimum, so thresholds effectively start at 0 -- confirm this is
    intended for data whose minimum is far from 0.
    """

    def __init__(self, encoding_len=8, debug_mode=False):
        self.interval = 0
        self.debug_mode = debug_mode
        self.encoding_len = encoding_len

    def fit(self, data):
        hi, lo = np.max(data), np.min(data)
        if self.debug_mode:
            print('max num: ', str(hi))
            print('min num: ', str(lo))
        # One integer-sized step per encoding position across the full range.
        self.interval = math.ceil((hi - lo) / self.encoding_len)
        if self.debug_mode:
            print('interval: ', str(self.interval))

    def encode(self, val):
        return [1 if val > i * self.interval else 0 for i in range(self.encoding_len)]

    def transform(self, data):
        return [[self.encode(ele) for ele in row] for row in data]
| yiyanw/Performance-Characterisation-of-Binarised-Neural-Networks-in-Traffic-Fingerprinting | TheromEncoder.py | TheromEncoder.py | py | 3,713 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.plot",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.xlim",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "matpl... |
35663643651 |
import pandas as pd
import matplotlib.pyplot as plt
from datetime import datetime, timedelta
import sys
def moving_averages(stock_name, tdays, mdays):
    """Plot the all-time average and mdays-window moving average of a stock's
    log returns read from 'log_data.csv', and dump both series to
    'df_combine.csv'.

    NOTE(review): `tdays` is accepted but never used -- confirm whether a
    total-days cutoff was intended.
    """
    data = pd.read_csv('log_data.csv')
    data = data.set_index('Date')
    data.index = pd.to_datetime(data.index, format = '%Y-%m-%d %H:%M:%S')
    # Reverse so the series runs oldest-to-newest for the running sums.
    ldata = data['Log_'+stock_name].to_list()[::-1]
    all_sum = ldata[0]
    all_avg = 0
    # Seed the rolling sum with the first mdays-1 values; the loop adds the
    # mdays-th element before averaging, giving a full window of mdays.
    m_sum = sum(ldata[:mdays-1])
    # No moving average exists for the first mdays-1 points.
    moving_avg = [None for i in range(mdays-1)]
    all_time_avg = [0]
    for i in range(1,len(ldata)):
        all_sum = all_sum + ldata[i]
        all_avg = all_sum/(i+1)
        all_time_avg.append(all_avg)
        if(i >= mdays - 1):
            m_sum = m_sum + ldata[i]
            m_avg = m_sum/mdays
            moving_avg.append(m_avg)
            # Slide the window: drop the oldest element for the next step.
            m_sum = m_sum - ldata[i - (mdays-1)]
    # Re-reverse both series to match the (newest-first) DataFrame index.
    df = pd.DataFrame()
    df.index = data.index
    df['Log_'+stock_name] = data['Log_'+stock_name]
    df['all_time_avg'] = all_time_avg[::-1]
    df['moving_avg'] = moving_avg[::-1]
    df.to_csv('df_combine.csv')
    plt.figure(figsize = (10,7))
    # data['Log_'+stock_name].plot(label = 'Daily_return')
    plt.plot(data.index, all_time_avg[::-1], label = 'all_time_avg')
    plt.plot(data.index, moving_avg[::-1], label = 'Moving_avg')
    # df['Log_'+stock_name].plot(label = 'return')
    # df['All_time_avg'].plot(label = 'All_time_avg')
    # df['moving_averages'].plot(label = 'moving_avg')
    plt.title('all_time_avg & Moving_avg plotting', fontsize = 16)
    plt.xlabel('Year', fontsize = 14)
    plt.ylabel('Avg return', fontsize = 14)
    plt.legend()
    plt.grid(which = 'major', linestyle = '-.', linewidth = 0.5)
    plt.show()
############################################################
# CLI entry: python moving_averages.py <stock_name> <total_days> <window_days>
moving_averages(sys.argv[1], int(sys.argv[2]), int(sys.argv[3]))
{
"api_name": "pandas.read_csv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.to_datetime",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.... |
26768362338 |
# coding: utf-8
# In[ ]:
import matplotlib
matplotlib.use("TkAgg")
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg,NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib import pyplot as plt
import numpy as np
from tkinter import *
from tkinter import filedialog
from tkinter import ttk
from scipy import signal
import scipy
# --- top-level widget construction ---
root=Tk()
# Three text entries hold the 3 filter-kernel coefficients typed by the user.
entry1=ttk.Entry(root,width=20)
entry1.pack()
entry2=ttk.Entry(root,width=20)
entry2.pack()
entry3=ttk.Entry(root,width=20)
entry3.pack()
# 'browse' loads and plots a CSV signal; 'filter now' convolves and replots.
b1=ttk.Button(root,text='browse')
b1.pack()
b2=ttk.Button(root,text='filter now')
b2.pack()
def buclick():
    # Handler for the 'browse' button: ask for a CSV file, load it as a
    # float signal, and draw it on a new embedded matplotlib canvas.
    global fileName
    global sig1
    global sig
    # NOTE(review): `fileName` is declared global but never assigned; the
    # chosen path is stored on the Tk root as `root.fileName` instead.
    root.fileName=filedialog.askopenfilename(filetypes=(( "comma separated value","*.CSV"),("All files ","*.*")))
    sig1=np.loadtxt(root.fileName, dtype=float,delimiter=',')
    sig=np.asarray(sig1)
    #print (sig1)
    #np.reshape(sig,(1,3000))
    #if entry1.get()=="" :
    f=Figure(figsize=(5,5),dpi=75)
    a1=f.add_subplot(212)
    #a1=f.plt.subplot2grid((2,1),(0,0),rowspan=2)
    a1.plot(sig)
    # Embed the figure in the Tk window; each click appends a new canvas.
    canvas=FigureCanvasTkAgg(f,root)
    canvas.show()
    canvas.get_tk_widget().pack()
    toolbar=NavigationToolbar2TkAgg(canvas,root)
    toolbar.update()
    #canvas._tkcanvas.pack()
b1.config(command=buclick)
def buclick2():
    # Handler for the 'filter now' button: reload the chosen CSV, build a
    # 3-tap kernel from the entry fields, convolve, and plot the result.
    global fileName
    global sig1
    global sig
    sig1=np.loadtxt(root.fileName, dtype=float,delimiter=',')
    sig=np.asarray(sig1)
    #else:
    # SECURITY: eval() on raw user input executes arbitrary code; the entries
    # are expected to hold plain numbers (float() would be the safe choice).
    x=eval(entry1.get())
    y=eval(entry2.get())
    z=eval(entry3.get())
    highpass=np.array([x,y,z])
    filtered=scipy.signal.convolve(sig,highpass,mode='same')
    f=Figure(figsize=(5,5),dpi=75)
    a2=f.add_subplot(211)
    #a2=f.plt.subplot2grid((2,1),(2,0),rowspan=2)
    a2.plot(filtered)
    canvas=FigureCanvasTkAgg(f,root)
    canvas.show()
    canvas.get_tk_widget().pack()
    toolbar=NavigationToolbar2TkAgg(canvas,root)
    toolbar.update()
    #canvas._tkcanvas.pack()
b2.config(command=buclick2)
#plt.show()
root.mainloop()
| zuhaalfaraj/DSP | Signals Display with Flask/Tkinterapp.py | Tkinterapp.py | py | 2,088 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "matplotlib.use",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk.Entry",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tkinter.ttk",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "tkinter.ttk.Entry",
"... |
33519355882 | import base64
from .platform import JYTHON, PY2
def compress_text(text):
    """Compress *text* (encoded as UTF-8) and return it base64-encoded.

    On Python 2 the result is a byte string; on Python 3 it is decoded
    to an ASCII str.
    """
    encoded = base64.b64encode(_compress(text.encode('UTF-8')))
    if PY2:
        return encoded
    return encoded.decode('ASCII')
if not JYTHON:
    import zlib

    # CPython and friends: stdlib zlib at maximum compression level.
    def _compress(text):
        return zlib.compress(text, 9)

else:
    # Custom compress implementation was originally used to avoid memory leak
    # (http://bugs.jython.org/issue1775). Kept around still because it is a bit
    # faster than Jython's standard zlib.compress.
    from java.util.zip import Deflater
    import jarray

    # Shared module-level deflater; reset() after each call so it is reusable.
    _DEFLATOR = Deflater(9, False)

    def _compress(text):
        _DEFLATOR.setInput(text)
        _DEFLATOR.finish()
        buf = jarray.zeros(1024, 'b')
        compressed = []
        while not _DEFLATOR.finished():
            # Deflate into a fixed 1 KiB buffer and collect each filled slice.
            length = _DEFLATOR.deflate(buf, 0, 1024)
            compressed.append(buf[:length].tostring())
        _DEFLATOR.reset()
        return ''.join(compressed)
| robotframework/RIDE | src/robotide/lib/robot/utils/compress.py | compress.py | py | 958 | python | en | code | 910 | github-code | 1 | [
{
"api_name": "base64.b64encode",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "platform.PY2",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "platform.JYTHON",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "zlib.compress",
"line... |
6407461716 | #
# Credit to:
# Troy Fawkes for his primer on using queues with Multithreading
# > https://www.troyfawkes.com/learn-python-multithreading-queues-basics/
#
# The Python3 queue documentation
# > https://docs.python.org/3/library/queue.html#queue.Empty
#
import threading
import socket
import datetime
from queue import Queue
def resolveDns(hostnames, lookupFail, lookupSuccess):
    """Resolve each hostname and route the result to one of two queues.

    Successful lookups are queued as "host,ip"; failures as "host, error".
    Queues are thread-safe, so this can run concurrently in many threads.
    """
    for name in hostnames:
        try:
            address = socket.gethostbyname(name)
        except Exception as err:
            lookupFail.put(f"{name}, {err}")
        else:
            lookupSuccess.put(f"{name},{address}")
if __name__ == "__main__":
    # Thread-safe queues collect results from all worker threads.
    lookupFail = Queue(maxsize=0)
    lookupSuccess = Queue(maxsize=0)
    filename = "hostnamesBig.txt"
    with open(filename) as file:
        hostnames = file.readlines()
        hostnames = [line.rstrip() for line in hostnames]
    start = datetime.datetime.now()
    threads = list()
    # One worker thread per chunk of 250 hostnames.
    chunksize = 250
    chunks = [hostnames[i:i + chunksize] for i in range(0, len(hostnames), chunksize)]
    for chunk in chunks:
        x = threading.Thread(target=resolveDns, args=(chunk,lookupFail,lookupSuccess))
        threads.append(x)
        x.start()
    # Wait for every worker to finish before reading the queues.
    # (The loop variable named "chunk" here is actually the thread index.)
    for chunk, thread in enumerate(threads):
        thread.join()
    end = datetime.datetime.now()
    duration = end - start
    # Capture totals before the list comprehensions below drain the queues.
    totalFails = lookupFail.qsize()
    totalSuccesses = lookupSuccess.qsize()
    failures = [lookupFail.get() for i in range(lookupFail.qsize())]
    print("Failures:")
    for fail in failures:
        print(fail)
    print(" ")
    successes = [lookupSuccess.get() for i in range(lookupSuccess.qsize())]
    print("Successes:")
    for success in successes:
        print(success)
    print(" ")
    print(f"Time taken: {duration}")
    print(f"Successfully resolved: {totalSuccesses}")
    print(f"DNS Resolution errors: {totalFails}")
    print(" ")
| RikJonAtk/multiThreadedDNS | dns_v3.py | dns_v3.py | py | 1,869 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "socket.gethostbyname",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "queue.Queue",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
... |
27537503353 | import numpy as np
from torchtext import data
from torchtext import datasets
from gensim.models import KeyedVectors
def getVectors(args, data):
    """Build the embedding matrix for the vocabulary in data.TEXT.

    In 'rand' mode every row is a small uniform-random vector; otherwise
    pretrained GoogleNews word2vec vectors are used where available, with
    random vectors filling in for out-of-vocabulary tokens.
    """
    vocab_size = len(data.TEXT.vocab)
    def random_row():
        # Small uniform init for tokens without a pretrained embedding.
        return np.random.uniform(-0.01, 0.01, args.embed_dim)
    if args.mode == 'rand':
        rows = [random_row() for _ in range(vocab_size)]
    else:
        word2vec = KeyedVectors.load_word2vec_format('./GoogleNews-vectors-negative300.bin', binary=True)
        rows = []
        for idx in range(vocab_size):
            token = data.TEXT.vocab.itos[idx]
            if token in word2vec.vocab:
                rows.append(word2vec[token])
            else:
                rows.append(random_row())
    return np.array(rows)
class DATA():
    """Bundles the torchtext fields, IMDB splits and batch iterators."""
    def __init__(self, args, tokenizer):
        """Build fields, load the IMDB dataset and create iterators.

        Args:
            args: namespace providing at least batch_size and device.
            tokenizer: callable torchtext uses to split raw text.
        """
        #self.TEXT = data.Field(tokenize = tokenizer, batch_first=True, lower=True, fix_length=70)
        self.TEXT = data.Field(tokenize=tokenizer, lower=True)
        # Non-sequential label field; no <unk> token for the binary labels.
        self.LABEL = data.Field(sequential=False, unk_token=None)
        self.train, self.test = datasets.IMDB.splits(self.TEXT, self.LABEL)
        # NOTE(review): vocabulary is built over train AND test splits —
        # confirm this test-vocabulary leakage is intentional.
        self.TEXT.build_vocab(self.train, self.test)
        #self.train_iter, self.dev_iter, self.test_iter = data.BucketIterator.splits((self.train, self.dev, self.test), batch_size=args.batch_size)
        self.train_iter, self.test_iter = data.Iterator.splits((self.train, self.test), batch_size=args.batch_size, sort=True, device=args.device)
self.LABEL.build_vocab(self.train) | UVa-NLP/HEDGE | cnn/load_data.py | load_data.py | py | 1,480 | python | en | code | 30 | github-code | 1 | [
{
"api_name": "gensim.models.KeyedVectors.load_word2vec_format",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "gensim.models.KeyedVectors",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torchtext.data.TEXT",
"line_number": 10,
"usage_type": "attribut... |
23669146088 | from django.shortcuts import render
from django.views.generic import TemplateView, ListView
import django_tables2
import books.models
import books.filters
import books.tables
class FilteredSingleTableView(django_tables2.SingleTableView):
    """SingleTableView whose table data is narrowed through filter_class."""

    filter_class = None

    def get_table_data(self):
        """Bind filter_class to the request and return the filtered queryset."""
        base_data = super().get_table_data()
        self.filter = self.filter_class(self.request.GET, queryset=base_data)
        return self.filter.qs

    def get_context_data(self, **kwargs):
        """Expose the bound filter to the template context as 'filter'."""
        context = super().get_context_data(**kwargs)
        context['filter'] = self.filter
        return context
class BookFilteredSingleTableView(FilteredSingleTableView):
    """Book listing combining django-tables2 rendering with django-filter."""
    model = books.models.Book
    table_class = books.tables.BookTable
    filter_class = books.filters.BookFilter
class BookSingleTableView(django_tables2.SingleTableView):
    """Plain (unfiltered) django-tables2 listing of all books."""
    model = books.models.Book
    table_class = books.tables.BookTable
class FilteredTableView(ListView):
    """Book ListView that wires a BookFilter and BookTable together by hand.

    Unlike FilteredSingleTableView this does not inherit SingleTableView;
    the filter and table are constructed explicitly in get_context_data.
    """
    model = books.models.Book

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Named book_filter so the builtin filter() is not shadowed; the
        # context key stays 'filter' because templates reference that name.
        book_filter = books.filters.BookFilter(self.request.GET, queryset=self.object_list)
        table = books.tables.BookTable(book_filter.qs)
        # Let django-tables2 apply sorting/pagination from the query string.
        django_tables2.RequestConfig(self.request).configure(table)
        context['filter'] = book_filter
        context['table'] = table
        return context
class FilterExListView(ListView):
    """Book ListView using the extended BookFilterEx filter.

    Same wiring as a hand-built filtered table view, but with
    books.filters.BookFilterEx instead of the plain BookFilter.
    """
    model = books.models.Book

    def get_context_data(self, **kwargs):
        context = super().get_context_data(**kwargs)
        # Named book_filter so the builtin filter() is not shadowed; the
        # context key stays 'filter' because templates reference that name.
        book_filter = books.filters.BookFilterEx(self.request.GET, queryset=self.object_list)
        table = books.tables.BookTable(book_filter.qs)
        # Let django-tables2 apply sorting/pagination from the query string.
        django_tables2.RequestConfig(self.request).configure(table)
        context['filter'] = book_filter
        context['table'] = table
return context | spapas/django_table_filtering | books/views.py | views.py | py | 1,962 | python | en | code | 6 | github-code | 1 | [
{
"api_name": "django_tables2.SingleTableView",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "books.models.models",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "books.models",
"line_number": 24,
"usage_type": "name"
},
{
"api_name... |
5250226508 | __author__ = "Severi Jääskeläinen \n Samuel Kaiponen \n Heta Rekilä \n" \
"Sinikka Siironen \n Juhani Sundell"
__version__ = "2.0"
import platform
import subprocess
import shlex
import glob
from . import general_functions as gf
from .beam import Beam
from .detector import Detector
from .target import Target
from .parsing import CSVParser
from pathlib import Path
class GetEspe:
    """
    Class for handling calling the external program get_espe to generate
    energy spectra coordinates.

    The heavy lifting happens in the external ``get_espe`` binary: this
    class builds its command line, feeds ERD data to its stdin and parses
    the two-column (x, y) spectrum it prints on stdout.
    """
    # Slot names beginning with "__" are name-mangled by CPython the same
    # way the self.__xxx attribute references are, so they match __init__.
    # NOTE(review): "__settings", "__reference_density" and "output_file"
    # are never assigned anywhere in this class — likely leftovers; confirm
    # before removing.
    __slots__ = "__recoil_file", "__settings", "__beam", "__detector", \
                "__target", "__channel_width", "__reference_density", \
                "__fluence", "output_file", "__timeres", "__density", \
                "__solid", "__erd_file", "__output_file"
    def __init__(self, beam: Beam, detector: Detector, target: Target,
                 solid, recoil_file: Path, erd_file: Path, spectrum_file: Path,
                 reference_density: float = 4.98, multiplier: float = 1e22,
                 ch: float = 0.025, fluence: float = 1.00e+9,
                 timeres: float = 250.0):
        """Initializes the GetEspe class.

        Args:
            beam: beam whose ion and energy are passed to get_espe.
            detector: detector providing ToF foil geometry and angle.
            target: target providing the target tilt angle.
            solid: solid angle of the detector (msr).
            recoil_file: path of the recoil distribution file (-dist).
            erd_file: path/glob of ERD file(s) streamed to get_espe stdin.
            spectrum_file: path the parsed spectrum is written to.
            reference_density: surface-layer density before scaling.
            multiplier: scale applied to reference_density (to at./cm^3).
            ch: channel width in the output (MeV).
            fluence: dose of the beam in particles.
            timeres: ToF detector time resolution (ps, FWHM).
        """
        self.__beam = beam
        self.__detector = detector
        self.__target = target
        self.__channel_width = ch
        self.__fluence = fluence  # from Run object
        self.__timeres = timeres
        # get_espe expects the density in at./cm^3.
        self.__density = reference_density * multiplier
        self.__solid = solid
        self.__recoil_file = recoil_file
        self.__erd_file = erd_file
        self.__output_file = spectrum_file
    @staticmethod
    def calculate_simulated_spectrum(write_to_file=True, **kwargs):
        """Calculates simulated spectrum. Calling this is the same as creating
        a new GetEspe object and calling its run method.

        Args:
            write_to_file: whether spectrum is written to a file
            kwargs: keyword arguments passed down to GetEspe

        Return:
            spectrum as a list of tuples
        """
        get_espe = GetEspe(**kwargs)
        return get_espe.run(write_to_file=write_to_file)
    @staticmethod
    def read_espe_file(espe_file: Path):
        """Reads a file generated by get_espe.

        Args:
            espe_file: A string representing path of energy spectrum data file
                (.simu) to be read.

        Returns:
            Returns energy spectrum data as a list of (float, float) tuples;
            an empty list if the file cannot be read or parsed.
        """
        parser = CSVParser((0, float), (1, float))
        try:
            # TODO could also set the method to 'cols'
            return list(parser.parse_file(espe_file, method="row"))
        # TODO handle NaNs and Infs
        except (OSError, UnicodeDecodeError, IndexError):
            # File was not found, or it could not be decoded (for example, it
            # could have been .png)
            pass
        return []
    def run(self, write_to_file=True):
        """Run get_espe binary with given parameters.

        Streams every line of the matched ERD file(s) into get_espe's stdin,
        then parses its stdout into (x, y) float tuples.

        Args:
            write_to_file: whether get_espe output is written to file

        Returns:
            list of (x, y) tuples parsed from get_espe's stdout.
        """
        espe_cmd = self.get_command()
        bin_dir = gf.get_bin_dir()
        # universal_newlines=True gives text-mode stdin/stdout pipes.
        espe_process = subprocess.Popen(
            espe_cmd, cwd=bin_dir, stdin=subprocess.PIPE,
            stdout=subprocess.PIPE, universal_newlines=True)
        # NOTE(review): all stdin is written before any stdout is read; if
        # get_espe fills its stdout pipe buffer while input is still being
        # written this could deadlock — confirm with large inputs.
        for f in glob.glob(str(self.__erd_file)):
            with open(f, "r") as file:
                for line in file:
                    espe_process.stdin.write(line)
        espe_process.stdin.close()
        # Lazily read stdout line by line until EOF (readline returns "").
        stdout = iter(espe_process.stdout.readline, "")
        parser = CSVParser((0, float), (1, float))
        if write_to_file:
            output = []
            with self.__output_file.open("w") as file:
                for x, y in parser.parse_strs(stdout, method="row"):
                    file.write(f"{x} {y}\n")
                    output.append((x, y))
            return output
        return list(parser.parse_strs(stdout, method="row"))
    def get_command(self):
        """Returns the command to run get_espe executable.
        """
        # Options for get_espe
        #
        # get_espe - Calculate an energy spectrum from simulated ERD data
        #
        # Options:
        #         -real    only real events are handled
        #         -ch      channel width in the output (MeV)
        #         -depth   depth range of the events (nm, two values)
        #         -dist    file name for depth distribution
        #         -m2      average mass of the secondary particles (u)
        #         -avemass use average mass for calculating energy from TOF
        #         -scale   scale the total intensity to value
        #         -err     give statistics in the third column
        #         -detsize limit in the size of the detector foil (mm)
        #         -erange  energy range in the output spectrum (MeV)
        #         -timeres time resolution of the TOF-detector (ps, FWHM)
        #         -eres    energy resolution (keV, FWHM) of the SSD, (energy
        #                  signal used!)
        #         -toflen  time-of-flight length (m)
        #         -beam    mass number and the chemical symbol of the primary
        #                  ion
        #         -dose    dose of the beam = fluence, in particles (6.24e12 == 1 p-uC)
        #         -energy  beam energy (MeV)
        #         -theta   scattering angle (deg)
        #         -tangle  angle between target surface and beam (deg)
        #         -solid   solid angle of the detector (msr)
        #         -density average atomic density of the first 10 nm layer
        #                  (at./cm^3)
        # ToF length = distance between the two timing foils (stored in mm).
        toflen = self.__detector.foils[self.__detector.tof_foils[1]].distance
        toflen -= self.__detector.foils[self.__detector.tof_foils[0]].distance
        toflen_in_meters = toflen / 1000
        params = f"-beam {self.__beam.ion.get_prefix()} " \
                 f"-energy {self.__beam.energy} " \
                 f"-theta {self.__detector.detector_theta} " \
                 f"-tangle {self.__target.target_theta} " \
                 f"-timeres {self.__timeres} " \
                 f"-toflen {toflen_in_meters} " \
                 f"-solid {self.__solid} " \
                 f"-dose {self.__fluence} " \
                 f"-avemass " \
                 f"-density {self.__density} " \
                 f"-ch {self.__channel_width}"
        if platform.system() == "Windows":
            executable = str(gf.get_bin_dir() / "get_espe.exe")
        else:
            executable = "./get_espe"
        # shlex.split does not handle file paths well on Windows so recoil
        # file is provided separately
        return (
            executable, *shlex.split(params), "-dist", str(self.__recoil_file)
        )
| ilardinho/ePotku_public | server/potku/modules/get_espe.py | get_espe.py | py | 6,789 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "beam.Beam",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "detector.Detector",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "target.Target",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_nu... |
28169435385 | from stix2 import MemoryStore, Filter
from itertools import chain
def get_related(thesrc, src_type, rel_type, target_type, reverse=False):
    """Build a mapping of related ATT&CK objects.

    Args:
        thesrc: stix2 data source (e.g. MemoryStore) to query.
        src_type: STIX type on the source side of the relationship.
        rel_type: relationship_type to follow (e.g. "uses").
        target_type: STIX type on the target side of the relationship.
        reverse: if False, map source id => related targets;
            if True, map target id => related sources.

    Returns:
        dict of stix_id => [ {"object": related_obj, "relationship": rel}, ... ].
        Entries whose related object was revoked are silently skipped, so a
        key may map to an empty list.
    """
    relationships = thesrc.query([
        Filter('type', '=', 'relationship'),
        Filter('relationship_type', '=', rel_type),
        Filter('revoked', '=', False),
        Filter('x_mitre_deprecated', "=", False)
    ])

    # stix_id => [ { relationship, related_object_id } for each related object ]
    id_to_related = {}
    for relationship in relationships:
        # The refs embed the object type, so a substring test selects the
        # relationships joining src_type to target_type.
        if src_type in relationship.source_ref and target_type in relationship.target_ref:
            if reverse:
                key, related_id = relationship.target_ref, relationship.source_ref
            else:
                key, related_id = relationship.source_ref, relationship.target_ref
            id_to_related.setdefault(key, []).append({
                "relationship": relationship,
                "id": related_id
            })

    # All non-revoked objects of the type being pointed at.
    lookup_type = src_type if reverse else target_type
    targets = thesrc.query([
        Filter('type', '=', lookup_type),
        Filter('revoked', '=', False)
    ])
    # Lookup of stix ID to the full stix object.
    id_to_target = {target.id: target for target in targets}

    # Resolve the related IDs to objects, dropping revoked targets.
    output = {}
    for stix_id, related_entries in id_to_related.items():
        output[stix_id] = [
            {"object": id_to_target[related["id"]],
             "relationship": related["relationship"]}
            for related in related_entries
            if related["id"] in id_to_target
        ]
    return output
# software:group
def software_used_by_groups(thesrc):
    """returns group_id => [{software, relationship}, ...] for each software
    (tool or malware) used by the group."""
    # get_related returns dicts, so the original `dict + dict` raised
    # TypeError; merge the two mappings, concatenating per-key lists so a
    # group using both tools and malware keeps all entries.
    combined = get_related(thesrc, "intrusion-set", "uses", "tool")
    for group_id, entries in get_related(thesrc, "intrusion-set", "uses", "malware").items():
        combined.setdefault(group_id, []).extend(entries)
    return combined
def groups_using_software(thesrc):
    """returns software_id => [{group, relationship}, ...] for each group
    using the software (covers both tool and malware objects)."""
    # get_related returns dicts, so the original `dict + dict` raised
    # TypeError; merge the two mappings, concatenating per-key lists.
    combined = get_related(thesrc, "intrusion-set", "uses", "tool", reverse=True)
    for software_id, entries in get_related(thesrc, "intrusion-set", "uses", "malware", reverse=True).items():
        combined.setdefault(software_id, []).extend(entries)
    return combined
# technique:group
def techniques_used_by_groups(thesrc):
    """returns group_id => [{technique, relationship}, ...] for each technique used by the group."""
    return get_related(thesrc, "intrusion-set", "uses", "attack-pattern")
def groups_using_technique(thesrc):
    """returns technique_id => [{group, relationship}, ...] for each group using the technique."""
    return get_related(thesrc, "intrusion-set", "uses", "attack-pattern", reverse=True)
# technique:software
def techniques_used_by_software(thesrc):
    """return software_id => [{technique, relationship}, ...] for each
    technique used by the software (malware and tools)."""
    # get_related returns dicts, so the original `dict + dict` raised
    # TypeError; merge the two mappings, concatenating per-key lists.
    combined = get_related(thesrc, "malware", "uses", "attack-pattern")
    for software_id, entries in get_related(thesrc, "tool", "uses", "attack-pattern").items():
        combined.setdefault(software_id, []).extend(entries)
    return combined
def software_using_technique(thesrc):
    """return technique_id => [{software, relationship}, ...] for each
    software (malware or tool) using the technique."""
    # get_related returns dicts, so the original `dict + dict` raised
    # TypeError; merge per key — a technique is often used by both malware
    # and tools, so the lists must be concatenated, not overwritten.
    combined = get_related(thesrc, "malware", "uses", "attack-pattern", reverse=True)
    for technique_id, entries in get_related(thesrc, "tool", "uses", "attack-pattern", reverse=True).items():
        combined.setdefault(technique_id, []).extend(entries)
    return combined
# technique:mitigation
def mitigation_mitigates_techniques(thesrc):
    """return mitigation_id => [{technique, relationship}, ...] for each technique mitigated by the mitigation."""
    return get_related(thesrc, "course-of-action", "mitigates", "attack-pattern", reverse=False)
def technique_mitigated_by_mitigations(thesrc):
    """return technique_id => [{mitigation, relationship}, ...] for each mitigation of the technique."""
    return get_related(thesrc, "course-of-action", "mitigates", "attack-pattern", reverse=True)
# technique:subtechnique
def subtechniques_of(thesrc):
    """return technique_id => [{subtechnique, relationship}, ...] for each subtechnique of the technique."""
    return get_related(thesrc, "attack-pattern", "subtechnique-of", "attack-pattern", reverse=True)
def parent_technique_of(thesrc):
    """return subtechnique_id => {technique, relationship} describing the parent technique of the subtechnique"""
    # get_related returns a dict keyed by stix IDs, so the original
    # `...[0]` on the dict always raised KeyError. Each sub-technique has
    # exactly one parent, so unwrap the singleton list per key instead
    # (skipping entries whose parent was revoked and filtered out).
    return {
        sub_id: entries[0]
        for sub_id, entries in get_related(
            thesrc, "attack-pattern", "subtechnique-of", "attack-pattern").items()
        if entries
    }
| ilf0rz/MITRE-ATTACK-heatmap-gen | relationships.py | relationships.py | py | 5,814 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "stix2.Filter",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "stix2.Filter",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "stix2.Filter",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "stix2.Filter",
"line_number":... |
72355993314 | import pygame
import snake as s
import animal as a
import config as cfg
import random
import util as u
from game import Game
from pygame.locals import (
K_UP,
K_DOWN,
K_LEFT,
K_RIGHT,
)
class SnakeGame(Game):
    """Snake game loop: moves the snake on a timer and spawns animals."""

    def init(self):
        """Create the snake and reset the cached key state."""
        self.snake = s.Snake((cfg.BLOCK_SIZE, cfg.BLOCK_SIZE))
        self.last_keys = []

    def setup_events(self):
        """Register the two periodic pygame timer events used by the game."""
        self.GAME_TICK = pygame.USEREVENT + 1
        self.SPAWN_ANIMAL = pygame.USEREVENT + 2
        pygame.time.set_timer(self.GAME_TICK, cfg.TICK_RATE)
        pygame.time.set_timer(self.SPAWN_ANIMAL, 1000)

    def process_event(self, event):
        """Handle one pygame event; always returns True to keep running."""
        if event.type == self.GAME_TICK:
            self.snake.tick(self.move_dir())
        elif event.type == self.SPAWN_ANIMAL:
            spawned = a.Animal((0, 0))
            # Re-roll the position until the animal lands on a free cell.
            self.random_animal_position(spawned)
            while pygame.sprite.spritecollideany(spawned, self.all_sprites):
                self.random_animal_position(spawned)
            self.all_sprites.add(spawned)
        return True

    def random_animal_position(self, animal):
        """Place the animal at a random grid-aligned spot inside the field."""
        field = Game.game_field
        pos_x = u.roundup(random.randint(field.left + cfg.BLOCK_SIZE, field.width - cfg.BLOCK_SIZE), cfg.BLOCK_SIZE)
        pos_y = u.roundup(random.randint(field.top + cfg.BLOCK_SIZE, field.height - cfg.BLOCK_SIZE), cfg.BLOCK_SIZE)
        animal.rect.center = (pos_x, pos_y)

    def move_dir(self):
        """Return the direction key for the next move, or None.

        A move straight back into the snake's current direction is refused.
        """
        if len(self.last_keys) == 0:
            return None
        opposite = {K_UP: K_DOWN, K_DOWN: K_UP, K_LEFT: K_RIGHT, K_RIGHT: K_LEFT}
        # Same priority order as the original elif chain: up, down, left, right.
        for key in (K_UP, K_DOWN, K_LEFT, K_RIGHT):
            if self.last_keys[key]:
                if self.snake.head.last_dir == opposite[key]:
                    return None
                return key
        return None

    def update(self):
        """Cache the pressed arrow keys and advance the snake."""
        pressed = pygame.key.get_pressed()
        if any(pressed[key] for key in (K_UP, K_DOWN, K_LEFT, K_RIGHT)):
            self.last_keys = pressed
        return self.snake.update()
SnakeGame().run() | BinaryCat17/snake | play_snake.py | play_snake.py | py | 2,302 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "game.Game",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "snake.Snake",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "config.BLOCK_SIZE",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pygame.USEREVENT",
"... |
41597137875 | import sys, os, logging
import random
import pysam
import pandas as pd
vcf_file = sys.argv[1]
pre_file = sys.argv[2]
luad = sys.argv[3]
out = sys.argv[4]
rand = sys.argv[5]
logging.basicConfig(filename = "{}/ExtractReads.{}.log".format(out,luad),
filemode = "a",
format = "%(levelname)s %(asctime)s - %(message)s",
level = logging.DEBUG)
logger = logging.getLogger()
logger.info("Started ExtractReads.py")
somatic_file = "{}/somatic_reads.{}.SNVs.bam".format(out, luad)
normal_file = "{}/normal_reads.{}.SNVs.bam".format(out, luad)
somatic_srt_file = "{}/somatic_reads.{}.SNVs.srt.bam".format(out, luad)
normal_srt_file = "{}/normal_reads.{}.SNVs.srt.bam".format(out, luad)
bam = pysam.AlignmentFile(pre_file, "rb")
somatic = pysam.AlignmentFile(somatic_file, 'wb', template = bam)
normal = pysam.AlignmentFile(normal_file, 'wb', template = bam)
vcf = pysam.VariantFile(vcf_file,'r')
postions_in_plasma = 0
overall_postions = 0
somatic_reads = 0
normal_reads = 0
for rec in vcf.fetch():
# print("{} {} {} {}".format(rec.chrom, rec.start, rec.ref, rec.alts[0]))
for pile in bam.pileup(str(rec.chrom), rec.start, rec.stop, truncate=True):
ref_reads = []
ref_reads_names = []
alt_reads = []
alt_reads_names = []
exclude_reads = []
for r in pile.pileups:
if r.query_position == None: continue
# if len(rec.ref) < len(rec.alts[0]):
nuc = r.alignment.query_sequence[r.query_position]
if nuc == rec.ref:
ref_reads.append(r)
ref_reads_names.append(r.alignment.query_name)
elif nuc == rec.alts[0]:
alt_reads.append(r)
alt_reads_names.append(r.alignment.query_name)
for name in ref_reads_names:
if name in alt_reads_names:
exclude_reads.append(name)
logger.info("read {} excluded".format(name))
logger.info("{} {} {} {}".format(rec.chrom, rec.start, rec.ref, rec.alts[0]))
# print("read {} excluded".format(name))
if len(alt_reads) > 0:
postions_in_plasma += 1
for alt in alt_reads:
if alt.alignment.query_name in exclude_reads: continue
somatic.write(alt.alignment)
try:
somatic.write(bam.mate(alt.alignment))
except ValueError:
pass
if rand == 'True':
try:
ref_to_write = random.sample(ref_reads, len(alt_reads))
except:
ref_to_write = ref_reads
else:
ref_to_write = ref_reads
for ref in ref_to_write:
if ref.alignment.query_name in exclude_reads: continue
normal.write(ref.alignment)
try:
normal.write(bam.mate(ref.alignment))
except ValueError:
pass
somatic_reads += len(alt_reads)
normal_reads += len(ref_to_write)
overall_postions += 1
# sys.exit()
try:
pos_ratio = postions_in_plasma/overall_postions
except:
pos_ratio = 0
logger.info("# Postions with alt: {}, {}".format(postions_in_plasma, pos_ratio))
logger.info("# Somatic reads: {}".format(somatic_reads))
logger.info("# Normal reads: {}".format(normal_reads))
bam.close()
vcf.close()
somatic.close()
normal.close()
pysam.sort("-o", somatic_srt_file, somatic_file)
pysam.index(somatic_srt_file)
if os.path.exists(somatic_file): os.remove(somatic_file)
pysam.sort("-o", normal_srt_file, normal_file)
pysam.index(normal_srt_file)
if os.path.exists(normal_file): os.remove(normal_file)
| ShomronLab/SomaticSerum | SomaticExtract/src/ExtractSNVs.py | ExtractSNVs.py | py | 3,899 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.argv",
"line_number": ... |
21174566509 | from tkinter import messagebox, Tk, Frame, TOP, X, NW, Canvas, Image, Button
from PIL import ImageTk, Image
from models.Users import checkLoggedUserRole, checkLoggedUserIsBlocked
from classes.Navbar import NavbarWidget
from views.Manage import ManageView
from views.Recipes.RecipesCategory import RecipesCategoryView
def HomeView(user, isLogged, isRegister):
    """Render the CraftingCook home window for the current user.

    Args:
        user: dict with at least an "email" key (used for role lookup).
        isLogged: True when the session is authenticated.
        isRegister: True when the user has just registered (shows tips).

    NOTE(review): `from PIL import ImageTk, Image` shadows tkinter's Image
    in this module, so Image.open below is PIL's — keep the import order.
    All PhotoImage objects are kept in locals until mainloop() returns so
    tkinter does not garbage-collect the button images.
    """
    Window = Tk()
    Window.title("CraftingCook")
    Window.geometry("1280x650")
    Window.iconbitmap("assets/CraftingCook.ico")
    Window.resizable(0, 0)
    navbar = NavbarWidget(
        Window,
        "assets/images/Home/notification.png",  # notificationIcon
        "assets/images/Home/exit_app.png",  # exitAppIcon
        user
    )
    navbar.create_widget()
    # Full-window background canvas below the navbar strip.
    canvasHome = Canvas(Window, height=650, width=1280)
    canvasHome.place(x=-2, y=100)
    image = Image.open("assets/images/Home/home_image_V2.png")
    image = image.resize((1280, 650))
    image = ImageTk.PhotoImage(image)
    canvasHome.create_image(0, 0, anchor=NW, image=image)
    # create text on canvas
    canvasHome.create_text(640, 80, text="CraftingCook",
                           font=("Arial", 40, "bold"), fill="#E5B714")
    canvasHome.create_text(640, 125, text="Your Kitchen best friend", font=(
        "Arial", 20, "normal"), fill="#ffffff")
    # if is a new user
    if (isRegister == True):
        messagebox.showinfo("Welcome to CraftingCook",
                            "Some tips to get started:\n\n"
                            "- You can access your profile throuth the avatar icon on the navbar\n\n"
                            "- You can access your notifications throuth 🔔 icon on the navbar\n\n"
                            "Enjoy CraftingCook! 🍳"
                            )
    # rendering the menu view conditionally
    # NOTE(review): this condition reduces to just `isLogged == True`.
    if ((isLogged == True and isRegister == False) or (isLogged == True and isRegister == True)):
        # if is admin: five 80x80 menu buttons
        if (checkLoggedUserRole(user["email"]) == "admin"):
            manageIcon = Image.open("assets/images/Home/manage_icon.png")
            manageIcon = manageIcon.resize((80, 80))
            manageIcon = ImageTk.PhotoImage(manageIcon)
            buttonManage = Button(canvasHome, text="Manage", font=("Arial", 10, "bold"), bg="#B5960E", fg="white",
                                  cursor="hand2", width=100, height=120, activebackground="#D1A711", activeforeground="#ffffff", bd=0, image=manageIcon, compound=TOP, padx=15, pady=15)
            buttonManage.place(x=160, y=250)
            settingsIcon = Image.open("assets/images/Home/settings_icon.png")
            settingsIcon = settingsIcon.resize((80, 80))
            settingsIcon = ImageTk.PhotoImage(settingsIcon)
            buttonSettings = Button(canvasHome, text="Settings", font=("Arial", 10, "bold"), bg="#B5960E", fg="white",
                                    cursor="hand2", width=100, height=120, activebackground="#D1A711", activeforeground="#ffffff", bd=0, image=settingsIcon, compound=TOP, padx=15, pady=15)
            buttonSettings.place(x=360, y=250)
            recipesIcon = Image.open("assets/images/Home/recipes_icon.png")
            recipesIcon = recipesIcon.resize((80, 80))
            recipesIcon = ImageTk.PhotoImage(recipesIcon)
            buttonRecipes = Button(canvasHome, text="Recipes", font=("Arial", 10, "bold"), bg="#B5960E", fg="white",
                                   cursor="hand2", width=100, height=120, activebackground="#D1A711", activeforeground="#ffffff", bd=0, image=recipesIcon, compound=TOP, padx=15, pady=15)
            buttonRecipes.place(x=560, y=250)
            dashboardIcon = Image.open("assets/images/Home/dashboard_icon.png")
            dashboardIcon = dashboardIcon.resize((80, 80))
            dashboardIcon = ImageTk.PhotoImage(dashboardIcon)
            buttonDashboard = Button(canvasHome, text="Dashboard", font=("Arial", 10, "bold"), bg="#B5960E", fg="white",
                                     cursor="hand2", width=100, height=120, activebackground="#D1A711", activeforeground="#ffffff", bd=0, image=dashboardIcon, compound=TOP, padx=15, pady=15)
            buttonDashboard.place(x=760, y=250)
            favoritesIcon = Image.open("assets/images/Home/favorites_icon.png")
            favoritesIcon = favoritesIcon.resize((80, 80))
            favoritesIcon = ImageTk.PhotoImage(favoritesIcon)
            buttonFavorites = Button(canvasHome, text="Favorites", font=("Arial", 10, "bold"), bg="#B5960E", fg="white",
                                     cursor="hand2", width=100, height=120, activebackground="#D1A711", activeforeground="#ffffff", bd=0, image=favoritesIcon, compound=TOP, padx=15, pady=15)
            buttonFavorites.place(x=960, y=250)
            # NOTE(review): only the Manage button has a click binding in
            # the admin branch — the other buttons are currently inert.
            buttonManage.bind(
                "<Button-1>", lambda event: ManageView(user, Window))
        # if is regular: three larger 100x100 menu buttons
        elif (checkLoggedUserRole(user["email"]) == "regular"):
            favoritesIcon = Image.open("assets/images/Home/favorites_icon.png")
            favoritesIcon = favoritesIcon.resize((100, 100))
            favoritesIcon = ImageTk.PhotoImage(favoritesIcon)
            buttonFavorites = Button(canvasHome, text="Favorites", font=("Arial", 13, "bold"), bg="#B5960E", fg="white",
                                     cursor="hand2", width=100, height=120, activebackground="#D1A711", activeforeground="#ffffff", bd=0, image=favoritesIcon, compound=TOP, padx=25, pady=25)
            buttonFavorites.place(x=250, y=250)
            recipesIcon = Image.open("assets/images/Home/recipes_icon.png")
            recipesIcon = recipesIcon.resize((100, 100))
            recipesIcon = ImageTk.PhotoImage(recipesIcon)
            buttonRecipes = Button(canvasHome, text="Recipes", font=("Arial", 13, "bold"), bg="#B5960E", fg="white",
                                   cursor="hand2", width=100, height=120, activebackground="#D1A711", activeforeground="#ffffff", bd=0, image=recipesIcon, compound=TOP, padx=25, pady=25)
            buttonRecipes.place(x=560, y=250)
            dashboardIcon = Image.open("assets/images/Home/dashboard_icon.png")
            dashboardIcon = dashboardIcon.resize((100, 100))
            dashboardIcon = ImageTk.PhotoImage(dashboardIcon)
            buttonDashboard = Button(canvasHome, text="Dashboard", font=("Arial", 13, "bold"), bg="#B5960E", fg="white",
                                     cursor="hand2", width=100, height=120, activebackground="#D1A711", activeforeground="#ffffff", bd=0, image=dashboardIcon, compound=TOP, padx=25, pady=25)
            buttonDashboard.place(x=860, y=250)
            # NOTE(review): only the Recipes button is bound for regular users.
            buttonRecipes.bind(
                "<Button-1>", lambda event: RecipesCategoryView(user, Window))
    Window.mainloop()
| pedromst2000/Tkinter-Recipes-manage-App | views/Home.py | Home.py | py | 6,757 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.Tk",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "classes.Navbar.NavbarWidget",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "tkinter.Canvas",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open"... |
17702010446 | from django.urls import path
# -- Import all the views from mi_tienda
from . import views
# -- The URL patterns for our shop are defined here
# -- One route per view, starting with the main page (index)
urlpatterns = [
    # -- Main view (index)
    path('', views.index, name='index'),
    path('panaderia.html/', views.panaderia, name='panaderia'),
    path('bolleria.html/', views.bolleria, name='bolleria'),
    path('pasteleria.html/', views.pasteleria, name='pasteleria'),
    path('list/', views.list, name='list'),
    path('formulario/', views.formulario, name='formulario'),
    # NOTE(review): the route is 'recepcion/' (Spanish) but the reverse
    # name is 'reception' (English) — confirm templates use this name
    # before unifying the spelling.
    path('recepcion/', views.recepcion, name='reception'),
    path('pedidos/', views.pedidos, name='pedidos'),
]
| martaquintana/2019-2020-LTAW-Practicas | P2/mi_tienda_web/mi_panaderia/urls.py | urls.py | py | 701 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "django.urls.path",... |
33093921448 | import solution
from collections import defaultdict
from sortedcontainers import SortedList
class Solution(solution.Solution):
    def solve(self, test_input=None):
        """Replay a MovieRentingSystem operation sequence.

        test_input is (ops, vals) in LeetCode design-problem format:
        ops[0] is the constructor call, whose answer slot is None.
        """
        ops, vals = test_input
        n, entries = vals[0]
        # Your MovieRentingSystem object will be instantiated and called as such:
        system = MovieRentingSystem(n, entries)
        answers = [None]
        for idx, op in enumerate(ops):
            if idx == 0:
                continue
            call_args = vals[idx]
            if op == "search":
                answers.append(system.search(call_args[0]))
            elif op == "rent":
                system.rent(call_args[0], call_args[1])
                answers.append(None)
            elif op == "drop":
                system.drop(call_args[0], call_args[1])
                answers.append(None)
            else:
                answers.append(system.report())
        return answers
class MovieRentingSystem(object):

    def __init__(self, n, entries):
        """
        :type n: int
        :type entries: List[List[int]]
        """
        # movie -> SortedList of (price, shop) pairs for unrented copies.
        self.movies = defaultdict(SortedList)
        # shop -> {movie: price} for O(1) price lookups.
        self.shops = defaultdict(dict)
        # SortedList of (price, shop, movie) for currently rented movies.
        self.renting = SortedList([])
        for shop_id, movie_id, price in entries:
            self.movies[movie_id].add((price, shop_id))
            self.shops[shop_id][movie_id] = price

    def search(self, movie):
        """
        :type movie: int
        :rtype: List[int]
        """
        # The SortedList keeps (price, shop) ordered, so the first five
        # entries are the five cheapest shops.
        return [shop for _, shop in self.movies[movie].islice(stop=5)]

    def rent(self, shop, movie):
        """
        :type shop: int
        :type movie: int
        :rtype: None
        """
        cost = self.shops[shop][movie]
        self.movies[movie].discard((cost, shop))
        self.renting.add((cost, shop, movie))

    def drop(self, shop, movie):
        """
        :type shop: int
        :type movie: int
        :rtype: None
        """
        cost = self.shops[shop][movie]
        self.renting.discard((cost, shop, movie))
        self.movies[movie].add((cost, shop))

    def report(self):
        """
        :rtype: List[List[int]]
        """
        return [[shop, movie] for _, shop, movie in self.renting.islice(stop=5)]
| QuBenhao/LeetCode | problems/1912/solution.py | solution.py | py | 2,076 | python | en | code | 8 | github-code | 1 | [
{
"api_name": "solution.Solution",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "sortedcontainers.SortedList",
"line_number": 34,
"usage_type": "argument"
},
{
"api... |
40926531737 | import argparse
import subprocess
import os
from qsub import q_sub
import shutil
def main():
    """Extract homeoblock regions with samtools and submit lastz alignment jobs.

    For each block listed in -block_info, pulls the reference and query salmon
    chromosome regions into per-block FASTAs, copies the pike query alongside
    them, and submits two lastz/chain/net jobs via SLURM.
    """
    # arguments
    parser = argparse.ArgumentParser()
    parser.add_argument('-block_info', help='CSV of homeoblock coords', required=True)
    parser.add_argument('-key', help='CSV of chromosome names and corresponding refseq IDs', required=True)
    parser.add_argument('-sal_ref', help='path to salmon reference', required=True)
    parser.add_argument('-sal_query', help='path to salmon query', required=True)
    parser.add_argument('-pike_query', help='path to pike query', required=True)
    parser.add_argument('-out', help='Output directory', required=True)
    args = parser.parse_args()

    # chromosome name -> refseq ID lookup.  Use a context manager so the
    # file handle is closed (the original left both input files open).
    with open(args.key) as key_file:
        keys = {x.split(',')[1].rstrip(): x.split(',')[0] for x in key_file}

    ref_salmon = args.sal_ref
    salmon_query = args.sal_query
    pike = args.pike_query

    with open(args.block_info) as block_file:
        block_lines = block_file.readlines()[1:]  # skip CSV header

    for line in block_lines:
        if line.startswith('Block'):
            continue

        # block info
        no, block, ssa, start, end, ssa_q, start_q, end_q, strand = line.split(',')

        # convert to ref_seq
        chromo = keys[ssa]
        chromo_q = keys[ssa_q]

        out_dir = args.out + block + '/'
        if not os.path.isdir(out_dir):
            os.makedirs(out_dir)

        # make chromosomal fastas for both salmon - homeo block specific
        # NOTE(review): shell=True with CLI-supplied paths; fine for trusted
        # HPC use but would need escaping for untrusted input.
        ref_chromo_fasta = out_dir + 'salmon.fa'
        cmd = 'samtools faidx {} {}:{}-{} > {}'.format(ref_salmon, 'salmon.' + chromo,
                                                       start, end, ref_chromo_fasta)
        subprocess.call(cmd, shell=True)

        query_chromo_fasta = out_dir + 'salmon_b.fa'
        cmd = 'samtools faidx {} {}:{}-{} > {}'.format(salmon_query, 'salmon_b.' + chromo_q,
                                                       start_q, end_q, query_chromo_fasta)
        subprocess.call(cmd, shell=True)

        pike_query = out_dir + 'pike.fa'
        shutil.copy(pike, pike_query)

        # sub align job
        lastz_job1 = ('~/sal_enhancers/homeoblock_alignments/wholegenome_lastz_chain_net.py '
                      '-ref_name salmon -ref_fa {} '
                      '-query_name salmon_b -query_fa {} '
                      '-out {}').format(ref_chromo_fasta, query_chromo_fasta, out_dir)

        lastz_job2 = ('~/sal_enhancers/homeoblock_alignments/wholegenome_lastz_chain_net.py '
                      '-ref_name salmon -ref_fa {} '
                      '-query_name pike -query_fa {} '
                      '-out {}').format(ref_chromo_fasta, pike_query, out_dir)

        q_sub([lastz_job1, lastz_job2], out=out_dir + block, scheduler='SLURM')
if __name__ == '__main__':
main()
| henryjuho/sal_enhancers | homeoblock_alignments/do_all_pairwise.py | do_all_pairwise.py | py | 2,708 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 40,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
... |
6585765676 | from UM.Job import Job
from UM.Logger import Logger
from .avr_isp import ispBase
from .avr_isp.stk500v2 import Stk500v2
from time import time, sleep
from serial import Serial, SerialException
# An async job that attempts to find the correct baud rate for a USB printer.
# It tries a pre-set list of baud rates. All these baud rates are validated by requesting the temperature a few times
# and checking if the results make sense. If getResult() is not None, it was able to find a correct baud rate.
class AutoDetectBaudJob(Job):
    """Async job that probes a serial port for the correct printer baud rate.

    Each candidate rate is validated by sending M105 (temperature request)
    and checking for a sane "ok ... T:" reply.  getResult() is the detected
    rate, or None if no rate worked.
    """

    def __init__(self, serial_port: int) -> None:
        # NOTE(review): annotated int but later passed through str(); callers
        # appear to hand in a port name/identifier -- confirm intended type.
        super().__init__()
        self._serial_port = serial_port
        # Most common rates first to keep the typical detection fast.
        self._all_baud_rates = [115200, 250000, 500000, 230400, 76800, 57600, 38400, 19200, 9600]

    def run(self) -> None:
        Logger.debug(f"Auto detect baud rate started for {self._serial_port}")
        # Per-retry timeouts: later retries wait longer for slow firmware.
        wait_response_timeouts = [3, 15, 30]
        wait_bootloader_times = [1.5, 5, 15]
        write_timeout = 3
        read_timeout = 3
        tries = 2
        serial = None
        for retry in range(tries):
            for baud_rate in self._all_baud_rates:
                # Clamp to the last configured timeout when retry exceeds
                # the lists above.
                if retry < len(wait_response_timeouts):
                    wait_response_timeout = wait_response_timeouts[retry]
                else:
                    wait_response_timeout = wait_response_timeouts[-1]
                if retry < len(wait_bootloader_times):
                    wait_bootloader = wait_bootloader_times[retry]
                else:
                    wait_bootloader = wait_bootloader_times[-1]
                Logger.debug(f"Checking {self._serial_port} if baud rate {baud_rate} works. Retry nr: {retry}. Wait timeout: {wait_response_timeout}")
                if serial is None:
                    try:
                        serial = Serial(str(self._serial_port), baud_rate, timeout = read_timeout, writeTimeout = write_timeout)
                    except SerialException:
                        Logger.warning(f"Unable to create serial connection to {serial} with baud rate {baud_rate}")
                        continue
                else:
                    # We already have a serial connection, just change the baud rate.
                    try:
                        serial.baudrate = baud_rate
                    except ValueError:
                        continue
                # Ensure that we are not talking to the boot loader. 1.5 seconds seems to be the magic number
                sleep(wait_bootloader)
                serial.write(b"\n")  # Ensure we clear out previous responses
                serial.write(b"M105\n")
                start_timeout_time = time()
                timeout_time = time() + wait_response_timeout
                while timeout_time > time():
                    # If baudrate is wrong, then readline() might never
                    # return, even with timeouts set. Using read_until
                    # with size limit seems to fix this.
                    line = serial.read_until(size = 100)
                    if b"ok" in line and b"T:" in line:
                        self.setResult(baud_rate)
                        Logger.log("d", "Detected baud rate {baud_rate} on serial {serial} on retry {retry} with after {time_elapsed:0.2f} seconds.".format(
                            serial = self._serial_port, baud_rate = baud_rate, retry = retry, time_elapsed = time() - start_timeout_time))
                        serial.close()  # close serial port so it can be opened by the USBPrinterOutputDevice
                        return
                    # Keep polling the temperature until the timeout expires.
                    serial.write(b"M105\n")
            sleep(15)  # Give the printer some time to init and try again.
        Logger.debug(f"Unable to find a working baudrate for {serial}")
        self.setResult(None)  # Unable to detect the correct baudrate.
| Ultimaker/Cura | plugins/USBPrinting/AutoDetectBaudJob.py | AutoDetectBaudJob.py | py | 3,836 | python | en | code | 5,387 | github-code | 1 | [
{
"api_name": "UM.Job.Job",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "UM.Logger.Logger.debug",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "UM.Logger.Logger",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "UM.Logger.Logger.d... |
37978950788 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Written by H.Turbé, March 2020.
"""
import argparse
import os
import shutil
import sys
import time
from pyspark import SparkFiles
from pyspark.sql import SparkSession
CWD = os.getcwd()
FILEPATH = os.path.dirname(os.path.realpath(__file__))
ROOTPATH = os.path.dirname(FILEPATH)
sys.path.append(os.path.join(ROOTPATH))
from generate_synthetic_datasets.preprocess_synthetic import PreprocessSynthetic as Preprocess
from shared_utils.utils_data import parse_config
from shared_utils.utils_path import config_path, data_path
def main():
    """Generate a synthetic dataset: parquet formatting plus train/val/test split.

    Reads the YAML config, creates a local Spark session, converts the
    generated data to parquet and writes the dataset split.
    """
    # parse command-line
    parser = argparse.ArgumentParser(description="Process files.")
    parser.add_argument(
        "--config_file",
        default=f"{config_path}/config_generate_synthetic.yaml",
        help="Name of the data catalog to be used",
    )
    # parse arguments
    args = parser.parse_args()
    data_config = parse_config(args.config_file)
    save_path = os.path.join(data_path, data_config["save_name"])
    if not os.path.exists(save_path):
        os.makedirs(save_path)

    # create spark session
    print("Creating Spark Session ...")
    session_builder = (
        SparkSession.builder.appName("Dataset Creation")
        .config("spark.dynamicAllocation.enabled", "true")
        .config("spark.driver.memory", "10g")
    )
    session_builder.master("local[*]")

    # Set spark environments so workers use the same interpreter as the driver.
    python_interpeter = sys.executable
    os.environ["PYSPARK_PYTHON"] = python_interpeter
    os.environ["PYSPARK_DRIVER_PYTHON"] = python_interpeter
    spark = session_builder.getOrCreate()
    sc = spark.sparkContext
    # Ship local packages to the Spark workers.
    sc.addFile(
        os.path.join(ROOTPATH, "generate_synthetic_datasets"), recursive = True
    )
    sc.addFile(os.path.join(ROOTPATH, "shared_types"), recursive = True)
    sys.path.insert(0, SparkFiles.getRootDirectory())

    # copy config file into simulation folder for reproducibility
    shutil.copyfile(
        args.config_file, os.path.join(save_path, os.path.split(args.config_file)[-1])
    )

    # Generate data
    dict_pre = Preprocess(
        save_path=save_path,
        data_config=data_config,
    )
    print("Formatting to Parquet")
    s = time.time()
    dict_pre.format_to_parquet(spark)
    print("done Elapsed time ", time.time() - s, "s.")

    list_split = data_config.get("dataset_split")
    if list_split is None:
        # Message now matches the split actually applied (the original
        # printed "[0.8,0.2]" while applying [0.7, 0.15, 0.15]).
        print("Split not provided. Default split applied [0.7,0.15,0.15]")
        list_split = [0.7, 0.15, 0.15]
    print("Creating split for model training")
    s = time.time()
    dict_pre.create_split(list_split)
    print("done. Elapsed time ", time.time() - s, "s.")
if __name__ == "__main__":
main()
| hturbe/InterpretTime | src/operations/__exec_generate_data.py | __exec_generate_data.py | py | 2,775 | python | en | code | 23 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.path.realpath",
"line_n... |
70576316194 | import numpy as np
import easysim
import os
import torch
class YCB:
    """Replays the ground-truth YCB object motion of a DexYCB scene.

    Objects are loaded as position-controlled 6-DoF bodies whose targets
    follow the recorded pose sequence; the grasped object is reordered to
    index 0 and can be released (collision filter and motor forces changed)
    so that physics takes over.
    """

    # YCB class id -> asset directory name.
    CLASSES = {
        1: "002_master_chef_can",
        2: "003_cracker_box",
        3: "004_sugar_box",
        4: "005_tomato_soup_can",
        5: "006_mustard_bottle",
        6: "007_tuna_fish_can",
        7: "008_pudding_box",
        8: "009_gelatin_box",
        9: "010_potted_meat_can",
        10: "011_banana",
        11: "019_pitcher_base",
        12: "021_bleach_cleanser",
        13: "024_bowl",
        14: "025_mug",
        15: "035_power_drill",
        16: "036_wood_block",
        17: "037_scissors",
        18: "040_large_marker",
        20: "052_extra_large_clamp",
        21: "061_foam_brick",
    }

    def __init__(self, cfg, scene, dex_ycb):
        self._cfg = cfg
        self._scene = scene
        self._dex_ycb = dex_ycb
        # YCB class id -> easysim.Body currently present in the scene.
        self._bodies = {}
        self._cur_scene_id = None

    @property
    def bodies(self):
        return self._bodies

    def reset(self, scene_id):
        """Load (or re-initialize) the objects and poses for *scene_id*."""
        if scene_id != self._cur_scene_id:
            # Scene changed: remove existing bodies (reverse order) and reload.
            for i in [*self.bodies][::-1]:
                self._scene.remove_body(self.bodies[i])
                del self.bodies[i]
            scene_data = self._dex_ycb.get_scene_data(scene_id)
            ycb_ids = scene_data["ycb_ids"]
            ycb_grasp_ind = scene_data["ycb_grasp_ind"]
            pose = scene_data["pose_y"]
            if scene_data["ycb_grasp_ind"] != 0:
                # Move the grasped object to index 0 in both the id list and
                # the pose array so the rest of the class can assume ids[0].
                ycb_ids = (
                    [ycb_ids[ycb_grasp_ind]]
                    + ycb_ids[:ycb_grasp_ind]
                    + ycb_ids[ycb_grasp_ind + 1 :]
                )
                pose = pose[
                    :, np.r_[ycb_grasp_ind, :ycb_grasp_ind, ycb_grasp_ind + 1 : pose.shape[1]]
                ]
            if self._cfg.ENV.YCB_LOAD_MODE == "all":
                self._ids = ycb_ids
            if self._cfg.ENV.YCB_LOAD_MODE == "grasp_only":
                self._ids = ycb_ids[:1]
            self._pose = pose.copy()
            # Offset z by the table height so poses sit in world coordinates.
            self._pose[:, :, 2] += self._cfg.ENV.TABLE_HEIGHT
            self._num_frames = len(self._pose)
            self._cur_scene_id = scene_id
        # Choose the starting frame of the recorded trajectory.
        if self._cfg.ENV.YCB_MANO_START_FRAME == "first":
            self._frame = 0
        if self._cfg.ENV.YCB_MANO_START_FRAME == "last":
            self._frame = self._num_frames - 1
        if self._cfg.ENV.YCB_MANO_START_FRAME == "one_and_half_second":
            self._frame = int(np.round(1.5 / self._cfg.SIM.TIME_STEP))
        self._released = False
        if self.bodies == {}:
            # First reset for this scene: create one body per YCB object.
            for i in self.ids:
                body = easysim.Body()
                body.name = "ycb_{:02d}".format(i)
                body.geometry_type = easysim.GeometryType.URDF
                body.urdf_file = os.path.join(
                    os.path.dirname(__file__),
                    "data",
                    "assets",
                    self.CLASSES[i],
                    "model_normalized.urdf",
                )
                body.use_fixed_base = True
                body.initial_base_position = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 1.0)
                body.initial_dof_position = self._pose[self._frame, self.ids.index(i)]
                body.initial_dof_velocity = (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
                body.link_collision_filter = [
                    [self._cfg.ENV.COLLISION_FILTER_YCB[[*self.CLASSES].index(i)]] * 7
                ]
                body.dof_control_mode = easysim.DoFControlMode.POSITION_CONTROL
                body.dof_max_force = [
                    self._cfg.ENV.YCB_TRANSLATION_MAX_FORCE + self._cfg.ENV.YCB_ROTATION_MAX_FORCE
                ]
                body.dof_position_gain = (
                    self._cfg.ENV.YCB_TRANSLATION_POSITION_GAIN
                    + self._cfg.ENV.YCB_ROTATION_POSITION_GAIN
                )
                body.dof_velocity_gain = (
                    self._cfg.ENV.YCB_TRANSLATION_VELOCITY_GAIN
                    + self._cfg.ENV.YCB_ROTATION_VELOCITY_GAIN
                )
                self._scene.add_body(body)
                self.bodies[i] = body
        else:
            # Bodies persist across resets of the same scene: restore the
            # grasped object's collision filter and motor forces, which are
            # modified by release().
            self.bodies[self.ids[0]].update_attr_array(
                "link_collision_filter",
                torch.tensor([0]),
                [self._cfg.ENV.COLLISION_FILTER_YCB[[*self.CLASSES].index(self.ids[0])]] * 7,
            )
            self.bodies[self.ids[0]].update_attr_array(
                "dof_max_force",
                torch.tensor([0]),
                self._cfg.ENV.YCB_TRANSLATION_MAX_FORCE + self._cfg.ENV.YCB_ROTATION_MAX_FORCE,
            )

    @property
    def ids(self):
        return self._ids

    @property
    def pose(self):
        return self._pose

    @property
    def released(self):
        return self._released

    def step(self):
        """Advance one frame (clamped at the end) and update DoF targets."""
        self._frame += 1
        self._frame = min(self._frame, self._num_frames - 1)
        for i in self.ids:
            self.bodies[i].dof_target_position = self._pose[self._frame, self.ids.index(i)]

    def release(self):
        """Release the grasped object: switch its collision filter to the
        release value and zero its motor forces so physics governs it."""
        self.bodies[self.ids[0]].update_attr_array(
            "link_collision_filter",
            torch.tensor([0]),
            [self._cfg.ENV.COLLISION_FILTER_YCB_RELEASE] * 7,
        )
        self.bodies[self.ids[0]].update_attr_array(
            "dof_max_force", torch.tensor([0]), (0.0, 0.0, 0.0, 0.0, 0.0, 0.0)
        )
        self._released = True
| NVlabs/handover-sim | handover/ycb.py | ycb.py | py | 5,389 | python | en | code | 66 | github-code | 1 | [
{
"api_name": "numpy.r_",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "numpy.round",
"line_number": 80,
"usage_type": "call"
},
{
"api_name": "easysim.Body",
"line_number": 86,
"usage_type": "call"
},
{
"api_name": "easysim.GeometryType",
"li... |
34552883298 | import gradio as gr
from spellchecker import SpellChecker
from itertools import permutations
def Anagram(words):
    """Return the set of correctly spelled words formed by permuting *words*.

    The given word itself is part of the candidate set (the identity
    permutation), so it is included whenever it is spelled correctly.
    """
    # Generate the permutations ONCE.  The original implementation called
    # randomizer() twice -- directly and again inside spellChecker() --
    # doubling the already factorial-time work.
    candidates = set(randomizer(words))
    misspelled = SpellChecker().unknown(candidates)
    # Keep only the candidates the dictionary recognises as real words.
    return candidates - misspelled
def spellChecker(word):
    """
    Return the misspelled permutations of *word*.

    Generates every permutation of the characters (factorial cost -- keep
    input words short) and asks pyspellchecker which of them are unknown.
    NOTE(review): SpellChecker.unknown() appears to normalise case -- confirm
    before relying on exact-case membership of the result.
    """
    checker = SpellChecker()
    randomize = randomizer(word)
    incorrect = checker.unknown(randomize)
    return incorrect
def randomizer(word):
    """Return every ordering of the characters in *word* as a list of strings.

    The permutations are produced in itertools order, so the original word
    is always the first entry.  Cost is O(len(word)!).
    """
    # permutations yields tuples of characters; join each back into a string.
    return [''.join(chars) for chars in permutations(word)]
# Wire the Anagram function into a simple Gradio text-in/text-out interface.
app = gr.Interface(fn=Anagram, inputs='text', outputs='text')
#launching the app
app.launch()
| Jonaben1/Anagram | main.py | main.py | py | 1,427 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "spellchecker.SpellChecker",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "itertools.permutations",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "gradio.Interface",
"line_number": 47,
"usage_type": "call"
}
] |
71386683875 | import redis
import requests
from bs4 import BeautifulSoup
from celery import shared_task
from products.models import Product
from core.telegram_notification import telegram_notify
MARKETPLACE_URL = 'https://ozon.ru/seller/1/products'
redis_client = redis.StrictRedis(
host='redis',
port=6379,
db=0
)
def parse_page(page: str) -> str:
    """Fetch a page's HTML through the FlareSolverr proxy.

    To bypass Cloudflare protection the request is routed via a proxy server
    in a Docker container: a headless Chromium browser with an undetectable
    driver.

    :param page: Site URL.
    :type page: str
    :return: HTML text for further parsing.
    :rtype: str
    :raise requests.exceptions.ConnectionError:
        If the proxy server answers with a code other than 200.
    """
    post_body = {
        "cmd": "request.get",
        "url": page,
        "maxTimeout": 60000
    }
    response = requests.post(
        # POST request to the proxy server
        'http://flaresolverr:8191/v1',
        headers={'Content-Type': 'application/json'},
        json=post_body
    )
    if response.status_code == 200:
        # FlareSolverr wraps the fetched page inside solution.response.
        return response.json()["solution"]["response"]
    else:
        raise requests.exceptions.ConnectionError(
            f"Прокси сервер выдал {response.status_code}")
def scrape_page_products(html_content: str, limit_number: int) -> list[dict]:
    """Parse the product cards on a results page.

    Returns a list of dicts, one per product, for later conversion into a
    Django QuerySet and insertion into the DB.

    :param html_content: HTML content of the page.
    :type html_content: str
    :param limit_number: Early-exit bound -- stop once this many products
        have been collected, even if more cards remain on the page.
    :type limit_number: int
    :return: List of dicts with products and their attributes.
    :rtype: list[dict]
    """
    soup = BeautifulSoup(html_content, 'html.parser')
    elements_grid = soup.find('div',
                              class_='widget-search-result-container'
                              ).find('div')
    target_elements = elements_grid.find_all('div', recursive=False)
    result = []
    for element in target_elements:
        # Product URL slug ends in "-<numeric id>".
        product_url = element.find(
            'a', class_='tile-hover-target')['href'].split('/')[2]
        product_id = int(product_url.split('-')[-1])
        name = element.find('span', class_='tsBody500Medium').text
        price = element.find(
            'span', class_='tsHeadline500Medium').text
        # Strip thousands separators and the ruble sign, e.g. "1 234 ₽" -> 1234.
        formatted_price = int(price.replace(" ", "").replace("₽", "").strip())
        discount = element.find_all(
            'span', class_='tsBodyControl400Small')[1].text
        try:
            rating, comments = element.find(
                'div', class_='tsBodyMBold').text.split(' ')
        except AttributeError:
            # Card has no rating block (no reviews yet).
            rating, comments = str(0), str(0)
        image_url = element.find(
            'div').find('img')['srcset'].split(' ')[0]
        result.append({
            'product_id': product_id,
            'name': name,
            'price': formatted_price,
            'rating': rating,
            'comments': comments,
            'discount': discount,
            'image_url': image_url
        })
        if len(result) == limit_number:
            return result
    return result
@shared_task()
def parse_and_add_to_db(number: int) -> None:
    """Scrape product cards page by page and upsert them into the DB.

    Pages are fetched until ``number`` products have been collected; raw
    results are cached in redis and a Telegram notification is sent.

    :param number: Limit on the number of products (from the POST request,
        defaults to 10 upstream).
    :type number: int
    """
    products_result = []
    page_num = 1
    while len(products_result) < number:
        page_content = parse_page(MARKETPLACE_URL + f'?page={page_num}')
        # Request only as many products as are still missing.  The original
        # code subtracted the *cumulative* total each pass, which went
        # negative after two pages and disabled the limit entirely,
        # overshooting `number`.
        remaining = number - len(products_result)
        products_result.extend(scrape_page_products(page_content, remaining))
        page_num += 1
    # Cache the raw parsing results in redis.
    redis_client.set('parsing results', str(products_result))
    # Telegram notification.
    telegram_notify(len(products_result))
    for item in products_result:
        product_id = item['product_id']
        try:
            # Update the existing row in place when the product is known.
            product = Product.objects.get(product_id=product_id)
            for key, value in item.items():
                setattr(product, key, value)
            product.save()
        except Product.DoesNotExist:
            product = Product(**item)
            product.save()
def proxy_accessible_check() -> bool:
    """Return True if the FlareSolverr proxy answers with HTTP 200.

    :returns: False when the proxy server is unreachable or responds with
        any other status code.
    """
    try:
        response = requests.get('http://flaresolverr:8191')
        # Direct boolean expression instead of `True if ... else False`.
        return response.status_code == 200
    except requests.exceptions.ConnectionError:
        return False
| mxstrv/mxstrv-test-o-parser | ozon_parser/core/tasks.py | tasks.py | py | 5,591 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "redis.StrictRedis",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "requests.exceptions.ConnectionError",
"line_number": 52,
"usage_type": "call"
},
{
"api_name": "r... |
2316243589 | from __future__ import division, print_function, absolute_import
import numpy
from scipy.linalg.lapack import get_lapack_funcs
from scipy.linalg.misc import LinAlgError
__all__ = ['qrp', 'qlp', 'tri_inv']
# Duplicate from scipy.linalg.misc
def _datacopied(arr, original):
"""
Strict check for `arr` not sharing any data with `original`,
under the assumption that arr = asarray(original)
"""
if arr is original:
return False
if not isinstance(original, numpy.ndarray) and hasattr(original, '__array__'):
return False
return arr.base is None
def tri_inv(c, lower=False, unit_diagonal=False, overwrite_c=False,
            check_finite=True):
    """
    Compute the inverse of a triangular matrix via LAPACK ``trtri``.

    Parameters
    ----------
    c : array_like
        A triangular matrix to be inverted
    lower : bool, optional
        Use only data contained in the lower triangle of `c`.
        Default is to use upper triangle.
    unit_diagonal : bool, optional
        If True, diagonal elements of `c` are assumed to be 1 and
        will not be referenced.
    overwrite_c : bool, optional
        Allow overwriting data in `c` (may improve performance).
    check_finite : bool, optional
        Whether to check that the input matrix contains only finite numbers.
        Disabling may give a performance gain, but may result in problems
        (crashes, non-termination) if the inputs do contain infinities or NaNs.

    Returns
    -------
    inv_c : ndarray
        Inverse of the matrix `c`.

    Raises
    ------
    LinAlgError
        If `c` is singular
    ValueError
        If `c` is not square, or not 2-dimensional.

    Examples
    --------
    >>> c = numpy.array([(1., 2.), (0., 4.)])
    >>> tri_inv(c)
    array([[ 1.  , -0.5 ],
           [ 0.  ,  0.25]])
    >>> numpy.dot(c, tri_inv(c))
    array([[ 1.,  0.],
           [ 0.,  1.]])
    """
    a = numpy.asarray_chkfinite(c) if check_finite else numpy.asarray(c)
    if a.ndim != 2 or a.shape[0] != a.shape[1]:
        raise ValueError('expected square matrix')
    # Only let LAPACK clobber the buffer if the caller allowed it or the
    # conversion above already made a private copy.
    may_overwrite = overwrite_c or _datacopied(a, c)
    trtri, = get_lapack_funcs(('trtri',), (a,))
    inv_c, info = trtri(a, overwrite_c=may_overwrite, lower=lower,
                        unitdiag=unit_diagonal)
    if info == 0:
        return inv_c
    if info > 0:
        raise LinAlgError("singular matrix")
    raise ValueError("illegal value in %d-th argument of internal trtri" %
                     -info)
def qrp(a, mode='reduced'):
    """
    Compute the qr factorization of a matrix with nonnegative diagonal of r.

    Factor the matrix `a` as *qr*, where `q` is orthonormal and `r` is
    upper-triangular. The diagonal entries of `r` are nonnegative.

    For documentation see numpy.linalg.qr
    """
    q, r = numpy.linalg.qr(a, mode)
    # Flip the sign of every column/row pair whose diagonal entry of r is
    # negative; q @ r is unchanged by this.
    negative = numpy.diag(r) < 0.
    q[:, negative] = -q[:, negative]
    r[negative, :] = -r[negative, :]
    return q, r
def qlp(a, mode='reduced'):
    """
    Compute the ql factorization of a matrix.

    Factor the matrix `a` as *ql*, where `q` is orthonormal and `l` is
    lower-triangular with nonnegative diagonal.  Implemented by reversing
    both axes of `a`, taking the sign-fixed qr factorization, and reversing
    the factors back (J·A·J identity).

    For documentation see numpy.linalg.qr
    """
    flipped_q, flipped_r = qrp(numpy.flip(a), mode)
    return numpy.flip(flipped_q), numpy.flip(flipped_r)
| Sabrina85/odemis | src/odemis/util/linalg.py | linalg.py | py | 3,299 | python | en | code | null | github-code | 1 | [
{
"api_name": "numpy.ndarray",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.asarray_chkfinite",
"line_number": 70,
"usage_type": "call"
},
{
"api_name": "numpy.asarray",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "scipy.linalg... |
19792334224 | import torch
import torch.nn as nn
import torch.nn.functional as F
from easydict import EasyDict as edict
import math
import torch.utils.model_zoo as model_zoo
__all__ = ['ResNet', 'resnet18', 'resnet34', 'resnet50', 'resnet101',
'resnet152']
model_urls = {
'resnet18': 'https://download.pytorch.org/models/resnet18-5c106cde.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-333f7ec4.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-19c8e357.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-5d3b4d8f.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-b121ed2d.pth',
}
def conv3x3(in_planes, out_planes, stride=1):
    """Return a bias-free 3x3 convolution with padding 1 ("same" spatial size
    at stride 1)."""
    conv = nn.Conv2d(in_planes, out_planes,
                     kernel_size=3, stride=stride,
                     padding=1, bias=False)
    return conv
class BasicBlock(nn.Module):
    """Two-convolution residual block (ResNet-18/34).

    NOTE: submodule creation order is deliberately unchanged -- it fixes the
    state_dict key order and the RNG stream consumed by weight init.
    """
    expansion = 1  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(BasicBlock, self).__init__()
        self.conv1 = conv3x3(inplanes, planes, stride)
        self.bn1 = nn.BatchNorm2d(planes)
        self.relu = nn.ReLU(inplace=True)
        self.conv2 = conv3x3(planes, planes)
        self.bn2 = nn.BatchNorm2d(planes)
        # Optional projection applied to the identity branch when the
        # shapes of input and output differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # Residual addition precedes the final activation.
        out += residual
        out = self.relu(out)
        return out
class Bottleneck(nn.Module):
    """Three-convolution bottleneck residual block (ResNet-50/101/152):
    1x1 reduce -> 3x3 -> 1x1 expand (x4 channels)."""
    expansion = 4  # output channels = planes * expansion

    def __init__(self, inplanes, planes, stride=1, downsample=None):
        super(Bottleneck, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=1, bias=False)
        self.bn1 = nn.BatchNorm2d(planes)
        # Stride is applied in the 3x3 convolution.
        self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride,
                               padding=1, bias=False)
        self.bn2 = nn.BatchNorm2d(planes)
        self.conv3 = nn.Conv2d(planes, planes * 4, kernel_size=1, bias=False)
        self.bn3 = nn.BatchNorm2d(planes * 4)
        self.relu = nn.ReLU(inplace=True)
        # Optional projection applied to the identity branch when shapes differ.
        self.downsample = downsample
        self.stride = stride

    def forward(self, x):
        residual = x
        out = self.conv1(x)
        out = self.bn1(out)
        out = self.relu(out)
        out = self.conv2(out)
        out = self.bn2(out)
        out = self.relu(out)
        out = self.conv3(out)
        out = self.bn3(out)
        if self.downsample is not None:
            residual = self.downsample(x)
        # Residual addition precedes the final activation.
        out += residual
        out = self.relu(out)
        return out
class ResNet(nn.Module):
    """Torchvision-style ResNet backbone with an ImageNet classifier head."""

    def __init__(self, block, layers, num_classes=1000):
        self.inplanes = 64
        super(ResNet, self).__init__()
        self.conv1 = nn.Conv2d(3, 64, kernel_size=7, stride=2, padding=3,
                               bias=False)
        self.bn1 = nn.BatchNorm2d(64)
        self.relu = nn.ReLU(inplace=True)
        self.maxpool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
        self.layer1 = self._make_layer(block, 64, layers[0])
        self.layer2 = self._make_layer(block, 128, layers[1], stride=2)
        self.layer3 = self._make_layer(block, 256, layers[2], stride=2)
        self.layer4 = self._make_layer(block, 512, layers[3], stride=2)
        # 7x7 average pool assumes a 224x224 input (7x7 final feature map).
        self.avgpool = nn.AvgPool2d(7)
        self.fc = nn.Linear(512 * block.expansion, num_classes)
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                # Zero-mean normal init with std sqrt(2 / fan_out)
                # (He initialisation for ReLU networks).
                n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
                m.weight.data.normal_(0, math.sqrt(2. / n))
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()

    def _make_layer(self, block, planes, blocks, stride=1):
        # Build one stage: a strided/projected first block followed by
        # (blocks - 1) identity blocks.
        downsample = None
        if stride != 1 or self.inplanes != planes * block.expansion:
            # 1x1 conv projection so the shortcut matches the output shape.
            downsample = nn.Sequential(
                nn.Conv2d(self.inplanes, planes * block.expansion,
                          kernel_size=1, stride=stride, bias=False),
                nn.BatchNorm2d(planes * block.expansion),
            )
        layers = []
        layers.append(block(self.inplanes, planes, stride, downsample))
        self.inplanes = planes * block.expansion
        for i in range(1, blocks):
            layers.append(block(self.inplanes, planes))
        return nn.Sequential(*layers)

    def forward(self, x):
        x = self.conv1(x)
        x = self.bn1(x)
        x = self.relu(x)
        x = self.maxpool(x)
        x = self.layer1(x)
        x = self.layer2(x)
        x = self.layer3(x)
        x = self.layer4(x)
        x = self.avgpool(x)
        # Flatten to (batch, features) for the fully connected head.
        x = x.view(x.size(0), -1)
        x = self.fc(x)
        return x
def replace_first_conv(res_model, input_ch):
    """
    Adapts the model whose number of input channels is other than 3.
    If the number of input channels is 3, this function does nothing.
    (The original docstring wrongly claimed "1 or 4" here.)
    If the number of input channels is 1 or 4, conv1 weights are initialized
    from the pretrained weights: channel 0 is reused for 1-channel input, and
    appended as the 4th channel for 4-channel input.
    Otherwise, conv1 weights are left randomly initialized.
    :param res_model: ResNet whose first convolution should be adapted
    :param input_ch: desired number of input channels
    :return: model
    """
    if input_ch == 3:
        return res_model
    conv1 = nn.Conv2d(input_ch, 64, kernel_size=7, stride=2, padding=3, bias=False)
    R_IDX = 0  # pretrained channel reused when collapsing to one channel
    if input_ch == 1:
        conv1.weight.data = res_model.conv1.weight.data[:, R_IDX:R_IDX + 1, :, :]
    elif input_ch == 4:
        conv1.weight.data[:, :3, :, :] = res_model.conv1.weight.data
        conv1.weight.data[:, 3:4, :, :] = res_model.conv1.weight.data[:, 0:1, :, :]
    res_model.conv1 = conv1
    return res_model
def resnet18(pretrained=False, input_ch=3, **kwargs):
    """Build a ResNet-18, optionally initialised from ImageNet weights.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
        input_ch (int): Number of input channels; conv1 is adapted when != 3.
    """
    model = ResNet(BasicBlock, [2, 2, 2, 2], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet18'])
        model.load_state_dict(state_dict)
    return replace_first_conv(model, input_ch)
def resnet34(pretrained=False, input_ch=3, **kwargs):
    """Build a ResNet-34, optionally initialised from ImageNet weights.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
        input_ch (int): Number of input channels; conv1 is adapted when != 3.
    """
    model = ResNet(BasicBlock, [3, 4, 6, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet34'])
        model.load_state_dict(state_dict)
    return replace_first_conv(model, input_ch)
def resnet50(pretrained=False, input_ch=3, no_replace=False, **kwargs):
    """Constructs a ResNet-50 model.

    Args:
        pretrained (bool): If True, returns a model pre-trained on ImageNet
        input_ch (int): number of input channels; conv1 is adapted when != 3
        no_replace (bool): if True, skip the first-conv adaptation entirely.
            The parameter was previously accepted but silently ignored;
            the default (False) preserves the old behavior.
    """
    model = ResNet(Bottleneck, [3, 4, 6, 3], **kwargs)
    if pretrained:
        model.load_state_dict(model_zoo.load_url(model_urls['resnet50']))
    if no_replace:
        return model
    return replace_first_conv(model, input_ch)
def resnet101(pretrained=False, input_ch=3, **kwargs):
    """Build a ResNet-101, optionally initialised from ImageNet weights.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
        input_ch (int): Number of input channels; conv1 is adapted when != 3.
    """
    model = ResNet(Bottleneck, [3, 4, 23, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet101'])
        model.load_state_dict(state_dict)
    return replace_first_conv(model, input_ch)
def resnet152(pretrained=False, input_ch=3, **kwargs):
    """Build a ResNet-152, optionally initialised from ImageNet weights.

    Args:
        pretrained (bool): If True, load ImageNet-pretrained weights.
        input_ch (int): Number of input channels; conv1 is adapted when != 3.
    """
    model = ResNet(Bottleneck, [3, 8, 36, 3], **kwargs)
    if pretrained:
        state_dict = model_zoo.load_url(model_urls['resnet152'])
        model.load_state_dict(state_dict)
    return replace_first_conv(model, input_ch)
class Upsample(nn.Module):
    """Bilinearly resize a feature map to a target spatial size, then refine
    it with a 5x5 convolution followed by batch norm."""

    def __init__(self, inplanes, planes):
        super(Upsample, self).__init__()
        self.conv1 = nn.Conv2d(inplanes, planes, kernel_size=5, padding=2)
        self.bn = nn.BatchNorm2d(planes)

    def forward(self, x, size):
        # F.upsample is deprecated; F.interpolate is its direct replacement
        # with identical behavior for mode="bilinear".
        x = F.interpolate(x, size=size, mode="bilinear")
        x = self.conv1(x)
        x = self.bn(x)
        return x
class Fusion(nn.Module):
    """Merge a lateral feature map with an upsampled one: 1x1 conv + BN on
    the lateral branch, elementwise add, then ReLU."""

    def __init__(self, inplanes):
        super(Fusion, self).__init__()
        self.conv = nn.Conv2d(inplanes, inplanes, kernel_size=1)
        self.bn = nn.BatchNorm2d(inplanes)
        self.relu = nn.ReLU()
        # self.dropout = nn.Dropout(.1)

    def forward(self, x1, x2):
        # x1: lateral (encoder) features; x2: upsampled decoder features.
        out = self.bn(self.conv(x1)) + x2
        out = self.relu(out)
        return out
class ResBase(nn.Module):
    """ResNet feature extractor returning intermediate feature maps.

    Wraps a pretrained ResNet (depth chosen by ``layer``) and exposes the
    stem activations plus the outputs of layers 2-4 for top-down fusion in
    ResClassifier.
    """

    def __init__(self, num_classes, layer='50', input_ch=3):
        super(ResBase, self).__init__()
        self.num_classes = num_classes
        print('resnet' + layer)
        if layer == '18':
            resnet = resnet18(pretrained=True, input_ch=input_ch)
        elif layer == '50':
            resnet = resnet50(pretrained=True, input_ch=input_ch)
        elif layer == '101':
            resnet = resnet101(pretrained=True, input_ch=input_ch)
        elif layer == '152':
            resnet = resnet152(pretrained=True, input_ch=input_ch)
        else:
            # The original merely *evaluated* NotImplementedError without
            # raising it, leaving `resnet` unbound and failing later with a
            # confusing NameError.
            raise NotImplementedError("unsupported resnet depth: %s" % layer)
        self.conv1 = resnet.conv1
        self.bn0 = resnet.bn1
        self.relu = resnet.relu
        self.maxpool = resnet.maxpool
        self.layer1 = resnet.layer1
        self.layer2 = resnet.layer2
        self.layer3 = resnet.layer3
        self.layer4 = resnet.layer4

    def forward(self, x):
        """Return a dict of intermediate activations keyed by stage."""
        img_size = x.size()[2:]
        x = self.conv1(x)
        x = self.bn0(x)
        x = self.relu(x)
        conv_x = x          # stem output (stride 2)
        x = self.maxpool(x)
        pool_x = x          # after max pooling (stride 4)
        fm1 = self.layer1(x)
        fm2 = self.layer2(fm1)
        fm3 = self.layer3(fm2)
        fm4 = self.layer4(fm3)
        out_dic = {
            "img_size": img_size,
            "conv_x": conv_x,
            "pool_x": pool_x,
            "fm2": fm2,
            "fm3": fm3,
            "fm4": fm4
        }
        return out_dic
class ResClassifier(nn.Module):
    """FCN-style decoder head: fuses ResBase feature maps top-down and
    predicts per-pixel class scores at the input resolution."""

    def __init__(self, num_classes):
        super(ResClassifier, self).__init__()
        self.num_classes = num_classes
        self.upsample1 = Upsample(2048, 1024)
        self.upsample2 = Upsample(1024, 512)
        self.upsample3 = Upsample(512, 64)
        self.upsample4 = Upsample(64, 64)
        self.upsample5 = Upsample(64, 32)
        self.fs1 = Fusion(1024)
        self.fs2 = Fusion(512)
        # NOTE: fs3 is created but never used in forward(); kept for
        # state_dict compatibility with existing checkpoints.
        self.fs3 = Fusion(256)
        self.fs4 = Fusion(64)
        self.fs5 = Fusion(64)
        self.out5 = self._classifier(32)

    def _classifier(self, inplanes):
        """Build a prediction head; a lightweight variant is used for the
        final 32-channel feature map."""
        if inplanes == 32:
            return nn.Sequential(
                nn.Conv2d(inplanes, self.num_classes, 1),
                nn.Conv2d(self.num_classes, self.num_classes,
                          kernel_size=3, padding=1)
            )
        # inplanes // 2: Conv2d requires an int channel count -- the original
        # `inplanes / 2` produced a float and raised under Python 3 whenever
        # this branch was taken.
        return nn.Sequential(
            nn.Conv2d(inplanes, inplanes // 2, 3, padding=1, bias=False),
            nn.BatchNorm2d(inplanes // 2),
            nn.ReLU(inplace=True),
            # nn.Dropout(.1),
            nn.Conv2d(inplanes // 2, self.num_classes, 1),
        )

    def forward(self, gen_out_dic):
        """Fuse the ResBase feature dict top-down and return class logits."""
        gen_out_dic = edict(gen_out_dic)
        # Each step: upsample the deeper map to the lateral map's spatial
        # size, then fuse (1x1 conv + BN + add + ReLU, see Fusion).
        fsfm1 = self.fs1(gen_out_dic.fm3, self.upsample1(gen_out_dic.fm4, gen_out_dic.fm3.size()[2:]))
        fsfm2 = self.fs2(gen_out_dic.fm2, self.upsample2(fsfm1, gen_out_dic.fm2.size()[2:]))
        fsfm3 = self.fs4(gen_out_dic.pool_x, self.upsample3(fsfm2, gen_out_dic.pool_x.size()[2:]))
        fsfm4 = self.fs5(gen_out_dic.conv_x, self.upsample4(fsfm3, gen_out_dic.conv_x.size()[2:]))
        fsfm5 = self.upsample5(fsfm4, gen_out_dic.img_size)
        out = self.out5(fsfm5)
        return out
def get_models(input_ch, n_class, res="50", is_data_parallel=False):
    """Instantiate the shared feature generator and the two classifier heads,
    optionally wrapped in DataParallel."""
    models = [
        ResBase(n_class, layer=res, input_ch=input_ch),
        ResClassifier(n_class),
        ResClassifier(n_class),
    ]
    if is_data_parallel:
        models = [torch.nn.DataParallel(m) for m in models]
    return models
def get_optimizer(model_parameters, opt, lr, momentum, weight_decay):
    """Build an optimizer over the trainable (requires_grad) parameters.

    Supported `opt` values: "sgd" (with momentum), "adadelta", "adam";
    anything else raises NotImplementedError.
    """
    trainable = filter(lambda p: p.requires_grad, model_parameters)
    if opt == "sgd":
        return torch.optim.SGD(trainable, lr=lr, momentum=momentum,
                               weight_decay=weight_decay)
    if opt == "adadelta":
        return torch.optim.Adadelta(trainable, lr=lr,
                                    weight_decay=weight_decay)
    if opt == "adam":
        return torch.optim.Adam(trainable, lr=lr, betas=[0.5, 0.999],
                                weight_decay=weight_decay)
    raise NotImplementedError("Only (Momentum) SGD, Adadelta, Adam are supported!")
| GAIMJKP/summer_work | model_util.py | model_util.py | py | 12,858 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.nn.Conv2d",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "torch.nn.Module",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_nu... |
import sys
import time

import faiss
import numpy as np
import pandas as pd
import wandb
# Run name passed on the command line.
# NOTE(review): wandb_name is never used below — presumably intended as the
# wandb run id/name; confirm.
wandb_name = sys.argv[1]
# SECURITY(review): hard-coded W&B API key committed to source — revoke it and
# read the key from the WANDB_API_KEY environment variable instead.
wandb.login(key='188ce11fc669c7ea709e957bece5360053daabe8')
# NOTE(review): `id` here is the Python builtin function, not a run-id string —
# this looks like a bug; confirm the intended wandb run id (wandb_name?).
wandb.init(id = id, project='faiss_clustering', entity='emilymuller1991')
n_clusters = 20
# prefix for hpc
# prefix = '/rds/general/user/emuller/home'
prefix = '/run/user/1000/gvfs/smb-share:server=rds.imperial.ac.uk,share=rds/user/emuller/home/'
year = 'both_years_zoom'
df = pd.read_csv(prefix + '/emily/phd/003_image_matching/keras_rmac-master/census2021outputs/census_2011_and_census_2021_zoom.csv')
# Column '0' holds stringified numpy arrays; strip brackets/newlines and parse
# them back into float vectors.
df['0'] = df['0'].apply(lambda x:
    np.fromstring(
        x.replace('\n','')
        .replace('[','')
        .replace(']','')
        .replace(' ',' '), sep=' '))
# NOTE(review): subsetting to rows 1..999 — presumably a debugging sample;
# confirm whether the full dataset should be clustered.
df = df[1:1000]
X = np.array(df['0'].values.tolist())
def run_kmeans(x, nmb_clusters, verbose=False):
    """Runs kmeans on 1 GPU.

    Args:
        x: (n, d) array of feature vectors.
        nmb_clusters (int): number of clusters.
        verbose: if True, print the iteration count and loss evolution.

    Returns:
        (list: cluster id of each row of x, np.ndarray: per-iteration losses)

    Side effect: logs the final loss to the active wandb run.
    """
    d = x.shape[1]  # was `n_data, d = x.shape`; n_data was never used

    # faiss implementation of k-means
    clus = faiss.Clustering(d, nmb_clusters)

    # Fixed seed so the randomly picked initialization centroids are
    # reproducible from run to run (random seeding is commented out).
    #clus.seed = np.random.randint(1234)
    clus.seed = 0
    clus.niter = 20000000
    clus.max_points_per_centroid = 1000000

    res = faiss.StandardGpuResources()
    flat_config = faiss.GpuIndexFlatConfig()
    flat_config.useFloat16 = False
    flat_config.device = 0
    index = faiss.GpuIndexFlatL2(res, d, flat_config)

    # perform the training, then assign every point to its nearest centroid
    clus.train(x, index)
    _, I = index.search(x, 1)

    # clus.obj is unavailable here; read the loss trajectory from
    # iteration_stats instead.
    stats = clus.iteration_stats
    losses = np.array([stats.at(i).obj for i in range(stats.size())])
    if verbose:
        print('Iteration %s' % str(stats.size()))
        print('k-means loss evolution: {0}'.format(losses))

    wandb.log(
        {
            "loss": losses[-1]
        }
    )
    return [int(n[0]) for n in I], losses
# start clustering (timed end-to-end)
start = time.time()
c, l = run_kmeans(X, n_clusters, verbose=True)
finish = time.time()
print ('clustering algorithm finished in %s seconds' % str(finish-start))
# Attach the cluster assignment of each row and persist next to the inputs.
df['clusters'] = c
df.to_csv(prefix + '/emily/phd/003_image_matching/clustering/output/both_years_zoom_clusters_seed0.csv')
print ('losses')
| emilymuller1991/thesis | chapter4clustering/faiss_clustering.py | faiss_clustering.py | py | 2,586 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "wandb.login",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "wandb.init",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number"... |
24639727756 | import datetime
import time
from datetime import datetime
from typing import Optional
import holidays
import numpy as np
import pandas as pd
from gluonts.time_feature.holiday import (
BLACK_FRIDAY,
CHRISTMAS_DAY,
CHRISTMAS_EVE,
COLUMBUS_DAY,
CYBER_MONDAY,
EASTER_MONDAY,
EASTER_SUNDAY,
GOOD_FRIDAY,
INDEPENDENCE_DAY,
LABOR_DAY,
MARTIN_LUTHER_KING_DAY,
MEMORIAL_DAY,
MOTHERS_DAY,
NEW_YEARS_DAY,
NEW_YEARS_EVE,
PRESIDENTS_DAY,
SUPERBOWL,
THANKSGIVING,
SpecialDateFeatureSet,
squared_exponential_kernel,
)
from pandas.tseries.holiday import (
AbstractHolidayCalendar,
GoodFriday,
Holiday,
USFederalHolidayCalendar,
USLaborDay,
USMartinLutherKingJr,
USMemorialDay,
USPresidentsDay,
USThanksgivingDay,
nearest_workday,
)
from sklearn.preprocessing import MinMaxScaler
from src.transformers import DateTimeTransformer, periodic_spline_transformer
# Example use for using a squared exponential kernel:
kernel = squared_exponential_kernel(alpha=1.0)  # NOTE(review): assigned but never used below

# Major US holidays, encoded as kernel-smoothed indicator features
# (squared-exponential bump around each special date).
sfs = SpecialDateFeatureSet(
    [
        NEW_YEARS_DAY,
        MARTIN_LUTHER_KING_DAY,
        PRESIDENTS_DAY,
        GOOD_FRIDAY,
        MEMORIAL_DAY,
        INDEPENDENCE_DAY,
        LABOR_DAY,
        THANKSGIVING,
        CHRISTMAS_DAY,
    ],
    squared_exponential_kernel(alpha=1.0),
)
# Secondary / retail-oriented special dates, same kernel encoding.
sfs2 = SpecialDateFeatureSet(
    [
        SUPERBOWL,
        CHRISTMAS_EVE,
        EASTER_SUNDAY,
        EASTER_MONDAY,
        MOTHERS_DAY,
        COLUMBUS_DAY,
        NEW_YEARS_EVE,
        BLACK_FRIDAY,
        CYBER_MONDAY,
    ],
    squared_exponential_kernel(alpha=1.0),
)
def reindex_weekdays(
    df: pd.DataFrame,
    drop_weekends: bool = True,
    start_index: pd.Timestamp = None,
    end_index: pd.Timestamp = None,
    fill_method: str = "ffill",
    extra_fill_method: Optional[str] = "bfill",
    freq: str = "D",
) -> pd.DataFrame:
    """Reindex *df* onto a regular date range, fill gaps, optionally drop weekends.

    Args:
        df: frame with a datetime-like index.
        drop_weekends: drop Saturday/Sunday rows from the result.
        start_index / end_index: range bounds; default to df's first/last index.
        fill_method: primary gap fill ("ffill" or "bfill"); None skips the pass.
        extra_fill_method: secondary fill for remaining (leading/trailing)
            gaps; None skips it.
        freq: frequency of the new index.

    Fixes over the previous version:
        * passing ``extra_fill_method=None`` (allowed by the signature) used to
          crash inside ``fillna(method=None)``; a None method is now a no-op.
        * uses ``DataFrame.ffill()`` / ``bfill()`` instead of the deprecated
          ``fillna(method=...)``.
    """
    if start_index is None:
        start_index = df.index[0]
    if end_index is None:
        end_index = df.index[-1]

    df = df.reindex(pd.date_range(start=start_index, end=end_index, freq=freq))
    for method in (fill_method, extra_fill_method):
        if method is not None:
            # e.g. df.ffill() / df.bfill()
            df = getattr(df, method)()

    if drop_weekends:
        return df.loc[~df.index.day_name().isin(["Saturday", "Sunday"]), :]
    return df
def get_datetime_covariates(
    start_index, end_index, memory_transforms, date_time_transforms
):
    """Build daily calendar covariates over [start_index, end_index].

    Features: one-hot weekend/NYSE-holiday flags, kernel-smoothed special-date
    features (sfs / sfs2), cyclic month and weekday splines; the supplied
    transform pipelines are applied and everything is min-max scaled.
    """
    calendar = NYSECalendar()
    full_index = pd.date_range(start=start_index, end=end_index, freq="D")
    nyse_holidays = calendar.holidays(start_index, end_index, return_name=True).index

    features = pd.DataFrame(index=full_index)
    features.loc[:, ["one_hot_weekends", "one_hot_holidays"]] = 0
    features.loc[features.index.isin(nyse_holidays), "one_hot_holidays"] = 1
    is_weekend = features.index.day_name().isin(["Saturday", "Sunday"])
    features.loc[is_weekend, "one_hot_weekends"] = 1

    # Kernel-smoothed proximity to major / retail special dates.
    features.loc[:, "kernel_holidays"] = sfs(features.index).max(axis=0)
    features.loc[:, "kernel_other_holidays"] = sfs2(features.index).max(axis=0)
    features = features.round(3)

    # Adds the raw datetime columns (e.g. "month", "day_of_week") used below.
    features = date_time_transforms.fit_transform(features)

    # Periodic splines encode month-of-year and day-of-week cyclically.
    month_splines = periodic_spline_transformer(12, n_splines=6).fit_transform(
        features[["month"]]
    )
    weekday_splines = periodic_spline_transformer(7, n_splines=3).fit_transform(
        features[["day_of_week"]]
    )
    spline_block = np.concatenate((month_splines, weekday_splines), axis=1)
    spline_cols = [f"spline_{i}" for i in range(spline_block.shape[1])]
    features.loc[:, spline_cols] = spline_block

    features = memory_transforms.fit_transform(features)
    scaler = MinMaxScaler()  # StandardScaler()
    return pd.DataFrame(
        data=scaler.fit_transform(features),
        index=features.index,
        columns=features.columns,
    )
# https://gist.github.com/jckantor/d100a028027c5a6b8340
class NYSECalendar(AbstractHolidayCalendar):
    """Market-holiday calendar for the NYSE.

    Usage:
        cdr = NYSECalendar()
        non_trading_days = cdr.holidays(datetime(2022, 1, 1), datetime(2022, 12, 31))
    """

    rules = [
        Holiday("New Years Day", month=1, day=1, observance=nearest_workday),
        USMartinLutherKingJr,
        USPresidentsDay,
        GoodFriday,
        USMemorialDay,
        # Juneteenth only counts from 2022 onward (start_date below);
        # presumably matching when the exchange adopted it — confirm.
        Holiday(
            "Juneteenth",
            month=6,
            day=19,
            start_date="2022-06-20",
            observance=nearest_workday,
        ),
        Holiday("USIndependenceDay", month=7, day=4, observance=nearest_workday),
        USLaborDay,
        USThanksgivingDay,
        Holiday("Christmas", month=12, day=25, observance=nearest_workday),
    ]
def main():
    """Print the 2022 US holidays (unobserved dates), after removing the 2021
    Juneteenth entry, which markets did not observe."""
    us_holidays = holidays.UnitedStates()
    # https://www.commerce.gov/hr/employees/leave/holidays
    us_holidays.observed = False
    # Slicing by date range makes the lazy holiday dict compute 2022's entries
    # (presumably — confirm against the `holidays` package docs).
    us_holidays["2022-01-01":"2022-12-31"]
    # markets were not closed on this day (Juneteenth National Independence Day) in 2021
    # BUG FIX: the previous code called `datetime.date(2021, 6, 19)`, but
    # `datetime` here is the *class* (the module is shadowed by
    # `from datetime import datetime` at the top of this file), so that invoked
    # the instance method datetime.date with three arguments -> TypeError.
    juneteenth_2021 = datetime(2021, 6, 19).date()
    if juneteenth_2021 in us_holidays.keys():
        del us_holidays[juneteenth_2021]
    for date, name in sorted(us_holidays.items()):
        print(date, name)
    # Candidate retail special dates kept for reference:
    # Thanksgiving
    # Black Friday
    # Cyber Monday
    # Giving Tuesday
    # Green Monday
    # Free Shipping Day
    # Hanukkah (start/end)
    # Christmas
    # Kwanzaa
    # Boxing Day
    # New Year's Eve/Day
if __name__ == "__main__":
main()
| aleksei-mashlakov/m6_competition | src/time_features.py | time_features.py | py | 5,570 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "gluonts.time_feature.holiday.squared_exponential_kernel",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "gluonts.time_feature.holiday.SpecialDateFeatureSet",
"line_number": 49,
"usage_type": "call"
},
{
"api_name": "gluonts.time_feature.holiday.NEW_YEARS... |
27070215388 | import json
import os
from flask import url_for
from flask_babel import _
from slugify import slugify
# Load the reference country list shipped alongside this module, sorted by
# display name; the UK itself ("GBR") is excluded from the explorable set.
with open(
    os.path.join(os.path.dirname(__file__), "countries.json"), encoding="utf8"
) as a:
    COUNTRIES = sorted(json.load(a)["countries"], key=lambda k: k["name"])
COUNTRIES = [c for c in COUNTRIES if c["iso"] != "GBR"]
def get_country_groups(as_dict=False):
    """Build the available country groupings from COUNTRIES.

    Groupings: individual countries, continents, UNDP regions and DAC status
    groups. With ``as_dict=True`` returns a dict keyed by slug (or a
    (group_type, slug) tuple) mapping to {"name", "countries"} records.
    Otherwise returns a list of (heading, options, flag) tuples shaped for a
    grouped menu, where each option carries a Flask URL as its "id"
    (the boolean flag presumably marks the section as initially expanded —
    TODO confirm against the template that consumes this).
    """
    # Collect the distinct grouping values present in the data.
    continents = set()
    undp = set()
    dac = set()
    for c in COUNTRIES:
        continents.add(c["continent"])
        if c["undp"]:
            undp.add(c["undp"])
        if c["dac_status"]:
            dac.add(c["dac_status"])
    if as_dict:
        areas = {}
        areas["all"] = {"name": _("all countries"), "countries": COUNTRIES}
        # One entry per country, keyed by its slugified ISO3 code.
        for c in COUNTRIES:
            areas[slugify(c["iso"])] = {"name": c["name"], "countries": [c]}
        # One entry per continent, keyed by a ("continent", slug) tuple.
        for con in continents:
            areas[("continent", slugify(con))] = {
                "name": con,
                "countries": [c for c in COUNTRIES if c["continent"] == con],
            }
        # UNDP and DAC groupings share one shape: an "all" entry plus one
        # entry per distinct value of the relevant country field.
        for i in [
            ("undp", "undp", _("all UNDP countries"), undp),
            ("dac", "dac_status", _("all DAC countries"), dac),
        ]:
            areas[(i[0], "all")] = {
                "name": i[2],
                "countries": [c for c in COUNTRIES if c[i[1]]],
            }
            for con in i[3]:
                areas[(i[0], slugify(con))] = {
                    "name": con,
                    "countries": [c for c in COUNTRIES if c[i[1]] == con],
                }
        return areas
    # List form for rendering: fixed sections first, then one section per
    # continent listing its member countries.
    return [
        (
            None,
            [{"id": url_for("data.country", countryid="all"), "name": "all countries"}],
            True,
        ),
        (
            "Continents",
            [
                {
                    "id": url_for(
                        "data.region", regiontype="continent", regionid=slugify(c)
                    ),
                    "name": c,
                }
                for c in continents
            ],
            True,
        ),
        (
            _('<abbr title="United Nations Development Programme">UNDP</abbr> regions'),
            [
                {
                    "id": url_for("data.region", regiontype="undp", regionid="all"),
                    "name": _("all UNDP regions"),
                }
            ]
            + [
                {
                    "id": url_for(
                        "data.region", regiontype="undp", regionid=slugify(c)
                    ),
                    "name": c,
                }
                for c in undp
                if c
            ],
            True,
        ),
        (
            _('<abbr title="OECD Development Assistance Committee">DAC</abbr> groups'),
            [
                {
                    "id": url_for("data.region", regiontype="dac", regionid="all"),
                    "name": _("all DAC groups"),
                }
            ]
            + [
                {
                    "id": url_for("data.region", regiontype="dac", regionid=slugify(c)),
                    "name": c,
                }
                for c in dac
                if c
            ],
            True,
        ),
    ] + [
        (
            con,
            [
                {
                    "id": url_for("data.country", countryid=slugify(c["iso"])),
                    "name": c["name"],
                }
                for c in COUNTRIES
                if c["continent"] == con
            ],
            False,
        )
        for con in sorted(continents)
    ]
def get_multiple_countries(countryid):
    """Combine one or more "+"-separated area ids into a single area record.

    Unknown ids are skipped; returns {"name": "...", "countries": [...]} with
    the names joined by ", ", or None when no id matched.
    """
    countryids = countryid.lower().split("+")
    # PERF FIX: get_country_groups(as_dict=True) rebuilds the full grouping
    # dict from COUNTRIES; it was previously called once per id inside the
    # loop. Hoist the loop-invariant call.
    groups = get_country_groups(as_dict=True)
    area = {"name": [], "countries": []}
    for i in countryids:
        this_area = groups.get(i)
        if this_area:
            area["name"].append(this_area["name"])
            area["countries"].extend(this_area["countries"])
    if not area["countries"]:
        return None
    area["name"] = ", ".join(area["name"])
    return area
def get_country_by_id(id):
    """Return the country record whose numeric id, ISO3 or ISO2 code equals
    *id*, or None when nothing matches."""
    matches = (c for c in COUNTRIES if id in (c["id"], c["iso"], c["iso2"]))
    return next(matches, None)
SIMILAR_INITIATIVE = {
"sen": [
{
"homepage": "https://pfongue.org/",
"title": _("Platform of European NGOs in Senegal"),
"directlink": "https://pfongue.org/-Cartographie-.html",
"directlinktext": _("Map of projects"),
},
{
"title": "CONGAD - Conseil des ONG d’Appui au Développement",
"homepage": "http://www.congad.org/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
},
],
"uga": [
{
"homepage": "https://ugandanetworks.org/",
"title": "Uganda Networks",
"directlink": "https://ugandanetworks.org/groups/250421/directory_search.aspx",
"directlinktext": _("Directory search"),
},
{
"homepage": "http://www.uwasnet.org/Elgg/",
"title": "Uganda Water and Sanitation NGO Network",
"directlink": "http://www.uwasnet.org/Elgg/network/",
"directlinktext": _("Members directory"),
},
{
"title": "UNNGOF - Uganda National NGO Forum",
"homepage": "http://ngoforum.or.ug/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
},
],
"bra": [
{
"homepage": "http://www.abong.org.br",
"title": "Abong - Associaçao Brasileira de ONGs",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"hti": [
{"homepage": "http://coeh.eu/members/", "title": "Coordination Europe-Haiti"}
],
"chl": [
{
"title": "ACCIÓN - Asociación Chilena de ONG",
"homepage": "http://www.accionag.cl",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"bdi": [
{
"title": "ADIR - Action Développement et Intégration Régionale",
"homepage": "https://adirplateform.wordpress.com/adir/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"dom": [
{
"title": "Alianza ONG",
"homepage": "http://alianzaong.org.do",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"per": [
{
"title": "ANC - Asociación Nacional de Centros",
"homepage": "http://www.anc.org.pe",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"ury": [
{
"title": "ANONG - Asociación Nacional de ONG",
"homepage": "http://www.anong.org.uy",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"hnd": [
{
"title": "ASONOG - Asociacion De Organismos No Gubernamentales",
"homepage": "http://www.asonog.hn/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"bwa": [
{
"title": "Bocongo - Botswana Council of Non-Governmental Organisations",
"homepage": "http://www.bocongo.org/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"gbr": [
{
"title": "Bond - British Overseas NGOs for Development",
"homepage": "http://www.bond.org.uk",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"chn": [
{
"title": "CANGO - China Association for NGO Cooperation",
"homepage": "http://www.cango.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"khm": [
{
"title": "CCC - Cooperation Committee for Cambodia",
"homepage": "http://www.ccc-cambodia.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"can": [
{
"title": "CCIC - Canadian council for international co-operation",
"homepage": "http://www.ccic.ca",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"rwa": [
{
"title": "CCOAIB - Conseil de Concertation des Organisations d’Appui aux Initiatives de Base",
"homepage": "http://www.ccoaib.org.rw",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"cog": [
{
"title": "CCOD - Conseil de Concertation des ONG de développement",
"homepage": "https://pcpacongo.org/ccod/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"col": [
{
"title": "CCONG - Confederación Colombiana de ONG",
"homepage": "http://www.ccong.org.co",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"syc": [
{
"title": "CEPS - Citizens Engagement Platform Seychelles",
"homepage": "http://www.civilsociety.sc",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"ben": [
{
"title": "CFRONG - Collectif des Fédérations et Réseaux d’ONG du Bénin",
"homepage": "http://www.cfrong.org/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"cok": [
{
"title": "CICSO - Cook Islands Association of NGOs",
"homepage": "",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"nzl": [
{
"title": "CID - Council for International Development",
"homepage": "http://www.cid.org.nz",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"tcd": [
{
"title": "CILONG - Centre d’information et de Liaison des ONG",
"homepage": "",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"caf": [
{
"title": "CIONGCA - Conseil Inter ONG de Centrafrique",
"homepage": "https://www.facebook.com/ciongcarca/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"cod": [
{
"title": "CNONGD - Conseil National des ONGD de Développement",
"homepage": "http://www.cnongdrdc.org/cnongd.php",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"phl": [
{
"title": "CODE - Caucus of Development NGO Networks",
"homepage": "http://www.code-ngo.org/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"cmr": [
{
"title": "CONGAC - Collectif des ONG Agréées du Cameroun",
"homepage": "",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"gtm": [
{
"title": "CONGCOOP - Coordinación de ONG y Cooperativas",
"homepage": "http://www.congcoop.org.gt",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"esp": [
{
"title": "Coordinadora - NGO Coordinator for Development",
"homepage": "http://www.coordinadoraongd.org/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"fra": [
{
"title": "Coordination SUD - Coordination SUD – Solidarité Urgence Développement",
"homepage": "http://www.coordinationsud.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
# "civ": [{'title': 'CSCI - Convention de la Société Civile Ivoirienne',
# 'homepage': 'http://www.cs-ci.com/',
# 'source': 'Forus',
# 'source_link': 'http://forus-international.org/en/about-us/who-we-are'}],
"irl": [
{
"title": "Dochas",
"homepage": "https://dochas.ie/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"arg": [
{
"title": "EENGD - Red Encuentro",
"homepage": "http://www.encuentrodeongs.org.ar",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"mar": [
{
"title": "Espace Associatif",
"homepage": "http://www.espace-associatif.ma",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"fji": [
{
"title": "FCOSS - Fiji Council of Social Services",
"homepage": "",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"mli": [
{
"title": "FECONG - Fédération des Collectif d’ONG du Mali",
"homepage": "http://www.fecong.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"fin": [
{
"title": "FINGO - Finnish NGO Platform",
"homepage": "https://www.fingo.fi/english",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"rou": [
{
"title": "FOND - Romanian NGDOs Platform",
"homepage": "http://www.fondromania.org/pagini/index.php",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"ago": [
{
"title": "FONGA - Foro das ONGs Angolanas",
"homepage": "https://www.facebook.com/plateformeFonga",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"gin": [
{
"title": "FONGDD - Forum des ONG pour le Développement Durable",
"homepage": "",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"stp": [
{
"title": "FONG-STP - Federaçao das ONGs de Sao Tomé e Principe",
"homepage": "http://fong-stp.net",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"tgo": [
{
"title": "FONGTO - Fédération des Organisations Non Gouvernementales au Togo",
"homepage": "http://fongto.org/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"cze": [
{
"title": "FoRS - Czech Forum for Development Co-operation",
"homepage": "http://www.fors.cz",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"dnk": [
{
"title": "Global Focus",
"homepage": "http://www.globaltfokus.dk/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"hun": [
{
"title": "HAND - Hungarian Association of NGOs for Development and Humanitarian Aid",
"homepage": "http://hand.org.hu/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"idn": [
{
"title": "INFID - International NGO Forum on Indonesian Development",
"homepage": "http://www.infid.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"usa": [
{
"title": "InterAction",
"homepage": "http://www.interaction.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"jpn": [
{
"title": "JANIC - Japan NGO Center for International Cooperation",
"homepage": "http://www.janic.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"moz": [
{
"title": "Joint - League For NGOs in Mozambique",
"homepage": "http://www.joint.org.mz/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"kir": [
{
"title": "KANGO - Kiribati Association of NGOs",
"homepage": "http://www.kango.org.ki",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"kor": [
{
"title": "KCOC - Korea NGO Council for Overseas Development Cooperations",
"homepage": "http://www.ngokcoc.or.kr",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"lva": [
{
"title": "LAPAS - Latvijas Platforma attīstības sadarbībai",
"homepage": "http://lapas.lv/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"ltu": [
{
"title": "Lithuanian National Non-Governmental Development Cooperation Organisations’ Platform",
"homepage": "http://www.vbplatforma.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"mus": [
{
"title": "MACOSS - Mauritius Council of Social Service",
"homepage": "http://www.macoss.intnet.mu/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"npl": [
{
"title": "NFN - NGO Federation of Nepal",
"homepage": "http://www.ngofederation.org/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
},
{
"title": "British and Nepal NGO Network (BRANNGO)",
"homepage": "https://www.branngo.org/",
},
],
"nga": [
{
"title": "NNNGO - Nigeria Network of NGOs",
"homepage": "http://www.nnngo.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"mdg": [
{
"title": "PFNOSCM - Plateforme Nationale des Organisations de la Société Civile de Madagascar",
"homepage": "http://societecivilemalgache.com/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"prt": [
{
"title": "Plataforma ONGD - Portuguese Platform NGOD",
"homepage": "http://www.plataformaongd.pt/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"bel": [
{
"title": "Plateforme belge des ONG de développement et d’urgence",
"homepage": "",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"cpv": [
{
"title": "PLATONG - Plataforma das ONGs de Cabo Verde",
"homepage": "http://www.platongs.org.cv/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"pry": [
{
"title": "POJOAJU - Asociación de ONGs del Paraguay",
"homepage": "http://www.pojoaju.org.py",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"mlt": [
{
"title": "SKOP - National Platform of Maltese NGDOs",
"homepage": "http://www.skopmalta.org/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"svn": [
{
"title": "SLOGA - Slovenian Global Action",
"homepage": "http://www.sloga-platform.org/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"bfa": [
{
"title": "SPONG - Secrétariat Permanent des ONG du Burkina Faso",
"homepage": "http://www.spong.bf/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"twn": [
{
"title": "Taiwan Alliance in International Development",
"homepage": "http://www.taiwanaid.org/en",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"gmb": [
{
"title": "TANGO - The Association of Non-Governmental Organizations",
"homepage": "http://www.tangogambia.org/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"bol": [
{
"title": "UNITAS - Red Unitas",
"homepage": "http://www.redunitas.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"ind": [
{
"title": "VANI - Voluntary Action Network India",
"homepage": "http://www.vaniindia.org",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
"zmb": [
{
"title": "ZCSD - Zambia Council for Social Development",
"homepage": "http://www.zcsdev.org.zm/",
"source": "Forus",
"source_link": "http://forus-international.org/en/about-us/who-we-are",
}
],
}
| drkane/ngo-explorer | ngo_explorer/utils/countries.py | countries.py | py | 23,990 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number":... |
32596992467 | import heapq
from collections import defaultdict
class HuffmanNode:
    """Node of a Huffman tree: leaves carry a symbol, internal nodes use
    symbol=None; `freq` is the subtree's total frequency."""

    def __init__(self, symbol, freq):
        self.symbol = symbol
        self.freq = freq
        # Children are attached later, when nodes get merged.
        self.left = self.right = None

    def __lt__(self, other):
        # Order nodes by frequency so they can live in a min-heap.
        return self.freq < other.freq
def build_huffman_tree(frequencies):
    """Build a Huffman tree from a {symbol: count} mapping and return its root.

    Repeatedly merges the two lowest-frequency nodes until one root remains.
    """
    heap = []
    for symbol, count in frequencies.items():
        heapq.heappush(heap, HuffmanNode(symbol, count))

    while len(heap) > 1:
        first = heapq.heappop(heap)
        second = heapq.heappop(heap)
        parent = HuffmanNode(None, first.freq + second.freq)
        parent.left = first
        parent.right = second
        heapq.heappush(heap, parent)

    return heap[0]
def traverse_huffman_tree(node, code, code_dict):
    """Walk the tree, recording each leaf symbol's bit string in code_dict.

    Left edges append "0", right edges append "1".

    EDGE-CASE FIX: a tree consisting of a single leaf (text with one unique
    symbol) previously received the empty code "", which produced an empty
    encoded string; such a leaf now gets the code "0".
    """
    if node.symbol is not None:
        code_dict[node.symbol] = code or "0"
        return
    traverse_huffman_tree(node.left, code + '0', code_dict)
    traverse_huffman_tree(node.right, code + '1', code_dict)
def huffman_encoding(text):
    """Huffman-encode *text* (spaces are stripped first).

    Returns (encoded, frequencies, code_dict) where `encoded` is a list of
    (symbol, bit-string) pairs in input order.
    """
    stripped = text.replace(" ", "")  # Usunięcie spacji z tekstu
    frequencies = defaultdict(int)
    for ch in stripped:
        frequencies[ch] += 1

    root = build_huffman_tree(frequencies)
    code_dict = {}
    traverse_huffman_tree(root, '', code_dict)

    encoded = [(ch, code_dict[ch]) for ch in stripped]
    return encoded, frequencies, code_dict
def main():
    """Prompt for text, encode it, and print the frequency/code table."""
    text = input("Podaj tekst do zakodowania: ")
    encoded_text, frequencies, code_dict = huffman_encoding(text)
    print("Znak\t| Liczba wystąpień\t| Kodowanie")
    print("------------------------------------------")
    # Rows sorted by ascending frequency.
    for symbol, count in sorted(frequencies.items(), key=lambda x: x[1]):
        code = code_dict[symbol]
        print(f"{symbol}\t| {count}\t\t| {code}")
    encoded_string = ''.join(code for symbol, code in encoded_text)
    print("Zakodowany tekst:", encoded_string)


# BUG FIX: main() was previously called unconditionally at import time,
# blocking on input() whenever this module was imported. Guard it.
if __name__ == "__main__":
    main()
| s25672-pj/asdZad4 | zad4.py | zad4.py | py | 1,980 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "heapq.heappush",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "heapq.heappop",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "heapq.heappush",
"line... |
25846418713 | #!/usr/bin/python3
import discord
from discord.ext import commands
from spam_modules import meme
class MemeBot(commands.Cog):
    """Discord cog exposing the Disco Meme commands (help, meme)."""

    def __init__(self, client):
        self.client = client

    @commands.command(name='help')
    async def help(self, ctx):
        """Send an embed describing the available commands."""
        embed = discord.Embed(title='Help', description='Help for Disco Meme')
        embed.add_field(name='help', value='Displays this message', inline=False)
        memecommand = """
        Shows a random meme.
        Syntax: ```>meme```
        """
        embed.add_field(name='meme', value=memecommand, inline=False)
        await ctx.send(embed=embed)

    @commands.command(name='meme', help='Show a random meme')
    async def memes(self, ctx):
        """Fetch one random meme and send it as an embed image."""
        links = meme.getmeme(1)
        embed = discord.Embed()
        embed.set_image(url=links[0])
        await ctx.send(embed=embed)
def setup(client):
    # discord.py extension entry point: registers the MemeBot cog on load.
    client.add_cog(MemeBot(client))
{
"api_name": "discord.ext.commands.Cog",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "discor... |
40654443610 | import unittest
from datetime import datetime
from batteries.nubbin_battery import NubbinBattery
from batteries.spindler_battery import SpindlerBattery
class TestNubbin(unittest.TestCase):
def setUp(self) -> None:
self.current_date: datetime = datetime.today().date()
self.last_service_date: datetime = datetime.today().date()
self.nubbin: NubbinBattery = None
def create_nubbin_battery(self):
self.nubbin = NubbinBattery(self.current_date, self.last_service_date)
def test_battery_should_be_serviced(self):
self.last_service_date = self.current_date.replace(
year=self.current_date.year - 5
)
self.create_nubbin_battery()
self.assertTrue(self.nubbin.needs_service())
def test_battery_should_not_be_serviced(self):
self.last_service_date = self.current_date.replace(
year=self.current_date.year - 3
)
self.create_nubbin_battery()
self.assertFalse(self.nubbin.needs_service())
class TestSpindler(unittest.TestCase):
def setUp(self) -> None:
self.current_date: datetime = datetime.today().date()
self.last_service_date: datetime = datetime.today().date()
self.spindler: SpindlerBattery = None
def create_spindler_battery(self):
self.spindler = SpindlerBattery(self.current_date, self.last_service_date)
def test_battery_should_be_serviced(self):
self.last_service_date = self.current_date.replace(
year=self.current_date.year - 4
)
self.create_spindler_battery()
self.assertTrue(self.spindler.needs_service())
def test_battery_should_not_be_serviced(self):
self.last_service_date = self.current_date.replace(
year=self.current_date.year - 2
)
self.create_spindler_battery()
self.assertFalse(self.spindler.needs_service())
if __name__ == "__main__":
unittest.main()
| manucho254/forage-lyft-app | test/test_battries.py | test_battries.py | py | 1,956 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "unittest.TestCase",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.today",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datet... |
41977630402 | # %%
import os
import glob
import numpy as np
import torch
from PIL import Image, ImageDraw
from skimage import draw
from skimage.io import imread
from matplotlib import pyplot as plt
from scipy.ndimage.filters import gaussian_filter
from torch.utils.data import Dataset, DataLoader
from torchvision import transforms as T
import import_ipynb
import transforms as my_T
import dataset
from dataset import Microscopy_dataset
def get_transforms(train=False, rescale_size=(256, 256)):
    """Build the preprocessing pipeline for the microscopy images.

    Rescaling is applied only in training mode; normalization and tensor
    conversion are always applied, in that order.
    """
    steps = []
    if train:
        steps.append(my_T.Rescale(rescale_size))
    steps.extend([my_T.Normalize(), my_T.ToTensor()])
    return my_T.Compose(steps)
def get_loaders(
    train_dir,
    train_maskdir,
    val_dir,
    val_maskdir,
    batch_size,
    num_workers=1,
    pin_memory=True):
    """Build the train and validation DataLoaders for the Microscopy dataset.

    Returns (train_loader, val_loader); only the training loader shuffles.
    """
    # Options shared by both loaders.
    loader_kwargs = dict(
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
    )
    train_loader = DataLoader(
        Microscopy_dataset(image_dir=train_dir, mask_dir=train_maskdir),
        shuffle=True,
        **loader_kwargs,
    )
    val_loader = DataLoader(
        Microscopy_dataset(image_dir=val_dir, mask_dir=val_maskdir),
        shuffle=False,
        **loader_kwargs,
    )
    return train_loader, val_loader
def get_loaders_test(
    val_dir,
    val_maskdir,
    batch_size,
    num_workers=1,
    pin_memory=True):
    """Build the DataLoader for the test Microscopy dataset.

    Fixes: the descriptive comment previously sat *inside* the parameter
    list, and a leftover debug print ran on every call; both removed.
    """
    val_ds = Microscopy_dataset(
        image_dir=val_dir,
        mask_dir=val_maskdir,
    )
    # NOTE(review): shuffle=True is unusual for a test loader — kept to
    # preserve existing behavior; confirm whether it should be False.
    val_loader = DataLoader(
        val_ds,
        batch_size=batch_size,
        num_workers=num_workers,
        pin_memory=pin_memory,
        shuffle=True,
    )
    return val_loader
def check_acc(loader, model, device="cuda"):
    """Evaluate pixel accuracy and Dice score for a binary-segmentation model.

    Args:
        loader: iterable of (image, mask) batches.
        model: callable returning per-pixel logits; must have eval()/train().
        device: device string the tensors are moved to.

    Returns:
        (accuracy, dice) as Python floats. (New: the original printed the
        metrics but returned None; returning them is backward-compatible.)

    Fixes: removed leftover debug prints ("Breakpoint!", type(loader)) and
    guarded against empty loaders to avoid division by zero.
    """
    num_correct = 0
    num_pixels = 0
    dice_score = 0.0
    model.eval()
    with torch.no_grad():
        for x, y in loader:
            x = x.to(device)
            y = y.to(device).unsqueeze(1)  # add channel dim to match preds
            preds = torch.sigmoid(model(x))
            preds = (preds > 0.5).float()  # binarize at 0.5
            num_correct += (preds == y).sum()
            num_pixels += torch.numel(preds)
            # Per-batch Dice; epsilon avoids 0/0 on empty masks.
            dice_score += (2 * (preds * y).sum()) / ((preds + y).sum() + 1e-8)
    model.train()
    accuracy = float(num_correct) / max(num_pixels, 1)
    dice = float(dice_score) / max(len(loader), 1)
    print(f"Got {num_correct}/{num_pixels} with acc {accuracy * 100:.2f}")
    print(f"Dice score: {dice}")
    return accuracy, dice
| NataliePolach/Bachelor-thessis | utils.py | utils.py | py | 3,034 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "transforms.append",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "transforms.Rescale",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "transforms.append",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "transforms.Nor... |
36176895571 | #----------------------------------------------------------------------
# A dataset wrapper for pluto simulations
#
# Author : Thomas Rometsch (thomas.rometsch@uni-tuebingen.de)
# Date : 2018-07-12
#----------------------------------------------------------------------
from cast.dataset import *
from . import units
from . import grid
from .units import Dimension as Dim
import re
import os
import numpy as np
import astropy.units as u
from .pluload import pload
from .particles import Planet
""" Datafiles produced by pluto.
Filenames are given as regex expression to extract from file list."""
datafile_pattern = ".*\.dat"
outputdir_indicators = ["grid.out", "dbl.out"]
scalar_files = []
collections = ["analysis_values.dat"]
# Look for planet related files and extract the planet number
# from the filename using (\d+) group in regex pattern
particle_file_pattern = ['nbody_coordinates.dat', 'nbody_orbital_elements.dat']
known_units = {
"niter" : Dim(),
"M_DISK" : Dim(M=1),
"KE_R" : Dim(M=1, L=2, T=-2),
"KE_TH" : Dim(M=1, L=2, T=-2),
"KE_PHI" : Dim(M=1, L=2, T=-2),
"RHO_MIN" : Dim(M=1, L=-3),
"RHO_MAX" : Dim(M=1, L=-3),
"J_DISK_." : Dim(M=1, L=2, T=-1),
"F_." : Dim(M=1, L=1, T=-2),
"ACC_." : Dim(L=1, T=-2),
"P_1_A_." : Dim(L=1, T=-2),
"rho" : Dim(M=1, L=-3),
"prs" : Dim(M=1, L=-1, T=-2),
"vx." : Dim(L=1, T=-1)
}
def find_pluto_log(rootdir):
try:
rv = os.path.join(find_dir_containing("pluto.log", rootdir), "pluto.log")
except FileNotFoundError:
#rv = None
raise
return rv
def extract_pluto_log_units(datadir):
fpath = find_pluto_log(datadir)
if fpath is None:
rv = units.UnitSystem()
else:
foundUnits = {}
with open(fpath, 'r') as f:
for l in f:
# match to lines like: [Density]: 4.249e-09 (gr/cm^3), 2.540e+15 (1/cm^3)
rs = re.match("\s*\[(\w+)\]\:\s*([0-9\.e\-\+]+).*\((.+)\)",l.split(',')[0].strip())
if rs:
rs = rs.groups()
name = rs[0]
value = rs[1]
unitstring = rs[2].replace('^', '').replace('gr','g').replace('sec', 's')
foundUnits[rs[0]] = float(value)*u.Unit(unitstring)
baseunits = {
"L" : foundUnits['Length'],
"T" : foundUnits['Time'],
"Th" : foundUnits['Temperature'],
"M" : foundUnits['Density']*foundUnits['Length']**3
}
rv = units.UnitSystem(baseunits)
return rv
def parse_text_header(fpath):
with open(fpath, 'r') as f:
header = []
for l in f:
l = l.strip()
if l[0] != '#':
break
header.append(l)
return parse_text_header_pluto(header)
def parse_text_header_pluto(header):
names = []
for l in header:
l = l.strip('#').strip()
if l[:4] == "vars":
l = l[4:].lstrip().lstrip(":")
names = [s.strip() for s in l.split('\t')]
timecol = None
for n, name in enumerate(names):
if name in ['time', 'simulation time']:
timecol = n
return names, timecol
def pload_to_grid(pl, unitSys = {'L' : 1}):
if pl.geometry == "SPHERICAL":
return grid.SphericalRegularGrid(r=pl.x1*unitSys['L'], dr=pl.dx1*unitSys['L'],
theta=pl.x2, dtheta=pl.dx2,
phi=pl.x3, dphi=pl.dx3 )
elif pl.geometry == "POLAR":
return grid.PolarRegularGrid(r=pl.x1*unitSys['L'], dr=pl.dx1*unitSys['L'],
phi=pl.x3, dphi=pl.dx3 )
else:
raise NotImplementedError("No grid implemented for '{}' geometry.".format(pl.geometry))
def createPlutoParticles(datadir, unitSys, extraData):
# return a list of PlutoParticle objects for all present nbody particles
particleIds = plutoParticlesIds(datadir)
particles = []
for i in particleIds:
particles.append(Planet(name = i, unitSys=unitSys, data={}))
# register the common load function
loadFunction = lambda : loadPlutoParticles(datadir, particles, unitSys, extraData)
for p in particles:
p.load = loadFunction
return particles
def plutoParticlesIds(datadir):
with open(os.path.join(datadir, 'nbody_coordinates.dat'), 'r') as f:
# first, find the number of particles
ids = []
for l in f:
if l[0] == "#":
continue
pid = l.strip().split()[0]
if not pid in ids:
ids.append(pid)
else:
break
return ids
def loadPlutoParticles(datadir, particles, unitSys, extraData):
pids = [p.name for p in particles]
Nparticles = len(particles)
# load cartesian positions and velocities
data = np.genfromtxt(os.path.join(datadir, 'nbody_coordinates.dat'))
varNames = ['id', 'time', 'x1', 'x2', 'x3', 'v1', 'v2', 'v3']
units = { 'id' : 1,
'time' : unitSys['T'],
'x1' : unitSys['L'],
'x2' : unitSys['L'],
'x3' : unitSys['L'],
'v1' : unitSys['L']/unitSys['T'],
'v2' : unitSys['L']/unitSys['T'],
'v3' : unitSys['L']/unitSys['T'] }
for n, p in enumerate(particles):
time = data[n::Nparticles, 1]*unitSys['T']
for k, name in enumerate(varNames):
p.data[name] = TimeSeries(name = name, data = data[n::Nparticles, k]*units[name], time=p.data['time'])
# load orbital elements
data = np.genfromtxt(os.path.join(datadir, 'nbody_orbital_elements.dat'))
varNames = ['id', 'time', 'a', 'e', 'i',
'AscendingNode', 'Periastron', 'TrueAnomaly',
'PeriodInCodeUnits', 'EccentricAnomaly', 'MeanAnomaly']
units = { 'id' : 1,
'time' : unitSys['T'],
'a' : unitSys['L'],
'e' : 1,
'i' : u.rad,
'AscendingNode' : u.rad,
'Periastron' : u.rad,
'TrueAnomaly' : u.rad,
'PeriodInCodeUnits' : unitSys['T'],
'EccentricAnomaly' : u.rad,
'MeanAnomaly' : u.rad }
# orbital elements are not printed for the primary object
# load it for all others
for n, p in enumerate(particles[1:]):
time = data[n::Nparticles-1, 1]*unitSys['T']
for k, name in enumerate(varNames):
if k <= 1:
continue
p.data[name] = TimeSeries(name = name, data = data[n::Nparticles-1, k]*units[name], time=time)
# get the mass from the restart file, assume its constant!
with open(os.path.join(datadir, 'nbody.out'), 'r') as df:
for n,line in zip(range(Nparticles) ,df):
parts = line.strip().split()
if int(parts[1]) != n:
raise ValueError("line {} does not correspond to planet {} but to {}".format(n,n,parts[1]))
mass = float(parts[2])*np.ones(len(particles[n]['time']))*unitSys['M']
particles[n].data['mass'] = TimeSeries(name = 'mass', data = mass, time=particles[n].data['time'])
# get accelerations which are written to the analysisValue file
for n, p in enumerate(particles):
for i in range(1,4):
varname = 'P_{}_A_{}'.format(n, i-1)
if varname in extraData:
p.data['a{}'.format(i)] = extraData[varname]
p.data['a{}'.format(i)].name = 'a{}'.format(i)
class ScalarTimeSeries(TimeSeries):
def __init__(self, time=None, data=None, datafile=None, name = None, unitSys = None):
self.time = time
self.data = data
self.datafile = datafile
self.name = name
self.unitSys = unitSys
def load(self, *args, **kwargs):
pass
class PlutoField(Field):
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def load(self):
if self.data is None:
if self.resource[1] is None:
self.resource[1] = self.pload()
self.data = self.resource[1].__dict__[self.name]
if self.unitSys is not None and self.name is not None:
self.data *= self.unitSys.find(self.name)
if self.unitSys is not None and self.name == 'rho' and self.grid.dim == 2:
self.data *= self.unitSys['L']
def pload(self):
output_number = int(re.match("data\.(\d+)\.dbl", os.path.basename(self.resource[0])).groups()[0])
return pload(output_number, os.path.dirname(self.resource[0]))
class PlutoDataset(AbstractDataset):
def __init__(self, rootdir):
super().__init__()
self.rootdir = rootdir
self.find_datadir()
self.units = extract_pluto_log_units(self.rootdir)
for key in known_units:
self.units.register(key, known_units[key])
self.find_datafiles()
self.find_scalars()
self.find_collections()
self.find_particles()
self.find_fields()
def find_datadir(self):
# Look for an output dir inside the rootdir
self.datadir = find_dir_containing(outputdir_indicators, self.rootdir)
def find_datafiles(self):
""" Search the datadir for datafiles."""
matches = (re.match(datafile_pattern, s) for s in os.listdir(self.datadir))
self.datafiles = [m.string for m in matches if m is not None]
def find_scalars(self):
""" Find all time info."""
for s in scalar_files:
if s in self.datafiles:
name = s[:-4]
self.timeSeries[name] = ScalarTimeSeries(os.path.join(self.datadir, s)
, name=name, unitSys=self.units)
def find_collections(self):
""" Find files containing multiple time series and parse them. """
for c in collections:
if c in self.datafiles:
self.add_collection(c)
def add_collection(self, collection):
fpath = os.path.join(self.datadir, collection)
names, timecol = parse_text_header(fpath)
if timecol is None:
raise TypeError("Could not find time info for constructing time series in '{}' with names '{}'.".format(fpath, names))
data = np.genfromtxt(fpath)
time = data[:,timecol]*self.units['time']
for n, name in enumerate(names):
if n == timecol:
continue
vardata = data[:,n]*self.units.find(name)
self.timeSeries[name] = ScalarTimeSeries(time = time, data = vardata, name = name)
def find_particles(self):
if all([f in self.datafiles for f in particle_file_pattern]):
particles = createPlutoParticles(self.datadir, self.units, self.timeSeries)
self.particles = {p.name : p for p in particles}
self.planets = PlanetSystem( planets= particles[1:])
def find_fields(self):
output_numbers = []
time = []
with open(os.path.join(self.datadir, 'dbl.out')) as f:
got_names = False
for l in f:
parts = l.strip().split()
if not got_names:
var_names = parts[6:]
got_names = True
output_numbers.append(int(parts[0]))
time.append(float(parts[1]))
self.times['coarse'] = Time(data = time*self.units['T'])
datafiles = [[int(m.groups()[0]), m.string]
for m in (re.match("data\.(\d*)\.dbl", s)
for s in os.listdir(self.datadir)) if m is not None]
datafiles = sorted( datafiles, key=lambda item: item[0])
# Get grid data from first output
pl = pload(datafiles[0][0], self.datadir)
self.grids['full'] = pload_to_grid(pl, self.units)
self._field_resource = [ [f, None] for f in (os.path.join(self.datadir, s) for s in (l[1] for l in datafiles))]
self._field_resource[0][1] = pl
for name in var_names:
self.fields[name] = FieldTimeSeries(name, self._field_resource, self.times['coarse'],
self.grids['full'], unitSys=self.units,
Field = PlutoField)
| rometsch/cast | cast/pluto.py | pluto.py | py | 12,451 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "units.Dimension",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "units.Dimension",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "units.Dimension",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "units.Dimension",
... |
21578935466 | from fastapi.testclient import TestClient
import pytest
from app.main import app
from app.db import Base, engine
from app.cache import Cache
cache = Cache()
client = TestClient(app)
@pytest.fixture(autouse=True)
def cleanup_db():
    """Drop and recreate all tables before every test (autouse) so each
    test runs against an empty database."""
    Base.metadata.drop_all(bind=engine)
    Base.metadata.create_all(bind=engine)
    yield
def create_url():
    """Shorten https://www.example.com and return its 6-char shortcode."""
    response = client.post(
        "/shorten",
        json={"url": "https://www.example.com"},
        follow_redirects=False,
    )
    assert response.status_code == 201
    shortcode = response.json()["location"].split("/")[2]
    assert len(shortcode) == 6
    return shortcode
def test_home():
    """GET / returns the hello-world payload."""
    response = client.get("/")
    assert response.status_code == 200
    assert response.json() == {"message": "Hello world"}
def test_shorten_url_no_url():
    """POST /shorten without a 'url' field returns 400 with an error detail."""
    body = {}
    response = client.post("/shorten", json=body)
    assert response.status_code == 400
    assert response.json() == {"detail": "URL is required"}
def test_shorten_url_create_url():
    """POST /shorten with a new URL returns 201 and a 6-character shortcode.

    Fix: the body duplicated the create_url helper line-for-line; reuse it
    so the request/assert logic lives in one place.
    """
    create_url()
def test_shorten_url_existing_url():
    """Shortening an already-known URL redirects (303) to the existing entry."""
    shortcode = create_url()
    response = client.post("/shorten", json={"url": "https://www.example.com"}, follow_redirects=False)
    assert response.status_code == 303
    assert response.headers["location"] == f"/urls/{shortcode}"
def test_redirect_url_no_url():
    """GET for an unknown shortcode returns 404."""
    response = client.get("/urls/abc123", follow_redirects=False)
    assert response.status_code == 404
    assert response.json() == {"detail": "There is no url with this shortcode"}
def test_redirect_url_cache():
    """A fresh shortcode is cached and redirects (307) to the long URL."""
    shortcode = create_url()
    # Creation should have primed the cache with the long URL.
    assert cache.retrieve_url(shortcode) == "https://www.example.com"
    response = client.get(f"/urls/{shortcode}", follow_redirects=False)
    assert response.status_code == 307
    assert response.headers["location"] == "https://www.example.com"
def test_redirect_url_db():
    """With the cache entry evicted, the redirect is served from the DB."""
    shortcode = create_url()
    cache.client.delete(shortcode)
    assert cache.retrieve_url(shortcode) == None  # cache miss confirmed
    response = client.get(f"/urls/{shortcode}", follow_redirects=False)
    assert response.status_code == 307
    assert response.headers["location"] == "https://www.example.com"
def test_redirect_url_recache_url():
    """A DB-served redirect repopulates the cache for the shortcode."""
    shortcode = create_url()
    cache.client.delete(shortcode)
    assert cache.retrieve_url(shortcode) == None  # cache miss confirmed
    response = client.get(f"/urls/{shortcode}", follow_redirects=False)
    assert cache.retrieve_url(shortcode) == "https://www.example.com"
def test_redirect_url_extend_url_ttl():
    """A redirect refreshes the cache TTL to at least 24 hours."""
    shortcode = create_url()
    client.get(f"/urls/{shortcode}", follow_redirects=False)
    assert cache.retrieve_url(shortcode) == "https://www.example.com"
    assert cache.client.ttl(shortcode) >= 24 * 60 * 60
def test_url_stats_no_url():
    """Stats endpoint returns 404 for an unknown shortcode."""
    response = client.get("/urls/abc123/stats", follow_redirects=False)
    assert response.status_code == 404
    assert response.json() == {"detail": "There is no url with this shortcode"}
def test_url_stats_no_hits():
    """A never-visited shortcode reports zero hits and the original URL."""
    shortcode = create_url()
    response = client.get(f"/urls/{shortcode}/stats")
    assert response.status_code == 200
    data = response.json()
    assert data["hits"] == 0
    assert data["url"] == "https://www.example.com"
def test_url_stats_with_hits():
    """The stats endpoint counts one hit per redirect request."""
    shortcode = create_url()
    for _ in range(10):
        response = client.get(f"/urls/{shortcode}")
    response = client.get(f"/urls/{shortcode}/stats")
    assert response.status_code == 200
    data = response.json()
    assert data["hits"] == 10
    assert data["url"] == "https://www.example.com"
# Unfortunately, I'm not using Python on a daily basis and the mocking proved to be more complicated than I thought.
# Here is an attempt to mock the database to return an integrity error when adding a new URL.
""" def test_shorten_url_existing_url():
db_mock = MagicMock()
db_mock.add.side_effect = IntegrityError("statement", "params", "orig", "detail")
db_mock.query.return_value.filter.return_value.first.return_value = {
"id": 1,
"url": "https://www.example.com",
"shortcode": "abc123",
}
app.dependency_overrides["get_db"] = lambda: db_mock
payload = {"url": "https://www.example.com"}
response = client.post("/shorten", json=payload, follow_redirects=False)
assert response.status_code == 303
assert response.headers["location"] == "/urls/abc123"
""" | csornyei/url-shortener | app/test_main.py | test_main.py | py | 4,650 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "app.cache.Cache",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "fastapi.testclient.TestClient",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "app.main.app",
"line_number": 9,
"usage_type": "argument"
},
{
"api_name": "app.db.Ba... |
22525694413 | import logging
import hydra
from omegaconf import DictConfig
logger = logging.getLogger(__name__)
class BiencoderDatasetsCfg(object):
    """Instantiates the train/dev dataset objects named in a Hydra config.

    Fix: the original only defined dev_datasets_names / dev_datasets when
    cfg.dev_datasets was truthy, so downstream code touching those
    attributes could hit AttributeError; they are now always defined.
    """

    def __init__(self, cfg: DictConfig):
        datasets = cfg.datasets
        self.train_datasets_names = cfg.train_datasets
        logger.info("train_datasets: %s", self.train_datasets_names)
        if self.train_datasets_names:
            self.train_datasets = [
                hydra.utils.instantiate(datasets[ds_name])
                for ds_name in self.train_datasets_names
            ]
        else:
            self.train_datasets = []

        # Always define the dev attributes, even when no dev set is configured.
        self.dev_datasets_names = cfg.dev_datasets
        self.dev_datasets = []
        if self.dev_datasets_names:
            logger.info("dev_datasets: %s", self.dev_datasets_names)
            self.dev_datasets = [
                hydra.utils.instantiate(datasets[ds_name])
                for ds_name in self.dev_datasets_names
            ]
        self.sampling_rates = cfg.train_sampling_rates
| microsoft/LMOps | uprise/DPR/dpr/utils/conf_utils.py | conf_utils.py | py | 986 | python | en | code | 2,623 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "omegaconf.DictConfig",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "hydra.utils.instantiate",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "hydra.u... |
15818104013 | from models.calendar_model import CalendarItem
from dotenv import load_dotenv
import os
# id: int
# subject: str
# location: str
# starttime: str
# endtime: str
# categorycolor: str
# Load secrets from .env; ConnectionString holds the MongoDB URI.
load_dotenv()
connection_string = os.getenv("ConnectionString")
# MongoDB Driver (async, for use with FastAPI's event loop)
import motor.motor_asyncio
client = motor.motor_asyncio.AsyncIOMotorClient(connection_string)
# Database and collection handles used by all CRUD helpers below.
database = client.DashboardFARM
collection = database.CalendarItems
# Create a Calendar item
async def create_calendar_item(item):
    """Insert a calendar-item document and return it."""
    await collection.insert_one(item)
    return item
# Read 1 Calendar item
async def read_one_calendar_item(id):
document = await collection.find_one({"id":id})
return document
# Read All Calendar items
async def read_all_calendar_items():
calendar_items = []
cursor = collection.find({})
async for document in cursor:
calendar_items.append(CalendarItem(**document))
return calendar_items
# Update a Calendar item
async def update_calendar_item(id, subject, location, starttime, endtime, categorycolor):
await collection.update_one(
{"id":id},
{"set": {"subject": subject}},
{"set": {"location": location}},
{"set": {"starttime": starttime}},
{"set": {"endtime": endtime}},
{"set": {"categorycolor": categorycolor}}
)
document = await collection.find_one({"id": id})
return document
# Delete a Calendar item
async def remove_calendar_item(id):
await collection.delete_one({"id": id})
return True | DerenB/FARM-Stack-Dashboard | backend/db_functions/calendar_db_functions.py | calendar_db_functions.py | py | 1,586 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "motor.motor_asyncio.motor_asyncio.AsyncIOMotorClient",
"line_number": 18,
"usage_type": "call"
},
{
... |
11510356842 | # Released under the MIT License. See LICENSE for details.
#
"""Defines AppDelegate class for handling high level app functionality."""
from __future__ import annotations
from typing import TYPE_CHECKING
import babase
if TYPE_CHECKING:
from typing import Callable
import bascenev1
class AppDelegate:
    """Defines handlers for high level app functionality.

    Category: App Classes
    """

    def create_default_game_settings_ui(
        self,
        gameclass: type[bascenev1.GameActivity],
        sessiontype: type[bascenev1.Session],
        settings: dict | None,
        completion_call: Callable[[dict | None], None],
    ) -> None:
        """Launch a UI to configure the given game config.

        It should manipulate the contents of config and call completion_call
        when done.
        """
        # Replace the main window once we come up successfully.
        # Imported lazily so UI modules load only when this flow is used.
        from bauiv1lib.playlist.editgame import PlaylistEditGameWindow

        # Classic app subsystem must be present for this UI flow.
        assert babase.app.classic is not None
        babase.app.ui_v1.clear_main_menu_window(transition='out_left')
        babase.app.ui_v1.set_main_menu_window(
            PlaylistEditGameWindow(
                gameclass,
                sessiontype,
                settings,
                completion_call=completion_call,
            ).get_root_widget()
        )
| efroemling/ballistica | src/assets/ba_data/python/baclassic/_appdelegate.py | _appdelegate.py | py | 1,336 | python | en | code | 468 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "bascenev1.GameActivity",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "bascenev1.Session",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name"... |
2325343679 | #!/usr/bin/env python
# HOUSE ROBBER
#
# - given list of values
# - return highest total value you can get by adding values - but none of the indexes can be touching
#
# NOTES
#
# - introduces the RECURRENCE RELATION, which is a way to break up DYNAMIC PROGRAMMING problems
# - trick is to break the problem into subsets, calculate the highest totals from those groups and store those values somewhere (so that you don't need to recalculate over and over)
#
# example:
#
# 1 2 3 4 # input array
# 1 # highest total from first col
# 2 # highest total from first 2 cols
# 4 # highest total from first 3 cols
# 4 # highest total from first 4 cols # note that is going to be max value from col3 or col2 + col4
#
# aka: col4 = max(col3, col2+col4)
import argparse
from typing import List
class Solution:
    """LeetCode 198 "House Robber" solver."""

    def rob(self, nums: List[int]) -> int:
        """Return the maximum sum over non-adjacent elements of nums.

        O(n) time, O(1) space: track the best total excluding and the best
        total including consideration of the previous house.
        """
        best_excl, best_incl = 0, 0
        for value in nums:
            best_excl, best_incl = best_incl, max(best_incl, best_excl + value)
        return best_incl
def main():
    """CLI entry point: parse (unused) args and print the sample answer."""
    parser = argparse.ArgumentParser(description='A basic Python script template.')
    parser.add_argument('-a', '--arg1', type=str, help='An example argument')
    parser.parse_args()

    # Run the solver on the canonical example; expected output is 4.
    print(Solution().rob([1, 2, 3, 1]))
if __name__ == '__main__':
main()
| scorzo/python_algorithms | 13_1d_dyanmic_programming/03_house_robber.py | 03_house_robber.py | py | 1,424 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 40,
"usage_type": "call"
}
] |
19844910180 | #!/usr/bin/env python
# coding=utf-8
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
import cv2
import numpy as np
from geometry_msgs.msg import Twist
rospy.init_node('line_follower')
cmd_vel_pub = rospy.Publisher('/cmd_vel', Twist, queue_size=1)
cv_bridge = CvBridge()
def image_callback(msg):
    """ROS image callback: convert the frame to RGB, keep only red pixels,
    crop the region of interest, then issue a drive command."""
    try:
        img = cv_bridge.imgmsg_to_cv2(msg, "bgr8")
        img_rgb = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
        img_filtree = color_filter(img_rgb)
        img_roi = roi(img_filtree)
        controle_suivi_ligne(img_roi)
    except CvBridgeError as e:
        # Conversion failures are logged, not fatal; wait for the next frame.
        rospy.logerr(e)
# Filtrer l'image pour ne conserver que les pixels rouges
def color_filter(image):
    """Return a black image with white pixels wherever the input is red.

    NOTE(review): only hue 0-10 is matched; in OpenCV's HSV space red also
    wraps around near hue 170-180 — confirm whether that range matters.
    """
    hsv = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
    lower_red = np.array([0, 100, 100])  # lower HSV bound for red (tune if needed)
    upper_red = np.array([10, 255, 255])  # upper HSV bound for red
    red_mask = cv2.inRange(hsv, lower_red, upper_red)
    masked_image = np.zeros_like(image)
    masked_image[red_mask != 0] = [255, 255, 255]  # paint matched pixels white
    return masked_image
def roi(im):
    """Zero out everything outside a trapezoidal region of interest
    (presumably the road area ahead of the robot — confirm with the camera
    mounting)."""
    x = int(im.shape[1])  # image width
    y = int(im.shape[0])  # image height
    # ROI corner coordinates, expressed as fractions of width/height.
    shape = np.array([[int(0.003 * x), int(0.7651 * y)],
                      [int(0.995 * x), int(0.735 * y)],
                      [int(0.552 * x), int(0.514 * y)],
                      [int(0.445 * x), int(0.52 * y)]])
    mask = np.zeros_like(im)
    # Fill color needs one value per channel (gray vs color input).
    if len(im.shape) > 2:
        channel_count = im.shape[2]
        ignore_mask_color = (255,) * channel_count
    else:
        ignore_mask_color = 255
    cv2.fillPoly(mask, np.int32([shape]), ignore_mask_color)
    masked_image = cv2.bitwise_and(im, mask)
    return masked_image
# Contrôler le robot en fonction de l'image de la région d'intérêt
def controle_suivi_ligne(image_roi):
    """Drive the robot based on the filtered ROI image.

    NOTE(review): the line-following control itself is not implemented —
    this currently publishes a constant forward command regardless of the
    image content.
    """
    # Placeholder command: 0.2 m/s forward, no rotation.
    cmd_vel = Twist()
    cmd_vel.linear.x = 0.2
    cmd_vel.angular.z = 0.0  # no rotation (rad/s)
    cmd_vel_pub.publish(cmd_vel)
# Subscribe to the camera feed and hand each frame to image_callback,
# then block forever processing callbacks.
rospy.Subscriber('/camera/image', Image, image_callback)
rospy.spin()
| PARC-2023-Team-Mali/Line-followers-parc | parc_line_follower.py | parc_line_follower.py | py | 2,305 | python | fr | code | 0 | github-code | 1 | [
{
"api_name": "rospy.init_node",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "rospy.Publisher",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "geometry_msgs.msg.Twist",
"line_number": 13,
"usage_type": "argument"
},
{
"api_name": "cv_bridge... |
42581376596 | from tkinter import Tk, filedialog
import json
import os
# Hide the root Tk window; only the file-picker dialog should appear.
Tk().withdraw()

# Fix: askopenfile returns None when the user cancels the dialog; the
# original `with filedialog.askopenfile(...)` then crashed on `with None`.
picked = filedialog.askopenfile(initialdir='~')
if picked is None:
    raise SystemExit("No notebook selected.")

with picked as input_file:
    base_name = os.path.basename(input_file.name)
    if base_name.split('.')[-1] != 'ipynb':
        raise ValueError("Expected file extension '.ipynb'")
    input_obj = json.load(input_file)

# Keep all notebook metadata, but rebuild the cell list with code cells
# only, stripping their outputs.
output_obj = {key: val for key, val in input_obj.items() if key != 'cells'}
output_obj['cells'] = [
    {key: val for key, val in cell.items() if key != 'outputs'}
    for cell in input_obj['cells']
    if cell['cell_type'] == 'code'
]

OUTPUT_DIR = os.path.join(os.path.dirname(os.path.dirname(__file__)), 'notebooks')
with open(os.path.join(OUTPUT_DIR, base_name), 'w') as output_file:
    json.dump(output_obj, output_file)
| maxTarlov/interview-bot-source | utils/nb-remove-text.py | nb-remove-text.py | py | 811 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tkinter.Tk",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog.askopenfile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "os.path.bas... |
36235255031 | # -*- coding: utf-8 -*-
"""
Created on Mon Nov 6 15:32:24 2017
@author: vidyag
"""
import tensorflow as tf
import numpy as np
import math
from scipy import misc
import glob
from autoencode import AutoEncoder
from layer_defs import variable_on_cpu, getConvInitializer
def fftshift(mat2D, dim0, dim1): #fftshift == ifftshift when dimensions are all even
    """Swap the four quadrants of a 2-D tensor (DC corner <-> centre),
    like np.fft.fftshift. dim0/dim1 are mat2D's dimensions and must be even
    (or both 1)."""
    #fftshift only works with even dimensions
    if (dim0==1) and (dim1==1):
        # Nothing to shift for a 1x1 tensor.
        return mat2D
    if (dim0%2) or (dim1%2):
        raise ValueError('Dimensions must be even to use fftshift.')
    dim0=tf.cast(dim0,tf.int32)
    dim1=tf.cast(dim1,tf.int32)
    # Cut into four equal quadrants...
    piece1=tf.slice(mat2D,[0,0],[dim0//2,dim1//2])              # top-left
    piece2=tf.slice(mat2D,[0,dim1//2],[dim0//2,dim1//2])        # top-right
    piece3=tf.slice(mat2D,[dim0//2,0],[dim0//2,dim1//2])        # bottom-left
    piece4=tf.slice(mat2D,[dim0//2,dim1//2],[dim0//2,dim1//2])  # bottom-right
    # ...and reassemble with diagonally-opposite quadrants swapped.
    top=tf.concat([piece4,piece3],axis=1)
    bottom=tf.concat([piece2,piece1],axis=1)
    final=tf.concat([top,bottom],axis=0)
    return final
def propTF_withNA_PSFcustom(u1,H_fresnel,H_NA,PSF,m,incoherent=1):
    """Propagate field u1 through a Fresnel kernel, an NA-limited pupil and
    a custom phase mask (PSF); returns the image-plane intensity.

    PSF is a phase map in wavelength units (scaled by 2*pi below); when
    incoherent=1, the combined coherent transfer function is converted to
    an OTF (autocorrelation) before filtering.
    """
    #u1 is the source plane field
    #L is the side length of the observation and source fields (assume square fields)
    #NA is the numerical aperture
    #m is u1.shape[0], is a tf.int32
    #dx is L/m
    # exp(-i*2*pi*PSF) expanded via cos/sin (complex tf.exp avoided; see
    # the commented-out variant kept below).
    PSF_phase=2*math.pi*PSF
    PSF_phase=tf.cast(tf.cos(PSF_phase),tf.complex64)-1j*tf.cast(tf.sin(PSF_phase),tf.complex64)
    # PSF_phase=2*tf.constant(math.pi, dtype=tf.complex64)*tf.cast(PSF, tf.complex64) #/tf.cast(tf.reduce_max(PSF), tf.complex64)
    # with tf.device('/cpu:0'):
    # PSF_phase=tf.exp(-1j*PSF_phase)
    PSF_phase=fftshift(PSF_phase,m,m)
    # Combined coherent transfer function.
    H = H_fresnel*H_NA*PSF_phase
    if incoherent:
        #H=np.fft.ifft2(np.abs(np.fft.fft2(H))**2)
        #H=tf.ifft2d(tf.fft2d(H)*tf.conj(tf.fft2d(H)))
        # OTF = autocorrelation of the coherent transfer function.
        H=tf.fft2d(tf.ifft2d(H)*tf.conj(tf.ifft2d(H)))
        #H=H/H[0,0]
        U1=fftshift(u1,m,m)
        U1=tf.fft2d(U1)
        U2=H*U1
        u2=fftshift(tf.ifft2d(U2),m,m)
    else:
        U1=fftshift(u1,m,m)
        U1=tf.fft2d(U1)
        U2=H*U1
        u2=fftshift(tf.ifft2d(U2),m,m)
        u2 = u2*tf.conj(u2) # make into intensity object
    return u2
def change_filter_function(u1,H_old,H_new,Nx,reg=1e-10,incoherent=1):
    """Re-filter field u1: divide out transfer function H_old (Tikhonov-like
    regularization reg avoids division by ~0) and apply H_new, all in the
    Fourier domain. Returns the complex field (not intensity)."""
    #u1 is the source plane field
    #Nx is u1.shape[0]
    #dx is L/m
    #H_old and H_new are already fftshifted in Fourier space
    if incoherent:
        # Convert coherent transfer functions to incoherent OTFs
        # (autocorrelation of the coherent PSF).
        H_old = tf.fft2d(tf.ifft2d(H_old)*tf.conj(tf.ifft2d(H_old)))
        H_new = tf.fft2d(tf.ifft2d(H_new)*tf.conj(tf.ifft2d(H_new)))
        #H=H/H[0,0]
        U1=fftshift(u1,Nx,Nx)
        U1=tf.fft2d(U1)
        # U2=H_old*U1 # previous processing
        U2 = (H_new/(H_old+reg))*U1
        u2=fftshift(tf.ifft2d(U2),Nx,Nx)
    else:
        U1=fftshift(u1,Nx,Nx)
        U1=tf.fft2d(U1)
        U2=(H_new/(H_old+reg))*U1
        # U2=H_old*U1 # previous processing
        u2=fftshift(tf.ifft2d(U2),Nx,Nx) # field value
        # u2 = u2*np.conj(u2) # make into intensity object
    return u2
def create_phase_obj_stack(PSF, trainingSet, batch_size, H_fresnel, H_NA, Nx):
    """Coherently propagate each example in the batch through the system
    and stack the resulting intensity images along a new batch axis."""
    fields = []
    for idx in range(batch_size):
        # Coherent propagation of one example (returns an intensity image).
        field = propTF_withNA_PSFcustom(
            trainingSet[idx, :, :], H_fresnel, H_NA, PSF, Nx, incoherent=0)
        fields.append(tf.expand_dims(field, axis=0))
    return tf.cast(tf.concat(fields, 0), tf.float32)
def create_microscope_img(PSF,trainingSet_sample,Nz,H_fresnel_stack,H_NA,m,num_wavelengths):
    """Form one simulated incoherent microscope image by propagating every
    depth slice / wavelength of a sample and summing the intensities.

    trainingSet_sample is indexed [:, :, zInd, waveInd]; returns a float32
    tensor with a leading batch axis of size 1.
    """
    # trainingSet_sample = trainingSet[ii,:,:,:]
    microscopeImg=[]
    for zInd in range(Nz):
        for waveInd in range(num_wavelengths):
            add_layer=propTF_withNA_PSFcustom(trainingSet_sample[:,:,zInd,waveInd],H_fresnel_stack[:,:,zInd,waveInd],H_NA[:,:,waveInd],PSF,m)
            microscopeImg.append(add_layer)
    # Incoherent sum over all depths and wavelengths.
    microscopeImg = tf.add_n(microscopeImg)
    microscopeImg = tf.expand_dims(microscopeImg,axis=0)
    microscopeImg = tf.cast(microscopeImg, tf.float32)
    return microscopeImg
def create_microscopeImgStack(PSF,trainingSet,Nz,batch_size,H_fresnel_stack,H_NA,m,num_wavelengths):
    """Simulate the microscope image for every example in trainingSet and
    concatenate them along the batch axis."""
    images = [
        create_microscope_img(
            PSF, trainingSet[idx, :, :, :, :], Nz,
            H_fresnel_stack, H_NA, m, num_wavelengths)
        for idx in range(batch_size)
    ]
    return tf.concat(images, 0)
def add_noise_microscopeImgStack(microscopeImgStack,normalRandomMat1,normalRandomMat2,sqrt_reg,\
                                 poisson_noise_multiplier, gaussian_noise_multiplier, batch_size, library=tf):
    """Add simulated shot noise (Poisson, via a Gaussian approximation) and
    read-out noise (Gaussian) to a stack of microscope images.

    normalRandomMat1/2 are pre-drawn standard-normal tensors; `library` may
    be tf or np so the same code runs in-graph or on plain arrays.
    """
    #XXX Fix the Poisson noise for low photon levels
    if library == tf:
        multiplierPoisson = tf.constant(poisson_noise_multiplier,dtype=tf.float32) #6e3 for EPFL
        multiplierGaussian = tf.constant(gaussian_noise_multiplier,dtype=tf.float32)
    else:
        multiplierPoisson = poisson_noise_multiplier
        multiplierGaussian = gaussian_noise_multiplier
    # Scale to photon counts, then approximate Poisson noise as
    # mean + sqrt(mean)*N(0,1) (valid for large photon counts).
    microscopeImgStack2 = microscopeImgStack*multiplierPoisson
    microscopeImgStack3=library.sqrt(library.abs(microscopeImgStack2)+sqrt_reg)*normalRandomMat1+microscopeImgStack2
    # Additive Gaussian read noise.
    microscopeImgStack4=microscopeImgStack3+multiplierGaussian*normalRandomMat2
    zeros = library.zeros([batch_size, microscopeImgStack4.shape[1], microscopeImgStack4.shape[2]], dtype=library.float32)
    microscopeImgStack = library.where(microscopeImgStack4<0,zeros,microscopeImgStack4) #truncate below 0
    # Scale back to the original intensity range.
    microscopeImgStack = microscopeImgStack/multiplierPoisson
    return microscopeImgStack
def F(mat2D,dim0,dim1):
    """Centered 2D FFT: forward transform followed by an fftshift."""
    spectrum = tf.fft2d(mat2D)
    return fftshift(spectrum, dim0, dim1)
def Ft(mat2D,dim0,dim1):
    """Centered 2D inverse FFT: fftshift followed by the inverse transform."""
    shifted = fftshift(mat2D, dim0, dim1)
    return tf.ifft2d(shifted)
def downsamp(x, cen, Np):
    """Crop a centered ``Np`` x ``Np`` window out of the 2D array ``x``.

    Args:
        x: 2D indexable array (numpy array or tf tensor).
        cen: (row, col) center of the crop window.
        Np: side length of the square window.

    Returns:
        The ``x[r0:r0+Np, c0:c0+Np]`` view, where (r0, c0) is the top-left
        corner placing the window's center at ``cen``.

    Note: converted from a name-bound lambda to a ``def`` (PEP 8) with no
    behavior change.
    """
    r0 = cen[0] - Np // 2
    c0 = cen[1] - Np // 2
    return x[r0:r0 + Np, c0:c0 + Np]
def sigmoid_stretch(x, stretch, library=tf):
    """Sigmoid with an adjustable slope: 1 / (1 + exp(-x / stretch)).

    `library` selects the tf or numpy backend for exp().
    """
    scaled = -x / stretch
    return 1. / (1. + library.exp(scaled))
def HiToLoPatch_singleLED(obj,scale_multiply, Ns, scale, cen0, P, Np, N_obj, LED_i): #stretch
    """Simulate the low-resolution intensity patch produced by one LED.

    The object spectrum is shifted according to the LED's illumination angle
    (Ns row for LED_i), cropped to the pupil support, multiplied by the
    pupil P, and inverse-transformed; the returned intensity is weighted by
    scale[LED_i] * scale_multiply[LED_i].
    """
    weight = scale[LED_i] * scale_multiply[LED_i]
    shift = (cen0 - Ns[LED_i, :]).astype(int)
    spectrum = F(obj, N_obj, N_obj)
    pupil_crop = downsamp(spectrum, shift, Np) * P
    field = Ft(pupil_crop, Np, Np)  # low-resolution complex field
    return field * tf.conj(field) * tf.cast(weight, tf.complex64)
def HiToLo_singleLED(obj, N_obj, N_patch, scale_multiply, num_patches, Ns_mat, scale_mat, \
    cen0, P, Np, LED_i):
    """Simulate the low-resolution image of `obj` under a single LED,
    processed patch-by-patch.

    First pass: for each of the num_patches x num_patches patches, run the
    full object through HiToLoPatch_singleLED with that patch's calibration
    (its row of Ns_mat / scale_mat).  Second pass: crop the region of each
    simulated image corresponding to its patch and stitch the crops back
    into one full low-resolution image (rows concatenated on axis 1, then
    rows stacked on axis 0).  Returns a float32 tensor.
    """
    low_res_patches=[]
    count = 0
    # Pass 1: one forward simulation per patch (each with its own calibration).
    for i,startX in enumerate(np.arange(0,N_obj,N_patch)):
        for j,startY in enumerate(np.arange(0,N_obj,N_patch)):
            # pass the full object to HiToLoPatch
            Ns = Ns_mat[count,:,:]
            scale = scale_mat[count,:]
            low_res_patch_everything = HiToLoPatch_singleLED(obj,scale_multiply, Ns, scale, cen0, P, Np, N_obj, LED_i)
            low_res_patches.append(low_res_patch_everything)
            count += 1
    count = 0
    # Pass 2: crop each simulated image down to its own patch and stitch.
    for i,startX in enumerate(np.arange(0,Np,Np/num_patches)):
        for j,startY in enumerate(np.arange(0,Np,Np/num_patches)):
            # Extract out patch of interest
            low_res_patch=tf.slice(low_res_patches[count],[int(startX),int(startY)],[int(N_patch*Np/N_obj),int(N_patch*Np/N_obj)])
            if j==0:
                low_res_obj_row=low_res_patch
            else:
                low_res_obj_row = tf.concat([low_res_obj_row,low_res_patch],axis=1)
            count += 1
        # Append the finished row of patches beneath the previous rows.
        if i==0:
            low_res_obj = low_res_obj_row
        else:
            low_res_obj = tf.concat([low_res_obj,low_res_obj_row],axis=0)
    low_res_obj = tf.cast(low_res_obj,tf.float32)
    return low_res_obj
def HiToLoPatch(obj,scale_multiply, Ns, scale, cen0, P, H0, Np, N_obj, numLEDs): #stretch
    """Simulate a low-resolution intensity patch under simultaneous
    illumination from numLEDs LEDs (Fourier-ptychography forward model).

    For each LED, the object spectrum (times H0) is shifted per the LED's
    illumination angle, cropped to the pupil support, multiplied by the
    pupil P, inverse-transformed, and its intensity accumulated with weight
    scale[i] * scale_multiply[i].

    Fix: the object spectrum ``F(obj, N_obj, N_obj)`` does not depend on
    the LED, so it is now computed once before the loop (it was previously
    recomputed on every iteration).  Results are numerically unchanged.

    Returns the summed complex64 intensity patch (None when numLEDs == 0;
    the original raised NameError in that case).
    """
    illumination_weights = scale * scale_multiply
    # Hoisted loop invariant: spectrum of the object, filtered by H0.
    OH = F(obj, N_obj, N_obj) * H0
    low_res_patch = None
    for LED_i in range(numLEDs):
        cen = (cen0 - Ns[LED_i, :]).astype(int)
        Psi0 = downsamp(OH, cen, Np) * P
        psi0 = Ft(Psi0, Np, Np)  # low-resolution complex field for this LED
        intensity_i = psi0 * tf.conj(psi0) * tf.cast(illumination_weights[LED_i], tf.complex64)
        low_res_patch = intensity_i if low_res_patch is None else low_res_patch + intensity_i
    return low_res_patch
def HiToLo(obj, N_obj, N_patch, scale_multiply, num_patches, Ns_mat, scale_mat, \
    cen0, P, H0, Np, numLEDs): #stretch
    """Simulate the low-resolution image of `obj` under all numLEDs LEDs,
    processed patch-by-patch (multi-LED counterpart of HiToLo_singleLED).

    First pass: for each of the num_patches x num_patches patches, run the
    full object through HiToLoPatch with that patch's calibration (its row
    of Ns_mat / scale_mat).  Second pass: crop the region of each simulated
    image corresponding to its patch and stitch the crops back into one
    full low-resolution image.  Returns a complex tensor (no float cast
    here, unlike HiToLo_singleLED).
    """
    low_res_patches=[]
    count = 0
    # Pass 1: one forward simulation per patch.
    for i,startX in enumerate(np.arange(0,N_obj,N_patch)):
        for j,startY in enumerate(np.arange(0,N_obj,N_patch)):
            # pass the full object to HiToLoPatch
            Ns = Ns_mat[count,:,:]
            scale = scale_mat[count,:]
            low_res_patch_everything = HiToLoPatch(obj,scale_multiply, Ns, scale, cen0, P, H0, Np, N_obj, numLEDs) #stretch
            low_res_patches.append(low_res_patch_everything)
            count += 1
    count = 0
    # Pass 2: crop each simulated image down to its own patch and stitch.
    for i,startX in enumerate(np.arange(0,Np,Np/num_patches)):
        for j,startY in enumerate(np.arange(0,Np,Np/num_patches)):
            # Extract out patch of interest
            low_res_patch=tf.slice(low_res_patches[count],[int(startX),int(startY)],[int(N_patch*Np/N_obj),int(N_patch*Np/N_obj)])
            if j==0:
                low_res_obj_row=low_res_patch
            else:
                low_res_obj_row = tf.concat([low_res_obj_row,low_res_patch],axis=1)
            count += 1
        # Append the finished row of patches beneath the previous rows.
        if i==0:
            low_res_obj = low_res_obj_row
        else:
            low_res_obj = tf.concat([low_res_obj,low_res_obj_row],axis=0)
    return low_res_obj
def upsample(low_res_obj, Np, N_obj):
    """Upsample a low-resolution complex image from Np x Np to N_obj x N_obj.

    For Np > 1 this zero-pads the centered Fourier spectrum out to N_obj
    (sinc interpolation).  For the degenerate Np == 1 case it instead
    applies an elementwise affine map built from getConvInitializer
    tensors.  NOTE(review): whether those initializer tensors are trainable
    depends on getConvInitializer, defined elsewhere -- confirm there.
    """
    if Np == 1:
        # 1x1 input: Fourier zero-padding is meaningless, use affine map.
        dense_multiply = tf.cast(tf.squeeze(getConvInitializer(N_obj, N_obj, init_type="trunc_norm")), tf.complex64)
        dense_bias = tf.cast(tf.squeeze(getConvInitializer(N_obj, N_obj, init_type="trunc_norm")), tf.complex64)
        # dense_multiply_i = tf.cast(tf.squeeze(getConvInitializer(N_obj, N_obj, init_type="trunc_norm")), tf.complex64)
        # dense_bias_i = tf.cast(tf.squeeze(getConvInitializer(N_obj, N_obj, init_type="trunc_norm")), tf.complex64)
        # pad0 = (N_obj - Np)//2 -1
        # pad1 = pad0 + 3
        # upsampled_obj = tf.pad(upsampled_obj, [[int(pad0),int(pad1)],[int(pad0),int(pad1)]], 'CONSTANT', \
        #     constant_values = 100)
        # upsampled_obj_real = low_res_obj*dense_multiply_r + dense_bias_r
        # upsampled_obj_imag = low_res_obj*dense_multiply_i + dense_bias_i
        upsampled_obj = low_res_obj*dense_multiply + dense_bias
        # upsampled_obj = tf.Print(upsampled_obj, [dense_multiply], message='dense_multiply: ')
        # upsampled_obj = tf.Print(upsampled_obj, [dense_bias], message='dense_bias: ')
        # upsampled_obj = tf.Print(upsampled_obj, [low_res_obj], message='low_res_obj: ')
    else:
        # Sinc interpolation: centered FFT, zero-pad to N_obj, inverse FFT.
        upsampled_obj = F(low_res_obj,Np,Np)
        pad = (N_obj - Np)/2
        upsampled_obj = tf.pad(upsampled_obj, [[int(pad),int(pad)],[int(pad),int(pad)]], 'CONSTANT')
        upsampled_obj = Ft(upsampled_obj,N_obj,N_obj)
    return upsampled_obj
def create_upsampled_obj_stack(low_res_obj_stack, batch_size, Np, N_obj):
    """Upsample every Np x Np image in the batch to N_obj x N_obj and
    restack them along axis 0; the result is cast back to float32."""
    complex_stack = tf.cast(low_res_obj_stack, tf.complex64)
    upsampled = [
        tf.expand_dims(upsample(complex_stack[b, :, :], Np, N_obj), axis=0)
        for b in range(batch_size)
    ]
    return tf.cast(tf.concat(upsampled, 0), tf.float32)
def create_FP_img_stack(scale_multiply,trainingSet,batch_size, N_obj, N_patch, num_patches, Ns_mat, \
        scale_mat, cen0, P, H0, Np, numLEDs):
    """Simulate the low-resolution FP measurement (via HiToLo) for every
    example in the batch and stack the results into one float32 tensor
    with a leading batch axis."""
    sims = []
    for b in range(batch_size):
        sim = HiToLo(trainingSet[b, :, :], N_obj, N_patch, scale_multiply,
                     num_patches, Ns_mat, scale_mat,
                     cen0, P, H0, Np, numLEDs)
        sims.append(tf.expand_dims(sim, axis=0))
    return tf.cast(tf.concat(sims, 0), tf.float32)
def calculate_loss_FP(predicted_mat, trainingSet, library = tf):
    """Sum-of-squared-magnitudes (L2) loss between a complex prediction
    and target; `library` selects the tf or numpy backend."""
    diff = predicted_mat - trainingSet
    sq_mag = library.real(diff * library.conj(diff))
    if library == tf:
        return tf.reduce_sum(sq_mag)
    return np.sum(sq_mag)
def grad_diff_loss(predicted_mat, trainingSet, library = tf):
    """L2 loss on the finite-difference image gradients (x and y) between
    prediction and target; `library` selects the tf or numpy backend."""
    sum_func = tf.reduce_sum if library == tf else np.sum

    def _sq_norm(d):
        # Sum of squared complex magnitudes of a difference tensor.
        return sum_func(library.real(d * library.conj(d)))

    # Difference of forward differences along axis 1 (x) and axis 2 (y).
    dx = (trainingSet[:, 1:, :] - trainingSet[:, :-1, :]) \
        - (predicted_mat[:, 1:, :] - predicted_mat[:, :-1, :])
    dy = (trainingSet[:, :, 1:] - trainingSet[:, :, :-1]) \
        - (predicted_mat[:, :, 1:] - predicted_mat[:, :, :-1])
    return _sq_norm(dx) + _sq_norm(dy)
def convert_net_prediction_list(net_prediction_list, image_modality, batch_size, optical_parameters_dict):
    """Merge per-network prediction tensors into one prediction matrix in
    the layout each imaging modality expects.

    - 'FP' / 'phase' / 'FP_PP': two networks predict the real and imaginary
      parts of a complex field; combine and drop the channel axis.
    - 'STORM': stack per-(z, wavelength) outputs and reshape to
      [batch, Nx, Nx, Nz, num_wavelengths].
    - 'HE': stack per-channel outputs and drop the trailing singleton axis.

    Raises:
        ValueError: for an unrecognized image_modality (previously this
        fell through to an UnboundLocalError on return).
    """
    if image_modality in ('FP', 'phase', 'FP_PP'):
        predicted_mat = tf.cast(net_prediction_list[0], tf.complex64) \
            + 1j * tf.cast(net_prediction_list[1], tf.complex64)
        # predicted_mat = tf.cast(net_prediction_list[0],tf.complex64)*tf.exp(1j*tf.cast(net_prediction_list[1],tf.complex64))
        predicted_mat = tf.squeeze(predicted_mat, axis=3)
    elif image_modality == 'STORM':
        Nx = optical_parameters_dict['Nx_highres']
        Nz = optical_parameters_dict['Nz']
        num_wavelengths = optical_parameters_dict['num_wavelengths']
        predicted_mat = tf.stack(net_prediction_list, axis=3)
        predicted_mat = tf.reshape(predicted_mat, [batch_size, Nx, Nx, Nz, num_wavelengths])
    elif image_modality == 'HE':
        predicted_mat = tf.stack(net_prediction_list, axis=3)
        predicted_mat = tf.squeeze(predicted_mat, axis=4)
    else:
        raise ValueError('Unknown image_modality: %r' % (image_modality,))
    return predicted_mat
def iterative_solver_FP(predicted_mat, optical_element, batch_size, N_obj, N_patch, num_patches, Ns_mat,\
    scale_mat, cen0, P, H0, Np, numLEDs, low_res_obj_stack, step_size, max_internal_iter, \
    merit_stopping_point, loss_low_res, i):
    """Refine the network's complex FP prediction by in-graph gradient
    descent on the measurement-consistency loss.

    Runs up to max_internal_iter fixed-step updates (step `step_size`) on
    the real and imaginary parts separately, stopping early once the
    low-resolution reconstruction loss drops below merit_stopping_point.
    Implemented with tf.while_loop so the iteration is part of the graph.
    Returns the refined complex64 prediction.
    """
    # print 'mii2'
    predicted_mat_real = tf.real(predicted_mat)
    predicted_mat_imag = tf.imag(predicted_mat)
    def single_iter(predicted_mat_real, predicted_mat_imag, loss_low_res, i):
        # One gradient-descent step on the data-consistency loss.
        predicted_mat = tf.cast(predicted_mat_real,tf.complex64)+1j*tf.cast(predicted_mat_imag,tf.complex64)
        low_res_obj_predicted = create_FP_img_stack(optical_element,predicted_mat,batch_size, N_obj, N_patch, num_patches, Ns_mat, \
            scale_mat, cen0, P, H0, Np, numLEDs) #stretch
        loss_low_res = tf.reduce_sum(tf.square(low_res_obj_stack - low_res_obj_predicted))
        # loss_MSE = calculate_loss_FP(low_res_obj_predicted, low_res_obj_stack)
        # loss_grad = grad_diff_loss(low_res_obj_predicted, low_res_obj_stack)
        #find gradient of loss_low_res with respect to predicted_mat
        predicted_mat_real_gradient = tf.squeeze(tf.gradients(loss_low_res,predicted_mat_real),axis=0)
        predicted_mat_imag_gradient = tf.squeeze(tf.gradients(loss_low_res,predicted_mat_imag),axis=0)
        # predicted_mat_real_gradient = tf.Print(predicted_mat_real_gradient, [loss_low_res], message='loss_low_res: ')
        #update predicted_mat with step_size
        predicted_mat_real = predicted_mat_real - step_size*predicted_mat_real_gradient
        predicted_mat_imag = predicted_mat_imag - step_size*predicted_mat_imag_gradient
        i+=1
        return predicted_mat_real, predicted_mat_imag, loss_low_res, i
    def convergence_cond(predicted_mat_real, predicted_mat_imag, loss_low_res,i):
        # Continue while under the iteration cap and above the loss threshold.
        result = True
        # result = tf.Print(result, [result], message='result1: ')
        result = tf.cond(i>=max_internal_iter, lambda: False, lambda: result)
        # result = tf.Print(result, [result], message='result2: ')
        result = tf.cond(loss_low_res<merit_stopping_point, lambda: False, lambda: result)
        # result = tf.Print(result, [result], message='result3: ')
        return result
    [predicted_mat_real, predicted_mat_imag, loss_low_res, i] = tf.while_loop(convergence_cond,
        single_iter,
        [predicted_mat_real, predicted_mat_imag, loss_low_res,i])
    predicted_mat = tf.cast(predicted_mat_real,tf.complex64)+1j*tf.cast(predicted_mat_imag,tf.complex64)
    return predicted_mat
def iterative_solver_STORM(predicted_mat, PSF, Nz,batch_size, H_fresnel_stack, \
    H_NA, Nx, low_res_obj_stack, step_size, max_internal_iter, \
    merit_stopping_point, loss_low_res, i, num_wavelengths):
    """Refine the real-valued STORM prediction by in-graph gradient descent
    on the measurement-consistency loss (STORM analogue of
    iterative_solver_FP).

    Runs up to max_internal_iter fixed-step updates, stopping early once
    the loss drops below merit_stopping_point; implemented as a
    tf.while_loop.  Returns the refined prediction.
    """
    def single_iter(predicted_mat, loss_low_res, i):
        # One fixed-step gradient-descent update.
        predicted_mat_complex=tf.cast(predicted_mat, tf.complex64)
        low_res_obj_predicted = create_microscopeImgStack(PSF,predicted_mat_complex,Nz,batch_size,H_fresnel_stack,H_NA,Nx,num_wavelengths)
        loss_low_res = tf.reduce_sum(tf.square(low_res_obj_stack - low_res_obj_predicted))
        #find gradient of loss_low_res with respect to predicted_mat
        predicted_mat_gradient = tf.squeeze(tf.gradients(loss_low_res,predicted_mat),axis=0)
        # predicted_mat_gradient = tf.Print(predicted_mat_gradient, [loss_low_res], message='loss_low_res: ')
        #update predicted_mat with step_size
        predicted_mat = predicted_mat - step_size*predicted_mat_gradient
        i+=1
        return predicted_mat, loss_low_res, i
    def convergence_cond(predicted_mat, loss_low_res,i):
        # Stop at the iteration cap or once the loss is small enough.
        result = True
        result = tf.cond(i>=max_internal_iter, lambda: False, lambda: result)
        result = tf.cond(loss_low_res<merit_stopping_point, lambda: False, lambda: result)
        return result
    [predicted_mat, loss_low_res, i] = tf.while_loop(convergence_cond,
        single_iter,
        [predicted_mat, loss_low_res,i])
    return predicted_mat
def iterative_solver_phase(predicted_mat, PSF, batch_size, H_fresnel, \
    H_NA, Nx, low_res_obj_stack, step_size, max_internal_iter, \
    merit_stopping_point, loss_low_res, i):
    """Refine the complex phase-imaging prediction by in-graph gradient
    descent on the measurement-consistency loss.

    Same scheme as iterative_solver_FP (separate real/imag updates inside
    a tf.while_loop, early stop below merit_stopping_point) but with the
    phase-imaging forward model create_phase_obj_stack.  Returns the
    refined complex64 prediction.
    """
    predicted_mat_real = tf.real(predicted_mat)
    predicted_mat_imag = tf.imag(predicted_mat)
    def single_iter(predicted_mat_real, predicted_mat_imag, loss_low_res, i):
        # One gradient-descent step on the data-consistency loss.
        predicted_mat = tf.cast(predicted_mat_real,tf.complex64)+1j*tf.cast(predicted_mat_imag,tf.complex64)
        low_res_obj_predicted = create_phase_obj_stack(PSF, predicted_mat, batch_size, H_fresnel, H_NA, Nx)
        loss_low_res = tf.reduce_sum(tf.square(low_res_obj_stack - low_res_obj_predicted))
        #find gradient of loss_low_res with respect to predicted_mat
        predicted_mat_real_gradient = tf.squeeze(tf.gradients(loss_low_res,predicted_mat_real),axis=0)
        predicted_mat_imag_gradient = tf.squeeze(tf.gradients(loss_low_res,predicted_mat_imag),axis=0)
        #update predicted_mat with step_size
        predicted_mat_real = predicted_mat_real - step_size*predicted_mat_real_gradient
        predicted_mat_imag = predicted_mat_imag - step_size*predicted_mat_imag_gradient
        i+=1
        return predicted_mat_real, predicted_mat_imag, loss_low_res, i
    def convergence_cond(predicted_mat_real, predicted_mat_imag, loss_low_res,i):
        # Stop at the iteration cap or once the loss is small enough.
        result = True
        result = tf.cond(i>=max_internal_iter, lambda: False, lambda: result)
        result = tf.cond(loss_low_res<merit_stopping_point, lambda: False, lambda: result)
        return result
    [predicted_mat_real, predicted_mat_imag, loss_low_res, i] = tf.while_loop(convergence_cond,
        single_iter,
        [predicted_mat_real, predicted_mat_imag, loss_low_res,i])
    predicted_mat = tf.cast(predicted_mat_real,tf.complex64)+1j*tf.cast(predicted_mat_imag,tf.complex64)
    return predicted_mat
def iterative_solver_HE(predicted_mat, Nx_highres, h_blur, \
    high_magn, low_magn, dpix_c, wavelength, \
    low_NA, low_res_obj_batch, step_size, max_internal_iter, \
    merit_stopping_point, loss_low_res, i, \
    batch_size):
    """Refine the H&E high-resolution prediction by in-graph gradient
    descent against the low-magnification measurements.

    Same tf.while_loop scheme as the other iterative solvers, but using
    change_magn_batch as the forward model.  Unlike the others, this
    returns (refined prediction, final loss).
    """
    def single_iter(predicted_mat, loss_low_res, i):
        # One fixed-step gradient-descent update.
        low_res_obj_batch_predicted = change_magn_batch(predicted_mat, Nx_highres, h_blur, \
            high_magn, low_magn, dpix_c, wavelength, low_NA, batch_size)
        low_res_obj_batch_predicted = tf.cast(low_res_obj_batch_predicted, tf.float32)
        loss_low_res = tf.reduce_sum(tf.square(low_res_obj_batch - low_res_obj_batch_predicted))
        #find gradient of loss_low_res with respect to predicted_mat
        predicted_mat_gradient = tf.squeeze(tf.gradients(loss_low_res,predicted_mat),axis=0)
        # predicted_mat_gradient = tf.Print(predicted_mat_gradient, [predicted_mat_gradient], message='predicted_mat_gradient: ')
        # predicted_mat_gradient = tf.Print(predicted_mat_gradient, [loss_low_res], message='loss_low_res: ')
        #update predicted_mat with step_size
        predicted_mat = predicted_mat - step_size*predicted_mat_gradient
        i+=1
        return predicted_mat, loss_low_res, i
    def convergence_cond(predicted_mat, loss_low_res,i):
        # Stop at the iteration cap or once the loss is small enough.
        result = True
        result = tf.cond(i>=max_internal_iter, lambda: False, lambda: result)
        result = tf.cond(loss_low_res<merit_stopping_point, lambda: False, lambda: result)
        return result
    [predicted_mat, loss_low_res, i] = tf.while_loop(convergence_cond,
        single_iter,
        [predicted_mat, loss_low_res,i])
    return predicted_mat, loss_low_res
def find_predicted_mat(image_modality, num_nets, input_layer_list, Nx, batch_size, layers_dropout, dropout_prob,
    use_batch_norm, autoencode_init_type, init_type_bias, init_type_resid, kernel_multiplier, variance_reg, training, \
    num_layers_autoencode, skip_interval, num_blocks, optical_parameters_dict):
    """Run each input through its own chain of AutoEncoder blocks and merge
    the per-network outputs into one prediction tensor.

    A separate variable scope per network ("net_<k>") and per block
    ("ae_<k>") keeps each sub-network's trainable variables distinct; each
    block's prediction feeds the next block's input.  The final
    per-network predictions are combined by convert_net_prediction_list
    into the layout required by `image_modality`.
    """
    net_prediction_list = []
    for net in range(num_nets):
        with tf.variable_scope("net_" + str(net)):
            curr_input = input_layer_list[net]
            # Chain num_blocks autoencoders; each refines the previous output.
            for i in range(num_blocks):
                with tf.variable_scope("ae_" + str(i)):
                    curr_net = AutoEncoder(curr_input,
                        num_layers_autoencode, Nx,
                        batch_size, training,
                        kernel_multiplier,
                        skip_interval=skip_interval,
                        conv_activation='maxout',
                        deconv_activation='maxout',
                        dropout_count=layers_dropout,
                        dropout_prob=dropout_prob,
                        create_graph_viz=False,
                        use_batch_norm=use_batch_norm,
                        init_type=autoencode_init_type,
                        init_type_bias=init_type_bias,
                        init_type_resid=init_type_resid)
                    curr_input = curr_net.get_prediction()
            net0_prediction = curr_input
        net_prediction_list.append(net0_prediction)
        # Dead code kept below as a no-op string: an earlier fixed
        # two-block ("ae_0"/"ae_1") variant of the loop above.
        """
        with tf.variable_scope("ae_0"):
            net_init = AutoEncoder(input_layer_list[net], num_layers_autoencode, Nx, batch_size, training, \
                kernel_multiplier, skip_interval=skip_interval, conv_activation='maxout', \
                deconv_activation='maxout', dropout_count=layers_dropout, dropout_prob=dropout_prob, \
                create_graph_viz=False, use_batch_norm=use_batch_norm, \
                init_type=autoencode_init_type, init_type_bias=init_type_bias, init_type_resid=init_type_resid)
            net_init_prediction = net_init.get_prediction()
        with tf.variable_scope("ae_1"):
            net0 = AutoEncoder(net_init_prediction, num_layers_autoencode, Nx, batch_size, training, \
                kernel_multiplier, skip_interval=skip_interval, conv_activation='maxout', \
                deconv_activation='maxout', dropout_count=layers_dropout, dropout_prob=dropout_prob, \
                create_graph_viz=False, use_batch_norm=use_batch_norm, \
                init_type=autoencode_init_type, init_type_bias=init_type_bias, init_type_resid=init_type_resid)
            net0_prediction = net0.get_prediction()
        net_prediction_list.append(net0_prediction)
        """
    ### End Neural Network(s)
    ### Convert net_prediction_list to predicted_mat
    predicted_mat = convert_net_prediction_list(net_prediction_list, image_modality, batch_size, optical_parameters_dict)
    return predicted_mat
def tower_loss_all(trainingSet, training, normalRandomMat1, normalRandomMat2, \
    initialize_optical_element_ones, num_elements, use_batch_norm, variance_reg, add_noise, sqrt_reg, \
    dropout_prob, layers_dropout, batch_size, max_internal_iter, merit_stopping_point, optical_parameters_dict, \
    autoencode_init_type, init_type_bias, init_type_resid, \
    kernel_multiplier, \
    poisson_noise_multiplier, gaussian_noise_multiplier, image_modality,
    num_layers_autoencode, skip_interval, num_blocks, \
    training_data_folder, load_optical_element_init,
    lowres_trainingSet=None):
    """Build the full forward-and-reconstruction graph for one tower and
    return its losses.

    Pipeline: (1) create/initialize the learnable optical element,
    (2) push trainingSet through the modality-specific physical forward
    model to get simulated measurements (low_res_obj_stack), (3) optionally
    add shot/read noise, (4) reconstruct with the neural network(s),
    (5) optionally refine the prediction with an in-graph iterative solver,
    (6) compute the reconstruction losses against trainingSet.

    Returns (loss, loss_grad, optical_element, low_res_obj_stack_nonoise,
    low_res_obj_stack, predicted_mat).
    """
    with tf.variable_scope("optical_transform"):
        Nx = optical_parameters_dict['Nx_highres']
        Nz = optical_parameters_dict['Nz']
        # --- Initial value of the learnable optical-element coefficients ---
        if initialize_optical_element_ones:
            if (image_modality == 'FP') or (image_modality == 'FP_PP'):
                optical_element0 = np.ones([num_elements,], dtype=np.float32)
            elif (image_modality == 'STORM') or (image_modality == 'phase'):
                # One-hot init: only the first Zernike coefficient active.
                optical_element0 = np.zeros([num_elements,], dtype=np.float32)
                optical_element0[0] = 1
        elif load_optical_element_init:
            optical_element0 = np.load(training_data_folder + '/optical_element_init.npy')
        else:
            optical_element0 = np.random.rand(num_elements,).astype(np.float32)
        # --- Modality-specific forward model ---
        if image_modality == 'FP':
            # Per-LED brightnesses, clipped to [0, 1].
            optical_element = variable_on_cpu('optical_element', optical_element0, tf.float32, \
                constraint = lambda x: tf.clip_by_value(x, 0, 1.0))
            # optical_element = variable_on_cpu('optical_element', optical_element0, tf.float32) #stretch
            num_nets = 2 # one neural net for real and one for imaginary
            numLEDs = optical_parameters_dict['numLEDs']
            N_obj = optical_parameters_dict['N_obj']
            N_patch = optical_parameters_dict['N_patch']
            num_patches = optical_parameters_dict['num_patches']
            Ns_mat = optical_parameters_dict['Ns_mat']
            scale_mat = optical_parameters_dict['scale_mat']
            cen0 = optical_parameters_dict['cen0']
            P = optical_parameters_dict['P']
            H0 = optical_parameters_dict['H0']
            Np = optical_parameters_dict['Np']
            # stretch = variable_on_cpu('sigmoid_stretch', 1.0, tf.float32, trainable=False)
            ### Convert trainingSet to low resolution
            low_res_obj_stack = create_FP_img_stack(optical_element,trainingSet,batch_size, N_obj, N_patch, num_patches, Ns_mat, \
                scale_mat, cen0, P, H0, Np, numLEDs) #stretch
        elif image_modality == 'FP_PP':
            numLEDs = optical_parameters_dict['numLEDs']
            N_obj = optical_parameters_dict['N_obj']
            Np = optical_parameters_dict['Np']
            N_patch = optical_parameters_dict['N_patch']
            num_patches = optical_parameters_dict['num_patches']
            Ns_mat = optical_parameters_dict['Ns_mat']
            scale_mat = optical_parameters_dict['scale_mat']
            cen0 = optical_parameters_dict['cen0']
            P = optical_parameters_dict['P']
            H0 = optical_parameters_dict['H0']
            optical_element = variable_on_cpu('optical_element', optical_element0, tf.float32, \
                constraint = lambda x: tf.clip_by_value(x, 0, 1.0))
            num_nets = 2 # one neural net for real and one for imaginary
            # Precomputed per-LED measurements: weight then sum over LEDs.
            low_res_obj_stack = lowres_trainingSet*optical_element
            low_res_obj_stack = tf.reduce_sum(low_res_obj_stack, axis=-1)
        elif image_modality == 'STORM':
            optical_element = variable_on_cpu('optical_element', optical_element0, tf.float32)
            num_wavelengths = optical_parameters_dict['num_wavelengths']
            num_nets = Nz*num_wavelengths
            H_fresnel_stack = optical_parameters_dict['H_fresnel_stack']
            H_NA = optical_parameters_dict['H_NA']
            ZernikePolyMat = optical_parameters_dict['ZernikePolyMat']
            # PSF is a learned combination of Zernike polynomial modes.
            PSF = ZernikePolyMat*optical_element
            PSF = tf.reduce_sum(PSF,axis=2)
            # Process 3D matrices to create the two-dimensional image
            low_res_obj_stack = create_microscopeImgStack(PSF, tf.cast(trainingSet, tf.complex64), Nz, batch_size, H_fresnel_stack, H_NA, Nx, num_wavelengths)
        elif image_modality == 'phase':
            optical_element = variable_on_cpu('optical_element', optical_element0, tf.float32)
            num_nets = 2 # one neural net for real and one for imaginary
            H_fresnel = optical_parameters_dict['H_fresnel']
            H_NA = optical_parameters_dict['H_NA']
            ZernikePolyMat = optical_parameters_dict['ZernikePolyMat']
            # PSF is a learned combination of Zernike polynomial modes.
            PSF = ZernikePolyMat*optical_element
            PSF = tf.reduce_sum(PSF,axis=2)
            low_res_obj_stack = create_phase_obj_stack(PSF, trainingSet, batch_size, H_fresnel, H_NA, Nx)
        # Keep a noise-free copy for monitoring before (optionally) adding noise.
        low_res_obj_stack_nonoise = low_res_obj_stack
        if add_noise:
            low_res_obj_stack=add_noise_microscopeImgStack(low_res_obj_stack, normalRandomMat1, normalRandomMat2,\
                sqrt_reg, poisson_noise_multiplier, gaussian_noise_multiplier, \
                batch_size)
        # --- Build the network input from the simulated measurements ---
        if (image_modality == 'FP') or (image_modality == 'FP_PP'):
            # upsample the low_res_obj_stack for FP modality
            input_layer = create_upsampled_obj_stack(low_res_obj_stack, batch_size, Np, N_obj)
            input_layer = tf.expand_dims(input_layer, axis=3)
            # input_layer = tf.sqrt( (input_layer + sqrt_reg) / numLEDs)
        elif (image_modality == 'STORM') or (image_modality == 'phase'):
            input_layer = tf.expand_dims(low_res_obj_stack, axis=3)
    ### Neural Network(s)
    with tf.variable_scope("neural_net"):
        # Every network sees the same input tensor.
        input_layer_list = [input_layer for i in range(num_nets)]
        predicted_mat = find_predicted_mat(image_modality, num_nets, input_layer_list, Nx, batch_size, layers_dropout, dropout_prob, \
            use_batch_norm, autoencode_init_type, init_type_bias, init_type_resid, kernel_multiplier, variance_reg, training, \
            num_layers_autoencode, skip_interval, num_blocks, optical_parameters_dict)
    # --- Loss, with optional in-graph iterative refinement ---
    if (image_modality == 'FP_PP') and (max_internal_iter == 0):
        loss = calculate_loss_FP(predicted_mat, trainingSet)
    else:
        ### convert predicted_mat down to low_res_obj_predicted
        if (image_modality == 'FP') or (image_modality == 'FP_PP'):
            low_res_obj_predicted = create_FP_img_stack(optical_element,predicted_mat,batch_size, N_obj, N_patch, num_patches, Ns_mat, \
                scale_mat, cen0, P, H0, Np, numLEDs) #stretch
        elif image_modality == 'STORM':
            predicted_mat_complex=tf.cast(predicted_mat, tf.complex64)
            low_res_obj_predicted = create_microscopeImgStack(PSF,predicted_mat_complex,Nz,batch_size,H_fresnel_stack,H_NA,Nx,num_wavelengths)
        elif image_modality == 'phase':
            low_res_obj_predicted = create_phase_obj_stack(PSF, predicted_mat, batch_size, H_fresnel, H_NA, Nx)
        # Measurement-consistency loss used by the iterative solvers.
        loss_low_res = tf.reduce_sum(tf.square(low_res_obj_stack - low_res_obj_predicted))
        ###################
        step_size = variable_on_cpu('step_size', 1e-7, tf.float32, trainable = False) # XXX make step_size trainable
        i = tf.Variable(0, dtype=tf.int32, trainable=False)
        if (image_modality == 'FP') or (image_modality == 'FP_PP'):
            # print 'mii4'
            if max_internal_iter > 0:
                # print 'mii3'
                predicted_mat = iterative_solver_FP(predicted_mat, optical_element, batch_size, N_obj, N_patch, num_patches, Ns_mat,\
                    scale_mat, cen0, P, H0, Np, numLEDs, low_res_obj_stack, step_size, max_internal_iter, \
                    merit_stopping_point, loss_low_res, i)
            loss = calculate_loss_FP(predicted_mat, trainingSet)
        elif image_modality == 'STORM':
            if max_internal_iter > 0:
                predicted_mat = iterative_solver_STORM(predicted_mat, PSF, Nz,batch_size, H_fresnel_stack, \
                    H_NA, Nx, low_res_obj_stack, step_size, max_internal_iter, \
                    merit_stopping_point, loss_low_res, i, num_wavelengths)
            loss = tf.reduce_sum(tf.square(predicted_mat - tf.cast(trainingSet, tf.float32))) #l2 loss
        elif image_modality == 'phase':
            if max_internal_iter > 0:
                predicted_mat = iterative_solver_phase(predicted_mat, PSF, batch_size, H_fresnel, \
                    H_NA, Nx, low_res_obj_stack, step_size, max_internal_iter, \
                    merit_stopping_point, loss_low_res, i)
            loss = calculate_loss_FP(predicted_mat, trainingSet)
    # Secondary loss on finite-difference image gradients.
    loss_grad = grad_diff_loss(predicted_mat, trainingSet)
    return loss, loss_grad, optical_element, low_res_obj_stack_nonoise, low_res_obj_stack, predicted_mat
def average_gradients(tower_grads, take_average=True):
    """Combine per-tower gradients for each shared variable.

    Args:
        tower_grads: list (over towers) of lists of (gradient, variable)
            pairs; the inner lists are aligned across towers.
        take_average: reduce with the mean when True, the sum otherwise.

    Returns:
        One (combined_gradient, variable) pair per variable.  Because the
        variables are shared across towers, the variable from the first
        tower is returned.  This is a synchronization point across towers.
    """
    reduce_fn = tf.reduce_mean if take_average else tf.reduce_sum
    combined = []
    # zip(*...) groups the same variable's (grad, var) pair from each tower:
    # ((grad0_gpu0, var0_gpu0), ..., (grad0_gpuN, var0_gpuN))
    for grad_and_vars in zip(*tower_grads):
        # Give each gradient a leading 'tower' axis, stack, then reduce it.
        expanded = [tf.expand_dims(g, 0) for g, _ in grad_and_vars]
        stacked = tf.concat(axis=0, values=expanded)
        grad = reduce_fn(stacked, 0)
        combined.append((grad, grad_and_vars[0][1]))
    return combined
def load_optical_parameters(training_data_folder,image_modality):
    """Load the saved optical-system parameters for one imaging modality.

    Every parameter is stored as ``<name>.npy`` inside
    ``training_data_folder``.  Returns ``(optical_parameters_dict,
    num_elements)`` where ``num_elements`` is the number of learnable
    optical-element coefficients (None for 'HE').

    Raises:
        ValueError: for an unknown image_modality (previously this fell
        through to an UnboundLocalError on ``num_elements``).
    """
    def _load(name):
        # Helper: one parameter <- '<folder>/<name>.npy'.
        return np.load(training_data_folder + '/' + name + '.npy')

    optical_parameters_dict = {}
    if image_modality == 'FP' or image_modality == 'FP_PP':
        for key in ('Ns_mat', 'scale_mat', 'cen0', 'P', 'H0', 'Np',
                    'numLEDs', 'N_obj', 'N_patch', 'num_patches',
                    'NAfilter_synthetic'):
            optical_parameters_dict[key] = _load(key)
        # Aliases used by the network-construction code.
        optical_parameters_dict['Nx_lowres'] = _load('Np')
        optical_parameters_dict['Nx_highres'] = _load('N_obj')
        optical_parameters_dict['Nz'] = 1
        num_elements = optical_parameters_dict['numLEDs']
    elif image_modality == 'STORM':
        for key in ('ZernikePolyMat', 'H_NA', 'H_fresnel_stack', 'Nz',
                    'num_wavelengths', 'numCoeff'):
            optical_parameters_dict[key] = _load(key)
        optical_parameters_dict['Nx_lowres'] = _load('Nx')
        optical_parameters_dict['Nx_highres'] = _load('Nx')
        num_elements = optical_parameters_dict['numCoeff']
    elif image_modality == 'HE':
        # Option to keep magnification/resolution the same, but add noise.
        only_noise = _load('only_noise')
        optical_parameters_dict['only_noise'] = only_noise
        for key in ('Nx_lowres', 'Nx_highres', 'num_wavelengths'):
            optical_parameters_dict[key] = _load(key)
        if not(only_noise):
            # Full magnification-change model parameters.
            for key in ('wavelength', 'h_blur', 'high_magn', 'low_magn',
                        'dpix_c', 'low_NA'):
                optical_parameters_dict[key] = _load(key)
        num_elements = None
    elif image_modality == 'phase':
        for key in ('ZernikePolyMat', 'H_NA', 'H_fresnel', 'numCoeff'):
            optical_parameters_dict[key] = _load(key)
        optical_parameters_dict['Nx_lowres'] = _load('Nx')
        optical_parameters_dict['Nx_highres'] = _load('Nx')
        optical_parameters_dict['Nz'] = 1
        num_elements = optical_parameters_dict['numCoeff']
    else:
        raise ValueError('Unknown image_modality: %r' % (image_modality,))
    return optical_parameters_dict, num_elements
###### Functions for H&E
def NAfilter(L,wavelength,NA,m):
    """Build an m x m circular low-pass pupil mask for the given NA.

    L is the side length of the (square) observation/source fields;
    wavelength is the free-space wavelength; m is the grid size.
    Returns a complex64 tensor that is 1 inside the frequency cutoff
    NA / wavelength and 0 outside.
    """
    dx = L / m
    k = 1. / wavelength  # wavenumber #2*pi/wavelength #1./wavelength
    freqs = tf.linspace(-1 / (2 * dx), 1 / (2 * dx) - 1 / L, m)  # freq coords
    FX, FY = tf.meshgrid(freqs, freqs)
    inside = tf.sqrt(FX ** 2 + FY ** 2) <= NA * k
    ones = tf.cast(tf.ones([m, m]), dtype=tf.complex64)
    zeros = tf.cast(tf.zeros([m, m]), dtype=tf.complex64)
    # H=fftshift(H,m,m)
    return tf.where(inside, ones, zeros)
def make_even(Np):
    """Round Np to a nearby even integer.

    Prefers whichever of floor(Np)/ceil(Np) is even; if Np is an odd
    integer (so both agree and are odd), rounds up to the next even value.
    """
    hi = np.ceil(Np)
    candidate = np.floor(Np) if hi % 2 else hi
    candidate = int(candidate)
    return candidate + 1 if candidate % 2 else candidate
def change_magn_img(img0, m, h_blur, high_magn, low_magn, dpix_c, wavelength, low_NA):
    """Simulate imaging an RGB tensor at a lower magnification and NA.

    Each of the three color channels is filtered through its own NA mask
    (built from its wavelength) and downsampled by incoherent_filter_H.
    Returns (img0, low_res_img): the untouched input and the simulated
    low-magnification image.
    """
    field = tf.cast(img0, dtype=tf.complex64)
    field = field[0:m, 0:m]
    L = m * dpix_c / high_magn  # [m] physical field size at high magnification
    cen = [m // 2, m // 2]
    # Low-resolution grid size, forced to be even.
    Np = make_even(m * low_magn / high_magn)
    channels = []
    for c in range(3):
        H = NAfilter(L, wavelength[c], low_NA, m)
        filtered = incoherent_filter_H(field[:, :, c], m, H, cen, Np, h_blur)
        channels.append(tf.expand_dims(filtered, axis=2))
    low_res = tf.concat(channels, axis=2)
    return img0, low_res
def incoherent_filter_H(u1, m, H, cen, Np, h_blur):
    """Apply an incoherent transfer function to u1, blur, downsample to
    Np x Np, and clip the result to the 8-bit range [0, 255].

    The intensity transfer function is built from the coherent one by
    F(|Ft(H)|^2) and normalized to 1 at the DC (center) frequency.
    """
    # Convert coherent H to an intensity transfer function, DC-normalized.
    H = F(Ft(H, m, m) * tf.conj(Ft(H, m, m)), m, m)
    H = H / H[m // 2, m // 2]
    U2 = H * F(u1, m, m)
    # convolve with n x n filter with n = high_magn/low_magn
    U2 = U2 * F(h_blur, m, m)
    # Crop the central Np x Np spectrum to downsample, then invert.
    U2 = downsamp(U2, cen, Np)
    u2 = tf.real(Ft(U2, Np, Np) / (m / Np) ** 2)
    # Clip u2 at 0 and 255
    lo = tf.cast(tf.zeros([Np, Np]), dtype=tf.float32)
    hi = 255 * tf.cast(tf.ones([Np, Np]), dtype=tf.float32)
    u2 = tf.where(u2 > 255, hi, u2)
    u2 = tf.where(u2 < 0, lo, u2)
    return u2
def filter_function_NA(u1,H_NA,Nx,incoherent=1):
    """Filter the Nx x Nx source field u1 with the NA transfer function.

    When `incoherent` is truthy, the coherent transfer function H_NA is
    first converted to an intensity transfer function via
    tf.fft2d(psf * conj(psf)); the filtering pipeline itself is identical
    in both cases (the original duplicated it across two branches).
    """
    H = H_NA
    if incoherent:
        H = tf.fft2d(tf.ifft2d(H) * tf.conj(tf.ifft2d(H)))
        # H = H / H[0, 0]
    U1 = tf.fft2d(fftshift(u1, Nx, Nx))
    U2 = H * U1
    u2 = fftshift(tf.ifft2d(U2), Nx, Nx)
    # u2 = u2*np.conj(u2) # make into intensity object
    return u2
def read_img_file(img_path, channels):
    """Read an image file from disk and decode it to a tensor with the
    requested number of channels."""
    raw = tf.read_file(img_path)
    return tf.image.decode_image(raw, channels=channels)
def change_magn(img_path, m, h_blur, high_magn, low_magn, dpix_c, wavelength, low_NA, num_wavelengths):
    """Load an image from disk and return (original, low-magnification
    simulation); see change_magn_img for the optical model."""
    decoded = read_img_file(img_path, num_wavelengths)
    return change_magn_img(decoded, m, h_blur, high_magn, low_magn,
                           dpix_c, wavelength, low_NA)
def change_magn_batch(img_stack, m, h_blur, high_magn, low_magn, dpix_c, wavelength, low_NA, batch_size):
    """Apply change_magn_img to every image of a batch and restack.

    img_stack: batch of high-resolution images, indexed on axis 0.
    Returns the stacked low-resolution simulations (batch on axis 0).
    """
    lowres_images = []
    for idx in range(batch_size):
        _, lowres = change_magn_img(img_stack[idx, :, :, :], m, h_blur, high_magn, low_magn, dpix_c, wavelength, low_NA)
        lowres_images.append(tf.expand_dims(lowres, axis=0))
    # Single concat of all per-image slices along the batch axis.
    return tf.concat(lowres_images, 0)
def add_poisson_noise(img0, img, noise_multiplier):
    """Simulate shot noise on img by Poisson sampling a scaled copy.

    img0 is passed through untouched (clean reference); the noisy image is
    clipped to [0, 255] and cast back to uint8.
    """
    scaled = noise_multiplier * tf.cast(img, dtype=tf.float32)
    noisy = tf.random_poisson(scaled, [1])
    noisy = noisy / noise_multiplier
    # Clip to the valid 8-bit range before the uint8 cast.
    lo = tf.cast(tf.zeros_like(noisy), dtype=tf.float32)
    hi = 255 * tf.cast(tf.ones_like(noisy), dtype=tf.float32)
    noisy = tf.where(noisy > 255, hi, noisy)
    noisy = tf.where(noisy < 0, lo, noisy)
    noisy = tf.cast(noisy, dtype=tf.uint8)
    # Drop the leading sample axis added by tf.random_poisson.
    return img0, tf.squeeze(noisy, axis=0)
def upsample_rgb(low_res_obj_rgb, Np, N_obj, num_wavelengths):
    """Upsample each color channel of low_res_obj_rgb independently.

    Each channel goes through the project's upsample helper (Np -> N_obj)
    and the results are restacked along the channel axis.
    """
    channels = []
    for chan in range(num_wavelengths):
        upsampled = upsample(low_res_obj_rgb[:, :, chan], Np, N_obj)
        channels.append(tf.expand_dims(upsampled, axis=2))
    return tf.concat(channels, 2)
def make_iterator_numpy(training_dataset, batch_size, num_GPUs, shuffle=False):
    """Build a repeating, batched (sample, index) tf.data pipeline.

    training_dataset: numpy array, samples on axis 0. Each element is
    paired with its integer position so callers can track identities.
    """
    samples = tf.data.Dataset.from_tensor_slices(training_dataset)
    positions = tf.data.Dataset.from_tensor_slices(
        tf.constant(np.arange(0, training_dataset.shape[0], 1), dtype=tf.int32))
    dataset = tf.data.Dataset.zip((samples, positions)).repeat()
    if shuffle:
        dataset = dataset.shuffle(training_dataset.shape[0])
    return dataset.batch(batch_size * num_GPUs)
def make_iterator_FP_PP(training_dataset, lowres_training_dataset, batch_size, num_GPUs, shuffle=False):
    """Build a repeating, batched (high-res, low-res, index) pipeline.

    The two arrays must be aligned on axis 0; every element carries its
    integer position as a third component.
    """
    highres = tf.data.Dataset.from_tensor_slices(training_dataset)
    lowres = tf.data.Dataset.from_tensor_slices(lowres_training_dataset)
    positions = tf.data.Dataset.from_tensor_slices(
        tf.constant(np.arange(0, training_dataset.shape[0], 1), dtype=tf.int32))
    dataset = tf.data.Dataset.zip((highres, lowres, positions)).repeat()
    if shuffle:
        dataset = dataset.shuffle(training_dataset.shape[0])
    return dataset.batch(batch_size * num_GPUs)
def make_complex_img(img_path, Nx, phase = True, intensity = False, library = tf):
    """Load an image and convert it into a complex phase object.

    img_path: file path (tf branch) or glob pattern (numpy branch).
    Nx: crop size; tf branch trims Nx//2 from every border, numpy branch
        keeps the top-left Nx x Nx region.
    library: tf for graph-mode tensors, otherwise numpy-like.
    Returns exp(i * pi * (765 - img) / 765), mapping dark pixels to the
    largest phase shift.

    NOTE(review): the phase/intensity flags are accepted but never read —
    confirm whether intensity objects were ever intended here.
    """
    if library == tf:
        img0 = read_img_file(img_path, None)
        img0 = library.reduce_sum(img0,axis=2)
        # BUG FIX: Nx/2 is a float in Python 3 and not a valid slice index;
        # integer division keeps the same crop on both Python 2 and 3.
        img0 = img0[Nx//2:-Nx//2,Nx//2:-Nx//2]
        img0 = library.cast(img0,dtype=library.complex64)
    else:
        files = glob.glob(img_path)
        fileI = files[0]
        img0 = misc.imread(fileI)
        img0 = library.sum(img0, axis=2)
        img0 = img0[0:Nx,0:Nx]
        img0 = img0.astype(library.complex64)
    # 255*3 is the maximum of the channel-summed 8-bit image.
    img0 = library.exp(1j*math.pi*1.0*(255.0*3-img0)/(255.0*3))
    return img0
def make_iterator_filename_FP(training_dataset, batch_size, num_GPUs, optical_parameters_dict, \
                    shuffle=False):
    """Build a repeating (NA-filtered complex image, index) pipeline for
    Fourier ptychography from an array of image filenames.

    training_dataset: array-like of file paths (converted to a tf constant).
    optical_parameters_dict: must provide 'Nx_highres' and
        'NAfilter_synthetic'.
    """
    Nx = optical_parameters_dict['Nx_highres']
    H_NA = optical_parameters_dict['NAfilter_synthetic']
    training_dataset = tf.constant(training_dataset.tolist())
    tr_data = tf.data.Dataset.from_tensor_slices(training_dataset)
    tr_indices =tf.data.Dataset.from_tensor_slices(tf.constant(np.arange(0,int(training_dataset.shape[0]),1),dtype = tf.int32))
    # Lambdas bind Nx/H_NA so the dataset map functions take one element only.
    filter_function_NA_lambda = lambda u1: filter_function_NA(u1,H_NA,Nx,incoherent=0)
    make_complex_img_lambda = lambda img_path: make_complex_img(img_path,Nx)
    # Decode to a complex object first, then apply the coherent NA filter.
    tr_data = tr_data.map(make_complex_img_lambda)
    tr_data = tr_data.map(filter_function_NA_lambda)
    tr_data = tf.data.Dataset.zip((tr_data, tr_indices))
    tr_data = tr_data.repeat()
    if shuffle:
        tr_data = tr_data.shuffle(training_dataset.shape[0])
    tr_data = tr_data.batch(batch_size*num_GPUs)
    return tr_data
def create_STORM_stack(img_path, Nx):
    """Load one STORM frame, crop it to Nx x Nx, and add a depth axis.

    Returns a float32 tensor with a singleton axis inserted at position 2.
    """
    frame = read_img_file(img_path, None)
    frame = frame[0:Nx, 0:Nx, :]
    frame = tf.expand_dims(frame, axis=2)
    return tf.cast(frame, tf.float32)
def make_iterator_filename_STORM(training_dataset, batch_size, num_GPUs, optical_parameters_dict, shuffle=False):
    """Build a repeating (STORM frame, index) pipeline from filenames.

    training_dataset: array-like of file paths; optical_parameters_dict
    must provide 'Nx_highres' for the crop size.
    """
    Nx = optical_parameters_dict['Nx_highres']
    filenames = tf.constant(training_dataset.tolist())
    frames = tf.data.Dataset.from_tensor_slices(filenames)
    positions = tf.data.Dataset.from_tensor_slices(
        tf.constant(np.arange(0, int(filenames.shape[0]), 1), dtype=tf.int32))
    frames = frames.map(lambda img_path: create_STORM_stack(img_path, Nx))
    dataset = tf.data.Dataset.zip((frames, positions)).repeat()
    if shuffle:
        dataset = dataset.shuffle(filenames.shape[0])
    return dataset.batch(batch_size * num_GPUs)
def make_iterator_filename(training_dataset, add_noise, poisson_noise_multiplier, batch_size, \
                    num_GPUs, optical_parameters_dict, shuffle = False):
    """Build a repeating ((clean, degraded) image pair, index) pipeline.

    When 'only_noise' is set the clean image is duplicated (noise is the
    only degradation); otherwise each file is passed through the optical
    magnification-change simulation. Poisson noise is applied afterwards
    when add_noise is True.
    """
    only_noise = optical_parameters_dict['only_noise']
    num_wavelengths = optical_parameters_dict['num_wavelengths']
    training_dataset = tf.constant(training_dataset.tolist())
    tr_data = tf.data.Dataset.from_tensor_slices(training_dataset)
    tr_indices =tf.data.Dataset.from_tensor_slices(tf.constant(np.arange(0,int(training_dataset.shape[0]),1),dtype = tf.int32))
    if only_noise:
        # Pair the image with itself; degradation comes from noise alone.
        read_img_file_lambda = lambda img_path: read_img_file(img_path, num_wavelengths)
        tr_data = tr_data.map(lambda img_path: (read_img_file_lambda(img_path),read_img_file_lambda(img_path)))
    else:
        # Simulate the low-magnification capture for the degraded half.
        change_magn_lambda = lambda img_path: change_magn(img_path,
                                                          optical_parameters_dict['Nx_highres'],
                                                          optical_parameters_dict['h_blur'],
                                                          optical_parameters_dict['high_magn'],
                                                          optical_parameters_dict['low_magn'],
                                                          optical_parameters_dict['dpix_c'],
                                                          optical_parameters_dict['wavelength'],
                                                          optical_parameters_dict['low_NA'],
                                                          num_wavelengths)
        tr_data = tr_data.map(change_magn_lambda)
    add_poisson_noise_lambda = lambda img0, img: add_poisson_noise(img0, img, poisson_noise_multiplier)
    # only_noise without add_noise leaves the pair identical — warn loudly.
    if only_noise and not(add_noise):
        print('Warning: only_noise = True and add_noise = False!!')
    if add_noise:
        tr_data = tr_data.map(add_poisson_noise_lambda)
    tr_data = tf.data.Dataset.zip((tr_data, tr_indices))
    tr_data = tr_data.repeat()
    if shuffle:
        tr_data = tr_data.shuffle(int(training_dataset.shape[0]))
    tr_data = tr_data.batch(batch_size*num_GPUs)
    return tr_data
def tower_loss_HE(low_res_obj_batch, \
                  high_res_obj_batch, training, \
                  use_batch_norm, variance_reg, sqrt_reg, \
                  dropout_prob, layers_dropout, batch_size, max_internal_iter, merit_stopping_point, \
                  optical_parameters_dict, autoencode_init_type, init_type_bias, init_type_resid, \
                  kernel_multiplier, num_layers_autoencode, skip_interval, num_blocks):
    """Build the per-tower loss graph for the HE (histology) model.

    Pipeline: upsample the low-res batch, run the per-wavelength neural
    nets, re-simulate the low-res capture from the prediction, optionally
    refine with an internal iterative solver, and compare against the
    high-res ground truth.

    Returns (loss, loss_grad, loss_low_res, low_res_obj_batch_predicted,
    low_res_obj_batch, predicted_mat). For only_noise = True,
    loss_low_res is set to loss and low_res_obj_batch_predicted is the
    input batch itself.
    """
    only_noise = optical_parameters_dict['only_noise']
    Nx_lowres = optical_parameters_dict['Nx_lowres']
    Nx_highres = optical_parameters_dict['Nx_highres']
    num_wavelengths = optical_parameters_dict['num_wavelengths']
    # Optical constants are only needed when a magnification change is simulated.
    if not(only_noise):
        wavelength = optical_parameters_dict['wavelength']
        h_blur = optical_parameters_dict['h_blur']
        high_magn = optical_parameters_dict['high_magn']
        low_magn = optical_parameters_dict['low_magn']
        dpix_c = optical_parameters_dict['dpix_c']
        low_NA = optical_parameters_dict['low_NA']
    with tf.variable_scope("optical_transform"):
        if only_noise:
            upsampled_obj_stack = low_res_obj_batch
        else:
            # upsample the low_res_obj_stack
            simple_upsample = 0
            if simple_upsample:
                # Per-image Fourier upsampling via the project's upsample_rgb.
                low_res_obj_batch = tf.cast(low_res_obj_batch, tf.complex64)
                for ii in range(batch_size):
                    upsampled_obj = upsample_rgb(low_res_obj_batch[ii,:,:,:], Nx_lowres, Nx_highres, num_wavelengths)
                    upsampled_obj = tf.expand_dims(upsampled_obj,axis=0)
                    if ii == 0:
                        upsampled_obj_stack = upsampled_obj
                    else:
                        upsampled_obj_stack = tf.concat([upsampled_obj_stack,upsampled_obj],0)
            else:
                # Default: bicubic image resize of the whole batch.
                upsampled_obj_stack = tf.image.resize_images(low_res_obj_batch, [int(Nx_highres), int(Nx_highres)], \
                                                             method=tf.image.ResizeMethod.BICUBIC, align_corners=False)
                upsampled_obj_stack = tf.cast(upsampled_obj_stack, tf.float32)
    with tf.variable_scope("neural_net"):
        # One network per wavelength; each receives a single channel.
        num_nets = num_wavelengths
        input_layer_list = []
        for w in range(num_wavelengths):
            input_layer = tf.expand_dims(upsampled_obj_stack[:,:,:,w], axis=3)
            input_layer_list.append(input_layer)
        # len(input_layer) should equal num_nets
        # input_layer_r = tf.expand_dims(upsampled_obj_stack[:,:,:,0], axis=3)
        # input_layer_g = tf.expand_dims(upsampled_obj_stack[:,:,:,1], axis=3)
        # input_layer_b = tf.expand_dims(upsampled_obj_stack[:,:,:,2], axis=3)
        #
        #
        # input_layer_list = [input_layer_r, input_layer_g, input_layer_b]
        predicted_mat = find_predicted_mat('HE', num_nets, input_layer_list, Nx_highres, batch_size, layers_dropout, dropout_prob,
            use_batch_norm, autoencode_init_type, init_type_bias, init_type_resid, kernel_multiplier, variance_reg, training, \
            num_layers_autoencode, skip_interval, num_blocks, optical_parameters_dict)
    with tf.variable_scope("neural_net"):
        # ### convert predicted_mat down to low_res_obj_predicted
        if only_noise:
            low_res_obj_batch_predicted = low_res_obj_batch
        else:
            low_res_obj_batch_predicted = change_magn_batch(predicted_mat, Nx_highres, h_blur, \
                high_magn, low_magn, dpix_c, wavelength, low_NA, batch_size)
        low_res_obj_batch_predicted = tf.cast(low_res_obj_batch_predicted, tf.float32)
        low_res_obj_batch = tf.cast(low_res_obj_batch, tf.float32)
        # Self-consistency loss between the measured and re-simulated low-res images.
        loss_low_res = tf.reduce_sum(tf.square(low_res_obj_batch - low_res_obj_batch_predicted))
        ###################
        step_size = variable_on_cpu('step_size', 1e-7, tf.float32, trainable = False) # XXX make step_size trainable
    with tf.variable_scope("iteration_variable"):
        # Loop counter consumed by the internal iterative solver.
        i = tf.Variable(0, dtype=tf.int32, trainable=False)
    with tf.variable_scope("neural_net"):
        if not(only_noise):
            if max_internal_iter > 0:
                # Refine the prediction with gradient steps on the low-res loss.
                predicted_mat, loss_low_res = iterative_solver_HE(predicted_mat, Nx_highres, h_blur, \
                    high_magn, low_magn, dpix_c, wavelength, \
                    low_NA, low_res_obj_batch, step_size, max_internal_iter, \
                    merit_stopping_point, loss_low_res, i, \
                    batch_size)
        high_res_obj_batch = tf.cast(high_res_obj_batch, tf.float32)
        # Supervised loss against the high-resolution ground truth.
        loss = tf.reduce_sum(tf.square(high_res_obj_batch - predicted_mat))
        if only_noise:
            loss_low_res = loss
        loss_grad = grad_diff_loss(predicted_mat, high_res_obj_batch)
    return loss, loss_grad, loss_low_res, low_res_obj_batch_predicted, low_res_obj_batch, predicted_mat
    #output names are: loss, loss_grad, optical_element, low_res_obj_stack_nonoise, low_res_obj_stack, predicted_mat
    #for only_noise = True, loss_low_res is set to loss, and low_res_obj_batch_predicted is set to low_res_obj_batch
| Mualpha7/Engineering_JupyterFiles | ENGR 090. Fourier ptychographic microscopy for biological samples/TensorFlowFunctions.py | TensorFlowFunctions.py | py | 58,048 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow.cast",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "tensorflow.int32",
"line_number": 27,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.cast",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "tensorflow.int3... |
11807018018 | from typing import List
import pandas as pd
from graphysio.dialogs import askUserValue
from graphysio.plotwidgets import PlotWidget
from graphysio.plotwidgets.curves import CurveItemWithPOI
from graphysio.structures import Parameter, PlotData
def get_perfusion_index(plotwidget: PlotWidget) -> List[CurveItemWithPOI]:
    """Compute a per-cycle perfusion index series for one curve of plotwidget.

    Picks the curve (asking the user when several exist), interpolates the
    cycle-start baseline over the waveform, and for each cycle returns
    (pulsatile AUC) / (total AUC).

    Raises ValueError when no curve exists or the chosen curve has no
    'start' feet annotations.
    """
    curvenames = list(plotwidget.curves.keys())
    # BUG FIX: this list was built twice in a row; the duplicate was removed.
    if len(curvenames) < 1:
        raise ValueError("No curve")
    elif len(curvenames) > 1:
        q = Parameter("Select Curve", curvenames)
        curvename = askUserValue(q)
    else:
        curvename = curvenames[0]
    curve = plotwidget.curves[curvename]
    if (
        "start" not in curve.feetitem.indices
        or curve.feetitem.indices["start"].size < 1
    ):
        raise ValueError("No start information for curve")
    wave = curve.series
    starts = curve.getFeetPoints("start")
    # Align the sparse start points with the waveform, then interpolate the
    # baseline between cycle starts on the shared index.
    df = pd.concat([wave, starts], axis=1)
    df = df.interpolate(method="index")
    begins, durations = curve.getCycleIndices()
    pivalues = []
    for begin, duration in zip(begins, durations):
        cycle = df.loc[begin : begin + duration]
        auctot = cycle[wave.name].sum()
        aucbase = cycle[starts.name].sum()
        aucpulse = auctot - aucbase
        pi = aucpulse / auctot
        pivalues.append(pi)
    out_series = pd.Series(pivalues, index=begins)
    sname = f"{wave.name}-perf"
    plotdata = PlotData(data=out_series, name=sname)
    return [plotdata]
| jaj42/GraPhysio | graphysio/transformations/perfusion_index.py | perfusion_index.py | py | 1,512 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "graphysio.plotwidgets.PlotWidget",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "graphysio.structures.Parameter",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "graphysio.dialogs.askUserValue",
"line_number": 17,
"usage_type": "call"
... |
22710793236 | import pygame
import random
# Definition of the monster class (comments translated from French).
class Monster(pygame.sprite.Sprite):
    """Enemy sprite (Bart) that walks toward the player and deals contact damage."""
    def __init__(self, game):
        super().__init__()
        self.game = game
        self.health = 100       # current hit points
        self.max_health = 100   # hit points when (re)spawned
        self.attack = 0.3       # damage dealt to the player per frame of contact
        self.image = pygame.image.load('assets/bart.png')
        self.image = pygame.transform.scale(self.image, (200, 200)) # resize the monster sprite
        self.rect = self.image.get_rect()
        # Spawn off the right edge of the screen at a random offset.
        self.rect.x = 1600 + random.randint(0, 300)
        self.rect.y = 620
        self.velocity = random.randint(1, 4)
    def damage(self, amount):
        """Apply damage; on death respawn, or vanish and trigger the comet event."""
        # inflict the damage
        self.health -= amount
        # check whether health has dropped to zero or below
        if self.health <= 0:
            # respawn as a fresh monster off the right edge
            self.rect.x = 1600 + random.randint(0, 300)
            self.velocity = random.randint(1, 4)
            self.health = self.max_health
            # if the event gauge is fully charged
            if self.game.comet_event.is_full_loaded():
                # remove this monster from the game
                self.game.all_monsters.remove(self)
                # trigger the comet rain event
                self.game.comet_event.attempt_fall()
    def update_health_bar(self, surface):
        """Draw the health bar just above the sprite on the given surface."""
        # background (dark) bar, then the current-health (green) bar on top
        pygame.draw.rect(surface, (60, 63, 60), [self.rect.x + 40, self.rect.y - 20, self.max_health, 5])
        pygame.draw.rect(surface, (111, 210, 46), [self.rect.x + 40, self.rect.y - 20, self.health, 5])
    def forward(self):
        """Move left toward the player; deal contact damage while colliding."""
        # only move while there is no collision with a player
        if not self.game.check_collision(self, self.game.all_players):
            self.rect.x -= self.velocity # move Bart leftward
        # the monster is colliding with the player (Homer)
        else:
            # inflict damage on the player
            self.game.player.damage(self.attack)
{
"api_name": "pygame.sprite",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "pygame.image.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pygame.image",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "pygame.transfor... |
10338740792 | from PyQt5 import QtWidgets, QtCore, QtGui
from PyQt5.QtWidgets import QLabel
from menu_window import MenuWindow
class GUI(QtWidgets.QMainWindow):
    """
    This is the base window where all the child widgets are placed.
    All the child widgets act as separate windows.
    This also holds all the variables shared with the child widgets.
    """
    def __init__(self, db):
        """Store shared state (db handle, image sizes/paths) and build the window."""
        super().__init__()
        self.db = db
        # Display dimensions shared by the child annotation widgets.
        self.image_height = 520
        self.image_width = 920
        self.image_path_not_annotated = "../../images/not_annotated/" # Here you want to move the images you want to annotate
        self.image_path_annotated = "../../images/annotated/"
        self.setCentralWidget(QtWidgets.QWidget()) # QMainWindow must have a centralWidget to be able to add layouts
        self.horizontal = QtWidgets.QHBoxLayout() # Horizontal main layout
        self.centralWidget().setLayout(self.horizontal)
        self.init_window()
    def init_window(self):
        """
        Sets up the window geometry/title, shows it, and displays the menu widget.
        """
        self.setGeometry(300, 300, 800, 800)
        self.setWindowTitle("Joni's sick annotation program")
        self.show()
        # When started show the Menu widget
        self.curr_widget = MenuWindow(self)
        self.setCentralWidget(self.curr_widget)
| jonirajala/ImgAnnotator | src/ui/gui.py | gui.py | py | 1,340 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "PyQt5.QtWidgets.QMainWindow",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtWidgets",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 22,
"usage_type": "call"
},
{
"api_name":... |
11910453993 | from flask import jsonify, request
from app.models import Manufacturer, Token
from app import db
def deleteManufacturer(id):
    """Delete the manufacturer record with the given id.

    Requires an active TOKEN header. Returns (JSON message, HTTP status):
    201 on success, 500 on every failure path (kept as-is to preserve the
    existing API contract).
    """
    # .get avoids a KeyError when the TOKEN header is missing entirely.
    token = request.headers.get('TOKEN')
    # BUG FIX: the original dereferenced .status before checking the lookup
    # result, raising AttributeError for unknown tokens.
    token_record = Token.query.filter_by(token=token).first() if token else None
    if id is not None:
        if token_record is not None and token_record.status == 'active':
            manufacturer=Manufacturer.query.filter_by(id=id).first()
            if manufacturer is not None:
                db.session.delete(manufacturer)
                db.session.commit()
                return jsonify('manufacturer deleted successfully'),201
            else:
                return jsonify('Manufacturer record specified does not exist'),500
        else:
            return jsonify('no token provided or token has expired'),500
    else:
        return jsonify('No manufacturer id provided'),500
| the1Prince/drug_repo | app/deletes/deleteManufacturer.py | deleteManufacturer.py | py | 964 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.request.headers",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "app.models.Token.query.filter_by",
"line_number": 12,
"usage_type": "call"
},
{
"api_name"... |
12021443393 | import discord
import asyncio
import sys
from datetime import datetime
import feedparser
import re
import time
import random
import os
# Discord client plus module-level state shared by the feed-polling coroutines.
client = discord.Client()
latestNum = 0   # id of the newest forum thread already announced
currentNum = 0  # id of the thread most recently fetched from the RSS feed
link = 'blank'  # URL of the most recently fetched thread
title = 'blank' # title of the most recently fetched thread
# Restore the last announced thread id so restarts don't repost old threads.
last = open("/last.txt", "r") #This file stores the last thread number so if the bot closes it doesn't post repeat threads
latestNum = int(last.read()) #MAKE SURE TO SET THIS FILE PATH
last.close()
def setLastFile():
    """Persist the id of the last announced thread to /last.txt."""
    # Context manager guarantees the file is closed even if the write fails.
    with open("/last.txt", "w") as f: #MAKE SURE TO SET THIS FILE PATH
        f.write(str(latestNum))
def setCurr(val):
    """Record the id of the thread most recently fetched from the feed."""
    global currentNum
    currentNum = val
def setLatest(val):
    """Record the id of the newest thread that has already been announced."""
    global latestNum
    latestNum = val
def setLink(val):
    """Record the URL of the most recently fetched thread."""
    global link
    link = val
def setTitle(val):
    """Record the title of the most recently fetched thread."""
    global title
    title = val
def setEqual(latest, curr):
    """Mark the latest fetched thread as announced (latestNum = currentNum).

    NOTE(review): the latest/curr parameters are accepted but never used —
    the function always operates on the module-level globals. Confirm
    before relying on the arguments.
    """
    global latestNum
    global currentNum
    latestNum = currentNum
rss = feedparser.parse('http://example.com/syndication.php?limit=1') #Get the RSS feed from the forums, only get the most recent thread
title = rss.entries[0].title #Grab the title of the thread
link = rss.entries[0].link #Link to thread
num = int(re.search(r'\d+', link).group()) #Thread id
setCurr(num)
setLink(link)
setTitle(title)
@client.event
async def on_ready():
    # Log a startup banner once the bot has connected to Discord.
    print('Logged in as ' + client.user.name + ' @ {:%Y-%b-%d %H:%M:%S}'.format(datetime.now()))
    print('--------------------------------------------')
async def rssChecker():
    """Poll the feed once; announce the thread on Discord if it is new,
    then persist the new high-water mark."""
    getForumThreads()
    if(currentNum > latestNum): #Check if the thread that was just aquired is new or not
        mess = 'A new thread has been posted to the forums: **' + title + '** ' + link
        await client.send_message(discord.Object(id='CHANNEL_ID'), mess) #Set the channel ID
        setEqual(latestNum, currentNum)
        setLastFile()
async def background():
    """Background loop: poll the RSS feed every 15 minutes until the client closes."""
    await client.wait_until_ready()
    while not client.is_closed:
        await rssChecker() #Run RSS Check
        await asyncio.sleep(900) #Wait 900 seconds (15 minutes) - Set this number to what ever you want the refresh rate to be
# Schedule the poller on the bot's event loop, then start (blocks until shutdown).
client.loop.create_task(background())
client.run('BOT_ID') #Set bot ID
| swmpdg/discord-mybb-thread-poster | bot.py | bot.py | py | 2,189 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "discord.Client",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "feedparser.parse",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
... |
71036138274 | import random
import tensorflow as tf
from dqn.agent import Agent
from dqn.environment import GymEnvironment
from config import get_config
flags = tf.app.flags
# Command-line flags understood by this entry point.
flags.DEFINE_string('env_name', 'Breakout-v0', 'The name of gym environment to use')
flags.DEFINE_boolean('display', False, 'Whether to do display the game screen or not')
flags.DEFINE_boolean('is_train', True, 'Whether to do training or testing')
FLAGS = flags.FLAGS
# Fix both TensorFlow's and Python's RNGs for reproducible runs.
tf.set_random_seed(123)
random.seed(123)
def main(_):
    """Entry point: construct the gym environment and the DQN agent inside
    a TensorFlow session, then either train or play according to the flags."""
    with tf.Session() as sess:
        config = get_config(FLAGS) or FLAGS
        environment = GymEnvironment(config)
        dqn_agent = Agent(config, environment, sess)
        if config.is_train:
            dqn_agent.train()
        else:
            dqn_agent.play()
if __name__ == '__main__':
    # Delegate flag parsing and the main() invocation to TensorFlow's app runner.
    tf.app.run()
| gutobortolozzo/reinforcement-learning-atari | main.py | main.py | py | 750 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "tensorflow.app",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.set_random_seed",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tensorflow.... |
27273086943 | import numpy as np
import scipy.stats as sts
import matplotlib.pyplot as plt
import functools as ft
#mu = 0
#std = 1
N = 10000       # size of each uniform sample drawn per chi^2 statistic
n_means = 2000  # number of chi^2 statistics accumulated for the histogram
def Chi2_i(prediction_i, label_dist, population_variance=None, population_mean=None):
    """Return one chi^2 term comparing prediction_i with the mean of label_dist.

    Following Cowan p. 104: (y_i - f(x_i, w))^2 / sigma_i^2 measures the
    deviation between a measurement and the hypothesis. Here y_i is the
    mean of label_dist (or the supplied population_mean) and sigma_i^2 is
    the variance of that mean: (population) variance / sample size.

    NOTE(review): the chi^2 interpretation strictly requires Gaussian y_i;
    Wikipedia's formulation uses the sample mean rather than the population
    mean — confirm which is intended for this study.
    """
    y_i = np.mean(label_dist) if population_mean is None else population_mean
    variance = np.var(label_dist) if population_variance is None else population_variance
    variance_of_the_mean = variance / len(label_dist)
    return (y_i - prediction_i) ** 2 / variance_of_the_mean
def GetSample(mu, std, N):
    """Draw N uniform samples with mean mu and standard deviation std.

    A uniform distribution on [mu - sqrt(3)*std, mu + sqrt(3)*std] has
    exactly this mean and standard deviation.
    """
    half_width = np.sqrt(3) * std
    return np.random.uniform(low=mu - half_width, high=mu + half_width, size=N)
    #return np.random.normal(loc=mu, scale=std, size=N)
def GetX():
    """Draw one chi^2 statistic: sample N points from a uniform distribution
    with random (mu, std) and score the true mean mu against the sample."""
    mu_max = 1000
    mu = np.random.randint(0, mu_max)
    std = np.random.uniform() * mu_max * 2
    #s = s
    #N = N
    sample = GetSample(mu, std, N)
    # The prediction must be from the sample // Chi^2 only works if value is gaussian
    pred = mu #GetSample(mu, std, 1)
    # If the prediction were to be from the distribution of the mean
    #pred = np.random.normal(loc=mu, scale=std/np.sqrt(N))
    #pred = GetSample(mu, std/n, 1)
    # Pass the known population variance std**2 so Chi2_i doesn't estimate it.
    chi2 = Chi2_i(pred, sample, std**2)
    #, std/np.sqrt(N)) >> already taken down inside chi_2
    #chi2 /= N-1
    return chi2
# Accumulate n_means chi^2 statistics into a numpy array.
c2_demo = (GetX() for test_i in range(n_means))
c2_demo = np.fromiter(c2_demo, dtype=float)
print("plotting")
# Shrink the bin count until every histogram bin holds at least one entry
# (empty bins would break the Pearson chi^2 test below).
less_bins = 0
np_counts = [0]
while np.min(np_counts) < 1:
    N_bin = n_means//10 - less_bins
    #counts, bins, bar_container = plt.hist(c2_demo, density=True, bins=N_bin)
    np_counts, np_bins = np.histogram(c2_demo, bins=N_bin)
    less_bins += 1
plt.close()
fig = plt.figure()
# Normalized histogram overlaid with the chi^2(df=1) density.
counts, bins, bar_container = plt.hist(c2_demo, density=True, bins=np_bins)
df = 1
c2_ls = np.linspace(np.min(c2_demo), np.max(c2_demo), 1000)
c2_prob = sts.chi2.pdf(c2_ls, df)#, scale=std)
plt.plot(c2_ls, c2_prob)
plt.ylim([0, np.mean(counts[:2])])
fig.show()
#frac_before_one = np.sum(counts[np.where(bins <= 1)[0]]) / np.sum(counts)
## DOUBLE CHI^2 :::: USE PEARSON'S CHI^2 TO TEST IF DISTRIBUTION IS CHI^2 DISTRIBUTION.
# Expected probability mass of each bin under the chi^2(df) hypothesis.
ep = [sts.chi2.cdf(bins[i+1],df=df) - sts.chi2.cdf(bins[i],df=df) for i in range(len(bins)-1)]
expected_counts = np.array(ep)
expected_counts *= np.sum(np_counts)
#expected_count = map(c2_pd, bins)
#= list(expected_count)
# Pearson statistic: sum over bins of (observed - expected)^2 / expected.
double_chi2 = zip(np_counts, expected_counts)
double_chi2 = ((obs - ep)**2/ep for obs, ep in double_chi2)
#double_chi2 = ((counts[ind],sts.chi2.pdf(bins[ind])) for ind in range(len(counts)))
double_chi2 = sum(double_chi2)
prob_2c2 = sts.chi2.pdf(double_chi2, df=N_bin)
# NOTE(review): np.infty was removed in NumPy 2.0 — this line needs np.inf there.
cprob_2c2 = sts.chi2.cdf(np.infty, df=N_bin) - sts.chi2.cdf(double_chi2, df=N_bin)
#cprob_2c2 = cprob_2c2
print(f"N_bin: {N_bin}")
print(f"2-chi2: {double_chi2}")
print(f"chi^2/df : {double_chi2/N_bin}")
print("")
print(f"prob: {prob_2c2}")
print(f"P-value: {cprob_2c2}")
#plt.figure()
#plt.hist(double_chi2)
#plt.show()
| CLEANit/EGSnrc | egs_home/scatter-learn/misc/chi2_goodness_fit/chi2_demo.py | chi2_demo.py | py | 3,677 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.mean",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.var",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.sqrt",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_numb... |
70888601635 | import csv
from io import StringIO
import json
import os
import requests
import sys
import unittest
from unittest.mock import patch
from etl.run import run_cmd_line
from etl.setup import ETLEnv
import pdb
REAL_GET = requests.get
def get_test_data_paths(format_, num_calls):
"Returns list of possible paths to test data files."
test_info = ETLEnv.instance().get_test_info()
data_path = f"etl/tests/data/{test_info.institution}"
if test_info.tag:
data_path += f"_{test_info.tag}"
if num_calls:
return [ data_path + f"_{num_calls}.{format_}" ]
else:
return [ data_path + f".{format_}", data_path + f"_{num_calls}.{format_}" ]
def try_to_read_test_data(num_calls=0):
"Check if there is sample data in a file to use for testing - if found, return the contents of the file."
for format_ in [ "json", "xml" ]:
data_paths = get_test_data_paths(format_=format_, num_calls=num_calls)
for data_path in data_paths:
if os.path.exists(data_path):
with open(data_path, "r") as input:
if format_ == "json":
data = json.loads(input.read())
else:
data = input.read()
return data, format_
return None, None
class MockResponse():
def __init__(self, data=None, content=None):
self.data = data
self.content = content
self.ok = True
def json(self):
return self.data
class MockGetter():
def __init__(self):
self.num_calls = None
def __call__(self, url, params=None, **kwargs):
self.num_calls = 0 if self.num_calls is None else self.num_calls + 1
data, format_ = try_to_read_test_data(num_calls=self.num_calls)
if data:
if format_ == "json":
return MockResponse(data=data)
else:
return MockResponse(content=data)
# No test data exists for this institutions: Just do the actual http get.
data = REAL_GET(url=url, params=params, **kwargs)
if True:
data_paths = get_test_data_paths(format_="txt", num_calls=self.num_calls)
with open(data_paths[0], "w") as output:
if "application/json" in data.headers.get("Content-Type"):
text = json.dumps(data.json())
else:
text = data.text
output.write(text)
print(f"Wrote data to test data file '{data_paths[0]}'", file=sys.stderr)
return data
class TestBase(unittest.TestCase):
def setUp(self):
self.maxDiff = None
self.debug = False
self.inspect_output = False
etl_env = ETLEnv.instance()
etl_env.init_testing()
etl_env.set_use_cache(use_cached_metadata=True)
def run_etl_test(self, institution, format, expected, tag=''):
print(f"\nTesting ETL process for {institution} {tag}, format={format}\n", file=sys.stderr)
test_info = ETLEnv.TestInfo(institution=institution, tag=tag)
ETLEnv.instance().set_test_info(test_info=test_info)
if not self.debug:
old_stdout = sys.stdout
mystdout = StringIO()
sys.stdout = mystdout
with patch.object(requests, 'get', new_callable=MockGetter):
run_cmd_line(args=[institution]+[ "--format="+format ])
if not self.debug:
sys.stdout = old_stdout
mystdout.seek(0)
output = mystdout.read()
# Break to examine output?
if self.inspect_output: # pragma: no cover (this is just for debugging)
pdb.set_trace()
# Convert output to csv and compare the csv.
output_csv = []
for row in csv.reader(StringIO(output)):
output_csv.append(row)
expected_csv = []
for row in csv.reader(StringIO(expected)):
expected_csv.append(row)
self.assertEqual(output_csv, expected_csv)
| rhizomes-project/rhizomes-etl | etl/tests/test_tools.py | test_tools.py | py | 4,114 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "etl.setup.ETLEnv.instance",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "etl.setup.ETLEnv",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "os.path.... |
31915854876 | # -*- coding: utf-8 -*-
"""
TRABAJO 1.
Estudiante: JJavier Alonso Ramos
"""
# Importamos módulo para trabajar con datos matemáticos
import numpy as np
# Importamos módulo para gráficos 2D
import matplotlib.pyplot as plt
# Importamos el módulo para formater tablas
import pandas as pd
# Importamos el módulo para hacer el gráfico 3D
from mpl_toolkits.mplot3d import Axes3D
# Importamos el módulo para generar números aleatorios
import random as rnd
np.random.seed(1)  # fixed seed so every run of the exercise is reproducible
print('EJERCICIO SOBRE LA BUSQUEDA ITERATIVA DE OPTIMOS\n')
print('Ejercicio 1\n')
# Función 1.2 - (np.e**v*u**2-2*v**2*np.e**(-u))**2
def E(u, v):
    """Error surface E(u, v) = (u^2 e^v - 2 v^2 e^{-u})^2."""
    core = np.e**v*u**2-2*v**2*np.e**(-u)
    return core**2
#Derivada parcial de E con respecto a u - 2*(np.e**v*u**2-2*v**2*np.e**(-u))*(2*v**2*np.e**(-u)+2*np.e**v*u)
def dEu(u, v):
    """Partial derivative of E with respect to u (chain rule: 2*core*dcore/du)."""
    core = np.e**v*u**2-2*v**2*np.e**(-u)
    dcore_du = 2*v**2*np.e**(-u)+2*np.e**v*u
    return 2*core*dcore_du
#Derivada parcial de E con respecto a v - 2*(u**2*np.e**v-4*np.e**(-u)*v)*(u**2*np.e**v-2*np.e**(-u)*v**2)
def dEv(u, v):
    """Partial derivative of E with respect to v (chain rule: 2*dcore/dv*core)."""
    dcore_dv = u**2*np.e**v-4*np.e**(-u)*v
    core = u**2*np.e**v-2*np.e**(-u)*v**2
    return 2*dcore_dv*core
#Gradiente de E
def gradE(u, v):
    """Gradient of E, returned as np.array([dE/du, dE/dv])."""
    partials = [dEu(u, v), dEv(u, v)]
    return np.array(partials)
#Función 1.3 - u**2+2*v**2+2*np.sin(2*np.pi*u)*np.sin(2*np.pi*v)
def F(u, v):
    """Surface F(u, v) = u^2 + 2v^2 + 2 sin(2*pi*u) sin(2*pi*v)."""
    quadratic = u**2+2*v**2
    ripple = 2*np.sin(2*np.pi*u)*np.sin(2*np.pi*v)
    return quadratic + ripple
#Derivada parcial de F con respecto a u - 4*np.pi*np.sin(2*np.pi*v)*np.cos(2*np.pi*u)+2*u
def dFu(u, v):
    """Partial derivative of F with respect to u."""
    ripple_term = 4*np.pi*np.sin(2*np.pi*v)*np.cos(2*np.pi*u)
    return ripple_term+2*u
#Derivada parcial de F con respecto a v - 4*np.pi*np.sin(2*np.pi*u)*np.cos(2*np.pi*v)+4*v
def dFv(u, v):
    """Partial derivative of F with respect to v."""
    ripple_term = 4*np.pi*np.sin(2*np.pi*u)*np.cos(2*np.pi*v)
    return ripple_term+4*v
#Gradiente de F
def gradF(u, v):
    """Gradient of F, returned as np.array([dF/du, dF/dv])."""
    partials = [dFu(u, v), dFv(u, v)]
    return np.array(partials)
################################################################################################
######################################## 1.1 ###################################################
################################################################################################
def gradient_descent(func,grad,u,v,maxIter,epsilon=1e-14,learning_rate=0.01, ejer1_2=False, ejer1_3a=False):
    """Gradient descent on func starting at (u, v).

    func: scalar function of (u, v) to minimize.
    grad: gradient of func; grad(u, v) must be indexable as [du, dv].
    maxIter: hard cap on the number of iterations.
    epsilon: minimum per-step improvement (and, for ejer1_2, target value).
    learning_rate: step size.
    ejer1_2: also stop as soon as func drops below epsilon (exercise 1.2).
    ejer1_3a: never stop early — always run all maxIter steps (exercise 1.3a).

    Returns (w, iterations, trajectory) where w = [u, v] is the final point
    and trajectory is the list of every [u, v] visited.
    """
    iterations = 0
    trajectory = []
    # Height of the previous point; used to detect stagnation.
    # BUG FIX: np.Inf was removed in NumPy 2.0; np.inf is the portable spelling.
    last_z = np.inf
    keep_going = True
    while iterations < maxIter and keep_going:
        # Step opposite the gradient at the current point.
        slope = grad(u, v)
        u = u - learning_rate * slope[0]
        v = v - learning_rate * slope[1]
        # w always holds the most recent point; trajectory records them all.
        w = [u, v]
        trajectory.append([u, v])
        new_z = func(u, v)
        # Stop when the improvement between iterations becomes negligible.
        if last_z - new_z > epsilon:
            last_z = new_z
        else:
            keep_going = False
        # Exercise 1.2: also stop once the target height epsilon is reached.
        if ejer1_2 and new_z < epsilon:
            keep_going = False
        # Exercise 1.3: ignore all stopping criteria except maxIter.
        if(ejer1_3a):
            keep_going = True
        iterations = iterations + 1
    return w, iterations, trajectory
################################################################################################
######################################## 1.2 ###################################################
################################################################################################
#Declaramos el punto inicial
initial_point_E = np.array([1.0,1.0])
"""
Realizamos el algoritmo del Gradiente Descendiente para la función E partiendo del punto (1,1)
Como tope de iteraciones indicamos 10000000000
Como altura mínima a encontrar marcamos 1e-14
En w guardamos las coordenadas (x,y) del punto con z mínimo alcanzado
En it almacenamos el número de iteraciones que han sido necesarias para calcular w
En points2min guardamos la secuencia de (x,y) que se ha ido generando hasta llegar a w
"""
w, it, points2min = gradient_descent(E,gradE,initial_point_E[0], initial_point_E[1],10000000000,1e-14,ejer1_2=True)
# Mostramos por pantalla los datos más relevantes de aplicar el algoritmo a la función E
print ('Función E')
print ('Punto inicial: (', initial_point_E[0], ', ', initial_point_E[1], ')' )
print ('Numero de iteraciones: ', it)
print ('Coordenadas obtenidas: (', w[0], ', ', w[1],')')
print ('Valor mínimo en estas coordenadas: ', E(w[0], w[1]), '\n')
# Creamos una gráfica con los valores de Z para cada una de las iteraciones
valores_z = []
for punto in points2min:
valores_z.append(E(punto[0], punto[1]))
figura = 'Ejercicio 1.2. Valor de Z en las distintas iteraciones del algoritmo'
titulo = 'Punto inicial: ('+ str(initial_point_E[0])+ ', '+ str(initial_point_E[1])+ ')'
subtitulo = 'Función E'
plt.figure(figura)
plt.title(titulo)
plt.suptitle(subtitulo)
plt.xlabel('iteraciones')
plt.ylabel('z')
plt.plot(valores_z)
plt.show()
"""
Creamos una figura 3D donde pintaremos la función para un conjunto de valores
"""
# Tomamos 50 valores entre [-30,30] para la representación del gráfico
x = np.linspace(-30, 30, 50)
y = np.linspace(-30, 30, 50)
X, Y = np.meshgrid(x, y)
# Calculamos los valores de z para los (x,y) obtenidos antes
Z = E(X, Y) #E_w([X, Y])
# Creamos la figura 3D y la dibujamos
figura = 'Ejercicio 1.2. Representacion 3D de la función E'
fig = plt.figure(figura)
ax = Axes3D(fig)
surf = ax.plot_surface(X, Y, Z, edgecolor='none', rstride=1, cstride=1, cmap='jet', alpha=0.5)
"""
Dibujamos el punto mínimo encontrado como una estrella roja,
los puntos intermedios como puntos verdes
y el punto inicial como una estrella negra
"""
min_point = np.array([w[0],w[1]])
min_point_ = min_point[:, np.newaxis]
ini_point = np.array([initial_point_E[0], initial_point_E[1]])
ini_point_ = ini_point[:, np.newaxis]
ax.plot(ini_point_[0], ini_point_[1], E(ini_point_[0], ini_point_[1]), 'r*', c='black')
for punto in points2min:
point = np.array([punto[0], punto[1]])
point_ = point[:, np.newaxis]
ax.plot(point_[0], point_[1], E(point_[0], point_[1]), '.', c='green')
ax.plot(min_point_[0], min_point_[1], E(min_point_[0], min_point_[1]), 'r*', c='red')
# Ponemos título y nombre a los ejes de la gráfica
ax.set(title='Punto inicial: (' + str(initial_point_E[0]) + ', ' + str(initial_point_E[1]) + ')')
ax.set_xlabel('u')
ax.set_ylabel('v')
ax.set_zlabel('E(u,v)')
# Imprimimos por pantalla el resultado
plt.show()
input("\n--- Pulsar intro para continuar con el ejercicio 1.3 a) ---\n")
################################################################################################
###################################### 1.3 a) ##################################################
################################################################################################
"""
Vamos a comparar los resultados obtenidos aplicando el gradiente descendente al punto inicial (0.1,0.1)
con un learning-rate de 0.1 y 0.01
"""
#Inicializamos las columnas que indicarán el punto de inicio y el learning-rate utilizado en cada caso
columna1 = [[0.1,0.1],[0.1,0.1]]
columna2 = []
columna3 = []
columna4 = [0.01,0.1]
columna5 = []
columna6 = []
# Aplicamos el algoritmo para un lr=0.01 y almacenamos los resultados obtenidos en la tabla
w, it, points2min = gradient_descent(F,gradF,0.1,0.1,50)
columna2.append(w[0])
columna3.append(w[1])
columna5.append(F(w[0],w[1]))
columna6.append(it)
# Creamos una gráfica con los valores de Z para cada una de las iteraciones
valores_z = []
for punto in points2min:
valores_z.append(F(punto[0], punto[1]))
figura = 'Ejercicio 1.3 a). Valor de Z para lr = 0.01'
titulo = 'Punto inicial: (0.1, 0.1)'
subtitulo = 'Función F'
plt.figure(figura)
plt.title(titulo)
plt.suptitle(subtitulo)
plt.xlabel('iteraciones')
plt.ylabel('z')
plt.plot(valores_z)
plt.show()
# Realizamos lo mismo pero para un lr=0.1
w, it, points2min = gradient_descent(F,gradF,0.1,0.1,50,learning_rate=0.1, ejer1_3a=True)
columna2.append(w[0])
columna3.append(w[1])
columna5.append(F(w[0],w[1]))
columna6.append(it)
# Creamos una gráfica con los valores de Z para cada una de las iteraciones
valores_z = []
for punto in points2min:
valores_z.append(F(punto[0], punto[1]))
figura = 'Ejercicio 1.3 a). Valor de Z para lr = 0.1'
titulo = 'Punto inicial: (0.1, 0.1)'
subtitulo = 'Función F'
plt.figure(figura)
plt.title(titulo)
plt.suptitle(subtitulo)
plt.xlabel('iteraciones')
plt.ylabel('z')
plt.plot(valores_z)
plt.show()
# Creamos la tabla con los rersultados almacenados anteriormente y la imprimimos
dict_tabla = {'Initial Point':columna1, 'u':columna2, 'v':columna3, 'lr': columna4,
'F(u,v)':columna5, 'iteraciones':columna6}
dataframe = pd.DataFrame(dict_tabla)
print(' Tabla de datos con función F\n')
print(dataframe)
print('\n\n')
input("\n--- Pulsar intro para continuar con el ejercicio 1.3 b) ---\n")
################################################################################################
###################################### 1.3 b) ##################################################
################################################################################################
# Creamos una tabla donde almacenaremos los distintos resultados del algoritmo dependiendo de nuestro punto de partida
# La crearemos como un objeto 'pandas' al que le pasaremos las columnas en el siguiente orden:
# punto incial - u - v - learning-rate - f(u,v) - iteraciones
columna1 = [[0.1,0.1],[1.0,1.0],[-0.5,-0.5],[-1,-1],[22.0,22.0]]
columna2 = []
columna3 = []
columna4 = [0.01, 0.01, 0.01, 0.01, 0.01]
columna5 = []
columna6 = []
# Realizamos el algoritmo para una lista de puntos iniciales
for initial_point_F in ([0.1,0.1],[1.0,1.0],[-0.5,-0.5],[-1,-1],[22.0,22.0]):
"""
Realizamos el algoritmo del Gradiente Descendiente para la función F
partiendo desde los puntos ([0.1,0.1],[1,1],[-0.5,-0.5],[-1,-1], [22.0,22.0])
He añadido el punto (22.0, 22.0) para obtener una gráfica en la que se vea más
claramente el dibujo de los distintos puntos calculados hasta llegar al mínimo
Como tope de iteraciones indicamos 50
En w guardamos las coordenadas (x,y) del punto con z mínimo alcanzado
En it almacenamos el número de iteraciones que han sido necesarias para calcular w
En points2min guardamos la secuencia de (x,y) que se ha ido generando hasta llegar a w
"""
w, it, points2min = gradient_descent(F,gradF,initial_point_F[0], initial_point_F[1],50)
# Incluimos en la tabla los resultados obtenidos
####tabla.append([tuple(initial_point_F), w[0],w[1],F(w[0], w[1])])
columna2.append(w[0])
columna3.append(w[1])
columna5.append(F(w[0],w[1]))
columna6.append(it)
"""
Mostramos por pantalla los datos más relevantes de aplicar el algoritmo a la función F
con punto inicial initial_point_F
"""
print ('Función F')
print ('Punto inicial: (', initial_point_F[0], ', ', initial_point_F[1], ')' )
print ('Numero de iteraciones: ', it)
print ('Coordenadas obtenidas: (', w[0], ', ', w[1],')')
print ('Valor mínimo en estas coordenadas: ', F(w[0], w[1]), '\n\n')
# Creamos una gráfica con los valores de Z para cada una de las iteraciones
valores_z = []
for punto in points2min:
valores_z.append(F(punto[0], punto[1]))
figura = 'Ejercicio 1.3. Valor de Z en las distintas iteraciones del algoritmo'
titulo = 'Punto inicial: ('+ str(initial_point_F[0])+ ', '+ str(initial_point_F[1])+ ')'
subtitulo = 'Función F'
plt.figure(figura)
plt.title(titulo)
plt.suptitle(subtitulo)
plt.xlabel('iteraciones')
plt.ylabel('z')
plt.plot(valores_z)
plt.show()
"""
Creamos una figura 3D donde pintaremos la función para un conjunto de valores
"""
# Tomamos 50 valores entre [-30,30] para la representación del gráfico
x = np.linspace(-30, 30, 50)
y = np.linspace(-30, 30, 50)
X, Y = np.meshgrid(x, y)
# Calculamos los valores de z para los (x,y) obtenidos antes
Z = F(X, Y) #F_w([X, Y])
# Creamos la figura 3D y la dibujamos
figura = 'Ejercicio 1.3. Representacion 3D de la función F'
fig = plt.figure(figura)
ax = Axes3D(fig)
surf = ax.plot_surface(X, Y, Z, edgecolor='none', rstride=1, cstride=1, cmap='jet', alpha=0.5)
"""
Dibujamos el punto mínimo encontrado como una estrella roja,
los puntos intermedios como puntos verdes
y el punto inicial como una estrella negra
"""
min_point = np.array([w[0],w[1]])
min_point_ = min_point[:, np.newaxis]
ini_point = np.array([initial_point_F[0], initial_point_F[1]])
ini_point_ = ini_point[:, np.newaxis]
ax.plot(ini_point_[0], ini_point_[1], F(ini_point_[0], ini_point_[1]), 'r*', c='black')
for punto in points2min:
point = np.array([punto[0], punto[1]])
point_ = point[:, np.newaxis]
ax.plot(point_[0], point_[1], F(point_[0], point_[1]), '.', c='green')
ax.plot(min_point_[0], min_point_[1], F(min_point_[0], min_point_[1]), 'r*', c='red')
# Ponemos título y nombre a los ejes de la gráfica
ax.set(title='Punto inicial: (' + str(initial_point_F[0]) + ', ' + str(initial_point_F[1]) + ')')
ax.set_xlabel('u')
ax.set_ylabel('v')
ax.set_zlabel('F(u,v)')
# Imprimimos por pantalla el resultado
plt.show()
input("\n--- Pulsar intro para continuar ---\n")
dict_tabla = {'Initial Point':columna1, 'u':columna2, 'v':columna3, 'lr': columna4,
'F(u,v)':columna5, 'iteraciones':columna6}
dataframe = pd.DataFrame(dict_tabla)
print(' Tabla de datos con función F\n')
print(dataframe)
input("\n--- Pulsar intro para continuar con el ejercicio 2 ---\n")
###############################################################################
############################### EJERCICIO 2.1 #################################
###############################################################################
print('EJERCICIO SOBRE REGRESION LINEAL\n')
print('Ejercicio 2.1\n')
label5 = 1
label1 = -1
# Funcion para leer los datos
def readData(file_x, file_y):
    """Load a digit dataset, keeping only the samples of classes 1 and 5.

    Parameters:
        file_x -- .npy file with the feature rows.
        file_y -- .npy file with the class (digit) of each row.
    Returns (x, y): x carries a leading bias column of ones followed by the
    two features of each kept sample; y holds label5 for the 5s and label1
    for the 1s.  Both are float64 arrays.
    """
    raw_x = np.load(file_x)
    raw_y = np.load(file_y)
    samples = []
    targets = []
    for i in range(raw_y.size):
        digit = raw_y[i]
        # Discard every class except the two of interest.
        if digit != 1 and digit != 5:
            continue
        targets.append(label5 if digit == 5 else label1)
        samples.append(np.array([1, raw_x[i][0], raw_x[i][1]]))
    return np.array(samples, np.float64), np.array(targets, np.float64)
# Funcion para calcular el error
def Err(x,y,w):
    """Mean squared error of the linear model w over the sample (x, y).

    Computes mean((x @ w - y)^2): the squared gap between the model's
    prediction for each row of x and its true target, averaged over the
    number of rows of x.
    """
    residuals = np.dot(x, w) - y          # per-sample prediction error
    total = np.sum(residuals ** 2)        # sum of squared errors
    return total / len(x)                 # average over the sample
# Gradiente Descendente Estocastico
def sgd(X,Y,epsilon = 1e-14, lr = 0.001):
    """
    Stochastic Gradient Descent for a linear model.

    Parameters:
        X -- sample matrix, one row per example (first column is the bias term)
        Y -- target value for each row of X
        epsilon -- minimum per-epoch error improvement that keeps the loop
                   running (default 1e-14)
        lr -- learning rate (default 0.001)
    Returns the weight vector found when the error stops improving.
    """
    size_of_x = len(X)  # number of rows of X (number of samples)
    minibatch_size = 64  # fixed minibatch size
    minibatch_num = size_of_x // minibatch_size  # whole minibatches that fit in X
    cols_of_x = len(X[0])  # number of columns of X (number of features)
    error_antiguo = 999.0  # start high enough that the first improvement check passes
    continuar = True  # loop flag
    w_epoca_actual = np.zeros(cols_of_x)
    # Glue samples and labels together so they can be shuffled in lockstep.
    matriz_completa = np.c_[X,Y]
    # Iterate while the per-epoch error keeps improving by more than epsilon.
    while(continuar):
        # Reshuffle so each epoch sees different minibatches.
        np.random.shuffle(matriz_completa)
        # Split back into labels and sample data.
        # NOTE(review): the column indices are hard-coded for a 3-feature
        # design matrix (cols_of_x == 3) — confirm before reusing with
        # differently shaped data.
        etiquetas = matriz_completa[:,3]
        datos = matriz_completa[:,0:3]
        # Walk over every whole minibatch.
        for i in range(minibatch_num):
            # Update each weight (feature column) in turn.  The update is
            # sequential: h_x is recomputed with the weights already updated
            # earlier in this same pass, so the order of j matters.
            for j in range(cols_of_x):
                # Predictions of the current model over this minibatch.
                h_x = np.dot(datos[i*minibatch_size : (i+1)*minibatch_size, :],w_epoca_actual)
                # Residual between prediction and true label.
                diff = h_x - etiquetas[i*minibatch_size : (i+1)*minibatch_size]
                # Gradient contribution of feature j over the minibatch.
                mul = np.dot(datos[i*minibatch_size : (i+1)*minibatch_size , j], diff)
                # Fold the contributions into a single scalar.
                sumatoria = np.sum(mul)
                # Gradient step on weight j.
                w_epoca_actual[j] = w_epoca_actual[j] - lr*sumatoria
        """
        If the number of rows of X is not a multiple of the minibatch size,
        the loop above leaves some samples unused; this branch processes them,
        concatenating the leftover rows with the same number of rows taken
        from the start of the (shuffled) sample.
        """
        if size_of_x % minibatch_size != 0 and size_of_x > minibatch_size:
            # Leftover count, then build the padded batch.
            # NOTE(review): the pad adds `restantes` wrap-around rows, so the
            # batch has 2*restantes rows rather than minibatch_size — verify
            # this is intended.
            n = minibatch_num*minibatch_size
            restantes = size_of_x - n
            a = np.r_[ datos[n : size_of_x, :], datos[0:restantes, :] ]
            b = np.r_[ etiquetas[n : size_of_x], etiquetas[0:restantes] ]
            for j in range(cols_of_x):
                h_x = np.dot(a,w_epoca_actual)
                diff = h_x - b
                mul = np.dot(a[:, j], diff)
                sumatoria = np.sum(mul)
                w_epoca_actual[j] = w_epoca_actual[j] - lr*sumatoria
        # Error over the shuffled sample after this epoch.
        error = Err(datos,etiquetas,w_epoca_actual)
        # Keep going while the improvement is still significant...
        if(error_antiguo - error > epsilon):
            error_antiguo = error
        # ...otherwise stop.
        else:
            continuar = False
    return w_epoca_actual
# Pseudoinversa
def pseudoinverse(X,Y):
    """Closed-form linear-regression weights via the Moore-Penrose pseudoinverse.

    Parameters:
        X -- sample matrix whose linear combination must approximate Y
        Y -- target values
    Returns the weight vector w = pinv(X) @ Y.
    """
    # numpy computes the pseudoinverse; one matrix product yields the weights.
    return np.dot(np.linalg.pinv(X), Y)
# Lectura de los datos de entrenamiento
x, y = readData('datos/X_train.npy', 'datos/y_train.npy')
# Lectura de los datos para el test
x_test, y_test = readData('datos/X_test.npy', 'datos/y_test.npy')
#Imprimimos la matrix de muestra
print(x)
#Imprimimos el vector de valores reales a alcanzar
print(y)
# Calculamos el vector de pesos W por medio del Gradiente Descendente Estocástico
w_sgd = sgd(x,y)
print ('Bondad del resultado para grad. descendente estocastico:\n')
print ("Ein: ", Err(x,y,w_sgd))
print ("Eout: ", Err(x_test, y_test, w_sgd))
# Calculamos el vector de pesos W por medio de la pseudoinversa de X
w_pinv = pseudoinverse(x,y)
print ('\n\n\nBondad del resultado para pseudoinversa de X:\n')
print ("Ein: ", Err(x,y,w_pinv))
print ("Eout: ", Err(x_test, y_test, w_pinv))
#Ponemos un título a la figura
figura = 'Ejercicio 2.1. Representacion 3D de las soluciones obtenidas con los datos usados en el ajuste'
# cremaos la figura
fig = plt.figure(figura)
ax = Axes3D(fig)
#Preparamos los datos para poder representarlos
x_11_ = np.array(x[np.where(y==1),1].T) # Valores de la columna 1 de x cuyo target es 1
x_1_1 = np.array(x[np.where(y==-1),1].T) # Valores de la columna 1 de x cuyo target es -1
x_21_ = np.array(x[np.where(y==1),2].T) # Valores de la columna 2 de x cuyo target es 1
x_2_1 = np.array(x[np.where(y==-1),2].T) # Valores de la columna 3 de x cuyo target es -1
#y_r1 = np.array(y[np.where(y==1)]) # Lista de targets == 1
#y_r_1 = np.array(y[np.where(y==-1)]) # Lista de targets == -1
y_ = x.dot(w_pinv) # Calculos de los targets de X a traves del vector de pesos W
y1_ = np.array(y_[np.where(y==1)])
y_1 = np.array(y_[np.where(y==-1)])
# Pintamos los puntos con target == 1 de rojo y los de target == -1 de cian
ax.plot(x_11_, x_21_, y1_, '.', c='r')
ax.plot(x_1_1, x_2_1, y_1, '.', c='c')
#ax.plot(x_11_, x_21_, y_r1, '.', c='C1', alpha=0.3)
#ax.plot(x_1_1, x_2_1, y_r_1, '.', c='C1', alpha=0.3)
# Ponemos título y nombre a los ejes de la gráfica
ax.set(title='Representacion 3D de las soluciones obtenidas con los datos usados en el ajuste')
ax.set_xlabel('x_1')
ax.set_ylabel('x_2')
ax.set_zlabel('y')
# Imprimimos por pantalla el resultado
plt.show()
"""
Dibujamos en un diagrama de puntos la muestra y la separamos por medio de la recta:
y = w[0] + w[1]*x1 + w[2]*x2
Los puntos que coincidan con una etiqueta igual a 1 los pintaremos de rojo mientras que los
que tengan una etiqueta = -1 serán azul cian
Aplicaremos transparencia a estos puntos para ver más claramente la densidad de puntos
Dibujaremos las rectas asociadas a la regresión:
De color azul la correspondiente al SGD
De color magenta la correspondiente a la Pseudoinversa
"""
# Lo hacemos con W calculado con el gradiente descendente estocástico
plt.scatter(x[np.where(y==1),1], x[np.where(y==1),2], c='r', alpha=0.5)
plt.scatter(x[np.where(y==-1),1], x[np.where(y==-1),2], c='c', alpha=0.5)
plt.plot([0.0,1.0],[-w_sgd[0]/w_sgd[2], (-w_sgd[0]-w_sgd[1])/w_sgd[2]], c='dodgerblue')
plt.plot([0.0,1.0],[-w_pinv[0]/w_pinv[2], (-w_pinv[0]-w_pinv[1])/w_pinv[2]], c='magenta')
# Esrablecemos un título a la gráfica
plt.title(u'Gráfica de regresión lineal. Pesos calculados con SGD')
# La imprimimos
plt.show()
input("\n--- Pulsar tecla para continuar al ejercicio 2.2 a) ---\n")
###############################################################################
############################### EJERCICIO 2.2 #################################
###############################################################################
print('Ejercicio 2.2 a)\n')
# Simula datos en un cuadrado [-size,size]x[-size,size]
def simula_unif(N, d, size):
    """Draw N points uniformly at random from the hypercube [-size, size]^d."""
    return np.random.uniform(low=-size, high=size, size=(N, d))
###############################################################################
############################## EJERCICIO 2.2 a) ###############################
###############################################################################
# Exercise 2.2 a): size and dimensionality of the uniform point cloud.
num_muestras = 1000
dimension = 2
size = 1
# Draw num_muestras 2-D points uniformly from the square [-1, 1] x [-1, 1].
points = simula_unif(num_muestras, dimension, size)
# Scatter-plot the sampled points.
plt.plot(points[:,0], points[:,1], '.', c='c')
plt.show()
input("\n--- Pulsar tecla para continuar al ejercicio 2.2 b) ---\n")
###############################################################################
############################## EJERCICIO 2.2 b) ###############################
###############################################################################
# Declaramos la función f que usaremos para asignar una atiqueta a cada punto anterior
def f_sign(x1, x2):
    """Label a 2-D point by the sign of (x1 - 0.2)^2 + x2^2 - 0.6."""
    value = (x1 - 0.2) ** 2 + x2 ** 2 - 0.6
    return np.sign(value)
# Work out 10% of the sample: how many labels will be flipped as noise.
porcentaje = 10
proporcion = porcentaje/100
num_muestras_a_alterar = int(proporcion*num_muestras)
# Label every sampled point with f_sign.
etiquetas = f_sign(points[:,0], points[:,1])
# Randomly choose (without replacement) the points whose label will be flipped.
indices_alterar = rnd.sample(range(num_muestras),num_muestras_a_alterar)
# Attach each label to its coordinates: rows are (x1, x2, label).
matriz = np.c_[points,etiquetas]
# Split by label so each class gets its own colour.
matriz_positiva = matriz[matriz[:,2]==1]
matriz_negativa = matriz[matriz[:,2]==-1]
# Scatter-plot the labelled sample.
plt.plot(matriz_positiva[:,0], matriz_positiva[:,1], '.', c='c')
plt.plot(matriz_negativa[:,0], matriz_negativa[:,1], '.', c='r')
plt.show()
# Flip the label of 10% of the sample.
# NOTE(review): matriz_alterada is the SAME array object as matriz (no copy),
# so matriz is modified in place as well.
matriz_alterada = matriz
for i in indices_alterar:
    matriz_alterada[i,2] = -matriz_alterada[i,2]
# Recompute the per-label views after the noise was injected.
matriz_positiva = matriz[matriz[:,2]==1]
matriz_negativa = matriz[matriz[:,2]==-1]
# Plot the noisy sample.
plt.plot(matriz_positiva[:,0], matriz_positiva[:,1], '.', c='c')
plt.plot(matriz_negativa[:,0], matriz_negativa[:,1], '.', c='r')
plt.show()
input("\n--- Pulsar tecla para continuar al ejercicio 2.2 c) ---\n")
###############################################################################
############################## EJERCICIO 2.2 c) ###############################
###############################################################################
# Build the regression sample from the random 2-D points.
muestra = points
# The targets are the (noisy) signs computed in part b).
etiqueta_real = matriz_alterada[:,2]
# Column of ones so W gets an intercept (independent) term.
independiente = np.ones_like(points[:,0])
# Prepend the bias column to the 2-D points to finish the design matrix.
muestra = np.c_[independiente, muestra]
# Fit the weights W with Stochastic Gradient Descent.
W = sgd(muestra, etiqueta_real)
# Ein comes out high: given how the labels are distributed, a linear model
# cannot fit this sample well.
print ('Bondad del resultado para grad. descendente estocastico:\n')
print ("Ein: ", Err(muestra, etiqueta_real, W))
input("\n--- Pulsar tecla para continuar al ejercicio 2.2 d) ---\n")
###############################################################################
############################## EJERCICIO 2.2 d) ###############################
###############################################################################
# DUPLICAMOS CÓDIGO PARA TENER PERFECTAMENTE SEPARADOS LOS APARTADOS DEL EJERCICIO
# Simula datos en un cuadrado [-size,size]x[-size,size]
def simula_unif(N, d, size):
    """Draw N points uniformly at random from the hypercube [-size, size]^d."""
    return np.random.uniform(low=-size, high=size, size=(N, d))
# Declaramos la función f que usaremos para asignar una atiqueta a cada punto anterior
def f_sign(x1, x2):
    """Label a 2-D point by the sign of (x1 - 0.2)^2 + x2^2 - 0.6."""
    value = (x1 - 0.2) ** 2 + x2 ** 2 - 0.6
    return np.sign(value)
# Accumulators for the average in-sample and out-of-sample errors.
Error_in_med = 0.0
Error_out_med = 0.0
iteraciones = 1000
# Repeat the whole experiment of parts a)-c) 1000 times.
for i in range(iteraciones):
    num_muestras = 1000
    dimension = 2
    size = 1
    # Fresh training sample of 1000 2-D points from [-1, 1] x [-1, 1].
    points = simula_unif(num_muestras, dimension, size)
    # Independent test sample of the same size, used to estimate Eout.
    test = simula_unif(num_muestras, dimension, size)
    # Plots are skipped here: too many iterations and they add nothing.
    # 10% of each sample will get its label flipped.
    porcentaje = 10
    proporcion = porcentaje/100
    num_muestras_a_alterar = int(proporcion*num_muestras)
    # Labels for the training points...
    etiquetas = f_sign(points[:,0], points[:,1])
    # ...and for the test points.
    etiquetas_test = f_sign(test[:,0], test[:,1])
    # Random indices to corrupt in the training sample...
    indices_alterar = rnd.sample(range(num_muestras),num_muestras_a_alterar)
    # ...and in the test sample.
    indices_alterar_test = rnd.sample(range(num_muestras),num_muestras_a_alterar)
    # Stack coordinates and labels: rows are (x1, x2, label).
    matriz = np.c_[points,etiquetas]
    matriz_test = np.c_[test,etiquetas_test]
    # Flip the labels of the chosen 10% in both samples.
    # NOTE(review): the SAME index list (indices_alterar) is applied to both
    # matrices below; indices_alterar_test is computed but never used —
    # likely an oversight worth confirming.
    matriz_alterada = matriz
    matriz_alterada_test = matriz_test
    for i in indices_alterar:
        matriz_alterada[i,2] = -matriz_alterada[i,2]
        matriz_alterada_test[i,2] = -matriz_alterada_test[i,2]
    # Build the design matrices: bias column of ones plus the 2-D points.
    muestra = points
    Test = test
    # Noisy targets for train and test.
    etiqueta_real = matriz_alterada[:,2]
    etiqueta_real_test = matriz_alterada_test[:,2]
    independiente = np.ones_like(points[:,0])
    independiente_test = np.ones_like(test[:,0])
    muestra = np.c_[independiente, muestra]
    Test = np.c_[independiente_test, Test]
    # Fit the weights with Stochastic Gradient Descent.
    W = sgd(muestra, etiqueta_real)
    # Ein is high because a linear model cannot separate this labelling.
    ei = Err(muestra, etiqueta_real, W)
    eo = Err(Test, etiqueta_real_test, W)
    # Accumulate to average after the loop.
    Error_in_med = Error_in_med + ei
    Error_out_med = Error_out_med + eo
    # Per-iteration errors are not printed to keep the run fast.
    # print ('Bondad del resultado para grad. descendente estocastico:\n')
    # print ("Ein: ", ei)
    # print ("Eout: ", eo)
# Average errors over all repetitions.
Error_in_med = Error_in_med/iteraciones
Error_out_med = Error_out_med/iteraciones
print ('\n\nError medio tras ' + str(iteraciones) + ' iteraciones:\n')
print ("Ein medio: ", Error_in_med)
print ("Eout medio: ", Error_out_med, '\n\n')
input("\n--- Pulsar tecla para continuar al ejercicio 3 ---\n")
###############################################################################
########################## BONUS - MÉTODO DE NEWTON ###########################
###############################################################################
"""
RECORDAMOS QUE CONTAMOS CON:
#Función 1.3 - u**2+2*v**2+2*sin(2*pi*u)*sin(2*pi*v)
def F(u,v):
return u**2+2*v**2+2*np.sin(2*np.pi*u)*np.sin(2*np.pi*v)
#Derivada parcial de F con respecto a u - 4*pi*sin(2*pi*v)*cos(2*pi*u)+2*u
def dFu(u,v):
return 4*np.pi*np.sin(2*np.pi*v)*np.cos(2*np.pi*u)+2*u
#Derivada parcial de F con respecto a v - 4*pi*sin(2*pi*u)*cos(2*pi*v)+4*v
def dFv(u,v):
return 4*np.pi*np.sin(2*np.pi*u)*np.cos(2*np.pi*v)+4*v
#Gradiente de F
def gradF(u,v):
return np.array([dFu(u,v), dFv(u,v)])
"""
def dFuu(u,v):
    """Second partial derivative of F with respect to u twice: d2F/du2."""
    oscillation = np.sin(2*np.pi*v) * np.sin(2*np.pi*u)
    return 2 - 8*np.pi**2 * oscillation
def dFuv(u,v):
    """Mixed second partial derivative of F: d2F/(du dv)."""
    oscillation = np.cos(2*np.pi*u) * np.cos(2*np.pi*v)
    return 8*np.pi**2 * oscillation
def dFvv(u,v):
    """Second partial derivative of F with respect to v twice: d2F/dv2."""
    oscillation = np.sin(2*np.pi*u) * np.sin(2*np.pi*v)
    return 4 - 8*np.pi**2 * oscillation
def dFvu(u,v):
    """Mixed second partial derivative of F: d2F/(dv du)."""
    oscillation = np.cos(2*np.pi*v) * np.cos(2*np.pi*u)
    return 8*np.pi**2 * oscillation
def Hessian(u,v):
    """2x2 Hessian matrix of F evaluated at (u, v).

    Rows are [d2F/du2, d2F/dudv] and [d2F/dvdu, d2F/dv2], built from the
    second-derivative helpers defined above.
    """
    row_u = np.array([dFuu(u,v), dFuv(u,v)])
    row_v = np.array([dFvu(u,v), dFvv(u,v)])
    return np.array([row_u, row_v])
def NewtonsMethod(func,grad,u,v,maxIter,epsilon=1e-14, learning_rate = 0.1):
    """
    Newton's method for minimising func.

    Parameters:
        func -- function of (u, v) to minimise
        grad -- gradient of func
        u, v -- coordinates of the starting point
        maxIter -- maximum number of iterations
        epsilon -- minimum improvement in z that keeps iterating (default 1e-14)
        learning_rate -- damping factor for the Newton step (default 0.1)
    Returns (w, it, points2min): the final point, the iterations used and
    every intermediate point visited.
    """
    # Iteration counter
    it = 0
    # Every approximation the algorithm produces, in order
    points2min = []
    """
    last_z keeps the previous value of z so the loop can stop when the
    improvement becomes negligible; continuar flags when to leave the loop;
    w holds the coordinates of the current minimum candidate.
    """
    continuar = True
    last_z=np.Inf
    w=[u,v]
    """
    Keep computing new points until the improvement drops below epsilon
    or the iteration budget runs out.
    """
    while it < maxIter and continuar:
        # Gradient at the current point
        _pend = grad(w[0],w[1])
        # Hessian matrix at the current point, inverted.
        # NOTE(review): this calls the module-level Hessian helper (second
        # derivatives of F), so the method is only valid for func=F, grad=gradF.
        H_inv = np.linalg.inv( Hessian(w[0],w[1]) )
        # Newton step, damped by the learning rate
        w = w - learning_rate*(np.dot(H_inv, _pend))
        # Record the new point in the trajectory
        points2min.append( [ w[0],w[1] ] )
        # "Height" of the new point
        new_z = func(w[0],w[1])
        # Continue only while the improvement is still larger than epsilon
        if last_z - new_z > epsilon:
            last_z = new_z
        else:
            continuar = False
        # One more iteration done
        it = it+1
    # Return the minimum found, the iteration count and the full trajectory
    return w, it, points2min
# Creamos una tabla donde almacenaremos los distintos resultados del algoritmo dependiendo de nuestro punto de partida
# La crearemos como un objeto 'pandas' al que le pasaremos las columnas en el siguiente orden:
# punto incial - u - v - lr - f(u,v) - it
columna1 = [[0.1,0.1],[1.0,1.0],[-0.5,-0.5],[-1,-1]]
columna2 = []
columna3 = []
columna4 = [0.1, 0.1, 0.1, 0.1]
columna5 = []
columna6 = []
# Realizamos el algoritmo para una lista de puntos iniciales
for initial_point_F in ([0.1,0.1],[1.0,1.0],[-0.5,-0.5],[-1,-1]):
"""
Realizamos el algoritmo del Gradiente Descendiente para la función F
partiendo desde los puntos ([0.1,0.1],[1,1],[-0.5,-0.5],[-1,-1])
Como tope de iteraciones indicamos 50
En w guardamos las coordenadas (x,y) del punto con z mínimo alcanzado
En it almacenamos el número de iteraciones que han sido necesarias para calcular w
En points2min guardamos la secuencia de (x,y) que se ha ido generando hasta llegar a w
"""
w, it, points2min = NewtonsMethod(F,gradF,initial_point_F[0], initial_point_F[1],50)
# Incluimos en la tabla los resultados obtenidos
####tabla.append([tuple(initial_point_F), w[0],w[1],F(w[0], w[1])])
columna2.append(w[0])
columna3.append(w[1])
columna5.append(F(w[0],w[1]))
columna6.append(it)
"""
Mostramos por pantalla los datos más relevantes de aplicar el algoritmo a la función F
con punto inicial initial_point_F
"""
print ('Función F')
print ('Punto inicial: (', initial_point_F[0], ', ', initial_point_F[1], ')' )
print ('Numero de iteraciones: ', it)
print ('Coordenadas obtenidas: (', w[0], ', ', w[1],')')
print ('Valor mínimo en estas coordenadas: ', F(w[0], w[1]), '\n\n')
# Creamos una gráfica con los valores de Z para cada una de las iteraciones
valores_z = []
for punto in points2min:
valores_z.append(F(punto[0], punto[1]))
figura = 'Ejercicio 3. Valor de Z en las distintas iteraciones del algoritmo'
titulo = 'Punto inicial: ('+ str(initial_point_F[0])+ ', '+ str(initial_point_F[1])+ ')'
subtitulo = 'Función F'
plt.figure(figura)
plt.title(titulo)
plt.suptitle(subtitulo)
plt.xlabel('iteraciones')
plt.ylabel('z')
plt.plot(valores_z)
plt.show()
"""
Creamos una figura 3D donde pintaremos la función para un conjunto de valores
"""
# Tomamos 50 valores entre [-30,30] para la representación del gráfico
x = np.linspace(-30, 30, 50)
y = np.linspace(-30, 30, 50)
X, Y = np.meshgrid(x, y)
# Calculamos los valores de z para los (x,y) obtenidos antes
Z = F(X, Y) #F_w([X, Y])
# Creamos la figura 3D y la dibujamos
figura = 'Ejercicio 1.3. Representacion 3D de la función F'
fig = plt.figure(figura)
ax = Axes3D(fig)
surf = ax.plot_surface(X, Y, Z, edgecolor='none', rstride=1, cstride=1, cmap='jet', alpha=0.5)
"""
Dibujamos el punto mínimo encontrado como una estrella roja,
los puntos intermedios como puntos verdes
y el punto inicial como una estrella negra
"""
min_point = np.array([w[0],w[1]])
min_point_ = min_point[:, np.newaxis]
ini_point = np.array([initial_point_F[0], initial_point_F[1]])
ini_point_ = ini_point[:, np.newaxis]
ax.plot(ini_point_[0], ini_point_[1], F(ini_point_[0], ini_point_[1]), 'r*', c='black')
for punto in points2min:
point = np.array([punto[0], punto[1]])
point_ = point[:, np.newaxis]
ax.plot(point_[0], point_[1], F(point_[0], point_[1]), '.', c='green')
ax.plot(min_point_[0], min_point_[1], F(min_point_[0], min_point_[1]), 'r*', c='red')
# Ponemos título y nombre a los ejes de la gráfica
ax.set(title='Punto inicial: (' + str(initial_point_F[0]) + ', ' + str(initial_point_F[1]) + ')')
ax.set_xlabel('u')
ax.set_ylabel('v')
ax.set_zlabel('F(u,v)')
# Imprimimos por pantalla el resultado
plt.show()
input("\n--- Pulsar intro para continuar ---\n")
dict_tabla = {'Initial Point':columna1, 'u':columna2, 'v':columna3, 'lr': columna4,
'F(u,v)':columna5, 'iteraciones':columna6}
dataframe = pd.DataFrame(dict_tabla)
print(' Tabla de datos con función F\n')
print(dataframe)
input("\n--- Finalizar ---\n")
| JJavier98/AA | PRACTICAS/P1/Template/Práctica1.py | Práctica1.py | py | 40,408 | python | es | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.seed",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.e",
"line_number": 26,
"usage_type": "attribute"
},
{
"api_name": "numpy.e",
"line_... |
24351569490 | import requests
from main import Position
url = 'http://127.0.0.1:8000/positions/'
def test_positions():
    """POST two sample positions to /positions/ and verify the balanced result."""
    payload = [p.dict() for p in (Position(cm=100, gp=20), Position(cm=100, gp=80))]
    expected = [
        {'cp': 50.0, 'cm': 100.0, 'gp': 20.0, 'gm': 40.0, 'diff': -60.0},
        {'cp': 50.0, 'cm': 100.0, 'gp': 80.0, 'gm': 160.0, 'diff': 60.0},
    ]
    response = requests.post(url, json=payload)
    assert response.json() == expected
| LuisIllig/savings-balancer | test_positions.py | test_positions.py | py | 466 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "main.Position",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "requests.post",
"line_number": 11,
"usage_type": "call"
}
] |
41209016704 | import threading, time
import multiprocessing
import socket
import json
import os
from kafka import KafkaConsumer, KafkaProducer, TopicPartition
from kafka.common import LeaderNotAvailableError
from kafka.errors import KafkaError
'''
KAFKA BASICS
describe a topic
/usr/local/kafka/bin/kafka-topics.sh --describe --zookeeper localhost:2181 --topic test
creating a topic
/usr/local/kafka/bin/kafka-topics.sh --create --zookeeper localhost:2181 --replication-factor 1 --partitions 2 --topic test
For replication factor to be > 1, there has to be more than 1 kafka broker
deleting a topic
/usr/local/kafka/config
delete.topic.enable=true
/usr/local/kafka/bin/kafka-topics.sh --zookeeper localhost:2181 --delete --topic test
python kafka package DOCUMENTATION
http://kafka-python.readthedocs.io/en/master/apidoc/KafkaConsumer.html
http://kafka-python.readthedocs.io/en/master/apidoc/kafka.producer.html
REFERENCES
http://www.giantflyingsaucer.com/blog/?p=5541
https://pythonexample.com/code/python-kafka-consumer-record/
https://community.hortonworks.com/articles/74077/kafka-producer-sample-code-in-scala-and-python.html
https://www.confluent.io/blog/introduction-to-apache-kafka-for-python-programmers/
http://www.giantflyingsaucer.com/blog/?p=5541
https://github.com/dpkp/kafka-python/blob/master/example.py
KAFKA Installation
http://davidssysadminnotes.blogspot.in/2016/01/installing-apache-kafka-and-zookeeper.html
KAFKA GUI Tool
https://github.com/HomeAdvisor/Kafdrop
cd Kafdrop
mvn clean package
java -jar <path to folder>/Kafdrop/target/kafdrop-2.0.0.jar --zookeeper.connect=<zookeeper-host>:2181 --server.port=9090
goto http://localhost:9090/ to see the UI
vi /etc/systemd/system/kafdrop.service
[Unit]
Description=kafkadrop UI for Apache Kakfa server (broker)
Documentation=https://github.com/HomeAdvisor/Kafdrop
Requires=network.target remote-fs.target
After=network.target remote-fs.target zookeeper.service kafka.service
[Service]
Type=simple
User=my_user
Group=my_group
Environment=JAVA_HOME=/opt/jdk1.8.0_121/
ExecStart=/usr/bin/env "${JAVA_HOME}/bin/java" -jar /opt/Kafdrop/target/kafdrop-2.0.0.jar --zookeeper.connect=kakfa-server:2181 --server.port=9090
[Install]
WantedBy=multi-user.target
:wq
systemctl daemon-reload
service kafdrop start
'''
class BatchProducer(threading.Thread):
    """Thread-based helper that publishes JSON-serialized messages to Kafka.

    NOTE(review): no run() override is defined, so start() would run the
    default no-op Thread.run(); callers appear to use send() directly.
    """

    def __init__(self, config_file, db_credentials, db_host):
        """Store connection settings.

        `db_credentials[db_host]` must provide a 'bootstrap_servers' entry.
        """
        threading.Thread.__init__(self)
        self.config_file = config_file
        self.stop_event = threading.Event()
        self.db_credentials = db_credentials
        self.db_host = db_host
        self.bootstrap_servers = self.db_credentials[self.db_host]['bootstrap_servers']
        return

    def stop(self):
        """Signal the thread to stop."""
        self.stop_event.set()
        return

    def send(self, topic_name, message, topic_partition=-1):
        """Publish `message` (JSON-serialized) to `topic_name`.

        topic_partition == -1 (the default) lets Kafka pick the partition.
        Returns '' on success or the broker error message on failure.
        """
        producer = KafkaProducer(bootstrap_servers=self.bootstrap_servers,
                                 value_serializer=lambda v: json.dumps(v).encode('utf-8'))
        exception = ""

        def _publish():
            # Single send path shared by the first attempt and the retry
            # (the original duplicated these four lines in three places).
            if topic_partition == -1:
                producer.send(topic_name, message)
            else:
                producer.send(topic_name, message, partition=topic_partition)

        try:
            _publish()
        except LeaderNotAvailableError:
            # https://github.com/mumrah/kafka-python/issues/249
            time.sleep(1)
            try:
                _publish()
            except KafkaError as ex:
                # Fix: in the original, an error on the retry escaped uncaught
                # and the method raised instead of returning the error string.
                print(ex)
                exception = str(ex)
        except KafkaError as ex:
            print(ex)
            exception = str(ex)
        finally:
            producer.close()  # close() flushes any pending async sends
        return exception
class BatchConsumer(multiprocessing.Process):
    """Process-based helper that drains Kafka topics into JSON-lines text files.

    NOTE(review): no run() override is defined; callers appear to use
    connect()/receive() directly rather than start().
    """

    def __init__(self, config_file, db_credentials, db_host):
        """Store connection settings.

        `db_credentials[db_host]` must provide a 'bootstrap_servers' entry.
        """
        multiprocessing.Process.__init__(self)
        self.stop_event = multiprocessing.Event()
        self.config_file = config_file
        self.db_credentials = db_credentials
        self.db_host = db_host
        self.bootstrap_servers = self.db_credentials[self.db_host]['bootstrap_servers']
        return

    def stop(self):
        """Signal the process to stop."""
        self.stop_event.set()
        return

    def connect(self, group_id="", client_id="",
                auto_offset_reset='earliest', consumer_timeout_ms=1000):
        """Create self.consumer (a KafkaConsumer).

        Defaults: client_id = this host's name, group_id = the login user.
        Returns '' on success or the error message (self.consumer is then None).
        """
        client_id = socket.gethostname() if client_id == "" else client_id
        group_id = os.getlogin() if group_id == "" else group_id
        exception = ""
        try:
            self.consumer = KafkaConsumer(bootstrap_servers=self.bootstrap_servers,
                                          auto_offset_reset=auto_offset_reset,
                                          consumer_timeout_ms=consumer_timeout_ms,
                                          value_deserializer=lambda m: json.loads(m.decode('utf-8')),
                                          group_id=group_id,
                                          client_id=client_id,
                                          # offsets are committed explicitly via commit()
                                          enable_auto_commit=False
                                          )
        except KafkaError as ex:
            print(ex)
            exception = str(ex)
            self.consumer = None
        return exception

    @staticmethod
    def _message_as_dict(message):
        # Flatten a ConsumerRecord into a plain, JSON-serializable dict.
        return {
            "topic": message.topic,
            "partition": message.partition,
            "offset": message.offset,
            "key": message.key,
            "value": message.value,
            "timestamp": message.timestamp,
            "timestamp_type": message.timestamp_type,
        }

    def _dump_messages(self, out_path):
        # Drain the consumer (iteration ends after consumer_timeout_ms of
        # inactivity) into one file, one JSON document per line.
        # Fix: the original opened the file without ever closing it on error.
        with open(out_path, 'w') as out_file:
            for message in self.consumer:
                out_file.write(json.dumps(self._message_as_dict(message)) + "\n")

    def receive(self, topic_name, topic_partition=None, message_offset=None,
                out_dir='', out_file_name=''):
        """Consume `topic_name` and write each message as a JSON line.

        topic_partition=None reads every partition into '<name>_all.txt';
        otherwise only that partition is read (optionally seeking to
        `message_offset`) into '<name>_<partition>.txt'. Files default to
        $HOME/<db_host>_<topic_name>. Returns '' on success or the Kafka
        error message.
        """
        exception = ""
        consumer = self.consumer
        try:
            # Resolve output defaults once (the original re-checked out_dir
            # redundantly inside the per-partition branch).
            if out_dir == '':
                out_dir = os.environ['HOME']
            if out_file_name == '':
                out_file_name = str(self.db_host) + "_" + str(topic_name)

            if topic_partition is None:
                ''' Get all messages from all partitions '''
                consumer.subscribe(topic_name)
                self._dump_messages(out_dir + "/" + out_file_name + "_all.txt")
            else:
                consumer.assign([TopicPartition(topic_name, topic_partition)])
                ''' Start fetching data from partition '''
                # Fix: the original loop variable shadowed the
                # `topic_partition` parameter.
                for tp in consumer.assignment():
                    print("fetching results for {0} ...".format(tp))
                    if message_offset is not None:
                        print("setting offset to {0}".format(message_offset))
                        consumer.seek(partition=tp, offset=message_offset)
                    else:
                        print("last committed offset {0}".format(consumer.end_offsets([tp])[tp]))
                    consumer.poll()
                    self._dump_messages(out_dir + "/" + out_file_name + "_" + str(tp.partition) + ".txt")
        except KafkaError as ex:
            print(ex)
            exception = str(ex)
            consumer.close()
        except KeyboardInterrupt:
            pass
        return exception

    def commit(self):
        """Commit the consumer's current offsets to the broker."""
        self.consumer.commit()
        return

    def close(self):
        """Close the underlying Kafka consumer."""
        self.consumer.close()
        return
| kartikra/dattasa | dattasa/kafka_system.py | kafka_system.py | py | 8,807 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "threading.Thread",
"line_number": 79,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 81,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 81,
"usage_type": "attribute"
},
{
"api_name": ... |
72019823395 | from __future__ import unicode_literals
import re
import json
from .common import InfoExtractor
from ..compat import (
compat_str,
compat_urllib_parse,
compat_urllib_request,
compat_urlparse,
)
from ..utils import (
ExtractorError,
int_or_none,
parse_duration,
)
class PluralsightIE(InfoExtractor):
    """Extracts a single clip from the Pluralsight training player.

    Requires account credentials (via .netrc or command-line options);
    nothing is served anonymously.
    """
    IE_NAME = 'pluralsight'
    _VALID_URL = r'https?://(?:www\.)?pluralsight\.com/training/player\?author=(?P<author>[^&]+)&name=(?P<name>[^&]+)(?:&mode=live)?&clip=(?P<clip>\d+)&course=(?P<course>[^&]+)'
    _LOGIN_URL = 'https://www.pluralsight.com/id/'
    _NETRC_MACHINE = 'pluralsight'

    _TEST = {
        'url': 'http://www.pluralsight.com/training/player?author=mike-mckeown&name=hosting-sql-server-windows-azure-iaas-m7-mgmt&mode=live&clip=3&course=hosting-sql-server-windows-azure-iaas',
        'md5': '4d458cf5cf4c593788672419a8dd4cf8',
        'info_dict': {
            'id': 'hosting-sql-server-windows-azure-iaas-m7-mgmt-04',
            'ext': 'mp4',
            'title': 'Management of SQL Server - Demo Monitoring',
            'duration': 338,
        },
        'skip': 'Requires pluralsight account credentials',
    }

    def _real_initialize(self):
        # Log in before any extraction attempt.
        self._login()

    def _login(self):
        """Submit the login form; raise ExtractorError if credentials are
        missing or rejected."""
        (username, password) = self._get_login_info()
        if username is None:
            self.raise_login_required('Pluralsight account is required')

        login_page = self._download_webpage(
            self._LOGIN_URL, None, 'Downloading login page')

        # Start from the form's hidden inputs (CSRF tokens etc.) and add the
        # credentials on top.
        login_form = self._hidden_inputs(login_page)

        login_form.update({
            'Username': username.encode('utf-8'),
            'Password': password.encode('utf-8'),
        })

        post_url = self._search_regex(
            r'<form[^>]+action=(["\'])(?P<url>.+?)\1', login_page,
            'post url', default=self._LOGIN_URL, group='url')

        # The form action may be relative; resolve it against the login URL.
        if not post_url.startswith('http'):
            post_url = compat_urlparse.urljoin(self._LOGIN_URL, post_url)

        request = compat_urllib_request.Request(
            post_url, compat_urllib_parse.urlencode(login_form).encode('utf-8'))
        request.add_header('Content-Type', 'application/x-www-form-urlencoded')

        response = self._download_webpage(
            request, None, 'Logging in as %s' % username)

        # A validation-error span in the response means the login failed.
        error = self._search_regex(
            r'<span[^>]+class="field-validation-error"[^>]*>([^<]+)</span>',
            response, 'error message', default=None)
        if error:
            raise ExtractorError('Unable to login: %s' % error, expected=True)

    def _real_extract(self, url):
        """Resolve the clip referenced by `url` and return its info dict."""
        mobj = re.match(self._VALID_URL, url)
        author = mobj.group('author')
        name = mobj.group('name')
        clip_id = mobj.group('clip')
        course = mobj.group('course')

        display_id = '%s-%s' % (name, clip_id)

        webpage = self._download_webpage(url, display_id)

        # The page embeds the module/clip listing as a JS ModuleCollection;
        # pull the JSON array out of the constructor call.
        collection = self._parse_json(
            self._search_regex(
                r'moduleCollection\s*:\s*new\s+ModuleCollection\((\[.+?\])\s*,\s*\$rootScope\)',
                webpage, 'modules'),
            display_id)

        module, clip = None, None

        # Locate the requested module by name, then the clip by its index.
        for module_ in collection:
            if module_.get('moduleName') == name:
                module = module_
                for clip_ in module_.get('clips', []):
                    clip_index = clip_.get('clipIndex')
                    if clip_index is None:
                        continue
                    if compat_str(clip_index) == clip_id:
                        clip = clip_
                        break

        if not clip:
            raise ExtractorError('Unable to resolve clip')

        QUALITIES = {
            'low': {'width': 640, 'height': 480},
            'medium': {'width': 848, 'height': 640},
            'high': {'width': 1024, 'height': 768},
        }

        # Only these (container, quality) pairs are served by the site.
        ALLOWED_QUALITIES = (
            ('webm', ('high',)),
            ('mp4', ('low', 'medium', 'high',)),
        )

        formats = []
        for ext, qualities in ALLOWED_QUALITIES:
            for quality in qualities:
                f = QUALITIES[quality].copy()
                # POST body expected by the ViewClip endpoint; 'q' is the
                # requested resolution as "<width>x<height>".
                clip_post = {
                    'a': author,
                    'cap': 'false',
                    'cn': clip_id,
                    'course': course,
                    'lc': 'en',
                    'm': name,
                    'mt': ext,
                    'q': '%dx%d' % (f['width'], f['height']),
                }
                request = compat_urllib_request.Request(
                    'http://www.pluralsight.com/training/Player/ViewClip',
                    json.dumps(clip_post).encode('utf-8'))
                request.add_header('Content-Type', 'application/json;charset=utf-8')
                format_id = '%s-%s' % (ext, quality)
                # fatal=False: skip qualities the server refuses instead of
                # aborting the whole extraction.
                clip_url = self._download_webpage(
                    request, display_id, 'Downloading %s URL' % format_id, fatal=False)
                if not clip_url:
                    continue
                f.update({
                    'url': clip_url,
                    'ext': ext,
                    'format_id': format_id,
                })
                formats.append(f)
        self._sort_formats(formats)

        # TODO: captions
        # http://www.pluralsight.com/training/Player/ViewClip + cap = true
        # or
        # http://www.pluralsight.com/training/Player/Captions
        # { a = author, cn = clip_id, lc = end, m = name }

        return {
            'id': clip['clipName'],
            'title': '%s - %s' % (module['title'], clip['title']),
            'duration': int_or_none(clip.get('duration')) or parse_duration(clip.get('formattedDuration')),
            'creator': author,
            'formats': formats
        }
class PluralsightCourseIE(InfoExtractor):
    """Playlist extractor: expands a Pluralsight course page into player-clip
    entries handled by PluralsightIE."""
    IE_NAME = 'pluralsight:course'
    _VALID_URL = r'https?://(?:www\.)?pluralsight\.com/courses/(?P<id>[^/]+)'
    _TEST = {
        # Free course from Pluralsight Starter Subscription for Microsoft TechNet
        # https://offers.pluralsight.com/technet?loc=zTS3z&prod=zOTprodz&tech=zOttechz&prog=zOTprogz&type=zSOz&media=zOTmediaz&country=zUSz
        'url': 'http://www.pluralsight.com/courses/hosting-sql-server-windows-azure-iaas',
        'info_dict': {
            'id': 'hosting-sql-server-windows-azure-iaas',
            'title': 'Hosting SQL Server in Microsoft Azure IaaS Fundamentals',
            'description': 'md5:61b37e60f21c4b2f91dc621a977d0986',
        },
        'playlist_count': 31,
    }

    def _real_extract(self, url):
        course_id = self._match_id(url)

        # TODO: PSM cookie

        # Course metadata (title/description) and the module/clip listing
        # live behind two separate JSON endpoints.
        course = self._download_json(
            'http://www.pluralsight.com/data/course/%s' % course_id,
            course_id, 'Downloading course JSON')

        title = course['title']
        description = course.get('description') or course.get('shortDescription')

        course_data = self._download_json(
            'http://www.pluralsight.com/data/course/content/%s' % course_id,
            course_id, 'Downloading course data JSON')

        # Each playable clip carries ready-made query parameters for the
        # player URL; clips without them are skipped.
        entries = [
            self.url_result(
                'http://www.pluralsight.com/training/player?%s' % clip.get('playerParameters'),
                'Pluralsight')
            for module in course_data
            for clip in module.get('clips', [])
            if clip.get('playerParameters')
        ]

        return self.playlist_result(entries, course_id, title, description)
| builder07/ytdl | youtube_dl/extractor/pluralsight.py | pluralsight.py | py | 7,598 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "common.InfoExtractor",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "compat.compat_urlparse.urljoin",
"line_number": 61,
"usage_type": "call"
},
{
"api_name": "compat.compat_urlparse",
"line_number": 61,
"usage_type": "name"
},
{
"api_na... |
# Default broker settings; overridable via the rabbit_host/rabbit_port
# environment variables below.
RABBITMQ_HOST = 'localhost'
RABBITMQ_PORT = 5672
RABBITMQ_USER = 'guest'
RABBITMQ_PASS = 'guest'

import pika
from os import environ

# Local RabbitMQ location: environment variables win, otherwise fall back to
# the module defaults above.
hostname = environ.get('rabbit_host') or RABBITMQ_HOST
# environ.get() returns a string, so normalize to int for ConnectionParameters.
port = int(environ.get('rabbit_port') or RABBITMQ_PORT)

# Connect to the broker and set up a communication channel in the connection.
# Fix: the original hard-coded host='localhost', silently ignoring the
# hostname/port computed above.
connection = pika.BlockingConnection(pika.ConnectionParameters(host=hostname, port=port))

# # Declare exchange and queue
# channel.exchange_declare(exchange=RABBITMQ_SELL_EXCHANGE, exchange_type='fanout')
# channel.queue_declare(queue=RABBITMQ_SELL_QUEUE)
# channel.queue_bind(exchange=RABBITMQ_SELL_EXCHANGE, queue=RABBITMQ_SELL_QUEUE)

# # Publish order to exchange
# order = {'sell_price': current_coin_pricing, 'sell_quantity': sell['sell_quantity'], 'ordercoin': sell['ordercoin'], 'total_amount_earned': total_amount_gain}
# channel.basic_publish(exchange=RABBITMQ_BUY_EXCHANGE, routing_key='', body=json.dumps(order))

# # Close RabbitMQ connection
# connection.close()
#################
channel = connection.channel()

# ==================================================== Buy Order ====================================================
# Set up the buy-order exchange if it doesn't exist; 'fanout' broadcasts every
# message to all bound queues.
exchangetype = "fanout"
RABBITMQ_BUY_EXCHANGE_NAME = 'buyorders'
RABBITMQ_BUY_QUEUE = 'buyordersqueue'
# Fix: pika's exchange_declare takes the keyword `exchange=`, not
# `exchangename=` — the original raised a TypeError here.
channel.exchange_declare(exchange=RABBITMQ_BUY_EXCHANGE_NAME, exchange_type=exchangetype, durable=True)
# Declare the queue before binding it; binding a non-existent queue fails.
# (Matches the declare/bind pattern in the commented example above.)
channel.queue_declare(queue=RABBITMQ_BUY_QUEUE)
channel.queue_bind(exchange=RABBITMQ_BUY_EXCHANGE_NAME, queue=RABBITMQ_BUY_QUEUE)
# ================================================ End Buy Order ====================================================

# ==================================================== Sell Order ===================================================
exchangetype = "fanout"
RABBITMQ_SELL_EXCHANGE_NAME = 'sellorders'
RABBITMQ_SELL_QUEUE = 'sellordersqueue'
# 'durable' makes the exchange survive broker restarts.
channel.exchange_declare(exchange=RABBITMQ_SELL_EXCHANGE_NAME, exchange_type=exchangetype, durable=True)
channel.queue_declare(queue=RABBITMQ_SELL_QUEUE)
# Bind the queue to the exchange; with a fanout exchange the routing key is
# ignored, so every published message reaches the queue.
channel.queue_bind(exchange=RABBITMQ_SELL_EXCHANGE_NAME, queue=RABBITMQ_SELL_QUEUE)
# ================================================ End Sell Order ===================================================
"""
This function in this module sets up a connection and a channel to a cloud AMQP broker,
and declares a 'topic' exchange to be used by the microservices in the solution.
"""
def check_setup():
    """Re-establish the shared connection/channel if either has been closed.

    The module-level connection may have expired, timed out, or been
    disconnected by the broker or a client; call this before publishing or
    consuming.
    """
    global connection, channel
    reopened = False
    if not is_connection_open(connection):
        connection = pika.BlockingConnection(
            pika.ConnectionParameters(host=hostname, port=port, heartbeat=3600,
                                      blocked_connection_timeout=3600))
        # A channel on the dead connection is unusable even if it still
        # reports itself open.
        reopened = True
    if reopened or channel.is_closed:
        channel = connection.channel()
        # Fix: the original referenced undefined globals `exchangename` /
        # `exchangetype` pairs via a single `exchangename` name (NameError at
        # runtime); re-declare both exchanges this module actually defines.
        channel.exchange_declare(exchange=RABBITMQ_BUY_EXCHANGE_NAME,
                                 exchange_type=exchangetype, durable=True)
        channel.exchange_declare(exchange=RABBITMQ_SELL_EXCHANGE_NAME,
                                 exchange_type=exchangetype, durable=True)
def is_connection_open(connection):
    """Return True if `connection` can still process events, False otherwise.

    A pika BlockingConnection exposes no reliable liveness flag: a broken
    connection typically only surfaces as an exception on the next operation.
    So we actively poke it with process_data_events() and treat any AMQP
    error as "connection is down".
    """
    try:
        connection.process_data_events()
        return True
    except pika.exceptions.AMQPError as e:
        print("AMQP Error:", e)
        print("...creating a new connection.")
return False | peterwengg/Eevee-Trading | Backend/amqp_setup.py | amqp_setup.py | py | 4,384 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.environ.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.environ.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_numbe... |
35052674716 | import sqlite3
class Database:
    """Thin read-only wrapper around the SQLite database of NFT trait rows.

    The NFT table is expected to have an integer ID column first, followed
    by one column per trait layer name.
    """

    def __init__(self, dbName):
        """Open (or create) C://NFT_DATABASES/<dbName>.db.

        NOTE(review): the base directory is hard-coded and assumed to exist —
        confirm before running on another machine.
        """
        self.dbName = dbName
        self.con = sqlite3.connect('C://NFT_DATABASES/' + dbName + '.db')

    def getNFT(self, id):
        """Return the trait values for row `id`, with the ID column stripped.

        Raises IndexError if no row with that ID exists.
        """
        cursor = self.con.cursor()
        # Parameterized query instead of string concatenation — safer and
        # lets SQLite cache the statement.
        cursor.execute("SELECT * FROM NFT WHERE ID = ?", (id,))
        result = list(cursor.fetchall()[0])
        result.pop(0)  # drop the leading ID column
        return result

    def getCount(self):
        """Return the number of rows in the NFT table."""
        cursor = self.con.cursor()
        cursor.execute("SELECT COUNT(ID) FROM NFT")
        return cursor.fetchone()[0]
class generateNFT:
    # Renders NFT images inside GIMP by toggling layer visibility according to
    # the trait rows stored in the database.
    # NOTE(review): relies on the GIMP Python-Fu globals `gimp` and `pdb`,
    # which only exist when this script runs inside GIMP's script console —
    # it cannot run standalone.
    def __init__(self, dbName):
        # dbName: name of the SQLite database (without extension) that the
        # Database class opens under C://NFT_DATABASES/.
        self.dbName = dbName
        # Assumes exactly one image is open in GIMP; the first one is used.
        self.image = gimp.image_list()[0]
        self.layers = self.image.layers
        self.clear()

    def clear(self):
        # Hide every layer so each NFT composite starts from a blank canvas.
        for layer in self.layers:
            layer.visible = False

    def save(self, image, fileName):
        # Export the currently visible layers as '#<fileName>.png'. A
        # duplicate is flattened and saved so the working image keeps its
        # layer structure intact.
        newImage = pdb.gimp_image_duplicate(image)
        newImage.flatten()
        # NOTE(review): the save path has no directory component — presumably
        # it lands in GIMP's current working directory; verify.
        pdb.gimp_file_save(newImage, newImage.layers[0], '#' + str(fileName) + '.png', '?')
        pdb.gimp_image_delete(newImage)

    def getLayerNames(self):
        # Return the names of all layers in stacking order.
        names = []
        for layer in self.layers:
            names.append(layer.name)
        return names

    def createImages(self, min, max):
        # Generate one PNG per NFT for database IDs in [min, max] inclusive.
        # NOTE(review): `min`/`max` shadow the builtins — harmless here, but
        # worth renaming if this method is ever refactored.
        db = Database(self.dbName)
        layerNames = self.getLayerNames()
        if max > db.getCount():
            raise Exception("Your max value exceeds the entries in your table")
        for i in range(min, max + 1):
            nft = db.getNFT(i)
            for x in nft:
                # Each trait value is expected to match a layer named
                # '<TRAIT>.PNG'; raises ValueError if the layer is missing.
                self.layers[layerNames.index(x + ".PNG")].visible = True
            self.save(self.image, i)
            self.clear()
| Ahmad-Shehzad/NFT-Generator | NFT Image Creator/Database.py | Database.py | py | 1,735 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.connect",
"line_number": 6,
"usage_type": "call"
}
] |
72259961953 | from django.db import models
from django.conf import settings
class CountCheckUtils:
    """Helpers for seeding CountCheck fixtures and looking up checking fees."""

    @staticmethod
    def seeding(index: int, single: bool = False, save: bool = True) -> models.QuerySet:
        """Create CountCheck fixture data.

        index: 1-based upper bound; must be >= 1.
        single: if True, build only entry `index`; otherwise entries 1..index.
        save: if True, persist via the serializer and return the instance(s);
            if False, return the raw data dict(s) without saving.

        NOTE: despite the annotation, the actual return is a dict / model
        instance (single=True) or a list of them.
        Raises ValueError when index < 1.
        """
        from apps.count_check.serializers import CountCheckBaseSr

        # Fix: the original only rejected index == 0 (with a typo'd message)
        # and silently produced an empty list for negative values.
        if index < 1:
            raise ValueError('Index must start with 1.')

        def get_data(i: int) -> dict:
            # Entry i covers items [i*10, i*10 + 9] at fee 20 + i.
            data = {
                'from_items': i * 10,
                'to_items': i * 10 + 9,
                'fee': 20 + i
            }
            if save is False:
                return data
            instance = CountCheckBaseSr(data=data)
            instance.is_valid(raise_exception=True)
            instance = instance.save()
            return instance

        def get_list_data(index):
            return [get_data(i) for i in range(1, index + 1)]

        return get_data(index) if single is True else get_list_data(index)

    @staticmethod
    def get_matched_fee(items: int) -> float:
        """Return the fee whose [from_items, to_items] range contains `items`,
        or settings.DEFAULT_COUNT_CHECK_PRICE when no range matches."""
        from .models import CountCheck

        # Single query via first() instead of the original count() + first()
        # pair, which hit the database twice.
        match = CountCheck.objects.filter(from_items__lte=items, to_items__gte=items).first()
        if match is not None:
            return match.fee
        return settings.DEFAULT_COUNT_CHECK_PRICE
| tbson/24ho | api/apps/count_check/utils.py | utils.py | py | 1,234 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "apps.count_check.serializers.CountCheckBaseSr",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "django.db.models.QuerySet",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 7,
"usage_type": "name"
}... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.