seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
17849635757 | from ..config import ElementName as BasicElementName, extract_display_name, ReactionConfig, \
NetworkGeneralConfig, ParameterName
from ..metabolic_network_elements.reaction_element import ReactionElement
class Reaction(object):
    """Logical description of one metabolic-network reaction.

    Holds the reaction's geometry (``reaction_start_end_list``), direction
    flags, display-text configuration and flux values, and converts itself
    into a drawable ``ReactionElement`` via :meth:`to_element`.
    """
    def __init__(
            self, reaction_name, reversible=False, reaction_start_end_list=None, change_arrow_by_value=True,
            extra_parameter_dict=None, **kwargs):
        """Create a reaction.

        :param reaction_name: internal identifier; also used to derive the
            display name via ``extract_display_name``.
        :param reversible: whether the reaction can run both ways.
        :param reaction_start_end_list: list of edge descriptors consumed by
            :meth:`to_element`; each entry starts with an edge-property tag.
        :param change_arrow_by_value: if True, arrow heads follow the sign of
            the net flux value (see :meth:`judge_bidirectional_flag`).
        :param extra_parameter_dict: extra keys merged into every edge dict.
        :param kwargs: forwarded verbatim to ``ReactionElement``.
        """
        self.reaction_name = reaction_name
        self.kwargs = kwargs
        self.display_reaction_name = extract_display_name(reaction_name)
        self.reaction_start_end_list = reaction_start_end_list
        if extra_parameter_dict is None:
            extra_parameter_dict = {}
        self.extra_parameter_dict = extra_parameter_dict
        # The attributes below are pre-declared with placeholder values and
        # then set to their real values by _initialize_tags() at the end.
        self.reversible = False
        self.boundary_flux = False
        self.display_config_nested_dict = {}
        self.default_display_text_config = {}
        self.change_arrow_by_value = True
        self.forward_value = None
        self.backward_value = None
        self.net_value = None
        self.tail_arrow = None
        self.head_arrow = None
        self._initialize_tags(reversible, change_arrow_by_value)
    def _initialize_tags(self, reversible=False, change_arrow_by_value=True):
        """Reset direction flags, display config and cached flux values."""
        self.reversible = reversible
        self.boundary_flux = False
        self.display_config_nested_dict = {}
        self.default_display_text_config = ReactionConfig.default_display_text_config
        self.change_arrow_by_value = change_arrow_by_value
        # Flux values and cached arrow flags start unset; see set_value() and
        # judge_bidirectional_flag().
        self.forward_value = None
        self.backward_value = None
        self.net_value = None
        self.tail_arrow = None
        self.head_arrow = None
    def reset(self):
        """Clear the geometry list and re-initialize all tags to defaults."""
        self.reaction_start_end_list = None
        self._initialize_tags()
    def set_reversible(self, reversible):
        """Set the reversibility flag."""
        self.reversible = reversible
    def set_reaction_start_end_list(self, new_start_end_list):
        """Replace the edge descriptor list. Returns self for chaining."""
        self.reaction_start_end_list = new_start_end_list
        return self
    def extend_reaction_start_end_list(self, new_added_start_end_list):
        """Append edge descriptors, creating the list if needed. Chainable."""
        if self.reaction_start_end_list is None:
            self.reaction_start_end_list = []
        self.reaction_start_end_list.extend(new_added_start_end_list)
        return self
    def set_boundary_flux(self, boundary_flux: bool):
        """Mark/unmark this reaction as a boundary (exchange) flux. Chainable."""
        self.boundary_flux = boundary_flux
        return self
    def set_display_text_config_dict(self, display_config_dict: dict, config_key=None):
        """Store a text config under *config_key*, layered over the defaults.

        Chainable.
        """
        new_text_config_dict = dict(self.default_display_text_config)
        new_text_config_dict.update(display_config_dict)
        self.display_config_nested_dict[config_key] = new_text_config_dict
        return self
    def update_display_text_config_item(self, updated_display_config_dict: dict = None, config_key=None):
        """Update one stored text config (by key) or all of them (key=None).

        NOTE(review): with config_key=None and an empty nested dict this is a
        silent no-op — confirm callers always register a config first.
        Chainable.
        """
        if updated_display_config_dict is not None:
            if config_key is not None:
                self.display_config_nested_dict[config_key].update(updated_display_config_dict)
            else:
                for display_config_dict in self.display_config_nested_dict.values():
                    display_config_dict.update(updated_display_config_dict)
        return self
    def set_display_text(self, display_text: str, config_key=None):
        """Set the rendered text; the biomass reaction gets a fixed prefix.

        Chainable.
        """
        if self.reaction_name == NetworkGeneralConfig.biomass_str:
            display_text = f'Biomass reaction:\n{display_text}'
        self.update_display_text_config_item({ParameterName.string: display_text}, config_key)
        # if config_key is not None:
        #     self.display_config_nested_dict[config_key][ParameterName.string] = display_text
        # else:
        #     for display_config_dict in self.display_config_nested_dict.values():
        #         display_config_dict[ParameterName.string] = display_text
        return self
    def set_value(self, flux_value):
        """Store flux values.

        A 2-sequence is interpreted as (forward, backward) with
        net = |forward - backward|; a scalar is a pure forward flux
        (backward = 0, net = forward).
        """
        # if self.reversible:
        #     assert isinstance(flux_value, (tuple, list)) and len(flux_value) == 2
        #     forward, backward = flux_value
        #     self.net_value = abs(forward - backward)
        # else:
        #     assert isinstance(flux_value, (float, int))
        #     forward = flux_value
        #     backward = None
        #     self.net_value = forward
        if isinstance(flux_value, (tuple, list)) and len(flux_value) == 2:
            forward, backward = flux_value
            self.net_value = abs(forward - backward)
        elif isinstance(flux_value, (float, int)):
            forward = flux_value
            backward = 0
            self.net_value = forward
        else:
            raise ValueError()
        self.forward_value = forward
        self.backward_value = backward
    def judge_bidirectional_flag(self):
        """Return (tail_arrow, head_arrow) booleans, computing them once.

        Reversible with no usable value -> arrows on both ends; reversible
        with backward > forward -> tail arrow only (net flow is reversed);
        otherwise head arrow only. The result is cached on the instance.
        """
        if not self.change_arrow_by_value:
            # Ignore flux values when arrows are configured statically.
            net_value = None
        else:
            net_value = self.net_value
        if self.tail_arrow is None:
            if self.reversible and net_value is None:
                head_arrow = tail_arrow = True
            else:
                if self.reversible and net_value is not None and \
                        self.backward_value > self.forward_value:
                    head_arrow = False
                    tail_arrow = True
                else:
                    head_arrow = True
                    tail_arrow = False
            self.tail_arrow = tail_arrow
            self.head_arrow = head_arrow
        return self.tail_arrow, self.head_arrow
    def judge_if_reverse(self):
        """True when the net flow runs tail-ward (tail arrow only)."""
        tail, head = self.judge_bidirectional_flag()
        return tail and not head
    def update_extra_parameter_dict(self, new_extra_parameter_dict):
        """Merge extra parameters applied to every generated edge. Chainable."""
        self.extra_parameter_dict.update(new_extra_parameter_dict)
        return self
    def to_element(self, scale=1, bottom_left_offset=None):
        """Convert this reaction into a ``ReactionElement``.

        Walks ``reaction_start_end_list``: each non-branch entry starts a new
        arrow dict (normal/cycle/path_cycle/bent/broken shapes); branch
        entries attach to the most recent arrow dict, which is why a branch
        may not appear first.
        """
        gap_line_pair_list_label = ParameterName.gap_line_pair_list
        dash_solid_empty_width_label = ParameterName.dash_solid_empty_width
        branch_list_label = ParameterName.branch_list
        reaction_edge_parameter_list = []
        current_reaction_edge_dict = None
        for reaction_edge_property, *reaction_edge_parameter_tuple in self.reaction_start_end_list:
            if reaction_edge_property != ParameterName.branch:
                # Starting a new edge: flush the previous one first.
                if current_reaction_edge_dict is not None:
                    reaction_edge_parameter_list.append(current_reaction_edge_dict)
                tail_arrow, head_arrow = self.judge_bidirectional_flag()
                if reaction_edge_property == ParameterName.normal:
                    tail, head, parameter_dict = reaction_edge_parameter_tuple
                    current_reaction_edge_dict = {
                        ParameterName.class_name: BasicElementName.Arrow,
                        ParameterName.tail: tail,
                        ParameterName.head: head,
                        ParameterName.tail_arrow: tail_arrow,
                        ParameterName.head_arrow: head_arrow,
                        ParameterName.boundary_flux: self.boundary_flux,
                        **self.extra_parameter_dict,
                    }
                elif reaction_edge_property == ParameterName.cycle:
                    theta_tail, theta_head, center, radius, parameter_dict = reaction_edge_parameter_tuple
                    current_reaction_edge_dict = {
                        ParameterName.class_name: BasicElementName.ArcArrow,
                        ParameterName.theta_tail: theta_tail,
                        ParameterName.theta_head: theta_head,
                        ParameterName.center: center,
                        ParameterName.radius: radius,
                        ParameterName.tail_arrow: tail_arrow,
                        ParameterName.head_arrow: head_arrow,
                        ParameterName.boundary_flux: self.boundary_flux,
                        **self.extra_parameter_dict,
                    }
                elif reaction_edge_property == ParameterName.path_cycle:
                    tail, mid, head, parameter_dict = reaction_edge_parameter_tuple
                    current_reaction_edge_dict = {
                        ParameterName.class_name: BasicElementName.ArcPathArrow,
                        ParameterName.tail: tail,
                        ParameterName.mid: mid,
                        ParameterName.head: head,
                        ParameterName.tail_arrow: tail_arrow,
                        ParameterName.head_arrow: head_arrow,
                        ParameterName.boundary_flux: self.boundary_flux,
                        **self.extra_parameter_dict,
                    }
                elif reaction_edge_property == ParameterName.bent:
                    tail, head, arrow_head_direction, parameter_dict = reaction_edge_parameter_tuple
                    current_reaction_edge_dict = {
                        ParameterName.class_name: BasicElementName.BentArrow,
                        ParameterName.tail: tail,
                        ParameterName.head: head,
                        ParameterName.radius: ReactionConfig.bent_reaction_radius,
                        ParameterName.arrow_head_direction: arrow_head_direction,
                        ParameterName.tail_arrow: tail_arrow,
                        ParameterName.head_arrow: head_arrow,
                        ParameterName.boundary_flux: self.boundary_flux,
                        **self.extra_parameter_dict,
                    }
                elif reaction_edge_property == ParameterName.broken:
                    tail, head, transition_point_list, parameter_dict = reaction_edge_parameter_tuple
                    current_reaction_edge_dict = {
                        ParameterName.class_name: BasicElementName.BrokenArrow,
                        ParameterName.tail: tail,
                        ParameterName.head: head,
                        ParameterName.tail_arrow: tail_arrow,
                        ParameterName.head_arrow: head_arrow,
                        ParameterName.boundary_flux: self.boundary_flux,
                        ParameterName.transition_point_list: transition_point_list,
                        **self.extra_parameter_dict,
                    }
                else:
                    raise ValueError()
                # Optional per-edge extras copied from the descriptor's dict.
                if gap_line_pair_list_label in parameter_dict:
                    current_reaction_edge_dict[gap_line_pair_list_label] = parameter_dict[gap_line_pair_list_label]
                if dash_solid_empty_width_label in parameter_dict:
                    current_reaction_edge_dict[dash_solid_empty_width_label] = parameter_dict[
                        dash_solid_empty_width_label]
            else:
                if current_reaction_edge_dict is None:
                    raise ValueError('Cannot put branch to first of reaction list')
                else:
                    # Branches are appended to the preceding edge's branch list.
                    stem_location, terminal_location, parameter_dict = reaction_edge_parameter_tuple
                    branch_parameter_dict = {
                        ParameterName.stem_location: stem_location,
                        ParameterName.terminal_location: terminal_location,
                    }
                    if ParameterName.arrow in parameter_dict:
                        branch_parameter_dict[ParameterName.arrow] = parameter_dict[ParameterName.arrow]
                    if ParameterName.dash in parameter_dict:
                        branch_parameter_dict[ParameterName.dash] = parameter_dict[ParameterName.dash]
                    if branch_list_label not in current_reaction_edge_dict:
                        current_reaction_edge_dict[branch_list_label] = []
                    current_reaction_edge_dict[branch_list_label].append(branch_parameter_dict)
        # Flush the final pending edge.
        if current_reaction_edge_dict is not None:
            reaction_edge_parameter_list.append(current_reaction_edge_dict)
        # Only configs that actually carry a text string are forwarded.
        display_text_param_nested_dict = {
            key: config_dict for key, config_dict in self.display_config_nested_dict.items()
            if ParameterName.string in config_dict
        }
        return ReactionElement(
            self.reaction_name, self.display_reaction_name, reaction_edge_parameter_list,
            display_text_param_nested_dict=display_text_param_nested_dict,
            scale=scale, bottom_left_offset=bottom_left_offset, **self.kwargs)
| LocasaleLab/Automated-MFA-2023 | figures/figure_plotting/figure_elements/metabolic_network/metabolic_network_contents/reaction.py | reaction.py | py | 12,308 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "config.extract_display_name",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "config.ReactionConfig.default_display_text_config",
"line_number": 34,
"usage_type": "attribute"
},
{
"api_name": "config.ReactionConfig",
"line_number": 34,
"usage_type... |
12532858756 | from setuptools import setup
with open('README.md') as f:
long_description = f.read()
setup(
name = "moderate",
version = "0.1",
license = 'MIT',
description = "A Python Distrubted System",
author = 'Thomas Huang',
url = 'https://github.com/thomashuang/Moderate',
packages = ['moderate', 'moderate.queue'],
install_requires = ['setuptools',
],
classifiers=[
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: Distrubted System',
],
long_description=long_description,
) | whiteclover/Moderate | setup.py | setup.py | py | 676 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
}
] |
24609091 | from django.contrib.auth.models import User
from django.shortcuts import redirect, render, get_object_or_404
from .models import Post,Comment
from .forms import NewCommentForm
# Create your views here.
from qna.models import Question
from django.http import JsonResponse
from django.contrib.auth.decorators import login_required
@login_required(login_url='/login/')
def posts(request):
    """List all posts (newest first) and create a new post on POST.

    NOTE(review): ``request.FILES['file']`` raises MultiValueDictKeyError when
    no file is attached — presumably the form always supplies one; confirm.
    """
    if request.method == "POST":
        user = request.user
        description = request.POST.get("postDescription")
        url = request.POST.get("url")
        file = request.FILES['file']
        # The "url" field is a comma-separated list; an empty field becomes
        # the sentinel list ['none'].
        url = list(url.split(","))
        if url == ['']:
            url = ['none']
        form = Post(user=user,description=description,url=url,file=file)
        form.save()
        return redirect('posts')
    all_post = Post.objects.all().order_by('-created_on')
    context={'all_post':all_post}
    return render(request,'post/post.html',context)
@login_required(login_url='/login/')
def like_unlike_post(request):
    """Toggle the current user's like on a post (AJAX endpoint).

    Liking a post removes any existing dislike.  Responds with an empty
    JSON object on POST; any other method redirects to the main post view.
    """
    if request.method != 'POST':
        return redirect('posts:main-post-view')
    profile = User.objects.get(pk=request.user.pk)
    post = Post.objects.get(id=request.POST.get('post_id'))
    if profile in post.likes.all():
        post.likes.remove(profile)
    else:
        post.likes.add(profile)
        # a like cancels an existing dislike
        if profile in post.dislikes.all():
            post.dislikes.remove(profile)
    post.save()
    return JsonResponse({}, safe=True)
@login_required(login_url='/login/')
def dislike_post(request):
    """Toggle the current user's dislike on a post (AJAX endpoint).

    Disliking a post removes any existing like.  Responds with an empty
    JSON object on POST; any other method redirects to the main post view.
    """
    if request.method != 'POST':
        return redirect('posts:main-post-view')
    profile = User.objects.get(pk=request.user.pk)
    post = Post.objects.get(id=request.POST.get('post_id'))
    if profile in post.dislikes.all():
        post.dislikes.remove(profile)
    else:
        post.dislikes.add(profile)
        # a dislike cancels an existing like
        if profile in post.likes.all():
            post.likes.remove(profile)
    post.save()
    return JsonResponse({}, safe=True)
@login_required(login_url='/login/')
def star_post(request):
    """Toggle the current user's star (bookmark) on a post (AJAX endpoint).

    Responds with an empty JSON object on POST; any other method redirects
    to the main post view.
    """
    if request.method != 'POST':
        return redirect('posts:main-post-view')
    profile = User.objects.get(pk=request.user.pk)
    post = Post.objects.get(id=request.POST.get('post_id'))
    if profile in post.star.all():
        post.star.remove(profile)
    else:
        post.star.add(profile)
    post.save()
    return JsonResponse({}, safe=True)
@login_required(login_url='/login/')
def postsingle(request,pk):
    """Show one post with its comments; handle new comment submission.

    The original code nested two identical ``request.method == "POST"``
    checks; a single check is equivalent and clearer.
    """
    v_post = Post.objects.get(id=int(pk))
    comment_form = NewCommentForm()
    if request.method == "POST":
        comment_form = NewCommentForm(request.POST)
        if comment_form.is_valid():
            user_comment = comment_form.save(commit=False)
            user_comment.post = v_post
            user_comment.user = request.user
            user_comment.save()
            return redirect('viewpost',pk)
    # Fall through on GET or invalid form: render the post with its comments
    # (an invalid bound form re-renders with its errors).
    comment = Comment.objects.filter(post=v_post)
    count = comment.count()
    context = {'v_post': v_post, 'comments': comment,'comment_form':comment_form,'count':count}
    return render(request,'post/viewPost.html',context)
| adityachaudhary147/MindQ | post/views.py | views.py | py | 3,755 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "models.Post",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.redirect",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "models.Post.objects.all",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "models.... |
73360149224 | # django imports
from django.contrib.auth.decorators import permission_required
from django.core.urlresolvers import reverse
from django.forms import ModelForm
from django.http import HttpResponseRedirect
from django.shortcuts import render_to_response
from django.shortcuts import get_object_or_404
from django.template import RequestContext
from django.utils.translation import ugettext_lazy as _
# lfs imports
import lfs.core.utils
from lfs.catalog.models import StaticBlock
class StaticBlockForm(ModelForm):
    """Form to add and edit a static block.
    """
    class Meta:
        # Fields are derived automatically from the StaticBlock model.
        model = StaticBlock
@permission_required("manage_shop", login_url="/login/")
def manage_static_blocks(request):
    """Dispatches to the first static block or to the add static block form.
    """
    try:
        # IndexError on the empty queryset means no static block exists yet.
        sb = StaticBlock.objects.all()[0]
        url = reverse("lfs_manage_static_block", kwargs={"id": sb.id})
    except IndexError:
        url = reverse("lfs_add_static_block")
    return HttpResponseRedirect(url)
@permission_required("manage_shop", login_url="/login/")
def manage_static_block(request, id, template_name="manage/static_block/static_block.html"):
    """Displays the main form to manage static blocks.

    On a valid POST the block is saved and the user is redirected back with
    a confirmation message cookie; otherwise the (possibly bound) form is
    re-rendered.
    """
    sb = get_object_or_404(StaticBlock, pk=id)
    if request.method == "POST":
        form = StaticBlockForm(instance=sb, data=request.POST)
        if form.is_valid():
            form.save()
            return lfs.core.utils.set_message_cookie(
                url = reverse("lfs_manage_static_block", kwargs={"id" : sb.id}),
                msg = _(u"Static block has been saved."),
            )
    else:
        form = StaticBlockForm(instance=sb)
    return render_to_response(template_name, RequestContext(request, {
        "static_block" : sb,
        "static_blocks" : StaticBlock.objects.all(),
        "form" : form,
        "current_id" : int(id),
    }))
@permission_required("manage_shop", login_url="/login/")
def add_static_block(request, template_name="manage/static_block/add_static_block.html"):
    """Provides a form to add a new static block.

    On a valid POST the new block is created and the user is redirected to
    its management page with a confirmation message cookie.
    """
    if request.method == "POST":
        form = StaticBlockForm(data=request.POST)
        if form.is_valid():
            new_sb = form.save()
            return lfs.core.utils.set_message_cookie(
                url = reverse("lfs_manage_static_block", kwargs={"id" : new_sb.id}),
                msg = _(u"Static block has been added."),
            )
    else:
        form = StaticBlockForm()
    return render_to_response(template_name, RequestContext(request, {
        "form" : form,
        "static_blocks" : StaticBlock.objects.all(),
    }))
@permission_required("manage_shop", login_url="/login/")
def preview_static_block(request, id, template_name="manage/static_block/preview.html"):
    """Render a read-only preview of the static block with primary key *id*."""
    static_block = get_object_or_404(StaticBlock, pk=id)
    context = RequestContext(request, {"static_block": static_block})
    return render_to_response(template_name, context)
@permission_required("manage_shop", login_url="/login/")
def delete_static_block(request, id):
    """Deletes static block with passed id.
    """
    sb = get_object_or_404(StaticBlock, pk=id)
    # First we delete all referencing categories. Otherwise they would be
    # deleted
    for category in sb.categories.all():
        # Detach instead of cascading: keep the category, drop the reference.
        category.static_block = None
        category.save()
    sb.delete()
    return lfs.core.utils.set_message_cookie(
        url = reverse("lfs_manage_static_blocks"),
        msg = _(u"Static block has been deleted."),
    ) | django-lfs/lfs | manage/views/static_blocks.py | static_blocks.py | py | 3,646 | python | en | code | 23 | github-code | 36 | [
{
"api_name": "django.forms.ModelForm",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "lfs.catalog.models.StaticBlock",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "lfs.catalog.models.StaticBlock.objects.all",
"line_number": 26,
"usage_type": "call... |
73774729063 | import torch
from torch import nn
import torch.nn.functional as F
import os
import math
import numpy as np
from train_pipeline import *
def init_siren(W, fan_in, omega=30, init_c=24, flic=2, is_first=False):
    """SIREN-style in-place uniform init of *W* on [-bound, bound].

    The first layer uses bound = flic / fan_in; later layers use
    bound = sqrt(init_c / fan_in) / omega.
    """
    bound = (flic / fan_in) if is_first else (np.sqrt(init_c / fan_in) / omega)
    W.uniform_(-bound, bound)
def _init(W, c):
W.uniform_(-c, c)
class SplitLayer(nn.Module):
    """Layer whose pre-activation is split into four branches
    (tanh / sigmoid / sin / cos) that are multiplied together and scaled
    by ``m``."""
    def __init__(self, input_dim, output_dim, m=1.0, cs=(1, 1, 1, 1), omegas=(1, 1, 1.0, 1), use_bias=True):
        super().__init__()
        # A single linear map produces all four branches at once.
        # NOTE(review): init_weights() accesses linear.bias, so use_bias=False
        # would raise — confirm callers always keep the bias.
        self.linear = nn.Linear(input_dim, output_dim * 4, bias=use_bias)
        self.dropout = nn.Dropout(0)
        self.input_dim = input_dim
        self.output_dim = output_dim
        self.m = m          # output scale factor
        self.omegas = omegas  # per-branch pre-activation scales
        self.cs = cs          # per-branch init bounds
        self.init_weights()
    def init_weights(self):
        """Zero the bias and draw each branch's weight rows from U(-c, c)."""
        self.linear.bias.data.uniform_(0, 0)
        s = self.output_dim
        W = self.linear.weight.data
        # Row slices: [0:s) tanh, [s:2s) sigmoid, [2s:3s) sin, [3s:4s) cos.
        _init(W[:s], self.cs[0])
        _init(W[s : s * 2], self.cs[1])
        _init(W[s * 2 : s * 3], self.cs[2])
        _init(W[s * 3 : s * 4], self.cs[3])
    def forward(self, x):
        h, acts = self.forward_with_activations(x)
        return h
    def forward_with_activations(self, x):
        """Return the layer output and all intermediate (pre-)activations."""
        preact = self.linear(x)
        preacts = preact.chunk(4, dim=-1)
        preacts = list(preacts)
        # Scale each branch's pre-activation by its omega.
        for i in range(len(preacts)):
            preacts[i] = self.omegas[i] * preacts[i]
        preact_tanh, preact_sigmoid, preact_sin, preact_cos = preacts
        act_tanh, act_sigmoid, act_sin, act_cos = preact_tanh.tanh(), preact_sigmoid.sigmoid(), preact_sin.sin(), preact_cos.cos()
        # Product of the four non-linearities, scaled by m.
        h = act_tanh * act_sigmoid * act_sin * act_cos
        h = h * self.m
        return h, [x, preact, preact_tanh, preact_sigmoid, preact_sin, preact_cos, act_tanh, act_sigmoid, act_sin, act_cos]
class SimpleSplitNet(nn.Module):
    """Two SplitLayers followed by a linear read-out.

    Fixes relative to the original:
    * ``hidden_layers`` was an int but was passed to ``len()``, so any call
      with a scalar ``m`` or a single shared ``omegas`` tuple raised
      TypeError.  The broadcast now uses the layer count directly.
    * ``in_features`` / ``out_features`` are accepted (``ParallelSplitNet``
      already passes them), defaulting to the previously hard-coded 128 / 3
      so existing callers are unaffected.
    """
    def __init__(self, cs, use_bias=True, omegas=(1, 1, 1.0, 1), m=1.0,
                 in_features=128, out_features=3):
        """:param cs: [cs_layer0, cs_layer1, c_readout] init bounds.
        :param omegas: one 4-tuple shared by both split layers, or a list of
            per-layer 4-tuples.
        :param m: scalar multiplier shared by both split layers, or a list of
            per-layer multipliers.
        """
        super().__init__()
        num_split_layers = 2  # the two SplitLayer blocks built below
        if not hasattr(m, "__len__"):
            # Broadcast a scalar multiplier to one value per split layer.
            m = [m] * num_split_layers
        is_layerwise_omegas = hasattr(omegas[0], "__len__")
        if not is_layerwise_omegas:
            # A single omega 4-tuple is shared by every split layer.
            omegas = [omegas] * num_split_layers
        net = [
            SplitLayer(in_features, 64, use_bias=use_bias, cs=cs[0], m=m[0], omegas=omegas[0]),
            SplitLayer(64, 32, use_bias=use_bias, cs=cs[1], m=m[1], omegas=omegas[1]),
            nn.Linear(32, out_features),
        ]
        _init(net[-1].weight.data, cs[2])
        self.net = nn.Sequential(*net)
    def forward(self, x):
        """Run the full stack on *x*."""
        return self.net(x)
    def forward_with_activations(self, x):
        """Return the final output plus per-layer (output, activations) pairs."""
        h = x
        intermediate_acts = []
        for layer in self.net:
            if isinstance(layer, SplitLayer):
                h, acts = layer.forward_with_activations(h)
            else:
                h = layer(h)
                acts = []
            intermediate_acts.append((h, acts))
        return h, intermediate_acts
class ParallelSplitNet(nn.Module):
    """Sum of several SimpleSplitNets applied to a random-Fourier encoding
    of 2-D input coordinates."""
    def __init__(self, model_configs, out_features, encoding_size=128):
        super().__init__()
        # if not hasattr(m, '__len__'):
        #     m = [m] * (hidden_layers+2)
        import rff
        # Gaussian random Fourier features: 2-D coords -> 2*encoding_size dims.
        self.encoding = rff.layers.GaussianEncoding(sigma=10.0, input_size=2, encoded_size=encoding_size)
        in_features = encoding_size * 2
        # NOTE(review): requires SimpleSplitNet to accept the
        # in_features/out_features keyword arguments — verify.
        self.networks = nn.ModuleList([SimpleSplitNet(**k, in_features=in_features, out_features=out_features) for k in model_configs])
    def forward(self, x):
        x = self.encoding(x)
        # Sum the outputs of all sub-networks.
        o = 0
        for net in self.networks:
            o = o + net(x)
        return o
def get_example_model():
    """Build and return a SimpleSplitNet with a default configuration.

    Bug fix: the original constructed the network but had no ``return``
    statement, so every caller received ``None``.
    """
    kwargs = {"cs": [(1, 1, 1, 1), (1, 1, 1, 1), 0.1], "omegas": [(1, 1, 1, 1), (1, 1, 1, 1)], "m": [1, 1]}
    return SimpleSplitNet(**kwargs)
import wandb
# Hyper-parameter search ranges for the wandb random sweep below.
m_range = (0.1, 30)
c_range = (1e-3, 1e1)
omega_range = (0.1, 30)
PROJECT_NAME = "splitnet_3_sweep"
# Random search maximising PSNR over: two layer multipliers (m0-m1), nine
# init scales (c0-c8), eight frequency scales (omega0-omega7), plus a small
# grid of learning rates and weight decays.
sweep_configuration = {
    "method": "random",
    "name": "sweep",
    "metric": {"goal": "maximize", "name": "psnr"},
    "parameters": {
        **{f"m{i}": {"distribution": "uniform", "min": m_range[0], "max": m_range[1]} for i in range(2)},
        **{f"c{i}": {"distribution": "uniform", "min": c_range[0], "max": c_range[1]} for i in range(4 + 4 + 1)},
        **{f"omega{i}": {"distribution": "uniform", "min": omega_range[0], "max": omega_range[1]} for i in range(4 + 4)},
        "lr": {"values": [1e-3, 1e-4, 1e-5]},
        "weight_decay": {'values': [0, 1e-5]},
    },
}
def _train_for_sweep(model, cfg, lr, weight_decay):
    """Train *model* on the image-fitting task and return the final PSNR.

    NOTE(review): seed_all/load_data/mse_and_psnr come from the
    ``train_pipeline`` star-import at the top of the file — confirm their
    contracts there.
    """
    seed_all(cfg["random_seed"])
    device = cfg["device"]
    total_steps = cfg["total_steps"]
    model_input, ground_truth, H, W = load_data(cfg)
    model_input, ground_truth = model_input.to(device), ground_truth.to(device)
    model.to(device)
    import rff
    # Input coordinates are lifted to random Fourier features before training.
    encoding = rff.layers.GaussianEncoding(sigma=10.0, input_size=2, encoded_size=64).to(device)
    model_input = encoding(model_input)
    optimizer = torch.optim.AdamW(model.parameters(), lr=lr, weight_decay=weight_decay)
    for step in range(total_steps):
        model_output = model(model_input)
        mse, psnr = mse_and_psnr(model_output, ground_truth)
        loss = mse
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
    return psnr.item()
def objective(c):
    """Sweep objective: build a SimpleSplitNet from the flat wandb config *c*,
    train it via _train_for_sweep and return the resulting PSNR."""
    cs = [
        tuple(c[f"c{i}"] for i in range(4)),
        tuple(c[f"c{i}"] for i in range(4, 8)),
        c["c8"],
    ]
    omegas = [
        tuple(c[f"omega{i}"] for i in range(4)),
        tuple(c[f"omega{i}"] for i in range(4, 8)),
    ]
    m = [c["m0"], c["m1"]]
    net = SimpleSplitNet(cs=cs, omegas=omegas, m=m)
    return _train_for_sweep(net, cfg, c["lr"], c["weight_decay"])
def main():
    """Run one sweep trial: read the wandb config, train, log the PSNR."""
    wandb.init(project=PROJECT_NAME)
    psnr = objective(wandb.config)
    wandb.log({"psnr": psnr})
import os
from hydra import initialize, initialize_config_module, initialize_config_dir, compose
from hydra.utils import instantiate
from omegaconf import OmegaConf
def load_cfg(config_name="config", overrides=()):
    """Compose and return a hydra config from ./conf with optional overrides."""
    # with initialize_config_dir(config_dir="/app/notebooks/draft_02/conf"):
    with initialize(version_base=None, config_path="./conf"):
        cfg = compose(config_name=config_name, overrides=list(overrides))
    return cfg
# Load the sweep config once at import time and echo it for the log.
cfg = load_cfg("sweep_config_0", overrides=["+device=cuda:0"])
print(OmegaConf.to_yaml(cfg))
# Entry point: register the sweep with wandb and run ten agent trials.
if __name__ == "__main__":
    sweep_id = wandb.sweep(sweep=sweep_configuration, project=PROJECT_NAME)
    wandb.agent(sweep_id, function=main, count=10)
| kilianovski/my-neural-fields | notebooks/draft_01/sweep_pipeline.py | sweep_pipeline.py | py | 6,628 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.sqrt",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "torch.nn.Module",
"line_number": 24,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 24,
"usage_type": "name"
},
{
"api_name": "torch.nn.Linear",
"line_... |
6187883293 | from __future__ import annotations
from datetime import datetime, time
from discord.ext import tasks
from app.log import logger
from app.utils import is_last_month_day, is_sunday, catch_exception
from app.utils.message_stats_routine import UserStatsForCurrentDay, message_day_counter
from app.utils.data.user_stats import UserOverallStats, UserStatsForCurrentWeek, UserStatsForCurrentMonth, \
UserMaxStats, UserCurrentStats
class UserStats:
    """Scheduled discord.ext task loops that roll the in-memory per-day
    message counters into persistent per-day/week/month user statistics."""
    # All loop times are expressed in the host's local timezone.
    tz = datetime.now().astimezone().tzinfo
    daily_time = time(hour=23, tzinfo=tz)
    weekly_time = time(hour=23, minute=3, tzinfo=tz)
    monthly_time = time(hour=23, minute=6, tzinfo=tz)
    @tasks.loop(time=daily_time)
    @catch_exception
    async def daily_routine(self):
        """Persist today's counters and refresh the per-day maxima."""
        self.update_users_stats_by_end_of_day()
        self.update_user_max_stats_for_period("day")
        logger.info("Successfully updated user daily stats")
    @tasks.loop(time=weekly_time)
    @catch_exception
    async def weekly_routine(self):
        """On Sundays only, refresh the per-week maxima."""
        if not is_sunday():
            return
        self.update_user_max_stats_for_period("week")
        logger.info("Successfully updated user weekly stats")
    @tasks.loop(time=monthly_time)
    @catch_exception
    async def monthly_routine(self):
        """On the last day of the month only, refresh the per-month maxima."""
        if not is_last_month_day():
            return
        self.update_user_max_stats_for_period("month")
        logger.info("Successfully updated user month stats")
    @staticmethod
    def update_users_stats_by_end_of_day():
        """Add today's counters to the overall / current-week / current-month
        tables within a single connection."""
        with UserMaxStats.begin() as connect:
            for user_stats_class in [UserOverallStats, UserStatsForCurrentWeek, UserStatsForCurrentMonth]:
                UserCurrentStats.add_or_update_user_stats(message_day_counter.authors, user_stats_class, connect)
    def update_user_max_stats_for_period(self, period: str):
        """Compare current stats for *period* ('day'/'week'/'month') against
        the stored maxima and persist any new records."""
        users_new_data = self.get_current_users_stats(period)
        if not users_new_data:
            return
        messages_info, symbols_info = UserMaxStats.get_all_users_max_stats(period)
        if messages_info or symbols_info:
            grouped_old_user_info = self.group_users_stats(messages_info, symbols_info)
        else:
            grouped_old_user_info = {}
        UserMaxStats.compare_and_update_users_max_info(grouped_old_user_info, users_new_data, period)
    @staticmethod
    def get_current_users_stats(period: str) -> dict[int, UserStatsForCurrentDay] | None:
        """Return {user_id: stats} for *period*, or None for an unknown period."""
        users_info = {}
        if period == "day":
            # Today's numbers live in the in-memory message counter.
            return message_day_counter.authors
        elif period == "week":
            result = UserCurrentStats.fetch_users_current_stats_for_period(UserStatsForCurrentWeek)
        elif period == "month":
            result = UserCurrentStats.fetch_users_current_stats_for_period(UserStatsForCurrentMonth)
        else:
            return
        for user_id, messages, symbols in result:
            users_info[user_id] = UserStatsForCurrentDay(amount_of_symbols=symbols, amount_of_messages=messages)
        return users_info
    @staticmethod
    def group_users_stats(messages_info: list, symbols_info: list) -> dict[int, UserStatsForCurrentDay]:
        """Merge separate (user_id, amount) lists into one mapping.

        -100 is used when a user is missing from one list — presumably a
        sentinel any real value exceeds; confirm downstream comparison.
        """
        user_ids = {user_id for stats_info in (messages_info, symbols_info) for user_id, _ in stats_info}
        messages_info = {user_id: amount for user_id, amount in messages_info}
        symbols_info = {user_id: amount for user_id, amount in symbols_info}
        users_info = {}
        for user_id in user_ids:
            amount_of_messages = messages_info.get(user_id, -100)
            amount_of_symbols = symbols_info.get(user_id, -100)
            users_info[user_id] = UserStatsForCurrentDay(
                amount_of_messages=amount_of_messages,
                amount_of_symbols=amount_of_symbols,
            )
        return users_info
# Module-level singleton; its task loops are started by the bot elsewhere.
user_stats = UserStats()
| range-kun/pituhon-bot | app/utils/message_stats_routine/user_stats_routine.py | user_stats_routine.py | py | 3,790 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "datetime.time",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "datetime.time",... |
2019695848 | #!/usr/bin/env python3.6
# -*-encoding=utf8-*-
import time
import pyquery
import requests
from fake_useragent import UserAgent
from spider.log import logging as log
class Get:
    """Small HTTP fetch helper with a fixed retry policy.

    Fixes relative to the original: the requests.Session was leaked on the
    success path (``break`` before ``s.close()``) and on exceptions — a
    ``with`` block now closes it on every path; ``r.encoding`` may be None
    and is defaulted to utf-8; a dead unconditional second ``break`` in
    ``binary()`` was removed.
    """
    def __init__(self, url: str, try_time=9, try_sec=2):
        """:param url: target URL.
        :param try_time: number of extra attempts after the first.
        :param try_sec: seconds to sleep between failed attempts.
        """
        ua = UserAgent()
        self._url = url
        self._try_time = try_time
        self._try_sec = try_sec
        self._header = {
            'User-Agent': ua.ie,
            'Accept-Encoding': ', '.join(('gzip', 'deflate')),
            'Accept': '*/*',
            'Connection': 'keep-alive',
        }
    def html(self) -> str:
        """Fetch the URL and return its HTML, or '' after exhausting retries."""
        text = ''
        attempt = 0
        retry = 1
        while attempt <= self._try_time:
            try:
                # Session is closed on every path, including break/raise.
                with requests.Session() as s:
                    r = s.get(self._url, headers=self._header)
                    if r.status_code == requests.codes.ok:
                        # Guard against a response with no declared charset.
                        doc = pyquery.PyQuery(r.text.encode(r.encoding or 'utf-8'))
                        text = doc.html()
                        break
            except Exception:
                log.warning(self._url + '重试:' + str(retry))
                retry += 1
                time.sleep(self._try_sec)
            attempt += 1
        return text
    def binary(self):
        """Fetch the URL and return the raw bytes, or None after retries."""
        binary = None
        attempt = 0
        retry = 1
        while attempt <= self._try_time:
            try:
                with requests.Session() as s:
                    r = s.get(self._url)
                    if r.status_code == requests.codes.ok:
                        binary = r.content
                        break
            except Exception:
                log.warning(self._url + '重试:' + str(retry))
                retry += 1
                time.sleep(self._try_sec)
            attempt += 1
        return binary
| dingjingmaster/library_t | python/spider/spider/get.py | get.py | py | 1,374 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "fake_useragent.UserAgent",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "requests.Session",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "requests.codes",
"line_number": 32,
"usage_type": "attribute"
},
{
"api_name": "pyquery... |
37411929075 | import os
import numpy as np
import matplotlib.pyplot as plt
from hyperion.model import ModelOutput
from hyperion.util.constants import pc
# Render one PNG frame per viewing angle of the radiative-transfer output.
# Create output directory if it does not already exist
if not os.path.exists('frames'):
    os.mkdir('frames')
# Open model
m = ModelOutput('flyaround_cube.rtout')
# Read image from model
image = m.get_image(distance=300 * pc, units='MJy/sr')
# image.val is now an array with four dimensions (n_view, n_y, n_x, n_wav)
for iview in range(image.val.shape[0]):
    # Open figure and create axes
    fig = plt.figure(figsize=(3, 3))
    ax = fig.add_subplot(1, 1, 1)
    # This is the command to show the image. The parameters vmin and vmax are
    # the min and max levels for the grayscale (remove for default values).
    # The colormap is set here to be a heat map. Other possible heat maps
    # include plt.cm.gray (grayscale), plt.cm.gist_yarg (inverted grayscale),
    # plt.cm.jet (default, colorful). The np.sqrt() is used to plot the
    # images on a sqrt stretch.
    ax.imshow(np.sqrt(image.val[iview, :, :, 0]), vmin=0, vmax=np.sqrt(2000.),
              cmap=plt.cm.gist_heat, origin='lower')
    # Save figure. The facecolor='black' and edgecolor='black' are for
    # esthetics, and hide the axes
    fig.savefig('frames/frame_%05i.png' % iview,
                facecolor='black', edgecolor='black')
    # Close figure (prevents matplotlib from accumulating open figures)
    plt.close(fig)
| hyperion-rt/hyperion | docs/tutorials/scripts/flyaround_cube_animate.py | flyaround_cube_animate.py | py | 1,402 | python | en | code | 51 | github-code | 36 | [
{
"api_name": "os.path.exists",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "os.mkdir",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "hyperion.model.ModelOutput",
... |
30586982057 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import wx
from regionfixer_core.version import version_string as rf_ver
from gui.version import version_string as gui_ver
class AboutWindow(wx.Frame):
    """"About" window showing Region-Fixer version strings and project links.

    Closing is done by hiding the window (see OnClose) so it can be reshown.
    """
    def __init__(self, parent, title="About"):
        wx.Frame.__init__(self, parent, title=title,
                          style=wx.CLOSE_BOX | wx.RESIZE_BORDER | wx.CAPTION)
        # Every windows should use panel as parent. Not doing so will
        # make the windows look non-native (very ugly)
        panel = wx.Panel(self)
        # Version banner (GUI version + bundled Region-Fixer version).
        self.about1 = wx.StaticText(panel, style=wx.ALIGN_CENTER,
                                    label="Minecraft Region-Fixer (GUI) (ver. {0})\n(using Region-Fixer ver. {1})".format(gui_ver,rf_ver))
        self.about2 = wx.StaticText(panel, style=wx.ALIGN_CENTER,
                                    label="Fix problems in Minecraft worlds.")
        self.about3 = wx.StaticText(panel, style=wx.ALIGN_CENTER,
                                    label="Official-web:")
        self.link_github = wx.HyperlinkCtrl(panel, wx.ID_ABOUT,
                                            "https://github.com/Fenixin/Minecraft-Region-Fixer",
                                            "https://github.com/Fenixin/Minecraft-Region-Fixer",
                                            style=wx.ALIGN_CENTER)
        self.about4 = wx.StaticText(panel,
                                    style=wx.TE_MULTILINE | wx.ALIGN_CENTER,
                                    label="Minecraft forums post:")
        self.link_minecraft_forums = wx.HyperlinkCtrl(panel, wx.ID_ABOUT,
                                                      "http://www.minecraftforum.net/topic/302380-minecraft-region-fixer/",
                                                      "http://www.minecraftforum.net/topic/302380-minecraft-region-fixer/",
                                                      style=wx.ALIGN_CENTER)
        self.close_button = wx.Button(panel, wx.ID_CLOSE)
        # Stack everything vertically, centered.
        self.sizer = wx.BoxSizer(wx.VERTICAL)
        self.sizer.Add(self.about1, 0, wx.ALIGN_CENTER | wx.TOP, 10)
        self.sizer.Add(self.about2, 0, wx.ALIGN_CENTER| wx.TOP, 20)
        self.sizer.Add(self.about3, 0, wx.ALIGN_CENTER | wx.TOP, 20)
        self.sizer.Add(self.link_github, 0, wx.ALIGN_CENTER | wx.ALL, 5)
        self.sizer.Add(self.about4, 0, wx.ALIGN_CENTER | wx.TOP, 20)
        self.sizer.Add(self.link_minecraft_forums, 0,wx.ALIGN_CENTER | wx.ALL, 5)
        self.sizer.Add(self.close_button, 0, wx.ALIGN_CENTER | wx.ALL, 20)
        # Fit sizers and make the windows not resizable
        panel.SetSizerAndFit(self.sizer)
        self.sizer.Fit(self)
        size = self.GetSize()
        self.SetMinSize(size)
        self.SetMaxSize(size)
        self.Bind(wx.EVT_BUTTON, self.OnClose, self.close_button)
    def OnClose(self, e):
        # Hide rather than destroy so the window can be reopened later.
        self.Show(False)
| Fenixin/Minecraft-Region-Fixer | gui/about.py | about.py | py | 2,684 | python | en | code | 509 | github-code | 36 | [
{
"api_name": "wx.Frame",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "wx.Frame.__init__",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "wx.Frame",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "wx.CLOSE_BOX",
"lin... |
70306634025 | from audioop import reverse
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.db import transaction
from django.http.response import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404
from registration.forms import SignupForm
from user.models import Person, Studentity, University, Department, Tag
# registration is done in this view
# @transaction.atomic
def signup(request):
    """Render the sign-up form (GET) or create a new account (POST).

    On a valid POST, creates a ``User`` and links it to a new ``Person``
    and ``Studentity`` record, then redirects the browser to the initial
    tag-selection step. On an invalid POST the form is re-rendered with
    its errors.
    """
    if request.method == 'GET':
        return render(request, 'sign-up.html', {'form': SignupForm()})
    # elif request.method == 'POST'
    form = SignupForm(request.POST)
    if form.is_valid():
        user = User.objects.create_user(username=form.cleaned_data['username'], password=form.cleaned_data['password'])
        p = Person()
        p.user = user
        # NOTE(review): create_user() already persists the user; this
        # extra save looks redundant — confirm before removing.
        user.save()
        p.save()
        s = Studentity()
        s.person = p
        s.student_id = form.cleaned_data['student_id']
        # assumes a Department row named 'unknown' exists — TODO confirm
        # it is created by a fixture/migration.
        s.department = Department.objects.get(name='unknown')
        s.save()
        return HttpResponseRedirect(reverse('registration_select_initial_tags', args=[user.username, p.id]))
    else:
        return render(request, 'sign-up.html', {'form': form, 'status': 'Notice errors below:'})
# after signup, initial tags will be selected by the new user
def select_initial_tags(request, username, identifier):
    """Let the freshly registered user pick top-level interest tags.

    ``identifier`` is the primary key of the ``Person`` created during
    sign-up; ``username`` is unused inside this view (it is part of the
    URL pattern).
    """
    if request.method == 'GET':
        # NOTE(review): template is named 'sing-up-tags.html' — presumably
        # a typo for 'sign-up-tags.html'; must match the file on disk, so
        # verify before renaming.
        return render(request, 'sing-up-tags.html', {'super_tags': Tag.objects.filter(parent=None)})
    # elif request.method == 'POST'
    person = Person.objects.get(id=identifier)
    super_tags = Tag.objects.filter(parent=None)
    for tag in super_tags:
        # Checkbox inputs are named after the tag; presence in POST means
        # the user ticked it. M2M add() persists immediately.
        if tag.name in request.POST:
            person.interested_tags.add(tag)
    return render(request, 'sing-up-tags.html', {'super_tags': Tag.objects.filter(parent=None)})
| Hosseinyousefi23/smallApp | registration/views.py | views.py | py | 1,854 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "registration.forms.SignupForm",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "registration.forms.SignupForm",
"line_number": 19,
"usage_type": "call"
},
{
... |
29084870079 | from sklearn.datasets import fetch_20newsgroups
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import classification_report
def naviebayes():
    """Classify 20-newsgroups posts with TF-IDF features and multinomial NB.

    Loads the full 20-newsgroups corpus (cached under './'), splits it
    75/25 into train/test, and prints a per-class precision/recall report.

    NOTE(review): the name looks like a typo for ``naivebayes``; kept as-is
    because it is this module's public entry point.
    """
    news = fetch_20newsgroups(data_home='./', subset='all')
    x_train, x_test, y_train, y_test = train_test_split(news.data, news.target, test_size=0.25)
    # Extract TF-IDF features: fit on the training split only, then apply
    # the same vocabulary to the test split.
    tf = TfidfVectorizer()
    x_train = tf.fit_transform(x_train)
    # print(tf.get_feature_names())
    x_test = tf.transform(x_test)
    # Fit the multinomial naive Bayes classifier (additive smoothing).
    nb = MultinomialNB(alpha=1.0)
    # print(x_train)
    nb.fit(x_train, y_train)
    y_predict = nb.predict(x_test)
    # print("predicted article categories:", y_predict)
    # print("accuracy:", nb.score(x_test, y_test))
    print("每个类别的精确率和召回率:", classification_report(y_test, y_predict, target_names=news.target_names))


if __name__ == '__main__':
    naviebayes()
| shnehna/machine_study | 朴素贝叶斯算法/NB.py | NB.py | py | 1,039 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sklearn.datasets.fetch_20newsgroups",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "sklearn.model_selection.train_test_split",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sklearn.feature_extraction.text.TfidfVectorizer",
"line_number": ... |
17772414022 | import swapper
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist
from formula_one.mixins.period_mixin import ActiveStatus
def get_role(person, role_name, active_status=ActiveStatus.ANY, silent=False, *args, **kwargs):
    """
    Fetch the role instance of the given kind fulfilled by a person.

    :param person: the ``Person`` whose role is sought
    :param role_name: name of the role class; for custom roles, the dotted
        ``'<app>.<Model>'`` form together with ``is_custom_role=True``
    :param active_status: whether the role was, is, isn't or will be active
    :param silent: whether to fail silently (return None) or raise
    :return: the role instance, or None when ``silent`` and not found
    :raise: Role.DoesNotExist, if the role is not fulfilled by the person
    :raise: ImproperlyConfigured, if the role class name is incorrect
    """
    is_custom_role = kwargs.get('is_custom_role', False)
    try:
        if is_custom_role:
            # Custom roles are addressed as '<app_label>.<model_name>'.
            parts = role_name.split('.')
            Role = swapper.load_model(parts[0], parts[1])
        else:
            Role = swapper.load_model('kernel', role_name)
        try:
            return Role.objects_filter(active_status).get(person=person)
        except Role.DoesNotExist:
            if not silent:
                raise
    except ImproperlyConfigured:
        if not silent:
            raise
    return None
def get_all_roles(person):
    """
    Collect every configured role a person fulfils.

    :param person: the ``Person`` whose roles are sought
    :return: a dict mapping role names to their instance and ActiveStatus
    """
    all_roles = {}
    for role_name in settings.ROLES:
        try:
            role = get_role(
                person=person,
                role_name=role_name,
                active_status=ActiveStatus.ANY,
                silent=False,
                # Dotted names denote custom (swappable) role models.
                is_custom_role='.' in role_name,
            )
        except ObjectDoesNotExist:
            # The person simply does not hold this role; skip it.
            continue
        all_roles[role_name] = {
            'instance': role,
            'activeStatus': role.active_status,
        }
    return all_roles
| IMGIITRoorkee/omniport-backend | omniport/core/kernel/managers/get_role.py | get_role.py | py | 2,280 | python | en | code | 67 | github-code | 36 | [
{
"api_name": "formula_one.mixins.period_mixin.ActiveStatus.ANY",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "formula_one.mixins.period_mixin.ActiveStatus",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "swapper.load_model",
"line_number": 24,
... |
37626028800 | from flask import Blueprint, redirect, render_template, session, url_for
from .forms import InfoForms
from flask_learning.models import Post
from datetime import datetime
# Blueprint for the date-range picking demo pages.
date_time = Blueprint('date_time_picker', __name__)


@date_time.route("/dating", methods=['GET', 'POST'])
def select_date_time():
    """Show the date-range form; on a valid submit, stash both dates in
    the session and redirect to the filtered post listing."""
    form = InfoForms()
    if form.validate_on_submit():
        # Dates go through the session so the /datetime view can read them.
        session['startdate'] = form.start_date.data
        session['enddate'] = form.end_date.data
        return redirect(url_for('date_time_picker.dates'))
    return render_template('dating.html', form=form)
@date_time.context_processor
def formss():
    """Inject a fresh ``InfoForms`` instance into every template rendered
    under this blueprint."""
    form = InfoForms()
    return dict(form=form)
@date_time.route("/datetime", methods=["GET", "POST"])
def dates():
    """List the posts published inside the date range stored in the session.

    NOTE(review): raises KeyError if /dating was not visited first —
    presumably intentional for this demo; confirm.
    """
    startdate = session['startdate']
    enddate = session['enddate']
    # The session round-trip serializes the dates as RFC-1123-style strings;
    # re-parse and reformat them as ISO dates for the BETWEEN query.
    startdate = datetime.strptime(startdate, '%a, %d %b %Y %H:%M:%S %Z').strftime('%Y-%m-%d')
    enddate = datetime.strptime(enddate, '%a, %d %b %Y %H:%M:%S %Z').strftime('%Y-%m-%d')
    posts = Post.query.filter(Post.date_posted.between(startdate, enddate))
    return render_template('date.html', posts=posts)
| SunnyYadav16/Flask_Learning | flask_learning/date_time_picker/routes.py | routes.py | py | 1,121 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "forms.InfoForms",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "flask.session",
"line_number": 13,
"usage_type": "name"
},
{
"api_name": "flask.session",
"lin... |
4814315395 | from selenium import webdriver
from selenium.webdriver.common.by import By
# Selenium demo: open a page and print the text of every <p> element
# nested under the first <div>.
driver = webdriver.Chrome()
driver.get("https://www.example.com")

# Get the first element with tag name 'div'.
element = driver.find_element(By.TAG_NAME, 'div')

# Get all the elements available with tag name 'p' inside that div.
elements = element.find_elements(By.TAG_NAME, 'p')

for e in elements:
    print(e.text)
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 4,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.common.by.By.TAG_NAME",
"line_number": 8,
"usage_type": "attribute"
},
... |
17878440373 | # emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
r"""
Measures based on noise measurements
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_cjv:
- :py:func:`~mriqc.qc.anatomical.cjv` -- **coefficient of joint variation**
(:abbr:`CJV (coefficient of joint variation)`):
The ``cjv`` of GM and WM was proposed as objective function by [Ganzetti2016]_ for
the optimization of :abbr:`INU (intensity non-uniformity)` correction algorithms.
Higher values are related to the presence of heavy head motion and large
:abbr:`INU (intensity non-uniformity)` artifacts. Lower values are better.
.. _iqms_cnr:
- :py:func:`~mriqc.qc.anatomical.cnr` -- **contrast-to-noise ratio**
(:abbr:`CNR (contrast-to-noise ratio)`): The ``cnr`` [Magnota2006]_,
is an extension of the :abbr:`SNR (signal-to-noise Ratio)` calculation
to evaluate how separated the tissue distributions of GM and WM are.
Higher values indicate better quality.
.. _iqms_snr:
- :py:func:`~mriqc.qc.anatomical.snr` -- **signal-to-noise ratio**
(:abbr:`SNR (signal-to-noise ratio)`): calculated within the
tissue mask.
.. _iqms_snrd:
- :py:func:`~mriqc.qc.anatomical.snr_dietrich`: **Dietrich's SNR**
(:abbr:`SNRd (signal-to-noise ratio, Dietrich 2007)`) as proposed
by [Dietrich2007]_, using the air background as reference.
.. _iqms_qi2:
- :py:func:`~mriqc.qc.anatomical.art_qi2`: **Mortamet's quality index 2**
(:abbr:`QI2 (quality index 2)`) is a calculation of the goodness-of-fit
of a :math:`\chi^2` distribution on the air mask,
once the artifactual intensities detected for computing
the :abbr:`QI1 (quality index 1)` index have been removed [Mortamet2009]_.
Lower values are better.
Measures based on information theory
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_efc:
- :py:func:`~mriqc.qc.anatomical.efc`:
The :abbr:`EFC (Entropy Focus Criterion)`
[Atkinson1997]_ uses the Shannon entropy of voxel intensities as
an indication of ghosting and blurring induced by head motion.
Lower values are better.
The original equation is normalized by the maximum entropy, so that the
:abbr:`EFC (Entropy Focus Criterion)` can be compared across images with
different dimensions.
.. _iqms_fber:
- :py:func:`~mriqc.qc.anatomical.fber`:
The :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_,
defined as the mean energy of image values within the head relative
to outside the head [QAP-measures]_.
Higher values are better.
Measures targeting specific artifacts
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
.. _iqms_inu:
- **inu_\*** (*nipype interface to N4ITK*): summary statistics (max, min and median)
of the :abbr:`INU (intensity non-uniformity)` field (bias field) as extracted
by the N4ITK algorithm [Tustison2010]_. Values closer to 1.0 are better, values
further from zero indicate greater RF field inhomogeneity.
.. _iqms_qi:
- :py:func:`~mriqc.qc.anatomical.art_qi1`:
Detect artifacts in the image using the method described in [Mortamet2009]_.
The :abbr:`QI1 (quality index 1)` is the proportion of voxels with intensity
corrupted by artifacts normalized by the number of voxels in the background.
Lower values are better.
.. figure:: ../resources/mortamet-mrm2009.png
The workflow to compute the artifact detection from [Mortamet2009]_.
.. _iqms_wm2max:
- :py:func:`~mriqc.qc.anatomical.wm2max`:
The white-matter to maximum intensity ratio is the median intensity
within the WM mask over the 95% percentile of the full intensity
distribution, that captures the existence of long tails due to
hyper-intensity of the carotid vessels and fat. Values
should be around the interval [0.6, 0.8].
Other measures
^^^^^^^^^^^^^^
.. _iqms_fwhm:
- **fwhm** (*nipype interface to AFNI*): The :abbr:`FWHM (full-width half maximum)` of
the spatial distribution of the image intensity values in units of voxels [Forman1995]_.
Lower values are better, higher values indicate a blurrier image. Uses the gaussian
width estimator filter implemented in AFNI's ``3dFWHMx``:
.. math ::
\text{FWHM} = \sqrt{-{\left[4 \ln{(1-\frac{\sigma^2_{X^m_{i+1,j}-X^m_{i,j}}}
{2\sigma^2_{X^m_{i,j}}}})\right]}^{-1}}
.. _iqms_icvs:
- :py:func:`~mriqc.qc.anatomical.volume_fraction` (**icvs_\***):
the
:abbr:`ICV (intracranial volume)` fractions of :abbr:`CSF (cerebrospinal fluid)`,
:abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. They should move within
a normative range.
.. _iqms_rpve:
- :py:func:`~mriqc.qc.anatomical.rpve` (**rpve_\***): the
:abbr:`rPVe (residual partial voluming error)` of :abbr:`CSF (cerebrospinal fluid)`,
:abbr:`GM (gray-matter)` and :abbr:`WM (white-matter)`. Lower values are better.
.. _iqms_summary:
- :py:func:`~mriqc.qc.anatomical.summary_stats` (**summary_\*_\***):
Mean, standard deviation, 5% percentile and 95% percentile of the distribution
of background, :abbr:`CSF (cerebrospinal fluid)`, :abbr:`GM (gray-matter)` and
:abbr:`WM (white-matter)`.
.. _iqms_tpm:
- **overlap_\*_\***:
The overlap of the :abbr:`TPMs (tissue probability maps)` estimated from the image and
the corresponding maps from the ICBM nonlinear-asymmetric 2009c template. Higher
values are better.
.. math ::
\text{JI}^k = \frac{\sum_i \min{(\text{TPM}^k_i, \text{MNI}^k_i)}}
{\sum_i \max{(\text{TPM}^k_i, \text{MNI}^k_i)}}
.. topic:: References
.. [Dietrich2007] Dietrich et al., *Measurement of SNRs in MR images: influence
of multichannel coils, parallel imaging and reconstruction filters*, JMRI 26(2):375--385.
2007. doi:`10.1002/jmri.20969 <http://dx.doi.org/10.1002/jmri.20969>`_.
.. [Ganzetti2016] Ganzetti et al., *Intensity inhomogeneity correction of structural MR images:
a data-driven approach to define input algorithm parameters*. Front Neuroinform 10:10. 2016.
doi:`10.3389/finf.201600010 <http://dx.doi.org/10.3389/finf.201600010>`_.
.. [Magnota2006] Magnotta, VA., & Friedman, L., *Measurement of signal-to-noise
and contrast-to-noise in the fBIRN multicenter imaging study*.
J Dig Imag 19(2):140-147, 2006. doi:`10.1007/s10278-006-0264-x
<http://dx.doi.org/10.1007/s10278-006-0264-x>`_.
.. [Mortamet2009] Mortamet B et al., *Automatic quality assessment in
structural brain magnetic resonance imaging*, Mag Res Med 62(2):365-372,
2009. doi:`10.1002/mrm.21992 <http://dx.doi.org/10.1002/mrm.21992>`_.
.. [Tustison2010] Tustison NJ et al., *N4ITK: improved N3 bias correction*,
IEEE Trans Med Imag, 29(6):1310-20,
2010. doi:`10.1109/TMI.2010.2046908 <http://dx.doi.org/10.1109/TMI.2010.2046908>`_.
.. [Shehzad2015] Shehzad Z et al., *The Preprocessed Connectomes Project
Quality Assessment Protocol - a resource for measuring the quality of MRI data*,
Front. Neurosci. Conference Abstract: Neuroinformatics 2015.
doi:`10.3389/conf.fnins.2015.91.00047 <https://doi.org/10.3389/conf.fnins.2015.91.00047>`_.
.. [Forman1995] Forman SD et al., *Improved assessment of significant activation in functional
magnetic resonance imaging (fMRI): use of a cluster-size threshold*,
Magn. Reson. Med. 33 (5), 636–647, 1995.
doi:`10.1002/mrm.1910330508 <https://doi.org/10.1002/mrm.1910330508>`_.
mriqc.qc.anatomical module
^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
import os.path as op
from sys import version_info
from math import pi, sqrt
import numpy as np
import scipy.ndimage as nd
from scipy.stats import kurtosis # pylint: disable=E0611
DIETRICH_FACTOR = 1.0 / sqrt(2 / (4 - pi))
FSL_FAST_LABELS = {"csf": 1, "gm": 2, "wm": 3, "bg": 0}
PY3 = version_info[0] > 2
def snr(mu_fg, sigma_fg, n):
    r"""
    Compute the :abbr:`SNR (Signal-to-Noise Ratio)` from one foreground region.

    When only a foreground region is available, the noise is estimated from
    that same region with a Bessel-style correction:

    .. math::

        \text{SNR} = \frac{\mu_F}{\sigma_F\sqrt{n/(n-1)}},

    where :math:`\mu_F` and :math:`\sigma_F` are the mean and standard
    deviation of the foreground intensities.

    :param float mu_fg: mean of foreground.
    :param float sigma_fg: standard deviation of foreground.
    :param int n: number of voxels in foreground mask.

    :return: the computed SNR
    """
    bessel_correction = sqrt(n / (n - 1))
    return float(mu_fg / (sigma_fg * bessel_correction))
def snr_dietrich(mu_fg, sigma_air):
    r"""
    Compute Dietrich's :abbr:`SNR (Signal-to-Noise Ratio)` [Dietrich2007]_.

    Uses the air background as noise reference; the mask must surround the
    head and be free of artifacts. Follows eq. A.12 of [Dietrich2007]_,
    correcting the air standard deviation for its Rayleigh distribution:

    .. math::

        \text{SNR} = \frac{\mu_F}{\sqrt{\frac{2}{4-\pi}}\,\sigma_\text{air}}.

    :param float mu_fg: mean of foreground.
    :param float sigma_air: standard deviation of the air around the head.

    :return: the computed SNR for the foreground segmentation
    """
    sigma = sigma_air
    if sigma < 1.0:
        from .. import config

        # A near-zero background sigma would blow the ratio up; warn and
        # regularize by adding one (matching the historical behavior).
        config.loggers.interface.warning(
            f"SNRd - background sigma is too small ({sigma_air})"
        )
        sigma = sigma_air + 1.0

    return float(DIETRICH_FACTOR * mu_fg / sigma)
def cnr(mu_wm, mu_gm, sigma_air):
    r"""
    Compute the :abbr:`CNR (Contrast-to-Noise Ratio)` [Magnota2006]_.

    The separation of the GM and WM mean intensities relative to the
    background noise level; higher values indicate better quality:

    .. math::

        \text{CNR} = \frac{|\mu_\text{GM} - \mu_\text{WM}|}{\sigma_B},

    where :math:`\sigma_B` is the standard deviation of the noise
    distribution within the air (background) mask.

    :param float mu_wm: mean of signal within white-matter mask.
    :param float mu_gm: mean of signal within gray-matter mask.
    :param float sigma_air: standard deviation of the air around the head.

    :return: the computed CNR
    """
    tissue_contrast = abs(mu_wm - mu_gm)
    return float(tissue_contrast / sigma_air)
def cjv(mu_wm, mu_gm, sigma_wm, sigma_gm):
    r"""
    Compute the :abbr:`CJV (coefficient of joint variation)` of GM and WM.

    A proxy for the :abbr:`INU (intensity non-uniformity)` artifact,
    proposed as an optimization objective in [Ganzetti2016]_; related to
    SNR and CNR. Lower is better.

    .. math::

        \text{CJV} = \frac{\sigma_\text{WM} + \sigma_\text{GM}}{|\mu_\text{WM} - \mu_\text{GM}|}.

    :param float mu_wm: mean of signal within white-matter mask.
    :param float mu_gm: mean of signal within gray-matter mask.
    :param float sigma_wm: standard deviation of signal within white-matter mask.
    :param float sigma_gm: standard deviation of signal within gray-matter mask.

    :return: the computed CJV
    """
    pooled_spread = sigma_wm + sigma_gm
    tissue_contrast = abs(mu_wm - mu_gm)
    return float(pooled_spread / tissue_contrast)
def fber(img, headmask, rotmask=None):
    r"""
    Compute the :abbr:`FBER (Foreground-Background Energy Ratio)` [Shehzad2015]_.

    The median energy of image values within the head relative to outside
    the head. Higher values are better. Returns 0 when the background
    energy is negligible (below 1e-3).

    .. math::

        \text{FBER} = \frac{E[|F|^2]}{E[|B|^2]}

    :param numpy.ndarray img: input data
    :param numpy.ndarray headmask: mask of the head (including skull, skin, etc.)
    :param numpy.ndarray rotmask: mask of empty voxels inserted after a
        rotation of the data (excluded from the background)
    """
    foreground_energy = np.median(np.abs(img[headmask > 0]) ** 2)

    # Background = everything outside the head, minus rotation padding.
    background = np.ones_like(headmask, dtype=np.uint8)
    background[headmask > 0] = 0
    if rotmask is not None:
        background[rotmask > 0] = 0
    background_energy = np.median(np.abs(img[background == 1]) ** 2)

    if background_energy < 1.0e-3:
        return 0
    return float(foreground_energy / background_energy)
def efc(img, framemask=None):
    r"""
    Compute the :abbr:`EFC (Entropy Focus Criterion)` [Atkinson1997]_.

    Uses the Shannon entropy of voxel intensities as an indication of
    ghosting and blurring induced by head motion. Lower is better, with
    EFC = 0 when all the energy is concentrated in one voxel. The result
    is normalized by the maximum possible entropy so images of different
    dimensions remain comparable:

    .. math::

        \text{EFC} = \left( \frac{N}{\sqrt{N}} \, \log{\sqrt{N}^{-1}} \right)
        \left(- \sum_{j=1}^N \frac{x_j}{x_\text{max}}
        \ln \left[\frac{x_j}{x_\text{max}}\right]\right)

    with :math:`x_\text{max} = \sqrt{\sum_{j=1}^N x^2_j}`.

    :param numpy.ndarray img: input data
    :param numpy.ndarray framemask: mask of empty voxels inserted after a
        rotation of the data (excluded from the computation)
    """
    if framemask is None:
        framemask = np.zeros_like(img, dtype=np.uint8)

    voxels = img[framemask == 0]
    n_vox = np.sum(1 - framemask)

    # Maximum EFC, reached when every voxel carries the same value.
    efc_max = 1.0 * n_vox * (1.0 / np.sqrt(n_vox)) * np.log(1.0 / np.sqrt(n_vox))

    # Total image energy.
    b_max = np.sqrt((voxels ** 2).sum())

    # The 1e-16 offset keeps the logarithm finite for zero-valued voxels.
    entropy = np.sum((voxels / b_max) * np.log((voxels + 1e-16) / b_max))
    return float((1.0 / efc_max) * entropy)
def wm2max(img, mu_wm):
    r"""
    Compute the :abbr:`WM2MAX (white-matter-to-max ratio)`.

    Ratio of the white-matter mean intensity to the 99.95th percentile of
    the full intensity distribution — the latter captures the long
    hyper-intense tails caused by carotid vessels and fat. Values should
    lie around [0.6, 0.8]:

    .. math ::

        \text{WM2MAX} = \frac{\mu_\text{WM}}{P_{99.95}(X)}
    """
    intensity_ceiling = np.percentile(img.reshape(-1), 99.95)
    return float(mu_wm / intensity_ceiling)
def art_qi1(airmask, artmask):
    r"""
    Compute Mortamet's quality index 1 (:math:`\text{QI}_1`) [Mortamet2009]_.

    The proportion of background voxels whose intensity is corrupted by
    artifacts, normalized by the total number of background voxels:

    .. math ::

        \text{QI}_1 = \frac{1}{N} \sum\limits_{x\in X_\text{art}} 1

    Lower values are better.

    :param numpy.ndarray airmask: input air mask, without artifacts
    :param numpy.ndarray artmask: input artifacts mask
    """
    # Voxels surviving the opening operation are counted as artifacts.
    n_artifacts = artmask.sum()
    n_background = airmask.sum() + n_artifacts
    return float(n_artifacts / n_background)
def art_qi2(img, airmask, min_voxels=int(1e3), max_voxels=int(3e5), save_plot=True):
    r"""
    Calculates :math:`\text{QI}_2`, based on the goodness-of-fit of a centered
    :math:`\chi^2` distribution onto the intensity distribution of
    non-artifactual background (within the "hat" mask):

    .. math ::

        \chi^2_n = \frac{2}{(\sigma \sqrt{2})^{2n} \, (n - 1)!}x^{2n - 1}\, e^{-\frac{x}{2}}

    where :math:`n` is the number of coil elements.

    :param numpy.ndarray img: input data
    :param numpy.ndarray airmask: input air mask without artifacts
    :param int min_voxels: below this many positive background voxels the
        fit is skipped and 0.0 is returned
    :param int max_voxels: the KDE model is fit on at most this many voxels
        (random subsample)
    :param bool save_plot: render the fit to an SVG via mriqc's plotting
    :return: tuple (goodness-of-fit, path to the plot/placeholder SVG)
    """
    from sklearn.neighbors import KernelDensity
    from scipy.stats import chi2
    from mriqc.viz.misc import plot_qi2

    # Fixed seed for reproducible subsampling ("S. Ogawa was born").
    np.random.seed(1191935)

    # Positive background intensities only.
    data = img[airmask > 0]
    data = data[data > 0]

    # Write a placeholder figure up front; side effect: creates error.svg
    # in the current working directory even when the fit succeeds later.
    out_file = op.abspath("error.svg")
    with open(out_file, "w") as ofh:
        ofh.write("<p>Background noise fitting could not be plotted.</p>")

    if len(data) < min_voxels:
        return 0.0, out_file

    modelx = data if len(data) < max_voxels else np.random.choice(data, size=max_voxels)

    x_grid = np.linspace(0.0, np.percentile(data, 99), 1000)

    # Estimate data pdf with KDE on a random subsample
    kde_skl = KernelDensity(
        bandwidth=0.05 * np.percentile(data, 98), kernel="gaussian"
    ).fit(modelx[:, np.newaxis])
    kde = np.exp(kde_skl.score_samples(x_grid[:, np.newaxis]))

    # Find cutoff: index (from the high end) where the KDE falls below
    # half of its peak.
    kdethi = np.argmax(kde[::-1] > kde.max() * 0.5)

    # Fit X^2 on the sub-95th-percentile intensities.
    param = chi2.fit(modelx[modelx < np.percentile(data, 95)], 32)
    chi_pdf = chi2.pdf(x_grid, *param[:-2], loc=param[-2], scale=param[-1])

    # Compute goodness-of-fit (gof) over the upper tail only.
    gof = float(np.abs(kde[-kdethi:] - chi_pdf[-kdethi:]).mean())
    if save_plot:
        out_file = plot_qi2(x_grid, kde, chi_pdf, modelx, kdethi)

    return gof, out_file
def volume_fraction(pvms):
    r"""
    Compute the :abbr:`ICV (intracranial volume)` fraction of each tissue.

    .. math ::

        \text{ICV}^k = \frac{\sum_i p^k_i}{\sum\limits_{x \in X_\text{brain}} 1}

    :param list pvms: list of :code:`numpy.ndarray` of partial volume maps,
        ordered per ``FSL_FAST_LABELS`` (background excluded).
    :return: dict mapping tissue name to its fraction of the total volume.
    """
    # Per-tissue soft volumes (background label 0 has no map).
    volumes = {
        tissue: pvms[label - 1].sum()
        for tissue, label in FSL_FAST_LABELS.items()
        if label != 0
    }
    total = sum(volumes.values())
    return {tissue: float(vol / total) for tissue, vol in volumes.items()}
def rpve(pvms, seg):
    """
    Computes the :abbr:`rPVe (residual partial voluming error)`
    of each tissue class.

    .. math ::

        \\text{rPVE}^k = \\frac{1}{N} \\left[ \\sum\\limits_{p^k_i \
\\in [0.5, P_{98}]} p^k_i + \\sum\\limits_{p^k_i \\in [P_{2}, 0.5)} 1 - p^k_i \\right]

    :param list pvms: partial-volume maps, one :code:`numpy.ndarray` per
        tissue (FSL FAST ordering; background label has no map).
    :param seg: unused; kept for backward compatibility of the signature.
    :return: dict mapping each tissue name to its rPVE.
    """
    pvfs = {}
    for k, lid in list(FSL_FAST_LABELS.items()):
        if lid == 0:
            continue
        # Work on a copy: the previous implementation clamped and
        # thresholded the caller's array in place, silently corrupting
        # ``pvms`` for any subsequent consumer.
        pvmap = pvms[lid - 1].copy()
        pvmap[pvmap < 0.0] = 0.0
        pvmap[pvmap >= 1.0] = 1.0
        totalvol = np.sum(pvmap > 0.0)
        # Discard the extreme 2% tails before accumulating the error.
        upth = np.percentile(pvmap[pvmap > 0], 98)
        loth = np.percentile(pvmap[pvmap > 0], 2)
        pvmap[pvmap < loth] = 0
        pvmap[pvmap > upth] = 0
        # Voxels above 0.5 contribute their PV value; voxels at or below
        # 0.5 contribute their complement.
        pvfs[k] = (
            pvmap[pvmap > 0.5].sum() + (1.0 - pvmap[pvmap <= 0.5]).sum()
        ) / totalvol
    return {k: float(v) for k, v in list(pvfs.items())}
def summary_stats(img, pvms, airmask=None, erode=True):
    r"""
    Estimates the mean, the standard deviation, the 95\%
    and the 5\% percentiles of each tissue distribution.

    :param numpy.ndarray img: input intensity image.
    :param pvms: partial-volume maps; either a 4D stack (FSL FAST style,
        one 3D map per tissue) or a single 3D foreground map.
    :param numpy.ndarray airmask: optional explicit background mask that
        replaces the one implied by ``pvms``.
    :param bool erode: erode each binarized tissue mask before computing
        statistics, reducing partial-volume contamination at the borders.
    :return: dict of per-label statistics (mean, stdv, median, mad, p95,
        p05, kurtosis ``k`` and voxel count ``n``).

    .. warning ::

        Sometimes (with datasets that have been partially processed), the air
        mask will be empty. In those cases, the background stats will be zero
        for the mean, median, percentiles and kurtosis, the sum of voxels in
        the other remaining labels for ``n``, and finally the MAD and the
        :math:`\sigma` will be calculated as:

        .. math ::

            \sigma_\text{BG} = \sqrt{\sum \sigma_\text{i}^2}

    """
    from .. import config
    from statsmodels.robust.scale import mad

    # Check type of input masks
    dims = np.squeeze(np.array(pvms)).ndim
    if dims == 4:
        # If pvms is from FSL FAST, create the bg mask
        stats_pvms = [np.zeros_like(img)] + pvms
    elif dims == 3:
        # Single foreground map: background is its complement.
        stats_pvms = [np.ones_like(pvms) - pvms, pvms]
    else:
        raise RuntimeError(
            "Incorrect image dimensions ({0:d})".format(np.array(pvms).ndim)
        )

    if airmask is not None:
        stats_pvms[0] = airmask

    labels = list(FSL_FAST_LABELS.items())
    if len(stats_pvms) == 2:
        labels = list(zip(["bg", "fg"], list(range(2))))

    output = {}
    for k, lid in labels:
        # Binarize the partial-volume map at 0.85.
        mask = np.zeros_like(img, dtype=np.uint8)
        mask[stats_pvms[lid] > 0.85] = 1

        if erode:
            struc = nd.generate_binary_structure(3, 2)
            mask = nd.binary_erosion(mask, structure=struc).astype(np.uint8)

        nvox = float(mask.sum())
        if nvox < 1e3:
            config.loggers.interface.warning(
                'calculating summary stats of label "%s" in a very small '
                "mask (%d voxels)",
                k,
                int(nvox),
            )
            # A (near-)empty background is handled by the synthetic "bg"
            # entry computed below instead.
            if k == "bg":
                continue

        output[k] = {
            "mean": float(img[mask == 1].mean()),
            "stdv": float(img[mask == 1].std()),
            "median": float(np.median(img[mask == 1])),
            "mad": float(mad(img[mask == 1])),
            "p95": float(np.percentile(img[mask == 1], 95)),
            "p05": float(np.percentile(img[mask == 1], 5)),
            "k": float(kurtosis(img[mask == 1])),
            "n": nvox,
        }

    if "bg" not in output:
        # Synthesize background stats from the other labels (see the
        # warning in the docstring).
        output["bg"] = {
            "mean": 0.0,
            "median": 0.0,
            "p95": 0.0,
            "p05": 0.0,
            "k": 0.0,
            "stdv": sqrt(sum(val["stdv"] ** 2 for _, val in list(output.items()))),
            "mad": sqrt(sum(val["mad"] ** 2 for _, val in list(output.items()))),
            "n": sum(val["n"] for _, val in list(output.items())),
        }

    if "bg" in output and output["bg"]["mad"] == 0.0 and output["bg"]["stdv"] > 1.0:
        config.loggers.interface.warning(
            "estimated MAD in the background was too small (MAD=%f)",
            output["bg"]["mad"],
        )
        # Fall back to deriving MAD from sigma via the Dietrich (Rayleigh)
        # correction factor.
        output["bg"]["mad"] = output["bg"]["stdv"] / DIETRICH_FACTOR
    return output
def _prepare_mask(mask, label, erode=True):
fgmask = mask.copy()
if np.issubdtype(fgmask.dtype, np.integer):
if isinstance(label, (str, bytes)):
label = FSL_FAST_LABELS[label]
fgmask[fgmask != label] = 0
fgmask[fgmask == label] = 1
else:
fgmask[fgmask > 0.95] = 1.0
fgmask[fgmask < 1.0] = 0
if erode:
# Create a structural element to be used in an opening operation.
struc = nd.generate_binary_structure(3, 2)
# Perform an opening operation on the background data.
fgmask = nd.binary_opening(fgmask, structure=struc).astype(np.uint8)
return fgmask
| pGarciaS/PREEMACS | scripts/mriqc/mriqc/qc/anatomical.py | anatomical.py | py | 21,630 | python | en | code | 8 | github-code | 36 | [
{
"api_name": "math.sqrt",
"line_number": 197,
"usage_type": "call"
},
{
"api_name": "math.pi",
"line_number": 197,
"usage_type": "name"
},
{
"api_name": "sys.version_info",
"line_number": 199,
"usage_type": "name"
},
{
"api_name": "math.sqrt",
"line_number": ... |
1379123460 | import cv2
import time
from eye_tracking import EyeTracking
# Live demo: read webcam frames, run the gaze analyzer and overlay results.
eye_tracking = EyeTracking()
webcam = cv2.VideoCapture(0)  # default capture device

while True:
    _, frame = webcam.read()
    if frame is None:
        # Camera disconnected or stream ended.
        break
    # Feed the new frame to the analyzer and get back an annotated copy.
    eye_tracking.refresh(frame)
    frame = eye_tracking.annotated_frame()
    text = ""
    attention_text = ""

    # Classify gaze direction; looking at the center raises the attention
    # score, looking away lowers it.
    if eye_tracking.is_blinking():
        text = "Blinking"
    elif eye_tracking.is_right():
        # print('right')
        eye_tracking.is_attention -= 1
        text = "Looking right"
    elif eye_tracking.is_left():
        # print('left')
        eye_tracking.is_attention -= 1
        text = "Looking left"
    elif eye_tracking.is_center():
        # print('center')
        eye_tracking.is_attention += 1
        text = "Looking center"

    # attention example: clamp the score to [0, 100]
    if eye_tracking.is_attention > 100:
        eye_tracking.is_attention = 100
    elif eye_tracking.is_attention < 0:
        eye_tracking.is_attention = 0
    if eye_tracking.is_attention < 10:
        attention_text = "Cheer up"
    else:
        attention_text = "Good!"

    if eye_tracking.is_focus():
        print('focus!')
    else:
        print('hey!')

    cv2.putText(frame, text, (90, 60), cv2.FONT_HERSHEY_DUPLEX, 1.6, (147, 58, 31), 2)

    left_pupil = eye_tracking.pupil_left_coords()
    right_pupil = eye_tracking.pupil_right_coords()
    attention = eye_tracking.is_attention
    method = eye_tracking.get_method()
    # Overlay diagnostics: pupil coordinates, score, detection method.
    cv2.putText(frame, "Left pupil: " + str(left_pupil), (90, 130), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Right pupil: " + str(right_pupil), (90, 165), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Attention: " + str(attention), (90, 200), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, method, (90, 235), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)
    cv2.putText(frame, "Focus?: " + attention_text, (90, 270), cv2.FONT_HERSHEY_DUPLEX, 0.9, (147, 58, 31), 1)

    cv2.imshow("Demo", frame)

    # esc key quits the demo
    if cv2.waitKey(10) == 27:
        break

webcam.release()
cv2.destroyAllWindows()
{
"api_name": "eye_tracking.EyeTracking",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.VideoCapture",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "eye_tracking.refresh",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "eye_trac... |
29696828936 | import utils
import numpy as np
def parse_input(path):
    """Read rock paths from ``path`` and rasterise them onto a dense grid.

    Cell encoding: 0 = air, 1 = rock, 2 = sand.
    """
    raw_lines = utils.read_lines(path)
    rock_paths = [[utils.parse_coord_str(token) for token in line.split(" -> ")]
                  for line in raw_lines]

    # The grid spans from (0, 0) to the furthest rock coordinate.
    max_x = 0
    max_y = 0
    for rock_path in rock_paths:
        for point in rock_path:
            max_x = max(max_x, point[0])
            max_y = max(max_y, point[1])

    grid = np.zeros((max_y + 1, max_x + 1))
    # Paint each straight segment between consecutive path points.
    for rock_path in rock_paths:
        for start, end in zip(rock_path, rock_path[1:]):
            x0, x1 = sorted((start[0], end[0]))
            y0, y1 = sorted((start[1], end[1]))
            grid[y0:y1 + 1, x0:x1 + 1] = 1
    return grid
def drop_a_sand(grid, sx, sy):
    """Simulate one grain of sand released at (sx, sy).

    The grain falls straight down, then down-left, then down-right.
    Returns True (and marks the cell with 2) when it comes to rest;
    returns False when it falls off the bottom or the source is blocked.
    """
    height = grid.shape[0]
    x, y = sx, sy
    if grid[sy, sx] != 0:
        # Source cell already occupied.
        return False
    while y < height - 1:
        below = grid[y + 1, x]
        below_left = grid[y + 1, x - 1]
        below_right = grid[y + 1, x + 1]
        if below == 0:
            y += 1
        elif below_left == 0:
            y += 1
            x -= 1
        elif below_right == 0:
            y += 1
            x += 1
        else:
            # All three cells below are blocked: come to rest here.
            grid[y, x] = 2
            return True
    # Reached the bottom row: the grain drops off the grid.
    return False
def hpad(grid, n):
    """Pad ``grid`` with ``n`` columns of zeros on both left and right."""
    pad_cols = np.zeros((grid.shape[0], n))
    return np.concatenate([pad_cols, grid, pad_cols], axis=1)
def part1(path):
    """Drop sand from (500, 0) until a grain falls past the lowest rock,
    then print how many grains came to rest."""
    grid = parse_input(path)
    width = grid.shape[1]
    # One extra row below the rocks and one column of padding on each side
    # so the neighbour lookups in drop_a_sand() stay in bounds.
    grid = np.concatenate([grid, np.zeros((1, width))], axis=0)
    grid = hpad(grid, 1)
    n = 0
    # The +1 offsets the source x-coordinate by the left padding.
    while drop_a_sand(grid, 500 + 1, 0):
        n += 1
    print(n)
def part2(path):
grid = parse_input(path)
width = grid.shape[1]
padding_size = 500
grid = np.concatenate([grid, np.zeros((2, width))], axis=0)
grid = hpad(grid, padding_size)
grid[-1, :] = 1
n = 0
while drop_a_sand(grid, 500 + padding_size, 0):
n += 1
print(n) | dialogbox/adventofcode | py/2022/day14.py | day14.py | py | 2,028 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.read_lines",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "utils.parse_coord_str",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "numpy.concatenate",
... |
39844703302 | """
Iguana (c) by Marc Ammon, Moritz Fickenscher, Lukas Fridolin,
Michael Gunselmann, Katrin Raab, Christian Strate
Iguana is licensed under a
Creative Commons Attribution-ShareAlike 4.0 International License.
You should have received a copy of the license along with this
work. If not, see <http://creativecommons.org/licenses/by-sa/4.0/>.
"""
import datetime
from django.core.exceptions import ValidationError
from django.utils.translation import ugettext_lazy as _
def date_is_present_or_future(value):
    """
    Validate that *value* is today or later.

    Accepts exactly ``datetime.date`` or ``datetime.datetime`` instances;
    any other type, or a value in the past, raises ValidationError.
    """
    value_type = type(value)
    if value_type is datetime.date:
        in_past = value < datetime.date.today()
    elif value_type is datetime.datetime:
        in_past = value < datetime.datetime.today()
    else:
        # Unsupported type: reject with the same message.
        in_past = True
    if in_past:
        raise ValidationError(
            _("Enter a date starting from today")
        )
| midas66/iguana | src/common/validators.py | validators.py | py | 986 | python | en | code | null | github-code | 36 | [
{
"api_name": "datetime.date",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "datetime.date.today",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "datetime.date",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "django.core... |
1641551953 | # -*- coding: utf-8 -*-
import json
from datetime import datetime
from twisted.internet import reactor
from twisted.web.http import BAD_REQUEST
from twisted.web.server import NOT_DONE_YET
from config.config import CHAT_PER_PAGE, CHAT_CONNECTION_INTERVAL
from exception import BadRequest
from helper.chat_cmd import ChatCmdManager
from helper.model_control import get_chat_newer_than, get_chat_page,\
create_chat
from helper.resource import YuzukiResource, need_anybody_permission
from helper.template import render_template
from model.chat import Chat as ChatModel
from model.user import User as UserModel
def yuzuki_convert_int(num_str):
    """Parse *num_str* as an int; raise BadRequest on malformed input."""
    try:
        return int(num_str)
    except ValueError:
        raise BadRequest()
class Chat(YuzukiResource):
    """Chat page resource; parents the /user and /message subresources."""
    isLeaf = False

    def __init__(self):
        YuzukiResource.__init__(self)
        self.putChild("user", ChatUser())
        self.putChild("message", ChatMessage())

    @need_anybody_permission
    def render_GET(self, request):
        """Render the chat page with pagination info and known nicknames."""
        page = request.get_argument("page", None)
        chat_total_count = request.dbsession.query(ChatModel).count()
        page_total = chat_total_count / CHAT_PER_PAGE
        user_nicknames = request.dbsession.query(UserModel.nickname).all()
        plucked_user_nicknames = [nickname for (nickname, ) in user_nicknames]
        # BUG FIX: round the page count up when the last page is partial.
        # The original tested ``page_total % CHAT_PER_PAGE`` which compares
        # the page count (not the chat count) against the page size.
        if chat_total_count % CHAT_PER_PAGE != 0:
            page_total += 1
        context = {
            "CHAT_PER_PAGE": CHAT_PER_PAGE,
            "page": page,
            "page_total": page_total,
            "user_nicknames": json.dumps(plucked_user_nicknames)
        }
        return render_template("chat.html", request, context)
class ChatUser(YuzukiResource):
    """Parent resource for the chat-user endpoints (/data, /out, /stream)."""
    isLeaf = False

    def __init__(self):
        YuzukiResource.__init__(self)
        # All three children share one stream so they see the same user pool.
        stream = ChatUserStream()
        self.putChild("data", ChatUserData(stream))
        self.putChild("out", ChatUserOut(stream))
        self.putChild("stream", stream)
class ChatMessage(YuzukiResource):
    """Parent resource for the chat-message endpoints (/data, /stream)."""
    isLeaf = False

    def __init__(self):
        YuzukiResource.__init__(self)
        self.putChild("data", ChatMessageData())
        self.putChild("stream", ChatMessageStream())
class ChatMessageStream(YuzukiResource):
    """Long-polling endpoint for chat messages.

    GET parks the request until a new message arrives; POST stores a new
    message (or runs a "/" command) and wakes every parked GET.
    """

    def __init__(self):
        YuzukiResource.__init__(self)
        # Requests currently waiting for the next message notification.
        self.request_pool = list()
        self.cmd_manager = ChatCmdManager()

    @need_anybody_permission
    def render_GET(self, request):
        # Park the request; it is completed from render_POST.
        self.request_pool.append(request)
        return NOT_DONE_YET

    @need_anybody_permission
    def render_POST(self, request):
        content = request.get_argument("content")
        if content.startswith("/"):
            # Leading "/" marks a chat command rather than a plain message.
            chat, err = self.cmd_manager.process_cmd(request, content)
            if err:
                request.setResponseCode(BAD_REQUEST)
                return err
        else:
            chat = create_chat(request, content)
        request.dbsession.add(chat)
        request.dbsession.commit()
        # Wake all long-polling clients; ignore connections that died.
        for req in self.request_pool:
            try:
                req.write("message coming")
                req.finish()
            except:
                pass
        self.request_pool = []
        return "chat posted"
class ChatMessageData(YuzukiResource):
    """Return chat messages as JSON: newer than an id, or one page."""

    @need_anybody_permission
    def render_GET(self, request):
        chat_id = request.get_argument("id", None)
        page = request.get_argument("page", None)
        # At least one of the two selectors must be supplied.
        if not chat_id and not page:
            raise BadRequest()
        if chat_id:
            chat_id = yuzuki_convert_int(chat_id)
            chats = get_chat_newer_than(request, chat_id)
        else:
            page = yuzuki_convert_int(page)
            chats = get_chat_page(request, page)
        data = [chat.to_dict() for chat in chats]
        # Oldest first, regardless of how the query returned them.
        data = sorted(data, key=lambda c: c["uid"])
        request.setNoCache()
        return json.dumps(data)
class ChatUserStream(YuzukiResource):
    """Long-polling endpoint tracking which users are present in the chat.

    Keeps a pool of parked GET requests and a user -> last-seen-time map;
    whenever the user set changes, every parked client is told to refresh.
    """

    def __init__(self):
        YuzukiResource.__init__(self)
        # Requests currently parked waiting for a user-list change.
        self.request_pool = list()
        # Maps user -> time of their most recent stream connection.
        self.user_pool = dict()

    def notify_all(self):
        """Tell every parked client to refresh, then clear the pool."""
        for req in self.request_pool:
            if not req.finished:
                req.write("refresh")
                req.finish()
        self.request_pool = list()

    def send_refresh_signal(self, request):
        """Timer callback: complete one parked request with 'refresh'."""
        if request in self.request_pool:
            self.request_pool.remove(request)
        if not request.finished:
            request.write("refresh")
            request.finish()

    def response_failed(self, err, request, call):
        """Errback for dropped connections: cancel the timer and refresh."""
        call.cancel()
        if request in self.request_pool:
            self.request_pool.remove(request)
        self.notify_all()

    @need_anybody_permission
    def render_GET(self, request):
        self.request_pool.append(request)
        # Force a refresh shortly before the connection interval elapses.
        call = reactor.callLater(CHAT_CONNECTION_INTERVAL - 5,
                                 self.send_refresh_signal, request)
        request.notifyFinish().addErrback(self.response_failed, request, call)
        refresh_flag = False
        if request.user not in self.user_pool:
            # New user appeared: everyone must refresh their user list.
            refresh_flag = True
            self.user_pool[request.user] = datetime.now()
        # Expire users whose last connection is older than the interval.
        new_user_pool = dict()
        for user, connection_time in self.user_pool.iteritems():
            if (datetime.now() - connection_time).seconds <= \
                    CHAT_CONNECTION_INTERVAL:
                new_user_pool[user] = connection_time
            else:
                refresh_flag = True
        self.user_pool = new_user_pool
        if refresh_flag:
            # Answer this request immediately and wake everyone else.
            self.request_pool.remove(request)
            self.notify_all()
            return "refresh"
        else:
            return NOT_DONE_YET
class ChatUserData(YuzukiResource):
    """Return the list of currently connected chat users as JSON."""

    def __init__(self, stream):
        YuzukiResource.__init__(self)
        # Shared ChatUserStream holding the live user pool.
        self.stream = stream

    @need_anybody_permission
    def render_GET(self, request):
        user_data_list = list()
        for user in self.stream.user_pool:
            user_data = {
                "user_id": user.uid,
                "user_nickname": user.nickname,
            }
            user_data_list.append(user_data)
        # Stable ordering by user id for the client.
        user_data_list = sorted(user_data_list, key=lambda u: u["user_id"])
        request.setNoCache()
        return json.dumps(user_data_list)
class ChatUserOut(YuzukiResource):
    """Remove the requesting user from the chat user pool."""

    def __init__(self, stream):
        YuzukiResource.__init__(self)
        self.stream = stream

    @need_anybody_permission
    def render_GET(self, request):
        if request.user in self.stream.user_pool:
            del (self.stream.user_pool[request.user])
            # Tell every connected client to refresh its user list.
            self.stream.notify_all()
        return "out"
| PoolC/Yuzuki | resource/chat.py | chat.py | py | 6,624 | python | en | code | 10 | github-code | 36 | [
{
"api_name": "exception.BadRequest",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "helper.resource.YuzukiResource",
"line_number": 28,
"usage_type": "name"
},
{
"api_name": "helper.resource.YuzukiResource.__init__",
"line_number": 32,
"usage_type": "call"
}... |
34180539673 | import os
from django.conf import settings
from django.core.management.base import BaseCommand
from django.utils import timezone
from viewer.models import (
Target,
Molecule,
MoleculeTag,
TagCategory
)
from scoring.models import MolGroup
class Command(BaseCommand):
    help = 'Add moleculeTag record for existing mol_groups for a given target. This effectively adds molecule tags for all the sites for the Target'

    def add_arguments(self, parser):
        """Register the positional command-line arguments."""
        parser.add_argument('target', type=str, help='Target to be corrected')
        parser.add_argument('update', type=str, help='Whether to update the target (yes) or display what will be updated (no)')

    def handle(self, *args, **kwargs):
        """Create a MoleculeTag per site (MolGroup) of the given target.

        Runs as a dry-run listing unless the ``update`` argument is 'yes'.
        """
        tags_existing = 0
        tags_to_be_created = 0
        tags_created = 0

        time_start = timezone.now().strftime('%X')
        self.stdout.write("Start %s" % time_start)

        target_name = kwargs['target']
        self.stdout.write("target_name: %s" % target_name)
        # Only the literal string 'yes' enables writing to the database.
        if kwargs['update'] == 'yes':
            update = True
        else:
            update = False
        self.stdout.write("update: %s" % update)

        target = Target.objects.filter(title=target_name)
        if not target:
            self.stdout.write("Target %s not found" % target_name)
            exit(1)
        else:
            self.stdout.write("Updating tags for Target %s" % target[0].title)

        # First, try sites file - e.g. /code/media/targets/mArh/sites.csv
        # If this is there, then the new sites functionality was used.
        sites_filepath = os.path.join(settings.MEDIA_ROOT, 'targets', target_name, 'sites.csv')
        if os.path.isfile(sites_filepath):
            # BUG FIX: the original counted lines via a bare open() and
            # leaked the file handle; use a context manager instead.
            with open(sites_filepath) as sites_file:
                expected_sites = sum(1 for _ in sites_file) - 1
            self.stdout.write("Expected number of sites: %s" % expected_sites)
            # These should correspond to the sites for the target held in sites.csv
            mol_groups = MolGroup.objects.filter(target_id__title=target_name, group_type = "MC")
            tag_type = 'site'
        else:
            # The sites should correspond to the centres of mass. The sites will be generated from them
            mol_groups = MolGroup.objects.filter(target_id__title=target_name, group_type = "MC", description = "c_of_m")
            expected_sites = len(mol_groups)
            self.stdout.write("Expected number of sites: %s" % expected_sites)
            tag_type = 'c_of_e'

        if not mol_groups:
            self.stdout.write("No sites found for target")
            exit(1)

        for idx, mol_group in enumerate(mol_groups):
            self.stdout.write("mol_group description: {}, index: {}".format(mol_group.description, idx))
            # A molecule tag record should not exist, but if it does go no further
            # NOTE(review): the broad except also swallows
            # MultipleObjectsReturned, which would then create a duplicate.
            try:
                mol_tag = MoleculeTag.objects.get(mol_group=mol_group)
            except:
                mol_tag = None

            if tag_type == 'site':
                tag_name = mol_group.description
            else:
                tag_name = 'c_of_m_{}'.format(idx)

            if mol_tag:
                self.stdout.write("Tag already exists for {}, index: {}".format(mol_group.description, idx))
                tags_existing += 1
                continue
            else:
                # Dry-run listing of what would be written.
                self.stdout.write("Tag to be created for %s" % tag_name)
                self.stdout.write("   mol_tag.tag = %s" % tag_name)
                self.stdout.write("   mol_tag.category = %s" % TagCategory.objects.get(category='Sites'))
                self.stdout.write("   mol_tag.target = %s" % target[0])
                self.stdout.write("   mol_tag.mol_group = %s" % mol_group)
                self.stdout.write("   mol_tag.molecules = %s" % [mol['id'] for mol in mol_group.mol_id.values()])
                tags_to_be_created += 1

                # If update flag is set then actually create molecule Tags.
                if update:
                    mol_tag = MoleculeTag()
                    mol_tag.tag = tag_name
                    mol_tag.category = TagCategory.objects.get(category='Sites')
                    mol_tag.target = target[0]
                    mol_tag.mol_group = mol_group
                    mol_tag.save()
                    for mol in mol_group.mol_id.values():
                        this_mol = Molecule.objects.get(id=mol['id'])
                        mol_tag.molecules.add(this_mol)
                    tags_created += 1

        self.stdout.write("Expected number of sites: %s" % expected_sites)
        self.stdout.write("tags_existing %s" % tags_existing)
        self.stdout.write("tags_to_be_created %s" % tags_to_be_created)
        self.stdout.write("tags_created: %s" % tags_created)
        if tags_to_be_created == expected_sites:
            self.stdout.write('Looking good - tags_to_be_created = expected sites')
        if tags_created == expected_sites:
            self.stdout.write('Looking good - tags_created = expected sites')

        time_end = timezone.now().strftime('%X')
        self.stdout.write("End %s" % time_end)
| xchem/fragalysis-backend | viewer/management/commands/tags_from_sites.py | tags_from_sites.py | py | 5,073 | python | en | code | 4 | github-code | 36 | [
{
"api_name": "django.core.management.base.BaseCommand",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "django.utils.timezone.now",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "django.utils.timezone",
"line_number": 26,
"usage_type": "name"
},
... |
71232207785 | import torch
from typing import Optional, Dict, Any, Tuple
from transformers import (
AutoConfig,
AutoTokenizer,
T5ForConditionalGeneration,
MT5ForConditionalGeneration,
MT5EncoderModel
)
from parlai.agents.hugging_face.t5 import T5Agent, ParlaiT5Model
#from transformers.models.mt5.modeling_mt5 import MT5Model
try:
from transformers.models.t5.modeling_t5 import T5Stack
except ModuleNotFoundError:
# Prior versions of transformers package do not have T5Stack
T5Stack = object
from parlai.agents.hugging_face.hugging_face import HF_VERSION
from parlai.agents.hugging_face.dict import mT5DictionaryAgent
from parlai.core.opt import Opt
from parlai.core.params import ParlaiParser
from parlai.core.torch_agent import Batch, TorchAgent
from parlai.core.torch_generator_agent import TorchGeneratorAgent, TorchGeneratorModel
def check_hf_version(v: Tuple[int, int]) -> bool:
    """
    Return True iff the (major, minor) HF version is at least 4.3.
    """
    major, minor = v
    return (major, minor) >= (4, 3)
def build_mt5(opt: Opt) -> MT5ForConditionalGeneration:
    """Instantiate a pretrained MT5 model from the ParlAI options.

    Raises RuntimeError if the installed transformers version is < 4.3.
    """
    if not check_hf_version(HF_VERSION):
        raise RuntimeError('Must use transformers package >= 4.3 to use t5')
    return MT5ForConditionalGeneration.from_pretrained(
        opt['mt5_model_arch'], dropout_rate=opt['mt5_dropout']
    )
def set_device(func):
    """
    Decorator for setting device.

    HF's model parallel uses `torch.cuda.set_device`, which does not vibe
    well with ParlAI, so we pin CUDA to device 0 both before and after the
    wrapped call (no-op when CUDA is unavailable).
    """

    def wrapped(*args, **kwargs):
        if torch.cuda.is_available():
            torch.cuda.set_device('cuda:0')
        result = func(*args, **kwargs)
        if torch.cuda.is_available():
            torch.cuda.set_device('cuda:0')
        return result

    return wrapped
##############
# mT5 Modules #
##############
class ParlaimT5Encoder(torch.nn.Module):
    """Wraps a HF mT5 encoder stack behind ParlAI's encoder interface."""

    def __init__(self, opt: Opt, encoder: T5Stack, padding_idx: Optional[int] = None):
        super().__init__()
        self.stack = encoder
        self.padding_idx = padding_idx
        self.paralleled = not opt[
            'mt5_model_parallel'
        ]  # need to parallel in forward; bug in HF

    @set_device
    def forward(
        self,
        input: torch.LongTensor,
        positions: Optional[torch.LongTensor] = None,
        segments: Optional[torch.LongTensor] = None,
    ) -> Tuple[torch.Tensor, torch.BoolTensor]:
        """
        Forward pass.

        :param LongTensor[batch,seqlen] input:
            The input IDs
        :param LongTensor[batch,seqlen] positions:
            Positions for input IDs
        :param LongTensor[batch,seqlen] segments:
            If provided, additionally adds ``segments`` as extra embedding features.
        """
        if not self.paralleled:
            self.stack.parallelize()
        # Non-pad positions form the attention mask.
        mask = input != self.padding_idx
        outputs = self.stack(input, attention_mask=mask, output_hidden_states=False)
        # Model parallelism may leave tensors on other devices; move them
        # back onto the input's device before returning.
        for k in outputs:
            if torch.is_tensor(outputs[k]):
                outputs[k] = outputs[k].to(input.device)
        return outputs[0], mask
class ParlaimT5Decoder(torch.nn.Module):
    """Wraps a HF mT5 decoder stack behind ParlAI's decoder interface."""

    def __init__(self, opt: Opt, decoder: T5Stack, padding_idx: Optional[int] = None):
        super().__init__()
        self.stack = decoder
        self.padding_idx = padding_idx
        self.paralleled = not opt[
            'mt5_model_parallel'
        ]  # need to parallel in forward; bug in HF

    @set_device
    def forward(
        self, input: torch.LongTensor, encoder_state: Tuple[Any], incr_state=None
    ):
        """
        Forward pass.

        :param LongTensor[batch,seqlen] input:
            The decoder inputs (partial or full decoded token IDs).
        :param encoder_state:
            Output from the encoder module forward pass.
        :param incr_state:
            The incremental state: a dictionary whose keys index the layers and whose
            values contain the incremental state for each layer.
        """
        if not self.paralleled:
            self.stack.parallelize()
        encoder_output, encoder_mask = encoder_state

        mask = input != self.padding_idx
        mask[:, 0] = True  # first token is pad

        outputs = self.stack(
            input_ids=input,
            attention_mask=mask,
            encoder_hidden_states=encoder_output.to(input.device),
            encoder_attention_mask=encoder_mask.to(input.device),
        )
        return outputs[0].to(input.device), incr_state
class ParlaimT5Model(ParlaiT5Model):
    """
    Wrap mT5 in ParlAI.
    """

    def __init__(self, opt, dictionary):
        # Decoding starts from the pad token (start == pad).
        self.pad_idx = dictionary[dictionary.null_token]
        self.start_idx = self.pad_idx
        self.end_idx = dictionary[dictionary.end_token]
        super().__init__(self.pad_idx, self.start_idx, self.end_idx)
        self.mt5 = build_mt5(opt)
        self.encoder = ParlaimT5Encoder(opt, self.mt5.get_encoder(), self.pad_idx)
        self.decoder = ParlaimT5Decoder(opt, self.mt5.get_decoder(), self.pad_idx)

    @set_device
    def output(self, tensor):
        """
        Compute output logits.
        """
        # Scale hidden states by d_model ** -0.5 before the LM head
        # projection to vocabulary logits.
        tensor = tensor * (self.mt5.model_dim ** -0.5)
        lm_logits = self.mt5.lm_head(tensor)
        return lm_logits
class mT5Agent(T5Agent):
"""
mT5 Agent.
Relies on the mT5 model implemented in huggingface
"""
@classmethod
def add_cmdline_args(
cls, parser: ParlaiParser, partial_opt: Optional[Opt] = None
) -> ParlaiParser:
super().add_cmdline_args(parser, partial_opt=partial_opt)
group = parser.add_argument_group('mT5 Args')
group.add_argument(
'--mt5-model-arch',
type=str,
default='mt5-base',
choices=["mt5-small", "mt5-base", "mt5-large", "mt5-xxl"],
)
group.add_argument(
'--mt5-model-parallel',
type='bool',
default=False,
help='use HF model parallel',
)
group.add_argument(
'--mt5-dropout', type=float, default=0.0, help='Dropout for mT5'
)
return parser
def build_model(self) -> 'ParlaimT5Model':
"""
Build and return model.
"""
model = ParlaimT5Model(self.opt, self.dict)
if self.opt['mt5_model_parallel']:
model.mt5.parallelize()
return model
def build_dictionary(self):
"""
Overrides TorchAgent.build_dictionary to use mt5 dict.
"""
return mT5DictionaryAgent(self.opt) | evelynkyl/xRAD_multilingual_dialog_systems | parlai_internal/agents/hugging_face/mt5.py | mt5.py | py | 6,563 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "transformers.models.t5.modeling_t5.T5Stack",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_number": 29,
"usage_type": "name"
},
{
"api_name": "parlai.core.opt.Opt",
"line_number": 37,
"usage_type": "name"
},
{
"api_n... |
74176385062 | '''
This file is used to ge through and upload metadata to cloud
storage for artist submissions
'''
#imports
import datetime
import os
import firebase_admin
from firebase_admin import credentials, firestore
import google.cloud
# can potentially execute the cloud utils rsync here if we want all in one
# authorization and connect method
def cloudConnect(debug=False):
    """Authenticate with Firebase from a local key file and return a
    Firestore client instance."""
    # let's connect and add a new document
    # get credentials locally
    creds = credentials.Certificate("./firebaseSAkey.json")
    # use credentials to login to client
    app = firebase_admin.initialize_app(creds)
    # return the client instance
    db = firestore.client()
    if debug:
        print("Connected to firebase")
    return db
def uploadData(doc_ref, artist, debug=False):
    """Create the artist document (if absent) and its image metadata.

    :param doc_ref: Firestore DocumentReference for this artist
                    (created by the caller as collection('artists').document(name))
    :param artist: artist name; also the local folder to scan for images
    :param debug: print progress information when True
    """
    # when we get to the final version, this will use directory to be specific
    doc = doc_ref.get()
    if doc.exists:
        # can probably just not show anything if there OR merge
        print(f'Doc data: {doc.to_dict()}')
    else:
        # might be easier to create an artist class later
        print(u'No such document found! Adding a new one.')
        # BUG FIX: doc_ref is already the artist's DocumentReference, so we
        # must call set() on it directly; DocumentReference has no
        # document() method (the original doc_ref.document(artist) would
        # raise AttributeError).
        doc_ref.set({
            u'name': artist,
            u'email': "{}@gmail.test".format(artist),
            u'upload_time': datetime.datetime.now()
        })
    # create a subcollection for the image metadata
    for roots, dirs, files in os.walk("./{}".format(artist)):
        for f in files:
            # go through each of the images uploaded and fill in their metadata
            doc_ref.collection(u'images').document(f).set({
                u'title': f,
                u'upload_date': datetime.datetime.today(),
                u'description': "This is a test image"
            }#, merge=True
            )
    if debug:
        print("Uploaded artist and image metadata")
# function to go through each folder and upload the artist and image metadata
# use last upload time to determine whether to change metadata?
def uploadArtistMetadata(db, artist="", debug=False):
    """Upload metadata for one artist, or every artist folder if none given.

    Walks the current directory; each sub-directory is treated as one
    artist folder. NOTE(review): when *artist* is given but no matching
    folder exists, the function silently does nothing.
    """
    # use the artist name to open the right folder. If no argument, go through
    # all folders. Currently O(n) but should in theory be O(1). Fix later
    for roots, dirs, files in os.walk("."):
        # we only care about the directories at this point
        for directory in dirs:
            # get document corresponding to artist
            doc_ref = db.collection(u'artists').document(directory)
            if artist:
                if artist != directory:
                    continue
                # here we know that we found artist
                if debug:
                    print("Found {} for {}".format(directory, artist))
                uploadData(doc_ref, artist, debug)
                return # finished at this point
            uploadData(doc_ref, directory, debug)
# main
def main(debug=False):
    """Prompt for an artist name and upload that artist's metadata."""
    # get client and authorize the credentials locally
    # connect to our Firestore database
    db = cloudConnect(debug)
    # get the doc_refument that we are going to be uploading to
    print("What is the name of the artist?")
    artist = input("Input the name here: ")
    uploadArtistMetadata(db, artist, debug)
    print("Finished uploading info for {}".format(artist))
if __name__ == "__main__":
    # Script entry point: run the uploader with verbose debug output.
    print("Starting Upload to isolating together cloud database")
    main(debug=True)
| reyluno/isolatingtogether.github.io | utils/dbUpdate.py | dbUpdate.py | py | 3,428 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "firebase_admin.credentials.Certificate",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "firebase_admin.credentials",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": "firebase_admin.initialize_app",
"line_number": 22,
"usage_type": "call"... |
27520982087 | # encoding:utf-8
__author__ = 'shiliang'
__date__ = '2019/4/9 21:12'
import requests
from lxml import etree
import pandas as pd
import xlrd
import time
import re
import aiohttp
import asyncio
# 全局变量
headers = {
'Cookie': 'OCSSID=sfg10a19had6hfavkctd32otf6',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 '
'(KHTML, like Gecko) Chrome/70.0.3538.110 Safari/537.36',
}
sem = asyncio.Semaphore(20) # 信号量,控制协程数,防止爬的过快
oneSheetList = [] # 一张整表的元数据列表
def getURLlist(openFile, sheetNum, colx, start_rowx):
    '''
    Read a column of values from a given sheet of an Excel workbook.
    :param openFile: path to the Excel workbook
    :param sheetNum: sheet index (0-based)
    :param colx: column index to read
    :param start_rowx: first row to read (0-based)
    :return urls: list of all URLs in that column
    '''
    # open the workbook
    data = xlrd.open_workbook(openFile)
    # select the sheet
    table = data.sheets()[sheetNum]
    # read the column; start_rowx=1 means "start from the 2nd row"
    urls = table.col_values(colx=colx, start_rowx=start_rowx)
    return urls
def cleanStr(string):
    '''
    Clean a raw scraped string.
    :param string: string to clean
    :return clean_str: the cleaned string
    '''
    # 1. trim surrounding newlines / tabs
    trimmed = string.strip('\n\t')
    # 2. strip HTML tags <...> and /* ... */ comment blocks
    return re.sub(r'\<.*?\>|\/\*.*?\*\/', '', trimmed)
async def getOneURLMetaData(number, url):
    '''
    Fetch, parse and clean the metadata of one URL (run as a coroutine).
    :param number: index of this url (int), used to order the final result set
    :param url: the url to fetch (str)
    :param [number,url]: index and URL of a completed crawl+parse+clean pass
    '''
    with(await sem):
        # async with is an asynchronous context manager
        async with aiohttp.ClientSession() as session:  # open a session
            async with session.request('GET', url, headers=headers, timeout=20) as resp:  # issue the request
                html = await resp.read()  # raw response bytes
        # parse the page
        xml = etree.HTML(html)
        content = xml.xpath('//*[@id="content"]/table/tr/td/text()')
        # format the data
        oneURLList = []  # all metadata collected for this one URL
        creatorList = []  # author / affiliation / country triplets, '0' when missing
        title = ''  # page title
        abstract = ''  # page abstract
        keywords = '0'  # keywords, '0' when absent
        for index, text in enumerate(content):
            # '\xa0' delimits the start / end of a table row
            if text == '\xa0':
                # 1. is this the 'Title' row?
                if content[index+2] == 'Title':
                    title = content[index + 4]  # keep the Title
                    title = cleanStr(title)  # clean it
                    continue
                if content[index+3] == 'Abstract':
                    abstract = content[index + 4]  # keep the Abstract
                    continue
                if content[index+3] == 'Keyword(s)':
                    # keep Keyword(s) when present, otherwise default '0'
                    if content[index+4] != '\xa0':
                        keywords = content[index + 4]  # keep Keyword(s)
                        keywords = cleanStr(keywords)  # clean them
                    continue
                if content[index+2] == 'Creator':
                    clean_creator = cleanStr(content[index + 4])
                    lst = clean_creator.split('; ')  # split "name; org; country" into a list
                    for num, info in enumerate(lst):  # fields missing on the site default to '0'
                        # data-entry error on the site: more than three fields -> stop
                        if num > 2:
                            break
                        creatorList.append(info)  # author name, affiliation, country
                        # data-entry error: fewer than three fields -> pad with '0'
                        if len(lst) < 3 and num == 1:
                            creatorList.append('0')  # author name, affiliation, country
                    continue
        oneURLList.append(number)
        oneURLList.append(title)
        oneURLList.append(abstract)
        oneURLList.append(keywords)
        oneURLList.append(creatorList)
        # take the creator list back off the row...
        creatorList = oneURLList[-1]
        # ...drop it (slice keeps [0, -1))...
        oneURLList = oneURLList[:-1]
        # ...and flatten its entries onto the row one by one
        for info in creatorList:
            oneURLList.append(info)
        oneSheetList.append(oneURLList)
        print('已完成第'+str(number)+'个url的爬取+解析+清洗')
        return [number,url]
async def main(urls):
    '''
    Coroutine driver: fetch the metadata of every URL concurrently.
    :param urls: URL list
    '''
    tasks = [getOneURLMetaData(number+1, url) for number, url in enumerate(urls)]  # gather all tasks
    done,pending = await asyncio.wait(tasks)  # delegate to sub-generators
    for r in pending:  # both done and pending hold tasks; read results via result()
        print('爬取失败的url:'+r.result())
def coroutine_getMetaData(urls, topCount=None):
    '''
    Entry point for the coroutine crawl: fetch metadata for all URLs.
    :param urls: URL list
    :param topCount: only crawl the first topCount URLs (optional)
    '''
    urlsList = []
    if topCount is not None:
        for i in range(topCount):
            urlsList.append(urls[i])
    else:
        urlsList = urls
    # drive the coroutines below
    loop = asyncio.get_event_loop()  # create the event loop object
    try:
        loop.run_until_complete(main(urlsList))  # run until the last task finishes
    finally:
        loop.close()  # shut the event loop down
def list2excel(saveFile, oneSheetList, startrow, startcol=2, sheet_name='Sheet1'):
    '''
    Write one sheet's worth of metadata rows into an Excel file.
    :param saveFile: destination .xlsx path
    :param oneSheetList: list of rows (one per URL) to write
    :param startrow: first row in the sheet to write to
    :param startcol: first column in the sheet to write to
    :param sheet_name: name of the target sheet
    :return:
    '''
    frame = pd.DataFrame(oneSheetList)
    # index=False / header=False: write raw values without row or column labels
    frame.to_excel(saveFile, sheet_name=sheet_name, startrow=startrow,
                   startcol=startcol, index=False, header=False)
    print('数据写入excel成功.')
if __name__ == '__main__':
    openFile = 'C:\\Users\\Administrator\\Desktop\\2014-2017.xlsx'
    saveFile = 'C:\\Users\\Administrator\\Desktop\\2017.xlsx'
    # read the URL list from the source workbook
    urls = getURLlist(openFile, sheetNum=0, colx=1, start_rowx=1)
    # crawl every URL's metadata concurrently into oneSheetList
    start = time.time()
    coroutine_getMetaData(urls)
    print('总爬取+解析耗时:%.5f秒' % float(time.time() - start))
    # sort the nested rows by their original URL number (first element)
    oneSheetList.sort(key=lambda item: item[0], reverse=False)
    # write everything out to the destination workbook
    list2excel(saveFile, oneSheetList, startrow=0, startcol=0, sheet_name='Sheet1')
| SparksFly8/DataMingingPaper | spider/metadata_Coroutine_Spider.py | metadata_Coroutine_Spider.py | py | 8,375 | python | zh | code | 69 | github-code | 36 | [
{
"api_name": "asyncio.Semaphore",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "xlrd.open_workbook",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 50,
"usage_type": "call"
},
{
"api_name": "aiohttp.ClientSession... |
24981710765 | import pandas as pd
import numpy as np
from typing import Tuple
import os
import seaborn as sns
import matplotlib.pyplot as plt
from fancyimpute import IterativeImputer
import sys
sys.path.append('.')
def load_all_data(basepath: str, names_files: list) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """Load the three raw data sources (features, stores, sales).

    Args:
        basepath (str): base directory holding the CSV files
        names_files (list): file names, in the order (features, stores, sales)

    Returns:
        Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: the features,
        stores and sales dataframes
    """
    feature_path, store_path, sales_path = (
        os.path.join(basepath, name) for name in names_files[:3])
    # 'Date' columns are parsed into real timestamps on load
    features = pd.read_csv(feature_path, parse_dates=["Date"])
    stores = pd.read_csv(store_path)
    sales = pd.read_csv(sales_path, parse_dates=["Date"])
    return (features, stores, sales)
def group_by_feature_by_date(df_feature: pd.DataFrame) -> pd.DataFrame:
    """Aggregate the feature dataframe per date.

    Means for the numeric columns, a count (sum of booleans) for IsHoliday.

    Args:
        df_feature (pd.DataFrame): feature dataframe

    Returns:
        pd.DataFrame: per-date aggregates, restricted to dates up to
        2012-12-10
    """
    aggregated = (
        df_feature.groupby("Date")
        .agg({
            "Temperature": "mean",
            "Fuel_Price": "mean",
            "IsHoliday": "sum",
            "CPI": "mean",
            "Unemployment": "mean",
        })
        .sort_index()
    )
    # Keep only dates up to 2012-12-10 (partial-string slice on the
    # sorted DatetimeIndex).
    return aggregated[:'2012-12-10']
def group_by_sales_by_date(df_sales: pd.DataFrame) -> pd.DataFrame:
    """Sum weekly sales per date, sorted chronologically.

    Args:
        df_sales (pd.DataFrame): sales dataframe

    Returns:
        pd.DataFrame: per-date total weekly sales
    """
    return (df_sales.groupby("Date")
            .agg({"Weekly_Sales": "sum"})
            .sort_index())
def merge_feature_and_sales(df_feature: pd.DataFrame, df_sales: pd.DataFrame) -> pd.DataFrame:
    """Merge the per-date features and sales on their (date) indexes.

    NOTE(review): mutates *df_sales* in place (Weekly_Sales is rescaled to
    millions and truncated), and the ``x == 45.0`` holiday test compares
    the summed IsHoliday count against 45 — presumably "all 45 stores on
    holiday"; confirm against the data.

    Args:
        df_feature (pd.DataFrame): features aggregated data
        df_sales (pd.DataFrame): sales aggregated data

    Returns:
        pd.DataFrame: merged dataframe
    """
    df_sales.Weekly_Sales = df_sales.Weekly_Sales/1000000 #convert weekly sales in million
    df_sales.Weekly_Sales = df_sales.Weekly_Sales.apply(int)
    df_sales_features = pd.merge(df_sales, df_feature, left_index=True, right_index=True, how='left')
    df_sales_features["IsHoliday"] = df_sales_features["IsHoliday"].apply(lambda x: True if x == 45.0 else False )
    return df_sales_features
def agg_store_on_temp_fuel_price_holiday(df_store: pd.DataFrame,
                                         df_feature: pd.DataFrame,
                                         df_sales: pd.DataFrame
                                         ) -> pd.DataFrame:
    """Attach per-store aggregates to the store dataframe.

    Adds mean temperature, mean fuel price, holiday count and total weekly
    sales (in millions) as new columns on *df_store* (mutated in place).

    Args:
        df_sales (pd.DataFrame) : sales dataframe
        df_store (pd.DataFrame): store dataframe
        df_feature (pd.DataFrame): features dataframe

    Returns:
        pd.DataFrame: df_store with the new aggregate columns
    """
    data_Store = df_feature.groupby("Store").agg(
        {
            "Temperature": "mean",
            "Fuel_Price": "mean",
            "IsHoliday": "sum"
        }
    )
    temp_store = df_sales.groupby("Store").agg({"Weekly_Sales":"sum"})
    # Scale sales to millions and truncate to whole numbers.
    temp_store.Weekly_Sales = temp_store.Weekly_Sales/1000000
    temp_store.Weekly_Sales = temp_store.Weekly_Sales.apply(int)
    # GENERALIZED: was the hard-coded np.arange(0, 45) (exactly 45 stores);
    # deriving the length keeps the identical result for 45 stores while
    # supporting any store count.
    data_Store.set_index(np.arange(len(data_Store)), inplace=True)
    df_store["Temperature"] = data_Store.Temperature
    df_store["Fuel_Price"] = data_Store.Fuel_Price
    df_store["Holiday"] = data_Store.IsHoliday
    # NOTE(review): temp_store keeps the Store ids as its index while
    # df_store uses a 0-based RangeIndex, so this assignment aligns store
    # N's sales with row N (store N+1) — looks like an off-by-one; confirm.
    df_store["Weekly_Sales"] = temp_store.Weekly_Sales
    return df_store
def dataset_construction(df_sales: pd.DataFrame,
                         df_feature: pd.DataFrame,
                         df_store: pd.DataFrame
                         ) -> pd.DataFrame:
    """Build the modelling table.

    Features are left-joined with per-(date, store) sales totals and the
    store type.

    Args:
        df_sales (pd.DataFrame): sales data
        df_feature (pd.DataFrame): features data
        df_store (pd.DataFrame): stores data

    Returns:
        pd.DataFrame : dataset as dataframe
    """
    per_date_store = df_sales.groupby(["Date", "Store"]).agg({"Weekly_Sales": "sum"})
    per_date_store.sort_index(inplace=True)
    # Scale sales to units of 10k and truncate to whole numbers.
    per_date_store.Weekly_Sales = (per_date_store.Weekly_Sales / 10000).apply(int)
    merged = pd.merge(df_feature, per_date_store, how='left', on=["Date", "Store"])
    merged = pd.merge(merged, df_store[["Store", "Type"]], how='left', on=["Store"])
    return merged
def markdown_data_imputation(data_table: pd.DataFrame, col_to_impute: list) -> pd.DataFrame:
    """Impute missing values with fancyimpute's IterativeImputer.

    Args:
        data_table (pd.DataFrame): dataset (mutated in place and returned)
        col_to_impute (list): columns to impute jointly

    Returns:
        pd.DataFrame: dataset with the listed columns imputed
    """
    itt = IterativeImputer()
    # fit_transform returns a plain ndarray; copy each column back by position
    df = itt.fit_transform(data_table[col_to_impute])
    compte = 0
    for col in col_to_impute:
        data_table[col] = df[:,compte]
        compte = compte + 1
    return data_table
def data_imputation_by_mean(data_table: pd.DataFrame, cols: list) -> pd.DataFrame:
    """Fill missing values in the two given columns with their column means.

    Args:
        data_table (pd.DataFrame): dataset (mutated in place and returned)
        cols (list): two column names, in the order (CPI, Unemployment)

    Returns:
        pd.DataFrame: dataset with both columns imputed by mean
    """
    cpi_col, unemployment_col = cols[0], cols[1]
    for column in (cpi_col, unemployment_col):
        data_table[column].fillna(data_table[column].mean(), inplace=True)
    return data_table
def createdummies(data, cols):
    """Replace each categorical column in *cols* with one-hot indicator
    columns named '<col>_<value>'."""
    for column in cols:
        indicator_cols = pd.get_dummies(data[column], prefix=column)
        data = data.join(indicator_cols)
        data.drop(column, axis=1, inplace=True)
    return data
def create_columns_and_convert_categorical_data(data_table: pd.DataFrame) -> pd.DataFrame:
    """Derive date parts, encode IsHoliday, and one-hot the categoricals.

    Args:
        data_table (pd.DataFrame): dataset (mutated; the Date column is dropped)

    Returns:
        pd.DataFrame: transformed data
    """
    # Inverted encoding kept as in the original: True -> 0, False -> 1.
    data_table['IsHoliday'] = data_table['IsHoliday'].map({True:0, False:1})
    data_table["Month"] = data_table.Date.dt.month
    data_table["Year"] = data_table.Date.dt.year
    # BUG FIX: Series.dt.weekofyear was removed in pandas 2.0;
    # isocalendar().week is the supported equivalent (same week numbers).
    data_table["WeekofYear"] = data_table.Date.dt.isocalendar().week
    data_table.drop(['Date'], axis=1, inplace=True)
    #create dummies out of categorical column
    data_table = createdummies(data_table, ["Type", "Month", "Year", "WeekofYear"])
    return data_table
def data_processing(base_path: str,
                    names_files: list,
                    col_to_impute: list,
                    cols_: list) -> Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]:
    """process data before training

    Args:
        base_path (str): base dir
        names_files (list): files to be loaded
        col_to_impute (list): a list of columns to impute by using specific method
        cols_ (list): a list of columns to impute by mean

    Returns:
        Tuple[pd.DataFrame, pd.DataFrame, pd.DataFrame]: dataset, train data and test data
    """
    # Load the three raw tables (features, stores, sales) from base_path.
    df_feature, df_store, df_sales = load_all_data(base_path, names_files)
    #TBC
    # df_agg_feature_by_date = group_by_feature_by_date(df_feature=df_feature)
    # df_agg_sales_by_date = group_by_sales_by_date(df_sales=df_sales)
    # df_feature_agg_sales_agg = merge_feature_and_sales(
    #     df_feature=df_agg_feature_by_date,
    #     df_sales=df_agg_sales_by_date
    # )
    # Aggregate store-level temperature / fuel-price / holiday information.
    df_scalled_store = agg_store_on_temp_fuel_price_holiday(
        df_store=df_store,
        df_feature=df_feature,
        df_sales=df_sales
    )
    data_table = dataset_construction(
        df_sales=df_sales,
        df_feature=df_feature,
        df_store=df_scalled_store
    )
    # Impute the markdown columns with the dedicated imputer first...
    data_table_imputed_markdown = markdown_data_imputation(
        data_table=data_table,
        col_to_impute=col_to_impute
    )
    # ...then the remaining columns (CPI / Unemployment) by their mean.
    data_table_compltete_imputed = data_imputation_by_mean(
        data_table=data_table_imputed_markdown,
        cols=cols_
    )
    data_table_with_new_features = create_columns_and_convert_categorical_data(
        data_table=data_table_compltete_imputed
    )
    # convert from Fahrenheit to Celsius
    data_table_with_new_features['Temperature'] = (data_table_with_new_features['Temperature']- 32) * 5./9.
    # creating train and test data: rows with a known Weekly_Sales value form
    # the training set; rows without it are the test set to be predicted.
    data_train = data_table_with_new_features[data_table_with_new_features.Weekly_Sales.notnull()]
    data_test = data_table_with_new_features[data_table_with_new_features.Weekly_Sales.isnull()]
    return data_table_with_new_features, data_train, data_test
def data_processing_with_io(base_path: str,
                            names_files: list,
                            col_to_impute: list,
                            cols_: list,
                            output_path: str
                            ) -> None:
    """Run the preprocessing pipeline and persist its outputs as CSV files.

    Args:
        base_path (str): base dir
        names_files (list): files to be loaded
        col_to_impute (list): a list of columns to impute by using specific method
        cols_ (list): a list of columns to impute by mean
        output_path (str): output dir to store the results of preprocessing
    """
    frames = data_processing(base_path=base_path,
                             names_files=names_files,
                             col_to_impute=col_to_impute,
                             cols_=cols_
                             )
    # (full dataset, train split, test split) -> their output locations.
    relative_paths = ('preprocess_dataset', 'train/train.csv', 'test/test.csv')
    for frame, relative_path in zip(frames, relative_paths):
        frame.to_csv(os.path.join(output_path, relative_path), index=False)
{
"api_name": "sys.path.append",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "pandas.read_csv",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"lin... |
15556149755 | from __future__ import annotations
import numpy as np
from typing import List
from .meshdata import MeshCode
from . import R, rad2deg, deg2rad
class Point:
    """A geographic point: latitude/longitude in degrees, elevation in meters.

    The constructor asserts lat in [0, 66) and lon in [100, 180) — presumably
    the region covered by the MeshCode elevation data; confirm against the
    mesh dataset's actual coverage.
    """
    def __init__( self,
                  latitude: float, # <deg>
                  longitude: float, # <deg>
                  elevation: float | None = None, # <meter>
                  valid_elevation: float | None = None, # <meter>
                  diameter: float = 0, #<meter>
                  name: str | None = None,
            ) -> None:
        assert 0 <= latitude and latitude < 66
        assert 100 <= longitude and longitude < 180
        self.name = name
        self.latitude = latitude
        self.longitude = longitude
        self.diameter = diameter
        # Mesh-code lookup for this coordinate; supplies a default elevation.
        self._mesh_code = MeshCode(latitude, longitude)
        # NOTE(review): `or` treats an explicit elevation of 0.0 as missing and
        # falls back to the mesh elevation — confirm this is intended for
        # sea-level points.
        self.elevation = elevation or self._mesh_code.elevation()
        self.valid_elevation = valid_elevation or self.elevation + .1
    def next( self,
              azimuth: float, # [0,360]
              dist: float, # <meter>
              valid_elevation: float, #<meter>
              name: str | None = None,
        ) -> Point:
        """Return the point `dist` meters away along `azimuth`.

        Solves the spherical triangle (pole, this point, target) on a sphere
        of radius R: law of cosines for the new colatitude `c`, then law of
        sines for the longitude offset `alpha`.
        """
        a = dist / R
        b = np.pi / 2 - deg2rad(self.latitude)
        gamma = 2*np.pi - deg2rad(azimuth)
        c = np.arccos( np.cos(a) * np.cos(b) \
            + np.sin(a) * np.sin(b) * np.cos(gamma) )
        alpha = np.arcsin( np.sin(a) * np.sin(gamma) / np.sin(c) )
        lat = rad2deg(np.pi / 2 - c)
        # NOTE(review): alpha is subtracted, i.e. positive alpha moves west
        # under this azimuth convention (gamma = 2*pi - azimuth) — confirm.
        lon = self.longitude - rad2deg(alpha)
        return Point(lat, lon, valid_elevation=valid_elevation, name=name)
    def to_location_format(self) -> list:
        # Row layout: [name, lat, lon, mesh label, elevation, valid_elevation,
        # margin].  The original annotation `List[str, float, float]` is not a
        # valid typing form (List takes one parameter), hence plain `list`.
        return [
            self.name,
            self.latitude,
            self.longitude,
            self._mesh_code.label,
            self.elevation,
            self.valid_elevation,
            self.valid_elevation - self.elevation
        ]
    def __str__(self):
        return f"""
        ==== {self.name} ====
        lat: {self.latitude}
        log: {self.longitude}
        h: {self.elevation}
        ====================="""
def validElevation( dist: float, #<meter>
                    altitude: float, # [0,90]
                    root_point: Point,#<meter>
                ) -> float:
    """Return the elevation a point `dist` meters away must reach to be seen
    from `root_point` at the given altitude angle.

    Args:
        dist: great-circle distance from the root point, in meters.
        altitude: elevation angle of the sight line, in degrees [0, 90].
        root_point: observation point; its `diameter` raises the sight line.

    Returns:
        Threshold elevation in meters above the sphere of radius R.
    """
    # Sight-line origin: root elevation raised by the height the point's
    # diameter subtends at the given altitude angle.
    root_elevation = root_point.elevation \
                   + root_point.diameter * np.tan(deg2rad(altitude))
    # Central angle subtended by the two surface points (R is the sphere radius).
    theta = dist / R
    Y = deg2rad(90 - altitude)
    # Law of sines in the triangle (sphere centre, root point, target point).
    # The original's unused intermediate `X = pi - Y - theta` was removed.
    return ( np.sin(Y) / np.sin(Y + theta) ) * (R + root_elevation) - R
{
"api_name": "meshdata.MeshCode",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 35,
"usage_type": "attribute"
},
{
"api_name": "numpy.pi",
"line_number": 36,
"usage_type": "attribute"
},
{
"api_name": "numpy.arccos",
"lin... |
43831264470 | from datetime import datetime, timedelta
from enum import Enum
from typing import Any, Dict, List, Optional, Tuple, Type, TypeVar, Union
from uuid import UUID
from eventsourcing.domain.model.aggregate import AggregateRoot
# Locations in the world.
class Location(Enum):
    """Locations in the world used by the sample routes.

    The last three entries are UN/LOCODE-style codes — presumably Rotterdam,
    Dallas and Melbourne; confirm against the consuming application.
    """
    HAMBURG = "HAMBURG"
    HONGKONG = "HONGKONG"
    NEWYORK = "NEWYORK"
    STOCKHOLM = "STOCKHOLM"
    TOKYO = "TOKYO"
    NLRTM = "NLRTM"
    USDAL = "USDAL"
    AUMEL = "AUMEL"
# Leg of an Itinerary.
class Leg(object):
    """One leg of an itinerary: a single voyage from origin to destination."""
    def __init__(self, origin: str, destination: str, voyage_number: str):
        self.origin: str = origin
        self.destination: str = destination
        self.voyage_number: str = voyage_number
# Itinerary.
class Itinerary(object):
    """A route from origin to destination as an ordered list of legs."""
    def __init__(self, origin: str, destination: str, legs: List[Leg]):
        self.origin = origin
        self.destination = destination
        self.legs = legs
# Handling activities.
class HandlingActivity(Enum):
    """Cargo handling activities, in the order they normally occur."""
    RECEIVE = "RECEIVE"
    LOAD = "LOAD"
    UNLOAD = "UNLOAD"
    CLAIM = "CLAIM"
# Custom static types.\
CargoDetails = Dict[str, Optional[Union[str, bool, datetime, Tuple]]]
LegDetails = Dict[str, str]
ItineraryDetails = Dict[str, Union[str, List[LegDetails]]]
# Type variable for Cargo aggregate class.
T_cargo = TypeVar("T_cargo", bound="Cargo")
# Some routes from one location to another.
REGISTERED_ROUTES = {
("HONGKONG", "STOCKHOLM"): [
Itinerary(
origin="HONGKONG",
destination="STOCKHOLM",
legs=[
Leg(origin="HONGKONG", destination="NEWYORK", voyage_number="V1"),
Leg(origin="NEWYORK", destination="STOCKHOLM", voyage_number="V2"),
],
)
],
("TOKYO", "STOCKHOLM"): [
Itinerary(
origin="TOKYO",
destination="STOCKHOLM",
legs=[
Leg(origin="TOKYO", destination="HAMBURG", voyage_number="V3"),
Leg(origin="HAMBURG", destination="STOCKHOLM", voyage_number="V4"),
],
)
],
}
NextExpectedActivity = Optional[
Union[Tuple[HandlingActivity, Location], Tuple[HandlingActivity, Location, str]]
]
# Custom aggregate root class.
class Aggregate(AggregateRoot):
    """Custom aggregate root base class.

    `__subclassevents__ = True` enables per-subclass declaration of nested
    Event classes (an eventsourcing-library feature).
    """
    __subclassevents__ = True
# The Cargo aggregate is an event sourced domain model aggregate that
# specifies the routing from origin to destination, and can track what
# happens to the cargo after it has been booked.
class Cargo(Aggregate):
@classmethod
def new_booking(
cls: Type[T_cargo],
origin: Location,
destination: Location,
arrival_deadline: datetime,
) -> T_cargo:
assert issubclass(cls, Cargo) # For PyCharm navigation.
return cls.__create__(
origin=origin, destination=destination, arrival_deadline=arrival_deadline
)
def __init__(
self,
origin: Location,
destination: Location,
arrival_deadline: datetime,
**kwargs: Any
) -> None:
super().__init__(**kwargs)
self._origin: Location = origin
self._destination: Location = destination
self._arrival_deadline: datetime = arrival_deadline
self._transport_status: str = "NOT_RECEIVED"
self._routing_status: str = "NOT_ROUTED"
self._is_misdirected: bool = False
self._estimated_time_of_arrival: Optional[datetime] = None
self._next_expected_activity: NextExpectedActivity = None
self._route: Optional[Itinerary] = None
self._last_known_location: Optional[Location] = None
self._current_voyage_number: Optional[str] = None
@property
def origin(self) -> Location:
return self._origin
@property
def destination(self) -> Location:
return self._destination
@property
def arrival_deadline(self) -> datetime:
return self._arrival_deadline
@property
def transport_status(self) -> str:
return self._transport_status
@property
def routing_status(self) -> str:
return self._routing_status
@property
def is_misdirected(self) -> bool:
return self._is_misdirected
@property
def estimated_time_of_arrival(self) -> Optional[datetime]:
return self._estimated_time_of_arrival
@property
def next_expected_activity(self) -> Optional[Tuple]:
return self._next_expected_activity
@property
def route(self) -> Optional[Itinerary]:
return self._route
@property
def last_known_location(self) -> Optional[Location]:
return self._last_known_location
@property
def current_voyage_number(self) -> Optional[str]:
return self._current_voyage_number
class Event(Aggregate.Event):
pass
def change_destination(self, destination: Location) -> None:
self.__trigger_event__(self.DestinationChanged, destination=destination)
class DestinationChanged(Event):
def mutate(self, obj: "Cargo") -> None:
obj._destination = self.destination
@property
def destination(self) -> Location:
return self.__dict__["destination"]
def assign_route(self, itinerary: Itinerary) -> None:
self.__trigger_event__(self.RouteAssigned, route=itinerary)
class RouteAssigned(Event):
def mutate(self, obj: "Cargo") -> None:
obj._route = self.route
obj._routing_status = "ROUTED"
obj._estimated_time_of_arrival = datetime.now() + timedelta(weeks=1)
obj._next_expected_activity = (HandlingActivity.RECEIVE, obj.origin)
obj._is_misdirected = False
@property
def route(self) -> Itinerary:
return self.__dict__["route"]
def register_handling_event(
self,
tracking_id: UUID,
voyage_number: Optional[str],
location: Location,
handling_activity: HandlingActivity,
) -> None:
self.__trigger_event__(
self.HandlingEventRegistered,
tracking_id=tracking_id,
voyage_number=voyage_number,
location=location,
handling_activity=handling_activity,
)
class HandlingEventRegistered(Event):
def mutate(self, obj: "Cargo") -> None:
assert obj.route is not None
if self.handling_activity == HandlingActivity.RECEIVE:
obj._transport_status = "IN_PORT"
obj._last_known_location = self.location
obj._next_expected_activity = (
HandlingActivity.LOAD,
self.location,
obj.route.legs[0].voyage_number,
)
elif self.handling_activity == HandlingActivity.LOAD:
obj._transport_status = "ONBOARD_CARRIER"
obj._current_voyage_number = self.voyage_number
for leg in obj.route.legs:
if leg.origin == self.location.value:
if leg.voyage_number == self.voyage_number:
obj._next_expected_activity = (
HandlingActivity.UNLOAD,
Location[leg.destination],
self.voyage_number,
)
break
else:
raise Exception(
"Can't find leg with origin={} and "
"voyage_number={}".format(self.location, self.voyage_number)
)
elif self.handling_activity == HandlingActivity.UNLOAD:
obj._current_voyage_number = None
obj._last_known_location = self.location
obj._transport_status = "IN_PORT"
if self.location == obj.destination:
obj._next_expected_activity = (
HandlingActivity.CLAIM,
self.location,
)
elif self.location.value in [leg.destination for leg in obj.route.legs]:
for i, leg in enumerate(obj.route.legs):
if leg.voyage_number == self.voyage_number:
next_leg: Leg = obj.route.legs[i + 1]
assert Location[next_leg.origin] == self.location
obj._next_expected_activity = (
HandlingActivity.LOAD,
self.location,
next_leg.voyage_number,
)
break
else:
obj._is_misdirected = True
obj._next_expected_activity = None
elif self.handling_activity == HandlingActivity.CLAIM:
obj._next_expected_activity = None
obj._transport_status = "CLAIMED"
else:
raise Exception(
"Unsupported handling event: {}".format(self.handling_activity)
)
@property
def voyage_number(self) -> str:
return self.__dict__["voyage_number"]
@property
def location(self) -> Location:
return self.__dict__["location"]
@property
def handling_activity(self) -> str:
return self.__dict__["handling_activity"]
| johnbywater/es-example-cargo-shipping | cargoshipping/domainmodel.py | domainmodel.py | py | 9,436 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "enum.Enum",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 31,
"usage_type": "name"
},
{
"api_name": "enum.Enum",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "typing.Dict",
"line_number": 45,
... |
20847546702 | """
Django settings for test_project project.
For more information on this file, see
https://docs.djangoproject.com/en/1.7/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.7/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
BASE_DIR = os.path.dirname(os.path.dirname(__file__))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.7/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'rw2wiza36p)(d7fxun0xl5$2k%3p5t=f9zva1rpoic-lbl&es4'
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
TEMPLATE_DEBUG = True
ALLOWED_HOSTS = ['*']
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'hydra',
'test_app',
'django_extensions'
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.auth.middleware.SessionAuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
)
ROOT_URLCONF = 'test_project.urls'
WSGI_APPLICATION = 'test_project.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.7/ref/settings/#databases
import dj_database_url
DATABASES = {
'default': dj_database_url.config(default='postgres://hydra:@localhost:5432/hydra')
}
# Internationalization
# https://docs.djangoproject.com/en/1.7/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.7/howto/static-files/
STATIC_URL = '/static/'
LOGGING = {
'version': 1,
'disable_existing_loggers': False,
'handlers': {
'console': {
'level': 'DEBUG',
'class': 'logging.StreamHandler',
},
},
'loggers': {
'django.db.backends': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False,
},
'hydra': {
'handlers': ['console'],
'level': 'DEBUG',
'propagate': False
}
},
'root': {
'handlers': ['console'],
'level': 'INFO'
}
}
HYDRA_MODELS = {
'test_app.Reader',
'test_app.Author',
'test_app.Book'
}
| j00bar/django-hydra | test_project/test_project/settings.py | settings.py | py | 2,762 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "dj_database_url.config",
"line_number": 63,
"usage_type": "call"
}
] |
5813639636 | # type: ignore
import os
import pathlib
import subprocess
import gnupg
import pytest
import requests
import toml
def pytest_collect_file(file_path, parent):
if file_path.suffix == ".sh" and file_path.name.startswith("test_"):
return ScriptFile.from_parent(parent, path=file_path)
class ScriptFile(pytest.File):
# To make pytest.Function happy
obj = None
def collect(self):
# Extract the name between "test_" and ".sh".
name = self.path.name[5:][:-3]
yield ScriptItem.from_parent(self, name=name, path=self.path)
# HACK: Inherit from `pytest.Function` to be able to use the fixtures
class ScriptItem(pytest.Function):
def __init__(self, path, **kwargs):
super().__init__(callobj=self._runscript, **kwargs)
self.path = path
self.add_marker("script")
if path.parts[-3] == "scripts":
self.add_marker(path.parts[-2])
def _runscript(self, pulp_cli_env, tmp_path, pulp_container_log):
run = subprocess.run([self.path], cwd=tmp_path)
if run.returncode == 23:
pytest.skip("Skipped as requested by the script.")
if run.returncode != 0:
raise ScriptError(f"Script returned with exit code {run.returncode}.")
def reportinfo(self):
return self.path, 0, f"test script: {self.name}"
def repr_failure(self, excinfo):
if isinstance(excinfo.value, ScriptError):
return str(excinfo.value)
return super().repr_failure(excinfo)
class ScriptError(Exception):
"""Custom exception to mark script execution failure."""
@pytest.fixture
def pulp_cli_vars():
"""
This fixture will return a dictionary that is used by `pulp_cli_env` to setup the environment.
To inject more environment variables, it can overwritten.
It will be initialized with "PULP_FIXTURE_URL".
"""
PULP_FIXTURES_URL = os.environ.get("PULP_FIXTURES_URL", "https://fixtures.pulpproject.org")
return {"PULP_FIXTURES_URL": PULP_FIXTURES_URL}
@pytest.fixture(scope="session")
def pulp_cli_settings(tmp_path_factory):
"""
This fixture will setup the config file once per session only.
It is most likely not useful to be included standalone.
The `pulp_cli_env` fixture, however depends on it and sets $XDG_CONFIG_HOME up accordingly.
"""
settings = toml.load("tests/cli.toml")
if os.environ.get("PULP_API_ROOT"):
for key in settings:
settings[key]["api_root"] = os.environ["PULP_API_ROOT"]
settings_path = tmp_path_factory.mktemp("config", numbered=False)
(settings_path / "pulp").mkdir(parents=True)
with open(settings_path / "pulp" / "cli.toml", "w") as settings_file:
toml.dump(settings, settings_file)
yield settings_path, settings
@pytest.fixture(scope="session")
def pulp_cli_gnupghome(tmp_path_factory):
"""
This fixture will setup a GPG home directory once per session only.
"""
gnupghome = tmp_path_factory.mktemp("gnupghome")
gpg = gnupg.GPG(gnupghome=str(gnupghome))
key_file = pathlib.Path(__file__).parent / "GPG-PRIVATE-KEY-pulp-qe"
if key_file.exists():
private_key_data = key_file.read_text()
else:
private_key_url = (
"https://github.com/pulp/pulp-fixtures/raw/master/common/GPG-PRIVATE-KEY-pulp-qe"
)
private_key_data = requests.get(private_key_url).text
key_file.write_text(private_key_data)
import_result = gpg.import_keys(private_key_data)
gpg.trust_keys(import_result.fingerprints[0], "TRUST_ULTIMATE")
return gnupghome
@pytest.fixture
def pulp_cli_env(pulp_cli_settings, pulp_cli_vars, pulp_cli_gnupghome, monkeypatch):
"""
This fixture will set up the environment for cli commands by:
* creating a tmp_dir
* placing the config there
* pointing XDG_CONFIG_HOME accordingly
* supplying other useful environment vars
"""
settings_path, settings = pulp_cli_settings
monkeypatch.setenv("XDG_CONFIG_HOME", str(settings_path))
monkeypatch.setenv("PULP_BASE_URL", settings["cli"]["base_url"])
monkeypatch.setenv("VERIFY_SSL", str(settings["cli"].get("verify_ssl", True)).lower())
monkeypatch.setenv("GNUPGHOME", str(pulp_cli_gnupghome))
for key, value in pulp_cli_vars.items():
monkeypatch.setenv(key, value)
yield settings
if "PULP_LOGGING" in os.environ:
@pytest.fixture(scope="session")
def pulp_container_log_stream():
with subprocess.Popen(
[os.environ["PULP_LOGGING"], "logs", "-f", "--tail", "0", "pulp-ephemeral"],
stdin=subprocess.DEVNULL,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
) as proc:
os.set_blocking(proc.stdout.fileno(), False)
yield proc.stdout
proc.kill()
@pytest.fixture
def pulp_container_log(pulp_container_log_stream):
# Flush logs before starting the test
pulp_container_log_stream.read()
yield
logs = pulp_container_log_stream.read()
if logs is not None:
print(logs.decode())
else:
@pytest.fixture
def pulp_container_log():
yield
| pulp/pulp-cli | pytest_pulp_cli/__init__.py | __init__.py | py | 5,190 | python | en | code | 26 | github-code | 36 | [
{
"api_name": "pytest.File",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "pytest.Function",
"line_number": 29,
"usage_type": "attribute"
},
{
"api_name": "subprocess.run",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "pytest.skip",
... |
5756767654 | import base64
import os
from io import BytesIO, StringIO
from pprint import pprint
import easyocr
import pandas as pd
from PIL import Image
import streamlit as st
bn_reader = easyocr.Reader(['bn'], gpu=True)
en_reader = easyocr.Reader(['en'], gpu=True)
def get_nid_image(image_url):
    """Decode a base64-encoded image payload into a PIL Image object."""
    raw_bytes = base64.b64decode(image_url)
    return Image.open(BytesIO(raw_bytes))
def get_nid_text(front_image, back_image):
    """Run OCR on both sides of an NID card and extract the key fields.

    Returns a dict with Bengali/English name, parents' names, date of birth,
    NID number, address, birth place and issue date.  Fields that cannot be
    located in the OCR output are returned as None — the original raised
    NameError (missing front labels) or IndexError (short back output) on a
    partial read.
    """
    bn_front = bn_reader.readtext(front_image, detail=0, paragraph=False)
    en_front = en_reader.readtext(front_image, detail=0, paragraph=False)
    bn_back = bn_reader.readtext(back_image, detail=0, paragraph=True)
    en_back = en_reader.readtext(back_image, detail=0, paragraph=True)

    def _value_after(phrases, label):
        # OCR output lists a label phrase immediately followed by its value.
        for index, phrase in enumerate(phrases):
            if phrase == label and index + 1 < len(phrases):
                return phrases[index + 1]
        return None

    response = {
        "bn_name": _value_after(bn_front, 'নাম'),
        "en_name": _value_after(en_front, 'Name'),
        "bn_father_name": _value_after(bn_front, 'পিতা'),
        "bn_mother_name": _value_after(bn_front, 'মাতা'),
        "en_dob": _value_after(en_front, 'Date of Birth'),
        "en_nid": _value_after(en_front, 'NID No'),
        # Back-side fields are positional in the paragraph-mode OCR output —
        # assumes address first, then birth place and issue date; TODO confirm.
        "bn_address": bn_back[0] if bn_back else None,
        "en_birth_place": en_back[2] if len(en_back) > 2 else None,
        "en_issue_date": en_back[3] if len(en_back) > 3 else None,
    }
    return response
with st.form("nid_scanner_form", clear_on_submit=True):
front_image = st.file_uploader("Front Image", type=["jpg", "png", "jpeg"])
back_image = st.file_uploader("Back Image", type=["jpg", "png", "jpeg"])
submit = st.form_submit_button("Submit")
if submit:
if front_image is not None and back_image is not None:
front_image_ext = os.path.splitext(front_image.name)[
1].replace(".", "")
back_image_ext = os.path.splitext(back_image.name)[
1].replace(".", "")
front_image_bytes = front_image.getvalue()
back_image_bytes = back_image.getvalue()
front_image_base64 = base64.b64encode(
front_image_bytes).decode("utf-8")
back_image_base64 = base64.b64encode(
back_image_bytes).decode("utf-8")
front_image_data = f"data:image/{front_image_ext};base64," + \
front_image_base64
back_image_data = f"data:image/{back_image_ext};base64," + \
back_image_base64
st.image(front_image_data, caption="Front Image")
st.image(back_image_data, caption="Back Image")
front_str_to_img = Image.open(BytesIO(base64.b64decode(
front_image_base64)))
back_str_to_img = Image.open(BytesIO(base64.b64decode(
back_image_base64)))
try:
response = get_nid_text(front_str_to_img, back_str_to_img)
st.code(response, language="python")
except Exception as e:
st.error(e)
else:
st.error("Please upload both images in order to proceed")
# decodeit = open('hello_level.jpeg', 'wb')
# decodeit.write(base64.b64decode((byte)))
# decodeit.close()
# import base64
# from io import BytesIO
# buffered = BytesIO()
# image.save(buffered, format="JPEG")
# img_str = base64.b64encode(buffered.getvalue())
# if uploaded_file is not None:
# # To read file as bytes:
# bytes_data = uploaded_file.getvalue()
# st.write(bytes_data)
# # To convert to a string based IO:
# stringio = StringIO(uploaded_file.getvalue().decode("utf-8"))
# st.write(stringio)
# # To read file as string:
# string_data = stringio.read()
# st.write(string_data)
# # Can be used wherever a "file-like" object is accepted:
# dataframe = pd.read_csv(uploaded_file)
# st.write(dataframe)
| bhuiyanmobasshir94/Computer-Vision | notebooks/colab/cv/ocr/ekyc/nid_scanner_with_streamlit_app.py | nid_scanner_with_streamlit_app.py | py | 4,287 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "easyocr.Reader",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "easyocr.Reader",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "base64.b64decode",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line... |
31952638633 | ''' EXERCÍCIOS:
2) Dado a sequência de Fibonacci, onde se inicia por 0 e 1 e o próximo valor sempre será a soma dos 2 valores anteriores (exemplo: 0, 1, 1, 2, 3, 5, 8, 13, 21, 34...), escreva um programa na linguagem que desejar onde, informado um número, ele calcule a sequência de Fibonacci e retorne uma mensagem avisando se o número informado pertence ou não a sequência.
IMPORTANTE:
Esse número pode ser informado através de qualquer entrada de sua preferência ou pode ser previamente definido no código;
*/
SOLUÇÃO '''
num = int(input('Digite um número: '))
# Build the Fibonacci sequence until it reaches or passes the requested number.
a, b = 0, 1
fib = [a, b]
while b < num:
    a, b = b, a + b
    fib.append(b)
# `num` belongs to the sequence iff it appears in the generated prefix.
if num in fib:
    print(f'{num} pertence à sequência de Fibonacci.')
else:
    print(f'{num} não pertence à sequência de Fibonacci.')
'''
3) Dado um vetor que guarda o valor de faturamento diário de uma distribuidora, faça um programa, na linguagem que desejar, que calcule e retorne:
• O menor valor de faturamento ocorrido em um dia do mês;
• O maior valor de faturamento ocorrido em um dia do mês;
• Número de dias no mês em que o valor de faturamento diário foi superior à média mensal.
IMPORTANTE:
a) Usar o json ou xml disponível como fonte dos dados do faturamento mensal;
b) Podem existir dias sem faturamento, como nos finais de semana e feriados. Estes dias devem ser ignorados no cálculo da média;
SOLUÇÃO '''
import json
# Carregando os dados do faturamento mensal a partir de um arquivo JSON
with open('faturamento.json', 'r') as f:
data = json.load(f)
faturamento = data['faturamento']
# Calculando o menor e o maior valor de faturamento
menor_faturamento = min(faturamento)
maior_faturamento = max(faturamento)
# Calculando a média mensal de faturamento
dias_com_faturamento = [dia for dia in faturamento if dia > 0]
media_mensal = sum(dias_com_faturamento) / len(dias_com_faturamento)
# Calculando o número de dias com faturamento superior à média mensal
dias_acima_da_media = sum(1 for dia in dias_com_faturamento if dia > media_mensal)
# Imprimindo os resultados
print(f'Menor valor de faturamento: {menor_faturamento}')
print(f'Maior valor de faturamento: {maior_faturamento}')
print(f'Dias com faturamento superior à média mensal: {dias_acima_da_media}')
'''
4) Dado o valor de faturamento mensal de uma distribuidora, detalhado por estado:
SP – R$67.836,43
RJ – R$36.678,66
MG – R$29.229,88
ES – R$27.165,48
Outros – R$19.849,53
Escreva um programa na linguagem que desejar onde calcule o percentual de representação que cada estado teve dentro do valor total mensal da distribuidora.
SOLUÇÃO '''
# Definindo o dicionário com as informações de faturamento de cada estado
faturamento_por_estado = {
'SP': 67836.43,
'RJ': 36678.66,
'MG': 29229.88,
'ES': 27165.48,
'Outros': 19849.53
}
# Calculando o faturamento total mensal da distribuidora
faturamento_total = sum(faturamento_por_estado.values())
# Calculando o percentual de representação de cada estado
percentuais = {}
for estado, faturamento in faturamento_por_estado.items():
percentuais[estado] = (faturamento / faturamento_total) * 100
# Imprimindo os resultados
for estado, percentual in percentuais.items():
print('{} - {:.2f}%'.format(estado, percentual))
'''
5) Escreva um programa que inverta os caracteres de um string.
IMPORTANTE:
a) Essa string pode ser informada através de qualquer entrada de sua preferência ou pode ser previamente definida no código;
b) Evite usar funções prontas, como, por exemplo, reverse;
SOLUÇÃO '''
# Lendo a string de entrada do usuário
string = input('Digite uma string para inverter: ')
# Definindo a string previamente no código
# string = 'Exemplo de string para inverter'
# Criando uma lista vazia para armazenar os caracteres invertidos
caracteres_invertidos = []
# Percorrendo a string de trás para frente e adicionando os caracteres na lista
for i in range(len(string)-1, -1, -1):
caracteres_invertidos.append(string[i])
# Convertendo a lista de caracteres invertidos para uma string
string_invertida = ''.join(caracteres_invertidos)
# Imprimindo a string invertida
print('A string invertida é:', string_invertida)
| bruno-kilo/FibonacciSequence | Fibonacci.py | Fibonacci.py | py | 4,259 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 38,
"usage_type": "call"
}
] |
23603029370 | # -*- coding: utf-8 -*-
"""
Created on Thu Apr 5 17:05:36 2018
@author: s.jayanthi
"""
import cv2, numpy as np
img = cv2.imread(params.color_transfer_target_label1)
dst = [];
rows,cols = img.shape[0], img.shape[1]
M = cv2.getRotationMatrix2D((cols/2,rows/2),45,1)
for channel in range(img.shape[2]):
d_img = img[:,:,channel]
dst.append(cv2.warpAffine(d_img,M,(cols,rows)))
dst = np.stack(dst, axis=-1)
#cv2.imshow('.',img);cv2.imshow('..',dst);
img = cv2.imread(params.here)
cv2.imshow('I', img)
#img_scaled = cv2.resize(img,None,fx=1.5, fy=1.5, interpolation = cv2.INTER_LINEAR)
#cv2.imshow('Scaling - Linear Interpolation', img_scaled)
img_scaled = cv2.resize(img,None,fx=1.2, fy=1.2, interpolation = cv2.INTER_CUBIC)
cv2.imshow('Scaling - Cubic Interpolation', img_scaled)
#img_scaled = cv2.resize(img,(450, 400), interpolation = cv2.INTER_AREA)
#cv2.imshow('Scaling - Skewed Size', img_scaled)
img = cv2.imread(params.here_mask)
cv2.imshow('M', img)
#img_scaled = cv2.resize(img,None,fx=1.5, fy=1.5, interpolation = cv2.INTER_LINEAR)
#cv2.imshow('M Scaling - Linear Interpolation', img_scaled)
img_scaled = cv2.resize(img,None,fx=1.2, fy=1.2, interpolation = cv2.INTER_CUBIC)
cv2.imshow('M Scaling - Cubic Interpolation', img_scaled)
img_scaled = cv2.resize(img,(450, 400), interpolation = cv2.INTER_AREA)
cv2.imshow('M Scaling - Skewed Size', img_scaled) | murali1996/semantic_segmentation_of_nuclei_images | old_versions/dummy.py | dummy.py | py | 1,372 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "cv2.getRotationMatrix2D",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.warpAffine",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "numpy.stack",
"l... |
35919457830 | from django.shortcuts import render, redirect
from .models import *
from login.models import *
from django.contrib import messages
from datetime import date, datetime
# Create your views here.
def appointments(request):
    """Show the logged-in user's upcoming and past appointments.

    Redirects anonymous visitors (no 'user_id' in session) to the login page.
    """
    if 'user_id' not in request.session:
        return redirect('/')
    # NOTE(review): naive datetime.now(); if the project enables USE_TZ,
    # Django expects timezone-aware datetimes in these filters — confirm.
    today = datetime.now()
    context = {
        'active_user': User.objects.get(id=request.session['user_id']),
        # Appointments on/after now, soonest first.
        'appoint_list': Appointment.objects.filter(user__id = request.session['user_id']).filter(date__gte = today).order_by('date'),
        # Appointments before now (already happened).
        'past_appo_list': Appointment.objects.filter(user__id = request.session['user_id']).filter(date__lte = today).order_by('date'),
    }
    return render(request, 'appointments.html', context)
def add_appointment(request):
return render(request, 'add_appo.html')
def new_appo(request):
    """Validate the posted form and create an Appointment for the session user.

    Flashes one message per validation error and returns to the add form;
    on success creates the appointment and redirects to the list view.
    """
    active_user = User.objects.get(id=request.session['user_id'])
    errors = {}
    today = datetime.now()
    comp_date = request.POST['date']
    if len(request.POST['name']) == 0:
        errors['name'] = "Must give a name to your task"
    if request.POST['status'] == '0':
        errors['no_status'] = "Must select a status"
    if len(comp_date) == 0:
        errors['no_date'] = "Must provide a date"
    else:
        # BUG FIX: parse only when a date was provided — the original called
        # strptime before this check and crashed with ValueError on an empty
        # date instead of flashing the validation message.
        date_object = datetime.strptime(comp_date, "%Y-%m-%d")
        if today > date_object and request.POST['status'] == 'Pending':
            errors['dates'] = "Future appointments cannot be in set in a past date"
    if len(errors) > 0:
        for key, msg in errors.items():
            messages.error(request, msg)
        return redirect('/appointments/add')
    else:
        Appointment.objects.create(
            name = request.POST['name'],
            status=request.POST['status'],
            date = comp_date,
            user = active_user,
        )
        return redirect('appointments')
def del_appo(request, appo_id):
    """Delete the appointment with the given primary key and return to the list."""
    Appointment.objects.get(id=appo_id).delete()
    return redirect('/appointments')
def edit_appo(request, appo_id):
    """Render the edit form for one appointment.

    Two separate lookups are kept on purpose so the template receives two
    independent object instances, exactly as before.
    """
    current = Appointment.objects.get(id=appo_id)
    previous = Appointment.objects.get(id=appo_id)
    return render(request, 'edit_appo.html', {'appo': current, 'appo_past': previous})
def upd_appo(request, appo_id):
    """Validate the posted form and update an existing Appointment.

    Flashes one message per validation error and returns to the edit form;
    on success saves the changes and redirects to the list view.
    """
    errors = {}
    today = datetime.now()
    comp_date = request.POST['date']
    if len(request.POST['name']) == 0:
        errors['ed_name'] = "Must give a name to your task"
    if request.POST['status'] == '0':
        errors['ed_no_status'] = "Must select a status"
    if len(comp_date) == 0:
        errors['ed_no_date'] = "Must provide a date"
    else:
        # BUG FIX: parse only when a date was provided — the original called
        # strptime on an empty string and raised ValueError before the
        # validation messages could be shown.
        date_object = datetime.strptime(comp_date, "%Y-%m-%d")
        if today > date_object and request.POST['status'] == 'Pending':
            errors['ed_dates'] = "Future appointments cannot be in set in a past date"
    if len(errors) > 0:
        for key, msg in errors.items():
            messages.error(request, msg)
        return redirect(f'/appointments/{appo_id}/edit')
    else:
        appo = Appointment.objects.get(id=appo_id)
        appo.name = request.POST['name']
        appo.date = request.POST['date']
        appo.status = request.POST['status']
        appo.save()
        return redirect('/appointments')
{
"api_name": "django.shortcuts.redirect",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "dja... |
42354884711 | from openpyxl import Workbook, load_workbook
# Build a target workbook whose first row holds the output column headers.
target_workbook = Workbook()
sheet=target_workbook.active
sheet["A1"]="Номер заказа у партнера"
sheet["B1"]="Номер сертификата" # certificate number
sheet["C1"]="Продукт в системе партнера" # payment purpose
sheet["D1"]="Код продукта в BD"
sheet["E1"]="Дата начала действия" # payment date
sheet["F1"]="Дата окончания действия" # certificate expiry date
sheet["G1"]="Стоимость" # amount
sheet["H1"]="ФИО плательщика" # full name
sheet["I1"]="Дата рождения плательщика"
sheet["J1"]="Пол плательщика"
sheet["K1"]="Номер телефона плательщика" # phone number
sheet["L1"]="Адрес электронной почты плательщика"
sheet["M1"]="Серия паспорта плательщика"
sheet["N1"]="Номер паспорта плательщика"
sheet["O1"]="Кем выдан паспорт плательщика"
sheet["P1"]="Дата выдачи паспорта плательщика"
sheet["Q1"]="Адрес плательщика"
sheet["R1"]="Гражданство плательщика"
sheet["S1"]="Город"
sheet["T1"]="Банк"
sheet["U1"]="Наименование ДО" # office
# Load the source workbook to copy matching rows from.
morphing_workbook1=load_workbook(filename="sample_for_test.xlsx")
target_sheet=morphing_workbook1.active
# NOTE(review): with values_only=True, iter_cols yields *tuples* of cell
# values, so comparing `value` to a single string can never be True.
for value in target_sheet.iter_cols(min_row=4,min_col=2,values_only=True):
    if value=="Назначение платежа":
        # NOTE(review): Workbook has no max_row attribute and worksheet
        # max_row is a property, not a callable — this line would raise.
        length=morphing_workbook1.max_row()
        for row in range(0,length):
            # NOTE(review): append() is a Worksheet method, not a Workbook
            # method — this would raise AttributeError as well.
            morphing_workbook1.append([row])
#for row_value in target_workbook.iter_rows(min_row=2,min_col=3,max_col=3,values_only=True):
target_workbook.save(filename="target_table.xlsx") | Mi6k4/programming_stuff | coding_stuff/python/opnepyexl/morphing.py | morphing.py | py | 1,960 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "openpyxl.Workbook",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "openpyxl.load_workbook",
"line_number": 29,
"usage_type": "call"
}
] |
19993372010 | import re
import argparse
import japanize_matplotlib
import matplotlib.pyplot as plt
from moviepy.editor import VideoClip
from moviepy.video.io.bindings import mplfig_to_npimage
def validate_date(date):
    """argparse type-checker: accept a YYYY/M/D date string, reject anything else.

    BUG FIX: anchored with fullmatch so trailing garbage (e.g. "2020/1/1x")
    is rejected; the original only checked the prefix.
    """
    if re.fullmatch(r"\d{4}/\d{1,2}/\d{1,2}", date):
        return date
    else:
        raise argparse.ArgumentTypeError(
            f"{date} is not a valid date\ne.g. 2020/1/1")
# Command-line interface (help strings are user-facing Japanese text).
parser = argparse.ArgumentParser()
parser.add_argument("fileName", help="LINEのトーク履歴のファイル名")
parser.add_argument("-o", "--output", help="出力ファイル名")
parser.add_argument("-s", "--start-date",
                    help="開始日 | 例: 2020/1/1", type=validate_date)
parser.add_argument("-dpi", "--dpi", help="解像度", default=150, type=int)
parser.add_argument("-lowest", "--lowest",
                    help="最低メッセージ数", default=0, type=int)
args = parser.parse_args()
# Unpack the parsed options into module-level names used by the rest of
# the script (including make_frame below).
fileName = args.fileName
output = args.output
startDate = args.start_date
dpi = args.dpi
lowest = args.lowest
try:
with open(fileName, "r", encoding="utf-8") as f:
data = f.read()
except FileNotFoundError:
print('\033[31m' + f'{fileName}が見つかりません。' + '\033[0m')
exit()
print(f'{fileName}を分析を開始します。')
nowDate = None
isStart = False
user_messages = {}
for line in data.splitlines():
try:
if re.match(r"^\d{4}/\d{1,2}/\d{1,2}\(.+\)", line):
if startDate:
if line.startswith(startDate):
isStart = True
if isStart:
nowDate = f"{line.split('/')[0]}-{line.split('/')[1].zfill(2)}-{line.split('/')[2].split('(')[0].zfill(2)}"
else:
nowDate = f"{line.split('/')[0]}-{line.split('/')[1].zfill(2)}-{line.split('/')[2].split('(')[0].zfill(2)}"
if nowDate is not None and line != nowDate and line != "":
if re.match(r"\d{1,2}:\d{1,2}", line):
if line.endswith("が退出しました。"):
continue
name = line.split("\t")[1]
if name not in user_messages:
user_messages[name] = {}
if nowDate not in user_messages[name]:
user_messages[name][nowDate] = 0
user_messages[name][nowDate] += 1
except Exception as e:
lineCount = len(data.splitlines())
lineIndex = data.splitlines().index(line) + 1
print(
'\033[31m' + f'{lineIndex}行目のデータが正しくありません。' + '\033[0m')
# All distinct dates seen across every user, in chronological order.
dates = sorted(
    list(set([date for user in user_messages.values() for date in user.keys()])))
if dates == []:
    print('\033[31m' + 'データが見つかりませんでした。' + '\033[0m')
    exit()
print('\033[32m' + f'{dates[0]} から {dates[-1]}のデータを読み込みました。' + '\033[0m')
print(f'ユーザー数: {len(user_messages)}')
print(f'日数: {len(user_messages[list(user_messages.keys())[0]])}')
print(
    f'メッセージ数: {sum([sum(user.values()) for user in user_messages.values()])}')
print('----------------------------------------')
# Assign each user a stable matplotlib color index ("C0", "C1", ...).
userColor = {}
for i, user in enumerate(user_messages.keys()):
    userColor[user] = i
print('\033[32m' + 'グラフを作成します。' + '\033[0m')
# Build one video frame: a horizontal bar-chart race of message totals.
def make_frame(t):
    """Render the cumulative per-user bar chart at time t (10 frames/day)."""
    plt.rcParams["figure.figsize"] = (14, 10)
    plt.rcParams["figure.dpi"] = dpi
    plt.rcParams["font.size"] = 14
    plt.clf()
    fig = plt.figure()
    ax = fig.gca()
    time_index = int(t * 10)
    # Cumulative message count per user up to the current date index.
    user_counts = {}
    for user, messages in user_messages.items():
        values = [messages.get(date, 0) for date in dates[:time_index]]
        if sum(values) > 0:
            if sum(values) > lowest:
                user_counts[user] = sum(values)
    # Draw one horizontal bar per user, sorted ascending so the largest
    # total ends up at the top of the chart.
    sorted_users = sorted(user_counts, key=user_counts.get, reverse=False)
    y_pos = range(len(user_counts))
    for user_index, user in enumerate(sorted_users):
        ax.barh(
            y_pos[user_index],
            user_counts[user],
            color="C{}".format(userColor[user]),
            label=user,
        )
        ax.text(
            user_counts[user] + 0.2,
            y_pos[user_index],
            str(user_counts[user]),
            va="center",
        )
        # NOTE(review): the second barh/text below redraws the exact same
        # bar and label again — it looks redundant; confirm before removing.
        values = [user_counts[user]]
        if len(values) > 0:
            ax.barh(
                y_pos[user_index],
                values[-1],
                color=f"C{userColor[user]}",
                label=user
            )
            ax.text(values[-1] + 0.2, y_pos[user_index],
                    str(values[-1]), va="center")
    # Axis cosmetics: x-label/ticks on top, truncated user names on the left.
    ax.set_xlabel("メッセージ数")
    ax.xaxis.set_label_position('top')
    ax.tick_params(top=True, labeltop=True, bottom=False, labelbottom=False)
    ax.text(0, len(user_counts) + 1,
            dates[time_index - 1], ha="left", va="center")
    ax.set_yticks(y_pos)
    ax.set_yticklabels(map(lambda x: x[:8], sorted_users))
    plt.gcf().tight_layout()
    return mplfig_to_npimage(plt.gcf())
# Render the animation to <output>.mp4 at 10 fps (one day = 1 second).
# NOTE(review): `output == None` should idiomatically be `output is None`.
if output == None:
    output = fileName.split(".")[0]
try:
    animation = VideoClip(make_frame, duration=len(dates) / 10)
    animation.write_videofile(output + ".mp4", fps=10,
                              codec="libx264", audio=False)
except KeyboardInterrupt:
    print('\033[31m' + 'キャンセルしました。' + '\033[0m')
    exit()
print('\033[32m' + f'{output}.mp4を作成しました。' + '\033[0m')
| HRTK92/line-to-movie | line_to_video.py | line_to_video.py | py | 5,704 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.match",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentTypeError",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "re.match"... |
4421637196 | from flask import Flask, request, jsonify;
import requests
from flask_cors import CORS
# Flask application with CORS enabled; all endpoints proxy this upstream API.
app = Flask(__name__)
CORS(app)
base_url = 'https://coronavirus-19-api.herokuapp.com'
@app.route('/get-all-cases')
def get_all_cases():
    """Proxy the upstream per-country statistics list as JSON."""
    payload = requests.get(f'{base_url}/countries').json()
    return jsonify(payload)
@app.route('/get-cases-by-country/<country>')
def get_cases_by_country(country):
    """Proxy the full statistics record for one country.

    CONSISTENCY FIX: wrapped in jsonify like every other endpoint in this
    file (the original returned the raw dict).
    """
    case = requests.get(base_url + '/countries/' + country).json()
    return jsonify(case)
@app.route('/<country>/active')
def get_active_cases(country):
    """Return only the 'active' counter for one country."""
    stats = requests.get(f'{base_url}/countries/{country}').json()
    return jsonify(stats['active'])
@app.route('/<country>/cases')
def get_cases(country):
    """Return only the total 'cases' counter for one country."""
    stats = requests.get(f'{base_url}/countries/{country}').json()
    return jsonify(stats['cases'])
@app.route('/<country>/recovered')
def get_recovered_cases(country):
    """Return only the 'recovered' counter for one country."""
    stats = requests.get(f'{base_url}/countries/{country}').json()
    return jsonify(stats['recovered'])
@app.route('/<country>/deaths')
def get_deaths_cases(country):
    """Return only the 'deaths' counter for one country."""
    stats = requests.get(f'{base_url}/countries/{country}').json()
    return jsonify(stats['deaths'])
| Rhukie/hw4_9pk6vl43kh | covid-api/app.py | app.py | py | 1,178 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "flask.Flask",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "flask_cors.CORS",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "flask.jsonify",
"line_numb... |
29085620425 | import random
from threading import Timer
from typing import Union, List
from zone_api.audio_manager import Genre, get_music_streams_by_genres, get_nearby_audio_sink
from zone_api.core.devices.weather import Weather
from zone_api.core.parameters import ParameterConstraint, positive_number_validator, Parameters
from zone_api.environment_canada import EnvCanada
from zone_api.core.action import action, Action
from zone_api.core.devices.motion_sensor import MotionSensor
from zone_api.core.zone_event import ZoneEvent
from zone_api.core.devices.activity_times import ActivityType
@action(events=[ZoneEvent.MOTION], external_events=[ZoneEvent.DOOR_CLOSED],
        devices=[MotionSensor], activity_types=[ActivityType.WAKE_UP], zone_name_pattern='.*Kitchen.*')
class AnnounceMorningWeatherAndPlayMusic(Action):
    """
    Announces the current weather and plays a random music stream twice during the wake up period.
    This is based on the assumption of a household having two adults that leave work at different
    times. The music stops when the front door is closed.
    """
    @staticmethod
    def supported_parameters() -> List[ParameterConstraint]:
        # Two optional tuning knobs on top of the base Action parameters:
        # how long a session may run, and how many sessions per wake-up.
        return Action.supported_parameters() + \
            [ParameterConstraint.optional('durationInMinutes', positive_number_validator),
             ParameterConstraint.optional('maximumStartCount', positive_number_validator)
             ]
    # noinspection PyDefaultArgument
    def __init__(self, parameters: Parameters):
        super().__init__(parameters)
        self._music_streams = get_music_streams_by_genres(
            [Genre.CLASSICAL, Genre.INSTRUMENT, Genre.JAZZ])
        # Defaults: 120-minute session window, at most 2 sessions.
        self._duration_in_minutes = self.parameters().get(self, self.supported_parameters()[-2].name(), 120)
        self._max_start_count = self.parameters().get(self, self.supported_parameters()[-1].name(), 2)
        self._in_session = False   # True while music/announcement is active
        self._start_count = 0      # sessions started in the current window
        self._timer = None         # resets session state after the window
        self._sink = None          # nearby audio sink, resolved lazily
    def on_action(self, event_info):
        # Handles both MOTION (start a session) and external DOOR_CLOSED
        # (stop the session when an external door closes).
        zone = event_info.get_zone()
        zone_manager = event_info.get_zone_manager()
        def stop_music_session():
            self._sink.pause()
            self._in_session = False
        if event_info.get_event_type() == ZoneEvent.DOOR_CLOSED:
            if self._in_session:
                owning_zone = event_info.get_owning_zone()
                if owning_zone.is_external():
                    stop_music_session()
                    return True
            return False
        else:
            self._sink = get_nearby_audio_sink(zone, zone_manager)
            if self._sink is None:
                self.log_warning("Missing audio device; can't play music.")
                return False
            if not self._in_session and \
                    self._start_count < self._max_start_count:
                self._in_session = True
                # Speak the weather first (when available), then start music.
                weather_msg = self.get_morning_announcement(zone_manager)
                if weather_msg is not None:
                    self._sink.play_message(weather_msg)
                self._sink.play_stream(random.choice(self._music_streams), 40)
                self._start_count += 1
                def reset_state():
                    stop_music_session()
                    self._sink = None
                    self._start_count = 0
                # Restart the session-window timer on each new session.
                if self._timer is not None and self._timer.is_alive():
                    self._timer.cancel()
                self._timer = Timer(self._duration_in_minutes * 60, reset_state)
                self._timer.start()
            return True
    # noinspection PyMethodMayBeStatic
    def get_morning_announcement(self, zone_manager) -> Union[None, str]:
        """ Returns a string containing the current's weather and today's forecast. """
        weather = zone_manager.get_first_device_by_type(Weather)
        if weather is None or not weather.support_forecast_min_temperature() \
                or not weather.support_forecast_max_temperature():
            return None
        message = u'Good morning. It is {} degree currently; the weather ' \
                  'condition is {}. Forecasted temperature range is between {} and {} ' \
                  'degrees.'.format(weather.get_temperature(),
                                    weather.get_condition(),
                                    weather.get_forecast_min_temperature(),
                                    weather.get_forecast_max_temperature())
        # Append a precipitation window derived from the next 12 hourly
        # Environment Canada forecasts (Medium/High probability only).
        forecasts = EnvCanada.retrieve_hourly_forecast('Ottawa', 12)
        rain_periods = [f for f in forecasts if
                        'High' == f.get_precipitation_probability() or
                        'Medium' == f.get_precipitation_probability()]
        if len(rain_periods) > 0:
            if len(rain_periods) == 1:
                message += u" There will be precipitation at {}.".format(
                    rain_periods[0].get_user_friendly_forecast_time())
            else:
                message += u" There will be precipitation from {} to {}.".format(
                    rain_periods[0].get_user_friendly_forecast_time(),
                    rain_periods[-1].get_user_friendly_forecast_time())
        return message
| yfaway/zone-apis | src/zone_api/core/actions/announce_morning_weather_and_play_music.py | announce_morning_weather_and_play_music.py | py | 5,234 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "zone_api.core.action.Action",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "zone_api.core.action.Action.supported_parameters",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "zone_api.core.action.Action",
"line_number": 26,
"usage_type... |
553309764 | """序列化练习"""
# pickle: serialize a dict to a binary file and read it back.
import json
import pickle
# NOTE(review): 'acore' looks like a typo for 'score' — confirm intent.
d = dict(name='Bob', age=20, acore=80)
f = open('dump.txt', 'wb')
pickle.dump(d, f)
f.close()
f = open('dump.txt', 'rb')
d = pickle.load(f)
print(d)
# json: serialize the same dict to a JSON string.
d = dict(name='Bob', age=20, acore=80)
print(json.dumps(d))
# JSON advanced
# Serializing and deserializing class instances
class Student(object):
    """Plain data holder for a student record."""

    def __init__(self, name, age, sex):
        self.name = name  # display name
        self.age = age    # age in years
        self.sex = sex    # gender string
# Sample instance used by the serialization demos below.
s = Student('徐新宇', 20, 'female')
def stu2dict(std):
    """Convert a Student-like object into a JSON-serializable dict."""
    return {'name': std.name, 'age': std.age, 'sex': std.sex}
# Serialize via an explicit converter, then via the generic __dict__ trick.
print(json.dumps(s,default=stu2dict,ensure_ascii=False)) # {"name": "xuxin", "age": 20, "sex": "female"}
print(json.dumps(s,default=lambda obj:obj.__dict__,ensure_ascii=False)) # universal trick: works for any plain object
# Deserialization: object_hook turns each decoded dict into a Student.
json_str = '{"age": 20, "score": 88, "name": "Bob"}'
def dict2stu(d):
    # NOTE(review): Student's signature is (name, age, sex) but this passes
    # d['score'] as age and d['age'] as sex — argument order looks wrong;
    # confirm the intended mapping.
    return Student(d['name'],d['score'],d['age'])
print(json.loads(json_str,object_hook=dict2stu)) #<__main__.Student object at 0x000002063CC369E8>
| xuxinyu2020/my-python-work | practice/24pickle_json.py | 24pickle_json.py | py | 1,040 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pickle.dump",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "pickle.load",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "json.dumps",
"line_number": 41,... |
36635799452 | from cmath import exp
from email import message
import ssl, socket
import requests
from dateutil import parser
import pytz
import datetime, time
import telegram
requests.packages.urllib3.disable_warnings()
try:
_create_unverified_https_context = ssl._create_unverified_context
except AttributeError:
# Legacy Python that doesn't verify HTTPS certificates by default
pass
else:
# Handle target environment that doesn't support HTTPS verification
ssl._create_default_https_context = _create_unverified_https_context
def get_domain_content(domain):
    """Fetch https://<domain> with certificate checks disabled and print the response headers."""
    requests.packages.urllib3.disable_warnings()
    headers = requests.get(f'https://{domain}', verify=False).headers
    print(headers)
def get_my_domain(mydomain):
    """Resolve `mydomain`, open a TLS connection on port 443 and pass the peer
    certificate to get_my_cert_dated(); name-resolution and certificate
    errors are silently ignored.
    """
    try:
        socket.setdefaulttimeout(5)
        my_addr = socket.getaddrinfo(mydomain, None)
        c = ssl.create_default_context()
        # NOTE(review): the TLS socket is never closed — consider closing it
        # once the certificate has been processed.
        s = c.wrap_socket(socket.socket(), server_hostname=mydomain)
        s.connect((mydomain, 443))
        my_cert = s.getpeercert()
        get_my_cert_dated(mydomain, my_cert, my_addr)
    except (ssl.CertificateError, socket.gaierror):
        # BUG FIX: the original wrote `except ssl.CertificateError and
        # socket.gaierror`, which evaluates to just socket.gaierror, so
        # certificate errors were never actually caught.
        pass
def days(str1, str2):
    """Whole-day difference str1 - str2; only the leading YYYY-MM-DD part of each string is used."""
    first = datetime.datetime.strptime(str1[:10], "%Y-%m-%d")
    second = datetime.datetime.strptime(str2[:10], "%Y-%m-%d")
    return (first - second).days
def msg_push(message):
    """Send `message` to a fixed Telegram chat via the bot API (token/chat id are placeholders)."""
    bot = telegram.Bot(token="XXXX")
    bot.send_message(chat_id='XXXX', text=message)
def get_my_cert_dated(domain, certs, my_addr):
    """Compute how many days remain on the certificate and push a Telegram
    alert when it expires in fewer than 7 days (or is already expired).

    certs: the dict returned by SSLSocket.getpeercert(), with 'notBefore'
    and 'notAfter' fields; my_addr is unused here beyond the signature.
    """
    cert_beginning_time = parser.parse(certs['notBefore']).astimezone(pytz.utc)
    cert_end_time = parser.parse(certs['notAfter']).astimezone(pytz.utc)
    # cert_end_time_str = datetime.datetime.strptime(cert_end_time[0:10], "%Y-%m-%d")
    local_time = time.strftime('%Y-%m-%d %H:%M:%S', time.localtime())
    # print(days(str(cert_end_time), str(local_time)))
    # print('域名:(%s) 证书失效时间: %s' % (domain, cert_end_time))
    # NOTE(review): compares UTC expiry against *local* wall-clock time —
    # confirm the off-by-hours skew is acceptable here.
    expired_days = days(str(cert_end_time), str(local_time))
    # print(expired_days)
    if (expired_days < 7):
        # print('域名:(%s) 证书还有: %s' % (domain, expired_days))
        message = '域名:(%s) 证书过期天数: %s 证书失效时间: %s' % (domain, expired_days, cert_end_time)
        msg_push(message)
        # print('域名:(%s) 证书过期天数: %s 证书失效时间: %s' % (domain, expired_days, cert_end_time))
def read_domain_files():
    """Run the certificate-expiry check for every domain (one per line) in ./domain.txt."""
    with open('./domain.txt', 'r', encoding="utf-8") as file:
        for raw_line in file:
            name = raw_line.strip()
            try:
                get_my_domain(name)
            except Exception as e:
                print('域名: (%s)-%s' %(name, e))
# print("code")
# Entry point: scan every domain listed in domain.txt.
if __name__ == "__main__":
    read_domain_files()
| coeus-lei/python | domain-ssl-check/domain-ssl.py | domain-ssl.py | py | 2,862 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.packages.urllib3.disable_warnings",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "requests.packages",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "ssl._create_unverified_context",
"line_number": 13,
"usage_type": "attr... |
35001350068 | import os
import struct
import numpy as np
# Based on https://gist.github.com/akesling/5358964 which is in return
# loosely inspired by http://abel.ee.ucla.edu/cvxopt/_downloads/mnist.py
# which is GPL licensed.
def read(dataset = "training", path = "."):
# Python function for importing the MNIST data set. It returns an iterator
# of 2-tuplesq2f s with the first element being the label and the second element
# being a numpy.uint8 2D array of pixel data for the given image.
if dataset is "training":
fname_img = os.path.join(path, 'train-images.idx3-ubyte')
fname_lbl = os.path.join(path, 'train-labels.idx1-ubyte')
elif dataset is "testing":
fname_img = os.path.join(path, 't10k-images.idx3-ubyte')
fname_lbl = os.path.join(path, 't10k-labels.idx1-ubyte')
else:
raise ValueError("dataset must be 'testing' or 'training'")
# Load everything in some numpy arrays
with open(fname_lbl, 'rb') as flbl:
magic, num = struct.unpack(">II", flbl.read(8))
lbl = np.fromfile(flbl, dtype=np.int8)
with open(fname_img, 'rb') as fimg:
magic, num, rows, cols = struct.unpack(">IIII", fimg.read(16))
img = np.fromfile(fimg, dtype=np.uint8).reshape(len(lbl), rows, cols)
img = np.divide(img, 255)
l = list()
for i in range(len(lbl)):
img_vec = img[i].flatten()
lbl_vec = np.zeros(10)
lbl_vec[lbl[i]] = 1
l.append([list(img_vec), list(lbl_vec)])
return l
def show(image):
    """Display a flattened 28x28 image as a grayscale plot."""
    pixels = np.array(image).reshape(28, 28)
    import matplotlib.pyplot as plt
    fig = plt.figure()
    ax = fig.add_subplot(1, 1, 1)
    rendered = ax.imshow(pixels, cmap='Greys')
    rendered.set_interpolation('nearest')
    ax.xaxis.set_ticks_position('top')
    ax.yaxis.set_ticks_position('left')
    plt.show()
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 1... |
24643892493 | import sqlite3
class OperationDB(object):
def get_conn(self):
'''连接数据库'''
conn = sqlite3.connect('cltdata.db')
return conn
def get_cursor(self):
'''创建游标'''
conn = self.get_conn()
#print('执行游标')
return conn.cursor()
    def close_all(self, conn, cu):
        """Close the DB cursor and connection; the `finally` guarantees the
        connection is closed even if closing the cursor raises."""
        try:
            if cu is not None:
                cu.close()
        finally:
            if conn is not None:
                conn.close()
def create_table(self):
'''创建数据库表:sampleinfo testresult'''
conn = self.get_conn()
c = self.get_cursor()
c.execute('''
create table if not exists sampleinfo
(sampleid varchar(20) PRIMARY KEY,pati_id varchar(20),pati_name varchar(20),
pati_age varchar(20),pati_gender varchar(5),status varchar(5))
'''
)
c.execute('''
create table if not exists testresult
(sampleid varchar(20),testname varchar(20),testvalue varchar(20))
'''
)
c.execute('create index testresult_sid on testresult(sampleid)')
conn.commit()
print('创建数据库表[sampleinfo,testresult]成功!')
conn.close()
def insert_sampleinfo(self, data):
'''sampleinfo表插入数据'''
#conn = sqlite3.connect('svrdata.db')
conn = self.get_conn()
conn.execute('insert into sampleinfo values (?,?,?,?,?,?)', data)
conn.commit()
print('表[sampleinfo]数据写入成功!')
conn.close()
def insert_testresult(self, data):
'''testresult表插入数据'''
#conn = sqlite3.connect('svrdata.db')
conn = self.get_conn()
conn.execute('insert into testresult (sampleid, testvalue) values (?,?)', data)
conn.commit()
print('表[testresult]数据写入成功!')
conn.close()
def update_resultvalue(self, data):
'''更新数据...'''
print('更新数据...')
conn = self.get_conn()
conn.execute('update testresult set testvalue = ? where sampleid = ? and testname = ?', data)
conn.commit()
print('数据修改成功')
conn.close()
#sid = input('pls input sid :',)
def select_db(self, sid):
cursor = self.get_cursor()
result= ()
cursor.execute("select 'P' as P,pati_id,pati_name,pati_age,pati_gender from sampleinfo where sampleid=?", sid)
result += (cursor.fetchone(),)
cursor.execute("select 'O' as O,sampleid,status from sampleinfo where sampleid=?", sid)
result += (cursor.fetchone(),)
cursor.execute("select 'R' as R,testname,testvalue from testresult where sampleid =?", sid)
result += tuple(cursor.fetchall())
cursor.close()
self.get_conn().close()
return result
def select_test(self):
'''查询结果数据'''
conn = self.get_conn()
c = self.get_cursor()
c.execute("select * from testresult")
rows = c.fetchall()
conn.close()
return rows
def select_data(self, sid):
'''查询结果数据'''
conn = self.get_conn()
c = self.get_cursor()
c.execute('''select sampleinfo.sampleid,pati_id,pati_name,pati_age,status,testname,testvalue
from sampleinfo left outer join testresult on sampleinfo.sampleid = testresult.sampleid where sampleinfo.sampleid=?''', sid)
rows = c.fetchone()
c.close()
conn.close()
return rows
def testConn(self, params):
return "success"
    def orderEntry(self, params):
        """Create one sample record plus one empty testresult row per test.

        params: sequence of [sampleid, pati_id, pati_name, pati_age,
        pati_gender, status, tests] where tests is an iterable of test names.
        Returns a confirmation string echoing the stored joined row.
        """
        conn = self.get_conn()
        c = conn.cursor()
        c.execute('insert into sampleinfo values (?,?,?,?,?,?)', (params[0],params[1],params[2],params[3],params[4],params[5]))
        conn.commit()
        print('表[sampleinfo]数据写入成功!')
        # One testresult row per requested test; testvalue stays NULL here.
        for test in params[6]:
            c.execute('insert into testresult (sampleid, testname) values (?,?)', (params[0], test))
            conn.commit()
            print('表[testresult]数据写入成功!')
        conn.close()
        test = self.select_data((params[0],))
        return '数据写入成功!, sid: ' + str(test)
    def selectTest(self, sid):
        """Same query trio as select_db(): ('P', ...), ('O', ...) and
        ('R', ...) tuples for one sample id.

        sid: a 1-tuple (sampleid,).
        NOTE(review): unlike select_db(), the cursor's connection is never
        closed here — confirm whether that is intentional.
        """
        cursor = self.get_cursor()
        result= ()
        cursor.execute("select 'P' as P,pati_id,pati_name,pati_age,pati_gender from sampleinfo where sampleid=?", sid)
        result += (cursor.fetchone(),)
        cursor.execute("select 'O' as O,sampleid,status from sampleinfo where sampleid=?", sid)
        result += (cursor.fetchone(),)
        cursor.execute("select 'R' as R,testname,testvalue from testresult where sampleid =?", sid)
        result += tuple(cursor.fetchall())
        cursor.close()
return result | weijingwei/liwei_python | ASTM2/Operation_db.py | Operation_db.py | py | 4,387 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlite3.connect",
"line_number": 6,
"usage_type": "call"
}
] |
40280870495 | import cv2
import numpy as np
import argparse
import random
import os
import os.path as osp
from sklearn.feature_extraction import image
# import imutils
from tqdm import tqdm,trange
# argument parser
'''
dataNum: How many samples you want to synthesize
load_image_path: Path to load background image
load_rain_path: Path to load rain streak
load_depth_path: Path to load depth information
save_input_image_path: Path to save images with rain and haze
save_gt_image_path: Path to save clean ground truth images
save_gtNohaze_image_path: Path to save no haze (rainy) images
save_gtNoRain_image_path: Path to save no rain (hazy) images
save_depth_path: Path to save depth information
rainType: How many rain streaks you want to overlay on images
ang: Angle for random rotating [-ang:ang]
'''
def Parser():
    """Build and parse the command-line options for fog synthesis."""
    arg_parser = argparse.ArgumentParser()
    arg_parser.add_argument("--load_image_path", type=str, default="cam_stereo_left_lut/", help='path to load images')
    arg_parser.add_argument("--load_depth_path", type=str, default="depth_image/", help='path to load depth info')
    arg_parser.add_argument("--save_image_path", type=str, default="foggy_camera/", help='path to save ground truth images')
    arg_parser.add_argument("--light_min", type=float, default=0.3)
    arg_parser.add_argument("--light_max", type=float, default=0.8)
    arg_parser.add_argument("--beta_min", type=float, default=1.3)
    arg_parser.add_argument("--beta_max", type=float, default=1.3)
    arg_parser.add_argument("--beta_range", type=float, default=0.3)
    arg_parser.add_argument("--train_only", type=bool, default=False)
    arg_parser.add_argument("--target_image_path", type=str, default="foggy_camera/", help='path to load images')
    return arg_parser.parse_args()
# depth to transmission formula
def depthToTransmission(depth, b_min, b_max):
    """Map a depth image (0-255) to a transmission map exp(-beta * depth/255),
    with beta drawn uniformly from [b_min, b_max]."""
    normalized = depth / 255.0
    beta = np.random.uniform(b_min, b_max)
    return np.exp(-beta * normalized)
def light_effect(img,airlight,night):
    """Build the atmospheric-light map for fog compositing.

    Daytime (night == 0): a constant single-channel map of `airlight`,
    merged to 3 channels; the two brightness maps are returned as None.
    Nighttime (night == 1): bright spots (gray > 205) are detected, blurred
    repeatedly to create halos, and blended on top of the base airlight.
    Returns (atlight_3ch, brightness_raw_3ch, brightness_blurred_3ch).
    """
    if night==1:
        # rgb to gray
        gray=cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
        # gaussian
        gray=cv2.GaussianBlur(gray,(21,21),0)
        # threshold: keep only near-saturated pixels, scaled to [0, 1].
        light=gray>205
        brightness_0=light*((gray-205)/50.0)
        # Repeated blur + max builds a soft glow that keeps bright cores.
        brightness_gau=cv2.GaussianBlur(brightness_0,(25,25),0)
        brightness=np.maximum(brightness_0,brightness_gau)
        brightness_gau=cv2.GaussianBlur(brightness,(25,25),0)
        brightness=np.maximum(brightness,brightness_gau)
        brightness_gau=cv2.GaussianBlur(brightness,(45,45),0)
        brightness=np.maximum(brightness,brightness_gau)
        brightness_gau=cv2.GaussianBlur(brightness,(45,45),0)
        brightness=np.maximum(brightness,brightness_gau)
        brightness_gau=cv2.GaussianBlur(brightness,(65,65),0)
        brightness=np.maximum(brightness,brightness_gau)
        brightness_gau=cv2.GaussianBlur(brightness,(65,65),0)
        brightness=np.maximum(brightness,brightness_gau)
        brightness_gau=cv2.GaussianBlur(brightness,(21,21),0)
        brightness=np.maximum(brightness,brightness_gau)
        brightness_gau=cv2.GaussianBlur(brightness,(21,21),0)
        brightness=np.maximum(brightness,brightness_gau)
        brightness_gau=cv2.GaussianBlur(brightness,(21,21),0)
        brightness=np.maximum(brightness,brightness_gau)
        # adjust airlight: lift toward 0.95 where the glow is strong.
        atlight = airlight*np.ones(img.shape)[:,:,0]
        atlight = atlight+ (0.95-airlight)*brightness
        return cv2.merge([atlight,atlight,atlight]), cv2.merge([brightness_0,brightness_0,brightness_0]), cv2.merge([brightness,brightness,brightness])
    else:
        atlight = airlight*np.ones(img.shape)[:,:,0]
        return cv2.merge([atlight,atlight,atlight]), None, None
def add_fog(image, depth, trans, airLight, night):
    """Composite haze onto `image` with the standard atmospheric model:
    hazy = J * t + A * (1 - t). Returns (hazy, light, brightness_raw, brightness)."""
    light, b0, b = light_effect(image, airLight, night)
    normalized = (image / 255.0).astype('float32')
    hazyimage = normalized * trans + light * (np.ones(image.shape) - trans)
    return hazyimage, light, b0, b
def get_valid_list():
    """Read the day/night clear-weather train splits.

    Each line "a,b" of splits/train_clear_<day|night>.txt becomes the file
    name "a_b.png". Returns (valid_day, valid_night) lists.

    REFACTOR: the original duplicated the whole read/convert block for the
    day and night files; both now share one loader.
    """
    def _read_split(split_name):
        # Shared loader for one split file.
        names = []
        with open(f"splits/{split_name}.txt", 'r') as split_file:
            for line in split_file:
                names.append(line.replace(",", "_").rstrip() + '.png')
        return names

    valid_day = _read_split("train_clear_day")
    valid_night = _read_split("train_clear_night")
    return valid_day, valid_night
def main():
    """Synthesize foggy images for every listed clear-weather frame.

    For each input image that appears in a split list, converts its depth map
    to a transmission map, draws a random airlight (different ranges for day
    and night), composites fog and writes the result to the save directory.
    """
    opt = Parser()
    # check dirs exist or not
    if not os.path.isdir(opt.save_image_path):
        os.makedirs(opt.save_image_path)
        print(f'save image at {opt.save_image_path}')
    # load dir and count
    images_list = os.listdir(opt.load_image_path)
    datasize = len(images_list)
    valid_day_list, valid_night_list=get_valid_list()
    # start synthesizing loop
    for i in trange(datasize):
        file_name=images_list[i]
        if (file_name in valid_day_list) or (file_name in valid_night_list):
            if file_name in valid_night_list:
                night=1
            elif file_name in valid_day_list:
                night=0
            else:
                print("wrong")
            # load image/depth path
            image_path=osp.join(opt.load_image_path,file_name)
            depth_path=osp.join(opt.load_depth_path,file_name)
            # load image/depth
            image=cv2.imread(image_path)
            depth=cv2.imread(depth_path)
            # cv2.imwrite(osp.join(opt.save_image_path,'image.png'), image)
            # cv2.imwrite(osp.join(opt.save_image_path,'depth.png'), depth)
            # convert depth to transmission (fixed beta range 1.0-1.6; the
            # --beta_min/--beta_max CLI options are not used here).
            trans = depthToTransmission(depth, 1.0, 1.6)
            # cv2.imwrite(osp.join(opt.save_image_path,'trans.png'), trans*255)
            if night==0:
                airLight = np.random.uniform(0.4, 0.75)
            elif night==1:
                airLight = np.random.uniform(0.3, 0.65)
            # start adding
            hazyimage,light,b0,b=add_fog(image,depth,trans,airLight,night)
            # save
            cv2.imwrite(osp.join(opt.save_image_path,file_name), hazyimage*255)
        else:
            continue
# Entry point: run the fog-synthesis pipeline.
if __name__ == "__main__":
    main()
| Chushihyun/MT-DETR | data/datagen_fog.py | datagen_fog.py | py | 6,879 | python | en | code | 22 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "numpy.random.uniform",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 45,
"usage_type": "attribute"
},
{
"api_name": "numpy.... |
39660535041 | from collections import deque
class Solution:
    def networkDelayTime(self, times: List[List[int]], n: int, k: int) -> int:
        """LeetCode 743: minimum time for a signal from node k to reach all n
        nodes, or -1 if some node is unreachable.

        Uses SPFA-style relaxation with a FIFO queue.
        NOTE(review): `List` and `inf` are LeetCode-injected globals — add
        explicit imports if this runs outside that environment.
        """
        # Adjacency map: source -> list of [travel_time, target].
        reach = {}
        for time in times:
            if time[0] in reach:
                reach[time[0]].append([time[2], time[1]])
            else:
                reach[time[0]] = [[time[2], time[1]]]
        # Index 0 is unused (nodes are 1-based); set it to 0 so it never
        # dominates the final max().
        distanceReached = [inf for _ in range(n + 1)]
        distanceReached[0], distanceReached[k] = 0, 0
        queue = deque([k])
        while queue:
            node = queue.popleft()
            if node not in reach:
                continue
            # Relax every outgoing edge; re-enqueue targets that improved.
            for elem in reach[node]:
                time, nextVal = elem
                if distanceReached[node] + time < distanceReached[nextVal]:
                    distanceReached[nextVal] = distanceReached[node] + time
                    queue.append(nextVal)
        delay = max(distanceReached)
        if delay == inf:
            return -1
return delay | deusi/practice | 743-network-delay-time/743-network-delay-time.py | 743-network-delay-time.py | py | 1,034 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "collections.deque",
"line_number": 15,
"usage_type": "call"
}
] |
7963630141 | from tkinter import *
from tkinter.font import Font
import numpy as np
import itertools as itr
import os
kryteria = ['rowery', 'telewizory', 'książki', 'telefony', 'drukarki']
#kryteria = ['rowery', 'telewizory', 'telefony']
kryteria_d = { i : "%.2f" % (1/len(kryteria)) for i in kryteria}
kryteriaKomb =list(itr.combinations(range(len(kryteria)),2))
kryteriaKomb_d = { i : 1 for i in kryteriaKomb }
label_list = []
label_list2 = []
button_list = []
spinbox_list = []
scale_list = []
skala = ['1/9', '1/8', '1/7', '1/6', '1/5', '1/4', '1/3', '1/2', '1', '2', '3', '4', '5', '6', '7', '8', '9']
##################### 'ramki' w okienku
root_ahp = Tk()
root_ahp.wm_title("AHP tkinter")
frame_g = Frame(root_ahp)
frame_g.grid(row = 0, column = 0, sticky = 'n', columnspan = 2, padx = 10, pady = 10)
frame_l = Frame(root_ahp)
frame_l.grid(row = 1, column = 0, sticky = 'n')
frame_p = Frame(root_ahp)
frame_p.grid(row = 1, column = 1, sticky = 'n')
frame_d = Frame(root_ahp)
frame_d.grid(row = 2, column = 0, columnspan = 2 ,sticky = 'n', padx = 10, pady = 10)
##################### funkcje
def aktd():
    """Copy the scale text shown beside each slider into kryteriaKomb_d."""
    for idx, pair in enumerate(kryteriaKomb):
        kryteriaKomb_d[pair] = label_list[idx * 4 + 1].cget('text')
def wagi():
    """Compute AHP weights from the pairwise comparisons and update the GUI.

    Builds the reciprocal comparison matrix from kryteriaKomb_d, derives the
    weight vector by column-normalised row averaging, computes Saaty's
    consistency ratio, colours/relabels the status listbox accordingly, and
    stores the weights (3 decimals, as strings) back into kryteria_d.
    """
    # Saaty Random Index per matrix size; first two entries are 0.01 rather
    # than the usual 0 — presumably to avoid division by zero for n <= 2.
    # TODO confirm against a published RI table.
    RandomIndex = [0.01, 0.01, 0.58, 0.90, 1.12, 1.24, 1.32, 1.41, 1.45, 1.49]
    # Acceptance threshold for the consistency ratio.
    # NOTE(review): Saaty's conventional cutoff is 0.10, not 0.20 — verify.
    cri = 0.20
    n = len(kryteria)
    k_matrix = np.ones((n, n))
    for i in range(n):
        for j in range(n):
            if i == j :
                k_matrix[i][j] = 1
            if i < j:
                # eval turns stored strings like '1/3' into floats.
                # NOTE(review): eval on widget-sourced text — safe only
                # because values come from the fixed `skala` list.
                k_matrix[i][j] = eval(str(kryteriaKomb_d[(i,j)]))
                k_matrix[j][i] = k_matrix[i][j]**(-1)
    # Normalise each column, then average across rows to get the weights.
    weights = np.true_divide(k_matrix, np.sum(k_matrix, axis=0))
    weights = np.sum(weights, axis=1)
    weights = weights / weights.shape[0]
    # Consistency check: lambda_max via the weighted-sum/weight ratios.
    cons_vector = np.multiply(k_matrix, weights)
    weightedsum = np.sum(cons_vector, axis=1)
    ratio = weightedsum / weights
    lambdamax = np.sum(ratio, axis = 0) / n
    if n - 1 == 0:
        ConsistencyIndex = (lambdamax - n) / 1
    else:
        ConsistencyIndex = (lambdamax - n) / (n - 1)
    ConsistencyRatio = ConsistencyIndex / RandomIndex[n-1]
    # Status strings are user-facing Polish: "matrix is (NOT) consistent".
    if ConsistencyRatio <= cri:
        listbox2.delete(0,END)
        listbox2.insert(END,'macierz jest spójna:')
        listbox2.insert(END, '{0:.3g}'.format(ConsistencyRatio)+ ' < '+ str(cri))
        listbox2.config(bg = '#b2ffa8')
        b_ok.config(relief=RAISED)
        b_ok.config(state=NORMAL)
    else:
        listbox2.delete(0,END)
        listbox2.insert(END, 'macierz NIE jest spójna: ')
        listbox2.insert(END, '{0:.3g}'.format(ConsistencyRatio)+ ' > '+ str(cri))
        listbox2.config(bg = '#ff7a7a')
        b_ok.config(relief=SUNKEN)
        b_ok.config(state=DISABLED)
    for i in range(len(kryteria)):
        kryteria_d[kryteria[i]] = "%.3f" % weights[i]
def wyswietl_wynik():
    """Refresh the results listbox from the criterion-weight dictionary."""
    listbox.delete(0, END)
    for name, weight in kryteria_d.items():
        listbox.insert(END, (name, weight))
def sval(r):
    """Slider callback: mirror every slider position onto its two labels.

    The left label shows the reciprocal end of the Saaty scale, the right
    one the direct value; afterwards "ok" is disabled until weights are
    recomputed.  The callback argument r (the moved slider's value) is
    unused because all sliders are refreshed.
    """
    for idx, slider in enumerate(scale_list):
        pos = int(slider.get())
        label_list[idx * 4 + 1].config(text=skala[-pos - 1])
        label_list[idx * 4 + 2].config(text=skala[pos])
    b_ok.config(relief=SUNKEN, state=DISABLED)
def bt(*_args):
    """Aggregate callback: read the sliders, recompute weights, refresh display.

    Accepts (and ignores) any positional arguments so it can serve both as a
    Button command (invoked with no arguments) and as the Spinbox callback
    created in nSpinbox, which calls it as bt(spinbox, row) — with the old
    zero-argument signature that call raised TypeError.
    """
    aktd()
    wagi()
    wyswietl_wynik()
def nLabel(r, c, tx):
    """Create a plain criterion-name label at grid (r, c) and register it."""
    widget = Label(frame_p, text=tx)
    label_list.append(widget)
    widget.grid(row=r, column=c, pady=1, padx=4)
def nLabel2(r, c, tx):
    """Create a narrow, grooved value label at grid (r, c) and register it."""
    widget = Label(frame_p, text=tx, width=3, relief=GROOVE)
    label_list.append(widget)
    widget.grid(row=r, column=c, pady=1, padx=4)
def nSpinbox(r, c):
    """Create a Spinbox over the Saaty scale at grid (r, c), preset to '1'."""
    widget = Spinbox(frame_p, values=skala, width=3,
                     font=Font(family='default', size=12),
                     command=lambda: bt(widget, widget.grid_info()['row']))
    spinbox_list.append(widget)
    widget.grid(row=r, column=c, pady=1, padx=4)
    widget.delete(0, "end")
    widget.insert(0, 1)
def nScale(r, c):
    """Create a 17-step comparison slider at grid (r, c), centred on 8 ('1')."""
    widget = Scale(frame_p, from_=0, to=16, orient=HORIZONTAL,
                   showvalue=0, command=sval, length=150)
    scale_list.append(widget)
    widget.set(8)
    widget.grid(row=r, column=c, pady=1, padx=4)
def lkat(r, x):
    """Build one comparison row r: left name, '--', slider, '--', right name."""
    left_name = kryteria[int(x[0])]
    right_name = kryteria[int(x[1])]
    nLabel(r, 0, left_name)
    nLabel2(r, 1, '--')
    nScale(r, 2)
    nLabel2(r, 3, '--')
    nLabel(r, 4, right_name)
def reset():
    """Return every slider to the neutral midpoint, reset the comparison
    dict to 1, recompute the (now uniform) weights and re-enable "ok"."""
    for slider, pair in zip(scale_list, kryteriaKomb):
        slider.set(8)
        kryteriaKomb_d[pair] = 1
    wagi()
    wyswietl_wynik()
    b_ok.config(relief=RAISED, state=NORMAL)
# Results listbox (criterion, weight) and consistency-status listbox.
listbox = Listbox(frame_l, width=21, height=len(kryteria)+1)
listbox.grid(columnspan = 2, row=0, column=0, pady=4, padx = 4)
listbox2 = Listbox(frame_l, width=21, height=2)
listbox2.grid(columnspan = 2, row=1, column=0, pady=4, padx = 4)
# Seed the results box with the initial uniform weights.
for i in kryteria_d:
    listbox.insert(END, (i, kryteria_d[i]))
# One comparison row per criteria pair.
for i in range(len(kryteriaKomb)):
    lkat(i, kryteriaKomb[i])
# Buttons: 'ok' closes the window, 'oblicz wagi' (compute weights) runs bt,
# 'reset' restores the neutral state.
b_ok = Button(frame_l, text = 'ok', command=root_ahp.destroy)
b_ok.grid(row = 4, column = 0, sticky = 'nwes', columnspan = 2, pady =(0,4), padx = 4)
b_m = Button(frame_l, text = 'oblicz wagi', command= bt)
b_m.grid(row = 3, column = 0, sticky = 'nwes', columnspan = 2, padx = 4)
b_r = Button(frame_l, text = 'reset', command= reset)
b_r.grid(row = 5, column = 0, sticky = 'nwes', columnspan = 2, pady = (8,0), padx = 4)
root_ahp.mainloop()
| kwiecien-rafal/optymalizator-prezentow | AHP tkinter.py | AHP tkinter.py | py | 5,843 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "itertools.combinations",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.ones",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.true_divide",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"... |
29909677781 | # -*- coding: utf-8 -*-
import os
import sys
import math
import copy
import random
import timeit
import argparse
import numpy as np
import tensorflow as tf
from ..model import mlp_rel
from ..lib.data_utils import load_prop
from ..lib.utils import makedirs
if __name__ == '__main__':
    # CLI: sizes of the model and dataset paths for propensity estimation.
    parser = argparse.ArgumentParser(
        description='propensity estimation w/o condition for arxiv')
    parser.add_argument('-m', default=21, type=int,
                        help='number of top positions for which estimates are desired')
    parser.add_argument('-d', default=1, type=int,
                        help='dimension of feature')
    parser.add_argument('-i', '--inference_version', default=0, type=int,
                        help='inference version')
    parser.add_argument('--epoch', default=10000, type=int,
                        help='#epoch')
    parser.add_argument('-n1', default=32, type=int,
                        help='number of propensity hidden layer')
    parser.add_argument('-n2', default=32, type=int,
                        help='number of relevance hidden layer')
    parser.add_argument('feat_type', help='feat type')
    parser.add_argument('data_dir', help='data dir')
    parser.add_argument('output_dir', help='output dir')
    args = parser.parse_args()
    start = timeit.default_timer()
    M = args.m
    D = args.d
    makedirs(args.output_dir)
    with tf.Session() as sess:
        model = mlp_rel.MLP(D, M, args.n1, args.n2, 0.1)
        # Click arrays: each .npy unpacks into (clicked, not-clicked).
        # NOTE(review): exact array layout depends on the upstream dumper.
        train_click_path = os.path.join(args.data_dir, 'train.click.npy')
        train_c, train_not_c = np.load(train_click_path)
        train_feat_path = os.path.join(args.data_dir, 'train.{}.feat.npy'.format(args.feat_type))
        valid_click_path = os.path.join(args.data_dir, 'valid.click.npy')
        valid_c, valid_not_c = np.load(valid_click_path)
        valid_feat_path = os.path.join(args.data_dir, 'valid.{}.feat.npy'.format(args.feat_type))
        X_valid = np.load(valid_feat_path)
        valid_loss_path = os.path.join(args.output_dir, 'valid_loss.txt')
        test_click_path = os.path.join(args.data_dir, 'test.click.npy')
        test_c, test_not_c = np.load(test_click_path)
        test_feat_path = os.path.join(args.data_dir, 'test.{}.feat.npy'.format(args.feat_type))
        X_test = np.load(test_feat_path)
        test_loss_path = os.path.join(args.output_dir, 'test_loss.txt')
        X_train = np.load(train_feat_path)
        # Resume from an existing checkpoint, otherwise start fresh.
        if tf.train.get_checkpoint_state(args.output_dir):
            model.saver.restore(sess, tf.train.latest_checkpoint(args.output_dir))
        else:
            tf.global_variables_initializer().run()
        # Full-batch training; keep only the checkpoint with the best
        # validation loss.  NOTE(review): with --epoch 0 the loop never runs
        # and `valid_loss` below would be undefined.
        best_loss = math.inf
        for epoch in range(args.epoch):
            train_loss, _ = sess.run([model.loss, model.train_op],
                                     feed_dict={model.x:X_train, model.c:train_c,
                                                model.not_c: train_not_c})
            valid_loss = sess.run([model.loss],
                                  feed_dict={model.x:X_valid, model.c:valid_c,
                                             model.not_c: valid_not_c})[0]
            if valid_loss < best_loss:
                best_loss = valid_loss
                model.saver.save(sess, '{}/checkpoint'.format(args.output_dir), global_step=model.global_step)
            if epoch % 100 == 0:
                print('{}\tTrain Loss: {:.4f} Best Valid Loss: {:.4f}'.format(epoch, train_loss, valid_loss))
        # Restore the best checkpoint before final evaluation.
        # NOTE(review): the recorded valid_loss is from the last epoch, not
        # necessarily the restored best checkpoint — confirm intent.
        model.saver.restore(sess, tf.train.latest_checkpoint(args.output_dir))
        with open(valid_loss_path, 'w') as fout:
            fout.write('Loss: {}'.format(valid_loss))
        test_loss = sess.run([model.loss],
                             feed_dict={model.x:X_test, model.c:test_c,
                                        model.not_c: test_not_c})[0]
        with open(test_loss_path, 'w') as fout:
            fout.write('Loss: {}'.format(test_loss))
    end = timeit.default_timer()
    print('Running time: {:.3f}s.'.format(end - start))
| fzc621/CondPropEst | src/arxiv_obj/cpbm.py | cpbm.py | py | 3,915 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "timeit.default_timer",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "lib.utils.makedirs",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "tenso... |
3406969266 | """
main.py
train the deep iamge prior model and get the denoised figure, calculate PSNR when required.
"""
import hydra
from pytorch_lightning import Trainer, seed_everything
from src.conf import Config
from src.data.datamodule import DeepImagePriorDataModule
from src.model.model import DeepImagePriorModel
import logging
# Silence pytorch-lightning's INFO-level console chatter; only errors surface.
logging.getLogger("lightning").setLevel(logging.ERROR)
@hydra.main(config_path=".", config_name="base_config", version_base="1.2")
def train_app(conf: Config) -> None:
    """
    The main train loop: fit one Deep Image Prior model per figure directory.

    Iterates the sub-directories of ``conf.data.root_dir`` (one noisy figure
    each) and stops after ``conf.data.total_run_figs_number`` figures have
    been trained.  Stray non-directory entries are skipped; previously they
    still consumed ``enumerate`` indices, so they counted toward the run
    limit and shifted the printed figure numbers.
    """
    # * seed (falsy value disables explicit seeding)
    if conf.train.random_seed:
        seed_everything(conf.train.random_seed)

    trained = 0  # number of figure directories actually processed
    for fig_dir in conf.data.root_dir.iterdir():
        if not fig_dir.is_dir():
            continue
        light_data = DeepImagePriorDataModule(conf.data, img_dir=fig_dir)
        light_data.setup(stage="fit")
        print(f"train fig{trained}, key: {light_data.dataset[0]['key']}")
        # A fresh model per figure: deep image prior never shares weights.
        light_model = DeepImagePriorModel(conf)
        train_conf = conf.train
        trainer = Trainer(
            accelerator=train_conf.accelerator,
            devices=(
                train_conf.distributed_devices if train_conf.accelerator == "gpu" else None),
            max_epochs=train_conf.epochs,
            num_sanity_val_steps=0,
            check_val_every_n_epoch=train_conf.check_val_every_n_epoch,
            enable_progress_bar=True
        )
        trainer.fit(light_model, light_data)
        trained += 1
        if trained >= conf.data.total_run_figs_number:
            break
# hydra injects `conf`, hence the pylint suppression on the bare call.
if __name__ == "__main__":
    train_app()  # pylint: disable=no-value-for-parameter
| ziyixi/Deep-Image-Prior-Pytorch-Lightning | main.py | main.py | py | 1,640 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "logging.ERROR",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "src.conf.Config",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "pytorch_lightnin... |
25718308081 | read_me = """Python 3 script that logs data from the Meraki dashboard into a MongoDB database.
You will need to have MongoDB installed and supply a configuration file for this script to run.
You can get the MongoDB Community Server here: https://www.mongodb.com/try/download/community
You can find a sample configuration file here:
https://github.com/meraki/automation-scripts/blob/master/offline_logging/config.yaml
Script syntax:
python offline_logging.py -c <config_file>
Required Python 3 modules:
requests
pyyaml
pymongo
To install these Python 3 modules via pip you can use the following commands:
pip install requests
pip install pyyaml
pip install pymongo
Depending on your operating system and Python environment, you may need to use commands
"python3" and "pip3" instead of "python" and "pip".
View the created database with a MongoDB viewing tool such as MongoDB Compass:
https://www.mongodb.com/products/compass
A version of MongoDB Compass can be installed with the MongoDB Community Server.
"""
import sys, getopt, yaml, time, datetime, pymongo
from urllib.parse import urlencode
from requests import Session, utils
class NoRebuildAuthSession(Session):
    # requests.Session normally strips the Authorization header when a
    # redirect crosses hosts; overriding rebuild_auth with a no-op keeps
    # the Meraki bearer token attached across dashboard redirects.
    def rebuild_auth(self, prepared_request, response):
        """
        This method is intentionally empty. Needed to prevent auth header stripping on redirect. More info:
        https://stackoverflow.com/questions/60358216/python-requests-post-request-dropping-authorization-header
        """
# Give up after this many consecutive rate-limit retries.
API_MAX_RETRIES = 3
# Seconds allowed to establish the connection / receive the response.
API_CONNECT_TIMEOUT = 60
API_TRANSMIT_TIMEOUT = 60
# HTTP status the dashboard returns when the request rate limit is hit.
API_STATUS_RATE_LIMIT = 429
#Set to True or False to enable/disable console logging of sent API requests
FLAG_REQUEST_VERBOSE = True
API_BASE_URL = "https://api.meraki.com/api/v1"
def merakiRequest(p_apiKey, p_httpVerb, p_endpoint, p_additionalHeaders=None, p_queryItems=None, 
        p_requestBody=None, p_verbose=False, p_retry=0):
    """Send one request to the Meraki Dashboard API v1.

    Handles bearer authentication, 429 rate-limit retries (honouring the
    Retry-After header, up to API_MAX_RETRIES), and Link-header pagination,
    concatenating all pages into a single response body.

    Returns a 4-tuple (success, errors, responseHeaders, responseBody).
    NOTE(review): responseHeaders is never populated in this implementation;
    callers should not rely on it.
    """
    #returns success, errors, responseHeaders, responseBody
    if p_retry > API_MAX_RETRIES:
        if(p_verbose):
            print("ERROR: Reached max retries")
        return False, None, None, None
    bearerString = "Bearer " + p_apiKey
    headers = {"Authorization": bearerString}
    if not p_additionalHeaders is None:
        headers.update(p_additionalHeaders)
    query = ""
    if not p_queryItems is None:
        # doseq=True flattens list-valued params like "fields[]".
        query = "?" + urlencode(p_queryItems, True)
    url = API_BASE_URL + p_endpoint + query
    verb = p_httpVerb.upper()
    session = NoRebuildAuthSession()
    try:
        if(p_verbose):
            print(verb, url)
        if verb == "GET":
            r = session.get(
                url,
                headers = headers,
                timeout = (API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)
            )
        elif verb == "PUT":
            if not p_requestBody is None:
                if (p_verbose):
                    print("body", p_requestBody)
            r = session.put(
                url,
                headers = headers,
                json = p_requestBody,
                timeout = (API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)
            )
        elif verb == "POST":
            if not p_requestBody is None:
                if (p_verbose):
                    print("body", p_requestBody)
            r = session.post(
                url,
                headers = headers,
                json = p_requestBody,
                timeout = (API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)
            )
        elif verb == "DELETE":
            r = session.delete(
                url,
                headers = headers,
                timeout = (API_CONNECT_TIMEOUT, API_TRANSMIT_TIMEOUT)
            )
        else:
            # Unsupported HTTP verb.
            return False, None, None, None
    # NOTE(review): bare except also swallows KeyboardInterrupt — consider
    # narrowing to requests.RequestException.
    except:
        return False, None, None, None
    if(p_verbose):
        print(r.status_code)
    success = r.status_code in range (200, 299)
    errors = None
    responseHeaders = None
    responseBody = None
    if r.status_code == API_STATUS_RATE_LIMIT:
        # Rate limited: wait as instructed, then retry recursively.
        if(p_verbose):
            print("INFO: Hit max request rate. Retrying %s after %s seconds" % (p_retry+1, r.headers["Retry-After"]))
        time.sleep(int(r.headers["Retry-After"]))
        success, errors, responseHeaders, responseBody = merakiRequest(p_apiKey, p_httpVerb, p_endpoint, p_additionalHeaders, 
            p_queryItems, p_requestBody, p_verbose, p_retry+1)
        return success, errors, responseHeaders, responseBody
    try:
        rjson = r.json()
    except:
        # Body was not JSON (e.g. empty 204 response).
        rjson = None
    if not rjson is None:
        if "errors" in rjson:
            errors = rjson["errors"]
            if(p_verbose):
                print(errors)
        else:
            responseBody = rjson
    if "Link" in r.headers:
        # Follow pagination: fetch the "next" page and append its items.
        parsedLinks = utils.parse_header_links(r.headers["Link"])
        for link in parsedLinks:
            if link["rel"] == "next":
                if(p_verbose):
                    print("Next page:", link["url"])
                splitLink = link["url"].split("/api/v1")
                success, errors, responseHeaders, nextBody = merakiRequest(p_apiKey, p_httpVerb, splitLink[1], 
                    p_additionalHeaders=p_additionalHeaders, 
                    p_requestBody=p_requestBody, 
                    p_verbose=p_verbose)
                if success:
                    if not responseBody is None:
                        responseBody = responseBody + nextBody
                else:
                    responseBody = None
    return success, errors, responseHeaders, responseBody
def getNetworks(p_apiKey, p_organizationId):
    """GET every network belonging to the given organization."""
    return merakiRequest(p_apiKey, "GET",
                         "/organizations/%s/networks" % p_organizationId,
                         p_verbose=FLAG_REQUEST_VERBOSE)
def getClients(p_apiKey, p_networkId, p_timespan):
    """GET the clients seen on a network during the last p_timespan seconds."""
    return merakiRequest(p_apiKey, "GET",
                         "/networks/%s/clients" % p_networkId,
                         p_queryItems={"timespan": p_timespan},
                         p_verbose=FLAG_REQUEST_VERBOSE)
def getApplicationUsage(p_apiKey, p_networkId, p_clientsStr, p_timespan):
    """GET per-application usage for a comma-separated list of client ids."""
    return merakiRequest(p_apiKey, "GET",
                         "/networks/%s/clients/applicationUsage" % p_networkId,
                         p_queryItems={"clients": p_clientsStr, "timespan": p_timespan},
                         p_verbose=FLAG_REQUEST_VERBOSE)
def getClientTrafficHistory(p_apiKey, p_networkId, p_clientId):
    """GET the traffic history records for one client on a network."""
    return merakiRequest(
        p_apiKey, "GET",
        "/networks/%s/clients/%s/trafficHistory" % (p_networkId, p_clientId),
        p_verbose=FLAG_REQUEST_VERBOSE)
def getNetworkMerakiAuthUsers(p_apiKey, p_networkId):
    """GET the Meraki-auth users defined on a network."""
    return merakiRequest(p_apiKey, "GET",
                         "/networks/%s/merakiAuthUsers" % p_networkId,
                         p_verbose=FLAG_REQUEST_VERBOSE)
def getNetworkSmDevices(p_apiKey, p_networkId):
    """GET Systems Manager devices for a network, requesting a fixed field set."""
    wanted_fields = ['ip', 'systemType', 'lastConnected', 'location', 'lastUser',
                     'ownerEmail', 'ownerUsername', 'imei', 'simCarrierNetwork']
    return merakiRequest(p_apiKey, "GET",
                         "/networks/%s/sm/devices" % p_networkId,
                         p_queryItems={"fields[]": wanted_fields},
                         p_verbose=FLAG_REQUEST_VERBOSE)
def getOrganizationAdmins(p_apiKey, p_organizationId):
    """GET the dashboard administrators of an organization."""
    return merakiRequest(p_apiKey, "GET",
                         "/organizations/%s/admins" % p_organizationId,
                         p_verbose=FLAG_REQUEST_VERBOSE)
def kill_script():
    # Print the usage help text and abort with a non-zero exit code.
    print(read_me)
    sys.exit(2)
def load_config(p_file):
    """Parse the YAML configuration file at p_file and return its contents."""
    with open(p_file) as config_file:
        return yaml.load(config_file, Loader=yaml.FullLoader)
def filter_networks(config_sources, networks):
    """Return the subset of networks selected by the source filters.

    If include_all_networks is set, every network passes.  Otherwise a
    network is kept on the first filter it matches, checked in order:
    explicit names, explicit ids (logged to the console), then tags.
    """
    if config_sources['include_all_networks']:
        return networks
    names = config_sources['network_names']
    ids = config_sources['network_ids']
    tags = config_sources['network_tags']
    selected = []
    for net in networks:
        if names is not None and net['name'] in names:
            selected.append(net)
            continue
        if ids is not None and net['id'] in ids:
            selected.append(net)
            print ('match id ' + net['id'])
            continue
        if tags is not None:
            for tag in tags:
                if tag in net['tags']:
                    selected.append(net)
                    break
    return selected
def filter_admins(p_admins, p_networks, p_tags):
    """Keep admins with org-wide access, or net/tag access matching the filters."""
    filtered_net_ids = {onet['id'] for onet in p_networks}
    kept = []
    for admin in p_admins:
        has_access = admin['orgAccess'] != 'none'
        if not has_access:
            has_access = any(anet['id'] in filtered_net_ids
                             for anet in admin['networks'])
        if not has_access:
            has_access = any(atag['tag'] in p_tags for atag in admin['tags'])
        if has_access:
            kept.append(admin)
    return kept
def log_to_database(db, document, collection, mode='append', keyValuePair=None):
    """Write a document to the named MongoDB collection.

    mode 'append' inserts a new document; mode 'update' upserts the first
    document matching keyValuePair.  Returns True on success, False when
    the database operation raised (the error is printed).
    """
    target = db[collection]
    if mode == 'append':
        try:
            target.insert_one(document)
        except Exception as exc:
            print(exc)
            print("ERROR: Could not create document in database")
            return False
    elif mode == 'update':
        try:
            target.update_one(keyValuePair, {"$set": document}, upsert=True)
        except Exception as exc:
            print(exc)
            print("ERROR: Could not update document in database")
            return False
    return True
def database_delete_all_matches(db, collection, filter):
    """Delete every document in collection matching filter; True on success."""
    try:
        db[collection].delete_many(filter)
    except Exception as exc:
        print(exc)
        print("ERROR: Could not delete document in database")
        return False
    return True
def split_history_array(history, max_records):
    """Split history into consecutive chunks of at most max_records items."""
    return [list(history[start:start + max_records])
            for start in range(0, len(history), max_records)]
def perform_scan(config):
    """Run one full logging pass: pull data for every enabled endpoint of
    every filtered network and persist it to MongoDB.

    Each endpoint section in config['endpoints'] controls whether it runs,
    which collection it writes to, and whether documents are appended or
    upserted.
    """
    print(str(datetime.datetime.now()) + " -- Starting scan")
    api_key = config['meraki_dashboard_api']['api_key']
    org_id = config['meraki_dashboard_api']['organization_id']
    scan_interval = config['scan_interval_minutes']*60
    success, errors, headers, all_networks = getNetworks(api_key, org_id)
    if not success:
        print("ERROR: Unable to get networks' list")
    else:
        filtered_networks = filter_networks(config['sources'], all_networks)
        mongo_client = pymongo.MongoClient("mongodb://" + config['mongodb']['host'] + ":" + str(config['mongodb']['port']) + "/")
        db = mongo_client[config['mongodb']['database_name']]
        # Organization-wide admins (filtered to the selected networks/tags).
        if 'getOrganizationAdmins' in config['endpoints'] and config['endpoints']['getOrganizationAdmins']['enabled']:
            success, errors, headers, all_admins = getOrganizationAdmins(api_key, org_id)
            if not all_admins is None:
                admins = filter_admins(all_admins, filtered_networks, config['sources']['network_tags'])
                for admin in admins:
                    log_to_database(db, admin, config['endpoints']['getOrganizationAdmins']['collection'], 
                        config['endpoints']['getOrganizationAdmins']['mode'],
                        keyValuePair={'id': admin['id']})
        for network in filtered_networks:
            # value used as a flag if "getNetworkClients" is disabled
            clients = None
            # Clients seen during the last scan interval, optionally skipping
            # Meraki-manufactured devices (the infrastructure itself).
            if 'getNetworkClients' in config['endpoints'] and config['endpoints']['getNetworkClients']['enabled']:
                success, errors, headers, raw_clients = getClients(api_key, network['id'], scan_interval)
                if raw_clients is None:
                    print("ERROR: Cloud not fetch clients for net %s" % network['id'])
                else:
                    scan_time = datetime.datetime.now()
                    if config['endpoints']['getNetworkClients']['ignore_manufacturer_meraki']:
                        clients = []
                        for client in raw_clients:
                            if not client['manufacturer'] in ["Cisco Meraki", "Meraki"]:
                                clients.append(client)
                    else:
                        clients = raw_clients
                    for client in clients:
                        document = client
                        document['scanTime'] = scan_time
                        document['scanIntervalMinutes'] = config['scan_interval_minutes']
                        document['networkId'] = network['id']
                        document['networkName'] = network['name']
                        log_to_database(db, document, config['endpoints']['getNetworkClients']['collection'],
                            config['endpoints']['getNetworkClients']['mode'])
            # Per-application usage: needs the client list from above.
            if 'getNetworkClientsApplicationUsage' in config['endpoints'] and config['endpoints']['getNetworkClientsApplicationUsage']['enabled']:
                if clients is None:
                    print("ERROR: Client list must be fetched for getNetworkClientsApplicationUsage")
                else:
                    client_list = ""
                    for client in clients:
                        if client_list != "":
                            client_list += ","
                        client_list += client['id']
                    success, errors, headers, usage = getApplicationUsage(api_key, network['id'], client_list, scan_interval)
                    if usage is None:
                        print("ERROR: Cloud not fetch clients' usage for net %s" % network['id'])
                    else:
                        scan_time = datetime.datetime.now()
                        for item in usage:
                            document = item
                            document['scanTime'] = scan_time
                            document['scanIntervalMinutes'] = config['scan_interval_minutes']
                            document['networkId'] = network['id']
                            document['networkName'] = network['name']
                            log_to_database(db, document, config['endpoints']['getNetworkClientsApplicationUsage']['collection'],
                                config['endpoints']['getNetworkClientsApplicationUsage']['mode'])
            # Traffic history: potentially large, so it is split across
            # multiple documents of bounded size per client.
            # NOTE(review): scan_time here reuses whichever value was last
            # set above — confirm this is intended when usage was skipped.
            if 'getNetworkClientTrafficHistory' in config['endpoints'] and config['endpoints']['getNetworkClientTrafficHistory']['enabled']:
                if clients is None:
                    print("ERROR: Client list must be fetched for getNetworkClientTrafficHistory")
                else:
                    for client in clients:
                        success, errors, headers, traffic_history = getClientTrafficHistory(api_key, network['id'], client['id'])
                        if not traffic_history is None:
                            history_pages = split_history_array(traffic_history, 
                                config['endpoints']['getNetworkClientTrafficHistory']['max_history_records_per_document'])
                            total_pages = len(history_pages)
                            if total_pages > 0:
                                base_info = {
                                    'clientId'              : client['id'],
                                    'clientMac'             : client['mac'],
                                    'clientIp'              : client['ip'],
                                    'clientDescription'     : client['description'],
                                    'networkId'             : network['id'],
                                    'networkName'           : network['name'],
                                    'scanTime'              : scan_time,
                                    'scanIntervalMinutes'   : config['scan_interval_minutes'],
                                    'totalPages'            : total_pages
                                }
                                filter = {
                                    'clientId'              : base_info['clientId'],
                                    'networkId'             : base_info['networkId']
                                }
                                # 'update' mode replaces the client's whole
                                # history: delete old pages, then append new.
                                if config['endpoints']['getNetworkClientTrafficHistory']['mode'] == 'update':
                                    success = database_delete_all_matches(db, 
                                        config['endpoints']['getNetworkClientTrafficHistory']['collection'], filter)
                                page_number = 0
                                for page in history_pages:
                                    page_number += 1
                                    document = {}
                                    for key in base_info:
                                        document[key] = base_info[key]
                                    document['pageNumber'] = page_number
                                    document['trafficHistory'] = page
                                    success = log_to_database(db, document, config['endpoints']['getNetworkClientTrafficHistory']['collection'], mode="append")
                                    if not success:
                                        print("clientId : %s" % document['clientId'])
                                        print("clientMac : %s" % document['clientMac'])
                                        print("clientIp : %s" % document['clientIp'])
                                        print("clientDescription : %s" % document['clientDescription'])
                                        print("networkId : %s" % document['networkId'])
                                        print("networkName : %s" % document['networkName'])
                                        print("pageNumber : %s" % document['pageNumber'])
                                        print("trafficHistory record count : %s" % len(document['trafficHistory']))
            # Meraki-auth users, optionally merged with the bound template's.
            if 'getNetworkMerakiAuthUsers' in config['endpoints'] and config['endpoints']['getNetworkMerakiAuthUsers']['enabled']:
                success, errors, headers, auth_users = getNetworkMerakiAuthUsers(api_key, network['id'])
                if 'configTemplateId' in network and config['endpoints']['getNetworkMerakiAuthUsers']['include_template_users']:
                    success, errors, headers, template_users = getNetworkMerakiAuthUsers(api_key, network['configTemplateId'])
                    if not template_users is None:
                        if not auth_users is None:
                            auth_users += template_users
                        else:
                            auth_users = template_users
                if not auth_users is None:
                    for user in auth_users:
                        document = user
                        document['networkId'] = network['id']
                        log_to_database(db, document, config['endpoints']['getNetworkMerakiAuthUsers']['collection'],
                            config['endpoints']['getNetworkMerakiAuthUsers']['mode'],
                            keyValuePair={'id': user['id'], 'networkId': network['id']})
            # Systems Manager devices, optionally restricted by device tag.
            if 'getNetworkSmDevices' in config['endpoints'] and config['endpoints']['getNetworkSmDevices']['enabled']:
                if 'systemsManager' in network['productTypes']:
                    success, errors, headers, sm_devices = getNetworkSmDevices(api_key, network['id'])
                    if not sm_devices is None:
                        tag_disabled = not config['endpoints']['getNetworkSmDevices']['filter_by_device_tag_enabled']
                        tag_filter = config['endpoints']['getNetworkSmDevices']['target_device_tag']
                        scan_time = datetime.datetime.now()
                        for device in sm_devices:
                            if tag_disabled or tag_filter in device['tags']:
                                document = {
                                    'scanTime': scan_time,
                                    'scanIntervalMinutes': config['scan_interval_minutes'],
                                    'networkId': network['id'],
                                    'networkName': network['name']
                                }
                                for key in device:
                                    document[key] = device[key]
                                log_to_database(db, document, 
                                    config['endpoints']['getNetworkSmDevices']['collection'],
                                    config['endpoints']['getNetworkSmDevices']['mode'],
                                    keyValuePair={'id': device['id']})
    print(str(datetime.datetime.now()) + " -- Scan complete")
def main(argv):
    """Parse ``-c <config_file>``, load the YAML config, then scan forever.

    Runs perform_scan() every config['scan_interval_minutes'] minutes and
    never returns; exits (printing usage where appropriate) on bad
    arguments or an unreadable configuration file.
    """
    arg_config_file = None
    try:
        opts, args = getopt.getopt(argv, 'c:')
    except getopt.GetoptError:
        sys.exit(2)
    for opt, arg in opts:
        if opt == '-c':
            arg_config_file = arg
    if arg_config_file is None:
        kill_script()
    try:
        config = load_config(arg_config_file)
        print(str(datetime.datetime.now()) + " -- Initializing script")
    # Catch only real errors (missing file, invalid YAML): the original bare
    # "except:" also swallowed KeyboardInterrupt/SystemExit, which made the
    # script impossible to interrupt cleanly during startup.
    except Exception:
        kill_script()
    while(True):
        perform_scan(config)
        print(str(datetime.datetime.now()) + " -- Next scan in " + str(config['scan_interval_minutes']) + " minutes")
        time.sleep(config['scan_interval_minutes']*60)
# Entry point: forward the CLI arguments (minus the program name) to main().
if __name__ == '__main__':
    main(sys.argv[1:])
{
"api_name": "requests.Session",
"line_number": 33,
"usage_type": "name"
},
{
"api_name": "urllib.parse.urlencode",
"line_number": 67,
"usage_type": "call"
},
{
"api_name": "time.sleep",
"line_number": 125,
"usage_type": "call"
},
{
"api_name": "requests.utils.pa... |
8531279483 | import numpy as np
import galois
from nacl.public import PrivateKey, Box
from io import BytesIO
import cProfile
import re
# A library for Shamir sharing arrays of field elements
# - An ArrayShare object is one share of the whole array
# - ArrayShare objects have encrypt and decrypt methods
# - share_array secret shares an array, returning a dict mapping x coordinates
# to their corresponding ArrayShare objects
# - reconstruct_array reconstructs the original array, given a list of
# ArrayShare objects
# - sum_share_array performs a column-wise sum of a list of ArrayShare objects
# that all agree on the x coordinate
# convert a numpy array to bytes
def array_to_bytes(x: np.ndarray) -> bytes:
    """Serialize a numpy array into .npy-format bytes."""
    buffer = BytesIO()
    np.save(buffer, x, allow_pickle=True)
    return buffer.getvalue()
# de-serialize a numpy array from bytes
def bytes_to_array(b: bytes) -> np.ndarray:
    """Deserialize .npy-format bytes back into a numpy array."""
    return np.load(BytesIO(b), allow_pickle=True)
class ArrayShare:
    """One Shamir share of an array of field elements.

    Stores a single x coordinate plus one y coordinate per element of the
    original array (all elements share the same x).  When ``encrypted`` is
    True, ``ys`` holds a NaCl box ciphertext instead of field elements.
    """
    def __init__(self, x, ys, T, GF, K=1, encrypted=False):
        self.x = x
        self.ys = ys
        self.GF = GF
        self.T = T
        self.K = K
        self.encrypted = encrypted

    def encrypt(self, sk, pk):
        """Return a copy of this share whose ys are box-encrypted bytes."""
        assert not self.encrypted
        ciphertext = Box(sk, pk).encrypt(array_to_bytes(self.ys))
        return ArrayShare(self.x, ciphertext, self.T, self.GF,
                          K=self.K, encrypted=True)

    def decrypt(self, sk, pk):
        """Return a copy of this share with ys decrypted back to an array."""
        assert self.encrypted
        plaintext = Box(sk, pk).decrypt(self.ys)
        return ArrayShare(self.x, bytes_to_array(plaintext), self.T, self.GF,
                          K=self.K, encrypted=False)

    def __str__(self):
        return (f'ArrayShare(x={self.x}, len={len(self.ys)}, T={self.T}, '
                f'K={self.K}, enc={self.encrypted})')

    __repr__ = __str__
def reshape(secrets, K, GF):
    """Reshape a flat array of secrets into rows of K, zero-padding the tail."""
    count = len(secrets)
    if count % K == 0:
        return secrets.reshape((count // K, K))
    rows = count // K + 1
    padded = GF.Zeros(rows * K)
    padded[:count] = secrets
    return padded.reshape(rows, K)
def share_packed(secrets, range_shares, T, K, GF):
    """Packed Shamir sharing of a flat array of field elements.

    secrets: flat array; it is zero-padded and reshaped into rows of K, and
    each row is packed into one polynomial whose values at x = 0..K-1 are
    the secrets and whose remaining T-1 points are random (degree K+T-2).
    Shares are evaluated at x = K + i for each i in range_shares, so share
    x coordinates never collide with the secret positions.

    Returns a dict mapping each requested offset i to an ArrayShare at
    x = i + K holding one y value per row.
    """
    secrets = reshape(secrets, K, GF)
    secrets = np.atleast_2d(secrets)
    p_size = (secrets.shape[0], K + T - 1)
    poly_points = GF.Random((p_size))
    poly_points[:, :K] = secrets
    xs = GF.Range(0, p_size[1])
    # One interpolated polynomial per row of packed secrets.
    polys = [galois.lagrange_poly(xs, pps) for pps in poly_points]
    shares = {x: ArrayShare(x+K, GF(np.array([poly(x+K) for poly in polys])), T, GF, K=K) \
            for x in range_shares}
    return shares
def share_array(secrets, range_shares, T, GF, K=1):
    """Secret shares an array of secrets. Returns a dict mapping the x coordinate of each share
    to an ArrayShare object with that x coordinate.

    T is the threshold parameter and K the packing factor (K=1 gives
    classic, unpacked Shamir sharing); reconstruction needs T + K shares.
    """
    return share_packed(secrets, range_shares, T, K, GF)
def reconstruct_array(array_shares):
    """Given a list of ArrayShare objects, reconstructs the original array.

    Requires at least T + K shares that agree on length, field, T and K.
    For each array position it interpolates the packing polynomial through
    the shares' (x, y) points and reads the K packed secrets back out at
    x = 0..K-1.
    """
    assert len(array_shares) > 0
    array_len = len(array_shares[0].ys)
    GF = array_shares[0].GF
    T = array_shares[0].T
    K = array_shares[0].K
    assert len(array_shares) >= T + K, f'we have {len(array_shares)} shares, and we need {T + K}'
    # Error checking
    for s in array_shares:
        assert len(s.ys) == array_len
        assert s.GF == GF
        assert s.T == T
        assert s.K == K
    # Reconstruction
    arr = []
    xs = GF([s.x for s in array_shares])
    for i in range(array_len):
        # TODO: check T
        ys = GF([s.ys[i] for s in array_shares])
        poly = galois.lagrange_poly(xs, ys)
        # Packed secrets live at evaluation points 0..K-1 (the inner `i`
        # deliberately shadows the outer loop index inside the comprehension).
        arr.extend([poly(i) for i in range(0, K)])
    return GF(arr)
def sum_share_array(shares):
    """Column-wise sum of ArrayShare objects that share an x coordinate.

    All shares must be unencrypted and agree on x, GF, T and K; returns a
    new ArrayShare holding the element-wise sum of their y arrays (a share
    of the element-wise sum of the underlying secret arrays).
    """
    assert len(shares) > 0
    first = shares[0]
    for share in shares:
        assert not share.encrypted
        assert share.x == first.x
        assert share.GF == first.GF
        assert share.T == first.T
        assert share.K == first.K
    stacked = first.GF([share.ys for share in shares])
    return ArrayShare(first.x, stacked.sum(axis=0), first.T, first.GF,
                      K=first.K, encrypted=False)
def prof():
    """Ad-hoc smoke test: share a 500-element constant array and rebuild it."""
    GF = galois.GF(2**31 - 1)
    secrets = GF(np.random.randint(5, 6, 500))
    shares = share_array(secrets, range(1, 65), 4, GF, K=50)
    recovered = reconstruct_array(list(shares.values()))
    print(recovered)
if __name__ == '__main__':
    prof()
    # Uncomment to profile the sharing/reconstruction hot path instead:
    #cProfile.run('prof()', sort='cumtime')
| uvm-plaid/olympia | util/shamir_sharing.py | shamir_sharing.py | py | 4,835 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "numpy.ndarray",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "io.BytesIO",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.save",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_numbe... |
22565649698 | from typing import Any, Callable, Dict, Optional
import torch
import torch.nn as nn
from .gaussian_diffusion import GaussianDiffusion
from .k_diffusion import karras_sample
DEFAULT_KARRAS_STEPS = 64
DEFAULT_KARRAS_SIGMA_MIN = 1e-3
DEFAULT_KARRAS_SIGMA_MAX = 160
DEFAULT_KARRAS_S_CHURN = 0.0
def uncond_guide_model(
    model: Callable[..., torch.Tensor], scale: float
) -> Callable[..., torch.Tensor]:
    """Wrap ``model`` for classifier-free guidance.

    The returned callable keeps only the first half of the batch, feeds it
    through ``model`` twice (as conditional and unconditional halves), blends
    the two eps predictions with ``scale``, and duplicates the blended eps so
    the output batch size matches the input.
    """

    def model_fn(x_t, ts, **kwargs):
        cond_half = x_t[: len(x_t) // 2]
        doubled = torch.cat([cond_half, cond_half], dim=0)
        out = model(doubled, ts, **kwargs)
        # First 3 channels are eps; any remaining channels pass through.
        eps, rest = out[:, :3], out[:, 3:]
        cond_eps, uncond_eps = torch.chunk(eps, 2, dim=0)
        guided_eps = uncond_eps + scale * (cond_eps - uncond_eps)
        guided_eps = torch.cat([guided_eps, guided_eps], dim=0)
        return torch.cat([guided_eps, rest], dim=1)

    return model_fn
def sample_latents(
    *,
    batch_size: int,
    model: nn.Module,
    diffusion: GaussianDiffusion,
    model_kwargs: Dict[str, Any],
    guidance_scale: float,
    clip_denoised: bool,
    use_fp16: bool,
    use_karras: bool,
    karras_steps: int,
    sigma_min: float,
    sigma_max: float,
    s_churn: float,
    device: Optional[torch.device] = None,
    progress: bool = False,
) -> torch.Tensor:
    """Sample a batch of latents of shape (batch_size, model.d_latent).

    Args:
        batch_size: Number of latents to sample.
        model: Denoising model exposing ``d_latent`` and, optionally,
            ``cached_model_kwargs`` for batch-wide conditioning.
        diffusion: Diffusion process providing ``p_sample_loop``.
        model_kwargs: Conditioning tensors forwarded to the model.
        guidance_scale: Classifier-free guidance weight; 1.0 (and 0.0 for the
            Karras path) disables guidance.
        use_karras: If True sample with ``karras_sample``; otherwise use the
            diffusion's ancestral ``p_sample_loop``.
        device: Target device; defaults to the model's parameter device.
        progress: Whether the sampler should display progress.

    Returns:
        Tensor of sampled latents on ``device``.
    """
    sample_shape = (batch_size, model.d_latent)

    if device is None:
        device = next(model.parameters()).device

    # Let the model pre-compute conditioning tensors for the whole batch.
    if hasattr(model, "cached_model_kwargs"):
        model_kwargs = model.cached_model_kwargs(batch_size, model_kwargs)

    # For classifier-free guidance, pair every conditioning tensor with a
    # zeroed copy so the model sees (conditional, unconditional) halves.
    # NOTE(review): the non-Karras branch below only checks != 1.0 -- confirm
    # guidance_scale == 0.0 is meant to be Karras-only.
    if guidance_scale != 1.0 and guidance_scale != 0.0:
        for k, v in model_kwargs.copy().items():
            model_kwargs[k] = torch.cat([v, torch.zeros_like(v)], dim=0)

    # (Removed a redundant recomputation of sample_shape that duplicated the
    # assignment at the top of the function.)
    with torch.autocast(device_type=device.type, enabled=use_fp16):
        if use_karras:
            samples = karras_sample(
                diffusion=diffusion,
                model=model,
                shape=sample_shape,
                steps=karras_steps,
                clip_denoised=clip_denoised,
                model_kwargs=model_kwargs,
                device=device,
                sigma_min=sigma_min,
                sigma_max=sigma_max,
                s_churn=s_churn,
                guidance_scale=guidance_scale,
                progress=progress,
            )
        else:
            internal_batch_size = batch_size
            if guidance_scale != 1.0:
                # Guided model consumes a doubled batch (cond + uncond).
                model = uncond_guide_model(model, guidance_scale)
                internal_batch_size *= 2
            samples = diffusion.p_sample_loop(
                model,
                shape=(internal_batch_size, *sample_shape[1:]),
                model_kwargs=model_kwargs,
                device=device,
                clip_denoised=clip_denoised,
                progress=progress,
            )
    return samples
| openai/shap-e | shap_e/diffusion/sample.py | sample.py | py | 2,871 | python | en | code | 10,619 | github-code | 36 | [
{
"api_name": "typing.Callable",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "torch.cat",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "torch.chunk",
"line_n... |
20039489365 | from __future__ import unicode_literals
from __future__ import print_function
from __future__ import division
from __future__ import absolute_import
from builtins import open
from builtins import super
from builtins import str
from future import standard_library
from future.utils import with_metaclass
standard_library.install_aliases()
import abc
import http.client
import http.server
import io
import logging
import os
import socketserver
from . import io as avro_io
from . import protocol
from . import schema
logger = logging.getLogger(__name__)
# ------------------------------------------------------------------------------
# Constants
def LoadResource(name):
  """Returns the text content of a resource file stored next to this module."""
  resource_path = os.path.join(os.path.dirname(__file__), name)
  with open(resource_path, 'r') as resource_file:
    return resource_file.read()
# Handshake schema is pulled in during build
HANDSHAKE_REQUEST_SCHEMA_JSON = LoadResource('HandshakeRequest.avsc')
HANDSHAKE_RESPONSE_SCHEMA_JSON = LoadResource('HandshakeResponse.avsc')
HANDSHAKE_REQUEST_SCHEMA = schema.Parse(HANDSHAKE_REQUEST_SCHEMA_JSON)
HANDSHAKE_RESPONSE_SCHEMA = schema.Parse(HANDSHAKE_RESPONSE_SCHEMA_JSON)
HANDSHAKE_REQUESTOR_WRITER = avro_io.DatumWriter(HANDSHAKE_REQUEST_SCHEMA)
HANDSHAKE_REQUESTOR_READER = avro_io.DatumReader(HANDSHAKE_RESPONSE_SCHEMA)
HANDSHAKE_RESPONDER_WRITER = avro_io.DatumWriter(HANDSHAKE_RESPONSE_SCHEMA)
HANDSHAKE_RESPONDER_READER = avro_io.DatumReader(HANDSHAKE_REQUEST_SCHEMA)
META_SCHEMA = schema.Parse('{"type": "map", "values": "bytes"}')
META_WRITER = avro_io.DatumWriter(META_SCHEMA)
META_READER = avro_io.DatumReader(META_SCHEMA)
SYSTEM_ERROR_SCHEMA = schema.Parse('["string"]')
AVRO_RPC_MIME = 'avro/binary'
# protocol cache
# Map: remote name -> remote MD5 hash
_REMOTE_HASHES = {}
# Decoder/encoder for a 32 bits big-endian integer.
UINT32_BE = avro_io.STRUCT_INT
# Default size of the buffers use to frame messages:
BUFFER_SIZE = 8192
# ------------------------------------------------------------------------------
# Exceptions
class AvroRemoteException(schema.AvroException):
  """Signals an error message sent by an Avro requestor or responder."""

  def __init__(self, fail_msg=None):
    super(AvroRemoteException, self).__init__(fail_msg)
class ConnectionClosedException(schema.AvroException):
  """Raised when the transport yields no bytes while a frame is expected."""
  pass
# ------------------------------------------------------------------------------
# Base IPC Classes (Requestor/Responder)
class BaseRequestor( with_metaclass( abc.ABCMeta, object ) ):
  """Base class for the client side of a protocol interaction."""

  def __init__(self, local_protocol, transceiver):
    """Initializes a new requestor object.

    Args:
      local_protocol: Avro Protocol describing the messages sent and received.
      transceiver: Transceiver instance to channel messages through.
    """
    self._local_protocol = local_protocol
    self._transceiver = transceiver
    self._remote_protocol = None
    self._remote_hash = None
    self._send_protocol = None

  @property
  def local_protocol(self):
    """Returns: the Avro Protocol describing the messages sent and received."""
    return self._local_protocol

  @property
  def transceiver(self):
    """Returns: the underlying channel used by this requestor."""
    return self._transceiver

  @abc.abstractmethod
  def _IssueRequest(self, call_request, message_name, request_datum):
    """Sends a serialized request and decodes the server's reply.

    Args:
      call_request: Serialized handshake and call request, as bytes.
      message_name: Name of the IPC message.
      request_datum: Original request payload, kept so implementations can
          retry the call after a handshake protocol exchange.

    Returns:
      The decoded IPC response.
    """
    # Fixed: previously raised the undefined name 'Error' (NameError).
    raise NotImplementedError('Abstract method')

  def Request(self, message_name, request_datum):
    """Writes a request message and reads a response or error message.

    Args:
      message_name: Name of the IPC method.
      request_datum: IPC request.

    Returns:
      The IPC response.
    """
    # build handshake and call request
    buffer_writer = io.BytesIO()
    buffer_encoder = avro_io.BinaryEncoder(buffer_writer)
    self._WriteHandshakeRequest(buffer_encoder)
    self._WriteCallRequest(message_name, request_datum, buffer_encoder)

    # send the handshake and call request; block until call response
    call_request = buffer_writer.getvalue()
    return self._IssueRequest(call_request, message_name, request_datum)

  def _WriteHandshakeRequest(self, encoder):
    """Emits the handshake request.

    Args:
      encoder: Encoder to write the handshake request into.
    """
    local_hash = self._local_protocol.md5

    # if self._remote_hash is None:
    #   remote_name = self.transceiver.remote_name
    #   self._remote_hash = _REMOTE_HASHES.get(remote_name)

    if self._remote_hash is None:
      # No cached remote hash: optimistically assume the server speaks our
      # protocol; a handshake mismatch will correct this on the first call.
      self._remote_hash = local_hash
      self._remote_protocol = self._local_protocol

    request_datum = {
        'clientHash': local_hash,
        'serverHash': self._remote_hash,
    }
    if self._send_protocol:
      request_datum['clientProtocol'] = str(self._local_protocol)

    logger.info('Sending handshake request: %s', request_datum)
    HANDSHAKE_REQUESTOR_WRITER.write(request_datum, encoder)

  def _WriteCallRequest(self, message_name, request_datum, encoder):
    """
    The format of a call request is:
      * request metadata, a map with values of type bytes
      * the message name, an Avro string, followed by
      * the message parameters. Parameters are serialized according to
        the message's request declaration.
    """
    # request metadata (not yet implemented)
    request_metadata = {}
    META_WRITER.write(request_metadata, encoder)

    # Identify message to send:
    message = self.local_protocol.message_map.get(message_name)
    if message is None:
      raise schema.AvroException('Unknown message: %s' % message_name)
    encoder.write_utf8(message.name)

    # message parameters
    self._WriteRequest(message.request, request_datum, encoder)

  def _WriteRequest(self, request_schema, request_datum, encoder):
    """Serializes the request payload per the message's request schema."""
    logger.info('writing request: %s', request_datum)
    datum_writer = avro_io.DatumWriter(request_schema)
    datum_writer.write(request_datum, encoder)

  def _ReadHandshakeResponse(self, decoder):
    """Reads and processes the handshake response message.

    Args:
      decoder: Decoder to read messages from.

    Returns:
      True if a call response follows in the stream; False if the call must
      be re-issued now that protocols have been exchanged.

    Raises:
      schema.AvroException: if the handshake 'match' field has an
          unexpected value.
    """
    handshake_response = HANDSHAKE_REQUESTOR_READER.read(decoder)
    logger.info('Processing handshake response: %s', handshake_response)
    match = handshake_response['match']
    if match == 'BOTH':
      # Both client and server protocol hashes match:
      self._send_protocol = False
      return True
    elif match == 'CLIENT':
      # Client's side hash mismatch:
      self._remote_protocol = \
          protocol.Parse(handshake_response['serverProtocol'])
      self._remote_hash = handshake_response['serverHash']
      self._send_protocol = False
      return True
    elif match == 'NONE':
      # Neither client nor server match:
      self._remote_protocol = \
          protocol.Parse(handshake_response['serverProtocol'])
      self._remote_hash = handshake_response['serverHash']
      self._send_protocol = True
      return False
    else:
      raise schema.AvroException('handshake_response.match=%r' % match)

  def _ReadCallResponse(self, message_name, decoder):
    """Reads and processes a method call response.

    The format of a call response is:
      - response metadata, a map with values of type bytes
      - a one-byte error flag boolean, followed by either:
        - if the error flag is false,
          the message response, serialized per the message's response schema.
        - if the error flag is true,
          the error, serialized per the message's error union schema.

    Args:
      message_name: Name of the message the response answers.
      decoder: Decoder positioned at the call response.

    Returns:
      The decoded response datum.

    Raises:
      schema.AvroException: if the message is unknown locally or remotely.
      AvroRemoteException: if the server reported an application error.
    """
    # response metadata
    response_metadata = META_READER.read(decoder)

    # remote response schema
    remote_message_schema = self._remote_protocol.message_map.get(message_name)
    if remote_message_schema is None:
      raise schema.AvroException('Unknown remote message: %s' % message_name)

    # local response schema
    local_message_schema = self._local_protocol.message_map.get(message_name)
    if local_message_schema is None:
      raise schema.AvroException('Unknown local message: %s' % message_name)

    # error flag
    if not decoder.read_boolean():
      writer_schema = remote_message_schema.response
      reader_schema = local_message_schema.response
      return self._ReadResponse(writer_schema, reader_schema, decoder)
    else:
      writer_schema = remote_message_schema.errors
      reader_schema = local_message_schema.errors
      raise self._ReadError(writer_schema, reader_schema, decoder)

  def _ReadResponse(self, writer_schema, reader_schema, decoder):
    """Decodes a successful response with schema resolution."""
    datum_reader = avro_io.DatumReader(writer_schema, reader_schema)
    result = datum_reader.read(decoder)
    return result

  def _ReadError(self, writer_schema, reader_schema, decoder):
    """Decodes a server error into an AvroRemoteException (not raised here)."""
    datum_reader = avro_io.DatumReader(writer_schema, reader_schema)
    return AvroRemoteException(datum_reader.read(decoder))
class Requestor(BaseRequestor):
  """Concrete requestor implementation."""

  def _IssueRequest(self, call_request, message_name, request_datum):
    # Send the framed request and block for the server's reply.
    serialized_response = self.transceiver.Transceive(call_request)
    response_decoder = avro_io.BinaryDecoder(io.BytesIO(serialized_response))
    if self._ReadHandshakeResponse(response_decoder):
      return self._ReadCallResponse(message_name, response_decoder)
    # Handshake mismatch: retry now that protocols have been exchanged.
    return self.Request(message_name, request_datum)
# ------------------------------------------------------------------------------
class Responder( with_metaclass( abc.ABCMeta, object ) ):
  """Base class for the server side of a protocol interaction."""

  def __init__(self, local_protocol):
    self._local_protocol = local_protocol
    self._local_hash = self._local_protocol.md5
    self._protocol_cache = {}
    self.set_protocol_cache(self._local_hash, self._local_protocol)

  @property
  def local_protocol(self):
    """Returns: the Avro Protocol this responder serves."""
    return self._local_protocol

  # utility functions to manipulate protocol cache
  def get_protocol_cache(self, hash):
    """Returns the cached remote Protocol for the given MD5 hash, or None."""
    return self._protocol_cache.get(hash)

  def set_protocol_cache(self, hash, protocol):
    """Caches a remote Protocol under its MD5 hash."""
    self._protocol_cache[hash] = protocol

  def Respond(self, call_request):
    """Entry point to process one procedure call.

    Args:
      call_request: Serialized procedure call request.

    Returns:
      Serialized procedure call response. Application and schema errors are
      serialized into the response rather than raised.
    """
    buffer_reader = io.BytesIO(call_request)
    buffer_decoder = avro_io.BinaryDecoder(buffer_reader)
    buffer_writer = io.BytesIO()
    buffer_encoder = avro_io.BinaryEncoder(buffer_writer)
    error = None
    response_metadata = {}

    try:
      remote_protocol = self._ProcessHandshake(buffer_decoder, buffer_encoder)
      # handshake failure
      if remote_protocol is None:
        return buffer_writer.getvalue()

      # read request using remote protocol
      request_metadata = META_READER.read(buffer_decoder)
      remote_message_name = buffer_decoder.read_utf8()

      # get remote and local request schemas so we can do
      # schema resolution (one fine day)
      remote_message = remote_protocol.message_map.get(remote_message_name)
      if remote_message is None:
        fail_msg = 'Unknown remote message: %s' % remote_message_name
        raise schema.AvroException(fail_msg)
      local_message = self.local_protocol.message_map.get(remote_message_name)
      if local_message is None:
        fail_msg = 'Unknown local message: %s' % remote_message_name
        raise schema.AvroException(fail_msg)

      writer_schema = remote_message.request
      reader_schema = local_message.request
      request = self._ReadRequest(writer_schema, reader_schema, buffer_decoder)
      logger.info('Processing request: %r', request)

      # perform server logic
      try:
        response = self.Invoke(local_message, request)
      except AvroRemoteException as exn:
        error = exn
      except Exception as exn:
        error = AvroRemoteException(str(exn))

      # write response using local protocol
      META_WRITER.write(response_metadata, buffer_encoder)
      buffer_encoder.write_boolean(error is not None)
      if error is None:
        writer_schema = local_message.response
        self._WriteResponse(writer_schema, response, buffer_encoder)
      else:
        writer_schema = local_message.errors
        self._WriteError(writer_schema, error, buffer_encoder)
    except schema.AvroException as exn:
      error = AvroRemoteException(str(exn))
      # Fixed: restart with a fresh *bytes* buffer and keep writer/encoder in
      # sync. Previously the error was encoded into a throwaway StringIO
      # while the stale buffer_writer was returned, so the system error was
      # silently dropped from the response.
      buffer_writer = io.BytesIO()
      buffer_encoder = avro_io.BinaryEncoder(buffer_writer)
      META_WRITER.write(response_metadata, buffer_encoder)
      buffer_encoder.write_boolean(True)
      self._WriteError(SYSTEM_ERROR_SCHEMA, error, buffer_encoder)
    return buffer_writer.getvalue()

  def _ProcessHandshake(self, decoder, encoder):
    """Processes an RPC handshake.

    Args:
      decoder: Where to read from.
      encoder: Where to write to.

    Returns:
      The requested Protocol, or None if it could not be determined.
    """
    handshake_request = HANDSHAKE_RESPONDER_READER.read(decoder)
    logger.info('Processing handshake request: %s', handshake_request)

    # determine the remote protocol
    client_hash = handshake_request.get('clientHash')
    client_protocol = handshake_request.get('clientProtocol')
    remote_protocol = self.get_protocol_cache(client_hash)
    if remote_protocol is None and client_protocol is not None:
      remote_protocol = protocol.Parse(client_protocol)
      self.set_protocol_cache(client_hash, remote_protocol)

    # evaluate remote's guess of the local protocol
    server_hash = handshake_request.get('serverHash')

    handshake_response = {}
    if self._local_hash == server_hash:
      if remote_protocol is None:
        handshake_response['match'] = 'NONE'
      else:
        handshake_response['match'] = 'BOTH'
    else:
      if remote_protocol is None:
        handshake_response['match'] = 'NONE'
      else:
        handshake_response['match'] = 'CLIENT'

    if handshake_response['match'] != 'BOTH':
      # Client needs our protocol to resolve the mismatch.
      handshake_response['serverProtocol'] = str(self.local_protocol)
      handshake_response['serverHash'] = self._local_hash

    logger.info('Handshake response: %s', handshake_response)
    HANDSHAKE_RESPONDER_WRITER.write(handshake_response, encoder)
    return remote_protocol

  @abc.abstractmethod
  def Invoke(self, local_message, request):
    """Processes one procedure call.

    Args:
      local_message: Avro message specification.
      request: Call request.

    Returns:
      Call response.
    """
    # Fixed: previously raised the undefined name 'Error' (NameError).
    raise NotImplementedError('Abstract method')

  def _ReadRequest(self, writer_schema, reader_schema, decoder):
    """Decodes a call request with schema resolution."""
    datum_reader = avro_io.DatumReader(writer_schema, reader_schema)
    return datum_reader.read(decoder)

  def _WriteResponse(self, writer_schema, response_datum, encoder):
    """Serializes a successful call response."""
    datum_writer = avro_io.DatumWriter(writer_schema)
    datum_writer.write(response_datum, encoder)

  def _WriteError(self, writer_schema, error_exception, encoder):
    """Serializes an error as its string form per the error union schema."""
    datum_writer = avro_io.DatumWriter(writer_schema)
    datum_writer.write(str(error_exception), encoder)
# ------------------------------------------------------------------------------
# Framed message
class FramedReader(object):
  """Wrapper around a file-like object to read framed data."""

  def __init__(self, reader):
    self._reader = reader

  def Read(self):
    """Reads one complete message from the configured reader.

    Returns:
      The message, as bytes.
    """
    accumulated = io.BytesIO()
    # Frames are concatenated until a zero-length frame terminates the
    # message.
    while True:
      if self._ReadFrame(accumulated) == 0:
        break
    return accumulated.getvalue()

  def _ReadFrame(self, message):
    """Reads one frame and appends its payload to the given message bytes.

    Args:
      message: Buffer to append the frame payload to.

    Returns:
      Size of the frame that was read; 0 marks the end of a message.
    """
    frame_size = self._ReadInt32()
    to_read = frame_size
    while to_read > 0:
      chunk = self._reader.read(to_read)
      if len(chunk) == 0:
        raise ConnectionClosedException(
            'FramedReader: expecting %d more bytes in frame of size %d, got 0.'
            % (to_read, frame_size))
      message.write(chunk)
      to_read -= len(chunk)
    return frame_size

  def _ReadInt32(self):
    """Reads one 32-bit big-endian frame-length header."""
    header = self._reader.read(UINT32_BE.size)
    if len(header) != UINT32_BE.size:
      raise ConnectionClosedException('Invalid header: %r' % header)
    return UINT32_BE.unpack(header)[0]
class FramedWriter(object):
  """Wrapper around a file-like object to write framed data."""

  def __init__(self, writer):
    self._writer = writer

  def Write(self, message):
    """Writes a message.

    The message is chunked into frames of at most BUFFER_SIZE bytes, each
    preceded by its 32-bit big-endian length, and terminated by an empty
    frame.

    Args:
      message: Message to write, as bytes.
    """
    while len(message) > 0:
      # Fixed: this was max(), which always emitted the whole message as a
      # single frame and never actually bounded frames by BUFFER_SIZE.
      chunk_size = min(BUFFER_SIZE, len(message))
      chunk = message[:chunk_size]
      self._WriteBuffer(chunk)
      message = message[chunk_size:]

    # A message is always terminated by a zero-length buffer.
    self._WriteUnsignedInt32(0)

  def _WriteBuffer(self, chunk):
    # Length-prefixed frame payload.
    self._WriteUnsignedInt32(len(chunk))
    self._writer.write(chunk)

  def _WriteUnsignedInt32(self, uint32):
    self._writer.write(UINT32_BE.pack(uint32))
# ------------------------------------------------------------------------------
# Transceiver (send/receive channel)
class Transceiver( with_metaclass( abc.ABCMeta, object ) ):
  """Abstract request/reply channel used by requestors and responders."""

  @abc.abstractproperty
  def remote_name(self):
    pass

  @abc.abstractmethod
  def ReadMessage(self):
    """Reads a single message from the channel.

    Blocks until a message can be read.

    Returns:
      The message read from the channel.
    """
    pass

  @abc.abstractmethod
  def WriteMessage(self, message):
    """Writes a message into the channel.

    Blocks until the message has been written.

    Args:
      message: Message to write.
    """
    pass

  def Transceive(self, request):
    """Performs one synchronous request/reply round trip.

    Args:
      request: Request message.

    Returns:
      The reply message.
    """
    self.WriteMessage(request)
    return self.ReadMessage()

  def Close(self):
    """Closes this transceiver. No-op by default."""
    pass
class HTTPTransceiver(Transceiver):
  """HTTP-based transceiver implementation."""

  def __init__(self, host, port, req_resource='/'):
    """Initializes a new HTTP transceiver.

    Args:
      host: Name or IP address of the remote host to interact with.
      port: Port the remote server is listening on.
      req_resource: Optional HTTP resource path to use, '/' by default.
    """
    self._req_resource = req_resource
    self._conn = http.client.HTTPConnection(host, port)
    self._conn.connect()
    self._remote_name = self._conn.sock.getsockname()

  @property
  def remote_name(self):
    return self._remote_name

  def ReadMessage(self):
    http_response = self._conn.getresponse()
    framed_message = FramedReader(http_response).Read()
    # Drain any remaining body so the connection is ready for the next call.
    http_response.read()
    return framed_message

  def WriteMessage(self, message):
    body_buffer = io.BytesIO()
    FramedWriter(body_buffer).Write(message)
    self._conn.request(
        'POST', self._req_resource, body_buffer.getvalue(),
        {'Content-Type': AVRO_RPC_MIME})

  def Close(self):
    self._conn.close()
    self._conn = None
# ------------------------------------------------------------------------------
# Server Implementations
def _MakeHandlerClass(responder):
  """Builds an HTTP request handler class bound to the given responder."""

  class AvroHTTPRequestHandler(http.server.BaseHTTPRequestHandler):
    def do_POST(self):
      # Unframe the request, dispatch it, and send the framed reply back.
      call_request = FramedReader(self.rfile).Read()
      logger.info('Serialized request: %r', call_request)
      call_response = responder.Respond(call_request)
      logger.info('Serialized response: %r', call_response)
      self.send_response(200)
      self.send_header('Content-type', AVRO_RPC_MIME)
      self.end_headers()
      FramedWriter(self.wfile).Write(call_response)
      self.wfile.flush()
      logger.info('Response sent')

  return AvroHTTPRequestHandler
class MultiThreadedHTTPServer(
    socketserver.ThreadingMixIn,
    http.server.HTTPServer,
):
  """Multi-threaded HTTP server: each request is handled in its own thread."""
  pass
class AvroIpcHttpServer(MultiThreadedHTTPServer):
  """Avro IPC server implemented on top of an HTTP server."""

  def __init__(self, interface, port, responder):
    """Initializes a new Avro IPC server.

    Args:
      interface: Interface the server listens on, eg. 'localhost' or '0.0.0.0'.
      port: TCP port the server listens on, eg. 8000.
      responder: Responder implementation to handle RPCs.
    """
    handler_class = _MakeHandlerClass(responder)
    super(AvroIpcHttpServer, self).__init__(
        server_address=(interface, port),
        RequestHandlerClass=handler_class,
    )
if __name__ == '__main__':
  # Library module: refuse direct execution.
  raise Exception('Not a standalone module')
| kineticadb/kinetica-api-python | gpudb/packages/avro/avro_py3/ipc.py | ipc.py | py | 21,544 | python | en | code | 13 | github-code | 36 | [
{
"api_name": "future.standard_library.install_aliases",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "future.standard_library",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "logging.getLogger",
"line_number": 24,
"usage_type": "call"
},
{
... |
32187575097 | import asyncio
import aiohttp
from warnings import warn
# Read the VK API access token from a local file (kept out of source control).
with open("vk_access_token.txt", mode="r") as file:
    vk_access_token = file.read()
vk_api_version = "5.154"
# NOTE(review): a negative owner_id usually denotes a community wall in the
# VK API -- confirm the target is a group.
owner_id = "-160464793"
# wall.get query for the single newest post on the target wall.
url = f"https://api.vk.ru/method/wall.get?v={vk_api_version}&owner_id={owner_id}&count=1&access_token={vk_access_token}"
async def get_last_text_message():
    """Fetch the text of the newest post on the configured wall.

    Returns:
        The post text, or None when the response carries no usable item
        (API error payload, empty wall, or unexpected shape).
    """
    async with aiohttp.ClientSession() as session:
        async with session.get(url) as response:
            payload = await response.json()
    try:
        # Expected shape: {"response": {"items": [{..., "text": ...}]}}
        return payload["response"]["items"][0]["text"]
    except (KeyError, IndexError, TypeError):
        # Fixed: was a bare `except:` that also swallowed KeyboardInterrupt
        # and unrelated bugs; only malformed payloads mean "no post".
        return None
async def ticker(delay):
    """Async generator yielding the latest wall post text every `delay` seconds."""
    while True:
        latest = await get_last_text_message()
        yield latest
        await asyncio.sleep(delay)
async def set_morning_trigger(trigger_function, delay=30):
    """Poll the wall forever and call `trigger_function(text)` on each new post.

    Args:
        trigger_function: Async callable invoked with the new post text.
        delay: Seconds between wall.get polls; values below 28 risk exceeding
            the API's daily request limit and produce a warning.
    """
    if delay < 28:
        warn("The delay is too small. Limit of wall.get usage per day may be exceeded.")
    updates = ticker(delay)
    last_text = ""
    while True:
        new_text = await updates.asend(None)
        # Fixed idiom: identity check (`is not None`) instead of `!= None`;
        # also dropped a needless inner closure around the generator.
        if new_text is not None and new_text != last_text:
            last_text = new_text
            # Fire-and-forget: schedule the trigger without blocking polling.
            asyncio.gather(trigger_function(new_text))
if __name__ == "__main__":
async def function(string):
print(string)
asyncio.run(set_morning_trigger(function))
| SaGiMan6/sesc-nsu-assistant-bot | scripts/morning_exercise_operations.py | morning_exercise_operations.py | py | 1,353 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "aiohttp.ClientSession",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "asyncio.sleep",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "warnings.warn",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "asyncio.gather",
... |
18400663422 | import os
from fastapi import status, HTTPException
from pydantic import BaseModel, validator
from typing import Union
ROOT_DIR = os.path.abspath('.')
ROOT_DIR = os.path.join(ROOT_DIR, 'assets')
PATH_REGEX = r'^(?![0-9._/])(?!.*[._]$)(?!.*\d_)(?!.*_\d)[a-zA-Z0-9_/]+$'
class Predicted(BaseModel):
    """Response payload describing one face-recognition prediction."""
    # Entries recognized in the image (presumably identity labels);
    # empty when nobody matched. TODO confirm element type against caller.
    recognized: Union[list, None] = []
    # Count of detected faces that matched no known identity.
    unknowns: Union[int, None] = 0
    # Total number of people detected in the image.
    peoples: Union[int, None] = 0
    # Identifier of the processed image.
    id_img: str
    # Wall-clock seconds spent producing the prediction.
    spend_time: float
class SelectModel(BaseModel):
    """Request payload selecting a trained model file under assets/trained_models."""
    model_file: str = 'example_model.pk'

    @validator('model_file', pre=True)
    def validate_trained_model_folder(cls, value):
        """Resolve `value` to an absolute path under trained_models and verify it.

        Runs before type coercion (pre=True). Appends the '.pk' suffix when
        missing and raises HTTP 400 for over-long names or absent files.
        """
        directory_path = os.path.join(ROOT_DIR, 'trained_models')
        file_path = os.path.join(directory_path, value)
        # Only the final path component's length is checked.
        split_current = os.path.split(file_path)[1]
        if len(split_current) > 20:
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                detail='Name cannot be longer than 20 characters')
        if not value.endswith('.pk'):
            # Tolerate names supplied without the extension.
            join_pk = f'{file_path}.pk'
            if not os.path.isfile(join_pk):
                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                    detail=f'Not found path file {join_pk}')
            return join_pk
        if not os.path.isfile(file_path):
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                detail=f'Not found path file {file_path}')
        return file_path

    class Config:
        # Example rendered in the OpenAPI docs.
        schema_extra = {
            'example': {
                'model_file': 'example_model.pk'
            }
        }
| watcharap0n/api-facial-recognition-dlib | service-rec/server/schemas/predict.py | predict.py | py | 1,636 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "os.path.abspath",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 6,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 7... |
73577910183 | import os, sys
import numpy as np
import pickle
from UserCode.HGCalMaskResolutionAna import Argparser
from array import array as Carray
from collections import OrderedDict
from ROOT import TCanvas, TLatex, TFile, TMath, TH1F
from ROOT import TLegend, TH2F, TLorentzVector, TProfile, TH1D, TGraphErrors
from ROOT import Double, gStyle, gROOT, kTemperatureMap
from UserCode.HGCalMaskVisualProd.SystemUtils import averageContiguousVals as Av
from UserCode.HGCalMaskVisualProd.SystemUtils import EtaStr as ES
from UserCode.HGCalMaskResolutionAna.SoftwareCorrection import IncompleteShowersCorrection
from UserCode.HGCalMaskResolutionAna.SoftwareCorrection import DifferentiateShowersByEnergy
from UserCode.HGCalMaskResolutionAna.Calibration import Calibration
from UserCode.HGCalMaskResolutionAna.PartialWafersStudies import PartialWafersStudies
from UserCode.HGCalMaskVisualProd.RootPlotting import RootPlotting
from UserCode.HGCalMaskVisualProd.RootObjects import RootHistograms
def plotHistograms(histos, cdims, pcoords, cname):
    """
    Plots histograms from a list.
    Arguments:
    -> histos: list of histograms
    -> cdims & pcoords: as described in RooUtils
    -> cname: name of the canvas to be created

    Mode 1 (FLAGS.mode == 1) draws one histogram/graph per pad with a
    crystal-ball fit on the first three resolution histograms. Mode 2
    (FLAGS.mode == 2) either draws per-region pads (when weights are being
    applied) or overlays the cumdiff-binned histograms and derives the
    correction-weight ratios, which are saved to base.paths.weights.
    """
    if not isinstance(histos, list):
        raise TypeError('The histograms have to be passed in a list.,')
    npads = len(pcoords[0])
    # NOTE(review): 'stack' is created here but never used in this function.
    hdiv, stack = ([] for _ in range(2))
    with RootPlotting(ncanvas=1, npads=npads, cdims=cdims, pcoords=pcoords) as plot:
        titles = ['Resolution', 'Resolution vs Eta',
                  'RMS vs Eta', 'Bias vs Eta', 'RMS/(1+Bias) vs Eta',
                  'Resolution', 'Nevents', 'Resolution / Nevents']
        if FLAGS.mode == 1:
            # NOTE(review): legends1 is built here but never drawn in mode 1.
            legends1 = [TLegend(0.12, 0.76, 0.44, 0.89) for _ in range(3)]
            it = -1
            for ih in range(len(histos[:-3])):
                # Advance the title index once per group of three histograms.
                if ih%3==0: it += 1
                h = histos[ih]
                if ih<6:
                    plot.plotHistogram(cpos=0, ppos=ih, h=h,
                                       lw=3,mc=1,msize=.5,lc=1,
                                       title=titles[it], draw_options='colz')
                    if ih<3:
                        # Fit the resolution histograms with a crystal ball.
                        tex = plot.setLatex(ts=0.04)
                        plot.fitHistogram(h=h, fname='crystalball',
                                          frange=(-1.,1.), tex=tex)
                else:
                    plot.plotGraph(cpos=0, ppos=ih, g=h,
                                   lw=3,mc=4,msize=.5,lc=4,
                                   yranges=(-0.5,3.5),
                                   title=titles[it],
                                   draw_options='AP')
        elif FLAGS.mode == 2:
            if FLAGS.apply_weights:
                legends1 = [TLegend(0.12, 0.76, 0.44, 0.89) for _ in range(3)]
            else:
                legends1 = [TLegend(0.56, 0.66, 0.86, 0.89) for _ in range(3)]
            legends2 = [TLegend(0.68, 0.75, 0.86, 0.89) for _ in range(3)]
            for ih in range(NREG):
                if FLAGS.apply_weights:
                    # One column of pads per signal region (idx = ih + NREG*ixx).
                    for ixx in range(int(npads/NREG)):
                        idx = ih+NREG*ixx
                        if ixx == 0:
                            plot.plotHistogram(cpos=0, ppos=idx, h=histos[idx],
                                               lw=3,mc=4,msize=.5,lc=4,
                                               draw_options='colz')
                        elif ixx == 1:
                            plot.plotHistogram(cpos=0, ppos=idx, h=histos[idx],
                                               lw=3,mc=4,msize=.5,lc=4,
                                               title=titles[ixx],
                                               draw_options='colz')
                        else:
                            plot.plotGraph(cpos=0, ppos=idx, g=histos[idx],
                                           lw=3,mc=4,msize=.5,lc=4,
                                           yranges=(-0.5,3.5),
                                           title=titles[ixx],
                                           name = str(ixx*ih),
                                           draw_options='AP')
                        tex = plot.setLatex()
                        if ixx == 0:
                            plot.fitHistogram(h=histos[idx], fname='crystalball',
                                              frange=(-1.,1.), tex=tex)
                            if FLAGS.samples == 'inner':
                                tex.DrawLatex(0.58,0.92,'Inner radius; SR{}'
                                              .format((ih%3)+1))
                            elif FLAGS.samples == 'outer':
                                tex.DrawLatex(0.58,0.92,'Outer radius; SR{}'
                                              .format((ih%3)+1))
                        tex.DrawLatex(0.11,0.92,'#bf{CMS} #it{simulation preliminary}')
                        tex.SetTextAlign(31)
                elif not FLAGS.apply_weights:
                    # Overlay the cumdiff-binned histograms for this region.
                    linec = [4, 2, 3, 7]
                    plot.plotHistogram(cpos=0, ppos=ih, h=histos[ih],
                                       lw=3,mc=linec[0],msize=.5,lc=linec[0],
                                       draw_options='E')
                    plot.plotHistogram(cpos=0, ppos=ih, h=histos[ih+3],
                                       lw=3, mc=linec[1], msize=.5, lc=linec[1],
                                       draw_options='same E')
                    plot.plotHistogram(cpos=0, ppos=ih, h=histos[ih+6],
                                       lw=3, mc=linec[2], msize=.5, lc=linec[2],
                                       draw_options='same E')
                    plot.plotHistogram(cpos=0, ppos=ih, h=histos[ih+9],
                                       lw=3, mc=linec[3], msize=.5, lc=linec[3],
                                       draw_options='same E')
                    tex = plot.setLatex()
                    bckgcuts_str = [str(i) for i in bckgcuts]
                    if FLAGS.samples == 'inner':
                        tex.DrawLatex(0.58,0.92,'Inner radius; SR{}'.format((ih%3)+1))
                    elif FLAGS.samples == 'outer':
                        tex.DrawLatex(0.58,0.92,'Outer radius; SR{}'.format((ih%3)+1))
                    legends1[ih].AddEntry(histos[ih],
                                          'Cumdiff < '+bckgcuts_str[0], 'L')
                    for it in range(len(bckgcuts_str)-1):
                        legends1[ih].AddEntry(histos[ih+3*(it+1)],
                                              bckgcuts_str[it]+'< Cumdiff < '+bckgcuts_str[it+1], 'L')
                    # NOTE(review): relies on 'it' leaking from the loop above;
                    # this breaks if bckgcuts has fewer than 2 entries -- confirm.
                    legends1[ih].AddEntry(histos[ih+3*(it+2)],
                                          'Cumdiff > '+bckgcuts_str[it+1], 'L')
                    tex.DrawLatex(0.11,0.92,'#bf{CMS} #it{simulation preliminary}')
                    tex.SetTextAlign(31)
                    legends1[ih].Draw()
                    # Ratios of each cumdiff bin to the first bin become the
                    # software-correction weights.
                    hdiv.append(histos[ih+3].Clone('weight1_sr{}'.format(ih+1)))
                    hdiv.append(histos[ih+6].Clone('weight2_sr{}'.format(ih+1)))
                    hdiv.append(histos[ih+9].Clone('weight3_sr{}'.format(ih+1)))
                    for idiv in range(len(bckgcuts)):
                        hdiv[-3+idiv].Divide(histos[ih])
                        extrastr = '' if idiv==0 else 'same'
                        hdiv[-3+idiv].GetYaxis().SetRangeUser(0., 6.)
                        plot.plotHistogram(cpos=0, ppos=ih+3, h=hdiv[-3+idiv],
                                           yaxis_title='Weight',
                                           lw=3, mc=linec[idiv+1], msize=.5,
                                           lc=linec[idiv+1],
                                           draw_options='HIST'+extrastr, copy=True)
                    tex = plot.setLatex()
                    if FLAGS.samples == 'inner':
                        tex.DrawLatex(0.58,0.92,'Inner radius; SR{}'.format((ih%3)+1))
                    elif FLAGS.samples == 'outer':
                        tex.DrawLatex(0.58,0.92,'Outer radius; SR{}'.format((ih%3)+1))
                    for iv in range(len(bckgcuts)):
                        legends2[ih].AddEntry(hdiv[iv], 'weight'+str(iv+1), 'L')
                    legends2[ih].Draw()
                    tex.DrawLatex(0.11,0.92,'#bf{CMS} #it{simulation preliminary}')
                    tex.SetTextAlign(31)
        plot.save(cpos=0, name=cname)
    if not FLAGS.apply_weights:
        # Persist the raw histograms and the derived weight ratios.
        save_str = base.paths.weights
        RootHistograms(histos).save(save_str)
        RootHistograms(hdiv).save(save_str, mode='UPDATE')
def main():
#gStyle.SetOptStat(0)
gROOT.SetBatch(True)
gStyle.SetPalette(kTemperatureMap)
fIn=TFile.Open(FLAGS.noPUFile)
data=fIn.Get('data')
calib_str = base.paths.calibrations_nopu
"""
calibration = Calibration(FLAGS)
calibration.nopu_calibration()
calibration.save(calib_str)
"""
with open(calib_str, 'r') as cachefile:
calib = pickle.load(cachefile)
if FLAGS.apply_weights:
calibshowers_str = base.paths.weights
bckgcuts_extended = np.append(bckgcuts, 0.9)
showercorr = IncompleteShowersCorrection(calibshowers_str,
discrvals=Av(bckgcuts_extended))
weights = showercorr.CorrectionWeights()
if FLAGS.samples == 'inner':
boundaries = [5, 5, 5]
corr_mode = 'left'
elif FLAGS.samples == 'outer':
boundaries = [23, 23, 23]
corr_mode = 'right'
lowstats_factors = showercorr.calculateLowStatisticsFactor(boundaries, corr_mode)
weights_graphs = [showercorr.buildCorrectionWeightsGraphs(region=i+1)
for i in range(NREG)]
histos=OrderedDict()
enbins, eninf, ensup = 200, -2.01, 1.99
if FLAGS.samples == 'inner':
phibins, etabins, etainf, etasup = 12, 10, 2.69, 3.04
elif FLAGS.samples == 'outer':
phibins, etabins, etainf, etasup = 12, 10, 1.44, 1.66
if FLAGS.mode == 1:
hn = ['den{}', 'den_eta{}', 'den{}_2D_res', 'den{}_2D_events']
for ireg in range(1,NREG+1):
bins = Carray('d', np.arange(-1.05, .8, 0.01))
strings = ';#Delta E/E_{gen};PDF'
histos[hn[0].format(ireg)] = TH1F(hn[0].format(ireg), strings,
len(bins)-1, bins)
histos[hn[1].format(ireg)] = TH2F(hn[1].format(ireg), ';|#eta|;#Delta E/E',
etabins, etainf, etasup,
enbins, eninf, ensup)
histos[hn[2].format(ireg)] = TH2F(hn[2].format(ireg), ';|#eta|;#phi',
50, etainf, etasup,
phibins, -TMath.Pi(), TMath.Pi())
histos[hn[3].format(ireg)] = TH2F(hn[3].format(ireg), ';|#eta|;#phi',
50, etainf, etasup,
phibins, -TMath.Pi(), TMath.Pi())
elif FLAGS.mode == 2:
fracEn = np.zeros((NREG,NLAYERS), dtype=float)
countfracEn = np.zeros((NREG,NLAYERS), dtype=int)
for i in range(0, data.GetEntriesFast()):
data.GetEntry(i)
genen = getattr(data,'genen')
geneta = abs(getattr(data,'geneta'))
genphi = getattr(data,'genphi')
for ireg in range(1,NREG+1):
recen = getattr(data,'en_sr{}_ROI'.format(ireg))
avgnoise = getattr(data,'noise_sr3_ROI')*A[ireg-1]/A[2]
#Calibration factors. f2 is used for PU.
f1, f2 = 1., 0.
etaregions_shifted = np.roll(etaregions, shift=-1)[:-1]
for ieta1,ieta2 in zip(etaregions[:-1], etaregions_shifted):
#in case it lies outside the limits of the calibration
#the event is calibrated with the full calibration region
if geneta < etaregions[0] or geneta > etaregions[-1]:
idstr = 'sr{}_from{}to{}'.format(ireg,
ES(etaregions[0]),
ES(etaregions[-1]))
elif (geneta < ieta1 or geneta >= ieta2):
continue
else:
idstr = 'sr{}_from{}to{}'.format(ireg, ES(ieta1), ES(ieta2))
if 'L0' in calib:
f1 /= calib['L0'][idstr].Eval(geneta)+1.0
if 'L1' in calib:
f1 /= calib['L1'][idstr].Eval(f1*recen)+1.0
if 'L2' in calib and ireg in calib['L2']:
f2 = calib['L2'][idstr].Eval(avgnoise)
recen = f1*recen - f2
for il in range(1,NLAYERS+1):
v = f1*getattr(data,'en_sr{}_layer{}'.format(ireg,il)) - f2
if ( (FLAGS.samples == "inner" and geneta < etaregions[0]+0.05 or
FLAGS.samples == "outer" and geneta > etaregions[-1]-0.05) and
recen != 0 ):
fracEn[ireg-1,il-1] += v / recen
countfracEn[ireg-1,il-1] += 1
fracEn /= countfracEn
hn = ['res_complete_before{}', 'res_complete_after{}',
'res_incomplete_before{}', 'res_incomplete_after{}',
'res_total_before{}', 'res_total_after{}',
'res_vs_eta_before{}', 'res_vs_eta_after{}',
'en{}_per_layer_signal', 'en{}_per_layer_bckg1',
'en{}_per_layer_bckg2', 'en{}_per_layer_bckg3',
'noise{}_per_layer_signal', 'noise{}_per_layer_bckg1',
'noise{}_per_layer_bckg2', 'noise{}_per_layer_bckg3']
for ireg in range(1,NREG+1):
bins = Carray('d', np.arange(-1.05, .8, 0.01))
strings = ';#Delta E/E_{gen};PDF'
for ih in range(6):
histos[hn[ih].format(ireg)] = TH1F(hn[ih].format(ireg), strings,
len(bins)-1, bins)
histos[hn[6].format(ireg)] = TH2F(hn[6].format(ireg), ';|#eta|;#Delta E/E',
etabins, etainf, etasup,
enbins, eninf, ensup)
histos[hn[7].format(ireg)] = TH2F(hn[7].format(ireg), ';|#eta|;#Delta E/E',
etabins, etainf, etasup,
enbins, eninf, ensup)
bins = Carray('d', np.arange(0.5,29,1.))
strings = ';Layer;E_{reco} / E_{gen}'
for ih in range(8,16):
histos[hn[ih].format(ireg)] = TProfile(hn[ih].format(ireg), strings,
len(bins)-1, bins)
for h in histos:
histos[h].Sumw2()
histos[h].SetMarkerStyle(20)
histos[h].SetDirectory(0)
for i in range(0, data.GetEntriesFast()):
data.GetEntry(i)
genen = getattr(data,'genen')
geneta = abs(getattr(data,'geneta'))
genphi = getattr(data,'genphi')
for ireg in range(1,NREG+1):
recen = getattr(data,'en_sr{}_ROI'.format(ireg))
avgnoise = getattr(data,'noise_sr3_ROI')*A[ireg-1]/A[2]
#Calibration factors. f2 is used for PU.
f1, f2 = 1., 0.
etaregions_shifted = np.roll(etaregions, shift=-1)[:-1]
for ieta1,ieta2 in zip(etaregions[:-1], etaregions_shifted):
if geneta < etaregions[0] or geneta > etaregions[-1]:
idstr = 'sr{}_from{}to{}'.format(ireg,
ES(etaregions[0]), ES(etaregions[-1]))
elif (geneta < ieta1 or geneta > ieta2):
continue
else:
idstr = 'sr{}_from{}to{}'.format(ireg, ES(ieta1), ES(ieta2))
if 'L0' in calib:
f1 /= calib['L0'][idstr].Eval(geneta)+1.0
if 'L1' in calib:
f1 /= calib['L1'][idstr].Eval(f1*recen)+1.0
if 'L2' in calib and ireg in calib['L2']:
f2 = calib['L2'][idstr].Eval(avgnoise)
assert f1 != 1.
recen = f1*recen - f2
deltaE = recen/genen-1.
###Store the energy resolution###
if FLAGS.mode == 1:
if deltaE > -1:
histos[hn[0].format(ireg)].Fill(deltaE)
histos[hn[1].format(ireg)].Fill(geneta, deltaE)
histos[hn[2].format(ireg)].Fill(geneta, genphi, deltaE)
histos[hn[3].format(ireg)].Fill(geneta, genphi)
elif FLAGS.mode == 2:
#differentiate complete from incomplete showers
ROI_en = np.zeros((NLAYERS), dtype=float)
for il in range(1,NLAYERS+1):
v = f1*getattr(data,'en_sr{}_layer{}'.format(ireg,il)) - f2
try:
ROI_en[il-1] = v/recen
except ZeroDivisionError:
ROI_en[il-1] = 0.
lshift = [1., 1., 1.] if FLAGS.samples == 'outer' else [.65, .59, .48] #layer shift
assert len(bckgcuts) == len(lshift)
showerid = DifferentiateShowersByEnergy(ROI_en, fracEn[ireg-1,:],
thresholds=bckgcuts, min_val=0.05)
###Calculate andc calibrate the energy per layer###
recen_corr = 0
for il in range(1,NLAYERS+1):
if FLAGS.apply_weights:
if FLAGS.samples == 'inner':
weight_limit = il > boundaries[ireg-1]
else:
weight_limit = il < boundaries[ireg-1]
b = histos[hn[8].format(ireg)].FindBin(il)
if showerid==0: #complete shower
v = f1*getattr(data,'en_sr{}_layer{}'.format(ireg,il)) - f2
try:
histos[hn[8].format(ireg)].Fill(b,v/recen)
except ZeroDivisionError:
histos[hn[8].format(ireg)].Fill(b,0.)
if FLAGS.apply_weights:
recen_corr += v
v = (f1*getattr(data,'noise_sr3_layer{}'.format(il))
*A[ireg-1]/A[2] - f2)
try:
histos[hn[12].format(ireg)].Fill(b,v/recen)
except ZeroDivisionError:
histos[hn[12].format(ireg)].Fill(b,0.)
else:
w = showerid-1
v = f1*getattr(data,'en_sr{}_layer{}'.format(ireg,il)) - f2
try:
histos[hn[9+w].format(ireg)].Fill(b*lshift[w],v/recen)
except ZeroDivisionError:
histos[hn[9+w].format(ireg)].Fill(b*lshift[w],0.)
if ( FLAGS.apply_weights and
weights[ireg-1][w][il-1]!=0 and
weight_limit):
recen_corr += v/weights[ireg-1][w][int(round((il-1)*lshift[w],0))]
#weight_graphs[ireg][il].SetBit(weight_graphs[ireg][il].klsSortedX)
#weight_graphs[ireg][il].Eval(geneta, spline=0, 'S')
v = (f1*getattr(data,'noise_sr3_layer{}'.format(il))
*A[ireg-1]/A[2] - f2)
try:
histos[hn[13+w].format(ireg)].Fill(b,v/recen)
except ZeroDivisionError:
histos[hn[13+w].format(ireg)].Fill(b,0.)
if FLAGS.apply_weights and FLAGS.method == 'ed':
if showerid==0: #complete shower
deltaE_corr = recen_corr/genen-1.
histos[hn[0].format(ireg)].Fill(deltaE)
histos[hn[1].format(ireg)].Fill(deltaE_corr)
else:
recen_corr *= (1 / (1-lowstats_factors[ireg-1]) )
distshift = 0.09 if FLAGS.samples == 'inner' else 0.08
recen_corr *= 1/distshift
deltaE_corr = recen_corr/genen-1.
if deltaE>-.95 and deltaE<-0.1:
histos[hn[2].format(ireg)].Fill(deltaE)
histos[hn[3].format(ireg)].Fill(deltaE_corr)
histos[hn[6].format(ireg)].Fill(geneta, deltaE)
histos[hn[7].format(ireg)].Fill(geneta, deltaE_corr)
#end of tree loop
fIn.Close()
if FLAGS.mode == 1:
pcoords = [[[0.01,0.755,0.33,0.995],
[0.34,0.755,0.66,0.995],
[0.67,0.755,0.99,0.995],
[0.01,0.505,0.33,0.745],
[0.34,0.505,0.66,0.745],
[0.67,0.505,0.99,0.745],
[0.01,0.255,0.33,0.495],
[0.34,0.255,0.66,0.495],
[0.67,0.255,0.99,0.495],
[0.01,0.005,0.33,0.245],
[0.34,0.005,0.66,0.245],
[0.67,0.005,0.99,0.245]]]
cdims = [[1600,2000]]
picname = '1comp_'+FLAGS.samples+'_'+FLAGS.method
if FLAGS.apply_weights:
picname += '_corrected'
else:
pcoords = [[[0.01,0.51,0.33,0.99],
[0.34,0.51,0.66,0.99],
[0.67,0.51,0.99,0.99],
[0.01,0.01,0.33,0.49],
[0.34,0.01,0.66,0.49],
[0.67,0.01,0.99,0.49]]]
cdims = [[1000,600]]
picname = '2comp_'+FLAGS.samples+'_'+FLAGS.method
if FLAGS.apply_weights:
picname += '_corrected'
correct_order = []
for i in range(len(hn)):
for ireg in range(1,NREG+1):
correct_order.append(hn[i].format(ireg))
assert len(correct_order) == len(histos.keys())
histos = [histos[correct_order[i]] for i in range(len(correct_order))]
if FLAGS.mode == 1:
histos.append(histos[6].Clone())
histos.append(histos[7].Clone())
histos.append(histos[8].Clone())
histos[-3].Divide(histos[9])
histos[-2].Divide(histos[10])
histos[-1].Divide(histos[11])
htmp = []
for ireg in range(NREG):
h = histos[3+ireg]
xbins, exbins, rms, erms, bias, ebias = ([] for _ in range(6))
for xbin in xrange(1,h.GetNbinsX()+1):
tmp = h.ProjectionY('tmp', xbin, xbin)
xbins.append(h.GetXaxis().GetBinCenter(xbin))
horizerror = ( h.GetXaxis().GetBinCenter(1) -
h.GetXaxis().GetBinLowEdge(1) )
exbins.append( horizerror )
rms.append(tmp.GetRMS())
erms.append(tmp.GetRMSError())
"""
xq = Carray('d', [0.16,0.5,0.84])
yq = Carray('d', [0.0,0.0,0.0 ])
tmp.GetQuantiles(3,yq,xq)
bias.append(yq[1])
"""
bias.append(tmp.GetMean())
#ebias.append((yq[0]+yq[2])/2)
ebias.append(tmp.GetMeanError())
tmp.Delete()
xbins, exbins = np.array(xbins), np.array(exbins)
rms, erms = np.array(rms), np.array(erms)
bias, ebias = np.array(bias), np.array(ebias)
indep = rms/(1.+bias)
eindep = indep * np.sqrt( erms**2/rms**2 + ebias**2/(1+bias**2) )
htmp.append( TGraphErrors(etabins, xbins, rms, exbins, erms) )
htmp.append( TGraphErrors(etabins, xbins, bias, exbins, ebias) )
htmp.append( TGraphErrors(etabins, xbins, indep, exbins, eindep) )
ht_tmp = [htmp[0],htmp[3],htmp[6],htmp[1],htmp[4],htmp[7],htmp[2],htmp[5],htmp[8]]
histos = histos[:6] + ht_tmp
if FLAGS.method == 'fineeta':
htitles = ['rmsVSeta1', 'rmsVSeta2', 'rmsVSeta3',
'biasVSeta1', 'biasVSeta2', 'biasVSeta3',
'indepVSeta1','indepVSeta2','indepVSeta3']
indices = [1, 2, 3] * 3
indices.sort()
fOut = TFile( base.paths.plots, 'RECREATE' )
fOut.cd()
for ih,h in enumerate(ht_tmp):
h.SetName(htitles[ih])
h.Write(htitles[ih])
fOut.Write()
fOut.Close()
plotHistograms(histos, cdims, pcoords,
os.path.join(FLAGS.outpath,picname+'.png'))
elif FLAGS.mode == 2:
if FLAGS.apply_weights:
fOut = TFile( base.paths.plots, 'RECREATE' )
fOut.cd()
for ireg in range(NREG):
str1 = hn[4].format(ireg+1)
str2 = hn[5].format(ireg+1)
histos[12+ireg] = histos[ireg].Clone(str1)
histos[15+ireg] = histos[3+ireg].Clone(str2)
histos[12+ireg].Add(histos[6+ireg])
histos[15+ireg].Add(histos[9+ireg])
for h in histos:
h.Write()
histos_complete = histos[:6]
histos_incomplete = histos[6:12]
histos_total = histos[12:18]
histos_res2D_before = histos[21:24]
histos_res2D_after = histos[21:24]
plotHistograms(histos_complete, cdims, pcoords,
os.path.join(FLAGS.outpath,picname+'_complete.png'))
plotHistograms(histos_incomplete, cdims, pcoords,
os.path.join(FLAGS.outpath,picname+'_incomplete.png'))
ht = histos_total[3:] + histos_res2D_after #res 1D + res 2D
ht_tmp = []
for ireg in range(NREG):
h = ht[3+ireg]
xbins, exbins, rms, erms, bias, ebias = ([] for _ in range(6))
for xbin in xrange(1,h.GetNbinsX()+1):
tmp = h.ProjectionY('tmp', xbin, xbin)
xbins.append(h.GetXaxis().GetBinCenter(xbin))
horizerror = ( h.GetXaxis().GetBinCenter(1) -
h.GetXaxis().GetBinLowEdge(1) )
exbins.append( horizerror )
rms.append(tmp.GetRMS())
erms.append(tmp.GetRMSError())
"""
xq = Carray('d', [0.16,0.5,0.84])
yq = Carray('d', [0.0,0.0,0.0 ])
tmp.GetQuantiles(3,yq,xq)
bias.append(yq[1])
"""
bias.append(tmp.GetMean())
#ebias.append((yq[0]+yq[2])/2)
ebias.append(tmp.GetMeanError())
tmp.Delete()
xbins, exbins = np.array(xbins), np.array(exbins)
rms, erms = np.array(rms), np.array(erms)
bias, ebias = np.array(bias), np.array(ebias)
indep = rms/(1.+bias)
eindep = indep * np.sqrt( erms**2/rms**2 + ebias**2/(1+bias**2) )
ht_tmp.append( TGraphErrors(etabins, xbins, rms, exbins, erms) )
ht_tmp.append( TGraphErrors(etabins, xbins, bias, exbins, ebias) )
ht_tmp.append( TGraphErrors(etabins, xbins, indep, exbins, eindep) )
fOut.cd()
ht_tmp = [ht_tmp[-9],ht_tmp[-6],ht_tmp[-3],
ht_tmp[-8],ht_tmp[-5],ht_tmp[-2],
ht_tmp[-7],ht_tmp[-4],ht_tmp[-1]]
ht_titles = ['rmsVSeta1', 'rmsVSeta2', 'rmsVSeta3',
'biasVSeta1', 'biasVSeta2', 'biasVSeta3',
'indepVSeta1','indepVSeta2','indepVSeta3']
indices = [1, 2, 3] * 3
indices.sort()
for ih,h in enumerate(ht_tmp):
h.SetName(ht_titles[ih])
h.Write(ht_titles[ih])
pcoords = [[[0.01,0.805,0.33,0.995],
[0.34,0.805,0.66,0.995],
[0.67,0.805,0.99,0.995],
[0.01,0.605,0.33,0.795],
[0.34,0.605,0.66,0.795],
[0.67,0.605,0.99,0.795],
[0.01,0.405,0.33,0.595],
[0.34,0.405,0.66,0.595],
[0.67,0.406,0.99,0.595],
[0.01,0.205,0.33,0.395],
[0.34,0.205,0.66,0.395],
[0.67,0.205,0.99,0.395],
[0.01,0.005,0.33,0.195],
[0.34,0.005,0.66,0.195],
[0.67,0.005,0.99,0.195]]]
cdims = [[1600,2000]]
ht += ht_tmp
plotHistograms(ht, cdims, pcoords,
os.path.join(FLAGS.outpath,picname+'_total.png'))
fOut.Write()
fOut.Close()
else:
histos = histos[24:]
plotHistograms(histos, cdims, pcoords,
os.path.join(FLAGS.outpath,picname+'.png'))
if __name__ == "__main__":
parser = Argparser.Argparser()
FLAGS = parser.get_flags()
parser.print_args()
if FLAGS.apply_weights and FLAGS.mode != 2:
raise ValueError('The weights can only be used when mode==2.')
base = PartialWafersStudies(FLAGS)
NREG, NLAYERS, A = base.nsr, base.nlayers, base.sr_area
etaregions = base.etaregions
if FLAGS.method == 'ed':
bckgcuts = np.array(FLAGS.bckgcuts)
main()
| bfonta/HGCal | HGCalMaskResolutionAna/scripts/analysis.py | analysis.py | py | 30,149 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "UserCode.HGCalMaskVisualProd.RootPlotting.RootPlotting",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "ROOT.TLegend",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "ROOT.TLegend",
"line_number": 60,
"usage_type": "call"
},
{
"... |
4401153009 | import requests
from bs4 import BeautifulSoup
import re
from googlesearch import search
def remove_tags(text):
TAG_RE = re.compile(r'<[^>]+>')
return TAG_RE.sub('', text)
def spamcalls(num):
lists = []
r = requests.get("https://spamcalls.net/en/search?q={}".format(num))
if r.status_code == 200:
try:
parse = BeautifulSoup(r.content.decode('utf-8'), 'html.parser')
name = parse.findAll('strong')
for names in name:
lists.append(str(names))
lists.pop(0)
return{"spamcalls" : remove_tags(", ".join(lists))}
except Exception as e:
return{'err' : e}
def scamcallfighters(num):
r = requests.get("http://www.scamcallfighters.com/search-phone-{}.html".format(num.replace("+", "")))
if r.status_code == 200:
try:
parse = BeautifulSoup(r.content.decode('utf-8'), 'html.parser')
for g in parse.find_all('div', class_='nrp_headmat1'):
records = g.find_all('p')
return{"scamcallfighters" : remove_tags(str(records))}
except Exception as e:
return{'err' : e}
def urls(num, countrycode, localnumber):
return{"fouroneone": "https://www.411.com/phone/{}".format(num.replace('+', '').replace(' ', '-')), "truecaller": "https://www.truecaller.com/{}/{}".format(countrycode, localnumber), 'truepeoplesearch': "https://www.truepeoplesearch.com/results?phoneno={}".format(num.replace(' ', '')), 'syncme': "https://sync.me/search/?number={}".format(num.replace("+", ""))}
try:
for r in search(num):
return{"URL": r}
except:
return{"err" : "error occured"}
| 742fool/DeadTrapv2 | website/backend/scanners/fraud.py | fraud.py | py | 1,708 | python | en | code | null | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "bs4.BeautifulSoup",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_num... |
36408696267 | from django.urls import path
from . import views
app_name = 'assure'
urlpatterns = [
path('', views.IndexView.as_view(), name='index'),
path('<int:pk>/detail/', views.DetailView.as_view(), name='detail'),
path('<int:pk>/results/', views.ResultsView.as_view(), name='results'),
path('<int:site_id>/comment/', views.comment, name='comment'),
]
| chadwickcheney/SeleniumTests | assure/urls.py | urls.py | py | 360 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
42600435981 | #!/usr/bin/env python3
import os, psutil, signal
import sys
import fcntl
import pytz
import time
from datetime import datetime
import multiprocessing
from multiprocessing import Queue
import subprocess, shlex
import atexit
import signal
import socketserver
import socket
import re
import shutil
def getTaipeiTime():
return datetime.now(pytz.timezone('Asia/Taipei')).strftime("%m-%d_%H-%M")
def check_PidAlive(pid):
"""
return True if the pid is still working
return False if the pid id dead
"""
if pid != None:
try:
if os.waitpid(pid, os.WNOHANG) == (0,0):
return True
else:
return False
except OSError:
pass
return False
def KillProcesses(pid):
'''
kill all the children of pid and itself
'''
parent_pid = pid
try:
parent = psutil.Process(parent_pid)
for child in parent.children(recursive=True):
child.kill()
except Exception as e:
print("Failed to KillProcesses with pid={}\n Skip it.".format(pid))
return
parent.kill()
def KillChildren(pid):
'''
kill all the children of the pid except itself
'''
parent_pid = pid
try:
parent = psutil.Process(parent_pid)
for child in parent.children(recursive=True):
try:
child.kill()
except Exception as e:
pass
except Exception as e:
print("Failed to KillChildren with pid={}\nReasons:{}".format(pid, e))
return
def KillPid(pid):
'''
kill the pid
'''
try:
os.kill(pid, signal.SIGKILL)
except Exception as e:
print("KillPid() failed.\n reasons:{}".format(e))
def LimitTimeExec(LimitTime, Func, *args):
"""
Input:
1. LimitTime: is in the unit of secs.
2. Func: must return a list that contains your return value
3. args: pass into Func
Return value:
1. isKilled: killed by timing
2. ret(int): from Func(args) to indicate success or not
"""
ret = -1
PrevWd = os.getcwd()
isKilled = False
WaitSecs = 0
WaitUnit = 10
ExecProc = multiprocessing.Process(target=Func, args=[args])
# NOTE: SIGKILL will not kill the children
# kill all its sub-process when parent is killed.
ExecProc.daemon = True
ExecProc.start()
while True:
date = getTaipeiTime()
if ExecProc.is_alive():
# log date to check liveness
print("Alive at {}".format(date))
time.sleep(WaitUnit)
WaitSecs += WaitUnit
else:
# return the return code to indicate success or not
ret = ExecProc.exitcode
isKilled = False
print("The command is finished at {} with exitcode={}, break.".format(date, ret))
break
if WaitSecs > LimitTime:
if not ExecProc.is_alive():
# if the work is done after the sleep
continue
# handle the processes twice, kill its children first
KillChildren(ExecProc.pid)
# with daemon flag, all children will be terminated
ExecProc.terminate()
KillPid(ExecProc.pid)
# wait for a few secs
ExecProc.join(10)
if ExecProc.exitcode is None: # exitcode is None for unfinished proc.
print("ExecProc.terminate() failed; Daemon handler exit.")
sys.exit(0)
isKilled = True
ret = -1
print("Achieve time limitation, kill it at {}.".format(getTaipeiTime()))
break
os.chdir(PrevWd)
return isKilled, ret
def ExecuteCmd(WorkerID=1, Cmd="", Block=True, ParallelBuild=False):
"""
return cmd's return code, STDOUT, STDERR
"""
# Use taskset by default
if Block:
'''
The taskset configuration depends on the hardware.
If your computer is other than 8700K, you must customized it.
Current configuration:
intel 8700K:
Core 0 as the "benchmark scheduler"
Core 1~5 as the "worker" to run programs.
Core 6~11 are not "real core", they are hardware threads shared with Core 0~5.
'''
CpuWorker = str((int(WorkerID) % 5) + 1)
TrainLoc = os.getenv("LLVM_THESIS_TrainingHome", "Error")
if not ParallelBuild:
FullCmd = "taskset -c " + CpuWorker + " " + Cmd
else:
if Cmd.split()[0] == "make":
FullCmd = Cmd + " -j" + str(multiprocessing.cpu_count())
else:
FullCmd = Cmd
#print(FullCmd)
p = subprocess.Popen(shlex.split(FullCmd),
stdin=subprocess.PIPE,
stdout=subprocess.PIPE)
out, err = p.communicate()
p.wait()
return p.returncode, out, err
else:
print("TODO: non-blocking execute", file=sys.stderr)
class EnvBuilder:
def CheckTestSuiteCmake(self, WorkerID):
"""
return LitTestDict: { target-name: .test-loc }
"""
llvmSrc = os.getenv("LLVM_THESIS_HOME", "Error")
if llvmSrc == "Error":
print("$LLVM_THESIS_HOME or not defined.", file=sys.stderr)
sys.exit(1)
TestSrc = llvmSrc + "/test-suite/build-worker-" + WorkerID
PrevWd = os.getcwd()
# if the cmake is not done, do it once.
if not os.path.isdir(TestSrc):
os.mkdir(TestSrc)
os.chdir(TestSrc)
'''
ex.
cmake -DCMAKE_C_COMPILER=/home/jrchang/workspace/llvm-thesis/build-release-gcc7-worker1/bin/clang -DCMAKE_CXX_COMPILER=/home/jrchang/workspace/llvm-thesis/build-release-gcc7-worker1/bin/clang++ ../
'''
cBinSrc = llvmSrc + "/build-release-gcc7-worker" + WorkerID + "/bin/clang"
cxxBinSrc = cBinSrc + "++"
cmd = "cmake -DCMAKE_C_COMPILER=" + cBinSrc + " -DCMAKE_CXX_COMPILER=" + cxxBinSrc + " ../"
ret = ExecuteCmd(WorkerID=WorkerID, Cmd=cmd, Block=True)
os.chdir(PrevWd)
if ret != 0:
print("cmake failed.", file=sys.stderr)
sys.exit(1)
# Build .test dict for verification and run
LitTestDict = {}
'''
only add the "measurable targets"
'''
MeasurableRec = os.getenv("LLVM_THESIS_Random_LLVMTestSuiteScript", "Error")
MeasurableRec = \
MeasurableRec + '/GraphGen/output/newMeasurableStdBenchmarkMeanAndSigma'
MeasurableList = []
with open(MeasurableRec, 'r') as f:
for line in f:
MeasurableList.append(line.split(';')[0].split('/')[-1].strip())
for root, dirs, files in os.walk(TestSrc):
for file in files:
if file.endswith(".test"):
name = file[:-5]
if name in MeasurableList:
path = os.path.join(root, file)
LitTestDict[name] = path
return LitTestDict
def workerMake(self, args):
"""
Input: args(tuple):
[0]:WorkerID
[1]:BuildTarget
[2]:ParallelBuild <---This arg is optional
(Default is using taskset to build on a core)
Return a int:
a number that indicate status.
0 --> build success
others --> build failed
"""
PrevWd = os.getcwd()
WorkerID = args[0]
BuildTarget = args[1]
ParallelBuild = False
if len(args) > 2:
ParallelBuild = args[2]
ret = -1
'''
build
'''
llvmSrc = os.getenv("LLVM_THESIS_HOME", "Error")
TestSrc = llvmSrc + "/test-suite/build-worker-" + WorkerID
os.chdir(TestSrc)
cmd = "make " + BuildTarget
ret, _, _ = ExecuteCmd(WorkerID=WorkerID, Cmd=cmd, Block=True, ParallelBuild=ParallelBuild)
return ret
def make(self, WorkerID, BuildTarget, ParallelBuild=False):
"""
return a number:
0 --> build success
others --> build failed
"""
isKilled, ret = LimitTimeExec(900, self.workerMake, WorkerID, BuildTarget, ParallelBuild)
if isKilled or ret != 0:
return -1
else:
return 0
def workerVerify(self, args):
"""
Input(tuple):
[0]:WorkerID
[1]:TestLoc
Return a int:
a number that indicate status.
0 --> build success
others --> build failed
"""
ret = -1
WorkerID = args[0]
TestLoc = args[1]
Lit = os.getenv("LLVM_THESIS_lit", "Error")
if Lit == "Error":
print("$LLVM_THESIS_lit not defined.", file=sys.stderr)
sys.exit(1)
cmd = Lit + " -q " + TestLoc
_, out, err = ExecuteCmd(WorkerID=WorkerID, Cmd=cmd, Block=True)
if out:
ret = -1
else:
ret = 0
return ret
def verify(self, WorkerID, TestLoc):
"""
return a number:
0 --> success and correct
others --> failed
"""
isKilled, ret = LimitTimeExec(500, self.workerVerify, WorkerID, TestLoc)
if isKilled or ret != 0:
return -1
else:
return 0
def distributePyActor(self, TestFilePath):
"""
return 0 for success
return -1 for failure.
"""
Log = LogService()
# Does this benchmark need stdin?
NeedStdin = False
with open(TestFilePath, "r") as TestFile:
for line in TestFile:
if line.startswith("RUN:"):
if line.find("<") != -1:
NeedStdin = True
break
TestFile.close()
# Rename elf and copy actor
ElfPath = TestFilePath.replace(".test", '')
NewElfPath = ElfPath + ".OriElf"
#based on "stdin" for to copy the right ones
InstrumentSrc = os.getenv("LLVM_THESIS_InstrumentHome", "Error")
if NeedStdin == True:
PyCallerLoc = InstrumentSrc + '/PyActor/WithStdin/PyCaller'
PyActorLoc = InstrumentSrc + '/PyActor/WithStdin/MimicAndFeatureExtractor.py'
else:
PyCallerLoc = InstrumentSrc + '/PyActor/WithoutStdin/PyCaller'
PyActorLoc = InstrumentSrc + '/PyActor/WithoutStdin/MimicAndFeatureExtractor.py'
try:
# Rename the real elf
shutil.move(ElfPath, NewElfPath)
# Copy the feature-extractor
shutil.copy2(PyActorLoc, ElfPath + ".py")
except Exception as e:
print("distributePyActor() errors, Reasons:\n{}".format(e))
return -1
# Copy the PyCaller
if os.path.exists(PyCallerLoc) == True:
shutil.copy2(PyCallerLoc, ElfPath)
else:
Log.err("Please \"$ make\" to get PyCaller in {}\n".format(PyCallerLoc))
return -1
return 0 #success
def run(self, WorkerID, TestLoc):
ret = self.verify(WorkerID, TestLoc)
return ret
class EnvResponseActor:
def EnvEcho(self, BuildTarget, WorkerID, LitTestDict, ParallelBuild=False):
"""
return "Success" or "Failed"
"""
testLoc = LitTestDict[BuildTarget]
retString = "Success"
'''
remove previous build and build again
'''
env = EnvBuilder()
'''
ex1. RUN: /llvm/test-suite/build-worker-1/SingleSource/Benchmarks/Dhrystone/dry
ex2. RUN: cd /home/jrchang/workspace/llvm-thesis/test-suite/build-worker-1/MultiSource/Applications/sqlite3 ; /home/jrchang/workspace/llvm-thesis/test-suite/build-worker-1/MultiSource/Applications/sqlite3/sqlite3 -init /home/jrchang/workspace/llvm-thesis/test-suite/MultiSource/Applications/sqlite3/sqlite3rc :memory: < /home/jrchang/workspace/llvm-thesis/test-suite/MultiSource/Applications/sqlite3/commands
'''
with open(testLoc, "r") as file:
fileCmd = file.readline()
file.close()
MultiCmdList = fileCmd.split(';')
if len(MultiCmdList) == 1:
# cases like ex1.
BuiltBin = fileCmd.split()[1]
else:
# cases like ex2.
BuiltBin = MultiCmdList[1].strip().split()[0]
'''
remove binary does not ensure it will be built again.
Therefore, we must use "make clean"
'''
binName = BuiltBin.split('/')[-1]
dirPath = BuiltBin[:-(len(binName) + 1)]
prevWd = os.getcwd()
'''
print("fileCmd={}".format(fileCmd))
print("BuiltBin={}".format(BuiltBin))
print("dirPath={}".format(dirPath))
print("binName={}".format(binName))
'''
os.chdir(dirPath)
os.system("make clean")
os.chdir(prevWd)
# remove feature file
FeatureFile = '/tmp/PredictionDaemon/worker-{}/features'.format(WorkerID)
if os.path.exists(FeatureFile):
os.remove(FeatureFile)
'''
build
assuming the proper cmake is already done.
'''
ret = env.make(WorkerID, BuildTarget, ParallelBuild)
if ret != 0:
print("Failed sent.")
return "Failed"
'''
verify
'''
ret = env.verify(WorkerID, testLoc)
if ret != 0:
print("Failed sent.")
return "Failed"
'''
distribute PyActor
'''
ret = env.distributePyActor(testLoc)
if ret != 0:
print("Failed sent.")
return "Failed"
'''
run and extract performance
The return value from env.run() can be ignored.
We already use env.verify() to verify it.
'''
ret = env.run(WorkerID, testLoc)
return retString
class LogService():
def __init__(self):
pass
def outNotToFile(self, msg):
print(msg, end="", file=sys.stdout)
def FileWriter(self, path, msg):
file = open(path, "a")
fcntl.flock(file, fcntl.LOCK_EX)
file.write(msg)
fcntl.flock(file, fcntl.LOCK_UN)
file.close()
def out(self, msg):
self.outNotToFile(msg)
def err(self, msg):
self.out(msg)
#self.FileWriter("/tmp/PredictionDaemon.err", msg)
class ConnectInfoService():
def getConnectDict(self, path):
'''
return Dict[WorkerID] = ["RemoteEnv-ip", "RemoteEnv-port"]
'''
Dict = {}
with open(path, "r") as file:
# skip the header line
file.readline()
for line in file:
info = line.split(",")
strippedInfo = []
for subInfo in info:
strippedInfo.append(subInfo.strip())
Dict[strippedInfo[0]] = [strippedInfo[1], strippedInfo[2]]
file.close()
return Dict
| TibaChang/ThesisTools | PassInstrument/training/Lib.py | Lib.py | py | 15,026 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "datetime.datetime.now",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "pytz.timezone",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.waitpid",
... |
29620338632 | # -*- coding: utf-8 -*-
from selenium import webdriver
from selenium.webdriver.support.wait import WebDriverWait
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from lxml import etree
import time
import xlsxwriter
options = webdriver.ChromeOptions()
# 找到本地安装的浏览器启动路径,例如Chrome
# 设置--user-data-dir是为了不影响自己的浏览器
# chrome.exe --remote-debugging-port=9222 --user-data-dir="D:\Program File\chromeUserData"
options.add_experimental_option("debuggerAddress", "127.0.0.1:9222")
base_url = "https://www.baidu.com/"
driver = webdriver.Chrome(options=options)
wait = WebDriverWait(driver, 10, 0.5)
driver.implicitly_wait(3)
driver.get(base_url)
# 打印页面标题 "百度一下,你就知道"
print(driver.title)
# 生成当前页面快照并保存
# driver.save_screenshot("baidu.png")
wait.until(EC.presence_of_element_located((By.ID, 'kw')))
driver.find_element_by_id("kw").click()
driver.find_element_by_id("kw").send_keys("taobao")
driver.find_element_by_id("su").click()
# 打印网页渲染后的源代码
# print(driver.page_source)
# 获取当前url
print(driver.current_url)
wait.until(EC.presence_of_element_located((By.ID, 'content_left')))
time.sleep(1)
firstElem = driver.find_element_by_xpath(
'//div[@id="content_left"]//div[contains(@class,"result")][1]/h3/a')
print(firstElem.text)
firstElem.click()
# 获取所有的打开的浏览器窗口
windowstabs = driver.window_handles
print(windowstabs)
# 获取当前浏览器的窗口
currenttab = driver.current_window_handle
print(currenttab)
# 切换到新窗口
driver.switch_to.window(windowstabs[1])
print(driver.current_url)
time.sleep(1)
driver.close()
driver.switch_to.window(windowstabs[0])
print(driver.current_url)
# html_str = driver.page_source
# obj_list = etree.HTML(html_str).xpath(
# '//div[@id="content_left"]//div[contains(@class,"result")]/h3/a')
# result = ['标题']
# for obj in obj_list:
# title = obj.xpath('string(.)').replace('\n', '').strip()
# print(title)
# result.append(title)
# workbook = xlsxwriter.Workbook('baidu.xlsx') #创建一个Excel文件
# worksheet = workbook.add_worksheet() #创建一个sheet
# # 列宽
# worksheet.set_column('A:J', 20)
# #向 excel 中写入数据
# worksheet.write_column('A1',result)
# workbook.close()
# 关闭当前页面,如果只有一个页面,会关闭浏览器
# driver.close()
# # 关闭浏览器
# driver.quit()
| hua345/myBlog | python/selenium/baidu.py | baidu.py | py | 2,515 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "selenium.webdriver.ChromeOptions",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 19,
"usage_type": "call"
},
{
"api... |
42248140291 | import json
from rdflib import Graph, Namespace, Literal, URIRef, XSD
# NOTE(review): XSD is imported twice (above and below); redundant but harmless.
from rdflib.namespace import XSD
# Mapping of codes to (image) annotation types.
# NOTE(review): these three lookup tables are not referenced anywhere below
# in this script -- confirm they are kept intentionally (shared with siblings?).
annotation_codes_classes = [("evoked_clusters", "ACVisualEvocation"), ("as", "ArtStyle"), ("act", "Action"), ("age","Age"), ("color", "Color"), ("em", "Emotion"), ("ic", "ImageCaption"), ("hp", "HumanPresence"), ("od", "Object")]
annotation_codes_jsonnames = [("evoked_clusters", "evoked_abstract_concept"), ("as", "art_style"), ("act", "action_label"), ("age","age_tier"), ("color", "webcolor_name"), ("em", "emotion"), ("ic", "image_description"), ("hp", "human_presence"), ("od", "detected_object")]
annotation_codes_roles = [("acve", "evoked_abstract_concept"), ("as", "detected_art_style"), ("act", "detected_action"), ("age","detected_age"), ("color", "detected_color"), ("em", "detected_emotion"), ("ic", "detected_image_caption"), ("hp", "detected_human_presence"), ("od", "detected_object")]
# Define namespaces for your prefixes
base = "https://w3id.org/situannotate#"
rdf = Namespace("http://www.w3.org/1999/02/22-rdf-syntax-ns#")
rdfs = Namespace("http://www.w3.org/2000/01/rdf-schema#")
xsd = Namespace("http://www.w3.org/2001/XMLSchema#")
situannotate = Namespace("https://w3id.org/situannotate#")
conceptnet = Namespace("http://etna.istc.cnr.it/framester2/conceptnet/5.7.0/c/en/")
# Create an RDF graph
g = Graph()
dataset = "ARTstract"
### Create triples for Annotation Situations
with open('input/real-img-data.json', 'r') as json_file:
    # Load the JSON data into a Python dictionary
    data = json.load(json_file)
    for image_id, details in data.items():
        # One Image individual per entry, named "<dataset>_<image_id>".
        image_instance = str(dataset + "_" + image_id)
        g.add((situannotate[image_instance], rdf.type, situannotate.Image))
        source_dataset = details["source_dataset"]
        source_id = details["source_id"]
        evoked_clusters = details['evoked_clusters']
        # NOTE(review): only the FIRST evoked cluster is serialized per image;
        # confirm the remaining clusters are meant to be skipped.
        first_cluster = next(iter(evoked_clusters.values()))
        annotation_class = "ACVisualEvocation"
        annotation_class = str(annotation_class) + "Annotation"
        situation_name = source_dataset + "_acve"
        annotation_role = "evoked_abstract_concept"
        annotation_id = image_instance + "_" + situation_name
        cluster_name = first_cluster["cluster_name"]
        evocation_context = first_cluster["evocation_context"]
        # declare triple between the image and the annotation situation
        g.add((situannotate[image_instance], situannotate.isInvolvedInAnnotationSituation, situannotate[situation_name]))
        # triples for each annotation
        g.add((situannotate[annotation_id], rdf.type, situannotate[annotation_class]))
        g.add((situannotate[annotation_id], situannotate.isAnnotationInvolvedInSituation, situannotate[situation_name]))
        g.add((situannotate[annotation_id], situannotate.isClassifiedBy, situannotate[annotation_role]))
        g.add((situannotate[annotation_id], situannotate.aboutAnnotatedEntity, situannotate[image_instance]))
        g.add((situannotate[annotation_id], situannotate.typedByConcept, conceptnet[cluster_name]))
        g.add((situannotate[annotation_id], situannotate.annotationWithLexicalEntry, situannotate[cluster_name]))
        g.add((situannotate[annotation_id], situannotate.annotationWithEvocationContext, Literal(evocation_context, datatype=XSD.string)))
        # triples for each lexical entry
        g.add((situannotate[cluster_name], rdf.type, situannotate.LexicalEntry))
        g.add((situannotate[cluster_name], situannotate.typedByConcept, conceptnet[cluster_name]))
        g.add((situannotate[cluster_name], rdfs.label, Literal(cluster_name, datatype=XSD.string)))
        # triples for image in relation to annotation
        g.add((situannotate[image_instance], situannotate.isAnnotatedWithLexicalEntry, situannotate[cluster_name]))
        g.add((situannotate[image_instance], situannotate.hasImageLabelTypedBy, conceptnet[cluster_name]))
# Serialize the RDF graph to Turtle format
turtle_data = g.serialize(format="turtle")
# Print the Turtle data
print(turtle_data)
# Save the Turtle RDF data to a file
with open("output/real_images_acve_kg.ttl", "w") as outfile: # Open in regular text mode (not binary mode)
    outfile.write(turtle_data)
| delfimpandiani/ARTstract-KG | ARTstract-KG_creation/ARTstract_kg_construction/real_kg_construction/img_acve.py | img_acve.py | py | 4,284 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "rdflib.Namespace",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "rdflib.Namespace",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "rdflib.Namespace",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "rdflib.Namespace",... |
12198099018 | #!/usr/bin/python
import os
import sqlite3, time, re
import subprocess
from random import randint
from Scan_lib import Scan_Receive_sms, Scan_Smstome_sms
def Scansione(conn):
    """Dispatch every registered number to the scraper for its source site."""
    cursor = conn.execute("SELECT Subdomain, Number FROM Anagrafica")
    # NOTE(review): this discards the first row before the loop below --
    # confirm whether skipping the first record is intentional.
    cursor.fetchone()
    for row in cursor:
        # `x in row` is tuple membership: true only when Subdomain equals the site exactly.
        if "receive-smss.com" in row:
            Scan_Receive_sms(conn,row[1].split("+")[1])
        if "smstome.com" in row:
            Scan_Smstome_sms(conn,row[1]) #### CHECK why only one pass (translated original note)
def DB_Ana(conn, Subdomain, Number, Alive, Nation):
    """Insert a (Subdomain, Number, Alive, Nation) record into Anagrafica
    unless a row with the same Number and Nation already exists.

    Fix: the original built both statements by string concatenation from
    values scraped off untrusted web pages (SQL injection, and a crash on
    any value containing a quote). Use parameterized queries instead.
    """
    cursor = conn.execute(
        "SELECT * FROM Anagrafica WHERE Number = ? AND Nation = ?",
        (Number, Nation),
    )
    if cursor.fetchone() is None:
        conn.execute(
            "INSERT INTO Anagrafica (Subdomain, Number, Alive, Nation) "
            "VALUES (?, ?, ?, ?)",
            (Subdomain, Number, Alive, Nation),
        )
        conn.commit()
        print("New finding: " + Number + " [" + Nation + "] - Records created successfully")
def Ana_Receive_smss():
    """Refresh the number registry from receive-smss.com's landing page,
    then scan every registered number for new messages."""
    print ("ANAGRAFICA Receive-smss.com");
    conn = sqlite3.connect('SMS_DB.db')
    sup_file= 'receive-smss'
    # Download the landing page into a scratch file via wget.
    os.system("wget -O " + sup_file + " " + 'https://receive-smss.com/')
    subdomain = "receive-smss.com"
    # `flag` counts how many of the (number, nation) pair have been seen on
    # successive HTML lines; once both are found (flag > 1) a record is
    # inserted and the counter reset.
    flag = 0
    with open(sup_file) as file:
        for line in file:
            if '<div class="number-boxes-itemm-number" style="color:black">' in line:
                number = line.split('<div class="number-boxes-itemm-number" style="color:black">')[1].split('</div>')[0]
                flag = flag+1
            if '<div class="number-boxes-item-country number-boxess-item-country">' in line:
                nation = line.split('<div class="number-boxes-item-country number-boxess-item-country">')[1].split('</div>')[0]
                flag = flag+1
            if flag > 1:
                alive = "none"
                DB_Ana(conn, subdomain, number, alive, nation)
                flag = 0
                number = "NULL"
                nation = "NULL"
    # Remove the scratch file, then scan all registered numbers.
    os.system("rm "+sup_file)
    Scansione(conn)
    conn.close()
def Ana_SMStome():
    """Refresh the number registry from smstome.com: for each country link on
    the home page, fetch one random listing page (1-30), register the number
    links found there, then scan all registered numbers."""
    print ("ANAGRAFICA smstome.com");
    conn = sqlite3.connect('SMS_DB.db')
    sup_file= 'SMStome'
    os.system("wget -O " + sup_file + " " + 'https://smstome.com/')
    subdomain = "smstome.com"
    # NOTE(review): `flag` only reaches 2 after two country links, so every
    # other country is skipped; `flag2` behaves the same for numbers.
    # Confirm whether the markup repeats each entry or this drops half of them.
    flag = 0
    flag2 = 0
    with open(sup_file) as file:
        for line in file:
            if ' <a href="' in line and '/country/' in line:
                sup_2 = line.split(' <a href="')[1].split('" class="button button-clear">')[0]
                nation = sup_2.split('/country/')[1].split('/')[0]
                flag = flag+1
            if flag > 1:
                flag = 0
                sup_file2 = "SMStome_"+nation
                # Sample one random listing page for this country.
                os.system("wget -O " + sup_file2 + " " + 'https://smstome.com'+sup_2+"?page="+str(randint(1, 30)))
                # NOTE(review): the inner `with ... as file` shadows the outer
                # handle; the outer for-loop keeps working because it already
                # holds its iterator, but rename for clarity.
                with open(sup_file2) as file:
                    for line2 in file:
                        if 'button button-outline button-small numbutton' in line2:
                            number_link = line2.split('<a href="https://smstome.com')[1].split('" class=')[0]
                            flag2 = flag2+1
                        if flag2 > 1:
                            alive = "none"
                            DB_Ana(conn, subdomain, number_link, alive, nation)
                            flag2 = 0
                os.system("rm "+sup_file2)
    Scansione(conn)
    os.system("rm "+sup_file)
    conn.close()
# Main loop: refresh both providers' registries (each call also rescans all
# numbers), then sleep for 3 minutes.
while True:
    Ana_Receive_smss()
    Ana_SMStome()
    print("---- Execution Hold ---- at time: ")
    # NOTE(review): os.system prints the date itself; the str(...).strip()
    # wraps the process EXIT STATUS and its value is discarded.
    str(os.system("date +%k:%M.%S")).strip()
    time.sleep(180)
| fulgid0/ASMS_discovery | ASMS_discover.py | ASMS_discover.py | py | 3,154 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "Scan_lib.Scan_Receive_sms",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "Scan_lib.Scan_Smstome_sms",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sqlite3.connect",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "o... |
41612108456 | ###################################################################
###################################################################
#
# DISCLAIMER:
# THIS IS A PROOF OF CONCEPT AND AS A RESULT, IS AN UGLY, HACKED TOGETHER MESS.
# IN NO WAY SHOULD THIS BE CONFUSED WITH 'GOOD' CODE.
#
# SORRY.
# -Devey
# 9 March 16
###################################################################
###################################################################
import os
from PIL import Image
import imgHash # https://pypi.python.org/pypi/imgHash
import itertools
from collections import defaultdict
import time
###################################################################################
## - - - Average HASH - - - ##
## ##
## ##
## ##
## ##
###################################################################################
def ahashes(image):
    """Return the 64-byte average hash of the image file at `image`."""
    picture = Image.open(image)
    return imgHash.average_hash(picture)
###################################################################################
## - - - GET DATA - - - ##
## ##
## As of right now, returns the image name and Date/Time. ##
## Should be customized for different applications of this library. ##
## ##
###################################################################################
def getData(image):
    """Return (image name, ingest timestamp) where the timestamp is
    formatted "HH:MM DD/MM/YYYY". Customize per application."""
    stamp = time.strftime("%H:%M %d/%m/%Y")
    return image, stamp.strip()
###################################################################################
## - - - GET HAMMING DISTANCE - - - ##
## ##
## ##
## ##
## ##
###################################################################################
def hamming1(str1, str2):
    """Return the Hamming distance between two equal-length strings.

    Fix: the original used itertools.imap, which only exists on Python 2.
    Built-in map gives the same lazy pairing on Python 3 (and an eager list
    on Python 2), so this version runs on both.
    """
    return sum(map(str.__ne__, str1, str2))
###################################################################################
## - - - GET HASHES - - - ##
## ##
## returns all hashes in format: ##
## [(64byte, 4byte, 16byte),(data)] ##
## ##
###################################################################################
def getHashes(image):
    """Return [perceptual hash, metadata] for one image file."""
    perceptual = ahashes(image)
    meta = getData(image)
    return [perceptual, meta]
def bulkLoader(listOfFiles):
    """Hash every file in `listOfFiles` and return their full-hash records."""
    return [getHashes(path) for path in listOfFiles]
def dbBuilder(hashList):
    """Load full-hash records into the module-level tables: a32 keyed by the
    64-byte hash, aBuckets keyed by the 4-byte hash."""
    for full_hash, meta in hashList:
        a32[full_hash[0]].append(list(meta))
        aBuckets[full_hash[1]].append((full_hash[2], full_hash[0]))
def readHashes(fileName):
    """Parse a flat file of "h64, h4, h16, name, date" lines into a list of
    [(h64, h4, h16), (name, date)] records."""
    records = []
    with open(fileName, 'r') as source:
        for raw in source:
            parts = raw.split(", ")
            records.append([(parts[0], parts[1], parts[2]),
                            (parts[3], parts[4].strip())])
    return records
def writeHashes(hashes, fileName):
    """Append one full-hash record to the flat file; return (hashes, data)."""
    (h64, h4, h16), meta = hashes[0], hashes[1]
    record = '%s, %s, %s, %s, %s\n' % (h64, h4, h16, meta[0], meta[1])
    with open(fileName, 'a') as sink:
        sink.write(record)
    return hashes[0], hashes[1]
def writeMassHashes(listOfHashes, fileName):
    """Append many full-hash records to the flat file in one write."""
    records = [
        '%s, %s, %s, %s, %s\n' % (h[0][0], h[0][1], h[0][2], h[1][0], h[1][1])
        for h in listOfHashes
    ]
    with open(fileName, 'a') as sink:
        sink.writelines(records)
def checkHashes(imgHashes, fileName):
    """Look up an image's hashes in the in-memory tables.

    Returns ("a32", hash, data) on an exact 64-byte match, ("aBk", hash, data)
    on a near match (Hamming distance < 3 on the 16-byte hash) within the
    image's 4-byte bucket, or False when the image is unknown.

    `fileName` is unused; kept for signature parity with checkHashesAdd.
    Bug fix: the original returned False after testing only the FIRST bucket
    entry (`else: return False` inside the loop); the whole bucket is now
    scanned before giving up.
    """
    if imgHashes[0][0] in a32:  # exact 64-byte hash hit
        return "a32", imgHashes[0][0], a32[imgHashes[0][0]]
    if imgHashes[0][1] in aBuckets:
        # Linear scan of the bucket (will eventually be a k-d tree).
        for h16, h64 in aBuckets[imgHashes[0][1]]:
            if hamming1(imgHashes[0][2], h16) < 3:
                return "aBk", h16, a32[h64]
    return False
def checkHashesAdd(imgHashes, fileName):
    """Same lookup as checkHashes, but on a near match the new hashes are
    appended to the flat file `fileName` before returning.

    Bug fix (as in checkHashes): scan the WHOLE bucket instead of returning
    False after the first entry.
    """
    if imgHashes[0][0] in a32:  # exact 64-byte hash hit
        return "a32", imgHashes[0][0], a32[imgHashes[0][0]]
    if imgHashes[0][1] in aBuckets:
        for h16, h64 in aBuckets[imgHashes[0][1]]:
            if hamming1(imgHashes[0][2], h16) < 3:
                writeHashes(imgHashes, fileName)  # persist the new variant
                return "aBk", h16, a32[h64]
    return False
def directoryEater(directoryName):
    """Return "directoryName/<file>" paths for every entry in the directory,
    suitable for bulkLoader.

    NOTE(review): the space-to-dash renaming below operates on the CURRENT
    WORKING DIRECTORY (os.getcwd()), not on `directoryName` -- confirm
    whether that mismatch is intentional.
    """
    path = os.getcwd()
    fileNamesWSpaces = os.listdir(path)
    for filename in fileNamesWSpaces:
        os.rename(os.path.join(path, filename), os.path.join(path, filename.replace(" ", "-")))
    fileNames = os.listdir(directoryName)
    b = []
    for i in fileNames:
        b.append(directoryName + "/" + i)
    return b
def flatFileLoad(fileName):
    """Load every record from a flat file into the in-memory hash tables."""
    records = readHashes(fileName)
    dbBuilder(records)
def bulkFlatFileWrite(dbName, listOfFiles):
    """Hash every file in `listOfFiles` and append the records to `dbName`."""
    writeMassHashes([getHashes(path) for path in listOfFiles], dbName)
def newFile(directoryName, fileName):
    """Build a fresh flat file from every image in a directory."""
    bulkFlatFileWrite(fileName, directoryEater(directoryName))
def checkImage(image, dbName):
    """Hash `image` and look it up (read-only) in the in-memory tables."""
    hashes = getHashes(image)
    return checkHashes(hashes, dbName)
def checkImageAdd(image, dbName):
    """Hash `image` and look it up; near matches are also persisted to dbName."""
    hashes = getHashes(image)
    return checkHashesAdd(hashes, dbName)
##########################
##       Globals        ##
##      Don't Touch!    ##
##########################
p32 = defaultdict(list) # 32 byte discrete cosine transform hash table (not referenced in this file)
a32 = defaultdict(list) # 32 byte gradient (average) hash table: 64-byte hash -> list of metadata
pBuckets = defaultdict(list) # Staggered(4 byte -> 16 byte) dct hash table (not referenced in this file)
aBuckets = defaultdict(list) # Staggered(4 byte -> 16 byte) gradient hash table: 4-byte hash -> [(16-byte, 64-byte)]
########################################################################################
{
"api_name": "imgHash.average_hash",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "PIL.Image.open",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "time.strftime",
"li... |
759288278 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
import datetime
from sqlalchemy import Column, String, create_engine, Integer, TIMESTAMP, func, Float, desc, Boolean
from sqlalchemy.orm import sessionmaker
from sqlalchemy.ext.declarative import declarative_base
from flask import Flask, render_template, request
from flask_script import Manager
# 创建对象的基类:
Base = declarative_base()
app = Flask(__name__)
manager = Manager(app)
class Event(Base):
    """ORM model for a calendar event (table ``event``)."""
    __tablename__ = 'event'
    id = Column(Integer, primary_key=True)
    userid = Column(String(1024))
    title = Column(String(1024))
    description = Column(String(1024))
    forecolor = Column(String(256))
    icon = Column(String(256))
    location = Column(String(256))
    calendar = Column(String(256))
    busy = Column(Boolean)
    # Creation timestamp, set once at insert time.
    gmt_create = Column(TIMESTAMP, default=datetime.datetime.now)
    # Last-modified timestamp, refreshed automatically on every UPDATE.
    gmt_modify = Column(TIMESTAMP, default=datetime.datetime.now, onupdate=datetime.datetime.now)
class Schedule(Base):
    """ORM model for a schedule entry referencing an event (table ``schedule``)."""
    __tablename__ = 'schedule'
    id = Column(Integer,primary_key=True)
    # NOTE(review): stores the event id as a plain Integer rather than a
    # ForeignKey('event.id') -- confirm whether a constraint was intended.
    event_id = Column(Integer)
# NOTE(review): database credentials are hard-coded in the DSN -- move them
# to configuration/environment variables before deploying.
engine = create_engine('mysql+pymysql://root:123456@127.0.0.1:3306/calendar?charset=utf8')
# Session factory bound to the MySQL engine.
DBSession = sessionmaker(bind=engine)
@app.route('/')
def index():
    """Serve the calendar UI page."""
    return render_template('index.html')
@app.route('/api')
def data():
    """API endpoint stub -- not implemented yet.

    NOTE(review): a Flask view returning None raises at request time;
    implement before exposing this route.
    """
    pass
@manager.command
def run():
    """Start the development server on port 8080.

    NOTE(review): debug=True on host 0.0.0.0 exposes the interactive
    debugger to the network -- disable for production.
    """
    app.run(host='0.0.0.0', port=8080, threaded=True, debug=True)
@manager.command
def initdb():
    """Create all tables declared on Base (event, schedule)."""
    Base.metadata.create_all(engine)
# Entry point: dispatch to the Flask-Script manager commands (`run`, `initdb`).
if __name__ == '__main__':
    manager.run()
| DxfAndCxx/calendar | app.py | app.py | py | 1,565 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "sqlalchemy.ext.declarative.declarative_base",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "flask_script.Manager",
"line_number": 17,
"usage_type": "call"
},
{
"api_... |
25161881681 | import base64
import binascii
from typing import List
import falcon
import hashlib
import hmac
import json
import logging
from botocore.exceptions import ClientError
from dacite import Config, from_dict
from dataclasses import asdict
from enum import Enum
from adyen_gift_card.api.adyen_notifications.request import Notification, EventCode, NotificationRequestItem
from adyen_gift_card.api.adyen_notifications.resources import QueuesName, NotificationsCredentials
from adyen_gift_card.util.dictionary_keys_transformation import transform_dict
LOGGER = logging.getLogger()
class AdyenNotifications:
    """Falcon resource receiving Adyen webhook notifications.

    Each POST is authenticated (HTTP Basic), integrity-checked (HMAC over
    the notification fields), then routed to the payments or refunds SQS
    queue by event code. Adyen expects the literal body "[accepted]".

    Security fix: both credential and HMAC comparisons now use
    hmac.compare_digest (constant-time) instead of `==`/`!=`, which could
    leak secrets through comparison-timing differences.
    """

    def __init__(self, notifications_auth: NotificationsCredentials, queues_name: QueuesName,
                 notifications_to_process: List[str], sqs_client):
        self.sqs_client = sqs_client
        self.queues_name = queues_name
        self.notifications_auth = notifications_auth
        # Event codes this service forwards; anything else is ACKed and dropped.
        self.notifications_to_process = notifications_to_process

    def on_post(self, req, resp):
        """Validate and route one incoming Adyen notification."""
        LOGGER.info(req.media)
        if not self._validate_authorization(req.get_header("Authorization")):
            resp.status = falcon.HTTP_403
            resp.media = "[rejected]"
            return resp
        # Convert Adyen's camelCase payload to our snake_case dataclass fields.
        formatted_request = transform_dict(req.media)
        LOGGER.info(formatted_request)
        notification = from_dict(data_class=Notification, data=formatted_request, config=Config(cast=[Enum]))
        if len(notification.notification_items) == 0:
            resp.status = falcon.HTTP_400
            resp.media = "[rejected]"
            return resp
        # Only the first item of the batch is processed here.
        notification_item = notification.notification_items[0].notification_request_item
        if not self._validate_hmac_signature(notification_item):
            LOGGER.info("HMAC signature validation failed")
            resp.status = falcon.HTTP_403
            resp.media = "[rejected]"
            return resp
        if notification_item.event_code in self.notifications_to_process and notification_item.event_code == EventCode.CAPTURE:
            msg = self._send_sqs_message(self.queues_name.payments, notification)
        elif notification_item.event_code in self.notifications_to_process:
            msg = self._send_sqs_message(self.queues_name.refunds, notification)
        else:
            # Not a tracked notification: ACK so Adyen stops retrying.
            resp.status = falcon.HTTP_200
            resp.media = "[accepted]"
            return resp
        if msg is None:
            resp.status = falcon.HTTP_400
            resp.media = "[rejected]"
            return resp
        LOGGER.info(msg)
        resp.status = falcon.HTTP_200
        resp.media = "[accepted]"
        return resp

    def _send_sqs_message(self, sqs_queue_name: str, notification: Notification):
        """Serialize the notification and push it onto the named SQS queue.

        Returns the SQS response dict, or None when the send failed.
        """
        try:
            queue_url = self.sqs_client.get_queue_url(QueueName=sqs_queue_name)["QueueUrl"]
            msg = self.sqs_client.send_message(QueueUrl=queue_url,
                                               MessageBody=json.dumps(asdict(notification), default=lambda x: x.value))
            LOGGER.info("Notification sent to sqs queue")
        except ClientError as e:
            LOGGER.error(e)
            return None
        return msg

    def _validate_authorization(self, auth_header: str) -> bool:
        """Check the HTTP Basic auth header against configured credentials.

        Uses hmac.compare_digest so the comparison runs in constant time.
        """
        auth = f'{self.notifications_auth.username}:{self.notifications_auth.password}'
        base64_auth = base64.b64encode(auth.encode()).decode()
        expected = f'Basic {base64_auth}'
        # compare_digest rejects None, so guard a missing header explicitly.
        return auth_header is not None and hmac.compare_digest(auth_header, expected)

    def _validate_hmac_signature(self, notification: NotificationRequestItem) -> bool:
        """Recompute the Adyen HMAC-SHA256 over the notification fields and
        compare it (constant-time) with the signature Adyen attached."""
        original_reference = notification.original_reference if notification.original_reference is not None else ""
        message = f'{notification.psp_reference}:{original_reference}:{notification.merchant_account_code}:' \
                  f'{notification.merchant_reference}:{notification.amount.value}:{notification.amount.currency}:{notification.event_code.value}:' \
                  f'{notification.success}'
        LOGGER.info(f'String to validate message integrity: {message}')
        hmac_key = binascii.a2b_hex(self.notifications_auth.hmac_key)
        hashed_msg = base64.b64encode(hmac.new(hmac_key, msg=message.encode("utf-8"), digestmod=hashlib.sha256).digest())
        signature = notification.additional_data.get("hmac_signature")
        if signature is None:
            return False
        return hmac.compare_digest(hashed_msg.decode(), signature)
| NewStore/int-cinori | integrations/adyen_gift_card/adyen_gift_card/api/adyen_notifications/adyen_notifications.py | adyen_notifications.py | py | 4,418 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "logging.getLogger",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "adyen_gift_card.api.adyen_notifications.resources.NotificationsCredentials",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "adyen_gift_card.api.adyen_notifications.resources.Qu... |
13989867732 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
import logging
import aiomysql
from webapp.www.fields import Field
logging.basicConfig(level=logging.INFO)
__pool = None
def log(sql, args=None):
    """Log the SQL statement and its bound arguments at INFO level."""
    logging.info('SQL: [%s] args: %s', sql, args or [])
# Create the global connection pool __pool. Defaults: utf8 charset and
# autocommit enabled. Each HTTP request then borrows a connection from the
# pool instead of repeatedly opening and closing database connections.
async def create_pool(loop, **kw):
    logging.info('create database connection pool...')
    global __pool
    # Create the aiomysql connection pool (coroutine).
    __pool = await aiomysql.create_pool(
        host=kw.get('host', 'localhost'),
        port=kw.get('port', 3306),
        user=kw['user'],  # required: no default provided
        password=kw['password'],  # required: no default provided
        db=kw['db'],  # required: no default provided
        charset=kw.get('charset', 'utf8'),
        autocommit=kw.get('autocommit', True),
        maxsize=kw.get('maxsize', 10),
        minsize=kw.get('minsize', 1),
        loop=loop
    )
# If `size` is given, fetch at most that many rows via fetchmany();
# otherwise fetch the entire result set via fetchall().
async def select(sql, args, size=None):
    log(sql, args)
    # Await a free connection from the pool; `async with` handles release.
    async with __pool.get() as conn:
        async with conn.cursor(aiomysql.DictCursor) as cur:
            await cur.execute(sql.replace('?', '%s'), args)  # swap '?' for MySQL's '%s' placeholder
            if size:
                results = await cur.fetchmany(size)  # at most `size` rows
            else:
                results = await cur.fetchall()  # the full result set
            logging.info('return rows: %s' % len(results))
            return results
# For INSERT/UPDATE/DELETE statements; returns the number of affected rows.
async def execute(sql, args, autocommit=True):
    log(sql, args)
    global __pool
    async with __pool.get() as conn:
        if not autocommit:  # start an explicit transaction when not autocommitting
            await conn.begin()
        try:
            async with conn.cursor(aiomysql.DictCursor) as cur:  # DictCursor returns rows as dicts
                await cur.execute(sql.replace('?', '%s'), args)
                affected = cur.rowcount  # number of rows touched by the statement
                if not autocommit:  # commit the explicit transaction
                    await conn.commit()
        except BaseException as e:
            if not autocommit:  # roll back the transaction on any error
                await conn.rollback()
            raise e
        return affected
def create_args_string(num):
    """Return `num` SQL placeholders joined as "?, ?, ..., ?".

    Idiom fix: replaces the manual append loop with a single join.
    """
    return ', '.join(['?'] * num)
# Metaclass for the ORM base class Model.
# Any class inheriting from Model (e.g. User) is constructed through this
# metaclass, which scans its Field attributes, stores the table/column
# mapping in class attributes such as __table__ and __mappings__, and
# pre-builds the default CRUD SQL statements.
class ModelMetaclass(type):
    # mcs: the metaclass itself (plays the role of self)
    # name: name of the class being created, e.g. 'User'
    # bases: tuple of base classes
    # attrs: dict of class-body attributes (e.g. __table__, id, ...)
    # Model itself is excluded: it only exists to be inherited from and
    # has no table mapping of its own.
    def __new__(mcs, name, bases, attrs):
        if name == 'Model':  # skip the Model base class itself
            return type.__new__(mcs, name, bases, attrs)
        table = attrs.get('__table__', name)  # table name defaults to the class name
        logging.info('found model: %s (table: %s)' % (name, table))
        # Collect every Field and locate the primary key.
        mappings = dict()
        fields = []
        primary_key = None
        for k, v in attrs.items():
            if isinstance(v, Field):
                logging.info('--found mapping: %s ==> %s' % (k, v))
                mappings[k] = v  # record the attribute -> column mapping
                if v.primary_key:
                    if primary_key:  # a second primary key is an error
                        raise RuntimeError('Duplicate primary key for field: %s' % k)
                    primary_key = k  # remember the first primary key found
                else:
                    fields.append(k)  # non-primary-key columns
        if not primary_key:
            raise RuntimeError('Primary key not found.')  # (StandardError was removed in Python 3)
        for k in mappings.keys():  # strip the Field descriptors off the class
            attrs.pop(k)
        escaped_fields = list(map(lambda f: '`%s`' % f, fields))
        # Build the default select/insert/update/delete statements.
        # Backticks guard against column names that clash with SQL keywords.
        sql_select = 'select `%s`, %s from `%s`' % \
                     (primary_key, ', '.join(escaped_fields), table)
        sql_insert = 'insert into `%s` (%s, `%s`) values (%s)' % \
                     (table, ', '.join(escaped_fields), primary_key, create_args_string(len(escaped_fields) + 1))
        sql_update = 'update `%s` set %s where `%s`=?' % \
                     (table, ', '.join(map(lambda f: '`%s`=?' % (mappings.get(f).name or f), fields)), primary_key)
        sql_delete = 'delete from `%s` where `%s`=?' % \
                     (table, primary_key)
        attrs['__mappings__'] = mappings  # attribute -> column mapping
        attrs['__table__'] = table  # table name
        attrs['__primary_key__'] = primary_key  # primary-key attribute name
        attrs['__fields__'] = fields  # attribute names other than the primary key
        attrs['__select__'] = sql_select
        attrs['__insert__'] = sql_insert
        attrs['__update__'] = sql_update
        attrs['__delete__'] = sql_delete
        return type.__new__(mcs, name, bases, attrs)
# Base class for all ORM-mapped models.
# Model inherits from dict (so it has full dict behaviour) and adds
# __getattr__/__setattr__ so fields can be accessed as attributes (user.id).
class Model(dict, metaclass=ModelMetaclass):
    def __init__(self, **kw):
        super(Model, self).__init__(**kw)

    def __getattr__(self, key):
        # Called only when normal attribute lookup fails: fall back to the dict.
        try:
            return self[key]
        except KeyError:
            raise AttributeError("'Model' object has no attribute '%s'" % key)

    def __setattr__(self, key, value):
        # a.b = c stores into the underlying dict entry a['b'].
        self[key] = value

    def getvalue(self, key):
        """Return the current value of `key`, or None when unset."""
        return getattr(self, key, None)

    def getvalueordefault(self, key):
        """Return the value of `key`, computing (and caching) the Field's
        default when the value is unset."""
        value = getattr(self, key, None)
        if value is None:
            field = self.__mappings__[key]
            if field.default is not None:
                # A callable default is invoked; otherwise it is used verbatim.
                value = field.default() if callable(field.default) else field.default
            logging.debug('using default value for %s: %s' % (key, str(value)))
            # Cache the computed default as the current value.
            setattr(self, key, value)
        return value

    # Query helpers are classmethods so no instance is needed to search.
    @classmethod
    async def findall(cls, where=None, args=None, **kw):
        """Find all objects matching the optional `where` clause.

        Keyword options: `orderBy` (column expression) and `limit`
        (an int, or a 2-tuple of (offset, count)).
        """
        sql = [cls.__select__]
        if where:
            sql.append('where')
            sql.append(where)
        if args is None:
            args = []
        order_by = kw.get('orderBy', None)
        if order_by:
            sql.append('order by')
            sql.append(order_by)
        limit = kw.get('limit', None)
        if limit is not None:
            sql.append('limit')
            if isinstance(limit, int):
                sql.append('?')
                # Bug fix: the int form appended the '?' placeholder without
                # supplying its argument, leaving the query one bind short.
                args.append(limit)
            elif isinstance(limit, tuple) and len(limit) == 2:
                sql.append('?, ?')
                args.extend(limit)
            else:
                raise ValueError('Invalid limit values: %s' % str(limit))
        rs = await select(' '.join(sql), args)
        return [cls(**r) for r in rs]

    @classmethod
    async def findnum(cls, select_field, where=None, args=None):
        """Return an aggregate value (e.g. count(id)) for rows matching
        `where`, or None when the query yields nothing."""
        sql = ['select %s _num_ from `%s`' % (select_field, cls.__table__)]
        if where:
            sql.append('where')
            sql.append(where)
        rs = await select(' '.join(sql), args, 1)
        if len(rs) == 0:
            return None
        return rs[0]['_num_']

    @classmethod
    async def find(cls, pk):
        """Find one object by primary key, or None when absent."""
        rs = await select('%s where `%s`=?' % (cls.__select__, cls.__primary_key__), [pk], 1)
        if len(rs) == 0:
            return None
        return cls(**rs[0])

    async def save(self):
        """INSERT this instance, filling unset columns from Field defaults."""
        args = list(map(self.getvalueordefault, self.__fields__))
        args.append(self.getvalueordefault(self.__primary_key__))
        rows = await execute(self.__insert__, args)
        if rows != 1:
            logging.warning('failed to insert record: affected rows: %s' % rows)

    async def update(self):
        """UPDATE the row identified by this instance's primary key."""
        args = list(map(self.getvalue, self.__fields__))
        args.append(self.getvalue(self.__primary_key__))
        rows = await execute(self.__update__, args)
        if rows != 1:
            logging.warning('failed to update by primary key: affected rows: %s' % rows)

    async def remove(self):
        """DELETE the row identified by this instance's primary key."""
        args = [self.getvalue(self.__primary_key__)]
        rows = await execute(self.__delete__, args)
        if rows != 1:
            logging.warning('failed to remove by primary key: affected rows: %s' % rows)
| shellever/Python3Learning | webapp/www/orm.py | orm.py | py | 11,391 | python | zh | code | 0 | github-code | 36 | [
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.INFO",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "logging.info",
... |
32199916820 | import muesli_functions as mf
import scipy as sp
# Load 2-band samples (bands 70 and 106) for each grassland polygon.
# NOTE(review): `xrange` below is Python 2 only -- this script will not run
# unmodified on Python 3.
X,Y = mf.read2bands("../Data/grassland_id_2m.sqlite",70,106)
ID = []
# Compute NDVI
NDVI = []
for i in xrange(len(X)):
    X_ = X[i]
    # Division-safe NDVI: keep only pixels whose band sum is positive.
    DENOM = (X_[:,1]+X_[:,0])
    t = sp.where(DENOM>0)[0]
    NDVI_ = (X_[t,1]-X_[t,0])/DENOM[t]
    # NOTE(review): skipping empty NDVI_ shifts the indices, so below
    # Y[i] may no longer correspond to NDVI[i] -- confirm this is safe.
    if len(NDVI_) > 0:
        NDVI.append(NDVI_)
# Scan grasslands: keep those whose mean NDVI exceeds 0.6.
for i in xrange(len(NDVI)):
    m = sp.mean(NDVI[i][:,sp.newaxis])
    if m > 0.6:
        ID.append(Y[i])
        print("ID {} and mean NDVI {}".format(Y[i],m))
print("Number of selected grasslands: {}".format(len(ID)))
sp.savetxt("id_grasslands.csv",ID,delimiter=',')
| mfauvel/GrasslandsSympa | Codes/filter_id.py | filter_id.py | py | 672 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "muesli_functions.read2bands",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "scipy.where",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "scipy.mean",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "scipy.newaxis",
... |
26236843242 | from django.contrib.auth.models import Group
from django.core.checks import messages
from django.core.files.images import ImageFile
from django.shortcuts import redirect, render
from django.http import HttpResponse, JsonResponse
from core.models import *
from core.forms import *
from django.contrib import messages
from django.contrib.auth.decorators import login_required
from django.contrib.auth.models import Group
from django.contrib.auth import authenticate, login, logout
from django.db.models import Q
import json
from core.decorators import *
import random
# Create your views here.
def home_page(request):
    """Render the landing page, attaching the open cart (carro/items)
    when the visitor is an authenticated Cliente.

    Fix: the bare `except:` also swallowed SystemExit/KeyboardInterrupt;
    narrowed to Exception. (The dead `carro = None; items = None` locals
    from the original handler were removed -- they were never read.)
    """
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous user or missing Cliente profile: render without a cart.
        pass
    return render(request, 'pages/home.html', context)
def mujer_page(request):
    """Render the 'Mujer' category page, attaching the open cart for
    authenticated clients.

    Fix: bare `except:` narrowed to `except Exception:`.
    """
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous user or missing Cliente profile: render without a cart.
        pass
    context['nombre'] = 'Mujer'
    return render(request, 'pages/categoria.html', context)
def hombre_page(request):
    """Render the 'Hombre' category page, attaching the open cart for
    authenticated clients.

    Fix: bare `except:` narrowed to `except Exception:`.
    """
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous user or missing Cliente profile: render without a cart.
        pass
    context['nombre'] = 'Hombre'
    return render(request, 'pages/categoria.html', context)
def nino_page(request):
    """Render the 'Niños' category page, attaching the open cart for
    authenticated clients.

    Fix: bare `except:` narrowed to `except Exception:`.
    """
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous user or missing Cliente profile: render without a cart.
        pass
    context['nombre'] = 'Niños'
    return render(request, 'pages/categoria.html', context)
def producto_page(request, pk):
    """Render the detail page for one Producto, attaching the open cart
    for authenticated clients.

    Fix: bare `except:` narrowed to `except Exception:`.
    """
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous user or missing Cliente profile: render without a cart.
        pass
    # NOTE(review): an unknown pk raises Producto.DoesNotExist (HTTP 500);
    # consider get_object_or_404 if a 404 is preferred.
    producto = Producto.objects.get(id=pk)
    context['producto'] = producto
    return render(request, 'pages/producto.html', context)
# Clientes
def registrarse_page(request):
    """Sign-up page: create the auth User, add it to the 'cliente' group and
    create the linked Cliente profile.

    NOTE(review): form2 (ClienteForm) is bound but form2.is_valid() is never
    called -- the Cliente fields are taken raw from request.POST. Confirm
    whether profile validation was intended.
    """
    form1 = CreateUserForm()
    form2 = ClienteForm()
    if request.method == 'POST':
        form1 = CreateUserForm(request.POST)
        form2 = ClienteForm(request.POST)
        if form1.is_valid():
            user = form1.save()
            apellido_paterno = request.POST.get('apellido_paterno')
            apellido_materno = request.POST.get('apellido_materno')
            telefono = request.POST.get('telefono')
            group = Group.objects.get(name='cliente')
            user.groups.add(group)
            Cliente.objects.create(
                usuario = user,
                apellido_paterno=apellido_paterno,
                apellido_materno=apellido_materno,
                telefono=telefono
            )
            messages.success(request, 'Cuenta creada con exito')
            return redirect('login_page')
        else:
            messages.error(request, 'La cuenta no pudo ser creada')
    context = {'formUser': form1, 'formCliente': form2}
    return render(request, 'pages/register.html', context)
@usuario_identificado
def login_page(request):
    """Log a client in by e-mail + password.

    The form collects an e-mail, so the matching User is looked up first to
    recover the username that authenticate() requires.
    """
    context = {}
    if request.method == 'POST':
        correo = request.POST.get('email')
        password = request.POST.get('password')
        try:
            usuario = User.objects.get(email=correo)
        except User.DoesNotExist:
            # Unknown e-mail: report the same generic error as a bad password
            # instead of crashing with an uncaught DoesNotExist (HTTP 500).
            messages.error(request, 'Usuario o contraseña incorrecto')
            return render(request, 'pages/login.html', context)
        # (Removed a leftover debug print of the resolved username.)
        user = authenticate(request, username=usuario.username, password=password)
        if user is not None:
            login(request, user)
            return redirect('home_page')
        else:
            messages.error(request, 'Usuario o contraseña incorrecto')
    return render(request, 'pages/login.html', context)
def logout_user(request):
    """Terminate the current session and send the visitor to the login form."""
    logout(request)
    return redirect('login_page')
#TO-DO: Agregar condición para logeado y para clientes con decoradores
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['cliente', 'admin'])
def carro_page(request):
    """Render the shopping-cart page for the logged-in client."""
    # The decorators guarantee an authenticated user in an allowed role, so
    # the Cliente lookup and open purchase are fetched unconditionally.  The
    # original ran these exact queries twice (once before and once inside the
    # try); the duplicate copy has been removed.
    cliente = request.user.cliente
    compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
    items = compra.productocompra_set.all()
    try:
        # Cart summary property; tolerate failures so the page still renders.
        carro = compra.get_comprar_productos
    except Exception:
        carro = None
    context = {'items': items, 'compra': compra, 'carro':carro}
    return render(request, 'pages/carro.html', context)
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['cliente'])
def direccion_page(request, pk):
    """Collect a shipping address for purchase `pk` before payment."""
    compra = Compra.objects.get(id=pk)
    cliente = request.user.cliente
    form = DireeccionForm()
    if request.method == 'POST':
        form = DireeccionForm(request.POST)
        if not form.is_valid():
            messages.error(request, 'No se pudo agregar la dirección')
        else:
            # Attach the owning client and purchase before persisting.
            form.instance.cliente = cliente
            form.instance.compra = compra
            form.save()
            messages.success(request, 'Direccion agregada')
            return redirect('pagar_page')
    return render(request, 'pages/direccion.html', {'form': form})
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['cliente'])
def pagar_page(request):
    """Checkout page: shows the open purchase and marks it paid on POST."""
    #TO-DO: Agregar try and catch para cada variable, excepto cliente
    cliente = request.user.cliente
    compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
    items = compra.productocompra_set.all()
    if request.method == 'POST':
        # Close the purchase; the unused `compra_comp` binding was dropped.
        Compra.objects.filter(id=compra.id).update(completado=True)
        messages.success(request, 'Producto comprado')
        return redirect('home_page')
    context = {'items': items, 'compra': compra}
    return render(request, 'pages/pagar.html', context)
def vision_page(request):
    """Render the company-vision page (pages/vision.html)."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous visitor: render without cart data (was a bare `except:`).
        carro = None
        items = None
    return render(request, 'pages/vision.html', context)
#TO-DO: datos de formularios para Empleo y Contacto
def contacto_page(request):
    """Contact page: renders and processes the ContactoForm."""
    context = {}
    form = ContactoForm()
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous visitor: render without cart data (was a bare `except:`).
        carro = None
        items = None
    if request.method == 'POST':
        form = ContactoForm(request.POST)
        if form.is_valid():
            form.save()
            # NOTE(review): message text looks copy-pasted from empleo_page
            # ("Aplicación laboral") -- confirm before changing user-facing text.
            messages.success(request, 'Aplicación laboral hecha')
        else:
            messages.error(request, 'La aplicación no pudo ser grabada')
    # Expose the (possibly bound) form so validation errors render; the
    # original rebound `form` but kept the stale empty one in the context.
    context['form'] = form
    return render(request, 'pages/contacto.html', context)
def cambios_page(request):
    """Render the returns/exchanges policy page (pages/cambios.html)."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous visitor: render without cart data (was a bare `except:`).
        carro = None
        items = None
    return render(request, 'pages/cambios.html', context)
def empleo_page(request):
    """Job-application page: renders and processes the EmpleoForm."""
    context = {}
    form = EmpleoForm()
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous visitor: render without cart data (was a bare `except:`).
        carro = None
        items = None
    if request.method == 'POST':
        form = EmpleoForm(request.POST)
        if form.is_valid():
            form.save()
            messages.success(request, 'Aplicación laboral hecha')
        else:
            messages.error(request, 'La aplicación no pudo ser grabada')
    # Expose the (possibly bound) form so validation errors render; the
    # original rebound `form` but kept the stale empty one in the context.
    context['form'] = form
    return render(request, 'pages/empleo.html', context)
def updateItem(request):
    """AJAX endpoint: change the quantity of one product in the open cart."""
    payload = json.loads(request.body)
    producto_id = payload['productId']
    accion = payload['action']
    cliente = request.user.cliente
    producto = Producto.objects.get(id=producto_id)
    compra, _ = Compra.objects.get_or_create(cliente=cliente, completado=False)
    linea, _ = ProductoCompra.objects.get_or_create(compra=compra, producto=producto)
    if accion == 'add':
        linea.cantidad += 1
    elif accion == 'remove':
        linea.cantidad -= 1
    linea.save()
    # A non-positive quantity removes the line from the cart entirely.
    if linea.cantidad <= 0:
        linea.delete()
    return JsonResponse('Item fue añadido', safe=False)
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['cliente'])
def user_page(request, action):
    """Client profile page: past purchases, addresses and the current cart."""
    context = {}
    try:
        cliente = request.user.cliente
        context['cliente'] = cliente
    except Exception:
        context['cliente'] = None
    try:
        # A NameError from a missing `cliente` above is also caught here,
        # leaving the section empty -- same control flow as the original
        # bare excepts, minus swallowing SystemExit/KeyboardInterrupt.
        compras = Compra.objects.all().filter(cliente=cliente, completado=True)
        context['compras'] = compras
    except Exception:
        context['compras'] = None
    try:
        envios = DireccionEnvio.objects.all().filter(cliente=cliente)
        context['envios'] = envios
    except Exception:
        context['envios'] = None
    # mecanica carro
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        carro = None
        items = None
    try:
        compras_completas = DireccionEnvio.objects.all().filter(cliente=cliente,entregado=True)
        context['compras_completas'] = compras_completas
    except Exception:
        context['compras_completas'] = None
    return render(request, 'pages/user.html', context)
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['admin'])
def admin_page(request, action):
    """Staff dashboard; `action` selects which tab title to display."""
    context = {}
    try:
        envios = DireccionEnvio.objects.all().filter(entregado=False)
        context['envios'] = envios
    except Exception:
        context['envios'] = 'Sin Envios'
    try:
        compras = Compra.objects.all().filter(completado=True)
        context['compras'] = compras
    except Exception:
        # The original assigned attributes on the fallback *string*
        # ('Sin compras'.get_comprar_total = 0), which raises AttributeError
        # inside the handler; the plain placeholder is stored instead.
        context['compras'] = 'Sin compras'
    try:
        productos = Producto.objects.all()
        context['productos'] = productos
    except Exception:
        # Same str-attribute-assignment bug fixed here as well.
        context['productos'] = 'Sin productos'
    if action == 'inicio':
        context['nombre'] = 'Inicio'
    elif action == 'productos':
        context['nombre'] = 'Productos'
    elif action == 'envios':
        context['nombre'] = 'Envíos'
    elif action == 'compras':
        context['nombre'] = 'Compras'
    return render(request, 'pages/funcionarios.html', context)
def preguntas_frecuentes(request):
    """Render the FAQ page (pages/preguntas_frecuentes.html)."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous visitor: render without cart data (was a bare `except:`).
        carro = None
        items = None
    return render(request, 'pages/preguntas_frecuentes.html', context)
@login_required(login_url='home_page')
@usuarios_permitiado(roles_permitidos=['admin'])
def crud_producto(request, pk):
    """Create or edit a product: edits `pk` if it exists, otherwise creates.

    The original duplicated the whole POST-handling branch inside try/except,
    and the over-broad try meant a failure while saving an *existing* product
    re-ran the POST handling in "create" mode.  Resolving the instance first
    collapses both branches.
    """
    try:
        producto = Producto.objects.get(id=pk)
    except Exception:
        # Unknown/invalid pk switches the view into "create" mode.
        producto = None
    # A ModelForm with instance=None behaves exactly like an unbound create
    # form, so one code path covers both create and edit.
    form = ProductoForm(instance=producto)
    if request.method == 'POST':
        form = ProductoForm(request.POST, request.FILES, instance=producto)
        if form.is_valid():
            form.save()
            messages.success(request, 'Producto agregado')
        else:
            messages.error(request, 'Error al guardar el producto')
    context = {'form': form}
    return render(request, 'pages/func-produc.html', context)
def poblar_bd(request):
    """Reset the product table and seed it with the demo catalogue."""
    # Wipe whatever is there so reseeding is idempotent.
    Producto.objects.all().delete()
    # (titulo, precio, categoria, descripcion, imagen) for every seed product.
    semillas = [
        ("Camisa Hombre Negra Dorada", '21000', "HM", "Camisa de vestir de colores negro y dorado. Diseño oriental.", "h-camisa.jpg"),
        ("Pantalones Cuero Hombre Negros", '32000', "HM", "Pantalones de cuero color negro. Cinturon no incluido.", "h-pantalones.jpg"),
        ("Zapatos Cuero Cafe", '45000', "HM", "Zapatos de cuero color marron. Hebilla de plata. Disponible en todas tallas.", "h-zapato.jpg"),
        ("Blusa Multicolor Sparkle", '42000', "MJ", "Top tipo blusa multicolor, refleja la luz. Spaghetti strap.", "m-blusa.jpg"),
        ("Vestido Mujer de Una Pieza", '15000', "MJ", "Vestido negro y azul. Una pieza, disponible en todas las tallas.", "m-vestido.jpg"),
        ("Flats Negros Mujer", '66000', "MJ", "Zapatos Flat de mujer, disponibles en Negro y Blanco. Taco bajo.", "m-zapato.jpg"),
        ("Buso Oso de Niño", '12500', "NN", "Buso de niño unisex. Diseño de oso, disponible en verde, rojo y azul.", "n-buso.jpg"),
        ("Pantalones Dinosario de Niño", '14000', "NN", "Pantalones de buso unisex para niños, diseño de dinosaurio, disponible en gris y negro.", "n-pantalones.jpg"),
        ("Zapatillas con Luces de Niño", '27000', "NN", "Zapatillas unisex para niños, con luces fluorecentes en la suela. Baterias incluidas.", "n-zapatilla.jpg"),
    ]
    for titulo, precio, categoria, descripcion, imagen in semillas:
        Producto.objects.create(titulo=titulo, precio=precio, categoria=categoria,
                                descripcion=descripcion, imagen=imagen)
    # Back to the landing page once seeding is done.
    return redirect('home_page')
def formapago_page(request):
    """Render the payment-methods page (pages/formapago.html)."""
    context = {}
    try:
        cliente = request.user.cliente
        compra, creada = Compra.objects.get_or_create(cliente=cliente, completado=False)
        items = compra.productocompra_set.all()
        carro = compra.get_comprar_productos
        context['carro'] = carro
        context['items'] = items
    except Exception:
        # Anonymous visitor: render without cart data (was a bare `except:`).
        carro = None
        items = None
    return render(request, 'pages/formapago.html', context)
| felipe-quirozlara/changewear-django | changeWear/pages/views.py | views.py | py | 16,865 | python | es | code | 0 | github-code | 36 | [
{
"api_name": "django.shortcuts.render",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 71,
"usage_type": "call"
},
{
"api_name"... |
29909656261 | # -*- coding: utf-8 -*-
import os
import sys
import csv
import random
import timeit
import numpy as np
import argparse
import multiprocessing as mp
from ..lib.utils import makedirs
# Column layouts of the tab-separated click / query log files.  These are
# passed to csv.DictReader as positional field names (the logs are assumed
# header-less, so every row is treated as data).
click_field_name = ["date", "format", "paper", "ip", "mode", "uid", "session",
                    "port", "id", "useragent", "usercookies"]
query_field_name = ["date", "query", "ip", "referer", "mode", "num_results",
                    "results", "uid", "session", "port", "overlength", "id",
                    "useragent", "usercookies"]
class EstWorker(mp.Process):
    """Worker process estimating rank propensities from swap-intervention data.

    Each task pulled from the queue is one bootstrap sample: a list of
    (uid, results) pairs, where `results` is a comma-separated list of
    "rank_before*rank_after*paper" tokens.  For every item originally at
    rank 0 that landed at some rank < M after the swap, the worker counts
    impressions and clicks per post-swap rank, then normalises the per-rank
    CTR by the rank-0 CTR to obtain the propensity estimate vector.
    """

    def __init__(self, task_queue, M, click_set, res_list):
        super(EstWorker, self).__init__()
        self._task_queue = task_queue   # JoinableQueue of samples; None = poison pill
        self._M = M                     # number of ranks to estimate
        self._click_set = click_set     # set of clicked (uid, paper) pairs
        self._res_list = res_list       # shared (manager) list collecting results

    def run(self):
        # Bind hot attributes to locals once, outside the task loop.
        task_queue = self._task_queue
        click_set = self._click_set
        res_list = self._res_list
        M = self._M
        name = self.name
        cnt = 0
        while True:
            task = task_queue.get()
            if task is None:
                # Poison pill: acknowledge it and exit.
                task_queue.task_done()
                print('{}: Processed {} tasks'.format(name, cnt))
                break
            query_set = task
            top2k_shown = np.zeros(M)
            top2k_click = np.zeros(M)
            for uid, results in query_set:
                for result in results.split(','):
                    toks = result.split('*')
                    rk_before = int(toks[0])
                    rk_after = int(toks[1])
                    paper = toks[2]
                    # Only items originally at rank 0 that stayed in the top M count.
                    if rk_before == 0 and rk_after < M:
                        top2k_shown[rk_after] += 1
                        if (uid, paper) in click_set:
                            top2k_click[rk_after] += 1
            # Vectorised form of the original per-index loops; semantics are
            # unchanged (ranks never shown still yield NaN with a runtime
            # warning, exactly as the elementwise scalar division did).
            swap_ctr = top2k_click / top2k_shown
            prop_est = swap_ctr / swap_ctr[0]
            res_list.append(prop_est)
            task_queue.task_done()
            cnt += 1
def bootstrap(M, n_samples, query_list, click_set, n_workers):
    """Run `n_samples` bootstrap resamples of `query_list` across workers.

    Returns a manager-backed list holding one propensity-estimate vector
    (length M) per bootstrap sample.
    """
    tasks = mp.JoinableQueue()
    # Keep a reference to the manager: its server process lives only as long
    # as this object does.
    manager = mp.Manager()
    results = manager.list()

    pool = [EstWorker(tasks, M, click_set, results) for _ in range(n_workers)]
    for worker in pool:
        worker.daemon = True
        worker.start()

    # Each task is a same-size resample (with replacement) of the query list.
    for _ in range(n_samples):
        tasks.put(random.choices(query_list, k=len(query_list)))
    # One poison pill per worker so every process shuts down cleanly.
    for _ in range(n_workers):
        tasks.put(None)

    tasks.close()
    tasks.join()
    for worker in pool:
        worker.join()
    return results
if __name__ == '__main__':
    parser = argparse.ArgumentParser(
        description='Propensity Estimation via swap intervention')
    parser.add_argument('-m', type=int, help='max pos to be estimated')
    parser.add_argument('-n', type=int, default=1000, help='num of bootstrap samples')
    parser.add_argument('-p', type=float, default=0.95, help='confdence probability')
    parser.add_argument('--n_workers', default=mp.cpu_count(), type=int,
                        help='number of workers')
    parser.add_argument('query_path', help='query path')
    parser.add_argument('click_path', help='click path')
    parser.add_argument('output_path', help='output path')
    args = parser.parse_args()

    start = timeit.default_timer()
    M = args.m
    n_samples = args.n
    # Clamp to the machine's core count.  The original computed this min()
    # and then immediately overwrote it with the raw argument, discarding
    # the clamp; the redundant duplicate assignments were removed.
    n_workers = min(mp.cpu_count(), args.n_workers)
    query_path = args.query_path
    click_path = args.click_path

    random.seed()

    # (uid, paper) pairs that received a click.
    click_set = set()
    with open(click_path, 'r') as fin:
        reader = csv.DictReader(fin, delimiter='\t', fieldnames=click_field_name)
        for line in reader:
            click_set.add((line['uid'], line['paper']))

    # (uid, last-results-segment) per query with at least M results.
    query_list = []
    with open(query_path, 'r') as fin:
        reader = csv.DictReader(fin, delimiter='\t', quotechar="'", fieldnames=query_field_name)
        for line in reader:
            uid = line['uid']
            num_results = int(line['num_results'])
            if num_results < M:
                continue
            results = line['results'].split('|')[-1]
            query_list.append((uid, results))

    prop_list = bootstrap(M, n_samples, query_list, click_set, n_workers)

    # Percentile indices for the two-sided confidence interval.  The upper
    # index is n - lo - 1: the original's `n - lo` overruns the list when
    # lo == 0 (e.g. for -p 1.0).
    lo = int(n_samples * ((1 - args.p) / 2))
    mi = int(n_samples * 0.5)
    hi = n_samples - lo - 1

    perc_conf = np.zeros((M, 3))
    for i in range(M):
        p = sorted(prop[i] for prop in prop_list)
        perc_conf[i][0] = p[lo]
        perc_conf[i][1] = p[mi]
        perc_conf[i][2] = p[hi]

    makedirs(os.path.dirname(args.output_path))
    np.savetxt(args.output_path, perc_conf)

    end = timeit.default_timer()
    print('Running time: {:.3f}s.'.format(end - start))
| fzc621/CondPropEst | src/arxiv_match/bootstrap_swap.py | bootstrap_swap.py | py | 4,987 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "multiprocessing.Process",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "numpy.zeros",
"line_number": 44,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
"line_number": 45,
"usage_type": "call"
},
{
"api_name": "numpy.zeros",
... |
21393799623 | """
21.vek API Client
"""
from typing import Optional, Tuple
from bgd.constants import TWENTYFIRSTVEK
from bgd.responses import GameSearchResult, Price
from bgd.services.api_clients import JsonHttpApiClient
from bgd.services.base import GameSearchService
from bgd.services.constants import GET
from bgd.services.responses import APIResponse
class TwentyFirstVekApiClient(JsonHttpApiClient):
    """API client for the 21vek.by search-suggest endpoint."""

    BASE_SEARCH_URL = "https://search.21vek.by/api/v1.0"
    SEARCH_PATH = "/search/suggest"

    async def search(self, query: str, _: Optional[dict] = None) -> APIResponse:
        """Search by query string.

        :param query: free-text search term; URL-encoded before the request.
        :param _: unused, kept for interface compatibility.
        """
        from urllib.parse import quote_plus  # local import to avoid touching file-level imports

        # Encode the user-supplied term; the original interpolated it raw,
        # so spaces and special characters produced malformed URLs.
        url = f"{self.SEARCH_PATH}?q={quote_plus(query)}"
        return await self.connect(GET, self.BASE_SEARCH_URL, url)
class TwentyFirstVekSearchService(GameSearchService):
    """Search service for 21vek.by."""

    def _is_available_game(self, product: dict) -> bool:
        """True only for in-stock products from the board-games section."""
        is_product = product["type"] == "product"
        in_stock = product["price"] != "нет на складе"
        is_board_game = "board_games" in product["url"]
        return is_product and in_stock and is_board_game

    async def do_search(self, query: str, *args, **kwargs) -> Tuple[GameSearchResult]:
        """Query the API and convert matching items into search results."""
        api_response = await self._client.search(query, **kwargs)
        available = self.filter_results(api_response.response["items"], self._is_available_game)
        return self.build_results(available)
class TwentyFirstVekGameSearchResultFactory:
    """Factory building GameSearchResult objects from raw 21vek API items."""

    BASE_URL = "https://21vek.by"

    def create(self, search_result: dict) -> GameSearchResult:
        """Create a search result from one raw API item."""
        return GameSearchResult(
            description=search_result["highlighted"],
            images=self._extract_images(search_result),
            location=None,
            owner=None,
            prices=[self._extract_price(search_result)],
            source=TWENTYFIRSTVEK,
            subject=search_result["name"],
            url=self._extract_url(search_result),
        )

    @staticmethod
    def _extract_price(product: dict) -> Price:
        """Extract the price in minor currency units (kopecks).

        Keeps every digit of the price string, e.g. "60,00 р." -> 6000.
        The original split on the first space and therefore truncated any
        price carrying a digit-grouping separator (e.g. "1 234,56 р." -> 1).
        """
        digits = "".join(ch for ch in product["price"] if ch.isdigit())
        return Price(amount=int(digits))

    def _extract_url(self, product: dict) -> str:
        """Build the absolute product URL from the relative API path."""
        return f"{self.BASE_URL}{product['url']}"

    @staticmethod
    def _extract_images(product: dict) -> list[str]:
        """Extract product images.

        The API returns a small preview; swap the size marker for the big one.
        """
        pic_url = product["picture"]
        bigger_img = pic_url.replace("preview_s", "preview_b")
        return [bigger_img]
| ar0ne/bg_deal | bgd/services/apis/twenty_first_vek.py | twenty_first_vek.py | py | 2,730 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "bgd.services.api_clients.JsonHttpApiClient",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "bgd.services.constants.GET",
"line_number": 23,
"usage_type": "argument"
},
... |
3097533124 | import rospy, math, numpy, tf
from collections import deque
from spencer_bagfile_tools.msg import AdditionalOdometryData
from dynamic_reconfigure.server import Server
from spencer_bagfile_tools.cfg import ReconstructOdometryConfig
from visualization_msgs.msg import MarkerArray, Marker
from geometry_msgs.msg import Point, Quaternion
from std_msgs.msg import ColorRGBA
from nav_msgs.msg import Odometry
class State(object):
    """A single odometry sample: planar pose, travelled distance, timestamp."""

    def __init__(self):
        # Planar pose relative to the odometry origin (meters / radians).
        self.x = self.y = self.theta = 0
        # Accumulated absolute travel distance in meters.
        self.totalDistance = 0
        self.stamp = rospy.Time(0)
class OdometryController(object):
    """Reconstruct wheel odometry from raw encoder-tick messages.

    Subscribes to AdditionalOdometryData, integrates differential-drive
    kinematics into a pose history, and publishes nav_msgs/Odometry plus an
    RViz MarkerArray visualising the reconstructed path.  Calibration
    multipliers are tunable at runtime via dynamic_reconfigure; changing them
    rebuilds the entire path from the cached message history.

    Fix over the original: the three Python-2-only `xrange` calls were
    replaced with `range` (identical behaviour on Python 2, required on 3).
    """

    def __init__(self):
        self.msgHistory = []            # all raw encoder messages (kept for full rebuilds)
        self.stateHistory = self.emptyStateHistory()
        self.previousMsg = self.previousState = None
        self.rebuildingEntirePath = False
        self.zeroPosition()

        # Robot geometry and encoder calibration constants.
        self.WHEEL_BASE = 0.665
        self.TICKS_PER_METER_LEFT = 56263.5
        self.TICKS_PER_METER_RIGHT = 57099.7

        # Number of timestamp text markers published last time, so stale ones
        # can be explicitly deleted in RViz.
        self.previousTimestampMarkerCount = 0

    def zeroPosition(self):
        # Seed the history with an origin state so previousState is always valid.
        self.stateHistory.append(State())
        self.previousState = self.stateHistory[0]

    def run(self):
        """Create publishers/subscribers and spin until node shutdown."""
        self.markerArrayPublisher = rospy.Publisher("/spencer_bagfile_tools/reconstructed_odom_path", MarkerArray, queue_size=1)
        self.odomPublisher = rospy.Publisher("/spencer/sensors/odom", Odometry, queue_size=3)

        reconfigureServer = Server(ReconstructOdometryConfig, self.reconfigure)

        topicName = "/spencer/sensors/additional_odom_data"
        self.subscriber = rospy.Subscriber(topicName, AdditionalOdometryData, self.additionalOdometryDataCallback)
        rospy.loginfo("Reconstructing odometry from " + topicName + ", now listening for messages...")
        rospy.spin()

    def additionalOdometryDataCallback(self, msg):
        """Integrate one encoder message, then publish odometry and markers."""
        if not self.rebuildingEntirePath:
            self.updateState(msg)
            self.msgHistory.append(msg)
            self.publishOdom()
            self.visualizePath()

    def reconfigure(self, config, level):
        """dynamic_reconfigure callback: store tuning values, maybe rebuild."""
        self.extraCalibOverallMultiplier = config["extra_calib_overall_multiplier"]
        self.extraCalibLeftMultiplier = config["extra_calib_left_multiplier"]
        self.lineWidth = config["line_width"]
        self.arrowLength = config["arrow_length"]
        self.showWaypoints = config["show_waypoints"]
        self.recalculatePath = config["recalculate_path"]
        # level > 0 distinguishes genuine parameter changes from the initial
        # callback the server fires at startup.
        if level > 0 and self.recalculatePath:
            self.rebuildEntirePath()
        return config

    def emptyStateHistory(self):
        # Limit max. state history length to prevent bad performance after driving for a while
        # NOTE: msgHistory might still grow unboundedly, but there's no way of avoiding that...
        # However, that is mainly a memory issue as the whole history is only processed in rebuildEntirePath()
        return deque(maxlen=5000)

    def rebuildEntirePath(self):
        """Re-integrate every cached message using the current calibration."""
        rospy.loginfo("Odometry parameters have changed! Rebuilding entire path!")
        if self.rebuildingEntirePath:
            return

        self.rebuildingEntirePath = True
        self.stateHistory = self.emptyStateHistory()
        self.zeroPosition()
        self.previousMsg = None

        for msg in self.msgHistory:
            self.updateState(msg)

        self.rebuildingEntirePath = False
        self.publishOdom()
        self.visualizePath()

    def updateState(self, msg):
        """Integrate one tick message into a new State (differential drive)."""
        newState = State()
        newState.stamp = msg.header.stamp

        # First message has no predecessor: treat both tick deltas as zero.
        previousLeftTicks = self.previousMsg.ticksLeft if self.previousMsg else msg.ticksLeft
        previousRightTicks = self.previousMsg.ticksRight if self.previousMsg else msg.ticksRight

        leftDiff = msg.ticksLeft - previousLeftTicks
        rightDiff = msg.ticksRight - previousRightTicks

        # Calculate metric travelled distances of both wheels and the base
        metersTravelledLeft = leftDiff * msg.calibOverallMultiplier * self.extraCalibOverallMultiplier * msg.calibLeftEncMultiplier * self.extraCalibLeftMultiplier / self.TICKS_PER_METER_LEFT
        metersTravelledRight = rightDiff * msg.calibOverallMultiplier * self.extraCalibOverallMultiplier / self.TICKS_PER_METER_RIGHT
        distance = (metersTravelledLeft + metersTravelledRight) / 2.0

        # Update position and bearing
        newState.theta = self.previousState.theta + (metersTravelledLeft - metersTravelledRight) / self.WHEEL_BASE
        newState.theta -= (int((newState.theta/(2*math.pi) ))) * 2*math.pi # clip to 2pi
        newState.totalDistance = self.previousState.totalDistance + math.fabs(distance)

        newState.x = self.previousState.x + distance * math.sin(newState.theta)
        newState.y = self.previousState.y + distance * math.cos(newState.theta)

        positionTolerance = 0.1 # in meters
        if math.hypot(newState.x - self.stateHistory[-1].x, newState.y - self.stateHistory[-1].y) > positionTolerance:
            # Do not cache every single state if the change in position is minimal, otherwise we'll soon run
            # out of memory (note we still store previousState, since it is needed by publishOdom() and updateState())
            self.stateHistory.append(newState)

        # Always track the latest integrated state, even when it was not
        # appended to the (distance-thinned) history above.
        self.previousState = newState
        self.previousMsg = msg

    def publishOdom(self):
        """Publish the latest integrated pose as nav_msgs/Odometry."""
        odom = Odometry()
        odom.header.stamp = self.previousMsg.header.stamp if self.previousMsg else rospy.Time.now()
        odom.header.frame_id = "odom"
        odom.pose.pose.position.x = self.previousState.x
        odom.pose.pose.position.y = self.previousState.y

        # Diagonal covariances: small uncertainty on pose, effectively
        # "unknown" (999999) on twist.  (range() replaces py2-only xrange().)
        for row in range(0, 6):
            for col in range(0, 6):
                odom.pose.covariance[6*row+col] = 0 if row != col else 0.1
                odom.twist.covariance[6*row+col] = 0 if row != col else 999999

        # theta is measured against the +y axis (x uses sin, y uses cos in
        # updateState), hence the -theta + pi/2 conversion to a standard yaw.
        q = tf.transformations.quaternion_from_euler(0, 0, -self.previousState.theta + math.pi/2)
        odom.pose.pose.orientation = Quaternion(x=q[0], y=q[1], z=q[2], w=q[3])

        if len(self.stateHistory) >= 2:
            # Crude velocity estimate: displacement since the last stored state.
            odom.twist.twist.linear.x = odom.pose.pose.position.x - self.stateHistory[-2].x
            odom.twist.twist.linear.y = odom.pose.pose.position.y - self.stateHistory[-2].y

        self.odomPublisher.publish(odom)

    def visualizePath(self):
        """Publish path, waypoints, timestamps and a velocity arrow to RViz."""
        if self.markerArrayPublisher.get_num_connections() <= 0:
            return  # nobody is listening; skip building the markers

        markerArray = MarkerArray()

        pathMarker = Marker()
        pathMarker.header.stamp = rospy.Time.now()
        pathMarker.header.frame_id = "odom"
        pathMarker.ns = "Path"
        pathMarker.type = Marker.LINE_STRIP
        pathMarker.id = 0
        pathMarker.color = ColorRGBA(r=1, g=1, a=1)
        pathMarker.scale.x = 0.05 * self.lineWidth

        waypointMarker = Marker()
        waypointMarker.header = pathMarker.header
        waypointMarker.ns = "Waypoints"
        waypointMarker.type = Marker.SPHERE_LIST
        waypointMarker.id = 1
        waypointMarker.color = ColorRGBA(r=1, g=1, a=1)
        waypointMarker.scale.x = waypointMarker.scale.y = 0.1 * self.lineWidth

        lastWaypointTime = float("-inf")
        lastWaypointPos = (float("99999"), float("99999"))

        # Generate path and waypoints
        for state in self.stateHistory:
            pathMarker.points.append(Point(x=state.x, y=state.y))

            # Drop a waypoint sphere + timestamp label at most every 5 s and
            # only once the robot moved more than 1 m since the last one.
            if state.stamp.to_sec() - lastWaypointTime > 5 and self.showWaypoints:
                dx = state.x - lastWaypointPos[0]
                dy = state.y - lastWaypointPos[1]
                if math.sqrt(dx*dx + dy*dy) > 1:
                    lastWaypointTime = state.stamp.to_sec()
                    lastWaypointPos = (state.x, state.y)
                    waypointMarker.points.append(Point(x=state.x, y=state.y))

                    timestampMarker = Marker()
                    timestampMarker.header = waypointMarker.header
                    timestampMarker.ns = "Timestamps"
                    timestampMarker.type = Marker.TEXT_VIEW_FACING
                    timestampMarker.id = 3 + len(markerArray.markers)
                    timestampMarker.color = ColorRGBA(r=0.6, a=1)
                    timestampMarker.scale.z = 0.1 * self.lineWidth
                    timestampMarker.pose.position.x = state.x
                    timestampMarker.pose.position.y = state.y
                    timestampMarker.text = "%.1f" % state.stamp.to_sec()
                    markerArray.markers.append(timestampMarker)

        # Delete old markers
        currentTimestampMarkerCount = len(markerArray.markers)
        for i in range(0, self.previousTimestampMarkerCount - currentTimestampMarkerCount):
            timestampMarker = Marker()
            timestampMarker.header = waypointMarker.header
            timestampMarker.ns = "Timestamps"
            timestampMarker.action = Marker.DELETE
            timestampMarker.id = 3 + currentTimestampMarkerCount + i
            markerArray.markers.append(timestampMarker)
        self.previousTimestampMarkerCount = currentTimestampMarkerCount

        # Velocity arrow
        velocitySmoothingNoPoints = 5
        if len(pathMarker.points) > velocitySmoothingNoPoints:
            arrowHeadMarker = Marker()
            arrowHeadMarker.header = pathMarker.header
            arrowHeadMarker.ns = "Path-ArrowHead"
            arrowHeadMarker.type = Marker.LINE_STRIP
            arrowHeadMarker.id = 2
            arrowHeadMarker.color = ColorRGBA(r=1, g=1, a=1)
            arrowHeadMarker.scale.x = arrowHeadMarker.scale.y = 0.1 * self.lineWidth

            # Smooth the heading over the last few points, then draw two
            # flank segments whose opening angle widens with speed.
            pointTip = numpy.array([pathMarker.points[-1].x, pathMarker.points[-1].y])
            lastVelocity = numpy.array([pathMarker.points[-1].x - pathMarker.points[-velocitySmoothingNoPoints].x,
                                        pathMarker.points[-1].y - pathMarker.points[-velocitySmoothingNoPoints].y])
            speed = numpy.linalg.norm(lastVelocity)
            lastVelocity /= speed
            lastVelocity *= 0.3 * self.arrowLength

            steepnessAngle = numpy.interp(speed, [0.03, 0.3], [0, 75])
            pointLeft = pointTip + self.rotateVector(lastVelocity, 90 + steepnessAngle )
            pointRight = pointTip + self.rotateVector(lastVelocity, -(90 + steepnessAngle) )

            arrowHeadMarker.points.append(Point(x=pointLeft[0], y=pointLeft[1]))
            arrowHeadMarker.points.append(Point(x=pointTip[0], y=pointTip[1]))
            arrowHeadMarker.points.append(Point(x=pointRight[0], y=pointRight[1]))

            markerArray.markers.append(arrowHeadMarker)

        markerArray.markers.append(pathMarker)
        markerArray.markers.append(waypointMarker)
        self.markerArrayPublisher.publish(markerArray)

    def rotateVector(self, vector, angleDeg):
        """Rotate a 2D vector by angleDeg degrees (standard rotation matrix)."""
        theta = (angleDeg/180.) * numpy.pi
        rotMatrix = numpy.array([[numpy.cos(theta), -numpy.sin(theta)],
                                 [numpy.sin(theta), numpy.cos(theta)]])
        return numpy.dot(rotMatrix, vector)
if __name__ == '__main__':
    # Node entry point: register with ROS, then block in rospy.spin()
    # inside run() until shutdown.
    rospy.init_node("reconstruct_odometry")
    odometryController = OdometryController()
    odometryController.run()
| spencer-project/spencer_people_tracking | utils/spencer_bagfile_tools/scripts/reconstruct_odometry.py | reconstruct_odometry.py | py | 11,138 | python | en | code | 620 | github-code | 36 | [
{
"api_name": "rospy.Time",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "rospy.Publisher",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "visualization_msgs.msg.MarkerArray",
"line_number": 39,
"usage_type": "argument"
},
{
"api_name": "ros... |
71903386024 | from typing import Tuple, Union, Dict
import numpy as np
import torch as th
from gym import spaces
from torch.nn import functional as F
def preprocess_obs(obs: Union[th.Tensor, Dict, Tuple], observation_space: spaces.Space,
                   normalize_images: bool = True, allow_unexpected: bool = True) -> th.Tensor:
    """
    Preprocess observation to be fed to a neural network.

    For images, it normalizes the values by dividing them by 255 (to have values in [0, 1]).
    For discrete observations, it creates a one-hot vector.

    :param obs: (th.Tensor) Observation
    :param observation_space: (spaces.Space)
    :param normalize_images: (bool) Whether to normalize images or not
        (True by default)
    :param allow_unexpected: allow keys that are not present in the observation
        space (dict obs only); propagated to nested spaces.
    :return: (th.Tensor)
    """
    if isinstance(observation_space, spaces.Box):
        # uint8 Boxes are treated as images in [0, 255].
        if observation_space.dtype == np.uint8 and normalize_images:
            return obs.float() / 255.0
        return obs.float()
    elif isinstance(observation_space, spaces.Discrete):
        # One hot encoding and convert to float to avoid errors
        return F.one_hot(obs.long(), num_classes=observation_space.n).float()
    elif isinstance(observation_space, spaces.MultiDiscrete):
        # Tensor concatenation of one hot encodings of each Categorical sub-space
        return th.cat(
            [
                F.one_hot(obs_.long(), num_classes=int(observation_space.nvec[idx])).float()
                for idx, obs_ in enumerate(th.split(obs.long(), 1, dim=1))
            ],
            dim=-1,
        ).view(obs.shape[0], sum(observation_space.nvec))
    elif isinstance(observation_space, spaces.MultiBinary):
        return obs.float()
    elif isinstance(observation_space, spaces.Dict):
        processed_obs = {}
        for k, o in obs.items():
            if k in observation_space.spaces:
                # Bug fix: the recursion previously dropped `allow_unexpected`,
                # so nested dicts always fell back to the default (True).
                processed_obs[k] = preprocess_obs(o, observation_space.spaces[k],
                                                  normalize_images, allow_unexpected)
            elif allow_unexpected:
                # Key absent from the space: apply the uint8-image convention directly.
                if o.dtype == th.uint8:
                    o = o / 255.0
                processed_obs[k] = o.float()
            else:
                raise AttributeError('key {} not in observation space, set allow_unexpected=True to override'.format(k))
        return processed_obs
    elif isinstance(observation_space, spaces.Tuple):
        # Same fix as the Dict branch: forward `allow_unexpected`.
        return tuple(preprocess_obs(o, os, normalize_images, allow_unexpected)
                     for o, os in zip(obs, observation_space.spaces))
    else:
        raise NotImplementedError()
{
"api_name": "typing.Union",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "torch.Tensor",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "typing.Dict",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "typing.Tuple",
"line_numb... |
#!/usr/bin/env python3
"""Probe a CTF target by sending a PHP web-shell payload in the User-Agent.

Assumes the target's index.php passes the logged User-Agent through a
vulnerable include/echo, so the payload runs `id` server-side.
"""
import requests

url = "http://10.10.90.182:8000"          # plain-HTTP target (kept for reference)
url_ = "https://10.10.90.182:1443/index.php"

# Bug fix: the original payload was invalid PHP -- `system($_REQUEST["c"];)`
# put the semicolon inside the call, so the shell never executed.
header = {'User-Agent': '<?php echo system($_REQUEST["c"]); ?>'}

r = requests.get(url_ + "?c=id", headers=header, verify=False)
print(r.text)
| lodwig/TryHackMe | Probe/check.py | check.py | py | 258 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 9,
"usage_type": "call"
}
] |
40193585375 | import xlrd
from account.backend.services import StateService
def read_data_from_excel(excel_file):
    """Import album rows from the first sheet of an Excel workbook.

    Expects the columns artist, album_title, genre; row 0 is treated as the
    header and skipped.  Creates one Album per data row in the 'Active' state
    and returns 'Success' when done.
    """
    workbook = xlrd.open_workbook(str(excel_file))
    sheet = workbook.sheet_by_index(0)
    # Imported lazily, matching the original (avoids module-load side effects).
    from music.models import Album
    active_state = StateService().get(name = 'Active')
    for row_idx in range(1, sheet.nrows):
        album = Album(
            artist = sheet.cell_value(row_idx, 0),
            album_title = sheet.cell_value(row_idx, 1),
            genre = sheet.cell_value(row_idx, 2),
            state = active_state)
        print('album added')
        album.save()
    return 'Success'
| Trojkev/kev-music | music/backend/albums_script.py | albums_script.py | py | 811 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "xlrd.open_workbook",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "account.backend.services.StateService",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "music.models.Album",
"line_number": 25,
"usage_type": "call"
}
] |
30372123851 | import sys
import cv2
import numpy as np
import Analyzer
from learning import Parameters
import FeatureDebug
WINDOW = 'Options'
PARAM1 = '1) Param 1'
PARAM2 = '2) Param 2'
MIN_RAD = '3) Minimum Radius'
MAX_RAD = '4) Maximum Radius'
WINDOW_BOUND = '5) Top Left Window Px'
WINDOW_BOUND2 = '6) Top Right Window px'
HOUGH_PARAM1 = 1
HOUGH_MAX_PARAM2 = 300
HOUGH_MIN_RADIUS = 0
HOUGH_MAX_RADIUS = 40
HOUGH_MIN_DIST = 20 # the minimum distance two detected circles can be from one another
HOUGH_MAX_ATTEMPTS = 100 #define the number of attempts to find at least one circle
CANNY_LOW = '7) Canny LB'
CANNY_HIGH = '8) Canny UP'
p1 = 0
p2 = 0
minR = 0
maxR = 0
cannyLb = 0
cannyUb = 0
def nothing(dummyVar = None):
    """No-op callback required by cv2.createTrackbar; ignores its argument."""
    return None
def initHoughOptions(cameraType, callback):
    """Open an OpenCV trackbar window for live-tuning Hough/Canny/threshold values.

    Blocks in a polling loop until 'q' (close Analyzer, return) or 'e'
    (hard process exit) is pressed.  Whenever a slider moves, the changed
    parameter family is pushed to `callback`.
    """
    if FeatureDebug.TRACKBAR:
        # Current slider values are cached in module globals so each loop
        # iteration can diff them against the live trackbar positions.
        global p1, p2, minR, maxR, cannyUb, cannyLb, adaptive1
        #get default start values
        p1, p2, minR, maxR = Parameters.HoughParamaters.getParams(cameraType)
        cannyLb, cannyUb = Parameters.Canny.getParams(cameraType)
        adaptive1 = 11
        # Create a black image, a window
        img = np.zeros((200,300,3), np.uint8)
        cv2.namedWindow(WINDOW)
        cv2.createTrackbar(PARAM1, WINDOW, 0, HOUGH_PARAM1, nothing)
        cv2.createTrackbar(MIN_RAD, WINDOW, 0, 255, nothing)
        cv2.createTrackbar(PARAM2, WINDOW, 0, HOUGH_MAX_PARAM2, nothing)
        cv2.createTrackbar(MAX_RAD, WINDOW, 0, HOUGH_MAX_RADIUS, nothing)
        cv2.createTrackbar(WINDOW_BOUND, WINDOW, 0, 100, nothing)
        cv2.createTrackbar(CANNY_LOW, WINDOW, 0, 255, nothing)
        cv2.createTrackbar(CANNY_HIGH, WINDOW, 0, 255, nothing)
        cv2.createTrackbar('Block Size', WINDOW, -21, 21, nothing)
        # Seed every slider with the defaults loaded above.
        cv2.setTrackbarPos(PARAM1, WINDOW, p1)
        cv2.setTrackbarPos(PARAM2, WINDOW, p2)
        cv2.setTrackbarPos(MIN_RAD, WINDOW, minR)
        cv2.setTrackbarPos(MAX_RAD, WINDOW, maxR)
        cv2.setTrackbarPos(CANNY_LOW, WINDOW, 35)
        cv2.setTrackbarPos(CANNY_HIGH, WINDOW, 150)
        cv2.setTrackbarPos('Block Size', WINDOW, 11)
        while(1):
            cv2.imshow(WINDOW,img)
            cv2.moveWindow(WINDOW, 0, 500)
            k = cv2.waitKey(1) & 0xFF
            if k == ord('q'):
                Analyzer.close()
                break
            elif k == ord('e'):
                sys.exit('Force Close')
            # Read every slider position this tick...
            p1Temp = cv2.getTrackbarPos(PARAM1, WINDOW)
            p2Temp = cv2.getTrackbarPos(PARAM2, WINDOW)
            minRTemp = cv2.getTrackbarPos(MIN_RAD, WINDOW)
            maxRTemp = cv2.getTrackbarPos(MAX_RAD, WINDOW)
            cannyLbTemp = cv2.getTrackbarPos(CANNY_LOW, WINDOW)
            cannyUbTemp = cv2.getTrackbarPos(CANNY_HIGH, WINDOW)
            adaptive1Temp = cv2.getTrackbarPos('Block Size', WINDOW)
            # ...then flag which parameter families actually changed.
            updatedHoughCircle = False
            updatedCanny = False
            updatedAdaptive = False
            if p1Temp != p1:
                p1 = p1Temp
                updatedHoughCircle = True
            if p2Temp != p2:
                p2 = p2Temp
                updatedHoughCircle = True
            if minRTemp != minR:
                minR = minRTemp
                updatedHoughCircle = True
            if maxRTemp != maxR:
                maxR = maxRTemp
                updatedHoughCircle = True
            if cannyLbTemp != cannyLb:
                cannyLb = cannyLbTemp
                updatedCanny = True
            if cannyUbTemp != cannyUb:
                cannyUb = cannyUbTemp
                updatedCanny = True
            if adaptive1Temp != adaptive1:
                adaptive1 = adaptive1Temp
                updatedAdaptive = True
            # Notify the caller once per changed family.
            if updatedHoughCircle:
                callback(Parameters.Trackbar.Hough, param1 = p1, param2 = p2, minRadius = minR, maxRadius = maxR)
                pass
            if updatedCanny:
                callback(Parameters.Trackbar.Canny, cannyLb = cannyLb, cannyUb = cannyUb)
                pass
            if updatedAdaptive:
                callback(Parameters.Trackbar.AdaptiveThreshold, blockSize = adaptive1)
        cv2.destroyWindow(WINDOW)
| vicidroiddev/eyeTracking | Fokus/debug/DebugOptions.py | DebugOptions.py | py | 4,149 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "FeatureDebug.TRACKBAR",
"line_number": 42,
"usage_type": "attribute"
},
{
"api_name": "learning.Parameters.HoughParamaters.getParams",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "learning.Parameters.HoughParamaters",
"line_number": 47,
"usage_... |
20961161383 | # Author:HU YUE
import pickle
import os
import sys
import logging
import random
BASE_DIR=os.path.dirname(os.path.dirname( os.path.abspath(__file__) ))
sys.path.append(BASE_DIR)
def loadd(f_all, name):
    """Serialize f_all to '<name>.txt' with pickle, overwriting any old file."""
    target = "%s.txt" % name
    with open(target, 'wb') as handle:
        pickle.dump(f_all, handle)
# def nadd(wood):
# with open("%s.txt"%name, "rb")as t:
# # stuffs=stuffs
# t_all = pickle.load(t)
# print(t_all)
# # print(type(t_all))
# t_all.append(wood)
# # print(t_all)
# loadd(t_all)
logger = logging.getLogger('TEST_LOG')
def coller1(name):  # Admin module: lets an administrator manage user records!
    """Interactive admin console loop.

    Menu-driven: query/create/modify/delete/freeze users, each stored as a
    pickled dict in '<user>.txt'.  Loops until option 5 (quit) is chosen.
    """
    while True:
        print("____管理员系统1___\n"
              "0.查询用户密码\n"
              "1.创建用户和密码\n"
              "2.修改用户信息\n"
              "3.删除用户\n"
              "4.冻结用户\n"
              "5.退出")
        number=input("输入数字进行操作:")
        if number=="0":
            # Option 0: look up and print a user's stored record.
            name=input("输入用户名:")
            if not os.path.exists("%s.txt"%name):
                print("用户不存在!")
            else:
                with open("%s.txt"%name,"rb")as f:
                    f_all=pickle.load(f)
                print(f_all)
                logger.info('您查询了%s的用户信息。'%name)
        if number=="1":
            # Option 1: create a new user file with default fields.
            name=input("创建新用户:")
            if os.path.exists("%s.txt"%name):
                print("用户已存在请重新输出入")
            else:
                open("%s.txt"%name, 'w').close()
                password=input("新用户密码:")
                new_user={"card_number":"",
                          "user":name,
                          "password":password,
                          "Credit_line":10000,
                          "balance":0,
                          "repayment":0,
                          }
                for i in range(6):  # randomly generate a credit-card number!
                    each = random.randrange(0, 9)
                    tmp = chr(random.randint(65, 90))
                    new_user["card_number"]+= str(each) + str(tmp)
                print("用户账号已创建!")
                print(new_user)
                with open("%s.txt"%name,"wb")as f:
                    pickle.dump(new_user,f)
                logger.info('您创建了%s新用户!。' % name)
        if number=="2":
            # Option 2: sub-menu for editing one field of an existing user.
            name=input("输入需要修改的用户名:")
            if os.path.exists("%s.txt" % name):
                n=0
                while n<3:
                    print("____修改用户信息___\n"
                          "0.修改用户password\n"
                          "1.修改用户Credit_line\n"
                          "2.修改用户balance\n"
                          "3.修改用户repayment\n"
                          "4.返回上层菜单")
                    with open("%s.txt" % name, "rb")as f:  # show the user's current record
                        f_all = pickle.load(f)
                    print(f_all)
                    number1 = input("选择修改:")
                    if number1 == "0":
                        new = input("新密码:")
                        with open("%s.txt" % name, "rb")as f:
                            f_all = pickle.load(f)
                        f_all["password"] = new
                        loadd(f_all, name)
                        logger.info('您对%s的密码进行了修改,新密码为%s!。' % (name,new))
                    if number1 == "1":
                        new = input("新额度:")
                        with open("%s.txt" % name, "rb")as f:
                            f_all = pickle.load(f)
                        f_all["Credit_line"] = new
                        loadd(f_all, name)
                        logger.info('您对%s的额度进行了修改,新额度为%s!。' % (name, new))
                    if number1 == "2":
                        new = input("新余额:")
                        with open("%s.txt" % name, "rb")as f:
                            f_all = pickle.load(f)
                        f_all["balance"] = new
                        loadd(f_all, name)
                        logger.info('您对%s的余额进行了修改,新余额为%s!。' % (name, new))
                    if number1 == "3":
                        new = input("新还款金度:")
                        with open("%s.txt" % name, "rb")as f:
                            f_all = pickle.load(f)
                        f_all["repayment"] = new
                        loadd(f_all, name)
                        logger.info('您对%s的还款金度进行了修改,新还款金额为%s!。' % (name, new))
                    if number1 == "4":
                        n=3  # leave the edit sub-menu
            else:
                print("要修改的用户不存在!请确认后输入")
        if number=="3":
            # Option 3: delete a user's file from disk.
            name=input("输入用户名:")
            if os.path.exists("%s.txt"%name):
                os.remove("%s.txt"%name)
                logger.info('您删除了%s的用户信息!。' % name)
            else:
                print("要删除的用用户不存在!")
        if number=="4":
            # Option 4: append a user to the frozen-user list (forzen_user.txt).
            if not os.path.exists("forzen_user.txt"):
                open("forzen_user.txt","w").close()
                forzen=[]
                with open("forzen_user.txt","wb")as f:
                    pickle.dump(forzen,f)
            else:
                with open("forzen_user.txt", "rb")as f:
                    f_all=pickle.load(f)
                print(f_all)  # debug output
            # NOTE(review): when the frozen file was just created above, f_all
            # is never assigned before the membership test — likely a latent
            # NameError on first use; confirm intended flow.
            dname=input("需冻结账户:")
            if dname in f_all:
                print("用户已冻结!")
                continue
            else:
                with open("forzen_user.txt", "wb")as t:
                    f_all.append(dname)
                    pickle.dump(f_all,t)
                logger.info('您冻结了%s用户!。' % name)
        if number=="5":
            break
# os.path.exists("user_ma.txt")
#
# print(os.path.exists("user_ma.txt"))
# coller1("hy")
| 001fly/-Module-two-operation | Atm/core/account1.py | account1.py | py | 6,440 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.abspath",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.path.append",
"line... |
14991188051 | #
# Copyright (C) 2012 ESIROI. All rights reserved.
# Dynamote is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Dynamote is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Dynamote. If not, see <http://www.gnu.org/licenses/>.
#
import msgpack
import time
import sys
import zmq
from random import choice
class stb():
    """Set-top-box simulator whose messaging runs over ZeroMQ sockets."""
    # Class-level lists document the legal values for each setting; the
    # instance attributes set in __init__ shadow them with a current value.
    mode = ["mode_radio", "mode_tv", "mode_vod", "mode_rss", "mode_dvd"]
    Channel = ["channel1", "channel2", "channel3", "channel4"]
    subtitle = ["on", "off"]
    power = ["on", "off"]
    # Attribut for parental controlling
    lock = ["false", "true"]
    mute = ["false", "true" ]
    info_bar = ["false", "true"]

    def __init__(self, mode, channel, subtitle):
        # NOTE(review): the mode/channel/subtitle parameters are ignored —
        # the state below is hard-coded; confirm that is intentional.
        self.mode = "mode_tv"
        self.channel = "channel1"
        self.subtitle = "off"
        self.power = "on"
        self.lock = "false"
        self.mute = "false"
        self.info_bar = "true"
        # All sockets/context are published as module globals so the other
        # methods (and external code) can reach them.
        global dynamote
        global stb_rep
        global stb_pub
        global stb_sub
        global stb_pull
        global stb_push
        global port
        dynamote = zmq.Context()
        stb_pub = dynamote.socket(zmq.PUB)
        stb_sub = dynamote.socket(zmq.SUB)
        stb_pull = dynamote.socket(zmq.PULL)
        stb_push = dynamote.socket(zmq.PUSH)
        default_port = "5000"
        ################################ MODE SERVEUR ###############################
        # Autoconfiguration ip_address for connect is 169.254.1.2 on the port 5000
        ip_address = str("169.254.1.2")
        # Provide port as command line argument to run server at two different ports
        if len(sys.argv)> 1:
            port = sys.argv[1]
            int(port)  # NOTE(review): result discarded — validation only?
        if len(sys.argv) > 2:
            port1 = sys.argv[2]
            int(port1)
        if len(sys.argv) > 3:
            port2 = sys.argv[3]
            int(port2)
        global stb_description
        stb_description ="Set top box is on this",ip_address,"and",default_port
        print(stb_description)
        # Bind link to dynamic discovery

    ################################### Description #######################################
    def ready_to_process(self,port):
        """Serve the device description over REP, then probe a peer over REQ.

        NOTE(review): original indentation lost — the REQ "client" section is
        assumed to live inside the while loop because it reuses
        msg_description; confirm against the upstream source.
        """
        stb_rep = dynamote.socket(zmq.REP)
        stb_rep.bind("tcp://127.0.0.1:%s" %port)
        # NOTE(review): this packs str(<file object>), not the file's
        # *contents* — probably meant fichier_description.read(); confirm.
        fichier_description = open ( "stb-device-api-description.json", "r")
        msg_packed = msgpack.packb(str(fichier_description))
        while True:
            msg = stb_rep.recv()
            print( "Got",msg)
            time.sleep(1)
            msg_description = str(stb_description)
            stb_rep.send(msg_packed)
            ############################### MODE CLIENT ###################################
            stb_req = dynamote.socket(zmq.REQ)
            print ("Attempting to connect to other process ......")
            #Connect link
            stb_req.connect("tcp://localhost:5001")
            for request in range(2):
                stb_req.send(msg_description)
                print ("Sending message")
                # Get the reply.
                message = stb_req.recv()
                print ("Received reply ", request, "[", message, "]")

    ####################### Publish_subscribe ######################################
    def subscribe_to_dvd(self,port):
        #DVD subcribe fucntion
        """Subscribe to DVD events on `port` and switch the box to DVD mode."""
        print ( " Middleware waiting for publish ....")
        stb_sub.connect("tcp://localhost:%s"%port)
        stb_sub.setsockopt_unicode(zmq.SUBSCRIBE, "on")
        self.mode = "mode_dvd"
        self.channel = "null"
        self.subtitle = "on"
        self.power = "on"
        self.lock = "false"
        self.mute = "false"
        self.infobar = "false"
        for i in range (3):
            print("............There is a DVD disc in the device", stb_sub.recv())

    def subscribe_to_tv(self, port):
        # TV subscribe function
        """Subscribe to TV events (blocks until three messages arrive)."""
        print ( "Middleware waiting for publish ...")
        # NOTE(review): the "%s" placeholder is never filled with `port`, so
        # connect targets a literal "%s" — likely meant
        # "tcp://localhost:%s" % port; cannot fix in a doc-only pass.
        stb_sub.connect("tcp://localhost:%s")
        stb_sub.setsockopt(zmq.SUBSCRIBE, "")
        for i in range (3):
            print("............There is a TV which is sense", stb_sub.recv())
| maxajeanaimee/Domotique_multimedia | stb_process.py | stb_process.py | py | 4,488 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "zmq.Context",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "zmq.PUB",
"line_number": 54,
"usage_type": "attribute"
},
{
"api_name": "zmq.SUB",
"line_number": 55,
"usage_type": "attribute"
},
{
"api_name": "zmq.PULL",
"line_number": 5... |
850417471 | #pylint:disable=no-member
import cv2 as cv

# Blurring smooths an image by suppressing noise: each algorithm slides a
# small kernel window across the whole image.
img = cv.imread('../Resources/Photos/cats.jpg')
cv.imshow('Cats', img)

# (window title, blurred variant) pairs — shown in the original demo order.
blur_variants = (
    ('Average Blur', cv.blur(img, (3, 3))),
    ('Gaussian Blur', cv.GaussianBlur(img, (3, 3), 0)),
    ('Median Blur', cv.medianBlur(img, 3)),
    ('Bilateral', cv.bilateralFilter(img, 10, 35, 25)),
)
for title, blurred in blur_variants:
    cv.imshow(title, blurred)
cv.waitKey(0) | dheeraj120501/Lets-Code | 06-Cool Things Computer Can't Do/03-Computer Vision with OpenCV/2-Advanced/03-blurring.py | 03-blurring.py | py | 677 | python | en | code | 3 | github-code | 36 | [
{
"api_name": "cv2.imread",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "cv2.blur",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "cv2.imshow",
"line_number": 13,
... |
34620777008 | import json
def make_func(name, inputs, outputs, mutability):
    # For now, pass all hints, and I'll manually drop those that aren't needed.
    """Render one Cairo wrapper function for an ABI entry.

    `name` carries a trailing character (stripped for the wrapper's own
    name but kept for the inner call).  `inputs`/`outputs` are lists of
    {'name', 'type'} dicts; `mutability` becomes the decorator.
    """
    arg_sig = ', '.join(f"{field['name']}: {field['type']}" for field in inputs)
    ret_sig = ', '.join(f"{field['name']}: {field['type']}" for field in outputs)
    header = f"""
@{mutability}
func {name[0:-1]}{{syscall_ptr: felt*, pedersen_ptr: HashBuiltin*, bitwise_ptr: BitwiseBuiltin*, range_check_ptr
}} ({arg_sig}) -> ({ret_sig}):"""
    call_args = ', '.join(field['name'] for field in inputs)
    # With outputs we must capture and re-return them; without, just call.
    if outputs:
        ret_names = ', '.join(field['name'] for field in outputs)
        body = f"""
let ({ret_names}) = {name}({call_args})
return ({ret_names})
end"""
    else:
        body = f"""
{name}({call_args})
return ()
end"""
    return header + body
def generate(input_contract, output_path):
    """Generate a Cairo interface file wrapping every function of a contract.

    input_contract: contract module name under contracts/; its ABI is read
        from artifacts/abis/<input_contract>.json.
    output_path: path of the interface file to write.
    """
    abi_path = f"artifacts/abis/{input_contract}.json"
    # Context manager so the ABI handle is closed deterministically — the
    # original json.load(open(...)) leaked the file descriptor.
    with open(abi_path, "r") as abi_file:
        abi = json.load(abi_file)
    codeparts = []
    imports = []
    structs = []
    for part in abi:
        # Collect custom struct names (Uint256 comes from elsewhere).
        if part["type"] == "struct" and part["name"] != 'Uint256':
            structs.append(part["name"])
        if part["type"] != "function":
            continue
        # Entries without stateMutability are treated as external (mutating).
        if "stateMutability" not in part:
            codeparts.append(make_func(part["name"], part["inputs"], part["outputs"], "external"))
        else:
            codeparts.append(make_func(part["name"], part["inputs"], part["outputs"], part["stateMutability"]))
        imports.append(part["name"])
    with open(output_path, "w") as f:
        f.write("""
%lang starknet
from starkware.cairo.common.cairo_builtins import HashBuiltin, SignatureBuiltin, BitwiseBuiltin
""")
        f.write(f"from contracts.{input_contract} import (\n\t" + ',\n\t'.join(imports) + '\n)\n')
        f.write("from contracts.types import (\n\t" + ',\n\t'.join(structs) + '\n)\n')
        for part in codeparts:
            f.write(part)
            f.write("\n")
    print("Wrote to ", output_path)
| briqNFT/briq-protocol | briq_protocol/generate_interface.py | generate_interface.py | py | 2,004 | python | en | code | 63 | github-code | 36 | [
{
"api_name": "json.load",
"line_number": 24,
"usage_type": "call"
}
] |
2848097490 | import tensorflow as tf
import argparse
import pandas as pd
import numpy as np
from PIL import Image, ImageDraw, ImageEnhance
from tqdm import tqdm
from model import *
from losses import *
import albumentations as albu
args = argparse.ArgumentParser(description='Process Training model')
args.add_argument('-i','--img_dir', type=str, help='images_directory', required=True)
args.add_argument('-m','--model_dir', type=str, help='model_directory', required=True)
args.add_argument('-s','--resized_size', type=int,help='resized_size', required=True)
args.add_argument('-a','--annotations', type=str,help='annotations_file', required=True)
args.add_argument('-e','--epochs', type=int,help='epochs', required=True)
argumens = args.parse_args()
#Create Config
class config:
    """Run configuration derived from the parsed CLI arguments."""
    annotations_file = argumens.annotations
    image_dir = argumens.img_dir + '/'
    # Source images are 1000x1000; labels are rescaled by resized_size/image_size.
    image_size = 1000
    resized_size = argumens.resized_size
    # Fraction of the image ids used for training (the rest is validation).
    train_ratio = 0.8
    checkpoint = argumens.model_dir + '/'
    saved_model = argumens.model_dir + '/object_detection_model.h5'
#Load annotations file
labels = pd.read_csv(config.annotations_file)
print(labels.head())
#ground labels base on images_id
def group_boxes(group):
    """Parse a group's '(x,y,w,h)' bbox strings into a float ndarray.

    Expects a DataFrame group with a 'yolo_bbox' column; returns one
    [x, y, w, h] row per record.
    """
    columns = group['yolo_bbox'].str.split(',', expand=True)
    columns[0] = columns[0].str.slice(start=1)   # drop the leading '('
    columns[3] = columns[3].str.slice(stop=-1)   # drop the trailing ')'
    return columns.values.astype(float)
labels = labels.groupby('image_id').apply(group_boxes)
#spit data to train and val
train_idx = round(len(np.unique(labels.index.values)) * config.train_ratio)
train_image_ids = np.unique(labels.index.values)[0: train_idx]
val_image_ids = np.unique(labels.index.values)[train_idx:]
def load_image(image_id):
    """Load an image by id from config.image_dir, resized to the model input size."""
    image = Image.open(config.image_dir + image_id)
    image = image.resize((config.resized_size, config.resized_size))
    return np.asarray(image)
#Loading Train data
print("Loading Training data")
train_pixels = {}
train_labels = {}
for image_id in tqdm(train_image_ids):
train_pixels[image_id] = load_image(image_id)
train_labels[image_id] = labels[image_id].copy() * (config.resized_size/config.image_size)
#Loading Val data
print("Loading Validation data data")
val_pixels = {}
val_labels = {}
for image_id in tqdm(val_image_ids):
val_pixels[image_id] = load_image(image_id)
val_labels[image_id] = labels[image_id].copy() * (config.resized_size/config.image_size)
model = build_model(config.resized_size,config.resized_size)
print(model.summary())
#Create Data Generator
class DataGenerator(tf.keras.utils.Sequence):
    """Keras sequence yielding (image batch, YOLO label grid) batches.

    NOTE(review): several methods (__len__, on_epoch_end, __getitem__, the
    augmentation pipelines, ...) are attached to this class later at module
    level — the class is incomplete until those assignments run.
    """
    def __init__(self, image_ids, image_pixels, labels=None, batch_size=1, shuffle=False, augment=False):
        self.image_ids = image_ids
        self.image_pixels = image_pixels  # dict: image_id -> pixel array
        self.labels = labels              # dict: image_id -> bounding boxes
        self.batch_size = batch_size
        self.shuffle = shuffle
        self.augment = augment
        self.on_epoch_end()  # build the initial index order
        self.image_grid = self.form_image_grid()

    def form_image_grid(self):
        """Return a (rows, cols, 4) array of [x, y, w, h] cells tiling the input image."""
        image_grid = np.zeros((model.output_shape[1], model.output_shape[2], 4))
        # x, y, width, height
        cell = [0, 0, config.resized_size / model.output_shape[1], config.resized_size / model.output_shape[2]]
        for i in range(0, model.output_shape[1]):
            for j in range(0, model.output_shape[2]):
                image_grid[i ,j] = cell
                cell[0] = cell[0] + cell[2]  # advance one cell to the right
            # wrap to the start of the next row
            cell[0] = 0
            cell[1] = cell[1] + cell[3]
        return image_grid
def __len__(self):
    """Number of full batches per epoch; attached to DataGenerator later."""
    return int(np.floor(len(self.image_ids) / self.batch_size))
def on_epoch_end(self):
    """Recompute the index order for the next epoch (shuffled when enabled).

    Attached to DataGenerator at module level; called from __init__ and by
    Keras at the end of each epoch.
    """
    self.indexes = np.arange(len(self.image_ids))
    # Truthiness test instead of `== True` (PEP 8 E712).
    if self.shuffle:
        np.random.shuffle(self.indexes)
DataGenerator.__len__ = __len__
DataGenerator.on_epoch_end = on_epoch_end
DataGenerator.train_augmentations = albu.Compose([albu.RandomSizedCrop(
min_max_height=(config.resized_size, config.resized_size),
height=config.resized_size, width=config.resized_size, p=0.8),
albu.OneOf([
albu.Flip(),
albu.RandomRotate90()], p=1),
albu.OneOf([
albu.HueSaturationValue(),
albu.RandomBrightnessContrast()], p=1),
albu.OneOf([
albu.GaussNoise()], p=0.5),
albu.Cutout(
num_holes=8,
max_h_size=16,
max_w_size=16,
p=0.5
),
albu.CLAHE(p=1),
albu.ToGray(p=1),
], bbox_params={'format': 'coco', 'label_fields': ['labels']})
DataGenerator.val_augmentations = albu.Compose([
albu.CLAHE(p=1),
albu.ToGray(p=1),
])
def __getitem__(self, index):
    """Return batch `index` as (X, y); attached to DataGenerator later."""
    # Slice this batch's (possibly shuffled) indexes, resolve to image ids.
    indexes = self.indexes[index * self.batch_size:(index + 1) * self.batch_size]
    batch_ids = [self.image_ids[i] for i in indexes]
    X, y = self.__data_generation(batch_ids)
    return X, y
def __data_generation(self, batch_ids):
    """Build the (X, y) arrays for one batch of image ids; attached to DataGenerator."""
    X, y = [], []
    # Generate data
    for i, image_id in enumerate(batch_ids):
        pixels = self.image_pixels[image_id]
        bboxes = self.labels[image_id]
        if self.augment:
            # Training path: augmentation also rescales pixels to [0, 1]
            # and rasterizes the boxes into the label grid.
            pixels, bboxes = self.augment_image(pixels, bboxes)
        else:
            pixels = self.contrast_image(pixels)
            bboxes = self.form_label_grid(bboxes)
        X.append(pixels)
        y.append(bboxes)
    return np.array(X), np.array(y)
def augment_image(self, pixels, bboxes):
    """Run the training augmentation pipeline; returns (pixels/255, label grid)."""
    bbox_labels = np.ones(len(bboxes))  # albumentations needs per-box labels
    aug_result = self.train_augmentations(image=pixels, bboxes=bboxes, labels=bbox_labels)
    bboxes = self.form_label_grid(aug_result['bboxes'])
    return np.array(aug_result['image']) / 255, bboxes
def contrast_image(self, pixels):
    """Apply the validation pipeline (contrast/grayscale only) and scale to [0, 1]."""
    aug_result = self.val_augmentations(image=pixels)
    return np.array(aug_result['image']) / 255
def form_label_grid(self, bboxes):
    """Rasterize bounding boxes into the (rows, cols, 10) YOLO target grid.

    Each cell holds two anchors of 5 values each (see rect_intersect).
    """
    label_grid = np.zeros((model.output_shape[1], model.output_shape[2], 10))
    for i in range(0, model.output_shape[1]):
        for j in range(0, model.output_shape[2]):
            cell = self.image_grid[i, j]
            label_grid[i, j] = self.rect_intersect(cell, bboxes)
    return label_grid
def rect_intersect(self, cell, bboxes):
    """Return the 10-value anchor vector for one grid cell.

    A box is assigned to the cell containing its centre; the first two such
    boxes fill anchor slots one and two, any further boxes are dropped.
    """
    cell_x, cell_y, cell_width, cell_height = cell
    cell_x_max = cell_x + cell_width
    cell_y_max = cell_y + cell_height
    # [objectness, x, y, w, h] — objectness 0 marks an empty anchor slot.
    anchor_one = np.array([0, 0, 0, 0, 0])
    anchor_two = np.array([0, 0, 0, 0, 0])
    # check all boxes
    for bbox in bboxes:
        box_x, box_y, box_width, box_height = bbox
        box_x_centre = box_x + (box_width / 2)
        box_y_centre = box_y + (box_height / 2)
        if (box_x_centre >= cell_x and box_x_centre < cell_x_max and box_y_centre >= cell_y and box_y_centre < cell_y_max):
            # An anchor is free iff its objectness flag is still 0.
            if anchor_one[0] == 0:
                anchor_one = self.yolo_shape(
                    [box_x, box_y, box_width, box_height],
                    [cell_x, cell_y, cell_width, cell_height]
                )
            elif anchor_two[0] == 0:
                anchor_two = self.yolo_shape(
                    [box_x, box_y, box_width, box_height],
                    [cell_x, cell_y, cell_width, cell_height]
                )
            else:
                break
    return np.concatenate((anchor_one, anchor_two), axis=None)
def yolo_shape(self, box, cell):
    """Convert an absolute [x, y, w, h] box into YOLO cell-relative form.

    Returns [1, x, y, w, h]: objectness flag, centre offset as a fraction of
    the cell, size as a fraction of the full input image.
    """
    box_x, box_y, box_width, box_height = box
    cell_x, cell_y, cell_width, cell_height = cell
    # Shift the box's top-left corner to its centre point.
    centre_x = box_x + (box_width / 2)
    centre_y = box_y + (box_height / 2)
    # Express the centre relative to the containing cell...
    rel_x = (centre_x - cell_x) / cell_width
    rel_y = (centre_y - cell_y) / cell_height
    # ...and the size relative to the resized input image.
    rel_w = box_width / config.resized_size
    rel_h = box_height / config.resized_size
    return [1, rel_x, rel_y, rel_w, rel_h]
#Setting up DataGenerator
DataGenerator.augment_image = augment_image
DataGenerator.contrast_image = contrast_image
DataGenerator.form_label_grid = form_label_grid
DataGenerator.rect_intersect = rect_intersect
DataGenerator.yolo_shape = yolo_shape
DataGenerator.__getitem__ = __getitem__
DataGenerator.__data_generation = __data_generation
train_generator = DataGenerator(
train_image_ids,
train_pixels,
train_labels,
batch_size=1,
shuffle=True,
augment=True
)
val_generator = DataGenerator(
val_image_ids,
val_pixels,
val_labels,
batch_size=1,
shuffle=False,
augment=False
)
image_grid = train_generator.image_grid
#Compile and Training Model
optimiser = tf.keras.optimizers.Adam(learning_rate=0.0001)
model.compile(
optimizer=optimiser,
loss=custom_loss
)
callbacks = [tf.keras.callbacks.ModelCheckpoint(config.checkpoint + '/object_detection_ckpt.weights.{epoch:02d}-{val_loss:.2f}.hdf5', monitor='val_loss', verbose=1, save_best_only=False, mode='auto', save_weights_only=True), \
tf.keras.callbacks.ReduceLROnPlateau(monitor='loss', patience=3, verbose=1), \
tf.keras.callbacks.EarlyStopping(monitor='val_loss', patience=5, verbose=1, restore_best_weights=True), \
]
history = model.fit(train_generator,validation_data=val_generator, epochs=argumens.epochs, callbacks=callbacks)
model.save(config.saved_model) | SandiRizqi/OBJECT-DETECTION-YOLO-ALGORITHM-FOR-AERIAL-IMAGERY_FROM-SCRATCH | train_yolo.py | train_yolo.py | py | 9,124 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
"line_number": 46,
"usage_type": "call"
},
{
"api_name": "numpy.unique",
... |
43916018301 | from functools import lru_cache
MOD = 10 ** 9 + 7


class Solution:
    def findPaths(self, m, n, maxMove, startRow, startColumn):
        """Count (mod 1e9+7) the paths that carry the ball out of the m x n
        grid within maxMove moves, starting from (startRow, startColumn)."""
        @lru_cache(None)
        def count_exits(row, col, moves_left):
            # Stepping outside the grid is exactly one completed path.
            if not (0 <= row < m and 0 <= col < n):
                return 1
            if moves_left == 0:
                return 0
            total = 0
            for dr, dc in ((1, 0), (-1, 0), (0, 1), (0, -1)):
                total += count_exits(row + dr, col + dc, moves_left - 1)
            return total % MOD
        return count_exits(startRow, startColumn, maxMove)
| robinsdeepak/leetcode | 576-out-of-boundary-paths/576-out-of-boundary-paths.py | 576-out-of-boundary-paths.py | py | 604 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "functools.lru_cache",
"line_number": 8,
"usage_type": "call"
}
] |
11379198461 | from django.views.generic import ListView
from django.shortcuts import render_to_response
from django.template import RequestContext
from django.core.urlresolvers import reverse
from django.http import HttpResponseRedirect
from django.db.models import Count
from guesswho.core.models import (Game, Question, Trait, TraitValue, Player,
all_people)
from guesswho.core.logic import (get_game_opponent, is_game_complete,
rule_out_candidates)
from guesswho.core.forms import QuestionForm
class ListGames(ListView):
    """List every game the logged-in user participates in."""
    template_name = "core/list_games.html"

    def get_queryset(self):
        # Only games that contain a player owned by the requesting user.
        return Game.objects.filter(players__user=self.request.user)
def create_game(request):
    """Create a new game with the requesting user as player 1, then redirect."""
    game = Game.objects.create()
    player1 = Player.objects.create(user=request.user)
    # A new player starts with every person still in play as a candidate.
    player1.candidates.add(*all_people())
    game.players.add(player1)
    game.save()
    return HttpResponseRedirect(reverse('games_to_join'))
def join_game(request):
    """List games awaiting a second player; on POST, join the chosen one."""
    ctx = {
        # Games with exactly one player are open to join.
        'games': Game.objects.annotate(player_count=Count('players'))
        .filter(player_count=1)
    }
    if request.method == 'POST':
        game_id = request.POST.get('game_id')
        game = Game.objects.get(pk=int(game_id))
        player2 = Player.objects.create(user=request.user)
        # The joining player starts with the full candidate set.
        player2.candidates.add(*all_people())
        game.players.add(player2)
        game.save()
        return HttpResponseRedirect(reverse('play_game', args=(game.pk,)))
    return render_to_response('core/games_to_join.html', ctx,
                              context_instance=RequestContext(request))
def play_game(request, game_id):
    """Render the game board for the requesting player; on POST, process a
    question turn and (when the game ends) mark the winner in the context."""
    game = Game.objects.get(pk=int(game_id))
    player = game.players.filter(user=request.user)[0]
    candidates = player.candidates.all()
    ctx = {
        'opponent': get_game_opponent(game, player),
        'person': player.person,
        'num_candidates': candidates.count(),
        'candidates': candidates
    }
    if request.method == 'POST':
        form = QuestionForm(game, player, request.POST)
        if form.is_valid():
            # The selected value encodes "<trait_id>:<value_id>".
            custom_key = form.cleaned_data.get('question')
            trait_id, value_id = custom_key.split(':')
            question_data = {
                'game': game,
                'player': player,
                'trait': Trait.objects.get(pk=trait_id),
                'value': TraitValue.objects.get(pk=value_id)
            }
            # NOTE(review): the Question instance is never .save()d here —
            # presumably rule_out_candidates persists what it needs; confirm.
            question = Question(**question_data)
            rule_out_candidates(question)
            winner = is_game_complete(game)
            if winner:
                ctx.update({
                    'game_over': True,
                    # Fixed: compare primary-key *values*. The original used
                    # `is`, which relies on CPython small-int interning and
                    # silently breaks for larger database ids.
                    'user_won': winner.pk == player.pk
                })
    else:
        form = QuestionForm(game, player)
    ctx['form'] = form
    return render_to_response('core/play_game.html', ctx,
                              context_instance=RequestContext(request))
| schallis/guesswho | guesswho/core/views.py | views.py | py | 2,911 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "django.views.generic.ListView",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "guesswho.core.models.Game.objects.filter",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "guesswho.core.models.Game.objects",
"line_number": 19,
"usage_type... |
19213479137 |
import os.path
from os import listdir
from os.path import isfile, join
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import style
import cv2
from sklearn.svm import SVC
import hog
def get_good_train_set(directory="./NICTA/TrainSet/PositiveSamples"):
    """Return the paths of all regular files in the positive-training directory."""
    entries = listdir(directory)
    return [join(directory, entry) for entry in entries if isfile(join(directory, entry))]
def get_bad_train_set(directory="./NICTA/TrainSet/NegativeSamples"):
    """Return the paths of all regular files in the negative-training directory."""
    entries = listdir(directory)
    return [join(directory, entry) for entry in entries if isfile(join(directory, entry))]
def get_good_test_set(directory="./NICTA/TestSet/PositiveSamples"):
    """Return the paths of all regular files in the positive-test directory."""
    entries = listdir(directory)
    return [join(directory, entry) for entry in entries if isfile(join(directory, entry))]
def get_bad_test_set(directory="./NICTA/TestSet/NegativeSamples"):
    """Return the paths of all regular files in the negative-test directory."""
    entries = listdir(directory)
    return [join(directory, entry) for entry in entries if isfile(join(directory, entry))]
def get_hog_descriptor(image):
    """Compute a flat HOG descriptor for one image.

    The image is normalized to the canonical 64x128 detection window and
    converted to grayscale before the gradient/histogram pipeline runs.
    Returns the normalized block histograms flattened to 1-D (3780 values
    per the callers' buffer size).
    """
    image = cv2.resize(image, (64, 128))
    image = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
    # gamma_value is a module-level setting (assigned in the __main__ block).
    image = hog.gamma_correction(image, gamma_value)
    gradient = hog.compute_gradients(image)
    cell_histograms, _ = hog.compute_weighted_vote(gradient)
    hog_blocks, _ = hog.normalize_blocks(cell_histograms)
    return hog_blocks.ravel()
if __name__ == '__main__':
    gamma_value = 1.0  # gamma used by get_hog_descriptor (module-level)
    # --- Training features: positive samples ---
    good_set = get_good_train_set()
    image_count = len(good_set)
    good_set_hog = np.empty((image_count, 3780))  # one HOG row per image
    image_index = 0
    for image_file in good_set:
        test_image = cv2.imread(image_file)
        good_set_hog[image_index] = get_hog_descriptor(test_image)
        image_index += 1
    good_set_tag = np.ones(image_count)  # label 1 = positive
    # --- Training features: negative samples ---
    bad_set = get_bad_train_set()
    image_count = len(bad_set)
    bad_set_hog = np.empty((image_count, 3780))
    image_index = 0
    for image_file in bad_set:
        test_image = cv2.imread(image_file)
        bad_set_hog[image_index] = get_hog_descriptor(test_image)
        image_index += 1
    bad_set_tag = np.zeros(image_count)  # label 0 = negative
    # --- Test features: positive samples ---
    good_test_set = get_good_test_set()
    good_test_image_count = len(good_test_set)
    good_test_set_hog = np.empty((good_test_image_count, 3780))
    image_index = 0
    for image_file in good_test_set:
        test_image = cv2.imread(image_file)
        good_test_set_hog[image_index] = get_hog_descriptor(test_image)
        image_index += 1
    # --- Test features: negative samples ---
    bad_test_set = get_bad_test_set()
    bad_test_image_count = len(bad_test_set)
    bad_test_set_hog = np.empty((bad_test_image_count, 3780))
    image_index = 0
    for image_file in bad_test_set:
        test_image = cv2.imread(image_file)
        bad_test_set_hog[image_index] = get_hog_descriptor(test_image)
        image_index += 1
    # Train three SVM kernels on the combined feature matrix.
    train_data = np.concatenate((good_set_hog, bad_set_hog))
    tag_data = np.concatenate((good_set_tag, bad_set_tag))
    C = 1.0 # SVM regularization parameter
    lin_svc = SVC(kernel='linear', C=C).fit(train_data, tag_data)
    rbf_svc = SVC(kernel='rbf', C=C).fit(train_data, tag_data)
    poly_svc = SVC(kernel='poly', C=C, degree=2).fit(train_data, tag_data)
    # title for the classifiers
    titles = ['SVC with linear kernel',
              'SVC with RBF kernel',
              'SVC with polynomial kernel']
    # Evaluate each classifier on the held-out positive/negative sets.
    for i, clf in enumerate((lin_svc, rbf_svc, poly_svc)):
        # Plot the decision boundary. For that, we will assign a color to each
        # point in the mesh [x_min, x_max]x[y_min, y_max].
        plt.subplot(2, 2, i + 1)
        plt.subplots_adjust(wspace=0.4, hspace=0.4)
        good_test_results = clf.predict(good_test_set_hog)
        #print(good_test_results)
        bad_test_results = clf.predict(bad_test_set_hog)
        #print(bad_test_results)
        # Predictions are 1/0, so their mean gives the positive-rate directly.
        print("Results for {}".format(titles[i]))
        print("Accuracy for Positive Cases: {}".format(np.sum(good_test_results) / good_test_image_count * 100))
        print("Accuracy for Negative Cases: {}".format(100 - (np.sum(bad_test_results) / bad_test_image_count * 100)))
        del good_test_results, bad_test_results
| insomaniacvenkat/HOG | svm_train.py | svm_train.py | py | 4,128 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "os.path.join",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.listdir",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.isfile",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "os.path.join",
"line_numbe... |
32704563511 | from scipy.signal import hilbert
import numpy as np
import matplotlib.pyplot as plt
def compare_elements(array1, array2):  # array1 and array2 have the same length
    """Element-wise three-way comparison of two equal-length sequences.

    Returns a float array with 0 where the elements are equal, 1 where
    array1 > array2 and -1 where array1 < array2.
    """
    a = np.asarray(array1, dtype=float)
    b = np.asarray(array2, dtype=float)
    # np.sign(a - b) is exactly the 0 / 1 / -1 classification, computed in a
    # single vectorized pass instead of the original Python-level loop.
    return np.sign(a - b)
def phase_locked_matrix(all_bands_eeg):
    """Build per-band phase-locking matrices from multi-channel EEG.

    all_bands_eeg has shape (bands, channels, points), e.g. 4 x 32 x 8064:
    4 frequency bands, 32 electrodes, 8064 samples per electrode.  Returns
    a (bands, channels, channels) matrix of pairwise phase-locking values.
    """
    # Number of frequency bands, electrode channels and samples per channel.
    bands, channels, points = all_bands_eeg.shape
    # Instantaneous phase per band/channel/sample via the Hilbert analytic
    # signal.  NOTE(review): zeros_like inherits the input dtype — integer
    # input would truncate the phases; confirm callers pass float data.
    eeg_instantaneous_phase = np.zeros_like(all_bands_eeg)
    for band, signal_band_eeg in enumerate(all_bands_eeg):
        for channel, single_channel_eeg in enumerate(signal_band_eeg):
            analytic_signal = hilbert(single_channel_eeg)
            instantaneous_phase = np.unwrap(np.angle(analytic_signal))
            eeg_instantaneous_phase[band, channel] = instantaneous_phase
    # Pairwise locking value per band: |sum of sign(phase_i - phase_j)| / points.
    matrix = np.zeros(shape=[bands, channels, channels])
    for band in range(bands):
        for i in range(channels):
            for j in range(channels):
                if i == j:
                    matrix[band][i][j] = 1  # a channel is fully locked to itself
                else:
                    matrix[band][i][j] = np.abs((compare_elements(eeg_instantaneous_phase[band][i], eeg_instantaneous_phase[band][j])).sum()) / points
    return matrix
if __name__ == '__main__':
    import pandas as pd
    import matplotlib.pyplot as plt
    import seaborn as sns
    import data_dir
    eeg = pd.read_csv(data_dir.preprocess_dir + r'\level1\8.csv')
    # print(phase_locked_matrix(eeg.values[:30, 1:]))
    # NOTE(review): eeg.values[:30, 1:] is 2-D, but phase_locked_matrix
    # unpacks a 3-D (bands, channels, points) shape — confirm input layout.
    m = phase_locked_matrix(eeg.values[:30, 1:])
    # Render the connectivity matrix as a [0, 1] heatmap.
    fig, ax = plt.subplots(figsize=(15, 15))
    sns.heatmap(pd.DataFrame(m),vmax=1,vmin = 0, xticklabels= True, yticklabels= True, square=True)
    plt.show()
| sheep9159/click_number | function_connective.py | function_connective.py | py | 2,127 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.zeros",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "numpy.zeros_like",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "scipy.signal.hilbert",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.unwrap",
"... |
5353638973 | # coding: utf-8
"""
NGSI-LD metamodel and Sensor NGSI-LD custom model
ETSI GS CIM 009 V1.6.1 cross-cutting Context Information Management (CIM); NGSI-LD API; NGSI-LD metamodel and Sensor NGSI-LD custom model. # noqa: E501
The version of the OpenAPI document: 1.6.1
Generated by OpenAPI Generator (https://openapi-generator.tech)
Do not edit the class manually.
"""
from __future__ import annotations
from inspect import getfullargspec
import json
import pprint
import re # noqa: F401
from typing import Any, List, Optional
from pydantic import BaseModel, Field, StrictStr, ValidationError, validator
from ngsi_ld_models.models.geo_property_fragment_input import GeoPropertyFragmentInput
from ngsi_ld_models.models.language_property_fragment_input import LanguagePropertyFragmentInput
from ngsi_ld_models.models.property_fragment_input import PropertyFragmentInput
from ngsi_ld_models.models.relationship_fragment_input import RelationshipFragmentInput
from typing import Any, List
from pydantic import StrictStr, Field
REPLACEATTRSREQUEST_ONE_OF_SCHEMAS = ["GeoPropertyFragmentInput", "LanguagePropertyFragmentInput", "PropertyFragmentInput", "RelationshipFragmentInput"]
class ReplaceAttrsRequest(BaseModel):
    """
    ReplaceAttrsRequest

    OpenAPI-generated oneOf wrapper: a payload must match exactly one of the
    four NGSI-LD attribute-fragment schemas, and the matching model instance
    is held in ``actual_instance``.
    """
    # data type: PropertyFragmentInput
    oneof_schema_1_validator: Optional[PropertyFragmentInput] = None
    # data type: RelationshipFragmentInput
    oneof_schema_2_validator: Optional[RelationshipFragmentInput] = None
    # data type: GeoPropertyFragmentInput
    oneof_schema_3_validator: Optional[GeoPropertyFragmentInput] = None
    # data type: LanguagePropertyFragmentInput
    oneof_schema_4_validator: Optional[LanguagePropertyFragmentInput] = None
    actual_instance: Any
    # Names of the accepted oneOf schemas; const=True makes the field fixed.
    one_of_schemas: List[str] = Field(REPLACEATTRSREQUEST_ONE_OF_SCHEMAS, const=True)
    class Config:
        validate_assignment = True
    def __init__(self, *args, **kwargs):
        # Accept either one positional value (becomes actual_instance) or
        # keyword arguments — never both.
        if args:
            if len(args) > 1:
                raise ValueError("If a position argument is used, only 1 is allowed to set `actual_instance`")
            if kwargs:
                raise ValueError("If a position argument is used, keyword arguments cannot be used.")
            super().__init__(actual_instance=args[0])
        else:
            super().__init__(**kwargs)
    @validator('actual_instance')
    def actual_instance_must_validate_oneof(cls, v):
        # Count how many oneOf schemas the value satisfies; exactly one
        # match is required by the oneOf contract.
        instance = ReplaceAttrsRequest.construct()
        error_messages = []
        match = 0
        # validate data type: PropertyFragmentInput
        if not isinstance(v, PropertyFragmentInput):
            error_messages.append(f"Error! Input type `{type(v)}` is not `PropertyFragmentInput`")
        else:
            match += 1
        # validate data type: RelationshipFragmentInput
        if not isinstance(v, RelationshipFragmentInput):
            error_messages.append(f"Error! Input type `{type(v)}` is not `RelationshipFragmentInput`")
        else:
            match += 1
        # validate data type: GeoPropertyFragmentInput
        if not isinstance(v, GeoPropertyFragmentInput):
            error_messages.append(f"Error! Input type `{type(v)}` is not `GeoPropertyFragmentInput`")
        else:
            match += 1
        # validate data type: LanguagePropertyFragmentInput
        if not isinstance(v, LanguagePropertyFragmentInput):
            error_messages.append(f"Error! Input type `{type(v)}` is not `LanguagePropertyFragmentInput`")
        else:
            match += 1
        if match > 1:
            # more than 1 match
            raise ValueError("Multiple matches found when setting `actual_instance` in ReplaceAttrsRequest with oneOf schemas: GeoPropertyFragmentInput, LanguagePropertyFragmentInput, PropertyFragmentInput, RelationshipFragmentInput. Details: " + ", ".join(error_messages))
        elif match == 0:
            # no match
            raise ValueError("No match found when setting `actual_instance` in ReplaceAttrsRequest with oneOf schemas: GeoPropertyFragmentInput, LanguagePropertyFragmentInput, PropertyFragmentInput, RelationshipFragmentInput. Details: " + ", ".join(error_messages))
        else:
            return v
    @classmethod
    def from_dict(cls, obj: dict) -> ReplaceAttrsRequest:
        # Round-trip through JSON so the oneOf resolution in from_json applies.
        return cls.from_json(json.dumps(obj))
    @classmethod
    def from_json(cls, json_str: str) -> ReplaceAttrsRequest:
        """Returns the object represented by the json string"""
        instance = ReplaceAttrsRequest.construct()
        error_messages = []
        match = 0
        # deserialize data into PropertyFragmentInput
        try:
            instance.actual_instance = PropertyFragmentInput.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into RelationshipFragmentInput
        try:
            instance.actual_instance = RelationshipFragmentInput.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into GeoPropertyFragmentInput
        try:
            instance.actual_instance = GeoPropertyFragmentInput.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        # deserialize data into LanguagePropertyFragmentInput
        try:
            instance.actual_instance = LanguagePropertyFragmentInput.from_json(json_str)
            match += 1
        except (ValidationError, ValueError) as e:
            error_messages.append(str(e))
        if match > 1:
            # more than 1 match
            raise ValueError("Multiple matches found when deserializing the JSON string into ReplaceAttrsRequest with oneOf schemas: GeoPropertyFragmentInput, LanguagePropertyFragmentInput, PropertyFragmentInput, RelationshipFragmentInput. Details: " + ", ".join(error_messages))
        elif match == 0:
            # no match
            raise ValueError("No match found when deserializing the JSON string into ReplaceAttrsRequest with oneOf schemas: GeoPropertyFragmentInput, LanguagePropertyFragmentInput, PropertyFragmentInput, RelationshipFragmentInput. Details: " + ", ".join(error_messages))
        else:
            # Reaching here means exactly one schema deserialized the payload.
            return instance
    def to_json(self) -> str:
        """Returns the JSON representation of the actual instance"""
        if self.actual_instance is None:
            return "null"
        to_json = getattr(self.actual_instance, "to_json", None)
        if callable(to_json):
            return self.actual_instance.to_json()
        else:
            # primitive instance: fall back to plain JSON encoding
            return json.dumps(self.actual_instance)
    def to_dict(self) -> dict:
        """Returns the dict representation of the actual instance"""
        if self.actual_instance is None:
            return None
        to_dict = getattr(self.actual_instance, "to_dict", None)
        if callable(to_dict):
            return self.actual_instance.to_dict()
        else:
            # primitive type
            return self.actual_instance
    def to_str(self) -> str:
        """Returns the string representation of the actual instance"""
        return pprint.pformat(self.dict())
| daniel-gonzalez-sanchez/ngsi-ld-client-tester | ngsi-ld-models/ngsi_ld_models/models/replace_attrs_request.py | replace_attrs_request.py | py | 7,277 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 32,
"usage_type": "name"
},
{
"api_name": "typing.Optional",
"line_number": 37,
"usage_type": "name"
},
{
"api_name": "ngsi_ld_models.models.property_fragment_input.PropertyFragmentInput",
"line_number": 37,
"usage_type":... |
# Standardize a univariate time series with scikit-learn's StandardScaler.
from pandas import Series
import pandas as pd
from sklearn.preprocessing import StandardScaler
from math import sqrt
# Load the dataset and print the first 5 rows.
# Series.from_csv was removed from pandas (0.25+); read_csv with the first
# column as index plus squeeze("columns") yields the same Series.
series = pd.read_csv('daily-minimum-temperatures-in-me.csv', header=0, index_col=0).squeeze("columns")
print(series.head())
# Reshape values into the (n_samples, 1) column vector sklearn expects.
values = series.values
values = values.reshape((len(values), 1))
# Fit the standardization model (estimates mean and variance).
scaler = StandardScaler()
scaler = scaler.fit(values)
print('Mean: %f, StandardDeviation: %f' % (scaler.mean_, sqrt(scaler.var_)))
# Standardize the data and print the first five rows.
normalized = scaler.transform(values)
for i in range(5):
    print(normalized[i])
# Invert the standardization (recover original scale).
inversed = scaler.inverse_transform(normalized)
for i in range(5):
    print(inversed[i])
{
"api_name": "pandas.Series.from_csv",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "sklearn.preprocessing.StandardScaler",
"line_number": 12,
"usage_type": "call"
},
{
"api_name"... |
10021006698 | """
data_metrics_calculation_ingestion.py
=====================================
This module contains code to fetch weather data records and calculate relevant analytics and save it to the database.
"""
import argparse
from typing import Any
import pandas as pd
from sqlalchemy import create_engine
from sqlalchemy.orm import sessionmaker
from data_metrics_model import Base, WeatherStats
from data_models import WeatherData
def get_weather_data(session: Any) -> pd.DataFrame:
    """Load every WeatherData row into a pandas DataFrame.

    SQLAlchemy bookkeeping columns are stripped, the ``date`` column is
    parsed into datetimes, and a ``year`` column is derived for grouping.

    Parameters:
        session (Any): SQL Alchemy session

    Returns:
        pd.DataFrame: one row per weather record, with a ``year`` column.
    """
    records = session.query(WeatherData).all()
    frame = pd.DataFrame([record.__dict__ for record in records])
    frame = frame.drop(columns=["_sa_instance_state", "id"])
    frame["date"] = pd.to_datetime(frame["date"])
    frame["year"] = frame["date"].dt.year
    return frame
def calculate_analytics(result_df: pd.DataFrame) -> pd.DataFrame:
    """Aggregate weather records into per-year statistics.

    For every year this computes the mean maximum temperature, the mean
    minimum temperature, and the total precipitation.

    Parameters:
        result_df (pd.DataFrame): weather records with ``year``,
            ``max_temp``, ``min_temp`` and ``precipitation`` columns.

    Returns:
        pd.DataFrame: one row per year with the aggregate columns.
    """
    summary = (
        result_df.groupby("year")
        .agg(
            avg_max_temp=("max_temp", "mean"),
            avg_min_temp=("min_temp", "mean"),
            total_precipitation=("precipitation", "sum"),
        )
        .reset_index()
    )
    return summary
if __name__ == "__main__":
    # CLI entry point: compute yearly weather stats and persist them.
    parser = argparse.ArgumentParser(
        description="Argumennts to analyse data and save it in database."
    )
    parser.add_argument(
        "-db", "--db_path", type=str, required=True, help="Path of sqlite3 database."
    )
    args = parser.parse_args()
    # Create Database Engine
    engine = create_engine(f"sqlite:///{args.db_path}")
    Session = sessionmaker(bind=engine)
    session = Session()
    # Start connection
    conn = engine.connect()
    # Check if weather stats table exists; create the schema if not.
    # NOTE(review): engine.dialect.has_table is deprecated in SQLAlchemy
    # 1.4+ (use sqlalchemy.inspect(engine).has_table) — confirm the pinned
    # SQLAlchemy version before upgrading.
    if not engine.dialect.has_table(conn, WeatherStats.__tablename__):
        Base.metadata.create_all(bind=engine)
    # Fetch weather data.
    weather_data_df = get_weather_data(session=session)
    # print(f"Weather Data:")
    # print(weather_data_df.head())
    # Calculate analytics.
    result_df_grouped = calculate_analytics(weather_data_df)
    result_df_grouped_dict = result_df_grouped.to_records(index=False)
    # print(result_df_grouped_dict)
    # Iterate over (year, avg_max, avg_min, total_precip) records and stage
    # one WeatherStats row each; commit once after the loop.
    for item in result_df_grouped_dict:
        year = int(item[0])
        avg_max_temp = item[1]
        avg_min_temp = item[2]
        total_precipitation = item[3]
        weather_stats_data = WeatherStats(
            year=year,
            avg_max_temp=avg_max_temp,
            avg_min_temp=avg_min_temp,
            total_precipitation=total_precipitation,
        )
        session.add(weather_stats_data)
    session.commit()
    # Close connection.
    session.close()
    conn.close()
| pri2si17-1997/weather_data_processing | src/data_metrics_calculation_ingestion.py | data_metrics_calculation_ingestion.py | py | 3,490 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "typing.Any",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "data_models.WeatherData",
"line_number": 29,
"usage_type": "argument"
},
{
"api_name": "pandas.DataFrame",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "pandas.to_dat... |
72135965225 | import numpy as np
from scipy.stats import bernoulli, binom
# Parameters of the Bernoulli distribution
p = 0.5 # probability of success
# Number of rounds in the game
num_rodadas = 5
pontuacao = 0
print("Bem-vindo ao jogo de adivinhação!")
print(f"Você tem {num_rodadas} rodadas para adivinhar o resultado de uma distribuição de Bernoulli (1 ou 0).")
for rodada in range(num_rodadas):
    # Draw one sample from the Bernoulli distribution
    resultado_real = bernoulli.rvs(p, size=1)[0]
    # Ask the player for a guess
    palpite = input(f"Rodada {rodada + 1}: Adivinhe 0 ou 1: ")
    try:
        palpite = int(palpite)
        if palpite != 0 and palpite != 1:
            print("Insira 0 ou 1 como seu palpite.")
            continue
    except ValueError:
        print("Insira 0 ou 1 como seu palpite.")
        continue
    if palpite == resultado_real:
        print("Você acertou!")
        pontuacao += 1
    else:
        print(f"Você errou. O resultado real era {resultado_real}.")
print(f"Jogo encerrado. Sua pontuação final é {pontuacao} pontos.")
# Score the final result against a binomial distribution.
# NOTE(review): binom.pmf gives the probability of exactly this score under
# chance guessing — it is not a formal significance test.
n = num_rodadas # number of trials
p_acerto = p # probability of success on each trial
pontuacao_final = binom.pmf(pontuacao, n, p_acerto)
print(f"Sua pontuação final é estatisticamente significativa? ({pontuacao_final:.2%} de chance de obtê-la por acaso)")
| Dhisting1/Estatisca-Python | Estatistica-Python/gameDIstribuiçãoBernoulli.py | gameDIstribuiçãoBernoulli.py | py | 1,430 | python | pt | code | 0 | github-code | 36 | [
{
"api_name": "scipy.stats.bernoulli.rvs",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "scipy.stats.bernoulli",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "scipy.stats.binom.pmf",
"line_number": 41,
"usage_type": "call"
},
{
"api_name": ... |
4000749197 | # -*- coding: utf-8 -*-
"""Dataset methods for natural language inference.
Tokenization -> lower casing -> stop words removal -> lemmatization
Authors:
Fangzhou Li - fzli@ucdavis.edu
Todo:
* TODOs
"""
import torch
from torch.utils.data import Dataset, DataLoader
from torch.nn.utils.rnn import pad_sequence
from transformers import PreTrainedTokenizerBase
import pandas as pd
def _truncate_seq_pair(tokens_a, tokens_b, max_length):
"""Truncates a sequence pair in place to the maximum length.
This is a simple heuristic which will always truncate the longer sequence
one token at a time. This makes more sense than truncating an equal percent
of tokens from each, since if one sequence is very short then each token
that's truncated likely contains more information than a longer sequence.
Reference: https://github.com/huggingface/transformers/blob/main/examples/
legacy/run_swag.py
Args:
tokens_a: A list of tokens.
tokens_b: A list of tokens.
max_length: Maximum length of the output sequence.
Returns:
A truncated list of tokens.
"""
while True:
total_length = len(tokens_a) + len(tokens_b)
if total_length <= max_length:
break
if len(tokens_a) > len(tokens_b):
tokens_a.pop()
else:
tokens_b.pop()
class FoodAtlasNLIDataset(Dataset):
    """NLI dataset class.
    Reference: https://www.kaggle.com/code/tks0123456789/nli-by-bert-pytorch.
    Each sample is encoded as [CLS] premise [SEP] hypothesis [SEP] with the
    usual BERT-style attention mask and token type ids.
    Args:
        premises: list of premises
        hypotheses: list of hypotheses
        labels: list of labels (None for unlabeled/inference data)
        tokenizer: tokenizer
        max_seq_len: maximum sequence length (including the 3 special tokens)
    """
    def __init__(
            self,
            premises: list[str],
            hypotheses: list[str],
            tokenizer: PreTrainedTokenizerBase,
            labels: list[int] = None,
            # NOTE(review): mutable dict as a default argument; harmless here
            # because it is never mutated, but worth confirming.
            label_mapper: dict = {
                'Entails': 1, 'Does not entail': 0
            },
            max_seq_len: int = 512):
        if labels is not None:
            self.labels = torch.LongTensor(
                [label_mapper[label] for label in labels]
            )
        else:
            self.labels = None
        # Longest encoded sequence seen, for logging/debugging.
        self.max_tokens = 0
        self.inputs = []
        for p, h in zip(premises, hypotheses):
            p_ids = tokenizer.encode(p, add_special_tokens=False)
            h_ids = tokenizer.encode(h, add_special_tokens=False)
            # Reserve 3 slots for [CLS] and the two [SEP] tokens.
            _truncate_seq_pair(p_ids, h_ids, max_seq_len - 3)
            input_ids = [tokenizer.cls_token_id] \
                + p_ids \
                + [tokenizer.sep_token_id] \
                + h_ids \
                + [tokenizer.sep_token_id]
            attention_mask = [1] * len(input_ids)
            # Segment 0 covers [CLS] + premise + [SEP]; segment 1 the rest.
            token_type_ids = [0] * (len(p_ids) + 2) + [1] * (len(h_ids) + 1)
            self.inputs.append([
                torch.LongTensor(input_ids),
                torch.IntTensor(attention_mask),
                torch.IntTensor(token_type_ids)
            ])
            self.max_tokens = max(self.max_tokens, len(input_ids))
        print("Longest Sequence Length:", self.max_tokens)
    def __len__(self):
        return len(self.inputs)
    def __getitem__(self, idx):
        # Returns (inputs, label); label is None for inference datasets.
        if self.labels is not None:
            return self.inputs[idx], self.labels[idx]
        else:
            return self.inputs[idx], None
def collate_fn_padding(batch):
    """Pad a batch of variable-length NLI samples to a common length.

    Args:
        batch: list of (inputs, label) pairs, where inputs is a
            [input_ids, attention_mask, token_type_ids] triple of 1-D tensors.

    Returns:
        ((input_ids, attention_mask, token_type_ids), labels) where each
        tensor has shape (batch_size, max_len); labels is the stacked label
        tensor, or None when the samples carry no labels (inference mode).
    """
    sample_inputs, sample_labels = zip(*batch)
    ids, masks, segments = zip(*sample_inputs)
    padded = (
        pad_sequence(ids, batch_first=True, padding_value=0),
        pad_sequence(masks, batch_first=True, padding_value=0),
        # token_type_ids pad with 1: padding joins the hypothesis segment.
        pad_sequence(segments, batch_first=True, padding_value=1),
    )
    if sample_labels[0] is None:
        return padded, None
    return padded, torch.stack(sample_labels, dim=0)
# def get_food_atlas_data_loaders(
# path_data_train: str,
# tokenizer: PreTrainedTokenizerBase,
# path_data_test: str = None,
# max_seq_len: int = 512,
# batch_size: int = 1,
# shuffle: bool = True,
# num_workers: int = 0,
# collate_fn: callable = collate_fn_padding,
# verbose: bool = True):
# """Get data loader for food atlas dataset.
# Args:
# path_data_train: path to the training data
# tokenizer: tokenizer
# path_data_test: path to the testing data
# max_seq_len: maximum sequence length
# batch_size: batch size
# shuffle: whether to shuffle the data
# num_workers: number of workers
# collate_fn: collate function
# verbose: whether to print out the information
# Returns:
# data loaders for training and testing
# """
# data_loaders = []
# for path, name in zip(
# [path_data_train, path_data_test], ['train', 'test']):
# if path is not None:
# data = pd.read_csv(path, sep='\t')
# data = data[['premise', 'hypothesis_string', 'answer']]
# data = data.rename(
# {'hypothesis_string': 'hypothesis'}, axis=1
# )
# data = data[~(data['answer'] == 'Skip')]
# if verbose:
# print(f"==={name} set info start===")
# print(data['answer'].value_counts())
# print(f"===={name} set info end====")
# dataset = FoodAtlasNLIDataset(
# premises=data['premise'].tolist(),
# hypotheses=data['hypothesis'].tolist(),
# labels=data['answer'].tolist(),
# tokenizer=tokenizer,
# max_seq_len=max_seq_len
# )
# data_loader = DataLoader(
# dataset=dataset,
# batch_size=batch_size,
# shuffle=shuffle,
# num_workers=num_workers,
# collate_fn=collate_fn
# )
# else:
# data_loader = None
# data_loaders += [data_loader]
# data_loader_train, data_loader_test = data_loaders
# return data_loader_train, data_loader_test
def get_food_atlas_data_loader(
        path_data: str,
        tokenizer: PreTrainedTokenizerBase,
        train: bool = True,
        max_seq_len: int = 512,
        batch_size: int = 1,
        shuffle: bool = True,
        num_workers: int = 0,
        collate_fn: callable = collate_fn_padding,
        verbose: bool = True):
    """Get data loader for food atlas dataset.
    Args:
        path_data: path to the data (TSV with 'premise', 'hypothesis_string'
            and, for training, 'answer' columns)
        tokenizer: tokenizer
        train: whether the dataset is used to training. if false, the dataset
            will not contain labels
        max_seq_len: maximum sequence length
        batch_size: batch size
        shuffle: whether to shuffle the data
        num_workers: number of workers
        collate_fn: collate function
        verbose: whether to print out the information
    Returns:
        data loader over the (optionally labeled) NLI samples
    """
    data = pd.read_csv(path_data, sep='\t')
    if train:
        data = data[['premise', 'hypothesis_string', 'answer']]
        # 'Skip' marks unusable annotations; drop them from training data.
        data = data[~(data['answer'] == 'Skip')]
    else:
        data = data[['premise', 'hypothesis_string']]
    if verbose:
        print()
        print(f'Number of samples: {data.shape[0]}')
        print()
        if train:
            print(data['answer'].value_counts())
            print()
    # NOTE(review): shuffle defaults to True even for inference loaders —
    # pass shuffle=False for evaluation if sample order matters.
    dataset = FoodAtlasNLIDataset(
        premises=data['premise'].tolist(),
        hypotheses=data['hypothesis_string'].tolist(),
        labels=data['answer'].tolist() if train else None,
        tokenizer=tokenizer,
        max_seq_len=max_seq_len
    )
    data_loader = DataLoader(
        dataset=dataset,
        batch_size=batch_size,
        shuffle=shuffle,
        num_workers=num_workers,
        collate_fn=collate_fn
    )
    return data_loader
| IBPA/SemiAutomatedFoodKBC | src/entailment/_dataset.py | _dataset.py | py | 8,558 | python | en | code | 1 | github-code | 36 | [
{
"api_name": "torch.utils.data.Dataset",
"line_number": 50,
"usage_type": "name"
},
{
"api_name": "transformers.PreTrainedTokenizerBase",
"line_number": 67,
"usage_type": "name"
},
{
"api_name": "torch.LongTensor",
"line_number": 74,
"usage_type": "call"
},
{
"ap... |
27103951389 | from flask import (
Blueprint, redirect, url_for
)
from Glastore.models.product import Product, product_heads
from Glastore.models.window import Window
from Glastore.views.auth import login_required
bp = Blueprint('product', __name__, url_prefix='/product')
@bp.route('/select_next_window/<int:id>')
@login_required
def select_next_window(id):
    """Advance the product's orientation to its next window shape, then
    return to the edit page of the owning quote."""
    item = Product.get(id)
    item.orientation.select_next_window()
    return redirect(url_for('quote.edit', id=item.quote_id))
@bp.route('/rotate_window/<int:id>')
@login_required
def rotate_window(id):
    """Rotate the product's window orientation, then return to the edit
    page of the owning quote."""
    item = Product.get(id)
    item.orientation.rotate_window()
    return redirect(url_for('quote.edit', id=item.quote_id))
@bp.route('/delete/<int:id>')
@login_required
def delete(id):
    """Delete the product, then return to the edit page of the quote it
    belonged to (the quote id is captured before deletion)."""
    doomed = Product.get(id)
    parent_quote_id = doomed.quote.id
    doomed.delete()
    return redirect(url_for('quote.edit', id=parent_quote_id))
| ChrisPoul/Glastore | Glastore/views/product.py | product.py | py | 950 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "flask.Blueprint",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "Glastore.models.product.Product.get",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "Glastore.models.product.Product",
"line_number": 14,
"usage_type": "name"
},
{
... |
15062131417 | import requests
import pandas
import datetime_translator
# GasBuddy numeric brand ids to query (one GraphQL request per brand).
brandIds = { 202, 88, 31, 123, 101, 122, 36, 48, 135 }
data = {}
for brandId in brandIds:
    headers = { 'User-Agent': '', 'content-type': 'application/json' }
    # GraphQL query for this brand's stations in Salt Lake county, UT
    # (a commented variant below targets Davis county instead).
    jsonData = '{"variables":{"area":"salt-lake","brandId":%d,"countryCode":"US","criteria":{"location_type":"county"},"fuel":1,"maxAge":0,"regionCode":"UT"},"query":"query LocationByArea($area: String, $brandId: Int, $countryCode: String, $criteria: Criteria, $fuel: Int, $maxAge: Int, $regionCode: String) { locationByArea( area: $area countryCode: $countryCode criteria: $criteria regionCode: $regionCode ) { displayName locationType stations(brandId: $brandId, fuel: $fuel, maxAge: $maxAge) { results { address { country line1 line2 locality postalCode region } brands { brandId brandingType imageUrl name } latitude longitude fuels id name prices(fuel: $fuel) { cash { nickname postedTime price } credit { nickname postedTime price } discount fuelProduct } } } } }"}' %(brandId)
    #jsonData = '{"variables":{"area":"davis","brandId":%d,"countryCode":"US","criteria":{"location_type":"county"},"fuel":1,"maxAge":0,"regionCode":"UT"},"query":"query LocationByArea($area: String, $brandId: Int, $countryCode: String, $criteria: Criteria, $fuel: Int, $maxAge: Int, $regionCode: String) { locationByArea( area: $area countryCode: $countryCode criteria: $criteria regionCode: $regionCode ) { displayName locationType stations(brandId: $brandId, fuel: $fuel, maxAge: $maxAge) { results { address { country line1 line2 locality postalCode region } brands { brandId brandingType imageUrl name } latitude longitude fuels id name prices(fuel: $fuel) { cash { nickname postedTime price } credit { nickname postedTime price } discount fuelProduct } } } } }"}' %(brandId)
    response = requests.post('https://www.gasbuddy.com/graphql', headers=headers, data=jsonData)
    jsonResponse = response.json()
    stations = jsonResponse['data']['locationByArea']['stations']
    for station in stations['results']:
        # Keep stations of a tracked brand that report a non-zero credit price.
        if int(station['brands'][0]['brandId']) in brandIds and (station['prices'][0]['credit']['price'] != 0):
            stationId = station['id']
            data[stationId] = {
                'StationName': station['name'],
                'BrandName': station['brands'][0]['name'],
                'AddressLine1': station['address']['line1'],
                'City': station['address']['locality'],
                'RegularFuelPrice': '${:.2f}'.format(station['prices'][0]['credit']['price']),
                'TimeSinceReported': datetime_translator.translate(station['prices'][0]['credit']['postedTime']),
                'ReportedBy': station['prices'][0]['credit']['nickname']
            }
# Build the report table: one column per station, then transpose so each
# station is a row, sorted cheapest-first.
output = pandas.DataFrame(data)
# sort_values returns a new frame; assign it back so the printed table is
# actually ordered (the original discarded the sorted result).
# NOTE(review): prices are '$x.xx' strings, so ordering is lexicographic —
# correct only while all prices have the same digit count.
output = output.transpose().sort_values(by='RegularFuelPrice')
print(output)
{
"api_name": "requests.post",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "datetime_translator.translate",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 30,
"usage_type": "call"
}
] |
21413366584 | import pydot
# Baseline pydot styling for every token node; individual tokens can
# override any of these via the `token._.plot` extension dict.
DEFAULT_NODE_ATTRS = {
    'color': 'cyan',
    'shape': 'box',
    'style': 'rounded',
    'fontname': 'palatino',
    'fontsize': 10,
    'penwidth': 2
}
def node_label(token):
    """Return the display label for *token*.

    Prefers an explicit label supplied through the ``token._.plot``
    extension dict; otherwise falls back to the token text and index on the
    first line and its POS / fine-grained tag on the second.
    """
    try:
        label = token._.plot['label']
    # Catch only the failures the lookup can actually produce (missing
    # extension attribute, missing key, or a non-mapping value) instead of a
    # bare `except` that would also swallow KeyboardInterrupt/SystemExit.
    except (AttributeError, KeyError, TypeError):
        label = '{0} [{1}]\n({2} / {3})'.format(
            token.orth_,
            token.i,
            token.pos_,
            token.tag_
        )
    return label
def get_edge_label(from_token, to_token):
    """Default edge labeller: the dependency relation of the child token,
    prefixed with a space so Graphviz doesn't clip it against the edge."""
    return f" {from_token.dep_}"
def to_pydot(tokens, get_edge_label=get_edge_label):
    """Convert dependency-parsed tokens into an undirected pydot graph.

    One styled node is created per token (defaults from DEFAULT_NODE_ATTRS,
    overridable through ``token._.plot``, which is updated in place), and one
    edge per non-root token linking it to its syntactic head.
    """
    graph = pydot.Dot(graph_type='graph')
    # Build one node per token, keyed by its index in the document.
    index_to_node = {}
    for token in tokens:
        try:
            attrs = token._.plot
        except AttributeError:
            attrs = {}
        for key, default in DEFAULT_NODE_ATTRS.items():
            attrs.setdefault(key, default)
        attrs['name'] = token.i
        attrs['label'] = node_label(token)
        node = pydot.Node(**attrs)
        index_to_node[token.i] = node
        graph.add_node(node)
    # Connect each non-root token to its head (skipping heads outside the
    # supplied token collection).
    for token in tokens:
        if token.dep_ == 'ROOT' or token.head not in tokens:
            continue
        parent = index_to_node[token.head.i]
        child = index_to_node[token.i]
        edge = pydot.Edge(
            parent, child, label=get_edge_label(token, token.head),
            fontsize=12
        )
        graph.add_edge(edge)
    return graph
def create_png(tokens, prog=None):
    """Render *tokens* as PNG bytes using Graphviz (via pydot).

    ``prog`` selects the Graphviz layout program (e.g. 'dot', 'neato');
    None uses pydot's default.
    """
    return to_pydot(tokens).create_png(prog=prog)
| cyclecycle/visualise-spacy-tree | visualise_spacy_tree/visualise_spacy_tree.py | visualise_spacy_tree.py | py | 1,757 | python | en | code | 7 | github-code | 36 | [
{
"api_name": "pydot.Dot",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "pydot.Node",
"line_number": 48,
"usage_type": "call"
},
{
"api_name": "pydot.Edge",
"line_number": 63,
"usage_type": "call"
}
] |
30857649928 | import json
import shutil
import hashlib
import os
def get_hash_md5(filename):
    """Return the hex MD5 digest of *filename*, read in 8 KiB chunks so
    arbitrarily large files never have to fit in memory."""
    digest = hashlib.md5()
    with open(filename, 'rb') as stream:
        # iter() with a b'' sentinel stops cleanly at end of file.
        for chunk in iter(lambda: stream.read(8192), b''):
            digest.update(chunk)
    return digest.hexdigest()
# Load the sync configuration once at import time; the script mirrors the
# directory at "OriginalPath" onto the one at "CopyPath".
with open('sourse.json', 'r', encoding='utf-8') as f:
    data = json.load(f)
ORIGINAL_PATH, COPY_PATH = data["OriginalPath"], data["CopyPath"]
def del_item(path):
    """Remove *path*: directories recursively, regular files directly."""
    if os.path.isdir(path):
        # os.rmdir only removes EMPTY directories; stale directories on the
        # backup drive usually still contain files, so remove recursively.
        shutil.rmtree(path)
    else:
        os.remove(path)
def copy_item(org_path, cp_path, item):
    """Copy *item* (a file or directory name) from org_path into cp_path.

    Directories are copied recursively; paths are joined by plain string
    concatenation, so both base paths must end with a separator."""
    source = org_path + item
    if os.path.isdir(source):
        shutil.copytree(source, cp_path + item)
    else:
        shutil.copy(source, cp_path)
def run(dop_path=''):
    """Mirror ORIGINAL_PATH onto COPY_PATH for the subtree at *dop_path*.

    Deletes items present only in the copy, copies items present only in
    the original, recurses into shared directories, and re-copies files
    whose MD5 digests differ. Paths use Windows separators throughout.
    """
    Org_dir_set = set(os.listdir(ORIGINAL_PATH + dop_path))
    # "System Volume Information" is a protected Windows system folder.
    Cp_dir_set = set(os.listdir(COPY_PATH + dop_path)) - {"System Volume Information"}
    for item in Cp_dir_set - Org_dir_set: del_item(COPY_PATH + dop_path + item)
    for item in Org_dir_set - Cp_dir_set: copy_item(ORIGINAL_PATH + dop_path, COPY_PATH + dop_path, item)
    for item in Org_dir_set & Cp_dir_set:
        # Recurse with the ACCUMULATED relative path; passing only the item
        # name (as before) compared the wrong directories below depth one.
        if os.path.isdir(ORIGINAL_PATH + dop_path + item): run(dop_path=f'{dop_path}{item}\\')
        elif get_hash_md5(ORIGINAL_PATH + dop_path + item) != get_hash_md5(COPY_PATH + dop_path + item):
            shutil.copyfile(ORIGINAL_PATH + dop_path + item, COPY_PATH + dop_path + item)
run()
{
"api_name": "hashlib.md5",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 25,
... |
14002518900 | import requests
import os
import re
from lxml import etree
def ParseHTML(url):
    """Download *url* and return its content parsed as an lxml HTML tree."""
    page_source = requests.get(url).text
    return etree.HTML(page_source)
class Comic():
    """Scraper for the 'Attack On Titan' manhua hosted on manhua.fzdm.com."""
    def __init__(self):
        self.baseurl = "https://manhua.fzdm.com/39/"
        self.baseimgurl = "https://p5.manhuapan.com/"
        self.name = "Attack On Titan"
        self.chapters = []
    def GetChapters(self):
        """Populate self.chapters with {'title', 'url'} dicts scraped from
        the series index page; returns self for chaining."""
        html = ParseHTML(self.baseurl)
        nodes = html.xpath('//div[@id="content"]/li/a')
        for node in nodes:
            title = node.text
            url = self.baseurl + node.attrib['href']
            self.chapters.append({'title': title, 'url': url})
        return self
    def GetImgIter(self, url):
        """Yield (page_index, image_url) pairs for a chapter until a page 404s."""
        idx = 0
        while True:
            pageurl = f"{url}index_{str(idx)}.html"
            try:
                # Yield a tuple, not a set: the original 2-element set
                # literal ({url, idx}) unpacked in arbitrary order, so the
                # (index, url) pairs were scrambled nondeterministically.
                yield idx, self.ExtractImg(pageurl)
            except IndexError:
                return
            idx += 1
    def ExtractImg(self, pageurl):
        """Return the full image URL embedded in one chapter page.

        Raises IndexError when the page does not exist (HTTP 404), which
        GetImgIter uses as its end-of-chapter signal.
        """
        res = requests.get(pageurl)
        if res.status_code == 404:
            raise IndexError
        regexp = re.compile(r'(?<=mhurl=").*?(?=";)')
        rawDoc = res.text
        imgurl = self.baseimgurl + regexp.search(rawDoc).group(0)
        return imgurl
    def SaveChapter(self, chapter):
        """Download (currently: print) every page image of *chapter* into
        the '<series name>/<chapter title>/' directory."""
        title = chapter['title']
        url = chapter['url']
        path = f"{self.name}/{title}/"
        os.makedirs(path, exist_ok=True)
        imgurls = self.GetImgIter(url)
        for index, imgurl in imgurls:
            print(imgurl)
            # with requests.get(imgurl, stream=True) as res:
            #     with open(f"{path}{str(index)}.jpg", "wb") as pic:
            #         for chunk in res.iter_content():
            #             pic.write(chunk)
    def Run(self):
        """Fetch the chapter list and download every chapter."""
        self.GetChapters()
        for chapter in self.chapters:
            self.SaveChapter(chapter)
# Smoke test executed at import time: print the page image URLs of chapter
# 001 into "Attack On Titan/test/".
c = Comic()
c.SaveChapter({'title': "test", 'url': "https://manhua.fzdm.com/39/001/"})
# c.ExtractImg("https://manhua.fzdm.com/39/001/index_100.html")
# img = c.ExtractImg("https://manhua.fzdm.com/39/001/index_1.html")
# print(img)
# for chp in c.GetChapters().chapters:
#     print(chp)
| Rickenbacker620/Codes | Python/Comic/comic.py | comic.py | py | 2,192 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "lxml.etree.HTML",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "requests.get",
"line_number"... |
69826427303 | import setuptools
# Read the long description for PyPI from the project README.
with open('README.md', 'r') as f:
    long_description = f.read()
setuptools.setup(
    name='coropy',
    version='0.0.1',
    author='Ante Lojic Kapetanovic',
    author_email='alojic00@fesb.hr',
    description='A set of Python modules for COVID-19 epidemics modeling',
    long_description=long_description,
    long_description_content_type='text/markdown',
    url='https://github.com/antelk/coropy',
    packages=setuptools.find_packages(),
    # Runtime dependencies resolved by pip at install time.
    install_requires=[
        'numpy','scipy', 'scikit-learn', 'matplotlib', 'setuptools'],
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
        'Operating System :: POSIX',
        'Topic :: Scientific/Engineering :: Epidemiology',
        'Intended Audience :: Science/Research',
    ],
    python_requires='>=3.6',
)
| akapet00/coropy | setup.py | setup.py | py | 860 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "setuptools.setup",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 16,
"usage_type": "call"
}
] |
74353820583 | import torch
from numbers import Number
import numpy as np
class RandomMasking(torch.nn.Module):
"""
Random Masking from the paper "Hide-and-Seek: Forcing a Network to be Meticulous for
Weakly-supervised Object and Action Localization"
"""
    def __init__(self, p_mask, patch_size, value):
        """
        Arguments:
            p_mask: float (0.0-1.0) - probability that a patch gets masked
            patch_size: int/tuple/list - size of the patches (must fit into the image)
            value: number or list of three numbers - value of the patches
        """
        super().__init__()
        # Validate argument types up front so errors surface at construction
        # time rather than deep inside forward().
        if not isinstance(value, (Number, list)):
            raise TypeError("Argument value should be a number or list of numbers.")
        if not isinstance(patch_size, (int, tuple, list)):
            raise TypeError("Argument patch_size should be an int, tuple or list.")
        if not isinstance(p_mask, Number):
            raise TypeError("Argument p_mask should be a number.")
        if p_mask < 0 or p_mask > 1:
            raise TypeError("Masking probability should be between 0 and 1.")
        self.p_mask = p_mask
        # Normalize patch_size to a (height, width) pair.
        if isinstance(patch_size, (tuple, list)):
            self.patch_size = patch_size
        else:
            self.patch_size = (patch_size, patch_size)
        self.value = value
def forward(self, img):
"""
Args:
img (Tensor): Tensor image to be masked.
Returns:
img (Tensor): Masked Tensor image.
"""
size = img.shape
if len(size) == 3:
img = img.unsqueeze(0)
size = img.shape
elif len(size) < 3:
raise TypeError("Tensor must have 3 or 4 dimensions.")
reshape = False
if size[1] == 3:
reshape = True
img = torch.permute(img, (0, 2, 3, 1))
size = img.shape
B, H, W = size[0:-1]
if not (H % self.patch_size[0] == 0 and W % self.patch_size[1] == 0):
raise TypeError("Patch size must fit perfectly in image size.")
n_vert = H // self.patch_size[0]
n_hor = W // self.patch_size[1]
n_patches = (B, n_vert, n_hor)
masked = torch.from_numpy(np.random.binomial(1, self.p_mask, n_patches).astype(bool))
blocks = img.view(B, n_vert, self.patch_size[0], n_hor, self.patch_size[1],
3).swapaxes(2, 3)
blocks[masked] = torch.Tensor(self.value)
img = blocks.swapaxes(2, 3).view(size)
if reshape:
img = torch.permute(img, (0, 3, 1, 2))
return img | faberno/SurgicalToolLocalization | transforms/RandomMasking.py | RandomMasking.py | py | 2,599 | python | en | code | 2 | github-code | 36 | [
{
"api_name": "torch.nn",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "numbers.Number",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "numbers.Number",
"line_number": 22,
"usage_type": "argument"
},
{
"api_name": "torch.permute",
"l... |
19262622912 | from datetime import datetime, timedelta
from pokemongo_bot import inventory
from pokemongo_bot.base_task import BaseTask
from pokemongo_bot.worker_result import WorkerResult
from pokemongo_bot.tree_config_builder import ConfigException
class ShowBestPokemon(BaseTask):
    """
    Periodically displays the user best pokemon in the terminal.
    Example config :
    {
        "type": "ShowBestPokemon",
        "config": {
            "enabled": true,
            "min_interval": 60,
            "amount": 5,
            "order_by": "cp",
            "info_to_show": ["cp", "ivcp", "dps"]
        }
    }
    min_interval : The minimum interval at which the pokemon are displayed,
    in seconds (defaults to 120 seconds).
    The update interval cannot be accurate as workers run synchronously.
    amount : Amount of pokemon to show
    order_by : Stat that will be used to get best pokemons
    Available Stats: 'cp', 'iv', 'ivcp', 'ncp', 'dps', 'hp', 'level'
    info_to_show : Info to show for each pokemon
    Available info_to_show :
    'cp',
    'iv_ads',
    'iv_pct',
    'ivcp',
    'ncp',
    'level',
    'hp',
    'moveset',
    'dps'
    """
    SUPPORTED_TASK_API_VERSION = 1

    def initialize(self):
        """Read task settings from the bot config and reset internal state."""
        self.next_update = None
        # Refreshed from the inventory on every work() call; initialized here
        # so the attribute always exists (the original only set it in work()).
        self.pokemons = []
        self.min_interval = self.config.get('min_interval', 120)
        self.amount = self.config.get('amount', 3)
        self.order_by = self.config.get('order_by', 'cp')
        self.info_to_show = self.config.get('info_to_show', [])

    def work(self):
        """
        Displays the pokemon if necessary.
        :return: Always returns WorkerResult.SUCCESS.
        :rtype: WorkerResult
        """
        # Nothing to do when display is disabled or the interval hasn't elapsed.
        if not self.info_to_show or not self.amount or not self._should_print():
            return WorkerResult.SUCCESS

        self.pokemons = inventory.pokemons().all()

        line = self._get_pokemons_line()
        if not line:
            return WorkerResult.SUCCESS

        self.print_pokemons(line)
        return WorkerResult.SUCCESS

    def _should_print(self):
        """
        Returns a value indicating whether the pokemon should be displayed.
        :return: True if the stats should be displayed; otherwise, False.
        :rtype: bool
        """
        return self.next_update is None or datetime.now() >= self.next_update

    def _compute_next_update(self):
        """
        Computes the next update datetime based on the minimum update interval.
        :return: Nothing.
        :rtype: None
        """
        self.next_update = datetime.now() + timedelta(seconds=self.min_interval)

    def print_pokemons(self, pokemons):
        """
        Logs the pokemon into the terminal using an event.
        :param pokemons: The pokemon to display.
        :type pokemons: string
        :return: Nothing.
        :rtype: None
        """
        self.emit_event(
            'show_best_pokemon',
            formatted="*Best Pokemons* {pokemons}",
            data={
                'pokemons': pokemons
            }
        )
        self._compute_next_update()

    def _get_pokemons_line(self):
        """
        Generates a string according to the configuration.
        :return: A string containing pokemons and their info, ready to be displayed.
        :rtype: string
        """
        def get_poke_info(info, pokemon):
            # Raw (unformatted) stat values, used as the sort key.
            poke_info = {
                'cp': pokemon.cp,
                'iv': pokemon.iv,
                'ivcp': pokemon.ivcp,
                'ncp': pokemon.cp_percent,
                'level': pokemon.level,
                'hp': pokemon.hp,
                'dps': pokemon.moveset.dps
            }
            if info not in poke_info:
                # Fixed unbalanced quoting in the original error message.
                raise ConfigException("order_by '{}' isn't available".format(self.order_by))
            return poke_info[info]

        def get_poke_info_formatted(info, pokemon):
            # Human-readable renderings for the terminal line.
            poke_info = {
                'name': pokemon.name,
                'cp': 'CP {}'.format(pokemon.cp),
                'iv_ads': 'A/D/S {}/{}/{}'.format(pokemon.iv_attack, pokemon.iv_defense, pokemon.iv_stamina),
                'iv_pct': 'IV {}'.format(pokemon.iv),
                'ivcp': 'IVCP {}'.format(round(pokemon.ivcp,2)),
                'ncp': 'NCP {}'.format(round(pokemon.cp_percent,2)),
                'level': "Level {}".format(pokemon.level),
                'hp': 'HP {}/{}'.format(pokemon.hp, pokemon.hp_max),
                'moveset': 'Moves: {}'.format(pokemon.moveset),
                'dps': 'DPS {}'.format(round(pokemon.moveset.dps, 2))
            }
            if info not in poke_info:
                raise ConfigException("info '{}' isn't available for displaying".format(info))
            return poke_info[info]

        # Always lead each entry with the pokemon's name.
        info_to_show = ['name'] + self.info_to_show
        pokemons_ordered = sorted(self.pokemons, key=lambda x: get_poke_info(self.order_by, x), reverse=True)
        pokemons_ordered = pokemons_ordered[:self.amount]
        poke_info = ['({})'.format(', '.join([get_poke_info_formatted(x, p) for x in info_to_show])) for p in pokemons_ordered]
        line = ' | '.join(poke_info)
        return line
| PokemonGoF/PokemonGo-Bot | pokemongo_bot/cell_workers/show_best_pokemon.py | show_best_pokemon.py | py | 5,191 | python | en | code | 3,815 | github-code | 36 | [
{
"api_name": "pokemongo_bot.base_task.BaseTask",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "pokemongo_bot.worker_result.WorkerResult.SUCCESS",
"line_number": 61,
"usage_type": "attribute"
},
{
"api_name": "pokemongo_bot.worker_result.WorkerResult",
"line_number... |
27293090578 | from django.conf import settings
from django.core.exceptions import ValidationError
# from django.core.validators import MinValueValidator
from django.db import models
from trip.validators import validator_datetime
from users.models import User
class Company(models.Model):
    """Airline company operating trips."""
    name = models.CharField(
        max_length=64,
        unique=True,
        verbose_name='Компания',
        help_text='Введите название компании'
    )
    class Meta:
        verbose_name = 'Компания'
        verbose_name_plural = 'Компании'
    def __str__(self):
        return self.name
class Plane(models.Model):
    """Aircraft that can be assigned to trips."""
    name = models.CharField(
        max_length=128,
        unique=True,
        verbose_name='Название самолета',
        help_text='Введите название самолета'
    )
    number = models.PositiveIntegerField(
        verbose_name='Номер самолета',
        help_text='Введите номер самолета'
    )
    ready = models.BooleanField(
        default=False,
        verbose_name='Готовность самолета',
        help_text='Измените готовность самолета'
    )
    # Seat count; used by Pass_in_trip.clean() to validate seat numbers.
    capacity = models.PositiveIntegerField(
        verbose_name='Количество мест',
        help_text='Введите кол-во мест в самолете'
    )
    class Meta:
        verbose_name = 'Самолет'
        verbose_name_plural = 'Самолеты'
    def __str__(self):
        return self.name
class Airport(models.Model):
    """Airport together with its time zone (restricted to TZ_CHOICES)."""
    TZ_CHOICES = [
        ("UTC", "UTC"),
        ("Europe/Moscow", "Europe/Moscow"),
        ("Asia/Kamchatka", "Asia/Kamchatka")
    ]
    name = models.CharField(
        max_length=128,
        unique=True,
        verbose_name='Название аэропорта',
        help_text='Введите название аэропорта'
    )
    ap_time_zone = models.CharField(
        max_length=128,
        verbose_name='Таймзона аэропорта',
        help_text='Введите таймзону аэропорта',
        choices=TZ_CHOICES,
        # Fall back to the project-wide default time zone.
        default=settings.TIME_ZONE
    )
    class Meta:
        verbose_name = 'Аэропорт'
        verbose_name_plural = 'Аэропорты'
    def __str__(self):
        return self.name
class Trip(models.Model):
    """A flight: a company's plane flying between two airports."""
    company = models.ForeignKey(
        Company,
        on_delete=models.CASCADE,
        related_name='trips',
        verbose_name='Компания',
        help_text='Компания'
    )
    plane = models.ForeignKey(
        Plane,
        on_delete=models.CASCADE,
        related_name='trips',
        verbose_name='Самолет',
        help_text='Самолет'
    )
    airport_from = models.ForeignKey(
        Airport,
        on_delete=models.CASCADE,
        related_name='trips_from',
        verbose_name='Из аэропорта',
        help_text='Из аэропорта'
    )
    airport_to = models.ForeignKey(
        Airport,
        on_delete=models.CASCADE,
        related_name='trips_to',
        verbose_name='В аэропорт',
        help_text='В аэропорт'
    )
    time_out = models.DateTimeField(
        validators=[validator_datetime, ],
        verbose_name='Дата/Время вылета',
    )
    time_in = models.DateTimeField(
        verbose_name='Дата/Время прилета',
        validators=[validator_datetime, ],
    )

    class Meta:
        verbose_name = 'Перелет'
        verbose_name_plural = 'Перелеты'

    def __str__(self):
        return f'id: {self.id}, по маршруту: {self.airport_from} - {self.airport_to}, вылет {self.time_out}, прибытие {self.time_in}'

    def clean(self):
        # Reject a departure while the plane is still airborne on another trip.
        # Fixes vs. the original:
        #  * exclude this trip itself — otherwise editing an existing trip
        #    compared time_out against its own time_in and always failed;
        #  * a plane with no other trips yields Max('time_in') == None, which
        #    previously raised TypeError on the datetime comparison.
        latest_arrival = (
            self.plane.trips.exclude(pk=self.pk)
            .aggregate(models.Max('time_in'))['time_in__max']
        )
        if latest_arrival is not None and self.time_out <= latest_arrival:
            raise ValidationError('В это время самолет еще в полете.')
class Pass_in_trip(models.Model):
    """Seat assignment: links a passenger to a trip and a seat number."""
    passenger = models.ForeignKey(
        User,
        on_delete=models.CASCADE,
        related_name='pass_in_trip',
        verbose_name='Пассажир',
        help_text='Пассажир'
    )
    # NOTE(review): unique=True makes the seat number unique across ALL trips,
    # not per trip — this looks like it should be a unique-together constraint
    # on (trip, place); confirm intent before changing the schema.
    place = models.PositiveIntegerField(
        unique=True,
        verbose_name='Номер места',
        help_text='Введите номер места',
        # validators=[MinValueValidator(1, 'Место не может быть менее 1.'),]
    )
    trip = models.ForeignKey(
        Trip,
        on_delete=models.CASCADE,
        related_name='pass_in_trips',
        verbose_name='Пассажиры в рейсе',
        help_text='Пассажиры в рейсе',
    )
    class Meta:
        verbose_name = 'Пассажир_место'
        verbose_name_plural = 'Пассажиры_места'
    def __str__(self):
        return f'Пассажир - {self.passenger.first_name} {self.passenger.last_name} место - {self.place} рейс ID -{self.trip.id}'
    def clean(self):
        # The seat number cannot exceed the assigned plane's capacity.
        if self.place > self.trip.plane.capacity:
            raise ValidationError('Место не может быть больше, чем мест в самолете.')
| ZOMini/avia_trip | avia/trip/models.py | models.py | py | 5,272 | python | ru | code | 0 | github-code | 36 | [
{
"api_name": "django.db.models.Model",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "django.db.models",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "django.db.models.CharField",
"line_number": 11,
"usage_type": "call"
},
{
"api_name"... |
29557430446 | import os
import sys
from typing import Optional
from brownie import network, accounts
def network_name() -> Optional[str]:
    """Resolve the target network name.

    Prefers the currently active brownie network; otherwise parses a
    ``--network <name>`` pair from the CLI arguments; defaults to "mainnet".
    """
    if network.show_active() is not None:
        return network.show_active()
    cli_args = sys.argv[1:]
    net_ind = next(
        (cli_args.index(arg) for arg in cli_args if arg == "--network"), len(cli_args)
    )

    net_name = None
    # Guard against "--network" being the final CLI token (no value follows);
    # the original indexed cli_args[net_ind + 1] and raised IndexError.
    if net_ind < len(cli_args) - 1:
        net_name = cli_args[net_ind + 1]

    if net_name is None:
        return "mainnet"
    return net_name
# Pick the network-specific address constants at import time; unsupported
# networks fail fast instead of running with wrong addresses.
if network_name() in ("optimism-main", "optimism-fork"):
    print(f"Using config_optimism.py addresses")
    from utils.config_optimism import *
elif network_name() in ("arbitrum-main", "arbitrum-fork"):
    print(f"Using arbitrum.py addresses")
    from utils.config_arbitrum import *
else:
    raise EnvironmentError(f"{network_name()} is not supported")
# 3000 tokens expressed with 18 decimals.
min_rewards_amount = 3000 * 10**18
def get_is_live():
    # "development" is the local brownie/ganache network; anything else is live.
    return network.show_active() != "development"
def get_env(name, is_required=True, message=None, default=None):
    """Read environment variable *name*.

    Raises EnvironmentError (with *message* if given) when the variable is
    required but unset; otherwise returns *default* for a missing variable.
    """
    try:
        return os.environ[name]
    except KeyError:
        if is_required:
            raise EnvironmentError(message or f"Please set {name} env variable")
        return default
def get_deployer_account(is_live):
    """Return the brownie account to deploy from.

    On a live network the DEPLOYER env variable is mandatory; locally the
    first auto-generated account is used unless DEPLOYER is set.
    """
    if is_live and "DEPLOYER" not in os.environ:
        raise EnvironmentError(
            "Please set DEPLOYER env variable to the deployer account name"
        )

    deployer = (
        accounts.load(os.environ["DEPLOYER"])
        if is_live or "DEPLOYER" in os.environ
        else accounts[0]
    )
    return deployer
def prompt_bool():
    """Prompt on stdin until the user answers yes/y or no/n; return bool.

    The original returned None on any other input, which callers could
    mistake for "no"; now the user is re-prompted until a valid answer.
    """
    while True:
        choice = input().lower()
        if choice in {"yes", "y"}:
            return True
        if choice in {"no", "n"}:
            return False
        sys.stdout.write("Please respond with 'yes' or 'no'\n")
| lidofinance/curve-rewards-manager | utils/config.py | config.py | py | 1,846 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "brownie.network.show_active",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "brownie.network",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "brownie.network.show_active",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "... |
40588074038 | import cadquery as cq
from math import sin, pi
import numpy as np
plateRadius = 15
plateCenterHole = 4
pinRadius = 3.5/2
pinInter = 18
SCALE= 100 # scale profile dimentions
# input data from csv file of wing profile
data = np.genfromtxt('data/s7075-il.csv',delimiter=',')
pts = data[9:89]
# if we can normalize vectors< then we can scale it
def normalize(data: np.ndarray, scale: float = None) -> list:
    '''
    Normalize a 2D array that describes a wing profile and scale it.

    data: numpy 2D array of profile points.
    scale: scaling factor applied after normalization; defaults to the
        module-level SCALE constant (generalized from the original
        hard-coded global).

    Returns a list of vector tuples: cq.Sketch doesn't accept any other
    format, even a list of lists.
    '''
    if scale is None:
        scale = SCALE
    # Divide by the Frobenius norm, then scale back up.
    res = data / np.linalg.norm(data)
    res = res * scale
    return [tuple(item) for item in res.tolist()]
pts2 = normalize(pts)
################
# Sketch zone
prof = (
cq.Sketch()
.spline(pts2)
.close()
.assemble()
)
# Sketch Zone end
plate = (
cq.Workplane()
.circle(plateRadius)
.circle(plateCenterHole)
.rect(pinInter,pinInter,forConstruction=True)
.vertices()
.circle(pinRadius)
.extrude(5)
)
###########
#sweep test
path = (
(10,-1,10),
(50,15,-15),
(100,0,0)
)
pathWire = cq.Workplane().spline(path)
"""
res = (
cq.Workplane('YZ')
.placeSketch(prof)
.sweep(pathWire)
)
"""
###########
def makeIt(pts):
    """Loft the closed profile ``pts`` into 20 stacked, twisted segments
    and union them into one solid.

    NOTE(review): ``show_object`` is injected by CQ-editor at runtime;
    this function is display-only and returns nothing.
    """
    wp = cq.Workplane("XY").polyline(pts).close().workplane()
    result = None
    for i in range(0, 20):
        # Next cross-section: a shifted and rotated copy of the profile.
        wp2 = (
            wp.transformed(offset=cq.Vector(0, -20, 5),
                           rotate=cq.Vector(1, 0, 0))
            .polyline(pts).close()
            .workplane()
        )
        # Fixed idiom: identity comparison with None (was `result == None`).
        if result is None:
            result = wp2.loft(combine=True)
        else:
            nextpart = wp2.loft(combine=True)
            result = result.union(nextpart)
        wp = wp.transformed(offset=cq.Vector(0, -5, 5),
                            rotate=cq.Vector(18, 0, 0)).polyline(pts).close().workplane()
    show_object(result, options=dict(alpha=0.8, color='blue'))
def makeSweep(pts):
    """Sweep the closed profile ``pts`` along a sine-shaped parametric path.

    NOTE(review): ``debug`` and ``show_object`` are injected by CQ-editor
    at runtime; this function is display-only and returns nothing.
    """
    path = (
        cq.Workplane()
        .parametricCurve(lambda t: (100 * sin(t * pi / 180), t, 0),
                         start=0, stop=10, N=1000)
    )
    debug(path)
    res = (
        cq.Workplane('YZ')
        .polyline(pts)
        .close()
        .sweep(path)
    )
    # Fixed typo: the display option key is "alpha", not "aplha" — the
    # misspelled key was silently ignored by the viewer.
    show_object(res, options=dict(alpha=0.7, color='magenta'))
makeSweep(pts2)
| Opezdol/pohhmann | src/cooling/carlson.py | carlson.py | py | 2,475 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "numpy.genfromtxt",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "numpy.array",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.norm",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
... |
2884377589 | # -*- coding: utf8 -*-
__author__ = 'yqzhang'
from utils.util import get_requests, form_post,login,get_code_token
def detail(gooids):
    """Log in with the fixed test account, then fetch the shop's
    goods-detail JSON for the given goods id."""
    login('0086', '18810432995')
    endpoint = 'https://jf.lagou.com/integral/mall/goods/detail.json'
    payload = {'goodsId': gooids}
    return get_requests(url=endpoint, remark='商品详情', data=payload)
# detail() | Ariaxie-1985/aria | api_script/jianzhao_web/gouinH5/detail.py | detail.py | py | 334 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "utils.util.login",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "utils.util.get_requests",
"line_number": 10,
"usage_type": "call"
}
] |
3731695404 | from sqlalchemy import (
Boolean,
Column,
DateTime,
Integer,
String,
ForeignKey,
)
from sqlalchemy import exc as sqlalchemy_exc
from sqlalchemy.dialects.postgresql import (
JSONB,
UUID,
ARRAY,
)
from sqlalchemy.sql.expression import false, null
from sqlalchemy.orm import relationship
from abenga_site.py.lib.models.base import Base
class Person(Base):
    """A user account in the core schema.

    NOTE(review): ``password`` is a plain String column — presumably the
    application layer stores a hash here; confirm before relying on it.
    """
    __tablename__ = "people"
    __table_args__ = {"schema": "core"}
    # Surrogate primary key; ``uid`` is the stable external identifier.
    id = Column(Integer, primary_key=True, name="id", quote=False)
    uid = Column(UUID, unique=True, nullable=False, name="uid", quote=False)
    username = Column(String, unique=True, name="username", quote=False)
    email = Column(String, unique=True, nullable=False, name="email", quote=False)
    primary_phone_number = Column(String(32), name="primary_phone_number", quote=False)
    # login_type selects which credential columns (password vs. oauth_*) apply.
    login_type = Column(String(255), name="login_type", quote=False)
    password = Column(String, name="password", quote=False)
    oauth_provider = Column(String, name="oauth_provider", quote=False)
    oauth_token = Column(String, name="oauth_token", quote=False)
    first_name = Column(String, name="first_name", quote=False)
    last_name = Column(String, name="last_name", quote=False)
    other_names = Column(String, name="other_names", quote=False)
    date_added = Column(DateTime, name="date_added", quote=False)
    contact_email = Column(String, name="contact_email", quote=False)
    # Semi-structured contact/address data kept as JSONB documents.
    other_phone_numbers = Column(JSONB, name="other_phone_numbers", quote=False)
    postal_address = Column(JSONB, name="postal_address", quote=False)
    physical_address = Column(JSONB, name="physical_address", quote=False)
    active = Column(Boolean, name="active", quote=False)
    def __repr__(self):
        return f"{self.first_name} {self.last_name}<{self.email}>"
class LoginSession(Base):
    """One authenticated session for a Person.

    NOTE(review): ``time_ended`` is nullable=False although sessions start
    with ``ended`` false — confirm how open sessions populate this column.
    """
    __tablename__ = "login_sessions"
    __table_args__ = {"schema": "core"}
    id = Column(Integer, primary_key=True, name="id", quote=False)
    person_id = Column(
        Integer, ForeignKey("core.people.id"), name="person_id", quote=False
    )
    # Opaque session token handed to the client.
    session_id = Column(
        String(128), unique=True, nullable=False, name="session_id", quote=False
    )
    time_started = Column(DateTime, nullable=False, name="time_started", quote=False)
    last_action_time = Column(
        DateTime, nullable=False, name="last_action_time", quote=False
    )
    # server_default="f": sessions are open until explicitly ended.
    ended = Column(Boolean, name="ended", server_default="f", quote=False)
    time_ended = Column(DateTime, nullable=False, name="time_ended", quote=False)
    def __repr__(self):
        return f"LoginSession<{self.person_id}:{self.session_id}:{self.time_started}>"
| abenga/abenga.com | py/lib/models/core.py | core.py | py | 2,678 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "abenga_site.py.lib.models.base.Base",
"line_number": 23,
"usage_type": "name"
},
{
"api_name": "sqlalchemy.Column",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "sqlalchemy.Integer",
"line_number": 27,
"usage_type": "argument"
},
{
"api_... |
4788623202 | from pytz import timezone
from datetime import datetime
import re
from urllib.parse import urlparse, urljoin
from flask import request, escape, Request
import tiktoken
from werkzeug.datastructures import ImmutableMultiDict
class HTTPMethodOverrideMiddleware(object):
    """WSGI middleware that lets a client override the HTTP method via a
    query-string field (default ``_method``), e.g. ``POST /x?_method=DELETE``.

    Only methods in ``allowed_methods`` are honoured; methods that carry no
    body additionally get CONTENT_LENGTH forced to "0".
    """
    allowed_methods = frozenset([
        'GET',
        'HEAD',
        'POST',
        'DELETE',
        'PUT',
        'PATCH',
        'OPTIONS'
    ])
    bodyless_methods = frozenset(['GET', 'HEAD', 'OPTIONS', 'DELETE'])

    def __init__(self, app, field='_method'):
        self.app = app
        self._regex = re.compile('.*' + field + '=([a-zA-Z]+)(&.*|$)')

    def __call__(self, environ, start_response):
        match = self._regex.match(environ.get('QUERY_STRING', ''))
        if match is None:
            # No override requested: pass the request through untouched.
            return self.app(environ, start_response)
        override = match.group(1).upper()
        if override in self.allowed_methods:
            environ['REQUEST_METHOD'] = override
            if override in self.bodyless_methods:
                environ['CONTENT_LENGTH'] = '0'
        return self.app(environ, start_response)
class SanitizedRequest(Request):
    """Sanitizes form fields automatically to escape HTML.

    The raw values remain available as ``unsanitized_form``.
    NOTE(review): overwriting ``self.form`` relies on werkzeug caching the
    form property in the instance dict, and ``items()`` yields only the
    first value per key, so multi-valued fields are collapsed — confirm
    both behaviours are acceptable for this app.
    """
    def __init__(self, environ, populate_request=True, shallow=False):
        super(SanitizedRequest, self).__init__(environ, populate_request, shallow)
        self.unsanitized_form = self.form
        if self.form:
            sanitized_form = {}
            for k, v in self.form.items():
                # HTML-escape every submitted value before handlers see it.
                sanitized_form[k] = escape(v)
            self.form = ImmutableMultiDict(sanitized_form)
def is_safe_url(target):
    # Only allow redirect targets that stay on this host over http(s),
    # preventing open-redirect attacks via a user-supplied "next" URL.
    ref_url = urlparse(request.host_url)
    test_url = urlparse(urljoin(request.host_url, target))
    return test_url.scheme in ('http', 'https') and ref_url.netloc == test_url.netloc
def now_mytz():
    """Return the current datetime localized to Europe/Rome."""
    return datetime.now(tz=timezone('Europe/Rome'))
class TokenCounter:
    """Returns the number of tokens used by a list of messages.
    Based on: https://platform.openai.com/docs/guides/chat/managing-tokens
    """

    def __init__(self, model="gpt-3.5-turbo-0301"):
        self.model = model
        try:
            self.encoding = tiktoken.encoding_for_model(model)
        except KeyError:
            # Unknown model name: fall back to the cl100k_base encoding.
            self.encoding = tiktoken.get_encoding("cl100k_base")

    def num_tokens_from_string(self, text):
        """Return the token count of a single string under this encoding."""
        return len(self.encoding.encode(text))

    def num_tokens_from_messages(self, messages):
        """Returns the number of tokens used by a list of messages.
        From: https://platform.openai.com/docs/guides/chat/managing-tokens
        """
        if self.model == "gpt-3.5-turbo-0301":  # note: future models may deviate from this
            num_tokens = 0
            for message in messages:
                num_tokens += 4  # every message follows <im_start>{role/name}\n{content}<im_end>\n
                for key, value in message.items():
                    num_tokens += self.num_tokens_from_string(value)
                    if key == "name":  # if there's a name, the role is omitted
                        num_tokens += -1  # role is always required and always 1 token
            num_tokens += 2  # every reply is primed with <im_start>assistant
            return num_tokens
        else:
            raise NotImplementedError(f"""num_tokens_from_messages() is not presently implemented for model {self.model}.
See https://github.com/openai/openai-python/blob/main/chatml.md for information on how messages are converted to tokens.""")
| mkmenta/chatgpt-research | utils.py | utils.py | py | 3,525 | python | en | code | 0 | github-code | 36 | [
{
"api_name": "re.compile",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "flask.Request",
"line_number": 38,
"usage_type": "name"
},
{
"api_name": "flask.escape",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "werkzeug.datastructures.Immutab... |
2353697076 | import os
import dotenv
from telethon import sync
_users_cache = set() # to avoid double DMs
dotenv.load_dotenv()
MESSAGE_TEMPLATE = os.getenv("AUTO_DM")
CURSOR_FILE = "cursor.txt"
def _read_cursor() -> int:
    """Return the id of the last processed admin-log event (0 if none saved).

    A missing, empty or corrupt cursor file means "start from the beginning"
    instead of crashing the run (the original raised ValueError on int("")).
    """
    if os.path.exists(CURSOR_FILE):
        with open(CURSOR_FILE) as file:
            try:
                return int(file.read())
            except ValueError:
                return 0
    return 0
def _write_cursor(cursor: int):
    """Persist the id of the last processed admin-log event."""
    with open(CURSOR_FILE, "w") as file:
        file.write(str(cursor))
async def _dm_user(client: sync.TelegramClient, user_id: int):
    """Send MESSAGE_TEMPLATE to the user once, skipping users already
    messaged during this process lifetime (via the module-level cache)."""
    try:
        if user_id in _users_cache:
            return
        await client.send_message(user_id, MESSAGE_TEMPLATE)
        _users_cache.add(user_id)
    except Exception as e:
        # Best-effort: log and continue so one failed DM doesn't stop the run.
        print(f"Failed to DM user {user_id}: {e}")
async def process(client: sync.TelegramClient, channel):
    """DM every user who joined *channel* since the persisted cursor,
    then advance and persist the cursor."""
    min_id = _read_cursor()
    logs = await client.get_admin_log(channel, join=True, min_id=min_id)
    # Oldest first, so the cursor only ever moves forward.
    for log in logs[::-1]:
        try:
            if log.joined and log.input_user and hasattr(log.input_user, "user_id"):
                user_id = log.input_user.user_id
                await _dm_user(client, user_id)
                min_id = log.id
        except Exception as e:
            # Skip malformed entries; keep processing the rest of the log.
            print(f"Failed to process log {log.id}: {e}")
    _write_cursor(min_id)
| rebryk/supertelega | dm.py | dm.py | py | 1,280 | python | en | code | 15 | github-code | 36 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number"... |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.