seq_id string | text string | repo_name string | sub_path string | file_name string | file_ext string | file_size_in_byte int64 | program_lang string | lang string | doc_type string | stars int64 | dataset string | pt string | api list |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
8407358533 | from loguru import logger
from sqlite3 import Connection
class Repo:
def __init__(self, db: Connection):
self.db = db
def create_schema(self):
with open('database/init.sql') as f:
script = f.read()
self.db.executescript(script)
def get_subscriptions_list(self)-> list:
# Получение всех подписок
tags = self.db.execute("SELECT DISTINCT tags FROM subscriptions;").fetchall()
tags = ["".join(tag) for tag in tags]
return tags
def add_subscription(self, tags:str) -> bool:
# добавить tags в подписки
try:
self.db.execute("INSERT INTO subscriptions (tags) VALUES (?);", (tags,))
self.db.commit()
logger.info(f'{tags} - добавлено в подписки')
return True
except:
logger.debug(f'Ошибка добавления записи {tags}')
return False
def upsert_subscription(self, id:int, tags:str):
# обновляем существующую запись по id
result = self.db.execute('SELECT * FROM subscriptions WHERE id = ?', (id,)).fetchone()
if result is None:
# если подписка с данным id не найдена, создаем новую запись
self.db.execute('INSERT INTO subscriptions (id, tags) VALUES (?, ?)', (id, tags))
logger.info('Запись создана')
else:
# если подписка с данным id уже существует, обновляем информацию о ней
self.db.execute('UPDATE subscriptions SET tags = ? WHERE id = ?', (tags, id))
logger.info('Запись обновлена')
self.db.commit()
def filter_new_posts(self, posts) -> list|None:
# Фильтрует посты, записывая их в бд и возвращает список ссылок на новые
if posts:
new_posts = []
for post in posts:
if self.db.execute('SELECT id FROM posts WHERE id = ?', (post['id'],)).fetchone() is None:
new_posts.append(post)
self.db.execute('INSERT INTO posts (id) VALUES (?)', (post['id'],))
self.db.commit()
logger.info(f'Получено {len(new_posts)} новых постов')
return new_posts
else:
return None
def delete_sub(self, tags:str) -> bool:
# Удаляет запись
result = self.db.execute('SELECT * FROM subscriptions WHERE tags = ?', [tags]).fetchone()
if result:
self.db.execute("DELETE FROM subscriptions WHERE tags = ?;", [tags])
self.db.commit()
logger.info(f'{tags} - удалено')
return True
else:
logger.warning(f'{tags} - не найдено')
return False
| hermanguilliman/boorubot | repo.py | repo.py | py | 3,078 | python | ru | code | 0 | github-code | 1 | [
{
"api_name": "sqlite3.Connection",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "loguru.logger.info",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "loguru.logger",
"line_number": 27,
"usage_type": "name"
},
{
"api_name": "loguru.logger.debu... |
38782292285 | import logging
import itertools
import pytz
from parsedatetime import Calendar
from tzlocal import get_localzone
from .dbmanager import get_lastest_problem_id, Submission
from .utils import WebsiteSession
LOGGER = logging.getLogger(__name__)
class ScraperMeta(type):
    """Metaclass that auto-registers every scraper subclass by its ``name``."""

    name = 'Scraper'
    # Singleton cache of instantiated scrapers, keyed by scraper name.
    loaded = {}
    # All subclasses that declared a non-empty ``name``, keyed by that name.
    registered = {}

    def __new__(mcs, name, bases, nmspc):
        cls = super().__new__(mcs, name, bases, nmspc)
        # Every class using this metaclass must define ``name`` (BaseScraper
        # sets it to None, which skips registration).
        cls_name = nmspc['name']
        if cls_name:
            mcs.registered[cls_name] = cls
            LOGGER.info('Register %s: %s', mcs.name, cls_name)
        return cls

    @classmethod
    def get(mcs, name, reactor):
        # Lazily instantiate and cache exactly one scraper per name.
        if name not in mcs.loaded:
            mcs.loaded[name] = mcs.registered[name](reactor)
        return mcs.loaded[name]
build_scraper = ScraperMeta.get
class BaseScraper(metaclass=ScraperMeta):
    """Common scraper behaviour: option merging, session setup and login.

    Subclasses must set ``name``, ``host`` and ``tzinfo`` and implement
    ``init`` and ``fetch``.
    """

    name = None
    host = None
    tzinfo = None
    # Per-subclass default option values, overridden by the reactor's config.
    defaults = {}
    CAL = Calendar()

    def __init__(self, reactor):
        self.reactor = reactor
        # Start from the subclass defaults, then overlay user config for this scraper.
        self.options = dict(self.defaults)
        self.options.update(self.reactor.options.get(self.name, {}))
        assert self.tzinfo, 'Timezone missing'
        self.session = WebsiteSession(self.host, login=self.login)
        self.init()
        self.login()
        # type(type(self)) is the metaclass, so ``.name`` here is 'Scraper'.
        LOGGER.debug("%s '%s' has inited: %s", type(type(self)).name, self.name, self.options)

    def init(self):
        """Init configuration here."""
        raise NotImplementedError

    def login(self):
        """Login to the website to scrape. Will be called when authentication failed

        :return: Whether successful logged in or not
        :rtype: Bool
        """
        return False

    def fetch(self):
        """Fetch new accepted submissions from the website.

        :rtype [Submission]:
        """
        raise NotImplementedError

    def parse_datetime(self, s):
        # Parse a human-readable timestamp (e.g. "2 hours ago") in this
        # site's timezone; parseDT returns (datetime, parse_status).
        return self.CAL.parseDT(s, tzinfo=self.tzinfo)[0]
class LeetCodeScraper(BaseScraper):
    """Scraper for accepted LeetCode submissions."""

    name = 'leetcode'
    host = 'https://leetcode.com'
    tzinfo = get_localzone()
    defaults = {
        'username': None,
        'password': None,
    }

    def init(self):
        """Read and validate credentials from the merged options."""
        self.username, self.password = map(self.options.get, ('username', 'password'))
        assert self.username and self.password, 'Missing username and/or password in config file'

    def login(self):
        """Log in using the CSRF token delivered via cookie on the login page."""
        login_path = '/accounts/login/'
        # GET first so the csrftoken cookie gets set.
        self.session.get(login_path)
        csrf_token = self.session.cookies['csrftoken']
        r = self.session.post(login_path, data={
            'login': self.username,
            'password': self.password,
            'csrfmiddlewaretoken': csrf_token,
        })
        info_incorrect = 'The login and/or password you specified are not correct'
        assert info_incorrect not in r.text, info_incorrect
        return True

    def fetch(self):
        # Fetch a list of all accepted submissions
        main_soup = self.session.soup('/problemset/algorithms/')
        # problem path -> [problem-id cell, title anchor, "ago" cell (filled later)]
        ac_dict = {}
        for row in main_soup.select('#problemList > tbody > tr'):
            ac, prob_id, title_path, _, _, _, _ = row('td')
            # Only keep rows flagged as accepted.
            if ac.span['class'] != ['ac']:
                continue
            title = title_path.a
            path = title['href']
            ac_dict[path] = [prob_id, title, None]
        self.fetch_submit_times_by(ac_dict, get_lastest_problem_id(self.name))
        # Refine data
        def normalize_time(date):
            # Appends ' ago' to every comma-separated component except the
            # last one before handing the string to the calendar parser.
            words = date.split(',')
            for i in range(len(words) - 1):
                words[i] += ' ago'
            return ','.join(words)
        return [
            Submission(self.name,
                       prob_id.string,
                       title.string,
                       # NOTE(review): calling the session presumably resolves
                       # the path to a full URL -- confirm in WebsiteSession.
                       self.session(path),
                       self.parse_datetime(normalize_time(ago.string)))
            for path, (prob_id, title, ago) in ac_dict.items() if ago is not None
        ]

    def fetch_submit_times_by(self, ac_dict, latest_id=None):
        """Walk the paginated submissions list, filling the 'ago' slot of each
        entry in *ac_dict*; stop once *latest_id* (already stored) appears."""
        for i in itertools.count(1):
            sub_soup = self.session.soup('/submissions/{}'.format(i))
            rows = sub_soup.select('#result_testcases > tbody > tr')
            # An empty page marks the end of the submissions list.
            if not rows:
                break
            for row in rows:
                ago, title_path, status, _, _ = row('td')
                if 'status-accepted' not in status.a['class']:
                    continue
                sub = ac_dict[title_path.a['href']]
                if sub[0].string == latest_id:
                    return
                # Latest submission wins because pages are visited newest-first;
                # the slot is overwritten on each later (older) occurrence.
                sub[-1] = ago
class POJScraper(BaseScraper):
    """Scraper for accepted POJ (poj.org) submissions."""

    name = 'poj'
    host = 'http://poj.org'
    tzinfo = pytz.timezone('Asia/Shanghai')
    defaults = {
        'username': None,
    }

    def init(self):
        """Read and validate the POJ user id from the merged options."""
        self.user_id = self.options['username']
        assert self.user_id, 'Username missing'

    def fetch(self):
        # problem id -> submit-time table cell of its accepted run
        ac_dict = {}
        latest_id = get_lastest_problem_id(self.name)
        # result=0 filters the status page down to accepted submissions.
        soup = self.session.soup('/status', params={
            'user_id': self.user_id,
            'result': 0,
        })
        while True:
            # Skip the header row of the status table.
            rows = soup.select('body > table:nth-of-type(2) > tr')[1:]
            if not rows:
                break
            for row in rows:
                _, _, prob_id, _, _, _, _, _, time = row('td')
                prob_id = prob_id.a.string
                # NOTE(review): this break only exits the row loop; the outer
                # while keeps paginating until an empty page is hit -- confirm
                # whether stopping the pagination here was intended.
                if prob_id == latest_id:
                    break
                ac_dict[prob_id] = time  # assert sorted by submit_time
            next_page = soup.select('body > p:nth-of-type(2) > a')[-1]['href']
            soup = self.session.soup(next_page)
        return [
            Submission(self.name,
                       prob_id,
                       # Problem title is fetched from each problem page.
                       self.session.soup(
                           '/problem', params={'id': prob_id}).select('div.ptt')[0].string,
                       self.session.last_url,
                       self.parse_datetime(time.string))
            for prob_id, time in ac_dict.items()
        ]
| yehzhang/Show-My-Solutions | show_my_solutions/scrapers.py | scrapers.py | py | 5,946 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "parsedatetime.Calendar",
"line_number": 42,
"usage_type": "call"
},
{
"api_name": "utils.WebsiteSession",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": "tzlocal.... |
71721970275 | import torch
import torch.nn as nn
import numpy as np
import sys
import os
sys.path.append(os.path.join(os.getcwd(), "lib")) # HACK add the lib folder
from models.backbone_module import Pointnet2Backbone
from models.voting_module import VotingModule
from models.proposal_module import ProposalModule
from models.graph_module import GraphModule
from models.caption_module import SceneCaptionModule, TopDownSceneCaptionModule
class CapNet(nn.Module):
    """Scan2Cap-style network: detect objects in a 3D point cloud and
    (optionally) generate a caption for each proposal.

    Pipeline: PointNet++ backbone -> Hough voting -> proposal generation ->
    optional graph enhancement -> optional caption generation.
    """

    def __init__(self, num_class, vocabulary, embeddings, num_heading_bin, num_size_cluster, mean_size_arr,
                 input_feature_dim=0, num_proposal=256, num_locals=-1, vote_factor=1, sampling="vote_fps",
                 no_caption=False, use_topdown=False, query_mode="corner",
                 graph_mode="graph_conv", num_graph_steps=0, use_relation=False, graph_aggr="add",
                 use_orientation=False, num_bins=6, use_distance=False, use_new=False,
                 emb_size=300, hidden_size=512):
        # NOTE(review): ``use_new`` is accepted but never used in this class.
        super().__init__()

        self.num_class = num_class
        self.num_heading_bin = num_heading_bin
        self.num_size_cluster = num_size_cluster
        self.mean_size_arr = mean_size_arr
        # One mean box size per size cluster.
        assert(mean_size_arr.shape[0] == self.num_size_cluster)
        self.input_feature_dim = input_feature_dim
        self.num_proposal = num_proposal
        self.vote_factor = vote_factor
        self.sampling = sampling
        self.no_caption = no_caption
        self.num_graph_steps = num_graph_steps

        # --------- PROPOSAL GENERATION ---------
        # Backbone point feature learning
        self.backbone_net = Pointnet2Backbone(input_feature_dim=self.input_feature_dim)

        # Hough voting
        self.vgen = VotingModule(self.vote_factor, 256)

        # Vote aggregation and object proposal
        self.proposal = ProposalModule(num_class, num_heading_bin, num_size_cluster, mean_size_arr, num_proposal, sampling)

        if use_relation: assert use_topdown  # only enable use_relation in topdown captioning module

        # Relational graph over proposals (skipped when num_graph_steps == 0).
        if num_graph_steps > 0:
            self.graph = GraphModule(128, 128, num_graph_steps, num_proposal, 128, num_locals,
                                     query_mode, graph_mode, return_edge=use_relation, graph_aggr=graph_aggr,
                                     return_orientation=use_orientation, num_bins=num_bins, return_distance=use_distance)

        # Caption generation
        if not no_caption:
            if use_topdown:
                self.caption = TopDownSceneCaptionModule(vocabulary, embeddings, emb_size, 128,
                                                         hidden_size, num_proposal, num_locals, query_mode, use_relation)
            else:
                self.caption = SceneCaptionModule(vocabulary, embeddings, emb_size, 128, hidden_size, num_proposal)

    def forward(self, data_dict, use_tf=True, is_eval=False):
        """ Forward pass of the network

        Args:
            data_dict: dict
                {
                    point_clouds,
                    lang_feat
                }

                point_clouds: Variable(torch.cuda.FloatTensor)
                    (B, N, 3 + input_channels) tensor
                    Point cloud to run predicts on
                    Each point in the point-cloud MUST
                    be formated as (x, y, z, features...)
        Returns:
            end_points: dict
        """
        #######################################
        #                                     #
        #           DETECTION BRANCH          #
        #                                     #
        #######################################

        # --------- BACKBONE FEATURE LEARNING ---------
        data_dict = self.backbone_net(data_dict)

        # --------- HOUGH VOTING ---------
        xyz = data_dict["fp2_xyz"]
        features = data_dict["fp2_features"]
        data_dict["seed_inds"] = data_dict["fp2_inds"]
        data_dict["seed_xyz"] = xyz
        data_dict["seed_features"] = features

        xyz, features = self.vgen(xyz, features)
        # L2-normalize the vote features along the channel dimension (dim=1).
        features_norm = torch.norm(features, p=2, dim=1)
        features = features.div(features_norm.unsqueeze(1))
        data_dict["vote_xyz"] = xyz
        data_dict["vote_features"] = features

        # --------- PROPOSAL GENERATION ---------
        data_dict = self.proposal(xyz, features, data_dict)

        #######################################
        #                                     #
        #           GRAPH ENHANCEMENT         #
        #                                     #
        #######################################

        if self.num_graph_steps > 0: data_dict = self.graph(data_dict)

        #######################################
        #                                     #
        #            CAPTION BRANCH           #
        #                                     #
        #######################################

        # --------- CAPTION GENERATION ---------
        if not self.no_caption:
            data_dict = self.caption(data_dict, use_tf, is_eval)

        return data_dict
| daveredrum/Scan2Cap | models/capnet.py | capnet.py | py | 4,977 | python | en | code | 89 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
31037849632 | from setuptools import setup
# Runtime dependencies for the vm_network_migration package.
install_requires = [
    # NOTE: Apache Beam tests depend on this library and cannot
    # currently upgrade their httplib2 version.
    # Please see https://github.com/googleapis/google-api-python-client/pull/84
    "httplib2>=0.9.2,<1dev",
    "google-auth>=1.16.0",
    "google-auth-httplib2>=0.0.3",
    "google-api-core>=1.18.0,<2dev",
    "google-api-python-client",
    "google",
    "timeout_decorator",
    "unittest2"
]

setup(
    name='vm_network_migration',
    version='1.0',
    description='',
    author='',
    author_email='',
    test_suite = 'tests',
    packages=['vm_network_migration'],  # same as name
    install_requires=install_requires,  # external packages as dependencies
)
{
"api_name": "setuptools.setup",
"line_number": 18,
"usage_type": "call"
}
] |
72143448995 | import datetime
from typing import Any, Dict, List, Type, TypeVar, Union
import attr
from dateutil.parser import isoparse
from ..types import UNSET, Unset
T = TypeVar("T", bound="PatchedPipelineInvocation")
@attr.s(auto_attribs=True)
class PatchedPipelineInvocation:
    """Dynamically removes fields from serializer.
    https://stackoverflow.com/questions/27935558/dynamically-exclude-or-include-a-field-in-django-rest-framework-serializer"""

    # auto_attribs turns each annotated class attribute below into an attrs
    # field; UNSET marks values absent from the API payload.
    url: Union[Unset, str] = UNSET
    id: Union[Unset, int] = UNSET
    arn: Union[Unset, str] = UNSET
    release: Union[Unset, str] = UNSET
    pipeline: Union[Unset, str] = UNSET
    created: Union[Unset, datetime.datetime] = UNSET
    # Catch-all for response keys not modelled above (exposed via the
    # mapping dunders at the bottom of the class).
    additional_properties: Dict[str, Any] = attr.ib(init=False, factory=dict)

    def to_dict(self) -> Dict[str, Any]:
        """Serialize to a plain dict, omitting fields that are still UNSET."""
        url = self.url
        id = self.id
        arn = self.arn
        release = self.release
        pipeline = self.pipeline
        created: Union[Unset, str] = UNSET
        if not isinstance(self.created, Unset):
            # datetimes are serialized as ISO-8601 strings.
            created = self.created.isoformat()

        field_dict: Dict[str, Any] = {}
        # Unknown keys go in first, so the modelled fields win on a name clash.
        field_dict.update(self.additional_properties)
        field_dict.update({})
        if url is not UNSET:
            field_dict["url"] = url
        if id is not UNSET:
            field_dict["id"] = id
        if arn is not UNSET:
            field_dict["arn"] = arn
        if release is not UNSET:
            field_dict["release"] = release
        if pipeline is not UNSET:
            field_dict["pipeline"] = pipeline
        if created is not UNSET:
            field_dict["created"] = created

        return field_dict

    @classmethod
    def from_dict(cls: Type[T], src_dict: Dict[str, Any]) -> T:
        """Build an instance from a response dict; keys not modelled as
        fields end up in ``additional_properties``."""
        d = src_dict.copy()
        url = d.pop("url", UNSET)
        id = d.pop("id", UNSET)
        arn = d.pop("arn", UNSET)
        release = d.pop("release", UNSET)
        pipeline = d.pop("pipeline", UNSET)
        created: Union[Unset, datetime.datetime] = UNSET
        _created = d.pop("created", UNSET)
        if not isinstance(_created, Unset):
            created = isoparse(_created)

        patched_pipeline_invocation = cls(
            url=url,
            id=id,
            arn=arn,
            release=release,
            pipeline=pipeline,
            created=created,
        )

        # Whatever remains after popping the known keys is kept verbatim.
        patched_pipeline_invocation.additional_properties = d
        return patched_pipeline_invocation

    @property
    def additional_keys(self) -> List[str]:
        """Names of the extra (unmodelled) properties."""
        return list(self.additional_properties.keys())

    def __getitem__(self, key: str) -> Any:
        return self.additional_properties[key]

    def __setitem__(self, key: str, value: Any) -> None:
        self.additional_properties[key] = value

    def __delitem__(self, key: str) -> None:
        del self.additional_properties[key]

    def __contains__(self, key: str) -> bool:
        return key in self.additional_properties
| caltechads/brigid-api-client | brigid_api_client/models/patched_pipeline_invocation.py | patched_pipeline_invocation.py | py | 2,948 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.TypeVar",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "typing.Union",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "types.Unset",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "types.UNSET",
"line_number... |
4474715554 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Jan 2 13:44:25 2021
@author: Scott T. Small
This module demonstrates documentation as specified by the `NumPy
Documentation HOWTO`_. Docstrings may extend over multiple lines. Sections
are created with a section header followed by an underline of equal length.
"""
import numpy as np
import allel
from .sequtils import get_seg
import bisect
from collections import Counter
def classical_stats(nhaplo, counts):
    """Compute heterozygosity, diversity (pairwise differences) and Tajima's D.

    All calculations are per site.

    Parameters
    ----------
    nhaplo : int
        total number of haplotypes
    counts : np.array[Nsnp_seg]
        number of derived alleles at each position for each segment

    Returns
    -------
    H, PI, D : np.ndarray, np.ndarray, float
        per-site expected heterozygosity, per-site pairwise differences,
        and Tajima's D for the segment
    """
    # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``float`` is the documented replacement.
    Nhap = float(nhaplo)
    # Watterson's a1/a2 and Tajima's (1989) c1/c2 constants.
    a1 = np.sum([1.0 / i for i in range(1, nhaplo)])
    a2 = np.sum([1.0 / i**2 for i in range(1, nhaplo)])
    c1 = (Nhap + 1) / (3 * (Nhap - 1)) - 1 / a1  # b1-1/a1
    c2 = 2 * (Nhap**2 + Nhap + 3) / (9 * Nhap * (Nhap - 1)) - (Nhap + 2) / (a1 * Nhap) + a2 / a1**2
    Nsnp = float(counts.shape[0])
    # Expected heterozygosity (at each site) for snp data Arlequin 8.1.1.2 p.115
    H = 2.0 / (Nhap - 1) * (counts - counts**2 / Nhap)
    # Mean number of pairwise differences (at each site) for snp data Arlequin 8.1.2.1 p.116
    PI = 2.0 / (Nhap * (Nhap - 1)) * (counts * (Nhap - counts))
    theta_pi = np.sum(PI)
    # Watterson's estimator of theta.
    theta_s = Nsnp / a1
    # Tajima's D, formula from Tajima's paper (1989).
    D = (theta_pi - theta_s) / np.sqrt(c1 / a1 * Nsnp + (c2 / (a1**2 + a2)) * Nsnp * (Nsnp - 1))
    return H, PI, D
def haplo_het(pos, gt, win_size, length_bp):
    """Estimate haplotype diversity in consecutive fixed-size windows.

    Parameters
    ----------
    pos : array_like
        variant positions (bp)
    gt : genotype data
        passed through ``get_seg`` to keep segregating sites only
    win_size : int
        window length in bp
    length_bp : int
        total length of the simulated segment (bp)

    Returns
    -------
    haphet_mean : float
        mean haplotype diversity across windows (NaN windows ignored)
    haphet_std : float
        standard deviation of haplotype diversity across windows

    """
    hpseg, pos_s = get_seg(gt, pos)
    hh_wins = []
    coords = (range(0, length_bp + win_size, win_size))
    # Consecutive coordinate pairs form the (start, end] windows.
    windows = list(zip(coords[0::1], coords[1::1]))
    for s, e in windows:
        # Keep only the sites falling in this window.
        hp1_win = hpseg.compress((pos_s > s) & (pos_s <= e), axis=0)
        hh_ = allel.haplotype_diversity(hp1_win)
        hh_wins.append(hh_)
    haphet_mean = np.nanmean(hh_wins)
    haphet_std = np.nanstd(hh_wins)
    return haphet_mean, haphet_std
def tajimaD(pos, gt, win_size, length_bp):
    """Calculate Tajima's D in fixed-size windows along the segment.

    Parameters
    ----------
    pos : array_like
        variant positions (bp)
    gt : genotype data
        passed through ``get_seg`` to keep segregating sites only
    win_size : int
        window size in bp (passed as ``size`` to ``allel.windowed_tajima_d``)
    length_bp : int
        length of the simulated segment (bp)

    Returns
    -------
    tajd_mean : float
        mean Tajima's D across windows (NaN windows ignored)
    tajd_std : float
        standard deviation of Tajima's D across windows

    """
    gtseg, pos_s = get_seg(gt, pos)
    ac = gtseg.count_alleles()
    tajd_, *_ = allel.windowed_tajima_d(pos_s, ac, size=win_size, start=1, stop=length_bp)
    tajd_mean = np.nanmean(tajd_)
    tajd_std = np.nanstd(tajd_)
    return tajd_mean, tajd_std
def pi_window(pos, gt, win_size, length_bp):
    """Calculate nucleotide diversity (pi) in fixed-size windows.

    Parameters
    ----------
    pos : array_like
        variant positions (bp)
    gt : genotype data
        passed through ``get_seg`` to keep segregating sites only
    win_size : int
        window size in bp
    length_bp : int
        length of the simulated segment (bp)

    Returns
    -------
    pi_mean : float
        mean windowed diversity (NaN windows ignored)
    pi_std : float
        standard deviation of windowed diversity

    """
    gtseg, pos_s = get_seg(gt, pos)
    ac = gtseg.count_alleles()
    pi, *_ = allel.windowed_diversity(pos_s, ac, size=win_size, start=1, stop=length_bp)
    pi_mean = np.nanmean(pi)
    pi_std = np.nanstd(pi)
    return pi_mean, pi_std
def exp_het(pos, gt):
    """Calculate the expected rate of heterozygosity per variant under HWE.

    Parameters
    ----------
    pos : array_like
        variant positions (bp)
    gt : genotype data
        passed through ``get_seg`` to keep segregating sites only

    Returns
    -------
    het_mean : float
        mean expected heterozygosity across variants (NaN ignored)
    het_std : float
        standard deviation across variants

    """
    gtseg, pos_s = get_seg(gt, pos)
    # Allele frequencies per variant feed the HWE expectation.
    af = gtseg.count_alleles().to_frequencies()
    het = allel.heterozygosity_expected(af, ploidy=2)
    het_mean = np.nanmean(het)
    het_std = np.nanstd(het)
    return het_mean, het_std
def het_one_win(dataslice, Nhap):
    """Compute haplotypic heterozygosity of a given window.

    Parameters
    ----------
    dataslice : np.array
        subset of the data corresponding to a given window of the sequence
    Nhap : int
        total number of haplotypes

    Returns
    -------
    het : float
        haplotypic heterozygosity of dataslice
    """
    # Collapse each row of allele calls into a single string so identical
    # haplotypes compare equal, then count how often each one occurs.
    counts = Counter(
        ''.join(repr(allele) for allele in dataslice[row, :]) for row in range(Nhap)
    )
    total_pairs = float(Nhap) ** 2
    return 1.0 - sum(n ** 2 for n in counts.values()) / total_pairs
def haplo_win(pos, hap, win_size, length_bp):
    """Compute haplotypic heterozygosity in windows sliding along the genome.

    Parameters
    ----------
    pos : np.array
        positions of SNP for each segment
    hap : np.array
        haplotype data for each segment
    win_size : int
        length of the sliding windows considered as haplotypes (bp)
    length_bp : int
        length of each simulated segment (bp)

    Returns
    -------
    haphet_mean, haphet_std : float, float
        mean and standard deviation of haplotypic heterozygosity
    """
    Nhap = hap.shape[0]
    L = length_bp
    win_size = int(win_size)
    # Column index of the first SNP at/after each window boundary.
    chunks = [bisect.bisect(pos, x) for x in range(0, L, win_size)]
    hets = [het_one_win(hap[:, chunks[i]:chunks[i+1]], Nhap) for i in range(len(chunks)-1)]
    # ``np.float`` was deprecated in NumPy 1.20 and removed in 1.24; the
    # builtin ``float`` is the documented replacement.
    Nhap = float(Nhap)
    # n/(n-1) small-sample correction applied to both moments.
    haphet_mean = Nhap / (Nhap - 1.0) * np.mean(hets)
    haphet_std = Nhap / (Nhap - 1.0) * np.std(hets)
    return haphet_mean, haphet_std
| stsmall/abc_scripts2 | project/stat_modules/popstats.py | popstats.py | py | 6,127 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "numpy.float",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "numpy.sum",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "numpy.float",
"line_number": 43,
... |
24858642559 | # -*- coding: utf-8 -*-
import datetime
from django.core.paginator import Paginator
from django.shortcuts import render, get_object_or_404
from blogs.models import Post
def index(request):
    """Render the blog index with visible posts, newest first."""
    posts = Post.objects.filter(visible=True).order_by('-created')
    try:
        page = int(request.GET.get('page'))
    except (TypeError, ValueError):
        # Missing or non-numeric ?page= falls back to the first page.
        page = 1
    objects = Paginator(posts, 5)
    # NOTE(review): ``page`` is parsed but never used -- the template gets the
    # whole Paginator instead of ``objects.page(page)``. Looks like a bug;
    # confirm against blogs/index.html before changing.
    context = {
        'posts': objects,
    }
    return render(request, 'blogs/index.html', context)
def read(request, slug):
    """Render a single post looked up by its slug (404 when absent)."""
    return render(
        request,
        'blogs/read.html',
        {'post': get_object_or_404(Post, slug=slug)},
    )
| wd5/abakron | abakron/apps/blogs/views.py | views.py | py | 681 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "blogs.models.Post.objects.filter",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "blogs.models.Post.objects",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "blogs.models.Post",
"line_number": 11,
"usage_type": "name"
},
{
... |
71602513635 | # -*- coding: utf-8 -*-
"""
Converts the Microsoft Band GPS data to the open GPX format.
WORK IN PROGRESS
"""
import json
from pprint import pprint
import re
import matplotlib.pyplot as plt
import seaborn as sea
import datetime as dt
import matplotlib.dates as dates
import argparse
import isodate
import itertools as it
bandDataFile = 'GetActGPS.txt'
activityType = 'Running'
# For the few oddball cases where there are missing key/pairs, fill in with properly formatted 0s
class chkDict(dict):
    """Dict that fabricates schema-appropriate defaults for missing keys.

    Keys containing 'uration' get a zero ISO-8601 duration, keys containing
    'Time' a zero-filled timestamp, keys containing 'location' a zeroed GPS
    record, and anything else plain 0.
    """

    def __missing__(self, key):
        if re.search(r'uration', key):
            return 'PT0M0S'
        if re.search(r'Time', key):
            return '0000-00-00T00:00:00.000+00:00'
        if re.search(r'location', key):
            return {"latitude": 0, "longitude": 0, "elevationFromMeanSeaLevel": 0}
        return 0
# Clean data for JSON (remove newlines and nextpages)
with open(bandDataFile) as inputfile:
    rawData = ' '.join([line.strip() for line in inputfile])
# Drop the paging seams (`} {` plus nextPage/itemCount metadata) so the
# concatenated pages become one JSON object.
rawData = re.sub(r',\"nextPage\":\"https:.+?(?=\")\",\"itemCount\":[0-9]*\} \{',r',',rawData.rstrip())
# After merging pages, the "*Activities" keys repeat; number each occurrence
# (e.g. runActivities{0}, runActivities{1}, ...) so they stay distinct in the
# parsed dict. The counters afterwards hold the number of replacements made.
countFixRun = it.count()
countFixBike = it.count()
countFixGolf = it.count()
rawData = re.sub(r'bikeActivities', lambda x: 'bikeActivities{{{}}}'.format(next(countFixBike)),rawData)
rawData = re.sub(r'runActivities', lambda x: 'runActivities{{{}}}'.format(next(countFixRun)),rawData)
rawData = re.sub(r'golfActivities', lambda x: 'golfActivities{{{}}}'.format(next(countFixGolf)),rawData)
# Load our data! chkDict supplies zero-defaults for missing keys.
data=json.loads(rawData, object_pairs_hook=chkDict)
# GPX 1.1 document skeleton (header/footer written around the track points).
headerString = '<?xml version="1.0" encoding="UTF-8"?>\n<gpx\n\tversion="1.1"\n\tcreator="BandSandbox apps@dendriticspine.com"\n\t\
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"\n\txmlns="http://www.topografix.com/GPX/1/1"\n\t\
xsi:schemaLocation="http://www.topografix.com/GPX/1/1 http://www.topografix.com/GPX/1/1/gpx.xsd"\n\t\
xmlns:gpxtpx="http://www.garmin.com/xmlschemas/TrackPointExtension/v1">\n<trk>\n'
endingString = '</trkseg>\n</trk>\n</gpx>\n'

# Accumulators shared by the activity-extraction branches below.
gpsData = []           # one [lat, lon, ele, type, time, speed, hr] bundle per activity
activityStart = []     # ISO start time per activity (for the GPX <time> tag)
activityStartStr = []  # filesystem-safe start time per activity (for filenames)
# Per-activity scratch lists, rebound after each activity is appended to gpsData.
speedData = []
gpsTimeCalc = []
mapPointType = []
heartRatePoint = []
pointLat = []
pointLong = []
pointEle = []
# -------------------------------------------------
# BICYCLING ACTIVITY DATA
# -------------------------------------------------
if (activityType == 'Cycling'):
    # Pulling out relevant data from the JSON array.
    # next(countFixBike) now yields the number of bikeActivities{N} keys created above.
    for i1 in range(0,next(countFixBike)):
        for i in range(0, len(data['bikeActivities{'+str(i1)+'}'])):
            # Strip fractional seconds and the UTC offset so strptime can parse.
            currStartTime = re.sub('.\d+[-+]\d\d:\d\d','',data['bikeActivities{'+str(i1)+'}'][i]['startTime'])
            dtStartTime = dt.datetime.strptime(currStartTime, '%Y-%m-%dT%H:%M:%S')
            activityStart.append(dtStartTime.strftime('%Y-%m-%dT%H:%M:%SZ'))
            activityStartStr.append(dtStartTime.strftime('%Y-%m-%d-%H-%M-%S'))
            for igps in range(0,len(data['bikeActivities{'+str(i1)+'}'][i]['mapPoints'])):
                mapPointType.append(data['bikeActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['mapPointType'])
                secSinceStart = data['bikeActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['secondsSinceStart']
                # Absolute timestamp of the point = activity start + offset.
                gpsTimeCalcTemp = dtStartTime + dt.timedelta(seconds=secSinceStart)
                gpsTimeCalc.append(gpsTimeCalcTemp.strftime('%Y-%m-%dT%H:%M:%SZ'))
                heartRatePoint.append(data['bikeActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['heartRate'])
                speedData.append(data['bikeActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['speed'])
                pointLat.append(data['bikeActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['location']['latitude'])
                pointLong.append(data['bikeActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['location']['longitude'])
                pointEle.append(data['bikeActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['location']['elevationFromMeanSeaLevel'])
            gpsData.append([pointLat, pointLong, pointEle, mapPointType, gpsTimeCalc, speedData, heartRatePoint])
            # Rebind (not clear in place) the scratch lists so gpsData keeps its copies.
            speedData = []
            gpsTimeCalc = []
            mapPointType = []
            heartRatePoint = []
            pointLat = []
            pointLong = []
            pointEle = []
# -------------------------------------------------
# RUNNING ACTIVITY DATA
# -------------------------------------------------
if (activityType == 'Running'):
    # Pulling out relevant data from the JSON array
    # (same structure as the bicycling branch, keyed on runActivities{N}).
    for i1 in range(0,next(countFixRun)):
        for i in range(0, len(data['runActivities{'+str(i1)+'}'])):
            # Strip fractional seconds and the UTC offset so strptime can parse.
            currStartTime = re.sub('.\d+[-+]\d\d:\d\d','',data['runActivities{'+str(i1)+'}'][i]['startTime'])
            dtStartTime = dt.datetime.strptime(currStartTime, '%Y-%m-%dT%H:%M:%S')
            activityStart.append(dtStartTime.strftime('%Y-%m-%dT%H:%M:%SZ'))
            activityStartStr.append(dtStartTime.strftime('%Y-%m-%d-%H-%M-%S'))
            for igps in range(0,len(data['runActivities{'+str(i1)+'}'][i]['mapPoints'])):
                mapPointType.append(data['runActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['mapPointType'])
                secSinceStart = data['runActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['secondsSinceStart']
                gpsTimeCalcTemp = dtStartTime + dt.timedelta(seconds=secSinceStart)
                gpsTimeCalc.append(gpsTimeCalcTemp.strftime('%Y-%m-%dT%H:%M:%SZ'))
                heartRatePoint.append(data['runActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['heartRate'])
                speedData.append(data['runActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['speed'])
                pointLat.append(data['runActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['location']['latitude'])
                pointLong.append(data['runActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['location']['longitude'])
                pointEle.append(data['runActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['location']['elevationFromMeanSeaLevel'])
            gpsData.append([pointLat, pointLong, pointEle, mapPointType, gpsTimeCalc, speedData, heartRatePoint])
            # Rebind (not clear in place) the scratch lists so gpsData keeps its copies.
            speedData = []
            gpsTimeCalc = []
            mapPointType = []
            heartRatePoint = []
            pointLat = []
            pointLong = []
            pointEle = []
# -------------------------------------------------
# GOLF ACTIVITY DATA
# -------------------------------------------------
if (activityType == 'Golf'):
    # Pulling out relevant data from the JSON array
    # (same structure as the bicycling branch, keyed on golfActivities{N}).
    for i1 in range(0,next(countFixGolf)):
        for i in range(0, len(data['golfActivities{'+str(i1)+'}'])):
            # Strip fractional seconds and the UTC offset so strptime can parse.
            currStartTime = re.sub('.\d+[-+]\d\d:\d\d','',data['golfActivities{'+str(i1)+'}'][i]['startTime'])
            dtStartTime = dt.datetime.strptime(currStartTime, '%Y-%m-%dT%H:%M:%S')
            activityStart.append(dtStartTime.strftime('%Y-%m-%dT%H:%M:%SZ'))
            activityStartStr.append(dtStartTime.strftime('%Y-%m-%d-%H-%M-%S'))
            for igps in range(0,len(data['golfActivities{'+str(i1)+'}'][i]['mapPoints'])):
                mapPointType.append(data['golfActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['mapPointType'])
                secSinceStart = data['golfActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['secondsSinceStart']
                gpsTimeCalcTemp = dtStartTime + dt.timedelta(seconds=secSinceStart)
                gpsTimeCalc.append(gpsTimeCalcTemp.strftime('%Y-%m-%dT%H:%M:%SZ'))
                heartRatePoint.append(data['golfActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['heartRate'])
                speedData.append(data['golfActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['speed'])
                pointLat.append(data['golfActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['location']['latitude'])
                pointLong.append(data['golfActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['location']['longitude'])
                pointEle.append(data['golfActivities{'+str(i1)+'}'][i]['mapPoints'][igps]['location']['elevationFromMeanSeaLevel'])
            gpsData.append([pointLat, pointLong, pointEle, mapPointType, gpsTimeCalc, speedData, heartRatePoint])
            # Rebind (not clear in place) the scratch lists so gpsData keeps its copies.
            speedData = []
            gpsTimeCalc = []
            mapPointType = []
            heartRatePoint = []
            pointLat = []
            pointLong = []
            pointEle = []
# Write one GPX file per recorded activity.
for p in range(0,len(activityStart)):
    fileNameString = activityType + '_' + activityStartStr[p] + '.gpx'
    # ``with`` guarantees the file is closed even if a write fails
    # (the original open()/close() pair leaked the handle on error).
    with open(fileNameString, 'w') as f:
        f.write(headerString)
        nameTimeString = '<name><![CDATA[{0} {1}]]></name><time>{1}</time>\n\n<trkseg>\n'.format(activityType, activityStart[p])
        f.write(nameTimeString)
        # One <trkpt> element per GPS sample of this activity.
        for q in range(0,len(gpsData[p][0])):
            gpsPointString = '<trkpt lat="{0}" lon="{1}"><ele>{2}</ele><name>{3}</name><time>{4}</time><speed>{5}</speed><desc>{6}</desc></trkpt>\n'.format(gpsData[p][0][q],gpsData[p][1][q],gpsData[p][2][q],gpsData[p][3][q],gpsData[p][4][q],gpsData[p][5][q],gpsData[p][6][q])
            f.write(gpsPointString)
        f.write(endingString)
f.close() | cryptogramber/Microsoft-Band-Utils | Band-Data-Analysis/ConvertBandGPX.py | ConvertBandGPX.py | py | 9,152 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "re.search",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "re.search",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "re.sub",
"line_number": 41,
"us... |
73498023712 | import setuptools
from ast import literal_eval
name = 'asterion'

# Read the version string without importing the package (importing could
# require the package's own dependencies at build time).
with open(f'{name}/version.py') as file:
    # Assuming version.py follows format __version__ = '<version_string>'
    line = file.readline().strip()
    version = literal_eval(line.split(' = ')[1])

description = 'Fits the asteroseismic helium-II ionisation zone glitch ' + \
              'present in the mode frequencies of solar-like oscillators.'

packages = setuptools.find_packages(include=[name, f'{name}.*'])
author = 'Alex Lyttle'
url = 'https://github.com/alexlyttle/asterion'
classifiers = [
    "Programming Language :: Python :: 3",
    "License :: OSI Approved :: MIT License",
    "Operating System :: OS Independent",
]

# Core, docs and tests requirements each live in their own requirements file.
with open('requirements.txt') as file:
    install_requires = file.read().splitlines()

with open('docs/requirements.txt') as file:
    docs_require = file.read().splitlines()

with open('tests/requirements.txt') as file:
    tests_require = file.read().splitlines()

setuptools.setup(
    name=name,
    version=version,
    description=description,
    packages=packages,
    author=author,
    url=url,
    classifiers=classifiers,
    install_requires=install_requires,
    extras_require={
        'docs': docs_require,
        'tests': tests_require,
    },
    package_data={
        # Ship bundled NetCDF data and matplotlib style files.
        '': ['*.nc', '*.mplstyle'],
    },
    include_package_data=True,
    python_requires='>=3.8',
    # Fixed typo: setuptools expects 'license'; the misspelled 'licence'
    # keyword was silently dropped, leaving the metadata field empty.
    license='MIT',
)
| alexlyttle/asterion | setup.py | setup.py | py | 1,415 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "ast.literal_eval",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "setuptools.find_packages",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_number": 35,
"usage_type": "call"
}
] |
16819999895 | from requests import Session
from requests.structures import CaseInsensitiveDict
class BaseSession(Session):
    """A requests Session pre-configured for the developer portal.

    The session authenticates with a single cookie taken from a Selenium
    webdriver cookie dict (expects the keys 'name' and 'value').
    """
    def __init__(self, driver_cookie: dict):
        super().__init__()
        # NOTE(review): disables TLS certificate verification for every request.
        self.verify = False
        cookie_header = f'{driver_cookie["name"]}={driver_cookie["value"]}'
        self.headers = CaseInsensitiveDict({
            'Content-Type': 'application/x-www-form-urlencoded',
            'cookie': cookie_header,
        })
| fonbeauty/developer_portal | common/sessions.py | sessions.py | py | 408 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "requests.Session",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "requests.structures.CaseInsensitiveDict",
"line_number": 10,
"usage_type": "call"
}
] |
26554305585 | """Module containing the classes Contingency and ContingencyType."""
from enum import unique, Enum
from copy import deepcopy
from collections import defaultdict
import logging
from typing import Any, Callable, Set as TgSet, Dict, List, Optional
from rxncon.core.effector import Effector, StructEquivalences, QualSpec, StateEffector, TrivialStructEquivalences, \
NotEffector, AndEffector, OrEffector, StructCounter
from rxncon.core.reaction import Reaction, OutputReaction
from rxncon.venntastic.sets import Set as VennSet, ValueSet, Intersection, Complement, Union, UniversalSet
from rxncon.core.state import State
LOGGER = logging.getLogger(__name__)
@unique
class ContingencyType(Enum):
    """The ContingencyTypes requirement, inhibition are known as `strict` contingencies, whereas the
    positive, negative ContingencyTypes are referred to as quantitative.

    The enum values are the symbols used in the rxncon contingency notation."""
    requirement = '!'
    inhibition = 'x'
    positive = 'k+'
    negative = 'k-'
    no_effect = '0'
    unknown = '?'
class Contingency:
    """Contingency holds the triple `reaction`, `type`, `effector` describing a contingency in a rxncon model.
    Contingency objects are constructed from ContingencyListEntry objects, that live in the module
    rxncon.input.shared.contingency_list."""

    def __init__(self, reaction: Reaction, contingency_type: ContingencyType,
                 effector: Effector, validate_equivs_specs: bool=True) -> None:
        # `validate_equivs_specs` may be disabled by callers that build the
        # object in an intermediate, not-yet-consistent state.
        self.reaction, self.contingency_type, self.effector = reaction, contingency_type, effector
        if validate_equivs_specs:
            self.validate_equivs_specs()

    def __eq__(self, other: object) -> bool:
        if not isinstance(other, Contingency):
            # Let Python fall back to the reflected comparison.
            return NotImplemented
        return self.reaction == other.reaction and self.contingency_type == other.contingency_type and \
               self.effector == other.effector

    def __repr__(self) -> str:
        return str(self)

    def __str__(self) -> str:
        # BUGFIX: the closing parenthesis was missing from the format string.
        return 'Contingency({0}, {1}, {2})'.format(str(self.reaction), str(self.contingency_type), str(self.effector))

    def clone(self) -> 'Contingency':
        """Return a deep copy so the clone's effector can be mutated freely."""
        return deepcopy(self)

    def with_merged_struct_effector(self, equivs: Optional[StructEquivalences]=None,
                                    counter: Optional[StructCounter]=None,
                                    namespace: Optional[List[str]]=None) -> 'Contingency':
        """Returns a Contingency object where the structure information is merged among all Effector objects using
        `equivs`, an object that holds equivalent molecules: different names referring to the same molecule. For
        more details, see the `to_global_struct_effector` and `collect_global_equivs` methods in Effector."""
        structured = self.clone()
        equivs, counter = structured.effector.collect_global_equivs(equivs, counter, namespace)
        structured.effector = structured.effector.to_global_struct_effector(equivs, counter, namespace)
        # Re-validate: merging must yield consistent struct indices and specs.
        structured.validate_struct_indices()
        structured.validate_equivs_specs()
        return structured

    def to_structured(self, counter_start: Optional[int]=None) -> 'Contingency':
        """Returns a Contingency object where the structure information is merged among all Effector objects. This
        method first determines the equivalences, after which it calls the `with_merged_struct_effector` method."""
        LOGGER.debug('to_structured: {}'.format(str(self)))
        if isinstance(self.effector, StateEffector) and self.effector.is_structured:
            # A fully structured StateEffector is fine.
            if not self.effector.states[0].is_global and not isinstance(self.reaction, OutputReaction):
                assert any(component in self.reaction.components_lhs_structured for component in
                           self.effector.states[0].components), \
                    "Non-overlapping contingency: {0} does not match structured reaction : {1} (components: {2})" \
                    .format(str(self.effector), str(self.reaction), str(self.reaction.components_lhs_structured))
            return self
        elif isinstance(self.effector, StateEffector) and not self.effector.is_structured:
            # For a non-structured StateEffector, assume the Specs appearing in the Effector
            # match those appearing in the Reaction.
            equivs = StructEquivalences()
            struct_components = {spec.to_non_struct_spec(): spec for spec in self.reaction.components_lhs_structured}
            for spec in self.effector.expr.specs:
                try:
                    equivs.add_equivalence(QualSpec([], struct_components[spec.to_component_spec()]),
                                           QualSpec([str(self.reaction)], spec.to_component_spec()))
                except KeyError:
                    # Spec does not appear in the reaction: no equivalence to add.
                    pass
            return self.with_merged_struct_effector(equivs, StructCounter(counter_start), [str(self.reaction)])
        elif self.effector.is_structured:
            # A fully structured Boolean Effector needs to have its structure indices merged.
            return self.with_merged_struct_effector()
        else:
            # For a non-structured Boolean Effector, assume all Specs that could match, actually do match.
            struct_components = {spec.to_non_struct_spec(): spec for spec in self.reaction.components_lhs_structured}
            equivs = TrivialStructEquivalences(struct_components)  # pylint: disable=redefined-variable-type
            return self.with_merged_struct_effector(equivs)

    def to_venn_set(self, k_plus_strict: bool=False, k_minus_strict: bool=False, structured: bool=True,
                    state_wrapper: Callable[[State], Any]=lambda x: x) -> VennSet[Any]:
        """Returns a Venntastic Set object corresponding to the Contingency: requirements are put in a ValueSet,
        inhibitions in a ValueSet within a Complement. If `k_plus_strict` / `k_minus_strict`, then positive and
        negative Contingencies are translated into strict requirements resp. strict inhibitions. If `structured`
        is False, the structure information is discarded. Optionally all States can be wrapped in some other
        class by providing a `state_wrapper`."""
        def parse_effector(eff: Effector) -> VennSet:
            # Recursively translate the Effector tree into Venntastic sets.
            if isinstance(eff, StateEffector):
                if structured:
                    return ValueSet(state_wrapper(eff.expr))
                else:
                    return ValueSet(state_wrapper(eff.expr.to_non_structured()))
            elif isinstance(eff, NotEffector):
                return Complement(parse_effector(eff.expr))
            elif isinstance(eff, OrEffector):
                return Union(*(parse_effector(x) for x in eff.exprs))
            elif isinstance(eff, AndEffector):
                return Intersection(*(parse_effector(x) for x in eff.exprs))
            else:
                raise AssertionError('Unknown Effector {}'.format(str(eff)))

        if k_plus_strict:
            positive = (ContingencyType.requirement, ContingencyType.positive)
        else:
            positive = (ContingencyType.requirement,) # type: ignore
        if k_minus_strict:
            negative = (ContingencyType.inhibition, ContingencyType.negative)
        else:
            negative = (ContingencyType.inhibition,) # type: ignore

        if self.contingency_type in positive:
            return parse_effector(self.effector)
        elif self.contingency_type in negative:
            return Complement(parse_effector(self.effector))
        else:
            # no_effect / unknown (and non-strict k+/k-) impose no constraint.
            return UniversalSet()

    def validate_struct_indices(self) -> None:
        """Assert that every index is only used once."""
        specs = [spec for state in self.effector.states for spec in state.specs]
        index_to_specs = defaultdict(set)  # type: Dict[int, TgSet]
        for spec in specs:
            assert spec.struct_index is not None, 'Struct index not assigned in spec {}, ' \
                                                  'contingency {}'.format(spec, self)
            index_to_specs[spec.struct_index].add(spec.to_component_spec())
        assert all(len(x) == 1 for _, x in index_to_specs.items()), 'Structure indices not uniquely assigned in {}'\
            .format(index_to_specs)

    def validate_equivs_specs(self) -> None:
        """Assert that the component Specs appearing in the struct equivalences are appearing either
        in the States of the Effector or in the Reaction."""
        from_equivs = [spec.to_non_struct_spec() for spec in self.effector.equivs_specs]
        from_states = [spec.to_non_struct_spec() for state in self.effector.states for spec in state.components]
        from_reaction = [spec.to_non_struct_spec() for spec in self.reaction.components]
        for spec in from_equivs:
            assert spec in from_states or spec in from_reaction, \
                'Unknown Spec {} appearing in equivalences for reaction {}'.format(spec, self.reaction)
| rxncon/rxncon | rxncon/core/contingency.py | contingency.py | py | 9,032 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "enum.Enum",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "enum.unique",
"line_number": 20,
"usage_type": "name"
},
{
"api_name": "rxncon.core.reaction.Reaction... |
#!/usr/bin/env python
import os
import sys
import re

try:
    from setuptools import setup
    setup
except ImportError:
    from distutils.core import setup
    setup

# Convenience shortcut: `python setup.py publish` uploads a source dist.
if sys.argv[-1] == "publish":
    os.system("python setup.py sdist upload")
    sys.exit()

# Handle encoding
major, minor1, minor2, release, serial = sys.version_info
if major >= 3:
    def rd(filename):
        """Read a text file as UTF-8 (Python 3)."""
        # Use a context manager so the file is closed even if read() raises.
        with open(filename, encoding="utf-8") as f:
            return f.read()
else:
    def rd(filename):
        """Read a text file (Python 2, default encoding)."""
        with open(filename) as f:
            return f.read()

setup(
    name='simfit',
    packages =['simfit'],
    version="0.1.1",
    author='John Livingston',
    author_email = 'jliv84@gmail.com',
    url = 'https://github.com/john-livingston/sxp',
    license = ['GNU GPLv3'],
    description ='Framework for simultaneous fitting of heterogeneous datasets and models',
    long_description=rd("README.md") + "\n\n"
                    + "---------\n\n",
    package_dir={"simfit": "simfit"},
    package_data={"simfit": []},
    scripts=['scripts/fitk2', 'scripts/fitk2spz'],
    include_package_data=True,
    keywords=[],
    classifiers = [
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Science/Research',
        'Topic :: Scientific/Engineering',
        'Programming Language :: Python'
    ],
    install_requires = ['numpy', 'scipy', 'matplotlib', 'astropy', 'photutils', 'tqdm'],
)
| jpdeleon/exoplanet_class | simfit_master/setup.py | setup.py | py | 1,456 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "setuptools.setup",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "distutils.core.setup",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "sys.argv",
"line_number": 13,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"l... |
413504280 | # pylint: disable=W0621,C0114,C0116,W0212,W0613
import pathlib
from typing import Optional
import pytest
from dae.testing.foobar_import import foobar_gpf
from dae.studies.study import GenotypeData
from dae.testing import setup_pedigree, setup_vcf, vcf_study
from dae.utils.regions import Region
from dae.genotype_storage import GenotypeStorage
@pytest.fixture(scope="module")
def freq_vcf(
    tmp_path_factory: pytest.TempPathFactory
) -> tuple[pathlib.Path, pathlib.Path]:
    """Write a small pedigree + VCF pair to a temp dir; returns (ped_path, vcf_path).

    Two trio families (f1: m1/d1/c1, f2: m2/d2/c2); the VCF also carries a
    sample `mis` absent from the pedigree — presumably to exercise unknown
    person genotypes (TODO confirm against the import options in freq_study).
    Variants on contig `foo` cover single- and multi-allelic sites with varying
    allele frequencies; `bar` adds an unknown (./.) genotype.
    """
    root_path = tmp_path_factory.mktemp("vcf_path")
    in_vcf = setup_vcf(
        root_path / "vcf_data" / "in.vcf.gz",
        """
        ##fileformat=VCFv4.2
        ##FORMAT=<ID=GT,Number=1,Type=String,Description="Genotype">
        ##contig=<ID=foo>
        #CHROM POS ID REF ALT QUAL FILTER INFO FORMAT m1  d1  c1  mis d2  c2  m2
        foo    10  .  T   G   .    .      .    GT     0/1 0/0 0/0 1/1 0/0 0/0 0/1
        foo    11  .  T   G   .    .      .    GT     0/0 0/1 0/0 1/1 0/1 0/0 0/0
        foo    12  .  T   G   .    .      .    GT     0/0 0/0 0/0 0/0 0/1 0/0 0/0
        foo    13  .  T   G   .    .      .    GT     0/1 0/0 0/0 0/0 0/0 0/0 0/0
        foo    14  .  T   G   .    .      .    GT     1/1 1/1 0/0 0/0 1/1 0/0 1/1
        foo    15  .  T   G   .    .      .    GT     1/1 1/1 1/1 1/1 1/1 1/1 1/1
        foo    16  .  T   G   .    .      .    GT     0/0 0/0 1/1 1/1 0/0 1/1 0/0
        foo    17  .  T   G,A .    .      .    GT     0/1 0/1 0/0 0/0 0/0 0/0 0/0
        foo    18  .  T   G,A .    .      .    GT     0/2 0/2 0/0 0/0 0/0 0/0 0/0
        foo    19  .  T   G,A .    .      .    GT     0/0 0/0 0/0 0/0 0/2 0/0 0/2
        foo    20  .  T   G,A .    .      .    GT     0/0 0/0 0/0 0/0 0/1 0/0 0/1
        foo    21  .  T   G,A .    .      .    GT     0/1 0/2 0/0 0/0 0/1 0/0 0/2
        bar    11  .  T   G   .    .      .    GT     ./. 0/2 0/0 0/0 0/1 0/1 0/0
        """)
    in_ped = setup_pedigree(
        root_path / "vcf_data" / "in.ped",
        """
        familyId personId dadId momId sex status role
        f1       m1       0     0     2   1      mom
        f1       d1       0     0     1   1      dad
        f1       c1       d1    m1    2   2      prb
        f2       m2       0     0     2   1      mom
        f2       d2       0     0     1   1      dad
        f2       c2       d2    m2    2   2      prb
        """)
    return in_ped, in_vcf
@pytest.fixture(scope="module")
def freq_study(
    tmp_path_factory: pytest.TempPathFactory,
    freq_vcf: tuple[pathlib.Path, pathlib.Path],
    genotype_storage: GenotypeStorage
) -> GenotypeData:
    """Import the freq_vcf data into a GPF study backed by `genotype_storage`.

    The import keeps reference/unknown genotypes so allele frequencies are
    computed over all samples, and enables denovo/omission calling.
    """
    # pylint: disable=import-outside-toplevel
    root_path = tmp_path_factory.mktemp(genotype_storage.storage_id)
    gpf_instance = foobar_gpf(root_path, genotype_storage)
    ped_path, vcf_path = freq_vcf
    study = vcf_study(
        root_path, "freq_vcf", ped_path, [vcf_path], gpf_instance,
        project_config_update={
            "input": {
                "vcf": {
                    "include_reference_genotypes": True,
                    "include_unknown_family_genotypes": True,
                    "include_unknown_person_genotypes": True,
                    "denovo_mode": "denovo",
                    "omission_mode": "omission",
                }
            }
        })
    return study
@pytest.mark.parametrize("region,count,freqs", [
    # # single alt allele
    (Region("foo", 10, 10), 2, [None, 25.0]),
    (Region("foo", 11, 11), 2, [None, 25.0]),
    (Region("foo", 12, 12), 1, [None, 12.5]),
    (Region("foo", 13, 13), 1, [None, 12.5]),
    (Region("foo", 14, 14), 2, [None, 100.0]),
    (Region("foo", 15, 15), 2, [None, 100.0]),
    (Region("foo", 16, 16), 2, [None, 0.0]),
    # # multiple alt alleles
    (Region("foo", 17, 17), 1, [None, 25.0]),
    (Region("foo", 18, 18), 1, [None, 25.0]),
    (Region("foo", 19, 19), 1, [None, 25.0]),
    (Region("foo", 20, 20), 1, [None, 25.0]),
    (Region("foo", 21, 21), 2, [None, 25.0, 25.0]),
    # multiple variants
    (Region("foo", 10, 11), 4, [None, 25.0]),
    # unknown genotypes
    (Region("bar", 11, 11), 1, [None, 25.0]),
    # no alleles
    (Region("bar", 30, 30), 0, []),
])
def test_variant_frequency_queries(
        freq_study: GenotypeData, region: Region, count: int,
        freqs: list[Optional[float]]) -> None:
    """Check variant count and per-allele frequencies for each queried region."""
    found_variants = list(freq_study.query_variants(regions=[region]))
    assert len(found_variants) == count
    # Index 0 is the reference allele; only the alt allele frequencies are
    # compared against the expected values.
    for variant in found_variants:
        assert variant.frequencies[1:] == freqs[1:]
| iossifovlab/gpf | dae/tests/integration/study_query_variants/test_allele_frequency.py | test_allele_frequency.py | py | 4,279 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pytest.TempPathFactory",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "dae.testing.setup_vcf",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "dae.testing.setup_pedigree",
"line_number": 41,
"usage_type": "call"
},
{
"api_... |
33499385148 | import os
import subprocess
import sys
import uuid
import dmake.common as common
from dmake.common import DMakeException, SharedVolumeNotFoundException, append_command
from dmake.deepobuild import DMakeFile
# Shown when pushing the deploy tag is rejected by the git remote.
tag_push_error_msg = "Unauthorized to push the current state of deployment to git server. If the repository belongs to you, please check that the credentials declared in the DMAKE_JENKINS_SSH_AGENT_CREDENTIALS and DMAKE_JENKINS_HTTP_CREDENTIALS allow you to write to the repository."
###############################################################################
def find_symlinked_directories():
    """Scan the working tree for symbolic links pointing at relative directories.

    Returns a list of (link_path, target_dir) tuples, both normalized paths.
    Links whose target is not a directory, or is absolute, are skipped.
    """
    shell_cmd = "for f in $(dmake_find . -type l); do echo \"$f $(ls -l $f | sed -e 's/.* -> //')\"; done"
    found = []
    for entry in common.run_shell_command(shell_cmd).split('\n'):
        parts = entry.split(' ')
        if len(parts) != 2:
            continue
        source = os.path.normpath(parts[0])
        if not os.path.isdir(source):
            continue
        # Resolve the link target relative to the link's own directory.
        target = os.path.normpath(os.path.join(os.path.dirname(source), parts[1]))
        if os.path.isdir(target) and target[0] != '/':
            found.append((source, target))
    return found
def look_for_changed_directories():
    """Return the list of bottom-most directories containing changed files.

    Changes are computed with `git diff` against the configured target branch
    (or against the previous deploy tag when no target is set). Files reached
    through in-repo directory symlinks are counted under the symlink path too.
    Returns None when the diff cannot be computed, which callers must interpret
    as "assume everything changed".
    """
    if common.change_detection_override_dirs is not None:
        changed_dirs = common.change_detection_override_dirs
        common.logger.info("Changed directories (forced via DMAKE_CHANGE_DETECTION_OVERRIDE_DIRS): %s", set(changed_dirs))
        return changed_dirs

    if not common.target:
        tag = get_tag_name()
        common.logger.info("Looking for changes between HEAD and %s" % tag)
        git_ref = "%s...HEAD" % tag
        try:
            common.run_shell_command2("git fetch origin +refs/tags/{tag}:refs/tags/{tag}".format(tag=tag))
        except common.ShellError as e:
            common.logger.debug("Fetching tag {} failed: {}".format(tag, e))
            # BUGFIX: the '{}' placeholder was previously left unformatted.
            common.logger.info("Tag {} not found on remote, assuming everything changed.".format(tag))
            return None
    else:
        if common.is_local:
            common.logger.info("Looking for changes with {}".format(common.target))
            git_ref = common.target
        else:
            common.logger.info("Looking for changes between HEAD and %s" % common.target)
            git_ref = "%s/%s...HEAD" % (common.remote, common.target)

    try:
        output = common.run_shell_command("git diff --name-only %s" % git_ref)
    except common.ShellError as e:
        common.logger.error("Error: " + str(e))
        return None

    if len(output) == 0:
        return []

    output = [file.strip() for file in output.split('\n')]

    # Mirror changes that happened inside a symlinked directory onto the
    # symlink path itself, so both spellings are detected as changed.
    symlinks = find_symlinked_directories()
    to_append = []
    for file in output:
        if len(file) == 0:
            continue
        for sl in symlinks:
            if file.startswith(sl[1]):
                f = file[len(sl[1]):]
                if len(f) == 0:
                    continue
                if f[0] == '/':
                    f = f[1:]
                to_append.append(os.path.join(sl[0], f))
    output += to_append
    common.logger.debug("Changed files: %s", output)

    changed_dirs = set()
    for file in output:
        if len(file) == 0:
            continue
        d = os.path.dirname(file)
        if d in changed_dirs:
            continue
        # Only keep bottom changed directories
        do_add = True
        to_remove = []
        for directory in changed_dirs:
            if directory.startswith(d): # sub directory of d
                do_add = False
            elif d.startswith(directory):
                # BUGFIX: collect every ancestor directory to drop. Previously
                # `to_remove` was rebound to the directory *string*, so
                # difference_update() iterated its characters and the ancestor
                # entry was never actually removed from the set.
                to_remove.append(directory)
        changed_dirs.difference_update(to_remove)
        if do_add:
            changed_dirs.add(d)
    common.logger.info("Changed directories: %s", changed_dirs)
    return list(changed_dirs)
###############################################################################
def load_dmake_files_list():
    """List every dmake.yml path under the current directory.

    Paths are returned relative (without the leading './') and sorted from the
    shallowest to the deepest, so block-list entries in parent files are seen
    before the files they exclude are loaded.
    """
    # Ignore permission issues when searching for dmake.yml files, in a portable way
    raw_paths = common.run_shell_command('dmake_find . -name dmake.yml').split("\n")
    relative_paths = [p[2:] for p in raw_paths if len(p.strip()) > 0]
    # Important: for block listed files: we load file in order from root to deepest file
    return sorted(relative_paths, key=lambda path: len(os.path.dirname(path)))
###############################################################################
def add_service_provider(service_providers, service, file, needs = None, base_variant = None):
    """Register `file` as the provider of `service`.

    'service', 'needs' and 'base_variant' are all service names.
    `service_providers` maps a service name to the tuple
    (provider_file, needs, base_variant, trigger_test_parents); the last member
    is the reverse link of `needs`, restricted to children whose customization
    requests triggering the parent's tests.
    Raises DMakeException when two different files define the same service.
    """
    common.logger.debug("add_service_provider: service: %s, needs: %s, variant: %s" % (service, needs, base_variant))
    trigger_test_parents = set()
    if service in service_providers:
        # Preserve any parent backlinks accumulated before this provider was known.
        existing_service_provider, _, _, trigger_test_parents = service_providers[service]
        # to construct parents links (when child trigger test on parents) (4th member of the tuple), we temporarily have service providers not defined yet, but already with some parents. It's materialized with file==None
        if existing_service_provider is not None:
            if existing_service_provider != file:
                raise DMakeException('Service %s re-defined in %s. First defined in %s' % (service, file, existing_service_provider))
    service_providers[service] = (file, needs, base_variant, trigger_test_parents)
    # add `service` to `trigger_test_parents` backlink for each needed_service child which was asked to trigger the parent test
    # `trigger_test_parents` == reverse link of `needs`, filtered on `needed_for.trigger_test`
    if needs is not None:
        for child_service, child_service_customization in needs:
            if not child_service_customization.needed_for.kind('trigger_test'):
                continue
            if child_service in service_providers:
                # TODO do we need to do something for child_service_customization?
                service_providers[child_service][3].add(service)
            else:
                # Placeholder entry (file is None) until the child's provider is loaded.
                service_providers[child_service] = (None, None, None, set([service]))
###############################################################################
def activate_file(loaded_files, service_providers, service_dependencies, command, file):
    """Activate every service declared in `file` for `command`.

    Returns the list of dependency-graph nodes created for the file's services.
    Raises DMakeException on an unsupported command.
    """
    dmake_file = loaded_files[file]
    if command not in ['test', 'run', 'deploy', 'build_docker']:
        raise DMakeException('Unexpected command %s' % command)
    nodes = []
    for service in dmake_file.get_services():
        # Full service names are qualified with the app name.
        full_service_name = "%s/%s" % (dmake_file.app_name, service.service_name)
        nodes.extend(activate_service(loaded_files, service_providers,
                                      service_dependencies, command, full_service_name))
    return nodes
###############################################################################
def activate_shared_volumes(shared_volumes):
    """Return one ('shared_volume', name, None) node per shared volume."""
    return [('shared_volume', volume.get_service_name(), None)
            for volume in shared_volumes]
###############################################################################
def activate_link_shared_volumes(loaded_files, service_providers, service):
    """Activate the shared volumes declared by the docker link backing `service`."""
    provider_file = service_providers[service][0]
    dmake_file = loaded_files[provider_file]
    link = dmake_file.get_docker_link(service)
    try:
        volumes = link.get_shared_volumes()
    except SharedVolumeNotFoundException as e:
        # Re-raise with the link and file for a user-actionable message.
        raise DMakeException("%s in docker_link '%s' in file '%s'" % (e, link.link_name, provider_file))
    return activate_shared_volumes(volumes)
###############################################################################
def activate_service_shared_volumes(loaded_files, service_providers, service):
    """Activate the shared volumes declared directly by the service definition."""
    provider_file = service_providers[service][0]
    dmake_file = loaded_files[provider_file]
    svc = dmake_file._get_service_(service)
    try:
        volumes = svc.get_shared_volumes()
    except SharedVolumeNotFoundException as e:
        # Re-raise with the service and file for a user-actionable message.
        raise DMakeException("%s in service '%s' in file '%s'" % (e, svc.service_name, provider_file))
    return activate_shared_volumes(volumes)
###############################################################################
def activate_base(base_variant):
    """Return the dependency node for a base image variant.

    `base_variant` is None when the service declares no base image (only a
    root_image is specified), in which case there is nothing to activate.
    """
    return [] if base_variant is None else [('base', base_variant, None)]
###############################################################################
def activate_link(loaded_files, service_providers, service_dependencies, service):
    """Activate every docker link needed by `service` as 'run_link' nodes."""
    provider_file = service_providers[service][0]
    dmake_file = loaded_files[provider_file]
    svc = dmake_file._get_service_(service)
    nodes = []
    for link_name in svc.needed_links:
        # Link services live under the 'links/<app>/<link>' namespace.
        link_service = 'links/%s/%s' % (dmake_file.get_app_name(), link_name)
        nodes += activate_service(loaded_files, service_providers,
                                  service_dependencies, 'run_link', link_service)
    return nodes
###############################################################################
def activate_needed_services(loaded_files, service_providers, service_dependencies, needs, command, needed_for):
    """Activate each needed service whose customization applies to `needed_for`.

    A need without customization always applies.
    """
    nodes = []
    for needed_service, customization in needs:
        applies = customization is None or customization.needed_for.kind(needed_for)
        if applies:
            nodes += activate_service(loaded_files, service_providers,
                                      service_dependencies, command,
                                      needed_service, customization)
    return nodes
###############################################################################
def activate_service(loaded_files, service_providers, service_dependencies, command, service, service_customization=None):
    """Add the node (command, service, service_customization) and all its
    dependencies to `service_dependencies` (a DAG mapping node -> children),
    recursively. Returns the single-element list [node], or [] when tests are
    skipped. Customizations are only meaningful for 'run' nodes."""
    common.logger.debug("activate_service: command: %s,\tservice: %s,\tservice_customization: %s" % (command, service, service_customization))
    if command != 'run':
        assert service_customization == None
    node = (command, service, service_customization)
    if command == 'test' and common.skip_tests:
        return []
    if node not in service_dependencies:
        if service not in service_providers:
            raise DMakeException("Cannot find service: %s" % service)
        file, needs, base_variant, trigger_test_parents = service_providers[service]
        children = []
        if command == 'shell':
            children += activate_service_shared_volumes(loaded_files, service_providers, service)
            if common.options.with_dependencies and needs is not None:
                children += activate_needed_services(loaded_files, service_providers, service_dependencies, needs, command='run', needed_for='run')
            if common.options.with_dependencies:
                children += activate_link(loaded_files, service_providers, service_dependencies, service)
            children += activate_base(base_variant)
        elif command == 'test':
            children += activate_service_shared_volumes(loaded_files, service_providers, service)
            if common.options.with_dependencies and needs is not None:
                children += activate_needed_services(loaded_files, service_providers, service_dependencies, needs, command='run', needed_for='test')
            children += activate_service(loaded_files, service_providers, service_dependencies, 'build_docker', service)
            if common.options.with_dependencies:
                children += activate_link(loaded_files, service_providers, service_dependencies, service)
        elif command == 'build_docker':
            children += activate_base(base_variant)
        elif command == 'run':
            children += activate_service_shared_volumes(loaded_files, service_providers, service)
            # ~hackish: run service depends on test service if we are doing tests
            if common.command in ['test', 'deploy']:
                if common.change_detection:
                    # in change detection mode, don't add "test service" node: only create the link between run and test if the test node exists.
                    # the services to be tested are either:
                    #  - created directly via graph construction starting on the target services: the ones that changed
                    #  - independently created from "test child service" to "test parent service"
                    # we can reach here in the middle of the DAG construction (e.g. when multiple services have changed: we fully work one by one sequentially),
                    # so we don't know yet if the test node will exist at the end or not.
                    # the link will be created later, in a second global pass in make(), see "second pass" there
                    pass
                else:
                    # normal mode, activate "test service" as dependance of "run service"
                    # REMARK: if we wanted, we could change the semantic of `dmake test foo` to only test foo (while still running its dependencies needed for tests or run, recursively), instead of also testing all children services too: just use the second pass
                    children += activate_service(loaded_files, service_providers, service_dependencies, 'test', service)
            children += activate_service(loaded_files, service_providers, service_dependencies, 'build_docker', service)
            if common.options.with_dependencies and needs is not None:
                children += activate_needed_services(loaded_files, service_providers, service_dependencies, needs, command='run', needed_for='run')
            if common.options.with_dependencies:
                children += activate_link(loaded_files, service_providers, service_dependencies, service)
        elif command == 'run_link':
            children += activate_link_shared_volumes(loaded_files, service_providers, service)
        elif command == 'deploy':
            children += activate_service(loaded_files, service_providers, service_dependencies, 'build_docker', service)
            children += activate_service(loaded_files, service_providers, service_dependencies, 'test', service)
            if common.options.with_dependencies and needs is not None:
                # enforce deployment order by re-using needed_services dependency graph
                # but we don't want to create extra deployments because of customization
                # => deploy recursively using needs dependency, but ignore service customization
                uncustomized_needs = [(child_service, None) for child_service, child_service_customization in needs]
                children += activate_needed_services(loaded_files, service_providers, service_dependencies, uncustomized_needs, command='deploy', needed_for='fake__not_used')
        else:
            raise Exception("Unknown command '%s'" % command)

        service_dependencies[node] = children

        # parent dependencies, after updating service_dependencies to avoid infinite recursion
        # test parent when child changed
        if command == 'test':
            if common.options.with_dependencies and common.change_detection:
                for parent_service in trigger_test_parents:
                    common.logger.debug("activate_service: parent test: service: %s,\tparent service: %s" % (service, parent_service))
                    parent_node = activate_service(loaded_files, service_providers, service_dependencies, 'test', parent_service)[0]
                    if node not in service_dependencies[parent_node]:
                        service_dependencies[parent_node].append(node)

    return [node]
###############################################################################
def display_command_node(node):
    """Human-readable label for a (command, service, customization) node."""
    command, service, customization = node
    # daemon name: <app_name>/<service_name><optional_unique_suffix>; service already contains "<app_name>/"
    suffix = customization.get_service_name_unique_suffix() if customization else ""
    return "%s @ %s%s" % (command, service, suffix)
###############################################################################
def find_active_files(loaded_files, service_providers, service_dependencies, sub_dir, command):
    """Activate dmake files whose sources changed (or all, on full deploy).

    When common.force_full_deploy is set, every file below `sub_dir` is
    activated. Otherwise only files whose directory — or one of their
    services' additional docker build contexts — intersects the changed
    directories reported by look_for_changed_directories().
    """
    changed_dirs = None
    if common.force_full_deploy:
        common.logger.info("Forcing full re-build")
    else:
        # TODO warn if command == deploy: not really supported? or fatal error? or nothing?
        changed_dirs = look_for_changed_directories()

    def has_changed(root):
        # BUGFIX: look_for_changed_directories() returns None when the diff
        # could not be computed, documented as "assume everything changed";
        # previously iterating None raised a TypeError here.
        if changed_dirs is None:
            return True
        return any(d.startswith(root) for d in changed_dirs)

    for file_name, dmake_file in loaded_files.items():
        if not file_name.startswith(sub_dir):
            continue
        root = os.path.dirname(file_name)
        # Short-circuit: has_changed is only consulted when not forcing a full deploy.
        if common.force_full_deploy or has_changed(root):
            activate_file(loaded_files, service_providers, service_dependencies, command, file_name)
            continue
        # still, maybe activate some services in this file with extended build context
        # (to support docker_image.build.context: ../)
        for service in dmake_file.get_services():
            contexts = set()
            for additional_root in service.config.docker_image.get_source_directories_additional_contexts():
                contexts.add(os.path.normpath(os.path.join(root, additional_root)))
            # activate service if any of its additional contexts has changed
            # (loop variable renamed: it previously shadowed `root`)
            for context_root in contexts:
                if has_changed(context_root):
                    full_service_name = "%s/%s" % (dmake_file.app_name, service.service_name)
                    activate_service(loaded_files, service_providers, service_dependencies, command, full_service_name)
                    break
###############################################################################
def load_dmake_file(loaded_files, blocklist, service_providers, service_dependencies, file):
    """Parse one dmake.yml file into a DMakeFile, registering its shared
    volumes, docker links and base images, and recursively loading any other
    dmake files it references for its docker or env configuration.
    Already-loaded and block-listed files are skipped."""
    if file in loaded_files:
        return

    if file in blocklist:
        return

    # Load YAML and check version
    with open(file, 'r') as stream:
        data = common.yaml_ordered_load(stream)
    if 'dmake_version' not in data:
        raise DMakeException("Missing field 'dmake_version' in %s" % file)
    version = str(data['dmake_version'])
    if version not in ['0.1']:
        raise DMakeException("Incorrect version '%s'" % str(data['dmake_version']))

    # Load appropriate version (TODO: versionning)
    if version == '0.1':
        dmake_file = DMakeFile(file, data)
    loaded_files[file] = dmake_file

    # Blocklist should be on child file because they are loaded this way
    # TODO: 'blacklist' is deprecated. Remove the two following lines when the
    # field will be completely removed
    for bl in dmake_file.blacklist:
        blocklist.append(bl)
    for bl in dmake_file.blocklist:
        blocklist.append(bl)

    for volume in dmake_file.volumes:
        shared_volume_service_name = volume.get_service_name()
        add_service_provider(service_providers, shared_volume_service_name, file)
        # Shared volumes have no dependencies of their own.
        service_dependencies[('shared_volume', shared_volume_service_name, None)] = []

    for link in dmake_file.docker_links:
        add_service_provider(service_providers, 'links/%s/%s' % (dmake_file.get_app_name(), link.link_name), file)

    # Unroll docker image references: a string value means "use the docker
    # config of that other dmake file", which is loaded (once) and inlined.
    if isinstance(dmake_file.docker, str):
        ref = dmake_file.docker
        load_dmake_file(loaded_files, blocklist, service_providers, service_dependencies, ref)
        if isinstance(loaded_files[ref].docker, str):
            raise DMakeException('Circular references: trying to load %s which is already loaded.' % loaded_files[ref].docker)
        dmake_file.__fields__['docker'] = loaded_files[ref].docker
    else:
        if isinstance(dmake_file.docker.root_image, str):
            # root_image given as a path to another dmake file: inherit its root image.
            ref = dmake_file.docker.root_image
            load_dmake_file(loaded_files, blocklist, service_providers, service_dependencies, ref)
            dmake_file.docker.__fields__['root_image'] = loaded_files[ref].docker.root_image
        elif dmake_file.docker.root_image is not None:
            # Inline root image declaration: resolve env vars in name/tag.
            default_root_image = dmake_file.docker.root_image
            default_root_image = common.eval_str_in_env(default_root_image.name + ":" + default_root_image.tag)
            dmake_file.docker.__fields__['root_image'] = default_root_image

        default_root_image = dmake_file.docker.root_image
        for base_image in dmake_file.docker.base_image:
            base_image_service = base_image.get_service_name()
            base_image_name = base_image.get_name_variant()
            root_image = base_image.root_image
            if root_image is None:
                # set default root_image
                if default_root_image is None:
                    raise DMakeException("Missing field 'root_image' (and default 'docker.root_image') for base_image '%s' in '%s'" % (base_image_name, file))
                root_image = default_root_image
                base_image.__fields__['root_image'] = root_image

            add_service_provider(service_providers, base_image_service, file)
            # Each base image build depends on its root image being available.
            service_dependencies[('base', base_image_service, None)] = [('base', root_image, None)]

        if len(dmake_file.docker.base_image) == 0 and default_root_image is None:
            raise DMakeException("Missing field 'docker.root_image' in '%s'" % (file))

    # Same unrolling for the env configuration.
    if isinstance(dmake_file.env, str):
        ref = dmake_file.env
        load_dmake_file(loaded_files, blocklist, service_providers, service_dependencies, ref)
        if isinstance(loaded_files[ref].env, str):
            raise DMakeException('Circular references: trying to load %s which is already loaded.' % ref)
        dmake_file.__fields__['env'] = loaded_files[ref].env
###############################################################################
def check_no_circular_dependencies(dependencies):
    """Verify the dependency graph ``{node: [dep, ...]}`` is acyclic.

    Raises DMakeException when a cycle is found. Returns ``(leaves, tree_depth)``
    where ``leaves`` is a list of ``(node, depth)`` for nodes that no other node
    depends on, and ``tree_depth`` maps every node of ``dependencies`` to the
    length of its longest chain of dependencies.
    """
    is_leaf = {}
    for k in dependencies:
        is_leaf[k] = True
    tree_depth = {}
    def sub_check(key, walked_nodes=None):
        # BUGFIX: default was the mutable `[]` (shared across calls); use the
        # None-sentinel idiom instead.
        if walked_nodes is None:
            walked_nodes = []
        if key in tree_depth:
            # already fully explored: memoized depth
            return tree_depth[key]
        if key not in dependencies:
            # external node (no declared dependencies): depth 0
            return 0
        walked_nodes = [key] + walked_nodes
        depth = 0
        for dep in dependencies[key]:
            is_leaf[dep] = False
            if dep in walked_nodes:
                raise DMakeException("Circular dependencies: %s" % ' -> '.join(map(str, reversed([dep] + walked_nodes))))
            depth = max(depth, 1 + sub_check(dep, walked_nodes))
        tree_depth[key] = depth
        return depth
    for k in dependencies:
        sub_check(k)
    leaves = []
    for k, v in is_leaf.items():
        if v:
            leaves.append((k, tree_depth[k]))
    return leaves, tree_depth
###############################################################################
def order_dependencies(dependencies, leaves):
    """Assign each reachable node the minimum level found by walking down from
    `leaves` (each a ``(node, level)`` pair), children getting ``level - 1``.

    Returns a dict mapping node -> smallest level seen.
    """
    ordered_build_files = {}
    worklist = list(leaves)
    while worklist:
        node, level = worklist.pop()
        known = ordered_build_files.get(node)
        # Only (re)visit when this path improves (lowers) the recorded level.
        if known is not None and level >= known:
            continue
        ordered_build_files[node] = level
        for child in dependencies.get(node, ()):
            worklist.append((child, level - 1))
    return ordered_build_files
###############################################################################
def make_path_unique_per_variant(path, service_name):
    """If multi variant: prefix filename with `<variant>-`"""
    parts = service_name.split(':')
    # A single-variant service name has no ':'; leave the path untouched.
    if len(parts) != 2:
        return path
    directory, filename = os.path.split(path)
    return os.path.join(directory, '%s-%s' % (parts[1], filename))
###############################################################################
def generate_command_pipeline(file, cmds):
    """Emit `cmds` as a Jenkins scripted pipeline (Groovy) into `file`.

    `cmds` is a list of ``(cmd, kwargs)`` tuples; scope commands
    (stage/parallel/lock/timeout/try) come in matched begin/end pairs and
    drive the indentation of the generated script. The whole script is
    wrapped in try/catch/finally: on error it optionally pauses (when
    DMAKE_PAUSE_ON_ERROR_BEFORE_CLEANUP=1) and re-raises; `dmake_clean`
    always runs in the finally block.
    """
    indent_level = 0
    def write_line(data):
        # Emit one Groovy line at the current nesting depth; blank lines are not indented.
        if len(data) > 0:
            file.write('    ' * indent_level)
        file.write(data + '\n')

    if common.build_description is not None:
        write_line("currentBuild.description = '%s'" % common.build_description.replace("'", "\\'"))
    # helper: `sh` echo displayed with a readable label in the Jenkins UI
    write_line("def dmake_echo(message) { sh(script: \"echo '${message}'\", label: message) }")
    write_line('try {')
    indent_level += 1

    cobertura_tests_results_dir = os.path.join(common.relative_cache_dir, 'cobertura_tests_results')
    emit_cobertura = False

    # checks to generate valid Jenkinsfiles
    check_no_duplicate_stage_names = set()
    check_no_duplicate_parallel_branch_names_stack = []

    for cmd, kwargs in cmds:
        if cmd == "stage":
            assert kwargs['name'] not in check_no_duplicate_stage_names, \
                'Duplicate stage name: {}'.format(kwargs['name'])
            check_no_duplicate_stage_names.add(kwargs['name'])
            name = kwargs['name'].replace("'", "\\'")
            write_line('')
            write_line("stage('%s') {" % name)
            indent_level += 1
        elif cmd == "stage_end":
            indent_level -= 1
            write_line("}")
        elif cmd == "parallel":
            # new scope on check_no_duplicate_parallel_branch_names stack
            check_no_duplicate_parallel_branch_names_stack.append(set())
            write_line("parallel(")
            indent_level += 1
        elif cmd == "parallel_end":
            indent_level -= 1
            write_line(")")
            # end scope on check_no_duplicate_parallel_branch_names stack
            check_no_duplicate_parallel_branch_names_stack.pop()
        elif cmd == "parallel_branch":
            assert kwargs['name'] not in check_no_duplicate_parallel_branch_names_stack[-1], \
                'Duplicate parallel_branch name: {}'.format(kwargs['name'])
            check_no_duplicate_parallel_branch_names_stack[-1].add(kwargs['name'])
            name = kwargs['name'].replace("'", "\\'")
            write_line("'%s': {" % name)
            indent_level += 1
        elif cmd == "parallel_branch_end":
            indent_level -= 1
            write_line("},")
        elif cmd == "lock":
            if 'quantity' not in kwargs:
                kwargs['quantity'] = 1
            if 'variable' not in kwargs:
                kwargs['variable'] = ""  # empty variable is accepted by the lock step as "'variable' not set"
            write_line("lock(label: '{label}', quantity: {quantity}, variable: '{variable}') {{".format(**kwargs))
            indent_level += 1
        elif cmd == "lock_end":
            indent_level -= 1
            write_line("}")
        elif cmd == "timeout":
            time = kwargs['time']
            write_line("timeout(time: %s, unit: 'SECONDS') {" % time)
            indent_level += 1
        elif cmd == "timeout_end":
            indent_level -= 1
            write_line("}")
        elif cmd == "try":
            write_line("try {")
            indent_level += 1
        elif cmd == "catch":
            what = kwargs['what']
            indent_level -= 1
            write_line("} catch(%s) {" % what)
            indent_level += 1
        elif cmd == "throw":
            what = kwargs['what']
            write_line("throw %s" % what)
        elif cmd == "catch_end":
            indent_level -= 1
            write_line("}")
        elif cmd == "echo":
            message = kwargs['message'].replace("'", "\\'")
            write_line("dmake_echo '%s'" % message)
        elif cmd == "sh":
            commands = kwargs['shell']
            if isinstance(commands, str):
                commands = [commands]
            commands = [common.escape_cmd(c) for c in commands]
            if len(commands) == 0:
                # BUGFIX: was `return`, which aborted the whole generation
                # mid-stream and produced an unbalanced (invalid) Jenkinsfile;
                # only this empty command should be skipped.
                continue
            if len(commands) == 1:
                write_line('sh("%s")' % commands[0])
            else:
                write_line('parallel (')
                commands_list = []
                for c in enumerate(commands):
                    commands_list.append("cmd%d: { sh('%s') }" % c)
                write_line(','.join(commands_list))
                write_line(')')
        elif cmd == "read_sh":
            file_output = os.path.join(common.cache_dir, "output_%s" % uuid.uuid4())
            write_line("sh('%s > %s')" % (kwargs['shell'], file_output))
            write_line("env.%s = readFile '%s'" % (kwargs['var'], file_output))
            if kwargs['fail_if_empty']:
                write_line("sh('if [ -z \"${%s}\" ]; then exit 1; fi')" % kwargs['var'])
        elif cmd == "env":
            write_line('env.%s = "%s"' % (kwargs['var'], kwargs['value']))
        elif cmd == "git_tag":
            if common.repo_url is not None:
                write_line("sh('git tag --force %s')" % kwargs['tag'])
                write_line('try {')
                indent_level += 1
                if common.repo_url.startswith('https://') or common.repo_url.startswith('http://'):
                    # push over HTTP(S) with Jenkins-stored credentials
                    i = common.repo_url.find(':')
                    prefix = common.repo_url[:i]
                    host = common.repo_url[(i + 3):]
                    write_line("withCredentials([[$class: 'UsernamePasswordMultiBinding', credentialsId: env.DMAKE_JENKINS_HTTP_CREDENTIALS, usernameVariable: 'GIT_USERNAME', passwordVariable: 'GIT_PASSWORD']]) {")
                    indent_level += 1
                    write_line('try {')
                    write_line("""  sh('git push --force "%s://${GIT_USERNAME}:${GIT_PASSWORD}@%s" refs/tags/%s')""" % (prefix, host, kwargs['tag']))
                    write_line('} catch(error) {')
                    write_line("""  sh('echo "%s"')""" % tag_push_error_msg.replace("'", "\\'"))
                    write_line('}')
                    error_msg = "Define 'User/Password' credentials and set their ID in the 'DMAKE_JENKINS_HTTP_CREDENTIALS' environment variable to be able to build and deploy only changed parts of the app."
                    indent_level -= 1
                    write_line('}')
                else:
                    # SSH (or other) remote: push directly
                    write_line("sh('git push --force %s refs/tags/%s')" % (common.remote, kwargs['tag']))
                    error_msg = tag_push_error_msg
                indent_level -= 1
                write_line('} catch(error) {')
                write_line("""  sh('echo "%s"')""" % error_msg.replace("'", "\\'"))
                write_line('}')
        elif cmd == "junit":
            container_report = os.path.join(kwargs['mount_point'], kwargs['report'])
            host_report = os.path.join(common.relative_cache_dir, 'tests_results', str(uuid.uuid4()), kwargs['service_name'].replace(':', '-'), kwargs['report'])
            write_line('''sh('dmake_test_get_results "%s" "%s" "%s"')''' % (kwargs['service_name'], container_report, host_report))
            write_line("junit keepLongStdio: true, testResults: '%s'" % host_report)
            write_line('''sh('rm -rf "%s"')''' % host_report)
        elif cmd == "cobertura":
            container_report = os.path.join(kwargs['mount_point'], kwargs['report'])
            host_report = os.path.join(cobertura_tests_results_dir, str(uuid.uuid4()), kwargs['service_name'].replace(':', '-'), kwargs['report'])
            if not host_report.endswith('.xml'):
                raise DMakeException("`cobertura_report` must end with '.xml' in service '%s'" % kwargs['service_name'])
            write_line('''sh('dmake_test_get_results "%s" "%s" "%s"')''' % (kwargs['service_name'], container_report, host_report))
            # coberturaPublisher plugin only supports one step, so we delay generating it, and make it get all reports
            emit_cobertura = True
        elif cmd == "publishHTML":
            container_html_directory = os.path.join(kwargs['mount_point'], kwargs['directory'])
            host_html_directory = os.path.join(common.cache_dir, 'tests_results', str(uuid.uuid4()), kwargs['service_name'].replace(':', '-'), kwargs['directory'])
            write_line('''sh('dmake_test_get_results "%s" "%s" "%s"')''' % (kwargs['service_name'], container_html_directory, host_html_directory.rstrip('/')))
            # BUGFIX: the title quote-escape used "\'" which is identical to "'"
            # (a no-op replace); escape with a backslash like the other call sites.
            write_line("publishHTML(target: [allowMissing: false, alwaysLinkToLastBuild: false, keepAll: true, reportDir: '%s', reportFiles: '%s', reportName: '%s'])" % (host_html_directory, kwargs['index'], kwargs['title'].replace("'", "\\'")))
            write_line('''sh('rm -rf "%s"')''' % host_html_directory)
        else:
            raise DMakeException("Unknown command %s" % cmd)

    indent_level -= 1
    write_line('}')
    write_line('catch (error) {')
    write_line('  if ( env.DMAKE_PAUSE_ON_ERROR_BEFORE_CLEANUP == "1" ) {')
    write_line('    slackSend channel: "#jenkins-dmake", message: "This jenkins build requires your attention: <${env.BUILD_URL}/console|${env.JOB_NAME} ${env.BUILD_NUMBER}>"')
    write_line("    input message: 'An error occurred. DMake will stop and clean all the running containers upon any answer.'")
    write_line('  }')
    write_line('  throw error')
    write_line('}')
    write_line('finally {')
    indent_level += 1
    if emit_cobertura:
        # single delayed publication step, collecting every cobertura report at once
        write_line("try {")
        indent_level += 1
        write_line("step([$class: 'CoberturaPublisher', autoUpdateHealth: false, autoUpdateStability: false, coberturaReportFile: '%s/**/*.xml', failUnhealthy: false, failUnstable: false, maxNumberOfBuilds: 0, onlyStable: false, sourceEncoding: 'ASCII', zoomCoverageChart: false])" % (cobertura_tests_results_dir))
        write_line("publishCoverage adapters: [coberturaAdapter(mergeToOneReport: false, path: '%s/**/*.xml')], calculateDiffForChangeRequests: true, sourceFileResolver: sourceFiles('NEVER_STORE')" % (cobertura_tests_results_dir))
        write_line('''sh('rm -rf "%s"')''' % cobertura_tests_results_dir)
        indent_level -= 1
        write_line("} catch (error) {")
        write_line("  dmake_echo 'Late cobertura_report test result collection failed, it may be because the test steps were not reached (earlier error: check logs/steps above/before), or because the cobertura_report is misconfigured (check the path config).'")
        write_line("}")
    write_line('sh("dmake_clean")')
    indent_level -= 1
    write_line('}')
###############################################################################
def generate_command_bash(file, cmds):
    """Emit `cmds` as a plain bash script into `file`.

    Fallback runtime when Jenkins pipelines are not used: parallel / lock /
    timeout scope commands are ignored (everything runs sequentially and
    unlocked), and try/catch is emulated with the bash helper functions
    defined in the preamble below.
    """
    assert not common.parallel_execution, "parallel execution not supported with bash runtime"
    indent_level = 0
    def write_line(data):
        # Emit one script line at the current nesting depth; blank lines are not indented.
        if len(data) > 0:
            file.write('    ' * indent_level)
        file.write(data + '\n')
    # Trace every executed command when DMAKE_DEBUG is set.
    write_line('test "${DMAKE_DEBUG}" = "1" && set -x')
    write_line("""
# from https://stackoverflow.com/a/25180186/15151442 for try/catch
function try()
{
    [[ $- = *e* ]]; SAVED_OPT_E=$?
    set +e
}
function throw()
{
    exit $1
}
function catch()
{
    export ex_code=$?
    (( $SAVED_OPT_E )) && set +e
    return $ex_code
}
function throwErrors()
{
    set -e
}
function ignoreErrors()
{
    set +e
}
""")
    write_line('set -e')
    for cmd, kwargs in cmds:
        if cmd == "stage":
            write_line("")
            write_line("{ echo -e '\n## %s ##'" % kwargs['name'])
            indent_level += 1
        elif cmd == "stage_end":
            indent_level -= 1
            write_line("}")
        elif cmd == "parallel":
            # parallel not supported with bash, fallback to running sequentially
            pass
        elif cmd == "parallel_end":
            pass
        elif cmd == "parallel_branch":
            # parallel_branch not supported with bash, fallback to running sequentially
            pass
        elif cmd == "parallel_branch_end":
            pass
        elif cmd == "lock":
            # lock not supported with bash, fallback to ignoring locks
            pass
        elif cmd == "lock_end":
            pass
        elif cmd == "timeout":
            # timeout not supported with bash, fallback to ignoring timeouts
            pass
        elif cmd == "timeout_end":
            pass
        elif cmd == "try":
            # uses the try/catch bash helpers emitted in the preamble above
            write_line("try")
            write_line("(")
            indent_level += 1
        elif cmd == "catch":
            what = kwargs['what']
            indent_level -= 1
            write_line(")")
            write_line("catch || { %s=$ex_code;" % what)
            indent_level += 1
        elif cmd == "throw":
            what = kwargs['what']
            write_line("throw $%s" % what)
        elif cmd == "catch_end":
            indent_level -= 1
            write_line("}")
        elif cmd == "echo":
            message = kwargs['message'].replace("'", "\\'")
            write_line("echo '%s'" % message)
        elif cmd == "sh":
            commands = kwargs['shell']
            if isinstance(commands, str):
                commands = [commands]
            for c in commands:
                write_line("%s" % c)
        elif cmd == "read_sh":
            # capture the command output into a shell variable
            write_line("%s=`%s`" % (kwargs['var'], kwargs['shell']))
            if kwargs['fail_if_empty']:
                write_line("if [ -z \"${%s}\" ]; then exit 1; fi" % kwargs['var'])
        elif cmd == "env":
            write_line('%s="%s"' % (kwargs['var'], kwargs['value'].replace('"', '\\"')))
            write_line('export %s' % kwargs['var'])
        elif cmd == "git_tag":
            write_line('git tag --force %s' % kwargs['tag'])
            write_line('git push --force %s refs/tags/%s || echo %s' % (common.remote, kwargs['tag'], tag_push_error_msg))
        elif cmd == "junit" or cmd == "cobertura":
            # bash mode only copies the report out of the container; no report publication step
            container_report = os.path.join(kwargs['mount_point'], kwargs['report'])
            host_report = make_path_unique_per_variant(kwargs['report'], kwargs['service_name'])
            write_line('dmake_test_get_results "%s" "%s" "%s"' % (kwargs['service_name'], container_report, host_report))
        elif cmd == "publishHTML":
            # bash mode only copies the HTML directory out of the container; no publication step
            container_html_directory = os.path.join(kwargs['mount_point'], kwargs['directory'])
            host_html_directory = make_path_unique_per_variant(kwargs['directory'], kwargs['service_name'])
            write_line('dmake_test_get_results "%s" "%s" "%s"' % (kwargs['service_name'], container_html_directory, host_html_directory))
        else:
            raise DMakeException("Unknown command %s" % cmd)
###############################################################################
def generate_command(file_name, cmds):
    """Write the generated commands to `file_name`, either as a Jenkins
    pipeline script or as a bash script depending on the configured runtime."""
    emitter = generate_command_pipeline if common.use_pipeline else generate_command_bash
    with open(file_name, "w") as output:
        emitter(output, cmds)
###############################################################################
def get_tag_name():
    """Name of the git tag marking the current branch as deployed."""
    return 'deployed_version_{}'.format(common.branch)
###############################################################################
def service_completer(prefix, parsed_args, **kwargs):
    """Shell completion helper: list every service name declared across all
    parsed dmake files (`prefix` is required by the completer API but unused)."""
    common.init(parsed_args, early_exit=True)
    dmake_files = make(parsed_args, parse_files_only=True)
    return [service.service_name
            for dmake_file in dmake_files.values()
            for service in dmake_file.get_services()]
###############################################################################
def make(options, parse_files_only=False):
    """Main entry point: load all dmake.yml files, build and order the service
    dependency graph, generate the command script (Jenkins pipeline or bash)
    and, when running locally, execute it.

    options: parsed command-line arguments; `options.service` selects the
             target app/service ("*" = all, "+" = changed only).
    parse_files_only: when True, return the dict of loaded dmake files right
             after parsing (used by service_completer for shell completion).
    """
    app = getattr(options, 'service', None)
    if common.sub_dir:
        common.logger.info("Working in subdirectory: %s", common.sub_dir)

    # Format args
    auto_complete = False
    auto_completed_app = None
    if app == "*":
        # all services
        app = None
        common.force_full_deploy = True
    elif app == "+":
        # changed services only
        common.change_detection = True
    elif app is not None:
        n = len(app.split('/'))
        if n > 2:
            raise DMakeException('Cannot have more than one slash in the app name')
        # a bare name (no slash) must be auto-completed to a full 'app/service' name below
        auto_complete = n == 1
        if not auto_complete:
            auto_completed_app = app
        common.force_full_deploy = True
    elif common.command in ['shell']:
        auto_complete = True

    # Load build files
    build_files = load_dmake_files_list()
    if len(build_files) == 0:
        raise DMakeException('No dmake.yml file found !')

    # Load all dmake.yml files (except those blocklisted)
    blocklist = []
    loaded_files = {}
    service_providers = {}
    service_dependencies = {}
    for file in build_files:
        load_dmake_file(loaded_files, blocklist, service_providers, service_dependencies, file)

    if parse_files_only:
        return loaded_files

    # Register all apps and services in the repo
    docker_links = {}
    services = {}
    for file, dmake_file in loaded_files.items():
        if dmake_file.env is not None and dmake_file.env.source is not None:
            try:
                common.pull_config_dir(os.path.dirname(dmake_file.env.source))
            except common.NotGitRepositoryException:
                common.logger.warning('Not a Git repository: %s' % (dmake_file.env.source))

        app_name = dmake_file.get_app_name()
        if app_name not in docker_links:
            docker_links[app_name] = {}
        if app_name not in services:
            services[app_name] = {}

        app_services = services[app_name]
        for service in dmake_file.get_services():
            full_service_name = "%s/%s" % (app_name, service.service_name)
            if service.service_name in app_services:
                raise DMakeException("Duplicated sub-app name: '%s'" % full_service_name)
            needs = [("%s/%s" % (app_name, sa.service_name), sa) for sa in service.needed_services]
            base_variant = None
            try:
                base_image = dmake_file.docker.get_base_image(variant=service.get_base_image_variant())
            except DMakeException as e:
                raise DMakeException("%s, for service '%s' in file '%s'" % (e, full_service_name, file))
            if base_image is not None:
                base_variant = base_image.get_service_name()
            add_service_provider(service_providers, full_service_name, file, needs, base_variant)
            app_services[service.service_name] = service

            # resolve a bare service name (or the current path) to a full 'app/service' name
            if auto_complete:
                if app is None:
                    if dmake_file.get_path().startswith(common.sub_dir):
                        if auto_completed_app is None:
                            auto_completed_app = full_service_name
                            break # A bit hacky: we actually do not care about the full service name: we just want to select the proper dmake file.
                        else:
                            raise DMakeException("Ambigous service name: both services '%s' and '%s' are matching the current path." % (full_service_name, auto_completed_app))
                else:
                    if service.service_name == app:
                        if auto_completed_app is None:
                            auto_completed_app = full_service_name
                        else:
                            raise DMakeException("Ambigous service name '%s' is matching '%s' and '%s'" % (app, full_service_name, auto_completed_app))

        app_links = docker_links[app_name]
        for link in dmake_file.get_docker_links():
            if link.link_name in app_links:
                raise DMakeException("Duplicate link name '%s' for application '%s'. Link names must be unique inside each app." % (link.link_name, app_name))
            app_links[link.link_name] = link

    if auto_complete and auto_completed_app is None:
        raise DMakeException("Could not find any app or sub-app matching '%s'" % app)

    # Remove base images which are not provided (by dmake.yml definitions): they are external base images
    for deps in service_dependencies.values():
        to_delete = []
        for i, dep in enumerate(deps):
            if dep[1] not in service_providers:
                to_delete.append(i)
        to_delete.reverse()
        for i in to_delete:
            del deps[i]

    # no '/' in the resolved name means a whole app was selected, not a single service
    is_app_only = auto_completed_app is None or auto_completed_app.find('/') < 0
    if auto_completed_app is None:
        find_active_files(loaded_files, service_providers, service_dependencies, common.sub_dir, common.command)
    else:
        if is_app_only: # app only
            if common.command == 'shell':
                raise DMakeException("Could not find sub-app '%s'" % app)
            active_file = set()
            app_services = services[auto_completed_app]
            for service in app_services.values():
                full_service_name = "%s/%s" % (auto_completed_app, service.service_name)
                file, _, _, _ = service_providers[full_service_name]
                active_file.add(file)
            for file in active_file:
                activate_file(loaded_files, service_providers, service_dependencies, common.command, file)
        else:
            activate_service(loaded_files, service_providers, service_dependencies, common.command, auto_completed_app)

    # second pass
    for node in service_dependencies:
        command, service, service_customization = node
        if command == 'run' and common.change_detection:
            # guarantee "always test a service before running it" when change detection mode: run doesn't trigger test, but if test exists for other reasons, we still want to order it after run, see activate_service() command=='run' comments
            # remark: it's OK to assume the 3rd element of the test_node tuple is None: its the runtime service_customization: it's only set via needed_services for the command==run nodes only
            test_node = ('test', service, None)
            if test_node in service_dependencies:
                common.logger.debug('activate_service: second pass: change detection mode, adding link: run->test\tfor service: {}'.format(service))
                service_dependencies[node].append(test_node)
            else:
                common.logger.debug('activate_service: second pass: change detection mode, *not* adding link: run->test\tfor service: {}'.format(service))

    # (warning: tree vocabulary is reversed here: `leaves` are the nodes with no parent dependency, and depth is the number of levels of child dependencies)
    # check services circularity, and compute leaves, nodes_depth
    leaves, nodes_depth = check_no_circular_dependencies(service_dependencies)
    # get nodes leaves related to the dmake command (exclude notably `base` and `shared_volumes` which are created independently from the command)
    dmake_command_leaves = filter(lambda a_b__c: a_b__c[0][0] == common.command, leaves)
    # prepare reorder by computing shortest node depth starting from the dmake-command-created leaves
    # WARNING: it seems to return different values than nodes_depth: seems to be min(child height)-1 here, vs max(parent height)+1 for nodes_depth (e.g. some run_links have >0 height, but no dependency)
    # this effectively runs nodes as late as possible with build_files_order, and as soon as possible with nodes_depth
    build_files_order = order_dependencies(service_dependencies, dmake_command_leaves)
    # cleanup service_dependencies for debug dot graph: remove nodes with no depth: they are not related (directly or by dependency) to dmake-command-created leaves: they are not needed
    service_dependencies_pruned = dict(filter(lambda service_deps: service_deps[0] in build_files_order, service_dependencies.items()))
    debug_dot_graph = common.dump_debug_dot_graph(service_dependencies_pruned, nodes_depth)
    if common.exit_after_generate_dot_graph:
        print('Exiting after debug graph generation')
        return debug_dot_graph

    # Even with parallel execution we start with display (and thus compute) the execution plan the classic way: per stage and order.
    # Sort by order
    ordered_build_files = sorted(build_files_order.items(), key = lambda file_order: file_order[1])

    # Separate into base / build / tests / deploy
    if len(ordered_build_files) == 0:
        common.logger.info("Nothing to do:")
    else:
        n = len(ordered_build_files)
        base = list(filter(lambda a_b__c: a_b__c[0][0] in ['base'], ordered_build_files))
        build = list(filter(lambda a_b__c: a_b__c[0][0] in ['build_docker'], ordered_build_files))
        test = list(filter(lambda a_b__c: a_b__c[0][0] in ['test', 'run_link', 'run', 'shared_volume'], ordered_build_files))
        deploy = list(filter(lambda a_b__c: a_b__c[0][0] in ['shell', 'deploy'], ordered_build_files))

        # every node must fall in exactly one of the four buckets above
        if len(base) + len(build) + len(test) + len(deploy) != len(ordered_build_files):
            raise Exception('Something went wrong when reorganizing build steps. One of the commands is probably missing.')

        ordered_build_files = [('Building Base', base),
                               ('Building App', build),
                               ('Running App', test),
                               ('Deploying', deploy)]

        common.logger.info("Here is the plan:")

    # Generate the list of command to run
    common.logger.info("Generating commands...")
    all_commands = []
    nodes_commands = {}
    nodes_need_gpu = {}
    init_commands = []
    append_command(init_commands, 'env', var = "REPO", value = common.repo)
    append_command(init_commands, 'env', var = "COMMIT", value = common.commit_id)
    append_command(init_commands, 'env', var = "BUILD", value = common.build_id)
    append_command(init_commands, 'env', var = "BRANCH", value = common.branch)
    append_command(init_commands, 'env', var = "NAME_PREFIX", value = common.name_prefix)
    append_command(init_commands, 'env', var = "DMAKE_TMP_DIR", value = common.tmp_dir)
    # check DMAKE_TMP_DIR still exists: detects unsupported jenkins reruns: clear error
    append_command(init_commands, 'sh', shell = 'dmake_check_tmp_dir')
    all_commands += init_commands

    for stage, commands in ordered_build_files:
        if len(commands) == 0:
            continue
        common.logger.info("## %s ##" % (stage))
        append_command(all_commands, 'stage', name = stage)
        stage_commands = []
        for node, order in commands:
            # Sanity check
            sub_task_orders = [build_files_order[a] for a in service_dependencies[node]]
            if any(map(lambda o: order <= o, sub_task_orders)):
                raise DMakeException('Bad ordering')
            command, service, service_customization = node
            file, _, _, _ = service_providers[service]
            dmake_file = loaded_files[file]
            app_name = dmake_file.get_app_name()
            links = docker_links[app_name]
            step_commands = []
            # temporarily reset need_gpu to isolate which step triggers it, for potential later parallel execution
            restore_need_gpu = common.need_gpu
            common.need_gpu = False
            try:
                if command == "base":
                    dmake_file.generate_base(step_commands, service)
                elif command == "shared_volume":
                    dmake_file.generate_shared_volume(step_commands, service)
                elif command == "shell":
                    dmake_file.generate_shell(step_commands, service, links, common.options.command)
                elif command == "test":
                    dmake_file.generate_test(step_commands, service, links)
                elif command == "run":
                    dmake_file.generate_run(step_commands, service, links, service_customization)
                elif command == "run_link":
                    dmake_file.generate_run_link(step_commands, service, links)
                elif command == "build_docker":
                    dmake_file.generate_build_docker(step_commands, service)
                elif command == "deploy":
                    dmake_file.generate_deploy(step_commands, service)
                else:
                    raise Exception("Unknown command '%s'" % command)
            except DMakeException as e:
                print(('ERROR in file %s:\n' % file) + str(e))
                sys.exit(1)
            nodes_commands[node] = step_commands
            nodes_need_gpu[node] = common.need_gpu
            common.need_gpu = restore_need_gpu

            if len(step_commands) > 0:
                node_display_str = display_command_node(node)
                common.logger.info("- {}".format(node_display_str))
                append_command(stage_commands, 'echo', message = '- Running {}'.format(node_display_str))
                stage_commands += step_commands

        # GPU resource lock
        # `common.need_gpu` is set during Testing commands generations: need to delay adding commands to all_commands to create the gpu lock if needed around the Testing stage
        lock_gpu = (stage == "Running App") and common.need_gpu
        if lock_gpu:
            append_command(all_commands, 'lock', label='GPUS', variable='DMAKE_GPU')

        all_commands += stage_commands

        if lock_gpu:
            append_command(all_commands, 'lock_end')
        append_command(all_commands, 'stage_end')

    # Parallel execution?
    if common.parallel_execution:
        common.logger.info("===============")
        common.logger.info("New plan: parallel execution, by height:")
        # Parallel execution: drop all_commands, start again (but reuse already computed nodes_commands)
        all_commands = []
        all_commands += init_commands
        # group nodes by height
        # iterate on ordered_build_files instead of directly build_files_order to reuse common.is_pr filtering
        # use nodes_depth instead of build_files_order/ordered_build_files order for ASAP execution instead of ALAP (As Late As Possible)
        nodes_by_height = {}
        deploy_nodes = []
        max_height = 0
        for stage, commands in ordered_build_files:
            for node, _ in commands:
                command = node[0]
                if command == 'deploy':
                    # isolate deploy to run them all in parallel at the end
                    deploy_nodes.append(node)
                    continue
                height = nodes_depth[node]
                max_height = max(max_height, height)
                if height not in nodes_by_height:
                    nodes_by_height[height] = []
                nodes_by_height[height].append(node)
        # inject back the deploy nodes as an extra height
        deploy_height = max_height + 1
        if deploy_nodes:
            nodes_by_height[deploy_height] = deploy_nodes

        # generate parallel by height
        gpu_locked = False
        for height, nodes in sorted(nodes_by_height.items()):
            common.logger.info("## height: %s ##" % (height))
            height_commands = []
            height_need_gpu = False
            for node in nodes:
                step_commands = nodes_commands[node]
                if len(step_commands) == 0:
                    continue
                height_need_gpu |= nodes_need_gpu[node]
                node_display_str = display_command_node(node)
                common.logger.info("- {}".format(node_display_str))
                append_command(height_commands, 'parallel_branch', name=node_display_str)
                if height != deploy_height:
                    # don't lock PARALLEL_BUILDERS on deploy height, it could lead to deployment deadlock if there is a deployment runtime dependancy between services
                    append_command(height_commands, 'lock', label='PARALLEL_BUILDERS')
                append_command(height_commands, 'echo', message = '- Running {}'.format(node_display_str))
                height_commands += step_commands
                if height != deploy_height:
                    # don't lock PARALLEL_BUILDERS on deploy height, it could lead to deployment deadlock if there is a deployment runtime dependancy between services
                    append_command(height_commands, 'lock_end')
                append_command(height_commands, 'parallel_branch_end')

            if len(height_commands) == 0:
                continue
            if height_need_gpu and not gpu_locked:
                append_command(all_commands, 'lock', label='GPUS', variable='DMAKE_GPU')
                gpu_locked = True
            append_command(all_commands, 'stage', name = "height {}".format(height))
            append_command(all_commands, 'parallel')
            all_commands += height_commands
            append_command(all_commands, 'parallel_end')
            append_command(all_commands, 'stage_end')
        if gpu_locked:
            append_command(all_commands, 'lock_end')
        # end parallel_execution

    # If not on Pull Request, tag the commit as deployed
    if common.command == "deploy" and not common.is_pr:
        append_command(all_commands, 'git_tag', tag = get_tag_name())

    # Generate output
    if common.is_local:
        file_to_generate = os.path.join(common.tmp_dir, "DMakefile")
    else:
        file_to_generate = "DMakefile"

    generate_command(file_to_generate, all_commands)
    common.logger.info("Commands have been written to %s" % file_to_generate)

    # interactive confirmation for local deployments
    if common.command == "deploy" and common.is_local:
        r = input("Careful ! Are you sure you want to deploy ? [y/N] ")
        if r.lower() != 'y':
            print('Aborting')
            sys.exit(0)

    # If on local, run the commands
    if common.is_local:
        common.logger.info("===============")
        common.logger.info("Executing plan...")
        result = subprocess.call('bash %s' % file_to_generate, shell=True)
        # Do not clean for the 'run' command
        do_clean = common.command not in ['build_docker', 'run']
        if result != 0 and common.command in ['shell', 'test']:
            common.logger.error("""
PAUSE: An error was detected.
- check DMake logs above, notably the last step: '- Running <command> @ <service>'
- you can check the containers status and logs with: 'docker ps -a -f name={name_prefix}'
- the DMake temporary files are in : {tmp_dir}
- you can re-run your command with the DMAKE_DEBUG=1 environment variable to see what DMakes really does
""".format(name_prefix=common.name_prefix, tmp_dir=common.tmp_dir))
            input("Once you have finished, press <ENTER> to let DMake stop and cleanup the containers and temporary files it created.")
        if do_clean:
            os.system('dmake_clean')
        if result != 0:
            common.logger.info("Cleanup finished!")
        elif common.command == 'run':
            common.logger.info("Containers started, you can stop them later with 'dmake stop'.")
        sys.exit(result)
| Deepomatic/dmake | dmake/core.py | core.py | py | 58,937 | python | en | code | 37 | github-code | 1 | [
{
"api_name": "dmake.common.run_shell_command",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "dmake.common",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "os.path.normpath",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path"... |
27847975086 | from PyQt5.QtWidgets import QWidget, QApplication, QLabel, QSlider, QPushButton, QLineEdit, QGridLayout
from PyQt5.QtCore import Qt
import sys
import serial
#The following line is for serial over GPIO
port = 'COM3'
ard = serial.Serial(port,9600,timeout=5)
class Slider_Control(QWidget):
    """Control panel for a 7-joint robotic arm.

    One horizontal slider per joint: dragging a slider updates its numeric
    readout, and releasing it transmits '<joint_index>,<angle>' over the
    module-level serial connection to the Arduino.
    """

    def __init__(self):
        super().__init__()
        self.setGeometry(300, 300, 500, 320)

        # Per-joint sliders: (min, max, initial) values unchanged from the
        # original hand-written setup. Joint order matches Arduino ids 1..7.
        self.ELBOW_slider = self._make_slider(0, 140, 90, self.ELBOW_show, self.ELBOW_changed)
        self.SHOULDER_slider = self._make_slider(0, 165, 20, self.SHOULDER_show, self.SHOULDER_changed)
        self.WRISTx_slider = self._make_slider(0, 180, 87, self.WRISTx_show, self.WRISTx_changed)
        self.WRISTy_slider = self._make_slider(0, 90, 70, self.WRISTy_show, self.WRISTy_changed)
        self.WRISTz_slider = self._make_slider(0, 180, 50, self.WRISTz_show, self.WRISTz_changed)
        self.BASE_slider = self._make_slider(0, 180, 96, self.BASE_show, self.BASE_changed)
        self.CRAW_slider = self._make_slider(0, 100, 30, self.CRAW_show, self.CRAW_changed)

        # Numeric readouts start at each slider's initial value.
        self.ELBOW_num = QLabel(str(self.ELBOW_slider.value()))
        self.SHOULDER_num = QLabel(str(self.SHOULDER_slider.value()))
        self.WRISTx_num = QLabel(str(self.WRISTx_slider.value()))
        self.WRISTy_num = QLabel(str(self.WRISTy_slider.value()))
        self.WRISTz_num = QLabel(str(self.WRISTz_slider.value()))
        self.BASE_num = QLabel(str(self.BASE_slider.value()))
        self.CRAW_num = QLabel(str(self.CRAW_slider.value()))

        # Grid: column 0 = joint name, column 1 = slider, column 2 = value.
        layout = QGridLayout()
        rows = [
            ('Elbow', self.ELBOW_slider, self.ELBOW_num),
            ('Shoulder', self.SHOULDER_slider, self.SHOULDER_num),
            ('Wrist-x', self.WRISTx_slider, self.WRISTx_num),
            ('Wrist-y', self.WRISTy_slider, self.WRISTy_num),
            ('Wrist-z', self.WRISTz_slider, self.WRISTz_num),
            ('Base', self.BASE_slider, self.BASE_num),
            ('Craw', self.CRAW_slider, self.CRAW_num),
        ]
        for row, (caption, slider, readout) in enumerate(rows, start=1):
            layout.addWidget(QLabel(caption), row, 0)
            layout.addWidget(slider, row, 1)
            layout.addWidget(readout, row, 2)

        self.setLayout(layout)
        self.setWindowTitle('Robotic Control')
        self.show()

    def _make_slider(self, minimum, maximum, initial, on_move, on_release):
        """Build a horizontal slider wired to the given move/release slots."""
        slider = QSlider(Qt.Horizontal, self)
        slider.setFocusPolicy(Qt.NoFocus)
        slider.setMinimum(minimum)
        slider.setMaximum(maximum)
        slider.setValue(initial)
        slider.valueChanged.connect(on_move)
        slider.sliderReleased.connect(on_release)
        return slider

    def _send(self, joint, value):
        """Transmit '<joint>,<value>' to the Arduino over the serial port."""
        ard.write(str.encode(str(joint) + ',' + str(value)))

    def ELBOW_show(self):
        self.ELBOW_num.setText(str(self.ELBOW_slider.value()))

    def ELBOW_changed(self):
        self._send(1, self.ELBOW_slider.value())

    def SHOULDER_show(self):
        self.SHOULDER_num.setText(str(self.SHOULDER_slider.value()))

    def SHOULDER_changed(self):
        self._send(2, self.SHOULDER_slider.value())

    def WRISTx_show(self):
        self.WRISTx_num.setText(str(self.WRISTx_slider.value()))

    def WRISTx_changed(self):
        self._send(3, self.WRISTx_slider.value())

    def WRISTy_show(self):
        self.WRISTy_num.setText(str(self.WRISTy_slider.value()))

    def WRISTy_changed(self):
        self._send(4, self.WRISTy_slider.value())

    def WRISTz_show(self):
        self.WRISTz_num.setText(str(self.WRISTz_slider.value()))

    def WRISTz_changed(self):
        self._send(5, self.WRISTz_slider.value())

    def BASE_show(self):
        self.BASE_num.setText(str(self.BASE_slider.value()))

    def BASE_changed(self):
        self._send(6, self.BASE_slider.value())

    def CRAW_show(self):
        self.CRAW_num.setText(str(self.CRAW_slider.value()))

    def CRAW_changed(self):
        self._send(7, self.CRAW_slider.value())
if __name__ == '__main__':
app = QApplication(sys.argv)
ex = Slider_Control()
sys.exit(app.exec_()) | zlby/Robotic-Arm | control_UI.py | control_UI.py | py | 7,319 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "serial.Serial",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtWidgets.QWidget",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.QLabel",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "PyQt5.QtW... |
36317609380 | # coding=utf-8
"""Resources API feature tests."""
__copyright__ = 'Copyright (c) 2020, Utrecht University'
__license__ = 'GPLv3, see LICENSE'
from pytest_bdd import (
given,
parsers,
scenarios,
then,
)
from conftest import api_request
# Bind every scenario in the feature file to the step definitions below.
scenarios('../../features/api/api_resources.feature')
@given('the Yoda resources API is queried for all research groups of current datamanager', target_fixture="api_response")
def api_get_groups_of_datamanger(user):
    """Query the resource_groups_dm endpoint as *user*."""
    return api_request(user, "resource_groups_dm", {})
@then('"<group>" for datamanager are found')
def api_response_groups_for_datamanager(api_response, group):
    """Assert that *group* appears in at least one row of the response data."""
    _, body = api_response
    assert len(body["data"]) > 0
    # The original loop variable shadowed the builtin 'list'; use any() instead.
    assert any(group in row for row in body["data"])
@given('the Yoda resources API is queried by a datamanager for monthly storage data', target_fixture="api_response")
def api_get_monthly_stats_dm(user):
    """Query the datamanager monthly-storage endpoint as *user*."""
    return api_request(user, "resource_monthly_stats_dm", {})
@then('monthly storage data for a datamanager is found')
def api_response_monthly_storage_for_dm(api_response):
    """The first row must carry every expected (truthy) key."""
    _, body = api_response
    first_row = body["data"][0]
    for field in ("category", "tier", "storage"):
        assert first_row[field]
@given('the Yoda resources API is queried by a for statistics data to be used as a feed for an export file', target_fixture="api_response")
def api_get_monthly_category_stats_export_dm(user):
    """Query the export-feed statistics endpoint as *user*."""
    return api_request(user, "resource_monthly_category_stats_export_dm", {})
@then('storage data for export is found')
def api_response_storage_data_for_export(api_response):
    """The first row must carry every field required by the export file."""
    _, body = api_response
    first_row = body["data"][0]
    for field in ("category", "subcategory", "storage", "month", "groupname", "tier"):
        assert first_row[field]
@given('the Yoda resources API is queried for all monthly statistics', target_fixture="api_response")
def api_monthly_stats(user):
    """Query the global monthly-statistics endpoint as *user*."""
    return api_request(user, "resource_monthly_stats", {})
@then('rodsadmin monthly statistics is found')
def api_response_monthly_statistics_rodsadmin(api_response):
    """The first statistics row must carry category, tier and storage."""
    _, body = api_response
    first_row = body['data'][0]
    for field in ('category', 'tier', 'storage'):
        assert first_row[field]
@given('the Yoda resources API is queried for all resources and tiers', target_fixture="api_response")
def api_resource_and_tier_data(user):
    """Query the resource/tier listing endpoint as *user*."""
    return api_request(user, "resource_resource_and_tier_data", {})
@then('list of resources and tiers is found')
def api_response_list_of_resources_and_tiers(api_response):
    """The first entry looks like {'tier': 'Standard', 'name': 'dev001_2', 'id': '10018'}."""
    _, body = api_response
    first_entry = body['data'][0]
    for field in ('tier', 'name', 'id'):
        assert first_entry[field]
@given('the Yoda resources API is queried for tier_name of "<resource_name>"', target_fixture="api_response")
def api_get_tier_on_resource(user, resource_name):
    """Look up the tier of a single resource as *user*."""
    payload = {"res_name": resource_name}
    return api_request(user, "resource_tier", payload)
@then('"<tier_name>" is found')
def api_response_tier_name_for_resource(api_response, tier_name):
    """The response payload must equal the expected tier name."""
    tier_found = api_response[1]['data']
    assert tier_found == tier_name
@given('the Yoda resources API is queried for all available tiers', target_fixture="api_response")
def api_get_tiers(user):
    """Query the tier listing endpoint as *user*."""
    return api_request(user, "resource_get_tiers", {})
@then('list with "<tier_name>" is found')
def api_response_all_tiers(api_response, tier_name):
    """The expected tier must be present in the returned list."""
    available_tiers = api_response[1]['data']
    assert tier_name in available_tiers
@given('the Yoda resources API is requested to save tier "<tier_name>" for resource "<resource_name>"', target_fixture="api_response")
def api_save_tier_for_resource(user, resource_name, tier_name):
    """Persist a tier assignment for a resource as *user*."""
    payload = {"resource_name": resource_name, "tier_name": tier_name}
    return api_request(user, "resource_save_tier", payload)
@then('tier is saved successfully for resource')
def api_response_save_tier_name_successful(api_response):
    """The API must report an 'ok' status."""
    status = api_response[1]['status']
    assert status == 'ok'
@given('the Yoda resources API is queried for usertype of current user', target_fixture="api_response")
def api_get_user_type(user):
    """Query the usertype endpoint as *user*."""
    return api_request(user, "resource_user_get_type", {})
@then('"<user_type>" is found')
def api_response_user_type(api_response, user_type):
    """The reported usertype must match the expected one."""
    reported_type = api_response[1]["data"]
    assert reported_type == user_type
@given('the Yoda resources API is queried for research groups of current user', target_fixture="api_response")
def api_get_user_research_groups(user):
    """Query the current user's research groups."""
    return api_request(user, "resource_user_research_groups", {})
@then('"<research_group>" are found for current user')
def api_response_research_groups_for_user(api_response, research_group):
    """The expected research group must be in the returned list."""
    groups = api_response[1]["data"]
    assert research_group in groups
@given('the Yoda resources API is queried to know if current user is datamanager', target_fixture="api_response")
def api_is_user_datamanager(user):
    """Ask the API whether the current user is a datamanager."""
    return api_request(user, "resource_user_is_datamanager", {})
@then('current user is found')
def api_response_user_is_datamanager(api_response):
    """The API answers 'yes' for a datamanager."""
    answer = api_response[1]["data"]
    assert answer == 'yes'
@given('the Yoda resources API is queried for full year of monthly data for group "<group_name>" starting from current month backward', target_fixture="api_response")
def api_get_monthly_user_research_groups(user, group_name):
    """Query a full year of monthly storage data for *group_name*."""
    from datetime import datetime

    payload = {"group_name": group_name, "current_month": datetime.now().month}
    return api_request(user, "resource_full_year_group_data", payload)
@then('full year storage data is found')
def api_response_full_year_storage(api_response):
    """Entries are dicts keyed like {'month=10-tier=Standard': 6772};
    the first entry's key must carry a month label."""
    _, body = api_response
    first_entry = body['data'][0]
    # Only the first key is inspected, mirroring the original behaviour.
    for entry_key in first_entry:
        assert 'month=' in entry_key
        break
@then(parsers.parse('the response status code is "{code:d}"'))
def api_response_code(api_response, code):
    """The HTTP status of the last API call must match *code*."""
    status_code = api_response[0]
    assert status_code == code
| peer35/irods-ruleset-uu | tests/step_defs/api/test_api_resources.py | test_api_resources.py | py | 6,600 | python | en | code | null | github-code | 1 | [
{
"api_name": "pytest_bdd.scenarios",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "conftest.api_request",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "pytest_bdd.given",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "pytest_bdd... |
3804617895 | import sys
# Make the vendored third-party packages importable.
sys.path.insert(0, 'src/vendor')
from imdb import IMDb
from imdb import helpers
# Shared IMDb client used by the functions below.
ia = IMDb()
# NOTE(review): 'helpers' is imported as a module, not a class — calling it
# should raise TypeError at import time, and 'h' is unused in this file.
# Confirm the intended object before relying on it.
h = helpers()
def multipleMovies(title, allMoviesWithName, movie):
    """Interactively narrow a list of IMDb search results to one movie.

    :param title: the searched title (used in user prompts)
    :param allMoviesWithName: IMDb search results for *title*
    :param movie: current candidate; replaced when a single match is resolved
    :return: (status, results, movie) — status 0 when a single movie was
             resolved, 1 when further filtering is still needed.
    """
    if len(allMoviesWithName) == 1:
        movie = ia.get_movie(allMoviesWithName[0].movieID)
        # BUG FIX: the original fell through here and implicitly returned
        # None; return the resolved movie like the branch below does.
        return (0, allMoviesWithName, movie)
    filterBy = input(f'Multiple items named "{title}" ({len(allMoviesWithName)} total). Filter by: \nkind\nyear\n')
    if filterBy == 'kind':
        possibleKinds = [x.get('kind', '') for x in allMoviesWithName]
        chosenKind = input(f'Which kind?\n{possibleKinds}\n')
        allMoviesWithName = [m for m in allMoviesWithName if m.get('kind', '') == chosenKind]
    elif filterBy == 'year':
        possibleYears = [x.get('year', 'unknown') for x in allMoviesWithName]
        chosenYear = input(f'Multiple movies named "{title}".Which year?\n{possibleYears}\n')
        allMoviesWithName = [m for m in allMoviesWithName if m.get('year', '') == int(chosenYear)]
    if len(allMoviesWithName) == 1:
        return (0, allMoviesWithName, ia.get_movie(allMoviesWithName[0].movieID))
    return (1, allMoviesWithName, '')
def getMovie(movieDetails):
    """Look up a movie on IMDb and build the app's movie JSON record.

    :param movieDetails: sequence where [0] is the AFI '100 Laughs' rank,
        [1] the title, [2] the director (currently unused) and [3] the year.
    :return: dict with the fields expected by the frontend, or None when a
        single movie could not be resolved.
    """
    title = movieDetails[1]
    # director = movieDetails[2]
    year = int(movieDetails[3])
    allMoviesWithName = ia.search_movie(title)
    # Ambiguous title: keep only proper movies released in the given year.
    if(len(allMoviesWithName) > 1):
        print(f'Multiple items found ({len(allMoviesWithName)}). Filtering by year = {year} and kind == movie')
        allMoviesWithName = [m for m in allMoviesWithName if m.get('year','')==year and m.get('kind','')=='movie']
    if(len(allMoviesWithName) == 1):
        movie = ia.get_movie(allMoviesWithName[0].movieID)
    else:
        # Zero or still-multiple matches: give up (implicitly returns None).
        print(f'Something went wrong. Total moves found after filter: {len(allMoviesWithName)}. Stopping. ')
        return
    # NOTE(review): the direct movie['...'] accesses below raise KeyError when
    # IMDb lacks a field (e.g. 'plot outline', 'rating') — confirm all listed
    # movies carry these keys or switch to .get with defaults.
    directors = [x['name'] for x in movie['directors']]
    genres = movie['genres']
    description = movie['plot outline']
    rating = movie['rating']
    numberVotes = movie['votes']
    writers = [x.get('name', '') for x in movie['writers']]
    # Top-billed cast only.
    stars = [x['name'] for x in movie['cast'][:3]]
    runtime = movie['runtime'][0]
    # Strip the size-variant suffix from the cover URL to get the base image.
    imageUrl = '.'.join(movie['cover url'].split('.')[:-2])+'.jpg'
    budget = movie['box office'].get('Budget','') if movie.get('box office','') != '' else ''
    openingWeekendUSA = movie['box office'].get('Opening Weekend United States','') if movie.get('box office','') != '' else ''
    movieJ = {
        "title": title,
        "year": year,
        "description": description,
        "image": imageUrl,
        "ratings": [{
            "source": "IMDB",
            "rating": rating,
            "numberVotes": numberVotes
        }],
        "watched": "notWatched",
        "liked": "",
        "id": movie.movieID,
        "director": directors,
        "writer": writers,
        "stars": stars,
        "releaseDate": movie['original air date'],
        "runtime": runtime,
        "genres": genres,
        "budget": budget,
        "openingWeekendUSA": openingWeekendUSA,
        "worldwideGross": "",
        "tags": [],
        "AFI100LaughsRank": movieDetails[0]
    }
    return movieJ
| allisontharp/laughtrack | backend/datapull/imdbFuncs.py | imdbFuncs.py | py | 2,816 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.insert",
"line_number": 2,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 2,
"usage_type": "attribute"
},
{
"api_name": "imdb.IMDb",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "imdb.helpers",
"line_number"... |
22129992552 | import discord, asyncio, requests, datetime
# -- INIT --
# Daily posting time (24-hour clock).
POST_TIME_H = 6
POST_TIME_M = 0
POST_TIME_S = 0
# Bot credentials, Wordnik API key and target channel id — fill in before running.
BOT_EMAIL = ''
BOT_PASS = ''
KEY = ''
CHAN_ID = ''
# Connect to client
client = discord.Client()
def apiGet(url):
    """GET *url* and decode the JSON payload."""
    return requests.get(url).json()
def randomWord():
    """Fetch one random dictionary word (min length 5) from the Wordnik API."""
    url = ("http://api.wordnik.com:80/v4/words.json/randomWord?hasDictionaryDef=true&minCorpusCount=0&"
           "maxCorpusCount=-1&minDictionaryCount=1&maxDictionaryCount=-1&minLength=5&maxLength=-1&api_key=" + KEY)
    return apiGet(url)["word"]
def defLook(word):
    """Return the first Wordnik definition of *word*, or '' when none exist."""
    url = ("http://api.wordnik.com:80/v4/word.json/" + word +
           "/definitions?limit=200&includeRelated=true&useCanonical=false&includeTags=false&api_key=" + KEY)
    data = apiGet(url)
    if not data:
        return ""
    return data[0]["text"]
def post(word, definition):
    """Format the word-of-the-day announcement message."""
    header = "**WORD OF THE DAY: " + word + "**\n"
    body = "*Def - " + definition + "*"
    return header + body
def toWait(time):
    """Seconds from *time* until the next scheduled posting moment."""
    base = 86400  # seconds in a day
    offset = ((time.hour - POST_TIME_H) * 3600
              + (time.minute - POST_TIME_M) * 60
              + (time.second - POST_TIME_S))
    # Before today's post time: wait the (positive) remainder; after it:
    # wait until the same time tomorrow.
    return -offset if offset < 0 else base - offset
# -- START --
@asyncio.coroutine
def my_background_task():
    """Daily loop: pick a random word, wait until the scheduled time, then
    post the word-of-the-day message and pin it (unpinning the previous one).

    Uses the legacy generator-based coroutine style (pre async/await,
    old discord.py client API).
    """
    lastPin = None
    # Block until the discord client has finished logging in.
    yield from client.wait_until_ready()
    print("-- Task Started --")
    chan = discord.Object(id=CHAN_ID)
    while not client.is_closed:
        print("-- Selecting Word --")
        # Pick a random word from Wordnik.
        word = randomWord()
        # Look up its first definition.
        definition = defLook(word)
        print(word)
        print(definition)
        # Render the announcement message.
        toPost = post(word, definition)
        # Seconds until the next POST_TIME_* moment.
        now = datetime.datetime.now()
        timeToWait = toWait(now)
        print("-- READY AT: {0} --".format(now))
        print("-- WAITING FOR: {0}s --".format(timeToWait))
        # Sleep until post time.
        yield from asyncio.sleep(timeToWait)
        # Unpin the previous day's message, if any.
        # NOTE(review): unpin_message/pin_message look like coroutines but are
        # called without 'yield from' — they may never actually run; confirm
        # against the discord.py version in use.
        if lastPin is not None:
            client.unpin_message(lastPin)
        # Post the new word of the day.
        lastPin = yield from client.send_message(chan, toPost)
        # Pin the fresh message.
        client.pin_message(lastPin)
# -- END --
@asyncio.coroutine
def test_post():
    """Send a single 'test' message once the client is ready (smoke test)."""
    print("-- TEST --")
    yield from client.wait_until_ready()
    # NOTE(review): elsewhere the channel id is wrapped in discord.Object;
    # here the raw CHAN_ID string is passed — confirm this is accepted.
    yield from client.send_message(CHAN_ID, "test")
# -- RUN --
# The scheduled daily task is currently disabled; only the test post is registered.
# client.loop.create_task(my_background_task())
client.async_event(test_post)
client.run(BOT_EMAIL, BOT_PASS)
| resloved/word-of-the-day-discord | wotd.py | wotd.py | py | 2,554 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "discord.Client",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "discord.Object",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "datetime.datetime.now",
... |
8556892866 | # coding:utf8
__author__ = 'Marcelo Ferreira da Costa Gomes'
# Apply filters of interest in a dataframe containing SINAN-SRAG data
import pandas as pd
import numpy as np
import argparse
import logging
import re
from argparse import RawDescriptionHelpFormatter
from .insert_epiweek import insert_epiweek
from .delay_table import extract_quantile, delayimputation, createtable
module_logger = logging.getLogger('update_system.sinan_filter_of_interest')
# Numeric code for each Brazilian federative unit (UF), keyed by its
# two-letter abbreviation; used below to convert SG_UF/SG_UF_NOT columns
# to numeric form. 'NI' -> 99 stands for "not informed".
tabela_siglauf = {'RO': 11,
                  'AC': 12,
                  'AM': 13,
                  'RR': 14,
                  'PA': 15,
                  'AP': 16,
                  'TO': 17,
                  'MA': 21,
                  'PI': 22,
                  'CE': 23,
                  'RN': 24,
                  'PB': 25,
                  'PE': 26,
                  'AL': 27,
                  'SE': 28,
                  'BA': 29,
                  'MG': 31,
                  'ES': 32,
                  'RJ': 33,
                  'SP': 35,
                  'PR': 41,
                  'SC': 42,
                  'RS': 43,
                  'MS': 50,
                  'MT': 51,
                  'GO': 52,
                  'DF': 53,
                  'NI': 99}
# Filename suffix per case-definition level assigned by symptoms_filter()
# (1 = hospitalized/death, 2 = SARI without the fever requirement,
# 3 = full SARI) — presumably consumed when writing output files; confirm.
filtro_dict = {1: '_hospdeath',
               2: '_sragnofever',
               3: ''}
dict_dtypes = {
'NU_NOTIFIC': 'str',
'DT_NOTIFIC': 'str',
'SEM_NOT': 'Int64',
'ANO': 'Int16',
'DT_SIN_PRI': 'str',
'SEM_PRI': 'Int64',
'SG_UF_NOT': 'str',
'ID_REGIONA': 'str',
'CO_REGIONA': 'Int32',
'ID_MUNICIP': 'str',
'CO_MUN_NOT': 'Int32',
'ID_UNIDADE': 'str',
'CO_UNI_NOT': 'Int32',
'NM_UN_INTE': 'str',
'CO_UN_INTE': 'Int32',
'LAB_AN': 'str',
'CO_LAB_AN': 'Int32',
'RES_CAPITAL': 'str',
'NOT_CAPITAL': 'str',
'NU_CPF': 'Int64',
'NU_CNS': 'str',
'NM_PACIENT': 'str',
'CS_SEXO': 'str',
'DT_NASC': 'str',
'NU_IDADE_N': 'str',
'TP_IDADE': 'Int8',
'COD_IDADE': 'str',
'CS_GESTANT': 'Int8',
'CS_RACA': 'Int8',
'CS_ETINIA': 'str',
'TP_POV_CT': 'str',
'CS_ESCOL_N': 'Int8',
'NM_MAE_PAC': 'str',
'NU_CEP': 'str',
'ID_PAIS': 'str',
'CO_PAIS': 'Int16',
'SG_UF': 'str',
'ID_RG_RESI': 'str',
'CO_RG_RESI': 'Int16',
'ID_MN_RESI': 'str',
'CO_MUN_RES': 'Int32',
'NM_BAIRRO': 'str',
'NM_LOGRADO': 'str',
'NU_NUMERO': 'str',
'NM_COMPLEM': 'str',
'NU_DDD_TEL': 'Int8',
'NU_TELEFON': 'str',
'CS_ZONA': 'Int8',
'SURTO_SG': 'Int8',
'NOSOCOMIAL': 'Int8',
'AVE_SUINO': 'Int8',
'FEBRE': 'Int8',
'TOSSE': 'Int8',
'GARGANTA': 'Int8',
'DISPNEIA': 'Int8',
'DESC_RESP': 'Int8',
'SATURACAO': 'Int8',
'DIARREIA': 'Int8',
'VOMITO': 'Int8',
'OUTRO_SIN': 'Int8',
'OUTRO_DES': 'str',
'PUERPERA': 'Int8',
'FATOR_RISC': 'str',
'CARDIOPATI': 'Int8',
'HEMATOLOGI': 'Int8',
'SIND_DOWN': 'Int8',
'HEPATICA': 'Int8',
'ASMA': 'Int8',
'DIABETES': 'Int8',
'NEUROLOGIC': 'Int8',
'PNEUMOPATI': 'Int8',
'IMUNODEPRE': 'Int8',
'RENAL': 'Int8',
'OBESIDADE': 'Int8',
'OBES_IMC': 'str',
'OUT_MORBI': 'Int8',
'MORB_DESC': 'str',
'VACINA': 'Int8',
'DT_UT_DOSE': 'str',
'MAE_VAC': 'Int8',
'DT_VAC_MAE': 'str',
'M_AMAMENTA': 'Int8',
'DT_DOSEUNI': 'str',
'DT_1_DOSE': 'str',
'DT_2_DOSE': 'str',
'ANTIVIRAL': 'Int8',
'TP_ANTIVIR': 'Int8',
'OUT_ANTIV': 'str',
'DT_ANTIVIR': 'str',
'HOSPITAL': 'Int8',
'DT_INTERNA': 'str',
'SG_UF_INTE': 'str',
'ID_RG_INTE': 'str',
'CO_RG_INTE': 'Int16',
'ID_MN_INTE': 'str',
'CO_MU_INTE': 'Int32',
'UTI': 'Int8',
'DT_ENTUTI': 'str',
'DT_SAIDUTI': 'str',
'SUPORT_VEN': 'Int8',
'RAIOX_RES': 'Int8',
'RAIOX_OUT': 'str',
'DT_RAIOX': 'str',
'AMOSTRA': 'Int8',
'DT_COLETA': 'str',
'TP_AMOSTRA': 'Int8',
'OUT_AMOST': 'str',
'REQUI_GAL': 'Int64',
'IF_RESUL': 'Int8',
'DT_IF': 'str',
'POS_IF_FLU': 'Int8',
'TP_FLU_IF': 'Int8',
'POS_IF_OUT': 'Int8',
'IF_VSR': 'Int8',
'IF_PARA1': 'Int8',
'IF_PARA2': 'Int8',
'IF_PARA3': 'Int8',
'IF_ADENO': 'Int8',
'IF_OUTRO': 'Int8',
'DS_IF_OUT': 'object',
'LAB_IF': 'str',
'CO_LAB_IF': 'Int64',
'PCR_RESUL': 'Int8',
'DT_PCR': 'str',
'POS_PCRFLU': 'Int8',
'TP_FLU_PCR': 'Int8',
'PCR_FLUASU': 'Int8',
'FLUASU_OUT': 'str',
'PCR_FLUBLI': 'Int8',
'FLUBLI_OUT': 'str',
'POS_PCROUT': 'Int8',
'PCR_VSR': 'Int8',
'PCR_PARA1': 'Int8',
'PCR_PARA2': 'Int8',
'PCR_PARA3': 'Int8',
'PCR_PARA4': 'Int8',
'PCR_ADENO': 'Int8',
'PCR_METAP': 'Int8',
'PCR_BOCA': 'Int8',
'PCR_RINO': 'Int8',
'PCR_OUTRO': 'Int8',
'DS_PCR_OUT': 'str',
'LAB_PCR': 'str',
'CO_LAB_PCR': 'Int64',
'CLASSI_FIN': 'Int8',
'CLASSI_OUT': 'str',
'CRITERIO': 'Int8',
'EVOLUCAO': 'Int8',
'DT_EVOLUCA': 'str',
'NU_DO': 'Int64',
'DT_ENCERRA': 'str',
'OBSERVA': 'str',
'NOME_PROF': 'str',
'REG_PROF': 'str',
'DT_DIGITA': 'str',
'HISTO_VGM': 'Int8',
'PAIS_VGM': 'str',
'CO_PS_VGM': 'Int16',
'LO_PS_VGM': 'str',
'DT_VGM': 'str',
'DT_RT_VGM': 'str',
'PCR_SARS2': 'Int8',
'PAC_COCBO': 'str',
'PAC_DSCBO': 'str',
'OUT_ANIM': 'str',
'DOR_ABD': 'Int8',
'FADIGA': 'Int8',
'PERD_OLFT': 'Int8',
'PERD_PALA': 'Int8',
'TOMO_RES': 'Int8',
'TOMO_OUT': 'str',
'DT_TOMO': 'str',
'TP_TES_AN': 'Int8',
'DT_RES_AN': 'str',
'RES_AN': 'Int8',
'POS_AN_FLU': 'Int8',
'TP_FLU_AN': 'Int8',
'POS_AN_OUT': 'Int8',
'AN_SARS2': 'Int8',
'AN_VSR': 'Int8',
'AN_PARA1': 'Int8',
'AN_PARA2': 'Int8',
'AN_PARA3': 'Int8',
'AN_ADENO': 'Int8',
'AN_OUTRO': 'Int8',
'DS_AN_OUT': 'str',
'TP_AM_SOR': 'Int8',
'SOR_OUT': 'str',
'DT_CO_SOR': 'str',
'TP_SOR': 'Int8',
'OUT_SOR': 'str',
'DT_RES': 'str',
'RES_IGG': 'Int8',
'RES_IGM': 'Int8',
'RES_IGA': 'Int8',
'DT_ATUALIZACAO': 'str',
'DOSE_1_COV': 'str',
'DOSE_2_COV': 'str',
'LOTE_1_COV': 'str',
'LOTE_2_COV': 'str',
'LAB_PR_COV': 'str',
'VACINA_COV': 'Int8',
'@VERSION': 'Int64',
'@TIMESTAMP': 'str'
}
def eq_notna(s: pd.Series, x):
    """Element-wise ``s == x`` that is False (not NA) where *s* is missing."""
    not_missing = pd.notna(s)
    return not_missing & s.eq(x)
def ne_orna(s: pd.Series, x):
    """Element-wise ``s != x`` that is True where *s* is missing."""
    missing = pd.isna(s)
    return missing | s.ne(x)
def clean_open_field(s: pd.Series):
    """Censor free-text entries that contain special characters.

    Any entry matching one of @ $ % & * is replaced by 'CENSURADO';
    missing values are left untouched.

    :param s: string Series with free-text content
    :return: Series with offending entries replaced
    """
    # Raw string fixes the invalid escape sequences (W605) of the original
    # literal; the regex pattern itself is unchanged.
    specialchar = r'\@|\$|\%|\&|\*'
    s = s.where(~s.str.contains(specialchar, regex=True, na=False), 'CENSURADO')
    return s
def check_covid19_regex(df: pd.DataFrame, column: str):
    """Boolean mask of rows whose free-text *column* mentions SARS-CoV-2.

    Matches a list of common (mis)spellings, uppercased, and discards the
    explicit 'not detected' phrasing. When the column has no non-null text
    at all, every row is False.
    """
    has_text = pd.notnull(df[column])
    if has_text.sum() == 0:
        # No usable text: an all-False mask.
        return has_text
    sars_cov_2_regex = ('novo corona|covid|coovid|cov-2|covd-19|'
                        'cov2|cov 2|cov- 2|cov -2|cov - 2|c0v-2|cav-2|cav 2|arvs-cov|ars- cov|ars cov|ars - '
                        'cov|ars-cov|ars2cov|covi-19|civid 19|cobid-19|covis|cov-2|civid-19')
    sars_cov_2_regex_discard = 'nao detectado para covid19'
    mentions = df[column].str.contains(sars_cov_2_regex.upper(), regex=True, na=False)
    denies = df[column].str.contains(sars_cov_2_regex_discard.upper(), regex=True, na=False)
    return mentions & ~denies
def readtable(fname, sep=','):
    """Load a SINAN-SRAG CSV with the expected column dtypes.

    Tries UTF-8 first and falls back to UTF-16, which some SINAN exports use.

    :param fname: path of the CSV file
    :param sep: column separator
    :return: DataFrame typed according to dict_dtypes
    """
    try:
        df = pd.read_csv(fname, sep=sep, dtype=dict_dtypes, low_memory=False, encoding='utf-8')
    except UnicodeError:
        # The original bare 'except' also hid real parsing errors; only an
        # encoding failure should trigger the UTF-16 retry.
        df = pd.read_csv(fname, sep=sep, dtype=dict_dtypes, low_memory=False, encoding='utf-16')
    return df
def date_cleanup(df, dt_cols):
    '''
    Standardize column data and discard those without notification and/or first symptoms date.

    Date columns may arrive as integers (YYYYMMDD), ISO timestamps, or
    day/month/year strings with '-' or '/' separators; the format of each
    column is sniffed from its data before conversion.

    :param df: notifications data frame
    :param dt_cols: list of date column names
    :return: df: standardized data frame
    '''
    # Normalize sentinel values (-1 anywhere, 10101 in date columns) to None.
    df = df.where(df != -1, None)
    df[dt_cols] = df[dt_cols].where(df[dt_cols] != 10101, None)
    if df.CRITERIO.dtype == 'O':
        # 'NÃ' is a truncated/garbled placeholder, not a real criterion.
        df.CRITERIO = df.CRITERIO.where(df.CRITERIO != 'NÃ', None)
    # Cases must carry both first-symptoms and notification dates.
    df.dropna(subset=["DT_SIN_PRI", "DT_NOTIFIC"], inplace=True)
    # Convert all date related columns to datetime format
    for col in dt_cols:
        # Default format for integer-coded dates (YYYYMMDD).
        dtformat = '%Y%m%d'
        if sum(~pd.isnull(df[col])) > 0:
            # Sniff the format from the first non-null entry of the column.
            sample = df.loc[~pd.isnull(df[col]), col].values[0]
            if isinstance(sample, str):
                if 'T' in sample:
                    # ISO timestamp: keep only the date part (YYYY-MM-DD).
                    df[col] = pd.to_datetime(df[col].str[:10], errors='coerce', format='%Y-%m-%d')
                else:
                    dtsep = '-'
                    if '/' in sample:
                        dtsep = '/'
                    # Split every entry into 3 fields and take the per-field
                    # maxima to locate the year (largest value) position.
                    dttest = pd.DataFrame(list(
                        df.loc[~pd.isnull(df[col]), col].str.split(dtsep)
                    ))
                    maxvals = [int(dttest[i].max()) for i in range(3)]
                    del dttest
                    yearpos = maxvals.index(max(maxvals))
                    if yearpos == 2:
                        # Year last: middle field > 12 means month-first (US order).
                        if maxvals[1] > 12:
                            dtformat = '%m' + dtsep + '%d' + dtsep + '%Y'
                        else:
                            dtformat = '%d' + dtsep + '%m' + dtsep + '%Y'
                    else:
                        dtformat = '%Y' + dtsep + '%m' + dtsep + '%d'
                    df[col] = pd.to_datetime(df[col], errors='coerce', format=dtformat)
            else:
                df[col] = pd.to_datetime(df[col], errors='coerce', format=dtformat)
        else:
            # All-null column: a plain conversion keeps the dtype consistent.
            df[col] = pd.to_datetime(df[col])
    # Discard those neither hospitalized nor deceased. For cases from 2009, keep all:
    df = df[(df.DT_SIN_PRI.apply(lambda x: x.year) == 2009) | (eq_notna(df.HOSPITAL, 1)) | (eq_notna(df.EVOLUCAO, 2))]
    return df
def table_compatibility(df):
    """Mirror the post-2020-07-27 antigen-test (*_AN) columns into their
    legacy (*_IF) counterparts so old and new layouts can be processed alike.

    Modifies *df* in place; returns None.
    """
    # The antigen result date (and result, when present) always map onto
    # the old immunofluorescence fields.
    df['DT_IF'] = df['DT_RES_AN'].copy()
    if 'RES_AN' in df.columns:
        df['IF_RESUL'] = df['RES_AN'].copy()

    candidates = ['CO_LAB_AN', 'POS_AN_FLU', 'TP_FLU_AN', 'POS_AN_OUT',
                  'AN_VSR', 'AN_PARA1', 'AN_PARA2', 'AN_PARA3',
                  'AN_ADENO', 'AN_OUTRO', 'DS_AN_OUT']
    present = [name for name in candidates if name in df.columns]
    if present:
        # 'XX_AN_YY' -> 'XX_IF_YY' for every antigen column actually present.
        legacy_names = [name.replace('AN', 'IF') for name in present]
        df[legacy_names] = df[present].copy()
    return
def symptoms_filter(df):
    """Assign a case-definition level to each notification in df['filtro'].

    Levels (each one strictly refines the previous):
      0 - none of the criteria met;
      1 - hospitalized or evolved to death;
      2 - level 1 plus respiratory symptoms (cough or sore throat, together
          with dyspnea, low saturation or respiratory distress);
      3 - level 2 plus fever (the full SARI definition).

    Hospitalization/death filtering itself happens later (after the date
    columns are consolidated) to accommodate the 2009 exception.
    """
    df['filtro'] = 0
    hospitalized_or_death = (eq_notna(df.EVOLUCAO, 2)) | (eq_notna(df.HOSPITAL, 1))
    df.loc[hospitalized_or_death, 'filtro'] = 1
    cough_or_throat = (eq_notna(df.TOSSE, 1)) | (eq_notna(df.GARGANTA, 1))
    resp_distress = (eq_notna(df.DISPNEIA, 1)) | (eq_notna(df.SATURACAO, 1)) | (eq_notna(df.DESC_RESP, 1))
    df.loc[(df.filtro == 1) & cough_or_throat & resp_distress, 'filtro'] = 2
    df.loc[(df.filtro == 2) & (eq_notna(df.FEBRE, 1)), 'filtro'] = 3
    return df
def filter_db2019(df, tag=None, filtertype='srag'):
tgtcols = ['AMOSTRA', 'ANTIVIRAL', 'AVE_SUINO', 'CLASSI_FIN', 'CLASSI_OUT', 'CO_LAB_IF', 'CO_LAB_PCR', 'CO_MU_INTE',
'CO_MUN_NOT', 'CO_MUN_RES', 'CO_PAIS', 'CO_REGIONA', 'CO_RG_RESI', 'CO_UNI_NOT', 'CRITERIO', 'CS_ETINIA',
'CO_UN_INTE',
'CS_RACA', 'CS_SEXO', 'DS_IF_OUT', 'DS_PCR_OUT', 'DT_ANTIVIR',
'DT_COLETA', 'DT_DIGITA', 'DT_ENCERRA', 'DT_EVOLUCA', 'DT_IF', 'DT_INTERNA', 'DT_NOTIFIC', 'DT_PCR',
'DT_IFI',
'DT_SIN_PRI', 'DT_UT_DOSE', 'EVOLUCAO', 'FLUASU_OUT', 'FLUBLI_OUT', 'HOSPITAL',
'ID_MN_RESI', 'ID_MUNICIP', 'ID_REGIONA', 'ID_RG_RESI', 'IF_ADENO', 'IF_OUTRO', 'IF_PARA1',
'IF_PARA2', 'IF_PARA3', 'IF_RESUL', 'IF_VSR', 'NU_CEP', 'NU_IDADE_N', 'NU_NOTIFIC', 'NM_BAIRRO',
'NM_LOGRADO', 'OUT_ANTIVIR', 'PCR_ADENO', 'PCR_BOCA', 'PCR_FLUASU', 'PCR_FLUBLI', 'PCR_METAP',
'PCR_OUTRO', 'PCR_PARA1',
'PCR_PARA2', 'PCR_PARA3', 'PCR_PARA4', 'PCR_RESUL', 'PCR_RINO', 'PCR_SARS2', 'PCR_VSR', 'POS_IF_FLU',
'POS_IF_OUT', 'POS_PCRFLU', 'POS_PCROUT', 'SEM_NOT', 'SEM_PRI', 'SG_UF', 'SG_UF_NOT',
'TP_ANTIVIR', 'TP_FLU_IF', 'TP_FLU_PCR', 'TP_IDADE', 'VACINA']
tgt_cols_sintomas = ['DESC_RESP',
'DISPNEIA',
'DIARREIA',
'FEBRE',
'GARGANTA',
'TOSSE',
'SATURACAO',
'DIARREIA',
'VOMITO',
'DOR_ABD',
'FADIGA',
'PERD_OLFT',
'PERD_PALA',
'OUTRO_SIN',
'OUTRO_DES']
tgtcols_uti = ['UTI',
'DT_ENTUTI',
'DT_SAIDUTI',
'SUPORT_VEN']
tgtcols_comorb = ['CS_GESTANT',
'FATOR_RISC',
'PUERPERA',
'CARDIOPATI',
'HEMATOLOGI',
'HEPATICA',
'SIND_DOWN',
'HEPATICA',
'ASMA',
'DIABETES',
'NEUROLOGIC',
'PNEUMOPATI',
'IMUNODEPRE',
'RENAL',
'OBESIDADE',
'OBES_IMC',
'OUT_MORBI',
'MORB_DESC']
tgtcols_2020_07_27 = ['TP_TES_AN',
'DT_RES_AN',
'RES_IGA',
'RES_IGG',
'RES_IGM',
'LAB_AN',
'CO_LAB_AN',
'POS_AN_FLU',
'TP_FLU_AN',
'POS_AN_OUT',
'AN_SARS2',
'AN_VSR',
'AN_PARA1',
'AN_PARA2',
'AN_PARA3',
'AN_ADENO',
'AN_OUTRO',
'DS_AN_OUT',
'SUPORT_VEN',
'RAIOX_RES',
'RAIOX_OUT',
'DT_RAIOX',
'TOMO_RES',
'TOMO_OUT',
'DT_TOMO']
tgt_cols_vac_covid = ['VACINA_COV', 'DOSE_1_COV', 'DOSE_2_COV', 'LAB_PR_COV', 'FNT_IN_COV']
if 'DT_RES_AN' in df.columns:
table_compatibility(df)
tgtcols = list(set(tgtcols).union(tgt_cols_sintomas).union(tgtcols_uti).union(tgtcols_comorb).union(
tgtcols_2020_07_27).union(tgt_cols_vac_covid))
if 'DT_IF' in df.columns:
df['DT_IFI'] = df.DT_IF
cols = df.columns
for col in set(tgtcols).difference(cols):
df[col] = None
df = df[tgtcols].copy()
df = symptoms_filter(df)
regexp = re.compile('^DT')
dt_cols = list(filter(regexp.match, tgtcols))
df = date_cleanup(df, dt_cols)
# Registro da idade, em anos.
# Campo TP_IDADE indica a escala da variável NU_IDADE_N, sendo:
# 1-Dias, 2-Meses, 3-Anos
df['idade_em_anos'] = df.NU_IDADE_N.where(df.TP_IDADE == 3, 0)
# If sample collection field is empty, convert to unknown:
df.AMOSTRA = df.AMOSTRA.where(pd.notnull(df.AMOSTRA), 9).astype(int)
# Convert UF abbreviation to numeric code:
df.loc[pd.notnull(df.SG_UF_NOT), 'SG_UF_NOT'] = df.loc[pd.notnull(df.SG_UF_NOT), 'SG_UF_NOT'].map(tabela_siglauf)
df.loc[pd.isnull(df.SG_UF_NOT), 'SG_UF_NOT'] = 99
df.loc[pd.notnull(df.SG_UF), 'SG_UF'] = df.loc[pd.notnull(df.SG_UF), 'SG_UF'].map(tabela_siglauf)
df.loc[pd.isnull(df.SG_UF), 'SG_UF'] = 99
# Clean up PCR_RESUL and IF_RESUL fields:
def labresultcleanup(dfin, x):
# If sample was collected, IF/PCR result cannot be unknown or empty:
dfin.loc[(eq_notna(dfin.AMOSTRA, 1)) & (~dfin[x].isin([1, 2, 3, 4])), x] = 5
# IF PCR/IF result field is marked unknown but sample was not collected, convert to not tested:
dfin.loc[(eq_notna(dfin[x], 9)) & (eq_notna(dfin.AMOSTRA, 2)), x] = 4
# If PCR/IF result field is empty and sample was NOT collected, convert to not tested
dfin[x] = dfin[x].where(pd.notnull(dfin[x]) | (ne_orna(dfin.AMOSTRA, 2)), 4)
# If PCR/IF result field is empty and sample field is empty or unknown, convert to unknown
dfin[x] = dfin[x].where(pd.notnull(dfin[x]) | (dfin.AMOSTRA.isin([1, 2])), 9)
return
labresultcleanup(df, 'PCR_RESUL')
labresultcleanup(df, 'IF_RESUL')
df['FLU_A'] = pd.Series([], dtype='Int8')
df['FLU_B'] = pd.Series([], dtype='Int8')
df['FLU_LAB'] = pd.Series([], dtype='Int8')
df['VSR'] = pd.Series([], dtype='Int8')
df['PARA1'] = pd.Series([], dtype='Int8')
df['PARA2'] = pd.Series([], dtype='Int8')
df['PARA3'] = pd.Series([], dtype='Int8')
df['PARA4'] = pd.Series([], dtype='Int8')
df['ADNO'] = pd.Series([], dtype='Int8')
df['METAP'] = pd.Series([], dtype='Int8')
df['BOCA'] = pd.Series([], dtype='Int8')
df['RINO'] = pd.Series([], dtype='Int8')
df['SARS2'] = pd.Series([], dtype='Int8')
df['OTHERS'] = pd.Series([], dtype='Int8')
df['NEGATIVE'] = pd.Series([], dtype='Int8')
df['POSITIVE'] = pd.Series([], dtype='Int8')
df['INCONCLUSIVE'] = pd.Series([], dtype='Int8')
df['DELAYED'] = pd.Series([], dtype='Int8')
df['TESTED'] = pd.Series([], dtype='Int8')
df['NOTTESTED'] = pd.Series([], dtype='Int8')
df['TESTING_IGNORED'] = pd.Series([], dtype='Int8')
def labresult(x, y=None, pos=1):
if all(pd.isnull([x, y])):
return None
elif pos in [pd.notna(x), pd.notna(y)]:
return 1
else:
return 0
mask = (df[['TP_FLU_PCR', 'TP_FLU_IF']].notna().sum(axis=1) >= 1)
df.loc[mask, 'FLU_A'] = df.loc[mask, 'FLU_A'] = (
eq_notna(df.TP_FLU_PCR[mask], 1) | eq_notna(df.TP_FLU_IF[mask], 1)
).astype('Int8')
df.loc[mask, 'FLU_B'] = df.loc[mask, 'FLU_A'] = (
eq_notna(df.TP_FLU_PCR[mask], 2) | eq_notna(df.TP_FLU_IF[mask], 2)
).astype('Int8')
df.loc[(eq_notna(df.POS_IF_FLU, 1)) |
(eq_notna(df.POS_PCRFLU, 1)) |
(eq_notna(df.FLU_A, 1)) |
(eq_notna(df.FLU_B, 1)) |
((eq_notna(df.CLASSI_FIN, 1)) & (eq_notna(df.CRITERIO, 1))), 'FLU_LAB'] = 1
df.loc[(ne_orna(df.FLU_LAB, 1)) & ((eq_notna(df.POS_IF_FLU, 2)) | (eq_notna(df.POS_PCRFLU, 2))), 'FLU_LAB'] = 0
df.loc[(eq_notna(df.IF_VSR, 1)) | (eq_notna(df.PCR_VSR, 1)), 'VSR'] = 1
df.loc[(eq_notna(df.IF_PARA1, 1)) | (eq_notna(df.PCR_PARA1, 1)), 'PARA1'] = 1
df.loc[(eq_notna(df.IF_PARA2, 1)) | (eq_notna(df.PCR_PARA2, 1)), 'PARA2'] = 1
df.loc[(eq_notna(df.IF_PARA3, 1)) | (eq_notna(df.PCR_PARA3, 1)), 'PARA3'] = 1
df.loc[eq_notna(df.PCR_PARA4, 1), 'PARA4'] = 1
df.loc[(eq_notna(df.IF_ADENO, 1)) | (eq_notna(df.PCR_ADENO, 1)), 'ADNO'] = 1
df.loc[eq_notna(df.PCR_BOCA, 1), 'BOCA'] = 1
df.loc[eq_notna(df.PCR_RINO, 1), 'RINO'] = 1
df.loc[eq_notna(df.PCR_METAP, 1), 'METAP'] = 1
df.loc[(eq_notna(df.IF_OUTRO, 1)) | (eq_notna(df.PCR_OUTRO, 1)), 'OTHERS'] = 1
mask_covid19_ds_pcr = check_covid19_regex(df, 'DS_PCR_OUT')
mask_covid19_ds_if = check_covid19_regex(df, 'DS_IF_OUT')
df.loc[(eq_notna(df.PCR_SARS2, 1)) |
(eq_notna(df.AN_SARS2, 1)) |
mask_covid19_ds_pcr |
mask_covid19_ds_if |
((eq_notna(df.CLASSI_FIN, 5)) & (eq_notna(df.CRITERIO, 1))),
'SARS2'] = 1
df.loc[(eq_notna(df.PCR_SARS2, 1)) |
mask_covid19_ds_pcr |
mask_covid19_ds_if,
'OTHERS'] = pd.NA
# Positive cases:
df.loc[(eq_notna(df.POS_PCRFLU, 1)) | (eq_notna(df.POS_PCROUT, 1)), 'PCR_RESUL'] = 1
df.loc[(eq_notna(df.POS_IF_FLU, 1)) | (eq_notna(df.POS_IF_OUT, 1)), 'IF_RESUL'] = 1
df.loc[(eq_notna(df.PCR_RESUL, 1)) |
(eq_notna(df.IF_RESUL, 1)) |
(eq_notna(df.FLU_LAB, 1)) |
(eq_notna(df.SARS2, 1)) |
(eq_notna(df.VSR, 1)) |
(eq_notna(df.PARA1, 1)) |
(eq_notna(df.PARA2, 1)) |
(eq_notna(df.PARA3, 1)) |
(eq_notna(df.PARA4, 1)) |
(eq_notna(df.ADNO, 1)) |
(eq_notna(df.BOCA, 1)) |
(eq_notna(df.RINO, 1)) |
(eq_notna(df.METAP, 1)) |
(eq_notna(df.OTHERS, 1)),
'POSITIVE'] = 1
# Negative cases:
df.loc[(eq_notna(df.POS_PCRFLU, 2)) & (eq_notna(df.POS_PCROUT, 2)), 'PCR_RESUL'] = 2
df.loc[(eq_notna(df.POS_IF_FLU, 2)) & (eq_notna(df.POS_IF_OUT, 2)), 'IF_RESUL'] = 2
mask = (
(ne_orna(df.POSITIVE, 1)) &
((eq_notna(df.FLU_LAB, 0)) | (eq_notna(df.POS_PCROUT, 2)) | (eq_notna(df.POS_IF_OUT, 2)))
)
df.loc[mask, 'NEGATIVE'] = 1
df.loc[(((eq_notna(df.IF_RESUL, 2)) & (df.PCR_RESUL.isin([2, 3, 4, 5, 9]))) |
((eq_notna(df.PCR_RESUL, 2)) & (df.IF_RESUL.isin([2, 3, 4, 5, 9])))) &
(ne_orna(df.POSITIVE, 1)), 'NEGATIVE'] = 1
df.loc[:, 'INCONCLUSIVE'] = (
(
((eq_notna(df.IF_RESUL, 3)) & (~df.PCR_RESUL.isin([1, 2, 5]))) |
((eq_notna(df.PCR_RESUL, 3)) & (~df.IF_RESUL.isin([1, 2, 5])))
) &
(ne_orna(df.POSITIVE, 1)) &
(ne_orna(df.NEGATIVE, 1))
).astype(int)
df.loc[(eq_notna(df.POSITIVE, 1)) | (eq_notna(df.NEGATIVE, 1)) | (eq_notna(df.INCONCLUSIVE, 1)), 'TESTED'] = 1
df.loc[:, 'DELAYED'] = (
(
((eq_notna(df.IF_RESUL, 5)) & (pd.isnull(df.TESTED))) |
((eq_notna(df.PCR_RESUL, 5)) & (pd.isnull(df.TESTED)))
) &
(ne_orna(df.POSITIVE, 1)) &
(ne_orna(df.NEGATIVE, 1)) &
(ne_orna(df.INCONCLUSIVE, 1))
).astype(int)
# Clinical and clinical-epidemiological diagnose:
df['FLU_CLINIC'] = ((ne_orna(df.POS_IF_FLU, 1)) & (ne_orna(df.POS_PCRFLU, 1)) & (eq_notna(df.CLASSI_FIN, 1)) &
(df.CRITERIO.isin([2, 3]))).astype(int)
notknownrows = (
((eq_notna(df.PCR_RESUL, 9)) | pd.isnull(df.PCR_RESUL)) &
((eq_notna(df.IF_RESUL, 9)) | pd.isnull(df.IF_RESUL)) &
(ne_orna(df.POSITIVE, 1)) &
(ne_orna(df.NEGATIVE, 1)) &
(ne_orna(df.DELAYED, 1)) &
(ne_orna(df.INCONCLUSIVE, 1))
)
nottestedrows = (
~(notknownrows) &
(df.PCR_RESUL.isin([4, 9])) &
(df.IF_RESUL.isin([4, 9])) &
(ne_orna(df.TESTED, 1))
)
df['NOTTESTED'] = nottestedrows.astype(int)
df['TESTING_IGNORED'] = notknownrows.astype(int)
if tag:
df['tag'] = tag
return df
def applysinanfilter(df, tag=None, filtertype='srag'):
    """Apply the pre-2019 SINAN SRAG filter to *df*.

    Keeps the columns of interest, normalizes legacy column names across
    database vintages, derives the lab-result indicator columns
    (FLU_A/FLU_B/..., TESTED/NOTTESTED/TESTING_IGNORED) and converts the
    encoded age field to years.

    :param df: raw SINAN dataframe (pre-2019 layout).
    :param tag: optional label stored in a 'tag' column (e.g. source file name).
    :param filtertype: kept for interface compatibility with filter_db2019.
    :return: filtered dataframe.
    """
    # Filter columns of interest
    # In the data request, besides the variables below, the case ID is also required
    tgtcols = ['SEM_NOT', 'DT_NOTIFIC', 'SG_UF_NOT', 'DT_INTERNA', 'DT_SIN_PRI', 'DT_DIGITA', 'HOSPITAL',
               'FEBRE', 'CLASSI_FIN', 'CRITERIO', 'SG_UF', 'ID_MN_RESI', 'ID_RG_RESI', 'SEM_PRI',
               'TOSSE', 'GARGANTA', 'DISPNEIA', 'SATURACAO', 'DESC_RESP', 'EVOLUCAO', 'DT_COLETA', 'IFI', 'DT_IFI',
               'PCR', 'OUT_METODO', 'DS_OUTMET', 'DT_OUTMET', 'RES_FLUA', 'RES_FLUASU', 'RES_FLUB',
               'RES_VSR', 'RES_PARA1', 'RES_PARA2', 'RES_PARA3', 'RES_ADNO', 'RES_OUTRO', 'DT_PCR', 'PCR_RES',
               'PCR_ETIOL', 'PCR_TIPO_H', 'PCR_TIPO_N', 'DT_CULTURA', 'CULT_RES', 'DT_HEMAGLU', 'HEMA_RES',
               'HEMA_ETIOL', 'HEM_TIPO_H', 'HEM_TIPO_N', 'VACINA', 'DT_UT_DOSE', 'ANT_PNEUMO', 'DT_PNEUM',
               'CO_UF_INTE', 'CO_MU_INTE', 'CO_UN_INTE', 'DT_ENCERRA', 'NU_NOTIFIC', 'ID_AGRAVO', 'ID_MUNICIP',
               'ID_REGIONA', 'ID_UNIDADE', 'NU_IDADE_N', 'NM_BAIRRO', 'NM_LOGRADO', 'CS_SEXO', 'CS_RACA',
               'DT_ANTIVIR', 'DT_EVOLUCA']
    # ICU-related columns:
    tgtcols_uti = ['UTI', 'DT_ENTUTI', 'DT_SAIDUTI']
    # Comorbidity columns:
    tgtcols_comorb = ['CS_GESTANT', 'PUERPERA', 'CARDIOPATI', 'HEMATOLOGI', 'SIND_DOWN', 'HEPATICA', 'ASMA',
                      'DIABETES', 'NEUROLOGIC', 'PNEUMOPATI', 'IMUNODEPRE', 'RENAL', 'OBESIDADE', 'OBES_IMC',
                      'OUT_MORBI', 'MORB_DESC']
    tgtcols = list(set(tgtcols).union(tgtcols_uti).union(tgtcols_comorb))

    # Normalize legacy column names across database vintages:
    cols = df.columns
    if 'RES_VRS' in cols:
        df.rename(columns={'RES_VRS': 'RES_VSR'}, inplace=True)
    if 'DT_PCR_1' in cols:
        df.DT_PCR.update(df.DT_PCR_1)
    if 'DT_OBITO' in cols:
        df.rename(columns={'DT_OBITO': 'DT_EVOLUCA'}, inplace=True)
        if 'DT_EVOL' in cols:
            df.DT_EVOLUCA.update(df.DT_EVOL)
    elif 'DT_EVOL' in cols:
        df.rename(columns={'DT_EVOL': 'DT_EVOLUCA'}, inplace=True)
    if 'METABOLICA' in cols:
        df.rename(columns={'METABOLICA': 'DIABETES'}, inplace=True)
    cols = df.columns
    # Any target column absent from this vintage is created empty:
    for col in set(tgtcols).difference(cols):
        df[col] = None

    df = df[tgtcols].copy()
    df = symptoms_filter(df)

    # Clean all date columns (named DT*):
    regexp = re.compile('^DT')
    dt_cols = list(filter(regexp.match, tgtcols))
    df = date_cleanup(df, dt_cols)

    # Create columns related to lab result
    # Rows with lab test:
    labrows = ((df.PCR_RES.isin([1, 2, 3])) |
               (df.CULT_RES.isin([1, 2])) |
               (df.HEMA_RES.isin([1, 2, 3])) |
               (eq_notna(df.IFI, 1)) |
               (eq_notna(df.PCR, 1)) |
               (eq_notna(df.OUT_METODO, 1)))
    # Rows where every testing field is empty (testing status unknown):
    notknownrows = (
        pd.isnull(df.PCR_RES) &
        pd.isnull(df.CULT_RES) &
        pd.isnull(df.HEMA_RES) &
        pd.isnull(df.IFI) &
        pd.isnull(df.PCR) &
        pd.isnull(df.OUT_METODO)
    )
    # Rows explicitly marked as not tested on every method:
    nottestedrows = (
        ~(notknownrows) &
        (pd.isnull(df.PCR_RES) | (df.PCR_RES.isin([4]))) &
        (pd.isnull(df.CULT_RES) | (df.CULT_RES.isin([3]))) &
        (pd.isnull(df.HEMA_RES) | (df.HEMA_RES.isin([4]))) &
        (pd.isnull(df.IFI) | (eq_notna(df.IFI, 2))) &
        (pd.isnull(df.PCR) | (eq_notna(df.PCR, 2))) &
        (pd.isnull(df.OUT_METODO) | (eq_notna(df.OUT_METODO, 2)))
    )

    # Initialize the nullable-int indicator columns (replaces 21 identical
    # assignments in the original):
    for indicator in ['FLU_A', 'FLU_B', 'FLU_LAB', 'VSR', 'PARA1', 'PARA2', 'PARA3', 'PARA4',
                      'ADNO', 'METAP', 'BOCA', 'RINO', 'SARS2', 'OTHERS', 'NEGATIVE', 'POSITIVE',
                      'INCONCLUSIVE', 'DELAYED', 'TESTED', 'NOTTESTED', 'TESTING_IGNORED']:
        df[indicator] = pd.Series([], dtype='Int8')

    df['TESTED'] = labrows.astype(int)
    df['NOTTESTED'] = nottestedrows.astype(int)
    df['TESTING_IGNORED'] = notknownrows.astype(int)

    # Pathogen-specific indicators over tested rows:
    df.loc[labrows, 'FLU_A'] = ((df.PCR_ETIOL[labrows].isin([1, 2, 4])) | (df.HEMA_ETIOL[labrows].isin([1, 2, 4])) |
                                (eq_notna(df.RES_FLUA[labrows], 1))).astype(int)
    df.loc[labrows, 'FLU_B'] = ((eq_notna(df.PCR_ETIOL[labrows], 3)) | (eq_notna(df.HEMA_ETIOL[labrows], 3)) |
                                (eq_notna(df.RES_FLUB[labrows], 1))).astype(int)
    df.loc[labrows, 'VSR'] = (eq_notna(df.RES_VSR[labrows], 1)).astype(int)
    df.loc[labrows, 'PARA1'] = (eq_notna(df.RES_PARA1[labrows], 1)).astype(int)
    df.loc[labrows, 'PARA2'] = (eq_notna(df.RES_PARA2[labrows], 1)).astype(int)
    df.loc[labrows, 'PARA3'] = (eq_notna(df.RES_PARA3[labrows], 1)).astype(int)
    df.loc[labrows, 'ADNO'] = (eq_notna(df.RES_ADNO[labrows], 1)).astype(int)
    # SARS-CoV-2 testing did not exist in the pre-2019 system:
    df.loc[labrows, 'SARS2'] = 0
    df.loc[labrows, 'OTHERS'] = (
        (eq_notna(df.PCR_ETIOL[labrows], 5)) |
        (eq_notna(df.HEMA_ETIOL[labrows], 5)) |
        (eq_notna(df.RES_OUTRO[labrows], 1))).astype(int)
    # Tested rows whose results are all still pending (code 4 or empty):
    df.loc[labrows, 'DELAYED'] = ((pd.isnull(df.PCR_RES[labrows]) | eq_notna(df.PCR_RES[labrows], 4)) &
                                  (pd.isnull(df.HEMA_RES[labrows]) | eq_notna(df.HEMA_RES[labrows], 4)) &
                                  (pd.isnull(df.RES_FLUA[labrows]) | eq_notna(df.RES_FLUA[labrows], 4)) &
                                  (pd.isnull(df.RES_FLUB[labrows]) | eq_notna(df.RES_FLUB[labrows], 4)) &
                                  (pd.isnull(df.RES_VSR[labrows]) | eq_notna(df.RES_VSR[labrows], 4)) &
                                  (pd.isnull(df.RES_PARA1[labrows]) | eq_notna(df.RES_PARA1[labrows], 4)) &
                                  (pd.isnull(df.RES_PARA2[labrows]) | eq_notna(df.RES_PARA2[labrows], 4)) &
                                  (pd.isnull(df.RES_PARA3[labrows]) | eq_notna(df.RES_PARA3[labrows], 4)) &
                                  (pd.isnull(df.RES_ADNO[labrows]) | eq_notna(df.RES_ADNO[labrows], 4)) &
                                  (pd.isnull(df.RES_OUTRO[labrows]) | eq_notna(df.RES_OUTRO[labrows], 4))).astype(int)
    # Tested, not delayed, and every result is inconclusive (3) or pending (4):
    df.loc[labrows, 'INCONCLUSIVE'] = ((eq_notna(df.DELAYED[labrows], 0)) &
                                       (pd.isnull(df.PCR_RES[labrows]) | df.PCR_RES[labrows].isin([3, 4])) &
                                       (pd.isnull(df.HEMA_RES[labrows]) | df.HEMA_RES[labrows].isin([3, 4])) &
                                       (pd.isnull(df.RES_FLUA[labrows]) | df.RES_FLUA[labrows].isin([3, 4])) &
                                       (pd.isnull(df.RES_FLUB[labrows]) | df.RES_FLUB[labrows].isin([3, 4])) &
                                       (pd.isnull(df.RES_VSR[labrows]) | df.RES_VSR[labrows].isin([3, 4])) &
                                       (pd.isnull(df.RES_PARA1[labrows]) | df.RES_PARA1[labrows].isin([3, 4])) &
                                       (pd.isnull(df.RES_PARA2[labrows]) | df.RES_PARA2[labrows].isin([3, 4])) &
                                       (pd.isnull(df.RES_PARA3[labrows]) | df.RES_PARA3[labrows].isin([3, 4])) &
                                       (pd.isnull(df.RES_ADNO[labrows]) | df.RES_ADNO[labrows].isin([3, 4])) &
                                       (pd.isnull(df.RES_OUTRO[labrows]) | df.RES_OUTRO[labrows].isin([3, 4]))).astype(
        int)
    # Negative: every pathogen indicator 0 and neither delayed nor inconclusive:
    df.loc[labrows, 'NEGATIVE'] = ((eq_notna(df.FLU_A[labrows], 0)) &
                                   (eq_notna(df.FLU_B[labrows], 0)) &
                                   (eq_notna(df.VSR[labrows], 0)) &
                                   (eq_notna(df.PARA1[labrows], 0)) &
                                   (eq_notna(df.PARA2[labrows], 0)) &
                                   (eq_notna(df.PARA3[labrows], 0)) &
                                   (eq_notna(df.ADNO[labrows], 0)) &
                                   (eq_notna(df.OTHERS[labrows], 0)) &
                                   (eq_notna(df.DELAYED[labrows], 0)) &
                                   (eq_notna(df.INCONCLUSIVE[labrows], 0))).astype(int)
    # Positive: tested but neither inconclusive, negative nor delayed:
    df.loc[labrows &
           (ne_orna(df.INCONCLUSIVE, 1)) &
           (ne_orna(df.NEGATIVE, 1)) &
           (ne_orna(df.DELAYED, 1)),
           'POSITIVE'] = 1

    # Clinical and clinical-epidemiological diagnose:
    df['FLU_CLINIC'] = ((ne_orna(df.FLU_A, 1)) &
                        (ne_orna(df.FLU_B, 1)) &
                        (eq_notna(df.CLASSI_FIN, 1)) &
                        (df.CRITERIO.isin([2, 3]))).astype(int)

    # Bug fix: np.float was deprecated in NumPy 1.20 and removed in 1.24;
    # the builtin float is the documented replacement and is equivalent here.
    df.NU_IDADE_N = df.NU_IDADE_N.astype(float)

    def f_idade(x):
        # System registers age with the following format:
        # TAAA
        # T: type (1-hours, 2-days, 3-months, 4-years)
        # Hours used only if age < 24h, days used only if 24h <= age < 30d, months only if 30d <= a < 12 months
        # AAA: count on the respective scale
        # Ex.:
        # 2010 : 10 days
        # 3010: 10 months
        # 4010: 10 years
        if np.isnan(x):
            return np.nan
        if x < 4000:
            # Any age below one year is reported as 0 years:
            return 0
        else:
            return x - 4000

    if (df.NU_IDADE_N > 4000).any(axis=0):
        df['idade_em_anos'] = df['NU_IDADE_N'].apply(f_idade)
    else:
        # Values are already plain years:
        df['idade_em_anos'] = df['NU_IDADE_N']

    if tag:
        df['tag'] = tag
    return df
def delays_dist(df_in=pd.DataFrame(), filtertype='srag', append_file=None, dfappend=None):
    """Generate notification/digitization delay tables and quantile files.

    Builds the long-format delay table per filter level, writes the delay
    CSVs, generates the quantile files via extract_quantile, imputes the
    digitization date when needed and writes the weekly sinpri2digita tables.

    :param df_in: cleaned case dataframe (output of the filter functions).
    :param filtertype: one of 'srag', 'sragnofever', 'hospdeath'.
    :param append_file: optional CSV with historical rows to prepend to the delay table.
    :param dfappend: optional dataframe with historical cases used for imputation.
    """
    module_logger.info('Entered function: delay_dist')
    if filtertype not in ['srag', 'sragnofever', 'hospdeath']:
        exit('Invalid filter type: %s' % filtertype)

    # State code -> climatic/official region lookup:
    df_reg = pd.read_csv('../data/regioesclimaticas.csv', low_memory=False)[['Código', 'Região', 'Região oficial']]
    df_reg.rename(columns={'Código': 'UF', 'Região': 'Regional', 'Região oficial': 'Regiao'}, inplace=True)

    def _cleanup(df_input):
        # Keep only delay-related columns, normalize UF and attach region info.
        tgt_cols = ['SG_UF_NOT',
                    'DT_NOTIFIC_epiyearweek', 'DT_NOTIFIC_epiyear', 'DT_NOTIFIC_epiweek',
                    'DT_SIN_PRI_epiyearweek', 'DT_SIN_PRI_epiyear', 'DT_SIN_PRI_epiweek',
                    'DT_DIGITA_epiyearweek', 'DT_DIGITA_epiyear', 'DT_DIGITA_epiweek',
                    'Notific2Digita_DelayWeeks', 'SinPri2Digita_DelayWeeks', 'SinPri2Antivir_DelayWeeks',
                    'SinPri2Notific_DelayWeeks', 'SinPri2Coleta_DelayWeeks', 'SinPri2Interna_DelayWeeks',
                    'Notific2Encerra_DelayWeeks', 'Coleta2IFI_DelayWeeks', 'Coleta2PCR_DelayWeeks',
                    'Notific2Coleta_DelayWeeks', 'Notific2Antivir_DelayWeeks', 'Digita2Antivir_DelayWeeks',
                    'Interna2Evoluca_DelayWeeks',
                    'Notific2Digita_DelayDays', 'SinPri2Digita_DelayDays', 'SinPri2Antivir_DelayDays',
                    'SinPri2Notific_DelayDays', 'SinPri2Coleta_DelayDays', 'SinPri2Interna_DelayDays',
                    'Notific2Encerra_DelayDays', 'Coleta2IFI_DelayDays', 'Coleta2PCR_DelayDays',
                    'Notific2Coleta_DelayDays', 'Notific2Antivir_DelayDays', 'Digita2Antivir_DelayDays',
                    'Interna2Evoluca_DelayDays',
                    'sragflu', 'obitoflu', 'sragcovid', 'obitocovid', 'obito', 'filtro']
        df = df_input[tgt_cols].rename(columns={'DT_NOTIFIC_epiyear': 'epiyear',
                                                'DT_NOTIFIC_epiweek': 'epiweek',
                                                'SG_UF_NOT': 'UF'})
        df.loc[pd.isnull(df.UF), 'UF'] = 99
        df.UF = df.UF.astype(float).astype(int)
        # Insert region info:
        df = df.merge(df_reg, on='UF')
        df['Pais'] = 'BR'
        # Prefer integer dtype; fall back to float when NaNs block the cast.
        # Bug fix: the original bare `except:` clauses could also swallow
        # KeyboardInterrupt/SystemExit; only the cast failures are expected.
        cols = ['DT_DIGITA_epiyear', 'DT_DIGITA_epiweek', 'SinPri2Digita_DelayWeeks']
        try:
            df[cols] = df[cols].astype(int)
        except (ValueError, TypeError):
            df[cols] = df[cols].astype(float)
        cols = ['epiyear', 'epiweek']
        try:
            df[cols] = df[cols].astype(int)
        except (ValueError, TypeError):
            df[cols] = df[cols].astype(float)
        cols = ['DT_SIN_PRI_epiyear', 'DT_SIN_PRI_epiweek']
        try:
            df[cols] = df[cols].astype(int)
        except (ValueError, TypeError):
            df[cols] = df[cols].astype(float)
        return df

    df = _cleanup(df_in)

    def wide2long(dfwide2long):
        # Melt the case-level indicator columns into (dado, row) long format.
        # NOTE: 'id' is taken from the enclosing df's index (closure).
        dfwide2long['id'] = df.index
        dfwide2long['srag'] = 1
        varcols = ['srag', 'sragflu', 'obitoflu', 'sragcovid', 'obitocovid', 'obito']
        dfwide2long_out = dfwide2long[['id'] + varcols].melt(id_vars='id',
                                                             var_name='dado',
                                                             value_name='valor')[lambda x: (x.valor == 1)]
        dfwide2long_out = dfwide2long_out.merge(dfwide2long, how='left', on='id').drop(columns=['valor', 'id']+varcols)
        dfwide2long.drop(columns=['id', 'srag'], inplace=True)
        return dfwide2long_out

    df_out = wide2long(df)
    out_cols = ['Coleta2IFI_DelayDays', 'Coleta2IFI_DelayWeeks', 'Coleta2PCR_DelayDays',
                'Coleta2PCR_DelayWeeks', 'DT_DIGITA_epiweek', 'DT_DIGITA_epiyear',
                'DT_DIGITA_epiyearweek', 'DT_NOTIFIC_epiyearweek', 'DT_SIN_PRI_epiweek',
                'DT_SIN_PRI_epiyear', 'DT_SIN_PRI_epiyearweek',
                'Digita2Antivir_DelayDays', 'Digita2Antivir_DelayWeeks',
                'Notific2Antivir_DelayDays', 'Notific2Antivir_DelayWeeks',
                'Notific2Coleta_DelayDays', 'Notific2Coleta_DelayWeeks',
                'Notific2Digita_DelayDays', 'Notific2Digita_DelayWeeks',
                'Notific2Encerra_DelayDays', 'Notific2Encerra_DelayWeeks',
                'SinPri2Antivir_DelayDays', 'SinPri2Antivir_DelayWeeks',
                'SinPri2Coleta_DelayDays', 'SinPri2Coleta_DelayWeeks',
                'SinPri2Digita_DelayDays', 'SinPri2Digita_DelayWeeks',
                'SinPri2Notific_DelayDays', 'SinPri2Notific_DelayWeeks',
                'SinPri2Interna_DelayDays', 'SinPri2Interna_DelayWeeks',
                'Interna2Evoluca_DelayDays', 'Interna2Evoluca_DelayWeeks',
                'UF', 'dado', 'filtro',
                'epiweek', 'epiyear', 'Regional', 'Regiao', 'Pais']
    df_out = df_out.sort_values(by=['dado', 'UF', 'DT_SIN_PRI_epiyearweek', 'DT_NOTIFIC_epiyearweek',
                                    'DT_DIGITA_epiyearweek'])
    df_out = df_out.reset_index()[out_cols]

    # Prepend historical delay rows, when provided:
    if append_file:
        tmp = pd.read_csv(append_file)
        df_out = tmp.append(df_out[out_cols], ignore_index=True, sort=False)

    for k, suffix in filtro_dict.items():
        df_out.loc[(df_out.filtro >= k), out_cols].to_csv('../../data/data/delay_table%s.csv' % suffix, index=False)

    #### Opportunities estimation
    tgt_cols = ['UF', 'Regional', 'Regiao', 'Pais', 'dado', 'filtro', 'DT_SIN_PRI_epiyearweek', 'DT_SIN_PRI_epiyear',
                'DT_SIN_PRI_epiweek', 'SinPri2Digita_DelayWeeks', 'DT_DIGITA_epiyearweek', 'DT_DIGITA_epiyear',
                'DT_DIGITA_epiweek']
    # Generate quantiles' file:
    df_out = df_out[tgt_cols].rename(columns={'DT_SIN_PRI_epiyearweek': 'epiyearweek', 'DT_SIN_PRI_epiyear': 'epiyear',
                                              'DT_SIN_PRI_epiweek': 'epiweek', 'SinPri2Digita_DelayWeeks': 'delayweeks'})
    df_out.UF = df_out.UF.astype(int).astype(str)
    for i, ft in enumerate(['hospdeath', 'sragnofever', 'srag']):
        extract_quantile(df_out[df_out.filtro >= (i+1)], ft)
    del df_out

    # Impute digitalization date if needed:
    if dfappend is not None:
        dfappend = _cleanup(dfappend)
        df = dfappend.append(df, ignore_index=True, sort=False)
    opp_cols = list(set(tgt_cols).difference(['dado']).union(['sragflu', 'obitoflu', 'sragcovid', 'obitocovid',
                                                              'obito']))
    df = df[opp_cols].rename(columns={'DT_SIN_PRI_epiyearweek': 'epiyearweek', 'DT_SIN_PRI_epiyear': 'epiyear',
                                      'DT_SIN_PRI_epiweek': 'epiweek', 'SinPri2Digita_DelayWeeks': 'delayweeks'})
    df.UF = df.UF.astype(int).astype(str)
    df = delayimputation(df)
    df_out = wide2long(df)

    # Create tables and save to file:
    for k, suffix in filtro_dict.items():
        df_outnew = createtable(df_out[df_out.filtro >= k])
        for dataset in df_out.dado.unique():
            fout = '../clean_data/%s%s_sinpri2digita_table_weekly.csv' % (dataset, suffix)
            module_logger.info('Write table %s' % fout)
            df_outnew[df_outnew.dado == dataset].to_csv(fout, index=False)
    return
def main(flist, sep=',', yearmax=None, filtertype='srag', append_cases=None, append_delay=None):
    """Read SINAN files, apply the vintage-appropriate filter, write clean CSVs.

    :param flist: list of input database file paths (year inferred from the name).
    :param sep: column separator of the input files.
    :param yearmax: keep only cases whose first-symptom year is <= yearmax.
    :param filtertype: one of 'srag', 'sragnofever', 'hospdeath'.
    :param append_cases: optional file of historical cases to prepend.
    :param append_delay: optional historical delay table passed to delays_dist.
    """
    if filtertype not in ['srag', 'sragnofever', 'hospdeath']:
        module_logger.error('Invalid filter type: %s', filtertype)
        exit(1)
    # NOTE(review): 'suff' is never used below; output suffixes come from
    # filtro_dict instead -- confirm whether this is leftover code.
    suff = ''
    if filtertype != 'srag':
        suff = '_%s' % filtertype
    df = pd.DataFrame()
    for fname in flist:
        module_logger.info('Processing database file: %s', fname)
        dftmp = readtable(fname, sep)
        # Check if data file has 2019's database or not:
        if int(re.findall(r'\d+', fname)[0]) < 2019:
            module_logger.info('DB pre-2019')
            df = df.append(applysinanfilter(dftmp, tag=fname, filtertype=filtertype), ignore_index=True, sort=True)
        else:
            module_logger.info('DB 2019 onwards')
            df = df.append(filter_db2019(dftmp, tag=fname, filtertype=filtertype), ignore_index=True, sort=True)
        del dftmp
    if yearmax:
        df = df[(df.DT_SIN_PRI.apply(lambda x: x.year) <= yearmax)]
    # Normalize the free-text columns:
    df.NM_LOGRADO = clean_open_field(df.NM_LOGRADO)
    df.DS_PCR_OUT = clean_open_field(df.DS_PCR_OUT)
    df.DS_IF_OUT = clean_open_field(df.DS_IF_OUT)
    df.DS_AN_OUT = clean_open_field(df.DS_AN_OUT)
    df.OUTRO_DES = clean_open_field(df.OUTRO_DES)
    df.MORB_DESC = clean_open_field(df.MORB_DESC)
    df.RAIOX_OUT = clean_open_field(df.RAIOX_OUT)
    df.TOMO_OUT = clean_open_field(df.TOMO_OUT)
    # Clean obvious duplicates
    df.drop_duplicates(inplace=True, ignore_index=True)
    # Convert date fields to text and leave NaT as empty cell
    regexp = re.compile('^DT')
    target_cols = list(filter(regexp.match, df.columns))
    df[target_cols] = df[target_cols].applymap(lambda x: str(x.date())).where(lambda x: x != 'NaT', np.nan)
    df = insert_epiweek(df)
    if 'ID_UNIDADE' in df.columns:
        df.CO_UNI_NOT.update(df.ID_UNIDADE)
    # Sanity check: COVID-19 cases cannot predate epiweek 8 of 2020.
    mask_sanity_check = (
        (eq_notna(df.SARS2, 1)) &
        (
            (df.DT_SIN_PRI_epiyear.astype(int) < 2020) |
            ((df.DT_SIN_PRI_epiyear.astype(int) == 2020) &
             (df.DT_SIN_PRI_epiweek.astype(int) < 8))
        )
    )
    # NOTE(review): warns only when MORE than one early case is found ('> 1'),
    # yet all flagged cases are zeroed below -- confirm '> 1' vs '> 0'.
    if sum((df.SARS2[mask_sanity_check])) > 1:
        print('ATENÇÃO: Caso de COVID-19 anterior a 2020 08. Entrar em contato com GT-Influenza')
    df.loc[mask_sanity_check, 'SARS2'] = 0
    if append_cases:
        tmp = readtable(append_cases)
        dfout = tmp.append(df, ignore_index=True)
    else:
        dfout = df.copy()
    # Per-outcome masks over the full output dataframe:
    mask_flu = ((eq_notna(dfout.FLU_A, 1)) |
                (eq_notna(dfout.FLU_B, 1)) |
                (eq_notna(dfout.FLU_CLINIC, 1)))
    mask_covid = (eq_notna(dfout.SARS2, 1))
    mask_obito = (eq_notna(dfout.EVOLUCAO, 2))
    mask_obitoflu = mask_flu & mask_obito
    mask_obitocovid = mask_covid & mask_obito
    # One CSV family per filter level (suffix comes from filtro_dict):
    for k, suffix in filtro_dict.items():
        dfout[dfout.filtro >= k].to_csv('../clean_data/clean_data_srag%s_epiweek.csv' % suffix, index=False)
        dfout[(dfout.filtro >= k) & mask_flu].to_csv('../clean_data/clean_data_sragflu%s_epiweek.csv' % suffix, index=False)
        dfout[(dfout.filtro >= k) & mask_obitoflu].to_csv('../clean_data/clean_data_obitoflu%s_epiweek.csv' % suffix,
                                                          index=False)
        dfout[(dfout.filtro >= k) & mask_covid].to_csv('../clean_data/clean_data_sragcovid%s_epiweek.csv' % suffix,
                                                       index=False)
        dfout[(dfout.filtro >= k) & mask_obitocovid].to_csv('../clean_data/clean_data_obitocovid%s_epiweek.csv' % suffix,
                                                            index=False)
        dfout[(dfout.filtro >= k) & mask_obito].to_csv('../clean_data/clean_data_obito%s_epiweek.csv' % suffix, index=False)
    del dfout

    def masks(dfin: pd.DataFrame):
        # Add the outcome indicator columns used by delays_dist, in place.
        mask_flu = ((eq_notna(dfin.FLU_A, 1)) |
                    (eq_notna(dfin.FLU_B, 1)) |
                    (eq_notna(dfin.FLU_CLINIC, 1)))
        mask_covid = (eq_notna(dfin.SARS2, 1))
        mask_obito = (eq_notna(dfin.EVOLUCAO, 2))
        mask_obitoflu = mask_flu & mask_obito
        mask_obitocovid = mask_covid & mask_obito
        dfin['obito'] = mask_obito.astype(int)
        dfin['sragflu'] = mask_flu.astype(int)
        dfin['obitoflu'] = mask_obitoflu.astype(int)
        dfin['sragcovid'] = mask_covid.astype(int)
        dfin['obitocovid'] = mask_obitocovid.astype(int)
        return

    masks(df)
    if append_cases:
        masks(tmp)
        delays_dist(df, filtertype, append_delay, dfappend=tmp)
    else:
        delays_dist(df, filtertype)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Clean SINAN SRAG table.\n" +
                                     "python3 sinan_clean.py --path ../data/influ*.csv --sep ,\n",
                                     formatter_class=RawDescriptionHelpFormatter)
    parser.add_argument('--path', nargs='*', action='append', help='Path to data file')
    parser.add_argument('--sep', help='Column separator', default=',')
    # Bug fix: --year was parsed as a string, which made the comparison
    # against integer DT_SIN_PRI years in main() raise a TypeError.
    parser.add_argument('--year', type=int, help='Maximum year', default=None)
    parser.add_argument('--filtertype', help='Default=srag. Which filter should be used? [srag, sragnofever, '
                                             'hospdeath]', default='srag')

    args = parser.parse_args()
    print(args)
    main(args.path[0], args.sep, args.year, args.filtertype)
| FluVigilanciaBR/seasonality | methods/data_filter/sinan_filter_of_interest.py | sinan_filter_of_interest.py | py | 47,482 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
"line_number": 256,
"usage_type": "attribute"
},
{
"api_name": "pandas.notna",
"line_number": 257,
"usage_type": "call"
},
{
"api_name": "pandas.Series",
... |
42557553956 | from fastapi import FastAPI, HTTPException
from pydantic import BaseModel
class Line(BaseModel):
    """One parsed row of the grade export: a matter with its weights and marks."""
    matter: str   # subject name
    teacher: str  # teacher name
    coef: float   # coefficient (weight) of the matter
    ects: float   # ECTS credits
    cc1: float    # first continuous-assessment grade
    cc2: float    # second continuous-assessment grade
    exam: float   # final exam grade
app = FastAPI()


@app.get("/grades")
def grades():
    """Return every line of the grades export file."""
    grades_path = "exportFIles/note.txt"
    with open(grades_path, "r", encoding="utf-8") as export_file:
        return export_file.readlines()
@app.get("/grades/{matter}")
def grades_by_matter(matter):
    """Return the grades and the rounded average for a single matter.

    Raises 404 when the matter is not found or has no grades yet.
    """
    with open("exportFIles/note.txt", "r", encoding="utf-8") as file:
        for line in file.readlines():
            if matter in line:
                # Grades start 9 characters after the first ".00" marker.
                values = line[line.find(".00") + 9:line.find("\n")]
                if values == "":
                    raise HTTPException(status_code=404, detail="No grades in this matter.")
                # Parse once (the original parsed the same string twice:
                # inline here and again inside calculate_average).
                vals_float = [float(v.replace(',', '.')) for v in values.split()]
                average = round(sum(vals_float) / len(vals_float), 2)
                return {"grades": vals_float, "average": average}
    raise HTTPException(status_code=404, detail="This matter doesn't exist.")
def calculate_average(values: str) -> float:
    """Average of space-separated grades, rounded to 2 decimals.

    Commas are accepted as decimal separators (e.g. "12,5").
    """
    grades = [float(token.replace(',', '.')) for token in values.split()]
    return round(sum(grades) / len(grades), 2)
@app.get("/grades/semester/{semester}")
def gradesBySemester(semester):
    """Coefficient-weighted average over every matter of a semester."""
    with open("exportFIles/note.txt", "r") as file:
        matter_values = 0
        count = 0
        for line in file.readlines():
            if semester in line:
                # Coefficient read from the 4 characters around the first ".00".
                coef_str = line[line.find(".00") - 1:line.find(".00") + 3]
                if coef_str != "":
                    # NOTE(review): isdigit() is False for tokens like "5.00",
                    # which forces coef to 0.0 and skips the matter entirely --
                    # confirm the expected file format before relying on this.
                    coef = float(coef_str.split()[0]) if coef_str.split()[0].isdigit() else 0.0
                    values = line[line.find(".00") + 9:line.find("\n")]
                    if values == "":
                        raise HTTPException(status_code=404, detail="No grades in this semester.")
                    average = calculate_average(values)
                    # Weight the matter by repeating its average coef times:
                    for _ in range(int(coef)):
                        matter_values += average
                        count += 1
        return {"average": round(matter_values / count, 2) if count != 0 else 0.0}
@app.get("/students")
def students():
    """Return the raw student export file as a list of lines."""
    source = "exportFIles/trobiEleve3al.txt"
    with open(source, "r") as export_file:
        return export_file.readlines()
@app.get("/teachers")
def teachers():
    """Return the raw teacher export file as a list of lines."""
    source = "exportFIles/trobiTeachers.txt"
    with open(source, "r") as export_file:
        return export_file.readlines()
@app.get("/planning")
def planning():
    """Return the raw planning export file as a list of lines."""
    source = "exportFIles/Planing.txt"
    with open(source, "r") as export_file:
        return export_file.readlines()
| Alex061998/ScrapperPython | api.py | api.py | py | 2,717 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "pydantic.BaseModel",
"line_number": 5,
"usage_type": "name"
},
{
"api_name": "fastapi.FastAPI",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPException",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "fastapi.HTTPE... |
42896814245 | from tokenizers import BertWordPieceTokenizer
import numpy as np
import argparse
import torch
# Command-line interface: tokenized-text input, binary token-id output.
parser = argparse.ArgumentParser(description="Numerize text into numpy format")
parser.add_argument('--vocab', default='path/to/vocab.txt',
                    help="path to vocabulary file")
# Only used by BPE-style tokenizers; unused for BERT WordPiece.
parser.add_argument('--merge', default='',
                    help="path to merge file")
parser.add_argument('--input', default='en.train',
                    help="path to tokenized text file")
parser.add_argument('--bin_path', default='en.train.pth',
                    help="path to binary file output")
parser.add_argument("--model", choices=["bert", "roberta"], default="bert",
                    help="source pre-trained model")
params = parser.parse_args()
# Special-token strings and their expected vocabulary ids for each supported
# pre-trained model; the ids are asserted against the tokenizer in numerize().
if params.model == 'bert':
    CLS_TOKEN, CLS_INDEX = "[CLS]", 101
    SEP_TOKEN, SEP_INDEX = "[SEP]", 102
    UNK_TOKEN, UNK_INDEX = "[UNK]", 100
    PAD_TOKEN, PAD_INDEX = "[PAD]", 0
    MASK_TOKEN, MASK_INDEX = "[MASK]", 103
elif params.model == 'roberta':
    CLS_TOKEN, CLS_INDEX = '<s>', 0
    SEP_TOKEN, SEP_INDEX = '</s>', 2
    UNK_TOKEN, UNK_INDEX = '<unk>', 3
    PAD_TOKEN, PAD_INDEX = '<pad>', 1
    MASK_TOKEN, MASK_INDEX = '<mask>', 50264
def numerize(vocab_path, input_path, bin_path):
    """Tokenize a text file into one flat id stream and save it with torch.

    Lines are encoded in batches; the leading CLS id of every line is dropped
    so the stream is separated by SEP ids only. The stream is stored as uint16
    when the vocabulary fits, otherwise int32, together with the special-token
    ids needed by downstream processing.

    :param vocab_path: WordPiece vocabulary file.
    :param input_path: tokenized text file, one sentence per line.
    :param bin_path: output path for the torch-saved dict.
    """
    tokenizer = BertWordPieceTokenizer(
        vocab_path,
        unk_token=UNK_TOKEN,
        sep_token=SEP_TOKEN,
        cls_token=CLS_TOKEN,
        pad_token=PAD_TOKEN,
        mask_token=MASK_TOKEN,
        lowercase=False,
        strip_accents=False)

    sentences = []

    def flush(batch):
        # Encode a batch of lines and append their ids, skipping the
        # leading CLS token of each line.
        for encoding in tokenizer.encode_batch(batch):
            sentences.extend(encoding.ids[1:])

    with open(input_path, 'r') as f:
        batch_stream = []
        for i, line in enumerate(f):
            batch_stream.append(line)
            if len(batch_stream) == 1000:
                flush(batch_stream)
                batch_stream = []
            if i % 100000 == 0:
                print(f'processed {i} lines')
        # Bug fix: the original only flushed when i % 1000 == 0, silently
        # dropping the last partial batch (up to 999 lines) of the file.
        if batch_stream:
            flush(batch_stream)

    print('convert the data to numpy')
    # convert data to numpy format in uint16 when the vocabulary fits
    if tokenizer.get_vocab_size() < 1 << 16:
        sentences = np.uint16(sentences)
    else:
        assert tokenizer.get_vocab_size() < 1 << 31
        sentences = np.int32(sentences)

    # save special tokens for later processing
    sep_index = tokenizer.token_to_id(SEP_TOKEN)
    cls_index = tokenizer.token_to_id(CLS_TOKEN)
    unk_index = tokenizer.token_to_id(UNK_TOKEN)
    mask_index = tokenizer.token_to_id(MASK_TOKEN)
    pad_index = tokenizer.token_to_id(PAD_TOKEN)

    # sanity check: the vocabulary must match the hard-coded model ids
    assert sep_index == SEP_INDEX
    assert cls_index == CLS_INDEX
    assert unk_index == UNK_INDEX
    assert pad_index == PAD_INDEX
    assert mask_index == MASK_INDEX

    print('collect statistics')
    # collect some statistics of the dataset
    n_unks = (sentences == unk_index).sum()
    n_toks = len(sentences)
    p_unks = n_unks * 100. / n_toks
    n_seqs = (sentences == sep_index).sum()
    print(f'| {n_seqs} sentences - {n_toks} tokens - {p_unks:.2f}% unknown words')

    data = {'sentences': sentences,
            'sep_index': sep_index,
            'cls_index': cls_index,
            'unk_index': unk_index,
            'pad_index': pad_index,
            'mask_index': mask_index}
    torch.save(data, bin_path, pickle_protocol=4)


numerize(params.vocab, params.input, params.bin_path)
| alexa/ramen | code/utils/numerize.py | numerize.py | py | 3,426 | python | en | code | 17 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "tokenizers.BertWordPieceTokenizer",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "numpy.uint16",
"line_number": 63,
"usage_type": "call"
},
{
"api_name": ... |
30840715674 | import random
import string
import cv2
import os
import argparse
BASE_DIR = os.getcwd()
def cut_img(image, column_num, row_num, prefix_name):
    """Split an image into a grid of patches, randomly augment and save them.

    Each patch is independently flipped horizontally, flipped vertically and
    rotated 90 degrees counter-clockwise, each with probability 0.5, then
    written to ./cuted_img with a random 10-letter file name.

    image: image array of shape (H, W, C)
    column_num: number of patches per row (horizontal cuts)
    row_num: number of patches per column (vertical cuts)
    prefix_name: fixed file-name prefix for every patch
    """
    out_dir = os.path.join(BASE_DIR, 'cuted_img')
    # Start from an empty output directory:
    if os.path.isdir(out_dir):
        for file in os.scandir(out_dir):
            os.remove(file.path)
    else:
        os.mkdir(out_dir)

    h, w, _ = image.shape
    # Trim so both dimensions divide evenly into the grid:
    if h % row_num != 0 or w % column_num != 0:
        image = image[:h - (h % row_num), :w - (w % column_num)]

    patch_h = h // row_num
    patch_w = w // column_num
    random_name_len = 10
    for i in range(row_num):
        for j in range(column_num):
            # Bug fix: the local was named 'cut_img', shadowing this function.
            patch = image[i * patch_h:(i + 1) * patch_h, j * patch_w:(j + 1) * patch_w]
            if random.uniform(0, 1) > 0.5:
                patch = cv2.flip(patch, 1)  # horizontal flip
            if random.uniform(0, 1) > 0.5:
                patch = cv2.flip(patch, 0)  # vertical flip
            if random.uniform(0, 1) > 0.5:
                patch = cv2.rotate(patch, cv2.ROTATE_90_COUNTERCLOCKWISE)
            random_name = "".join(random.choice(string.ascii_lowercase)
                                  for _ in range(random_name_len))
            cv2.imwrite(f'cuted_img/{prefix_name}-{random_name}.png', patch)
if __name__=="__main__":
    # CLI: image file, grid size and output prefix.
    parser = argparse.ArgumentParser(description="Divide image into patches")
    parser.add_argument("image_file_name", type=str)
    parser.add_argument("column_num", type=int)
    parser.add_argument("row_num", type=int)
    parser.add_argument("prefix_output_filename", type=str)
    configs = parser.parse_args()

    img = cv2.imread(configs.image_file_name, cv2.IMREAD_COLOR)
    # NOTE(review): converting to RGB here while cv2.imwrite (used inside
    # cut_img) assumes BGR swaps the red/blue channels in the saved patches --
    # confirm this is intentional for the downstream pipeline.
    img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB)
    cut_img(img, configs.column_num, configs.row_num, configs.prefix_output_filename)
| yyeseull/Assignment | image_cut_merge/cut_image.py | cut_image.py | py | 2,100 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "os.getcwd",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "os.path.isdir",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number":... |
39721890455 | # -*- coding: utf-8 -*-
"""
Created on Tue Jul 14 20:21:07 2020
@author: gabriel bustamante
"""
import pandas as pd
import numpy as np
import seaborn as sns
import matplotlib.pyplot as plt
def get_attributes_list(information_level, attributes_description):
    """Return the attribute names belonging to one information level.

    Starts from the attributes listed in *attributes_description* (rows whose
    'information' column equals *information_level*, in first-seen order) and
    appends the attributes known to belong to the level but missing from the
    description file.

    :param information_level: level name, e.g. 'Household' or 'PLZ8'.
    :param attributes_description: dataframe with 'information' and 'attribute' columns.
    :return: list of attribute names.
    """
    # Idiom: the original repeated eight near-identical if-blocks; a lookup
    # table keeps the data in one place without changing the output.
    extra_attributes = {
        '125m x 125m Grid': ['D19_BUCH_CD', 'D19_LETZTER_KAUF_BRANCHE', 'D19_LOTTO', 'D19_SOZIALES'],
        'Building': ['FIRMENDICHTE', 'HH_DELTA_FLAG', 'KOMBIALTER'],
        'Household': ['ALTER_KIND1', 'ALTER_KIND2', 'ALTER_KIND3', 'ALTER_KIND4',
                      'ANZ_KINDER', 'ANZ_STATISTISCHE_HAUSHALTE',
                      'D19_BANKEN_ANZ_12', 'D19_BANKEN_ANZ_24', 'D19_KONSUMTYP_MAX',
                      'D19_TELKO_ANZ_12', 'D19_TELKO_ANZ_24', 'D19_TELKO_ONLINE_QUOTE_12', 'D19_VERSAND_ANZ_12',
                      'D19_VERSAND_ANZ_24', 'D19_VERSI_ANZ_12', 'D19_VERSI_ANZ_24', 'D19_VERSI_ONLINE_QUOTE_12',
                      'KK_KUNDENTYP', 'KONSUMZELLE', 'UMFELD_ALT', 'STRUKTURTYP',
                      'UMFELD_JUNG', 'UNGLEICHENN_FLAG', 'VERDICHTUNGSRAUM',
                      'VHA', 'VHN', 'VK_DHT4A', 'VK_DISTANZ', 'VK_ZG11'],
        'PLZ8': ['KBA13_ANTG1', 'KBA13_ANTG2', 'KBA13_ANTG3', 'KBA13_ANTG4', 'KBA13_BAUMAX', 'KBA13_CCM_1401_2500',
                 'KBA13_CCM_3000', 'KBA13_CCM_3001', 'KBA13_GBZ', 'KBA13_HHZ', 'KBA13_KMH_210'],
        'Person': ['ALTERSKATEGORIE_FEIN', 'AKT_DAT_KL',
                   'CJT_KATALOGNUTZER', 'CJT_TYP_1', 'CJT_TYP_2', 'CJT_TYP_3', 'CJT_TYP_4', 'CJT_TYP_5', 'CJT_TYP_6',
                   'DSL_FLAG', 'EINGEFUEGT_AM', 'EINGEZOGENAM_HH_JAHR',
                   'RT_KEIN_ANREIZ', 'RT_SCHNAEPPCHEN', 'RT_UEBERGROESSE', 'SOHO_KZ'],
        'Microcell (RR4_ID)': ['CAMEO_INTL_2015'],
        'Postcode': ['GEMEINDETYP', 'EXTSEL992'],
        'RR1_ID': ['MOBI_RASTER'],
    }

    attributes_list = list(
        attributes_description[attributes_description['information'] == information_level]['attribute'].unique())
    attributes_list += extra_attributes.get(information_level, [])
    return attributes_list
def plot_heatmap_isna(information_columns, title, data):
    """Visualise missing values of the given columns as a heatmap.

    Each cell is the boolean ``isna()`` of one (row, column) entry, so
    vertical bands reveal systematically missing attributes.

    Args:
        information_columns: column names of *data* to inspect.
        title: title drawn above the plot.
        data: pandas DataFrame holding the attributes.
    """
    plt.figure(figsize=(10,5))
    sns.heatmap(data[information_columns].isna(), cbar=False)
    plt.title(title)
    plt.show()
| gabrielbfs/ArvatoProject_workbook | arvato_project.py | arvato_project.py | py | 2,654 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "matplotlib.pyplot.figure",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot",
"line_number": 53,
"usage_type": "name"
},
{
"api_name": "seaborn.heatmap",
"line_number": 54,
"usage_type": "call"
},
{
"api_name": "matplotlib... |
31511216914 | from pathlib import Path
from itertools import zip_longest
f = open(Path(__file__).resolve().parent / 'input.txt')
# eval would have been easier ...
def read_packet(s):
    """Parse one packet string such as "[1,[2,3],10]" into nested lists.

    A tiny recursive-descent parser: safer than eval() on untrusted input.
    """
    def parse_value(text, pos):
        # A value is either a nested list or a bare (non-negative) integer.
        if text[pos] == '[':
            return parse_list(text, pos)
        return parse_int(text, pos)

    def parse_int(text, pos):
        end = pos
        while text[end].isdigit():
            end += 1
        return int(text[pos:end]), end

    def parse_list(text, pos):
        items = []
        pos += 1  # consume the opening '['
        while text[pos] != ']':
            if text[pos] == ',':
                pos += 1
                continue
            value, pos = parse_value(text, pos)
            items.append(value)
        return items, pos + 1  # consume the closing ']'

    return parse_list(s, 0)[0]
# Parse the puzzle input: packets come in pairs separated by blank lines.
# NOTE: `input` shadows the builtin; kept because the rest of the script
# depends on this module-level name.
input = []
i = iter(f)
while True:
    n = next(i)
    a = read_packet(n.strip())
    n = next(i)
    b = read_packet(n.strip())
    input.append((a, b))
    try:
        # Consume the blank separator line; end of file means we are done.
        next(i)
    except StopIteration:
        # BUG FIX: the bare `except:` also swallowed KeyboardInterrupt and
        # masked real parse errors; only end-of-iterator should stop us.
        break
def cmp(a, b):
    """Three-way packet comparison for AoC 2022 day 13.

    Returns True when *a* sorts strictly before *b*, False when strictly
    after, and None when the two compare equal so far (caller keeps looking).
    """
    if isinstance(a, int) and isinstance(b, int):
        if a == b:
            return None
        return a < b
    if isinstance(a, int):
        # Promote the lone integer to a one-element list and retry.
        return cmp([a], b)
    if isinstance(b, int):
        return cmp(a, [b])
    # Both are lists: compare element-wise; the shorter list wins ties.
    for left, right in zip_longest(a, b):
        if left is None:
            return True
        if right is None:
            return False
        verdict = cmp(left, right)
        if verdict is not None:
            return verdict
    return None
# Part 1: sum the 1-based indices of pairs already in the right order.
# BUG FIX (idiom): the accumulator was named `sum`, shadowing the builtin.
part1_total = 0
for idx, (a, b) in enumerate(input):
    if cmp(a, b):
        part1_total += idx + 1
print(part1_total)
from functools import cmp_to_key
# Part 2: flatten all packets, add the two divider packets, sort, and
# multiply the dividers' 1-based positions.
div1, div2 = [[2]], [[6]]
input = [a for a, b in input] + [b for a, b in input] + [div1, div2]
def cmp2(a, b):
    """Adapt the True/False/None comparator to the -1/+1 contract of sorted()."""
    c = cmp(a, b)
    if c is None:
        # Distinct packets in this puzzle never compare fully equal.
        raise RuntimeError('dafuq')
    return -1 if c else 1
s = sorted(input, key=cmp_to_key(cmp2))
print((s.index(div1)+1) * (s.index(div2)+1))
| andiwand/adventofcode | 2022/day13/solution.py | solution.py | py | 1,641 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "itertools.zip_longest",
"line_number": 53,
"usage_type": "call"
},
{
"api_name": "functools.cmp_to_key",
"line_number": 83,
"usage_type": "call"
}
] |
43655547493 | from django import views
from django.contrib.auth.mixins import LoginRequiredMixin
from django.shortcuts import render
from .forms import ProfileChangeForm
class ProfileView(LoginRequiredMixin, views.View):
    """Personal-account page: shows the user's reserved tours and reviews,
    and accepts profile edits submitted through ProfileChangeForm."""
    template_name = "LK.html"
    form_class = ProfileChangeForm
    def get(self, request, *args, **kwargs):
        """Render the page with the current user's tours and reviews."""
        user = request.user
        user_tours = user.reserved_tours.all()
        user_reviews = user.review_set.all()
        data = {
            "user_tours": user_tours,
            "user_reviews": user_reviews
        }
        return render(request, self.template_name, context=data)
    def post(self, request, *args, **kwargs):
        """Apply the submitted profile changes to the logged-in user.

        NOTE(review): on success the template is re-rendered *without* the
        tours/reviews context that get() provides, and without a redirect —
        confirm this is intended (redirect-after-POST is the usual pattern).
        """
        form = self.form_class(request.POST)
        if form.is_valid():
            user = request.user
            user.last_name = form.cleaned_data['last_name']
            user.first_name = form.cleaned_data['first_name']
            user.patronymic_name = form.cleaned_data['patronymic_name']
            user.phone = form.cleaned_data['phone']
            user.save()
            return render(request, self.template_name)
        else:
            # Invalid input: re-render with the bound form so errors show.
            return render(request, self.template_name, {'form': form})
| Rimasko/touristik_site | ekvatour/users/views.py | views.py | py | 1,177 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "django.contrib.auth.mixins.LoginRequiredMixin",
"line_number": 8,
"usage_type": "name"
},
{
"api_name": "django.views.View",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "django.views",
"line_number": 8,
"usage_type": "name"
},
{
"ap... |
17441169318 | from bs4 import BeautifulSoup
import sqlite3
import urllib.request
import urllib.error
import re
import xlwt
def main():
    """Entry point: scrape the Liepin JAVA/Beijing listing and print it.

    NOTE(review): `savepath` is computed but `savedata` is never called,
    so nothing is ever written to disk — confirm whether that is intended.
    """
    baseurl = 'https://www.liepin.com/zhaopin/?sfrom=click-pc_homepage-centre_searchbox-search_new&dqs=010&key=JAVA'
    datalist = getdata(baseurl)
    savepath = ".\\职位信息"
    print(datalist)
# Disguised request helper (browser-like headers).
def askurl(url):
    """Fetch *url* with browser-like headers and return the decoded HTML.

    Returns the page body as a UTF-8 string, or "" when the request fails
    (the HTTP code / reason is printed in that case).
    """
    # Request headers mimicking a real browser session.
    # NOTE(review): the 'accept - encoding' key contains stray spaces —
    # presumably meant to be 'accept-encoding'; verify before relying on it.
    head = {
        'accept': '*/*',
        'accept - encoding': 'gzip, deflate, br',
        'accept-language': 'zh-CN,zh;q=0.9,en;q=0.8,en-GB;q=0.7,en-US;q=0.6',
        'x-requested-with': 'XMLHttpRequest',
        'referer':'https://www.liepin.com/zhaopin/?sfrom=click-pc_homepage-centre_searchbox-search_new&dqs=010&key=JAVA',
        'user-agent':'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/88.0.4324.96 Safari/537.36 Edg/88.0.705.50',
    }
    # Build the request object.
    request = urllib.request.Request(url=url, headers=head)
    html = ""
    try:
        # Issue the request and decode the response body.
        response = urllib.request.urlopen(request)
        html = response.read().decode("utf-8")
    except urllib.error.URLError as e:
        if hasattr(e, "code"):
            print(e.code)
        if hasattr(e, "reason"):
            print(e.reason)
    # BUG FIX: the original never returned, so every caller received None
    # and BeautifulSoup(html, ...) downstream blew up.
    return html
def getdata(baseurl):
    """Scrape job postings from the listing page(s).

    Returns:
        A list with one row per posting: [company name, salary, location].
    """
    rows = []
    for i in range(0, 1):
        url = baseurl + "?page=2&ka=page-" + str(i)
        html = askurl(url)
        soup = BeautifulSoup(html, "html.parser")
        print(html)
        # Walk the job cards on this page.
        for item in soup.find_all('div', class_="job-content"):
            item = str(item)
            # Extract company name, monthly salary and work location.
            record = []
            record.append(re.findall(findcompanyname, item)[0])
            record.append(re.findall(findsalary, item)[0])
            record.append(re.findall(findworkposition, item)[0])
            # BUG FIX: previously every posting re-bound the same flat
            # `data` list, so only the last job's fields were returned;
            # collect one record per posting instead.
            rows.append(record)
    return rows
# Persist scraped rows to an Excel workbook.
def savedata(datalist, savepath):
    """Write the scraped rows (one list per posting) to an .xls file.

    Args:
        datalist: list of [company, salary, location] rows from getdata().
        savepath: destination path of the workbook.
    """
    print("...")
    # BUG FIX: the keyword names were misspelled (encodeing /
    # style_copression), which makes xlwt.Workbook raise TypeError.
    book = xlwt.Workbook(encoding="utf-8", style_compression=0)
    sheet = book.add_sheet('职位表单', cell_overwrite_ok=True)
    # Column headers. BUG FIX: ("公司名字") was a plain string (parentheses do
    # not make a tuple), so indexing wrote single characters and overflowed.
    col = ("公司名字", "月薪", "工作地点")
    for i in range(len(col)):
        sheet.write(0, i, col[i])
    # BUG FIX: the row loop was `range(0,)` (always empty) and mixed up the
    # names `dta` and `data`; iterate the actual rows instead.
    for i in range(len(datalist)):
        row = datalist[i]
        for j in range(len(row)):
            sheet.write(i + 1, j, row[j])
    book.save(savepath)
# Regex for the company name.
# NOTE(review): 'companyy-name' looks like a typo, but it only works if the
# site's own markup spells the class that way — verify against the live page.
findcompanyname = re.compile(r'<p class="companyy-name">(.*?)<span>')
# Regex for the monthly salary.
findsalary = re.compile(r'<span class="text-warning">(.*?)<span>')
# Regex for the work location.
findworkposition = re.compile(r'<span class="area">(.*?)<span>')
if __name__ == "__main__":
    # BUG FIX: the regexes above used to be defined *after* this guard ran,
    # so main() -> getdata() raised NameError before they existed.
    main()
    print("....")
    # init_db("movetest.db")
# # def init_db(dbpath):
# sql = '''
# create table jobtq(
# id integer primary key autoincrement,
# findcompanyname text,
# findsalary text,
# findtechnologyrequest text,
# findworkposition text,
# finddescription text,
# )
# '''
# conn = sqlite3.connect(dbpath)
# cursor = conn.cursor()
# cursor.execute(sql)
# conn.commit()
# conn.close() | waji785/py-spide | main/website crawl.py | website crawl.py | py | 3,302 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "urllib.request.request.Request",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "urllib.request.request",
"line_number": 25,
"usage_type": "attribute"
},
{
"api_name": "urllib.request",
"line_number": 25,
"usage_type": "name"
},
{
"api_nam... |
4685574067 | import numpy as np
import pandas as pd
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import make_pipeline
from sklearn.linear_model import LinearRegression
from plots import plot_series
from sunspots import error, get_data, sklearn_formatting
class GaussianFeatures(BaseEstimator, TransformerMixin):
    """Uniformly spaced Gaussian radial-basis feature expansion.

    Expands a 1-D regressor into N Gaussian basis activations so a linear
    model downstream can fit a smooth non-linear curve.
    """
    def __init__(self, N, width_f=2.0):
        # N: number of basis centres; width_f: kernel width expressed as a
        # multiple of the spacing between adjacent centres.
        self.N = N
        self.width_f = width_f
    @staticmethod
    def _gauss_basis(x, y, width, axis=None):
        # Unnormalised Gaussian kernel: exp(-0.5 * sum(((x - y)/width)^2)).
        arg = (x - y) / width
        return np.exp(-0.5 * np.sum(arg ** 2, axis))
    def fit(self, X, y=None):
        # N centres along range
        self._centres = np.linspace(X.min(), X.max(), self.N)
        self._width = self.width_f * (self._centres[1] - self._centres[0])
        return self
    def transform(self, X):
        # Broadcasting X[:, :, newaxis] against the centre grid produces one
        # activation per (sample, centre) pair. Assumes X is a 2-D
        # [n_samples, 1] array as produced by sklearn_formatting — TODO confirm.
        return self._gauss_basis(X[:, :, np.newaxis], self._centres, self._width, axis=1)
def linear(train, test, t=132):
    """Fit a Gaussian-basis linear model on *train* and forecast *test*.

    Args:
        train / test: series of observations (project data; indexed so that
            sklearn_formatting can build the design matrices).
        t: unused in this function — presumably a forecast horizon carried
            over from a sibling model; TODO confirm or remove.

    Returns:
        (in-sample fit, out-of-sample prediction, RMSE on the test set).
    """
    X_train, X_test = sklearn_formatting(train, test)
    gauss_model = make_pipeline(
        GaussianFeatures(40),
        LinearRegression(),
    )
    gauss_model.fit(X_train, train.values)
    y_fit = gauss_model.predict(X_train)
    # predict a cycle
    y_pred = gauss_model.predict(X_test)
    rmse = error(test.values, y_pred)
    return y_fit, y_pred, rmse
if __name__ == "__main__":
    # Train, forecast, and plot the fitted/predicted series.
    df_train, df_test = get_data()
    lin_y, lin_y_pred, lin_rmse = linear(df_train, df_test)
    plot_series(df_train, df_test, lin_y, lin_y_pred)
| rkhood/forecasting | linear.py | linear.py | py | 1,507 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.base.BaseEstimator",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "sklearn.base.TransformerMixin",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "numpy.exp",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "nu... |
32918003515 | import glob
import io
import logging
import re
import textwrap
import traceback
from contextlib import redirect_stdout
import discord
from discord.ext import commands
from discord.ext.commands import Cog, Bot, Context
from cogs.commands import settings
from utils import embeds
from utils.record import record_usage
# Enabling logs
log = logging.getLogger(__name__)
class AdministrationCog(Cog):
    """Owner-only administration and utility commands (eval, reload, embed
    generators for the rules/role channels)."""
    def __init__(self, bot):
        self.bot = bot
        # Result of the most recent eval, exposed as `_` inside eval bodies.
        self._last_result = None
    def _cleanup_code(self, content):
        """Automatically removes code blocks from the code."""
        # remove ```py\n```
        if content.startswith('```') and content.endswith('```'):
            return '\n'.join(content.split('\n')[1:-1])
        # remove `foo`
        return content.strip('` \n')
    @commands.before_invoke(record_usage)
    @commands.group(aliases=["u", "ul"])
    async def utilities(self, ctx):
        """Parent group for utility subcommands."""
        if ctx.invoked_subcommand is None:
            # Send the help command for this group
            await ctx.send_help(ctx.command)
    @commands.is_owner()
    @utilities.command(name="ping")
    async def ping(self, ctx):
        """Returns the Discord WebSocket latency."""
        await ctx.send(f"{round(self.bot.latency * 1000)}ms.")
    @commands.is_owner()
    @utilities.command(name="say")
    async def say(self, ctx, *, args):
        """Echos the input argument."""
        await ctx.send(args)
    @commands.is_owner()
    @utilities.command(name="eval")
    async def eval(self, ctx, *, body: str):
        """Evaluates input as Python code.

        Owner-gated by the decorator above — this deliberately executes
        arbitrary code, so the gate is the only safety mechanism.
        """
        # Required environment variables.
        env = {
            'bot': self.bot,
            'ctx': ctx,
            'channel': ctx.channel,
            'author': ctx.author,
            'guild': ctx.guild,
            'message': ctx.message,
            'embeds': embeds,
            '_': self._last_result
        }
        # Creating embed.
        embed = discord.Embed(title="Evaluating.", color=0xb134eb)
        env.update(globals())
        # Calling cleanup command to remove the markdown traces.
        body = self._cleanup_code(body)
        embed.add_field(
            name="Input:", value=f"```py\n{body}\n```", inline=False)
        # Output stream.
        stdout = io.StringIO()
        # Exact code to be compiled.
        to_compile = f'async def func():\n{textwrap.indent(body, "  ")}'
        try:
            # Attempting execution
            exec(to_compile, env)
        except Exception as e:
            # In case there's an error, add it to the embed, send and stop.
            errors = f'```py\n{e.__class__.__name__}: {e}\n```'
            embed.add_field(name="Errors:", value=errors, inline=False)
            await ctx.send(embed=embed)
            return errors
        func = env['func']
        try:
            with redirect_stdout(stdout):
                ret = await func()
        except Exception:
            # In case there's an error, add it to the embed, send and stop.
            value = stdout.getvalue()
            errors = f'```py\n{value}{traceback.format_exc()}\n```'
            embed.add_field(name="Errors:", value=errors, inline=False)
            await ctx.send(embed=embed)
        else:
            value = stdout.getvalue()
            try:
                await ctx.message.add_reaction('\u2705')
            except:
                # Best-effort reaction (e.g. missing permission) — deliberate.
                pass
            if ret is None:
                if value:
                    # Output.
                    output = f'```py\n{value}\n```'
                    embed.add_field(
                        name="Output:", value=output, inline=False)
                    await ctx.send(embed=embed)
            else:
                # Maybe the case where there's no output?
                self._last_result = ret
                output = f'```py\n{value}{ret}\n```'
                embed.add_field(name="Output:", value=output, inline=False)
                await ctx.send(embed=embed)
    @commands.is_owner()
    @utilities.command(name="reload")
    async def reload_cog(self, ctx: commands.Context, name_of_cog: str = None):
        """ Reloads specified cog or all cogs. """
        # NOTE(review): the module path is recovered by regexing the cog's
        # repr ("<module.Class object at 0x...>") — fragile if a cog
        # overrides __repr__; a name->module map would be sturdier.
        regex = r"(?<=<).*(?=\..* object at 0x.*>)"
        if name_of_cog is not None and name_of_cog in ctx.bot.cogs:
            # Reload cog if it exists.
            cog = re.search(regex, str(ctx.bot.cogs[name_of_cog]))
            try:
                self.bot.reload_extension(cog.group())
            except commands.ExtensionError as e:
                await ctx.message.add_reaction("❌")
                await ctx.send(f'{e.__class__.__name__}: {e}')
            else:
                await ctx.message.add_reaction("✔")
                await ctx.send(f"Reloaded `{cog.group()}` module!")
        elif name_of_cog is None:
            # Reload all the cogs in the folder named cogs.
            # Skips over any cogs that start with '__' or do not end with .py.
            cogs = []
            try:
                for cog in glob.iglob("cogs/**/[!^_]*.py", recursive=True):
                    if "\\" in cog:  # Pathing on Windows.
                        self.bot.reload_extension(cog.replace("\\", ".")[:-3])
                    else:  # Pathing on Linux.
                        self.bot.reload_extension(cog.replace("/", ".")[:-3])
            except commands.ExtensionError as e:
                await ctx.message.add_reaction("❌")
                await ctx.send(f'{e.__class__.__name__}: {e}')
            else:
                await ctx.message.add_reaction("✔")
                await ctx.send("Reloaded all modules!")
        else:
            await ctx.message.add_reaction("❌")
            await ctx.send("Module not found, check spelling, it's case sensitive.")
    @commands.is_owner()
    @commands.bot_has_permissions(embed_links=True, send_messages=True)
    @commands.before_invoke(record_usage)
    @commands.command(name="rules")
    async def rules(self, ctx: Context):
        """ Generates the #rules channel embeds. """
        # Captain Karen header image embed
        embed = embeds.make_embed(color="quotes_grey")
        embed.set_image(url="https://i.imgur.com/Yk4kwZy.gif")
        await ctx.send(embed=embed)
        # The actual rules embed
        embed = embeds.make_embed(title="📃 Discord Server Rules", color="quotes_grey", description="This list is not all-encompassing and you may be actioned for a reason outside of these rules. Use common sense when interacting in our community.")
        embed.add_field(name="Rule 1: Do not send copyright-infringing material.", inline=False, value="> Linking to torrents, pirated stream links, direct download links, or uploading files over Discord puts our community at risk of being shut down. We are a discussion community, not a file-sharing hub.")
        embed.add_field(name="Rule 2: Be courteous and mindful of others.", inline=False, value="> Do not engage in toxic behavior such as spamming, derailing conversations, attacking other users, or attempting to instigate drama. Bigotry will not be tolerated. Avoid problematic avatars, usernames, or nicknames.")
        embed.add_field(name="Rule 3: Do not post self-promotional content.", inline=False, value="> We are not a billboard nor the place to advertise your Discord server, app, website, service, etc.")
        embed.add_field(name="Rule 4: Do not post unmarked spoilers.", inline=False, value="> Use spoiler tags and include what series or episode your spoiler is in reference to outside the spoiler tag so people don't blindly click a spoiler.")
        embed.add_field(name="Rule 5: Do not backseat moderate.", inline=False, value="> If you see someone breaking the rules or have something to report, please submit a <#829861810999132160> ticket.")
        embed.add_field(name="Rule 6: Do not abuse pings.", inline=False, value="> Do not ping staff outside of conversation unless necessary. Do not ping VIP users for questions or help with their service. Do not spam or ghost ping other users.")
        embed.add_field(name="Rule 7: Do not beg, buy, sell, or trade.", inline=False, value="> This includes, but is not limited to, server ranks, roles, permissions, giveaways, private community invites, or any digital or physical goods.")
        embed.add_field(name="Rule 8: Follow the Discord Community Guidelines and Terms of Service.", inline=False, value="> The Discord Community Guidelines and Terms of Service govern all servers on the platform. Please familarize yourself with them and the restrictions that come with them. \n> \n> https://discord.com/guidelines \n> https://discord.com/terms")
        await ctx.send(embed=embed)
        # /r/animepiracy links embed
        embed = embeds.make_embed(title="🔗 Our Links", color="quotes_grey")
        embed.add_field(name="Reddit:", inline=True, value="> [/r/animepiracy](https://reddit.com/r/animepiracy)")
        embed.add_field(name="Discord:", inline=True, value="> [discord.gg/piracy](https://discord.gg/piracy)")
        embed.add_field(name="Index:", inline=True, value="> [piracy.moe](https://piracy.moe)")
        embed.add_field(name="Wiki:", inline=True, value="> [wiki.piracy.moe](https://wiki.piracy.moe)")
        embed.add_field(name="Seadex:", inline=True, value="> [releases.moe](https://releases.moe)")
        embed.add_field(name="GitHub:", inline=True, value="> [github.com/ranimepiracy](https://github.com/ranimepiracy)")
        embed.add_field(name="Twitter:", inline=True, value="> [@ranimepiracy](https://twitter.com/ranimepiracy)")
        embed.add_field(name="Uptime Status:", inline=True, value="> [status.piracy.moe](https://status.piracy.moe/)")
        await ctx.send(embed=embed)
        # Clean up the command invoker
        await ctx.message.delete()
    @commands.is_owner()
    @commands.bot_has_permissions(embed_links=True, send_messages=True)
    @commands.before_invoke(record_usage)
    @commands.command(name="createticketembed")
    async def create_ticket_embed(self, ctx: Context):
        """Post the modmail-ticket embed and seed it with the 🎫 reaction."""
        embed = embeds.make_embed(title="🎫 Create a new modmail ticket",
                                  description="Click the react below to create a new modmail ticket.",
                                  color="default")
        embed.add_field(name="Warning:", value="Serious inquiries only. Abuse may result in warning or ban.")
        spawned = await ctx.send(embed=embed)
        await spawned.add_reaction("🎫")
        # Remove the invoking command message to keep the channel clean.
        await ctx.message.delete()
    @commands.is_owner()
    @commands.bot_has_permissions(embed_links=True, send_messages=True)
    @commands.before_invoke(record_usage)
    @commands.command(name="createcolorrolesembed", aliases=['ccre'])
    async def create_color_roles_embed(self, ctx: Context):
        """Post the color-role picker embed and seed the square reactions."""
        embed = discord.Embed(description=f"You can react to one of the squares below to be assigned a colored user role. If you are interested in a different color, you can become a <@&{settings.get_value('role_server_booster')}> to receive a custom colored role.")
        msg = await ctx.send(embed=embed)
        # API call to fetch all the emojis to cache, so that they work in future calls
        emotes_guild = await ctx.bot.fetch_guild(settings.get_value("emoji_guild_id"))
        emojis = await emotes_guild.fetch_emojis()
        await msg.add_reaction(":redsquare:805032092907601952")
        await msg.add_reaction(":orangesquare:805032107952308235")
        await msg.add_reaction(":yellowsquare:805032120971165709")
        await msg.add_reaction(":greensquare:805032132325801994")
        await msg.add_reaction(":bluesquare:805032145030348840")
        await msg.add_reaction(":pinksquare:805032162197635114")
        await msg.add_reaction(":purplesquare:805032172074696744")
        # Remove the invoking command message to keep the channel clean.
        await ctx.message.delete()
    @commands.is_owner()
    @commands.bot_has_permissions(embed_links=True, send_messages=True)
    @commands.before_invoke(record_usage)
    @commands.command(name="createassignablerolesembed", aliases=['care'])
    async def create_assignable_roles_embed(self, ctx: Context):
        """Post the self-assignable event-role embed and seed its reactions."""
        role_assignment_text = """
        You can react to one of the emotes below to assign yourself an event role.
        🎁 <@&832528733763928094> - Receive giveaway pings.
        📢 <@&827611682917711952> - Receive server announcement pings.
        📽 <@&831999443220955136> - Receive group watch event pings.
        <:kakeraW:830594599001129000> <@&832512304334766110> - Receive Mudae event and season pings.
        🧩 <@&832512320306675722> - Receive Rin event pings.
        <:pickaxe:831765423455993888> <@&832512327731118102> - Receive Minecraft server related pings.
        """
        embed = discord.Embed(description=role_assignment_text)
        msg = await ctx.send(embed=embed)
        # API call to fetch all the emojis to cache, so that they work in future calls
        emotes_guild = await ctx.bot.fetch_guild(settings.get_value("emoji_guild_id"))
        await emotes_guild.fetch_emojis()
        await msg.add_reaction("🎁")
        await msg.add_reaction("📢")
        await msg.add_reaction("📽")
        await msg.add_reaction(":kakeraW:830594599001129000")
        await msg.add_reaction("🧩")
        await msg.add_reaction(":pickaxe:831765423455993888")
        # Remove the invoking command message to keep the channel clean.
        await ctx.message.delete()
def setup(bot: Bot) -> None:
    """ Load the AdministrationCog cog. """
    # discord.py extension entry point, invoked by bot.load_extension().
    bot.add_cog(AdministrationCog(bot))
    log.info("Commands loaded: administration")
| richtan/chiya | cogs/commands/moderation/administration.py | administration.py | py | 13,552 | python | en | code | null | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "discord.ext.commands.Cog",
"line_number": 21,
"usage_type": "name"
},
{
"api_name": "discord.ext.commands.before_invoke",
"line_number": 37,
"usage_type": "call"
},
{
"api... |
18684839781 | #==== MOVIE ADMIN VIEWS ====
from django.shortcuts import render, get_object_or_404
from django.urls import reverse
from django.http import HttpResponseRedirect
from django.contrib.auth import authenticate
from movies.models import *
from Metflix.settings import MEDIA_ROOT, MEDIA_URL
import os
# Create your views here.
def index(request):
    """Render the movie-admin landing page (currently a stub context).

    NOTE(review): MEDIA_ROOT is indexed like a 2-element sequence here and
    in add_movies; if settings.MEDIA_ROOT is a plain string these prints
    show single characters — confirm the intended type.
    """
    print (MEDIA_ROOT[0])
    print (MEDIA_ROOT[1])
    # movies/genres are fetched but not yet passed into the template context.
    movies = Movie.objects.all()
    genres = Genre.objects.all()
    context = {
        'test': 'test'
    }
    return render(request, 'movie_admin/index.html', context)
def add_movies(request):
    """Sync the Movie table with the folders under MEDIA_ROOT/movies.

    New folders become Movie rows (type=1); rows whose folder disappeared
    are deleted. Redirects back to the movie-admin index when done.
    """
    # NOTE(review): MEDIA_ROOT is indexed as a 2-element sequence — confirm
    # settings define it that way (a plain string would break this join).
    MOVIE_ROOT = os.path.join(os.path.join(MEDIA_ROOT[0], MEDIA_ROOT[1]), 'movies')
    # Check movie root dir for new movie folders and add them to database.
    movie_list_dirs = os.listdir(MOVIE_ROOT)
    for movie in movie_list_dirs:
        try:
            # Skip folders that already have a Movie row.
            movie_db = Movie.objects.get(title = movie)
        except Movie.DoesNotExist:
            # BUG FIX: the bare `except:` also swallowed database errors and
            # MultipleObjectsReturned; only a missing row should create one.
            m = Movie.objects.create(title = movie, type = 1)
            m.save()
        else:
            print ('Movie excists')
    # Check all database entries against the movie root dir: rows without a
    # matching folder are deleted.
    movie_list_db = Movie.objects.all()
    for movie in movie_list_db:
        if not os.path.isdir(os.path.join(MOVIE_ROOT, movie.title)):
            movie.delete()
    return HttpResponseRedirect(reverse('movie_admin:index'))
def add_series(request):
    """Sync series rows (Movie.type == 2) with folders under MEDIA_ROOT/series.

    NOTE(review): unlike add_movies this joins MEDIA_ROOT directly (no
    [0]/[1] indexing) — the two views disagree on MEDIA_ROOT's type.
    """
    SERIES_ROOT = os.path.join(MEDIA_ROOT, 'series')
    series_list_dir = os.listdir(SERIES_ROOT)
    for series in series_list_dir:
        try:
            series_db = Movie.objects.get(title = series)
        except Movie.DoesNotExist:
            # BUG FIX: the original created `Movie.objects.create(title=movie,
            # ...)` — `movie` is undefined here, so the first new series
            # raised NameError. Also narrow the bare `except:`.
            s = Movie.objects.create(title = series, type = 2)
            s.save()
        else:
            print("Series Excists")
        # Season scanning is unfinished: the filter() below has no arguments.
        # TODO: match Episode rows to the season folders discovered here.
        season_list_dir = os.listdir(os.path.join(SERIES_ROOT, series))
        for season in season_list_dir:
            episode_db = Episode.objects.all()
            season_db = episode_db.filter()
    return HttpResponseRedirect(reverse('movie_admin:index'))
| nall3n/Metflix | Metflix/movie_admin/views.py | views.py | py | 2,226 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "Metflix.settings.MEDIA_ROOT",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "Metflix.settings.MEDIA_ROOT",
"line_number": 18,
"usage_type": "name"
},
{
"api_name": "movies.models",
"line_number": 19,
"usage_type": "name"
},
{
"api_name": ... |
34366092564 | # -*- coding: utf-8 -*-
from builtins import object
from PyQt5 import QtCore, QtGui, QtWidgets
# Qt4->Qt5 compatibility shim: QString.fromUtf8 no longer exists under
# PyQt5, so fall back to the identity function.
try:
    _fromUtf8 = QtCore.QString.fromUtf8
except AttributeError:
    _fromUtf8 = lambda s: s
class Ui_qgsnewhttpconnectionbase(object):
    """Designer-style UI definition for the "New WCPS Server" dialog.

    Generated-style code: a name/URL line edit pair plus an OK/Cancel
    button box. Prefer editing the .ui source over hand-tweaking this.
    """
    def setupUi(self, qgsnewhttpconnectionbase):
        """Build the dialog's widgets and wire the accept/reject signals."""
        qgsnewhttpconnectionbase.setObjectName(_fromUtf8("qgsnewhttpconnectionbase"))
        qgsnewhttpconnectionbase.resize(642, 153)
        self.buttonBox = QtWidgets.QDialogButtonBox(qgsnewhttpconnectionbase)
        self.buttonBox.setGeometry(QtCore.QRect(280, 110, 341, 32))
        self.buttonBox.setFocusPolicy(QtCore.Qt.NoFocus)
        self.buttonBox.setOrientation(QtCore.Qt.Horizontal)
        self.buttonBox.setStandardButtons(QtWidgets.QDialogButtonBox.Cancel|QtWidgets.QDialogButtonBox.Ok)
        self.buttonBox.setObjectName(_fromUtf8("buttonBox"))
        self.label_NewSrvName = QtWidgets.QLabel(qgsnewhttpconnectionbase)
        self.label_NewSrvName.setGeometry(QtCore.QRect(20, 33, 91, 17))
        self.label_NewSrvName.setObjectName(_fromUtf8("label_NewSrvName"))
        self.label_NewSrvUrl = QtWidgets.QLabel(qgsnewhttpconnectionbase)
        self.label_NewSrvUrl.setGeometry(QtCore.QRect(20, 75, 91, 17))
        self.label_NewSrvUrl.setObjectName(_fromUtf8("label_NewSrvUrl"))
        self.txt_NewSrvName = QtWidgets.QLineEdit(qgsnewhttpconnectionbase)
        self.txt_NewSrvName.setEnabled(True)
        self.txt_NewSrvName.setGeometry(QtCore.QRect(120, 30, 501, 27))
        self.txt_NewSrvName.setCursor(QtGui.QCursor(QtCore.Qt.IBeamCursor))
        self.txt_NewSrvName.setFocusPolicy(QtCore.Qt.ClickFocus)
        self.txt_NewSrvName.setObjectName(_fromUtf8("txt_NewSrvName"))
        self.txt_NewSrvUrl = QtWidgets.QLineEdit(qgsnewhttpconnectionbase)
        self.txt_NewSrvUrl.setGeometry(QtCore.QRect(120, 70, 501, 27))
        self.txt_NewSrvUrl.setObjectName(_fromUtf8("txt_NewSrvUrl"))
        self.retranslateUi(qgsnewhttpconnectionbase)
        # OK/Cancel drive the dialog's accept()/reject() slots directly.
        self.buttonBox.accepted.connect(qgsnewhttpconnectionbase.accept)
        self.buttonBox.rejected.connect(qgsnewhttpconnectionbase.reject)
        QtCore.QMetaObject.connectSlotsByName(qgsnewhttpconnectionbase)
    def retranslateUi(self, qgsnewhttpconnectionbase):
        """Install the user-visible (translatable) strings."""
        qgsnewhttpconnectionbase.setWindowTitle(QtWidgets.QApplication.translate("qgsnewhttpconnectionbase", "New WCPS Server ", None))
        self.label_NewSrvName.setText(QtWidgets.QApplication.translate("qgsnewhttpconnectionbase", "Server Name", None))
        self.label_NewSrvUrl.setText(QtWidgets.QApplication.translate("qgsnewhttpconnectionbase", "Server URL", None))
| kalxas/rasdaman | applications/qgis-wcps/qgis3/QgsWcpsClient1/qgsnewhttpconnectionbase.py | qgsnewhttpconnectionbase.py | py | 2,617 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "PyQt5.QtCore.QString",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "PyQt5.QtCore",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "builtins.object",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PyQt5.QtWidgets.... |
12195682849 | """Point cloud generation functions to test benchmarks of rotation invariant functions
"""
from dataclasses import dataclass
from functools import lru_cache
import numpy as np
import torch
from numpy.linalg import norm
from scipy.spatial.transform import Rotation
from scipy.spatial.transform import Rotation as R
def to_torch_tensor(np_array):
    """Wrap a numpy array as a float32 torch tensor with a leading batch dim."""
    tensor = torch.tensor(np_array)
    return tensor.to(torch.float32).unsqueeze(0)
def to_numpy_array(torch_tensor):
    """Detach a torch tensor, drop singleton dims, and return a numpy array."""
    return torch_tensor.detach().squeeze().numpy()
# Rotation and Permutation matrices
def generate_permutation_matrix(N):
    """Return a random N x N permutation matrix (identity with shuffled rows)."""
    identity = np.identity(N)
    return np.random.permutation(identity)
def generate_rotation_matrix(theta=None, axis=None):
    """Rotation matrix of amplitude *theta* about *axis*.

    Either argument left as None is drawn at random (theta uniform in
    [0, 2*pi), axis uniform on the sphere after normalisation). A caller-
    provided axis is used as-is, matching the original behaviour.
    """
    if theta is None:
        theta = 2 * np.pi * np.random.rand(1)[0]
    if axis is None:
        axis = np.random.rand(3) - 0.5
        axis /= norm(axis)
    rotvec = theta * axis
    return Rotation.from_rotvec(rotvec).as_matrix()
def rotate(*args):
    """Apply one shared random rotation to every point tensor given.

    All arguments are right-multiplied by the same random rotation matrix,
    so relative geometry between the clouds is preserved.
    """
    rotation = generate_rotation_matrix()
    return [cloud @ rotation for cloud in args]
# Centering
def center_batch(batch):
    """Subtract each cloud's barycenter so every batch element is mean-free.

    Args:
        batch (torch.tensor): [n_batch x N_points x 1 x 3]

    Returns:
        torch.tensor: same shape, centered along the point axis.
    """
    barycenters = batch.mean(axis=1).unsqueeze(1)
    return batch - barycenters
def center(sample):
    """Translate an (N, 3) sample so that its barycenter sits at the origin.

    Args:
        sample (np.array): N x 3 point cloud.
    """
    barycenter = sample.mean(axis=0)
    return sample - barycenter
def get_points_on_sphere(N):
    """N points spread evenly on the unit sphere via the 'Golden Spiral'.

    Code by Chris Colbert from the numpy-discussion list.
    """
    golden = (1 + np.sqrt(5)) / 2        # the golden ratio
    theta_step = 2 * np.pi / golden      # longitude increment per point
    band_height = 2.0 / float(N)         # a unit sphere spans z in [-1, 1]
    idx = np.arange(N)                   # one point per band
    z = idx * band_height - 1 + band_height / 2  # mid-height of each band
    radius = np.sqrt(1 - z * z)          # circle radius at height z
    azimuth = idx * theta_step           # azimuthal angle modulo 2*pi
    return np.column_stack((radius * np.cos(azimuth),
                            radius * np.sin(azimuth),
                            z))
def get_angle(u, v):
    """Angle in radians between 3-d vectors u and v.

    The cosine is clipped to [-1, 1] so floating-point round-off never
    pushes arccos out of its domain.
    """
    cosine = np.dot(u, v) / (norm(u) * norm(v))
    return np.arccos(np.clip(cosine, -1, 1))
def get_n_regular_rotations(N):
    """
    Return N regular rotations matrices
    Args:
        N (int): number of rotation matrixes to return

    NOTE(review): from_rotvec is fed theta * cross_vec with an
    *unnormalised* cross product (|cross| = sin(theta)), so the realised
    rotation angle is theta * sin(theta), not theta. The cited method
    normalises the axis — confirm whether this deviation is intentional.
    """
    # generate random points on the unit sphere
    reg_points = get_points_on_sphere(N)
    # take the first vector as the reference one
    ref_vec = reg_points[0]
    # computation of angle and cross product as described here
    # https://math.stackexchange.com/questions/2754627/rotation-matrices-between-two-points-on-a-sphere
    thetas = [get_angle(ref_vec, v) for v in reg_points]
    cross_vecs = np.cross(ref_vec, reg_points)
    rotations = [
        (R.from_rotvec(theta * cross_vec).as_matrix())
        for theta, cross_vec in zip(thetas, cross_vecs)
    ]
    return rotations
# Gaussian Point cloud
def get_gaussian_point_cloud(N_pts):
    # NOTE(review): despite the name, np.random.rand samples the *uniform*
    # distribution on [0, 1)^3, not a Gaussian — confirm intent before
    # renaming or switching to randn (callers may rely on the range).
    return np.random.rand(N_pts, 3)
# Spiral Point cloud
def get_asym_spiral(spiral_amp=1.0, width_factor=1.0, N_pts=40):
    """Spiral whose radius grows with height (asymmetric along z).

    Args:
        spiral_amp (float, optional): total height of the spiral along z.
        width_factor (float, optional): radial scaling of the x/y coordinates.
        N_pts (int, optional): number of points sampled along the curve.

    Returns:
        np.array: (N_pts, 3) point cloud.
    """
    heights = np.linspace(0, spiral_amp, N_pts)
    phase = heights * 4 * np.pi
    radius = (phase + 4) / 10 * width_factor  # radius widens with the phase
    xs = radius * np.sin(phase)
    ys = radius * np.cos(phase)
    return np.column_stack((xs, ys, heights))
def get_spiral(spiral_amp=1.0, N_pts=40):
    """Unit-radius helix of height *spiral_amp* sampled at N_pts points.

    Args:
        spiral_amp (float, optional): total height along z.
        N_pts (int, optional): number of points along the curve.

    Returns:
        np.array: (N_pts, 3) point cloud.
    """
    heights = np.linspace(0, spiral_amp, N_pts)
    phase = 4 * np.pi * heights
    return np.column_stack((np.sin(phase), np.cos(phase), heights))
@dataclass(frozen=True)
class SpiralGenerator:
    """Immutable spiral configuration whose generate() output is memoised.

    NOTE(review): @lru_cache on a method keeps every instance alive for the
    cache's lifetime and returns the *same* ndarray object on repeated
    calls — mutating the result mutates the cache. functools.cached_property
    (or copying before return) would avoid both hazards.
    """
    spiral_amp: float = 1.0     # total height of the spiral along z
    scaling: float = 1.0        # multiplicative stretch applied to z
    shift: float = 0.0          # additive offset applied to z
    asym: bool = False          # use the widening (asymmetric) spiral
    width_factor: float = 1.0   # radial scaling of the asymmetric spiral
    centering: bool = False     # re-center the final cloud at the origin
    def __post_init__(self):
        # Centering would undo any shift, so the two are mutually exclusive.
        assert(not (self.centering and self.shift != 0))
    @lru_cache(None)
    def generate(self,):
        """Build and cache the configured (N, 3) spiral point cloud."""
        if self.asym:
            points = get_asym_spiral(
                spiral_amp=self.spiral_amp, width_factor=self.width_factor)
        else:
            points = get_spiral(spiral_amp=self.spiral_amp)
        [xline, yline, zline] = points.transpose()
        zline *= self.scaling
        zline += self.shift
        points = np.array([xline, yline, zline]).transpose()
        if self.centering:
            points = center(points)
        return points
def get_custom_spiral(spiral_amp=1.0, scaling=1.0, shift=0.0, asym=False, centering=False):
    """Spiral with optional z-scaling, z-shift and centering.

    `centering` and a nonzero `shift` are mutually exclusive, since the
    centering step would undo the shift.
    """
    assert(not (centering and shift != 0))
    base = get_asym_spiral(spiral_amp=spiral_amp) if asym else get_spiral(spiral_amp=spiral_amp)
    xs, ys, zs = base.transpose()
    zs = zs * scaling + shift
    result = np.array([xs, ys, zs]).transpose()
    return center(result) if centering else result
def get_src_shifted_spirals(
    spiral_amp=1.0, shift=0.5, asym=False, center_input=False, center_target=False
):
    """
    Return vertical src spiral cloud point and its vertically shifted version.

    Args:
        spiral_amp: height of the generated spiral.
        shift: vertical (z) offset applied to the target copy.
        asym: use the asymmetric (widening) spiral when True.
        center_input / center_target: re-center the respective cloud.

    Returns:
        (source_points, target_points) as (N, 3) numpy arrays.
    """
    # torch.set_default_dtype(torch.float64) # works best in float64
    if asym:
        points = get_asym_spiral(spiral_amp=spiral_amp)
    else:
        points = get_spiral(spiral_amp=spiral_amp)
    # BUG FIX: xline/yline/zline were referenced without ever being unpacked
    # from `points`, so this function always raised NameError.
    [xline, yline, zline] = points.transpose()
    target_points = np.array([xline, yline, (zline + shift)]).transpose()
    if center_input:
        points = center(points)
    if center_target:
        target_points = center(target_points)
    return points, target_points
def get_src_scaled_spirals(
    spiral_amp=1.0, z_scale=3, asym=False, center_input=False, center_target=False
):
    """Return a source spiral and a copy whose z axis is scaled by *z_scale*."""
    base = get_asym_spiral(spiral_amp=spiral_amp) if asym else get_spiral(spiral_amp=spiral_amp)
    xs, ys, zs = base.transpose()
    target_points = np.array([xs, ys, zs * z_scale]).transpose()
    points = base
    if center_input:
        points = center(points)
    if center_target:
        target_points = center(target_points)
    return points, target_points
def get_src_inverted_spirals(spiral_amp=1.0):
    """
    Return inverted spiral with same positions

    The target cloud is the source with the x column negated.
    """
    points = get_spiral(spiral_amp=spiral_amp)
    # BUG FIX: every sibling helper transposes before unpacking into
    # coordinate columns; without the transpose an (N, 3) array unpacks
    # into rows and raises ValueError for N != 3.
    [xline, yline, zline] = points.transpose()
    target_points = np.array([-xline, yline, zline]).transpose()
    return points, target_points
| ymentha14/se3_molecule_generation | src/ri_distances/pnt_cloud_generation.py | pnt_cloud_generation.py | py | 7,508 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "torch.tensor",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "torch.float32",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "numpy.random.permutation",
"line_number": 34,
"usage_type": "call"
},
{
"api_name": "numpy.random... |
5282218579 | from django.urls import path
from .views import (
PdfextListView,
PdfextDetailView,
PdfextCreateView,
PdfextUpdateView,
PdfextDeleteView,
UserPdfextListView,
PdfextConvertView,
)
from . import views
# Route table for the pdfext app; `name=` values are referenced by
# templates and views via reverse()/{% url %}.
urlpatterns = [
    path('', PdfextListView.as_view(), name='pdfext-home'),
    # All pdfexts uploaded by a single user.
    path('user/<str:username>', UserPdfextListView.as_view(), name='user-pdfexts'),
    path('pdfext/<int:pk>/', PdfextDetailView.as_view(), name='pdfext-detail'),
    path('pdfext/new/', PdfextCreateView.as_view(), name='pdfext-create'),
    path('pdfext/<int:pk>/update/', PdfextUpdateView.as_view(), name='pdfext-update'),
    path('pdfext/<int:pk>/delete/', PdfextDeleteView.as_view(), name='pdfext-delete'),
    path('pdfext/<int:pk>/convert/', PdfextConvertView.as_view(), name='pdfext-convert'),
    # NOTE(review): this route reuses name='pdfext-delete' (already used
    # above); reverse('pdfext-delete') will resolve to only one of the two
    # URLs — confirm which one is intended.
    path('media/Files/<int:pk>',PdfextDeleteView.as_view(),name='pdfext-delete' ),
    path('downloadjson/<int:pk>',views.downloadjson, name="downloadjson"),
    path('search/',views.search,name='search' ),
    path('about/', views.about, name='pdfext-about'),
]
| perpertuallearner/all_about_pdfs | pdfext/urls.py | urls.py | py | 1,070 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.PdfextListView.as_view",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "views.PdfextListView",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "dj... |
9618399050 | from itertools import groupby
# Demo of itertools.groupby: it groups *consecutive* equal keys, so the
# input must already be sorted/ordered by the grouping key.
lis = [1, 2, 2, 3, 4, 4, 4, 4]
# NOTE: this prints (key, iterator) pairs, not the group contents — each
# group is a lazy iterator that is exhausted once consumed.
print(list(groupby(lis)))

# Tuples pre-ordered by their first element (the grouping key).
things = [("animal", "bear"), ("animal", "duck"), ("plant", "cactus"), ("vehicle", "speed boat"), ("vehicle", "school bus")]
for key, group in groupby(things, lambda x: x[0]):
    for thing in group:
        print("A %s is a %s." % (thing[1], key))
    print(" ")

# Count the length of each run of equal values.
for key, group in groupby(lis, lambda x:x):
    count = 0
    for item in group:
        count += 1
    print("The key is "+ str(key) +" the count is "+str(count))
| hemantkgupta/Python3 | itertools/groupby.py | groupby.py | py | 518 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "itertools.groupby",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "itertools.groupby",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "itertools.groupby",
"line_number": 12,
"usage_type": "call"
}
] |
32068119633 | from selenium import webdriver
import time
import math
try:
    # Open the exercise page and click the button, which triggers a
    # redirect that opens a new browser window.
    browser = webdriver.Chrome()
    link = "http://suninjuly.github.io/redirect_accept.html"
    browser.get(link)
    browser.find_element_by_tag_name("button").click()
    #confirm = browser.switch_to.alert
    #confirm.accept()
    # Debug output: list all window handles and mark the current one.
    windows = browser.window_handles
    current_window = browser.current_window_handle
    for win in windows:
        if current_window == win:
            print(win, " with current index: ", windows.index(win))
        else:
            print(win, " with index: ", windows.index(win))
    # Switch to the newly opened (second) window.
    new_window = browser.window_handles[1]
    browser.switch_to.window(new_window)
    # Read the challenge value, compute the answer and submit it.
    # NOTE(review): find_element_by_* was removed in Selenium 4 — this
    # script presumably targets Selenium 3; confirm before upgrading.
    x = browser.find_element_by_id("input_value").text
    print(x)
    y = str(math.log(abs(12*math.sin(int(x)))))
    browser.find_element_by_id("answer").send_keys(y)
    browser.find_element_by_tag_name("button").click()
finally:
    # Leave the result visible briefly, then tear the browser down.
    time.sleep(7)
    browser.close()
    browser.quit()
| EkS2018/stepik-selenium | 21.py | 21.py | py | 961 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "selenium.webdriver.Chrome",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "selenium.webdriver",
"line_number": 6,
"usage_type": "name"
},
{
"api_name": "math.log",
"line_number": 26,
"usage_type": "call"
},
{
"api_name": "math.sin",
"l... |
22078780419 | import ast
import inspect
class Unpacking(ast.AST):
    """Synthetic AST node marking one element of an unpacking assignment."""

    _attributes = ()
    _fields = ("value",)

    def __init__(self, value, counter, starred=False):
        # counter: position within the unpacking target;
        # starred: True for the *starred element.
        self.value, self.counter, self.starred = value, counter, starred
class ForTargetPlaceholder(ast.AST):
    """Leaf marker node (no children); presumably stands in for a for-loop
    target to be substituted later — confirm with callers."""
    _attributes = ()
    _fields = ()


class ExceptionMatch(ast.AST):
    """Synthetic node wrapping the expression an exception is matched
    against (the `except <value>:` clause)."""
    _attributes = ()
    _fields = ("value",)

    def __init__(self, value):
        self.value = value


class ExceptionPlaceholder(ast.AST):
    """Leaf marker node; presumably stands in for the caught exception
    object — confirm with callers."""
    _attributes = ()
    _fields = ()


class Reraise(ast.AST):
    """Leaf marker node representing a bare `raise` (re-raise)."""
    _attributes = ()
    _fields = ()
class ComprehensionBody(ast.AST):
    """Synthetic node pairing a comprehension's element expression with the
    collection literal it accumulates into."""

    _attributes = ()
    _fields = ("value", "collection")

    def __init__(self, value, collection):
        self.value, self.collection = value, collection
class RewriteComprehensionArgs(ast.NodeTransformer):
    """Rebuild a comprehension expression from its desugared for-loop form.

    `args` holds the expressions substituted for the synthetic ".0"-style
    positional names CPython gives to comprehension arguments. `cls` is
    the comprehension class (ListComp/SetComp/DictComp/GeneratorExp)
    detected from the innermost loop body.
    """
    def __init__(self, args):
        self.args = args
        self.cls = None

    def visit_Name(self, node):
        # Names like ".0" are the hidden comprehension parameters; replace
        # them with the caller-supplied expressions.
        if node.id.startswith("."):
            var_idx = int(node.id[1:])
            return self.args[var_idx]
        return node

    def visit_For(self, node: ast.For):
        # Decompiled loop bodies are a single statement, optionally
        # followed by a trailing `continue`.
        assert (
            len(node.body) == 1
            or len(node.body) == 2
            and isinstance(node.body[1], ast.Continue)
        )
        target = self.visit(node.target)
        elt = self.visit(node.body[0])
        iter = self.visit(node.iter)
        condition = None
        # An `if` wrapped around the body becomes the comprehension filter.
        if isinstance(elt, ast.If):
            condition = elt.test
            assert (
                len(elt.body) == 1
                or len(elt.body) == 2
                and isinstance(elt.body[1], ast.Continue)
            )
            elt = elt.body[0]
        generators = [
            ast.comprehension(
                target=target,
                iter=iter,
                ifs=[condition] if condition is not None else [],  # TODO
                is_async=False,  # TODO
            )
        ]
        # if not isinstance(elt, ast.IfExp):
        #     raise Exception("Expected IfExp instead of " + ast.dump(elt))
        # TODO handle DictComp
        if self.cls and isinstance(elt, self.cls):
            # Inner loop already rewritten into a comprehension: splice its
            # generators after ours instead of nesting comprehensions.
            generators = generators + elt.generators
            elt = elt.elt
        elif isinstance(elt, ComprehensionBody):
            # Innermost loop: the collection literal determines which
            # comprehension type is being rebuilt.
            self.cls = {
                ast.List: ast.ListComp,
                ast.Set: ast.SetComp,
                ast.Dict: ast.DictComp,
            }[type(elt.collection)]
            elt = elt.value
        elif isinstance(elt, ast.Expr) and isinstance(elt.value, ast.Yield):
            # A bare `yield` in the body means a generator expression.
            self.cls = ast.GeneratorExp
            elt = elt.value.value
        else:
            raise Exception("Unexpected " + ast.dump(elt) + ", cls:" + str(self.cls))
        if issubclass(self.cls, ast.DictComp):
            # NOTE(review): DictComp has no `ifs` field — the kwarg only
            # sets a stray attribute. `elt` is assumed to be a (key, value)
            # pair here — confirm what ComprehensionBody.value holds for
            # dict comprehensions.
            return ast.DictComp(
                key=elt[0],
                value=elt[1],
                generators=generators,
                ifs=[],
            )
        else:
            # NOTE(review): same remark — ListComp/SetComp/GeneratorExp
            # have no `ifs` field either.
            return self.cls(
                elt=elt,
                generators=generators,
                ifs=[],
            )
class RemoveLastContinue(ast.NodeTransformer):
    """Drop `continue` statements that are redundant because they are the
    final statement of a loop body (or of a branch in final position)."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # True while visiting a statement position where a trailing
        # `continue` is a no-op and may be deleted.
        self.can_remove_continue = False

    def generic_visit(self, node):
        for field, old_value in ast.iter_fields(node):
            if field in ("body", "orelse") and isinstance(old_value, list):
                new_body = []
                # Remember the flag so nested visits can restore it.
                last_val = self.can_remove_continue
                # Visit all-but-last and the last statement separately:
                # only the last statement of a loop body (or of a branch
                # that is itself in last position) may drop its continue.
                for last_in_body, split in [
                    (False, old_value[:-1]),
                    (True, old_value[-1:]),
                ]:
                    self.can_remove_continue = (
                        (isinstance(node, (ast.For, ast.While)) or last_val)
                        and last_in_body
                        and not (
                            # A body consisting solely of `continue` must
                            # keep it, or the body would become empty.
                            len(old_value) == 1
                            and isinstance(old_value[0], ast.Continue)
                        )
                    )
                    for value in split:
                        if isinstance(value, ast.AST):
                            value = self.visit(value)
                            if value is None:
                                continue
                        new_body.append(value)
                self.can_remove_continue = last_val
                old_value[:] = new_body
            elif isinstance(old_value, list):
                # Non-body lists (targets, comparators, ...): plain rewrite.
                new_values = []
                for value in old_value:
                    if isinstance(value, ast.AST):
                        value = self.visit(value)
                        if value is None:
                            continue
                    new_values.append(value)
                old_value[:] = new_values
        return node

    def visit_Continue(self, node):
        # Delete the `continue` only when the surrounding context marked
        # it as the final, redundant statement.
        if self.can_remove_continue:
            return None
        return node
class WhileBreakFixer(ast.NodeTransformer):
    """Rewrite `break` statements whose loop appears in `while_fusions`
    (loops fused into an outer while) into `continue` statements."""

    def __init__(self, while_fusions):
        self.while_fusions = while_fusions

    def visit_Break(self, node):
        # Breaks are tagged with the loop node they target via `_loop_node`;
        # untagged breaks are left untouched.
        if hasattr(node, "_loop_node") and node._loop_node in self.while_fusions:
            return ast.Continue()
        return node
def negate(node):
    """Return the logical negation of *node*, simplifying where possible."""
    # not (not x)  ->  x
    if isinstance(node, ast.UnaryOp) and isinstance(node.op, ast.Not):
        return node.operand
    # De Morgan: not (a or b) -> (not a) and (not b), and vice versa.
    if isinstance(node, ast.BoolOp):
        if isinstance(node.op, ast.Or):
            return ast.BoolOp(op=ast.And(), values=[negate(v) for v in node.values])
        if isinstance(node.op, ast.And):
            return ast.BoolOp(op=ast.Or(), values=[negate(v) for v in node.values])
    # Fallback: wrap in a `not`.
    return ast.UnaryOp(op=ast.Not(), operand=node)
def get_origin(trees: ast.AST):
    """Return the `_origin_node` with the smallest `_origin.offset` across
    *trees* (a single tree or an iterable of trees).

    Nodes without the `_origin`/`_origin_node` annotations (attached by
    earlier passes) are skipped; returns None when no node carries one.
    """
    origin = None
    offset = None
    if isinstance(trees, ast.AST):
        trees = [trees]
    for tree in trees:
        for node in ast.walk(tree):
            try:
                if offset is None or node._origin.offset < offset:
                    origin = node._origin_node
                    # BUG FIX: `offset` was never updated, so the
                    # comparison above always saw None and the *last*
                    # walked node won instead of the earliest one.
                    offset = node._origin.offset
            except AttributeError:
                # Node has no origin annotation — ignore it.
                pass
    return origin
def walk_with_parent(node):
"""
Recursively yield all descendant nodes in the tree starting at *node*
(including *node* itself), in no specified order. This is useful if you
only want to modify nodes in place and don't care about the context.
"""
from collections import deque
todo = deque([(None, node)])
while todo:
parent, node = todo.popleft()
todo.extend((node, child) for child in ast.iter_child_nodes(node))
yield parent, node
def remove_from_parent(item: ast.AST, parent: ast.AST):
    """Detach *item* from *parent*'s fields.

    Removes the first occurrence from a list field, or deletes a scalar
    field that holds *item*. Returns True if something was removed.
    """
    for field_name, value in ast.iter_fields(parent):
        if isinstance(value, list):
            if item in value:
                value.remove(item)
                return True
        elif value is item:
            delattr(parent, field_name)
            return True
    return False
def make_bool_op(op, values):
    """Build a BoolOp from *values*, flattening operands that already use
    the same boolean operator (avoids `a and (b and c)` nesting)."""
    assert len(values) > 0 and isinstance(op, (ast.And, ast.Or))
    flat = []
    for value in values:
        if isinstance(value, ast.BoolOp) and isinstance(value.op, type(op)):
            flat.extend(value.values)
        else:
            flat.append(value)
    return ast.BoolOp(op=op, values=flat)
def reconstruct_arguments_str(code):
    """Rebuild a def-style argument list string from a code object."""
    details = inspect.getargs(code)
    parts = []
    if details.args:
        parts.append(", ".join(details.args))
    # inspect.getargs has no separate kwonlyargs attribute on current
    # Pythons; getattr keeps this tolerant either way.
    kwonly = getattr(details, "kwonlyargs", None)
    if kwonly:
        parts.append("*, " + ", ".join(kwonly))
    if details.varargs:
        parts.append("*" + details.varargs)
    if details.varkw:
        parts.append("**" + details.varkw)
    return ", ".join(parts)
| percevalw/pygetsource | pygetsource/ast_utils.py | ast_utils.py | py | 8,107 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "ast.AST",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "ast.AST",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "ast.AST",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "ast.AST",
"line_number": 28... |
43792968736 | # -*- coding:utf-8 -*-
__all__=['getDataGenerator']
import keras
from keras.preprocessing.image import ImageDataGenerator,array_to_img
from keras.datasets import cifar10
import numpy as np
import os
import pickle
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
def getDataGenerator(train_phase, rescale=1./255):
    """return the data generator that consistently
    generates image batches after data augmentation

    Args:
        train_phase:
            flag variable that denotes whether the data augmentation is
            applied on the train set or validation set
        rescale:
            rescaling parameter for Keras ImageDataGenerator
    Return:
        keras data generator
    """
    if train_phase == True:
        # Training: light geometric augmentation plus horizontal flips.
        return ImageDataGenerator(
            rotation_range=0.,
            width_shift_range=0.05,
            height_shift_range=0.05,
            shear_range=0.05,
            zoom_range=0.05,
            channel_shift_range=0.,
            fill_mode='nearest',
            horizontal_flip=True,
            vertical_flip=False,
            rescale=rescale)
    # Validation: only rescaling is applied.
    return ImageDataGenerator(rescale=rescale)
def testDataGenerator(pics_num):
    """visualize the pics after data augmentation
    Args:
        pics_num:
            the number of pics you want to observe
            (NOTE(review): the subplot grid uses pics_num//5 rows of 5,
            so values that are not multiples of 5 truncate the grid)
    return:
        None
    """
    print("Now, we are testing data generator......")
    # Load CIFAR-10 and one-hot encode the train labels.
    (x_train,y_train),(x_test,y_test) = cifar10.load_data()
    x_train = x_train.astype('float32')
    y_train = keras.utils.to_categorical(y_train, 10)

    # Load label names to use in prediction results
    label_list_path = 'datasets/cifar-10-batches-py/batches.meta'
    keras_dir = os.path.expanduser(os.path.join('~', '.keras'))
    datadir_base = os.path.expanduser(keras_dir)
    # Fall back to /tmp/.keras when ~/.keras is not writable.
    if not os.access(datadir_base, os.W_OK):
        datadir_base = os.path.join('/tmp', '.keras')
    label_list_path = os.path.join(datadir_base, label_list_path)

    with open(label_list_path, mode='rb') as f:
        labels = pickle.load(f)

    datagen = getDataGenerator(train_phase=True)
    """
    x_batch is a [-1,row,col,channel] np array
    y_batch is a [-1,labels] np array
    """
    figure = plt.figure()
    plt.subplots_adjust(left=0.1,bottom=0.1, right=0.9, top=0.9,hspace=0.5, wspace=0.3)
    # Draw only the first augmented batch, then stop (see `break` below).
    for x_batch,y_batch in datagen.flow(x_train,y_train,batch_size = pics_num):
        for i in range(pics_num):
            pics_raw = x_batch[i]
            pics = array_to_img(pics_raw)
            ax = plt.subplot(pics_num//5, 5, i+1)
            ax.axis('off')
            # Title each tile with its class name.
            ax.set_title(labels['label_names'][np.argmax(y_batch[i])])
            plt.imshow(pics)
        plt.savefig("./processed_data.jpg")
        break
    print("Everything seems OK...")
# Manual smoke test: visualize 20 augmented CIFAR-10 images.
if __name__ == '__main__':
    testDataGenerator(20)
{
"api_name": "matplotlib.use",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 28,
"usage_type": "call"
},
{
"api_name": "keras.preprocessing.image.ImageDataGenerator",
"line_number": 42,
"usage_type... |
31130560486 | """
@Filename: core/testcase/test_login.py
@Author: 小蔡
@Time: 2022/09/28 20:30
@Describe: ...
"""
import json
import requests
# def img2code(file):
# url = "http://47.106.70.21:5516/auth/login"
#
# data = {
# "user": "pengjiangchun",
# "pass2": "3eb5f9a3a31fb000969e696d7c3cc71f", # 密码的md5值
# "sofid": "mashang", # 个人中心设置的软件id
# "codetype": 1902,
# }
# files = {"userfile": open(file, "rb")}
# resp = requests.post(url, data=data, files=files)
#
# res = resp.json()
# if res["err_no"] == 0:
# code = res["pic_str"]
# print("识别成功", code)
# return code
# else:
# print("识别失败")
# return False
def save_cookies(driver):
    """保存cookies到本地文件"""
    # Dump the driver's current cookies to temp/cookies.json as JSON.
    with open("temp/cookies.json", "w") as f:
        json.dump(driver.get_cookies(), f)
def load_cookies(driver):
    """从本地文件加载cookies

    Navigates to the dashboard page first (a page must be loaded before
    cookies can be attached), then best-effort applies the cookies saved
    by save_cookies() and refreshes so they are sent to the server.
    Missing or corrupt cookie files are ignored; the caller then falls
    back to a fresh login.
    """
    driver.get("https://scrm-uat.immotors.com/marketing/dataBoard/page?_tgt=fr")
    try:
        with open("temp/cookies.json") as f:
            cookies = json.loads(f.read())
        for cookie in cookies:
            driver.add_cookie(cookie)  # 使用cookies
        driver.refresh()  # 刷新页面,向服务器发送cookeis
    except Exception:
        # FIX: was a bare `except:` which also swallowed SystemExit and
        # KeyboardInterrupt; keep the deliberate best-effort behaviour
        # but only for ordinary exceptions.
        pass
def is_login(driver):
    """是否已经登录

    Returns False when the page still shows the password prompt,
    True otherwise.
    """
    if "请输入密码" in driver.page_source:
        print("需要登录")
        return False
    print("已经登录"+'\n')
    return True
| 15946859495/UI_frame | core/until/funcs.py | funcs.py | py | 1,602 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "json.dumps",
"line_number": 39,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 48,
"usage_type": "call"
}
] |
8926189595 | from django import forms
from django.contrib.auth.models import User
from django.contrib.auth import authenticate, login
from django.core.urlresolvers import reverse
from django.shortcuts import render, get_object_or_404
from django.contrib import messages
from django.http import HttpResponseRedirect
from users.models import UserProfile, get_randfile_path, UserFile
# Shared override of Django's default field error messages; the leading
# " -\t" presumably renders each message as a list item in the template.
my_default_errors = {
    'required': ' -\tThis field is required\n',
    'invalid': ' -\tEnter a valid value\n'
}
class UserForm(forms.ModelForm):
    """Profile edit form backed by UserProfile."""
    # NOTE(review): `description` is declared here but also listed in
    # Meta.exclude — confirm whether this extra, non-model field is
    # intended to appear on the form.
    description = forms.CharField(widget=forms.Textarea(attrs={'cols': 80, 'rows': 5}))

    class Meta:
        model = UserProfile
        # NOTE(review): written as a set literal; Django conventionally
        # uses a tuple/list here, though a set works for membership tests.
        exclude = {'user', 'description'}
class NewUser(forms.Form):
    """Registration form: checks username/email uniqueness, then creates
    the Django User."""
    username = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Username'}))
    first_name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'First name'}))
    last_name = forms.CharField(max_length=50, widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Last name'}))
    email = forms.EmailField(widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Email address'}))
    password = forms.CharField(max_length=50, widget=forms.PasswordInput(attrs={'class': 'form-control', 'placeholder': 'Password'}))

    def check_username(self):
        """Return the submitted username if it is free, else False.

        FIX: the original body contained `if User:` — a test of the model
        class itself, which is always truthy — making the intent opaque;
        the behaviour (False when a matching user exists) is now explicit.
        """
        try:
            User.objects.get(username=self.cleaned_data['username'])
        except User.DoesNotExist:
            return self.cleaned_data['username']
        return False

    def check_email(self):
        """Return the submitted email address if it is unused, else False."""
        try:
            User.objects.get(email=self.cleaned_data['email'])
        except User.DoesNotExist:
            return self.cleaned_data['email']
        return False

    def create_new_user(self):
        """Create and save a User once the uniqueness checks pass.

        Returns the new User, or None when the username or email is taken.
        FIX: previously a taken username/email (False) was passed straight
        to create_user, silently creating a broken account.
        """
        username = self.check_username()
        email = self.check_email()
        if not username or not email:
            return None
        new_user = User.objects.create_user(username=username, email=email, password=self.cleaned_data['password'])
        # Add the first and last name to the user
        new_user.first_name = self.cleaned_data['first_name']
        new_user.last_name = self.cleaned_data['last_name']
        # Save this new user in the database
        new_user.save()
        return new_user
# Form for logging in and authorizing a User
class LoginForm(forms.Form):
    """Username/password login form using the module-level error messages."""
    username = forms.CharField(error_messages=my_default_errors, max_length=50, widget=forms.TextInput(attrs={'class': 'form-control m-t-2', 'placeholder': 'Username'}))
    password = forms.CharField(error_messages=my_default_errors, max_length=50, widget=forms.PasswordInput(attrs={'class': 'form-control m-t-2', 'placeholder': 'Password'}))
class SearchForm(forms.Form):
    """Search filter form with fixed goal/age choice lists.

    The first entry of each choices tuple acts as the placeholder label.
    """
    AGES = (
        ('age', 'Select your age'),
        ('18-29', '18-29'),
        ('29-39', '29-39'),
        ('39-49', '39-49'),
        ('50+', '50+'),
    )
    GOALS = (
        ('goal', 'Select your goal'),
        ('lose_weight', 'Lose Weight'),
        ('gain_muscle', 'Gain Muscle'),
        ('gain_weight', 'Gain Weight'),
    )
    goal = forms.ChoiceField(widget=forms.Select(attrs={'class': 'form-control'}), choices=GOALS)
    age = forms.ChoiceField(widget=forms.Select(attrs={'class': 'form-control'}), choices=AGES)
# Form for updating Tagline, Profile, and Background Image
class UserInfo(forms.Form):
    """Profile-update form; process_and_save() persists the changes."""
    prof_img = forms.FileField(required=False, widget=forms.FileInput(attrs={'class': 'file-input', 'id': 'file1'}))
    bg_img = forms.FileField(required=False, widget=forms.FileInput(attrs={'class': 'file-input2', 'id': 'file2'}))
    location = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'e.g. Charlotte, NC'}))
    weight = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Weight in pounds e.g. 180'}))
    age = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Age in years e.g. 32'}))
    tagline = forms.CharField(widget=forms.TextInput(attrs={'class': 'form-control', 'placeholder': 'Briefly explain your goal for others to see'}))

    def process_and_save(self, username):
        """Copy the submitted values onto *username*'s UserProfile and save."""
        # Resolve the User and its associated UserProfile (404 if missing).
        user = get_object_or_404(User, username=username)
        userprofile = get_object_or_404(UserProfile, user=user.id)

        cleaned = self.cleaned_data
        bg_img = cleaned.get('bg_img', userprofile.bg_img)
        prof_img = cleaned.get('prof_img', userprofile.prof_img)
        tagline = cleaned.get('tagline', userprofile.tagline)
        location = cleaned.get('location', userprofile.location)
        weight = cleaned.get('weight', userprofile.weight)
        age = cleaned.get('age', userprofile.age)

        # Images are optional: only overwrite when a new file was uploaded
        # and it differs from the current one.
        if bg_img and bg_img != userprofile.bg_img:
            userprofile.bg_img = bg_img
        if prof_img and prof_img != userprofile.prof_img:
            userprofile.prof_img = prof_img
        # Text fields: overwrite whenever the submitted value changed.
        if tagline != userprofile.tagline:
            userprofile.tagline = tagline
        if weight != userprofile.weight:
            userprofile.weight = weight
        if age != userprofile.age:
            userprofile.age = age
        if location != userprofile.location:
            userprofile.location = location

        # Persist all changes in one save.
        userprofile.save()
class FileForm(forms.ModelForm):
    """Upload form backed by the UserFile model.

    NOTE(review): no `fields`/`exclude` is declared; Django >= 1.8 raises
    ImproperlyConfigured for this — confirm the targeted Django version.
    """
    class Meta:
        model = UserFile
class Del_Video(forms.Form):
    # Single checkbox confirming deletion of a video (see views).
    Delete = forms.BooleanField()


class Del_File(forms.Form):
    # Single checkbox confirming deletion of a file (see views).
    Delete = forms.BooleanField()


class Del_Photo(forms.Form):
    # Single checkbox confirming deletion of a photo (see views).
    Delete = forms.BooleanField()
{
"api_name": "django.forms.ModelForm",
"line_number": 15,
"usage_type": "attribute"
},
{
"api_name": "django.forms",
"line_number": 15,
"usage_type": "name"
},
{
"api_name": "django.forms.CharField",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "djang... |
36766820641 | """
HTTP client tool for Svom
Retries requests with power law delays and a max tries limit
@author: henri.louvin@cea.fr
"""
import threading
import time
import requests
import asyncio
import aiohttp
import signal
# Module-level logger shared by the client classes below.
import logging
log = logging.getLogger('http_client')
class HttpClient(threading.Thread):
    """
    Background thread that drives the asyncio event loop used by the
    asynchronous HTTP session.
    """

    def __init__(self, _http_client, loop):
        """Store the HTTP session and the event loop to run."""
        super().__init__()
        self._http_client = _http_client
        self.loop = loop

    # called by thread.start()
    def run(self):
        """Thread entry point: run the asyncio loop until it is stopped."""
        if self.loop.is_running():
            return
        log.debug("Starting async event loop")
        self.loop.run_forever()
class HttpIo:
    """
    (A)synchronous HTTP client.

    Sends GET/POST/PUT requests (sync via `requests`, async via `aiohttp`)
    with a shared retry policy: up to `max_tries` attempts, delayed by
    backoff_factor * 2**(attempt-1) seconds; any 2xx status is success.
    """
    def __init__(self, server_url, max_tries=5, backoff_factor=1, asynchronous=False, loop=None): #pylint: disable=R0913
        # Base URL with any trailing slash removed; endpoints join onto it.
        self.server_url = server_url.strip('/')
        self.max_tries = max_tries
        self.backoff_factor = backoff_factor
        self.loop = None
        self.async_session = None
        self.asynchronous = asynchronous
        if asynchronous is True:
            # Reuse the caller's loop when given, else the default loop.
            self.loop = loop if loop is not None else asyncio.get_event_loop()
            self.async_session = self._get_client(self.loop)
            # NOTE(review): installing a SIGINT handler only works from the
            # main thread — confirm HttpIo is always constructed there.
            signal.signal(signal.SIGINT, self.sigint_handler)
            # The event loop runs on a dedicated background thread.
            async_client = HttpClient(self.async_session, self.loop)
            async_client.start()

    def _get_client(self, loop):
        # aiohttp session factory, kept separate so subclasses can override.
        return aiohttp.ClientSession(loop=loop)

    def _compute_delay(self, tried):
        """ calculate delay in seconds as a power law """
        delay = self.backoff_factor * (2 ** (tried-1))
        return delay

    def _sync_request(self, req, url, **kwargs):
        """ Synchronous {req} request to {url} with data {kwargs}"""
        tried = 0
        response = None
        success = False
        log.info('%s request to %s: %s', req, url, kwargs)
        while success is False:
            if req == 'GET':
                response = requests.get(url, **kwargs)
            elif req == 'POST':
                response = requests.post(url, **kwargs)
            elif req == 'PUT':
                response = requests.put(url, **kwargs)
            else:
                exc = f"_sync_request() only handles 'GET', 'POST' and 'PUT' requests, not '{req}'"
                raise ValueError(exc)
            tried += 1
            # retrieve response.json(); non-JSON bodies keep the {} default
            json_resp = {}
            try:
                json_resp = response.json()
            except Exception:
                pass
            log.debug('Server %s response (%s): %s', req, response.status_code, json_resp)
            # accept all responses with status 2xx
            if int(response.status_code/100) == 2:
                log.info('%s request to %s successfull (%s)', req, url, response.status_code)
                success = True
            elif tried < self.max_tries:
                # Retry after the power-law delay.
                delay = self._compute_delay(tried)
                log.warning('%s request to %s failed (%s). Trying again in %ss',
                            req, url, response.status_code, delay)
                time.sleep(delay)
                log.debug('%s request (%sth try) to %s: %s', req, tried+1, url, kwargs)
            else:
                # Out of retries: log and return the last failed response.
                log.warning('%s request to %s failed (%s)', req, url, response.status_code)
                log.error('Last server %s response (%s): %s', req, response.status_code, json_resp)
                log.error('%s request to %s failed %s times. Aborting', req, url, self.max_tries)
                break
        # override response.json() so callers get the already-parsed body
        response.json = lambda: json_resp
        return response

    async def _async_request(self, req, url, tried, **kwargs):
        """ Asynchronous request to {url} with data {kwatgs}"""
        # delay request if needed (tried > 0 means this is a retry)
        if tried > 0:
            delay = self._compute_delay(tried)
            await asyncio.sleep(delay)
            log.debug('%s request (async, %sth try) to %s: %s', req, tried+1, url, kwargs)
        else:
            log.info('%s request (async) to %s: %s', req, url, kwargs)
        tried += 1
        try:
            response = None
            if req == 'GET':
                response = await self.async_session.get(url, **kwargs)
            elif req == 'POST':
                response = await self.async_session.post(url, **kwargs)
            elif req == 'PUT':
                response = await self.async_session.put(url, **kwargs)
            else:
                exc = f"_async_request() only handles 'GET', 'POST' and 'PUT' requests, not '{req}'"
                raise ValueError(exc)
            # retrieve response.json(); non-JSON bodies keep the {} default
            json_resp = {}
            try:
                json_resp = await response.json()
            except Exception:
                pass
            log.debug('Server %s response (%s): %s', req, response.status, json_resp)
            # accept all responses with status 2xx
            if int(response.status/100) == 2:
                log.info('%s request (async) to %s successfull (%s)', req, url, response.status)
                # override response.json() and mirror the sync interface
                # by exposing status_code
                response.json = lambda: json_resp
                response.status_code = response.status
            elif tried < self.max_tries:
                # Retry by recursing; the recursive call performs the sleep.
                delay = self._compute_delay(tried)
                log.warning('%s request (async) to %s failed (%s). Trying again in %ss',
                            req, url, response.status, delay)
                response = await self._async_request(req, url, tried, **kwargs)
            else:
                log.warning('%s request (async) to %s failed (%s)', req, url, response.status)
                log.error('Last server %s response (%s): %s', req, response.status, json_resp)
                # NOTE(review): 4 format placeholders but 5 args — the extra
                # response.status makes logging swallow a formatting error.
                log.error('%s request (async) to %s failed %s times. Aborting',
                          req, url, self.max_tries, response.status)
                # override response.json()
                response.json = lambda: json_resp
                response.status_code = response.status
        except Exception as exc:
            log.error('Exception caught: %s', exc)
            delay = self._compute_delay(tried)
            log.error('%s request (async) to %s failed. Aborting', req, url)
            raise
        return response

    def get(self, endpoint='/', **kwargs):
        """ GET request to endpoint {endpoint} with json data {data}"""
        url = f"{self.server_url}/{endpoint.strip('/')}"
        return self._sync_request('GET', url, **kwargs)

    def post(self, endpoint='/', **kwargs):
        """ POST request to endpoint {endpoint} with json data {data}"""
        url = f"{self.server_url}/{endpoint.strip('/')}"
        return self._sync_request('POST', url, **kwargs)

    def put(self, endpoint='/', **kwargs):
        """ PUT request to endpoint {endpoint} with json data {data}"""
        url = f"{self.server_url}/{endpoint.strip('/')}"
        return self._sync_request('PUT', url, **kwargs)

    async def async_get(self, endpoint='/', **kwargs):
        """ GET request to endpoint {endpoint} with json data {data}"""
        url = f"{self.server_url}/{endpoint.strip('/')}"
        return await self._async_request('GET', url, tried=0, **kwargs)

    async def async_post(self, endpoint='/', **kwargs):
        """ POST request to endpoint {endpoint} with json data {data}"""
        url = f"{self.server_url}/{endpoint.strip('/')}"
        return await self._async_request('POST', url, tried=0, **kwargs)

    async def async_put(self, endpoint='/', **kwargs):
        """ PUT request to endpoint {endpoint} with json data {data}"""
        url = f"{self.server_url}/{endpoint.strip('/')}"
        return await self._async_request('PUT', url, tried=0, **kwargs)

    def stop(self):
        """ close http session if asynchronous """
        if self.asynchronous is False:
            return
        log.debug('Stopping async client...')
        log.debug('Cancelling pending tasks...')
        # Cancel pending tasks
        # NOTE(review): asyncio.Task.all_tasks() was deprecated in 3.7 and
        # removed in Python 3.9 — this breaks on modern interpreters; use
        # asyncio.all_tasks() instead. Confirm the targeted Python version.
        for task in asyncio.Task.all_tasks():
            task.cancel()
        # Stop HTTP session properly
        future = asyncio.run_coroutine_threadsafe(self.async_session.close(),
                                                  loop=self.loop)
        # Wait for HTTP session to close then stop async loop
        try:
            log.debug('Waiting for aiohttp.ClientSession to close...')
            future.result()
            log.debug('Done. Stopping async event loop')
            self.loop.call_soon_threadsafe(self.loop.stop)
        except Exception as err:
            log.error(err)

    def sigint_handler(self, signum, frame):
        """
        stops gracefully, restore default signal handling
        and raises KeyboardInterrupt
        """
        self.stop()
        signal.signal(signal.SIGINT, signal.SIG_DFL)
        raise KeyboardInterrupt
| HSF/Crest | crestdb-client/python/cli/crest/io/httpio.py | httpio.py | py | 9,021 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "logging.getLogger",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "threading.Thread",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "threading.Thread.__init__",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "thr... |
# -*- coding: cp936 -*-
# Demo: three stacked subplots sharing one x range.
import matplotlib.pyplot as plt
import numpy as np

x = np.linspace(0, 2*np.pi, 50)
plt.subplot(3, 1, 1)  # (rows, cols, active subplot index)
plt.plot(x, np.sin(x), 'r')
plt.subplot(3, 1, 2)
plt.plot(x, np.cos(x), 'g')
plt.subplot(3, 1, 3)
plt.plot(x, np.tan(x), 'b')
plt.show()
# -*- coding: cp936 -*-
# Demo: two curves on one axes with markers, legend, labels and a title.
import matplotlib.pyplot as plt
import numpy as np

x= np.linspace(0, 2 * np.pi, 50)
plt.plot(x, np.sin(x), 'r-x', label='Sin(x)')
plt.plot(x, np.cos(x), 'g-*', label='Cos(x)')
plt.legend()  # show the legend
plt.xlabel('Rads')  # label the x axis
plt.ylabel('Amplitude')  # label the y axis
plt.title('Sin and Cos Waves')  # add a figure title
plt.show()
# -*- coding: cp936 -*-
# Demo: a 3x3 grid showcasing common plot kinds, including plotting
# directly from pandas objects.
import numpy as np
import pandas as pd
from pandas import Series, DataFrame
import matplotlib.pyplot as plt

fig = plt.figure(figsize=(10,8))  # create a 10x8 inch figure
ax1 = fig.add_subplot(3,3,1)  # add subplot 1 of a 3x3 grid
ax2 = fig.add_subplot(3,3,2)
ax3 = fig.add_subplot(3,3,3)
ax4 = fig.add_subplot(3,3,4)
ax5 = fig.add_subplot(3,3,5)
ax6 = fig.add_subplot(3,3,6)
ax7 = fig.add_subplot(3,3,7)
ax8 = fig.add_subplot(3,3,8)
ax9 = fig.add_subplot(3,3,9)

ax1.plot(np.random.randn(10))
ax2.scatter(np.random.randn(10),np.arange(10),color='r')  # scatter plot
ax3.hist(np.random.randn(20),bins=10,alpha=0.3)  # histogram
ax4.bar(np.arange(10),np.random.randn(10))  # bar chart
ax5.pie(np.random.randint(1,15,5),explode=[0,0,0.2,0,0])  # pie chart
x = np.arange(10)
y = np.random.randn(10)
# Overlay a line plot and a bar chart on the same axes.
ax6.plot(x,y,color='green')
ax6.bar(x,y,color='k')
data = DataFrame(np.random.randn(1000,10),
                 columns=['one','two','three','four','five','six','seven','eight','nine','ten'])
data2 = DataFrame(np.random.randint(0,20,(10,2)),columns=['a','b'])
data.plot(x='one',y='two',kind='scatter',ax=ax7)  # DataFrame plotting examples
data2.plot(x='a',y='b',kind='bar',ax=ax8,color='red',legend=False)
data2.plot(x='a',y='b',kind='barh',color='m',ax=ax9)
plt.tight_layout()  # avoid overlapping subplots
plt.show()
# Demo: grouped bar chart with error bars and value labels (the classic
# matplotlib "barchart" example).
import numpy as np
import matplotlib.pyplot as plt

N = 5
menMeans = (20, 35, 30, 35, 27)
menStd = (2, 3, 4, 1, 2)

ind = np.arange(N)  # the x locations for the groups
width = 0.35        # the width of the bars

fig, ax = plt.subplots()
rects1 = ax.bar(ind, menMeans, width, color='r', yerr=menStd)

womenMeans = (25, 32, 34, 20, 25)
womenStd = (3, 5, 2, 3, 3)
# Second series, shifted right by one bar width.
rects2 = ax.bar(ind+width, womenMeans, width, color='y', yerr=womenStd)

# add some text for labels, title, tick labels and legend
ax.set_ylabel('Scores')
ax.set_title('Scores by group and gender')
ax.set_xticks(ind+width)
ax.set_xticklabels( ('G1', 'G2', 'G3', 'G4', 'G5') )
ax.legend( (rects1[0], rects2[0]), ('Men', 'Women') )


def autolabel(rects):
    # attach a text label just above each bar showing its height
    for rect in rects:
        height = rect.get_height()
        ax.text(rect.get_x()+rect.get_width()/2., 1.05*height, '%d'%int(height),
                ha='center', va='bottom')

autolabel(rects1)
autolabel(rects2)

plt.show()
# -*- coding: cp936 -*-
import matplotlib.pyplot as plt
import numpy as np

# Color-mapped scatter plot: point sizes and colors both random.
x = np.random.rand(1000)
y = np.random.rand(1000)
size = np.random.rand(1000) * 5
colour = np.random.rand(1000)
plt.scatter(x, y, size, colour)
plt.colorbar()
plt.show()
{
"api_name": "numpy.linspace",
"line_number": 4,
"usage_type": "call"
},
{
"api_name": "numpy.pi",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.subplot",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "matplotlib.pyplot... |
72869151394 | # This script will create all you need for a hunt group and tie it all together
# This creates a Line Group, Hunt List, and Pilot
# There's also logic to specify if you want calls to rona to a VM box
# If you want calls to rona to another number other than a VM box you will need to tweak the code a bit
from lxml import etree
from requests import Session
from requests.auth import HTTPBasicAuth
from zeep import Client, Settings, Plugin, xsd
from zeep.transports import Transport
from zeep.exceptions import Fault
import sys
import urllib3
import random
# Edit .env file to specify your Webex site/user details
import os
from dotenv import load_dotenv
load_dotenv()
# Change to true to enable output of request/response headers and XML
DEBUG = False
# The WSDL is a local file in the working directory, see README
# WSDL_FILE = 'schema/AXLAPI.wsdl'
WSDL_FILE = '<your wsdl file path here>'
# This class lets you view the incoming and outgoing http headers and XML
class MyLoggingPlugin( Plugin ):
    """zeep plugin that pretty-prints every outgoing SOAP request and
    incoming response (HTTP headers plus XML body) for debugging."""
    def egress( self, envelope, http_headers, operation, binding_options ):
        """Called by zeep just before a request is sent."""
        # Format the request body as pretty printed XML
        xml = etree.tostring( envelope, pretty_print = True, encoding = 'unicode')
        print( f'\nRequest\n-------\nHeaders:\n{http_headers}\n\nBody:\n{xml}' )
    def ingress( self, envelope, http_headers, operation ):
        """Called by zeep when a response arrives."""
        # Format the response body as pretty printed XML
        xml = etree.tostring( envelope, pretty_print = True, encoding = 'unicode')
        print( f'\nResponse\n-------\nHeaders:\n{http_headers}\n\nBody:\n{xml}' )
# The first step is to create a SOAP client session
session = Session()
# We avoid certificate verification by default
# And disable insecure request warnings to keep the output clear
session.verify = False
urllib3.disable_warnings( urllib3.exceptions.InsecureRequestWarning )
# To enabled SSL cert checking (recommended for production)
# place the CUCM Tomcat cert .pem file in the root of the project
# and uncomment the line below
# session.verify = 'changeme.pem'
# Add Basic Auth credentials
session.auth = HTTPBasicAuth( os.getenv( 'AXL_USERNAME' ), os.getenv( 'AXL_PASSWORD' ) )
# Create a Zeep transport and set a reasonable timeout value
transport = Transport( session = session, timeout = 10 )
# strict=False is not always necessary, but it allows zeep to parse imperfect XML
settings = Settings( strict = False, xml_huge_tree = True )
# If debug output is requested, add the MyLoggingPlugin callback
plugin = [ MyLoggingPlugin() ] if DEBUG else [ ]
# Create the Zeep client with the specified settings
client = Client( WSDL_FILE, settings = settings, transport = transport,
plugins = plugin )
# Create the Zeep service binding to AXL at the specified CUCM
service = client.create_service( '{http://www.cisco.com/AXLAPIService/}AXLAPIBinding',
f'https://{os.getenv( "CUCM_ADDRESS" )}:8443/axl/' )
print('')
print('Add Hunt Group Script')
print('')
siteCode=input('Enter 4 Digit Site Code: ') # you can change siteCode to what you want, like huntGroupName maybe
locationType=input('Enter Location Type: ') # optional, you can remove this and just use site sideCode
globalName=(f'{siteCode}_{locationType}')
print('')
print('Department name would be like Front_Office or Cash_Room')
print('DO NOT include any spaces in name')
print('')
deptName=input('Enter Dept Name: ')
print('')
extension=input('Enter Pilot Extension: ')
print('')
# You need to have a RONA location. We can't let calls ring forever
ronaToNumber=input('Enter where calls RONA to: ')
print('')
# Checks to see if RONA is a voicemail so it knows to set the *
# Ask whether the RONA target is a Unity voicemail box, re-prompting until a
# valid answer is given. Bug fix: the first answer used to be discarded by an
# unconditional input() at the top of the loop, so the user was always asked twice.
isVM = input('Is RONA number a Unity VM box? (Y/N): ')
while True:
    if isVM.upper() == 'Y':
        # Unity VM boxes are dialed with a leading '*' and need the mask set
        ronaTo = ( '*' + ronaToNumber )
        mask = ( ronaToNumber )
        break
    elif isVM.upper() == 'N':
        ronaTo = ( ronaToNumber )
        mask = ('')
        break
    else:
        print('Invalid selection, please try again.')
        isVM = input('Yes or No: ')
# This will do its best to pick a CM Group to help maintain balance between active subscribers
# if you only have 1 active subscriber with 1 backup, you can remove this code and just specify your CM group in the
# hunt list creation part by putting cucmGroup=('<your cucm group'>) above with the user input variables
# Optional Begin
rando=random.randint(1,1000)
iRemainder = (rando % 2)
if iRemainder == 0:
cucmGroup = '<enter your CM group here>'
else:
cucmGroup = '<enter your CM group here>'
# Optional End
# Create Line Group
lineGroup = {
'name': ( f'{globalName}_{deptName}_LG' ),
'distributionAlgorithm': 'Broadcast',
'rnaReversionTimeOut': '32',
'huntAlgorithmNoAnswer': 'Try next member; then, try next group in Hunt List',
'huntAlgorithmBusy': 'Try next member; then, try next group in Hunt List',
'huntAlgorithmNotAvailable': 'Try next member; then, try next group in Hunt List'
}
# Execute the addLineGroup request
try:
resp = service.addLineGroup( lineGroup )
except Fault as err:
print( f'Zeep error: addLineGroup: { err }' )
sys.exit( 1 )
print( '\naddLineGroup response:\n' )
print( resp,'\n' )
# Create Hunt List
huntList = {
'name': ( f'{globalName}_{deptName}_HL' ),
'description': ( f'{globalName}_{deptName}_HL'),
'callManagerGroupName': (cucmGroup),
'routeListEnabled': 'true',
'voiceMailUsage': 'true',
'members':
{
'member':
{
'lineGroupName': ( f'{globalName}_{deptName}_LG' ),
'selectionOrder': "1"
}
}
}
# Execute the addHuntList request
try:
resp = service.addHuntList( huntList )
except Fault as err:
print( f'Zeep error: addHuntList: { err }' )
sys.exit( 1 )
print( '\naddHuntList response:\n' )
print( resp,'\n' )
# Create Pilot Number
huntPilot = {
'pattern': (extension),
'description': ( f'{globalName}_{deptName}_Pilot' ),
'routePartitionName': ('<your partition here>'),
'patternUrgency': 'false',
'patternPrecedence': '',
'provideOutsideDialtone': 'false',
'huntListName': ( f'{globalName}_{deptName}_HL' ),
'alertingName': ( '""' + globalName + '_' + deptName + '""'), # need "" quotes because in CUCM 12.5x hunt group name doesn't pass to clients phones
'asciiAlertingName': ( f'{globalName}_{deptName}' ),
'maxHuntduration': '32',
'blockEnable': 'false',
'useCallingPartyPhoneMask': '',
# if site needs to route calls that are busy or no answer to another destination, need to include the following and set destination
'forwardHuntNoAnswer':
{
'usePersonalPreferences': 'false',
'destination': ( ronaTo ),
'callingSearchSpaceName': ( '<your calling search space here>' )
},
'forwardHuntBusy':
{
'usePersonalPreferences': 'false',
'destination': ( ronaTo ),
'callingSearchSpaceName': ( '<your calling search space here>' )
},
'calledPartyTransformationMask': ( mask )
# if site is sending calls to VM that does NOT = pilot number then you need to include the following and enter the VM Extension
# 'calledPartyTransformationMask': (ronaTo)
}
# Execute the addHuntPilot request
try:
resp = service.addHuntPilot( huntPilot )
except Fault as err:
print( f'Zeep error: addHuntPilot: { err }' )
sys.exit( 1 )
print( '\naddHuntPilot response:\n' )
print( resp,'\n' ) | jfletcher76/CiscoDEVUC | CUCM/addRequests/addHuntGroup.py | addHuntGroup.py | py | 7,503 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "dotenv.load_dotenv",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "zeep.Plugin",
"line_number": 30,
"usage_type": "name"
},
{
"api_name": "lxml.etree.tostring",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "lxml.etree",
"... |
72387942114 | """
Script to install Souma on OsX, Windows, and Unix
Usage:
python package.py py2app
python package.py py2exe
"""
import ez_setup
import numpy # important for py2exe to work
ez_setup.use_setuptools()
import sys
import os
from esky.bdist_esky import Executable
from setuptools import setup
if sys.platform == 'win32':
import py2exe
APP = ['run.py', ]
""" Read current version identifier as recorded in `souma/__init__.py` """
with open("__init__.py", 'rb') as f:
VERSION = f.readline().split("=")[1].strip().replace('"', '')
""" Compile list of static files """
with open(".gitignore") as f:
ignorefiles = [l.strip() for l in f.readlines()]
DATA_FILES = [('', ['__init__.py'])]
for datadir in ['templates', 'static']:
for root, dirs, files in os.walk(datadir):
DATA_FILES.append((root, [os.path.join(root, fn) for fn in files if fn not in ignorefiles]))
""" Modules imported using import() need to be manually specified here """
INCLUDES = [
"web_ui",
"argparse",
"jinja2.ext",
"sqlalchemy.dialects.sqlite",
"sqlalchemy.ext.declarative",
"wtforms.ext",
"wtforms.ext.csrf",
"flask",
"flask_sqlalchemy",
"flask.views",
"flask.signals",
"flask.helpers",
"flask.ext",
"flaskext",
"flaskext.uploads",
"flask_misaka",
"flask_wtf",
"sqlalchemy.orm",
"sqlalchemy.event",
"sqlalchemy.ext.declarative",
"sqlalchemy.engine.url",
"sqlalchemy.connectors.mxodbc",
"sqlalchemy.connectors.mysqldb",
"sqlalchemy.connectors.zxJDBC",
"sqlalchemy.dialects.sqlite.base",
"sqlalchemy.dialects.sybase.base",
"sqlalchemy.dialects.sybase.mxodbc",
"sqlalchemy.engine.base",
"sqlalchemy.engine.default",
"sqlalchemy.engine.interfaces",
"sqlalchemy.engine.reflection",
"sqlalchemy.engine.result",
"sqlalchemy.engine.strategies",
"sqlalchemy.engine.threadlocal",
"sqlalchemy.engine.url",
"sqlalchemy.engine.util",
"sqlalchemy.event.api",
"sqlalchemy.event.attr",
"sqlalchemy.event.base",
"sqlalchemy.event.legacy",
"sqlalchemy.event.registry",
"sqlalchemy.events",
"sqlalchemy.exc",
"sqlalchemy.ext.associationproxy",
"sqlalchemy.ext.automap",
"sqlalchemy.ext.compiler",
"sqlalchemy.ext.declarative.api",
"sqlalchemy.ext.declarative.base",
"sqlalchemy.ext.declarative.clsregistry",
"sqlalchemy.ext.horizontal_shard",
"sqlalchemy.ext.hybrid",
"sqlalchemy.ext.instrumentation",
"sqlalchemy.ext.mutable",
"sqlalchemy.ext.orderinglist",
"sqlalchemy.ext.serializer",
"sqlalchemy.inspection",
"sqlalchemy.interfaces",
"sqlalchemy.log",
"sqlalchemy.orm.attributes",
"sqlalchemy.orm.base",
"sqlalchemy.orm.collections",
"sqlalchemy.orm.dependency",
"sqlalchemy.orm.deprecated_interfaces",
"sqlalchemy.orm.descriptor_props",
"sqlalchemy.orm.dynamic",
"sqlalchemy.orm.evaluator",
"sqlalchemy.orm.events",
"sqlalchemy.orm.exc",
"sqlalchemy.orm.identity",
"sqlalchemy.orm.instrumentation",
"sqlalchemy.orm.interfaces",
"sqlalchemy.orm.loading",
"sqlalchemy.orm.mapper",
"sqlalchemy.orm.path_registry",
"sqlalchemy.orm.persistence",
"sqlalchemy.orm.properties",
"sqlalchemy.orm.query",
"sqlalchemy.orm.relationships",
"sqlalchemy.orm.scoping",
"sqlalchemy.orm.session",
"sqlalchemy.orm.state",
"sqlalchemy.orm.strategies",
"sqlalchemy.orm.strategy_options",
"sqlalchemy.orm.sync",
"sqlalchemy.orm.unitofwork",
"sqlalchemy.orm.util",
"sqlalchemy.pool",
"sqlalchemy.processors",
"sqlalchemy.schema",
"sqlalchemy.sql.annotation",
"sqlalchemy.sql.base",
"sqlalchemy.sql.compiler",
"sqlalchemy.sql.ddl",
"sqlalchemy.sql.default_comparator",
"sqlalchemy.sql.dml",
"sqlalchemy.sql.elements",
"sqlalchemy.sql.expression",
"sqlalchemy.sql.functions",
"sqlalchemy.sql.naming",
"sqlalchemy.sql.operators",
"sqlalchemy.sql.schema",
"sqlalchemy.sql.selectable",
"sqlalchemy.sql.sqltypes",
"sqlalchemy.sql.type_api",
"sqlalchemy.sql.util",
"sqlalchemy.sql.visitors",
"sqlalchemy.types",
"sqlalchemy.util._collections",
"sqlalchemy.util.compat",
"sqlalchemy.util.deprecations",
"sqlalchemy.util.langhelpers",
"sqlalchemy.util.queue",
"sqlalchemy.util.topological",
"flask_sqlalchemy._compat",
"gzip",
"gevent",
"gevent.core",
"logging",
"Crypto",
"Crypto.Hash"
]
# might need to explicitly include dll:
# data_files=[('.', 'libmmd.dll')
# also:
# http://stackoverflow.com/questions/10060765/create-python-exe-without-msvcp90-dll
WIN_OPTIONS = {
"dist_dir": "../dist",
"includes": INCLUDES,
"iconfile": "static/images/icon_win.ico",
"packages": ["nucleus", "web_ui", "synapse", "requests"],
"dll_excludes": [],
'bundle_files': 1
}
DARWIN_OPTIONS = {
"argv_emulation": True,
"bdist_base": "../build",
"dist_dir": "../dist",
"iconfile": "static/images/icon_osx.icns",
"includes": INCLUDES,
"packages": ["nucleus", "web_ui", "synapse", "requests"],
"site_packages": True,
"plist": {
"CFBundleVersion": VERSION,
"LSBackgroundOnly": True,
"LSUIElement": True
},
}
""" Platform specific options """
if sys.platform == 'darwin':
# Compile .less files
filenames = ["main", ]
for fn in filenames:
rv = os.system("touch ./static/css/{}.css".format(fn))
rv += os.system("lesscpy ./static/css/{fn}.less > ./static/css/{fn}.css".format(fn=fn))
""" Patch gevent implicit loader """
patched = False
with open("../lib/python2.7/site-packages/gevent/os.py", "r+") as f:
patch = "\n# make os.path available here\nmy_os = __import__('os')\npath = my_os.path\n"
for line in f.readlines():
if line == "# make os.path available here":
patched = True
if not patched:
f.write(patch)
""" Setup Esky Executable """
exe = Executable("run.py",
description="Souma App",
gui_only=True,
icon=DARWIN_OPTIONS["iconfile"],
name="run")
extra_options = dict(
setup_requires=['py2app'],
app=['run.py'],
options=dict(
bdist_esky=dict(
freezer_module="py2app",
freezer_options=DARWIN_OPTIONS
)
),
scripts=[exe, ]
)
install_requires = open('requirements.txt').read()
elif sys.platform == 'win32':
""" Setup Esky Executable """
exe = Executable("run.py",
description="Souma App",
gui_only=True,
icon=WIN_OPTIONS["iconfile"],
name="run")
extra_options = dict(
setup_requires=['py2exe'],
options=dict(
bdist_esky=dict(
freezer_module="py2exe",
freezer_options=WIN_OPTIONS
)
),
scripts=[exe, ],
zipfile=None
)
#Some little hacks for making py2exe work
#Create empty __init__.py in flaskext directory
#so py2exe recognizes it as module
import flaskext
try:
flaskext.__file__
except:
flaskext_init = open(flaskext.__path__[0] + '\\__init__.py', 'w')
flaskext_init.close()
with open('requirements.txt') as f:
install_requires = [req.strip() for req in f.readlines()]
else:
extra_options = dict(
scripts=APP)
install_requires = open('requirements.txt').read()
setup(
name="Souma",
version=VERSION,
author="Cognitive Networks Group",
author_email="team@souma.io",
url="https://github.com/ciex/souma/",
packages=["nucleus", "web_ui", "synapse"],
data_files=DATA_FILES,
license="Apache License 2.0",
description="A Cognitive Network for Groups",
long_description=open("README.md").read(),
install_requires=install_requires,
**extra_options
)
| cafca/souma | package.py | package.py | py | 7,972 | python | en | code | 5 | github-code | 1 | [
{
"api_name": "ez_setup.use_setuptools",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "os.walk",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "os.path.join",
... |
71928968673 | import os
from pdfRecognition import pdfRecognition
import fitz
class pdfToPic():
    """Render each page of a PDF to a PNG, run barcode recognition on the
    image, and rename the per-page PDF that the recognizer produces."""

    def __init__(self, filepath, folderPath):
        # filepath: source PDF; folderPath: output directory for images/PDFs
        self.filepath = filepath
        self.folderPath = folderPath

    def pdfToPic(self):
        pdfDoc = fitz.open(self.filepath)
        try:
            for pg in range(pdfDoc.page_count):
                page = pdfDoc[pg]
                rotate = int(0)
                # ~1.3333 zoom renders at 96 dpi (72 dpi * 4/3).
                zoom_x = 1.33333333
                zoom_y = 1.33333333
                mat = fitz.Matrix(zoom_x, zoom_y).prerotate(rotate)
                pix = page.get_pixmap(matrix=mat, alpha=False)
                if not os.path.exists(self.folderPath):
                    os.makedirs(self.folderPath)
                fileName = self.folderPath + '/' + '%s.png' % pg
                pix.save(fileName)
                # NOTE(review): pdfRecognition is assumed to write '<pg>.pdf'
                # into folderPath and return the new base name — confirm
                # against pdfRecognition's source.
                pdfNewName = pdfRecognition(fileName, self.folderPath).BarodeIdentification()
                os.rename(self.folderPath + '/' + '%s.pdf' % pg, self.folderPath + '/' + pdfNewName + ".pdf")
                os.remove(fileName)  # the intermediate PNG is no longer needed
        finally:
            # Bug fix: the document handle was previously never closed.
            pdfDoc.close()
| Guanguanka/pdfTool | pdfToPic.py | pdfToPic.py | py | 979 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "fitz.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "fitz.Matrix",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 20,... |
71730197474 | import shutil
import os
import glob2
from tqdm import tqdm
train_path = '../data/train/'
test_path = '../data/test/'

# Make sure both destination directories exist (train first, then test).
for destination in (train_path, test_path):
    if not os.path.exists(destination):
        os.makedirs(destination)

def _flatten_copy(pattern, destination):
    """Copy every file matched by `pattern` into `destination` (flat layout),
    returning how many files were copied."""
    copied = 0
    for filename in tqdm(glob2.glob(pattern)):
        fname = str(filename).split('/')[-1]
        shutil.copy(str(filename), os.path.join(destination, fname))
        copied += 1
    return copied

test_num = _flatten_copy('../data/dicom-images-test/**/*.dcm', test_path)
print('Moved ' + str(test_num) + ' files!' )
train_num = _flatten_copy('../data/dicom-images-train/**/*.dcm', train_path)
print('Moved ' + str(train_num) + ' files!' )
{
"api_name": "os.path.exists",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "os.makedirs",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "os.path.exists",
"line_numb... |
36997759739 | import sqlite3
from contextlib import closing
from typing import List, Dict, Union
table_name = "bot"
DBEntry = Dict[str, Union[str, int]]
def add_to_database(db_name: str, data: List[DBEntry]) -> int:
    """Bulk-insert `data` into the table and return its total row count."""
    conn = sqlite3.connect(db_name)
    with closing(conn):
        cur = conn.cursor()
        # The table is created lazily on first use.
        cur.execute("CREATE TABLE IF NOT EXISTS {} \
            (word1 TEXT, word2 TEXT, word3 TEXT, id INTEGER, user TEXT);".format(table_name))
        # Insert every entry in a single executemany round-trip.
        insert_sql = "INSERT INTO {} (word1, word2, word3, id, user)\
            VALUES (?, ?, ?, ?, ?);".format(table_name)
        rows = [
            (entry["word1"], entry["word2"], entry["word3"], entry["id"], entry["user"])
            for entry in data
        ]
        cur.executemany(insert_sql, rows)
        conn.commit()
        # Report how many rows the table now holds in total.
        cur.execute("SELECT * FROM {}".format(table_name))
        return len(cur.fetchall())
def get_latest_tweet(db_name: str) -> int:
    """Return the largest stored tweet id, or 0 when the table is empty.

    Creates the table on first use, like the other helpers in this module.
    """
    connection = sqlite3.connect(db_name)
    with closing(connection):
        cursor = connection.cursor()
        # create table
        cursor.execute("CREATE TABLE IF NOT EXISTS {} \
            (word1 TEXT, word2 TEXT, word3 TEXT, id INTEGER, user TEXT);".format(table_name))
        max_sql = "SELECT MAX(id) FROM {};".format(table_name)
        cursor.execute(max_sql)
        # An aggregate query always yields exactly one row; MAX(id) is NULL
        # (None) for an empty table. The old `len(result) == 0` branch was
        # therefore unreachable and has been removed.
        row = cursor.fetchone()
        return 0 if row[0] is None else row[0]
def get_db_entries(db_name: str) -> List[DBEntry]:
    """Return every row of the table as a list of DBEntry dicts."""
    word_blocks = []
    connection = sqlite3.connect(db_name)
    with closing(connection):
        cursor = connection.cursor()
        # create table (lazily, so a fresh database file also works)
        cursor.execute("CREATE TABLE IF NOT EXISTS {} \
            (word1 TEXT, word2 TEXT, word3 TEXT, id INTEGER, user TEXT);".format(table_name))
        # NOTE(review): despite its name, max_sql is a plain SELECT of all rows.
        max_sql = "SELECT * FROM {};".format(table_name)
        cursor.execute(max_sql)
        # Re-shape each (word1, word2, word3, id, user) tuple into a dict.
        for (w1, w2, w3, i, u) in cursor.fetchall():
            word_blocks.append({
                "word1": w1,
                "word2": w2,
                "word3": w3,
                "id": i,
                "user": u
            })
        return word_blocks
def delete_db_entries_by_id(db_name: str, tweet: int) -> bool:
    """Delete every row whose id equals `tweet`; True when none remain."""
    conn = sqlite3.connect(db_name)
    with closing(conn):
        cur = conn.cursor()
        # The table is created lazily on first use.
        cur.execute("CREATE TABLE IF NOT EXISTS {} \
            (word1 TEXT, word2 TEXT, word3 TEXT, id INTEGER, user TEXT);".format(table_name))
        delete_sql = "DELETE FROM {} WHERE (id) = (?);".format(table_name)
        cur.execute(delete_sql, (str(tweet), ))
        conn.commit()
        # Verify the delete actually removed every matching row.
        check_sql = "SELECT * FROM {} WHERE (id) = (?);".format(table_name)
        cur.execute(check_sql, (str(tweet), ))
        remaining = cur.fetchall()
        return not remaining
def clear_db(db_name: str) -> bool:
    """Delete every row from the table; return True if it is empty afterwards."""
    connection = sqlite3.connect(db_name)
    with closing(connection):
        cursor = connection.cursor()
        # create table (so clearing a fresh database file is a no-op, not an error)
        cursor.execute("CREATE TABLE IF NOT EXISTS {} \
            (word1 TEXT, word2 TEXT, word3 TEXT, id INTEGER, user TEXT);".format(table_name))
        delete_sql = "DELETE FROM {};".format(table_name)
        cursor.execute(delete_sql)
        connection.commit()
        # Confirm the table really is empty before reporting success.
        check_sql = "SELECT * FROM {}".format(table_name)
        cursor.execute(check_sql)
        return len(cursor.fetchall()) == 0
| quietsato/TwitterBotPy | botkun/lib/database.py | database.py | py | 3,521 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "typing.Dict",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.Union",
"line_number": 7,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "sqlite3.connect",
"line_number... |
29550588888 | import video
import funkcijos
import plotly.graph_objects as go
import math
dataList = list()
eUploader = 0
eAge = 0
eCategory = 0
eLength = 0
eViews = 0
eRate = 0
eRatings = 0
eComments = 0
atributeNames = list()
missingValues = list()
avarages = list()
medians = list()
mins = list()
maxs = list()
cardinalities = list()
quartiles1st = list()
quartiles2nd = list()
standartDeviations = list()
atributeNames2 = list()
missingValues2 = list()
cardinalities2 = list()
modes2nd = list()
modeRates = list()
modePercents = list()
modes = list()
mode2ndRates = list()
mode2ndPercents = list()
def ReadData():
    """Parse 'Good data with commas.csv' into dataList, counting missing fields.

    Each module-level e* counter records how many rows had an empty value in
    the corresponding column (uploader, age, category, length, views, rate,
    ratings, comments — in that order).
    """
    global eUploader, eAge, eCategory, eLength, eViews, eRate, eRatings, eComments
    # 'with' guarantees the file is closed even if a row fails to parse
    # (the original opened and closed the handle manually).
    with open("Good data with commas.csv", "r") as dataFile:
        for line in dataFile:
            wordList = line.split(",")
            # An empty string in a column means the value is missing.
            if not wordList[0]: eUploader += 1
            if not wordList[1]: eAge += 1
            if not wordList[2]: eCategory += 1
            if not wordList[3]: eLength += 1
            if not wordList[4]: eViews += 1
            if not wordList[5]: eRate += 1
            if not wordList[6]: eRatings += 1
            if not wordList[7]: eComments += 1
            dataList.append(video.Video(wordList[0], wordList[1], wordList[2], wordList[3],
                                        wordList[4], wordList[5], wordList[6], wordList[7]))
ReadData()
def ProcessAge():
    """Compute summary statistics for the 'age' attribute and append one
    entry per statistic to the module-level result lists.

    NOTE(review): ProcessLength/Views/Rate/Ratings/Comments are copies of
    this function with only the attribute swapped.
    """
    global atributeNames
    global missingValues
    global avarages
    global medians
    global mins
    global maxs
    global cardinalities
    global quartiles1st
    global quartiles2nd
    global standartDeviations
    valueList = list()
    valueSet = set()
    # Collect non-missing ages; the set yields the attribute's cardinality.
    for x in dataList:
        if x.age is not None:
            valueList.append(x.age)
            valueSet.add(x.age)
    atributeNames.append('Age')
    # Percentage of rows in which the age field was empty.
    missingValues.append(100 / (len(valueList) + eAge) * eAge)
    avarages.append(funkcijos.Funkcijos.Vidurkis(None, valueList))
    medians.append(funkcijos.Funkcijos.Mediana(None, valueList))
    mins.append(funkcijos.Funkcijos.Minimum(None, valueList))
    maxs.append(funkcijos.Funkcijos.Maximum(None, valueList))
    cardinalities.append(len(valueSet))
    # Quartiles() appears to return (Q1, Q3) — confirm against funkcijos.
    quartiles = funkcijos.Funkcijos.Quartiles(None, valueList)
    quartiles1st.append(quartiles[0])
    quartiles2nd.append(quartiles[1])
    standartDeviations.append(funkcijos.Funkcijos.StandartDeviation(None,valueList))
def ProcessLength():
global atributeNames
global missingValues
global avarages
global medians
global mins
global maxs
global cardinalities
global quartiles1st
global quartiles2nd
global standartDeviations
valueList = list()
valueSet = set()
for x in dataList:
if x.length is not None:
valueList.append(x.length)
valueSet.add(x.length)
atributeNames.append('Length')
missingValues.append(100 / (len(valueList) + eLength) * eLength)
avarages.append(funkcijos.Funkcijos.Vidurkis(None, valueList))
medians.append(funkcijos.Funkcijos.Mediana(None, valueList))
mins.append(funkcijos.Funkcijos.Minimum(None, valueList))
maxs.append(funkcijos.Funkcijos.Maximum(None, valueList))
cardinalities.append(len(valueSet))
quartiles = funkcijos.Funkcijos.Quartiles(None, valueList)
quartiles1st.append(quartiles[0])
quartiles2nd.append(quartiles[1])
standartDeviations.append(funkcijos.Funkcijos.StandartDeviation(None,valueList))
def ProcessViews():
global atributeNames
global missingValues
global avarages
global medians
global mins
global maxs
global cardinalities
global quartiles1st
global quartiles2nd
global standartDeviations
valueList = list()
valueSet = set()
for x in dataList:
if x.views is not None:
valueList.append(x.views)
valueSet.add(x.views)
atributeNames.append('Views')
missingValues.append(100 / (len(valueList) + eViews) * eViews)
avarages.append(funkcijos.Funkcijos.Vidurkis(None, valueList))
medians.append(funkcijos.Funkcijos.Mediana(None, valueList))
mins.append(funkcijos.Funkcijos.Minimum(None, valueList))
maxs.append(funkcijos.Funkcijos.Maximum(None, valueList))
cardinalities.append(len(valueSet))
quartiles = funkcijos.Funkcijos.Quartiles(None, valueList)
quartiles1st.append(quartiles[0])
quartiles2nd.append(quartiles[1])
standartDeviations.append(funkcijos.Funkcijos.StandartDeviation(None,valueList))
def ProcessRate():
global atributeNames
global missingValues
global avarages
global medians
global mins
global maxs
global cardinalities
global quartiles1st
global quartiles2nd
global standartDeviations
valueList = list()
valueSet = set()
for x in dataList:
if x.rate is not None:
valueList.append(x.rate)
valueSet.add(x.rate)
atributeNames.append('Rate')
missingValues.append(100 / (len(valueList) + eRate) * eRate)
avarages.append(funkcijos.Funkcijos.Vidurkis(None, valueList))
medians.append(funkcijos.Funkcijos.Mediana(None, valueList))
mins.append(funkcijos.Funkcijos.Minimum(None, valueList))
maxs.append(funkcijos.Funkcijos.Maximum(None, valueList))
cardinalities.append(len(valueSet))
quartiles = funkcijos.Funkcijos.Quartiles(None, valueList)
quartiles1st.append(quartiles[0])
quartiles2nd.append(quartiles[1])
standartDeviations.append(funkcijos.Funkcijos.StandartDeviation(None,valueList))
def ProcessRatings():
global atributeNames
global missingValues
global avarages
global medians
global mins
global maxs
global cardinalities
global quartiles1st
global quartiles2nd
global standartDeviations
valueList = list()
valueSet = set()
for x in dataList:
if x.ratings is not None:
valueList.append(x.ratings)
valueSet.add(x.ratings)
atributeNames.append('Ratings')
missingValues.append(100 / (len(valueList) + eRatings) * eRatings)
avarages.append(funkcijos.Funkcijos.Vidurkis(None, valueList))
medians.append(funkcijos.Funkcijos.Mediana(None, valueList))
mins.append(funkcijos.Funkcijos.Minimum(None, valueList))
maxs.append(funkcijos.Funkcijos.Maximum(None, valueList))
cardinalities.append(len(valueSet))
quartiles = funkcijos.Funkcijos.Quartiles(None, valueList)
quartiles1st.append(quartiles[0])
quartiles2nd.append(quartiles[1])
standartDeviations.append(funkcijos.Funkcijos.StandartDeviation(None,valueList))
def ProcessComments():
global atributeNames
global missingValues
global avarages
global medians
global mins
global maxs
global cardinalities
global quartiles1st
global quartiles2nd
global standartDeviations
valueList = list()
valueSet = set()
for x in dataList:
if x.comments is not None:
valueList.append(x.comments)
valueSet.add(x.comments)
atributeNames.append('Comments')
missingValues.append(100 / (len(valueList) + eComments) * eComments)
avarages.append(funkcijos.Funkcijos.Vidurkis(None, valueList))
medians.append(funkcijos.Funkcijos.Mediana(None, valueList))
mins.append(funkcijos.Funkcijos.Minimum(None, valueList))
maxs.append(funkcijos.Funkcijos.Maximum(None, valueList))
cardinalities.append(len(valueSet))
quartiles = funkcijos.Funkcijos.Quartiles(None, valueList)
quartiles1st.append(quartiles[0])
quartiles2nd.append(quartiles[1])
standartDeviations.append(funkcijos.Funkcijos.StandartDeviation(None,valueList))
def ProcessUploader():
global atributeNames2
global missingValues2
global cardinalities2
global modes
global modeRates
global modePercents
global modes2nd
global mode2ndRates
global mode2ndPercents
valueList = list()
valueSet = set()
for x in dataList:
if x.uploader is not None:
valueList.append(x.uploader)
valueSet.add(x.uploader)
atributeNames2.append("Uploader")
missingValues2.append(100 / (len(valueList) + eUploader) * eUploader)
cardinalities2.append(len(valueSet))
modes.append(funkcijos.Funkcijos.Moda(None, valueList))
modeRates.append(valueList.count(funkcijos.Funkcijos.Moda(None, valueList)))
modePercents.append(100 / len(valueList) * valueList.count(funkcijos.Funkcijos.Moda(None, valueList)))
modes2nd.append(funkcijos.Funkcijos.Moda2nd(None, valueList))
mode2ndRates.append(valueList.count(funkcijos.Funkcijos.Moda2nd(None, valueList)))
mode2ndPercents.append(100 / len(valueList) * valueList.count(funkcijos.Funkcijos.Moda2nd(None, valueList)))
def ProcessCategory():
global atributeNames2
global missingValues2
global cardinalities2
global modes
global modeRates
global modePercents
global modes2nd
global mode2ndRates
global mode2ndPercents
valueList = list()
valueSet = set()
for x in dataList:
if x.category is not None:
valueList.append(x.category)
valueSet.add(x.category)
atributeNames2.append("Category")
missingValues2.append(100 / (len(valueList) + eCategory) * eCategory)
cardinalities2.append(len(valueSet))
modes.append(funkcijos.Funkcijos.Moda(None, valueList))
modeRates.append(valueList.count(funkcijos.Funkcijos.Moda(None, valueList)))
modePercents.append(100 / len(valueList) * valueList.count(funkcijos.Funkcijos.Moda(None, valueList)))
modes2nd.append(funkcijos.Funkcijos.Moda2nd(None, valueList))
mode2ndRates.append(valueList.count(funkcijos.Funkcijos.Moda2nd(None, valueList)))
mode2ndPercents.append(100 / len(valueList) * valueList.count(funkcijos.Funkcijos.Moda2nd(None, valueList)))
ProcessAge()
ProcessLength()
ProcessViews()
ProcessRate()
ProcessRatings()
ProcessComments()
fig = go.Figure(
data=[go.Table(header=dict(values=['Atributas', 'Trukstamos reiksmes %', 'Vidurkis','Mediana','Minimumas','Maximumas','Kardinalumas','Pirmas kvartilis','Trecias Kvartilis', 'Standartinis nuokrypis']),
cells=dict( values=[ atributeNames, [ round(e, 2) for e in missingValues ], [ round(e, 2) for e in avarages ], medians, mins, maxs, cardinalities, quartiles1st, quartiles2nd, [ round(e, 2) for e in standartDeviations ] ] ))
])
ProcessUploader()
ProcessCategory()
fig2 = go.Figure(
data=[go.Table(header=dict(values=['Atributas', 'Trukstamos reiksmes %', 'Kardinalumas','Moda','Modos Daznis','Moda %','2-oji Moda','2-osios Modos Daznis','2-oji Moda %']),
cells=dict( values=[ atributeNames2, [ round(e, 2) for e in missingValues2 ], cardinalities2, modes, modeRates, [ round(e, 2) for e in modePercents ], modes2nd, mode2ndRates, [ round(e, 2) for e in mode2ndPercents ] ] ))
])
fig.write_html('Breziniai\Tolydieji_duomenys.html', auto_open=True)
fig2.write_html('Breziniai\Kategoriniai_duomenys.html', auto_open=True)
def drawHists():
global dataList
valueList = list()
for x in dataList:
if x.age is not None:
valueList.append(x.age)
HistAge = go.Figure(data=[go.Histogram(x=valueList)])
HistAge.update_layout(xaxis_title_text='Amzius', yaxis_title_text='Kiekis')
HistAge.write_html('Breziniai\Amziaus_Hist.html', auto_open=True)
valueList = list()
for x in dataList:
if x.length is not None:
valueList.append(x.length)
HistLength = go.Figure(data=[go.Histogram(x=valueList)])
HistLength.update_layout(xaxis_title_text='Ilgis', yaxis_title_text='Kiekis')
HistLength.write_html('Breziniai\Ilgis_Hist.html', auto_open=True)
valueList = list()
for x in dataList:
if x.views is not None:
valueList.append(x.views)
HistViews = go.Figure(data=[go.Histogram(x=valueList)])
HistViews.update_layout(xaxis_title_text='Perziuros', yaxis_title_text='Kiekis')
HistViews.write_html('Breziniai\Perziuros_Hist.html', auto_open=True)
valueList = list()
for x in dataList:
if x.rate is not None:
valueList.append(x.rate)
HistRate = go.Figure(data=[go.Histogram(x=valueList)])
HistRate.update_layout(xaxis_title_text='Rate', yaxis_title_text='Kiekis')
HistRate.write_html('Breziniai\Rate_Hist.html', auto_open=True)
valueList = list()
for x in dataList:
if x.ratings is not None:
valueList.append(x.ratings)
HistRatings = go.Figure(data=[go.Histogram(x=valueList)])
HistRatings.update_layout(xaxis_title_text='Reitingai', yaxis_title_text='Kiekis')
HistRatings.write_html('Breziniai\Reitingai_Hist.html', auto_open=True)
valueList = list()
for x in dataList:
if x.comments is not None:
valueList.append(x.comments)
HistComments = go.Figure(data=[go.Histogram(x=valueList)])
HistComments.update_layout(xaxis_title_text='Komentarai', yaxis_title_text='Kiekis')
HistComments.write_html('Breziniai\Komentarai_Hist.html', auto_open=True)
def scatterAndSplom():
    """Write a scatter-plot matrix (splom) over all six numeric attributes plus
    four pairwise scatter plots (two expected-correlated, two expected-uncorrelated)
    to HTML files under Breziniai/."""
    global dataList
    # Collect non-None values per attribute; each column is filtered independently,
    # so the lists may end up with different lengths.
    columns = {name: [] for name in ('age', 'length', 'views', 'rate', 'ratings', 'comments')}
    for record in dataList:
        for name in columns:
            value = getattr(record, name)
            if value is not None:
                columns[name].append(value)
    splom_trace = go.Splom(dimensions=[dict(label='Amzius', values=columns['age']),
                                       dict(label='Ilgis', values=columns['length']),
                                       dict(label='Perziuros', values=columns['views']),
                                       dict(label='Rate', values=columns['rate']),
                                       dict(label='Reitingai', values=columns['ratings']),
                                       dict(label='Komentarai', values=columns['comments'])])
    go.Figure(data=splom_trace).write_html('Breziniai\\Splom.html', auto_open=True)
    # (x values, y values, x label, y label, output file) per scatter plot.
    scatter_specs = [
        (columns['length'], columns['rate'], 'Ilgis', 'Ivertinimas',
         'Breziniai\\Scatter_Length_Rate_Kor.html'),
        (columns['age'], columns['rate'], 'Amzius', 'Ivertinimas',
         'Breziniai\\Scatter_Age_Rate_Kor.html'),
        (columns['rate'], columns['comments'], 'Ivertinimas', 'Komentarai',
         'Breziniai\\Scatter_Rate_Comments_Nekor.html'),
        (columns['rate'], columns['ratings'], 'Ivertinimas', 'Ivertinimu kiekis',
         'Breziniai\\Scatter_Rate_Ratings_Nekor.html'),
    ]
    for xs, ys, x_label, y_label, out_path in scatter_specs:
        fig = go.Figure(data=go.Scatter(x=xs, y=ys, mode="markers"))
        fig.update_layout(xaxis_title_text=x_label, yaxis_title_text=y_label)
        fig.write_html(out_path, auto_open=True)
def barPlots():
    """Write two bar charts to HTML: total views per category and summed rates
    per category.  Relies on the module-level `dataList` and plotly's `go`."""
    global dataList

    def _sum_by_category(attr):
        # Single pass over dataList accumulating the chosen attribute per category.
        categories = []
        totals = []
        for record in dataList:
            value = getattr(record, attr)
            if record.category in categories:
                totals[categories.index(record.category)] += value
            else:
                categories.append(record.category)
                totals.append(value)
        return categories, totals

    categories, view_totals = _sum_by_category('views')
    fig_views = go.Figure([go.Bar(x=categories, y=view_totals)])
    fig_views.update_layout(xaxis_title_text='Kategorija', yaxis_title_text='Perziuru kiekis')
    fig_views.write_html('Breziniai\\bar_Cat_Views.html', auto_open=True)

    # BUG FIX: the original accumulated x.views into the rates series for
    # already-seen categories while appending x.rate for new ones; sum x.rate
    # consistently here.
    categories, rate_totals = _sum_by_category('rate')
    fig_rates = go.Figure([go.Bar(x=categories, y=rate_totals)])
    fig_rates.update_layout(xaxis_title_text='Kategorija', yaxis_title_text='Ivertinmu suma')
    fig_rates.write_html('Breziniai\\bar_Cat_Rates.html', auto_open=True)
def boxPlots():
    """Write two box-plot figures to HTML: views per category and rate per
    category.  Relies on the module-level `dataList` and plotly's `go`."""
    global dataList

    def _group_by_category(attr):
        # Collect the chosen attribute's values into one list per category.
        categories = []
        groups = []
        for record in dataList:
            value = getattr(record, attr)
            if record.category in categories:
                groups[categories.index(record.category)].append(value)
            else:
                categories.append(record.category)
                groups.append([value])
        return categories, groups

    # BUG FIX: the original iterated range(len(categoriesCount) - 1) and silently
    # dropped the last category from both figures; zip covers every category.
    for attr, out_path in (('views', 'Breziniai\\box_Cat_Views.html'),
                           ('rate', 'Breziniai\\box_Cat_Rate.html')):
        categories, groups = _group_by_category(attr)
        fig = go.Figure()
        for category_name, values in zip(categories, groups):
            fig.add_trace(go.Box(y=values, name=category_name))
        fig.write_html(out_path, auto_open=True)
# Render all of the exploratory figures defined above.
drawHists()
scatterAndSplom()
barPlots()
boxPlots()
# Build the per-record attribute matrix used for covariance/correlation.
forCov = []
covAverages = []
# covAverages mirrors the first six precomputed attribute means
# (age, length, views, rate, ratings, comments); `avarages` (sic) is defined
# earlier in the file.
covAverages.append(avarages[0])
covAverages.append(avarages[1])
covAverages.append(avarages[2])
covAverages.append(avarages[3])
covAverages.append(avarages[4])
covAverages.append(avarages[5])
for x in dataList:
    forCov.append([x.age, x.length, x.views, x.rate, x.ratings, x.comments])
# Sample covariance matrix (6x6); rows with a None in either attribute are skipped.
# NOTE(review): the divisor is always len(forCov) - 1 even when rows are skipped
# for None values, and the local `sum` shadows the builtin — confirm intended.
cov = []
for i in range(0, 6):
    for j in range(0, 6):
        sum = 0.0
        for x in forCov:
            if x[i] == None or x[j] == None:
                continue
            else:
                sum = sum + ((x[i] - covAverages[i]) * (x[j] - covAverages[j]))
        # j == 0 starts row i; later columns append to it.
        if j == 0:
            cov.append([sum / (len(forCov) - 1)])
        else:
            cov[i].append(sum / (len(forCov) - 1))
# Covariance table: first column holds the attribute names; the header gets a
# leading blank cell (atributeNames is mutated here).
atributeNamesWithoutSpace = atributeNames.copy()
atributeNames.insert(0," ")
fig3 = go.Figure(
    data=[go.Table(header=dict(values=atributeNames),
                   cells=dict( values=[ atributeNamesWithoutSpace, cov[0], cov[1], cov[2], cov[3], cov[4], cov[5]] ))
          ])
fig3.write_html('Breziniai\\Kovariacijos lentele.html', auto_open=True)
# Pearson correlation matrix derived from cov and per-attribute std deviations.
cor = []
for i in range(0, 6):
    for j in range(0, 6):
        if j == 0:
            cor.append([cov[i][j] / (standartDeviations[i] * standartDeviations[j])])
        else:
            cor[i].append(cov[i][j] / (standartDeviations[i] * standartDeviations[j]))
fig4 = go.Figure(
    data=[go.Table(header=dict(values=atributeNames),
                   cells=dict( values=[ atributeNamesWithoutSpace, cor[0], cor[1], cor[2], cor[3], cor[4], cor[5]] ))
          ])
fig4.write_html('Breziniai\\Koreliacijos lentele.html', auto_open=True)
# Same correlation matrix as a heatmap.
fig5 = go.Figure(data=go.Heatmap(z=cor, x = atributeNamesWithoutSpace, y = atributeNamesWithoutSpace))
fig5.write_html('Breziniai\\Koreliacijos matrica.html', auto_open=True)
# Z-score normalize every record and dump the result to CSV.
# NOTE(review): a record with any None attribute will raise TypeError here —
# confirm dataList was cleaned upstream.
NormalizedFile = open("Normalized data.csv", "w+")
NormalizedList = list()
for x in dataList:
    nAge = (x.age - avarages[0]) / standartDeviations[0]
    nLength = (x.length - avarages[1]) / standartDeviations[1]
    nViews = (x.views - avarages[2]) / standartDeviations[2]
    nRate = (x.rate - avarages[3]) / standartDeviations[3]
    nRatings = (x.ratings - avarages[4]) / standartDeviations[4]
    nComments = (x.comments - avarages[5]) / standartDeviations[5]
    NormalizedList.append(video.Video(x.uploader, nAge, x.category, nLength, nViews, nRate, nRatings, nComments))
for x in NormalizedList:
    NormalizedFile.write(x.ToStringWithCommas() + "\n")
NormalizedFile.write(x.ToStringWithCommas() + "\n") | winVIP/KTU-stuff | KTU semestras 6/Intelektika/Lab1/mainas.py | mainas.py | py | 20,500 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "video.Video",
"line_number": 66,
"usage_type": "call"
},
{
"api_name": "funkcijos.Funkcijos.Vidurkis",
"line_number": 94,
"usage_type": "call"
},
{
"api_name": "funkcijos.Funkcijos",
"line_number": 94,
"usage_type": "attribute"
},
{
"api_name": "fun... |
6331063126 | import numpy as np
from sklearn.datasets import make_moons
import matplotlib.pyplot as plt
import neural_network as nn
def plot_decision_boundary(pred_func, X, y):
    """Shade the 2-D plane by pred_func's output over a fine mesh and overlay
    the sample points X colored by label y."""
    pad = 0.5     # margin beyond the data extent
    step = 0.01   # mesh resolution
    x_min, x_max = X[:, 0].min() - pad, X[:, 0].max() + pad
    y_min, y_max = X[:, 1].min() - pad, X[:, 1].max() + pad
    xx, yy = np.meshgrid(np.arange(x_min, x_max, step),
                         np.arange(y_min, y_max, step))
    grid_points = np.c_[xx.ravel(), yy.ravel()]
    Z = pred_func(grid_points).reshape(xx.shape)
    plt.contourf(xx, yy, Z, cmap=plt.cm.Spectral)
    plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Spectral)
#X = np.array([[-2, 1], [1, 1], [1.5, -0.5], [-2, -1], [-1, -1.5], [2, -2]])
#Y = np.array([[0, 1], [0, 1], [0, 1], [1, 0], [1, 0], [1, 0]])
#b = nn.build_model(X, Y, 4, print_loss=True)
# Fixed seed so the generated moons dataset is reproducible.
np.random.seed(0)
X, y = make_moons(200, noise=0.20)
#nn.build_model(X, y, 4, 200000,print_loss=True)
# Scatter of the raw dataset, then a grid of decision boundaries for several
# hidden-layer sizes.
plt.scatter(X[:, 0], X[:, 1], s=40, c=y, cmap=plt.cm.Spectral)
plt.figure(figsize=(16, 32))
hidden_layer_dimensions = [1, 2, 3, 4]
for i, nn_hdim in enumerate(hidden_layer_dimensions):
    plt.subplot(5, 2, i+1)
    plt.title('HiddenLayerSize%d' % nn_hdim)
    # Train a fresh network for each hidden size (200k iterations each).
    model = nn.build_model(X, y, nn_hdim, 200000, print_loss=True)
    #nn.plot_decision_boundary(lambda x: nn.predict(model, x), X, y)
    plot_decision_boundary(lambda X: np.array([nn.predict(model, x) for x in X]), X, y)
plt.savefig('foo.png')
| blackz54/cs491project3 | driver.py | driver.py | py | 1,364 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.meshgrid",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "numpy.c_",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "matplotlib.pyplot.contourf"... |
4457682469 | import subprocess as sp
import sys
import ctypes
import os
import random
import json
import base64
import pathlib
import tempfile
import functools
import operator
import time
import numpy
import pylibsort
import libff.invoke
# ol-install: numpy
# from memory_profiler import profile
import cProfile
import pstats
import io
doProfile = False
def printCSV(pr, path):
    """Dump a cProfile.Profile's stats, sorted by cumulative time, as a CSV file.

    pr   -- a cProfile.Profile that has already collected data
    path -- destination path for the CSV output
    """
    path = pathlib.Path(path)
    buf = io.StringIO()
    pstats.Stats(pr, stream=buf).sort_stats(pstats.SortKey.CUMULATIVE).print_stats()
    report = buf.getvalue()
    # Drop the human-readable preamble: keep everything from the column header on.
    report = 'ncalls' + report.split('ncalls')[-1]
    # The stats table is whitespace-aligned; split each row into at most six
    # fields so the trailing function descriptor (which may contain spaces)
    # stays intact as the final CSV field.
    csv_text = '\n'.join(
        ','.join(line.rstrip().split(None, 5)) for line in report.split('\n')
    )
    with open(path, 'w') as f:
        f.write(csv_text)
def sortPartial(event, ctx):
    """Sort the partitions referenced by `event` on the bit slice
    [offset, offset + width) and write the output via pylibsort.

    Returns {'error': None} on success, {'error': <message>} on sort failure.
    """
    # NOTE(review): backend is currently forced to 'file' regardless of
    # event['arrType'] (temporary limitation noted by the original author).
    pylibsort.ConfigureBackend('file', ctx)
    part_refs = pylibsort.getPartRefs(event)
    raw = pylibsort.readPartRefs(part_refs)
    try:
        bounds = pylibsort.sortPartial(raw, event['offset'], event['width'])
    except Exception as e:
        return { "error" : str(e) }
    pylibsort.writeOutput(event, raw, bounds)
    return { "error" : None }
def selfTest():
    """Main only used for testing purposes"""
    import numpy as np
    # nElem = 256
    # nElem = 256*(1024*1024)
    nElem = 1024*1024                  # number of elements to sort (4 bytes each)
    nbyte = nElem*4                    # total payload in bytes
    offset = 0                         # starting bit of the radix slice
    width = 8                          # number of bits to sort on
    # width = 16
    narr = 2                           # number of distributed arrays
    npart = 2                          # partitions per array
    bytesPerPart = int(nbyte / (narr * npart))
    inBuf = pylibsort.generateInputs(nElem)
    with tempfile.TemporaryDirectory() as tDir:
        tDir = pathlib.Path(tDir)
        # File-backed distributed array store rooted in the temp dir.
        libffCtx = libff.invoke.RemoteCtx(libff.array.ArrayStore('file', tDir), None)
        pylibsort.ConfigureBackend('file', libffCtx)
        inArrName = "faasSortTestIn"
        outArrName = "faasSortTestOut"
        # Write source arrays
        inShape = pylibsort.ArrayShape.fromUniform(bytesPerPart, npart)
        refs = []
        for arrX in range(narr):
            arrName = inArrName + str(arrX)
            inArr = pylibsort.DistribArray.Create(arrName, inShape)
            start = (arrX*npart)*bytesPerPart
            end = start + (bytesPerPart*npart)
            inArr.WriteAll(inBuf[start:end])
            # One input reference per partition of this array (nbyte=-1: whole part).
            for partX in range(npart):
                refs.append({
                    'arrayName': arrName,
                    'partID' : partX,
                    'start' : 0,
                    'nbyte' : -1
                })
            inArr.Close()
        req = {
            "offset" : offset,
            "width" : width,
            "arrType" : "file",
            "input" : refs,
            "output" : outArrName
        }
        # Either profile the sort (module-level doProfile flag) or just time it.
        if doProfile:
            pr = cProfile.Profile()
            pr.enable()
            resp = sortPartial(req, libffCtx)
            pr.disable()
            printCSV(pr, "./faas{}b.csv".format(width))
            pr.dump_stats("./faas{}b.prof".format(width))
        else:
            start = time.time()
            resp = sortPartial(req, libffCtx)
            print(time.time() - start)
        if resp['error'] is not None:
            print("FAILURE: Function returned error: " + resp['error'])
            exit(1)
        outArr = pylibsort.DistribArray.Open(outArrName)
        outBuf = outArr.ReadAll()
        boundaries = outArr.shape.starts
        outArr.Destroy()
        # byte capacities to int boundaries
        caps = np.array(outArr.shape.caps) // 4
        boundaries = np.cumsum(caps)
        boundaries = np.roll(boundaries, 1)
        boundaries[0] = 0
        # Validate the output buckets against the original input.
        pylibsort.checkPartial(inBuf, outBuf, boundaries, offset, width)
        print("PASS")
def libffProcessInvoke():
    """Entry point when this sort worker runs as a libff remote process
    (ProcessRemoteFunc); registers sortPartial and serves requests.
    See libff.invoke documentation for details."""
    handlers = {"sortPartial": sortPartial}
    libff.invoke.RemoteProcessServer(handlers, sys.argv[1:])
def LibffInvokeRegister():
    """Return the handler table libff uses for direct (in-process) invocation."""
    return dict(sortPartial=sortPartial)
if __name__ == "__main__":
# selfTest()
libffProcessInvoke()
| NathanTP/fakefaas | examples/sort/worker.py | worker.py | py | 4,313 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "pathlib.Path",
"line_number": 27,
"usage_type": "call"
},
{
"api_name": "io.StringIO",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "pstats.Stats",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "pstats.SortKey",
"line_numb... |
7053890236 | import sys, os
sys.path.append(os.path.join(os.path.dirname(os.path.abspath(__file__)),
"../datasets/"))
from mass_spring import get_dataset
import torch
import torch.nn as nn
import pytorch_lightning as pl
import numpy as np
import torch.utils.data as data
from TorchSnippet.energy import HNN
from TorchSnippet.dyna import odeint
import matplotlib.pyplot as plt
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
# Training data: points on the unit circle (X) and their time derivatives (y) —
# the vector field of a harmonic oscillator sampled over one period.
t = torch.linspace(0,1, 100).reshape(-1,1)
X = torch.cat([
    torch.sin(2*np.pi*t),
    torch.cos(2*np.pi*t)
],1).to(device)
y = torch.cat([
    torch.cos(2*np.pi*t),
    -torch.sin(2*np.pi*t)
],1).to(device)
# data1 = get_dataset(samples=50)
# X, y = torch.tensor(data1['x'], dtype=torch.float32).to(device), torch.tensor(data1['dx'], dtype=torch.float32).to(device)
# print(X.shape, y.shape)
train = data.TensorDataset(X, y)
dataloader = data.DataLoader(train, batch_size=15, shuffle=False)
class Learner(pl.LightningModule):
    """Minimal Lightning wrapper that fits `model` to the sampled vector field
    using a sum-of-squared-errors loss and Adam."""

    def __init__(self, model: nn.Module):
        super().__init__()
        self.model = model
        self.c = 0

    def forward(self, x):
        return self.model(x)

    def loss(self, y, y_hat):
        # Sum of squared errors over the whole batch (symmetric in its args).
        squared_error = (y - y_hat) ** 2
        return squared_error.sum()

    def training_step(self, batch, batch_idx):
        inputs, targets = batch
        predictions = self.model(inputs)
        step_loss = self.loss(predictions, targets)
        return {'loss': step_loss, 'log': {'train_loss': step_loss}}

    def configure_optimizers(self):
        return torch.optim.Adam(self.model.parameters(), lr=0.01)

    def train_dataloader(self):
        # Uses the module-level dataloader built above.
        return dataloader
# Hamiltonian NN: a small MLP mapping the 2-D state to a scalar energy.
func = HNN(nn.Sequential(
    nn.Linear(2,64),
    nn.Tanh(),
    nn.Linear(64,1))).to(device)
learner = Learner(func)
trainer = pl.Trainer(min_epochs=500, max_epochs=1000)
trainer.fit(learner)
# Wrap the trained field for the ODE solver; the system is autonomous, so the
# time argument t is ignored.
func1 = lambda t, x: func(x)
# x_t = torch.randn(1000, 2).to(device)
x_t = X[:1, :]
print(x_t.shape)
s_span = torch.linspace(0, 2*np.pi, 618)
# Roll out the learned dynamics and overlay the true circle for comparison.
trajectory = odeint(func1, x_t, s_span).detach().cpu().numpy()
for i in range(len(x_t)):
    plt.plot(trajectory[:, i, 0], trajectory[:, i, 1], 'b')
plt.plot(X[:, 0], X[:, 1], '+')
plt.show()
| yantijin/TorchSnippet | example/energy/hnn_test.py | hnn_test.py | py | 2,156 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.path.append",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "sys.path",
"line_number": 3,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"line_number": 3,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": ... |
39000943657 | import torch
import numpy as np
from torchvision import transforms
from torch.utils.data import DataLoader
from torch.utils.data._utils.collate import default_collate
from .datasets import IuxrayMultiImageDataset, MimiccxrSingleImageDataset, KnowledgeDataset
class ToPILImage(object):
    """Callable transform: convert the first element of a sequence-like input
    to a PIL image using torchvision's converter in mode 'F'."""

    def __init__(self):
        # One converter instance reused across all calls.
        self.to_pil = transforms.ToPILImage(mode="F")

    def __call__(self, x):
        first = x[0]
        return self.to_pil(first)
class LADataLoader(DataLoader):
    """DataLoader for IU-Xray / MIMIC-CXR report-generation datasets.

    Builds the split-appropriate image transform (augmented for 'train',
    deterministic otherwise), instantiates the matching dataset, and batches
    samples with a custom collate that pads report and knowledge token
    sequences.
    """

    def __init__(self, args, tokenizer, split, shuffle):
        self.args = args
        self.dataset_name = args.dataset_name
        self.batch_size = args.batch_size
        self.shuffle = shuffle
        self.num_workers = args.num_workers
        self.tokenizer = tokenizer
        self.split = split
        # NOTE(review): mean/std look dataset-specific rather than ImageNet
        # statistics — confirm where they were computed.
        normalize = transforms.Normalize(mean=[0.500, 0.500, 0.500],
                                         std=[0.275, 0.275, 0.275])
        if split == 'train':
            # Training pipeline with random crop/flip/affine augmentation.
            self.transform = transforms.Compose([
                transforms.Resize(args.image_size),
                transforms.RandomCrop(args.crop_size),
                transforms.RandomHorizontalFlip(),
                transforms.RandomAffine(degrees=10, translate=(0.2, 0.2), scale=(0.8, 1.2), fillcolor=(0, 0, 0)),
                transforms.ToTensor(),
                normalize])
        else:
            # Validation/test pipeline: deterministic center crop only.
            self.transform = transforms.Compose([
                transforms.Resize(args.image_size),
                transforms.CenterCrop(args.crop_size),
                transforms.ToTensor(),
                normalize])
        if self.dataset_name == 'iu_xray':
            self.dataset = IuxrayMultiImageDataset(self.args, self.tokenizer, self.split, transform=self.transform)
        elif self.dataset_name == 'mimic_cxr':
            self.dataset = MimiccxrSingleImageDataset(self.args, self.tokenizer, self.split, transform=self.transform)
        else:
            raise NotImplementedError
        self.init_kwargs = {
            'dataset': self.dataset,
            'batch_size': self.batch_size,
            'shuffle': self.shuffle,
            'collate_fn': self.collate_fn,
            'num_workers': self.num_workers,
            'pin_memory': False
        }
        super().__init__(**self.init_kwargs)

    @staticmethod
    def collate_fn(data):
        """Pad report / knowledge token-id sequences and assemble a batch dict."""
        # Images use the default collate; under multiprocessing this avoids
        # extra memory copies.
        images = default_collate([d['image'] for d in data])
        images_id = [d['idx'] for d in data]
        reports_ids = [d['report_ids'] for d in data]
        reports_masks = [d['report_mask'] for d in data]
        seq_lengths = [d['seq_length'] for d in data]
        knowledges_ids = [d['knowledge_ids'] for d in data]
        # knowledges_masks = [d['knowledge_mask'] for d in data]
        max_seq_length = max(seq_lengths)
        # Right-pad reports and masks with zeros up to the batch's max length.
        targets = np.zeros((len(reports_ids), max_seq_length), dtype=int)
        targets_masks = np.zeros((len(reports_ids), max_seq_length), dtype=int)
        for i, report_ids in enumerate(reports_ids):
            targets[i, :len(report_ids)] = report_ids
        for i, report_masks in enumerate(reports_masks):
            targets_masks[i, :len(report_masks)] = report_masks
        # Knowledge sequences are truncated/zero-padded to a fixed length of 64.
        b_knowledges = np.zeros((len(knowledges_ids), 64), dtype=int)
        # b_knowledges_masks = np.zeros((len(reports_ids), 64), dtype=int)
        for i, knowledge_ids in enumerate(knowledges_ids):
            knowledge_ids = knowledge_ids[:64]
            b_knowledges[i, :len(knowledge_ids)] = knowledge_ids
        # for i, knowledge_mask in enumerate(knowledges_masks):
        #     knowledge_mask = knowledge_mask[:64]
        #     b_knowledges_masks[i, :len(knowledge_mask)] = knowledge_mask
        batch = {'images_id': images_id,
                 'images': images,
                 'targets': torch.LongTensor(targets),
                 'reports_mask': torch.FloatTensor(targets_masks),
                 'knowledges': torch.LongTensor(b_knowledges),
                 # 'knowledges_mask': torch.FloatTensor(b_knowledges_masks)
                 }
        return batch
def get_bert_dataloader(args, tokenizer):
    """Build the knowledge dataset plus a deterministic (non-shuffled,
    single-process) DataLoader over it; returns (dataset, dataloader)."""
    dataset = KnowledgeDataset(args, tokenizer)
    loader = DataLoader(
        dataset,
        batch_size=args.batch_size,
        shuffle=False,
        num_workers=0,
        pin_memory=True,
    )
    return dataset, loader
| LX-doctorAI1/GSKET | modules/dataloaders.py | dataloaders.py | py | 4,467 | python | en | code | 20 | github-code | 1 | [
{
"api_name": "torchvision.transforms.ToPILImage",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "torchvision.transforms",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "torch.utils.data.DataLoader",
"line_number": 17,
"usage_type": "name"
},
{
... |
29499134745 |
import os
import secrets
from PIL import Image
from flask import render_template, url_for, flash, redirect, request, abort, jsonify, make_response
from app import app, db, bcrypt, mail
from forms import *
from models import *
from flask_login import login_user, current_user, logout_user, login_required
from datetime import datetime,timedelta
@app.route("/notification", methods=['GET','POST'])
def notification():
page = request.args.get('page', 1, type=int)
current_time = datetime.utcnow()
four_weeks_ago = current_time - timedelta(weeks=4)
posts = Post.query.filter(Post.date_posted>four_weeks_ago).order_by(Post.date_posted.desc()).paginate(page=page, per_page=6)
stocktakeAll = Verify_Product_Storage.query.distinct(Verify_Product_Storage.verify_id).all()
stocktakeAll.extend(Order_Product.query.distinct(Order_Product.verify_id).all())
allUpdatebooking=Verify_Listing_Booking.query.distinct(Verify_Listing_Booking.verify_id).all()
allUpdatebooking_verfiyid=[(v.verify_id) for v in allUpdatebooking]
if current_user.role=="Manager":
validStorageId = [user.user_id for user in User_Storage.query.filter_by(user_id=current_user.id).all()]
stocktake=[s for s in stocktakeAll if s.storage_id in validStorageId]
validVerify = [(v.id) for v in Verify.query.filter(Verify.id.in_(allUpdatebooking_verfiyid),(Verify.user_id==current_user.id)).distinct(Verify.id).all()]
stocktake.extend([s for s in allUpdatebooking if s.verify_id in validVerify])
else:
stocktakeAll.extend(allUpdatebooking)
stocktake=stocktakeAll
notificationVerify=[]
for stock in stocktake:
temp=[]
st = Verify.query.filter_by(id=stock.verify_id, user_id_confirm = None, date_verified = None).first()
if(st!=None):
temp={}
temp["title"]=st.title
temp["date"]=st.date_posted.strftime('%Y-%m-%d at %H:%M')
temp["verify_id"]=st.id
temp["username"]=User.query.filter_by(id=st.user_id).first().username
if(st.title=="Update Booking"):
temp["id"]=stock.listing_id
temp["name"]=stock.listing.name+' - '+stock.listing.location.suburb
else:
temp["id"]=stock.storage_id
temp["name"]=stock.storage.name+' - '+stock.storage.location.suburb
if(st.title=="Stocktake"):
temp["url"]="verifyStocktake"
elif(st.title=="Transfer"):
temp["url"]="verifyTransfer"
elif(st.title=="Shopping"):
temp["url"]="verifyShopping"
elif(st.title=="Update Booking"):
temp["url"]="verifyUpdateBooking"
notificationVerify.append(temp)
form = PostForm()
return render_template('notification/notification.html', posts=posts,notificationVerify=notificationVerify, form=form)
@app.route("/post/new", methods=['GET', 'POST'])
@login_required
def new_post():
if (request.json!=None):
if request.json.get('submit')=='post':
post = Post(title=request.json.get('title'), content=request.json.get('content'), author=current_user)
db.session.add(post)
db.session.commit()
return jsonify({'status':'success'})
return jsonify({'status':'fail'})
@app.route("/user/<string:username>")
def user_posts(username):
page = request.args.get('page', 1, type=int)
user = User.query.filter_by(username=username).first_or_404()
posts = Post.query.filter_by(author=user)\
.order_by(Post.date_posted.desc())\
.paginate(page=page, per_page=5)
return render_template('user/user_posts.html', posts=posts, user=user)
@app.route("/post/<int:post_id>")
def post(post_id):
post = Post.query.get_or_404(post_id)
userPost = User.query.filter_by(id=post.user_id).first()
image_file = url_for('static', filename='pictures/profile/' + userPost.image_file)
return render_template('notification/post.html', post=post, role=current_user.role,image_file=image_file)
# NOTE(review): commented-out legacy route kept below as a module-level string
# literal; it never executes. Consider deleting once the update flow is settled.
'''
@app.route("/post/<int:post_id>/update", methods=['GET', 'POST'])
@login_required
def update_post(post_id):
    post = Post.query.get_or_404(post_id)
    if post.author != current_user:
        abort(403)
    form = PostForm()
    if form.validate_on_submit():
        post.title = form.title.data
        post.content = form.content.data
        db.session.commit()
        flash('Your post has been updated!', 'success')
        return redirect(url_for('post', post_id=post.id))
    elif request.method == 'GET':
        form.title.data = post.title
        form.content.data = post.content
    return render_template('notification/create_post.html',
                           form=form, legend='Update Post')
'''
@app.route("/post/<int:post_id>/delete", methods=['POST'])
@login_required
def delete_post(post_id):
post = Post.query.get_or_404(post_id)
if post.author != current_user:
abort(403)
db.session.delete(post)
db.session.commit()
flash('Your post has been deleted!', 'success')
return redirect(url_for('notification'))
| alankhoangfr/NilStock_Inventory | routes/notificationRoutes.py | notificationRoutes.py | py | 5,177 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.request.args.get",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "flask.request.args",
"line_number": 17,
"usage_type": "attribute"
},
{
"api_name": "flask.request",
"line_number": 17,
"usage_type": "name"
},
{
"api_name": "datetime... |
20248498954 | import os, time, socket, threading, json
import paho.mqtt.client as mqtt
import h3.api.numpy_int as h3
import numpy as np
from scipy import stats
from geodata_toolbox import H3Grids
import matplotlib.pyplot as plt
class Indicator:
    """Spatial indicator helper over an H3 grid: distance lookups plus heatmap
    plotting/verification against an interactive Table front-end."""

    def __init__(self, H3, Table=None):
        # H3: an H3Grids instance; Table is only needed to prime the distance lookup.
        self.H3 = H3
        self.h3_features = None  # lazily-built GeoJSON features cache
        if not self.H3.dist_lookup:
            self.H3._get_h3_dist_lookup(Table=Table)

    def _get_straight_line_dist_to_h3_cells(self, start_h3_cell, target_h3_cells):
        """Straight-line distances from start_h3_cell to each target cell,
        filling any holes in the precomputed lookup on the fly."""
        dist_list = [
            self.H3.dist_lookup.get(start_h3_cell, {}).get(target_h3_cell, None)
            for target_h3_cell in target_h3_cells
        ]
        # Self-distance proxy: half an H3 edge length at this resolution.
        inner_dist = h3.edge_length(self.H3.resolution) / 2
        # in case distance is not in precooked lookup...
        while not all(dist_list):
            idx_of_none = dist_list.index(None)
            # print(start_h3_cell, target_h3_cells[idx_of_none], dist_list[idx_of_none])
            target_h3_cell = target_h3_cells[idx_of_none]
            this_dist = h3.point_dist(h3.h3_to_geo(start_h3_cell), h3.h3_to_geo(target_h3_cell)) \
                if start_h3_cell != target_h3_cell else inner_dist
            dist_list[idx_of_none] = this_dist
            self.H3.dist_lookup.setdefault(start_h3_cell, {})[target_h3_cell] = this_dist
            print('Strange error, check 123')
        # NOTE(review): a genuine zero distance would never terminate the loop
        # above (0 is falsy in all()); this assumes all distances are positive.
        assert min(dist_list) > 0
        return dist_list

    def _get_network_dist_to_h3_cells(self, start_h3_cell, target_h3_cells):
        # to do: network distance calculation
        return []

    def verify_heatmap(self, Table, name, target_attr, minimum_ratio_th,
                       focus_table_grid_code=None, cmap='Reds'):
        """Plot the heatmap (top subplot) and, below it, the H3 cells whose
        target_attr exceeds minimum_ratio_th of the cell area, optionally
        highlighting table grid cells whose interactive code matches."""
        if focus_table_grid_code is None:
            focus_table_grid_code = []
        elif type(focus_table_grid_code) != list:
            focus_table_grid_code = [focus_table_grid_code]
        ax0 = plt.subplot(2, 1, 1)
        self.plot_heatmap(Table, name, ax=ax0, cmap=cmap)
        ax1 = plt.subplot(2, 1, 2)
        if not self.h3_features:
            self.h3_features = self.H3.export_h3_features()
        h3_features = self.h3_features
        h3_cell_area = h3.hex_area(self.H3.resolution, 'm^2')
        # Keep only cells where the attribute covers more than the threshold
        # fraction of the cell's area.
        focus_h3_features = [
            h3_fea for h3_fea, h3_cell in zip(h3_features, self.H3.h3_stats.keys())
            if self.H3.h3_stats[h3_cell].get(target_attr, -1) > minimum_ratio_th * h3_cell_area
        ]
        # h3_values = [rst.get(h3_feature['properties']['h3_id'], None) for h3_feature in h3_features]
        # Table.plot(ax=ax2, features=h3_features,
        #            crs=4326, value=h3_values, cmap='Reds')
        Table.plot(ax=ax1, features=focus_h3_features, crs=4326, facecolor='grey', edgecolor='grey')
        if focus_table_grid_code:
            # Flag every table cell whose interactive code matches a requested code.
            focus_locations = [0 for i in range(len(Table.features[Table.crs['geographic']]))]
            for zone, zone_layout in Table.interactive_grid_layout.items():
                for cell_id, cell_state in zone_layout.items():
                    if cell_state['code'] in focus_table_grid_code:
                        focus_locations[cell_id] = 1
            focus_locations_colorized = [
                'none' if fl==0 else 'r'
                for fl in focus_locations
            ]
            Table.plot(color=focus_locations_colorized, ax=ax1)
        Table.plot(ax=ax1, facecolor='none', edgecolor='grey', linewidth=0.5)
        # Align the verification subplot's extent with the heatmap's.
        ax1.set_xlim(ax0.get_xlim())
        ax1.set_ylim(ax0.get_ylim())

    def plot_heatmap(self, Table, name, ax=None, cmap='Reds'):
        """Color the table grid by values derived from the named H3-cell statistic."""
        if not ax:
            ax = plt.gca()
        table_grid_values = Table.get_grid_value_from_h3_cells(self.H3.resolution,
                                                               name,
                                                               self_update=False)
        Table.plot(value=table_grid_values, ax=ax, cmap=cmap)
class Handler:
    """Glue between the UDP table front-end and MQTT-based indicator modules.

    One worker thread receives table-layout strings over UDP, applies them to
    the attached Table and publishes an update notice on MQTT; a second thread
    subscribes to indicator results on MQTT and forwards them over a UDP
    sender socket.
    """

    def __init__(self, udp_receiver_ip, udp_receiver_port, udp_sender_ip, udp_sender_port,
                 use_mqtt=True, mqtt_ip='localhost', mqtt_port=1883,
                 mqtt_update_topic='update', mqtt_results_topic='results',
                 buffer_size=1024*8):
        self.udp_receiver = {
            'ip': udp_receiver_ip,
            'port': udp_receiver_port,
            'socket': socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        }
        self.udp_sender = {
            'ip': udp_sender_ip,
            'port': udp_sender_port,
            'socket': socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        }
        if use_mqtt:
            # NOTE(review): ClientMQTT connects immediately at construction.
            self.mqtt = {
                'ip': mqtt_ip,
                'port': mqtt_port,
                'topics': {
                    'update': mqtt_update_topic,
                    'results': mqtt_results_topic
                },
                'client': ClientMQTT(mqtt_ip, mqtt_port)
            }
        else:
            self.mqtt = None
        self.config = {'buffer_size': buffer_size}
        self.Table = None
        self.indicators = []

    def add_table(self, Table):
        """Attach the interactive Table whose layout updates are applied."""
        self.Table = Table

    def add_indicator(self, *args):
        """Register Indicator instances; invalid arguments are warned about and skipped."""
        for indicator in args:
            # BUG FIX: the original called isinstance(indicator, 'Indicator') —
            # a string as the second argument raises TypeError — and then
            # appended the invalid object anyway.  Check the class and skip.
            if not isinstance(indicator, Indicator):
                print(f'Warning: {indicator} is not a valid Indicator instance and ignored')
                continue
            self.indicators.append(indicator)

    def run(self):
        """Start the two worker threads (table listener, indicator forwarder)."""
        thread_table = threading.Thread(target=self._listen_to_table,
                                        args=(self.config['buffer_size'], True),
                                        name='TableInteraction')
        thread_table.start()
        thread_indicators = threading.Thread(target=self._listen_to_indicators,
                                             args=(),
                                             name='IndicatorResults')
        thread_indicators.start()

    def _listen_to_table(self, buffer_size, print_flag=False):
        """Blocking loop: receive layout strings over UDP, update the Table and
        announce each update on the MQTT 'update' topic."""
        receiver = self.udp_receiver['socket']
        try:
            receiver.bind((self.udp_receiver['ip'], self.udp_receiver['port']))
            while True:
                data, addr = receiver.recvfrom(buffer_size)
                data = data.strip().decode()
                data_epoch = time.time()
                if print_flag:
                    print(f'\nNew data received: {data}\n')
                if self.Table:
                    self.Table.update(layout_str=data)
                    if self.mqtt:
                        # NOTE(review): ClientMQTT does not define publish();
                        # confirm it proxies paho's client or add the method.
                        self.mqtt['client'].publish(
                            self.mqtt['topics']['update'],
                            json.dumps({
                                'msg': 'table updated',
                                'epoch': data_epoch,
                                'time': time.strftime('%Y-%m-%d %H:%M:%S', time.gmtime(data_epoch))
                            })
                        )
                    else:
                        # use pure udp way
                        pass
                else:
                    if print_flag:
                        print('New msg is thrown as no table exists')
        finally:
            if self.mqtt:
                self.mqtt['client'].disconnect()
            receiver.shutdown(socket.SHUT_RDWR)
            receiver.close()

    def _listen_to_indicators(self):
        """Forward indicator results arriving on MQTT out through the UDP sender."""
        sender = self.udp_sender['socket']

        def send_to(msg):
            # NOTE(review): socket.sendto needs a bytes-like payload; confirm the
            # MQTT callback delivers bytes here.  (Removed a leftover profane
            # debug print that was in the original.)
            sender.sendto(msg, (self.udp_sender['ip'], self.udp_sender['port']))

        try:
            if self.mqtt:
                mqtt_client = self.mqtt['client']
                # mqtt_client.register_handler(self.mqtt['topics']['results'], send_to)
                mqtt_client.register_handler('results', send_to)
                while True:  # blocking keep-alive; MQTT callbacks do the work
                    time.sleep(50)
            else:
                pass
        finally:
            if self.mqtt:
                self.mqtt['client'].disconnect()
            sender.shutdown(socket.SHUT_RDWR)
            sender.close()
class ClientMQTT:
    """Thin paho-mqtt wrapper: connects at construction, pumps the network loop
    on a daemon thread, and dispatches per-topic callbacks registered via
    register_handler()."""

    def __init__(self, broker, port, client_id=None, username=None, password=None):
        self.client = mqtt.Client(client_id) if client_id else mqtt.Client()
        if username and password:
            self.client.username_pw_set(username, password)
        self.client.on_connect = self.on_connect
        self.client.on_subscribe = self.on_subscribe
        self.client.on_message = self.on_message
        self.callbacks = {}
        self.client.connect(broker, port)
        threading.Thread(target=self.__run, daemon=True).start()

    def __run(self):
        # Blocking network loop; runs on the daemon thread started in __init__.
        self.client.loop_forever()

    def on_connect(self, client, userdata, flags, rc):
        if rc == 0:
            print("Connected to MQTT Broker!")
        else:
            print("Failed to connect, return code %d\n", rc)
        # (Re-)subscribe to every registered topic, e.g. after a reconnect.
        for topic in self.callbacks:
            self.client.subscribe(topic)

    def on_subscribe(self, client, userdata, mid, granted_qos):
        pass

    def on_message(self, client, userdata, msg):
        # NOTE(review): eval() on a network payload is dangerous; acceptable
        # only if the broker and publishers are fully trusted.
        handler = self.callbacks[msg.topic]
        handler(eval(msg.payload.decode()))

    def register_handler(self, topic, handler):
        self.callbacks[topic] = handler
        self.client.subscribe(topic)
        print(f'Register {handler} to topic "{topic}"')

    def disconnect(self):
        self.client.disconnect()
def dist_unit_converter(raw_value, raw_unit, return_unit, speed=None):
    """Convert a distance between length units ('m', 'km') and travel-time
    units ('sec', 'min', 'h').

    speed is the travel speed in km/h and is only required when converting
    between a length unit and a time unit.

    BUG FIXES vs. the original:
    - `if not dist_in_km` / `if not dist_in_h` treated a legitimate 0 value as
      missing, recomputing from None and crashing; use `is None` checks.
    - `speed` was required even for same-domain conversions (e.g. m -> km
      raised TypeError without it); the cross-domain conversion now happens
      only when actually needed.
    """
    valid_units = ('m', 'km', 'sec', 'min', 'h')
    assert raw_unit in valid_units
    assert return_unit in valid_units
    dist_in_km = None
    dist_in_h = None
    # Normalize the input into km (length units) or hours (time units).
    if raw_unit == 'm':
        dist_in_km = raw_value / 1000
    elif raw_unit == 'km':
        dist_in_km = raw_value
    elif raw_unit == 'sec':
        dist_in_h = raw_value / 3600
    elif raw_unit == 'min':
        dist_in_h = raw_value / 60
    else:  # 'h'
        dist_in_h = raw_value
    # Cross the length/time boundary only when the target domain requires it.
    needs_length = return_unit in ('m', 'km')
    if needs_length and dist_in_km is None:
        dist_in_km = dist_in_h * speed
    if not needs_length and dist_in_h is None:
        dist_in_h = dist_in_km / speed
    if return_unit == 'm':
        return dist_in_km * 1000
    if return_unit == 'km':
        return dist_in_km
    if return_unit == 'h':
        return dist_in_h
    if return_unit == 'min':
        return dist_in_h * 60
    return dist_in_h * 3600  # 'sec'
from geodata_toolbox import *
from population_toolbox import Population, Person
def main():
    """Load POI / building / land-use layers and the base population,
    aggregate them onto H3 cells, and start the UDP table handler."""
    resolution = 11

    def area_sum(codes):
        # Build an {"<code>_area": "sum"} aggregation spec for make_h3_stats.
        return {f'{code}_area': 'sum' for code in codes}

    POIs = PointGeoData(name='pois', src_geojson_path='poi_LBCS.geojson')
    POIs.make_h3_stats(resolution=resolution,
                       agg_attrs=area_sum(['2100', '2200', '2500', '5300',
                                           '6200', '6510', '6560']))
    Buildings = PolygonGeoData(name='buildings',
                               src_geojson_path='building_LBCS.geojson')
    Buildings.make_h3_stats(resolution,
                            agg_attrs=area_sum(['2100', '2200', '2500', '5300',
                                                '6200', '6510', '6560', '1100',
                                                '2400', '3000', '3600', '4100',
                                                '4200', '4242', '4300', '5100',
                                                '5200', '5500', '6100', '6400',
                                                '6530']))
    LU = PolygonGeoData(name='landuse', src_geojson_path='Land_LBCS.geojson')
    LU.make_h3_stats(resolution,
                     agg_attrs=area_sum(['3600', '5000', '5500', '4100', '9000']))
    print('Data loaded')
    Pop = Population('population', 'base_pop.geojson', None,
                     'shenzhen', proj_crs=4547, person_attrs=[])
    Pop.set_base_sim_population(resolution)
    print('Population loaded')
    H3 = H3Grids(resolution)
    # Wrap the simulated home-population counts per cell as {'tt_pop': n}.
    pop_stats = {cell: {'tt_pop': count}
                 for cell, count in Pop.h3_count_sim_pop['home'][resolution].items()}
    h3_stats = H3.combine_h3_stats([Buildings.h3_stats[resolution],
                                    POIs.h3_stats[resolution],
                                    LU.h3_stats[resolution],
                                    pop_stats])
    T = TableGrids('table', resolution, H3=H3,
                   src_geojson_path='grid1_4326.geojson',
                   table='shenzhen',
                   proj_crs=4546)
    print('Tables initiated')
    H = Handler(udp_receiver_ip='127.0.0.1',
                udp_receiver_port=15800,
                udp_sender_ip='127.0.0.1',
                udp_sender_port=15801)
    print('Handler initialized')
    H.add_table(T)
    H.run()
if __name__ == '__main__':
main() | csl-hcmc/SaiGon-Peninsula | Software/L3_SZ_CityScope-main/backend/indicator_toolbox.py | indicator_toolbox.py | py | 13,392 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "h3.api.numpy_int.edge_length",
"line_number": 22,
"usage_type": "call"
},
{
"api_name": "h3.api.numpy_int",
"line_number": 22,
"usage_type": "name"
},
{
"api_name": "h3.api.numpy_int.point_dist",
"line_number": 28,
"usage_type": "call"
},
{
"api_nam... |
20047483491 | import subprocess
from sys import exit
from itertools import combinations, permutations
from math import floor, ceil
# Facer lista de nxn coas reglas, para obter as filas e as columnas das regras
# Logo, con esto facemos as permutacions de 4 para a segunda regra
def filasColumnas(lista):
    """Return (columns, rows) of an n x n rule grid.

    :param lista: n x n list with the puzzle rules
    :return: tuple (columns, rows); rows is the input list itself
    """
    columnas = [[row[i] for row in lista] for i in range(n)]
    return columnas, lista
def list_to_nxn(lista):
    """Reshape the flat rules list into an n x n grid of 1-based values.

    :param lista: flat list of rule variables
    :return: n x n list where cell (i, j) holds lista[celda(i, j)] + 1
    """
    return [[lista[celda(i, j)] + 1 for j in range(n)] for i in range(n)]
def empty_list():
    """Return an n x n grid of zeros.

    Uses a comprehension instead of the original nested append loops;
    the inner [0] * n is safe because ints are immutable.
    """
    return [[0] * n for _ in range(n)]
def line_prepender(filename, line):
    """Prepend *line* (newline-terminated) to an existing file in place.

    Used to write the DIMACS "p cnf <vars> <clauses>" header once the
    clause count is known.

    :param filename: path of the file to rewrite
    :param line: header line; any trailing CR/LF is normalized to one '\\n'
    """
    with open(filename, 'r+') as fh:
        original = fh.read()
        fh.seek(0, 0)
        fh.write(line.rstrip('\r\n') + '\n' + original)
def celda(i, j):
    # Map grid coordinates (i, j) to the flat 0-based variable index.
    c = i * n + j
    return c
def fila(v):
    # Row index of flat variable v (inverse of celda, first coordinate).
    i = floor(v/n)
    return i
def columna(v):
    """Column index of flat variable v (inverse of celda, second coordinate)."""
    return v % n
# Open the input file for reading
f = open ('input.txt' , 'r')
# Store the input values as a list of lists (one list of characters per line)
l = [[elem for elem in line] for line in f]
# Strip the newline characters from the list. INEFFICIENT because it makes a
# copy of the list
s = []
for elem in l:
    try:
        elem.remove("\n")
        s.append(elem)
    except:
        s.append(elem)
#################
# Row/column size of the problem. First join the digits of the first list
# (the row/column count) into a string, then convert it to int
tFila = ''.join(s[0])
n = int(tFila)
# Drop the row/column count line since it is no longer needed
s.remove(s[0])
# Print the initial problem
for elem in s:
    print(*elem)
# Number of variables:
nvars = n*n
# Number of clauses
npreds = 0
# Open the output file (DIMACS CNF clauses are appended here)
text_file = open("Output.txt", "w")
# Emit unit clauses for the cells that are already filled in, and
# collect every cell variable in a flat list
rulesList = []
for i in range(0, n):
    for j in range(0, n):
        var = celda(i, j)
        rulesList.append(var)
        if s[i][j] == '1':
            text_file.write("{0} 0\n".format(var + 1))
            npreds += 1
        elif s[i][j] == '0':
            text_file.write("-{0} 0\n".format(var + 1))
            npreds += 1
# Reshape the flat rules list into an n x n list of lists
rulesList_nxn = list_to_nxn(rulesList)
##############################################################################
########################## Game rules ########################################
##############################################################################
# RULE No 1 => There cannot be 3 consecutive cells of the same colour
# ¬(p ^ q ^ r)
# ==
# ¬p ∨ ¬q ∨ ¬r
for i in range(0, n):
    for j in range(0, n):
        var = celda(i, j)
        # Check the two row neighbours to the right
        if j + 2 < n:
            varY2 = celda(i, j + 1)
            varY3 = celda(i, j + 2)
            text_file.write("{0} {1} {2} 0\n".format(
                var + 1, varY2 + 1, varY3 + 1))
            text_file.write("-{0} -{1} -{2} 0\n".format(
                var + 1, varY2 + 1, varY3 + 1))
            npreds += 2
        # Check the two column neighbours below
        if i + 2 < n:
            varX2 = celda(i + 1, j)
            varX3 = celda(i + 2, j)
            text_file.write("{0} {1} {2} 0\n".format(
                var + 1, varX2 + 1, varX3 + 1))
            text_file.write("-{0} -{1} -{2} 0\n".format(
                var + 1, varX2 + 1, varX3 + 1))
            npreds += 2
# RULE 2 => Each row and column must contain the same number of each colour
# p ^ q ^ r --> ¬s
# ==
# ¬p ∨ ¬q ∨ ¬r ∨ ¬sº
# Get the rows and columns of the rules grid
verticais, laterais = filasColumnas(rulesList_nxn)
# For each row, take the combinations of (n/2 + 1) elements
for lat in laterais:
    for row in combinations(lat, int(n/2 + 1)):
        regraBlancas = ""
        regraNegras = ""
        for item in row:
            regraBlancas += str(item) + " "
            regraNegras+= "-"+ str(item) + " "
        regraBlancas += "0\n"
        regraNegras += "0\n"
        text_file.write(regraBlancas)
        text_file.write(regraNegras)
        npreds += 2
for ver in verticais:
    for column in combinations(ver, int(n/2 + 1)):
        regraBlancas = ""
        regraNegras = ""
        for item2 in column:
            regraBlancas += str(item2) + " "
            regraNegras+= "-"+ str(item2) + " "
        regraBlancas += "0\n"
        regraNegras += "0\n"
        text_file.write(regraBlancas)
        text_file.write(regraNegras)
        npreds += 2
# RULE 3 => There cannot be repeated rows/columns
# (auxiliary variables p encode per-cell equality between two lines)
for fila1 in laterais:
    for fila2 in laterais:
        listaPs = []
        regraP = ""
        if fila1 != fila2:
            # NOTE: this inner check duplicates the one above (kept as-is)
            if fila1 != fila2:
                for i in range(0, len(fila1)):
                    nvars += 1
                    text_file.write("-{0} {1} {2} 0\n".format(nvars, fila1[i], fila2[i]))
                    text_file.write("-{0} -{1} -{2} 0\n".format(nvars, fila1[i], fila2[i]))
                    text_file.write("-{0} {1} {2} 0\n".format(fila1[i], fila2[i], nvars))
                    text_file.write("{0} -{1} {2} 0\n".format(fila1[i], fila2[i], nvars))
                    npreds += 4
                    listaPs.append(nvars)
                for elem in listaPs:
                    regraP += str(elem) + " "
                regraP += " 0\n"
                text_file.write(regraP)
                npreds += 1
for col1 in verticais:
    for col2 in verticais:
        listaPs = []
        regraP = ""
        if col1 != col2:
            for i in range(0, len(col1)):
                nvars += 1
                text_file.write("-{0} {1} {2} 0\n".format(nvars, col1[i], col2[i]))
                text_file.write("-{0} -{1} -{2} 0\n".format(nvars, col1[i], col2[i]))
                text_file.write("-{0} {1} {2} 0\n".format(col1[i], col2[i], nvars))
                text_file.write("{0} -{1} {2} 0\n".format(col1[i], col2[i], nvars))
                npreds += 4
                listaPs.append(nvars)
            for elem in listaPs:
                regraP += str(elem) + " "
            regraP += " 0\n"
            text_file.write(regraP)
            npreds += 1
######################################################################
######################################################################
######################################################################
text_file.close()
# Prepend the SAT "p cnf <vars> <clauses>" header line to the file
line_prepender("Output.txt", "p cnf {0} {1}\n".format(nvars, npreds))
# Call Clasp
out = subprocess.Popen(['clasp', '--verbose=0', 'Output.txt'],
                       stdout=subprocess.PIPE,
                       stderr=subprocess.STDOUT)
# Process the Clasp output
claspOutput = out.communicate()
claspError = claspOutput[1]
claspPrint = str(claspOutput[0])
claspPrint = claspPrint.split(" ")
# Remove unwanted trailing tokens from the output
claspPrint.pop()
claspPrint.pop()
try:
    del claspPrint[0]
except:
    print("UNSATISFIABLE\n")
    exit(0)
# Turn the clasp output from a string into a list of numbers
output = []
for elem in claspPrint:
    try:
        output.append(int(elem))
    except:
        # If there is a line break, it is stripped here
        try:
            output.append(int(elem[:-3]))
        except:
            # Print the error in an unexpected case
            print(claspOutput[0])
            print("\n")
# Convert the clasp model into the matrix of 1s and 0s
visitados = []
result = empty_list()
for pos in output:
    if abs(pos) <= n*n:
        num = rulesList[abs(pos) - 1]
        x = fila(num)
        y = columna(num)
        if (x, y) not in visitados:
            visitados.append((x, y))
            visitados.append((x, y))
            if pos > 0:
                result[x][y] = 1
            elif pos < 0:
                result[x][y] = 0
for elem in result:
    print(*elem, sep="")
# Print the matrix exactly in the requested format
with open('result.txt', 'w') as f:
for elem in result:
print(*elem, sep="", file=f) | AlejandroFernandezLuces/practicas-SAT | binairo.py | binairo.py | py | 8,893 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "math.floor",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 172,
"usage_type": "call"
},
{
"api_name": "itertools.combinations",
"line_number": 184,
"usage_type": "call"
},
{
"api_name": "subprocess... |
23533446347 | from typing import List
class UnionFind:
    """Disjoint-set (union-find) structure with path compression.

    Tracks which 1-based node ids belong to the same connected component.
    """

    def __init__(self):
        super().__init__()
        # parent[i] is the representative parent of node i (index 0 unused).
        self.parent: List[int] = []

    def union(self, node_count: int, edge_list: List[List[int]]) -> None:
        """Build the forest for nodes 1..node_count and merge along edges.

        Args:
            node_count: number of nodes (ids are 1-based).
            edge_list: list of [a, b] edges to union.
        """
        # Every node starts as its own representative.
        self.parent = list(range(node_count + 1))
        for a, b in edge_list:
            root_a = self.__find(a)
            root_b = self.__find(b)
            # Only link when the two nodes are in different components.
            if root_a != root_b:
                self.parent[root_b] = root_a

    def __find(self, idx: int) -> int:
        """Return the representative of idx, compressing the path on the way up."""
        if self.parent[idx] != idx:
            self.parent[idx] = self.__find(self.parent[idx])
        return self.parent[idx]

    def check_same(self, first: int, second: int) -> bool:
        """Return True when both nodes share the same representative."""
        return self.__find(first) == self.__find(second)
if __name__ == "__main__":
    # Demo: 7 nodes, union along the edges, then answer same-component queries.
    N = 7
    edge_list = [[1, 3], [7, 6], [3, 7], [4, 2], [1, 1]]
    q_list = [[1, 7], [1, 5], [2, 6]]
    obj = UnionFind()
    obj.union(N, edge_list)
    # Print YES when the pair is in one component, NO otherwise.
    for q in q_list:
        result = obj.check_same(q[0], q[1])
        if result:
            print("YES")
        else:
            print("NO")
| jinyul80/algorithm_study | python/graph/union_find.py | union_find.py | py | 2,253 | python | ko | code | 0 | github-code | 1 | [
{
"api_name": "typing.List",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "typing.List",
"line_number": 13,
"usage_type": "name"
}
] |
41291806836 | import boto3
from botocore.exceptions import ClientError
import json
from botocore.vendored import requests
# Module-level AWS handles and mail constants (created once per Lambda container).
dynamodb = boto3.resource('dynamodb', region_name='us-east-1', endpoint_url="https://dynamodb.us-east-1.amazonaws.com")
subscriptions_table = dynamodb.Table('test')
AWS_REGION = "us-east-1"  # region used for the SES client
SUBJECT = "newsApp - newsletter"  # e-mail subject line
CHARSET = "UTF-8"  # charset for both HTML and text bodies
def handler(json_input, context):
    """AWS Lambda entry point: e-mail the latest news to every subscriber."""
    # Fetch every subscriber's e-mail address from DynamoDB.
    scan_result = subscriptions_table.scan(AttributesToGet=['email'])
    items = scan_result['Items']
    news = get_news()
    # Send one newsletter per subscriber.
    print(len(items))
    for item in items:
        send_newsletter_report_to_address(item['email'], news)
def send_newsletter_report_to_address(email_address, news):
    """Render the newsletter for *news* and send it to one address via SES.

    SES errors are logged and swallowed so one bad address does not abort
    the whole subscriber loop. (The original's needless ``global`` statement
    was removed -- the module constants are only read here.)
    """
    body_html, body_text = compose_report_body(news)
    client = boto3.client('ses', region_name=AWS_REGION)
    try:
        response = client.send_email(
            Destination={
                'ToAddresses': [
                    email_address,
                ],
            },
            Message={
                'Body': {
                    'Html': {
                        'Charset': CHARSET,
                        'Data': body_html,
                    },
                    'Text': {
                        'Charset': CHARSET,
                        'Data': body_text,
                    },
                },
                'Subject': {
                    'Charset': CHARSET,
                    'Data': SUBJECT,
                },
            },
            Source="allexandra.gadioi@gmail.com",
        )
    except ClientError as e:
        print(e.response['Error']['Message'])
    else:
        # BUG FIX: removed the stray trailing comma after print(...), a
        # Python 2 leftover that built and discarded a 1-tuple.
        print("Email sent! Message ID:")
        print(response['ResponseMetadata']['RequestId'])
# receives news, returns the html body of the email message to be sent
# (and the text body, for non-HTML email clients)
def compose_report_body(news):
    """Build the newsletter body.

    :param news: list of article dicts with keys ``title``, ``publishedDate``,
        ``link``, ``summary`` and ``content``.
    :return: tuple (html_body, text_body).
    """
    text_body = "text"
    html_body = """<html>
    <head>
    <style>
    </style>
    </head>
    <body>
    <h3>newsApp - newsletter</h3>
    <p>Most recent news:</p>
    """
    # str.join instead of the original repeated += (quadratic concatenation).
    articles = [
        "<h1>" + article['title'] + "</h1><small>Publish date: " +
        article['publishedDate'] + "</small><br><a href=" + article['link'] +
        ">go to article</a><h3>" + article['summary'] +
        "</h3><p>" + article['content'] + "</p>"
        for article in news
    ]
    html_body += "".join(articles)
    html_body += "</body></html>"
    return html_body, text_body
def get_news():
    """Fetch the article list from the news backend and return it parsed.

    Uses the standard library: ``botocore.vendored.requests`` (used by the
    original) was removed from botocore (>= 1.13) and fails at runtime.
    """
    import urllib.request
    url = 'http://newsapp.syvh7ndmiz.eu-central-1.elasticbeanstalk.com/news'
    with urllib.request.urlopen(url) as response:
        return json.loads(response.read().decode('utf-8'))
| allexg/newsApp | email-newsletter-Lambda/lambda_function.py | lambda_function.py | py | 2,691 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "boto3.resource",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "boto3.client",
"line_number": 29,
"usage_type": "call"
},
{
"api_name": "botocore.exceptions.ClientError",
"line_number": 55,
"usage_type": "name"
},
{
"api_name": "botocore.v... |
32534904655 | # main source file
import os
from .utils import update_poe_env, check_config
import poe
class Poe:
    """Thin terminal chat wrapper around a poe.com client."""

    def __init__(self, bot: str = "capybara"):
        # Token comes from local config; proxy is optional via env var.
        self.poe_token = check_config()
        self.poe_proxy = os.getenv('POE_PROXY')
        self.poe_client = poe.Client(self.poe_token, proxy=self.poe_proxy)
        self.message_queue = []
        self.bot_name = bot  # stray semicolon removed

    def single_chat(self, msg: str, with_chat_break=True):
        """Send one message and stream the reply to stdout as it arrives."""
        for chunk in self.poe_client.send_message(self.bot_name, msg, with_chat_break=with_chat_break):
            print(chunk['text_new'], end="", flush=True)

    def continus_chat(self):
        """Interactive REPL: read multi-line questions until an empty one quits.

        A question is terminated by an empty input line; an empty question
        exits the loop.
        """
        while True:
            question = ""
            print('🍄> ', end='')
            while True:
                tmp = input("")
                tmp = tmp.strip()
                if tmp == '':
                    break
                question += tmp
            if question == "":
                break
            print("\n😊> ", end='')
            try:
                self.single_chat(question, with_chat_break=False)
            except Exception:
                # Narrowed from a bare except (which also swallowed
                # KeyboardInterrupt/SystemExit); best-effort retry message.
                print("好像网络出了点问题,重新发一下吧~🙌", end='', flush=True)
            print("\n")
        print(f"Hope this journey with {self.bot_name} can help you!")
        return
| Mushrr/poe_terminal_chat | poe_terminal_chat/poeterminal.py | poeterminal.py | py | 1,322 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "utils.check_config",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "os.getenv",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "poe.Client",
"line_number": 11,
"usage_type": "call"
}
] |
14240741723 | import gym
import gymnasium
def gym_space_migration(gym_space: gym.Space) -> gymnasium.Space:
    """Convert a legacy ``gym`` space into the equivalent ``gymnasium`` space.

    Dict and Tuple spaces are converted recursively.

    Raises:
        NotImplementedError: for space types without a conversion rule
            (the original silently fell through and returned None).
    """
    if isinstance(gym_space, gym.spaces.Discrete):
        return gymnasium.spaces.Discrete(gym_space.n)
    elif isinstance(gym_space, gym.spaces.Box):
        return gymnasium.spaces.Box(
            low=gym_space.low,
            high=gym_space.high,
            shape=gym_space.shape,
            dtype=gym_space.dtype
        )
    elif isinstance(gym_space, gym.spaces.Dict):
        migrated_dict = {key: gym_space_migration(space) for key, space in gym_space.spaces.items()}
        return gymnasium.spaces.Dict(migrated_dict)
    elif isinstance(gym_space, gym.spaces.MultiDiscrete):
        return gymnasium.spaces.MultiDiscrete(gym_space.nvec)
    elif isinstance(gym_space, gym.spaces.Tuple):
        migrated_spaces = tuple(gym_space_migration(space) for space in gym_space.spaces)
        return gymnasium.spaces.Tuple(migrated_spaces)
    # BUG FIX: falling off the end used to return None silently.
    raise NotImplementedError(f"No migration rule for space type {type(gym_space).__name__}")
def pretty_print_configs(config):
    """Return a small dict summarizing the key fields of a config object."""
    summary = {}
    summary["env"] = config.env
    summary["framework"] = config.framework_str
    summary["gamma"] = config.gamma
    return summary
| DevSlem/recommender-system-rl-tutorial | recsim_env/utils.py | utils.py | py | 1,103 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "gym.Space",
"line_number": 4,
"usage_type": "attribute"
},
{
"api_name": "gym.spaces",
"line_number": 5,
"usage_type": "attribute"
},
{
"api_name": "gymnasium.spaces.Discrete",
"line_number": 6,
"usage_type": "call"
},
{
"api_name": "gymnasium.space... |
16565983104 | import keras
import numpy as np
import matplotlib.pyplot as plt
from keras.models import Sequential
# Dense 全连接层,Activation 激活函数
from keras.layers import Dense, Activation
# 优化器
from keras.optimizers import SGD
def replace_char(string, char, index):
    """Return *string* with the character at *index* replaced by *char*."""
    chars = list(string)
    chars[index] = char
    return ''.join(chars)
# Generate 200 evenly spaced sample points with Gaussian noise
x_data = np.linspace(-0.5, 0.5, 200)
noise = np.random.normal(0, 0.02, x_data.shape)
# square(x) plus noise gives a noisy parabola as the regression target
y_data = np.square(x_data) + noise
plt.scatter(x_data, y_data)
plt.show()
# Build a sequential model
model = Sequential()
# Add a fully connected layer to the model
# Architecture 1-10-1:
# hidden layer with 10 units, fed by a single input feature
model.add(Dense(units=10, input_dim=1))
# There is no default activation, so specify one; tanh usually works better
# than sigmoid here
model.add(Activation('tanh'))
# The following would work too:
# model.add(Dense(units=10, input_dim=1,activate='relu'))
# Output layer: the previous layer size is inferred, only units is needed
model.add(Dense(units=1))
# Add the activation function
model.add(Activation('tanh'))
# 'sgd' is stochastic gradient descent with a small default lr (~0.01);
# loss 'mse' is mean squared error
# model.compile(optimizer='sgd', loss='mse')
# To change the learning rate, import SGD from keras.optimizers
# Define the optimizer
sgd = SGD(lr=0.3)
# Install the optimizer into the network (the compile above is commented out)
model.compile(optimizer=sgd, loss='mse')
progress = '[..............................]'
for step in range(3000):
    cost = model.train_on_batch(x_data, y_data)
    # Every 100 steps, advance the ASCII progress bar and report the loss
    if step % 100 == 0:
        progress = replace_char(progress, "=", progress.index("."))
        print(progress)
        print('cost', cost)
W, b = model.layers[0].get_weights()
print('W', W, 'b', b)
# Feed x_data through the network to get predictions
y_pred = model.predict(x_data)
# Show the fitted curve
# plt.scatter(x_data, y_data, c='r')
plt.plot(x_data, y_pred, "r-")
plt.show()
| 1060807523/GitRepositories | code/Mask_RCNN-master/samples/drive/Nonlinear_regression.py | Nonlinear_regression.py | py | 2,013 | python | zh | code | 0 | github-code | 1 | [
{
"api_name": "numpy.linspace",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "numpy.random.normal",
"line_number": 19,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 19,
"usage_type": "attribute"
},
{
"api_name": "numpy.square",
... |
74095797154 | import sys
import os
import re
import numpy as np
import pandas as pd
from stock.utils.symbol_util import get_stock_symbols, get_archived_trading_dates, exsymbol_to_symbol
from stock.marketdata.storefactory import get_store
from stock.lib.finance import get_lrb_data
from sklearn import linear_model
import matplotlib.pyplot as plt
from config import store_type
from stock.lib.finance import load_stock_basics
def get_slope(array):
    """Least-squares slope of *array* regressed against its index (0..len-1).

    Equivalent to ``LinearRegression().fit(...).coef_[0]`` in the original,
    but uses numpy.polyfit directly instead of building an estimator object
    per call.
    """
    x = np.arange(len(array))
    return float(np.polyfit(x, array, 1)[0])
def get_quarter_profit(df):
    """Add a per-quarter ``profit`` column derived from cumulative ``yylr``.

    ``yylr`` is cumulative within a calendar year, so a quarter's own profit
    is the difference from the previous row -- except for Q1 rows
    (month <= 3), where the cumulative value *is* the quarterly value.

    The first row cannot be diffed against a previous row, so it gets NaN
    unless it is a Q1 row.  BUG FIX: the original fell through after setting
    NaN and computed ``df.iloc[i-1]`` with i == 0, i.e. wrapped around and
    diffed the first row against the LAST row.
    """
    for i in range(len(df)):
        date = df.index[i]
        dt = date.to_pydatetime()
        if dt.month > 3:
            if i == 0:
                # No previous quarter available to diff against.
                df.loc[date, "profit"] = np.nan
            else:
                df.loc[date, "profit"] = df.iloc[i].yylr - df.iloc[i-1].yylr
        else:
            # Q1: cumulative value equals the quarterly value.
            df.loc[date, "profit"] = df.iloc[i].yylr
    return df
def plot_profit(exsymbol):
    """Plot the quarterly profit series of one stock."""
    income = get_lrb_data(exsymbol)
    get_quarter_profit(income)
    plt.plot(income.index, income["profit"])
    plt.show()
def generate_middle():
    """Screen all stocks as of a fixed date and dump candidates to /tmp/pe.csv.

    Keeps stocks with 16 consecutive positive quarterly profits, rising
    yearly profit lows, and rising annual profits, then records growth slope,
    PE, market cap and past/future price returns.
    """
    store = get_store(store_type)
    exsymbols = store.get_stock_exsymbols()
    date = "2017-04-28"
    df_res = pd.DataFrame(columns=["exsymbol", "incr_ratio", "pe", "mcap", "past_profit", "future_profit"])
    for exsymbol in exsymbols:
        try:
            # Income statement up to the cutoff date, with quarterly profits.
            df_lrb = get_lrb_data(exsymbol).loc[:date]
            get_quarter_profit(df_lrb)
            if len(df_lrb) < 16:
                continue
            # Last three profits observed for each quarter label.
            profits_q1 = df_lrb[df_lrb.quarter=="Q1"].profit.iloc[-3:]
            profits_q2 = df_lrb[df_lrb.quarter=="Q2"].profit.iloc[-3:]
            profits_q3 = df_lrb[df_lrb.quarter=="Q3"].profit.iloc[-3:]
            profits_q4 = df_lrb[df_lrb.quarter=="Q4"].profit.iloc[-3:]
            year_profits = df_lrb[df_lrb.quarter=="Q4"].yylr
            if len(profits_q1) < 3 or \
                len(profits_q2) < 3 or \
                len(profits_q3) < 3 or \
                len(profits_q4) < 3:
                continue
            # Require 16 complete, strictly positive quarterly profits.
            if df_lrb.iloc[-16:].profit.isnull().any():
                continue
            if np.sum(df_lrb.iloc[-16:].profit <= 0) > 0:
                continue
            # Yearly profit lows must be non-decreasing across 2014-2016.
            low1 = df_lrb.loc["2014-01-01":"2014-12-31"].profit.min()
            low2 = df_lrb.loc["2015-01-01":"2015-12-31"].profit.min()
            low3 = df_lrb.loc["2016-01-01":"2016-12-31"].profit.min()
            if low3 < low2 or low2 < low1:
                continue
            #if profits_q1[-1] < profits_q1[-2] or profits_q1[-2] < profits_q1[-3] or profits_q1[-3] < profits_q1[-4]:
            #    continue
            #if profits_q2[-1] < profits_q2[-2] or profits_q2[-2] < profits_q2[-3] or profits_q2[-3] < profits_q2[-4]:
            #    continue
            #if profits_q3[-1] < profits_q3[-2] or profits_q3[-2] < profits_q3[-3] or profits_q3[-3] < profits_q3[-4]:
            #    continue
            #if profits_q4[-1] < profits_q4[-2] or profits_q4[-2] < profits_q4[-3] or profits_q4[-3] < profits_q4[-4]:
            #    continue
            df = store.get(exsymbol)
            if date not in df.index:
                continue
            # Annual profits must be strictly increasing for three years.
            if year_profits[-1] > year_profits[-2] and year_profits[-2] > year_profits[-3]:
                price = df.loc[date].close
                df_basics = load_stock_basics()
                total_shares = df_basics.loc[exsymbol, "totals"]
                mcap = total_shares * price
                slope = get_slope(np.log(df_lrb.profit[-16:]))
                pe = mcap / year_profits[-1] * 10000
                # Return after the cutoff date, and return since 2017-01-01.
                future_profit = df.iloc[-1].close / price - 1
                past_profit = price / df.loc["2017-01-01":].iloc[0].close - 1
                df_res.loc[len(df_res)] = [exsymbol, slope, pe, mcap, past_profit, future_profit]
        except Exception as e:
            # Best-effort screening: log and continue with the next symbol.
            print(str(e))
    df_res.to_csv("/tmp/pe.csv")
def parse_middle(filepath="/tmp/pe.csv"):
    """Load the intermediate CSV, score each row, and print large-cap rows."""
    frame = pd.read_csv(filepath, encoding="utf-8")
    frame["score"] = frame.incr_ratio / frame.pe
    large_caps = frame[frame.mcap > 300]
    print(large_caps.sort_values(["incr_ratio"]))
if __name__ == "__main__":
    #generate_middle()
    #parse_middle()
    # Plot the last 16 quarterly profits of the exsymbol given on the CLI.
    PLOT = True
    if PLOT:
        df_lrb = get_lrb_data(sys.argv[1])
        get_quarter_profit(df_lrb)
        print(df_lrb["yylr"])
        plt.plot(df_lrb.index[-16:], df_lrb.profit[-16:])
        #plt.axhline(y=0.0, c='r')
        plt.show()
| shenzhongqiang/cnstock_py | stock/quant/pe.py | pe.py | py | 4,429 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "numpy.array",
"line_number": 17,
"usage_type": "call"
},
{
"api_name":... |
21130305160 | from db import DB
from crawl import Crawl
from goods import Goods
from mail import Mail
import time
import sched
class Monitor:
    """Price monitor: polls tracked goods on a schedule and e-mails alerts."""

    def __init__(self, email='1656704949@qq.com', rate=60, note=60 * 60):
        self.scheduler = sched.scheduler(time.time, time.sleep)
        self.goods_dict = {}
        self.db = DB()
        self.crawl = Crawl()
        self.mail = Mail()
        self.email = [email]  # recipient e-mail address(es)
        self.rate = rate  # refresh interval (seconds)
        self.note = note  # minimum interval between notifications (seconds)
        # Load persisted goods from the database
        result = self.db.query()
        print('----------加载数据----------')
        for id, item in result.items():
            self.goods_dict[id] = Goods(item['id'], item['want'], item['status'])
            print(self.goods_dict[id].__dict__)
        print('----------加载完成----------')

    # Add a new item to track; returns False when the id already exists.
    def add(self, id, want, status=True):
        if id not in self.goods_dict.keys():
            self.db.add(id, want, status)
            goods = Goods(id, want, status)
            name, price, date = self.crawl.get(id)
            goods.update(name, price, date)
            self.goods_dict[id] = goods
            print(self.goods_dict[id].__dict__)
            return True
        else:
            return False

    # Remove a tracked item (memory and database); False when unknown.
    def remove(self, id):
        if id in self.goods_dict.keys():
            self.goods_dict.pop(id)
            self.db.delete(id)
            return True
        else:
            return False

    # Update the desired (target) price of a tracked item.
    def update_want(self, id, want):
        if id in self.goods_dict.keys():
            self.goods_dict[id].update_want(want)
            self.goods_dict[id].update_note(0)  # reset the notification timestamp
            self.db.update_want(id, want)
            return True
        else:
            return False

    # Enable/disable monitoring of a tracked item.
    def update_status(self, id, status):
        if id in self.goods_dict.keys():
            self.goods_dict[id].update_status(status)
            self.goods_dict[id].update_note(0)  # reset the notification timestamp
            self.db.update_status(id, status)
            return True
        else:
            return False

    # Fetch the price history for a tracked item; '' when unknown.
    def history(self, id):
        if id in self.goods_dict.keys():
            return self.crawl.get_history(id)
        else:
            return ''

    # Periodic task: refresh prices and send alerts when conditions are met.
    def task(self):
        ids = list(self.goods_dict.keys())
        for id in ids:
            goods = self.goods_dict[id]
            if goods.status:
                name, price, date = self.crawl.get(id)
                if id not in self.goods_dict.keys(): continue  # item may have been deleted mid-crawl
                goods.update(name, price, date)
                ########## check whether an alert should be sent ##########
                # notification interval elapsed AND current price <= target price
                if (date - goods.note >= self.note) and (price <= goods.want):
                    self.mail.send(self.email, name, price, goods.want, goods.url)
                    goods.update_note(date)
        print('----------刷新数据----------')
        for goods in self.goods_dict.values():
            print(goods.__dict__)
        print('----------刷新完成----------')

    # Scheduler step: re-arm itself every `rate` seconds, then run the task.
    def _run(self):
        self.scheduler.enter(self.rate, 0, self._run, ())  # delay, priority, action, argument=()
        self.task()

    # Start the scheduler loop (blocks).
    def run(self):
        self.scheduler.enter(0, 0, self._run, ())  # delay, priority, action, argument=()
        self.scheduler.run()
if __name__ == '__main__':
    # NOTE(review): ids/wants are defined but never passed to the monitor --
    # presumably leftovers from manual testing.
    ids = ['4311178', '4311182', '100002795959', '100004751037', '8797490']
    wants = [219.00, 339.00, 4900.00, 5600.00, 8998.00]
    monitor = Monitor(rate=10)
    monitor.run()
| zhangbincheng1997/mall-monitor | web/monitor.py | monitor.py | py | 3,752 | python | en | code | 10 | github-code | 1 | [
{
"api_name": "sched.scheduler",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "time.time",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "time.sleep",
"line_number": 11,
"usage_type": "attribute"
},
{
"api_name": "db.DB",
"line_numb... |
5170271632 | from os import name
from django.shortcuts import render
from django.http import HttpResponse
from django.core.files.storage import FileSystemStorage
from subprocess import run, PIPE
import sys ,os
import detect
from webs.controller import count,data_label
from webs.controller import banana_ripe_or_raw,guava_ripe_or_raw,papaya_ripe_or_raw
# Create your views here.
def index(request):
    """Render the home page."""
    return render(request, 'home.html')
def home(request):
    """Render the home page (same template as index)."""
    return render(request, 'home.html')
def upimg(request):
    """Render the image-upload form page."""
    return render(request, 'upload.html')
def profile(request):
    """Render the profile page."""
    return render(request, 'profile.html')
def upload(request):
if len(request.FILES) != 0:
request.method == 'POST'
image = request.FILES['image'] or None
fs = FileSystemStorage()
filename = fs.save(image.name, image)
fileurl = fs.open(filename)
templateurl = fs.url(filename)
print("file raw url:", filename)
print("file full url:", fileurl)
print("template url:", templateurl)
image = run([sys.executable,'detect.py',str(fileurl)],shell=False,stdout=PIPE)
file_count = count.countimage()
classes_label = data_label.all_label()
processdata = []
allsend = []
classes = []
classes_label = []
if os.stat("webs/text/data_label.txt").st_size == 0:
send = 'Not'
allsend.append(send)
with open("webs/text/data_label.txt", "r") as f:
classes = [line.strip() for line in f.readlines()]
for x in classes:
if x not in classes_label:
classes_label.append(x)
for i in classes_label:
if i == 'Banana':
send = 'Banana'
allsend.append(send)
data = banana_ripe_or_raw.banana_find_ripe_raw()
processdata.append(data)
elif i == 'Guava':
send = 'Guava'
allsend.append(send)
data = guava_ripe_or_raw.guava_find_ripe_raw()
processdata.append(data)
elif i == 'Papaya':
send = 'Papaya'
allsend.append(send)
data = papaya_ripe_or_raw.papaya_find_ripe_raw()
processdata.append(data)
fruit = processdata
return render(request, 'process.html', {'raw_url': templateurl,
'file_count':file_count,
'allsend': allsend,
'fruit':fruit})
else:
return render(request, 'process.html') | LoneWolf1999-Th/Fruit_Detect | webs/views.py | views.py | py | 2,801 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "django.shortcuts.render",
"line_number": 20,
"usage_type": "call"
},
{
"api_name"... |
290347917 | import requests
import re
import pymysql
from lxml import etree
# Scrape enrollment-regulation sections and reprint them with rebranded text.
header = {
    "User-Agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/51.0.2704.103 Safari/537.36"}
reques = requests.get('http://www.open.com.cn/encrollregulation-3333.html', headers=header)
reques.encoding = 'utf-8'
c2 = reques.text
# print(c2)
# Section headings under intro_title_3 decide which block to extract below.
yb = re.compile('<h2.*?id=aw_school_intro_title_3>(.*?)</h2>', re.S)
yb2 = re.findall(yb, c2)
# print(yp2)
for b in yb2:
    if '报名办法' in b:
        ybj = re.compile('<h2.*?id=aw_school_intro_title_3>.*?</h2>(.*?)<h.*?>', re.S)
        ybs = re.findall(ybj, c2)
        for ys in ybs:
            # Rebrand names/links, strip markup, then join the <p> contents.
            yjsssb = re.sub('奥鹏', '舟炬', ys)
            yjss1 = re.sub('http://www.open.com.cn', 'http://www.zhoujuedu.com', yjsssb)
            yjss2 = re.sub(' |<span.*?>|</span>|<strong>|</strong>|<a>|</a>|<br/>', '', yjss1)
            yjss3 = re.compile('<p.*?>(.*?)</p>', re.S)
            yjss4 = re.findall(yjss3, yjss2)
            yjss5 = ','.join(yjss4)
            print(yjss5)
    elif '入学资格' in b:
        ybj2 = re.compile('<h2.*?id=aw_school_intro_title_3>.*?</h2>(.*?)<h.*?>', re.S)
        ybs2 = re.findall(ybj2, c2)
        for ys in ybs2:
            yjsssb2 = re.sub('奥鹏', '舟炬', ys)
            yjss12 = re.sub('http://www.open.com.cn', 'http://www.zhoujuedu.com', yjsssb2)
            yjss22 = re.sub(' |<span.*?>|</span>|<strong>|</strong>|<a>|</a>|<br/>', '', yjss12)
            yjss32 = re.compile('<p.*?>(.*?)</p>', re.S)
            yjss42 = re.findall(yjss32, yjss22)
            yjss52 = ','.join(yjss42)
            print(yjss52)
    # BUG FIX: the original condition was "'报名办法' or '入学资格' not in b",
    # which is always true because the first operand is a non-empty string.
    # This branch is only reached when neither keyword matched above, so the
    # corrected condition is behaviorally equivalent but no longer misleading.
    elif '报名办法' not in b and '入学资格' not in b:
        s = []
        yj2 = re.compile('<h2.*?id=aw_school_intro_title_4>.*?</h2>(.*?)<h.*?>', re.S)
        yjs2 = re.findall(yj2, c2)  # collect all <p> tags
        for ys2 in yjs2:
            yjs0 = re.sub('奥鹏','舟炬',ys2)
            yjs6 = re.sub('<br/>', '', yjs0)
            yjs4 = re.compile('<p.*?>(.*?)</p>', re.S)
            yjs5 = re.findall(yjs4, yjs6)  # get each <p>'s content and join into a string
            yjs3 = ','.join(yjs5)
            s.append(yjs3)
        yjj2 = re.compile('<h2.*?id=aw_school_intro_title_5>.*?</h2>(.*?)<h.*?>', re.S)
        yjjs2 = re.findall(yjj2, c2)  # collect all <p> tags
        for ysj2 in yjjs2:
            yjs01 = re.sub('奥鹏','舟炬',ysj2)
            yjs61 = re.sub('<br/>', '', yjs01)
            yjs41 = re.compile('<p.*?>(.*?)</p>', re.S)
            yjs51 = re.findall(yjs41, yjs61)  # get each <p>'s content and join into a string
            yjs31 = ','.join(yjs51)
            s.append(yjs31)
        s2 = ','.join(s)
        print(s2)
| zhouf1234/untitled9pachon | 测试10.py | 测试10.py | py | 2,910 | python | es | code | 1 | github-code | 1 | [
{
"api_name": "requests.get",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "re.compile",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "re.S",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "re.findall",
"line_number": 13,
... |
24390060708 | from flask import Flask, make_response
from bson.json_util import dumps
from models import note
import json
import pymongo
import UserDAO
import PlacesDAO
import Place
import WeatherHour
import AlertInfo
import WeatherCamLink
# Flask app plus module-level MongoDB handles and DAOs.
app = Flask(__name__)
app.debug = True
connection_string = "mongodb://localhost"
connection = pymongo.MongoClient(connection_string)
database = connection.blog  # uses the 'blog' database
users = UserDAO.UserDAO(database)
places = PlacesDAO.PlacesDAO(database)
# send in some test data
@app.route("/samples")
def addSamplePlaces():
    """Serve the bundled sample-data JSON fixture."""
    # BUG FIX: close the file handle instead of leaking it.
    with open('tstData.json') as fixture:
        return make_response(fixture.read())
@app.route("/")
def app_endpoint():
    """Serve the main index page."""
    # BUG FIX: close the file handle instead of leaking it.
    with open('templates/index.html') as page:
        return make_response(page.read())
@app.route("/notes")
def get_notes():
    """Return ten placeholder notes as a JSON array of their __dict__s."""
    # Removed the original's unused `list` local (which shadowed the builtin)
    # and replaced Python-2-only `xrange` with `range`.
    notes = []
    for x in range(0, 10):
        n = note()
        n.title = "Title_" + str(x)
        notes.append(n.__dict__)
    return json.dumps(notes)
@app.route("/getPlace/<username>")
def get_place(username):
    """Return the stored places for *username*, BSON-encoded as JSON."""
    return dumps(places.getPlaces(username))
@app.route("/getAlerts/<user_id>")
def get_alerts_for_user(user_id):
    # TODO: unimplemented stub.
    pass
if __name__ == "__main__":
    # Run the Flask development server (debug enabled above).
    app.run()
| theNerd247/twcHackathon | app.py | app.py | py | 1,188 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Flask",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "pymongo.MongoClient",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "UserDAO.UserDAO",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "PlacesDAO.PlacesDAO",... |
25142521036 | from typing import Any, Dict
import argparse
import torch
import torch.nn as nn
import torch.nn.functional as F
CONV_DIM = 64
FC_DIM = 128
IMAGE_SIZE = 28
class ConvBlock(nn.Module):
    """
    Simple 3x3 conv with padding size 1 (to leave the input size unchanged), followed by a ReLU.
    """

    def __init__(self, input_channels: int, output_channels: int) -> None:
        super().__init__()
        self.conv = nn.Conv2d(
            input_channels,
            output_channels,
            kernel_size=3,
            stride=1,
            padding=1,
        )
        self.relu = nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """Apply conv then ReLU; input and output are both (B, C, H, W)."""
        return self.relu(self.conv(x))
# kernel size 3 + padding of 1 maintains the dimensionality of the input to be eq to output so we can go for skip conns here
class ResBlock(nn.Module):
    """Residual block: two 3x3 convs plus a 3x3 projection on the skip path.

    Padding 1 keeps H and W unchanged, so the skip branch can be added
    element-wise to the main branch.
    """

    def __init__(self, input_channels: int, output_channels: int, batch_norm=False) -> None:
        super().__init__()
        # NOTE: conv creation order (conv1, conv2, conv_skip) is preserved so
        # random weight initialization matches the original implementation.
        self.conv1 = nn.Conv2d(input_channels, output_channels, kernel_size=3, stride=1, padding=1)
        self.bn1 = nn.BatchNorm2d(output_channels) if batch_norm else nn.Identity()
        self.conv2 = nn.Conv2d(output_channels, output_channels, kernel_size=3, stride=1, padding=1)
        self.bn2 = nn.BatchNorm2d(output_channels) if batch_norm else nn.Identity()
        self.conv_skip = nn.Conv2d(input_channels, output_channels, kernel_size=3, stride=1, padding=1)
        self.relu1 = nn.ReLU()
        self.relu2 = nn.ReLU()

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """(B, C_in, H, W) -> (B, C_out, H, W)."""
        main = self.relu1(self.bn1(self.conv1(x)))
        main = self.bn2(self.conv2(main))
        residual = self.conv_skip(x)
        return self.relu2(main + residual)
def _block(t: str, d1: int, d2: int) -> nn.Module:
if t == 'cnn':
return ConvBlock(d1, d2)
if t == 'res':
return ResBlock(d1, d2)
if t == 'res_bn':
return ResBlock(d1, d2, batch_norm = True)
raise ValueError()
class CNN(nn.Module):
    """Simple CNN for recognizing characters in a square image."""
    def __init__(self, data_config: Dict[str, Any], args: argparse.Namespace = None,
                 block_type = 'cnn', batch_norm = False) -> None:
        # data_config must provide "input_dims" (C, H, W) and "mapping"
        # (class labels); CLI args override the module-level dim defaults.
        super().__init__()
        self.args = vars(args) if args is not None else {}
        input_dims = data_config["input_dims"]
        num_classes = len(data_config["mapping"])
        conv_dim = self.args.get("conv_dim", CONV_DIM)
        fc_dim = self.args.get("fc_dim", FC_DIM)
        # Two stacked blocks of the requested type (plain conv / residual /
        # residual+BN), built by the _block factory above.
        self.conv1 = _block(block_type, input_dims[0], conv_dim)
        self.conv2 = _block(block_type, conv_dim, conv_dim)
        self.dropout = nn.Dropout(0.25)
        self.max_pool = nn.MaxPool2d(2)
        # Because our 3x3 convs have padding size 1, they leave the input size unchanged.
        # The 2x2 max-pool divides the input size by 2. Flattening squares it.
        conv_output_size = IMAGE_SIZE // 2
        fc_input_dim = int(conv_output_size * conv_output_size * conv_dim)
        self.fc1 = nn.Linear(fc_input_dim, fc_dim)
        self.fc2 = nn.Linear(fc_dim, num_classes)
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        """
        Args:
            x
                (B, C, H, W) tensor, where H and W must equal IMAGE_SIZE
        Returns
        -------
        torch.Tensor
            (B, num_classes) logits
        """
        _B, _C, H, W = x.shape
        assert H == W == IMAGE_SIZE
        x = self.conv1(x)
        x = self.conv2(x)
        x = self.max_pool(x)
        x = self.dropout(x)
        x = torch.flatten(x, 1)
        x = self.fc1(x)
        x = F.relu(x)
        x = self.fc2(x)
        return x
    @staticmethod
    def add_to_argparse(parser):
        # Register the tunable dims so train scripts can override defaults.
        parser.add_argument("--conv_dim", type=int, default=CONV_DIM)
        parser.add_argument("--fc_dim", type=int, default=FC_DIM)
        return parser
| cluePrints/fsdl-text-recognizer-2021-labs | lab2/text_recognizer/models/cnn.py | cnn.py | py | 4,320 | python | en | code | null | github-code | 1 | [
{
"api_name": "torch.nn.Module",
"line_number": 14,
"usage_type": "attribute"
},
{
"api_name": "torch.nn",
"line_number": 14,
"usage_type": "name"
},
{
"api_name": "torch.nn.Conv2d",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "torch.nn",
"line_nu... |
73062309793 | # -*- coding: utf-8 -*-
from mrjob.job import MRJob
from mrjob.protocol import JSONProtocol, RawValueProtocol
from mrjob.step import MRStep
import numpy as np
######################## Helper Methods and Classes ##########################
def cholesky_solution_linear_regression(x_t_x,x_t_y):
    '''
    Solves the normal equations (X'X) * theta = X'y via Cholesky decomposition,
    given sample covariance of explanatory variables and covariance
    between explanatory variable and dependent variable.

    Paramaters:
    -----------
    x_t_x - numpy array of size 'm x m', represents sample covariance of explanatory variables
    x_t_y - numpy array of size 'm x 1', represent covariance between expalanatory and dependent variable

    Output:
    -------
    Theta - list of size m, represents values of coefficients
    '''
    # Factor X'X = L L^T, then solve the two triangular systems
    # L z = X'y followed by L^T theta = z.
    lower = np.linalg.cholesky(x_t_x)
    intermediate = np.linalg.solve(lower, x_t_y)
    return np.linalg.solve(lower.T, intermediate)
class DimensionMismatchError(Exception):
    """Raised when an input row has a different number of features than expected."""

    def __init__(self, expected, observed):
        self.exp = expected
        self.obs = observed

    def __str__(self):
        return "Expected number of dimensions: " + str(self.exp) + ", observed: " + str(self.obs)
############################## Map Reduce Job #################################
class LinearRegressionTS(MRJob):
    '''
    Calculates sample covariance matrix of explanatory variables (x_t_x) and
    vector of covariances between dependent variable and explanatory variables
    (x_t_y) in a single map-reduce pass and then uses Cholesky decomposition to
    obtain values of regression parameters.
    Important!!! Since final computations are performed on single reducer,
    assumption is that dimensionality of data is relatively small i.e. input
    matrix is tall and skinny.
    Input File:
    -----------
    Extract relevant features from input line by changing extract_variables
    method. You can add features for non-linear models (like x^2 or exp(x)).
    Current code assumes following input line format:
    input line = <dependent variable>, <feature_1>,...,<feature_n>
    Options:
    -----------
    -- dimension - (int) number of explanatory variables
    -- bias - (bool) if True regression wil include bias term
    Output:
    -----------
    json-encoded list of parameters
    '''
    INPUT_PROTOCOL = RawValueProtocol
    INTERNAL_PROTOCOL = JSONProtocol
    OUTPUT_PROTOCOL = RawValueProtocol
    def __init__(self,*args, **kwargs):
        super(LinearRegressionTS, self).__init__(*args, **kwargs)
        # Per-mapper accumulators for X'X, X'y and the observation count.
        n = self.options.dimension
        self.x_t_x = np.zeros([n,n])
        self.x_t_y = np.zeros(n)
        self.counts = 0
    #--------------------------- feature extraction --------------------------#
    def extract_variables(self,line):
        ''' (str)--(float,[float,float,float...])
        Extracts set of relevant features. (Needs to be rewriten depending
        on file input structure)
        '''
        data = [float(e) for e in line.strip().split(",")]
        y,features = data[0],data[1:]
        return (y,features)
    #---------------------------- Options ------------------------------------#
    def configure_options(self):
        ''' Additional options'''
        super(LinearRegressionTS,self).configure_options()
        self.add_passthrough_option("--dimension",
                                    type = int,
                                    help = "Number of explanatory variables (do not count bias term)")
        self.add_passthrough_option("--bias",
                                    type = str, # (got error when tried to define bool) ???
                                    help = "Bias term, bias not included if anything other than 'True' ",
                                    default = "True")
    def load_options(self,args):
        ''' Loads and checks whether options are provided'''
        super(LinearRegressionTS,self).load_options(args)
        if self.options.dimension is None:
            self.option_parser.error("You should define number of explanatory variables")
        else:
            self.dim = self.options.dimension
    #------------------------ map-reduce steps -------------------------------#
    def mapper_lr(self,_,line):
        '''
        Calculates x_t_x and x_t_y for data processed by each mapper
        '''
        y,features = self.extract_variables(line)
        if len(features) != self.dim:
            raise DimensionMismatchError(self.dim,len(features))
        # BUG FIX: the original used `is "True"`, an identity comparison with a
        # string literal whose result depends on CPython string interning.
        # Use equality so the bias option is honored reliably.
        if self.options.bias == "True":
            features.append(1.0)
        x = np.array(features)
        self.x_t_x += np.outer(x, x)
        self.x_t_y += y*x
        self.counts += 1
    def mapper_lr_final(self):
        '''
        Transforms numpy arrays x_t_x and x_t_y into json-encodable list format
        and sends to reducer
        '''
        yield 1,("x_t_x", [list(row) for row in self.x_t_x])
        yield 1,("x_t_y", [xy for xy in self.x_t_y])
        yield 1,("counts", self.counts)
    def reducer_lr(self,key,values):
        '''
        Aggregates results produced by each mapper and obtains x_t_x and x_t_y
        for all data, then using cholesky decomposition obtains parameters of
        linear regression.
        '''
        n = self.dim
        # NOTE: `observations` is accumulated but not emitted; kept for parity
        # with the original implementation.
        observations = 0
        x_t_x = np.zeros([n,n]); x_t_y = np.zeros(n)
        for val in values:
            if val[0]=="x_t_x":
                x_t_x += np.array(val[1])
            elif val[0]=="x_t_y":
                x_t_y += np.array(val[1])
            elif val[0]=="counts":
                observations += val[1]
        betas = cholesky_solution_linear_regression(x_t_x,x_t_y)
        yield None,[e for e in betas]
    def steps(self):
        '''Defines map-reduce steps '''
        return [MRStep(mapper = self.mapper_lr,
                       mapper_final = self.mapper_lr_final,
                       reducer = self.reducer_lr)]
if __name__=="__main__":
    # mrjob entry point: parses CLI options and runs the job.
    LinearRegressionTS.run()
| AmazaspShumik/MapReduce-Machine-Learning | Linear Regression MapReduce/LinearRegressionTS.py | LinearRegressionTS.py | py | 6,517 | python | en | code | 22 | github-code | 1 | [
{
"api_name": "numpy.linalg.cholesky",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.linalg",
"line_number": 30,
"usage_type": "attribute"
},
{
"api_name": "numpy.linalg.solve",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "numpy.lina... |
13271941086 | from __future__ import annotations
import json
from pathlib import Path
from typing import TYPE_CHECKING
import pytest
from pyk.kore.parser import KoreParser
from pyk.kore.syntax import And, App, Kore, Or, Pattern, SortVar, kore_term
if TYPE_CHECKING:
from collections.abc import Mapping
from typing import Any, Final
# Static inputs for the parser round-trip tests below.
TEST_DATA_DIR: Final = Path(__file__).parent / 'test-data'
# Definition files expected to parse successfully.
DEFINITION_PASS_KORE_FILES: Final = tuple((TEST_DATA_DIR / 'definitions/pass').iterdir())
# Definition files expected to be rejected (only .kore files count).
DEFINITION_FAIL_KORE_FILES: Final = tuple(
    test_file for test_file in (TEST_DATA_DIR / 'definitions/fail').iterdir() if test_file.suffix == '.kore'
)
PATTERN_FILES: Final = tuple((TEST_DATA_DIR / 'patterns').iterdir())
JSON_FILES: Final = tuple((TEST_DATA_DIR / 'json').iterdir())
# Flatten each JSON file into (file, index, dict) triples for parametrization.
JSON_TEST_DATA: Final = tuple(
    (json_file, i, dct) for json_file in JSON_FILES for i, dct in enumerate(json.loads(json_file.read_text()))
)
@pytest.mark.parametrize(
    'kore_file', DEFINITION_PASS_KORE_FILES, ids=lambda path: path.name
)  # mypy complains on Path.name.fget
def test_parse_definition_pass(kore_file: Path) -> None:
    """Round-trip: parsing a definition's own text yields an equal definition."""
    source = kore_file.read_text()

    first_parser = KoreParser(source)
    first = first_parser.definition()
    second_parser = KoreParser(first.text)
    second = second_parser.definition()

    assert first_parser.eof
    assert second_parser.eof
    assert first == second
@pytest.mark.parametrize('kore_file', DEFINITION_FAIL_KORE_FILES, ids=lambda path: path.name)
def test_parse_definition_fail(kore_file: Path) -> None:
    """Malformed definition files must be rejected with a ValueError."""
    parser = KoreParser(kore_file.read_text())
    with pytest.raises(ValueError):
        parser.definition()
@pytest.mark.parametrize('kore_file', PATTERN_FILES, ids=lambda path: path.name)
def test_parse_pattern(kore_file: Path) -> None:
    """Patterns survive both text and dict round-trips."""
    source = kore_file.read_text()

    first_parser = KoreParser(source)
    parsed = first_parser.pattern()
    second_parser = KoreParser(parsed.text)
    reparsed = second_parser.pattern()
    from_dict = Pattern.from_dict(parsed.dict)

    assert first_parser.eof
    assert second_parser.eof
    assert parsed == reparsed
    assert parsed == from_dict
@pytest.mark.parametrize(
    'json_file,i,dct', JSON_TEST_DATA, ids=[f'{json_file.name}-{i}' for json_file, i, _ in JSON_TEST_DATA]
)
def test_parse_json(json_file: Path, i: int, dct: Mapping[str, Any]) -> None:
    """A JSON-encoded term survives text and JSON round-trips."""
    term: Kore = kore_term(dct)  # TODO type hint should be unnecessary
    parser = KoreParser(term.text)
    from_text = parser.pattern()
    from_json = Kore.from_json(term.json)

    assert parser.eof
    assert term == from_text
    assert term == from_json
# Argumentless application patterns used as leaves in the multi-or cases.
x, y, z = (App(name) for name in ['x', 'y', 'z'])
# Each case: (test id, KORE text of a left-assoc \or, expected disjunct list).
MULTI_OR_TEST_DATA: Final[tuple[tuple[str, str, list[Pattern]], ...]] = (
    ('nullary', r'\left-assoc{}(\or{S}())', []),
    ('unary', r'\left-assoc{}(\or{S}(x{}()))', [x]),
    ('binary', r'\left-assoc{}(\or{S}(x{}(), y{}()))', [x, y]),
    ('multiary', r'\left-assoc{}(\or{S}(x{}(), y{}(), z{}()))', [x, y, z]),
)
@pytest.mark.parametrize(
    'test_id,text,expected',
    MULTI_OR_TEST_DATA,
    ids=[test_id for test_id, *_ in MULTI_OR_TEST_DATA],
)
def test_multi_or(test_id: str, text: str, expected: list[Pattern]) -> None:
    """multi_or() flattens a left-assoc \\or into its disjunct list."""
    parser = KoreParser(text)

    disjuncts = parser.multi_or()

    assert parser.eof
    assert disjuncts == expected
# Shared sort variable and leaf applications for the n-ary connective cases.
S = SortVar('S')
a, b, c, d = (App(name) for name in ['a', 'b', 'c', 'd'])
# Each case: (test id, KORE text, expected parsed And/Or term).
MULTIARY_TEST_DATA: Final = (
    ('nullary-and', r'\and{S}()', And(S, ())),
    ('unary-and', r'\and{S}(a{}())', And(S, (a,))),
    ('binary-and', r'\and{S}(a{}(), b{}())', And(S, (a, b))),
    ('ternary-and', r'\and{S}(a{}(), b{}(), c{}())', And(S, (a, b, c))),
    ('quaternary-and', r'\and{S}(a{}(), b{}(), c{}(), d{}())', And(S, (a, b, c, d))),
    ('nullary-or', r'\or{S}()', Or(S, ())),
    ('unary-or', r'\or{S}(a{}())', Or(S, (a,))),
    ('binary-or', r'\or{S}(a{}(), b{}())', Or(S, (a, b))),
    ('ternary-or', r'\or{S}(a{}(), b{}(), c{}())', Or(S, (a, b, c))),
    ('quaternary-or', r'\or{S}(a{}(), b{}(), c{}(), d{}())', Or(S, (a, b, c, d))),
)
@pytest.mark.parametrize(
    'test_id,text,expected',
    MULTIARY_TEST_DATA,
    ids=[test_id for test_id, *_ in MULTIARY_TEST_DATA],
)
def test_multiary(test_id: str, text: str, expected: list[Pattern]) -> None:
    """N-ary \\and / \\or patterns parse to the expected terms."""
    parser = KoreParser(text)

    term = parser.pattern()

    assert parser.eof
    assert term == expected
| runtimeverification/pyk | src/tests/unit/kore/test_parser.py | test_parser.py | py | 4,551 | python | en | code | 12 | github-code | 1 | [
{
"api_name": "typing.TYPE_CHECKING",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "typing.Final",
"line_number": 16,
"usage_type": "name"
},
{
"api_name": "pathlib.Path",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "typing.Final",
"li... |
25348338231 | # encoding: utf-8
import sys
import os
import io
from managermentFileForqiniu import managerfile
import logging
import datetime
##log
# Append-mode file logging configured once at startup.
logging.basicConfig(level=logging.DEBUG,format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',datefmt='%Y %m %d %H:%M:%S',filename='SyncOperation_main.log',filemode='a')
logging.info("/r/t")
##main
Sync_path='/home/pi/Documents/qiniuclouddir'
now=datetime.datetime.now()
strnow=now.strftime('%Y-%m-%d %H:%M:%S')
logging.info(strnow)
files=os.listdir(Sync_path)
if(len(files)==0):
    # BUG FIX: log BEFORE exiting -- the original called sys.exit() first,
    # which made this log line unreachable.
    logging.info(strnow+"Do not find file in Sync_path.")
    sys.exit()
mf=managerfile()
ret,eof,info=mf.getfileslist(-1)
logging.info(info.status_code)
if(info.status_code!=200):
    # BUG FIX: status_code is numeric, so the original string concatenation
    # raised a TypeError instead of logging the failure.
    logging.error(strnow+"qiniucloud connect fail.statusCode:"+str(info.status_code))
    sys.exit()
remotefile=ret.get("items")
# NOTE(review): this indexes remotefile[0] -- presumably the bucket is never
# empty; verify against the qiniu account this runs with.
logging.info(remotefile[0].get('key'))
for localfile in files:
    exist=0
    for file in remotefile:
        if(file.get("key")==localfile):
            exist=1
            break
    if(exist==0):
        # Upload any local file whose key is not already in the bucket.
        result=mf.uploadfile(localfile,Sync_path+"/"+localfile)
        if(result.status_code==200):
            logging.info(strnow+"uplaod %s successed" % (localfile))
        else:
            logging.info(strnow+"upload %s fail" % (localfile))
| weixlkevin/SmartHomekit | SyncOperation.py | SyncOperation.py | py | 1,213 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 10,
"usage_type": "attribute"
},
{
"api_name": "logging.info",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "datetime.datetime... |
44401912202 | import serial
import numpy as np
import csv
device=serial.Serial("/dev/ttyUSB0",230400)
distance0=1
distance1=7.5
##mm
arange=np.arange(distance0,distance1,0.1)
# BUG FIX: the original ended with "f.close" (missing parentheses), so the
# file was never closed; a context manager guarantees flush+close even on
# error. newline="" is the documented way to open files for csv.writer.
with open("data/trail9-5.4mm.csv","w",newline="") as f:
    writer=csv.writer(f)
    for i in arange:
        print("Move Slit to: ",str(i))
        input("Press enter to continue")
        # Take 10 readings at this slit position, tagging each with i.
        for a in range(10):
            device.reset_input_buffer()
            line=device.readline()
            nice= line.strip((b'\n')).split(b',')
            print(a)
            nice.append(i)
            writer.writerow(nice)
| cwru-robin/Phys301 | Photon Counting/take_data.py | take_data.py | py | 532 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "serial.Serial",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "numpy.arange",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "csv.writer",
"line_number": 14,
"usage_type": "call"
}
] |
22623785092 | import logging
import time
import socket
import config
import json
import sys
# API mode talks to SS-Panel over HTTP; DB mode reads MySQL directly.
if config.API_ENABLED:
    if sys.version_info >= (3, 0):
        # If using python 3, use urllib.parse and urllib.request instead of urllib and urllib2
        from urllib.parse import urlencode
        from urllib.request import Request, urlopen
    else:
        from urllib import urlencode
        from urllib2 import Request, urlopen
else:
    import cymysql
class DbTransfer(object):
    @staticmethod
    def send_command(cmd):
        # Send one command string to the ss-manager UDP interface and return
        # the raw reply (bytes on success, '' on failure).
        data = ''
        try:
            cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
            cli.settimeout(2)
            cli.sendto(cmd.encode(), ('%s' % config.MANAGER_BIND_IP, config.MANAGER_PORT))
            data, addr = cli.recvfrom(1500)
            cli.close()
            # TODO: bad way solve timed out
            time.sleep(0.05)
        except Exception as e:
            if config.SS_VERBOSE:
                import traceback
                traceback.print_exc()
            logging.warning('Exception thrown when sending command: %s' % e)
        return data
    @staticmethod
    def get_servers_transfer():
        # Ask the ss-manager for per-port transfer counters. Replies arrive as
        # a stream of JSON datagrams terminated by a single b'e' datagram.
        DbTransfer.verbose_print('request transfer count from manager - start')
        dt_transfer = {}
        cli = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        cli.settimeout(2)
        cli.sendto(b'transfer: {}', (config.MANAGER_BIND_IP, config.MANAGER_PORT))
        while True:
            data, addr = cli.recvfrom(1500)
            if data == b'e':
                break
            data = json.loads(data)
            dt_transfer.update(data)
        cli.close()
        DbTransfer.verbose_print('request transfer count from manager - done')
        return dt_transfer
@staticmethod
def verbose_print(msg):
if config.SS_VERBOSE:
logging.info(msg)
@staticmethod
def http_post(url, data):
data = urlencode(data).encode()
req = Request(url, data)
response = urlopen(req)
response_data = response.read()
response.close()
DbTransfer.verbose_print('%s - %s - %s' % (url, data, response_data))
return response_data
    @staticmethod
    def start_server(row, restart=False):
        # (Re)start the ss-server for this row's port via manager commands.
        # row layout: [port, u, d, transfer_enable, password, switch, enable,
        # method, email, ...]
        if restart:
            DbTransfer.send_command('remove: {"server_port":%d}' % row[0])
            time.sleep(0.1)
        DbTransfer.send_command(
            'add: {"server_port": %d, "password":"%s", "method":"%s", "email":"%s"}' %
            (row[0], row[4], row[7], row[8])
        )
    @staticmethod
    def del_server_out_of_bound_safe(rows):
        # Reconcile running servers with the user list.
        # Row layout (see pull_db_user / pull_api_user):
        #   row[0]=port, row[1]=u, row[2]=d, row[3]=transfer_enable,
        #   row[4]=password, row[5]=switch, row[6]=enable, row[7]=method
        for row in rows:
            server = json.loads(DbTransfer.send_command(
                'stat: {"server_port":%s}' % row[0]))
            # stat != 'ko' means the port is currently running.
            if server['stat'] != 'ko':
                if row[5] == 0 or row[6] == 0:
                    # stop disabled or switched-off user
                    logging.info(
                        'U[%d] Server has been stopped: user is disabled' % row[0])
                    DbTransfer.send_command(
                        'remove: {"server_port":%d}' % row[0])
                elif row[1] + row[2] >= row[3]:
                    # stop user that exceeds data transfer limit
                    logging.info(
                        'U[%d] Server has been stopped: data transfer limit exceeded' % row[0])
                    DbTransfer.send_command(
                        'remove: {"server_port":%d}' % row[0])
                elif server['password'] != row[4]:
                    # password changed
                    logging.info(
                        'U[%d] Server is restarting: password is changed' % row[0])
                    DbTransfer.start_server(row, True)
                else:
                    if not config.SS_CUSTOM_METHOD:
                        row[7] = config.SS_METHOD
                    if server['method'] != row[7]:
                        # encryption method changed
                        logging.info(
                            'U[%d] Server is restarting: encryption method is changed' % row[0])
                        DbTransfer.start_server(row, True)
            else:
                # Port not running: start it only for enabled users under quota.
                if row[5] != 0 and row[6] != 0 and row[1] + row[2] < row[3]:
                    if not config.SS_CUSTOM_METHOD:
                        row[7] = config.SS_METHOD
                    DbTransfer.start_server(row)
                    if config.MANAGER_BIND_IP != '127.0.0.1':
                        logging.info(
                            'U[%s] Server Started with password [%s] and method [%s]' % (row[0], row[4], row[7]))
    @staticmethod
    def thread_pull():
        # Background loop: periodically fetch the user list (API or MySQL)
        # and start/stop per-port servers accordingly.
        socket.setdefaulttimeout(config.TIMEOUT)
        while True:
            try:
                if config.API_ENABLED:
                    rows = DbTransfer.pull_api_user()
                else:
                    rows = DbTransfer.pull_db_user()
                DbTransfer.del_server_out_of_bound_safe(rows)
            except Exception as e:
                if config.SS_VERBOSE:
                    import traceback
                    traceback.print_exc()
                logging.error('Except thrown while pulling user data:%s' % e)
            finally:
                time.sleep(config.CHECKTIME)
    @staticmethod
    def pull_api_user():
        # Fetch the user list from the SS-Panel API and normalize each user
        # into the common row layout:
        # [port, u, d, transfer_enable, passwd, switch, enable, method, email, id]
        DbTransfer.verbose_print('api download - start')
        # Node parameter is not included for the ORIGINAL version of SS-Panel V3
        url = config.API_URL + '/users?key=' + config.API_PASS + '&node=' + config.API_NODE_ID
        response = urlopen(url)
        response_data = json.load(response)
        response.close()
        rows = []
        for user in response_data['data']:
            if user['port'] in config.SS_SKIP_PORTS:
                DbTransfer.verbose_print('api skipped port %d' % user['port'])
            elif user.get('switch') == 0 or user['enable'] == 0:
                # Disabled users keep only the fields needed to stop the port.
                rows.append([
                    user['port'],
                    None, None, None, None,
                    user.get('switch'),
                    user['enable'],
                    None, None, None
                ])
            else:
                rows.append([
                    user['port'],
                    user['u'],
                    user['d'],
                    user['transfer_enable'],
                    user['passwd'],
                    user['switch'],
                    user['enable'],
                    user['method'],
                    user['email'],
                    user['id']
                ])
        DbTransfer.verbose_print('api download - done')
        return rows
    @staticmethod
    def pull_db_user():
        # Fetch the user list straight from MySQL, excluding configured
        # skip-ports via a WHERE clause built from integer config values.
        DbTransfer.verbose_print('db download - start')
        string = ''
        for index in range(len(config.SS_SKIP_PORTS)):
            port = config.SS_SKIP_PORTS[index]
            DbTransfer.verbose_print('db skipped port %d' % port)
            if index == 0:
                string = ' WHERE `port`<>%d' % port
            else:
                string = '%s AND `port`<>%d' % (string, port)
        conn = cymysql.connect(host=config.MYSQL_HOST, port=config.MYSQL_PORT, user=config.MYSQL_USER,
                               passwd=config.MYSQL_PASS, db=config.MYSQL_DB, charset='utf8')
        cur = conn.cursor()
        cur.execute('SELECT port, u, d, transfer_enable, passwd, switch, enable, method, email FROM %s%s ORDER BY `port` ASC'
                    % (config.MYSQL_USER_TABLE, string))
        rows = []
        for r in cur.fetchall():
            rows.append(list(r))
        # Release resources
        cur.close()
        conn.close()
        DbTransfer.verbose_print('db download - done')
        return rows
    @staticmethod
    def thread_push():
        # Background loop: periodically read transfer counters from the
        # manager and push them to the panel API or MySQL.
        socket.setdefaulttimeout(config.TIMEOUT)
        while True:
            try:
                dt_transfer = DbTransfer.get_servers_transfer()
                if config.API_ENABLED:
                    DbTransfer.push_api_user(dt_transfer)
                else:
                    DbTransfer.push_db_user(dt_transfer)
            except Exception as e:
                import traceback
                if config.SS_VERBOSE:
                    traceback.print_exc()
                logging.error('Except thrown while pushing user data:%s' % e)
            finally:
                time.sleep(config.SYNCTIME)
@staticmethod
def push_api_user(dt_transfer):
i = 0
DbTransfer.verbose_print('api upload: pushing transfer statistics - start')
users = DbTransfer.pull_api_user()
for port in dt_transfer.keys():
user = None
for result in users:
if str(result[0]) == port:
user = result[9]
break
if not user:
logging.warning('U[%s] User Not Found', port)
server = json.loads(DbTransfer.get_instance().send_command(
'stat: {"server_port":%s}' % port))
if server['stat'] != 'ko':
logging.info(
'U[%s] Server has been stopped: user is removed' % port)
DbTransfer.send_command(
'remove: {"server_port":%s}' % port)
continue
DbTransfer.verbose_print('U[%s] User ID Obtained:%s' % (port, user))
tran = str(dt_transfer[port])
data = {'d': tran, 'node_id': config.API_NODE_ID, 'u': '0'}
url = config.API_URL + '/users/' + str(user) + '/traffic?key=' + config.API_PASS
DbTransfer.http_post(url, data)
DbTransfer.verbose_print('api upload: pushing transfer statistics - done')
i += 1
# online user count
DbTransfer.verbose_print('api upload: pushing online user count - start')
data = {'count': i}
url = config.API_URL + '/nodes/' + config.API_NODE_ID + '/online_count?key=' + config.API_PASS
DbTransfer.http_post(url, data)
DbTransfer.verbose_print('api upload: pushing online user count - done')
# load info
DbTransfer.verbose_print('api upload: node status - start')
url = config.API_URL + '/nodes/' + config.API_NODE_ID + '/info?key=' + config.API_PASS
f = open("/proc/loadavg")
load = f.read().split()
f.close()
loadavg = load[0] + ' ' + load[1] + ' ' + load[2] + ' ' + load[3] + ' ' + load[4]
f = open("/proc/uptime")
uptime = f.read().split()
uptime = uptime[0]
f.close()
data = {'load': loadavg, 'uptime': uptime}
DbTransfer.http_post(url, data)
DbTransfer.verbose_print('api upload: node status - done')
    @staticmethod
    def push_db_user(dt_transfer):
        # Write accumulated per-port transfer counters back to MySQL in a
        # single CASE-based UPDATE statement.
        DbTransfer.verbose_print('db upload - start')
        query_head = 'UPDATE `user`'
        query_sub_when = ''
        query_sub_when2 = ''
        query_sub_in = None
        last_time = time.time()
        for port in dt_transfer.keys():
            # All traffic is credited to `d`; `u` is left unchanged (+0).
            query_sub_when += ' WHEN %s THEN `u`+%s' % (port, 0)  # all in d
            query_sub_when2 += ' WHEN %s THEN `d`+%s' % (
                port, dt_transfer[port])
            if query_sub_in is not None:
                query_sub_in += ',%s' % port
            else:
                query_sub_in = '%s' % port
        if query_sub_when == '':
            return
        query_sql = query_head + ' SET u = CASE port' + query_sub_when + \
                    ' END, d = CASE port' + query_sub_when2 + \
                    ' END, t = ' + str(int(last_time)) + \
                    ' WHERE port IN (%s)' % query_sub_in
        # print query_sql
        conn = cymysql.connect(host=config.MYSQL_HOST, port=config.MYSQL_PORT, user=config.MYSQL_USER,
                               passwd=config.MYSQL_PASS, db=config.MYSQL_DB, charset='utf8')
        cur = conn.cursor()
        cur.execute(query_sql)
        cur.close()
        conn.commit()
        conn.close()
        DbTransfer.verbose_print('db upload - done')
| ZuopanYao/TravelNetwork | shadowsocks-py-mu/shadowsocks/dbtransfer.py | dbtransfer.py | py | 12,012 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "config.API_ENABLED",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "sys.version_info",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "socket.socket",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "socket.AF_I... |
365934322 | import sys
from hypothesis.version import __version__
# User-facing error text; {} is filled with the running hypothesis version.
message = """
Hypothesis {} requires Python 3.6 or later.
This can only happen if your packaging toolchain is older than python_requires.
See https://packaging.python.org/guides/distributing-packages-using-setuptools/
"""
# Fail fast at import time on unsupported interpreters.
if sys.version_info[:3] < (3, 6):  # pragma: no cover
    raise Exception(message.format(__version__))
| webanck/GigaVoxels | lib/python3.8/site-packages/hypothesis/_error_if_old.py | _error_if_old.py | py | 383 | python | en | code | 23 | github-code | 1 | [
{
"api_name": "sys.version_info",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "hypothesis.version.__version__",
"line_number": 13,
"usage_type": "argument"
}
] |
36189820928 | #!/usr/bin/env python3
import os
from setuptools import setup
def read(fname):
    """Return the text of *fname*, resolved relative to this setup.py."""
    # Context manager + explicit encoding: the original leaked the file handle
    # and relied on the platform default encoding for a UTF-8 README.
    path = os.path.join(os.path.dirname(__file__), fname)
    with open(path, encoding="utf-8") as f:
        return f.read()
# Package metadata and entry point for PyPI distribution.
setup(
    name="har2requests",
    version="0.2.2",
    author="Louis Abraham",
    license="MIT",
    author_email="louis.abraham@yahoo.fr",
    description="Generate Python Requests code from HAR file",
    # The README doubles as the PyPI long description.
    long_description=read("README.md"),
    long_description_content_type="text/markdown",
    url="https://github.com/louisabraham/har2requests",
    packages=["har2requests"],
    install_requires=["black", "click", "python-dateutil", "tqdm"],
    extras_require={"dev": ["wheel"]},
    python_requires=">=3.6",
    # Installs the `har2requests` console command.
    entry_points={"console_scripts": ["har2requests = har2requests:main"]},
    classifiers=[
        "Topic :: Internet :: WWW/HTTP",
        "Topic :: Software Development :: Code Generators",
        "Topic :: Utilities",
    ],
)
| louisabraham/har2requests | setup.py | setup.py | py | 909 | python | en | code | 93 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "os.path",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "os.path.dirname",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "setuptools.setup",
"line_n... |
36415915522 | import copy
import mmcv
import re
import torch
import torch.nn as nn
import warnings
from mmcv.cnn import (build_conv_layer, build_norm_layer, constant_init,
kaiming_init)
from mmcv.cnn.bricks.registry import NORM_LAYERS
from mmcv.runner import load_checkpoint
from numpy.random import rand
from operator import pos
from torch.nn import functional as F
from mmcls.models.builder import build_backbone, build_neck
from mmcls.models.classifiers.base import BaseClassifier
from ..builder import (CLASSIFIERS, build_distill_losses,
build_classifier, build_head)
ADAPTATION = "adaptation_layer_{}"
NORM_LAYER = "adaptation_norm_layer_{}"
@CLASSIFIERS.register_module()
class KnowledgeDistillationImageClassifier(BaseClassifier):
    def __init__(self,
                 backbone,
                 teacher_config,
                 distill_losses,
                 adaptation=None,
                 neck=None,
                 head=None,
                 teacher_ckpt=None,
                 eval_teacher=True,
                 adaptive=False,
                 pretrained=None,
                 init_cfg=None,
                 add_layer_loss=True,
                 alpha_distill=1.0):
        """
        backbone (dict): student backbone configuration.
        teacher_config (str): path to teacher configuration.
        distill_losses (list): list of distillation losses.
        adaptation (dict): configuration for adaptation layers. Default:None
        neck (dict): student neck configuration. Default:None
        head (dict): student prediction head configuration. Default:None
        teacher_ckpt (str): path to teacher checkpoint file. Default:None
        eval_teacher (bool): flag to change teacher mode. Default:True
        adaptive (bool): flag to specify whether to use adaptive distillation. Default:False
        pretrained (str): path to pretrained checkpoint for student model. Default:None
        init_cfg (dict): student model initialization configuration. Default:None
        add_layer_loss (bool): flag to switch between adaptive or adaptive-layerwise methods.
        alpha_distill (float): relative importance between distillation loss and empirical loss.
        """
        super(KnowledgeDistillationImageClassifier, self).__init__(init_cfg)
        if pretrained is not None:
            warnings.warn('DeprecationWarning: pretrained is a deprecated \
                key, please consider using init_cfg')
            self.init_cfg = dict(type='Pretrained', checkpoint=pretrained)
        self.backbone = build_backbone(backbone) # build student backbone
        self.eval_teacher = eval_teacher
        self.adaptive = adaptive
        self.distill_losses = {}
        self.distill_paths = []
        self.add_layer_loss = add_layer_loss
        self.adaptation = adaptation
        self.alpha_distill = alpha_distill
        # Build adaptation layers
        # 1x1 convs (+ norm) project each student feature map to the teacher's
        # channel count; registered via add_module under indexed names.
        if adaptation:
            for i, (in_c, out_c) in enumerate(zip(adaptation['in_channels'], adaptation['out_channels'])):
                conv = build_conv_layer(backbone.get('conv_cfg', None), in_c, out_c,
                                        kernel_size=1,
                                        stride=1, bias=False)
                _, norm = build_norm_layer(backbone.get('norm_cfg', 'BN'), out_c, postfix=i)
                kaiming_init(conv)
                constant_init(norm, 1)
                self.add_module(ADAPTATION.format(i), conv)
                self.add_module(NORM_LAYER.format(i), norm)
        # Build teacher model and load from checkpoint
        if isinstance(teacher_config, str):
            teacher_config = mmcv.Config.fromfile(teacher_config)
        self.teacher_model = build_classifier(teacher_config['model'])
        if teacher_ckpt is not None:
            load_checkpoint(self.teacher_model, teacher_ckpt)
        # Build distillation losses
        for distill_loss_cfg in distill_losses:
            distill_loss = build_distill_losses(distill_loss_cfg)
            self.distill_losses[distill_loss.loss_name] = distill_loss
            # Without layer-wise losses, feature losses get one path per
            # backbone output stage (name:index); otherwise one path per loss.
            if not add_layer_loss and distill_loss.mode == 'feature':
                for ii in self.backbone.out_indices:
                    self.distill_paths.append(distill_loss.loss_name + ":" + str(ii))
            else:
                self.distill_paths.append(distill_loss.loss_name)
        # Build loss scale for adaptive distillation
        # NOTE(review): AdaptiveLossScaler is not imported in this file's
        # visible header -- presumably defined elsewhere in the package; verify.
        self.loss_scaler = AdaptiveLossScaler(
            list(self.distill_paths)) if self.adaptive else None
        if neck is not None:
            self.neck = build_neck(neck)
        if head is not None:
            self.head = build_head(head)
    def cuda(self, device=None):
        """Since teacher_model is registered as a plain object, it is necessary
        to put the teacher model to devices when calling cuda function."""
        # Distillation losses and the loss scaler are also plain attributes
        # (not registered submodules), so they must be moved manually too.
        self.teacher_model.cuda(device=device)
        for dl_name in self.distill_losses:
            self.distill_losses[dl_name].cuda(device=device)
        if self.loss_scaler:
            self.loss_scaler.cuda(device=device)
        return super().cuda(device=device)
    def extract_feat(self, img, with_neck=True):
        """Directly extract features from the backbone + neck.

        Returns the backbone output; when ``with_neck`` is True and the model
        has a neck, returns the neck output instead.
        """
        x = self.backbone(img)
        if self.with_neck and with_neck:
            x = self.neck(x)
        return x
def forward_train(self, img, gt_label, **kwargs):
    """Forward computation during training.

    Computes the student classification loss plus every configured
    distillation loss against the (gradient-free) teacher outputs.

    Args:
        img (Tensor): of shape (N, C, H, W) encoding input images.
            Typically these should be mean centered and std scaled.
        gt_label (Tensor): It should be of shape (N, 1) encoding the
            ground-truth label of input images for single label task. It
            should be of shape (N, C) encoding the ground-truth label
            of input images for multi-labels task.
    Returns:
        dict[str, Tensor]: a dictionary of loss components
    """
    losses = dict()
    # ----Extract student outputs------
    # x is tuple of student backbone output layers
    # gap_x is global average pooled output from last layer of student backbone
    # out is the output logit from student
    x = self.extract_feat(img, with_neck=False)
    gap_x = self.neck(x[-1])
    out = self.head.forward_train(gap_x, gt_label=None, return_loss=False)
    loss = self.head.loss(out, gt_label)
    losses['loss_cls'] = loss['loss']
    # Transform student features to teacher using adaptation layers
    # NOTE(review): `feats` is only bound when self.adaptation is truthy; a
    # non-attention 'feature' loss or a 'softmax_regression' loss configured
    # without adaptation would hit a NameError below -- confirm configs
    # always pair these.
    if self.adaptation:
        feats = []
        for i, feat in enumerate(x):
            feat = getattr(self, ADAPTATION.format(i))(x[i])
            feat = getattr(self, NORM_LAYER.format(i))(feat)
            feats.append(feat)
    # ----Extract teacher outputs------
    # teacher_x is tuple of teacher backbone output layers
    # teacher_gap_x is global average pooled output from last layer of teacher backbone
    # out_teacher is the output logit from teacher
    with torch.no_grad():  # teacher is frozen: no gradients flow through it
        teacher_x = self.teacher_model.extract_feat(img, with_neck=False)
        teacher_gap_x = self.teacher_model.neck(teacher_x[-1])
        out_teacher = self.teacher_model.head.forward_train(
            teacher_gap_x, gt_label=None, return_loss=False)
    # Compute different distillation losses using features or logits
    for dl_name in self.distill_losses:
        distill_loss = self.distill_losses[dl_name]
        if distill_loss.mode == 'feature':
            # attention-transfer losses consume the raw backbone features,
            # every other feature loss the adapted ones
            s_feats = x if 'attention_transfer' in distill_loss.loss_name else feats
            if isinstance(s_feats, tuple) or isinstance(s_feats, list):
                if self.add_layer_loss:
                    # single summed loss entry over all layers
                    loss = sum([distill_loss(s_x, t_x)
                                for s_x, t_x in zip(s_feats, teacher_x)])
                    losses[dl_name] = loss*distill_loss.loss_weight
                else:
                    # one loss entry per layer, named "<loss_name>:<index>";
                    # note this rebinds the loop variable dl_name
                    for ii, (s_x, t_x) in enumerate(zip(s_feats, teacher_x)):
                        dl_name = distill_loss.loss_name + ":" + str(ii)
                        losses[dl_name] = distill_loss(s_x, t_x)*distill_loss.loss_weight
            else:
                # NOTE(review): this branch computes a loss but never stores
                # it in `losses`, so the value is effectively discarded.
                loss = distill_loss(x, teacher_x)
        elif distill_loss.mode == 'logits':
            loss = distill_loss(out, out_teacher, gt_label)
            losses[dl_name] = loss*distill_loss.loss_weight
        elif distill_loss.mode == 'softmax_regression':
            # feed the adapted student features through the *teacher* head
            adapted_student_gap_x = self.neck(feats[-1])
            out_cross_student = self.teacher_model.head.forward_train(
                adapted_student_gap_x, gt_label=None, return_loss=False)
            loss = distill_loss(out_teacher, out_cross_student)
            losses[dl_name] = loss*distill_loss.loss_weight
    # scale distillation losses using adaptive distillation loss scaler
    if self.loss_scaler:
        losses.update(self.loss_scaler(losses))
    # change the relative importance of distillation loss using alpha distill
    for key, loss in losses.items():
        if 'loss_kd' in key or 'loss_alphas' in key:
            losses[key] = loss*self.alpha_distill
    return losses
def simple_test(self, img, img_metas):
    """Test without augmentation; returns student and teacher predictions
    stacked into one tensor."""
    student_feats = self.extract_feat(img, with_neck=False)
    student_pred = self.head.simple_test(self.neck(student_feats[-1]))
    teacher_feats = self.teacher_model.extract_feat(img, with_neck=False)
    teacher_pred = self.teacher_model.head.simple_test(
        self.teacher_model.neck(teacher_feats[-1]))
    return torch.tensor([student_pred, teacher_pred])
def train(self, mode=True):
    """Switch the student to ``mode``; the teacher follows unless it is
    pinned to eval mode via ``self.eval_teacher``."""
    teacher_mode = False if self.eval_teacher else mode
    self.teacher_model.train(teacher_mode)
    super().train(mode)
def __setattr__(self, name, value):
    """Assign ``value`` to attribute ``name``.

    Overridden so the teacher model is stored as a plain object instead of
    being registered as an nn.Module child; its parameters therefore do not
    appear in ``self.parameters``, ``self.modules`` or ``self.children``.
    """
    if name != 'teacher_model':
        super().__setattr__(name, value)
    else:
        object.__setattr__(self, name, value)
class AdaptiveLossScaler(nn.Module):
    """Adaptively re-weights distillation losses.

    Holds one learnable parameter (alpha) per distillation position and
    scales the corresponding loss by exp(-alpha). The sum of the alphas is
    emitted as an extra 'loss_alphas' term so the weights cannot grow
    without bound; 'loss_cls' passes through unscaled.
    """

    def __init__(self, positions):
        """
        :param positions: list of loss names (e.g. 'loss_kd', 'loss_kd:0')
            expected in the loss dict passed to forward().
        """
        super().__init__()
        self.positions = positions
        # One learnable log-weight per position, zero-initialised so the
        # initial scale factor is exp(0) = 1 (i.e. a no-op).
        self.alphas = nn.Parameter(torch.zeros(len(positions)))

    def forward(self, losses):
        """Scale every distillation loss in ``losses``; 'loss_cls' is untouched.

        :param losses: dict containing 'loss_cls' plus one entry per position
        :return: dict with the scaled losses, per-position 'alpha_*' entries
            for logging, and the regularising 'loss_alphas' term
        """
        loss_cls = losses.pop('loss_cls')
        scaled_losses = self.get_scaled_losses(losses, torch.exp(-self.alphas))
        scaled_losses.update(dict(loss_alphas=self.alphas.sum()))
        scaled_losses.update(dict(loss_cls=loss_cls))
        return scaled_losses

    def get_scaled_losses(self, losses, alphas):
        """Scale each positional loss and expose its alpha under 'alpha_<name>'."""
        if len(list(losses.keys())) != len(self.positions):
            raise ValueError('Check distillation positions. Losses: {}, Positions: {}'.format(
                list(losses.keys()), self.positions))
        scaled_losses = {}
        for index, position in enumerate(self.positions):
            scaled_losses.update(self.scale_losses(losses.pop(position), alphas[index], position))
            # Bug fix: the original used position.strip("loss_"), which removes
            # any of the characters {'l','o','s','_'} from BOTH ends of the
            # name (e.g. 'loss_l1' -> '1'); only the literal 'loss_' prefix
            # should be dropped.
            if position.startswith('loss_'):
                alpha_name = position[len('loss_'):]
            else:
                alpha_name = position
            scaled_losses.update({'alpha_{}'.format(alpha_name): alphas[index]})
        return scaled_losses

    def scale_losses(self, losses, alpha, position=None):
        """Multiply ``losses`` (dict, list or tensor) by ``alpha``; always
        return a dict keyed by loss/position name."""
        if isinstance(losses, dict):
            for task, loss in losses.items():
                if isinstance(loss, list):
                    losses[task] = [l*alpha for l in loss]
                else:
                    losses[task] = loss*alpha
        elif isinstance(losses, list):
            losses = {'{}'.format(position): [l*alpha for l in losses]}
        elif isinstance(losses, torch.Tensor):
            losses = {'{}'.format(position): losses*alpha}
        return losses
| wyzelabs-inc/AdaptiveDistillation | adaptivedistillation/models/classifiers/kd_image.py | kd_image.py | py | 12,381 | python | en | code | 3 | github-code | 1 | [
{
"api_name": "mmcls.models.classifiers.base.BaseClassifier",
"line_number": 26,
"usage_type": "name"
},
{
"api_name": "warnings.warn",
"line_number": 60,
"usage_type": "call"
},
{
"api_name": "mmcls.models.builder.build_backbone",
"line_number": 64,
"usage_type": "call"
... |
21640719384 | """
This file
"""
from textblob import TextBlob
import sys
user_input = sys.argv
if len(user_input) != 2:
print("Need a txt file to read")
exit()
with open(user_input[1], 'r') as f:
text = f.read()
blob = TextBlob(text)
sentiment = blob.sentiment.polarity
print(sentiment) | mkPuzon/NLP-Tone-Checker | textAnalysis.py | textAnalysis.py | py | 288 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sys.argv",
"line_number": 7,
"usage_type": "attribute"
},
{
"api_name": "textblob.TextBlob",
"line_number": 15,
"usage_type": "call"
}
] |
38793762624 | #!/usr/bin/env python
# Create status info of ISC DHCPd leases in JSON format.
#
# Depends on python module netaddr.
# Install with pip install netaddr.
# Depends on iscconf for parsing dhcpd.conf.
# Install with pip install iscconf
# Depends on python 2.7 for argparse.
#
# This was written because dhcpstatus kept crashing with OOM errors on a
# 500M large leases file.
#
# Run dhcpstatus.py -h for more info.
#
# By Stefan Midjich
from __future__ import print_function
from sys import exit, stderr, stdout
from argparse import ArgumentParser, FileType
from json import dumps
from netaddr import IPNetwork
from iscconf import parse
# This does the actual counting of leases in dhcpd.leases. It takes a list of
# valid IP-addresses as argument, and a file object to the leases.
# Return value is a dictionary of matched leases.
# Counting can take a long time on big leases files.
def count_leases(file, valid_ips):
    """Count leases per IP address in an ISC dhcpd.leases stream.

    Iterates the leases file line by line (so very large files never need to
    fit in memory), counts every ``lease <ip> {`` block whose IP is in
    ``valid_ips`` and remembers the most recent ``starts``/``ends`` dates
    seen for that IP.

    :param file: iterable of text lines (e.g. an open dhcpd.leases file)
    :param valid_ips: iterable of IP address strings to match
    :return: dict mapping IP -> {'count': int, 'starts': date|None, 'ends': date|None}
    """
    # Membership is tested once per 'lease' line; a set makes that O(1)
    # instead of O(n) over the (potentially huge) valid-IP list.
    valid = set(valid_ips)
    matched_ips = {}
    current_ip = None  # IP of the lease block currently being parsed
    for line in file:
        if line.startswith('lease '):
            current_ip = line.split(' ')[1]
            if current_ip in valid:
                entry = matched_ips.setdefault(
                    current_ip, {'count': 0, 'starts': None, 'ends': None})
                entry['count'] += 1
            continue
        stripped = line.lstrip(' ')
        for keyword in ('starts ', 'ends '):
            if stripped.startswith(keyword):
                # Line format: '<keyword> <weekday> <date> <time>'
                date = stripped.split(' ')[2]
                # Only record dates for leases we are actually counting
                # (replaces the original bare `except: pass` guards).
                if current_ip in matched_ips:
                    matched_ips[current_ip][keyword.rstrip()] = date
    return matched_ips
def get_valid_ips(subnets, ips):
    """Append every address of every subnet in ``subnets`` to the list ``ips``.

    Addresses are appended as strings; the network and broadcast addresses
    are included, matching the behaviour of iterating netaddr.IPNetwork.
    Mutates ``ips`` in place and returns None.

    :param subnets: iterable of 'address/prefix' or 'address/netmask' strings
    :param ips: list the address strings are appended to
    """
    # Standard-library replacement for the third-party netaddr module;
    # strict=False accepts subnets with host bits set, as netaddr does,
    # and ip_network() also understands dotted netmask notation.
    import ipaddress
    for subnet in subnets:
        network = ipaddress.ip_network(subnet, strict=False)
        ips.extend(str(address) for address in network)
arse = ArgumentParser(
description = 'Create JSON statistics of used leases in ISC DHCPd',
epilog = '''This program works by reading all the shared-network blocks in
a dhcpd.conf file. Only subnets in shared-network blocks are processed.
By Stefan Midjich'''
)
arse.add_argument(
'-v', '--verbose',
action = 'store_true',
help = 'Debugging info'
)
arse.add_argument(
'-l', '--leases',
metavar = '/var/lib/dhcp/dhcpd.leases',
default = '/var/lib/dhcp/dhcpd.leases',
type = FileType('r'),
help = 'File containing all leases for ISC DHCPd'
)
arse.add_argument(
'-L', '--list',
action = 'store_true',
help = 'Only list available subnets and exit'
)
arse.add_argument(
'-i', '--indent',
action = 'store_true',
help = 'Indent output to prettify it'
)
arse.add_argument(
'-c', '--configuration',
metavar = '/etc/dhcp/dhcpd.conf',
default = '/etc/dhcp/dhcpd.conf',
type = FileType('r'),
help = 'ISC DHCPd Configuration file containing shared-network blocks'
)
arse.add_argument(
'-o', '--output',
metavar = '/tmp/dhcpstatus_output.json',
default = stdout,
type = FileType('w'),
help = 'JSON output file for DHCP statistics'
)
arse.add_argument(
'isp_name',
metavar = 'ISP-name',
help = 'Name of shared-network to create statistics for, can be \'any\''
)
args = arse.parse_args()
# json.dumps indent: None = compact output, 4 spaces when --indent is given.
indent = None
if args.indent:
    indent = 4
try:
    parsed_dhcp_config = parse(args.configuration.read())
except Exception as e:
    print(str(e), file=stderr)
    arse.print_usage()
    exit(1)
# Start building the output dict by reading subnet info for each ISP defined
# in dhcpd.conf as a shared-network.
json_isp = {}
for item in parsed_dhcp_config:
    # Config keys that are not 2-tuples are not shared-network entries; skip.
    try:
        (k, v) = item
    except:
        continue
    if k == 'shared-network':
        last_isp = v
        json_isp[last_isp] = {}
        for subitem in parsed_dhcp_config[item]:
            # Only 4-tuples like ('subnet', <addr>, 'netmask', <mask>) matter.
            try:
                (sk, sv, sk2, sv2) = subitem
            except:
                continue
            if sk == 'subnet':
                _subnet = sv + '/' + sv2
                try:
                    json_isp[last_isp]['subnets'].append(_subnet)
                except:
                    json_isp[last_isp]['subnets'] = [_subnet]
# Just list the ISPs and their subnets, and exit.
if args.list:
    if args.isp_name == 'any':
        print(dumps(json_isp, indent=indent), file=args.output)
    else:
        print(dumps(json_isp[args.isp_name], indent=indent), file=args.output)
    exit(0)
# Else proceed with regular execution of the program.
# Get a list of valid IP-addresses to search the leases for.
search_ips = []
if args.isp_name == 'any':
    for isp in json_isp:
        get_valid_ips(json_isp[isp]['subnets'], search_ips)
else:
    get_valid_ips(json_isp[args.isp_name]['subnets'], search_ips)
matched_ips = count_leases(args.leases, search_ips)
print(dumps(matched_ips, indent=indent), file=args.output)
| stemid/devops | tools/dhcpstatus.py | dhcpstatus.py | py | 5,321 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "netaddr.IPNetwork",
"line_number": 71,
"usage_type": "call"
},
{
"api_name": "argparse.ArgumentParser",
"line_number": 75,
"usage_type": "call"
},
{
"api_name": "argparse.FileType",
"line_number": 93,
"usage_type": "call"
},
{
"api_name": "argparse.... |
import statistics
from math import ceil

import pandas as pd
from sodapy import Socrata
soil_variables = ['ph_agua_suelo_2_5_1_0', 'potasio_k_intercambiable_cmol_kg', 'f_sforo_p_bray_ii_mg_kg']
def create_client():
    """Return an unauthenticated Socrata client for the Colombian open-data
    portal (www.datos.gov.co)."""
    return Socrata("www.datos.gov.co", None)
def get_data(dataset_identifier, **kwargs):
    """Fetch the records of ``dataset_identifier`` from the Socrata API,
    forwarding any extra query parameters."""
    return create_client().get(dataset_identifier, **kwargs)
def convert_dataset_to_df(data):
    """Build a DataFrame from API records; raise if no columns came back."""
    frame = pd.DataFrame.from_records(data)
    if not frame.shape[1]:
        raise ValueError(
            "No se encontraron valores con estos parametros, verifique que haya escrito todo de manera correcta")
    return frame
def data_normalize(dataset_values):
    """Convert every value in ``dataset_values`` to float, in place.

    Values that cannot be parsed as a float are removed from the list.

    Bug fix: the original popped elements while iterating over
    ``range(len(...))``, which skipped elements after a removal and raised
    an uncaught IndexError once the list had shrunk; rebuilding the list
    and slice-assigning avoids both problems while still mutating the
    caller's list object.
    """
    converted = []
    for value in dataset_values:
        try:
            converted.append(float(value))
        except (TypeError, ValueError):
            # Non-numeric entry (e.g. 'ND' or None): drop it.
            continue
    dataset_values[:] = converted
def calculate_median(data):
    """Return the median of each soil variable in ``data``.

    ``data`` maps each name in the module-level ``soil_variables`` list to a
    list of raw values; each list is normalized to floats in place via
    data_normalize() before the median is taken.

    Bug fixes over the original implementation:
    - the values were never sorted, so the positional "median" was arbitrary;
    - for odd lengths, ceil(len/2) indexed past the middle element (and past
      the end of a single-element list, raising IndexError).
    statistics.median sorts internally and handles both parities correctly
    (averaging the two middle values for even-length lists, as intended).

    :param data: dict with a list of values per soil variable
    :return: dict mapping soil variable name -> median (float)
    """
    medians = {}
    for soil_variable in soil_variables:
        values = data[soil_variable]
        data_normalize(values)
        medians[soil_variable] = statistics.median(values)
    return medians
def get_relevant_info(data):
    """Return a new DataFrame holding the location/crop columns plus every
    soil variable from ``data``."""
    relevant_columns = ["departamento", "municipio", "cultivo", "topografia"] + soil_variables
    return pd.DataFrame({column: data[column] for column in relevant_columns})
def normalize_params(params):
    """Normalize query parameters in place: department and municipality to
    upper case, crop name to title case."""
    for key, transform in (("departamento", str.upper),
                           ("municipio", str.upper),
                           ("cultivo", str.title)):
        params[key] = transform(params[key])
| Juanes7222/Parcial1 | API/api.py | api.py | py | 2,044 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "sodapy.Socrata",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame.from_records",
"line_number": 20,
"usage_type": "call"
},
{
"api_name": "pandas.DataFrame",
"line_number": 20,
"usage_type": "attribute"
},
{
"api_name": "mat... |
30647054410 | # -*- coding: utf-8 -*-
"""
Created on Tue Oct 24 09:27:21 2017
@author: Admin
"""
import pandas as pd
### READ IN DATA SOURCE
### READ DIFFRENT SECTIONS FOR TRAIN, TEST, PREDICT
# First 100k rows (with the header) form the training slice.
df_train = pd.read_csv('LMPD_STOPS_DATA_CLEAN_V1_HEADERS.csv', nrows=100000, skipinitialspace=True)
# SAve Headers for the next selections that skip header row.
headers = list(df_train)
# The next 1000 rows (header skipped) form the held-out test slice.
df_test = pd.read_csv('LMPD_STOPS_DATA_CLEAN_V1_HEADERS.csv',names=headers,skiprows=100001, nrows=1000, skipinitialspace=True)
#df_predict = pd.read_csv('LMPD_STOPS_DATA_CLEAN_V1_HEADERS.csv',names=headers,skiprows=93001, nrows=100, skipinitialspace=True)
# Drop any row with a missing value.
df_train =df_train.dropna(axis=0, how='any')
df_test =df_test.dropna(axis=0, how='any')
#df_predict =df_predict.dropna(axis=0, how='any')
# Encode string columns as pandas categoricals, then as integer codes.
# NOTE(review): codes are assigned per-frame, so the same string may map to
# different integers in train vs. test -- confirm this is intended.
obj_columns = df_train.select_dtypes(['object']).columns
df_train[obj_columns] = df_train[obj_columns].apply(lambda x: x.astype('category'))
df_test[obj_columns] = df_test[obj_columns].apply(lambda x: x.astype('category'))
#df_predict[obj_columns] = df_predict[obj_columns].apply(lambda x: x.astype('category'))
cat_columns = df_train.select_dtypes(['category']).columns
df_train[cat_columns] = df_train[cat_columns].apply(lambda x: x.cat.codes)
df_test[cat_columns] = df_test[cat_columns].apply(lambda x: x.cat.codes)
#df_predict[cat_columns] = df_predict[cat_columns].apply(lambda x: x.cat.codes)
#####################################################
########################
# Feature/target split: ACTIVITY_RESULTS is the prediction target.
df_train_data = df_train.drop('ACTIVITY_RESULTS', 1)
train_data = df_train_data.values
train_target = df_train.ACTIVITY_RESULTS.values
##############
df_test_data = df_test.drop('ACTIVITY_RESULTS', 1)
test_data = df_test_data.values
test_target = df_test.ACTIVITY_RESULTS.values
############################
# Create and fit a nearest-neighbor classifier
from sklearn.neighbors import KNeighborsClassifier
from sklearn import linear_model
from sklearn import svm
# NOTE(review): despite the comment above, a linear regression is fitted and
# its rounded output is compared against the integer class codes.
model = linear_model.LinearRegression()
model.fit(train_data, train_target)
predict = model.predict(test_data)
df_output = pd.DataFrame(
    {'Prediciton': predict,
     'Target': test_target
    })
df_output.Prediciton = df_output.Prediciton.round()
df_output['Match'] = df_output['Prediciton'] == df_output['Target']
# Fraction of rounded predictions that exactly match the target codes.
accuracy = df_output.Match.value_counts(normalize=True)
print (accuracy)
{
"api_name": "pandas.read_csv",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "sklearn.linear_model.LinearRegression",
"line_number": 51,
"usage_type": "call"
},
{
"api_name": ... |
74130368032 | import yaml
import os
import random
import numpy as np
import pandas as pd
from tqdm import tqdm
import torch
import torchvision
from torch.utils.tensorboard import SummaryWriter
from utils import get_root
from transforms import Resize, ToTensor, BatchRicianNoise, BatchRandomErasing
from dataset import DenoisingDataset
from models import DnCNN
from metrics import PSNR, SSIM, FSIM
from losses import SIMMLoss, L1Loss, CombinedLoss
# for printing
torch.set_printoptions(precision=2)
# for reproducibility
seed = 0
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
if torch.cuda.is_available():
torch.cuda.manual_seed(seed)
torch.backends.cudnn.deterministic = True
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
def get_params(root):
    """Read ``configs.yaml`` under ``root`` and return a flat parameter dict.

    Path entries are made absolute by joining them onto ``root``; numeric
    entries are cast to int/float and the image size string "H, W" is
    parsed into a tuple of ints.
    """
    with open(os.path.join(root, "configs.yaml"), "r") as config_file:
        configs = yaml.load(config_file, Loader=yaml.FullLoader)
    paths = configs['paths']
    params = {
        'train_data': os.path.join(root, paths['data']['train_data']),
        'dataset_table_path': os.path.join(root, paths['dataset_table']),
        'log_dir': os.path.join(root, paths['log_dir']),
    }
    data_cfg = configs['data_parameters']
    params['image_size'] = tuple(int(v) for v in data_cfg['image_size'].split(', '))
    params['batch_size'] = int(data_cfg['batch_size'])
    dncnn_cfg = configs['model_parameters']['DnCNN']
    params['num_features'] = int(dncnn_cfg['num_features'])
    params['num_layers'] = int(dncnn_cfg['num_layers'])
    params['fourier_layer'] = configs['model_parameters']['Fourier']['fourier_layer']
    train_cfg = configs['train_parameters']
    params['lr'] = float(train_cfg['lr'])
    params['n_epochs'] = int(train_cfg['epochs'])
    return params
def make_dataset_table(data_path, csv_file_path):
    """Write a one-column CSV ('image') listing every file found under
    '<data_path>/Cropped images', in sorted order."""
    images_dir = os.path.join(data_path, 'Cropped images')
    print('dataset csv table creating...')
    rows = [np.array([os.path.join(images_dir, name)])
            for name in tqdm(sorted(os.listdir(images_dir)))]
    pd.DataFrame(np.vstack(rows), columns=['image']).to_csv(csv_file_path, index=False)
def train_val_split(csv_file_path, val_size=0.2):
    """Assign each row of the dataset table a 'train'/'val' phase and rewrite
    the CSV in place.

    The split is deterministic: the phase labels are shuffled with a fixed
    Random(1) seed, and the validation share is int(n * val_size) + 1 rows.
    """
    table = pd.read_csv(csv_file_path)
    n_val = int(len(table) * val_size) + 1
    phases = ['train'] * (len(table) - n_val) + ['val'] * n_val
    random.Random(1).shuffle(phases)
    labeled = pd.concat([table[['image']],
                         pd.DataFrame(phases, columns=['phase'])],
                        axis=1)
    labeled.to_csv(csv_file_path, index=False)
def setup_experiment(title, params, log_dir):
    """Compose the experiment name, open its TensorBoard writer and derive
    the checkpoint filename.

    :return: (SummaryWriter, model_name, best_model_path)
    """
    model_name = '_'.join([title, 'fourier', params['fourier_layer']])
    writer = SummaryWriter(log_dir=os.path.join(log_dir, model_name))
    return writer, model_name, f"{model_name}.best.pth"
def count_parameters(model):
    """Count the trainable (requires_grad) parameters of ``model``."""
    total = 0
    for parameter in model.parameters():
        if parameter.requires_grad:
            total += parameter.numel()
    return total
def collate_transform(batch_transform=None):
    """Build a DataLoader ``collate_fn`` that first applies the default
    collation and then, if given, a batch-level transform."""
    def collate(samples):
        batch = torch.utils.data.dataloader.default_collate(samples)
        return batch if batch_transform is None else batch_transform(batch)
    return collate
def run_epoch(model, iterator,
              criterion, optimizer,
              metrics,
              phase='train', epoch=0,
              device='cpu', writer=None):
    """Run one epoch over ``iterator`` and return (mean loss, mean metrics).

    :param model: network mapping noisy images to cleaned images
    :param iterator: dataloader yielding (images, masks) batches
    :param criterion: loss comparing cleaned images against the targets
    :param optimizer: optimizer; only used when phase == 'train'
    :param metrics: dict of name -> callable(prediction, target)
    :param phase: 'train' enables gradients and updates, otherwise evaluate
    :param epoch: epoch index, used as the TensorBoard x-axis value
    :param device: device the batches are moved to
    :param writer: optional TensorBoard SummaryWriter
    :return: (epoch_loss, epoch_metrics) averaged over batches
    """
    is_train = (phase == 'train')
    if is_train:
        model.train()
    else:
        model.eval()
    epoch_loss = 0.0
    epoch_metrics = {metric_name: 0.0 for metric_name in metrics.keys()}
    # Gradients are only tracked during the training phase.
    with torch.set_grad_enabled(is_train):
        for i, (images, masks) in enumerate(tqdm(iterator)):
            images, masks = images.to(device), masks.to(device)
            cleaned_images = model(images)
            loss = criterion(cleaned_images, masks)
            if is_train:
                optimizer.zero_grad()
                loss.backward()
                optimizer.step()
            epoch_loss += loss.item()
            # detach() so metric evaluation cannot extend the autograd graph
            for metric_name in epoch_metrics.keys():
                epoch_metrics[metric_name] += metrics[metric_name](cleaned_images.detach(), masks)
    # Average loss and metrics over the number of batches.
    epoch_loss = epoch_loss / len(iterator)
    for metric_name in epoch_metrics.keys():
        epoch_metrics[metric_name] = epoch_metrics[metric_name] / len(iterator)
    if writer is not None:
        writer.add_scalar(f"loss_epoch/{phase}", epoch_loss, epoch)
        for metric_name in epoch_metrics.keys():
            writer.add_scalar(f"metric_epoch/{metric_name}/{phase}", epoch_metrics[metric_name], epoch)
    return epoch_loss, epoch_metrics
def train(model,
          train_dataloader, val_dataloader,
          criterion,
          optimizer, scheduler,
          metrics,
          n_epochs,
          device,
          writer,
          best_model_path):
    """Train for ``n_epochs``, validating after each epoch.

    The state dict with the lowest validation loss so far is checkpointed
    to ``best_model_path``; the (plateau) scheduler, when given, is stepped
    on the validation loss.
    """
    best_val_loss = float('+inf')
    for epoch in range(n_epochs):
        train_loss, train_metrics = run_epoch(model, train_dataloader,
                                              criterion, optimizer,
                                              metrics,
                                              phase='train', epoch=epoch,
                                              device=device, writer=writer)
        # No optimizer during validation -> no parameter updates.
        val_loss, val_metrics = run_epoch(model, val_dataloader,
                                          criterion, None,
                                          metrics,
                                          phase='val', epoch=epoch,
                                          device=device, writer=writer)
        if scheduler is not None:
            scheduler.step(val_loss)
        # Keep only the best checkpoint (lowest validation loss) on disk.
        if val_loss < best_val_loss:
            best_val_loss = val_loss
            torch.save(model.state_dict(), best_model_path)
        print(f'Epoch: {epoch + 1:02}')
        metrics_output = ' | '.join([metric_name + ': ' +
                                     "{:.2f}".format(train_metrics[metric_name]) for metric_name in train_metrics.keys()])
        print(f'\tTrain Loss: {train_loss:.2f} | Train Metrics: ' + metrics_output)
        metrics_output = ' | '.join([metric_name + ': ' +
                                     "{:.2f}".format(val_metrics[metric_name]) for metric_name in val_metrics.keys()])
        print(f'\t Val Loss: {val_loss:.2f} | Val Metrics: ' + metrics_output)
def main():
    """Entry point: build the dataset table, loaders, model, losses and
    metrics from configs.yaml, then run training."""
    root = get_root()
    params = get_params(root)
    train_data_path, dataset_table_path, log_dir = (params['train_data'],
                                                    params['dataset_table_path'],
                                                    params['log_dir'])
    image_size, batch_size = (params['image_size'],
                              params['batch_size'])
    lr, n_epochs = (params['lr'],
                    params['n_epochs'])
    # Build (and deterministically split) the CSV table describing the data.
    make_dataset_table(train_data_path, dataset_table_path)
    train_val_split(dataset_table_path)
    dataset = pd.read_csv(dataset_table_path)
    pre_transforms = torchvision.transforms.Compose([Resize(size=image_size), ToTensor()])
    # Noise is added at batch level inside the collate function.
    train_batch_transforms = BatchRicianNoise()
    # train_batch_transforms = BatchRandomErasing()
    train_dataset = DenoisingDataset(dataset=dataset[dataset['phase'] == 'train'],
                                     transform=pre_transforms)
    train_collate = collate_transform(train_batch_transforms)
    train_dataloader = torch.utils.data.DataLoader(dataset=train_dataset,
                                                   batch_size=batch_size,
                                                   shuffle=True,
                                                   collate_fn=train_collate)
    val_batch_transforms = BatchRicianNoise()
    # val_batch_transforms = BatchRandomErasing()
    val_dataset = DenoisingDataset(dataset=dataset[dataset['phase'] == 'val'],
                                   transform=pre_transforms)
    val_collate = collate_transform(val_batch_transforms)
    val_dataloader = torch.utils.data.DataLoader(dataset=val_dataset,
                                                 collate_fn=val_collate)
    # Optional Fourier layer configuration ('None' in YAML disables it).
    fourier_params = None
    if params['fourier_layer'] != 'None':
        fourier_params = {'fourier_layer': params['fourier_layer']}
    num_features, num_layers = params['num_features'], params['num_layers']
    model = DnCNN(n_channels=1,
                  num_features=num_features, num_layers=num_layers,
                  image_size=image_size, fourier_params=fourier_params).to(device)
    writer, model_name, best_model_path = setup_experiment(model.__class__.__name__, params, log_dir)
    best_model_path = os.path.join(root, best_model_path)
    print(f"Model name: {model_name}")
    print(f"Model has {count_parameters(model):,} trainable parameters")
    print()
    # Weighted combination: mostly multiscale SSIM with an L1 component.
    criterion = CombinedLoss([SIMMLoss(multiscale=True), L1Loss()], [0.8, 0.2])
    optimizer = torch.optim.Adam(model.parameters(), lr=lr)
    scheduler = torch.optim.lr_scheduler.ReduceLROnPlateau(optimizer, 'min', factor=0.5, patience=5)
    metrics = {'PSNR': PSNR(), 'SSIM': SSIM(multiscale=False), 'FSIM': FSIM()}
    print("To see the learning process, use command in the new terminal:\ntensorboard --logdir <path to log directory>")
    print()
    train(model,
          train_dataloader, val_dataloader,
          criterion,
          optimizer, scheduler,
          metrics,
          n_epochs,
          device,
          writer,
          best_model_path)
if __name__ == "__main__":
main()
| cviaai/ADAIR | denoising/train_denoising.py | train_denoising.py | py | 9,811 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.set_printoptions",
"line_number": 25,
"usage_type": "call"
},
{
"api_name": "random.seed",
"line_number": 30,
"usage_type": "call"
},
{
"api_name": "numpy.random.seed",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "numpy.random",
... |
72152597793 | from fastapi import APIRouter, Depends, HTTPException, status, Response
from requests import session
from controllers.controllers import ReservationController
from controllers.exceptions import ReservationException
from dependencies import get_token_header
from models import models
from schemas import schemas
from db.database import get_db, engine
from sqlalchemy.orm import Session
# Create any missing ORM tables before the router is used.
models.Base.metadata.create_all(bind=engine)
# All routes below are served under the /reservations prefix.
router = APIRouter(
    prefix="/reservations",
    tags=["Reservations"],
)
@router.post(
    "/create_reservation",
    response_model=schemas.Reservation,
    summary="Création d'une reservation",
    dependencies=[Depends(get_token_header)]
)
def create_reservation(response: Response, reservation: schemas.ReservationCreate, db: Session = Depends(get_db)):
    # Create a reservation row and respond 201 with the created entity.
    try:
        reservation_to_create = ReservationController.create_reservation(reservation, db)
        response.status_code = status.HTTP_201_CREATED
        return reservation_to_create
    except Exception:
        # NOTE(review): the broad `except Exception` maps every failure to a
        # generic 400 and hides the cause; sibling endpoints catch the more
        # specific ReservationException -- confirm which style is intended.
        raise HTTPException(status_code=400, detail="Impossible d'effectuer la reservation")
@router.put(
    "/edit_statut_reservation/{_id}/",
    response_model=schemas.Reservation,
    summary="Modification de statut d'une reservation",
    dependencies=[Depends(get_token_header)]
)
def edit_statut_reservation(response: Response, _id: int, new_statut: str, db: Session = Depends(get_db)):
    # Update the status of reservation `_id`; respond 201 with the result.
    try:
        reservation_edit = ReservationController.edit_statut_reservation(_id, new_statut, db)
        response.status_code = status.HTTP_201_CREATED
        return reservation_edit
    except ReservationException as e:
        # Controller exceptions carry HTTPException-compatible attributes
        # (e.g. status_code/detail); forward them unchanged.
        raise HTTPException(**e.__dict__)
@router.delete(
    "/delete_reservation/{_id}",
    summary="Suppression d'une reservation",
    dependencies=[Depends(get_token_header)]
)
def delete_reservation(_id: int, db: Session = Depends(get_db)):
    # Delete reservation `_id` and return a confirmation message.
    try:
        ReservationController.delete_reservation(_id, db)
        return {"message": "Reservation supprimée"}
    except ReservationException as e:
        # Forward controller errors as HTTP errors (see edit endpoint).
        raise HTTPException(**e.__dict__)
@router.get("/get_all_reservations", response_model=list[schemas.Reservations], summary="Récupération de toutes les réservations")
def get_all_reservations(db: Session = Depends(get_db)):
try:
reservations = ReservationController.get_all_reservations(db)
return reservations
except Exception:
raise HTTPException(status_code=400, detail="Impossible de récupérer la liste des reservations")
| Landris18/PamREST | Backend/routers/reservations.py | reservations.py | py | 2,557 | python | en | code | 4 | github-code | 1 | [
{
"api_name": "models.models.Base.metadata.create_all",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "models.models.Base",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "models.models",
"line_number": 12,
"usage_type": "name"
},
{
"api_... |
26198111833 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
# PYTHON_ARGCOMPLETE_OK
"""
SVG Templating System (C) Max Gaukler and other members of the FAU FabLab 2013-2022
unlimited usage allowed, see LICENSE file
"""
from lxml import etree
from copy import deepcopy
import inspect
from io import BytesIO
import re
import locale
import codecs
import argparse
import argcomplete
import sys
import os
import subprocess
from json import loads, dumps
from repoze.lru import lru_cache # caching decorator for time-intensive read functions
from logging import error, warning
import requests
__author__ = 'Max Gaukler, sedrubal'
__license__ = 'unlicense'
# <editor-fold desc="argparse">
# Command-line interface: positional product/order ids (mutually exclusive
# with --json-input) plus JSON in/out switches for scripted use.
parser = argparse.ArgumentParser(description='Automated generating of labels for products from the ERP-web-API')
parser.add_argument('ids', metavar='ids', type=str, nargs='*', default='',
                    help='the ids of the products (4 digits) or purchase orders (PO + 5 digits) to generate a label. '
                         'You can use expressions like 5x1337 to print 5 times the label for 1337. '
                         'And you can also use stdin for ids input. '
                         'Can\'t be used with \'json-input\'.')
parser.add_argument('-o', '--json-output', action='store_true', dest='json_output',
                    help='use this, if you only want to fetch the data for the labels from the ERP-web-API '
                         'and if you want to read the data as json from stdout. '
                         'Can\'t be used with \'json-input\'.')
parser.add_argument('-i', '--json-input', action='store_true', dest='json_input',
                    help='use this, if the data for the labels should be provided through stdin as json '
                         'instead of fetching it from ERP-web-API. '
                         'Can\'t be used with \'ids\' and \'json-output\'.')
# Enable shell tab-completion (see PYTHON_ARGCOMPLETE_OK marker at the top).
argcomplete.autocomplete(parser)
args = parser.parse_args()
# </editor-fold>
# <editor-fold desc="create svg label">
def clear_group_members(tree, group):
    """Empty every child of the SVG group whose id is ``group``.

    Each child element is cleared in place (its subelements, text and
    attributes are removed) but the child element itself stays in the tree.

    :param tree: parsed SVG tree
    :param group: id attribute of the target <g> element
    """
    xpath = ".//{http://www.w3.org/2000/svg}g[@id='" + group + "']/*"
    for member in tree.findall(xpath):
        member.clear()
# <editor-fold desc="barcode">
def make_barcode_xml_elements(string, barcode):
    """
    generates an EAN8 barcode and returns a list of lxml elements
    :param string: text to be encoded as a barcode
    :param barcode: barcode library module providing get_barcode_class()
        (presumably python-barcode -- confirm at the call site)
    :return: list of the SVG <rect> elements making up the barcode
    """
    # In-memory pseudo file object the barcode SVG is rendered into.
    s = BytesIO()
    ean8 = barcode.get_barcode_class('ean8')
    b = ean8(string)
    b.write(s)
    # For debugging, b.save('barcode') would write barcode.svg instead.
    barcode_elements = etree.fromstring(s.getvalue())
    s.close()
    # Only the bar rectangles are needed; the caller appends them to its
    # own <g> group.
    return barcode_elements.findall(".//{http://www.w3.org/2000/svg}rect")
def ean8_check_digit(num):
    """Return the EAN-8 check digit for ``num``.

    The digits are weighted 3, 1, 3, 1, ... starting from the rightmost
    digit; the check digit is the distance from that weighted sum to the
    next multiple of 10.

    :param num: number to be encoded (without the check digit)
    :return: check digit in 0..9
    """
    total = 0
    for position, digit in enumerate(str(num)[::-1]):
        weight = 3 if position % 2 == 0 else 1
        total += int(digit) * weight
    return (10 - total % 10) % 10
def create_ean8(num):
    """Build a valid EAN-8 string from a number.

    - an 8-character value is returned unchanged (assumed complete)
    - numbers below 10000 are moved into the private range: 200nnnn
    - the value is zero-padded to 7 digits and the check digit appended

    :param num: product number (int or str)
    :return: 8-character EAN-8 string
    """
    text = str(num)
    if len(text) == 8:
        return text
    value = int(num)
    if value < 10000:
        value += 2000000
    return '%07d%d' % (value, ean8_check_digit(value))
# </editor-fold>
def make_label(data, etikett_num, barcode, label_template):  # , dict_input (deprecated)
    """
    Generates one label by filling the SVG template with product data.
    :param data: dict with keys ID, ORT, PREIS, TITEL, VERKAUFSEINHEIT
    :param etikett_num: running number used to keep generated SVG ids unique
    :param barcode: barcode library module (see make_barcode_xml_elements)
    :param label_template: SVG template element; placeholders are the key
        names themselves (e.g. the literal text 'TITEL')
    :return: a filled SVG label element, or None when ``data`` is empty
    """
    etikett = deepcopy(label_template)
    etikett.set("id", "etikettGeneriert" + str(etikett_num))
    if len(data) == 0:
        return None
    # replace all text placeholders; only the known keys are substituted
    replacements = [[key, value] for [key, value] in data.items() if key in ['ID', 'ORT', 'PREIS', 'TITEL', 'VERKAUFSEINHEIT']]
    for element in etikett.iter("*"):
        # tail and text are handled separately; at most one substitution each
        for [key, value] in replacements:
            if element.tail is not None and key in element.tail:
                element.tail = element.tail.replace(key, value)
                break  # break to avoid double substitution (e.g. "TITEL" -> "3D-Druck NORMALPREIS" -> "3D-Druck Normal0,30 €")
        for [key, value] in replacements:
            if element.text is not None and key in element.text:
                element.text = element.text.replace(key, value)
                break  # break to avoid double substitution (e.g. "TITEL" -> "3D-Druck NORMALPREIS" -> "3D-Druck Normal0,30 €")
    # draw the EAN-8 barcode into the template's 'barcode' group, then
    # rename the group so ids stay unique across multiple labels
    for e in make_barcode_xml_elements(create_ean8(data["ID"]), barcode):
        etikett.find(".//{http://www.w3.org/2000/svg}g[@id='barcode']").append(e)
    etikett.find(".//{http://www.w3.org/2000/svg}g[@id='barcode']").set("id", "barcode" + str(etikett_num))
    return etikett
# </editor-fold>
@lru_cache(1)
def read_product_db():
    """Fetch the full product price list from the FabLab ERP web API and
    cache it for the lifetime of the process."""
    response = requests.get('https://brain.fablab.fau.de/build/pricelist/price_list-Alle_Produkte.html.json')
    return response.json()
# <editor-fold desc="fetch data">
# <editor-fold desc="fetch data from oerp">
@lru_cache(1024)
def read_product(product_id):
    """
    Fetch the label data for one product from the cached product database.
    :param product_id: numeric product ID (int or digit string)
    :return: dict with keys TITEL, ORT, ID, PREIS, VERKAUFSEINHEIT, or an
        empty dict when the product cannot be found
    """
    pid = int(product_id)
    # Product keys in the database are zero-padded to four digits.
    product_key = "{:04}".format(pid)
    products = read_product_db()
    if product_key not in products:
        error("ID %d nicht gefunden!" % pid)
        return {}
    product = products[product_key]
    return {"TITEL": product['name'],
            "ORT": product["_location_str"],
            "ID": product_key,
            "PREIS": product['_price_str'],
            "VERKAUFSEINHEIT": product['_per_uom_str']}
@lru_cache(128)
def get_ids_from_order(po_id):
    """
    Fetch the product IDs of a purchase order.

    Currently unsupported: this only reports an error and implicitly returns
    None (a Py3-compatible JSON exporter for purchase orders is needed first).

    :param po_id: The openERP purchase order ID
    :return: None today; an array of openERP product IDs once implemented
    """
    error("purchase orders (PO1234) are currently not supported. We first need to create a JSON exporter for that to have an API that works with Py3")
    # return [1, 42, 2937]
# </editor-fold>
def read_products_from_stdin():
    """
    Parse the JSON label description supplied on stdin.

    :return: a dict containing the information for the labels
    """
    return loads(read_stdin())
# </editor-fold>
def read_stdin():
    """
    Read the complete text available on stdin.

    :return: everything read from sys.stdin as one string
    """
    return sys.stdin.read()
def main():
    """
    Entry point: collect product data, generate labels and produce a PDF file
    (or, with --json-output / --json-input, dump or read the label data as JSON).

    :raise Exception:
    """
    # <editor-fold desc="check arguments">
    # json_input excludes json_output, and json_input excludes explicit ids
    if args.json_input and args.json_output:
        error("Invalid arguments. If you don't want to create a PDF-label you can't provide data through stdin.")
        parser.print_help()
        exit(1)
    elif args.json_input and args.ids:
        error("Invalid arguments. If you want to use the stdin for json data input, you mustn't provide ids.")
        parser.print_help()
        exit(1)
    # </editor-fold>
    script_path = os.path.realpath(os.path.dirname(inspect.getfile(inspect.currentframe())))  # path of this script
    if not args.json_input:
        # <editor-fold desc="evaluate input, replace PO IDs with their product ids, fetch data from oerp">
        purchase_regex = re.compile(r"^(\d{1,2}x)?po\d{1,5}$")  # (a number and 'x' and) 'PO' or 'po' and 1-5 digits
        product_regex = re.compile(r"^(\d{1,2}x)?\d{1,4}$")  # (a number and 'x' and) 1 to 4 digits
        labels_data = dict()
        if len(args.ids):
            input_ids = args.ids
        else:
            # no ids on the command line: read a space-separated list from stdin
            input_ids = read_stdin().strip().split(' ')
        for args_id in input_ids:
            number_of_labels = 1
            args_id = args_id.lower()
            if 'x' in args_id:
                number_of_labels_str = args_id[:3].split('x', 2)[0]
                assert number_of_labels_str.isdigit(), "invalid input"
                # multiple labels requested: (1-25)x(product_id)
                number_of_labels = max(0, min(int(number_of_labels_str), 25))
                x_position = args_id.find('x')
                args_id = args_id[x_position + 1:]
            if purchase_regex.match(args_id):
                # purchase order: expand to the product ids it contains
                prod_ids = get_ids_from_order(args_id)
                for prod_id in prod_ids:
                    prod_id = int(prod_id)
                    if prod_id not in labels_data.keys():
                        prod_data = deepcopy(read_product(prod_id))
                        if len(prod_data):
                            labels_data[prod_id] = prod_data
                            labels_data[prod_id]['COUNT'] = number_of_labels
                    else:
                        # product seen before: just bump its label count
                        labels_data[prod_id]['COUNT'] += number_of_labels
            elif product_regex.match(args_id):
                args_id = int(args_id)
                if args_id not in labels_data.keys():
                    prod_data = deepcopy(read_product(args_id))
                    if len(prod_data):
                        labels_data[args_id] = prod_data
                        labels_data[args_id]['COUNT'] = number_of_labels
                else:
                    labels_data[args_id]['COUNT'] += number_of_labels
            else:
                error("The ID '" + args_id + "' you entered is invalid.")
                exit(1)
        if not labels_data:
            error("No valid products found. Products must have a valid 'internal ID' like 0123.")
            exit(1)
        # </editor-fold>
    else:
        labels_data = read_products_from_stdin()
    # refuse unreasonably large print jobs
    label_count = 0
    for prod in labels_data.values():
        label_count += prod['COUNT']
    if label_count > 50:
        error("Too many labels!")
        exit(1)
    if args.json_output:
        print(dumps(labels_data, sort_keys=True, indent=4, separators=(',', ': ')))  # json.dumps in pretty
    else:
        import barcode
        # <editor-fold desc="load page template (for labels) and empty is">
        template = etree.parse(script_path + "/vorlage-etikettenpapier-60x30.svg")
        # remove everything with an id ending with 'ignore'
        # NOTE(review): the original comment said "starting with 'ignore'" while
        # the code checks endswith() -- confirm which is intended.
        for e in template.findall("*"):
            if e.get("id", "").endswith("ignore"):
                e.clear()
        # pick out items
        # they need to be directly on the root level in the file
        # (or at least not inside a group with an applied transformation), so that position and size is correct
        etikett_template = deepcopy(template.find(".//{http://www.w3.org/2000/svg}g[@id='etikett']"))
        clear_group_members(etikett_template, 'barcode')
        clear_group_members(template, 'etikett')
        # </editor-fold>
        # <editor-fold desc="deprecated things">
        # <editor-fold desc="tab-newline-separated data from a google doc">
        # url=urllib2.urlopen("https://docs.google.com/spreadsheet/pub?key=0AlfhdBG4Ni7BdFJtU2dGRDh2MFBfWHVoUEk5UlhLV3c&single=true&gid=0&output=txt")
        # textInput=url.read().decode('utf-8')
        # # convert to array
        # listInput=[]
        # for line in textInput.split('\n'):
        # listInput.append(line.split('\t'))
        # # HARDCODED: the fourth column contains the column name
        # columnNames=listInput[3]
        # # convert to dictionary: {"SPALTENNAME":"Inhalt",...}
        # </editor-fold>
        # dict_input = {}
        # for line in listInput:
        # n=0
        # d={}
        # for col in line:
        # d[columnNames[n]]=col
        # n=n+1
        # dict_input[d["ID"]]=d
        # print(p)
        # p['lst_price'] p['name'] p['description']
        # avoid errors: if empty output is requested, produce an empty page instead of nothing at all
        # if len(product_ids) == 0:
        # product_ids = [None]
        # </editor-fold>
        # <editor-fold desc="make temp dir">
        output_dir = script_path + '/public/temp/'
        if not os.path.exists(output_dir):
            os.makedirs(output_dir)
        # </editor-fold>
        page_count = 0
        pdfs_to_merge = []
        for label_data in labels_data.values():
            # <editor-fold desc="generate and save a svg->pdf for each label"
            page = deepcopy(template)
            for i in range(label_data['COUNT']):
                # NOTE(review): etikett_num is always 0 here, so every label on a
                # page shares the same generated element ids -- confirm intended.
                label_svg = make_label(label_data, 0, barcode, etikett_template)
                if label_svg is not None:
                    page.getroot().append(label_svg)
            # <editor-fold desc="write svg and convert it to pdf">
            output_file_base_name = "output-etikettenpapier-%d" % page_count
            svg_file = output_dir + output_file_base_name + ".svg"
            pdf_file = output_dir + output_file_base_name + ".pdf"
            page.write(svg_file)
            # inkscape renders svg -> pdf; egrep suppresses known harmless dbus noise
            subprocess.call("inkscape {in_file} --export-filename={out_file} 2>&1 | egrep -v '(^$|dbus|Failed to get connection)'".format(
                in_file=svg_file,
                out_file=pdf_file
            ), shell=True)
            # </editor-fold>
            pdfs_to_merge.append(pdf_file)
            page_count += 1
            # <editor-fold>
        # <editor-fold desc="append pages (pdftk)"
        pdf_output = output_dir + "output-etikettenpapier.pdf"
        subprocess.check_call(["qpdf", "--empty", "--pages"] + pdfs_to_merge + ["--", pdf_output])
        # </editor-fold>
        # <editor-fold desc="clean">
        # remove the per-page intermediate files; already-missing files are ignored
        for p in range(page_count):
            try:
                os.remove(output_dir + ("output-etikettenpapier-%d.pdf" % p))
            except OSError:
                pass
            try:
                os.remove(output_dir + ("output-etikettenpapier-%d.svg" % p))
            except OSError:
                pass
        # </editor-fold>
    exit(0)


if __name__ == "__main__":
    main()
| fau-fablab/etiketten | svgtemplate.py | svgtemplate.py | py | 15,188 | python | en | code | 2 | github-code | 1 | [
{
"api_name": "argparse.ArgumentParser",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "argcomplete.autocomplete",
"line_number": 47,
"usage_type": "call"
},
{
"api_name": "io.BytesIO",
"line_number": 72,
"usage_type": "call"
},
{
"api_name": "lxml.etre... |
16168565185 | from datetime import datetime
from json import loads
from random import choice
from shutil import disk_usage
def main():
    """Compose and print the status line consumed by the shell script:
    session quote, free disk space, RAM usage and the current timestamp."""
    timestamp = datetime.now().strftime("%m/%d/%Y, %H:%M:%S")
    parts = (check_session(), get_disk_usage(), get_virtual_memory(), timestamp)
    print("{} / {} / {} / {}".format(*parts))
def get_quote():
    """
    Pick a random quote from quotes.json.

    The file must contain a JSON array of objects with 'text' and 'author'
    keys.

    :return: the quote text truncated to 30 characters, followed by its author
    """
    # Fix: use a context manager so the file handle is closed deterministically
    # (the original left it open until garbage collection), and read as UTF-8
    # explicitly so quotes with non-ASCII characters work on any locale.
    with open('quotes.json', encoding='utf-8') as quotes_file:
        data = loads(quotes_file.read())
    selected = choice(data)
    text = selected['text']
    author = selected['author']
    return f'{text[:30]}.. — {author}'
def get_disk_usage():
    """Return the free space of the root partition, formatted in gigabytes."""
    free_bytes = disk_usage("/").free
    return f'💾 {free_bytes // (2**30)} GB'
def get_virtual_memory():
    """
    Return the RAM usage in percent (using the psutil 3rd-party module).

    :return: the usage string, or a notice when psutil is not installed
    """
    try:
        from psutil import virtual_memory
    except ImportError:
        # psutil is optional; notify the user instead of crashing the bar
        return 'psutil is not installed'
    # Fix: the original literal was mojibake ('âš¡' = UTF-8 bytes of the
    # lightning emoji decoded as latin-1); the sibling 💾 literal shows the
    # file is meant to contain real emoji.
    return f'⚡ {virtual_memory().percent}%'
def check_session():
    """
    Return the quote for the current minute.

    A fresh quote is parsed only when the clock second is exactly 1 and is
    persisted to session.txt; otherwise the previously stored quote is
    returned.
    """
    if datetime.now().second == 1:
        quote = get_quote()
        with open('session.txt', 'w') as f:
            f.write(quote)
        return quote
    try:
        with open('session.txt', 'r') as f:
            return f.read()
    except FileNotFoundError:
        # Fix: on the very first run (before any second==1 write happened)
        # session.txt does not exist yet and the original crashed here.
        # Generate a quote and seed the session file instead.
        quote = get_quote()
        with open('session.txt', 'w') as f:
            f.write(quote)
        return quote


if __name__ == "__main__":
    main()
| pasenidis/ricing | bar/main.py | main.py | py | 1,569 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "datetime.datetime.now",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "datetime.datetime",
"line_number": 9,
"usage_type": "name"
},
{
"api_name": "json.loads",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "random.choice",
... |
72212308833 | import numpy as np
import torch
import torch.nn as nn
import torch.optim as optim
from torch.autograd import Variable
from . import MNISTAttack
class MNIST_LBFGS(MNISTAttack):
    """
    Targeted adversarial attack on an MNIST classifier that optimizes a
    perturbation tensor ``self._model.r`` (28x28, owned by the wrapped model)
    until the model predicts the requested target class, optionally with an
    L1 or L2 penalty on the perturbation.

    NOTE(review): ``self._model``, ``self._optimizer`` and ``self._loss_fn``
    are presumably set up by the MNISTAttack base class -- confirm there.
    """

    def __init__(self, model_class, weights_file, regularization="l2"):
        """
        :param model_class: model class forwarded to the MNISTAttack base
        :param weights_file: weights file forwarded to the MNISTAttack base
        :param regularization: 'l2', 'l1' or None; penalty applied to the
            perturbation during the attack
        """
        super().__init__(model_class, weights_file)
        assert regularization in ['l2', 'l1', None], \
            'Please choose valid regularization'
        self._regularization = regularization

    def attack(self, x, y_true, y_target, regularization=None):
        """
        Optimize the perturbation so the model classifies ``x`` as ``y_target``.

        :param x: input image data convertible to a FloatTensor
        :param y_true: true label -- NOTE(review): currently unused
        :param regularization: NOTE(review): currently unused; the value
            chosen in __init__ (self._regularization) is applied instead
        :return: (perturbation as numpy array, original prediction,
                  adversarial prediction)
        """
        _x = Variable(torch.FloatTensor(x))
        _y_target = Variable(torch.LongTensor([y_target]))
        # Reset value of r
        self._model.r.data = torch.zeros(28, 28)
        # Classification before modification
        y_pred = np.argmax(self._model(_x).data.numpy())
        # Optimization Loop (at most 1000 steps)
        for iteration in range(1000):
            self._optimizer.zero_grad()
            outputs = self._model(_x)
            xent_loss = self._loss_fn(outputs, _y_target)
            # add the configured penalty on the perturbation to the loss
            if self._regularization == "l1":
                adv_loss = xent_loss + torch.mean(torch.abs(self._model.r))
            elif self._regularization == "l2":
                adv_loss = xent_loss + torch.mean(torch.pow(self._model.r, 2))
            else:
                adv_loss = xent_loss
            adv_loss.backward()
            self._optimizer.step()
            # keep optimizing Until classif_op == _y_target
            y_pred_adversarial = np.argmax(self._model(_x).data.numpy())
            if y_pred_adversarial == y_target:
                break
        return self._model.r.data.numpy(), y_pred, y_pred_adversarial
| FoConrad/NN-Hashing | attacks/mnist_lbfgs.py | mnist_lbfgs.py | py | 1,664 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.autograd.Variable",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.FloatTensor",
"line_number": 17,
"usage_type": "call"
},
{
"api_name": "torch.autograd.Variable",
"line_number": 18,
"usage_type": "call"
},
{
"api_name": "tor... |
16348260053 | import inspect
import traceback
import uuid
import warnings
from dataclasses import dataclass, field
from datetime import datetime
from typing import Any
import pyttman
from pyttman.core.containers import MessageMixin, Reply
from pyttman.core.decorators import LifecycleHookRepository
from pyttman.core.mixins import PrettyReprMixin
def depr_raise(message: str, version: str) -> None:
    """
    Raise a DeprecationWarning composed from *message* and *version*.

    :param message: Deprecation message to display to users
    :param version: Pyttman version in which deprecation was declared
    :raise DeprecationWarning: always
    """
    raise DeprecationWarning(
        f"{message} - This was deprecated in version {version}.")
def depr_graceful(message: str, version: str):
    """
    Emit a DeprecationWarning via warnings.warn for *message* and *version*.

    :param message: Deprecation message to display to users
    :param version: Pyttman version in which deprecation was declared
    """
    out = f"{message} - This was deprecated in version {version}."
    # stacklevel=2 attributes the warning to the caller of the deprecated
    # API instead of to this helper, making the warning location actionable.
    warnings.warn(out, DeprecationWarning, stacklevel=2)
class Settings:
    """
    Plain object holding the settings declared in an app's settings module.

    Python modules are not picklable, so instead of keeping a reference to
    the whole settings module in ``pyttman.settings``, this class copies the
    module-level names into a picklable object. Any kwarg that is itself a
    module or a function is skipped: modules can't be pickled and callables
    are not valid settings.
    """

    def __init__(self, **kwargs):
        # Defaults; overridden below by matching keys in kwargs.
        self.APPEND_LOG_FILES: bool = True
        self.MIDDLEWARE: dict | None = None
        self.ABILITIES: list | None = None
        self.FATAL_EXCEPTION_AUTO_REPLY: list | None = None
        self.CLIENT: dict | None = None
        self.APP_BASE_DIR: str | None = None
        self.LOG_FILE_DIR: str | None = None
        self.APP_NAME: str | None = None
        self.LOG_FORMAT: str | None = None
        self.LOG_TO_STDOUT: bool = False
        for key, value in kwargs.items():
            # Omit modules (not picklable) and functions (not settings).
            if inspect.ismodule(value) or inspect.isfunction(value):
                continue
            setattr(self, key, value)

    def __repr__(self):
        attrs = dict(self.__dict__)
        return f"Settings({attrs})"
def _generate_name(name):
"""
Generates a user-friendly name out of
Command or Ability class names, by
inserting spaces in camel cased names
as well as truncating 'Command' and 'Ability'
in the names.
:param name: string, name of a class.
hint: Command or Ability subclass
:return: str, 'SetTimeCommand' -> 'Set Time'
"""
new_name = ""
for i in ("Ability", "feature", "Command", "command"):
name = name.replace(i, "")
for i, c in enumerate(name):
if i > 0 and c.isupper():
new_name += " "
new_name += c
return new_name
def _generate_error_entry(message: MessageMixin, exc: BaseException) -> Reply:
    """
    Creates a log entry for critical errors with a UUID bound
    to the log file entry, explaining the error. For the front
    end clients, a Reply object is returned to provide for
    end users who otherwise would experience a chatbot who
    didn't reply at all.

    :param message: MessageMixin being processed when the error occurred
    :param exc: the exception that was caught
    :return: Reply containing the configured auto-reply plus the error id
    """
    # unique id lets end users quote an error that support can grep for
    error_id = uuid.uuid4()
    traceback.print_exc()
    warnings.warn(f"{datetime.now()} - A critical error occurred in the "
                  f"application logic. Error id: {error_id}")
    pyttman.logger.log(level="error",
                       message=f"CRITICAL ERROR: ERROR ID={error_id} - "
                               f"The error was caught while processing "
                               f"message: '{message}'. Error message: '{exc}'")
    # user-facing fallback text configured in settings.MIDDLEWARE
    auto_reply = pyttman.settings.MIDDLEWARE['FATAL_EXCEPTION_AUTO_REPLY']
    return Reply(f"{auto_reply} ({error_id})")
@dataclass
class PyttmanApp(PrettyReprMixin):
    """
    The highest point of abstraction for a Pyttman application.
    This class holds the Settings, the Abilities and lifecycle hooks
    for the application, including the Client class used to interface with
    the platform of choice.
    This singleton instance is available through 'from pyttman import app'.
    """
    # fields included in the PrettyReprMixin representation
    __repr_fields__ = ("name", "client", "hooks")

    client: Any  # platform client; must provide run_client()
    name: str | None = field(default=None)
    settings: Settings | None = field(default=None)
    abilities: set = field(default_factory=set)
    hooks: LifecycleHookRepository = field(
        default_factory=lambda: LifecycleHookRepository())

    def start(self):
        """
        Start a Pyttman application by running its client.

        Any exception escaping the client is converted to a warning carrying
        the full traceback, so startup failures don't kill the process with
        an unexplained crash.
        """
        # noinspection PyBroadException
        try:
            self.client.run_client()
        except Exception:
            warnings.warn(traceback.format_exc())
| Hashmap-Software-Agency/Pyttman | pyttman/core/internals.py | internals.py | py | 4,999 | python | en | code | 7 | github-code | 1 | [
{
"api_name": "warnings.warn",
"line_number": 33,
"usage_type": "call"
},
{
"api_name": "inspect.ismodule",
"line_number": 64,
"usage_type": "call"
},
{
"api_name": "inspect.isfunction",
"line_number": 65,
"usage_type": "call"
},
{
"api_name": "pyttman.core.contai... |
11548748606 | from flask import Blueprint, views, current_app, session, request, redirect, render_template, jsonify
from Formclasses.admin_classes import DiaryandAdminPost, AinforClass, UserManage, EmployeeinforClass, PostClass
import datetime
from .Manager import toNormal
import re
import random
import os
Adminbp = Blueprint("admin", __name__)  # blueprint collecting all admin-facing routes
class AdminIndex(views.View):  # root user: highest permission; blocks all teacher/student access
    '''
    Admin dashboard view.

    Access logic:
    1. session must contain 'user' (i.e. the visitor is logged in);
    2. the query-string parameter 'xh' must exist and equal session['user'],
       otherwise the user is reported as invalid / not found;
    3. the identity looked up for that xh must be 0 (administrator);
       any other value means a regular user trying to escalate via the URL.
    '''

    def __init__(self):
        # one DB connection + cursor per view instance, closed in __del__
        self.DB = current_app.config["DATABASE"].connection()
        self.Cursor = self.DB.cursor()

    def check_identity(self):
        """Return 1 when the requester is a logged-in admin, otherwise an
        error redirect path ('/errormsg/1' bad identity, '/errormsg/2' no xh)."""
        if request.args.get("xh"):  # take the account id from the query string
            xh = request.args.get("xh")
            if xh == session["user"]:  # must match the logged-in user
                self.Cursor.execute("select identity from admin_ where xh = '%s'" % (xh))
                xh_identity = self.Cursor.fetchone()[0]
                if xh_identity == 0:  # identity 0 == administrator
                    return 1
                else:
                    return "/errormsg/1"
            else:
                return "/errormsg/1"
        else:  # no xh supplied at all
            return "/errormsg/2"

    def dispatch_request(self):
        if session.get("user"):  # must be logged in before any permission check
            if request.method == "GET":
                check_result = self.check_identity()
                # NOTE(review): check_identity() is invoked a second time here,
                # re-running the query -- confirm this is intentional.
                if isinstance(self.check_identity(), str):
                    return redirect(check_result)
                else:
                    # newest system announcements first
                    self.Cursor.execute("select head, postdate, xh from admin_post order by postdate desc")
                    result = self.Cursor.fetchall()
                    admin_posts = list()
                    for i in result:
                        admin_posts.append(
                            {"head": i[0], "time": toNormal(i[1], "second"), "xh": i[2]})  # heading, publish time and author of each post
                    today = datetime.date.today()  # today's date
                    # diary files are keyed as <xh><year>#<month>#<day>.txt
                    user_date = session["user"] + str(today.year) + "#" + str(today.month) + "#" + str(today.day)
                    self.Cursor.execute("select txtpath from users_diary where xh = :s", [session["user"]])
                    if (user_date + ".txt",) in self.Cursor.fetchall():  # only read today's diary if the file is registered
                        # NOTE(review): backslash separator assumes a Windows host -- confirm
                        with open(current_app.config["DOWNLOADPATHA"] + "\\" + toNormal(user_date, "first") + ".txt", "r",
                                  encoding="utf-8") as file:
                            content = file.read()
                    else:  # no diary yet today: empty content
                        content = ""
                    dpform = DiaryandAdminPost((content, admin_posts))  # bundles diary content and announcements
                    self.Cursor.execute("select xh, xm, sex, dept, major, job, comedate, image from employee_infor where xh = :s", [session["user"]])
                    infor = self.Cursor.fetchone()
                    sinfor = AinforClass(infor)  # basic profile information of the logged-in admin
                    return render_template("index.html", name=session["user"], identity="admin", proxy="", dpform=dpform, basic_infor=sinfor)
            else:
                return redirect("/errormsg/4")
        else:
            return redirect("/errormsg/1")

    def __del__(self):
        # release cursor and connection when the view instance dies
        self.Cursor.close()
        self.DB.close()
class AdminSystem(views.View):
    """
    Admin settings page: lists system announcements and pending
    information-change requests (GET) and creates/updates a system
    announcement (POST). Announcement bodies are stored as text files;
    (xh, postdate) is the composite key of admin_post.
    """

    def __init__(self):
        self.DB = current_app.config["DATABASE"].connection()
        self.Cursor = self.DB.cursor()
        self.get = AdminUsers()  # reused only for its permission check
        self.Form = PostClass()

    def toSpecial(self, string):
        """Convert a dotted date ('2023.5.9') to the zero-padded
        '#'-separated storage format ('2023#05#09')."""
        newstring = ""
        strlist = string.split(".")
        if len(strlist[0]) == 1:
            newstring += "0"
            newstring += strlist[0]
        else:
            newstring += strlist[0]
        newstring += "#"
        if len(strlist[1]) == 1:
            newstring += "0"
            newstring += strlist[1]
        else:
            newstring += strlist[1]
        newstring += "#"
        if len(strlist[2]) == 1:
            newstring += "0"
            newstring += strlist[2]
        else:
            newstring += strlist[2]
        return newstring

    def dispatch_request(self):
        if session.get("user"):
            check_result = self.get.noquery_check_identity()
            if isinstance(check_result, str):
                return redirect(check_result)
            else:
                self.Cursor.execute("select xh, head, postdate from admin_post")
                result = self.Cursor.fetchall()
                posts = list()  # all announcements, structure: [[xh, head, kind, dotted date, compact date], ...]
                for i in result:
                    string = ""
                    adminpost = list()
                    adminpost.append(i[0])
                    adminpost.append(i[1])
                    adminpost.append("系统公告")
                    adminpost.append(toNormal(i[2], "third"))
                    # NOTE(review): the loop variable 'i' is reused here and
                    # shadows the outer row variable -- works, but fragile.
                    for i in toNormal(i[2], "third").split("."):
                        string += i
                    adminpost.append(string)
                    posts.append(adminpost)
                # self.Cursor.execute("select xh, head, to_char(kh, '000000') from teacher_post")
                # result = self.Cursor.fetchall()
                # for i in result:
                # teacherpost = list()
                # teacherpost.append(i[0])
                # teacherpost.append(i[1])
                # teacherpost.append("课程公告")
                # teacherpost.append(i[2])
                # posts.append(teacherpost)
                # pending modification requests: (xh, identity) pairs
                self.Cursor.execute(
                    "select admin_.xh, identity from admin_, modify_infor_permission where admin_.xh = modify_infor_permission.xh and posted = 1")
                xh_identity = self.Cursor.fetchall()
                xh_identity_xm = list()  # all requests, structure: [[xh, role, name], ...]
                for i in xh_identity:  # identity decides which table holds the name
                    temp = list()
                    temp.append(i[0])
                    if i[1] == 1:
                        temp.append("学生")
                        self.Cursor.execute("select xm from student_infor where xh = :s", [i[0]])
                    else:
                        temp.append("教师")
                        self.Cursor.execute("select xm from employee_infor where xh = :s", [i[0]])
                    temp.append(self.Cursor.fetchone()[0])
                    xh_identity_xm.append(temp)
                if request.method == "GET":
                    return render_template("admin_setting.html", identity="admin", name=session["user"], proxy="", posts=posts,
                                           applies=xh_identity_xm, postform=self.Form)
                else:  # POST; note (xh, postdate) is the composite primary key
                    if self.Form.validate_on_submit():
                        xh = self.Form.Xh.data
                        head = self.Form.Head.data
                        date = self.toSpecial(self.Form.Date.data)  # dotted -> '#'-separated
                        content = self.Form.Postcontent.data
                        # NOTE(review): leftover debug prints -- consider removing
                        print(self.Form.data)
                        print(xh, head, date, content)
                        self.Cursor.execute("select identity from admin_ where xh = :s", [self.Form.Xh.data])
                        i = self.Cursor.fetchone()
                        if i:
                            if i[0] == 0:  # target account is an admin: system announcement
                                filename = xh + date + ".txt"  # body file named <xh><date>.txt
                                self.Cursor.execute("select * from admin_post where xh = :s and postdate = :s", [xh, date])  # already posted that day?
                                if self.Cursor.fetchone():  # yes: just rewrite the body and heading
                                    with open(current_app.config["DOWNLOADPATHP"] + "/" + filename, "w", encoding="utf-8") as file:
                                        file.write(content)
                                    self.Cursor.execute("update admin_post set head = :s where xh = :s and postdate = :s", [head, xh, date])
                                    self.DB.commit()
                                else:  # no: create a new announcement row + body file
                                    with open(current_app.config["DOWNLOADPATHP"] + "/" + filename, "w", encoding="utf-8") as file:
                                        file.write(content)
                                    self.Cursor.execute("insert into admin_post values (:s, :s, :s, :s)", [xh, head, date, filename])
                                    self.DB.commit()
                                return redirect("/admin/system")
                            else:
                                return render_template("admin_setting.html", identity="admin", name=session["user"], proxy="",
                                                       posts=posts,
                                                       applies=xh_identity_xm, postform=self.Form,
                                                       postmsg="您只可查看教师的课程公告")
                        else:
                            return render_template("admin_setting.html", identity="admin", name=session["user"], proxy="",
                                                   posts=posts,
                                                   applies=xh_identity_xm, postform=self.Form,
                                                   postmsg="输入表单有误,学号不存在")
                    else:
                        return render_template("admin_setting.html", identity="admin", name=session["user"], proxy="", posts=posts,
                                               applies=xh_identity_xm, postform=self.Form, postmsg="请按表单要求格式输出")
        else:
            return redirect("/errormsg/1")

    def __del__(self):
        self.Cursor.close()
        self.DB.close()
class AdminUsers(views.View):  # one GET page plus one AJAX POST handler
    """
    User management: GET renders all student/teacher accounts with their
    passwords and modification permission; POST (AJAX) bulk-creates accounts
    together with their default profile rows.
    """

    def __init__(self):
        self.DB = current_app.config["DATABASE"].connection()
        self.Cursor = self.DB.cursor()

    def noquery_check_identity(self):
        """Permission check without a query-string xh: the logged-in user
        must be among the identity-0 (admin) accounts."""
        self.Cursor.execute("select xh from admin_ where identity = 0")
        if (session["user"],) in self.Cursor.fetchall():
            return 1
        else:
            return "/errormsg/1"

    def dispatch_request(self):
        if session.get("user"):
            check_result = self.noquery_check_identity()
            if isinstance(check_result, str):
                return redirect(check_result)
            else:
                if request.method == "GET":
                    # identity 1 = student accounts, identity 2 = teacher accounts
                    self.Cursor.execute("select xh, pwd from admin_ where identity = 1")
                    stu_xh_pwd = self.Cursor.fetchall()
                    self.Cursor.execute("select xh, pwd from admin_ where identity = 2")
                    tea_xh_pwd = self.Cursor.fetchall()
                    studentmanage = list()
                    teachermanage = list()
                    for i in range(len(stu_xh_pwd)):
                        xh = stu_xh_pwd[i][0]
                        pwd = stu_xh_pwd[i][1]
                        self.Cursor.execute("select permission from modify_infor_permission where xh = :s", [xh])
                        permission = self.Cursor.fetchone()[0]
                        studentmanage.append(UserManage((xh, pwd, permission)))
                    for j in range(len(tea_xh_pwd)):
                        xh = tea_xh_pwd[j][0]
                        pwd = tea_xh_pwd[j][1]
                        self.Cursor.execute("select permission from modify_infor_permission where xh = :s", [xh])
                        permission = self.Cursor.fetchone()[0]
                        teachermanage.append(UserManage((xh, pwd, permission)))
                    return render_template("users.html", identity="admin", proxy="", name=session["user"], students=studentmanage, teachers=teachermanage)
                else:
                    # AJAX bulk create: parallel arrays from the form
                    xhs = request.form.getlist("xhs[]")
                    pwds = request.form.getlist("pwds[]")
                    permissions = request.form.getlist("permissions[]")
                    types = request.form.getlist("types[]")
                    for i in pwds:
                        if len(i) < 4 or len(i) > 18:
                            return "添加失败!密码长度有误"
                    # try:
                    for i in range(len(xhs)):
                        # placeholder profile rows for the two account types
                        default_content = (xhs[i], "未填写", "未填写", "未填写", "未填写(中国大陆居民身份证)", "未填写(年.月.日)",
                                           "未填写", "未填写", "未填写(班号)", "未填写", "未填写(邮箱格式)", "未填写(学生类型)", "未填写(入学时间)",
                                           0, "未填写", "未填写", "default.jpg")
                        default_content1 = [xhs[i], "未填写", "未填写", "未填写", "未填写(可选)", "未填写", "未填写(中国大陆居民身份证)",
                                            "未填写", "未填写", "未填写", "未填写", "未填写(邮箱格式)", "未填写(入职时间)", "未填写", "default.jpg"]
                        try:
                            self.Cursor.execute("insert into admin_ values (:s, :s, :s)", [xhs[i], pwds[i], 1 if types[i]=="s" else 2])
                            self.Cursor.execute("insert into modify_infor_permission values (:s, :s, :s, 0)", [xhs[i], session["user"], permissions[i]])
                            if types[i] == "s":
                                self.Cursor.execute("insert into student_infor values (:s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s)", default_content)
                            else:
                                self.Cursor.execute("insert into employee_infor values (:s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s, :s)", default_content1)
                            self.DB.commit()
                        except:
                            self.DB.rollback()
                            return "添加失败!账号重复"
                    # for/else: success is reported only after every row inserted
                    # NOTE(review): reconstructed as for/else from flattened
                    # source -- confirm against the original file.
                    else:
                        return "添加成功!"
        else:
            return "添加失败!权限不足"

    def __del__(self):
        self.Cursor.close()
        self.DB.close()
class AdminInfor(views.View):
    """
    Admin profile page: GET pre-fills the form from employee_infor; POST
    saves the edited profile (and optionally a new avatar image file).
    """

    def __init__(self):
        self.DB = current_app.config["DATABASE"].connection()
        self.Cursor = self.DB.cursor()
        self.get = AdminIndex()  # reused only for its identity check
        self.Form = EmployeeinforClass()
        # fields rendered by the template, in display order
        self.all_fields = ['xh', 'xm', 'mz', 'sex', 'marry', 'society', 'id', 'major',
                           'dept', 'job', 'phone', 'mail', 'comedate', 'address']

    def copy_form(self):
        """Load the employee_infor row of the requested xh into the form."""
        self.Cursor.execute("select * from employee_infor where xh = :s", [request.args.get("xh")])
        infor_tuple = self.Cursor.fetchone()
        self.Form.Xh.data = infor_tuple[0]
        self.Form.Xm.data = infor_tuple[1]
        self.Form.Mz.data = infor_tuple[2]
        self.Form.Sex.data = infor_tuple[3]
        self.Form.Marry.data = infor_tuple[4]
        self.Form.Society.data = infor_tuple[5]
        self.Form.Id.data = infor_tuple[6]
        self.Form.Major.data = infor_tuple[7]
        self.Form.Dept.data = infor_tuple[8]
        self.Form.Job.data = infor_tuple[9]
        self.Form.Phone.data = infor_tuple[10]
        self.Form.Mail.data = infor_tuple[11]
        self.Form.Comedate.data = infor_tuple[12]
        self.Form.Address.data = infor_tuple[13]
        self.avatarname = infor_tuple[14]

    def get_postsql(self, type):
        """Build the UPDATE statement for the profile; when a new avatar was
        uploaded, also store the file under a random name and include it."""
        if self.Form.Avatar.data.filename:  # an avatar file was uploaded
            # NOTE(review): '\.' in a non-raw string -- works but should be r"..."
            RE = ".*(\..*)$"
            end = re.findall(RE, self.Form.Avatar.data.filename)[0]
            randomstring = ''.join(
                random.sample('zyxwvutsrqponmlkjihgfedcba%ZAQWESDFYRUIOOMJ', 10)
            )  # random 10-char name defeats browser caching of the old avatar
            filename = randomstring + end
            self.Cursor.execute("select image from employee_infor where xh = :s", [request.args.get("xh")])  # current file name
            img = self.Cursor.fetchone()[0]
            if type == "admin":
                path = "DOWNLOADPATHA"
            else:
                path = "DOWNLOADPATHT"
            if img != "default.jpg":
                # NOTE(review): removal always uses DOWNLOADPATHA while saving
                # uses the type-dependent path -- confirm this is intended.
                os.remove(os.path.join(current_app.config["DOWNLOADPATHA"], img))  # delete the old avatar
                self.Form.Avatar.data.save(os.path.join(current_app.config[path], filename))  # store the new one
            else:
                self.Form.Avatar.data.save(os.path.join(current_app.config[path], filename))
            # UPDATE including the new image file name
            # SECURITY(review): SQL assembled by %-interpolation of form data is
            # vulnerable to SQL injection; should use bind parameters like the
            # other queries in this module.
            sql = "update employee_infor set xm='%s',mz='%s',sex='%s',marry='%s',society='%s',id='%s',major='%s',dept='%s',phonenumber='%s',mail='%s',comedate='%s'," \
                  "address='%s',image='%s' where xh = '%s'" % (
                      self.Form.Xm.data, self.Form.Mz.data, self.Form.Sex.data, self.Form.Marry.data,
                      self.Form.Society.data, self.Form.Id.data, self.Form.Major.data, self.Form.Dept.data,
                      self.Form.Phone.data, self.Form.Mail.data, self.Form.Comedate.data, self.Form.Address.data,
                      filename, request.args.get("xh"))
        else:
            sql = "update employee_infor set xm='%s',mz='%s',sex='%s',marry='%s',society='%s',id='%s',major='%s',dept='%s',phonenumber='%s',mail='%s',comedate='%s'," \
                  "address='%s' where xh = '%s'" % (
                      self.Form.Xm.data, self.Form.Mz.data, self.Form.Sex.data, self.Form.Marry.data,
                      self.Form.Society.data, self.Form.Id.data, self.Form.Major.data, self.Form.Dept.data,
                      self.Form.Phone.data, self.Form.Mail.data, self.Form.Comedate.data, self.Form.Address.data,
                      request.args.get("xh"))
        return sql

    def dispatch_request(self):
        if session.get("user"):
            check_result = self.get.check_identity()
            if isinstance(check_result, str):
                return redirect(check_result)
            else:
                if request.method == "GET":
                    self.copy_form()
                    return render_template("infor.html", form=self.Form, identity="admin", name=session["user"], realname=self.Form.Xm.data, toNone=False,
                                           proxy="", modify=1, first="", all_fields=self.all_fields, avatar=self.avatarname)
                else:
                    # xh must be left empty by the form: the account id is immutable
                    if self.Form.validate_on_submit() and self.Form.Xh.data == "":
                        # NOTE(review): xh_xm is fetched but never used
                        self.Cursor.execute("select xh, xm from employee_infor where xh = :s",
                                            [request.args.get("xh")])
                        xh_xm = self.Cursor.fetchone()
                        try:
                            sql = self.get_postsql("admin")
                            self.Cursor.execute(sql)
                            self.DB.commit()
                        except:
                            # update failed: restore the stored avatar for re-render
                            self.Cursor.execute("select image from employee_infor where xh = :s",
                                                [request.args.get("xh")])
                            self.avatarname = self.Cursor.fetchone()[0]
                            self.DB.rollback()
                            return render_template("infor.html", form=self.Form, identity="admin", name=session["user"], realname=self.Form.Xm.data, toNone=False,
                                                   proxy="", modify=1, first="", all_fields=self.all_fields, avatar=self.avatarname, errormsg="修改失败,请稍后重试")
                        else:
                            return redirect("/admin/information?xh=" + session["user"])
                    else:  # validation failed or the user tried to change xh
                        self.Cursor.execute("select image from employee_infor where xh = :s",
                                            [request.args.get("xh")])
                        self.avatarname = self.Cursor.fetchone()[0]  # keep the stored avatar when content changed but the image didn't
                        for i in list(self.Form.errors.keys()):
                            self.Form[i].data = ""
                        return render_template("infor.html", form=self.Form, identity="admin", name=session["user"], realname=self.Form.Xm.data, toNone=False,
                                               proxy="", modify=1, first="", all_fields=self.all_fields, avatar=self.avatarname, errormsg="学号不可更改且请按表单要求输入")
        else:
            return redirect("/errormsg/1")

    def __del__(self):
        self.Cursor.close()
        self.DB.close()
@Adminbp.route("/admin/<type>/user/<xh>", methods=["POST"])
def ManageUser(type, xh):
    """AJAX endpoint: delete an account ('delete') or change its password
    and modification permission ('modify'). Admin-only; replies with a
    plain status string for the front-end."""
    if session.get('user'):
        check_result = AdminUsers().noquery_check_identity()
        if isinstance(check_result, str):
            return redirect(check_result)
        else:
            if request.method == "POST":
                if xh:
                    DB = current_app.config["DATABASE"].connection()
                    cursor = DB.cursor()
                    if type == "delete":
                        try:
                            cursor.execute("delete from admin_ where xh = :s", [xh])
                            DB.commit()
                        except:
                            DB.rollback()
                            return "输入的学号不正确"
                        else:
                            return "删除成功"
                    elif type == "modify":
                        pwd = request.form.get("pwd")
                        checked = request.form.get("checked")
                        try:
                            cursor.execute("update admin_ set pwd = :s where xh = :s", [pwd, xh])
                            cursor.execute("update modify_infor_permission set permission = :s where xh = :s", [checked, xh])
                            DB.commit()
                        except:
                            DB.rollback()
                            return "输入的学号不正确"
                        else:
                            return "修改成功"
                    else:  # unknown action keyword in the URL
                        return redirect("/errormsg/3")
                else:
                    return redirect("/errormsg/3")
            else:
                return redirect("/errormsg/4")
    else:
        return "权限不足"
@Adminbp.route("/managerequest/<type>/<xh>", methods=["GET"])
def Manageinforrequest(type, xh):
    """AJAX endpoint: approve ('agree') or dismiss (anything else) a pending
    information-modification request for account xh. Admin-only; replies
    with a plain status string."""
    if session.get('user'):
        check_result = AdminUsers().noquery_check_identity()
        if isinstance(check_result, str):
            return redirect(check_result)
        else:
            if request.method == "GET":
                # NOTE(review): leftover debug print -- consider removing
                print("jajaaj")
                DB = current_app.config["DATABASE"].connection()
                cursor = DB.cursor()
                if type == "agree":
                    # grant permission and clear the pending flag
                    try:
                        cursor.execute("update modify_infor_permission set permission = 1, posted = 0 where xh = :s", [xh])
                        DB.commit()
                    except:
                        DB.rollback()
                        return "允许申请操作失败"
                    else:
                        return "允许申请操作成功"
                else:
                    # dismiss: only clear the pending flag
                    try:
                        cursor.execute("update modify_infor_permission set posted = 0 where xh = :s", [xh])
                        DB.commit()
                    except:
                        DB.rollback()
                        return "删除申请操作失败"
                    else:
                        return "删除申请操作成功"
            else:
                return redirect("/errormsg/4")
    else:
        return "权限不足"
@Adminbp.route("/admin/<type>/post", methods=["POST"])
def ManagePost(type):
    """AJAX endpoint: delete an announcement ('delete') or fetch its body
    ('get'). The form supplies the author xh and a dotted date; the date is
    converted to the zero-padded '#'-separated storage format here."""
    if session.get("user"):
        check_result = AdminUsers().noquery_check_identity()
        if isinstance(check_result, str):
            return redirect(check_result)
        else:
            DB = current_app.config["DATABASE"].connection()
            cursor = DB.cursor()
            xh = request.form["xh"]
            time = request.form["time"]
            # rebuild 'Y#MM#DD' from the dotted date (month/day zero-padded)
            date = ""
            timelist = time.split(".")
            date += timelist[0]
            date += "#"
            if len(timelist[1]) == 1:
                date += "0"
                date += timelist[1]
            else:
                date += timelist[1]
            date += "#"
            if len(timelist[2]) == 1:
                date += "0"
                date += timelist[2]
            else:
                date += timelist[2]
            if type == "delete":
                try:
                    cursor.execute("select txtpath from admin_post where xh = :s and postdate = :s", [xh, date])
                    os.remove(current_app.config["DOWNLOADPATHP"] + '/' + cursor.fetchone()[0])  # remove the body file too
                    cursor.execute("delete from admin_post where xh = :s and postdate = :s", [xh, date])
                    DB.commit()
                except:
                    DB.rollback()
                    return "删除失败"
                else:
                    return "删除成功"
            elif type == "get":
                try:
                    cursor.execute("select txtpath from admin_post where xh = :s and postdate = :s", [xh, date])
                    txtpath = cursor.fetchone()
                    normaldate = toNormal(date, "third")  # back from '#'-separated to dotted
                    try:
                        with open(current_app.config["DOWNLOADPATHP"] + "//" + txtpath[0], "r",
                                  encoding="utf-8") as file:
                            content = file.read()
                    except:
                        content = ""  # body file missing: return an empty body
                    return jsonify({"msg": "success", "content": content, "date": normaldate})
                except:
                    return jsonify({"msg": "failed"})
            # NOTE(review): no else branch -- an unknown <type> falls through
            # and the view returns None; confirm this is acceptable.
    else:
        return redirect("/errormsg/1")
@Adminbp.route("/all/course", methods=["POST", "GET"])
def AllCourse():
if session.get("user"):
check_result = AdminUsers().noquery_check_identity()
if isinstance(check_result, str):
return redirect(check_result)
else:
DB = current_app.config["DATABASE"].connection()
cursor = DB.cursor()
if request.method == "GET":
xm = cursor.execute("select xm from employee_infor where xh = :s", [session["user"]]).fetchone()[0]
courses_infor = cursor.execute("select kh, kname, kcredit, kamount, testtype, kteacher from course order by kteacher").fetchall()
return render_template("course.html", proxy="", identity="admin", name=session["user"], realname=xm, courses=courses_infor)
else:
try:
cursor.execute("delete from sc where kh = :s", [request.form.get("kh")])
cursor.execute("delete from course where kh = :s", [request.form.get("kh")])
DB.commit()
except:
DB.rollback()
return "修改失败"
else:
return "修改成功"
else:
return redirect("/errormsg/1") | spidereyes/Oracle- | Blueprints/Admin.py | Admin.py | py | 28,518 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "flask.Blueprint",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.views.View",
"line_number": 12,
"usage_type": "attribute"
},
{
"api_name": "flask.views",
"line_number": 12,
"usage_type": "name"
},
{
"api_name": "flask.current_app.co... |
28558110791 | from django.http import HttpResponse
import json
from .models import MailingList, MailingListGroup
def listinfo(request):
resp = HttpResponse(content_type='application/json')
groupdata = [{
'id': g.id,
'name': g.groupname,
'sort': g.sortkey,
} for g in MailingListGroup.objects.all()]
listdata = [{
'id': l.id,
'name': l.listname,
'groupid': l.group_id,
'active': l.active,
'shortdesc': l.shortdesc,
'description': l.description,
} for l in MailingList.objects.all()]
json.dump({'groups': groupdata, 'lists': listdata}, resp)
return resp
| postgres/pgweb | pgweb/lists/views.py | views.py | py | 642 | python | en | code | 66 | github-code | 1 | [
{
"api_name": "django.http.HttpResponse",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "models.MailingListGroup.objects.all",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "models.MailingListGroup.objects",
"line_number": 14,
"usage_type": "attribute... |
22239787685 | import numpy as np
import codecs
import sys
import logging
from nns import RecurrentNN
from utils import Timer
from dataproviders import TwitterApiDataProvider, TwitterTrainingDataProvider, DatasetBuilder
logging.basicConfig(stream=sys.stderr)
logger = logging.getLogger("root")
logger.setLevel(logging.DEBUG)
timer = Timer()
timer.checkin("start_getting_training_data")
dataset_builder = DatasetBuilder(
use_pos_tag=True,
use_single_words=True,
use_lexicon_features=True)
data_provider = TwitterTrainingDataProvider(amount=1.0)
texts, labels = data_provider.read()
features_list, features, labels_list, labels = dataset_builder.build_labeled_dataset(texts, labels)
timer.checkin("end_getting_training_data")
config = {
"accuracy_threshold": 0.94,
"n_input": len(features_list),
"n_steps": 1,
"n_layers": 1,
"n_hidden": 150,
"n_classes": len(labels_list)
}
nn = RecurrentNN(config)
nn.learn(features, labels)
timer.checkin("end_training")
logger.info("-"*50)
logger.info("ML :" + timer.diff("end_getting_training_data", "end_training"))
logger.info("Total :" + timer.diff("start_getting_training_data", "end_training"))
timer.checkin("end_training")
logger.info("-"*50)
logger.info("do some testing predictions")
api_tweet_provider = TwitterApiDataProvider(
consumer_key='mUnZ9yDNN2pRwzyqzrkwjQ',
consumer_secret='Ow9pJWZNzmg4TX1zrLxfQFnvBFpBi8CydxeQ3Xu6uM',
access_token='238066404-NDqnqYLV7rNO8QKTRw0hWUxiHqKHa4LyZp5ViKUT',
access_secret='OEjieKwOLdXwSpss1DmNzLyucBfre3oWuKK1JNdD5wwC9')
tweets = api_tweet_provider.read("dreamhost")
features_list, features_matrix, unclassifiable, labels_list = (
dataset_builder.build_dataset(features_list[:-2], tweets))
if len(features_matrix):
prediction = nn.predict(features_matrix)
with codecs.open("./predictions/prediction.csv", "w", "utf-8") as file:
file.write('tweets')
for label in labels_list:
file.write('\t%s' % label)
file.write('\n')
for idx in range(0, len(prediction)):
file.write(tweets[idx].encode('ascii', 'ignore'))
for value in prediction[idx]:
file.write('\t{:.6f}'.format(value))
file.write('\n')
| mishadev/stuff | tensorflow_tests/scripts/main.py | main.py | py | 2,247 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "logging.basicConfig",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "sys.stderr",
"line_number": 8,
"usage_type": "attribute"
},
{
"api_name": "logging.getLogger",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
... |
264626137 | from django.urls import path
from . import views
urlpatterns = [
path('school_list/', views.schList, name='schList'), #院校列表展示
path('edit_sch/', views.editSch, name='editSch'), #编辑院校信息
path('update_sch/', views.updateSch, name='updateSch'),
path('detail_sch/', views.detailSch, name='detailSch'), #查看院校信息详情
path('delete_sch/', views.delSch, name='delSch'), #删除院校信息
path('add_sch/', views.addSch, name='addSch'), #添加新院校,需要优化填写判断输入
path('addss_sch/', views.addssSch, name='addssSch'),
path('school_type/', views.schType, name='schType'), #院校类型展示
path('edit_type/', views.editType, name='editType'), #编辑院校类型
path('update_type/', views.updateType, name='updateType'),
path('delete_type/', views.delType, name='delType'), #删除院校类型
path('add_schtype/', views.addSchtype, name='addSchtype'), #添加新院校类型,需要优化填写判断输入
path('addss_schtype/', views.addssSchtype, name='addssSchtype'),
path('school_fea/', views.schFea, name='schFea'), #院校特征展示
path('edit_fea/', views.editFea, name='editFea'), #编辑院校特征
path('update_fea/', views.updateFea, name='updateFea'),
path('delete_fea/', views.delFea, name='delFea'), #删除院校特征
path('add_schfea/', views.addSchfea, name='addSchfea'), #添加新院校特征,需要优化填写判断输入
path('addss_schfea/', views.addssSchfea, name='addssSchfea'),
path('deld_sch/', views.deldSch, name='deldSch'), #已删除院校展示
path('sch_sch/', views.schSch, name='schSch'), #还原已删除院校
path('deletes_sch/', views.delesSch, name='delesSch'),#永久删除院校信息,数据库数据删除
path('deld_schtype/', views.deldSchtype, name='deldSchtype'), #已删除院校类型展示
path('type_type/', views.typeType, name='typeType'), #还原已删除院校类型
path('deletes_type/', views.delesType, name='delesType'), #永久删除院校类型
path('deld_schfea/', views.deldSchfea, name='deldSchfea'), #已删除院校特征展示
path('fea_fea/', views.feaFea, name='feaFea'), #还原已删除院校特征
path('deletes_fea/', views.delesFea, name='delesFea'), #永久删除院校特征
] | zhouf1234/django_obj | school/urls.py | urls.py | py | 2,415 | python | kn | code | 0 | github-code | 1 | [
{
"api_name": "django.urls.path",
"line_number": 5,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "django.urls.path",
... |
33810970716 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 9 17:19:35 2020
Class Message
- Access to elements of a message dict in ReDial
- Methods:
-
@author: nicholas
"""
import json
from nltk.tokenize import word_tokenize
from nltk.stem.porter import PorterStemmer
import re
import Settings as Settings
#####################
### TEXT MANAGEMENT
#####################
# TO EXTRACT Movies Mentions with @
re_filmId = re.compile('@[0-9]{5,6}')
# Knowledge Base of movies (from IMDB)
with open('/Users/nicholas/ReDial/Data/PreProcessed/KB_IMDB_movies.json', 'r') as fp:
KB = json.load(fp)
# Functions called by .sub
# A re method to substitue matching pattern in a string
# Here, substitute film ID with film NL title + str(list on genre)
def filmIdtoTitle(match):
filmId = match.group()[1:] # Remove @
return KB[filmId]['title']
def filmIdtoTitleAndGenres(match):
filmId = match.group()[1:] # Remove @
title = KB[filmId]['title']
list_genres = KB[filmId]['genres']
return title + " (" + " ".join(list_genres) + ")"
#####################
### CLASS MESSAGE
#####################
class Message:
def __init__(self, message_dict, seeker_id):
"""
Initializing a Message from a ReDial message dict
(which are in a list of messages in a Conversation)
Parameters
----------
message_dict : TYPE: dict
FORMAT: {"messageId": int, "text": str,
"timeOffset": int, "senderWorkerId": int}
DESCRIPTION: A message dict from ReDial
seeker_id : TYPE: int
DESCRIPTION: Id of the seeker in the conversation
Returns
-------
None.
"""
self.speaker_id = message_dict['senderWorkerId']
self.role = 'S::' if seeker_id == self.speaker_id else 'R::'
self.text_raw = message_dict['text']
def GetGenres(self):
"""
Takes a Message object and returns genres mentioned in the text of that Message
Returns
-------
l_genres : TYPE: list of genres
FORMAT: [str]
DESCRIPTION: List of genres in the text of that Message
"""
l_genres = []
# Lemmatize genres
genres_lem = [PorterStemmer().stem(word) for word in Settings.genres]
# Spit str by words after lowering case
text_token = word_tokenize(self.text_raw.lower())
# Lemmatize text
text_token_lem = [PorterStemmer().stem(word) for word in text_token]
# Go through all genres lemmatize
for i, g in enumerate(genres_lem):
# If it's found in the text
if g in text_token_lem:
# Return the original genres
l_genres.append(Settings.genres[i])
return l_genres
def GetMovies(self):
"""
Takes a Message object and returns movies ReD_id mentioned in the text of that Message
Returns
-------
l_movies : TYPE: list of movies by ReD_id
FORMAT: [str]
DESCRIPTION: List of movies ReD_id in the text of that Message
"""
l_movies = []
# Use 'regular expressions'(re) to extract movie mentions
l_movies = re_filmId.findall(self.text_raw)
# Remmove '@'at begining and return as str
l_movies = [m[1:] for m in l_movies]
return l_movies
def TextNL(self):
# Use 'regular expressions'(re) on a ReDial text
# to change movie mention in ReD_id to Natural Language (NL) tile
return re_filmId.sub(filmIdtoTitle, self.text_raw)
def TextNLGenres(self):
# Use 'regular expressions'(re) on a ReDial text
# to change movie mention in ReD_id to Natural Language (NL) tile
return re_filmId.sub(filmIdtoTitleAndGenres, self.text_raw)
| Vachonni/ReDial | Objects/Message.py | Message.py | py | 4,340 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "re.compile",
"line_number": 36,
"usage_type": "call"
},
{
"api_name": "json.load",
"line_number": 40,
"usage_type": "call"
},
{
"api_name": "nltk.stem.porter.PorterStemmer",
"line_number": 114,
"usage_type": "call"
},
{
"api_name": "Settings.genres"... |
73506504353 | from django.shortcuts import redirect, render
from django.http import HttpResponse
from apps.Habitacion.models import Habitacion
from apps.Habitacion.form import HabitacionForm
# Create your views here.
def home(request):
return render(request, 'base/base.html')
def index(request):
habitacion = Habitacion.objects.all().order_by('-id')
context = {'habitaciones': habitacion}
return render(request, 'Habitacion/index.html', context)
# return render(request,'Agencia_de_viaje/index.html')
# /////////////////////////////////////Codigo de Crear - mostrar///////////////////////////
def habitacionCreate(request):
if (request.method == 'POST'):
form = HabitacionForm(request.POST)
if form.is_valid():
form.save()
return redirect('Habitaciones:index')
else:
form = HabitacionForm()
return render(request, 'Habitacion/formHabitacion.html', {'form': form})
# /////////////////////////////////////Codigo de actualizar///////////////////////////
def habitacionEdit(request,id_habitacion):
habitacion = Habitacion.objects.get(pk=id_habitacion)
if request.method == 'GET':
form = HabitacionForm(instance=habitacion)
else:
form = HabitacionForm(request.POST, instance=habitacion)
if form.is_valid():
form.save()
return redirect('Habitaciones:index')
return render(request, 'Habitacion/formHabitacion.html', {'form': form})
# /////////////////////////////////////Codigo de Eliminar///////////////////////////
def habitacionEliminar(request,id_habitacion):
habitacion = Habitacion.objects.get(pk=id_habitacion)
if request.method == 'POST':
habitacion.delete()
return redirect('habitaciones:index')
return render(request, 'Habitacion/habitacionEliminar.html', {'Habitacion': habitacion})
| Daniel-Vega-Rojas/Actividad08_base | apps/Habitacion/views.py | views.py | py | 1,899 | python | es | code | 1 | github-code | 1 | [
{
"api_name": "django.shortcuts.render",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "apps.Habitacion.models.Habitacion.objects.all",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "apps.Habitacion.models.Habitacion.objects",
"line_number": 16,
"usa... |
9670033772 | from ab5 import hgratient
from typing import Optional
import colorama
import sys
from pystyle import Center, Colorate, Colors, Write
import tls_client
import os
def setTitle(title: Optional[any] = None):
os.system("title "+title)
setTitle("BitBoost V2 | Server Booster")
def clear():
if sys.platform in ["linux", "linux2", "darwin"]:
os.system("clear")
else:
os.system("cls")
clear()
sub_ids = []
logo = ("""__________.__ __ __________ __
\______ \__|/ |\______ \ ____ ____ _______/ |_
| | _/ \ __\ | _// _ \ / _ \/ ___/\ __\
| | \ || | | | ( <_> | <_> )___ \ | |
|______ /__||__| |______ /\____/ \____/____ > |__|
\/ \/ \/ """)
banner = ("""Please make sure that all your tokens are already in the server you want to boost.\n""")
print(hgratient(logo, [0, 223, 50], [0, 25, 222]))
print(banner)
__guild_id__ = Write.Input("Guild ID: ", Colors.blue_to_green, interval=0.05)
colorama.init(convert=True)
class Nitro:
def __init__(self, token: str):
self.token = token
self.headers = {
"accept": "*/*",
"accept-encoding": "gzip, deflate, br",
"accept-language": "en-US",
"authorization": token,
"referer": "https://discord.com/channels/@me",
"sec-fetch-dest": "empty",
"sec-fetch-mode": "cors",
"sec-fetch-site": "same-origin",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) discord/1.0.9007 Chrome/91.0.4472.164 Electron/13.6.6 Safari/537.36",
"x-debug-options": "bugReporterEnabled",
"x-discord-locale": "en-US",
"x-super-properties": "eyJvcyI6IldpbmRvd3MiLCJicm93c2VyIjoiRGlzY29yZCBDbGllbnQiLCJyZWxlYXNlX2NoYW5uZWwiOiJzdGFibGUiLCJjbGllbnRfdmVyc2lvbiI6IjEuMC45MDA3Iiwib3NfdmVyc2lvbiI6IjEwLjAuMTkwNDMiLCJvc19hcmNoIjoieDY0Iiwic3lzdGVtX2xvY2FsZSI6ImVuLVVTIiwiY2xpZW50X2J1aWxkX251bWJlciI6MTYxODQyLCJjbGllbnRfZXZlbnRfc291cmNlIjpudWxsfQ=="
}
self.session = tls_client.Session(client_identifier="chrome_107")
self.sub_ids = []
def removeTokenFromTxt(self):
with open("tokens.txt", "r") as f:
lines = f.readlines()
with open("tokens.txt", "w") as f:
for line in lines:
if line.strip("\n") != self.token:
f.write(line)
def hasNitro(self):
sex = self.session.get(
"https://discord.com/api/v9/users/@me/guilds/premium/subscription-slots",
headers=self.headers,
)
if sex.status_code in [403, 401]:
return self._extracted_from_hasNitro_7('Token is invalid, removing.')
try:
for sub in sex.json():
self.sub_ids.append(sub["id"])
except Exception as e:
print(e)
print(sex.text)
if len(self.sub_ids) == 0:
return self._extracted_from_hasNitro_7('Token has no nitro, removing.')
log(f"{colorama.Fore.GREEN}Token has nitro.")
return True
# TODO Rename this here and in `hasNitro`
def _extracted_from_hasNitro_7(self, arg0):
log(f"{colorama.Fore.RED}{arg0}")
self.removeTokenFromTxt()
return False
def boostServer(self, guildID):
for i in range(len(self.sub_ids)):
self.headers["Content-Type"] = "application/json"
r = self.session.put(
url=f"https://discord.com/api/v9/guilds/{guildID}/premium/subscriptions",
headers=self.headers,
json={
"user_premium_guild_subscription_slot_ids": [f"{self.sub_ids[i]}"]
},
)
if r.status_code == 201:
log(
f"{colorama.Fore.GREEN}Boosted {i + 1} of {len(sub_ids)} from {self.token[25:]}"
)
elif r.status_code == 400:
log(
f"{colorama.Fore.YELLOW}Boost already used {i + 1} of {len(sub_ids)} from {self.token[25:]}"
)
else:
log(f"{colorama.Fore.RED}ERROR: {r.status_code}")
def log(text):
print(f"{text}{colorama.Fore.RESET}")
def main():
with open("tokens.txt", "r") as f:
tokens = f.read().splitlines()
for token in tokens:
nitro = Nitro(token)
if nitro.hasNitro():
nitro.boostServer(__guild_id__)
if __name__ == "__main__":
main()
input("Press enter to exit.")
| BitStore-dev/BitBoost | BitBoost.py | BitBoost.py | py | 4,595 | python | en | code | 84 | github-code | 1 | [
{
"api_name": "typing.Optional",
"line_number": 10,
"usage_type": "name"
},
{
"api_name": "os.system",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "sys.platform",
"line_number": 18,
"usage_type": "attribute"
},
{
"api_name": "os.system",
"line_num... |
34660031378 | # -*- coding: utf-8 -*-
import logging
import os
import unittest
from linkml.generators.pythongen import PythonGenerator
from linkml_runtime import SchemaView
from linkml_owl.util.loader_wrapper import load_structured_file
from linkml_owl.dumpers.owl_dumper import OWLDumper
from funowl.converters.functional_converter import to_python
from tests import INPUT_DIR, OUTPUT_DIR
"""Test the module can be imported."""
SCHEMA_IN = os.path.join(INPUT_DIR, 'monsters_and_magic.yaml')
DATA_IN = os.path.join(INPUT_DIR, 'monsters_and_magic.data.yaml')
OWL_OUT = os.path.join(OUTPUT_DIR, 'monsters_and_magic.ofn')
EXPECTED = os.path.join(INPUT_DIR, 'monsters_and_magic.expected.ofn')
class TestRolePlayGameExample(unittest.TestCase):
"""Test case using a fantasy RPG example.
Note: the example data here is also an experiment in co-pilot assisted
knowledge base generation; the majority of the content was created by
LLM-autocomplete.
"""
def test_build_rpg(self):
"""
Test creation of an OWL TBox using RPG template.
"""
sv = SchemaView(SCHEMA_IN)
python_module = PythonGenerator(SCHEMA_IN).compile_module()
data = load_structured_file(DATA_IN, schemaview=sv, python_module=python_module)
dumper = OWLDumper()
dumper.schemaview = sv
doc = dumper.to_ontology_document(data, schema=sv.schema)
with open(OWL_OUT, 'w') as stream:
stream.write(str(doc))
doc_rt = to_python(str(doc))
axioms = doc_rt.ontology.axioms
logging.info(f'AXIOMS={len(axioms)}')
assert len(axioms) > 5
# compare with expected output
doc_expected = to_python(str(EXPECTED))
self.assertEquals(len(axioms), len(doc_expected.ontology.axioms))
self.assertCountEqual(axioms, doc_expected.ontology.axioms)
| linkml/linkml-owl | tests/test_examples/test_rpg.py | test_rpg.py | py | 1,849 | python | en | code | 9 | github-code | 1 | [
{
"api_name": "os.path.join",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "tests.INPUT_DIR",
"line_number": 16,
"usage_type": "argument"
},
{
"api_name": "os.path",
"line_number": 16,
"usage_type": "attribute"
},
{
"api_name": "os.path.join",
"lin... |
19209564635 | from tkinter import *
import matplotlib.pyplot as plt
from PIL import ImageFilter,Image
import numpy as np
#import cv2
from IPython import get_ipython
import pyperclip
def input_emnist(st):
#opening the input image to be predicted
im_open = Image.open(st)
im = Image.open(st).convert('LA') #conversion to gray-scale image
width = float(im.size[0])
height = float(im.size[1])
newImage = Image.new('L',(28,28),(255))
if width > height: #check which dimension is bigger
#Width is bigger. Width becomes 20 pixels.
nheight = int(round((28.0/width*height),0)) #resize height according to ratio width
if (nheight == 0): #rare case but minimum is 1 pixel
nheight = 1
# resize and sharpen
img = im.resize((28,nheight), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
wtop = int(round(((28 - nheight)/2),0)) #caculate horizontal pozition
newImage.paste(img, (0,wtop)) #paste resized image on white canvas
else:
#Height is bigger. Heigth becomes 20 pixels.
nwidth = int(round((28.0/height*width),0)) #resize width according to ratio height
if (nwidth == 0): #rare case but minimum is 1 pixel
nwidth = 1
# resize and sharpen
img = im.resize((nwidth,28), Image.ANTIALIAS).filter(ImageFilter.SHARPEN)
wleft = int(round(((28 - nwidth)/2),0)) #calculate vertical pozition
newImage.paste(img, (wleft,0)) #paste resize
# # Normalizing image into pixel values
tv = list(newImage.getdata())
tva = [ (255-x)*1.0/255.0 for x in tv]
for i in range(len(tva)):
if tva[i]<=0.45:
tva[i]=0.0
n_image = np.array(tva)
rn_image = n_image.reshape(28,28)
#displaying input image
plt.imshow(im_open)
plt.title("Input Image")
plt.show()
#displaying gray-scale image
plt.imshow(newImage.convert('LA'))
plt.title("Rescaled Image")
plt.show()
#displaying normalized image
plt.imshow(n_image.reshape(28,28))
plt.title("Normalized Image")
plt.show()
# return all the images
return n_image,im_open,newImage
'''
from keras.models import Sequential
#from keras.layers.convolutional import Conv2D
#from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
#from keras.layers.core import Flatten
#from keras.layers.core import Dense
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
from keras import backend as K
class LeNet:
@staticmethod
def build(width, height, depth, classes):
# initialize the model
model = Sequential()
inputShape = (height, width, depth)
# if we are using "channels first", update the input shape
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
#model=Sequential()
model.add(Conv2D(16, kernel_size=4, input_shape=inputShape, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(32, kernel_size=4, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(2))
model.add(Conv2D(64, kernel_size=4, activation='relu'))
model.add(BatchNormalization())
model.add(Conv2D(128, kernel_size=4, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(3200, activation='tanh'))
model.add(BatchNormalization())
model.add(Dense(47, activation='softmax'))
#model.summary()
return model
'''
from keras.models import Sequential, load_model
from keras.layers import Conv2D, MaxPooling2D, Dropout, Flatten, Dense, BatchNormalization
from keras import backend as K
class LeNet:
@staticmethod
def build(width, height, depth, classes):
# initialize the model
model = Sequential()
inputShape = (height, width, depth)
# if we are using "channels first", update the input shape
if K.image_data_format() == "channels_first":
inputShape = (depth, height, width)
model=Sequential()
model.add(Conv2D(128, kernel_size=3, input_shape=inputShape, activation='relu'))
model.add(BatchNormalization())
model.add(MaxPooling2D(2))
model.add(Conv2D(64, kernel_size=3, activation='relu'))
model.add(BatchNormalization())
model.add(Flatten())
model.add(Dense(256, activation='tanh'))
model.add(Dropout(0.5))
model.add(Dense(classes, activation='softmax'))
return model
import tensorflow as tf
def model_predict(n_image):
model = LeNet.build(width=28, height=28, depth=1, classes=47)
#model.compile(loss="categorical_crossentropy", optimizer=opt, metrics=["accuracy"])
from skimage import transform,io
#gray = io.imread('file name with path', as_gray = True)
model.load_weights('emnist.pt')
arr=n_image.reshape(1,28,28,1)
prediction = model.predict(arr)[0]
#print(prediction)
pred = np.argmax(prediction)
labels_dict ={0:0,1:1,2:2,3:3,4:4,5:5,6:6,7:7,8:8,9:9,10:'A',11:'B',12:'C',13:'D',14:'E',15:'F',16:'G',17:'H',18:'I',19:'J',20:'K',21:'l',22:'M',23:'N',24:'O',25:'P',26:'Q',27:'R',28:'S',29:'T',30:'u',31:'V',32:'W',33:'X',34:'Y',35:'Z',36:'a',37:'b',38:'d',39:'e',40:'f',41:'g',42:'h',43:'n',44:'q',45:'r',46:'t',47:'அ',48:'ஆ',49:'இ',50:'ஈ',51:'உ',52:'ஊ',53:'எ',54:'ஏ',55:'ஐ',56:'ஒ',57:'ஓ',58:'ஔ'}
s = "The predicted character is {}".format(labels_dict[pred])
print(s)
print('pred :', pred)
return s,labels_dict[pred]
st = ""
root = Tk() #tkinter GUI
root.geometry("500x500")
root.winfo_toplevel().title("Handwritten Character Recognition")
label1 = Label( root, text="Enter the name of the file: ")
E1 = Entry(root, bd =5)
def getVal():
global st
st= E1.get()
root.destroy()
submit = Button(root, text ="Submit", command = getVal)
label1.pack()
E1.pack()
submit.pack(side =BOTTOM)
mainloop()
n_image,image,convo_image = input_emnist(st) #call to Function with name of the file as parameter
res,cpy = model_predict(n_image)
pyperclip.copy(str(cpy)) #copy the predicted character to clipboard
root2 = Tk()
root2.winfo_toplevel().title("Handwritten Character Recognition")
root2.geometry("500x500")
label2 = Label(root2,text = res)
label2.config(font=("Courier", 20))
label2.pack()
mainloop()
| sg1498/EMNIST | final_PROJECT_KERAS.py | final_PROJECT_KERAS.py | py | 6,648 | python | en | code | 1 | github-code | 1 | [
{
"api_name": "PIL.Image.open",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number": 11,
"usage_type": "name"
},
{
"api_name": "PIL.Image.open",
"line_number": 12,
"usage_type": "call"
},
{
"api_name": "PIL.Image",
"line_number"... |
11698126912 | from cgi import test
import torch
import dataset
import model
import numpy as np
import os
import argparse
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
parser = argparse.ArgumentParser()
parser.add_argument("--dataset_name", default='PeMSD8', help='Dataset Name', type=str)
args = parser.parse_args()
dataset_name = args.dataset_name
path = './save_pth/' + dataset_name
if not os.path.exists(path):
os.mkdir(path)
train_data, input_dim = dataset.get_mts_train_data(dataset_name)
train_data = train_data.to(device)
ae = model.TSautoencoder(train_data.shape[1], train_data.shape[0]).to(device)
criterion = torch.nn.MSELoss()
optimizer = torch.optim.Adam(ae.parameters(), lr=1e-3)
losses_min = 1e+8
for epoch in range(1000):
losses_train = []
E, mu, log_var, rx = ae(train_data)
mse_loss = criterion(train_data, rx)
kld_loss = -0.5 * torch.sum(1 + log_var - mu.pow(2) - log_var.exp())
loss = mse_loss + kld_loss
optimizer.zero_grad()
loss.backward()
optimizer.step()
mean_loss = loss.item()
if mean_loss < losses_min:
np.save('emb_vae_'+dataset_name+'.npy', E.detach().cpu().numpy())
torch.save(ae.state_dict(),'./save_pth/'+dataset_name+'/ae.pth')
losses_min = mean_loss
| gortwwh/GMTSCLR | pretrain_vae.py | pretrain_vae.py | py | 1,271 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "torch.device",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.cuda.is_available",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "torch.cuda",
"line_number": 9,
"usage_type": "attribute"
},
{
"api_name": "argparse.ArgumentPa... |
16555099455 | from copy import deepcopy
from functools import reduce
import numpy as np
class Chromosome:
def crossover(self, other):
pass
def mutate(self, step=None):
pass
def evaluate(self):
pass
class FloatChromosome(Chromosome):
def __init__(self, ll, ul, degree, pm, b, crossover_function):
self.ll = ll
self.ul = ul
self.degree = degree
self.data = np.random.uniform(self.ll, self.ul, size=self.degree)
self.pm = pm
self.b = b
self.crossover_function = crossover_function
def crossover(self, other):
if self.crossover_function == "arithmetic":
return self.arithmetic_crossover(other)
if self.crossover_function == "heuristic":
return self.heuristic_crossover(other)
def arithmetic_crossover(self, other):
child = deepcopy(self)
alpha = np.random.uniform(size=self.data.shape)
child.data = alpha * self.data + (1 - alpha) * other.data
# child.data = (self.data + other.data) / 2
return child
def heuristic_crossover(self, other):
child = deepcopy(self)
alpha = np.random.uniform(size=self.data.shape)
if self.evaluate() < other.evaluate():
b = self
a = other
else:
b = other
a = self
child.data = b.data + alpha * (b.data - a.data)
return child
def mutate(self, step=None, max_step=None):
res = deepcopy(self)
if np.random.uniform(size=1) < self.pm:
r = 1 - np.random.uniform(size=1)**(1-step/max_step)**self.b
noise = (self.ul - self.ll) * np.random.uniform(-r, r, size=self.degree)
res.data = np.clip(res.data + noise, self.ll, self.ul)
return res
def evaluate(self):
return self.data
class BinaryChromosome(Chromosome):
def __init__(self, ll, ul, precision, degree, pm, crossover_function):
self.ll = ll
self.ul = ul
self.precision = precision
self.degree = degree
self.n = np.int(np.ceil(np.log2(np.floor(1+(self.ul-self.ll) * 10**precision))))
self.data = np.random.randint(2, size=[degree, self.n])
self.crossover_function = crossover_function
self.pm = pm
def crossover(self, other):
if self.crossover_function == "uniform":
return self.uniform_crossover(other)
def uniform_crossover(self, other):
child = deepcopy(self)
child.data = np.where(np.random.uniform(size=self.data.shape), self.data, other.data)
return child
def mutate(self, step=None, max_step=None):
res = deepcopy(self)
res.data = np.where(np.random.uniform(size=(self.degree, self.n)) < self.pm, 1 - self.data, self.data)
return res
def evaluate(self):
return self.ll + (self.ul - self.ll) * reduce(lambda a, b: 2*a + b, self.data.T) / (2 ** self.n - 1)
| hrkec/APR | Lab4/chromosome.py | chromosome.py | py | 2,957 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "numpy.random.uniform",
"line_number": 23,
"usage_type": "call"
},
{
"api_name": "numpy.random",
"line_number": 23,
"usage_type": "attribute"
},
{
"api_name": "copy.deepcopy",
"line_number": 35,
"usage_type": "call"
},
{
"api_name": "numpy.random.uni... |
18084590411 | import datetime,requests
import pickle
from pathlib import Path
import requests
import streamlit as st
from streamlit_lottie import st_lottie
import json
import ssl,os,urllib
import altair as alt
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
from PIL import Image
from io import BytesIO
# url = 'https://upload.wikimedia.org/wikipedia/commons/9/9d/Agilisys-Logo-Black-RGB.png'
# response = requests.get(url)
# img = Image.open(BytesIO(response.content))
st.set_page_config(page_title='API call', page_icon=':rocket:')
def load_lottieurl(url):
r = requests.get(url)
if r.status_code != 200:
return None
return r.json()
lottie_coding = load_lottieurl('https://assets5.lottiefiles.com/private_files/lf30_jyndijva.json')
with st.container():
# image = img
# st.image(image)
st.write("---")
left_column, right_column = st.columns(2)
with left_column:
st.subheader('ML backend demonstration')
st.title('Azure dependant scoring')
st.write('Cloud deployed logistic regression API called on this page to predict JSON data')
with right_column:
st_lottie(lottie_coding, height=300, key='coding')
st.write("---")
with st.container():
left_column, right_column = st.columns(2)
with left_column:
st.subheader('Input JSON file of correct format')
uploaded_file = st.file_uploader("Submit file")
# Everything below runs only once a file has been submitted.
if uploaded_file is not None:
    with left_column:
        st.write('File recieved, predicting samples....')
    print('acceptedfile')

    def allowSelfSignedHttps(allowed):# bypass the server certificate verification on client side
        if allowed and not os.environ.get('PYTHONHTTPSVERIFY', '') and getattr(ssl, '_create_unverified_context', None):
            ssl._create_default_https_context = ssl._create_unverified_context
    allowSelfSignedHttps(True) # this line is needed if you use self-signed certificate in your scoring service.

    # Request data goes here
    # The example below assumes JSON formatting which may be updated
    # depending on the format your endpoint expects.
    # More information can be found here:
    # https://docs.microsoft.com/azure/machine-learning/how-to-deploy-advanced-entry-script
    # NOTE(review): uploaded_file is a Streamlit UploadedFile (file-like);
    # urllib accepts it as a request body only via its .read() — confirm the
    # endpoint receives the full JSON payload.
    data = uploaded_file
    body = data
    url = 'http://1f529717-4167-4806-971f-a91fd449e981.uksouth.azurecontainer.io/score'
    headers = {'Content-Type':'application/json'}
    # NOTE(review): the top of this file only does `import urllib`, which does
    # NOT import urllib.request / urllib.error; these lookups work only if
    # another import happened to pull the submodules in. Add explicit
    # `import urllib.request, urllib.error`.
    req = urllib.request.Request(url, body, headers)
    try:
        response = urllib.request.urlopen(req)
        result = response.read()
        print(result)
        diabetic_count = 0
        result_dict = json.loads(result.decode('utf-8'))
        # Per-sample breakdown: index 1 of each probability pair is the
        # positive (diabetic) class probability, thresholded at 0.5.
        with st.expander("See breakdown"):
            for i,j in enumerate(result_dict['predict_proba']):
                val = result_dict['predict_proba'][i][1]
                if val>=0.5:
                    diabetic_count+=1
                    st.write(f'sample {i+1} predicted diabetic with probability {val}')
                else:
                    st.write(f'sample {i+1} predicted non-diabetic with probability {val}')
    except urllib.error.HTTPError as error:
        print("The request failed with status code: " + str(error.code))
        # Print the headers - they include the requert ID and the timestamp, which are useful for debugging the failure
        print(error.info())
        print(error.read().decode("utf8", 'ignore'))

    # NOTE(review): this block also executes after an HTTPError, where
    # result_dict / diabetic_count were never assigned (NameError) —
    # consider moving it inside the try's success path.
    with right_column:
        percentage = ((diabetic_count/len(result_dict['predict_proba'])*100))
        dicto = {'Count':[diabetic_count,len(result_dict['predict_proba'])]}
        chart_data = np.array([diabetic_count,len(result_dict['predict_proba'])])
        # Inline CSS to centre the metric label and value.
        st.markdown('''
        <style>
        /*center metric label*/
        [data-testid="stMetricLabel"] > div:nth-child(1) {
            justify-content: center;
        }
        /*center metric value*/
        [data-testid="stMetricValue"] > div:nth-child(1) {
            justify-content: center;
        }
        </style>
        ''', unsafe_allow_html=True)
        st.metric(label='predicted diabetic',value=f'{percentage}%')
        fig,ax = plt.subplots()
        ax.pie(chart_data,explode=(0.1,0.1),labels=['Diabetic','Non-diabetic'],
            shadow=False,startangle=90)
        st.pyplot(fig)
| williamc1998/ml-test | azuredash.py | azuredash.py | py | 4,844 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "streamlit.set_page_config",
"line_number": 21,
"usage_type": "call"
},
{
"api_name": "requests.get",
"line_number": 24,
"usage_type": "call"
},
{
"api_name": "streamlit.container",
"line_number": 32,
"usage_type": "call"
},
{
"api_name": "streamlit.... |
40316461163 | from tkinter import *
import tkinter as tk
from tkinter import ttk
from tkinter import filedialog
from tkinter import scrolledtext
import time, datetime
import threading
import logging
import soundfile as sf
import numpy as np
import os
# --- Main window ----------------------------------------------------------
window = Tk()
window.geometry('450x520')
window.title('INotepad.cloud - Phần mềm MergeAudio WAV')
# NOTE(review): '\i' is not a recognised escape so this works by accident;
# prefer a raw string r'.\icon.ico' to avoid the DeprecationWarning.
window.iconbitmap('.\icon.ico')
# window.resizable(0, 0)
window.minsize(450, 520)

# --- Module-level state shared by the button callbacks --------------------
folder_Out = ""      # destination folder chosen via folderOut()
folder_In = ""       # source folder containing the .wav inputs
sample_rate = 44100  # rate assumed for the silence padding and output file
nameOut = StringVar(window, value='outFile')
input_NameOut = Entry(window,width=20,textvariable=nameOut)
delayTime = StringVar(window, value='1')
input_DelayTime = Entry(window,width=20,textvariable=delayTime)
input_files = [];    # sorted list of .wav file names from folder_In
logging.basicConfig(level=logging.DEBUG, filename='MergeAudio.log', filemode='w')
def sort_func(val):
    """Sort key: the integer formed by all digits in *val*, or 0 if none.

    e.g. 'track12.wav' -> 12, 'a1b2' -> 12, 'intro.wav' -> 0. Used to
    order file names numerically instead of lexicographically.
    """
    digits = [ch for ch in val if ch.isdigit()]
    return int(''.join(digits)) if digits else 0
def fileIn():
    # "Folder Merge" button callback: ask the user for the input folder,
    # then list its .wav files (numerically sorted) in the table.
    global folder_In
    global input_files
    folder_In = filedialog.askdirectory()
    if folder_In != "" :
        input_files = [f for f in os.listdir(folder_In) if f.endswith('.wav')]
        input_files = sorted(input_files, key=sort_func)
        addTreeView(input_files)
def folderOut():
    # "Folder Out" button callback: pick the destination folder and
    # echo the choice in the status label.
    global folder_Out
    folder_Out = filedialog.askdirectory()
    labelFileOut.set('Folder: '+folder_Out)
def proccessMerge():
    """Concatenate every listed .wav file with silence between them.

    Reads input_files in order, inserts `delayTime` seconds of silence
    between consecutive clips and writes <folder_Out>/<name>.wav.

    NOTE(review): padding and output use the hard-coded module-level
    sample_rate (44100) while each file is read at its own rate `sr`
    (which is then ignored) — inputs at another rate will be written at
    the wrong speed. Confirm all inputs are 44.1 kHz or use `sr`.
    NOTE(review): concatenating the 1-D `padding` with `audio` assumes
    mono input; a stereo (2-D) file would raise in np.concatenate.
    """
    # Create an empty array to hold the concatenated audio
    output_audio = np.array([], dtype=np.float32)
    file_nameOut = ""
    padding_duration = int(input_DelayTime.get())
    if input_NameOut.get() != "":
        file_nameOut = input_NameOut.get()
    print("file_name>>>",file_nameOut)
    for i, file_path in enumerate(input_files):
        # Load the audio data from the file
        audio, sr = sf.read(os.path.join(folder_In, file_path))
        print(">>>>RD: ",os.path.join(folder_In, file_path))
        setMessageText(">>>>merge: "+os.path.join(folder_In, file_path))
        # If this is not the first file, add padding to the output array
        if i > 0:
            padding = np.zeros((int(sample_rate * padding_duration),))
            output_audio = np.concatenate((output_audio, padding))
        # Append the audio data to the output array
        output_audio = np.concatenate((output_audio, audio))
    # Write the concatenated audio to the output file
    sf.write(os.path.join(folder_Out, file_nameOut+".wav"), output_audio, sample_rate)
    print("Done")
    setMessageText(">>>>Done "+os.path.join(folder_Out, file_nameOut+".wav"))
    labelMessage.set('Done')
def addTreeView(data):
    """Fill the Treeview with *data* (file names) and enable Merge.

    Clears the table and the status label first; the Merge button is
    enabled only when at least one row was inserted.
    """
    delTreeview()
    labelMessage.set('')
    count = 0
    for fileName in data:
        # BUG FIX: logging.info(">>>>>fileName", fileName) passed the name as
        # a %-style argument with no placeholder, so the logging module
        # reported a string-formatting error instead of logging the message.
        logging.info(">>>>>fileName %s", fileName)
        my_tree.insert(parent='', index='end', iid=count, text="Parent", values=(count, fileName))
        setMessageText(">>>>add: " + fileName)
        count += 1
    if count > 0:
        btnMerge["state"] = "normal"
def delTreeview():
    # Remove every row currently shown in the file table.
    for i in my_tree.get_children():
        my_tree.delete(i)
def setMessageText(newStr):
    # Prepend a timestamped log line to the read-only message widget.
    my_message.configure(state='normal')  # unlock for programmatic insert
    my_message.insert(1.0,"\n")
    my_message.insert(1.0," "+newStr)
    my_message.insert(1.0,datetime.datetime.now())
    my_message.configure(state=DISABLED)  # lock again against user edits
# --- Widgets --------------------------------------------------------------
# Read-only log area; setMessageText() toggles its state to insert lines.
my_message = scrolledtext.ScrolledText(window, width=48, height=10)
my_message.configure(state=DISABLED)
#table treeview
my_tree = ttk.Treeview(window)
#define our columms
my_tree['columns'] = ("Index","FileName")
#Formate our columms
my_tree.column("#0",width=0,stretch=NO)
my_tree.column("Index",anchor=W,width=50)
my_tree.column("FileName",anchor=W,width=345)
#create headings
my_tree.heading("#0",text="Label",anchor=W)
my_tree.heading("Index",text="Index",anchor=W)
my_tree.heading("FileName",text="FileName",anchor=W)

# Action buttons; Merge stays disabled until files have been listed.
btnIn = Button(window,text="Folder Merge",width = 15,command=fileIn)
btnOut = Button(window,text="Folder Out",command=folderOut)
btnMerge = Button(window,text="Merge file",command=proccessMerge)
btnMerge["state"] = "disabled"
btnIn.grid(row=1,column=0)
btnOut.grid(row=1,column=1)
btnMerge.grid(row=1,column=2)

labelFileOut = StringVar()
labelMessage = StringVar()
# NOTE(review): .grid() returns None, so lblFileOut / lblmessage hold None,
# not the Label widgets (harmless here since they are never used).
lblFileOut = Label(window,textvariable=labelFileOut).grid(row=3, columnspan=3)
Label(window,text="Name file").grid(row=4,column=0)
input_NameOut.grid(row=4,column=1)
Label(window,text="Delay time(s)").grid(row=5,column=0)
input_DelayTime.grid(row=5,column=1)
my_tree.grid(row=6,columnspan =3,sticky=tk.W)
my_message.grid(row=7,columnspan=3)
lblmessage = Label(window,textvariable=labelMessage).grid(row=8,columnspan=3)
def on_closing():
    """Window-close handler: tear the UI down cleanly.

    BUG FIX: `driver` is never defined anywhere in this module (the
    try/finally looks copied from a selenium-based app), so closing the
    window always raised NameError. Guard it so shutdown is clean while
    preserving the original cleanup order.
    """
    try:
        driver.quit()
    except NameError:
        pass
    finally:
        window.destroy()


window.protocol("WM_DELETE_WINDOW", on_closing)
window.mainloop()
{
"api_name": "logging.basicConfig",
"line_number": 31,
"usage_type": "call"
},
{
"api_name": "logging.DEBUG",
"line_number": 31,
"usage_type": "attribute"
},
{
"api_name": "tkinter.filedialog.askdirectory",
"line_number": 43,
"usage_type": "call"
},
{
"api_name":... |
9122061868 | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import time
import argparse
import os
import datetime
from algorithms.kmeans import KMeans
from algorithms.kmeanspp import KPlusPlus
from algorithms.kmeansgraph import KMeansGraph
from algorithms.kmeans_sa import KMeans_SA
if __name__ == '__main__':
    # Benchmark four k-means initialisation strategies on one or more
    # CSV datasets (first column = label), reporting phi (sum of
    # within-cluster distances) and fit time, plus boxplots of both.
    #
    # BUG FIX: time.clock() was deprecated in 3.3 and REMOVED in Python
    # 3.8 — every timing call below now uses time.perf_counter().
    parser = argparse.ArgumentParser(description='Description.')
    parser.add_argument('--dataset', dest='datasets', required=True, nargs='+',
                        help='The path for the dataset')
    parser.add_argument('--k', dest='k', required=True, type=int,
                        help='The path for the dataset')
    parser.add_argument('--iterations', dest='iterations', type=int,
                        help='Number of iterations that the algorithm will execute')
    args = parser.parse_args()

    iterations = 10
    if args.iterations is not None:
        iterations = args.iterations
    print('Running for {} iterations'.format(iterations))

    for path_dataset in args.datasets:
        # Per-method metrics accumulated across iterations.
        method = {
            'K-Means':    {'phi': [], 'fit_time': []},
            'K-Means++':  {'phi': [], 'fit_time': []},
            'GK-Means':   {'phi': [], 'fit_time': []},
            'IF K-Means': {'phi': [], 'fit_time': []}
        }

        fdataset = pd.read_csv(path_dataset, sep=",", header=None)
        _, dataset_name = os.path.split(path_dataset)
        # Remove the .txt
        dataset_name = dataset_name[:-4]

        # KDD99 is huge, so only a 10% sample is used.
        if dataset_name == 'KDD99.':
            dataset_name = 'KDD99'
            train = fdataset.sample(frac=0.1)
        else:
            train = fdataset
        print(train.shape)

        for i in range(0, iterations):
            print('Iteration {}'.format(i + 1))
            # First column holds the class label; features are the rest.
            train = train.rename(columns={0: 'label'})
            y_train = train.label
            y_train = y_train.apply(str)
            X_train = train.drop("label", axis=1)

            ### Random initialization
            print('\tK-Means Random Initialization')
            start_time = time.perf_counter()
            kmeans = KMeans(args.k, X=X_train.values, Y=y_train.values, name=dataset_name)
            kmeans.find_centers()
            end_time = time.perf_counter()
            method['K-Means']['phi'].append(kmeans.get_sum_distances())
            method['K-Means']['fit_time'].append(end_time - start_time)

            ### K-means++ initialization
            print('\tK-Means++')
            start_time = time.perf_counter()
            kpp = KPlusPlus(args.k, X=X_train.values, Y=y_train.values, name=dataset_name)
            kpp.init_centers()
            kpp.find_centers(method='++')
            end_time = time.perf_counter()
            method['K-Means++']['phi'].append(kpp.get_sum_distances())
            method['K-Means++']['fit_time'].append(end_time - start_time)

            ### K-Means Graph
            print('\tK-Means Graph')
            start_time = time.perf_counter()
            kmeansgraph = KMeansGraph(args.k, X=X_train, Y=y_train, name=dataset_name)
            kmeansgraph.init_centers()
            kmeansgraph.find_centers(method='graph')
            end_time = time.perf_counter()
            method['GK-Means']['phi'].append(kmeansgraph.get_sum_distances())
            method['GK-Means']['fit_time'].append(end_time - start_time)

            ### IFaber
            print('\tK-Means IFABER')
            start_time = time.perf_counter()
            kmeans_sa = KMeans_SA(args.k, X=X_train, Y=y_train.values, name=dataset_name)
            kmeans_sa.init_centers()
            kmeans_sa.find_centers()
            end_time = time.perf_counter()
            method['IF K-Means']['phi'].append(kmeans_sa.get_sum_distances())
            method['IF K-Means']['fit_time'].append(end_time - start_time)

        # Summary: mean +- std of phi and fit time, per method.
        for name in ('K-Means', 'K-Means++', 'GK-Means', 'IF K-Means'):
            print(name)
            print('phi: {} +- {}'.format(np.mean(method[name]['phi']), np.std(method[name]['phi'])))
            print('fit_time: {} +- {}'.format(np.mean(method[name]['fit_time']), np.std(method[name]['fit_time'])))

        # Boxplot of phi per method.
        plt.close('all')
        plt.style.use('ggplot')
        f = plt.figure(figsize=(7, 6))
        ax_labels = list(method)
        plt.boxplot([method[x]['phi'] for x in ax_labels])
        plt.xticks(np.arange(len(ax_labels)) + 1, ax_labels, rotation=45)
        plt.title('Phi\n{} - K = {}'.format(dataset_name, args.k))
        current_time = datetime.datetime.now().strftime('%Y-%m-%dT%H_%M_%S')
        plt.savefig(os.path.join('experiments', 'runs', 'phi_' + current_time + '.pdf'),
                    dpi=200, bbox_inches='tight')

        # Boxplot of fit time per method.
        f = plt.figure(figsize=(7, 6))
        ax_labels = list(method)
        plt.boxplot([method[x]['fit_time'] for x in ax_labels])
        plt.xticks(np.arange(len(ax_labels)) + 1, ax_labels, rotation=45)
        plt.title('Tempo de execução (segundos)\n{} - K = {}'.format(dataset_name, args.k))
        current_time = datetime.datetime.now().strftime('%Y-%m-%dT%H_%M_%S')
        plt.savefig(os.path.join('experiments', 'runs', 'tempo_exec_' + current_time + '.pdf'),
                    dpi=200, bbox_inches='tight')
{
"api_name": "argparse.ArgumentParser",
"line_number": 16,
"usage_type": "call"
},
{
"api_name": "pandas.read_csv",
"line_number": 37,
"usage_type": "call"
},
{
"api_name": "os.path.split",
"line_number": 38,
"usage_type": "call"
},
{
"api_name": "os.path",
"... |
15866997363 | import base64
import os
import json
def encode64_file(file_path):
    """Return the Base64 encoding of the file at *file_path* as a str."""
    with open(file_path, "rb") as fh:
        raw = fh.read()
    return base64.b64encode(raw).decode()
def read_allfile(file_path):
    """Return the entire text content of *file_path* as one string."""
    with open(file_path, "r") as fh:
        return fh.read()
def read_file_tojson(file_path):
    """Parse the file at *file_path* as JSON and return the result."""
    with open(file_path, "r") as fh:
        return json.load(fh)
if __name__ == "__main__":
    # Manual smoke test: expects a test.json file in the working directory.
    a = read_file_tojson("test.json")
| quydx/acme-client-restapi | apiserver/djangorest/rest/filelib.py | filelib.py | py | 645 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "base64.b64encode",
"line_number": 10,
"usage_type": "call"
},
{
"api_name": "json.loads",
"line_number": 24,
"usage_type": "call"
}
] |
42865082927 | from flask import Flask, request, render_template
import numpy as np
import pandas as pd
#import xgboost as xgb
import joblib
# Pre-trained churn classifier serialized with joblib; the .sav file must
# sit in the working directory, or this raises at import time.
model = joblib.load('xgb_model.sav')

app = Flask(__name__)
@app.route('/')
def home():
    # Landing page with the customer-data input form.
    return render_template('home.html')
@app.route('/prediction', methods=['POST'])
def prediction():
    """Score one customer from the submitted form and render the result.

    Reads the 18 raw features from the POST body, one-hot encodes the
    five categorical columns with the encoder fitted alongside the
    model, derives Tenure_year, and renders the churn prediction page.

    Fixes vs. the original: an unreachable `print(prediction)` after the
    return was removed, the redundant in-function `import joblib`
    (already imported at module level) was dropped, and the local result
    no longer shadows the function's own name.
    """
    df = {}
    df['Tenure'] = [int(request.values['Tenure'])]
    df['PreferredLoginDevice'] = [request.values['PreferredLoginDevice']]
    df['CityTier'] = [int(request.values['CityTier'])]
    df['WarehouseToHome'] = [float(request.values['WarehouseToHome'])]
    df['PreferredPaymentMode'] = [request.values['PreferredPaymentMode']]
    df['Gender'] = [request.values['Gender']]
    df['HoursSpendOnApp'] = [int(request.values['HoursSpendOnApp'])]
    df['NumberOfDeviceRegistered'] = [int(request.values['NumberOfDeviceRegistered'])]
    df['PreferedOrderCat'] = [request.values['PreferedOrderCat']]
    df['SatisfactionScore'] = [int(request.values['SatisfactionScore'])]
    df['MaritalStatus'] = [request.values['MaritalStatus']]
    df['NumberOfAddress'] = [int(request.values['NumberOfAddress'])]
    df['Complain'] = [int(request.values['Complain'])]
    df['OrderAmountHikeFromlastYear'] = [float(request.values['OrderAmountHikeFromlastYear'])]
    df['CouponUsed'] = [int(request.values['CouponUsed'])]
    df['OrderCount'] = [int(request.values['OrderCount'])]
    df['DaySinceLastOrder'] = [int(request.values['DaySinceLastOrder'])]
    df['CashbackAmount'] = [float(request.values['CashbackAmount'])]
    df = pd.DataFrame(df)
    #df['Tenure'] = np.where(df['Tenure'] > 30,30,df['Tenure'])
    #df['WarehouseToHome'] = np.where(df['WarehouseToHome'] > 36, 36,df['WarehouseToHome'])

    # One-hot encoder fitted together with the model at training time.
    ohe = joblib.load('oneHotEnc.sav')
    category = ['PreferredLoginDevice', 'PreferredPaymentMode', 'Gender', 'PreferedOrderCat', 'MaritalStatus']
    encoded = ohe.transform(df[category]).toarray()
    df_enc = df.join(pd.DataFrame(encoded, columns=['PreferredLoginDevice_Mobile Phone',
                                                    'PreferredPaymentMode_Credit Card',
                                                    'PreferredPaymentMode_Debit Card', 'PreferredPaymentMode_E wallet',
                                                    'PreferredPaymentMode_UPI', 'Gender_Male',
                                                    'PreferedOrderCat_Grocery', 'PreferedOrderCat_Laptop & Accessory',
                                                    'PreferedOrderCat_Mobile Phone', 'PreferedOrderCat_Others',
                                                    'MaritalStatus_Married', 'MaritalStatus_Single']))
    df_enc = df_enc.drop(['PreferredLoginDevice', 'PreferredPaymentMode', 'Gender',
                          'PreferedOrderCat', 'MaritalStatus'], axis=1)
    # Derived feature expected by the model.
    df_enc['Tenure_year'] = df_enc['Tenure'] / 12

    churn_pred = model.predict(df_enc)
    return render_template("prediction.html", prediction_text='Churn Score is {}'.format(churn_pred))
if __name__ == "__main__":
    # Run Flask's built-in development server (not for production use).
    app.run()
{
"api_name": "joblib.load",
"line_number": 7,
"usage_type": "call"
},
{
"api_name": "flask.Flask",
"line_number": 9,
"usage_type": "call"
},
{
"api_name": "flask.render_template",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "flask.request.values",
... |
7518399441 | import sys
from collections import namedtuple, OrderedDict
import atexit
import logging
from absl import flags
import gin
import numpy as np
from pysc2.lib.features import parse_agent_interface_format, SCREEN_FEATURES, MINIMAP_FEATURES, Features, FeatureType
from pysc2.env.environment import StepType
from pysc2.lib.actions import FunctionCall, FUNCTIONS
from pysc2.lib.remote_controller import RequestError
# Lightweight immutable records describing the environment's action and
# observation spaces (see SC2Environment.__init__ for how they are built).
EnvironmentSpec = namedtuple('EnvironmentSpec', ['action_spec', 'observation_spec'])
ObservationSpec = namedtuple('ObservationSpec', ['id', 'shape', 'is_spatial', 'features'])
FeatureSpec = namedtuple('FeatureSpec', ['index', 'scale', 'is_categorical'])
ActionSpec = namedtuple('ActionSpec', ['id', 'sizes', 'obs_space', 'args_mask'])
@gin.configurable
class SC2EnvironmentConfig:
    """Gin-configurable settings for SC2Environment."""

    def __init__(self, map_name=gin.REQUIRED, screen_size=16, minimap_size=16, function_set='minigames', visualize=False):
        self.map_name = map_name          # must be supplied via gin config
        self.screen_size = screen_size    # screen feature-layer resolution
        self.minimap_size = minimap_size  # minimap feature-layer resolution
        self.function_set = function_set  # 'all' or 'minigames' (see SC2Environment)
        self.visualize = visualize        # open the pysc2 render window
class SC2Environment:
    """Wrapper around pysc2's SC2Env exposing flat action/observation specs.

    Builds an EnvironmentSpec from pysc2's feature metadata, restricts
    the action space to a configurable function set, and converts
    between flat dict actions/observations and pysc2's native types.
    """

    def __init__(self, config: SC2EnvironmentConfig):
        self._aif = parse_agent_interface_format(feature_screen=config.screen_size, feature_minimap=config.minimap_size)
        self._visualize = config.visualize

        # Restrict the action space: either every pysc2 function, or the
        # subset that appears in the standard minigame maps.
        if config.function_set == 'all':
            self._func_ids = [f.id for f in FUNCTIONS]
        elif config.function_set == 'minigames':
            self._func_ids = [0, 1, 2, 3, 4, 6, 7, 12, 13, 42, 44, 50, 91, 183, 234, 309, 331, 332, 333, 334, 451, 452, 490]
        else:
            raise ValueError

        sc2_features = Features(agent_interface_format=self._aif)
        sc2_action_spec = sc2_features.action_spec()
        sc2_obs_spec = sc2_features.observation_spec()

        # Mask of which argument slots each function uses; column 0 is the
        # function id itself and is always "used".
        # BUG FIX: np.bool was deprecated in NumPy 1.20 and removed in 1.24;
        # the builtin bool is the documented replacement as a dtype.
        fn_args_mask = np.zeros((len(self._func_ids), len(sc2_action_spec.types) + 1), dtype=bool)
        fn_args_mask[:, 0] = 1
        for i, func_id in enumerate(self._func_ids):
            used_args = [a.id + 1 for a in FUNCTIONS[func_id].args]
            fn_args_mask[i, used_args] = 1

        action_spec = [('function_id', ActionSpec(0, (len(self._func_ids),), None, fn_args_mask))]
        for t in sc2_action_spec.types:
            # Spatial arguments are tagged with the observation space they
            # index into; everything else is a plain scalar argument.
            if t.name == 'screen' or t.name == 'screen2':
                space = 'screen'
            elif t.name == 'minimap':
                space = 'minimap'
            else:
                space = None
            action_spec.append((t.name, ActionSpec(len(action_spec), t.sizes, space, None)))
        action_spec = OrderedDict(action_spec)

        def feature_spec(features):
            # One FeatureSpec per pysc2 feature layer.
            return [FeatureSpec(f.index, f.scale, f.type == FeatureType.CATEGORICAL) for f in features]

        obs_spec = OrderedDict([
            ('screen', ObservationSpec(0, sc2_obs_spec['feature_screen'], True, feature_spec(SCREEN_FEATURES))),
            ('minimap', ObservationSpec(1, sc2_obs_spec['feature_minimap'], True, feature_spec(MINIMAP_FEATURES))),
            ('available_actions', ObservationSpec(2, (len(self._func_ids),), False, None)),
            ('player', ObservationSpec(3, sc2_obs_spec['player'], False, None))
        ])
        self.spec = EnvironmentSpec(action_spec, obs_spec)

        from pysc2.env.sc2_env import SC2Env, Agent, Race
        # absl flags must be parsed before SC2Env will start.
        if not flags.FLAGS.is_parsed():
            flags.FLAGS(sys.argv)
        # Game creation is flaky; retry a few times before giving up.
        num_retries = 3
        while True:
            try:
                self._env = SC2Env(map_name=config.map_name, agent_interface_format=self._aif, players=[
                    Agent(Race.protoss)
                ], visualize=self._visualize)
                break
            except RequestError:
                num_retries -= 1
                logging.error('SC2Env creation failed, {} retries remaining'.format(num_retries))
                if num_retries <= 0:
                    raise
        atexit.register(self._env.close)

    def close(self):
        """Shut the underlying SC2 process down and drop the atexit hook."""
        self._env.close()
        atexit.unregister(self._env.close)

    def reset(self):
        """Restart the episode and return the initial wrapped observations."""
        obs, rewards, done = self._wrap_obs(self._env.reset())
        return obs

    def step(self, actions):
        """Apply one action dict per agent; auto-reset when the episode ends."""
        sc2_actions = [self._actions_to_sc2(a) for a in actions]
        obs = self._env.step(sc2_actions)
        obs, rewards, done = self._wrap_obs(obs)
        if done:
            obs = self.reset()
        return obs, rewards, done

    def _wrap_obs(self, obs):
        """Convert pysc2 TimeSteps into (observations, rewards, done)."""
        def wrap(o):
            # 0/1 mask over self._func_ids marking currently legal functions.
            available_actions = np.zeros(self.spec.observation_spec['available_actions'].shape, dtype=np.int32)
            func_ids = [i for i, func_id in enumerate(self._func_ids) if func_id in o.observation['available_actions']]  # TODO: this is too slow when using all function ids
            available_actions[func_ids] = 1
            return {
                'screen': np.asarray(o.observation['feature_screen']),
                'minimap': np.asarray(o.observation['feature_minimap']),
                'available_actions': available_actions,
                'player': np.asarray(o.observation['player'])
            }
        return [wrap(o) for o in obs], [o.reward for o in obs], obs[0].step_type == StepType.LAST

    def _actions_to_sc2(self, actions):
        """Translate a dict of flat action indices into a pysc2 FunctionCall."""
        def convert_arg(value, spec):
            if len(spec.sizes) == 2:
                # Flat spatial index -> (row, col), flipped to the
                # (x, y) order pysc2 expects.
                value = np.unravel_index(value, spec.sizes)
                value = np.flip(value)
                return list(value)
            else:
                return [value]
        function = self._func_ids[actions['function_id']]
        args = [
            convert_arg(actions[arg.name].item(), self.spec.action_spec[arg.name])
            for arg in FUNCTIONS[function].args
        ]
        return FunctionCall(function, args)

    def _palettes(self):
        # NOTE(review): dead/broken code — `env_spec` and `tf` are not defined
        # anywhere in this module; this looks like leftover visualization code.
        # Kept as-is pending removal or a proper rewrite with its own imports.
        feat_palettes = [[None] * len(s.features) for s in env_spec.spaces]
        for s in env_spec.spaces:
            for f in s.features:
                palette = f.palette
                if len(palette) < f.scale:
                    palette = np.append(f.palette, [[255, 0, 255] * (f.scale - len(f.palette))], axis=0)
                feat_palettes[s.index][f.index] = tf.constant(palette, dtype=tf.uint8,
                                                              name='{}_{}_palette'.format(s.name, f.name))
| sati290/sc2ai | sc2ai/environments/sc2env.py | sc2env.py | py | 6,301 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "collections.namedtuple",
"line_number": 13,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 14,
"usage_type": "call"
},
{
"api_name": "collections.namedtuple",
"line_number": 15,
"usage_type": "call"
},
{
"api_name": "... |
71217212195 | import keyboard
import time
# Read what to type and how many times to repeat it.
each_line = input("What to spam")
amount = int(input("How many times"))

print("you have 5 seconds to open whatever you want to spam")
time.sleep(5)

# BUG FIX: range(1, amount) sent only amount-1 messages; range(amount)
# sends exactly the requested count.
for _ in range(amount):
    keyboard.write(each_line)
    # BUG FIX: keyboard.press('enter') only presses the key and never
    # releases it; press_and_release sends a complete keystroke.
    keyboard.press_and_release('enter')

print("Done")
| GnomeyDev/PythonScripts | UniSpammer1.py | UniSpammer1.py | py | 296 | python | en | code | 0 | github-code | 1 | [
{
"api_name": "time.sleep",
"line_number": 8,
"usage_type": "call"
},
{
"api_name": "keyboard.write",
"line_number": 11,
"usage_type": "call"
},
{
"api_name": "keyboard.press",
"line_number": 12,
"usage_type": "call"
}
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.