code stringlengths 22 1.05M | apis listlengths 1 3.31k | extract_api stringlengths 75 3.25M |
|---|---|---|
# -*-coding:Utf-8 -*
# Copyright (c) 2010-2017 <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# * Redistributions of source code must retain the above copyright notice, this
# raise of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright notice,
# this raise of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT
# OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Fichier contenant le paramètre 'tenir' de la commande 'gouvernail'."""
from primaires.interpreteur.masque.parametre import Parametre
class PrmTenir(Parametre):
    """Parameter 'tenir' (hold) of the 'gouvernail' (helm) command."""

    def __init__(self):
        """Declare the parameter names and its in-game help texts."""
        Parametre.__init__(self, "tenir", "hold")
        self.aide_courte = "fait tenir le gouvernail"
        self.aide_longue = \
            "Cette commande permet de tenir le gouvernail. Il est " \
            "obligatoire de tenir un gouvernail pour virer. En outre, " \
            "un gouvernail qui n'est pas tenu ne sera pas du tout " \
            "stable et le navire pourra faire des embardées."

    def interpreter(self, personnage, dic_masques):
        """Make the character take hold of the helm in the current room.

        Guard clauses reject, in order: not being aboard a ship, a room
        without a helm, and a helm already held (by the character or by
        someone else).
        """
        salle = personnage.salle
        # getattr covers both a missing attribute and an explicit None.
        navire = getattr(salle, "navire", None)
        if navire is None or navire.etendue is None:
            personnage << "|err|Vous n'êtes pas sur un navire.|ff|"
            return
        gouvernail = salle.gouvernail
        if not gouvernail:
            personnage << "|err|Il n'y a pas de gouvernail ici.|ff|"
            return
        if not gouvernail.tenu:
            gouvernail.tenir(personnage)
        elif gouvernail.tenu is personnage:
            personnage << "|err|Vous tenez déjà ce gouvernail.|ff|"
        else:
            personnage << "|err|Ce gouvernail est déjà tenu par " \
                "quelqu'un d'autre.|ff|"
| [
"primaires.interpreteur.masque.parametre.Parametre.__init__"
] | [((1844, 1885), 'primaires.interpreteur.masque.parametre.Parametre.__init__', 'Parametre.__init__', (['self', '"""tenir"""', '"""hold"""'], {}), "(self, 'tenir', 'hold')\n", (1862, 1885), False, 'from primaires.interpreteur.masque.parametre import Parametre\n')] |
# Generated by Django 2.2.2 on 2019-06-14 14:37
from django.db import migrations, models
class Migration(migrations.Migration):
    """Initial migration: creates the Card table."""

    initial = True

    dependencies = [
    ]

    operations = [
        migrations.CreateModel(
            name='Card',
            fields=[
                # Explicit auto pk; serialize=False keeps it out of fixture dumps.
                ('id', models.AutoField(primary_key=True, serialize=False)),
                ('name', models.CharField(max_length=50, verbose_name='Название')),
                ('description', models.CharField(max_length=500, verbose_name='Описание')),
                # Archive flag; cards start active (False).
                ('is_archived', models.BooleanField(default=False, verbose_name='Архивировано')),
            ],
        ),
    ]
| [
"django.db.models.CharField",
"django.db.models.BooleanField",
"django.db.models.AutoField"
] | [((300, 351), 'django.db.models.AutoField', 'models.AutoField', ([], {'primary_key': '(True)', 'serialize': '(False)'}), '(primary_key=True, serialize=False)\n', (316, 351), False, 'from django.db import migrations, models\n'), ((379, 435), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(50)', 'verbose_name': '"""Название"""'}), "(max_length=50, verbose_name='Название')\n", (395, 435), False, 'from django.db import migrations, models\n'), ((470, 527), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(500)', 'verbose_name': '"""Описание"""'}), "(max_length=500, verbose_name='Описание')\n", (486, 527), False, 'from django.db import migrations, models\n'), ((562, 625), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)', 'verbose_name': '"""Архивировано"""'}), "(default=False, verbose_name='Архивировано')\n", (581, 625), False, 'from django.db import migrations, models\n')] |
import json
from django.core.paginator import Paginator
from django.shortcuts import render, redirect
from django.contrib import messages
from django.utils.translation import gettext as _
from .forms import (
OnboardingRegistrationForm,
OnboardingCreateTrustChain,
OnboardingValidatingTrustMarkForm
)
from .models import OnBoardingRegistration
from spid_cie_oidc.entity.jwks import (
private_pem_from_jwk,
public_pem_from_jwk,
new_rsa_key,
serialize_rsa_key
)
from spid_cie_oidc.entity.jwtse import unpad_jwt_head, unpad_jwt_payload, verify_jws
from spid_cie_oidc.authority.views import trust_mark_status, resolve_entity_statement
def onboarding_landing(request):
    """Render the static landing page of the on-boarding section."""
    template_name = "onboarding_landing.html"
    return render(request, template_name)
def onboarding_registration(request):
    """Display the registration form; on a valid POST, persist the entity
    and redirect to the entity list with a success message."""
    form = OnboardingRegistrationForm()
    if request.method == "POST":
        # Rebind to a bound form so validation errors are rendered back.
        form = OnboardingRegistrationForm(request.POST)
        if form.is_valid():
            OnBoardingRegistration.objects.create(**form.cleaned_data)
            messages.success(request, _("Registration successfully"))
            return redirect("oidc_onboarding_entities")
    return render(request, "onboarding_registration.html", {"form": form})
def onboarding_entities(request):
    """List all registered on-boarding entities, paginated ten per page."""
    entity_list = OnBoardingRegistration.objects.all()
    paginator = Paginator(entity_list, 10)
    entities = paginator.get_page(request.GET.get("page"))
    context = {"entity_list": entity_list, "entities": entities}
    return render(request, "onboarding_entities.html", context)
def onboarding_create_jwk(request):
    """Generate a fresh RSA key pair and render both serialized JWKs."""
    rsa_key = new_rsa_key()
    context = {
        "private_jwk": serialize_rsa_key(rsa_key.priv_key, 'private'),
        "public_jwk": serialize_rsa_key(rsa_key.pub_key),
    }
    return render(request, 'onboarding_jwk.html', context)
def onboarding_convert_jwk(request):
    """Convert a pasted JWK to PEM, private or public per the ``type``
    query parameter, and render the result.

    Bug fixed: when ``type`` was neither 'private' nor 'public', ``pem``
    was never bound, so the except branch reported a confusing NameError
    to the user. ``pem`` now defaults to the empty string and the two
    conversions are mutually exclusive branches.
    """
    jwk_type = request.GET.get('type')
    context = {
        "jwk": "",
        "pem": "",
        "jwk_type": jwk_type
    }
    if request.method == 'POST':
        try:
            jwk_str = request.POST.get('jwk')
            # Pasted python-dict reprs use single quotes: make it valid JSON.
            jwk_dict = json.loads(jwk_str.replace("'", '"'))
            pem = ""
            if jwk_type == 'private':
                pem = private_pem_from_jwk(jwk_dict)
            elif jwk_type == 'public':
                pem = public_pem_from_jwk(jwk_dict)
            context = {
                "jwk": jwk_dict,
                "pem": pem,
                "jwk_type": jwk_type
            }
        except Exception as e:
            messages.error(request, _(f" {e} "))
            return render(request, 'onboarding_convert_jwk.html', context)
    return render(request, 'onboarding_convert_jwk.html', context)
def onboarding_resolve_statement(request):
    """Resolve an entity statement for the subject supplied in the query
    string and render it, flashing an error message on failure.

    Bug fixed: a ``render()`` whose return value was discarded ran after
    the try/except, building the page a second time for nothing; the
    single return below is sufficient.
    """
    if "sub" in request.GET:
        form = OnboardingCreateTrustChain(request.GET)
    else:
        form = OnboardingCreateTrustChain()
    context = {'form': form}
    if form.is_valid():
        context = {
            'form': form,
            "resolved_statement": "",
        }
        try:
            res = resolve_entity_statement(request, format="json")
            context["resolved_statement"] = res.content.decode()
        except Exception:
            messages.error(request, _('Failed to resolve entity statement, Please check your inserted data'))
    return render(request, 'onboarding_resolve_statement.html', context)
def onboarding_validating_trustmark(request):
    """Validate a trust mark (given by id or raw value) and flash the
    outcome as a success or error message."""
    if "id" in request.GET or "trust_mark" in request.GET:
        form = OnboardingValidatingTrustMarkForm(request.GET)
    else:
        form = OnboardingValidatingTrustMarkForm()
    context = {"form": form}
    if form.is_valid():
        response = trust_mark_status(request)
        payload = json.loads(response.content.decode())
        context = {'form': form}
        if payload['active']:
            messages.success(request, _('Validation Trust Mark Successfully'))
        else:
            messages.error(request, _('Validation Trust Mark Failed'))
    return render(request, 'onboarding_validating_tm.html', context)
def onboarding_decode_jwt(request):
    """Decode a pasted JWT (header + payload) and, when a JWK is also
    supplied, verify its signature, flashing the outcome.

    Bug fixed: the except branch built a ``render()`` whose result was
    thrown away immediately before the final return rendered the page
    again; the dead call has been removed.
    """
    context = {
        "jwt": "",
        "jwk": "",
        "head": "",
        "payload": ""
    }
    if request.POST.get('jwt'):
        jwt = request.POST['jwt']
        head = unpad_jwt_head(jwt)
        payload = unpad_jwt_payload(jwt)
        context["jwt"] = jwt
        context["head"] = head
        context["payload"] = payload
    if request.POST.get('jwk'):
        # NOTE(review): if a JWK is posted without a JWT, the NameError from
        # verify_jws(jwt, ...) is caught below and reported as a failed
        # verification -- confirm this is the intended behavior.
        jwk_str = request.POST['jwk']
        context["jwk"] = jwk_str
        jwk = json.loads(jwk_str.replace("'", '"'))
        try:
            verify_jws(jwt, jwk)
            messages.success(request, _('Your jws is verified'))
        except Exception:
            messages.error(request, _("Jws verification failed"))
    return render(request, 'onboarding_decode_jwt.html', context)
| [
"spid_cie_oidc.entity.jwks.new_rsa_key",
"spid_cie_oidc.entity.jwks.private_pem_from_jwk",
"json.loads",
"spid_cie_oidc.entity.jwtse.unpad_jwt_head",
"django.utils.translation.gettext",
"django.shortcuts.redirect",
"spid_cie_oidc.authority.views.resolve_entity_statement",
"spid_cie_oidc.entity.jwks.pu... | [((707, 749), 'django.shortcuts.render', 'render', (['request', '"""onboarding_landing.html"""'], {}), "(request, 'onboarding_landing.html')\n", (713, 749), False, 'from django.shortcuts import render, redirect\n'), ((1277, 1333), 'django.shortcuts.render', 'render', (['request', '"""onboarding_registration.html"""', 'context'], {}), "(request, 'onboarding_registration.html', context)\n", (1283, 1333), False, 'from django.shortcuts import render, redirect\n'), ((1433, 1459), 'django.core.paginator.Paginator', 'Paginator', (['entity_list', '(10)'], {}), '(entity_list, 10)\n', (1442, 1459), False, 'from django.core.paginator import Paginator\n'), ((1538, 1637), 'django.shortcuts.render', 'render', (['request', '"""onboarding_entities.html"""', "{'entity_list': entity_list, 'entities': entities}"], {}), "(request, 'onboarding_entities.html', {'entity_list': entity_list,\n 'entities': entities})\n", (1544, 1637), False, 'from django.shortcuts import render, redirect\n'), ((1718, 1731), 'spid_cie_oidc.entity.jwks.new_rsa_key', 'new_rsa_key', ([], {}), '()\n', (1729, 1731), False, 'from spid_cie_oidc.entity.jwks import private_pem_from_jwk, public_pem_from_jwk, new_rsa_key, serialize_rsa_key\n'), ((1750, 1797), 'spid_cie_oidc.entity.jwks.serialize_rsa_key', 'serialize_rsa_key', (['_rsa_key.priv_key', '"""private"""'], {}), "(_rsa_key.priv_key, 'private')\n", (1767, 1797), False, 'from spid_cie_oidc.entity.jwks import private_pem_from_jwk, public_pem_from_jwk, new_rsa_key, serialize_rsa_key\n'), ((1815, 1850), 'spid_cie_oidc.entity.jwks.serialize_rsa_key', 'serialize_rsa_key', (['_rsa_key.pub_key'], {}), '(_rsa_key.pub_key)\n', (1832, 1850), False, 'from spid_cie_oidc.entity.jwks import private_pem_from_jwk, public_pem_from_jwk, new_rsa_key, serialize_rsa_key\n'), ((1953, 2000), 'django.shortcuts.render', 'render', (['request', '"""onboarding_jwk.html"""', 'context'], {}), "(request, 'onboarding_jwk.html', context)\n", (1959, 2000), 
False, 'from django.shortcuts import render, redirect\n'), ((2861, 2916), 'django.shortcuts.render', 'render', (['request', '"""onboarding_convert_jwk.html"""', 'context'], {}), "(request, 'onboarding_convert_jwk.html', context)\n", (2867, 2916), False, 'from django.shortcuts import render, redirect\n'), ((3616, 3677), 'django.shortcuts.render', 'render', (['request', '"""onboarding_resolve_statement.html"""', 'context'], {}), "(request, 'onboarding_resolve_statement.html', context)\n", (3622, 3677), False, 'from django.shortcuts import render, redirect\n'), ((4294, 4351), 'django.shortcuts.render', 'render', (['request', '"""onboarding_validating_tm.html"""', 'context'], {}), "(request, 'onboarding_validating_tm.html', context)\n", (4300, 4351), False, 'from django.shortcuts import render, redirect\n'), ((5279, 5333), 'django.shortcuts.render', 'render', (['request', '"""onboarding_decode_jwt.html"""', 'context'], {}), "(request, 'onboarding_decode_jwt.html', context)\n", (5285, 5333), False, 'from django.shortcuts import render, redirect\n'), ((3978, 4004), 'spid_cie_oidc.authority.views.trust_mark_status', 'trust_mark_status', (['request'], {}), '(request)\n', (3995, 4004), False, 'from spid_cie_oidc.authority.views import trust_mark_status, resolve_entity_statement\n'), ((4589, 4608), 'spid_cie_oidc.entity.jwtse.unpad_jwt_head', 'unpad_jwt_head', (['jwt'], {}), '(jwt)\n', (4603, 4608), False, 'from spid_cie_oidc.entity.jwtse import unpad_jwt_head, unpad_jwt_payload, verify_jws\n'), ((4627, 4649), 'spid_cie_oidc.entity.jwtse.unpad_jwt_payload', 'unpad_jwt_payload', (['jwt'], {}), '(jwt)\n', (4644, 4649), False, 'from spid_cie_oidc.entity.jwtse import unpad_jwt_head, unpad_jwt_payload, verify_jws\n'), ((1229, 1265), 'django.shortcuts.redirect', 'redirect', (['"""oidc_onboarding_entities"""'], {}), "('oidc_onboarding_entities')\n", (1237, 1265), False, 'from django.shortcuts import render, redirect\n'), ((2344, 2376), 'json.loads', 'json.loads', 
(['jwk_str_double_quote'], {}), '(jwk_str_double_quote)\n', (2354, 2376), False, 'import json\n'), ((3280, 3328), 'spid_cie_oidc.authority.views.resolve_entity_statement', 'resolve_entity_statement', (['request'], {'format': '"""json"""'}), "(request, format='json')\n", (3304, 3328), False, 'from spid_cie_oidc.authority.views import trust_mark_status, resolve_entity_statement\n'), ((4941, 4973), 'json.loads', 'json.loads', (['jwk_str_double_quote'], {}), '(jwk_str_double_quote)\n', (4951, 4973), False, 'import json\n'), ((1178, 1208), 'django.utils.translation.gettext', '_', (['"""Registration successfully"""'], {}), "('Registration successfully')\n", (1179, 1208), True, 'from django.utils.translation import gettext as _\n'), ((2438, 2468), 'spid_cie_oidc.entity.jwks.private_pem_from_jwk', 'private_pem_from_jwk', (['jwk_dict'], {}), '(jwk_dict)\n', (2458, 2468), False, 'from spid_cie_oidc.entity.jwks import private_pem_from_jwk, public_pem_from_jwk, new_rsa_key, serialize_rsa_key\n'), ((2528, 2557), 'spid_cie_oidc.entity.jwks.public_pem_from_jwk', 'public_pem_from_jwk', (['jwk_dict'], {}), '(jwk_dict)\n', (2547, 2557), False, 'from spid_cie_oidc.entity.jwks import private_pem_from_jwk, public_pem_from_jwk, new_rsa_key, serialize_rsa_key\n'), ((2794, 2849), 'django.shortcuts.render', 'render', (['request', '"""onboarding_convert_jwk.html"""', 'context'], {}), "(request, 'onboarding_convert_jwk.html', context)\n", (2800, 2849), False, 'from django.shortcuts import render, redirect\n'), ((3542, 3603), 'django.shortcuts.render', 'render', (['request', '"""onboarding_resolve_statement.html"""', 'context'], {}), "(request, 'onboarding_resolve_statement.html', context)\n", (3548, 3603), False, 'from django.shortcuts import render, redirect\n'), ((4157, 4196), 'django.utils.translation.gettext', '_', (['"""Validation Trust Mark Successfully"""'], {}), "('Validation Trust Mark Successfully')\n", (4158, 4196), True, 'from django.utils.translation import gettext as _\n'), 
((4248, 4281), 'django.utils.translation.gettext', '_', (['"""Validation Trust Mark Failed"""'], {}), "('Validation Trust Mark Failed')\n", (4249, 4281), True, 'from django.utils.translation import gettext as _\n'), ((5007, 5027), 'spid_cie_oidc.entity.jwtse.verify_jws', 'verify_jws', (['jwt', 'jwk'], {}), '(jwt, jwk)\n', (5017, 5027), False, 'from spid_cie_oidc.entity.jwtse import unpad_jwt_head, unpad_jwt_payload, verify_jws\n'), ((2762, 2773), 'django.utils.translation.gettext', '_', (['f""" {e} """'], {}), "(f' {e} ')\n", (2763, 2773), True, 'from django.utils.translation import gettext as _\n'), ((3456, 3528), 'django.utils.translation.gettext', '_', (['"""Failed to resolve entity statement, Please check your inserted data"""'], {}), "('Failed to resolve entity statement, Please check your inserted data')\n", (3457, 3528), True, 'from django.utils.translation import gettext as _\n'), ((5070, 5095), 'django.utils.translation.gettext', '_', (['"""Your jws is verified"""'], {}), "('Your jws is verified')\n", (5071, 5095), True, 'from django.utils.translation import gettext as _\n'), ((5213, 5267), 'django.shortcuts.render', 'render', (['request', '"""onboarding_decode_jwt.html"""', 'context'], {}), "(request, 'onboarding_decode_jwt.html', context)\n", (5219, 5267), False, 'from django.shortcuts import render, redirect\n'), ((5167, 5195), 'django.utils.translation.gettext', '_', (['"""Jws verification failed"""'], {}), "('Jws verification failed')\n", (5168, 5195), True, 'from django.utils.translation import gettext as _\n')] |
"""Implements Comet Logger."""
from abc import ABC
from typing import TYPE_CHECKING, List
from torchflare.callbacks.callback import Callbacks
from torchflare.callbacks.states import CallbackOrder
from torchflare.utils.imports_check import module_available
if TYPE_CHECKING:
    from torchflare.experiments.experiment import Experiment

# Bug fixed: the availability probe checked the misspelled module name
# "come_ml", so the comet_ml integration was always reported missing even
# when the package (imported below as comet_ml) was installed.
_AVAILABLE = module_available("comet_ml")

if _AVAILABLE:
    import comet_ml
else:
    comet_ml = None
class CometLogger(Callbacks, ABC):
    """Log experiment metrics and losses to Comet.

    See https://www.comet.ml/site/ for more information about Comet.

    Args:
        api_token: API key obtained from comet.ml.
        params: Hyperparameters of the model/experiment, as a dictionary.
        project_name: Project the experiment is sent to. Otherwise it ends
            up in Uncategorized Experiments on Comet's side.
        workspace: Workspace that owns the target project.
        tags: List of string tags attached to the experiment.

    Examples:
        .. code-block::

            from torchflare.callbacks import CometLogger

            params = {"bs": 16, "lr": 0.3}
            logger = CometLogger(
                project_name="experiment_10",
                workspace="username",
                params=params,
                tags=["Experiment", "fold_0"],
                api_token="your_secret_api_token",
            )
    """

    def __init__(
        self,
        api_token: str,
        params: dict,
        project_name: str,
        workspace: str,
        tags: List[str],
    ):
        """Store the Comet connection settings; the run is created lazily."""
        super(CometLogger, self).__init__(order=CallbackOrder.LOGGING)
        self.api_token = api_token
        self.project_name = project_name
        self.workspace = workspace
        self.params = params
        self.tags = tags
        self.experiment = None

    def on_experiment_start(self, experiment: "Experiment"):
        """Open the Comet run and attach the configured tags/hyperparameters."""
        self.experiment = comet_ml.Experiment(
            project_name=self.project_name,
            api_key=self.api_token,
            workspace=self.workspace,
            log_code=False,
            display_summary_level=0,
        )
        if self.tags is not None:
            self.experiment.add_tags(self.tags)
        if self.params is not None:
            self.experiment.log_parameters(self.params)

    def on_epoch_end(self, experiment: "Experiment"):
        """Push every logged value except the epoch counter, keyed by epoch."""
        step = experiment.exp_logs[experiment.epoch_key]
        metrics = {
            key: value
            for key, value in experiment.exp_logs.items()
            if key != experiment.epoch_key
        }
        self.experiment.log_metrics(metrics, step=step)

    def on_experiment_end(self, experiment: "Experiment"):
        """Close the Comet run and drop the handle."""
        self.experiment.end()
        self.experiment = None
| [
"comet_ml.Experiment",
"torchflare.utils.imports_check.module_available"
] | [((352, 379), 'torchflare.utils.imports_check.module_available', 'module_available', (['"""come_ml"""'], {}), "('come_ml')\n", (368, 379), False, 'from torchflare.utils.imports_check import module_available\n'), ((2061, 2207), 'comet_ml.Experiment', 'comet_ml.Experiment', ([], {'project_name': 'self.project_name', 'api_key': 'self.api_token', 'workspace': 'self.workspace', 'log_code': '(False)', 'display_summary_level': '(0)'}), '(project_name=self.project_name, api_key=self.api_token,\n workspace=self.workspace, log_code=False, display_summary_level=0)\n', (2080, 2207), False, 'import comet_ml\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
from shutil import copyfile
class Task(object):
    """Copy files whose extension matches a whitelist, mirroring sub-trees.

    Documentation: https://docs.droppyapp.com/tasks/filter-by-extensions
    """

    def __init__(self, input_dir, output_dir, **kwargs):
        """Scan input_dir and copy matching files into output_dir.

        Keyword Args:
            extensions: list of extensions (without dots) to keep;
                defaults to ['txt', 'json', 'xml'].
        """
        wanted = kwargs.get('extensions', ['txt', 'json', 'xml'])
        for entry in os.listdir(input_dir):
            source = os.path.join(input_dir, entry)
            if os.path.isfile(source):
                self.check_and_copy(wanted, source, os.path.join(output_dir, entry))
            elif os.path.isdir(source):
                self._copy_tree(wanted, source, os.path.join(output_dir, entry))

    def _copy_tree(self, wanted, source_dir, target_dir):
        """Recreate source_dir's directory layout under target_dir and copy
        the files that pass the extension filter."""
        os.makedirs(target_dir)
        for root, dirs, files in os.walk(source_dir):
            # Path of root relative to source_dir ('' at the top level).
            rel = root[len(source_dir) + 1:]
            for d in dirs:
                os.makedirs(os.path.join(target_dir, rel, d))
            for f in files:
                self.check_and_copy(
                    wanted,
                    os.path.join(root, f),
                    os.path.join(target_dir, rel, f),
                )

    @staticmethod
    def check_and_copy(extensions, full_input_path, full_output_path):
        """Copy the file iff its extension (case-insensitive, dot stripped)
        is one of *extensions*."""
        _, ext = os.path.splitext(full_input_path)
        allowed = {e.upper() for e in extensions}
        if ext.replace('.', '').upper() in allowed:
            copyfile(full_input_path, full_output_path)
| [
"os.path.isdir",
"os.walk",
"os.path.isfile",
"os.path.splitext",
"shutil.copyfile",
"os.path.join",
"os.listdir"
] | [((464, 485), 'os.listdir', 'os.listdir', (['input_dir'], {}), '(input_dir)\n', (474, 485), False, 'import os\n'), ((1630, 1663), 'os.path.splitext', 'os.path.splitext', (['full_input_path'], {}), '(full_input_path)\n', (1646, 1663), False, 'import os\n'), ((511, 545), 'os.path.join', 'os.path.join', (['input_dir', 'item_name'], {}), '(input_dir, item_name)\n', (523, 545), False, 'import os\n'), ((562, 587), 'os.path.isfile', 'os.path.isfile', (['item_path'], {}), '(item_path)\n', (576, 587), False, 'import os\n'), ((1833, 1876), 'shutil.copyfile', 'copyfile', (['full_input_path', 'full_output_path'], {}), '(full_input_path, full_output_path)\n', (1841, 1876), False, 'from shutil import copyfile\n'), ((625, 660), 'os.path.join', 'os.path.join', (['output_dir', 'item_name'], {}), '(output_dir, item_name)\n', (637, 660), False, 'import os\n'), ((757, 781), 'os.path.isdir', 'os.path.isdir', (['item_path'], {}), '(item_path)\n', (770, 781), False, 'import os\n'), ((932, 950), 'os.walk', 'os.walk', (['item_path'], {}), '(item_path)\n', (939, 950), False, 'import os\n'), ((851, 888), 'os.path.join', 'os.path.join', (['output_dir', 'output_base'], {}), '(output_dir, output_base)\n', (863, 888), False, 'import os\n'), ((1291, 1312), 'os.path.join', 'os.path.join', (['root', 'f'], {}), '(root, f)\n', (1303, 1312), False, 'import os\n'), ((1356, 1411), 'os.path.join', 'os.path.join', (['output_dir', 'output_base', 'relative_path', 'f'], {}), '(output_dir, output_base, relative_path, f)\n', (1368, 1411), False, 'import os\n'), ((1089, 1144), 'os.path.join', 'os.path.join', (['output_dir', 'output_base', 'relative_path', 'd'], {}), '(output_dir, output_base, relative_path, d)\n', (1101, 1144), False, 'import os\n')] |
# Render an NTP/timezone Jinja2 template and connect to a lab IOS device.
from __future__ import unicode_literals, print_function
from netmiko import Netmiko
from getpass import getpass
from jinja2 import FileSystemLoader, StrictUndefined
from jinja2.environment import Environment

# Prompt interactively so the password never appears in source or history.
password = getpass()

# Jinja2 environment loading templates from the current directory.
# NOTE(review): StrictUndefined is imported but never passed to
# Environment(), so undefined template variables render silently as
# empty strings -- confirm whether strict mode was intended.
env = Environment()
env.loader = FileSystemLoader('.')

# Connection parameters for the target device.
cisco3 = {
    "host": 'cisco3.lasthop.io',
    "username": 'pyclass',
    "password": password,
    "device_type": 'cisco_ios',
}

# Variables substituted into the template below.
my_vars = {
    "ntp_server1":'172.16.31.10',
    "ntp_server2": '172.16.31.10',
    "timezone": 'PST',
    "timezone_offset": '-8',
    "timezone_dst": 'PDT',
}

# Side effect: opens an SSH session to the device.
# NOTE(review): net_connect is never used afterwards -- confirm the
# rendered config was meant to be pushed with send_config_set().
net_connect = Netmiko(**cisco3)

# Render the configuration template and print the result.
template_file = 'base_template.j2'
template = env.get_template(template_file)
output = template.render(**my_vars)
print(output)
| [
"getpass.getpass",
"jinja2.FileSystemLoader",
"jinja2.environment.Environment",
"netmiko.Netmiko"
] | [((219, 228), 'getpass.getpass', 'getpass', ([], {}), '()\n', (226, 228), False, 'from getpass import getpass\n'), ((235, 248), 'jinja2.environment.Environment', 'Environment', ([], {}), '()\n', (246, 248), False, 'from jinja2.environment import Environment\n'), ((262, 283), 'jinja2.FileSystemLoader', 'FileSystemLoader', (['"""."""'], {}), "('.')\n", (278, 283), False, 'from jinja2 import FileSystemLoader, StrictUndefined\n'), ((618, 635), 'netmiko.Netmiko', 'Netmiko', ([], {}), '(**cisco3)\n', (625, 635), False, 'from netmiko import Netmiko\n')] |
# -*- coding: utf-8 -*-
# flake8: noqa
__version__ = '2.0.1'
import logging
import utool as ut
ut.noinject(__name__, '[guitool.__init__]')
# try:
# # try seeing if importing plottool before any guitool things helps
# import wbia.plottool
# except Exception as ex:
# import utool as ut
# ut.printex(ex, 'tried to import wbia.plottool to solve win crash')
# raise
# #pass
# print('__guitool__1')
from wbia.guitool import __PYQT__
# print('__guitool__2')
from wbia.guitool import api_item_model
from wbia.guitool import api_table_view
from wbia.guitool import api_tree_view
from wbia.guitool import api_item_widget
from wbia.guitool import stripe_proxy_model
from wbia.guitool import guitool_tables
from wbia.guitool import guitool_dialogs
from wbia.guitool import guitool_decorators
from wbia.guitool import guitool_delegates
from wbia.guitool import guitool_components
from wbia.guitool import guitool_main
from wbia.guitool import guitool_misc
from wbia.guitool import qtype
import utool
# utool-injected module helpers: reload hook (rrr) and profiling decorator.
# NOTE: this rebinds the builtin name ``print`` within this module.
print, rrr, profile = utool.inject2(__name__, '[guitool]')
logger = logging.getLogger('wbia')
def reload_subs():
    """Reload this module and every submodule that supports hot reload."""
    rrr()
    _reloadable = (
        guitool_tables,
        guitool_dialogs,
        guitool_decorators,
        guitool_main,
        guitool_misc,
        api_item_model,
        qtype,
        guitool_components,
    )
    # Only call rrr() on modules that actually expose it.
    for _mod in _reloadable:
        if hasattr(_mod, 'rrr'):
            _mod.rrr()


rrrr = reload_subs
# (submodule_name, fromimports) pairs consumed by util_importer's
# dynamic_import below and by reassign_submodule_attributes.
IMPORT_TUPLES = [
    ('guitool_main', None),
    ('guitool_components', None),
    ('guitool_dialogs', None),
    ('guitool_decorators', None),
    ('guitool_misc', None),
    ('api_item_model', None),
    ('api_tree_view', None),
    ('api_table_view', None),
    ('qtype', None),
    ('stripe_proxy_model', None),
    ('filter_proxy_model', None),
]
"""
python -c "import wbia.guitool" --dump-guitool-init
python -c "import wbia.guitool" --update-guitool-init
"""
# --nodyn disables the dynamic import machinery below.
__DYNAMIC__ = not ut.get_argflag('--nodyn')

# DOELSE selects the static (non-exec) import branch further below.
DOELSE = False
if __DYNAMIC__:
    # TODO: import all utool external prereqs. Then the imports will not import
    # anything that has already in a toplevel namespace
    # COMMENTED OUT FOR FROZEN __INIT__
    # Dynamically import listed util libraries and their members.
    from utool._internal import util_importer

    # FIXME: this might actually work with rrrr, but things arent being
    # reimported because they are already in the modules list
    ignore_endswith = ['_cyth']
    ignore_list = ['Qt']
    # Build "from x import y" source text for every entry in IMPORT_TUPLES.
    import_execstr = util_importer.dynamic_import(
        __name__,
        IMPORT_TUPLES,
        ignore_endswith=ignore_endswith,
        ignore_list=ignore_list,
        verbose=False,
    )
    # Executed in module scope: pulls submodule members into this namespace.
    exec(import_execstr)
    DOELSE = False
else:
    # Do the nonexec import (can force it to happen no matter what if always set
    # to True)
    DOELSE = True
# This screws up dynamic_import if it is placed before
from wbia.guitool.guitool_tables import *
from wbia.guitool.guitool_dialogs import *
from wbia.guitool.guitool_decorators import *
from wbia.guitool.guitool_delegates import *
from wbia.guitool.guitool_components import *
from wbia.guitool.guitool_main import *
from wbia.guitool.guitool_misc import *
from wbia.guitool.api_item_model import *
from wbia.guitool.api_table_view import *
from wbia.guitool.api_tree_view import *
from wbia.guitool.api_item_widget import *
from wbia.guitool.stripe_proxy_model import *
from wbia.guitool.filter_proxy_model import *
from wbia.guitool.qtype import *
if DOELSE:
pass
# <AUTOGEN_INIT>
from wbia.guitool import guitool_main
from wbia.guitool import guitool_components
from wbia.guitool import guitool_dialogs
from wbia.guitool import guitool_decorators
from wbia.guitool import guitool_misc
from wbia.guitool import api_item_model
from wbia.guitool import api_tree_view
from wbia.guitool import api_table_view
from wbia.guitool import qtype
from wbia.guitool import stripe_proxy_model
from wbia.guitool import filter_proxy_model
from wbia.guitool.guitool_main import (
GUITOOL_PYQT_VERSION,
GuitoolApplication,
IS_ROOT_WINDOW,
QAPP,
QUIET,
VERBOSE,
activate_qwindow,
ensure_qapp,
ensure_qtapp,
exit_application,
get_qtapp,
ping_python_interpreter,
qtapp_loop,
qtapp_loop_nonblocking,
remove_pyqt_input_hook,
)
from wbia.guitool.guitool_components import (
ALIGN_DICT,
BlockSignals,
ConfigConfirmWidget,
DEBUG_WIDGET,
GuiProgContext,
GuitoolWidget,
PROG_TEXT,
ProgHook,
ResizableTextEdit,
SimpleTree,
Spoiler,
WIDGET_BASE,
adjust_font,
fix_child_attr_heirarchy,
fix_child_size_heirarchy,
getAvailableFonts,
get_nested_attr,
get_widget_text_width,
layoutSplitter,
make_style_sheet,
msg_event,
newButton,
newCheckBox,
newComboBox,
newFont,
newFrame,
newLabel,
newLineEdit,
newMenu,
newMenuAction,
newMenubar,
newOutputLog,
newProgressBar,
newQPoint,
newScrollArea,
newSizePolicy,
newSplitter,
newTabWidget,
newTextEdit,
newToolbar,
newWidget,
print_widget_heirarchy,
prop_text_map,
rectify_qt_const,
walk_widget_heirarchy,
)
from wbia.guitool.guitool_dialogs import (
ResizableMessageBox,
SELDIR_CACHEID,
are_you_sure,
build_nested_qmenu,
connect_context_menu,
msgbox,
newDirectoryDialog,
newFileDialog,
popup_menu,
select_directory,
select_files,
select_images,
user_info,
user_input,
user_option,
user_question,
)
from wbia.guitool.guitool_decorators import (
DEBUG,
checks_qt_error,
signal_,
slot_,
)
from wbia.guitool.guitool_misc import (
ALT_KEY,
BlockContext,
GUILoggingHandler,
GUILoggingSender,
QLoggedOutput,
find_used_chars,
get_cplat_tab_height,
get_view_selection_as_str,
make_option_dict,
make_word_hotlinks,
)
from wbia.guitool.api_item_model import (
APIItemModel,
API_MODEL_BASE,
ChangeLayoutContext,
QVariantHack,
VERBOSE_MODEL,
default_method_decorator,
simple_thumbnail_widget,
updater,
)
from wbia.guitool.api_tree_view import (
APITreeView,
API_VIEW_BASE,
testdata_tree_view,
)
from wbia.guitool.api_table_view import APITableView
from wbia.guitool.qtype import (
ItemDataRoles,
LOCALE,
QLocale,
QString,
QT_BUTTON_TYPES,
QT_COMBO_TYPES,
QT_DELEGATE_TYPES,
QT_ICON_TYPES,
QT_IMAGE_TYPES,
QT_PIXMAP_TYPES,
QVariant,
SIMPLE_CASTING,
cast_from_qt,
cast_into_qt,
infer_coltype,
locale_float,
numpy_to_qicon,
numpy_to_qpixmap,
qindexinfo,
to_qcolor,
)
from wbia.guitool.stripe_proxy_model import (
STRIPE_PROXY_BASE,
STRIP_PROXY_META_CLASS,
STRIP_PROXY_SIX_BASE,
StripeProxyModel,
)
from wbia.guitool.filter_proxy_model import (
BASE_CLASS,
FilterProxyModel,
)
import utool
print, rrr, profile = utool.inject2(__name__, '[guitool]')
def reassign_submodule_attributes(verbose=1):
    """
    Update attributes in this __init__ module with the (possibly reloaded)
    attributes of the submodules listed in IMPORT_TUPLES.

    Args:
        verbose (int): print a notice unless --quiet was passed.
    """
    import sys

    if verbose and '--quiet' not in sys.argv:
        print('dev reimport')
    # Self import.  Bug fixed: ``import wbia.guitool`` only binds the name
    # ``wbia``, so the bare ``guitool`` used below raised a NameError.
    from wbia import guitool

    # Implicit reassignment: copy each submodule's public attributes up here.
    seen_ = set()
    for tup in IMPORT_TUPLES:
        if len(tup) > 2 and tup[2]:
            continue  # dont import package names
        submodname, fromimports = tup[0:2]
        submod = getattr(guitool, submodname)
        for attr in dir(submod):
            if attr.startswith('_'):
                continue
            if attr in seen_:
                # This just holds off bad behavior
                # but it does mimic normal util_import behavior
                # which is good
                continue
            seen_.add(attr)
            setattr(guitool, attr, getattr(submod, attr))
def reload_subs(verbose=1):
    """ Reloads guitool and submodules """
    if verbose:
        print('Reloading guitool submodules')
    rrr(verbose > 1)

    def wrap_fbrrr(mod):
        # Factory producing a no-op fallback for modules without rrr().
        def fbrrr(*args, **kwargs):
            """ fallback reload """
            if verbose > 0:
                print('Auto-reload (using rrr) not setup for mod=%r' % (mod,))
        return fbrrr

    def get_rrr(mod):
        # Prefer the module's own rrr(); otherwise use the fallback.
        if hasattr(mod, 'rrr'):
            return mod.rrr
        else:
            return wrap_fbrrr(mod)

    def get_reload_subs(mod):
        # A submodule may itself expose reload_subs for deep reloading.
        return getattr(mod, 'reload_subs', wrap_fbrrr(mod))

    get_rrr(guitool_main)(verbose > 1)
    get_rrr(guitool_components)(verbose > 1)
    get_rrr(guitool_dialogs)(verbose > 1)
    get_rrr(guitool_decorators)(verbose > 1)
    get_rrr(guitool_misc)(verbose > 1)
    get_rrr(api_item_model)(verbose > 1)
    get_rrr(api_tree_view)(verbose > 1)
    get_rrr(api_table_view)(verbose > 1)
    get_rrr(qtype)(verbose > 1)
    get_rrr(stripe_proxy_model)(verbose > 1)
    get_rrr(filter_proxy_model)(verbose > 1)
    rrr(verbose > 1)
    try:
        # hackish way of propogating up the new reloaded submodule attributes
        reassign_submodule_attributes(verbose=verbose)
    except Exception as ex:
        print(ex)


rrrr = reload_subs
# </AUTOGEN_INIT>
| [
"wbia.guitool.guitool_decorators.rrr",
"wbia.guitool.guitool_misc.rrr",
"utool._internal.util_importer.dynamic_import",
"wbia.guitool.guitool_tables.rrr",
"wbia.guitool.guitool_dialogs.rrr",
"utool.inject2",
"utool.noinject",
"wbia.guitool.api_item_model.rrr",
"wbia.guitool.qtype.rrr",
"utool.get_... | [((98, 141), 'utool.noinject', 'ut.noinject', (['__name__', '"""[guitool.__init__]"""'], {}), "(__name__, '[guitool.__init__]')\n", (109, 141), True, 'import utool as ut\n'), ((1039, 1075), 'utool.inject2', 'utool.inject2', (['__name__', '"""[guitool]"""'], {}), "(__name__, '[guitool]')\n", (1052, 1075), False, 'import utool\n'), ((1085, 1110), 'logging.getLogger', 'logging.getLogger', (['"""wbia"""'], {}), "('wbia')\n", (1102, 1110), False, 'import logging\n'), ((2226, 2251), 'utool.get_argflag', 'ut.get_argflag', (['"""--nodyn"""'], {}), "('--nodyn')\n", (2240, 2251), True, 'import utool as ut\n'), ((2784, 2915), 'utool._internal.util_importer.dynamic_import', 'util_importer.dynamic_import', (['__name__', 'IMPORT_TUPLES'], {'ignore_endswith': 'ignore_endswith', 'ignore_list': 'ignore_list', 'verbose': '(False)'}), '(__name__, IMPORT_TUPLES, ignore_endswith=\n ignore_endswith, ignore_list=ignore_list, verbose=False)\n', (2812, 2915), False, 'from utool._internal import util_importer\n'), ((7893, 7929), 'utool.inject2', 'utool.inject2', (['__name__', '"""[guitool]"""'], {}), "(__name__, '[guitool]')\n", (7906, 7929), False, 'import utool\n'), ((1229, 1249), 'wbia.guitool.guitool_tables.rrr', 'guitool_tables.rrr', ([], {}), '()\n', (1247, 1249), False, 'from wbia.guitool import guitool_tables\n'), ((1298, 1319), 'wbia.guitool.guitool_dialogs.rrr', 'guitool_dialogs.rrr', ([], {}), '()\n', (1317, 1319), False, 'from wbia.guitool import guitool_dialogs\n'), ((1371, 1395), 'wbia.guitool.guitool_decorators.rrr', 'guitool_decorators.rrr', ([], {}), '()\n', (1393, 1395), False, 'from wbia.guitool import guitool_decorators\n'), ((1441, 1459), 'wbia.guitool.guitool_main.rrr', 'guitool_main.rrr', ([], {}), '()\n', (1457, 1459), False, 'from wbia.guitool import guitool_main\n'), ((1505, 1523), 'wbia.guitool.guitool_misc.rrr', 'guitool_misc.rrr', ([], {}), '()\n', (1521, 1523), False, 'from wbia.guitool import guitool_misc\n'), ((1571, 1591), 
'wbia.guitool.api_item_model.rrr', 'api_item_model.rrr', ([], {}), '()\n', (1589, 1591), False, 'from wbia.guitool import api_item_model\n'), ((1630, 1641), 'wbia.guitool.qtype.rrr', 'qtype.rrr', ([], {}), '()\n', (1639, 1641), False, 'from wbia.guitool import qtype\n'), ((1693, 1717), 'wbia.guitool.guitool_components.rrr', 'guitool_components.rrr', ([], {}), '()\n', (1715, 1717), False, 'from wbia.guitool import guitool_components\n')] |
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), ".."))
import setup_malcolm_paths
from collections import OrderedDict
import unittest
from mock import MagicMock, patch, call
# logging
# import logging
# logging.basicConfig(level=logging.DEBUG)
# module imports
from malcolm.controllers.defaultcontroller import DefaultController
from malcolm.core import Attribute, ClientController
from malcolm.core.vmetas import StringMeta, NumberMeta
from malcolm.compat import queue
from malcolm.parts.demo import HelloPart
class TestClientController(unittest.TestCase):
    """Tests for ClientController wired to a mocked process and comms layer."""
    def setUp(self):
        """Serialize a real source block, then attach a ClientController to mocks."""
        p = MagicMock()
        part = HelloPart(p, None)
        # Serialized version of the block we want
        source = DefaultController(
            "blockname", p, parts={"hello":part}).block
        self.serialized = source.to_dict()
        # Setup client controller prerequisites
        self.p = MagicMock()
        self.p.name = "process"
        self.comms = MagicMock()
        self.cc = ClientController("blockname", self.p)
        self.b = self.cc.block
        # get process to give us comms
        self.p.get_client_comms.return_value = self.comms
        # tell our controller which blocks the process can talk to
        response = MagicMock(id=self.cc.REMOTE_BLOCKS_ID, value=["blockname"])
        self.cc.put(response)
        # tell our controller the serialized state of the block
        response = MagicMock(id=self.cc.BLOCK_ID, changes=[[[], self.serialized]])
        self.cc.put(response)
    def test_init(self):
        """Controller subscribes to the process block list and to block deltas."""
        self.assertEqual(self.p.q.put.call_count, 1)
        # First subscription goes to the process for the remote block list.
        req = self.p.q.put.call_args[0][0]
        self.assertEqual(req.typeid, "malcolm:core/Subscribe:1.0")
        self.assertEqual(req.endpoint, [self.p.name, "remoteBlocks", "value"])
        self.assertEqual(req.response_queue, self.cc)
        self.p.get_client_comms.assert_called_with("blockname")
        self.assertEqual(self.comms.q.put.call_count, 1)
        # Second subscription goes to the comms layer for block deltas.
        req = self.comms.q.put.call_args[0][0]
        self.assertEqual(req.typeid, "malcolm:core/Subscribe:1.0")
        self.assertEqual(req.delta, True)
        self.assertEqual(req.response_queue, self.cc)
        self.assertEqual(req.endpoint, ["blockname"])
    def test_methods_created(self):
        """Remote block exposes the serialized attributes and the say_hello method."""
        self.assertEqual(list(self.b), [
            'meta', 'state', 'status', 'busy', 'disable', 'reset', 'say_hello'])
        m = self.b["say_hello"]
        self.assertEqual(list(m.takes.elements), ["name", "sleep"])
        self.assertEqual(type(m.takes.elements["name"]), StringMeta)
        self.assertEqual(type(m.takes.elements["sleep"]), NumberMeta)
        self.assertEqual(list(m.returns.elements), ["greeting"])
        self.assertEqual(type(m.returns.elements["greeting"]), StringMeta)
        self.assertEqual(m.defaults, dict(sleep=0))
    def test_call_method(self):
        """Calling a remote method round-trips through the comms request queue."""
        self.p.create_queue.return_value = queue.Queue()
        def f(request):
            # Fake server side: immediately answer the request with a greeting.
            request.respond_with_return(dict(
                greeting="Hello %s" % request.parameters["name"]))
        self.comms.q.put.side_effect = f
        ret = self.b.say_hello(name="me")
        self.assertEqual(ret.greeting, "Hello me")
    def test_put_update_response(self):
        """A delta targeting a child endpoint updates that attribute in place."""
        m = MagicMock(spec=Attribute)
        self.b.replace_endpoints(dict(child=m))
        response = MagicMock(
            id=self.cc.BLOCK_ID,
            changes=[[["child", "value"], "change"]])
        self.cc.put(response)
        m.set_value.assert_called_once_with("change", notify=False)
    def test_put_root_update_response(self):
        """A delta with an empty path replaces the whole block structure."""
        attr1 = StringMeta("dummy").make_attribute()
        attr2 = StringMeta("dummy2").make_attribute()
        new_block_structure = OrderedDict(typeid='malcolm:core/Block:1.0')
        new_block_structure["attr1"] = attr1.to_dict()
        new_block_structure["attr2"] = attr2.to_dict()
        response = MagicMock(
            id=self.cc.BLOCK_ID,
            changes=[[[], new_block_structure]])
        self.cc.put(response)
        self.assertEqual(self.b.to_dict(), new_block_structure)
if __name__ == "__main__":
unittest.main(verbosity=2)
| [
"unittest.main",
"malcolm.core.vmetas.StringMeta",
"os.path.dirname",
"malcolm.core.ClientController",
"malcolm.parts.demo.HelloPart",
"collections.OrderedDict",
"mock.MagicMock",
"malcolm.controllers.defaultcontroller.DefaultController",
"malcolm.compat.queue.Queue"
] | [((4114, 4140), 'unittest.main', 'unittest.main', ([], {'verbosity': '(2)'}), '(verbosity=2)\n', (4127, 4140), False, 'import unittest\n'), ((50, 75), 'os.path.dirname', 'os.path.dirname', (['__file__'], {}), '(__file__)\n', (65, 75), False, 'import os\n'), ((627, 638), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (636, 638), False, 'from mock import MagicMock, patch, call\n'), ((654, 672), 'malcolm.parts.demo.HelloPart', 'HelloPart', (['p', 'None'], {}), '(p, None)\n', (663, 672), False, 'from malcolm.parts.demo import HelloPart\n'), ((923, 934), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (932, 934), False, 'from mock import MagicMock, patch, call\n'), ((988, 999), 'mock.MagicMock', 'MagicMock', ([], {}), '()\n', (997, 999), False, 'from mock import MagicMock, patch, call\n'), ((1018, 1055), 'malcolm.core.ClientController', 'ClientController', (['"""blockname"""', 'self.p'], {}), "('blockname', self.p)\n", (1034, 1055), False, 'from malcolm.core import Attribute, ClientController\n'), ((1270, 1329), 'mock.MagicMock', 'MagicMock', ([], {'id': 'self.cc.REMOTE_BLOCKS_ID', 'value': "['blockname']"}), "(id=self.cc.REMOTE_BLOCKS_ID, value=['blockname'])\n", (1279, 1329), False, 'from mock import MagicMock, patch, call\n'), ((1443, 1506), 'mock.MagicMock', 'MagicMock', ([], {'id': 'self.cc.BLOCK_ID', 'changes': '[[[], self.serialized]]'}), '(id=self.cc.BLOCK_ID, changes=[[[], self.serialized]])\n', (1452, 1506), False, 'from mock import MagicMock, patch, call\n'), ((2910, 2923), 'malcolm.compat.queue.Queue', 'queue.Queue', ([], {}), '()\n', (2921, 2923), False, 'from malcolm.compat import queue\n'), ((3248, 3273), 'mock.MagicMock', 'MagicMock', ([], {'spec': 'Attribute'}), '(spec=Attribute)\n', (3257, 3273), False, 'from mock import MagicMock, patch, call\n'), ((3341, 3413), 'mock.MagicMock', 'MagicMock', ([], {'id': 'self.cc.BLOCK_ID', 'changes': "[[['child', 'value'], 'change']]"}), "(id=self.cc.BLOCK_ID, changes=[[['child', 'value'], 'change']])\n", 
(3350, 3413), False, 'from mock import MagicMock, patch, call\n'), ((3720, 3764), 'collections.OrderedDict', 'OrderedDict', ([], {'typeid': '"""malcolm:core/Block:1.0"""'}), "(typeid='malcolm:core/Block:1.0')\n", (3731, 3764), False, 'from collections import OrderedDict\n'), ((3894, 3961), 'mock.MagicMock', 'MagicMock', ([], {'id': 'self.cc.BLOCK_ID', 'changes': '[[[], new_block_structure]]'}), '(id=self.cc.BLOCK_ID, changes=[[[], new_block_structure]])\n', (3903, 3961), False, 'from mock import MagicMock, patch, call\n'), ((740, 796), 'malcolm.controllers.defaultcontroller.DefaultController', 'DefaultController', (['"""blockname"""', 'p'], {'parts': "{'hello': part}"}), "('blockname', p, parts={'hello': part})\n", (757, 796), False, 'from malcolm.controllers.defaultcontroller import DefaultController\n'), ((3599, 3618), 'malcolm.core.vmetas.StringMeta', 'StringMeta', (['"""dummy"""'], {}), "('dummy')\n", (3609, 3618), False, 'from malcolm.core.vmetas import StringMeta, NumberMeta\n'), ((3652, 3672), 'malcolm.core.vmetas.StringMeta', 'StringMeta', (['"""dummy2"""'], {}), "('dummy2')\n", (3662, 3672), False, 'from malcolm.core.vmetas import StringMeta, NumberMeta\n')] |
# Some implementation details regarding PyBullet:
#
# PyBullet's IK solver uses damped least squares (DLS) optimization. This is commonly
# known as Levenberg-Marquardt (LM) optimization.
from __future__ import annotations
import dataclasses
from typing import NamedTuple, Optional
import numpy as np
import pybullet as p
from dm_robotics.geometry.geometry import Pose
from dm_robotics.transformations import transformations as tr
from coffee.client import BulletClient, ClientConfig, ConnectionMode
from coffee.joints import Joints
from coffee.structs import LinkState
from coffee.utils import geometry_utils
class IKSolution(NamedTuple):
    """An IK solution returned by the IKSolver.
    Attributes:
        qpos: The joint configuration.
        linear_err: The linear error, in meters, between the solved pose and the
            target pose.
        angular_err: The angular error, in radians, between the solved pose and
            the target pose.
    """
    qpos: np.ndarray
    linear_err: float
    angular_err: float
@dataclasses.dataclass
class IKSolver:
    """Inverse kinematics solver.
    Computes a joint configuration that brings an element (in a kinematic chain) to a
    desired pose.
    Attributes:
        pb_client: Bullet client connected to the real simulation.
        joints: Joint metadata of the manipulator being solved for.
        ik_point_joint_id: Link index whose pose is driven to the target.
        joint_damping: Damping applied per joint in the DLS IK call.
        nullspace_reference: Preferred joint configuration; defaults to the
            center of the joint ranges (see __post_init__).
    """
    pb_client: BulletClient
    joints: Joints
    ik_point_joint_id: int
    joint_damping: float = 0.0
    nullspace_reference: Optional[np.ndarray] = None
    def __post_init__(self) -> None:
        """Set up defaults and a DIRECT-mode shadow client for FK/IK queries."""
        if self.nullspace_reference is None:
            # Midpoint of each joint's [lower, upper] range.
            self.nullspace_reference = 0.5 * np.sum(self.joints.joints_range, axis=1)
        # Dirty hack to get around pybullet's lack of support for computing FK given a
        # joint configuration as an argument.
        # See: https://github.com/bulletphysics/bullet3/issues/2603
        self._shadow_client = BulletClient.create(
            mode=ConnectionMode.DIRECT,
            config=ClientConfig(),
        )
        # Reload the same URDF (same kwargs as the real client) into the shadow sim.
        manipulator_kwargs = self.pb_client._body_cache[self.joints.body_id]
        shadow_body_id = self._shadow_client.load_urdf(**manipulator_kwargs)
        # Make sure the shadow robot is in the same world pose as the actual one.
        pos, quat = self.pb_client.getBasePositionAndOrientation(self.joints.body_id)
        self._shadow_client.resetBasePositionAndOrientation(
            bodyUniqueId=shadow_body_id,
            posObj=pos,
            ornObj=quat,
        )
        self._shadow_joints = Joints.from_body_id(shadow_body_id, self._shadow_client)
    def solve(
        self,
        ref_pose: Pose,
        linear_tol: float = 1e-3,
        angular_tol: float = 1e-3,
        max_steps: int = 100,
        num_attempts: int = 50,
        stop_on_first_successful_attempt: bool = False,
        inital_joint_configuration: Optional[np.ndarray] = None,
        nullspace_reference: Optional[np.ndarray] = None,
        verbose: bool = False,
    ) -> Optional[np.ndarray]:
        """Attempts to solve the inverse kinematics problem.
        This method computes a joint configuration that solves the IK problem. It
        returns None if no solution is found. If multiple solutions are
        found, it will return the one where the joints are closer to the
        `nullspace_reference`. If no `nullspace_reference is provided, it will use the
        center of the joint ranges as reference.
        Args:
            ref_pose: Target pose of the controlled element in Cartesian world frame.
            linear_tol: The linear tolerance, in meters, that determines if the solution
                found is valid.
            angular_tol: The angular tolerance, in radians, that determines if the
                solution found is valid.
            max_steps: Maximum number of iterations the underlying IK call may
                run per attempt (passed through as `maxNumIterations`).
            num_attempts: The number of different attempts the solver should do. For a
                given target pose, there exists an infinite number of possible
                solutions, having more attempts allows to compare different joint
                configurations. The solver will return the solution where the joints are
                closer to the `nullspace_reference`. Note that not all attempts are
                successful, and thus, having more attempts gives better chances of
                finding a correct solution.
            stop_on_first_successful_attempt: If true, the method will return the
                first solution that meets the tolerance criteria. If false, returns the
                solution where the joints are closer to `nullspace_reference`.
            inital_joint_configuration: A joint configuration that will be used for
                the first attempt. This can be useful in the case of a complex pose,
                a user could provide the initial guess that is close to the desired
                solution. If None, all the joints will be set to 0 for the first
                attempt.
            nullspace_reference: The desired joint configuration that is set as the
                nullspace goal. When the controlled element is in the desired pose, the
                solver will try and bring the joint configuration closer to the
                nullspace reference without moving the element. If no nullspace
                reference is provided, the center of the joint ranges is used as
                reference. Can be overriden in the `solve` method.
        Returns:
            The corresponding joint configuration if a solution is found, else None.
        Raises:
            ValueError: If the `nullspace_reference` does not have the correct length.
            ValueError: If the `inital_joint_configuration` does not have the correct
                length.
        """
        if nullspace_reference is None:
            nullspace_reference = self.nullspace_reference
        else:
            if len(nullspace_reference) != self.joints.dof:
                raise ValueError("nullspace_reference has an invalid length.")
        if inital_joint_configuration is None:
            inital_joint_configuration = self.joints.zeros_array()
        else:
            inital_joint_configuration = np.array(inital_joint_configuration)
            if len(inital_joint_configuration) != self.joints.dof:
                raise ValueError("inital_joint_configuration has an invalid length.")
        nullspace_jnt_qpos_min_err = np.inf
        sol_qpos = None
        success = False
        # Each iteration of this loop attempts to solve the inverse kinematics.
        # If a solution is found, it is compared to previous solutions.
        for attempt in range(num_attempts):
            # Use the user provided joint configuration for the first attempt.
            if attempt == 0:
                qpos_new = inital_joint_configuration
            else:
                # Randomize the initial joint configuration so that the IK can find
                # a different solution.
                qpos_new = np.random.uniform(
                    low=self.joints.joints_lower_limit,
                    high=self.joints.joints_upper_limit,
                )
            # Reset the joints to this configuration.
            for i, joint_id in enumerate(self._shadow_joints.controllable_joints):
                self._shadow_client.resetJointState(
                    self._shadow_joints.body_id,
                    joint_id,
                    qpos_new[i],
                )
            # Solve the IK.
            joint_qpos, linear_err, angular_err = self._solve_ik(
                ref_pose,
                max_steps,
                verbose,
            )
            # Check if the attempt was successful. The solution is saved if the
            # joints are closer to the nullspace reference.
            if linear_err <= linear_tol and angular_err <= angular_tol:
                success = True
                nullspace_jnt_qpos_err = float(
                    np.linalg.norm(joint_qpos - nullspace_reference)
                )
                if nullspace_jnt_qpos_err < nullspace_jnt_qpos_min_err:
                    nullspace_jnt_qpos_min_err = nullspace_jnt_qpos_err
                    sol_qpos = joint_qpos
            if verbose:
                print(
                    f"attempt: {attempt} "
                    f"- nullspace_jnt_qpos_min_err: {nullspace_jnt_qpos_min_err:.4f} "
                    f"- success: {success}"
                )
            if success and stop_on_first_successful_attempt:
                break
        if not success:
            print(f"Unable to solve inverse kinematics for ref_pose: {ref_pose}")
        else:
            if verbose:
                print(f"Found a solution in {attempt} attempts.")
        return sol_qpos
    def _solve_ik(
        self,
        ref_pose: Pose,
        max_steps: int,
        verbose: bool,
    ) -> IKSolution:
        """Finds a joint configuration that brings element pose to target pose."""
        try:
            qpos = self._shadow_client.calculateInverseKinematics(
                bodyUniqueId=self._shadow_joints.body_id,
                endEffectorLinkIndex=self.ik_point_joint_id,
                targetPosition=ref_pose.position,
                targetOrientation=geometry_utils.as_quaternion_xyzw(
                    ref_pose.quaternion
                ),
                residualThreshold=1e-5,
                maxNumIterations=max_steps,
                jointDamping=self._shadow_joints.const_array(
                    self.joint_damping
                ).tolist(),
            )
            # pybullet can return NaNs instead of raising; treat as failure.
            if np.isnan(np.sum(qpos)):
                qpos = None
            else:
                # Clip to joint limits.
                qpos = np.clip(
                    a=qpos,
                    a_min=self._shadow_joints.joints_lower_limit,
                    a_max=self._shadow_joints.joints_upper_limit,
                )
        except p.error as e:
            if verbose:
                print(f"IK failed with error message: {e}")
            qpos = None
        # If we were unable to find a solution, exit early.
        if qpos is None:
            return IKSolution(np.empty(self._shadow_joints.dof), np.inf, np.inf)
        # If we found a solution, we compute its associated pose and compare with the
        # target pose. We do this by first using forward kinematics to compute the
        # pose of the controlled element associated with the solution and then computing
        # linear and angular errors.
        # Forward kinematics.
        for i, joint_id in enumerate(self._shadow_joints.controllable_joints):
            self._shadow_client.resetJointState(
                self._shadow_joints.body_id,
                joint_id,
                qpos[i],
            )
        cur_pose = self.forward_kinematics(shadow=True)
        # Error computation.
        linear_err = float(np.linalg.norm(ref_pose.position - cur_pose.position))
        err_quat = tr.quat_diff_active(ref_pose.quaternion, cur_pose.quaternion)
        err_axis_angle = tr.quat_to_axisangle(err_quat)
        angular_err = float(np.linalg.norm(err_axis_angle))
        return IKSolution(np.array(qpos), linear_err, angular_err)
    def forward_kinematics(self, shadow: bool = False) -> Pose:
        """Return the world-frame pose of the IK point link.
        Args:
            shadow: If True, query the shadow (DIRECT) client instead of the
                real simulation client.
        """
        if shadow:
            eef_link_state = LinkState(
                *self._shadow_client.getLinkState(
                    bodyUniqueId=self._shadow_joints.body_id,
                    linkIndex=self.ik_point_joint_id,
                    computeLinkVelocity=0,
                    computeForwardKinematics=True,
                )
            )
        else:
            eef_link_state = LinkState(
                *self.pb_client.getLinkState(
                    bodyUniqueId=self.joints.body_id,
                    linkIndex=self.ik_point_joint_id,
                    computeLinkVelocity=0,
                    computeForwardKinematics=True,
                )
            )
        return Pose(
            position=eef_link_state.link_world_position,
            quaternion=geometry_utils.as_quaternion_wxyz(
                eef_link_state.link_world_orientation
            ),
        )
| [
"numpy.random.uniform",
"coffee.client.ClientConfig",
"numpy.sum",
"coffee.joints.Joints.from_body_id",
"numpy.empty",
"dm_robotics.transformations.transformations.quat_diff_active",
"numpy.clip",
"coffee.utils.geometry_utils.as_quaternion_wxyz",
"coffee.utils.geometry_utils.as_quaternion_xyzw",
"... | [((2359, 2415), 'coffee.joints.Joints.from_body_id', 'Joints.from_body_id', (['shadow_body_id', 'self._shadow_client'], {}), '(shadow_body_id, self._shadow_client)\n', (2378, 2415), False, 'from coffee.joints import Joints\n'), ((10821, 10882), 'dm_robotics.transformations.transformations.quat_diff_active', 'tr.quat_diff_active', (['ref_pose.quaternion', 'cur_pose.quaternion'], {}), '(ref_pose.quaternion, cur_pose.quaternion)\n', (10840, 10882), True, 'from dm_robotics.transformations import transformations as tr\n'), ((10908, 10938), 'dm_robotics.transformations.transformations.quat_to_axisangle', 'tr.quat_to_axisangle', (['err_quat'], {}), '(err_quat)\n', (10928, 10938), True, 'from dm_robotics.transformations import transformations as tr\n'), ((6023, 6059), 'numpy.array', 'np.array', (['inital_joint_configuration'], {}), '(inital_joint_configuration)\n', (6031, 6059), True, 'import numpy as np\n'), ((10747, 10800), 'numpy.linalg.norm', 'np.linalg.norm', (['(ref_pose.position - cur_pose.position)'], {}), '(ref_pose.position - cur_pose.position)\n', (10761, 10800), True, 'import numpy as np\n'), ((10967, 10997), 'numpy.linalg.norm', 'np.linalg.norm', (['err_axis_angle'], {}), '(err_axis_angle)\n', (10981, 10997), True, 'import numpy as np\n'), ((11026, 11040), 'numpy.array', 'np.array', (['qpos'], {}), '(qpos)\n', (11034, 11040), True, 'import numpy as np\n'), ((1466, 1506), 'numpy.sum', 'np.sum', (['self.joints.joints_range'], {'axis': '(1)'}), '(self.joints.joints_range, axis=1)\n', (1472, 1506), True, 'import numpy as np\n'), ((1819, 1833), 'coffee.client.ClientConfig', 'ClientConfig', ([], {}), '()\n', (1831, 1833), False, 'from coffee.client import BulletClient, ClientConfig, ConnectionMode\n'), ((6834, 6929), 'numpy.random.uniform', 'np.random.uniform', ([], {'low': 'self.joints.joints_lower_limit', 'high': 'self.joints.joints_upper_limit'}), '(low=self.joints.joints_lower_limit, high=self.joints.\n joints_upper_limit)\n', (6851, 6929), True, 'import 
numpy as np\n'), ((9454, 9466), 'numpy.sum', 'np.sum', (['qpos'], {}), '(qpos)\n', (9460, 9466), True, 'import numpy as np\n'), ((9578, 9690), 'numpy.clip', 'np.clip', ([], {'a': 'qpos', 'a_min': 'self._shadow_joints.joints_lower_limit', 'a_max': 'self._shadow_joints.joints_upper_limit'}), '(a=qpos, a_min=self._shadow_joints.joints_lower_limit, a_max=self.\n _shadow_joints.joints_upper_limit)\n', (9585, 9690), True, 'import numpy as np\n'), ((10018, 10051), 'numpy.empty', 'np.empty', (['self._shadow_joints.dof'], {}), '(self._shadow_joints.dof)\n', (10026, 10051), True, 'import numpy as np\n'), ((11920, 11992), 'coffee.utils.geometry_utils.as_quaternion_wxyz', 'geometry_utils.as_quaternion_wxyz', (['eef_link_state.link_world_orientation'], {}), '(eef_link_state.link_world_orientation)\n', (11953, 11992), False, 'from coffee.utils import geometry_utils\n'), ((7804, 7852), 'numpy.linalg.norm', 'np.linalg.norm', (['(joint_qpos - nullspace_reference)'], {}), '(joint_qpos - nullspace_reference)\n', (7818, 7852), True, 'import numpy as np\n'), ((9109, 9163), 'coffee.utils.geometry_utils.as_quaternion_xyzw', 'geometry_utils.as_quaternion_xyzw', (['ref_pose.quaternion'], {}), '(ref_pose.quaternion)\n', (9142, 9163), False, 'from coffee.utils import geometry_utils\n')] |
from qcrash.backends.base import BaseBackend
from qcrash.formatters.email import EmailFormatter
import pytest
def test_qsettings():
    """A freshly constructed backend must expose a usable qsettings object."""
    backend = BaseBackend(None, '', '', None)
    settings = backend.qsettings()
    assert settings is not None
def test_set_formatter():
    """set_formatter replaces the initially unset formatter."""
    backend = BaseBackend(None, '', '', None)
    assert backend.formatter is None
    formatter = EmailFormatter("test")
    backend.set_formatter(formatter)
    assert isinstance(backend.formatter, EmailFormatter)
def test_send_report():
    """The abstract base backend must refuse to send a report."""
    backend = BaseBackend(None, '', '', None)
    with pytest.raises(NotImplementedError):
        backend.send_report('', '')
| [
"qcrash.formatters.email.EmailFormatter",
"pytest.raises",
"qcrash.backends.base.BaseBackend"
] | [((143, 174), 'qcrash.backends.base.BaseBackend', 'BaseBackend', (['None', '""""""', '""""""', 'None'], {}), "(None, '', '', None)\n", (154, 174), False, 'from qcrash.backends.base import BaseBackend\n'), ((248, 279), 'qcrash.backends.base.BaseBackend', 'BaseBackend', (['None', '""""""', '""""""', 'None'], {}), "(None, '', '', None)\n", (259, 279), False, 'from qcrash.backends.base import BaseBackend\n'), ((440, 471), 'qcrash.backends.base.BaseBackend', 'BaseBackend', (['None', '""""""', '""""""', 'None'], {}), "(None, '', '', None)\n", (451, 471), False, 'from qcrash.backends.base import BaseBackend\n'), ((331, 353), 'qcrash.formatters.email.EmailFormatter', 'EmailFormatter', (['"""test"""'], {}), "('test')\n", (345, 353), False, 'from qcrash.formatters.email import EmailFormatter\n'), ((481, 515), 'pytest.raises', 'pytest.raises', (['NotImplementedError'], {}), '(NotImplementedError)\n', (494, 515), False, 'import pytest\n')] |
import sys
sys.path.insert(0,'./shield/enableConfigure/lambda')
import json
import boto3
import os
import botocore
import urllib3
import cfnresponse
import logging
# Module-level logger and AWS clients, created once per Lambda container.
logger = logging.getLogger('hc')
logger.setLevel('DEBUG')
shield_client = boto3.client('shield')
iam_client = boto3.client('iam')
s3_client = boto3.client('s3')
#Get Shield Config Values and Options
# NOTE(review): os.environ values are always strings; any numeric comparison
# downstream must convert them first.
enabledProactiveEngagement = os.environ['EnabledProactiveEngagement']
enableDRTAccess = os.environ['EnableDRTAccess']
emergencyContactCount = os.environ['EmergencyContactCount']
accountId = os.environ['AccountId']
#Build Emergency Contact List
def lambda_handler(event, context):
    """Enable and configure AWS Shield Advanced from a CloudFormation event.

    For Create/Update requests: subscribes to Shield Advanced, ensures the
    AWSSRTAccess IAM role and its policy exist, (dis)associates the SRT role
    per ``enableDRTAccess``, registers emergency contacts, and toggles
    proactive engagement per ``enabledProactiveEngagement``. Delete requests
    respond SUCCESS without tearing anything down. Every failure path sends a
    FAILED cfnresponse so the stack never hangs.
    """
    logger.debug(event)
    responseData = {}
    if "RequestType" in event:
        if event['RequestType'] in ['Create','Update']:
            try:
                shield_client.create_subscription()
                logger.info ("Shield Enabled!")
            except botocore.exceptions.ClientError as error:
                # An already-active subscription is fine; anything else is fatal.
                if error.response['Error']['Code'] == 'ResourceAlreadyExistsException':
                    logger.info ("Subscription already active")
                else:
                    logger.error(error.response['Error'])
                    responseData['Error'] = error.response['Error']
                    cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "SubscribeFailed")
                    return ()
        else:
            # Delete (or unknown) request: succeed gracefully so the stack proceeds.
            responseData = {}
            cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "CFNDeleteGracefulContinue")
            return ()
    # Build the emergency contact list from the environment.
    try:
        emergencyContactList = []
        emergencyContactList.append({
            "EmailAddress": os.environ['EmergencyContactEmail1'],
            "PhoneNumber": os.environ['EmergencyContactPhone1']
        })
        # BUG FIX: emergencyContactCount is an env-var *string*, so the original
        # comparison ``emergencyContactCount == 2`` was always False and the
        # second contact was never registered. Compare as an integer.
        if int(emergencyContactCount) == 2:
            emergencyContactList.append({
                "EmailAddress": os.environ['EmergencyContactEmail2'],
                "PhoneNumber": os.environ['EmergencyContactPhone2']
            })
    except (KeyError, ValueError) as error:
        responseData = {}
        # BUG FIX: concatenating the exception object itself raised TypeError;
        # stringify it before building the message.
        responseData['Error'] = "KeyError for: " + str(error)
        cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "BuildContactListFailed")
        return ()
    #Activate Shield Subscription
    #Create DRT Role if needed
    try:
        iam_role_response = iam_client.get_role(
            RoleName='AWSSRTAccess'
        )
        roleArn = iam_role_response['Role']['Arn']
        logger.debug ("AWS SRTAccess already exists")
    except botocore.exceptions.ClientError as error:
        if error.response['Error']['Code'] == 'NoSuchEntity':
            # Role is missing: create it with a trust policy for the SRT service.
            try:
                iam_role_response = iam_client.create_role(
                    RoleName='AWSSRTAccess',
                    AssumeRolePolicyDocument='{"Version":"2012-10-17","Statement":[{"Sid":"","Effect":"Allow","Principal":{"Service":"drt.shield.amazonaws.com"},"Action":"sts:AssumeRole"}]}',
                    MaxSessionDuration=3600,
                )
                roleArn = iam_role_response['Role']['Arn']
            # BUG FIX: the bare ``except`` logged the *outer* get_role error
            # instead of the create_role failure; catch and report the actual
            # creation error.
            except botocore.exceptions.ClientError as create_error:
                logger.error(create_error.response['Error'])
                responseData['Error'] = create_error.response['Error']
                cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "CreateDRTRoleFailed")
                return ()
        else:
            logger.error(error.response['Error'])
            responseData['Error'] = error.response['Error']
            cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "SRTRolePolicyConfigFailed")
            return ()
    #Ensure DRT Policy Attached to Role
    try:
        logger.info("Listing attached role policies for AWSSRTAccess role.")
        iam_response = iam_client.list_attached_role_policies(
            RoleName='AWSSRTAccess'
        )
        policyList = []
        for p in iam_response['AttachedPolicies']:
            policyList.append(p['PolicyName'])
        if 'AWSShieldDRTAccessPolicy' not in policyList:
            logger.info("Required Policy not attached to role, attaching")
            response = iam_client.attach_role_policy(
                RoleName='AWSSRTAccess',
                PolicyArn='arn:aws:iam::aws:policy/service-role/AWSShieldDRTAccessPolicy'
            )
        else:
            logger.debug ("Required Policy Already attached")
    except botocore.exceptions.ClientError as error:
        logger.error(error)
        responseData['Error'] = error.response['Error']
        cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "SRTRolePolicyConfigFailed")
        return ()
    if enableDRTAccess == 'true':
        try:
            logger.info("Associating DRT role.")
            shield_response = shield_client.associate_drt_role(
                RoleArn=roleArn
            )
        except botocore.exceptions.ClientError as error:
            logger.error(error)
            responseData['Error'] = error.response['Error']
            cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "SRTEnablementFailed")
            return ()
    else:
        try:
            # Only disassociate when a role is currently associated.
            logger.info("Describing DRT access.")
            shield_drt_response = shield_client.describe_drt_access()
            if 'RoleArn' in shield_drt_response:
                logger.info("Disassociating DRT role.")
                shield_drt_response = shield_client.disassociate_drt_role()
        except botocore.exceptions.ClientError as error:
            logger.error(error)
            responseData['Error'] = error.response['Error']
            cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "SRTDisableFailed")
            return ()
    try:
        logger.info("Updating emergency contact settings.")
        shield_response = shield_client.update_emergency_contact_settings(
            EmergencyContactList=emergencyContactList
        )
        logger.debug(shield_response)
    except botocore.exceptions.ClientError as error:
        logger.error(error)
        responseData['Error'] = error.response['Error']
        cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "EmergencyContactUpdateFailed")
        return ()
    if enabledProactiveEngagement == 'true':
        try:
            logger.info("Enabling proactive engagement.")
            shield_response = shield_client.enable_proactive_engagement()
            logger.info("Associating proactive engagement details.")
            shield_client.associate_proactive_engagement_details(
                EmergencyContactList=emergencyContactList)
        except botocore.exceptions.ClientError as error:
            # Already-enabled or parameter complaints are treated as non-fatal.
            if error.response['Error']['Code'] == 'InvalidOperationException':
                logger.info("ProactiveEngagementAlreadyEnabled")
            elif error.response['Error']['Code'] == 'InvalidParameterException':
                logger.info("Error Enabling Proactive Support, continue regardless")
            else:
                logger.error(error)
                responseData['Error'] = error.response['Error']
                cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "ProactiveEngagementEnableFailed")
                return ()
    else:
        try:
            logger.info("Disabling proactive engagement.")
            shield_response = shield_client.disable_proactive_engagement()
        except botocore.exceptions.ClientError as error:
            logger.error(error)
            responseData['Error'] = error.response['Error']
            cfnresponse.send(event, context, cfnresponse.FAILED, responseData, "ProactiveEngagementEnableFailed")
            return ()
    responseData = {}
    cfnresponse.send(event, context, cfnresponse.SUCCESS, responseData, "ConfigureShieldAdvancedSucceesful")
    return ()
| [
"cfnresponse.send",
"sys.path.insert",
"logging.getLogger",
"boto3.client"
] | [((11, 64), 'sys.path.insert', 'sys.path.insert', (['(0)', '"""./shield/enableConfigure/lambda"""'], {}), "(0, './shield/enableConfigure/lambda')\n", (26, 64), False, 'import sys\n'), ((174, 197), 'logging.getLogger', 'logging.getLogger', (['"""hc"""'], {}), "('hc')\n", (191, 197), False, 'import logging\n'), ((240, 262), 'boto3.client', 'boto3.client', (['"""shield"""'], {}), "('shield')\n", (252, 262), False, 'import boto3\n'), ((276, 295), 'boto3.client', 'boto3.client', (['"""iam"""'], {}), "('iam')\n", (288, 295), False, 'import boto3\n'), ((308, 326), 'boto3.client', 'boto3.client', (['"""s3"""'], {}), "('s3')\n", (320, 326), False, 'import boto3\n'), ((7722, 7830), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.SUCCESS', 'responseData', '"""ConfigureShieldAdvancedSucceesful"""'], {}), "(event, context, cfnresponse.SUCCESS, responseData,\n 'ConfigureShieldAdvancedSucceesful')\n", (7738, 7830), False, 'import cfnresponse\n'), ((1449, 1549), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.SUCCESS', 'responseData', '"""CFNDeleteGracefulContinue"""'], {}), "(event, context, cfnresponse.SUCCESS, responseData,\n 'CFNDeleteGracefulContinue')\n", (1465, 1549), False, 'import cfnresponse\n'), ((2152, 2248), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.FAILED', 'responseData', '"""BuildContactListFailed"""'], {}), "(event, context, cfnresponse.FAILED, responseData,\n 'BuildContactListFailed')\n", (2168, 2248), False, 'import cfnresponse\n'), ((4534, 4633), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.FAILED', 'responseData', '"""SRTRolePolicyConfigFailed"""'], {}), "(event, context, cfnresponse.FAILED, responseData,\n 'SRTRolePolicyConfigFailed')\n", (4550, 4633), False, 'import cfnresponse\n'), ((6122, 6224), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.FAILED', 'responseData', '"""EmergencyContactUpdateFailed"""'], {}), 
"(event, context, cfnresponse.FAILED, responseData,\n 'EmergencyContactUpdateFailed')\n", (6138, 6224), False, 'import cfnresponse\n'), ((3499, 3598), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.FAILED', 'responseData', '"""SRTRolePolicyConfigFailed"""'], {}), "(event, context, cfnresponse.FAILED, responseData,\n 'SRTRolePolicyConfigFailed')\n", (3515, 3598), False, 'import cfnresponse\n'), ((5020, 5113), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.FAILED', 'responseData', '"""SRTEnablementFailed"""'], {}), "(event, context, cfnresponse.FAILED, responseData,\n 'SRTEnablementFailed')\n", (5036, 5113), False, 'import cfnresponse\n'), ((5617, 5707), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.FAILED', 'responseData', '"""SRTDisableFailed"""'], {}), "(event, context, cfnresponse.FAILED, responseData,\n 'SRTDisableFailed')\n", (5633, 5707), False, 'import cfnresponse\n'), ((7571, 7676), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.FAILED', 'responseData', '"""ProactiveEngagementEnableFailed"""'], {}), "(event, context, cfnresponse.FAILED, responseData,\n 'ProactiveEngagementEnableFailed')\n", (7587, 7676), False, 'import cfnresponse\n'), ((1277, 1366), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.FAILED', 'responseData', '"""SubscribeFailed"""'], {}), "(event, context, cfnresponse.FAILED, responseData,\n 'SubscribeFailed')\n", (1293, 1366), False, 'import cfnresponse\n'), ((3247, 3340), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.FAILED', 'responseData', '"""CreateDRTRoleFailed"""'], {}), "(event, context, cfnresponse.FAILED, responseData,\n 'CreateDRTRoleFailed')\n", (3263, 3340), False, 'import cfnresponse\n'), ((7125, 7230), 'cfnresponse.send', 'cfnresponse.send', (['event', 'context', 'cfnresponse.FAILED', 'responseData', '"""ProactiveEngagementEnableFailed"""'], {}), "(event, context, 
cfnresponse.FAILED, responseData,\n 'ProactiveEngagementEnableFailed')\n", (7141, 7230), False, 'import cfnresponse\n')] |
from django.contrib import admin
from .models import Bar
@admin.register(Bar)
class BarAdmin(admin.ModelAdmin):
pass
| [
"django.contrib.admin.register"
] | [((60, 79), 'django.contrib.admin.register', 'admin.register', (['Bar'], {}), '(Bar)\n', (74, 79), False, 'from django.contrib import admin\n')] |
import os
import json
import shutil
import argparse
import numpy as np
from PIL import Image
def getSeqInfo(dataset_dir, seq):
ann_dir = os.path.join(dataset_dir, 'Annotations', '480p')
seq_path = os.path.join(ann_dir, seq)
frame_list = os.listdir(seq_path)
frame_num = len(frame_list)
frames = os.listdir(os.path.join(ann_dir, seq))
masks = np.stack([np.array(Image.open(os.path.join(ann_dir, seq, f)).convert('P'), dtype=np.uint8) for f in frames])
img_size = [masks.shape[1], masks.shape[0]]
obj_ids = np.delete(np.unique(masks), 0)
return frame_num, img_size, len(obj_ids)
def create_json(root_dir):
val_txt_dst = os.path.join(root_dir, 'ImageSets', '2017', 'val.txt')
with open(val_txt_dst, 'r') as f:
val_seqs = f.readlines()
f.close()
val_seqs = list(map(lambda elem: elem.strip(), val_seqs))
# create davis.json
'''Generate global json'''
json_dict = dict()
json_dict['attributes'] = []
json_dict['sets'] = ["train", "val"]
json_dict['years'] = [2018]
json_dict['sequences'] = dict()
for idx, seq in enumerate(val_seqs):
seq = seq.strip()
seq_dict = {'attributes': [], 'eval_t': True, 'name': seq, 'set': 'val', 'year': 2018, 'num_scribbles': 3}
seq_dict['num_frames'], seq_dict['image_size'], seq_dict['num_objects'] = getSeqInfo(root_dir, seq)
json_dict['sequences'][seq] = seq_dict
print(f'valid: {idx+1}')
global_json_path = os.path.join(root_dir, 'scb_ytbvos.json')
with open(global_json_path, 'wt') as f:
json.dump(json_dict, f, indent=2, separators=(',', ': '))
def create_dataset(src_ytbvos_path, dst_ytbvos_path, scb_ytbvos_path):
if os.path.exists(src_ytbvos_path):
os.makedirs(dst_ytbvos_path, exist_ok=True)
# set youtube original path
src_dir_JPEGImages = os.path.join(src_ytbvos_path, 'train', 'JPEGImages')
src_dir_Annotations = os.path.join(src_ytbvos_path, 'train', 'CleanedAnnotations')
# set youtube davis-like path
dst_dir_ImageSets = os.path.join(dst_ytbvos_path, 'ImageSets', '2017')
dst_dir_JPEGImages = os.path.join(dst_ytbvos_path, 'JPEGImages', '480p')
dst_dir_Annotations = os.path.join(dst_ytbvos_path, 'Annotations', '480p')
dst_dir_Scribbles = os.path.join(dst_ytbvos_path, 'Scribbles')
if os.path.isdir(src_dir_JPEGImages) and os.path.isdir(src_dir_Annotations) and os.path.isdir(scb_ytbvos_path):
# load sequence list
assert len(os.listdir(src_dir_JPEGImages)) == len(os.listdir(src_dir_Annotations))
with open(os.path.join(scb_ytbvos_path, 'val.txt'), 'r') as f:
seqs_list = f.readlines()
f.close()
seqs_list = list(map(lambda elem: elem.strip(), seqs_list))
else:
if not os.path.isdir(src_dir_JPEGImages): print(f"{src_dir_JPEGImages} is not found in {src_ytbvos_path}")
if not os.path.isdir(src_dir_Annotations): print(f"{src_dir_Annotations} is not found in {src_ytbvos_path}")
if not os.path.isdir(scb_ytbvos_path): print(f"{scb_ytbvos_path} is not found")
return
# create dist dirs
os.makedirs(dst_dir_ImageSets, exist_ok=True)
os.makedirs(dst_dir_JPEGImages, exist_ok=True)
os.makedirs(dst_dir_Annotations, exist_ok=True)
os.makedirs(dst_dir_Scribbles, exist_ok=True)
# --- copy files ---
# ImageSets
shutil.copyfile(os.path.join(scb_ytbvos_path, 'val.txt'), os.path.join(dst_dir_ImageSets, 'val.txt'))
len_seq = []
for i, seq in enumerate(seqs_list):
print(f"validation set {i+1}")
# JPEGImages
src_dir_JPEGImages_seq = os.path.join(src_dir_JPEGImages, seq)
dst_dir_JPEGImages_seq = os.path.join(dst_dir_JPEGImages, seq)
os.makedirs(dst_dir_JPEGImages_seq, exist_ok=True)
file_name = np.sort(os.listdir(src_dir_JPEGImages_seq))
for j, file in enumerate(file_name):
src_path = os.path.join(src_dir_JPEGImages_seq, file)
dst_path = os.path.join(dst_dir_JPEGImages_seq, f"{str(j).zfill(5)}.jpg")
if not os.path.exists(dst_path): shutil.copyfile(src_path, dst_path)
# if not os.path.exists(dst_path): os.symlink(src_path, dst_path)
# Annotations
src_dir_Annotations_seq = os.path.join(src_dir_Annotations, seq)
dst_dir_Annotations_seq = os.path.join(dst_dir_Annotations, seq)
os.makedirs(dst_dir_Annotations_seq, exist_ok=True)
file_name = np.sort(os.listdir(src_dir_Annotations_seq))
for j, file in enumerate(file_name):
src_path = os.path.join(src_dir_Annotations_seq, file)
dst_path = os.path.join(dst_dir_Annotations_seq, f"{str(j).zfill(5)}.png")
if not os.path.exists(dst_path): shutil.copyfile(src_path, dst_path)
# if not os.path.exists(dst_path): os.symlink(src_path, dst_path)
# Scribbles
src_dir_Scribbles_seq = os.path.join(scb_ytbvos_path, seq)
dst_dir_Scribbles_seq = os.path.join(dst_dir_Scribbles, seq)
os.makedirs(dst_dir_Scribbles_seq, exist_ok=True)
file_name = np.sort(os.listdir(src_dir_Scribbles_seq))
for j, file in enumerate(file_name):
src_path = os.path.join(src_dir_Scribbles_seq, file)
dst_path = os.path.join(dst_dir_Scribbles_seq, file)
if not os.path.exists(dst_path): shutil.copyfile(src_path, dst_path)
# statistic
file_name = np.sort(os.listdir(src_dir_JPEGImages_seq))
len_seq.append(len(file_name))
# create sequences information
create_json(dst_ytbvos_path)
print(f"done")
else:
print(f"{src_ytbvos_path} not existed")
def main():
parser = argparse.ArgumentParser()
parser.add_argument('--src', type=str, required=True)
parser.add_argument('--scb', type=str, required=True)
parser.add_argument('--dst', type=str, default='data/Scribble_Youtube_VOS')
args = parser.parse_args()
src_ytbvos_path = args.src
dst_ytbvos_path = args.dst
scb_ytbvos_path = args.scb
create_dataset(src_ytbvos_path, dst_ytbvos_path, scb_ytbvos_path)
if __name__ == '__main__':
main() | [
"json.dump",
"argparse.ArgumentParser",
"os.makedirs",
"os.path.isdir",
"os.path.exists",
"shutil.copyfile",
"os.path.join",
"os.listdir",
"numpy.unique"
] | [((143, 191), 'os.path.join', 'os.path.join', (['dataset_dir', '"""Annotations"""', '"""480p"""'], {}), "(dataset_dir, 'Annotations', '480p')\n", (155, 191), False, 'import os\n'), ((207, 233), 'os.path.join', 'os.path.join', (['ann_dir', 'seq'], {}), '(ann_dir, seq)\n', (219, 233), False, 'import os\n'), ((251, 271), 'os.listdir', 'os.listdir', (['seq_path'], {}), '(seq_path)\n', (261, 271), False, 'import os\n'), ((664, 718), 'os.path.join', 'os.path.join', (['root_dir', '"""ImageSets"""', '"""2017"""', '"""val.txt"""'], {}), "(root_dir, 'ImageSets', '2017', 'val.txt')\n", (676, 718), False, 'import os\n'), ((1486, 1527), 'os.path.join', 'os.path.join', (['root_dir', '"""scb_ytbvos.json"""'], {}), "(root_dir, 'scb_ytbvos.json')\n", (1498, 1527), False, 'import os\n'), ((1719, 1750), 'os.path.exists', 'os.path.exists', (['src_ytbvos_path'], {}), '(src_ytbvos_path)\n', (1733, 1750), False, 'import os\n'), ((5977, 6002), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {}), '()\n', (6000, 6002), False, 'import argparse\n'), ((329, 355), 'os.path.join', 'os.path.join', (['ann_dir', 'seq'], {}), '(ann_dir, seq)\n', (341, 355), False, 'import os\n'), ((550, 566), 'numpy.unique', 'np.unique', (['masks'], {}), '(masks)\n', (559, 566), True, 'import numpy as np\n'), ((1580, 1637), 'json.dump', 'json.dump', (['json_dict', 'f'], {'indent': '(2)', 'separators': "(',', ': ')"}), "(json_dict, f, indent=2, separators=(',', ': '))\n", (1589, 1637), False, 'import json\n'), ((1760, 1803), 'os.makedirs', 'os.makedirs', (['dst_ytbvos_path'], {'exist_ok': '(True)'}), '(dst_ytbvos_path, exist_ok=True)\n', (1771, 1803), False, 'import os\n'), ((1870, 1922), 'os.path.join', 'os.path.join', (['src_ytbvos_path', '"""train"""', '"""JPEGImages"""'], {}), "(src_ytbvos_path, 'train', 'JPEGImages')\n", (1882, 1922), False, 'import os\n'), ((1953, 2013), 'os.path.join', 'os.path.join', (['src_ytbvos_path', '"""train"""', '"""CleanedAnnotations"""'], {}), "(src_ytbvos_path, 
'train', 'CleanedAnnotations')\n", (1965, 2013), False, 'import os\n'), ((2081, 2131), 'os.path.join', 'os.path.join', (['dst_ytbvos_path', '"""ImageSets"""', '"""2017"""'], {}), "(dst_ytbvos_path, 'ImageSets', '2017')\n", (2093, 2131), False, 'import os\n'), ((2161, 2212), 'os.path.join', 'os.path.join', (['dst_ytbvos_path', '"""JPEGImages"""', '"""480p"""'], {}), "(dst_ytbvos_path, 'JPEGImages', '480p')\n", (2173, 2212), False, 'import os\n'), ((2243, 2295), 'os.path.join', 'os.path.join', (['dst_ytbvos_path', '"""Annotations"""', '"""480p"""'], {}), "(dst_ytbvos_path, 'Annotations', '480p')\n", (2255, 2295), False, 'import os\n'), ((2324, 2366), 'os.path.join', 'os.path.join', (['dst_ytbvos_path', '"""Scribbles"""'], {}), "(dst_ytbvos_path, 'Scribbles')\n", (2336, 2366), False, 'import os\n'), ((3229, 3274), 'os.makedirs', 'os.makedirs', (['dst_dir_ImageSets'], {'exist_ok': '(True)'}), '(dst_dir_ImageSets, exist_ok=True)\n', (3240, 3274), False, 'import os\n'), ((3283, 3329), 'os.makedirs', 'os.makedirs', (['dst_dir_JPEGImages'], {'exist_ok': '(True)'}), '(dst_dir_JPEGImages, exist_ok=True)\n', (3294, 3329), False, 'import os\n'), ((3338, 3385), 'os.makedirs', 'os.makedirs', (['dst_dir_Annotations'], {'exist_ok': '(True)'}), '(dst_dir_Annotations, exist_ok=True)\n', (3349, 3385), False, 'import os\n'), ((3394, 3439), 'os.makedirs', 'os.makedirs', (['dst_dir_Scribbles'], {'exist_ok': '(True)'}), '(dst_dir_Scribbles, exist_ok=True)\n', (3405, 3439), False, 'import os\n'), ((2379, 2412), 'os.path.isdir', 'os.path.isdir', (['src_dir_JPEGImages'], {}), '(src_dir_JPEGImages)\n', (2392, 2412), False, 'import os\n'), ((2417, 2451), 'os.path.isdir', 'os.path.isdir', (['src_dir_Annotations'], {}), '(src_dir_Annotations)\n', (2430, 2451), False, 'import os\n'), ((2456, 2486), 'os.path.isdir', 'os.path.isdir', (['scb_ytbvos_path'], {}), '(scb_ytbvos_path)\n', (2469, 2486), False, 'import os\n'), ((3514, 3554), 'os.path.join', 'os.path.join', (['scb_ytbvos_path', 
'"""val.txt"""'], {}), "(scb_ytbvos_path, 'val.txt')\n", (3526, 3554), False, 'import os\n'), ((3556, 3598), 'os.path.join', 'os.path.join', (['dst_dir_ImageSets', '"""val.txt"""'], {}), "(dst_dir_ImageSets, 'val.txt')\n", (3568, 3598), False, 'import os\n'), ((3771, 3808), 'os.path.join', 'os.path.join', (['src_dir_JPEGImages', 'seq'], {}), '(src_dir_JPEGImages, seq)\n', (3783, 3808), False, 'import os\n'), ((3846, 3883), 'os.path.join', 'os.path.join', (['dst_dir_JPEGImages', 'seq'], {}), '(dst_dir_JPEGImages, seq)\n', (3858, 3883), False, 'import os\n'), ((3896, 3946), 'os.makedirs', 'os.makedirs', (['dst_dir_JPEGImages_seq'], {'exist_ok': '(True)'}), '(dst_dir_JPEGImages_seq, exist_ok=True)\n', (3907, 3946), False, 'import os\n'), ((4456, 4494), 'os.path.join', 'os.path.join', (['src_dir_Annotations', 'seq'], {}), '(src_dir_Annotations, seq)\n', (4468, 4494), False, 'import os\n'), ((4533, 4571), 'os.path.join', 'os.path.join', (['dst_dir_Annotations', 'seq'], {}), '(dst_dir_Annotations, seq)\n', (4545, 4571), False, 'import os\n'), ((4584, 4635), 'os.makedirs', 'os.makedirs', (['dst_dir_Annotations_seq'], {'exist_ok': '(True)'}), '(dst_dir_Annotations_seq, exist_ok=True)\n', (4595, 4635), False, 'import os\n'), ((5144, 5178), 'os.path.join', 'os.path.join', (['scb_ytbvos_path', 'seq'], {}), '(scb_ytbvos_path, seq)\n', (5156, 5178), False, 'import os\n'), ((5215, 5251), 'os.path.join', 'os.path.join', (['dst_dir_Scribbles', 'seq'], {}), '(dst_dir_Scribbles, seq)\n', (5227, 5251), False, 'import os\n'), ((5264, 5313), 'os.makedirs', 'os.makedirs', (['dst_dir_Scribbles_seq'], {'exist_ok': '(True)'}), '(dst_dir_Scribbles_seq, exist_ok=True)\n', (5275, 5313), False, 'import os\n'), ((2861, 2894), 'os.path.isdir', 'os.path.isdir', (['src_dir_JPEGImages'], {}), '(src_dir_JPEGImages)\n', (2874, 2894), False, 'import os\n'), ((2980, 3014), 'os.path.isdir', 'os.path.isdir', (['src_dir_Annotations'], {}), '(src_dir_Annotations)\n', (2993, 3014), False, 'import os\n'), 
((3101, 3131), 'os.path.isdir', 'os.path.isdir', (['scb_ytbvos_path'], {}), '(scb_ytbvos_path)\n', (3114, 3131), False, 'import os\n'), ((3979, 4013), 'os.listdir', 'os.listdir', (['src_dir_JPEGImages_seq'], {}), '(src_dir_JPEGImages_seq)\n', (3989, 4013), False, 'import os\n'), ((4091, 4133), 'os.path.join', 'os.path.join', (['src_dir_JPEGImages_seq', 'file'], {}), '(src_dir_JPEGImages_seq, file)\n', (4103, 4133), False, 'import os\n'), ((4668, 4703), 'os.listdir', 'os.listdir', (['src_dir_Annotations_seq'], {}), '(src_dir_Annotations_seq)\n', (4678, 4703), False, 'import os\n'), ((4781, 4824), 'os.path.join', 'os.path.join', (['src_dir_Annotations_seq', 'file'], {}), '(src_dir_Annotations_seq, file)\n', (4793, 4824), False, 'import os\n'), ((5346, 5379), 'os.listdir', 'os.listdir', (['src_dir_Scribbles_seq'], {}), '(src_dir_Scribbles_seq)\n', (5356, 5379), False, 'import os\n'), ((5457, 5498), 'os.path.join', 'os.path.join', (['src_dir_Scribbles_seq', 'file'], {}), '(src_dir_Scribbles_seq, file)\n', (5469, 5498), False, 'import os\n'), ((5526, 5567), 'os.path.join', 'os.path.join', (['dst_dir_Scribbles_seq', 'file'], {}), '(dst_dir_Scribbles_seq, file)\n', (5538, 5567), False, 'import os\n'), ((5710, 5744), 'os.listdir', 'os.listdir', (['src_dir_JPEGImages_seq'], {}), '(src_dir_JPEGImages_seq)\n', (5720, 5744), False, 'import os\n'), ((2544, 2574), 'os.listdir', 'os.listdir', (['src_dir_JPEGImages'], {}), '(src_dir_JPEGImages)\n', (2554, 2574), False, 'import os\n'), ((2583, 2614), 'os.listdir', 'os.listdir', (['src_dir_Annotations'], {}), '(src_dir_Annotations)\n', (2593, 2614), False, 'import os\n'), ((2638, 2678), 'os.path.join', 'os.path.join', (['scb_ytbvos_path', '"""val.txt"""'], {}), "(scb_ytbvos_path, 'val.txt')\n", (2650, 2678), False, 'import os\n'), ((4247, 4271), 'os.path.exists', 'os.path.exists', (['dst_path'], {}), '(dst_path)\n', (4261, 4271), False, 'import os\n'), ((4273, 4308), 'shutil.copyfile', 'shutil.copyfile', (['src_path', 'dst_path'], 
{}), '(src_path, dst_path)\n', (4288, 4308), False, 'import shutil\n'), ((4939, 4963), 'os.path.exists', 'os.path.exists', (['dst_path'], {}), '(dst_path)\n', (4953, 4963), False, 'import os\n'), ((4965, 5000), 'shutil.copyfile', 'shutil.copyfile', (['src_path', 'dst_path'], {}), '(src_path, dst_path)\n', (4980, 5000), False, 'import shutil\n'), ((5591, 5615), 'os.path.exists', 'os.path.exists', (['dst_path'], {}), '(dst_path)\n', (5605, 5615), False, 'import os\n'), ((5617, 5652), 'shutil.copyfile', 'shutil.copyfile', (['src_path', 'dst_path'], {}), '(src_path, dst_path)\n', (5632, 5652), False, 'import shutil\n'), ((399, 428), 'os.path.join', 'os.path.join', (['ann_dir', 'seq', 'f'], {}), '(ann_dir, seq, f)\n', (411, 428), False, 'import os\n')] |
#graph_visualize.py
from graphviz import Digraph
import pairwise
# Given a vector of the form generated in pairwise.py for
# easy reading into NNs, produce a diagram of the represented graph
def vec_to_graph(vec, name='no_name_graph', save=False, fromTorch=True):
matrix = None
if fromTorch:
matrix = pairwise.vec_to_matrix(vec.numpy())
else:
matrix = pairwise.vec_to_matrix(vec)
n_cands = len(matrix[0])
dot = Digraph(comment='Preference Graph',format='png')
# init nodes
for i, row in enumerate(matrix):
dot.node(chr(i+97), 'alt {}'.format(i+1))
# init edges
for i, row in enumerate(matrix):
# only care about the upper triangluar part
li = row[i+1:]
for j, alt in enumerate(li):
# math got confusing
a = i+1
b = i+j+2
p_a = chr(a+96)
p_b = chr(b+96)
if alt == 1:
dot.edge(p_a, p_b)
elif alt == -1:
dot.edge(p_b, p_a)
file_output = '../diagrams/graph_views/{}'.format(name)
if save:
dot.render(file_output,view=False)
return dot
def vote_to_graph(vote, name='no_name_graph', save=False):
if 0 in vote:
raise Exception('There should be no 0 values in vote vector')
return vec_to_graph(pairwise.process_vote(vote), name, save, fromTorch=False) | [
"pairwise.vec_to_matrix",
"graphviz.Digraph",
"pairwise.process_vote"
] | [((450, 499), 'graphviz.Digraph', 'Digraph', ([], {'comment': '"""Preference Graph"""', 'format': '"""png"""'}), "(comment='Preference Graph', format='png')\n", (457, 499), False, 'from graphviz import Digraph\n'), ((383, 410), 'pairwise.vec_to_matrix', 'pairwise.vec_to_matrix', (['vec'], {}), '(vec)\n', (405, 410), False, 'import pairwise\n'), ((1327, 1354), 'pairwise.process_vote', 'pairwise.process_vote', (['vote'], {}), '(vote)\n', (1348, 1354), False, 'import pairwise\n')] |
from tensorflow.keras.datasets import cifar10
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten
from tensorflow.keras.layers import Conv2D, MaxPooling2D
from tensorflow.keras import utils
import matplotlib.pyplot as plt
num_classes = 10
im_rows = 32
im_cols = 32
in_shape = (im_rows, im_cols, 3)
(X_train, y_train), (X_test, y_test) = cifar10.load_data()
X_train = X_train.astype('float32') / 255
X_test = X_test.astype('float32') / 255
y_train = utils.to_categorical(y_train, num_classes)
y_test = utils.to_categorical(y_test, num_classes)
model = Sequential()
model.add(Conv2D(32, (3, 3), padding='same',
input_shape=in_shape))
model.add(Activation('relu'))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Conv2D(64, (3, 3), padding='same'))
model.add(Activation('relu'))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Dropout(0.25))
model.add(Flatten())
model.add(Dense(512))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(num_classes))
model.add(Activation('softmax'))
model.compile(
loss='categorical_crossentropy',
optimizer='adam',
metrics=['accuracy'])
hist = model.fit(X_train, y_train,
batch_size=32, epochs=50,
verbose=1,
validation_data=(X_test, y_test))
score = model.evaluate(X_test, y_test, verbose=1)
print('accuracy=', score[1], 'loss=', score[0])
plt.plot(hist.history['accuracy'])
plt.plot(hist.history['val_accuracy'])
plt.title('Accuracy')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
plt.plot(hist.history['loss'])
plt.plot(hist.history['val_loss'])
plt.title('Loss')
plt.legend(['train', 'test'], loc='upper left')
plt.show()
model.save_weights('cifar10-cnn-weight.h5')
| [
"matplotlib.pyplot.title",
"tensorflow.keras.utils.to_categorical",
"matplotlib.pyplot.show",
"matplotlib.pyplot.plot",
"tensorflow.keras.layers.Conv2D",
"tensorflow.keras.layers.MaxPooling2D",
"tensorflow.keras.layers.Dropout",
"matplotlib.pyplot.legend",
"tensorflow.keras.layers.Dense",
"tensorf... | [((406, 425), 'tensorflow.keras.datasets.cifar10.load_data', 'cifar10.load_data', ([], {}), '()\n', (423, 425), False, 'from tensorflow.keras.datasets import cifar10\n'), ((520, 562), 'tensorflow.keras.utils.to_categorical', 'utils.to_categorical', (['y_train', 'num_classes'], {}), '(y_train, num_classes)\n', (540, 562), False, 'from tensorflow.keras import utils\n'), ((572, 613), 'tensorflow.keras.utils.to_categorical', 'utils.to_categorical', (['y_test', 'num_classes'], {}), '(y_test, num_classes)\n', (592, 613), False, 'from tensorflow.keras import utils\n'), ((623, 635), 'tensorflow.keras.models.Sequential', 'Sequential', ([], {}), '()\n', (633, 635), False, 'from tensorflow.keras.models import Sequential\n'), ((1563, 1597), 'matplotlib.pyplot.plot', 'plt.plot', (["hist.history['accuracy']"], {}), "(hist.history['accuracy'])\n", (1571, 1597), True, 'import matplotlib.pyplot as plt\n'), ((1598, 1636), 'matplotlib.pyplot.plot', 'plt.plot', (["hist.history['val_accuracy']"], {}), "(hist.history['val_accuracy'])\n", (1606, 1636), True, 'import matplotlib.pyplot as plt\n'), ((1637, 1658), 'matplotlib.pyplot.title', 'plt.title', (['"""Accuracy"""'], {}), "('Accuracy')\n", (1646, 1658), True, 'import matplotlib.pyplot as plt\n'), ((1659, 1706), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (1669, 1706), True, 'import matplotlib.pyplot as plt\n'), ((1707, 1717), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1715, 1717), True, 'import matplotlib.pyplot as plt\n'), ((1718, 1748), 'matplotlib.pyplot.plot', 'plt.plot', (["hist.history['loss']"], {}), "(hist.history['loss'])\n", (1726, 1748), True, 'import matplotlib.pyplot as plt\n'), ((1749, 1783), 'matplotlib.pyplot.plot', 'plt.plot', (["hist.history['val_loss']"], {}), "(hist.history['val_loss'])\n", (1757, 1783), True, 'import matplotlib.pyplot as plt\n'), ((1784, 1801), 'matplotlib.pyplot.title', 
'plt.title', (['"""Loss"""'], {}), "('Loss')\n", (1793, 1801), True, 'import matplotlib.pyplot as plt\n'), ((1802, 1849), 'matplotlib.pyplot.legend', 'plt.legend', (["['train', 'test']"], {'loc': '"""upper left"""'}), "(['train', 'test'], loc='upper left')\n", (1812, 1849), True, 'import matplotlib.pyplot as plt\n'), ((1850, 1860), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1858, 1860), True, 'import matplotlib.pyplot as plt\n'), ((646, 702), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {'padding': '"""same"""', 'input_shape': 'in_shape'}), "(32, (3, 3), padding='same', input_shape=in_shape)\n", (652, 702), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((731, 749), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (741, 749), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((761, 779), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(32)', '(3, 3)'], {}), '(32, (3, 3))\n', (767, 779), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((791, 809), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (801, 809), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((821, 851), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (833, 851), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((863, 876), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (870, 876), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((889, 923), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {'padding': '"""same"""'}), "(64, (3, 3), padding='same')\n", (895, 923), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((935, 953), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), 
"('relu')\n", (945, 953), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((965, 983), 'tensorflow.keras.layers.Conv2D', 'Conv2D', (['(64)', '(3, 3)'], {}), '(64, (3, 3))\n', (971, 983), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((995, 1013), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1005, 1013), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1025, 1055), 'tensorflow.keras.layers.MaxPooling2D', 'MaxPooling2D', ([], {'pool_size': '(2, 2)'}), '(pool_size=(2, 2))\n', (1037, 1055), False, 'from tensorflow.keras.layers import Conv2D, MaxPooling2D\n'), ((1067, 1080), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.25)'], {}), '(0.25)\n', (1074, 1080), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1093, 1102), 'tensorflow.keras.layers.Flatten', 'Flatten', ([], {}), '()\n', (1100, 1102), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1114, 1124), 'tensorflow.keras.layers.Dense', 'Dense', (['(512)'], {}), '(512)\n', (1119, 1124), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1136, 1154), 'tensorflow.keras.layers.Activation', 'Activation', (['"""relu"""'], {}), "('relu')\n", (1146, 1154), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1166, 1178), 'tensorflow.keras.layers.Dropout', 'Dropout', (['(0.5)'], {}), '(0.5)\n', (1173, 1178), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1190, 1208), 'tensorflow.keras.layers.Dense', 'Dense', (['num_classes'], {}), '(num_classes)\n', (1195, 1208), False, 'from tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n'), ((1220, 1241), 'tensorflow.keras.layers.Activation', 'Activation', (['"""softmax"""'], {}), "('softmax')\n", (1230, 1241), False, 'from 
tensorflow.keras.layers import Dense, Dropout, Activation, Flatten\n')] |
# Generated by Django 3.0.7 on 2020-06-09 12:24
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('program', '0006_auto_20200609_1522'),
]
operations = [
migrations.AlterField(
model_name='shedule',
name='group',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='program.Students', verbose_name='Номер группы'),
),
migrations.AlterField(
model_name='students',
name='group',
field=models.CharField(max_length=10, verbose_name='Номер группы'),
),
migrations.AlterField(
model_name='teachers',
name='class_number',
field=models.PositiveSmallIntegerField(blank=True, null=True, verbose_name='Номер кабинета'),
),
migrations.DeleteModel(
name='Group',
),
]
| [
"django.db.models.ForeignKey",
"django.db.models.CharField",
"django.db.migrations.DeleteModel",
"django.db.models.PositiveSmallIntegerField"
] | [((903, 939), 'django.db.migrations.DeleteModel', 'migrations.DeleteModel', ([], {'name': '"""Group"""'}), "(name='Group')\n", (925, 939), False, 'from django.db import migrations, models\n'), ((369, 488), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'to': '"""program.Students"""', 'verbose_name': '"""Номер группы"""'}), "(on_delete=django.db.models.deletion.CASCADE, to=\n 'program.Students', verbose_name='Номер группы')\n", (386, 488), False, 'from django.db import migrations, models\n'), ((606, 666), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(10)', 'verbose_name': '"""Номер группы"""'}), "(max_length=10, verbose_name='Номер группы')\n", (622, 666), False, 'from django.db import migrations, models\n'), ((796, 887), 'django.db.models.PositiveSmallIntegerField', 'models.PositiveSmallIntegerField', ([], {'blank': '(True)', 'null': '(True)', 'verbose_name': '"""Номер кабинета"""'}), "(blank=True, null=True, verbose_name=\n 'Номер кабинета')\n", (828, 887), False, 'from django.db import migrations, models\n')] |
from django.db.models import fields
from rest_framework import serializers
from country_manager.countries.models import Country, Currency
class SerializerCurrency(serializers.ModelSerializer):
class Meta:
model = Currency
fields = ['code']
class BaseSerializerCountry(serializers.ModelSerializer):
currency = serializers.StringRelatedField(many=False)
class Meta:
model = Country
fields = ('id', 'name', 'flag_icon', 'currency')
class SerializerCountry(BaseSerializerCountry):
class Meta(BaseSerializerCountry.Meta):
fields = BaseSerializerCountry.Meta.fields + ('phone_prefix',)
class SerializerRestrictedCountry(serializers.ModelSerializer):
currency = SerializerCurrency(many=False)
class Meta:
model = Country
fields = ["name", "currency"]
| [
"rest_framework.serializers.StringRelatedField"
] | [((339, 381), 'rest_framework.serializers.StringRelatedField', 'serializers.StringRelatedField', ([], {'many': '(False)'}), '(many=False)\n', (369, 381), False, 'from rest_framework import serializers\n')] |
# @Created Date: 2020-01-12 01:27:18 pm
# @Filename: api.py
# @Email: <EMAIL>
# @Author: <NAME>
# @Last Modified: 2020-02-11 04:22:22 pm
# @Copyright (c) 2020 MinghuiGroup, Soochow University
from numpy import array, nan, count_nonzero
import pandas as pd
from typing import Union, Optional, Iterator, Iterable, Dict, List, Any, Generator, Callable, Tuple
import orjson as json
from pathlib import Path
from aiofiles import open as aiofiles_open
from collections import defaultdict
from unsync import unsync, Unfuture
from random import choice
from hashlib import sha1
from pdb_profiling.processors.recordbase import IdentifierBase
from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams
from pdb_profiling.log import Abclog
from pdb_profiling.fetcher.webfetch import UnsyncFetch
from pdb_profiling.processors.transformer import Dict2Tabular
from pdb_profiling.exceptions import WithoutExpectedKeyError, InvalidFileContentError
from pdb_profiling.ensure import EnsureBase
from tenacity import retry, wait_random, stop_after_attempt, retry_if_exception_type, RetryError
ensure = EnsureBase()
msc_rt_kw = dict(wait=wait_random(max=1), stop=stop_after_attempt(3), retry=retry_if_exception_type(InvalidFileContentError))
BASE_URL: str = 'https://www.ebi.ac.uk/pdbe/'
FTP_URL: str = 'ftp://ftp.ebi.ac.uk/'
FTP_DEFAULT_PATH: str = 'pub/databases/msd/sifts/flatfiles/tsv/uniprot_pdb.tsv.gz'
PDB_ARCHIVE_URL_EBI: str = 'http://ftp.ebi.ac.uk/pub/databases/pdb/data/structures/'
PDB_ARCHIVE_URL_WWPDB: str = 'https://ftp.wwpdb.org/pub/pdb/data/structures/'
PDB_ARCHIVE_VERSIONED_URL: str = 'http://ftp-versioned.wwpdb.org/pdb_versioned/data/'
# https://ftp.wwpdb.org/pub/pdb/data/structures/obsolete/mmCIF/a0/2a01.cif.gz
# http://ftp.ebi.ac.uk/pub/databases/pdb/data/structures/obsolete/mmCIF/a0/2a01.cif.gz
# http://ftp-versioned.wwpdb.org/pdb_versioned/data/entries/wm/pdb_00002wmg/pdb_00002wmg_xyz_v1-2.cif.gz
FUNCS = []
def mask_ib(i, default='', raise_error=False):
if i.source == 'PDB' and i.level == 'entry':
return 'pdb_id'
elif i.source == 'UniProt':
return 'UniProt'
elif raise_error:
raise AssertionError('Unexpected Case!')
else:
return default
def str_number_converter(x):
try:
return int(x)
except ValueError:
return -100000
def dispatch_on_set(*keys):
'''
Decorator to add new dispatch functions
'''
def register(func):
FUNCS.append((func, frozenset(keys)))
return func
return register
def traverseSuffixes(query: Any, *args):
for func, keySet in FUNCS:
if query in keySet:
return func(*args)
else:
raise ValueError(f'Invalid query: {query}')
class ProcessPDBe(Abclog):
    """Fetch PDBe REST API endpoints and convert the JSON payloads to TSV files.

    ``yieldTasks`` builds (method, request-args, output-path) task tuples,
    the ``retrieve``/``single_retrieve`` pair schedules them through
    ``UnsyncFetch``, and ``process``/``json2tsv`` decode each downloaded
    JSON file into a TSV next to it.
    """
    headers = {'Connection': 'close', 'Content-Type': 'application/json'}
    # Column converters applied when the emitted TSVs are later parsed;
    # fields that may hold either a number or a string go through
    # str_number_converter.
    converters = {
        'pdb_id': str,
        'chain_id': str,
        'struct_asym_id': str,
        'entity_id': str_number_converter,
        'author_residue_number': int,
        'residue_number': str_number_converter,
        'author_insertion_code': str,
        'id': int,
        'interface_id': int,
        'interface_number': int,
        'pdb_code': str,
        'assemble_code': int,
        'assembly_id': int,
        'oper_expression': str,
        'structure_1.range': str,
        'structure_2.range': str,
        'alt_code': str,
        'sheet_id': str_number_converter
    }

    @classmethod
    def yieldTasks(cls, pdbs: Union[Iterable, Iterator], suffix: str, method: str, folder: Union[str, Path], chunksize: int = 25, task_id: int = 0) -> Generator:
        """Yield (method, request-args, output-path) task tuples.

        POST batches `chunksize` ids per request; GET issues one request per id.
        NOTE: the POST branch slices ``pdbs``, so it assumes a sequence even
        though the annotation allows any iterable.
        """
        # Robustness fix: the annotation allows `folder` to be a str, but the
        # `/` path operator below requires a Path — coerce once up front
        # (consistent with `process`, which already does `Path(path)`).
        folder = Path(folder)
        file_prefix = suffix.replace('/', '%')
        method = method.lower()
        if method == 'post':
            url = f'{BASE_URL}{suffix}'
            for i in range(0, len(pdbs), chunksize):
                params = {'headers': cls.headers, 'url': url, 'data': ','.join(pdbs[i:i+chunksize])}
                yield method, params, folder/f'{file_prefix}+{task_id}+{i}.json'
        elif method == 'get':
            for pdb in pdbs:
                identifier = pdb.replace('/', '%')
                yield method, {'headers': cls.headers, 'url': f'{BASE_URL}{suffix}{pdb}'}, folder/f'{file_prefix}+{identifier}.json'
        else:
            raise ValueError(
                f'Invalid method: {method}, method should either be "get" or "post"')

    @classmethod
    def single_retrieve(cls, pdb: str, suffix: str, method: str, folder: Union[Path, str], semaphore, rate: float = 1.5, **kwargs):
        """Schedule the download (and post-processing) of a single entry."""
        return UnsyncFetch.single_task(
            task=next(cls.yieldTasks((pdb, ), suffix, method, folder)),
            semaphore=semaphore,
            to_do_func=kwargs.get('to_do_func', cls.process),
            rate=rate)

    @classmethod
    def retrieve(cls, pdbs: Union[Iterable, Iterator], suffix: str, method: str, folder: Union[str, Path], chunksize: int = 20, concur_req: int = 20, rate: float = 1.5, task_id: int = 0, ret_res: bool = True, **kwargs):
        """Download many entries concurrently, processing each with ``cls.process``."""
        res = UnsyncFetch.multi_tasks(
            cls.yieldTasks(pdbs, suffix, method, folder, chunksize, task_id),
            cls.process,
            concur_req=concur_req,
            rate=rate,
            ret_res=ret_res,
            semaphore=kwargs.get('semaphore', None))
        return res

    @classmethod
    @unsync
    @ensure.make_sure_complete(**msc_rt_kw)
    async def json2tsv(cls, suffix: str, ori_path: Union[str, Path], path: Union[str, Path]):
        """Decode the JSON file at *ori_path* and write it as TSV to *path*.

        Returns *path* on success, or None when the payload holds no
        expected data for this suffix.
        """
        cls.logger.debug('Start to decode')
        async with aiofiles_open(ori_path) as handle:
            try:
                data = json.loads(await handle.read())
            except Exception:
                cls.logger.error(f"Error in '{ori_path}'")
                # bare raise keeps the original traceback intact
                raise
        res = Dict2Tabular.pyexcel_io(traverseSuffixes(suffix, data))
        if res is not None:
            if isinstance(res, Generator):
                # First chunk creates the file ('w'); later chunks append ('a').
                count = 0
                for r in res:
                    if r is not None:
                        await pipe_out(df=r, path=path, format='tsv', mode='a' if count else 'w')
                        count += 1
                if not count:
                    cls.logger.debug(f"Without Expected Data ({suffix}): {data}")
                    return None
            else:
                await pipe_out(df=res, path=path, format='tsv', mode='w')
            cls.logger.debug(f"Decoded file in '{path}'")
            return path
        else:
            cls.logger.debug(f"Without Expected Data ({suffix}): {data}")
            return None

    @classmethod
    @unsync
    async def process(cls, path: Union[str, Path, Unfuture]):
        """Await *path* if it is a future, then convert the JSON file to TSV.

        The API suffix is recovered from the file name laid down by
        ``yieldTasks`` ('/' was encoded as '%', parts joined by '+').
        """
        if not isinstance(path, (str, Path)):
            path = await path
        if path is None:
            return
        path = Path(path)
        suffix = path.name.replace('%', '/').split('+')[0]
        new_path = Path(str(path).replace('.json', '.tsv'))
        try:
            return await cls.json2tsv(suffix=suffix, ori_path=path, path=new_path)
        except RetryError:
            cls.logger.error(f"Retry failed for: {path.name} -> {new_path.name}")
            raise
class PDBeDecoder(object):
    """Registry of decoders that flatten nested PDBe/PDBe-KB JSON payloads.

    Each static method is registered (via ``dispatch_on_set``) for a set of
    API suffixes and is looked up through ``traverseSuffixes``. Decoders
    yield either ``(records, extra_col_names, extra_col_values)`` tuples or
    ``(records, None)`` for the downstream tabular writer.

    NOTE(review): ``json`` here appears to be orjson (``dumps`` returns
    ``bytes``, hence the ``.decode('utf-8')`` calls) — confirm at the
    module imports.
    """
    @staticmethod
    @dispatch_on_set('api/pdb/entry/status/', 'api/pdb/entry/summary/', 'api/pdb/entry/modified_AA_or_NA/',
                     'api/pdb/entry/mutated_AA_or_NA/', 'api/pdb/entry/cofactor/', 'api/pdb/entry/molecules/',
                     'api/pdb/entry/entities/',
                     'api/pdb/entry/ligand_monomers/', 'api/pdb/entry/experiment/', 'api/pdb/entry/carbohydrate_polymer/',
                     'api/pdb/entry/electron_density_statistics/', 'api/pdb/entry/related_experiment_data/',
                     'api/pdb/entry/drugbank/', 'api/mappings/best_structures/',
                     'graph-api/pdb/mutated_AA_or_NA/', 'graph-api/pdb/modified_AA_or_NA/',
                     'graph-api/mappings/best_structures/', 'graph-api/compound/atoms/',
                     'graph-api/compound/bonds/', 'graph-api/compound/summary/',
                     'graph-api/compound/cofactors/', 'graph-api/pdb/funpdbe/',
                     'graph-api/pdb/bound_excluding_branched/',
                     'graph-api/pdb/bound_molecules/', 'graph-api/pdb/ligand_monomers/',
                     'api/validation/global-percentiles/entry/', 'api/validation/summary_quality_scores/entry/',
                     'api/validation/key_validation_stats/entry/', 'api/validation/xray_refine_data_stats/entry/',
                     'api/validation/vdw_clashes/entry/', 'api/validation/outliers/all/',
                     'api/validation/nmr_cyrange_cores/entry/',  # TODO: 2tablar
                     'api/validation/nmr_ensemble_clustering/entry/'
                     )
    def yieldCommon(data: Dict) -> Generator:
        """Generic ``{id: [record, ...]}`` payloads; nested values become JSON text."""
        for pdb in data:
            values = data[pdb]
            for value in values:
                for key in value:
                    if isinstance(value[key], (Dict, List)):
                        # serialize nested structures so each cell is scalar text
                        value[key] = json.dumps(value[key]).decode('utf-8')
            yield values, (mask_ib(IdentifierBase(pdb), '_code_'),), (pdb,)

    @staticmethod
    @dispatch_on_set('api/pdb/entry/polymer_coverage/')
    def yieldPolymerCoverage(data: Dict) -> Generator:
        """Flatten molecules -> chains -> observed fragments, tagged with chain/entity ids."""
        for pdb in data:
            molecules = data[pdb]['molecules']
            for entity in molecules:
                chains = entity['chains']
                for chain in chains:
                    observed = chain['observed']
                    for fragement in observed:
                        for key in ('start', 'end'):
                            fragement[key] = json.dumps(
                                fragement[key]).decode('utf-8')
                    yield observed, ('chain_id', 'struct_asym_id', 'entity_id', 'pdb_id'), (chain['chain_id'], chain['struct_asym_id'], entity['entity_id'], pdb)

    @staticmethod
    @dispatch_on_set('api/pdb/entry/observed_residues_ratio/')
    def yieldObservedResiduesRatio(data: Dict) -> Generator:
        """Yield per-entity observed-residue-ratio records."""
        for pdb in data:
            for entity_id, entity in data[pdb].items():
                yield entity, ('entity_id', 'pdb_id'), (entity_id, pdb)

    @staticmethod
    @dispatch_on_set('api/pdb/entry/residue_listing/')
    def yieldResidues(data: Dict) -> Generator:
        """Flatten per-chain residue listings; normalize 'multiple_conformers' to text."""
        for pdb in data:
            molecules = data[pdb]['molecules']
            for entity in molecules:
                chains = entity['chains']
                for chain in chains:
                    residues = chain['residues']
                    for res in residues:
                        if 'multiple_conformers' not in res:
                            # keep the column present for every row
                            res['multiple_conformers'] = ''
                        else:
                            res['multiple_conformers'] = json.dumps(
                                res['multiple_conformers']).decode('utf-8')
                    yield residues, ('chain_id', 'struct_asym_id', 'entity_id', 'pdb_id'), (chain['chain_id'], chain['struct_asym_id'], entity['entity_id'], pdb)

    @staticmethod
    @dispatch_on_set('api/pdb/entry/secondary_structure/', 'graph-api/pdb/secondary_structure/')
    def yieldSecondaryStructure(data: Dict) -> Generator:
        """Flatten secondary-structure fragments per chain; backfill missing 'sheet_id'."""
        for pdb in data:
            molecules = data[pdb]['molecules']
            for entity in molecules:
                chains = entity['chains']
                for chain in chains:
                    secondary_structure = chain['secondary_structure']
                    for name in secondary_structure:
                        fragment = secondary_structure[name]
                        for record in fragment:
                            for key in record:
                                if isinstance(record[key], (Dict, List)):
                                    record[key] = json.dumps(
                                        record[key]).decode('utf-8')
                            if 'sheet_id' not in record:
                                # only strand records carry a sheet_id; pad others
                                record['sheet_id'] = None
                        yield fragment, ('secondary_structure', 'chain_id', 'struct_asym_id', 'entity_id', 'pdb_id'), (name, chain['chain_id'], chain['struct_asym_id'], entity['entity_id'], pdb)

    @staticmethod
    @dispatch_on_set('api/pdb/entry/binding_sites/')
    def yieldBindingSites(data: Dict) -> Generator:
        """Yield site- and ligand-residue lists for each binding site."""
        for pdb in data:
            for site in data[pdb]:
                for tage in ('site_residues', 'ligand_residues'):
                    residues = site[tage]
                    for res in residues:
                        if 'symmetry_symbol' not in res:
                            res['symmetry_symbol'] = None
                    yield residues, ('residues_type', 'details', 'evidence_code', 'site_id', 'pdb_id'), (tage, site['details'], site['evidence_code'], site['site_id'], pdb)

    @staticmethod
    @dispatch_on_set('api/pdb/entry/assembly/')
    def yieldAssembly(data: Dict) -> Generator:
        """Yield entity records per biounit, with all biounit metadata as extra columns."""
        for pdb in data:
            for biounit in data[pdb]:
                entities = biounit['entities']
                for entity in entities:
                    for key in entity:
                        if isinstance(entity[key], (Dict, List)):
                            entity[key] = json.dumps(
                                entity[key]).decode('utf-8')
                keys = list(biounit)
                # 'entities' is the record list itself, not a metadata column
                keys.remove('entities')
                yield entities, tuple(keys)+('pdb_id',), tuple(biounit[key] for key in keys)+(pdb, )

    @staticmethod
    @dispatch_on_set('api/pdb/entry/files/')
    def yieldAssociatedFiles(data: Dict) -> Generator:
        """Yield non-empty associated-file records, keyed by category and sub-key."""
        for pdb in data:
            for key in data[pdb]:
                for innerKey in data[pdb][key]:
                    record = data[pdb][key][innerKey]
                    if record:
                        yield record, ('innerKey', 'key', 'pdb_id'), (innerKey, key, pdb)
                    else:
                        continue

    @staticmethod
    @dispatch_on_set('api/mappings/all_isoforms/', 'api/mappings/uniprot/',
                     'api/mappings/uniprot_segments/', 'api/mappings/isoforms/',
                     'api/mappings/uniref90/', 'api/mappings/homologene_uniref90/',
                     'api/mappings/interpro/', 'api/mappings/pfam/',
                     'api/mappings/cath/', 'api/mappings/cath_b/',
                     'api/mappings/scop/', 'api/mappings/go/',
                     'api/mappings/ec/', 'api/mappings/ensembl/',
                     'api/mappings/hmmer/', 'api/mappings/sequence_domains/',
                     'api/mappings/structural_domains/', 'api/mappings/homologene/',
                     'api/mappings/uniprot_to_pfam/', 'api/mappings/uniprot_publications/',
                     'graph-api/mappings/uniprot/', 'graph-api/mappings/uniprot_segments/',
                     'graph-api/mappings/all_isoforms/', 'graph-api/mappings/',
                     'graph-api/mappings/isoforms/', 'graph-api/mappings/ensembl/',
                     'graph-api/mappings/homologene/', 'graph-api/mappings/sequence_domains/',
                     'api/mappings/', 'api/nucleic_mappings/', 'api/nucleic_mappings/rfam/',
                     'api/nucleic_mappings/sequence_domains/'
                     # 'graph-api/uniprot/'
                     )
    def yieldSIFTSAnnotation(data: Dict) -> Generator:
        """Flatten SIFTS cross-reference mappings.

        Handles two payload shapes: entry -> annotation databases -> mappings,
        and UniProt -> 'PDB' -> per-pdb chain lists. Raises ValueError for
        anything else.
        """
        valid_annotation_set = {'UniProt', 'Ensembl', 'Pfam', 'CATH',
                                'CATH-B', 'SCOP', 'InterPro', 'GO', 'EC', 'Homologene', 'HMMER', 'Rfam'}
        for top_root in data:
            # top_root: PDB_ID or else ID
            if data[top_root].keys() <= valid_annotation_set:
                # from PDB to ('UniProt', 'Ensembl', 'Pfam', 'CATH', 'CATH-B', 'SCOP', 'InterPro', 'GO', 'EC', 'Homologene', 'HMMER')
                # from PDB_ENTITY (i.e. graph-api/mappings/homologene/)
                # OR: from Uniprot (i.e. api/mappings/uniprot_to_pfam/)
                for sec_root in data[top_root]:
                    child = data[top_root][sec_root]
                    for annotation in child:
                        chains = child[annotation]['mappings']
                        for chain in chains:
                            for key, value in chain.items():
                                chain[key] = json.dumps(value).decode(
                                    'utf-8') if isinstance(value, Dict) else value
                            # copy annotation-level metadata onto each chain row
                            for key, value in child[annotation].items():
                                if key == 'mappings':
                                    continue
                                chain[key] = json.dumps(value).decode(
                                    'utf-8') if isinstance(value, Dict) else value
                            chain[mask_ib(IdentifierBase(top_root), raise_error=True)] = top_root
                            chain[sec_root] = annotation
                        yield chains, None
            elif len(data[top_root].keys()) == 1 and 'PDB' in data[top_root].keys():
                # from UniProt to PDB
                for sec_root in data[top_root]:
                    child = data[top_root][sec_root]
                    for pdb in child:
                        chains = child[pdb]
                        for chain in chains:
                            chain['start'] = json.dumps(
                                chain['start']).decode('utf-8')
                            chain['end'] = json.dumps(
                                chain['end']).decode('utf-8')
                        yield chains, ('pdb_id', 'UniProt'), (pdb, top_root)
            else:
                raise ValueError(
                    f'Unexpected data structure for inputted data: {data}')

    @staticmethod
    @dispatch_on_set('api/pisa/asiscomponent/')
    def yield_pisa_asiscomponent(data: Dict):
        """Yield engaged-interface records from PISA as-is-component payloads."""
        for pdb in data:
            if data[pdb]['status'] != 'Ok' or 'assembly_detail' not in data[pdb]:
                raise WithoutExpectedKeyError(f"Without Expected interfacelist info: {data}")
            try:
                records = data[pdb]['assembly_detail']['engaged_interfaces_list']['engaged_interfaces_array']
            except KeyError:
                raise WithoutExpectedKeyError(f"Without Expected interfacelist info: {data}")
            yield records, ('pdb_id',), (pdb,)

    @staticmethod
    @dispatch_on_set('api/pisa/interfacelist/')
    def yieldPISAInterfaceList(data: Dict):
        """Yield flattened PISA interface-list entries tagged with pdb/assembly ids."""
        for pdb in data:
            try:
                records = data[pdb]['interfaceentries']
            except KeyError:
                raise WithoutExpectedKeyError(
                    f"Without Expected interface_detail: {data}")
            for record in records:
                flatten_dict(record, 'structure_1')
                flatten_dict(record, 'structure_2')
            yield records, ('pdb_id', 'assembly_id'), (pdb, data[pdb]['page_title']['assemble_code'])

    @staticmethod
    @dispatch_on_set('api/pisa/interfacedetail/')
    def yieldPISAInterfaceDetail(data: Dict):
        """Yield per-residue interface-detail arrays for both interface partners."""
        usecols = (
            'pdb_code', 'assemble_code', 'interface_number',
            'interface_detail.interface_structure_1.structure.selection',
            'interface_detail.interface_structure_2.structure.selection')
        # 'interface_atoms', 'interface_residue', 'interface_area', 'solvation_energy'
        edge_cols1 = ('structure',)
        # 'interface_atoms', 'interface_residues', 'interface_area', 'solvation_energy'
        edge_cols2 = ('structure',)
        for pdb in data:
            try:
                records = data[pdb]['interface_detail']
            except KeyError:
                raise WithoutExpectedKeyError(
                    f"Without Expected interface_detail: {data}")
            # bond records are not tabulated here
            del records['bonds']
            for col in edge_cols1:
                flatten_dict(records['interface_structure_1'], col)
            for col in edge_cols2:
                flatten_dict(records['interface_structure_2'], col)
            flatten_dict(data[pdb], 'page_title', False)
            flatten_dict(records, 'interface_structure_1')
            flatten_dict(records, 'interface_structure_2')
            flatten_dict(data[pdb], 'interface_detail')
            # cols = sorted(i for i in data[pdb].keys() if i != 'interface_detail.residues')
            yield data[pdb]['interface_detail.residues']['residue1']['residue']['residue_array'], usecols, tuple(data[pdb][col] for col in usecols)
            yield data[pdb]['interface_detail.residues']['residue2']['residue']['residue_array'], usecols, tuple(data[pdb][col] for col in usecols)

    @staticmethod
    @dispatch_on_set('graph-api/residue_mapping/')
    def graph_api_residue_mapping(data: Dict):
        '''
        * <https://www.ebi.ac.uk/pdbe/graph-api/residue_mapping/:pdbId/:entityId/:residueNumber>
        * <https://www.ebi.ac.uk/pdbe/graph-api/residue_mapping/:pdbId/:entityId/:residueStart/:residueEnd>

        NOTE: only yield UniProt Residue Related Data
        '''
        cols = (
            'pdb_id', 'entity_id', 'chain_id', 'struct_asym_id',
            'residue_number', 'author_residue_number',
            'author_insertion_code', 'observed', 'UniProt')
        for pdb_id in data:
            assert len(data[pdb_id]) == 1, f"Unexpected Cases: {pdb_id}"
            molecules = data[pdb_id][0]
            for chain in molecules['chains']:
                for residue in chain['residues']:
                    yield list({**dict(zip(cols, (
                        pdb_id, molecules['entity_id'], chain['auth_asym_id'],
                        chain['struct_asym_id'], residue['residue_number'],
                        residue['author_residue_number'], residue['author_insertion_code'],
                        residue['observed'], feature_tag))), **feature} for feature_tag, feature in residue['features']['UniProt'].items()), None

    @staticmethod
    @dispatch_on_set('graph-api/pdb/sequence_conservation/')
    def sequence_conservation(data: Dict):
        """Yield per-residue conservation scores; letter/probability arrays as JSON text."""
        for pdb in data:
            yield [{
                'pdb_id': pdb,
                'entity_id': data[pdb]['entity_id'],
                'length': data[pdb]['length'],
                'residue_number': val['start'],
                'conservation_score': val['conservation_score'],
                'letter_array': json.dumps(tuple(i['letter'] for i in val['amino'])).decode('utf-8'),
                'proba_array': json.dumps(tuple(i['proba'] for i in val['amino'])).decode('utf-8')}
                for val in data[pdb]['data']], None
            # letter_array, proba_array = zip(*((i['letter'], i['proba']) for i in val['amino']))

    @staticmethod
    @dispatch_on_set('graph-api/pdb/funpdbe_annotation/depth/',
                     'graph-api/pdb/funpdbe_annotation/cath-funsites/',
                     'graph-api/pdb/funpdbe_annotation/3Dcomplex/',
                     'graph-api/pdb/funpdbe_annotation/akid/',
                     'graph-api/pdb/funpdbe_annotation/3dligandsite/',
                     'graph-api/pdb/funpdbe_annotation/camkinet/',
                     'graph-api/pdb/funpdbe_annotation/canSAR/',
                     'graph-api/pdb/funpdbe_annotation/ChannelsDB/',
                     'graph-api/pdb/funpdbe_annotation/dynamine/',
                     'graph-api/pdb/funpdbe_annotation/FoldX/',
                     'graph-api/pdb/funpdbe_annotation/MetalPDB/',
                     'graph-api/pdb/funpdbe_annotation/M-CSA/',
                     'graph-api/pdb/funpdbe_annotation/p2rank/',
                     'graph-api/pdb/funpdbe_annotation/Missense3D/',
                     'graph-api/pdb/funpdbe_annotation/POPScomp_PDBML/',
                     'graph-api/pdb/funpdbe_annotation/ProKinO/',
                     'graph-api/pdb/funpdbe_annotation/14-3-3-pred/',
                     'graph-api/pdb/funpdbe_annotation/'
                     )
    def funpdbe_resources(data: Dict):
        """Yield site residues per FunPDBe annotation with origin/evidence columns."""
        for pdb in data:
            info = data[pdb]
            for val in info:
                for annotation in val['annotations']:
                    yield annotation['site_residues'], ('pdb_id', 'origin', 'evidence_codes', 'site_id', 'label'), (pdb, val['origin'], val['evidence_codes'], annotation['site_id'], annotation['label'])

    @staticmethod
    @dispatch_on_set('graph-api/pdbe_pages/rfam/',
                     'graph-api/pdbe_pages/annotations/',
                     'graph-api/pdbe_pages/uniprot_mapping/',
                     'graph-api/pdbe_pages/binding_sites/',
                     'graph-api/pdbe_pages/interfaces/',
                     'graph-api/pdbe_pages/secondary_structure/',
                     'graph-api/pdbe_pages/domains/',
                     'graph-api/uniprot/unipdb/',
                     'graph-api/uniprot/annotations/',
                     'graph-api/uniprot/interface_residues/',
                     'graph-api/uniprot/ligand_sites/',
                     'graph-api/uniprot/secondary_structures/',
                     'graph-api/uniprot/domains/',
                     'graph-api/uniprot/sequence_conservation/')
    def graph_api_data_common(data: Dict):
        """Common graph-api page payloads; id column inferred from key length (4 => pdb_id)."""
        for pdb in data:
            id_type = 'pdb_id' if len(pdb) == 4 else 'UniProt'
            for info in data[pdb]['data']:
                if 'additionalData' in info:
                    flatten_dict(info, 'additionalData')
                com_keys = tuple(key for key in info.keys()
                                 if key != 'residues')
                yield info['residues'], (id_type,)+com_keys, (pdb,)+tuple(info[key] for key in com_keys)

    @staticmethod
    @dispatch_on_set('graph-api/pdb/bound_molecule_interactions/')
    def graph_api_bound(data: Dict):
        """Yield bound-molecule interaction records with all fields serialized to JSON text."""
        for pdb in data:
            info = data[pdb]
            for interactions in info:
                ret = [{j: json.dumps(i[j]).decode('utf-8') for j in i.keys()}
                       for i in interactions['interactions']]
                yield ret, ('pdb_id', 'bm_id'), (pdb, interactions['bm_id'])

    @staticmethod
    @dispatch_on_set('api/validation/protein-ramachandran-sidechain-outliers/entry/', 'api/validation/RNA_pucker_suite_outliers/entry/')
    def yield_protein_ramachandran_sidechain_outlier(data):
        """Yield outlier residue lists, tagged with the outlier category key."""
        for pdb in data:
            for tage in data[pdb]:
                residues = data[pdb][tage]
                yield residues, ('_type_', 'pdb_id'), (tage, pdb)

    @staticmethod
    @dispatch_on_set('api/validation/rama_sidechain_listing/entry/', 'api/validation/residuewise_outlier_summary/entry/',
                     'api/validation/protein-RNA-DNA-geometry-outlier-residues/entry/')
    def yield_rama_sidechain_listing(data):
        """Flatten molecules -> chains -> models -> residues validation listings."""
        for pdb in data:
            molecules = data[pdb]['molecules']
            for entity in molecules:
                chains = entity['chains']
                for chain in chains:
                    models = chain['models']
                    for model in models:
                        residues = model['residues']
                        yield residues, ('chain_id', 'struct_asym_id', 'model_id', 'entity_id', 'pdb_id'), (chain['chain_id'], chain['struct_asym_id'], model['model_id'], entity['entity_id'], pdb)

    @staticmethod
    @dispatch_on_set('graph-api/uniprot/superposition/')
    def yield_unp_pdb_struct_cluster(data):
        """Yield per-segment structure clusters; cluster id combines segment and cluster index."""
        for unp in data:
            for segment_id, segment in enumerate(data[unp]):
                clusters = segment['clusters']
                for sub_cluster_id, sub_cluster in enumerate(clusters):
                    yield sub_cluster, ('pdbekb_cluster', 'segment_start', 'segment_end', 'UniProt'), (f'{segment_id}_{sub_cluster_id}', segment['segment_start'], segment['segment_end'], unp)
class PDBeModelServer(object):
    '''
    Implement the ModelServer API (subset queries against coordinate models).
    '''
    pdbe_root = f'{BASE_URL}model-server/v1/'
    rcsb_root = 'https://models.rcsb.org/v1/'
    root = rcsb_root
    headers = {'Connection': 'close', 'accept': 'text/plain', 'Content-Type': 'application/json'}
    # BUGFIX: a missing comma after 'ligand' made Python concatenate the two
    # adjacent string literals into 'ligandresidueSurroundings', silently
    # dropping both valid endpoint names from the set.
    api_set = frozenset(('atoms', 'residueInteraction', 'assembly', 'full', 'ligand',
                         'residueSurroundings', 'symmetryMates', 'query-many'))

    @classmethod
    def task_unit(cls, pdb, suffix, method, folder, data_collection, params, filename='_subset'):
        """Build one (method, request-args, output-path) task.

        GET is used when there is no request body; POST carries
        *data_collection* as the body. *params* are query-string encoded.
        """
        if data_collection is None:
            assert method == 'get', 'Invalid method!'
            args = dict(
                url=f'{cls.root}{pdb}/{suffix}?{dumpsParams(params)}',
                headers=cls.headers)
        else:
            assert method == 'post', 'Invalid method!'
            args = dict(
                url=f'{cls.root}{pdb}/{suffix}?{dumpsParams(params)}',
                headers=cls.headers,
                data=data_collection)
        # BUGFIX: honour the `filename` argument in the output name — it was
        # accepted (and threaded through single_retrieve) but never used.
        return method, args, folder/f'{pdb}{filename}.{params.get("encoding", "cif")}'

    @classmethod
    def single_retrieve(cls, pdb: str, suffix: str, method: str, folder: Union[Path, str], semaphore, params=None, data_collection=None, rate: float = 1.5, filename='_subset'):
        """Schedule one ModelServer request; defaults to model 1 in mmCIF encoding."""
        if params is None or len(params) == 0:
            params = {'model_nums': 1, 'encoding': 'cif'}
        return UnsyncFetch.single_task(
            task=cls.task_unit(pdb, suffix, method, folder,
                               data_collection, params, filename=filename),
            semaphore=semaphore,
            rate=rate)
class PDBeCoordinateServer(object):
    """Client for the CoordinateServer API (EBI mirror or cs.litemol.org)."""

    roots = (f'{BASE_URL}coordinates/', 'https://cs.litemol.org/')
    headers = {'Connection': 'close', 'accept': 'text/plain'}
    # BUGFIX: a missing comma after 'chains' made Python concatenate the two
    # adjacent string literals into 'chainsentities', silently dropping both
    # valid endpoint names from the set.
    api_set = frozenset(('ambientResidues', 'assembly', 'backbone', 'cartoon', 'chains',
                         'entities', 'full', 'het', 'ligandInteraction', 'residueRange',
                         'residues', 'sidechain', 'symmetryMates', 'trace', 'water'))

    def __init__(self, root: str = 'random'):
        """Select the server root: 'ebi', 'litemol', or 'random' (pick one of the two)."""
        if root == 'random':
            self.root = choice(self.roots)
        elif root == 'ebi':
            self.root = self.roots[0]
        elif root == 'litemol':
            self.root = self.roots[1]
        else:
            raise ValueError("root should be (ebi, litemol, random)")

    def __repr__(self):
        return f'<CoordinateServerAPI: {self.root}>'

    def task_unit(self, pdb_id, suffix: str, params, folder):
        """Build one ('get', args, output-path) task; params are encoded into the filename too."""
        args = dict(
            url=f'{self.root}{pdb_id}/{suffix}?',
            headers=self.headers,
            params=params)
        return 'get', args, Path(folder)/f'{pdb_id}_{dumpsParams(params)}.{params.get("encoding", "cif")}'

    def single_retrieve(self, pdb_id: str, suffix: str, params: Dict, folder: Union[Path, str], semaphore, rate: float = 1.5):
        """Schedule one CoordinateServer request."""
        return UnsyncFetch.single_task(
            task=self.task_unit(pdb_id, suffix, params, folder),
            semaphore=semaphore,
            rate=rate)
class PDBArchive(object):
    '''
    Download structure files from a PDB archive mirror.

    * wwPDB/RCSB: PDB_ARCHIVE_URL_WWPDB: str = 'https://ftp.wwpdb.org/pub/pdb/data/structures/'
    * EBI: PDB_ARCHIVE_URL_EBI: str = 'http://ftp.ebi.ac.uk/pub/databases/pdb/data/structures/'
    '''
    root = PDB_ARCHIVE_URL_EBI
    # Valid API suffixes: '{obsolete|divided}/{mmCIF|pdb|XML}/'
    api_set = frozenset(f'{i}/{j}/' for i in ('obsolete', 'divided')
                        for j in ('mmCIF', 'pdb', 'XML'))
    # Maps the format component of the suffix to the downloaded file extension.
    file_dict = {
        'mmCIF': '.cif.gz',
        'pdb': '.ent.gz',
        'XML': '.xml.gz'
    }

    @staticmethod
    def wrap_id(pdb_id, suffix):
        """Return the archive file stem; legacy PDB-format files carry a 'pdb' prefix."""
        if suffix.endswith('pdb/'):
            return f"pdb{pdb_id}"
        else:
            return pdb_id

    @classmethod
    def get_file_suffix(cls, api_suffix):
        """Derive the file extension (e.g. '.cif.gz') implied by an API suffix."""
        for key, value in cls.file_dict.items():
            if key in api_suffix:
                return value
        raise AssertionError(
            f"Unexpected Case for api_suffix: {api_suffix}, {cls.file_dict}")

    @classmethod
    def task_unit(cls, pdb: str, suffix: str, file_suffix: str, folder: Path):
        """Build one ('get', args, output-path) download task (entries sharded by pdb[1:3])."""
        args = dict(
            url=f'{cls.root}{suffix}{pdb[1:3]}/{cls.wrap_id(pdb, suffix)}{cls.get_file_suffix(suffix)}')
        return 'get', args, folder/f'{pdb}{file_suffix}'

    @classmethod
    def yieldTasks(cls, pdbs, suffix: str, file_suffix: str, folder: Path) -> Generator:
        """Yield one download task per PDB id."""
        for pdb in pdbs:
            yield cls.task_unit(pdb, suffix, file_suffix, folder)

    @classmethod
    def retrieve(cls, pdbs, suffix: str, folder: Path, file_suffix: Optional[str] = None, concur_req: int = 20, rate: float = 1.5, ret_res: bool = True, **kwargs):
        """Download many entries concurrently.

        BUGFIX: a ``file_suffix=None`` default used to be passed straight to
        ``yieldTasks`` and ended up literally in the output filename
        ('1abcNone'); it is now derived from *suffix*, consistent with
        ``single_retrieve``.
        """
        if file_suffix is None:
            file_suffix = cls.get_file_suffix(suffix)
        res = UnsyncFetch.multi_tasks(
            cls.yieldTasks(pdbs, suffix, file_suffix, folder),
            concur_req=concur_req,
            rate=rate,
            ret_res=ret_res,
            semaphore=kwargs.get('semaphore', None))
        return res

    @classmethod
    def single_retrieve(cls, pdb, suffix: str, folder: Path, semaphore, file_suffix: Optional[str] = None, rate: float = 1.5):
        """Download one entry; *file_suffix* defaults to the one implied by *suffix*."""
        if file_suffix is None:
            file_suffix = cls.get_file_suffix(suffix)
        return UnsyncFetch.single_task(
            task=cls.task_unit(pdb, suffix, file_suffix, folder),
            semaphore=semaphore,
            rate=rate)
class PDBVersioned(PDBArchive):
    '''
    Download files from the wwPDB Versioned archive.

    * wwPDB Versioned: PDB_ARCHIVE_VERSIONED_URL: str = 'http://ftp-versioned.wwpdb.org/pdb_versioned/data/entries/'

    >>> PDBVersioned.single_retrieve(
        ('2wmg', '_v1-2'), 'entries/',
        init_folder_from_suffix(Base.get_folder(), 'pdb-versioned/entries'),
        Base.get_web_semaphore()).result()
    '''
    root = PDB_ARCHIVE_VERSIONED_URL
    api_set = frozenset(('entries/', 'removed/'))

    @classmethod
    def task_unit(cls, pdb_with_version: Tuple, suffix: str, file_suffix: str, folder: Path):
        """Build the download task for one (pdb_id, version_tag) pair."""
        pdb, version_tag = pdb_with_version
        file_name = f'pdb_0000{pdb}_xyz{version_tag}{file_suffix}'
        task_args = dict(url=f'{cls.root}{suffix}{pdb[1:3]}/pdb_0000{pdb}/{file_name}')
        return 'get', task_args, folder / file_name
class PDBeKBAnnotations(object):
    """Fetch per-entry annotation JSON files from the PDBe-KB annotations archive."""

    ftp_root = f"{FTP_URL}pub/databases/pdbe-kb/annotations/"
    https_root = ftp_root.replace('ftp:', 'https:')
    root = https_root
    api_set = frozenset({
        '14-3-3-pred/', '3DComplex/',
        '3DLigandSite/', 'AKID/',
        'COSPI-Depth/', 'CamKinet/',
        'ChannelsDB/', 'Covalentizer/',
        'DynaMine/', 'FireProtDB/',
        'FoldX/', 'KnotProt/',
        'M-CSA/', 'MetalPDB/',
        'Missense3D/', 'P2rank/',
        'POPScomp_PDBML/', 'ProKinO/',
        'Scop3P/', 'canSAR/',
        'cath-funsites/', 'webNMA/'})

    @staticmethod
    def wrap_id(pdb_id, suffix):
        """M-CSA files carry a '-mcsa' marker in their stem; all others use the bare id."""
        return f"{pdb_id}-mcsa" if suffix == 'M-CSA/' else pdb_id

    @classmethod
    def task_unit(cls, pdb: str, suffix: str, folder: Path):
        """Build one (protocol, request-args, output-path) task for a single entry."""
        entry_stem = cls.wrap_id(pdb, suffix)
        request_args = dict(
            url=f'{cls.root}{suffix}{pdb[1:3]}/{entry_stem}.json')
        # use the FTP fetcher only when the class is configured with the ftp root
        protocol = 'ftp' if cls.root == cls.ftp_root else 'get'
        return protocol, request_args, folder / f'{entry_stem}.json'

    @classmethod
    def single_retrieve(cls, pdb, suffix: str, folder: Path, semaphore, rate: float = 1.5):
        """Schedule the download of one annotation file."""
        return UnsyncFetch.single_task(
            task=cls.task_unit(pdb, suffix, folder),
            semaphore=semaphore,
            rate=rate)

    @staticmethod
    def yieldPDBeKBAnnotations(data):
        """Yield each chain's residue records plus identifying columns."""
        for chain in data['chains']:
            yield chain['residues'], ('data_resource', 'pdb_id', 'chain_id'), (data['data_resource'], data['pdb_id'], chain['chain_label'])
| [
"pdb_profiling.ensure.EnsureBase",
"tenacity.stop_after_attempt",
"tenacity.retry_if_exception_type",
"aiofiles.open",
"pdb_profiling.utils.pipe_out",
"random.choice",
"pdb_profiling.utils.dumpsParams",
"pathlib.Path",
"pdb_profiling.exceptions.WithoutExpectedKeyError",
"tenacity.wait_random",
"... | [((1111, 1123), 'pdb_profiling.ensure.EnsureBase', 'EnsureBase', ([], {}), '()\n', (1121, 1123), False, 'from pdb_profiling.ensure import EnsureBase\n'), ((1146, 1164), 'tenacity.wait_random', 'wait_random', ([], {'max': '(1)'}), '(max=1)\n', (1157, 1164), False, 'from tenacity import retry, wait_random, stop_after_attempt, retry_if_exception_type, RetryError\n'), ((1171, 1192), 'tenacity.stop_after_attempt', 'stop_after_attempt', (['(3)'], {}), '(3)\n', (1189, 1192), False, 'from tenacity import retry, wait_random, stop_after_attempt, retry_if_exception_type, RetryError\n'), ((1200, 1248), 'tenacity.retry_if_exception_type', 'retry_if_exception_type', (['InvalidFileContentError'], {}), '(InvalidFileContentError)\n', (1223, 1248), False, 'from tenacity import retry, wait_random, stop_after_attempt, retry_if_exception_type, RetryError\n'), ((6943, 6953), 'pathlib.Path', 'Path', (['path'], {}), '(path)\n', (6947, 6953), False, 'from pathlib import Path\n'), ((5693, 5716), 'aiofiles.open', 'aiofiles_open', (['ori_path'], {}), '(ori_path)\n', (5706, 5716), True, 'from aiofiles import open as aiofiles_open\n'), ((20164, 20208), 'pdb_profiling.utils.flatten_dict', 'flatten_dict', (['data[pdb]', '"""page_title"""', '(False)'], {}), "(data[pdb], 'page_title', False)\n", (20176, 20208), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((20221, 20267), 'pdb_profiling.utils.flatten_dict', 'flatten_dict', (['records', '"""interface_structure_1"""'], {}), "(records, 'interface_structure_1')\n", (20233, 20267), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((20280, 20326), 'pdb_profiling.utils.flatten_dict', 'flatten_dict', (['records', '"""interface_structure_2"""'], {}), "(records, 'interface_structure_2')\n", (20292, 20326), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((20339, 20382), 
'pdb_profiling.utils.flatten_dict', 'flatten_dict', (['data[pdb]', '"""interface_detail"""'], {}), "(data[pdb], 'interface_detail')\n", (20351, 20382), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((30082, 30100), 'random.choice', 'choice', (['self.roots'], {}), '(self.roots)\n', (30088, 30100), False, 'from random import choice\n'), ((18121, 18192), 'pdb_profiling.exceptions.WithoutExpectedKeyError', 'WithoutExpectedKeyError', (['f"""Without Expected interfacelist info: {data}"""'], {}), "(f'Without Expected interfacelist info: {data}')\n", (18144, 18192), False, 'from pdb_profiling.exceptions import WithoutExpectedKeyError, InvalidFileContentError\n'), ((18892, 18927), 'pdb_profiling.utils.flatten_dict', 'flatten_dict', (['record', '"""structure_1"""'], {}), "(record, 'structure_1')\n", (18904, 18927), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((18944, 18979), 'pdb_profiling.utils.flatten_dict', 'flatten_dict', (['record', '"""structure_2"""'], {}), "(record, 'structure_2')\n", (18956, 18979), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((19997, 20048), 'pdb_profiling.utils.flatten_dict', 'flatten_dict', (["records['interface_structure_1']", 'col'], {}), "(records['interface_structure_1'], col)\n", (20009, 20048), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((20100, 20151), 'pdb_profiling.utils.flatten_dict', 'flatten_dict', (["records['interface_structure_2']", 'col'], {}), "(records['interface_structure_2'], col)\n", (20112, 20151), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((30622, 30634), 'pathlib.Path', 'Path', (['folder'], {}), '(folder)\n', (30626, 30634), False, 'from pathlib import Path\n'), ((6470, 6521), 'pdb_profiling.utils.pipe_out', 'pipe_out', ([], {'df': 'res', 'path': 
'path', 'format': '"""tsv"""', 'mode': '"""w"""'}), "(df=res, path=path, format='tsv', mode='w')\n", (6478, 6521), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((18371, 18442), 'pdb_profiling.exceptions.WithoutExpectedKeyError', 'WithoutExpectedKeyError', (['f"""Without Expected interfacelist info: {data}"""'], {}), "(f'Without Expected interfacelist info: {data}')\n", (18394, 18442), False, 'from pdb_profiling.exceptions import WithoutExpectedKeyError, InvalidFileContentError\n'), ((18750, 18819), 'pdb_profiling.exceptions.WithoutExpectedKeyError', 'WithoutExpectedKeyError', (['f"""Without Expected interface_detail: {data}"""'], {}), "(f'Without Expected interface_detail: {data}')\n", (18773, 18819), False, 'from pdb_profiling.exceptions import WithoutExpectedKeyError, InvalidFileContentError\n'), ((19822, 19891), 'pdb_profiling.exceptions.WithoutExpectedKeyError', 'WithoutExpectedKeyError', (['f"""Without Expected interface_detail: {data}"""'], {}), "(f'Without Expected interface_detail: {data}')\n", (19845, 19891), False, 'from pdb_profiling.exceptions import WithoutExpectedKeyError, InvalidFileContentError\n'), ((25508, 25544), 'pdb_profiling.utils.flatten_dict', 'flatten_dict', (['info', '"""additionalData"""'], {}), "(info, 'additionalData')\n", (25520, 25544), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((30647, 30666), 'pdb_profiling.utils.dumpsParams', 'dumpsParams', (['params'], {}), '(params)\n', (30658, 30666), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((6183, 6250), 'pdb_profiling.utils.pipe_out', 'pipe_out', ([], {'df': 'r', 'path': 'path', 'format': '"""tsv"""', 'mode': "('a' if count else 'w')"}), "(df=r, path=path, format='tsv', mode='a' if count else 'w')\n", (6191, 6250), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((9250, 
9269), 'pdb_profiling.processors.recordbase.IdentifierBase', 'IdentifierBase', (['pdb'], {}), '(pdb)\n', (9264, 9269), False, 'from pdb_profiling.processors.recordbase import IdentifierBase\n'), ((28632, 28651), 'pdb_profiling.utils.dumpsParams', 'dumpsParams', (['params'], {}), '(params)\n', (28643, 28651), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((28834, 28853), 'pdb_profiling.utils.dumpsParams', 'dumpsParams', (['params'], {}), '(params)\n', (28845, 28853), False, 'from pdb_profiling.utils import related_dataframe, flatten_dict, pipe_out, dumpsParams\n'), ((9176, 9198), 'orjson.dumps', 'json.dumps', (['value[key]'], {}), '(value[key])\n', (9186, 9198), True, 'import orjson as json\n'), ((26007, 26023), 'orjson.dumps', 'json.dumps', (['i[j]'], {}), '(i[j])\n', (26017, 26023), True, 'import orjson as json\n'), ((9803, 9829), 'orjson.dumps', 'json.dumps', (['fragement[key]'], {}), '(fragement[key])\n', (9813, 9829), True, 'import orjson as json\n'), ((10945, 10983), 'orjson.dumps', 'json.dumps', (["res['multiple_conformers']"], {}), "(res['multiple_conformers'])\n", (10955, 10983), True, 'import orjson as json\n'), ((13385, 13408), 'orjson.dumps', 'json.dumps', (['entity[key]'], {}), '(entity[key])\n', (13395, 13408), True, 'import orjson as json\n'), ((16929, 16953), 'pdb_profiling.processors.recordbase.IdentifierBase', 'IdentifierBase', (['top_root'], {}), '(top_root)\n', (16943, 16953), False, 'from pdb_profiling.processors.recordbase import IdentifierBase\n'), ((17481, 17507), 'orjson.dumps', 'json.dumps', (["chain['start']"], {}), "(chain['start'])\n", (17491, 17507), True, 'import orjson as json\n'), ((17600, 17624), 'orjson.dumps', 'json.dumps', (["chain['end']"], {}), "(chain['end'])\n", (17610, 17624), True, 'import orjson as json\n'), ((11961, 11984), 'orjson.dumps', 'json.dumps', (['record[key]'], {}), '(record[key])\n', (11971, 11984), True, 'import orjson as json\n'), ((16452, 16469), 
'orjson.dumps', 'json.dumps', (['value'], {}), '(value)\n', (16462, 16469), True, 'import orjson as json\n'), ((16778, 16795), 'orjson.dumps', 'json.dumps', (['value'], {}), '(value)\n', (16788, 16795), True, 'import orjson as json\n')] |
from time import time
from pitop.pma.servo_controller import ServoHardwareSpecs
from .simple_pid import PID
class PanTiltObjectTracker:
_pid_tunings = {
"slow": {"kp": 0.075, "ki": 0.002, "kd": 0.04},
"normal": {"kp": 0.25, "ki": 0.005, "kd": 0.1},
}
_target_lock_range = 10
_slow_fps_limit = 5.0
def __init__(self, pan_servo, tilt_servo):
self.__pan_servo = pan_servo
self.__tilt_servo = tilt_servo
self._previous_time = time()
self.pan_pid = PID(
setpoint=0,
output_limits=(
-ServoHardwareSpecs.SPEED_RANGE,
ServoHardwareSpecs.SPEED_RANGE,
),
)
self.tilt_pid = PID(
setpoint=0,
output_limits=(
-ServoHardwareSpecs.SPEED_RANGE,
ServoHardwareSpecs.SPEED_RANGE,
),
)
self.__set_pid_tunings(pid_mode="normal")
def __call__(self, center):
current_time = time()
dt = current_time - self._previous_time
if dt > 1 / self._slow_fps_limit:
pid_mode = "slow"
else:
pid_mode = "normal"
self._previous_time = current_time
self.__set_pid_tunings(pid_mode=pid_mode)
x, y = center
if abs(x) < self._target_lock_range:
self.__pan_servo.sweep(speed=0)
self.pan_pid.reset()
else:
pan_speed = self.pan_pid(x)
self.__pan_servo.sweep(pan_speed)
if abs(y) < self._target_lock_range:
self.__tilt_servo.sweep(speed=0)
self.tilt_pid.reset()
else:
tilt_speed = self.tilt_pid(y)
self.__tilt_servo.sweep(tilt_speed)
def __set_pid_tunings(self, pid_mode):
self.pan_pid.tunings = list(self._pid_tunings[pid_mode].values())
self.tilt_pid.tunings = list(self._pid_tunings[pid_mode].values())
def reset(self):
self.pan_pid.reset()
self.tilt_pid.reset()
def stop(self):
self.__pan_servo.sweep(0)
self.__tilt_servo.sweep(0)
self.reset()
| [
"time.time"
] | [((487, 493), 'time.time', 'time', ([], {}), '()\n', (491, 493), False, 'from time import time\n'), ((1005, 1011), 'time.time', 'time', ([], {}), '()\n', (1009, 1011), False, 'from time import time\n')] |
# Copyright 2013 VMware, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from distutils import version
import random
import time
from neutron_lib import constants as lib_const
from neutron_lib import context as q_context
from oslo_config import cfg
from oslo_log import log as logging
from oslo_serialization import jsonutils
from oslo_utils import excutils
from sqlalchemy.orm import exc as sa_exc
from vmware_nsx._i18n import _
from vmware_nsx.common import exceptions as nsxv_exc
from vmware_nsx.common import nsxv_constants
from vmware_nsx.common import utils
from vmware_nsx.db import nsxv_db
from vmware_nsx.plugins.nsx_v.vshield.common import constants
from vmware_nsx.plugins.nsx_v.vshield.common import exceptions
from vmware_nsx.plugins.nsx_v.vshield import edge_utils
from vmware_nsx.plugins.nsx_v.vshield.tasks import (
constants as task_constants)
from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks
LOG = logging.getLogger(__name__)
class EdgeApplianceDriver(object):
def __init__(self):
super(EdgeApplianceDriver, self).__init__()
# store the last task per edge that has the latest config
self.updated_task = {
'nat': {},
'route': {},
}
random.seed()
def _assemble_edge(self, name, appliance_size="compact",
deployment_container_id=None, datacenter_moid=None,
enable_aesni=True, dist=False,
enable_fips=False, remote_access=False,
edge_ha=False):
edge = {
'name': name,
'fqdn': None,
'enableAesni': enable_aesni,
'enableFips': enable_fips,
'featureConfigs': {
'features': [
{
'featureType': 'firewall_4.0',
'globalConfig': {
'tcpTimeoutEstablished': 7200
}
}
]
},
'cliSettings': {
'remoteAccess': remote_access
},
'autoConfiguration': {
'enabled': False,
'rulePriority': 'high'
},
'appliances': {
'applianceSize': appliance_size
},
}
if not dist:
edge['type'] = "gatewayServices"
edge['vnics'] = {'vnics': []}
else:
edge['type'] = "distributedRouter"
edge['interfaces'] = {'interfaces': []}
if deployment_container_id:
edge['appliances']['deploymentContainerId'] = (
deployment_container_id)
if datacenter_moid:
edge['datacenterMoid'] = datacenter_moid
if not dist and edge_ha:
self._enable_high_availability(edge)
return edge
def _select_datastores(self, availability_zone):
primary_ds = availability_zone.datastore_id
secondary_ds = availability_zone.ha_datastore_id
if availability_zone.ha_placement_random:
# we want to switch primary and secondary datastores
# half of the times, to balance it
if random.random() > 0.5:
primary_ds = availability_zone.ha_datastore_id
secondary_ds = availability_zone.datastore_id
return primary_ds, secondary_ds
def _assemble_edge_appliances(self, availability_zone):
appliances = []
if availability_zone.ha_datastore_id and availability_zone.edge_ha:
# create appliance with HA
primary_ds, secondary_ds = self._select_datastores(
availability_zone)
appliances.append(self._assemble_edge_appliance(
availability_zone.resource_pool,
primary_ds))
appliances.append(self._assemble_edge_appliance(
availability_zone.resource_pool,
secondary_ds))
elif availability_zone.datastore_id:
# Single datastore
appliances.append(self._assemble_edge_appliance(
availability_zone.resource_pool,
availability_zone.datastore_id))
return appliances
def _assemble_edge_appliance(self, resource_pool_id, datastore_id):
appliance = {}
if resource_pool_id:
appliance['resourcePoolId'] = resource_pool_id
if datastore_id:
appliance['datastoreId'] = datastore_id
return appliance
def _assemble_edge_vnic(self, name, index, portgroup_id, tunnel_index=-1,
primary_address=None, subnet_mask=None,
secondary=None,
type="internal",
enable_proxy_arp=False,
enable_send_redirects=True,
is_connected=True,
mtu=1500,
address_groups=None):
vnic = {
'index': index,
'name': name,
'type': type,
'portgroupId': portgroup_id,
'mtu': mtu,
'enableProxyArp': enable_proxy_arp,
'enableSendRedirects': enable_send_redirects,
'isConnected': is_connected
}
if address_groups is None:
address_groups = []
if not address_groups:
if primary_address and subnet_mask:
address_group = {
'primaryAddress': primary_address,
'subnetMask': subnet_mask
}
if secondary:
address_group['secondaryAddresses'] = {
'ipAddress': secondary,
'type': 'secondary_addresses'
}
vnic['addressGroups'] = {
'addressGroups': [address_group]
}
else:
vnic['subInterfaces'] = {'subInterfaces': address_groups}
else:
if tunnel_index < 0:
vnic['addressGroups'] = {'addressGroups': address_groups}
else:
vnic['subInterfaces'] = {'subInterfaces': address_groups}
return vnic
def _assemble_vdr_interface(self, portgroup_id,
primary_address=None, subnet_mask=None,
secondary=None,
type="internal",
is_connected=True,
mtu=1500,
address_groups=None):
interface = {
'type': type,
'connectedToId': portgroup_id,
'mtu': mtu,
'isConnected': is_connected
}
if address_groups is None:
address_groups = []
if not address_groups:
if primary_address and subnet_mask:
address_group = {
'primaryAddress': primary_address,
'subnetMask': subnet_mask
}
if secondary:
address_group['secondaryAddresses'] = {
'ipAddress': secondary,
'type': 'secondary_addresses'
}
interface['addressGroups'] = {
'addressGroups': [address_group]
}
else:
interface['addressGroups'] = {'addressGroups': address_groups}
interfaces = {'interfaces': [interface]}
return interfaces
def _edge_status_to_level(self, status):
if status == 'GREEN':
status_level = constants.RouterStatus.ROUTER_STATUS_ACTIVE
elif status in ('GREY', 'YELLOW'):
status_level = constants.RouterStatus.ROUTER_STATUS_DOWN
else:
status_level = constants.RouterStatus.ROUTER_STATUS_ERROR
return status_level
def _enable_loadbalancer(self, edge):
if (not edge.get('featureConfigs') or
not edge['featureConfigs'].get('features')):
edge['featureConfigs'] = {'features': []}
edge['featureConfigs']['features'].append(
{'featureType': 'loadbalancer_4.0',
'enabled': True})
def _enable_high_availability(self, edge):
if (not edge.get('featureConfigs') or
not edge['featureConfigs'].get('features')):
edge['featureConfigs'] = {'features': []}
edge['featureConfigs']['features'].append(
{'featureType': 'highavailability_4.0',
'enabled': True})
def get_edge_status(self, edge_id):
try:
response = self.vcns.get_edge_status(edge_id)[1]
status_level = self._edge_status_to_level(
response['edgeStatus'])
except exceptions.VcnsApiException as e:
LOG.error("VCNS: Failed to get edge %(edge_id)s status: "
"Reason: %(reason)s",
{'edge_id': edge_id, 'reason': e.response})
status_level = constants.RouterStatus.ROUTER_STATUS_ERROR
try:
desc = jsonutils.loads(e.response)
if desc.get('errorCode') == (
constants.VCNS_ERROR_CODE_EDGE_NOT_RUNNING):
status_level = constants.RouterStatus.ROUTER_STATUS_DOWN
except ValueError:
LOG.error('Error code not present. %s', e.response)
return status_level
def get_interface(self, edge_id, vnic_index):
# get vnic interface address groups
try:
return self.vcns.query_interface(edge_id, vnic_index)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception("NSXv: Failed to query vnic %s", vnic_index)
def update_interface(self, router_id, edge_id, index, network,
tunnel_index=-1, address=None, netmask=None,
secondary=None, is_connected=True,
address_groups=None):
LOG.debug("VCNS: update vnic %(index)d: %(addr)s %(netmask)s", {
'index': index, 'addr': address, 'netmask': netmask})
if index == constants.EXTERNAL_VNIC_INDEX:
name = constants.EXTERNAL_VNIC_NAME
intf_type = 'uplink'
else:
name = constants.INTERNAL_VNIC_NAME + str(index)
if tunnel_index < 0:
intf_type = 'internal'
else:
intf_type = 'trunk'
config = self._assemble_edge_vnic(
name, index, network, tunnel_index,
address, netmask, secondary, type=intf_type,
address_groups=address_groups, is_connected=is_connected)
self.vcns.update_interface(edge_id, config)
def add_vdr_internal_interface(self, edge_id,
network, address=None, netmask=None,
secondary=None, address_groups=None,
type="internal", is_connected=True):
LOG.debug("Add VDR interface on edge: %s", edge_id)
if address_groups is None:
address_groups = []
interface_req = (
self._assemble_vdr_interface(network, address, netmask, secondary,
address_groups=address_groups,
is_connected=is_connected, type=type))
self.vcns.add_vdr_internal_interface(edge_id, interface_req)
header, response = self.vcns.get_edge_interfaces(edge_id)
for interface in response['interfaces']:
if interface['connectedToId'] == network:
vnic_index = int(interface['index'])
return vnic_index
def update_vdr_internal_interface(self, edge_id, index, network,
address_groups=None, is_connected=True):
if not address_groups:
address_groups = []
interface = {
'type': 'internal',
'connectedToId': network,
'mtu': 1500,
'isConnected': is_connected,
'addressGroups': {'addressGroup': address_groups}
}
interface_req = {'interface': interface}
try:
header, response = self.vcns.update_vdr_internal_interface(
edge_id, index, interface_req)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to update vdr interface on edge: "
"%s", edge_id)
def delete_vdr_internal_interface(self, edge_id, interface_index):
LOG.debug("Delete VDR interface on edge: %s", edge_id)
try:
header, response = self.vcns.delete_vdr_internal_interface(
edge_id, interface_index)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to delete vdr interface on edge: "
"%s",
edge_id)
def delete_interface(self, router_id, edge_id, index):
LOG.debug("Deleting vnic %(vnic_index)s: on edge %(edge_id)s",
{'vnic_index': index, 'edge_id': edge_id})
try:
self.vcns.delete_interface(edge_id, index)
except exceptions.ResourceNotFound:
LOG.error('Failed to delete vnic %(vnic_index)s on edge '
'%(edge_id)s: edge was not found',
{'vnic_index': index,
'edge_id': edge_id})
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to delete vnic %(vnic_index)s: "
"on edge %(edge_id)s",
{'vnic_index': index,
'edge_id': edge_id})
LOG.debug("Deletion complete vnic %(vnic_index)s: on edge %(edge_id)s",
{'vnic_index': index, 'edge_id': edge_id})
def deploy_edge(self, context, router_id, name, internal_network,
dist=False, loadbalancer_enable=True,
appliance_size=nsxv_constants.LARGE,
availability_zone=None, deploy_metadata=False):
edge_name = name
edge = self._assemble_edge(
edge_name, datacenter_moid=availability_zone.datacenter_moid,
deployment_container_id=self.deployment_container_id,
appliance_size=appliance_size, remote_access=False, dist=dist,
edge_ha=availability_zone.edge_ha)
appliances = self._assemble_edge_appliances(availability_zone)
if appliances:
edge['appliances']['appliances'] = appliances
if not dist:
vnic_external = self._assemble_edge_vnic(
constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX,
availability_zone.external_network, type="uplink")
edge['vnics']['vnics'].append(vnic_external)
else:
edge['mgmtInterface'] = {
'connectedToId': availability_zone.external_network,
'name': "mgmtInterface"}
if internal_network:
vnic_inside = self._assemble_edge_vnic(
constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX,
internal_network,
edge_utils.get_vdr_transit_network_plr_address(),
edge_utils.get_vdr_transit_network_netmask(),
type="internal")
edge['vnics']['vnics'].append(vnic_inside)
# If default login credentials for Edge are set, configure accordingly
if (cfg.CONF.nsxv.edge_appliance_user and
cfg.CONF.nsxv.edge_appliance_password):
edge['cliSettings'].update({
'userName': cfg.CONF.nsxv.edge_appliance_user,
'password': cfg.CONF.nsxv.edge_appliance_password})
if not dist and loadbalancer_enable:
self._enable_loadbalancer(edge)
edge_id = None
try:
header = self.vcns.deploy_edge(edge)[0]
edge_id = header.get('location', '/').split('/')[-1]
if edge_id:
nsxv_db.update_nsxv_router_binding(
context.session, router_id, edge_id=edge_id)
if not dist:
# Init Edge vnic binding
nsxv_db.init_edge_vnic_binding(
context.session, edge_id)
else:
if router_id:
nsxv_db.update_nsxv_router_binding(
context.session, router_id,
status=lib_const.ERROR)
error = _('Failed to deploy edge')
raise nsxv_exc.NsxPluginException(err_msg=error)
self.callbacks.complete_edge_creation(
context, edge_id, name, router_id, dist, True,
availability_zone=availability_zone,
deploy_metadata=deploy_metadata)
except exceptions.VcnsApiException:
self.callbacks.complete_edge_creation(
context, edge_id, name, router_id, dist, False,
availability_zone=availability_zone)
with excutils.save_and_reraise_exception():
LOG.exception("NSXv: deploy edge failed.")
return edge_id
def update_edge(self, context, router_id, edge_id, name, internal_network,
dist=False, loadbalancer_enable=True,
appliance_size=nsxv_constants.LARGE,
set_errors=False, availability_zone=None):
"""Update edge name."""
edge = self._assemble_edge(
name, datacenter_moid=availability_zone.datacenter_moid,
deployment_container_id=self.deployment_container_id,
appliance_size=appliance_size, remote_access=False, dist=dist,
edge_ha=availability_zone.edge_ha)
edge['id'] = edge_id
appliances = self._assemble_edge_appliances(availability_zone)
if appliances:
edge['appliances']['appliances'] = appliances
if not dist:
vnic_external = self._assemble_edge_vnic(
constants.EXTERNAL_VNIC_NAME, constants.EXTERNAL_VNIC_INDEX,
availability_zone.external_network, type="uplink")
edge['vnics']['vnics'].append(vnic_external)
else:
edge['mgmtInterface'] = {
'connectedToId': availability_zone.external_network,
'name': "mgmtInterface"}
if internal_network:
internal_vnic = self._assemble_edge_vnic(
constants.INTERNAL_VNIC_NAME, constants.INTERNAL_VNIC_INDEX,
internal_network,
edge_utils.get_vdr_transit_network_plr_address(),
edge_utils.get_vdr_transit_network_netmask(),
type="internal")
edge['vnics']['vnics'].append(internal_vnic)
if not dist and loadbalancer_enable:
self._enable_loadbalancer(edge)
try:
self.vcns.update_edge(edge_id, edge)
self.callbacks.complete_edge_update(
context, edge_id, router_id, True, set_errors)
except exceptions.VcnsApiException as e:
LOG.error("Failed to update edge: %s",
e.response)
self.callbacks.complete_edge_update(
context, edge_id, router_id, False, set_errors)
return False
return True
def rename_edge(self, edge_id, name):
"""rename edge."""
try:
# First get the current edge structure
# [0] is the status, [1] is the body
edge = self.vcns.get_edge(edge_id)[1]
if edge['name'] == name:
LOG.debug('Edge %s is already named %s', edge_id, name)
return
# remove some data that will make the update fail
edge_utils.remove_irrelevant_keys_from_edge_request(edge)
# set the new name in the request
edge['name'] = name
# update the edge
self.vcns.update_edge(edge_id, edge)
except exceptions.VcnsApiException as e:
LOG.error("Failed to rename edge: %s",
e.response)
def resize_edge(self, edge_id, size):
"""update the size of a router edge."""
try:
# First get the current edge structure
# [0] is the status, [1] is the body
edge = self.vcns.get_edge(edge_id)[1]
if edge.get('appliances'):
if edge['appliances']['applianceSize'] == size:
LOG.debug('Edge %s is already with size %s',
edge_id, size)
return
ver = self.vcns.get_version()
if version.LooseVersion(ver) < version.LooseVersion('6.2.3'):
# remove some data that will make the update fail
edge_utils.remove_irrelevant_keys_from_edge_request(edge)
# set the new size in the request
edge['appliances']['applianceSize'] = size
# update the edge
self.vcns.update_edge(edge_id, edge)
except exceptions.VcnsApiException as e:
LOG.error("Failed to resize edge: %s", e.response)
def delete_edge(self, context, router_id, edge_id, dist=False):
LOG.debug("Deleting edge %s", edge_id)
if context is None:
context = q_context.get_admin_context()
try:
LOG.debug("Deleting router binding %s", router_id)
nsxv_db.delete_nsxv_router_binding(context.session, router_id)
if not dist:
LOG.debug("Deleting vnic bindings for edge %s", edge_id)
nsxv_db.clean_edge_vnic_binding(context.session, edge_id)
except sa_exc.NoResultFound:
LOG.warning("Router Binding for %s not found", router_id)
if edge_id:
try:
self.vcns.delete_edge(edge_id)
return True
except exceptions.ResourceNotFound:
return True
except exceptions.VcnsApiException as e:
LOG.exception("VCNS: Failed to delete %(edge_id)s:\n"
"%(response)s",
{'edge_id': edge_id, 'response': e.response})
return False
except Exception:
LOG.exception("VCNS: Failed to delete %s", edge_id)
return False
def _assemble_nat_rule(self, action, original_address,
translated_address,
vnic_index=None,
enabled=True,
protocol='any',
original_port='any',
translated_port='any'):
nat_rule = {}
nat_rule['action'] = action
if vnic_index is not None:
nat_rule['vnic'] = vnic_index
nat_rule['originalAddress'] = original_address
nat_rule['translatedAddress'] = translated_address
nat_rule['enabled'] = enabled
nat_rule['protocol'] = protocol
nat_rule['originalPort'] = original_port
nat_rule['translatedPort'] = translated_port
return nat_rule
def get_nat_config(self, edge_id):
try:
return self.vcns.get_nat_config(edge_id)[1]
except exceptions.VcnsApiException as e:
LOG.exception("VCNS: Failed to get nat config:\n%s",
e.response)
raise e
def update_nat_rules(self, edge_id, snats, dnats, indices=None):
LOG.debug("VCNS: update nat rule\n"
"SNAT:%(snat)s\n"
"DNAT:%(dnat)s\n"
"INDICES: %(index)s\n", {
'snat': snats, 'dnat': dnats, 'index': indices})
nat_rules = []
for dnat in dnats:
vnic_index = None
if 'vnic_index' in dnat:
vnic_index = dnat['vnic_index']
if vnic_index or not indices:
# we are adding a predefined index or
# adding to all interfaces
nat_rules.append(self._assemble_nat_rule(
'dnat', dnat['dst'], dnat['translated'],
vnic_index=vnic_index
))
nat_rules.append(self._assemble_nat_rule(
'snat', dnat['translated'], dnat['dst'],
vnic_index=vnic_index
))
else:
for index in indices:
nat_rules.append(self._assemble_nat_rule(
'dnat', dnat['dst'], dnat['translated'],
vnic_index=index
))
nat_rules.append(self._assemble_nat_rule(
'snat', dnat['translated'], dnat['dst'],
vnic_index=index
))
for snat in snats:
vnic_index = None
if 'vnic_index' in snat:
vnic_index = snat['vnic_index']
if vnic_index or not indices:
# we are adding a predefined index
# or adding to all interfaces
nat_rules.append(self._assemble_nat_rule(
'snat', snat['src'], snat['translated'],
vnic_index=vnic_index
))
else:
for index in indices:
nat_rules.append(self._assemble_nat_rule(
'snat', snat['src'], snat['translated'],
vnic_index=index
))
nat = {
'featureType': 'nat',
'rules': {
'natRulesDtos': nat_rules
}
}
try:
self.vcns.update_nat_config(edge_id, nat)
return True
except exceptions.VcnsApiException as e:
LOG.exception("VCNS: Failed to create snat rule:\n%s",
e.response)
return False
def update_routes(self, edge_id, gateway, routes):
if gateway:
gateway = gateway.split('/')[0]
static_routes = []
for route in routes:
if route.get('vnic_index') is None:
static_routes.append({
"description": "",
"vnic": constants.INTERNAL_VNIC_INDEX,
"network": route['cidr'],
"nextHop": route['nexthop']
})
else:
static_routes.append({
"description": "",
"vnic": route['vnic_index'],
"network": route['cidr'],
"nextHop": route['nexthop']
})
request = {
"staticRoutes": {
"staticRoutes": static_routes
}
}
if gateway:
request["defaultRoute"] = {
"description": "default-gateway",
"gatewayAddress": gateway
}
try:
self.vcns.update_routes(edge_id, request)
return True
except exceptions.VcnsApiException as e:
LOG.exception("VCNS: Failed to update routes:\n%s",
e.response)
return False
def create_lswitch(self, name, tz_config, tags=None,
port_isolation=False, replication_mode="service"):
lsconfig = {
'display_name': utils.check_and_truncate(name),
"tags": tags or [],
"type": "LogicalSwitchConfig",
"_schema": "/ws.v1/schema/LogicalSwitchConfig",
"transport_zones": tz_config
}
if port_isolation is bool:
lsconfig["port_isolation_enabled"] = port_isolation
if replication_mode:
lsconfig["replication_mode"] = replication_mode
response = self.vcns.create_lswitch(lsconfig)[1]
return response
def delete_lswitch(self, lswitch_id):
self.vcns.delete_lswitch(lswitch_id)
def get_loadbalancer_config(self, edge_id):
try:
header, response = self.vcns.get_loadbalancer_config(
edge_id)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to get service config")
return response
def enable_service_loadbalancer(self, edge_id):
config = self.get_loadbalancer_config(
edge_id)
if not config['enabled']:
config['enabled'] = True
try:
self.vcns.enable_service_loadbalancer(edge_id, config)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to enable loadbalancer "
"service config")
def _delete_port_group(self, task):
try:
self.vcns.delete_port_group(
task.userdata['dvs_id'],
task.userdata['port_group_id'])
except Exception as e:
LOG.error('Unable to delete %(pg)s exception %(ex)s',
{'pg': task.userdata['port_group_id'],
'ex': e})
return task_constants.TaskStatus.ERROR
return task_constants.TaskStatus.COMPLETED
def _retry_task(self, task):
delay = 0.5
max_retries = max(cfg.CONF.nsxv.retries, 1)
args = task.userdata.get('args', [])
kwargs = task.userdata.get('kwargs', {})
retry_number = task.userdata['retry_number']
retry_command = task.userdata['retry_command']
try:
retry_command(*args, **kwargs)
except Exception as exc:
LOG.debug("Task %(name)s retry %(retry)s failed %(exc)s",
{'name': task.name,
'exc': exc,
'retry': retry_number})
retry_number += 1
if retry_number > max_retries:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to %s", task.name)
else:
task.userdata['retry_number'] = retry_number
# Sleep twice as long as the previous retry
tts = (2 ** (retry_number - 1)) * delay
time.sleep(min(tts, 60))
return task_constants.TaskStatus.PENDING
LOG.info("Task %(name)s completed.", {'name': task.name})
return task_constants.TaskStatus.COMPLETED
def delete_port_group(self, dvs_id, port_group_id):
task_name = 'delete-port-group-%s-%s' % (port_group_id, dvs_id)
userdata = {'retry_number': 1,
'retry_command': self.vcns.delete_port_group,
'args': [dvs_id, port_group_id]}
task = tasks.Task(task_name, port_group_id,
self._retry_task,
status_callback=self._retry_task,
userdata=userdata)
self.task_manager.add(task)
def delete_virtual_wire(self, vw_id):
task_name = 'delete-virtualwire-%s' % vw_id
userdata = {'retry_number': 1,
'retry_command': self.vcns.delete_virtual_wire,
'args': [vw_id]}
task = tasks.Task(task_name, vw_id,
self._retry_task,
status_callback=self._retry_task,
userdata=userdata)
self.task_manager.add(task)
def create_bridge(self, device_name, bridge):
try:
self.vcns.create_bridge(device_name, bridge)
except exceptions.VcnsApiException:
with excutils.save_and_reraise_exception():
LOG.exception("Failed to create bridge in the %s",
device_name)
def delete_bridge(self, device_name):
try:
self.vcns.delete_bridge(device_name)
except exceptions.VcnsApiException:
LOG.exception("Failed to delete bridge in the %s",
device_name)
def update_edge_ha(self, edge_id):
ha_request = {
'featureType': "highavailability_4.0",
'enabled': True}
self.vcns.enable_ha(edge_id, ha_request)
def update_edge_syslog(self, edge_id, syslog_config, router_id):
if 'server_ip' not in syslog_config:
LOG.warning("Server IP missing in syslog config for %s",
router_id)
return
protocol = syslog_config.get('protocol', 'tcp')
if protocol not in ['tcp', 'udp']:
LOG.warning("Invalid protocol in syslog config for %s",
router_id)
return
loglevel = syslog_config.get('log_level')
if loglevel and loglevel not in edge_utils.SUPPORTED_EDGE_LOG_LEVELS:
LOG.warning("Invalid loglevel in syslog config for %s",
router_id)
return
server_ip = syslog_config['server_ip']
request = {'featureType': 'syslog',
'protocol': protocol,
'serverAddresses': {'ipAddress': [server_ip],
'type': 'IpAddressesDto'}}
# edge allows up to 2 syslog servers
if 'server2_ip' in syslog_config:
request['serverAddresses']['ipAddress'].append(
syslog_config['server2_ip'])
self.vcns.update_edge_syslog(edge_id, request)
# update log level for routing in separate API call
if loglevel:
edge_utils.update_edge_loglevel(self.vcns, edge_id,
'routing', loglevel)
| [
"oslo_log.log.getLogger",
"vmware_nsx.db.nsxv_db.delete_nsxv_router_binding",
"vmware_nsx.plugins.nsx_v.vshield.edge_utils.get_vdr_transit_network_plr_address",
"vmware_nsx.plugins.nsx_v.vshield.edge_utils.remove_irrelevant_keys_from_edge_request",
"vmware_nsx._i18n._",
"vmware_nsx.plugins.nsx_v.vshield.e... | [((1462, 1489), 'oslo_log.log.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1479, 1489), True, 'from oslo_log import log as logging\n'), ((1765, 1778), 'random.seed', 'random.seed', ([], {}), '()\n', (1776, 1778), False, 'import random\n'), ((31756, 31868), 'vmware_nsx.plugins.nsx_v.vshield.tasks.tasks.Task', 'tasks.Task', (['task_name', 'port_group_id', 'self._retry_task'], {'status_callback': 'self._retry_task', 'userdata': 'userdata'}), '(task_name, port_group_id, self._retry_task, status_callback=self\n ._retry_task, userdata=userdata)\n', (31766, 31868), False, 'from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks\n'), ((32232, 32336), 'vmware_nsx.plugins.nsx_v.vshield.tasks.tasks.Task', 'tasks.Task', (['task_name', 'vw_id', 'self._retry_task'], {'status_callback': 'self._retry_task', 'userdata': 'userdata'}), '(task_name, vw_id, self._retry_task, status_callback=self.\n _retry_task, userdata=userdata)\n', (32242, 32336), False, 'from vmware_nsx.plugins.nsx_v.vshield.tasks import tasks\n'), ((20714, 20771), 'vmware_nsx.plugins.nsx_v.vshield.edge_utils.remove_irrelevant_keys_from_edge_request', 'edge_utils.remove_irrelevant_keys_from_edge_request', (['edge'], {}), '(edge)\n', (20765, 20771), False, 'from vmware_nsx.plugins.nsx_v.vshield import edge_utils\n'), ((22272, 22301), 'neutron_lib.context.get_admin_context', 'q_context.get_admin_context', ([], {}), '()\n', (22299, 22301), True, 'from neutron_lib import context as q_context\n'), ((22390, 22452), 'vmware_nsx.db.nsxv_db.delete_nsxv_router_binding', 'nsxv_db.delete_nsxv_router_binding', (['context.session', 'router_id'], {}), '(context.session, router_id)\n', (22424, 22452), False, 'from vmware_nsx.db import nsxv_db\n'), ((28384, 28414), 'vmware_nsx.common.utils.check_and_truncate', 'utils.check_and_truncate', (['name'], {}), '(name)\n', (28408, 28414), False, 'from vmware_nsx.common import utils\n'), ((34542, 34614), 
'vmware_nsx.plugins.nsx_v.vshield.edge_utils.update_edge_loglevel', 'edge_utils.update_edge_loglevel', (['self.vcns', 'edge_id', '"""routing"""', 'loglevel'], {}), "(self.vcns, edge_id, 'routing', loglevel)\n", (34573, 34614), False, 'from vmware_nsx.plugins.nsx_v.vshield import edge_utils\n'), ((3739, 3754), 'random.random', 'random.random', ([], {}), '()\n', (3752, 3754), False, 'import random\n'), ((16098, 16146), 'vmware_nsx.plugins.nsx_v.vshield.edge_utils.get_vdr_transit_network_plr_address', 'edge_utils.get_vdr_transit_network_plr_address', ([], {}), '()\n', (16144, 16146), False, 'from vmware_nsx.plugins.nsx_v.vshield import edge_utils\n'), ((16164, 16208), 'vmware_nsx.plugins.nsx_v.vshield.edge_utils.get_vdr_transit_network_netmask', 'edge_utils.get_vdr_transit_network_netmask', ([], {}), '()\n', (16206, 16208), False, 'from vmware_nsx.plugins.nsx_v.vshield import edge_utils\n'), ((16937, 17016), 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding', 'nsxv_db.update_nsxv_router_binding', (['context.session', 'router_id'], {'edge_id': 'edge_id'}), '(context.session, router_id, edge_id=edge_id)\n', (16971, 17016), False, 'from vmware_nsx.db import nsxv_db\n'), ((17442, 17468), 'vmware_nsx._i18n._', '_', (['"""Failed to deploy edge"""'], {}), "('Failed to deploy edge')\n", (17443, 17468), False, 'from vmware_nsx._i18n import _\n'), ((17491, 17533), 'vmware_nsx.common.exceptions.NsxPluginException', 'nsxv_exc.NsxPluginException', ([], {'err_msg': 'error'}), '(err_msg=error)\n', (17518, 17533), True, 'from vmware_nsx.common import exceptions as nsxv_exc\n'), ((19516, 19564), 'vmware_nsx.plugins.nsx_v.vshield.edge_utils.get_vdr_transit_network_plr_address', 'edge_utils.get_vdr_transit_network_plr_address', ([], {}), '()\n', (19562, 19564), False, 'from vmware_nsx.plugins.nsx_v.vshield import edge_utils\n'), ((19582, 19626), 'vmware_nsx.plugins.nsx_v.vshield.edge_utils.get_vdr_transit_network_netmask', 'edge_utils.get_vdr_transit_network_netmask', ([], {}), '()\n', 
(19624, 19626), False, 'from vmware_nsx.plugins.nsx_v.vshield import edge_utils\n'), ((21614, 21639), 'distutils.version.LooseVersion', 'version.LooseVersion', (['ver'], {}), '(ver)\n', (21634, 21639), False, 'from distutils import version\n'), ((21642, 21671), 'distutils.version.LooseVersion', 'version.LooseVersion', (['"""6.2.3"""'], {}), "('6.2.3')\n", (21662, 21671), False, 'from distutils import version\n'), ((21755, 21812), 'vmware_nsx.plugins.nsx_v.vshield.edge_utils.remove_irrelevant_keys_from_edge_request', 'edge_utils.remove_irrelevant_keys_from_edge_request', (['edge'], {}), '(edge)\n', (21806, 21812), False, 'from vmware_nsx.plugins.nsx_v.vshield import edge_utils\n'), ((22567, 22624), 'vmware_nsx.db.nsxv_db.clean_edge_vnic_binding', 'nsxv_db.clean_edge_vnic_binding', (['context.session', 'edge_id'], {}), '(context.session, edge_id)\n', (22598, 22624), False, 'from vmware_nsx.db import nsxv_db\n'), ((9723, 9750), 'oslo_serialization.jsonutils.loads', 'jsonutils.loads', (['e.response'], {}), '(e.response)\n', (9738, 9750), False, 'from oslo_serialization import jsonutils\n'), ((10302, 10339), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (10337, 10339), False, 'from oslo_utils import excutils\n'), ((13065, 13102), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (13100, 13102), False, 'from oslo_utils import excutils\n'), ((13545, 13582), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (13580, 13582), False, 'from oslo_utils import excutils\n'), ((14312, 14349), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (14347, 14349), False, 'from oslo_utils import excutils\n'), ((17132, 17188), 'vmware_nsx.db.nsxv_db.init_edge_vnic_binding', 'nsxv_db.init_edge_vnic_binding', (['context.session', 'edge_id'], {}), '(context.session, 
edge_id)\n', (17162, 17188), False, 'from vmware_nsx.db import nsxv_db\n'), ((17282, 17373), 'vmware_nsx.db.nsxv_db.update_nsxv_router_binding', 'nsxv_db.update_nsxv_router_binding', (['context.session', 'router_id'], {'status': 'lib_const.ERROR'}), '(context.session, router_id, status=\n lib_const.ERROR)\n', (17316, 17373), False, 'from vmware_nsx.db import nsxv_db\n'), ((17981, 18018), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (18016, 18018), False, 'from oslo_utils import excutils\n'), ((29174, 29211), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (29209, 29211), False, 'from oslo_utils import excutils\n'), ((29632, 29669), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (29667, 29669), False, 'from oslo_utils import excutils\n'), ((32628, 32665), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (32663, 32665), False, 'from oslo_utils import excutils\n'), ((30944, 30981), 'oslo_utils.excutils.save_and_reraise_exception', 'excutils.save_and_reraise_exception', ([], {}), '()\n', (30979, 30981), False, 'from oslo_utils import excutils\n')] |
#!/usr/bin/env python
"""
Created by: <NAME>
Description: Functions for HMMER-DB.
"""
# Imports & Setup:
import csv
import sys
from Bio import SeqIO
import subprocess
import re
# Regex's
# Raw strings prevent "invalid escape sequence" DeprecationWarnings
# (e.g. "\(" is not a valid *string* escape in Python 3.6+).
LocusRegex = re.compile(r"\(Locus:\s\S*\)")
LocationRegex = re.compile(r"\(Location:\s\[(\S*)\:(\S*)\]\((\S)\)\)")
# ----------------------------------------------------------------------------------------
def extract_sequence_records(organism_file_path, file_type):
    """
    Read in sequence files as sequence record objects using Biopython.

    :param organism_file_path: The path to the input file.
    :param file_type: Format name understood by Bio.SeqIO (e.g. "fasta").
    :return: List of Biopython sequence record objects.
    """
    try:
        # NOTE(review): message says FASTA but file_type may be another format.
        print(">> Opening FASTA file: " + organism_file_path)
        # "rU" (universal-newline) mode was removed in Python 3.11; plain text
        # mode already translates newlines.  The context manager also closes
        # the handle even when SeqIO.parse raises mid-iteration.
        with open(organism_file_path, "r") as handle:
            try:
                records = list(SeqIO.parse(handle, file_type))
            except ValueError as error:
                print("Error has occurred while parsing " + organism_file_path + "!")
                print(str(error))
                sys.exit(1)
    except IOError:
        print("Failed to open " + organism_file_path)
        sys.exit(1)
    return records
# -----------------------------------------------------------------------------------------------------------
def check_extensions(organism_file_path, csv_file_path, hmm_file_paths, sql_file_paths):
    """
    Warn (on stdout) about input files whose extensions look wrong.

    :param organism_file_path: Path to the organism database file.
    :param csv_file_path: Path to the organism information database file.
    :param hmm_file_paths: Paths to the HMM model files.
    :param sql_file_paths: Path to the sqlite3 file.
    """
    print(">> Performing file extension checks...")
    # (path, expected extension, label used in the warning message)
    expectations = [(organism_file_path, ".faa", "fasta"),
                    (csv_file_path, ".csv", "csv")]
    expectations.extend((hmm_path, ".hmm", "HMM") for hmm_path in hmm_file_paths)
    expectations.append((sql_file_paths, ".sqlite", "sqlite"))
    for path, extension, label in expectations:
        if not path.endswith(extension):
            print("[Warning] " + path + " may not be a %s file!" % label)
# ----------------------------------------------------------------------------------------
def generate_fasta_string(sec_record_list):
    """
    Creates a FASTA formatted string containing sequences from a list of sequence record objects.

    :param sec_record_list: List of Biopython sequence record objects.
    :return: String containing FASTA formatted strings.
    """
    # Each record renders itself; concatenate all renderings in order.
    return ''.join(record.format("fasta") for record in sec_record_list)
# ----------------------------------------------------------------------------------------
def generate_fasta_dict(sec_record_list):
    """
    Creates a dictionary containing FASTA formatted strings from a list of sequence record objects.
    This dictionary is keyed by the sequence ID.

    :param sec_record_list: List of Biopython sequence record objects.
    :return: Dictionary containing FASTA formatted strings.
    """
    return {record.id: record.format("fasta") for record in sec_record_list}
# ----------------------------------------------------------------------------------------
def hmm_search(fasta_string, hmmer_model_path, processes):
    """
    Runs HMMER with settings specific for extracting subject sequences.

    :param fasta_string: String containing protein sequences in FASTA format.
    :param hmmer_model_path: Path to the HMM model to be used as a query.
    :param processes: CPU count forwarded to hmmsearch --cpu.
    :return: String containing hmmsearch output.
    """
    # universal_newlines=True puts the pipes in text mode: under Python 3 a
    # byte-mode pipe rejects the str passed to communicate() with a TypeError,
    # and bufsize=1 (line buffering) is only meaningful in text mode.
    process = subprocess.Popen(["hmmsearch", "--acc", "--cpu", str(processes), hmmer_model_path, "-"],
                               stdin=subprocess.PIPE, stdout=subprocess.PIPE, bufsize=1,
                               universal_newlines=True)
    # communicate() returns (stdout, stderr).  NOTE(review): stderr is not
    # redirected, so `error` is always None and the exit branch below can
    # never fire — kept as-is to avoid changing error behaviour; confirm intent.
    stdout, error = process.communicate(fasta_string)
    if error:
        print(str(error))
        sys.exit(1)
    else:
        return stdout
# ------------------------------------------------------------------------------------------------------------
def get_hit_protein_data(hmm_hit_table, annotation_fasta_dict, organism_accession):
    """
    Creates a list of lists which contain protein information.

    :param hmm_hit_table: Table of HMM hit objects.
    :param annotation_fasta_dict: Dictionary containing FASTA sequences keyed by their IDs
    :param organism_accession: The accession of the organism.
    :return: A list of lists of hit protein properties.
    """
    hit_proteins = []
    for hit in hmm_hit_table:
        accession = hit.target_protein
        fasta = annotation_fasta_dict[accession]
        # "(Locus: xyz)" -> take the second whitespace token and drop ")".
        locus = str(LocusRegex.search(fasta).group(0)).split()[1].rstrip(")")
        location = LocationRegex.search(fasta)
        try:
            row = [accession,
                   organism_accession,
                   locus,
                   int(location.group(1)),
                   int(location.group(2)),
                   location.group(3),
                   fasta]
        except AttributeError as error:
            # location is None when the FASTA header carries no location tag.
            print(hit)
            print(fasta)
            print(location)
            print("This is the organism: ", organism_accession)
            print("The AttributeError was ", str(error))
            sys.exit(1)
        hit_proteins.append(row)
    return hit_proteins
# -----------------------------------------------------------------------------------------------------------
def extract_csv_dict(input_csv_path):
    """
    Opens OrganismDB CSV file for reading and stores as dictionary.

    :param input_csv_path: Path to the input OrganismDB CSV file.
    :return: Dictionary with each row in the CSV keyed by the organism accession (CSV row one).
    """
    organism_data_csv = {}
    try:
        print(">> Opening organism CSV file: " + input_csv_path)
        # A context manager guarantees the handle is closed even if the csv
        # module raises mid-iteration (the original leaked it on that path).
        with open(input_csv_path, "r") as read_file:
            for row in csv.reader(read_file):
                # Row[0] is the organism accession; strip any version suffix
                # (e.g. "ABC123.1" -> "ABC123").
                organism_data_csv[row[0].split('.')[0]] = row
    except IOError:
        print("Failed to open " + input_csv_path)
        sys.exit(1)
    return organism_data_csv
# -----------------------------------------------------------------------------------------------------------
def insert_organism_info(db_cursor, organism_info):
    """
    Inserts (or replaces) one organism row in the Organisms table.

    :param db_cursor: Sqlite3 database cursor.
    :param organism_info: Sequence of six values matching the column list below.
    """
    sql = '''INSERT OR REPLACE INTO Organisms
    (
    Organism_Accession,
    Accession_Type,
    Organism_Description,
    Source,
    Organism_Phylogeny,
    Sequence_Length
    )
    VALUES
    (?,?,?,?,?,?)'''
    db_cursor.execute(sql, organism_info)
# -----------------------------------------------------------------------------------------------------------
# 8:
def insert_proteins(db_cursor, hit_proteins):
    """
    Inserts (or replaces) protein rows in the Proteins table.

    :param db_cursor: Sqlite3 database cursor.
    :param hit_proteins: Iterable of seven-element rows matching the columns below.
    """
    sql = '''INSERT OR REPLACE INTO Proteins
    (
    Protein_Accession,
    Organism_Accession,
    Locus,
    Start,
    "End",
    Strand,
    FASTA_Sequence
    )
    VALUES
    (?,?,?,?,?,?,?)'''
    # executemany performs the same per-row INSERT for every protein.
    db_cursor.executemany(sql, hit_proteins)
# -----------------------------------------------------------------------------------------------------------
def insert_hits(cursor, hmm_hit_list):
    """
    Inserts hits into DB; each hit's md5 hash serves as the primary key.

    :param cursor: Sqlite3 database cursor.
    :param hmm_hit_list: List of hmm hit objects.
    """
    sql = '''INSERT OR REPLACE INTO HMM_Hits
    (
    Hit_HASH,
    Protein_Accession,
    HMM_Model,
    HMM_Score,
    HMM_E_Value,
    Ali_From,
    Ali_To,
    HMM_From,
    HMM_To,
    HMM_Coverage
    )
    VALUES
    (?,?,?,?,?,?,?,?,?,?)'''
    rows = ([hit.get_md5(), hit.target_protein, hit.hmm_name, hit.score,
             hit.e_value, hit.ali_from, hit.ali_to, hit.hmm_from, hit.hmm_to,
             hit.hmm_coverage] for hit in hmm_hit_list)
    cursor.executemany(sql, rows)
| [
"Bio.SeqIO.parse",
"csv.reader",
"sys.exit",
"re.compile"
] | [((207, 240), 're.compile', 're.compile', (['"""\\\\(Locus:\\\\s\\\\S*\\\\)"""'], {}), "('\\\\(Locus:\\\\s\\\\S*\\\\)')\n", (217, 240), False, 'import re\n'), ((253, 317), 're.compile', 're.compile', (['"""\\\\(Location:\\\\s\\\\[(\\\\S*)\\\\:(\\\\S*)\\\\]\\\\((\\\\S)\\\\)\\\\)"""'], {}), "('\\\\(Location:\\\\s\\\\[(\\\\S*)\\\\:(\\\\S*)\\\\]\\\\((\\\\S)\\\\)\\\\)')\n", (263, 317), False, 'import re\n'), ((3928, 3939), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (3936, 3939), False, 'import sys\n'), ((5789, 5810), 'csv.reader', 'csv.reader', (['read_file'], {}), '(read_file)\n', (5799, 5810), False, 'import csv\n'), ((1025, 1036), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (1033, 1036), False, 'import sys\n'), ((6001, 6012), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (6009, 6012), False, 'import sys\n'), ((770, 800), 'Bio.SeqIO.parse', 'SeqIO.parse', (['handle', 'file_type'], {}), '(handle, file_type)\n', (781, 800), False, 'from Bio import SeqIO\n'), ((929, 940), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (937, 940), False, 'import sys\n'), ((5234, 5245), 'sys.exit', 'sys.exit', (['(1)'], {}), '(1)\n', (5242, 5245), False, 'import sys\n')] |
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
from traits.api import Bool
from force_bdss.api import BaseUIHooksFactory, BaseUIHooksManager
class ProbeUIHooksManager(BaseUIHooksManager):
    """Hooks manager probe: records which hooks fired and can raise on demand."""

    # Flags recording which hook methods have been invoked.
    before_execution_called = Bool()
    after_execution_called = Bool()
    before_save_called = Bool()

    # Flip any of these to make the corresponding hook raise.
    before_execution_raises = Bool(False)
    after_execution_raises = Bool(False)
    before_save_raises = Bool(False)

    def before_execution(self, task):
        """Record the pre-execution call; raise if configured to."""
        self.before_execution_called = True
        if not self.before_execution_raises:
            return
        raise Exception("Boom")

    def after_execution(self, task):
        """Record the post-execution call; raise if configured to."""
        self.after_execution_called = True
        if not self.after_execution_raises:
            return
        raise Exception("Boom")

    def before_save(self, task):
        """Record the pre-save call; raise if configured to."""
        self.before_save_called = True
        if not self.before_save_raises:
            return
        raise Exception("Boom")
class ProbeUIHooksFactory(BaseUIHooksFactory):
    """Factory probe that hands out ProbeUIHooksManager instances."""

    # Flip this flag to make create_ui_hooks_manager() fail on purpose.
    create_ui_hooks_manager_raises = Bool()

    def get_identifier(self):
        """Unique identifier of this factory."""
        return "probe_ui_hooks"

    def get_name(self):
        """Human-readable factory name."""
        return "Probe UI Hooks"

    def get_ui_hooks_manager_class(self):
        """Class of the manager produced by this factory."""
        return ProbeUIHooksManager

    def create_ui_hooks_manager(self):
        """Build a manager, unless the probe flag requests a failure."""
        if not self.create_ui_hooks_manager_raises:
            return self.ui_hooks_manager_class(self)
        raise Exception("Boom")
| [
"traits.api.Bool"
] | [((253, 259), 'traits.api.Bool', 'Bool', ([], {}), '()\n', (257, 259), False, 'from traits.api import Bool\n'), ((289, 295), 'traits.api.Bool', 'Bool', ([], {}), '()\n', (293, 295), False, 'from traits.api import Bool\n'), ((321, 327), 'traits.api.Bool', 'Bool', ([], {}), '()\n', (325, 327), False, 'from traits.api import Bool\n'), ((415, 426), 'traits.api.Bool', 'Bool', (['(False)'], {}), '(False)\n', (419, 426), False, 'from traits.api import Bool\n'), ((456, 467), 'traits.api.Bool', 'Bool', (['(False)'], {}), '(False)\n', (460, 467), False, 'from traits.api import Bool\n'), ((493, 504), 'traits.api.Bool', 'Bool', (['(False)'], {}), '(False)\n', (497, 504), False, 'from traits.api import Bool\n'), ((1053, 1059), 'traits.api.Bool', 'Bool', ([], {}), '()\n', (1057, 1059), False, 'from traits.api import Bool\n')] |
# -*- coding: utf-8 -*-
# Define here the models for your scraped items
#
# See documentation in:
# http://doc.scrapy.org/en/latest/topics/items.html
import scrapy
class YokaBotItem(scrapy.Item):
    # Placeholder item: no fields are defined yet.
    # define the fields for your item here like:
    # name = scrapy.Field()
    pass
class YokaBotBrandListItem(scrapy.Item):
    """One entry scraped from the brand-list page."""
    # The trailing `pass` was redundant (the class body is non-empty) and removed.
    item_name = scrapy.Field()
    name = scrapy.Field()
    link = scrapy.Field()
    is_hot = scrapy.Field()
class YokaBotBrandItem(scrapy.Item):
    """Detail fields scraped from a single brand page."""
    # The trailing `pass` was redundant (the class body is non-empty) and removed.
    item_name = scrapy.Field()
    url = scrapy.Field()
    avator = scrapy.Field()
    brand_cn = scrapy.Field()
    brand_en = scrapy.Field()
    country = scrapy.Field()
    created = scrapy.Field()
    official_url = scrapy.Field()
    story = scrapy.Field()
    product_list_url = scrapy.Field()
class YokaBotProductListItem(scrapy.Item):
    """One entry scraped from a brand's product-list page."""
    # The trailing `pass` was redundant (the class body is non-empty) and removed.
    item_name = scrapy.Field()
    url = scrapy.Field()
    page = scrapy.Field()
    product_url = scrapy.Field()
    img = scrapy.Field()
    title = scrapy.Field()
class YokaBotProductItem(scrapy.Item):
    # Detail fields scraped from a single product page.
    item_name = scrapy.Field()
    url = scrapy.Field()
    product_id = scrapy.Field()
    breadcrumb = scrapy.Field()
    title = scrapy.Field()
    attrib = scrapy.Field()
    img = scrapy.Field()
| [
"scrapy.Field"
] | [((345, 359), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (357, 359), False, 'import scrapy\n'), ((371, 385), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (383, 385), False, 'import scrapy\n'), ((397, 411), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (409, 411), False, 'import scrapy\n'), ((425, 439), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (437, 439), False, 'import scrapy\n'), ((504, 518), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (516, 518), False, 'import scrapy\n'), ((529, 543), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (541, 543), False, 'import scrapy\n'), ((557, 571), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (569, 571), False, 'import scrapy\n'), ((587, 601), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (599, 601), False, 'import scrapy\n'), ((617, 631), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (629, 631), False, 'import scrapy\n'), ((646, 660), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (658, 660), False, 'import scrapy\n'), ((675, 689), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (687, 689), False, 'import scrapy\n'), ((709, 723), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (721, 723), False, 'import scrapy\n'), ((736, 750), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (748, 750), False, 'import scrapy\n'), ((774, 788), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (786, 788), False, 'import scrapy\n'), ((859, 873), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (871, 873), False, 'import scrapy\n'), ((884, 898), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (896, 898), False, 'import scrapy\n'), ((910, 924), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (922, 924), False, 'import scrapy\n'), ((943, 957), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (955, 957), False, 'import scrapy\n'), ((968, 982), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (980, 982), False, 'import scrapy\n'), ((995, 1009), 'scrapy.Field', 'scrapy.Field', ([], {}), 
'()\n', (1007, 1009), False, 'import scrapy\n'), ((1076, 1090), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (1088, 1090), False, 'import scrapy\n'), ((1101, 1115), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (1113, 1115), False, 'import scrapy\n'), ((1133, 1147), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (1145, 1147), False, 'import scrapy\n'), ((1165, 1179), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (1177, 1179), False, 'import scrapy\n'), ((1192, 1206), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (1204, 1206), False, 'import scrapy\n'), ((1220, 1234), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (1232, 1234), False, 'import scrapy\n'), ((1245, 1259), 'scrapy.Field', 'scrapy.Field', ([], {}), '()\n', (1257, 1259), False, 'import scrapy\n')] |
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 2 22:40:45 2020
@author: yashm
"""
import json
from itsdangerous import TimedJSONWebSignatureSerializer as Serializer
from flask import current_app
from epollchain import login_manager#db,
from flask_login import UserMixin
@login_manager.user_loader
def load_user(user_id):
    # Flask-Login callback: rebuild the session user from its id.
    # NOTE(review): user_id is ignored and a fresh User() is returned -- this
    # only works because the app models a single user loaded from
    # personal.json; confirm that is intentional.
    user = User()
    return user
class User(UserMixin):
    # Single-user model: the class body reads personal.json at import time,
    # so the user's details become class attributes shared by every instance.
    with open('epollchain/data/personal.json') as file:
        personal = json.load(file)
    id = personal['emailid']
    username = personal['name']
    email = personal['emailid']
    #image_file = db.Column(db.String(20), nullable=False, default='default.jpg')
    password = personal['password']
    #posts = db.relationship('Post', backref='author', lazy=True)
    image_file = 'profile.jpg'
    def get_reset_token(self, expires_sec=1800):
        # Issue a signed, time-limited token embedding this user's id.
        s = Serializer(current_app.config['SECRET_KEY'], expires_sec)
        return s.dumps({'user_id': self.id}).decode('utf-8')
    @staticmethod
    def verify_reset_token(token):
        # Validate a reset token; None on failure or expiry.
        # NOTE(review): the decoded user_id is discarded and the class itself
        # is returned -- works only for this single-user setup; confirm.
        s = Serializer(current_app.config['SECRET_KEY'])
        try:
            user_id = s.loads(token)['user_id']
        except:
            return None
        return User
    def __repr__(self):
        return f"User('{self.username}', '{self.email}')"
| [
"itsdangerous.TimedJSONWebSignatureSerializer",
"json.load"
] | [((460, 475), 'json.load', 'json.load', (['file'], {}), '(file)\n', (469, 475), False, 'import json\n'), ((846, 903), 'itsdangerous.TimedJSONWebSignatureSerializer', 'Serializer', (["current_app.config['SECRET_KEY']", 'expires_sec'], {}), "(current_app.config['SECRET_KEY'], expires_sec)\n", (856, 903), True, 'from itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n'), ((1035, 1079), 'itsdangerous.TimedJSONWebSignatureSerializer', 'Serializer', (["current_app.config['SECRET_KEY']"], {}), "(current_app.config['SECRET_KEY'])\n", (1045, 1079), True, 'from itsdangerous import TimedJSONWebSignatureSerializer as Serializer\n')] |
#!/usr/bin/env python3
#
# Copyright (C) 2020 Trillium Solutions <<EMAIL>>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this program except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import zipfile
import csv
import argparse
import subprocess
from sys import argv, stderr, stdout
from io import TextIOWrapper
from pathlib import Path
def parse_args():
    """Parse command-line options and validate the GTFS/OSM file arguments.

    Exits with status 1 (after printing help) when a GTFS argument is not a
    zip archive or when the OSM output already exists without --force.
    """
    parser = argparse.ArgumentParser(
        allow_abbrev=False,
        description="""
    Find the lat/lon bounds of a GTFS file.
    If an OSM input file is provided, create an output file which is a
    trimmed version of the input file.
    Alternatively, OSM may be downloaded from the Overpass API
    (https://wiki.openstreetmap.org/wiki/Overpass_API) and written to
    an output file in OSM XML format.
    """)
    input_group = parser.add_mutually_exclusive_group()
    input_group.add_argument('-i', '--osm-input',
                        type=argparse.FileType(),
                        help="Input OSM file, used by osmconvert.")
    input_group.add_argument('-d', '--download-from-overpass',
                        action='store_true',
                        help="Download OSM from Overpass API, and save to the OSM_OUTPUT file. Uses the wget program.")
    parser.add_argument('-o',
                        '--osm-output',
                        help="Output OSM file, will be overwritten.")
    parser.add_argument('--force',
                        action='store_true',
                        help="Force overwrite of the OSM_OUTPUT file.")
    parser.add_argument('--buffer-degrees',
                        type=float,
                        help="Increase the bounds by a Buffer of this many degrees.")
    parser.add_argument('gtfs_file', nargs='*', help="Input GTFS file. Multiple files may be provided.")
    args = parser.parse_args()
    for g in args.gtfs_file:
        if not zipfile.is_zipfile(g):
            parser.print_help()
            print ("\nERROR, the GTFS file '%s' doesn't appear to be a zip archive." % g, file=stderr)
            exit(1)
    if args.osm_output:
        o = Path(args.osm_output)
        if o.exists() and not args.force:
            parser.print_help()
            # Fix: send this error to stderr like the other error above.
            print ("\nERROR, output osm file '%s' exists and --force was not used." % args.osm_output, file=stderr)
            exit(1)
    return args
def main():
    """Compute the stop bounding box of the GTFS inputs and optionally trim
    or download OSM data covering it."""
    # Sentinel values outside any valid lat/lon range.
    min_lat = 1000
    max_lat = -1000
    min_lon = 1000
    max_lon = -1000
    args = parse_args()
    for g in args.gtfs_file:
        with zipfile.ZipFile(g) as z:
            stopsfile = TextIOWrapper(z.open('stops.txt'))
            stops = csv.DictReader(stopsfile)
            for stop in stops:
                try:
                    min_lat = min(min_lat, float(stop['stop_lat']))
                    max_lat = max(max_lat, float(stop['stop_lat']))
                    min_lon = min(min_lon, float(stop['stop_lon']))
                    max_lon = max(max_lon, float(stop['stop_lon']))
                except (KeyError, TypeError, ValueError):
                    # Fix: the original `except e:` referenced an undefined
                    # name and would raise NameError instead of skipping a
                    # malformed/missing coordinate row.
                    pass
    if 1000 in (min_lat, min_lon) or -1000 in (max_lat, max_lon):
        print('Sorry, bounds not found.')
        exit(1)
    print('Note: please use caution when intepreting these results near longitude +180/-180!', file=stderr)
    print('Bounds are lat: [%s, %s] lon: [%s, %s]' %(min_lat, max_lat, min_lon, max_lon))
    if args.buffer_degrees:
        min_lat -= args.buffer_degrees
        min_lon -= args.buffer_degrees
        max_lat += args.buffer_degrees
        max_lon += args.buffer_degrees
        print('Buffered Bounds are lat: [%s, %s] lon: [%s, %s]' %(min_lat, max_lat, min_lon, max_lon))
    if args.osm_input and args.osm_output:
        # Trim the supplied OSM extract down to the computed bounding box.
        run_arguments = [
            'osmconvert',
            args.osm_input.name,
            '-b=%s,%s,%s,%s' % (min_lon, min_lat, max_lon, max_lat),
            '--complete-ways',
            '-o=%s' % args.osm_output,
            ]
        subprocess.run(run_arguments)
    if args.osm_output and args.download_from_overpass:
        url = 'https://overpass-api.de/api/map?bbox=%s,%s,%s,%s' % (min_lon,min_lat,max_lon,max_lat)
        print('Downloading from the Overpass URL: %s' % url)
        run_arguments = [ 'wget', url, '--compression=gzip', '-O', args.osm_output ]
        subprocess.run(run_arguments)
# Standard script entry-point guard.
if __name__ == '__main__':
    main()
| [
"subprocess.run",
"zipfile.is_zipfile",
"zipfile.ZipFile",
"argparse.ArgumentParser",
"csv.DictReader",
"pathlib.Path",
"argparse.FileType"
] | [((833, 1290), 'argparse.ArgumentParser', 'argparse.ArgumentParser', ([], {'allow_abbrev': '(False)', 'description': '"""\n Find the lat/lon bounds of a GTFS file.\n\n If an OSM input file is provided, create an output file which is a\n trimmed version of the input file.\n\n Alternatively, OSM may be downloaded from the Overpass API\n (https://wiki.openstreetmap.org/wiki/Overpass_API) and written to\n an output file in OSM XML format.\n """'}), '(allow_abbrev=False, description=\n """\n Find the lat/lon bounds of a GTFS file.\n\n If an OSM input file is provided, create an output file which is a\n trimmed version of the input file.\n\n Alternatively, OSM may be downloaded from the Overpass API\n (https://wiki.openstreetmap.org/wiki/Overpass_API) and written to\n an output file in OSM XML format.\n """\n )\n', (856, 1290), False, 'import argparse\n'), ((2499, 2520), 'pathlib.Path', 'Path', (['args.osm_output'], {}), '(args.osm_output)\n', (2503, 2520), False, 'from pathlib import Path\n'), ((4579, 4608), 'subprocess.run', 'subprocess.run', (['run_arguments'], {}), '(run_arguments)\n', (4593, 4608), False, 'import subprocess\n'), ((4921, 4950), 'subprocess.run', 'subprocess.run', (['run_arguments'], {}), '(run_arguments)\n', (4935, 4950), False, 'import subprocess\n'), ((1423, 1442), 'argparse.FileType', 'argparse.FileType', ([], {}), '()\n', (1440, 1442), False, 'import argparse\n'), ((2284, 2305), 'zipfile.is_zipfile', 'zipfile.is_zipfile', (['g'], {}), '(g)\n', (2302, 2305), False, 'import zipfile\n'), ((2893, 2911), 'zipfile.ZipFile', 'zipfile.ZipFile', (['g'], {}), '(g)\n', (2908, 2911), False, 'import zipfile\n'), ((3107, 3132), 'csv.DictReader', 'csv.DictReader', (['stopsfile'], {}), '(stopsfile)\n', (3121, 3132), False, 'import csv\n')] |
#
# Created by djz on 2022/04/01.
#
import numpy as np
from typing import Dict
from transformers.file_utils import ExplicitEnum, add_end_docstrings, is_tf_available, is_torch_available
from .base import GenericTensor, Pipeline
def sigmoid(_outputs):
    """Map logits to (0, 1) element-wise via the logistic function."""
    denominator = 1.0 + np.exp(-_outputs)
    return 1.0 / denominator
def softmax(_outputs):
    """Numerically stable softmax over the last axis (subtracts the row max)."""
    shifted = _outputs - np.max(_outputs, axis=-1, keepdims=True)
    exps = np.exp(shifted)
    return exps / exps.sum(axis=-1, keepdims=True)
class ClassificationFunction(ExplicitEnum):
    # Post-processing applied to the model's raw logits in postprocess():
    # SIGMOID for multi-label / single-logit models, SOFTMAX for
    # single-label multi-class models, NONE to return logits unchanged.
    SIGMOID = "sigmoid"
    SOFTMAX = "softmax"
    NONE = "none"
class TextClassificationPipeline(Pipeline):
    """Pipeline that tokenizes text, runs the model, and converts logits
    into ``{"label", "score"}`` dicts."""
    # Class-level defaults; may be overridden via _sanitize_parameters.
    return_all_scores = False
    function_to_apply = ClassificationFunction.NONE
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
    def _sanitize_parameters(self, return_all_scores=None, function_to_apply=None, **tokenizer_kwargs):
        """Split caller kwargs into (preprocess, forward, postprocess) params."""
        preprocess_params = tokenizer_kwargs
        postprocess_params = {}
        # Fall back to the model config's return_all_scores when not given.
        if hasattr(self.model.config, "return_all_scores") and return_all_scores is None:
            return_all_scores = self.model.config.return_all_scores
        if return_all_scores is not None:
            postprocess_params["return_all_scores"] = return_all_scores
        # Accept the function by name, e.g. "softmax" -> ClassificationFunction.SOFTMAX.
        if isinstance(function_to_apply, str):
            function_to_apply = ClassificationFunction[function_to_apply.upper()]
        if function_to_apply is not None:
            postprocess_params["function_to_apply"] = function_to_apply
        return preprocess_params, {}, postprocess_params
    def __call__(self, *args, **kwargs):
        result = super().__call__(*args, **kwargs)
        if isinstance(args[0], str):
            # This pipeline is odd, and return a list when single item is run
            return [result]
        else:
            return result
    def preprocess(self, inputs, **tokenizer_kwargs) -> Dict[str, GenericTensor]:
        # Tokenize to PyTorch tensors.
        return_tensors = 'pt'
        return self.tokenizer(inputs, return_tensors=return_tensors, **tokenizer_kwargs)
    def _forward(self, model_inputs):
        # Forward pass; model_inputs comes straight from preprocess().
        return self.model(**model_inputs)
    def postprocess(self, model_outputs, function_to_apply=None, return_all_scores=False):
        """Turn raw logits into label/score dict(s)."""
        # Default value before `set_parameters`: derive the activation from
        # the model config when the caller did not choose one explicitly.
        if function_to_apply is None:
            if self.model.config.problem_type == "multi_label_classification" or self.model.config.num_labels == 1:
                function_to_apply = ClassificationFunction.SIGMOID
            elif self.model.config.problem_type == "single_label_classification" or self.model.config.num_labels > 1:
                function_to_apply = ClassificationFunction.SOFTMAX
            elif hasattr(self.model.config, "function_to_apply") and function_to_apply is None:
                function_to_apply = self.model.config.function_to_apply
            else:
                function_to_apply = ClassificationFunction.NONE
        # First item of the batch; move to numpy for the activation helpers.
        outputs = model_outputs["logits"][0]
        outputs = outputs.numpy()
        if function_to_apply == ClassificationFunction.SIGMOID:
            scores = sigmoid(outputs)
        elif function_to_apply == ClassificationFunction.SOFTMAX:
            scores = softmax(outputs)
        elif function_to_apply == ClassificationFunction.NONE:
            scores = outputs
        else:
            raise ValueError(f"Unrecognized `function_to_apply` argument: {function_to_apply}")
        if return_all_scores:
            return [{"label": self.model.config.id2label[i], "score": score.item()} for i, score in enumerate(scores)]
        else:
            return {"label": self.model.config.id2label[scores.argmax().item()], "score": scores.max().item()}
| [
"numpy.max",
"numpy.exp"
] | [((331, 371), 'numpy.max', 'np.max', (['_outputs'], {'axis': '(-1)', 'keepdims': '(True)'}), '(_outputs, axis=-1, keepdims=True)\n', (337, 371), True, 'import numpy as np\n'), ((390, 414), 'numpy.exp', 'np.exp', (['(_outputs - maxes)'], {}), '(_outputs - maxes)\n', (396, 414), True, 'import numpy as np\n'), ((276, 293), 'numpy.exp', 'np.exp', (['(-_outputs)'], {}), '(-_outputs)\n', (282, 293), True, 'import numpy as np\n')] |
import torch
from utils import AverageMeter
import time
import math
class OutdoorVlnTrainer:
    """Drives training and evaluation of an outdoor VLN agent, logging
    losses and navigation metrics (optionally to TensorBoard)."""
    def __init__(self, opts, agent, optimizer):
        # opts: run configuration (batch_size, CLS/DTW metric switches, ...)
        # agent: exposes .model, .instr_encoder, .env and .rollout()
        self.opts = opts
        self.agent = agent
        self.optimizer = optimizer
        self.device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    def train(self, epoch, train_env, tb_logger=None):
        """Run one training epoch over train_env; logs per-iteration loss."""
        print('Training on {} env ...'.format(train_env.splits[0]))
        print('Learning rate: {}'.format(self.optimizer.param_groups[0]['lr']))
        self.agent.env = train_env
        self.agent.model.train()
        self.agent.instr_encoder.train()
        self.agent.env.reset_epoch()
        losses = AverageMeter()
        batch_time = AverageMeter()
        end = time.time()
        self.train_iters_epoch = math.ceil(len(train_env.data) / self.opts.batch_size)
        for iter_ in range(1, self.train_iters_epoch + 1):
            # rollout returns (loss, trajectories, actions); only loss is used here.
            loss, _, _ = self.agent.rollout(is_test=False)
            self.optimizer.zero_grad()
            loss.backward()
            self.optimizer.step()
            batch_time.update(time.time() - end)
            losses.update(loss.item(), len(self.agent.env.batch))
            end = time.time()
            # Log the raw iteration loss every 10 iterations.
            if tb_logger and iter_ % 10 == 0:
                current_iter = iter_ + (epoch - 1) * self.train_iters_epoch
                tb_logger.add_scalar('train/loss_train', loss, current_iter)
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\n'.format(
                epoch, iter_, self.train_iters_epoch, batch_time=batch_time,
                loss=losses), end='')
        if tb_logger:
            tb_logger.add_scalar('epoch/learning_rate', self.optimizer.param_groups[0]['lr'], epoch)
            tb_logger.add_scalar('epoch/train/loss', losses.avg, epoch)
    def eval_(self, epoch, val_env, tb_logger=None):
        """Evaluate on val_env; returns a dict of navigation metrics."""
        phase = val_env.env.name
        print('Evaluating on {} env ...'.format(phase))
        losses = AverageMeter()
        batch_time = AverageMeter()
        self.agent.env = val_env
        self.agent.env.reset_epoch()
        self.agent.model.eval()
        self.agent.instr_encoder.eval()
        val_iters_epoch = math.ceil(len(val_env.data) / self.opts.batch_size)
        # Accumulators filled in-place by env.eva_metrics.
        metrics = [0] * 3  # [TC, SPD, SED]
        if self.opts.CLS:
            metrics += [0]
        if self.opts.DTW:
            metrics += [0] * 5
        with torch.no_grad():
            end = time.time()
            for iter_ in range(1, val_iters_epoch + 1):
                _, trajs, agent_actions = self.agent.rollout(is_test=True)
                #print_actions(agent_actions)
                self.agent.env.eva_metrics(trajs, metrics)
                batch_time.update(time.time() - end)
                end = time.time()
                print('Epoch: [{0}][{1}/{2}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})'.format(
                    epoch, iter_, val_iters_epoch, batch_time=batch_time))
        # Average over the dataset, then express sub-1 ratios as percentages.
        metrics = [m / len(val_env.data) for m in metrics]
        metrics = [m * 100 if m < 1 else m for m in metrics]
        if tb_logger:
            tb_logger.add_scalar('epoch/{}/TC'.format(phase), metrics[0], epoch)
            tb_logger.add_scalar('epoch/{}/SPD'.format(phase), metrics[1], epoch)
            tb_logger.add_scalar('epoch/{}/SED'.format(phase), metrics[2], epoch)
        d_metrics = dict(TC=metrics[0], SPD=metrics[1], SED=metrics[2])
        print("=======[%s] Evaluation Metrics=======" % phase)
        print("TC: %.2f, SPD: %.2f, SED: %.2f" % tuple(metrics[:3]), end='')
        if self.opts.CLS:
            print(', CLS:%.2f' % metrics[3], end='')
            d_metrics['CLS'] = metrics[3]
        if self.opts.DTW:
            print(', DTW:%.2f, nDTW:%.2f, SDTW:%.2f' % tuple(metrics[-3:]))
            d_metrics['DTW'] = metrics[-3]
            d_metrics['nDTW'] = metrics[-2]
            d_metrics['SDTW'] = metrics[-1]
        else:
            print('')
        print("================================")
        return d_metrics
| [
"time.time",
"torch.no_grad",
"torch.cuda.is_available",
"utils.AverageMeter"
] | [((681, 695), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (693, 695), False, 'from utils import AverageMeter\n'), ((717, 731), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (729, 731), False, 'from utils import AverageMeter\n'), ((747, 758), 'time.time', 'time.time', ([], {}), '()\n', (756, 758), False, 'import time\n'), ((2065, 2079), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2077, 2079), False, 'from utils import AverageMeter\n'), ((2101, 2115), 'utils.AverageMeter', 'AverageMeter', ([], {}), '()\n', (2113, 2115), False, 'from utils import AverageMeter\n'), ((1198, 1209), 'time.time', 'time.time', ([], {}), '()\n', (1207, 1209), False, 'import time\n'), ((2506, 2521), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (2519, 2521), False, 'import torch\n'), ((2541, 2552), 'time.time', 'time.time', ([], {}), '()\n', (2550, 2552), False, 'import time\n'), ((275, 300), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (298, 300), False, 'import torch\n'), ((2864, 2875), 'time.time', 'time.time', ([], {}), '()\n', (2873, 2875), False, 'import time\n'), ((1095, 1106), 'time.time', 'time.time', ([], {}), '()\n', (1104, 1106), False, 'import time\n'), ((2823, 2834), 'time.time', 'time.time', ([], {}), '()\n', (2832, 2834), False, 'import time\n')] |
# -*- coding: utf-8 -*
import hmac
import base64
import time
import hashlib
import os
import sys
import pika
class AliyunCredentialsProvider:
    """
    Compute the AMQP username and password Aliyun's broker expects from an
    access key, access secret and instance id.  The instance id can be
    copied from the front page of the AMQP console.
    """
    ACCESS_FROM_USER = 0

    def __init__(self, access_key, access_secret, instanceId):
        self.accessKey = access_key
        self.accessSecret = access_secret
        self.instanceId = instanceId

    def get_username(self):
        """Base64 of "<access-mode>:<instanceId>:<accessKey>"."""
        plain = '%i:%s:%s' % (self.ACCESS_FROM_USER, self.instanceId, self.accessKey)
        return base64.b64encode(plain.encode('utf-8'))

    def get_password(self):
        """Base64 of "<HMAC-SHA1 hex, uppercased>:<millisecond timestamp>",
        where the HMAC key is the timestamp and the message is the secret."""
        timestamp = str(int(round(time.time() * 1000)))
        mac = hmac.new(timestamp.encode('utf-8'), self.accessSecret.encode('utf-8'), hashlib.sha1)
        signature = mac.hexdigest().upper()
        return base64.b64encode(("%s:%s" % (signature, timestamp)).encode('utf-8'))
| [
"time.time"
] | [((696, 707), 'time.time', 'time.time', ([], {}), '()\n', (705, 707), False, 'import time\n')] |
from django.contrib import admin
from . import models
@admin.register(models.Message)
class MessageAdmin(admin.ModelAdmin):
    # Admin changelist configuration for chat messages.
    list_filter = ('owner', 'created')    # sidebar filters
    list_display = ('text', 'to_chat')    # columns shown in the changelist
    search_fields = ('owner', 'to_chat')  # fields covered by the search box
@admin.register(models.Chat)
class ChatAdmin(admin.ModelAdmin):
    # Default admin; registered only so Chat appears in the admin site.
    pass
@admin.register(models.Member)
class MemberAdmin(admin.ModelAdmin):
    """Default admin for Member objects (no customization)."""
    pass
| [
"django.contrib.admin.register"
] | [((57, 87), 'django.contrib.admin.register', 'admin.register', (['models.Message'], {}), '(models.Message)\n', (71, 87), False, 'from django.contrib import admin\n'), ((248, 275), 'django.contrib.admin.register', 'admin.register', (['models.Chat'], {}), '(models.Chat)\n', (262, 275), False, 'from django.contrib import admin\n'), ((323, 352), 'django.contrib.admin.register', 'admin.register', (['models.Member'], {}), '(models.Member)\n', (337, 352), False, 'from django.contrib import admin\n')] |
# (c) Copyright IBM Corporation 2020.
# LICENSE: Apache License 2.0 (Apache-2.0)
# http://www.apache.org/licenses/LICENSE-2.0
import glob
import logging
import os
import traceback
from collections import Counter
from enum import Enum
from typing import Mapping, List, Sequence, Tuple, Set
import lrtc_lib.data_access.data_access_factory as data_access_factory
from lrtc_lib.active_learning.strategies import ActiveLearningStrategy
from lrtc_lib.data_access.core.data_structs import Label, TextElement
from lrtc_lib.data_access.core.utils import get_workspace_labels_dump_filename
from lrtc_lib.definitions import PROJECT_PROPERTIES
from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api
from lrtc_lib.orchestrator.core.state_api.orchestrator_state_api import ModelInfo, ActiveLearningRecommendationsStatus
from lrtc_lib.train_and_infer_service.model_type import ModelType
from lrtc_lib.train_and_infer_service.train_and_infer_api import ModelStatus
from lrtc_lib.training_set_selector import training_set_selector_factory
# constants
MAX_VALUE = 1000000  # effectively "no limit" sample size for data_access sampling calls
TRAIN_COUNTS_STR_KEY = "train_counts"  # model_metadata key: per-label training counts
DEV_COUNTS_STR_KEY = "dev_counts"  # model_metadata key: per-label dev counts
LABEL_POSITIVE = "true"
LABEL_NEGATIVE = "false"
BINARY_LABELS = frozenset({LABEL_NEGATIVE, LABEL_POSITIVE})  # default label set for new categories
# members: module-level mutable state, reconfigured via the set_* functions below
active_learning_strategy = PROJECT_PROPERTIES["active_learning_strategy"]
training_set_selection_strategy = PROJECT_PROPERTIES["training_set_selection"]
active_learner = PROJECT_PROPERTIES["active_learning_factory"].get_active_learner(active_learning_strategy)
data_access = data_access_factory.get_data_access()
train_and_dev_sets_selector = training_set_selector_factory.get_training_set_selector(
    selector=training_set_selection_strategy)
def _delete_orphan_labels():
    """Remove label dump files whose parent workspace no longer exists."""
    known_ids = [w.workspace_id for w in orchestrator_state_api.get_all_workspaces()]
    # Collect every dump file that belongs to a still-existing workspace.
    expected_files = set()
    for wid in known_ids:
        expected_files.update(
            glob.glob(get_workspace_labels_dump_filename(workspace_id=wid, dataset_name='*')))
    # Anything matching the wildcard pattern but not claimed above is an orphan.
    for candidate in glob.glob(get_workspace_labels_dump_filename(workspace_id='*', dataset_name='*')):
        if candidate not in expected_files:
            logging.info(f"deleting orphan labels file {candidate}")
            os.remove(candidate)
_delete_orphan_labels()
def copy_workspace(existing_workspace_id: str, new_workspace_id: str):
    """Clone an existing workspace, including its labels, under a new id.

    :param existing_workspace_id: id of the workspace to clone
    :param new_workspace_id: id for the copy
    :return: the new workspace id
    """
    source = get_workspace(existing_workspace_id)
    # Copy label dumps first, then duplicate the workspace state itself.
    data_access.copy_labels_to_new_workspace(existing_workspace_id, new_workspace_id,
                                             source.dataset_name, source.dev_dataset_name)
    orchestrator_state_api.copy_workspace(existing_workspace_id, new_workspace_id)
    return new_workspace_id
def set_training_set_selection_strategy(new_training_set_selection_strategy=None):
    """Set the logic for selecting training examples from the training dataset.

    The default strategy is ALL_LABELED (use every labeled element); other
    strategies can add weak labels, e.g. treating unlabeled elements as weak
    negatives. Passing None keeps the current strategy but still rebuilds the
    module-level selector.
    """
    global train_and_dev_sets_selector, training_set_selection_strategy
    if new_training_set_selection_strategy is not None:
        training_set_selection_strategy = new_training_set_selection_strategy
    train_and_dev_sets_selector = training_set_selector_factory.get_training_set_selector(
        selector=training_set_selection_strategy)
def set_active_learning_strategy(new_active_learning_strategy=None):
    """Set the active learning policy to use.

    Passing None keeps the current policy but still rebuilds the module-level
    active learner from the configured factory.
    """
    global active_learner, active_learning_strategy
    if new_active_learning_strategy is not None:
        active_learning_strategy = new_active_learning_strategy
    factory = PROJECT_PROPERTIES["active_learning_factory"]
    active_learner = factory.get_active_learner(active_learning_strategy)
def create_workspace(workspace_id: str, dataset_name: str, dev_dataset_name: str = None, test_dataset_name: str = None):
    """
    create a new workspace
    :param workspace_id: unique id for the new workspace
    :param dataset_name: name of the training dataset backing the workspace
    :param dev_dataset_name: optional dataset used for dev-set selection
    :param test_dataset_name: optional dataset used for evaluation
    """
    orchestrator_state_api.create_workspace(workspace_id, dataset_name, dev_dataset_name, test_dataset_name)
    logging.info(f"Creating workspace {workspace_id} using dataset {dataset_name}")
def create_new_category(workspace_id: str, category_name: str, category_description: str,
                        category_labels: Set[str] = BINARY_LABELS):
    """
    declare a new category in the given workspace
    :param workspace_id:
    :param category_name:
    :param category_description:
    :param category_labels: label set for the category; defaults to the binary true/false labels
    """
    orchestrator_state_api.add_category_to_workspace(workspace_id, category_name, category_description, category_labels)
class DeleteModels(Enum):
    """Model-deletion policy used by delete_workspace."""
    ALL = 0  # delete every model of the workspace
    FALSE = 1  # keep all models
    ALL_BUT_FIRST_MODEL = 2  # keep the first model of each category
def delete_workspace(workspace_id: str, delete_models: DeleteModels = DeleteModels.ALL, ignore_errors=False):
    """
    delete a given workspace
    :param workspace_id:
    :param delete_models: ALL - delete all the models of the workspace, FALSE - do not delete models,
    ALL_BUT_FIRST_MODEL - keep the first model of each category
    :param ignore_errors: if True, log failures and continue instead of re-raising
    """
    logging.info(f"deleting workspace {workspace_id} ignore errors {ignore_errors}")
    models_to_delete = []
    if workspace_exists(workspace_id):
        try:
            workspace = orchestrator_state_api.get_workspace(workspace_id)
            if delete_models != DeleteModels.FALSE:
                for category in workspace.category_to_models:
                    for idx, model_id in enumerate(workspace.category_to_models[category]):
                        # index 0 is the category's first model; keep it when requested
                        if idx == 0 and delete_models == DeleteModels.ALL_BUT_FIRST_MODEL:
                            continue
                        # resolve ModelInfo now — after delete_workspace_state the lookup is gone
                        models_to_delete.append(_get_model(workspace_id, model_id))
            orchestrator_state_api.delete_workspace_state(workspace_id)
        except Exception as e:
            logging.error(f"error deleting workspace {workspace_id}")
            traceback.print_exc()
            if not ignore_errors:
                raise e
        # clear saved labels for both the train and the optional dev dataset
        try:
            data_access.clear_saved_labels(workspace_id, workspace.dataset_name)
            if workspace.dev_dataset_name:
                data_access.clear_saved_labels(workspace_id, workspace.dev_dataset_name)
        except Exception as e:
            logging.error(f"error clearing saved label for workspace {workspace_id}")
            traceback.print_exc()
            if not ignore_errors:
                raise e
    # physical model deletion happens last, via each model type's backend
    for model in models_to_delete:
        model_type = model.model_type
        train_and_infer = PROJECT_PROPERTIES["train_and_infer_factory"].get_train_and_infer(model_type)
        train_and_infer.delete_model(model.model_id)
def edit_category(workspace_id: str, prev_category_name: str, new_category_name: str, new_category_description: str):
    """Rename a category and update its description.

    :raises NotImplementedError: always — this is a placeholder for a planned API.
    """
    # NotImplementedError (a RuntimeError/Exception subclass) is the idiomatic
    # signal for an unimplemented stub and stays catchable by `except Exception`.
    raise NotImplementedError("edit_category is not implemented yet")
def delete_category(workspace_id: str, category_name: str):
    """Delete a category from the workspace.

    :raises NotImplementedError: always — this is a placeholder for a planned API.
    """
    # NotImplementedError is the idiomatic signal for an unimplemented stub and
    # remains catchable by callers using `except Exception`.
    raise NotImplementedError("delete_category is not implemented yet")
def add_documents(dataset_name, docs):
    """Add the given documents to the named dataset via the data-access layer."""
    data_access.add_documents(dataset_name=dataset_name, documents=docs)
def query(workspace_id: str, dataset_name: str, category_name: str, query: str,
          sample_size: int, unlabeled_only: bool = False, remove_duplicates=False) -> Mapping[str, object]:
    """Run a regex query over the dataset, returning up to *sample_size* matches.

    :param workspace_id:
    :param dataset_name:
    :param category_name:
    :param query: regex string
    :param sample_size: maximum number of items to return
    :param unlabeled_only: if True, restrict results to unlabeled elements
    :param remove_duplicates: if True, drop duplicate elements
    :return: {'results': [TextElement], 'hit_count': int} where hit_count is the
             total number of TextElements in the dataset matched by the query
    """
    if unlabeled_only:
        # the unlabeled sampler also needs the category to filter out its labels
        return data_access.sample_unlabeled_text_elements(
            workspace_id=workspace_id, dataset_name=dataset_name, category_name=category_name,
            sample_size=sample_size, query=query, remove_duplicates=remove_duplicates)
    return data_access.sample_text_elements_with_labels_info(
        workspace_id=workspace_id, dataset_name=dataset_name,
        sample_size=sample_size, query=query, remove_duplicates=remove_duplicates)
def get_documents(workspace_id: str, dataset_name: str, uris: Sequence[str]) -> List[object]:
    """
    Fetch the Documents identified by *uris*, enriched with the workspace's label info.
    :rtype: list of Document
    :param workspace_id:
    :param dataset_name:
    :param uris: document uris to fetch
    """
    return data_access.get_documents_with_labels_info(workspace_id, dataset_name, uris)
def get_text_elements(workspace_id: str, dataset_name: str, uris: Sequence[str]) -> List[object]:
    """
    Fetch the TextElements identified by *uris*, enriched with the workspace's label info.
    :param workspace_id:
    :param dataset_name:
    :param uris: text element uris to fetch
    """
    return data_access.get_text_elements_with_labels_info(workspace_id, dataset_name, uris)
def _update_recommendation(workspace_id, dataset_name, category_name, count, model: ModelInfo = None):
    """
    Using the AL strategy, update the workspace with next recommended elements for labeling
    :param workspace_id:
    :param dataset_name:
    :param category_name:
    :param count: number of recommendations that must be available
    :param model: model to use or None to use the latest model in status READY
    :return: the id of the model whose recommendations were used/updated
    """
    if model is None:
        model = orchestrator_state_api.get_latest_model_by_state(workspace_id, category_name, ModelStatus.READY)
    curr_cat_recommendations = orchestrator_state_api.get_current_category_recommendations(workspace_id, category_name,
                                                                                model.model_id)
    num_recommendations = len(curr_cat_recommendations)
    # only invoke the (expensive) active learner when the stored recommendations
    # do not cover the requested count; status goes AL_IN_PROGRESS -> READY
    if num_recommendations < count:
        orchestrator_state_api.update_active_learning_status(workspace_id, category_name, model.model_id,
                                                             ActiveLearningRecommendationsStatus.AL_IN_PROGRESS)
        new_recommendations = active_learner.get_recommended_items_for_labeling(
            workspace_id=workspace_id, model_id=model.model_id, dataset_name=dataset_name, category_name=category_name,
            sample_size=count)
        orchestrator_state_api.update_category_recommendations(workspace_id=workspace_id, category_name=category_name,
                                                               model_id=model.model_id,
                                                               recommended_items=new_recommendations)
        orchestrator_state_api.update_active_learning_status(workspace_id, category_name, model.model_id,
                                                             ActiveLearningRecommendationsStatus.READY)
    return model.model_id
def get_model_active_learning_status(workspace_id, model_id):
    """Return the ActiveLearningRecommendationsStatus recorded for the given model."""
    return orchestrator_state_api.get_active_learning_status(workspace_id, model_id)
def get_elements_to_label(workspace_id: str, category_name: str, count: int) -> Sequence[TextElement]:
    """Return up to *count* elements recommended for labeling by the AL strategy.

    The active learner itself is only invoked if fewer than *count*
    recommendations are already stored for the latest READY model.

    :param workspace_id:
    :param category_name:
    :param count:
    """
    dataset_name = get_workspace(workspace_id).dataset_name
    model_id = _update_recommendation(workspace_id, dataset_name, category_name, count)
    return orchestrator_state_api.get_current_category_recommendations(workspace_id, category_name, model_id)
def set_labels(workspace_id: str, labeled_sentences: Sequence[Tuple[str, Mapping[str, Label]]],
               propagate_to_duplicates=False):
    """Set labels for URIs.

    :param workspace_id:
    :param labeled_sentences: sequence of (uri, {"category_name": Label}) tuples,
        where Label is an instance of data_structs.Label
    :param propagate_to_duplicates: if True, also apply the same labels to URIs
        that are duplicates of those provided
    """
    return data_access.set_labels(workspace_id, labeled_sentences, propagate_to_duplicates)
def unset_labels(workspace_id: str, category_name, uris: Sequence[str]):
    """
    unset labels of a given category for URIs.
    :param workspace_id:
    :param category_name:
    :param uris: text element uris whose labels should be removed
    """
    data_access.unset_labels(workspace_id, category_name, uris)
def _convert_to_dicts_with_numeric_labels(data, category_name, all_category_labels: Set[str]) -> Sequence[Mapping]:
    """Convert labeled elements to the dict format expected by the model backends.

    Textual labels are mapped to integers by their position in the sorted label
    set, so the mapping is deterministic for a given label set.

    :param data: elements with .text and .category_to_label attributes
    :param category_name: category whose label is extracted per element
    :param all_category_labels: the full label set of the category
    """
    label_to_index = {label: idx for idx, label in enumerate(sorted(all_category_labels))}
    def as_number(labels_set):
        # exactly one label per element is supported
        if len(labels_set) != 1:
            raise ValueError("multilabel is not supported currently")
        return label_to_index[next(iter(labels_set))]
    return [{"text": element.text,
             "label": as_number(element.category_to_label[category_name].labels)}
            for element in data]
def train(workspace_id: str, category_name: str, model_type: ModelType, train_params=None, infer_after_train=True):
    """
    train a model for a category in the specified workspace
    :param workspace_id:
    :param category_name:
    :param model_type:
    :param train_params: optional dict of extra training parameters, merged into the model metadata
    :param infer_after_train: if True, also submit the full train/test datasets for inference so results are cached
    :return: model id
    """
    workspace = get_workspace(workspace_id)
    dataset_name = workspace.dataset_name
    # select train/dev sets according to the configured training-set-selection strategy
    (train_data, train_counts), (dev_data, dev_counts) = train_and_dev_sets_selector.get_train_and_dev_sets(
        workspace_id=workspace_id, train_dataset_name=dataset_name, category_name=category_name,
        dev_dataset_name=workspace.dev_dataset_name)
    logging.info(f"training a new model with {train_counts}")
    # label_counts != train_counts as train_counts may refer to negative and weak negative labels separately
    labels = [element.category_to_label[category_name].labels for element in train_data]
    labels = [item for subset in labels for item in subset]  # flatten list of sets
    label_counts = Counter(labels)
    all_category_labels = workspace.category_to_labels[category_name]
    # refuse to train if any label of the category has zero training examples
    labels_not_in_train = [label for label in all_category_labels if label_counts[label] == 0]
    if len(labels_not_in_train) > 0:
        raise Exception(f"no train examples for labels: {labels_not_in_train}, cannot train a model: {train_counts}")
    model_metadata = dict()
    model_metadata[TRAIN_COUNTS_STR_KEY] = train_counts
    if dev_data is not None:
        model_metadata[DEV_COUNTS_STR_KEY] = dev_counts
    logging.info(
        f"workspace {workspace_id} training a model for category '{category_name}', model_metadata: {model_metadata}")
    # convert textual labels to the numeric representation expected by the backends
    train_data = _convert_to_dicts_with_numeric_labels(train_data, category_name, all_category_labels)
    if dev_data:
        dev_data = _convert_to_dicts_with_numeric_labels(dev_data, category_name, all_category_labels)
    elements_to_infer = None
    if infer_after_train:  # add data to be inferred and cached after the training process
        test_dataset = data_access.sample_text_elements(workspace.test_dataset_name, MAX_VALUE)['results'] \
            if workspace.test_dataset_name is not None else []
        all_train_dataset = data_access.sample_text_elements(workspace.dataset_name, MAX_VALUE)['results']
        elements_to_infer = [{"text": element.text} for element in test_dataset + all_train_dataset]
    # on key collision model_metadata wins over user-supplied train_params
    params = model_metadata if train_params is None else {**train_params, **model_metadata}
    train_and_infer = PROJECT_PROPERTIES["train_and_infer_factory"].get_train_and_infer(model_type)
    model_id = train_and_infer.train(train_data=train_data, dev_data=dev_data, test_data=elements_to_infer,
                                     train_params=params)
    logging.info(f"new model id is {model_id}")
    model_status = train_and_infer.get_model_status(model_id)
    # register the new model in the orchestrator state so it can be tracked and queried
    orchestrator_state_api.add_model(workspace_id=workspace_id, category_name=category_name, model_id=model_id,
                                     model_status=model_status, model_type=model_type, model_metadata=params)
    return model_id
def get_model_status(workspace_id: str, model_id: str) -> ModelStatus:
    """
    ModelStatus can be TRAINING, READY or ERROR
    :param workspace_id:
    :param model_id:
    :return: the status recorded for the given model
    """
    model = _get_model(workspace_id, model_id)
    return model.model_status
def get_model_train_counts(workspace_id: str, model_id: str) -> Mapping:
    """
    number of elements for each label that were used to train a given model
    :param workspace_id:
    :param model_id:
    :return: the train-counts mapping stored in the model's metadata
    """
    model = _get_model(workspace_id, model_id)
    return model.model_metadata[TRAIN_COUNTS_STR_KEY]
def get_all_models_for_category(workspace_id, category_name: str):
    """Return the {model_id: ModelInfo} dict for a category (empty dict if none).

    :param workspace_id:
    :param category_name:
    """
    return get_workspace(workspace_id).category_to_models.get(category_name, {})
def infer(workspace_id: str, category_name: str, texts_to_infer: Sequence[TextElement], model_id: str = None,
          infer_params: dict = None, use_cache: bool = True) -> dict:
    """
    get the prediction for a list of TextElements
    :param workspace_id:
    :param category_name:
    :param texts_to_infer: list of TextElements
    :param model_id: model_id to use. If set to None, the latest model for the category will be used
    :param infer_params: dictionary for additional inference parameters. Default is None
    :param use_cache: utilize a cache that stores inference results
    :return: a dictionary of inference results, with at least the "labels" key, where the value is a list of string
    labels for each element in texts_to_infer. Additional keys, with list values of the same length, can be passed.
    e.g. {"labels": ['false', 'true', 'true'],
          "scores": [0.23, 0.79, 0.98],
          "gradients": [[0.24, -0.39, -0.66, 0.25], [0.14, 0.29, -0.26, 0.16], [-0.46, 0.61, -0.02, 0.23]]}
    """
    models = get_all_models_for_category(workspace_id, category_name)
    if len(models) == 0:
        raise Exception(f"There are no models in workspace {workspace_id} for category {category_name}")
    if model_id is None:  # use latest
        model = orchestrator_state_api.get_latest_model_by_state(workspace_id=workspace_id,
                                                                 category_name=category_name,
                                                                 model_status=ModelStatus.READY)
    else:
        # an explicitly requested model must already be READY
        model = _get_model(workspace_id, model_id)
        if model.model_status is not ModelStatus.READY:
            raise Exception(f"model id {model_id} is not in READY status")
    train_and_infer = PROJECT_PROPERTIES["train_and_infer_factory"].get_train_and_infer(model.model_type)
    list_of_dicts = [{"text": element.text} for element in texts_to_infer]
    infer_results = train_and_infer.infer(model_id=model.model_id, items_to_infer=list_of_dicts,
                                          infer_params=infer_params, use_cache=use_cache)
    # backends return numeric labels; map them back to the category's textual labels
    all_labels = get_workspace(workspace_id).category_to_labels[category_name]
    numeric_label_to_text = {i: label for i, label in enumerate(sorted(all_labels))}
    infer_results['labels'] = [numeric_label_to_text[l] for l in infer_results['labels']]
    return infer_results
def infer_by_uris(workspace_id: str, category_name: str, uris_to_infer: Sequence[str], model_id: str = None,
                  infer_params: dict = None, use_cache: bool = True) -> dict:
    """
    get the prediction for a list of URIs
    :param workspace_id:
    :param category_name:
    :param uris_to_infer: list of uris (str)
    :param model_id: model_id to use. If set to None, the latest model for the category will be used
    :param infer_params: dictionary for additional inference parameters. Default is None
    :param use_cache: utilize a cache that stores inference results
    :return: a dictionary of inference results, with at least the "labels" key, where the value is a list of string
    labels for each element in texts_to_infer. Additional keys, with list values of the same length, can be passed.
    e.g. {"labels": ['false', 'true', 'true'],
          "scores": [0.23, 0.79, 0.98],
          "gradients": [[0.24, -0.39, -0.66, 0.25], [0.14, 0.29, -0.26, 0.16], [-0.46, 0.61, -0.02, 0.23]]}
    """
    # resolve the uris to TextElements, then delegate to infer()
    dataset_name = get_workspace(workspace_id).dataset_name
    elements_to_infer = data_access.get_text_elements_with_labels_info(workspace_id, dataset_name, uris_to_infer)
    return infer(workspace_id, category_name, elements_to_infer, model_id, infer_params, use_cache)
def get_all_text_elements(dataset_name: str) -> List[TextElement]:
    """
    get all the text elements of the given dataset
    :param dataset_name: the name of the dataset to read
    """
    return data_access.get_all_text_elements(dataset_name=dataset_name)
def get_all_text_elements_uris(dataset_name: str) -> List[str]:
    """
    Return a List of all TextElement uris in the given dataset_name.
    :param dataset_name: the name of the dataset from which the TextElement uris should be retrieved.
    :return: a List of all TextElement uris in the given dataset_name.
    """
    return data_access.get_all_text_elements_uris(dataset_name=dataset_name)
def get_all_document_uris(workspace_id):
    """Return the uris of all documents in the workspace's training dataset."""
    dataset_name = get_workspace(workspace_id).dataset_name
    return data_access.get_all_document_uris(dataset_name)
def get_label_counts(workspace_id: str, dataset_name: str, category_name: str, remove_duplicates=True):
    """
    get the number of elements that were labeled.
    :param workspace_id:
    :param dataset_name:
    :param category_name:
    :param remove_duplicates: whether to count all labeled elements or only unique instances
    :return: a mapping from label value to count
    """
    return data_access.get_label_counts(workspace_id, dataset_name, category_name, remove_duplicates=remove_duplicates)
def is_model_compatible_with_active_learning(al: ActiveLearningStrategy, model: ModelType):
    """
    return true if active learning strategy is supported by the given model type
    for example, ActiveLearningStrategies.CORE_SET and ActiveLearningStrategies.DAL are not supported by Naive Bayes
    defined in method get_compatible_models() under lrtc_lib.active_learning.strategies.py
    :param al:
    :param model:
    :return: bool-like result of the project-configured compatibility function
    """
    return PROJECT_PROPERTIES["models_compatible_with_strategies_func"](model, al)
def delete_model_from_workspace(workspace_id, category_name, model_id):
    """Remove a model both from the orchestrator state and from its backend storage."""
    model_type = _get_model(workspace_id, model_id).model_type
    train_and_infer = PROJECT_PROPERTIES["train_and_infer_factory"].get_train_and_infer(model_type)
    logging.info(f"deleting model id {model_id} from workspace {workspace_id} in category {category_name}")
    # remove the state entry first, then the physical model files
    orchestrator_state_api.delete_model(workspace_id, category_name, model_id)
    train_and_infer.delete_model(model_id)
def add_train_param(workspace_id: str, train_param_key: str, train_param_value: str):
    """Register an additional training parameter for the workspace.

    :raises NotImplementedError: always — this is a placeholder for a planned API.
    """
    # NotImplementedError is the idiomatic signal for an unimplemented stub and
    # remains catchable by callers using `except Exception`.
    raise NotImplementedError("add_train_param is not implemented yet")
def workspace_exists(workspace_id: str) -> bool:
    """Return True if a workspace with this id is registered in the orchestrator state."""
    return orchestrator_state_api.workspace_exists(workspace_id)
def get_workspace(workspace_id):
    """Return the workspace state object; raise if the id is unknown."""
    if workspace_exists(workspace_id):
        return orchestrator_state_api.get_workspace(workspace_id)
    raise Exception(f"workspace_id '{workspace_id}' doesn't exist")
def _get_model(workspace_id, model_id):
    """Return the ModelInfo for *model_id* in the given workspace.

    :raises Exception: if the model id is unknown in this workspace.
    """
    workspace = get_workspace(workspace_id)
    # Flatten the per-category model dicts into one {model_id: ModelInfo} map.
    all_models = {k: v for d in workspace.category_to_models.values() for k, v in d.items()}
    # Membership test instead of `all_models[model_id]`: an unknown id previously
    # raised a bare KeyError before reaching the informative Exception below.
    if model_id in all_models:
        return all_models[model_id]
    raise Exception(f"model id {model_id} does not exist in workspace {workspace_id}")
| [
"lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.get_current_category_recommendations",
"os.remove",
"lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.create_workspace",
"lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.get_all_workspaces",
"lrtc_lib.orchestrator.core.state... | [((1547, 1584), 'lrtc_lib.data_access.data_access_factory.get_data_access', 'data_access_factory.get_data_access', ([], {}), '()\n', (1582, 1584), True, 'import lrtc_lib.data_access.data_access_factory as data_access_factory\n'), ((1616, 1718), 'lrtc_lib.training_set_selector.training_set_selector_factory.get_training_set_selector', 'training_set_selector_factory.get_training_set_selector', ([], {'selector': 'training_set_selection_strategy'}), '(selector=\n training_set_selection_strategy)\n', (1671, 1718), False, 'from lrtc_lib.training_set_selector import training_set_selector_factory\n'), ((2968, 3046), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.copy_workspace', 'orchestrator_state_api.copy_workspace', (['existing_workspace_id', 'new_workspace_id'], {}), '(existing_workspace_id, new_workspace_id)\n', (3005, 3046), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((4560, 4668), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.create_workspace', 'orchestrator_state_api.create_workspace', (['workspace_id', 'dataset_name', 'dev_dataset_name', 'test_dataset_name'], {}), '(workspace_id, dataset_name,\n dev_dataset_name, test_dataset_name)\n', (4599, 4668), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((4669, 4748), 'logging.info', 'logging.info', (['f"""Creating workspace {workspace_id} using dataset {dataset_name}"""'], {}), "(f'Creating workspace {workspace_id} using dataset {dataset_name}')\n", (4681, 4748), False, 'import logging\n'), ((5091, 5211), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.add_category_to_workspace', 'orchestrator_state_api.add_category_to_workspace', (['workspace_id', 'category_name', 'category_description', 'category_labels'], {}), '(workspace_id,\n category_name, category_description, category_labels)\n', (5139, 5211), False, 'from lrtc_lib.orchestrator.core.state_api import 
orchestrator_state_api\n'), ((5668, 5753), 'logging.info', 'logging.info', (['f"""deleting workspace {workspace_id} ignore errors {ignore_errors}"""'], {}), "(f'deleting workspace {workspace_id} ignore errors {ignore_errors}'\n )\n", (5680, 5753), False, 'import logging\n'), ((10284, 10392), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.get_current_category_recommendations', 'orchestrator_state_api.get_current_category_recommendations', (['workspace_id', 'category_name', 'model.model_id'], {}), '(workspace_id,\n category_name, model.model_id)\n', (10343, 10392), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((11644, 11717), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.get_active_learning_status', 'orchestrator_state_api.get_active_learning_status', (['workspace_id', 'model_id'], {}), '(workspace_id, model_id)\n', (11693, 11717), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((12300, 12402), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.get_current_category_recommendations', 'orchestrator_state_api.get_current_category_recommendations', (['workspace_id', 'category_name', 'model_id'], {}), '(workspace_id,\n category_name, model_id)\n', (12359, 12402), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((14782, 14839), 'logging.info', 'logging.info', (['f"""training a new model with {train_counts}"""'], {}), "(f'training a new model with {train_counts}')\n", (14794, 14839), False, 'import logging\n'), ((15142, 15157), 'collections.Counter', 'Counter', (['labels'], {}), '(labels)\n', (15149, 15157), False, 'from collections import Counter\n'), ((15653, 15786), 'logging.info', 'logging.info', (['f"""workspace {workspace_id} training a model for category \'{category_name}\', model_metadata: {model_metadata}"""'], {}), '(\n f"workspace {workspace_id} training a model for category \'{category_name}\', model_metadata: 
{model_metadata}"\n )\n', (15665, 15786), False, 'import logging\n'), ((16874, 16917), 'logging.info', 'logging.info', (['f"""new model id is {model_id}"""'], {}), "(f'new model id is {model_id}')\n", (16886, 16917), False, 'import logging\n'), ((16985, 17175), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.add_model', 'orchestrator_state_api.add_model', ([], {'workspace_id': 'workspace_id', 'category_name': 'category_name', 'model_id': 'model_id', 'model_status': 'model_status', 'model_type': 'model_type', 'model_metadata': 'params'}), '(workspace_id=workspace_id, category_name=\n category_name, model_id=model_id, model_status=model_status, model_type\n =model_type, model_metadata=params)\n', (17017, 17175), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((23847, 23960), 'logging.info', 'logging.info', (['f"""deleting model id {model_id} from workspace {workspace_id} in category {category_name}"""'], {}), "(\n f'deleting model id {model_id} from workspace {workspace_id} in category {category_name}'\n )\n", (23859, 23960), False, 'import logging\n'), ((23955, 24029), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.delete_model', 'orchestrator_state_api.delete_model', (['workspace_id', 'category_name', 'model_id'], {}), '(workspace_id, category_name, model_id)\n', (23990, 24029), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((24266, 24319), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.workspace_exists', 'orchestrator_state_api.workspace_exists', (['workspace_id'], {}), '(workspace_id)\n', (24305, 24319), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((24481, 24531), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.get_workspace', 'orchestrator_state_api.get_workspace', (['workspace_id'], {}), '(workspace_id)\n', (24517, 24531), False, 'from lrtc_lib.orchestrator.core.state_api import 
orchestrator_state_api\n'), ((1864, 1934), 'lrtc_lib.data_access.core.utils.get_workspace_labels_dump_filename', 'get_workspace_labels_dump_filename', ([], {'workspace_id': '"""*"""', 'dataset_name': '"""*"""'}), "(workspace_id='*', dataset_name='*')\n", (1898, 1934), False, 'from lrtc_lib.data_access.core.utils import get_workspace_labels_dump_filename\n'), ((3710, 3812), 'lrtc_lib.training_set_selector.training_set_selector_factory.get_training_set_selector', 'training_set_selector_factory.get_training_set_selector', ([], {'selector': 'training_set_selection_strategy'}), '(selector=\n training_set_selection_strategy)\n', (3765, 3812), False, 'from lrtc_lib.training_set_selector import training_set_selector_factory\n'), ((10156, 10256), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.get_latest_model_by_state', 'orchestrator_state_api.get_latest_model_by_state', (['workspace_id', 'category_name', 'ModelStatus.READY'], {}), '(workspace_id,\n category_name, ModelStatus.READY)\n', (10204, 10256), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((10580, 10738), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.update_active_learning_status', 'orchestrator_state_api.update_active_learning_status', (['workspace_id', 'category_name', 'model.model_id', 'ActiveLearningRecommendationsStatus.AL_IN_PROGRESS'], {}), '(workspace_id,\n category_name, model.model_id, ActiveLearningRecommendationsStatus.\n AL_IN_PROGRESS)\n', (10632, 10738), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((11032, 11215), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.update_category_recommendations', 'orchestrator_state_api.update_category_recommendations', ([], {'workspace_id': 'workspace_id', 'category_name': 'category_name', 'model_id': 'model.model_id', 'recommended_items': 'new_recommendations'}), '(workspace_id=\n workspace_id, category_name=category_name, 
model_id=model.model_id,\n recommended_items=new_recommendations)\n', (11086, 11215), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((11341, 11485), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.update_active_learning_status', 'orchestrator_state_api.update_active_learning_status', (['workspace_id', 'category_name', 'model.model_id', 'ActiveLearningRecommendationsStatus.READY'], {}), '(workspace_id,\n category_name, model.model_id, ActiveLearningRecommendationsStatus.READY)\n', (11393, 11485), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((19398, 19538), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.get_latest_model_by_state', 'orchestrator_state_api.get_latest_model_by_state', ([], {'workspace_id': 'workspace_id', 'category_name': 'category_name', 'model_status': 'ModelStatus.READY'}), '(workspace_id=workspace_id,\n category_name=category_name, model_status=ModelStatus.READY)\n', (19446, 19538), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((1990, 2033), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.get_all_workspaces', 'orchestrator_state_api.get_all_workspaces', ([], {}), '()\n', (2031, 2033), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((2339, 2395), 'logging.info', 'logging.info', (['f"""deleting orphan labels file {dump_file}"""'], {}), "(f'deleting orphan labels file {dump_file}')\n", (2351, 2395), False, 'import logging\n'), ((2408, 2428), 'os.remove', 'os.remove', (['dump_file'], {}), '(dump_file)\n', (2417, 2428), False, 'import os\n'), ((5851, 5901), 'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.get_workspace', 'orchestrator_state_api.get_workspace', (['workspace_id'], {}), '(workspace_id)\n', (5887, 5901), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((6332, 6391), 
'lrtc_lib.orchestrator.core.state_api.orchestrator_state_api.delete_workspace_state', 'orchestrator_state_api.delete_workspace_state', (['workspace_id'], {}), '(workspace_id)\n', (6377, 6391), False, 'from lrtc_lib.orchestrator.core.state_api import orchestrator_state_api\n'), ((2158, 2228), 'lrtc_lib.data_access.core.utils.get_workspace_labels_dump_filename', 'get_workspace_labels_dump_filename', ([], {'workspace_id': 'wid', 'dataset_name': '"""*"""'}), "(workspace_id=wid, dataset_name='*')\n", (2192, 2228), False, 'from lrtc_lib.data_access.core.utils import get_workspace_labels_dump_filename\n'), ((6435, 6492), 'logging.error', 'logging.error', (['f"""error deleting workspace {workspace_id}"""'], {}), "(f'error deleting workspace {workspace_id}')\n", (6448, 6492), False, 'import logging\n'), ((6505, 6526), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6524, 6526), False, 'import traceback\n'), ((6854, 6927), 'logging.error', 'logging.error', (['f"""error clearing saved label for workspace {workspace_id}"""'], {}), "(f'error clearing saved label for workspace {workspace_id}')\n", (6867, 6927), False, 'import logging\n'), ((6940, 6961), 'traceback.print_exc', 'traceback.print_exc', ([], {}), '()\n', (6959, 6961), False, 'import traceback\n')] |
#!/usr/bin/env python
import os
import crowdai_api
########################################################################
# Instatiate Event Notifier
########################################################################
crowdai_events = crowdai_api.events.CrowdAIEvents()
class CrowdAIMarloEvents:
    """String constants naming every MarLo event type exchanged with the
    crowdAI grader; used as the ``event_type`` field of event payloads."""
    REQUEST_ENV_JOIN_TOKENS="marlo.events.REQUEST_JOIN_TOKENS"
    END_OF_GRADING="marlo.events.END_OF_GRADING"
    # Per-game lifecycle events
    GAME_INIT="marlo.events.GAME_INIT"
    ENV_RESET="marlo.events.ENV_RESET"
    ENV_ACTION="marlo.events.ENV_ACTION"
    STEP_REWARD="marlo.events.STEP_REWARD"
    # Episode state machine
    EPISODE_PENDING="marlo.events.EPISODE_PENDING"
    EPISODE_INITIATED="marlo.events.EPISODE_INITIATED"
    EPISODE_RUNNING="marlo.events.EPISODE_RUNNING"
    EPISODE_DONE="marlo.events.EPISODE_DONE" #Episode Complete
    EPISODE_ERROR="marlo.events.EPISODE_ERROR"
    # Whole-evaluation state machine
    EVALUATION_PENDING="marlo.events.EVALUATION_PENDING"
    EVALUATION_RUNNING="marlo.events.EVALUATION_RUNNING"
    EVALUATION_ERROR="marlo.events.EVALUATION_ERROR"
    EVALUATION_COMPLETE="marlo.events.EVALUATION_COMPLETE"
def is_grading():
    """Return True iff the code is executing inside the crowdAI evaluation
    system.

    Bug fix: ``os.getenv`` returns the raw *string* value of the variable
    (or the ``False`` default), so the function never actually returned a
    bool as documented.  Coerce explicitly so ``is_grading() is True``
    style checks behave as promised.

    :returns: bool
    """
    return bool(os.getenv("CROWDAI_IS_GRADING", False))
def evaluator_join_token(params={}):
    """Request join tokens for all agents in a game from the crowdAI grader.

    :param params: game params; only a grader-recognised subset is honoured.
    :type params: dict
    :returns: list of join-token strings, or a falsy response that marks the
        end of the evaluation (in which case END_OF_GRADING is signalled).
    """
    events = crowdai_api.CrowdAIEvents()
    payload = {
        "event_type": CrowdAIMarloEvents.REQUEST_ENV_JOIN_TOKENS,
        "params": params,
    }
    # Blocking call: the grader answers with the JOIN_TOKENS (or nothing).
    response = events.register_event(
        event_type=events.CROWDAI_EVENT_INFO,
        message="",
        payload=payload,
        blocking=True,
    )
    if not response:
        register_end_of_grading(events)
    return response
def register_end_of_grading(crowdai_events):
    """Signal the evaluation system that grading is over.

    Blocks until the rest of the evaluation pipeline finishes its
    post-processing.

    :param crowdai_events: a `crowdai_api.CrowdAIEvents` object
    """
    payload = {"event_type": CrowdAIMarloEvents.END_OF_GRADING}
    crowdai_events.register_event(
        event_type=crowdai_events.CROWDAI_EVENT_INFO,
        message="",
        payload=payload,
        blocking=True,
    )
class CrowdAiNotifier():
    """Static helpers that push MarLo lifecycle events to the crowdAI grader."""

    @staticmethod
    def _send_notification(event_type, message, payload={}, blocking=False):
        """Register an event, always tagging the payload with the challenge id.

        Bug fix: ``default_payload`` (which carries ``challenge_id``) was
        built and merged, but the raw ``payload`` argument was passed to
        ``register_event`` instead, so the challenge id was never sent.
        Note: ``payload={}`` is never mutated here, so the shared default
        is harmless.
        """
        crowdai_events = crowdai_api.events.CrowdAIEvents()
        default_payload = {"challenge_id": "MarLo"}
        default_payload.update(payload)
        crowdai_events.register_event(event_type, message, default_payload,
                                      blocking)

    @staticmethod
    def _game_init():
        # Fired once when the Minecraft game starts up.
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_INFO,
            message="Game Initialized",
            payload={
                "event_type" : CrowdAIMarloEvents.GAME_INIT
            },
            blocking=False)

    @staticmethod
    def _env_reset():
        # Fired on every env.reset() call.
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_INFO,
            message="Environment Reset",
            payload={
                "event_type" : CrowdAIMarloEvents.ENV_RESET
            },
            blocking=False)

    @staticmethod
    def _env_action(action):
        # Fired for every agent action forwarded to the environment.
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_INFO,
            message="",
            payload={
                "event_type" : CrowdAIMarloEvents.ENV_ACTION,
                "action": action
            },
            blocking=False)

    @staticmethod
    def _step_reward(reward):
        # Fired with the reward of each environment step.
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_INFO,
            message="",
            payload={
                "event_type" : CrowdAIMarloEvents.STEP_REWARD,
                "r":reward
            },
            blocking=False)

    @staticmethod
    def _episode_done():
        # Fired when an episode completes normally.
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_INFO,
            message="",
            payload={
                "event_type" : CrowdAIMarloEvents.EPISODE_DONE,
            },
            blocking=False)

    @staticmethod
    def _env_error(error_message):
        # Fired on any execution error; carries the error text to the grader.
        CrowdAiNotifier._send_notification(
            event_type=crowdai_events.CROWDAI_EVENT_ERROR,
            message="execution_error",
            payload={
                "event_type" : CrowdAIMarloEvents.EPISODE_ERROR,
                "error_message":error_message
            },
            blocking=False)
| [
"crowdai_api.CrowdAIEvents",
"os.getenv",
"crowdai_api.events.CrowdAIEvents"
] | [((243, 277), 'crowdai_api.events.CrowdAIEvents', 'crowdai_api.events.CrowdAIEvents', ([], {}), '()\n', (275, 277), False, 'import crowdai_api\n'), ((1229, 1267), 'os.getenv', 'os.getenv', (['"""CROWDAI_IS_GRADING"""', '(False)'], {}), "('CROWDAI_IS_GRADING', False)\n", (1238, 1267), False, 'import os\n'), ((1734, 1761), 'crowdai_api.CrowdAIEvents', 'crowdai_api.CrowdAIEvents', ([], {}), '()\n', (1759, 1761), False, 'import crowdai_api\n'), ((2824, 2858), 'crowdai_api.events.CrowdAIEvents', 'crowdai_api.events.CrowdAIEvents', ([], {}), '()\n', (2856, 2858), False, 'import crowdai_api\n')] |
import os
import json
import numpy as np
import pandas as pd
import requests
from sklearn.datasets import load_boston
from sklearn.pipeline import FeatureUnion
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import PolynomialFeatures
from tqdm import tqdm
from source.code.preprocessing.itemsselector import ItemSelector
from source.code.preprocessing.mylabelbinarizer import MyLabelBinarizer
from source.code.preprocessing.utils import create_sub_folders
# JSON manifest describing the remote datasets (entries with name/link/fmt).
data_sources_description = '../../../data/data_sources.json'
# Local cache directory where downloaded datasets are stored.
local_path = '../../../data/dataset'
def download_data_from(from_param, to_param):
    """Download the dataset described by *from_param* into directory *to_param*.

    Skips the download when the target file already exists.  Returns the
    local path of the (possibly cached) file.
    """
    target_name = '{}.{}'.format(from_param['name'], from_param['fmt'])
    target_path = os.path.join(to_param, target_name)
    if not os.path.exists(to_param):
        create_sub_folders(to_param)
    if os.path.exists(target_path):
        return target_path
    # Stream the file to disk with a progress bar.
    response = requests.get(from_param['link'], stream=True)
    with open(target_path, "wb") as handle:
        for chunk in tqdm(response.iter_content()):
            handle.write(chunk)
    return target_path
def read_and_clean_titanic_data():
    """Load the Titanic dataset, impute/encode columns and return (X, y).

    X is the transformed feature matrix, y a column vector of the
    'survived' labels.
    """
    data_sources = json.load(open(data_sources_description, 'r'))
    titanic = pd.read_excel(download_data_from(data_sources[0], local_path))
    # Mean-impute numeric gaps; map categorical strings to small ints.
    titanic.age.fillna(titanic.age.mean(), inplace=True)
    titanic.fare.fillna(titanic.fare.mean(), inplace=True)
    titanic.sex.replace({'male': 0, 'female': 1}, inplace=True)
    titanic.embarked.replace({'S': 0, 'C': 1, 'Q': 2}, inplace=True)
    # Drop rows with no embarkation port rather than inventing one.
    titanic = titanic[~titanic.embarked.isnull()]
    num_features = ['age', 'fare']
    cat_features = ['pclass', 'embarked', 'parch', 'sibsp']
    bin_features = ['sex']
    # NOTE(review): cat_features are selected into X below but are NOT part
    # of the FeatureUnion, so they are dropped by the pipeline -- confirm
    # this is intentional.
    pipeline = Pipeline([
        ('union', FeatureUnion([
            ('bin', Pipeline(
                [
                    ('choose', ItemSelector(bin_features))
                ]
            )),
            ('num', Pipeline(
                [
                    ('choose', ItemSelector(num_features)),
                    ('scale', StandardScaler())
                ]
            ))
        ]))
    ])
    X = titanic[num_features + cat_features + bin_features]
    X = pipeline.fit_transform(X)
    y = titanic.survived.values
    # Reshape targets to a (n, 1) column vector.
    y = y.reshape([len(y), 1])
    return X, y
def read_and_clean_thyroid_data():
    """Load the hypothyroid dataset, encode/impute columns and return (X, y).

    y is the label-binarized 'Class' column.
    """
    data_sources = json.load(open(data_sources_description, 'r'))
    hypothyroid = pd.read_csv(download_data_from(data_sources[1], local_path))
    # Binary encodings: sex and the f/t flags; '?' placeholders become 0.
    hypothyroid.sex.replace({'M': 0, 'F': 1}, inplace=True)
    hypothyroid.replace({'f': 0, 't': 1}, inplace=True)
    hypothyroid.replace({'?': 0}, inplace=True)
    # TBG is dropped together with its measurement-indicator column.
    hypothyroid.drop(['TBG', 'TBG_measured'], axis=1, inplace=True)
    num_features = ['age', 'TSH', 'T3', 'TT4', 'T4U', 'FTI']
    cat_features = ['referral_source']
    bin_features = [
        'sex',
        'on_thyroxine',
        'query_on_thyroxine',
        'on_antithyroid_medication',
        'sick',
        'pregnant',
        'thyroid_surgery',
        'I131_treatment',
        'query_hypothyroid',
        'query_hyperthyroid',
        'lithium',
        'goitre',
        'tumor',
        'hypopituitary',
        'psych',
        'TSH_measured',
        'T3_measured',
        'TT4_measured',
        'T4U_measured',
        'FTI_measured'
    ]
    # Numeric columns arrive as strings (because of '?'); coerce to float
    # and mean-impute the remaining gaps.
    for feature in num_features:
        hypothyroid[feature] = hypothyroid[feature].astype(np.float32)
        hypothyroid[feature].fillna(hypothyroid[feature].mean(), inplace=True)
    pipeline = Pipeline([
        ('union', FeatureUnion([
            ('bin', Pipeline(
                [
                    ('choose', ItemSelector(bin_features))
                ]
            )),
            ('num', Pipeline(
                [
                    ('choose', ItemSelector(num_features)),
                    ('scale', StandardScaler())
                ]
            )),
            ('cat', Pipeline(
                [
                    ('choose', ItemSelector(cat_features)),
                    ('binarize', MyLabelBinarizer())
                ]
            ))
        ]))
    ])
    X = hypothyroid[num_features + cat_features + bin_features]
    X = pipeline.fit_transform(X)
    y = MyLabelBinarizer().fit_transform(hypothyroid.Class)
    return X, y
def read_and_clean_boston_data():
    """Load Boston housing, standardize features, expand polynomial terms.

    Returns (X, y) with y reshaped to a (n, 1) column vector.
    """
    features, target = load_boston(return_X_y=True)
    features = StandardScaler().fit_transform(features)
    features = PolynomialFeatures().fit_transform(features)
    target = target.reshape([len(target), 1])
    return features, target
def read_and_clean_feedback_data():
    """Download the feedback CSV and return its two columns as (X, y)."""
    sources = json.load(open(data_sources_description, 'r'))
    frame = pd.read_csv(download_data_from(sources[2], local_path))
    # Columns are literally named '0' (text) and '1' (label).
    return frame['0'].values.T, frame['1'].values.T
| [
"source.code.preprocessing.itemsselector.ItemSelector",
"sklearn.preprocessing.StandardScaler",
"os.path.exists",
"sklearn.datasets.load_boston",
"sklearn.preprocessing.PolynomialFeatures",
"requests.get",
"source.code.preprocessing.mylabelbinarizer.MyLabelBinarizer",
"source.code.preprocessing.utils.... | [((758, 791), 'os.path.join', 'os.path.join', (['to_param', 'file_name'], {}), '(to_param, file_name)\n', (770, 791), False, 'import os\n'), ((4344, 4372), 'sklearn.datasets.load_boston', 'load_boston', ([], {'return_X_y': '(True)'}), '(return_X_y=True)\n', (4355, 4372), False, 'from sklearn.datasets import load_boston\n'), ((803, 827), 'os.path.exists', 'os.path.exists', (['to_param'], {}), '(to_param)\n', (817, 827), False, 'import os\n'), ((837, 865), 'source.code.preprocessing.utils.create_sub_folders', 'create_sub_folders', (['to_param'], {}), '(to_param)\n', (855, 865), False, 'from source.code.preprocessing.utils import create_sub_folders\n'), ((877, 902), 'os.path.exists', 'os.path.exists', (['file_path'], {}), '(file_path)\n', (891, 902), False, 'import os\n'), ((923, 968), 'requests.get', 'requests.get', (["from_param['link']"], {'stream': '(True)'}), "(from_param['link'], stream=True)\n", (935, 968), False, 'import requests\n'), ((4228, 4246), 'source.code.preprocessing.mylabelbinarizer.MyLabelBinarizer', 'MyLabelBinarizer', ([], {}), '()\n', (4244, 4246), False, 'from source.code.preprocessing.mylabelbinarizer import MyLabelBinarizer\n'), ((4382, 4398), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (4396, 4398), False, 'from sklearn.preprocessing import StandardScaler\n'), ((4425, 4445), 'sklearn.preprocessing.PolynomialFeatures', 'PolynomialFeatures', ([], {}), '()\n', (4443, 4445), False, 'from sklearn.preprocessing import PolynomialFeatures\n'), ((1871, 1897), 'source.code.preprocessing.itemsselector.ItemSelector', 'ItemSelector', (['bin_features'], {}), '(bin_features)\n', (1883, 1897), False, 'from source.code.preprocessing.itemsselector import ItemSelector\n'), ((2012, 2038), 'source.code.preprocessing.itemsselector.ItemSelector', 'ItemSelector', (['num_features'], {}), '(num_features)\n', (2024, 2038), False, 'from source.code.preprocessing.itemsselector import ItemSelector\n'), 
((2071, 2087), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (2085, 2087), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3655, 3681), 'source.code.preprocessing.itemsselector.ItemSelector', 'ItemSelector', (['bin_features'], {}), '(bin_features)\n', (3667, 3681), False, 'from source.code.preprocessing.itemsselector import ItemSelector\n'), ((3796, 3822), 'source.code.preprocessing.itemsselector.ItemSelector', 'ItemSelector', (['num_features'], {}), '(num_features)\n', (3808, 3822), False, 'from source.code.preprocessing.itemsselector import ItemSelector\n'), ((3855, 3871), 'sklearn.preprocessing.StandardScaler', 'StandardScaler', ([], {}), '()\n', (3869, 3871), False, 'from sklearn.preprocessing import StandardScaler\n'), ((3986, 4012), 'source.code.preprocessing.itemsselector.ItemSelector', 'ItemSelector', (['cat_features'], {}), '(cat_features)\n', (3998, 4012), False, 'from source.code.preprocessing.itemsselector import ItemSelector\n'), ((4048, 4066), 'source.code.preprocessing.mylabelbinarizer.MyLabelBinarizer', 'MyLabelBinarizer', ([], {}), '()\n', (4064, 4066), False, 'from source.code.preprocessing.mylabelbinarizer import MyLabelBinarizer\n')] |
import setuptools
# read the contents of your README file
from os import path
# Resolve README.md relative to this setup.py so builds work from any cwd.
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
    long_description = f.read()
# Package metadata for the AD-cs207 automatic differentiation library.
setuptools.setup(
    name='AD-cs207',
    version='1.0.0',
    author='<NAME>, <NAME>, <NAME>, <NAME>',
    author_email=" ",
    description='Automatic Differentiation Package',
    long_description=long_description,
    long_description_content_type='text/markdown',
    install_requires=[ 'sympy>=1.3' ],
    packages=setuptools.find_packages(),
    keywords=['Automatic differentiation', 'gradients', 'Python'],
    url='https://github.com/cs207-f18-WIRS/cs207-FinalProject',
    license='MIT',
    classifiers=[
        'Programming Language :: Python :: 3',
        'License :: OSI Approved :: MIT License',
    ],
)
| [
"os.path.dirname",
"os.path.join",
"setuptools.find_packages"
] | [((110, 132), 'os.path.dirname', 'path.dirname', (['__file__'], {}), '(__file__)\n', (122, 132), False, 'from os import path\n'), ((144, 182), 'os.path.join', 'path.join', (['this_directory', '"""README.md"""'], {}), "(this_directory, 'README.md')\n", (153, 182), False, 'from os import path\n'), ((567, 593), 'setuptools.find_packages', 'setuptools.find_packages', ([], {}), '()\n', (591, 593), False, 'import setuptools\n')] |
import requests
import json
import ctypes
import tempfile
import os
import time
import base64
from dotenv import load_dotenv
# Load Spotify OAuth credentials and cached token state from a .env file.
load_dotenv()
REFRESH_TOKEN = os.getenv("REFRESH_TOKEN")
ACCESS_TOKEN = os.getenv("ACCESS_TOKEN")
# Unix timestamp after which ACCESS_TOKEN must be refreshed.
EXPIRATION_TIME = float(os.getenv("EXPIRATION_TIME"))
CLIENT_ID = os.getenv("CLIENT_ID")
CLIENT_SECRET = os.getenv("CLIENT_SECRET")
# HTTP basic-auth credentials for the token endpoint: base64("id:secret").
authorization_str = CLIENT_ID + ":" + CLIENT_SECRET
authorization_str = base64.b64encode(authorization_str.encode("utf-8"))
#print("EXPIRATION_TIME={}".format(EXPIRATION_TIME))
#print("REFRESH_TOKEN= {} \n ACCESS_TOKEN=
def get_token():
    """Refresh the Spotify access token when the cached one has expired.

    Returns the token endpoint's JSON text after a refresh, or the cached
    ACCESS_TOKEN while it is still valid.

    NOTE(review): the two branches return different shapes (JSON text vs
    bare token); callers currently ignore the return value.
    """
    global EXPIRATION_TIME
    global REFRESH_TOKEN
    global ACCESS_TOKEN
    global CLIENT_ID
    global CLIENT_SECRET
    global authorization_str
    if (time.time() > EXPIRATION_TIME):
        # Bug fix: the Authorization header was a hard-coded base64 blob and
        # the .format(...) call was a no-op (no placeholder in the literal).
        # Use the credentials derived from CLIENT_ID/CLIENT_SECRET instead.
        headers = {
            'Authorization': 'Basic {}'.format(authorization_str.decode('utf-8')),
        }
        data = {
            'grant_type': 'refresh_token',
            'refresh_token': '{}'.format(REFRESH_TOKEN)
        }
        r = requests.post('https://accounts.spotify.com/api/token', headers=headers, data=data)
        print("REFRESH TOKEN REQUEST RESPONSE: {}".format(r.text))
        return(r.text)
    else:
        print("Access token hasn't expired: returning stored one")
        return ACCESS_TOKEN
def get_current_album():
    """Return (album_art_url, album_id) for the currently playing track.

    Also stores the album id in the module-level ``album_id``.
    """
    global ACCESS_TOKEN
    global album_id
    headers = {
        'Accept': 'application/json',
        'Content-Type': 'application/json',
        'Authorization': 'Bearer {}'.format(ACCESS_TOKEN),
    }
    ra = requests.get('https://api.spotify.com/v1/me/player/currently-playing', headers=headers)
    # Bug fix: requests' status_code is an int; comparing against the
    # strings "200"/"429" made the outer check always true and the rate-
    # limit branch unreachable.
    if ra.status_code != 200:
        if ra.status_code == 429:
            print("RATE LIMITED")
        print("STATUS CODE FROM GET ALBUM FUNCTION: {}".format(ra.status_code))
    jsondata = json.loads(ra.text)
    album_link = jsondata["item"]['album']['images'][0]["url"]
    album_id = jsondata["item"]['album']['id']
    print("Album id: " + album_id)
    return(album_link, album_id)
def set_album(album_link, album_id):
    """Set the Windows desktop wallpaper to the album art, caching it on disk."""
    cache_dir = tempfile.gettempdir() + "\\wallpaperify\\"
    cached = cache_dir + album_id + ".png"
    if not os.path.isdir(cache_dir):
        os.mkdir(cache_dir)
    # Download only when this album's art is not cached yet.
    if not os.path.exists(cached):
        art = requests.get(album_link)
        with open(cached, 'wb') as handle:
            handle.write(art.content)
    # SPI_SETDESKWALLPAPER = 20
    ctypes.windll.user32.SystemParametersInfoW(20, 0, cached , 0)
def main():
    # Refresh the Spotify token if needed, then mirror the current album art.
    get_token()
    album_link, album_id = get_current_album()
    set_album(album_link, album_id)
# Poll Spotify once a minute and keep the wallpaper in sync.
while True:
    main()
    time.sleep(60)
| [
"os.mkdir",
"json.loads",
"os.path.isdir",
"tempfile.gettempdir",
"os.path.exists",
"time.sleep",
"dotenv.load_dotenv",
"time.time",
"requests.get",
"requests.post",
"os.getenv",
"ctypes.windll.user32.SystemParametersInfoW"
] | [((126, 139), 'dotenv.load_dotenv', 'load_dotenv', ([], {}), '()\n', (137, 139), False, 'from dotenv import load_dotenv\n'), ((158, 184), 'os.getenv', 'os.getenv', (['"""REFRESH_TOKEN"""'], {}), "('REFRESH_TOKEN')\n", (167, 184), False, 'import os\n'), ((200, 225), 'os.getenv', 'os.getenv', (['"""ACCESS_TOKEN"""'], {}), "('ACCESS_TOKEN')\n", (209, 225), False, 'import os\n'), ((292, 314), 'os.getenv', 'os.getenv', (['"""CLIENT_ID"""'], {}), "('CLIENT_ID')\n", (301, 314), False, 'import os\n'), ((331, 357), 'os.getenv', 'os.getenv', (['"""CLIENT_SECRET"""'], {}), "('CLIENT_SECRET')\n", (340, 357), False, 'import os\n'), ((250, 278), 'os.getenv', 'os.getenv', (['"""EXPIRATION_TIME"""'], {}), "('EXPIRATION_TIME')\n", (259, 278), False, 'import os\n'), ((1652, 1743), 'requests.get', 'requests.get', (['"""https://api.spotify.com/v1/me/player/currently-playing"""'], {'headers': 'headers'}), "('https://api.spotify.com/v1/me/player/currently-playing',\n headers=headers)\n", (1664, 1743), False, 'import requests\n'), ((1946, 1965), 'json.loads', 'json.loads', (['ra.text'], {}), '(ra.text)\n', (1956, 1965), False, 'import json\n'), ((2341, 2361), 'os.path.exists', 'os.path.exists', (['file'], {}), '(file)\n', (2355, 2361), False, 'import os\n'), ((2766, 2780), 'time.sleep', 'time.sleep', (['(60)'], {}), '(60)\n', (2776, 2780), False, 'import time\n'), ((755, 766), 'time.time', 'time.time', ([], {}), '()\n', (764, 766), False, 'import time\n'), ((1114, 1201), 'requests.post', 'requests.post', (['"""https://accounts.spotify.com/api/token"""'], {'headers': 'headers', 'data': 'data'}), "('https://accounts.spotify.com/api/token', headers=headers,\n data=data)\n", (1127, 1201), False, 'import requests\n'), ((2195, 2216), 'tempfile.gettempdir', 'tempfile.gettempdir', ([], {}), '()\n', (2214, 2216), False, 'import tempfile\n'), ((2290, 2309), 'os.path.isdir', 'os.path.isdir', (['fldr'], {}), '(fldr)\n', (2303, 2309), False, 'import os\n'), ((2319, 2333), 'os.mkdir', 'os.mkdir', 
(['fldr'], {}), '(fldr)\n', (2327, 2333), False, 'import os\n'), ((2371, 2429), 'ctypes.windll.user32.SystemParametersInfoW', 'ctypes.windll.user32.SystemParametersInfoW', (['(20)', '(0)', 'file', '(0)'], {}), '(20, 0, file, 0)\n', (2413, 2429), False, 'import ctypes\n'), ((2458, 2482), 'requests.get', 'requests.get', (['album_link'], {}), '(album_link)\n', (2470, 2482), False, 'import requests\n'), ((2559, 2617), 'ctypes.windll.user32.SystemParametersInfoW', 'ctypes.windll.user32.SystemParametersInfoW', (['(20)', '(0)', 'file', '(0)'], {}), '(20, 0, file, 0)\n', (2601, 2617), False, 'import ctypes\n')] |
import csv
from io import StringIO
import os.path as op
from flask import request, redirect, Response, flash, url_for
from werkzeug.exceptions import HTTPException
from flask_admin import Admin
from flask_admin.base import expose
from flask_admin.contrib.sqla import ModelView
from flask_admin.contrib.fileadmin import FileAdmin
from app import app, db
from app.provider import Provider
from app.application import Application
from app.admin import ProviderImportForm
from app.admin.forms import CSV_SCHEMA
# Admin dashboard mounted on the Flask app; views are registered below.
admin = Admin(app, name="Admin", template_mode="bootstrap3")
class ModelView(ModelView):
    # NOTE(review): deliberately shadows the imported flask_admin ModelView
    # so every admin view below inherits the basic-auth gate.
    def is_accessible(self):
        """Gate admin access behind HTTP basic auth.

        Falls back to REMOTE_USER for Apache deployments; raises a 401
        HTTPException when credentials are absent or wrong.
        """
        auth = request.authorization or request.environ.get(
            "REMOTE_USER"
        )  # workaround for Apache
        if (
            not auth
            or (auth.username, auth.password) != app.config["ADMIN_CREDENTIALS"]
        ):
            raise HTTPException(
                "",
                Response(
                    "You have to be an administrator.",
                    401,
                    {"WWW-Authenticate": 'Basic realm="Login Required"'},
                ),
            )
        return True
class ApplicationView(ModelView):
    """Application admin view; adds CSV export on top of the auth gate."""
    can_export = True
class ProviderView(ModelView):
    """Provider admin view with CSV export and a custom CSV import page."""
    can_export = True
    list_template = (
        'admin/providers.html'
    )  # Extending the list view to allow for CSV import

    @expose('/import', methods=['GET', 'POST'])
    def import_file(self):
        """Render the import form; on valid POST, bulk-upsert providers."""
        form = ProviderImportForm()
        if form.validate_on_submit():
            # Coerce form.file.data to a stream to read CSV data
            file_content = form.file.data.stream.read().decode('utf-8')
            with StringIO(file_content) as csv_file:
                csv_file_reader = csv.DictReader(
                    csv_file, fieldnames=CSV_SCHEMA)
                next(csv_file_reader)  # Skip the header row
                for item in csv_file_reader:
                    record = Provider.from_dict(item)
                    # Update or insert the record into the db.
                    db.session.merge(record)
            # Single commit for the whole import batch.
            db.session.commit()
            flash('Provider info imported successfully.')
            return redirect(url_for('provider.index_view'))
        return self.render('admin/import.html', form=form)
# Applications
admin.add_view(ApplicationView(Application, db.session))
# Providers
admin.add_view(ProviderView(Provider, db.session))
# Static files -- served relative to this module's directory.
path = op.join(op.dirname(__file__), "../static")
admin.add_view(FileAdmin(path, "/static/", name="Static"))
| [
"io.StringIO",
"flask_admin.Admin",
"flask.flash",
"flask_admin.base.expose",
"csv.DictReader",
"os.path.dirname",
"app.provider.Provider.from_dict",
"flask_admin.contrib.fileadmin.FileAdmin",
"flask.request.environ.get",
"app.admin.ProviderImportForm",
"flask.url_for",
"app.db.session.merge",... | [((518, 570), 'flask_admin.Admin', 'Admin', (['app'], {'name': '"""Admin"""', 'template_mode': '"""bootstrap3"""'}), "(app, name='Admin', template_mode='bootstrap3')\n", (523, 570), False, 'from flask_admin import Admin\n'), ((1394, 1436), 'flask_admin.base.expose', 'expose', (['"""/import"""'], {'methods': "['GET', 'POST']"}), "('/import', methods=['GET', 'POST'])\n", (1400, 1436), False, 'from flask_admin.base import expose\n'), ((2482, 2502), 'os.path.dirname', 'op.dirname', (['__file__'], {}), '(__file__)\n', (2492, 2502), True, 'import os.path as op\n'), ((2532, 2574), 'flask_admin.contrib.fileadmin.FileAdmin', 'FileAdmin', (['path', '"""/static/"""'], {'name': '"""Static"""'}), "(path, '/static/', name='Static')\n", (2541, 2574), False, 'from flask_admin.contrib.fileadmin import FileAdmin\n'), ((1479, 1499), 'app.admin.ProviderImportForm', 'ProviderImportForm', ([], {}), '()\n', (1497, 1499), False, 'from app.admin import ProviderImportForm\n'), ((670, 704), 'flask.request.environ.get', 'request.environ.get', (['"""REMOTE_USER"""'], {}), "('REMOTE_USER')\n", (689, 704), False, 'from flask import request, redirect, Response, flash, url_for\n'), ((2148, 2193), 'flask.flash', 'flash', (['"""Provider info imported successfully."""'], {}), "('Provider info imported successfully.')\n", (2153, 2193), False, 'from flask import request, redirect, Response, flash, url_for\n'), ((947, 1054), 'flask.Response', 'Response', (['"""You have to be an administrator."""', '(401)', '{\'WWW-Authenticate\': \'Basic realm="Login Required"\'}'], {}), '(\'You have to be an administrator.\', 401, {\'WWW-Authenticate\':\n \'Basic realm="Login Required"\'})\n', (955, 1054), False, 'from flask import request, redirect, Response, flash, url_for\n'), ((1692, 1714), 'io.StringIO', 'StringIO', (['file_content'], {}), '(file_content)\n', (1700, 1714), False, 'from io import StringIO\n'), ((1762, 1809), 'csv.DictReader', 'csv.DictReader', (['csv_file'], 
{'fieldnames': 'CSV_SCHEMA'}), '(csv_file, fieldnames=CSV_SCHEMA)\n', (1776, 1809), False, 'import csv\n'), ((2115, 2134), 'app.db.session.commit', 'db.session.commit', ([], {}), '()\n', (2132, 2134), False, 'from app import app, db\n'), ((2222, 2252), 'flask.url_for', 'url_for', (['"""provider.index_view"""'], {}), "('provider.index_view')\n", (2229, 2252), False, 'from flask import request, redirect, Response, flash, url_for\n'), ((1966, 1990), 'app.provider.Provider.from_dict', 'Provider.from_dict', (['item'], {}), '(item)\n', (1984, 1990), False, 'from app.provider import Provider\n'), ((2074, 2098), 'app.db.session.merge', 'db.session.merge', (['record'], {}), '(record)\n', (2090, 2098), False, 'from app import app, db\n')] |
# Generated by Django 3.1.1 on 2020-10-15 19:02
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Hackathon',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created', models.DateTimeField(default=django.utils.timezone.now)),
('updated', models.DateTimeField(auto_now=True)),
('display_name', models.CharField(default='', max_length=254)),
('description', models.TextField()),
('start_date', models.DateTimeField()),
('end_date', models.DateTimeField()),
('created_by', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='hackathon_created', to=settings.AUTH_USER_MODEL)),
('judges', models.ManyToManyField(blank=True, related_name='hackathon_judges', to=settings.AUTH_USER_MODEL)),
('organiser', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='hackathon_organiser', to=settings.AUTH_USER_MODEL)),
],
),
] | [
"django.db.models.TextField",
"django.db.migrations.swappable_dependency",
"django.db.models.ManyToManyField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.models.DateTimeField"
] | [((276, 333), 'django.db.migrations.swappable_dependency', 'migrations.swappable_dependency', (['settings.AUTH_USER_MODEL'], {}), '(settings.AUTH_USER_MODEL)\n', (307, 333), False, 'from django.db import migrations, models\n'), ((467, 560), 'django.db.models.AutoField', 'models.AutoField', ([], {'auto_created': '(True)', 'primary_key': '(True)', 'serialize': '(False)', 'verbose_name': '"""ID"""'}), "(auto_created=True, primary_key=True, serialize=False,\n verbose_name='ID')\n", (483, 560), False, 'from django.db import migrations, models\n'), ((587, 642), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'default': 'django.utils.timezone.now'}), '(default=django.utils.timezone.now)\n', (607, 642), False, 'from django.db import migrations, models\n'), ((673, 708), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now': '(True)'}), '(auto_now=True)\n', (693, 708), False, 'from django.db import migrations, models\n'), ((744, 788), 'django.db.models.CharField', 'models.CharField', ([], {'default': '""""""', 'max_length': '(254)'}), "(default='', max_length=254)\n", (760, 788), False, 'from django.db import migrations, models\n'), ((823, 841), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (839, 841), False, 'from django.db import migrations, models\n'), ((875, 897), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (895, 897), False, 'from django.db import migrations, models\n'), ((929, 951), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {}), '()\n', (949, 951), False, 'from django.db import migrations, models\n'), ((985, 1115), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'on_delete': 'django.db.models.deletion.CASCADE', 'related_name': '"""hackathon_created"""', 'to': 'settings.AUTH_USER_MODEL'}), "(on_delete=django.db.models.deletion.CASCADE, related_name\n ='hackathon_created', to=settings.AUTH_USER_MODEL)\n", (1002, 1115), False, 'from django.db 
import migrations, models\n'), ((1140, 1241), 'django.db.models.ManyToManyField', 'models.ManyToManyField', ([], {'blank': '(True)', 'related_name': '"""hackathon_judges"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, related_name='hackathon_judges', to=\n settings.AUTH_USER_MODEL)\n", (1162, 1241), False, 'from django.db import migrations, models\n'), ((1269, 1430), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'blank': '(True)', 'null': '(True)', 'on_delete': 'django.db.models.deletion.SET_NULL', 'related_name': '"""hackathon_organiser"""', 'to': 'settings.AUTH_USER_MODEL'}), "(blank=True, null=True, on_delete=django.db.models.\n deletion.SET_NULL, related_name='hackathon_organiser', to=settings.\n AUTH_USER_MODEL)\n", (1286, 1430), False, 'from django.db import migrations, models\n')] |
from re import sub
from os import walk, mkdir, rename, remove
from sys import stderr
from shutil import rmtree
from os.path import isfile, isdir, join, basename, splitext, exists
from filecmp import dircmp
import mekpie.debug as debug
import mekpie.messages as messages
from .cli import panic, log
# Numerical
# ---------------------------------------------------------------------------- #
def clamp(value, bottom, top):
    """Constrain *value* to the closed interval [bottom, top]."""
    capped = min(top, value)
    return max(bottom, capped)
# Collections
# ---------------------------------------------------------------------------- #
def empty(collection):
    """Return True when *collection* has no elements."""
    return not len(collection)
def first(collection):
    """Return the first element, or None for an empty collection."""
    return collection[0] if collection else None
def rest(collection):
    """Return everything after the first element, or None when empty."""
    return collection[1:] if collection else None
def last(collection):
    """Return the final element, or None for an empty collection."""
    return collection[-1] if collection else None
def shift(collection, n=1):
    """Drop up to *n* items from the front of *collection*, in place."""
    removed = 0
    while removed < n and collection:
        collection.pop(0)
        removed += 1
def flatten(collection):
    """Concatenate a list of lists into one flat list."""
    flat = []
    for part in collection:
        flat = flat + part
    return flat
def split(collection, item):
    """Split *collection* around the first occurrence of *item*.

    Returns (before, after), excluding *item* itself.  When the item is
    absent, returns the whole collection and an empty list.
    """
    try:
        index = collection.index(item)
    except ValueError:
        return collection, []
    return collection[:index], collection[index + 1:]
# Strings
# ---------------------------------------------------------------------------- #
def tab(string, spaces=4):
    """Indent *string*: a newline+indent is inserted at the start and
    after every existing newline."""
    indent = '\n' + ' ' * spaces
    return sub(r'^|\n', indent, string)
def underline(element, collection):
    """Render *collection* on one line with a caret row marking *element*."""
    header = ' '.join(collection)
    marker_row = ' '.join(underlined_collection(element, collection))
    return f'{header}\n{marker_row}'
def underlined_collection(underlined_element, collection):
    """Yield each element rendered as carets (if it is the underlined one)
    or blanks of the same width."""
    for element in collection:
        marker = '^' if element == underlined_element else ' '
        yield sub(r'.', marker, str(element))
# Files
# ---------------------------------------------------------------------------- #
def smkdir(path):
    """Create the directory at *path*, silently succeeding if it exists."""
    log(f'Creating directory {path}...')
    if exists(path):
        return
    mkdir(path)
def srmtree(path):
    """Recursively delete *path*, silently succeeding if it is absent."""
    if not exists(path):
        return
    rmtree(path)
def smv(source, destination):
    """Move *source* to *destination*, overwriting any existing destination.

    Bug fix: ``remove(destination)`` was called unconditionally, raising
    FileNotFoundError whenever the destination did not already exist.
    """
    log(f'Moving {source} to {destination}')
    if exists(destination):
        remove(destination)
    if exists(source):
        rename(source, destination)
def list_files(path, with_filter=None, with_ext=None, recursive=False):
    """Return files under *path*, filtered by predicate or extension.

    *with_ext*, when given, overrides *with_filter*.  NOTE(review): the
    *recursive* flag is accepted but unused (listing is always recursive).
    """
    predicate = with_filter
    if predicate is None:
        predicate = lambda _ : True
    if with_ext is not None:
        predicate = lambda filename : filename.endswith(with_ext)
    return [name for name in list_all_files(path) if predicate(name)]
def list_all_files(path):
    """Return the full path of every file below *path*, recursively."""
    found = []
    for root, _, names in walk(path):
        found.extend(join(root, name) for name in names)
    return found
def list_all_dirs(path):
    """Return the full path of every directory below *path*, recursively.

    Bug fix: the original joined the *top-level* path with each directory
    name from every walk level, so nested directories got wrong parents
    (e.g. ``top/b`` instead of ``top/a/b``).  Join against the walk root
    instead, mirroring list_all_files.
    """
    found = []
    for root, dirs, _ in walk(path):
        for name in dirs:
            found.append(join(root, name))
    return found
def filename(path):
    """Return the base name of *path* without its final extension."""
    base = basename(path)
    stem, _extension = splitext(base)
    return stem
def file_as_str(path):
    """Return the full text of the file at *path* (panics if not a file)."""
    check_is_file(path)
    log(f'Reading the contents of {path}...')
    with open(path) as handle:
        contents = handle.read()
    return contents
def remove_contents(path):
    """Delete everything under *path*, leaving an empty directory behind."""
    log(f'Deleting the contents of {path}...')
    srmtree(path)
    smkdir(path)
def check_is_file(path):
    """Return *path* unchanged if it is an existing file; panic otherwise."""
    if not isfile(path):
        return panic(messages.file_not_found.format(path))
    return path
def check_is_dir(path):
    """Return *path* unchanged if it is an existing directory; panic otherwise."""
    if not isdir(path):
        return panic(messages.directory_not_found.format(path))
    return path
def same_dir(dir1, dir2):
    """Return True when the two directory trees contain no differing files."""
    def identical(cmp):
        if cmp.diff_files:
            return False
        # Recurse into every common subdirectory.
        return all(identical(child) for child in cmp.subdirs.values())
    return identical(dircmp(dir1, dir2))
def exec_str(source, handle, ctx=None):
    """Execute Python *source*, returning the globals dict it ran in.

    *handle* names the source (used in error messages).  Bug fix: the
    mutable default ``ctx={}`` was shared between calls, so definitions
    leaked from one exec_str invocation into the next; default to a fresh
    dict per call (passing an explicit ctx still works as before).
    """
    if ctx is None:
        ctx = {}
    try:
        exec(source, ctx)
    except Exception as err:
        panic(messages.execution_error.format(handle, tab(str(err))))
    return ctx
def exec_file(path, ctx=None):
    """Execute the Python file at *path*, returning its globals dict.

    Bug fix: avoids the shared mutable-default ``ctx={}`` by supplying a
    fresh dict per call when the caller does not provide one.
    """
    return exec_str(file_as_str(path), path, ctx if ctx is not None else {})
# Types
# ---------------------------------------------------------------------------- #
def type_name(x):
    """Return the name of *x*'s concrete type as a string."""
    kind = type(x)
    return kind.__name__
| [
"os.mkdir",
"os.remove",
"os.path.join",
"os.path.basename",
"os.path.isdir",
"os.rename",
"mekpie.messages.directory_not_found.format",
"os.path.exists",
"os.walk",
"os.path.isfile",
"mekpie.messages.file_not_found.format",
"shutil.rmtree",
"re.sub",
"filecmp.dircmp"
] | [((1411, 1452), 're.sub', 'sub', (['"""^|\\\\n"""', "('\\n' + spaces * ' ')", 'string'], {}), "('^|\\\\n', '\\n' + spaces * ' ', string)\n", (1414, 1452), False, 'from re import sub\n'), ((2090, 2102), 'os.path.exists', 'exists', (['path'], {}), '(path)\n', (2096, 2102), False, 'from os.path import isfile, isdir, join, basename, splitext, exists\n'), ((2206, 2225), 'os.remove', 'remove', (['destination'], {}), '(destination)\n', (2212, 2225), False, 'from os import walk, mkdir, rename, remove\n'), ((2233, 2247), 'os.path.exists', 'exists', (['source'], {}), '(source)\n', (2239, 2247), False, 'from os.path import isfile, isdir, join, basename, splitext, exists\n'), ((3268, 3280), 'os.path.isfile', 'isfile', (['path'], {}), '(path)\n', (3274, 3280), False, 'from os.path import isfile, isdir, join, basename, splitext, exists\n'), ((3382, 3393), 'os.path.isdir', 'isdir', (['path'], {}), '(path)\n', (3387, 3393), False, 'from os.path import isfile, isdir, join, basename, splitext, exists\n'), ((2029, 2041), 'os.path.exists', 'exists', (['path'], {}), '(path)\n', (2035, 2041), False, 'from os.path import isfile, isdir, join, basename, splitext, exists\n'), ((2051, 2062), 'os.mkdir', 'mkdir', (['path'], {}), '(path)\n', (2056, 2062), False, 'from os import walk, mkdir, rename, remove\n'), ((2112, 2124), 'shutil.rmtree', 'rmtree', (['path'], {}), '(path)\n', (2118, 2124), False, 'from shutil import rmtree\n'), ((2257, 2284), 'os.rename', 'rename', (['source', 'destination'], {}), '(source, destination)\n', (2263, 2284), False, 'from os import walk, mkdir, rename, remove\n'), ((3312, 3348), 'mekpie.messages.file_not_found.format', 'messages.file_not_found.format', (['path'], {}), '(path)\n', (3342, 3348), True, 'import mekpie.messages as messages\n'), ((3425, 3466), 'mekpie.messages.directory_not_found.format', 'messages.directory_not_found.format', (['path'], {}), '(path)\n', (3460, 3466), True, 'import mekpie.messages as messages\n'), ((3707, 3725), 'filecmp.dircmp', 
'dircmp', (['dir1', 'dir2'], {}), '(dir1, dir2)\n', (3713, 3725), False, 'from filecmp import dircmp\n'), ((2948, 2962), 'os.path.basename', 'basename', (['path'], {}), '(path)\n', (2956, 2962), False, 'from os.path import isfile, isdir, join, basename, splitext, exists\n'), ((2628, 2643), 'os.path.join', 'join', (['pre', 'post'], {}), '(pre, post)\n', (2632, 2643), False, 'from os.path import isfile, isdir, join, basename, splitext, exists\n'), ((2726, 2736), 'os.walk', 'walk', (['path'], {}), '(path)\n', (2730, 2736), False, 'from os import walk, mkdir, rename, remove\n'), ((2791, 2806), 'os.path.join', 'join', (['path', 'pre'], {}), '(path, pre)\n', (2795, 2806), False, 'from os.path import isfile, isdir, join, basename, splitext, exists\n'), ((2889, 2899), 'os.walk', 'walk', (['path'], {}), '(path)\n', (2893, 2899), False, 'from os import walk, mkdir, rename, remove\n')] |
#!/usr/bin/env python
#
# Copyright 2019 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
# Ensure that all jobs and roles appear in the documentation.
import os
import re
import sys
import yaml
class ZuulSafeLoader(yaml.SafeLoader):
    """YAML loader that tolerates Zuul's custom '!encrypted/...' tags.

    Zuul config files may embed encrypted secrets; this loader parses any
    such tagged node as a plain sequence so the file can still be read.
    """
    def __init__(self, *args, **kwargs):
        super(ZuulSafeLoader, self).__init__(*args, **kwargs)
        # Route every tag that starts with '!encrypted/' to our constructor.
        self.add_multi_constructor('!encrypted/', self.construct_encrypted)
    @classmethod
    def construct_encrypted(cls, loader, tag_suffix, node):
        # Return the ciphertext chunks as an ordinary Python list.
        return loader.construct_sequence(node)
class Layout(object):
    """Container for the job definitions collected from the Zuul config."""
    def __init__(self):
        # Each entry is the raw dict of one 'job' stanza.
        self.jobs = []
class ZuulConfig(object):
    """Loads the Zuul job layout from the nearest zuul configuration.

    Searches upward from the current working directory for a 'zuul.yaml',
    '.zuul.yaml', 'zuul.d' or '.zuul.d' entry, then parses every 'job'
    stanza found there into a Layout.
    """

    def find_zuul_yaml(self):
        """Return the path of the nearest Zuul config entry.

        Walks from the current working directory up to the filesystem
        root; raises Exception when no config is found.
        """
        root = os.getcwd()
        while True:
            for fn in ['zuul.yaml', '.zuul.yaml', 'zuul.d', '.zuul.d']:
                path = os.path.join(root, fn)
                if os.path.exists(path):
                    return path
            parent = os.path.split(root)[0]
            if parent == root:
                # Reached the filesystem root. Without this check the
                # loop spins forever, because os.path.split('/') yields
                # '/' again and 'root' never becomes falsy.
                break
            root = parent
        raise Exception(
            "Unable to find zuul config in zuul.yaml, .zuul.yaml,"
            " zuul.d or .zuul.d")

    def parse_zuul_yaml(self, path):
        """Parse a single YAML file and collect its job definitions."""
        with open(path) as f:
            data = yaml.load(f, Loader=ZuulSafeLoader)
        layout = Layout()
        for obj in data:
            if 'job' in obj:
                layout.jobs.append(obj['job'])
        return layout

    def parse_zuul_d(self, path):
        """Parse every file inside a zuul.d directory and collect jobs."""
        layout = Layout()
        for conf in os.listdir(path):
            with open(os.path.join(path, conf)) as f:
                data = yaml.load(f, Loader=ZuulSafeLoader)
            for obj in data:
                if 'job' in obj:
                    layout.jobs.append(obj['job'])
        return layout

    def parse_zuul_layout(self):
        """Dispatch to the file or directory parser as appropriate."""
        path = self.find_zuul_yaml()
        if path.endswith('zuul.d'):
            layout = self.parse_zuul_d(path)
        else:
            layout = self.parse_zuul_yaml(path)
        return layout

    def __init__(self):
        self.layout = self.parse_zuul_layout()
class Docs(object):
    """Collects the job and role names referenced in the Sphinx docs."""

    def __init__(self):
        self.jobs = set()
        self.roles = set()
        self.autojobs = False
        self.autoroles = False
        self.walk(os.path.join(os.getcwd(), 'doc', 'source'))

    def walk(self, path):
        """Scan every .rst file under *path* for zuul directives."""
        for root, _dirs, files in os.walk(path):
            for name in files:
                if not name.endswith('.rst'):
                    continue
                with open(os.path.join(root, name)) as handle:
                    for line in handle:
                        self._scan(line)

    def _scan(self, line):
        """Record any zuul documentation directive found on *line*."""
        for pattern, target in (
                (r'.*\.\. zuul:job:: (.*)$', self.jobs),
                (r'.*\.\. zuul:autojob:: (.*)$', self.jobs),
                (r'.*\.\. zuul:role:: (.*)$', self.roles),
                (r'.*\.\. zuul:autorole:: (.*)$', self.roles)):
            found = re.match(pattern, line)
            if found:
                target.add(found.group(1))
        if re.match(r'.*\.\. zuul:autojobs::.*$', line):
            self.autojobs = True
        if re.match(r'.*\.\. zuul:autoroles::.*$', line):
            self.autoroles = True
class Roles(object):
    """Collects the names of all roles that define a tasks directory."""

    def __init__(self):
        self.roles = set()
        self.walk(os.path.join(os.getcwd(), 'roles'))

    def walk(self, path):
        """Register each subdirectory of *path* containing a tasks folder."""
        for entry in os.listdir(path):
            tasks_dir = os.path.join(path, entry, 'tasks')
            if os.path.isdir(tasks_dir):
                self.roles.add(entry)
# Build the three views: the Zuul layout, the roles on disk, and the
# names referenced in the documentation tree.
z = ZuulConfig()
r = Roles()
d = Docs()
ret = 0
# Every role directory must be referenced somewhere in the docs.
for role in r.roles:
    if role not in d.roles:
        print("Role %s not included in document tree" % (role,))
        ret = 1
# Every job defined in the Zuul config must be documented as well.
for job in [x['name'] for x in z.layout.jobs]:
    if job not in d.jobs:
        print("Job %s not included in document tree" % (job,))
        ret = 1
sys.exit(ret)
| [
"yaml.load",
"os.getcwd",
"os.walk",
"os.path.exists",
"re.match",
"os.path.split",
"os.path.join",
"os.listdir",
"sys.exit"
] | [((4621, 4634), 'sys.exit', 'sys.exit', (['ret'], {}), '(ret)\n', (4629, 4634), False, 'import sys\n'), ((1199, 1210), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (1208, 1210), False, 'import os\n'), ((1943, 1959), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (1953, 1959), False, 'import os\n'), ((2799, 2812), 'os.walk', 'os.walk', (['path'], {}), '(path)\n', (2806, 2812), False, 'import os\n'), ((4167, 4183), 'os.listdir', 'os.listdir', (['path'], {}), '(path)\n', (4177, 4183), False, 'import os\n'), ((1677, 1712), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'ZuulSafeLoader'}), '(f, Loader=ZuulSafeLoader)\n', (1686, 1712), False, 'import yaml\n'), ((1326, 1348), 'os.path.join', 'os.path.join', (['root', 'fn'], {}), '(root, fn)\n', (1338, 1348), False, 'import os\n'), ((1368, 1388), 'os.path.exists', 'os.path.exists', (['path'], {}), '(path)\n', (1382, 1388), False, 'import os\n'), ((1441, 1460), 'os.path.split', 'os.path.split', (['root'], {}), '(root)\n', (1454, 1460), False, 'import os\n'), ((2038, 2073), 'yaml.load', 'yaml.load', (['f'], {'Loader': 'ZuulSafeLoader'}), '(f, Loader=ZuulSafeLoader)\n', (2047, 2073), False, 'import yaml\n'), ((2708, 2719), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (2717, 2719), False, 'import os\n'), ((4097, 4108), 'os.getcwd', 'os.getcwd', ([], {}), '()\n', (4106, 4108), False, 'import os\n'), ((4214, 4247), 'os.path.join', 'os.path.join', (['path', 'role', '"""tasks"""'], {}), "(path, role, 'tasks')\n", (4226, 4247), False, 'import os\n'), ((1983, 2007), 'os.path.join', 'os.path.join', (['path', 'conf'], {}), '(path, conf)\n', (1995, 2007), False, 'import os\n'), ((2913, 2935), 'os.path.join', 'os.path.join', (['root', 'fn'], {}), '(root, fn)\n', (2925, 2935), False, 'import os\n'), ((3014, 3057), 're.match', 're.match', (['""".*\\\\.\\\\. zuul:job:: (.*)$"""', 'line'], {}), "('.*\\\\.\\\\. zuul:job:: (.*)$', line)\n", (3022, 3057), False, 'import re\n'), ((3181, 3228), 're.match', 're.match', (['""".*\\\\.\\\\. 
zuul:autojob:: (.*)$"""', 'line'], {}), "('.*\\\\.\\\\. zuul:autojob:: (.*)$', line)\n", (3189, 3228), False, 'import re\n'), ((3352, 3397), 're.match', 're.match', (['""".*\\\\.\\\\. zuul:autojobs::.*$"""', 'line'], {}), "('.*\\\\.\\\\. zuul:autojobs::.*$', line)\n", (3360, 3397), False, 'import re\n'), ((3516, 3560), 're.match', 're.match', (['""".*\\\\.\\\\. zuul:role:: (.*)$"""', 'line'], {}), "('.*\\\\.\\\\. zuul:role:: (.*)$', line)\n", (3524, 3560), False, 'import re\n'), ((3685, 3733), 're.match', 're.match', (['""".*\\\\.\\\\. zuul:autorole:: (.*)$"""', 'line'], {}), "('.*\\\\.\\\\. zuul:autorole:: (.*)$', line)\n", (3693, 3733), False, 'import re\n'), ((3858, 3904), 're.match', 're.match', (['""".*\\\\.\\\\. zuul:autoroles::.*$"""', 'line'], {}), "('.*\\\\.\\\\. zuul:autoroles::.*$', line)\n", (3866, 3904), False, 'import re\n')] |
#coding: UTF-8
import sys
import os
import os.path
import glob
import cv2
import numpy as np
CAPTUREDDIR = './captured'
CALIBFLAG = 0 # cv2.CALIB_FIX_K3
def calibFromImages(dirname, chess_shape, chess_block_size):
    """Calibrate a camera from chessboard images stored in *dirname*.

    Parameters
    ----------
    dirname : str
        Directory containing the captured chessboard images.
    chess_shape : tuple of int
        Number of inner chessboard corners per row/column, as passed to
        cv2.findChessboardCorners.
    chess_block_size : float
        Physical size of one chessboard square (m or mm).

    Returns
    -------
    tuple or None
        (img_shape, rms, intrinsic, distortion, rvecs, tvecs), or None if
        the directory is missing or empty. Results are also written to
        'calibration.xml'.
    """
    if not os.path.exists(dirname):
        print('Directory \'' + dirname + '\' was not found')
        return None
    filenames = sorted(glob.glob(dirname + '/*'))
    if len(filenames) == 0:
        print('No image was found in \'' + dirname + '\'')
        return None
    print('=== Camera Calibration ===')
    # World coordinates of the chessboard corners (all on the Z = 0 plane).
    objp = np.zeros((chess_shape[0]*chess_shape[1], 3), np.float32)
    objp[:, :2] = chess_block_size * \
        np.mgrid[0:chess_shape[0], 0:chess_shape[1]].T.reshape(-1, 2)
    print('Finding chess corners in input images ...')
    objp_list = []
    imgp_list = []
    img_shape = None
    for f in filenames:
        print(' ' + f + ' : ', end='')
        img = cv2.imread(f, cv2.IMREAD_GRAYSCALE)
        if img_shape is None:
            img_shape = img.shape
        elif img_shape != img.shape:
            # All images must share the same resolution; skip mismatches.
            print('Mismatch size')
            continue
        ret, imgp = cv2.findChessboardCorners(img, chess_shape, None)
        if ret:
            print('Found')
            objp_list.append(objp)
            imgp_list.append(imgp)
        else:
            print('Not found')
    print(' ', len(objp_list), 'images are used')
    # NOTE(review): img_shape is (rows, cols) from numpy, while OpenCV
    # documents imageSize as (width, height) -- confirm the intended order.
    ret, cam_int, cam_dist, rvecs, tvecs = cv2.calibrateCamera(
        objp_list, imgp_list, img_shape, None, None, None, None, CALIBFLAG
    )
    print('Image size :', img_shape)
    print('RMS :', ret)
    print('Intrinsic parameters :')
    print(cam_int)
    print('Distortion parameters :')
    print(cam_dist)
    print()
    # Convert each rotation vector to a 3x3 matrix for the output file.
    rmtxs = list(map(lambda vec: cv2.Rodrigues(vec)[0], rvecs))
    fs = cv2.FileStorage('calibration.xml', cv2.FILE_STORAGE_WRITE)
    fs.write('img_shape', img_shape)
    fs.write('rms', ret)
    fs.write('intrinsic', cam_int)
    fs.write('distortion', cam_dist)
    fs.write('rotation_vectors', np.array(rvecs))
    fs.write('rotation_matrixes', np.array(rmtxs))
    fs.write('translation_vectors', np.array(tvecs))
    fs.release()
    return (img_shape, ret, cam_int, cam_dist, rvecs, tvecs)
if __name__ == '__main__':
    # Expected arguments: <corners vertical> <corners horizontal> <block size>
    if len(sys.argv) != 4:
        print('Usage :')
        print(' Save captured images into \'' + CAPTUREDDIR + '\'')
        print(
            ' Run \'python3 caliblate_camera_from_images.py <num of chess corners in vert> <num of chess corners in hori> <chess block size(m or mm)>')
    else:
        corner_counts = (int(sys.argv[1]), int(sys.argv[2]))
        square_size = float(sys.argv[3])
        calibFromImages(CAPTUREDDIR, corner_counts, square_size)
| [
"cv2.findChessboardCorners",
"numpy.zeros",
"os.path.exists",
"cv2.imread",
"cv2.FileStorage",
"cv2.Rodrigues",
"numpy.array",
"cv2.calibrateCamera",
"glob.glob"
] | [((548, 606), 'numpy.zeros', 'np.zeros', (['(chess_shape[0] * chess_shape[1], 3)', 'np.float32'], {}), '((chess_shape[0] * chess_shape[1], 3), np.float32)\n', (556, 606), True, 'import numpy as np\n'), ((1422, 1513), 'cv2.calibrateCamera', 'cv2.calibrateCamera', (['objp_list', 'imgp_list', 'img_shape', 'None', 'None', 'None', 'None', 'CALIBFLAG'], {}), '(objp_list, imgp_list, img_shape, None, None, None, None,\n CALIBFLAG)\n', (1441, 1513), False, 'import cv2\n'), ((1784, 1842), 'cv2.FileStorage', 'cv2.FileStorage', (['"""calibration.xml"""', 'cv2.FILE_STORAGE_WRITE'], {}), "('calibration.xml', cv2.FILE_STORAGE_WRITE)\n", (1799, 1842), False, 'import cv2\n'), ((231, 254), 'os.path.exists', 'os.path.exists', (['dirname'], {}), '(dirname)\n', (245, 254), False, 'import os\n'), ((361, 386), 'glob.glob', 'glob.glob', (["(dirname + '/*')"], {}), "(dirname + '/*')\n", (370, 386), False, 'import glob\n'), ((907, 942), 'cv2.imread', 'cv2.imread', (['f', 'cv2.IMREAD_GRAYSCALE'], {}), '(f, cv2.IMREAD_GRAYSCALE)\n', (917, 942), False, 'import cv2\n'), ((1120, 1169), 'cv2.findChessboardCorners', 'cv2.findChessboardCorners', (['img', 'chess_shape', 'None'], {}), '(img, chess_shape, None)\n', (1145, 1169), False, 'import cv2\n'), ((2010, 2025), 'numpy.array', 'np.array', (['rvecs'], {}), '(rvecs)\n', (2018, 2025), True, 'import numpy as np\n'), ((2061, 2076), 'numpy.array', 'np.array', (['rmtxs'], {}), '(rmtxs)\n', (2069, 2076), True, 'import numpy as np\n'), ((2114, 2129), 'numpy.array', 'np.array', (['tvecs'], {}), '(tvecs)\n', (2122, 2129), True, 'import numpy as np\n'), ((1743, 1761), 'cv2.Rodrigues', 'cv2.Rodrigues', (['vec'], {}), '(vec)\n', (1756, 1761), False, 'import cv2\n')] |
#!/usr/bin/env python3
# Copyright 2019 ZTE corporation. All Rights Reserved.
# SPDX-License-Identifier: Apache-2.0
import re
import subprocess
def main():
    """Validate the commit subjects between origin/master and HEAD.

    A subject must not start with a lowercase letter or whitespace and
    must not end with a period; the process exits with status 1 when any
    subject in the range fails the check.
    """
    pattern = re.compile(r'[^a-z\s]\S*( \S+)*[^.]')
    any_invalid = False
    log_output = subprocess.check_output(
        args=['git', 'log', '--format=%s', 'origin/master..'],
        universal_newlines=True)
    for subject in log_output.splitlines():
        if pattern.fullmatch(subject):
            print('Valid commit message subject:', subject)
        else:
            any_invalid = True
            print('Invalid commit message subject:', subject)
    if any_invalid:
        exit(1)


if __name__ == "__main__":
    main()
| [
"subprocess.check_output",
"re.compile"
] | [((180, 219), 're.compile', 're.compile', (['"""[^a-z\\\\s]\\\\S*( \\\\S+)*[^.]"""'], {}), "('[^a-z\\\\s]\\\\S*( \\\\S+)*[^.]')\n", (190, 219), False, 'import re\n'), ((265, 372), 'subprocess.check_output', 'subprocess.check_output', ([], {'args': "['git', 'log', '--format=%s', 'origin/master..']", 'universal_newlines': '(True)'}), "(args=['git', 'log', '--format=%s',\n 'origin/master..'], universal_newlines=True)\n", (288, 372), False, 'import subprocess\n')] |
# -*- encoding: utf-8 -*-
'''
Created on 2012-3-22
@author: Neil
'''
from django.db import models
# from photo import Photo
from user import User
class Comment(models.Model):
    """
    Data model for a comment left on a photo.
    """
    photo_id = models.IntegerField()
    photo_owner = models.ForeignKey(User, related_name='comment_photo_owner') # related_name disambiguates the reverse accessor
    author = models.ForeignKey(User, related_name='author')
    content = models.TextField()
    # Do not use a callable result as the default: it is evaluated once at
    # class-definition time, so every row would share that same timestamp.
    # date_posted = models.DateTimeField(default=datetime.datetime.now())
    date_posted = models.DateTimeField(auto_now_add=True)
    deleted_by_photo_owner = models.BooleanField(default=False) # hidden from the photo's comment list once the owner deletes it
    photo_deleted = models.BooleanField(default=False) # the photo itself has been deleted
    def __unicode__(self):
        return str(self.id)
    class Meta:
        ordering = ['id']
        app_label = 'glow'
| [
"django.db.models.TextField",
"django.db.models.ForeignKey",
"django.db.models.BooleanField",
"django.db.models.IntegerField",
"django.db.models.DateTimeField"
] | [((221, 242), 'django.db.models.IntegerField', 'models.IntegerField', ([], {}), '()\n', (240, 242), False, 'from django.db import models\n'), ((261, 320), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""comment_photo_owner"""'}), "(User, related_name='comment_photo_owner')\n", (278, 320), False, 'from django.db import models\n'), ((354, 400), 'django.db.models.ForeignKey', 'models.ForeignKey', (['User'], {'related_name': '"""author"""'}), "(User, related_name='author')\n", (371, 400), False, 'from django.db import models\n'), ((415, 433), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (431, 433), False, 'from django.db import models\n'), ((573, 612), 'django.db.models.DateTimeField', 'models.DateTimeField', ([], {'auto_now_add': '(True)'}), '(auto_now_add=True)\n', (593, 612), False, 'from django.db import models\n'), ((642, 676), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (661, 676), False, 'from django.db import models\n'), ((725, 759), 'django.db.models.BooleanField', 'models.BooleanField', ([], {'default': '(False)'}), '(default=False)\n', (744, 759), False, 'from django.db import models\n')] |
import re
import string
import tensorflow_hub as hub
from scipy.spatial.distance import cdist
module_url = "https://tfhub.dev/google/universal-sentence-encoder/4"
class SimilarityModel():
    """Semantic sentence similarity via the Universal Sentence Encoder.

    Sentences are normalised (lower-cased, contractions expanded,
    punctuation stripped) before being embedded; similarity() returns the
    cosine distance matrix between the two embeddings.
    """

    def __init__(self):
        # Downloads/loads the encoder from TF Hub (network access on first use).
        print("Loading model from tf hub...")
        self.model = hub.load(module_url)
        print("module %s loaded" % module_url)

    def process_text(self, text):
        '''Clean text by removing unnecessary characters and altering the format of words.'''
        re_print = re.compile('[^%s]' % re.escape(string.printable))
        text = text.lower()
        # Contraction expansions, applied in order: the specific forms
        # (won't, can't) must precede the generic n't rule.
        replacements = (
            (r"i'm", "i am"),
            (r"he's", "he is"),
            (r"she's", "she is"),
            (r"it's", "it is"),
            (r"that's", "that is"),
            # Fixed: this rule previously mapped "what's" to "that is".
            (r"what's", "what is"),
            (r"where's", "where is"),
            (r"how's", "how is"),
            (r"\'ll", " will"),
            (r"\'ve", " have"),
            (r"\'re", " are"),
            (r"\'d", " would"),
            (r"won't", "will not"),
            (r"can't", "cannot"),
            (r"n't", " not"),
            (r"n'", "ng"),
            (r"'bout", "about"),
            (r"'til", "until"),
            # Strip punctuation; note the *-^ range also removes digits and
            # uppercase letters, but the text is already lower-cased here.
            (r"[$-()\"#/@;:<>{}`+=~|.!?,'*-^]", ""),
        )
        for pattern, replacement in replacements:
            text = re.sub(pattern, replacement, text)
        # Drop any remaining non-printable characters from each word.
        words = [re_print.sub('', w) for w in text.split()]
        return ' '.join(words)

    def similarity(self, sentence1, sentence2):
        """Return the cosine distance matrix between the two sentences."""
        processed_sent1 = self.process_text(sentence1)
        processed_sent2 = self.process_text(sentence2)
        sent_vector1 = self.model([processed_sent1])
        sent_vector2 = self.model([processed_sent2])
        return cdist(sent_vector1, sent_vector2, metric='cosine')
if __name__ == "__main__":
sim_model = SimilarityModel()
sentence1 = "<NAME>"
sentence2 = "I want money"
distance = sim_model.similarity(sentence1, sentence2)
print("Similarity score is: ", 1 - distance[0][0])
| [
"scipy.spatial.distance.cdist",
"tensorflow_hub.load",
"re.sub",
"re.escape"
] | [((282, 302), 'tensorflow_hub.load', 'hub.load', (['module_url'], {}), '(module_url)\n', (290, 302), True, 'import tensorflow_hub as hub\n'), ((592, 619), 're.sub', 're.sub', (['"""i\'m"""', '"""i am"""', 'text'], {}), '("i\'m", \'i am\', text)\n', (598, 619), False, 'import re\n'), ((636, 665), 're.sub', 're.sub', (['"""he\'s"""', '"""he is"""', 'text'], {}), '("he\'s", \'he is\', text)\n', (642, 665), False, 'import re\n'), ((682, 713), 're.sub', 're.sub', (['"""she\'s"""', '"""she is"""', 'text'], {}), '("she\'s", \'she is\', text)\n', (688, 713), False, 'import re\n'), ((730, 759), 're.sub', 're.sub', (['"""it\'s"""', '"""it is"""', 'text'], {}), '("it\'s", \'it is\', text)\n', (736, 759), False, 'import re\n'), ((776, 809), 're.sub', 're.sub', (['"""that\'s"""', '"""that is"""', 'text'], {}), '("that\'s", \'that is\', text)\n', (782, 809), False, 'import re\n'), ((826, 859), 're.sub', 're.sub', (['"""what\'s"""', '"""that is"""', 'text'], {}), '("what\'s", \'that is\', text)\n', (832, 859), False, 'import re\n'), ((876, 911), 're.sub', 're.sub', (['"""where\'s"""', '"""where is"""', 'text'], {}), '("where\'s", \'where is\', text)\n', (882, 911), False, 'import re\n'), ((928, 959), 're.sub', 're.sub', (['"""how\'s"""', '"""how is"""', 'text'], {}), '("how\'s", \'how is\', text)\n', (934, 959), False, 'import re\n'), ((976, 1006), 're.sub', 're.sub', (['"""\\\\\'ll"""', '""" will"""', 'text'], {}), '("\\\\\'ll", \' will\', text)\n', (982, 1006), False, 'import re\n'), ((1022, 1052), 're.sub', 're.sub', (['"""\\\\\'ve"""', '""" have"""', 'text'], {}), '("\\\\\'ve", \' have\', text)\n', (1028, 1052), False, 'import re\n'), ((1068, 1097), 're.sub', 're.sub', (['"""\\\\\'re"""', '""" are"""', 'text'], {}), '("\\\\\'re", \' are\', text)\n', (1074, 1097), False, 'import re\n'), ((1113, 1143), 're.sub', 're.sub', (['"""\\\\\'d"""', '""" would"""', 'text'], {}), '("\\\\\'d", \' would\', text)\n', (1119, 1143), False, 'import re\n'), ((1159, 1188), 're.sub', 
're.sub', (['"""\\\\\'re"""', '""" are"""', 'text'], {}), '("\\\\\'re", \' are\', text)\n', (1165, 1188), False, 'import re\n'), ((1204, 1237), 're.sub', 're.sub', (['"""won\'t"""', '"""will not"""', 'text'], {}), '("won\'t", \'will not\', text)\n', (1210, 1237), False, 'import re\n'), ((1254, 1285), 're.sub', 're.sub', (['"""can\'t"""', '"""cannot"""', 'text'], {}), '("can\'t", \'cannot\', text)\n', (1260, 1285), False, 'import re\n'), ((1302, 1329), 're.sub', 're.sub', (['"""n\'t"""', '""" not"""', 'text'], {}), '("n\'t", \' not\', text)\n', (1308, 1329), False, 'import re\n'), ((1346, 1370), 're.sub', 're.sub', (['"""n\'"""', '"""ng"""', 'text'], {}), '("n\'", \'ng\', text)\n', (1352, 1370), False, 'import re\n'), ((1387, 1417), 're.sub', 're.sub', (['"""\'bout"""', '"""about"""', 'text'], {}), '("\'bout", \'about\', text)\n', (1393, 1417), False, 'import re\n'), ((1434, 1463), 're.sub', 're.sub', (['"""\'til"""', '"""until"""', 'text'], {}), '("\'til", \'until\', text)\n', (1440, 1463), False, 'import re\n'), ((1480, 1532), 're.sub', 're.sub', (['"""[$-()\\\\"#/@;:<>{}`+=~|.!?,\'*-^]"""', '""""""', 'text'], {}), '(\'[$-()\\\\"#/@;:<>{}`+=~|.!?,\\\'*-^]\', \'\', text)\n', (1486, 1532), False, 'import re\n'), ((1930, 1980), 'scipy.spatial.distance.cdist', 'cdist', (['sent_vector1', 'sent_vector2'], {'metric': '"""cosine"""'}), "(sent_vector1, sent_vector2, metric='cosine')\n", (1935, 1980), False, 'from scipy.spatial.distance import cdist\n'), ((519, 546), 're.escape', 're.escape', (['string.printable'], {}), '(string.printable)\n', (528, 546), False, 'import re\n')] |
import numpy as np
import torch
import torch.nn as nn
from modules.envelope import Envelope
from modules.initializers import GlorotOrthogonal
class EmbeddingBlock(nn.Module):
    """Initial embedding layer: produces atom embeddings and the first
    message / enveloped radial-basis features for every edge."""

    def __init__(self,
                 emb_size,
                 num_radial,
                 bessel_funcs,
                 cutoff,
                 envelope_exponent,
                 num_atom_types=95,
                 activation=None):
        super(EmbeddingBlock, self).__init__()
        self.bessel_funcs = bessel_funcs
        self.cutoff = cutoff
        self.activation = activation
        self.envelope = Envelope(envelope_exponent)
        self.embedding = nn.Embedding(num_atom_types, emb_size)
        self.dense_rbf = nn.Linear(num_radial, emb_size)
        self.dense = nn.Linear(emb_size * 3, emb_size)
        self.reset_params()

    def reset_params(self):
        """Re-initialize all learnable parameters."""
        bound = np.sqrt(3)
        nn.init.uniform_(self.embedding.weight, a=-bound, b=bound)
        GlorotOrthogonal(self.dense_rbf.weight)
        GlorotOrthogonal(self.dense.weight)

    def edge_init(self, edges):
        """Build the initial message and enveloped radial basis per edge."""
        rbf = self.dense_rbf(edges.data['rbf'])
        if self.activation is not None:
            rbf = self.activation(rbf)
        message = self.dense(
            torch.cat([edges.src['h'], edges.dst['h'], rbf], dim=-1))
        if self.activation is not None:
            message = self.activation(message)
        # Enveloped radial basis from the scaled edge distance.
        d_scaled = edges.data['d'] / self.cutoff
        rbf_env = torch.stack([f(d_scaled) for f in self.bessel_funcs], dim=1)
        rbf_env = self.envelope(d_scaled)[:, None] * rbf_env
        return {'m': message, 'rbf_env': rbf_env}

    def forward(self, g):
        """Embed atom types and initialize edge features on graph *g*."""
        g.ndata['h'] = self.embedding(g.ndata['Z'])
        g.apply_edges(self.edge_init)
        return g
"torch.stack",
"torch.nn.Embedding",
"torch.cat",
"modules.envelope.Envelope",
"torch.nn.Linear",
"numpy.sqrt",
"modules.initializers.GlorotOrthogonal"
] | [((598, 625), 'modules.envelope.Envelope', 'Envelope', (['envelope_exponent'], {}), '(envelope_exponent)\n', (606, 625), False, 'from modules.envelope import Envelope\n'), ((651, 689), 'torch.nn.Embedding', 'nn.Embedding', (['num_atom_types', 'emb_size'], {}), '(num_atom_types, emb_size)\n', (663, 689), True, 'import torch.nn as nn\n'), ((715, 746), 'torch.nn.Linear', 'nn.Linear', (['num_radial', 'emb_size'], {}), '(num_radial, emb_size)\n', (724, 746), True, 'import torch.nn as nn\n'), ((768, 801), 'torch.nn.Linear', 'nn.Linear', (['(emb_size * 3)', 'emb_size'], {}), '(emb_size * 3, emb_size)\n', (777, 801), True, 'import torch.nn as nn\n'), ((948, 987), 'modules.initializers.GlorotOrthogonal', 'GlorotOrthogonal', (['self.dense_rbf.weight'], {}), '(self.dense_rbf.weight)\n', (964, 987), False, 'from modules.initializers import GlorotOrthogonal\n'), ((996, 1031), 'modules.initializers.GlorotOrthogonal', 'GlorotOrthogonal', (['self.dense.weight'], {}), '(self.dense.weight)\n', (1012, 1031), False, 'from modules.initializers import GlorotOrthogonal\n'), ((1251, 1307), 'torch.cat', 'torch.cat', (["[edges.src['h'], edges.dst['h'], rbf]"], {'dim': '(-1)'}), "([edges.src['h'], edges.dst['h'], rbf], dim=-1)\n", (1260, 1307), False, 'import torch\n'), ((1567, 1594), 'torch.stack', 'torch.stack', (['rbf_env'], {'dim': '(1)'}), '(rbf_env, dim=1)\n', (1578, 1594), False, 'import torch\n'), ((928, 938), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (935, 938), True, 'import numpy as np\n'), ((914, 924), 'numpy.sqrt', 'np.sqrt', (['(3)'], {}), '(3)\n', (921, 924), True, 'import numpy as np\n')] |
import pygame
from gamesettings import GameSettings as gs
class TextInput(pygame.sprite.Sprite):
    """On-screen text entry bar."""

    def __init__(self, x, y, width, height):
        """Create the input bar.

        x and y give the top-left corner; width and height are expressed
        in tiles and are multiplied by the tile size.
        """
        self.x = x
        self.y = y
        self.width = width * gs.TILESIZE
        self.height = height * gs.TILESIZE
        self.rect = pygame.Rect(self.x, self.y, self.width, self.height)
        self.font = pygame.font.SysFont(None, self.height // 2)
        self.text = " "

    def update_text(self, events):
        """Apply pending key presses to the text.

        Returns True when Return was pressed (input finished), False
        after handling any other key press.
        """
        for event in events:
            if event.type != pygame.KEYDOWN:
                continue
            if event.key == pygame.K_RETURN:
                return True
            if event.key == pygame.K_BACKSPACE:
                self.text = self.text[:-1]
            else:
                self.text += event.unicode
            return False

    def draw(self, screen):
        """Render the bar and its current text onto *screen*."""
        pygame.draw.rect(screen, gs.WHITE, self.rect)
        label = self.font.render(self.text, True, gs.LIGHT_RED)
        screen.blit(label, (self.x, self.y + label.get_rect().height))

    def get_input(self):
        """Return the entered text without surrounding whitespace."""
        return self.text.strip()
| [
"pygame.draw.rect",
"pygame.Rect",
"pygame.font.SysFont"
] | [((741, 793), 'pygame.Rect', 'pygame.Rect', (['self.x', 'self.y', 'self.width', 'self.height'], {}), '(self.x, self.y, self.width, self.height)\n', (752, 793), False, 'import pygame\n'), ((814, 857), 'pygame.font.SysFont', 'pygame.font.SysFont', (['None', '(self.height // 2)'], {}), '(None, self.height // 2)\n', (833, 857), False, 'import pygame\n'), ((1756, 1801), 'pygame.draw.rect', 'pygame.draw.rect', (['screen', 'gs.WHITE', 'self.rect'], {}), '(screen, gs.WHITE, self.rect)\n', (1772, 1801), False, 'import pygame\n')] |
#!/usr/bin/python3
#
# Copyright (c) 2012 <NAME> <<EMAIL>>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#
import copy
import itertools
from typing import (
Any,
AnyStr,
Callable,
Dict,
FrozenSet,
Hashable,
Iterable,
Iterator,
List,
Sequence,
Tuple,
TypeVar,
Union,
overload,
)
T = TypeVar("T")
def safe_coerce_to_tuple(value: Any) -> Tuple[Any, ...]:
    """Coerce *value* into a tuple.

    Strings and non-iterable values are wrapped as a one-element tuple
    instead of being iterated over."""
    if not isinstance(value, str):
        try:
            return tuple(value)
        except TypeError:
            pass
    return (value,)
def safe_coerce_to_frozenset(value: Any) -> FrozenSet[Any]:
    """Coerce *value* into a frozenset.

    Strings and non-iterable values become a one-element frozenset
    instead of being iterated over."""
    if not isinstance(value, str):
        try:
            return frozenset(value)
        except TypeError:
            pass
    return frozenset((value,))
def try_cast(value: Any, cast_to: type) -> Any:
    """Apply *cast_to* to *value*; if conversion fails, return *value*
    unchanged."""
    try:
        result = cast_to(value)
    except (ValueError, TypeError):
        result = value
    return result
def set_in(dictionary: Dict[Any, Any], keys: Iterable[Hashable], value: Any) -> None:
    """Assign *value* into nested dictionaries along *keys*.

    set_in(d, [X, Y, Z], v) behaves like
    d.setdefault(X, {}).setdefault(Y, {})[Z] = v, creating intermediate
    dictionaries as needed. Raises ValueError when *keys* is empty.
    Behavior on non-dictionaries is undefined."""
    key_list = list(keys)
    if not key_list:
        raise ValueError("No keys passed to 'set_in'!")
    node = dictionary
    for key in key_list[:-1]:
        if key not in node:
            node[key] = {}
        node = node[key]
    node[key_list[-1]] = value
def get_in(
    dictionary: Dict[Any, Any],
    keys: Iterable[Hashable],
    default: Any = None,
) -> Any:
    """Look up a value through nested dictionaries along *keys*.

    Any missing key along the path yields *default* immediately.
    Behavior on non-dictionaries is undefined."""
    key_list = list(keys)
    node = dictionary
    for key in key_list[:-1]:
        try:
            node = node[key]
        except KeyError:
            return default
    return node.get(key_list[-1], default)
def split_before(
    iterable: Iterable[Any], pred: Callable[[Any], bool]
) -> Iterator[List[Any]]:
    """Yield runs of consecutive values, starting a new run just before
    every value for which pred(v) is true. For example
    split_before(range(10), lambda x: x % 2 == 0) yields
    [0, 1], [2, 3], [4, 5], [6, 7], [8, 9]."""
    current: List[Any] = []
    for value in iterable:
        if pred(value) and current:
            yield current
            current = []
        current.append(value)
    if current:
        yield current
# Copied from the Python 'itertools' module documentation
def grouper(size: int, iterable: Iterable[Any], fillvalue: Any = None):
    "grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx"
    chunks = [iter(iterable)] * size
    return itertools.zip_longest(*chunks, fillvalue=fillvalue)
def group_by_pred(
    pred: Callable[[Any], bool], iterable: Iterable[Any]
) -> Tuple[List[Any], List[Any]]:
    """Split *iterable* into two lists: values for which *pred* holds and
    values for which it does not, in that order."""
    matching: List[Any] = []
    rest: List[Any] = []
    for item in iterable:
        (matching if pred(item) else rest).append(item)
    return matching, rest
@overload
def fragment(size: int, lstlike: AnyStr) -> Iterable[AnyStr]:
    ...


@overload
def fragment(size: int, lstlike: Sequence[T]) -> Iterable[Sequence[T]]:
    ...


_Fragmentable = Union[AnyStr, Sequence[T]]


def fragment(size: int, lstlike: _Fragmentable) -> Iterable[_Fragmentable]:
    """Lazily split *lstlike* into consecutive slices of at most *size*
    elements; a faster alternative to `grouper` for lists and strings."""
    total = len(lstlike)
    return (lstlike[start : start + size] for start in range(0, total, size))
def fill_dict(destination: Dict[Any, Any], source: Dict[Any, Any]) -> Dict[Any, Any]:
    """Return a deep copy of *destination* where any key missing relative
    to *source* is filled in (recursively) from a deep copy of *source*."""
    if not isinstance(destination, dict) or not isinstance(source, dict):
        raise TypeError("Non-dictionary parameters in 'fill_dict'")

    def _merge(dst: Dict[Any, Any], src: Dict[Any, Any]) -> Dict[Any, Any]:
        # Recurse where both sides hold dicts; otherwise only copy keys
        # that the destination does not already define.
        for key, src_value in src.items():
            dst_value = dst.get(key)
            if isinstance(src_value, dict) and isinstance(dst_value, dict):
                _merge(dst_value, src_value)
            elif key not in dst:
                dst[key] = src_value
        return dst

    return _merge(copy.deepcopy(destination), copy.deepcopy(source))
class Immutable:
    """Mixin that freezes attributes after construction.

    Attribute values are bound from the keyword arguments given to
    __init__ and cannot be reassigned or deleted afterwards; note the
    values themselves may still be mutable objects."""

    def __init__(self, **kwargs: Any):
        object.__init__(self)
        # Bypass our own __setattr__ to perform the initial binding.
        for name, attr_value in kwargs.items():
            object.__setattr__(self, name, attr_value)

    def __setattr__(self, _name: str, _value: Any) -> None:
        raise NotImplementedError("Object is immutable")

    def __delattr__(self, _name: str) -> None:
        raise NotImplementedError("Object is immutable")
class TotallyOrdered:
    """Mixin that derives the full rich-comparison set from __lt__.

    Subclasses implement __lt__, returning NotImplemented for foreign
    types. A total order is assumed, so equality is defined as "neither
    side is less than the other".
    http://en.wikipedia.org/wiki/Total_order
    """

    def __lt__(self, other: Any) -> bool:
        raise NotImplementedError("__lt__ must be implemented!")

    def __eq__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return not (self < other) and not (other < self)

    def __ne__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return not (self == other)

    def __le__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return not (other < self)

    def __ge__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return not (self < other)

    def __gt__(self, other: Any) -> bool:
        if not isinstance(other, type(self)):
            return NotImplemented
        return other < self
| [
"typing.TypeVar",
"copy.deepcopy",
"itertools.zip_longest"
] | [((1350, 1362), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (1357, 1362), False, 'from typing import Any, AnyStr, Callable, Dict, FrozenSet, Hashable, Iterable, Iterator, List, Sequence, Tuple, TypeVar, Union, overload\n'), ((4462, 4511), 'itertools.zip_longest', 'itertools.zip_longest', (['*args'], {'fillvalue': 'fillvalue'}), '(*args, fillvalue=fillvalue)\n', (4483, 4511), False, 'import itertools\n'), ((6180, 6206), 'copy.deepcopy', 'copy.deepcopy', (['destination'], {}), '(destination)\n', (6193, 6206), False, 'import copy\n'), ((6208, 6229), 'copy.deepcopy', 'copy.deepcopy', (['source'], {}), '(source)\n', (6221, 6229), False, 'import copy\n')] |
# -*- coding: utf-8 -*-
""" Various registration routines to reduce duplication. """
import numpy as np
import sksurgerycore.transforms.matrix as mt
import sksurgerysurfacematch.interfaces.rigid_registration as rr
def do_rigid_registration(reconstructed_cloud,
                          reference_cloud,
                          rigid_registration: rr.RigidRegistration,
                          initial_ref2recon: np.ndarray = None,
                          ):
    """
    Triggers a rigid body registration using rigid_registration.

    :param reconstructed_cloud: [Nx3] point cloud, e.g. from video.
    :param reference_cloud: [Mx3] point cloud, e.g. from CT/MR
    :param rigid_registration: Object that implements a rigid registration.
    :param initial_ref2recon: [4x4] ndarray representing an initial \
    estimate.
    :return: residual (float), [4x4] transform
    """
    # If an initial estimate is supplied, pre-transform the reference
    # points by it (rotate, then translate, working on 3xM columns).
    if initial_ref2recon is not None:
        rotation = initial_ref2recon[0:3, 0:3]
        translation = initial_ref2recon[0:3, 3].reshape((3, 1))
        rotated = np.matmul(rotation, np.transpose(reference_cloud))
        reference_cloud = np.transpose(rotated + translation)

    # Register the reconstructed points onto the provided model
    # (likely from CT or MR), then invert to get ref-to-recon.
    residual, transform = rigid_registration.register(reconstructed_cloud,
                                                      reference_cloud)
    transform = np.linalg.inv(transform)

    # Fold the initial estimate back into the final transform.
    if initial_ref2recon is not None:
        init_mat = mt.construct_rigid_transformation(
            initial_ref2recon[0:3, 0:3],
            initial_ref2recon[0:3, 3])
        transform = np.matmul(transform, init_mat)

    return residual, transform
| [
"numpy.transpose",
"numpy.linalg.inv",
"numpy.matmul",
"sksurgerycore.transforms.matrix.construct_rigid_transformation"
] | [((1492, 1516), 'numpy.linalg.inv', 'np.linalg.inv', (['transform'], {}), '(transform)\n', (1505, 1516), True, 'import numpy as np\n'), ((1148, 1177), 'numpy.transpose', 'np.transpose', (['reference_cloud'], {}), '(reference_cloud)\n', (1160, 1177), True, 'import numpy as np\n'), ((1628, 1721), 'sksurgerycore.transforms.matrix.construct_rigid_transformation', 'mt.construct_rigid_transformation', (['initial_ref2recon[0:3, 0:3]', 'initial_ref2recon[0:3, 3]'], {}), '(initial_ref2recon[0:3, 0:3],\n initial_ref2recon[0:3, 3])\n', (1661, 1721), True, 'import sksurgerycore.transforms.matrix as mt\n'), ((1784, 1814), 'numpy.matmul', 'np.matmul', (['transform', 'init_mat'], {}), '(transform, init_mat)\n', (1793, 1814), True, 'import numpy as np\n'), ((1033, 1062), 'numpy.transpose', 'np.transpose', (['reference_cloud'], {}), '(reference_cloud)\n', (1045, 1062), True, 'import numpy as np\n')] |
# From https://groups.google.com/forum/#!topic/networkx-discuss/FwYk0ixLDuY
# Plot weighted directed positive/negative network graph
import networkx as nx
import matplotlib.pyplot as plt
from matplotlib.patches import FancyArrowPatch, Circle
import numpy as np
def draw_curvy_network(G, pos, ax, node_radius=0.02, node_color='b', node_edge_color='b', node_alpha=0.5, edge_color=None, edge_alpha=0.5, edge_width=None):
    """Draw a network with curved, arrowed edges onto a matplotlib axis.

    Each per-node/per-edge option may be a scalar (applied uniformly) or a
    list of the right length. Parallel edges between the same node pair are
    given alternating, growing curvatures so they stay visually distinct.

    NOTE(review): uses ``G.node[...]`` which is the networkx 1.x API
    (2.x renamed it to ``G.nodes``) — confirm the pinned networkx version.

    :param G: a networkx graph (nodes are positioned via ``pos``)
    :param pos: dict mapping node -> (x, y) position
    :param ax: matplotlib axes to draw on
    :return: the last FancyArrowPatch added (undefined if G has no edges)
    """
    assert isinstance(G, nx.Graph), "G must be a NetworkX graph!"
    # Convert node colors to lists
    def _to_list(x, N):
        # Broadcast a scalar option to length N; pass lists through
        # (after checking the length matches).
        if isinstance(x, list):
            assert len(x) == N
            return x
        else:
            return [x] * N
    node_radius = _to_list(node_radius, len(G.nodes()))
    node_color = _to_list(node_color, len(G.nodes()))
    node_edge_color = _to_list(node_edge_color, len(G.nodes()))
    node_alpha = _to_list(node_alpha, len(G.nodes()))
    if edge_color is None:
        edge_color = _to_list('k', len(G.edges()))
    edge_alpha = _to_list(edge_alpha, len(G.edges()))
    # if user specify edge-width it is not the same
    if edge_width is None:
        edge_width = 2
    edge_width = _to_list(edge_width, len(G.edges()))
    # Plot the nodes, stashing each Circle on the node so the edge pass
    # below can anchor arrows to the patches.
    for n, r, a, fc, ec in zip(G, node_radius, node_alpha, node_color, node_edge_color):
        c = Circle(pos[n], radius=r, alpha=a, fc=fc, ec=ec)
        ax.add_patch(c)
        G.node[n]['patch'] = c
    # Plot the edges
    seen = {}
    for (u, v, d), a, lw, ec in zip(G.edges(data=True), edge_alpha, edge_width, edge_color):
        n1 = G.node[u]['patch']
        n2 = G.node[v]['patch']
        rad = -0.1
        if (u, v) in seen:
            # Repeated (u, v) edge: flip the bend direction and increase
            # its magnitude so parallel edges don't overlap.
            rad = seen.get((u, v))
            rad = (rad + np.sign(rad) * 0.1) * -1
        e = FancyArrowPatch(n1.center, n2.center, patchA=n1, patchB=n2, arrowstyle='-|>',
                            connectionstyle='arc3,rad=%s' % rad, mutation_scale=10.0, lw=lw, alpha=a, color=ec)
        seen[(u, v)] = rad
        ax.add_patch(e)
    return e
if __name__ == "__main__":
    # Demo: draw a small multi-digraph with per-edge widths/colors.
    from hips.plotting.colormaps import harvard_colors
    color = harvard_colors()[0:10]
    G = nx.MultiDiGraph([(1, 1), (1, 2), (2, 1), (2, 3), (3, 4), (2, 4), (3, 2)])
    pos = nx.spring_layout(G)
    ax = plt.gca()
    # One width/color entry per edge, in G.edges() order.
    edge_width = [5, 0.9, 0.8, 2, 2, 1, 5]
    edge_color = [color[0], color[0], color[0], color[0], color[1], color[1], color[1]]
    draw_curvy_network(G, pos, ax, node_color='k', node_edge_color='k', edge_width=edge_width, edge_color=edge_color)
    ax.autoscale()
    plt.axis('equal')
    plt.axis('off')
    # plt.savefig("graph.pdf")
    plt.show()
| [
"matplotlib.pyplot.show",
"networkx.MultiDiGraph",
"hips.plotting.colormaps.harvard_colors",
"matplotlib.patches.FancyArrowPatch",
"matplotlib.pyplot.axis",
"matplotlib.patches.Circle",
"numpy.sign",
"networkx.spring_layout",
"matplotlib.pyplot.gca"
] | [((2141, 2214), 'networkx.MultiDiGraph', 'nx.MultiDiGraph', (['[(1, 1), (1, 2), (2, 1), (2, 3), (3, 4), (2, 4), (3, 2)]'], {}), '([(1, 1), (1, 2), (2, 1), (2, 3), (3, 4), (2, 4), (3, 2)])\n', (2156, 2214), True, 'import networkx as nx\n'), ((2226, 2245), 'networkx.spring_layout', 'nx.spring_layout', (['G'], {}), '(G)\n', (2242, 2245), True, 'import networkx as nx\n'), ((2255, 2264), 'matplotlib.pyplot.gca', 'plt.gca', ([], {}), '()\n', (2262, 2264), True, 'import matplotlib.pyplot as plt\n'), ((2562, 2579), 'matplotlib.pyplot.axis', 'plt.axis', (['"""equal"""'], {}), "('equal')\n", (2570, 2579), True, 'import matplotlib.pyplot as plt\n'), ((2584, 2599), 'matplotlib.pyplot.axis', 'plt.axis', (['"""off"""'], {}), "('off')\n", (2592, 2599), True, 'import matplotlib.pyplot as plt\n'), ((2635, 2645), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (2643, 2645), True, 'import matplotlib.pyplot as plt\n'), ((1319, 1366), 'matplotlib.patches.Circle', 'Circle', (['pos[n]'], {'radius': 'r', 'alpha': 'a', 'fc': 'fc', 'ec': 'ec'}), '(pos[n], radius=r, alpha=a, fc=fc, ec=ec)\n', (1325, 1366), False, 'from matplotlib.patches import FancyArrowPatch, Circle\n'), ((1759, 1929), 'matplotlib.patches.FancyArrowPatch', 'FancyArrowPatch', (['n1.center', 'n2.center'], {'patchA': 'n1', 'patchB': 'n2', 'arrowstyle': '"""-|>"""', 'connectionstyle': "('arc3,rad=%s' % rad)", 'mutation_scale': '(10.0)', 'lw': 'lw', 'alpha': 'a', 'color': 'ec'}), "(n1.center, n2.center, patchA=n1, patchB=n2, arrowstyle=\n '-|>', connectionstyle='arc3,rad=%s' % rad, mutation_scale=10.0, lw=lw,\n alpha=a, color=ec)\n", (1774, 1929), False, 'from matplotlib.patches import FancyArrowPatch, Circle\n'), ((2109, 2125), 'hips.plotting.colormaps.harvard_colors', 'harvard_colors', ([], {}), '()\n', (2123, 2125), False, 'from hips.plotting.colormaps import harvard_colors\n'), ((1721, 1733), 'numpy.sign', 'np.sign', (['rad'], {}), '(rad)\n', (1728, 1733), True, 'import numpy as np\n')] |
import json
import requests
from rest_framework import status
from presqt.utilities import PresQTResponseException
def zenodo_upload_helper(auth_parameter, project_title=None):
    """
    Initialize a new project on Zenodo.

    Parameters
    ----------
    auth_parameter : str
        The Authentication parameter expected by Zenodo.
    project_title : str
        Title to attach to the newly created deposition.

    Returns
    -------
    The new Project ID.
    """
    headers = {"Content-Type": "application/json"}

    # Create an empty deposition; Zenodo answers 201 on success.
    creation_response = requests.post(
        'https://zenodo.org/api/deposit/depositions',
        params=auth_parameter, json={}, headers=headers)
    if creation_response.status_code != 201:
        raise PresQTResponseException(
            "Zenodo returned a {} status code while trying to create the project.".format(
                creation_response.status_code), status.HTTP_400_BAD_REQUEST)

    response_body = creation_response.json()
    new_project_id = response_body['id']
    owner = response_body['owner']

    # Attach minimal metadata to the freshly created deposition.
    metadata = {
        'metadata': {
            'title': project_title,
            'upload_type': 'other',
            'description': 'PresQT Upload',
            'creators': [{'name': str(owner)}]}}
    requests.put('https://zenodo.org/api/deposit/depositions/{}'.format(new_project_id),
                 params=auth_parameter, data=json.dumps(metadata), headers=headers)

    return new_project_id
| [
"requests.post",
"json.dumps"
] | [((471, 584), 'requests.post', 'requests.post', (['"""https://zenodo.org/api/deposit/depositions"""'], {'params': 'auth_parameter', 'json': '{}', 'headers': 'headers'}), "('https://zenodo.org/api/deposit/depositions', params=\n auth_parameter, json={}, headers=headers)\n", (484, 584), False, 'import requests\n'), ((1371, 1387), 'json.dumps', 'json.dumps', (['data'], {}), '(data)\n', (1381, 1387), False, 'import json\n')] |
#
# Copyright (c) 2016-present, Cisco Systems, Inc. All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree.
#
from django.core.management.base import BaseCommand, CommandError
from dashboard.models import Alert
from django.db.models import Q
class Command(BaseCommand):
    """Management command that deletes every Alert record."""

    help = 'Removes ALL alerts.'

    def handle(self, *args, **options):
        # Bulk-delete the whole Alert table.
        all_alerts = Alert.objects.all()
        all_alerts.delete()
| [
"dashboard.models.Alert.objects.all"
] | [((458, 477), 'dashboard.models.Alert.objects.all', 'Alert.objects.all', ([], {}), '()\n', (475, 477), False, 'from dashboard.models import Alert\n')] |
from django.conf import settings
from django.conf.urls import patterns, include, url
from django.conf.urls.static import static
from django.contrib import admin
from django.contrib.staticfiles.urls import staticfiles_urlpatterns
# Monkey patching ftw...
import dolweb.utils.monkey
# Discover each installed app's admin registrations.
# NOTE(review): patterns() with string view paths is the pre-1.10 Django
# URLconf style — confirm the project's Django version before modernizing.
admin.autodiscover()
urlpatterns = patterns('',
    # Homepage
    url(r'^$', 'dolweb.homepage.views.home', name='home'),
    # Media (image gallery, link to videos)
    url(r'^media/', include('dolweb.media.urls')),
    # Documentation (FAQ and guides)
    url(r'^docs/', include('dolweb.docs.urls')),
    # Downloads
    url(r'^download/', include('dolweb.downloads.urls')),
    # Blog
    url(r'^blog/', include('dolweb.blog.urls')),
    # Compatibility list
    url(r'^compat/', include('dolweb.compat.urls')),
    # Django administration
    url(r'^admin/', include(admin.site.urls)),
    # Management interface
    url(r'^mgmt/(?P<cmd>.+)$', 'dolweb.management.views.run_command', name='mgmt_run_command'),
)
# Serve collected static files and user-uploaded media (development setup).
urlpatterns += staticfiles_urlpatterns()
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
| [
"django.contrib.admin.autodiscover",
"django.contrib.staticfiles.urls.staticfiles_urlpatterns",
"django.conf.urls.include",
"django.conf.urls.url",
"django.conf.urls.static.static"
] | [((283, 303), 'django.contrib.admin.autodiscover', 'admin.autodiscover', ([], {}), '()\n', (301, 303), False, 'from django.contrib import admin\n'), ((1022, 1047), 'django.contrib.staticfiles.urls.staticfiles_urlpatterns', 'staticfiles_urlpatterns', ([], {}), '()\n', (1045, 1047), False, 'from django.contrib.staticfiles.urls import staticfiles_urlpatterns\n'), ((1063, 1124), 'django.conf.urls.static.static', 'static', (['settings.MEDIA_URL'], {'document_root': 'settings.MEDIA_ROOT'}), '(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)\n', (1069, 1124), False, 'from django.conf.urls.static import static\n'), ((351, 403), 'django.conf.urls.url', 'url', (['"""^$"""', '"""dolweb.homepage.views.home"""'], {'name': '"""home"""'}), "('^$', 'dolweb.homepage.views.home', name='home')\n", (354, 403), False, 'from django.conf.urls import patterns, include, url\n'), ((912, 1006), 'django.conf.urls.url', 'url', (['"""^mgmt/(?P<cmd>.+)$"""', '"""dolweb.management.views.run_command"""'], {'name': '"""mgmt_run_command"""'}), "('^mgmt/(?P<cmd>.+)$', 'dolweb.management.views.run_command', name=\n 'mgmt_run_command')\n", (915, 1006), False, 'from django.conf.urls import patterns, include, url\n'), ((471, 499), 'django.conf.urls.include', 'include', (['"""dolweb.media.urls"""'], {}), "('dolweb.media.urls')\n", (478, 499), False, 'from django.conf.urls import patterns, include, url\n'), ((559, 586), 'django.conf.urls.include', 'include', (['"""dolweb.docs.urls"""'], {}), "('dolweb.docs.urls')\n", (566, 586), False, 'from django.conf.urls import patterns, include, url\n'), ((629, 661), 'django.conf.urls.include', 'include', (['"""dolweb.downloads.urls"""'], {}), "('dolweb.downloads.urls')\n", (636, 661), False, 'from django.conf.urls import patterns, include, url\n'), ((695, 722), 'django.conf.urls.include', 'include', (['"""dolweb.blog.urls"""'], {}), "('dolweb.blog.urls')\n", (702, 722), False, 'from django.conf.urls import patterns, include, url\n'), ((772, 801), 
'django.conf.urls.include', 'include', (['"""dolweb.compat.urls"""'], {}), "('dolweb.compat.urls')\n", (779, 801), False, 'from django.conf.urls import patterns, include, url\n'), ((853, 877), 'django.conf.urls.include', 'include', (['admin.site.urls'], {}), '(admin.site.urls)\n', (860, 877), False, 'from django.conf.urls import patterns, include, url\n')] |
import copy
import types
from smach_based_introspection_framework.online_part.framework_core.states import (
RollBackRecovery,
)
import introspection_execute
from smach_based_introspection_framework.online_part.robot_screen_visualization.setter import(
show_anomaly_detected,
show_everyhing_is_good,
)
def run(sm):
    """Instrument a SMACH state machine with anomaly-recovery wiring.

    Mutates ``sm`` in place: each user state's ``execute`` is replaced by
    the introspection wrapper and gains a "Revert" outcome routed to a
    RollBackRecovery state; the recovery state in turn gets a
    "Reenter_<state>" transition back to every user state.

    :param sm: the smach.StateMachine to instrument
    :return: the same (modified) state machine
    """
    import smach
    with sm:
        raw_user_states = copy.deepcopy(sm._states)
        # redirect all NeedRecovery to their respective anomaly diagnosis
        for user_state in raw_user_states:
            obj = sm._states[user_state]
            # Rebind execute so it runs through the introspection layer.
            obj.execute = types.MethodType(introspection_execute.execute, obj)
            obj._outcomes.add("Revert")
            state_name = user_state
            state_transitions = sm._transitions[state_name]
            state_transitions["Revert"] = RollBackRecovery.__name__
        # build Recovery states automatically
        recovery_outcomes = ['RecoveryFailed']
        recovery_state_transitions = {
            'RecoveryFailed': 'TaskFailed'
        }
        for user_state in raw_user_states:
            state_name = user_state
            recovery_outcomes.append('Reenter_'+state_name)
            recovery_state_transitions['Reenter_'+state_name] = state_name
        smach.StateMachine.add(
            RollBackRecovery.__name__,
            RollBackRecovery(outcomes=recovery_outcomes),
            transitions=recovery_state_transitions
        )
    return sm
| [
"smach_based_introspection_framework.online_part.framework_core.states.RollBackRecovery",
"copy.deepcopy",
"types.MethodType"
] | [((384, 409), 'copy.deepcopy', 'copy.deepcopy', (['sm._states'], {}), '(sm._states)\n', (397, 409), False, 'import copy\n'), ((594, 646), 'types.MethodType', 'types.MethodType', (['introspection_execute.execute', 'obj'], {}), '(introspection_execute.execute, obj)\n', (610, 646), False, 'import types\n'), ((1322, 1366), 'smach_based_introspection_framework.online_part.framework_core.states.RollBackRecovery', 'RollBackRecovery', ([], {'outcomes': 'recovery_outcomes'}), '(outcomes=recovery_outcomes)\n', (1338, 1366), False, 'from smach_based_introspection_framework.online_part.framework_core.states import RollBackRecovery\n')] |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# created: 2014-12-02
"""Demo and test program to verify WS server."""
import sys
from autobahn.twisted.websocket import WebSocketClientFactory
from autobahn.twisted.websocket import WebSocketClientProtocol
from twisted.internet import reactor
from twisted.python import log
class MyClientProtocol(WebSocketClientProtocol):
    """Minimal WebSocket client: greets the server, echoes replies,
    and stops the reactor when the connection closes."""
    def onOpen(self):
        # Handshake complete — send a single UTF-8 text frame.
        print ('connected to server')
        self.sendMessage(u"Hello, world!".encode('utf8'))
    def onMessage(self, payload, isBinary):
        # Log whatever the server sends back (binary or text frame).
        if isBinary:
            print("Binary message received: {0} bytes".format(len(payload)))
        else:
            print("Text message received: {0}".format(payload.decode('utf8')))
    def onClose(self, was_clean, code, reason):
        # Tear down the whole event loop once the socket closes.
        print("connection closed: ", was_clean, code, reason)
        reactor.stop() # @UndefinedVariable
def main():
    """Connect the demo client to a local WebSocket server and run the loop."""
    log.startLogging(sys.stdout)
    factory = WebSocketClientFactory()
    factory.protocol = MyClientProtocol
    # Blocks in reactor.run() until reactor.stop() fires in onClose.
    reactor.connectTCP("127.0.0.1", 10086, factory) # @UndefinedVariable
    reactor.run() # @UndefinedVariable
# Test Codes
if __name__ == "__main__":
    main()
    print('Done')
| [
"twisted.python.log.startLogging",
"twisted.internet.reactor.connectTCP",
"twisted.internet.reactor.run",
"twisted.internet.reactor.stop",
"autobahn.twisted.websocket.WebSocketClientFactory"
] | [((908, 936), 'twisted.python.log.startLogging', 'log.startLogging', (['sys.stdout'], {}), '(sys.stdout)\n', (924, 936), False, 'from twisted.python import log\n'), ((956, 980), 'autobahn.twisted.websocket.WebSocketClientFactory', 'WebSocketClientFactory', ([], {}), '()\n', (978, 980), False, 'from autobahn.twisted.websocket import WebSocketClientFactory\n'), ((1030, 1077), 'twisted.internet.reactor.connectTCP', 'reactor.connectTCP', (['"""127.0.0.1"""', '(10086)', 'factory'], {}), "('127.0.0.1', 10086, factory)\n", (1048, 1077), False, 'from twisted.internet import reactor\n'), ((1104, 1117), 'twisted.internet.reactor.run', 'reactor.run', ([], {}), '()\n', (1115, 1117), False, 'from twisted.internet import reactor\n'), ((853, 867), 'twisted.internet.reactor.stop', 'reactor.stop', ([], {}), '()\n', (865, 867), False, 'from twisted.internet import reactor\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Jul 3 23:36:17 2017
Unit tests for utils.py
@author: duc
"""
import unittest
import utils as ut
from nltk.corpus import cmudict
def to_string(tokenized):
    """Flatten a list of token lists and join the tokens with single spaces.

    :param tokenized: iterable of token lists, e.g. ``[["a", "b"], ["c"]]``
    :return: the tokens joined by spaces, e.g. ``"a b c"``
    """
    # A generator feeds str.join lazily; sum(tokenized, []) would copy the
    # accumulator list on every step (quadratic in total token count).
    return " ".join(token for sentence in tokenized for token in sentence)
class UtilTests(unittest.TestCase):
    """Unit tests for the helper functions in the ``utils`` module.

    Covers the boolean classifiers (links, contractions, compounds,
    emoticons), the removal helpers (special characters, emoticons, links,
    stopwords, retweets), the compound/contraction splitters, the syllable
    counters and the end-to-end ``preprocess`` pipeline.
    """
    #------ test boolean functions ------------------------------------------------
    def test_is_link(self):
        s = "http://t.co/rlqo5xfbul"
        self.assertFalse(ut.is_not_link(s))
    def test_is_not_link(self):
        s = "fake.website"
        self.assertTrue(ut.is_not_link(s))
    def test_is_contraction(self):
        s = "couldn't"
        self.assertFalse(ut.is_not_contraction(s))
    def test_is_not_contraction(self):
        s = "peoples'"
        self.assertTrue(ut.is_not_contraction(s))
    def test_is_compound(self):
        s = "word-compound"
        self.assertFalse(ut.is_not_compound(s))
    def test_is_not_compound(self):
        s = "wordcompound"
        self.assertTrue(ut.is_not_compound(s))
    def test_is_emoticon(self):
        s = "xd"
        self.assertFalse(ut.is_not_emoticon(s))
    def test_is_not_emoticon(self):
        s = "exd"
        self.assertTrue(ut.is_not_emoticon(s))
    #------------------------------------------------------------------------------
    #------ test ut.remove_special_characters function ----------------------------
    def test_no_punctuation(self):
        s = "He said: 'Hey, my name is... Tim!' - Tim."
        self.assertEqual(
            to_string(ut.remove_special_characters(s)),
            "he said hey my name is tim tim"
        )
    def test_no_emojis(self):
        s = "💪🔥"
        self.assertEqual(to_string(ut.remove_special_characters(s)), "")
    def test_no_twitter_signs(self):
        s = "#scandal @elonmusk #innovation @here"
        self.assertEqual(
            to_string(ut.remove_special_characters(s)),
            "scandal elonmusk innovation here"
        )
    def test_numbers(self):
        s = "1,2 1.2 1,000"
        self.assertEqual(
            to_string(ut.remove_special_characters(s)),
            "12 12 1000"
        )
    def test_no_emoticons_without_letters_or_numbers(self):
        s = (
            "Here are some emoticons without letters or numbers in them"
            " >:( :) :-)"
        )
        self.assertEqual(
            to_string(ut.remove_special_characters(s)),
            "here are some emoticons without letters or numbers in them"
        )
    #------------------------------------------------------------------------------
    #------ test remove functions -------------------------------------------------
    def test_no_emoticons_with_letters_or_numbers(self):
        s = (
            "here are some emoticons containing letters or numbers"
            " :D :d :P :p :'D xd :o which the tokenizer may not know"
            " :-3 :3 8) 8-) <3 </3"
        )
        self.assertEqual(
            ut.remove_emoticons(s),
            (
                "here are some emoticons containing letters or numbers"
                " which the tokenizer may not know"
            )
        )
    def test_no_links(self):
        s = (
            "some links http://t.co/rlqo5xfbul www.google.com"
            " bplaced.homepage.net/article/2221 g.com g.co"
        )
        self.assertEqual(ut.remove_links(s), "some links")
    def test_no_stopwords(self):
        s = [["i", "couldn","t", "wouldn", "t", "to", "do", "this"]]
        self.assertEqual(ut.remove_stopwords(s), [[]])
    def test_no_retweets(self):
        s = [
            "RT @realDonaldTrump: This is really sad! Fake news.",
            "Some random tweet",
            "RT @test: test"
        ]
        self.assertEqual(ut.remove_retweets(s), ["Some random tweet"])
    #------------------------------------------------------------------------------
    #------ test split functions --------------------------------------------------
    def test_split_compounds(self):
        s = (
            "e-mail enterprise-level level-14"
            " three-level-building best-in-class"
        )
        self.assertEqual(
            ut.split_compounds(s),
            (
                "e mail enterprise level level 14"
                " three level building best in class"
            )
        )
    def test_split_contractions(self):
        s = r"I'm won't we'll can't he's that's there's"
        self.assertEqual(
            ut.split_contractions(s),
            "i m won t we ll can t he s that s there s"
        )
    #------------------------------------------------------------------------------
    #------ test count functions --------------------------------------------------
    def test_count_word_syllables(self):
        pronouncingDict = cmudict.dict()
        strings = {
            "123456789": 0,
            "supercalifragilisticexpialidocious": 14,
            "demagogue": 3,
            "anathema": 4,
            "payday": 2,
            "Syrian": 3,
            "crepuscular": 4,
            "preservative": 4,
            "significantly": 5,
            "embezzlement": 4
        }
        for string in strings:
            sylCount = ut.get_word_syllables(string, pronouncingDict)
            self.assertEqual(sylCount, strings[string])
    def test_count_word_syllables_offline(self):
        # fails at supercalifragilistic..., asserts 13 instead of 14 syllables
        # print accuracy of function instead
        pronouncingDict = cmudict.dict()
        strings = {
            "123456789": 0,
            "supercalifragilisticexpialidocious": 14,
            "demagogue": 3,
            "anathema": 4,
            "payday": 2,
            "Syrian": 3,
            "crepuscular": 4,
            "preservative": 4,
            "significantly": 5,
            "embezzlement": 4
        }
        accuracy = 0
        for string in strings:
            sylCount = ut.get_word_syllables_offline(string, pronouncingDict)
            if sylCount == strings[string]:
                accuracy += 10
        print("\nsyllable counter offline accuracy: " + str(accuracy) + "%")
    #------------------------------------------------------------------------------
    def test_preprocessing(self):
        s = (
            "💪🔥 >:( xd <3 :'D http://t.co/rlqo5xfbul www.google.com e-mail"
            " three-level-building I'm wouldn't @trump #bad"
            " 1.2 Hi, my name is: Jon!? Next sentence."
        )
        self.assertEqual(
            to_string(ut.preprocess(s)),
            (
                "e mail three level building i m wouldn t"
                " trump bad 12 hi my name is jon next sentence"
            )
        )
if __name__ == '__main__':
unittest.main() | [
"unittest.main",
"utils.is_not_emoticon",
"utils.remove_retweets",
"utils.remove_emoticons",
"utils.is_not_contraction",
"utils.get_word_syllables",
"utils.split_compounds",
"utils.remove_links",
"utils.split_contractions",
"utils.remove_stopwords",
"utils.get_word_syllables_offline",
"utils.r... | [((6796, 6811), 'unittest.main', 'unittest.main', ([], {}), '()\n', (6809, 6811), False, 'import unittest\n'), ((4774, 4788), 'nltk.corpus.cmudict.dict', 'cmudict.dict', ([], {}), '()\n', (4786, 4788), False, 'from nltk.corpus import cmudict\n'), ((5526, 5540), 'nltk.corpus.cmudict.dict', 'cmudict.dict', ([], {}), '()\n', (5538, 5540), False, 'from nltk.corpus import cmudict\n'), ((474, 491), 'utils.is_not_link', 'ut.is_not_link', (['s'], {}), '(s)\n', (488, 491), True, 'import utils as ut\n'), ((577, 594), 'utils.is_not_link', 'ut.is_not_link', (['s'], {}), '(s)\n', (591, 594), True, 'import utils as ut\n'), ((680, 704), 'utils.is_not_contraction', 'ut.is_not_contraction', (['s'], {}), '(s)\n', (701, 704), True, 'import utils as ut\n'), ((793, 817), 'utils.is_not_contraction', 'ut.is_not_contraction', (['s'], {}), '(s)\n', (814, 817), True, 'import utils as ut\n'), ((905, 926), 'utils.is_not_compound', 'ut.is_not_compound', (['s'], {}), '(s)\n', (923, 926), True, 'import utils as ut\n'), ((1016, 1037), 'utils.is_not_compound', 'ut.is_not_compound', (['s'], {}), '(s)\n', (1034, 1037), True, 'import utils as ut\n'), ((1114, 1135), 'utils.is_not_emoticon', 'ut.is_not_emoticon', (['s'], {}), '(s)\n', (1132, 1135), True, 'import utils as ut\n'), ((1216, 1237), 'utils.is_not_emoticon', 'ut.is_not_emoticon', (['s'], {}), '(s)\n', (1234, 1237), True, 'import utils as ut\n'), ((2958, 2980), 'utils.remove_emoticons', 'ut.remove_emoticons', (['s'], {}), '(s)\n', (2977, 2980), True, 'import utils as ut\n'), ((3346, 3364), 'utils.remove_links', 'ut.remove_links', (['s'], {}), '(s)\n', (3361, 3364), True, 'import utils as ut\n'), ((3508, 3530), 'utils.remove_stopwords', 'ut.remove_stopwords', (['s'], {}), '(s)\n', (3527, 3530), True, 'import utils as ut\n'), ((3749, 3770), 'utils.remove_retweets', 'ut.remove_retweets', (['s'], {}), '(s)\n', (3767, 3770), True, 'import utils as ut\n'), ((4152, 4173), 'utils.split_compounds', 'ut.split_compounds', (['s'], {}), 
'(s)\n', (4170, 4173), True, 'import utils as ut\n'), ((4453, 4477), 'utils.split_contractions', 'ut.split_contractions', (['s'], {}), '(s)\n', (4474, 4477), True, 'import utils as ut\n'), ((5223, 5269), 'utils.get_word_syllables', 'ut.get_word_syllables', (['string', 'pronouncingDict'], {}), '(string, pronouncingDict)\n', (5244, 5269), True, 'import utils as ut\n'), ((5996, 6050), 'utils.get_word_syllables_offline', 'ut.get_word_syllables_offline', (['string', 'pronouncingDict'], {}), '(string, pronouncingDict)\n', (6025, 6050), True, 'import utils as ut\n'), ((1540, 1571), 'utils.remove_special_characters', 'ut.remove_special_characters', (['s'], {}), '(s)\n', (1568, 1571), True, 'import utils as ut\n'), ((1714, 1745), 'utils.remove_special_characters', 'ut.remove_special_characters', (['s'], {}), '(s)\n', (1742, 1745), True, 'import utils as ut\n'), ((1889, 1920), 'utils.remove_special_characters', 'ut.remove_special_characters', (['s'], {}), '(s)\n', (1917, 1920), True, 'import utils as ut\n'), ((2085, 2116), 'utils.remove_special_characters', 'ut.remove_special_characters', (['s'], {}), '(s)\n', (2113, 2116), True, 'import utils as ut\n'), ((2386, 2417), 'utils.remove_special_characters', 'ut.remove_special_characters', (['s'], {}), '(s)\n', (2414, 2417), True, 'import utils as ut\n'), ((6583, 6599), 'utils.preprocess', 'ut.preprocess', (['s'], {}), '(s)\n', (6596, 6599), True, 'import utils as ut\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
import numpy as np
import scipy.io
# Exercise 5 | Regularized Linear Regression and Bias-Variance
#
# Instructions
# ------------
#
# This file contains code that helps you get started on the
# exercise. You will need to complete the following functions:
#
# linearRegCostFunction.m
# learningCurve.m
# validationCurve.m
#
# For this exercise, you will not need to change any code in this file,
# or any other files other than those mentioned above.
#
class test_ex5_regularized_linear_regressionand_bias_vs_variance(unittest.TestCase):
    @classmethod
    def setUp(cls):
        # NOTE(review): decorating setUp with @classmethod is unusual —
        # unittest still runs it before every test, but the fixtures land
        # on the class; setUpClass would express this intent better. Confirm.
        # Load Training Data
        print('Loading and Visualizing Data ...')
        data_file = "resource/ex5data1.mat"
        # Load
        # You will have X, y, Xval, yval, Xtest, ytest in your environment
        mat = scipy.io.loadmat(data_file)
        cls.X = mat["X"]
        cls.y = mat["y"]
        cls.Xval = mat["Xval"]
        cls.yval = mat["yval"]
        cls.Xtest = mat["Xtest"]
        cls.ytest = mat["ytest"]
        # m = number of training examples
        cls.m = np.shape(cls.X)[0]
cls.m = np.shape(cls.X)[0]
# =========== Part 1: Loading and Visualizing Data =============
# We start the exercise by first loading and visualizing the dataset.
# The following code will load the dataset into your environment and plot
# the data.
#
    def test_load_and_visualzing_data(self):
        """Scatter-plot the raw training data (visual check only)."""
        import matplotlib.pyplot as plt
        # print("point_end_y: {max_y}".format(max_y = point_end_y))
        plt.figure(1)
        plt.xlabel('Change in water level (x)')
        plt.ylabel('Water flowing out of the dam (y)')
        plt.scatter(self.X, self.y, marker='o', color='k', s=10)
        plt.show()
        # Plot training data
        print('Program paused. Press enter to continue.')
# =========== Part 2: Regularized Linear Regression Cost =============
# You should now implement the cost function for regularized linear
# regression.
    def test_regularized_linear_regression_cost_and_grad(self):
        """Check cost and gradient of regularized linear regression at
        theta = [1; 1] against the course's reference values."""
        # m = Number of examples
        theta = np.array([[1], [1]])
        # Prepend the bias column of ones.
        X_padded = np.column_stack((np.ones((self.m, 1)), self.X))
        from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction
        J, grad = linearRegCostFunction(X_padded, self.y, theta, 1)
        self.assertAlmostEqual(J, 303.993, delta=0.001)
        print('Cost at theta = [1 ; 1]: {cost} \n'
              '(this value should be about 303.993192)'.format(cost=J))
        # =========== Part 3: Regularized Linear Regression Gradient =============
        # You should now implement the gradient for regularized linear
        # regression.
        self.assertAlmostEqual(grad[0], -15.303016, delta=0.0001)
        self.assertAlmostEqual(grad[1], 598.250744, delta=0.0001)
        print('Gradient at theta = [1 ; 1]: [{grad_0}; {grad_1}] \n'
              '(this value should be about [-15.303016; 598.250744])\n'.format(grad_0=grad[0], grad_1=grad[1]))
# =========== Part 4: Train Linear Regression =============
# Once you have implemented the cost and gradient correctly, the
# trainLinearReg function will use your cost function to train
# regularized linear regression.
#
# Write Up Note: The data is non - linear, so this will not give a great
# fit.
#
    def test_train_linear_reg(self):
        """Train unregularized linear regression and plot the fitted line
        over the training data (visual check; no assertions)."""
        from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg
        # Train linear regression with lambda = 0
        _lambda = 0
        x_with_bias = np.column_stack((np.ones(self.m), self.X))
        cost, theta = trainLinearReg(x_with_bias, self.y, _lambda)
        # Predicted values for the training inputs.
        ret = x_with_bias.dot(theta)
        import matplotlib.pyplot as plt
        plt.figure(1)
        plt.xlabel('Change in water level (x)')
        plt.ylabel('Water flowing out of the dam (y)')
        plt.scatter(self.X, self.y, marker='x', c='r', s=30, linewidth=2)
        plt.plot(self.X, ret, linewidth=2)
        plt.show()
# =========== Part 5: Learning Curve for Linear Regression =============
# Next, you should implement the learningCurve function.
#
# Write Up Note: Since the model is underfitting the data, we expect to
# see a graph with "high bias" -- slide 8 in ML-advice.pdf
#
    def test_learning_curve_for_linear_regression(self):
        """Compute and plot train/validation error versus training-set size
        (expect a high-bias curve; visual check, no assertions)."""
        _lambda = 0
        from ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve import learningCurve
        # Bias column prepended to both training and validation inputs.
        x_with_bias = np.column_stack((np.ones(self.m), self.X))
        x_val_with_bias = np.column_stack((np.ones(np.shape(self.Xval)[0]), self.Xval))
        error_train, error_val = learningCurve(x_with_bias, self.y, x_val_with_bias, self.yval, 0)
        print('# Training Examples\tTrain Error\tCross Validation Error')
        for i in range(self.m):
            print(' \t{index}\t\t{error_train}\t{error_val}\n'.format(index=i,
                                                                      error_train=error_train[i],
                                                                      error_val=error_val[i]))
        import matplotlib.pyplot as plt
        temp = np.array([x for x in range(1, self.m + 1)])
        # plt.plot(1:m, error_train, 1:m, error_val);
        plt.title('Learning curve for linear regression')
        plt.xlabel('Number of training examples')
        plt.ylabel('Error')
        plt.plot(temp, np.array(error_train), color='b', linewidth=2, label='Train')
        plt.plot(temp, np.array(error_val), color='y', linewidth=2, label='Cross Validation')
        plt.legend()
        plt.show(block=True)
# =========== Part 6: Feature Mapping for Polynomial Regression =============
# One solution to this is to use polynomial regression.You should now
# complete polyFeatures to map each example into its powers
#
def test_feature_mapping_for_polynomial_regression(self):
p = 8
# Map X onto Polynomial Features and Normalize
from ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures import polyFeatures
X_poly = polyFeatures(self.X, p)
X_poly_m, X_poly_n = np.shape(X_poly)
self.assertEqual(X_poly_m, self.m)
self.assertEqual(X_poly_n, p)
from ex5_regularized_linear_regressionand_bias_vs_variance.featureNormalize import featureNormalize
X_poly, mu, sigma = featureNormalize(X_poly)
X_poly = np.column_stack((np.ones((self.m, 1)), X_poly))
X_poly_test = polyFeatures(self.Xtest, p)
X_poly_test_m, X_poly_test_n = np.shape(X_poly_test)
self.assertEqual(X_poly_test_m, np.shape(self.Xtest)[0])
self.assertEqual(X_poly_test_n, p)
X_poly_test = X_poly_test - mu
X_poly_test = X_poly_test / sigma
X_poly_test = np.column_stack((np.ones((X_poly_test.shape[0], 1)), X_poly_test))
X_poly_val = polyFeatures(self.Xval, p)
X_poly_val_m, X_poly_val_n = np.shape(X_poly_val)
self.assertEqual(X_poly_val_m, np.shape(self.Xval)[0])
self.assertEqual(X_poly_val_n, p)
X_poly_val = X_poly_val - mu
X_poly_val = X_poly_val / sigma
X_poly_val = np.column_stack((np.ones((X_poly_val.shape[0], 1)), X_poly_val))
print('Normalized Training Example 1:\n'
' {X_poly} '.format(X_poly=X_poly))
# =========== Part 7: Learning Curve for Polynomial Regression =============
# Now, you will get to experiment with polynomial regression with multiple
# values of lambda .The code below runs polynomial regression with
# lambda = 0. You should try running the code with different values of
# lambda to see how the fit and learning curve change.
#
_lambda = 0
from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg
cost, theta = trainLinearReg(X_poly, self.y, _lambda)
self.assertIsNotNone(cost)
self.assertIsNotNone(theta)
import matplotlib.pyplot as plt
plt.figure(1)
plt.scatter(self.X, self.y, marker='x', c='r', s=30, linewidth=2)
plt.xlim([-80, 80])
plt.ylim([-20, 60])
plt.xlabel('Change in water level(x)')
plt.ylabel('Water flowing out of the dam(y)')
plt.title('Polynomial Regression Fit (lambda = {:f})'.format(_lambda))
# plt.plot(self.X, self.y, 'rx', markersize=10, linewidth=1.5)
from ex5_regularized_linear_regressionand_bias_vs_variance.plotFit import plotFit
plotFit(min(self.X), max(self.X), mu, sigma, theta, p)
plt.show(block=False)
plt.figure(2)
from ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve import learningCurve
error_train, error_val = learningCurve(X_poly, self.y, X_poly_val, self.yval, 0)
p1, p2 = plt.plot(range(1, self.m + 1), error_train, range(1, self.m + 1), error_val)
plt.legend((p1, p2), ('Train', 'Cross Validation'))
plt.show(block=False)
print('Polynomial Regression (lambda =%{_lambda})'.format(_lambda=_lambda))
print('# Training Examples\tTrain Error\tCross Validation Error')
for i in range(0, self.m):
print('\t{i}\t\t{error_train}\t{error_val}'.format(i=i, error_train=error_train[i], error_val=error_val[i]))
# =========== Part 8: Validation for Selecting Lambda =============
# You will now implement validationCurve to test various values of
# lambda on a validation set. You will then use this to select the
# "best" lambda value.
#
from ex5_regularized_linear_regressionand_bias_vs_variance.validationCurve import validationCurve
lambda_vec, error_train, error_val = validationCurve(X_poly, self.y, X_poly_val, self.yval)
self.assertEqual(len(error_train), len(lambda_vec))
self.assertEqual(len(error_val), len(lambda_vec))
plt.close('all')
p1, p2, = plt.plot(lambda_vec, error_train, lambda_vec, error_val)
plt.legend((p1, p2), ('Train', 'Cross Validation'))
plt.xlabel('lambda')
plt.ylabel('Error')
plt.show(block=False)
print('lambda\t\tTrain Error\tValidation Error')
for i in range(len(lambda_vec)):
print(
'{lambda_vec}\t{error_train}\t{error_val}'.format(lambda_vec=lambda_vec[i], error_train=error_train[i],
error_val=error_val[i]))
# =========== Part 9: Computing test set error and Plotting learning curves with randomly selected examples
# ============= best lambda value from previous step
lambda_val = 3
# note that we're using X_poly - polynomial linear regression with polynomial features
from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg
_, theta = trainLinearReg(X_poly, self.y, lambda_val)
# because we're using X_poly, we also have to use X_poly_test with polynomial features
from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction
error_test, _ = linearRegCostFunction(X_poly_test, self.ytest, theta, 0)
print('Test set error: {error_test}'.format(error_test=error_test)) # expected 3.859
# why? something wrong
# self.assertAlmostEqual(error_test, 3.859, delta=0.01)
# =========== Part 10: Plot learning curves with randomly selected examples =============
#
# lambda_val value for this step
lambda_val = 0.01
times = 50
error_train_rand = np.zeros((self.m, times))
error_val_rand = np.zeros((self.m, times))
for i in range(self.m):
for k in range(times):
rand_sample_train = np.random.permutation(X_poly.shape[0])
rand_sample_train = rand_sample_train[:i + 1]
rand_sample_val = np.random.permutation(X_poly_val.shape[0])
rand_sample_val = rand_sample_val[:i + 1]
X_poly_train_rand = X_poly[rand_sample_train, :]
y_train_rand = self.y[rand_sample_train]
X_poly_val_rand = X_poly_val[rand_sample_val, :]
y_val_rand = self.yval[rand_sample_val]
_, theta = trainLinearReg(X_poly_train_rand, y_train_rand, lambda_val)
cost, _ = linearRegCostFunction(X_poly_train_rand, y_train_rand, np.asarray(theta), 0)
error_train_rand[i, k] = cost
cost, _ = linearRegCostFunction(X_poly_val_rand, y_val_rand, theta, 0)
error_val_rand[i, k] = cost
error_train = np.mean(error_train_rand, axis=1)
error_val = np.mean(error_val_rand, axis=1)
p1, p2 = plt.plot(range(self.m), error_train, range(self.m), error_val)
plt.title('Polynomial Regression Learning Curve (lambda = {:f})'.format(lambda_val))
plt.legend((p1, p2), ('Train', 'Cross Validation'))
plt.xlabel('Number of training examples')
plt.ylabel('Error')
plt.axis([0, 13, 0, 150])
plt.show(block=False)
# Run the unittest test runner when this module is executed as a script.
if __name__ == '__main__':
    unittest.main()
| [
"matplotlib.pyplot.title",
"ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg.trainLinearReg",
"numpy.ones",
"ex5_regularized_linear_regressionand_bias_vs_variance.featureNormalize.featureNormalize",
"numpy.shape",
"matplotlib.pyplot.figure",
"numpy.mean",
"unittest.main",
"matplo... | [((13315, 13330), 'unittest.main', 'unittest.main', ([], {}), '()\n', (13328, 13330), False, 'import unittest\n'), ((1535, 1548), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (1545, 1548), True, 'import matplotlib.pyplot as plt\n'), ((1557, 1596), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Change in water level (x)"""'], {}), "('Change in water level (x)')\n", (1567, 1596), True, 'import matplotlib.pyplot as plt\n'), ((1605, 1651), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Water flowing out of the dam (y)"""'], {}), "('Water flowing out of the dam (y)')\n", (1615, 1651), True, 'import matplotlib.pyplot as plt\n'), ((1660, 1716), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.X', 'self.y'], {'marker': '"""o"""', 'color': '"""k"""', 's': '(10)'}), "(self.X, self.y, marker='o', color='k', s=10)\n", (1671, 1716), True, 'import matplotlib.pyplot as plt\n'), ((1725, 1735), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (1733, 1735), True, 'import matplotlib.pyplot as plt\n'), ((2103, 2123), 'numpy.array', 'np.array', (['[[1], [1]]'], {}), '([[1], [1]])\n', (2111, 2123), True, 'import numpy as np\n'), ((2327, 2376), 'ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction.linearRegCostFunction', 'linearRegCostFunction', (['X_padded', 'self.y', 'theta', '(1)'], {}), '(X_padded, self.y, theta, 1)\n', (2348, 2376), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction\n'), ((3683, 3727), 'ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg.trainLinearReg', 'trainLinearReg', (['x_with_bias', 'self.y', '_lambda'], {}), '(x_with_bias, self.y, _lambda)\n', (3697, 3727), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg\n'), ((3814, 3827), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (3824, 3827), True, 'import matplotlib.pyplot as plt\n'), ((3836, 
3875), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Change in water level (x)"""'], {}), "('Change in water level (x)')\n", (3846, 3875), True, 'import matplotlib.pyplot as plt\n'), ((3884, 3930), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Water flowing out of the dam (y)"""'], {}), "('Water flowing out of the dam (y)')\n", (3894, 3930), True, 'import matplotlib.pyplot as plt\n'), ((3939, 4004), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.X', 'self.y'], {'marker': '"""x"""', 'c': '"""r"""', 's': '(30)', 'linewidth': '(2)'}), "(self.X, self.y, marker='x', c='r', s=30, linewidth=2)\n", (3950, 4004), True, 'import matplotlib.pyplot as plt\n'), ((4013, 4047), 'matplotlib.pyplot.plot', 'plt.plot', (['self.X', 'ret'], {'linewidth': '(2)'}), '(self.X, ret, linewidth=2)\n', (4021, 4047), True, 'import matplotlib.pyplot as plt\n'), ((4056, 4066), 'matplotlib.pyplot.show', 'plt.show', ([], {}), '()\n', (4064, 4066), True, 'import matplotlib.pyplot as plt\n'), ((4741, 4806), 'ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve.learningCurve', 'learningCurve', (['x_with_bias', 'self.y', 'x_val_with_bias', 'self.yval', '(0)'], {}), '(x_with_bias, self.y, x_val_with_bias, self.yval, 0)\n', (4754, 4806), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve import learningCurve\n'), ((5352, 5401), 'matplotlib.pyplot.title', 'plt.title', (['"""Learning curve for linear regression"""'], {}), "('Learning curve for linear regression')\n", (5361, 5401), True, 'import matplotlib.pyplot as plt\n'), ((5410, 5451), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of training examples"""'], {}), "('Number of training examples')\n", (5420, 5451), True, 'import matplotlib.pyplot as plt\n'), ((5460, 5479), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (5470, 5479), True, 'import matplotlib.pyplot as plt\n'), ((5667, 5679), 'matplotlib.pyplot.legend', 'plt.legend', ([], {}), '()\n', (5677, 5679), 
True, 'import matplotlib.pyplot as plt\n'), ((5688, 5708), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(True)'}), '(block=True)\n', (5696, 5708), True, 'import matplotlib.pyplot as plt\n'), ((6184, 6207), 'ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures.polyFeatures', 'polyFeatures', (['self.X', 'p'], {}), '(self.X, p)\n', (6196, 6207), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures import polyFeatures\n'), ((6237, 6253), 'numpy.shape', 'np.shape', (['X_poly'], {}), '(X_poly)\n', (6245, 6253), True, 'import numpy as np\n'), ((6472, 6496), 'ex5_regularized_linear_regressionand_bias_vs_variance.featureNormalize.featureNormalize', 'featureNormalize', (['X_poly'], {}), '(X_poly)\n', (6488, 6496), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.featureNormalize import featureNormalize\n'), ((6585, 6612), 'ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures.polyFeatures', 'polyFeatures', (['self.Xtest', 'p'], {}), '(self.Xtest, p)\n', (6597, 6612), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures import polyFeatures\n'), ((6652, 6673), 'numpy.shape', 'np.shape', (['X_poly_test'], {}), '(X_poly_test)\n', (6660, 6673), True, 'import numpy as np\n'), ((6974, 7000), 'ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures.polyFeatures', 'polyFeatures', (['self.Xval', 'p'], {}), '(self.Xval, p)\n', (6986, 7000), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.polyFeatures import polyFeatures\n'), ((7038, 7058), 'numpy.shape', 'np.shape', (['X_poly_val'], {}), '(X_poly_val)\n', (7046, 7058), True, 'import numpy as np\n'), ((7971, 8010), 'ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg.trainLinearReg', 'trainLinearReg', (['X_poly', 'self.y', '_lambda'], {}), '(X_poly, self.y, _lambda)\n', (7985, 8010), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import 
trainLinearReg\n'), ((8131, 8144), 'matplotlib.pyplot.figure', 'plt.figure', (['(1)'], {}), '(1)\n', (8141, 8144), True, 'import matplotlib.pyplot as plt\n'), ((8153, 8218), 'matplotlib.pyplot.scatter', 'plt.scatter', (['self.X', 'self.y'], {'marker': '"""x"""', 'c': '"""r"""', 's': '(30)', 'linewidth': '(2)'}), "(self.X, self.y, marker='x', c='r', s=30, linewidth=2)\n", (8164, 8218), True, 'import matplotlib.pyplot as plt\n'), ((8227, 8246), 'matplotlib.pyplot.xlim', 'plt.xlim', (['[-80, 80]'], {}), '([-80, 80])\n', (8235, 8246), True, 'import matplotlib.pyplot as plt\n'), ((8255, 8274), 'matplotlib.pyplot.ylim', 'plt.ylim', (['[-20, 60]'], {}), '([-20, 60])\n', (8263, 8274), True, 'import matplotlib.pyplot as plt\n'), ((8283, 8321), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Change in water level(x)"""'], {}), "('Change in water level(x)')\n", (8293, 8321), True, 'import matplotlib.pyplot as plt\n'), ((8330, 8375), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Water flowing out of the dam(y)"""'], {}), "('Water flowing out of the dam(y)')\n", (8340, 8375), True, 'import matplotlib.pyplot as plt\n'), ((8688, 8709), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (8696, 8709), True, 'import matplotlib.pyplot as plt\n'), ((8719, 8732), 'matplotlib.pyplot.figure', 'plt.figure', (['(2)'], {}), '(2)\n', (8729, 8732), True, 'import matplotlib.pyplot as plt\n'), ((8868, 8923), 'ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve.learningCurve', 'learningCurve', (['X_poly', 'self.y', 'X_poly_val', 'self.yval', '(0)'], {}), '(X_poly, self.y, X_poly_val, self.yval, 0)\n', (8881, 8923), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.learningCurve import learningCurve\n'), ((9026, 9077), 'matplotlib.pyplot.legend', 'plt.legend', (['(p1, p2)', "('Train', 'Cross Validation')"], {}), "((p1, p2), ('Train', 'Cross Validation'))\n", (9036, 9077), True, 'import matplotlib.pyplot as plt\n'), ((9086, 
9107), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (9094, 9107), True, 'import matplotlib.pyplot as plt\n'), ((9846, 9900), 'ex5_regularized_linear_regressionand_bias_vs_variance.validationCurve.validationCurve', 'validationCurve', (['X_poly', 'self.y', 'X_poly_val', 'self.yval'], {}), '(X_poly, self.y, X_poly_val, self.yval)\n', (9861, 9900), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.validationCurve import validationCurve\n'), ((10028, 10044), 'matplotlib.pyplot.close', 'plt.close', (['"""all"""'], {}), "('all')\n", (10037, 10044), True, 'import matplotlib.pyplot as plt\n'), ((10063, 10119), 'matplotlib.pyplot.plot', 'plt.plot', (['lambda_vec', 'error_train', 'lambda_vec', 'error_val'], {}), '(lambda_vec, error_train, lambda_vec, error_val)\n', (10071, 10119), True, 'import matplotlib.pyplot as plt\n'), ((10128, 10179), 'matplotlib.pyplot.legend', 'plt.legend', (['(p1, p2)', "('Train', 'Cross Validation')"], {}), "((p1, p2), ('Train', 'Cross Validation'))\n", (10138, 10179), True, 'import matplotlib.pyplot as plt\n'), ((10188, 10208), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""lambda"""'], {}), "('lambda')\n", (10198, 10208), True, 'import matplotlib.pyplot as plt\n'), ((10217, 10236), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (10227, 10236), True, 'import matplotlib.pyplot as plt\n'), ((10245, 10266), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (10253, 10266), True, 'import matplotlib.pyplot as plt\n'), ((11015, 11057), 'ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg.trainLinearReg', 'trainLinearReg', (['X_poly', 'self.y', 'lambda_val'], {}), '(X_poly, self.y, lambda_val)\n', (11029, 11057), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg\n'), ((11296, 11352), 
'ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction.linearRegCostFunction', 'linearRegCostFunction', (['X_poly_test', 'self.ytest', 'theta', '(0)'], {}), '(X_poly_test, self.ytest, theta, 0)\n', (11317, 11352), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction\n'), ((11767, 11792), 'numpy.zeros', 'np.zeros', (['(self.m, times)'], {}), '((self.m, times))\n', (11775, 11792), True, 'import numpy as np\n'), ((11818, 11843), 'numpy.zeros', 'np.zeros', (['(self.m, times)'], {}), '((self.m, times))\n', (11826, 11843), True, 'import numpy as np\n'), ((12820, 12853), 'numpy.mean', 'np.mean', (['error_train_rand'], {'axis': '(1)'}), '(error_train_rand, axis=1)\n', (12827, 12853), True, 'import numpy as np\n'), ((12874, 12905), 'numpy.mean', 'np.mean', (['error_val_rand'], {'axis': '(1)'}), '(error_val_rand, axis=1)\n', (12881, 12905), True, 'import numpy as np\n'), ((13088, 13139), 'matplotlib.pyplot.legend', 'plt.legend', (['(p1, p2)', "('Train', 'Cross Validation')"], {}), "((p1, p2), ('Train', 'Cross Validation'))\n", (13098, 13139), True, 'import matplotlib.pyplot as plt\n'), ((13148, 13189), 'matplotlib.pyplot.xlabel', 'plt.xlabel', (['"""Number of training examples"""'], {}), "('Number of training examples')\n", (13158, 13189), True, 'import matplotlib.pyplot as plt\n'), ((13198, 13217), 'matplotlib.pyplot.ylabel', 'plt.ylabel', (['"""Error"""'], {}), "('Error')\n", (13208, 13217), True, 'import matplotlib.pyplot as plt\n'), ((13226, 13251), 'matplotlib.pyplot.axis', 'plt.axis', (['[0, 13, 0, 150]'], {}), '([0, 13, 0, 150])\n', (13234, 13251), True, 'import matplotlib.pyplot as plt\n'), ((13260, 13281), 'matplotlib.pyplot.show', 'plt.show', ([], {'block': '(False)'}), '(block=False)\n', (13268, 13281), True, 'import matplotlib.pyplot as plt\n'), ((1111, 1126), 'numpy.shape', 'np.shape', (['cls.X'], {}), '(cls.X)\n', (1119, 1126), True, 'import numpy as np\n'), ((5503, 5524), 
'numpy.array', 'np.array', (['error_train'], {}), '(error_train)\n', (5511, 5524), True, 'import numpy as np\n'), ((5588, 5607), 'numpy.array', 'np.array', (['error_val'], {}), '(error_val)\n', (5596, 5607), True, 'import numpy as np\n'), ((2160, 2180), 'numpy.ones', 'np.ones', (['(self.m, 1)'], {}), '((self.m, 1))\n', (2167, 2180), True, 'import numpy as np\n'), ((3635, 3650), 'numpy.ones', 'np.ones', (['self.m'], {}), '(self.m)\n', (3642, 3650), True, 'import numpy as np\n'), ((4594, 4609), 'numpy.ones', 'np.ones', (['self.m'], {}), '(self.m)\n', (4601, 4609), True, 'import numpy as np\n'), ((6531, 6551), 'numpy.ones', 'np.ones', (['(self.m, 1)'], {}), '((self.m, 1))\n', (6538, 6551), True, 'import numpy as np\n'), ((6714, 6734), 'numpy.shape', 'np.shape', (['self.Xtest'], {}), '(self.Xtest)\n', (6722, 6734), True, 'import numpy as np\n'), ((6902, 6936), 'numpy.ones', 'np.ones', (['(X_poly_test.shape[0], 1)'], {}), '((X_poly_test.shape[0], 1))\n', (6909, 6936), True, 'import numpy as np\n'), ((7098, 7117), 'numpy.shape', 'np.shape', (['self.Xval'], {}), '(self.Xval)\n', (7106, 7117), True, 'import numpy as np\n'), ((7279, 7312), 'numpy.ones', 'np.ones', (['(X_poly_val.shape[0], 1)'], {}), '((X_poly_val.shape[0], 1))\n', (7286, 7312), True, 'import numpy as np\n'), ((11948, 11986), 'numpy.random.permutation', 'np.random.permutation', (['X_poly.shape[0]'], {}), '(X_poly.shape[0])\n', (11969, 11986), True, 'import numpy as np\n'), ((12084, 12126), 'numpy.random.permutation', 'np.random.permutation', (['X_poly_val.shape[0]'], {}), '(X_poly_val.shape[0])\n', (12105, 12126), True, 'import numpy as np\n'), ((12457, 12516), 'ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg.trainLinearReg', 'trainLinearReg', (['X_poly_train_rand', 'y_train_rand', 'lambda_val'], {}), '(X_poly_train_rand, y_train_rand, lambda_val)\n', (12471, 12516), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.trainLinearReg import trainLinearReg\n'), ((12692, 
12752), 'ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction.linearRegCostFunction', 'linearRegCostFunction', (['X_poly_val_rand', 'y_val_rand', 'theta', '(0)'], {}), '(X_poly_val_rand, y_val_rand, theta, 0)\n', (12713, 12752), False, 'from ex5_regularized_linear_regressionand_bias_vs_variance.linearRegCostFunction import linearRegCostFunction\n'), ((12598, 12615), 'numpy.asarray', 'np.asarray', (['theta'], {}), '(theta)\n', (12608, 12615), True, 'import numpy as np\n'), ((4671, 4690), 'numpy.shape', 'np.shape', (['self.Xval'], {}), '(self.Xval)\n', (4679, 4690), True, 'import numpy as np\n')] |
from selectors import EpollSelector
from scipy import ma
import torch
from torch.cuda.amp import autocast
from advbench import attacks
from einops import repeat, rearrange
import torch.nn.functional as F
from tqdm import tqdm
from advbench.datasets import FFCV_AVAILABLE
class PerturbationEval():
    """Evaluate a classifier's loss/accuracy over a grid of perturbation
    parameters.

    Wraps an ``algorithm`` (providing classifier, hparams, device and an
    attack whose ``perturbation`` is applied to images) and a data ``loader``.
    Subclasses supply the parameter grid via :meth:`get_grid`.
    """

    def __init__(self, algorithm, loader, max_perturbations=None, batched=True):
        self.algorithm = algorithm
        self.classifier = self.algorithm.classifier
        self.hparams = self.algorithm.hparams
        self.device = self.algorithm.device
        self.loader = loader
        self.max_perturbations = max_perturbations
        self.perturbation = self.algorithm.attack.perturbation
        self.dim = self.perturbation.dim
        # batched=True evaluates the whole grid in one forward pass per batch;
        # batched=False loops over grid points (lower memory, slower).
        self.batched = batched

    def eval_perturbed(self, single_img=False, batches=1):
        """Return (grid, per-grid-point mean loss, per-grid-point mean accuracy).

        If ``single_img`` is True only the first dataset example is used;
        otherwise up to ``batches`` loader batches are averaged.
        """
        self.grid = self.get_grid()
        self.grid_size = self.grid.shape[0]
        self.algorithm.classifier.eval()
        self.algorithm.export()
        adv_losses = []
        adv_accs = []
        with torch.no_grad():
            if single_img:
                imgs, labels = self.loader.dataset[0]
                imgs, labels = imgs.unsqueeze(0).to(self.device), torch.tensor([labels]).to(self.device)
                if FFCV_AVAILABLE:
                    with autocast():
                        adv_losses, adv_accs = self.step(imgs, labels)
                else:
                    adv_losses, adv_accs = self.step(imgs, labels)
            else:
                for idx, batch in tqdm(enumerate(self.loader)):
                    if idx < batches:
                        imgs, labels = batch
                        imgs, labels = imgs.to(self.device), labels.to(self.device)
                        if FFCV_AVAILABLE:
                            with autocast():
                                adv_loss, adv_acc = self.step(imgs, labels)
                        else:
                            adv_loss, adv_acc = self.step(imgs, labels)
                        adv_losses.append(adv_loss)
                        adv_accs.append(adv_acc)
                    else:
                        break
        self.algorithm.unexport()
        self.algorithm.classifier.train()
        self.loader.shuffle = True
        if batches > 1 or not single_img:
            # concatenate per-batch (B, S) tensors and average over examples
            adv_losses = torch.concat(adv_losses, dim=0).mean(dim=0)
            adv_accs = torch.concat(adv_accs, dim=0).mean(dim=0)
        return self.grid, adv_losses, adv_accs

    def step(self, imgs, labels):
        """Compute per-example, per-grid-point loss and accuracy: (B, S) tensors."""
        batch_size = imgs.shape[0]
        if self.batched:
            # tile each image once per grid point and perturb all at once
            adv_imgs = self.perturbation.perturb_img(
                repeat(imgs, 'B W H C -> (B S) W H C', B=batch_size, S=self.grid_size),
                repeat(self.grid, 'S D -> (B S) D', B=batch_size, D=self.dim, S=self.grid_size))
            pred = self.classifier(adv_imgs)
            adv_loss = F.cross_entropy(pred, repeat(labels, 'B -> (B S)', S=self.grid_size), reduction="none")
            # BUG FIX: pred holds logits of shape (B*S, C); they must be
            # argmax-ed to class indices before comparing with labels
            # (matching the non-batched branch below). Comparing logits to
            # labels directly broadcast to a (B*S, C) tensor and broke the
            # rearrange that follows.
            adv_acc = torch.eq(pred.argmax(dim=1), repeat(labels, 'B -> (B S)', S=self.grid_size))
            adv_loss = rearrange(adv_loss, '(B S) -> B S', B=batch_size, S=self.grid_size)
            adv_acc = rearrange(adv_acc, '(B S) -> B S', B=batch_size, S=self.grid_size)
        else:
            adv_loss = torch.empty((batch_size, self.grid_size), device=imgs.device)
            adv_acc = torch.empty((batch_size, self.grid_size), device=imgs.device)
            for s in range(self.grid_size):
                grid = repeat(self.grid[s], 'D -> B D', B=batch_size, D=self.dim)
                adv_imgs = self.perturbation.perturb_img(imgs, grid)
                pred = self.classifier(adv_imgs)
                angle_loss = F.cross_entropy(pred, labels, reduction="none")
                adv_loss[:, s] = angle_loss
                adv_acc[:, s] = torch.eq(pred.argmax(dim=1), labels)
        return adv_loss, adv_acc

    def get_grid(self):
        """Subclass hook: return an (S, dim) tensor of perturbation parameters."""
        pass
class GridEval(PerturbationEval):
    """Perturbation evaluation over the grid built by a ``Grid_Search`` attack
    that uses the algorithm's own perturbation type."""

    def __init__(self, algorithm, loader, max_perturbations=None):
        super().__init__(algorithm, loader, max_perturbations=max_perturbations)
        self.attack = attacks.Grid_Search(
            algorithm.classifier,
            algorithm.hparams,
            algorithm.device,
            perturbation=algorithm.perturbation_name,
            grid_size=max_perturbations,
        )

    def get_grid(self):
        """Return the attack's precomputed parameter grid."""
        return self.attack.grid
class AngleGrid(PerturbationEval):
    """Sweep rotation angles while holding the translation (tx, ty) fixed."""

    def __init__(self, algorithm, loader, tx=0, ty=0, max_perturbations=None, batched=False):
        super().__init__(algorithm, loader, max_perturbations=max_perturbations, batched=False)
        self.attack = attacks.Grid_Search(
            algorithm.classifier,
            algorithm.hparams,
            algorithm.device,
            perturbation="Rotation",
            grid_size=max_perturbations,
        )
        self.tx = tx
        self.ty = ty

    def get_grid(self):
        """Return an (S, 3) grid of [angle, tx, ty] rows, one per angle."""
        angles = self.attack.grid
        ones = torch.ones_like(angles)
        return torch.column_stack([angles, self.tx * ones, self.ty * ones])
"torch.ones_like",
"torch.cuda.amp.autocast",
"torch.column_stack",
"torch.empty",
"torch.nn.functional.cross_entropy",
"einops.rearrange",
"torch.concat",
"advbench.attacks.Grid_Search",
"torch.tensor",
"torch.no_grad",
"einops.repeat"
] | [((4211, 4370), 'advbench.attacks.Grid_Search', 'attacks.Grid_Search', (['algorithm.classifier', 'algorithm.hparams', 'algorithm.device'], {'perturbation': 'algorithm.perturbation_name', 'grid_size': 'max_perturbations'}), '(algorithm.classifier, algorithm.hparams, algorithm.\n device, perturbation=algorithm.perturbation_name, grid_size=\n max_perturbations)\n', (4230, 4370), False, 'from advbench import attacks\n'), ((4679, 4816), 'advbench.attacks.Grid_Search', 'attacks.Grid_Search', (['algorithm.classifier', 'algorithm.hparams', 'algorithm.device'], {'perturbation': '"""Rotation"""', 'grid_size': 'max_perturbations'}), "(algorithm.classifier, algorithm.hparams, algorithm.\n device, perturbation='Rotation', grid_size=max_perturbations)\n", (4698, 4816), False, 'from advbench import attacks\n'), ((4931, 4958), 'torch.ones_like', 'torch.ones_like', (['angle_grid'], {}), '(angle_grid)\n', (4946, 4958), False, 'import torch\n'), ((4974, 5038), 'torch.column_stack', 'torch.column_stack', (['[angle_grid, self.tx * ones, self.ty * ones]'], {}), '([angle_grid, self.tx * ones, self.ty * ones])\n', (4992, 5038), False, 'import torch\n'), ((1052, 1067), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (1065, 1067), False, 'import torch\n'), ((3147, 3214), 'einops.rearrange', 'rearrange', (['adv_loss', '"""(B S) -> B S"""'], {'B': 'batch_size', 'S': 'self.grid_size'}), "(adv_loss, '(B S) -> B S', B=batch_size, S=self.grid_size)\n", (3156, 3214), False, 'from einops import repeat, rearrange\n'), ((3237, 3303), 'einops.rearrange', 'rearrange', (['adv_acc', '"""(B S) -> B S"""'], {'B': 'batch_size', 'S': 'self.grid_size'}), "(adv_acc, '(B S) -> B S', B=batch_size, S=self.grid_size)\n", (3246, 3303), False, 'from einops import repeat, rearrange\n'), ((3341, 3402), 'torch.empty', 'torch.empty', (['(batch_size, self.grid_size)'], {'device': 'imgs.device'}), '((batch_size, self.grid_size), device=imgs.device)\n', (3352, 3402), False, 'import torch\n'), ((3425, 3486), 
'torch.empty', 'torch.empty', (['(batch_size, self.grid_size)'], {'device': 'imgs.device'}), '((batch_size, self.grid_size), device=imgs.device)\n', (3436, 3486), False, 'import torch\n'), ((2714, 2784), 'einops.repeat', 'repeat', (['imgs', '"""B W H C -> (B S) W H C"""'], {'B': 'batch_size', 'S': 'self.grid_size'}), "(imgs, 'B W H C -> (B S) W H C', B=batch_size, S=self.grid_size)\n", (2720, 2784), False, 'from einops import repeat, rearrange\n'), ((2802, 2881), 'einops.repeat', 'repeat', (['self.grid', '"""S D -> (B S) D"""'], {'B': 'batch_size', 'D': 'self.dim', 'S': 'self.grid_size'}), "(self.grid, 'S D -> (B S) D', B=batch_size, D=self.dim, S=self.grid_size)\n", (2808, 2881), False, 'from einops import repeat, rearrange\n'), ((2973, 3019), 'einops.repeat', 'repeat', (['labels', '"""B -> (B S)"""'], {'S': 'self.grid_size'}), "(labels, 'B -> (B S)', S=self.grid_size)\n", (2979, 3019), False, 'from einops import repeat, rearrange\n'), ((3076, 3122), 'einops.repeat', 'repeat', (['labels', '"""B -> (B S)"""'], {'S': 'self.grid_size'}), "(labels, 'B -> (B S)', S=self.grid_size)\n", (3082, 3122), False, 'from einops import repeat, rearrange\n'), ((3554, 3612), 'einops.repeat', 'repeat', (['self.grid[s]', '"""D -> B D"""'], {'B': 'batch_size', 'D': 'self.dim'}), "(self.grid[s], 'D -> B D', B=batch_size, D=self.dim)\n", (3560, 3612), False, 'from einops import repeat, rearrange\n'), ((3760, 3807), 'torch.nn.functional.cross_entropy', 'F.cross_entropy', (['pred', 'labels'], {'reduction': '"""none"""'}), "(pred, labels, reduction='none')\n", (3775, 3807), True, 'import torch.nn.functional as F\n'), ((2389, 2420), 'torch.concat', 'torch.concat', (['adv_losses'], {'dim': '(0)'}), '(adv_losses, dim=0)\n', (2401, 2420), False, 'import torch\n'), ((2456, 2485), 'torch.concat', 'torch.concat', (['adv_accs'], {'dim': '(0)'}), '(adv_accs, dim=0)\n', (2468, 2485), False, 'import torch\n'), ((1331, 1341), 'torch.cuda.amp.autocast', 'autocast', ([], {}), '()\n', (1339, 1341), 
False, 'from torch.cuda.amp import autocast\n'), ((1224, 1246), 'torch.tensor', 'torch.tensor', (['[labels]'], {}), '([labels])\n', (1236, 1246), False, 'import torch\n'), ((1840, 1850), 'torch.cuda.amp.autocast', 'autocast', ([], {}), '()\n', (1848, 1850), False, 'from torch.cuda.amp import autocast\n')] |
# remove some of the asim-style columns added by marin_work_tour_mode_choice.py
# so data input files look 'realistic' - and that work is done instaed by 'import_tours' annotation expression files
import os
import pandas as pd
import openmatrix as omx
input_dir = './data_3_marin'
output_dir = './data_3_marin/fix' # don't overwrite - but these files shold replace 'oritinals'
def input_path(filename):
    """Return the path of *filename* inside the raw input directory.

    (Also fixes the 'filenane' typo in the parameter name; all callers in
    this script pass the argument positionally.)
    """
    return os.path.join(input_dir, filename)
def output_path(filename):
    """Return the path of *filename* inside the fixed-output directory.

    (Also fixes the 'filenane' typo in the parameter name; all callers in
    this script pass the argument positionally.)
    """
    return os.path.join(output_dir, filename)
# Sequentially strip the asim-added columns from each input file and write the
# cleaned copy to the output directory. Order matters only in that each file
# is read before its cleaned version is written.
# 0 - get county zones
mazs = pd.read_csv(input_path("maz_data_asim.csv"))
del mazs['zone_id']
del mazs['county_id']
mazs.to_csv(output_path("maz_data_asim.csv"), index=False)
# sanity check: TAZ ids must be exactly 1..N (contiguous, 1-based)
tazs = mazs["TAZ"].unique()
tazs.sort()
assert ((tazs - 1) == range(len(tazs))).all()
# MAZ,TAZ
# NOTE(review): `taps` here actually holds the maz-to-taz crosswalk, not TAP
# data; it is written out before being rebound to the real tap table below.
taps = pd.read_csv(input_path("maz_taz.csv"))
# nothing
taps.to_csv(output_path("maz_taz.csv"), index=False)
taps = pd.read_csv(input_path("tap_data.csv"))
# nothing
taps.to_csv(output_path("tap_data.csv"), index=False)
# 2 - nearby skims need headers
# drop the duplicated trailing key columns (e.g. 'TAP.1') pandas created from
# repeated headers in the raw files
maz_tap_walk = pd.read_csv(input_path("maz_tap_walk.csv"))
maz_maz_walk = pd.read_csv(input_path("maz_maz_walk.csv"))
maz_maz_bike = pd.read_csv(input_path("maz_maz_bike.csv"))
del maz_tap_walk['TAP.1']
del maz_maz_walk['DMAZ.1']
del maz_maz_bike['DMAZ.1']
maz_tap_walk.to_csv(output_path("maz_tap_walk.csv"), index=False)
maz_maz_walk.to_csv(output_path("maz_maz_walk.csv"), index=False)
maz_maz_bike.to_csv(output_path("maz_maz_bike.csv"), index=False)
# 3 - accessibility data
access = pd.read_csv(input_path("access.csv"))
del access['zone_id']
access.to_csv(output_path("access.csv"), index=False)
# 4 - maz to tap drive data (pass-through, no columns removed)
taz_tap_drive = pd.read_csv(input_path("maz_taz_tap_drive.csv"))
taz_tap_drive.to_csv(output_path("maz_taz_tap_drive.csv"), index=False)
# 5 - households
households = pd.read_csv(input_path("households_asim.csv"))
del households['home_zone_id']
del households['household_id']
households.to_csv(output_path("households_asim.csv"), index=False)
# 6 - persons
persons = pd.read_csv(input_path("persons_asim.csv"))
del persons['person_id']
del persons['household_id']
del persons['is_university']
persons.to_csv(output_path("persons_asim.csv"), index=False)
# 7 - tours file
work_tours = pd.read_csv(input_path("work_tours.csv"))
del work_tours["household_id"]
del work_tours["destination"]
del work_tours["start"]
del work_tours["end"]
del work_tours["tour_type"]
work_tours.to_csv(output_path("work_tours.csv"), index=False)
| [
"os.path.join"
] | [((419, 452), 'os.path.join', 'os.path.join', (['input_dir', 'filenane'], {}), '(input_dir, filenane)\n', (431, 452), False, 'import os\n'), ((493, 527), 'os.path.join', 'os.path.join', (['output_dir', 'filenane'], {}), '(output_dir, filenane)\n', (505, 527), False, 'import os\n')] |
import RPi.GPIO as GPIO
import time
import random
GPIO.setmode(GPIO.BOARD)  # use physical (BOARD) pin numbering
BTN_PIN = 11   # push button on physical pin 11
BTN_PIN1 = 10  # second push button on physical pin 10
LED_PIN = 12   # unused unless the LED setup line below is re-enabled
WAIT_TIME = 200  # edge-detect debounce time, milliseconds
status = GPIO.LOW
GPIO.setup(BTN_PIN, GPIO.IN, pull_up_down = GPIO.PUD_UP)
# NOTE(review): BTN_PIN1 is used for add_event_detect later but is never
# configured with GPIO.setup -- confirm whether that is intentional.
# GPIO.setup(LED_PIN, GPIO.OUT, initial = status)
R_pin = 33 # R: physical pin 33
G_pin = 35 # G: physical pin 35
B_pin = 37 # B: physical pin 37
GPIO.setup(R_pin, GPIO.OUT)
GPIO.setup(G_pin, GPIO.OUT)
GPIO.setup(B_pin, GPIO.OUT)
# 100 Hz software PWM, one channel per colour of the RGB LED
R_pwm = GPIO.PWM(R_pin, 100)
G_pwm = GPIO.PWM(G_pin, 100)
B_pwm = GPIO.PWM(B_pin, 100)
# use python RPi.GPIO, square wave is 70k Hz
# use python wiringpi2 or bindings, square wave is 28k Hz
# use C wiringPi, square wave is 4.1-4.6M Hz
def check_RGB_range(R, G, B):
    """Clamp each colour channel to the valid 0..255 range.

    Returns the (R, G, B) tuple with every channel limited to [0, 255].
    """
    # max/min clamping replaces the original eight if-statements
    R = max(0, min(255, R))
    G = max(0, min(255, G))
    B = max(0, min(255, B))
    return R, G, B
R,G,B = 255,255,255  # current colour; the cycle starts at white
state = 0            # index of the colour-cycle state machine (0..6)
interval = 32        # step size applied to a channel each iteration
# start every PWM channel at 0% duty cycle (LED off)
R_pwm.start(0)
G_pwm.start(0)
B_pwm.start(0)
def mycallback(channel):
    """Button handler: cycle the RGB LED through colour-wheel corners forever.

    Each iteration nudges the channels by `interval` according to the current
    state, clamps to 0..255, hops to the next state when an exact corner
    colour is reached, and updates the PWM duty cycles.
    """
    print("Button pressed @", time.ctime())
    global R, G, B, state
    # per-state (dR, dG, dB) direction, in units of `interval`
    STEP = {
        0: (0, -1, -1),
        1: (-1, 1, 0),
        2: (0, -1, 1),
        3: (1, 0, 0),
        4: (-1, 1, 0),
        5: (1, 0, -1),
        6: (0, -1, 0),
    }
    # exact corner colours and the state each one selects:
    # white, red, green, blue, magenta, cyan, yellow
    CORNER = {
        (255, 255, 255): 0,
        (255, 0, 0): 1,
        (0, 255, 0): 2,
        (0, 0, 255): 3,
        (255, 0, 255): 4,
        (0, 255, 255): 5,
        (255, 255, 0): 6,
    }
    while True:
        dR, dG, dB = STEP[state]
        R, G, B = check_RGB_range(R + dR * interval,
                                  G + dG * interval,
                                  B + dB * interval)
        state = CORNER.get((R, G, B), state)
        # map 0..255 channel values onto 0..100 PWM duty cycles
        R_mapping = int(R / 255 * 100)
        G_mapping = int(G / 255 * 100)
        B_mapping = int(B / 255 * 100)
        R_pwm.ChangeDutyCycle(R_mapping)
        G_pwm.ChangeDutyCycle(G_mapping)
        B_pwm.ChangeDutyCycle(B_mapping)
        print("R,G,B:\t", R, G, B, "\t mapping: ", R_mapping, G_mapping, B_mapping)
        time.sleep(0.3)
    #GPIO.output(LED_PIN, status)
def mycallback1(channel):
    """Edge-detect callback for the second button: jump to a random colour
    state, then fade the RGB LED around the colour cycle forever.

    Mutates the module-level R/G/B/state globals and drives the three PWM
    channels. NOTE: this function never returns (infinite while loop).

    Args:
        channel: GPIO channel number supplied by RPi.GPIO's event system.
    """
    print("Button pressed @", time.ctime())
    global R, G, B, state
    # Bug fix: random.random() accepts no arguments (it returns a float in
    # [0, 1)), so random.random(0, 6) raised TypeError. random.randint(0, 6)
    # is what picks one of the seven colour states.
    state = random.randint(0, 6)
    while 1:
        # Each state moves the colour one `interval` step towards the next
        # corner of the RGB cube (no-op `X = X` assignments dropped).
        if state == 0:
            G = G - interval
            B = B - interval
        if state == 1:
            R = R - interval
            G = G + interval
        if state == 2:
            G = G - interval
            B = B + interval
        if state == 3:
            R = R + interval
        if state == 4:
            R = R - interval
            G = G + interval
        if state == 5:
            R = R + interval
            B = B - interval
        if state == 6:
            G = G - interval
        R, G, B = check_RGB_range(R, G, B)
        # When a corner of the RGB cube is reached, advance to the next state.
        if R == 255 and G == 255 and B == 255: state = 0  # WHITE
        if R == 255 and G == 0 and B == 0: state = 1      # RED
        if R == 0 and G == 255 and B == 0: state = 2      # GREEN
        if R == 0 and G == 0 and B == 255: state = 3      # BLUE
        if R == 255 and G == 0 and B == 255: state = 4    # Magenta
        if R == 0 and G == 255 and B == 255: state = 5    # cyan-blue
        if R == 255 and G == 255 and B == 0: state = 6    # YELLOW
        # Map 0-255 channel values onto the 0-100 PWM duty-cycle range.
        R_mapping = int(R / 255 * 100)
        G_mapping = int(G / 255 * 100)
        B_mapping = int(B / 255 * 100)
        R_pwm.ChangeDutyCycle(R_mapping)
        G_pwm.ChangeDutyCycle(G_mapping)
        B_pwm.ChangeDutyCycle(B_mapping)
        print("R,G,B:\t", R, G, B, "\t mapping: ", R_mapping, G_mapping, B_mapping)
        time.sleep(0.3)
try:
    # Bug fix: pass the callback *function objects*. The original code
    # called them (mycallback(11) / mycallback1(10)), which entered their
    # infinite loops immediately and would have registered their return
    # value (None) as the callback. RPi.GPIO supplies the channel number
    # to the callback itself.
    GPIO.add_event_detect(BTN_PIN, GPIO.FALLING, callback=mycallback, bouncetime=WAIT_TIME)
    GPIO.add_event_detect(BTN_PIN1, GPIO.FALLING, callback=mycallback1, bouncetime=WAIT_TIME)
    # Keep the main thread alive; edge detection runs on a background thread.
    while True:
        time.sleep(10)
except KeyboardInterrupt:
    print("Exception: KeyboardInterrupt")
finally:
    # Always release the GPIO pins, even on error or Ctrl-C.
    GPIO.cleanup()
| [
"RPi.GPIO.setmode",
"RPi.GPIO.cleanup",
"RPi.GPIO.setup",
"time.ctime",
"time.sleep",
"random.random",
"RPi.GPIO.PWM"
] | [((51, 75), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BOARD'], {}), '(GPIO.BOARD)\n', (63, 75), True, 'import RPi.GPIO as GPIO\n'), ((170, 224), 'RPi.GPIO.setup', 'GPIO.setup', (['BTN_PIN', 'GPIO.IN'], {'pull_up_down': 'GPIO.PUD_UP'}), '(BTN_PIN, GPIO.IN, pull_up_down=GPIO.PUD_UP)\n', (180, 224), True, 'import RPi.GPIO as GPIO\n'), ((381, 408), 'RPi.GPIO.setup', 'GPIO.setup', (['R_pin', 'GPIO.OUT'], {}), '(R_pin, GPIO.OUT)\n', (391, 408), True, 'import RPi.GPIO as GPIO\n'), ((409, 436), 'RPi.GPIO.setup', 'GPIO.setup', (['G_pin', 'GPIO.OUT'], {}), '(G_pin, GPIO.OUT)\n', (419, 436), True, 'import RPi.GPIO as GPIO\n'), ((437, 464), 'RPi.GPIO.setup', 'GPIO.setup', (['B_pin', 'GPIO.OUT'], {}), '(B_pin, GPIO.OUT)\n', (447, 464), True, 'import RPi.GPIO as GPIO\n'), ((474, 494), 'RPi.GPIO.PWM', 'GPIO.PWM', (['R_pin', '(100)'], {}), '(R_pin, 100)\n', (482, 494), True, 'import RPi.GPIO as GPIO\n'), ((503, 523), 'RPi.GPIO.PWM', 'GPIO.PWM', (['G_pin', '(100)'], {}), '(G_pin, 100)\n', (511, 523), True, 'import RPi.GPIO as GPIO\n'), ((532, 552), 'RPi.GPIO.PWM', 'GPIO.PWM', (['B_pin', '(100)'], {}), '(B_pin, 100)\n', (540, 552), True, 'import RPi.GPIO as GPIO\n'), ((2549, 2568), 'random.random', 'random.random', (['(0)', '(6)'], {}), '(0, 6)\n', (2562, 2568), False, 'import random\n'), ((4121, 4135), 'RPi.GPIO.cleanup', 'GPIO.cleanup', ([], {}), '()\n', (4133, 4135), True, 'import RPi.GPIO as GPIO\n'), ((1089, 1101), 'time.ctime', 'time.ctime', ([], {}), '()\n', (1099, 1101), False, 'import time\n'), ((2358, 2373), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (2368, 2373), False, 'import time\n'), ((2508, 2520), 'time.ctime', 'time.ctime', ([], {}), '()\n', (2518, 2520), False, 'import time\n'), ((3803, 3818), 'time.sleep', 'time.sleep', (['(0.3)'], {}), '(0.3)\n', (3813, 3818), False, 'import time\n'), ((4029, 4043), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (4039, 4043), False, 'import time\n')] |
import cv2

# Haar cascade for eye detection; the XML file must sit next to this script.
eyeCascade = cv2.CascadeClassifier('haarcascade_eye.xml')
video_capture = cv2.VideoCapture(0)  # default webcam
while True:
    ret, frame = video_capture.read()
    if not ret:
        # Robustness: stop cleanly if the camera yields no frame instead of
        # crashing inside cvtColor on a None frame.
        break
    gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
    eyes = eyeCascade.detectMultiScale(gray,
                                       scaleFactor=1.2,
                                       minNeighbors=5,
                                       minSize=(30, 30))
    # Draw a red box around every detected eye.
    for (x, y, w, h) in eyes:
        cv2.rectangle(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)
    cv2.imshow("Video", frame)
    # Quit on 'q'.
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break
video_capture.release()
# Bug fix: the correct OpenCV API is destroyAllWindows (lowercase 'd');
# cv2.DestroyAllWindows does not exist and raised AttributeError on exit.
cv2.destroyAllWindows()
| [
"cv2.cvtColor",
"cv2.waitKey",
"cv2.VideoCapture",
"cv2.rectangle",
"cv2.DestroyAllWindows",
"cv2.CascadeClassifier",
"cv2.imshow"
] | [((25, 69), 'cv2.CascadeClassifier', 'cv2.CascadeClassifier', (['"""haarcascade_eye.xml"""'], {}), "('haarcascade_eye.xml')\n", (46, 69), False, 'import cv2\n'), ((86, 105), 'cv2.VideoCapture', 'cv2.VideoCapture', (['(0)'], {}), '(0)\n', (102, 105), False, 'import cv2\n'), ((475, 498), 'cv2.DestroyAllWindows', 'cv2.DestroyAllWindows', ([], {}), '()\n', (496, 498), False, 'import cv2\n'), ((161, 200), 'cv2.cvtColor', 'cv2.cvtColor', (['frame', 'cv2.COLOR_BGR2GRAY'], {}), '(frame, cv2.COLOR_BGR2GRAY)\n', (173, 200), False, 'import cv2\n'), ((376, 402), 'cv2.imshow', 'cv2.imshow', (['"""Video"""', 'frame'], {}), "('Video', frame)\n", (386, 402), False, 'import cv2\n'), ((324, 384), 'cv2.rectangle', 'cv2.rectangle', (['frame', '(x, y)', '(x + w, y + h)', '(0, 0, 255)', '(2)'], {}), '(frame, (x, y), (x + w, y + h), (0, 0, 255), 2)\n', (337, 384), False, 'import cv2\n'), ((407, 421), 'cv2.waitKey', 'cv2.waitKey', (['(1)'], {}), '(1)\n', (418, 421), False, 'import cv2\n')] |
###################################################################
# This implementation is based on the following papaer:
#
# <NAME> and <NAME>. Automatic data and computation
# decomposition on distributed# memory parallel computers.
# ACM Transactions on Programming Languages and Systems,
# 24(1):1–50, Jan. 2002.
#
# Algorithm of Figure 5
#
###################################################################
import heterocl as hcl
import numpy as np
def top_adi(Nx=20, Ny=20, NT=20, Dx=0.1, Dy=0.1, DT=0.1, B1=0.1, B2=0.1,
            mu1=0.1, mu2=0.1, a=0.1, b=0.1, c=0.1, d=0.1, e=0.1, f=0.1,
            dtype=hcl.Int(), target=None):
    """Build the HeteroCL ADI (alternating-direction-implicit) kernel.

    NOTE(review): Dx/Dy/DT/B1/B2/mu1/mu2 are accepted for signature symmetry
    with adi_golden but are not referenced by the kernel below.
    Returns the compiled function produced by hcl.build.
    """
    hcl.init(dtype)
    u = hcl.placeholder((Nx, Ny), "u")
    v = hcl.placeholder((Nx, Ny), "v")
    p = hcl.placeholder((Nx, Ny), "p")
    q = hcl.placeholder((Nx, Ny), "q")
    def kernel_adi(u, v, p, q):
        def sweep(u, v, p, q):
            # Column sweep: forward elimination (L1/L2) then the
            # back-substitution pass (L3) of the tridiagonal solve.
            with hcl.for_(1, Ny - 1, name="L1") as i:
                v[0][i] = hcl.scalar(1.0)
                p[i][0] = hcl.scalar(0.0)
                q[i][0] = v[0][i]
                with hcl.for_(1, Nx - 1, name="L2") as j:
                    p[i][j] = -1.0 * c / (a * p[i][j - 1] + b)
                    q[i][j] = (-1.0 * d * u[j][i - 1] + (1.0 + 2.0 * d) * u[j][i] - f * u[j][i + 1] - a * q[i][j - 1])/(a * p[i][j - 1] + b)
                v[Nx - 1][i] = hcl.scalar(1.0)
                with hcl.for_(Nx - 2, 0, -1, name="L3") as j:
                    v[j][i] = p[i][j] * v[j + 1][i] + q[i][j]
            # Row sweep: identical structure along the other axis (L4-L6).
            with hcl.for_(1, Nx - 1, name="L4") as i:
                u[i][0] = hcl.scalar(1.0)
                p[i][0] = hcl.scalar(0.0)
                q[i][0] = u[i][0]
                with hcl.for_(1, Ny - 1, name="L5") as j:
                    p[i][j] = -1.0 * f / (d * p[i][j - 1] + e)
                    q[i][j] = (-1.0 * a * v[i - 1][j] + (1.0 + 2 * a) * v[i][j] - c * v[i + 1][j] - d * q[i][j - 1])/(d * p[i][j - 1] + e)
                u[i][Ny - 1] = hcl.scalar(1.0)
                with hcl.for_(Ny - 2, 0, -1, name="L6") as j:
                    u[i][j] = p[i][j] * u[i][j + 1] + q[i][j]
        # Repeat the double sweep NT times.
        hcl.mutate((NT,), lambda m: sweep(u, v, p, q), "main_loop")
    s = hcl.create_schedule([u, v, p, q], kernel_adi)
    #### Apply customizations ####
    main_loop = kernel_adi.main_loop
    #s[main_loop].pipeline(main_loop.L1)
    #s[main_loop].pipeline(main_loop.L4)
    #### Apply customizations ####
    return hcl.build(s, target=target)
def adi_golden(N, TSTEPS, Dx, Dy, DT, B1, B2, mu1, mu2, a, b, c, d, e, f, u, v, p, q):
for t in range(TSTEPS):
## Column sweep
for i in range(1, N - 1):
v[0][i] = 1.0
p[i][0] = 0.0
q[i][0] = v[0][i]
for j in range(1, N - 1):
p[i][j] = -1.0 * c / (a * p[i][j - 1] + b)
q[i][j] = (-1.0 * d * u[j][i - 1] + (1.0 + 2.0 * d) * u[j][i] - f * u[j][i + 1] - a * q[i][j - 1])/(a * p[i][j - 1] + b)
v[N - 1][i] = 1.0
for j in range(N - 2, 0, -1):
v[j][i] = p[i][j] * v[j + 1][i] + q[i][j]
## Row sweep
for i in range(1, N - 1):
u[i][0] = 1.0
p[i][0] = 0.0
q[i][0] = u[i][0]
for j in range(1, N - 1):
p[i][j] = -1.0 * f / (d * p[i][j - 1] + e)
q[i][j] = (-1.0 * a * v[i - 1][j] + (1.0 + 2.0 * a) *
v[i][j] - c * v[i + 1][j] -d * q[i][j - 1])/(d * p[i][j - 1] + e)
u[i][N - 1] = 1.0
for j in range(N - 2, 0, -1):
u[i][j] = p[i][j] * u[i][j + 1] + q[i][j]
def main(Nx=20, Ny=20, NT=20, Dx=0.1, Dy=0.1, DT=0.1, B1=0.1, B2=0.1,
         mu1=0.1, mu2=0.1, a=0.1, b=0.1, c=0.1, d=0.1, e=0.1, f=0.1,
         dtype=hcl.Float(32), target=None):
    """Build the ADI kernel and run it once on random float32 inputs."""
    u = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    v = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    p = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    q = np.random.randint(10, size=(Nx, Ny)).astype(np.float32)
    # NOTE: `f` is rebound here, shadowing the coefficient parameter above.
    f = top_adi(Nx, Ny, NT, Dx, Dy, DT, B1, B2, mu1, mu2, a, b, c, d, e, f, dtype, target)
    f(u, v, p, q)
if __name__ == "__main__":
    main()
"heterocl.for_",
"heterocl.placeholder",
"numpy.random.randint",
"heterocl.build",
"heterocl.create_schedule",
"heterocl.init",
"heterocl.scalar",
"heterocl.Int",
"heterocl.Float"
] | [((621, 630), 'heterocl.Int', 'hcl.Int', ([], {}), '()\n', (628, 630), True, 'import heterocl as hcl\n'), ((651, 666), 'heterocl.init', 'hcl.init', (['dtype'], {}), '(dtype)\n', (659, 666), True, 'import heterocl as hcl\n'), ((675, 705), 'heterocl.placeholder', 'hcl.placeholder', (['(Nx, Ny)', '"""u"""'], {}), "((Nx, Ny), 'u')\n", (690, 705), True, 'import heterocl as hcl\n'), ((714, 744), 'heterocl.placeholder', 'hcl.placeholder', (['(Nx, Ny)', '"""v"""'], {}), "((Nx, Ny), 'v')\n", (729, 744), True, 'import heterocl as hcl\n'), ((753, 783), 'heterocl.placeholder', 'hcl.placeholder', (['(Nx, Ny)', '"""p"""'], {}), "((Nx, Ny), 'p')\n", (768, 783), True, 'import heterocl as hcl\n'), ((792, 822), 'heterocl.placeholder', 'hcl.placeholder', (['(Nx, Ny)', '"""q"""'], {}), "((Nx, Ny), 'q')\n", (807, 822), True, 'import heterocl as hcl\n'), ((2176, 2221), 'heterocl.create_schedule', 'hcl.create_schedule', (['[u, v, p, q]', 'kernel_adi'], {}), '([u, v, p, q], kernel_adi)\n', (2195, 2221), True, 'import heterocl as hcl\n'), ((2432, 2459), 'heterocl.build', 'hcl.build', (['s'], {'target': 'target'}), '(s, target=target)\n', (2441, 2459), True, 'import heterocl as hcl\n'), ((3761, 3774), 'heterocl.Float', 'hcl.Float', (['(32)'], {}), '(32)\n', (3770, 3774), True, 'import heterocl as hcl\n'), ((3799, 3835), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(Nx, Ny)'}), '(10, size=(Nx, Ny))\n', (3816, 3835), True, 'import numpy as np\n'), ((3863, 3899), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(Nx, Ny)'}), '(10, size=(Nx, Ny))\n', (3880, 3899), True, 'import numpy as np\n'), ((3927, 3963), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(Nx, Ny)'}), '(10, size=(Nx, Ny))\n', (3944, 3963), True, 'import numpy as np\n'), ((3991, 4027), 'numpy.random.randint', 'np.random.randint', (['(10)'], {'size': '(Nx, Ny)'}), '(10, size=(Nx, Ny))\n', (4008, 4027), True, 'import numpy as np\n'), ((905, 935), 'heterocl.for_', 
'hcl.for_', (['(1)', '(Ny - 1)'], {'name': '"""L1"""'}), "(1, Ny - 1, name='L1')\n", (913, 935), True, 'import heterocl as hcl\n'), ((968, 983), 'heterocl.scalar', 'hcl.scalar', (['(1.0)'], {}), '(1.0)\n', (978, 983), True, 'import heterocl as hcl\n'), ((1010, 1025), 'heterocl.scalar', 'hcl.scalar', (['(0.0)'], {}), '(0.0)\n', (1020, 1025), True, 'import heterocl as hcl\n'), ((1353, 1368), 'heterocl.scalar', 'hcl.scalar', (['(1.0)'], {}), '(1.0)\n', (1363, 1368), True, 'import heterocl as hcl\n'), ((1511, 1541), 'heterocl.for_', 'hcl.for_', (['(1)', '(Nx - 1)'], {'name': '"""L4"""'}), "(1, Nx - 1, name='L4')\n", (1519, 1541), True, 'import heterocl as hcl\n'), ((1574, 1589), 'heterocl.scalar', 'hcl.scalar', (['(1.0)'], {}), '(1.0)\n', (1584, 1589), True, 'import heterocl as hcl\n'), ((1616, 1631), 'heterocl.scalar', 'hcl.scalar', (['(0.0)'], {}), '(0.0)\n', (1626, 1631), True, 'import heterocl as hcl\n'), ((1957, 1972), 'heterocl.scalar', 'hcl.scalar', (['(1.0)'], {}), '(1.0)\n', (1967, 1972), True, 'import heterocl as hcl\n'), ((1081, 1111), 'heterocl.for_', 'hcl.for_', (['(1)', '(Nx - 1)'], {'name': '"""L2"""'}), "(1, Nx - 1, name='L2')\n", (1089, 1111), True, 'import heterocl as hcl\n'), ((1390, 1424), 'heterocl.for_', 'hcl.for_', (['(Nx - 2)', '(0)', '(-1)'], {'name': '"""L3"""'}), "(Nx - 2, 0, -1, name='L3')\n", (1398, 1424), True, 'import heterocl as hcl\n'), ((1687, 1717), 'heterocl.for_', 'hcl.for_', (['(1)', '(Ny - 1)'], {'name': '"""L5"""'}), "(1, Ny - 1, name='L5')\n", (1695, 1717), True, 'import heterocl as hcl\n'), ((1994, 2028), 'heterocl.for_', 'hcl.for_', (['(Ny - 2)', '(0)', '(-1)'], {'name': '"""L6"""'}), "(Ny - 2, 0, -1, name='L6')\n", (2002, 2028), True, 'import heterocl as hcl\n')] |
#
#Copyright Odin Solutions S.L. All Rights Reserved.
#
#SPDX-License-Identifier: Apache-2.0
#
import json
import uuid
#import os
from subprocess import Popen, PIPE
def processUri(uri):
    """Normalise a request URI so that it always carries the "/v1" prefix.

    Args:
        uri: Request path (normally a str).

    Returns:
        The URI with "/v1" prepended when the prefix (case-insensitive) is
        missing; the original value unchanged otherwise, or on any error
        (e.g. a non-string that cannot be concatenated).
    """
    try:
        # Compare case-insensitively against the literal "/V1" instead of
        # uppercasing the constant on every call.
        if not str(uri).upper().startswith("/V1"):
            uri = "/v1" + uri
        return uri
    except Exception:
        # Narrowed from a bare `except:` so SystemExit/KeyboardInterrupt
        # still propagate; anything else returns the input untouched.
        return uri
def validateMethodPath(method, path):
    """Placeholder validation hook: every method/path pair is accepted.

    Kept so callers can already wire the validation step in; the real
    checks are still to be implemented.
    """
    return True
def processBody(method, uri, body, sPAE, rPAE, noEncryptedKeys):
    """Run the encryption pipeline over a request body.

    Args:
        method: HTTP method of the request (reserved; unused by the stub).
        uri: Request URI (reserved; unused by the stub).
        body: Parsed JSON body to be processed.
        sPAE, rPAE: Encryption context objects forwarded to processCypher.
        noEncryptedKeys: Keys that must be left in clear text.

    Returns:
        (body, state): the processed body and True on success; the
        original, untouched body and False on any failure.
    """
    original_body = body
    try:
        # TODO: dispatch on method/uri once more endpoints are supported;
        # for now every request goes straight through the cipher step.
        body, state = processCypher(body, sPAE, rPAE, noEncryptedKeys)
        return body, state
    except Exception:
        # Narrowed from a bare `except:`; dead `state = False` removed.
        return original_body, False
#This process considers ONLY JSON-formatted bodies.
def processCypher(body, sPAE, rPAE, noEncryptedKeys):
    """Encrypt the selected attributes of a JSON body.

    Selects the attributes with obtainAttributesToCipher, then ciphers them
    with cipherBodyAttributes.

    Returns:
        (body, True) on success; the original, untouched body and False
        when either step fails or raises.
    """
    original_body = body
    try:
        encrypt_attributes, ok = obtainAttributesToCipher(body, sPAE, rPAE, noEncryptedKeys)
        if not ok:
            return original_body, False
        body, ok = cipherBodyAttributes(body, encrypt_attributes)
        if not ok:
            return original_body, False
        return body, True
    except Exception:
        # Narrowed from a bare `except:` so interrupts still propagate.
        return original_body, False
def obtainAttributesToCipher(body, sPAE, rPAE, noEncryptedKeys):
    """Collect the attribute paths of *body* that must be encrypted.

    Currently a stub: no attributes are ever selected, so the list is
    always empty and the success flag True.

    Returns:
        (attributes, state): the (empty) list of attribute paths and a
        boolean success flag.
    """
    attributes = []
    try:
        # TODO: inspect body/sPAE/rPAE/noEncryptedKeys to fill the list.
        return attributes, True
    except:
        return attributes, False
def getstatusoutput(command):
    """Run *command* as a subprocess and collect its result.

    Args:
        command: Program plus arguments, as accepted by subprocess.Popen.

    Returns:
        (returncode, stdout_bytes) of the finished process; stderr is
        captured but discarded.
    """
    proc = Popen(command, stdout=PIPE, stderr=PIPE)
    stdout_data, _stderr_data = proc.communicate()
    return (proc.returncode, stdout_data)
#This process considers ONLY JSON-formatted bodies.
def cipherBodyAttributes(body, encryptAttributes):
    """Encrypt, in place, the attributes of *body* named in *encryptAttributes*.

    Only JSON bodies are considered. The actual ciphering is still a TODO;
    each selected attribute is currently only printed.

    Returns:
        (body, True) on success; the untouched body and False when any
        attribute fails to process.
    """
    original_body = body
    try:
        for attribute in encryptAttributes:
            try:
                # TODO: real ciphering; for now the attribute is only logged.
                print(attribute)
            except Exception as e:
                print(e)
                # Bug fix: the original evaluated the bare tuple
                # `bodyBackUp, False` without returning it, silently
                # swallowing the failure and reporting success anyway.
                return original_body, False
        return body, True
    except Exception:
        return original_body, False
def errorHeaders(method=None, message=None):
    """Build the HTTP headers used for every error response.

    The same header set is returned regardless of *method* and *message*;
    both parameters are kept for signature compatibility with callers
    (per-method variants were sketched but never implemented).

    Returns:
        (headers, chunked): the header dict and False, meaning the API
        does not send a Transfer-Encoding: chunked response.
    """
    headers = {
        'Access-Control-Allow-Origin': '*',
        'Content-Type': 'application/json',
        # Correlator so one request can be traced across FIWARE components.
        'Fiware-Correlator': uuid.uuid4(),
    }
    # Second value is False because the API sends no Transfer-Encoding
    # header in its responses.
    return headers, False
def errorBody(method, code, title, details):
    """Build the JSON error payload returned to API clients.

    *method* is unused but kept for signature compatibility with callers.
    """
    payload = {"code": code, "error": title, "details": details}
    return payload
#def errorCode(method):
#
# return 400 | [
"subprocess.Popen",
"uuid.uuid4"
] | [((1561, 1601), 'subprocess.Popen', 'Popen', (['command'], {'stdout': 'PIPE', 'stderr': 'PIPE'}), '(command, stdout=PIPE, stderr=PIPE)\n', (1566, 1601), False, 'from subprocess import Popen, PIPE\n'), ((3024, 3036), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (3034, 3036), False, 'import uuid\n')] |
import os
import time
# Fail fast unless CUDA_VISIBLE_DEVICES names exactly one GPU.
vd = os.environ['CUDA_VISIBLE_DEVICES']  # raises KeyError if unset
print(f'gpu_id={vd}')
num_gpus = len(vd.split(','))  # ''.split(',') is [''], hence the extra vd == '' check
if vd == '' or num_gpus != 1:
    raise Exception('There must be one visible gpu.')
time.sleep(0.5)  # brief pause — presumably to stagger concurrent launches; TODO confirm
| [
"time.sleep"
] | [((201, 216), 'time.sleep', 'time.sleep', (['(0.5)'], {}), '(0.5)\n', (211, 216), False, 'import time\n')] |
"""This program will focus on developing a program package for the application of RFM (Recency, Frequency, Monetary Value)
model and output the customer classification results into a new document.
Name: <NAME>
Date:28/05/2020
"""
from app.saver import Saver
if __name__ == '__main__':
    # Regenerate the RFM classification outputs from scratch: reset first,
    # then write the per-customer CSV and the category chart (semantics of
    # each step live in app.saver.Saver).
    saver = Saver()
    saver.reset()
    saver.customer_to_csv()
    saver.customer_by_cate_chart()
| [
"app.saver.Saver"
] | [((300, 307), 'app.saver.Saver', 'Saver', ([], {}), '()\n', (305, 307), False, 'from app.saver import Saver\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import re
from typing import Iterable
from refinery.units.blockwise import Arg, BlockTransformationBase
from refinery.units.encoding.base import base as BaseUnit
from refinery.lib.argformats import number
from refinery.lib.patterns import formats
class pack(BlockTransformationBase):
    """
    Scans the input data for numeric constants and packs them into a binary
    format. This is useful to convert the textual representation of an array of
    numbers into its binary form. For example, `123,34,256,12,1,234` would be
    transformed into the byte sequence `7B22000C01EA`, where `256` was wrapped
    and packed as a null byte because the default block size is one byte. If
    the above sequence would be packed with options -EB2, the result would be
    equal to `007B00220100000C000100EA` in hexadecimal.
    """
    def __init__(self,
        base: Arg(type=number[2:36], help=(
            'Find only numbers in given base. Default of 0 means that '
            'common expressions for hexadecimal, octal and binary are '
            'accepted.')) = 0,
        prefix: Arg.Switch('-r', help='Add numeric prefixes like 0x, 0b, and 0o in reverse mode.') = False,
        strict: Arg.Switch('-s', help='Only parse integers that fit in one block of the given block size.') = False,
        width : Arg.Number('-w', help='Pad numbers with the specified amount of leading zeros.') = 0,
        bigendian=False, blocksize=1
    ):
        super().__init__(
            base=base,
            prefix=prefix,
            strict=strict,
            width=width,
            bigendian=bigendian,
            blocksize=blocksize
        )
    @property
    def bytestream(self):
        # Never allow bytes to be left unchunked.
        return False
    def reverse(self, data):
        # Unpack each block back into its textual representation in the
        # requested base (defaulting to decimal when base is 0/unset).
        base = self.args.base or 10
        width = self.args.width
        prefix = B''
        self.log_debug(F'using base {base:d}')
        if self.args.prefix:
            # Only the bases with a conventional Python prefix get one.
            prefix = {
                0x02: b'0b',
                0x08: b'0o',
                0x10: b'0x'
            }.get(base, B'')
        converter = BaseUnit(base, not self.args.bigendian)
        for n in self.chunk(data, raw=True):
            converted = converter.reverse(n)
            if width:
                converted = converted.rjust(width, B'0')
            if prefix:
                converted = prefix + converted
            yield converted
    def process(self, data):
        base: int = self.args.base
        strict: bool = self.args.strict
        def intb(literals: Iterable[bytes]):
            # Parse each matched literal and mask it to the block size;
            # in strict mode, values that do not fit are dropped.
            for literal in literals:
                if base == 0 and literal[0] == 0x30 and literal[1:].isdigit():
                    # A leading zero in auto-detect mode means octal.
                    literal = B'0o%s' % literal
                N = int(literal, base)
                M = N & self.fmask
                if strict and M != N:
                    continue
                yield M
        if base == 0:
            # Auto-detect: accept common hex/octal/binary spellings.
            pattern = formats.integer
        elif base <= 10:
            pattern = re.compile(B'[-+]?[0-%d]{1,64}' % (base - 1))
        else:
            # 0x57 + base yields the last valid letter digit (e.g. 'f' for 16).
            pattern = re.compile(B'[-+]?[0-9a-%c]{1,20}' % (0x57 + base), re.IGNORECASE)
        it = (m[0] for m in pattern.finditer(data))
        return self.unchunk(intb(it))
| [
"refinery.units.encoding.base.base",
"refinery.units.blockwise.Arg.Number",
"refinery.units.blockwise.Arg",
"refinery.units.blockwise.Arg.Switch",
"re.compile"
] | [((2157, 2196), 'refinery.units.encoding.base.base', 'BaseUnit', (['base', '(not self.args.bigendian)'], {}), '(base, not self.args.bigendian)\n', (2165, 2196), True, 'from refinery.units.encoding.base import base as BaseUnit\n'), ((912, 1076), 'refinery.units.blockwise.Arg', 'Arg', ([], {'type': 'number[2:36]', 'help': '"""Find only numbers in given base. Default of 0 means that common expressions for hexadecimal, octal and binary are accepted."""'}), "(type=number[2:36], help=\n 'Find only numbers in given base. Default of 0 means that common expressions for hexadecimal, octal and binary are accepted.'\n )\n", (915, 1076), False, 'from refinery.units.blockwise import Arg, BlockTransformationBase\n'), ((1133, 1220), 'refinery.units.blockwise.Arg.Switch', 'Arg.Switch', (['"""-r"""'], {'help': '"""Add numeric prefixes like 0x, 0b, and 0o in reverse mode."""'}), "('-r', help=\n 'Add numeric prefixes like 0x, 0b, and 0o in reverse mode.')\n", (1143, 1220), False, 'from refinery.units.blockwise import Arg, BlockTransformationBase\n'), ((1241, 1337), 'refinery.units.blockwise.Arg.Switch', 'Arg.Switch', (['"""-s"""'], {'help': '"""Only parse integers that fit in one block of the given block size."""'}), "('-s', help=\n 'Only parse integers that fit in one block of the given block size.')\n", (1251, 1337), False, 'from refinery.units.blockwise import Arg, BlockTransformationBase\n'), ((1358, 1443), 'refinery.units.blockwise.Arg.Number', 'Arg.Number', (['"""-w"""'], {'help': '"""Pad numbers with the specified amount of leading zeros."""'}), "('-w', help='Pad numbers with the specified amount of leading zeros.'\n )\n", (1368, 1443), False, 'from refinery.units.blockwise import Arg, BlockTransformationBase\n'), ((3053, 3098), 're.compile', 're.compile', (["(b'[-+]?[0-%d]{1,64}' % (base - 1))"], {}), "(b'[-+]?[0-%d]{1,64}' % (base - 1))\n", (3063, 3098), False, 'import re\n'), ((3135, 3199), 're.compile', 're.compile', (["(b'[-+]?[0-9a-%c]{1,20}' % (87 + base))", 
're.IGNORECASE'], {}), "(b'[-+]?[0-9a-%c]{1,20}' % (87 + base), re.IGNORECASE)\n", (3145, 3199), False, 'import re\n')] |
# Shows how to delete files
import glob
import os
from os import listdir
from os.path import isfile, join
def get_and_delete_symbol_files(path, symbol):
    """Delete every per-symbol CSV file (``<symbol>-*.csv``) under *path*."""
    pattern = path + "/" + symbol + "-*.csv"
    # glob.glob returns a fully materialised list, so deleting while
    # iterating over it is safe — no need for a separate collection pass.
    for match in glob.glob(pattern):
        if os.path.exists(match):
            os.remove(match)
def deletefile(filename):
    """Remove *filename* if it exists; silently do nothing otherwise."""
    if not os.path.exists(filename):
        return
    os.remove(filename)
def getsymbolfiles(mypath, symbol):
    """Return the per-symbol CSV files (``<symbol>-*.csv``) under *mypath*.

    Args:
        mypath: Directory to search.
        symbol: Ticker/symbol prefix of the file names.

    Returns:
        List of matching file paths (glob order, same as before).
    """
    pattern = mypath + "/" + symbol + "-*.csv"
    # glob.glob already returns a list; no need to copy it element by element.
    return glob.glob(pattern)
def deletesymbolfiles(filenames):
    """Delete each of the given files, skipping any that do not exist."""
    for path in filenames:
        deletefile(path)
if __name__ == "__main__":
    # NOTE: the BMTOP value is read but immediately overridden by the
    # hard-coded test path below.
    path = os.environ["BMTOP"]
    # path = path + "/bluemesa/tmp/fun/in/test"
    path = "/tmp/fun"
    #
    # This is the first way
    #
    # symbolfiles = getsymbolfiles(path, "ui")
    # print(symbolfiles)
    # for filename in symbolfiles:
    #     print(filename)
    #     deletefile(filename)
    get_and_delete_symbol_files(path, "rdfn")
| [
"os.remove",
"os.path.exists",
"glob.glob"
] | [((236, 254), 'glob.glob', 'glob.glob', (['pattern'], {}), '(pattern)\n', (245, 254), False, 'import glob\n'), ((422, 446), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (436, 446), False, 'import os\n'), ((591, 609), 'glob.glob', 'glob.glob', (['pattern'], {}), '(pattern)\n', (600, 609), False, 'import glob\n'), ((329, 353), 'os.path.exists', 'os.path.exists', (['filename'], {}), '(filename)\n', (343, 353), False, 'import os\n'), ((456, 475), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (465, 475), False, 'import os\n'), ((367, 386), 'os.remove', 'os.remove', (['filename'], {}), '(filename)\n', (376, 386), False, 'import os\n')] |
# Copyright (C) 2020 MoveAngel and MinaProject
#
# Licensed under the Raphielscape Public License, Version 1.d (the "License");
# you may not use this file except in compliance with the License.
#
# Multifunction memes
#
# Based code + improve from AdekMaulana and aidilaryanto
import asyncio
import os
import random
import re
import textwrap
import time
from asyncio.exceptions import TimeoutError
from glitch_this import ImageGlitcher
from PIL import Image, ImageDraw, ImageFont
from telethon import events, functions, types
from telethon.errors.rpcerrorlist import YouBlockedUserError
from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot
from userbot.events import register
from userbot.utils import check_media, progress
# Output path for the glitch GIF. NOTE(review): assumes
# TEMP_DOWNLOAD_DIRECTORY ends with a path separator — confirm.
Glitched = TEMP_DOWNLOAD_DIRECTORY + "glitch.gif"
# Matches a run of emoji/pictograph codepoints (unused in this chunk).
EMOJI_PATTERN = re.compile(
    "["
    "\U0001F1E0-\U0001F1FF"  # flags (iOS)
    "\U0001F300-\U0001F5FF"  # symbols & pictographs
    "\U0001F600-\U0001F64F"  # emoticons
    "\U0001F680-\U0001F6FF"  # transport & map symbols
    "\U0001F700-\U0001F77F"  # alchemical symbols
    "\U0001F780-\U0001F7FF"  # Geometric Shapes Extended
    "\U0001F800-\U0001F8FF"  # Supplemental Arrows-C
    "\U0001F900-\U0001F9FF"  # Supplemental Symbols and Pictographs
    "\U0001FA00-\U0001FA6F"  # Chess Symbols
    "\U0001FA70-\U0001FAFF"  # Symbols and Pictographs Extended-A
    "\U00002702-\U000027B0"  # Dingbats
    "]+"
)
@register(outgoing=True, pattern=r"^\.glitch(?: |$)(.*)")
async def glitch(event):
    """`.glitch [1-8]` — reply to an image/sticker and post a glitched GIF.

    The optional argument is the glitch intensity (defaults to 2, capped
    at 8). The result is uploaded, then unsaved from the user's GIFs and
    the temp files are removed.
    """
    if not event.reply_to_msg_id:
        await event.edit("`Não vou falhar um fantasma!`")
        return
    reply_message = await event.get_reply_message()
    if not reply_message.media:
        await event.edit("`responda a uma imagem/sticker`")
        return
    await bot.download_file(reply_message.media)
    await event.edit("`Baixando mídia..`")
    if event.is_reply:
        # check_media returns a bool for unsupported media kinds.
        data = await check_media(reply_message)
        if isinstance(data, bool):
            await event.edit("`Arquivos não suportados...`")
            return
    else:
        await event.edit("`Responda a qualquer mídia`")
        return
    try:
        # Intensity 1-8; anything else (missing/non-int/too big) falls
        # back to 2 via the ValueError path.
        value = int(event.pattern_match.group(1))
        if value > 8:
            raise ValueError
    except ValueError:
        value = 2
    await event.edit("```Falhando essa mídia```")
    await asyncio.sleep(2)
    file_name = "glitch.png"
    to_download_directory = TEMP_DOWNLOAD_DIRECTORY
    downloaded_file_name = os.path.join(to_download_directory, file_name)
    downloaded_file_name = await bot.download_media(
        reply_message,
        downloaded_file_name,
    )
    glitch_file = downloaded_file_name
    glitcher = ImageGlitcher()
    img = Image.open(glitch_file)
    # glitch_image with gif=True returns the list of animation frames.
    glitch_img = glitcher.glitch_image(img, value, color_offset=True, gif=True)
    DURATION = 200  # ms per frame
    LOOP = 0        # 0 = loop forever
    glitch_img[0].save(
        Glitched,
        format="GIF",
        append_images=glitch_img[1:],
        save_all=True,
        duration=DURATION,
        loop=LOOP,
    )
    await event.edit("`Enviando mídia falhada...`")
    c_time = time.time()
    nosave = await event.client.send_file(
        event.chat_id,
        Glitched,
        force_document=False,
        reply_to=event.reply_to_msg_id,
        progress_callback=lambda d, t: asyncio.get_event_loop().create_task(
            progress(d, t, event, c_time, "[UPLOAD]")
        ),
    )
    await event.delete()
    os.remove(Glitched)
    # Remove the just-sent GIF from the user's saved GIFs.
    await bot(
        functions.messages.SaveGifRequest(
            id=types.InputDocument(
                id=nosave.media.document.id,
                access_hash=nosave.media.document.access_hash,
                file_reference=nosave.media.document.file_reference,
            ),
            unsave=True,
        )
    )
    os.remove(glitch_file)
@register(outgoing=True, pattern=r"^\.mmf(?: |$)(.*)")
async def mim(event):
    """`.mmf top ; bottom` — reply to an image and post it as a meme.

    The argument is split on ';' into top and bottom captions by
    draw_meme_text; the result is sent as a WebP sticker and the temp
    files are removed.
    """
    if not event.reply_to_msg_id:
        await event.edit(
            "`Syntax: responda a uma imagem com .mmf` 'texto de cima' ; 'texto de baixo' "
        )
        return
    reply_message = await event.get_reply_message()
    if not reply_message.media:
        await event.edit("```responda a uma imagem/sticker/gif```")
        return
    await bot.download_file(reply_message.media)
    if event.is_reply:
        # check_media returns a bool for unsupported media kinds.
        data = await check_media(reply_message)
        if isinstance(data, bool):
            await event.edit("`Arquivos não suportados...`")
            return
    await event.edit(
        "```Hora da Transfiguração! Mwahaha Memificando essa imagem! (」゚ロ゚)」 ```"
    )
    await asyncio.sleep(5)
    text = event.pattern_match.group(1)
    if event.reply_to_msg_id:
        file_name = "meme.jpg"
        to_download_directory = TEMP_DOWNLOAD_DIRECTORY
        downloaded_file_name = os.path.join(to_download_directory, file_name)
        downloaded_file_name = await bot.download_media(
            reply_message,
            downloaded_file_name,
        )
        dls_loc = downloaded_file_name
        # Renders the caption(s) and returns the path of the WebP result.
        webp_file = await draw_meme_text(dls_loc, text)
        await event.client.send_file(
            event.chat_id, webp_file, reply_to=event.reply_to_msg_id
        )
        await event.delete()
        os.remove(webp_file)
        os.remove(dls_loc)
async def draw_meme_text(image_path, text):
    """Render meme captions onto the image at *image_path*.

    *text* is split on ';' into top and bottom captions (no ';' means
    top-only). Each caption is wrapped at 15 characters and drawn centred
    in white with a 1px black outline (the outline is faked by drawing the
    text four times offset by one pixel). The source file is deleted and
    the result is saved as a WebP.

    Returns:
        Path of the generated WebP file in TEMP_DOWNLOAD_DIRECTORY.
    """
    img = Image.open(image_path)
    os.remove(image_path)
    i_width, i_height = img.size
    # Font size scales with image width (70px at a 640px-wide image).
    m_font = ImageFont.truetype(
        "resources/MutantAcademyStyle.ttf", int((70 / 640) * i_width)
    )
    if ";" in text:
        upper_text, lower_text = text.split(";")
    else:
        upper_text = text
        lower_text = ""
    draw = ImageDraw.Draw(img)
    current_h, pad = 10, 5
    if upper_text:
        for u_text in textwrap.wrap(upper_text, width=15):
            u_width, u_height = draw.textsize(u_text, font=m_font)
            # Four black copies offset by 1px form the outline...
            draw.text(
                xy=(((i_width - u_width) / 2) - 1, int((current_h / 640) * i_width)),
                text=u_text,
                font=m_font,
                fill=(0, 0, 0),
            )
            draw.text(
                xy=(((i_width - u_width) / 2) + 1, int((current_h / 640) * i_width)),
                text=u_text,
                font=m_font,
                fill=(0, 0, 0),
            )
            draw.text(
                xy=((i_width - u_width) / 2, int(((current_h / 640) * i_width)) - 1),
                text=u_text,
                font=m_font,
                fill=(0, 0, 0),
            )
            draw.text(
                xy=(((i_width - u_width) / 2), int(((current_h / 640) * i_width)) + 1),
                text=u_text,
                font=m_font,
                fill=(0, 0, 0),
            )
            # ...then the white text goes on top.
            draw.text(
                xy=((i_width - u_width) / 2, int((current_h / 640) * i_width)),
                text=u_text,
                font=m_font,
                fill=(255, 255, 255),
            )
            current_h += u_height + pad
    if lower_text:
        # Same outline technique, anchored near the bottom edge.
        for l_text in textwrap.wrap(lower_text, width=15):
            u_width, u_height = draw.textsize(l_text, font=m_font)
            draw.text(
                xy=(
                    ((i_width - u_width) / 2) - 1,
                    i_height - u_height - int((20 / 640) * i_width),
                ),
                text=l_text,
                font=m_font,
                fill=(0, 0, 0),
            )
            draw.text(
                xy=(
                    ((i_width - u_width) / 2) + 1,
                    i_height - u_height - int((20 / 640) * i_width),
                ),
                text=l_text,
                font=m_font,
                fill=(0, 0, 0),
            )
            draw.text(
                xy=(
                    (i_width - u_width) / 2,
                    (i_height - u_height - int((20 / 640) * i_width)) - 1,
                ),
                text=l_text,
                font=m_font,
                fill=(0, 0, 0),
            )
            draw.text(
                xy=(
                    (i_width - u_width) / 2,
                    (i_height - u_height - int((20 / 640) * i_width)) + 1,
                ),
                text=l_text,
                font=m_font,
                fill=(0, 0, 0),
            )
            draw.text(
                xy=(
                    (i_width - u_width) / 2,
                    i_height - u_height - int((20 / 640) * i_width),
                ),
                text=l_text,
                font=m_font,
                fill=(255, 255, 255),
            )
            current_h += u_height + pad
    image_name = "memify.webp"
    webp_file = os.path.join(TEMP_DOWNLOAD_DIRECTORY, image_name)
    img.save(webp_file, "WebP")
    return webp_file
@register(outgoing=True, pattern=r"^\.q")
async def quotess(qotli):
    """`.q` — reply to a text message to turn it into a @QuotLyBot quote.

    Forwards the replied message to @QuotLyBot, waits for the sticker it
    sends back, forwards that into the chat and cleans up the bot
    conversation afterwards.
    """
    if qotli.fwd_from:
        return
    if not qotli.reply_to_msg_id:
        return await qotli.edit("```Responda a qualquer mensagem do usuário.```")
    reply_message = await qotli.get_reply_message()
    if not reply_message.text:
        return await qotli.edit("```Responda a uma mensagem de texto```")
    chat = "@QuotLyBot"
    if reply_message.sender.bot:
        return await qotli.edit("```Responda a uma mensagem de usuários reais.```")
    await qotli.edit("```Fazendo uma citação```")
    try:
        async with bot.conversation(chat) as conv:
            try:
                # 1031952739 is @QuotLyBot's user id; wait for its reply.
                response = conv.wait_event(
                    events.NewMessage(incoming=True, from_users=1031952739)
                )
                msg = await bot.forward_messages(chat, reply_message)
                response = await response
                """ - don't spam notif - """
                await bot.send_read_acknowledge(conv.chat_id)
            except YouBlockedUserError:
                return await qotli.reply(
                    "```Desbloqueie @QuotLyBot e tente novamente```"
                )
            # "Hi!" means the bot could not read the forwarded message
            # because of the sender's forward-privacy settings.
            if response.text.startswith("Hi!"):
                await qotli.edit(
                    "```Você pode gentilmente desativar suas configurações de privacidade de encaminhamento?```"
                )
            else:
                await qotli.delete()
                await bot.forward_messages(qotli.chat_id, response.message)
                await bot.send_read_acknowledge(qotli.chat_id)
            """ - cleanup chat after completed - """
            await qotli.client.delete_messages(conv.chat_id, [msg.id, response.id])
    except TimeoutError:
        await qotli.edit()
@register(outgoing=True, pattern=r"^.hz(:? |$)(.*)?")
async def hazz(hazmat):
    """`.hz [level]` — reply to media to run it through @hazmat_suit_bot.

    Forwards the replied media to the bot (optionally with a `/hazmat
    <level>` command, or a plain `/hazmat` for GIFs), downloads the bot's
    reply, re-sends it in the chat and cleans up the conversation.
    """
    await hazmat.edit("`Enviando informação...`")
    level = hazmat.pattern_match.group(2)
    if hazmat.fwd_from:
        return
    if not hazmat.reply_to_msg_id:
        await hazmat.edit("`WoWoWo Capt!, não vamos vestir um fantasma!...`")
        return
    reply_message = await hazmat.get_reply_message()
    if not reply_message.media:
        await hazmat.edit("`Palavras podem destruir qualquer coisa Capt!...`")
        return
    if reply_message.sender.bot:
        await hazmat.edit("`Responda a um usuário real...`")
        return
    chat = "@hazmat_suit_bot"
    await hazmat.edit("```Se vista Capt!, Vamos exterminar alguns vírus...```")
    message_id_to_reply = hazmat.message.reply_to_msg_id
    msg_reply = None
    async with hazmat.client.conversation(chat) as conv:
        try:
            msg = await conv.send_message(reply_message)
            # With a level (or for GIFs) the bot replies twice: an ack (r)
            # and then the processed media (response).
            if level:
                m = f"/hazmat {level}"
                msg_reply = await conv.send_message(m, reply_to=msg.id)
                r = await conv.get_response()
                response = await conv.get_response()
            elif reply_message.gif:
                m = f"/hazmat"
                msg_reply = await conv.send_message(m, reply_to=msg.id)
                r = await conv.get_response()
                response = await conv.get_response()
            else:
                response = await conv.get_response()
            """ - don't spam notif - """
            await bot.send_read_acknowledge(conv.chat_id)
        except YouBlockedUserError:
            await hazmat.reply("`Desbloqueie` @hazmat_suit_bot`...`")
            return
        # "I can't" is the bot's rejection message for unsupported GIFs.
        if response.text.startswith("I can't"):
            await hazmat.edit("`GIF não suportado...`")
            await hazmat.client.delete_messages(
                conv.chat_id, [msg.id, response.id, r.id, msg_reply.id]
            )
            return
        else:
            downloaded_file_name = await hazmat.client.download_media(
                response.media, TEMP_DOWNLOAD_DIRECTORY
            )
            await hazmat.client.send_file(
                hazmat.chat_id,
                downloaded_file_name,
                force_document=False,
                reply_to=message_id_to_reply,
            )
    """ - cleanup chat after completed - """
    if msg_reply is not None:
        await hazmat.client.delete_messages(
            conv.chat_id, [msg.id, msg_reply.id, r.id, response.id]
        )
    else:
        await hazmat.client.delete_messages(conv.chat_id, [msg.id, response.id])
    await hazmat.delete()
    return os.remove(downloaded_file_name)
@register(outgoing=True, pattern=r"^.df(:? |$)([1-8])?")
async def fryerrr(fry):
    """Deep-fry the replied-to image/sticker via @image_deepfrybot,
    with an optional fry level (1-8) taken from the command.
    """
    await fry.edit("`Enviando informação...`")
    level = fry.pattern_match.group(2)
    if fry.fwd_from:
        return
    if not fry.reply_to_msg_id:
        await fry.edit("`Responda a qualquer foto de mensagem do usuário...`")
        return
    reply_message = await fry.get_reply_message()
    if not reply_message.media:
        await fry.edit("`Nenhuma imagem encontrada para fritar...`")
        return
    if reply_message.sender.bot:
        await fry.edit("`Responda a um usuário real...`")
        return
    chat = "@image_deepfrybot"
    message_id_to_reply = fry.message.reply_to_msg_id
    # Initialise optional intermediates so cleanup can test for None
    # instead of probing ``msg_level`` with try/except NameError.
    msg_level = None
    r = None
    async with fry.client.conversation(chat) as conv:
        try:
            msg = await conv.send_message(reply_message)
            if level:
                m = f"/deepfry {level}"
                msg_level = await conv.send_message(m, reply_to=msg.id)
                r = await conv.get_response()
                response = await conv.get_response()
            else:
                response = await conv.get_response()
            """ - don't spam notif - """
            await bot.send_read_acknowledge(conv.chat_id)
        except YouBlockedUserError:
            await fry.reply("`Desbloqueie` @image_deepfrybot`...`")
            return
        to_delete = [x.id for x in (msg, response, r, msg_level) if x is not None]
        if response.text.startswith("Forward"):
            # BUG FIX: the original fell through after this edit and later
            # called ``os.remove(downloaded_file_name)`` with the name
            # unbound, raising NameError.  Clean up and bail out instead.
            await fry.edit("`Desative sua configuração de privacidade de encaminhamento...`")
            await fry.client.delete_messages(conv.chat_id, to_delete)
            return
        downloaded_file_name = await fry.client.download_media(
            response.media, TEMP_DOWNLOAD_DIRECTORY
        )
        await fry.client.send_file(
            fry.chat_id,
            downloaded_file_name,
            force_document=False,
            reply_to=message_id_to_reply,
        )
        """ - cleanup chat after completed - """
        await fry.client.delete_messages(conv.chat_id, to_delete)
    await fry.delete()
    return os.remove(downloaded_file_name)
@register(outgoing=True, pattern="^.sg(?: |$)(.*)")
async def lastname(steal):
    """Look up a user's previous names by sending their ID to
    @SangMataInfo_bot and relaying the bot's answer into the chat."""
    if steal.fwd_from:
        return
    if not steal.reply_to_msg_id:
        await steal.edit("```Responda a qualquer mensagem do usuário.```")
        return
    message = await steal.get_reply_message()
    chat = "@SangMataInfo_bot"
    user_id = message.sender.id
    # NOTE(review): shadows the ``id`` builtin; kept as-is on purpose.
    id = f"/search_id {user_id}"
    if message.sender.bot:
        await steal.edit("```Responda a mensagem de usuários reais.```")
        return
    await steal.edit("```Espere enquanto eu roubo alguns dados da NASA```")
    async with bot.conversation(chat) as conv:
        try:
            msg = await conv.send_message(id)
            # Presumably the bot sends an acknowledgement first and the
            # result second -- TODO confirm against @SangMataInfo_bot.
            r = await conv.get_response()
            response = await conv.get_response()
        except YouBlockedUserError:
            await steal.reply("```Desbloqueie @sangmatainfo_bot e tente novamente```")
            return
        if response.text.startswith("No records"):
            await steal.edit("```Nenhum registro encontrado para este usuário```")
            await steal.client.delete_messages(
                conv.chat_id, [msg.id, r.id, response.id]
            )
            return
        else:
            # One more message follows when records exist; consume it so
            # the whole exchange can be deleted below.
            respond = await conv.get_response()
            await steal.edit(f"{response.message}")
            await steal.client.delete_messages(
                conv.chat_id, [msg.id, r.id, response.id, respond.id]
            )
@register(outgoing=True, pattern="^.waifu(?: |$)(.*)")
async def waifu(animu):
    """Render the given (or replied-to) text on a random anime-girl
    sticker template through the @stickerizerbot inline bot."""
    text = animu.pattern_match.group(1)
    if not text:
        if animu.is_reply:
            text = (await animu.get_reply_message()).message
        else:
            # NOTE(review): Telethon messages normally expose ``edit``/
            # ``reply`` rather than ``answer`` -- confirm this call works.
            await animu.answer("`Nenhum texto fornecido, por isso a waifu fugiu.`")
            return
    # Sticker template IDs understood by the stickerizer bot.
    animus = [20, 32, 33, 40, 41, 42, 58]
    sticcers = await bot.inline_query(
        "stickerizerbot", f"#{random.choice(animus)}{(deEmojify(text))}"
    )
    await sticcers[0].click(
        animu.chat_id,
        reply_to=animu.reply_to_msg_id,
        silent=True if animu.is_reply else False,
        hide_via=True,
    )
    await animu.delete()
def deEmojify(inputString: str) -> str:
    """Return ``inputString`` with every character matched by the
    module-level ``EMOJI_PATTERN`` removed."""
    return re.sub(EMOJI_PATTERN, "", inputString)
CMD_HELP.update(
{
"glitch": ".glitch <1-8>\
\nUso: Responda a um sticker/imagem e envia com cmd.\
\no valor varia de 1 a 8 se não, ele usará o valor padrão que é 2"
}
)
CMD_HELP.update(
{
"memify": ".mmf textodecima ; textodebaixo\
\nUso: Responda a um sticker/imagem/gif e envia com cmd."
}
)
CMD_HELP.update(
{
"quotly": ".q \
\nUso: Transforma um texto em sticker."
}
)
CMD_HELP.update(
{
"hazmat": ".hz or .hz [flip, x2, rotate (graus), background (numero), black]"
"\nUso: Responda a uma imagem/sticker para se vestir!"
"\n@hazmat_suit_bot"
}
)
CMD_HELP.update(
{
"deepfry": ".df ou .df [level(1-8)]"
"\nUso: Frita a imagem/sticker da resposta."
"\n@image_deepfrybot"
}
)
CMD_HELP.update(
{
"sangmata": ".sg \
\nUso: Descobre nomes passados do usuário."
}
)
CMD_HELP.update(
{
"waifu": ".waifu \
\nUso: Melhore seu texto com belos modelos de anime girl. \
\n@StickerizerBot"
}
)
| [
"os.remove",
"userbot.bot.download_file",
"userbot.utils.check_media",
"textwrap.wrap",
"userbot.utils.progress",
"os.path.join",
"telethon.events.NewMessage",
"userbot.bot.send_read_acknowledge",
"PIL.ImageDraw.Draw",
"re.sub",
"glitch_this.ImageGlitcher",
"userbot.bot.conversation",
"async... | [((802, 925), 're.compile', 're.compile', (['"""[\U0001f1e0-🇿🌀-🗿😀-🙏🚀-\U0001f6ff🜀-\U0001f77f🞀-\U0001f7ff🠀-\U0001f8ff🤀-🧿🨀-\U0001fa6f🩰-\U0001faff✂-➰]+"""'], {}), "(\n '[\\U0001f1e0-🇿🌀-🗿😀-🙏🚀-\\U0001f6ff🜀-\\U0001f77f🞀-\\U0001f7ff🠀-\\U0001f8ff🤀-🧿🨀-\\U0001fa6f🩰-\\U0001faff✂-➰]+'\n )\n", (812, 925), False, 'import re\n'), ((1407, 1463), 'userbot.events.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^\\\\.glitch(?: |$)(.*)"""'}), "(outgoing=True, pattern='^\\\\.glitch(?: |$)(.*)')\n", (1415, 1463), False, 'from userbot.events import register\n'), ((3788, 3841), 'userbot.events.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^\\\\.mmf(?: |$)(.*)"""'}), "(outgoing=True, pattern='^\\\\.mmf(?: |$)(.*)')\n", (3796, 3841), False, 'from userbot.events import register\n'), ((8761, 8801), 'userbot.events.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^\\\\.q"""'}), "(outgoing=True, pattern='^\\\\.q')\n", (8769, 8801), False, 'from userbot.events import register\n'), ((10540, 10591), 'userbot.events.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^.hz(:? |$)(.*)?"""'}), "(outgoing=True, pattern='^.hz(:? |$)(.*)?')\n", (10548, 10591), False, 'from userbot.events import register\n'), ((13285, 13339), 'userbot.events.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^.df(:? |$)([1-8])?"""'}), "(outgoing=True, pattern='^.df(:? 
|$)([1-8])?')\n", (13293, 13339), False, 'from userbot.events import register\n'), ((15562, 15612), 'userbot.events.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^.sg(?: |$)(.*)"""'}), "(outgoing=True, pattern='^.sg(?: |$)(.*)')\n", (15570, 15612), False, 'from userbot.events import register\n'), ((16984, 17037), 'userbot.events.register', 'register', ([], {'outgoing': '(True)', 'pattern': '"""^.waifu(?: |$)(.*)"""'}), "(outgoing=True, pattern='^.waifu(?: |$)(.*)')\n", (16992, 17037), False, 'from userbot.events import register\n'), ((17774, 17970), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'glitch\':\n """.glitch <1-8> \nUso: Responda a um sticker/imagem e envia com cmd. \no valor varia de 1 a 8 se não, ele usará o valor padrão que é 2"""\n }'], {}), '({\'glitch\':\n """.glitch <1-8> \nUso: Responda a um sticker/imagem e envia com cmd. \no valor varia de 1 a 8 se não, ele usará o valor padrão que é 2"""\n })\n', (17789, 17970), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((17985, 18127), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'memify\':\n """.mmf textodecima ; textodebaixo \nUso: Responda a um sticker/imagem/gif e envia com cmd."""\n }'], {}), '({\'memify\':\n """.mmf textodecima ; textodebaixo \nUso: Responda a um sticker/imagem/gif e envia com cmd."""\n })\n', (18000, 18127), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((18139, 18228), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'quotly\': """.q \nUso: Transforma um texto em sticker."""}'], {}), '({\'quotly\':\n """.q \nUso: Transforma um texto em sticker."""})\n', (18154, 18228), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((18245, 18422), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'hazmat\':\n """.hz or .hz [flip, x2, rotate (graus), background (numero), black]\nUso: Responda a uma imagem/sticker para se vestir!\n@hazmat_suit_bot"""\n }'], {}), '({\'hazmat\':\n """.hz or 
.hz [flip, x2, rotate (graus), background (numero), black]\nUso: Responda a uma imagem/sticker para se vestir!\n@hazmat_suit_bot"""\n })\n', (18260, 18422), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((18455, 18582), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'deepfry\':\n """.df ou .df [level(1-8)]\nUso: Frita a imagem/sticker da resposta.\n@image_deepfrybot"""\n }'], {}), '({\'deepfry\':\n """.df ou .df [level(1-8)]\nUso: Frita a imagem/sticker da resposta.\n@image_deepfrybot"""\n })\n', (18470, 18582), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((18616, 18712), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'sangmata\': """.sg \nUso: Descobre nomes passados do usuário."""}'], {}), '({\'sangmata\':\n """.sg \nUso: Descobre nomes passados do usuário."""})\n', (18631, 18712), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((18730, 18873), 'userbot.CMD_HELP.update', 'CMD_HELP.update', (['{\'waifu\':\n """.waifu \nUso: Melhore seu texto com belos modelos de anime girl. \n@StickerizerBot"""\n }'], {}), '({\'waifu\':\n """.waifu \nUso: Melhore seu texto com belos modelos de anime girl. 
\n@StickerizerBot"""\n })\n', (18745, 18873), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((2451, 2497), 'os.path.join', 'os.path.join', (['to_download_directory', 'file_name'], {}), '(to_download_directory, file_name)\n', (2463, 2497), False, 'import os\n'), ((2664, 2679), 'glitch_this.ImageGlitcher', 'ImageGlitcher', ([], {}), '()\n', (2677, 2679), False, 'from glitch_this import ImageGlitcher\n'), ((2690, 2713), 'PIL.Image.open', 'Image.open', (['glitch_file'], {}), '(glitch_file)\n', (2700, 2713), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((3068, 3079), 'time.time', 'time.time', ([], {}), '()\n', (3077, 3079), False, 'import time\n'), ((3411, 3430), 'os.remove', 'os.remove', (['Glitched'], {}), '(Glitched)\n', (3420, 3430), False, 'import os\n'), ((3762, 3784), 'os.remove', 'os.remove', (['glitch_file'], {}), '(glitch_file)\n', (3771, 3784), False, 'import os\n'), ((5353, 5375), 'PIL.Image.open', 'Image.open', (['image_path'], {}), '(image_path)\n', (5363, 5375), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((5380, 5401), 'os.remove', 'os.remove', (['image_path'], {}), '(image_path)\n', (5389, 5401), False, 'import os\n'), ((5684, 5703), 'PIL.ImageDraw.Draw', 'ImageDraw.Draw', (['img'], {}), '(img)\n', (5698, 5703), False, 'from PIL import Image, ImageDraw, ImageFont\n'), ((8655, 8704), 'os.path.join', 'os.path.join', (['TEMP_DOWNLOAD_DIRECTORY', 'image_name'], {}), '(TEMP_DOWNLOAD_DIRECTORY, image_name)\n', (8667, 8704), False, 'import os\n'), ((13250, 13281), 'os.remove', 'os.remove', (['downloaded_file_name'], {}), '(downloaded_file_name)\n', (13259, 13281), False, 'import os\n'), ((15527, 15558), 'os.remove', 'os.remove', (['downloaded_file_name'], {}), '(downloaded_file_name)\n', (15536, 15558), False, 'import os\n'), ((17733, 17771), 're.sub', 're.sub', (['EMOJI_PATTERN', '""""""', 'inputString'], {}), "(EMOJI_PATTERN, '', inputString)\n", (17739, 17771), False, 'import re\n'), ((1765, 1803), 
'userbot.bot.download_file', 'bot.download_file', (['reply_message.media'], {}), '(reply_message.media)\n', (1782, 1803), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((2326, 2342), 'asyncio.sleep', 'asyncio.sleep', (['(2)'], {}), '(2)\n', (2339, 2342), False, 'import asyncio\n'), ((2531, 2586), 'userbot.bot.download_media', 'bot.download_media', (['reply_message', 'downloaded_file_name'], {}), '(reply_message, downloaded_file_name)\n', (2549, 2586), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((4218, 4256), 'userbot.bot.download_file', 'bot.download_file', (['reply_message.media'], {}), '(reply_message.media)\n', (4235, 4256), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((5249, 5269), 'os.remove', 'os.remove', (['webp_file'], {}), '(webp_file)\n', (5258, 5269), False, 'import os\n'), ((5278, 5296), 'os.remove', 'os.remove', (['dls_loc'], {}), '(dls_loc)\n', (5287, 5296), False, 'import os\n'), ((5772, 5807), 'textwrap.wrap', 'textwrap.wrap', (['upper_text'], {'width': '(15)'}), '(upper_text, width=15)\n', (5785, 5807), False, 'import textwrap\n'), ((7026, 7061), 'textwrap.wrap', 'textwrap.wrap', (['lower_text'], {'width': '(15)'}), '(lower_text, width=15)\n', (7039, 7061), False, 'import textwrap\n'), ((16150, 16172), 'userbot.bot.conversation', 'bot.conversation', (['chat'], {}), '(chat)\n', (16166, 16172), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((1891, 1917), 'userbot.utils.check_media', 'check_media', (['reply_message'], {}), '(reply_message)\n', (1902, 1917), False, 'from userbot.utils import check_media, progress\n'), ((4301, 4327), 'userbot.utils.check_media', 'check_media', (['reply_message'], {}), '(reply_message)\n', (4312, 4327), False, 'from userbot.utils import check_media, progress\n'), ((4580, 4596), 'asyncio.sleep', 'asyncio.sleep', (['(5)'], {}), '(5)\n', (4593, 4596), False, 'import asyncio\n'), ((4805, 4851), 'os.path.join', 
'os.path.join', (['to_download_directory', 'file_name'], {}), '(to_download_directory, file_name)\n', (4817, 4851), False, 'import os\n'), ((9358, 9380), 'userbot.bot.conversation', 'bot.conversation', (['chat'], {}), '(chat)\n', (9374, 9380), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((4893, 4948), 'userbot.bot.download_media', 'bot.download_media', (['reply_message', 'downloaded_file_name'], {}), '(reply_message, downloaded_file_name)\n', (4911, 4948), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((12079, 12118), 'userbot.bot.send_read_acknowledge', 'bot.send_read_acknowledge', (['conv.chat_id'], {}), '(conv.chat_id)\n', (12104, 12118), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((14457, 14496), 'userbot.bot.send_read_acknowledge', 'bot.send_read_acknowledge', (['conv.chat_id'], {}), '(conv.chat_id)\n', (14482, 14496), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((3504, 3657), 'telethon.types.InputDocument', 'types.InputDocument', ([], {'id': 'nosave.media.document.id', 'access_hash': 'nosave.media.document.access_hash', 'file_reference': 'nosave.media.document.file_reference'}), '(id=nosave.media.document.id, access_hash=nosave.media.\n document.access_hash, file_reference=nosave.media.document.file_reference)\n', (3523, 3657), False, 'from telethon import events, functions, types\n'), ((9471, 9526), 'telethon.events.NewMessage', 'events.NewMessage', ([], {'incoming': '(True)', 'from_users': '(1031952739)'}), '(incoming=True, from_users=1031952739)\n', (9488, 9526), False, 'from telethon import events, functions, types\n'), ((9573, 9614), 'userbot.bot.forward_messages', 'bot.forward_messages', (['chat', 'reply_message'], {}), '(chat, reply_message)\n', (9593, 9614), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((9724, 9763), 'userbot.bot.send_read_acknowledge', 'bot.send_read_acknowledge', (['conv.chat_id'], {}), 
'(conv.chat_id)\n', (9749, 9763), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((10223, 10276), 'userbot.bot.forward_messages', 'bot.forward_messages', (['qotli.chat_id', 'response.message'], {}), '(qotli.chat_id, response.message)\n', (10243, 10276), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((10299, 10339), 'userbot.bot.send_read_acknowledge', 'bot.send_read_acknowledge', (['qotli.chat_id'], {}), '(qotli.chat_id)\n', (10324, 10339), False, 'from userbot import CMD_HELP, TEMP_DOWNLOAD_DIRECTORY, bot\n'), ((17435, 17456), 'random.choice', 'random.choice', (['animus'], {}), '(animus)\n', (17448, 17456), False, 'import random\n'), ((3323, 3364), 'userbot.utils.progress', 'progress', (['d', 't', 'event', 'c_time', '"""[UPLOAD]"""'], {}), "(d, t, event, c_time, '[UPLOAD]')\n", (3331, 3364), False, 'from userbot.utils import check_media, progress\n'), ((3273, 3297), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (3295, 3297), False, 'import asyncio\n')] |
# -*- coding: utf-8 -*-
"""Jinja2 template engine."""
import os
import re
from jinja2 import Environment
from jinja2.exceptions import UndefinedError, TemplateSyntaxError
from piecutter.engines import Engine
from piecutter.exceptions import TemplateError
def path_join(*args, **kwargs):
    """Return ``args`` joined as file paths like with os.path.join().

    >>> from piecutter.engines.jinja import path_join
    >>> path_join('foo', 'bar')
    'foo/bar'

    Paths are normalized.

    >>> path_join('foo', '..', 'bar')
    'bar'

    You can pass an extra keyword argument 'target_os': a value in os.name
    capabilities.

    >>> path_join('foo', 'bar', target_os='posix')
    'foo/bar'

    Currently, this is using os.path, i.e. the separator and rules for the
    computer running Jinja2 engine. A NotImplementedError exception will be
    raised if 'os' argument differs from 'os.name'.

    >>> import os
    >>> os.name == 'posix'  # Sorry if you are running tests on another OS.
    True

    >>> path_join('foo', 'bar', target_os='nt')  # Doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    NotImplementedError: Cannot join path with "nt" style. Host OS is "posix".
    """
    target_os = kwargs.get('target_os', None)
    # BUG FIX: compare strings with ``!=`` rather than ``is not``.  The
    # identity check only worked because short literals happen to be
    # interned by CPython; equality is the correct, portable test.
    if target_os and target_os != os.name:
        raise NotImplementedError('Cannot join path with "{target}" style. '
                                  'Host OS is "{host}".'.format(
                                      target=target_os,
                                      host=os.name))
    result = os.path.join(*args)
    result = path_normalize(result, target_os)
    return result


def path_normalize(path, target_os=None):
    """Normalize path (like os.path.normpath) for given os.

    >>> from piecutter.engines.jinja import path_normalize
    >>> path_normalize('foo/bar')
    'foo/bar'
    >>> path_normalize('foo/toto/../bar')
    'foo/bar'

    Currently, this is using os.path, i.e. the separator and rules for the
    computer running Jinja2 engine. A NotImplementedError exception will be
    raised if 'os' argument differs from 'os.name'.

    >>> import os
    >>> os.name == 'posix'  # Sorry if you are running tests on another OS.
    True

    >>> path_normalize('foo/bar', target_os='nt')  # Doctest: +ELLIPSIS
    Traceback (most recent call last):
        ...
    NotImplementedError: Cannot join path with "nt" style. Host OS is "posix".
    """
    # Same ``!=`` fix as in :func:`path_join` above.
    if target_os and target_os != os.name:
        raise NotImplementedError('Cannot join path with "{target}" style. '
                                  'Host OS is "{host}".'.format(
                                      target=target_os,
                                      host=os.name))
    return os.path.normpath(path)
class Jinja2Engine(Engine):
    """Template engine backed by Jinja2."""

    def __init__(self, environment=None):
        # Fall back to a default Jinja2 environment when none is given.
        self.environment = Environment() if environment is None else environment
        self.register_environment_functions()

    def register_environment_functions(self):
        """Expose the module's path helpers as Jinja2 template globals."""
        helpers = (('path_join', path_join),
                   ('path_normalize', path_normalize))
        for name, func in helpers:
            self.environment.globals[name] = func

    def render(self, template, context):
        """Render ``template`` against ``context``, wrapping Jinja2 errors
        in :class:`piecutter.exceptions.TemplateError`."""
        try:
            compiled = self.environment.from_string(template)
        except TemplateSyntaxError as exc:
            raise TemplateError(exc)
        try:
            return compiled.render(**context)
        except (UndefinedError, TypeError) as exc:
            raise TemplateError(exc)

    def match(self, template, context):
        """Return a ratio showing whether template looks like using engine.

        >>> engine = Jinja2Engine()
        >>> engine.match('', {})
        0.0
        >>> engine.match('{# Jinja2 #}', {})
        1.0
        >>> engine.match('Not shebang {# Jinja2 #}', {})
        0.0
        >>> engine.match('{{ key }}', {})
        0.9

        """
        # An explicit shebang comment is a certain match; a plain variable
        # reference is only a strong hint.
        if template.startswith('{# Jinja2 #}'):
            return 1.0
        return 0.9 if re.search(r'{{ .+ }}', template) else 0.0
| [
"piecutter.exceptions.TemplateError",
"jinja2.Environment",
"os.path.normpath",
"re.search",
"os.path.join"
] | [((1570, 1589), 'os.path.join', 'os.path.join', (['*args'], {}), '(*args)\n', (1582, 1589), False, 'import os\n'), ((2749, 2771), 'os.path.normpath', 'os.path.normpath', (['path'], {}), '(path)\n', (2765, 2771), False, 'import os\n'), ((4207, 4238), 're.search', 're.search', (['"""{{ .+ }}"""', 'template'], {}), "('{{ .+ }}', template)\n", (4216, 4238), False, 'import re\n'), ((2936, 2949), 'jinja2.Environment', 'Environment', ([], {}), '()\n', (2947, 2949), False, 'from jinja2 import Environment\n'), ((3520, 3536), 'piecutter.exceptions.TemplateError', 'TemplateError', (['e'], {}), '(e)\n', (3533, 3536), False, 'from piecutter.exceptions import TemplateError\n'), ((3663, 3679), 'piecutter.exceptions.TemplateError', 'TemplateError', (['e'], {}), '(e)\n', (3676, 3679), False, 'from piecutter.exceptions import TemplateError\n')] |
# Copyright 2020 Cognite AS
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Module containing tools for pushers for metric reporting.
The classes in this module scrape the default Prometheus registry and uploads it periodically to either a Prometheus
push gateway, or to CDF as time series.
The ``BaseMetrics`` class forms the basis for a metrics collection for an extractor, containing some general metrics
that all extractors should report. To create your own set of metrics, subclass this class and populate it with
extractor-specific metrics, as such:
.. code-block:: python
class MyMetrics(BaseMetrics):
def __init__(self):
super().__init__(extractor_name="my_extractor", extractor_version=__version__)
self.a_counter = Counter("my_extractor_example_counter", "An example counter")
...
"""
import logging
import os
import threading
from abc import ABC, abstractmethod
from time import sleep
from typing import Any, Callable, Dict, List, Optional, T, Tuple, Type, TypeVar, Union

import arrow
import psutil
from prometheus_client import Gauge, Info, Metric
from prometheus_client.core import REGISTRY
from prometheus_client.exposition import basic_auth_handler, delete_from_gateway, pushadd_to_gateway

from cognite.client import CogniteClient
from cognite.client.data_classes import Asset, TimeSeries
from cognite.client.exceptions import CogniteDuplicatedError

from .util import ensure_time_series
# Cache of metrics-collection singletons, keyed by their class.
_metrics_singularities = {}

# Define a local TypeVar: ``from typing import T`` relies on an
# undocumented private alias of the typing module, which is not part of
# its public API and may disappear between Python versions.
T = TypeVar("T")


def safe_get(cls: Type[T]) -> T:
    """
    A factory for instances of metrics collections.

    Since Prometheus doesn't allow multiple metrics with the same name, any
    subclass of BaseMetrics must never be created more than once. This
    function creates an instance of the given class on the first call and
    stores it, any subsequent calls with the same class as argument will
    return the same instance.

    .. code-block:: python

        >>> a = safe_get(MyMetrics)  # This will create a new instance of MyMetrics
        >>> b = safe_get(MyMetrics)  # This will return the same instance
        >>> a is b
        True

    Args:
        cls: Metrics class to either create or get a cached version of

    Returns:
        An instance of given class
    """
    global _metrics_singularities

    if cls not in _metrics_singularities:
        _metrics_singularities[cls] = cls()

    return _metrics_singularities[cls]
class BaseMetrics:
    """
    Base collection of extractor metrics.

    Instantiating the class also spawns a daemon thread that regularly
    samples process information and updates the ``process_*`` gauges.

    Subclass this class to build a metrics collection for a concrete
    extractor.  **Only one instance of this class (or any subclass) may
    exist at a time.**

    Metrics included:

    * startup:                Startup time (unix epoch)
    * finish:                 Finish time (unix epoch)
    * process_num_threads     Number of active threads. Set automatically.
    * process_memory_bytes    Memory usage of extractor. Set automatically.
    * process_cpu_percent     CPU usage of extractor. Set automatically.

    Args:
        extractor_name: Name of extractor, used to prefix metric names
        extractor_version: Version string reported through the info metric
        process_scrape_interval: Seconds between samples for the ``process_*`` gauges
    """

    def __init__(self, extractor_name: str, extractor_version: str, process_scrape_interval: float = 15):
        # Metric names may not contain spaces.
        extractor_name = extractor_name.strip().replace(" ", "_")

        self.startup = Gauge(f"{extractor_name}_start_time", "Timestamp (seconds) of when the extractor last started")
        self.finish = Gauge(
            f"{extractor_name}_finish_time", "Timestamp (seconds) of then the extractor last finished cleanly"
        )

        self._process = psutil.Process(os.getpid())

        self.process_num_threads = Gauge(f"{extractor_name}_num_threads", "Number of threads")
        self.process_memory_bytes = Gauge(f"{extractor_name}_memory_bytes", "Memory usage in bytes")
        self.process_cpu_percent = Gauge(f"{extractor_name}_cpu_percent", "CPU usage percent")

        self.info = Info(f"{extractor_name}_info", "Information about running extractor")
        self.info.info({"extractor_version": extractor_version, "extractor_type": extractor_name})

        self.process_scrape_interval = process_scrape_interval
        self._start_proc_collector()

        self.startup.set_to_current_time()

    def _proc_collect(self) -> None:
        """Sample process statistics forever, sleeping between samples."""
        proc = self._process
        while True:
            self.process_num_threads.set(proc.num_threads())
            self.process_memory_bytes.set(proc.memory_info().rss)
            self.process_cpu_percent.set(proc.cpu_percent())

            sleep(self.process_scrape_interval)

    def _start_proc_collector(self) -> None:
        """Spawn the daemon thread that runs :meth:`_proc_collect`."""
        collector = threading.Thread(target=self._proc_collect, name="ProcessMetricsCollector", daemon=True)
        collector.start()
class AbstractMetricsPusher(ABC):
    """
    Base class for metric pushers. Metric pushers spawn a thread that
    routinely pushes metrics to a configured destination.

    Contains all the logic for starting and running threads.

    Args:
        push_interval: Seconds between each upload call
        thread_name: Name of thread to start. If omitted, a standard name such as Thread-4 will be generated.
    """

    def __init__(self, push_interval: Optional[int] = None, thread_name: Optional[str] = None):
        self.push_interval = push_interval
        # The original assigned ``thread_name`` twice; once is enough.
        self.thread_name = thread_name

        self.thread: Optional[threading.Thread] = None
        self.stopping = threading.Event()

        self.logger = logging.getLogger(__name__)

    @abstractmethod
    def _push_to_server(self) -> None:
        """
        Push metrics to a remote server, to be overridden in subclasses.
        """
        pass

    def _run(self) -> None:
        """
        Run the push loop until :meth:`stop` is called.
        """
        while not self.stopping.is_set():
            self._push_to_server()
            self.stopping.wait(self.push_interval)

    def start(self) -> None:
        """
        Starts a daemon thread that pushes the default registry to the
        configured destination at regular intervals.
        """
        self.stopping.clear()
        self.thread = threading.Thread(target=self._run, daemon=True, name=self.thread_name)
        self.thread.start()

    def stop(self) -> None:
        """
        Stop the push loop, flushing metrics one final time.
        """
        # Make sure everything is pushed
        self._push_to_server()
        self.stopping.set()

    def __enter__(self) -> "AbstractMetricsPusher":
        """
        Wraps around start method, for use as context manager

        Returns:
            self
        """
        self.start()
        return self

    def __exit__(self, exc_type, exc_val, exc_tb) -> None:
        """
        Wraps around stop method, for use as context manager

        Args:
            exc_type: Exception type
            exc_val: Exception value
            exc_tb: Traceback
        """
        self.stop()
class PrometheusPusher(AbstractMetricsPusher):
    """
    Pusher to a Prometheus push gateway.

    Args:
        job_name: Prometheus job name
        username: Push gateway credentials (user name)
        password: Push gateway credentials (password)
        url: URL (with portnum) of push gateway
        push_interval: Seconds between each upload call
        thread_name: Name of thread to start. If omitted, a standard name such as Thread-4 will be generated.
    """

    def __init__(
        self,
        job_name: str,
        url: str,
        push_interval: int,
        username: Optional[str] = None,
        password: Optional[str] = None,
        thread_name: Optional[str] = None,
    ):
        super(PrometheusPusher, self).__init__(push_interval, thread_name)

        self.job_name = job_name
        self.username = username
        self.password = password

        self.url = url

    def _auth_handler(self, url: str, method: str, timeout: int, headers: Dict[str, str], data: Any) -> Callable:
        """
        Returns a authentication handler against the Prometheus Pushgateway to use in the pushadd_to_gateway method.

        Args:
            url:      Push gateway
            method:   HTTP method
            timeout:  Request timeout (seconds)
            headers:  HTTP headers
            data:     Data to send

        Returns:
            prometheus_client.exposition.basic_auth_handler: A authentication handler based on this client.
        """
        return basic_auth_handler(url, method, timeout, headers, data, self.username, self.password)

    def _push_to_server(self) -> None:
        """
        Push the default metrics registry to the configured Prometheus Pushgateway.
        """
        if not self.url or not self.job_name:
            return

        try:
            pushadd_to_gateway(self.url, job=self.job_name, registry=REGISTRY, handler=self._auth_handler)
        except OSError as exp:
            self.logger.warning("Failed to push metrics to %s: %s", self.url, str(exp))
        except Exception:
            # BUG FIX: was a bare ``except:``, which would also swallow
            # SystemExit and KeyboardInterrupt.
            self.logger.exception("Failed to push metrics to %s", self.url)
        else:
            # Only report success when the push actually succeeded (the
            # original logged this unconditionally).
            self.logger.debug("Pushed metrics to %s", self.url)

    def clear_gateway(self) -> None:
        """
        Delete metrics stored at the gateway (reset gateway).
        """
        delete_from_gateway(self.url, job=self.job_name, handler=self._auth_handler)
        self.logger.debug("Deleted metrics from push gateway %s", self.url)
class CognitePusher(AbstractMetricsPusher):
    """
    Pusher to CDF. Creates time series in CDF for all Gauges and Counters in the default Prometheus registry.

    Optional contextualization with an Asset to make the time series observable in Asset Data Insight. The given asset
    will be created at root level in the tenant if it doesn't already exist.

    Args:
        cdf_client: The CDF tenant to upload time series to
        external_id_prefix: Unique external ID prefix for this pusher.
        push_interval: Seconds between each upload call
        asset: Optional contextualization.
        thread_name: Name of thread to start. If omitted, a standard name such as Thread-4 will be generated.
    """

    def __init__(
        self,
        cdf_client: CogniteClient,
        external_id_prefix: str,
        push_interval: int,
        asset: Optional[Asset] = None,
        thread_name: Optional[str] = None,
    ):
        super(CognitePusher, self).__init__(push_interval, thread_name)

        self.cdf_client = cdf_client
        self.asset = asset
        self.external_id_prefix = external_id_prefix

        self._init_cdf()
        # NOTE(review): ``login.status()`` is deprecated in newer
        # cognite-sdk versions -- confirm the pinned SDK still supports it.
        self._cdf_project = cdf_client.login.status().project

    def _init_cdf(self) -> None:
        """
        Initialize the CDF tenant with the necessary time series and asset.
        """
        time_series: List[TimeSeries] = []

        if self.asset is not None:
            # Ensure that asset exist, and retrieve internal ID
            try:
                asset = self.cdf_client.assets.create(self.asset)
            except CogniteDuplicatedError:
                # Asset already exists; fetch it by its external ID instead.
                asset = self.cdf_client.assets.retrieve(external_id=self.asset.external_id)

            asset_id = asset.id if asset is not None else None
        else:
            asset_id = None

        # Mirror every gauge/counter in the default registry as a CDF
        # time series, prefixed with this pusher's external ID prefix.
        for metric in REGISTRY.collect():
            if type(metric) == Metric and metric.type in ["gauge", "counter"]:
                external_id = self.external_id_prefix + metric.name

                time_series.append(
                    TimeSeries(
                        external_id=external_id,
                        name=metric.name,
                        legacy_name=external_id,
                        description=metric.documentation,
                        asset_id=asset_id,
                    )
                )

        ensure_time_series(self.cdf_client, time_series)

    def _push_to_server(self) -> None:
        """
        Create datapoints an push them to their respective time series
        """
        # One shared timestamp (ms since epoch) for the whole batch.
        timestamp = int(arrow.get().float_timestamp * 1000)

        datapoints: List[Dict[str, Union[str, List[Tuple[float, float]]]]] = []

        for metric in REGISTRY.collect():
            if type(metric) == Metric and metric.type in ["gauge", "counter"]:
                if len(metric.samples) == 0:
                    continue

                # Only the first sample of each metric is uploaded.
                external_id = self.external_id_prefix + metric.name
                datapoints.append({"externalId": external_id, "datapoints": [(timestamp, metric.samples[0].value)]})

        self.cdf_client.datapoints.insert_multiple(datapoints)
        self.logger.debug("Pushed metrics to CDF tenant '%s'", self._cdf_project)
| [
"threading.Thread",
"prometheus_client.exposition.delete_from_gateway",
"arrow.get",
"os.getpid",
"prometheus_client.Info",
"prometheus_client.exposition.pushadd_to_gateway",
"time.sleep",
"prometheus_client.core.REGISTRY.collect",
"threading.Event",
"prometheus_client.exposition.basic_auth_handle... | [((4121, 4220), 'prometheus_client.Gauge', 'Gauge', (['f"""{extractor_name}_start_time"""', '"""Timestamp (seconds) of when the extractor last started"""'], {}), "(f'{extractor_name}_start_time',\n 'Timestamp (seconds) of when the extractor last started')\n", (4126, 4220), False, 'from prometheus_client import Gauge, Info, Metric\n'), ((4239, 4348), 'prometheus_client.Gauge', 'Gauge', (['f"""{extractor_name}_finish_time"""', '"""Timestamp (seconds) of then the extractor last finished cleanly"""'], {}), "(f'{extractor_name}_finish_time',\n 'Timestamp (seconds) of then the extractor last finished cleanly')\n", (4244, 4348), False, 'from prometheus_client import Gauge, Info, Metric\n'), ((4456, 4515), 'prometheus_client.Gauge', 'Gauge', (['f"""{extractor_name}_num_threads"""', '"""Number of threads"""'], {}), "(f'{extractor_name}_num_threads', 'Number of threads')\n", (4461, 4515), False, 'from prometheus_client import Gauge, Info, Metric\n'), ((4552, 4616), 'prometheus_client.Gauge', 'Gauge', (['f"""{extractor_name}_memory_bytes"""', '"""Memory usage in bytes"""'], {}), "(f'{extractor_name}_memory_bytes', 'Memory usage in bytes')\n", (4557, 4616), False, 'from prometheus_client import Gauge, Info, Metric\n'), ((4652, 4711), 'prometheus_client.Gauge', 'Gauge', (['f"""{extractor_name}_cpu_percent"""', '"""CPU usage percent"""'], {}), "(f'{extractor_name}_cpu_percent', 'CPU usage percent')\n", (4657, 4711), False, 'from prometheus_client import Gauge, Info, Metric\n'), ((4733, 4802), 'prometheus_client.Info', 'Info', (['f"""{extractor_name}_info"""', '"""Information about running extractor"""'], {}), "(f'{extractor_name}_info', 'Information about running extractor')\n", (4737, 4802), False, 'from prometheus_client import Gauge, Info, Metric\n'), ((5598, 5690), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._proc_collect', 'name': '"""ProcessMetricsCollector"""', 'daemon': '(True)'}), 
"(target=self._proc_collect, name='ProcessMetricsCollector',\n daemon=True)\n", (5614, 5690), False, 'import threading\n'), ((6428, 6445), 'threading.Event', 'threading.Event', ([], {}), '()\n', (6443, 6445), False, 'import threading\n'), ((6469, 6496), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (6486, 6496), False, 'import logging\n'), ((7081, 7151), 'threading.Thread', 'threading.Thread', ([], {'target': 'self._run', 'daemon': '(True)', 'name': 'self.thread_name'}), '(target=self._run, daemon=True, name=self.thread_name)\n', (7097, 7151), False, 'import threading\n'), ((9331, 9421), 'prometheus_client.exposition.basic_auth_handler', 'basic_auth_handler', (['url', 'method', 'timeout', 'headers', 'data', 'self.username', 'self.password'], {}), '(url, method, timeout, headers, data, self.username, self\n .password)\n', (9349, 9421), False, 'from prometheus_client.exposition import basic_auth_handler, delete_from_gateway, pushadd_to_gateway\n'), ((10156, 10232), 'prometheus_client.exposition.delete_from_gateway', 'delete_from_gateway', (['self.url'], {'job': 'self.job_name', 'handler': 'self._auth_handler'}), '(self.url, job=self.job_name, handler=self._auth_handler)\n', (10175, 10232), False, 'from prometheus_client.exposition import basic_auth_handler, delete_from_gateway, pushadd_to_gateway\n'), ((12151, 12169), 'prometheus_client.core.REGISTRY.collect', 'REGISTRY.collect', ([], {}), '()\n', (12167, 12169), False, 'from prometheus_client.core import REGISTRY\n'), ((13025, 13043), 'prometheus_client.core.REGISTRY.collect', 'REGISTRY.collect', ([], {}), '()\n', (13041, 13043), False, 'from prometheus_client.core import REGISTRY\n'), ((4407, 4418), 'os.getpid', 'os.getpid', ([], {}), '()\n', (4416, 4418), False, 'import os\n'), ((5400, 5435), 'time.sleep', 'sleep', (['self.process_scrape_interval'], {}), '(self.process_scrape_interval)\n', (5405, 5435), False, 'from time import sleep\n'), ((9656, 9755), 
'prometheus_client.exposition.pushadd_to_gateway', 'pushadd_to_gateway', (['self.url'], {'job': 'self.job_name', 'registry': 'REGISTRY', 'handler': 'self._auth_handler'}), '(self.url, job=self.job_name, registry=REGISTRY, handler=\n self._auth_handler)\n', (9674, 9755), False, 'from prometheus_client.exposition import basic_auth_handler, delete_from_gateway, pushadd_to_gateway\n'), ((12375, 12511), 'cognite.client.data_classes.TimeSeries', 'TimeSeries', ([], {'external_id': 'external_id', 'name': 'metric.name', 'legacy_name': 'external_id', 'description': 'metric.documentation', 'asset_id': 'asset_id'}), '(external_id=external_id, name=metric.name, legacy_name=\n external_id, description=metric.documentation, asset_id=asset_id)\n', (12385, 12511), False, 'from cognite.client.data_classes import Asset, TimeSeries\n'), ((12885, 12896), 'arrow.get', 'arrow.get', ([], {}), '()\n', (12894, 12896), False, 'import arrow\n')] |
# -*- coding: utf-8 -*-
# Copyright (c) CKM Analytix Corp. All rights reserved.
# Authors: <NAME> (<EMAIL>), <NAME> (<EMAIL>)
"""
Metrics for determining quality of community structure
"""
import numpy as np
from scipy.sparse import identity
__all__ = ['modularity_r', 'modularity_density', 'mula_modularity_density']
def cluster_total_weight(adj_r, c, cluster_num, dict_bool):
    """Twice the total internal edge weight of one community.

    Parameters
    ----------
    adj_r : SciPy sparse matrix (csr or csc)
        The N x N rescaled adjacency matrix of the graph.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    cluster_num : Integer
        Label of the community of interest.
    dict_bool : dictionary
        Maps each community label to its boolean membership array
        ``(c == label)``.

    Returns
    -------
    float
        Twice the total weight of edges between nodes that both belong to
        community ``cluster_num`` in the rescaled topology.
    """
    members = dict_bool[cluster_num]
    # 0/1 indicator of community membership, same length as the node set.
    indicator = np.asarray(members, dtype=int)
    # Restrict rows to the community, then count weight towards members only.
    return adj_r[members].dot(indicator).sum()
def cluster_total_volume(adj, c, cluster_num, dict_bool):
    """Total volume (sum of incident edge weights) of one community.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        The N x N (rescaled) adjacency matrix of the graph.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    cluster_num : Integer
        Label of the community of interest.
    dict_bool : dictionary
        Maps each community label to its boolean membership array
        ``(c == label)``.

    Returns
    -------
    float
        Total volume of the nodes of community ``cluster_num``.
    """
    member_rows = adj[dict_bool[cluster_num]]
    return member_rows.sum()
def modularity_r(adj, c, cluster_labels, r=0, dict_bool=None):
    r"""Total modularity, at resolution ``r``, of a subset of communities.

    The adjacency matrix is rescaled as ``adj + r*I`` before the standard
    Newman-Girvan modularity is accumulated over the requested communities:
    smaller ``r`` favors larger communities, larger ``r`` favors smaller
    ones, and ``r = 0`` recovers plain modularity.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        The N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    cluster_labels : Integer array or list
        Unique labels of the communities whose modularity is accumulated.
    r : float
        Resolution of the rescaled topology.
    dict_bool : dictionary, optional
        Maps each community label to its boolean membership array
        ``(c == label)``; built here when omitted.

    Returns
    -------
    float
        Total modularity (of the rescaled topology) of the communities in
        ``cluster_labels``.

    References
    ----------
    .. [1] Community detection via maximization of modularity and its
           variants. IEEE Transactions on Computational Social Systems.
           1(1), 46-65, 2014
    .. [2] Finding and evaluating community structure in networks.
           Phys. Rev. E. 69, 026113, 2004
    """
    # Rescale the topology: add r self-loops to every node.
    adj = adj + identity(n=adj.shape[0]) * r
    if dict_bool is None:
        dict_bool = {label: c == label for label in np.unique(cluster_labels)}
    # Twice the total edge weight of the rescaled graph.
    total_weight = adj.dot(np.ones(adj.shape[0], dtype=int)).sum()

    def _single_cluster_q(label):
        # Internal-weight fraction minus squared volume fraction.
        w_in = cluster_total_weight(adj, c, label, dict_bool) / total_weight
        vol = cluster_total_volume(adj, c, label, dict_bool) / total_weight
        return w_in - vol ** 2

    return np.sum([_single_cluster_q(label) for label in cluster_labels])
def split_penalty(adj, c, ci, conn_clusters, total_weight, dict_bool):
    """Total split-penalty density of one community against a set of others.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        The N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    ci : Integer
        Label of the community of interest.
    conn_clusters : Integer array
        Unique labels of communities that may be connected to ``ci``.
    total_weight : float
        Twice the total weight of all edges in the graph.
    dict_bool : dictionary
        Maps each community label to its boolean membership array
        ``(c == label)``.

    Returns
    -------
    float
        Total split-penalty density for the edges between ``ci`` and the
        communities in ``conn_clusters``.
    """
    members = dict_bool[ci]
    # Never penalize a community against itself.
    others = conn_clusters[conn_clusters != ci]
    if others.size == 0:
        return 0
    size_ci = np.count_nonzero(members)
    return sum_penalty(adj[members], c, others, dict_bool) / (size_ci * total_weight)
def individual_penalty(adj_ci, c, cj, dict_bool):
    """Partial split-penalty term between two communities.

    Parameters
    ----------
    adj_ci : SciPy sparse matrix (csr or csc)
        The row-slice ``adj[c == ci]`` of the N x N adjacency matrix.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    cj : Integer
        Label of a community possibly connected to community ``ci``.
    dict_bool : dictionary
        Maps each community label to its boolean membership array
        ``(c == label)``.

    Returns
    -------
    float
        Squared cut weight between ``ci`` and ``cj`` divided by the size
        of ``cj``.
    """
    target = dict_bool[cj]
    # 0/1 indicator of cj membership over all nodes.
    indicator = np.zeros(len(c), dtype=int)
    indicator[target] = 1
    cut_weight = adj_ci.dot(indicator).sum()
    return cut_weight ** 2 / np.count_nonzero(target)
def sum_penalty(adj_ci, c, conn_clusters, dict_bool):
    """Sum of the partial split-penalty terms of one community.

    Parameters
    ----------
    adj_ci : SciPy sparse matrix (csr or csc)
        The row-slice ``adj[c == ci]`` of the N x N adjacency matrix.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    conn_clusters : Integer array
        Unique labels of communities that may be connected to ``ci``.
    dict_bool : dictionary
        Maps each community label to its boolean membership array
        ``(c == label)``.

    Returns
    -------
    float
        Sum over ``conn_clusters`` of the partial split-penalty terms of
        community ``ci``.
    """
    return np.sum([individual_penalty(adj_ci, c, cj, dict_bool)
            for cj in conn_clusters])
def density_based_modularity(adj, c, ci, total_weight, dict_bool):
    """Density-weighted modularity contribution of a single community.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        The N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    ci : Integer
        Label of the community of interest.
    total_weight : float
        Twice the total weight of all edges in the graph.
    dict_bool : dictionary
        Maps each community label to its boolean membership array
        ``(c == label)``.

    Returns
    -------
    float
        Modularity term of community ``ci``, with both the weight and the
        volume fractions scaled by the internal community density.
    """
    density = community_density(adj, c, ci, dict_bool)
    weight_term = cluster_total_weight(adj, c, ci, dict_bool) * density / total_weight
    volume_term = (cluster_total_volume(adj, c, ci, dict_bool) * density / total_weight) ** 2
    return weight_term - volume_term
def community_density(adj, c, ci, dict_bool):
    """Internal edge density of one community.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        The N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    ci : Integer
        Label of the community of interest.
    dict_bool : dictionary
        Maps each community label to its boolean membership array
        ``(c == label)``.

    Returns
    -------
    float
        Internal density of community ``ci``: twice the internal edge
        weight divided by ``size * (size - 1)``; zero for communities of
        at most one node.
    """
    members = dict_bool[ci]
    size = np.count_nonzero(members)
    # A community of 0 or 1 nodes has no internal edges by definition.
    if size <= 1:
        return 0
    indicator = np.zeros(adj.shape[0], dtype=int)
    indicator[members] = 1
    # Twice the weight of all edges inside the community.
    twice_internal = adj[members].dot(indicator).sum()
    return twice_internal / (size * (size - 1))
def compute_modularity_density(adj, c, conn_clusters, cluster_labels,
                               total_weight, dict_bool):
    """Modularity density of a set of communities.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        The N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    conn_clusters : Integer array
        Unique labels of communities that may be connected to those in
        ``cluster_labels``.
    cluster_labels : Integer array or list
        Unique labels of the communities of interest.
    total_weight : float
        Twice the total weight of all edges in the graph.
    dict_bool : dictionary
        Maps each community label to its boolean membership array
        ``(c == label)``.

    Returns
    -------
    float
        Sum over ``cluster_labels`` of each community's density-weighted
        modularity minus its split penalty against ``conn_clusters``.
    """
    contributions = [
        density_based_modularity(adj, c, ci, total_weight, dict_bool)
        - split_penalty(adj, c, ci, conn_clusters, total_weight, dict_bool)
        for ci in cluster_labels
    ]
    return np.sum(contributions)
def modularity_density(adj, c, cluster_labels,
                       dict_bool=None, conn_clusters=None):
    r"""Modularity density of a set of communities.

    Implements the split-penalty-corrected modularity of [1]: each
    community contributes its density-weighted modularity term minus a
    penalty for the edges it shares with other communities.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        The N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    cluster_labels : Integer array or list
        Unique labels of the communities of interest.
    dict_bool : dictionary, optional
        Maps each community label to its boolean membership array
        ``(c == label)``; built here when omitted.
    conn_clusters : Integer array, optional
        Unique labels of communities that may be connected to those in
        ``cluster_labels``. Supplying this when scoring one community at a
        time reduces the computational cost.

    Returns
    -------
    float
        Modularity density of the communities in ``cluster_labels``.

    References
    ----------
    .. [1] Community detection via maximization of modularity and its
           variants. IEEE Transactions on Computational Social Systems.
           1(1), 46-65, 2014
    """
    # Twice the total edge weight of the graph.
    total_weight = adj.dot(np.ones(adj.shape[0], dtype=int)).sum()
    all_labels = np.unique(c)
    if dict_bool is None:
        dict_bool = {label: c == label for label in all_labels}
    if conn_clusters is None:
        # Without a hint, any community may be connected to any other.
        conn_clusters = all_labels
    return compute_modularity_density(adj, c, conn_clusters, cluster_labels,
                                      total_weight, dict_bool)
def dotdot(adj, vec1, vec2):
    """Bilinear form of a matrix with two vectors: ``(adj @ vec1) . vec2``.

    Parameters
    ----------
    adj : Numpy matrix or SciPy sparse matrix (csr or csc)
        The N x N adjacency matrix of the graph of interest.
    vec1 : first Numpy array
    vec2 : second Numpy array

    Returns
    -------
    scalar (float, int, boolean, etc.)
        The value of the bilinear form.
    """
    projected = adj.dot(vec1)
    return projected.dot(vec2)
def norm_vector(vec):
    """Normalize an indicator vector for the modularity-density metric.

    Divides by the square root of the number of nonzero entries, so a 0/1
    community indicator becomes a unit vector.

    Parameters
    ----------
    vec : Numpy array to be normalized

    Returns
    -------
    Numpy array
    """
    return vec / np.count_nonzero(vec) ** 0.5
def mula_modularity_density(adj, c, dict_vec=None):
    r"""Modularity density via a bias-free metric that is faster to compute.

    Parameters
    ----------
    adj : SciPy sparse matrix (csr or csc)
        The N x N adjacency matrix of the graph of interest.
    c : Integer array
        Community labels for the nodes, ordered as in the adjacency matrix.
    dict_vec : dictionary, optional
        Maps each community label to its *normalized* indicator vector,
        ``(c == label) / sqrt(community size)``. Computed here when omitted.

    Returns
    -------
    float
        Modularity density of the partition described by ``c``.

    Notes
    -----
    Modularity density in [1] is given as

    .. math::
        Q = \sum_{c \in C}\Bigg\{\frac{\sum_{i,j \in c}T_{ij}}{n_c} -
        \sum_{c^{\prime} \in C-c}\Bigg(
        \frac{\sum_{{i \in c,}{j \in c^{\prime}}}T_{ij}}
        {\sqrt{n_c n_{c^{\prime}}}}\Bigg)\Bigg\}

    where each cluster ``c`` is represented by a 0/1 indicator vector
    normalized by the square root of its size.

    References
    ----------
    .. [1] A new measure of modularity density for community detection.
           arXiv:1908.08452 2019.
    """
    cluster_labels = np.unique(c)
    # BUG FIX: this flag was previously assigned only when dict_vec was
    # None, so passing a precomputed dict_vec raised NameError.
    build_vectors = dict_vec is None
    if build_vectors:
        dict_vec = {}
    nsum = 0
    for label in cluster_labels:
        if build_vectors:
            indicator = (c == label) * 1
            # Normalize by sqrt(community size) (norm_vector equivalent).
            dict_vec[label] = indicator / np.count_nonzero(indicator) ** 0.5
        nsum = nsum + dict_vec[label]
    # Penalty term: quadratic form of the summed community vectors.
    penalty = (adj.dot(nsum)).dot(nsum)
    # Internal term: quadratic form of each community vector (dotdot
    # equivalent), summed over all communities.
    internal = np.sum([(adj.dot(dict_vec[label])).dot(dict_vec[label])
                     for label in cluster_labels])
    return 2 * internal - penalty
| [
"numpy.count_nonzero",
"numpy.sum",
"numpy.zeros",
"numpy.ones",
"scipy.sparse.identity",
"numpy.unique"
] | [((1202, 1237), 'numpy.zeros', 'np.zeros', (['adj_r.shape[0]'], {'dtype': 'int'}), '(adj_r.shape[0], dtype=int)\n', (1210, 1237), True, 'import numpy as np\n'), ((5019, 5043), 'scipy.sparse.identity', 'identity', ([], {'n': 'adj.shape[0]'}), '(n=adj.shape[0])\n', (5027, 5043), False, 'from scipy.sparse import identity\n'), ((5308, 5340), 'numpy.ones', 'np.ones', (['adj.shape[0]'], {'dtype': 'int'}), '(adj.shape[0], dtype=int)\n', (5315, 5340), True, 'import numpy as np\n'), ((12277, 12310), 'numpy.zeros', 'np.zeros', (['adj.shape[0]'], {'dtype': 'int'}), '(adj.shape[0], dtype=int)\n', (12285, 12310), True, 'import numpy as np\n'), ((12494, 12519), 'numpy.count_nonzero', 'np.count_nonzero', (['bool_ci'], {}), '(bool_ci)\n', (12510, 12519), True, 'import numpy as np\n'), ((17395, 17427), 'numpy.ones', 'np.ones', (['adj.shape[0]'], {'dtype': 'int'}), '(adj.shape[0], dtype=int)\n', (17402, 17427), True, 'import numpy as np\n'), ((17617, 17629), 'numpy.unique', 'np.unique', (['c'], {}), '(c)\n', (17626, 17629), True, 'import numpy as np\n'), ((20949, 20961), 'numpy.unique', 'np.unique', (['c'], {}), '(c)\n', (20958, 20961), True, 'import numpy as np\n'), ((5226, 5251), 'numpy.unique', 'np.unique', (['cluster_labels'], {}), '(cluster_labels)\n', (5235, 5251), True, 'import numpy as np\n'), ((7358, 7377), 'numpy.sum', 'np.sum', (['search_bool'], {}), '(search_bool)\n', (7364, 7377), True, 'import numpy as np\n'), ((8826, 8851), 'numpy.count_nonzero', 'np.count_nonzero', (['bool_cj'], {}), '(bool_cj)\n', (8842, 8851), True, 'import numpy as np\n'), ((18878, 18899), 'numpy.count_nonzero', 'np.count_nonzero', (['vec'], {}), '(vec)\n', (18894, 18899), True, 'import numpy as np\n'), ((7495, 7520), 'numpy.count_nonzero', 'np.count_nonzero', (['bool_ci'], {}), '(bool_ci)\n', (7511, 7520), True, 'import numpy as np\n')] |
import numpy as np
from scipy.stats import gamma as RVgamma
# the gamma distribution considers a varying shape parameter and a scale parameter equal to 1
class HMM_approxSEIR_expanded:
    """Approximate HMM treatment of a discrete-time stochastic SEIR epidemic.

    The four hidden states are the compartments, indexed 0..3 as
    (S, E, I, R), as implied by the transition matrices built below: row 0
    leaks into row 1 at an infection-pressure-dependent rate, row 1 into
    row 2 (incubation), row 2 into row 3 (recovery), and row 3 is absorbing.

    Parameters
    ----------
    N : population size (observed counts y are divided by N).
    beta : infection-rate parameter; S->E probability is
        1 - exp(-beta * infectious fraction).
    rho : incubation-rate parameter; E->I probability is 1 - exp(-rho).
    gamma : recovery-rate parameter; I->R probability is 1 - exp(-gamma).
    q : parameter used to thin the predictive joint in the filtering
        update -- presumably a detection/observation probability; TODO
        confirm against the accompanying model description.
    eta_zero : initial distribution over the four compartments.
    q_r : exponential decay rate applied to beta from time t_star onwards.
    t_star : time index at which the restriction on beta takes effect.
    """
    def __init__( self, N, beta, rho, gamma, q, eta_zero, q_r, t_star ):
        self.N = N
        self.beta = beta
        self.rho = rho
        self.gamma = gamma
        self.q = q
        self.eta_zero = eta_zero
        self.q_r = q_r
        self.t_star = t_star
    def eta_computation(self, T):
        """Propagate the initial distribution forward for T steps.

        Returns the 4 x T matrix whose column t is the marginal
        distribution over (S, E, I, R) at time t under the deterministic
        mean-field dynamics (no conditioning on data, no beta restriction).
        """
        eta = np.zeros((4, T))
        eta[:, 0] = self.eta_zero
        # One-step exit probabilities out of E (pC) and out of I (pR).
        pC = 1 - np.exp(-self.rho)
        pR = 1 - np.exp(-self.gamma)
        for t in range(1, T):
            # Row-stochastic transition matrix; the S->E probability depends
            # on the current infectious fraction eta[2, t-1].
            Kappa_eta_prev = np.array([[ np.exp(-self.beta*eta[2,t-1]), 1 - np.exp(-self.beta*eta[2,t-1]), 0, 0 ], [ 0, 1 - pC, pC, 0 ], [ 0, 0, 1 - pR, pR ], [ 0, 0, 0, 1 ]])
            eta[:, t] = eta[:, t-1] @ Kappa_eta_prev
        return eta
    def filtering(self, y):
        """Forward (filtering) pass conditioned on the observed counts.

        Parameters
        ----------
        y : array of shape (4, 4, T)
            Observed counts; entry [i, j, t] appears to count observed
            i->j transitions at time t out of a population of size N --
            TODO confirm with the caller.

        Returns
        -------
        pitt : (4, T) filtered marginal distribution at each time.
        Kappa : (4, 4, T-1) transition matrices used at each step.
        pitt_expanded : (4, 4, T) filtered joint distribution over
            (state at t-1, state at t); axis 0 is the previous state.
        pitt_prev_expanded : (4, 4, T) one-step-ahead predictive joint
            distribution, before the data update.
        """
        T = np.size(y[0, 0, :])
        pC = 1 - np.exp(-self.rho)
        pR = 1 - np.exp(-self.gamma)
        pitt = np.zeros([4,T])
        pitt[:,0]= self.eta_zero
        pitt_expanded = np.zeros((4, 4, T))
        pitt_expanded[0, :, 0] = pitt[:,0]
        pitt_prev_expanded = np.zeros((4, 4, T))
        pitt_prev_expanded[0, :, 0] = pitt[:,0]
        Kappa = np.zeros([4,4,T-1])
        pitt_expanded_q = np.zeros([4,4,T])
        for t in range(1, T):
            # Effective infection rate: constant before t_star, exponentially
            # damped (rate q_r) once the intervention starts.
            beta_restr = self.beta*(t< self.t_star) + self.beta*(np.exp(-self.q_r*(t-self.t_star)))*(t>= self.t_star)
            Kappa_eta_prev = np.array([[ np.exp(-beta_restr*pitt[2,t-1]), 1 - np.exp(-beta_restr*pitt[2,t-1]), 0, 0 ], [ 0, 1 - pC, pC, 0 ], [ 0, 0, 1 - pR, pR ], [ 0, 0, 0, 1 ]])
            Kappa[:,:,t-1] = Kappa_eta_prev
            # Prediction step: previous filtered marginal (sum over axis 0 of
            # the joint) pushed through the transition matrix, kept as a
            # joint over (previous state, current state).
            pitt_prev_expanded[:,:, t] = Kappa_eta_prev*( np.sum(pitt_expanded[:, :, t-1], 0) ).reshape(4,1)
            #rho_vec = pitt_prev_expanded[:,:, t]*(1-self.q)
            #rho_vec = rho_vec/np.sum(rho_vec)
            # Thin the predictive joint by the unobserved fraction (1 - q)
            # and renormalize.
            pitt_expanded_q[:,:,t] = pitt_prev_expanded[:,:, t]*(1-self.q)
            pitt_expanded_q[:,:,t] = pitt_expanded_q[:,:,t]/np.sum(pitt_expanded_q[:,:,t])
            # Data update: observed counts contribute y/N directly; the
            # unobserved remainder follows the thinned predictive joint.
            pitt_expanded[:,:, t] = y[:,:, t]/self.N + ( 1 - (np.sum( y[:,:, t] ))/(self.N) )*pitt_expanded_q[:,:,t]
            pitt[:,t] = np.sum( pitt_expanded[:,:, t], 0 )
        return pitt, Kappa, pitt_expanded, pitt_prev_expanded
    def smoothing(self,pitt_expanded, pitt):
        """Backward (smoothing) pass over the filtered distributions.

        Parameters
        ----------
        pitt_expanded : (4, 4, T) filtered joint distributions from filtering().
        pitt : (4, T) filtered marginal distributions from filtering().

        Returns
        -------
        pist : (4, T) smoothed marginal distribution at each time.
        pist_expanded : (4, 4, T) smoothed joint distribution over
            (state at t-1, state at t).
        """
        T = np.size(pitt_expanded[1,1,:])
        pist = np.zeros((4, T))
        pist[:,T-1] = np.sum(pitt_expanded[:,:,T-1],0)
        L = np.zeros((4,4))
        pist_expanded = np.zeros((4, 4, T))
        pist_expanded[:,:,T-1] = pitt_expanded[:,:,T-1]
        for t in range(T-1,1,-1):
            pist[:,t-1] = np.sum(pist_expanded[:,:,t],1)
            # Backward kernel L: transposed filtered joint divided by the
            # filtered marginal, updated only where that marginal is nonzero
            # to avoid 0/0; other entries keep their previous value.
            L[np.outer(pitt[:,t-1],np.ones(4))!=0] = np.transpose(pitt_expanded[:,:,t-1])[np.outer(pitt[:,t-1],np.ones(4))!=0] / np.outer(pitt[:,t-1],np.ones(4))[np.outer(pitt[:,t-1],np.ones(4))!=0]
            # Combine the smoothed marginal at t with the backward kernel.
            pist_expanded[:,:,t-1] = np.outer(np.ones(4),pist[:,t-1]) * np.transpose(L)
        pist[:,0] = np.sum(pist_expanded[:,:,1],1)
        pist_expanded[0, :, 0] = pist[:,0]
        return pist, pist_expanded
return pist, pist_expanded | [
"numpy.size",
"numpy.sum",
"numpy.zeros",
"numpy.transpose",
"numpy.ones",
"numpy.exp"
] | [((547, 563), 'numpy.zeros', 'np.zeros', (['(4, T)'], {}), '((4, T))\n', (555, 563), True, 'import numpy as np\n'), ((999, 1018), 'numpy.size', 'np.size', (['y[0, 0, :]'], {}), '(y[0, 0, :])\n', (1006, 1018), True, 'import numpy as np\n'), ((1108, 1124), 'numpy.zeros', 'np.zeros', (['[4, T]'], {}), '([4, T])\n', (1116, 1124), True, 'import numpy as np\n'), ((1183, 1202), 'numpy.zeros', 'np.zeros', (['(4, 4, T)'], {}), '((4, 4, T))\n', (1191, 1202), True, 'import numpy as np\n'), ((1278, 1297), 'numpy.zeros', 'np.zeros', (['(4, 4, T)'], {}), '((4, 4, T))\n', (1286, 1297), True, 'import numpy as np\n'), ((1371, 1394), 'numpy.zeros', 'np.zeros', (['[4, 4, T - 1]'], {}), '([4, 4, T - 1])\n', (1379, 1394), True, 'import numpy as np\n'), ((1426, 1445), 'numpy.zeros', 'np.zeros', (['[4, 4, T]'], {}), '([4, 4, T])\n', (1434, 1445), True, 'import numpy as np\n'), ((2575, 2606), 'numpy.size', 'np.size', (['pitt_expanded[1, 1, :]'], {}), '(pitt_expanded[1, 1, :])\n', (2582, 2606), True, 'import numpy as np\n'), ((2629, 2645), 'numpy.zeros', 'np.zeros', (['(4, T)'], {}), '((4, T))\n', (2637, 2645), True, 'import numpy as np\n'), ((2668, 2705), 'numpy.sum', 'np.sum', (['pitt_expanded[:, :, T - 1]', '(0)'], {}), '(pitt_expanded[:, :, T - 1], 0)\n', (2674, 2705), True, 'import numpy as np\n'), ((2713, 2729), 'numpy.zeros', 'np.zeros', (['(4, 4)'], {}), '((4, 4))\n', (2721, 2729), True, 'import numpy as np\n'), ((2754, 2773), 'numpy.zeros', 'np.zeros', (['(4, 4, T)'], {}), '((4, 4, T))\n', (2762, 2773), True, 'import numpy as np\n'), ((3243, 3276), 'numpy.sum', 'np.sum', (['pist_expanded[:, :, 1]', '(1)'], {}), '(pist_expanded[:, :, 1], 1)\n', (3249, 3276), True, 'import numpy as np\n'), ((621, 638), 'numpy.exp', 'np.exp', (['(-self.rho)'], {}), '(-self.rho)\n', (627, 638), True, 'import numpy as np\n'), ((656, 675), 'numpy.exp', 'np.exp', (['(-self.gamma)'], {}), '(-self.gamma)\n', (662, 675), True, 'import numpy as np\n'), ((1037, 1054), 'numpy.exp', 'np.exp', 
(['(-self.rho)'], {}), '(-self.rho)\n', (1043, 1054), True, 'import numpy as np\n'), ((1072, 1091), 'numpy.exp', 'np.exp', (['(-self.gamma)'], {}), '(-self.gamma)\n', (1078, 1091), True, 'import numpy as np\n'), ((2405, 2438), 'numpy.sum', 'np.sum', (['pitt_expanded[:, :, t]', '(0)'], {}), '(pitt_expanded[:, :, t], 0)\n', (2411, 2438), True, 'import numpy as np\n'), ((2892, 2925), 'numpy.sum', 'np.sum', (['pist_expanded[:, :, t]', '(1)'], {}), '(pist_expanded[:, :, t], 1)\n', (2898, 2925), True, 'import numpy as np\n'), ((2226, 2258), 'numpy.sum', 'np.sum', (['pitt_expanded_q[:, :, t]'], {}), '(pitt_expanded_q[:, :, t])\n', (2232, 2258), True, 'import numpy as np\n'), ((3196, 3211), 'numpy.transpose', 'np.transpose', (['L'], {}), '(L)\n', (3208, 3211), True, 'import numpy as np\n'), ((2976, 3016), 'numpy.transpose', 'np.transpose', (['pitt_expanded[:, :, t - 1]'], {}), '(pitt_expanded[:, :, t - 1])\n', (2988, 3016), True, 'import numpy as np\n'), ((3170, 3180), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (3177, 3180), True, 'import numpy as np\n'), ((748, 782), 'numpy.exp', 'np.exp', (['(-self.beta * eta[2, t - 1])'], {}), '(-self.beta * eta[2, t - 1])\n', (754, 782), True, 'import numpy as np\n'), ((1541, 1578), 'numpy.exp', 'np.exp', (['(-self.q_r * (t - self.t_star))'], {}), '(-self.q_r * (t - self.t_star))\n', (1547, 1578), True, 'import numpy as np\n'), ((1636, 1672), 'numpy.exp', 'np.exp', (['(-beta_restr * pitt[2, t - 1])'], {}), '(-beta_restr * pitt[2, t - 1])\n', (1642, 1672), True, 'import numpy as np\n'), ((1893, 1930), 'numpy.sum', 'np.sum', (['pitt_expanded[:, :, t - 1]', '(0)'], {}), '(pitt_expanded[:, :, t - 1], 0)\n', (1899, 1930), True, 'import numpy as np\n'), ((2958, 2968), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (2965, 2968), True, 'import numpy as np\n'), ((3073, 3083), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (3080, 3083), True, 'import numpy as np\n'), ((783, 817), 'numpy.exp', 'np.exp', (['(-self.beta * eta[2, t - 
1])'], {}), '(-self.beta * eta[2, t - 1])\n', (789, 817), True, 'import numpy as np\n'), ((1673, 1709), 'numpy.exp', 'np.exp', (['(-beta_restr * pitt[2, t - 1])'], {}), '(-beta_restr * pitt[2, t - 1])\n', (1679, 1709), True, 'import numpy as np\n'), ((2326, 2344), 'numpy.sum', 'np.sum', (['y[:, :, t]'], {}), '(y[:, :, t])\n', (2332, 2344), True, 'import numpy as np\n'), ((3034, 3044), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (3041, 3044), True, 'import numpy as np\n'), ((3106, 3116), 'numpy.ones', 'np.ones', (['(4)'], {}), '(4)\n', (3113, 3116), True, 'import numpy as np\n')] |
from django.db import migrations, models
class Migration(migrations.Migration):
    """Create the per-language translation table for TextFile rows."""

    dependencies = [("textfile", "0001_initial")]

    operations = [
        migrations.CreateModel(
            name="TextFileTranslation",
            bases=(models.Model,),
            options={
                "managed": True,
                "db_table": "textfile_textfile_translation",
                "db_tablespace": "",
                "default_permissions": (),
                "verbose_name": "Plain text file Translation",
            },
            fields=[
                # Surrogate primary key, mirroring Django's implicit id column.
                (
                    "id",
                    models.AutoField(
                        primary_key=True,
                        serialize=False,
                        auto_created=True,
                        verbose_name="ID",
                    ),
                ),
                # Language code; indexed since lookups are done per language.
                (
                    "language_code",
                    models.CharField(max_length=15, verbose_name="Language", db_index=True),
                ),
                ("content", models.TextField(verbose_name="File contents")),
                # Link back to the untranslated master TextFile row.
                (
                    "master",
                    models.ForeignKey(
                        to="textfile.TextFile",
                        related_name="text_translations",
                        on_delete=models.CASCADE,
                        editable=False,
                        null=True,
                    ),
                ),
            ],
        ),
        # At most one translation per (language, master) pair.
        migrations.AlterUniqueTogether(
            name="textfiletranslation",
            unique_together={("language_code", "master")},
        ),
    ]
| [
"django.db.models.TextField",
"django.db.models.CharField",
"django.db.models.ForeignKey",
"django.db.models.AutoField",
"django.db.migrations.AlterUniqueTogether"
] | [((1486, 1596), 'django.db.migrations.AlterUniqueTogether', 'migrations.AlterUniqueTogether', ([], {'name': '"""textfiletranslation"""', 'unique_together': "{('language_code', 'master')}"}), "(name='textfiletranslation', unique_together=\n {('language_code', 'master')})\n", (1516, 1596), False, 'from django.db import migrations, models\n'), ((310, 403), 'django.db.models.AutoField', 'models.AutoField', ([], {'verbose_name': '"""ID"""', 'serialize': '(False)', 'auto_created': '(True)', 'primary_key': '(True)'}), "(verbose_name='ID', serialize=False, auto_created=True,\n primary_key=True)\n", (326, 403), False, 'from django.db import migrations, models\n'), ((614, 685), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(15)', 'verbose_name': '"""Language"""', 'db_index': '(True)'}), "(max_length=15, verbose_name='Language', db_index=True)\n", (630, 685), False, 'from django.db import migrations, models\n'), ((734, 780), 'django.db.models.TextField', 'models.TextField', ([], {'verbose_name': '"""File contents"""'}), "(verbose_name='File contents')\n", (750, 780), False, 'from django.db import migrations, models\n'), ((851, 984), 'django.db.models.ForeignKey', 'models.ForeignKey', ([], {'related_name': '"""text_translations"""', 'editable': '(False)', 'to': '"""textfile.TextFile"""', 'on_delete': 'models.CASCADE', 'null': '(True)'}), "(related_name='text_translations', editable=False, to=\n 'textfile.TextFile', on_delete=models.CASCADE, null=True)\n", (868, 984), False, 'from django.db import migrations, models\n')] |
from django.db import models
from cookiedjango.core.models import TimeStampedModel
class TwitterPost(TimeStampedModel):
    # NOTE(review): presumably the hashtag/search term this post was collected
    # under — confirm with callers. max_length=140 matches the classic tweet
    # length limit.
    tag = models.CharField(max_length=140)
    # Raw text content of the post.
    text = models.TextField()
| [
"django.db.models.CharField",
"django.db.models.TextField"
] | [((131, 163), 'django.db.models.CharField', 'models.CharField', ([], {'max_length': '(140)'}), '(max_length=140)\n', (147, 163), False, 'from django.db import models\n'), ((175, 193), 'django.db.models.TextField', 'models.TextField', ([], {}), '()\n', (191, 193), False, 'from django.db import models\n')] |
import torch
import torch.nn.functional as F
import torch.nn as nn
from torch.nn import BatchNorm1d
from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv
from torch_geometric.utils import add_remaining_self_loops, add_self_loops
class BasicNet(torch.nn.Module):
    """Minimal two-layer GCN: scalar node feature -> per-node sigmoid score."""

    def __init__(self):
        super(BasicNet, self).__init__()
        self.conv1 = GCNConv(1, 16)
        self.conv2 = GCNConv(16, 1)

    def forward(self, data):
        # Node weights arrive flat; reshape into a (num_nodes, 1) feature matrix.
        feats = data["x"].view(-1, 1)
        edges = data["edge_index"]
        hidden = F.relu(self.conv1(feats, edges))
        # Output keeps shape (num_nodes, 1), matching the original behaviour.
        return torch.sigmoid(self.conv2(hidden, edges))
class MultiNet(torch.nn.Module):
    """Five GCN layers (1 -> 32 -> 32 -> 32 -> 32 -> 1) with a flat output."""

    def __init__(self):
        super(MultiNet, self).__init__()
        self.conv_in = GCNConv(1, 32)
        self.conv_mid1 = GCNConv(32, 32)
        self.conv_mid2 = GCNConv(32, 32)
        self.conv_mid3 = GCNConv(32, 32)
        self.conv_out = GCNConv(32, 1)

    def forward(self, data):
        feats = data["x"].view(-1, 1)
        # NOTE: self-loops are not added here (the original left this as a
        # commented-out add_self_loops call).
        edges = data["edge_index"]
        for conv in (self.conv_in, self.conv_mid1, self.conv_mid2, self.conv_mid3):
            feats = F.relu(conv(feats, edges))
        # Flatten (num_nodes, 1) -> (num_nodes,).
        return torch.sigmoid(self.conv_out(feats, edges)).reshape(-1)
class ConvNet(torch.nn.Module):
    """GraphConv stack alternating max (widening) and mean (narrowing) layers."""

    def __init__(self):
        super(ConvNet, self).__init__()
        self.conv_in = GraphConv(1, 32, aggr="add")
        self.conv_max1 = GraphConv(32, 64, aggr="max")
        self.conv_mean1 = GraphConv(64, 32, aggr="mean")
        self.conv_max2 = GraphConv(32, 64, aggr="max")
        self.conv_mean2 = GraphConv(64, 32, aggr="mean")
        self.conv_max3 = GraphConv(32, 64, aggr="max")
        self.conv_mean3 = GraphConv(64, 32, aggr="mean")
        self.conv_out = GraphConv(32, 1, aggr="max")

    def forward(self, data):
        feats = data["x"].view(-1, 1)
        edges = data["edge_index"]
        feats = F.relu(self.conv_in(feats, edges))
        blocks = (
            (self.conv_max1, self.conv_mean1),
            (self.conv_max2, self.conv_mean2),
            (self.conv_max3, self.conv_mean3),
        )
        for widen, narrow in blocks:
            feats = F.relu(widen(feats, edges))
            feats = F.relu(narrow(feats, edges))
        # Output keeps shape (num_nodes, 1), as in the original (no reshape).
        return torch.sigmoid(self.conv_out(feats, edges))
class ConvNet2(torch.nn.Module):
    """Seven hidden GraphConv layers followed by a mean-aggregated head."""

    def __init__(self):
        super(ConvNet2, self).__init__()
        self.conv_in = GraphConv(1, 32, aggr="add")
        self.conv_mean1 = GraphConv(32, 32, aggr="mean")
        # NOTE(review): conv_mean2 and conv_mean4 are configured with
        # aggr="add" despite their names; kept as-is to preserve behaviour
        # and checkpoint/state_dict compatibility.
        self.conv_mean2 = GraphConv(32, 32, aggr="add")
        self.conv_mean3 = GraphConv(32, 32, aggr="mean")
        self.conv_mean4 = GraphConv(32, 32, aggr="add")
        self.conv_mean5 = GraphConv(32, 32, aggr="mean")
        self.conv_max1 = GraphConv(32, 32, aggr="max")
        self.conv_out = GraphConv(32, 1, aggr="mean")

    def forward(self, data):
        feats = data["x"].view(-1, 1)
        edges = data["edge_index"]
        stack = (
            self.conv_in,
            self.conv_mean1,
            self.conv_mean2,
            self.conv_mean3,
            self.conv_mean4,
            self.conv_mean5,
            self.conv_max1,
        )
        for conv in stack:
            feats = F.relu(conv(feats, edges))
        # Flatten (num_nodes, 1) -> (num_nodes,).
        return torch.sigmoid(self.conv_out(feats, edges)).reshape(-1)
class ConvNet3(torch.nn.Module):
    """Deep GraphConv stack (mean/max pairs) that adds self-loops first."""

    def __init__(self):
        super(ConvNet3, self).__init__()
        self.conv_in = GraphConv(1, 32, aggr="add")
        self.conv_mean1 = GraphConv(32, 32, aggr="mean")
        self.conv_max1 = GraphConv(32, 32, aggr="max")
        # NOTE(review): conv_mean2 uses aggr="add" despite its name; kept
        # as-is to preserve behaviour and checkpoint/state_dict keys.
        self.conv_mean2 = GraphConv(32, 32, aggr="add")
        self.conv_max2 = GraphConv(32, 32, aggr="max")
        self.conv_mean3 = GraphConv(32, 32, aggr="mean")
        self.conv_max3 = GraphConv(32, 32, aggr="max")
        self.conv_mean4 = GraphConv(32, 32, aggr="mean")
        self.conv_max4 = GraphConv(32, 32, aggr="max")
        self.conv_mean5 = GraphConv(32, 32, aggr="mean")
        self.conv_max5 = GraphConv(32, 32, aggr="max")
        self.conv_out = GraphConv(32, 1, aggr="max")

    def forward(self, data):
        feats = data["x"].view(-1, 1)
        # Add one self-loop per node before message passing.
        edges, _ = add_self_loops(data["edge_index"], num_nodes=len(data["x"]))
        layers = (
            self.conv_in,
            self.conv_mean1, self.conv_max1,
            self.conv_mean2, self.conv_max2,
            self.conv_mean3, self.conv_max3,
            self.conv_mean4, self.conv_max4,
            self.conv_mean5, self.conv_max5,
        )
        for conv in layers:
            feats = F.relu(conv(feats, edges))
        # Flatten (num_nodes, 1) -> (num_nodes,).
        return torch.sigmoid(self.conv_out(feats, edges)).reshape(-1)
| [
"torch_geometric.nn.GCNConv",
"torch_geometric.nn.GraphConv",
"torch.sigmoid",
"torch.nn.functional.relu"
] | [((373, 387), 'torch_geometric.nn.GCNConv', 'GCNConv', (['(1)', '(16)'], {}), '(1, 16)\n', (380, 387), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((409, 423), 'torch_geometric.nn.GCNConv', 'GCNConv', (['(16)', '(1)'], {}), '(16, 1)\n', (416, 423), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((588, 597), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (594, 597), True, 'import torch.nn.functional as F\n'), ((648, 664), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (661, 664), False, 'import torch\n'), ((805, 819), 'torch_geometric.nn.GCNConv', 'GCNConv', (['(1)', '(32)'], {}), '(1, 32)\n', (812, 819), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((845, 860), 'torch_geometric.nn.GCNConv', 'GCNConv', (['(32)', '(32)'], {}), '(32, 32)\n', (852, 860), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((886, 901), 'torch_geometric.nn.GCNConv', 'GCNConv', (['(32)', '(32)'], {}), '(32, 32)\n', (893, 901), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((927, 942), 'torch_geometric.nn.GCNConv', 'GCNConv', (['(32)', '(32)'], {}), '(32, 32)\n', (934, 942), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((967, 981), 'torch_geometric.nn.GCNConv', 'GCNConv', (['(32)', '(1)'], {}), '(32, 1)\n', (974, 981), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((1185, 1194), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1191, 1194), True, 'import torch.nn.functional as F\n'), ((1250, 1259), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1256, 1259), True, 'import torch.nn.functional as F\n'), ((1315, 1324), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1321, 1324), True, 'import torch.nn.functional as F\n'), ((1380, 1389), 
'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (1386, 1389), True, 'import torch.nn.functional as F\n'), ((1444, 1460), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (1457, 1460), False, 'import torch\n'), ((1610, 1638), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(1)', '(32)'], {'aggr': '"""add"""'}), "(1, 32, aggr='add')\n", (1619, 1638), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((1664, 1693), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(64)'], {'aggr': '"""max"""'}), "(32, 64, aggr='max')\n", (1673, 1693), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((1720, 1750), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(64)', '(32)'], {'aggr': '"""mean"""'}), "(64, 32, aggr='mean')\n", (1729, 1750), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((1777, 1806), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(64)'], {'aggr': '"""max"""'}), "(32, 64, aggr='max')\n", (1786, 1806), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((1833, 1863), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(64)', '(32)'], {'aggr': '"""mean"""'}), "(64, 32, aggr='mean')\n", (1842, 1863), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((1890, 1919), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(64)'], {'aggr': '"""max"""'}), "(32, 64, aggr='max')\n", (1899, 1919), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((1946, 1976), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(64)', '(32)'], {'aggr': '"""mean"""'}), "(64, 32, aggr='mean')\n", (1955, 1976), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((2002, 2030), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(1)'], {'aggr': '"""max"""'}), "(32, 1, aggr='max')\n", (2011, 2030), False, 
'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((2197, 2206), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2203, 2206), True, 'import torch.nn.functional as F\n'), ((2262, 2271), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2268, 2271), True, 'import torch.nn.functional as F\n'), ((2328, 2337), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2334, 2337), True, 'import torch.nn.functional as F\n'), ((2393, 2402), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2399, 2402), True, 'import torch.nn.functional as F\n'), ((2459, 2468), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2465, 2468), True, 'import torch.nn.functional as F\n'), ((2524, 2533), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2530, 2533), True, 'import torch.nn.functional as F\n'), ((2590, 2599), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (2596, 2599), True, 'import torch.nn.functional as F\n'), ((2654, 2670), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (2667, 2670), False, 'import torch\n'), ((2810, 2838), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(1)', '(32)'], {'aggr': '"""add"""'}), "(1, 32, aggr='add')\n", (2819, 2838), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((2865, 2895), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""mean"""'}), "(32, 32, aggr='mean')\n", (2874, 2895), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((2922, 2951), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""add"""'}), "(32, 32, aggr='add')\n", (2931, 2951), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((2978, 3008), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""mean"""'}), "(32, 32, aggr='mean')\n", (2987, 3008), False, 'from torch_geometric.nn import 
GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((3035, 3064), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""add"""'}), "(32, 32, aggr='add')\n", (3044, 3064), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((3091, 3121), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""mean"""'}), "(32, 32, aggr='mean')\n", (3100, 3121), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((3147, 3176), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""max"""'}), "(32, 32, aggr='max')\n", (3156, 3176), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((3201, 3230), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(1)'], {'aggr': '"""mean"""'}), "(32, 1, aggr='mean')\n", (3210, 3230), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((3397, 3406), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3403, 3406), True, 'import torch.nn.functional as F\n'), ((3462, 3471), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3468, 3471), True, 'import torch.nn.functional as F\n'), ((3527, 3536), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3533, 3536), True, 'import torch.nn.functional as F\n'), ((3592, 3601), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3598, 3601), True, 'import torch.nn.functional as F\n'), ((3657, 3666), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3663, 3666), True, 'import torch.nn.functional as F\n'), ((3722, 3731), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3728, 3731), True, 'import torch.nn.functional as F\n'), ((3786, 3795), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (3792, 3795), True, 'import torch.nn.functional as F\n'), ((4017, 4045), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(1)', '(32)'], {'aggr': 
'"""add"""'}), "(1, 32, aggr='add')\n", (4026, 4045), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4073, 4103), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""mean"""'}), "(32, 32, aggr='mean')\n", (4082, 4103), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4129, 4158), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""max"""'}), "(32, 32, aggr='max')\n", (4138, 4158), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4186, 4215), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""add"""'}), "(32, 32, aggr='add')\n", (4195, 4215), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4241, 4270), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""max"""'}), "(32, 32, aggr='max')\n", (4250, 4270), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4298, 4328), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""mean"""'}), "(32, 32, aggr='mean')\n", (4307, 4328), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4354, 4383), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""max"""'}), "(32, 32, aggr='max')\n", (4363, 4383), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4411, 4441), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""mean"""'}), "(32, 32, aggr='mean')\n", (4420, 4441), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4467, 4496), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""max"""'}), "(32, 32, aggr='max')\n", (4476, 4496), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), 
((4524, 4554), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""mean"""'}), "(32, 32, aggr='mean')\n", (4533, 4554), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4580, 4609), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(32)'], {'aggr': '"""max"""'}), "(32, 32, aggr='max')\n", (4589, 4609), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4635, 4663), 'torch_geometric.nn.GraphConv', 'GraphConv', (['(32)', '(1)'], {'aggr': '"""max"""'}), "(32, 1, aggr='max')\n", (4644, 4663), False, 'from torch_geometric.nn import GCNConv, GraphConv, GATConv, GatedGraphConv\n'), ((4917, 4926), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (4923, 4926), True, 'import torch.nn.functional as F\n'), ((4983, 4992), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (4989, 4992), True, 'import torch.nn.functional as F\n'), ((5048, 5057), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5054, 5057), True, 'import torch.nn.functional as F\n'), ((5114, 5123), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5120, 5123), True, 'import torch.nn.functional as F\n'), ((5179, 5188), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5185, 5188), True, 'import torch.nn.functional as F\n'), ((5245, 5254), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5251, 5254), True, 'import torch.nn.functional as F\n'), ((5310, 5319), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5316, 5319), True, 'import torch.nn.functional as F\n'), ((5376, 5385), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5382, 5385), True, 'import torch.nn.functional as F\n'), ((5441, 5450), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5447, 5450), True, 'import torch.nn.functional as F\n'), ((5507, 5516), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5513, 5516), True, 'import 
torch.nn.functional as F\n'), ((5572, 5581), 'torch.nn.functional.relu', 'F.relu', (['x'], {}), '(x)\n', (5578, 5581), True, 'import torch.nn.functional as F\n'), ((3849, 3865), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (3862, 3865), False, 'import torch\n'), ((5636, 5652), 'torch.sigmoid', 'torch.sigmoid', (['x'], {}), '(x)\n', (5649, 5652), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
# Generated by Django 1.11.29 on 2021-01-28 16:46
from __future__ import unicode_literals
import contentcuration.models
from django.db import migrations, models
import django.db.models.functions.text
class Migration(migrations.Migration):
    """Add a partial index on lower(email) for active users only."""

    dependencies = [
        ('contentcuration', '0119_task_channel_id'),
    ]

    operations = [
        # Partial index on lower(email) restricted to is_active=True rows.
        # NOTE(review): per the index class name this presumably enforces
        # case-insensitive uniqueness among active users only — confirm
        # against UniqueActiveUserIndex's definition in contentcuration.models.
        migrations.AddIndex(
            model_name='user',
            index=contentcuration.models.UniqueActiveUserIndex(django.db.models.functions.text.Lower('email'), condition=models.Q(('is_active', True)), name='contentcura_email_d4d492_idx'),
        ),
    ]
| [
"django.db.models.Q"
] | [((548, 577), 'django.db.models.Q', 'models.Q', (["('is_active', True)"], {}), "(('is_active', True))\n", (556, 577), False, 'from django.db import migrations, models\n')] |
#!/usr/bin/env python
'''
Initialize a molecular system.

There are many methods to define/initialize a molecule. This example presents
three methods to create/initialize the molecular object. Mole object is a
Python object. You can initialize the Mole object using any methods supported
by Python.

See also
pyscf/examples/pbc/06-load_mol_from_chkfile.py to initialize mol from chkfile
pyscf/examples/pbc/00-input_cell.py for initialization of crystal
'''
from pyscf import gto
#
# First method: assign the geometry, basis etc. to the Mole object, then
# call build() to initialize the molecule.
#
mol = gto.Mole()
mol.atom = '''O 0 0 0; H 0 1 0; H 0 0 1'''
mol.basis = 'sto-3g'
mol.build()
#
# Shortcut for initialization:
# pass keyword arguments to mol.build() to initialize a molecule.
#
mol = gto.Mole()
mol.build(
    atom = '''O 0 0 0; H 0 1 0; H 0 0 1''',
    basis = 'sto-3g',
)
#
# The shortcut function gto.M creates and builds a molecule in one call.
#
mol = gto.M(
    atom = '''O 0 0 0; H 0 1 0; H 0 0 1''',
    basis = 'sto-3g',
)
#
# Other parameters
# ================
#
mol.charge = 0
mol.spin = 0 # 2j == nelec_alpha - nelec_beta
mol.symmetry = 1 # Allow the program to apply point group symmetry if possible
# .unit can be 'bohr' or 'ang' to indicate the unit of the coordinates in mol.atom.
# If a number is assigned to unit, this number is used as the length of
# 1 Bohr (in Angstrom), e.g. you can double the bond length of a system by
# setting mol.unit = 0.529*.5.
mol.unit = 'Ang' # (New in version 1.1)
# Output
# ------
# To write output on disk, assign a filename to Mole.output.
mol.output = 'path/to/my_out.txt'
# If Mole.output is not given, the default output goes to stdout.
# Print level
# -----------
# Mole.verbose controls the print level. The print level ranges from 0 (quiet,
# no output) to 9 (very noisy). The default level is 1, which only outputs
# error messages; it works almost the same as level 0. Level 4 (info) or 5
# (debug) are the recommended values if some calculation details are needed.
mol.verbose = 4
# Level 4 hides some details such as CPU timings and the orbital energies
# during the SCF iterations.
# Max memory to use
# -----------------
mol.max_memory = 1000 # in MB
# or use the environment variable PYSCF_MAX_MEMORY to control the memory usage
# (new in PySCF-1.3), e.g.
# export PYSCF_MAX_MEMORY=10000 # 10 GB
# python 00-input_mole.py
| [
"pyscf.gto.M",
"pyscf.gto.Mole"
] | [((625, 635), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (633, 635), False, 'from pyscf import gto\n'), ((824, 834), 'pyscf.gto.Mole', 'gto.Mole', ([], {}), '()\n', (832, 834), False, 'from pyscf import gto\n'), ((981, 1037), 'pyscf.gto.M', 'gto.M', ([], {'atom': '"""O 0 0 0; H 0 1 0; H 0 0 1"""', 'basis': '"""sto-3g"""'}), "(atom='O 0 0 0; H 0 1 0; H 0 0 1', basis='sto-3g')\n", (986, 1037), False, 'from pyscf import gto\n')] |
import uuid
from aiogram.utils.callback_data import CallbackData
# Demo storage: a single seed post keyed by a randomly generated UUID string.
_seed_post = {
    'title': 'Grup Yönetimi\n',
    'body': '\n@Combot \n@MissRose_bot \n@baymax_en_bot',
}
POSTS = {str(uuid.uuid4()): _seed_post}

# Callback-data factory with prefix 'post' and fields 'id' and 'action'.
posts_cb = CallbackData('post', 'id', 'action')
| [
"aiogram.utils.callback_data.CallbackData",
"uuid.uuid4"
] | [((223, 259), 'aiogram.utils.callback_data.CallbackData', 'CallbackData', (['"""post"""', '"""id"""', '"""action"""'], {}), "('post', 'id', 'action')\n", (235, 259), False, 'from aiogram.utils.callback_data import CallbackData\n'), ((85, 97), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (95, 97), False, 'import uuid\n')] |
from __future__ import print_function, absolute_import, division, unicode_literals
import numpy as np
from astropy.io import ascii
from pkg_resources import resource_filename
'''
Function to read in atomic line information for a given rest frame wavelength.
Or
For the line matching the closest wavelength.
Input :
lambda_rest :- Rest Frame wavelength (in \AA) of the line to match
method :- 'closest' -> If set will match the closest line.
'Exact' -> If set will match the exact wavelength.
Output: dic :- Dictionary with fval,lambda and species name.
Example: str=rb_setline(2796.3,'closest')
Written By: <NAME> Jan 2018, Python 2.7
Edit: <NAME> Sep 2018, Depreciated kwargs to be compatible with python 3
'''
def rb_setline(lambda_rest, method, linelist='atom'):
    """Look up atomic line data for a rest-frame wavelength.

    Parameters
    ----------
    lambda_rest : float
        Rest-frame wavelength (Angstrom) of the transition to match.
    method : str
        'Exact'   -> match all lines within 1e-3 Angstrom of lambda_rest.
        'closest' -> match the single nearest line.
    linelist : str, optional
        Which packaged list to search ('atom', 'LLS', 'LLS Small', 'DLA').

    Returns
    -------
    dict
        Keys 'wave', 'fval', 'name' (arrays for 'Exact', scalars for
        'closest'), plus 'gamma' when linelist == 'atom'.

    Raises
    ------
    NameError
        If *method* is neither 'Exact' nor 'closest'.
    """
    line_str = read_line_list(linelist)
    nlines = len(line_str)
    wavelist = np.zeros((nlines,))
    name = np.empty(nlines, dtype='object')
    fval = np.zeros((nlines,))
    if linelist == 'atom':
        gamma = np.zeros((nlines,))

    for i in range(nlines):
        wavelist[i] = np.double(line_str[i]['wrest'])
        # np.float/np.str were removed in NumPy 1.24; the builtins are
        # equivalent here (values are cast into the float arrays anyway).
        fval[i] = float(line_str[i]['fval'])
        name[i] = str(line_str[i]['ion'])
        if linelist == 'atom':
            gamma[i] = float(line_str[i]['gamma'])

    if method == 'Exact':
        # np.where with a single argument returns an index tuple, which is
        # valid for fancy indexing below.
        q = np.where(np.abs(lambda_rest - wavelist) < 1e-3)
        if linelist == 'atom':
            outstr = {'wave': wavelist[q], 'fval': fval[q], 'name': name[q],
                      'gamma': gamma[q]}
        else:
            outstr = {'wave': wavelist[q], 'fval': fval[q], 'name': name[q]}
    elif method == 'closest':
        idx = (np.abs(lambda_rest - wavelist)).argmin()
        if linelist == 'atom':
            outstr = {'wave': wavelist[idx], 'fval': fval[idx],
                      'name': name[idx], 'gamma': gamma[idx]}
        else:
            outstr = {'wave': wavelist[idx], 'fval': fval[idx],
                      'name': name[idx]}
    else:
        raise NameError('Specify the matching method, closest or Exact')
    return outstr
def read_line_list(label):
    """Read a packaged spectral line list and return it as a list of dicts.

    Parameters
    ----------
    label : str
        One of 'atom', 'LLS', 'LLS Small', 'DLA'.

    Returns
    -------
    list of dict
        Each entry has keys 'wrest', 'ion', 'fval' (plus 'gamma' for 'atom').

    Raises
    ------
    ValueError
        If *label* is not a recognised line-list name.
    """
    if label == 'atom':
        filename = resource_filename('rbvfit', 'lines/atom_full.dat')
    elif label == 'LLS':
        filename = resource_filename('rbvfit', 'lines/lls.lst')
    elif label == 'LLS Small':
        filename = resource_filename('rbvfit', 'lines/lls_sub.lst')
    elif label == 'DLA':
        filename = resource_filename('rbvfit', 'lines/dla.lst')
    else:
        # Previously this only printed 'Give Correct LineList' and then
        # crashed later with an unrelated NameError; fail fast instead.
        raise ValueError(
            'Unknown line list %r; use atom, LLS, LLS Small or DLA' % label)

    data = []
    if label == 'atom':
        s = ascii.read(filename)
        for line in range(len(s['col1'])):
            source = {}
            source['wrest'] = float(s['col2'][line])
            # np.str/np.int were removed in NumPy 1.24; the builtins produce
            # the same 'Ion <integer wavelength>' label.
            source['ion'] = s['col1'][line] + ' ' + str(int(s['col2'][line]))
            source['fval'] = float(s['col3'][line])
            source['gamma'] = float(s['col4'][line])
            data.append(source)
    else:
        # Plain whitespace-separated table with one header line; use a
        # context manager so the file handle is always closed.
        with open(filename, 'r') as f:
            f.readline()  # skip header
            for line in f:
                columns = line.strip().split()
                source = {}
                source['wrest'] = float(columns[0])
                source['ion'] = columns[1] + ' ' + columns[2]
                source['fval'] = float(columns[3])
                data.append(source)
    return data
| [
"astropy.io.ascii.read",
"numpy.abs",
"numpy.double",
"numpy.float",
"pkg_resources.resource_filename",
"numpy.int",
"numpy.str"
] | [((1244, 1275), 'numpy.double', 'np.double', (["line_str[i]['wrest']"], {}), "(line_str[i]['wrest'])\n", (1253, 1275), True, 'import numpy as np\n'), ((1286, 1315), 'numpy.float', 'np.float', (["line_str[i]['fval']"], {}), "(line_str[i]['fval'])\n", (1294, 1315), True, 'import numpy as np\n'), ((1326, 1352), 'numpy.str', 'np.str', (["line_str[i]['ion']"], {}), "(line_str[i]['ion'])\n", (1332, 1352), True, 'import numpy as np\n'), ((2077, 2127), 'pkg_resources.resource_filename', 'resource_filename', (['"""rbvfit"""', '"""lines/atom_full.dat"""'], {}), "('rbvfit', 'lines/atom_full.dat')\n", (2094, 2127), False, 'from pkg_resources import resource_filename\n'), ((2445, 2465), 'astropy.io.ascii.read', 'ascii.read', (['filename'], {}), '(filename)\n', (2455, 2465), False, 'from astropy.io import ascii\n'), ((1388, 1416), 'numpy.str', 'np.str', (["line_str[i]['gamma']"], {}), "(line_str[i]['gamma'])\n", (1394, 1416), True, 'import numpy as np\n'), ((2160, 2204), 'pkg_resources.resource_filename', 'resource_filename', (['"""rbvfit"""', '"""lines/lls.lst"""'], {}), "('rbvfit', 'lines/lls.lst')\n", (2177, 2204), False, 'from pkg_resources import resource_filename\n'), ((1455, 1485), 'numpy.abs', 'np.abs', (['(lambda_rest - wavelist)'], {}), '(lambda_rest - wavelist)\n', (1461, 1485), True, 'import numpy as np\n'), ((2243, 2291), 'pkg_resources.resource_filename', 'resource_filename', (['"""rbvfit"""', '"""lines/lls_sub.lst"""'], {}), "('rbvfit', 'lines/lls_sub.lst')\n", (2260, 2291), False, 'from pkg_resources import resource_filename\n'), ((1697, 1727), 'numpy.abs', 'np.abs', (['(lambda_rest - wavelist)'], {}), '(lambda_rest - wavelist)\n', (1703, 1727), True, 'import numpy as np\n'), ((2324, 2368), 'pkg_resources.resource_filename', 'resource_filename', (['"""rbvfit"""', '"""lines/dla.lst"""'], {}), "('rbvfit', 'lines/dla.lst')\n", (2341, 2368), False, 'from pkg_resources import resource_filename\n'), ((2611, 2634), 'numpy.int', 'np.int', (["s['col2'][line]"], {}), 
"(s['col2'][line])\n", (2617, 2634), True, 'import numpy as np\n')] |
from fylesdk import FyleSDK
from django.conf import settings
from allauth.socialaccount.providers.oauth2.views import (
OAuth2Adapter,
OAuth2CallbackView,
OAuth2LoginView,
)
from accounting_integrations.allauth.providers.fylein.provider import FyleProvider
class FyleOAuth2Adapter(OAuth2Adapter):
    """django-allauth OAuth2 adapter for the Fyle provider."""

    provider_id = FyleProvider.id

    base_url = settings.FYLE_BASE_URL.rstrip('/')
    access_token_url = '{0}/api/oauth/token'.format(base_url)
    authorize_url = '{0}/app/main/#/oauth/authorize'.format(base_url)
    profile_url = '{0}/user'.format(base_url)

    def complete_login(self, request, app, token, **kwargs):
        # The SDK obtains its own access token from the stored refresh token.
        api = FyleSDK(
            client_id=settings.FYLE_CLIENT_ID,
            client_secret=settings.FYLE_CLIENT_SECRET,
            refresh_token=token.token_secret,
        )
        me = api.Employees.get_my_profile()['data']
        extra_data = {
            'id': me['id'],
            'email': me['employee_email'],
            'name': me['full_name'],
        }
        provider = self.get_provider()
        return provider.sociallogin_from_response(request, extra_data)


oauth2_login = OAuth2LoginView.adapter_view(FyleOAuth2Adapter)
oauth2_callback = OAuth2CallbackView.adapter_view(FyleOAuth2Adapter)
| [
"fylesdk.FyleSDK",
"allauth.socialaccount.providers.oauth2.views.OAuth2CallbackView.adapter_view",
"allauth.socialaccount.providers.oauth2.views.OAuth2LoginView.adapter_view",
"django.conf.settings.FYLE_BASE_URL.rstrip"
] | [((1206, 1253), 'allauth.socialaccount.providers.oauth2.views.OAuth2LoginView.adapter_view', 'OAuth2LoginView.adapter_view', (['FyleOAuth2Adapter'], {}), '(FyleOAuth2Adapter)\n', (1234, 1253), False, 'from allauth.socialaccount.providers.oauth2.views import OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView\n'), ((1272, 1322), 'allauth.socialaccount.providers.oauth2.views.OAuth2CallbackView.adapter_view', 'OAuth2CallbackView.adapter_view', (['FyleOAuth2Adapter'], {}), '(FyleOAuth2Adapter)\n', (1303, 1322), False, 'from allauth.socialaccount.providers.oauth2.views import OAuth2Adapter, OAuth2CallbackView, OAuth2LoginView\n'), ((362, 396), 'django.conf.settings.FYLE_BASE_URL.rstrip', 'settings.FYLE_BASE_URL.rstrip', (['"""/"""'], {}), "('/')\n", (391, 396), False, 'from django.conf import settings\n'), ((686, 810), 'fylesdk.FyleSDK', 'FyleSDK', ([], {'client_id': 'settings.FYLE_CLIENT_ID', 'client_secret': 'settings.FYLE_CLIENT_SECRET', 'refresh_token': 'token.token_secret'}), '(client_id=settings.FYLE_CLIENT_ID, client_secret=settings.\n FYLE_CLIENT_SECRET, refresh_token=token.token_secret)\n', (693, 810), False, 'from fylesdk import FyleSDK\n')] |
#!/usr/bin/env python
# Google Code Jam
# Google Code Jam 2017
# Round 1C 2017
# Problem A. <NAME>
# Solved small test set
from __future__ import print_function, division
import math
import itertools
def get_k_highest_pancakes(n_pancakes, k):
    """Return the k pancakes with the greatest height, tallest first."""
    by_height = sorted(n_pancakes, key=lambda p: p[1], reverse=True)
    return by_height[:k]
def get_k_largest_pancakes(n_pancakes, k):
    """Return the k pancakes with the greatest radius, widest first."""
    by_radius = sorted(n_pancakes, key=lambda p: p[0], reverse=True)
    return by_radius[:k]
def get_k_pancakes(n_pancakes, k):
    """Return the k pancakes maximising r^2 + 2*r*h.

    That quantity is the pancake's full area contribution divided by pi
    (top disc plus side wall), so this ranks by individual exposed area.
    """
    def contribution(pancake):
        r, h = pancake
        return r ** 2 + 2 * r * h
    ranked = sorted(n_pancakes, key=contribution, reverse=True)
    return ranked[:k]
def get_exposed_area(pancakes):
    """Total visible surface of a stack of (radius, height) pancakes.

    Seen from above only the widest pancake's top disc shows
    (pi * r_max ** 2); every pancake also exposes its full side wall
    (2 * pi * r * h).
    """
    ordered = sorted(pancakes, key=lambda p: p[0], reverse=True)
    top_disc = math.pi * ordered[0][0] ** 2
    side_walls = 0
    for r, h in ordered:
        side_walls += h * 2 * math.pi * r
    return side_walls + top_disc
def solve(n, k, pancakes):
    """Greedy answer for one test case: best exposed area using k pancakes.

    Tries two candidate stacks -- the k highest pancakes and the k largest
    (by radius) -- and keeps the better exposed area.  This heuristic is
    what passed the small test set this solution targets.

    Note: the original body contained a brute-force fallback over
    itertools.combinations *after* the return statement; it was unreachable
    dead code (including debug prints) and has been removed.
    """
    pancakes_high = get_k_highest_pancakes(pancakes, k)
    area_high = get_exposed_area(pancakes_high)
    pancakes_large = get_k_largest_pancakes(pancakes, k)
    area_large = get_exposed_area(pancakes_large)
    return max(area_large, area_high)
if __name__ == '__main__':
    import os
    # Sanity check against the sample cases from the problem statement.
    samples = [
        (2, 1, [(100, 20), (200, 10)]),
        (2, 2, [(100, 20), (200, 10)]),
        (3, 2, [(100, 10), (100, 10), (100, 10)]),
        (4, 2, [(9, 3), (7, 1), (10, 1), (8, 4)]),
    ]
    for sample in samples:
        print(solve(*sample))
    # Read '<name>.in' next to this script, solve every case and write the
    # answers to '<name>.out' in the judge's "Case #i: <area>" format.
    data_files = ['A-large']
    for f in data_files:
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '{0}.in'.format(f)), 'r') as input_file:
            lines = input_file.readlines()
        # First line is the number of test cases; the remaining lines are
        # consumed sequentially via the cursor ``j``.
        input_count = int(lines[0].replace('\n' ,''))
        inputs = [line.replace('\n', '') for line in lines[1:]]
        test_cases = []
        j = 0
        for _ in range(input_count):
            pancakes = []
            # Case header: "n k", then n lines of "radius height".
            n, k = tuple([int(_) for _ in inputs[j].split(' ')])
            j += 1
            for _ in range(n):
                row = tuple([int(_) for _ in inputs[j].split(' ')])
                pancakes.append(row)
                j += 1
            test_cases.append((n, k, pancakes))
        i = 1
        with open(os.path.join(os.path.dirname(os.path.realpath(__file__)),
                               '{0}.out'.format(f)), 'w') as output_file:
            for test_case in test_cases:
                area = solve(*test_case)
                # Judge expects six decimal places.
                output_file.write('Case #{0}: {1:0.6f}\n'.format(i, area))
                i += 1
| [
"itertools.combinations",
"os.path.realpath"
] | [((1284, 1319), 'itertools.combinations', 'itertools.combinations', (['pancakes', 'k'], {}), '(pancakes, k)\n', (1306, 1319), False, 'import itertools\n'), ((1962, 1988), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (1978, 1988), False, 'import os\n'), ((2667, 2693), 'os.path.realpath', 'os.path.realpath', (['__file__'], {}), '(__file__)\n', (2683, 2693), False, 'import os\n')] |
# Copyright (c) OpenMMLab. All rights reserved.
import pytest
import torch
from mmcv.ops import Correlation
# Two 1x1-channel 3x3 feature maps fed to the Correlation op, the expected
# output (shape (1, 1, 1, 3, 3) with max_displacement=0, i.e. element-wise
# products), and the expected gradient flowing back into input1.
_input1 = [[[[1., 2., 3.], [0., 1., 2.], [3., 5., 2.]]]]
_input2 = [[[[1., 2., 3.], [3., 1., 2.], [8., 5., 2.]]]]
gt_out_shape = (1, 1, 1, 3, 3)
_gt_out = [[[[[1., 4., 9.], [0., 1., 4.], [24., 25., 4.]]]]]
gt_input1_grad = [[[[1., 2., 3.], [3., 1., 2.], [8., 5., 2.]]]]
def assert_equal_tensor(tensor_a, tensor_b):
    """Raise AssertionError unless the two tensors match element-wise."""
    equal_mask = tensor_a.eq(tensor_b)
    assert equal_mask.all()
class TestCorrelation:
    """Checks mmcv's Correlation op (forward and backward) on CUDA."""
    def _test_correlation(self, dtype=torch.float):
        """Run one forward/backward pass at the given dtype and compare
        against the hand-computed fixtures above."""
        # max_displacement=0 reduces the correlation to element-wise products.
        layer = Correlation(max_displacement=0)
        input1 = torch.tensor(_input1, dtype=dtype).cuda()
        input2 = torch.tensor(_input2, dtype=dtype).cuda()
        input1.requires_grad = True
        input2.requires_grad = True
        out = layer(input1, input2)
        # Seed the backward pass with all-ones so each input's gradient is
        # simply the other input (d(a*b)/da = b).
        out.backward(torch.ones_like(out))
        # `eq_cpu` is not implemented for 'Half' in torch1.5.0,
        # so we need to make a comparison for cuda tensor
        # rather than cpu tensor
        gt_out = torch.tensor(_gt_out, dtype=dtype).cuda()
        assert_equal_tensor(out, gt_out)
        assert_equal_tensor(input1.grad.detach(), input2)
        assert_equal_tensor(input2.grad.detach(), input1)
    @pytest.mark.skipif(
        not torch.cuda.is_available(), reason='requires CUDA support')
    def test_correlation(self):
        # Exercise every supported floating dtype.
        self._test_correlation(torch.float)
        self._test_correlation(torch.double)
        self._test_correlation(torch.half)
| [
"torch.ones_like",
"mmcv.ops.Correlation",
"torch.cuda.is_available",
"torch.tensor"
] | [((563, 594), 'mmcv.ops.Correlation', 'Correlation', ([], {'max_displacement': '(0)'}), '(max_displacement=0)\n', (574, 594), False, 'from mmcv.ops import Correlation\n'), ((843, 863), 'torch.ones_like', 'torch.ones_like', (['out'], {}), '(out)\n', (858, 863), False, 'import torch\n'), ((1275, 1300), 'torch.cuda.is_available', 'torch.cuda.is_available', ([], {}), '()\n', (1298, 1300), False, 'import torch\n'), ((613, 647), 'torch.tensor', 'torch.tensor', (['_input1'], {'dtype': 'dtype'}), '(_input1, dtype=dtype)\n', (625, 647), False, 'import torch\n'), ((672, 706), 'torch.tensor', 'torch.tensor', (['_input2'], {'dtype': 'dtype'}), '(_input2, dtype=dtype)\n', (684, 706), False, 'import torch\n'), ((1038, 1072), 'torch.tensor', 'torch.tensor', (['_gt_out'], {'dtype': 'dtype'}), '(_gt_out, dtype=dtype)\n', (1050, 1072), False, 'import torch\n')] |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# **************************************************************************
# Copyright © 2016 jianglin
# File Name: lazy.py
# Author: jianglin
# Email: <EMAIL>
# Created: 2016-11-08 23:02:24 (CST)
# Last Update: Wednesday 2018-09-26 10:52:51 (CST)
# By:
# Description:
# **************************************************************************
from werkzeug import import_string, cached_property
class LazyView(object):
    """Register a Flask URL rule whose view is imported lazily.

    ``name`` is the dotted import path of the view; it is only resolved
    (via werkzeug's ``import_string``) the first time the view is needed.

    Args:
        app: optional Flask app; if given, ``init_app`` runs immediately.
        url: URL rule to register.
        name: dotted path of the view function or class-based view.
        **options: extra keyword arguments forwarded to ``add_url_rule``;
            class-based views additionally require an ``endpoint`` option.
    """
    def __init__(self, app=None, url=None, name=None, **options):
        self.app = app
        self.url = url
        self.name = name
        self.options = options
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        """Attach the (lazily imported) view to the given app."""
        app.add_url_rule(self.url, view_func=self.view, **self.options)
    @cached_property
    def view(self):
        """Import and return the view callable (cached after first access)."""
        view = import_string(self.name)
        # Bug fix: the original check was ``isinstance(view, (object,))``,
        # which is True for *every* Python object, so plain function views
        # were also forced through the class-only as_view() conversion.
        # Only class-based views (classes exposing as_view) need it.
        if isinstance(view, type):
            assert self.options.get('endpoint') is not None
            endpoint = self.options.pop('endpoint')
            view = view.as_view(endpoint)
        return view
class LazyBlueprint(object):
    """Register one or many blueprints on a Flask app from dotted paths.

    ``blueprint`` is either a single name or a list/tuple of names; each is
    imported as ``module + name`` and registered with the given options.
    """
    def __init__(self, app=None, blueprint=None, module='app.', **options):
        self.app = app
        self.module = module
        self.blueprint = blueprint
        self.options = options
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        """Register the configured blueprint(s) on *app*."""
        if not isinstance(self.blueprint, (list, tuple)):
            self._single(app)
        else:
            self._multi(app)
    def _single(self, app):
        # One dotted name: import and register it directly.
        target = import_string(self.module + self.blueprint)
        app.register_blueprint(target, **self.options)
    def _multi(self, app):
        # Several names: de-duplicate (order is not guaranteed) and
        # register each one.
        for name in list(set(self.blueprint)):
            target = import_string(self.module + name)
            app.register_blueprint(target, **self.options)
class LazyExtension(object):
    """Initialise one or many Flask extensions from dotted paths.

    ``extension`` is a single name or a list/tuple of names; each is
    imported as ``module + name`` and gets ``init_app(app)`` called on it.
    """
    def __init__(self, app=None, extension=None, module='app.extensions.'):
        self.app = app
        self.module = module
        self.extension = extension
        if app is not None:
            self.init_app(app)
    def init_app(self, app):
        """Initialise the configured extension(s) against *app*."""
        if not isinstance(self.extension, (list, tuple)):
            self._single(app)
        else:
            self._multi(app)
    def _single(self, app):
        # One dotted name: import it and bind it to the app.
        import_string(self.module + self.extension).init_app(app)
    def _multi(self, app):
        # Several names: de-duplicate, then initialise each.
        for name in list(set(self.extension)):
            import_string(self.module + name).init_app(app)
| [
"werkzeug.import_string"
] | [((867, 891), 'werkzeug.import_string', 'import_string', (['self.name'], {}), '(self.name)\n', (880, 891), False, 'from werkzeug import import_string, cached_property\n'), ((1597, 1640), 'werkzeug.import_string', 'import_string', (['(self.module + self.blueprint)'], {}), '(self.module + self.blueprint)\n', (1610, 1640), False, 'from werkzeug import import_string, cached_property\n'), ((2385, 2428), 'werkzeug.import_string', 'import_string', (['(self.module + self.extension)'], {}), '(self.module + self.extension)\n', (2398, 2428), False, 'from werkzeug import import_string, cached_property\n'), ((1830, 1863), 'werkzeug.import_string', 'import_string', (['(self.module + name)'], {}), '(self.module + name)\n', (1843, 1863), False, 'from werkzeug import import_string, cached_property\n'), ((2592, 2625), 'werkzeug.import_string', 'import_string', (['(self.module + name)'], {}), '(self.module + name)\n', (2605, 2625), False, 'from werkzeug import import_string, cached_property\n')] |
from flask import Flask
#import json
import RPi.GPIO as GPIO
# One-time hardware setup: drive GPIO 21 as an output, starting LOW (LED off).
GPIO.setwarnings(False) # Ignore warning for now
GPIO.setmode(GPIO.BCM) # Use Broadcom (BCM) pin numbering, not physical board numbering
GPIO.setup(21, GPIO.OUT, initial=GPIO.LOW)
app = Flask(__name__)
@app.route("/")
def hello():
    """Index route: fixed liveness message."""
    return "Lets Have a Party"
@app.route("/<key>")
def led(key):
    """Switch the LED on GPIO 21: "1" turns it on, "0" turns it off."""
    actions = {
        "1": (GPIO.HIGH, "LED ON \n"),
        "0": (GPIO.LOW, "LED OFF \n"),
    }
    if key not in actions:
        return "Command Not Found \n"
    level, message = actions[key]
    GPIO.output(21, level)
    return message
if __name__ == "__main__":
    # Listen on every interface; debug=True enables the reloader/debugger
    # (not suitable for production).
    app.run(host='0.0.0.0',port=5000,debug=True)
| [
"RPi.GPIO.setmode",
"RPi.GPIO.setup",
"flask.Flask",
"RPi.GPIO.output",
"RPi.GPIO.setwarnings"
] | [((62, 85), 'RPi.GPIO.setwarnings', 'GPIO.setwarnings', (['(False)'], {}), '(False)\n', (78, 85), True, 'import RPi.GPIO as GPIO\n'), ((114, 136), 'RPi.GPIO.setmode', 'GPIO.setmode', (['GPIO.BCM'], {}), '(GPIO.BCM)\n', (126, 136), True, 'import RPi.GPIO as GPIO\n'), ((168, 210), 'RPi.GPIO.setup', 'GPIO.setup', (['(21)', 'GPIO.OUT'], {'initial': 'GPIO.LOW'}), '(21, GPIO.OUT, initial=GPIO.LOW)\n', (178, 210), True, 'import RPi.GPIO as GPIO\n'), ((218, 233), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (223, 233), False, 'from flask import Flask\n'), ((358, 384), 'RPi.GPIO.output', 'GPIO.output', (['(21)', 'GPIO.HIGH'], {}), '(21, GPIO.HIGH)\n', (369, 384), True, 'import RPi.GPIO as GPIO\n'), ((441, 466), 'RPi.GPIO.output', 'GPIO.output', (['(21)', 'GPIO.LOW'], {}), '(21, GPIO.LOW)\n', (452, 466), True, 'import RPi.GPIO as GPIO\n')] |
import datetime
import warnings
from copy import copy
from types import MappingProxyType
from typing import Sequence, Callable, Mapping, Union, TypeVar, TYPE_CHECKING
import numpy as np
import pandas as pd
import sidekick as sk
from .clinical_acessor import Clinical
from .metaclass import ModelMeta
from .. import fitting as fit
from .. import formulas
from ..diseases import Disease, DiseaseParams, disease as get_disease
from ..logging import log
from ..mixins import (
Meta,
WithParamsMixin,
WithDataModelMixin,
WithInfoMixin,
WithResultsMixin,
WithRegionDemography,
)
from ..packages import plt
from ..utils import today, not_implemented, extract_keys, param_property
# Generic type variable used for fluent ``self``-returning methods below.
T = TypeVar("T")
# Timestamps fixed at import time; DAY is the unit step of the simulation.
NOW = datetime.datetime.now()
TODAY = datetime.date(NOW.year, NOW.month, NOW.day)
DAY = datetime.timedelta(days=1)
# Lazily imported plotting module (avoids a hard matplotlib dependency).
pplt = sk.import_later("..plot", package=__package__)
if TYPE_CHECKING:
    from ..model_group import ModelGroup
    from pydemic_ui.model import UIProperty
class Model(
    WithDataModelMixin,
    WithInfoMixin,
    WithResultsMixin,
    WithParamsMixin,
    WithRegionDemography,
    metaclass=ModelMeta,
):
    """
    Base class for all models.
    """
    meta: Meta
    class Meta:
        # Per-subclass configuration consumed by ModelMeta: ``model_name``
        # labels the epidemic model and ``data_aliases`` maps user-facing
        # column names to internal ones (used by set_ic/set_data).
        model_name = "Model"
        data_aliases = {}
    # Initial values
    # ``state`` is the raw compartment vector; it is populated by set_ic().
    state: np.ndarray = None
    initial_cases: float = sk.lazy(lambda self: self._initial_cases())
    initial_infected: float = sk.lazy(lambda self: self._initial_infected())
    # Initial time
    date: datetime.date = None
    time: float = 0.0
    iter: int = sk.property(lambda m: len(m.data))
    dates: pd.DatetimeIndex = sk.property(lambda m: m.to_dates(m.times))
    times: pd.Index = sk.property(lambda m: m.data.index)
    # Common epidemiological parameters
    R0: float = param_property("R0", default=2.0)
    # K is the growth rate used below as ln(2)/K; subclasses must provide it.
    K = sk.property(not_implemented)
    duplication_time = property(lambda self: np.log(2) / self.K)
    # Special accessors
    clinical: Clinical = property(lambda self: Clinical(self))
    clinical_model: type = None
    clinical_params: Mapping = MappingProxyType({})
    disease: Disease = None
    disease_params: DiseaseParams = None
    @property
    def ui(self) -> "UIProperty":
        """Accessor for the optional pydemic-ui integration (lazy import)."""
        try:
            from pydemic_ui.model import UIProperty
        except ImportError as ex:
            log.warn(f"Could not import pydemic_ui.model: {ex}")
            msg = (
                "must have pydemic-ui installed to access the model.ui attribute.\n"
                "Please 'pip install pydemic-ui' before proceeding'"
            )
            raise RuntimeError(msg)
        return UIProperty(self)
    def __init__(
        self, params=None, *, run=None, name=None, date=None, clinical=None, disease=None, **kwargs
    ):
        """Build the model; any leftover keyword becomes an attribute set,
        and passing ``run=<steps>`` runs the simulation immediately."""
        self.name = name or f"{type(self).__name__} model"
        self.date = pd.to_datetime(date or today())
        self.disease = get_disease(disease)
        self._initialized = False
        # Fix demography
        demography_opts = WithRegionDemography._init_from_dict(self, kwargs)
        self.disease_params = self.disease.params(**demography_opts)
        # Init other mixins
        WithParamsMixin.__init__(self, params, keywords=kwargs)
        WithInfoMixin.__init__(self)
        WithResultsMixin.__init__(self)
        WithDataModelMixin.__init__(self)
        if clinical:
            clinical = dict(clinical)
            self.clinical_model = clinical.pop("model", None)
            self.clinical_params = clinical
        # Remaining keywords must name existing attributes; anything else
        # is rejected loudly instead of being silently dropped.
        for k, v in kwargs.items():
            if hasattr(self, k):
                try:
                    setattr(self, k, v)
                except AttributeError:
                    name = type(self).__name__
                    msg = f"cannot set '{k}' attribute in '{name}' model"
                    raise AttributeError(msg)
            else:
                raise TypeError(f"invalid arguments: {k}")
        if run is not None:
            self.run(run)
    def __str__(self):
        """Models print as their human-readable name."""
        return self.name
    def _initial_cases(self):
        # Hook for subclasses: number of cases at t=0.
        raise NotImplementedError("must be implemented in subclass")
    def _initial_infected(self):
        # Hook for subclasses: number of infected at t=0.
        raise NotImplementedError("must be implemented in subclass")
    def epidemic_model_name(self):
        """
        Return the epidemic model name.
        """
        return self.meta.model_name
    #
    # Pickling and copying
    #
    # noinspection PyUnresolvedReferences
    def copy(self, **kwargs):
        """
        Copy instance possibly setting new values for attributes.
        Keyword Args:
            All keyword arguments are used to reset attributes in the copy.
        Examples:
            >>> m.copy(R0=1.0, name="Stable")
            <SIR(name="Stable")>
        """
        cls = type(self)
        data = self.__dict__.copy()
        params = data.pop("_params")
        # The results cache is deliberately dropped: the copy starts fresh.
        data.pop("_results_cache")
        new = object.__new__(cls)
        for k in list(kwargs):
            if k in data:
                data[k] = kwargs.pop(k)
        new._params = copy(params)
        new._results_cache = {}
        new.__dict__.update(copy(data))
        for k, v in kwargs.items():
            setattr(new, k, v)
        return new
    def split(self, n=None, **kwargs) -> "ModelGroup":
        """
        Create n copies of model, each one may override a different set of
        parameters and return a ModelGroup.
        Args:
            n:
                Number of copies in the resulting list. It can also be a sequence
                of dictionaries with arguments to pass to the .copy() constructor.
        Keyword Args:
            Keyword arguments are passed to the `.copy()` method of the model. If
            the keyword is a sequence, it applies the n-th component of the sequence
            to the corresponding n-th model.
        """
        from ..model_group import ModelGroup
        # Infer n from the first sequence-valued keyword when not given.
        if n is None:
            for k, v in kwargs.items():
                if not isinstance(v, str) and isinstance(v, Sequence):
                    n = len(v)
                    break
            else:
                raise TypeError("cannot determine the group size from arguments")
        if isinstance(n, int):
            options = [{} for _ in range(n)]
        else:
            options = [dict(d) for d in n]
            n: int = len(options)
        # Merge option dicts
        for k, v in kwargs.items():
            if not isinstance(v, str) and isinstance(v, Sequence):
                xs = v
                m = len(xs)
                if m != n:
                    raise ValueError(
                        f"sizes do not match: "
                        f"{k} should be a sequence of {n} "
                        f"items, got {m}"
                    )
                for opt, x in zip(options, xs):
                    opt.setdefault(k, x)
            else:
                for opt in options:
                    opt.setdefault(k, v)
        # Fix name
        for opt in options:
            try:
                name = opt["name"]
            except KeyError:
                pass
            else:
                # Names may be templates like "model {n}"; format them here.
                opt["name"] = name.format(n=n, **opt)
        return ModelGroup(self.copy(**opt) for opt in options)
    def split_children(self, options=MappingProxyType({}), **kwargs) -> "ModelGroup":
        """
        Similar to split, but split into the children of the given class.
        Args:
            options:
                A mapping between region or region id
        """
        from ..model_group import ModelGroup
        if self.region is None:
            raise ValueError("model is not bound to a region")
        # Propagate current parameter values unless explicitly overridden.
        for k in self._params:
            if k not in kwargs:
                kwargs[k] = self.get_param(k)
        for attr in ("disease",):
            kwargs.setdefault(attr, getattr(self, attr))
        return ModelGroup.from_children(self.region, type(self), options, **kwargs)
    def reset(self, date: Union[datetime.date, float] = None, **kwargs):
        """
        Return a copy of the model setting the state to the final state. If a
        positional "date" argument is given, reset to the state to the one in the
        specified date.
        Args:
            date (float or date):
                An optional float or datetime selecting the desired date.
        Keyword Args:
            Additional keyword arguments are handled the same way as the
            :method:`copy` method.
        """
        if date is None:
            date = self.date
            time = self.time
        elif isinstance(date, (float, int)):
            time = float(date)
            date = self.to_date(date)
        else:
            time: float = self.to_time(date)
        kwargs["data"] = self.data.loc[[time]]
        kwargs["date"] = date
        kwargs["state"] = kwargs["data"].iloc[0].values
        # NOTE(review): time is reset to 1 here rather than 0 -- confirm
        # this offset is intentional before relying on it.
        kwargs["time"] = 1
        return self.copy(**kwargs)
    def trim_dates(self, start=0, end=None):
        """
        Trim data in model to the given interval specified by start and end
        dates or times.
        Args:
            start (int or date):
                Starting date. If not given, start at zero.
            end (int or date):
                End date. If not given, select up to the final date.
        """
        start = int(start or 0)
        end = int(end or self.time)
        new = self.copy(
            date=self.to_date(start),
            data=self.data.iloc[start:end].reset_index(drop=True),
            time=end - start,
            state=self.data.iloc[end].values,
        )
        return new
    #
    # Initial conditions
    #
    def set_ic(self, state=None, **kwargs):
        """
        Set initial conditions.
        """
        if self.state is None:
            if state is None:
                state = self.initial_state(**kwargs)
            self.state = np.array(state, dtype=float)
        # Translate user-facing keyword names into internal column names.
        alias = self.meta.data_aliases
        for k, v in list(kwargs.items()):
            if k in alias:
                del kwargs[k]
                kwargs[alias[k]] = v
        # Overwrite individual state components named in the keywords.
        components = extract_keys(self.meta.variables, kwargs)
        for k, v in components.items():
            idx = self.meta.get_variable_index(k)
            self.state[idx] = v
        return self
    def set_data(self, data):
        """
        Force a dataframe into simulation state.
        """
        data = data.copy()
        data.columns = [self.meta.data_aliases.get(c, c) for c in data.columns]
        self.set_ic(state=data.iloc[0])
        self.data = data.reset_index(drop=True)
        self.time = len(data) - 1
        self.date = data.index[-1]
        self.state[:] = data.iloc[-1]
        # Remember the observed window (first and last dates of the input).
        self.info["observed.dates"] = data.index[[0, -1]]
        self._initialized = True
        return self
    def set_cases_from_region(self: T) -> T:
        """
        Set the number of cases from region.
        """
        self.set_cases()
        return self
    def set_cases(self: T, curves=None, adjust_R0=False, save_observed=False) -> T:
        """
        Initialize model from a dataframe with the deaths and cases curve.
        This curve is usually the output of disease.epidemic_curve(region), and is
        automatically retrieved if not passed explicitly and the region of the model
        is set.
        Args:
            curves:
                Dataframe with cumulative ["cases", "deaths"] columns. If not given,
                or None, fetches from disease.epidemic_curves(info)
            adjust_R0:
                If true, adjust R0 from the observed cases.
            save_observed:
                If true, save the cases curves into the model.info["observed.cases"] key.
        """
        if curves is None:
            warnings.warn("omitting curves from set_cases will be deprecated.")
            if self.region is None or self.disease is None:
                msg = 'must provide both "region" and "disease" or an explicit cases ' "curve."
                raise ValueError(msg)
            curves = self.region.pydemic.epidemic_curve(self.disease)
        if adjust_R0:
            warnings.warn("adjust_R0 argument is deprecated")
            method = "RollingOLS" if adjust_R0 is True else adjust_R0
            Re, _ = value = fit.estimate_R0(self, curves, Re=True, method=method)
            assert np.isfinite(Re), f"invalid value for R0: {value}"
            self.R0 = Re
        # Save notification it in the info dictionary for reference
        if "cases_observed" in curves:
            tf = curves.index[-1]
            rate = curves.loc[tf, "cases_observed"] / curves.loc[tf, "cases"]
        else:
            rate = 1.0
        self.info["observed.notification_rate"] = rate
        # Save simulation state from data
        model = self.epidemic_model_name()
        curve = fit.cases(curves)
        data = fit.epidemic_curve(model, curve, self)
        self.set_data(data)
        self.initial_cases = curve.iloc[0]
        if adjust_R0:
            # Rescale R0 by the remaining susceptible fraction.
            self.R0 /= self["susceptible:final"] / self.population
            self.info["observed.R0"] = self.R0
        # Optionally save cases curves into the info dictionary
        if save_observed:
            key = "observed.curves" if save_observed is True else save_observed
            df = curves.rename(columns={"cases": "cases_raw"})
            df["cases"] = curve
            self.info[key] = df
        return self
    def adjust_R0(self, method="RollingOLS"):
        """Re-estimate R0 from the model's own cases curve and record it."""
        curves = self["cases"]
        self.R0, _ = fit.estimate_R0(self, curves, method=method)
        self.info["observed.R0"] = self.R0
    def initial_state(self, cases=None, **kwargs):
        """
        Create the default initial vector for model.
        """
        if cases is not None:
            kwargs.setdefault("population", self.population)
            return formulas.initial_state(self.epidemic_model_name(), cases, self, **kwargs)
        return self._initial_state()
    def infect(self, n=1, column="infectious"):
        """
        Convert 'n' susceptible individuals to infectious.
        """
        last = self.data.index[-1]
        # Never move more people than are actually susceptible.
        n = min(n, self.data.loc[last, "susceptible"])
        self.data.loc[last, column] += n
        self.data.loc[last, "susceptible"] -= n
        return self
    def _initial_state(self):
        # Hook for subclasses: default state vector when no cases are given.
        raise NotImplementedError
    def initialize(self):
        """
        Force initialization.
        """
        if not self._initialized:
            self.set_ic()
            self.data = make_dataframe(self)
            self._initialized = True
    #
    # Running simulation
    #
    def run(self: T, time) -> T:
        """
        Runs the model for the given duration.
        """
        steps = int(time)
        self.initialize()
        # NOTE(review): returns None (not self) when time == 0, unlike the
        # fluent return below -- confirm callers do not chain on run(0).
        if time == 0:
            return
        _, *shape = self.data.shape
        # Pre-allocate the result block and let the subclass fill it in.
        ts = self.time + 1.0 + np.arange(steps)
        data = np.zeros((steps, *shape))
        date = self.date
        if self.info.get("event.simulation_start") is None:
            self.info.save_event("simulation_start")
        self.run_to_fill(data, ts)
        extra = pd.DataFrame(data, columns=self.data.columns, index=ts)
        self.data = pd.concat([self.data, extra])
        self.date = date + time * DAY
        self.time = ts[-1]
        self.state = data[-1]
        return self
    def run_to_fill(self: T, data, times) -> T:
        """
        Run simulation to fill pre-allocated array of data.
        """
        raise NotImplementedError
    def run_until(self, condition: Callable[["Model"], bool]):
        """
        Run until stop condition is satisfied.
        Args:
            condition:
                A function that receives a model and return True if stop
                criteria is satisfied.
        """
        raise NotImplementedError
    #
    # Utility methods
    #
    def to_dates(self, times: Sequence[int], start_date=None) -> pd.DatetimeIndex:
        """
        Convert an array of numerical times to dates.
        Args:
            times:
                Sequence of times.
            start_date:
                Starting date. If not given, uses the starting date for
                simulation.
        """
        dates: pd.DatetimeIndex
        if isinstance(times, pd.DatetimeIndex):
            return times
        if start_date is None:
            start_date = self.date - self.time * DAY
        # noinspection PyTypeChecker
        return pd.to_datetime(times, unit="D", origin=start_date)
    def to_date(self, time: Union[float, int]) -> datetime.date:
        """
        Convert a single instant to the corresponding datetime
        """
        return pd.to_datetime(time - self.time, unit="D", origin=self.date)
    def to_times(self, dates: Sequence, start_date=None) -> np.ndarray:
        """
        Convert an array of numerical times to dates.
        Args:
            dates:
                Sequence of dates.
            start_date:
                Starting date. If not given, uses the starting date for
                simulation.
        """
        if start_date is None:
            start_date = self.date - self.time * DAY
        data = [(date - start_date).days for date in dates]
        return np.array(data) if data else np.array([], dtype=int)
    def to_time(self, date, start_date=None) -> float:
        """
        Convert date to time.
        """
        if start_date is None:
            return self.to_time(date, self.date) - self.time
        return float((date - start_date).days)
    def get_times(self, idx=None):
        """
        Get times possibly sliced by an index.
        """
        if idx is None:
            return self.times
        else:
            return self.times[idx]
    def get_data_time(self, idx):
        """Return the (possibly sliced) time axis as a Series indexed by itself."""
        times = self.get_times(idx)
        return pd.Series(times, index=times)
    def get_data_date(self, idx):
        """Return the (possibly sliced) time axis converted to dates."""
        times = self.get_times(idx)
        dates = self.to_dates(times)
        return pd.Series(dates, index=times)
    def get_data_cases(self, idx):
        # Hook for subclasses: cases curve accessor.
        raise NotImplementedError
    #
    # Plotting and showing information
    #
    def plot(
        self,
        components=None,
        *,
        ax=None,
        logy=False,
        show=False,
        dates=False,
        legend=True,
        grid=True,
    ):
        """
        Plot the result of simulation.
        """
        ax = ax or plt.gca()
        kwargs = {"logy": logy, "ax": ax, "grid": grid, "legend": legend}
        def get_column(col):
            # The ":dates" suffix switches the x-axis from times to dates.
            if dates:
                col += ":dates"
            data = self[col]
            return data
        components = self.meta.variables if components is None else components
        for col in components:
            data = get_column(col)
            data.plot(**kwargs)
        if show:
            plt.show()
def make_dataframe(model: Model):
    """Build the one-row dataframe holding the model's current state.

    Columns come from the model's declared variables and the single row is
    indexed by the model's current time.
    """
    return pd.DataFrame(
        [model.state],
        columns=model.meta.variables,
        index=[model.time],
    )
| [
"numpy.arange",
"pandas.DataFrame",
"pydemic_ui.model.UIProperty",
"sidekick.property",
"numpy.isfinite",
"datetime.timedelta",
"typing.TypeVar",
"sidekick.import_later",
"datetime.datetime.now",
"pandas.concat",
"types.MappingProxyType",
"datetime.date",
"pandas.to_datetime",
"pandas.Seri... | [((704, 716), 'typing.TypeVar', 'TypeVar', (['"""T"""'], {}), "('T')\n", (711, 716), False, 'from typing import Sequence, Callable, Mapping, Union, TypeVar, TYPE_CHECKING\n'), ((723, 746), 'datetime.datetime.now', 'datetime.datetime.now', ([], {}), '()\n', (744, 746), False, 'import datetime\n'), ((755, 798), 'datetime.date', 'datetime.date', (['NOW.year', 'NOW.month', 'NOW.day'], {}), '(NOW.year, NOW.month, NOW.day)\n', (768, 798), False, 'import datetime\n'), ((805, 831), 'datetime.timedelta', 'datetime.timedelta', ([], {'days': '(1)'}), '(days=1)\n', (823, 831), False, 'import datetime\n'), ((839, 885), 'sidekick.import_later', 'sk.import_later', (['"""..plot"""'], {'package': '__package__'}), "('..plot', package=__package__)\n", (854, 885), True, 'import sidekick as sk\n'), ((1698, 1733), 'sidekick.property', 'sk.property', (['(lambda m: m.data.index)'], {}), '(lambda m: m.data.index)\n', (1709, 1733), True, 'import sidekick as sk\n'), ((1833, 1861), 'sidekick.property', 'sk.property', (['not_implemented'], {}), '(not_implemented)\n', (1844, 1861), True, 'import sidekick as sk\n'), ((2078, 2098), 'types.MappingProxyType', 'MappingProxyType', (['{}'], {}), '({})\n', (2094, 2098), False, 'from types import MappingProxyType\n'), ((19097, 19142), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'cols', 'index': 'index'}), '(data, columns=cols, index=index)\n', (19109, 19142), True, 'import pandas as pd\n'), ((2620, 2636), 'pydemic_ui.model.UIProperty', 'UIProperty', (['self'], {}), '(self)\n', (2630, 2636), False, 'from pydemic_ui.model import UIProperty\n'), ((5047, 5059), 'copy.copy', 'copy', (['params'], {}), '(params)\n', (5051, 5059), False, 'from copy import copy\n'), ((7279, 7299), 'types.MappingProxyType', 'MappingProxyType', (['{}'], {}), '({})\n', (7295, 7299), False, 'from types import MappingProxyType\n'), ((14946, 14971), 'numpy.zeros', 'np.zeros', (['(steps, *shape)'], {}), '((steps, *shape))\n', (14954, 14971), True, 
'import numpy as np\n'), ((15163, 15218), 'pandas.DataFrame', 'pd.DataFrame', (['data'], {'columns': 'self.data.columns', 'index': 'ts'}), '(data, columns=self.data.columns, index=ts)\n', (15175, 15218), True, 'import pandas as pd\n'), ((15240, 15269), 'pandas.concat', 'pd.concat', (['[self.data, extra]'], {}), '([self.data, extra])\n', (15249, 15269), True, 'import pandas as pd\n'), ((16503, 16553), 'pandas.to_datetime', 'pd.to_datetime', (['times'], {'unit': '"""D"""', 'origin': 'start_date'}), "(times, unit='D', origin=start_date)\n", (16517, 16553), True, 'import pandas as pd\n'), ((16722, 16782), 'pandas.to_datetime', 'pd.to_datetime', (['(time - self.time)'], {'unit': '"""D"""', 'origin': 'self.date'}), "(time - self.time, unit='D', origin=self.date)\n", (16736, 16782), True, 'import pandas as pd\n'), ((17883, 17912), 'pandas.Series', 'pd.Series', (['times'], {'index': 'times'}), '(times, index=times)\n', (17892, 17912), True, 'import pandas as pd\n'), ((18036, 18065), 'pandas.Series', 'pd.Series', (['dates'], {'index': 'times'}), '(dates, index=times)\n', (18045, 18065), True, 'import pandas as pd\n'), ((5120, 5130), 'copy.copy', 'copy', (['data'], {}), '(data)\n', (5124, 5130), False, 'from copy import copy\n'), ((9891, 9919), 'numpy.array', 'np.array', (['state'], {'dtype': 'float'}), '(state, dtype=float)\n', (9899, 9919), True, 'import numpy as np\n'), ((11780, 11847), 'warnings.warn', 'warnings.warn', (['"""omitting curves from set_cases will be deprecated."""'], {}), "('omitting curves from set_cases will be deprecated.')\n", (11793, 11847), False, 'import warnings\n'), ((12147, 12196), 'warnings.warn', 'warnings.warn', (['"""adjust_R0 argument is deprecated"""'], {}), "('adjust_R0 argument is deprecated')\n", (12160, 12196), False, 'import warnings\n'), ((12368, 12383), 'numpy.isfinite', 'np.isfinite', (['Re'], {}), '(Re)\n', (12379, 12383), True, 'import numpy as np\n'), ((14913, 14929), 'numpy.arange', 'np.arange', (['steps'], {}), '(steps)\n', 
(14922, 14929), True, 'import numpy as np\n'), ((17286, 17300), 'numpy.array', 'np.array', (['data'], {}), '(data)\n', (17294, 17300), True, 'import numpy as np\n'), ((17314, 17337), 'numpy.array', 'np.array', (['[]'], {'dtype': 'int'}), '([], dtype=int)\n', (17322, 17337), True, 'import numpy as np\n'), ((1907, 1916), 'numpy.log', 'np.log', (['(2)'], {}), '(2)\n', (1913, 1916), True, 'import numpy as np\n')] |
""" Testing module for the Proof-Of-Work implementation
of the blockchain client.
"""
import hashlib
import math
import time
from queue import Queue
from queue import Empty
import nacl.encoding
import nacl.signing
from chains import Transaction, Block, Header, PoW_Blockchain
import utils
VERSION = 0.7
class TestPOW(object):
""" Testcase used to bundle all tests for the
Proof-Of-Work blockchain
"""
def setup(self):
""" Setup of the blockchain for the tests.
"""
utils.set_debug()
self.sends = Queue()
self.gui_queue = Queue()
self.blockchain = PoW_Blockchain(VERSION, self.sends, self.gui_queue)
self.sender_sign = nacl.signing.SigningKey(seed=b'a' * 32)
self.sender_verify = self.sender_sign.verify_key.encode(
nacl.encoding.HexEncoder)
self.receiver_sign = nacl.signing.SigningKey(seed=b'b' * 32)
self.receiver_verify = self.receiver_sign.verify_key.encode(
nacl.encoding.HexEncoder)
def test_block(self):
""" Test that the block creation works as intended.
"""
proof = self.blockchain.create_proof(self.sender_verify)
block = self.blockchain.create_block(proof)
mining_transaction = \
Transaction(sender='0',
recipient=self.sender_verify,
amount=50,
fee=0,
timestamp=time.time(),
signature='0')
block.transactions.append(mining_transaction)
root_hash = self.blockchain.create_merkle_root(block.transactions)
real_header = Header(
block.header.version,
block.header.index,
block.header.timestamp,
block.header.previous_hash,
root_hash,
block.header.proof
)
real_block = Block(real_header, block.transactions)
self.blockchain.new_block(real_block)
assert self.blockchain.latest_block() == real_block
assert (mining_transaction in
self.blockchain.latest_block().transactions)
assert self.blockchain.check_balance(
self.sender_verify, time.time()) == 50
def test_transaction_invalid_balance(self):
""" Test that the transactions with invalid balances are recognized and
not added to the blockchain.
"""
transaction = self.create_transaction()
assert not self.blockchain.validate_transaction(transaction, False)
self.blockchain.new_transaction(transaction)
assert transaction not in self.blockchain.transaction_pool
assert self.sends.empty()
def test_transaction_invalid_signature(self):
""" Test that the transactions with invalid signatures are recognized
and not added to the blockchain.
"""
self.mine_block(self.blockchain)
transaction = self.create_transaction()
transaction = Transaction(
transaction.sender,
transaction.recipient,
transaction.amount,
transaction.fee,
transaction.timestamp,
self.receiver_sign.sign(
self.create_transaction_hash(
transaction.amount,
transaction.fee,
transaction.timestamp
).encode()
)
)
assert not self.blockchain.validate_transaction(transaction, False)
self.blockchain.new_transaction(transaction)
assert transaction not in self.blockchain.transaction_pool
assert self.sends.empty()
def test_transaction_invalid_double(self):
    """The same transaction must validate only once; a second
    validation attempt (double spend) must fail.
    """
    self.mine_block(self.blockchain)
    tx = self.create_transaction()
    assert self.blockchain.validate_transaction(tx, False)
    self.blockchain.new_transaction(tx)
    assert tx in self.blockchain.transaction_pool
    assert not self.sends.empty()
    # Second submission of the identical transaction is rejected.
    assert not self.blockchain.validate_transaction(tx, False)
def test_transaction_valid(self):
    """A well-formed transaction from a funded sender validates, enters
    the pool and produces an outgoing broadcast message.
    """
    self.mine_block(self.blockchain)
    tx = self.create_transaction()
    assert self.blockchain.validate_transaction(tx, False)
    self.blockchain.new_transaction(tx)
    assert tx in self.blockchain.transaction_pool
    assert not self.sends.empty()
def test_new_header(self, capsys):
""" Test that a new incoming header is processed accordingly.
Three cases: a valid successor header triggers a 'get_block'
request, an invalid header only prints a debug message, and a
header far ahead of the local chain triggers a 'get_chain' request.
"""
with capsys.disabled():
proof = self.blockchain.create_proof(self.sender_verify)
last_header = self.blockchain.latest_header()
# Valid: next index with the current root hash as previous-hash,
# so the peer is asked for the matching block.
new_header = Header(0,
1,
time.time(),
last_header.root_hash,
123,
proof
)
self.blockchain.process_message(('new_header',
new_header,
''))
assert self.sends.get() == ('get_block', new_header, 'broadcast')
# Invalid: previous-hash field (321) does not match the chain,
# so only a debug line is printed and nothing is requested.
new_header = Header(0,
1,
time.time(),
321,
123,
proof
)
self.blockchain.process_message(('new_header',
new_header,
''))
captured = capsys.readouterr()
assert captured.out == '### DEBUG ### Invalid header\n'
# Farther away: index 123 is far ahead of the local chain, so the
# whole header chain is requested instead of a single block.
new_header = Header(0,
123,
time.time(),
321,
last_header.root_hash,
proof
)
self.blockchain.process_message(('new_header',
new_header,
''))
assert self.sends.get() == ('get_chain', '', 'broadcast')
def test_get_block(self):
    """get_block must round-trip via latest_block and be falsy for an
    invalid header.
    """
    latest = self.blockchain.latest_block()
    assert latest == self.blockchain.get_block(latest.header)
    # An invalid header yields a falsy result (None).
    assert not self.blockchain.get_block('')
def test_send_block(self):
    """send_block must enqueue a 'new_block' message addressed to the
    requesting peer.
    """
    self.mine_block(self.blockchain)
    latest = self.blockchain.latest_block()
    self.blockchain.send_block(latest.header, '123')
    assert self.sends.get() == ('new_block', latest, '123')
def test_merkle_root(self):
    """The Merkle root must not depend on the order of the transaction
    list (only on the transactions themselves).
    """
    txs = [self.create_transaction() for _ in range(15)]
    forward = self.blockchain.create_merkle_root(txs)
    backward = self.blockchain.create_merkle_root(list(reversed(txs)))
    assert forward == backward
def test_msg_transaction(self):
    """process_message must accept a 'new_transaction' message and add
    the transaction to the pool.
    """
    self.mine_block(self.blockchain)
    tx = self.create_transaction()
    self.blockchain.process_message(('new_transaction', tx, ''))
    assert tx in self.blockchain.transaction_pool
def test_resolve_conflict(self):
""" Test that resolve conflict works
A longer secondary chain is built; resolve_conflict must track it
as the candidate new chain, and feeding its blocks in must make the
initial chain converge to it.
"""
# Initial chain: one funded block, one transaction, one more block.
self.mine_block(self.blockchain)
t = self.create_transaction()
self.blockchain.new_transaction(t)
self.mine_block(self.blockchain)
# Secondary chain with its own (empty) send/GUI queues.
bchain2 = PoW_Blockchain(VERSION,
Queue(),
Queue()
)
# Fill secondary chain so it becomes longer than the initial one.
for _ in range(3):
self.mine_block(bchain2)
bchain2.new_transaction(t)
bchain2.process_message(('mine', self.sender_verify, 'local'))
# Check new_chain of the initial blockchain: after seeing the longer
# header chain, its candidate chain must end in bchain2's tip.
self.blockchain.resolve_conflict(bchain2.get_header_chain())
assert bchain2.latest_header() == self.blockchain.nc_latest_header()
# Add to secondary chain, to test "pre-filling" of new_chain
for _ in range(3):
self.mine_block(bchain2)
self.blockchain.resolve_conflict(bchain2.get_header_chain())
assert bchain2.latest_header() == self.blockchain.nc_latest_header()
# Chain exchange: replaying bchain2's blocks makes both chains equal.
for b in bchain2.get_block_chain():
self.blockchain.new_block(b)
assert bchain2.latest_block() == self.blockchain.latest_block()
# ####################### HELPER FUNCTIONS ###########################
def mine_block(self, chain):
""" Mine an initial block to add a balance to the test account.
Args:
chain: Chain to mine on
"""
proof = chain.create_proof(self.sender_verify)
block = chain.create_block(proof)
# Coinbase/reward transaction that funds the test sender with 50.
block.transactions.append(
Transaction(sender='0',
recipient=self.sender_verify,
amount=50,
fee=0,
timestamp=time.time(),
signature='0'))
# Rebuild the header so it carries the Merkle root of the final
# transaction list, then commit the block to the chain.
root_hash = chain.create_merkle_root(block.transactions)
real_header = Header(
block.header.version,
block.header.index,
block.header.timestamp,
block.header.previous_hash,
root_hash,
block.header.proof
)
real_block = Block(real_header, block.transactions)
chain.new_block(real_block)
# Drain the outbound queue (non-blocking) so later assertions only
# see messages produced by the test itself.
try:
self.sends.get(block=False) # Remove new_block message
except Empty:
pass
def create_transaction(self):
    """Create a simple, correctly signed test transaction.

    Returns:
        A new Transaction from the test sender to the test receiver
        over 10 coins with a 5% (rounded-up) fee.
    """
    amount = 10
    now = time.time()
    fee = math.ceil(amount * 0.05)
    digest = self.create_transaction_hash(amount, fee, now)
    signature = self.sender_sign.sign(digest.encode())
    return Transaction(
        self.sender_verify,
        self.receiver_verify,
        amount,
        fee,
        now,
        signature,
    )
def create_transaction_hash(self, amount, fee, timestamp):
    """Hash the transaction fields the way the tests sign them.

    Args:
        amount: Amount of coins for the transaction.
        fee: Fee for the transaction.
        timestamp: Time of the transaction.
    Returns:
        Hex SHA-256 digest of sender + receiver + amount + fee + timestamp.
    """
    payload = (str(self.sender_verify) + str(self.receiver_verify) +
               str(amount) + str(fee) + str(timestamp))
    return hashlib.sha256(payload.encode()).hexdigest()
| [
"math.ceil",
"time.time",
"chains.PoW_Blockchain",
"chains.Block",
"utils.set_debug",
"queue.Queue",
"chains.Header"
] | [((512, 529), 'utils.set_debug', 'utils.set_debug', ([], {}), '()\n', (527, 529), False, 'import utils\n'), ((552, 559), 'queue.Queue', 'Queue', ([], {}), '()\n', (557, 559), False, 'from queue import Queue\n'), ((586, 593), 'queue.Queue', 'Queue', ([], {}), '()\n', (591, 593), False, 'from queue import Queue\n'), ((620, 671), 'chains.PoW_Blockchain', 'PoW_Blockchain', (['VERSION', 'self.sends', 'self.gui_queue'], {}), '(VERSION, self.sends, self.gui_queue)\n', (634, 671), False, 'from chains import Transaction, Block, Header, PoW_Blockchain\n'), ((1661, 1796), 'chains.Header', 'Header', (['block.header.version', 'block.header.index', 'block.header.timestamp', 'block.header.previous_hash', 'root_hash', 'block.header.proof'], {}), '(block.header.version, block.header.index, block.header.timestamp,\n block.header.previous_hash, root_hash, block.header.proof)\n', (1667, 1796), False, 'from chains import Transaction, Block, Header, PoW_Blockchain\n'), ((1896, 1934), 'chains.Block', 'Block', (['real_header', 'block.transactions'], {}), '(real_header, block.transactions)\n', (1901, 1934), False, 'from chains import Transaction, Block, Header, PoW_Blockchain\n'), ((9849, 9984), 'chains.Header', 'Header', (['block.header.version', 'block.header.index', 'block.header.timestamp', 'block.header.previous_hash', 'root_hash', 'block.header.proof'], {}), '(block.header.version, block.header.index, block.header.timestamp,\n block.header.previous_hash, root_hash, block.header.proof)\n', (9855, 9984), False, 'from chains import Transaction, Block, Header, PoW_Blockchain\n'), ((10084, 10122), 'chains.Block', 'Block', (['real_header', 'block.transactions'], {}), '(real_header, block.transactions)\n', (10089, 10122), False, 'from chains import Transaction, Block, Header, PoW_Blockchain\n'), ((10470, 10481), 'time.time', 'time.time', ([], {}), '()\n', (10479, 10481), False, 'import time\n'), ((10496, 10520), 'math.ceil', 'math.ceil', (['(amount * 0.05)'], {}), '(amount * 0.05)\n', 
(10505, 10520), False, 'import math\n'), ((5593, 5604), 'time.time', 'time.time', ([], {}), '()\n', (5602, 5604), False, 'import time\n'), ((6111, 6122), 'time.time', 'time.time', ([], {}), '()\n', (6120, 6122), False, 'import time\n'), ((8184, 8191), 'queue.Queue', 'Queue', ([], {}), '()\n', (8189, 8191), False, 'from queue import Queue\n'), ((8226, 8233), 'queue.Queue', 'Queue', ([], {}), '()\n', (8231, 8233), False, 'from queue import Queue\n'), ((1456, 1467), 'time.time', 'time.time', ([], {}), '()\n', (1465, 1467), False, 'import time\n'), ((2220, 2231), 'time.time', 'time.time', ([], {}), '()\n', (2229, 2231), False, 'import time\n'), ((5060, 5071), 'time.time', 'time.time', ([], {}), '()\n', (5069, 5071), False, 'import time\n'), ((9708, 9719), 'time.time', 'time.time', ([], {}), '()\n', (9717, 9719), False, 'import time\n')] |
import os
import torch
from torchvision import datasets
from utils.boxlist import BoxList
import cv2
import numpy as np
import random
def has_only_empty_bbox(annot):
    """Return True when every annotation's box has width or height <= 1 px.

    An annotation is "empty" when at least one of the trailing bbox
    entries (width, height) is <= 1.
    """
    for obj in annot:
        if all(dim > 1 for dim in obj['bbox'][2:]):
            # This box has real area, so the list is not empty-only.
            return False
    return True
def has_valid_annotation(annot):
    """An annotation list is usable iff it is non-empty and contains at
    least one box that is not degenerate (width and height > 1 px).
    """
    return len(annot) > 0 and not has_only_empty_bbox(annot)
class DOTADataset(datasets.CocoDetection):
"""COCO-format dataset wrapper for DOTA-style oriented-box detection.
Images are read with OpenCV; each target is a BoxList built from
8-value (four corner points) boxes, converted to 'xywha_d' format.
"""
# Class names; index 0 is the background class.
NAME_TAB = ('__background__', 'plane', 'baseball-diamond', 'bridge', 'ground-track-field',
'small-vehicle', 'large-vehicle', 'ship',
'tennis-court', 'basketball-court',
'storage-tank', 'soccer-ball-field',
'roundabout', 'harbor',
'swimming-pool', 'helicopter')
def __init__(self, path, split,image_folder_name, anno_folder_name,transform=None):
"""
path : dataset folder path
dataset structure:
├── dataset_path
│ ├── annotations
│ │ ├── anno_folder_name +'train'.json
│ │ ├── anno_folder_name + 'val'.json
│ │ ├── anno_folder_name + 'test'.json
│ ├── image_folder_name+'train'
│ ├── image_folder_name+'val'
│ ├── image_folder_name+'test'
"""
root, annot = self.get_root_annotation_path(path,split,image_folder_name,anno_folder_name)
super().__init__(root, annot)
self.ids = sorted(self.ids)
# For training (and validation-loss) splits, drop images whose
# annotations are missing or contain only degenerate (<=1 px) boxes.
if split == 'train' or split == 'val_loss':
ids = []
for id in self.ids:
ann_ids = self.coco.getAnnIds(imgIds=id, iscrowd=None)
annot = self.coco.loadAnns(ann_ids)
if has_valid_annotation(annot):
ids.append(id)
self.ids = ids
# COCO category ids -> contiguous 1-based labels (0 = background),
# plus the inverse map and index -> image-id lookup.
self.category2id = {v: i + 1 for i, v in enumerate(self.coco.getCatIds())}
self.id2category = {v: k for k, v in self.category2id.items()}
self.id2img = {k: v for k, v in enumerate(self.ids)}
self.transformer = transform
def set_transform(self,transform):
self.transformer = transform
def get_root_annotation_path(self,path,split,image_folder_name,anno_folder_name):
'''
root : image dir
annot: annotation file path
'''
self.split = split
self.anno_folder_name = anno_folder_name
self.image_folder_name = image_folder_name
'''split: train, val, test'''
# 'val_loss' reuses the 'val' images/annotations on disk.
if split == 'val_loss':
annot = os.path.join(path, 'annotations', f"{self.anno_folder_name}val.json")
root = os.path.join(path, f'{self.image_folder_name}val')
else:
annot = os.path.join(path, 'annotations', f'{self.anno_folder_name}{split}.json')
root = os.path.join(path, f'{self.image_folder_name}{split}')
return root, annot
def __getitem__(self, index, transform_enable=True):
# index may be a (index, transform_enable) pair to bypass transforms.
# NOTE(review): a plain integer index always re-enables the
# transform, ignoring the transform_enable default argument.
if isinstance(index, tuple) or isinstance(index, list):
transform_enable = index[1]
index = index[0]
else:
transform_enable = True
coco = self.coco
img_id = self.ids[index]
ann_ids = coco.getAnnIds(imgIds=img_id)
annots = coco.loadAnns(ann_ids)
path = coco.loadImgs(img_id)[0]['file_name']
img = cv2.imread(os.path.join(self.root, path), cv2.IMREAD_UNCHANGED)
if img.ndim == 2:
# if single channel image, then convert to BGR
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
elif img.ndim == 3:
pass
else:
raise RuntimeError("{} channel image not supported".format(img.ndim))
height, width,_ = img.shape
# Keep only non-crowd annotations; each box is 8 values
# (four (x, y) corner points).
annots = [o for o in annots if o['iscrowd'] == 0]
boxes = [o['bbox'] for o in annots]
boxes = torch.as_tensor(boxes).reshape(-1, 8)
#target = BoxList(boxes, (width,height), mode='xyxyxyxy').convert('xywha')
target = BoxList(boxes, (width,height), mode='xyxyxyxy')
target = target.change_order_to_clockwise()
target = target.convert('xywha_d')
#target = target.convert('xywha')
classes = [o['category_id'] for o in annots]
classes = [self.category2id[c] for c in classes]
classes = torch.tensor(classes)
# target.fields['labels'] = classes
target.add_field('labels', classes)
target = target.clip_to_image(remove_empty=True)
if self.transformer is not None and transform_enable:
img, target = self.transformer(img, target)
return img, target, index, path
def get_image_meta(self, index):
# Map the dataset index back to the COCO image record (metadata dict).
id = self.id2img[index]
img_data = self.coco.imgs[id]
return img_data
class ImageList:
    """A batched (padded) image tensor plus the original per-image sizes."""

    def __init__(self, tensors, sizes):
        # `tensors` is the padded batch, `sizes` the pre-padding (h, w) pairs.
        self.tensors = tensors
        self.sizes = sizes

    def to(self, *args, **kwargs):
        """Return a new ImageList whose batch tensor was moved/cast via .to()."""
        moved = self.tensors.to(*args, **kwargs)
        return ImageList(moved, self.sizes)
def image_list(tensors, size_divisible=0):
    """Pad a list of CHW image tensors into one batched tensor.

    H and W are padded up to the batch maximum (optionally rounded up to a
    multiple of `size_divisible`, assumed to be a power of two). Returns an
    ImageList carrying the batch and the original (h, w) sizes.
    """
    max_size = [max(dims) for dims in zip(*(t.shape for t in tensors))]
    if size_divisible > 0:
        stride = size_divisible
        # Round H (axis 1) and W (axis 2) up to the next multiple of stride.
        for axis in (1, 2):
            if max_size[axis] % stride != 0:
                max_size[axis] = (max_size[axis] | (stride - 1)) + 1
    batch = tensors[0].new(len(tensors), *max_size).zero_()
    for src, dst in zip(tensors, batch):
        dst[: src.shape[0], : src.shape[1], : src.shape[2]].copy_(src)
    return ImageList(batch, [t.shape[-2:] for t in tensors])
def collate_fn(config):
    """Build a DataLoader collate function batching (img, target, id, ...)
    samples into (ImageList, targets, ids); trailing fields are dropped.
    """
    def collate_data(samples):
        imgs, targets, ids, *_ = zip(*samples)
        return image_list(imgs, config.size_divisible), targets, ids
    return collate_data
| [
"cv2.cvtColor",
"utils.boxlist.BoxList",
"torch.as_tensor",
"os.path.join",
"torch.tensor"
] | [((4207, 4255), 'utils.boxlist.BoxList', 'BoxList', (['boxes', '(width, height)'], {'mode': '"""xyxyxyxy"""'}), "(boxes, (width, height), mode='xyxyxyxy')\n", (4214, 4255), False, 'from utils.boxlist import BoxList\n'), ((4536, 4557), 'torch.tensor', 'torch.tensor', (['classes'], {}), '(classes)\n', (4548, 4557), False, 'import torch\n'), ((2717, 2786), 'os.path.join', 'os.path.join', (['path', '"""annotations"""', 'f"""{self.anno_folder_name}val.json"""'], {}), "(path, 'annotations', f'{self.anno_folder_name}val.json')\n", (2729, 2786), False, 'import os\n'), ((2807, 2857), 'os.path.join', 'os.path.join', (['path', 'f"""{self.image_folder_name}val"""'], {}), "(path, f'{self.image_folder_name}val')\n", (2819, 2857), False, 'import os\n'), ((2894, 2967), 'os.path.join', 'os.path.join', (['path', '"""annotations"""', 'f"""{self.anno_folder_name}{split}.json"""'], {}), "(path, 'annotations', f'{self.anno_folder_name}{split}.json')\n", (2906, 2967), False, 'import os\n'), ((2988, 3042), 'os.path.join', 'os.path.join', (['path', 'f"""{self.image_folder_name}{split}"""'], {}), "(path, f'{self.image_folder_name}{split}')\n", (3000, 3042), False, 'import os\n'), ((3559, 3588), 'os.path.join', 'os.path.join', (['self.root', 'path'], {}), '(self.root, path)\n', (3571, 3588), False, 'import os\n'), ((3718, 3755), 'cv2.cvtColor', 'cv2.cvtColor', (['img', 'cv2.COLOR_GRAY2BGR'], {}), '(img, cv2.COLOR_GRAY2BGR)\n', (3730, 3755), False, 'import cv2\n'), ((4067, 4089), 'torch.as_tensor', 'torch.as_tensor', (['boxes'], {}), '(boxes)\n', (4082, 4089), False, 'import torch\n')] |
# -*- coding: utf-8 -*-
"""
colorful
~~~~~~~~
Terminal string styling done right, in Python.
:copyright: (c) 2017 by <NAME> <<EMAIL>>
:license: MIT, see LICENSE for more details.
"""
import sys
import colorful
def show():
    """
    Show the modifiers and colors
    """
    modifiers = ('bold', 'dimmed', 'italic', 'underlined',
                 'inversed', 'concealed', 'struckthrough')
    colors = ('red', 'green', 'yellow', 'blue', 'magenta', 'cyan', 'white')
    # One line per group: modifiers, foreground colors, background colors
    # (the 'on_' attributes); entries are space-separated, lines end in \n.
    for names, prefix in ((modifiers, ''), (colors, ''), (colors, 'on_')):
        last = len(names) - 1
        for pos, name in enumerate(names):
            styled = getattr(colorful, prefix + name)(name)
            sys.stdout.write(styled + ('\n' if pos == last else ' '))


if __name__ == '__main__':
    show()
| [
"colorful.on_yellow",
"colorful.inversed",
"colorful.on_blue",
"colorful.concealed",
"colorful.on_green",
"colorful.italic",
"colorful.green",
"colorful.on_white",
"colorful.struckthrough",
"colorful.cyan",
"colorful.on_red",
"colorful.on_cyan",
"colorful.white",
"colorful.red",
"colorfu... | [((336, 357), 'colorful.bold', 'colorful.bold', (['"""bold"""'], {}), "('bold')\n", (349, 357), False, 'import colorful\n'), ((386, 411), 'colorful.dimmed', 'colorful.dimmed', (['"""dimmed"""'], {}), "('dimmed')\n", (401, 411), False, 'import colorful\n'), ((440, 465), 'colorful.italic', 'colorful.italic', (['"""italic"""'], {}), "('italic')\n", (455, 465), False, 'import colorful\n'), ((494, 527), 'colorful.underlined', 'colorful.underlined', (['"""underlined"""'], {}), "('underlined')\n", (513, 527), False, 'import colorful\n'), ((556, 585), 'colorful.inversed', 'colorful.inversed', (['"""inversed"""'], {}), "('inversed')\n", (573, 585), False, 'import colorful\n'), ((614, 645), 'colorful.concealed', 'colorful.concealed', (['"""concealed"""'], {}), "('concealed')\n", (632, 645), False, 'import colorful\n'), ((674, 713), 'colorful.struckthrough', 'colorful.struckthrough', (['"""struckthrough"""'], {}), "('struckthrough')\n", (696, 713), False, 'import colorful\n'), ((768, 787), 'colorful.red', 'colorful.red', (['"""red"""'], {}), "('red')\n", (780, 787), False, 'import colorful\n'), ((816, 839), 'colorful.green', 'colorful.green', (['"""green"""'], {}), "('green')\n", (830, 839), False, 'import colorful\n'), ((868, 893), 'colorful.yellow', 'colorful.yellow', (['"""yellow"""'], {}), "('yellow')\n", (883, 893), False, 'import colorful\n'), ((922, 943), 'colorful.blue', 'colorful.blue', (['"""blue"""'], {}), "('blue')\n", (935, 943), False, 'import colorful\n'), ((972, 999), 'colorful.magenta', 'colorful.magenta', (['"""magenta"""'], {}), "('magenta')\n", (988, 999), False, 'import colorful\n'), ((1028, 1049), 'colorful.cyan', 'colorful.cyan', (['"""cyan"""'], {}), "('cyan')\n", (1041, 1049), False, 'import colorful\n'), ((1078, 1101), 'colorful.white', 'colorful.white', (['"""white"""'], {}), "('white')\n", (1092, 1101), False, 'import colorful\n'), ((1156, 1178), 'colorful.on_red', 'colorful.on_red', (['"""red"""'], {}), "('red')\n", (1171, 1178), 
False, 'import colorful\n'), ((1207, 1233), 'colorful.on_green', 'colorful.on_green', (['"""green"""'], {}), "('green')\n", (1224, 1233), False, 'import colorful\n'), ((1262, 1290), 'colorful.on_yellow', 'colorful.on_yellow', (['"""yellow"""'], {}), "('yellow')\n", (1280, 1290), False, 'import colorful\n'), ((1319, 1343), 'colorful.on_blue', 'colorful.on_blue', (['"""blue"""'], {}), "('blue')\n", (1335, 1343), False, 'import colorful\n'), ((1372, 1402), 'colorful.on_magenta', 'colorful.on_magenta', (['"""magenta"""'], {}), "('magenta')\n", (1391, 1402), False, 'import colorful\n'), ((1431, 1455), 'colorful.on_cyan', 'colorful.on_cyan', (['"""cyan"""'], {}), "('cyan')\n", (1447, 1455), False, 'import colorful\n'), ((1484, 1510), 'colorful.on_white', 'colorful.on_white', (['"""white"""'], {}), "('white')\n", (1501, 1510), False, 'import colorful\n')] |
# AUTOGENERATED! DO NOT EDIT! File to edit: 09_rnn.ipynb (unless otherwise specified).
__all__ = ['generate_data', 'encode', 'decode']
# Cell
from tqdm import tqdm
from sklearn.model_selection import train_test_split
from tensorflow.keras.models import Sequential
from tensorflow.keras.layers import LSTM, TimeDistributed, Dense, RepeatVector
#export
def generate_data(training_size=10):
    """Generate unique addition problems ('a+b') and their answers.

    Operands get 1..DIGITS random digits; questions are left-padded with
    spaces to MAXLEN and answers to DIGITS + 1 characters (DIGITS and
    MAXLEN are module-level constants from the exporting notebook).

    Args:
        training_size: number of unique (question, answer) pairs to create.

    Returns:
        Tuple (X, y) of equally long lists of padded question/answer strings.
    """
    X = []
    y = []
    duplicates = set()
    p_bar = tqdm(total=training_size)
    while len(X) < training_size:
        a = int(''.join(np.random.choice(list('0123456789'))
                        for i in range(np.random.randint(1, DIGITS + 1))))
        b = int(''.join(np.random.choice(list('0123456789'))
                        for i in range(np.random.randint(1, DIGITS + 1))))
        # Order-insensitive key so 'a+b' and 'b+a' are not generated twice.
        pair = tuple(sorted((a, b)))
        if pair in duplicates:
            continue
        duplicates.add(pair)
        pair_str = '{}+{}'.format(a, b)
        pair_str = ' ' * (MAXLEN - len(pair_str)) + pair_str
        ans = str(a + b)
        ans = ' ' * ((DIGITS + 1) - len(ans)) + ans
        X.append(pair_str)
        y.append(ans)
        p_bar.update(1)
    # Fix: close the progress bar so it releases its resources and does
    # not corrupt subsequent terminal output (it was previously leaked).
    p_bar.close()
    return X, y
#export
def encode(questions, answers, alphabet):
    """One-hot encode padded question and answer strings.

    Returns arrays of shape (n, MAXLEN, |alphabet|) for questions and
    (n, DIGITS + 1, |alphabet|) for answers (n = len(questions)).
    """
    index_of = {ch: pos for pos, ch in enumerate(alphabet)}
    x = np.zeros((len(questions), MAXLEN, len(alphabet)))
    y = np.zeros((len(questions), DIGITS + 1, len(alphabet)))
    # Write the single 1 per character position directly into the outputs.
    for row, question in enumerate(questions):
        for pos, ch in enumerate(question):
            x[row, pos, index_of[ch]] = 1
    for row, answer in enumerate(answers):
        for pos, ch in enumerate(answer):
            y[row, pos, index_of[ch]] = 1
    return x, y
#export
def decode(seq, alphabet, calc_argmax=True):
    """Turn a sequence of alphabet indices (or one-hot/probability rows
    when calc_argmax is True) back into a string.
    """
    if calc_argmax:
        # Collapse each row to the index of its largest entry.
        seq = np.argmax(seq, axis=-1)
    lookup = {pos: ch for pos, ch in enumerate(alphabet)}
    return ''.join(lookup[i] for i in seq)
"tqdm.tqdm"
] | [((448, 473), 'tqdm.tqdm', 'tqdm', ([], {'total': 'training_size'}), '(total=training_size)\n', (452, 473), False, 'from tqdm import tqdm\n')] |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
query_processing.py - Matches the query against the saved indexed chunks and returns a list of dictionaries with docID
author: <NAME>
email: <EMAIL>
date: 12/1/2017
"""
from bs4 import BeautifulSoup
sample_indexed_chunks_dict = {"'s": {'doc_id': {'0': [6, 39]}, 'word_count': 2},
'.onion': {'doc_id': {'4': [4, 27]}, 'word_count': 2},
'//dirnxxdraygbifgc.onion/': {'doc_id': {'0': [25]}, 'word_count': 1},
'10years.debconf.org': {'doc_id': {'4': [27]}, 'word_count': 1},
'ammo': {'doc_id': {'4': [3, 9, 19, 25]}, 'word_count': 4},
'android': {'doc_id': {'0': [24]}, 'word_count': 1},
'back': {'doc_id': {'0': [10]}, 'word_count': 1},
'backends': {'doc_id': {'4': [23]}, 'word_count': 1},
'bank': {'doc_id': {'3': [9]}, 'word_count': 1},
'become': {'doc_id': {'2': [8, 34]}, 'word_count': 2},
'best': {'doc_id': {'3': [7]}, 'word_count': 1},
'biggest': {'doc_id': {'4': [2, 25]}, 'word_count': 2},
'bitcoi': {'doc_id': {'2': [22]}, 'word_count': 1},
'bitcoin': {'doc_id': {'0': [1, 5, 8, 28, 31]}, 'word_count': 5},
'bitcoins': {'doc_id': {'0': [13]}, 'word_count': 1},
'bitpharma': {'doc_id': {'4': [0, 23]}, 'word_count': 2},
'buy': {'doc_id': {'2': [3, 29]}, 'word_count': 2},
'cannabis': {'doc_id': {'2': [18]}, 'word_count': 1},
'check': {'doc_id': {'0': [11]}, 'word_count': 1},
'ci': {'doc_id': {'2': [37]}, 'word_count': 1},
'citizen': {'doc_id': {'2': [11]}, 'word_count': 1},
'co': {'doc_id': {'3': [18]}, 'word_count': 1},
'cocai': {'doc_id': {'0': [47]}, 'word_count': 1},
'cocaine': {'doc_id': {'0': [14], '4': [8]}, 'word_count': 2},
'counterfeit': {'doc_id': {'3': [8]}, 'word_count': 1},
'counterfeits': {'doc_id': {'3': [5]}, 'word_count': 1},
'darkweb': {'doc_id': {'0': [5, 38]}, 'word_count': 2},
'debian': {'doc_id': {'4': [13]}, 'word_count': 1},
'dedope': {'doc_id': {'2': [0, 24]}, 'word_count': 2},
'deep': {'doc_id': {'0': [15]}, 'word_count': 1},
'device': {'doc_id': {'0': [20]}, 'word_count': 1},
'drug': {'doc_id': {'0': [1, 9, 27, 34, 42], '4': [5, 28]}, 'word_count': 7},
'easycoin': {'doc_id': {'0': [0, 30]}, 'word_count': 2},
'etc': {'doc_id': {'0': [25]}, 'word_count': 1},
'euro': {'doc_id': {'3': [4, 17]}, 'word_count': 2},
'europe': {'doc_id': {'3': [12]}, 'word_count': 1},
'european': {'doc_id': {'4': [3, 26]}, 'word_count': 2},
'fake': {'doc_id': {'2': [6, 19]}, 'word_count': 2},
'free': {'doc_id': {'0': [4]}, 'word_count': 1},
'für': {'doc_id': {'2': [11, 21]}, 'word_count': 2},
'german': {'doc_id': {'2': [2]}, 'word_count': 1},
'get': {'doc_id': {'2': [4, 17]}, 'word_count': 2},
'guns': {'doc_id': {'4': [1, 7, 17, 23]}, 'word_count': 4},
'heroin': {'doc_id': {'0': [22]}, 'word_count': 1},
'hidden': {'doc_id': {'0': [18]}, 'word_count': 1},
'high': {'doc_id': {'3': [2, 15]}, 'word_count': 2},
'hqer': {'doc_id': {'3': [0, 13]}, 'word_count': 2},
'http': {'doc_id': {'0': [23]}, 'word_count': 1},
'identity': {'doc_id': {'2': [1, 11, 14, 24]}, 'word_count': 4},
'iphone': {'doc_id': {'0': [22]}, 'word_count': 1},
'kaufen': {'doc_id': {'2': [8, 16, 20]}, 'word_count': 3},
'laundry': {'doc_id': {'0': [9]}, 'word_count': 1},
'list': {'doc_id': {'4': [6]}, 'word_count': 1},
'location': {'doc_id': {'0': [16]}, 'word_count': 1},
'maintainance': {'doc_id': {'0': [5]}, 'word_count': 1},
'manage': {'doc_id': {'0': [11]}, 'word_count': 1},
'marijuana': {'doc_id': {'2': [14]}, 'word_count': 1},
'mdma': {'doc_id': {'0': [20]}, 'word_count': 1},
'mixer': {'doc_id': {'0': [6]}, 'word_count': 1},
'n/a': {'doc_id': {'0': [0]}, 'word_count': 1},
'new': {'doc_id': {'2': [10, 23]}, 'word_count': 2},
'notes': {'doc_id': {'3': [10]}, 'word_count': 1},
'ns': {'doc_id': {'2': [23]}, 'word_count': 1},
'onion': {'doc_id': {'2': [0, 13]}, 'word_count': 2},
'onion.debian.org': {'doc_id': {'4': [0, 1, 2]}, 'word_count': 3},
'onionbalance': {'doc_id': {'4': [25]}, 'word_count': 1},
'oniondir': {'doc_id': {'0': [12]}, 'word_count': 1},
'online': {'doc_id': {'0': [27]}, 'word_count': 1},
'passport': {'doc_id': {'2': [7, 20]}, 'word_count': 2},
'passports': {'doc_id': {'2': [1, 6, 15, 19, 24, 27, 32]}, 'word_count': 7},
'pay': {'doc_id': {'0': [30]}, 'word_count': 1},
'peoples': {'doc_id': {'0': [0, 26, 33]}, 'word_count': 3},
'prescriptions': {'doc_id': {'4': [16]}, 'word_count': 1},
'privacy': {'doc_id': {'4': [3]}, 'word_count': 1},
'project': {'doc_id': {'4': [14]}, 'word_count': 1},
'psychedelics': {'doc_id': {'4': [12]}, 'word_count': 1},
'quality': {'doc_id': {'3': [3, 16]}, 'word_count': 2},
'real': {'doc_id': {'2': [4, 23, 30]}, 'word_count': 3},
'run': {'doc_id': {'4': [10]}, 'word_count': 1},
'search': {'doc_id': {'1': [3, 8]}, 'word_count': 2},
'served': {'doc_id': {'4': [20]}, 'word_count': 1},
'service': {'doc_id': {'0': [19]}, 'word_count': 1},
'services': {'doc_id': {'2': [2, 15]}, 'word_count': 2},
'several': {'doc_id': {'4': [22]}, 'word_count': 1},
'shop': {'doc_id': {'2': [4]}, 'word_count': 1},
'site': {'doc_id': {'0': [1]}, 'word_count': 1},
'speed': {'doc_id': {'0': [16]}, 'word_count': 1},
'sto': {'doc_id': {'4': [29]}, 'word_count': 1},
'store': {'doc_id': {'0': [2, 28, 35], '4': [6]}, 'word_count': 4},
'supplier': {'doc_id': {'0': [10, 43]}, 'word_count': 2},
'today': {'doc_id': {'2': [12, 25]}, 'word_count': 2},
'tor': {'doc_id': {'4': [0]}, 'word_count': 1},
'torch': {'doc_id': {'1': [0, 5]}, 'word_count': 2},
'uk': {'doc_id': {'2': [0, 5, 10, 26, 31, 36]}, 'word_count': 6},
'untill': {'doc_id': {'0': [7]}, 'word_count': 1},
'using': {'doc_id': {'4': [24]}, 'word_count': 1},
'viagra': {'doc_id': {'4': [20]}, 'word_count': 1},
'wall': {'doc_id': {'0': [32]}, 'word_count': 1},
'wallet': {'doc_id': {'0': [2, 29]}, 'word_count': 2},
'web': {'doc_id': {'0': [16]}, 'word_count': 1},
'websites': {'doc_id': {'0': [21]}, 'word_count': 1},
'weed': {'doc_id': {'2': [3, 6, 10]}, 'word_count': 3},
'xtc': {'doc_id': {'0': [18]}, 'word_count': 1}}
sample_content_chunk = [{'doc_id': 0,
'html': 'Site is down for maintainance.<br>\r\n<br>\r\nUntill we are back check OnionDir for other Deep Web Tor hidden service .onion websites:<br>\r\n<br>\r\n<a href="http://dirnxxdraygbifgc.onion/">http://dirnxxdraygbifgc.onion/</a>',
'link': 'http://6w6vcynl6dumn67c.onion',
'title': 'N/A'},
{'doc_id': 1,
'html': '',
'link': 'http://p3igkncehackjtib.onion',
'title': ''},
{'doc_id': 2,
'html': '',
'link': 'http://54flq67kqr5wvjqf.onion',
'title': ''},
{'doc_id': 3,
'html': '',
'link': 'http://dppmfxaacucguzpc.onion',
'title': ''},
{'doc_id': 4,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="UK Guns and ammo store, buy guns and ammo on the deep web with bitcoin at our Tor store." />\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>UK Guns and Ammo Store - Buy guns and ammo in the UK for Bitcoi',
'link': 'http://tuu66yxvrnn3of7l.onion',
'title': 'UK Guns and Ammo Store - Buy guns and ammo in the UK for Bitcoin.'},
{'doc_id': 0,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="Peoples drug store, the number one deep web drug vendor. Buy drugs with Bitcoin"/>\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>Peoples Drug Store - The Darkweb\'s Best Online Drug Supplier! - Buy cocai',
'link': 'http://newpdsuslmzqazvr.onion',
'title': "Peoples Drug Store - The Darkweb's Best Online Drug Supplier! - Buy cocaine, speed, xtc, mdma, heroin and more at peoples drug store, pay with Bitcoin"},
{'doc_id': 1,
'html': '<html xmlns="http://www.w3.org/1999/xhtml"> \n<head> \n<title>TORCH: Tor Search!</title> \n<meta http-equiv="Content-Type" content="text/html; charset=utf-8"/> \n<meta name="description" content=""/> \n<meta name="keywords" content=""/> \n<link rel="shortcut icon" href="favicon.png" type="image/png" />\n \n<style type="text/css"> \nbody{\n\ttext-align: center;\n\tfont-family:Verdana, Arial, Helvetica, sans-serif;\n\tfont-size:.7em;\n\tmargin: 10px;\n\tcolor: #000;\n\tbackground: #fff;\n\tmin-width: 520px;\n}\na{\n\tcolor: #009;\n\tt',
'link': 'http://xmh57jrzrnw6insl.onion',
'title': 'TORCH: Tor Search!'},
{'doc_id': 2,
'html': 'ns<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="DeDope - Deutscher Weed und Hash Shop, weed online kaufen, weed für bitcoins, marijuana online kaufen, cannabis online kaufen für Bitcoins" />\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>DeDope - ',
'link': 'http://kbvbh4kdddiha2ht.onion',
'title': 'DeDope - German Weed Shop - weed online kaufen, weed für bitcoins, marijuana online kaufen, cannabis online kaufen für Bitcoi'},
{'doc_id': 3,
'html': '',
'link': 'http://fogcore5n3ov3tui.onion',
'title': ''},
{'doc_id': 4,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="Bitpharma - Cocaine for Bitcoins, Psychedelics for Bitcoins, Prescriptions for Bitcoins, Viagra for Bitcoins"/>\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>BitPharma - biggest european .onion drug sto',
'link': 'http://s5q54hfww56ov2xc.onion',
'title': 'BitPharma - biggest european .onion drug store - Cocaine for Bitcoins, Psychedelics for Bitcoins, Prescriptions for Bitcoins, Viagra for Bitcoins'},
{'doc_id': 0,
'html': '',
'link': 'https://www.facebookcorewwwi.onion',
'title': ''},
{'doc_id': 1,
'html': '',
'link': 'http://xdagknwjc7aaytzh.onion',
'title': ''},
{'doc_id': 2,
'html': '',
'link': 'http://wvk32thojln4gpp4.onion',
'title': ''},
{'doc_id': 3,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="High quality counterfeit euro banknotes for bitcoin - buy fake euros with bitcoin - best quality counterfeits on the deep web"/>\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>HQER - High Quality Euro Co',
'link': 'http://y3fpieiezy2sin4a.onion',
'title': 'HQER - High Quality Euro Counterfeits - best counterfeit bank notes in europe'},
{'doc_id': 4,
'html': '<!DOCTYPE html>\n <html>\n <head>\n <meta charset="utf-8">\n <meta http-equiv="X-UA-Compatible" content="IE=edge">\n <meta name="viewport" content="width\\=device-width, initial-scale=1">\n <meta name="author" content="The Tor Project, Inc.">\n <meta name="description" content="The Tor Project\'s free software protects your privacy online. Site blocked? Email [mailto:<EMAIL>] for help downloading Tor Browser.">\n <meta name="keywords" content="tor, tor project, tor browser, avoid censorsh',
'link': 'http://expyuzz4wqqyqhjn.onion',
'title': 'Tor Project | Privacy Online'},
{'doc_id': 0,
'html': '',
'link': 'http://storegsq3o5mfxiz.onion',
'title': ''},
{'doc_id': 1,
'html': '',
'link': 'http://jvrnuue4bvbftiby.onion',
'title': ''},
{'doc_id': 2,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="UKPassports - Buy passport from the United Kingdom UK, real passports from the UK, no fake passports"/>\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>UK Passports - Buy real UK passports, become a UK ci',
'link': 'http://vfqnd6mieccqyiit.onion',
'title': 'UK Passports - Buy real UK passports, become a UK citizen now. Our passports are no fake passports, they are real passports.'},
{'doc_id': 3,
'html': '',
'link': 'http://5plvrsgydwy2sgce.onion',
'title': ''},
{'doc_id': 4,
'html': '<!DOCTYPE html>\n<HTML lang="en">\n<HEAD>\n <TITLE>onion.debian.org</TITLE>\n <meta charset="UTF-8">\n</HEAD>\n<BODY>\n\n<H1>onion.debian.org</H1>\n\nThis is a list of <a href="https://www.torproject.org/docs/hidden-services">onion services</a>\nrun by the <a href="https://www.debian.org/">Debian project</a>. Most of them are served\nfrom several backends using\n<a href="https://github.com/DonnchaC/onionbalance">OnionBalance</a>.\n\n<ul>\n\n<li id="10years.debconf.org"><strong>10years.debconf.org</strong>: <a href="',
'link': 'http://5nca3wxl33tzlzj5.onion',
'title': 'onion.debian.org'},
{'doc_id': 0,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="EasyCoin.net is a Bitcoin Wallet and Bitcoin Laundry service, we offer bitcoin laundry without any fees, use on Iphone, Android." />\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>\r\nEasyCoin Bitcoin Wall',
'link': 'http://easycoinsayj7p5l.onion',
'title': '\r\nEasyCoin Bitcoin Wallet and free Bitcoin Mixer / Bitcoin Laundry, manage your Bitcoins from any location, from any device: Iphone, Android etc - Online Bitcoin Wallet'},
{'doc_id': 1,
'html': '',
'link': 'http://5mvm7cg6bgklfjtp.onion',
'title': ''},
{'doc_id': 2,
'html': '<!DOCTYPE HTML PUBLIC "-//W3C//DTD HTML 4.01 Transitional//EN" "http://www.w3.org/TR/html4/loose.dtd">\r\n<html>\r\n<head>\r\n<meta http-equiv="Content-type" content="text/html; charset=utf-8">\r\n<meta name="description" content="Onion Identity Store - buy european fake ids, fake passports with Bitcoin"/>\r\n<link rel="icon" type="image/icon" href="favicon.ico">\t\r\n<link rel="shortcut icon" type="image/icon" href="favicon.ico">\r\n<title>Onion Identity Services - Get your fake passport and a new identity today</titl',
'link': 'http://abbujjh5vqtq77wg.onion',
'title': 'Onion Identity Services - Get your fake passport and a new identity today'},
{'doc_id': 3,
'html': '',
'link': 'http://lw4ipk5choakk5ze.onion',
'title': ''},
{'doc_id': 4,
'html': '',
'link': 'http://e2qizoerj4d6ldif.onion',
'title': ''}]
# Every document ID present in the sample chunk. Bug fix: test for key
# presence rather than truthiness, so a legitimate doc_id of 0 (which is
# falsy) is not silently dropped from the index.
key_docs = [k['doc_id'] for k in sample_content_chunk if 'doc_id' in k]
N = len(key_docs)
print(N)
def snippet_builder(doc_ID):
    """
    Build a search-result snippet for the given document ID.

    Looks the document up in ``sample_content_chunk``, strips its HTML down
    to plain text, and returns a dict with 'title', 'href' and 'desc' keys.
    Returns None when the ID is not in ``key_docs``.
    """
    if doc_ID not in key_docs:
        return None
    record = search_list_of_dict(doc_ID, sample_content_chunk)
    plain_text = BeautifulSoup(record['html']).getText()
    return {
        'title': record['title'],
        'href': record['link'],
        'desc': plain_text[1:300] + '...',
    }
def search_list_of_dict(doc_ID, content_chunk_list):
    """
    Return the first dict in content_chunk_list whose 'doc_id' equals doc_ID.

    Returns None when no entry matches (or the list is empty).
    """
    for item in content_chunk_list:
        # Bug fix: the entries use the lowercase 'doc_id' key; the previous
        # lookup of item['doc_ID'] raised KeyError on every call. Using .get
        # also tolerates entries that lack the key entirely.
        if item.get('doc_id') == doc_ID:
            return item
    return None
| [
"bs4.BeautifulSoup"
] | [((15479, 15503), 'bs4.BeautifulSoup', 'BeautifulSoup', (["a['html']"], {}), "(a['html'])\n", (15492, 15503), False, 'from bs4 import BeautifulSoup\n')] |
from rest_captcha.serializers import RestCaptchaSerializer
from rest_framework import serializers
from .models import OrgAdditionRequest
from apps.profiles.models import UserProfile
class OrgAdditionRequestSerializer(serializers.ModelSerializer):
    """Serializes an OrgAdditionRequest (a user's request to add an organization)."""
    class Meta:
        model = OrgAdditionRequest
        fields = ('org_name', 'org_website', 'parent_org_name', 'supplicant_email_address', 'supplicant_name')
class AnonymousUserCreationSerializer(RestCaptchaSerializer, serializers.ModelSerializer):
    """
    Creates an anonymous UserProfile guarded by a captcha challenge.

    NOTE(review): max_length == min_length == 24 forces the password to be
    exactly 24 characters long -- confirm this is intentional.
    """
    # Write-only so the password is accepted on input but never serialized back out.
    password = serializers.CharField(max_length=24, min_length=24, allow_blank=False, allow_null=False, write_only=True)
    class Meta:
        model = UserProfile
        fields = ('country', 'captcha_value', 'captcha_key', 'password')
| [
"rest_framework.serializers.CharField"
] | [((519, 628), 'rest_framework.serializers.CharField', 'serializers.CharField', ([], {'max_length': '(24)', 'min_length': '(24)', 'allow_blank': '(False)', 'allow_null': '(False)', 'write_only': '(True)'}), '(max_length=24, min_length=24, allow_blank=False,\n allow_null=False, write_only=True)\n', (540, 628), False, 'from rest_framework import serializers\n')] |
from posixpath import dirname
from flask import Flask, request, render_template,redirect, url_for,abort,send_from_directory
from werkzeug.utils import secure_filename
import os.path
import tempfile
import io
import os
import base64
from datetime import datetime
from pathlib import Path
import torchvision
from torchvision import transforms
import torch
from torch import no_grad
import cv2
import numpy as np
from PIL import Image
# Here are the 91 classes.
OBJECTS = [
'__background__', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'N/A', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'N/A', 'backpack', 'umbrella', 'N/A', 'N/A',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'N/A', 'dining table',
'N/A', 'N/A', 'toilet', 'N/A', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'N/A', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush'
]
# Here are the classesj for display
OBJECTS_html=['all', 'person', 'bicycle', 'car', 'motorcycle', 'airplane', 'bus',
'train', 'truck', 'boat', 'traffic light', 'fire hydrant', 'stop sign',
'parking meter', 'bench', 'bird', 'cat', 'dog', 'horse', 'sheep', 'cow',
'elephant', 'bear', 'zebra', 'giraffe', 'backpack', 'umbrella',
'handbag', 'tie', 'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard', 'surfboard', 'tennis racket',
'bottle', 'N/A', 'wine glass', 'cup', 'fork', 'knife', 'spoon', 'bowl',
'banana', 'apple', 'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed', 'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote', 'keyboard', 'cell phone',
'microwave', 'oven', 'toaster', 'sink', 'refrigerator', 'book',
'clock', 'vase', 'scissors', 'teddy bear', 'hair drier', 'toothbrush']
# Maps an object class name (key) to the list of cropped-image filenames
# saved for that class (value); populated by find() as detections are cropped.
FILE_OBJ={}
def get_predictions(pred,threshold=0.8,objects=None ):
    """
    Convert raw Faster R-CNN output into a filtered list of detections.

    pred: model output; pred[0] holds the 'labels', 'scores' and 'boxes'
        tensors for one image.
    threshold: minimum score a detection must exceed to be kept.
    objects: optional collection of class names; when given (and at least one
        detection survived the threshold), only those classes are kept.

    Returns a list of (class_name, score, [(x0, y0), (x1, y1)]) tuples.
    """
    labels = list(pred[0]['labels'].numpy())
    scores = pred[0]['scores'].detach().numpy()
    boxes = list(pred[0]['boxes'].detach().numpy())
    detections = []
    for label_index, score, box in zip(labels, scores, boxes):
        if score > threshold:
            corners = [(box[0], box[1]), (box[2], box[3])]
            detections.append((OBJECTS[label_index], score, corners))
    if objects and detections:
        detections = [hit for hit in detections if hit[0] in objects]
    return detections
def draw_box(predicted_classes,image,rect_th= 30,text_size= 3,text_th=3):
    """
    Draw a labelled bounding box for each detection on a copy of the image.

    predicted_classes: list of (class_name, probability, [(x0, y0), (x1, y1)])
        tuples as produced by get_predictions().
    image: the image as a float tensor (C, H, W) with values in [0, 1].
    rect_th: rectangle line thickness in pixels.
    text_size: font scale for the labels.
    text_th: text line thickness in pixels.

    Returns the annotated image as a uint8 BGR numpy array (OpenCV layout).
    """
    # Tensor (C, H, W) in [0, 1] -> uint8 BGR (H, W, C) so OpenCV can draw on it.
    img = (np.clip(cv2.cvtColor(np.clip(image.numpy().transpose((1, 2, 0)), 0, 1),
                                 cv2.COLOR_RGB2BGR), 0, 1) * 255).astype(np.uint8).copy()
    for name, probability, box in predicted_classes:
        label = str(name) + " likelihood"
        top_left = (int(box[0][0]), int(box[0][1]))
        bottom_right = (int(box[1][0]), int(box[1][1]))
        # Bug fix: rect_th and text_th were previously ignored (thickness was
        # hardcoded to 4 and 3), so the caller's rect_th=1, text_th=1 had no
        # effect. Honor the parameters as the signature promises.
        cv2.rectangle(img, top_left, bottom_right, (0, 255, 0), rect_th)
        cv2.putText(img, label, top_left, cv2.FONT_HERSHEY_SIMPLEX,
                    text_size, (0, 255, 0), thickness=text_th)
        cv2.putText(img, label + ": " + str(round(probability, 2)), top_left,
                    cv2.FONT_HERSHEY_SIMPLEX, text_size, (0, 255, 0), thickness=text_th)
    return img
# Faster R-CNN (ResNet-50 FPN backbone) pre-trained on COCO: predicts both
# bounding boxes and class scores for potential objects in an image.
model_ = torchvision.models.detection.fasterrcnn_resnet50_fpn(pretrained=True)
# Inference only: switch off training-mode layers (dropout/batch-norm updates).
model_.eval()
# Freeze all weights to avoid storing gradients and save memory.
for name, param in model_.named_parameters():
    param.requires_grad = False
# Thin wrapper around the Faster R-CNN `model_` that disables gradient
# tracking during the forward pass to keep memory usage down.
def model(x):
    """Run the frozen detector on x and return its predictions (no autograd)."""
    with no_grad():
        return model_(x)
# Converts a PIL image into a float tensor for the detector.
transform = transforms.Compose([transforms.ToTensor()])
app=Flask(__name__)
# File extensions accepted for upload.
dostuff=None  # NOTE(review): never read anywhere in this file -- looks dead
app.config['UPLOAD_EXTENSIONS'] = ['.jpg', '.png', '.gif','.jpeg']
# Folder for raw uploads and folder for annotated/cropped outputs.
app.config['UPLOAD_PATH'] = 'uploads'
app.config['OBJECTS_PATH'] = 'objects'
# Detection confidence threshold (set from the form in find()).
app.config['CONFIDENT_RANG'] = None
# Path and name of the most recently uploaded image.
app.config['FILE_PATH']=None
app.config['FILE_NAME']=[]
# Instance directory of the Flask app.
dir_name = Path(app.instance_path)
@app.route('/')
def home():
    """Render the index page with the current upload and the processed images."""
    allowed_exts = app.config['UPLOAD_EXTENSIONS']
    # Files currently sitting in the upload folder. NOTE(review): computed but
    # not passed to the template -- the template gets app.config['FILE_NAME'].
    uploaded = [
        entry for entry in os.listdir(app.config['UPLOAD_PATH'])
        if os.path.splitext(entry)[1] in allowed_exts
    ]
    # Annotated/cropped images produced by previous detection runs.
    processed = [
        entry for entry in os.listdir(app.config['OBJECTS_PATH'])
        if os.path.splitext(entry)[1] in allowed_exts
    ]
    return render_template('index.html', files=app.config['FILE_NAME'],
                           objects_list=OBJECTS_html, object_files=processed)
@app.route('/', methods=['POST'])
def upload_file():
    """Accept an image upload, store it under UPLOAD_PATH, and redirect home."""
    # The uploaded file object from the multipart form.
    uploaded_file = request.files['file']
    # Sanitize the client-supplied filename before using it on disk.
    filename= secure_filename(uploaded_file.filename)
    # File extension (including the leading dot).
    file_ext = os.path.splitext(filename)[1]
    # An empty name means no file was selected; silently fall through to home.
    if filename != '':
        # Reject anything that is not an allowed image type.
        if file_ext not in app.config['UPLOAD_EXTENSIONS']:
            abort(400)
        # Destination path: uploads/<filename>.
        file_path=Path(app.config['UPLOAD_PATH']).joinpath(filename)
        # Remember the file name so other routes can reference it.
        app.config['FILE_NAME']=[filename]
        # Remember the full path so find() can open the image later.
        app.config['FILE_PATH']=file_path
        uploaded_file.save(file_path)
    return redirect(url_for('home'))
@app.route('/find_object', methods=['POST'])
def find():
    """
    Run object detection on the uploaded image, draw boxes, save per-object
    crops, and render the results page.
    """
    # NOTE(review): this call's return value is discarded, so it performs no
    # redirect -- it is a no-op. Confirm whether it was meant to be returned.
    redirect(url_for('home'))
    # Object class chosen in the form ('all' means every class).
    object=request.form.get("objects")
    confident_range = request.form.get("confident_range")
    # Convert the percentage from the slider into a 0..1 threshold.
    app.config['CONFIDENT_RANG'] = int(confident_range) / int(100)
    print("++++++++", confident_range)
    # Per the original author: guard so the image is only processed once.
    object_=object
    if object_:
        half = 0.5
        print(app.config['FILE_PATH'])
        image = Image.open(app.config['FILE_PATH'])
        arr = []  # NOTE(review): never used -- looks dead
        # NOTE(review): PIL's Image.resize returns a new image; the result is
        # discarded here, so this line has no effect -- confirm intent.
        image.resize( [int(half * s) for s in image.size] )
        img = transform(image)
        # Add a batch dimension before running the detector.
        pred = model(torch.unsqueeze(img,0))
        if object=='all':
            pred_thresh=get_predictions(pred,threshold=app.config['CONFIDENT_RANG'])
        else:
            pred_thresh=get_predictions(pred,threshold=app.config['CONFIDENT_RANG'],objects=object)
        object_=None
        # Draw the labelled bounding boxes on the image.
        image_numpy=draw_box(pred_thresh,img,rect_th= 1,text_size= 1,text_th=1)
        # Save the annotated image under a new "<name>_object<ext>" name.
        filename, file_extension = os.path.splitext(app.config['FILE_NAME'][0])
        print(filename, file_extension)
        app.config['FILE_NAME'] = []
        # Name of the annotated output file.
        new_file_name=filename+"_object"+file_extension
        new_file_path=Path(app.config['OBJECTS_PATH']).joinpath(new_file_name)
        # Save with OpenCV; the drawn boxes render better this way.
        cv2.imwrite(str(new_file_path), image_numpy)
        # Crop each detected object out of the original image and save it.
        for obj in pred_thresh:
            # Top-left and bottom-right corners of the detection box.
            x_0,y_0=obj[2][0]
            x_1,y_1=obj[2][1]
            # Build a unique filename from the class name and a timestamp.
            now = datetime.now()
            dt_string = now.strftime("_%d_%m_%Y_%H_%M_%S_%f").strip()
            obj_file_name=obj[0]+dt_string+file_extension
            object_file_ext=Path(app.config['OBJECTS_PATH']).joinpath(obj_file_name)
            # Record the crop under its class in the global FILE_OBJ map.
            if not(obj[0] in set(FILE_OBJ.keys())):
                FILE_OBJ[obj[0]]=[obj_file_name ]
            else:
                FILE_OBJ[obj[0]].append(obj_file_name)
            new_image=image.copy().crop((x_0,y_0,x_1,y_1))
            new_image.save(object_file_ext)
    if (request.form.get("Find_New")):
        # User asked for a fresh image: drop the current upload and go home.
        os.remove(app.config['FILE_PATH'])
        return redirect(url_for('home'))
    # NOTE(review): new_file_name is only bound inside `if object_:` above --
    # reaching this return without it raises NameError. Confirm reachability.
    return render_template("find_object.html" ,objects=object,file=new_file_name, title=object, range1=confident_range)
@app.route('/your_object')
def your_gallery():
    """Show every detected-object crop, grouped by object class."""
    print('assss',FILE_OBJ)  # NOTE(review): debug trace left in -- consider removing
    return render_template("your_object.html" ,obj_files=FILE_OBJ)
# Serve raw uploaded files (referenced by index.html) from this route.
@app.route('/uploads/<filename>')
def upload(filename):
    """Serve an uploaded file from the UPLOAD_PATH folder."""
    return send_from_directory(app.config['UPLOAD_PATH'], filename)
# Serve annotated/cropped object images (referenced by the templates).
@app.route('/objects/<filename>')
def upload_objects(filename):
    """Serve a processed image from the OBJECTS_PATH folder."""
    return send_from_directory(app.config['OBJECTS_PATH'], filename)
@app.route('/your_object/<galleryName>')
def view_obejct(galleryName):
    """Show all saved crops for one object class (galleryName is the class name)."""
    return render_template("view_obejct.html" ,obj_files=FILE_OBJ[galleryName], title=galleryName)
@app.route('/your_galary')
def view_gallery():
    """List every file currently in the upload folder."""
    files = os.listdir(app.config['UPLOAD_PATH'])
    print("test")  # NOTE(review): debug print left in -- consider removing
    return render_template("your_galary.html" ,obj_files=files)
if __name__=="__main__":
    # Bind to all interfaces so the app is reachable from outside (e.g. a container).
    app.run(host="0.0.0.0", port=8080)
| [
"os.remove",
"flask.request.form.get",
"flask.Flask",
"flask.abort",
"werkzeug.utils.secure_filename",
"PIL.Image.open",
"pathlib.Path",
"torchvision.models.detection.fasterrcnn_resnet50_fpn",
"flask.url_for",
"os.path.splitext",
"flask.render_template",
"torch.unsqueeze",
"flask.send_from_d... | [((5165, 5234), 'torchvision.models.detection.fasterrcnn_resnet50_fpn', 'torchvision.models.detection.fasterrcnn_resnet50_fpn', ([], {'pretrained': '(True)'}), '(pretrained=True)\n', (5217, 5234), False, 'import torchvision\n'), ((5586, 5601), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (5591, 5601), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((5956, 5979), 'pathlib.Path', 'Path', (['app.instance_path'], {}), '(app.instance_path)\n', (5960, 5979), False, 'from pathlib import Path\n'), ((6063, 6100), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_PATH']"], {}), "(app.config['UPLOAD_PATH'])\n", (6073, 6100), False, 'import os\n'), ((6323, 6361), 'os.listdir', 'os.listdir', (["app.config['OBJECTS_PATH']"], {}), "(app.config['OBJECTS_PATH'])\n", (6333, 6361), False, 'import os\n'), ((6491, 6610), 'flask.render_template', 'render_template', (['"""index.html"""'], {'files': "app.config['FILE_NAME']", 'objects_list': 'OBJECTS_html', 'object_files': 'object_files'}), "('index.html', files=app.config['FILE_NAME'], objects_list=\n OBJECTS_html, object_files=object_files)\n", (6506, 6610), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((6751, 6790), 'werkzeug.utils.secure_filename', 'secure_filename', (['uploaded_file.filename'], {}), '(uploaded_file.filename)\n', (6766, 6790), False, 'from werkzeug.utils import secure_filename\n'), ((7673, 7700), 'flask.request.form.get', 'request.form.get', (['"""objects"""'], {}), "('objects')\n", (7689, 7700), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((7723, 7758), 'flask.request.form.get', 'request.form.get', (['"""confident_range"""'], {}), "('confident_range')\n", (7739, 7758), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((9924, 
9952), 'flask.request.form.get', 'request.form.get', (['"""Find_New"""'], {}), "('Find_New')\n", (9940, 9952), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((10055, 10168), 'flask.render_template', 'render_template', (['"""find_object.html"""'], {'objects': 'object', 'file': 'new_file_name', 'title': 'object', 'range1': 'confident_range'}), "('find_object.html', objects=object, file=new_file_name,\n title=object, range1=confident_range)\n", (10070, 10168), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((10268, 10323), 'flask.render_template', 'render_template', (['"""your_object.html"""'], {'obj_files': 'FILE_OBJ'}), "('your_object.html', obj_files=FILE_OBJ)\n", (10283, 10323), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((10490, 10546), 'flask.send_from_directory', 'send_from_directory', (["app.config['UPLOAD_PATH']", 'filename'], {}), "(app.config['UPLOAD_PATH'], filename)\n", (10509, 10546), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((10711, 10768), 'flask.send_from_directory', 'send_from_directory', (["app.config['OBJECTS_PATH']", 'filename'], {}), "(app.config['OBJECTS_PATH'], filename)\n", (10730, 10768), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((10855, 10947), 'flask.render_template', 'render_template', (['"""view_obejct.html"""'], {'obj_files': 'FILE_OBJ[galleryName]', 'title': 'galleryName'}), "('view_obejct.html', obj_files=FILE_OBJ[galleryName], title=\n galleryName)\n", (10870, 10947), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((11004, 11041), 'os.listdir', 'os.listdir', (["app.config['UPLOAD_PATH']"], {}), "(app.config['UPLOAD_PATH'])\n", (11014, 11041), 
False, 'import os\n'), ((11071, 11123), 'flask.render_template', 'render_template', (['"""your_galary.html"""'], {'obj_files': 'files'}), "('your_galary.html', obj_files=files)\n", (11086, 11123), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((5435, 5450), 'torch.no_grad', 'torch.no_grad', ([], {}), '()\n', (5448, 5450), False, 'import torch\n'), ((5556, 5577), 'torchvision.transforms.ToTensor', 'transforms.ToTensor', ([], {}), '()\n', (5575, 5577), False, 'from torchvision import transforms\n'), ((6827, 6853), 'os.path.splitext', 'os.path.splitext', (['filename'], {}), '(filename)\n', (6843, 6853), False, 'import os\n'), ((7621, 7636), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (7628, 7636), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((8035, 8070), 'PIL.Image.open', 'Image.open', (["app.config['FILE_PATH']"], {}), "(app.config['FILE_PATH'])\n", (8045, 8070), False, 'from PIL import Image\n'), ((8693, 8737), 'os.path.splitext', 'os.path.splitext', (["app.config['FILE_NAME'][0]"], {}), "(app.config['FILE_NAME'][0])\n", (8709, 8737), False, 'import os\n'), ((9963, 9997), 'os.remove', 'os.remove', (["app.config['FILE_PATH']"], {}), "(app.config['FILE_PATH'])\n", (9972, 9997), False, 'import os\n'), ((7077, 7087), 'flask.abort', 'abort', (['(400)'], {}), '(400)\n', (7082, 7087), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((7532, 7547), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (7539, 7547), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((8210, 8233), 'torch.unsqueeze', 'torch.unsqueeze', (['img', '(0)'], {}), '(img, 0)\n', (8225, 8233), False, 'import torch\n'), ((9383, 9397), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (9395, 9397), False, 'from 
datetime import datetime\n'), ((10022, 10037), 'flask.url_for', 'url_for', (['"""home"""'], {}), "('home')\n", (10029, 10037), False, 'from flask import Flask, request, render_template, redirect, url_for, abort, send_from_directory\n'), ((7231, 7262), 'pathlib.Path', 'Path', (["app.config['UPLOAD_PATH']"], {}), "(app.config['UPLOAD_PATH'])\n", (7235, 7262), False, 'from pathlib import Path\n'), ((8929, 8961), 'pathlib.Path', 'Path', (["app.config['OBJECTS_PATH']"], {}), "(app.config['OBJECTS_PATH'])\n", (8933, 8961), False, 'from pathlib import Path\n'), ((6180, 6202), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (6196, 6202), False, 'import os\n'), ((6416, 6438), 'os.path.splitext', 'os.path.splitext', (['file'], {}), '(file)\n', (6432, 6438), False, 'import os\n'), ((9554, 9586), 'pathlib.Path', 'Path', (["app.config['OBJECTS_PATH']"], {}), "(app.config['OBJECTS_PATH'])\n", (9558, 9586), False, 'from pathlib import Path\n')] |
# -*- coding: utf-8 -*-
from multiprocessing import Pool
import os
import time
start = time.time()
def f(x):
    """Return x squared after a 1-second delay, logging elapsed time and worker pid."""
    time.sleep(1)
    value = x * x
    print('{}s passed...\t{}\t(pid:{})'.format(int(time.time() - start), value, os.getpid()))
    return value
timeout = time.time() + 10 # stop the demo loop after ~10 seconds
# Repeatedly map f over [1, 2] on a 2-process pool until the deadline passes.
# NOTE(review): a brand-new Pool is created on every loop iteration, and
# p.close() runs inside a `with` block that tears the pool down again on
# exit -- confirm this double-teardown / per-iteration pool is intentional.
while True:
    with Pool(processes=2) as p:
        if time.time() > timeout:
            p.close()
            break
        print(p.map(f, [1, 2]))
        p.close()
# This could also be written without the context manager:
# Pool(3).map(f, [1, 2, 3, 4, 5, 6, 7, 8, 9, 10])
| [
"time.sleep",
"multiprocessing.Pool",
"os.getpid",
"time.time"
] | [((88, 99), 'time.time', 'time.time', ([], {}), '()\n', (97, 99), False, 'import time\n'), ((115, 128), 'time.sleep', 'time.sleep', (['(1)'], {}), '(1)\n', (125, 128), False, 'import time\n'), ((269, 280), 'time.time', 'time.time', ([], {}), '()\n', (278, 280), False, 'import time\n'), ((313, 330), 'multiprocessing.Pool', 'Pool', ([], {'processes': '(2)'}), '(processes=2)\n', (317, 330), False, 'from multiprocessing import Pool\n'), ((227, 238), 'os.getpid', 'os.getpid', ([], {}), '()\n', (236, 238), False, 'import os\n'), ((348, 359), 'time.time', 'time.time', ([], {}), '()\n', (357, 359), False, 'import time\n'), ((198, 209), 'time.time', 'time.time', ([], {}), '()\n', (207, 209), False, 'import time\n')] |
"""
GradeFast Grader - Runs commands on submissions and controls the grading process.
Licensed under the MIT License. For more, see the LICENSE file.
Author: <NAME> <<EMAIL>>
"""
import difflib
import os
import random
import re
from collections import defaultdict
from typing import Any, Dict, List, Mapping, Optional, Sequence, Tuple, Union
from iochannels import Channel, HTMLMemoryLog, MemoryLog, Msg
from pyprovide import Injector, inject
from gradefast import events
from gradefast.grader.banners import BANNERS
from gradefast.hosts import BackgroundCommand, CommandRunError, CommandStartError, Host
from gradefast.loggingwrapper import get_logger
from gradefast.models import Command, CommandItem, Path, Settings
from gradefast.submissions import Submission, SubmissionManager
_logger = get_logger("grader")
class Grader:
"""
Control the grading process and run commands on submissions.
"""
    @inject(injector=Injector.CURRENT_INJECTOR)
    def __init__(self, injector: Injector, channel: Channel, host: Host,
                 event_manager: events.EventManager, settings: Settings,
                 submission_manager: SubmissionManager) -> None:
        """Store the injected collaborators used throughout the grading flow."""
        self.injector = injector
        self.channel = channel
        self.host = host
        self.event_manager = event_manager
        self.settings = settings
        self.submission_manager = submission_manager
    def prompt_for_submissions(self) -> bool:
        """
        Nag the user into choosing at least one folder of submissions. The user is prompted
        (repeatedly) to choose the folder.

        :return: True if there's some submissions to go on; False if they got nuthin'.
        """
        if self.submission_manager.has_submissions():
            # We already have some submissions; only offer to add more.
            while self.channel.prompt("Want to add another folder of submissions?",
                                      ["y", "N"], "n") == "y":
                self.add_submissions(None)
        else:
            # Nothing yet: keep prompting until we have submissions or they cancel.
            self.channel.input("Press Enter to choose a folder containing the submissions...")
            while True:
                if not self.add_submissions(None):
                    # They've actually hit "cancel"; I guess we can give up
                    break
                if not self.submission_manager.has_submissions():
                    # The chosen folder matched nothing; make them try again.
                    self.channel.error("No submissions found")
                    continue
                self.channel.print()
                if self.channel.prompt("Add another folder?", ["y", "N"], "n") == "n":
                    break
        return self.submission_manager.has_submissions()
    def add_submissions(self, base_folder: Path = None) -> bool:
        """
        Add a folder of submissions to our list of submissions. The user is prompted to choose
        the folder.

        The regular expression is used to limit which folders are picked up. Also, the first
        matched group in the regex is used as the name of the submission.

        For more info on some of the parameters, see the documentation on the GradeFast wiki:
        https://github.com/jhartz/gradefast/wiki

        :param base_folder: The path to a base folder to use when prompting the user to choose a
            folder. If it does not exist, then it falls back to the Host::choose_dir default.

        :return: True if the user actually tried to pick something (even if we couldn't find any
            submissions in the folder they picked); False if they cancelled.
        """
        check_file_extensions = self.settings.check_file_extensions
        if check_file_extensions is None:
            check_file_extensions = []
        # Step 1: Prompt the user for a folder
        path = self.host.choose_folder(base_folder)
        if not path:
            self.channel.error("No folder provided")
            return False
        # Step 2: Find matching submissions
        regex = None
        if self.settings.submission_regex:
            regex = re.compile(self.settings.submission_regex)
        # NOTE: `type` here shadows the builtin; it is the entry kind from
        # Host.list_folder ("folder" or "file").
        for name, type, is_link in sorted(self.host.list_folder(path)):
            submission_match = False  # type: Any
            folder_path = None  # type: Path
            valid_submission = False
            if type == "folder":
                # Folders match directly (optionally filtered by the regex).
                if regex:
                    submission_match = regex.fullmatch(name)
                else:
                    submission_match = True
                if submission_match:
                    self.channel.print("Found submission folder: {}", name)
                    folder_path = path.append(name)
                    valid_submission = True
            elif type == "file" and name.find(".") > 0:
                # Files become submissions only if no folder of the same base
                # name already exists; zipfiles are extracted, other allowed
                # extensions are moved into a new folder.
                name, ext = name.rsplit(".", maxsplit=1)
                if regex:
                    submission_match = regex.fullmatch(name)
                else:
                    submission_match = True
                if submission_match and not self.host.exists(path.append(name)):
                    folder_path = path.append(name)
                    file_path = path.append(name + "." + ext)
                    if self.settings.check_zipfiles and ext == "zip":
                        self.channel.print("Found submission zipfile: {}.zip", name)
                        self.host.unzip(file_path, folder_path)
                        self.channel.print("  extracted to {}/", name)
                        valid_submission = True
                    elif ext in check_file_extensions:
                        self.channel.print("Found submission file: {}.{}", name, ext)
                        self.host.move_to_folder(file_path, folder_path)
                        self.channel.print("  moved into {}/", name)
                        valid_submission = True
            if valid_submission:
                # The first non-empty regex group (if any) names the submission.
                submission_name = name
                if regex:
                    for group in submission_match.groups():
                        if group:
                            submission_name = group
                            break
                # Add the submission, but don't send the event yet
                # (we'll send one big one at the end)
                self.submission_manager.add_submission(submission_name, name, folder_path,
                                                       send_event=False)
        # Step 3: Tell the world
        if self.submission_manager.has_submissions():
            self.event_manager.dispatch_event(events.NewSubmissionsEvent())
        return True
def run_commands(self) -> None:
"""
Run some commands on each of the previously added submissions.
For details on what should be in the list of commands, see the GradeFast wiki:
https://github.com/jhartz/gradefast/wiki/Command-Structure
"""
self.channel.print()
self.channel.print()
self.channel.error_bordered(random.choice(BANNERS))
self.channel.print()
submission_id = self.submission_manager.get_first_submission_id()
background_commands = [] # type: List[BackgroundCommand]
while True:
if not self.submission_manager.has_submissions():
# Ideally, this shouldn't ever happen, but...
self.channel.error_bordered("No submissions!")
break
if submission_id is None:
# Special case: we're at the end
self.channel.print()
self.channel.status_bordered("End of submissions!")
loop = self.channel.prompt(
"Loop back around to the front?", ["y", "n"],
empty_choice_msg="C'mon, you're almost done; you can make a simple choice "
"between `yes' and `no'")
if loop == "y":
submission_id = self.submission_manager.get_first_submission_id()
else:
# Well, they said they're done
break
submission = self.submission_manager.get_submission(submission_id)
self.channel.print()
self.channel.status_bordered("Next Submission: {} ({}/{})",
submission.get_name(), submission.get_id(),
self.submission_manager.get_last_submission_id())
what_to_do = self.channel.prompt(
"Press Enter to begin; (g)oto, (b)ack, (s)kip, (l)ist, (a)dd, (d)rop, "
"(q)uit, (h)elp",
["", "g", "goto", "b", "back", "s", "skip", "l", "list", "a", "add", "d", "drop",
"q", "quit", "h", "help", "?"],
show_choices=False)
if what_to_do == "?" or what_to_do == "h" or what_to_do == "help":
# Print more help
self.channel.print("(Enter): Start the next submission")
self.channel.print("g/goto: Go to a specific submission")
self.channel.print("b/back: Go to the previous submission (goto -1)")
self.channel.print("s/skip: Skip the next submission (goto +1)")
self.channel.print("l/list: List all the submissions and corresponding indices")
self.channel.print("a/add: Add another folder of submissions")
self.channel.print("d/drop: Drop the next submission from the list of submissions")
self.channel.print("q/quit: Give up on grading")
elif what_to_do == "g" or what_to_do == "goto":
# Go to a user-entered submission
self.channel.print("Enter index of submission to jump to.")
self.channel.print("n Jump to submission n")
self.channel.print("+n Jump forward n submissions")
self.channel.print("-n Jump back n submissions")
new_id = self.channel.input("Go:")
if new_id:
try:
if new_id[0] == "+":
new_submission_id = submission_id + int(new_id[1:])
elif new_id[0] == "-":
new_submission_id = submission_id - int(new_id[1:])
else:
new_submission_id = int(new_id)
except (ValueError, IndexError):
self.channel.error("Invalid index!")
if new_submission_id not in self.submission_manager.get_all_submission_ids():
self.channel.error("Invalid index: {}", new_submission_id)
else:
submission_id = new_submission_id
elif what_to_do == "b" or what_to_do == "back":
# Go back to the last-completed submission
new_submission_id = self.submission_manager.get_previous_submission_id(
submission_id)
if new_submission_id is not None:
submission_id = new_submission_id
elif what_to_do == "s" or what_to_do == "skip":
# Skip to the next submission
# (skipping the last submission will trigger the "end of submissions" branch at the
# beginning of the infinite while loop)
submission_id = self.submission_manager.get_next_submission_id(submission_id)
elif what_to_do == "l" or what_to_do == "list":
# List all the submissions
id_len = len(str(self.submission_manager.get_last_submission_id()))
for submission in self.submission_manager.get_all_submissions():
self.channel.print("{:{}}: {}",
submission.get_id(), id_len, submission.get_name())
elif what_to_do == "a" or what_to_do == "add":
# Add another folder of submissions
self.add_submissions(None)
elif what_to_do == "d" or what_to_do == "drop":
# Drop the next submission, moving on to the one after it
if self.channel.prompt("Are you sure you want to drop " + submission.get_name() +
"?", ["y", "n"]) == "y":
new_submission_id = self.submission_manager.get_next_submission_id(
submission_id)
self.submission_manager.drop_submission(submission_id)
submission_id = new_submission_id
elif what_to_do == "q" or what_to_do == "quit":
# Give up on the rest
if self.channel.prompt("Are you sure you want to quit grading?", ["y", "n"]) == "y":
break
else:
# Run the next submission
# Set up logs for the submission
html_log = HTMLMemoryLog()
text_log = MemoryLog()
self.channel.add_delegate(html_log, text_log)
submission.add_logs(html_log, text_log)
timer_context = submission.start_timer()
self.event_manager.dispatch_event(events.SubmissionStartedEvent(submission_id))
runner = CommandRunner(self.injector, self.channel, self.host, self.settings,
submission)
runner.run()
# Stop the logs and clean up
html_log.close()
text_log.close()
background_commands += runner.get_background_commands()
submission.stop_timer(timer_context)
self.event_manager.dispatch_event(events.SubmissionFinishedEvent(submission_id))
# By default, we want to move on to the next submission in the list
submission_id = self.submission_manager.get_next_submission_id(submission_id)
# All done with everything
self.event_manager.dispatch_event(events.EndOfSubmissionsEvent())
for background_command in background_commands:
background_command = background_command
self.channel.print()
self.channel.output(Msg().status("Waiting for background command")
.print("{}", background_command.get_description()))
background_command.wait()
self.channel.print()
if background_command.get_error():
self.channel.error("ERROR: {}", background_command.get_error())
if background_command.get_output():
self.channel.status("Background command output:")
self.channel.print(background_command.get_output())
class CommandRunner:
"""
Class that actually handles running commands on a submission.
"""
def __init__(self, injector: Injector, channel: Channel, host: Host, settings: Settings,
submission: Submission) -> None:
"""
Initialize a new CommandRunner to use for running commands on a submission.
"""
self.injector = injector
self.channel = channel
self.host = host
self.settings = settings
self._submission = submission
self._background_commands = [] # type: List[BackgroundCommand]
def _check_folder(self, path: Path) -> Optional[Path]:
"""
Check whether the user is satisfied with a folder, and, if not, allow them to choose a
different one.
:param path: The path to the folder to check.
:return: Either the original folder (if they're satisfied), a different folder of their
choice, or None if they're feeling particularly unagreeable today.
"""
self.channel.print()
self.host.print_folder(path, self._submission.get_path())
choice = self.channel.prompt("Does this folder satisfy your innate human needs?",
["Y", "n"], "y")
if choice == "y":
return path
else:
return self.host.choose_folder(path)
def _find_folder_from_regex(self, base_path: Path, folder_regex: str) -> Optional[Path]:
"""
Find a folder, relative to an existing folder, based on a regular expression.
:param base_path: The path to the current folder.
:param folder_regex: The regex to match to a subfolder of base_folder.
:return: The path to a valid subfolder, or None if none was found.
"""
regex = re.compile(folder_regex)
matches = []
for name, type, is_link in self.host.list_folder(base_path):
if type == "folder":
match = regex.fullmatch(name)
if match is not None:
matches.append(name)
folder = None
if len(matches) == 1:
folder = matches[0]
elif len(matches) > 1:
self.channel.status("Multiple folders found when looking for {} in {}:", folder_regex,
base_path.relative_str(self._submission.get_path()))
for name in matches:
self.channel.print(" ", name)
choice = self.channel.input("Make a choice:", matches)
if choice and choice in matches:
folder = choice
if folder is None:
return None
return base_path.append(folder)
def _find_folder(self, base_path: Path, subfolder: Union[str, Sequence[str]]) -> Optional[Path]:
"""
Find a new path to a folder based on a current folder and either a subfolder or a list of
regular expressions representing subfolders. Prompts the user for validation.
:param base_path: The path to the base folder to start the search from.
:param subfolder: The name of a subfolder (or relative path to a subfolder), or a list of
regular expressions.
:return: The path to a valid (sub)*folder, or None if none was found.
"""
path = base_path
if isinstance(subfolder, str):
path = path.append(subfolder)
else:
for folder_regex in subfolder:
new_path = self._find_folder_from_regex(path, folder_regex)
if new_path is None:
break
path = new_path
if not self.host.folder_exists(path):
self.channel.error("Folder not found: {}", path.relative_str(base_path))
path = base_path
return path
def _get_modified_command(self, command: CommandItem) -> CommandItem:
"""
Prompt the user for a modified version of a command.
:param command: The command to modify.
:return: A copy of "command" with "name" and "command" changed.
"""
self.channel.print("Existing command: {}", command.command)
new_command = self.channel.input("Enter new command (TAB to input old):", command.command)
if not new_command:
self.channel.print("No change :(")
return command
return command.get_modified(new_command)
def run(self) -> None:
"""
Run the commands on the submission.
"""
_logger.info("Running commands for: {}", self._submission)
try:
base_path = self._check_folder(self._submission.get_path())
if base_path is None:
_logger.info("Skipping submission because user didn't pick a folder")
self.channel.error("Skipping submission")
return
self._do_command_set(self.settings.commands, base_path, self.settings.base_env or {})
except (InterruptedError, KeyboardInterrupt):
self.channel.print("")
self.channel.error("Submission interrupted")
self.channel.print("")
def get_background_commands(self) -> List[BackgroundCommand]:
"""
Get any background commands that were started. (They're not necessarily still running.)
"""
return self._background_commands
def _do_command_set(self, commands: Sequence[Command], path: Path,
environment: Mapping[str, str]) -> bool:
"""
Run a group of commands on the submission.
:param commands: The commands to run.
:param path: The initial working directory for the commands.
:param environment: A base dictionary of environment variables for the commands.
:return: True if we made it through successfully, or False if we should skip the rest of
this submission.
"""
if not self.host.folder_exists(path):
_logger.warning("_do_command_set: Folder not found: {}", path)
self.channel.print()
self.channel.error("Folder not found: {}", path)
self.channel.error("Skipping {} commands: {}",
len(commands),
[command.name for command in commands])
return False
_logger.debug("_do_command_set: in {}", path)
for command in commands:
if hasattr(command, "commands"):
# It's a command set
msg = Msg(sep="").print("\n").status("Command Set")
if command.name:
msg.status(": {}", command.name)
if command.folder:
msg.print(" ({})", command.folder)
self.channel.output(msg)
new_path = path
if command.folder:
new_path = self._find_folder(path, command.folder)
if command.confirm_folder:
new_path = self._check_folder(new_path)
if new_path is None:
# The user didn't let us get a path; cancel this bit
self.channel.print()
self.channel.error("Skipping {} commands: {}",
len(command.commands),
[command.name for command in command.commands])
self.channel.input("Press Enter to continue...")
continue
new_environment = dict(environment)
new_environment.update(command.environment)
# Run the command set
# If it returns False, then we want to skip the rest of this submission
if not self._do_command_set(command.commands, new_path, new_environment):
return False
self.channel.print()
self.channel.status("End Command Set", end="")
if command.name:
self.channel.status(": {}", command.name, end="")
self.channel.print()
else:
# It's a command item
# Run the command
# If it returns False, then we want to skip the rest of this submission
if not self._do_command(command, path, environment):
return False
# Everything went well!
return True
def _do_command(self, command: CommandItem, path: Path, environment: Mapping[str, str]) -> bool:
"""
Run an individual command on the submission.
:param command: The command to run.
:param path: The working directory for the command.
:param environment: A base dictionary of environment variables for the command.
:return: True to move on to the next command, False to skip the rest of this submission.
"""
_logger.debug("_do_command: {}", command)
msg = Msg(sep="\n").print()
status_title = ("-" * 3) + " " + self._submission.get_name()
if len(status_title) < 56:
status_title += " "
status_title += "-" * (56 - len(status_title))
msg.status(status_title)
msg.status("::: {}", command.name)
if command.is_background:
msg.status(" (background command)")
for line in command.command.split("\n"):
if line:
msg.bright(" {}", line)
self.channel.output(msg.print())
# Set up the command environment dictionary
# (This is used for running the command, and if we open a shell)
env = dict(environment)
env.update(command.environment)
env.update({
"SUBMISSION_NAME": self._submission.get_name()
})
# Before starting, ask the user what they want to do
while True:
choice = self.channel.prompt("What now?", ["o", "f", "m", "s", "ss", "?", ""])
if choice == "o":
# Open a shell in the current folder
self.host.open_shell(path, env)
elif choice == "f":
# Open the current folder
self.host.open_folder(path)
elif choice == "m":
# Modify the command
command = self._get_modified_command(command)
elif choice == "s":
# Skip this command
return True
elif choice == "ss":
# Skip the rest of this submission
return False
elif choice == "?":
# Show help
msg = Msg(sep="\n")
msg.print(" o: Open a shell in the current folder")
msg.print(" f: Open the current folder")
msg.print(" m: Modify the command (just for this submission)")
msg.print(" s: Skip this command")
msg.print(" ss: Skip the rest of this submission")
msg.print(" ?: Show this help message")
msg.print(" Enter: Run the command")
self.channel.output(msg)
else:
# Run the command
self.channel.print("")
break
# Alrighty, it's command-running time!
if command.is_background:
self._run_background_command(command, path, env)
else:
self._run_foreground_command(command, path, env)
# All done with the command!
# Ask user what they want to do
while True:
self.channel.print("")
choice = self.channel.prompt("Repeat command?", ["y", "N"], "n")
self.channel.print("")
if choice == "y":
# Repeat the command
return self._do_command(command, path, environment)
else:
# Move on to the next command
return True
def _run_background_command(self, command: CommandItem, path: Path,
environment: Mapping[str, str]) -> None:
"""
Actually run an individual background command.
:param command: The command to run.
:param path: The working directory for the command.
:param environment: A dictionary of environment variables for the command.
"""
try:
self._background_commands.append(self.host.start_background_command(
command.command, path, environment, command.stdin))
except CommandStartError as e:
self.channel.print()
self.channel.error("Error starting background command: {}", e.message)
else:
self.channel.print()
self.channel.status("Background command started.")
def _run_foreground_command(self, command: CommandItem, path: Path,
environment: Mapping[str, str]) -> None:
"""
Actually run an individual foreground command.
:param command: The command to run.
:param path: The working directory for the command.
:param environment: A dictionary of environment variables for the command.
"""
# Filled with the text content to compare the command's output to (if any)
diff_reference = None
diff_reference_source = None
if command.diff:
if command.diff.content:
diff_reference = command.diff.content
diff_reference_source = "content from command config"
elif command.diff.file and self.settings.diff_file_path:
local_diff_path = os.path.join(self.settings.diff_file_path.get_local_path(),
command.diff.file)
try:
with open(local_diff_path) as f:
diff_reference = f.read()
diff_reference_source = "local file ({})".format(command.diff.file)
except FileNotFoundError:
self.channel.error("Diff file not found: {} ({})", command.diff.file,
self.settings.diff_file_path)
elif command.diff.submission_file:
diff_path = path.append(command.diff.submission_file)
try:
diff_reference = self.host.read_text_file(diff_path)
diff_reference_source = "submission file ({})".format(
command.diff.submission_file)
except FileNotFoundError:
self.channel.error("Diff file not found: {} ({})",
command.diff.submission_file, path)
elif command.diff.command:
try:
diff_reference = self.host.run_command(command.diff.command, path, environment,
print_output=False)
diff_reference_source = "command ({})".format(command.diff.command)
except CommandStartError as e:
self.channel.error("Error starting diff command: {}", e.message)
except CommandRunError as e:
self.channel.error("Error running diff command: {}", e.message)
else:
self.channel.error("Diff object doesn't include "
"\"content\", \"file\", \"submission_file\", or \"command\"")
output = None
try:
if command.is_passthrough:
self.host.run_command_passthrough(command.command, path, environment)
else:
output = self.host.run_command(command.command, path, environment, command.stdin)
except CommandStartError as e:
self.channel.print()
self.channel.error("Error starting command: {}", e.message)
return
except CommandRunError as e:
self.channel.print()
self.channel.error("Error running command: {}", e.message)
return
if diff_reference is not None:
self.channel.print()
self.channel.status("DIFF with reference from {}", diff_reference_source)
self.channel.print()
self._print_diff(output, diff_reference, command.diff)
@staticmethod
def _clean_lines(lines: Sequence[str], collapse_whitespace: bool = False) \
-> Tuple[List[str], Dict[str, List[str]]]:
"""
Clean up some lines of output to make diffing work better. In particular, make an
entirely-lowercase version and optionally collapse whitespace.
:return: A tuple with (list of str, dict) representing the list of cleaned-up lines
(each ending with a newline) and a dictionary mapping each cleaned-up line to a list
of the original line(s) that it came from (none ending with a newline).
"""
clean_to_orig = defaultdict(lambda: []) # type: Dict[str, List[str]]
clean_lines = [] # type: List[str]
for line in lines:
if collapse_whitespace:
line = re.sub(r'\s+', " ", line.strip())
else:
line = line.rstrip()
clean_line = line.lower() + "\n"
clean_lines.append(clean_line)
clean_to_orig[clean_line].append(line)
return clean_lines, clean_to_orig
def _print_diff(self, output: str, reference: str, options: CommandItem.Diff) -> None:
"""
Print the results of performing a diff between "output" and "reference".
"""
# Nothing ain't anything without a reference
self.channel.bg_happy("- Reference")
self.channel.bg_sad ("+ Output")
self.channel.bg_meh (" Both")
self.channel.print ("-----------")
self.channel.print ("")
# Split everything by lines
output_lines = output.splitlines()
reference_lines = reference.splitlines()
# Try some metric-level hackery to ignore case and clean up a bit
reference_clean, reference_orig = CommandRunner._clean_lines(
reference_lines, options.collapse_whitespace)
output_clean, output_orig = CommandRunner._clean_lines(
output_lines, options.collapse_whitespace)
# Print that diff!
for line in difflib.ndiff(reference_clean, output_clean):
signal = line[0]
content = line[2:]
self.channel.bright("{}", line[0:2], end="")
if signal == "-":
# Line from reference only
self.channel.bg_happy("{}", reference_orig[content].pop(0))
elif signal == "+":
# Line from output only
self.channel.bg_sad("{}", output_orig[content].pop(0))
elif signal == "?":
# Extra line (to mark locations, etc.)
self.channel.bright("{}", content.rstrip("\n"))
else:
# Line from both reference and output
# Pop the reference side
reference_orig[content].pop(0)
# Pop and print the output side
self.channel.bg_meh("{}", output_orig[content].pop(0))
| [
"iochannels.MemoryLog",
"difflib.ndiff",
"gradefast.events.EndOfSubmissionsEvent",
"gradefast.events.NewSubmissionsEvent",
"gradefast.events.SubmissionStartedEvent",
"pyprovide.inject",
"gradefast.loggingwrapper.get_logger",
"random.choice",
"collections.defaultdict",
"iochannels.HTMLMemoryLog",
... | [((799, 819), 'gradefast.loggingwrapper.get_logger', 'get_logger', (['"""grader"""'], {}), "('grader')\n", (809, 819), False, 'from gradefast.loggingwrapper import get_logger\n'), ((923, 965), 'pyprovide.inject', 'inject', ([], {'injector': 'Injector.CURRENT_INJECTOR'}), '(injector=Injector.CURRENT_INJECTOR)\n', (929, 965), False, 'from pyprovide import Injector, inject\n'), ((16573, 16597), 're.compile', 're.compile', (['folder_regex'], {}), '(folder_regex)\n', (16583, 16597), False, 'import re\n'), ((31719, 31743), 'collections.defaultdict', 'defaultdict', (['(lambda : [])'], {}), '(lambda : [])\n', (31730, 31743), False, 'from collections import defaultdict\n'), ((33129, 33173), 'difflib.ndiff', 'difflib.ndiff', (['reference_clean', 'output_clean'], {}), '(reference_clean, output_clean)\n', (33142, 33173), False, 'import difflib\n'), ((4051, 4093), 're.compile', 're.compile', (['self.settings.submission_regex'], {}), '(self.settings.submission_regex)\n', (4061, 4093), False, 'import re\n'), ((6960, 6982), 'random.choice', 'random.choice', (['BANNERS'], {}), '(BANNERS)\n', (6973, 6982), False, 'import random\n'), ((14045, 14075), 'gradefast.events.EndOfSubmissionsEvent', 'events.EndOfSubmissionsEvent', ([], {}), '()\n', (14073, 14075), False, 'from gradefast import events\n'), ((6527, 6555), 'gradefast.events.NewSubmissionsEvent', 'events.NewSubmissionsEvent', ([], {}), '()\n', (6553, 6555), False, 'from gradefast import events\n'), ((23727, 23740), 'iochannels.Msg', 'Msg', ([], {'sep': '"""\n"""'}), "(sep='\\n')\n", (23730, 23740), False, 'from iochannels import Channel, HTMLMemoryLog, MemoryLog, Msg\n'), ((14250, 14255), 'iochannels.Msg', 'Msg', ([], {}), '()\n', (14253, 14255), False, 'from iochannels import Channel, HTMLMemoryLog, MemoryLog, Msg\n'), ((21284, 21295), 'iochannels.Msg', 'Msg', ([], {'sep': '""""""'}), "(sep='')\n", (21287, 21295), False, 'from iochannels import Channel, HTMLMemoryLog, MemoryLog, Msg\n'), ((25393, 25406), 'iochannels.Msg', 
'Msg', ([], {'sep': '"""\n"""'}), "(sep='\\n')\n", (25396, 25406), False, 'from iochannels import Channel, HTMLMemoryLog, MemoryLog, Msg\n'), ((12951, 12966), 'iochannels.HTMLMemoryLog', 'HTMLMemoryLog', ([], {}), '()\n', (12964, 12966), False, 'from iochannels import Channel, HTMLMemoryLog, MemoryLog, Msg\n'), ((12994, 13005), 'iochannels.MemoryLog', 'MemoryLog', ([], {}), '()\n', (13003, 13005), False, 'from iochannels import Channel, HTMLMemoryLog, MemoryLog, Msg\n'), ((13232, 13276), 'gradefast.events.SubmissionStartedEvent', 'events.SubmissionStartedEvent', (['submission_id'], {}), '(submission_id)\n', (13261, 13276), False, 'from gradefast import events\n'), ((13741, 13786), 'gradefast.events.SubmissionFinishedEvent', 'events.SubmissionFinishedEvent', (['submission_id'], {}), '(submission_id)\n', (13771, 13786), False, 'from gradefast import events\n')] |
# Copyright 2015-2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
#
import os
import pytest
@pytest.fixture
def F5PluginTemplateLoc(SupportedDir):
return os.path.join(
os.path.join(SupportedDir, 'f5_plugins', 'deploy_lb.yaml')
)
def test_deploy_lb(
HeatStack,
symbols,
F5PluginTemplateLoc
):
hc, stack = HeatStack(
F5PluginTemplateLoc,
'func_test_deploy_lb',
parameters={
'client_server_image': symbols.ubuntu_image,
'client_server_flavor': symbols.ubuntu_flavor,
'client_server_sec_group': symbols.secgroup,
'key_name': symbols.ssh_key,
'client_network': symbols.client_net,
'server_network': symbols.server_net,
'bigip_pw': symbols.bigip_admin_password,
'bigip_fip': symbols.bigip_ip,
'vs_vip': symbols.vs_vip
}
)
| [
"os.path.join"
] | [((703, 761), 'os.path.join', 'os.path.join', (['SupportedDir', '"""f5_plugins"""', '"""deploy_lb.yaml"""'], {}), "(SupportedDir, 'f5_plugins', 'deploy_lb.yaml')\n", (715, 761), False, 'import os\n')] |
###############################################################################
# plot_afefeh: the basic [a/Fe] vs. [Fe/H] plot for the data section
###############################################################################
import sys
import matplotlib
import numpy
from scipy import special
matplotlib.use('Agg')
from galpy.util import bovy_plot
from matplotlib import pyplot
import define_rcsample
def plot_afefeh(plotfilename):
# Load the data
data= define_rcsample.get_rcsample()
# Plot the data
bovy_plot.bovy_print()
bovy_plot.scatterplot(data[define_rcsample._FEHTAG],
data[define_rcsample._AFETAG],
'k.',ms=.8,
levels=special.erf(numpy.arange(1,2)/numpy.sqrt(2.)),
xrange=[-1.,0.4],
yrange=[-0.15,0.35],
xlabel=r'$[\mathrm{Fe/H}]$',
ylabel=define_rcsample._AFELABEL)
# Overplot sub-samples
# low alpha, low feh
lowfeh= define_rcsample._lowlow_lowfeh(0.)
highfeh= define_rcsample._lowlow_highfeh(0.)
pyplot.plot([lowfeh,lowfeh],[define_rcsample._lowlow_lowafe(lowfeh),
define_rcsample._lowlow_highafe(lowfeh)],
'k--',lw=2.)
pyplot.plot([highfeh,highfeh],[define_rcsample._lowlow_lowafe(highfeh),
define_rcsample._lowlow_highafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._lowlow_lowafe(lowfeh),
define_rcsample._lowlow_lowafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._lowlow_highafe(lowfeh),
define_rcsample._lowlow_highafe(highfeh)],
'k--',lw=2.)
# high alpha
lowfeh= define_rcsample._highalpha_lowfeh(0.)
highfeh= define_rcsample._highalpha_highfeh(0.)
pyplot.plot([lowfeh,lowfeh],[define_rcsample._highalpha_lowafe(lowfeh),
define_rcsample._highalpha_highafe(lowfeh)],
'k--',lw=2.)
pyplot.plot([highfeh,highfeh],[define_rcsample._highalpha_lowafe(highfeh),
define_rcsample._highalpha_highafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._highalpha_lowafe(lowfeh),
define_rcsample._highalpha_lowafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._highalpha_highafe(lowfeh),
define_rcsample._highalpha_highafe(highfeh)],
'k--',lw=2.)
# solar
lowfeh= define_rcsample._solar_lowfeh(0.)
highfeh= define_rcsample._solar_highfeh(0.)
pyplot.plot([lowfeh,lowfeh],[define_rcsample._solar_lowafe(lowfeh),
define_rcsample._solar_highafe(lowfeh)],
'k--',lw=2.)
pyplot.plot([highfeh,highfeh],[define_rcsample._solar_lowafe(highfeh),
define_rcsample._solar_highafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._solar_lowafe(lowfeh),
define_rcsample._solar_lowafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._solar_highafe(lowfeh),
define_rcsample._solar_highafe(highfeh)],
'k--',lw=2.)
# high [Fe/H]
lowfeh= define_rcsample._highfeh_lowfeh(0.)
highfeh= define_rcsample._highfeh_highfeh(0.)
pyplot.plot([lowfeh,lowfeh],[define_rcsample._highfeh_lowafe(lowfeh),
define_rcsample._highfeh_highafe(lowfeh)],
'k--',lw=2.)
pyplot.plot([highfeh,highfeh],[define_rcsample._highfeh_lowafe(highfeh),
define_rcsample._highfeh_highafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._highfeh_lowafe(lowfeh),
define_rcsample._highfeh_lowafe(highfeh)],
'k--',lw=2.)
pyplot.plot([lowfeh,highfeh],[define_rcsample._highfeh_highafe(lowfeh),
define_rcsample._highfeh_highafe(highfeh)],
'k--',lw=2.)
# Label them
bovy_plot.bovy_text(-0.4,0.265,r'$\mathrm{high}\ [\alpha/\mathrm{Fe}]$',
size=15.,backgroundcolor='w')
bovy_plot.bovy_text(-0.975,0.05,r'$\mathrm{low\ [Fe/H]}$',
size=15.,backgroundcolor='w')
bovy_plot.bovy_text(0.,-0.125,r'$\mathrm{high\ [Fe/H]}$',
size=15.,backgroundcolor='w')
bovy_plot.bovy_text(-0.225,-0.125,r'$\mathrm{solar}$',
size=15.,backgroundcolor='w')
# Loci
if False:
haloc= define_rcsample.highalphalocus()
bovy_plot.bovy_plot(haloc[:,0],haloc[:,1],'k-',lw=2.,overplot=True)
haloc= define_rcsample.lowalphalocus()
bovy_plot.bovy_plot(haloc[:,0],haloc[:,1],'k-',lw=2.,overplot=True)
bovy_plot.bovy_end_print(plotfilename)
return None
if __name__ == '__main__':
plot_afefeh(sys.argv[1])
| [
"galpy.util.bovy_plot.bovy_print",
"define_rcsample._highalpha_lowafe",
"numpy.arange",
"define_rcsample._solar_lowfeh",
"define_rcsample._highfeh_highafe",
"define_rcsample._solar_highfeh",
"define_rcsample._highalpha_lowfeh",
"define_rcsample.get_rcsample",
"define_rcsample._highfeh_lowafe",
"de... | [((297, 318), 'matplotlib.use', 'matplotlib.use', (['"""Agg"""'], {}), "('Agg')\n", (311, 318), False, 'import matplotlib\n'), ((466, 496), 'define_rcsample.get_rcsample', 'define_rcsample.get_rcsample', ([], {}), '()\n', (494, 496), False, 'import define_rcsample\n'), ((521, 543), 'galpy.util.bovy_plot.bovy_print', 'bovy_plot.bovy_print', ([], {}), '()\n', (541, 543), False, 'from galpy.util import bovy_plot\n'), ((1046, 1081), 'define_rcsample._lowlow_lowfeh', 'define_rcsample._lowlow_lowfeh', (['(0.0)'], {}), '(0.0)\n', (1076, 1081), False, 'import define_rcsample\n'), ((1094, 1130), 'define_rcsample._lowlow_highfeh', 'define_rcsample._lowlow_highfeh', (['(0.0)'], {}), '(0.0)\n', (1125, 1130), False, 'import define_rcsample\n'), ((1879, 1917), 'define_rcsample._highalpha_lowfeh', 'define_rcsample._highalpha_lowfeh', (['(0.0)'], {}), '(0.0)\n', (1912, 1917), False, 'import define_rcsample\n'), ((1930, 1969), 'define_rcsample._highalpha_highfeh', 'define_rcsample._highalpha_highfeh', (['(0.0)'], {}), '(0.0)\n', (1964, 1969), False, 'import define_rcsample\n'), ((2737, 2771), 'define_rcsample._solar_lowfeh', 'define_rcsample._solar_lowfeh', (['(0.0)'], {}), '(0.0)\n', (2766, 2771), False, 'import define_rcsample\n'), ((2784, 2819), 'define_rcsample._solar_highfeh', 'define_rcsample._solar_highfeh', (['(0.0)'], {}), '(0.0)\n', (2814, 2819), False, 'import define_rcsample\n'), ((3561, 3597), 'define_rcsample._highfeh_lowfeh', 'define_rcsample._highfeh_lowfeh', (['(0.0)'], {}), '(0.0)\n', (3592, 3597), False, 'import define_rcsample\n'), ((3610, 3647), 'define_rcsample._highfeh_highfeh', 'define_rcsample._highfeh_highfeh', (['(0.0)'], {}), '(0.0)\n', (3642, 3647), False, 'import define_rcsample\n'), ((4396, 4514), 'galpy.util.bovy_plot.bovy_text', 'bovy_plot.bovy_text', (['(-0.4)', '(0.265)', '"""$\\\\mathrm{high}\\\\ [\\\\alpha/\\\\mathrm{Fe}]$"""'], {'size': '(15.0)', 'backgroundcolor': '"""w"""'}), "(-0.4, 0.265,\n '$\\\\mathrm{high}\\\\ 
[\\\\alpha/\\\\mathrm{Fe}]$', size=15.0, backgroundcolor='w'\n )\n", (4415, 4514), False, 'from galpy.util import bovy_plot\n'), ((4528, 4625), 'galpy.util.bovy_plot.bovy_text', 'bovy_plot.bovy_text', (['(-0.975)', '(0.05)', '"""$\\\\mathrm{low\\\\ [Fe/H]}$"""'], {'size': '(15.0)', 'backgroundcolor': '"""w"""'}), "(-0.975, 0.05, '$\\\\mathrm{low\\\\ [Fe/H]}$', size=15.0,\n backgroundcolor='w')\n", (4547, 4625), False, 'from galpy.util import bovy_plot\n'), ((4646, 4743), 'galpy.util.bovy_plot.bovy_text', 'bovy_plot.bovy_text', (['(0.0)', '(-0.125)', '"""$\\\\mathrm{high\\\\ [Fe/H]}$"""'], {'size': '(15.0)', 'backgroundcolor': '"""w"""'}), "(0.0, -0.125, '$\\\\mathrm{high\\\\ [Fe/H]}$', size=15.0,\n backgroundcolor='w')\n", (4665, 4743), False, 'from galpy.util import bovy_plot\n'), ((4763, 4855), 'galpy.util.bovy_plot.bovy_text', 'bovy_plot.bovy_text', (['(-0.225)', '(-0.125)', '"""$\\\\mathrm{solar}$"""'], {'size': '(15.0)', 'backgroundcolor': '"""w"""'}), "(-0.225, -0.125, '$\\\\mathrm{solar}$', size=15.0,\n backgroundcolor='w')\n", (4782, 4855), False, 'from galpy.util import bovy_plot\n'), ((5149, 5187), 'galpy.util.bovy_plot.bovy_end_print', 'bovy_plot.bovy_end_print', (['plotfilename'], {}), '(plotfilename)\n', (5173, 5187), False, 'from galpy.util import bovy_plot\n'), ((4913, 4945), 'define_rcsample.highalphalocus', 'define_rcsample.highalphalocus', ([], {}), '()\n', (4943, 4945), False, 'import define_rcsample\n'), ((4954, 5028), 'galpy.util.bovy_plot.bovy_plot', 'bovy_plot.bovy_plot', (['haloc[:, 0]', 'haloc[:, 1]', '"""k-"""'], {'lw': '(2.0)', 'overplot': '(True)'}), "(haloc[:, 0], haloc[:, 1], 'k-', lw=2.0, overplot=True)\n", (4973, 5028), False, 'from galpy.util import bovy_plot\n'), ((5037, 5068), 'define_rcsample.lowalphalocus', 'define_rcsample.lowalphalocus', ([], {}), '()\n', (5066, 5068), False, 'import define_rcsample\n'), ((5077, 5151), 'galpy.util.bovy_plot.bovy_plot', 'bovy_plot.bovy_plot', (['haloc[:, 0]', 'haloc[:, 1]', '"""k-"""'], {'lw': 
'(2.0)', 'overplot': '(True)'}), "(haloc[:, 0], haloc[:, 1], 'k-', lw=2.0, overplot=True)\n", (5096, 5151), False, 'from galpy.util import bovy_plot\n'), ((1163, 1201), 'define_rcsample._lowlow_lowafe', 'define_rcsample._lowlow_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (1193, 1201), False, 'import define_rcsample\n'), ((1236, 1275), 'define_rcsample._lowlow_highafe', 'define_rcsample._lowlow_highafe', (['lowfeh'], {}), '(lowfeh)\n', (1267, 1275), False, 'import define_rcsample\n'), ((1342, 1381), 'define_rcsample._lowlow_lowafe', 'define_rcsample._lowlow_lowafe', (['highfeh'], {}), '(highfeh)\n', (1372, 1381), False, 'import define_rcsample\n'), ((1418, 1458), 'define_rcsample._lowlow_highafe', 'define_rcsample._lowlow_highafe', (['highfeh'], {}), '(highfeh)\n', (1449, 1458), False, 'import define_rcsample\n'), ((1524, 1562), 'define_rcsample._lowlow_lowafe', 'define_rcsample._lowlow_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (1554, 1562), False, 'import define_rcsample\n'), ((1598, 1637), 'define_rcsample._lowlow_lowafe', 'define_rcsample._lowlow_lowafe', (['highfeh'], {}), '(highfeh)\n', (1628, 1637), False, 'import define_rcsample\n'), ((1703, 1742), 'define_rcsample._lowlow_highafe', 'define_rcsample._lowlow_highafe', (['lowfeh'], {}), '(lowfeh)\n', (1734, 1742), False, 'import define_rcsample\n'), ((1778, 1818), 'define_rcsample._lowlow_highafe', 'define_rcsample._lowlow_highafe', (['highfeh'], {}), '(highfeh)\n', (1809, 1818), False, 'import define_rcsample\n'), ((2002, 2043), 'define_rcsample._highalpha_lowafe', 'define_rcsample._highalpha_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (2035, 2043), False, 'import define_rcsample\n'), ((2078, 2120), 'define_rcsample._highalpha_highafe', 'define_rcsample._highalpha_highafe', (['lowfeh'], {}), '(lowfeh)\n', (2112, 2120), False, 'import define_rcsample\n'), ((2187, 2229), 'define_rcsample._highalpha_lowafe', 'define_rcsample._highalpha_lowafe', (['highfeh'], {}), '(highfeh)\n', (2220, 2229), False, 'import 
define_rcsample\n'), ((2266, 2309), 'define_rcsample._highalpha_highafe', 'define_rcsample._highalpha_highafe', (['highfeh'], {}), '(highfeh)\n', (2300, 2309), False, 'import define_rcsample\n'), ((2375, 2416), 'define_rcsample._highalpha_lowafe', 'define_rcsample._highalpha_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (2408, 2416), False, 'import define_rcsample\n'), ((2452, 2494), 'define_rcsample._highalpha_lowafe', 'define_rcsample._highalpha_lowafe', (['highfeh'], {}), '(highfeh)\n', (2485, 2494), False, 'import define_rcsample\n'), ((2560, 2602), 'define_rcsample._highalpha_highafe', 'define_rcsample._highalpha_highafe', (['lowfeh'], {}), '(lowfeh)\n', (2594, 2602), False, 'import define_rcsample\n'), ((2638, 2681), 'define_rcsample._highalpha_highafe', 'define_rcsample._highalpha_highafe', (['highfeh'], {}), '(highfeh)\n', (2672, 2681), False, 'import define_rcsample\n'), ((2852, 2889), 'define_rcsample._solar_lowafe', 'define_rcsample._solar_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (2881, 2889), False, 'import define_rcsample\n'), ((2924, 2962), 'define_rcsample._solar_highafe', 'define_rcsample._solar_highafe', (['lowfeh'], {}), '(lowfeh)\n', (2954, 2962), False, 'import define_rcsample\n'), ((3029, 3067), 'define_rcsample._solar_lowafe', 'define_rcsample._solar_lowafe', (['highfeh'], {}), '(highfeh)\n', (3058, 3067), False, 'import define_rcsample\n'), ((3104, 3143), 'define_rcsample._solar_highafe', 'define_rcsample._solar_highafe', (['highfeh'], {}), '(highfeh)\n', (3134, 3143), False, 'import define_rcsample\n'), ((3209, 3246), 'define_rcsample._solar_lowafe', 'define_rcsample._solar_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (3238, 3246), False, 'import define_rcsample\n'), ((3282, 3320), 'define_rcsample._solar_lowafe', 'define_rcsample._solar_lowafe', (['highfeh'], {}), '(highfeh)\n', (3311, 3320), False, 'import define_rcsample\n'), ((3386, 3424), 'define_rcsample._solar_highafe', 'define_rcsample._solar_highafe', (['lowfeh'], {}), '(lowfeh)\n', (3416, 
3424), False, 'import define_rcsample\n'), ((3460, 3499), 'define_rcsample._solar_highafe', 'define_rcsample._solar_highafe', (['highfeh'], {}), '(highfeh)\n', (3490, 3499), False, 'import define_rcsample\n'), ((3680, 3719), 'define_rcsample._highfeh_lowafe', 'define_rcsample._highfeh_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (3711, 3719), False, 'import define_rcsample\n'), ((3754, 3794), 'define_rcsample._highfeh_highafe', 'define_rcsample._highfeh_highafe', (['lowfeh'], {}), '(lowfeh)\n', (3786, 3794), False, 'import define_rcsample\n'), ((3861, 3901), 'define_rcsample._highfeh_lowafe', 'define_rcsample._highfeh_lowafe', (['highfeh'], {}), '(highfeh)\n', (3892, 3901), False, 'import define_rcsample\n'), ((3938, 3979), 'define_rcsample._highfeh_highafe', 'define_rcsample._highfeh_highafe', (['highfeh'], {}), '(highfeh)\n', (3970, 3979), False, 'import define_rcsample\n'), ((4045, 4084), 'define_rcsample._highfeh_lowafe', 'define_rcsample._highfeh_lowafe', (['lowfeh'], {}), '(lowfeh)\n', (4076, 4084), False, 'import define_rcsample\n'), ((4120, 4160), 'define_rcsample._highfeh_lowafe', 'define_rcsample._highfeh_lowafe', (['highfeh'], {}), '(highfeh)\n', (4151, 4160), False, 'import define_rcsample\n'), ((4226, 4266), 'define_rcsample._highfeh_highafe', 'define_rcsample._highfeh_highafe', (['lowfeh'], {}), '(lowfeh)\n', (4258, 4266), False, 'import define_rcsample\n'), ((4302, 4343), 'define_rcsample._highfeh_highafe', 'define_rcsample._highfeh_highafe', (['highfeh'], {}), '(highfeh)\n', (4334, 4343), False, 'import define_rcsample\n'), ((741, 759), 'numpy.arange', 'numpy.arange', (['(1)', '(2)'], {}), '(1, 2)\n', (753, 759), False, 'import numpy\n'), ((759, 774), 'numpy.sqrt', 'numpy.sqrt', (['(2.0)'], {}), '(2.0)\n', (769, 774), False, 'import numpy\n')] |
#!/usr/bin/env python
#=========================================================================
# This is OPEN SOURCE SOFTWARE governed by the Gnu General Public
# License (GPL) version 3, as described at www.opensource.org.
# Author:<NAME>
#=========================================================================
from __future__ import (absolute_import, division, print_function,
unicode_literals, generators, nested_scopes, with_statement)
from builtins import (bytes, dict, int, list, object, range, str, ascii,
chr, hex, input, next, oct, open, pow, round, super, filter, map, zip)
# The above imports should allow this program to run in both Python 2 and
# Python 3. You might need to update your version of module "future".
import sys
import statistics
import os
import ProgramName
import gzip
#=========================================================================
# main()
#=========================================================================
if len(sys.argv)!=3:
exit(ProgramName.get()+" <in:mRNA.mtx.gz> <out:library-sizes.txt>\n")
(mtxFile,outFile)=sys.argv[1:]
d={}
with gzip.open(mtxFile,"r") as matr:
matr.readline()
matr.readline()
matr.readline()
for line in matr:
line = line.decode("utf8")
(guide,cell,lib)=line.strip().split()
if cell in d:
d[cell]+=int(lib)
else:
d[cell]=int(lib)
OUT=open(outFile,"wt")
for key in d:
print(key,d[key],sep="\t",file=OUT)
OUT.close()
| [
"builtins.open",
"ProgramName.get",
"gzip.open",
"builtins.int"
] | [((1404, 1423), 'builtins.open', 'open', (['outFile', '"""wt"""'], {}), "(outFile, 'wt')\n", (1408, 1423), False, 'from builtins import bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((1109, 1132), 'gzip.open', 'gzip.open', (['mtxFile', '"""r"""'], {}), "(mtxFile, 'r')\n", (1118, 1132), False, 'import gzip\n'), ((1002, 1019), 'ProgramName.get', 'ProgramName.get', ([], {}), '()\n', (1017, 1019), False, 'import ProgramName\n'), ((1347, 1355), 'builtins.int', 'int', (['lib'], {}), '(lib)\n', (1350, 1355), False, 'from builtins import bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n'), ((1390, 1398), 'builtins.int', 'int', (['lib'], {}), '(lib)\n', (1393, 1398), False, 'from builtins import bytes, dict, int, list, object, range, str, ascii, chr, hex, input, next, oct, open, pow, round, super, filter, map, zip\n')] |
from django.apps import AppConfig
from django.utils.translation import ugettext_lazy as _
class TransactionsConfig(AppConfig):
name = 'bejmy.transactions'
category = 'transactions'
verbose_name = _("transactions")
def ready(self):
# apply signal receivers after all apps are ready
import bejmy.transactions.receivers # noqa
| [
"django.utils.translation.ugettext_lazy"
] | [((210, 227), 'django.utils.translation.ugettext_lazy', '_', (['"""transactions"""'], {}), "('transactions')\n", (211, 227), True, 'from django.utils.translation import ugettext_lazy as _\n')] |
"""
Copyright (c) 2021 <NAME>
"""
import statedb
import traceback
import time
import datetime
from devicedetectionskill import DetectDevicePresenceSkill
from wheatherskill import WeatherSkill
from daytimeskill import DaytimeSkill
from raumfeldskill import RaumfeldTVWakeup
from hueskill import HueDaytimeAndWeatherSkill
# logFile = str(datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")) + ".log"
jsonSettingsFile = "my_skills_config.json"
statedb = statedb.StateDataBase()
skillList = [
DetectDevicePresenceSkill(statedb=statedb, settingsFile=jsonSettingsFile),
WeatherSkill(statedb=statedb, settingsFile=jsonSettingsFile),
DaytimeSkill(statedb=statedb, settingsFile=jsonSettingsFile),
RaumfeldTVWakeup(statedb=statedb, settingsFile=jsonSettingsFile),
HueDaytimeAndWeatherSkill(statedb=statedb, settingsFile=jsonSettingsFile)
]
def startSkills():
for skill in skillList:
skill.start()
time.sleep(10)
def joinSkills():
for skill in skillList:
skill.join()
def interruptSkills():
for skill in skillList:
skill.stopEvent.set()
if __name__ == "__main__":
try:
print("Starting")
startSkills()
joinSkills()
except KeyboardInterrupt:
print("terminating...")
interruptSkills()
except Exception:
print("Error... " + str(traceback.format_exc))
finally:
joinSkills()
print("Terminated")
| [
"devicedetectionskill.DetectDevicePresenceSkill",
"daytimeskill.DaytimeSkill",
"time.sleep",
"raumfeldskill.RaumfeldTVWakeup",
"statedb.StateDataBase",
"hueskill.HueDaytimeAndWeatherSkill",
"wheatherskill.WeatherSkill"
] | [((456, 479), 'statedb.StateDataBase', 'statedb.StateDataBase', ([], {}), '()\n', (477, 479), False, 'import statedb\n'), ((498, 571), 'devicedetectionskill.DetectDevicePresenceSkill', 'DetectDevicePresenceSkill', ([], {'statedb': 'statedb', 'settingsFile': 'jsonSettingsFile'}), '(statedb=statedb, settingsFile=jsonSettingsFile)\n', (523, 571), False, 'from devicedetectionskill import DetectDevicePresenceSkill\n'), ((577, 637), 'wheatherskill.WeatherSkill', 'WeatherSkill', ([], {'statedb': 'statedb', 'settingsFile': 'jsonSettingsFile'}), '(statedb=statedb, settingsFile=jsonSettingsFile)\n', (589, 637), False, 'from wheatherskill import WeatherSkill\n'), ((643, 703), 'daytimeskill.DaytimeSkill', 'DaytimeSkill', ([], {'statedb': 'statedb', 'settingsFile': 'jsonSettingsFile'}), '(statedb=statedb, settingsFile=jsonSettingsFile)\n', (655, 703), False, 'from daytimeskill import DaytimeSkill\n'), ((709, 773), 'raumfeldskill.RaumfeldTVWakeup', 'RaumfeldTVWakeup', ([], {'statedb': 'statedb', 'settingsFile': 'jsonSettingsFile'}), '(statedb=statedb, settingsFile=jsonSettingsFile)\n', (725, 773), False, 'from raumfeldskill import RaumfeldTVWakeup\n'), ((779, 852), 'hueskill.HueDaytimeAndWeatherSkill', 'HueDaytimeAndWeatherSkill', ([], {'statedb': 'statedb', 'settingsFile': 'jsonSettingsFile'}), '(statedb=statedb, settingsFile=jsonSettingsFile)\n', (804, 852), False, 'from hueskill import HueDaytimeAndWeatherSkill\n'), ((934, 948), 'time.sleep', 'time.sleep', (['(10)'], {}), '(10)\n', (944, 948), False, 'import time\n')] |
import logging
import torch
from torch import nn
from transformers import AutoModel
from ..pooling import MeanPooling
logger = logging.getLogger(__name__)
class SiameseTransformer(nn.Module):
def __init__(self, args, config):
super(SiameseTransformer, self).__init__()
self.model_a = AutoModel.from_pretrained(args.model_name, config=config, cache_dir=args.cache_dir)
self.model_b = AutoModel.from_pretrained(args.model_name, config=config, cache_dir=args.cache_dir)
logging.info("**** Encoder will not be trained ****")
for param in self.model_a.parameters():
param.requires_grad = False
for param in self.model_b.parameters():
param.requires_grad = False
self.classifier = nn.Linear(config.hidden_size*3, config.num_labels)
self.criterion = nn.CrossEntropyLoss()
def forward(self, a, b):
labels = a.pop('labels')
if 'labels' in list(b.keys()):
b.pop('labels')
output_a = self.model_a(**a)
output_b = self.model_b(**b)
embeddings_a = MeanPooling(output_a[0], a['attention_mask'])
embeddings_b = MeanPooling(output_b[0], b['attention_mask'])
output = torch.cat([embeddings_a, embeddings_b, embeddings_a-embeddings_b], dim=1)
logits = self.classifier(output)
loss = self.criterion(logits, labels)
return loss, logits
| [
"torch.nn.CrossEntropyLoss",
"torch.cat",
"transformers.AutoModel.from_pretrained",
"logging.info",
"torch.nn.Linear",
"logging.getLogger"
] | [((129, 156), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (146, 156), False, 'import logging\n'), ((307, 395), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['args.model_name'], {'config': 'config', 'cache_dir': 'args.cache_dir'}), '(args.model_name, config=config, cache_dir=args.\n cache_dir)\n', (332, 395), False, 'from transformers import AutoModel\n'), ((414, 502), 'transformers.AutoModel.from_pretrained', 'AutoModel.from_pretrained', (['args.model_name'], {'config': 'config', 'cache_dir': 'args.cache_dir'}), '(args.model_name, config=config, cache_dir=args.\n cache_dir)\n', (439, 502), False, 'from transformers import AutoModel\n'), ((507, 560), 'logging.info', 'logging.info', (['"""**** Encoder will not be trained ****"""'], {}), "('**** Encoder will not be trained ****')\n", (519, 560), False, 'import logging\n'), ((763, 815), 'torch.nn.Linear', 'nn.Linear', (['(config.hidden_size * 3)', 'config.num_labels'], {}), '(config.hidden_size * 3, config.num_labels)\n', (772, 815), False, 'from torch import nn\n'), ((839, 860), 'torch.nn.CrossEntropyLoss', 'nn.CrossEntropyLoss', ([], {}), '()\n', (858, 860), False, 'from torch import nn\n'), ((1221, 1296), 'torch.cat', 'torch.cat', (['[embeddings_a, embeddings_b, embeddings_a - embeddings_b]'], {'dim': '(1)'}), '([embeddings_a, embeddings_b, embeddings_a - embeddings_b], dim=1)\n', (1230, 1296), False, 'import torch\n')] |
# Code generated by `typeddictgen`. DO NOT EDIT.
"""V1WindowsSecurityContextOptionsDict generated type."""
from typing import TypedDict
V1WindowsSecurityContextOptionsDict = TypedDict(
"V1WindowsSecurityContextOptionsDict",
{
"gmsaCredentialSpec": str,
"gmsaCredentialSpecName": str,
"runAsUserName": str,
},
total=False,
)
| [
"typing.TypedDict"
] | [((175, 322), 'typing.TypedDict', 'TypedDict', (['"""V1WindowsSecurityContextOptionsDict"""', "{'gmsaCredentialSpec': str, 'gmsaCredentialSpecName': str, 'runAsUserName': str\n }"], {'total': '(False)'}), "('V1WindowsSecurityContextOptionsDict', {'gmsaCredentialSpec': str,\n 'gmsaCredentialSpecName': str, 'runAsUserName': str}, total=False)\n", (184, 322), False, 'from typing import TypedDict\n')] |
from urllib import parse
from flask import Flask, send_from_directory
from flask_restful import Resource, Api, reqparse, fields, marshal_with
from os import path
from datetime import datetime
import json
app = Flask(__name__)
api = Api(app)
# parser = reqparse.RequestParser()
users = {"test": "1"}
rooms = {
"Public": [],
"1": ["test"]
}
# messages = {
# "Public": [],
# "1": {0: {"time": "2021.04.25 00:00:00", "name": "test", "text": "hey there"}}
# }
messages = {
"Public": [],
"1": [[ "2021.04.25 00:00:00", "test", "hey there"]]
}
pwd = path.dirname(path.realpath(__file__))
user_room_change = 0
message_change = {
"Public": 0,
"1": 1
}
next_room = 2
# return the full app
@app.route('/', methods=['GET'])
def index_get():
return send_from_directory(pwd, "vue_app/index.html")
@app.route('/vue.js', methods=['GET'])
def vuejs_get():
return send_from_directory(pwd, "vue_app/vue.js")
@app.route('/script.js', methods=['GET'])
def scriptjs_get():
return send_from_directory(pwd, "vue_app/script.js")
@app.route('/bootstrap.css', methods=['GET'])
def bootstrapcss_get():
return send_from_directory(pwd, "vue_app/bootstrap.css")
@app.route('/style.css', methods=['GET'])
def stylecss_get():
return send_from_directory(pwd, "vue_app/style.css")
class Login(Resource):
def post(self):
global user_room_change
parser = reqparse.RequestParser()
parser.add_argument("username",type=str)
username = parser.parse_args()["username"]
if username in users.keys():
return {"status": 0}
else:
users[username] = "Public"
rooms["Public"].append(username)
user_room_change += 1
print("new user " + username)
return {"status": 1}
class Logout(Resource):
def post(self):
global user_room_change
parser = reqparse.RequestParser()
parser.add_argument("username",type=str)
username = parser.parse_args()["username"]
if username in users.keys():
room = users.pop(username)
rooms[room].remove(username)
user_room_change += 1
if ((room != "Public") and (rooms[room] == [])):
del rooms[room]
del message_change[room]
return {"status": 1}
class GetUserRoomChange(Resource):
def post(self):
global user_room_change
parser = reqparse.RequestParser()
parser.add_argument("user_room_change", type=int)
parser.add_argument("username",type=str)
client_urc = parser.parse_args()["user_room_change"]
username = parser.parse_args()["username"]
if client_urc == user_room_change:
return {"changed": 0}
else:
# room = users[username]
# print(room)
# room_member = rooms[room]
# room.remove(username)
return {
"changed": 1,
"server_urc": user_room_change,
"users": users
}
class MakeUserRoomChange(Resource):
def post(self):
global user_room_change
global next_room
parser = reqparse.RequestParser()
parser.add_argument("myName", type=str)
parser.add_argument("otherName",type=str)
myId = parser.parse_args()["myName"]
otherId = parser.parse_args()["otherName"]
myRoom = users[myId]
otherRoom = users[otherId]
user_room_change += 1
if myRoom == otherRoom:
new_room = str(next_room)
next_room += 1
rooms[new_room] = [myId, otherId]
message_change[new_room] = 0
messages[new_room] = []
users[myId] = new_room
users[otherId] = new_room
rooms[myRoom].remove(myId)
rooms[myRoom].remove(otherId)
if ((myRoom != "Public") and (rooms[myRoom] == [])):
del rooms[myRoom]
else:
users[myId] = otherRoom
rooms[myRoom].remove(myId)
rooms[otherRoom].append(myId)
if ((myRoom != "Public") and (rooms[myRoom] == [])):
del rooms[myRoom]
return {"status": 1}
class GetMessageChange(Resource):
def post(self):
global message_change
parser = reqparse.RequestParser()
parser.add_argument("messageChange", type=int)
parser.add_argument("username",type=str)
client_msgc = parser.parse_args()["messageChange"]
username = parser.parse_args()["username"]
if username == '':
return {"changed":0}
room = users[username]
if client_msgc == message_change[room]:
return {"changed":0}
else:
return {
"changed": 1,
"messageChange": message_change[room],
"messages": messages[room]
}
class SendMessage(Resource):
def post(self):
global message_change
parser = reqparse.RequestParser()
parser.add_argument("message", type=str)
parser.add_argument("username",type=str)
msg = parser.parse_args()["message"]
username = parser.parse_args()["username"]
room = users[username]
time = datetime.now().strftime('%Y.%M.%D %H:%M:%S')
messages[room].append([time, username, msg])
message_change[room] += 1
return {"status": 1}
api.add_resource(Login,"/login")
api.add_resource(Logout,"/logout")
api.add_resource(GetUserRoomChange, "/getUserRoomChange")
api.add_resource(MakeUserRoomChange, "/makeUserRoomChange")
api.add_resource(GetMessageChange, "/getMessageChange")
api.add_resource(SendMessage, "/sendMessage")
if __name__ == "__main__":
app.run("127.0.0.1", 5000, True) | [
"flask_restful.Api",
"os.path.realpath",
"flask.Flask",
"flask_restful.reqparse.RequestParser",
"flask.send_from_directory",
"datetime.datetime.now"
] | [((211, 226), 'flask.Flask', 'Flask', (['__name__'], {}), '(__name__)\n', (216, 226), False, 'from flask import Flask, send_from_directory\n'), ((234, 242), 'flask_restful.Api', 'Api', (['app'], {}), '(app)\n', (237, 242), False, 'from flask_restful import Resource, Api, reqparse, fields, marshal_with\n'), ((585, 608), 'os.path.realpath', 'path.realpath', (['__file__'], {}), '(__file__)\n', (598, 608), False, 'from os import path\n'), ((778, 824), 'flask.send_from_directory', 'send_from_directory', (['pwd', '"""vue_app/index.html"""'], {}), "(pwd, 'vue_app/index.html')\n", (797, 824), False, 'from flask import Flask, send_from_directory\n'), ((901, 943), 'flask.send_from_directory', 'send_from_directory', (['pwd', '"""vue_app/vue.js"""'], {}), "(pwd, 'vue_app/vue.js')\n", (920, 943), False, 'from flask import Flask, send_from_directory\n'), ((1018, 1063), 'flask.send_from_directory', 'send_from_directory', (['pwd', '"""vue_app/script.js"""'], {}), "(pwd, 'vue_app/script.js')\n", (1037, 1063), False, 'from flask import Flask, send_from_directory\n'), ((1147, 1196), 'flask.send_from_directory', 'send_from_directory', (['pwd', '"""vue_app/bootstrap.css"""'], {}), "(pwd, 'vue_app/bootstrap.css')\n", (1166, 1196), False, 'from flask import Flask, send_from_directory\n'), ((1271, 1316), 'flask.send_from_directory', 'send_from_directory', (['pwd', '"""vue_app/style.css"""'], {}), "(pwd, 'vue_app/style.css')\n", (1290, 1316), False, 'from flask import Flask, send_from_directory\n'), ((1410, 1434), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (1432, 1434), False, 'from flask_restful import Resource, Api, reqparse, fields, marshal_with\n'), ((1906, 1930), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (1928, 1930), False, 'from flask_restful import Resource, Api, reqparse, fields, marshal_with\n'), ((2454, 2478), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', 
(2476, 2478), False, 'from flask_restful import Resource, Api, reqparse, fields, marshal_with\n'), ((3202, 3226), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (3224, 3226), False, 'from flask_restful import Resource, Api, reqparse, fields, marshal_with\n'), ((4349, 4373), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (4371, 4373), False, 'from flask_restful import Resource, Api, reqparse, fields, marshal_with\n'), ((5039, 5063), 'flask_restful.reqparse.RequestParser', 'reqparse.RequestParser', ([], {}), '()\n', (5061, 5063), False, 'from flask_restful import Resource, Api, reqparse, fields, marshal_with\n'), ((5304, 5318), 'datetime.datetime.now', 'datetime.now', ([], {}), '()\n', (5316, 5318), False, 'from datetime import datetime\n')] |
import typing as tp
from datetime import date
import numpy as np
import pendulum
def get_latest_price_from_quote(price_data: tp.Dict[str, tp.Any]) -> float:
if not price_data:
raise ValueError("No price data found.")
# regular market price
regular_market_price = price_data["regularMarketPrice"]["raw"]
regular_market_time = pendulum.from_timestamp(price_data["regularMarketTime"])
prices = {"regular": (regular_market_time, regular_market_price)}
# pre-market price
pre_market_price = (
price_data.get("preMarketPrice", {}).get("raw")
if price_data.get("preMarketPrice", {}) is not None
else None
)
if pre_market_price is not None:
prices["pre"] = (
pendulum.from_timestamp(price_data["preMarketTime"]),
pre_market_price,
)
# post-market price
post_market_price = (
price_data.get("postMarketPrice", {}).get("raw")
if price_data.get("postMarketPrice", {}) is not None
else None
)
if post_market_price is not None:
prices["post"] = (
pendulum.from_timestamp(price_data["postMarketTime"]),
post_market_price,
)
_, (_, price) = min(prices.items(), key=lambda x: abs(pendulum.now() - x[1][0]))
return price
def get_asset_data_from_quote(quote: tp.Dict[str, tp.Any]) -> tp.Dict[str, tp.Any]:
profile = quote.get("summaryProfile", {}) or {}
quote_type = quote.get("quoteType", {}) or {}
price_data = quote.get("price", {}) or {}
symbol = quote.get("symbol") or quote_type.get("symbol") or price_data.get("symbol")
name = quote_type.get("longName", price_data.get("longName")) or quote_type.get(
"shortName", price_data.get("shortName")
)
asset_type = quote_type.get("quoteType") or price_data.get("quoteType")
currency = price_data.get("currency")
latest_price = get_latest_price_from_quote(price_data)
sector = profile.get("sector")
industry = profile.get("industry")
return {
"symbol": symbol,
"name": name,
"asset_type": asset_type,
"currency": currency,
"latest_price": latest_price,
"sector": sector,
"industry": industry,
}
def get_earnings_data_from_quote(
quote: tp.Dict[str, tp.Any]
) -> tp.List[tp.Dict[str, tp.Union[str, float]]]:
earnings = quote.get("earnings")
if not earnings or not isinstance(earnings, dict):
return []
earnings_chart = earnings.get("earningsChart", {})
quarterly_earnings = earnings_chart.get("quarterly", [])
quarterly_financial_chart = earnings.get("financialsChart", {}).get("quarterly", [])
date_to_earnings = {
e.get("date", ""): {
"actual": e.get("actual", {}).get("raw", np.nan),
"estimate": e.get("estimate", {}).get("raw", np.nan),
}
for e in quarterly_earnings
if "date" in e
}
date_to_fin_chart = {
c.get("date", ""): {
"revenue": c.get("revenue", {}).get("raw", np.nan),
"earnings": c.get("earnings", {}).get("raw", np.nan),
}
for c in quarterly_financial_chart
if "date" in c
}
all_dates = set(list(date_to_earnings.keys()) + list(date_to_fin_chart.keys()))
passed_earnings = [
dict(
quarter=quarter,
actual=date_to_earnings.get(quarter, {}).get("actual", np.nan),
estimate=date_to_earnings.get(quarter, {}).get("estimate", np.nan),
revenue=date_to_fin_chart.get(quarter, {}).get("revenue", np.nan),
earnings=date_to_fin_chart.get(quarter, {}).get("earnings", np.nan),
)
for quarter in all_dates
]
next_earnings = [
dict(
quarter=(
f"{earnings_chart.get('currentQuarterEstimateDate', '')}"
f"{earnings_chart.get('currentQuarterEstimateYear', '')}"
),
estimate=earnings_chart.get("currentQuarterEstimate", {}).get(
"raw", np.nan
),
actual=np.nan,
revenue=np.nan,
earnings=np.nan,
)
]
return passed_earnings + next_earnings
def get_trends_data_from_quote(
quote: tp.Dict[str, tp.Any]
) -> tp.List[tp.Dict[str, tp.Any]]:
if not quote:
return []
recommendation_trend = quote.get("recommendationTrend", {})
if not recommendation_trend:
return []
trends = recommendation_trend.get("trend", [])
return [
{
"date": date.today() + pendulum.duration(months=int(trend["period"][:-1])),
"strong_buy": trend.get("strongBuy", 0),
"buy": trend.get("buy", 0),
"hold": trend.get("hold", 0),
"sell": trend.get("sell", 0),
"strong_sell": trend.get("stronSell", 0),
}
for trend in trends
]
| [
"pendulum.now",
"datetime.date.today",
"pendulum.from_timestamp"
] | [((353, 409), 'pendulum.from_timestamp', 'pendulum.from_timestamp', (["price_data['regularMarketTime']"], {}), "(price_data['regularMarketTime'])\n", (376, 409), False, 'import pendulum\n'), ((745, 797), 'pendulum.from_timestamp', 'pendulum.from_timestamp', (["price_data['preMarketTime']"], {}), "(price_data['preMarketTime'])\n", (768, 797), False, 'import pendulum\n'), ((1109, 1162), 'pendulum.from_timestamp', 'pendulum.from_timestamp', (["price_data['postMarketTime']"], {}), "(price_data['postMarketTime'])\n", (1132, 1162), False, 'import pendulum\n'), ((4559, 4571), 'datetime.date.today', 'date.today', ([], {}), '()\n', (4569, 4571), False, 'from datetime import date\n'), ((1264, 1278), 'pendulum.now', 'pendulum.now', ([], {}), '()\n', (1276, 1278), False, 'import pendulum\n')] |